Diffstat (limited to 'net')
-rw-r--r--  net/Kconfig | 2
-rw-r--r--  net/appletalk/aarp.c | 21
-rw-r--r--  net/atm/clip.c | 4
-rw-r--r--  net/atm/lec.c | 29
-rw-r--r--  net/atm/proc.c | 32
-rw-r--r--  net/core/dev_mcast.c | 37
-rw-r--r--  net/core/dst.c | 3
-rw-r--r--  net/core/neighbour.c | 7
-rw-r--r--  net/core/netpoll.c | 6
-rw-r--r--  net/core/sock.c | 19
-rw-r--r--  net/dccp/ipv4.c | 11
-rw-r--r--  net/dccp/ipv6.c | 38
-rw-r--r--  net/dccp/minisocks.c | 2
-rw-r--r--  net/decnet/af_decnet.c | 21
-rw-r--r--  net/ipv4/Kconfig | 10
-rw-r--r--  net/ipv4/Makefile | 3
-rw-r--r--  net/ipv4/af_inet.c | 19
-rw-r--r--  net/ipv4/arp.c | 9
-rw-r--r--  net/ipv4/devinet.c | 21
-rw-r--r--  net/ipv4/icmp.c | 101
-rw-r--r--  net/ipv4/igmp.c | 39
-rw-r--r--  net/ipv4/inet_connection_sock.c | 2
-rw-r--r--  net/ipv4/ip_options.c | 5
-rw-r--r--  net/ipv4/ipconfig.c | 7
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 5
-rw-r--r--  net/ipv4/netfilter/ipt_MASQUERADE.c | 14
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 2
-rw-r--r--  net/ipv4/proc.c | 5
-rw-r--r--  net/ipv4/route.c | 75
-rw-r--r--  net/ipv4/syncookies.c | 11
-rw-r--r--  net/ipv4/tcp_cubic.c | 35
-rw-r--r--  net/ipv4/tcp_input.c | 1
-rw-r--r--  net/ipv4/tcp_ipv4.c | 30
-rw-r--r--  net/ipv4/tcp_minisocks.c | 4
-rw-r--r--  net/ipv4/tcp_output.c | 1
-rw-r--r--  net/ipv4/udp.c | 1090
-rw-r--r--  net/ipv4/udp_ipv4.c | 1134
-rw-r--r--  net/ipv4/udplite_ipv4.c (renamed from net/ipv4/udplite.c) | 0
-rw-r--r--  net/ipv6/Makefile | 4
-rw-r--r--  net/ipv6/addrconf.c | 203
-rw-r--r--  net/ipv6/addrlabel.c | 5
-rw-r--r--  net/ipv6/af_inet6.c | 189
-rw-r--r--  net/ipv6/anycast.c | 2
-rw-r--r--  net/ipv6/fib6_rules.c | 103
-rw-r--r--  net/ipv6/icmp.c | 133
-rw-r--r--  net/ipv6/ip6_fib.c | 292
-rw-r--r--  net/ipv6/ip6_input.c | 5
-rw-r--r--  net/ipv6/ip6_output.c | 8
-rw-r--r--  net/ipv6/ip6_tunnel.c | 6
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 128
-rw-r--r--  net/ipv6/mcast.c | 72
-rw-r--r--  net/ipv6/ndisc.c | 34
-rw-r--r--  net/ipv6/netfilter.c | 4
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c | 2
-rw-r--r--  net/ipv6/proc.c | 9
-rw-r--r--  net/ipv6/route.c | 551
-rw-r--r--  net/ipv6/sit.c | 6
-rw-r--r--  net/ipv6/syncookies.c | 267
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c | 15
-rw-r--r--  net/ipv6/tcp_ipv6.c | 122
-rw-r--r--  net/ipv6/udp_ipv6.c (renamed from net/ipv6/udp.c) | 16
-rw-r--r--  net/ipv6/udplite_ipv6.c (renamed from net/ipv6/udplite.c) | 0
-rw-r--r--  net/ipv6/xfrm6_policy.c | 7
-rw-r--r--  net/key/af_key.c | 120
-rw-r--r--  net/mac80211/Makefile | 1
-rw-r--r--  net/mac80211/cfg.c | 68
-rw-r--r--  net/mac80211/debugfs.c | 47
-rw-r--r--  net/mac80211/debugfs_netdev.c | 9
-rw-r--r--  net/mac80211/debugfs_sta.c | 170
-rw-r--r--  net/mac80211/ieee80211.c | 825
-rw-r--r--  net/mac80211/ieee80211_i.h | 164
-rw-r--r--  net/mac80211/ieee80211_iface.c | 10
-rw-r--r--  net/mac80211/ieee80211_ioctl.c | 217
-rw-r--r--  net/mac80211/ieee80211_key.h | 26
-rw-r--r--  net/mac80211/ieee80211_rate.c | 15
-rw-r--r--  net/mac80211/ieee80211_rate.h | 28
-rw-r--r--  net/mac80211/ieee80211_sta.c | 1004
-rw-r--r--  net/mac80211/key.c | 171
-rw-r--r--  net/mac80211/rc80211_pid_algo.c | 86
-rw-r--r--  net/mac80211/rc80211_simple.c | 72
-rw-r--r--  net/mac80211/regdomain.c | 152
-rw-r--r--  net/mac80211/rx.c | 572
-rw-r--r--  net/mac80211/sta_info.c | 168
-rw-r--r--  net/mac80211/sta_info.h | 110
-rw-r--r--  net/mac80211/tx.c | 335
-rw-r--r--  net/mac80211/util.c | 142
-rw-r--r--  net/mac80211/wep.c | 16
-rw-r--r--  net/mac80211/wep.h | 4
-rw-r--r--  net/mac80211/wme.c | 135
-rw-r--r--  net/mac80211/wme.h | 23
-rw-r--r--  net/mac80211/wpa.c | 72
-rw-r--r--  net/mac80211/wpa.h | 12
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 2
-rw-r--r--  net/netlink/af_netlink.c | 26
-rw-r--r--  net/rxrpc/ar-proc.c | 4
-rw-r--r--  net/sctp/ipv6.c | 5
-rw-r--r--  net/sctp/outqueue.c | 3
-rw-r--r--  net/sctp/proc.c | 20
-rw-r--r--  net/sctp/protocol.c | 3
-rw-r--r--  net/sctp/sm_statefuns.c | 2
-rw-r--r--  net/tipc/socket.c | 56
-rw-r--r--  net/wireless/Makefile | 2
-rw-r--r--  net/wireless/core.c | 41
-rw-r--r--  net/wireless/core.h | 3
-rw-r--r--  net/wireless/nl80211.c | 118
-rw-r--r--  net/wireless/reg.c | 159
-rw-r--r--  net/wireless/util.c | 98
-rw-r--r--  net/xfrm/xfrm_policy.c | 79
-rw-r--r--  net/xfrm/xfrm_state.c | 53
-rw-r--r--  net/xfrm/xfrm_user.c | 71
110 files changed, 6315 insertions, 4242 deletions
diff --git a/net/Kconfig b/net/Kconfig
index 6627c6ae5db6..acbf7c60e89b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -45,7 +45,7 @@ config INET
45 ---help--- 45 ---help---
46 These are the protocols used on the Internet and on most local 46 These are the protocols used on the Internet and on most local
47 Ethernets. It is highly recommended to say Y here (this will enlarge 47 Ethernets. It is highly recommended to say Y here (this will enlarge
48 your kernel by about 144 KB), since some programs (e.g. the X window 48 your kernel by about 400 KB), since some programs (e.g. the X window
49 system) use TCP/IP even if your machine is not connected to any 49 system) use TCP/IP even if your machine is not connected to any
50 other computer. You will get the so-called loopback device which 50 other computer. You will get the so-called loopback device which
51 allows you to ping yourself (great fun, that!). 51 allows you to ping yourself (great fun, that!).
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 18058bbc7962..61166f66479f 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -1033,25 +1033,8 @@ static const struct seq_operations aarp_seq_ops = {
1033 1033
1034static int aarp_seq_open(struct inode *inode, struct file *file) 1034static int aarp_seq_open(struct inode *inode, struct file *file)
1035{ 1035{
1036 struct seq_file *seq; 1036 return seq_open_private(file, &aarp_seq_ops,
1037 int rc = -ENOMEM; 1037 sizeof(struct aarp_iter_state));
1038 struct aarp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
1039
1040 if (!s)
1041 goto out;
1042
1043 rc = seq_open(file, &aarp_seq_ops);
1044 if (rc)
1045 goto out_kfree;
1046
1047 seq = file->private_data;
1048 seq->private = s;
1049 memset(s, 0, sizeof(*s));
1050out:
1051 return rc;
1052out_kfree:
1053 kfree(s);
1054 goto out;
1055} 1038}
1056 1039
1057const struct file_operations atalk_seq_arp_fops = { 1040const struct file_operations atalk_seq_arp_fops = {
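
The aarp.c, lec.c, atm/proc.c and af_decnet.c hunks all drop the same open-coded kmalloc()/seq_open() boilerplate in favour of the seq_open_private()/__seq_open_private() helpers from fs/seq_file.c, paired with seq_release_private() where the release op is shown. A rough sketch of what those helpers are assumed to do here, not the exact in-tree implementation:

void *__seq_open_private(struct file *f, const struct seq_operations *ops,
			 int psize)
{
	void *priv;
	struct seq_file *seq;

	/* zeroed allocation, so the old kmalloc()+memset() pairs go away */
	priv = kzalloc(psize, GFP_KERNEL);
	if (!priv)
		return NULL;
	if (seq_open(f, ops)) {
		kfree(priv);
		return NULL;
	}
	seq = f->private_data;
	seq->private = priv;
	return priv;
}

int seq_open_private(struct file *f, const struct seq_operations *ops,
		     int psize)
{
	return __seq_open_private(f, ops, psize) ? 0 : -ENOMEM;
}
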
diff --git a/net/atm/clip.c b/net/atm/clip.c
index d30167c0b48e..d45971bd286c 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -648,10 +648,6 @@ static int clip_inet_event(struct notifier_block *this, unsigned long event,
648 struct in_device *in_dev; 648 struct in_device *in_dev;
649 649
650 in_dev = ((struct in_ifaddr *)ifa)->ifa_dev; 650 in_dev = ((struct in_ifaddr *)ifa)->ifa_dev;
651 if (!in_dev || !in_dev->dev) {
652 printk(KERN_WARNING "clip_inet_event: no device\n");
653 return NOTIFY_DONE;
654 }
655 /* 651 /*
656 * Transitions are of the down-change-up type, so it's sufficient to 652 * Transitions are of the down-change-up type, so it's sufficient to
657 * handle the change on up. 653 * handle the change on up.
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 0e450d12f035..e2d800d818e3 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1169,32 +1169,7 @@ static const struct seq_operations lec_seq_ops = {
1169 1169
1170static int lec_seq_open(struct inode *inode, struct file *file) 1170static int lec_seq_open(struct inode *inode, struct file *file)
1171{ 1171{
1172 struct lec_state *state; 1172 return seq_open_private(file, &lec_seq_ops, sizeof(struct lec_state));
1173 struct seq_file *seq;
1174 int rc = -EAGAIN;
1175
1176 state = kmalloc(sizeof(*state), GFP_KERNEL);
1177 if (!state) {
1178 rc = -ENOMEM;
1179 goto out;
1180 }
1181
1182 rc = seq_open(file, &lec_seq_ops);
1183 if (rc)
1184 goto out_kfree;
1185 seq = file->private_data;
1186 seq->private = state;
1187out:
1188 return rc;
1189
1190out_kfree:
1191 kfree(state);
1192 goto out;
1193}
1194
1195static int lec_seq_release(struct inode *inode, struct file *file)
1196{
1197 return seq_release_private(inode, file);
1198} 1173}
1199 1174
1200static const struct file_operations lec_seq_fops = { 1175static const struct file_operations lec_seq_fops = {
@@ -1202,7 +1177,7 @@ static const struct file_operations lec_seq_fops = {
1202 .open = lec_seq_open, 1177 .open = lec_seq_open,
1203 .read = seq_read, 1178 .read = seq_read,
1204 .llseek = seq_lseek, 1179 .llseek = seq_lseek,
1205 .release = lec_seq_release, 1180 .release = seq_release_private,
1206}; 1181};
1207#endif 1182#endif
1208 1183
diff --git a/net/atm/proc.c b/net/atm/proc.c
index e9693aed7ef8..b995b66b5585 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -114,31 +114,13 @@ static int __vcc_seq_open(struct inode *inode, struct file *file,
114 int family, const struct seq_operations *ops) 114 int family, const struct seq_operations *ops)
115{ 115{
116 struct vcc_state *state; 116 struct vcc_state *state;
117 struct seq_file *seq;
118 int rc = -ENOMEM;
119 117
120 state = kmalloc(sizeof(*state), GFP_KERNEL); 118 state = __seq_open_private(file, ops, sizeof(*state));
121 if (!state) 119 if (state == NULL)
122 goto out; 120 return -ENOMEM;
123
124 rc = seq_open(file, ops);
125 if (rc)
126 goto out_kfree;
127 121
128 state->family = family; 122 state->family = family;
129 123 return 0;
130 seq = file->private_data;
131 seq->private = state;
132out:
133 return rc;
134out_kfree:
135 kfree(state);
136 goto out;
137}
138
139static int vcc_seq_release(struct inode *inode, struct file *file)
140{
141 return seq_release_private(inode, file);
142} 124}
143 125
144static void *vcc_seq_start(struct seq_file *seq, loff_t *pos) 126static void *vcc_seq_start(struct seq_file *seq, loff_t *pos)
@@ -314,7 +296,7 @@ static const struct file_operations pvc_seq_fops = {
314 .open = pvc_seq_open, 296 .open = pvc_seq_open,
315 .read = seq_read, 297 .read = seq_read,
316 .llseek = seq_lseek, 298 .llseek = seq_lseek,
317 .release = vcc_seq_release, 299 .release = seq_release_private,
318}; 300};
319 301
320static int vcc_seq_show(struct seq_file *seq, void *v) 302static int vcc_seq_show(struct seq_file *seq, void *v)
@@ -348,7 +330,7 @@ static const struct file_operations vcc_seq_fops = {
348 .open = vcc_seq_open, 330 .open = vcc_seq_open,
349 .read = seq_read, 331 .read = seq_read,
350 .llseek = seq_lseek, 332 .llseek = seq_lseek,
351 .release = vcc_seq_release, 333 .release = seq_release_private,
352}; 334};
353 335
354static int svc_seq_show(struct seq_file *seq, void *v) 336static int svc_seq_show(struct seq_file *seq, void *v)
@@ -383,7 +365,7 @@ static const struct file_operations svc_seq_fops = {
383 .open = svc_seq_open, 365 .open = svc_seq_open,
384 .read = seq_read, 366 .read = seq_read,
385 .llseek = seq_lseek, 367 .llseek = seq_lseek,
386 .release = vcc_seq_release, 368 .release = seq_release_private,
387}; 369};
388 370
389static ssize_t proc_dev_atm_read(struct file *file, char __user *buf, 371static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index cec582563e0d..f8a3455f4493 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -156,39 +156,14 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
156EXPORT_SYMBOL(dev_mc_unsync); 156EXPORT_SYMBOL(dev_mc_unsync);
157 157
158#ifdef CONFIG_PROC_FS 158#ifdef CONFIG_PROC_FS
159static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos)
160 __acquires(dev_base_lock)
161{
162 struct net *net = seq_file_net(seq);
163 struct net_device *dev;
164 loff_t off = 0;
165
166 read_lock(&dev_base_lock);
167 for_each_netdev(net, dev) {
168 if (off++ == *pos)
169 return dev;
170 }
171 return NULL;
172}
173
174static void *dev_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
175{
176 ++*pos;
177 return next_net_device((struct net_device *)v);
178}
179
180static void dev_mc_seq_stop(struct seq_file *seq, void *v)
181 __releases(dev_base_lock)
182{
183 read_unlock(&dev_base_lock);
184}
185
186
187static int dev_mc_seq_show(struct seq_file *seq, void *v) 159static int dev_mc_seq_show(struct seq_file *seq, void *v)
188{ 160{
189 struct dev_addr_list *m; 161 struct dev_addr_list *m;
190 struct net_device *dev = v; 162 struct net_device *dev = v;
191 163
164 if (v == SEQ_START_TOKEN)
165 return 0;
166
192 netif_tx_lock_bh(dev); 167 netif_tx_lock_bh(dev);
193 for (m = dev->mc_list; m; m = m->next) { 168 for (m = dev->mc_list; m; m = m->next) {
194 int i; 169 int i;
@@ -206,9 +181,9 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
206} 181}
207 182
208static const struct seq_operations dev_mc_seq_ops = { 183static const struct seq_operations dev_mc_seq_ops = {
209 .start = dev_mc_seq_start, 184 .start = dev_seq_start,
210 .next = dev_mc_seq_next, 185 .next = dev_seq_next,
211 .stop = dev_mc_seq_stop, 186 .stop = dev_seq_stop,
212 .show = dev_mc_seq_show, 187 .show = dev_mc_seq_show,
213}; 188};
214 189
diff --git a/net/core/dst.c b/net/core/dst.c
index 7deef483c79f..3a01a819ba47 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -295,9 +295,6 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, void
295 struct net_device *dev = ptr; 295 struct net_device *dev = ptr;
296 struct dst_entry *dst, *last = NULL; 296 struct dst_entry *dst, *last = NULL;
297 297
298 if (dev->nd_net != &init_net)
299 return NOTIFY_DONE;
300
301 switch (event) { 298 switch (event) {
302 case NETDEV_UNREGISTER: 299 case NETDEV_UNREGISTER:
303 case NETDEV_DOWN: 300 case NETDEV_DOWN:
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d9a02b2cc289..23c0a10c0c37 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1284,9 +1284,7 @@ static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1284 struct neigh_parms *p; 1284 struct neigh_parms *p;
1285 1285
1286 for (p = &tbl->parms; p; p = p->next) { 1286 for (p = &tbl->parms; p; p = p->next) {
1287 if (p->net != net) 1287 if ((p->dev && p->dev->ifindex == ifindex && p->net == net) ||
1288 continue;
1289 if ((p->dev && p->dev->ifindex == ifindex) ||
1290 (!p->dev && !ifindex)) 1288 (!p->dev && !ifindex))
1291 return p; 1289 return p;
1292 } 1290 }
@@ -2741,7 +2739,8 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2741 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name; 2739 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2742 neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id; 2740 neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;
2743 2741
2744 t->sysctl_header = register_sysctl_paths(neigh_path, t->neigh_vars); 2742 t->sysctl_header =
2743 register_net_sysctl_table(p->net, neigh_path, t->neigh_vars);
2745 if (!t->sysctl_header) 2744 if (!t->sysctl_header)
2746 goto free_procname; 2745 goto free_procname;
2747 2746
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 4b7e756181c9..d0c8bf585f06 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -388,9 +388,7 @@ static void arp_reply(struct sk_buff *skb)
388 if (skb->dev->flags & IFF_NOARP) 388 if (skb->dev->flags & IFF_NOARP)
389 return; 389 return;
390 390
391 if (!pskb_may_pull(skb, (sizeof(struct arphdr) + 391 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
392 (2 * skb->dev->addr_len) +
393 (2 * sizeof(u32)))))
394 return; 392 return;
395 393
396 skb_reset_network_header(skb); 394 skb_reset_network_header(skb);
@@ -418,7 +416,7 @@ static void arp_reply(struct sk_buff *skb)
418 ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) 416 ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
419 return; 417 return;
420 418
421 size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4); 419 size = arp_hdr_len(skb->dev);
422 send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev), 420 send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
423 LL_RESERVED_SPACE(np->dev)); 421 LL_RESERVED_SPACE(np->dev));
424 422
diff --git a/net/core/sock.c b/net/core/sock.c
index 09cb3a74de7f..0ca069738021 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -987,6 +987,25 @@ void sk_free(struct sock *sk)
987 sk_prot_free(sk->sk_prot_creator, sk); 987 sk_prot_free(sk->sk_prot_creator, sk);
988} 988}
989 989
990/*
991 * Last sock_put should drop referrence to sk->sk_net. It has already
992 * been dropped in sk_change_net. Taking referrence to stopping namespace
993 * is not an option.
994 * Take referrence to a socket to remove it from hash _alive_ and after that
995 * destroy it in the context of init_net.
996 */
997void sk_release_kernel(struct sock *sk)
998{
999 if (sk == NULL || sk->sk_socket == NULL)
1000 return;
1001
1002 sock_hold(sk);
1003 sock_release(sk->sk_socket);
1004 sk->sk_net = get_net(&init_net);
1005 sock_put(sk);
1006}
1007EXPORT_SYMBOL(sk_release_kernel);
1008
990struct sock *sk_clone(const struct sock *sk, const gfp_t priority) 1009struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
991{ 1010{
992 struct sock *newsk; 1011 struct sock *newsk;
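
sk_release_kernel() is the teardown half of the pattern used further down in the icmp.c hunk: a kernel socket is created with sock_create_kern(), re-homed into a namespace with sk_change_net(), and later released so that the final sock_put() runs against init_net. A condensed sketch of that lifecycle as this patch uses it; error handling is trimmed and the wrapper names are illustrative:

static struct sock *example_pernet_kernel_sk(struct net *net)
{
	struct socket *sock;
	struct sock *sk;

	if (sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, &sock) < 0)
		return NULL;
	sk = sock->sk;
	sk_change_net(sk, net);		/* socket now belongs to 'net' */
	return sk;
}

static void example_pernet_kernel_sk_free(struct sock *sk)
{
	sk_release_kernel(sk);		/* unhash while alive, destroy in init_net */
}
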
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 474075adbde4..514a40b7fc7f 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -471,15 +471,14 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
471 return &rt->u.dst; 471 return &rt->u.dst;
472} 472}
473 473
474static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, 474static int dccp_v4_send_response(struct sock *sk, struct request_sock *req)
475 struct dst_entry *dst)
476{ 475{
477 int err = -1; 476 int err = -1;
478 struct sk_buff *skb; 477 struct sk_buff *skb;
478 struct dst_entry *dst;
479 479
480 /* First, grab a route. */ 480 dst = inet_csk_route_req(sk, req);
481 481 if (dst == NULL)
482 if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
483 goto out; 482 goto out;
484 483
485 skb = dccp_make_response(sk, dst, req); 484 skb = dccp_make_response(sk, dst, req);
@@ -620,7 +619,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
620 dreq->dreq_iss = dccp_v4_init_sequence(skb); 619 dreq->dreq_iss = dccp_v4_init_sequence(skb);
621 dreq->dreq_service = service; 620 dreq->dreq_service = service;
622 621
623 if (dccp_v4_send_response(sk, req, NULL)) 622 if (dccp_v4_send_response(sk, req))
624 goto drop_and_free; 623 goto drop_and_free;
625 624
626 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 625 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 490333d47c7b..1a5e50b90677 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -224,8 +224,7 @@ out:
224} 224}
225 225
226 226
227static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, 227static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
228 struct dst_entry *dst)
229{ 228{
230 struct inet6_request_sock *ireq6 = inet6_rsk(req); 229 struct inet6_request_sock *ireq6 = inet6_rsk(req);
231 struct ipv6_pinfo *np = inet6_sk(sk); 230 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -234,6 +233,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
234 struct in6_addr *final_p = NULL, final; 233 struct in6_addr *final_p = NULL, final;
235 struct flowi fl; 234 struct flowi fl;
236 int err = -1; 235 int err = -1;
236 struct dst_entry *dst;
237 237
238 memset(&fl, 0, sizeof(fl)); 238 memset(&fl, 0, sizeof(fl));
239 fl.proto = IPPROTO_DCCP; 239 fl.proto = IPPROTO_DCCP;
@@ -245,28 +245,26 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
245 fl.fl_ip_sport = inet_sk(sk)->sport; 245 fl.fl_ip_sport = inet_sk(sk)->sport;
246 security_req_classify_flow(req, &fl); 246 security_req_classify_flow(req, &fl);
247 247
248 if (dst == NULL) { 248 opt = np->opt;
249 opt = np->opt;
250 249
251 if (opt != NULL && opt->srcrt != NULL) { 250 if (opt != NULL && opt->srcrt != NULL) {
252 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt; 251 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
253 252
254 ipv6_addr_copy(&final, &fl.fl6_dst); 253 ipv6_addr_copy(&final, &fl.fl6_dst);
255 ipv6_addr_copy(&fl.fl6_dst, rt0->addr); 254 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
256 final_p = &final; 255 final_p = &final;
257 } 256 }
258 257
259 err = ip6_dst_lookup(sk, &dst, &fl); 258 err = ip6_dst_lookup(sk, &dst, &fl);
260 if (err) 259 if (err)
261 goto done; 260 goto done;
262 261
263 if (final_p) 262 if (final_p)
264 ipv6_addr_copy(&fl.fl6_dst, final_p); 263 ipv6_addr_copy(&fl.fl6_dst, final_p);
265 264
266 err = xfrm_lookup(&dst, &fl, sk, 0); 265 err = xfrm_lookup(&dst, &fl, sk, 0);
267 if (err < 0) 266 if (err < 0)
268 goto done; 267 goto done;
269 }
270 268
271 skb = dccp_make_response(sk, dst, req); 269 skb = dccp_make_response(sk, dst, req);
272 if (skb != NULL) { 270 if (skb != NULL) {
@@ -448,7 +446,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
448 dreq->dreq_iss = dccp_v6_init_sequence(skb); 446 dreq->dreq_iss = dccp_v6_init_sequence(skb);
449 dreq->dreq_service = service; 447 dreq->dreq_service = service;
450 448
451 if (dccp_v6_send_response(sk, req, NULL)) 449 if (dccp_v6_send_response(sk, req))
452 goto drop_and_free; 450 goto drop_and_free;
453 451
454 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 452 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 027d1814e1ab..33ad48321b08 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -216,7 +216,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
216 * counter (backoff, monitored by dccp_response_timer). 216 * counter (backoff, monitored by dccp_response_timer).
217 */ 217 */
218 req->retrans++; 218 req->retrans++;
219 req->rsk_ops->rtx_syn_ack(sk, req, NULL); 219 req->rsk_ops->rtx_syn_ack(sk, req);
220 } 220 }
221 /* Network Duplicate, discard packet */ 221 /* Network Duplicate, discard packet */
222 return NULL; 222 return NULL;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index acd48ee522d6..23fd95a7ad15 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2320,25 +2320,8 @@ static const struct seq_operations dn_socket_seq_ops = {
2320 2320
2321static int dn_socket_seq_open(struct inode *inode, struct file *file) 2321static int dn_socket_seq_open(struct inode *inode, struct file *file)
2322{ 2322{
2323 struct seq_file *seq; 2323 return seq_open_private(file, &dn_socket_seq_ops,
2324 int rc = -ENOMEM; 2324 sizeof(struct dn_iter_state));
2325 struct dn_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
2326
2327 if (!s)
2328 goto out;
2329
2330 rc = seq_open(file, &dn_socket_seq_ops);
2331 if (rc)
2332 goto out_kfree;
2333
2334 seq = file->private_data;
2335 seq->private = s;
2336 memset(s, 0, sizeof(*s));
2337out:
2338 return rc;
2339out_kfree:
2340 kfree(s);
2341 goto out;
2342} 2325}
2343 2326
2344static const struct file_operations dn_socket_seq_fops = { 2327static const struct file_operations dn_socket_seq_fops = {
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 9c7e5ffb223d..5098fd2ff4d0 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -632,5 +632,15 @@ config TCP_MD5SIG
632 632
633 If unsure, say N. 633 If unsure, say N.
634 634
635config IP_UDPLITE
636 bool "IP: UDP-Lite Protocol (RFC 3828)"
637 default n
638 ---help---
639 UDP-Lite (RFC 3828) is a UDP-like protocol with variable-length
640 checksum. Read <file:Documentation/networking/udplite.txt> for
641 details.
642
643 If unsure, say N.
644
635source "net/ipv4/ipvs/Kconfig" 645source "net/ipv4/ipvs/Kconfig"
636 646
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index ad40ef3f9ebc..d5226241d5ed 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -8,7 +8,7 @@ obj-y := route.o inetpeer.o protocol.o \
8 inet_timewait_sock.o inet_connection_sock.o \ 8 inet_timewait_sock.o inet_connection_sock.o \
9 tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \ 9 tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
10 tcp_minisocks.o tcp_cong.o \ 10 tcp_minisocks.o tcp_cong.o \
11 datagram.o raw.o udp.o udplite.o \ 11 datagram.o raw.o udp.o udp_ipv4.o \
12 arp.o icmp.o devinet.o af_inet.o igmp.o \ 12 arp.o icmp.o devinet.o af_inet.o igmp.o \
13 fib_frontend.o fib_semantics.o \ 13 fib_frontend.o fib_semantics.o \
14 inet_fragment.o 14 inet_fragment.o
@@ -49,6 +49,7 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
49obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o 49obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
50obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o 50obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
51obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o 51obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
52obj-$(CONFIG_IP_UDPLITE) += udplite_ipv4.o
52obj-$(CONFIG_NETLABEL) += cipso_ipv4.o 53obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
53 54
54obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \ 55obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 09ca5293d08f..67260c0eaaa8 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -784,6 +784,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
784{ 784{
785 struct sock *sk = sock->sk; 785 struct sock *sk = sock->sk;
786 int err = 0; 786 int err = 0;
787 struct net *net = sk->sk_net;
787 788
788 switch (cmd) { 789 switch (cmd) {
789 case SIOCGSTAMP: 790 case SIOCGSTAMP:
@@ -795,12 +796,12 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
795 case SIOCADDRT: 796 case SIOCADDRT:
796 case SIOCDELRT: 797 case SIOCDELRT:
797 case SIOCRTMSG: 798 case SIOCRTMSG:
798 err = ip_rt_ioctl(sk->sk_net, cmd, (void __user *)arg); 799 err = ip_rt_ioctl(net, cmd, (void __user *)arg);
799 break; 800 break;
800 case SIOCDARP: 801 case SIOCDARP:
801 case SIOCGARP: 802 case SIOCGARP:
802 case SIOCSARP: 803 case SIOCSARP:
803 err = arp_ioctl(sk->sk_net, cmd, (void __user *)arg); 804 err = arp_ioctl(net, cmd, (void __user *)arg);
804 break; 805 break;
805 case SIOCGIFADDR: 806 case SIOCGIFADDR:
806 case SIOCSIFADDR: 807 case SIOCSIFADDR:
@@ -813,7 +814,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
813 case SIOCSIFPFLAGS: 814 case SIOCSIFPFLAGS:
814 case SIOCGIFPFLAGS: 815 case SIOCGIFPFLAGS:
815 case SIOCSIFFLAGS: 816 case SIOCSIFFLAGS:
816 err = devinet_ioctl(cmd, (void __user *)arg); 817 err = devinet_ioctl(net, cmd, (void __user *)arg);
817 break; 818 break;
818 default: 819 default:
819 if (sk->sk_prot->ioctl) 820 if (sk->sk_prot->ioctl)
@@ -1316,15 +1317,18 @@ static int __init init_ipv4_mibs(void)
1316 if (snmp_mib_init((void **)udp_statistics, 1317 if (snmp_mib_init((void **)udp_statistics,
1317 sizeof(struct udp_mib)) < 0) 1318 sizeof(struct udp_mib)) < 0)
1318 goto err_udp_mib; 1319 goto err_udp_mib;
1320#ifdef CONFIG_IP_UDPLITE
1319 if (snmp_mib_init((void **)udplite_statistics, 1321 if (snmp_mib_init((void **)udplite_statistics,
1320 sizeof(struct udp_mib)) < 0) 1322 sizeof(struct udp_mib)) < 0)
1321 goto err_udplite_mib; 1323 goto err_udplite_mib;
1322 1324#endif
1323 tcp_mib_init(); 1325 tcp_mib_init();
1324 1326
1325 return 0; 1327 return 0;
1326 1328
1329#ifdef CONFIG_IP_UDPLITE
1327err_udplite_mib: 1330err_udplite_mib:
1331#endif
1328 snmp_mib_free((void **)udp_statistics); 1332 snmp_mib_free((void **)udp_statistics);
1329err_udp_mib: 1333err_udp_mib:
1330 snmp_mib_free((void **)tcp_statistics); 1334 snmp_mib_free((void **)tcp_statistics);
@@ -1414,7 +1418,7 @@ static int __init inet_init(void)
1414 1418
1415 ip_init(); 1419 ip_init();
1416 1420
1417 tcp_v4_init(&inet_family_ops); 1421 tcp_v4_init();
1418 1422
1419 /* Setup TCP slab cache for open requests. */ 1423 /* Setup TCP slab cache for open requests. */
1420 tcp_init(); 1424 tcp_init();
@@ -1422,14 +1426,17 @@ static int __init inet_init(void)
1422 /* Setup UDP memory threshold */ 1426 /* Setup UDP memory threshold */
1423 udp_init(); 1427 udp_init();
1424 1428
1429#ifdef CONFIG_IP_UDPLITE
1425 /* Add UDP-Lite (RFC 3828) */ 1430 /* Add UDP-Lite (RFC 3828) */
1426 udplite4_register(); 1431 udplite4_register();
1432#endif
1427 1433
1428 /* 1434 /*
1429 * Set the ICMP layer up 1435 * Set the ICMP layer up
1430 */ 1436 */
1431 1437
1432 icmp_init(&inet_family_ops); 1438 if (icmp_init() < 0)
1439 panic("Failed to create the ICMP control socket.\n");
1433 1440
1434 /* 1441 /*
1435 * Initialise the multicast router 1442 * Initialise the multicast router
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 8e17f65f4002..69e80bd9774a 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -570,14 +570,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
570 * Allocate a buffer 570 * Allocate a buffer
571 */ 571 */
572 572
573 skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4) 573 skb = alloc_skb(arp_hdr_len(dev) + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
574 + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
575 if (skb == NULL) 574 if (skb == NULL)
576 return NULL; 575 return NULL;
577 576
578 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 577 skb_reserve(skb, LL_RESERVED_SPACE(dev));
579 skb_reset_network_header(skb); 578 skb_reset_network_header(skb);
580 arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4)); 579 arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
581 skb->dev = dev; 580 skb->dev = dev;
582 skb->protocol = htons(ETH_P_ARP); 581 skb->protocol = htons(ETH_P_ARP);
583 if (src_hw == NULL) 582 if (src_hw == NULL)
@@ -916,9 +915,7 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
916 goto freeskb; 915 goto freeskb;
917 916
918 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ 917 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
919 if (!pskb_may_pull(skb, (sizeof(struct arphdr) + 918 if (!pskb_may_pull(skb, arp_hdr_len(dev)))
920 (2 * dev->addr_len) +
921 (2 * sizeof(u32)))))
922 goto freeskb; 919 goto freeskb;
923 920
924 arp = arp_hdr(skb); 921 arp = arp_hdr(skb);
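
The netpoll.c, arp.c, ipconfig.c and arp_tables.c hunks all swap the same open-coded length expression for arp_hdr_len(). Going by the expressions being removed, the helper is expected to compute the following (a sketch, not the <linux/if_arp.h> definition verbatim):

static inline int example_arp_hdr_len(struct net_device *dev)
{
	/* ARP header, plus 2 device addresses, plus 2 IPv4 addresses */
	return sizeof(struct arphdr) + 2 * (dev->addr_len + sizeof(u32));
}
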
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 87490f7bb0f7..4a10dbbbe0a1 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -446,9 +446,6 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
446 446
447 ASSERT_RTNL(); 447 ASSERT_RTNL();
448 448
449 if (net != &init_net)
450 return -EINVAL;
451
452 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy); 449 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
453 if (err < 0) 450 if (err < 0)
454 goto errout; 451 goto errout;
@@ -560,9 +557,6 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
560 557
561 ASSERT_RTNL(); 558 ASSERT_RTNL();
562 559
563 if (net != &init_net)
564 return -EINVAL;
565
566 ifa = rtm_to_ifaddr(net, nlh); 560 ifa = rtm_to_ifaddr(net, nlh);
567 if (IS_ERR(ifa)) 561 if (IS_ERR(ifa))
568 return PTR_ERR(ifa); 562 return PTR_ERR(ifa);
@@ -595,7 +589,7 @@ static __inline__ int inet_abc_len(__be32 addr)
595} 589}
596 590
597 591
598int devinet_ioctl(unsigned int cmd, void __user *arg) 592int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
599{ 593{
600 struct ifreq ifr; 594 struct ifreq ifr;
601 struct sockaddr_in sin_orig; 595 struct sockaddr_in sin_orig;
@@ -624,7 +618,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
624 *colon = 0; 618 *colon = 0;
625 619
626#ifdef CONFIG_KMOD 620#ifdef CONFIG_KMOD
627 dev_load(&init_net, ifr.ifr_name); 621 dev_load(net, ifr.ifr_name);
628#endif 622#endif
629 623
630 switch (cmd) { 624 switch (cmd) {
@@ -665,7 +659,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
665 rtnl_lock(); 659 rtnl_lock();
666 660
667 ret = -ENODEV; 661 ret = -ENODEV;
668 if ((dev = __dev_get_by_name(&init_net, ifr.ifr_name)) == NULL) 662 if ((dev = __dev_get_by_name(net, ifr.ifr_name)) == NULL)
669 goto done; 663 goto done;
670 664
671 if (colon) 665 if (colon)
@@ -878,6 +872,7 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
878{ 872{
879 __be32 addr = 0; 873 __be32 addr = 0;
880 struct in_device *in_dev; 874 struct in_device *in_dev;
875 struct net *net = dev->nd_net;
881 876
882 rcu_read_lock(); 877 rcu_read_lock();
883 in_dev = __in_dev_get_rcu(dev); 878 in_dev = __in_dev_get_rcu(dev);
@@ -906,7 +901,7 @@ no_in_dev:
906 */ 901 */
907 read_lock(&dev_base_lock); 902 read_lock(&dev_base_lock);
908 rcu_read_lock(); 903 rcu_read_lock();
909 for_each_netdev(&init_net, dev) { 904 for_each_netdev(net, dev) {
910 if ((in_dev = __in_dev_get_rcu(dev)) == NULL) 905 if ((in_dev = __in_dev_get_rcu(dev)) == NULL)
911 continue; 906 continue;
912 907
@@ -1045,9 +1040,6 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1045 struct net_device *dev = ptr; 1040 struct net_device *dev = ptr;
1046 struct in_device *in_dev = __in_dev_get_rtnl(dev); 1041 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1047 1042
1048 if (dev->nd_net != &init_net)
1049 return NOTIFY_DONE;
1050
1051 ASSERT_RTNL(); 1043 ASSERT_RTNL();
1052 1044
1053 if (!in_dev) { 1045 if (!in_dev) {
@@ -1173,9 +1165,6 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1173 struct in_ifaddr *ifa; 1165 struct in_ifaddr *ifa;
1174 int s_ip_idx, s_idx = cb->args[0]; 1166 int s_ip_idx, s_idx = cb->args[0];
1175 1167
1176 if (net != &init_net)
1177 return 0;
1178
1179 s_ip_idx = ip_idx = cb->args[1]; 1168 s_ip_idx = ip_idx = cb->args[1];
1180 idx = 0; 1169 idx = 0;
1181 for_each_netdev(net, dev) { 1170 for_each_netdev(net, dev) {
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index a13c074dac09..cee77d606fbe 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -229,14 +229,16 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
229 * 229 *
230 * On SMP we have one ICMP socket per-cpu. 230 * On SMP we have one ICMP socket per-cpu.
231 */ 231 */
232static DEFINE_PER_CPU(struct socket *, __icmp_socket) = NULL; 232static struct sock *icmp_sk(struct net *net)
233#define icmp_socket __get_cpu_var(__icmp_socket) 233{
234 return net->ipv4.icmp_sk[smp_processor_id()];
235}
234 236
235static inline int icmp_xmit_lock(void) 237static inline int icmp_xmit_lock(struct sock *sk)
236{ 238{
237 local_bh_disable(); 239 local_bh_disable();
238 240
239 if (unlikely(!spin_trylock(&icmp_socket->sk->sk_lock.slock))) { 241 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
240 /* This can happen if the output path signals a 242 /* This can happen if the output path signals a
241 * dst_link_failure() for an outgoing ICMP packet. 243 * dst_link_failure() for an outgoing ICMP packet.
242 */ 244 */
@@ -246,9 +248,9 @@ static inline int icmp_xmit_lock(void)
246 return 0; 248 return 0;
247} 249}
248 250
249static inline void icmp_xmit_unlock(void) 251static inline void icmp_xmit_unlock(struct sock *sk)
250{ 252{
251 spin_unlock_bh(&icmp_socket->sk->sk_lock.slock); 253 spin_unlock_bh(&sk->sk_lock.slock);
252} 254}
253 255
254/* 256/*
@@ -346,19 +348,21 @@ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
346static void icmp_push_reply(struct icmp_bxm *icmp_param, 348static void icmp_push_reply(struct icmp_bxm *icmp_param,
347 struct ipcm_cookie *ipc, struct rtable *rt) 349 struct ipcm_cookie *ipc, struct rtable *rt)
348{ 350{
351 struct sock *sk;
349 struct sk_buff *skb; 352 struct sk_buff *skb;
350 353
351 if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param, 354 sk = icmp_sk(rt->u.dst.dev->nd_net);
355 if (ip_append_data(sk, icmp_glue_bits, icmp_param,
352 icmp_param->data_len+icmp_param->head_len, 356 icmp_param->data_len+icmp_param->head_len,
353 icmp_param->head_len, 357 icmp_param->head_len,
354 ipc, rt, MSG_DONTWAIT) < 0) 358 ipc, rt, MSG_DONTWAIT) < 0)
355 ip_flush_pending_frames(icmp_socket->sk); 359 ip_flush_pending_frames(sk);
356 else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) { 360 else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
357 struct icmphdr *icmph = icmp_hdr(skb); 361 struct icmphdr *icmph = icmp_hdr(skb);
358 __wsum csum = 0; 362 __wsum csum = 0;
359 struct sk_buff *skb1; 363 struct sk_buff *skb1;
360 364
361 skb_queue_walk(&icmp_socket->sk->sk_write_queue, skb1) { 365 skb_queue_walk(&sk->sk_write_queue, skb1) {
362 csum = csum_add(csum, skb1->csum); 366 csum = csum_add(csum, skb1->csum);
363 } 367 }
364 csum = csum_partial_copy_nocheck((void *)&icmp_param->data, 368 csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
@@ -366,7 +370,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
366 icmp_param->head_len, csum); 370 icmp_param->head_len, csum);
367 icmph->checksum = csum_fold(csum); 371 icmph->checksum = csum_fold(csum);
368 skb->ip_summed = CHECKSUM_NONE; 372 skb->ip_summed = CHECKSUM_NONE;
369 ip_push_pending_frames(icmp_socket->sk); 373 ip_push_pending_frames(sk);
370 } 374 }
371} 375}
372 376
@@ -376,16 +380,17 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
376 380
377static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) 381static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
378{ 382{
379 struct sock *sk = icmp_socket->sk;
380 struct inet_sock *inet = inet_sk(sk);
381 struct ipcm_cookie ipc; 383 struct ipcm_cookie ipc;
382 struct rtable *rt = (struct rtable *)skb->dst; 384 struct rtable *rt = (struct rtable *)skb->dst;
385 struct net *net = rt->u.dst.dev->nd_net;
386 struct sock *sk = icmp_sk(net);
387 struct inet_sock *inet = inet_sk(sk);
383 __be32 daddr; 388 __be32 daddr;
384 389
385 if (ip_options_echo(&icmp_param->replyopts, skb)) 390 if (ip_options_echo(&icmp_param->replyopts, skb))
386 return; 391 return;
387 392
388 if (icmp_xmit_lock()) 393 if (icmp_xmit_lock(sk))
389 return; 394 return;
390 395
391 icmp_param->data.icmph.checksum = 0; 396 icmp_param->data.icmph.checksum = 0;
@@ -405,7 +410,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
405 .tos = RT_TOS(ip_hdr(skb)->tos) } }, 410 .tos = RT_TOS(ip_hdr(skb)->tos) } },
406 .proto = IPPROTO_ICMP }; 411 .proto = IPPROTO_ICMP };
407 security_skb_classify_flow(skb, &fl); 412 security_skb_classify_flow(skb, &fl);
408 if (ip_route_output_key(rt->u.dst.dev->nd_net, &rt, &fl)) 413 if (ip_route_output_key(net, &rt, &fl))
409 goto out_unlock; 414 goto out_unlock;
410 } 415 }
411 if (icmpv4_xrlim_allow(rt, icmp_param->data.icmph.type, 416 if (icmpv4_xrlim_allow(rt, icmp_param->data.icmph.type,
@@ -413,7 +418,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
413 icmp_push_reply(icmp_param, &ipc, rt); 418 icmp_push_reply(icmp_param, &ipc, rt);
414 ip_rt_put(rt); 419 ip_rt_put(rt);
415out_unlock: 420out_unlock:
416 icmp_xmit_unlock(); 421 icmp_xmit_unlock(sk);
417} 422}
418 423
419 424
@@ -438,10 +443,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
438 __be32 saddr; 443 __be32 saddr;
439 u8 tos; 444 u8 tos;
440 struct net *net; 445 struct net *net;
446 struct sock *sk;
441 447
442 if (!rt) 448 if (!rt)
443 goto out; 449 goto out;
444 net = rt->u.dst.dev->nd_net; 450 net = rt->u.dst.dev->nd_net;
451 sk = icmp_sk(net);
445 452
446 /* 453 /*
447 * Find the original header. It is expected to be valid, of course. 454 * Find the original header. It is expected to be valid, of course.
@@ -505,7 +512,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
505 } 512 }
506 } 513 }
507 514
508 if (icmp_xmit_lock()) 515 if (icmp_xmit_lock(sk))
509 return; 516 return;
510 517
511 /* 518 /*
@@ -544,7 +551,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
544 icmp_param.data.icmph.checksum = 0; 551 icmp_param.data.icmph.checksum = 0;
545 icmp_param.skb = skb_in; 552 icmp_param.skb = skb_in;
546 icmp_param.offset = skb_network_offset(skb_in); 553 icmp_param.offset = skb_network_offset(skb_in);
547 inet_sk(icmp_socket->sk)->tos = tos; 554 inet_sk(sk)->tos = tos;
548 ipc.addr = iph->saddr; 555 ipc.addr = iph->saddr;
549 ipc.opt = &icmp_param.replyopts; 556 ipc.opt = &icmp_param.replyopts;
550 557
@@ -652,7 +659,7 @@ route_done:
652ende: 659ende:
653 ip_rt_put(rt); 660 ip_rt_put(rt);
654out_unlock: 661out_unlock:
655 icmp_xmit_unlock(); 662 icmp_xmit_unlock(sk);
656out:; 663out:;
657} 664}
658 665
@@ -1139,29 +1146,46 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
1139 }, 1146 },
1140}; 1147};
1141 1148
1142void __init icmp_init(struct net_proto_family *ops) 1149static void __net_exit icmp_sk_exit(struct net *net)
1143{ 1150{
1144 struct inet_sock *inet;
1145 int i; 1151 int i;
1146 1152
1147 for_each_possible_cpu(i) { 1153 for_each_possible_cpu(i)
1148 int err; 1154 sk_release_kernel(net->ipv4.icmp_sk[i]);
1155 kfree(net->ipv4.icmp_sk);
1156 net->ipv4.icmp_sk = NULL;
1157}
1149 1158
1150 err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, 1159int __net_init icmp_sk_init(struct net *net)
1151 &per_cpu(__icmp_socket, i)); 1160{
1161 int i, err;
1152 1162
1163 net->ipv4.icmp_sk =
1164 kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
1165 if (net->ipv4.icmp_sk == NULL)
1166 return -ENOMEM;
1167
1168 for_each_possible_cpu(i) {
1169 struct sock *sk;
1170 struct socket *sock;
1171 struct inet_sock *inet;
1172
1173 err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, &sock);
1153 if (err < 0) 1174 if (err < 0)
1154 panic("Failed to create the ICMP control socket.\n"); 1175 goto fail;
1176
1177 net->ipv4.icmp_sk[i] = sk = sock->sk;
1178 sk_change_net(sk, net);
1155 1179
1156 per_cpu(__icmp_socket, i)->sk->sk_allocation = GFP_ATOMIC; 1180 sk->sk_allocation = GFP_ATOMIC;
1157 1181
1158 /* Enough space for 2 64K ICMP packets, including 1182 /* Enough space for 2 64K ICMP packets, including
1159 * sk_buff struct overhead. 1183 * sk_buff struct overhead.
1160 */ 1184 */
1161 per_cpu(__icmp_socket, i)->sk->sk_sndbuf = 1185 sk->sk_sndbuf =
1162 (2 * ((64 * 1024) + sizeof(struct sk_buff))); 1186 (2 * ((64 * 1024) + sizeof(struct sk_buff)));
1163 1187
1164 inet = inet_sk(per_cpu(__icmp_socket, i)->sk); 1188 inet = inet_sk(sk);
1165 inet->uc_ttl = -1; 1189 inet->uc_ttl = -1;
1166 inet->pmtudisc = IP_PMTUDISC_DONT; 1190 inet->pmtudisc = IP_PMTUDISC_DONT;
1167 1191
@@ -1169,8 +1193,25 @@ void __init icmp_init(struct net_proto_family *ops)
1169 * see it, we do not wish this socket to see incoming 1193 * see it, we do not wish this socket to see incoming
1170 * packets. 1194 * packets.
1171 */ 1195 */
1172 per_cpu(__icmp_socket, i)->sk->sk_prot->unhash(per_cpu(__icmp_socket, i)->sk); 1196 sk->sk_prot->unhash(sk);
1173 } 1197 }
1198 return 0;
1199
1200fail:
1201 for_each_possible_cpu(i)
1202 sk_release_kernel(net->ipv4.icmp_sk[i]);
1203 kfree(net->ipv4.icmp_sk);
1204 return err;
1205}
1206
1207static struct pernet_operations __net_initdata icmp_sk_ops = {
1208 .init = icmp_sk_init,
1209 .exit = icmp_sk_exit,
1210};
1211
1212int __init icmp_init(void)
1213{
1214 return register_pernet_device(&icmp_sk_ops);
1174} 1215}
1175 1216
1176EXPORT_SYMBOL(icmp_err_convert); 1217EXPORT_SYMBOL(icmp_err_convert);
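
The ICMP control sockets move from a global per-cpu variable to a per-namespace array reached through net->ipv4.icmp_sk, with setup and teardown hooked in via register_pernet_device(). The same pernet_operations shape reduced to its skeleton, with illustrative names; the icmp_sk array itself is assumed to be added to struct netns_ipv4 elsewhere in this series:

static int __net_init example_net_init(struct net *net)
{
	/* allocate per-namespace state and hang it off 'net' */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* undo example_net_init() for this namespace */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_init(void)
{
	return register_pernet_device(&example_net_ops);
}
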
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 732cd07e6071..d3f34a772f3b 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1198,6 +1198,9 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1198 1198
1199 ASSERT_RTNL(); 1199 ASSERT_RTNL();
1200 1200
1201 if (in_dev->dev->nd_net != &init_net)
1202 return;
1203
1201 for (im=in_dev->mc_list; im; im=im->next) { 1204 for (im=in_dev->mc_list; im; im=im->next) {
1202 if (im->multiaddr == addr) { 1205 if (im->multiaddr == addr) {
1203 im->users++; 1206 im->users++;
@@ -1277,6 +1280,9 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
1277 1280
1278 ASSERT_RTNL(); 1281 ASSERT_RTNL();
1279 1282
1283 if (in_dev->dev->nd_net != &init_net)
1284 return;
1285
1280 for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { 1286 for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
1281 if (i->multiaddr==addr) { 1287 if (i->multiaddr==addr) {
1282 if (--i->users == 0) { 1288 if (--i->users == 0) {
@@ -1304,6 +1310,9 @@ void ip_mc_down(struct in_device *in_dev)
1304 1310
1305 ASSERT_RTNL(); 1311 ASSERT_RTNL();
1306 1312
1313 if (in_dev->dev->nd_net != &init_net)
1314 return;
1315
1307 for (i=in_dev->mc_list; i; i=i->next) 1316 for (i=in_dev->mc_list; i; i=i->next)
1308 igmp_group_dropped(i); 1317 igmp_group_dropped(i);
1309 1318
@@ -1324,6 +1333,9 @@ void ip_mc_init_dev(struct in_device *in_dev)
1324{ 1333{
1325 ASSERT_RTNL(); 1334 ASSERT_RTNL();
1326 1335
1336 if (in_dev->dev->nd_net != &init_net)
1337 return;
1338
1327 in_dev->mc_tomb = NULL; 1339 in_dev->mc_tomb = NULL;
1328#ifdef CONFIG_IP_MULTICAST 1340#ifdef CONFIG_IP_MULTICAST
1329 in_dev->mr_gq_running = 0; 1341 in_dev->mr_gq_running = 0;
@@ -1347,6 +1359,9 @@ void ip_mc_up(struct in_device *in_dev)
1347 1359
1348 ASSERT_RTNL(); 1360 ASSERT_RTNL();
1349 1361
1362 if (in_dev->dev->nd_net != &init_net)
1363 return;
1364
1350 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); 1365 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
1351 1366
1352 for (i=in_dev->mc_list; i; i=i->next) 1367 for (i=in_dev->mc_list; i; i=i->next)
@@ -1363,6 +1378,9 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1363 1378
1364 ASSERT_RTNL(); 1379 ASSERT_RTNL();
1365 1380
1381 if (in_dev->dev->nd_net != &init_net)
1382 return;
1383
1366 /* Deactivate timers */ 1384 /* Deactivate timers */
1367 ip_mc_down(in_dev); 1385 ip_mc_down(in_dev);
1368 1386
@@ -1744,6 +1762,9 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1744 if (!ipv4_is_multicast(addr)) 1762 if (!ipv4_is_multicast(addr))
1745 return -EINVAL; 1763 return -EINVAL;
1746 1764
1765 if (sk->sk_net != &init_net)
1766 return -EPROTONOSUPPORT;
1767
1747 rtnl_lock(); 1768 rtnl_lock();
1748 1769
1749 in_dev = ip_mc_find_dev(imr); 1770 in_dev = ip_mc_find_dev(imr);
@@ -1812,6 +1833,9 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1812 u32 ifindex; 1833 u32 ifindex;
1813 int ret = -EADDRNOTAVAIL; 1834 int ret = -EADDRNOTAVAIL;
1814 1835
1836 if (sk->sk_net != &init_net)
1837 return -EPROTONOSUPPORT;
1838
1815 rtnl_lock(); 1839 rtnl_lock();
1816 in_dev = ip_mc_find_dev(imr); 1840 in_dev = ip_mc_find_dev(imr);
1817 ifindex = imr->imr_ifindex; 1841 ifindex = imr->imr_ifindex;
@@ -1857,6 +1881,9 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1857 if (!ipv4_is_multicast(addr)) 1881 if (!ipv4_is_multicast(addr))
1858 return -EINVAL; 1882 return -EINVAL;
1859 1883
1884 if (sk->sk_net != &init_net)
1885 return -EPROTONOSUPPORT;
1886
1860 rtnl_lock(); 1887 rtnl_lock();
1861 1888
1862 imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr; 1889 imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
@@ -1990,6 +2017,9 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
1990 msf->imsf_fmode != MCAST_EXCLUDE) 2017 msf->imsf_fmode != MCAST_EXCLUDE)
1991 return -EINVAL; 2018 return -EINVAL;
1992 2019
2020 if (sk->sk_net != &init_net)
2021 return -EPROTONOSUPPORT;
2022
1993 rtnl_lock(); 2023 rtnl_lock();
1994 2024
1995 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; 2025 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
@@ -2070,6 +2100,9 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
2070 if (!ipv4_is_multicast(addr)) 2100 if (!ipv4_is_multicast(addr))
2071 return -EINVAL; 2101 return -EINVAL;
2072 2102
2103 if (sk->sk_net != &init_net)
2104 return -EPROTONOSUPPORT;
2105
2073 rtnl_lock(); 2106 rtnl_lock();
2074 2107
2075 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; 2108 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
@@ -2132,6 +2165,9 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
2132 if (!ipv4_is_multicast(addr)) 2165 if (!ipv4_is_multicast(addr))
2133 return -EINVAL; 2166 return -EINVAL;
2134 2167
2168 if (sk->sk_net != &init_net)
2169 return -EPROTONOSUPPORT;
2170
2135 rtnl_lock(); 2171 rtnl_lock();
2136 2172
2137 err = -EADDRNOTAVAIL; 2173 err = -EADDRNOTAVAIL;
@@ -2216,6 +2252,9 @@ void ip_mc_drop_socket(struct sock *sk)
2216 if (inet->mc_list == NULL) 2252 if (inet->mc_list == NULL)
2217 return; 2253 return;
2218 2254
2255 if (sk->sk_net != &init_net)
2256 return;
2257
2219 rtnl_lock(); 2258 rtnl_lock();
2220 while ((iml = inet->mc_list) != NULL) { 2259 while ((iml = inet->mc_list) != NULL) {
2221 struct in_device *in_dev; 2260 struct in_device *in_dev;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index b189278c7bc1..c0e0fa03fce1 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -463,7 +463,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
463 if (time_after_eq(now, req->expires)) { 463 if (time_after_eq(now, req->expires)) {
464 if ((req->retrans < thresh || 464 if ((req->retrans < thresh ||
465 (inet_rsk(req)->acked && req->retrans < max_retries)) 465 (inet_rsk(req)->acked && req->retrans < max_retries))
466 && !req->rsk_ops->rtx_syn_ack(parent, req, NULL)) { 466 && !req->rsk_ops->rtx_syn_ack(parent, req)) {
467 unsigned long timeo; 467 unsigned long timeo;
468 468
469 if (req->retrans++ == 0) 469 if (req->retrans++ == 0)
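
The callers updated in this patch (inet_csk_reqsk_queue_prune() here, plus the DCCP sites above) stop passing a dst_entry to rtx_syn_ack(), which implies the callback in struct request_sock_ops loses that argument in a companion header change not shown in this diff; roughly:

struct request_sock_ops {
	/* ... */
	int	(*rtx_syn_ack)(struct sock *sk, struct request_sock *req);
	/* ... */
};
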
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 4d315158fd3c..baaedd9689a0 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -107,10 +107,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
107 sptr = skb_network_header(skb); 107 sptr = skb_network_header(skb);
108 dptr = dopt->__data; 108 dptr = dopt->__data;
109 109
110 if (skb->dst) 110 daddr = ((struct rtable*)skb->dst)->rt_spec_dst;
111 daddr = ((struct rtable*)skb->dst)->rt_spec_dst;
112 else
113 daddr = ip_hdr(skb)->daddr;
114 111
115 if (sopt->rr) { 112 if (sopt->rr) {
116 optlen = sptr[sopt->rr+1]; 113 optlen = sptr[sopt->rr+1];
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 5dd938579eeb..4afce0572806 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -291,7 +291,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
291 291
292 mm_segment_t oldfs = get_fs(); 292 mm_segment_t oldfs = get_fs();
293 set_fs(get_ds()); 293 set_fs(get_ds());
294 res = devinet_ioctl(cmd, (struct ifreq __user *) arg); 294 res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
295 set_fs(oldfs); 295 set_fs(oldfs);
296 return res; 296 return res;
297} 297}
@@ -459,10 +459,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
459 if (rarp->ar_pro != htons(ETH_P_IP)) 459 if (rarp->ar_pro != htons(ETH_P_IP))
460 goto drop; 460 goto drop;
461 461
462 if (!pskb_may_pull(skb, 462 if (!pskb_may_pull(skb, arp_hdr_len(dev)))
463 sizeof(struct arphdr) +
464 (2 * dev->addr_len) +
465 (2 * 4)))
466 goto drop; 463 goto drop;
467 464
468 /* OK, it is all there and looks valid, process... */ 465 /* OK, it is all there and looks valid, process... */
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index a7591ce344d2..9b5904486184 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -233,10 +233,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
233 void *table_base; 233 void *table_base;
234 struct xt_table_info *private; 234 struct xt_table_info *private;
235 235
236 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ 236 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
237 if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
238 (2 * skb->dev->addr_len) +
239 (2 * sizeof(u32)))))
240 return NF_DROP; 237 return NF_DROP;
241 238
242 indev = in ? in->name : nulldevname; 239 indev = in ? in->name : nulldevname;
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index d80fee8327e4..313b3fcf387e 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -139,18 +139,8 @@ static int masq_inet_event(struct notifier_block *this,
139 unsigned long event, 139 unsigned long event,
140 void *ptr) 140 void *ptr)
141{ 141{
142 const struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; 142 struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
143 143 return masq_device_event(this, event, dev);
144 if (event == NETDEV_DOWN) {
145 /* IP address was deleted. Search entire table for
146 conntracks which were associated with that device,
147 and forget them. */
148 NF_CT_ASSERT(dev->ifindex != 0);
149
150 nf_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex);
151 }
152
153 return NOTIFY_DONE;
154} 144}
155 145
156static struct notifier_block masq_dev_notifier = { 146static struct notifier_block masq_dev_notifier = {
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 089252e82c01..9668c3a23efe 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -379,7 +379,7 @@ static const struct file_operations ct_cpu_seq_fops = {
379 .open = ct_cpu_seq_open, 379 .open = ct_cpu_seq_open,
380 .read = seq_read, 380 .read = seq_read,
381 .llseek = seq_lseek, 381 .llseek = seq_lseek,
382 .release = seq_release_private, 382 .release = seq_release,
383}; 383};
384 384
385int __init nf_conntrack_ipv4_compat_init(void) 385int __init nf_conntrack_ipv4_compat_init(void)
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index d63474c6b400..d75ddb7fa4b8 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -59,7 +59,9 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
59 atomic_read(&tcp_memory_allocated)); 59 atomic_read(&tcp_memory_allocated));
60 seq_printf(seq, "UDP: inuse %d mem %d\n", sock_prot_inuse_get(&udp_prot), 60 seq_printf(seq, "UDP: inuse %d mem %d\n", sock_prot_inuse_get(&udp_prot),
61 atomic_read(&udp_memory_allocated)); 61 atomic_read(&udp_memory_allocated));
62#ifdef CONFIG_IP_UDPLITE
62 seq_printf(seq, "UDPLITE: inuse %d\n", sock_prot_inuse_get(&udplite_prot)); 63 seq_printf(seq, "UDPLITE: inuse %d\n", sock_prot_inuse_get(&udplite_prot));
64#endif
63 seq_printf(seq, "RAW: inuse %d\n", sock_prot_inuse_get(&raw_prot)); 65 seq_printf(seq, "RAW: inuse %d\n", sock_prot_inuse_get(&raw_prot));
64 seq_printf(seq, "FRAG: inuse %d memory %d\n", 66 seq_printf(seq, "FRAG: inuse %d memory %d\n",
65 ip_frag_nqueues(&init_net), ip_frag_mem(&init_net)); 67 ip_frag_nqueues(&init_net), ip_frag_mem(&init_net));
@@ -349,6 +351,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
349 snmp_fold_field((void **)udp_statistics, 351 snmp_fold_field((void **)udp_statistics,
350 snmp4_udp_list[i].entry)); 352 snmp4_udp_list[i].entry));
351 353
354#ifdef CONFIG_IP_UDPLITE
352 /* the UDP and UDP-Lite MIBs are the same */ 355 /* the UDP and UDP-Lite MIBs are the same */
353 seq_puts(seq, "\nUdpLite:"); 356 seq_puts(seq, "\nUdpLite:");
354 for (i = 0; snmp4_udp_list[i].name != NULL; i++) 357 for (i = 0; snmp4_udp_list[i].name != NULL; i++)
@@ -359,7 +362,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
359 seq_printf(seq, " %lu", 362 seq_printf(seq, " %lu",
360 snmp_fold_field((void **)udplite_statistics, 363 snmp_fold_field((void **)udplite_statistics,
361 snmp4_udp_list[i].entry)); 364 snmp4_udp_list[i].entry));
362 365#endif
363 seq_putc(seq, '\n'); 366 seq_putc(seq, '\n');
364 return 0; 367 return 0;
365} 368}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 7b5e8e1d94be..8c3e165f0034 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -273,6 +273,7 @@ static unsigned int rt_hash_code(u32 daddr, u32 saddr)
273 273
274#ifdef CONFIG_PROC_FS 274#ifdef CONFIG_PROC_FS
275struct rt_cache_iter_state { 275struct rt_cache_iter_state {
276 struct seq_net_private p;
276 int bucket; 277 int bucket;
277 int genid; 278 int genid;
278}; 279};
@@ -285,7 +286,8 @@ static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st)
285 rcu_read_lock_bh(); 286 rcu_read_lock_bh();
286 r = rcu_dereference(rt_hash_table[st->bucket].chain); 287 r = rcu_dereference(rt_hash_table[st->bucket].chain);
287 while (r) { 288 while (r) {
288 if (r->rt_genid == st->genid) 289 if (r->u.dst.dev->nd_net == st->p.net &&
290 r->rt_genid == st->genid)
289 return r; 291 return r;
290 r = rcu_dereference(r->u.dst.rt_next); 292 r = rcu_dereference(r->u.dst.rt_next);
291 } 293 }
@@ -294,7 +296,8 @@ static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st)
294 return r; 296 return r;
295} 297}
296 298
297static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st, struct rtable *r) 299static struct rtable *__rt_cache_get_next(struct rt_cache_iter_state *st,
300 struct rtable *r)
298{ 301{
299 r = r->u.dst.rt_next; 302 r = r->u.dst.rt_next;
300 while (!r) { 303 while (!r) {
@@ -307,16 +310,25 @@ static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st, struct r
307 return rcu_dereference(r); 310 return rcu_dereference(r);
308} 311}
309 312
313static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st,
314 struct rtable *r)
315{
316 while ((r = __rt_cache_get_next(st, r)) != NULL) {
317 if (r->u.dst.dev->nd_net != st->p.net)
318 continue;
319 if (r->rt_genid == st->genid)
320 break;
321 }
322 return r;
323}
324
310static struct rtable *rt_cache_get_idx(struct rt_cache_iter_state *st, loff_t pos) 325static struct rtable *rt_cache_get_idx(struct rt_cache_iter_state *st, loff_t pos)
311{ 326{
312 struct rtable *r = rt_cache_get_first(st); 327 struct rtable *r = rt_cache_get_first(st);
313 328
314 if (r) 329 if (r)
315 while (pos && (r = rt_cache_get_next(st, r))) { 330 while (pos && (r = rt_cache_get_next(st, r)))
316 if (r->rt_genid != st->genid)
317 continue;
318 --pos; 331 --pos;
319 }
320 return pos ? NULL : r; 332 return pos ? NULL : r;
321} 333}
322 334
@@ -390,7 +402,7 @@ static const struct seq_operations rt_cache_seq_ops = {
390 402
391static int rt_cache_seq_open(struct inode *inode, struct file *file) 403static int rt_cache_seq_open(struct inode *inode, struct file *file)
392{ 404{
393 return seq_open_private(file, &rt_cache_seq_ops, 405 return seq_open_net(inode, file, &rt_cache_seq_ops,
394 sizeof(struct rt_cache_iter_state)); 406 sizeof(struct rt_cache_iter_state));
395} 407}
396 408
@@ -399,7 +411,7 @@ static const struct file_operations rt_cache_seq_fops = {
399 .open = rt_cache_seq_open, 411 .open = rt_cache_seq_open,
400 .read = seq_read, 412 .read = seq_read,
401 .llseek = seq_lseek, 413 .llseek = seq_lseek,
402 .release = seq_release_private, 414 .release = seq_release_net,
403}; 415};
404 416
405 417
@@ -533,7 +545,7 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
533} 545}
534#endif 546#endif
535 547
536static __init int ip_rt_proc_init(struct net *net) 548static int __net_init ip_rt_do_proc_init(struct net *net)
537{ 549{
538 struct proc_dir_entry *pde; 550 struct proc_dir_entry *pde;
539 551
@@ -564,8 +576,26 @@ err2:
564err1: 576err1:
565 return -ENOMEM; 577 return -ENOMEM;
566} 578}
579
580static void __net_exit ip_rt_do_proc_exit(struct net *net)
581{
582 remove_proc_entry("rt_cache", net->proc_net_stat);
583 remove_proc_entry("rt_cache", net->proc_net);
584 remove_proc_entry("rt_acct", net->proc_net);
585}
586
587static struct pernet_operations ip_rt_proc_ops __net_initdata = {
588 .init = ip_rt_do_proc_init,
589 .exit = ip_rt_do_proc_exit,
590};
591
592static int __init ip_rt_proc_init(void)
593{
594 return register_pernet_subsys(&ip_rt_proc_ops);
595}
596
567#else 597#else
568static inline int ip_rt_proc_init(struct net *net) 598static inline int ip_rt_proc_init(void)
569{ 599{
570 return 0; 600 return 0;
571} 601}
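With ip_rt_do_proc_init/ip_rt_do_proc_exit hooked up as pernet_operations, the rt_cache and rt_acct entries are created and removed once per network namespace instead of once for init_net. A minimal sketch of the same registration pattern for a hypothetical "foo" entry (foo_seq_fops and the entry name are illustrative; proc_net_fops_create/proc_net_remove are assumed to be the per-net proc helpers of this kernel generation):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <net/net_namespace.h>

static const struct file_operations foo_seq_fops = { };   /* hypothetical seq_file fops */

static int __net_init foo_net_init(struct net *net)
{
        if (!proc_net_fops_create(net, "foo", S_IRUGO, &foo_seq_fops))
                return -ENOMEM;
        return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
        proc_net_remove(net, "foo");
}

/* .init runs for every namespace, existing and future;
 * .exit runs when a namespace is torn down */
static struct pernet_operations foo_net_ops = {
        .init = foo_net_init,
        .exit = foo_net_exit,
};

static int __init foo_init(void)
{
        return register_pernet_subsys(&foo_net_ops);
}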
@@ -1131,10 +1161,12 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1131 __be32 skeys[2] = { saddr, 0 }; 1161 __be32 skeys[2] = { saddr, 0 };
1132 int ikeys[2] = { dev->ifindex, 0 }; 1162 int ikeys[2] = { dev->ifindex, 0 };
1133 struct netevent_redirect netevent; 1163 struct netevent_redirect netevent;
1164 struct net *net;
1134 1165
1135 if (!in_dev) 1166 if (!in_dev)
1136 return; 1167 return;
1137 1168
1169 net = dev->nd_net;
1138 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) 1170 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
1139 || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) 1171 || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
1140 || ipv4_is_zeronet(new_gw)) 1172 || ipv4_is_zeronet(new_gw))
@@ -1146,7 +1178,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1146 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev)) 1178 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1147 goto reject_redirect; 1179 goto reject_redirect;
1148 } else { 1180 } else {
1149 if (inet_addr_type(&init_net, new_gw) != RTN_UNICAST) 1181 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
1150 goto reject_redirect; 1182 goto reject_redirect;
1151 } 1183 }
1152 1184
@@ -1164,7 +1196,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1164 rth->fl.fl4_src != skeys[i] || 1196 rth->fl.fl4_src != skeys[i] ||
1165 rth->fl.oif != ikeys[k] || 1197 rth->fl.oif != ikeys[k] ||
1166 rth->fl.iif != 0 || 1198 rth->fl.iif != 0 ||
1167 rth->rt_genid != atomic_read(&rt_genid)) { 1199 rth->rt_genid != atomic_read(&rt_genid) ||
1200 rth->u.dst.dev->nd_net != net) {
1168 rthp = &rth->u.dst.rt_next; 1201 rthp = &rth->u.dst.rt_next;
1169 continue; 1202 continue;
1170 } 1203 }
@@ -2668,9 +2701,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2668 int err; 2701 int err;
2669 struct sk_buff *skb; 2702 struct sk_buff *skb;
2670 2703
2671 if (net != &init_net)
2672 return -EINVAL;
2673
2674 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy); 2704 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2675 if (err < 0) 2705 if (err < 0)
2676 goto errout; 2706 goto errout;
@@ -2700,7 +2730,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2700 if (iif) { 2730 if (iif) {
2701 struct net_device *dev; 2731 struct net_device *dev;
2702 2732
2703 dev = __dev_get_by_index(&init_net, iif); 2733 dev = __dev_get_by_index(net, iif);
2704 if (dev == NULL) { 2734 if (dev == NULL) {
2705 err = -ENODEV; 2735 err = -ENODEV;
2706 goto errout_free; 2736 goto errout_free;
@@ -2726,7 +2756,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2726 }, 2756 },
2727 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0, 2757 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2728 }; 2758 };
2729 err = ip_route_output_key(&init_net, &rt, &fl); 2759 err = ip_route_output_key(net, &rt, &fl);
2730 } 2760 }
2731 2761
2732 if (err) 2762 if (err)
@@ -2737,11 +2767,11 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2737 rt->rt_flags |= RTCF_NOTIFY; 2767 rt->rt_flags |= RTCF_NOTIFY;
2738 2768
2739 err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, 2769 err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2740 RTM_NEWROUTE, 0, 0); 2770 RTM_NEWROUTE, 0, 0);
2741 if (err <= 0) 2771 if (err <= 0)
2742 goto errout_free; 2772 goto errout_free;
2743 2773
2744 err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); 2774 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2745errout: 2775errout:
2746 return err; 2776 return err;
2747 2777
@@ -2755,6 +2785,9 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2755 struct rtable *rt; 2785 struct rtable *rt;
2756 int h, s_h; 2786 int h, s_h;
2757 int idx, s_idx; 2787 int idx, s_idx;
2788 struct net *net;
2789
2790 net = skb->sk->sk_net;
2758 2791
2759 s_h = cb->args[0]; 2792 s_h = cb->args[0];
2760 if (s_h < 0) 2793 if (s_h < 0)
@@ -2764,7 +2797,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2764 rcu_read_lock_bh(); 2797 rcu_read_lock_bh();
2765 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; 2798 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
2766 rt = rcu_dereference(rt->u.dst.rt_next), idx++) { 2799 rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
2767 if (idx < s_idx) 2800 if (rt->u.dst.dev->nd_net != net || idx < s_idx)
2768 continue; 2801 continue;
2769 if (rt->rt_genid != atomic_read(&rt_genid)) 2802 if (rt->rt_genid != atomic_read(&rt_genid))
2770 continue; 2803 continue;
@@ -3040,7 +3073,7 @@ int __init ip_rt_init(void)
3040 ip_rt_secret_interval; 3073 ip_rt_secret_interval;
3041 add_timer(&rt_secret_timer); 3074 add_timer(&rt_secret_timer);
3042 3075
3043 if (ip_rt_proc_init(&init_net)) 3076 if (ip_rt_proc_init())
3044 printk(KERN_ERR "Unable to create route proc files\n"); 3077 printk(KERN_ERR "Unable to create route proc files\n");
3045#ifdef CONFIG_XFRM 3078#ifdef CONFIG_XFRM
3046 xfrm_init(); 3079 xfrm_init();
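Dropping the `net != &init_net` bail-out means inet_rtm_getroute and ip_rt_dump now operate on whatever namespace the requesting netlink socket belongs to, filtering shared global state by the owning device's namespace. A compressed sketch of that dump pattern (foo_entry/foo_entries are hypothetical stand-ins for a global table such as rt_hash_table):

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>

struct foo_entry {                      /* hypothetical cached object */
        struct list_head        list;
        struct net_device       *dev;
};
static LIST_HEAD(foo_entries);

static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = skb->sk->sk_net;      /* requester's namespace */
        struct foo_entry *e;

        list_for_each_entry(e, &foo_entries, list) {
                if (e->dev->nd_net != net)      /* owned by another netns */
                        continue;
                /* ... nla_put()/nlmsg_end() one message for e ... */
        }
        return skb->len;
}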
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index f470fe4511db..4704f27f6c0b 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -10,8 +10,6 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $ 12 * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $
13 *
14 * Missing: IPv6 support.
15 */ 13 */
16 14
17#include <linux/tcp.h> 15#include <linux/tcp.h>
@@ -23,22 +21,25 @@
23 21
24extern int sysctl_tcp_syncookies; 22extern int sysctl_tcp_syncookies;
25 23
26static __u32 syncookie_secret[2][16-3+SHA_DIGEST_WORDS]; 24__u32 syncookie_secret[2][16-3+SHA_DIGEST_WORDS];
25EXPORT_SYMBOL(syncookie_secret);
27 26
28static __init int init_syncookies(void) 27static __init int init_syncookies(void)
29{ 28{
30 get_random_bytes(syncookie_secret, sizeof(syncookie_secret)); 29 get_random_bytes(syncookie_secret, sizeof(syncookie_secret));
31 return 0; 30 return 0;
32} 31}
33module_init(init_syncookies); 32__initcall(init_syncookies);
34 33
35#define COOKIEBITS 24 /* Upper bits store count */ 34#define COOKIEBITS 24 /* Upper bits store count */
36#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) 35#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
37 36
37static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];
38
38static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, 39static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
39 u32 count, int c) 40 u32 count, int c)
40{ 41{
41 __u32 tmp[16 + 5 + SHA_WORKSPACE_WORDS]; 42 __u32 *tmp = __get_cpu_var(cookie_scratch);
42 43
43 memcpy(tmp + 3, syncookie_secret[c], sizeof(syncookie_secret[c])); 44 memcpy(tmp + 3, syncookie_secret[c], sizeof(syncookie_secret[c]));
44 tmp[0] = (__force u32)saddr; 45 tmp[0] = (__force u32)saddr;
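Two things change here: syncookie_secret is exported (and the old "Missing: IPv6 support" note goes away, so the secret can be shared with an IPv6 user), and the SHA scratch area moves from a roughly 400-byte on-stack array (16 + 5 + SHA_WORKSPACE_WORDS words) into per-CPU data. A minimal sketch of the per-CPU scratch pattern with hypothetical names; it assumes the caller runs with preemption disabled (softirq/bottom-half context), as these call sites are expected to:

#include <linux/cryptohash.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* per-CPU scratch instead of a large on-stack array; only safe while
 * preemption is disabled, which is assumed of the caller here */
static DEFINE_PER_CPU(__u32, demo_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];

static u32 demo_hash(__be32 saddr, __be32 daddr)
{
        __u32 *tmp = __get_cpu_var(demo_scratch);

        tmp[0] = (__force u32)saddr;
        tmp[1] = (__force u32)daddr;
        /* ... fill in the secret and run sha_transform() over tmp ... */
        return tmp[0];
}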
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 3aa0b23c1ea0..eb5b9854c8c7 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -1,12 +1,13 @@
1/* 1/*
2 * TCP CUBIC: Binary Increase Congestion control for TCP v2.1 2 * TCP CUBIC: Binary Increase Congestion control for TCP v2.2
3 * 3 * Home page:
4 * http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
4 * This is from the implementation of CUBIC TCP in 5 * This is from the implementation of CUBIC TCP in
5 * Injong Rhee, Lisong Xu. 6 * Injong Rhee, Lisong Xu.
6 * "CUBIC: A New TCP-Friendly High-Speed TCP Variant 7 * "CUBIC: A New TCP-Friendly High-Speed TCP Variant
7 * in PFLDnet 2005 8 * in PFLDnet 2005
8 * Available from: 9 * Available from:
9 * http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/cubic-paper.pdf 10 * http://netsrv.csc.ncsu.edu/export/cubic-paper.pdf
10 * 11 *
11 * Unless CUBIC is enabled and congestion window is large 12 * Unless CUBIC is enabled and congestion window is large
12 * this behaves the same as the original Reno. 13 * this behaves the same as the original Reno.
@@ -20,15 +21,10 @@
20#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation 21#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation
21 * max_cwnd = snd_cwnd * beta 22 * max_cwnd = snd_cwnd * beta
22 */ 23 */
23#define BICTCP_B 4 /*
24 * In binary search,
25 * go to point (max+min)/N
26 */
27#define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */ 24#define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */
28 25
29static int fast_convergence __read_mostly = 1; 26static int fast_convergence __read_mostly = 1;
30static int max_increment __read_mostly = 16; 27static int beta __read_mostly = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */
31static int beta __read_mostly = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */
32static int initial_ssthresh __read_mostly; 28static int initial_ssthresh __read_mostly;
33static int bic_scale __read_mostly = 41; 29static int bic_scale __read_mostly = 41;
34static int tcp_friendliness __read_mostly = 1; 30static int tcp_friendliness __read_mostly = 1;
@@ -40,9 +36,7 @@ static u64 cube_factor __read_mostly;
40/* Note parameters that are used for precomputing scale factors are read-only */ 36/* Note parameters that are used for precomputing scale factors are read-only */
41module_param(fast_convergence, int, 0644); 37module_param(fast_convergence, int, 0644);
42MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence"); 38MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
43module_param(max_increment, int, 0644); 39module_param(beta, int, 0644);
44MODULE_PARM_DESC(max_increment, "Limit on increment allowed during binary search");
45module_param(beta, int, 0444);
46MODULE_PARM_DESC(beta, "beta for multiplicative increase"); 40MODULE_PARM_DESC(beta, "beta for multiplicative increase");
47module_param(initial_ssthresh, int, 0644); 41module_param(initial_ssthresh, int, 0644);
48MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold"); 42MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
@@ -145,7 +139,7 @@ static u32 cubic_root(u64 a)
145static inline void bictcp_update(struct bictcp *ca, u32 cwnd) 139static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
146{ 140{
147 u64 offs; 141 u64 offs;
148 u32 delta, t, bic_target, min_cnt, max_cnt; 142 u32 delta, t, bic_target, max_cnt;
149 143
150 ca->ack_cnt++; /* count the number of ACKs */ 144 ca->ack_cnt++; /* count the number of ACKs */
151 145
@@ -211,19 +205,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
211 ca->cnt = 100 * cwnd; /* very small increment*/ 205 ca->cnt = 100 * cwnd; /* very small increment*/
212 } 206 }
213 207
214 if (ca->delay_min > 0) {
215 /* max increment = Smax * rtt / 0.1 */
216 min_cnt = (cwnd * HZ * 8)/(10 * max_increment * ca->delay_min);
217
218 /* use concave growth when the target is above the origin */
219 if (ca->cnt < min_cnt && t >= ca->bic_K)
220 ca->cnt = min_cnt;
221 }
222
223 /* slow start and low utilization */
224 if (ca->loss_cwnd == 0) /* could be aggressive in slow start */
225 ca->cnt = 50;
226
227 /* TCP Friendly */ 208 /* TCP Friendly */
228 if (tcp_friendliness) { 209 if (tcp_friendliness) {
229 u32 scale = beta_scale; 210 u32 scale = beta_scale;
@@ -391,4 +372,4 @@ module_exit(cubictcp_unregister);
391MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger"); 372MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger");
392MODULE_LICENSE("GPL"); 373MODULE_LICENSE("GPL");
393MODULE_DESCRIPTION("CUBIC TCP"); 374MODULE_DESCRIPTION("CUBIC TCP");
394MODULE_VERSION("2.1"); 375MODULE_VERSION("2.2");
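Beyond the documentation pointers, the functional change is the loss response: beta goes from 819 to 717, so the multiplicative-decrease factor drops from about 819/1024 ≈ 0.8 to 717/1024 ≈ 0.7, and the old max_increment/BICTCP_B binary-search clamp plus the loss_cwnd == 0 slow-start special case are removed. A small sketch of how a scaled beta feeds the ssthresh calculation (generic pattern, not copied from this file):

#define DEMO_BETA_SCALE 1024
static int demo_beta = 717;             /* 717/1024 ~= 0.70 */

/* congestion window after a loss: cwnd scaled by beta/1024,
 * never allowed to fall below two segments */
static unsigned int demo_ssthresh(unsigned int cwnd)
{
        unsigned int ssthresh = (cwnd * demo_beta) / DEMO_BETA_SCALE;

        return ssthresh > 2U ? ssthresh : 2U;
}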
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7facdb0f6960..c4679f343675 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5330,6 +5330,7 @@ discard:
5330 5330
5331EXPORT_SYMBOL(sysctl_tcp_ecn); 5331EXPORT_SYMBOL(sysctl_tcp_ecn);
5332EXPORT_SYMBOL(sysctl_tcp_reordering); 5332EXPORT_SYMBOL(sysctl_tcp_reordering);
5333EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
5333EXPORT_SYMBOL(tcp_parse_options); 5334EXPORT_SYMBOL(tcp_parse_options);
5334EXPORT_SYMBOL(tcp_rcv_established); 5335EXPORT_SYMBOL(tcp_rcv_established);
5335EXPORT_SYMBOL(tcp_rcv_state_process); 5336EXPORT_SYMBOL(tcp_rcv_state_process);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 00156bf421ca..3873c4dbeaeb 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -723,8 +723,8 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
723 * This still operates on a request_sock only, not on a big 723 * This still operates on a request_sock only, not on a big
724 * socket. 724 * socket.
725 */ 725 */
726static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, 726static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
727 struct dst_entry *dst) 727 struct dst_entry *dst)
728{ 728{
729 const struct inet_request_sock *ireq = inet_rsk(req); 729 const struct inet_request_sock *ireq = inet_rsk(req);
730 int err = -1; 730 int err = -1;
@@ -732,7 +732,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
732 732
733 /* First, grab a route. */ 733 /* First, grab a route. */
734 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) 734 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
735 goto out; 735 return -1;
736 736
737 skb = tcp_make_synack(sk, dst, req); 737 skb = tcp_make_synack(sk, dst, req);
738 738
@@ -751,11 +751,15 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
751 err = net_xmit_eval(err); 751 err = net_xmit_eval(err);
752 } 752 }
753 753
754out:
755 dst_release(dst); 754 dst_release(dst);
756 return err; 755 return err;
757} 756}
758 757
758static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
759{
760 return __tcp_v4_send_synack(sk, req, NULL);
761}
762
759/* 763/*
760 * IPv4 request_sock destructor. 764 * IPv4 request_sock destructor.
761 */ 765 */
@@ -1351,8 +1355,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1351 (s32)(peer->tcp_ts - req->ts_recent) > 1355 (s32)(peer->tcp_ts - req->ts_recent) >
1352 TCP_PAWS_WINDOW) { 1356 TCP_PAWS_WINDOW) {
1353 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED); 1357 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
1354 dst_release(dst); 1358 goto drop_and_release;
1355 goto drop_and_free;
1356 } 1359 }
1357 } 1360 }
1358 /* Kill the following clause, if you dislike this way. */ 1361 /* Kill the following clause, if you dislike this way. */
@@ -1372,24 +1375,21 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1372 "request from %u.%u.%u.%u/%u\n", 1375 "request from %u.%u.%u.%u/%u\n",
1373 NIPQUAD(saddr), 1376 NIPQUAD(saddr),
1374 ntohs(tcp_hdr(skb)->source)); 1377 ntohs(tcp_hdr(skb)->source));
1375 dst_release(dst); 1378 goto drop_and_release;
1376 goto drop_and_free;
1377 } 1379 }
1378 1380
1379 isn = tcp_v4_init_sequence(skb); 1381 isn = tcp_v4_init_sequence(skb);
1380 } 1382 }
1381 tcp_rsk(req)->snt_isn = isn; 1383 tcp_rsk(req)->snt_isn = isn;
1382 1384
1383 if (tcp_v4_send_synack(sk, req, dst)) 1385 if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
1384 goto drop_and_free; 1386 goto drop_and_free;
1385 1387
1386 if (want_cookie) { 1388 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1387 reqsk_free(req);
1388 } else {
1389 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1390 }
1391 return 0; 1389 return 0;
1392 1390
1391drop_and_release:
1392 dst_release(dst);
1393drop_and_free: 1393drop_and_free:
1394 reqsk_free(req); 1394 reqsk_free(req);
1395drop: 1395drop:
@@ -2443,7 +2443,7 @@ struct proto tcp_prot = {
2443 REF_PROTO_INUSE(tcp) 2443 REF_PROTO_INUSE(tcp)
2444}; 2444};
2445 2445
2446void __init tcp_v4_init(struct net_proto_family *ops) 2446void __init tcp_v4_init(void)
2447{ 2447{
2448 if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW, 2448 if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW,
2449 IPPROTO_TCP) < 0) 2449 IPPROTO_TCP) < 0)
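Taken together, these hunks split SYN-ACK transmission into __tcp_v4_send_synack(), which may be handed an already-resolved route, and a two-argument tcp_v4_send_synack() wrapper matching the rtx_syn_ack signature used from tcp_minisocks.c below; the syncookie case now sends the SYN-ACK and immediately frees the request instead of hashing it. A condensed sketch of the wrapper pattern (demo_* names are illustrative):

static int __demo_send_synack(struct sock *sk, struct request_sock *req,
                              struct dst_entry *dst)
{
        /* look up a route only when the caller did not supply one */
        if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                return -1;

        /* ... build the SYN-ACK and hand it to ip_build_and_send_pkt() ... */

        dst_release(dst);
        return 0;
}

/* two-argument form suitable for rsk_ops->rtx_syn_ack retransmissions */
static int demo_send_synack(struct sock *sk, struct request_sock *req)
{
        return __demo_send_synack(sk, req, NULL);
}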
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b61b76847ad9..8245247a6ceb 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -35,6 +35,8 @@
35#endif 35#endif
36 36
37int sysctl_tcp_syncookies __read_mostly = SYNC_INIT; 37int sysctl_tcp_syncookies __read_mostly = SYNC_INIT;
38EXPORT_SYMBOL(sysctl_tcp_syncookies);
39
38int sysctl_tcp_abort_on_overflow __read_mostly; 40int sysctl_tcp_abort_on_overflow __read_mostly;
39 41
40struct inet_timewait_death_row tcp_death_row = { 42struct inet_timewait_death_row tcp_death_row = {
@@ -536,7 +538,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
536 * Enforce "SYN-ACK" according to figure 8, figure 6 538 * Enforce "SYN-ACK" according to figure 8, figure 6
537 * of RFC793, fixed by RFC1122. 539 * of RFC793, fixed by RFC1122.
538 */ 540 */
539 req->rsk_ops->rtx_syn_ack(sk, req, NULL); 541 req->rsk_ops->rtx_syn_ack(sk, req);
540 return NULL; 542 return NULL;
541 } 543 }
542 544
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ed750f9ceb07..cbfef8b1f5e8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2560,6 +2560,7 @@ void tcp_send_probe0(struct sock *sk)
2560 } 2560 }
2561} 2561}
2562 2562
2563EXPORT_SYMBOL(tcp_select_initial_window);
2563EXPORT_SYMBOL(tcp_connect); 2564EXPORT_SYMBOL(tcp_connect);
2564EXPORT_SYMBOL(tcp_make_synack); 2565EXPORT_SYMBOL(tcp_make_synack);
2565EXPORT_SYMBOL(tcp_simple_retransmit); 2566EXPORT_SYMBOL(tcp_simple_retransmit);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7ea1b67b6de1..c53d7673b57d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -246,553 +246,6 @@ int udp_get_port(struct sock *sk, unsigned short snum,
246 return __udp_lib_get_port(sk, snum, udp_hash, scmp); 246 return __udp_lib_get_port(sk, snum, udp_hash, scmp);
247} 247}
248 248
249int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
250{
251 struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
252
253 return ( !ipv6_only_sock(sk2) &&
254 (!inet1->rcv_saddr || !inet2->rcv_saddr ||
255 inet1->rcv_saddr == inet2->rcv_saddr ));
256}
257
258static inline int udp_v4_get_port(struct sock *sk, unsigned short snum)
259{
260 return udp_get_port(sk, snum, ipv4_rcv_saddr_equal);
261}
262
263/* UDP is nearly always wildcarded out the wazoo; it makes no sense to try
264 * harder than this. -DaveM
265 */
266static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
267 __be16 sport, __be32 daddr, __be16 dport,
268 int dif, struct hlist_head udptable[])
269{
270 struct sock *sk, *result = NULL;
271 struct hlist_node *node;
272 unsigned short hnum = ntohs(dport);
273 int badness = -1;
274
275 read_lock(&udp_hash_lock);
276 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
277 struct inet_sock *inet = inet_sk(sk);
278
279 if (sk->sk_net == net && sk->sk_hash == hnum &&
280 !ipv6_only_sock(sk)) {
281 int score = (sk->sk_family == PF_INET ? 1 : 0);
282 if (inet->rcv_saddr) {
283 if (inet->rcv_saddr != daddr)
284 continue;
285 score+=2;
286 }
287 if (inet->daddr) {
288 if (inet->daddr != saddr)
289 continue;
290 score+=2;
291 }
292 if (inet->dport) {
293 if (inet->dport != sport)
294 continue;
295 score+=2;
296 }
297 if (sk->sk_bound_dev_if) {
298 if (sk->sk_bound_dev_if != dif)
299 continue;
300 score+=2;
301 }
302 if (score == 9) {
303 result = sk;
304 break;
305 } else if (score > badness) {
306 result = sk;
307 badness = score;
308 }
309 }
310 }
311 if (result)
312 sock_hold(result);
313 read_unlock(&udp_hash_lock);
314 return result;
315}
316
317static inline struct sock *udp_v4_mcast_next(struct sock *sk,
318 __be16 loc_port, __be32 loc_addr,
319 __be16 rmt_port, __be32 rmt_addr,
320 int dif)
321{
322 struct hlist_node *node;
323 struct sock *s = sk;
324 unsigned short hnum = ntohs(loc_port);
325
326 sk_for_each_from(s, node) {
327 struct inet_sock *inet = inet_sk(s);
328
329 if (s->sk_hash != hnum ||
330 (inet->daddr && inet->daddr != rmt_addr) ||
331 (inet->dport != rmt_port && inet->dport) ||
332 (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
333 ipv6_only_sock(s) ||
334 (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
335 continue;
336 if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
337 continue;
338 goto found;
339 }
340 s = NULL;
341found:
342 return s;
343}
344
345/*
346 * This routine is called by the ICMP module when it gets some
347 * sort of error condition. If err < 0 then the socket should
348 * be closed and the error returned to the user. If err > 0
349 * it's just the icmp type << 8 | icmp code.
350 * Header points to the ip header of the error packet. We move
351 * on past this. Then (as it used to claim before adjustment)
352 * header points to the first 8 bytes of the udp header. We need
353 * to find the appropriate port.
354 */
355
356void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
357{
358 struct inet_sock *inet;
359 struct iphdr *iph = (struct iphdr*)skb->data;
360 struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2));
361 const int type = icmp_hdr(skb)->type;
362 const int code = icmp_hdr(skb)->code;
363 struct sock *sk;
364 int harderr;
365 int err;
366
367 sk = __udp4_lib_lookup(skb->dev->nd_net, iph->daddr, uh->dest,
368 iph->saddr, uh->source, skb->dev->ifindex, udptable);
369 if (sk == NULL) {
370 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
371 return; /* No socket for error */
372 }
373
374 err = 0;
375 harderr = 0;
376 inet = inet_sk(sk);
377
378 switch (type) {
379 default:
380 case ICMP_TIME_EXCEEDED:
381 err = EHOSTUNREACH;
382 break;
383 case ICMP_SOURCE_QUENCH:
384 goto out;
385 case ICMP_PARAMETERPROB:
386 err = EPROTO;
387 harderr = 1;
388 break;
389 case ICMP_DEST_UNREACH:
390 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
391 if (inet->pmtudisc != IP_PMTUDISC_DONT) {
392 err = EMSGSIZE;
393 harderr = 1;
394 break;
395 }
396 goto out;
397 }
398 err = EHOSTUNREACH;
399 if (code <= NR_ICMP_UNREACH) {
400 harderr = icmp_err_convert[code].fatal;
401 err = icmp_err_convert[code].errno;
402 }
403 break;
404 }
405
406 /*
407 * RFC1122: OK. Passes ICMP errors back to application, as per
408 * 4.1.3.3.
409 */
410 if (!inet->recverr) {
411 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
412 goto out;
413 } else {
414 ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));
415 }
416 sk->sk_err = err;
417 sk->sk_error_report(sk);
418out:
419 sock_put(sk);
420}
421
422void udp_err(struct sk_buff *skb, u32 info)
423{
424 __udp4_lib_err(skb, info, udp_hash);
425}
426
427/*
428 * Throw away all pending data and cancel the corking. Socket is locked.
429 */
430static void udp_flush_pending_frames(struct sock *sk)
431{
432 struct udp_sock *up = udp_sk(sk);
433
434 if (up->pending) {
435 up->len = 0;
436 up->pending = 0;
437 ip_flush_pending_frames(sk);
438 }
439}
440
441/**
442 * udp4_hwcsum_outgoing - handle outgoing HW checksumming
443 * @sk: socket we are sending on
444 * @skb: sk_buff containing the filled-in UDP header
445 * (checksum field must be zeroed out)
446 */
447static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
448 __be32 src, __be32 dst, int len )
449{
450 unsigned int offset;
451 struct udphdr *uh = udp_hdr(skb);
452 __wsum csum = 0;
453
454 if (skb_queue_len(&sk->sk_write_queue) == 1) {
455 /*
456 * Only one fragment on the socket.
457 */
458 skb->csum_start = skb_transport_header(skb) - skb->head;
459 skb->csum_offset = offsetof(struct udphdr, check);
460 uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
461 } else {
462 /*
463 * HW checksumming won't work, as there are two or more
464 * fragments on the socket, so the csums of all sk_buffs
465 * have to be folded together
466 */
467 offset = skb_transport_offset(skb);
468 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
469
470 skb->ip_summed = CHECKSUM_NONE;
471
472 skb_queue_walk(&sk->sk_write_queue, skb) {
473 csum = csum_add(csum, skb->csum);
474 }
475
476 uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
477 if (uh->check == 0)
478 uh->check = CSUM_MANGLED_0;
479 }
480}
481
482/*
483 * Push out all pending data as one UDP datagram. Socket is locked.
484 */
485static int udp_push_pending_frames(struct sock *sk)
486{
487 struct udp_sock *up = udp_sk(sk);
488 struct inet_sock *inet = inet_sk(sk);
489 struct flowi *fl = &inet->cork.fl;
490 struct sk_buff *skb;
491 struct udphdr *uh;
492 int err = 0;
493 int is_udplite = IS_UDPLITE(sk);
494 __wsum csum = 0;
495
496 /* Grab the skbuff where UDP header space exists. */
497 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
498 goto out;
499
500 /*
501 * Create a UDP header
502 */
503 uh = udp_hdr(skb);
504 uh->source = fl->fl_ip_sport;
505 uh->dest = fl->fl_ip_dport;
506 uh->len = htons(up->len);
507 uh->check = 0;
508
509 if (is_udplite) /* UDP-Lite */
510 csum = udplite_csum_outgoing(sk, skb);
511
512 else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */
513
514 skb->ip_summed = CHECKSUM_NONE;
515 goto send;
516
517 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
518
519 udp4_hwcsum_outgoing(sk, skb, fl->fl4_src,fl->fl4_dst, up->len);
520 goto send;
521
522 } else /* `normal' UDP */
523 csum = udp_csum_outgoing(sk, skb);
524
525 /* add protocol-dependent pseudo-header */
526 uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
527 sk->sk_protocol, csum );
528 if (uh->check == 0)
529 uh->check = CSUM_MANGLED_0;
530
531send:
532 err = ip_push_pending_frames(sk);
533out:
534 up->len = 0;
535 up->pending = 0;
536 if (!err)
537 UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
538 return err;
539}
540
541int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
542 size_t len)
543{
544 struct inet_sock *inet = inet_sk(sk);
545 struct udp_sock *up = udp_sk(sk);
546 int ulen = len;
547 struct ipcm_cookie ipc;
548 struct rtable *rt = NULL;
549 int free = 0;
550 int connected = 0;
551 __be32 daddr, faddr, saddr;
552 __be16 dport;
553 u8 tos;
554 int err, is_udplite = IS_UDPLITE(sk);
555 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
556 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
557
558 if (len > 0xFFFF)
559 return -EMSGSIZE;
560
561 /*
562 * Check the flags.
563 */
564
565 if (msg->msg_flags&MSG_OOB) /* Mirror BSD error message compatibility */
566 return -EOPNOTSUPP;
567
568 ipc.opt = NULL;
569
570 if (up->pending) {
571 /*
572 * There are pending frames.
573 * The socket lock must be held while it's corked.
574 */
575 lock_sock(sk);
576 if (likely(up->pending)) {
577 if (unlikely(up->pending != AF_INET)) {
578 release_sock(sk);
579 return -EINVAL;
580 }
581 goto do_append_data;
582 }
583 release_sock(sk);
584 }
585 ulen += sizeof(struct udphdr);
586
587 /*
588 * Get and verify the address.
589 */
590 if (msg->msg_name) {
591 struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name;
592 if (msg->msg_namelen < sizeof(*usin))
593 return -EINVAL;
594 if (usin->sin_family != AF_INET) {
595 if (usin->sin_family != AF_UNSPEC)
596 return -EAFNOSUPPORT;
597 }
598
599 daddr = usin->sin_addr.s_addr;
600 dport = usin->sin_port;
601 if (dport == 0)
602 return -EINVAL;
603 } else {
604 if (sk->sk_state != TCP_ESTABLISHED)
605 return -EDESTADDRREQ;
606 daddr = inet->daddr;
607 dport = inet->dport;
608 /* Open fast path for connected socket.
609 Route will not be used, if at least one option is set.
610 */
611 connected = 1;
612 }
613 ipc.addr = inet->saddr;
614
615 ipc.oif = sk->sk_bound_dev_if;
616 if (msg->msg_controllen) {
617 err = ip_cmsg_send(msg, &ipc);
618 if (err)
619 return err;
620 if (ipc.opt)
621 free = 1;
622 connected = 0;
623 }
624 if (!ipc.opt)
625 ipc.opt = inet->opt;
626
627 saddr = ipc.addr;
628 ipc.addr = faddr = daddr;
629
630 if (ipc.opt && ipc.opt->srr) {
631 if (!daddr)
632 return -EINVAL;
633 faddr = ipc.opt->faddr;
634 connected = 0;
635 }
636 tos = RT_TOS(inet->tos);
637 if (sock_flag(sk, SOCK_LOCALROUTE) ||
638 (msg->msg_flags & MSG_DONTROUTE) ||
639 (ipc.opt && ipc.opt->is_strictroute)) {
640 tos |= RTO_ONLINK;
641 connected = 0;
642 }
643
644 if (ipv4_is_multicast(daddr)) {
645 if (!ipc.oif)
646 ipc.oif = inet->mc_index;
647 if (!saddr)
648 saddr = inet->mc_addr;
649 connected = 0;
650 }
651
652 if (connected)
653 rt = (struct rtable*)sk_dst_check(sk, 0);
654
655 if (rt == NULL) {
656 struct flowi fl = { .oif = ipc.oif,
657 .nl_u = { .ip4_u =
658 { .daddr = faddr,
659 .saddr = saddr,
660 .tos = tos } },
661 .proto = sk->sk_protocol,
662 .uli_u = { .ports =
663 { .sport = inet->sport,
664 .dport = dport } } };
665 security_sk_classify_flow(sk, &fl);
666 err = ip_route_output_flow(&init_net, &rt, &fl, sk, 1);
667 if (err) {
668 if (err == -ENETUNREACH)
669 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
670 goto out;
671 }
672
673 err = -EACCES;
674 if ((rt->rt_flags & RTCF_BROADCAST) &&
675 !sock_flag(sk, SOCK_BROADCAST))
676 goto out;
677 if (connected)
678 sk_dst_set(sk, dst_clone(&rt->u.dst));
679 }
680
681 if (msg->msg_flags&MSG_CONFIRM)
682 goto do_confirm;
683back_from_confirm:
684
685 saddr = rt->rt_src;
686 if (!ipc.addr)
687 daddr = ipc.addr = rt->rt_dst;
688
689 lock_sock(sk);
690 if (unlikely(up->pending)) {
691 /* The socket is already corked while preparing it. */
692 /* ... which is an evident application bug. --ANK */
693 release_sock(sk);
694
695 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
696 err = -EINVAL;
697 goto out;
698 }
699 /*
700 * Now cork the socket to pend data.
701 */
702 inet->cork.fl.fl4_dst = daddr;
703 inet->cork.fl.fl_ip_dport = dport;
704 inet->cork.fl.fl4_src = saddr;
705 inet->cork.fl.fl_ip_sport = inet->sport;
706 up->pending = AF_INET;
707
708do_append_data:
709 up->len += ulen;
710 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
711 err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
712 sizeof(struct udphdr), &ipc, rt,
713 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
714 if (err)
715 udp_flush_pending_frames(sk);
716 else if (!corkreq)
717 err = udp_push_pending_frames(sk);
718 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
719 up->pending = 0;
720 release_sock(sk);
721
722out:
723 ip_rt_put(rt);
724 if (free)
725 kfree(ipc.opt);
726 if (!err)
727 return len;
728 /*
729 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
730 * ENOBUFS might not be good (it's not tunable per se), but otherwise
731 * we don't have a good statistic (IpOutDiscards but it can be too many
732 * things). We could add another new stat but at least for now that
733 * seems like overkill.
734 */
735 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
736 UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
737 }
738 return err;
739
740do_confirm:
741 dst_confirm(&rt->u.dst);
742 if (!(msg->msg_flags&MSG_PROBE) || len)
743 goto back_from_confirm;
744 err = 0;
745 goto out;
746}
747
748int udp_sendpage(struct sock *sk, struct page *page, int offset,
749 size_t size, int flags)
750{
751 struct udp_sock *up = udp_sk(sk);
752 int ret;
753
754 if (!up->pending) {
755 struct msghdr msg = { .msg_flags = flags|MSG_MORE };
756
757 /* Call udp_sendmsg to specify destination address which
758 * sendpage interface can't pass.
759 * This will succeed only when the socket is connected.
760 */
761 ret = udp_sendmsg(NULL, sk, &msg, 0);
762 if (ret < 0)
763 return ret;
764 }
765
766 lock_sock(sk);
767
768 if (unlikely(!up->pending)) {
769 release_sock(sk);
770
771 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
772 return -EINVAL;
773 }
774
775 ret = ip_append_page(sk, page, offset, size, flags);
776 if (ret == -EOPNOTSUPP) {
777 release_sock(sk);
778 return sock_no_sendpage(sk->sk_socket, page, offset,
779 size, flags);
780 }
781 if (ret < 0) {
782 udp_flush_pending_frames(sk);
783 goto out;
784 }
785
786 up->len += size;
787 if (!(up->corkflag || (flags&MSG_MORE)))
788 ret = udp_push_pending_frames(sk);
789 if (!ret)
790 ret = size;
791out:
792 release_sock(sk);
793 return ret;
794}
795
796/* 249/*
797 * IOCTL requests applicable to the UDP protocol 250 * IOCTL requests applicable to the UDP protocol
798 */ 251 */
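The sendmsg/sendpage code removed above (it moves to udp_ipv4.c) only builds the datagram once neither up->corkflag nor MSG_MORE is in effect; until then each call just appends to the cork queue, and udp_push_pending_frames() later emits a single datagram. A minimal userspace sketch of driving that path with MSG_MORE (error handling trimmed; the payload strings are arbitrary):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* two sendto() calls, one datagram on the wire ("headerpayload"):
 * MSG_MORE keeps the first part on the socket's cork queue */
static int send_in_two_parts(int fd, const struct sockaddr_in *dst)
{
        const char part1[] = "header";
        const char part2[] = "payload";

        if (sendto(fd, part1, strlen(part1), MSG_MORE,
                   (const struct sockaddr *)dst, sizeof(*dst)) < 0)
                return -1;

        /* no MSG_MORE: this call pushes the pending frames */
        if (sendto(fd, part2, strlen(part2), 0,
                   (const struct sockaddr *)dst, sizeof(*dst)) < 0)
                return -1;
        return 0;
}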
@@ -833,107 +286,6 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
833 return 0; 286 return 0;
834} 287}
835 288
836/*
837 * This should be easy, if there is something there we
838 * return it, otherwise we block.
839 */
840
841int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
842 size_t len, int noblock, int flags, int *addr_len)
843{
844 struct inet_sock *inet = inet_sk(sk);
845 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
846 struct sk_buff *skb;
847 unsigned int ulen, copied;
848 int peeked;
849 int err;
850 int is_udplite = IS_UDPLITE(sk);
851
852 /*
853 * Check any passed addresses
854 */
855 if (addr_len)
856 *addr_len=sizeof(*sin);
857
858 if (flags & MSG_ERRQUEUE)
859 return ip_recv_error(sk, msg, len);
860
861try_again:
862 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
863 &peeked, &err);
864 if (!skb)
865 goto out;
866
867 ulen = skb->len - sizeof(struct udphdr);
868 copied = len;
869 if (copied > ulen)
870 copied = ulen;
871 else if (copied < ulen)
872 msg->msg_flags |= MSG_TRUNC;
873
874 /*
875 * If checksum is needed at all, try to do it while copying the
876 * data. If the data is truncated, or if we only want a partial
877 * coverage checksum (UDP-Lite), do it before the copy.
878 */
879
880 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
881 if (udp_lib_checksum_complete(skb))
882 goto csum_copy_err;
883 }
884
885 if (skb_csum_unnecessary(skb))
886 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
887 msg->msg_iov, copied );
888 else {
889 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
890
891 if (err == -EINVAL)
892 goto csum_copy_err;
893 }
894
895 if (err)
896 goto out_free;
897
898 if (!peeked)
899 UDP_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite);
900
901 sock_recv_timestamp(msg, sk, skb);
902
903 /* Copy the address. */
904 if (sin)
905 {
906 sin->sin_family = AF_INET;
907 sin->sin_port = udp_hdr(skb)->source;
908 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
909 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
910 }
911 if (inet->cmsg_flags)
912 ip_cmsg_recv(msg, skb);
913
914 err = copied;
915 if (flags & MSG_TRUNC)
916 err = ulen;
917
918out_free:
919 lock_sock(sk);
920 skb_free_datagram(sk, skb);
921 release_sock(sk);
922out:
923 return err;
924
925csum_copy_err:
926 lock_sock(sk);
927 if (!skb_kill_datagram(sk, skb, flags))
928 UDP_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
929 release_sock(sk);
930
931 if (noblock)
932 return -EAGAIN;
933 goto try_again;
934}
935
936
937int udp_disconnect(struct sock *sk, int flags) 289int udp_disconnect(struct sock *sk, int flags)
938{ 290{
939 struct inet_sock *inet = inet_sk(sk); 291 struct inet_sock *inet = inet_sk(sk);
@@ -956,319 +308,6 @@ int udp_disconnect(struct sock *sk, int flags)
956 return 0; 308 return 0;
957} 309}
958 310
959/* returns:
960 * -1: error
961 * 0: success
962 * >0: "udp encap" protocol resubmission
963 *
964 * Note that in the success and error cases, the skb is assumed to
965 * have either been requeued or freed.
966 */
967int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
968{
969 struct udp_sock *up = udp_sk(sk);
970 int rc;
971 int is_udplite = IS_UDPLITE(sk);
972
973 /*
974 * Charge it to the socket, dropping if the queue is full.
975 */
976 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
977 goto drop;
978 nf_reset(skb);
979
980 if (up->encap_type) {
981 /*
982 * This is an encapsulation socket so pass the skb to
983 * the socket's udp_encap_rcv() hook. Otherwise, just
984 * fall through and pass this up the UDP socket.
985 * up->encap_rcv() returns the following value:
986 * =0 if skb was successfully passed to the encap
987 * handler or was discarded by it.
988 * >0 if skb should be passed on to UDP.
989 * <0 if skb should be resubmitted as proto -N
990 */
991
992 /* if we're overly short, let UDP handle it */
993 if (skb->len > sizeof(struct udphdr) &&
994 up->encap_rcv != NULL) {
995 int ret;
996
997 ret = (*up->encap_rcv)(sk, skb);
998 if (ret <= 0) {
999 UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS,
1000 is_udplite);
1001 return -ret;
1002 }
1003 }
1004
1005 /* FALLTHROUGH -- it's a UDP Packet */
1006 }
1007
1008 /*
1009 * UDP-Lite specific tests, ignored on UDP sockets
1010 */
1011 if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
1012
1013 /*
1014 * MIB statistics other than incrementing the error count are
1015 * disabled for the following two types of errors: these depend
1016 * on the application settings, not on the functioning of the
1017 * protocol stack as such.
1018 *
1019 * RFC 3828 here recommends (sec 3.3): "There should also be a
1020 * way ... to ... at least let the receiving application block
1021 * delivery of packets with coverage values less than a value
1022 * provided by the application."
1023 */
1024 if (up->pcrlen == 0) { /* full coverage was set */
1025 LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
1026 "%d while full coverage %d requested\n",
1027 UDP_SKB_CB(skb)->cscov, skb->len);
1028 goto drop;
1029 }
1030 /* The next case involves violating the min. coverage requested
1031 * by the receiver. This is subtle: if receiver wants x and x is
1032 * greater than the buffersize/MTU then receiver will complain
1033 * that it wants x while sender emits packets of smaller size y.
1034 * Therefore the above ...()->partial_cov statement is essential.
1035 */
1036 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
1037 LIMIT_NETDEBUG(KERN_WARNING
1038 "UDPLITE: coverage %d too small, need min %d\n",
1039 UDP_SKB_CB(skb)->cscov, up->pcrlen);
1040 goto drop;
1041 }
1042 }
1043
1044 if (sk->sk_filter) {
1045 if (udp_lib_checksum_complete(skb))
1046 goto drop;
1047 }
1048
1049 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
1050 /* Note that an ENOMEM error is charged twice */
1051 if (rc == -ENOMEM)
1052 UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite);
1053 goto drop;
1054 }
1055
1056 return 0;
1057
1058drop:
1059 UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
1060 kfree_skb(skb);
1061 return -1;
1062}
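The encap_type branch in udp_queue_rcv_skb() above spells out the contract for a socket's encap_rcv() hook: return 0 when the frame was consumed or discarded by the encapsulation handler, a positive value to fall through to normal UDP delivery, and a negative -N to resubmit the skb as IP protocol N. A minimal sketch of a hook honouring that contract; the 4-byte marker check is purely hypothetical:

#include <linux/skbuff.h>
#include <linux/udp.h>

static int demo_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
        __be32 marker;

        /* too short to carry our hypothetical tunnel marker:
         * tell the caller (>0) to deliver it as ordinary UDP */
        if (skb_copy_bits(skb, sizeof(struct udphdr), &marker,
                          sizeof(marker)) < 0)
                return 1;

        if (marker != htonl(0xdeadbeef))
                return 1;                       /* not ours either */

        kfree_skb(skb);                         /* consumed: report 0 */
        return 0;
}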
1063
1064/*
1065 * Multicasts and broadcasts go to each listener.
1066 *
1067 * Note: called only from the BH handler context,
1068 * so we don't need to lock the hashes.
1069 */
1070static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
1071 struct udphdr *uh,
1072 __be32 saddr, __be32 daddr,
1073 struct hlist_head udptable[])
1074{
1075 struct sock *sk;
1076 int dif;
1077
1078 read_lock(&udp_hash_lock);
1079 sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
1080 dif = skb->dev->ifindex;
1081 sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
1082 if (sk) {
1083 struct sock *sknext = NULL;
1084
1085 do {
1086 struct sk_buff *skb1 = skb;
1087
1088 sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
1089 uh->source, saddr, dif);
1090 if (sknext)
1091 skb1 = skb_clone(skb, GFP_ATOMIC);
1092
1093 if (skb1) {
1094 int ret = 0;
1095
1096 bh_lock_sock_nested(sk);
1097 if (!sock_owned_by_user(sk))
1098 ret = udp_queue_rcv_skb(sk, skb1);
1099 else
1100 sk_add_backlog(sk, skb1);
1101 bh_unlock_sock(sk);
1102
1103 if (ret > 0)
1104 /* we should probably re-process instead
1105 * of dropping packets here. */
1106 kfree_skb(skb1);
1107 }
1108 sk = sknext;
1109 } while (sknext);
1110 } else
1111 kfree_skb(skb);
1112 read_unlock(&udp_hash_lock);
1113 return 0;
1114}
1115
1116/* Initialize the UDP checksum. If this exits with zero (success),
1117 * CHECKSUM_UNNECESSARY means that no more checks are required.
1118 * Otherwise, csum completion requires checksumming the packet body,
1119 * including the udp header, and folding it into skb->csum.
1120 */
1121static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
1122 int proto)
1123{
1124 const struct iphdr *iph;
1125 int err;
1126
1127 UDP_SKB_CB(skb)->partial_cov = 0;
1128 UDP_SKB_CB(skb)->cscov = skb->len;
1129
1130 if (proto == IPPROTO_UDPLITE) {
1131 err = udplite_checksum_init(skb, uh);
1132 if (err)
1133 return err;
1134 }
1135
1136 iph = ip_hdr(skb);
1137 if (uh->check == 0) {
1138 skb->ip_summed = CHECKSUM_UNNECESSARY;
1139 } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
1140 if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
1141 proto, skb->csum))
1142 skb->ip_summed = CHECKSUM_UNNECESSARY;
1143 }
1144 if (!skb_csum_unnecessary(skb))
1145 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1146 skb->len, proto, 0);
1147 /* Probably, we should checksum udp header (it should be in cache
1148 * in any case) and data in tiny packets (< rx copybreak).
1149 */
1150
1151 return 0;
1152}
1153
1154/*
1155 * All we need to do is get the socket, and then do a checksum.
1156 */
1157
1158int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
1159 int proto)
1160{
1161 struct sock *sk;
1162 struct udphdr *uh = udp_hdr(skb);
1163 unsigned short ulen;
1164 struct rtable *rt = (struct rtable*)skb->dst;
1165 __be32 saddr = ip_hdr(skb)->saddr;
1166 __be32 daddr = ip_hdr(skb)->daddr;
1167
1168 /*
1169 * Validate the packet.
1170 */
1171 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
1172 goto drop; /* No space for header. */
1173
1174 ulen = ntohs(uh->len);
1175 if (ulen > skb->len)
1176 goto short_packet;
1177
1178 if (proto == IPPROTO_UDP) {
1179 /* UDP validates ulen. */
1180 if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
1181 goto short_packet;
1182 uh = udp_hdr(skb);
1183 }
1184
1185 if (udp4_csum_init(skb, uh, proto))
1186 goto csum_error;
1187
1188 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
1189 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
1190
1191 sk = __udp4_lib_lookup(skb->dev->nd_net, saddr, uh->source, daddr,
1192 uh->dest, inet_iif(skb), udptable);
1193
1194 if (sk != NULL) {
1195 int ret = 0;
1196 bh_lock_sock_nested(sk);
1197 if (!sock_owned_by_user(sk))
1198 ret = udp_queue_rcv_skb(sk, skb);
1199 else
1200 sk_add_backlog(sk, skb);
1201 bh_unlock_sock(sk);
1202 sock_put(sk);
1203
1204 /* a return value > 0 means to resubmit the input, but
1205 * it wants the return to be -protocol, or 0
1206 */
1207 if (ret > 0)
1208 return -ret;
1209 return 0;
1210 }
1211
1212 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1213 goto drop;
1214 nf_reset(skb);
1215
1216 /* No socket. Drop packet silently, if checksum is wrong */
1217 if (udp_lib_checksum_complete(skb))
1218 goto csum_error;
1219
1220 UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1221 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
1222
1223 /*
1224 * Hmm. We got a UDP packet to a port on which we
1225 * don't want to listen. Ignore it.
1226 */
1227 kfree_skb(skb);
1228 return 0;
1229
1230short_packet:
1231 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
1232 proto == IPPROTO_UDPLITE ? "-Lite" : "",
1233 NIPQUAD(saddr),
1234 ntohs(uh->source),
1235 ulen,
1236 skb->len,
1237 NIPQUAD(daddr),
1238 ntohs(uh->dest));
1239 goto drop;
1240
1241csum_error:
1242 /*
1243 * RFC1122: OK. Discards the bad packet silently (as far as
1244 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
1245 */
1246 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
1247 proto == IPPROTO_UDPLITE ? "-Lite" : "",
1248 NIPQUAD(saddr),
1249 ntohs(uh->source),
1250 NIPQUAD(daddr),
1251 ntohs(uh->dest),
1252 ulen);
1253drop:
1254 UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1255 kfree_skb(skb);
1256 return 0;
1257}
1258
1259int udp_rcv(struct sk_buff *skb)
1260{
1261 return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
1262}
1263
1264int udp_destroy_sock(struct sock *sk)
1265{
1266 lock_sock(sk);
1267 udp_flush_pending_frames(sk);
1268 release_sock(sk);
1269 return 0;
1270}
1271
1272/* 311/*
1273 * Socket option code for UDP 312 * Socket option code for UDP
1274 */ 313 */
@@ -1279,7 +318,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1279 struct udp_sock *up = udp_sk(sk); 318 struct udp_sock *up = udp_sk(sk);
1280 int val; 319 int val;
1281 int err = 0; 320 int err = 0;
321#ifdef CONFIG_IP_UDPLITE
1282 int is_udplite = IS_UDPLITE(sk); 322 int is_udplite = IS_UDPLITE(sk);
323#endif
1283 324
1284 if (optlen<sizeof(int)) 325 if (optlen<sizeof(int))
1285 return -EINVAL; 326 return -EINVAL;
@@ -1315,6 +356,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1315 } 356 }
1316 break; 357 break;
1317 358
359#ifdef CONFIG_IP_UDPLITE
1318 /* 360 /*
1319 * UDP-Lite's partial checksum coverage (RFC 3828). 361 * UDP-Lite's partial checksum coverage (RFC 3828).
1320 */ 362 */
@@ -1340,6 +382,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1340 up->pcrlen = val; 382 up->pcrlen = val;
1341 up->pcflag |= UDPLITE_RECV_CC; 383 up->pcflag |= UDPLITE_RECV_CC;
1342 break; 384 break;
385#endif
1343 386
1344 default: 387 default:
1345 err = -ENOPROTOOPT; 388 err = -ENOPROTOOPT;
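The setsockopt cases guarded by CONFIG_IP_UDPLITE here configure UDP-Lite partial checksum coverage (RFC 3828): UDPLITE_SEND_CSCOV bounds what the sender checksums, while UDPLITE_RECV_CSCOV (stored in up->pcrlen together with UDPLITE_RECV_CC) lets the receiver refuse datagrams with less coverage than requested. A minimal userspace usage sketch, assuming the customary constants of this era (IPPROTO_UDPLITE and SOL_UDPLITE both 136, option values 10 and 11):

#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE         136
#endif
#ifndef SOL_UDPLITE
#define SOL_UDPLITE             136
#define UDPLITE_SEND_CSCOV      10
#define UDPLITE_RECV_CSCOV      11
#endif

/* cover the 8-byte UDP-Lite header plus the first 12 payload bytes,
 * and refuse delivery of datagrams that cover less than that */
static int demo_udplite_socket(void)
{
        int cov = 20;
        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);

        if (fd < 0)
                return -1;
        if (setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov)) ||
            setsockopt(fd, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov))) {
                close(fd);
                return -1;
        }
        return fd;
}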
@@ -1349,26 +392,6 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1349 return err; 392 return err;
1350} 393}
1351 394
1352int udp_setsockopt(struct sock *sk, int level, int optname,
1353 char __user *optval, int optlen)
1354{
1355 if (level == SOL_UDP || level == SOL_UDPLITE)
1356 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1357 udp_push_pending_frames);
1358 return ip_setsockopt(sk, level, optname, optval, optlen);
1359}
1360
1361#ifdef CONFIG_COMPAT
1362int compat_udp_setsockopt(struct sock *sk, int level, int optname,
1363 char __user *optval, int optlen)
1364{
1365 if (level == SOL_UDP || level == SOL_UDPLITE)
1366 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1367 udp_push_pending_frames);
1368 return compat_ip_setsockopt(sk, level, optname, optval, optlen);
1369}
1370#endif
1371
1372int udp_lib_getsockopt(struct sock *sk, int level, int optname, 395int udp_lib_getsockopt(struct sock *sk, int level, int optname,
1373 char __user *optval, int __user *optlen) 396 char __user *optval, int __user *optlen)
1374{ 397{
@@ -1413,23 +436,6 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
1413 return 0; 436 return 0;
1414} 437}
1415 438
1416int udp_getsockopt(struct sock *sk, int level, int optname,
1417 char __user *optval, int __user *optlen)
1418{
1419 if (level == SOL_UDP || level == SOL_UDPLITE)
1420 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1421 return ip_getsockopt(sk, level, optname, optval, optlen);
1422}
1423
1424#ifdef CONFIG_COMPAT
1425int compat_udp_getsockopt(struct sock *sk, int level, int optname,
1426 char __user *optval, int __user *optlen)
1427{
1428 if (level == SOL_UDP || level == SOL_UDPLITE)
1429 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1430 return compat_ip_getsockopt(sk, level, optname, optval, optlen);
1431}
1432#endif
1433/** 439/**
1434 * udp_poll - wait for a UDP event. 440 * udp_poll - wait for a UDP event.
1435 * @file - file struct 441 * @file - file struct
@@ -1474,36 +480,6 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
1474 480
1475} 481}
1476 482
1477DEFINE_PROTO_INUSE(udp)
1478
1479struct proto udp_prot = {
1480 .name = "UDP",
1481 .owner = THIS_MODULE,
1482 .close = udp_lib_close,
1483 .connect = ip4_datagram_connect,
1484 .disconnect = udp_disconnect,
1485 .ioctl = udp_ioctl,
1486 .destroy = udp_destroy_sock,
1487 .setsockopt = udp_setsockopt,
1488 .getsockopt = udp_getsockopt,
1489 .sendmsg = udp_sendmsg,
1490 .recvmsg = udp_recvmsg,
1491 .sendpage = udp_sendpage,
1492 .backlog_rcv = udp_queue_rcv_skb,
1493 .hash = udp_lib_hash,
1494 .unhash = udp_lib_unhash,
1495 .get_port = udp_v4_get_port,
1496 .memory_allocated = &udp_memory_allocated,
1497 .sysctl_mem = sysctl_udp_mem,
1498 .sysctl_wmem = &sysctl_udp_wmem_min,
1499 .sysctl_rmem = &sysctl_udp_rmem_min,
1500 .obj_size = sizeof(struct udp_sock),
1501#ifdef CONFIG_COMPAT
1502 .compat_setsockopt = compat_udp_setsockopt,
1503 .compat_getsockopt = compat_udp_getsockopt,
1504#endif
1505 REF_PROTO_INUSE(udp)
1506};
1507 483
1508/* ------------------------------------------------------------------------ */ 484/* ------------------------------------------------------------------------ */
1509#ifdef CONFIG_PROC_FS 485#ifdef CONFIG_PROC_FS
@@ -1636,62 +612,6 @@ void udp_proc_unregister(struct udp_seq_afinfo *afinfo)
1636 proc_net_remove(&init_net, afinfo->name); 612 proc_net_remove(&init_net, afinfo->name);
1637 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); 613 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
1638} 614}
1639
1640/* ------------------------------------------------------------------------ */
1641static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
1642{
1643 struct inet_sock *inet = inet_sk(sp);
1644 __be32 dest = inet->daddr;
1645 __be32 src = inet->rcv_saddr;
1646 __u16 destp = ntohs(inet->dport);
1647 __u16 srcp = ntohs(inet->sport);
1648
1649 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
1650 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
1651 bucket, src, srcp, dest, destp, sp->sk_state,
1652 atomic_read(&sp->sk_wmem_alloc),
1653 atomic_read(&sp->sk_rmem_alloc),
1654 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
1655 atomic_read(&sp->sk_refcnt), sp);
1656}
1657
1658int udp4_seq_show(struct seq_file *seq, void *v)
1659{
1660 if (v == SEQ_START_TOKEN)
1661 seq_printf(seq, "%-127s\n",
1662 " sl local_address rem_address st tx_queue "
1663 "rx_queue tr tm->when retrnsmt uid timeout "
1664 "inode");
1665 else {
1666 char tmpbuf[129];
1667 struct udp_iter_state *state = seq->private;
1668
1669 udp4_format_sock(v, tmpbuf, state->bucket);
1670 seq_printf(seq, "%-127s\n", tmpbuf);
1671 }
1672 return 0;
1673}
1674
1675/* ------------------------------------------------------------------------ */
1676static struct file_operations udp4_seq_fops;
1677static struct udp_seq_afinfo udp4_seq_afinfo = {
1678 .owner = THIS_MODULE,
1679 .name = "udp",
1680 .family = AF_INET,
1681 .hashtable = udp_hash,
1682 .seq_show = udp4_seq_show,
1683 .seq_fops = &udp4_seq_fops,
1684};
1685
1686int __init udp4_proc_init(void)
1687{
1688 return udp_proc_register(&udp4_seq_afinfo);
1689}
1690
1691void udp4_proc_exit(void)
1692{
1693 udp_proc_unregister(&udp4_seq_afinfo);
1694}
1695#endif /* CONFIG_PROC_FS */ 615#endif /* CONFIG_PROC_FS */
1696 616
1697void __init udp_init(void) 617void __init udp_init(void)
@@ -1718,8 +638,6 @@ EXPORT_SYMBOL(udp_hash);
1718EXPORT_SYMBOL(udp_hash_lock); 638EXPORT_SYMBOL(udp_hash_lock);
1719EXPORT_SYMBOL(udp_ioctl); 639EXPORT_SYMBOL(udp_ioctl);
1720EXPORT_SYMBOL(udp_get_port); 640EXPORT_SYMBOL(udp_get_port);
1721EXPORT_SYMBOL(udp_prot);
1722EXPORT_SYMBOL(udp_sendmsg);
1723EXPORT_SYMBOL(udp_lib_getsockopt); 641EXPORT_SYMBOL(udp_lib_getsockopt);
1724EXPORT_SYMBOL(udp_lib_setsockopt); 642EXPORT_SYMBOL(udp_lib_setsockopt);
1725EXPORT_SYMBOL(udp_poll); 643EXPORT_SYMBOL(udp_poll);
diff --git a/net/ipv4/udp_ipv4.c b/net/ipv4/udp_ipv4.c
new file mode 100644
index 000000000000..40978de7fb51
--- /dev/null
+++ b/net/ipv4/udp_ipv4.c
@@ -0,0 +1,1134 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * UDP for IPv4.
7 *
8 * For full credits, see net/ipv4/udp.c.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <asm/system.h>
17#include <asm/uaccess.h>
18#include <asm/ioctls.h>
19#include <linux/bootmem.h>
20#include <linux/types.h>
21#include <linux/fcntl.h>
22#include <linux/module.h>
23#include <linux/socket.h>
24#include <linux/sockios.h>
25#include <linux/igmp.h>
26#include <linux/in.h>
27#include <linux/errno.h>
28#include <linux/timer.h>
29#include <linux/mm.h>
30#include <linux/inet.h>
31#include <linux/netdevice.h>
32#include <net/tcp_states.h>
33#include <linux/skbuff.h>
34#include <linux/proc_fs.h>
35#include <linux/seq_file.h>
36#include <net/net_namespace.h>
37#include <net/icmp.h>
38#include <net/route.h>
39#include <net/checksum.h>
40#include <net/xfrm.h>
41#include "udp_impl.h"
42
43int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
44{
45 struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
46
47 return ( !ipv6_only_sock(sk2) &&
48 (!inet1->rcv_saddr || !inet2->rcv_saddr ||
49 inet1->rcv_saddr == inet2->rcv_saddr ));
50}
51
52static inline int udp_v4_get_port(struct sock *sk, unsigned short snum)
53{
54 return udp_get_port(sk, snum, ipv4_rcv_saddr_equal);
55}
56
57/* UDP is nearly always wildcarded out the wazoo; it makes no sense to try
58 * harder than this. -DaveM
59 */
60static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
61 __be16 sport, __be32 daddr, __be16 dport,
62 int dif, struct hlist_head udptable[])
63{
64 struct sock *sk, *result = NULL;
65 struct hlist_node *node;
66 unsigned short hnum = ntohs(dport);
67 int badness = -1;
68
69 read_lock(&udp_hash_lock);
70 sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
71 struct inet_sock *inet = inet_sk(sk);
72
73 if (sk->sk_net == net && sk->sk_hash == hnum &&
74 !ipv6_only_sock(sk)) {
75 int score = (sk->sk_family == PF_INET ? 1 : 0);
76 if (inet->rcv_saddr) {
77 if (inet->rcv_saddr != daddr)
78 continue;
79 score+=2;
80 }
81 if (inet->daddr) {
82 if (inet->daddr != saddr)
83 continue;
84 score+=2;
85 }
86 if (inet->dport) {
87 if (inet->dport != sport)
88 continue;
89 score+=2;
90 }
91 if (sk->sk_bound_dev_if) {
92 if (sk->sk_bound_dev_if != dif)
93 continue;
94 score+=2;
95 }
96 if (score == 9) {
97 result = sk;
98 break;
99 } else if (score > badness) {
100 result = sk;
101 badness = score;
102 }
103 }
104 }
105 if (result)
106 sock_hold(result);
107 read_unlock(&udp_hash_lock);
108 return result;
109}
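The loop above picks the most specific matching listener: an AF_INET socket starts at score 1, and each of rcv_saddr, daddr, dport and sk_bound_dev_if that is set on the socket and matches the packet adds 2 (a set-but-mismatching field disqualifies the socket outright), so a fully connected, device-bound exact match scores 9 and ends the search early. A standalone restatement of that arithmetic, for illustration only:

/* each *_match flag means "field is set on the socket AND equals the
 * packet's value"; sockets with a set-but-unequal field never get here */
static int demo_udp_score(int is_af_inet, int saddr_match, int daddr_match,
                          int dport_match, int dif_match)
{
        int score = is_af_inet ? 1 : 0;

        score += saddr_match ? 2 : 0;   /* bound local address matches  */
        score += daddr_match ? 2 : 0;   /* connected to sender address  */
        score += dport_match ? 2 : 0;   /* connected to sender port     */
        score += dif_match   ? 2 : 0;   /* bound to the ingress device  */

        return score;                   /* 9 == exact match, search ends */
}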
110
111static inline struct sock *udp_v4_mcast_next(struct sock *sk,
112 __be16 loc_port, __be32 loc_addr,
113 __be16 rmt_port, __be32 rmt_addr,
114 int dif)
115{
116 struct hlist_node *node;
117 struct sock *s = sk;
118 unsigned short hnum = ntohs(loc_port);
119
120 sk_for_each_from(s, node) {
121 struct inet_sock *inet = inet_sk(s);
122
123 if (s->sk_hash != hnum ||
124 (inet->daddr && inet->daddr != rmt_addr) ||
125 (inet->dport != rmt_port && inet->dport) ||
126 (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
127 ipv6_only_sock(s) ||
128 (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
129 continue;
130 if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
131 continue;
132 goto found;
133 }
134 s = NULL;
135found:
136 return s;
137}
138
139/*
140 * This routine is called by the ICMP module when it gets some
141 * sort of error condition. If err < 0 then the socket should
142 * be closed and the error returned to the user. If err > 0
143 * it's just the icmp type << 8 | icmp code.
 144 * Header points to the IP header of the error packet. We skip
 145 * past it to the first 8 bytes of the offending UDP header, which
 146 * are all that is guaranteed to be echoed back, and use them to
 147 * find the appropriate port.
148 */
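/*
 * For example, a "destination unreachable, port unreachable" error
 * (type 3, code 3) is mapped through icmp_err_convert[] to ECONNREFUSED
 * below and, if the application enabled IP_RECVERR, queued on the
 * socket's error queue by ip_icmp_error().
 */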
149
150void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
151{
152 struct inet_sock *inet;
153 struct iphdr *iph = (struct iphdr*)skb->data;
154 struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2));
155 const int type = icmp_hdr(skb)->type;
156 const int code = icmp_hdr(skb)->code;
157 struct sock *sk;
158 int harderr;
159 int err;
160
161 sk = __udp4_lib_lookup(skb->dev->nd_net, iph->daddr, uh->dest,
162 iph->saddr, uh->source, skb->dev->ifindex, udptable);
163 if (sk == NULL) {
164 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
165 return; /* No socket for error */
166 }
167
168 err = 0;
169 harderr = 0;
170 inet = inet_sk(sk);
171
172 switch (type) {
173 default:
174 case ICMP_TIME_EXCEEDED:
175 err = EHOSTUNREACH;
176 break;
177 case ICMP_SOURCE_QUENCH:
178 goto out;
179 case ICMP_PARAMETERPROB:
180 err = EPROTO;
181 harderr = 1;
182 break;
183 case ICMP_DEST_UNREACH:
184 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
185 if (inet->pmtudisc != IP_PMTUDISC_DONT) {
186 err = EMSGSIZE;
187 harderr = 1;
188 break;
189 }
190 goto out;
191 }
192 err = EHOSTUNREACH;
193 if (code <= NR_ICMP_UNREACH) {
194 harderr = icmp_err_convert[code].fatal;
195 err = icmp_err_convert[code].errno;
196 }
197 break;
198 }
199
200 /*
201 * RFC1122: OK. Passes ICMP errors back to application, as per
202 * 4.1.3.3.
203 */
204 if (!inet->recverr) {
205 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
206 goto out;
207 } else {
208 ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));
209 }
210 sk->sk_err = err;
211 sk->sk_error_report(sk);
212out:
213 sock_put(sk);
214}
215
216void udp_err(struct sk_buff *skb, u32 info)
217{
218 __udp4_lib_err(skb, info, udp_hash);
219}
220
221/*
222 * Throw away all pending data and cancel the corking. Socket is locked.
223 */
224static void udp_flush_pending_frames(struct sock *sk)
225{
226 struct udp_sock *up = udp_sk(sk);
227
228 if (up->pending) {
229 up->len = 0;
230 up->pending = 0;
231 ip_flush_pending_frames(sk);
232 }
233}
234
 235/**
 236 * udp4_hwcsum_outgoing - handle outgoing HW checksumming
 237 * @sk: socket we are sending on
 238 * @skb: sk_buff with the filled-in UDP header (checksum field zeroed out)
 239 * @src, @dst, @len: pseudo-header source/destination addresses and UDP length
 240 */
241static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
242 __be32 src, __be32 dst, int len )
243{
244 unsigned int offset;
245 struct udphdr *uh = udp_hdr(skb);
246 __wsum csum = 0;
247
248 if (skb_queue_len(&sk->sk_write_queue) == 1) {
249 /*
250 * Only one fragment on the socket.
251 */
252 skb->csum_start = skb_transport_header(skb) - skb->head;
253 skb->csum_offset = offsetof(struct udphdr, check);
254 uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
255 } else {
 256 /*
 257 * HW checksumming cannot be used here: there are two or more
 258 * fragments on the socket, so the csums of all the sk_buffs
 259 * have to be summed together in software first
 260 */
261 offset = skb_transport_offset(skb);
262 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
263
264 skb->ip_summed = CHECKSUM_NONE;
265
266 skb_queue_walk(&sk->sk_write_queue, skb) {
267 csum = csum_add(csum, skb->csum);
268 }
269
270 uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
271 if (uh->check == 0)
272 uh->check = CSUM_MANGLED_0;
273 }
274}
275
276/*
277 * Push out all pending data as one UDP datagram. Socket is locked.
278 */
279static int udp_push_pending_frames(struct sock *sk)
280{
281 struct udp_sock *up = udp_sk(sk);
282 struct inet_sock *inet = inet_sk(sk);
283 struct flowi *fl = &inet->cork.fl;
284 struct sk_buff *skb;
285 struct udphdr *uh;
286 int err = 0;
287 int is_udplite = IS_UDPLITE(sk);
288 __wsum csum = 0;
289
290 /* Grab the skbuff where UDP header space exists. */
291 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
292 goto out;
293
294 /*
295 * Create a UDP header
296 */
297 uh = udp_hdr(skb);
298 uh->source = fl->fl_ip_sport;
299 uh->dest = fl->fl_ip_dport;
300 uh->len = htons(up->len);
301 uh->check = 0;
302
303 if (is_udplite) /* UDP-Lite */
304 csum = udplite_csum_outgoing(sk, skb);
305
306 else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */
307
308 skb->ip_summed = CHECKSUM_NONE;
309 goto send;
310
311 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
312
313 udp4_hwcsum_outgoing(sk, skb, fl->fl4_src,fl->fl4_dst, up->len);
314 goto send;
315
316 } else /* `normal' UDP */
317 csum = udp_csum_outgoing(sk, skb);
318
319 /* add protocol-dependent pseudo-header */
320 uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
321 sk->sk_protocol, csum );
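	/*
	 * A computed checksum of zero is transmitted as all ones: in UDP a
	 * zero checksum field means "no checksum was generated" (RFC 768).
	 */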
322 if (uh->check == 0)
323 uh->check = CSUM_MANGLED_0;
324
325send:
326 err = ip_push_pending_frames(sk);
327out:
328 up->len = 0;
329 up->pending = 0;
330 if (!err)
331 UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
332 return err;
333}
334
335int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
336 size_t len)
337{
338 struct inet_sock *inet = inet_sk(sk);
339 struct udp_sock *up = udp_sk(sk);
340 int ulen = len;
341 struct ipcm_cookie ipc;
342 struct rtable *rt = NULL;
343 int free = 0;
344 int connected = 0;
345 __be32 daddr, faddr, saddr;
346 __be16 dport;
347 u8 tos;
348 int err, is_udplite = IS_UDPLITE(sk);
349 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
350 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
351
352 if (len > 0xFFFF)
353 return -EMSGSIZE;
354
355 /*
356 * Check the flags.
357 */
358
359 if (msg->msg_flags&MSG_OOB) /* Mirror BSD error message compatibility */
360 return -EOPNOTSUPP;
361
362 ipc.opt = NULL;
363
364 if (up->pending) {
365 /*
366 * There are pending frames.
367 * The socket lock must be held while it's corked.
368 */
369 lock_sock(sk);
370 if (likely(up->pending)) {
371 if (unlikely(up->pending != AF_INET)) {
372 release_sock(sk);
373 return -EINVAL;
374 }
375 goto do_append_data;
376 }
377 release_sock(sk);
378 }
379 ulen += sizeof(struct udphdr);
380
381 /*
382 * Get and verify the address.
383 */
384 if (msg->msg_name) {
385 struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name;
386 if (msg->msg_namelen < sizeof(*usin))
387 return -EINVAL;
388 if (usin->sin_family != AF_INET) {
389 if (usin->sin_family != AF_UNSPEC)
390 return -EAFNOSUPPORT;
391 }
392
393 daddr = usin->sin_addr.s_addr;
394 dport = usin->sin_port;
395 if (dport == 0)
396 return -EINVAL;
397 } else {
398 if (sk->sk_state != TCP_ESTABLISHED)
399 return -EDESTADDRREQ;
400 daddr = inet->daddr;
401 dport = inet->dport;
 402 /* Open fast path for connected socket.
 403 Route will not be used if at least one option is set.
 404 */
405 connected = 1;
406 }
407 ipc.addr = inet->saddr;
408
409 ipc.oif = sk->sk_bound_dev_if;
410 if (msg->msg_controllen) {
411 err = ip_cmsg_send(msg, &ipc);
412 if (err)
413 return err;
414 if (ipc.opt)
415 free = 1;
416 connected = 0;
417 }
418 if (!ipc.opt)
419 ipc.opt = inet->opt;
420
421 saddr = ipc.addr;
422 ipc.addr = faddr = daddr;
423
424 if (ipc.opt && ipc.opt->srr) {
425 if (!daddr)
426 return -EINVAL;
427 faddr = ipc.opt->faddr;
428 connected = 0;
429 }
430 tos = RT_TOS(inet->tos);
431 if (sock_flag(sk, SOCK_LOCALROUTE) ||
432 (msg->msg_flags & MSG_DONTROUTE) ||
433 (ipc.opt && ipc.opt->is_strictroute)) {
434 tos |= RTO_ONLINK;
435 connected = 0;
436 }
437
438 if (ipv4_is_multicast(daddr)) {
439 if (!ipc.oif)
440 ipc.oif = inet->mc_index;
441 if (!saddr)
442 saddr = inet->mc_addr;
443 connected = 0;
444 }
445
446 if (connected)
447 rt = (struct rtable*)sk_dst_check(sk, 0);
448
449 if (rt == NULL) {
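		/*
		 * No cached route: build a flow key from the output
		 * interface, addresses, TOS, protocol and ports, and ask
		 * the routing code for one; the ports also allow
		 * transport-aware policies (e.g. IPsec selectors) to match.
		 */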
450 struct flowi fl = { .oif = ipc.oif,
451 .nl_u = { .ip4_u =
452 { .daddr = faddr,
453 .saddr = saddr,
454 .tos = tos } },
455 .proto = sk->sk_protocol,
456 .uli_u = { .ports =
457 { .sport = inet->sport,
458 .dport = dport } } };
459 security_sk_classify_flow(sk, &fl);
460 err = ip_route_output_flow(&init_net, &rt, &fl, sk, 1);
461 if (err) {
462 if (err == -ENETUNREACH)
463 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
464 goto out;
465 }
466
467 err = -EACCES;
468 if ((rt->rt_flags & RTCF_BROADCAST) &&
469 !sock_flag(sk, SOCK_BROADCAST))
470 goto out;
471 if (connected)
472 sk_dst_set(sk, dst_clone(&rt->u.dst));
473 }
474
475 if (msg->msg_flags&MSG_CONFIRM)
476 goto do_confirm;
477back_from_confirm:
478
479 saddr = rt->rt_src;
480 if (!ipc.addr)
481 daddr = ipc.addr = rt->rt_dst;
482
483 lock_sock(sk);
484 if (unlikely(up->pending)) {
485 /* The socket is already corked while preparing it. */
486 /* ... which is an evident application bug. --ANK */
487 release_sock(sk);
488
489 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
490 err = -EINVAL;
491 goto out;
492 }
493 /*
494 * Now cork the socket to pend data.
495 */
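	/*
	 * The addressing recorded in cork.fl here is what the later
	 * ip_append_data()/udp_push_pending_frames() calls use until the
	 * corked data is finally pushed out.
	 */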
496 inet->cork.fl.fl4_dst = daddr;
497 inet->cork.fl.fl_ip_dport = dport;
498 inet->cork.fl.fl4_src = saddr;
499 inet->cork.fl.fl_ip_sport = inet->sport;
500 up->pending = AF_INET;
501
502do_append_data:
503 up->len += ulen;
504 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
505 err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
506 sizeof(struct udphdr), &ipc, rt,
507 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
508 if (err)
509 udp_flush_pending_frames(sk);
510 else if (!corkreq)
511 err = udp_push_pending_frames(sk);
512 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
513 up->pending = 0;
514 release_sock(sk);
515
516out:
517 ip_rt_put(rt);
518 if (free)
519 kfree(ipc.opt);
520 if (!err)
521 return len;
522 /*
523 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
524 * ENOBUFS might not be good (it's not tunable per se), but otherwise
525 * we don't have a good statistic (IpOutDiscards but it can be too many
526 * things). We could add another new stat but at least for now that
527 * seems like overkill.
528 */
529 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
530 UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
531 }
532 return err;
533
534do_confirm:
535 dst_confirm(&rt->u.dst);
536 if (!(msg->msg_flags&MSG_PROBE) || len)
537 goto back_from_confirm;
538 err = 0;
539 goto out;
540}
541
542int udp_sendpage(struct sock *sk, struct page *page, int offset,
543 size_t size, int flags)
544{
545 struct udp_sock *up = udp_sk(sk);
546 int ret;
547
548 if (!up->pending) {
549 struct msghdr msg = { .msg_flags = flags|MSG_MORE };
550
551 /* Call udp_sendmsg to specify destination address which
552 * sendpage interface can't pass.
553 * This will succeed only when the socket is connected.
554 */
555 ret = udp_sendmsg(NULL, sk, &msg, 0);
556 if (ret < 0)
557 return ret;
558 }
559
560 lock_sock(sk);
561
562 if (unlikely(!up->pending)) {
563 release_sock(sk);
564
565 LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
566 return -EINVAL;
567 }
568
569 ret = ip_append_page(sk, page, offset, size, flags);
570 if (ret == -EOPNOTSUPP) {
571 release_sock(sk);
572 return sock_no_sendpage(sk->sk_socket, page, offset,
573 size, flags);
574 }
575 if (ret < 0) {
576 udp_flush_pending_frames(sk);
577 goto out;
578 }
579
580 up->len += size;
581 if (!(up->corkflag || (flags&MSG_MORE)))
582 ret = udp_push_pending_frames(sk);
583 if (!ret)
584 ret = size;
585out:
586 release_sock(sk);
587 return ret;
588}
589
 590/*
 591 * This should be easy: if there is something there, we
 592 * return it; otherwise we block.
 593 */
594
595int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
596 size_t len, int noblock, int flags, int *addr_len)
597{
598 struct inet_sock *inet = inet_sk(sk);
599 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
600 struct sk_buff *skb;
601 unsigned int ulen, copied;
602 int peeked;
603 int err;
604 int is_udplite = IS_UDPLITE(sk);
605
606 /*
607 * Check any passed addresses
608 */
609 if (addr_len)
610 *addr_len=sizeof(*sin);
611
612 if (flags & MSG_ERRQUEUE)
613 return ip_recv_error(sk, msg, len);
614
615try_again:
616 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
617 &peeked, &err);
618 if (!skb)
619 goto out;
620
621 ulen = skb->len - sizeof(struct udphdr);
622 copied = len;
623 if (copied > ulen)
624 copied = ulen;
625 else if (copied < ulen)
626 msg->msg_flags |= MSG_TRUNC;
627
628 /*
629 * If checksum is needed at all, try to do it while copying the
630 * data. If the data is truncated, or if we only want a partial
631 * coverage checksum (UDP-Lite), do it before the copy.
632 */
633
634 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
635 if (udp_lib_checksum_complete(skb))
636 goto csum_copy_err;
637 }
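	/*
	 * Fast path: the checksum has already been verified (or is not
	 * needed), so just copy. Otherwise copy and checksum in one pass;
	 * -EINVAL from the copy routine signals a checksum failure.
	 */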
638
639 if (skb_csum_unnecessary(skb))
640 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
641 msg->msg_iov, copied );
642 else {
643 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
644
645 if (err == -EINVAL)
646 goto csum_copy_err;
647 }
648
649 if (err)
650 goto out_free;
651
652 if (!peeked)
653 UDP_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite);
654
655 sock_recv_timestamp(msg, sk, skb);
656
657 /* Copy the address. */
658 if (sin)
659 {
660 sin->sin_family = AF_INET;
661 sin->sin_port = udp_hdr(skb)->source;
662 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
663 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
664 }
665 if (inet->cmsg_flags)
666 ip_cmsg_recv(msg, skb);
667
668 err = copied;
669 if (flags & MSG_TRUNC)
670 err = ulen;
671
672out_free:
673 lock_sock(sk);
674 skb_free_datagram(sk, skb);
675 release_sock(sk);
676out:
677 return err;
678
679csum_copy_err:
680 lock_sock(sk);
681 if (!skb_kill_datagram(sk, skb, flags))
682 UDP_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
683 release_sock(sk);
684
685 if (noblock)
686 return -EAGAIN;
687 goto try_again;
688}
689
690
691/* returns:
692 * -1: error
693 * 0: success
694 * >0: "udp encap" protocol resubmission
695 *
696 * Note that in the success and error cases, the skb is assumed to
697 * have either been requeued or freed.
698 */
699int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
700{
701 struct udp_sock *up = udp_sk(sk);
702 int rc;
703 int is_udplite = IS_UDPLITE(sk);
704
705 /*
706 * Charge it to the socket, dropping if the queue is full.
707 */
708 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
709 goto drop;
710 nf_reset(skb);
711
712 if (up->encap_type) {
713 /*
714 * This is an encapsulation socket so pass the skb to
715 * the socket's udp_encap_rcv() hook. Otherwise, just
716 * fall through and pass this up the UDP socket.
717 * up->encap_rcv() returns the following value:
718 * =0 if skb was successfully passed to the encap
719 * handler or was discarded by it.
720 * >0 if skb should be passed on to UDP.
721 * <0 if skb should be resubmitted as proto -N
722 */
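		/*
		 * encap_type is normally set with the UDP_ENCAP socket
		 * option (e.g. ESP-in-UDP NAT traversal, RFC 3948); the
		 * owner of the socket installs a matching encap_rcv hook.
		 */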
723
 724 /* if the datagram is too short to carry encapsulated data, let UDP handle it */
725 if (skb->len > sizeof(struct udphdr) &&
726 up->encap_rcv != NULL) {
727 int ret;
728
729 ret = (*up->encap_rcv)(sk, skb);
730 if (ret <= 0) {
731 UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS,
732 is_udplite);
733 return -ret;
734 }
735 }
736
737 /* FALLTHROUGH -- it's a UDP Packet */
738 }
739
740 /*
741 * UDP-Lite specific tests, ignored on UDP sockets
742 */
743 if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
744
745 /*
746 * MIB statistics other than incrementing the error count are
747 * disabled for the following two types of errors: these depend
748 * on the application settings, not on the functioning of the
749 * protocol stack as such.
750 *
751 * RFC 3828 here recommends (sec 3.3): "There should also be a
752 * way ... to ... at least let the receiving application block
753 * delivery of packets with coverage values less than a value
754 * provided by the application."
755 */
756 if (up->pcrlen == 0) { /* full coverage was set */
757 LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
758 "%d while full coverage %d requested\n",
759 UDP_SKB_CB(skb)->cscov, skb->len);
760 goto drop;
761 }
 762 /* The next case involves violating the minimum coverage requested
 763 * by the receiver. This is subtle: if the receiver wants x and x is
 764 * greater than the buffer size/MTU, then the receiver will complain
 765 * that it wants x while the sender emits packets of smaller size y.
 766 * Therefore the ->partial_cov test above is essential.
 767 */
768 if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
769 LIMIT_NETDEBUG(KERN_WARNING
770 "UDPLITE: coverage %d too small, need min %d\n",
771 UDP_SKB_CB(skb)->cscov, up->pcrlen);
772 goto drop;
773 }
774 }
775
776 if (sk->sk_filter) {
777 if (udp_lib_checksum_complete(skb))
778 goto drop;
779 }
780
781 if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
782 /* Note that an ENOMEM error is charged twice */
783 if (rc == -ENOMEM)
784 UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite);
785 goto drop;
786 }
787
788 return 0;
789
790drop:
791 UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
792 kfree_skb(skb);
793 return -1;
794}
795
796/*
797 * Multicasts and broadcasts go to each listener.
798 *
799 * Note: called only from the BH handler context,
800 * so we don't need to lock the hashes.
801 */
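/*
 * The skb is cloned for every matching socket except the last one, which
 * consumes the original; if no socket matches, the skb is simply freed.
 */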
802static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
803 struct udphdr *uh,
804 __be32 saddr, __be32 daddr,
805 struct hlist_head udptable[])
806{
807 struct sock *sk;
808 int dif;
809
810 read_lock(&udp_hash_lock);
811 sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
812 dif = skb->dev->ifindex;
813 sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
814 if (sk) {
815 struct sock *sknext = NULL;
816
817 do {
818 struct sk_buff *skb1 = skb;
819
820 sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
821 uh->source, saddr, dif);
822 if (sknext)
823 skb1 = skb_clone(skb, GFP_ATOMIC);
824
825 if (skb1) {
826 int ret = 0;
827
828 bh_lock_sock_nested(sk);
829 if (!sock_owned_by_user(sk))
830 ret = udp_queue_rcv_skb(sk, skb1);
831 else
832 sk_add_backlog(sk, skb1);
833 bh_unlock_sock(sk);
834
835 if (ret > 0)
836 /* we should probably re-process instead
837 * of dropping packets here. */
838 kfree_skb(skb1);
839 }
840 sk = sknext;
841 } while (sknext);
842 } else
843 kfree_skb(skb);
844 read_unlock(&udp_hash_lock);
845 return 0;
846}
847
 848/* Initialize the UDP checksum. If this exits with zero (success) and
 849 * CHECKSUM_UNNECESSARY is set, no further checks are required.
 850 * Otherwise, csum completion requires checksumming the packet body,
 851 * including the UDP header, and folding the result into skb->csum.
 852 */
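/*
 * Note: a zero UDP checksum means the sender did not compute one, which is
 * legal for UDP over IPv4, so such packets are accepted unverified; UDP-Lite
 * has no such exemption and a zero checksum is treated as an error by
 * udplite_checksum_init().
 */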
853static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
854 int proto)
855{
856 const struct iphdr *iph;
857 int err;
858
859 UDP_SKB_CB(skb)->partial_cov = 0;
860 UDP_SKB_CB(skb)->cscov = skb->len;
861
862 if (IS_PROTO_UDPLITE(proto)) {
863 err = udplite_checksum_init(skb, uh);
864 if (err)
865 return err;
866 }
867
868 iph = ip_hdr(skb);
869 if (uh->check == 0) {
870 skb->ip_summed = CHECKSUM_UNNECESSARY;
871 } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
872 if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
873 proto, skb->csum))
874 skb->ip_summed = CHECKSUM_UNNECESSARY;
875 }
876 if (!skb_csum_unnecessary(skb))
877 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
878 skb->len, proto, 0);
879 /* Probably, we should checksum udp header (it should be in cache
880 * in any case) and data in tiny packets (< rx copybreak).
881 */
882
883 return 0;
884}
885
886/*
887 * All we need to do is get the socket, and then do a checksum.
888 */
889
890int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
891 int proto)
892{
893 struct sock *sk;
894 struct udphdr *uh = udp_hdr(skb);
895 unsigned short ulen;
896 struct rtable *rt = (struct rtable*)skb->dst;
897 __be32 saddr = ip_hdr(skb)->saddr;
898 __be32 daddr = ip_hdr(skb)->daddr;
899
900 /*
901 * Validate the packet.
902 */
903 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
904 goto drop; /* No space for header. */
905
906 ulen = ntohs(uh->len);
907 if (ulen > skb->len)
908 goto short_packet;
909
910 if (IS_PROTO_UDPLITE(proto)) {
911 /* UDP validates ulen. */
912 if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
913 goto short_packet;
914 uh = udp_hdr(skb);
915 }
916
917 if (udp4_csum_init(skb, uh, proto))
918 goto csum_error;
919
920 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
921 return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
922
923 sk = __udp4_lib_lookup(skb->dev->nd_net, saddr, uh->source, daddr,
924 uh->dest, inet_iif(skb), udptable);
925
926 if (sk != NULL) {
927 int ret = 0;
928 bh_lock_sock_nested(sk);
929 if (!sock_owned_by_user(sk))
930 ret = udp_queue_rcv_skb(sk, skb);
931 else
932 sk_add_backlog(sk, skb);
933 bh_unlock_sock(sk);
934 sock_put(sk);
935
936 /* a return value > 0 means to resubmit the input, but
937 * it wants the return to be -protocol, or 0
938 */
939 if (ret > 0)
940 return -ret;
941 return 0;
942 }
943
944 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
945 goto drop;
946 nf_reset(skb);
947
948 /* No socket. Drop packet silently, if checksum is wrong */
949 if (udp_lib_checksum_complete(skb))
950 goto csum_error;
951
952 UDP_INC_STATS_BH(UDP_MIB_NOPORTS, IS_PROTO_UDPLITE(proto));
953 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
954
 955 /*
 956 * Hmm. We got a UDP packet to a port on which we
 957 * are not listening. Ignore it.
 958 */
959 kfree_skb(skb);
960 return 0;
961
962short_packet:
963 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
964 IS_PROTO_UDPLITE(proto) ? "-Lite" : "",
965 NIPQUAD(saddr),
966 ntohs(uh->source),
967 ulen,
968 skb->len,
969 NIPQUAD(daddr),
970 ntohs(uh->dest));
971 goto drop;
972
973csum_error:
974 /*
975 * RFC1122: OK. Discards the bad packet silently (as far as
976 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
977 */
978 LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
979 IS_PROTO_UDPLITE(proto) ? "-Lite" : "",
980 NIPQUAD(saddr),
981 ntohs(uh->source),
982 NIPQUAD(daddr),
983 ntohs(uh->dest),
984 ulen);
985drop:
986 UDP_INC_STATS_BH(UDP_MIB_INERRORS, IS_PROTO_UDPLITE(proto));
987 kfree_skb(skb);
988 return 0;
989}
990
991int udp_rcv(struct sk_buff *skb)
992{
993 return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
994}
995
996int udp_destroy_sock(struct sock *sk)
997{
998 lock_sock(sk);
999 udp_flush_pending_frames(sk);
1000 release_sock(sk);
1001 return 0;
1002}
1003
1004int udp_setsockopt(struct sock *sk, int level, int optname,
1005 char __user *optval, int optlen)
1006{
1007 if (IS_SOL_UDPFAMILY(level))
1008 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1009 udp_push_pending_frames);
1010 return ip_setsockopt(sk, level, optname, optval, optlen);
1011}
1012
1013#ifdef CONFIG_COMPAT
1014int compat_udp_setsockopt(struct sock *sk, int level, int optname,
1015 char __user *optval, int optlen)
1016{
1017 if (IS_SOL_UDPFAMILY(level))
1018 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1019 udp_push_pending_frames);
1020 return compat_ip_setsockopt(sk, level, optname, optval, optlen);
1021}
1022#endif
1023
1024int udp_getsockopt(struct sock *sk, int level, int optname,
1025 char __user *optval, int __user *optlen)
1026{
1027 if (IS_SOL_UDPFAMILY(level))
1028 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1029 return ip_getsockopt(sk, level, optname, optval, optlen);
1030}
1031
1032#ifdef CONFIG_COMPAT
1033int compat_udp_getsockopt(struct sock *sk, int level, int optname,
1034 char __user *optval, int __user *optlen)
1035{
1036 if (IS_SOL_UDPFAMILY(level))
1037 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1038 return compat_ip_getsockopt(sk, level, optname, optval, optlen);
1039}
1040#endif
1041
1042/* ------------------------------------------------------------------------ */
1043DEFINE_PROTO_INUSE(udp)
1044
1045struct proto udp_prot = {
1046 .name = "UDP",
1047 .owner = THIS_MODULE,
1048 .close = udp_lib_close,
1049 .connect = ip4_datagram_connect,
1050 .disconnect = udp_disconnect,
1051 .ioctl = udp_ioctl,
1052 .destroy = udp_destroy_sock,
1053 .setsockopt = udp_setsockopt,
1054 .getsockopt = udp_getsockopt,
1055 .sendmsg = udp_sendmsg,
1056 .recvmsg = udp_recvmsg,
1057 .sendpage = udp_sendpage,
1058 .backlog_rcv = udp_queue_rcv_skb,
1059 .hash = udp_lib_hash,
1060 .unhash = udp_lib_unhash,
1061 .get_port = udp_v4_get_port,
1062 .memory_allocated = &udp_memory_allocated,
1063 .sysctl_mem = sysctl_udp_mem,
1064 .sysctl_wmem = &sysctl_udp_wmem_min,
1065 .sysctl_rmem = &sysctl_udp_rmem_min,
1066 .obj_size = sizeof(struct udp_sock),
1067#ifdef CONFIG_COMPAT
1068 .compat_setsockopt = compat_udp_setsockopt,
1069 .compat_getsockopt = compat_udp_getsockopt,
1070#endif
1071 REF_PROTO_INUSE(udp)
1072};
1073
1074/* ------------------------------------------------------------------------ */
1075static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
1076{
1077 struct inet_sock *inet = inet_sk(sp);
1078 __be32 dest = inet->daddr;
1079 __be32 src = inet->rcv_saddr;
1080 __u16 destp = ntohs(inet->dport);
1081 __u16 srcp = ntohs(inet->sport);
1082
1083 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
1084 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
1085 bucket, src, srcp, dest, destp, sp->sk_state,
1086 atomic_read(&sp->sk_wmem_alloc),
1087 atomic_read(&sp->sk_rmem_alloc),
1088 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
1089 atomic_read(&sp->sk_refcnt), sp);
1090}
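
/*
 * The format above produces /proc/net/udp entries like the following
 * (illustrative values only):
 *
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
 *  105: 00000000:0043 00000000:0000 07 00000000:00000000 00:00000000 00000000     0        0 5598 2 c1a2b300
 */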
1091
1092int udp4_seq_show(struct seq_file *seq, void *v)
1093{
1094 if (v == SEQ_START_TOKEN)
1095 seq_printf(seq, "%-127s\n",
1096 " sl local_address rem_address st tx_queue "
1097 "rx_queue tr tm->when retrnsmt uid timeout "
1098 "inode");
1099 else {
1100 char tmpbuf[129];
1101 struct udp_iter_state *state = seq->private;
1102
1103 udp4_format_sock(v, tmpbuf, state->bucket);
1104 seq_printf(seq, "%-127s\n", tmpbuf);
1105 }
1106 return 0;
1107}
1108
1109/* ------------------------------------------------------------------------ */
1110#ifdef CONFIG_PROC_FS
1111static struct file_operations udp4_seq_fops;
1112static struct udp_seq_afinfo udp4_seq_afinfo = {
1113 .owner = THIS_MODULE,
1114 .name = "udp",
1115 .family = AF_INET,
1116 .hashtable = udp_hash,
1117 .seq_show = udp4_seq_show,
1118 .seq_fops = &udp4_seq_fops,
1119};
1120
1121int __init udp4_proc_init(void)
1122{
1123 return udp_proc_register(&udp4_seq_afinfo);
1124}
1125
1126void udp4_proc_exit(void)
1127{
1128 udp_proc_unregister(&udp4_seq_afinfo);
1129}
1130#endif /* CONFIG_PROC_FS */
1131
1132EXPORT_SYMBOL(udp_prot);
1133EXPORT_SYMBOL(udp_sendmsg);
1134
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite_ipv4.c
index 001b881ca36f..001b881ca36f 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite_ipv4.c
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 24f3aa0f2a35..107051f7c227 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_IPV6) += ipv6.o
 
 ipv6-objs :=	af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
 		addrlabel.o \
-		route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
+		route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp_ipv6.o \
 		raw.o protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
 		exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
 
@@ -16,6 +16,8 @@ ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
 ipv6-$(CONFIG_NETFILTER) += netfilter.o
 ipv6-$(CONFIG_IPV6_MULTIPLE_TABLES) += fib6_rules.o
 ipv6-$(CONFIG_PROC_FS) += proc.o
+ipv6-$(CONFIG_SYN_COOKIES) += syncookies.o
+ipv6-$(CONFIG_IP_UDPLITE) += udplite_ipv6.o
 
 ipv6-objs += $(ipv6-y)
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 101e0e70ba27..c878fb681efb 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -493,7 +493,7 @@ static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
493 dev_forward_change((struct inet6_dev *)table->extra1); 493 dev_forward_change((struct inet6_dev *)table->extra1);
494 494
495 if (*p) 495 if (*p)
496 rt6_purge_dflt_routers(); 496 rt6_purge_dflt_routers(net);
497} 497}
498#endif 498#endif
499 499
@@ -561,7 +561,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
561 write_lock(&addrconf_hash_lock); 561 write_lock(&addrconf_hash_lock);
562 562
563 /* Ignore adding duplicate addresses on an interface */ 563 /* Ignore adding duplicate addresses on an interface */
564 if (ipv6_chk_same_addr(&init_net, addr, idev->dev)) { 564 if (ipv6_chk_same_addr(idev->dev->nd_net, addr, idev->dev)) {
565 ADBG(("ipv6_add_addr: already assigned\n")); 565 ADBG(("ipv6_add_addr: already assigned\n"));
566 err = -EEXIST; 566 err = -EEXIST;
567 goto out; 567 goto out;
@@ -751,9 +751,9 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
751 if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) { 751 if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) {
752 struct in6_addr prefix; 752 struct in6_addr prefix;
753 struct rt6_info *rt; 753 struct rt6_info *rt;
754 754 struct net *net = ifp->idev->dev->nd_net;
755 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len); 755 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
756 rt = rt6_lookup(&prefix, NULL, ifp->idev->dev->ifindex, 1); 756 rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1);
757 757
758 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { 758 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) {
759 if (onlink == 0) { 759 if (onlink == 0) {
@@ -905,6 +905,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
905{ 905{
906 struct ipv6_saddr_score hiscore; 906 struct ipv6_saddr_score hiscore;
907 struct inet6_ifaddr *ifa_result = NULL; 907 struct inet6_ifaddr *ifa_result = NULL;
908 struct net *net = daddr_dev->nd_net;
908 int daddr_type = __ipv6_addr_type(daddr); 909 int daddr_type = __ipv6_addr_type(daddr);
909 int daddr_scope = __ipv6_addr_src_scope(daddr_type); 910 int daddr_scope = __ipv6_addr_src_scope(daddr_type);
910 int daddr_ifindex = daddr_dev ? daddr_dev->ifindex : 0; 911 int daddr_ifindex = daddr_dev ? daddr_dev->ifindex : 0;
@@ -916,7 +917,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
916 read_lock(&dev_base_lock); 917 read_lock(&dev_base_lock);
917 rcu_read_lock(); 918 rcu_read_lock();
918 919
919 for_each_netdev(&init_net, dev) { 920 for_each_netdev(net, dev) {
920 struct inet6_dev *idev; 921 struct inet6_dev *idev;
921 struct inet6_ifaddr *ifa; 922 struct inet6_ifaddr *ifa;
922 923
@@ -1125,6 +1126,11 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
1125 if (hiscore.rule < 7) 1126 if (hiscore.rule < 7)
1126 hiscore.rule++; 1127 hiscore.rule++;
1127#endif 1128#endif
1129
1130 /* Skip rule 8 for orchid -> non-orchid address pairs. */
1131 if (ipv6_addr_orchid(&ifa->addr) && !ipv6_addr_orchid(daddr))
1132 continue;
1133
1128 /* Rule 8: Use longest matching prefix */ 1134 /* Rule 8: Use longest matching prefix */
1129 if (hiscore.rule < 8) { 1135 if (hiscore.rule < 8) {
1130 hiscore.matchlen = ipv6_addr_diff(&ifa_result->addr, daddr); 1136 hiscore.matchlen = ipv6_addr_diff(&ifa_result->addr, daddr);
@@ -1162,14 +1168,7 @@ record_it:
1162 return 0; 1168 return 0;
1163} 1169}
1164 1170
1165 1171EXPORT_SYMBOL(ipv6_dev_get_saddr);
1166int ipv6_get_saddr(struct dst_entry *dst,
1167 struct in6_addr *daddr, struct in6_addr *saddr)
1168{
1169 return ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, daddr, saddr);
1170}
1171
1172EXPORT_SYMBOL(ipv6_get_saddr);
1173 1172
1174int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, 1173int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1175 unsigned char banned_flags) 1174 unsigned char banned_flags)
@@ -1557,7 +1556,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
1557 .fc_expires = expires, 1556 .fc_expires = expires,
1558 .fc_dst_len = plen, 1557 .fc_dst_len = plen,
1559 .fc_flags = RTF_UP | flags, 1558 .fc_flags = RTF_UP | flags,
1560 .fc_nlinfo.nl_net = &init_net, 1559 .fc_nlinfo.nl_net = dev->nd_net,
1561 }; 1560 };
1562 1561
1563 ipv6_addr_copy(&cfg.fc_dst, pfx); 1562 ipv6_addr_copy(&cfg.fc_dst, pfx);
@@ -1584,7 +1583,7 @@ static void addrconf_add_mroute(struct net_device *dev)
1584 .fc_ifindex = dev->ifindex, 1583 .fc_ifindex = dev->ifindex,
1585 .fc_dst_len = 8, 1584 .fc_dst_len = 8,
1586 .fc_flags = RTF_UP, 1585 .fc_flags = RTF_UP,
1587 .fc_nlinfo.nl_net = &init_net, 1586 .fc_nlinfo.nl_net = dev->nd_net,
1588 }; 1587 };
1589 1588
1590 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); 1589 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
@@ -1601,7 +1600,7 @@ static void sit_route_add(struct net_device *dev)
1601 .fc_ifindex = dev->ifindex, 1600 .fc_ifindex = dev->ifindex,
1602 .fc_dst_len = 96, 1601 .fc_dst_len = 96,
1603 .fc_flags = RTF_UP | RTF_NONEXTHOP, 1602 .fc_flags = RTF_UP | RTF_NONEXTHOP,
1604 .fc_nlinfo.nl_net = &init_net, 1603 .fc_nlinfo.nl_net = dev->nd_net,
1605 }; 1604 };
1606 1605
1607 /* prefix length - 96 bits "::d.d.d.d" */ 1606 /* prefix length - 96 bits "::d.d.d.d" */
@@ -1702,7 +1701,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1702 1701
1703 if (pinfo->onlink) { 1702 if (pinfo->onlink) {
1704 struct rt6_info *rt; 1703 struct rt6_info *rt;
1705 rt = rt6_lookup(&pinfo->prefix, NULL, dev->ifindex, 1); 1704 rt = rt6_lookup(dev->nd_net, &pinfo->prefix, NULL,
1705 dev->ifindex, 1);
1706 1706
1707 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) { 1707 if (rt && ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0)) {
1708 if (rt->rt6i_flags&RTF_EXPIRES) { 1708 if (rt->rt6i_flags&RTF_EXPIRES) {
@@ -1745,7 +1745,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1745 1745
1746ok: 1746ok:
1747 1747
1748 ifp = ipv6_get_ifaddr(&init_net, &addr, dev, 1); 1748 ifp = ipv6_get_ifaddr(dev->nd_net, &addr, dev, 1);
1749 1749
1750 if (ifp == NULL && valid_lft) { 1750 if (ifp == NULL && valid_lft) {
1751 int max_addresses = in6_dev->cnf.max_addresses; 1751 int max_addresses = in6_dev->cnf.max_addresses;
@@ -1868,7 +1868,7 @@ ok:
1868 * Special case for SIT interfaces where we create a new "virtual" 1868 * Special case for SIT interfaces where we create a new "virtual"
1869 * device. 1869 * device.
1870 */ 1870 */
1871int addrconf_set_dstaddr(void __user *arg) 1871int addrconf_set_dstaddr(struct net *net, void __user *arg)
1872{ 1872{
1873 struct in6_ifreq ireq; 1873 struct in6_ifreq ireq;
1874 struct net_device *dev; 1874 struct net_device *dev;
@@ -1880,7 +1880,7 @@ int addrconf_set_dstaddr(void __user *arg)
1880 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) 1880 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
1881 goto err_exit; 1881 goto err_exit;
1882 1882
1883 dev = __dev_get_by_index(&init_net, ireq.ifr6_ifindex); 1883 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
1884 1884
1885 err = -ENODEV; 1885 err = -ENODEV;
1886 if (dev == NULL) 1886 if (dev == NULL)
@@ -1911,7 +1911,8 @@ int addrconf_set_dstaddr(void __user *arg)
1911 1911
1912 if (err == 0) { 1912 if (err == 0) {
1913 err = -ENOBUFS; 1913 err = -ENOBUFS;
1914 if ((dev = __dev_get_by_name(&init_net, p.name)) == NULL) 1914 dev = __dev_get_by_name(net, p.name);
1915 if (!dev)
1915 goto err_exit; 1916 goto err_exit;
1916 err = dev_open(dev); 1917 err = dev_open(dev);
1917 } 1918 }
@@ -1926,8 +1927,9 @@ err_exit:
1926/* 1927/*
1927 * Manual configuration of address on an interface 1928 * Manual configuration of address on an interface
1928 */ 1929 */
1929static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen, 1930static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
1930 __u8 ifa_flags, __u32 prefered_lft, __u32 valid_lft) 1931 int plen, __u8 ifa_flags, __u32 prefered_lft,
1932 __u32 valid_lft)
1931{ 1933{
1932 struct inet6_ifaddr *ifp; 1934 struct inet6_ifaddr *ifp;
1933 struct inet6_dev *idev; 1935 struct inet6_dev *idev;
@@ -1941,7 +1943,8 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
1941 if (!valid_lft || prefered_lft > valid_lft) 1943 if (!valid_lft || prefered_lft > valid_lft)
1942 return -EINVAL; 1944 return -EINVAL;
1943 1945
1944 if ((dev = __dev_get_by_index(&init_net, ifindex)) == NULL) 1946 dev = __dev_get_by_index(net, ifindex);
1947 if (!dev)
1945 return -ENODEV; 1948 return -ENODEV;
1946 1949
1947 if ((idev = addrconf_add_dev(dev)) == NULL) 1950 if ((idev = addrconf_add_dev(dev)) == NULL)
@@ -1986,13 +1989,15 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
1986 return PTR_ERR(ifp); 1989 return PTR_ERR(ifp);
1987} 1990}
1988 1991
1989static int inet6_addr_del(int ifindex, struct in6_addr *pfx, int plen) 1992static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
1993 int plen)
1990{ 1994{
1991 struct inet6_ifaddr *ifp; 1995 struct inet6_ifaddr *ifp;
1992 struct inet6_dev *idev; 1996 struct inet6_dev *idev;
1993 struct net_device *dev; 1997 struct net_device *dev;
1994 1998
1995 if ((dev = __dev_get_by_index(&init_net, ifindex)) == NULL) 1999 dev = __dev_get_by_index(net, ifindex);
2000 if (!dev)
1996 return -ENODEV; 2001 return -ENODEV;
1997 2002
1998 if ((idev = __in6_dev_get(dev)) == NULL) 2003 if ((idev = __in6_dev_get(dev)) == NULL)
@@ -2020,7 +2025,7 @@ static int inet6_addr_del(int ifindex, struct in6_addr *pfx, int plen)
2020} 2025}
2021 2026
2022 2027
2023int addrconf_add_ifaddr(void __user *arg) 2028int addrconf_add_ifaddr(struct net *net, void __user *arg)
2024{ 2029{
2025 struct in6_ifreq ireq; 2030 struct in6_ifreq ireq;
2026 int err; 2031 int err;
@@ -2032,13 +2037,14 @@ int addrconf_add_ifaddr(void __user *arg)
2032 return -EFAULT; 2037 return -EFAULT;
2033 2038
2034 rtnl_lock(); 2039 rtnl_lock();
2035 err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen, 2040 err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr,
2036 IFA_F_PERMANENT, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); 2041 ireq.ifr6_prefixlen, IFA_F_PERMANENT,
2042 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
2037 rtnl_unlock(); 2043 rtnl_unlock();
2038 return err; 2044 return err;
2039} 2045}
2040 2046
2041int addrconf_del_ifaddr(void __user *arg) 2047int addrconf_del_ifaddr(struct net *net, void __user *arg)
2042{ 2048{
2043 struct in6_ifreq ireq; 2049 struct in6_ifreq ireq;
2044 int err; 2050 int err;
@@ -2050,7 +2056,8 @@ int addrconf_del_ifaddr(void __user *arg)
2050 return -EFAULT; 2056 return -EFAULT;
2051 2057
2052 rtnl_lock(); 2058 rtnl_lock();
2053 err = inet6_addr_del(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen); 2059 err = inet6_addr_del(net, ireq.ifr6_ifindex, &ireq.ifr6_addr,
2060 ireq.ifr6_prefixlen);
2054 rtnl_unlock(); 2061 rtnl_unlock();
2055 return err; 2062 return err;
2056} 2063}
@@ -2061,6 +2068,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2061 struct inet6_ifaddr * ifp; 2068 struct inet6_ifaddr * ifp;
2062 struct in6_addr addr; 2069 struct in6_addr addr;
2063 struct net_device *dev; 2070 struct net_device *dev;
2071 struct net *net = idev->dev->nd_net;
2064 int scope; 2072 int scope;
2065 2073
2066 ASSERT_RTNL(); 2074 ASSERT_RTNL();
@@ -2087,7 +2095,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2087 return; 2095 return;
2088 } 2096 }
2089 2097
2090 for_each_netdev(&init_net, dev) { 2098 for_each_netdev(net, dev) {
2091 struct in_device * in_dev = __in_dev_get_rtnl(dev); 2099 struct in_device * in_dev = __in_dev_get_rtnl(dev);
2092 if (in_dev && (dev->flags & IFF_UP)) { 2100 if (in_dev && (dev->flags & IFF_UP)) {
2093 struct in_ifaddr * ifa; 2101 struct in_ifaddr * ifa;
@@ -2250,15 +2258,16 @@ ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
2250static void ip6_tnl_add_linklocal(struct inet6_dev *idev) 2258static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
2251{ 2259{
2252 struct net_device *link_dev; 2260 struct net_device *link_dev;
2261 struct net *net = idev->dev->nd_net;
2253 2262
2254 /* first try to inherit the link-local address from the link device */ 2263 /* first try to inherit the link-local address from the link device */
2255 if (idev->dev->iflink && 2264 if (idev->dev->iflink &&
2256 (link_dev = __dev_get_by_index(&init_net, idev->dev->iflink))) { 2265 (link_dev = __dev_get_by_index(net, idev->dev->iflink))) {
2257 if (!ipv6_inherit_linklocal(idev, link_dev)) 2266 if (!ipv6_inherit_linklocal(idev, link_dev))
2258 return; 2267 return;
2259 } 2268 }
2260 /* then try to inherit it from any device */ 2269 /* then try to inherit it from any device */
2261 for_each_netdev(&init_net, link_dev) { 2270 for_each_netdev(net, link_dev) {
2262 if (!ipv6_inherit_linklocal(idev, link_dev)) 2271 if (!ipv6_inherit_linklocal(idev, link_dev))
2263 return; 2272 return;
2264 } 2273 }
@@ -2291,9 +2300,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2291 int run_pending = 0; 2300 int run_pending = 0;
2292 int err; 2301 int err;
2293 2302
2294 if (dev->nd_net != &init_net)
2295 return NOTIFY_DONE;
2296
2297 switch(event) { 2303 switch(event) {
2298 case NETDEV_REGISTER: 2304 case NETDEV_REGISTER:
2299 if (!idev && dev->mtu >= IPV6_MIN_MTU) { 2305 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
@@ -2433,6 +2439,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2433{ 2439{
2434 struct inet6_dev *idev; 2440 struct inet6_dev *idev;
2435 struct inet6_ifaddr *ifa, **bifa; 2441 struct inet6_ifaddr *ifa, **bifa;
2442 struct net *net = dev->nd_net;
2436 int i; 2443 int i;
2437 2444
2438 ASSERT_RTNL(); 2445 ASSERT_RTNL();
@@ -2440,7 +2447,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2440 if (dev == init_net.loopback_dev && how == 1) 2447 if (dev == init_net.loopback_dev && how == 1)
2441 how = 0; 2448 how = 0;
2442 2449
2443 rt6_ifdown(dev); 2450 rt6_ifdown(net, dev);
2444 neigh_ifdown(&nd_tbl, dev); 2451 neigh_ifdown(&nd_tbl, dev);
2445 2452
2446 idev = __in6_dev_get(dev); 2453 idev = __in6_dev_get(dev);
@@ -3050,9 +3057,6 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3050 struct in6_addr *pfx; 3057 struct in6_addr *pfx;
3051 int err; 3058 int err;
3052 3059
3053 if (net != &init_net)
3054 return -EINVAL;
3055
3056 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); 3060 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
3057 if (err < 0) 3061 if (err < 0)
3058 return err; 3062 return err;
@@ -3062,7 +3066,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3062 if (pfx == NULL) 3066 if (pfx == NULL)
3063 return -EINVAL; 3067 return -EINVAL;
3064 3068
3065 return inet6_addr_del(ifm->ifa_index, pfx, ifm->ifa_prefixlen); 3069 return inet6_addr_del(net, ifm->ifa_index, pfx, ifm->ifa_prefixlen);
3066} 3070}
3067 3071
3068static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags, 3072static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
@@ -3115,9 +3119,6 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3115 u8 ifa_flags; 3119 u8 ifa_flags;
3116 int err; 3120 int err;
3117 3121
3118 if (net != &init_net)
3119 return -EINVAL;
3120
3121 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); 3122 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
3122 if (err < 0) 3123 if (err < 0)
3123 return err; 3124 return err;
@@ -3138,7 +3139,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3138 valid_lft = INFINITY_LIFE_TIME; 3139 valid_lft = INFINITY_LIFE_TIME;
3139 } 3140 }
3140 3141
3141 dev = __dev_get_by_index(&init_net, ifm->ifa_index); 3142 dev = __dev_get_by_index(net, ifm->ifa_index);
3142 if (dev == NULL) 3143 if (dev == NULL)
3143 return -ENODEV; 3144 return -ENODEV;
3144 3145
@@ -3151,8 +3152,9 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
3151 * It would be best to check for !NLM_F_CREATE here but 3152 * It would be best to check for !NLM_F_CREATE here but
3152 * userspace alreay relies on not having to provide this. 3153 * userspace alreay relies on not having to provide this.
3153 */ 3154 */
3154 return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen, 3155 return inet6_addr_add(net, ifm->ifa_index, pfx,
3155 ifa_flags, preferred_lft, valid_lft); 3156 ifm->ifa_prefixlen, ifa_flags,
3157 preferred_lft, valid_lft);
3156 } 3158 }
3157 3159
3158 if (nlh->nlmsg_flags & NLM_F_EXCL || 3160 if (nlh->nlmsg_flags & NLM_F_EXCL ||
@@ -3317,12 +3319,13 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3317 struct inet6_ifaddr *ifa; 3319 struct inet6_ifaddr *ifa;
3318 struct ifmcaddr6 *ifmca; 3320 struct ifmcaddr6 *ifmca;
3319 struct ifacaddr6 *ifaca; 3321 struct ifacaddr6 *ifaca;
3322 struct net *net = skb->sk->sk_net;
3320 3323
3321 s_idx = cb->args[0]; 3324 s_idx = cb->args[0];
3322 s_ip_idx = ip_idx = cb->args[1]; 3325 s_ip_idx = ip_idx = cb->args[1];
3323 3326
3324 idx = 0; 3327 idx = 0;
3325 for_each_netdev(&init_net, dev) { 3328 for_each_netdev(net, dev) {
3326 if (idx < s_idx) 3329 if (idx < s_idx)
3327 goto cont; 3330 goto cont;
3328 if (idx > s_idx) 3331 if (idx > s_idx)
@@ -3389,35 +3392,23 @@ cont:
3389 3392
3390static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) 3393static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
3391{ 3394{
3392 struct net *net = skb->sk->sk_net;
3393 enum addr_type_t type = UNICAST_ADDR; 3395 enum addr_type_t type = UNICAST_ADDR;
3394 3396
3395 if (net != &init_net)
3396 return 0;
3397
3398 return inet6_dump_addr(skb, cb, type); 3397 return inet6_dump_addr(skb, cb, type);
3399} 3398}
3400 3399
3401static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb) 3400static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
3402{ 3401{
3403 struct net *net = skb->sk->sk_net;
3404 enum addr_type_t type = MULTICAST_ADDR; 3402 enum addr_type_t type = MULTICAST_ADDR;
3405 3403
3406 if (net != &init_net)
3407 return 0;
3408
3409 return inet6_dump_addr(skb, cb, type); 3404 return inet6_dump_addr(skb, cb, type);
3410} 3405}
3411 3406
3412 3407
3413static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb) 3408static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
3414{ 3409{
3415 struct net *net = skb->sk->sk_net;
3416 enum addr_type_t type = ANYCAST_ADDR; 3410 enum addr_type_t type = ANYCAST_ADDR;
3417 3411
3418 if (net != &init_net)
3419 return 0;
3420
3421 return inet6_dump_addr(skb, cb, type); 3412 return inet6_dump_addr(skb, cb, type);
3422} 3413}
3423 3414
@@ -3433,9 +3424,6 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
3433 struct sk_buff *skb; 3424 struct sk_buff *skb;
3434 int err; 3425 int err;
3435 3426
3436 if (net != &init_net)
3437 return -EINVAL;
3438
3439 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy); 3427 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
3440 if (err < 0) 3428 if (err < 0)
3441 goto errout; 3429 goto errout;
@@ -3448,7 +3436,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
3448 3436
3449 ifm = nlmsg_data(nlh); 3437 ifm = nlmsg_data(nlh);
3450 if (ifm->ifa_index) 3438 if (ifm->ifa_index)
3451 dev = __dev_get_by_index(&init_net, ifm->ifa_index); 3439 dev = __dev_get_by_index(net, ifm->ifa_index);
3452 3440
3453 if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) { 3441 if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) {
3454 err = -EADDRNOTAVAIL; 3442 err = -EADDRNOTAVAIL;
@@ -3468,7 +3456,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
3468 kfree_skb(skb); 3456 kfree_skb(skb);
3469 goto errout_ifa; 3457 goto errout_ifa;
3470 } 3458 }
3471 err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); 3459 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
3472errout_ifa: 3460errout_ifa:
3473 in6_ifa_put(ifa); 3461 in6_ifa_put(ifa);
3474errout: 3462errout:
@@ -3478,6 +3466,7 @@ errout:
3478static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) 3466static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
3479{ 3467{
3480 struct sk_buff *skb; 3468 struct sk_buff *skb;
3469 struct net *net = ifa->idev->dev->nd_net;
3481 int err = -ENOBUFS; 3470 int err = -ENOBUFS;
3482 3471
3483 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); 3472 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
@@ -3491,10 +3480,10 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
3491 kfree_skb(skb); 3480 kfree_skb(skb);
3492 goto errout; 3481 goto errout;
3493 } 3482 }
3494 err = rtnl_notify(skb, &init_net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); 3483 err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
3495errout: 3484errout:
3496 if (err < 0) 3485 if (err < 0)
3497 rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_IFADDR, err); 3486 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
3498} 3487}
3499 3488
3500static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, 3489static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
@@ -3659,12 +3648,9 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
3659 struct net_device *dev; 3648 struct net_device *dev;
3660 struct inet6_dev *idev; 3649 struct inet6_dev *idev;
3661 3650
3662 if (net != &init_net)
3663 return 0;
3664
3665 read_lock(&dev_base_lock); 3651 read_lock(&dev_base_lock);
3666 idx = 0; 3652 idx = 0;
3667 for_each_netdev(&init_net, dev) { 3653 for_each_netdev(net, dev) {
3668 if (idx < s_idx) 3654 if (idx < s_idx)
3669 goto cont; 3655 goto cont;
3670 if ((idev = in6_dev_get(dev)) == NULL) 3656 if ((idev = in6_dev_get(dev)) == NULL)
@@ -3686,6 +3672,7 @@ cont:
3686void inet6_ifinfo_notify(int event, struct inet6_dev *idev) 3672void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
3687{ 3673{
3688 struct sk_buff *skb; 3674 struct sk_buff *skb;
3675 struct net *net = idev->dev->nd_net;
3689 int err = -ENOBUFS; 3676 int err = -ENOBUFS;
3690 3677
3691 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC); 3678 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
@@ -3699,10 +3686,10 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
3699 kfree_skb(skb); 3686 kfree_skb(skb);
3700 goto errout; 3687 goto errout;
3701 } 3688 }
3702 err = rtnl_notify(skb, &init_net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); 3689 err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
3703errout: 3690errout:
3704 if (err < 0) 3691 if (err < 0)
3705 rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_IFADDR, err); 3692 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
3706} 3693}
3707 3694
3708static inline size_t inet6_prefix_nlmsg_size(void) 3695static inline size_t inet6_prefix_nlmsg_size(void)
@@ -3755,6 +3742,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
3755 struct prefix_info *pinfo) 3742 struct prefix_info *pinfo)
3756{ 3743{
3757 struct sk_buff *skb; 3744 struct sk_buff *skb;
3745 struct net *net = idev->dev->nd_net;
3758 int err = -ENOBUFS; 3746 int err = -ENOBUFS;
3759 3747
3760 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC); 3748 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
@@ -3768,10 +3756,10 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
3768 kfree_skb(skb); 3756 kfree_skb(skb);
3769 goto errout; 3757 goto errout;
3770 } 3758 }
3771 err = rtnl_notify(skb, &init_net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC); 3759 err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
3772errout: 3760errout:
3773 if (err < 0) 3761 if (err < 0)
3774 rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_PREFIX, err); 3762 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
3775} 3763}
3776 3764
3777static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) 3765static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
@@ -4261,6 +4249,41 @@ int unregister_inet6addr_notifier(struct notifier_block *nb)
4261 4249
4262EXPORT_SYMBOL(unregister_inet6addr_notifier); 4250EXPORT_SYMBOL(unregister_inet6addr_notifier);
4263 4251
4252
4253static int addrconf_net_init(struct net *net)
4254{
4255 return 0;
4256}
4257
4258static void addrconf_net_exit(struct net *net)
4259{
4260 struct net_device *dev;
4261
4262 /*
4263 * Remove loopback references from default routing entries
4264 */
4265/* in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev); */
4266/* #ifdef CONFIG_IPV6_MULTIPLE_TABLES */
4267/* in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev); */
4268/* in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev); */
4269/* #endif */
4270
4271 rtnl_lock();
4272 /* clean dev list */
4273 for_each_netdev(net, dev) {
4274 if (__in6_dev_get(dev) == NULL)
4275 continue;
4276 addrconf_ifdown(dev, 1);
4277 }
4278 addrconf_ifdown(net->loopback_dev, 2);
4279 rtnl_unlock();
4280}
4281
4282static struct pernet_operations addrconf_net_ops = {
4283 .init = addrconf_net_init,
4284 .exit = addrconf_net_exit,
4285};
4286
4264/* 4287/*
4265 * Init / cleanup code 4288 * Init / cleanup code
4266 */ 4289 */
@@ -4302,14 +4325,9 @@ int __init addrconf_init(void)
4302 if (err) 4325 if (err)
4303 goto errlo; 4326 goto errlo;
4304 4327
4305 ip6_null_entry.u.dst.dev = init_net.loopback_dev; 4328 err = register_pernet_device(&addrconf_net_ops);
4306 ip6_null_entry.rt6i_idev = in6_dev_get(init_net.loopback_dev); 4329 if (err)
4307#ifdef CONFIG_IPV6_MULTIPLE_TABLES 4330 return err;
4308 ip6_prohibit_entry.u.dst.dev = init_net.loopback_dev;
4309 ip6_prohibit_entry.rt6i_idev = in6_dev_get(init_net.loopback_dev);
4310 ip6_blk_hole_entry.u.dst.dev = init_net.loopback_dev;
4311 ip6_blk_hole_entry.rt6i_idev = in6_dev_get(init_net.loopback_dev);
4312#endif
4313 4331
4314 register_netdevice_notifier(&ipv6_dev_notf); 4332 register_netdevice_notifier(&ipv6_dev_notf);
4315 4333
@@ -4339,31 +4357,19 @@ errlo:
4339 4357
4340void addrconf_cleanup(void) 4358void addrconf_cleanup(void)
4341{ 4359{
4342 struct net_device *dev;
4343 struct inet6_ifaddr *ifa; 4360 struct inet6_ifaddr *ifa;
4344 int i; 4361 int i;
4345 4362
4346 unregister_netdevice_notifier(&ipv6_dev_notf); 4363 unregister_netdevice_notifier(&ipv6_dev_notf);
4364 unregister_pernet_device(&addrconf_net_ops);
4347 4365
4348 unregister_pernet_subsys(&addrconf_ops); 4366 unregister_pernet_subsys(&addrconf_ops);
4349 4367
4350 rtnl_lock(); 4368 rtnl_lock();
4351 4369
4352 /* 4370 /*
4353 * clean dev list.
4354 */
4355
4356 for_each_netdev(&init_net, dev) {
4357 if (__in6_dev_get(dev) == NULL)
4358 continue;
4359 addrconf_ifdown(dev, 1);
4360 }
4361 addrconf_ifdown(init_net.loopback_dev, 2);
4362
4363 /*
4364 * Check hash table. 4371 * Check hash table.
4365 */ 4372 */
4366
4367 write_lock_bh(&addrconf_hash_lock); 4373 write_lock_bh(&addrconf_hash_lock);
4368 for (i=0; i < IN6_ADDR_HSIZE; i++) { 4374 for (i=0; i < IN6_ADDR_HSIZE; i++) {
4369 for (ifa=inet6_addr_lst[i]; ifa; ) { 4375 for (ifa=inet6_addr_lst[i]; ifa; ) {
@@ -4380,6 +4386,7 @@ void addrconf_cleanup(void)
4380 write_unlock_bh(&addrconf_hash_lock); 4386 write_unlock_bh(&addrconf_hash_lock);
4381 4387
4382 del_timer(&addr_chk_timer); 4388 del_timer(&addr_chk_timer);
4383
4384 rtnl_unlock(); 4389 rtnl_unlock();
4390
4391 unregister_pernet_subsys(&addrconf_net_ops);
4385} 4392}
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index a3c5a72218fd..3a8b3f52da35 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -58,6 +58,7 @@ static struct ip6addrlbl_table
58 * ::ffff:0:0/96 V4MAPPED 4 58 * ::ffff:0:0/96 V4MAPPED 4
59 * fc00::/7 N/A 5 ULA (RFC 4193) 59 * fc00::/7 N/A 5 ULA (RFC 4193)
60 * 2001::/32 N/A 6 Teredo (RFC 4380) 60 * 2001::/32 N/A 6 Teredo (RFC 4380)
61 * 2001:10::/28 N/A 7 ORCHID (RFC 4843)
61 * 62 *
62 * Note: 0xffffffff is used if we do not have any policies. 63 * Note: 0xffffffff is used if we do not have any policies.
63 */ 64 */
@@ -85,6 +86,10 @@ static const __initdata struct ip6addrlbl_init_table
85 .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}}, 86 .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}},
86 .prefixlen = 32, 87 .prefixlen = 32,
87 .label = 6, 88 .label = 6,
89 },{ /* 2001:10::/28 */
90 .prefix = &(struct in6_addr){{{ 0x20, 0x01, 0x00, 0x10 }}},
91 .prefixlen = 28,
92 .label = 7,
88 },{ /* ::ffff:0:0 */ 93 },{ /* ::ffff:0:0 */
89 .prefix = &(struct in6_addr){{{ [10] = 0xff, [11] = 0xff }}}, 94 .prefix = &(struct in6_addr){{{ [10] = 0xff, [11] = 0xff }}},
90 .prefixlen = 96, 95 .prefixlen = 96,
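The new row covers ORCHID space. 2001:10::/28 is 28 significant bits: the bytes 0x20, 0x01, 0x00 plus the top nibble of 0x10, which is why the compound literal only spells out four leading bytes and lets the rest default to zero. Below is an illustrative, non-kernel sketch of matching such a prefix length bit-by-bit; prefix_bits_equal() is a hypothetical helper, not the function addrlabel itself uses.

/* Illustrative prefix match for an arbitrary bit length (not kernel code). */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/in6.h>

static int prefix_bits_equal(const struct in6_addr *a,
			     const struct in6_addr *b, int plen)
{
	int bytes = plen / 8;		/* whole bytes covered by the prefix */
	int bits  = plen % 8;		/* remaining bits in the next byte   */

	if (memcmp(a, b, bytes))
		return 0;
	if (bits) {
		u8 mask = 0xff << (8 - bits);	/* e.g. /28 -> 0xf0 */
		if ((a->s6_addr[bytes] ^ b->s6_addr[bytes]) & mask)
			return 0;
	}
	return 1;
}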
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index f0aa97738746..afe9276d0420 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -92,9 +92,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol)
92 int try_loading_module = 0; 92 int try_loading_module = 0;
93 int err; 93 int err;
94 94
95 if (net != &init_net)
96 return -EAFNOSUPPORT;
97
98 if (sock->type != SOCK_RAW && 95 if (sock->type != SOCK_RAW &&
99 sock->type != SOCK_DGRAM && 96 sock->type != SOCK_DGRAM &&
100 !inet_ehash_secret) 97 !inet_ehash_secret)
@@ -248,6 +245,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
248 struct sock *sk = sock->sk; 245 struct sock *sk = sock->sk;
249 struct inet_sock *inet = inet_sk(sk); 246 struct inet_sock *inet = inet_sk(sk);
250 struct ipv6_pinfo *np = inet6_sk(sk); 247 struct ipv6_pinfo *np = inet6_sk(sk);
248 struct net *net = sk->sk_net;
251 __be32 v4addr = 0; 249 __be32 v4addr = 0;
252 unsigned short snum; 250 unsigned short snum;
253 int addr_type = 0; 251 int addr_type = 0;
@@ -278,7 +276,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
278 /* Check if the address belongs to the host. */ 276 /* Check if the address belongs to the host. */
279 if (addr_type == IPV6_ADDR_MAPPED) { 277 if (addr_type == IPV6_ADDR_MAPPED) {
280 v4addr = addr->sin6_addr.s6_addr32[3]; 278 v4addr = addr->sin6_addr.s6_addr32[3];
281 if (inet_addr_type(&init_net, v4addr) != RTN_LOCAL) { 279 if (inet_addr_type(net, v4addr) != RTN_LOCAL) {
282 err = -EADDRNOTAVAIL; 280 err = -EADDRNOTAVAIL;
283 goto out; 281 goto out;
284 } 282 }
@@ -300,7 +298,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
300 err = -EINVAL; 298 err = -EINVAL;
301 goto out; 299 goto out;
302 } 300 }
303 dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); 301 dev = dev_get_by_index(net, sk->sk_bound_dev_if);
304 if (!dev) { 302 if (!dev) {
305 err = -ENODEV; 303 err = -ENODEV;
306 goto out; 304 goto out;
@@ -312,7 +310,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
312 */ 310 */
313 v4addr = LOOPBACK4_IPV6; 311 v4addr = LOOPBACK4_IPV6;
314 if (!(addr_type & IPV6_ADDR_MULTICAST)) { 312 if (!(addr_type & IPV6_ADDR_MULTICAST)) {
315 if (!ipv6_chk_addr(&init_net, &addr->sin6_addr, 313 if (!ipv6_chk_addr(net, &addr->sin6_addr,
316 dev, 0)) { 314 dev, 0)) {
317 if (dev) 315 if (dev)
318 dev_put(dev); 316 dev_put(dev);
@@ -440,6 +438,7 @@ EXPORT_SYMBOL(inet6_getname);
440int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 438int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
441{ 439{
442 struct sock *sk = sock->sk; 440 struct sock *sk = sock->sk;
441 struct net *net = sk->sk_net;
443 442
444 switch(cmd) 443 switch(cmd)
445 { 444 {
@@ -452,14 +451,14 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
452 case SIOCADDRT: 451 case SIOCADDRT:
453 case SIOCDELRT: 452 case SIOCDELRT:
454 453
455 return(ipv6_route_ioctl(cmd,(void __user *)arg)); 454 return(ipv6_route_ioctl(net, cmd, (void __user *)arg));
456 455
457 case SIOCSIFADDR: 456 case SIOCSIFADDR:
458 return addrconf_add_ifaddr((void __user *) arg); 457 return addrconf_add_ifaddr(net, (void __user *) arg);
459 case SIOCDIFADDR: 458 case SIOCDIFADDR:
460 return addrconf_del_ifaddr((void __user *) arg); 459 return addrconf_del_ifaddr(net, (void __user *) arg);
461 case SIOCSIFDSTADDR: 460 case SIOCSIFDSTADDR:
462 return addrconf_set_dstaddr((void __user *) arg); 461 return addrconf_set_dstaddr(net, (void __user *) arg);
463 default: 462 default:
464 if (!sk->sk_prot->ioctl) 463 if (!sk->sk_prot->ioctl)
465 return -ENOIOCTLCMD; 464 return -ENOIOCTLCMD;
@@ -678,6 +677,129 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
678 677
679EXPORT_SYMBOL_GPL(ipv6_opt_accepted); 678EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
680 679
680static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
681 int proto)
682{
683 struct inet6_protocol *ops = NULL;
684
685 for (;;) {
686 struct ipv6_opt_hdr *opth;
687 int len;
688
689 if (proto != NEXTHDR_HOP) {
690 ops = rcu_dereference(inet6_protos[proto]);
691
692 if (unlikely(!ops))
693 break;
694
695 if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
696 break;
697 }
698
699 if (unlikely(!pskb_may_pull(skb, 8)))
700 break;
701
702 opth = (void *)skb->data;
703 len = ipv6_optlen(opth);
704
705 if (unlikely(!pskb_may_pull(skb, len)))
706 break;
707
708 proto = opth->nexthdr;
709 __skb_pull(skb, len);
710 }
711
712 return ops;
713}
714
715static int ipv6_gso_send_check(struct sk_buff *skb)
716{
717 struct ipv6hdr *ipv6h;
718 struct inet6_protocol *ops;
719 int err = -EINVAL;
720
721 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
722 goto out;
723
724 ipv6h = ipv6_hdr(skb);
725 __skb_pull(skb, sizeof(*ipv6h));
726 err = -EPROTONOSUPPORT;
727
728 rcu_read_lock();
729 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
730 if (likely(ops && ops->gso_send_check)) {
731 skb_reset_transport_header(skb);
732 err = ops->gso_send_check(skb);
733 }
734 rcu_read_unlock();
735
736out:
737 return err;
738}
739
740static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
741{
742 struct sk_buff *segs = ERR_PTR(-EINVAL);
743 struct ipv6hdr *ipv6h;
744 struct inet6_protocol *ops;
745
746 if (!(features & NETIF_F_V6_CSUM))
747 features &= ~NETIF_F_SG;
748
749 if (unlikely(skb_shinfo(skb)->gso_type &
750 ~(SKB_GSO_UDP |
751 SKB_GSO_DODGY |
752 SKB_GSO_TCP_ECN |
753 SKB_GSO_TCPV6 |
754 0)))
755 goto out;
756
757 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
758 goto out;
759
760 ipv6h = ipv6_hdr(skb);
761 __skb_pull(skb, sizeof(*ipv6h));
762 segs = ERR_PTR(-EPROTONOSUPPORT);
763
764 rcu_read_lock();
765 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
766 if (likely(ops && ops->gso_segment)) {
767 skb_reset_transport_header(skb);
768 segs = ops->gso_segment(skb, features);
769 }
770 rcu_read_unlock();
771
772 if (unlikely(IS_ERR(segs)))
773 goto out;
774
775 for (skb = segs; skb; skb = skb->next) {
776 ipv6h = ipv6_hdr(skb);
777 ipv6h->payload_len = htons(skb->len - skb->mac_len -
778 sizeof(*ipv6h));
779 }
780
781out:
782 return segs;
783}
784
785static struct packet_type ipv6_packet_type = {
786 .type = __constant_htons(ETH_P_IPV6),
787 .func = ipv6_rcv,
788 .gso_send_check = ipv6_gso_send_check,
789 .gso_segment = ipv6_gso_segment,
790};
791
792static int __init ipv6_packet_init(void)
793{
794 dev_add_pack(&ipv6_packet_type);
795 return 0;
796}
797
798static void ipv6_packet_cleanup(void)
799{
800 dev_remove_pack(&ipv6_packet_type);
801}
802
681static int __init init_ipv6_mibs(void) 803static int __init init_ipv6_mibs(void)
682{ 804{
683 if (snmp_mib_init((void **)ipv6_statistics, 805 if (snmp_mib_init((void **)ipv6_statistics,
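The ipv6_gso_pull_exthdrs() code added above walks the extension-header chain by repeatedly pulling ipv6_optlen() bytes. For the generic option headers the GSO path treats as skippable, that length is (hdrlen + 1) * 8 octets, since hdrlen counts 8-octet units excluding the first. A small sketch of that arithmetic, using shortened struct and function names that are not the kernel's definitions:

/*
 * Illustrative sketch of the generic extension-header length rule behind
 * ipv6_optlen().  struct opt_hdr and opt_hdr_len() are placeholders.
 */
#include <linux/types.h>

struct opt_hdr {
	u8 nexthdr;	/* protocol number of the following header        */
	u8 hdrlen;	/* length in 8-octet units, excluding the first 8 */
	/* option data follows */
};

static unsigned int opt_hdr_len(const struct opt_hdr *h)
{
	return ((unsigned int)h->hdrlen + 1) * 8;  /* same as (hdrlen + 1) << 3 */
}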
@@ -691,12 +813,16 @@ static int __init init_ipv6_mibs(void)
691 goto err_icmpmsg_mib; 813 goto err_icmpmsg_mib;
692 if (snmp_mib_init((void **)udp_stats_in6, sizeof (struct udp_mib)) < 0) 814 if (snmp_mib_init((void **)udp_stats_in6, sizeof (struct udp_mib)) < 0)
693 goto err_udp_mib; 815 goto err_udp_mib;
816#ifdef CONFIG_IP_UDPLITE
694 if (snmp_mib_init((void **)udplite_stats_in6, 817 if (snmp_mib_init((void **)udplite_stats_in6,
695 sizeof (struct udp_mib)) < 0) 818 sizeof (struct udp_mib)) < 0)
696 goto err_udplite_mib; 819 goto err_udplite_mib;
820#endif
697 return 0; 821 return 0;
698 822
823#ifdef CONFIG_IP_UDPLITE
699err_udplite_mib: 824err_udplite_mib:
825#endif
700 snmp_mib_free((void **)udp_stats_in6); 826 snmp_mib_free((void **)udp_stats_in6);
701err_udp_mib: 827err_udp_mib:
702 snmp_mib_free((void **)icmpv6msg_statistics); 828 snmp_mib_free((void **)icmpv6msg_statistics);
@@ -715,7 +841,9 @@ static void cleanup_ipv6_mibs(void)
715 snmp_mib_free((void **)icmpv6_statistics); 841 snmp_mib_free((void **)icmpv6_statistics);
716 snmp_mib_free((void **)icmpv6msg_statistics); 842 snmp_mib_free((void **)icmpv6msg_statistics);
717 snmp_mib_free((void **)udp_stats_in6); 843 snmp_mib_free((void **)udp_stats_in6);
844#ifdef CONFIG_IP_UDPLITE
718 snmp_mib_free((void **)udplite_stats_in6); 845 snmp_mib_free((void **)udplite_stats_in6);
846#endif
719} 847}
720 848
721static int inet6_net_init(struct net *net) 849static int inet6_net_init(struct net *net)
@@ -760,9 +888,11 @@ static int __init inet6_init(void)
760 if (err) 888 if (err)
761 goto out_unregister_tcp_proto; 889 goto out_unregister_tcp_proto;
762 890
891#ifdef CONFIG_IP_UDPLITE
763 err = proto_register(&udplitev6_prot, 1); 892 err = proto_register(&udplitev6_prot, 1);
764 if (err) 893 if (err)
765 goto out_unregister_udp_proto; 894 goto out_unregister_udp_proto;
895#endif
766 896
767 err = proto_register(&rawv6_prot, 1); 897 err = proto_register(&rawv6_prot, 1);
768 if (err) 898 if (err)
@@ -802,19 +932,13 @@ static int __init inet6_init(void)
802 err = register_pernet_subsys(&inet6_net_ops); 932 err = register_pernet_subsys(&inet6_net_ops);
803 if (err) 933 if (err)
804 goto register_pernet_fail; 934 goto register_pernet_fail;
805 935 err = icmpv6_init();
806#ifdef CONFIG_SYSCTL
807 err = ipv6_sysctl_register();
808 if (err)
809 goto sysctl_fail;
810#endif
811 err = icmpv6_init(&inet6_family_ops);
812 if (err) 936 if (err)
813 goto icmp_fail; 937 goto icmp_fail;
814 err = ndisc_init(&inet6_family_ops); 938 err = ndisc_init();
815 if (err) 939 if (err)
816 goto ndisc_fail; 940 goto ndisc_fail;
817 err = igmp6_init(&inet6_family_ops); 941 err = igmp6_init();
818 if (err) 942 if (err)
819 goto igmp_fail; 943 goto igmp_fail;
820 err = ipv6_netfilter_init(); 944 err = ipv6_netfilter_init();
@@ -874,9 +998,19 @@ static int __init inet6_init(void)
874 err = ipv6_packet_init(); 998 err = ipv6_packet_init();
875 if (err) 999 if (err)
876 goto ipv6_packet_fail; 1000 goto ipv6_packet_fail;
1001
1002#ifdef CONFIG_SYSCTL
1003 err = ipv6_sysctl_register();
1004 if (err)
1005 goto sysctl_fail;
1006#endif
877out: 1007out:
878 return err; 1008 return err;
879 1009
1010#ifdef CONFIG_SYSCTL
1011sysctl_fail:
1012 ipv6_packet_cleanup();
1013#endif
880ipv6_packet_fail: 1014ipv6_packet_fail:
881 tcpv6_exit(); 1015 tcpv6_exit();
882tcpv6_fail: 1016tcpv6_fail:
@@ -918,10 +1052,6 @@ igmp_fail:
918ndisc_fail: 1052ndisc_fail:
919 icmpv6_cleanup(); 1053 icmpv6_cleanup();
920icmp_fail: 1054icmp_fail:
921#ifdef CONFIG_SYSCTL
922 ipv6_sysctl_unregister();
923sysctl_fail:
924#endif
925 unregister_pernet_subsys(&inet6_net_ops); 1055 unregister_pernet_subsys(&inet6_net_ops);
926register_pernet_fail: 1056register_pernet_fail:
927 cleanup_ipv6_mibs(); 1057 cleanup_ipv6_mibs();
@@ -933,8 +1063,10 @@ out_sock_register_fail:
933out_unregister_raw_proto: 1063out_unregister_raw_proto:
934 proto_unregister(&rawv6_prot); 1064 proto_unregister(&rawv6_prot);
935out_unregister_udplite_proto: 1065out_unregister_udplite_proto:
1066#ifdef CONFIG_IP_UDPLITE
936 proto_unregister(&udplitev6_prot); 1067 proto_unregister(&udplitev6_prot);
937out_unregister_udp_proto: 1068out_unregister_udp_proto:
1069#endif
938 proto_unregister(&udpv6_prot); 1070 proto_unregister(&udpv6_prot);
939out_unregister_tcp_proto: 1071out_unregister_tcp_proto:
940 proto_unregister(&tcpv6_prot); 1072 proto_unregister(&tcpv6_prot);
@@ -949,8 +1081,13 @@ static void __exit inet6_exit(void)
949 /* Disallow any further netlink messages */ 1081 /* Disallow any further netlink messages */
950 rtnl_unregister_all(PF_INET6); 1082 rtnl_unregister_all(PF_INET6);
951 1083
1084#ifdef CONFIG_SYSCTL
1085 ipv6_sysctl_unregister();
1086#endif
952 udpv6_exit(); 1087 udpv6_exit();
1088#ifdef CONFIG_IP_UDPLITE
953 udplitev6_exit(); 1089 udplitev6_exit();
1090#endif
954 tcpv6_exit(); 1091 tcpv6_exit();
955 1092
956 /* Cleanup code parts. */ 1093 /* Cleanup code parts. */
@@ -976,13 +1113,13 @@ static void __exit inet6_exit(void)
976 ndisc_cleanup(); 1113 ndisc_cleanup();
977 icmpv6_cleanup(); 1114 icmpv6_cleanup();
978 rawv6_exit(); 1115 rawv6_exit();
979#ifdef CONFIG_SYSCTL 1116
980 ipv6_sysctl_unregister();
981#endif
982 unregister_pernet_subsys(&inet6_net_ops); 1117 unregister_pernet_subsys(&inet6_net_ops);
983 cleanup_ipv6_mibs(); 1118 cleanup_ipv6_mibs();
984 proto_unregister(&rawv6_prot); 1119 proto_unregister(&rawv6_prot);
1120#ifdef CONFIG_IP_UDPLITE
985 proto_unregister(&udplitev6_prot); 1121 proto_unregister(&udplitev6_prot);
1122#endif
986 proto_unregister(&udpv6_prot); 1123 proto_unregister(&udpv6_prot);
987 proto_unregister(&tcpv6_prot); 1124 proto_unregister(&tcpv6_prot);
988} 1125}
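The inet6_init() hunks above move ipv6_sysctl_register() to the tail of the init sequence and give it a matching sysctl_fail unwind label, keeping the error labels in strict reverse order of the setup calls. A minimal sketch of that goto-unwind idiom is below; step_a/b/c are made-up names standing in for the real registration steps.

/* Illustrative init/unwind skeleton; the step_* functions are hypothetical. */
#include <linux/init.h>

int step_a_register(void);
void step_a_unregister(void);
int step_b_register(void);
void step_b_unregister(void);
int step_c_register(void);
void step_c_unregister(void);

static int __init example_init(void)
{
	int err;

	err = step_a_register();
	if (err)
		goto out;

	err = step_b_register();
	if (err)
		goto out_a;

	err = step_c_register();
	if (err)
		goto out_b;

	return 0;

out_b:
	step_b_unregister();	/* undo in reverse order of setup */
out_a:
	step_a_unregister();
out:
	return err;
}

Each later registration only ever has to unwind the steps that already succeeded, which is exactly why the sysctl registration and its label had to move together.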
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 9c7f83fbc3a1..96868b994b37 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -101,7 +101,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr)
101 if (ifindex == 0) { 101 if (ifindex == 0) {
102 struct rt6_info *rt; 102 struct rt6_info *rt;
103 103
104 rt = rt6_lookup(addr, NULL, 0, 0); 104 rt = rt6_lookup(&init_net, addr, NULL, 0, 0);
105 if (rt) { 105 if (rt) {
106 dev = rt->rt6i_dev; 106 dev = rt->rt6i_dev;
107 dev_hold(dev); 107 dev_hold(dev);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 695c0ca8a417..55137408f054 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -29,24 +29,22 @@ struct fib6_rule
29 u8 tclass; 29 u8 tclass;
30}; 30};
31 31
32static struct fib_rules_ops fib6_rules_ops; 32struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
33 33 int flags, pol_lookup_t lookup)
34struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags,
35 pol_lookup_t lookup)
36{ 34{
37 struct fib_lookup_arg arg = { 35 struct fib_lookup_arg arg = {
38 .lookup_ptr = lookup, 36 .lookup_ptr = lookup,
39 }; 37 };
40 38
41 fib_rules_lookup(&fib6_rules_ops, fl, flags, &arg); 39 fib_rules_lookup(net->ipv6.fib6_rules_ops, fl, flags, &arg);
42 if (arg.rule) 40 if (arg.rule)
43 fib_rule_put(arg.rule); 41 fib_rule_put(arg.rule);
44 42
45 if (arg.result) 43 if (arg.result)
46 return arg.result; 44 return arg.result;
47 45
48 dst_hold(&ip6_null_entry.u.dst); 46 dst_hold(&net->ipv6.ip6_null_entry->u.dst);
49 return &ip6_null_entry.u.dst; 47 return &net->ipv6.ip6_null_entry->u.dst;
50} 48}
51 49
52static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, 50static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -54,28 +52,29 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
54{ 52{
55 struct rt6_info *rt = NULL; 53 struct rt6_info *rt = NULL;
56 struct fib6_table *table; 54 struct fib6_table *table;
55 struct net *net = rule->fr_net;
57 pol_lookup_t lookup = arg->lookup_ptr; 56 pol_lookup_t lookup = arg->lookup_ptr;
58 57
59 switch (rule->action) { 58 switch (rule->action) {
60 case FR_ACT_TO_TBL: 59 case FR_ACT_TO_TBL:
61 break; 60 break;
62 case FR_ACT_UNREACHABLE: 61 case FR_ACT_UNREACHABLE:
63 rt = &ip6_null_entry; 62 rt = net->ipv6.ip6_null_entry;
64 goto discard_pkt; 63 goto discard_pkt;
65 default: 64 default:
66 case FR_ACT_BLACKHOLE: 65 case FR_ACT_BLACKHOLE:
67 rt = &ip6_blk_hole_entry; 66 rt = net->ipv6.ip6_blk_hole_entry;
68 goto discard_pkt; 67 goto discard_pkt;
69 case FR_ACT_PROHIBIT: 68 case FR_ACT_PROHIBIT:
70 rt = &ip6_prohibit_entry; 69 rt = net->ipv6.ip6_prohibit_entry;
71 goto discard_pkt; 70 goto discard_pkt;
72 } 71 }
73 72
74 table = fib6_get_table(rule->table); 73 table = fib6_get_table(net, rule->table);
75 if (table) 74 if (table)
76 rt = lookup(table, flp, flags); 75 rt = lookup(net, table, flp, flags);
77 76
78 if (rt != &ip6_null_entry) { 77 if (rt != net->ipv6.ip6_null_entry) {
79 struct fib6_rule *r = (struct fib6_rule *)rule; 78 struct fib6_rule *r = (struct fib6_rule *)rule;
80 79
81 /* 80 /*
@@ -85,8 +84,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
85 if ((rule->flags & FIB_RULE_FIND_SADDR) && 84 if ((rule->flags & FIB_RULE_FIND_SADDR) &&
86 r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) { 85 r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) {
87 struct in6_addr saddr; 86 struct in6_addr saddr;
88 if (ipv6_get_saddr(&rt->u.dst, &flp->fl6_dst, 87 if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
89 &saddr)) 88 &flp->fl6_dst, &saddr))
90 goto again; 89 goto again;
91 if (!ipv6_prefix_equal(&saddr, &r->src.addr, 90 if (!ipv6_prefix_equal(&saddr, &r->src.addr,
92 r->src.plen)) 91 r->src.plen))
@@ -145,13 +144,14 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
145 struct nlattr **tb) 144 struct nlattr **tb)
146{ 145{
147 int err = -EINVAL; 146 int err = -EINVAL;
147 struct net *net = skb->sk->sk_net;
148 struct fib6_rule *rule6 = (struct fib6_rule *) rule; 148 struct fib6_rule *rule6 = (struct fib6_rule *) rule;
149 149
150 if (rule->action == FR_ACT_TO_TBL) { 150 if (rule->action == FR_ACT_TO_TBL) {
151 if (rule->table == RT6_TABLE_UNSPEC) 151 if (rule->table == RT6_TABLE_UNSPEC)
152 goto errout; 152 goto errout;
153 153
154 if (fib6_new_table(rule->table) == NULL) { 154 if (fib6_new_table(net, rule->table) == NULL) {
155 err = -ENOBUFS; 155 err = -ENOBUFS;
156 goto errout; 156 goto errout;
157 } 157 }
@@ -234,7 +234,7 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
234 + nla_total_size(16); /* src */ 234 + nla_total_size(16); /* src */
235} 235}
236 236
237static struct fib_rules_ops fib6_rules_ops = { 237static struct fib_rules_ops fib6_rules_ops_template = {
238 .family = AF_INET6, 238 .family = AF_INET6,
239 .rule_size = sizeof(struct fib6_rule), 239 .rule_size = sizeof(struct fib6_rule),
240 .addr_size = sizeof(struct in6_addr), 240 .addr_size = sizeof(struct in6_addr),
@@ -247,45 +247,64 @@ static struct fib_rules_ops fib6_rules_ops = {
247 .nlmsg_payload = fib6_rule_nlmsg_payload, 247 .nlmsg_payload = fib6_rule_nlmsg_payload,
248 .nlgroup = RTNLGRP_IPV6_RULE, 248 .nlgroup = RTNLGRP_IPV6_RULE,
249 .policy = fib6_rule_policy, 249 .policy = fib6_rule_policy,
250 .rules_list = LIST_HEAD_INIT(fib6_rules_ops.rules_list),
251 .owner = THIS_MODULE, 250 .owner = THIS_MODULE,
252 .fro_net = &init_net, 251 .fro_net = &init_net,
253}; 252};
254 253
255static int __init fib6_default_rules_init(void) 254static int fib6_rules_net_init(struct net *net)
256{ 255{
257 int err; 256 int err = -ENOMEM;
258 257
259 err = fib_default_rule_add(&fib6_rules_ops, 0, 258 net->ipv6.fib6_rules_ops = kmemdup(&fib6_rules_ops_template,
260 RT6_TABLE_LOCAL, FIB_RULE_PERMANENT); 259 sizeof(*net->ipv6.fib6_rules_ops),
261 if (err < 0) 260 GFP_KERNEL);
262 return err; 261 if (!net->ipv6.fib6_rules_ops)
263 err = fib_default_rule_add(&fib6_rules_ops, 0x7FFE, RT6_TABLE_MAIN, 0); 262 goto out;
264 if (err < 0)
265 return err;
266 return 0;
267}
268 263
269int __init fib6_rules_init(void) 264 net->ipv6.fib6_rules_ops->fro_net = net;
270{ 265 INIT_LIST_HEAD(&net->ipv6.fib6_rules_ops->rules_list);
271 int ret;
272 266
273 ret = fib6_default_rules_init(); 267 err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 0,
274 if (ret) 268 RT6_TABLE_LOCAL, FIB_RULE_PERMANENT);
275 goto out; 269 if (err)
270 goto out_fib6_rules_ops;
276 271
277 ret = fib_rules_register(&fib6_rules_ops); 272 err = fib_default_rule_add(net->ipv6.fib6_rules_ops,
278 if (ret) 273 0x7FFE, RT6_TABLE_MAIN, 0);
279 goto out_default_rules_init; 274 if (err)
275 goto out_fib6_default_rule_add;
276
277 err = fib_rules_register(net->ipv6.fib6_rules_ops);
278 if (err)
279 goto out_fib6_default_rule_add;
280out: 280out:
281 return ret; 281 return err;
282 282
283out_default_rules_init: 283out_fib6_default_rule_add:
284 fib_rules_cleanup_ops(&fib6_rules_ops); 284 fib_rules_cleanup_ops(net->ipv6.fib6_rules_ops);
285out_fib6_rules_ops:
286 kfree(net->ipv6.fib6_rules_ops);
285 goto out; 287 goto out;
286} 288}
287 289
290static void fib6_rules_net_exit(struct net *net)
291{
292 fib_rules_unregister(net->ipv6.fib6_rules_ops);
293 kfree(net->ipv6.fib6_rules_ops);
294}
295
296static struct pernet_operations fib6_rules_net_ops = {
297 .init = fib6_rules_net_init,
298 .exit = fib6_rules_net_exit,
299};
300
301int __init fib6_rules_init(void)
302{
303 return register_pernet_subsys(&fib6_rules_net_ops);
304}
305
306
288void fib6_rules_cleanup(void) 307void fib6_rules_cleanup(void)
289{ 308{
290 fib_rules_unregister(&fib6_rules_ops); 309 return unregister_pernet_subsys(&fib6_rules_net_ops);
291} 310}
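fib6_rules_ops is no longer a single static object: each namespace clones fib6_rules_ops_template with kmemdup() and then fixes up the per-namespace members (fro_net, rules_list) before registering. A hedged sketch of that clone-a-template pattern, with hypothetical foo_* names and fields:

/*
 * Illustrative clone-a-template pattern; struct foo_ops and its fields
 * are placeholders, not the fib_rules_ops layout.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/list.h>
#include <net/net_namespace.h>

struct foo_ops {
	int defaults;			/* shared template value           */
	struct net *owner_net;		/* must be unique per namespace    */
	struct list_head items;		/* must be re-initialised per copy */
};

static const struct foo_ops foo_ops_template = {
	.defaults = 42,
};

static struct foo_ops *foo_ops_clone(struct net *net)
{
	struct foo_ops *ops;

	ops = kmemdup(&foo_ops_template, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return NULL;

	/* Only the per-namespace members need touching after the copy. */
	ops->owner_net = net;
	INIT_LIST_HEAD(&ops->items);
	return ops;
}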
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 121d517bf91c..6b5391ab8346 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -80,8 +80,10 @@ EXPORT_SYMBOL(icmpv6msg_statistics);
80 * 80 *
81 * On SMP we have one ICMP socket per-cpu. 81 * On SMP we have one ICMP socket per-cpu.
82 */ 82 */
83static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL; 83static inline struct sock *icmpv6_sk(struct net *net)
84#define icmpv6_socket __get_cpu_var(__icmpv6_socket) 84{
85 return net->ipv6.icmp_sk[smp_processor_id()];
86}
85 87
86static int icmpv6_rcv(struct sk_buff *skb); 88static int icmpv6_rcv(struct sk_buff *skb);
87 89
@@ -90,11 +92,11 @@ static struct inet6_protocol icmpv6_protocol = {
90 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 92 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
91}; 93};
92 94
93static __inline__ int icmpv6_xmit_lock(void) 95static __inline__ int icmpv6_xmit_lock(struct sock *sk)
94{ 96{
95 local_bh_disable(); 97 local_bh_disable();
96 98
97 if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock))) { 99 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
98 /* This can happen if the output path (f.e. SIT or 100 /* This can happen if the output path (f.e. SIT or
99 * ip6ip6 tunnel) signals dst_link_failure() for an 101 * ip6ip6 tunnel) signals dst_link_failure() for an
100 * outgoing ICMP6 packet. 102 * outgoing ICMP6 packet.
@@ -105,9 +107,9 @@ static __inline__ int icmpv6_xmit_lock(void)
105 return 0; 107 return 0;
106} 108}
107 109
108static __inline__ void icmpv6_xmit_unlock(void) 110static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
109{ 111{
110 spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock); 112 spin_unlock_bh(&sk->sk_lock.slock);
111} 113}
112 114
113/* 115/*
@@ -161,6 +163,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
161 struct flowi *fl) 163 struct flowi *fl)
162{ 164{
163 struct dst_entry *dst; 165 struct dst_entry *dst;
166 struct net *net = sk->sk_net;
164 int res = 0; 167 int res = 0;
165 168
166 /* Informational messages are not limited. */ 169 /* Informational messages are not limited. */
@@ -176,7 +179,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
176 * XXX: perhaps the expire for routing entries cloned by 179 * XXX: perhaps the expire for routing entries cloned by
177 * this lookup should be more aggressive (not longer than timeout). 180 * this lookup should be more aggressive (not longer than timeout).
178 */ 181 */
179 dst = ip6_route_output(sk, fl); 182 dst = ip6_route_output(net, sk, fl);
180 if (dst->error) { 183 if (dst->error) {
181 IP6_INC_STATS(ip6_dst_idev(dst), 184 IP6_INC_STATS(ip6_dst_idev(dst),
182 IPSTATS_MIB_OUTNOROUTES); 185 IPSTATS_MIB_OUTNOROUTES);
@@ -184,7 +187,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
184 res = 1; 187 res = 1;
185 } else { 188 } else {
186 struct rt6_info *rt = (struct rt6_info *)dst; 189 struct rt6_info *rt = (struct rt6_info *)dst;
187 int tmo = init_net.ipv6.sysctl.icmpv6_time; 190 int tmo = net->ipv6.sysctl.icmpv6_time;
188 191
189 /* Give more bandwidth to wider prefixes. */ 192 /* Give more bandwidth to wider prefixes. */
190 if (rt->rt6i_dst.plen < 128) 193 if (rt->rt6i_dst.plen < 128)
@@ -303,6 +306,7 @@ static inline void mip6_addr_swap(struct sk_buff *skb) {}
303void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, 306void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
304 struct net_device *dev) 307 struct net_device *dev)
305{ 308{
309 struct net *net = skb->dev->nd_net;
306 struct inet6_dev *idev = NULL; 310 struct inet6_dev *idev = NULL;
307 struct ipv6hdr *hdr = ipv6_hdr(skb); 311 struct ipv6hdr *hdr = ipv6_hdr(skb);
308 struct sock *sk; 312 struct sock *sk;
@@ -332,7 +336,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
332 */ 336 */
333 addr_type = ipv6_addr_type(&hdr->daddr); 337 addr_type = ipv6_addr_type(&hdr->daddr);
334 338
335 if (ipv6_chk_addr(&init_net, &hdr->daddr, skb->dev, 0)) 339 if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))
336 saddr = &hdr->daddr; 340 saddr = &hdr->daddr;
337 341
338 /* 342 /*
@@ -389,12 +393,12 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
389 fl.fl_icmp_code = code; 393 fl.fl_icmp_code = code;
390 security_skb_classify_flow(skb, &fl); 394 security_skb_classify_flow(skb, &fl);
391 395
392 if (icmpv6_xmit_lock()) 396 sk = icmpv6_sk(net);
393 return;
394
395 sk = icmpv6_socket->sk;
396 np = inet6_sk(sk); 397 np = inet6_sk(sk);
397 398
399 if (icmpv6_xmit_lock(sk))
400 return;
401
398 if (!icmpv6_xrlim_allow(sk, type, &fl)) 402 if (!icmpv6_xrlim_allow(sk, type, &fl))
399 goto out; 403 goto out;
400 404
@@ -498,13 +502,14 @@ out_put:
498out_dst_release: 502out_dst_release:
499 dst_release(dst); 503 dst_release(dst);
500out: 504out:
501 icmpv6_xmit_unlock(); 505 icmpv6_xmit_unlock(sk);
502} 506}
503 507
504EXPORT_SYMBOL(icmpv6_send); 508EXPORT_SYMBOL(icmpv6_send);
505 509
506static void icmpv6_echo_reply(struct sk_buff *skb) 510static void icmpv6_echo_reply(struct sk_buff *skb)
507{ 511{
512 struct net *net = skb->dev->nd_net;
508 struct sock *sk; 513 struct sock *sk;
509 struct inet6_dev *idev; 514 struct inet6_dev *idev;
510 struct ipv6_pinfo *np; 515 struct ipv6_pinfo *np;
@@ -535,12 +540,12 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
535 fl.fl_icmp_type = ICMPV6_ECHO_REPLY; 540 fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
536 security_skb_classify_flow(skb, &fl); 541 security_skb_classify_flow(skb, &fl);
537 542
538 if (icmpv6_xmit_lock()) 543 sk = icmpv6_sk(net);
539 return;
540
541 sk = icmpv6_socket->sk;
542 np = inet6_sk(sk); 544 np = inet6_sk(sk);
543 545
546 if (icmpv6_xmit_lock(sk))
547 return;
548
544 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) 549 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
545 fl.oif = np->mcast_oif; 550 fl.oif = np->mcast_oif;
546 551
@@ -584,7 +589,7 @@ out_put:
584 in6_dev_put(idev); 589 in6_dev_put(idev);
585 dst_release(dst); 590 dst_release(dst);
586out: 591out:
587 icmpv6_xmit_unlock(); 592 icmpv6_xmit_unlock(sk);
588} 593}
589 594
590static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info) 595static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
@@ -775,19 +780,41 @@ drop_no_count:
775 return 0; 780 return 0;
776} 781}
777 782
783void icmpv6_flow_init(struct sock *sk, struct flowi *fl,
784 u8 type,
785 const struct in6_addr *saddr,
786 const struct in6_addr *daddr,
787 int oif)
788{
789 memset(fl, 0, sizeof(*fl));
790 ipv6_addr_copy(&fl->fl6_src, saddr);
791 ipv6_addr_copy(&fl->fl6_dst, daddr);
792 fl->proto = IPPROTO_ICMPV6;
793 fl->fl_icmp_type = type;
794 fl->fl_icmp_code = 0;
795 fl->oif = oif;
796 security_sk_classify_flow(sk, fl);
797}
798
778/* 799/*
779 * Special lock-class for __icmpv6_socket: 800 * Special lock-class for __icmpv6_sk:
780 */ 801 */
781static struct lock_class_key icmpv6_socket_sk_dst_lock_key; 802static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
782 803
783int __init icmpv6_init(struct net_proto_family *ops) 804static int __net_init icmpv6_sk_init(struct net *net)
784{ 805{
785 struct sock *sk; 806 struct sock *sk;
786 int err, i, j; 807 int err, i, j;
787 808
809 net->ipv6.icmp_sk =
810 kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
811 if (net->ipv6.icmp_sk == NULL)
812 return -ENOMEM;
813
788 for_each_possible_cpu(i) { 814 for_each_possible_cpu(i) {
815 struct socket *sock;
789 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, 816 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
790 &per_cpu(__icmpv6_socket, i)); 817 &sock);
791 if (err < 0) { 818 if (err < 0) {
792 printk(KERN_ERR 819 printk(KERN_ERR
793 "Failed to initialize the ICMP6 control socket " 820 "Failed to initialize the ICMP6 control socket "
@@ -796,12 +823,14 @@ int __init icmpv6_init(struct net_proto_family *ops)
796 goto fail; 823 goto fail;
797 } 824 }
798 825
799 sk = per_cpu(__icmpv6_socket, i)->sk; 826 net->ipv6.icmp_sk[i] = sk = sock->sk;
827 sk_change_net(sk, net);
828
800 sk->sk_allocation = GFP_ATOMIC; 829 sk->sk_allocation = GFP_ATOMIC;
801 /* 830 /*
802 * Split off their lock-class, because sk->sk_dst_lock 831 * Split off their lock-class, because sk->sk_dst_lock
803 * gets used from softirqs, which is safe for 832 * gets used from softirqs, which is safe for
804 * __icmpv6_socket (because those never get directly used 833 * __icmpv6_sk (because those never get directly used
805 * via userspace syscalls), but unsafe for normal sockets. 834 * via userspace syscalls), but unsafe for normal sockets.
806 */ 835 */
807 lockdep_set_class(&sk->sk_dst_lock, 836 lockdep_set_class(&sk->sk_dst_lock,
@@ -815,36 +844,56 @@ int __init icmpv6_init(struct net_proto_family *ops)
815 844
816 sk->sk_prot->unhash(sk); 845 sk->sk_prot->unhash(sk);
817 } 846 }
818
819
820 if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) {
821 printk(KERN_ERR "Failed to register ICMP6 protocol\n");
822 err = -EAGAIN;
823 goto fail;
824 }
825
826 return 0; 847 return 0;
827 848
828 fail: 849 fail:
829 for (j = 0; j < i; j++) { 850 for (j = 0; j < i; j++)
830 if (!cpu_possible(j)) 851 sk_release_kernel(net->ipv6.icmp_sk[j]);
831 continue; 852 kfree(net->ipv6.icmp_sk);
832 sock_release(per_cpu(__icmpv6_socket, j));
833 }
834
835 return err; 853 return err;
836} 854}
837 855
838void icmpv6_cleanup(void) 856static void __net_exit icmpv6_sk_exit(struct net *net)
839{ 857{
840 int i; 858 int i;
841 859
842 for_each_possible_cpu(i) { 860 for_each_possible_cpu(i) {
843 sock_release(per_cpu(__icmpv6_socket, i)); 861 sk_release_kernel(net->ipv6.icmp_sk[i]);
844 } 862 }
863 kfree(net->ipv6.icmp_sk);
864}
865
866static struct pernet_operations icmpv6_sk_ops = {
867 .init = icmpv6_sk_init,
868 .exit = icmpv6_sk_exit,
869};
870
871int __init icmpv6_init(void)
872{
873 int err;
874
875 err = register_pernet_subsys(&icmpv6_sk_ops);
876 if (err < 0)
877 return err;
878
879 err = -EAGAIN;
880 if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
881 goto fail;
882 return 0;
883
884fail:
885 printk(KERN_ERR "Failed to register ICMP6 protocol\n");
886 unregister_pernet_subsys(&icmpv6_sk_ops);
887 return err;
888}
889
890void icmpv6_cleanup(void)
891{
892 unregister_pernet_subsys(&icmpv6_sk_ops);
845 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); 893 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
846} 894}
847 895
896
848static const struct icmp6_err { 897static const struct icmp6_err {
849 int err; 898 int err;
850 int fatal; 899 int fatal;
@@ -925,6 +974,10 @@ struct ctl_table *ipv6_icmp_sysctl_init(struct net *net)
925 table = kmemdup(ipv6_icmp_table_template, 974 table = kmemdup(ipv6_icmp_table_template,
926 sizeof(ipv6_icmp_table_template), 975 sizeof(ipv6_icmp_table_template),
927 GFP_KERNEL); 976 GFP_KERNEL);
977
978 if (table)
979 table[0].data = &net->ipv6.sysctl.icmpv6_time;
980
928 return table; 981 return table;
929} 982}
930#endif 983#endif
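icmpv6_sk_init() above replaces the global per-cpu control socket with an array of nr_cpu_ids kernel sockets hung off struct net, releasing only the sockets actually created if a later CPU fails. A sketch of that allocate-array-then-unwind shape is below; it omits the lockdep and sndbuf details, and the example_sk_init() name is a placeholder (net->ipv6.icmp_sk is the per-net slot this patch adds).

/*
 * Illustrative per-cpu kernel-socket array setup; error path frees only
 * what was actually created.
 */
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>

static int __net_init example_sk_init(struct net *net)
{
	struct sock **sks;
	int err, i, j;

	sks = kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
	if (!sks)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct socket *sock;

		err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
				       &sock);
		if (err < 0)
			goto fail;

		sks[i] = sock->sk;
		sk_change_net(sock->sk, net);	/* re-home into this netns */
		sock->sk->sk_allocation = GFP_ATOMIC;
	}

	net->ipv6.icmp_sk = sks;
	return 0;

fail:
	for (j = 0; j < i; j++)
		if (sks[j])			/* possible CPU ids may be sparse */
			sk_release_kernel(sks[j]);
	kfree(sks);
	return err;
}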
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bab72b6f1444..b0814b0082e7 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -48,8 +48,6 @@
48#define RT6_TRACE(x...) do { ; } while (0) 48#define RT6_TRACE(x...) do { ; } while (0)
49#endif 49#endif
50 50
51struct rt6_statistics rt6_stats;
52
53static struct kmem_cache * fib6_node_kmem __read_mostly; 51static struct kmem_cache * fib6_node_kmem __read_mostly;
54 52
55enum fib_walk_state_t 53enum fib_walk_state_t
@@ -66,6 +64,7 @@ enum fib_walk_state_t
66struct fib6_cleaner_t 64struct fib6_cleaner_t
67{ 65{
68 struct fib6_walker_t w; 66 struct fib6_walker_t w;
67 struct net *net;
69 int (*func)(struct rt6_info *, void *arg); 68 int (*func)(struct rt6_info *, void *arg);
70 void *arg; 69 void *arg;
71}; 70};
@@ -78,9 +77,10 @@ static DEFINE_RWLOCK(fib6_walker_lock);
78#define FWS_INIT FWS_L 77#define FWS_INIT FWS_L
79#endif 78#endif
80 79
81static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt); 80static void fib6_prune_clones(struct net *net, struct fib6_node *fn,
82static struct rt6_info * fib6_find_prefix(struct fib6_node *fn); 81 struct rt6_info *rt);
83static struct fib6_node * fib6_repair_tree(struct fib6_node *fn); 82static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
83static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
84static int fib6_walk(struct fib6_walker_t *w); 84static int fib6_walk(struct fib6_walker_t *w);
85static int fib6_walk_continue(struct fib6_walker_t *w); 85static int fib6_walk_continue(struct fib6_walker_t *w);
86 86
@@ -93,7 +93,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w);
93 93
94static __u32 rt_sernum; 94static __u32 rt_sernum;
95 95
96static DEFINE_TIMER(ip6_fib_timer, fib6_run_gc, 0, 0); 96static void fib6_gc_timer_cb(unsigned long arg);
97 97
98static struct fib6_walker_t fib6_walker_list = { 98static struct fib6_walker_t fib6_walker_list = {
99 .prev = &fib6_walker_list, 99 .prev = &fib6_walker_list,
@@ -166,22 +166,13 @@ static __inline__ void rt6_release(struct rt6_info *rt)
166 dst_free(&rt->u.dst); 166 dst_free(&rt->u.dst);
167} 167}
168 168
169static struct fib6_table fib6_main_tbl = {
170 .tb6_id = RT6_TABLE_MAIN,
171 .tb6_root = {
172 .leaf = &ip6_null_entry,
173 .fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO,
174 },
175};
176
177#ifdef CONFIG_IPV6_MULTIPLE_TABLES 169#ifdef CONFIG_IPV6_MULTIPLE_TABLES
178#define FIB_TABLE_HASHSZ 256 170#define FIB_TABLE_HASHSZ 256
179#else 171#else
180#define FIB_TABLE_HASHSZ 1 172#define FIB_TABLE_HASHSZ 1
181#endif 173#endif
182static struct hlist_head fib_table_hash[FIB_TABLE_HASHSZ];
183 174
184static void fib6_link_table(struct fib6_table *tb) 175static void fib6_link_table(struct net *net, struct fib6_table *tb)
185{ 176{
186 unsigned int h; 177 unsigned int h;
187 178
@@ -197,52 +188,46 @@ static void fib6_link_table(struct fib6_table *tb)
197 * No protection necessary, this is the only list mutatation 188 * No protection necessary, this is the only list mutatation
198 * operation, tables never disappear once they exist. 189 * operation, tables never disappear once they exist.
199 */ 190 */
200 hlist_add_head_rcu(&tb->tb6_hlist, &fib_table_hash[h]); 191 hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]);
201} 192}
202 193
203#ifdef CONFIG_IPV6_MULTIPLE_TABLES 194#ifdef CONFIG_IPV6_MULTIPLE_TABLES
204static struct fib6_table fib6_local_tbl = {
205 .tb6_id = RT6_TABLE_LOCAL,
206 .tb6_root = {
207 .leaf = &ip6_null_entry,
208 .fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO,
209 },
210};
211 195
212static struct fib6_table *fib6_alloc_table(u32 id) 196static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
213{ 197{
214 struct fib6_table *table; 198 struct fib6_table *table;
215 199
216 table = kzalloc(sizeof(*table), GFP_ATOMIC); 200 table = kzalloc(sizeof(*table), GFP_ATOMIC);
217 if (table != NULL) { 201 if (table != NULL) {
218 table->tb6_id = id; 202 table->tb6_id = id;
219 table->tb6_root.leaf = &ip6_null_entry; 203 table->tb6_root.leaf = net->ipv6.ip6_null_entry;
220 table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; 204 table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
221 } 205 }
222 206
223 return table; 207 return table;
224} 208}
225 209
226struct fib6_table *fib6_new_table(u32 id) 210struct fib6_table *fib6_new_table(struct net *net, u32 id)
227{ 211{
228 struct fib6_table *tb; 212 struct fib6_table *tb;
229 213
230 if (id == 0) 214 if (id == 0)
231 id = RT6_TABLE_MAIN; 215 id = RT6_TABLE_MAIN;
232 tb = fib6_get_table(id); 216 tb = fib6_get_table(net, id);
233 if (tb) 217 if (tb)
234 return tb; 218 return tb;
235 219
236 tb = fib6_alloc_table(id); 220 tb = fib6_alloc_table(net, id);
237 if (tb != NULL) 221 if (tb != NULL)
238 fib6_link_table(tb); 222 fib6_link_table(net, tb);
239 223
240 return tb; 224 return tb;
241} 225}
242 226
243struct fib6_table *fib6_get_table(u32 id) 227struct fib6_table *fib6_get_table(struct net *net, u32 id)
244{ 228{
245 struct fib6_table *tb; 229 struct fib6_table *tb;
230 struct hlist_head *head;
246 struct hlist_node *node; 231 struct hlist_node *node;
247 unsigned int h; 232 unsigned int h;
248 233
@@ -250,7 +235,8 @@ struct fib6_table *fib6_get_table(u32 id)
250 id = RT6_TABLE_MAIN; 235 id = RT6_TABLE_MAIN;
251 h = id & (FIB_TABLE_HASHSZ - 1); 236 h = id & (FIB_TABLE_HASHSZ - 1);
252 rcu_read_lock(); 237 rcu_read_lock();
253 hlist_for_each_entry_rcu(tb, node, &fib_table_hash[h], tb6_hlist) { 238 head = &net->ipv6.fib_table_hash[h];
239 hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
254 if (tb->tb6_id == id) { 240 if (tb->tb6_id == id) {
255 rcu_read_unlock(); 241 rcu_read_unlock();
256 return tb; 242 return tb;
@@ -261,33 +247,32 @@ struct fib6_table *fib6_get_table(u32 id)
261 return NULL; 247 return NULL;
262} 248}
263 249
264static void __init fib6_tables_init(void) 250static void fib6_tables_init(struct net *net)
265{ 251{
266 fib6_link_table(&fib6_main_tbl); 252 fib6_link_table(net, net->ipv6.fib6_main_tbl);
267 fib6_link_table(&fib6_local_tbl); 253 fib6_link_table(net, net->ipv6.fib6_local_tbl);
268} 254}
269
270#else 255#else
271 256
272struct fib6_table *fib6_new_table(u32 id) 257struct fib6_table *fib6_new_table(struct net *net, u32 id)
273{ 258{
274 return fib6_get_table(id); 259 return fib6_get_table(net, id);
275} 260}
276 261
277struct fib6_table *fib6_get_table(u32 id) 262struct fib6_table *fib6_get_table(struct net *net, u32 id)
278{ 263{
279 return &fib6_main_tbl; 264 return net->ipv6.fib6_main_tbl;
280} 265}
281 266
282struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags, 267struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
283 pol_lookup_t lookup) 268 int flags, pol_lookup_t lookup)
284{ 269{
285 return (struct dst_entry *) lookup(&fib6_main_tbl, fl, flags); 270 return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags);
286} 271}
287 272
288static void __init fib6_tables_init(void) 273static void fib6_tables_init(struct net *net)
289{ 274{
290 fib6_link_table(&fib6_main_tbl); 275 fib6_link_table(net, net->ipv6.fib6_main_tbl);
291} 276}
292 277
293#endif 278#endif
@@ -368,11 +353,9 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
368 struct fib6_walker_t *w; 353 struct fib6_walker_t *w;
369 struct fib6_table *tb; 354 struct fib6_table *tb;
370 struct hlist_node *node; 355 struct hlist_node *node;
356 struct hlist_head *head;
371 int res = 0; 357 int res = 0;
372 358
373 if (net != &init_net)
374 return 0;
375
376 s_h = cb->args[0]; 359 s_h = cb->args[0];
377 s_e = cb->args[1]; 360 s_e = cb->args[1];
378 361
@@ -401,7 +384,8 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
401 384
402 for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { 385 for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
403 e = 0; 386 e = 0;
404 hlist_for_each_entry(tb, node, &fib_table_hash[h], tb6_hlist) { 387 head = &net->ipv6.fib_table_hash[h];
388 hlist_for_each_entry(tb, node, head, tb6_hlist) {
405 if (e < s_e) 389 if (e < s_e)
406 goto next; 390 goto next;
407 res = fib6_dump_table(tb, skb, cb); 391 res = fib6_dump_table(tb, skb, cb);
@@ -667,29 +651,29 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
667 rt->rt6i_node = fn; 651 rt->rt6i_node = fn;
668 atomic_inc(&rt->rt6i_ref); 652 atomic_inc(&rt->rt6i_ref);
669 inet6_rt_notify(RTM_NEWROUTE, rt, info); 653 inet6_rt_notify(RTM_NEWROUTE, rt, info);
670 rt6_stats.fib_rt_entries++; 654 info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
671 655
672 if ((fn->fn_flags & RTN_RTINFO) == 0) { 656 if ((fn->fn_flags & RTN_RTINFO) == 0) {
673 rt6_stats.fib_route_nodes++; 657 info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
674 fn->fn_flags |= RTN_RTINFO; 658 fn->fn_flags |= RTN_RTINFO;
675 } 659 }
676 660
677 return 0; 661 return 0;
678} 662}
679 663
680static __inline__ void fib6_start_gc(struct rt6_info *rt) 664static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
681{ 665{
682 if (ip6_fib_timer.expires == 0 && 666 if (net->ipv6.ip6_fib_timer->expires == 0 &&
683 (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE))) 667 (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
684 mod_timer(&ip6_fib_timer, jiffies + 668 mod_timer(net->ipv6.ip6_fib_timer, jiffies +
685 init_net.ipv6.sysctl.ip6_rt_gc_interval); 669 net->ipv6.sysctl.ip6_rt_gc_interval);
686} 670}
687 671
688void fib6_force_start_gc(void) 672void fib6_force_start_gc(struct net *net)
689{ 673{
690 if (ip6_fib_timer.expires == 0) 674 if (net->ipv6.ip6_fib_timer->expires == 0)
691 mod_timer(&ip6_fib_timer, jiffies + 675 mod_timer(net->ipv6.ip6_fib_timer, jiffies +
692 init_net.ipv6.sysctl.ip6_rt_gc_interval); 676 net->ipv6.sysctl.ip6_rt_gc_interval);
693} 677}
694 678
695/* 679/*
@@ -733,8 +717,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
733 if (sfn == NULL) 717 if (sfn == NULL)
734 goto st_failure; 718 goto st_failure;
735 719
736 sfn->leaf = &ip6_null_entry; 720 sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
737 atomic_inc(&ip6_null_entry.rt6i_ref); 721 atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
738 sfn->fn_flags = RTN_ROOT; 722 sfn->fn_flags = RTN_ROOT;
739 sfn->fn_sernum = fib6_new_sernum(); 723 sfn->fn_sernum = fib6_new_sernum();
740 724
@@ -776,9 +760,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
776 err = fib6_add_rt2node(fn, rt, info); 760 err = fib6_add_rt2node(fn, rt, info);
777 761
778 if (err == 0) { 762 if (err == 0) {
779 fib6_start_gc(rt); 763 fib6_start_gc(info->nl_net, rt);
780 if (!(rt->rt6i_flags&RTF_CACHE)) 764 if (!(rt->rt6i_flags&RTF_CACHE))
781 fib6_prune_clones(pn, rt); 765 fib6_prune_clones(info->nl_net, pn, rt);
782 } 766 }
783 767
784out: 768out:
@@ -789,11 +773,11 @@ out:
789 * super-tree leaf node we have to find a new one for it. 773 * super-tree leaf node we have to find a new one for it.
790 */ 774 */
791 if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) { 775 if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) {
792 pn->leaf = fib6_find_prefix(pn); 776 pn->leaf = fib6_find_prefix(info->nl_net, pn);
793#if RT6_DEBUG >= 2 777#if RT6_DEBUG >= 2
794 if (!pn->leaf) { 778 if (!pn->leaf) {
795 BUG_TRAP(pn->leaf != NULL); 779 BUG_TRAP(pn->leaf != NULL);
796 pn->leaf = &ip6_null_entry; 780 pn->leaf = info->nl_net->ipv6.ip6_null_entry;
797 } 781 }
798#endif 782#endif
799 atomic_inc(&pn->leaf->rt6i_ref); 783 atomic_inc(&pn->leaf->rt6i_ref);
@@ -809,7 +793,7 @@ out:
809 */ 793 */
810st_failure: 794st_failure:
811 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) 795 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
812 fib6_repair_tree(fn); 796 fib6_repair_tree(info->nl_net, fn);
813 dst_free(&rt->u.dst); 797 dst_free(&rt->u.dst);
814 return err; 798 return err;
815#endif 799#endif
@@ -975,10 +959,10 @@ struct fib6_node * fib6_locate(struct fib6_node *root,
975 * 959 *
976 */ 960 */
977 961
978static struct rt6_info * fib6_find_prefix(struct fib6_node *fn) 962static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn)
979{ 963{
980 if (fn->fn_flags&RTN_ROOT) 964 if (fn->fn_flags&RTN_ROOT)
981 return &ip6_null_entry; 965 return net->ipv6.ip6_null_entry;
982 966
983 while(fn) { 967 while(fn) {
984 if(fn->left) 968 if(fn->left)
@@ -997,7 +981,8 @@ static struct rt6_info * fib6_find_prefix(struct fib6_node *fn)
997 * is the node we want to try and remove. 981 * is the node we want to try and remove.
998 */ 982 */
999 983
1000static struct fib6_node * fib6_repair_tree(struct fib6_node *fn) 984static struct fib6_node *fib6_repair_tree(struct net *net,
985 struct fib6_node *fn)
1001{ 986{
1002 int children; 987 int children;
1003 int nstate; 988 int nstate;
@@ -1024,11 +1009,11 @@ static struct fib6_node * fib6_repair_tree(struct fib6_node *fn)
1024 || (children && fn->fn_flags&RTN_ROOT) 1009 || (children && fn->fn_flags&RTN_ROOT)
1025#endif 1010#endif
1026 ) { 1011 ) {
1027 fn->leaf = fib6_find_prefix(fn); 1012 fn->leaf = fib6_find_prefix(net, fn);
1028#if RT6_DEBUG >= 2 1013#if RT6_DEBUG >= 2
1029 if (fn->leaf==NULL) { 1014 if (fn->leaf==NULL) {
1030 BUG_TRAP(fn->leaf); 1015 BUG_TRAP(fn->leaf);
1031 fn->leaf = &ip6_null_entry; 1016 fn->leaf = net->ipv6.ip6_null_entry;
1032 } 1017 }
1033#endif 1018#endif
1034 atomic_inc(&fn->leaf->rt6i_ref); 1019 atomic_inc(&fn->leaf->rt6i_ref);
@@ -1101,14 +1086,15 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1101{ 1086{
1102 struct fib6_walker_t *w; 1087 struct fib6_walker_t *w;
1103 struct rt6_info *rt = *rtp; 1088 struct rt6_info *rt = *rtp;
1089 struct net *net = info->nl_net;
1104 1090
1105 RT6_TRACE("fib6_del_route\n"); 1091 RT6_TRACE("fib6_del_route\n");
1106 1092
1107 /* Unlink it */ 1093 /* Unlink it */
1108 *rtp = rt->u.dst.rt6_next; 1094 *rtp = rt->u.dst.rt6_next;
1109 rt->rt6i_node = NULL; 1095 rt->rt6i_node = NULL;
1110 rt6_stats.fib_rt_entries--; 1096 net->ipv6.rt6_stats->fib_rt_entries--;
1111 rt6_stats.fib_discarded_routes++; 1097 net->ipv6.rt6_stats->fib_discarded_routes++;
1112 1098
1113 /* Reset round-robin state, if necessary */ 1099 /* Reset round-robin state, if necessary */
1114 if (fn->rr_ptr == rt) 1100 if (fn->rr_ptr == rt)
@@ -1131,8 +1117,8 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1131 /* If it was last route, expunge its radix tree node */ 1117 /* If it was last route, expunge its radix tree node */
1132 if (fn->leaf == NULL) { 1118 if (fn->leaf == NULL) {
1133 fn->fn_flags &= ~RTN_RTINFO; 1119 fn->fn_flags &= ~RTN_RTINFO;
1134 rt6_stats.fib_route_nodes--; 1120 net->ipv6.rt6_stats->fib_route_nodes--;
1135 fn = fib6_repair_tree(fn); 1121 fn = fib6_repair_tree(net, fn);
1136 } 1122 }
1137 1123
1138 if (atomic_read(&rt->rt6i_ref) != 1) { 1124 if (atomic_read(&rt->rt6i_ref) != 1) {
@@ -1144,7 +1130,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1144 */ 1130 */
1145 while (fn) { 1131 while (fn) {
1146 if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) { 1132 if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) {
1147 fn->leaf = fib6_find_prefix(fn); 1133 fn->leaf = fib6_find_prefix(net, fn);
1148 atomic_inc(&fn->leaf->rt6i_ref); 1134 atomic_inc(&fn->leaf->rt6i_ref);
1149 rt6_release(rt); 1135 rt6_release(rt);
1150 } 1136 }
@@ -1160,6 +1146,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1160 1146
1161int fib6_del(struct rt6_info *rt, struct nl_info *info) 1147int fib6_del(struct rt6_info *rt, struct nl_info *info)
1162{ 1148{
1149 struct net *net = info->nl_net;
1163 struct fib6_node *fn = rt->rt6i_node; 1150 struct fib6_node *fn = rt->rt6i_node;
1164 struct rt6_info **rtp; 1151 struct rt6_info **rtp;
1165 1152
@@ -1169,7 +1156,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
1169 return -ENOENT; 1156 return -ENOENT;
1170 } 1157 }
1171#endif 1158#endif
1172 if (fn == NULL || rt == &ip6_null_entry) 1159 if (fn == NULL || rt == net->ipv6.ip6_null_entry)
1173 return -ENOENT; 1160 return -ENOENT;
1174 1161
1175 BUG_TRAP(fn->fn_flags&RTN_RTINFO); 1162 BUG_TRAP(fn->fn_flags&RTN_RTINFO);
@@ -1184,7 +1171,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
1184 pn = pn->parent; 1171 pn = pn->parent;
1185 } 1172 }
1186#endif 1173#endif
1187 fib6_prune_clones(pn, rt); 1174 fib6_prune_clones(info->nl_net, pn, rt);
1188 } 1175 }
1189 1176
1190 /* 1177 /*
@@ -1314,12 +1301,12 @@ static int fib6_walk(struct fib6_walker_t *w)
1314 1301
1315static int fib6_clean_node(struct fib6_walker_t *w) 1302static int fib6_clean_node(struct fib6_walker_t *w)
1316{ 1303{
1317 struct nl_info info = {
1318 .nl_net = &init_net,
1319 };
1320 int res; 1304 int res;
1321 struct rt6_info *rt; 1305 struct rt6_info *rt;
1322 struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w); 1306 struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w);
1307 struct nl_info info = {
1308 .nl_net = c->net,
1309 };
1323 1310
1324 for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { 1311 for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
1325 res = c->func(rt, c->arg); 1312 res = c->func(rt, c->arg);
@@ -1351,7 +1338,7 @@ static int fib6_clean_node(struct fib6_walker_t *w)
1351 * ignoring pure split nodes) will be scanned. 1338 * ignoring pure split nodes) will be scanned.
1352 */ 1339 */
1353 1340
1354static void fib6_clean_tree(struct fib6_node *root, 1341static void fib6_clean_tree(struct net *net, struct fib6_node *root,
1355 int (*func)(struct rt6_info *, void *arg), 1342 int (*func)(struct rt6_info *, void *arg),
1356 int prune, void *arg) 1343 int prune, void *arg)
1357{ 1344{
@@ -1362,23 +1349,26 @@ static void fib6_clean_tree(struct fib6_node *root,
1362 c.w.prune = prune; 1349 c.w.prune = prune;
1363 c.func = func; 1350 c.func = func;
1364 c.arg = arg; 1351 c.arg = arg;
1352 c.net = net;
1365 1353
1366 fib6_walk(&c.w); 1354 fib6_walk(&c.w);
1367} 1355}
1368 1356
1369void fib6_clean_all(int (*func)(struct rt6_info *, void *arg), 1357void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
1370 int prune, void *arg) 1358 int prune, void *arg)
1371{ 1359{
1372 struct fib6_table *table; 1360 struct fib6_table *table;
1373 struct hlist_node *node; 1361 struct hlist_node *node;
1362 struct hlist_head *head;
1374 unsigned int h; 1363 unsigned int h;
1375 1364
1376 rcu_read_lock(); 1365 rcu_read_lock();
1377 for (h = 0; h < FIB_TABLE_HASHSZ; h++) { 1366 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
1378 hlist_for_each_entry_rcu(table, node, &fib_table_hash[h], 1367 head = &net->ipv6.fib_table_hash[h];
1379 tb6_hlist) { 1368 hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
1380 write_lock_bh(&table->tb6_lock); 1369 write_lock_bh(&table->tb6_lock);
1381 fib6_clean_tree(&table->tb6_root, func, prune, arg); 1370 fib6_clean_tree(net, &table->tb6_root,
1371 func, prune, arg);
1382 write_unlock_bh(&table->tb6_lock); 1372 write_unlock_bh(&table->tb6_lock);
1383 } 1373 }
1384 } 1374 }
@@ -1395,9 +1385,10 @@ static int fib6_prune_clone(struct rt6_info *rt, void *arg)
1395 return 0; 1385 return 0;
1396} 1386}
1397 1387
1398static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt) 1388static void fib6_prune_clones(struct net *net, struct fib6_node *fn,
1389 struct rt6_info *rt)
1399{ 1390{
1400 fib6_clean_tree(fn, fib6_prune_clone, 1, rt); 1391 fib6_clean_tree(net, fn, fib6_prune_clone, 1, rt);
1401} 1392}
1402 1393
1403/* 1394/*
@@ -1447,54 +1438,145 @@ static int fib6_age(struct rt6_info *rt, void *arg)
1447 1438
1448static DEFINE_SPINLOCK(fib6_gc_lock); 1439static DEFINE_SPINLOCK(fib6_gc_lock);
1449 1440
1450void fib6_run_gc(unsigned long dummy) 1441void fib6_run_gc(unsigned long expires, struct net *net)
1451{ 1442{
1452 if (dummy != ~0UL) { 1443 if (expires != ~0UL) {
1453 spin_lock_bh(&fib6_gc_lock); 1444 spin_lock_bh(&fib6_gc_lock);
1454 gc_args.timeout = dummy ? (int)dummy : 1445 gc_args.timeout = expires ? (int)expires :
1455 init_net.ipv6.sysctl.ip6_rt_gc_interval; 1446 net->ipv6.sysctl.ip6_rt_gc_interval;
1456 } else { 1447 } else {
1457 local_bh_disable(); 1448 local_bh_disable();
1458 if (!spin_trylock(&fib6_gc_lock)) { 1449 if (!spin_trylock(&fib6_gc_lock)) {
1459 mod_timer(&ip6_fib_timer, jiffies + HZ); 1450 mod_timer(net->ipv6.ip6_fib_timer, jiffies + HZ);
1460 local_bh_enable(); 1451 local_bh_enable();
1461 return; 1452 return;
1462 } 1453 }
1463 gc_args.timeout = init_net.ipv6.sysctl.ip6_rt_gc_interval; 1454 gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
1464 } 1455 }
1465 gc_args.more = 0; 1456 gc_args.more = 0;
1466 1457
1467 ndisc_dst_gc(&gc_args.more); 1458 icmp6_dst_gc(&gc_args.more);
1468 fib6_clean_all(fib6_age, 0, NULL); 1459
1460 fib6_clean_all(net, fib6_age, 0, NULL);
1469 1461
1470 if (gc_args.more) 1462 if (gc_args.more)
1471 mod_timer(&ip6_fib_timer, jiffies + 1463 mod_timer(net->ipv6.ip6_fib_timer, jiffies +
1472 init_net.ipv6.sysctl.ip6_rt_gc_interval); 1464 net->ipv6.sysctl.ip6_rt_gc_interval);
1473 else { 1465 else {
1474 del_timer(&ip6_fib_timer); 1466 del_timer(net->ipv6.ip6_fib_timer);
1475 ip6_fib_timer.expires = 0; 1467 net->ipv6.ip6_fib_timer->expires = 0;
1476 } 1468 }
1477 spin_unlock_bh(&fib6_gc_lock); 1469 spin_unlock_bh(&fib6_gc_lock);
1478} 1470}
1479 1471
1480int __init fib6_init(void) 1472static void fib6_gc_timer_cb(unsigned long arg)
1473{
1474 fib6_run_gc(0, (struct net *)arg);
1475}
1476
1477static int fib6_net_init(struct net *net)
1481{ 1478{
1482 int ret; 1479 int ret;
1480 struct timer_list *timer;
1481
1482 ret = -ENOMEM;
1483 timer = kzalloc(sizeof(*timer), GFP_KERNEL);
1484 if (!timer)
1485 goto out;
1486
1487 setup_timer(timer, fib6_gc_timer_cb, (unsigned long)net);
1488 net->ipv6.ip6_fib_timer = timer;
1489
1490 net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
1491 if (!net->ipv6.rt6_stats)
1492 goto out_timer;
1493
1494 net->ipv6.fib_table_hash =
1495 kzalloc(sizeof(*net->ipv6.fib_table_hash)*FIB_TABLE_HASHSZ,
1496 GFP_KERNEL);
1497 if (!net->ipv6.fib_table_hash)
1498 goto out_rt6_stats;
1499
1500 net->ipv6.fib6_main_tbl = kzalloc(sizeof(*net->ipv6.fib6_main_tbl),
1501 GFP_KERNEL);
1502 if (!net->ipv6.fib6_main_tbl)
1503 goto out_fib_table_hash;
1504
1505 net->ipv6.fib6_main_tbl->tb6_id = RT6_TABLE_MAIN;
1506 net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
1507 net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
1508 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
1509
1510#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1511 net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
1512 GFP_KERNEL);
1513 if (!net->ipv6.fib6_local_tbl)
1514 goto out_fib6_main_tbl;
1515 net->ipv6.fib6_local_tbl->tb6_id = RT6_TABLE_LOCAL;
1516 net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
1517 net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
1518 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
1519#endif
1520 fib6_tables_init(net);
1521
1522 ret = 0;
1523out:
1524 return ret;
1525
1526#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1527out_fib6_main_tbl:
1528 kfree(net->ipv6.fib6_main_tbl);
1529#endif
1530out_fib_table_hash:
1531 kfree(net->ipv6.fib_table_hash);
1532out_rt6_stats:
1533 kfree(net->ipv6.rt6_stats);
1534out_timer:
1535 kfree(timer);
1536 goto out;
1537 }
1538
1539static void fib6_net_exit(struct net *net)
1540{
1541 rt6_ifdown(net, NULL);
1542 del_timer(net->ipv6.ip6_fib_timer);
1543 kfree(net->ipv6.ip6_fib_timer);
1544#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1545 kfree(net->ipv6.fib6_local_tbl);
1546#endif
1547 kfree(net->ipv6.fib6_main_tbl);
1548 kfree(net->ipv6.fib_table_hash);
1549 kfree(net->ipv6.rt6_stats);
1550}
1551
1552static struct pernet_operations fib6_net_ops = {
1553 .init = fib6_net_init,
1554 .exit = fib6_net_exit,
1555};
1556
1557int __init fib6_init(void)
1558{
1559 int ret = -ENOMEM;
1560
1483 fib6_node_kmem = kmem_cache_create("fib6_nodes", 1561 fib6_node_kmem = kmem_cache_create("fib6_nodes",
1484 sizeof(struct fib6_node), 1562 sizeof(struct fib6_node),
1485 0, SLAB_HWCACHE_ALIGN, 1563 0, SLAB_HWCACHE_ALIGN,
1486 NULL); 1564 NULL);
1487 if (!fib6_node_kmem) 1565 if (!fib6_node_kmem)
1488 return -ENOMEM; 1566 goto out;
1489 1567
1490 fib6_tables_init(); 1568 ret = register_pernet_subsys(&fib6_net_ops);
1569 if (ret)
1570 goto out_kmem_cache_create;
1491 1571
1492 ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib); 1572 ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib);
1493 if (ret) 1573 if (ret)
1494 goto out_kmem_cache_create; 1574 goto out_unregister_subsys;
1495out: 1575out:
1496 return ret; 1576 return ret;
1497 1577
1578out_unregister_subsys:
1579 unregister_pernet_subsys(&fib6_net_ops);
1498out_kmem_cache_create: 1580out_kmem_cache_create:
1499 kmem_cache_destroy(fib6_node_kmem); 1581 kmem_cache_destroy(fib6_node_kmem);
1500 goto out; 1582 goto out;
@@ -1502,6 +1584,6 @@ out_kmem_cache_create:
1502 1584
1503void fib6_gc_cleanup(void) 1585void fib6_gc_cleanup(void)
1504{ 1586{
1505 del_timer(&ip6_fib_timer); 1587 unregister_pernet_subsys(&fib6_net_ops);
1506 kmem_cache_destroy(fib6_node_kmem); 1588 kmem_cache_destroy(fib6_node_kmem);
1507} 1589}
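fib6_net_init() above gives every namespace its own GC timer: the struct timer_list is allocated per net and the namespace pointer rides in the timer's data word, so fib6_gc_timer_cb() can recover which net it is collecting for. A trimmed sketch of that per-net timer pattern, with bar_* placeholder names (net->ipv6.ip6_fib_timer is the slot introduced by this patch):

/* Illustrative per-net timer setup; bar_* identifiers are hypothetical. */
#include <linux/timer.h>
#include <linux/slab.h>
#include <net/net_namespace.h>

static void bar_gc_timer_cb(unsigned long arg)
{
	struct net *net = (struct net *)arg;

	/* run per-namespace garbage collection for 'net' here */
	(void)net;
}

static int __net_init bar_net_init(struct net *net)
{
	struct timer_list *timer;

	timer = kzalloc(sizeof(*timer), GFP_KERNEL);
	if (!timer)
		return -ENOMEM;

	/* Stash the namespace in the timer's data word for the callback. */
	setup_timer(timer, bar_gc_timer_cb, (unsigned long)net);
	net->ipv6.ip6_fib_timer = timer;
	return 0;
}

static void __net_exit bar_net_exit(struct net *net)
{
	del_timer(net->ipv6.ip6_fib_timer);	/* stop it before freeing */
	kfree(net->ipv6.ip6_fib_timer);
}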
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 178aebc0427a..7e36269826ba 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -61,11 +61,6 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
61 u32 pkt_len; 61 u32 pkt_len;
62 struct inet6_dev *idev; 62 struct inet6_dev *idev;
63 63
64 if (dev->nd_net != &init_net) {
65 kfree_skb(skb);
66 return 0;
67 }
68
69 if (skb->pkt_type == PACKET_OTHERHOST) { 64 if (skb->pkt_type == PACKET_OTHERHOST) {
70 kfree_skb(skb); 65 kfree_skb(skb);
71 return 0; 66 return 0;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 8b67ca07467d..937018529d18 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -596,7 +596,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
596 596
597 return offset; 597 return offset;
598} 598}
599EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
600 599
601static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 600static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
602{ 601{
@@ -914,13 +913,14 @@ static int ip6_dst_lookup_tail(struct sock *sk,
914 int err; 913 int err;
915 914
916 if (*dst == NULL) 915 if (*dst == NULL)
917 *dst = ip6_route_output(sk, fl); 916 *dst = ip6_route_output(sk->sk_net, sk, fl);
918 917
919 if ((err = (*dst)->error)) 918 if ((err = (*dst)->error))
920 goto out_err_release; 919 goto out_err_release;
921 920
922 if (ipv6_addr_any(&fl->fl6_src)) { 921 if (ipv6_addr_any(&fl->fl6_src)) {
923 err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src); 922 err = ipv6_dev_get_saddr(ip6_dst_idev(*dst)->dev,
923 &fl->fl6_dst, &fl->fl6_src);
924 if (err) 924 if (err)
925 goto out_err_release; 925 goto out_err_release;
926 } 926 }
@@ -954,7 +954,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
954 dst_release(*dst); 954 dst_release(*dst);
955 memcpy(&fl_gw, fl, sizeof(struct flowi)); 955 memcpy(&fl_gw, fl, sizeof(struct flowi));
956 memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr)); 956 memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
957 *dst = ip6_route_output(sk, &fl_gw); 957 *dst = ip6_route_output(sk->sk_net, sk, &fl_gw);
958 if ((err = (*dst)->error)) 958 if ((err = (*dst)->error))
959 goto out_err_release; 959 goto out_err_release;
960 } 960 }
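ip6_route_output() now takes the namespace explicitly, so every caller has to say which struct net it means; ip6_dst_lookup_tail() above takes it from sk->sk_net. A hedged sketch of what such a caller looks like (example_route() is illustrative, not part of the patch):

static struct dst_entry *example_route(struct sock *sk, struct flowi *fl)
{
	/* Socket-driven callers use sk->sk_net; device-driven callers
	 * would pass dev->nd_net instead. */
	struct dst_entry *dst = ip6_route_output(sk->sk_net, sk, fl);

	if (dst->error) {		/* lookup failures come back as a dst */
		dst_release(dst);
		return NULL;
	}
	return dst;
}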
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 78f438880923..1e1ad1ed87e6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -602,7 +602,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
602 skb_reset_network_header(skb2); 602 skb_reset_network_header(skb2);
603 603
604 /* Try to guess incoming interface */ 604 /* Try to guess incoming interface */
605 rt = rt6_lookup(&ipv6_hdr(skb2)->saddr, NULL, 0, 0); 605 rt = rt6_lookup(&init_net, &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
606 606
607 if (rt && rt->rt6i_dev) 607 if (rt && rt->rt6i_dev)
608 skb2->dev = rt->rt6i_dev; 608 skb2->dev = rt->rt6i_dev;
@@ -847,7 +847,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
847 if ((dst = ip6_tnl_dst_check(t)) != NULL) 847 if ((dst = ip6_tnl_dst_check(t)) != NULL)
848 dst_hold(dst); 848 dst_hold(dst);
849 else { 849 else {
850 dst = ip6_route_output(NULL, fl); 850 dst = ip6_route_output(&init_net, NULL, fl);
851 851
852 if (dst->error || xfrm_lookup(&dst, fl, NULL, 0) < 0) 852 if (dst->error || xfrm_lookup(&dst, fl, NULL, 0) < 0)
853 goto tx_err_link_failure; 853 goto tx_err_link_failure;
@@ -1112,7 +1112,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1112 int strict = (ipv6_addr_type(&p->raddr) & 1112 int strict = (ipv6_addr_type(&p->raddr) &
1113 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); 1113 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1114 1114
1115 struct rt6_info *rt = rt6_lookup(&p->raddr, &p->laddr, 1115 struct rt6_info *rt = rt6_lookup(&init_net, &p->raddr, &p->laddr,
1116 p->link, strict); 1116 p->link, strict);
1117 1117
1118 if (rt == NULL) 1118 if (rt == NULL)
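The tunnel code above still hard-codes &init_net because ip6_tunnel is not namespace-aware at this point in the series. A namespace-aware caller of rt6_lookup() would take the net from the packet's device instead, roughly as in this illustrative sketch mirroring the "guess incoming interface" logic of ip6ip6_err():

static void example_set_iif(struct sk_buff *skb)
{
	struct net *net = skb->dev->nd_net;	/* instead of &init_net */
	struct rt6_info *rt;

	rt = rt6_lookup(net, &ipv6_hdr(skb)->saddr, NULL, 0, 0);
	if (rt && rt->rt6i_dev)
		skb->dev = rt->rt6i_dev;
	if (rt)
		dst_release(&rt->u.dst);
}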
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index bf2a686aa13d..3bbfdff698d2 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -57,118 +57,6 @@
57 57
58DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; 58DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
59 59
60static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
61 int proto)
62{
63 struct inet6_protocol *ops = NULL;
64
65 for (;;) {
66 struct ipv6_opt_hdr *opth;
67 int len;
68
69 if (proto != NEXTHDR_HOP) {
70 ops = rcu_dereference(inet6_protos[proto]);
71
72 if (unlikely(!ops))
73 break;
74
75 if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
76 break;
77 }
78
79 if (unlikely(!pskb_may_pull(skb, 8)))
80 break;
81
82 opth = (void *)skb->data;
83 len = opth->hdrlen * 8 + 8;
84
85 if (unlikely(!pskb_may_pull(skb, len)))
86 break;
87
88 proto = opth->nexthdr;
89 __skb_pull(skb, len);
90 }
91
92 return ops;
93}
94
95static int ipv6_gso_send_check(struct sk_buff *skb)
96{
97 struct ipv6hdr *ipv6h;
98 struct inet6_protocol *ops;
99 int err = -EINVAL;
100
101 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
102 goto out;
103
104 ipv6h = ipv6_hdr(skb);
105 __skb_pull(skb, sizeof(*ipv6h));
106 err = -EPROTONOSUPPORT;
107
108 rcu_read_lock();
109 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
110 if (likely(ops && ops->gso_send_check)) {
111 skb_reset_transport_header(skb);
112 err = ops->gso_send_check(skb);
113 }
114 rcu_read_unlock();
115
116out:
117 return err;
118}
119
120static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
121{
122 struct sk_buff *segs = ERR_PTR(-EINVAL);
123 struct ipv6hdr *ipv6h;
124 struct inet6_protocol *ops;
125
126 if (!(features & NETIF_F_V6_CSUM))
127 features &= ~NETIF_F_SG;
128
129 if (unlikely(skb_shinfo(skb)->gso_type &
130 ~(SKB_GSO_UDP |
131 SKB_GSO_DODGY |
132 SKB_GSO_TCP_ECN |
133 SKB_GSO_TCPV6 |
134 0)))
135 goto out;
136
137 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
138 goto out;
139
140 ipv6h = ipv6_hdr(skb);
141 __skb_pull(skb, sizeof(*ipv6h));
142 segs = ERR_PTR(-EPROTONOSUPPORT);
143
144 rcu_read_lock();
145 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
146 if (likely(ops && ops->gso_segment)) {
147 skb_reset_transport_header(skb);
148 segs = ops->gso_segment(skb, features);
149 }
150 rcu_read_unlock();
151
152 if (unlikely(IS_ERR(segs)))
153 goto out;
154
155 for (skb = segs; skb; skb = skb->next) {
156 ipv6h = ipv6_hdr(skb);
157 ipv6h->payload_len = htons(skb->len - skb->mac_len -
158 sizeof(*ipv6h));
159 }
160
161out:
162 return segs;
163}
164
165static struct packet_type ipv6_packet_type = {
166 .type = __constant_htons(ETH_P_IPV6),
167 .func = ipv6_rcv,
168 .gso_send_check = ipv6_gso_send_check,
169 .gso_segment = ipv6_gso_segment,
170};
171
172struct ip6_ra_chain *ip6_ra_chain; 60struct ip6_ra_chain *ip6_ra_chain;
173DEFINE_RWLOCK(ip6_ra_lock); 61DEFINE_RWLOCK(ip6_ra_lock);
174 62
@@ -239,7 +127,9 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
239 struct sk_buff *pktopt; 127 struct sk_buff *pktopt;
240 128
241 if (sk->sk_protocol != IPPROTO_UDP && 129 if (sk->sk_protocol != IPPROTO_UDP &&
130#ifdef CONFIG_IP_UDPLITE
242 sk->sk_protocol != IPPROTO_UDPLITE && 131 sk->sk_protocol != IPPROTO_UDPLITE &&
132#endif
243 sk->sk_protocol != IPPROTO_TCP) 133 sk->sk_protocol != IPPROTO_TCP)
244 break; 134 break;
245 135
@@ -279,7 +169,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
279 } else { 169 } else {
280 struct proto *prot = &udp_prot; 170 struct proto *prot = &udp_prot;
281 171
282 if (sk->sk_protocol == IPPROTO_UDPLITE) 172 if (IS_PROTO_UDPLITE(sk->sk_protocol))
283 prot = &udplite_prot; 173 prot = &udplite_prot;
284 local_bh_disable(); 174 local_bh_disable();
285 sock_prot_inuse_add(sk->sk_prot, -1); 175 sock_prot_inuse_add(sk->sk_prot, -1);
@@ -844,7 +734,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
844 switch (optname) { 734 switch (optname) {
845 case IPV6_ADDRFORM: 735 case IPV6_ADDRFORM:
846 if (sk->sk_protocol != IPPROTO_UDP && 736 if (sk->sk_protocol != IPPROTO_UDP &&
737#ifdef CONFIG_IP_UDPLITE
847 sk->sk_protocol != IPPROTO_UDPLITE && 738 sk->sk_protocol != IPPROTO_UDPLITE &&
739#endif
848 sk->sk_protocol != IPPROTO_TCP) 740 sk->sk_protocol != IPPROTO_TCP)
849 return -EINVAL; 741 return -EINVAL;
850 if (sk->sk_state != TCP_ESTABLISHED) 742 if (sk->sk_state != TCP_ESTABLISHED)
@@ -1128,13 +1020,3 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
1128EXPORT_SYMBOL(compat_ipv6_getsockopt); 1020EXPORT_SYMBOL(compat_ipv6_getsockopt);
1129#endif 1021#endif
1130 1022
1131int __init ipv6_packet_init(void)
1132{
1133 dev_add_pack(&ipv6_packet_type);
1134 return 0;
1135}
1136
1137void ipv6_packet_cleanup(void)
1138{
1139 dev_remove_pack(&ipv6_packet_type);
1140}
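The IS_PROTO_UDPLITE() helper used above is defined in a header outside this diff. A plausible definition, shown here only as a hedged sketch, collapses to 0 when CONFIG_IP_UDPLITE is disabled so the udplite branches are optimised away without an #ifdef at every call site:

/* Hedged sketch only -- the real macro lives in a header not shown here. */
#ifdef CONFIG_IP_UDPLITE
#define IS_PROTO_UDPLITE(__proto)	((__proto) == IPPROTO_UDPLITE)
#else
#define IS_PROTO_UDPLITE(__proto)	0
#endif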
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index ab228d1ea114..197ca390a15d 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -208,7 +208,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, struct in6_addr *addr)
208 208
209 if (ifindex == 0) { 209 if (ifindex == 0) {
210 struct rt6_info *rt; 210 struct rt6_info *rt;
211 rt = rt6_lookup(addr, NULL, 0, 0); 211 rt = rt6_lookup(&init_net, addr, NULL, 0, 0);
212 if (rt) { 212 if (rt) {
213 dev = rt->rt6i_dev; 213 dev = rt->rt6i_dev;
214 dev_hold(dev); 214 dev_hold(dev);
@@ -294,7 +294,7 @@ static struct inet6_dev *ip6_mc_find_dev(struct in6_addr *group, int ifindex)
294 if (ifindex == 0) { 294 if (ifindex == 0) {
295 struct rt6_info *rt; 295 struct rt6_info *rt;
296 296
297 rt = rt6_lookup(group, NULL, 0, 0); 297 rt = rt6_lookup(&init_net, group, NULL, 0, 0);
298 if (rt) { 298 if (rt) {
299 dev = rt->rt6i_dev; 299 dev = rt->rt6i_dev;
300 dev_hold(dev); 300 dev_hold(dev);
@@ -1433,25 +1433,6 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
1433 return skb; 1433 return skb;
1434} 1434}
1435 1435
1436static inline int mld_dev_queue_xmit2(struct sk_buff *skb)
1437{
1438 struct net_device *dev = skb->dev;
1439 unsigned char ha[MAX_ADDR_LEN];
1440
1441 ndisc_mc_map(&ipv6_hdr(skb)->daddr, ha, dev, 1);
1442 if (dev_hard_header(skb, dev, ETH_P_IPV6, ha, NULL, skb->len) < 0) {
1443 kfree_skb(skb);
1444 return -EINVAL;
1445 }
1446 return dev_queue_xmit(skb);
1447}
1448
1449static inline int mld_dev_queue_xmit(struct sk_buff *skb)
1450{
1451 return NF_HOOK(PF_INET6, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
1452 mld_dev_queue_xmit2);
1453}
1454
1455static void mld_sendpack(struct sk_buff *skb) 1436static void mld_sendpack(struct sk_buff *skb)
1456{ 1437{
1457 struct ipv6hdr *pip6 = ipv6_hdr(skb); 1438 struct ipv6hdr *pip6 = ipv6_hdr(skb);
@@ -1460,6 +1441,7 @@ static void mld_sendpack(struct sk_buff *skb)
1460 int payload_len, mldlen; 1441 int payload_len, mldlen;
1461 struct inet6_dev *idev = in6_dev_get(skb->dev); 1442 struct inet6_dev *idev = in6_dev_get(skb->dev);
1462 int err; 1443 int err;
1444 struct flowi fl;
1463 1445
1464 IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); 1446 IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);
1465 payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); 1447 payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
@@ -1469,8 +1451,25 @@ static void mld_sendpack(struct sk_buff *skb)
1469 pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen, 1451 pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1470 IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb), 1452 IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb),
1471 mldlen, 0)); 1453 mldlen, 0));
1454
1455 skb->dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
1456
1457 if (!skb->dst) {
1458 err = -ENOMEM;
1459 goto err_out;
1460 }
1461
1462 icmpv6_flow_init(igmp6_socket->sk, &fl, ICMPV6_MLD2_REPORT,
1463 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1464 skb->dev->ifindex);
1465
1466 err = xfrm_lookup(&skb->dst, &fl, NULL, 0);
1467 if (err)
1468 goto err_out;
1469
1472 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, 1470 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1473 mld_dev_queue_xmit); 1471 dst_output);
1472out:
1474 if (!err) { 1473 if (!err) {
1475 ICMP6MSGOUT_INC_STATS_BH(idev, ICMPV6_MLD2_REPORT); 1474 ICMP6MSGOUT_INC_STATS_BH(idev, ICMPV6_MLD2_REPORT);
1476 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS); 1475 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
@@ -1480,6 +1479,11 @@ static void mld_sendpack(struct sk_buff *skb)
1480 1479
1481 if (likely(idev != NULL)) 1480 if (likely(idev != NULL))
1482 in6_dev_put(idev); 1481 in6_dev_put(idev);
1482 return;
1483
1484err_out:
1485 kfree_skb(skb);
1486 goto out;
1483} 1487}
1484 1488
1485static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) 1489static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
@@ -1761,6 +1765,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1761 u8 ra[8] = { IPPROTO_ICMPV6, 0, 1765 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1762 IPV6_TLV_ROUTERALERT, 2, 0, 0, 1766 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1763 IPV6_TLV_PADN, 0 }; 1767 IPV6_TLV_PADN, 0 };
1768 struct flowi fl;
1764 1769
1765 rcu_read_lock(); 1770 rcu_read_lock();
1766 IP6_INC_STATS(__in6_dev_get(dev), 1771 IP6_INC_STATS(__in6_dev_get(dev),
@@ -1813,8 +1818,23 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1813 1818
1814 idev = in6_dev_get(skb->dev); 1819 idev = in6_dev_get(skb->dev);
1815 1820
1821 skb->dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
1822 if (!skb->dst) {
1823 err = -ENOMEM;
1824 goto err_out;
1825 }
1826
1827 icmpv6_flow_init(igmp6_socket->sk, &fl, type,
1828 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1829 skb->dev->ifindex);
1830
1831 err = xfrm_lookup(&skb->dst, &fl, NULL, 0);
1832 if (err)
1833 goto err_out;
1834
1816 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, 1835 err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
1817 mld_dev_queue_xmit); 1836 dst_output);
1837out:
1818 if (!err) { 1838 if (!err) {
1819 ICMP6MSGOUT_INC_STATS(idev, type); 1839 ICMP6MSGOUT_INC_STATS(idev, type);
1820 ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS); 1840 ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS);
@@ -1825,6 +1845,10 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1825 if (likely(idev != NULL)) 1845 if (likely(idev != NULL))
1826 in6_dev_put(idev); 1846 in6_dev_put(idev);
1827 return; 1847 return;
1848
1849err_out:
1850 kfree_skb(skb);
1851 goto out;
1828} 1852}
1829 1853
1830static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, 1854static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
@@ -2597,7 +2621,7 @@ static const struct file_operations igmp6_mcf_seq_fops = {
2597}; 2621};
2598#endif 2622#endif
2599 2623
2600int __init igmp6_init(struct net_proto_family *ops) 2624int __init igmp6_init(void)
2601{ 2625{
2602 struct ipv6_pinfo *np; 2626 struct ipv6_pinfo *np;
2603 struct sock *sk; 2627 struct sock *sk;
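mld_sendpack() and igmp6_send() now share the same transmit recipe in place of the removed mld_dev_queue_xmit() path: attach a dst from icmp6_dst_alloc(), build the ICMPv6 flow, run xfrm_lookup(), then hand the packet to dst_output() through the LOCAL_OUT hook. A condensed, illustrative sketch of that recipe, with the error handling trimmed to the essentials:

static int example_mld_xmit(struct sock *sk, struct sk_buff *skb, u8 type)
{
	struct flowi fl;
	int err;

	/* Per-destination dst, tracked on the icmp6 dst GC list. */
	skb->dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
	if (!skb->dst) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	icmpv6_flow_init(sk, &fl, type, &ipv6_hdr(skb)->saddr,
			 &ipv6_hdr(skb)->daddr, skb->dev->ifindex);

	err = xfrm_lookup(&skb->dst, &fl, NULL, 0);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/* Netfilter LOCAL_OUT, then the regular IPv6 output path. */
	return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev,
		       dst_output);
}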
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0d33a7d32125..b5b4fd173e98 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -441,21 +441,6 @@ static void pndisc_destructor(struct pneigh_entry *n)
441/* 441/*
442 * Send a Neighbour Advertisement 442 * Send a Neighbour Advertisement
443 */ 443 */
444
445static inline void ndisc_flow_init(struct flowi *fl, u8 type,
446 struct in6_addr *saddr, struct in6_addr *daddr,
447 int oif)
448{
449 memset(fl, 0, sizeof(*fl));
450 ipv6_addr_copy(&fl->fl6_src, saddr);
451 ipv6_addr_copy(&fl->fl6_dst, daddr);
452 fl->proto = IPPROTO_ICMPV6;
453 fl->fl_icmp_type = type;
454 fl->fl_icmp_code = 0;
455 fl->oif = oif;
456 security_sk_classify_flow(ndisc_socket->sk, fl);
457}
458
459static void __ndisc_send(struct net_device *dev, 444static void __ndisc_send(struct net_device *dev,
460 struct neighbour *neigh, 445 struct neighbour *neigh,
461 struct in6_addr *daddr, struct in6_addr *saddr, 446 struct in6_addr *daddr, struct in6_addr *saddr,
@@ -474,10 +459,10 @@ static void __ndisc_send(struct net_device *dev,
474 459
475 type = icmp6h->icmp6_type; 460 type = icmp6h->icmp6_type;
476 461
477 ndisc_flow_init(&fl, type, saddr, daddr, 462 icmpv6_flow_init(ndisc_socket->sk, &fl, type,
478 dev->ifindex); 463 saddr, daddr, dev->ifindex);
479 464
480 dst = ndisc_dst_alloc(dev, neigh, daddr, ip6_output); 465 dst = icmp6_dst_alloc(dev, neigh, daddr);
481 if (!dst) 466 if (!dst)
482 return; 467 return;
483 468
@@ -1439,10 +1424,10 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1439 return; 1424 return;
1440 } 1425 }
1441 1426
1442 ndisc_flow_init(&fl, NDISC_REDIRECT, &saddr_buf, &ipv6_hdr(skb)->saddr, 1427 icmpv6_flow_init(ndisc_socket->sk, &fl, NDISC_REDIRECT,
1443 dev->ifindex); 1428 &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex);
1444 1429
1445 dst = ip6_route_output(NULL, &fl); 1430 dst = ip6_route_output(&init_net, NULL, &fl);
1446 if (dst == NULL) 1431 if (dst == NULL)
1447 return; 1432 return;
1448 1433
@@ -1613,6 +1598,7 @@ int ndisc_rcv(struct sk_buff *skb)
1613static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1598static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
1614{ 1599{
1615 struct net_device *dev = ptr; 1600 struct net_device *dev = ptr;
1601 struct net *net = dev->nd_net;
1616 1602
1617 if (dev->nd_net != &init_net) 1603 if (dev->nd_net != &init_net)
1618 return NOTIFY_DONE; 1604 return NOTIFY_DONE;
@@ -1620,11 +1606,11 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1620 switch (event) { 1606 switch (event) {
1621 case NETDEV_CHANGEADDR: 1607 case NETDEV_CHANGEADDR:
1622 neigh_changeaddr(&nd_tbl, dev); 1608 neigh_changeaddr(&nd_tbl, dev);
1623 fib6_run_gc(~0UL); 1609 fib6_run_gc(~0UL, net);
1624 break; 1610 break;
1625 case NETDEV_DOWN: 1611 case NETDEV_DOWN:
1626 neigh_ifdown(&nd_tbl, dev); 1612 neigh_ifdown(&nd_tbl, dev);
1627 fib6_run_gc(~0UL); 1613 fib6_run_gc(~0UL, net);
1628 break; 1614 break;
1629 default: 1615 default:
1630 break; 1616 break;
@@ -1733,7 +1719,7 @@ static int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, int __user *name,
1733 1719
1734#endif 1720#endif
1735 1721
1736int __init ndisc_init(struct net_proto_family *ops) 1722int __init ndisc_init(void)
1737{ 1723{
1738 struct ipv6_pinfo *np; 1724 struct ipv6_pinfo *np;
1739 struct sock *sk; 1725 struct sock *sk;
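ndisc_netdev_event() above now derives the namespace from the device and passes it to fib6_run_gc(), which grows a struct net argument elsewhere in this series. A stripped-down, illustrative version of that notifier pattern (the real handler additionally bails out for non-init_net devices at this stage):

static int example_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev->nd_net;	/* 2.6.25-era accessor */

	switch (event) {
	case NETDEV_CHANGEADDR:
		neigh_changeaddr(&nd_tbl, dev);
		fib6_run_gc(~0UL, net);	/* flush fib6 state for this net */
		break;
	case NETDEV_DOWN:
		neigh_ifdown(&nd_tbl, dev);
		fib6_run_gc(~0UL, net);
		break;
	}
	return NOTIFY_DONE;
}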
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 2e06724dc348..aed51bcc66b4 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -23,7 +23,7 @@ int ip6_route_me_harder(struct sk_buff *skb)
23 .saddr = iph->saddr, } }, 23 .saddr = iph->saddr, } },
24 }; 24 };
25 25
26 dst = ip6_route_output(skb->sk, &fl); 26 dst = ip6_route_output(&init_net, skb->sk, &fl);
27 27
28#ifdef CONFIG_XFRM 28#ifdef CONFIG_XFRM
29 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && 29 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
@@ -86,7 +86,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
86 86
87static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl) 87static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl)
88{ 88{
89 *dst = ip6_route_output(NULL, fl); 89 *dst = ip6_route_output(&init_net, NULL, fl);
90 return (*dst)->error; 90 return (*dst)->error;
91} 91}
92 92
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index b23baa635fe0..831708aeab37 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -93,7 +93,7 @@ static void send_reset(struct sk_buff *oldskb)
93 fl.fl_ip_sport = otcph.dest; 93 fl.fl_ip_sport = otcph.dest;
94 fl.fl_ip_dport = otcph.source; 94 fl.fl_ip_dport = otcph.source;
95 security_skb_classify_flow(oldskb, &fl); 95 security_skb_classify_flow(oldskb, &fl);
96 dst = ip6_route_output(NULL, &fl); 96 dst = ip6_route_output(&init_net, NULL, &fl);
97 if (dst == NULL) 97 if (dst == NULL)
98 return; 98 return;
99 if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0)) 99 if (dst->error || xfrm_lookup(&dst, &fl, NULL, 0))
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 199ef379e501..2453f2229ef7 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -39,8 +39,10 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v)
39 sock_prot_inuse_get(&tcpv6_prot)); 39 sock_prot_inuse_get(&tcpv6_prot));
40 seq_printf(seq, "UDP6: inuse %d\n", 40 seq_printf(seq, "UDP6: inuse %d\n",
41 sock_prot_inuse_get(&udpv6_prot)); 41 sock_prot_inuse_get(&udpv6_prot));
42#ifdef CONFIG_IP_UDPLITE
42 seq_printf(seq, "UDPLITE6: inuse %d\n", 43 seq_printf(seq, "UDPLITE6: inuse %d\n",
43 sock_prot_inuse_get(&udplitev6_prot)); 44 sock_prot_inuse_get(&udplitev6_prot));
45#endif
44 seq_printf(seq, "RAW6: inuse %d\n", 46 seq_printf(seq, "RAW6: inuse %d\n",
45 sock_prot_inuse_get(&rawv6_prot)); 47 sock_prot_inuse_get(&rawv6_prot));
46 seq_printf(seq, "FRAG6: inuse %d memory %d\n", 48 seq_printf(seq, "FRAG6: inuse %d memory %d\n",
@@ -111,6 +113,7 @@ static struct snmp_mib snmp6_udp6_list[] = {
111 SNMP_MIB_SENTINEL 113 SNMP_MIB_SENTINEL
112}; 114};
113 115
116#ifdef CONFIG_IP_UDPLITE
114static struct snmp_mib snmp6_udplite6_list[] = { 117static struct snmp_mib snmp6_udplite6_list[] = {
115 SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS), 118 SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS),
116 SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), 119 SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS),
@@ -118,6 +121,7 @@ static struct snmp_mib snmp6_udplite6_list[] = {
118 SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS), 121 SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
119 SNMP_MIB_SENTINEL 122 SNMP_MIB_SENTINEL
120}; 123};
124#endif
121 125
122static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib) 126static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib)
123{ 127{
@@ -176,7 +180,9 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
176 snmp6_seq_show_item(seq, (void **)icmpv6_statistics, snmp6_icmp6_list); 180 snmp6_seq_show_item(seq, (void **)icmpv6_statistics, snmp6_icmp6_list);
177 snmp6_seq_show_icmpv6msg(seq, (void **)icmpv6msg_statistics); 181 snmp6_seq_show_icmpv6msg(seq, (void **)icmpv6msg_statistics);
178 snmp6_seq_show_item(seq, (void **)udp_stats_in6, snmp6_udp6_list); 182 snmp6_seq_show_item(seq, (void **)udp_stats_in6, snmp6_udp6_list);
183#ifdef CONFIG_IP_UDPLITE
179 snmp6_seq_show_item(seq, (void **)udplite_stats_in6, snmp6_udplite6_list); 184 snmp6_seq_show_item(seq, (void **)udplite_stats_in6, snmp6_udplite6_list);
185#endif
180 } 186 }
181 return 0; 187 return 0;
182} 188}
@@ -214,6 +220,9 @@ int snmp6_register_dev(struct inet6_dev *idev)
214 if (!idev || !idev->dev) 220 if (!idev || !idev->dev)
215 return -EINVAL; 221 return -EINVAL;
216 222
223 if (idev->dev->nd_net != &init_net)
224 return 0;
225
217 if (!proc_net_devsnmp6) 226 if (!proc_net_devsnmp6)
218 return -ENOENT; 227 return -ENOENT;
219 228
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e8b241cb60bc..f31d7dc11e72 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -40,6 +40,7 @@
40#include <linux/if_arp.h> 40#include <linux/if_arp.h>
41#include <linux/proc_fs.h> 41#include <linux/proc_fs.h>
42#include <linux/seq_file.h> 42#include <linux/seq_file.h>
43#include <linux/nsproxy.h>
43#include <net/net_namespace.h> 44#include <net/net_namespace.h>
44#include <net/snmp.h> 45#include <net/snmp.h>
45#include <net/ipv6.h> 46#include <net/ipv6.h>
@@ -87,14 +88,16 @@ static void ip6_link_failure(struct sk_buff *skb);
87static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu); 88static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
88 89
89#ifdef CONFIG_IPV6_ROUTE_INFO 90#ifdef CONFIG_IPV6_ROUTE_INFO
90static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen, 91static struct rt6_info *rt6_add_route_info(struct net *net,
92 struct in6_addr *prefix, int prefixlen,
91 struct in6_addr *gwaddr, int ifindex, 93 struct in6_addr *gwaddr, int ifindex,
92 unsigned pref); 94 unsigned pref);
93static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen, 95static struct rt6_info *rt6_get_route_info(struct net *net,
96 struct in6_addr *prefix, int prefixlen,
94 struct in6_addr *gwaddr, int ifindex); 97 struct in6_addr *gwaddr, int ifindex);
95#endif 98#endif
96 99
97static struct dst_ops ip6_dst_ops = { 100static struct dst_ops ip6_dst_ops_template = {
98 .family = AF_INET6, 101 .family = AF_INET6,
99 .protocol = __constant_htons(ETH_P_IPV6), 102 .protocol = __constant_htons(ETH_P_IPV6),
100 .gc = ip6_dst_gc, 103 .gc = ip6_dst_gc,
@@ -124,7 +127,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
124 .entries = ATOMIC_INIT(0), 127 .entries = ATOMIC_INIT(0),
125}; 128};
126 129
127struct rt6_info ip6_null_entry = { 130static struct rt6_info ip6_null_entry_template = {
128 .u = { 131 .u = {
129 .dst = { 132 .dst = {
130 .__refcnt = ATOMIC_INIT(1), 133 .__refcnt = ATOMIC_INIT(1),
@@ -134,8 +137,6 @@ struct rt6_info ip6_null_entry = {
134 .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, 137 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
135 .input = ip6_pkt_discard, 138 .input = ip6_pkt_discard,
136 .output = ip6_pkt_discard_out, 139 .output = ip6_pkt_discard_out,
137 .ops = &ip6_dst_ops,
138 .path = (struct dst_entry*)&ip6_null_entry,
139 } 140 }
140 }, 141 },
141 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 142 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
@@ -148,7 +149,7 @@ struct rt6_info ip6_null_entry = {
148static int ip6_pkt_prohibit(struct sk_buff *skb); 149static int ip6_pkt_prohibit(struct sk_buff *skb);
149static int ip6_pkt_prohibit_out(struct sk_buff *skb); 150static int ip6_pkt_prohibit_out(struct sk_buff *skb);
150 151
151struct rt6_info ip6_prohibit_entry = { 152struct rt6_info ip6_prohibit_entry_template = {
152 .u = { 153 .u = {
153 .dst = { 154 .dst = {
154 .__refcnt = ATOMIC_INIT(1), 155 .__refcnt = ATOMIC_INIT(1),
@@ -158,8 +159,6 @@ struct rt6_info ip6_prohibit_entry = {
158 .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, 159 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
159 .input = ip6_pkt_prohibit, 160 .input = ip6_pkt_prohibit,
160 .output = ip6_pkt_prohibit_out, 161 .output = ip6_pkt_prohibit_out,
161 .ops = &ip6_dst_ops,
162 .path = (struct dst_entry*)&ip6_prohibit_entry,
163 } 162 }
164 }, 163 },
165 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 164 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
@@ -167,7 +166,7 @@ struct rt6_info ip6_prohibit_entry = {
167 .rt6i_ref = ATOMIC_INIT(1), 166 .rt6i_ref = ATOMIC_INIT(1),
168}; 167};
169 168
170struct rt6_info ip6_blk_hole_entry = { 169static struct rt6_info ip6_blk_hole_entry_template = {
171 .u = { 170 .u = {
172 .dst = { 171 .dst = {
173 .__refcnt = ATOMIC_INIT(1), 172 .__refcnt = ATOMIC_INIT(1),
@@ -177,8 +176,6 @@ struct rt6_info ip6_blk_hole_entry = {
177 .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, 176 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
178 .input = dst_discard, 177 .input = dst_discard,
179 .output = dst_discard, 178 .output = dst_discard,
180 .ops = &ip6_dst_ops,
181 .path = (struct dst_entry*)&ip6_blk_hole_entry,
182 } 179 }
183 }, 180 },
184 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 181 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
@@ -189,9 +186,9 @@ struct rt6_info ip6_blk_hole_entry = {
189#endif 186#endif
190 187
191/* allocate dst with ip6_dst_ops */ 188/* allocate dst with ip6_dst_ops */
192static __inline__ struct rt6_info *ip6_dst_alloc(void) 189static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops)
193{ 190{
194 return (struct rt6_info *)dst_alloc(&ip6_dst_ops); 191 return (struct rt6_info *)dst_alloc(ops);
195} 192}
196 193
197static void ip6_dst_destroy(struct dst_entry *dst) 194static void ip6_dst_destroy(struct dst_entry *dst)
@@ -239,7 +236,8 @@ static inline int rt6_need_strict(struct in6_addr *daddr)
239 * Route lookup. Any table->tb6_lock is implied. 236 * Route lookup. Any table->tb6_lock is implied.
240 */ 237 */
241 238
242static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt, 239static inline struct rt6_info *rt6_device_match(struct net *net,
240 struct rt6_info *rt,
243 int oif, 241 int oif,
244 int strict) 242 int strict)
245{ 243{
@@ -268,7 +266,7 @@ static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
268 return local; 266 return local;
269 267
270 if (strict) 268 if (strict)
271 return &ip6_null_entry; 269 return net->ipv6.ip6_null_entry;
272 } 270 }
273 return rt; 271 return rt;
274} 272}
@@ -409,6 +407,7 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
409static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict) 407static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
410{ 408{
411 struct rt6_info *match, *rt0; 409 struct rt6_info *match, *rt0;
410 struct net *net;
412 411
413 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n", 412 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
414 __FUNCTION__, fn->leaf, oif); 413 __FUNCTION__, fn->leaf, oif);
@@ -434,13 +433,15 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
434 RT6_TRACE("%s() => %p\n", 433 RT6_TRACE("%s() => %p\n",
435 __FUNCTION__, match); 434 __FUNCTION__, match);
436 435
437 return (match ? match : &ip6_null_entry); 436 net = rt0->rt6i_dev->nd_net;
437 return (match ? match : net->ipv6.ip6_null_entry);
438} 438}
439 439
440#ifdef CONFIG_IPV6_ROUTE_INFO 440#ifdef CONFIG_IPV6_ROUTE_INFO
441int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, 441int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
442 struct in6_addr *gwaddr) 442 struct in6_addr *gwaddr)
443{ 443{
444 struct net *net = dev->nd_net;
444 struct route_info *rinfo = (struct route_info *) opt; 445 struct route_info *rinfo = (struct route_info *) opt;
445 struct in6_addr prefix_buf, *prefix; 446 struct in6_addr prefix_buf, *prefix;
446 unsigned int pref; 447 unsigned int pref;
@@ -488,7 +489,8 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
488 prefix = &prefix_buf; 489 prefix = &prefix_buf;
489 } 490 }
490 491
491 rt = rt6_get_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex); 492 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
493 dev->ifindex);
492 494
493 if (rt && !lifetime) { 495 if (rt && !lifetime) {
494 ip6_del_rt(rt); 496 ip6_del_rt(rt);
@@ -496,7 +498,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
496 } 498 }
497 499
498 if (!rt && lifetime) 500 if (!rt && lifetime)
499 rt = rt6_add_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex, 501 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
500 pref); 502 pref);
501 else if (rt) 503 else if (rt)
502 rt->rt6i_flags = RTF_ROUTEINFO | 504 rt->rt6i_flags = RTF_ROUTEINFO |
@@ -515,9 +517,9 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
515} 517}
516#endif 518#endif
517 519
518#define BACKTRACK(saddr) \ 520#define BACKTRACK(__net, saddr) \
519do { \ 521do { \
520 if (rt == &ip6_null_entry) { \ 522 if (rt == __net->ipv6.ip6_null_entry) { \
521 struct fib6_node *pn; \ 523 struct fib6_node *pn; \
522 while (1) { \ 524 while (1) { \
523 if (fn->fn_flags & RTN_TL_ROOT) \ 525 if (fn->fn_flags & RTN_TL_ROOT) \
@@ -533,7 +535,8 @@ do { \
533 } \ 535 } \
534} while(0) 536} while(0)
535 537
536static struct rt6_info *ip6_pol_route_lookup(struct fib6_table *table, 538static struct rt6_info *ip6_pol_route_lookup(struct net *net,
539 struct fib6_table *table,
537 struct flowi *fl, int flags) 540 struct flowi *fl, int flags)
538{ 541{
539 struct fib6_node *fn; 542 struct fib6_node *fn;
@@ -543,8 +546,8 @@ static struct rt6_info *ip6_pol_route_lookup(struct fib6_table *table,
543 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); 546 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
544restart: 547restart:
545 rt = fn->leaf; 548 rt = fn->leaf;
546 rt = rt6_device_match(rt, fl->oif, flags); 549 rt = rt6_device_match(net, rt, fl->oif, flags);
547 BACKTRACK(&fl->fl6_src); 550 BACKTRACK(net, &fl->fl6_src);
548out: 551out:
549 dst_use(&rt->u.dst, jiffies); 552 dst_use(&rt->u.dst, jiffies);
550 read_unlock_bh(&table->tb6_lock); 553 read_unlock_bh(&table->tb6_lock);
@@ -552,8 +555,8 @@ out:
552 555
553} 556}
554 557
555struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr, 558struct rt6_info *rt6_lookup(struct net *net, struct in6_addr *daddr,
556 int oif, int strict) 559 struct in6_addr *saddr, int oif, int strict)
557{ 560{
558 struct flowi fl = { 561 struct flowi fl = {
559 .oif = oif, 562 .oif = oif,
@@ -571,7 +574,7 @@ struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr,
571 flags |= RT6_LOOKUP_F_HAS_SADDR; 574 flags |= RT6_LOOKUP_F_HAS_SADDR;
572 } 575 }
573 576
574 dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_lookup); 577 dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_lookup);
575 if (dst->error == 0) 578 if (dst->error == 0)
576 return (struct rt6_info *) dst; 579 return (struct rt6_info *) dst;
577 580
@@ -604,7 +607,7 @@ static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
604int ip6_ins_rt(struct rt6_info *rt) 607int ip6_ins_rt(struct rt6_info *rt)
605{ 608{
606 struct nl_info info = { 609 struct nl_info info = {
607 .nl_net = &init_net, 610 .nl_net = rt->rt6i_dev->nd_net,
608 }; 611 };
609 return __ip6_ins_rt(rt, &info); 612 return __ip6_ins_rt(rt, &info);
610} 613}
@@ -660,8 +663,8 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *d
660 return rt; 663 return rt;
661} 664}
662 665
663static struct rt6_info *ip6_pol_route(struct fib6_table *table, int oif, 666static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
664 struct flowi *fl, int flags) 667 struct flowi *fl, int flags)
665{ 668{
666 struct fib6_node *fn; 669 struct fib6_node *fn;
667 struct rt6_info *rt, *nrt; 670 struct rt6_info *rt, *nrt;
@@ -680,8 +683,9 @@ restart_2:
680 683
681restart: 684restart:
682 rt = rt6_select(fn, oif, strict | reachable); 685 rt = rt6_select(fn, oif, strict | reachable);
683 BACKTRACK(&fl->fl6_src); 686
684 if (rt == &ip6_null_entry || 687 BACKTRACK(net, &fl->fl6_src);
688 if (rt == net->ipv6.ip6_null_entry ||
685 rt->rt6i_flags & RTF_CACHE) 689 rt->rt6i_flags & RTF_CACHE)
686 goto out; 690 goto out;
687 691
@@ -699,7 +703,7 @@ restart:
699 } 703 }
700 704
701 dst_release(&rt->u.dst); 705 dst_release(&rt->u.dst);
702 rt = nrt ? : &ip6_null_entry; 706 rt = nrt ? : net->ipv6.ip6_null_entry;
703 707
704 dst_hold(&rt->u.dst); 708 dst_hold(&rt->u.dst);
705 if (nrt) { 709 if (nrt) {
@@ -732,15 +736,16 @@ out2:
732 return rt; 736 return rt;
733} 737}
734 738
735static struct rt6_info *ip6_pol_route_input(struct fib6_table *table, 739static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
736 struct flowi *fl, int flags) 740 struct flowi *fl, int flags)
737{ 741{
738 return ip6_pol_route(table, fl->iif, fl, flags); 742 return ip6_pol_route(net, table, fl->iif, fl, flags);
739} 743}
740 744
741void ip6_route_input(struct sk_buff *skb) 745void ip6_route_input(struct sk_buff *skb)
742{ 746{
743 struct ipv6hdr *iph = ipv6_hdr(skb); 747 struct ipv6hdr *iph = ipv6_hdr(skb);
748 struct net *net = skb->dev->nd_net;
744 int flags = RT6_LOOKUP_F_HAS_SADDR; 749 int flags = RT6_LOOKUP_F_HAS_SADDR;
745 struct flowi fl = { 750 struct flowi fl = {
746 .iif = skb->dev->ifindex, 751 .iif = skb->dev->ifindex,
@@ -758,16 +763,17 @@ void ip6_route_input(struct sk_buff *skb)
758 if (rt6_need_strict(&iph->daddr)) 763 if (rt6_need_strict(&iph->daddr))
759 flags |= RT6_LOOKUP_F_IFACE; 764 flags |= RT6_LOOKUP_F_IFACE;
760 765
761 skb->dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_input); 766 skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);
762} 767}
763 768
764static struct rt6_info *ip6_pol_route_output(struct fib6_table *table, 769static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
765 struct flowi *fl, int flags) 770 struct flowi *fl, int flags)
766{ 771{
767 return ip6_pol_route(table, fl->oif, fl, flags); 772 return ip6_pol_route(net, table, fl->oif, fl, flags);
768} 773}
769 774
770struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl) 775struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
776 struct flowi *fl)
771{ 777{
772 int flags = 0; 778 int flags = 0;
773 779
@@ -777,7 +783,7 @@ struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl)
777 if (!ipv6_addr_any(&fl->fl6_src)) 783 if (!ipv6_addr_any(&fl->fl6_src))
778 flags |= RT6_LOOKUP_F_HAS_SADDR; 784 flags |= RT6_LOOKUP_F_HAS_SADDR;
779 785
780 return fib6_rule_lookup(fl, flags, ip6_pol_route_output); 786 return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
781} 787}
782 788
783EXPORT_SYMBOL(ip6_route_output); 789EXPORT_SYMBOL(ip6_route_output);
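ip6_route_input(), ip6_route_output() and the redirect path further down all funnel through fib6_rule_lookup() with a resolver callback that now receives the struct net as its first argument. The sketch below shows the shape of that dispatch; example_resolver() is a deliberately simplified stand-in for ip6_pol_route_input()/ip6_pol_route_output() and skips the device match and BACKTRACK handling the real resolvers perform.

static struct rt6_info *example_resolver(struct net *net,
					 struct fib6_table *table,
					 struct flowi *fl, int flags)
{
	struct fib6_node *fn;
	struct rt6_info *rt;

	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
	rt = fn->leaf;
	dst_hold(&rt->u.dst);
	read_unlock_bh(&table->tb6_lock);
	return rt;
}

static struct dst_entry *example_output_lookup(struct net *net,
					       struct flowi *fl)
{
	/* fib6_rules (or the main table) picks the table, then calls back. */
	return fib6_rule_lookup(net, fl, 0, example_resolver);
}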
@@ -886,12 +892,12 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
886 892
887static int ipv6_get_mtu(struct net_device *dev); 893static int ipv6_get_mtu(struct net_device *dev);
888 894
889static inline unsigned int ipv6_advmss(unsigned int mtu) 895static inline unsigned int ipv6_advmss(struct net *net, unsigned int mtu)
890{ 896{
891 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr); 897 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
892 898
893 if (mtu < init_net.ipv6.sysctl.ip6_rt_min_advmss) 899 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
894 mtu = init_net.ipv6.sysctl.ip6_rt_min_advmss; 900 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
895 901
896 /* 902 /*
897 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and 903 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
@@ -904,21 +910,21 @@ static inline unsigned int ipv6_advmss(unsigned int mtu)
904 return mtu; 910 return mtu;
905} 911}
906 912
907static struct dst_entry *ndisc_dst_gc_list; 913static struct dst_entry *icmp6_dst_gc_list;
908static DEFINE_SPINLOCK(ndisc_lock); 914static DEFINE_SPINLOCK(icmp6_dst_lock);
909 915
910struct dst_entry *ndisc_dst_alloc(struct net_device *dev, 916struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
911 struct neighbour *neigh, 917 struct neighbour *neigh,
912 struct in6_addr *addr, 918 struct in6_addr *addr)
913 int (*output)(struct sk_buff *))
914{ 919{
915 struct rt6_info *rt; 920 struct rt6_info *rt;
916 struct inet6_dev *idev = in6_dev_get(dev); 921 struct inet6_dev *idev = in6_dev_get(dev);
922 struct net *net = dev->nd_net;
917 923
918 if (unlikely(idev == NULL)) 924 if (unlikely(idev == NULL))
919 return NULL; 925 return NULL;
920 926
921 rt = ip6_dst_alloc(); 927 rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
922 if (unlikely(rt == NULL)) { 928 if (unlikely(rt == NULL)) {
923 in6_dev_put(idev); 929 in6_dev_put(idev);
924 goto out; 930 goto out;
@@ -936,8 +942,8 @@ struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
936 atomic_set(&rt->u.dst.__refcnt, 1); 942 atomic_set(&rt->u.dst.__refcnt, 1);
937 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255; 943 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
938 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); 944 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
939 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst)); 945 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
940 rt->u.dst.output = output; 946 rt->u.dst.output = ip6_output;
941 947
942#if 0 /* there's no chance to use these for ndisc */ 948#if 0 /* there's no chance to use these for ndisc */
943 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST 949 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
@@ -947,18 +953,18 @@ struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
947 rt->rt6i_dst.plen = 128; 953 rt->rt6i_dst.plen = 128;
948#endif 954#endif
949 955
950 spin_lock_bh(&ndisc_lock); 956 spin_lock_bh(&icmp6_dst_lock);
951 rt->u.dst.next = ndisc_dst_gc_list; 957 rt->u.dst.next = icmp6_dst_gc_list;
952 ndisc_dst_gc_list = &rt->u.dst; 958 icmp6_dst_gc_list = &rt->u.dst;
953 spin_unlock_bh(&ndisc_lock); 959 spin_unlock_bh(&icmp6_dst_lock);
954 960
955 fib6_force_start_gc(); 961 fib6_force_start_gc(net);
956 962
957out: 963out:
958 return &rt->u.dst; 964 return &rt->u.dst;
959} 965}
960 966
961int ndisc_dst_gc(int *more) 967int icmp6_dst_gc(int *more)
962{ 968{
963 struct dst_entry *dst, *next, **pprev; 969 struct dst_entry *dst, *next, **pprev;
964 int freed; 970 int freed;
@@ -966,8 +972,8 @@ int ndisc_dst_gc(int *more)
966 next = NULL; 972 next = NULL;
967 freed = 0; 973 freed = 0;
968 974
969 spin_lock_bh(&ndisc_lock); 975 spin_lock_bh(&icmp6_dst_lock);
970 pprev = &ndisc_dst_gc_list; 976 pprev = &icmp6_dst_gc_list;
971 977
972 while ((dst = *pprev) != NULL) { 978 while ((dst = *pprev) != NULL) {
973 if (!atomic_read(&dst->__refcnt)) { 979 if (!atomic_read(&dst->__refcnt)) {
@@ -980,30 +986,33 @@ int ndisc_dst_gc(int *more)
980 } 986 }
981 } 987 }
982 988
983 spin_unlock_bh(&ndisc_lock); 989 spin_unlock_bh(&icmp6_dst_lock);
984 990
985 return freed; 991 return freed;
986} 992}
987 993
988static int ip6_dst_gc(struct dst_ops *ops) 994static int ip6_dst_gc(struct dst_ops *ops)
989{ 995{
990 static unsigned expire = 30*HZ;
991 static unsigned long last_gc;
992 unsigned long now = jiffies; 996 unsigned long now = jiffies;
993 997 struct net *net = ops->dst_net;
994 if (time_after(last_gc + init_net.ipv6.sysctl.ip6_rt_gc_min_interval, now) && 998 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
995 atomic_read(&ip6_dst_ops.entries) <= init_net.ipv6.sysctl.ip6_rt_max_size) 999 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1000 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1001 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1002 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1003
1004 if (time_after(rt_last_gc + rt_min_interval, now) &&
1005 atomic_read(&ops->entries) <= rt_max_size)
996 goto out; 1006 goto out;
997 1007
998 expire++; 1008 net->ipv6.ip6_rt_gc_expire++;
999 fib6_run_gc(expire); 1009 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1000 last_gc = now; 1010 net->ipv6.ip6_rt_last_gc = now;
1001 if (atomic_read(&ip6_dst_ops.entries) < ip6_dst_ops.gc_thresh) 1011 if (atomic_read(&ops->entries) < ops->gc_thresh)
1002 expire = init_net.ipv6.sysctl.ip6_rt_gc_timeout>>1; 1012 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1003
1004out: 1013out:
1005 expire -= expire>>init_net.ipv6.sysctl.ip6_rt_gc_elasticity; 1014 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1006 return (atomic_read(&ip6_dst_ops.entries) > init_net.ipv6.sysctl.ip6_rt_max_size); 1015 return (atomic_read(&ops->entries) > rt_max_size);
1007} 1016}
1008 1017
1009/* Clean host part of a prefix. Not necessary in radix tree, 1018/* Clean host part of a prefix. Not necessary in radix tree,
@@ -1045,6 +1054,7 @@ int ipv6_get_hoplimit(struct net_device *dev)
1045int ip6_route_add(struct fib6_config *cfg) 1054int ip6_route_add(struct fib6_config *cfg)
1046{ 1055{
1047 int err; 1056 int err;
1057 struct net *net = cfg->fc_nlinfo.nl_net;
1048 struct rt6_info *rt = NULL; 1058 struct rt6_info *rt = NULL;
1049 struct net_device *dev = NULL; 1059 struct net_device *dev = NULL;
1050 struct inet6_dev *idev = NULL; 1060 struct inet6_dev *idev = NULL;
@@ -1059,7 +1069,7 @@ int ip6_route_add(struct fib6_config *cfg)
1059#endif 1069#endif
1060 if (cfg->fc_ifindex) { 1070 if (cfg->fc_ifindex) {
1061 err = -ENODEV; 1071 err = -ENODEV;
1062 dev = dev_get_by_index(&init_net, cfg->fc_ifindex); 1072 dev = dev_get_by_index(net, cfg->fc_ifindex);
1063 if (!dev) 1073 if (!dev)
1064 goto out; 1074 goto out;
1065 idev = in6_dev_get(dev); 1075 idev = in6_dev_get(dev);
@@ -1070,13 +1080,13 @@ int ip6_route_add(struct fib6_config *cfg)
1070 if (cfg->fc_metric == 0) 1080 if (cfg->fc_metric == 0)
1071 cfg->fc_metric = IP6_RT_PRIO_USER; 1081 cfg->fc_metric = IP6_RT_PRIO_USER;
1072 1082
1073 table = fib6_new_table(cfg->fc_table); 1083 table = fib6_new_table(net, cfg->fc_table);
1074 if (table == NULL) { 1084 if (table == NULL) {
1075 err = -ENOBUFS; 1085 err = -ENOBUFS;
1076 goto out; 1086 goto out;
1077 } 1087 }
1078 1088
1079 rt = ip6_dst_alloc(); 1089 rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
1080 1090
1081 if (rt == NULL) { 1091 if (rt == NULL) {
1082 err = -ENOMEM; 1092 err = -ENOMEM;
@@ -1117,12 +1127,12 @@ int ip6_route_add(struct fib6_config *cfg)
1117 if ((cfg->fc_flags & RTF_REJECT) || 1127 if ((cfg->fc_flags & RTF_REJECT) ||
1118 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) { 1128 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
1119 /* hold loopback dev/idev if we haven't done so. */ 1129 /* hold loopback dev/idev if we haven't done so. */
1120 if (dev != init_net.loopback_dev) { 1130 if (dev != net->loopback_dev) {
1121 if (dev) { 1131 if (dev) {
1122 dev_put(dev); 1132 dev_put(dev);
1123 in6_dev_put(idev); 1133 in6_dev_put(idev);
1124 } 1134 }
1125 dev = init_net.loopback_dev; 1135 dev = net->loopback_dev;
1126 dev_hold(dev); 1136 dev_hold(dev);
1127 idev = in6_dev_get(dev); 1137 idev = in6_dev_get(dev);
1128 if (!idev) { 1138 if (!idev) {
@@ -1159,7 +1169,7 @@ int ip6_route_add(struct fib6_config *cfg)
1159 if (!(gwa_type&IPV6_ADDR_UNICAST)) 1169 if (!(gwa_type&IPV6_ADDR_UNICAST))
1160 goto out; 1170 goto out;
1161 1171
1162 grt = rt6_lookup(gw_addr, NULL, cfg->fc_ifindex, 1); 1172 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1163 1173
1164 err = -EHOSTUNREACH; 1174 err = -EHOSTUNREACH;
1165 if (grt == NULL) 1175 if (grt == NULL)
@@ -1226,10 +1236,13 @@ install_route:
1226 if (!rt->u.dst.metrics[RTAX_MTU-1]) 1236 if (!rt->u.dst.metrics[RTAX_MTU-1])
1227 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev); 1237 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
1228 if (!rt->u.dst.metrics[RTAX_ADVMSS-1]) 1238 if (!rt->u.dst.metrics[RTAX_ADVMSS-1])
1229 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst)); 1239 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
1230 rt->u.dst.dev = dev; 1240 rt->u.dst.dev = dev;
1231 rt->rt6i_idev = idev; 1241 rt->rt6i_idev = idev;
1232 rt->rt6i_table = table; 1242 rt->rt6i_table = table;
1243
1244 cfg->fc_nlinfo.nl_net = dev->nd_net;
1245
1233 return __ip6_ins_rt(rt, &cfg->fc_nlinfo); 1246 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1234 1247
1235out: 1248out:
@@ -1246,8 +1259,9 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1246{ 1259{
1247 int err; 1260 int err;
1248 struct fib6_table *table; 1261 struct fib6_table *table;
1262 struct net *net = rt->rt6i_dev->nd_net;
1249 1263
1250 if (rt == &ip6_null_entry) 1264 if (rt == net->ipv6.ip6_null_entry)
1251 return -ENOENT; 1265 return -ENOENT;
1252 1266
1253 table = rt->rt6i_table; 1267 table = rt->rt6i_table;
@@ -1264,7 +1278,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1264int ip6_del_rt(struct rt6_info *rt) 1278int ip6_del_rt(struct rt6_info *rt)
1265{ 1279{
1266 struct nl_info info = { 1280 struct nl_info info = {
1267 .nl_net = &init_net, 1281 .nl_net = rt->rt6i_dev->nd_net,
1268 }; 1282 };
1269 return __ip6_del_rt(rt, &info); 1283 return __ip6_del_rt(rt, &info);
1270} 1284}
@@ -1276,7 +1290,7 @@ static int ip6_route_del(struct fib6_config *cfg)
1276 struct rt6_info *rt; 1290 struct rt6_info *rt;
1277 int err = -ESRCH; 1291 int err = -ESRCH;
1278 1292
1279 table = fib6_get_table(cfg->fc_table); 1293 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1280 if (table == NULL) 1294 if (table == NULL)
1281 return err; 1295 return err;
1282 1296
@@ -1316,7 +1330,8 @@ struct ip6rd_flowi {
1316 struct in6_addr gateway; 1330 struct in6_addr gateway;
1317}; 1331};
1318 1332
1319static struct rt6_info *__ip6_route_redirect(struct fib6_table *table, 1333static struct rt6_info *__ip6_route_redirect(struct net *net,
1334 struct fib6_table *table,
1320 struct flowi *fl, 1335 struct flowi *fl,
1321 int flags) 1336 int flags)
1322{ 1337{
@@ -1359,8 +1374,8 @@ restart:
1359 } 1374 }
1360 1375
1361 if (!rt) 1376 if (!rt)
1362 rt = &ip6_null_entry; 1377 rt = net->ipv6.ip6_null_entry;
1363 BACKTRACK(&fl->fl6_src); 1378 BACKTRACK(net, &fl->fl6_src);
1364out: 1379out:
1365 dst_hold(&rt->u.dst); 1380 dst_hold(&rt->u.dst);
1366 1381
@@ -1375,6 +1390,7 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1375 struct net_device *dev) 1390 struct net_device *dev)
1376{ 1391{
1377 int flags = RT6_LOOKUP_F_HAS_SADDR; 1392 int flags = RT6_LOOKUP_F_HAS_SADDR;
1393 struct net *net = dev->nd_net;
1378 struct ip6rd_flowi rdfl = { 1394 struct ip6rd_flowi rdfl = {
1379 .fl = { 1395 .fl = {
1380 .oif = dev->ifindex, 1396 .oif = dev->ifindex,
@@ -1391,7 +1407,8 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1391 if (rt6_need_strict(dest)) 1407 if (rt6_need_strict(dest))
1392 flags |= RT6_LOOKUP_F_IFACE; 1408 flags |= RT6_LOOKUP_F_IFACE;
1393 1409
1394 return (struct rt6_info *)fib6_rule_lookup((struct flowi *)&rdfl, flags, __ip6_route_redirect); 1410 return (struct rt6_info *)fib6_rule_lookup(net, (struct flowi *)&rdfl,
1411 flags, __ip6_route_redirect);
1395} 1412}
1396 1413
1397void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, 1414void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
@@ -1400,10 +1417,11 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1400{ 1417{
1401 struct rt6_info *rt, *nrt = NULL; 1418 struct rt6_info *rt, *nrt = NULL;
1402 struct netevent_redirect netevent; 1419 struct netevent_redirect netevent;
1420 struct net *net = neigh->dev->nd_net;
1403 1421
1404 rt = ip6_route_redirect(dest, src, saddr, neigh->dev); 1422 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1405 1423
1406 if (rt == &ip6_null_entry) { 1424 if (rt == net->ipv6.ip6_null_entry) {
1407 if (net_ratelimit()) 1425 if (net_ratelimit())
1408 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop " 1426 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1409 "for redirect target\n"); 1427 "for redirect target\n");
@@ -1448,7 +1466,8 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1448 nrt->rt6i_nexthop = neigh_clone(neigh); 1466 nrt->rt6i_nexthop = neigh_clone(neigh);
1449 /* Reset pmtu, it may be better */ 1467 /* Reset pmtu, it may be better */
1450 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev); 1468 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
1451 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&nrt->u.dst)); 1469 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(neigh->dev->nd_net,
1470 dst_mtu(&nrt->u.dst));
1452 1471
1453 if (ip6_ins_rt(nrt)) 1472 if (ip6_ins_rt(nrt))
1454 goto out; 1473 goto out;
@@ -1476,9 +1495,10 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1476 struct net_device *dev, u32 pmtu) 1495 struct net_device *dev, u32 pmtu)
1477{ 1496{
1478 struct rt6_info *rt, *nrt; 1497 struct rt6_info *rt, *nrt;
1498 struct net *net = dev->nd_net;
1479 int allfrag = 0; 1499 int allfrag = 0;
1480 1500
1481 rt = rt6_lookup(daddr, saddr, dev->ifindex, 0); 1501 rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
1482 if (rt == NULL) 1502 if (rt == NULL)
1483 return; 1503 return;
1484 1504
@@ -1511,7 +1531,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1511 rt->u.dst.metrics[RTAX_MTU-1] = pmtu; 1531 rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1512 if (allfrag) 1532 if (allfrag)
1513 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; 1533 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1514 dst_set_expires(&rt->u.dst, init_net.ipv6.sysctl.ip6_rt_mtu_expires); 1534 dst_set_expires(&rt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1515 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; 1535 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1516 goto out; 1536 goto out;
1517 } 1537 }
@@ -1537,7 +1557,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1537 * which is 10 mins. After 10 mins the decreased pmtu is expired 1557 * which is 10 mins. After 10 mins the decreased pmtu is expired
1538 * and detecting PMTU increase will be automatically happened. 1558 * and detecting PMTU increase will be automatically happened.
1539 */ 1559 */
1540 dst_set_expires(&nrt->u.dst, init_net.ipv6.sysctl.ip6_rt_mtu_expires); 1560 dst_set_expires(&nrt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1541 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; 1561 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1542 1562
1543 ip6_ins_rt(nrt); 1563 ip6_ins_rt(nrt);
@@ -1552,7 +1572,8 @@ out:
1552 1572
1553static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) 1573static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1554{ 1574{
1555 struct rt6_info *rt = ip6_dst_alloc(); 1575 struct net *net = ort->rt6i_dev->nd_net;
1576 struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
1556 1577
1557 if (rt) { 1578 if (rt) {
1558 rt->u.dst.input = ort->u.dst.input; 1579 rt->u.dst.input = ort->u.dst.input;
@@ -1583,14 +1604,15 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1583} 1604}
1584 1605
1585#ifdef CONFIG_IPV6_ROUTE_INFO 1606#ifdef CONFIG_IPV6_ROUTE_INFO
1586static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen, 1607static struct rt6_info *rt6_get_route_info(struct net *net,
1608 struct in6_addr *prefix, int prefixlen,
1587 struct in6_addr *gwaddr, int ifindex) 1609 struct in6_addr *gwaddr, int ifindex)
1588{ 1610{
1589 struct fib6_node *fn; 1611 struct fib6_node *fn;
1590 struct rt6_info *rt = NULL; 1612 struct rt6_info *rt = NULL;
1591 struct fib6_table *table; 1613 struct fib6_table *table;
1592 1614
1593 table = fib6_get_table(RT6_TABLE_INFO); 1615 table = fib6_get_table(net, RT6_TABLE_INFO);
1594 if (table == NULL) 1616 if (table == NULL)
1595 return NULL; 1617 return NULL;
1596 1618
@@ -1614,7 +1636,8 @@ out:
1614 return rt; 1636 return rt;
1615} 1637}
1616 1638
1617static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen, 1639static struct rt6_info *rt6_add_route_info(struct net *net,
1640 struct in6_addr *prefix, int prefixlen,
1618 struct in6_addr *gwaddr, int ifindex, 1641 struct in6_addr *gwaddr, int ifindex,
1619 unsigned pref) 1642 unsigned pref)
1620{ 1643{
@@ -1625,6 +1648,9 @@ static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixle
1625 .fc_dst_len = prefixlen, 1648 .fc_dst_len = prefixlen,
1626 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 1649 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1627 RTF_UP | RTF_PREF(pref), 1650 RTF_UP | RTF_PREF(pref),
1651 .fc_nlinfo.pid = 0,
1652 .fc_nlinfo.nlh = NULL,
1653 .fc_nlinfo.nl_net = net,
1628 }; 1654 };
1629 1655
1630 ipv6_addr_copy(&cfg.fc_dst, prefix); 1656 ipv6_addr_copy(&cfg.fc_dst, prefix);
@@ -1636,7 +1662,7 @@ static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixle
1636 1662
1637 ip6_route_add(&cfg); 1663 ip6_route_add(&cfg);
1638 1664
1639 return rt6_get_route_info(prefix, prefixlen, gwaddr, ifindex); 1665 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1640} 1666}
1641#endif 1667#endif
1642 1668
@@ -1645,7 +1671,7 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d
1645 struct rt6_info *rt; 1671 struct rt6_info *rt;
1646 struct fib6_table *table; 1672 struct fib6_table *table;
1647 1673
1648 table = fib6_get_table(RT6_TABLE_DFLT); 1674 table = fib6_get_table(dev->nd_net, RT6_TABLE_DFLT);
1649 if (table == NULL) 1675 if (table == NULL)
1650 return NULL; 1676 return NULL;
1651 1677
@@ -1674,6 +1700,9 @@ struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1674 .fc_ifindex = dev->ifindex, 1700 .fc_ifindex = dev->ifindex,
1675 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | 1701 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1676 RTF_UP | RTF_EXPIRES | RTF_PREF(pref), 1702 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1703 .fc_nlinfo.pid = 0,
1704 .fc_nlinfo.nlh = NULL,
1705 .fc_nlinfo.nl_net = dev->nd_net,
1677 }; 1706 };
1678 1707
1679 ipv6_addr_copy(&cfg.fc_gateway, gwaddr); 1708 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
@@ -1683,13 +1712,13 @@ struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1683 return rt6_get_dflt_router(gwaddr, dev); 1712 return rt6_get_dflt_router(gwaddr, dev);
1684} 1713}
1685 1714
1686void rt6_purge_dflt_routers(void) 1715void rt6_purge_dflt_routers(struct net *net)
1687{ 1716{
1688 struct rt6_info *rt; 1717 struct rt6_info *rt;
1689 struct fib6_table *table; 1718 struct fib6_table *table;
1690 1719
1691 /* NOTE: Keep consistent with rt6_get_dflt_router */ 1720 /* NOTE: Keep consistent with rt6_get_dflt_router */
1692 table = fib6_get_table(RT6_TABLE_DFLT); 1721 table = fib6_get_table(net, RT6_TABLE_DFLT);
1693 if (table == NULL) 1722 if (table == NULL)
1694 return; 1723 return;
1695 1724
@@ -1706,7 +1735,8 @@ restart:
1706 read_unlock_bh(&table->tb6_lock); 1735 read_unlock_bh(&table->tb6_lock);
1707} 1736}
1708 1737
1709static void rtmsg_to_fib6_config(struct in6_rtmsg *rtmsg, 1738static void rtmsg_to_fib6_config(struct net *net,
1739 struct in6_rtmsg *rtmsg,
1710 struct fib6_config *cfg) 1740 struct fib6_config *cfg)
1711{ 1741{
1712 memset(cfg, 0, sizeof(*cfg)); 1742 memset(cfg, 0, sizeof(*cfg));
@@ -1719,14 +1749,14 @@ static void rtmsg_to_fib6_config(struct in6_rtmsg *rtmsg,
1719 cfg->fc_src_len = rtmsg->rtmsg_src_len; 1749 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1720 cfg->fc_flags = rtmsg->rtmsg_flags; 1750 cfg->fc_flags = rtmsg->rtmsg_flags;
1721 1751
1722 cfg->fc_nlinfo.nl_net = &init_net; 1752 cfg->fc_nlinfo.nl_net = net;
1723 1753
1724 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst); 1754 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
1725 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src); 1755 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
1726 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway); 1756 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
1727} 1757}
1728 1758
1729int ipv6_route_ioctl(unsigned int cmd, void __user *arg) 1759int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1730{ 1760{
1731 struct fib6_config cfg; 1761 struct fib6_config cfg;
1732 struct in6_rtmsg rtmsg; 1762 struct in6_rtmsg rtmsg;
@@ -1742,7 +1772,7 @@ int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
1742 if (err) 1772 if (err)
1743 return -EFAULT; 1773 return -EFAULT;
1744 1774
1745 rtmsg_to_fib6_config(&rtmsg, &cfg); 1775 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
1746 1776
1747 rtnl_lock(); 1777 rtnl_lock();
1748 switch (cmd) { 1778 switch (cmd) {
@@ -1821,21 +1851,22 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1821 const struct in6_addr *addr, 1851 const struct in6_addr *addr,
1822 int anycast) 1852 int anycast)
1823{ 1853{
1824 struct rt6_info *rt = ip6_dst_alloc(); 1854 struct net *net = idev->dev->nd_net;
1855 struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
1825 1856
1826 if (rt == NULL) 1857 if (rt == NULL)
1827 return ERR_PTR(-ENOMEM); 1858 return ERR_PTR(-ENOMEM);
1828 1859
1829 dev_hold(init_net.loopback_dev); 1860 dev_hold(net->loopback_dev);
1830 in6_dev_hold(idev); 1861 in6_dev_hold(idev);
1831 1862
1832 rt->u.dst.flags = DST_HOST; 1863 rt->u.dst.flags = DST_HOST;
1833 rt->u.dst.input = ip6_input; 1864 rt->u.dst.input = ip6_input;
1834 rt->u.dst.output = ip6_output; 1865 rt->u.dst.output = ip6_output;
1835 rt->rt6i_dev = init_net.loopback_dev; 1866 rt->rt6i_dev = net->loopback_dev;
1836 rt->rt6i_idev = idev; 1867 rt->rt6i_idev = idev;
1837 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); 1868 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
1838 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst)); 1869 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
1839 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1; 1870 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1840 rt->u.dst.obsolete = -1; 1871 rt->u.dst.obsolete = -1;
1841 1872
@@ -1852,26 +1883,39 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1852 1883
1853 ipv6_addr_copy(&rt->rt6i_dst.addr, addr); 1884 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1854 rt->rt6i_dst.plen = 128; 1885 rt->rt6i_dst.plen = 128;
1855 rt->rt6i_table = fib6_get_table(RT6_TABLE_LOCAL); 1886 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
1856 1887
1857 atomic_set(&rt->u.dst.__refcnt, 1); 1888 atomic_set(&rt->u.dst.__refcnt, 1);
1858 1889
1859 return rt; 1890 return rt;
1860} 1891}
1861 1892
1893struct arg_dev_net {
1894 struct net_device *dev;
1895 struct net *net;
1896};
1897
1862static int fib6_ifdown(struct rt6_info *rt, void *arg) 1898static int fib6_ifdown(struct rt6_info *rt, void *arg)
1863{ 1899{
1864 if (((void*)rt->rt6i_dev == arg || arg == NULL) && 1900 struct net_device *dev = ((struct arg_dev_net *)arg)->dev;
1865 rt != &ip6_null_entry) { 1901 struct net *net = ((struct arg_dev_net *)arg)->net;
1902
1903 if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
1904 rt != net->ipv6.ip6_null_entry) {
1866 RT6_TRACE("deleted by ifdown %p\n", rt); 1905 RT6_TRACE("deleted by ifdown %p\n", rt);
1867 return -1; 1906 return -1;
1868 } 1907 }
1869 return 0; 1908 return 0;
1870} 1909}
1871 1910
1872void rt6_ifdown(struct net_device *dev) 1911void rt6_ifdown(struct net *net, struct net_device *dev)
1873{ 1912{
1874 fib6_clean_all(fib6_ifdown, 0, dev); 1913 struct arg_dev_net adn = {
1914 .dev = dev,
1915 .net = net,
1916 };
1917
1918 fib6_clean_all(net, fib6_ifdown, 0, &adn);
1875} 1919}
1876 1920
1877struct rt6_mtu_change_arg 1921struct rt6_mtu_change_arg
@@ -1884,6 +1928,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1884{ 1928{
1885 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg; 1929 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
1886 struct inet6_dev *idev; 1930 struct inet6_dev *idev;
1931 struct net *net = arg->dev->nd_net;
1887 1932
1888 /* In IPv6 pmtu discovery is not optional, 1933 /* In IPv6 pmtu discovery is not optional,
1889 so that RTAX_MTU lock cannot disable it. 1934 so that RTAX_MTU lock cannot disable it.
@@ -1915,7 +1960,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1915 (dst_mtu(&rt->u.dst) < arg->mtu && 1960 (dst_mtu(&rt->u.dst) < arg->mtu &&
1916 dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) { 1961 dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) {
1917 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu; 1962 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
1918 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu); 1963 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
1919 } 1964 }
1920 return 0; 1965 return 0;
1921} 1966}
@@ -1927,7 +1972,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned mtu)
1927 .mtu = mtu, 1972 .mtu = mtu,
1928 }; 1973 };
1929 1974
1930 fib6_clean_all(rt6_mtu_change_route, 0, &arg); 1975 fib6_clean_all(dev->nd_net, rt6_mtu_change_route, 0, &arg);
1931} 1976}
1932 1977
1933static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { 1978static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
@@ -2010,13 +2055,9 @@ errout:
2010 2055
2011static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 2056static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2012{ 2057{
2013 struct net *net = skb->sk->sk_net;
2014 struct fib6_config cfg; 2058 struct fib6_config cfg;
2015 int err; 2059 int err;
2016 2060
2017 if (net != &init_net)
2018 return -EINVAL;
2019
2020 err = rtm_to_fib6_config(skb, nlh, &cfg); 2061 err = rtm_to_fib6_config(skb, nlh, &cfg);
2021 if (err < 0) 2062 if (err < 0)
2022 return err; 2063 return err;
@@ -2026,13 +2067,9 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *a
2026 2067
2027static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 2068static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2028{ 2069{
2029 struct net *net = skb->sk->sk_net;
2030 struct fib6_config cfg; 2070 struct fib6_config cfg;
2031 int err; 2071 int err;
2032 2072
2033 if (net != &init_net)
2034 return -EINVAL;
2035
2036 err = rtm_to_fib6_config(skb, nlh, &cfg); 2073 err = rtm_to_fib6_config(skb, nlh, &cfg);
2037 if (err < 0) 2074 if (err < 0)
2038 return err; 2075 return err;
@@ -2122,7 +2159,8 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2122 NLA_PUT_U32(skb, RTA_IIF, iif); 2159 NLA_PUT_U32(skb, RTA_IIF, iif);
2123 else if (dst) { 2160 else if (dst) {
2124 struct in6_addr saddr_buf; 2161 struct in6_addr saddr_buf;
2125 if (ipv6_get_saddr(&rt->u.dst, dst, &saddr_buf) == 0) 2162 if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
2163 dst, &saddr_buf) == 0)
2126 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); 2164 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2127 } 2165 }
2128 2166
@@ -2175,9 +2213,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2175 struct flowi fl; 2213 struct flowi fl;
2176 int err, iif = 0; 2214 int err, iif = 0;
2177 2215
2178 if (net != &init_net)
2179 return -EINVAL;
2180
2181 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy); 2216 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2182 if (err < 0) 2217 if (err < 0)
2183 goto errout; 2218 goto errout;
@@ -2207,7 +2242,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2207 2242
2208 if (iif) { 2243 if (iif) {
2209 struct net_device *dev; 2244 struct net_device *dev;
2210 dev = __dev_get_by_index(&init_net, iif); 2245 dev = __dev_get_by_index(net, iif);
2211 if (!dev) { 2246 if (!dev) {
2212 err = -ENODEV; 2247 err = -ENODEV;
2213 goto errout; 2248 goto errout;
@@ -2226,7 +2261,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2226 skb_reset_mac_header(skb); 2261 skb_reset_mac_header(skb);
2227 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); 2262 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2228 2263
2229 rt = (struct rt6_info*) ip6_route_output(NULL, &fl); 2264 rt = (struct rt6_info*) ip6_route_output(&init_net, NULL, &fl);
2230 skb->dst = &rt->u.dst; 2265 skb->dst = &rt->u.dst;
2231 2266
2232 err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, 2267 err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
@@ -2237,7 +2272,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2237 goto errout; 2272 goto errout;
2238 } 2273 }
2239 2274
2240 err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid); 2275 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2241errout: 2276errout:
2242 return err; 2277 return err;
2243} 2278}
@@ -2245,6 +2280,7 @@ errout:
2245void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) 2280void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2246{ 2281{
2247 struct sk_buff *skb; 2282 struct sk_buff *skb;
2283 struct net *net = info->nl_net;
2248 u32 seq; 2284 u32 seq;
2249 int err; 2285 int err;
2250 2286
@@ -2263,11 +2299,31 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2263 kfree_skb(skb); 2299 kfree_skb(skb);
2264 goto errout; 2300 goto errout;
2265 } 2301 }
2266 err = rtnl_notify(skb, &init_net, info->pid, 2302 err = rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
2267 RTNLGRP_IPV6_ROUTE, info->nlh, gfp_any()); 2303 info->nlh, gfp_any());
2268errout: 2304errout:
2269 if (err < 0) 2305 if (err < 0)
2270 rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_ROUTE, err); 2306 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2307}
2308
2309static int ip6_route_dev_notify(struct notifier_block *this,
2310 unsigned long event, void *data)
2311{
2312 struct net_device *dev = (struct net_device *)data;
2313 struct net *net = dev->nd_net;
2314
2315 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2316 net->ipv6.ip6_null_entry->u.dst.dev = dev;
2317 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2318#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2319 net->ipv6.ip6_prohibit_entry->u.dst.dev = dev;
2320 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2321 net->ipv6.ip6_blk_hole_entry->u.dst.dev = dev;
2322 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2323#endif
2324 }
2325
2326 return NOTIFY_OK;
2271} 2327}
2272 2328
2273/* 2329/*
@@ -2316,13 +2372,25 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2316 2372
2317static int ipv6_route_show(struct seq_file *m, void *v) 2373static int ipv6_route_show(struct seq_file *m, void *v)
2318{ 2374{
2319 fib6_clean_all(rt6_info_route, 0, m); 2375 struct net *net = (struct net *)m->private;
2376 fib6_clean_all(net, rt6_info_route, 0, m);
2320 return 0; 2377 return 0;
2321} 2378}
2322 2379
2323static int ipv6_route_open(struct inode *inode, struct file *file) 2380static int ipv6_route_open(struct inode *inode, struct file *file)
2324{ 2381{
2325 return single_open(file, ipv6_route_show, NULL); 2382 struct net *net = get_proc_net(inode);
2383 if (!net)
2384 return -ENXIO;
2385 return single_open(file, ipv6_route_show, net);
2386}
2387
2388static int ipv6_route_release(struct inode *inode, struct file *file)
2389{
2390 struct seq_file *seq = file->private_data;
2391 struct net *net = seq->private;
2392 put_net(net);
2393 return single_release(inode, file);
2326} 2394}
2327 2395
2328static const struct file_operations ipv6_route_proc_fops = { 2396static const struct file_operations ipv6_route_proc_fops = {
@@ -2330,24 +2398,36 @@ static const struct file_operations ipv6_route_proc_fops = {
2330 .open = ipv6_route_open, 2398 .open = ipv6_route_open,
2331 .read = seq_read, 2399 .read = seq_read,
2332 .llseek = seq_lseek, 2400 .llseek = seq_lseek,
2333 .release = single_release, 2401 .release = ipv6_route_release,
2334}; 2402};
2335 2403
2336static int rt6_stats_seq_show(struct seq_file *seq, void *v) 2404static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2337{ 2405{
2406 struct net *net = (struct net *)seq->private;
2338 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n", 2407 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2339 rt6_stats.fib_nodes, rt6_stats.fib_route_nodes, 2408 net->ipv6.rt6_stats->fib_nodes,
2340 rt6_stats.fib_rt_alloc, rt6_stats.fib_rt_entries, 2409 net->ipv6.rt6_stats->fib_route_nodes,
2341 rt6_stats.fib_rt_cache, 2410 net->ipv6.rt6_stats->fib_rt_alloc,
2342 atomic_read(&ip6_dst_ops.entries), 2411 net->ipv6.rt6_stats->fib_rt_entries,
2343 rt6_stats.fib_discarded_routes); 2412 net->ipv6.rt6_stats->fib_rt_cache,
2413 atomic_read(&net->ipv6.ip6_dst_ops->entries),
2414 net->ipv6.rt6_stats->fib_discarded_routes);
2344 2415
2345 return 0; 2416 return 0;
2346} 2417}
2347 2418
2348static int rt6_stats_seq_open(struct inode *inode, struct file *file) 2419static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2349{ 2420{
2350 return single_open(file, rt6_stats_seq_show, NULL); 2421 struct net *net = get_proc_net(inode);
2422 return single_open(file, rt6_stats_seq_show, net);
2423}
2424
2425static int rt6_stats_seq_release(struct inode *inode, struct file *file)
2426{
2427 struct seq_file *seq = file->private_data;
2428 struct net *net = (struct net *)seq->private;
2429 put_net(net);
2430 return single_release(inode, file);
2351} 2431}
2352 2432
2353static const struct file_operations rt6_stats_seq_fops = { 2433static const struct file_operations rt6_stats_seq_fops = {
@@ -2355,42 +2435,8 @@ static const struct file_operations rt6_stats_seq_fops = {
2355 .open = rt6_stats_seq_open, 2435 .open = rt6_stats_seq_open,
2356 .read = seq_read, 2436 .read = seq_read,
2357 .llseek = seq_lseek, 2437 .llseek = seq_lseek,
2358 .release = single_release, 2438 .release = rt6_stats_seq_release,
2359}; 2439};
2360
2361static int ipv6_route_proc_init(struct net *net)
2362{
2363 int ret = -ENOMEM;
2364 if (!proc_net_fops_create(net, "ipv6_route",
2365 0, &ipv6_route_proc_fops))
2366 goto out;
2367
2368 if (!proc_net_fops_create(net, "rt6_stats",
2369 S_IRUGO, &rt6_stats_seq_fops))
2370 goto out_ipv6_route;
2371
2372 ret = 0;
2373out:
2374 return ret;
2375out_ipv6_route:
2376 proc_net_remove(net, "ipv6_route");
2377 goto out;
2378}
2379
2380static void ipv6_route_proc_fini(struct net *net)
2381{
2382 proc_net_remove(net, "ipv6_route");
2383 proc_net_remove(net, "rt6_stats");
2384}
2385#else
2386static inline int ipv6_route_proc_init(struct net *net)
2387{
2388 return 0;
2389}
2390static inline void ipv6_route_proc_fini(struct net *net)
2391{
2392 return ;
2393}
2394#endif /* CONFIG_PROC_FS */ 2440#endif /* CONFIG_PROC_FS */
2395 2441
2396#ifdef CONFIG_SYSCTL 2442#ifdef CONFIG_SYSCTL
@@ -2399,10 +2445,11 @@ static
2399int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp, 2445int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
2400 void __user *buffer, size_t *lenp, loff_t *ppos) 2446 void __user *buffer, size_t *lenp, loff_t *ppos)
2401{ 2447{
2402 int delay = init_net.ipv6.sysctl.flush_delay; 2448 struct net *net = current->nsproxy->net_ns;
2449 int delay = net->ipv6.sysctl.flush_delay;
2403 if (write) { 2450 if (write) {
2404 proc_dointvec(ctl, write, filp, buffer, lenp, ppos); 2451 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2405 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay); 2452 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2406 return 0; 2453 return 0;
2407 } else 2454 } else
2408 return -EINVAL; 2455 return -EINVAL;
@@ -2419,7 +2466,7 @@ ctl_table ipv6_route_table_template[] = {
2419 { 2466 {
2420 .ctl_name = NET_IPV6_ROUTE_GC_THRESH, 2467 .ctl_name = NET_IPV6_ROUTE_GC_THRESH,
2421 .procname = "gc_thresh", 2468 .procname = "gc_thresh",
2422 .data = &ip6_dst_ops.gc_thresh, 2469 .data = &ip6_dst_ops_template.gc_thresh,
2423 .maxlen = sizeof(int), 2470 .maxlen = sizeof(int),
2424 .mode = 0644, 2471 .mode = 0644,
2425 .proc_handler = &proc_dointvec, 2472 .proc_handler = &proc_dointvec,
@@ -2505,33 +2552,141 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
2505 table = kmemdup(ipv6_route_table_template, 2552 table = kmemdup(ipv6_route_table_template,
2506 sizeof(ipv6_route_table_template), 2553 sizeof(ipv6_route_table_template),
2507 GFP_KERNEL); 2554 GFP_KERNEL);
2555
2556 if (table) {
2557 table[0].data = &net->ipv6.sysctl.flush_delay;
2558 table[1].data = &net->ipv6.ip6_dst_ops->gc_thresh;
2559 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2560 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2561 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2562 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2563 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2564 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2565 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2566 }
2567
2508 return table; 2568 return table;
2509} 2569}
2510#endif 2570#endif
2511 2571
2572static int ip6_route_net_init(struct net *net)
2573{
2574 int ret = 0;
2575
2576 ret = -ENOMEM;
2577 net->ipv6.ip6_dst_ops = kmemdup(&ip6_dst_ops_template,
2578 sizeof(*net->ipv6.ip6_dst_ops),
2579 GFP_KERNEL);
2580 if (!net->ipv6.ip6_dst_ops)
2581 goto out;
2582 net->ipv6.ip6_dst_ops->dst_net = net;
2583
2584 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2585 sizeof(*net->ipv6.ip6_null_entry),
2586 GFP_KERNEL);
2587 if (!net->ipv6.ip6_null_entry)
2588 goto out_ip6_dst_ops;
2589 net->ipv6.ip6_null_entry->u.dst.path =
2590 (struct dst_entry *)net->ipv6.ip6_null_entry;
2591 net->ipv6.ip6_null_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
2592
2593#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2594 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2595 sizeof(*net->ipv6.ip6_prohibit_entry),
2596 GFP_KERNEL);
2597 if (!net->ipv6.ip6_prohibit_entry) {
2598 kfree(net->ipv6.ip6_null_entry);
2599 goto out;
2600 }
2601 net->ipv6.ip6_prohibit_entry->u.dst.path =
2602 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2603 net->ipv6.ip6_prohibit_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
2604
2605 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2606 sizeof(*net->ipv6.ip6_blk_hole_entry),
2607 GFP_KERNEL);
2608 if (!net->ipv6.ip6_blk_hole_entry) {
2609 kfree(net->ipv6.ip6_null_entry);
2610 kfree(net->ipv6.ip6_prohibit_entry);
2611 goto out;
2612 }
2613 net->ipv6.ip6_blk_hole_entry->u.dst.path =
2614 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2615 net->ipv6.ip6_blk_hole_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
2616#endif
2617
2618#ifdef CONFIG_PROC_FS
2619 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2620 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2621#endif
2622 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2623
2624 ret = 0;
2625out:
2626 return ret;
2627
2628out_ip6_dst_ops:
2629 kfree(net->ipv6.ip6_dst_ops);
2630 goto out;
2631}
2632
2633static void ip6_route_net_exit(struct net *net)
2634{
2635#ifdef CONFIG_PROC_FS
2636 proc_net_remove(net, "ipv6_route");
2637 proc_net_remove(net, "rt6_stats");
2638#endif
2639 kfree(net->ipv6.ip6_null_entry);
2640#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2641 kfree(net->ipv6.ip6_prohibit_entry);
2642 kfree(net->ipv6.ip6_blk_hole_entry);
2643#endif
2644 kfree(net->ipv6.ip6_dst_ops);
2645}
2646
2647static struct pernet_operations ip6_route_net_ops = {
2648 .init = ip6_route_net_init,
2649 .exit = ip6_route_net_exit,
2650};
2651
2652static struct notifier_block ip6_route_dev_notifier = {
2653 .notifier_call = ip6_route_dev_notify,
2654 .priority = 0,
2655};
2656
2512int __init ip6_route_init(void) 2657int __init ip6_route_init(void)
2513{ 2658{
2514 int ret; 2659 int ret;
2515 2660
2516 ip6_dst_ops.kmem_cachep = 2661 ret = -ENOMEM;
2662 ip6_dst_ops_template.kmem_cachep =
2517 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, 2663 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2518 SLAB_HWCACHE_ALIGN, NULL); 2664 SLAB_HWCACHE_ALIGN, NULL);
2519 if (!ip6_dst_ops.kmem_cachep) 2665 if (!ip6_dst_ops_template.kmem_cachep)
2520 return -ENOMEM; 2666 goto out;
2521 2667
2522 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep; 2668 ret = register_pernet_subsys(&ip6_route_net_ops);
2523
2524 ret = fib6_init();
2525 if (ret) 2669 if (ret)
2526 goto out_kmem_cache; 2670 goto out_kmem_cache;
2527 2671
2528 ret = ipv6_route_proc_init(&init_net); 2672 /* The loopback device is registered before this code runs, so the
2673 * loopback reference in rt6_info is not taken there; take it manually
2674 * for init_net */
2675 init_net.ipv6.ip6_null_entry->u.dst.dev = init_net.loopback_dev;
2676 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2677 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2678 init_net.ipv6.ip6_prohibit_entry->u.dst.dev = init_net.loopback_dev;
2679 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2680 init_net.ipv6.ip6_blk_hole_entry->u.dst.dev = init_net.loopback_dev;
2681 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2682 #endif
2683 ret = fib6_init();
2529 if (ret) 2684 if (ret)
2530 goto out_fib6_init; 2685 goto out_register_subsys;
2531 2686
2532 ret = xfrm6_init(); 2687 ret = xfrm6_init();
2533 if (ret) 2688 if (ret)
2534 goto out_proc_init; 2689 goto out_fib6_init;
2535 2690
2536 ret = fib6_rules_init(); 2691 ret = fib6_rules_init();
2537 if (ret) 2692 if (ret)
@@ -2543,7 +2698,10 @@ int __init ip6_route_init(void)
2543 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL)) 2698 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL))
2544 goto fib6_rules_init; 2699 goto fib6_rules_init;
2545 2700
2546 ret = 0; 2701 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
2702 if (ret)
2703 goto fib6_rules_init;
2704
2547out: 2705out:
2548 return ret; 2706 return ret;
2549 2707
@@ -2551,22 +2709,21 @@ fib6_rules_init:
2551 fib6_rules_cleanup(); 2709 fib6_rules_cleanup();
2552xfrm6_init: 2710xfrm6_init:
2553 xfrm6_fini(); 2711 xfrm6_fini();
2554out_proc_init:
2555 ipv6_route_proc_fini(&init_net);
2556out_fib6_init: 2712out_fib6_init:
2557 rt6_ifdown(NULL);
2558 fib6_gc_cleanup(); 2713 fib6_gc_cleanup();
2714out_register_subsys:
2715 unregister_pernet_subsys(&ip6_route_net_ops);
2559out_kmem_cache: 2716out_kmem_cache:
2560 kmem_cache_destroy(ip6_dst_ops.kmem_cachep); 2717 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2561 goto out; 2718 goto out;
2562} 2719}
2563 2720
2564void ip6_route_cleanup(void) 2721void ip6_route_cleanup(void)
2565{ 2722{
2723 unregister_netdevice_notifier(&ip6_route_dev_notifier);
2566 fib6_rules_cleanup(); 2724 fib6_rules_cleanup();
2567 ipv6_route_proc_fini(&init_net);
2568 xfrm6_fini(); 2725 xfrm6_fini();
2569 rt6_ifdown(NULL);
2570 fib6_gc_cleanup(); 2726 fib6_gc_cleanup();
2571 kmem_cache_destroy(ip6_dst_ops.kmem_cachep); 2727 unregister_pernet_subsys(&ip6_route_net_ops);
2728 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2572} 2729}
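
The per-namespace setup in ip6_route_net_init() above follows a simple pattern: duplicate a static template for each namespace, then fix up the copy's back-pointers. Below is a minimal standalone sketch of that pattern in plain userspace C; every name is illustrative and nothing here is part of the patch itself.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct dst_ops_sketch {
	int gc_thresh;
	void *dst_net;			/* back-pointer to the owning "net" */
};

struct net_sketch {
	struct dst_ops_sketch *ip6_dst_ops;
};

static const struct dst_ops_sketch ip6_dst_ops_template = { .gc_thresh = 1024 };

static int route_net_init(struct net_sketch *net)
{
	/* like kmemdup(&template, sizeof(*copy), GFP_KERNEL) */
	net->ip6_dst_ops = malloc(sizeof(*net->ip6_dst_ops));
	if (!net->ip6_dst_ops)
		return -1;
	memcpy(net->ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(*net->ip6_dst_ops));
	net->ip6_dst_ops->dst_net = net;	/* fix up the back-pointer */
	return 0;
}

static void route_net_exit(struct net_sketch *net)
{
	free(net->ip6_dst_ops);
}

int main(void)
{
	struct net_sketch a, b;

	if (route_net_init(&a) || route_net_init(&b))
		return 1;
	a.ip6_dst_ops->gc_thresh = 4096;	/* tuning one namespace ... */
	printf("%d %d\n", a.ip6_dst_ops->gc_thresh,
	       b.ip6_dst_ops->gc_thresh);	/* ... leaves the other at 1024 */
	route_net_exit(&a);
	route_net_exit(&b);
	return 0;
}

Because every namespace owns its own copy, repointing the sysctl table entries at the per-net fields (as ipv6_route_sysctl_init() now does) means tuning gc_thresh or the other route knobs in one namespace cannot leak into another.
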
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1656c003b989..1b8196c8d145 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -339,11 +339,11 @@ out:
339 skb_reset_network_header(skb2); 339 skb_reset_network_header(skb2);
340 340
341 /* Try to guess incoming interface */ 341 /* Try to guess incoming interface */
342 rt6i = rt6_lookup(&iph6->saddr, NULL, NULL, 0); 342 rt6i = rt6_lookup(&init_net, &iph6->saddr, NULL, NULL, 0);
343 if (rt6i && rt6i->rt6i_dev) { 343 if (rt6i && rt6i->rt6i_dev) {
344 skb2->dev = rt6i->rt6i_dev; 344 skb2->dev = rt6i->rt6i_dev;
345 345
346 rt6i = rt6_lookup(&iph6->daddr, &iph6->saddr, NULL, 0); 346 rt6i = rt6_lookup(&init_net, &iph6->daddr, &iph6->saddr, NULL, 0);
347 347
348 if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) { 348 if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) {
349 struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev); 349 struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev);
@@ -393,7 +393,7 @@ isatap_srcok(struct sk_buff *skb, struct iphdr *iph, struct net_device *dev)
393 fl.oif = dev->ifindex; 393 fl.oif = dev->ifindex;
394 security_skb_classify_flow(skb, &fl); 394 security_skb_classify_flow(skb, &fl);
395 395
396 dst = ip6_route_output(NULL, &fl); 396 dst = ip6_route_output(&init_net, NULL, &fl);
397 if (!dst->error && (dst->dev == dev) && (neigh = dst->neighbour)) { 397 if (!dst->error && (dst->dev == dev) && (neigh = dst->neighbour)) {
398 398
399 addr6 = (struct in6_addr*)&neigh->primary_key; 399 addr6 = (struct in6_addr*)&neigh->primary_key;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
new file mode 100644
index 000000000000..827c5aa7524c
--- /dev/null
+++ b/net/ipv6/syncookies.c
@@ -0,0 +1,267 @@
1/*
2 * IPv6 Syncookies implementation for the Linux kernel
3 *
4 * Authors:
5 * Glenn Griffin <ggriffin.kernel@gmail.com>
6 *
7 * Based on IPv4 implementation by Andi Kleen
8 * linux/net/ipv4/syncookies.c
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 */
16
17#include <linux/tcp.h>
18#include <linux/random.h>
19#include <linux/cryptohash.h>
20#include <linux/kernel.h>
21#include <net/ipv6.h>
22#include <net/tcp.h>
23
24extern int sysctl_tcp_syncookies;
25extern __u32 syncookie_secret[2][16-3+SHA_DIGEST_WORDS];
26
27#define COOKIEBITS 24 /* Upper bits store count */
28#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
29
30/*
31 * This table has to be sorted and terminated with (__u16)-1.
32 * XXX generate a better table.
33 * Unresolved Issues: HIPPI with a 64k MSS is not well supported.
34 *
35 * Taken directly from the IPv4 implementation.
36 * Should this list be modified for IPv6 use, or is it close enough?
37 * RFC 2460, section 8.3 suggests MSS values 20 bytes less than their IPv4 counterparts.
38 */
39static __u16 const msstab[] = {
40 64 - 1,
41 256 - 1,
42 512 - 1,
43 536 - 1,
44 1024 - 1,
45 1440 - 1,
46 1460 - 1,
47 4312 - 1,
48 (__u16)-1
49};
50/* The number doesn't include the -1 terminator */
51#define NUM_MSS (ARRAY_SIZE(msstab) - 1)
52
53/*
54 * This (misnamed) value is the maximum permitted age of a syncookie.
55 * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
56 * sysctl_tcp_retries1. It's a rather complicated formula (exponential
57 * backoff) to compute at runtime so it's currently hardcoded here.
58 */
59#define COUNTER_TRIES 4
60
61static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
62 struct request_sock *req,
63 struct dst_entry *dst)
64{
65 struct inet_connection_sock *icsk = inet_csk(sk);
66 struct sock *child;
67
68 child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
69 if (child)
70 inet_csk_reqsk_queue_add(sk, req, child);
71 else
72 reqsk_free(req);
73
74 return child;
75}
76
77static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];
78
79static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
80 __be16 sport, __be16 dport, u32 count, int c)
81{
82 __u32 *tmp = __get_cpu_var(cookie_scratch);
83
84 /*
85 * we have 320 bits of information to hash, copy in the remaining
86 * 192 bits required for sha_transform, from the syncookie_secret
87 * and overwrite the digest with the secret
88 */
89 memcpy(tmp + 10, syncookie_secret[c], 44);
90 memcpy(tmp, saddr, 16);
91 memcpy(tmp + 4, daddr, 16);
92 tmp[8] = ((__force u32)sport << 16) + (__force u32)dport;
93 tmp[9] = count;
94 sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5);
95
96 return tmp[17];
97}
98
99static __u32 secure_tcp_syn_cookie(struct in6_addr *saddr, struct in6_addr *daddr,
100 __be16 sport, __be16 dport, __u32 sseq,
101 __u32 count, __u32 data)
102{
103 return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
104 sseq + (count << COOKIEBITS) +
105 ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
106 & COOKIEMASK));
107}
108
109static __u32 check_tcp_syn_cookie(__u32 cookie, struct in6_addr *saddr,
110 struct in6_addr *daddr, __be16 sport,
111 __be16 dport, __u32 sseq, __u32 count,
112 __u32 maxdiff)
113{
114 __u32 diff;
115
116 cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
117
118 diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
119 if (diff >= maxdiff)
120 return (__u32)-1;
121
122 return (cookie -
123 cookie_hash(saddr, daddr, sport, dport, count - diff, 1))
124 & COOKIEMASK;
125}
126
127__u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
128{
129 struct ipv6hdr *iph = ipv6_hdr(skb);
130 const struct tcphdr *th = tcp_hdr(skb);
131 int mssind;
132 const __u16 mss = *mssp;
133
134 tcp_sk(sk)->last_synq_overflow = jiffies;
135
136 for (mssind = 0; mss > msstab[mssind + 1]; mssind++)
137 ;
138 *mssp = msstab[mssind] + 1;
139
140 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
141
142 return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
143 th->dest, ntohl(th->seq),
144 jiffies / (HZ * 60), mssind);
145}
146
147static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
148{
149 struct ipv6hdr *iph = ipv6_hdr(skb);
150 const struct tcphdr *th = tcp_hdr(skb);
151 __u32 seq = ntohl(th->seq) - 1;
152 __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
153 th->source, th->dest, seq,
154 jiffies / (HZ * 60), COUNTER_TRIES);
155
156 return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
157}
158
159struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
160{
161 struct inet_request_sock *ireq;
162 struct inet6_request_sock *ireq6;
163 struct tcp_request_sock *treq;
164 struct ipv6_pinfo *np = inet6_sk(sk);
165 struct tcp_sock *tp = tcp_sk(sk);
166 const struct tcphdr *th = tcp_hdr(skb);
167 __u32 cookie = ntohl(th->ack_seq) - 1;
168 struct sock *ret = sk;
169 struct request_sock *req;
170 int mss;
171 struct dst_entry *dst;
172 __u8 rcv_wscale;
173
174 if (!sysctl_tcp_syncookies || !th->ack)
175 goto out;
176
177 if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
178 (mss = cookie_check(skb, cookie)) == 0) {
179 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED);
180 goto out;
181 }
182
183 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESRECV);
184
185 ret = NULL;
186 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
187 if (!req)
188 goto out;
189
190 ireq = inet_rsk(req);
191 ireq6 = inet6_rsk(req);
192 treq = tcp_rsk(req);
193 ireq6->pktopts = NULL;
194
195 if (security_inet_conn_request(sk, skb, req)) {
196 reqsk_free(req);
197 goto out;
198 }
199
200 req->mss = mss;
201 ireq->rmt_port = th->source;
202 ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
203 ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
204 if (ipv6_opt_accepted(sk, skb) ||
205 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
206 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
207 atomic_inc(&skb->users);
208 ireq6->pktopts = skb;
209 }
210
211 ireq6->iif = sk->sk_bound_dev_if;
212 /* So that link locals have meaning */
213 if (!sk->sk_bound_dev_if &&
214 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
215 ireq6->iif = inet6_iif(skb);
216
217 req->expires = 0UL;
218 req->retrans = 0;
219 ireq->snd_wscale = ireq->rcv_wscale = ireq->tstamp_ok = 0;
220 ireq->wscale_ok = ireq->sack_ok = 0;
221 treq->rcv_isn = ntohl(th->seq) - 1;
222 treq->snt_isn = cookie;
223
224 /*
225 * We need to look up the dst_entry to get the correct window size.
226 * This is taken from tcp_v6_syn_recv_sock. Somebody please enlighten
227 * me if there is a preferred way.
228 */
229 {
230 struct in6_addr *final_p = NULL, final;
231 struct flowi fl;
232 memset(&fl, 0, sizeof(fl));
233 fl.proto = IPPROTO_TCP;
234 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
235 if (np->opt && np->opt->srcrt) {
236 struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
237 ipv6_addr_copy(&final, &fl.fl6_dst);
238 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
239 final_p = &final;
240 }
241 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
242 fl.oif = sk->sk_bound_dev_if;
243 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
244 fl.fl_ip_sport = inet_sk(sk)->sport;
245 security_req_classify_flow(req, &fl);
246 if (ip6_dst_lookup(sk, &dst, &fl)) {
247 reqsk_free(req);
248 goto out;
249 }
250 if (final_p)
251 ipv6_addr_copy(&fl.fl6_dst, final_p);
252 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
253 goto out;
254 }
255
256 req->window_clamp = dst_metric(dst, RTAX_WINDOW);
257 tcp_select_initial_window(tcp_full_space(sk), req->mss,
258 &req->rcv_wnd, &req->window_clamp,
259 0, &rcv_wscale);
260
261 ireq->rcv_wscale = rcv_wscale;
262
263 ret = get_cookie_sock(sk, skb, req, dst);
264
265out: return ret;
266}
267
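
secure_tcp_syn_cookie() and check_tcp_syn_cookie() above pack three things into one 32-bit ISN: a keyed hash, a coarse time counter in the bits above COOKIEBITS, and extra data (the MSS-table index) in the low COOKIEBITS. The following standalone sketch reproduces just that arithmetic with the two hashes stubbed out as constants; it is illustrative only, not the kernel code.

#include <stdint.h>
#include <stdio.h>

#define COOKIEBITS 24
#define COOKIEMASK (((uint32_t)1 << COOKIEBITS) - 1)

/* stand-ins for cookie_hash(saddr, daddr, sport, dport, count, c) */
static uint32_t h1, h2;

static uint32_t make_cookie(uint32_t sseq, uint32_t count, uint32_t data)
{
	return h1 + sseq + (count << COOKIEBITS) + ((h2 + data) & COOKIEMASK);
}

static uint32_t check_cookie(uint32_t cookie, uint32_t sseq,
			     uint32_t count, uint32_t maxdiff)
{
	uint32_t diff;

	cookie -= h1 + sseq;
	diff = (count - (cookie >> COOKIEBITS)) & ((uint32_t)-1 >> COOKIEBITS);
	if (diff >= maxdiff)
		return (uint32_t)-1;		/* cookie too old */
	return (cookie - h2) & COOKIEMASK;	/* recovers data (MSS index) */
}

int main(void)
{
	h1 = 0x12345678;
	h2 = 0x9abcdef0;			/* arbitrary stub hash values */
	uint32_t c = make_cookie(1000, 42, 5);	/* sseq, counter, data */
	printf("%u\n", check_cookie(c, 1000, 44, 4));	/* prints 5 */
	return 0;
}

As in cookie_v6_init_sequence(), only the index into msstab survives the round trip, so the peer's advertised MSS is effectively rounded down to the nearest table entry when the cookie is later validated.
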
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index d6d3e68086f8..3804dcbbfab0 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -71,24 +71,11 @@ static int ipv6_sysctl_net_init(struct net *net)
71 ipv6_route_table = ipv6_route_sysctl_init(net); 71 ipv6_route_table = ipv6_route_sysctl_init(net);
72 if (!ipv6_route_table) 72 if (!ipv6_route_table)
73 goto out_ipv6_table; 73 goto out_ipv6_table;
74 ipv6_table[0].child = ipv6_route_table;
74 75
75 ipv6_icmp_table = ipv6_icmp_sysctl_init(net); 76 ipv6_icmp_table = ipv6_icmp_sysctl_init(net);
76 if (!ipv6_icmp_table) 77 if (!ipv6_icmp_table)
77 goto out_ipv6_route_table; 78 goto out_ipv6_route_table;
78
79 ipv6_route_table[0].data = &net->ipv6.sysctl.flush_delay;
80 /* ipv6_route_table[1].data will be handled when we have
81 routes per namespace */
82 ipv6_route_table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
83 ipv6_route_table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
84 ipv6_route_table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
85 ipv6_route_table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
86 ipv6_route_table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
87 ipv6_route_table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
88 ipv6_route_table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
89 ipv6_table[0].child = ipv6_route_table;
90
91 ipv6_icmp_table[0].data = &net->ipv6.sysctl.icmpv6_time;
92 ipv6_table[1].child = ipv6_icmp_table; 79 ipv6_table[1].child = ipv6_icmp_table;
93 80
94 ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; 81 ipv6_table[2].data = &net->ipv6.sysctl.bindv6only;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 12750f2b05ab..aacbb7688bf9 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -455,8 +455,7 @@ out:
455} 455}
456 456
457 457
458static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, 458static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
459 struct dst_entry *dst)
460{ 459{
461 struct inet6_request_sock *treq = inet6_rsk(req); 460 struct inet6_request_sock *treq = inet6_rsk(req);
462 struct ipv6_pinfo *np = inet6_sk(sk); 461 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -464,6 +463,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
464 struct ipv6_txoptions *opt = NULL; 463 struct ipv6_txoptions *opt = NULL;
465 struct in6_addr * final_p = NULL, final; 464 struct in6_addr * final_p = NULL, final;
466 struct flowi fl; 465 struct flowi fl;
466 struct dst_entry *dst;
467 int err = -1; 467 int err = -1;
468 468
469 memset(&fl, 0, sizeof(fl)); 469 memset(&fl, 0, sizeof(fl));
@@ -476,24 +476,22 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
476 fl.fl_ip_sport = inet_sk(sk)->sport; 476 fl.fl_ip_sport = inet_sk(sk)->sport;
477 security_req_classify_flow(req, &fl); 477 security_req_classify_flow(req, &fl);
478 478
479 if (dst == NULL) { 479 opt = np->opt;
480 opt = np->opt; 480 if (opt && opt->srcrt) {
481 if (opt && opt->srcrt) { 481 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
482 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; 482 ipv6_addr_copy(&final, &fl.fl6_dst);
483 ipv6_addr_copy(&final, &fl.fl6_dst); 483 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
484 ipv6_addr_copy(&fl.fl6_dst, rt0->addr); 484 final_p = &final;
485 final_p = &final;
486 }
487
488 err = ip6_dst_lookup(sk, &dst, &fl);
489 if (err)
490 goto done;
491 if (final_p)
492 ipv6_addr_copy(&fl.fl6_dst, final_p);
493 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
494 goto done;
495 } 485 }
496 486
487 err = ip6_dst_lookup(sk, &dst, &fl);
488 if (err)
489 goto done;
490 if (final_p)
491 ipv6_addr_copy(&fl.fl6_dst, final_p);
492 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
493 goto done;
494
497 skb = tcp_make_synack(sk, dst, req); 495 skb = tcp_make_synack(sk, dst, req);
498 if (skb) { 496 if (skb) {
499 struct tcphdr *th = tcp_hdr(skb); 497 struct tcphdr *th = tcp_hdr(skb);
@@ -514,6 +512,20 @@ done:
514 return err; 512 return err;
515} 513}
516 514
515static inline void syn_flood_warning(struct sk_buff *skb)
516{
517#ifdef CONFIG_SYN_COOKIES
518 if (sysctl_tcp_syncookies)
519 printk(KERN_INFO
520 "TCPv6: Possible SYN flooding on port %d. "
521 "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
522 else
523#endif
524 printk(KERN_INFO
525 "TCPv6: Possible SYN flooding on port %d. "
526 "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
527}
528
517static void tcp_v6_reqsk_destructor(struct request_sock *req) 529static void tcp_v6_reqsk_destructor(struct request_sock *req)
518{ 530{
519 if (inet6_rsk(req)->pktopts) 531 if (inet6_rsk(req)->pktopts)
@@ -917,7 +929,7 @@ done_opts:
917} 929}
918#endif 930#endif
919 931
920static struct request_sock_ops tcp6_request_sock_ops __read_mostly = { 932struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
921 .family = AF_INET6, 933 .family = AF_INET6,
922 .obj_size = sizeof(struct tcp6_request_sock), 934 .obj_size = sizeof(struct tcp6_request_sock),
923 .rtx_syn_ack = tcp_v6_send_synack, 935 .rtx_syn_ack = tcp_v6_send_synack,
@@ -1059,8 +1071,11 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1059 fl.fl_ip_sport = t1->source; 1071 fl.fl_ip_sport = t1->source;
1060 security_skb_classify_flow(skb, &fl); 1072 security_skb_classify_flow(skb, &fl);
1061 1073
1062 /* sk = NULL, but it is safe for now. RST socket required. */ 1074 /* Pass a socket to ip6_dst_lookup even though this is for an RST;
1063 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) { 1075 * the underlying function will use it to retrieve the network
1076 * namespace
1077 */
1078 if (!ip6_dst_lookup(tcp6_socket->sk, &buff->dst, &fl)) {
1064 1079
1065 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { 1080 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1066 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0); 1081 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
@@ -1160,7 +1175,7 @@ static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1160 fl.fl_ip_sport = t1->source; 1175 fl.fl_ip_sport = t1->source;
1161 security_skb_classify_flow(skb, &fl); 1176 security_skb_classify_flow(skb, &fl);
1162 1177
1163 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) { 1178 if (!ip6_dst_lookup(tcp6_socket->sk, &buff->dst, &fl)) {
1164 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) { 1179 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1165 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0); 1180 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
1166 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); 1181 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
@@ -1215,9 +1230,9 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1215 return NULL; 1230 return NULL;
1216 } 1231 }
1217 1232
1218#if 0 /*def CONFIG_SYN_COOKIES*/ 1233#ifdef CONFIG_SYN_COOKIES
1219 if (!th->rst && !th->syn && th->ack) 1234 if (!th->rst && !th->syn && th->ack)
1220 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt)); 1235 sk = cookie_v6_check(sk, skb);
1221#endif 1236#endif
1222 return sk; 1237 return sk;
1223} 1238}
@@ -1233,6 +1248,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1233 struct tcp_sock *tp = tcp_sk(sk); 1248 struct tcp_sock *tp = tcp_sk(sk);
1234 struct request_sock *req = NULL; 1249 struct request_sock *req = NULL;
1235 __u32 isn = TCP_SKB_CB(skb)->when; 1250 __u32 isn = TCP_SKB_CB(skb)->when;
1251#ifdef CONFIG_SYN_COOKIES
1252 int want_cookie = 0;
1253#else
1254#define want_cookie 0
1255#endif
1236 1256
1237 if (skb->protocol == htons(ETH_P_IP)) 1257 if (skb->protocol == htons(ETH_P_IP))
1238 return tcp_v4_conn_request(sk, skb); 1258 return tcp_v4_conn_request(sk, skb);
@@ -1240,12 +1260,14 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1240 if (!ipv6_unicast_destination(skb)) 1260 if (!ipv6_unicast_destination(skb))
1241 goto drop; 1261 goto drop;
1242 1262
1243 /*
1244 * There are no SYN attacks on IPv6, yet...
1245 */
1246 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1263 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1247 if (net_ratelimit()) 1264 if (net_ratelimit())
1248 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n"); 1265 syn_flood_warning(skb);
1266#ifdef CONFIG_SYN_COOKIES
1267 if (sysctl_tcp_syncookies)
1268 want_cookie = 1;
1269 else
1270#endif
1249 goto drop; 1271 goto drop;
1250 } 1272 }
1251 1273
@@ -1266,39 +1288,51 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1266 1288
1267 tcp_parse_options(skb, &tmp_opt, 0); 1289 tcp_parse_options(skb, &tmp_opt, 0);
1268 1290
1291 if (want_cookie) {
1292 tcp_clear_options(&tmp_opt);
1293 tmp_opt.saw_tstamp = 0;
1294 }
1295
1269 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; 1296 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1270 tcp_openreq_init(req, &tmp_opt, skb); 1297 tcp_openreq_init(req, &tmp_opt, skb);
1271 1298
1272 treq = inet6_rsk(req); 1299 treq = inet6_rsk(req);
1273 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); 1300 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1274 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); 1301 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1275 TCP_ECN_create_request(req, tcp_hdr(skb));
1276 treq->pktopts = NULL; 1302 treq->pktopts = NULL;
1277 if (ipv6_opt_accepted(sk, skb) || 1303 if (!want_cookie)
1278 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 1304 TCP_ECN_create_request(req, tcp_hdr(skb));
1279 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { 1305
1280 atomic_inc(&skb->users); 1306 if (want_cookie) {
1281 treq->pktopts = skb; 1307 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1282 } 1308 } else if (!isn) {
1283 treq->iif = sk->sk_bound_dev_if; 1309 if (ipv6_opt_accepted(sk, skb) ||
1310 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1311 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1312 atomic_inc(&skb->users);
1313 treq->pktopts = skb;
1314 }
1315 treq->iif = sk->sk_bound_dev_if;
1284 1316
1285 /* So that link locals have meaning */ 1317 /* So that link locals have meaning */
1286 if (!sk->sk_bound_dev_if && 1318 if (!sk->sk_bound_dev_if &&
1287 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) 1319 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1288 treq->iif = inet6_iif(skb); 1320 treq->iif = inet6_iif(skb);
1289 1321
1290 if (isn == 0)
1291 isn = tcp_v6_init_sequence(skb); 1322 isn = tcp_v6_init_sequence(skb);
1323 }
1292 1324
1293 tcp_rsk(req)->snt_isn = isn; 1325 tcp_rsk(req)->snt_isn = isn;
1294 1326
1295 security_inet_conn_request(sk, skb, req); 1327 security_inet_conn_request(sk, skb, req);
1296 1328
1297 if (tcp_v6_send_synack(sk, req, NULL)) 1329 if (tcp_v6_send_synack(sk, req))
1298 goto drop; 1330 goto drop;
1299 1331
1300 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1332 if (!want_cookie) {
1301 return 0; 1333 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1334 return 0;
1335 }
1302 1336
1303drop: 1337drop:
1304 if (req) 1338 if (req)
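
The tcp_v6_conn_request() change above makes the overload path explicit: with a full SYN queue the listener either answers from a cookie (the request is not queued and TCP options are cleared) or drops the SYN as it did before. A compressed sketch of that decision with stand-in names, not the kernel code:

#include <stdint.h>
#include <stdio.h>

static int sysctl_tcp_syncookies = 1;

static uint32_t cookie_isn(uint32_t peer_hash, uint16_t mss)
{
	/* stand-in for cookie_v6_init_sequence(): encode state in the ISN */
	return peer_hash ^ (uint32_t)mss;
}

/* returns the ISN to put in the SYNACK, or 0 to drop the SYN */
static uint32_t handle_syn(int queue_full, uint32_t peer_hash, uint16_t mss)
{
	int want_cookie = 0;

	if (queue_full) {
		if (!sysctl_tcp_syncookies)
			return 0;		/* drop, as before the patch */
		want_cookie = 1;		/* answer statelessly */
	}

	if (want_cookie)
		return cookie_isn(peer_hash, mss);

	/* normal path: the request would be queued and hashed here */
	return peer_hash + 12345;
}

int main(void)
{
	printf("%u\n", handle_syn(1, 0xdeadbeef, 1440));
	printf("%u\n", handle_syn(0, 0xdeadbeef, 1440));
	return 0;
}
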
diff --git a/net/ipv6/udp.c b/net/ipv6/udp_ipv6.c
index 53739de829db..55feac7ba717 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp_ipv6.c
@@ -400,7 +400,7 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
400 UDP_SKB_CB(skb)->partial_cov = 0; 400 UDP_SKB_CB(skb)->partial_cov = 0;
401 UDP_SKB_CB(skb)->cscov = skb->len; 401 UDP_SKB_CB(skb)->cscov = skb->len;
402 402
403 if (proto == IPPROTO_UDPLITE) { 403 if (IS_PROTO_UDPLITE(proto)) {
404 err = udplite_checksum_init(skb, uh); 404 err = udplite_checksum_init(skb, uh);
405 if (err) 405 if (err)
406 return err; 406 return err;
@@ -489,7 +489,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
489 489
490 if (udp_lib_checksum_complete(skb)) 490 if (udp_lib_checksum_complete(skb))
491 goto discard; 491 goto discard;
492 UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); 492 UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, IS_PROTO_UDPLITE(proto));
493 493
494 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); 494 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
495 495
@@ -510,11 +510,11 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
510 510
511short_packet: 511short_packet:
512 LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n", 512 LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n",
513 proto == IPPROTO_UDPLITE ? "-Lite" : "", 513 IS_PROTO_UDPLITE(proto) ? "-Lite" : "",
514 ulen, skb->len); 514 ulen, skb->len);
515 515
516discard: 516discard:
517 UDP6_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); 517 UDP6_INC_STATS_BH(UDP_MIB_INERRORS, IS_PROTO_UDPLITE(proto));
518 kfree_skb(skb); 518 kfree_skb(skb);
519 return 0; 519 return 0;
520} 520}
@@ -890,7 +890,7 @@ int udpv6_destroy_sock(struct sock *sk)
890int udpv6_setsockopt(struct sock *sk, int level, int optname, 890int udpv6_setsockopt(struct sock *sk, int level, int optname,
891 char __user *optval, int optlen) 891 char __user *optval, int optlen)
892{ 892{
893 if (level == SOL_UDP || level == SOL_UDPLITE) 893 if (IS_SOL_UDPFAMILY(level))
894 return udp_lib_setsockopt(sk, level, optname, optval, optlen, 894 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
895 udp_v6_push_pending_frames); 895 udp_v6_push_pending_frames);
896 return ipv6_setsockopt(sk, level, optname, optval, optlen); 896 return ipv6_setsockopt(sk, level, optname, optval, optlen);
@@ -900,7 +900,7 @@ int udpv6_setsockopt(struct sock *sk, int level, int optname,
900int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, 900int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
901 char __user *optval, int optlen) 901 char __user *optval, int optlen)
902{ 902{
903 if (level == SOL_UDP || level == SOL_UDPLITE) 903 if (IS_SOL_UDPFAMILY(level))
904 return udp_lib_setsockopt(sk, level, optname, optval, optlen, 904 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
905 udp_v6_push_pending_frames); 905 udp_v6_push_pending_frames);
906 return compat_ipv6_setsockopt(sk, level, optname, optval, optlen); 906 return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
@@ -910,7 +910,7 @@ int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
910int udpv6_getsockopt(struct sock *sk, int level, int optname, 910int udpv6_getsockopt(struct sock *sk, int level, int optname,
911 char __user *optval, int __user *optlen) 911 char __user *optval, int __user *optlen)
912{ 912{
913 if (level == SOL_UDP || level == SOL_UDPLITE) 913 if (IS_SOL_UDPFAMILY(level))
914 return udp_lib_getsockopt(sk, level, optname, optval, optlen); 914 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
915 return ipv6_getsockopt(sk, level, optname, optval, optlen); 915 return ipv6_getsockopt(sk, level, optname, optval, optlen);
916} 916}
@@ -919,7 +919,7 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
919int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, 919int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
920 char __user *optval, int __user *optlen) 920 char __user *optval, int __user *optlen)
921{ 921{
922 if (level == SOL_UDP || level == SOL_UDPLITE) 922 if (IS_SOL_UDPFAMILY(level))
923 return udp_lib_getsockopt(sk, level, optname, optval, optlen); 923 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
924 return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); 924 return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
925} 925}
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite_ipv6.c
index 87d4202522ee..87d4202522ee 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite_ipv6.c
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 7d20199ee1f3..e96dafdc7032 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -38,7 +38,7 @@ static struct dst_entry *xfrm6_dst_lookup(int tos, xfrm_address_t *saddr,
38 if (saddr) 38 if (saddr)
39 memcpy(&fl.fl6_src, saddr, sizeof(fl.fl6_src)); 39 memcpy(&fl.fl6_src, saddr, sizeof(fl.fl6_src));
40 40
41 dst = ip6_route_output(NULL, &fl); 41 dst = ip6_route_output(&init_net, NULL, &fl);
42 42
43 err = dst->error; 43 err = dst->error;
44 if (dst->error) { 44 if (dst->error) {
@@ -57,8 +57,9 @@ static int xfrm6_get_saddr(xfrm_address_t *saddr, xfrm_address_t *daddr)
57 if (IS_ERR(dst)) 57 if (IS_ERR(dst))
58 return -EHOSTUNREACH; 58 return -EHOSTUNREACH;
59 59
60 ipv6_get_saddr(dst, (struct in6_addr *)&daddr->a6, 60 ipv6_dev_get_saddr(ip6_dst_idev(dst)->dev,
61 (struct in6_addr *)&saddr->a6); 61 (struct in6_addr *)&daddr->a6,
62 (struct in6_addr *)&saddr->a6);
62 dst_release(dst); 63 dst_release(dst);
63 return 0; 64 return 0;
64} 65}
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 8b5f486ac80f..50c442fc99ce 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -48,6 +48,17 @@ struct pfkey_sock {
48 struct sock sk; 48 struct sock sk;
49 int registered; 49 int registered;
50 int promisc; 50 int promisc;
51
52 struct {
53 uint8_t msg_version;
54 uint32_t msg_pid;
55 int (*dump)(struct pfkey_sock *sk);
56 void (*done)(struct pfkey_sock *sk);
57 union {
58 struct xfrm_policy_walk policy;
59 struct xfrm_state_walk state;
60 } u;
61 } dump;
51}; 62};
52 63
53static inline struct pfkey_sock *pfkey_sk(struct sock *sk) 64static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
@@ -55,6 +66,27 @@ static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
55 return (struct pfkey_sock *)sk; 66 return (struct pfkey_sock *)sk;
56} 67}
57 68
69static int pfkey_can_dump(struct sock *sk)
70{
71 if (3 * atomic_read(&sk->sk_rmem_alloc) <= 2 * sk->sk_rcvbuf)
72 return 1;
73 return 0;
74}
75
76static int pfkey_do_dump(struct pfkey_sock *pfk)
77{
78 int rc;
79
80 rc = pfk->dump.dump(pfk);
81 if (rc == -ENOBUFS)
82 return 0;
83
84 pfk->dump.done(pfk);
85 pfk->dump.dump = NULL;
86 pfk->dump.done = NULL;
87 return rc;
88}
89
58static void pfkey_sock_destruct(struct sock *sk) 90static void pfkey_sock_destruct(struct sock *sk)
59{ 91{
60 skb_queue_purge(&sk->sk_receive_queue); 92 skb_queue_purge(&sk->sk_receive_queue);
@@ -1709,45 +1741,60 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
1709 return 0; 1741 return 0;
1710} 1742}
1711 1743
1712struct pfkey_dump_data
1713{
1714 struct sk_buff *skb;
1715 struct sadb_msg *hdr;
1716 struct sock *sk;
1717};
1718
1719static int dump_sa(struct xfrm_state *x, int count, void *ptr) 1744static int dump_sa(struct xfrm_state *x, int count, void *ptr)
1720{ 1745{
1721 struct pfkey_dump_data *data = ptr; 1746 struct pfkey_sock *pfk = ptr;
1722 struct sk_buff *out_skb; 1747 struct sk_buff *out_skb;
1723 struct sadb_msg *out_hdr; 1748 struct sadb_msg *out_hdr;
1724 1749
1750 if (!pfkey_can_dump(&pfk->sk))
1751 return -ENOBUFS;
1752
1725 out_skb = pfkey_xfrm_state2msg(x); 1753 out_skb = pfkey_xfrm_state2msg(x);
1726 if (IS_ERR(out_skb)) 1754 if (IS_ERR(out_skb))
1727 return PTR_ERR(out_skb); 1755 return PTR_ERR(out_skb);
1728 1756
1729 out_hdr = (struct sadb_msg *) out_skb->data; 1757 out_hdr = (struct sadb_msg *) out_skb->data;
1730 out_hdr->sadb_msg_version = data->hdr->sadb_msg_version; 1758 out_hdr->sadb_msg_version = pfk->dump.msg_version;
1731 out_hdr->sadb_msg_type = SADB_DUMP; 1759 out_hdr->sadb_msg_type = SADB_DUMP;
1732 out_hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto); 1760 out_hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto);
1733 out_hdr->sadb_msg_errno = 0; 1761 out_hdr->sadb_msg_errno = 0;
1734 out_hdr->sadb_msg_reserved = 0; 1762 out_hdr->sadb_msg_reserved = 0;
1735 out_hdr->sadb_msg_seq = count; 1763 out_hdr->sadb_msg_seq = count;
1736 out_hdr->sadb_msg_pid = data->hdr->sadb_msg_pid; 1764 out_hdr->sadb_msg_pid = pfk->dump.msg_pid;
1737 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, data->sk); 1765 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk);
1738 return 0; 1766 return 0;
1739} 1767}
1740 1768
1769static int pfkey_dump_sa(struct pfkey_sock *pfk)
1770{
1771 return xfrm_state_walk(&pfk->dump.u.state, dump_sa, (void *) pfk);
1772}
1773
1774static void pfkey_dump_sa_done(struct pfkey_sock *pfk)
1775{
1776 xfrm_state_walk_done(&pfk->dump.u.state);
1777}
1778
1741static int pfkey_dump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) 1779static int pfkey_dump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
1742{ 1780{
1743 u8 proto; 1781 u8 proto;
1744 struct pfkey_dump_data data = { .skb = skb, .hdr = hdr, .sk = sk }; 1782 struct pfkey_sock *pfk = pfkey_sk(sk);
1783
1784 if (pfk->dump.dump != NULL)
1785 return -EBUSY;
1745 1786
1746 proto = pfkey_satype2proto(hdr->sadb_msg_satype); 1787 proto = pfkey_satype2proto(hdr->sadb_msg_satype);
1747 if (proto == 0) 1788 if (proto == 0)
1748 return -EINVAL; 1789 return -EINVAL;
1749 1790
1750 return xfrm_state_walk(proto, dump_sa, &data); 1791 pfk->dump.msg_version = hdr->sadb_msg_version;
1792 pfk->dump.msg_pid = hdr->sadb_msg_pid;
1793 pfk->dump.dump = pfkey_dump_sa;
1794 pfk->dump.done = pfkey_dump_sa_done;
1795 xfrm_state_walk_init(&pfk->dump.u.state, proto);
1796
1797 return pfkey_do_dump(pfk);
1751} 1798}
1752 1799
1753static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) 1800static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
@@ -1780,7 +1827,9 @@ static int check_reqid(struct xfrm_policy *xp, int dir, int count, void *ptr)
1780 1827
1781static u32 gen_reqid(void) 1828static u32 gen_reqid(void)
1782{ 1829{
1830 struct xfrm_policy_walk walk;
1783 u32 start; 1831 u32 start;
1832 int rc;
1784 static u32 reqid = IPSEC_MANUAL_REQID_MAX; 1833 static u32 reqid = IPSEC_MANUAL_REQID_MAX;
1785 1834
1786 start = reqid; 1835 start = reqid;
@@ -1788,8 +1837,10 @@ static u32 gen_reqid(void)
1788 ++reqid; 1837 ++reqid;
1789 if (reqid == 0) 1838 if (reqid == 0)
1790 reqid = IPSEC_MANUAL_REQID_MAX+1; 1839 reqid = IPSEC_MANUAL_REQID_MAX+1;
1791 if (xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, check_reqid, 1840 xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN);
1792 (void*)&reqid) != -EEXIST) 1841 rc = xfrm_policy_walk(&walk, check_reqid, (void*)&reqid);
1842 xfrm_policy_walk_done(&walk);
1843 if (rc != -EEXIST)
1793 return reqid; 1844 return reqid;
1794 } while (reqid != start); 1845 } while (reqid != start);
1795 return 0; 1846 return 0;
@@ -2638,11 +2689,14 @@ out:
2638 2689
2639static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) 2690static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
2640{ 2691{
2641 struct pfkey_dump_data *data = ptr; 2692 struct pfkey_sock *pfk = ptr;
2642 struct sk_buff *out_skb; 2693 struct sk_buff *out_skb;
2643 struct sadb_msg *out_hdr; 2694 struct sadb_msg *out_hdr;
2644 int err; 2695 int err;
2645 2696
2697 if (!pfkey_can_dump(&pfk->sk))
2698 return -ENOBUFS;
2699
2646 out_skb = pfkey_xfrm_policy2msg_prep(xp); 2700 out_skb = pfkey_xfrm_policy2msg_prep(xp);
2647 if (IS_ERR(out_skb)) 2701 if (IS_ERR(out_skb))
2648 return PTR_ERR(out_skb); 2702 return PTR_ERR(out_skb);
@@ -2652,21 +2706,40 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
2652 return err; 2706 return err;
2653 2707
2654 out_hdr = (struct sadb_msg *) out_skb->data; 2708 out_hdr = (struct sadb_msg *) out_skb->data;
2655 out_hdr->sadb_msg_version = data->hdr->sadb_msg_version; 2709 out_hdr->sadb_msg_version = pfk->dump.msg_version;
2656 out_hdr->sadb_msg_type = SADB_X_SPDDUMP; 2710 out_hdr->sadb_msg_type = SADB_X_SPDDUMP;
2657 out_hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; 2711 out_hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
2658 out_hdr->sadb_msg_errno = 0; 2712 out_hdr->sadb_msg_errno = 0;
2659 out_hdr->sadb_msg_seq = count; 2713 out_hdr->sadb_msg_seq = count;
2660 out_hdr->sadb_msg_pid = data->hdr->sadb_msg_pid; 2714 out_hdr->sadb_msg_pid = pfk->dump.msg_pid;
2661 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, data->sk); 2715 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk);
2662 return 0; 2716 return 0;
2663} 2717}
2664 2718
2719static int pfkey_dump_sp(struct pfkey_sock *pfk)
2720{
2721 return xfrm_policy_walk(&pfk->dump.u.policy, dump_sp, (void *) pfk);
2722}
2723
2724static void pfkey_dump_sp_done(struct pfkey_sock *pfk)
2725{
2726 xfrm_policy_walk_done(&pfk->dump.u.policy);
2727}
2728
2665static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) 2729static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
2666{ 2730{
2667 struct pfkey_dump_data data = { .skb = skb, .hdr = hdr, .sk = sk }; 2731 struct pfkey_sock *pfk = pfkey_sk(sk);
2732
2733 if (pfk->dump.dump != NULL)
2734 return -EBUSY;
2735
2736 pfk->dump.msg_version = hdr->sadb_msg_version;
2737 pfk->dump.msg_pid = hdr->sadb_msg_pid;
2738 pfk->dump.dump = pfkey_dump_sp;
2739 pfk->dump.done = pfkey_dump_sp_done;
2740 xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
2668 2741
2669 return xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_sp, &data); 2742 return pfkey_do_dump(pfk);
2670} 2743}
2671 2744
2672static int key_notify_policy_flush(struct km_event *c) 2745static int key_notify_policy_flush(struct km_event *c)
@@ -3671,6 +3744,7 @@ static int pfkey_recvmsg(struct kiocb *kiocb,
3671 int flags) 3744 int flags)
3672{ 3745{
3673 struct sock *sk = sock->sk; 3746 struct sock *sk = sock->sk;
3747 struct pfkey_sock *pfk = pfkey_sk(sk);
3674 struct sk_buff *skb; 3748 struct sk_buff *skb;
3675 int copied, err; 3749 int copied, err;
3676 3750
@@ -3698,6 +3772,10 @@ static int pfkey_recvmsg(struct kiocb *kiocb,
3698 3772
3699 err = (flags & MSG_TRUNC) ? skb->len : copied; 3773 err = (flags & MSG_TRUNC) ? skb->len : copied;
3700 3774
3775 if (pfk->dump.dump != NULL &&
3776 3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
3777 pfkey_do_dump(pfk);
3778
3701out_free: 3779out_free:
3702 skb_free_datagram(sk, skb); 3780 skb_free_datagram(sk, skb);
3703out: 3781out:
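
The af_key hunks above replace the old one-shot SADB/SPD walks with a per-socket resumable dump: pfkey_dump()/pfkey_spddump() stash the walker state and the dump/done callbacks in pfk->dump, dump_sa()/dump_sp() bail out with -ENOBUFS once pfkey_can_dump() sees the receive buffer filling up, and the new pfkey_recvmsg() hook restarts pfkey_do_dump() once the reader has drained the socket to at most a third of sk_rcvbuf. A self-contained userspace sketch of that shape (toy names and sizes, not the kernel code) might look like this:

    #include <stdio.h>

    #define NRECORDS 10   /* how many SAs/policies the toy "kernel" holds */
    #define BUFSLOTS 3    /* stand-in for the socket receive buffer size  */

    struct dumper {
        int next;         /* walker position, like pfk->dump.u.*          */
        int buffered;     /* records queued but not yet read              */
        int active;       /* like pfk->dump.dump != NULL                  */
    };

    static int do_dump(struct dumper *d)
    {
        while (d->next < NRECORDS) {
            if (d->buffered >= BUFSLOTS)
                return -1;                 /* like -ENOBUFS: pause, keep state */
            printf("queued record %d\n", d->next);
            d->next++;
            d->buffered++;
        }
        d->active = 0;                     /* walk finished, like ...walk_done() */
        return 0;
    }

    int main(void)
    {
        struct dumper d = { .next = 0, .buffered = 0, .active = 1 };

        do_dump(&d);                       /* pfkey_dump(): start the walk      */
        while (d.active || d.buffered) {
            /* the reader consumes one record, like recvmsg()               */
            printf("read one record, %d still buffered\n", --d.buffered);
            /* resume once the buffer is no more than a third full          */
            if (d.active && 3 * d.buffered <= BUFSLOTS)
                do_dump(&d);
        }
        return 0;
    }

The point of the pattern is that the dump state lives on the socket, so a slow reader throttles the walk instead of forcing the kernel to drop messages; gen_reqid() above uses the same walk_init/walk/walk_done triple, just synchronously.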
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 54f46bc80cfe..9d7a19581a29 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -19,7 +19,6 @@ mac80211-y := \
19 ieee80211_iface.o \ 19 ieee80211_iface.o \
20 ieee80211_rate.o \ 20 ieee80211_rate.o \
21 michael.o \ 21 michael.o \
22 regdomain.o \
23 tkip.o \ 22 tkip.o \
24 aes_ccm.o \ 23 aes_ccm.o \
25 cfg.o \ 24 cfg.o \
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 22c9619ba776..e7535ffc8e1c 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -34,10 +34,13 @@ nl80211_type_to_mac80211_type(enum nl80211_iftype type)
34} 34}
35 35
36static int ieee80211_add_iface(struct wiphy *wiphy, char *name, 36static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
37 enum nl80211_iftype type) 37 enum nl80211_iftype type, u32 *flags)
38{ 38{
39 struct ieee80211_local *local = wiphy_priv(wiphy); 39 struct ieee80211_local *local = wiphy_priv(wiphy);
40 enum ieee80211_if_types itype; 40 enum ieee80211_if_types itype;
41 struct net_device *dev;
42 struct ieee80211_sub_if_data *sdata;
43 int err;
41 44
42 if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) 45 if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED))
43 return -ENODEV; 46 return -ENODEV;
@@ -46,7 +49,13 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
46 if (itype == IEEE80211_IF_TYPE_INVALID) 49 if (itype == IEEE80211_IF_TYPE_INVALID)
47 return -EINVAL; 50 return -EINVAL;
48 51
49 return ieee80211_if_add(local->mdev, name, NULL, itype); 52 err = ieee80211_if_add(local->mdev, name, &dev, itype);
53 if (err || itype != IEEE80211_IF_TYPE_MNTR || !flags)
54 return err;
55
56 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
57 sdata->u.mntr_flags = *flags;
58 return 0;
50} 59}
51 60
52static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) 61static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex)
@@ -69,7 +78,7 @@ static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex)
69} 78}
70 79
71static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex, 80static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
72 enum nl80211_iftype type) 81 enum nl80211_iftype type, u32 *flags)
73{ 82{
74 struct ieee80211_local *local = wiphy_priv(wiphy); 83 struct ieee80211_local *local = wiphy_priv(wiphy);
75 struct net_device *dev; 84 struct net_device *dev;
@@ -99,6 +108,10 @@ static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
99 ieee80211_if_reinit(dev); 108 ieee80211_if_reinit(dev);
100 ieee80211_if_set_type(dev, itype); 109 ieee80211_if_set_type(dev, itype);
101 110
111 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR || !flags)
112 return 0;
113
114 sdata->u.mntr_flags = *flags;
102 return 0; 115 return 0;
103} 116}
104 117
@@ -110,6 +123,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
110 struct sta_info *sta = NULL; 123 struct sta_info *sta = NULL;
111 enum ieee80211_key_alg alg; 124 enum ieee80211_key_alg alg;
112 int ret; 125 int ret;
126 struct ieee80211_key *key;
113 127
114 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 128 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
115 129
@@ -128,16 +142,21 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
128 return -EINVAL; 142 return -EINVAL;
129 } 143 }
130 144
145 key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key);
146 if (!key)
147 return -ENOMEM;
148
131 if (mac_addr) { 149 if (mac_addr) {
132 sta = sta_info_get(sdata->local, mac_addr); 150 sta = sta_info_get(sdata->local, mac_addr);
133 if (!sta) 151 if (!sta) {
152 ieee80211_key_free(key);
134 return -ENOENT; 153 return -ENOENT;
154 }
135 } 155 }
136 156
157 ieee80211_key_link(key, sdata, sta);
158
137 ret = 0; 159 ret = 0;
138 if (!ieee80211_key_alloc(sdata, sta, alg, key_idx,
139 params->key_len, params->key))
140 ret = -ENOMEM;
141 160
142 if (sta) 161 if (sta)
143 sta_info_put(sta); 162 sta_info_put(sta);
@@ -151,6 +170,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
151 struct ieee80211_sub_if_data *sdata; 170 struct ieee80211_sub_if_data *sdata;
152 struct sta_info *sta; 171 struct sta_info *sta;
153 int ret; 172 int ret;
173 struct ieee80211_key *key;
154 174
155 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 175 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
156 176
@@ -160,9 +180,11 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
160 return -ENOENT; 180 return -ENOENT;
161 181
162 ret = 0; 182 ret = 0;
163 if (sta->key) 183 if (sta->key) {
164 ieee80211_key_free(sta->key); 184 key = sta->key;
165 else 185 ieee80211_key_free(key);
186 WARN_ON(sta->key);
187 } else
166 ret = -ENOENT; 188 ret = -ENOENT;
167 189
168 sta_info_put(sta); 190 sta_info_put(sta);
@@ -172,7 +194,9 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
172 if (!sdata->keys[key_idx]) 194 if (!sdata->keys[key_idx])
173 return -ENOENT; 195 return -ENOENT;
174 196
175 ieee80211_key_free(sdata->keys[key_idx]); 197 key = sdata->keys[key_idx];
198 ieee80211_key_free(key);
199 WARN_ON(sdata->keys[key_idx]);
176 200
177 return 0; 201 return 0;
178} 202}
@@ -498,7 +522,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
498{ 522{
499 u32 rates; 523 u32 rates;
500 int i, j; 524 int i, j;
501 struct ieee80211_hw_mode *mode; 525 struct ieee80211_supported_band *sband;
502 526
503 if (params->station_flags & STATION_FLAG_CHANGED) { 527 if (params->station_flags & STATION_FLAG_CHANGED) {
504 sta->flags &= ~WLAN_STA_AUTHORIZED; 528 sta->flags &= ~WLAN_STA_AUTHORIZED;
@@ -525,15 +549,16 @@ static void sta_apply_parameters(struct ieee80211_local *local,
525 549
526 if (params->supported_rates) { 550 if (params->supported_rates) {
527 rates = 0; 551 rates = 0;
528 mode = local->oper_hw_mode; 552 sband = local->hw.wiphy->bands[local->oper_channel->band];
553
529 for (i = 0; i < params->supported_rates_len; i++) { 554 for (i = 0; i < params->supported_rates_len; i++) {
530 int rate = (params->supported_rates[i] & 0x7f) * 5; 555 int rate = (params->supported_rates[i] & 0x7f) * 5;
531 for (j = 0; j < mode->num_rates; j++) { 556 for (j = 0; j < sband->n_bitrates; j++) {
532 if (mode->rates[j].rate == rate) 557 if (sband->bitrates[j].bitrate == rate)
533 rates |= BIT(j); 558 rates |= BIT(j);
534 } 559 }
535 } 560 }
536 sta->supp_rates = rates; 561 sta->supp_rates[local->oper_channel->band] = rates;
537 } 562 }
538} 563}
539 564
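
The sta_apply_parameters() change above computes the station's per-band rate bitmap from the supported-rates octets. Those octets use the IEEE 802.11 encoding (units of 500 kbps, bit 7 marking a basic rate), while mac80211 bitrate tables are in units of 100 kbps, hence the (x & 0x7f) * 5 conversion before matching against sband->bitrates[]. A standalone sketch of that conversion (toy bitrate table, hypothetical helper name):

    #include <stdio.h>
    #include <stdint.h>

    /* Band bitrate table in units of 100 kbps, as in
     * struct ieee80211_supported_band::bitrates[i].bitrate. */
    static const int bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180 };

    /* Supported-rates octets: units of 500 kbps, bit 7 is the "basic
     * rate" marker and is masked off, hence (octet & 0x7f) * 5. */
    static uint32_t rates_to_bitmap(const uint8_t *ie, int len)
    {
        uint32_t map = 0;
        int i, j;

        for (i = 0; i < len; i++) {
            int rate = (ie[i] & 0x7f) * 5;   /* now in 100 kbps units */
            for (j = 0; j < (int)(sizeof(bitrates) / sizeof(bitrates[0])); j++)
                if (bitrates[j] == rate)
                    map |= 1u << j;
        }
        return map;
    }

    int main(void)
    {
        /* 1 Mb/s (basic), 2 Mb/s (basic), 5.5, 11 and 6 Mb/s */
        uint8_t ie[] = { 0x82, 0x84, 0x0b, 0x16, 0x0c };

        printf("rate bitmap: 0x%x\n", (unsigned)rates_to_bitmap(ie, sizeof(ie)));
        return 0;
    }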
@@ -548,13 +573,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
548 if (!netif_running(dev)) 573 if (!netif_running(dev))
549 return -ENETDOWN; 574 return -ENETDOWN;
550 575
551 /* XXX: get sta belonging to dev */
552 sta = sta_info_get(local, mac);
553 if (sta) {
554 sta_info_put(sta);
555 return -EEXIST;
556 }
557
558 if (params->vlan) { 576 if (params->vlan) {
559 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 577 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
560 578
@@ -565,8 +583,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
565 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 583 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
566 584
567 sta = sta_info_add(local, dev, mac, GFP_KERNEL); 585 sta = sta_info_add(local, dev, mac, GFP_KERNEL);
568 if (!sta) 586 if (IS_ERR(sta))
569 return -ENOMEM; 587 return PTR_ERR(sta);
570 588
571 sta->dev = sdata->dev; 589 sta->dev = sdata->dev;
572 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN || 590 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN ||
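
The reworked ieee80211_add_key() above allocates the key object first, frees it on the station-lookup failure path, and only links it via ieee80211_key_link() once everything else has succeeded, instead of allocating at the very end. A compressed, standalone illustration of that allocate-early/link-late error handling (all names are stand-ins, not mac80211 API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    struct key { int alg; };
    struct sta { struct key *key; };

    /* Stand-ins for sta_info_get()/ieee80211_key_link(); illustrative only. */
    static struct sta *lookup_sta(int have_sta)
    {
        static struct sta s;
        return have_sta ? &s : NULL;
    }

    static void link_key(struct sta *sta, struct key *k)
    {
        if (sta)
            sta->key = k;
    }

    /* Allocate the key before any lookup that can fail, and free it on
     * every later failure path, so no path leaks or links a half-set-up
     * key. (The toy never frees a successfully linked key.) */
    static int add_key(int alg, int have_sta, int use_sta)
    {
        struct key *key = malloc(sizeof(*key));
        struct sta *sta = NULL;

        if (!key)
            return -ENOMEM;
        key->alg = alg;

        if (use_sta) {
            sta = lookup_sta(have_sta);
            if (!sta) {
                free(key);          /* like ieee80211_key_free() on -ENOENT */
                return -ENOENT;
            }
        }

        link_key(sta, key);         /* like ieee80211_key_link()            */
        return 0;
    }

    int main(void)
    {
        printf("add_key ok: %d\n", add_key(1, 1, 1));
        printf("add_key missing sta: %d\n", add_key(1, 0, 1));
        return 0;
    }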
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 60514b2c97b9..4736c64937b4 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -19,41 +19,6 @@ int mac80211_open_file_generic(struct inode *inode, struct file *file)
19 return 0; 19 return 0;
20} 20}
21 21
22static const char *ieee80211_mode_str(int mode)
23{
24 switch (mode) {
25 case MODE_IEEE80211A:
26 return "IEEE 802.11a";
27 case MODE_IEEE80211B:
28 return "IEEE 802.11b";
29 case MODE_IEEE80211G:
30 return "IEEE 802.11g";
31 default:
32 return "UNKNOWN";
33 }
34}
35
36static ssize_t modes_read(struct file *file, char __user *userbuf,
37 size_t count, loff_t *ppos)
38{
39 struct ieee80211_local *local = file->private_data;
40 struct ieee80211_hw_mode *mode;
41 char buf[150], *p = buf;
42
43 /* FIXME: locking! */
44 list_for_each_entry(mode, &local->modes_list, list) {
45 p += scnprintf(p, sizeof(buf)+buf-p,
46 "%s\n", ieee80211_mode_str(mode->mode));
47 }
48
49 return simple_read_from_buffer(userbuf, count, ppos, buf, p-buf);
50}
51
52static const struct file_operations modes_ops = {
53 .read = modes_read,
54 .open = mac80211_open_file_generic,
55};
56
57#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \ 22#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \
58static ssize_t name## _read(struct file *file, char __user *userbuf, \ 23static ssize_t name## _read(struct file *file, char __user *userbuf, \
59 size_t count, loff_t *ppos) \ 24 size_t count, loff_t *ppos) \
@@ -80,10 +45,8 @@ static const struct file_operations name## _ops = { \
80 local->debugfs.name = NULL; 45 local->debugfs.name = NULL;
81 46
82 47
83DEBUGFS_READONLY_FILE(channel, 20, "%d",
84 local->hw.conf.channel);
85DEBUGFS_READONLY_FILE(frequency, 20, "%d", 48DEBUGFS_READONLY_FILE(frequency, 20, "%d",
86 local->hw.conf.freq); 49 local->hw.conf.channel->center_freq);
87DEBUGFS_READONLY_FILE(antenna_sel_tx, 20, "%d", 50DEBUGFS_READONLY_FILE(antenna_sel_tx, 20, "%d",
88 local->hw.conf.antenna_sel_tx); 51 local->hw.conf.antenna_sel_tx);
89DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d", 52DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d",
@@ -100,8 +63,6 @@ DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
100 local->long_retry_limit); 63 local->long_retry_limit);
101DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d", 64DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d",
102 local->total_ps_buffered); 65 local->total_ps_buffered);
103DEBUGFS_READONLY_FILE(mode, 20, "%s",
104 ieee80211_mode_str(local->hw.conf.phymode));
105DEBUGFS_READONLY_FILE(wep_iv, 20, "%#06x", 66DEBUGFS_READONLY_FILE(wep_iv, 20, "%#06x",
106 local->wep_iv & 0xffffff); 67 local->wep_iv & 0xffffff);
107DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s", 68DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s",
@@ -294,7 +255,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
294 local->debugfs.stations = debugfs_create_dir("stations", phyd); 255 local->debugfs.stations = debugfs_create_dir("stations", phyd);
295 local->debugfs.keys = debugfs_create_dir("keys", phyd); 256 local->debugfs.keys = debugfs_create_dir("keys", phyd);
296 257
297 DEBUGFS_ADD(channel);
298 DEBUGFS_ADD(frequency); 258 DEBUGFS_ADD(frequency);
299 DEBUGFS_ADD(antenna_sel_tx); 259 DEBUGFS_ADD(antenna_sel_tx);
300 DEBUGFS_ADD(antenna_sel_rx); 260 DEBUGFS_ADD(antenna_sel_rx);
@@ -304,9 +264,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
304 DEBUGFS_ADD(short_retry_limit); 264 DEBUGFS_ADD(short_retry_limit);
305 DEBUGFS_ADD(long_retry_limit); 265 DEBUGFS_ADD(long_retry_limit);
306 DEBUGFS_ADD(total_ps_buffered); 266 DEBUGFS_ADD(total_ps_buffered);
307 DEBUGFS_ADD(mode);
308 DEBUGFS_ADD(wep_iv); 267 DEBUGFS_ADD(wep_iv);
309 DEBUGFS_ADD(modes);
310 268
311 statsd = debugfs_create_dir("statistics", phyd); 269 statsd = debugfs_create_dir("statistics", phyd);
312 local->debugfs.statistics = statsd; 270 local->debugfs.statistics = statsd;
@@ -356,7 +314,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
356 314
357void debugfs_hw_del(struct ieee80211_local *local) 315void debugfs_hw_del(struct ieee80211_local *local)
358{ 316{
359 DEBUGFS_DEL(channel);
360 DEBUGFS_DEL(frequency); 317 DEBUGFS_DEL(frequency);
361 DEBUGFS_DEL(antenna_sel_tx); 318 DEBUGFS_DEL(antenna_sel_tx);
362 DEBUGFS_DEL(antenna_sel_rx); 319 DEBUGFS_DEL(antenna_sel_rx);
@@ -366,9 +323,7 @@ void debugfs_hw_del(struct ieee80211_local *local)
366 DEBUGFS_DEL(short_retry_limit); 323 DEBUGFS_DEL(short_retry_limit);
367 DEBUGFS_DEL(long_retry_limit); 324 DEBUGFS_DEL(long_retry_limit);
368 DEBUGFS_DEL(total_ps_buffered); 325 DEBUGFS_DEL(total_ps_buffered);
369 DEBUGFS_DEL(mode);
370 DEBUGFS_DEL(wep_iv); 326 DEBUGFS_DEL(wep_iv);
371 DEBUGFS_DEL(modes);
372 327
373 DEBUGFS_STATS_DEL(transmitted_fragment_count); 328 DEBUGFS_STATS_DEL(transmitted_fragment_count);
374 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count); 329 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 829872a3ae81..29f7b98ba1fb 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -91,7 +91,6 @@ static const struct file_operations name##_ops = { \
91/* common attributes */ 91/* common attributes */
92IEEE80211_IF_FILE(channel_use, channel_use, DEC); 92IEEE80211_IF_FILE(channel_use, channel_use, DEC);
93IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); 93IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
94IEEE80211_IF_FILE(ieee802_1x_pac, ieee802_1x_pac, DEC);
95 94
96/* STA/IBSS attributes */ 95/* STA/IBSS attributes */
97IEEE80211_IF_FILE(state, u.sta.state, DEC); 96IEEE80211_IF_FILE(state, u.sta.state, DEC);
@@ -148,7 +147,6 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
148{ 147{
149 DEBUGFS_ADD(channel_use, sta); 148 DEBUGFS_ADD(channel_use, sta);
150 DEBUGFS_ADD(drop_unencrypted, sta); 149 DEBUGFS_ADD(drop_unencrypted, sta);
151 DEBUGFS_ADD(ieee802_1x_pac, sta);
152 DEBUGFS_ADD(state, sta); 150 DEBUGFS_ADD(state, sta);
153 DEBUGFS_ADD(bssid, sta); 151 DEBUGFS_ADD(bssid, sta);
154 DEBUGFS_ADD(prev_bssid, sta); 152 DEBUGFS_ADD(prev_bssid, sta);
@@ -169,7 +167,6 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
169{ 167{
170 DEBUGFS_ADD(channel_use, ap); 168 DEBUGFS_ADD(channel_use, ap);
171 DEBUGFS_ADD(drop_unencrypted, ap); 169 DEBUGFS_ADD(drop_unencrypted, ap);
172 DEBUGFS_ADD(ieee802_1x_pac, ap);
173 DEBUGFS_ADD(num_sta_ps, ap); 170 DEBUGFS_ADD(num_sta_ps, ap);
174 DEBUGFS_ADD(dtim_count, ap); 171 DEBUGFS_ADD(dtim_count, ap);
175 DEBUGFS_ADD(num_beacons, ap); 172 DEBUGFS_ADD(num_beacons, ap);
@@ -182,7 +179,6 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
182{ 179{
183 DEBUGFS_ADD(channel_use, wds); 180 DEBUGFS_ADD(channel_use, wds);
184 DEBUGFS_ADD(drop_unencrypted, wds); 181 DEBUGFS_ADD(drop_unencrypted, wds);
185 DEBUGFS_ADD(ieee802_1x_pac, wds);
186 DEBUGFS_ADD(peer, wds); 182 DEBUGFS_ADD(peer, wds);
187} 183}
188 184
@@ -190,7 +186,6 @@ static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
190{ 186{
191 DEBUGFS_ADD(channel_use, vlan); 187 DEBUGFS_ADD(channel_use, vlan);
192 DEBUGFS_ADD(drop_unencrypted, vlan); 188 DEBUGFS_ADD(drop_unencrypted, vlan);
193 DEBUGFS_ADD(ieee802_1x_pac, vlan);
194} 189}
195 190
196static void add_monitor_files(struct ieee80211_sub_if_data *sdata) 191static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -234,7 +229,6 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata)
234{ 229{
235 DEBUGFS_DEL(channel_use, sta); 230 DEBUGFS_DEL(channel_use, sta);
236 DEBUGFS_DEL(drop_unencrypted, sta); 231 DEBUGFS_DEL(drop_unencrypted, sta);
237 DEBUGFS_DEL(ieee802_1x_pac, sta);
238 DEBUGFS_DEL(state, sta); 232 DEBUGFS_DEL(state, sta);
239 DEBUGFS_DEL(bssid, sta); 233 DEBUGFS_DEL(bssid, sta);
240 DEBUGFS_DEL(prev_bssid, sta); 234 DEBUGFS_DEL(prev_bssid, sta);
@@ -255,7 +249,6 @@ static void del_ap_files(struct ieee80211_sub_if_data *sdata)
255{ 249{
256 DEBUGFS_DEL(channel_use, ap); 250 DEBUGFS_DEL(channel_use, ap);
257 DEBUGFS_DEL(drop_unencrypted, ap); 251 DEBUGFS_DEL(drop_unencrypted, ap);
258 DEBUGFS_DEL(ieee802_1x_pac, ap);
259 DEBUGFS_DEL(num_sta_ps, ap); 252 DEBUGFS_DEL(num_sta_ps, ap);
260 DEBUGFS_DEL(dtim_count, ap); 253 DEBUGFS_DEL(dtim_count, ap);
261 DEBUGFS_DEL(num_beacons, ap); 254 DEBUGFS_DEL(num_beacons, ap);
@@ -268,7 +261,6 @@ static void del_wds_files(struct ieee80211_sub_if_data *sdata)
268{ 261{
269 DEBUGFS_DEL(channel_use, wds); 262 DEBUGFS_DEL(channel_use, wds);
270 DEBUGFS_DEL(drop_unencrypted, wds); 263 DEBUGFS_DEL(drop_unencrypted, wds);
271 DEBUGFS_DEL(ieee802_1x_pac, wds);
272 DEBUGFS_DEL(peer, wds); 264 DEBUGFS_DEL(peer, wds);
273} 265}
274 266
@@ -276,7 +268,6 @@ static void del_vlan_files(struct ieee80211_sub_if_data *sdata)
276{ 268{
277 DEBUGFS_DEL(channel_use, vlan); 269 DEBUGFS_DEL(channel_use, vlan);
278 DEBUGFS_DEL(drop_unencrypted, vlan); 270 DEBUGFS_DEL(drop_unencrypted, vlan);
279 DEBUGFS_DEL(ieee802_1x_pac, vlan);
280} 271}
281 272
282static void del_monitor_files(struct ieee80211_sub_if_data *sdata) 273static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 8f5944c53d4e..ed7c9f3b4602 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -33,25 +33,16 @@ static ssize_t sta_ ##name## _read(struct file *file, \
33#define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n") 33#define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n")
34#define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") 34#define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n")
35 35
36#define STA_READ_RATE(name, field) \ 36#define STA_OPS(name) \
37static ssize_t sta_##name##_read(struct file *file, \ 37static const struct file_operations sta_ ##name## _ops = { \
38 char __user *userbuf, \ 38 .read = sta_##name##_read, \
39 size_t count, loff_t *ppos) \ 39 .open = mac80211_open_file_generic, \
40{ \
41 struct sta_info *sta = file->private_data; \
42 struct ieee80211_local *local = wdev_priv(sta->dev->ieee80211_ptr);\
43 struct ieee80211_hw_mode *mode = local->oper_hw_mode; \
44 char buf[20]; \
45 int res = scnprintf(buf, sizeof(buf), "%d\n", \
46 (sta->field >= 0 && \
47 sta->field < mode->num_rates) ? \
48 mode->rates[sta->field].rate : -1); \
49 return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
50} 40}
51 41
52#define STA_OPS(name) \ 42#define STA_OPS_WR(name) \
53static const struct file_operations sta_ ##name## _ops = { \ 43static const struct file_operations sta_ ##name## _ops = { \
54 .read = sta_##name##_read, \ 44 .read = sta_##name##_read, \
45 .write = sta_##name##_write, \
55 .open = mac80211_open_file_generic, \ 46 .open = mac80211_open_file_generic, \
56} 47}
57 48
@@ -70,8 +61,6 @@ STA_FILE(rx_fragments, rx_fragments, LU);
70STA_FILE(rx_dropped, rx_dropped, LU); 61STA_FILE(rx_dropped, rx_dropped, LU);
71STA_FILE(tx_fragments, tx_fragments, LU); 62STA_FILE(tx_fragments, tx_fragments, LU);
72STA_FILE(tx_filtered, tx_filtered_count, LU); 63STA_FILE(tx_filtered, tx_filtered_count, LU);
73STA_FILE(txrate, txrate, RATE);
74STA_FILE(last_txrate, last_txrate, RATE);
75STA_FILE(tx_retry_failed, tx_retry_failed, LU); 64STA_FILE(tx_retry_failed, tx_retry_failed, LU);
76STA_FILE(tx_retry_count, tx_retry_count, LU); 65STA_FILE(tx_retry_count, tx_retry_count, LU);
77STA_FILE(last_rssi, last_rssi, D); 66STA_FILE(last_rssi, last_rssi, D);
@@ -85,12 +74,10 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
85{ 74{
86 char buf[100]; 75 char buf[100];
87 struct sta_info *sta = file->private_data; 76 struct sta_info *sta = file->private_data;
88 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s", 77 int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s",
89 sta->flags & WLAN_STA_AUTH ? "AUTH\n" : "", 78 sta->flags & WLAN_STA_AUTH ? "AUTH\n" : "",
90 sta->flags & WLAN_STA_ASSOC ? "ASSOC\n" : "", 79 sta->flags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
91 sta->flags & WLAN_STA_PS ? "PS\n" : "", 80 sta->flags & WLAN_STA_PS ? "PS\n" : "",
92 sta->flags & WLAN_STA_TIM ? "TIM\n" : "",
93 sta->flags & WLAN_STA_PERM ? "PERM\n" : "",
94 sta->flags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", 81 sta->flags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
95 sta->flags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", 82 sta->flags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
96 sta->flags & WLAN_STA_WME ? "WME\n" : "", 83 sta->flags & WLAN_STA_WME ? "WME\n" : "",
@@ -111,31 +98,6 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file,
111} 98}
112STA_OPS(num_ps_buf_frames); 99STA_OPS(num_ps_buf_frames);
113 100
114static ssize_t sta_last_ack_rssi_read(struct file *file, char __user *userbuf,
115 size_t count, loff_t *ppos)
116{
117 char buf[100];
118 struct sta_info *sta = file->private_data;
119 int res = scnprintf(buf, sizeof(buf), "%d %d %d\n",
120 sta->last_ack_rssi[0],
121 sta->last_ack_rssi[1],
122 sta->last_ack_rssi[2]);
123 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
124}
125STA_OPS(last_ack_rssi);
126
127static ssize_t sta_last_ack_ms_read(struct file *file, char __user *userbuf,
128 size_t count, loff_t *ppos)
129{
130 char buf[20];
131 struct sta_info *sta = file->private_data;
132 int res = scnprintf(buf, sizeof(buf), "%d\n",
133 sta->last_ack ?
134 jiffies_to_msecs(jiffies - sta->last_ack) : -1);
135 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
136}
137STA_OPS(last_ack_ms);
138
139static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf, 101static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf,
140 size_t count, loff_t *ppos) 102 size_t count, loff_t *ppos)
141{ 103{
@@ -191,6 +153,113 @@ static ssize_t sta_wme_tx_queue_read(struct file *file, char __user *userbuf,
191STA_OPS(wme_tx_queue); 153STA_OPS(wme_tx_queue);
192#endif 154#endif
193 155
156static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
157 size_t count, loff_t *ppos)
158{
159 char buf[768], *p = buf;
160 int i;
161 struct sta_info *sta = file->private_data;
162 p += scnprintf(p, sizeof(buf)+buf-p, "Agg state for STA is:\n");
163 p += scnprintf(p, sizeof(buf)+buf-p, " STA next dialog_token is %d \n "
164 "TIDs info is: \n TID :",
165 (sta->ampdu_mlme.dialog_token_allocator + 1));
166 for (i = 0; i < STA_TID_NUM; i++)
167 p += scnprintf(p, sizeof(buf)+buf-p, "%5d", i);
168
169 p += scnprintf(p, sizeof(buf)+buf-p, "\n RX :");
170 for (i = 0; i < STA_TID_NUM; i++)
171 p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
172 sta->ampdu_mlme.tid_rx[i].state);
173
174 p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:");
175 for (i = 0; i < STA_TID_NUM; i++)
176 p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
177 sta->ampdu_mlme.tid_rx[i].dialog_token);
178
179 p += scnprintf(p, sizeof(buf)+buf-p, "\n TX :");
180 for (i = 0; i < STA_TID_NUM; i++)
181 p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
182 sta->ampdu_mlme.tid_tx[i].state);
183
184 p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:");
185 for (i = 0; i < STA_TID_NUM; i++)
186 p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
187 sta->ampdu_mlme.tid_tx[i].dialog_token);
188
189 p += scnprintf(p, sizeof(buf)+buf-p, "\n SSN :");
190 for (i = 0; i < STA_TID_NUM; i++)
191 p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
192 sta->ampdu_mlme.tid_tx[i].ssn);
193
194 p += scnprintf(p, sizeof(buf)+buf-p, "\n");
195
196 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
197}
198
199static ssize_t sta_agg_status_write(struct file *file,
200 const char __user *user_buf, size_t count, loff_t *ppos)
201{
202 struct sta_info *sta = file->private_data;
203 struct net_device *dev = sta->dev;
204 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
205 struct ieee80211_hw *hw = &local->hw;
206 u8 *da = sta->addr;
207 static int tid_static_tx[16] = {0, 0, 0, 0, 0, 0, 0, 0,
208 0, 0, 0, 0, 0, 0, 0, 0};
209 static int tid_static_rx[16] = {1, 1, 1, 1, 1, 1, 1, 1,
210 1, 1, 1, 1, 1, 1, 1, 1};
211 char *endp;
212 char buf[32];
213 int buf_size, rs;
214 unsigned int tid_num;
215 char state[4];
216
217 memset(buf, 0x00, sizeof(buf));
218 buf_size = min(count, (sizeof(buf)-1));
219 if (copy_from_user(buf, user_buf, buf_size))
220 return -EFAULT;
221
222 tid_num = simple_strtoul(buf, &endp, 0);
223 if (endp == buf)
224 return -EINVAL;
225
226 if ((tid_num >= 100) && (tid_num <= 115)) {
227 /* toggle Rx aggregation command */
228 tid_num = tid_num - 100;
229 if (tid_static_rx[tid_num] == 1) {
230 strcpy(state, "off ");
231 ieee80211_sta_stop_rx_ba_session(dev, da, tid_num, 0,
232 WLAN_REASON_QSTA_REQUIRE_SETUP);
233 sta->ampdu_mlme.tid_rx[tid_num].buf_size = 0xFF;
234 tid_static_rx[tid_num] = 0;
235 } else {
236 strcpy(state, "on ");
237 sta->ampdu_mlme.tid_rx[tid_num].buf_size = 0x00;
238 tid_static_rx[tid_num] = 1;
239 }
240 printk(KERN_DEBUG "debugfs - try switching tid %u %s\n",
241 tid_num, state);
242 } else if ((tid_num >= 0) && (tid_num <= 15)) {
243 /* toggle Tx aggregation command */
244 if (tid_static_tx[tid_num] == 0) {
245 strcpy(state, "on ");
246 rs = ieee80211_start_tx_ba_session(hw, da, tid_num);
247 if (rs == 0)
248 tid_static_tx[tid_num] = 1;
249 } else {
250 strcpy(state, "off");
251 rs = ieee80211_stop_tx_ba_session(hw, da, tid_num, 1);
252 if (rs == 0)
253 tid_static_tx[tid_num] = 0;
254 }
255 printk(KERN_DEBUG "debugfs - switching tid %u %s, return=%d\n",
256 tid_num, state, rs);
257 }
258
259 return count;
260}
261STA_OPS_WR(agg_status);
262
194#define DEBUGFS_ADD(name) \ 263#define DEBUGFS_ADD(name) \
195 sta->debugfs.name = debugfs_create_file(#name, 0444, \ 264 sta->debugfs.name = debugfs_create_file(#name, 0444, \
196 sta->debugfs.dir, sta, &sta_ ##name## _ops); 265 sta->debugfs.dir, sta, &sta_ ##name## _ops);
@@ -203,12 +272,13 @@ STA_OPS(wme_tx_queue);
203void ieee80211_sta_debugfs_add(struct sta_info *sta) 272void ieee80211_sta_debugfs_add(struct sta_info *sta)
204{ 273{
205 struct dentry *stations_dir = sta->local->debugfs.stations; 274 struct dentry *stations_dir = sta->local->debugfs.stations;
206 DECLARE_MAC_BUF(mac); 275 DECLARE_MAC_BUF(mbuf);
276 u8 *mac;
207 277
208 if (!stations_dir) 278 if (!stations_dir)
209 return; 279 return;
210 280
211 print_mac(mac, sta->addr); 281 mac = print_mac(mbuf, sta->addr);
212 282
213 sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); 283 sta->debugfs.dir = debugfs_create_dir(mac, stations_dir);
214 if (!sta->debugfs.dir) 284 if (!sta->debugfs.dir)
@@ -216,28 +286,26 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
216 286
217 DEBUGFS_ADD(flags); 287 DEBUGFS_ADD(flags);
218 DEBUGFS_ADD(num_ps_buf_frames); 288 DEBUGFS_ADD(num_ps_buf_frames);
219 DEBUGFS_ADD(last_ack_rssi);
220 DEBUGFS_ADD(last_ack_ms);
221 DEBUGFS_ADD(inactive_ms); 289 DEBUGFS_ADD(inactive_ms);
222 DEBUGFS_ADD(last_seq_ctrl); 290 DEBUGFS_ADD(last_seq_ctrl);
223#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 291#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
224 DEBUGFS_ADD(wme_rx_queue); 292 DEBUGFS_ADD(wme_rx_queue);
225 DEBUGFS_ADD(wme_tx_queue); 293 DEBUGFS_ADD(wme_tx_queue);
226#endif 294#endif
295 DEBUGFS_ADD(agg_status);
227} 296}
228 297
229void ieee80211_sta_debugfs_remove(struct sta_info *sta) 298void ieee80211_sta_debugfs_remove(struct sta_info *sta)
230{ 299{
231 DEBUGFS_DEL(flags); 300 DEBUGFS_DEL(flags);
232 DEBUGFS_DEL(num_ps_buf_frames); 301 DEBUGFS_DEL(num_ps_buf_frames);
233 DEBUGFS_DEL(last_ack_rssi);
234 DEBUGFS_DEL(last_ack_ms);
235 DEBUGFS_DEL(inactive_ms); 302 DEBUGFS_DEL(inactive_ms);
236 DEBUGFS_DEL(last_seq_ctrl); 303 DEBUGFS_DEL(last_seq_ctrl);
237#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 304#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
238 DEBUGFS_DEL(wme_rx_queue); 305 DEBUGFS_DEL(wme_rx_queue);
239 DEBUGFS_DEL(wme_tx_queue); 306 DEBUGFS_DEL(wme_tx_queue);
240#endif 307#endif
308 DEBUGFS_DEL(agg_status);
241 309
242 debugfs_remove(sta->debugfs.dir); 310 debugfs_remove(sta->debugfs.dir);
243 sta->debugfs.dir = NULL; 311 sta->debugfs.dir = NULL;
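
The new per-station agg_status debugfs file accepts a single number on write: 0-15 toggles TX A-MPDU aggregation for that TID, and 100-115 toggles RX aggregation for TID (n - 100). A small standalone decoder of that convention (the debugfs path in the comments is only indicative):

    #include <stdio.h>
    #include <stdlib.h>

    /* Mirrors the parsing in sta_agg_status_write() above; hypothetical
     * helper, not kernel code. */
    static void decode_agg_cmd(const char *buf)
    {
        char *end;
        unsigned long n = strtoul(buf, &end, 0);

        if (end == buf)
            printf("invalid input\n");
        else if (n <= 15)
            printf("toggle TX aggregation on TID %lu\n", n);
        else if (n >= 100 && n <= 115)
            printf("toggle RX aggregation on TID %lu\n", n - 100);
        else
            printf("ignored\n");
    }

    int main(void)
    {
        decode_agg_cmd("3");    /* e.g. echo 3   > .../stations/<mac>/agg_status */
        decode_agg_cmd("103");  /* e.g. echo 103 > .../stations/<mac>/agg_status */
        return 0;
    }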
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index 28bcdf9fc3df..2133c9fd27a4 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -67,9 +67,19 @@ static void ieee80211_configure_filter(struct ieee80211_local *local)
67 new_flags |= FIF_ALLMULTI; 67 new_flags |= FIF_ALLMULTI;
68 68
69 if (local->monitors) 69 if (local->monitors)
70 new_flags |= FIF_CONTROL | 70 new_flags |= FIF_BCN_PRBRESP_PROMISC;
71 FIF_OTHER_BSS | 71
72 FIF_BCN_PRBRESP_PROMISC; 72 if (local->fif_fcsfail)
73 new_flags |= FIF_FCSFAIL;
74
75 if (local->fif_plcpfail)
76 new_flags |= FIF_PLCPFAIL;
77
78 if (local->fif_control)
79 new_flags |= FIF_CONTROL;
80
81 if (local->fif_other_bss)
82 new_flags |= FIF_OTHER_BSS;
73 83
74 changed_flags = local->filter_flags ^ new_flags; 84 changed_flags = local->filter_flags ^ new_flags;
75 85
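
ieee80211_configure_filter() now derives FIF_CONTROL, FIF_OTHER_BSS, FIF_FCSFAIL and FIF_PLCPFAIL from per-flag counters that ieee80211_open()/ieee80211_stop() below adjust for each monitor interface, instead of unconditionally requesting everything whenever any monitor exists. A toy model of that bookkeeping (flag values are placeholders and the struct is hypothetical):

    #include <stdio.h>

    #define MONITOR_FLAG_FCSFAIL   (1 << 0)
    #define MONITOR_FLAG_PLCPFAIL  (1 << 1)
    #define MONITOR_FLAG_CONTROL   (1 << 2)
    #define MONITOR_FLAG_OTHER_BSS (1 << 3)

    #define FIF_FCSFAIL   (1 << 0)
    #define FIF_PLCPFAIL  (1 << 1)
    #define FIF_CONTROL   (1 << 2)
    #define FIF_OTHER_BSS (1 << 3)

    struct filter_state {
        int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss;
    };

    /* Each monitor interface opened with a MONITOR_FLAG_* bumps a counter,
     * as ieee80211_open() does; ieee80211_stop() would decrement them. */
    static void monitor_open(struct filter_state *s, unsigned flags)
    {
        if (flags & MONITOR_FLAG_FCSFAIL)   s->fif_fcsfail++;
        if (flags & MONITOR_FLAG_PLCPFAIL)  s->fif_plcpfail++;
        if (flags & MONITOR_FLAG_CONTROL)   s->fif_control++;
        if (flags & MONITOR_FLAG_OTHER_BSS) s->fif_other_bss++;
    }

    /* The filter flags handed to the driver are recomputed from the
     * counters, so a flag is only requested while somebody wants it. */
    static unsigned configure_filter(const struct filter_state *s)
    {
        unsigned new_flags = 0;

        if (s->fif_fcsfail)   new_flags |= FIF_FCSFAIL;
        if (s->fif_plcpfail)  new_flags |= FIF_PLCPFAIL;
        if (s->fif_control)   new_flags |= FIF_CONTROL;
        if (s->fif_other_bss) new_flags |= FIF_OTHER_BSS;
        return new_flags;
    }

    int main(void)
    {
        struct filter_state s = { 0 };

        monitor_open(&s, MONITOR_FLAG_CONTROL | MONITOR_FLAG_OTHER_BSS);
        printf("filter flags: 0x%x\n", configure_filter(&s));
        return 0;
    }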
@@ -173,8 +183,52 @@ static int ieee80211_open(struct net_device *dev)
173 list_for_each_entry(nsdata, &local->interfaces, list) { 183 list_for_each_entry(nsdata, &local->interfaces, list) {
174 struct net_device *ndev = nsdata->dev; 184 struct net_device *ndev = nsdata->dev;
175 185
176 if (ndev != dev && ndev != local->mdev && netif_running(ndev) && 186 if (ndev != dev && ndev != local->mdev && netif_running(ndev)) {
177 compare_ether_addr(dev->dev_addr, ndev->dev_addr) == 0) { 187 /*
188 * Allow only a single IBSS interface to be up at any
189 * time. This is restricted because beacon distribution
190 * cannot work properly if both are in the same IBSS.
191 *
192 * To remove this restriction we'd have to disallow them
193 * from setting the same SSID on different IBSS interfaces
194 * belonging to the same hardware. Then, however, we're
195 * faced with having to adopt two different TSF timers...
196 */
197 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
198 nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
199 return -EBUSY;
200
201 /*
202 * Disallow multiple IBSS/STA mode interfaces.
203 *
 204			 * This is a technical restriction; it is possible, although
 205			 * most likely not IEEE 802.11 compliant, to have multiple
 206			 * STAs on a single piece of hardware (the TSF timer will not
 207			 * be adjusted properly).
208 *
209 * However, because mac80211 uses the master device's BSS
210 * information for each STA/IBSS interface, doing this will
 211			 * currently corrupt that BSS information completely, unless
 212			 * (in a not very useful case) both STAs are associated to the
213 * same BSS.
214 *
215 * To remove this restriction, the BSS information needs to
216 * be embedded in the STA/IBSS mode sdata instead of using
217 * the master device's BSS structure.
218 */
219 if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
220 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) &&
221 (nsdata->vif.type == IEEE80211_IF_TYPE_STA ||
222 nsdata->vif.type == IEEE80211_IF_TYPE_IBSS))
223 return -EBUSY;
224
225 /*
226 * The remaining checks are only performed for interfaces
227 * with the same MAC address.
228 */
229 if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
230 continue;
231
178 /* 232 /*
179 * check whether it may have the same address 233 * check whether it may have the same address
180 */ 234 */
@@ -186,8 +240,7 @@ static int ieee80211_open(struct net_device *dev)
186 * can only add VLANs to enabled APs 240 * can only add VLANs to enabled APs
187 */ 241 */
188 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN && 242 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
189 nsdata->vif.type == IEEE80211_IF_TYPE_AP && 243 nsdata->vif.type == IEEE80211_IF_TYPE_AP)
190 netif_running(nsdata->dev))
191 sdata->u.vlan.ap = nsdata; 244 sdata->u.vlan.ap = nsdata;
192 } 245 }
193 } 246 }
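
The new open-time checks above refuse a second IBSS interface and refuse mixing more than one STA/IBSS interface, because all of them would share the master device's single BSS state and TSF timer. Reduced to a pure predicate (the enum is a stand-in for enum ieee80211_if_types):

    #include <stdio.h>

    enum if_type { IF_AP, IF_STA, IF_IBSS, IF_MNTR };

    /* Returns 1 if the new interface may be brought up alongside an
     * already running one, 0 where ieee80211_open() would return -EBUSY. */
    static int can_coexist(enum if_type new_if, enum if_type running_if)
    {
        if (new_if == IF_IBSS && running_if == IF_IBSS)
            return 0;                           /* single IBSS only        */
        if ((new_if == IF_STA || new_if == IF_IBSS) &&
            (running_if == IF_STA || running_if == IF_IBSS))
            return 0;                           /* no STA/IBSS mixing      */
        return 1;
    }

    int main(void)
    {
        printf("STA + AP:   %d\n", can_coexist(IF_STA, IF_AP));
        printf("STA + STA:  %d\n", can_coexist(IF_STA, IF_STA));
        printf("IBSS + STA: %d\n", can_coexist(IF_IBSS, IF_STA));
        return 0;
    }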
@@ -229,15 +282,28 @@ static int ieee80211_open(struct net_device *dev)
229 /* no need to tell driver */ 282 /* no need to tell driver */
230 break; 283 break;
231 case IEEE80211_IF_TYPE_MNTR: 284 case IEEE80211_IF_TYPE_MNTR:
285 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
286 local->cooked_mntrs++;
287 break;
288 }
289
232 /* must be before the call to ieee80211_configure_filter */ 290 /* must be before the call to ieee80211_configure_filter */
233 local->monitors++; 291 local->monitors++;
234 if (local->monitors == 1) { 292 if (local->monitors == 1)
235 netif_tx_lock_bh(local->mdev);
236 ieee80211_configure_filter(local);
237 netif_tx_unlock_bh(local->mdev);
238
239 local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; 293 local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
240 } 294
295 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
296 local->fif_fcsfail++;
297 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
298 local->fif_plcpfail++;
299 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
300 local->fif_control++;
301 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
302 local->fif_other_bss++;
303
304 netif_tx_lock_bh(local->mdev);
305 ieee80211_configure_filter(local);
306 netif_tx_unlock_bh(local->mdev);
241 break; 307 break;
242 case IEEE80211_IF_TYPE_STA: 308 case IEEE80211_IF_TYPE_STA:
243 case IEEE80211_IF_TYPE_IBSS: 309 case IEEE80211_IF_TYPE_IBSS:
@@ -352,14 +418,27 @@ static int ieee80211_stop(struct net_device *dev)
352 /* no need to tell driver */ 418 /* no need to tell driver */
353 break; 419 break;
354 case IEEE80211_IF_TYPE_MNTR: 420 case IEEE80211_IF_TYPE_MNTR:
355 local->monitors--; 421 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
356 if (local->monitors == 0) { 422 local->cooked_mntrs--;
357 netif_tx_lock_bh(local->mdev); 423 break;
358 ieee80211_configure_filter(local); 424 }
359 netif_tx_unlock_bh(local->mdev);
360 425
426 local->monitors--;
427 if (local->monitors == 0)
361 local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP; 428 local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
362 } 429
430 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
431 local->fif_fcsfail--;
432 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
433 local->fif_plcpfail--;
434 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
435 local->fif_control--;
436 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
437 local->fif_other_bss--;
438
439 netif_tx_lock_bh(local->mdev);
440 ieee80211_configure_filter(local);
441 netif_tx_unlock_bh(local->mdev);
363 break; 442 break;
364 case IEEE80211_IF_TYPE_STA: 443 case IEEE80211_IF_TYPE_STA:
365 case IEEE80211_IF_TYPE_IBSS: 444 case IEEE80211_IF_TYPE_IBSS:
@@ -414,6 +493,329 @@ static int ieee80211_stop(struct net_device *dev)
414 return 0; 493 return 0;
415} 494}
416 495
496int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
497{
498 struct ieee80211_local *local = hw_to_local(hw);
499 struct sta_info *sta;
500 struct ieee80211_sub_if_data *sdata;
501 u16 start_seq_num = 0;
502 u8 *state;
503 int ret;
504 DECLARE_MAC_BUF(mac);
505
506 if (tid >= STA_TID_NUM)
507 return -EINVAL;
508
509#ifdef CONFIG_MAC80211_HT_DEBUG
510 printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
511 print_mac(mac, ra), tid);
512#endif /* CONFIG_MAC80211_HT_DEBUG */
513
514 sta = sta_info_get(local, ra);
515 if (!sta) {
516 printk(KERN_DEBUG "Could not find the station\n");
517 return -ENOENT;
518 }
519
520 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
521
522 /* we have tried too many times, receiver does not want A-MPDU */
523 if (sta->ampdu_mlme.tid_tx[tid].addba_req_num > HT_AGG_MAX_RETRIES) {
524 ret = -EBUSY;
525 goto start_ba_exit;
526 }
527
528 state = &sta->ampdu_mlme.tid_tx[tid].state;
529 /* check if the TID is not in aggregation flow already */
530 if (*state != HT_AGG_STATE_IDLE) {
531#ifdef CONFIG_MAC80211_HT_DEBUG
532 printk(KERN_DEBUG "BA request denied - session is not "
533 "idle on tid %u\n", tid);
534#endif /* CONFIG_MAC80211_HT_DEBUG */
535 ret = -EAGAIN;
536 goto start_ba_exit;
537 }
538
539 /* ensure that TX flow won't interrupt us
 540	 * until the end of the call to the requeue function */
541 spin_lock_bh(&local->mdev->queue_lock);
542
543 /* create a new queue for this aggregation */
544 ret = ieee80211_ht_agg_queue_add(local, sta, tid);
545
 546	/* if no queue is available for aggregation,
 547	 * don't switch to aggregation */
548 if (ret) {
549#ifdef CONFIG_MAC80211_HT_DEBUG
550 printk(KERN_DEBUG "BA request denied - no queue available for"
551 " tid %d\n", tid);
552#endif /* CONFIG_MAC80211_HT_DEBUG */
553 spin_unlock_bh(&local->mdev->queue_lock);
554 goto start_ba_exit;
555 }
556 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
557
558 /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
559 * call back right away, it must see that the flow has begun */
560 *state |= HT_ADDBA_REQUESTED_MSK;
561
562 if (local->ops->ampdu_action)
563 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
564 ra, tid, &start_seq_num);
565
566 if (ret) {
567 /* No need to requeue the packets in the agg queue, since we
568 * held the tx lock: no packet could be enqueued to the newly
569 * allocated queue */
570 ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
571#ifdef CONFIG_MAC80211_HT_DEBUG
572 printk(KERN_DEBUG "BA request denied - HW or queue unavailable"
573 " for tid %d\n", tid);
574#endif /* CONFIG_MAC80211_HT_DEBUG */
575 spin_unlock_bh(&local->mdev->queue_lock);
576 *state = HT_AGG_STATE_IDLE;
577 goto start_ba_exit;
578 }
579
580 /* Will put all the packets in the new SW queue */
581 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
582 spin_unlock_bh(&local->mdev->queue_lock);
583
584 /* We have most probably almost emptied the legacy queue */
585 /* ieee80211_wake_queue(local_to_hw(local), ieee802_1d_to_ac[tid]); */
586
587 /* send an addBA request */
588 sta->ampdu_mlme.dialog_token_allocator++;
589 sta->ampdu_mlme.tid_tx[tid].dialog_token =
590 sta->ampdu_mlme.dialog_token_allocator;
591 sta->ampdu_mlme.tid_tx[tid].ssn = start_seq_num;
592
593 ieee80211_send_addba_request(sta->dev, ra, tid,
594 sta->ampdu_mlme.tid_tx[tid].dialog_token,
595 sta->ampdu_mlme.tid_tx[tid].ssn,
596 0x40, 5000);
597
598 /* activate the timer for the recipient's addBA response */
599 sta->ampdu_mlme.tid_tx[tid].addba_resp_timer.expires =
600 jiffies + ADDBA_RESP_INTERVAL;
601 add_timer(&sta->ampdu_mlme.tid_tx[tid].addba_resp_timer);
602 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
603
604start_ba_exit:
605 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
606 sta_info_put(sta);
607 return ret;
608}
609EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
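
ieee80211_start_tx_ba_session() only marks the session as requested (HT_ADDBA_REQUESTED_MSK); it becomes operational once ieee80211_start_tx_ba_cb() adds HT_ADDBA_DRV_READY_MSK and the peer's addBA response has been processed elsewhere in the MLME code. A toy walk-through of that bit accumulation (the mask values, and the HT_ADDBA_RECEIVED_MSK bit standing for the peer response, are assumptions here, not the kernel's definitions):

    #include <stdio.h>

    #define HT_AGG_STATE_IDLE        0
    #define HT_ADDBA_REQUESTED_MSK   (1 << 0)
    #define HT_ADDBA_DRV_READY_MSK   (1 << 1)
    #define HT_ADDBA_RECEIVED_MSK    (1 << 2)   /* assumed peer-response bit */
    #define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \
                                      HT_ADDBA_DRV_READY_MSK | \
                                      HT_ADDBA_RECEIVED_MSK)

    int main(void)
    {
        unsigned state = HT_AGG_STATE_IDLE;

        state |= HT_ADDBA_REQUESTED_MSK;   /* ieee80211_start_tx_ba_session() */
        printf("after request:      operational=%d\n",
               state == HT_AGG_STATE_OPERATIONAL);

        state |= HT_ADDBA_DRV_READY_MSK;   /* ieee80211_start_tx_ba_cb()      */
        printf("after driver ready: operational=%d\n",
               state == HT_AGG_STATE_OPERATIONAL);

        state |= HT_ADDBA_RECEIVED_MSK;    /* peer's addBA response arrives   */
        printf("after addBA resp:   operational=%d\n",
               state == HT_AGG_STATE_OPERATIONAL);
        return 0;
    }

The session is only unblocked (the per-TID queue woken) when all bits are present, regardless of whether the driver callback or the peer's response arrives first.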
610
611int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
612 u8 *ra, u16 tid,
613 enum ieee80211_back_parties initiator)
614{
615 struct ieee80211_local *local = hw_to_local(hw);
616 struct sta_info *sta;
617 u8 *state;
618 int ret = 0;
619 DECLARE_MAC_BUF(mac);
620
621 if (tid >= STA_TID_NUM)
622 return -EINVAL;
623
624#ifdef CONFIG_MAC80211_HT_DEBUG
625 printk(KERN_DEBUG "Stop a BA session requested for %s tid %u\n",
626 print_mac(mac, ra), tid);
627#endif /* CONFIG_MAC80211_HT_DEBUG */
628
629 sta = sta_info_get(local, ra);
630 if (!sta)
631 return -ENOENT;
632
633 /* check if the TID is in aggregation */
634 state = &sta->ampdu_mlme.tid_tx[tid].state;
635 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
636
637 if (*state != HT_AGG_STATE_OPERATIONAL) {
638#ifdef CONFIG_MAC80211_HT_DEBUG
639 printk(KERN_DEBUG "Try to stop Tx aggregation on"
640 " non active TID\n");
641#endif /* CONFIG_MAC80211_HT_DEBUG */
642 ret = -ENOENT;
643 goto stop_BA_exit;
644 }
645
646 ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
647
648 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
649 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
650
651 if (local->ops->ampdu_action)
652 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
653 ra, tid, NULL);
654
 655	/* the HW denied going back to legacy, keep the session operational */
656 if (ret) {
657 WARN_ON(ret != -EBUSY);
658 *state = HT_AGG_STATE_OPERATIONAL;
659 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
660 goto stop_BA_exit;
661 }
662
663stop_BA_exit:
664 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
665 sta_info_put(sta);
666 return ret;
667}
668EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
669
670void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
671{
672 struct ieee80211_local *local = hw_to_local(hw);
673 struct sta_info *sta;
674 u8 *state;
675 DECLARE_MAC_BUF(mac);
676
677 if (tid >= STA_TID_NUM) {
678 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
679 tid, STA_TID_NUM);
680 return;
681 }
682
683 sta = sta_info_get(local, ra);
684 if (!sta) {
685 printk(KERN_DEBUG "Could not find station: %s\n",
686 print_mac(mac, ra));
687 return;
688 }
689
690 state = &sta->ampdu_mlme.tid_tx[tid].state;
691 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
692
693 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
694 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
695 *state);
696 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
697 sta_info_put(sta);
698 return;
699 }
700
701 WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
702
703 *state |= HT_ADDBA_DRV_READY_MSK;
704
705 if (*state == HT_AGG_STATE_OPERATIONAL) {
706 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
707 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
708 }
709 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
710 sta_info_put(sta);
711}
712EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
713
714void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
715{
716 struct ieee80211_local *local = hw_to_local(hw);
717 struct sta_info *sta;
718 u8 *state;
719 int agg_queue;
720 DECLARE_MAC_BUF(mac);
721
722 if (tid >= STA_TID_NUM) {
723 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
724 tid, STA_TID_NUM);
725 return;
726 }
727
728 printk(KERN_DEBUG "Stop a BA session requested on DA %s tid %d\n",
729 print_mac(mac, ra), tid);
730
731 sta = sta_info_get(local, ra);
732 if (!sta) {
733 printk(KERN_DEBUG "Could not find station: %s\n",
734 print_mac(mac, ra));
735 return;
736 }
737 state = &sta->ampdu_mlme.tid_tx[tid].state;
738
739 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
740 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
741 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
742 sta_info_put(sta);
743 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
744 return;
745 }
746
747 if (*state & HT_AGG_STATE_INITIATOR_MSK)
748 ieee80211_send_delba(sta->dev, ra, tid,
749 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
750
751 agg_queue = sta->tid_to_tx_q[tid];
752
753 /* avoid ordering issues: we are the only one that can modify
754 * the content of the qdiscs */
755 spin_lock_bh(&local->mdev->queue_lock);
756 /* remove the queue for this aggregation */
757 ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
758 spin_unlock_bh(&local->mdev->queue_lock);
759
 760	/* we just requeued all the frames that were in the removed
761 * queue, and since we might miss a softirq we do netif_schedule.
762 * ieee80211_wake_queue is not used here as this queue is not
763 * necessarily stopped */
764 netif_schedule(local->mdev);
765 *state = HT_AGG_STATE_IDLE;
766 sta->ampdu_mlme.tid_tx[tid].addba_req_num = 0;
767 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
768
769 sta_info_put(sta);
770}
771EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
772
773void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
774 const u8 *ra, u16 tid)
775{
776 struct ieee80211_local *local = hw_to_local(hw);
777 struct ieee80211_ra_tid *ra_tid;
778 struct sk_buff *skb = dev_alloc_skb(0);
779
780 if (unlikely(!skb)) {
781 if (net_ratelimit())
 782			printk(KERN_WARNING "%s: Not enough memory, "
 783			       "dropping start BA session\n", wiphy_name(local->hw.wiphy));
784 return;
785 }
786 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
787 memcpy(&ra_tid->ra, ra, ETH_ALEN);
788 ra_tid->tid = tid;
789
790 skb->pkt_type = IEEE80211_ADDBA_MSG;
791 skb_queue_tail(&local->skb_queue, skb);
792 tasklet_schedule(&local->tasklet);
793}
794EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
795
796void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
797 const u8 *ra, u16 tid)
798{
799 struct ieee80211_local *local = hw_to_local(hw);
800 struct ieee80211_ra_tid *ra_tid;
801 struct sk_buff *skb = dev_alloc_skb(0);
802
803 if (unlikely(!skb)) {
804 if (net_ratelimit())
 805			printk(KERN_WARNING "%s: Not enough memory, "
 806			       "dropping stop BA session\n", wiphy_name(local->hw.wiphy));
807 return;
808 }
809 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
810 memcpy(&ra_tid->ra, ra, ETH_ALEN);
811 ra_tid->tid = tid;
812
813 skb->pkt_type = IEEE80211_DELBA_MSG;
814 skb_queue_tail(&local->skb_queue, skb);
815 tasklet_schedule(&local->tasklet);
816}
817EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
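
The *_irqsafe variants avoid taking the aggregation locks from interrupt context: they pack the (ra, tid) pair into an skb's control buffer, tag it IEEE80211_ADDBA_MSG or IEEE80211_DELBA_MSG, and let the tasklet call the real callbacks later. A standalone toy of that defer-to-tasklet pattern (fixed-size array instead of an skb queue; all names illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    enum msg_type { ADDBA_MSG, DELBA_MSG };

    struct ra_tid_msg {
        enum msg_type type;
        uint8_t ra[6];
        uint16_t tid;
    };

    #define QLEN 8
    static struct ra_tid_msg queue[QLEN];
    static int qhead, qtail;

    /* "Interrupt context" side: just record what happened and return. */
    static void queue_ba_event(enum msg_type type, const uint8_t *ra, uint16_t tid)
    {
        struct ra_tid_msg *m = &queue[qtail++ % QLEN];

        m->type = type;
        memcpy(m->ra, ra, 6);
        m->tid = tid;
    }

    /* "Tasklet" side: drain the queue and run the real handlers. */
    static void tasklet_handler(void)
    {
        while (qhead != qtail) {
            struct ra_tid_msg *m = &queue[qhead++ % QLEN];

            printf("%s for %02x:...:%02x tid %u\n",
                   m->type == ADDBA_MSG ? "start_tx_ba_cb" : "stop_tx_ba_cb",
                   m->ra[0], m->ra[5], (unsigned)m->tid);
        }
    }

    int main(void)
    {
        uint8_t ra[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        queue_ba_event(ADDBA_MSG, ra, 3);   /* ieee80211_start_tx_ba_cb_irqsafe() */
        queue_ba_event(DELBA_MSG, ra, 3);   /* ieee80211_stop_tx_ba_cb_irqsafe()  */
        tasklet_handler();                  /* ieee80211_tasklet_handler()        */
        return 0;
    }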
818
417static void ieee80211_set_multicast_list(struct net_device *dev) 819static void ieee80211_set_multicast_list(struct net_device *dev)
418{ 820{
419 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 821 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
@@ -479,8 +881,11 @@ int ieee80211_if_update_wds(struct net_device *dev, u8 *remote_addr)
479 881
480 /* Create STA entry for the new peer */ 882 /* Create STA entry for the new peer */
481 sta = sta_info_add(local, dev, remote_addr, GFP_KERNEL); 883 sta = sta_info_add(local, dev, remote_addr, GFP_KERNEL);
482 if (!sta) 884 if (IS_ERR(sta))
483 return -ENOMEM; 885 return PTR_ERR(sta);
886
887 sta->flags |= WLAN_STA_AUTHORIZED;
888
484 sta_info_put(sta); 889 sta_info_put(sta);
485 890
486 /* Remove STA entry for the old peer */ 891 /* Remove STA entry for the old peer */
@@ -553,37 +958,28 @@ int ieee80211_if_config_beacon(struct net_device *dev)
553 958
554int ieee80211_hw_config(struct ieee80211_local *local) 959int ieee80211_hw_config(struct ieee80211_local *local)
555{ 960{
556 struct ieee80211_hw_mode *mode;
557 struct ieee80211_channel *chan; 961 struct ieee80211_channel *chan;
558 int ret = 0; 962 int ret = 0;
559 963
560 if (local->sta_sw_scanning) { 964 if (local->sta_sw_scanning)
561 chan = local->scan_channel; 965 chan = local->scan_channel;
562 mode = local->scan_hw_mode; 966 else
563 } else {
564 chan = local->oper_channel; 967 chan = local->oper_channel;
565 mode = local->oper_hw_mode;
566 }
567 968
568 local->hw.conf.channel = chan->chan; 969 local->hw.conf.channel = chan;
569 local->hw.conf.channel_val = chan->val; 970
570 if (!local->hw.conf.power_level) { 971 if (!local->hw.conf.power_level)
571 local->hw.conf.power_level = chan->power_level; 972 local->hw.conf.power_level = chan->max_power;
572 } else { 973 else
573 local->hw.conf.power_level = min(chan->power_level, 974 local->hw.conf.power_level = min(chan->max_power,
574 local->hw.conf.power_level); 975 local->hw.conf.power_level);
575 } 976
576 local->hw.conf.freq = chan->freq; 977 local->hw.conf.max_antenna_gain = chan->max_antenna_gain;
577 local->hw.conf.phymode = mode->mode;
578 local->hw.conf.antenna_max = chan->antenna_max;
579 local->hw.conf.chan = chan;
580 local->hw.conf.mode = mode;
581 978
582#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 979#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
583 printk(KERN_DEBUG "HW CONFIG: channel=%d freq=%d " 980 printk(KERN_DEBUG "%s: HW CONFIG: freq=%d\n",
584 "phymode=%d\n", local->hw.conf.channel, local->hw.conf.freq, 981 wiphy_name(local->hw.wiphy), chan->center_freq);
585 local->hw.conf.phymode); 982#endif
586#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
587 983
588 if (local->open_count) 984 if (local->open_count)
589 ret = local->ops->config(local_to_hw(local), &local->hw.conf); 985 ret = local->ops->config(local_to_hw(local), &local->hw.conf);
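
With ieee80211_hw_config() now carrying a struct ieee80211_channel pointer, the TX power handling reduces to "use the channel's maximum unless a lower level was requested". The same logic as a small standalone function (field names simplified; max_power treated as dBm by assumption):

    #include <stdio.h>

    struct channel { int center_freq; int max_power; };

    /* Mirrors the power_level selection in ieee80211_hw_config() above. */
    static int pick_power_level(const struct channel *chan, int requested)
    {
        if (!requested)
            return chan->max_power;
        return requested < chan->max_power ? requested : chan->max_power;
    }

    int main(void)
    {
        struct channel chan = { .center_freq = 2437, .max_power = 20 };

        printf("no request -> %d dBm\n", pick_power_level(&chan, 0));
        printf("request 30 -> %d dBm\n", pick_power_level(&chan, 30));
        printf("request 15 -> %d dBm\n", pick_power_level(&chan, 15));
        return 0;
    }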
@@ -601,11 +997,13 @@ int ieee80211_hw_config_ht(struct ieee80211_local *local, int enable_ht,
601 struct ieee80211_ht_bss_info *req_bss_cap) 997 struct ieee80211_ht_bss_info *req_bss_cap)
602{ 998{
603 struct ieee80211_conf *conf = &local->hw.conf; 999 struct ieee80211_conf *conf = &local->hw.conf;
604 struct ieee80211_hw_mode *mode = conf->mode; 1000 struct ieee80211_supported_band *sband;
605 int i; 1001 int i;
606 1002
1003 sband = local->hw.wiphy->bands[conf->channel->band];
1004
607 /* HT is not supported */ 1005 /* HT is not supported */
608 if (!mode->ht_info.ht_supported) { 1006 if (!sband->ht_info.ht_supported) {
609 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; 1007 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
610 return -EOPNOTSUPP; 1008 return -EOPNOTSUPP;
611 } 1009 }
@@ -615,17 +1013,17 @@ int ieee80211_hw_config_ht(struct ieee80211_local *local, int enable_ht,
615 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE; 1013 conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
616 } else { 1014 } else {
617 conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE; 1015 conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
618 conf->ht_conf.cap = req_ht_cap->cap & mode->ht_info.cap; 1016 conf->ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
619 conf->ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); 1017 conf->ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
620 conf->ht_conf.cap |= 1018 conf->ht_conf.cap |=
621 mode->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS; 1019 sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
622 conf->ht_bss_conf.primary_channel = 1020 conf->ht_bss_conf.primary_channel =
623 req_bss_cap->primary_channel; 1021 req_bss_cap->primary_channel;
624 conf->ht_bss_conf.bss_cap = req_bss_cap->bss_cap; 1022 conf->ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
625 conf->ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; 1023 conf->ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
626 for (i = 0; i < SUPP_MCS_SET_LEN; i++) 1024 for (i = 0; i < SUPP_MCS_SET_LEN; i++)
627 conf->ht_conf.supp_mcs_set[i] = 1025 conf->ht_conf.supp_mcs_set[i] =
628 mode->ht_info.supp_mcs_set[i] & 1026 sband->ht_info.supp_mcs_set[i] &
629 req_ht_cap->supp_mcs_set[i]; 1027 req_ht_cap->supp_mcs_set[i];
630 1028
631 /* In STA mode, this gives us indication 1029 /* In STA mode, this gives us indication
@@ -713,6 +1111,7 @@ static void ieee80211_tasklet_handler(unsigned long data)
713 struct sk_buff *skb; 1111 struct sk_buff *skb;
714 struct ieee80211_rx_status rx_status; 1112 struct ieee80211_rx_status rx_status;
715 struct ieee80211_tx_status *tx_status; 1113 struct ieee80211_tx_status *tx_status;
1114 struct ieee80211_ra_tid *ra_tid;
716 1115
717 while ((skb = skb_dequeue(&local->skb_queue)) || 1116 while ((skb = skb_dequeue(&local->skb_queue)) ||
718 (skb = skb_dequeue(&local->skb_queue_unreliable))) { 1117 (skb = skb_dequeue(&local->skb_queue_unreliable))) {
@@ -733,6 +1132,18 @@ static void ieee80211_tasklet_handler(unsigned long data)
733 skb, tx_status); 1132 skb, tx_status);
734 kfree(tx_status); 1133 kfree(tx_status);
735 break; 1134 break;
1135 case IEEE80211_DELBA_MSG:
1136 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
1137 ieee80211_stop_tx_ba_cb(local_to_hw(local),
1138 ra_tid->ra, ra_tid->tid);
1139 dev_kfree_skb(skb);
1140 break;
1141 case IEEE80211_ADDBA_MSG:
1142 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
1143 ieee80211_start_tx_ba_cb(local_to_hw(local),
1144 ra_tid->ra, ra_tid->tid);
1145 dev_kfree_skb(skb);
 1146			break;
736 default: /* should never get here! */ 1147 default: /* should never get here! */
737 printk(KERN_ERR "%s: Unknown message type (%d)\n", 1148 printk(KERN_ERR "%s: Unknown message type (%d)\n",
738 wiphy_name(local->hw.wiphy), skb->pkt_type); 1149 wiphy_name(local->hw.wiphy), skb->pkt_type);
@@ -810,6 +1221,77 @@ no_key:
810 } 1221 }
811} 1222}
812 1223
1224static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
1225 struct sta_info *sta,
1226 struct sk_buff *skb,
1227 struct ieee80211_tx_status *status)
1228{
1229 sta->tx_filtered_count++;
1230
1231 /*
1232 * Clear the TX filter mask for this STA when sending the next
1233 * packet. If the STA went to power save mode, this will happen
 1234	 * the next time it wakes up.
1235 */
1236 sta->flags |= WLAN_STA_CLEAR_PS_FILT;
1237
1238 /*
1239 * This code races in the following way:
1240 *
1241 * (1) STA sends frame indicating it will go to sleep and does so
1242 * (2) hardware/firmware adds STA to filter list, passes frame up
1243 * (3) hardware/firmware processes TX fifo and suppresses a frame
1244 * (4) we get TX status before having processed the frame and
1245 * knowing that the STA has gone to sleep.
1246 *
1247 * This is actually quite unlikely even when both those events are
1248 * processed from interrupts coming in quickly after one another or
1249 * even at the same time because we queue both TX status events and
1250 * RX frames to be processed by a tasklet and process them in the
1251 * same order that they were received or TX status last. Hence, there
1252 * is no race as long as the frame RX is processed before the next TX
1253 * status, which drivers can ensure, see below.
1254 *
1255 * Note that this can only happen if the hardware or firmware can
1256 * actually add STAs to the filter list, if this is done by the
1257 * driver in response to set_tim() (which will only reduce the race
1258 * this whole filtering tries to solve, not completely solve it)
1259 * this situation cannot happen.
1260 *
1261 * To completely solve this race drivers need to make sure that they
1262 * (a) don't mix the irq-safe/not irq-safe TX status/RX processing
1263 * functions and
1264 * (b) always process RX events before TX status events if ordering
1265 * can be unknown, for example with different interrupt status
1266 * bits.
1267 */
1268 if (sta->flags & WLAN_STA_PS &&
1269 skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
1270 ieee80211_remove_tx_extra(local, sta->key, skb,
1271 &status->control);
1272 skb_queue_tail(&sta->tx_filtered, skb);
1273 return;
1274 }
1275
1276 if (!(sta->flags & WLAN_STA_PS) &&
1277 !(status->control.flags & IEEE80211_TXCTL_REQUEUE)) {
1278 /* Software retry the packet once */
1279 status->control.flags |= IEEE80211_TXCTL_REQUEUE;
1280 ieee80211_remove_tx_extra(local, sta->key, skb,
1281 &status->control);
1282 dev_queue_xmit(skb);
1283 return;
1284 }
1285
1286 if (net_ratelimit())
1287 printk(KERN_DEBUG "%s: dropped TX filtered frame, "
1288 "queue_len=%d PS=%d @%lu\n",
1289 wiphy_name(local->hw.wiphy),
1290 skb_queue_len(&sta->tx_filtered),
1291 !!(sta->flags & WLAN_STA_PS), jiffies);
1292 dev_kfree_skb(skb);
1293}
1294
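
ieee80211_handle_filtered_frame() boils down to a three-way decision: buffer the frame for a sleeping station if there is room, software-retry it exactly once if the station is awake, otherwise drop it. The same ladder as a pure function (illustrative names only):

    #include <stdio.h>

    enum action { BUFFER_FOR_STA, SW_RETRY, DROP };

    static enum action handle_filtered(int sta_asleep, int buffered, int max_buf,
                                       int already_requeued)
    {
        if (sta_asleep && buffered < max_buf)
            return BUFFER_FOR_STA;  /* skb_queue_tail(&sta->tx_filtered, skb)   */
        if (!sta_asleep && !already_requeued)
            return SW_RETRY;        /* set IEEE80211_TXCTL_REQUEUE and re-xmit  */
        return DROP;                /* ratelimited message + dev_kfree_skb(skb) */
    }

    int main(void)
    {
        printf("asleep, room:   %d\n", handle_filtered(1, 3, 64, 0));
        printf("awake, fresh:   %d\n", handle_filtered(0, 0, 64, 0));
        printf("awake, retried: %d\n", handle_filtered(0, 0, 64, 1));
        return 0;
    }

The preceding comment block spells out why this is safe: the driver must not mix the irq-safe and non-irq-safe RX/TX-status paths and must process RX before TX status, so mac80211 always learns about the station's sleep state before it sees the filtered TX status.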
813void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, 1295void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
814 struct ieee80211_tx_status *status) 1296 struct ieee80211_tx_status *status)
815{ 1297{
@@ -819,7 +1301,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
819 u16 frag, type; 1301 u16 frag, type;
820 struct ieee80211_tx_status_rtap_hdr *rthdr; 1302 struct ieee80211_tx_status_rtap_hdr *rthdr;
821 struct ieee80211_sub_if_data *sdata; 1303 struct ieee80211_sub_if_data *sdata;
822 int monitors; 1304 struct net_device *prev_dev = NULL;
823 1305
824 if (!status) { 1306 if (!status) {
825 printk(KERN_ERR 1307 printk(KERN_ERR
@@ -834,11 +1316,16 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
834 sta = sta_info_get(local, hdr->addr1); 1316 sta = sta_info_get(local, hdr->addr1);
835 if (sta) { 1317 if (sta) {
836 if (sta->flags & WLAN_STA_PS) { 1318 if (sta->flags & WLAN_STA_PS) {
837 /* The STA is in power save mode, so assume 1319 /*
1320 * The STA is in power save mode, so assume
838 * that this TX packet failed because of that. 1321 * that this TX packet failed because of that.
839 */ 1322 */
840 status->excessive_retries = 0; 1323 status->excessive_retries = 0;
841 status->flags |= IEEE80211_TX_STATUS_TX_FILTERED; 1324 status->flags |= IEEE80211_TX_STATUS_TX_FILTERED;
1325 ieee80211_handle_filtered_frame(local, sta,
1326 skb, status);
1327 sta_info_put(sta);
1328 return;
842 } 1329 }
843 sta_info_put(sta); 1330 sta_info_put(sta);
844 } 1331 }
@@ -848,47 +1335,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
848 struct sta_info *sta; 1335 struct sta_info *sta;
849 sta = sta_info_get(local, hdr->addr1); 1336 sta = sta_info_get(local, hdr->addr1);
850 if (sta) { 1337 if (sta) {
851 sta->tx_filtered_count++; 1338 ieee80211_handle_filtered_frame(local, sta, skb,
852 1339 status);
853 /* Clear the TX filter mask for this STA when sending
854 * the next packet. If the STA went to power save mode,
855 * this will happen when it is waking up for the next
856 * time. */
857 sta->clear_dst_mask = 1;
858
859 /* TODO: Is the WLAN_STA_PS flag always set here or is
860 * the race between RX and TX status causing some
861 * packets to be filtered out before 80211.o gets an
862 * update for PS status? This seems to be the case, so
863 * no changes are likely to be needed. */
864 if (sta->flags & WLAN_STA_PS &&
865 skb_queue_len(&sta->tx_filtered) <
866 STA_MAX_TX_BUFFER) {
867 ieee80211_remove_tx_extra(local, sta->key,
868 skb,
869 &status->control);
870 skb_queue_tail(&sta->tx_filtered, skb);
871 } else if (!(sta->flags & WLAN_STA_PS) &&
872 !(status->control.flags & IEEE80211_TXCTL_REQUEUE)) {
873 /* Software retry the packet once */
874 status->control.flags |= IEEE80211_TXCTL_REQUEUE;
875 ieee80211_remove_tx_extra(local, sta->key,
876 skb,
877 &status->control);
878 dev_queue_xmit(skb);
879 } else {
880 if (net_ratelimit()) {
881 printk(KERN_DEBUG "%s: dropped TX "
882 "filtered frame queue_len=%d "
883 "PS=%d @%lu\n",
884 wiphy_name(local->hw.wiphy),
885 skb_queue_len(
886 &sta->tx_filtered),
887 !!(sta->flags & WLAN_STA_PS),
888 jiffies);
889 }
890 dev_kfree_skb(skb);
891 }
892 sta_info_put(sta); 1340 sta_info_put(sta);
893 return; 1341 return;
894 } 1342 }
@@ -932,7 +1380,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
932 /* this was a transmitted frame, but now we want to reuse it */ 1380 /* this was a transmitted frame, but now we want to reuse it */
933 skb_orphan(skb); 1381 skb_orphan(skb);
934 1382
935 if (!local->monitors) { 1383 /*
1384 * This is a bit racy but we can avoid a lot of work
1385 * with this test...
1386 */
1387 if (!local->monitors && !local->cooked_mntrs) {
936 dev_kfree_skb(skb); 1388 dev_kfree_skb(skb);
937 return; 1389 return;
938 } 1390 }
@@ -966,51 +1418,44 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
966 1418
967 rthdr->data_retries = status->retry_count; 1419 rthdr->data_retries = status->retry_count;
968 1420
1421 /* XXX: is this sufficient for BPF? */
1422 skb_set_mac_header(skb, 0);
1423 skb->ip_summed = CHECKSUM_UNNECESSARY;
1424 skb->pkt_type = PACKET_OTHERHOST;
1425 skb->protocol = htons(ETH_P_802_2);
1426 memset(skb->cb, 0, sizeof(skb->cb));
1427
969 rcu_read_lock(); 1428 rcu_read_lock();
970 monitors = local->monitors;
971 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 1429 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
972 /*
973 * Using the monitors counter is possibly racy, but
974 * if the value is wrong we simply either clone the skb
975 * once too much or forget sending it to one monitor iface
976 * The latter case isn't nice but fixing the race is much
977 * more complicated.
978 */
979 if (!monitors || !skb)
980 goto out;
981
982 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) { 1430 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) {
983 if (!netif_running(sdata->dev)) 1431 if (!netif_running(sdata->dev))
984 continue; 1432 continue;
985 monitors--; 1433
986 if (monitors) 1434 if (prev_dev) {
987 skb2 = skb_clone(skb, GFP_ATOMIC); 1435 skb2 = skb_clone(skb, GFP_ATOMIC);
988 else 1436 if (skb2) {
989 skb2 = NULL; 1437 skb2->dev = prev_dev;
990 skb->dev = sdata->dev; 1438 netif_rx(skb2);
991 /* XXX: is this sufficient for BPF? */ 1439 }
992 skb_set_mac_header(skb, 0); 1440 }
993 skb->ip_summed = CHECKSUM_UNNECESSARY; 1441
994 skb->pkt_type = PACKET_OTHERHOST; 1442 prev_dev = sdata->dev;
995 skb->protocol = htons(ETH_P_802_2);
996 memset(skb->cb, 0, sizeof(skb->cb));
997 netif_rx(skb);
998 skb = skb2;
999 } 1443 }
1000 } 1444 }
1001 out: 1445 if (prev_dev) {
1446 skb->dev = prev_dev;
1447 netif_rx(skb);
1448 skb = NULL;
1449 }
1002 rcu_read_unlock(); 1450 rcu_read_unlock();
1003 if (skb) 1451 dev_kfree_skb(skb);
1004 dev_kfree_skb(skb);
1005} 1452}
1006EXPORT_SYMBOL(ieee80211_tx_status); 1453EXPORT_SYMBOL(ieee80211_tx_status);
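The rewritten monitor loop above replaces the old "monitors" counter with a prev_dev pointer: every matching interface except the last receives a clone, and the last one receives the original buffer, so exactly n-1 clones are allocated. Here is a self-contained sketch of that pattern, with invented types and helpers standing in for sk_buffs and netif_rx().

/* Stand-alone model of the prev_dev pattern used in ieee80211_tx_status:
 * hand a clone to every previously seen monitor, give the original buffer
 * to the last one, and drop it if there were no monitors at all. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char data[32]; };

static struct buf *clone_buf(const struct buf *b)
{
	struct buf *c = malloc(sizeof(*c));
	if (c)
		memcpy(c, b, sizeof(*c));
	return c;
}

static void deliver(int dev, struct buf *b)
{
	printf("deliver to monitor %d: %s\n", dev, b->data);
	free(b);
}

static void deliver_to_monitors(const int *devs, int n, struct buf *b)
{
	int prev_dev = -1;

	for (int i = 0; i < n; i++) {
		if (prev_dev != -1) {
			struct buf *c = clone_buf(b);
			if (c)
				deliver(prev_dev, c);
		}
		prev_dev = devs[i];
	}
	if (prev_dev != -1) {
		deliver(prev_dev, b);   /* last interface gets the original */
		b = NULL;
	}
	free(b);                        /* no monitors: just drop it */
}

int main(void)
{
	int monitors[] = { 3, 7, 9 };
	struct buf *b = malloc(sizeof(*b));
	if (!b)
		return 1;
	strcpy(b->data, "tx status frame");
	deliver_to_monitors(monitors, 3, b);
	return 0;
}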
1007 1454
1008struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 1455struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
1009 const struct ieee80211_ops *ops) 1456 const struct ieee80211_ops *ops)
1010{ 1457{
1011 struct net_device *mdev;
1012 struct ieee80211_local *local; 1458 struct ieee80211_local *local;
1013 struct ieee80211_sub_if_data *sdata;
1014 int priv_size; 1459 int priv_size;
1015 struct wiphy *wiphy; 1460 struct wiphy *wiphy;
1016 1461
@@ -1056,25 +1501,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
1056 BUG_ON(!ops->configure_filter); 1501 BUG_ON(!ops->configure_filter);
1057 local->ops = ops; 1502 local->ops = ops;
1058 1503
1059 /* for now, mdev needs sub_if_data :/ */
1060 mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data),
1061 "wmaster%d", ether_setup);
1062 if (!mdev) {
1063 wiphy_free(wiphy);
1064 return NULL;
1065 }
1066
1067 sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
1068 mdev->ieee80211_ptr = &sdata->wdev;
1069 sdata->wdev.wiphy = wiphy;
1070
1071 local->hw.queues = 1; /* default */ 1504 local->hw.queues = 1; /* default */
1072 1505
1073 local->mdev = mdev;
1074 local->rx_pre_handlers = ieee80211_rx_pre_handlers;
1075 local->rx_handlers = ieee80211_rx_handlers;
1076 local->tx_handlers = ieee80211_tx_handlers;
1077
1078 local->bridge_packets = 1; 1506 local->bridge_packets = 1;
1079 1507
1080 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 1508 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
@@ -1083,33 +1511,12 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
1083 local->long_retry_limit = 4; 1511 local->long_retry_limit = 4;
1084 local->hw.conf.radio_enabled = 1; 1512 local->hw.conf.radio_enabled = 1;
1085 1513
1086 local->enabled_modes = ~0;
1087
1088 INIT_LIST_HEAD(&local->modes_list);
1089
1090 INIT_LIST_HEAD(&local->interfaces); 1514 INIT_LIST_HEAD(&local->interfaces);
1091 1515
1092 INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work); 1516 INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work);
1093 ieee80211_rx_bss_list_init(mdev);
1094 1517
1095 sta_info_init(local); 1518 sta_info_init(local);
1096 1519
1097 mdev->hard_start_xmit = ieee80211_master_start_xmit;
1098 mdev->open = ieee80211_master_open;
1099 mdev->stop = ieee80211_master_stop;
1100 mdev->type = ARPHRD_IEEE80211;
1101 mdev->header_ops = &ieee80211_header_ops;
1102 mdev->set_multicast_list = ieee80211_master_set_multicast_list;
1103
1104 sdata->vif.type = IEEE80211_IF_TYPE_AP;
1105 sdata->dev = mdev;
1106 sdata->local = local;
1107 sdata->u.ap.force_unicast_rateidx = -1;
1108 sdata->u.ap.max_ratectrl_rateidx = -1;
1109 ieee80211_if_sdata_init(sdata);
1110 /* no RCU needed since we're still during init phase */
1111 list_add_tail(&sdata->list, &local->interfaces);
1112
1113 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, 1520 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
1114 (unsigned long)local); 1521 (unsigned long)local);
1115 tasklet_disable(&local->tx_pending_tasklet); 1522 tasklet_disable(&local->tx_pending_tasklet);
@@ -1131,11 +1538,63 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1131 struct ieee80211_local *local = hw_to_local(hw); 1538 struct ieee80211_local *local = hw_to_local(hw);
1132 const char *name; 1539 const char *name;
1133 int result; 1540 int result;
1541 enum ieee80211_band band;
1542 struct net_device *mdev;
1543 struct ieee80211_sub_if_data *sdata;
1544
1545 /*
1546 * generic code guarantees at least one band,
1547 * set this very early because much code assumes
1548 * that hw.conf.channel is assigned
1549 */
1550 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1551 struct ieee80211_supported_band *sband;
1552
1553 sband = local->hw.wiphy->bands[band];
1554 if (sband) {
1555 /* init channel we're on */
1556 local->hw.conf.channel =
1557 local->oper_channel =
1558 local->scan_channel = &sband->channels[0];
1559 break;
1560 }
1561 }
1134 1562
1135 result = wiphy_register(local->hw.wiphy); 1563 result = wiphy_register(local->hw.wiphy);
1136 if (result < 0) 1564 if (result < 0)
1137 return result; 1565 return result;
1138 1566
1567 /* for now, mdev needs sub_if_data :/ */
1568 mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data),
1569 "wmaster%d", ether_setup);
1570 if (!mdev)
1571 goto fail_mdev_alloc;
1572
1573 sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
1574 mdev->ieee80211_ptr = &sdata->wdev;
1575 sdata->wdev.wiphy = local->hw.wiphy;
1576
1577 local->mdev = mdev;
1578
1579 ieee80211_rx_bss_list_init(mdev);
1580
1581 mdev->hard_start_xmit = ieee80211_master_start_xmit;
1582 mdev->open = ieee80211_master_open;
1583 mdev->stop = ieee80211_master_stop;
1584 mdev->type = ARPHRD_IEEE80211;
1585 mdev->header_ops = &ieee80211_header_ops;
1586 mdev->set_multicast_list = ieee80211_master_set_multicast_list;
1587
1588 sdata->vif.type = IEEE80211_IF_TYPE_AP;
1589 sdata->dev = mdev;
1590 sdata->local = local;
1591 sdata->u.ap.force_unicast_rateidx = -1;
1592 sdata->u.ap.max_ratectrl_rateidx = -1;
1593 ieee80211_if_sdata_init(sdata);
1594
1595 /* no RCU needed since we're still during init phase */
1596 list_add_tail(&sdata->list, &local->interfaces);
1597
1139 name = wiphy_dev(local->hw.wiphy)->driver->name; 1598 name = wiphy_dev(local->hw.wiphy)->driver->name;
1140 local->hw.workqueue = create_singlethread_workqueue(name); 1599 local->hw.workqueue = create_singlethread_workqueue(name);
1141 if (!local->hw.workqueue) { 1600 if (!local->hw.workqueue) {
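The loop added at the top of ieee80211_register_hw() picks the first channel of the first band the driver registered as the initial operating, scan and configured channel. A simplified stand-alone model of that selection follows; the structure names are placeholders, not the cfg80211/mac80211 definitions.

/* Minimal model of the "first available band, first channel" default
 * performed early in ieee80211_register_hw() above. */
#include <stddef.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, NUM_BANDS };

struct channel { int center_freq; };
struct supported_band { struct channel *channels; int n_channels; };

static struct channel *pick_default_channel(struct supported_band *bands[NUM_BANDS])
{
	for (int b = 0; b < NUM_BANDS; b++)
		if (bands[b] && bands[b]->n_channels > 0)
			return &bands[b]->channels[0];
	return NULL;    /* generic code is expected to guarantee one band */
}

int main(void)
{
	struct channel ch5[] = { { 5180 }, { 5200 } };
	struct supported_band b5 = { ch5, 2 };
	struct supported_band *bands[NUM_BANDS] = { NULL, &b5 };
	struct channel *def = pick_default_channel(bands);
	printf("default channel: %d MHz\n", def ? def->center_freq : -1);
	return 0;
}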
@@ -1227,49 +1686,18 @@ fail_sta_info:
1227 debugfs_hw_del(local); 1686 debugfs_hw_del(local);
1228 destroy_workqueue(local->hw.workqueue); 1687 destroy_workqueue(local->hw.workqueue);
1229fail_workqueue: 1688fail_workqueue:
1689 ieee80211_if_free(local->mdev);
1690 local->mdev = NULL;
1691fail_mdev_alloc:
1230 wiphy_unregister(local->hw.wiphy); 1692 wiphy_unregister(local->hw.wiphy);
1231 return result; 1693 return result;
1232} 1694}
1233EXPORT_SYMBOL(ieee80211_register_hw); 1695EXPORT_SYMBOL(ieee80211_register_hw);
1234 1696
1235int ieee80211_register_hwmode(struct ieee80211_hw *hw,
1236 struct ieee80211_hw_mode *mode)
1237{
1238 struct ieee80211_local *local = hw_to_local(hw);
1239 struct ieee80211_rate *rate;
1240 int i;
1241
1242 INIT_LIST_HEAD(&mode->list);
1243 list_add_tail(&mode->list, &local->modes_list);
1244
1245 local->hw_modes |= (1 << mode->mode);
1246 for (i = 0; i < mode->num_rates; i++) {
1247 rate = &(mode->rates[i]);
1248 rate->rate_inv = CHAN_UTIL_RATE_LCM / rate->rate;
1249 }
1250 ieee80211_prepare_rates(local, mode);
1251
1252 if (!local->oper_hw_mode) {
1253 /* Default to this mode */
1254 local->hw.conf.phymode = mode->mode;
1255 local->oper_hw_mode = local->scan_hw_mode = mode;
1256 local->oper_channel = local->scan_channel = &mode->channels[0];
1257 local->hw.conf.mode = local->oper_hw_mode;
1258 local->hw.conf.chan = local->oper_channel;
1259 }
1260
1261 if (!(hw->flags & IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED))
1262 ieee80211_set_default_regdomain(mode);
1263
1264 return 0;
1265}
1266EXPORT_SYMBOL(ieee80211_register_hwmode);
1267
1268void ieee80211_unregister_hw(struct ieee80211_hw *hw) 1697void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1269{ 1698{
1270 struct ieee80211_local *local = hw_to_local(hw); 1699 struct ieee80211_local *local = hw_to_local(hw);
1271 struct ieee80211_sub_if_data *sdata, *tmp; 1700 struct ieee80211_sub_if_data *sdata, *tmp;
1272 int i;
1273 1701
1274 tasklet_kill(&local->tx_pending_tasklet); 1702 tasklet_kill(&local->tx_pending_tasklet);
1275 tasklet_kill(&local->tasklet); 1703 tasklet_kill(&local->tasklet);
@@ -1310,11 +1738,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1310 rate_control_deinitialize(local); 1738 rate_control_deinitialize(local);
1311 debugfs_hw_del(local); 1739 debugfs_hw_del(local);
1312 1740
1313 for (i = 0; i < NUM_IEEE80211_MODES; i++) {
1314 kfree(local->supp_rates[i]);
1315 kfree(local->basic_rates[i]);
1316 }
1317
1318 if (skb_queue_len(&local->skb_queue) 1741 if (skb_queue_len(&local->skb_queue)
1319 || skb_queue_len(&local->skb_queue_unreliable)) 1742 || skb_queue_len(&local->skb_queue_unreliable))
1320 printk(KERN_WARNING "%s: skb_queue not empty\n", 1743 printk(KERN_WARNING "%s: skb_queue not empty\n",
@@ -1326,6 +1749,8 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1326 wiphy_unregister(local->hw.wiphy); 1749 wiphy_unregister(local->hw.wiphy);
1327 ieee80211_wep_free(local); 1750 ieee80211_wep_free(local);
1328 ieee80211_led_exit(local); 1751 ieee80211_led_exit(local);
1752 ieee80211_if_free(local->mdev);
1753 local->mdev = NULL;
1329} 1754}
1330EXPORT_SYMBOL(ieee80211_unregister_hw); 1755EXPORT_SYMBOL(ieee80211_unregister_hw);
1331 1756
@@ -1333,7 +1758,6 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
1333{ 1758{
1334 struct ieee80211_local *local = hw_to_local(hw); 1759 struct ieee80211_local *local = hw_to_local(hw);
1335 1760
1336 ieee80211_if_free(local->mdev);
1337 wiphy_free(local->hw.wiphy); 1761 wiphy_free(local->hw.wiphy);
1338} 1762}
1339EXPORT_SYMBOL(ieee80211_free_hw); 1763EXPORT_SYMBOL(ieee80211_free_hw);
@@ -1361,7 +1785,6 @@ static int __init ieee80211_init(void)
1361 } 1785 }
1362 1786
1363 ieee80211_debugfs_netdev_init(); 1787 ieee80211_debugfs_netdev_init();
1364 ieee80211_regdomain_init();
1365 1788
1366 return 0; 1789 return 0;
1367 1790
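The new fail_mdev_alloc label in ieee80211_register_hw() keeps the error path symmetric: resources are released in exactly the reverse order they were acquired. The sketch below illustrates that goto-unwinding shape with invented resource names; it is a pattern illustration, not the actual function.

/* Illustrative sketch of the goto-based unwinding pattern extended above
 * (fail_workqueue -> fail_mdev_alloc -> unregister): undo in exactly the
 * reverse order of acquisition. */
#include <stdio.h>

static int acquire(const char *what, int fail) { printf("acquire %s\n", what); return fail ? -1 : 0; }
static void release(const char *what)          { printf("release %s\n", what); }

static int register_device(void)
{
	int result;

	result = acquire("registry entry", 0);
	if (result < 0)
		return result;

	result = acquire("master netdev", 0);
	if (result < 0)
		goto fail_mdev_alloc;

	result = acquire("workqueue", 1);       /* simulate a failure here */
	if (result < 0)
		goto fail_workqueue;

	return 0;

fail_workqueue:
	release("master netdev");
fail_mdev_alloc:
	release("registry entry");
	return result;
}

int main(void) { return register_device() ? 1 : 0; }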
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 72ecbf7bf962..b07b3cbfd039 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -79,8 +79,7 @@ struct ieee80211_sta_bss {
79 u8 ssid[IEEE80211_MAX_SSID_LEN]; 79 u8 ssid[IEEE80211_MAX_SSID_LEN];
80 size_t ssid_len; 80 size_t ssid_len;
81 u16 capability; /* host byte order */ 81 u16 capability; /* host byte order */
82 int hw_mode; 82 enum ieee80211_band band;
83 int channel;
84 int freq; 83 int freq;
85 int rssi, signal, noise; 84 int rssi, signal, noise;
86 u8 *wpa_ie; 85 u8 *wpa_ie;
@@ -109,9 +108,17 @@ struct ieee80211_sta_bss {
109}; 108};
110 109
111 110
112typedef enum { 111typedef unsigned __bitwise__ ieee80211_tx_result;
113 TXRX_CONTINUE, TXRX_DROP, TXRX_QUEUED 112#define TX_CONTINUE ((__force ieee80211_tx_result) 0u)
114} ieee80211_txrx_result; 113#define TX_DROP ((__force ieee80211_tx_result) 1u)
114#define TX_QUEUED ((__force ieee80211_tx_result) 2u)
115
116typedef unsigned __bitwise__ ieee80211_rx_result;
117#define RX_CONTINUE ((__force ieee80211_rx_result) 0u)
118#define RX_DROP_UNUSABLE ((__force ieee80211_rx_result) 1u)
119#define RX_DROP_MONITOR ((__force ieee80211_rx_result) 2u)
120#define RX_QUEUED ((__force ieee80211_rx_result) 3u)
121
115 122
116/* flags used in struct ieee80211_txrx_data.flags */ 123/* flags used in struct ieee80211_txrx_data.flags */
117/* whether the MSDU was fragmented */ 124/* whether the MSDU was fragmented */
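The TX_*/RX_* definitions above replace the single shared enum with two sparse-checked __bitwise__ typedefs, so returning a TX code from an RX handler becomes a static-analysis warning rather than silently compiling. Below is a stand-alone rendering of the idea; the sparse annotations are defined away here only so the snippet builds as plain C.

/* How the split tx/rx result typedefs are meant to be used.  __bitwise__ and
 * __force only carry meaning under sparse; they are no-ops in this sketch. */
#include <stdio.h>

#define __bitwise__
#define __force

typedef unsigned int __bitwise__ rx_result;
#define RX_CONTINUE       ((__force rx_result) 0u)
#define RX_DROP_UNUSABLE  ((__force rx_result) 1u)

typedef unsigned int __bitwise__ tx_result;
#define TX_CONTINUE       ((__force tx_result) 0u)
#define TX_DROP           ((__force tx_result) 1u)

static rx_result rx_handler(int frame_ok)
{
	/* Returning TX_DROP here would be accepted by the C compiler but
	 * flagged as a type mismatch by sparse -- that is the point of the
	 * separate typedefs. */
	return frame_ok ? RX_CONTINUE : RX_DROP_UNUSABLE;
}

int main(void)
{
	printf("rx result: %u\n", (unsigned int) rx_handler(1));
	return 0;
}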
@@ -124,6 +131,7 @@ typedef enum {
124#define IEEE80211_TXRXD_RXRA_MATCH BIT(5) 131#define IEEE80211_TXRXD_RXRA_MATCH BIT(5)
125#define IEEE80211_TXRXD_TX_INJECTED BIT(6) 132#define IEEE80211_TXRXD_TX_INJECTED BIT(6)
126#define IEEE80211_TXRXD_RX_AMSDU BIT(7) 133#define IEEE80211_TXRXD_RX_AMSDU BIT(7)
134#define IEEE80211_TXRXD_RX_CMNTR_REPORTED BIT(8)
127struct ieee80211_txrx_data { 135struct ieee80211_txrx_data {
128 struct sk_buff *skb; 136 struct sk_buff *skb;
129 struct net_device *dev; 137 struct net_device *dev;
@@ -136,13 +144,12 @@ struct ieee80211_txrx_data {
136 union { 144 union {
137 struct { 145 struct {
138 struct ieee80211_tx_control *control; 146 struct ieee80211_tx_control *control;
139 struct ieee80211_hw_mode *mode; 147 struct ieee80211_channel *channel;
140 struct ieee80211_rate *rate; 148 struct ieee80211_rate *rate;
141 /* use this rate (if set) for last fragment; rate can 149 /* use this rate (if set) for last fragment; rate can
142 * be set to lower rate for the first fragments, e.g., 150 * be set to lower rate for the first fragments, e.g.,
143 * when using CTS protection with IEEE 802.11g. */ 151 * when using CTS protection with IEEE 802.11g. */
144 struct ieee80211_rate *last_frag_rate; 152 struct ieee80211_rate *last_frag_rate;
145 int last_frag_hwrate;
146 153
147 /* Extra fragments (in addition to the first fragment 154 /* Extra fragments (in addition to the first fragment
148 * in skb) */ 155 * in skb) */
@@ -151,6 +158,7 @@ struct ieee80211_txrx_data {
151 } tx; 158 } tx;
152 struct { 159 struct {
153 struct ieee80211_rx_status *status; 160 struct ieee80211_rx_status *status;
161 struct ieee80211_rate *rate;
154 int sent_ps_buffered; 162 int sent_ps_buffered;
155 int queue; 163 int queue;
156 int load; 164 int load;
@@ -165,6 +173,7 @@ struct ieee80211_txrx_data {
165#define IEEE80211_TXPD_DO_NOT_ENCRYPT BIT(1) 173#define IEEE80211_TXPD_DO_NOT_ENCRYPT BIT(1)
166#define IEEE80211_TXPD_REQUEUE BIT(2) 174#define IEEE80211_TXPD_REQUEUE BIT(2)
167#define IEEE80211_TXPD_EAPOL_FRAME BIT(3) 175#define IEEE80211_TXPD_EAPOL_FRAME BIT(3)
176#define IEEE80211_TXPD_AMPDU BIT(4)
168/* Stored in sk_buff->cb */ 177/* Stored in sk_buff->cb */
169struct ieee80211_tx_packet_data { 178struct ieee80211_tx_packet_data {
170 int ifindex; 179 int ifindex;
@@ -178,18 +187,10 @@ struct ieee80211_tx_stored_packet {
178 struct sk_buff *skb; 187 struct sk_buff *skb;
179 int num_extra_frag; 188 int num_extra_frag;
180 struct sk_buff **extra_frag; 189 struct sk_buff **extra_frag;
181 int last_frag_rateidx;
182 int last_frag_hwrate;
183 struct ieee80211_rate *last_frag_rate; 190 struct ieee80211_rate *last_frag_rate;
184 unsigned int last_frag_rate_ctrl_probe; 191 unsigned int last_frag_rate_ctrl_probe;
185}; 192};
186 193
187typedef ieee80211_txrx_result (*ieee80211_tx_handler)
188(struct ieee80211_txrx_data *tx);
189
190typedef ieee80211_txrx_result (*ieee80211_rx_handler)
191(struct ieee80211_txrx_data *rx);
192
193struct beacon_data { 194struct beacon_data {
194 u8 *head, *tail; 195 u8 *head, *tail;
195 int head_len, tail_len; 196 int head_len, tail_len;
@@ -206,7 +207,7 @@ struct ieee80211_if_ap {
206 207
207 /* yes, this looks ugly, but guarantees that we can later use 208 /* yes, this looks ugly, but guarantees that we can later use
208 * bitmap_empty :) 209 * bitmap_empty :)
209 * NB: don't ever use set_bit, use bss_tim_set/bss_tim_clear! */ 210 * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */
210 u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; 211 u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
211 atomic_t num_sta_ps; /* number of stations in PS mode */ 212 atomic_t num_sta_ps; /* number of stations in PS mode */
212 struct sk_buff_head ps_bc_buf; 213 struct sk_buff_head ps_bc_buf;
@@ -282,7 +283,7 @@ struct ieee80211_if_sta {
282 283
283 unsigned long ibss_join_req; 284 unsigned long ibss_join_req;
284 struct sk_buff *probe_resp; /* ProbeResp template for IBSS */ 285 struct sk_buff *probe_resp; /* ProbeResp template for IBSS */
285 u32 supp_rates_bits; 286 u32 supp_rates_bits[IEEE80211_NUM_BANDS];
286 287
287 int wmm_last_param_set; 288 int wmm_last_param_set;
288}; 289};
@@ -292,6 +293,7 @@ struct ieee80211_if_sta {
292#define IEEE80211_SDATA_ALLMULTI BIT(0) 293#define IEEE80211_SDATA_ALLMULTI BIT(0)
293#define IEEE80211_SDATA_PROMISC BIT(1) 294#define IEEE80211_SDATA_PROMISC BIT(1)
294#define IEEE80211_SDATA_USERSPACE_MLME BIT(2) 295#define IEEE80211_SDATA_USERSPACE_MLME BIT(2)
296#define IEEE80211_SDATA_OPERATING_GMODE BIT(3)
295struct ieee80211_sub_if_data { 297struct ieee80211_sub_if_data {
296 struct list_head list; 298 struct list_head list;
297 299
@@ -306,11 +308,11 @@ struct ieee80211_sub_if_data {
306 unsigned int flags; 308 unsigned int flags;
307 309
308 int drop_unencrypted; 310 int drop_unencrypted;
311
309 /* 312 /*
310 * IEEE 802.1X Port access control in effect, 313 * basic rates of this AP or the AP we're associated to
311 * drop packets to/from unauthorized port
312 */ 314 */
313 int ieee802_1x_pac; 315 u64 basic_rates;
314 316
315 u16 sequence; 317 u16 sequence;
316 318
@@ -338,6 +340,7 @@ struct ieee80211_sub_if_data {
338 struct ieee80211_if_wds wds; 340 struct ieee80211_if_wds wds;
339 struct ieee80211_if_vlan vlan; 341 struct ieee80211_if_vlan vlan;
340 struct ieee80211_if_sta sta; 342 struct ieee80211_if_sta sta;
343 u32 mntr_flags;
341 } u; 344 } u;
342 int channel_use; 345 int channel_use;
343 int channel_use_raw; 346 int channel_use_raw;
@@ -348,7 +351,6 @@ struct ieee80211_sub_if_data {
348 struct { 351 struct {
349 struct dentry *channel_use; 352 struct dentry *channel_use;
350 struct dentry *drop_unencrypted; 353 struct dentry *drop_unencrypted;
351 struct dentry *ieee802_1x_pac;
352 struct dentry *state; 354 struct dentry *state;
353 struct dentry *bssid; 355 struct dentry *bssid;
354 struct dentry *prev_bssid; 356 struct dentry *prev_bssid;
@@ -367,7 +369,6 @@ struct ieee80211_sub_if_data {
367 struct { 369 struct {
368 struct dentry *channel_use; 370 struct dentry *channel_use;
369 struct dentry *drop_unencrypted; 371 struct dentry *drop_unencrypted;
370 struct dentry *ieee802_1x_pac;
371 struct dentry *num_sta_ps; 372 struct dentry *num_sta_ps;
372 struct dentry *dtim_count; 373 struct dentry *dtim_count;
373 struct dentry *num_beacons; 374 struct dentry *num_beacons;
@@ -378,13 +379,11 @@ struct ieee80211_sub_if_data {
378 struct { 379 struct {
379 struct dentry *channel_use; 380 struct dentry *channel_use;
380 struct dentry *drop_unencrypted; 381 struct dentry *drop_unencrypted;
381 struct dentry *ieee802_1x_pac;
382 struct dentry *peer; 382 struct dentry *peer;
383 } wds; 383 } wds;
384 struct { 384 struct {
385 struct dentry *channel_use; 385 struct dentry *channel_use;
386 struct dentry *drop_unencrypted; 386 struct dentry *drop_unencrypted;
387 struct dentry *ieee802_1x_pac;
388 } vlan; 387 } vlan;
389 struct { 388 struct {
390 struct dentry *mode; 389 struct dentry *mode;
@@ -407,6 +406,8 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
407enum { 406enum {
408 IEEE80211_RX_MSG = 1, 407 IEEE80211_RX_MSG = 1,
409 IEEE80211_TX_STATUS_MSG = 2, 408 IEEE80211_TX_STATUS_MSG = 2,
409 IEEE80211_DELBA_MSG = 3,
410 IEEE80211_ADDBA_MSG = 4,
410}; 411};
411 412
412struct ieee80211_local { 413struct ieee80211_local {
@@ -417,12 +418,11 @@ struct ieee80211_local {
417 418
418 const struct ieee80211_ops *ops; 419 const struct ieee80211_ops *ops;
419 420
420 /* List of registered struct ieee80211_hw_mode */
421 struct list_head modes_list;
422
423 struct net_device *mdev; /* wmaster# - "master" 802.11 device */ 421 struct net_device *mdev; /* wmaster# - "master" 802.11 device */
424 int open_count; 422 int open_count;
425 int monitors; 423 int monitors, cooked_mntrs;
424 /* number of interfaces with corresponding FIF_ flags */
425 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss;
426 unsigned int filter_flags; /* FIF_* */ 426 unsigned int filter_flags; /* FIF_* */
427 struct iw_statistics wstats; 427 struct iw_statistics wstats;
428 u8 wstats_flags; 428 u8 wstats_flags;
@@ -450,8 +450,8 @@ struct ieee80211_local {
450 struct sta_info *sta_hash[STA_HASH_SIZE]; 450 struct sta_info *sta_hash[STA_HASH_SIZE];
451 struct timer_list sta_cleanup; 451 struct timer_list sta_cleanup;
452 452
453 unsigned long state[NUM_TX_DATA_QUEUES]; 453 unsigned long state[NUM_TX_DATA_QUEUES_AMPDU];
454 struct ieee80211_tx_stored_packet pending_packet[NUM_TX_DATA_QUEUES]; 454 struct ieee80211_tx_stored_packet pending_packet[NUM_TX_DATA_QUEUES_AMPDU];
455 struct tasklet_struct tx_pending_tasklet; 455 struct tasklet_struct tx_pending_tasklet;
456 456
457 /* number of interfaces with corresponding IFF_ flags */ 457 /* number of interfaces with corresponding IFF_ flags */
@@ -459,11 +459,6 @@ struct ieee80211_local {
459 459
460 struct rate_control_ref *rate_ctrl; 460 struct rate_control_ref *rate_ctrl;
461 461
462 /* Supported and basic rate filters for different modes. These are
463 * pointers to -1 terminated lists and rates in 100 kbps units. */
464 int *supp_rates[NUM_IEEE80211_MODES];
465 int *basic_rates[NUM_IEEE80211_MODES];
466
467 int rts_threshold; 462 int rts_threshold;
468 int fragmentation_threshold; 463 int fragmentation_threshold;
469 int short_retry_limit; /* dot11ShortRetryLimit */ 464 int short_retry_limit; /* dot11ShortRetryLimit */
@@ -477,21 +472,18 @@ struct ieee80211_local {
477 * deliver multicast frames both back to wireless 472 * deliver multicast frames both back to wireless
478 * media and to the local net stack */ 473 * media and to the local net stack */
479 474
480 ieee80211_rx_handler *rx_pre_handlers;
481 ieee80211_rx_handler *rx_handlers;
482 ieee80211_tx_handler *tx_handlers;
483
484 struct list_head interfaces; 475 struct list_head interfaces;
485 476
486 bool sta_sw_scanning; 477 bool sta_sw_scanning;
487 bool sta_hw_scanning; 478 bool sta_hw_scanning;
488 int scan_channel_idx; 479 int scan_channel_idx;
480 enum ieee80211_band scan_band;
481
489 enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; 482 enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state;
490 unsigned long last_scan_completed; 483 unsigned long last_scan_completed;
491 struct delayed_work scan_work; 484 struct delayed_work scan_work;
492 struct net_device *scan_dev; 485 struct net_device *scan_dev;
493 struct ieee80211_channel *oper_channel, *scan_channel; 486 struct ieee80211_channel *oper_channel, *scan_channel;
494 struct ieee80211_hw_mode *oper_hw_mode, *scan_hw_mode;
495 u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; 487 u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
496 size_t scan_ssid_len; 488 size_t scan_ssid_len;
497 struct list_head sta_bss_list; 489 struct list_head sta_bss_list;
@@ -560,14 +552,8 @@ struct ieee80211_local {
560 int wifi_wme_noack_test; 552 int wifi_wme_noack_test;
561 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ 553 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
562 554
563 unsigned int enabled_modes; /* bitfield of allowed modes;
564 * (1 << MODE_*) */
565 unsigned int hw_modes; /* bitfield of supported hardware modes;
566 * (1 << MODE_*) */
567
568#ifdef CONFIG_MAC80211_DEBUGFS 555#ifdef CONFIG_MAC80211_DEBUGFS
569 struct local_debugfsdentries { 556 struct local_debugfsdentries {
570 struct dentry *channel;
571 struct dentry *frequency; 557 struct dentry *frequency;
572 struct dentry *antenna_sel_tx; 558 struct dentry *antenna_sel_tx;
573 struct dentry *antenna_sel_rx; 559 struct dentry *antenna_sel_rx;
@@ -577,9 +563,7 @@ struct ieee80211_local {
577 struct dentry *short_retry_limit; 563 struct dentry *short_retry_limit;
578 struct dentry *long_retry_limit; 564 struct dentry *long_retry_limit;
579 struct dentry *total_ps_buffered; 565 struct dentry *total_ps_buffered;
580 struct dentry *mode;
581 struct dentry *wep_iv; 566 struct dentry *wep_iv;
582 struct dentry *modes;
583 struct dentry *statistics; 567 struct dentry *statistics;
584 struct local_debugfsdentries_statsdentries { 568 struct local_debugfsdentries_statsdentries {
585 struct dentry *transmitted_fragment_count; 569 struct dentry *transmitted_fragment_count;
@@ -627,6 +611,12 @@ struct ieee80211_local {
627#endif 611#endif
628}; 612};
629 613
614/* this struct represents 802.11n's RA/TID combination */
615struct ieee80211_ra_tid {
616 u8 ra[ETH_ALEN];
617 u16 tid;
618};
619
630static inline struct ieee80211_local *hw_to_local( 620static inline struct ieee80211_local *hw_to_local(
631 struct ieee80211_hw *hw) 621 struct ieee80211_hw *hw)
632{ 622{
@@ -650,57 +640,6 @@ struct sta_attribute {
650 ssize_t (*store)(struct sta_info *, const char *buf, size_t count); 640 ssize_t (*store)(struct sta_info *, const char *buf, size_t count);
651}; 641};
652 642
653static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
654{
655 /*
656 * This format has been mandated by the IEEE specifications,
657 * so this line may not be changed to use the __set_bit() format.
658 */
659 bss->tim[aid / 8] |= (1 << (aid % 8));
660}
661
662static inline void bss_tim_set(struct ieee80211_local *local,
663 struct ieee80211_if_ap *bss, u16 aid)
664{
665 read_lock_bh(&local->sta_lock);
666 __bss_tim_set(bss, aid);
667 read_unlock_bh(&local->sta_lock);
668}
669
670static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid)
671{
672 /*
673 * This format has been mandated by the IEEE specifications,
674 * so this line may not be changed to use the __clear_bit() format.
675 */
676 bss->tim[aid / 8] &= ~(1 << (aid % 8));
677}
678
679static inline void bss_tim_clear(struct ieee80211_local *local,
680 struct ieee80211_if_ap *bss, u16 aid)
681{
682 read_lock_bh(&local->sta_lock);
683 __bss_tim_clear(bss, aid);
684 read_unlock_bh(&local->sta_lock);
685}
686
687/**
688 * ieee80211_is_erp_rate - Check if a rate is an ERP rate
689 * @phymode: The PHY-mode for this rate (MODE_IEEE80211...)
690 * @rate: Transmission rate to check, in 100 kbps
691 *
692 * Check if a given rate is an Extended Rate PHY (ERP) rate.
693 */
694static inline int ieee80211_is_erp_rate(int phymode, int rate)
695{
696 if (phymode == MODE_IEEE80211G) {
697 if (rate != 10 && rate != 20 &&
698 rate != 55 && rate != 110)
699 return 1;
700 }
701 return 0;
702}
703
704static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) 643static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
705{ 644{
706 return compare_ether_addr(raddr, addr) == 0 || 645 return compare_ether_addr(raddr, addr) == 0 ||
@@ -712,13 +651,9 @@ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
712int ieee80211_hw_config(struct ieee80211_local *local); 651int ieee80211_hw_config(struct ieee80211_local *local);
713int ieee80211_if_config(struct net_device *dev); 652int ieee80211_if_config(struct net_device *dev);
714int ieee80211_if_config_beacon(struct net_device *dev); 653int ieee80211_if_config_beacon(struct net_device *dev);
715void ieee80211_prepare_rates(struct ieee80211_local *local,
716 struct ieee80211_hw_mode *mode);
717void ieee80211_tx_set_iswep(struct ieee80211_txrx_data *tx); 654void ieee80211_tx_set_iswep(struct ieee80211_txrx_data *tx);
718int ieee80211_if_update_wds(struct net_device *dev, u8 *remote_addr); 655int ieee80211_if_update_wds(struct net_device *dev, u8 *remote_addr);
719void ieee80211_if_setup(struct net_device *dev); 656void ieee80211_if_setup(struct net_device *dev);
720struct ieee80211_rate *ieee80211_get_rate(struct ieee80211_local *local,
721 int phymode, int hwrate);
722int ieee80211_hw_config_ht(struct ieee80211_local *local, int enable_ht, 657int ieee80211_hw_config_ht(struct ieee80211_local *local, int enable_ht,
723 struct ieee80211_ht_info *req_ht_cap, 658 struct ieee80211_ht_info *req_ht_cap,
724 struct ieee80211_ht_bss_info *req_bss_cap); 659 struct ieee80211_ht_bss_info *req_bss_cap);
@@ -749,7 +684,7 @@ extern const struct iw_handler_def ieee80211_iw_handler_def;
749/* ieee80211_ioctl.c */ 684/* ieee80211_ioctl.c */
750int ieee80211_set_compression(struct ieee80211_local *local, 685int ieee80211_set_compression(struct ieee80211_local *local,
751 struct net_device *dev, struct sta_info *sta); 686 struct net_device *dev, struct sta_info *sta);
752int ieee80211_set_channel(struct ieee80211_local *local, int channel, int freq); 687int ieee80211_set_freq(struct ieee80211_local *local, int freq);
753/* ieee80211_sta.c */ 688/* ieee80211_sta.c */
754void ieee80211_sta_timer(unsigned long data); 689void ieee80211_sta_timer(unsigned long data);
755void ieee80211_sta_work(struct work_struct *work); 690void ieee80211_sta_work(struct work_struct *work);
@@ -763,9 +698,9 @@ int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len);
763void ieee80211_sta_req_auth(struct net_device *dev, 698void ieee80211_sta_req_auth(struct net_device *dev,
764 struct ieee80211_if_sta *ifsta); 699 struct ieee80211_if_sta *ifsta);
765int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len); 700int ieee80211_sta_scan_results(struct net_device *dev, char *buf, size_t len);
766ieee80211_txrx_result ieee80211_sta_rx_scan(struct net_device *dev, 701ieee80211_rx_result ieee80211_sta_rx_scan(
767 struct sk_buff *skb, 702 struct net_device *dev, struct sk_buff *skb,
768 struct ieee80211_rx_status *rx_status); 703 struct ieee80211_rx_status *rx_status);
769void ieee80211_rx_bss_list_init(struct net_device *dev); 704void ieee80211_rx_bss_list_init(struct net_device *dev);
770void ieee80211_rx_bss_list_deinit(struct net_device *dev); 705void ieee80211_rx_bss_list_deinit(struct net_device *dev);
771int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len); 706int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len);
@@ -782,9 +717,15 @@ int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
782int ieee80211_ht_addt_info_ie_to_ht_bss_info( 717int ieee80211_ht_addt_info_ie_to_ht_bss_info(
783 struct ieee80211_ht_addt_info *ht_add_info_ie, 718 struct ieee80211_ht_addt_info *ht_add_info_ie,
784 struct ieee80211_ht_bss_info *bss_info); 719 struct ieee80211_ht_bss_info *bss_info);
720void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
721 u16 tid, u8 dialog_token, u16 start_seq_num,
722 u16 agg_size, u16 timeout);
723void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
724 u16 initiator, u16 reason_code);
785void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da, 725void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da,
786 u16 tid, u16 initiator, u16 reason); 726 u16 tid, u16 initiator, u16 reason);
787void sta_rx_agg_session_timer_expired(unsigned long data); 727void sta_rx_agg_session_timer_expired(unsigned long data);
728void sta_addba_resp_timer_expired(unsigned long data);
788/* ieee80211_iface.c */ 729/* ieee80211_iface.c */
789int ieee80211_if_add(struct net_device *dev, const char *name, 730int ieee80211_if_add(struct net_device *dev, const char *name,
790 struct net_device **new_dev, int type); 731 struct net_device **new_dev, int type);
@@ -796,16 +737,7 @@ int ieee80211_if_remove(struct net_device *dev, const char *name, int id);
796void ieee80211_if_free(struct net_device *dev); 737void ieee80211_if_free(struct net_device *dev);
797void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata); 738void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata);
798 739
799/* regdomain.c */
800void ieee80211_regdomain_init(void);
801void ieee80211_set_default_regdomain(struct ieee80211_hw_mode *mode);
802
803/* rx handling */
804extern ieee80211_rx_handler ieee80211_rx_pre_handlers[];
805extern ieee80211_rx_handler ieee80211_rx_handlers[];
806
807/* tx handling */ 740/* tx handling */
808extern ieee80211_tx_handler ieee80211_tx_handlers[];
809void ieee80211_clear_tx_pending(struct ieee80211_local *local); 741void ieee80211_clear_tx_pending(struct ieee80211_local *local);
810void ieee80211_tx_pending(unsigned long data); 742void ieee80211_tx_pending(unsigned long data);
811int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev); 743int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev);
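The removed __bss_tim_set()/__bss_tim_clear() helpers above used the byte/bit layout mandated for the 802.11 TIM element (byte aid/8, bit aid%8), which is why they could not use the architecture-dependent __set_bit() helpers; their job now lives behind sta_info_{set,clear}_tim_bit(). A minimal stand-alone sketch of that layout, with an assumed maximum AID of 2007:

/* Stand-alone sketch of the TIM bit layout used by the removed helpers.
 * The 802.11 TIM element indexes the partial virtual bitmap as
 * byte aid/8, bit aid%8. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_AID 2007    /* assumption for the sketch; IEEE80211_MAX_AID in mac80211 */

static uint8_t tim[(MAX_AID + 1 + 7) / 8];

static void tim_set(uint16_t aid)   { tim[aid / 8] |=  (uint8_t)(1 << (aid % 8)); }
static void tim_clear(uint16_t aid) { tim[aid / 8] &= (uint8_t)~(1 << (aid % 8)); }

int main(void)
{
	memset(tim, 0, sizeof(tim));
	tim_set(5);
	printf("byte 0 after setting AID 5: 0x%02x\n", tim[0]);   /* 0x20 */
	tim_clear(5);
	printf("byte 0 after clearing:      0x%02x\n", tim[0]);   /* 0x00 */
	return 0;
}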
diff --git a/net/mac80211/ieee80211_iface.c b/net/mac80211/ieee80211_iface.c
index 92f1eb2da311..0d6824bca92b 100644
--- a/net/mac80211/ieee80211_iface.c
+++ b/net/mac80211/ieee80211_iface.c
@@ -118,6 +118,8 @@ void ieee80211_if_set_type(struct net_device *dev, int type)
118 sdata->bss = NULL; 118 sdata->bss = NULL;
119 sdata->vif.type = type; 119 sdata->vif.type = type;
120 120
121 sdata->basic_rates = 0;
122
121 switch (type) { 123 switch (type) {
122 case IEEE80211_IF_TYPE_WDS: 124 case IEEE80211_IF_TYPE_WDS:
123 /* nothing special */ 125 /* nothing special */
@@ -158,6 +160,8 @@ void ieee80211_if_set_type(struct net_device *dev, int type)
158 case IEEE80211_IF_TYPE_MNTR: 160 case IEEE80211_IF_TYPE_MNTR:
159 dev->type = ARPHRD_IEEE80211_RADIOTAP; 161 dev->type = ARPHRD_IEEE80211_RADIOTAP;
160 dev->hard_start_xmit = ieee80211_monitor_start_xmit; 162 dev->hard_start_xmit = ieee80211_monitor_start_xmit;
163 sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
164 MONITOR_FLAG_OTHER_BSS;
161 break; 165 break;
162 default: 166 default:
163 printk(KERN_WARNING "%s: %s: Unknown interface type 0x%x", 167 printk(KERN_WARNING "%s: %s: Unknown interface type 0x%x",
@@ -189,6 +193,7 @@ void ieee80211_if_reinit(struct net_device *dev)
189 /* Remove all virtual interfaces that use this BSS 193 /* Remove all virtual interfaces that use this BSS
190 * as their sdata->bss */ 194 * as their sdata->bss */
191 struct ieee80211_sub_if_data *tsdata, *n; 195 struct ieee80211_sub_if_data *tsdata, *n;
196 struct beacon_data *beacon;
192 197
193 list_for_each_entry_safe(tsdata, n, &local->interfaces, list) { 198 list_for_each_entry_safe(tsdata, n, &local->interfaces, list) {
194 if (tsdata != sdata && tsdata->bss == &sdata->u.ap) { 199 if (tsdata != sdata && tsdata->bss == &sdata->u.ap) {
@@ -206,7 +211,10 @@ void ieee80211_if_reinit(struct net_device *dev)
206 } 211 }
207 } 212 }
208 213
209 kfree(sdata->u.ap.beacon); 214 beacon = sdata->u.ap.beacon;
215 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
216 synchronize_rcu();
217 kfree(beacon);
210 218
211 while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) { 219 while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
212 local->total_ps_buffered--; 220 local->total_ps_buffered--;
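The beacon teardown in ieee80211_if_reinit() now unpublishes the pointer with rcu_assign_pointer(..., NULL), waits in synchronize_rcu(), and only then frees the old beacon, so lockless readers can never dereference freed memory. Here is a compilable sketch of that ordering with the RCU primitives stubbed out; the stubs are placeholders, not the kernel implementations.

/* Minimal model of the unpublish -> synchronize -> free sequence added to
 * ieee80211_if_reinit() above.  Only the ordering matters: readers must never
 * be able to reach memory after it is freed. */
#include <stdio.h>
#include <stdlib.h>

struct beacon_data { int head_len; };

static struct beacon_data *published_beacon;   /* read locklessly by the TX path */

static void stub_rcu_assign_pointer(struct beacon_data **p, struct beacon_data *v) { *p = v; }
static void stub_synchronize_rcu(void) { /* wait for all pre-existing readers */ }

static void retire_beacon(void)
{
	struct beacon_data *old = published_beacon;

	stub_rcu_assign_pointer(&published_beacon, NULL);  /* 1. unpublish */
	stub_synchronize_rcu();                            /* 2. wait for readers */
	free(old);                                         /* 3. now safe to free */
}

int main(void)
{
	published_beacon = calloc(1, sizeof(*published_beacon));
	retire_beacon();
	printf("beacon retired safely\n");
	return 0;
}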
diff --git a/net/mac80211/ieee80211_ioctl.c b/net/mac80211/ieee80211_ioctl.c
index 5024d3733834..7551db3f3abc 100644
--- a/net/mac80211/ieee80211_ioctl.c
+++ b/net/mac80211/ieee80211_ioctl.c
@@ -33,8 +33,8 @@ static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr,
33 size_t key_len) 33 size_t key_len)
34{ 34{
35 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 35 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
36 int ret = 0; 36 int ret;
37 struct sta_info *sta; 37 struct sta_info *sta = NULL;
38 struct ieee80211_key *key; 38 struct ieee80211_key *key;
39 struct ieee80211_sub_if_data *sdata; 39 struct ieee80211_sub_if_data *sdata;
40 40
@@ -46,58 +46,64 @@ static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr,
46 return -EINVAL; 46 return -EINVAL;
47 } 47 }
48 48
49 if (is_broadcast_ether_addr(sta_addr)) { 49 if (remove) {
50 sta = NULL; 50 if (is_broadcast_ether_addr(sta_addr)) {
51 key = sdata->keys[idx]; 51 key = sdata->keys[idx];
52 } else { 52 } else {
53 set_tx_key = 0; 53 sta = sta_info_get(local, sta_addr);
54 /* 54 if (!sta) {
55 * According to the standard, the key index of a pairwise 55 ret = -ENOENT;
56 * key must be zero. However, some AP are broken when it 56 key = NULL;
57 * comes to WEP key indices, so we work around this. 57 goto err_out;
58 */ 58 }
59 if (idx != 0 && alg != ALG_WEP) { 59
60 printk(KERN_DEBUG "%s: set_encrypt - non-zero idx for " 60 key = sta->key;
61 "individual key\n", dev->name);
62 return -EINVAL;
63 } 61 }
64 62
65 sta = sta_info_get(local, sta_addr); 63 if (!key)
66 if (!sta) { 64 ret = -ENOENT;
67#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 65 else
68 DECLARE_MAC_BUF(mac); 66 ret = 0;
69 printk(KERN_DEBUG "%s: set_encrypt - unknown addr " 67 } else {
70 "%s\n", 68 key = ieee80211_key_alloc(alg, idx, key_len, _key);
71 dev->name, print_mac(mac, sta_addr)); 69 if (!key)
72#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 70 return -ENOMEM;
73 71
74 return -ENOENT; 72 if (!is_broadcast_ether_addr(sta_addr)) {
73 set_tx_key = 0;
74 /*
75 * According to the standard, the key index of a
76 * pairwise key must be zero. However, some AP are
77 * broken when it comes to WEP key indices, so we
78 * work around this.
79 */
80 if (idx != 0 && alg != ALG_WEP) {
81 ret = -EINVAL;
82 goto err_out;
83 }
84
85 sta = sta_info_get(local, sta_addr);
86 if (!sta) {
87 ret = -ENOENT;
88 goto err_out;
89 }
75 } 90 }
76 91
77 key = sta->key; 92 ieee80211_key_link(key, sdata, sta);
78 }
79 93
80 if (remove) { 94 if (set_tx_key || (!sta && !sdata->default_key && key))
81 ieee80211_key_free(key); 95 ieee80211_set_default_key(sdata, idx);
96
97 /* don't free key later */
82 key = NULL; 98 key = NULL;
83 } else {
84 /*
85 * Automatically frees any old key if present.
86 */
87 key = ieee80211_key_alloc(sdata, sta, alg, idx, key_len, _key);
88 if (!key) {
89 ret = -ENOMEM;
90 goto err_out;
91 }
92 }
93 99
94 if (set_tx_key || (!sta && !sdata->default_key && key)) 100 ret = 0;
95 ieee80211_set_default_key(sdata, idx); 101 }
96 102
97 ret = 0;
98 err_out: 103 err_out:
99 if (sta) 104 if (sta)
100 sta_info_put(sta); 105 sta_info_put(sta);
106 ieee80211_key_free(key);
101 return ret; 107 return ret;
102} 108}
103 109
@@ -129,22 +135,7 @@ static int ieee80211_ioctl_giwname(struct net_device *dev,
129 struct iw_request_info *info, 135 struct iw_request_info *info,
130 char *name, char *extra) 136 char *name, char *extra)
131{ 137{
132 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 138 strcpy(name, "IEEE 802.11");
133
134 switch (local->hw.conf.phymode) {
135 case MODE_IEEE80211A:
136 strcpy(name, "IEEE 802.11a");
137 break;
138 case MODE_IEEE80211B:
139 strcpy(name, "IEEE 802.11b");
140 break;
141 case MODE_IEEE80211G:
142 strcpy(name, "IEEE 802.11g");
143 break;
144 default:
145 strcpy(name, "IEEE 802.11");
146 break;
147 }
148 139
149 return 0; 140 return 0;
150} 141}
@@ -156,7 +147,7 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
156{ 147{
157 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 148 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
158 struct iw_range *range = (struct iw_range *) extra; 149 struct iw_range *range = (struct iw_range *) extra;
159 struct ieee80211_hw_mode *mode = NULL; 150 enum ieee80211_band band;
160 int c = 0; 151 int c = 0;
161 152
162 data->length = sizeof(struct iw_range); 153 data->length = sizeof(struct iw_range);
@@ -191,24 +182,27 @@ static int ieee80211_ioctl_giwrange(struct net_device *dev,
191 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | 182 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
192 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; 183 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
193 184
194 list_for_each_entry(mode, &local->modes_list, list) {
195 int i = 0;
196 185
197 if (!(local->enabled_modes & (1 << mode->mode)) || 186 for (band = 0; band < IEEE80211_NUM_BANDS; band ++) {
198 (local->hw_modes & local->enabled_modes & 187 int i;
199 (1 << MODE_IEEE80211G) && mode->mode == MODE_IEEE80211B)) 188 struct ieee80211_supported_band *sband;
189
190 sband = local->hw.wiphy->bands[band];
191
192 if (!sband)
200 continue; 193 continue;
201 194
202 while (i < mode->num_channels && c < IW_MAX_FREQUENCIES) { 195 for (i = 0; i < sband->n_channels && c < IW_MAX_FREQUENCIES; i++) {
203 struct ieee80211_channel *chan = &mode->channels[i]; 196 struct ieee80211_channel *chan = &sband->channels[i];
204 197
205 if (chan->flag & IEEE80211_CHAN_W_SCAN) { 198 if (!(chan->flags & IEEE80211_CHAN_DISABLED)) {
206 range->freq[c].i = chan->chan; 199 range->freq[c].i =
207 range->freq[c].m = chan->freq * 100000; 200 ieee80211_frequency_to_channel(
208 range->freq[c].e = 1; 201 chan->center_freq);
202 range->freq[c].m = chan->center_freq;
203 range->freq[c].e = 6;
209 c++; 204 c++;
210 } 205 }
211 i++;
212 } 206 }
213 } 207 }
214 range->num_channels = c; 208 range->num_channels = c;
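The frequency reporting above relies on the Wireless Extensions encoding: a value is a mantissa m and exponent e meaning m x 10^e Hz, so with center_freq kept in MHz the new code can report m = center_freq and e = 6 directly. A tiny sketch of that encoding follows; the struct name is invented so it is not mistaken for the real iw_freq definition.

/* Sketch of the (mantissa, exponent) frequency encoding assumed above:
 * value = m * 10^e Hz, here decoded back to MHz. */
#include <stdio.h>

struct iw_freq_sketch { long m; short e; };

static double to_mhz(struct iw_freq_sketch f)
{
	double hz = (double)f.m;
	for (int i = 0; i < f.e; i++)
		hz *= 10.0;
	return hz / 1e6;
}

int main(void)
{
	struct iw_freq_sketch f = { .m = 2412, .e = 6 };   /* 2.4 GHz channel 1 */
	printf("%.0f MHz\n", to_mhz(f));                   /* 2412 MHz */
	return 0;
}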
@@ -294,22 +288,29 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev,
294 return 0; 288 return 0;
295} 289}
296 290
297int ieee80211_set_channel(struct ieee80211_local *local, int channel, int freq) 291int ieee80211_set_freq(struct ieee80211_local *local, int freqMHz)
298{ 292{
299 struct ieee80211_hw_mode *mode; 293 int set = 0;
300 int c, set = 0;
301 int ret = -EINVAL; 294 int ret = -EINVAL;
295 enum ieee80211_band band;
296 struct ieee80211_supported_band *sband;
297 int i;
298
299 for (band = 0; band < IEEE80211_NUM_BANDS; band ++) {
300 sband = local->hw.wiphy->bands[band];
302 301
303 list_for_each_entry(mode, &local->modes_list, list) { 302 if (!sband)
304 if (!(local->enabled_modes & (1 << mode->mode)))
305 continue; 303 continue;
306 for (c = 0; c < mode->num_channels; c++) { 304
307 struct ieee80211_channel *chan = &mode->channels[c]; 305 for (i = 0; i < sband->n_channels; i++) {
308 if (chan->flag & IEEE80211_CHAN_W_SCAN && 306 struct ieee80211_channel *chan = &sband->channels[i];
309 ((chan->chan == channel) || (chan->freq == freq))) { 307
310 local->oper_channel = chan; 308 if (chan->flags & IEEE80211_CHAN_DISABLED)
311 local->oper_hw_mode = mode; 309 continue;
310
311 if (chan->center_freq == freqMHz) {
312 set = 1; 312 set = 1;
313 local->oper_channel = chan;
313 break; 314 break;
314 } 315 }
315 } 316 }
@@ -347,13 +348,14 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev,
347 IEEE80211_STA_AUTO_CHANNEL_SEL; 348 IEEE80211_STA_AUTO_CHANNEL_SEL;
348 return 0; 349 return 0;
349 } else 350 } else
350 return ieee80211_set_channel(local, freq->m, -1); 351 return ieee80211_set_freq(local,
352 ieee80211_channel_to_frequency(freq->m));
351 } else { 353 } else {
352 int i, div = 1000000; 354 int i, div = 1000000;
353 for (i = 0; i < freq->e; i++) 355 for (i = 0; i < freq->e; i++)
354 div /= 10; 356 div /= 10;
355 if (div > 0) 357 if (div > 0)
356 return ieee80211_set_channel(local, -1, freq->m / div); 358 return ieee80211_set_freq(local, freq->m / div);
357 else 359 else
358 return -EINVAL; 360 return -EINVAL;
359 } 361 }
@@ -366,10 +368,7 @@ static int ieee80211_ioctl_giwfreq(struct net_device *dev,
366{ 368{
367 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 369 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
368 370
369 /* TODO: in station mode (Managed/Ad-hoc) might need to poll low-level 371 freq->m = local->hw.conf.channel->center_freq;
370 * driver for the current channel with firmware-based management */
371
372 freq->m = local->hw.conf.freq;
373 freq->e = 6; 372 freq->e = 6;
374 373
375 return 0; 374 return 0;
@@ -566,15 +565,17 @@ static int ieee80211_ioctl_siwrate(struct net_device *dev,
566 struct iw_param *rate, char *extra) 565 struct iw_param *rate, char *extra)
567{ 566{
568 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 567 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
569 struct ieee80211_hw_mode *mode; 568 int i, err = -EINVAL;
570 int i;
571 u32 target_rate = rate->value / 100000; 569 u32 target_rate = rate->value / 100000;
572 struct ieee80211_sub_if_data *sdata; 570 struct ieee80211_sub_if_data *sdata;
571 struct ieee80211_supported_band *sband;
573 572
574 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 573 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
575 if (!sdata->bss) 574 if (!sdata->bss)
576 return -ENODEV; 575 return -ENODEV;
577 mode = local->oper_hw_mode; 576
577 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
578
578 /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates 579 /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates
579 * target_rate = X, rate->fixed = 1 means only rate X 580 * target_rate = X, rate->fixed = 1 means only rate X
580 * target_rate = X, rate->fixed = 0 means all rates <= X */ 581 * target_rate = X, rate->fixed = 0 means all rates <= X */
@@ -582,18 +583,20 @@ static int ieee80211_ioctl_siwrate(struct net_device *dev,
582 sdata->bss->force_unicast_rateidx = -1; 583 sdata->bss->force_unicast_rateidx = -1;
583 if (rate->value < 0) 584 if (rate->value < 0)
584 return 0; 585 return 0;
585 for (i=0; i < mode->num_rates; i++) { 586
586 struct ieee80211_rate *rates = &mode->rates[i]; 587 for (i=0; i< sband->n_bitrates; i++) {
587 int this_rate = rates->rate; 588 struct ieee80211_rate *brate = &sband->bitrates[i];
589 int this_rate = brate->bitrate;
588 590
589 if (target_rate == this_rate) { 591 if (target_rate == this_rate) {
590 sdata->bss->max_ratectrl_rateidx = i; 592 sdata->bss->max_ratectrl_rateidx = i;
591 if (rate->fixed) 593 if (rate->fixed)
592 sdata->bss->force_unicast_rateidx = i; 594 sdata->bss->force_unicast_rateidx = i;
593 return 0; 595 err = 0;
596 break;
594 } 597 }
595 } 598 }
596 return -EINVAL; 599 return err;
597} 600}
598 601
599static int ieee80211_ioctl_giwrate(struct net_device *dev, 602static int ieee80211_ioctl_giwrate(struct net_device *dev,
@@ -603,18 +606,24 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev,
603 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 606 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
604 struct sta_info *sta; 607 struct sta_info *sta;
605 struct ieee80211_sub_if_data *sdata; 608 struct ieee80211_sub_if_data *sdata;
609 struct ieee80211_supported_band *sband;
606 610
607 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 611 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
612
608 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) 613 if (sdata->vif.type == IEEE80211_IF_TYPE_STA)
609 sta = sta_info_get(local, sdata->u.sta.bssid); 614 sta = sta_info_get(local, sdata->u.sta.bssid);
610 else 615 else
611 return -EOPNOTSUPP; 616 return -EOPNOTSUPP;
612 if (!sta) 617 if (!sta)
613 return -ENODEV; 618 return -ENODEV;
614 if (sta->txrate < local->oper_hw_mode->num_rates) 619
615 rate->value = local->oper_hw_mode->rates[sta->txrate].rate * 100000; 620 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
621
622 if (sta->txrate_idx < sband->n_bitrates)
623 rate->value = sband->bitrates[sta->txrate_idx].bitrate;
616 else 624 else
617 rate->value = 0; 625 rate->value = 0;
626 rate->value *= 100000;
618 sta_info_put(sta); 627 sta_info_put(sta);
619 return 0; 628 return 0;
620} 629}
@@ -625,7 +634,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
625{ 634{
626 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 635 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
627 bool need_reconfig = 0; 636 bool need_reconfig = 0;
628 u8 new_power_level; 637 int new_power_level;
629 638
630 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) 639 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
631 return -EINVAL; 640 return -EINVAL;
@@ -635,13 +644,15 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
635 if (data->txpower.fixed) { 644 if (data->txpower.fixed) {
636 new_power_level = data->txpower.value; 645 new_power_level = data->txpower.value;
637 } else { 646 } else {
638 /* Automatic power level. Get the px power from the current 647 /*
639 * channel. */ 648 * Automatic power level. Use maximum power for the current
640 struct ieee80211_channel* chan = local->oper_channel; 649 * channel. Should be part of rate control.
650 */
651 struct ieee80211_channel* chan = local->hw.conf.channel;
641 if (!chan) 652 if (!chan)
642 return -EINVAL; 653 return -EINVAL;
643 654
644 new_power_level = chan->power_level; 655 new_power_level = chan->max_power;
645 } 656 }
646 657
647 if (local->hw.conf.power_level != new_power_level) { 658 if (local->hw.conf.power_level != new_power_level) {
diff --git a/net/mac80211/ieee80211_key.h b/net/mac80211/ieee80211_key.h
index fc770e98d47b..d670e6dbfa39 100644
--- a/net/mac80211/ieee80211_key.h
+++ b/net/mac80211/ieee80211_key.h
@@ -13,6 +13,7 @@
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/rcupdate.h>
16#include <net/mac80211.h> 17#include <net/mac80211.h>
17 18
18/* ALG_TKIP 19/* ALG_TKIP
@@ -45,7 +46,19 @@ struct ieee80211_local;
45struct ieee80211_sub_if_data; 46struct ieee80211_sub_if_data;
46struct sta_info; 47struct sta_info;
47 48
48#define KEY_FLAG_UPLOADED_TO_HARDWARE (1<<0) 49/**
50 * enum ieee80211_internal_key_flags - internal key flags
51 *
52 * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present
53 * in the hardware for TX crypto hardware acceleration.
54 * @KEY_FLAG_REMOVE_FROM_HARDWARE: Indicates to the key code that this
55 * key is present in the hardware (but it cannot be used for
56 * hardware acceleration any more!)
57 */
58enum ieee80211_internal_key_flags {
59 KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0),
60 KEY_FLAG_REMOVE_FROM_HARDWARE = BIT(1),
61};
49 62
50struct ieee80211_key { 63struct ieee80211_key {
51 struct ieee80211_local *local; 64 struct ieee80211_local *local;
@@ -112,12 +125,17 @@ struct ieee80211_key {
112 struct ieee80211_key_conf conf; 125 struct ieee80211_key_conf conf;
113}; 126};
114 127
115struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, 128struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
116 struct sta_info *sta,
117 enum ieee80211_key_alg alg,
118 int idx, 129 int idx,
119 size_t key_len, 130 size_t key_len,
120 const u8 *key_data); 131 const u8 *key_data);
132/*
133 * Insert a key into data structures (sdata, sta if necessary)
134 * to make it used, free old key.
135 */
136void ieee80211_key_link(struct ieee80211_key *key,
137 struct ieee80211_sub_if_data *sdata,
138 struct sta_info *sta);
121void ieee80211_key_free(struct ieee80211_key *key); 139void ieee80211_key_free(struct ieee80211_key *key);
122void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx); 140void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx);
123void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); 141void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/ieee80211_rate.c b/net/mac80211/ieee80211_rate.c
index b957e67c5fba..ebe29b716b27 100644
--- a/net/mac80211/ieee80211_rate.c
+++ b/net/mac80211/ieee80211_rate.c
@@ -163,7 +163,8 @@ static void rate_control_release(struct kref *kref)
163} 163}
164 164
165void rate_control_get_rate(struct net_device *dev, 165void rate_control_get_rate(struct net_device *dev,
166 struct ieee80211_hw_mode *mode, struct sk_buff *skb, 166 struct ieee80211_supported_band *sband,
167 struct sk_buff *skb,
167 struct rate_selection *sel) 168 struct rate_selection *sel)
168{ 169{
169 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 170 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
@@ -174,17 +175,17 @@ void rate_control_get_rate(struct net_device *dev,
174 175
175 memset(sel, 0, sizeof(struct rate_selection)); 176 memset(sel, 0, sizeof(struct rate_selection));
176 177
177 ref->ops->get_rate(ref->priv, dev, mode, skb, sel); 178 ref->ops->get_rate(ref->priv, dev, sband, skb, sel);
178 179
179 /* Select a non-ERP backup rate. */ 180 /* Select a non-ERP backup rate. */
180 if (!sel->nonerp) { 181 if (!sel->nonerp) {
181 for (i = 0; i < mode->num_rates - 1; i++) { 182 for (i = 0; i < sband->n_bitrates; i++) {
182 struct ieee80211_rate *rate = &mode->rates[i]; 183 struct ieee80211_rate *rate = &sband->bitrates[i];
183 if (sel->rate->rate < rate->rate) 184 if (sel->rate->bitrate < rate->bitrate)
184 break; 185 break;
185 186
186 if (rate_supported(sta, mode, i) && 187 if (rate_supported(sta, sband->band, i) &&
187 !(rate->flags & IEEE80211_RATE_ERP)) 188 !(rate->flags & IEEE80211_RATE_ERP_G))
188 sel->nonerp = rate; 189 sel->nonerp = rate;
189 } 190 }
190 } 191 }
diff --git a/net/mac80211/ieee80211_rate.h b/net/mac80211/ieee80211_rate.h
index 73f19e8aa51c..5f9a2ca49a57 100644
--- a/net/mac80211/ieee80211_rate.h
+++ b/net/mac80211/ieee80211_rate.h
@@ -18,6 +18,7 @@
18#include "ieee80211_i.h" 18#include "ieee80211_i.h"
19#include "sta_info.h" 19#include "sta_info.h"
20 20
21/* TODO: kdoc */
21struct rate_selection { 22struct rate_selection {
22 /* Selected transmission rate */ 23 /* Selected transmission rate */
23 struct ieee80211_rate *rate; 24 struct ieee80211_rate *rate;
@@ -34,7 +35,8 @@ struct rate_control_ops {
34 struct sk_buff *skb, 35 struct sk_buff *skb,
35 struct ieee80211_tx_status *status); 36 struct ieee80211_tx_status *status);
36 void (*get_rate)(void *priv, struct net_device *dev, 37 void (*get_rate)(void *priv, struct net_device *dev,
37 struct ieee80211_hw_mode *mode, struct sk_buff *skb, 38 struct ieee80211_supported_band *band,
39 struct sk_buff *skb,
38 struct rate_selection *sel); 40 struct rate_selection *sel);
39 void (*rate_init)(void *priv, void *priv_sta, 41 void (*rate_init)(void *priv, void *priv_sta,
40 struct ieee80211_local *local, struct sta_info *sta); 42 struct ieee80211_local *local, struct sta_info *sta);
@@ -66,7 +68,8 @@ void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
66struct rate_control_ref *rate_control_alloc(const char *name, 68struct rate_control_ref *rate_control_alloc(const char *name,
67 struct ieee80211_local *local); 69 struct ieee80211_local *local);
68void rate_control_get_rate(struct net_device *dev, 70void rate_control_get_rate(struct net_device *dev,
69 struct ieee80211_hw_mode *mode, struct sk_buff *skb, 71 struct ieee80211_supported_band *sband,
72 struct sk_buff *skb,
70 struct rate_selection *sel); 73 struct rate_selection *sel);
71struct rate_control_ref *rate_control_get(struct rate_control_ref *ref); 74struct rate_control_ref *rate_control_get(struct rate_control_ref *ref);
72void rate_control_put(struct rate_control_ref *ref); 75void rate_control_put(struct rate_control_ref *ref);
@@ -127,23 +130,23 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta)
127#endif 130#endif
128} 131}
129 132
130static inline int 133static inline int rate_supported(struct sta_info *sta,
131rate_supported(struct sta_info *sta, struct ieee80211_hw_mode *mode, int index) 134 enum ieee80211_band band,
135 int index)
132{ 136{
133 return (sta == NULL || sta->supp_rates & BIT(index)) && 137 return (sta == NULL || sta->supp_rates[band] & BIT(index));
134 (mode->rates[index].flags & IEEE80211_RATE_SUPPORTED);
135} 138}
136 139
137static inline int 140static inline int
138rate_lowest_index(struct ieee80211_local *local, struct ieee80211_hw_mode *mode, 141rate_lowest_index(struct ieee80211_local *local,
142 struct ieee80211_supported_band *sband,
139 struct sta_info *sta) 143 struct sta_info *sta)
140{ 144{
141 int i; 145 int i;
142 146
143 for (i = 0; i < mode->num_rates; i++) { 147 for (i = 0; i < sband->n_bitrates; i++)
144 if (rate_supported(sta, mode, i)) 148 if (rate_supported(sta, sband->band, i))
145 return i; 149 return i;
146 }
147 150
148 /* warn when we cannot find a rate. */ 151 /* warn when we cannot find a rate. */
149 WARN_ON(1); 152 WARN_ON(1);
@@ -152,10 +155,11 @@ rate_lowest_index(struct ieee80211_local *local, struct ieee80211_hw_mode *mode,
152} 155}
153 156
154static inline struct ieee80211_rate * 157static inline struct ieee80211_rate *
155rate_lowest(struct ieee80211_local *local, struct ieee80211_hw_mode *mode, 158rate_lowest(struct ieee80211_local *local,
159 struct ieee80211_supported_band *sband,
156 struct sta_info *sta) 160 struct sta_info *sta)
157{ 161{
158 return &mode->rates[rate_lowest_index(local, mode, sta)]; 162 return &sband->bitrates[rate_lowest_index(local, sband, sta)];
159} 163}
160 164
161 165
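
rate_supported() now takes the band and indexes a per-band bitmap (sta->supp_rates[band]) instead of one global bitmap plus a per-rate IEEE80211_RATE_SUPPORTED flag. A small sketch of that lookup under simplified, made-up types (struct peer stands in for sta_info):

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1ULL << (n))

    enum band { BAND_2GHZ, BAND_5GHZ, NUM_BANDS };

    /* one rate bitmap per band, as in the new sta->supp_rates[band] layout */
    struct peer {
        uint64_t supp_rates[NUM_BANDS];
    };

    static int rate_supported(const struct peer *p, enum band band, int index)
    {
        /* NULL peer means "no station entry", treat every rate as usable */
        return p == NULL || (p->supp_rates[band] & BIT(index));
    }

    static int rate_lowest_index(const struct peer *p, enum band band, int n_bitrates)
    {
        int i;

        for (i = 0; i < n_bitrates; i++)
            if (rate_supported(p, band, i))
                return i;
        return 0;    /* the kernel WARNs here and falls back to index 0 */
    }

    int main(void)
    {
        struct peer ap = { .supp_rates = { [BAND_2GHZ] = BIT(2) | BIT(3) } };

        /* prints 2: the lowest rate-table index the peer advertises */
        printf("lowest usable index: %d\n",
               rate_lowest_index(&ap, BAND_2GHZ, 12));
        return 0;
    }
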
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index 9aeed5320228..ddb5832f37cb 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -74,7 +74,7 @@
74static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, 74static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
75 u8 *ssid, size_t ssid_len); 75 u8 *ssid, size_t ssid_len);
76static struct ieee80211_sta_bss * 76static struct ieee80211_sta_bss *
77ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int channel, 77ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq,
78 u8 *ssid, u8 ssid_len); 78 u8 *ssid, u8 ssid_len);
79static void ieee80211_rx_bss_put(struct net_device *dev, 79static void ieee80211_rx_bss_put(struct net_device *dev,
80 struct ieee80211_sta_bss *bss); 80 struct ieee80211_sta_bss *bss);
@@ -227,12 +227,7 @@ static void ieee802_11_parse_elems(u8 *start, size_t len,
227 227
228static int ecw2cw(int ecw) 228static int ecw2cw(int ecw)
229{ 229{
230 int cw = 1; 230 return (1 << ecw) - 1;
231 while (ecw > 0) {
232 cw <<= 1;
233 ecw--;
234 }
235 return cw - 1;
236} 231}
237 232
238static void ieee80211_sta_wmm_params(struct net_device *dev, 233static void ieee80211_sta_wmm_params(struct net_device *dev,
@@ -297,12 +292,13 @@ static void ieee80211_sta_wmm_params(struct net_device *dev,
297 params.aifs = pos[0] & 0x0f; 292 params.aifs = pos[0] & 0x0f;
298 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); 293 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
299 params.cw_min = ecw2cw(pos[1] & 0x0f); 294 params.cw_min = ecw2cw(pos[1] & 0x0f);
300 /* TXOP is in units of 32 usec; burst_time in 0.1 ms */ 295 params.txop = pos[2] | (pos[3] << 8);
301 params.burst_time = (pos[2] | (pos[3] << 8)) * 32 / 100; 296#ifdef CONFIG_MAC80211_DEBUG
302 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " 297 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
303 "cWmin=%d cWmax=%d burst=%d\n", 298 "cWmin=%d cWmax=%d txop=%d\n",
304 dev->name, queue, aci, acm, params.aifs, params.cw_min, 299 dev->name, queue, aci, acm, params.aifs, params.cw_min,
305 params.cw_max, params.burst_time); 300 params.cw_max, params.txop);
301#endif
306 /* TODO: handle ACM (block TX, fallback to next lowest allowed 302 /* TODO: handle ACM (block TX, fallback to next lowest allowed
307 * AC for now) */ 303 * AC for now) */
308 if (local->ops->conf_tx(local_to_hw(local), queue, &params)) { 304 if (local->ops->conf_tx(local_to_hw(local), queue, &params)) {
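
ecw2cw() is reduced to the closed form CW = 2^ECW - 1, and the TXOP limit is now kept raw in units of 32 µs rather than pre-scaled into a burst time. A stand-alone sketch of decoding one WMM AC parameter record, with made-up field values:

    #include <stdio.h>
    #include <stdint.h>

    /* CW = 2^ECW - 1, the closed form the patch substitutes for the old loop */
    static int ecw2cw(int ecw)
    {
        return (1 << ecw) - 1;
    }

    int main(void)
    {
        /* one WMM AC parameter record (made-up values, not a real beacon):
         * AIFSN = 3, ECWmin = 4, ECWmax = 10, TXOP = 94 (units of 32 usec) */
        uint8_t ac[4] = { 0x03, 0xa4, 0x5e, 0x00 };

        int aifs   = ac[0] & 0x0f;
        int cw_max = ecw2cw((ac[1] & 0xf0) >> 4);    /* 2^10 - 1 = 1023 */
        int cw_min = ecw2cw(ac[1] & 0x0f);           /* 2^4  - 1 = 15   */
        int txop   = ac[2] | (ac[3] << 8);           /* little endian   */

        printf("aifs=%d cWmin=%d cWmax=%d txop=%d (x 32 usec)\n",
               aifs, cw_min, cw_max, txop);
        return 0;
    }
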
@@ -466,7 +462,7 @@ static void ieee80211_set_associated(struct net_device *dev,
466 return; 462 return;
467 463
468 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, 464 bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
469 local->hw.conf.channel, 465 local->hw.conf.channel->center_freq,
470 ifsta->ssid, ifsta->ssid_len); 466 ifsta->ssid, ifsta->ssid_len);
471 if (bss) { 467 if (bss) {
472 if (bss->has_erp_value) 468 if (bss->has_erp_value)
@@ -492,6 +488,7 @@ static void ieee80211_set_associated(struct net_device *dev,
492 ifsta->last_probe = jiffies; 488 ifsta->last_probe = jiffies;
493 ieee80211_led_assoc(local, assoc); 489 ieee80211_led_assoc(local, assoc);
494 490
491 sdata->bss_conf.assoc = assoc;
495 ieee80211_bss_info_change_notify(sdata, changed); 492 ieee80211_bss_info_change_notify(sdata, changed);
496} 493}
497 494
@@ -592,7 +589,6 @@ static void ieee80211_send_assoc(struct net_device *dev,
592 struct ieee80211_if_sta *ifsta) 589 struct ieee80211_if_sta *ifsta)
593{ 590{
594 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 591 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
595 struct ieee80211_hw_mode *mode;
596 struct sk_buff *skb; 592 struct sk_buff *skb;
597 struct ieee80211_mgmt *mgmt; 593 struct ieee80211_mgmt *mgmt;
598 u8 *pos, *ies; 594 u8 *pos, *ies;
@@ -600,6 +596,7 @@ static void ieee80211_send_assoc(struct net_device *dev,
600 u16 capab; 596 u16 capab;
601 struct ieee80211_sta_bss *bss; 597 struct ieee80211_sta_bss *bss;
602 int wmm = 0; 598 int wmm = 0;
599 struct ieee80211_supported_band *sband;
603 600
604 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 601 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
605 sizeof(*mgmt) + 200 + ifsta->extra_ie_len + 602 sizeof(*mgmt) + 200 + ifsta->extra_ie_len +
@@ -611,13 +608,19 @@ static void ieee80211_send_assoc(struct net_device *dev,
611 } 608 }
612 skb_reserve(skb, local->hw.extra_tx_headroom); 609 skb_reserve(skb, local->hw.extra_tx_headroom);
613 610
614 mode = local->oper_hw_mode; 611 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
612
615 capab = ifsta->capab; 613 capab = ifsta->capab;
616 if (mode->mode == MODE_IEEE80211G) { 614
617 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME | 615 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) {
618 WLAN_CAPABILITY_SHORT_PREAMBLE; 616 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
617 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
618 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
619 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
619 } 620 }
620 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, local->hw.conf.channel, 621
622 bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
623 local->hw.conf.channel->center_freq,
621 ifsta->ssid, ifsta->ssid_len); 624 ifsta->ssid, ifsta->ssid_len);
622 if (bss) { 625 if (bss) {
623 if (bss->capability & WLAN_CAPABILITY_PRIVACY) 626 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
@@ -656,23 +659,23 @@ static void ieee80211_send_assoc(struct net_device *dev,
656 *pos++ = ifsta->ssid_len; 659 *pos++ = ifsta->ssid_len;
657 memcpy(pos, ifsta->ssid, ifsta->ssid_len); 660 memcpy(pos, ifsta->ssid, ifsta->ssid_len);
658 661
659 len = mode->num_rates; 662 len = sband->n_bitrates;
660 if (len > 8) 663 if (len > 8)
661 len = 8; 664 len = 8;
662 pos = skb_put(skb, len + 2); 665 pos = skb_put(skb, len + 2);
663 *pos++ = WLAN_EID_SUPP_RATES; 666 *pos++ = WLAN_EID_SUPP_RATES;
664 *pos++ = len; 667 *pos++ = len;
665 for (i = 0; i < len; i++) { 668 for (i = 0; i < len; i++) {
666 int rate = mode->rates[i].rate; 669 int rate = sband->bitrates[i].bitrate;
667 *pos++ = (u8) (rate / 5); 670 *pos++ = (u8) (rate / 5);
668 } 671 }
669 672
670 if (mode->num_rates > len) { 673 if (sband->n_bitrates > len) {
671 pos = skb_put(skb, mode->num_rates - len + 2); 674 pos = skb_put(skb, sband->n_bitrates - len + 2);
672 *pos++ = WLAN_EID_EXT_SUPP_RATES; 675 *pos++ = WLAN_EID_EXT_SUPP_RATES;
673 *pos++ = mode->num_rates - len; 676 *pos++ = sband->n_bitrates - len;
674 for (i = len; i < mode->num_rates; i++) { 677 for (i = len; i < sband->n_bitrates; i++) {
675 int rate = mode->rates[i].rate; 678 int rate = sband->bitrates[i].bitrate;
676 *pos++ = (u8) (rate / 5); 679 *pos++ = (u8) (rate / 5);
677 } 680 }
678 } 681 }
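
The association request builds its rate elements directly from the band table: each octet is the rate in 500 kbps units (hence bitrate / 5, since bitrate is kept in 100 kbps units), with at most eight rates in the Supported Rates element and the remainder in Extended Supported Rates. A sketch of that encoding for an 802.11g rate set (element IDs per the standard; the basic-rate flag in bit 7 is omitted here, as in the request above):

    #include <stdio.h>
    #include <stdint.h>

    #define EID_SUPP_RATES      1
    #define EID_EXT_SUPP_RATES  50

    /* Encode bitrates (100 kbps units) into Supported Rates / Extended
     * Supported Rates elements: one octet per rate in 500 kbps units,
     * at most eight rates in the first element. */
    static int encode_rates(const int *bitrates, int n, uint8_t *buf)
    {
        int first = n > 8 ? 8 : n, i, len = 0;

        buf[len++] = EID_SUPP_RATES;
        buf[len++] = (uint8_t)first;
        for (i = 0; i < first; i++)
            buf[len++] = (uint8_t)(bitrates[i] / 5);

        if (n > 8) {
            buf[len++] = EID_EXT_SUPP_RATES;
            buf[len++] = (uint8_t)(n - 8);
            for (i = 8; i < n; i++)
                buf[len++] = (uint8_t)(bitrates[i] / 5);
        }
        return len;
    }

    int main(void)
    {
        /* 802.11g set: 1, 2, 5.5, 6, 9, 11, 12, 18, 24, 36, 48, 54 Mbit/s */
        int rates[] = { 10, 20, 55, 60, 90, 110, 120, 180,
                        240, 360, 480, 540 };
        uint8_t buf[32];
        int i, len = encode_rates(rates, 12, buf);

        for (i = 0; i < len; i++)
            printf("%02x ", buf[i]);
        printf("\n");
        return 0;
    }
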
@@ -695,17 +698,18 @@ static void ieee80211_send_assoc(struct net_device *dev,
695 *pos++ = 0; 698 *pos++ = 0;
696 } 699 }
697 /* wmm support is a must to HT */ 700 /* wmm support is a must to HT */
698 if (wmm && mode->ht_info.ht_supported) { 701 if (wmm && sband->ht_info.ht_supported) {
699 __le16 tmp = cpu_to_le16(mode->ht_info.cap); 702 __le16 tmp = cpu_to_le16(sband->ht_info.cap);
700 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2); 703 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2);
701 *pos++ = WLAN_EID_HT_CAPABILITY; 704 *pos++ = WLAN_EID_HT_CAPABILITY;
702 *pos++ = sizeof(struct ieee80211_ht_cap); 705 *pos++ = sizeof(struct ieee80211_ht_cap);
703 memset(pos, 0, sizeof(struct ieee80211_ht_cap)); 706 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
704 memcpy(pos, &tmp, sizeof(u16)); 707 memcpy(pos, &tmp, sizeof(u16));
705 pos += sizeof(u16); 708 pos += sizeof(u16);
706 *pos++ = (mode->ht_info.ampdu_factor | 709 /* TODO: needs a define here for << 2 */
707 (mode->ht_info.ampdu_density << 2)); 710 *pos++ = sband->ht_info.ampdu_factor |
708 memcpy(pos, mode->ht_info.supp_mcs_set, 16); 711 (sband->ht_info.ampdu_density << 2);
712 memcpy(pos, sband->ht_info.supp_mcs_set, 16);
709 } 713 }
710 714
711 kfree(ifsta->assocreq_ies); 715 kfree(ifsta->assocreq_ies);
@@ -788,7 +792,8 @@ static int ieee80211_privacy_mismatch(struct net_device *dev,
788 if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL)) 792 if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL))
789 return 0; 793 return 0;
790 794
791 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, local->hw.conf.channel, 795 bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
796 local->hw.conf.channel->center_freq,
792 ifsta->ssid, ifsta->ssid_len); 797 ifsta->ssid, ifsta->ssid_len);
793 if (!bss) 798 if (!bss)
794 return 0; 799 return 0;
@@ -898,7 +903,7 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
898 u8 *ssid, size_t ssid_len) 903 u8 *ssid, size_t ssid_len)
899{ 904{
900 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 905 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
901 struct ieee80211_hw_mode *mode; 906 struct ieee80211_supported_band *sband;
902 struct sk_buff *skb; 907 struct sk_buff *skb;
903 struct ieee80211_mgmt *mgmt; 908 struct ieee80211_mgmt *mgmt;
904 u8 *pos, *supp_rates, *esupp_rates = NULL; 909 u8 *pos, *supp_rates, *esupp_rates = NULL;
@@ -932,11 +937,10 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
932 supp_rates = skb_put(skb, 2); 937 supp_rates = skb_put(skb, 2);
933 supp_rates[0] = WLAN_EID_SUPP_RATES; 938 supp_rates[0] = WLAN_EID_SUPP_RATES;
934 supp_rates[1] = 0; 939 supp_rates[1] = 0;
935 mode = local->oper_hw_mode; 940 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
936 for (i = 0; i < mode->num_rates; i++) { 941
937 struct ieee80211_rate *rate = &mode->rates[i]; 942 for (i = 0; i < sband->n_bitrates; i++) {
938 if (!(rate->flags & IEEE80211_RATE_SUPPORTED)) 943 struct ieee80211_rate *rate = &sband->bitrates[i];
939 continue;
940 if (esupp_rates) { 944 if (esupp_rates) {
941 pos = skb_put(skb, 1); 945 pos = skb_put(skb, 1);
942 esupp_rates[1]++; 946 esupp_rates[1]++;
@@ -949,7 +953,7 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
949 pos = skb_put(skb, 1); 953 pos = skb_put(skb, 1);
950 supp_rates[1]++; 954 supp_rates[1]++;
951 } 955 }
952 *pos = rate->rate / 5; 956 *pos = rate->bitrate / 5;
953 } 957 }
954 958
955 ieee80211_sta_tx(dev, skb, 0); 959 ieee80211_sta_tx(dev, skb, 0);
@@ -1044,6 +1048,58 @@ static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid,
1044 return; 1048 return;
1045} 1049}
1046 1050
1051void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
1052 u16 tid, u8 dialog_token, u16 start_seq_num,
1053 u16 agg_size, u16 timeout)
1054{
1055 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1056 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1057 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1058 struct sk_buff *skb;
1059 struct ieee80211_mgmt *mgmt;
1060 u16 capab;
1061
1062 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 +
1063 sizeof(mgmt->u.action.u.addba_req));
1064
1065
1066 if (!skb) {
1067 printk(KERN_ERR "%s: failed to allocate buffer "
1068 "for addba request frame\n", dev->name);
1069 return;
1070 }
1071 skb_reserve(skb, local->hw.extra_tx_headroom);
1072 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
1073 memset(mgmt, 0, 24);
1074 memcpy(mgmt->da, da, ETH_ALEN);
1075 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
1076 if (sdata->vif.type == IEEE80211_IF_TYPE_AP)
1077 memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN);
1078 else
1079 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
1080
1081 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1082 IEEE80211_STYPE_ACTION);
1083
1084 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));
1085
1086 mgmt->u.action.category = WLAN_CATEGORY_BACK;
1087 mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
1088
1089 mgmt->u.action.u.addba_req.dialog_token = dialog_token;
1090 capab = (u16)(1 << 1); /* bit 1 aggregation policy */
 1091 capab = (u16)(1 << 1); /* bit 1 aggregation policy */ wait
1093
1094 mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
1095
1096 mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
1097 mgmt->u.action.u.addba_req.start_seq_num =
1098 cpu_to_le16(start_seq_num << 4);
1099
1100 ieee80211_sta_tx(dev, skb, 0);
1101}
1102
1047static void ieee80211_sta_process_addba_request(struct net_device *dev, 1103static void ieee80211_sta_process_addba_request(struct net_device *dev,
1048 struct ieee80211_mgmt *mgmt, 1104 struct ieee80211_mgmt *mgmt,
1049 size_t len) 1105 size_t len)
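
The capability word built in ieee80211_send_addba_request() packs the block-ack policy, TID and buffer size into one 16-bit Block Ack Parameter Set field, and the starting sequence number is shifted into bits 15:4 of its own field, which explains the << 4 above. A minimal sketch of that packing outside the kernel (field values are examples):

    #include <stdio.h>
    #include <stdint.h>

    /* Block Ack Parameter Set as packed in ieee80211_send_addba_request():
     *   bit  0     A-MSDU supported (left clear here)
     *   bit  1     block-ack policy (1 = immediate)
     *   bits 5:2   TID
     *   bits 15:6  buffer size */
    static uint16_t build_addba_capab(unsigned int tid, unsigned int buf_size)
    {
        uint16_t capab = 0;

        capab |= 1u << 1;                        /* immediate block ack */
        capab |= (uint16_t)(tid << 2);           /* bits 5:2  */
        capab |= (uint16_t)(buf_size << 6);      /* bits 15:6 */
        return capab;
    }

    int main(void)
    {
        uint16_t capab = build_addba_capab(6, 64);          /* 0x101a */
        uint16_t ssn   = (uint16_t)(123u << 4);             /* start_seq_num */

        printf("capab=0x%04x ssn_field=0x%04x\n", capab, ssn);
        return 0;
    }
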
@@ -1093,9 +1149,11 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1093 } 1149 }
1094 /* determine default buffer size */ 1150 /* determine default buffer size */
1095 if (buf_size == 0) { 1151 if (buf_size == 0) {
1096 struct ieee80211_hw_mode *mode = conf->mode; 1152 struct ieee80211_supported_band *sband;
1153
1154 sband = local->hw.wiphy->bands[conf->channel->band];
1097 buf_size = IEEE80211_MIN_AMPDU_BUF; 1155 buf_size = IEEE80211_MIN_AMPDU_BUF;
1098 buf_size = buf_size << mode->ht_info.ampdu_factor; 1156 buf_size = buf_size << sband->ht_info.ampdu_factor;
1099 } 1157 }
1100 1158
1101 tid_agg_rx = &sta->ampdu_mlme.tid_rx[tid]; 1159 tid_agg_rx = &sta->ampdu_mlme.tid_rx[tid];
@@ -1127,7 +1185,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
1127 1185
1128 if (local->ops->ampdu_action) 1186 if (local->ops->ampdu_action)
1129 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START, 1187 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
1130 sta->addr, tid, start_seq_num); 1188 sta->addr, tid, &start_seq_num);
1131#ifdef CONFIG_MAC80211_HT_DEBUG 1189#ifdef CONFIG_MAC80211_HT_DEBUG
1132 printk(KERN_DEBUG "Rx A-MPDU on tid %d result %d", tid, ret); 1190 printk(KERN_DEBUG "Rx A-MPDU on tid %d result %d", tid, ret);
1133#endif /* CONFIG_MAC80211_HT_DEBUG */ 1191#endif /* CONFIG_MAC80211_HT_DEBUG */
@@ -1155,8 +1213,80 @@ end_no_lock:
1155 sta_info_put(sta); 1213 sta_info_put(sta);
1156} 1214}
1157 1215
1158static void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, 1216static void ieee80211_sta_process_addba_resp(struct net_device *dev,
1159 u16 initiator, u16 reason_code) 1217 struct ieee80211_mgmt *mgmt,
1218 size_t len)
1219{
1220 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1221 struct ieee80211_hw *hw = &local->hw;
1222 struct sta_info *sta;
1223 u16 capab;
1224 u16 tid;
1225 u8 *state;
1226
1227 sta = sta_info_get(local, mgmt->sa);
1228 if (!sta)
1229 return;
1230
1231 capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
1232 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
1233
1234 state = &sta->ampdu_mlme.tid_tx[tid].state;
1235
1236 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
1237
1238 if (mgmt->u.action.u.addba_resp.dialog_token !=
1239 sta->ampdu_mlme.tid_tx[tid].dialog_token) {
1240 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1241#ifdef CONFIG_MAC80211_HT_DEBUG
1242 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
1243#endif /* CONFIG_MAC80211_HT_DEBUG */
1244 sta_info_put(sta);
1245 return;
1246 }
1247
1248 del_timer_sync(&sta->ampdu_mlme.tid_tx[tid].addba_resp_timer);
1249#ifdef CONFIG_MAC80211_HT_DEBUG
1250 printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
1251#endif /* CONFIG_MAC80211_HT_DEBUG */
1252 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
1253 == WLAN_STATUS_SUCCESS) {
1254 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
1255 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1256 printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:"
1257 "%d\n", *state);
1258 sta_info_put(sta);
1259 return;
1260 }
1261
1262 if (*state & HT_ADDBA_RECEIVED_MSK)
1263 printk(KERN_DEBUG "double addBA response\n");
1264
1265 *state |= HT_ADDBA_RECEIVED_MSK;
1266 sta->ampdu_mlme.tid_tx[tid].addba_req_num = 0;
1267
1268 if (*state == HT_AGG_STATE_OPERATIONAL) {
1269 printk(KERN_DEBUG "Aggregation on for tid %d \n", tid);
1270 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
1271 }
1272
1273 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1274 printk(KERN_DEBUG "recipient accepted agg: tid %d \n", tid);
1275 } else {
1276 printk(KERN_DEBUG "recipient rejected agg: tid %d \n", tid);
1277
1278 sta->ampdu_mlme.tid_tx[tid].addba_req_num++;
1279 /* this will allow the state check in stop_BA_session */
1280 *state = HT_AGG_STATE_OPERATIONAL;
1281 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1282 ieee80211_stop_tx_ba_session(hw, sta->addr, tid,
1283 WLAN_BACK_INITIATOR);
1284 }
1285 sta_info_put(sta);
1286}
1287
1288void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
1289 u16 initiator, u16 reason_code)
1160{ 1290{
1161 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1291 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1162 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1292 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1229,7 +1359,7 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
1229 BUG_ON(!local->ops->ampdu_action); 1359 BUG_ON(!local->ops->ampdu_action);
1230 1360
1231 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP, 1361 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP,
1232 ra, tid, EINVAL); 1362 ra, tid, NULL);
1233 if (ret) 1363 if (ret)
1234 printk(KERN_DEBUG "HW problem - can not stop rx " 1364 printk(KERN_DEBUG "HW problem - can not stop rx "
 1235 "aggregation for tid %d\n", tid); 1365
@@ -1258,6 +1388,7 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
1258 sta_info_put(sta); 1388 sta_info_put(sta);
1259} 1389}
1260 1390
1391
1261static void ieee80211_sta_process_delba(struct net_device *dev, 1392static void ieee80211_sta_process_delba(struct net_device *dev,
1262 struct ieee80211_mgmt *mgmt, size_t len) 1393 struct ieee80211_mgmt *mgmt, size_t len)
1263{ 1394{
@@ -1277,14 +1408,70 @@ static void ieee80211_sta_process_delba(struct net_device *dev,
1277 1408
1278#ifdef CONFIG_MAC80211_HT_DEBUG 1409#ifdef CONFIG_MAC80211_HT_DEBUG
1279 if (net_ratelimit()) 1410 if (net_ratelimit())
1280 printk(KERN_DEBUG "delba from %s on tid %d reason code %d\n", 1411 printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n",
1281 print_mac(mac, mgmt->sa), tid, 1412 print_mac(mac, mgmt->sa),
1413 initiator ? "recipient" : "initiator", tid,
1282 mgmt->u.action.u.delba.reason_code); 1414 mgmt->u.action.u.delba.reason_code);
1283#endif /* CONFIG_MAC80211_HT_DEBUG */ 1415#endif /* CONFIG_MAC80211_HT_DEBUG */
1284 1416
1285 if (initiator == WLAN_BACK_INITIATOR) 1417 if (initiator == WLAN_BACK_INITIATOR)
1286 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid, 1418 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid,
1287 WLAN_BACK_INITIATOR, 0); 1419 WLAN_BACK_INITIATOR, 0);
1420 else { /* WLAN_BACK_RECIPIENT */
1421 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
1422 sta->ampdu_mlme.tid_tx[tid].state =
1423 HT_AGG_STATE_OPERATIONAL;
1424 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1425 ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid,
1426 WLAN_BACK_RECIPIENT);
1427 }
1428 sta_info_put(sta);
1429}
1430
1431/*
 1432 * After sending an add Block Ack request, a timer runs until the
 1433 * add Block Ack response arrives from the recipient.
 1434 * If this timer expires, sta_addba_resp_timer_expired will be executed.
1435 */
1436void sta_addba_resp_timer_expired(unsigned long data)
1437{
1438 /* not an elegant detour, but there is no choice as the timer passes
1439 * only one argument, and both sta_info and TID are needed, so init
 1440 * flow in sta_info_add gives the TID as data, while the timer_to_tid
1441 * array gives the sta through container_of */
1442 u16 tid = *(int *)data;
1443 struct sta_info *temp_sta = container_of((void *)data,
1444 struct sta_info, timer_to_tid[tid]);
1445
1446 struct ieee80211_local *local = temp_sta->local;
1447 struct ieee80211_hw *hw = &local->hw;
1448 struct sta_info *sta;
1449 u8 *state;
1450
1451 sta = sta_info_get(local, temp_sta->addr);
1452 if (!sta)
1453 return;
1454
1455 state = &sta->ampdu_mlme.tid_tx[tid].state;
1456 /* check if the TID waits for addBA response */
1457 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
1458 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
1459 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1460 *state = HT_AGG_STATE_IDLE;
1461 printk(KERN_DEBUG "timer expired on tid %d but we are not "
1462 "expecting addBA response there", tid);
1463 goto timer_expired_exit;
1464 }
1465
1466 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
1467
1468 /* go through the state check in stop_BA_session */
1469 *state = HT_AGG_STATE_OPERATIONAL;
1470 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
1471 ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid,
1472 WLAN_BACK_INITIATOR);
1473
1474timer_expired_exit:
1288 sta_info_put(sta); 1475 sta_info_put(sta);
1289} 1476}
1290 1477
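
A timer callback receives only a single unsigned long, but sta_addba_resp_timer_expired() needs both the station and the TID, so each sta_info carries a per-TID array whose slots hold their own index; the callback reads the TID from the slot and recovers the station with container_of(). A userspace sketch of that pointer arithmetic (struct fake_sta is a stand-in; the kernel indexes the member directly inside its container_of macro):

    #include <stdio.h>
    #include <stddef.h>

    /* userspace equivalent of the kernel's container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    #define NUM_TIDS 16

    struct fake_sta {
        const char *name;
        /* one slot per TID, pre-filled with its own index; the timer gets
         * &timer_to_tid[tid] as its single argument */
        int timer_to_tid[NUM_TIDS];
    };

    static void addba_resp_timer_expired(unsigned long data)
    {
        int tid = *(int *)data;
        /* step back to the array start, then to the enclosing struct */
        struct fake_sta *sta = container_of((int *)data - tid,
                                            struct fake_sta, timer_to_tid);

        printf("addBA response timer for %s, tid %d\n", sta->name, tid);
    }

    int main(void)
    {
        struct fake_sta sta = { .name = "ap-peer" };
        int tid;

        for (tid = 0; tid < NUM_TIDS; tid++)
            sta.timer_to_tid[tid] = tid;

        /* simulate the timer firing for TID 5 */
        addba_resp_timer_expired((unsigned long)&sta.timer_to_tid[5]);
        return 0;
    }
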
@@ -1536,15 +1723,16 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1536{ 1723{
1537 struct ieee80211_local *local = sdata->local; 1724 struct ieee80211_local *local = sdata->local;
1538 struct net_device *dev = sdata->dev; 1725 struct net_device *dev = sdata->dev;
1539 struct ieee80211_hw_mode *mode; 1726 struct ieee80211_supported_band *sband;
1540 struct sta_info *sta; 1727 struct sta_info *sta;
1541 u32 rates; 1728 u64 rates, basic_rates;
1542 u16 capab_info, status_code, aid; 1729 u16 capab_info, status_code, aid;
1543 struct ieee802_11_elems elems; 1730 struct ieee802_11_elems elems;
1544 struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf; 1731 struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf;
1545 u8 *pos; 1732 u8 *pos;
1546 int i, j; 1733 int i, j;
1547 DECLARE_MAC_BUF(mac); 1734 DECLARE_MAC_BUF(mac);
1735 bool have_higher_than_11mbit = false;
1548 1736
1549 /* AssocResp and ReassocResp have identical structure, so process both 1737 /* AssocResp and ReassocResp have identical structure, so process both
1550 * of them in this function. */ 1738 * of them in this function. */
@@ -1614,22 +1802,18 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1614 if (ifsta->assocresp_ies) 1802 if (ifsta->assocresp_ies)
1615 memcpy(ifsta->assocresp_ies, pos, ifsta->assocresp_ies_len); 1803 memcpy(ifsta->assocresp_ies, pos, ifsta->assocresp_ies_len);
1616 1804
1617 /* set AID, ieee80211_set_associated() will tell the driver */
1618 bss_conf->aid = aid;
1619 ieee80211_set_associated(dev, ifsta, 1);
1620
1621 /* Add STA entry for the AP */ 1805 /* Add STA entry for the AP */
1622 sta = sta_info_get(local, ifsta->bssid); 1806 sta = sta_info_get(local, ifsta->bssid);
1623 if (!sta) { 1807 if (!sta) {
1624 struct ieee80211_sta_bss *bss; 1808 struct ieee80211_sta_bss *bss;
1625 sta = sta_info_add(local, dev, ifsta->bssid, GFP_KERNEL); 1809 sta = sta_info_add(local, dev, ifsta->bssid, GFP_KERNEL);
1626 if (!sta) { 1810 if (IS_ERR(sta)) {
1627 printk(KERN_DEBUG "%s: failed to add STA entry for the" 1811 printk(KERN_DEBUG "%s: failed to add STA entry for the"
1628 " AP\n", dev->name); 1812 " AP (error %ld)\n", dev->name, PTR_ERR(sta));
1629 return; 1813 return;
1630 } 1814 }
1631 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, 1815 bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
1632 local->hw.conf.channel, 1816 local->hw.conf.channel->center_freq,
1633 ifsta->ssid, ifsta->ssid_len); 1817 ifsta->ssid, ifsta->ssid_len);
1634 if (bss) { 1818 if (bss) {
1635 sta->last_rssi = bss->rssi; 1819 sta->last_rssi = bss->rssi;
@@ -1640,23 +1824,50 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1640 } 1824 }
1641 1825
1642 sta->dev = dev; 1826 sta->dev = dev;
1643 sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP; 1827 sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP |
1828 WLAN_STA_AUTHORIZED;
1644 1829
1645 rates = 0; 1830 rates = 0;
1646 mode = local->oper_hw_mode; 1831 basic_rates = 0;
1832 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1833
1647 for (i = 0; i < elems.supp_rates_len; i++) { 1834 for (i = 0; i < elems.supp_rates_len; i++) {
1648 int rate = (elems.supp_rates[i] & 0x7f) * 5; 1835 int rate = (elems.supp_rates[i] & 0x7f) * 5;
1649 for (j = 0; j < mode->num_rates; j++) 1836
1650 if (mode->rates[j].rate == rate) 1837 if (rate > 110)
1838 have_higher_than_11mbit = true;
1839
1840 for (j = 0; j < sband->n_bitrates; j++) {
1841 if (sband->bitrates[j].bitrate == rate)
1651 rates |= BIT(j); 1842 rates |= BIT(j);
1843 if (elems.supp_rates[i] & 0x80)
1844 basic_rates |= BIT(j);
1845 }
1652 } 1846 }
1847
1653 for (i = 0; i < elems.ext_supp_rates_len; i++) { 1848 for (i = 0; i < elems.ext_supp_rates_len; i++) {
1654 int rate = (elems.ext_supp_rates[i] & 0x7f) * 5; 1849 int rate = (elems.ext_supp_rates[i] & 0x7f) * 5;
1655 for (j = 0; j < mode->num_rates; j++) 1850
1656 if (mode->rates[j].rate == rate) 1851 if (rate > 110)
1852 have_higher_than_11mbit = true;
1853
1854 for (j = 0; j < sband->n_bitrates; j++) {
1855 if (sband->bitrates[j].bitrate == rate)
1657 rates |= BIT(j); 1856 rates |= BIT(j);
1857 if (elems.ext_supp_rates[i] & 0x80)
1858 basic_rates |= BIT(j);
1859 }
1658 } 1860 }
1659 sta->supp_rates = rates; 1861
1862 sta->supp_rates[local->hw.conf.channel->band] = rates;
1863 sdata->basic_rates = basic_rates;
1864
1865 /* cf. IEEE 802.11 9.2.12 */
1866 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
1867 have_higher_than_11mbit)
1868 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
1869 else
1870 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
1660 1871
1661 if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param && 1872 if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param &&
1662 local->ops->conf_ht) { 1873 local->ops->conf_ht) {
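
On the receive side the same rate octets are decoded back: bit 7 marks a basic rate (collected into sdata->basic_rates), the low seven bits give the rate in 500 kbps units, and any rate above 11 Mbit/s on 2.4 GHz flips the interface into G-mode operation (cf. the IEEE 802.11 9.2.12 reference above). A small decoding sketch with example octets:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    int main(void)
    {
        /* example Supported Rates octets (made up): bit 7 marks a basic
         * rate, the low 7 bits are the rate in 500 kbps units */
        uint8_t supp_rates[] = { 0x82, 0x84, 0x8b, 0x96,
                                 0x0c, 0x12, 0x18, 0x24 };
        bool have_higher_than_11mbit = false;
        size_t i;

        for (i = 0; i < sizeof(supp_rates); i++) {
            int rate = (supp_rates[i] & 0x7f) * 5;    /* 100 kbps units */
            bool basic = supp_rates[i] & 0x80;

            if (rate > 110)                           /* above 11 Mbit/s */
                have_higher_than_11mbit = true;

            printf("%2d.%d Mbit/s%s\n", rate / 10, rate % 10,
                   basic ? " (basic)" : "");
        }
        printf("2.4 GHz G mode: %s\n",
               have_higher_than_11mbit ? "yes" : "no");
        return 0;
    }
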
@@ -1679,6 +1890,9 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1679 elems.wmm_param_len); 1890 elems.wmm_param_len);
1680 } 1891 }
1681 1892
1893 /* set AID, ieee80211_set_associated() will tell the driver */
1894 bss_conf->aid = aid;
1895 ieee80211_set_associated(dev, ifsta, 1);
1682 1896
1683 sta_info_put(sta); 1897 sta_info_put(sta);
1684 1898
@@ -1719,7 +1933,7 @@ static void __ieee80211_rx_bss_hash_del(struct net_device *dev,
1719 1933
1720 1934
1721static struct ieee80211_sta_bss * 1935static struct ieee80211_sta_bss *
1722ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int channel, 1936ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int freq,
1723 u8 *ssid, u8 ssid_len) 1937 u8 *ssid, u8 ssid_len)
1724{ 1938{
1725 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1939 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
@@ -1731,7 +1945,7 @@ ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int channel,
1731 atomic_inc(&bss->users); 1945 atomic_inc(&bss->users);
1732 atomic_inc(&bss->users); 1946 atomic_inc(&bss->users);
1733 memcpy(bss->bssid, bssid, ETH_ALEN); 1947 memcpy(bss->bssid, bssid, ETH_ALEN);
1734 bss->channel = channel; 1948 bss->freq = freq;
1735 if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) { 1949 if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) {
1736 memcpy(bss->ssid, ssid, ssid_len); 1950 memcpy(bss->ssid, ssid, ssid_len);
1737 bss->ssid_len = ssid_len; 1951 bss->ssid_len = ssid_len;
@@ -1747,7 +1961,7 @@ ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int channel,
1747 1961
1748 1962
1749static struct ieee80211_sta_bss * 1963static struct ieee80211_sta_bss *
1750ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int channel, 1964ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq,
1751 u8 *ssid, u8 ssid_len) 1965 u8 *ssid, u8 ssid_len)
1752{ 1966{
1753 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1967 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
@@ -1757,7 +1971,7 @@ ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int channel,
1757 bss = local->sta_bss_hash[STA_HASH(bssid)]; 1971 bss = local->sta_bss_hash[STA_HASH(bssid)];
1758 while (bss) { 1972 while (bss) {
1759 if (!memcmp(bss->bssid, bssid, ETH_ALEN) && 1973 if (!memcmp(bss->bssid, bssid, ETH_ALEN) &&
1760 bss->channel == channel && 1974 bss->freq == freq &&
1761 bss->ssid_len == ssid_len && 1975 bss->ssid_len == ssid_len &&
1762 (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) { 1976 (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) {
1763 atomic_inc(&bss->users); 1977 atomic_inc(&bss->users);
@@ -1813,6 +2027,165 @@ void ieee80211_rx_bss_list_deinit(struct net_device *dev)
1813} 2027}
1814 2028
1815 2029
2030static int ieee80211_sta_join_ibss(struct net_device *dev,
2031 struct ieee80211_if_sta *ifsta,
2032 struct ieee80211_sta_bss *bss)
2033{
2034 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2035 int res, rates, i, j;
2036 struct sk_buff *skb;
2037 struct ieee80211_mgmt *mgmt;
2038 struct ieee80211_tx_control control;
2039 struct rate_selection ratesel;
2040 u8 *pos;
2041 struct ieee80211_sub_if_data *sdata;
2042 struct ieee80211_supported_band *sband;
2043
2044 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2045
2046 /* Remove possible STA entries from other IBSS networks. */
2047 sta_info_flush(local, NULL);
2048
2049 if (local->ops->reset_tsf) {
2050 /* Reset own TSF to allow time synchronization work. */
2051 local->ops->reset_tsf(local_to_hw(local));
2052 }
2053 memcpy(ifsta->bssid, bss->bssid, ETH_ALEN);
2054 res = ieee80211_if_config(dev);
2055 if (res)
2056 return res;
2057
2058 local->hw.conf.beacon_int = bss->beacon_int >= 10 ? bss->beacon_int : 10;
2059
2060 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2061 sdata->drop_unencrypted = bss->capability &
2062 WLAN_CAPABILITY_PRIVACY ? 1 : 0;
2063
2064 res = ieee80211_set_freq(local, bss->freq);
2065
2066 if (local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS) {
2067 printk(KERN_DEBUG "%s: IBSS not allowed on frequency "
2068 "%d MHz\n", dev->name, local->oper_channel->center_freq);
2069 return -1;
2070 }
2071
2072 /* Set beacon template */
2073 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
2074 do {
2075 if (!skb)
2076 break;
2077
2078 skb_reserve(skb, local->hw.extra_tx_headroom);
2079
2080 mgmt = (struct ieee80211_mgmt *)
2081 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
2082 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
2083 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
2084 IEEE80211_STYPE_BEACON);
2085 memset(mgmt->da, 0xff, ETH_ALEN);
2086 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
2087 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
2088 mgmt->u.beacon.beacon_int =
2089 cpu_to_le16(local->hw.conf.beacon_int);
2090 mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability);
2091
2092 pos = skb_put(skb, 2 + ifsta->ssid_len);
2093 *pos++ = WLAN_EID_SSID;
2094 *pos++ = ifsta->ssid_len;
2095 memcpy(pos, ifsta->ssid, ifsta->ssid_len);
2096
2097 rates = bss->supp_rates_len;
2098 if (rates > 8)
2099 rates = 8;
2100 pos = skb_put(skb, 2 + rates);
2101 *pos++ = WLAN_EID_SUPP_RATES;
2102 *pos++ = rates;
2103 memcpy(pos, bss->supp_rates, rates);
2104
2105 if (bss->band == IEEE80211_BAND_2GHZ) {
2106 pos = skb_put(skb, 2 + 1);
2107 *pos++ = WLAN_EID_DS_PARAMS;
2108 *pos++ = 1;
2109 *pos++ = ieee80211_frequency_to_channel(bss->freq);
2110 }
2111
2112 pos = skb_put(skb, 2 + 2);
2113 *pos++ = WLAN_EID_IBSS_PARAMS;
2114 *pos++ = 2;
2115 /* FIX: set ATIM window based on scan results */
2116 *pos++ = 0;
2117 *pos++ = 0;
2118
2119 if (bss->supp_rates_len > 8) {
2120 rates = bss->supp_rates_len - 8;
2121 pos = skb_put(skb, 2 + rates);
2122 *pos++ = WLAN_EID_EXT_SUPP_RATES;
2123 *pos++ = rates;
2124 memcpy(pos, &bss->supp_rates[8], rates);
2125 }
2126
2127 memset(&control, 0, sizeof(control));
2128 rate_control_get_rate(dev, sband, skb, &ratesel);
2129 if (!ratesel.rate) {
2130 printk(KERN_DEBUG "%s: Failed to determine TX rate "
2131 "for IBSS beacon\n", dev->name);
2132 break;
2133 }
2134 control.vif = &sdata->vif;
2135 control.tx_rate = ratesel.rate;
2136 if (sdata->bss_conf.use_short_preamble &&
2137 ratesel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
2138 control.flags |= IEEE80211_TXCTL_SHORT_PREAMBLE;
2139 control.antenna_sel_tx = local->hw.conf.antenna_sel_tx;
2140 control.flags |= IEEE80211_TXCTL_NO_ACK;
2141 control.retry_limit = 1;
2142
2143 ifsta->probe_resp = skb_copy(skb, GFP_ATOMIC);
2144 if (ifsta->probe_resp) {
2145 mgmt = (struct ieee80211_mgmt *)
2146 ifsta->probe_resp->data;
2147 mgmt->frame_control =
2148 IEEE80211_FC(IEEE80211_FTYPE_MGMT,
2149 IEEE80211_STYPE_PROBE_RESP);
2150 } else {
2151 printk(KERN_DEBUG "%s: Could not allocate ProbeResp "
2152 "template for IBSS\n", dev->name);
2153 }
2154
2155 if (local->ops->beacon_update &&
2156 local->ops->beacon_update(local_to_hw(local),
2157 skb, &control) == 0) {
2158 printk(KERN_DEBUG "%s: Configured IBSS beacon "
2159 "template\n", dev->name);
2160 skb = NULL;
2161 }
2162
2163 rates = 0;
2164 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2165 for (i = 0; i < bss->supp_rates_len; i++) {
2166 int bitrate = (bss->supp_rates[i] & 0x7f) * 5;
2167 for (j = 0; j < sband->n_bitrates; j++)
2168 if (sband->bitrates[j].bitrate == bitrate)
2169 rates |= BIT(j);
2170 }
2171 ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates;
2172 } while (0);
2173
2174 if (skb) {
2175 printk(KERN_DEBUG "%s: Failed to configure IBSS beacon "
2176 "template\n", dev->name);
2177 dev_kfree_skb(skb);
2178 }
2179
2180 ifsta->state = IEEE80211_IBSS_JOINED;
2181 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
2182
2183 ieee80211_rx_bss_put(dev, bss);
2184
2185 return res;
2186}
2187
2188
1816static void ieee80211_rx_bss_info(struct net_device *dev, 2189static void ieee80211_rx_bss_info(struct net_device *dev,
1817 struct ieee80211_mgmt *mgmt, 2190 struct ieee80211_mgmt *mgmt,
1818 size_t len, 2191 size_t len,
@@ -1822,11 +2195,11 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
1822 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2195 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1823 struct ieee802_11_elems elems; 2196 struct ieee802_11_elems elems;
1824 size_t baselen; 2197 size_t baselen;
1825 int channel, clen; 2198 int freq, clen;
1826 struct ieee80211_sta_bss *bss; 2199 struct ieee80211_sta_bss *bss;
1827 struct sta_info *sta; 2200 struct sta_info *sta;
1828 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2201 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1829 u64 timestamp; 2202 u64 beacon_timestamp, rx_timestamp;
1830 DECLARE_MAC_BUF(mac); 2203 DECLARE_MAC_BUF(mac);
1831 DECLARE_MAC_BUF(mac2); 2204 DECLARE_MAC_BUF(mac2);
1832 2205
@@ -1843,56 +2216,28 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
1843 if (baselen > len) 2216 if (baselen > len)
1844 return; 2217 return;
1845 2218
1846 timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); 2219 beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
1847
1848 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon &&
1849 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0) {
1850#ifdef CONFIG_MAC80211_IBSS_DEBUG
1851 static unsigned long last_tsf_debug = 0;
1852 u64 tsf;
1853 if (local->ops->get_tsf)
1854 tsf = local->ops->get_tsf(local_to_hw(local));
1855 else
1856 tsf = -1LLU;
1857 if (time_after(jiffies, last_tsf_debug + 5 * HZ)) {
1858 printk(KERN_DEBUG "RX beacon SA=%s BSSID="
1859 "%s TSF=0x%llx BCN=0x%llx diff=%lld "
1860 "@%lu\n",
1861 print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->bssid),
1862 (unsigned long long)tsf,
1863 (unsigned long long)timestamp,
1864 (unsigned long long)(tsf - timestamp),
1865 jiffies);
1866 last_tsf_debug = jiffies;
1867 }
1868#endif /* CONFIG_MAC80211_IBSS_DEBUG */
1869 }
1870
1871 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); 2220 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
1872 2221
1873 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems.supp_rates && 2222 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems.supp_rates &&
1874 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 && 2223 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 &&
1875 (sta = sta_info_get(local, mgmt->sa))) { 2224 (sta = sta_info_get(local, mgmt->sa))) {
1876 struct ieee80211_hw_mode *mode; 2225 struct ieee80211_supported_band *sband;
1877 struct ieee80211_rate *rates; 2226 struct ieee80211_rate *bitrates;
1878 size_t num_rates; 2227 size_t num_rates;
1879 u32 supp_rates, prev_rates; 2228 u64 supp_rates, prev_rates;
1880 int i, j; 2229 int i, j;
1881 2230
1882 mode = local->sta_sw_scanning ? 2231 sband = local->hw.wiphy->bands[rx_status->band];
1883 local->scan_hw_mode : local->oper_hw_mode;
1884 2232
1885 if (local->sta_hw_scanning) { 2233 if (!sband) {
1886 /* search for the correct mode matches the beacon */ 2234 WARN_ON(1);
1887 list_for_each_entry(mode, &local->modes_list, list) 2235 sband = local->hw.wiphy->bands[
1888 if (mode->mode == rx_status->phymode) 2236 local->hw.conf.channel->band];
1889 break;
1890
1891 if (mode == NULL)
1892 mode = local->oper_hw_mode;
1893 } 2237 }
1894 rates = mode->rates; 2238
1895 num_rates = mode->num_rates; 2239 bitrates = sband->bitrates;
2240 num_rates = sband->n_bitrates;
1896 2241
1897 supp_rates = 0; 2242 supp_rates = 0;
1898 for (i = 0; i < elems.supp_rates_len + 2243 for (i = 0; i < elems.supp_rates_len +
@@ -1906,24 +2251,27 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
1906 [i - elems.supp_rates_len]; 2251 [i - elems.supp_rates_len];
1907 own_rate = 5 * (rate & 0x7f); 2252 own_rate = 5 * (rate & 0x7f);
1908 for (j = 0; j < num_rates; j++) 2253 for (j = 0; j < num_rates; j++)
1909 if (rates[j].rate == own_rate) 2254 if (bitrates[j].bitrate == own_rate)
1910 supp_rates |= BIT(j); 2255 supp_rates |= BIT(j);
1911 } 2256 }
1912 2257
1913 prev_rates = sta->supp_rates; 2258 prev_rates = sta->supp_rates[rx_status->band];
1914 sta->supp_rates &= supp_rates; 2259 sta->supp_rates[rx_status->band] &= supp_rates;
1915 if (sta->supp_rates == 0) { 2260 if (sta->supp_rates[rx_status->band] == 0) {
1916 /* No matching rates - this should not really happen. 2261 /* No matching rates - this should not really happen.
1917 * Make sure that at least one rate is marked 2262 * Make sure that at least one rate is marked
1918 * supported to avoid issues with TX rate ctrl. */ 2263 * supported to avoid issues with TX rate ctrl. */
1919 sta->supp_rates = sdata->u.sta.supp_rates_bits; 2264 sta->supp_rates[rx_status->band] =
2265 sdata->u.sta.supp_rates_bits[rx_status->band];
1920 } 2266 }
1921 if (sta->supp_rates != prev_rates) { 2267 if (sta->supp_rates[rx_status->band] != prev_rates) {
1922 printk(KERN_DEBUG "%s: updated supp_rates set for " 2268 printk(KERN_DEBUG "%s: updated supp_rates set for "
1923 "%s based on beacon info (0x%x & 0x%x -> " 2269 "%s based on beacon info (0x%llx & 0x%llx -> "
1924 "0x%x)\n", 2270 "0x%llx)\n",
1925 dev->name, print_mac(mac, sta->addr), prev_rates, 2271 dev->name, print_mac(mac, sta->addr),
1926 supp_rates, sta->supp_rates); 2272 (unsigned long long) prev_rates,
2273 (unsigned long long) supp_rates,
2274 (unsigned long long) sta->supp_rates[rx_status->band]);
1927 } 2275 }
1928 sta_info_put(sta); 2276 sta_info_put(sta);
1929 } 2277 }
@@ -1932,14 +2280,14 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
1932 return; 2280 return;
1933 2281
1934 if (elems.ds_params && elems.ds_params_len == 1) 2282 if (elems.ds_params && elems.ds_params_len == 1)
1935 channel = elems.ds_params[0]; 2283 freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
1936 else 2284 else
1937 channel = rx_status->channel; 2285 freq = rx_status->freq;
1938 2286
1939 bss = ieee80211_rx_bss_get(dev, mgmt->bssid, channel, 2287 bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq,
1940 elems.ssid, elems.ssid_len); 2288 elems.ssid, elems.ssid_len);
1941 if (!bss) { 2289 if (!bss) {
1942 bss = ieee80211_rx_bss_add(dev, mgmt->bssid, channel, 2290 bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq,
1943 elems.ssid, elems.ssid_len); 2291 elems.ssid, elems.ssid_len);
1944 if (!bss) 2292 if (!bss)
1945 return; 2293 return;
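
Scan entries are now keyed by frequency rather than channel number, so the DS Parameter Set channel is mapped through ieee80211_channel_to_frequency(). A sketch of the 2.4 GHz portion of that mapping only (the in-kernel helpers also cover 5 GHz channels):

    #include <stdio.h>

    /* 2.4 GHz part of the channel/frequency mapping used for the DS
     * Parameter Set: channels 1..13 sit at 2407 + 5 * chan MHz,
     * channel 14 is special-cased at 2484 MHz. */
    static int channel_to_freq(int chan)
    {
        return chan == 14 ? 2484 : 2407 + chan * 5;
    }

    static int freq_to_channel(int freq)
    {
        return freq == 2484 ? 14 : (freq - 2407) / 5;
    }

    int main(void)
    {
        int chan;

        for (chan = 1; chan <= 14; chan++)
            printf("chan %2d <-> %d MHz (back to %d)\n", chan,
                   channel_to_freq(chan),
                   freq_to_channel(channel_to_freq(chan)));
        return 0;
    }
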
@@ -1952,8 +2300,12 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
1952#endif 2300#endif
1953 } 2301 }
1954 2302
1955 if (bss->probe_resp && beacon) { 2303 bss->band = rx_status->band;
1956 /* Do not allow beacon to override data from Probe Response. */ 2304
2305 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
2306 bss->probe_resp && beacon) {
2307 /* STA mode:
2308 * Do not allow beacon to override data from Probe Response. */
1957 ieee80211_rx_bss_put(dev, bss); 2309 ieee80211_rx_bss_put(dev, bss);
1958 return; 2310 return;
1959 } 2311 }
@@ -2050,27 +2402,69 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2050 bss->ht_ie_len = 0; 2402 bss->ht_ie_len = 0;
2051 } 2403 }
2052 2404
2053 bss->hw_mode = rx_status->phymode; 2405 bss->timestamp = beacon_timestamp;
2054 bss->freq = rx_status->freq;
2055 if (channel != rx_status->channel &&
2056 (bss->hw_mode == MODE_IEEE80211G ||
2057 bss->hw_mode == MODE_IEEE80211B) &&
2058 channel >= 1 && channel <= 14) {
2059 static const int freq_list[] = {
2060 2412, 2417, 2422, 2427, 2432, 2437, 2442,
2061 2447, 2452, 2457, 2462, 2467, 2472, 2484
2062 };
2063 /* IEEE 802.11g/b mode can receive packets from neighboring
2064 * channels, so map the channel into frequency. */
2065 bss->freq = freq_list[channel - 1];
2066 }
2067 bss->timestamp = timestamp;
2068 bss->last_update = jiffies; 2406 bss->last_update = jiffies;
2069 bss->rssi = rx_status->ssi; 2407 bss->rssi = rx_status->ssi;
2070 bss->signal = rx_status->signal; 2408 bss->signal = rx_status->signal;
2071 bss->noise = rx_status->noise; 2409 bss->noise = rx_status->noise;
2072 if (!beacon) 2410 if (!beacon)
2073 bss->probe_resp++; 2411 bss->probe_resp++;
2412
2413 /* check if we need to merge IBSS */
2414 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon &&
2415 !local->sta_sw_scanning && !local->sta_hw_scanning &&
2416 bss->capability & WLAN_CAPABILITY_IBSS &&
2417 bss->freq == local->oper_channel->center_freq &&
2418 elems.ssid_len == sdata->u.sta.ssid_len &&
2419 memcmp(elems.ssid, sdata->u.sta.ssid, sdata->u.sta.ssid_len) == 0) {
2420 if (rx_status->flag & RX_FLAG_TSFT) {
2421 /* in order for correct IBSS merging we need mactime
2422 *
2423 * since mactime is defined as the time the first data
2424 * symbol of the frame hits the PHY, and the timestamp
2425 * of the beacon is defined as "the time that the data
2426 * symbol containing the first bit of the timestamp is
2427 * transmitted to the PHY plus the transmitting STA’s
2428 * delays through its local PHY from the MAC-PHY
2429 * interface to its interface with the WM"
2430 * (802.11 11.1.2) - equals the time this bit arrives at
2431 * the receiver - we have to take into account the
2432 * offset between the two.
2433 * e.g: at 1 MBit that means mactime is 192 usec earlier
2434 * (=24 bytes * 8 usecs/byte) than the beacon timestamp.
2435 */
2436 int rate = local->hw.wiphy->bands[rx_status->band]->
2437 bitrates[rx_status->rate_idx].bitrate;
2438 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);
2439 } else if (local && local->ops && local->ops->get_tsf)
2440 /* second best option: get current TSF */
2441 rx_timestamp = local->ops->get_tsf(local_to_hw(local));
2442 else
2443 /* can't merge without knowing the TSF */
2444 rx_timestamp = -1LLU;
2445#ifdef CONFIG_MAC80211_IBSS_DEBUG
2446 printk(KERN_DEBUG "RX beacon SA=%s BSSID="
2447 "%s TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
2448 print_mac(mac, mgmt->sa),
2449 print_mac(mac2, mgmt->bssid),
2450 (unsigned long long)rx_timestamp,
2451 (unsigned long long)beacon_timestamp,
2452 (unsigned long long)(rx_timestamp - beacon_timestamp),
2453 jiffies);
2454#endif /* CONFIG_MAC80211_IBSS_DEBUG */
2455 if (beacon_timestamp > rx_timestamp) {
2456#ifndef CONFIG_MAC80211_IBSS_DEBUG
2457 if (net_ratelimit())
2458#endif
2459 printk(KERN_DEBUG "%s: beacon TSF higher than "
2460 "local TSF - IBSS merge with BSSID %s\n",
2461 dev->name, print_mac(mac, mgmt->bssid));
2462 ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss);
2463 ieee80211_ibss_add_sta(dev, NULL,
2464 mgmt->bssid, mgmt->sa);
2465 }
2466 }
2467
2074 ieee80211_rx_bss_put(dev, bss); 2468 ieee80211_rx_bss_put(dev, bss);
2075} 2469}
2076 2470
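
The IBSS-merge check compares the beacon's timestamp field against the receiver's mactime after correcting for the 24-byte MAC header transmitted ahead of the timestamp, exactly as the comment above works out for 1 Mbit/s. A worked sketch of that correction, with rate in 100 kbps units as in sband->bitrates[].bitrate and a made-up mactime:

    #include <stdio.h>
    #include <stdint.h>

    /* The beacon timestamp is sampled when its first bit reaches the PHY,
     * while mactime marks the start of the frame, one 24-byte MAC header
     * earlier.  Those 24 bytes take 24 * 8 * 10 / rate microseconds,
     * with rate in 100 kbps units. */
    static uint64_t beacon_rx_timestamp(uint64_t mactime, int rate)
    {
        return mactime + (24 * 8 * 10) / rate;
    }

    int main(void)
    {
        uint64_t mactime = 1000000;    /* made-up mactime, usec */

        printf(" 1 Mbit/s: +%llu usec\n", (unsigned long long)
               (beacon_rx_timestamp(mactime, 10) - mactime));     /* 192 */
        printf("11 Mbit/s: +%llu usec\n", (unsigned long long)
               (beacon_rx_timestamp(mactime, 110) - mactime));    /*  17 */
        return 0;
    }
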
@@ -2235,6 +2629,12 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev,
2235 break; 2629 break;
2236 ieee80211_sta_process_addba_request(dev, mgmt, len); 2630 ieee80211_sta_process_addba_request(dev, mgmt, len);
2237 break; 2631 break;
2632 case WLAN_ACTION_ADDBA_RESP:
2633 if (len < (IEEE80211_MIN_ACTION_SIZE +
2634 sizeof(mgmt->u.action.u.addba_resp)))
2635 break;
2636 ieee80211_sta_process_addba_resp(dev, mgmt, len);
2637 break;
2238 case WLAN_ACTION_DELBA: 2638 case WLAN_ACTION_DELBA:
2239 if (len < (IEEE80211_MIN_ACTION_SIZE + 2639 if (len < (IEEE80211_MIN_ACTION_SIZE +
2240 sizeof(mgmt->u.action.u.delba))) 2640 sizeof(mgmt->u.action.u.delba)))
@@ -2348,7 +2748,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
2348} 2748}
2349 2749
2350 2750
2351ieee80211_txrx_result 2751ieee80211_rx_result
2352ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb, 2752ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb,
2353 struct ieee80211_rx_status *rx_status) 2753 struct ieee80211_rx_status *rx_status)
2354{ 2754{
@@ -2356,31 +2756,31 @@ ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb,
2356 u16 fc; 2756 u16 fc;
2357 2757
2358 if (skb->len < 2) 2758 if (skb->len < 2)
2359 return TXRX_DROP; 2759 return RX_DROP_UNUSABLE;
2360 2760
2361 mgmt = (struct ieee80211_mgmt *) skb->data; 2761 mgmt = (struct ieee80211_mgmt *) skb->data;
2362 fc = le16_to_cpu(mgmt->frame_control); 2762 fc = le16_to_cpu(mgmt->frame_control);
2363 2763
2364 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) 2764 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
2365 return TXRX_CONTINUE; 2765 return RX_CONTINUE;
2366 2766
2367 if (skb->len < 24) 2767 if (skb->len < 24)
2368 return TXRX_DROP; 2768 return RX_DROP_MONITOR;
2369 2769
2370 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { 2770 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2371 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP) { 2771 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP) {
2372 ieee80211_rx_mgmt_probe_resp(dev, mgmt, 2772 ieee80211_rx_mgmt_probe_resp(dev, mgmt,
2373 skb->len, rx_status); 2773 skb->len, rx_status);
2374 dev_kfree_skb(skb); 2774 dev_kfree_skb(skb);
2375 return TXRX_QUEUED; 2775 return RX_QUEUED;
2376 } else if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) { 2776 } else if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) {
2377 ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, 2777 ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len,
2378 rx_status); 2778 rx_status);
2379 dev_kfree_skb(skb); 2779 dev_kfree_skb(skb);
2380 return TXRX_QUEUED; 2780 return RX_QUEUED;
2381 } 2781 }
2382 } 2782 }
2383 return TXRX_CONTINUE; 2783 return RX_CONTINUE;
2384} 2784}
2385 2785
2386 2786
@@ -2629,7 +3029,7 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
2629 } 3029 }
2630 3030
2631 spin_lock_bh(&local->sta_bss_lock); 3031 spin_lock_bh(&local->sta_bss_lock);
2632 freq = local->oper_channel->freq; 3032 freq = local->oper_channel->center_freq;
2633 list_for_each_entry(bss, &local->sta_bss_list, list) { 3033 list_for_each_entry(bss, &local->sta_bss_list, list) {
2634 if (!(bss->capability & WLAN_CAPABILITY_ESS)) 3034 if (!(bss->capability & WLAN_CAPABILITY_ESS))
2635 continue; 3035 continue;
@@ -2660,7 +3060,7 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
2660 spin_unlock_bh(&local->sta_bss_lock); 3060 spin_unlock_bh(&local->sta_bss_lock);
2661 3061
2662 if (selected) { 3062 if (selected) {
2663 ieee80211_set_channel(local, -1, selected->freq); 3063 ieee80211_set_freq(local, selected->freq);
2664 if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) 3064 if (!(ifsta->flags & IEEE80211_STA_SSID_SET))
2665 ieee80211_sta_set_ssid(dev, selected->ssid, 3065 ieee80211_sta_set_ssid(dev, selected->ssid,
2666 selected->ssid_len); 3066 selected->ssid_len);
@@ -2684,162 +3084,6 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
2684 return -1; 3084 return -1;
2685} 3085}
2686 3086
2687static int ieee80211_sta_join_ibss(struct net_device *dev,
2688 struct ieee80211_if_sta *ifsta,
2689 struct ieee80211_sta_bss *bss)
2690{
2691 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2692 int res, rates, i, j;
2693 struct sk_buff *skb;
2694 struct ieee80211_mgmt *mgmt;
2695 struct ieee80211_tx_control control;
2696 struct ieee80211_hw_mode *mode;
2697 struct rate_selection ratesel;
2698 u8 *pos;
2699 struct ieee80211_sub_if_data *sdata;
2700
2701 /* Remove possible STA entries from other IBSS networks. */
2702 sta_info_flush(local, NULL);
2703
2704 if (local->ops->reset_tsf) {
2705 /* Reset own TSF to allow time synchronization work. */
2706 local->ops->reset_tsf(local_to_hw(local));
2707 }
2708 memcpy(ifsta->bssid, bss->bssid, ETH_ALEN);
2709 res = ieee80211_if_config(dev);
2710 if (res)
2711 return res;
2712
2713 local->hw.conf.beacon_int = bss->beacon_int >= 10 ? bss->beacon_int : 10;
2714
2715 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2716 sdata->drop_unencrypted = bss->capability &
2717 WLAN_CAPABILITY_PRIVACY ? 1 : 0;
2718
2719 res = ieee80211_set_channel(local, -1, bss->freq);
2720
2721 if (!(local->oper_channel->flag & IEEE80211_CHAN_W_IBSS)) {
2722 printk(KERN_DEBUG "%s: IBSS not allowed on channel %d "
2723 "(%d MHz)\n", dev->name, local->hw.conf.channel,
2724 local->hw.conf.freq);
2725 return -1;
2726 }
2727
2728 /* Set beacon template based on scan results */
2729 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
2730 do {
2731 if (!skb)
2732 break;
2733
2734 skb_reserve(skb, local->hw.extra_tx_headroom);
2735
2736 mgmt = (struct ieee80211_mgmt *)
2737 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
2738 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
2739 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
2740 IEEE80211_STYPE_BEACON);
2741 memset(mgmt->da, 0xff, ETH_ALEN);
2742 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
2743 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
2744 mgmt->u.beacon.beacon_int =
2745 cpu_to_le16(local->hw.conf.beacon_int);
2746 mgmt->u.beacon.capab_info = cpu_to_le16(bss->capability);
2747
2748 pos = skb_put(skb, 2 + ifsta->ssid_len);
2749 *pos++ = WLAN_EID_SSID;
2750 *pos++ = ifsta->ssid_len;
2751 memcpy(pos, ifsta->ssid, ifsta->ssid_len);
2752
2753 rates = bss->supp_rates_len;
2754 if (rates > 8)
2755 rates = 8;
2756 pos = skb_put(skb, 2 + rates);
2757 *pos++ = WLAN_EID_SUPP_RATES;
2758 *pos++ = rates;
2759 memcpy(pos, bss->supp_rates, rates);
2760
2761 pos = skb_put(skb, 2 + 1);
2762 *pos++ = WLAN_EID_DS_PARAMS;
2763 *pos++ = 1;
2764 *pos++ = bss->channel;
2765
2766 pos = skb_put(skb, 2 + 2);
2767 *pos++ = WLAN_EID_IBSS_PARAMS;
2768 *pos++ = 2;
2769 /* FIX: set ATIM window based on scan results */
2770 *pos++ = 0;
2771 *pos++ = 0;
2772
2773 if (bss->supp_rates_len > 8) {
2774 rates = bss->supp_rates_len - 8;
2775 pos = skb_put(skb, 2 + rates);
2776 *pos++ = WLAN_EID_EXT_SUPP_RATES;
2777 *pos++ = rates;
2778 memcpy(pos, &bss->supp_rates[8], rates);
2779 }
2780
2781 memset(&control, 0, sizeof(control));
2782 rate_control_get_rate(dev, local->oper_hw_mode, skb, &ratesel);
2783 if (!ratesel.rate) {
2784 printk(KERN_DEBUG "%s: Failed to determine TX rate "
2785 "for IBSS beacon\n", dev->name);
2786 break;
2787 }
2788 control.vif = &sdata->vif;
2789 control.tx_rate =
2790 (sdata->bss_conf.use_short_preamble &&
2791 (ratesel.rate->flags & IEEE80211_RATE_PREAMBLE2)) ?
2792 ratesel.rate->val2 : ratesel.rate->val;
2793 control.antenna_sel_tx = local->hw.conf.antenna_sel_tx;
2794 control.power_level = local->hw.conf.power_level;
2795 control.flags |= IEEE80211_TXCTL_NO_ACK;
2796 control.retry_limit = 1;
2797
2798 ifsta->probe_resp = skb_copy(skb, GFP_ATOMIC);
2799 if (ifsta->probe_resp) {
2800 mgmt = (struct ieee80211_mgmt *)
2801 ifsta->probe_resp->data;
2802 mgmt->frame_control =
2803 IEEE80211_FC(IEEE80211_FTYPE_MGMT,
2804 IEEE80211_STYPE_PROBE_RESP);
2805 } else {
2806 printk(KERN_DEBUG "%s: Could not allocate ProbeResp "
2807 "template for IBSS\n", dev->name);
2808 }
2809
2810 if (local->ops->beacon_update &&
2811 local->ops->beacon_update(local_to_hw(local),
2812 skb, &control) == 0) {
2813 printk(KERN_DEBUG "%s: Configured IBSS beacon "
2814 "template based on scan results\n", dev->name);
2815 skb = NULL;
2816 }
2817
2818 rates = 0;
2819 mode = local->oper_hw_mode;
2820 for (i = 0; i < bss->supp_rates_len; i++) {
2821 int bitrate = (bss->supp_rates[i] & 0x7f) * 5;
2822 for (j = 0; j < mode->num_rates; j++)
2823 if (mode->rates[j].rate == bitrate)
2824 rates |= BIT(j);
2825 }
2826 ifsta->supp_rates_bits = rates;
2827 } while (0);
2828
2829 if (skb) {
2830 printk(KERN_DEBUG "%s: Failed to configure IBSS beacon "
2831 "template\n", dev->name);
2832 dev_kfree_skb(skb);
2833 }
2834
2835 ifsta->state = IEEE80211_IBSS_JOINED;
2836 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
2837
2838 ieee80211_rx_bss_put(dev, bss);
2839
2840 return res;
2841}
2842
2843 3087
2844static int ieee80211_sta_create_ibss(struct net_device *dev, 3088static int ieee80211_sta_create_ibss(struct net_device *dev,
2845 struct ieee80211_if_sta *ifsta) 3089 struct ieee80211_if_sta *ifsta)
@@ -2847,7 +3091,7 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
2847 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3091 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2848 struct ieee80211_sta_bss *bss; 3092 struct ieee80211_sta_bss *bss;
2849 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 3093 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2850 struct ieee80211_hw_mode *mode; 3094 struct ieee80211_supported_band *sband;
2851 u8 bssid[ETH_ALEN], *pos; 3095 u8 bssid[ETH_ALEN], *pos;
2852 int i; 3096 int i;
2853 DECLARE_MAC_BUF(mac); 3097 DECLARE_MAC_BUF(mac);
@@ -2869,28 +3113,28 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
2869 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n", 3113 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n",
2870 dev->name, print_mac(mac, bssid)); 3114 dev->name, print_mac(mac, bssid));
2871 3115
2872 bss = ieee80211_rx_bss_add(dev, bssid, local->hw.conf.channel, 3116 bss = ieee80211_rx_bss_add(dev, bssid,
3117 local->hw.conf.channel->center_freq,
2873 sdata->u.sta.ssid, sdata->u.sta.ssid_len); 3118 sdata->u.sta.ssid, sdata->u.sta.ssid_len);
2874 if (!bss) 3119 if (!bss)
2875 return -ENOMEM; 3120 return -ENOMEM;
2876 3121
2877 mode = local->oper_hw_mode; 3122 bss->band = local->hw.conf.channel->band;
3123 sband = local->hw.wiphy->bands[bss->band];
2878 3124
2879 if (local->hw.conf.beacon_int == 0) 3125 if (local->hw.conf.beacon_int == 0)
2880 local->hw.conf.beacon_int = 100; 3126 local->hw.conf.beacon_int = 100;
2881 bss->beacon_int = local->hw.conf.beacon_int; 3127 bss->beacon_int = local->hw.conf.beacon_int;
2882 bss->hw_mode = local->hw.conf.phymode;
2883 bss->freq = local->hw.conf.freq;
2884 bss->last_update = jiffies; 3128 bss->last_update = jiffies;
2885 bss->capability = WLAN_CAPABILITY_IBSS; 3129 bss->capability = WLAN_CAPABILITY_IBSS;
2886 if (sdata->default_key) { 3130 if (sdata->default_key) {
2887 bss->capability |= WLAN_CAPABILITY_PRIVACY; 3131 bss->capability |= WLAN_CAPABILITY_PRIVACY;
2888 } else 3132 } else
2889 sdata->drop_unencrypted = 0; 3133 sdata->drop_unencrypted = 0;
2890 bss->supp_rates_len = mode->num_rates; 3134 bss->supp_rates_len = sband->n_bitrates;
2891 pos = bss->supp_rates; 3135 pos = bss->supp_rates;
2892 for (i = 0; i < mode->num_rates; i++) { 3136 for (i = 0; i < sband->n_bitrates; i++) {
2893 int rate = mode->rates[i].rate; 3137 int rate = sband->bitrates[i].bitrate;
2894 *pos++ = (u8) (rate / 5); 3138 *pos++ = (u8) (rate / 5);
2895 } 3139 }
2896 3140
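In the new per-band API the bitrate table stores rates in units of 100 kbps, while the Supported Rates element wants 500 kbps units, which is why the loop above writes bitrate / 5. A small sketch of that conversion; the rate table below is an assumed example, not taken from any driver:

#include <stdio.h>

int main(void)
{
	/* Bitrates in 100 kbps units, e.g. 10 = 1 Mbps, 540 = 54 Mbps. */
	int bitrates[] = { 10, 20, 55, 110, 60, 540 };
	int n = sizeof(bitrates) / sizeof(bitrates[0]);

	for (int i = 0; i < n; i++) {
		/* The Supported Rates IE carries the rate in 500 kbps units. */
		unsigned char ie_rate = (unsigned char)(bitrates[i] / 5);
		printf("%2d.%d Mbps -> IE byte 0x%02x\n",
		       bitrates[i] / 10, bitrates[i] % 10, ie_rate);
	}
	return 0;
}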
@@ -2939,7 +3183,8 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
2939 "%s\n", print_mac(mac, bssid), print_mac(mac2, ifsta->bssid)); 3183 "%s\n", print_mac(mac, bssid), print_mac(mac2, ifsta->bssid));
2940#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 3184#endif /* CONFIG_MAC80211_IBSS_DEBUG */
2941 if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0 && 3185 if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0 &&
2942 (bss = ieee80211_rx_bss_get(dev, bssid, local->hw.conf.channel, 3186 (bss = ieee80211_rx_bss_get(dev, bssid,
3187 local->hw.conf.channel->center_freq,
2943 ifsta->ssid, ifsta->ssid_len))) { 3188 ifsta->ssid, ifsta->ssid_len))) {
2944 printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" 3189 printk(KERN_DEBUG "%s: Selected IBSS BSSID %s"
2945 " based on configured SSID\n", 3190 " based on configured SSID\n",
@@ -2967,13 +3212,13 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
2967 if (time_after(jiffies, ifsta->ibss_join_req + 3212 if (time_after(jiffies, ifsta->ibss_join_req +
2968 IEEE80211_IBSS_JOIN_TIMEOUT)) { 3213 IEEE80211_IBSS_JOIN_TIMEOUT)) {
2969 if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) && 3214 if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) &&
2970 local->oper_channel->flag & IEEE80211_CHAN_W_IBSS) 3215 (!(local->oper_channel->flags &
3216 IEEE80211_CHAN_NO_IBSS)))
2971 return ieee80211_sta_create_ibss(dev, ifsta); 3217 return ieee80211_sta_create_ibss(dev, ifsta);
2972 if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) { 3218 if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) {
2973 printk(KERN_DEBUG "%s: IBSS not allowed on the" 3219 printk(KERN_DEBUG "%s: IBSS not allowed on"
2974 " configured channel %d (%d MHz)\n", 3220 " %d MHz\n", dev->name,
2975 dev->name, local->hw.conf.channel, 3221 local->hw.conf.channel->center_freq);
2976 local->hw.conf.freq);
2977 } 3222 }
2978 3223
2979 /* No IBSS found - decrease scan interval and continue 3224 /* No IBSS found - decrease scan interval and continue
@@ -2992,7 +3237,7 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
2992 3237
2993int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) 3238int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len)
2994{ 3239{
2995 struct ieee80211_sub_if_data *sdata; 3240 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2996 struct ieee80211_if_sta *ifsta; 3241 struct ieee80211_if_sta *ifsta;
2997 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3242 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2998 3243
@@ -3006,18 +3251,23 @@ int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len)
3006 int i; 3251 int i;
3007 3252
3008 memset(&qparam, 0, sizeof(qparam)); 3253 memset(&qparam, 0, sizeof(qparam));
3009 /* TODO: are these ok defaults for all hw_modes? */ 3254
3010 qparam.aifs = 2; 3255 qparam.aifs = 2;
3011 qparam.cw_min = 3256
3012 local->hw.conf.phymode == MODE_IEEE80211B ? 31 : 15; 3257 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
3258 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE))
3259 qparam.cw_min = 31;
3260 else
3261 qparam.cw_min = 15;
3262
3013 qparam.cw_max = 1023; 3263 qparam.cw_max = 1023;
3014 qparam.burst_time = 0; 3264 qparam.txop = 0;
3265
3015 for (i = IEEE80211_TX_QUEUE_DATA0; i < NUM_TX_DATA_QUEUES; i++) 3266 for (i = IEEE80211_TX_QUEUE_DATA0; i < NUM_TX_DATA_QUEUES; i++)
3016 {
3017 local->ops->conf_tx(local_to_hw(local), 3267 local->ops->conf_tx(local_to_hw(local),
3018 i + IEEE80211_TX_QUEUE_DATA0, 3268 i + IEEE80211_TX_QUEUE_DATA0,
3019 &qparam); 3269 &qparam);
3020 } 3270
3021 /* IBSS uses different parameters for Beacon sending */ 3271 /* IBSS uses different parameters for Beacon sending */
3022 qparam.cw_min++; 3272 qparam.cw_min++;
3023 qparam.cw_min *= 2; 3273 qparam.cw_min *= 2;
@@ -3026,7 +3276,6 @@ int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len)
3026 IEEE80211_TX_QUEUE_BEACON, &qparam); 3276 IEEE80211_TX_QUEUE_BEACON, &qparam);
3027 } 3277 }
3028 3278
3029 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3030 ifsta = &sdata->u.sta; 3279 ifsta = &sdata->u.sta;
3031 3280
3032 if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) 3281 if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0)
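The queue-parameter hunk above replaces the per-phymode check with a band test: cw_min defaults to 31 on 2.4 GHz unless the interface operates in g mode, otherwise 15, with cw_max fixed at 1023 (contention windows are of the form 2^n - 1). A standalone sketch of that selection; the enum and the g-mode flag are stand-ins for the kernel's types:

#include <stdio.h>
#include <stdbool.h>

enum band { BAND_2GHZ, BAND_5GHZ };

/* Pick the default minimum contention window: long-slot 2.4 GHz (11b-style)
 * operation uses 31, ERP/OFDM operation uses 15; both are 2^n - 1. */
static int default_cw_min(enum band band, bool operating_gmode)
{
	if (band == BAND_2GHZ && !operating_gmode)
		return 31;
	return 15;
}

int main(void)
{
	printf("2.4 GHz, b-only: cw_min=%d cw_max=%d\n",
	       default_cw_min(BAND_2GHZ, false), 1023);
	printf("2.4 GHz, g mode: cw_min=%d cw_max=%d\n",
	       default_cw_min(BAND_2GHZ, true), 1023);
	printf("5 GHz:           cw_min=%d cw_max=%d\n",
	       default_cw_min(BAND_5GHZ, false), 1023);
	return 0;
}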
@@ -3185,7 +3434,7 @@ void ieee80211_sta_scan_work(struct work_struct *work)
3185 container_of(work, struct ieee80211_local, scan_work.work); 3434 container_of(work, struct ieee80211_local, scan_work.work);
3186 struct net_device *dev = local->scan_dev; 3435 struct net_device *dev = local->scan_dev;
3187 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 3436 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3188 struct ieee80211_hw_mode *mode; 3437 struct ieee80211_supported_band *sband;
3189 struct ieee80211_channel *chan; 3438 struct ieee80211_channel *chan;
3190 int skip; 3439 int skip;
3191 unsigned long next_delay = 0; 3440 unsigned long next_delay = 0;
@@ -3195,44 +3444,59 @@ void ieee80211_sta_scan_work(struct work_struct *work)
3195 3444
3196 switch (local->scan_state) { 3445 switch (local->scan_state) {
3197 case SCAN_SET_CHANNEL: 3446 case SCAN_SET_CHANNEL:
3198 mode = local->scan_hw_mode; 3447 /*
3199 if (local->scan_hw_mode->list.next == &local->modes_list && 3448 * Get current scan band. scan_band may be IEEE80211_NUM_BANDS
3200 local->scan_channel_idx >= mode->num_channels) { 3449 * after we successfully scanned the last channel of the last
3450 * band (and the last band is supported by the hw)
3451 */
3452 if (local->scan_band < IEEE80211_NUM_BANDS)
3453 sband = local->hw.wiphy->bands[local->scan_band];
3454 else
3455 sband = NULL;
3456
3457 /*
3458 * If we are at an unsupported band and have more bands
3459 * left to scan, advance to the next supported one.
3460 */
3461 while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) {
3462 local->scan_band++;
3463 sband = local->hw.wiphy->bands[local->scan_band];
3464 local->scan_channel_idx = 0;
3465 }
3466
3467 /* if no more bands/channels left, complete scan */
3468 if (!sband || local->scan_channel_idx >= sband->n_channels) {
3201 ieee80211_scan_completed(local_to_hw(local)); 3469 ieee80211_scan_completed(local_to_hw(local));
3202 return; 3470 return;
3203 } 3471 }
3204 skip = !(local->enabled_modes & (1 << mode->mode)); 3472 skip = 0;
3205 chan = &mode->channels[local->scan_channel_idx]; 3473 chan = &sband->channels[local->scan_channel_idx];
3206 if (!(chan->flag & IEEE80211_CHAN_W_SCAN) || 3474
3475 if (chan->flags & IEEE80211_CHAN_DISABLED ||
3207 (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && 3476 (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
3208 !(chan->flag & IEEE80211_CHAN_W_IBSS)) || 3477 chan->flags & IEEE80211_CHAN_NO_IBSS))
3209 (local->hw_modes & local->enabled_modes &
3210 (1 << MODE_IEEE80211G) && mode->mode == MODE_IEEE80211B))
3211 skip = 1; 3478 skip = 1;
3212 3479
3213 if (!skip) { 3480 if (!skip) {
3214#if 0
3215 printk(KERN_DEBUG "%s: scan channel %d (%d MHz)\n",
3216 dev->name, chan->chan, chan->freq);
3217#endif
3218
3219 local->scan_channel = chan; 3481 local->scan_channel = chan;
3220 if (ieee80211_hw_config(local)) { 3482 if (ieee80211_hw_config(local)) {
3221 printk(KERN_DEBUG "%s: failed to set channel " 3483 printk(KERN_DEBUG "%s: failed to set freq to "
3222 "%d (%d MHz) for scan\n", dev->name, 3484 "%d MHz for scan\n", dev->name,
3223 chan->chan, chan->freq); 3485 chan->center_freq);
3224 skip = 1; 3486 skip = 1;
3225 } 3487 }
3226 } 3488 }
3227 3489
3490 /* advance state machine to next channel/band */
3228 local->scan_channel_idx++; 3491 local->scan_channel_idx++;
3229 if (local->scan_channel_idx >= local->scan_hw_mode->num_channels) { 3492 if (local->scan_channel_idx >= sband->n_channels) {
3230 if (local->scan_hw_mode->list.next != &local->modes_list) { 3493 /*
3231 local->scan_hw_mode = list_entry(local->scan_hw_mode->list.next, 3494 * scan_band may end up == IEEE80211_NUM_BANDS, but
3232 struct ieee80211_hw_mode, 3495 * we'll catch that case above and complete the scan
3233 list); 3496 * if that is the case.
3234 local->scan_channel_idx = 0; 3497 */
3235 } 3498 local->scan_band++;
3499 local->scan_channel_idx = 0;
3236 } 3500 }
3237 3501
3238 if (skip) 3502 if (skip)
@@ -3243,13 +3507,14 @@ void ieee80211_sta_scan_work(struct work_struct *work)
3243 local->scan_state = SCAN_SEND_PROBE; 3507 local->scan_state = SCAN_SEND_PROBE;
3244 break; 3508 break;
3245 case SCAN_SEND_PROBE: 3509 case SCAN_SEND_PROBE:
3246 if (local->scan_channel->flag & IEEE80211_CHAN_W_ACTIVE_SCAN) { 3510 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
3247 ieee80211_send_probe_req(dev, NULL, local->scan_ssid,
3248 local->scan_ssid_len);
3249 next_delay = IEEE80211_CHANNEL_TIME;
3250 } else
3251 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
3252 local->scan_state = SCAN_SET_CHANNEL; 3511 local->scan_state = SCAN_SET_CHANNEL;
3512
3513 if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN)
3514 break;
3515 ieee80211_send_probe_req(dev, NULL, local->scan_ssid,
3516 local->scan_ssid_len);
3517 next_delay = IEEE80211_CHANNEL_TIME;
3253 break; 3518 break;
3254 } 3519 }
3255 3520
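The rewritten scan loop above walks bands and channels with a two-state machine: SCAN_SET_CHANNEL tunes (or skips) the next channel, and SCAN_SEND_PROBE either waits out the passive dwell or sends a probe request and waits the active dwell. A condensed userspace sketch of that control flow; the channel list, dwell handling, and flag names are invented for illustration:

#include <stdio.h>

#define CHAN_DISABLED	0x1
#define CHAN_PASSIVE	0x2

struct chan { int freq; unsigned flags; };

int main(void)
{
	struct chan chans[] = {
		{ 2412, 0 }, { 2437, 0 },
		{ 5180, CHAN_PASSIVE }, { 5260, CHAN_DISABLED },
	};
	int n = sizeof(chans) / sizeof(chans[0]);

	for (int i = 0; i < n; i++) {
		/* SCAN_SET_CHANNEL: skip channels we may not use at all. */
		if (chans[i].flags & CHAN_DISABLED) {
			printf("%d MHz: skipped\n", chans[i].freq);
			continue;
		}
		/* SCAN_SEND_PROBE: passive channels only listen,
		 * active ones probe and use the shorter dwell. */
		if (chans[i].flags & CHAN_PASSIVE)
			printf("%d MHz: passive dwell\n", chans[i].freq);
		else
			printf("%d MHz: probe request + active dwell\n",
			       chans[i].freq);
	}
	return 0;
}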
@@ -3324,10 +3589,8 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
3324 } else 3589 } else
3325 local->scan_ssid_len = 0; 3590 local->scan_ssid_len = 0;
3326 local->scan_state = SCAN_SET_CHANNEL; 3591 local->scan_state = SCAN_SET_CHANNEL;
3327 local->scan_hw_mode = list_entry(local->modes_list.next,
3328 struct ieee80211_hw_mode,
3329 list);
3330 local->scan_channel_idx = 0; 3592 local->scan_channel_idx = 0;
3593 local->scan_band = IEEE80211_BAND_2GHZ;
3331 local->scan_dev = dev; 3594 local->scan_dev = dev;
3332 3595
3333 netif_tx_lock_bh(local->mdev); 3596 netif_tx_lock_bh(local->mdev);
@@ -3382,9 +3645,6 @@ ieee80211_sta_scan_result(struct net_device *dev,
3382 bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE)) 3645 bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE))
3383 return current_ev; 3646 return current_ev;
3384 3647
3385 if (!(local->enabled_modes & (1 << bss->hw_mode)))
3386 return current_ev;
3387
3388 memset(&iwe, 0, sizeof(iwe)); 3648 memset(&iwe, 0, sizeof(iwe));
3389 iwe.cmd = SIOCGIWAP; 3649 iwe.cmd = SIOCGIWAP;
3390 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 3650 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
@@ -3412,12 +3672,15 @@ ieee80211_sta_scan_result(struct net_device *dev,
3412 3672
3413 memset(&iwe, 0, sizeof(iwe)); 3673 memset(&iwe, 0, sizeof(iwe));
3414 iwe.cmd = SIOCGIWFREQ; 3674 iwe.cmd = SIOCGIWFREQ;
3415 iwe.u.freq.m = bss->channel; 3675 iwe.u.freq.m = bss->freq;
3416 iwe.u.freq.e = 0; 3676 iwe.u.freq.e = 6;
3417 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 3677 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
3418 IW_EV_FREQ_LEN); 3678 IW_EV_FREQ_LEN);
3419 iwe.u.freq.m = bss->freq * 100000; 3679
3420 iwe.u.freq.e = 1; 3680 memset(&iwe, 0, sizeof(iwe));
3681 iwe.cmd = SIOCGIWFREQ;
3682 iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
3683 iwe.u.freq.e = 0;
3421 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 3684 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
3422 IW_EV_FREQ_LEN); 3685 IW_EV_FREQ_LEN);
3423 3686
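The scan-result change above reports the frequency itself (m = MHz, e = 6, i.e. scaled to Hz) and, in a second SIOCGIWFREQ event, the channel number derived from it. A sketch of the usual MHz-to-channel mapping; this is a simplified version, mac80211's ieee80211_frequency_to_channel() covers more cases:

#include <stdio.h>

/* Map a center frequency in MHz to an 802.11 channel number.
 * Simplified: 2.4 GHz channels sit 5 MHz apart above 2407 MHz
 * (channel 14 at 2484 MHz is special), 5 GHz channels above 5000 MHz. */
static int freq_to_channel(int mhz)
{
	if (mhz == 2484)
		return 14;
	if (mhz < 2484)
		return (mhz - 2407) / 5;
	return (mhz - 5000) / 5;
}

int main(void)
{
	int freqs[] = { 2412, 2437, 2484, 5180, 5745 };

	for (unsigned i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
		printf("%d MHz -> channel %d\n", freqs[i],
		       freq_to_channel(freqs[i]));
	return 0;
}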
@@ -3557,10 +3820,13 @@ struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev,
3557 wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name); 3820 wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name);
3558 3821
3559 sta = sta_info_add(local, dev, addr, GFP_ATOMIC); 3822 sta = sta_info_add(local, dev, addr, GFP_ATOMIC);
3560 if (!sta) 3823 if (IS_ERR(sta))
3561 return NULL; 3824 return NULL;
3562 3825
3563 sta->supp_rates = sdata->u.sta.supp_rates_bits; 3826 sta->flags |= WLAN_STA_AUTHORIZED;
3827
3828 sta->supp_rates[local->hw.conf.channel->band] =
3829 sdata->u.sta.supp_rates_bits[local->hw.conf.channel->band];
3564 3830
3565 rate_control_rate_init(sta, local); 3831 rate_control_rate_init(sta, local);
3566 3832
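With the band split, the IBSS peer's supported rates are kept as one bitmap per band: bit j is set when the peer advertises the bitrate at index j of the local band table, which is what the earlier loop over bss->supp_rates computes with BIT(j). A standalone sketch of that matching; the rate tables below are made up for the example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Local bitrate table for one band, in 100 kbps units. */
	int local_rates[] = { 10, 20, 55, 110, 60, 120, 240, 540 };
	int n_local = sizeof(local_rates) / sizeof(local_rates[0]);

	/* Peer's Supported Rates IE bytes: the low 7 bits are the rate in
	 * 500 kbps units, the high bit marks a basic rate. */
	uint8_t peer_ie[] = { 0x82, 0x84, 0x0b, 0x6c };
	uint64_t bitmap = 0;

	for (unsigned i = 0; i < sizeof(peer_ie); i++) {
		int peer_100kbps = (peer_ie[i] & 0x7f) * 5;

		for (int j = 0; j < n_local; j++)
			if (local_rates[j] == peer_100kbps)
				bitmap |= 1ULL << j;
	}

	printf("supported-rate bitmap: 0x%llx\n", (unsigned long long)bitmap);
	return 0;
}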
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index ed57fb8e82fc..eac9c59dbc4d 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -13,6 +13,7 @@
13#include <linux/etherdevice.h> 13#include <linux/etherdevice.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/rcupdate.h> 15#include <linux/rcupdate.h>
16#include <linux/rtnetlink.h>
16#include <net/mac80211.h> 17#include <net/mac80211.h>
17#include "ieee80211_i.h" 18#include "ieee80211_i.h"
18#include "debugfs_key.h" 19#include "debugfs_key.h"
@@ -34,6 +35,10 @@
34 * 35 *
35 * All operations here are called under RTNL so no extra locking is 36 * All operations here are called under RTNL so no extra locking is
36 * required. 37 * required.
38 *
39 * NOTE: This code requires that sta info *destruction* is done under
40 * RTNL, otherwise it can try to access already freed STA structs
41 * when a STA key is being freed.
37 */ 42 */
38 43
39static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 44static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -84,16 +89,25 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
84 key->conf.keyidx, print_mac(mac, addr), ret); 89 key->conf.keyidx, print_mac(mac, addr), ret);
85} 90}
86 91
92static void ieee80211_key_mark_hw_accel_off(struct ieee80211_key *key)
93{
94 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
95 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
96 key->flags |= KEY_FLAG_REMOVE_FROM_HARDWARE;
97 }
98}
99
87static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) 100static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
88{ 101{
89 const u8 *addr; 102 const u8 *addr;
90 int ret; 103 int ret;
91 DECLARE_MAC_BUF(mac); 104 DECLARE_MAC_BUF(mac);
92 105
93 if (!key->local->ops->set_key) 106 if (!key || !key->local->ops->set_key)
94 return; 107 return;
95 108
96 if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 109 if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
110 !(key->flags & KEY_FLAG_REMOVE_FROM_HARDWARE))
97 return; 111 return;
98 112
99 addr = get_mac_for_key(key); 113 addr = get_mac_for_key(key);
@@ -108,12 +122,11 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
108 wiphy_name(key->local->hw.wiphy), 122 wiphy_name(key->local->hw.wiphy),
109 key->conf.keyidx, print_mac(mac, addr), ret); 123 key->conf.keyidx, print_mac(mac, addr), ret);
110 124
111 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 125 key->flags &= ~(KEY_FLAG_UPLOADED_TO_HARDWARE |
126 KEY_FLAG_REMOVE_FROM_HARDWARE);
112} 127}
113 128
114struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, 129struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
115 struct sta_info *sta,
116 enum ieee80211_key_alg alg,
117 int idx, 130 int idx,
118 size_t key_len, 131 size_t key_len,
119 const u8 *key_data) 132 const u8 *key_data)
@@ -137,10 +150,7 @@ struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata,
137 key->conf.keyidx = idx; 150 key->conf.keyidx = idx;
138 key->conf.keylen = key_len; 151 key->conf.keylen = key_len;
139 memcpy(key->conf.key, key_data, key_len); 152 memcpy(key->conf.key, key_data, key_len);
140 153 INIT_LIST_HEAD(&key->list);
141 key->local = sdata->local;
142 key->sdata = sdata;
143 key->sta = sta;
144 154
145 if (alg == ALG_CCMP) { 155 if (alg == ALG_CCMP) {
146 /* 156 /*
@@ -154,13 +164,68 @@ struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata,
154 } 164 }
155 } 165 }
156 166
157 ieee80211_debugfs_key_add(key->local, key); 167 return key;
168}
158 169
159 /* remove key first */ 170static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
160 if (sta) 171 struct sta_info *sta,
161 ieee80211_key_free(sta->key); 172 struct ieee80211_key *key,
162 else 173 struct ieee80211_key *new)
163 ieee80211_key_free(sdata->keys[idx]); 174{
175 int idx, defkey;
176
177 if (sta) {
178 rcu_assign_pointer(sta->key, new);
179 } else {
180 WARN_ON(new && key && new->conf.keyidx != key->conf.keyidx);
181
182 if (key)
183 idx = key->conf.keyidx;
184 else
185 idx = new->conf.keyidx;
186
187 defkey = key && sdata->default_key == key;
188
189 if (defkey && !new)
190 ieee80211_set_default_key(sdata, -1);
191
192 rcu_assign_pointer(sdata->keys[idx], new);
193 if (new)
194 list_add(&new->list, &sdata->key_list);
195
196 if (defkey && new)
197 ieee80211_set_default_key(sdata, new->conf.keyidx);
198 }
199
200 if (key) {
201 ieee80211_key_mark_hw_accel_off(key);
202 /*
203 * We'll use an empty list to indicate that the key
204 * has already been removed.
205 */
206 list_del_init(&key->list);
207 }
208}
209
210void ieee80211_key_link(struct ieee80211_key *key,
211 struct ieee80211_sub_if_data *sdata,
212 struct sta_info *sta)
213{
214 struct ieee80211_key *old_key;
215 int idx;
216
217 ASSERT_RTNL();
218 might_sleep();
219
220 BUG_ON(!sdata);
221 BUG_ON(!key);
222
223 idx = key->conf.keyidx;
224 key->local = sdata->local;
225 key->sdata = sdata;
226 key->sta = sta;
227
228 ieee80211_debugfs_key_add(key->local, key);
164 229
165 if (sta) { 230 if (sta) {
166 ieee80211_debugfs_key_sta_link(key, sta); 231 ieee80211_debugfs_key_sta_link(key, sta);
@@ -186,50 +251,59 @@ struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata,
186 } 251 }
187 } 252 }
188 253
189 /* enable hwaccel if appropriate */
190 if (netif_running(key->sdata->dev))
191 ieee80211_key_enable_hw_accel(key);
192
193 if (sta) 254 if (sta)
194 rcu_assign_pointer(sta->key, key); 255 old_key = sta->key;
195 else 256 else
196 rcu_assign_pointer(sdata->keys[idx], key); 257 old_key = sdata->keys[idx];
197 258
198 list_add(&key->list, &sdata->key_list); 259 __ieee80211_key_replace(sdata, sta, old_key, key);
199 260
200 return key; 261 if (old_key) {
262 synchronize_rcu();
263 ieee80211_key_free(old_key);
264 }
265
266 if (netif_running(sdata->dev))
267 ieee80211_key_enable_hw_accel(key);
201} 268}
202 269
203void ieee80211_key_free(struct ieee80211_key *key) 270void ieee80211_key_free(struct ieee80211_key *key)
204{ 271{
272 ASSERT_RTNL();
273 might_sleep();
274
205 if (!key) 275 if (!key)
206 return; 276 return;
207 277
208 if (key->sta) { 278 if (key->sdata) {
209 rcu_assign_pointer(key->sta->key, NULL); 279 /*
210 } else { 280 * Replace key with nothingness.
211 if (key->sdata->default_key == key) 281 *
212 ieee80211_set_default_key(key->sdata, -1); 282 * Because other code may have key reference (RCU protected)
213 if (key->conf.keyidx >= 0 && 283 * right now, we then wait for a grace period before freeing
214 key->conf.keyidx < NUM_DEFAULT_KEYS) 284 * it.
215 rcu_assign_pointer(key->sdata->keys[key->conf.keyidx], 285 * An empty list indicates it was never added to the key list
216 NULL); 286 * or has been removed already. It may, however, still be in
217 else 287 * hardware for acceleration.
218 WARN_ON(1); 288 */
219 } 289 if (!list_empty(&key->list))
290 __ieee80211_key_replace(key->sdata, key->sta,
291 key, NULL);
220 292
221 /* wait for all key users to complete */ 293 synchronize_rcu();
222 synchronize_rcu();
223 294
224 /* remove from hwaccel if appropriate */ 295 /*
225 ieee80211_key_disable_hw_accel(key); 296 * Remove from hwaccel if appropriate, this will
297 * only happen when the key is actually unlinked,
298 * it will already be done when the key was replaced.
299 */
300 ieee80211_key_disable_hw_accel(key);
301 }
226 302
227 if (key->conf.alg == ALG_CCMP) 303 if (key->conf.alg == ALG_CCMP)
228 ieee80211_aes_key_free(key->u.ccmp.tfm); 304 ieee80211_aes_key_free(key->u.ccmp.tfm);
229 ieee80211_debugfs_key_remove(key); 305 ieee80211_debugfs_key_remove(key);
230 306
231 list_del(&key->list);
232
233 kfree(key); 307 kfree(key);
234} 308}
235 309
@@ -253,6 +327,10 @@ void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx)
253void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) 327void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata)
254{ 328{
255 struct ieee80211_key *key, *tmp; 329 struct ieee80211_key *key, *tmp;
330 LIST_HEAD(tmp_list);
331
332 ASSERT_RTNL();
333 might_sleep();
256 334
257 list_for_each_entry_safe(key, tmp, &sdata->key_list, list) 335 list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
258 ieee80211_key_free(key); 336 ieee80211_key_free(key);
@@ -262,8 +340,10 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
262{ 340{
263 struct ieee80211_key *key; 341 struct ieee80211_key *key;
264 342
265 WARN_ON(!netif_running(sdata->dev)); 343 ASSERT_RTNL();
266 if (!netif_running(sdata->dev)) 344 might_sleep();
345
346 if (WARN_ON(!netif_running(sdata->dev)))
267 return; 347 return;
268 348
269 list_for_each_entry(key, &sdata->key_list, list) 349 list_for_each_entry(key, &sdata->key_list, list)
@@ -274,6 +354,9 @@ void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata)
274{ 354{
275 struct ieee80211_key *key; 355 struct ieee80211_key *key;
276 356
357 ASSERT_RTNL();
358 might_sleep();
359
277 list_for_each_entry(key, &sdata->key_list, list) 360 list_for_each_entry(key, &sdata->key_list, list)
278 ieee80211_key_disable_hw_accel(key); 361 ieee80211_key_disable_hw_accel(key);
279} 362}
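The key handling above follows the standard RCU update pattern: publish the replacement with rcu_assign_pointer(), wait for a grace period with synchronize_rcu(), and only then free the old key that concurrent readers may still have been dereferencing. A userspace sketch of that ordering; the RCU primitives are stubbed out here and only stand in for the kernel calls named in the comments:

#include <stdio.h>
#include <stdlib.h>

struct key { int idx; };

/* Stand-ins for the kernel primitives: rcu_assign_pointer() publishes the
 * new pointer with the required barrier, synchronize_rcu() blocks until
 * every reader that might still see the old pointer has finished. */
#define publish_pointer(p, v)	((p) = (v))
static void wait_for_readers(void) { /* synchronize_rcu() in the kernel */ }

static struct key *current_key;

static void replace_key(struct key *new)
{
	struct key *old = current_key;

	publish_pointer(current_key, new);	/* readers now see the new key */
	wait_for_readers();			/* grace period for old readers */
	free(old);				/* only now is freeing safe */
}

int main(void)
{
	struct key *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

	a->idx = 0;
	b->idx = 1;
	current_key = a;
	replace_key(b);
	printf("active key index: %d\n", current_key->idx);
	free(current_key);
	return 0;
}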
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 3b77410588e7..9762803e4876 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -63,6 +63,7 @@
63 * RC_PID_ARITH_SHIFT. 63 * RC_PID_ARITH_SHIFT.
64 */ 64 */
65 65
66
66/* Adjust the rate while ensuring that we won't switch to a lower rate if it 67/* Adjust the rate while ensuring that we won't switch to a lower rate if it
67 * exhibited a worse failed frames behaviour and we'll choose the highest rate 68 * exhibited a worse failed frames behaviour and we'll choose the highest rate
68 * whose failed frames behaviour is not worse than the one of the original rate 69 * whose failed frames behaviour is not worse than the one of the original rate
@@ -72,14 +73,14 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
72 struct rc_pid_rateinfo *rinfo) 73 struct rc_pid_rateinfo *rinfo)
73{ 74{
74 struct ieee80211_sub_if_data *sdata; 75 struct ieee80211_sub_if_data *sdata;
75 struct ieee80211_hw_mode *mode; 76 struct ieee80211_supported_band *sband;
76 int cur_sorted, new_sorted, probe, tmp, n_bitrates; 77 int cur_sorted, new_sorted, probe, tmp, n_bitrates, band;
77 int cur = sta->txrate; 78 int cur = sta->txrate_idx;
78 79
79 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 80 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
80 81 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
81 mode = local->oper_hw_mode; 82 band = sband->band;
82 n_bitrates = mode->num_rates; 83 n_bitrates = sband->n_bitrates;
83 84
84 /* Map passed arguments to sorted values. */ 85 /* Map passed arguments to sorted values. */
85 cur_sorted = rinfo[cur].rev_index; 86 cur_sorted = rinfo[cur].rev_index;
@@ -97,20 +98,20 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
97 /* Ensure that the rate decrease isn't disadvantageous. */ 98 /* Ensure that the rate decrease isn't disadvantageous. */
98 for (probe = cur_sorted; probe >= new_sorted; probe--) 99 for (probe = cur_sorted; probe >= new_sorted; probe--)
99 if (rinfo[probe].diff <= rinfo[cur_sorted].diff && 100 if (rinfo[probe].diff <= rinfo[cur_sorted].diff &&
100 rate_supported(sta, mode, rinfo[probe].index)) 101 rate_supported(sta, band, rinfo[probe].index))
101 tmp = probe; 102 tmp = probe;
102 } else { 103 } else {
103 /* Look for rate increase with zero (or below) cost. */ 104 /* Look for rate increase with zero (or below) cost. */
104 for (probe = new_sorted + 1; probe < n_bitrates; probe++) 105 for (probe = new_sorted + 1; probe < n_bitrates; probe++)
105 if (rinfo[probe].diff <= rinfo[new_sorted].diff && 106 if (rinfo[probe].diff <= rinfo[new_sorted].diff &&
106 rate_supported(sta, mode, rinfo[probe].index)) 107 rate_supported(sta, band, rinfo[probe].index))
107 tmp = probe; 108 tmp = probe;
108 } 109 }
109 110
110 /* Fit the rate found to the nearest supported rate. */ 111 /* Fit the rate found to the nearest supported rate. */
111 do { 112 do {
112 if (rate_supported(sta, mode, rinfo[tmp].index)) { 113 if (rate_supported(sta, band, rinfo[tmp].index)) {
113 sta->txrate = rinfo[tmp].index; 114 sta->txrate_idx = rinfo[tmp].index;
114 break; 115 break;
115 } 116 }
116 if (adj < 0) 117 if (adj < 0)
@@ -122,7 +123,7 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
122#ifdef CONFIG_MAC80211_DEBUGFS 123#ifdef CONFIG_MAC80211_DEBUGFS
123 rate_control_pid_event_rate_change( 124 rate_control_pid_event_rate_change(
124 &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events, 125 &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events,
125 cur, mode->rates[cur].rate); 126 sta->txrate_idx, sband->bitrates[sta->txrate_idx].bitrate);
126#endif 127#endif
127} 128}
128 129
@@ -149,7 +150,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
149{ 150{
150 struct rc_pid_sta_info *spinfo = sta->rate_ctrl_priv; 151 struct rc_pid_sta_info *spinfo = sta->rate_ctrl_priv;
151 struct rc_pid_rateinfo *rinfo = pinfo->rinfo; 152 struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
152 struct ieee80211_hw_mode *mode; 153 struct ieee80211_supported_band *sband;
153 u32 pf; 154 u32 pf;
154 s32 err_avg; 155 s32 err_avg;
155 u32 err_prop; 156 u32 err_prop;
@@ -158,7 +159,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
158 int adj, i, j, tmp; 159 int adj, i, j, tmp;
159 unsigned long period; 160 unsigned long period;
160 161
161 mode = local->oper_hw_mode; 162 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
162 spinfo = sta->rate_ctrl_priv; 163 spinfo = sta->rate_ctrl_priv;
163 164
164 /* In case nothing happened during the previous control interval, turn 165 /* In case nothing happened during the previous control interval, turn
@@ -184,18 +185,18 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
184 spinfo->tx_num_failed = 0; 185 spinfo->tx_num_failed = 0;
185 186
186 /* If we just switched rate, update the rate behaviour info. */ 187 /* If we just switched rate, update the rate behaviour info. */
187 if (pinfo->oldrate != sta->txrate) { 188 if (pinfo->oldrate != sta->txrate_idx) {
188 189
189 i = rinfo[pinfo->oldrate].rev_index; 190 i = rinfo[pinfo->oldrate].rev_index;
190 j = rinfo[sta->txrate].rev_index; 191 j = rinfo[sta->txrate_idx].rev_index;
191 192
192 tmp = (pf - spinfo->last_pf); 193 tmp = (pf - spinfo->last_pf);
193 tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT); 194 tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT);
194 195
195 rinfo[j].diff = rinfo[i].diff + tmp; 196 rinfo[j].diff = rinfo[i].diff + tmp;
196 pinfo->oldrate = sta->txrate; 197 pinfo->oldrate = sta->txrate_idx;
197 } 198 }
198 rate_control_pid_normalize(pinfo, mode->num_rates); 199 rate_control_pid_normalize(pinfo, sband->n_bitrates);
199 200
200 /* Compute the proportional, integral and derivative errors. */ 201 /* Compute the proportional, integral and derivative errors. */
201 err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf; 202 err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf;
@@ -236,8 +237,10 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
236 struct sta_info *sta; 237 struct sta_info *sta;
237 struct rc_pid_sta_info *spinfo; 238 struct rc_pid_sta_info *spinfo;
238 unsigned long period; 239 unsigned long period;
240 struct ieee80211_supported_band *sband;
239 241
240 sta = sta_info_get(local, hdr->addr1); 242 sta = sta_info_get(local, hdr->addr1);
243 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
241 244
242 if (!sta) 245 if (!sta)
243 return; 246 return;
@@ -245,13 +248,13 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
245 /* Don't update the state if we're not controlling the rate. */ 248 /* Don't update the state if we're not controlling the rate. */
246 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 249 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
247 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) { 250 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) {
248 sta->txrate = sdata->bss->max_ratectrl_rateidx; 251 sta->txrate_idx = sdata->bss->max_ratectrl_rateidx;
249 return; 252 return;
250 } 253 }
251 254
252 /* Ignore all frames that were sent with a different rate than the rate 255 /* Ignore all frames that were sent with a different rate than the rate
253 * we currently advise mac80211 to use. */ 256 * we currently advise mac80211 to use. */
254 if (status->control.rate != &local->oper_hw_mode->rates[sta->txrate]) 257 if (status->control.tx_rate != &sband->bitrates[sta->txrate_idx])
255 goto ignore; 258 goto ignore;
256 259
257 spinfo = sta->rate_ctrl_priv; 260 spinfo = sta->rate_ctrl_priv;
@@ -277,9 +280,6 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
277 sta->tx_num_consecutive_failures++; 280 sta->tx_num_consecutive_failures++;
278 sta->tx_num_mpdu_fail++; 281 sta->tx_num_mpdu_fail++;
279 } else { 282 } else {
280 sta->last_ack_rssi[0] = sta->last_ack_rssi[1];
281 sta->last_ack_rssi[1] = sta->last_ack_rssi[2];
282 sta->last_ack_rssi[2] = status->ack_signal;
283 sta->tx_num_consecutive_failures = 0; 283 sta->tx_num_consecutive_failures = 0;
284 sta->tx_num_mpdu_ok++; 284 sta->tx_num_mpdu_ok++;
285 } 285 }
@@ -298,7 +298,7 @@ ignore:
298} 298}
299 299
300static void rate_control_pid_get_rate(void *priv, struct net_device *dev, 300static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
301 struct ieee80211_hw_mode *mode, 301 struct ieee80211_supported_band *sband,
302 struct sk_buff *skb, 302 struct sk_buff *skb,
303 struct rate_selection *sel) 303 struct rate_selection *sel)
304{ 304{
@@ -316,7 +316,7 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
316 fc = le16_to_cpu(hdr->frame_control); 316 fc = le16_to_cpu(hdr->frame_control);
317 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || 317 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
318 is_multicast_ether_addr(hdr->addr1) || !sta) { 318 is_multicast_ether_addr(hdr->addr1) || !sta) {
319 sel->rate = rate_lowest(local, mode, sta); 319 sel->rate = rate_lowest(local, sband, sta);
320 if (sta) 320 if (sta)
321 sta_info_put(sta); 321 sta_info_put(sta);
322 return; 322 return;
@@ -325,23 +325,23 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
325 /* If a forced rate is in effect, select it. */ 325 /* If a forced rate is in effect, select it. */
326 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 326 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
327 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) 327 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1)
328 sta->txrate = sdata->bss->force_unicast_rateidx; 328 sta->txrate_idx = sdata->bss->force_unicast_rateidx;
329 329
330 rateidx = sta->txrate; 330 rateidx = sta->txrate_idx;
331 331
332 if (rateidx >= mode->num_rates) 332 if (rateidx >= sband->n_bitrates)
333 rateidx = mode->num_rates - 1; 333 rateidx = sband->n_bitrates - 1;
334 334
335 sta->last_txrate = rateidx; 335 sta->last_txrate_idx = rateidx;
336 336
337 sta_info_put(sta); 337 sta_info_put(sta);
338 338
339 sel->rate = &mode->rates[rateidx]; 339 sel->rate = &sband->bitrates[rateidx];
340 340
341#ifdef CONFIG_MAC80211_DEBUGFS 341#ifdef CONFIG_MAC80211_DEBUGFS
342 rate_control_pid_event_tx_rate( 342 rate_control_pid_event_tx_rate(
343 &((struct rc_pid_sta_info *) sta->rate_ctrl_priv)->events, 343 &((struct rc_pid_sta_info *) sta->rate_ctrl_priv)->events,
344 rateidx, mode->rates[rateidx].rate); 344 rateidx, sband->bitrates[rateidx].bitrate);
345#endif 345#endif
346} 346}
347 347
@@ -353,28 +353,32 @@ static void rate_control_pid_rate_init(void *priv, void *priv_sta,
353 * as we need to have IEEE 802.1X auth succeed immediately after assoc.. 353 * as we need to have IEEE 802.1X auth succeed immediately after assoc..
354 * Until that method is implemented, we will use the lowest supported 354 * Until that method is implemented, we will use the lowest supported
355 * rate as a workaround. */ 355 * rate as a workaround. */
356 sta->txrate = rate_lowest_index(local, local->oper_hw_mode, sta); 356 struct ieee80211_supported_band *sband;
357
358 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
359 sta->txrate_idx = rate_lowest_index(local, sband, sta);
357} 360}
358 361
359static void *rate_control_pid_alloc(struct ieee80211_local *local) 362static void *rate_control_pid_alloc(struct ieee80211_local *local)
360{ 363{
361 struct rc_pid_info *pinfo; 364 struct rc_pid_info *pinfo;
362 struct rc_pid_rateinfo *rinfo; 365 struct rc_pid_rateinfo *rinfo;
363 struct ieee80211_hw_mode *mode; 366 struct ieee80211_supported_band *sband;
364 int i, j, tmp; 367 int i, j, tmp;
365 bool s; 368 bool s;
366#ifdef CONFIG_MAC80211_DEBUGFS 369#ifdef CONFIG_MAC80211_DEBUGFS
367 struct rc_pid_debugfs_entries *de; 370 struct rc_pid_debugfs_entries *de;
368#endif 371#endif
369 372
373 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
374
370 pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); 375 pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
371 if (!pinfo) 376 if (!pinfo)
372 return NULL; 377 return NULL;
373 378
374 /* We can safely assume that oper_hw_mode won't change unless we get 379 /* We can safely assume that sband won't change unless we get
375 * reinitialized. */ 380 * reinitialized. */
376 mode = local->oper_hw_mode; 381 rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC);
377 rinfo = kmalloc(sizeof(*rinfo) * mode->num_rates, GFP_ATOMIC);
378 if (!rinfo) { 382 if (!rinfo) {
379 kfree(pinfo); 383 kfree(pinfo);
380 return NULL; 384 return NULL;
@@ -383,7 +387,7 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local)
383 /* Sort the rates. This is optimized for the most common case (i.e. 387 /* Sort the rates. This is optimized for the most common case (i.e.
384 * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed 388 * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
385 * mapping too. */ 389 * mapping too. */
386 for (i = 0; i < mode->num_rates; i++) { 390 for (i = 0; i < sband->n_bitrates; i++) {
387 rinfo[i].index = i; 391 rinfo[i].index = i;
388 rinfo[i].rev_index = i; 392 rinfo[i].rev_index = i;
389 if (pinfo->fast_start) 393 if (pinfo->fast_start)
@@ -391,11 +395,11 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local)
391 else 395 else
392 rinfo[i].diff = i * pinfo->norm_offset; 396 rinfo[i].diff = i * pinfo->norm_offset;
393 } 397 }
394 for (i = 1; i < mode->num_rates; i++) { 398 for (i = 1; i < sband->n_bitrates; i++) {
395 s = 0; 399 s = 0;
396 for (j = 0; j < mode->num_rates - i; j++) 400 for (j = 0; j < sband->n_bitrates - i; j++)
397 if (unlikely(mode->rates[rinfo[j].index].rate > 401 if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
398 mode->rates[rinfo[j + 1].index].rate)) { 402 sband->bitrates[rinfo[j + 1].index].bitrate)) {
399 tmp = rinfo[j].index; 403 tmp = rinfo[j].index;
400 rinfo[j].index = rinfo[j + 1].index; 404 rinfo[j].index = rinfo[j + 1].index;
401 rinfo[j + 1].index = tmp; 405 rinfo[j + 1].index = tmp;
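The PID rate controller above steers the per-interval failed-frames percentage toward a target: the proportional term is the current error, the integral term accumulates past errors, and the derivative term is the change since the last sample; their weighted sum decides whether to step to a faster or slower rate. A small fixed-point sketch of that computation; the shift, coefficients, target, and the missing integral decay are illustrative, not the module's defaults:

#include <stdio.h>

#define ARITH_SHIFT 8	/* fixed-point scaling, chosen for the example */

struct pid_state { int err_integral; int last_err; };

/* One PID sample: pf is the failed-frames percentage scaled by 2^ARITH_SHIFT,
 * target is the desired percentage in the same scale.  A positive result
 * suggests room to move up in rate, a negative one suggests moving down. */
static int pid_sample(struct pid_state *s, int pf, int target,
		      int kp, int ki, int kd)
{
	int err_prop = target - pf;
	int err_deriv = err_prop - s->last_err;

	s->err_integral += err_prop;	/* the kernel additionally decays this */
	s->last_err = err_prop;

	return (kp * err_prop + ki * s->err_integral + kd * err_deriv)
		>> ARITH_SHIFT;
}

int main(void)
{
	struct pid_state s = { 0, 0 };
	int target = 14 << ARITH_SHIFT;		/* aim for ~14% failures */
	int samples[] = { 5, 9, 20, 35, 12 };	/* observed failure % */

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int adj = pid_sample(&s, samples[i] << ARITH_SHIFT, target,
				     15, 9, 15);
		printf("pf=%2d%% -> adjustment %d\n", samples[i], adj);
	}
	return 0;
}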
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c
index 9a78b116acff..bcc541d4b95c 100644
--- a/net/mac80211/rc80211_simple.c
+++ b/net/mac80211/rc80211_simple.c
@@ -7,6 +7,7 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <linux/jiffies.h>
10#include <linux/init.h> 11#include <linux/init.h>
11#include <linux/netdevice.h> 12#include <linux/netdevice.h>
12#include <linux/types.h> 13#include <linux/types.h>
@@ -35,8 +36,8 @@ static void rate_control_rate_inc(struct ieee80211_local *local,
35 struct sta_info *sta) 36 struct sta_info *sta)
36{ 37{
37 struct ieee80211_sub_if_data *sdata; 38 struct ieee80211_sub_if_data *sdata;
38 struct ieee80211_hw_mode *mode; 39 struct ieee80211_supported_band *sband;
39 int i = sta->txrate; 40 int i = sta->txrate_idx;
40 int maxrate; 41 int maxrate;
41 42
42 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 43 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
@@ -45,18 +46,17 @@ static void rate_control_rate_inc(struct ieee80211_local *local,
45 return; 46 return;
46 } 47 }
47 48
48 mode = local->oper_hw_mode; 49 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
49 maxrate = sdata->bss ? sdata->bss->max_ratectrl_rateidx : -1; 50 maxrate = sdata->bss ? sdata->bss->max_ratectrl_rateidx : -1;
50 51
51 if (i > mode->num_rates) 52 if (i > sband->n_bitrates)
52 i = mode->num_rates - 2; 53 i = sband->n_bitrates - 2;
53 54
54 while (i + 1 < mode->num_rates) { 55 while (i + 1 < sband->n_bitrates) {
55 i++; 56 i++;
56 if (sta->supp_rates & BIT(i) && 57 if (rate_supported(sta, sband->band, i) &&
57 mode->rates[i].flags & IEEE80211_RATE_SUPPORTED &&
58 (maxrate < 0 || i <= maxrate)) { 58 (maxrate < 0 || i <= maxrate)) {
59 sta->txrate = i; 59 sta->txrate_idx = i;
60 break; 60 break;
61 } 61 }
62 } 62 }
@@ -67,8 +67,8 @@ static void rate_control_rate_dec(struct ieee80211_local *local,
67 struct sta_info *sta) 67 struct sta_info *sta)
68{ 68{
69 struct ieee80211_sub_if_data *sdata; 69 struct ieee80211_sub_if_data *sdata;
70 struct ieee80211_hw_mode *mode; 70 struct ieee80211_supported_band *sband;
71 int i = sta->txrate; 71 int i = sta->txrate_idx;
72 72
73 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 73 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
74 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) { 74 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) {
@@ -76,15 +76,14 @@ static void rate_control_rate_dec(struct ieee80211_local *local,
76 return; 76 return;
77 } 77 }
78 78
79 mode = local->oper_hw_mode; 79 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
80 if (i > mode->num_rates) 80 if (i > sband->n_bitrates)
81 i = mode->num_rates; 81 i = sband->n_bitrates;
82 82
83 while (i > 0) { 83 while (i > 0) {
84 i--; 84 i--;
85 if (sta->supp_rates & BIT(i) && 85 if (rate_supported(sta, sband->band, i)) {
86 mode->rates[i].flags & IEEE80211_RATE_SUPPORTED) { 86 sta->txrate_idx = i;
87 sta->txrate = i;
88 break; 87 break;
89 } 88 }
90 } 89 }
@@ -132,9 +131,6 @@ static void rate_control_simple_tx_status(void *priv, struct net_device *dev,
132 sta->tx_num_consecutive_failures++; 131 sta->tx_num_consecutive_failures++;
133 sta->tx_num_mpdu_fail++; 132 sta->tx_num_mpdu_fail++;
134 } else { 133 } else {
135 sta->last_ack_rssi[0] = sta->last_ack_rssi[1];
136 sta->last_ack_rssi[1] = sta->last_ack_rssi[2];
137 sta->last_ack_rssi[2] = status->ack_signal;
138 sta->tx_num_consecutive_failures = 0; 134 sta->tx_num_consecutive_failures = 0;
139 sta->tx_num_mpdu_ok++; 135 sta->tx_num_mpdu_ok++;
140 } 136 }
@@ -168,7 +164,7 @@ static void rate_control_simple_tx_status(void *priv, struct net_device *dev,
168 } else if (per_failed < RATE_CONTROL_NUM_UP) { 164 } else if (per_failed < RATE_CONTROL_NUM_UP) {
169 rate_control_rate_inc(local, sta); 165 rate_control_rate_inc(local, sta);
170 } 166 }
171 srctrl->tx_avg_rate_sum += status->control.rate->rate; 167 srctrl->tx_avg_rate_sum += status->control.tx_rate->bitrate;
172 srctrl->tx_avg_rate_num++; 168 srctrl->tx_avg_rate_num++;
173 srctrl->tx_num_failures = 0; 169 srctrl->tx_num_failures = 0;
174 srctrl->tx_num_xmit = 0; 170 srctrl->tx_num_xmit = 0;
@@ -177,7 +173,7 @@ static void rate_control_simple_tx_status(void *priv, struct net_device *dev,
177 rate_control_rate_dec(local, sta); 173 rate_control_rate_dec(local, sta);
178 } 174 }
179 175
180 if (srctrl->avg_rate_update + 60 * HZ < jiffies) { 176 if (time_after(jiffies, srctrl->avg_rate_update + 60 * HZ)) {
181 srctrl->avg_rate_update = jiffies; 177 srctrl->avg_rate_update = jiffies;
182 if (srctrl->tx_avg_rate_num > 0) { 178 if (srctrl->tx_avg_rate_num > 0) {
183#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 179#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -201,7 +197,7 @@ static void rate_control_simple_tx_status(void *priv, struct net_device *dev,
201 197
202static void 198static void
203rate_control_simple_get_rate(void *priv, struct net_device *dev, 199rate_control_simple_get_rate(void *priv, struct net_device *dev,
204 struct ieee80211_hw_mode *mode, 200 struct ieee80211_supported_band *sband,
205 struct sk_buff *skb, 201 struct sk_buff *skb,
206 struct rate_selection *sel) 202 struct rate_selection *sel)
207{ 203{
@@ -219,7 +215,7 @@ rate_control_simple_get_rate(void *priv, struct net_device *dev,
219 fc = le16_to_cpu(hdr->frame_control); 215 fc = le16_to_cpu(hdr->frame_control);
220 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || 216 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
221 is_multicast_ether_addr(hdr->addr1) || !sta) { 217 is_multicast_ether_addr(hdr->addr1) || !sta) {
222 sel->rate = rate_lowest(local, mode, sta); 218 sel->rate = rate_lowest(local, sband, sta);
223 if (sta) 219 if (sta)
224 sta_info_put(sta); 220 sta_info_put(sta);
225 return; 221 return;
@@ -228,18 +224,18 @@ rate_control_simple_get_rate(void *priv, struct net_device *dev,
228 /* If a forced rate is in effect, select it. */ 224 /* If a forced rate is in effect, select it. */
229 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 225 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
230 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1) 226 if (sdata->bss && sdata->bss->force_unicast_rateidx > -1)
231 sta->txrate = sdata->bss->force_unicast_rateidx; 227 sta->txrate_idx = sdata->bss->force_unicast_rateidx;
232 228
233 rateidx = sta->txrate; 229 rateidx = sta->txrate_idx;
234 230
235 if (rateidx >= mode->num_rates) 231 if (rateidx >= sband->n_bitrates)
236 rateidx = mode->num_rates - 1; 232 rateidx = sband->n_bitrates - 1;
237 233
238 sta->last_txrate = rateidx; 234 sta->last_txrate_idx = rateidx;
239 235
240 sta_info_put(sta); 236 sta_info_put(sta);
241 237
242 sel->rate = &mode->rates[rateidx]; 238 sel->rate = &sband->bitrates[rateidx];
243} 239}
244 240
245 241
@@ -247,21 +243,15 @@ static void rate_control_simple_rate_init(void *priv, void *priv_sta,
247 struct ieee80211_local *local, 243 struct ieee80211_local *local,
248 struct sta_info *sta) 244 struct sta_info *sta)
249{ 245{
250 struct ieee80211_hw_mode *mode; 246 struct ieee80211_supported_band *sband;
251 int i; 247
252 sta->txrate = 0; 248 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
253 mode = local->oper_hw_mode; 249
254 /* TODO: This routine should consider using RSSI from previous packets 250 /* TODO: This routine should consider using RSSI from previous packets
255 * as we need to have IEEE 802.1X auth succeed immediately after assoc.. 251 * as we need to have IEEE 802.1X auth succeed immediately after assoc..
256 * Until that method is implemented, we will use the lowest supported rate 252 * Until that method is implemented, we will use the lowest supported rate
257 * as a workaround, */ 253 * as a workaround, */
258 for (i = 0; i < mode->num_rates; i++) { 254 sta->txrate_idx = rate_lowest_index(local, sband, sta);
259 if ((sta->supp_rates & BIT(i)) &&
260 (mode->rates[i].flags & IEEE80211_RATE_SUPPORTED)) {
261 sta->txrate = i;
262 break;
263 }
264 }
265} 255}
266 256
267 257
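One small change above swaps the raw comparison srctrl->avg_rate_update + 60 * HZ < jiffies for time_after(jiffies, ...), which stays correct when the jiffies counter wraps; the trick is comparing through a signed view of the unsigned difference. A compact sketch of why the plain < breaks at wraparound while the subtraction form does not; the counter width and values are chosen only to demonstrate the wrap:

#include <stdio.h>

typedef unsigned int jiffies_t;

/* Wraparound-safe "a is after b", modelled on the kernel's time_after(). */
static int after(jiffies_t a, jiffies_t b)
{
	return (int)(b - a) < 0;
}

int main(void)
{
	/* Timestamp taken just before the counter wraps; the deadline
	 * (stamp + 0x10) still fits, but "now" has wrapped past zero. */
	jiffies_t stamp = 0xffffffe0u;
	jiffies_t deadline = stamp + 0x10;
	jiffies_t now = stamp + 0x30;		/* wraps to 0x10 */

	printf("naive  deadline < now : %d\n", deadline < now);	/* 0: wrong */
	printf("after(now, deadline)  : %d\n", after(now, deadline));	/* 1: right */
	return 0;
}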
diff --git a/net/mac80211/regdomain.c b/net/mac80211/regdomain.c
deleted file mode 100644
index f42678fa62d1..000000000000
--- a/net/mac80211/regdomain.c
+++ /dev/null
@@ -1,152 +0,0 @@
1/*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10/*
11 * This regulatory domain control implementation is known to be incomplete
12 * and confusing. mac80211 regulatory domain control will be significantly
13 * reworked in the not-too-distant future.
14 *
15 * For now, drivers wishing to control which channels are and aren't available
16 * are advised as follows:
17 * - set the IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED flag
18 * - continue to include *ALL* possible channels in the modes registered
19 * through ieee80211_register_hwmode()
20 * - for each allowable ieee80211_channel structure registered in the above
21 * call, set the flag member to some meaningful value such as
22 * IEEE80211_CHAN_W_SCAN | IEEE80211_CHAN_W_ACTIVE_SCAN |
23 * IEEE80211_CHAN_W_IBSS.
24 * - leave flag as 0 for non-allowable channels
25 *
26 * The usual implementation is for a driver to read a device EEPROM to
27 * determine which regulatory domain it should be operating under, then
28 * looking up the allowable channels in a driver-local table, then performing
29 * the above.
30 */
31
32#include <linux/module.h>
33#include <linux/netdevice.h>
34#include <net/mac80211.h>
35#include "ieee80211_i.h"
36
37static int ieee80211_regdom = 0x10; /* FCC */
38module_param(ieee80211_regdom, int, 0444);
39MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain; 64=MKK");
40
41/*
42 * If firmware is upgraded by the vendor, additional channels can be used based
43 * on the new Japanese regulatory rules. This is indicated by setting
44 * ieee80211_japan_5ghz module parameter to one when loading the 80211 kernel
45 * module.
46 */
47static int ieee80211_japan_5ghz /* = 0 */;
48module_param(ieee80211_japan_5ghz, int, 0444);
49MODULE_PARM_DESC(ieee80211_japan_5ghz, "Vendor-updated firmware for 5 GHz");
50
51
52struct ieee80211_channel_range {
53 short start_freq;
54 short end_freq;
55 unsigned char power_level;
56 unsigned char antenna_max;
57};
58
59static const struct ieee80211_channel_range ieee80211_fcc_channels[] = {
60 { 2412, 2462, 27, 6 } /* IEEE 802.11b/g, channels 1..11 */,
61 { 5180, 5240, 17, 6 } /* IEEE 802.11a, channels 36..48 */,
62 { 5260, 5320, 23, 6 } /* IEEE 802.11a, channels 52..64 */,
63 { 5745, 5825, 30, 6 } /* IEEE 802.11a, channels 149..165, outdoor */,
64 { 0 }
65};
66
67static const struct ieee80211_channel_range ieee80211_mkk_channels[] = {
68 { 2412, 2472, 20, 6 } /* IEEE 802.11b/g, channels 1..13 */,
69 { 5170, 5240, 20, 6 } /* IEEE 802.11a, channels 34..48 */,
70 { 5260, 5320, 20, 6 } /* IEEE 802.11a, channels 52..64 */,
71 { 0 }
72};
73
74
75static const struct ieee80211_channel_range *channel_range =
76 ieee80211_fcc_channels;
77
78
79static void ieee80211_unmask_channel(int mode, struct ieee80211_channel *chan)
80{
81 int i;
82
83 chan->flag = 0;
84
85 for (i = 0; channel_range[i].start_freq; i++) {
86 const struct ieee80211_channel_range *r = &channel_range[i];
87 if (r->start_freq <= chan->freq && r->end_freq >= chan->freq) {
88 if (ieee80211_regdom == 64 && !ieee80211_japan_5ghz &&
89 chan->freq >= 5260 && chan->freq <= 5320) {
90 /*
91 * Skip new channels in Japan since the
92 * firmware was not marked having been upgraded
93 * by the vendor.
94 */
95 continue;
96 }
97
98 if (ieee80211_regdom == 0x10 &&
99 (chan->freq == 5190 || chan->freq == 5210 ||
100 chan->freq == 5230)) {
101 /* Skip MKK channels when in FCC domain. */
102 continue;
103 }
104
105 chan->flag |= IEEE80211_CHAN_W_SCAN |
106 IEEE80211_CHAN_W_ACTIVE_SCAN |
107 IEEE80211_CHAN_W_IBSS;
108 chan->power_level = r->power_level;
109 chan->antenna_max = r->antenna_max;
110
111 if (ieee80211_regdom == 64 &&
112 (chan->freq == 5170 || chan->freq == 5190 ||
113 chan->freq == 5210 || chan->freq == 5230)) {
114 /*
115 * New regulatory rules in Japan have backwards
116 * compatibility with old channels in 5.15-5.25
117 * GHz band, but the station is not allowed to
118 * use active scan on these old channels.
119 */
120 chan->flag &= ~IEEE80211_CHAN_W_ACTIVE_SCAN;
121 }
122
123 if (ieee80211_regdom == 64 &&
124 (chan->freq == 5260 || chan->freq == 5280 ||
125 chan->freq == 5300 || chan->freq == 5320)) {
126 /*
127 * IBSS is not allowed on 5.25-5.35 GHz band
128 * due to radar detection requirements.
129 */
130 chan->flag &= ~IEEE80211_CHAN_W_IBSS;
131 }
132
133 break;
134 }
135 }
136}
137
138
139void ieee80211_set_default_regdomain(struct ieee80211_hw_mode *mode)
140{
141 int c;
142 for (c = 0; c < mode->num_channels; c++)
143 ieee80211_unmask_channel(mode->mode, &mode->channels[c]);
144}
145
146
147void ieee80211_regdomain_init(void)
148{
149 if (ieee80211_regdom == 0x40)
150 channel_range = ieee80211_mkk_channels;
151}
152
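The file deleted above enabled channels by checking whether each center frequency fell inside a per-domain list of allowed ranges and then setting the scan, active-scan, and IBSS flags. A stripped-down sketch of that range lookup; the ranges mirror the FCC table from the deleted file, the power and antenna fields are omitted:

#include <stdio.h>

struct range { int start_mhz, end_mhz; };

/* Allowed frequency ranges, as in the deleted FCC table. */
static const struct range fcc[] = {
	{ 2412, 2462 }, { 5180, 5240 }, { 5260, 5320 }, { 5745, 5825 },
};

/* Return 1 when the channel's center frequency lies in an allowed range. */
static int channel_allowed(int mhz)
{
	for (unsigned i = 0; i < sizeof(fcc) / sizeof(fcc[0]); i++)
		if (mhz >= fcc[i].start_mhz && mhz <= fcc[i].end_mhz)
			return 1;
	return 0;
}

int main(void)
{
	int test[] = { 2412, 2472, 5200, 5500 };

	for (unsigned i = 0; i < sizeof(test) / sizeof(test[0]); i++)
		printf("%d MHz: %s\n", test[i],
		       channel_allowed(test[i]) ? "allowed" : "blocked");
	return 0;
}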
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 535407d07fa4..48574f6c0e74 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -9,6 +9,7 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/jiffies.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/skbuff.h> 14#include <linux/skbuff.h>
14#include <linux/netdevice.h> 15#include <linux/netdevice.h>
@@ -82,10 +83,10 @@ static inline int should_drop_frame(struct ieee80211_rx_status *status,
82 */ 83 */
83static struct sk_buff * 84static struct sk_buff *
84ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, 85ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
85 struct ieee80211_rx_status *status) 86 struct ieee80211_rx_status *status,
87 struct ieee80211_rate *rate)
86{ 88{
87 struct ieee80211_sub_if_data *sdata; 89 struct ieee80211_sub_if_data *sdata;
88 struct ieee80211_rate *rate;
89 int needed_headroom = 0; 90 int needed_headroom = 0;
90 struct ieee80211_radiotap_header *rthdr; 91 struct ieee80211_radiotap_header *rthdr;
91 __le64 *rttsft = NULL; 92 __le64 *rttsft = NULL;
@@ -194,14 +195,11 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
194 rtfixed->rx_flags |= 195 rtfixed->rx_flags |=
195 cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS); 196 cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
196 197
197 rate = ieee80211_get_rate(local, status->phymode, 198 rtfixed->rate = rate->bitrate / 5;
198 status->rate);
199 if (rate)
200 rtfixed->rate = rate->rate / 5;
201 199
202 rtfixed->chan_freq = cpu_to_le16(status->freq); 200 rtfixed->chan_freq = cpu_to_le16(status->freq);
203 201
204 if (status->phymode == MODE_IEEE80211A) 202 if (status->band == IEEE80211_BAND_5GHZ)
205 rtfixed->chan_flags = 203 rtfixed->chan_flags =
206 cpu_to_le16(IEEE80211_CHAN_OFDM | 204 cpu_to_le16(IEEE80211_CHAN_OFDM |
207 IEEE80211_CHAN_5GHZ); 205 IEEE80211_CHAN_5GHZ);
@@ -226,6 +224,9 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
226 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR) 224 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR)
227 continue; 225 continue;
228 226
227 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
228 continue;
229
229 if (prev_dev) { 230 if (prev_dev) {
230 skb2 = skb_clone(skb, GFP_ATOMIC); 231 skb2 = skb_clone(skb, GFP_ATOMIC);
231 if (skb2) { 232 if (skb2) {
@@ -249,15 +250,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
249} 250}
250 251
251 252
252/* pre-rx handlers 253static void ieee80211_parse_qos(struct ieee80211_txrx_data *rx)
253 *
254 * these don't have dev/sdata fields in the rx data
255 * The sta value should also not be used because it may
256 * be NULL even though a STA (in IBSS mode) will be added.
257 */
258
259static ieee80211_txrx_result
260ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx)
261{ 254{
262 u8 *data = rx->skb->data; 255 u8 *data = rx->skb->data;
263 int tid; 256 int tid;
@@ -290,64 +283,15 @@ ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx)
290 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 283 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
291 * For now, set skb->priority to 0 for other cases. */ 284 * For now, set skb->priority to 0 for other cases. */
292 rx->skb->priority = (tid > 7) ? 0 : tid; 285 rx->skb->priority = (tid > 7) ? 0 : tid;
293
294 return TXRX_CONTINUE;
295} 286}
296 287
297 288static void ieee80211_verify_ip_alignment(struct ieee80211_txrx_data *rx)
298static u32 ieee80211_rx_load_stats(struct ieee80211_local *local,
299 struct sk_buff *skb,
300 struct ieee80211_rx_status *status)
301{ 289{
302 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
303 u32 load = 0, hdrtime;
304 struct ieee80211_rate *rate;
305 struct ieee80211_hw_mode *mode = local->hw.conf.mode;
306 int i;
307
308 /* Estimate total channel use caused by this frame */
309
310 if (unlikely(mode->num_rates < 0))
311 return TXRX_CONTINUE;
312
313 rate = &mode->rates[0];
314 for (i = 0; i < mode->num_rates; i++) {
315 if (mode->rates[i].val == status->rate) {
316 rate = &mode->rates[i];
317 break;
318 }
319 }
320
321 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
322 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
323
324 if (mode->mode == MODE_IEEE80211A ||
325 (mode->mode == MODE_IEEE80211G &&
326 rate->flags & IEEE80211_RATE_ERP))
327 hdrtime = CHAN_UTIL_HDR_SHORT;
328 else
329 hdrtime = CHAN_UTIL_HDR_LONG;
330
331 load = hdrtime;
332 if (!is_multicast_ether_addr(hdr->addr1))
333 load += hdrtime;
334
335 load += skb->len * rate->rate_inv;
336
337 /* Divide channel_use by 8 to avoid wrapping around the counter */
338 load >>= CHAN_UTIL_SHIFT;
339
340 return load;
341}
342
343#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT 290#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
344static ieee80211_txrx_result
345ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx)
346{
347 int hdrlen; 291 int hdrlen;
348 292
349 if (!WLAN_FC_DATA_PRESENT(rx->fc)) 293 if (!WLAN_FC_DATA_PRESENT(rx->fc))
350 return TXRX_CONTINUE; 294 return;
351 295
352 /* 296 /*
353 * Drivers are required to align the payload data in a way that 297 * Drivers are required to align the payload data in a way that
@@ -372,32 +316,55 @@ ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx)
372 if (rx->flags & IEEE80211_TXRXD_RX_AMSDU) 316 if (rx->flags & IEEE80211_TXRXD_RX_AMSDU)
373 hdrlen += ETH_HLEN; 317 hdrlen += ETH_HLEN;
374 WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3); 318 WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3);
375
376 return TXRX_CONTINUE;
377}
378#endif 319#endif
320}
379 321
380ieee80211_rx_handler ieee80211_rx_pre_handlers[] = 322
323static u32 ieee80211_rx_load_stats(struct ieee80211_local *local,
324 struct sk_buff *skb,
325 struct ieee80211_rx_status *status,
326 struct ieee80211_rate *rate)
381{ 327{
382 ieee80211_rx_h_parse_qos, 328 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
383#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT 329 u32 load = 0, hdrtime;
384 ieee80211_rx_h_verify_ip_alignment, 330
385#endif 331 /* Estimate total channel use caused by this frame */
386 NULL 332
387}; 333 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
334 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
335
336 if (status->band == IEEE80211_BAND_5GHZ ||
337 (status->band == IEEE80211_BAND_5GHZ &&
338 rate->flags & IEEE80211_RATE_ERP_G))
339 hdrtime = CHAN_UTIL_HDR_SHORT;
340 else
341 hdrtime = CHAN_UTIL_HDR_LONG;
342
343 load = hdrtime;
344 if (!is_multicast_ether_addr(hdr->addr1))
345 load += hdrtime;
346
347 /* TODO: optimise again */
348 load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate;
349
350 /* Divide channel_use by 8 to avoid wrapping around the counter */
351 load >>= CHAN_UTIL_SHIFT;
352
353 return load;
354}
388 355
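For readers following the channel-use arithmetic in the rewritten ieee80211_rx_load_stats() above, here is a minimal user-space sketch of the same calculation. The EX_-prefixed constants and the helper name are assumptions made for illustration only, not the kernel's definitions.

#include <stdio.h>
#include <stdbool.h>

#define EX_CHAN_UTIL_SHIFT     3        /* divide the result by 8, as in the patch */
#define EX_CHAN_UTIL_RATE_LCM  95040    /* assumed LCM of the bitrates, in 100 kbit/s units */
#define EX_HDR_SHORT           40       /* assumed short header airtime, already scaled */
#define EX_HDR_LONG            120      /* assumed long header airtime, already scaled */

/* Mirror the structure of the load estimate: header airtime (counted
 * twice for a unicast frame to cover the response), plus payload time
 * scaled by the inverse of the bitrate, shifted down to avoid counter
 * wrap. */
static unsigned int example_frame_load(unsigned int frame_len,
                                       unsigned int bitrate_100kbps,
                                       bool unicast, bool short_hdr)
{
        unsigned int load = short_hdr ? EX_HDR_SHORT : EX_HDR_LONG;

        if (unicast)
                load += short_hdr ? EX_HDR_SHORT : EX_HDR_LONG;

        load += frame_len * EX_CHAN_UTIL_RATE_LCM / bitrate_100kbps;

        return load >> EX_CHAN_UTIL_SHIFT;
}

int main(void)
{
        /* a 1500-byte unicast frame at 54 Mbit/s (540 in 100 kbit/s units) */
        printf("load = %u\n", example_frame_load(1500, 540, true, true));
        return 0;
}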
389/* rx handlers */ 356/* rx handlers */
390 357
391static ieee80211_txrx_result 358static ieee80211_rx_result
392ieee80211_rx_h_if_stats(struct ieee80211_txrx_data *rx) 359ieee80211_rx_h_if_stats(struct ieee80211_txrx_data *rx)
393{ 360{
394 if (rx->sta) 361 if (rx->sta)
395 rx->sta->channel_use_raw += rx->u.rx.load; 362 rx->sta->channel_use_raw += rx->u.rx.load;
396 rx->sdata->channel_use_raw += rx->u.rx.load; 363 rx->sdata->channel_use_raw += rx->u.rx.load;
397 return TXRX_CONTINUE; 364 return RX_CONTINUE;
398} 365}
399 366
400static ieee80211_txrx_result 367static ieee80211_rx_result
401ieee80211_rx_h_passive_scan(struct ieee80211_txrx_data *rx) 368ieee80211_rx_h_passive_scan(struct ieee80211_txrx_data *rx)
402{ 369{
403 struct ieee80211_local *local = rx->local; 370 struct ieee80211_local *local = rx->local;
@@ -409,21 +376,21 @@ ieee80211_rx_h_passive_scan(struct ieee80211_txrx_data *rx)
409 if (unlikely(local->sta_sw_scanning)) { 376 if (unlikely(local->sta_sw_scanning)) {
410 /* drop all the other packets during a software scan anyway */ 377 /* drop all the other packets during a software scan anyway */
411 if (ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status) 378 if (ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status)
412 != TXRX_QUEUED) 379 != RX_QUEUED)
413 dev_kfree_skb(skb); 380 dev_kfree_skb(skb);
414 return TXRX_QUEUED; 381 return RX_QUEUED;
415 } 382 }
416 383
417 if (unlikely(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) { 384 if (unlikely(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) {
418 /* scanning finished during invoking of handlers */ 385 /* scanning finished during invoking of handlers */
419 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); 386 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
420 return TXRX_DROP; 387 return RX_DROP_UNUSABLE;
421 } 388 }
422 389
423 return TXRX_CONTINUE; 390 return RX_CONTINUE;
424} 391}
425 392
426static ieee80211_txrx_result 393static ieee80211_rx_result
427ieee80211_rx_h_check(struct ieee80211_txrx_data *rx) 394ieee80211_rx_h_check(struct ieee80211_txrx_data *rx)
428{ 395{
429 struct ieee80211_hdr *hdr; 396 struct ieee80211_hdr *hdr;
@@ -438,14 +405,14 @@ ieee80211_rx_h_check(struct ieee80211_txrx_data *rx)
438 rx->local->dot11FrameDuplicateCount++; 405 rx->local->dot11FrameDuplicateCount++;
439 rx->sta->num_duplicates++; 406 rx->sta->num_duplicates++;
440 } 407 }
441 return TXRX_DROP; 408 return RX_DROP_MONITOR;
442 } else 409 } else
443 rx->sta->last_seq_ctrl[rx->u.rx.queue] = hdr->seq_ctrl; 410 rx->sta->last_seq_ctrl[rx->u.rx.queue] = hdr->seq_ctrl;
444 } 411 }
445 412
446 if (unlikely(rx->skb->len < 16)) { 413 if (unlikely(rx->skb->len < 16)) {
447 I802_DEBUG_INC(rx->local->rx_handlers_drop_short); 414 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
448 return TXRX_DROP; 415 return RX_DROP_MONITOR;
449 } 416 }
450 417
451 /* Drop disallowed frame classes based on STA auth/assoc state; 418 /* Drop disallowed frame classes based on STA auth/assoc state;
@@ -467,23 +434,23 @@ ieee80211_rx_h_check(struct ieee80211_txrx_data *rx)
467 || !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) { 434 || !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) {
468 /* Drop IBSS frames and frames for other hosts 435 /* Drop IBSS frames and frames for other hosts
469 * silently. */ 436 * silently. */
470 return TXRX_DROP; 437 return RX_DROP_MONITOR;
471 } 438 }
472 439
473 return TXRX_DROP; 440 return RX_DROP_MONITOR;
474 } 441 }
475 442
476 return TXRX_CONTINUE; 443 return RX_CONTINUE;
477} 444}
478 445
479 446
480static ieee80211_txrx_result 447static ieee80211_rx_result
481ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx) 448ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx)
482{ 449{
483 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 450 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
484 int keyidx; 451 int keyidx;
485 int hdrlen; 452 int hdrlen;
486 ieee80211_txrx_result result = TXRX_DROP; 453 ieee80211_rx_result result = RX_DROP_UNUSABLE;
487 struct ieee80211_key *stakey = NULL; 454 struct ieee80211_key *stakey = NULL;
488 455
489 /* 456 /*
@@ -513,14 +480,14 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx)
513 */ 480 */
514 481
515 if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) 482 if (!(rx->fc & IEEE80211_FCTL_PROTECTED))
516 return TXRX_CONTINUE; 483 return RX_CONTINUE;
517 484
518 /* 485 /*
519 * No point in finding a key and decrypting if the frame is neither 486 * No point in finding a key and decrypting if the frame is neither
520 * addressed to us nor a multicast frame. 487 * addressed to us nor a multicast frame.
521 */ 488 */
522 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) 489 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))
523 return TXRX_CONTINUE; 490 return RX_CONTINUE;
524 491
525 if (rx->sta) 492 if (rx->sta)
526 stakey = rcu_dereference(rx->sta->key); 493 stakey = rcu_dereference(rx->sta->key);
@@ -539,12 +506,12 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx)
539 */ 506 */
540 if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) && 507 if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) &&
541 (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) 508 (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED))
542 return TXRX_CONTINUE; 509 return RX_CONTINUE;
543 510
544 hdrlen = ieee80211_get_hdrlen(rx->fc); 511 hdrlen = ieee80211_get_hdrlen(rx->fc);
545 512
546 if (rx->skb->len < 8 + hdrlen) 513 if (rx->skb->len < 8 + hdrlen)
547 return TXRX_DROP; /* TODO: count this? */ 514 return RX_DROP_UNUSABLE; /* TODO: count this? */
548 515
549 /* 516 /*
550 * no need to call ieee80211_wep_get_keyidx, 517 * no need to call ieee80211_wep_get_keyidx,
@@ -573,7 +540,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx)
573 printk(KERN_DEBUG "%s: RX protected frame," 540 printk(KERN_DEBUG "%s: RX protected frame,"
574 " but have no key\n", rx->dev->name); 541 " but have no key\n", rx->dev->name);
575#endif /* CONFIG_MAC80211_DEBUG */ 542#endif /* CONFIG_MAC80211_DEBUG */
576 return TXRX_DROP; 543 return RX_DROP_MONITOR;
577 } 544 }
578 545
579 /* Check for weak IVs if possible */ 546 /* Check for weak IVs if possible */
@@ -612,7 +579,7 @@ static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta)
612 if (sdata->bss) 579 if (sdata->bss)
613 atomic_inc(&sdata->bss->num_sta_ps); 580 atomic_inc(&sdata->bss->num_sta_ps);
614 sta->flags |= WLAN_STA_PS; 581 sta->flags |= WLAN_STA_PS;
615 sta->pspoll = 0; 582 sta->flags &= ~WLAN_STA_PSPOLL;
616#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 583#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
617 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", 584 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n",
618 dev->name, print_mac(mac, sta->addr), sta->aid); 585 dev->name, print_mac(mac, sta->addr), sta->aid);
@@ -629,20 +596,20 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
629 DECLARE_MAC_BUF(mac); 596 DECLARE_MAC_BUF(mac);
630 597
631 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 598 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
599
632 if (sdata->bss) 600 if (sdata->bss)
633 atomic_dec(&sdata->bss->num_sta_ps); 601 atomic_dec(&sdata->bss->num_sta_ps);
634 sta->flags &= ~(WLAN_STA_PS | WLAN_STA_TIM); 602
635 sta->pspoll = 0; 603 sta->flags &= ~(WLAN_STA_PS | WLAN_STA_PSPOLL);
636 if (!skb_queue_empty(&sta->ps_tx_buf)) { 604
637 if (local->ops->set_tim) 605 if (!skb_queue_empty(&sta->ps_tx_buf))
638 local->ops->set_tim(local_to_hw(local), sta->aid, 0); 606 sta_info_clear_tim_bit(sta);
639 if (sdata->bss) 607
640 bss_tim_clear(local, sdata->bss, sta->aid);
641 }
642#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 608#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
643 printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n", 609 printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n",
644 dev->name, print_mac(mac, sta->addr), sta->aid); 610 dev->name, print_mac(mac, sta->addr), sta->aid);
645#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 611#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
612
646 /* Send all buffered frames to the station */ 613 /* Send all buffered frames to the station */
647 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { 614 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
648 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 615 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb;
@@ -666,7 +633,7 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
666 return sent; 633 return sent;
667} 634}
668 635
669static ieee80211_txrx_result 636static ieee80211_rx_result
670ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx) 637ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx)
671{ 638{
672 struct sta_info *sta = rx->sta; 639 struct sta_info *sta = rx->sta;
@@ -674,7 +641,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx)
674 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 641 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
675 642
676 if (!sta) 643 if (!sta)
677 return TXRX_CONTINUE; 644 return RX_CONTINUE;
678 645
679 /* Update last_rx only for IBSS packets which are for the current 646 /* Update last_rx only for IBSS packets which are for the current
680 * BSSID to avoid keeping the current IBSS network alive in cases where 647 * BSSID to avoid keeping the current IBSS network alive in cases where
@@ -695,7 +662,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx)
695 } 662 }
696 663
697 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) 664 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))
698 return TXRX_CONTINUE; 665 return RX_CONTINUE;
699 666
700 sta->rx_fragments++; 667 sta->rx_fragments++;
701 sta->rx_bytes += rx->skb->len; 668 sta->rx_bytes += rx->skb->len;
@@ -722,10 +689,10 @@ ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx)
722 * as a dropped packet. */ 689 * as a dropped packet. */
723 sta->rx_packets++; 690 sta->rx_packets++;
724 dev_kfree_skb(rx->skb); 691 dev_kfree_skb(rx->skb);
725 return TXRX_QUEUED; 692 return RX_QUEUED;
726 } 693 }
727 694
728 return TXRX_CONTINUE; 695 return RX_CONTINUE;
729} /* ieee80211_rx_h_sta_process */ 696} /* ieee80211_rx_h_sta_process */
730 697
731static inline struct ieee80211_fragment_entry * 698static inline struct ieee80211_fragment_entry *
@@ -801,7 +768,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
801 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) 768 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
802 continue; 769 continue;
803 770
804 if (entry->first_frag_time + 2 * HZ < jiffies) { 771 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
805 __skb_queue_purge(&entry->skb_list); 772 __skb_queue_purge(&entry->skb_list);
806 continue; 773 continue;
807 } 774 }
@@ -811,7 +778,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
811 return NULL; 778 return NULL;
812} 779}
813 780
814static ieee80211_txrx_result 781static ieee80211_rx_result
815ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) 782ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
816{ 783{
817 struct ieee80211_hdr *hdr; 784 struct ieee80211_hdr *hdr;
@@ -848,7 +815,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
848 rx->key->u.ccmp.rx_pn[rx->u.rx.queue], 815 rx->key->u.ccmp.rx_pn[rx->u.rx.queue],
849 CCMP_PN_LEN); 816 CCMP_PN_LEN);
850 } 817 }
851 return TXRX_QUEUED; 818 return RX_QUEUED;
852 } 819 }
853 820
854 /* This is a fragment for a frame that should already be pending in 821 /* This is a fragment for a frame that should already be pending in
@@ -858,7 +825,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
858 rx->u.rx.queue, hdr); 825 rx->u.rx.queue, hdr);
859 if (!entry) { 826 if (!entry) {
860 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 827 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
861 return TXRX_DROP; 828 return RX_DROP_MONITOR;
862 } 829 }
863 830
864 /* Verify that MPDUs within one MSDU have sequential PN values. 831 /* Verify that MPDUs within one MSDU have sequential PN values.
@@ -867,7 +834,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
867 int i; 834 int i;
868 u8 pn[CCMP_PN_LEN], *rpn; 835 u8 pn[CCMP_PN_LEN], *rpn;
869 if (!rx->key || rx->key->conf.alg != ALG_CCMP) 836 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
870 return TXRX_DROP; 837 return RX_DROP_UNUSABLE;
871 memcpy(pn, entry->last_pn, CCMP_PN_LEN); 838 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
872 for (i = CCMP_PN_LEN - 1; i >= 0; i--) { 839 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
873 pn[i]++; 840 pn[i]++;
@@ -885,7 +852,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
885 rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], 852 rpn[0], rpn[1], rpn[2], rpn[3], rpn[4],
886 rpn[5], pn[0], pn[1], pn[2], pn[3], 853 rpn[5], pn[0], pn[1], pn[2], pn[3],
887 pn[4], pn[5]); 854 pn[4], pn[5]);
888 return TXRX_DROP; 855 return RX_DROP_UNUSABLE;
889 } 856 }
890 memcpy(entry->last_pn, pn, CCMP_PN_LEN); 857 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
891 } 858 }
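The sequential-PN check above increments a 6-byte, big-endian CCMP packet number with carry before comparing it to the received PN. A standalone sketch of that pattern follows; the names are illustrative, not the kernel's.

#include <string.h>
#include <stdbool.h>

#define EX_CCMP_PN_LEN 6

/* Returns true if rx_pn is exactly last_pn + 1, treating the PN as a
 * big-endian 48-bit counter, mirroring the loop in the defragment
 * handler above. */
static bool example_pn_is_next(const unsigned char *last_pn,
                               const unsigned char *rx_pn)
{
        unsigned char pn[EX_CCMP_PN_LEN];
        int i;

        memcpy(pn, last_pn, EX_CCMP_PN_LEN);
        for (i = EX_CCMP_PN_LEN - 1; i >= 0; i--) {
                pn[i]++;                /* increment with carry */
                if (pn[i])              /* stop once a byte did not wrap to zero */
                        break;
        }

        return memcmp(pn, rx_pn, EX_CCMP_PN_LEN) == 0;
}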
@@ -896,7 +863,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
896 entry->extra_len += rx->skb->len; 863 entry->extra_len += rx->skb->len;
897 if (rx->fc & IEEE80211_FCTL_MOREFRAGS) { 864 if (rx->fc & IEEE80211_FCTL_MOREFRAGS) {
898 rx->skb = NULL; 865 rx->skb = NULL;
899 return TXRX_QUEUED; 866 return RX_QUEUED;
900 } 867 }
901 868
902 rx->skb = __skb_dequeue(&entry->skb_list); 869 rx->skb = __skb_dequeue(&entry->skb_list);
@@ -906,7 +873,7 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
906 GFP_ATOMIC))) { 873 GFP_ATOMIC))) {
907 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 874 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
908 __skb_queue_purge(&entry->skb_list); 875 __skb_queue_purge(&entry->skb_list);
909 return TXRX_DROP; 876 return RX_DROP_UNUSABLE;
910 } 877 }
911 } 878 }
912 while ((skb = __skb_dequeue(&entry->skb_list))) { 879 while ((skb = __skb_dequeue(&entry->skb_list))) {
@@ -924,10 +891,10 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
924 rx->local->dot11MulticastReceivedFrameCount++; 891 rx->local->dot11MulticastReceivedFrameCount++;
925 else 892 else
926 ieee80211_led_rx(rx->local); 893 ieee80211_led_rx(rx->local);
927 return TXRX_CONTINUE; 894 return RX_CONTINUE;
928} 895}
929 896
930static ieee80211_txrx_result 897static ieee80211_rx_result
931ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx) 898ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx)
932{ 899{
933 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 900 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
@@ -939,11 +906,11 @@ ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx)
939 (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL || 906 (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL ||
940 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL || 907 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL ||
941 !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))) 908 !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)))
942 return TXRX_CONTINUE; 909 return RX_CONTINUE;
943 910
944 if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) && 911 if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) &&
945 (sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) 912 (sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
946 return TXRX_DROP; 913 return RX_DROP_UNUSABLE;
947 914
948 skb = skb_dequeue(&rx->sta->tx_filtered); 915 skb = skb_dequeue(&rx->sta->tx_filtered);
949 if (!skb) { 916 if (!skb) {
@@ -958,9 +925,11 @@ ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx)
958 struct ieee80211_hdr *hdr = 925 struct ieee80211_hdr *hdr =
959 (struct ieee80211_hdr *) skb->data; 926 (struct ieee80211_hdr *) skb->data;
960 927
961 /* tell TX path to send one frame even though the STA may 928 /*
962 * still remain in PS mode after this frame exchange */ 929 * Tell TX path to send one frame even though the STA may
963 rx->sta->pspoll = 1; 930 * still remain in PS mode after this frame exchange.
931 */
932 rx->sta->flags |= WLAN_STA_PSPOLL;
964 933
965#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 934#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
966 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", 935 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n",
@@ -970,38 +939,37 @@ ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx)
970 939
971 /* Use MoreData flag to indicate whether there are more 940 /* Use MoreData flag to indicate whether there are more
972 * buffered frames for this STA */ 941 * buffered frames for this STA */
973 if (no_pending_pkts) { 942 if (no_pending_pkts)
974 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); 943 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
975 rx->sta->flags &= ~WLAN_STA_TIM; 944 else
976 } else
977 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); 945 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
978 946
979 dev_queue_xmit(skb); 947 dev_queue_xmit(skb);
980 948
981 if (no_pending_pkts) { 949 if (no_pending_pkts)
982 if (rx->local->ops->set_tim) 950 sta_info_clear_tim_bit(rx->sta);
983 rx->local->ops->set_tim(local_to_hw(rx->local),
984 rx->sta->aid, 0);
985 if (rx->sdata->bss)
986 bss_tim_clear(rx->local, rx->sdata->bss, rx->sta->aid);
987 }
988#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 951#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
989 } else if (!rx->u.rx.sent_ps_buffered) { 952 } else if (!rx->u.rx.sent_ps_buffered) {
953 /*
954 * FIXME: This can be the result of a race condition between
955 * us expiring a frame and the station polling for it.
956 * Should we send it a null-func frame indicating we
957 * have nothing buffered for it?
958 */
990 printk(KERN_DEBUG "%s: STA %s sent PS Poll even " 959 printk(KERN_DEBUG "%s: STA %s sent PS Poll even "
991 "though there is no buffered frames for it\n", 960 "though there is no buffered frames for it\n",
992 rx->dev->name, print_mac(mac, rx->sta->addr)); 961 rx->dev->name, print_mac(mac, rx->sta->addr));
993#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 962#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
994
995 } 963 }
996 964
997 /* Free PS Poll skb here instead of returning TXRX_DROP that would 965 /* Free PS Poll skb here instead of returning RX_DROP that would
998 * count as a dropped frame. */ 966 * count as a dropped frame. */
999 dev_kfree_skb(rx->skb); 967 dev_kfree_skb(rx->skb);
1000 968
1001 return TXRX_QUEUED; 969 return RX_QUEUED;
1002} 970}
1003 971
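The PS-poll response above signals further buffered traffic through the MoreData bit of the 802.11 frame control field, which is stored little-endian. A tiny sketch of that bit manipulation follows; the bit value comes from the standard frame-control layout, while the helper name and the use of le16toh/htole16 (standing in for cpu_to_le16/le16_to_cpu) are assumptions for the example.

#include <stdint.h>
#include <endian.h>

#define EX_FCTL_MOREDATA 0x2000         /* MoreData bit in the frame control field */

static void ex_set_moredata(uint16_t *frame_control_le, int more_buffered)
{
        uint16_t fc = le16toh(*frame_control_le);

        if (more_buffered)
                fc |= EX_FCTL_MOREDATA;
        else
                fc &= ~EX_FCTL_MOREDATA;

        *frame_control_le = htole16(fc);
}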
1004static ieee80211_txrx_result 972static ieee80211_rx_result
1005ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx) 973ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx)
1006{ 974{
1007 u16 fc = rx->fc; 975 u16 fc = rx->fc;
@@ -1009,7 +977,7 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx)
1009 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data; 977 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data;
1010 978
1011 if (!WLAN_FC_IS_QOS_DATA(fc)) 979 if (!WLAN_FC_IS_QOS_DATA(fc))
1012 return TXRX_CONTINUE; 980 return RX_CONTINUE;
1013 981
1014 /* remove the qos control field, update frame type and meta-data */ 982 /* remove the qos control field, update frame type and meta-data */
1015 memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2); 983 memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2);
@@ -1018,17 +986,17 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx)
1018 rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA; 986 rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA;
1019 hdr->frame_control = cpu_to_le16(fc); 987 hdr->frame_control = cpu_to_le16(fc);
1020 988
1021 return TXRX_CONTINUE; 989 return RX_CONTINUE;
1022} 990}
1023 991
1024static int 992static int
1025ieee80211_802_1x_port_control(struct ieee80211_txrx_data *rx) 993ieee80211_802_1x_port_control(struct ieee80211_txrx_data *rx)
1026{ 994{
1027 if (unlikely(rx->sdata->ieee802_1x_pac && 995 if (unlikely(!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED))) {
1028 (!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED)))) {
1029#ifdef CONFIG_MAC80211_DEBUG 996#ifdef CONFIG_MAC80211_DEBUG
1030 printk(KERN_DEBUG "%s: dropped frame " 997 if (net_ratelimit())
1031 "(unauthorized port)\n", rx->dev->name); 998 printk(KERN_DEBUG "%s: dropped frame "
999 "(unauthorized port)\n", rx->dev->name);
1032#endif /* CONFIG_MAC80211_DEBUG */ 1000#endif /* CONFIG_MAC80211_DEBUG */
1033 return -EACCES; 1001 return -EACCES;
1034 } 1002 }
@@ -1275,7 +1243,7 @@ ieee80211_deliver_skb(struct ieee80211_txrx_data *rx)
1275 } 1243 }
1276} 1244}
1277 1245
1278static ieee80211_txrx_result 1246static ieee80211_rx_result
1279ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx) 1247ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1280{ 1248{
1281 struct net_device *dev = rx->dev; 1249 struct net_device *dev = rx->dev;
@@ -1291,17 +1259,17 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1291 1259
1292 fc = rx->fc; 1260 fc = rx->fc;
1293 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) 1261 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
1294 return TXRX_CONTINUE; 1262 return RX_CONTINUE;
1295 1263
1296 if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) 1264 if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
1297 return TXRX_DROP; 1265 return RX_DROP_MONITOR;
1298 1266
1299 if (!(rx->flags & IEEE80211_TXRXD_RX_AMSDU)) 1267 if (!(rx->flags & IEEE80211_TXRXD_RX_AMSDU))
1300 return TXRX_CONTINUE; 1268 return RX_CONTINUE;
1301 1269
1302 err = ieee80211_data_to_8023(rx); 1270 err = ieee80211_data_to_8023(rx);
1303 if (unlikely(err)) 1271 if (unlikely(err))
1304 return TXRX_DROP; 1272 return RX_DROP_UNUSABLE;
1305 1273
1306 skb->dev = dev; 1274 skb->dev = dev;
1307 1275
@@ -1311,7 +1279,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1311 /* skip the wrapping header */ 1279 /* skip the wrapping header */
1312 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr)); 1280 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1313 if (!eth) 1281 if (!eth)
1314 return TXRX_DROP; 1282 return RX_DROP_UNUSABLE;
1315 1283
1316 while (skb != frame) { 1284 while (skb != frame) {
1317 u8 padding; 1285 u8 padding;
@@ -1326,7 +1294,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1326 /* the last MSDU has no padding */ 1294 /* the last MSDU has no padding */
1327 if (subframe_len > remaining) { 1295 if (subframe_len > remaining) {
1328 printk(KERN_DEBUG "%s: wrong buffer size", dev->name); 1296 printk(KERN_DEBUG "%s: wrong buffer size", dev->name);
1329 return TXRX_DROP; 1297 return RX_DROP_UNUSABLE;
1330 } 1298 }
1331 1299
1332 skb_pull(skb, sizeof(struct ethhdr)); 1300 skb_pull(skb, sizeof(struct ethhdr));
@@ -1338,7 +1306,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1338 subframe_len); 1306 subframe_len);
1339 1307
1340 if (frame == NULL) 1308 if (frame == NULL)
1341 return TXRX_DROP; 1309 return RX_DROP_UNUSABLE;
1342 1310
1343 skb_reserve(frame, local->hw.extra_tx_headroom + 1311 skb_reserve(frame, local->hw.extra_tx_headroom +
1344 sizeof(struct ethhdr)); 1312 sizeof(struct ethhdr));
@@ -1351,7 +1319,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1351 printk(KERN_DEBUG "%s: wrong buffer size ", 1319 printk(KERN_DEBUG "%s: wrong buffer size ",
1352 dev->name); 1320 dev->name);
1353 dev_kfree_skb(frame); 1321 dev_kfree_skb(frame);
1354 return TXRX_DROP; 1322 return RX_DROP_UNUSABLE;
1355 } 1323 }
1356 } 1324 }
1357 1325
@@ -1381,7 +1349,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1381 1349
1382 if (!ieee80211_frame_allowed(rx)) { 1350 if (!ieee80211_frame_allowed(rx)) {
1383 if (skb == frame) /* last frame */ 1351 if (skb == frame) /* last frame */
1384 return TXRX_DROP; 1352 return RX_DROP_UNUSABLE;
1385 dev_kfree_skb(frame); 1353 dev_kfree_skb(frame);
1386 continue; 1354 continue;
1387 } 1355 }
@@ -1389,10 +1357,10 @@ ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
1389 ieee80211_deliver_skb(rx); 1357 ieee80211_deliver_skb(rx);
1390 } 1358 }
1391 1359
1392 return TXRX_QUEUED; 1360 return RX_QUEUED;
1393} 1361}
1394 1362
1395static ieee80211_txrx_result 1363static ieee80211_rx_result
1396ieee80211_rx_h_data(struct ieee80211_txrx_data *rx) 1364ieee80211_rx_h_data(struct ieee80211_txrx_data *rx)
1397{ 1365{
1398 struct net_device *dev = rx->dev; 1366 struct net_device *dev = rx->dev;
@@ -1401,17 +1369,17 @@ ieee80211_rx_h_data(struct ieee80211_txrx_data *rx)
1401 1369
1402 fc = rx->fc; 1370 fc = rx->fc;
1403 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) 1371 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
1404 return TXRX_CONTINUE; 1372 return RX_CONTINUE;
1405 1373
1406 if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) 1374 if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
1407 return TXRX_DROP; 1375 return RX_DROP_MONITOR;
1408 1376
1409 err = ieee80211_data_to_8023(rx); 1377 err = ieee80211_data_to_8023(rx);
1410 if (unlikely(err)) 1378 if (unlikely(err))
1411 return TXRX_DROP; 1379 return RX_DROP_UNUSABLE;
1412 1380
1413 if (!ieee80211_frame_allowed(rx)) 1381 if (!ieee80211_frame_allowed(rx))
1414 return TXRX_DROP; 1382 return RX_DROP_MONITOR;
1415 1383
1416 rx->skb->dev = dev; 1384 rx->skb->dev = dev;
1417 1385
@@ -1420,10 +1388,10 @@ ieee80211_rx_h_data(struct ieee80211_txrx_data *rx)
1420 1388
1421 ieee80211_deliver_skb(rx); 1389 ieee80211_deliver_skb(rx);
1422 1390
1423 return TXRX_QUEUED; 1391 return RX_QUEUED;
1424} 1392}
1425 1393
1426static ieee80211_txrx_result 1394static ieee80211_rx_result
1427ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx) 1395ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx)
1428{ 1396{
1429 struct ieee80211_local *local = rx->local; 1397 struct ieee80211_local *local = rx->local;
@@ -1435,15 +1403,15 @@ ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx)
1435 u16 tid; 1403 u16 tid;
1436 1404
1437 if (likely((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL)) 1405 if (likely((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL))
1438 return TXRX_CONTINUE; 1406 return RX_CONTINUE;
1439 1407
1440 if ((rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ) { 1408 if ((rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ) {
1441 if (!rx->sta) 1409 if (!rx->sta)
1442 return TXRX_CONTINUE; 1410 return RX_CONTINUE;
1443 tid = le16_to_cpu(bar->control) >> 12; 1411 tid = le16_to_cpu(bar->control) >> 12;
1444 tid_agg_rx = &(rx->sta->ampdu_mlme.tid_rx[tid]); 1412 tid_agg_rx = &(rx->sta->ampdu_mlme.tid_rx[tid]);
1445 if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL) 1413 if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL)
1446 return TXRX_CONTINUE; 1414 return RX_CONTINUE;
1447 1415
1448 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; 1416 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
1449 1417
@@ -1460,19 +1428,19 @@ ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx)
1460 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, 1428 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
1461 start_seq_num, 1); 1429 start_seq_num, 1);
1462 rcu_read_unlock(); 1430 rcu_read_unlock();
1463 return TXRX_DROP; 1431 return RX_DROP_UNUSABLE;
1464 } 1432 }
1465 1433
1466 return TXRX_CONTINUE; 1434 return RX_CONTINUE;
1467} 1435}
1468 1436
1469static ieee80211_txrx_result 1437static ieee80211_rx_result
1470ieee80211_rx_h_mgmt(struct ieee80211_txrx_data *rx) 1438ieee80211_rx_h_mgmt(struct ieee80211_txrx_data *rx)
1471{ 1439{
1472 struct ieee80211_sub_if_data *sdata; 1440 struct ieee80211_sub_if_data *sdata;
1473 1441
1474 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) 1442 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))
1475 return TXRX_DROP; 1443 return RX_DROP_MONITOR;
1476 1444
1477 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 1445 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1478 if ((sdata->vif.type == IEEE80211_IF_TYPE_STA || 1446 if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
@@ -1480,56 +1448,13 @@ ieee80211_rx_h_mgmt(struct ieee80211_txrx_data *rx)
1480 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) 1448 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
1481 ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->u.rx.status); 1449 ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->u.rx.status);
1482 else 1450 else
1483 return TXRX_DROP; 1451 return RX_DROP_MONITOR;
1484 1452
1485 return TXRX_QUEUED; 1453 return RX_QUEUED;
1486}
1487
1488static inline ieee80211_txrx_result __ieee80211_invoke_rx_handlers(
1489 struct ieee80211_local *local,
1490 ieee80211_rx_handler *handlers,
1491 struct ieee80211_txrx_data *rx,
1492 struct sta_info *sta)
1493{
1494 ieee80211_rx_handler *handler;
1495 ieee80211_txrx_result res = TXRX_DROP;
1496
1497 for (handler = handlers; *handler != NULL; handler++) {
1498 res = (*handler)(rx);
1499
1500 switch (res) {
1501 case TXRX_CONTINUE:
1502 continue;
1503 case TXRX_DROP:
1504 I802_DEBUG_INC(local->rx_handlers_drop);
1505 if (sta)
1506 sta->rx_dropped++;
1507 break;
1508 case TXRX_QUEUED:
1509 I802_DEBUG_INC(local->rx_handlers_queued);
1510 break;
1511 }
1512 break;
1513 }
1514
1515 if (res == TXRX_DROP)
1516 dev_kfree_skb(rx->skb);
1517 return res;
1518}
1519
1520static inline void ieee80211_invoke_rx_handlers(struct ieee80211_local *local,
1521 ieee80211_rx_handler *handlers,
1522 struct ieee80211_txrx_data *rx,
1523 struct sta_info *sta)
1524{
1525 if (__ieee80211_invoke_rx_handlers(local, handlers, rx, sta) ==
1526 TXRX_CONTINUE)
1527 dev_kfree_skb(rx->skb);
1528} 1454}
1529 1455
1530static void ieee80211_rx_michael_mic_report(struct net_device *dev, 1456static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1531 struct ieee80211_hdr *hdr, 1457 struct ieee80211_hdr *hdr,
1532 struct sta_info *sta,
1533 struct ieee80211_txrx_data *rx) 1458 struct ieee80211_txrx_data *rx)
1534{ 1459{
1535 int keyidx, hdrlen; 1460 int keyidx, hdrlen;
@@ -1548,7 +1473,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1548 dev->name, print_mac(mac, hdr->addr2), 1473 dev->name, print_mac(mac, hdr->addr2),
1549 print_mac(mac2, hdr->addr1), keyidx); 1474 print_mac(mac2, hdr->addr1), keyidx);
1550 1475
1551 if (!sta) { 1476 if (!rx->sta) {
1552 /* 1477 /*
1553 * Some hardware seem to generate incorrect Michael MIC 1478 * Some hardware seem to generate incorrect Michael MIC
1554 * reports; ignore them to avoid triggering countermeasures. 1479 * reports; ignore them to avoid triggering countermeasures.
@@ -1600,7 +1525,88 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1600 rx->skb = NULL; 1525 rx->skb = NULL;
1601} 1526}
1602 1527
1603ieee80211_rx_handler ieee80211_rx_handlers[] = 1528static void ieee80211_rx_cooked_monitor(struct ieee80211_txrx_data *rx)
1529{
1530 struct ieee80211_sub_if_data *sdata;
1531 struct ieee80211_local *local = rx->local;
1532 struct ieee80211_rtap_hdr {
1533 struct ieee80211_radiotap_header hdr;
1534 u8 flags;
1535 u8 rate;
1536 __le16 chan_freq;
1537 __le16 chan_flags;
1538 } __attribute__ ((packed)) *rthdr;
1539 struct sk_buff *skb = rx->skb, *skb2;
1540 struct net_device *prev_dev = NULL;
1541 struct ieee80211_rx_status *status = rx->u.rx.status;
1542
1543 if (rx->flags & IEEE80211_TXRXD_RX_CMNTR_REPORTED)
1544 goto out_free_skb;
1545
1546 if (skb_headroom(skb) < sizeof(*rthdr) &&
1547 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
1548 goto out_free_skb;
1549
1550 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
1551 memset(rthdr, 0, sizeof(*rthdr));
1552 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
1553 rthdr->hdr.it_present =
1554 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
1555 (1 << IEEE80211_RADIOTAP_RATE) |
1556 (1 << IEEE80211_RADIOTAP_CHANNEL));
1557
1558 rthdr->rate = rx->u.rx.rate->bitrate / 5;
1559 rthdr->chan_freq = cpu_to_le16(status->freq);
1560
1561 if (status->band == IEEE80211_BAND_5GHZ)
1562 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
1563 IEEE80211_CHAN_5GHZ);
1564 else
1565 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
1566 IEEE80211_CHAN_2GHZ);
1567
1568 skb_set_mac_header(skb, 0);
1569 skb->ip_summed = CHECKSUM_UNNECESSARY;
1570 skb->pkt_type = PACKET_OTHERHOST;
1571 skb->protocol = htons(ETH_P_802_2);
1572
1573 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1574 if (!netif_running(sdata->dev))
1575 continue;
1576
1577 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR ||
1578 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1579 continue;
1580
1581 if (prev_dev) {
1582 skb2 = skb_clone(skb, GFP_ATOMIC);
1583 if (skb2) {
1584 skb2->dev = prev_dev;
1585 netif_rx(skb2);
1586 }
1587 }
1588
1589 prev_dev = sdata->dev;
1590 sdata->dev->stats.rx_packets++;
1591 sdata->dev->stats.rx_bytes += skb->len;
1592 }
1593
1594 if (prev_dev) {
1595 skb->dev = prev_dev;
1596 netif_rx(skb);
1597 skb = NULL;
1598 } else
1599 goto out_free_skb;
1600
1601 rx->flags |= IEEE80211_TXRXD_RX_CMNTR_REPORTED;
1602 return;
1603
1604 out_free_skb:
1605 dev_kfree_skb(skb);
1606}
1607
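The cooked-monitor path above prepends a minimal radiotap header (flags, rate, channel) before handing the frame to monitor interfaces. A rough user-space sketch of that layout follows; the struct name, helper, and channel-flag values are assumptions for illustration rather than the kernel's constants.

#include <stdint.h>
#include <string.h>

/* Fixed radiotap preamble followed by the three fields whose bits are
 * set in it_present, in bit order: FLAGS (bit 1), RATE (bit 2),
 * CHANNEL (bit 3). */
struct ex_radiotap_hdr {
        uint8_t  it_version;    /* always 0 */
        uint8_t  it_pad;
        uint16_t it_len;        /* little-endian total length of this header */
        uint32_t it_present;    /* little-endian bitmap of present fields */
        uint8_t  flags;
        uint8_t  rate;          /* in 500 kbit/s units */
        uint16_t chan_freq;     /* MHz */
        uint16_t chan_flags;
} __attribute__((packed));

static void ex_fill_radiotap(struct ex_radiotap_hdr *rt,
                             unsigned int bitrate_100kbps,
                             unsigned int freq_mhz, int is_5ghz)
{
        memset(rt, 0, sizeof(*rt));
        rt->it_len = sizeof(*rt);                       /* assumes a little-endian host */
        rt->it_present = (1u << 1) | (1u << 2) | (1u << 3);
        rt->rate = bitrate_100kbps / 5;                 /* 100 kbit/s -> 500 kbit/s units */
        rt->chan_freq = freq_mhz;
        /* assumed channel-flag values: OFDM|5GHZ vs. DYN|2GHZ */
        rt->chan_flags = is_5ghz ? 0x0140 : 0x0480;
}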
1608typedef ieee80211_rx_result (*ieee80211_rx_handler)(struct ieee80211_txrx_data *);
1609static ieee80211_rx_handler ieee80211_rx_handlers[] =
1604{ 1610{
1605 ieee80211_rx_h_if_stats, 1611 ieee80211_rx_h_if_stats,
1606 ieee80211_rx_h_passive_scan, 1612 ieee80211_rx_h_passive_scan,
@@ -1622,6 +1628,47 @@ ieee80211_rx_handler ieee80211_rx_handlers[] =
1622 NULL 1628 NULL
1623}; 1629};
1624 1630
1631static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1632 struct ieee80211_txrx_data *rx,
1633 struct sk_buff *skb)
1634{
1635 ieee80211_rx_handler *handler;
1636 ieee80211_rx_result res = RX_DROP_MONITOR;
1637
1638 rx->skb = skb;
1639 rx->sdata = sdata;
1640 rx->dev = sdata->dev;
1641
1642 for (handler = ieee80211_rx_handlers; *handler != NULL; handler++) {
1643 res = (*handler)(rx);
1644
1645 switch (res) {
1646 case RX_CONTINUE:
1647 continue;
1648 case RX_DROP_UNUSABLE:
1649 case RX_DROP_MONITOR:
1650 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
1651 if (rx->sta)
1652 rx->sta->rx_dropped++;
1653 break;
1654 case RX_QUEUED:
1655 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
1656 break;
1657 }
1658 break;
1659 }
1660
1661 switch (res) {
1662 case RX_CONTINUE:
1663 case RX_DROP_MONITOR:
1664 ieee80211_rx_cooked_monitor(rx);
1665 break;
1666 case RX_DROP_UNUSABLE:
1667 dev_kfree_skb(rx->skb);
1668 break;
1669 }
1670}
1671
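The new ieee80211_invoke_rx_handlers() above walks a NULL-terminated handler array and lets the first result other than "continue" decide the frame's fate (queue, hand to the cooked monitor, or free). A minimal standalone illustration of that dispatch pattern, with made-up names:

#include <stdio.h>

enum ex_rx_result {
        EX_RX_CONTINUE,
        EX_RX_QUEUED,
        EX_RX_DROP_MONITOR,
        EX_RX_DROP_UNUSABLE,
};

typedef enum ex_rx_result (*ex_rx_handler)(void *frame);

static enum ex_rx_result ex_check(void *frame)   { return EX_RX_CONTINUE; }
static enum ex_rx_result ex_deliver(void *frame) { return EX_RX_QUEUED; }

static ex_rx_handler ex_handlers[] = { ex_check, ex_deliver, NULL };

/* Walk the chain; the first handler that does not ask to continue
 * terminates processing and its result is returned. */
static enum ex_rx_result ex_invoke(void *frame)
{
        ex_rx_handler *h;
        enum ex_rx_result res = EX_RX_DROP_MONITOR;

        for (h = ex_handlers; *h; h++) {
                res = (*h)(frame);
                if (res != EX_RX_CONTINUE)
                        break;
        }
        return res;
}

int main(void)
{
        printf("result = %d\n", ex_invoke(NULL));
        return 0;
}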
1625/* main receive path */ 1672/* main receive path */
1626 1673
1627static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, 1674static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
@@ -1649,7 +1696,10 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1649 case IEEE80211_IF_TYPE_IBSS: 1696 case IEEE80211_IF_TYPE_IBSS:
1650 if (!bssid) 1697 if (!bssid)
1651 return 0; 1698 return 0;
1652 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { 1699 if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT &&
1700 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
1701 return 1;
1702 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
1653 if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) 1703 if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN))
1654 return 0; 1704 return 0;
1655 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; 1705 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH;
@@ -1707,11 +1757,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1707static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 1757static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1708 struct sk_buff *skb, 1758 struct sk_buff *skb,
1709 struct ieee80211_rx_status *status, 1759 struct ieee80211_rx_status *status,
1710 u32 load) 1760 u32 load,
1761 struct ieee80211_rate *rate)
1711{ 1762{
1712 struct ieee80211_local *local = hw_to_local(hw); 1763 struct ieee80211_local *local = hw_to_local(hw);
1713 struct ieee80211_sub_if_data *sdata; 1764 struct ieee80211_sub_if_data *sdata;
1714 struct sta_info *sta;
1715 struct ieee80211_hdr *hdr; 1765 struct ieee80211_hdr *hdr;
1716 struct ieee80211_txrx_data rx; 1766 struct ieee80211_txrx_data rx;
1717 u16 type; 1767 u16 type;
@@ -1727,40 +1777,31 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1727 1777
1728 rx.u.rx.status = status; 1778 rx.u.rx.status = status;
1729 rx.u.rx.load = load; 1779 rx.u.rx.load = load;
1780 rx.u.rx.rate = rate;
1730 rx.fc = le16_to_cpu(hdr->frame_control); 1781 rx.fc = le16_to_cpu(hdr->frame_control);
1731 type = rx.fc & IEEE80211_FCTL_FTYPE; 1782 type = rx.fc & IEEE80211_FCTL_FTYPE;
1732 1783
1733 if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) 1784 if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT)
1734 local->dot11ReceivedFragmentCount++; 1785 local->dot11ReceivedFragmentCount++;
1735 1786
1736 sta = rx.sta = sta_info_get(local, hdr->addr2); 1787 rx.sta = sta_info_get(local, hdr->addr2);
1737 if (sta) { 1788 if (rx.sta) {
1738 rx.dev = rx.sta->dev; 1789 rx.dev = rx.sta->dev;
1739 rx.sdata = IEEE80211_DEV_TO_SUB_IF(rx.dev); 1790 rx.sdata = IEEE80211_DEV_TO_SUB_IF(rx.dev);
1740 } 1791 }
1741 1792
1742 if ((status->flag & RX_FLAG_MMIC_ERROR)) { 1793 if ((status->flag & RX_FLAG_MMIC_ERROR)) {
1743 ieee80211_rx_michael_mic_report(local->mdev, hdr, sta, &rx); 1794 ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx);
1744 goto end; 1795 goto end;
1745 } 1796 }
1746 1797
1747 if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning)) 1798 if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning))
1748 rx.flags |= IEEE80211_TXRXD_RXIN_SCAN; 1799 rx.flags |= IEEE80211_TXRXD_RXIN_SCAN;
1749 1800
1750 if (__ieee80211_invoke_rx_handlers(local, local->rx_pre_handlers, &rx, 1801 ieee80211_parse_qos(&rx);
1751 sta) != TXRX_CONTINUE) 1802 ieee80211_verify_ip_alignment(&rx);
1752 goto end;
1753 skb = rx.skb;
1754 1803
1755 if (sta && !(sta->flags & (WLAN_STA_WDS | WLAN_STA_ASSOC_AP)) && 1804 skb = rx.skb;
1756 !atomic_read(&local->iff_promiscs) &&
1757 !is_multicast_ether_addr(hdr->addr1)) {
1758 rx.flags |= IEEE80211_TXRXD_RXRA_MATCH;
1759 ieee80211_invoke_rx_handlers(local, local->rx_handlers, &rx,
1760 rx.sta);
1761 sta_info_put(sta);
1762 return;
1763 }
1764 1805
1765 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 1806 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1766 if (!netif_running(sdata->dev)) 1807 if (!netif_running(sdata->dev))
@@ -1772,8 +1813,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1772 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 1813 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
1773 rx.flags |= IEEE80211_TXRXD_RXRA_MATCH; 1814 rx.flags |= IEEE80211_TXRXD_RXRA_MATCH;
1774 prepares = prepare_for_handlers(sdata, bssid, &rx, hdr); 1815 prepares = prepare_for_handlers(sdata, bssid, &rx, hdr);
1775 /* prepare_for_handlers can change sta */
1776 sta = rx.sta;
1777 1816
1778 if (!prepares) 1817 if (!prepares)
1779 continue; 1818 continue;
@@ -1804,26 +1843,18 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1804 continue; 1843 continue;
1805 } 1844 }
1806 rx.fc = le16_to_cpu(hdr->frame_control); 1845 rx.fc = le16_to_cpu(hdr->frame_control);
1807 rx.skb = skb_new; 1846 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
1808 rx.dev = prev->dev;
1809 rx.sdata = prev;
1810 ieee80211_invoke_rx_handlers(local, local->rx_handlers,
1811 &rx, sta);
1812 prev = sdata; 1847 prev = sdata;
1813 } 1848 }
1814 if (prev) { 1849 if (prev) {
1815 rx.fc = le16_to_cpu(hdr->frame_control); 1850 rx.fc = le16_to_cpu(hdr->frame_control);
1816 rx.skb = skb; 1851 ieee80211_invoke_rx_handlers(prev, &rx, skb);
1817 rx.dev = prev->dev;
1818 rx.sdata = prev;
1819 ieee80211_invoke_rx_handlers(local, local->rx_handlers,
1820 &rx, sta);
1821 } else 1852 } else
1822 dev_kfree_skb(skb); 1853 dev_kfree_skb(skb);
1823 1854
1824 end: 1855 end:
1825 if (sta) 1856 if (rx.sta)
1826 sta_info_put(sta); 1857 sta_info_put(rx.sta);
1827} 1858}
1828 1859
1829#define SEQ_MODULO 0x1000 1860#define SEQ_MODULO 0x1000
@@ -1859,6 +1890,8 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
1859 u16 head_seq_num, buf_size; 1890 u16 head_seq_num, buf_size;
1860 int index; 1891 int index;
1861 u32 pkt_load; 1892 u32 pkt_load;
1893 struct ieee80211_supported_band *sband;
1894 struct ieee80211_rate *rate;
1862 1895
1863 buf_size = tid_agg_rx->buf_size; 1896 buf_size = tid_agg_rx->buf_size;
1864 head_seq_num = tid_agg_rx->head_seq_num; 1897 head_seq_num = tid_agg_rx->head_seq_num;
@@ -1889,12 +1922,14 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
1889 memcpy(&status, 1922 memcpy(&status,
1890 tid_agg_rx->reorder_buf[index]->cb, 1923 tid_agg_rx->reorder_buf[index]->cb,
1891 sizeof(status)); 1924 sizeof(status));
1925 sband = local->hw.wiphy->bands[status.band];
1926 rate = &sband->bitrates[status.rate_idx];
1892 pkt_load = ieee80211_rx_load_stats(local, 1927 pkt_load = ieee80211_rx_load_stats(local,
1893 tid_agg_rx->reorder_buf[index], 1928 tid_agg_rx->reorder_buf[index],
1894 &status); 1929 &status, rate);
1895 __ieee80211_rx_handle_packet(hw, 1930 __ieee80211_rx_handle_packet(hw,
1896 tid_agg_rx->reorder_buf[index], 1931 tid_agg_rx->reorder_buf[index],
1897 &status, pkt_load); 1932 &status, pkt_load, rate);
1898 tid_agg_rx->stored_mpdu_num--; 1933 tid_agg_rx->stored_mpdu_num--;
1899 tid_agg_rx->reorder_buf[index] = NULL; 1934 tid_agg_rx->reorder_buf[index] = NULL;
1900 } 1935 }
@@ -1934,11 +1969,13 @@ u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
1934 /* release the reordered frame back to stack */ 1969 /* release the reordered frame back to stack */
1935 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, 1970 memcpy(&status, tid_agg_rx->reorder_buf[index]->cb,
1936 sizeof(status)); 1971 sizeof(status));
1972 sband = local->hw.wiphy->bands[status.band];
1973 rate = &sband->bitrates[status.rate_idx];
1937 pkt_load = ieee80211_rx_load_stats(local, 1974 pkt_load = ieee80211_rx_load_stats(local,
1938 tid_agg_rx->reorder_buf[index], 1975 tid_agg_rx->reorder_buf[index],
1939 &status); 1976 &status, rate);
1940 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], 1977 __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
1941 &status, pkt_load); 1978 &status, pkt_load, rate);
1942 tid_agg_rx->stored_mpdu_num--; 1979 tid_agg_rx->stored_mpdu_num--;
1943 tid_agg_rx->reorder_buf[index] = NULL; 1980 tid_agg_rx->reorder_buf[index] = NULL;
1944 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 1981 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -2019,6 +2056,25 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2019{ 2056{
2020 struct ieee80211_local *local = hw_to_local(hw); 2057 struct ieee80211_local *local = hw_to_local(hw);
2021 u32 pkt_load; 2058 u32 pkt_load;
2059 struct ieee80211_rate *rate = NULL;
2060 struct ieee80211_supported_band *sband;
2061
2062 if (status->band < 0 ||
2063 status->band > IEEE80211_NUM_BANDS) {
2064 WARN_ON(1);
2065 return;
2066 }
2067
2068 sband = local->hw.wiphy->bands[status->band];
2069
2070 if (!sband ||
2071 status->rate_idx < 0 ||
2072 status->rate_idx >= sband->n_bitrates) {
2073 WARN_ON(1);
2074 return;
2075 }
2076
2077 rate = &sband->bitrates[status->rate_idx];
2022 2078
2023 /* 2079 /*
2024 * key references and virtual interfaces are protected using RCU 2080 * key references and virtual interfaces are protected using RCU
@@ -2033,17 +2089,17 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2033 * if it was previously present. 2089 * if it was previously present.
2034 * Also, frames with less than 16 bytes are dropped. 2090 * Also, frames with less than 16 bytes are dropped.
2035 */ 2091 */
2036 skb = ieee80211_rx_monitor(local, skb, status); 2092 skb = ieee80211_rx_monitor(local, skb, status, rate);
2037 if (!skb) { 2093 if (!skb) {
2038 rcu_read_unlock(); 2094 rcu_read_unlock();
2039 return; 2095 return;
2040 } 2096 }
2041 2097
2042 pkt_load = ieee80211_rx_load_stats(local, skb, status); 2098 pkt_load = ieee80211_rx_load_stats(local, skb, status, rate);
2043 local->channel_use_raw += pkt_load; 2099 local->channel_use_raw += pkt_load;
2044 2100
2045 if (!ieee80211_rx_reorder_ampdu(local, skb)) 2101 if (!ieee80211_rx_reorder_ampdu(local, skb))
2046 __ieee80211_rx_handle_packet(hw, skb, status, pkt_load); 2102 __ieee80211_rx_handle_packet(hw, skb, status, pkt_load, rate);
2047 2103
2048 rcu_read_unlock(); 2104 rcu_read_unlock();
2049} 2105}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 1f74bd296357..e384e6632d97 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -55,48 +55,34 @@ static int sta_info_hash_del(struct ieee80211_local *local,
55 return -ENOENT; 55 return -ENOENT;
56} 56}
57 57
58struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr) 58/* must hold local->sta_lock */
59static struct sta_info *__sta_info_find(struct ieee80211_local *local,
60 u8 *addr)
59{ 61{
60 struct sta_info *sta; 62 struct sta_info *sta;
61 63
62 read_lock_bh(&local->sta_lock);
63 sta = local->sta_hash[STA_HASH(addr)]; 64 sta = local->sta_hash[STA_HASH(addr)];
64 while (sta) { 65 while (sta) {
65 if (memcmp(sta->addr, addr, ETH_ALEN) == 0) { 66 if (compare_ether_addr(sta->addr, addr) == 0)
66 __sta_info_get(sta);
67 break; 67 break;
68 }
69 sta = sta->hnext; 68 sta = sta->hnext;
70 } 69 }
71 read_unlock_bh(&local->sta_lock);
72
73 return sta; 70 return sta;
74} 71}
75EXPORT_SYMBOL(sta_info_get);
76 72
77int sta_info_min_txrate_get(struct ieee80211_local *local) 73struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr)
78{ 74{
79 struct sta_info *sta; 75 struct sta_info *sta;
80 struct ieee80211_hw_mode *mode;
81 int min_txrate = 9999999;
82 int i;
83 76
84 read_lock_bh(&local->sta_lock); 77 read_lock_bh(&local->sta_lock);
85 mode = local->oper_hw_mode; 78 sta = __sta_info_find(local, addr);
86 for (i = 0; i < STA_HASH_SIZE; i++) { 79 if (sta)
87 sta = local->sta_hash[i]; 80 __sta_info_get(sta);
88 while (sta) {
89 if (sta->txrate < min_txrate)
90 min_txrate = sta->txrate;
91 sta = sta->hnext;
92 }
93 }
94 read_unlock_bh(&local->sta_lock); 81 read_unlock_bh(&local->sta_lock);
95 if (min_txrate == 9999999)
96 min_txrate = 0;
97 82
98 return mode->rates[min_txrate].rate; 83 return sta;
99} 84}
85EXPORT_SYMBOL(sta_info_get);
100 86
101 87
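The reworked sta_info_get() above splits the hash-chain walk into a lock-held __sta_info_find() and takes the reference before the lock is dropped, so the entry cannot vanish under the caller. A generic sketch of that pattern follows; the structures and locking primitives here are illustrative, not the kernel's.

#include <pthread.h>
#include <string.h>

struct ex_sta {
        struct ex_sta *hnext;           /* hash-chain link */
        unsigned char addr[6];
        int refcnt;                     /* stands in for the kref */
};

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ex_sta *ex_hash[256];

static unsigned int ex_hash_fn(const unsigned char *addr)
{
        return addr[5];                 /* hash on the last address byte */
}

/* must hold ex_lock, like __sta_info_find() */
static struct ex_sta *ex_find(const unsigned char *addr)
{
        struct ex_sta *sta = ex_hash[ex_hash_fn(addr)];

        while (sta && memcmp(sta->addr, addr, 6) != 0)
                sta = sta->hnext;
        return sta;
}

/* takes the reference under the lock, like sta_info_get() */
static struct ex_sta *ex_get(const unsigned char *addr)
{
        struct ex_sta *sta;

        pthread_mutex_lock(&ex_lock);
        sta = ex_find(addr);
        if (sta)
                sta->refcnt++;
        pthread_mutex_unlock(&ex_lock);
        return sta;
}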
102static void sta_info_release(struct kref *kref) 88static void sta_info_release(struct kref *kref)
@@ -117,8 +103,10 @@ static void sta_info_release(struct kref *kref)
117 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { 103 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
118 dev_kfree_skb_any(skb); 104 dev_kfree_skb_any(skb);
119 } 105 }
120 for (i = 0; i < STA_TID_NUM; i++) 106 for (i = 0; i < STA_TID_NUM; i++) {
121 del_timer_sync(&sta->ampdu_mlme.tid_rx[i].session_timer); 107 del_timer_sync(&sta->ampdu_mlme.tid_rx[i].session_timer);
108 del_timer_sync(&sta->ampdu_mlme.tid_tx[i].addba_resp_timer);
109 }
122 rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv); 110 rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv);
123 rate_control_put(sta->rate_ctrl); 111 rate_control_put(sta->rate_ctrl);
124 kfree(sta); 112 kfree(sta);
@@ -132,8 +120,8 @@ void sta_info_put(struct sta_info *sta)
132EXPORT_SYMBOL(sta_info_put); 120EXPORT_SYMBOL(sta_info_put);
133 121
134 122
135struct sta_info * sta_info_add(struct ieee80211_local *local, 123struct sta_info *sta_info_add(struct ieee80211_local *local,
136 struct net_device *dev, u8 *addr, gfp_t gfp) 124 struct net_device *dev, u8 *addr, gfp_t gfp)
137{ 125{
138 struct sta_info *sta; 126 struct sta_info *sta;
139 int i; 127 int i;
@@ -141,7 +129,7 @@ struct sta_info * sta_info_add(struct ieee80211_local *local,
141 129
142 sta = kzalloc(sizeof(*sta), gfp); 130 sta = kzalloc(sizeof(*sta), gfp);
143 if (!sta) 131 if (!sta)
144 return NULL; 132 return ERR_PTR(-ENOMEM);
145 133
146 kref_init(&sta->kref); 134 kref_init(&sta->kref);
147 135
@@ -150,30 +138,45 @@ struct sta_info * sta_info_add(struct ieee80211_local *local,
150 if (!sta->rate_ctrl_priv) { 138 if (!sta->rate_ctrl_priv) {
151 rate_control_put(sta->rate_ctrl); 139 rate_control_put(sta->rate_ctrl);
152 kfree(sta); 140 kfree(sta);
153 return NULL; 141 return ERR_PTR(-ENOMEM);
154 } 142 }
155 143
156 memcpy(sta->addr, addr, ETH_ALEN); 144 memcpy(sta->addr, addr, ETH_ALEN);
157 sta->local = local; 145 sta->local = local;
158 sta->dev = dev; 146 sta->dev = dev;
159 spin_lock_init(&sta->ampdu_mlme.ampdu_rx); 147 spin_lock_init(&sta->ampdu_mlme.ampdu_rx);
148 spin_lock_init(&sta->ampdu_mlme.ampdu_tx);
160 for (i = 0; i < STA_TID_NUM; i++) { 149 for (i = 0; i < STA_TID_NUM; i++) {
161 /* timer_to_tid must be initialized with identity mapping to 150 /* timer_to_tid must be initialized with identity mapping to
162 * enable session_timer's data differentiation. refer to 151 * enable session_timer's data differentiation. refer to
163 * sta_rx_agg_session_timer_expired for usage */ 152 * sta_rx_agg_session_timer_expired for usage */
164 sta->timer_to_tid[i] = i; 153 sta->timer_to_tid[i] = i;
154 /* tid to tx queue: initialize according to HW (0 is valid) */
155 sta->tid_to_tx_q[i] = local->hw.queues;
165 /* rx timers */ 156 /* rx timers */
166 sta->ampdu_mlme.tid_rx[i].session_timer.function = 157 sta->ampdu_mlme.tid_rx[i].session_timer.function =
167 sta_rx_agg_session_timer_expired; 158 sta_rx_agg_session_timer_expired;
168 sta->ampdu_mlme.tid_rx[i].session_timer.data = 159 sta->ampdu_mlme.tid_rx[i].session_timer.data =
169 (unsigned long)&sta->timer_to_tid[i]; 160 (unsigned long)&sta->timer_to_tid[i];
170 init_timer(&sta->ampdu_mlme.tid_rx[i].session_timer); 161 init_timer(&sta->ampdu_mlme.tid_rx[i].session_timer);
162 /* tx timers */
163 sta->ampdu_mlme.tid_tx[i].addba_resp_timer.function =
164 sta_addba_resp_timer_expired;
165 sta->ampdu_mlme.tid_tx[i].addba_resp_timer.data =
166 (unsigned long)&sta->timer_to_tid[i];
167 init_timer(&sta->ampdu_mlme.tid_tx[i].addba_resp_timer);
171 } 168 }
172 skb_queue_head_init(&sta->ps_tx_buf); 169 skb_queue_head_init(&sta->ps_tx_buf);
173 skb_queue_head_init(&sta->tx_filtered); 170 skb_queue_head_init(&sta->tx_filtered);
174 __sta_info_get(sta); /* sta used by caller, decremented by
175 * sta_info_put() */
176 write_lock_bh(&local->sta_lock); 171 write_lock_bh(&local->sta_lock);
172 /* mark sta as used (by caller) */
173 __sta_info_get(sta);
174 /* check if STA exists already */
175 if (__sta_info_find(local, addr)) {
176 write_unlock_bh(&local->sta_lock);
177 sta_info_put(sta);
178 return ERR_PTR(-EEXIST);
179 }
177 list_add(&sta->list, &local->sta_list); 180 list_add(&sta->list, &local->sta_list);
178 local->num_sta++; 181 local->num_sta++;
179 sta_info_hash_add(local, sta); 182 sta_info_hash_add(local, sta);
@@ -204,6 +207,64 @@ struct sta_info * sta_info_add(struct ieee80211_local *local,
204 return sta; 207 return sta;
205} 208}
206 209
210static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
211{
212 /*
213 * This format has been mandated by the IEEE specifications,
214 * so this line may not be changed to use the __set_bit() format.
215 */
216 bss->tim[aid / 8] |= (1 << (aid % 8));
217}
218
219static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid)
220{
221 /*
222 * This format has been mandated by the IEEE specifications,
223 * so this line may not be changed to use the __clear_bit() format.
224 */
225 bss->tim[aid / 8] &= ~(1 << (aid % 8));
226}
227
228static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss,
229 struct sta_info *sta)
230{
231 if (bss)
232 __bss_tim_set(bss, sta->aid);
233 if (sta->local->ops->set_tim)
234 sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 1);
235}
236
237void sta_info_set_tim_bit(struct sta_info *sta)
238{
239 struct ieee80211_sub_if_data *sdata;
240
241 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
242
243 read_lock_bh(&sta->local->sta_lock);
244 __sta_info_set_tim_bit(sdata->bss, sta);
245 read_unlock_bh(&sta->local->sta_lock);
246}
247
248static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss,
249 struct sta_info *sta)
250{
251 if (bss)
252 __bss_tim_clear(bss, sta->aid);
253 if (sta->local->ops->set_tim)
254 sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 0);
255}
256
257void sta_info_clear_tim_bit(struct sta_info *sta)
258{
259 struct ieee80211_sub_if_data *sdata;
260
261 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
262
263 read_lock_bh(&sta->local->sta_lock);
264 __sta_info_clear_tim_bit(sdata->bss, sta);
265 read_unlock_bh(&sta->local->sta_lock);
266}
267
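The __bss_tim_set()/__bss_tim_clear() helpers above use the bitmap layout required by the standard: AID n lives in byte n / 8, bit n % 8. A small worked example of that indexing (purely illustrative):

#include <stdio.h>

int main(void)
{
        unsigned char tim[256 / 8] = { 0 };
        unsigned int aid = 13;

        tim[aid / 8] |= 1 << (aid % 8);         /* set: byte 1, mask 0x20 for AID 13 */
        printf("byte %u, mask 0x%02x\n", aid / 8, 1u << (aid % 8));

        tim[aid / 8] &= ~(1 << (aid % 8));      /* clear it again */
        return 0;
}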
207/* Caller must hold local->sta_lock */ 268/* Caller must hold local->sta_lock */
208void sta_info_remove(struct sta_info *sta) 269void sta_info_remove(struct sta_info *sta)
209{ 270{
@@ -220,10 +281,9 @@ void sta_info_remove(struct sta_info *sta)
220 sta->flags &= ~WLAN_STA_PS; 281 sta->flags &= ~WLAN_STA_PS;
221 if (sdata->bss) 282 if (sdata->bss)
222 atomic_dec(&sdata->bss->num_sta_ps); 283 atomic_dec(&sdata->bss->num_sta_ps);
284 __sta_info_clear_tim_bit(sdata->bss, sta);
223 } 285 }
224 local->num_sta--; 286 local->num_sta--;
225 sta_info_remove_aid_ptr(sta);
226
227} 287}
228 288
229void sta_info_free(struct sta_info *sta) 289void sta_info_free(struct sta_info *sta)
@@ -252,7 +312,7 @@ void sta_info_free(struct sta_info *sta)
252#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 312#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
253 313
254 ieee80211_key_free(sta->key); 314 ieee80211_key_free(sta->key);
255 sta->key = NULL; 315 WARN_ON(sta->key);
256 316
257 if (local->ops->sta_notify) { 317 if (local->ops->sta_notify) {
258 struct ieee80211_sub_if_data *sdata; 318 struct ieee80211_sub_if_data *sdata;
@@ -299,6 +359,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
299{ 359{
300 unsigned long flags; 360 unsigned long flags;
301 struct sk_buff *skb; 361 struct sk_buff *skb;
362 struct ieee80211_sub_if_data *sdata;
302 DECLARE_MAC_BUF(mac); 363 DECLARE_MAC_BUF(mac);
303 364
304 if (skb_queue_empty(&sta->ps_tx_buf)) 365 if (skb_queue_empty(&sta->ps_tx_buf))
@@ -307,21 +368,23 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
307 for (;;) { 368 for (;;) {
308 spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); 369 spin_lock_irqsave(&sta->ps_tx_buf.lock, flags);
309 skb = skb_peek(&sta->ps_tx_buf); 370 skb = skb_peek(&sta->ps_tx_buf);
310 if (sta_info_buffer_expired(local, sta, skb)) { 371 if (sta_info_buffer_expired(local, sta, skb))
311 skb = __skb_dequeue(&sta->ps_tx_buf); 372 skb = __skb_dequeue(&sta->ps_tx_buf);
312 if (skb_queue_empty(&sta->ps_tx_buf)) 373 else
313 sta->flags &= ~WLAN_STA_TIM;
314 } else
315 skb = NULL; 374 skb = NULL;
316 spin_unlock_irqrestore(&sta->ps_tx_buf.lock, flags); 375 spin_unlock_irqrestore(&sta->ps_tx_buf.lock, flags);
317 376
318 if (skb) { 377 if (!skb)
319 local->total_ps_buffered--;
320 printk(KERN_DEBUG "Buffered frame expired (STA "
321 "%s)\n", print_mac(mac, sta->addr));
322 dev_kfree_skb(skb);
323 } else
324 break; 378 break;
379
380 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
381 local->total_ps_buffered--;
382 printk(KERN_DEBUG "Buffered frame expired (STA "
383 "%s)\n", print_mac(mac, sta->addr));
384 dev_kfree_skb(skb);
385
386 if (skb_queue_empty(&sta->ps_tx_buf))
387 sta_info_clear_tim_bit(sta);
325 } 388 }
326} 389}
327 390
@@ -400,23 +463,6 @@ void sta_info_stop(struct ieee80211_local *local)
400 sta_info_flush(local, NULL); 463 sta_info_flush(local, NULL);
401} 464}
402 465
403void sta_info_remove_aid_ptr(struct sta_info *sta)
404{
405 struct ieee80211_sub_if_data *sdata;
406
407 if (sta->aid <= 0)
408 return;
409
410 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
411
412 if (sdata->local->ops->set_tim)
413 sdata->local->ops->set_tim(local_to_hw(sdata->local),
414 sta->aid, 0);
415 if (sdata->bss)
416 __bss_tim_clear(sdata->bss, sta->aid);
417}
418
419
420/** 466/**
421 * sta_info_flush - flush matching STA entries from the STA table 467 * sta_info_flush - flush matching STA entries from the STA table
422 * @local: local interface data 468 * @local: local interface data
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 96fe3ed95038..86eed40ada78 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -15,31 +15,72 @@
15#include <linux/kref.h> 15#include <linux/kref.h>
16#include "ieee80211_key.h" 16#include "ieee80211_key.h"
17 17
18/* Stations flags (struct sta_info::flags) */ 18/**
19#define WLAN_STA_AUTH BIT(0) 19 * enum ieee80211_sta_info_flags - Stations flags
20#define WLAN_STA_ASSOC BIT(1) 20 *
21#define WLAN_STA_PS BIT(2) 21 * These flags are used with &struct sta_info's @flags member.
22#define WLAN_STA_TIM BIT(3) /* TIM bit is on for PS stations */ 22 *
23#define WLAN_STA_PERM BIT(4) /* permanent; do not remove entry on expiration */ 23 * @WLAN_STA_AUTH: Station is authenticated.
24#define WLAN_STA_AUTHORIZED BIT(5) /* If 802.1X is used, this flag is 24 * @WLAN_STA_ASSOC: Station is associated.
25 * controlling whether STA is authorized to 25 * @WLAN_STA_PS: Station is in power-save mode
26 * send and receive non-IEEE 802.1X frames 26 * @WLAN_STA_AUTHORIZED: Station is authorized to send/receive traffic.
27 */ 27 * This bit is always checked so needs to be enabled for all stations
28#define WLAN_STA_SHORT_PREAMBLE BIT(7) 28 * when virtual port control is not in use.
29/* whether this is an AP that we are associated with as a client */ 29 * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble
30#define WLAN_STA_ASSOC_AP BIT(8) 30 * frames.
31#define WLAN_STA_WME BIT(9) 31 * @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP.
32#define WLAN_STA_WDS BIT(27) 32 * @WLAN_STA_WME: Station is a QoS-STA.
33 * @WLAN_STA_WDS: Station is one of our WDS peers.
34 * @WLAN_STA_PSPOLL: Station has just PS-polled us.
35 * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the
36 * IEEE80211_TXCTL_CLEAR_PS_FILT control flag) when the next
37 * frame to this station is transmitted.
38 */
39enum ieee80211_sta_info_flags {
40 WLAN_STA_AUTH = 1<<0,
41 WLAN_STA_ASSOC = 1<<1,
42 WLAN_STA_PS = 1<<2,
43 WLAN_STA_AUTHORIZED = 1<<3,
44 WLAN_STA_SHORT_PREAMBLE = 1<<4,
45 WLAN_STA_ASSOC_AP = 1<<5,
46 WLAN_STA_WME = 1<<6,
47 WLAN_STA_WDS = 1<<7,
48 WLAN_STA_PSPOLL = 1<<8,
49 WLAN_STA_CLEAR_PS_FILT = 1<<9,
50};
33 51
34#define STA_TID_NUM 16 52#define STA_TID_NUM 16
35#define ADDBA_RESP_INTERVAL HZ 53#define ADDBA_RESP_INTERVAL HZ
54#define HT_AGG_MAX_RETRIES (0x3)
36 55
37#define HT_AGG_STATE_INITIATOR_SHIFT (4) 56#define HT_AGG_STATE_INITIATOR_SHIFT (4)
38 57
58#define HT_ADDBA_REQUESTED_MSK BIT(0)
59#define HT_ADDBA_DRV_READY_MSK BIT(1)
60#define HT_ADDBA_RECEIVED_MSK BIT(2)
39#define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3) 61#define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3)
40 62#define HT_AGG_STATE_INITIATOR_MSK BIT(HT_AGG_STATE_INITIATOR_SHIFT)
41#define HT_AGG_STATE_IDLE (0x0) 63#define HT_AGG_STATE_IDLE (0x0)
42#define HT_AGG_STATE_OPERATIONAL (0x7) 64#define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \
65 HT_ADDBA_DRV_READY_MSK | \
66 HT_ADDBA_RECEIVED_MSK)
67
68/**
69 * struct tid_ampdu_tx - TID aggregation information (Tx).
70 *
71 * @state: TID's state in session state machine.
72 * @dialog_token: dialog token for aggregation session
73 * @ssn: Starting Sequence Number expected to be aggregated.
74 * @addba_resp_timer: timer for peer's response to addba request
75 * @addba_req_num: number of times addBA request has been sent.
76 */
77struct tid_ampdu_tx {
78 u8 state;
79 u8 dialog_token;
80 u16 ssn;
81 struct timer_list addba_resp_timer;
82 u8 addba_req_num;
83};
43 84
44/** 85/**
45 * struct tid_ampdu_rx - TID aggregation information (Rx). 86 * struct tid_ampdu_rx - TID aggregation information (Rx).
@@ -69,12 +110,18 @@ struct tid_ampdu_rx {
69/** 110/**
70 * struct sta_ampdu_mlme - STA aggregation information. 111 * struct sta_ampdu_mlme - STA aggregation information.
71 * 112 *
72 * @tid_agg_info_rx: aggregation info for Rx per TID 113 * @tid_rx: aggregation info for Rx per TID
114 * @tid_tx: aggregation info for Tx per TID
73 * @ampdu_rx: for locking sections in aggregation Rx flow 115 * @ampdu_rx: for locking sections in aggregation Rx flow
 116 * @ampdu_tx: for locking sections in aggregation Tx flow
 117 * @dialog_token_allocator: dialog token enumerator for each new session
74 */ 118 */
75struct sta_ampdu_mlme { 119struct sta_ampdu_mlme {
76 struct tid_ampdu_rx tid_rx[STA_TID_NUM]; 120 struct tid_ampdu_rx tid_rx[STA_TID_NUM];
121 struct tid_ampdu_tx tid_tx[STA_TID_NUM];
77 spinlock_t ampdu_rx; 122 spinlock_t ampdu_rx;
123 spinlock_t ampdu_tx;
124 u8 dialog_token_allocator;
78}; 125};
79 126
80struct sta_info { 127struct sta_info {
@@ -90,12 +137,9 @@ struct sta_info {
90 137
91 struct sk_buff_head ps_tx_buf; /* buffer of TX frames for station in 138 struct sk_buff_head ps_tx_buf; /* buffer of TX frames for station in
92 * power saving state */ 139 * power saving state */
 93 int pspoll; /* whether STA has sent a PS Poll frame */
94 struct sk_buff_head tx_filtered; /* buffer of TX frames that were 140 struct sk_buff_head tx_filtered; /* buffer of TX frames that were
95 * already given to low-level driver, 141 * already given to low-level driver,
96 * but were filtered */ 142 * but were filtered */
97 int clear_dst_mask;
98
99 unsigned long rx_packets, tx_packets; /* number of RX/TX MSDUs */ 143 unsigned long rx_packets, tx_packets; /* number of RX/TX MSDUs */
100 unsigned long rx_bytes, tx_bytes; 144 unsigned long rx_bytes, tx_bytes;
101 unsigned long tx_retry_failed, tx_retry_count; 145 unsigned long tx_retry_failed, tx_retry_count;
@@ -104,10 +148,11 @@ struct sta_info {
104 unsigned int wep_weak_iv_count; /* number of RX frames with weak IV */ 148 unsigned int wep_weak_iv_count; /* number of RX frames with weak IV */
105 149
106 unsigned long last_rx; 150 unsigned long last_rx;
107 u32 supp_rates; /* bitmap of supported rates in local->curr_rates */ 151 /* bitmap of supported rates per band */
108 int txrate; /* index in local->curr_rates */ 152 u64 supp_rates[IEEE80211_NUM_BANDS];
109 int last_txrate; /* last rate used to send a frame to this STA */ 153 int txrate_idx;
110 int last_nonerp_idx; 154 /* last rates used to send a frame to this STA */
155 int last_txrate_idx, last_nonerp_txrate_idx;
111 156
112 struct net_device *dev; /* which net device is this station associated 157 struct net_device *dev; /* which net device is this station associated
113 * to */ 158 * to */
@@ -132,8 +177,6 @@ struct sta_info {
132 int last_rssi; /* RSSI of last received frame from this STA */ 177 int last_rssi; /* RSSI of last received frame from this STA */
133 int last_signal; /* signal of last received frame from this STA */ 178 int last_signal; /* signal of last received frame from this STA */
134 int last_noise; /* noise of last received frame from this STA */ 179 int last_noise; /* noise of last received frame from this STA */
135 int last_ack_rssi[3]; /* RSSI of last received ACKs from this STA */
136 unsigned long last_ack;
137 int channel_use; 180 int channel_use;
138 int channel_use_raw; 181 int channel_use_raw;
139 182
@@ -148,20 +191,20 @@ struct sta_info {
148 of this STA */ 191 of this STA */
149 struct sta_ampdu_mlme ampdu_mlme; 192 struct sta_ampdu_mlme ampdu_mlme;
150 u8 timer_to_tid[STA_TID_NUM]; /* convert timer id to tid */ 193 u8 timer_to_tid[STA_TID_NUM]; /* convert timer id to tid */
194 u8 tid_to_tx_q[STA_TID_NUM]; /* map tid to tx queue */
151 195
152#ifdef CONFIG_MAC80211_DEBUGFS 196#ifdef CONFIG_MAC80211_DEBUGFS
153 struct sta_info_debugfsdentries { 197 struct sta_info_debugfsdentries {
154 struct dentry *dir; 198 struct dentry *dir;
155 struct dentry *flags; 199 struct dentry *flags;
156 struct dentry *num_ps_buf_frames; 200 struct dentry *num_ps_buf_frames;
157 struct dentry *last_ack_rssi;
158 struct dentry *last_ack_ms;
159 struct dentry *inactive_ms; 201 struct dentry *inactive_ms;
160 struct dentry *last_seq_ctrl; 202 struct dentry *last_seq_ctrl;
161#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 203#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
162 struct dentry *wme_rx_queue; 204 struct dentry *wme_rx_queue;
163 struct dentry *wme_tx_queue; 205 struct dentry *wme_tx_queue;
164#endif 206#endif
207 struct dentry *agg_status;
165 } debugfs; 208 } debugfs;
166#endif 209#endif
167}; 210};
@@ -191,16 +234,17 @@ static inline void __sta_info_get(struct sta_info *sta)
191} 234}
192 235
193struct sta_info * sta_info_get(struct ieee80211_local *local, u8 *addr); 236struct sta_info * sta_info_get(struct ieee80211_local *local, u8 *addr);
194int sta_info_min_txrate_get(struct ieee80211_local *local);
195void sta_info_put(struct sta_info *sta); 237void sta_info_put(struct sta_info *sta);
196struct sta_info * sta_info_add(struct ieee80211_local *local, 238struct sta_info *sta_info_add(struct ieee80211_local *local,
197 struct net_device *dev, u8 *addr, gfp_t gfp); 239 struct net_device *dev, u8 *addr, gfp_t gfp);
198void sta_info_remove(struct sta_info *sta); 240void sta_info_remove(struct sta_info *sta);
199void sta_info_free(struct sta_info *sta); 241void sta_info_free(struct sta_info *sta);
200void sta_info_init(struct ieee80211_local *local); 242void sta_info_init(struct ieee80211_local *local);
201int sta_info_start(struct ieee80211_local *local); 243int sta_info_start(struct ieee80211_local *local);
202void sta_info_stop(struct ieee80211_local *local); 244void sta_info_stop(struct ieee80211_local *local);
203void sta_info_remove_aid_ptr(struct sta_info *sta);
204void sta_info_flush(struct ieee80211_local *local, struct net_device *dev); 245void sta_info_flush(struct ieee80211_local *local, struct net_device *dev);
205 246
247void sta_info_set_tim_bit(struct sta_info *sta);
248void sta_info_clear_tim_bit(struct sta_info *sta);
249
206#endif /* STA_INFO_H */ 250#endif /* STA_INFO_H */
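The new Tx aggregation fields in sta_info.h model each TID's ADDBA handshake as a small bitmask: HT_AGG_STATE_OPERATIONAL is just the three ADDBA bits OR'd together, so a session only becomes usable after the request has been sent, the driver has reported ready and the peer's response has arrived. A compact sketch of driving such a state word, reusing the same bit layout outside the kernel:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HT_ADDBA_REQUESTED_MSK  (1 << 0)
    #define HT_ADDBA_DRV_READY_MSK  (1 << 1)
    #define HT_ADDBA_RECEIVED_MSK   (1 << 2)
    #define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \
                                      HT_ADDBA_DRV_READY_MSK | \
                                      HT_ADDBA_RECEIVED_MSK)

    static bool tid_operational(uint8_t state)
    {
            return (state & HT_AGG_STATE_OPERATIONAL) == HT_AGG_STATE_OPERATIONAL;
    }

    int main(void)
    {
            uint8_t state = 0;

            state |= HT_ADDBA_REQUESTED_MSK;        /* ADDBA request sent to the peer */
            state |= HT_ADDBA_DRV_READY_MSK;        /* driver acknowledged the session */
            printf("operational: %d\n", tid_operational(state));   /* 0 */

            state |= HT_ADDBA_RECEIVED_MSK;         /* peer's ADDBA response received */
            printf("operational: %d\n", tid_operational(state));   /* 1 */
            return 0;
    }

The stop-request bit (BIT(3)) and the initiator bit (BIT(4)) from the header sit above these three, so they do not interfere with the mask test.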
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 67b509edd431..1cd58e01f1ee 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -92,9 +92,13 @@ static u16 ieee80211_duration(struct ieee80211_txrx_data *tx, int group_addr,
92 int rate, mrate, erp, dur, i; 92 int rate, mrate, erp, dur, i;
93 struct ieee80211_rate *txrate = tx->u.tx.rate; 93 struct ieee80211_rate *txrate = tx->u.tx.rate;
94 struct ieee80211_local *local = tx->local; 94 struct ieee80211_local *local = tx->local;
95 struct ieee80211_hw_mode *mode = tx->u.tx.mode; 95 struct ieee80211_supported_band *sband;
96 96
97 erp = txrate->flags & IEEE80211_RATE_ERP; 97 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
98
99 erp = 0;
100 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
101 erp = txrate->flags & IEEE80211_RATE_ERP_G;
98 102
99 /* 103 /*
100 * data and mgmt (except PS Poll): 104 * data and mgmt (except PS Poll):
@@ -150,20 +154,36 @@ static u16 ieee80211_duration(struct ieee80211_txrx_data *tx, int group_addr,
150 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps 154 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
151 */ 155 */
152 rate = -1; 156 rate = -1;
153 mrate = 10; /* use 1 Mbps if everything fails */ 157 /* use lowest available if everything fails */
154 for (i = 0; i < mode->num_rates; i++) { 158 mrate = sband->bitrates[0].bitrate;
155 struct ieee80211_rate *r = &mode->rates[i]; 159 for (i = 0; i < sband->n_bitrates; i++) {
156 if (r->rate > txrate->rate) 160 struct ieee80211_rate *r = &sband->bitrates[i];
157 break;
158 161
159 if (IEEE80211_RATE_MODULATION(txrate->flags) != 162 if (r->bitrate > txrate->bitrate)
160 IEEE80211_RATE_MODULATION(r->flags)) 163 break;
161 continue;
162 164
163 if (r->flags & IEEE80211_RATE_BASIC) 165 if (tx->sdata->basic_rates & BIT(i))
164 rate = r->rate; 166 rate = r->bitrate;
165 else if (r->flags & IEEE80211_RATE_MANDATORY) 167
166 mrate = r->rate; 168 switch (sband->band) {
169 case IEEE80211_BAND_2GHZ: {
170 u32 flag;
171 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
172 flag = IEEE80211_RATE_MANDATORY_G;
173 else
174 flag = IEEE80211_RATE_MANDATORY_B;
175 if (r->flags & flag)
176 mrate = r->bitrate;
177 break;
178 }
179 case IEEE80211_BAND_5GHZ:
180 if (r->flags & IEEE80211_RATE_MANDATORY_A)
181 mrate = r->bitrate;
182 break;
183 case IEEE80211_NUM_BANDS:
184 WARN_ON(1);
185 break;
186 }
167 } 187 }
168 if (rate == -1) { 188 if (rate == -1) {
169 /* No matching basic rate found; use highest suitable mandatory 189 /* No matching basic rate found; use highest suitable mandatory
@@ -184,7 +204,7 @@ static u16 ieee80211_duration(struct ieee80211_txrx_data *tx, int group_addr,
184 dur *= 2; /* ACK + SIFS */ 204 dur *= 2; /* ACK + SIFS */
185 /* next fragment */ 205 /* next fragment */
186 dur += ieee80211_frame_duration(local, next_frag_len, 206 dur += ieee80211_frame_duration(local, next_frag_len,
187 txrate->rate, erp, 207 txrate->bitrate, erp,
188 tx->sdata->bss_conf.use_short_preamble); 208 tx->sdata->bss_conf.use_short_preamble);
189 } 209 }
190 210
@@ -212,7 +232,7 @@ static int inline is_ieee80211_device(struct net_device *dev,
212 232
213/* tx handlers */ 233/* tx handlers */
214 234
215static ieee80211_txrx_result 235static ieee80211_tx_result
216ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx) 236ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx)
217{ 237{
218#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 238#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -222,15 +242,15 @@ ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx)
222 u32 sta_flags; 242 u32 sta_flags;
223 243
224 if (unlikely(tx->flags & IEEE80211_TXRXD_TX_INJECTED)) 244 if (unlikely(tx->flags & IEEE80211_TXRXD_TX_INJECTED))
225 return TXRX_CONTINUE; 245 return TX_CONTINUE;
226 246
227 if (unlikely(tx->local->sta_sw_scanning) && 247 if (unlikely(tx->local->sta_sw_scanning) &&
228 ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 248 ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT ||
229 (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ)) 249 (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ))
230 return TXRX_DROP; 250 return TX_DROP;
231 251
232 if (tx->flags & IEEE80211_TXRXD_TXPS_BUFFERED) 252 if (tx->flags & IEEE80211_TXRXD_TXPS_BUFFERED)
233 return TXRX_CONTINUE; 253 return TX_CONTINUE;
234 254
235 sta_flags = tx->sta ? tx->sta->flags : 0; 255 sta_flags = tx->sta ? tx->sta->flags : 0;
236 256
@@ -245,7 +265,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx)
245 tx->dev->name, print_mac(mac, hdr->addr1)); 265 tx->dev->name, print_mac(mac, hdr->addr1));
246#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 266#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
247 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); 267 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
248 return TXRX_DROP; 268 return TX_DROP;
249 } 269 }
250 } else { 270 } else {
251 if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && 271 if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
@@ -255,15 +275,15 @@ ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx)
255 * No associated STAs - no need to send multicast 275 * No associated STAs - no need to send multicast
256 * frames. 276 * frames.
257 */ 277 */
258 return TXRX_DROP; 278 return TX_DROP;
259 } 279 }
260 return TXRX_CONTINUE; 280 return TX_CONTINUE;
261 } 281 }
262 282
263 return TXRX_CONTINUE; 283 return TX_CONTINUE;
264} 284}
265 285
266static ieee80211_txrx_result 286static ieee80211_tx_result
267ieee80211_tx_h_sequence(struct ieee80211_txrx_data *tx) 287ieee80211_tx_h_sequence(struct ieee80211_txrx_data *tx)
268{ 288{
269 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 289 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
@@ -271,7 +291,7 @@ ieee80211_tx_h_sequence(struct ieee80211_txrx_data *tx)
271 if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24) 291 if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24)
272 ieee80211_include_sequence(tx->sdata, hdr); 292 ieee80211_include_sequence(tx->sdata, hdr);
273 293
274 return TXRX_CONTINUE; 294 return TX_CONTINUE;
275} 295}
276 296
277/* This function is called whenever the AP is about to exceed the maximum limit 297/* This function is called whenever the AP is about to exceed the maximum limit
@@ -321,7 +341,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
321 wiphy_name(local->hw.wiphy), purged); 341 wiphy_name(local->hw.wiphy), purged);
322} 342}
323 343
324static ieee80211_txrx_result 344static ieee80211_tx_result
325ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx) 345ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx)
326{ 346{
327 /* 347 /*
@@ -334,11 +354,11 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx)
334 354
335 /* not AP/IBSS or ordered frame */ 355 /* not AP/IBSS or ordered frame */
336 if (!tx->sdata->bss || (tx->fc & IEEE80211_FCTL_ORDER)) 356 if (!tx->sdata->bss || (tx->fc & IEEE80211_FCTL_ORDER))
337 return TXRX_CONTINUE; 357 return TX_CONTINUE;
338 358
339 /* no stations in PS mode */ 359 /* no stations in PS mode */
340 if (!atomic_read(&tx->sdata->bss->num_sta_ps)) 360 if (!atomic_read(&tx->sdata->bss->num_sta_ps))
341 return TXRX_CONTINUE; 361 return TX_CONTINUE;
342 362
343 /* buffered in mac80211 */ 363 /* buffered in mac80211 */
344 if (tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) { 364 if (tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) {
@@ -355,16 +375,16 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx)
355 } else 375 } else
356 tx->local->total_ps_buffered++; 376 tx->local->total_ps_buffered++;
357 skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb); 377 skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb);
358 return TXRX_QUEUED; 378 return TX_QUEUED;
359 } 379 }
360 380
361 /* buffered in hardware */ 381 /* buffered in hardware */
362 tx->u.tx.control->flags |= IEEE80211_TXCTL_SEND_AFTER_DTIM; 382 tx->u.tx.control->flags |= IEEE80211_TXCTL_SEND_AFTER_DTIM;
363 383
364 return TXRX_CONTINUE; 384 return TX_CONTINUE;
365} 385}
366 386
367static ieee80211_txrx_result 387static ieee80211_tx_result
368ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx) 388ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx)
369{ 389{
370 struct sta_info *sta = tx->sta; 390 struct sta_info *sta = tx->sta;
@@ -373,9 +393,10 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx)
373 if (unlikely(!sta || 393 if (unlikely(!sta ||
374 ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && 394 ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT &&
375 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) 395 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)))
376 return TXRX_CONTINUE; 396 return TX_CONTINUE;
377 397
378 if (unlikely((sta->flags & WLAN_STA_PS) && !sta->pspoll)) { 398 if (unlikely((sta->flags & WLAN_STA_PS) &&
399 !(sta->flags & WLAN_STA_PSPOLL))) {
379 struct ieee80211_tx_packet_data *pkt_data; 400 struct ieee80211_tx_packet_data *pkt_data;
380#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 401#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
381 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " 402 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries "
@@ -383,7 +404,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx)
383 print_mac(mac, sta->addr), sta->aid, 404 print_mac(mac, sta->addr), sta->aid,
384 skb_queue_len(&sta->ps_tx_buf)); 405 skb_queue_len(&sta->ps_tx_buf));
385#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 406#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
386 sta->flags |= WLAN_STA_TIM;
387 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 407 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
388 purge_old_ps_buffers(tx->local); 408 purge_old_ps_buffers(tx->local);
389 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { 409 if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) {
@@ -396,18 +416,15 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx)
396 dev_kfree_skb(old); 416 dev_kfree_skb(old);
397 } else 417 } else
398 tx->local->total_ps_buffered++; 418 tx->local->total_ps_buffered++;
419
 399 /* Queue frame to be sent after STA sends a PS Poll frame */ 420 /* Queue frame to be sent after STA sends a PS Poll frame */
400 if (skb_queue_empty(&sta->ps_tx_buf)) { 421 if (skb_queue_empty(&sta->ps_tx_buf))
401 if (tx->local->ops->set_tim) 422 sta_info_set_tim_bit(sta);
402 tx->local->ops->set_tim(local_to_hw(tx->local), 423
403 sta->aid, 1);
404 if (tx->sdata->bss)
405 bss_tim_set(tx->local, tx->sdata->bss, sta->aid);
406 }
407 pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb; 424 pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb;
408 pkt_data->jiffies = jiffies; 425 pkt_data->jiffies = jiffies;
409 skb_queue_tail(&sta->ps_tx_buf, tx->skb); 426 skb_queue_tail(&sta->ps_tx_buf, tx->skb);
410 return TXRX_QUEUED; 427 return TX_QUEUED;
411 } 428 }
412#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 429#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
413 else if (unlikely(sta->flags & WLAN_STA_PS)) { 430 else if (unlikely(sta->flags & WLAN_STA_PS)) {
@@ -416,16 +433,16 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx)
416 print_mac(mac, sta->addr)); 433 print_mac(mac, sta->addr));
417 } 434 }
418#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 435#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
419 sta->pspoll = 0; 436 sta->flags &= ~WLAN_STA_PSPOLL;
420 437
421 return TXRX_CONTINUE; 438 return TX_CONTINUE;
422} 439}
423 440
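ieee80211_tx_h_unicast_ps_buf() is the set side of the TIM pair: a unicast frame for a station that is dozing and has not just PS-polled gets queued, and if that queue was empty beforehand the station's TIM bit is raised via sta_info_set_tim_bit(); a station that did PS-poll merely has its WLAN_STA_PSPOLL flag consumed and the frame goes out. A condensed sketch of that decision, again with stand-in types and illustrative flag values rather than the real ones:

    #include <stdbool.h>

    /* Stand-in flags mirroring the enum above; values illustrative. */
    #define WLAN_STA_PS     (1 << 2)
    #define WLAN_STA_PSPOLL (1 << 8)

    struct sta {
            unsigned flags;
            int buffered;   /* frames already sitting in ps_tx_buf */
            bool tim_bit;
    };

    /* Returns true if the frame was buffered (TX_QUEUED), false to continue. */
    static bool unicast_ps_buf(struct sta *sta)
    {
            if ((sta->flags & WLAN_STA_PS) && !(sta->flags & WLAN_STA_PSPOLL)) {
                    if (sta->buffered++ == 0)
                            sta->tim_bit = true;    /* sta_info_set_tim_bit() */
                    return true;                    /* park the skb for later */
            }
            sta->flags &= ~WLAN_STA_PSPOLL;         /* one PS-poll releases one frame */
            return false;
    }

    int main(void)
    {
            struct sta sta = { .flags = WLAN_STA_PS };
            unicast_ps_buf(&sta);           /* first frame: buffered, TIM bit set */
            return sta.tim_bit ? 0 : 1;
    }

The purge of over-limit buffers and the per-station queue cap from the real handler are omitted here for brevity.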
424static ieee80211_txrx_result 441static ieee80211_tx_result
425ieee80211_tx_h_ps_buf(struct ieee80211_txrx_data *tx) 442ieee80211_tx_h_ps_buf(struct ieee80211_txrx_data *tx)
426{ 443{
427 if (unlikely(tx->flags & IEEE80211_TXRXD_TXPS_BUFFERED)) 444 if (unlikely(tx->flags & IEEE80211_TXRXD_TXPS_BUFFERED))
428 return TXRX_CONTINUE; 445 return TX_CONTINUE;
429 446
430 if (tx->flags & IEEE80211_TXRXD_TXUNICAST) 447 if (tx->flags & IEEE80211_TXRXD_TXUNICAST)
431 return ieee80211_tx_h_unicast_ps_buf(tx); 448 return ieee80211_tx_h_unicast_ps_buf(tx);
@@ -433,7 +450,7 @@ ieee80211_tx_h_ps_buf(struct ieee80211_txrx_data *tx)
433 return ieee80211_tx_h_multicast_ps_buf(tx); 450 return ieee80211_tx_h_multicast_ps_buf(tx);
434} 451}
435 452
436static ieee80211_txrx_result 453static ieee80211_tx_result
437ieee80211_tx_h_select_key(struct ieee80211_txrx_data *tx) 454ieee80211_tx_h_select_key(struct ieee80211_txrx_data *tx)
438{ 455{
439 struct ieee80211_key *key; 456 struct ieee80211_key *key;
@@ -449,7 +466,7 @@ ieee80211_tx_h_select_key(struct ieee80211_txrx_data *tx)
449 !(tx->u.tx.control->flags & IEEE80211_TXCTL_EAPOL_FRAME) && 466 !(tx->u.tx.control->flags & IEEE80211_TXCTL_EAPOL_FRAME) &&
450 !(tx->flags & IEEE80211_TXRXD_TX_INJECTED)) { 467 !(tx->flags & IEEE80211_TXRXD_TX_INJECTED)) {
451 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); 468 I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
452 return TXRX_DROP; 469 return TX_DROP;
453 } else 470 } else
454 tx->key = NULL; 471 tx->key = NULL;
455 472
@@ -478,10 +495,10 @@ ieee80211_tx_h_select_key(struct ieee80211_txrx_data *tx)
478 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 495 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
479 tx->u.tx.control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 496 tx->u.tx.control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT;
480 497
481 return TXRX_CONTINUE; 498 return TX_CONTINUE;
482} 499}
483 500
484static ieee80211_txrx_result 501static ieee80211_tx_result
485ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx) 502ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx)
486{ 503{
487 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; 504 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
@@ -493,7 +510,7 @@ ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx)
493 int frag_threshold = tx->local->fragmentation_threshold; 510 int frag_threshold = tx->local->fragmentation_threshold;
494 511
495 if (!(tx->flags & IEEE80211_TXRXD_FRAGMENTED)) 512 if (!(tx->flags & IEEE80211_TXRXD_FRAGMENTED))
496 return TXRX_CONTINUE; 513 return TX_CONTINUE;
497 514
498 first = tx->skb; 515 first = tx->skb;
499 516
@@ -547,7 +564,7 @@ ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx)
547 tx->u.tx.num_extra_frag = num_fragm - 1; 564 tx->u.tx.num_extra_frag = num_fragm - 1;
548 tx->u.tx.extra_frag = frags; 565 tx->u.tx.extra_frag = frags;
549 566
550 return TXRX_CONTINUE; 567 return TX_CONTINUE;
551 568
552 fail: 569 fail:
553 printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name); 570 printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name);
@@ -558,14 +575,14 @@ ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx)
558 kfree(frags); 575 kfree(frags);
559 } 576 }
560 I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment); 577 I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment);
561 return TXRX_DROP; 578 return TX_DROP;
562} 579}
563 580
564static ieee80211_txrx_result 581static ieee80211_tx_result
565ieee80211_tx_h_encrypt(struct ieee80211_txrx_data *tx) 582ieee80211_tx_h_encrypt(struct ieee80211_txrx_data *tx)
566{ 583{
567 if (!tx->key) 584 if (!tx->key)
568 return TXRX_CONTINUE; 585 return TX_CONTINUE;
569 586
570 switch (tx->key->conf.alg) { 587 switch (tx->key->conf.alg) {
571 case ALG_WEP: 588 case ALG_WEP:
@@ -578,33 +595,35 @@ ieee80211_tx_h_encrypt(struct ieee80211_txrx_data *tx)
578 595
579 /* not reached */ 596 /* not reached */
580 WARN_ON(1); 597 WARN_ON(1);
581 return TXRX_DROP; 598 return TX_DROP;
582} 599}
583 600
584static ieee80211_txrx_result 601static ieee80211_tx_result
585ieee80211_tx_h_rate_ctrl(struct ieee80211_txrx_data *tx) 602ieee80211_tx_h_rate_ctrl(struct ieee80211_txrx_data *tx)
586{ 603{
587 struct rate_selection rsel; 604 struct rate_selection rsel;
605 struct ieee80211_supported_band *sband;
606
607 sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band];
588 608
589 if (likely(!tx->u.tx.rate)) { 609 if (likely(!tx->u.tx.rate)) {
590 rate_control_get_rate(tx->dev, tx->u.tx.mode, tx->skb, &rsel); 610 rate_control_get_rate(tx->dev, sband, tx->skb, &rsel);
591 tx->u.tx.rate = rsel.rate; 611 tx->u.tx.rate = rsel.rate;
592 if (unlikely(rsel.probe != NULL)) { 612 if (unlikely(rsel.probe)) {
593 tx->u.tx.control->flags |= 613 tx->u.tx.control->flags |=
594 IEEE80211_TXCTL_RATE_CTRL_PROBE; 614 IEEE80211_TXCTL_RATE_CTRL_PROBE;
595 tx->flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG; 615 tx->flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG;
596 tx->u.tx.control->alt_retry_rate = tx->u.tx.rate->val; 616 tx->u.tx.control->alt_retry_rate = tx->u.tx.rate;
597 tx->u.tx.rate = rsel.probe; 617 tx->u.tx.rate = rsel.probe;
598 } else 618 } else
599 tx->u.tx.control->alt_retry_rate = -1; 619 tx->u.tx.control->alt_retry_rate = NULL;
600 620
601 if (!tx->u.tx.rate) 621 if (!tx->u.tx.rate)
602 return TXRX_DROP; 622 return TX_DROP;
603 } else 623 } else
604 tx->u.tx.control->alt_retry_rate = -1; 624 tx->u.tx.control->alt_retry_rate = NULL;
605 625
606 if (tx->u.tx.mode->mode == MODE_IEEE80211G && 626 if (tx->sdata->bss_conf.use_cts_prot &&
607 tx->sdata->bss_conf.use_cts_prot &&
608 (tx->flags & IEEE80211_TXRXD_FRAGMENTED) && rsel.nonerp) { 627 (tx->flags & IEEE80211_TXRXD_FRAGMENTED) && rsel.nonerp) {
609 tx->u.tx.last_frag_rate = tx->u.tx.rate; 628 tx->u.tx.last_frag_rate = tx->u.tx.rate;
610 if (rsel.probe) 629 if (rsel.probe)
@@ -612,25 +631,24 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_txrx_data *tx)
612 else 631 else
613 tx->flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG; 632 tx->flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG;
614 tx->u.tx.rate = rsel.nonerp; 633 tx->u.tx.rate = rsel.nonerp;
615 tx->u.tx.control->rate = rsel.nonerp; 634 tx->u.tx.control->tx_rate = rsel.nonerp;
616 tx->u.tx.control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE; 635 tx->u.tx.control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE;
617 } else { 636 } else {
618 tx->u.tx.last_frag_rate = tx->u.tx.rate; 637 tx->u.tx.last_frag_rate = tx->u.tx.rate;
619 tx->u.tx.control->rate = tx->u.tx.rate; 638 tx->u.tx.control->tx_rate = tx->u.tx.rate;
620 } 639 }
621 tx->u.tx.control->tx_rate = tx->u.tx.rate->val; 640 tx->u.tx.control->tx_rate = tx->u.tx.rate;
622 641
623 return TXRX_CONTINUE; 642 return TX_CONTINUE;
624} 643}
625 644
626static ieee80211_txrx_result 645static ieee80211_tx_result
627ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx) 646ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx)
628{ 647{
629 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; 648 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data;
630 u16 fc = le16_to_cpu(hdr->frame_control); 649 u16 fc = le16_to_cpu(hdr->frame_control);
631 u16 dur; 650 u16 dur;
632 struct ieee80211_tx_control *control = tx->u.tx.control; 651 struct ieee80211_tx_control *control = tx->u.tx.control;
633 struct ieee80211_hw_mode *mode = tx->u.tx.mode;
634 652
635 if (!control->retry_limit) { 653 if (!control->retry_limit) {
636 if (!is_multicast_ether_addr(hdr->addr1)) { 654 if (!is_multicast_ether_addr(hdr->addr1)) {
@@ -657,14 +675,14 @@ ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx)
657 * frames. 675 * frames.
658 * TODO: The last fragment could still use multiple retry 676 * TODO: The last fragment could still use multiple retry
659 * rates. */ 677 * rates. */
660 control->alt_retry_rate = -1; 678 control->alt_retry_rate = NULL;
661 } 679 }
662 680
663 /* Use CTS protection for unicast frames sent using extended rates if 681 /* Use CTS protection for unicast frames sent using extended rates if
664 * there are associated non-ERP stations and RTS/CTS is not configured 682 * there are associated non-ERP stations and RTS/CTS is not configured
665 * for the frame. */ 683 * for the frame. */
666 if (mode->mode == MODE_IEEE80211G && 684 if ((tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) &&
667 (tx->u.tx.rate->flags & IEEE80211_RATE_ERP) && 685 (tx->u.tx.rate->flags & IEEE80211_RATE_ERP_G) &&
668 (tx->flags & IEEE80211_TXRXD_TXUNICAST) && 686 (tx->flags & IEEE80211_TXRXD_TXUNICAST) &&
669 tx->sdata->bss_conf.use_cts_prot && 687 tx->sdata->bss_conf.use_cts_prot &&
670 !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS)) 688 !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS))
@@ -674,10 +692,10 @@ ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx)
674 * short preambles at the selected rate and short preambles are 692 * short preambles at the selected rate and short preambles are
675 * available on the network at the current point in time. */ 693 * available on the network at the current point in time. */
676 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && 694 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
677 (tx->u.tx.rate->flags & IEEE80211_RATE_PREAMBLE2) && 695 (tx->u.tx.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
678 tx->sdata->bss_conf.use_short_preamble && 696 tx->sdata->bss_conf.use_short_preamble &&
679 (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) { 697 (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) {
680 tx->u.tx.control->tx_rate = tx->u.tx.rate->val2; 698 tx->u.tx.control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE;
681 } 699 }
682 700
683 /* Setup duration field for the first fragment of the frame. Duration 701 /* Setup duration field for the first fragment of the frame. Duration
@@ -690,19 +708,33 @@ ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx)
690 708
691 if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) || 709 if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) ||
692 (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { 710 (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) {
693 struct ieee80211_rate *rate; 711 struct ieee80211_supported_band *sband;
712 struct ieee80211_rate *rate, *baserate;
713 int idx;
714
715 sband = tx->local->hw.wiphy->bands[
716 tx->local->hw.conf.channel->band];
694 717
695 /* Do not use multiple retry rates when using RTS/CTS */ 718 /* Do not use multiple retry rates when using RTS/CTS */
696 control->alt_retry_rate = -1; 719 control->alt_retry_rate = NULL;
697 720
698 /* Use min(data rate, max base rate) as CTS/RTS rate */ 721 /* Use min(data rate, max base rate) as CTS/RTS rate */
699 rate = tx->u.tx.rate; 722 rate = tx->u.tx.rate;
700 while (rate > mode->rates && 723 baserate = NULL;
701 !(rate->flags & IEEE80211_RATE_BASIC))
702 rate--;
703 724
704 control->rts_cts_rate = rate->val; 725 for (idx = 0; idx < sband->n_bitrates; idx++) {
705 control->rts_rate = rate; 726 if (sband->bitrates[idx].bitrate > rate->bitrate)
727 continue;
728 if (tx->sdata->basic_rates & BIT(idx) &&
729 (!baserate ||
730 (baserate->bitrate < sband->bitrates[idx].bitrate)))
731 baserate = &sband->bitrates[idx];
732 }
733
734 if (baserate)
735 control->rts_cts_rate = baserate;
736 else
737 control->rts_cts_rate = &sband->bitrates[0];
706 } 738 }
707 739
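With the per-mode checks gone, ieee80211_tx_h_misc() keys its protection decisions off interface and rate flags alone: CTS-to-self protection is requested for unicast ERP-rate frames when the BSS mandates it and RTS/CTS is not already in use, and the short-preamble TX flag is set only when the rate, the BSS configuration and the receiving station all allow it. A small boolean sketch of those two decisions; the field names are shortened stand-ins, not the actual ieee80211_tx_control layout:

    #include <stdbool.h>
    #include <stdio.h>

    struct txinfo {
            bool operating_gmode;   /* IEEE80211_SDATA_OPERATING_GMODE */
            bool rate_erp_g;        /* IEEE80211_RATE_ERP_G on the chosen rate */
            bool rate_short_pre;    /* IEEE80211_RATE_SHORT_PREAMBLE */
            bool unicast;
            bool bss_cts_prot;      /* bss_conf.use_cts_prot */
            bool bss_short_pre;     /* bss_conf.use_short_preamble */
            bool sta_short_pre;     /* peer advertised WLAN_STA_SHORT_PREAMBLE, or no peer */
            bool use_rts;           /* RTS/CTS already requested for this frame */
            bool is_data;
    };

    static bool want_cts_protect(const struct txinfo *t)
    {
            return t->operating_gmode && t->rate_erp_g && t->unicast &&
                   t->bss_cts_prot && !t->use_rts;
    }

    static bool want_short_preamble(const struct txinfo *t)
    {
            return t->is_data && t->rate_short_pre &&
                   t->bss_short_pre && t->sta_short_pre;
    }

    int main(void)
    {
            struct txinfo t = { .operating_gmode = true, .rate_erp_g = true,
                                .unicast = true, .bss_cts_prot = true,
                                .is_data = true, .rate_short_pre = true,
                                .bss_short_pre = true, .sta_short_pre = true };
            printf("cts=%d short_pre=%d\n",
                   want_cts_protect(&t), want_short_preamble(&t));
            return 0;
    }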
708 if (tx->sta) { 740 if (tx->sta) {
@@ -719,17 +751,17 @@ ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx)
719 } 751 }
720 } 752 }
721 753
722 return TXRX_CONTINUE; 754 return TX_CONTINUE;
723} 755}
724 756
725static ieee80211_txrx_result 757static ieee80211_tx_result
726ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx) 758ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx)
727{ 759{
728 struct ieee80211_local *local = tx->local; 760 struct ieee80211_local *local = tx->local;
729 struct ieee80211_hw_mode *mode = tx->u.tx.mode;
730 struct sk_buff *skb = tx->skb; 761 struct sk_buff *skb = tx->skb;
731 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 762 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
732 u32 load = 0, hdrtime; 763 u32 load = 0, hdrtime;
764 struct ieee80211_rate *rate = tx->u.tx.rate;
733 765
734 /* TODO: this could be part of tx_status handling, so that the number 766 /* TODO: this could be part of tx_status handling, so that the number
735 * of retries would be known; TX rate should in that case be stored 767 * of retries would be known; TX rate should in that case be stored
@@ -740,9 +772,9 @@ ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx)
740 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, 772 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
741 * 1 usec = 1/8 * (1080 / 10) = 13.5 */ 773 * 1 usec = 1/8 * (1080 / 10) = 13.5 */
742 774
743 if (mode->mode == MODE_IEEE80211A || 775 if (tx->u.tx.channel->band == IEEE80211_BAND_5GHZ ||
744 (mode->mode == MODE_IEEE80211G && 776 (tx->u.tx.channel->band == IEEE80211_BAND_2GHZ &&
745 tx->u.tx.rate->flags & IEEE80211_RATE_ERP)) 777 rate->flags & IEEE80211_RATE_ERP_G))
746 hdrtime = CHAN_UTIL_HDR_SHORT; 778 hdrtime = CHAN_UTIL_HDR_SHORT;
747 else 779 else
748 hdrtime = CHAN_UTIL_HDR_LONG; 780 hdrtime = CHAN_UTIL_HDR_LONG;
@@ -756,14 +788,15 @@ ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx)
756 else if (tx->u.tx.control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 788 else if (tx->u.tx.control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)
757 load += hdrtime; 789 load += hdrtime;
758 790
759 load += skb->len * tx->u.tx.rate->rate_inv; 791 /* TODO: optimise again */
792 load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate;
760 793
761 if (tx->u.tx.extra_frag) { 794 if (tx->u.tx.extra_frag) {
762 int i; 795 int i;
763 for (i = 0; i < tx->u.tx.num_extra_frag; i++) { 796 for (i = 0; i < tx->u.tx.num_extra_frag; i++) {
764 load += 2 * hdrtime; 797 load += 2 * hdrtime;
765 load += tx->u.tx.extra_frag[i]->len * 798 load += tx->u.tx.extra_frag[i]->len *
766 tx->u.tx.rate->rate; 799 tx->u.tx.rate->bitrate;
767 } 800 }
768 } 801 }
769 802
@@ -774,13 +807,12 @@ ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx)
774 tx->sta->channel_use_raw += load; 807 tx->sta->channel_use_raw += load;
775 tx->sdata->channel_use_raw += load; 808 tx->sdata->channel_use_raw += load;
776 809
777 return TXRX_CONTINUE; 810 return TX_CONTINUE;
778} 811}
779 812
780/* TODO: implement register/unregister functions for adding TX/RX handlers
781 * into ordered list */
782 813
783ieee80211_tx_handler ieee80211_tx_handlers[] = 814typedef ieee80211_tx_result (*ieee80211_tx_handler)(struct ieee80211_txrx_data *);
815static ieee80211_tx_handler ieee80211_tx_handlers[] =
784{ 816{
785 ieee80211_tx_h_check_assoc, 817 ieee80211_tx_h_check_assoc,
786 ieee80211_tx_h_sequence, 818 ieee80211_tx_h_sequence,
@@ -801,7 +833,7 @@ ieee80211_tx_handler ieee80211_tx_handlers[] =
801 * deal with packet injection down monitor interface 833 * deal with packet injection down monitor interface
802 * with Radiotap Header -- only called for monitor mode interface 834 * with Radiotap Header -- only called for monitor mode interface
803 */ 835 */
804static ieee80211_txrx_result 836static ieee80211_tx_result
805__ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx, 837__ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
806 struct sk_buff *skb) 838 struct sk_buff *skb)
807{ 839{
@@ -816,10 +848,12 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
816 struct ieee80211_radiotap_iterator iterator; 848 struct ieee80211_radiotap_iterator iterator;
817 struct ieee80211_radiotap_header *rthdr = 849 struct ieee80211_radiotap_header *rthdr =
818 (struct ieee80211_radiotap_header *) skb->data; 850 (struct ieee80211_radiotap_header *) skb->data;
819 struct ieee80211_hw_mode *mode = tx->local->hw.conf.mode; 851 struct ieee80211_supported_band *sband;
820 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); 852 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len);
821 struct ieee80211_tx_control *control = tx->u.tx.control; 853 struct ieee80211_tx_control *control = tx->u.tx.control;
822 854
855 sband = tx->local->hw.wiphy->bands[tx->local->hw.conf.channel->band];
856
823 control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 857 control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT;
824 tx->flags |= IEEE80211_TXRXD_TX_INJECTED; 858 tx->flags |= IEEE80211_TXRXD_TX_INJECTED;
825 tx->flags &= ~IEEE80211_TXRXD_FRAGMENTED; 859 tx->flags &= ~IEEE80211_TXRXD_FRAGMENTED;
@@ -852,10 +886,12 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
852 * ieee80211 rate int is in 100kbps units eg, 0x0a=1Mbps 886 * ieee80211 rate int is in 100kbps units eg, 0x0a=1Mbps
853 */ 887 */
854 target_rate = (*iterator.this_arg) * 5; 888 target_rate = (*iterator.this_arg) * 5;
855 for (i = 0; i < mode->num_rates; i++) { 889 for (i = 0; i < sband->n_bitrates; i++) {
856 struct ieee80211_rate *r = &mode->rates[i]; 890 struct ieee80211_rate *r;
891
892 r = &sband->bitrates[i];
857 893
858 if (r->rate == target_rate) { 894 if (r->bitrate == target_rate) {
859 tx->u.tx.rate = r; 895 tx->u.tx.rate = r;
860 break; 896 break;
861 } 897 }
@@ -870,9 +906,11 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
870 control->antenna_sel_tx = (*iterator.this_arg) + 1; 906 control->antenna_sel_tx = (*iterator.this_arg) + 1;
871 break; 907 break;
872 908
909#if 0
873 case IEEE80211_RADIOTAP_DBM_TX_POWER: 910 case IEEE80211_RADIOTAP_DBM_TX_POWER:
874 control->power_level = *iterator.this_arg; 911 control->power_level = *iterator.this_arg;
875 break; 912 break;
913#endif
876 914
877 case IEEE80211_RADIOTAP_FLAGS: 915 case IEEE80211_RADIOTAP_FLAGS:
878 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) { 916 if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
@@ -884,7 +922,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
884 * on transmission 922 * on transmission
885 */ 923 */
886 if (skb->len < (iterator.max_length + FCS_LEN)) 924 if (skb->len < (iterator.max_length + FCS_LEN))
887 return TXRX_DROP; 925 return TX_DROP;
888 926
889 skb_trim(skb, skb->len - FCS_LEN); 927 skb_trim(skb, skb->len - FCS_LEN);
890 } 928 }
@@ -907,7 +945,7 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
907 } 945 }
908 946
909 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ 947 if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
910 return TXRX_DROP; 948 return TX_DROP;
911 949
912 /* 950 /*
913 * remove the radiotap header 951 * remove the radiotap header
@@ -916,13 +954,13 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx,
916 */ 954 */
917 skb_pull(skb, iterator.max_length); 955 skb_pull(skb, iterator.max_length);
918 956
919 return TXRX_CONTINUE; 957 return TX_CONTINUE;
920} 958}
921 959
922/* 960/*
923 * initialises @tx 961 * initialises @tx
924 */ 962 */
925static ieee80211_txrx_result 963static ieee80211_tx_result
926__ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, 964__ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
927 struct sk_buff *skb, 965 struct sk_buff *skb,
928 struct net_device *dev, 966 struct net_device *dev,
@@ -949,8 +987,8 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
949 /* process and remove the injection radiotap header */ 987 /* process and remove the injection radiotap header */
950 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 988 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
951 if (unlikely(sdata->vif.type == IEEE80211_IF_TYPE_MNTR)) { 989 if (unlikely(sdata->vif.type == IEEE80211_IF_TYPE_MNTR)) {
952 if (__ieee80211_parse_tx_radiotap(tx, skb) == TXRX_DROP) 990 if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP)
953 return TXRX_DROP; 991 return TX_DROP;
954 992
955 /* 993 /*
956 * __ieee80211_parse_tx_radiotap has now removed 994 * __ieee80211_parse_tx_radiotap has now removed
@@ -982,10 +1020,10 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
982 } 1020 }
983 1021
984 if (!tx->sta) 1022 if (!tx->sta)
985 control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; 1023 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT;
986 else if (tx->sta->clear_dst_mask) { 1024 else if (tx->sta->flags & WLAN_STA_CLEAR_PS_FILT) {
987 control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; 1025 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT;
988 tx->sta->clear_dst_mask = 0; 1026 tx->sta->flags &= ~WLAN_STA_CLEAR_PS_FILT;
989 } 1027 }
990 1028
991 hdrlen = ieee80211_get_hdrlen(tx->fc); 1029 hdrlen = ieee80211_get_hdrlen(tx->fc);
@@ -995,7 +1033,7 @@ __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
995 } 1033 }
996 control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT; 1034 control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT;
997 1035
998 return TXRX_CONTINUE; 1036 return TX_CONTINUE;
999} 1037}
1000 1038
1001/* 1039/*
@@ -1046,7 +1084,7 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1046 if (tx->u.tx.extra_frag) { 1084 if (tx->u.tx.extra_frag) {
1047 control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS | 1085 control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS |
1048 IEEE80211_TXCTL_USE_CTS_PROTECT | 1086 IEEE80211_TXCTL_USE_CTS_PROTECT |
1049 IEEE80211_TXCTL_CLEAR_DST_MASK | 1087 IEEE80211_TXCTL_CLEAR_PS_FILT |
1050 IEEE80211_TXCTL_FIRST_FRAGMENT); 1088 IEEE80211_TXCTL_FIRST_FRAGMENT);
1051 for (i = 0; i < tx->u.tx.num_extra_frag; i++) { 1089 for (i = 0; i < tx->u.tx.num_extra_frag; i++) {
1052 if (!tx->u.tx.extra_frag[i]) 1090 if (!tx->u.tx.extra_frag[i])
@@ -1054,8 +1092,8 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1054 if (__ieee80211_queue_stopped(local, control->queue)) 1092 if (__ieee80211_queue_stopped(local, control->queue))
1055 return IEEE80211_TX_FRAG_AGAIN; 1093 return IEEE80211_TX_FRAG_AGAIN;
1056 if (i == tx->u.tx.num_extra_frag) { 1094 if (i == tx->u.tx.num_extra_frag) {
1057 control->tx_rate = tx->u.tx.last_frag_hwrate; 1095 control->tx_rate = tx->u.tx.last_frag_rate;
1058 control->rate = tx->u.tx.last_frag_rate; 1096
1059 if (tx->flags & IEEE80211_TXRXD_TXPROBE_LAST_FRAG) 1097 if (tx->flags & IEEE80211_TXRXD_TXPROBE_LAST_FRAG)
1060 control->flags |= 1098 control->flags |=
1061 IEEE80211_TXCTL_RATE_CTRL_PROBE; 1099 IEEE80211_TXCTL_RATE_CTRL_PROBE;
@@ -1089,7 +1127,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1089 struct sta_info *sta; 1127 struct sta_info *sta;
1090 ieee80211_tx_handler *handler; 1128 ieee80211_tx_handler *handler;
1091 struct ieee80211_txrx_data tx; 1129 struct ieee80211_txrx_data tx;
1092 ieee80211_txrx_result res = TXRX_DROP, res_prepare; 1130 ieee80211_tx_result res = TX_DROP, res_prepare;
1093 int ret, i; 1131 int ret, i;
1094 1132
1095 WARN_ON(__ieee80211_queue_pending(local, control->queue)); 1133 WARN_ON(__ieee80211_queue_pending(local, control->queue));
@@ -1102,7 +1140,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1102 /* initialises tx */ 1140 /* initialises tx */
1103 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control); 1141 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control);
1104 1142
1105 if (res_prepare == TXRX_DROP) { 1143 if (res_prepare == TX_DROP) {
1106 dev_kfree_skb(skb); 1144 dev_kfree_skb(skb);
1107 return 0; 1145 return 0;
1108 } 1146 }
@@ -1114,12 +1152,12 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1114 rcu_read_lock(); 1152 rcu_read_lock();
1115 1153
1116 sta = tx.sta; 1154 sta = tx.sta;
1117 tx.u.tx.mode = local->hw.conf.mode; 1155 tx.u.tx.channel = local->hw.conf.channel;
1118 1156
1119 for (handler = local->tx_handlers; *handler != NULL; 1157 for (handler = ieee80211_tx_handlers; *handler != NULL;
1120 handler++) { 1158 handler++) {
1121 res = (*handler)(&tx); 1159 res = (*handler)(&tx);
1122 if (res != TXRX_CONTINUE) 1160 if (res != TX_CONTINUE)
1123 break; 1161 break;
1124 } 1162 }
1125 1163
@@ -1128,12 +1166,12 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1128 if (sta) 1166 if (sta)
1129 sta_info_put(sta); 1167 sta_info_put(sta);
1130 1168
1131 if (unlikely(res == TXRX_DROP)) { 1169 if (unlikely(res == TX_DROP)) {
1132 I802_DEBUG_INC(local->tx_handlers_drop); 1170 I802_DEBUG_INC(local->tx_handlers_drop);
1133 goto drop; 1171 goto drop;
1134 } 1172 }
1135 1173
1136 if (unlikely(res == TXRX_QUEUED)) { 1174 if (unlikely(res == TX_QUEUED)) {
1137 I802_DEBUG_INC(local->tx_handlers_queued); 1175 I802_DEBUG_INC(local->tx_handlers_queued);
1138 rcu_read_unlock(); 1176 rcu_read_unlock();
1139 return 0; 1177 return 0;
@@ -1151,7 +1189,6 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1151 } else { 1189 } else {
1152 next_len = 0; 1190 next_len = 0;
1153 tx.u.tx.rate = tx.u.tx.last_frag_rate; 1191 tx.u.tx.rate = tx.u.tx.last_frag_rate;
1154 tx.u.tx.last_frag_hwrate = tx.u.tx.rate->val;
1155 } 1192 }
1156 dur = ieee80211_duration(&tx, 0, next_len); 1193 dur = ieee80211_duration(&tx, 0, next_len);
1157 hdr->duration_id = cpu_to_le16(dur); 1194 hdr->duration_id = cpu_to_le16(dur);
@@ -1188,7 +1225,6 @@ retry:
1188 store->skb = skb; 1225 store->skb = skb;
1189 store->extra_frag = tx.u.tx.extra_frag; 1226 store->extra_frag = tx.u.tx.extra_frag;
1190 store->num_extra_frag = tx.u.tx.num_extra_frag; 1227 store->num_extra_frag = tx.u.tx.num_extra_frag;
1191 store->last_frag_hwrate = tx.u.tx.last_frag_hwrate;
1192 store->last_frag_rate = tx.u.tx.last_frag_rate; 1228 store->last_frag_rate = tx.u.tx.last_frag_rate;
1193 store->last_frag_rate_ctrl_probe = 1229 store->last_frag_rate_ctrl_probe =
1194 !!(tx.flags & IEEE80211_TXRXD_TXPROBE_LAST_FRAG); 1230 !!(tx.flags & IEEE80211_TXRXD_TXPROBE_LAST_FRAG);
@@ -1260,6 +1296,8 @@ int ieee80211_master_start_xmit(struct sk_buff *skb,
1260 control.flags |= IEEE80211_TXCTL_REQUEUE; 1296 control.flags |= IEEE80211_TXCTL_REQUEUE;
1261 if (pkt_data->flags & IEEE80211_TXPD_EAPOL_FRAME) 1297 if (pkt_data->flags & IEEE80211_TXPD_EAPOL_FRAME)
1262 control.flags |= IEEE80211_TXCTL_EAPOL_FRAME; 1298 control.flags |= IEEE80211_TXCTL_EAPOL_FRAME;
1299 if (pkt_data->flags & IEEE80211_TXPD_AMPDU)
1300 control.flags |= IEEE80211_TXCTL_AMPDU;
1263 control.queue = pkt_data->queue; 1301 control.queue = pkt_data->queue;
1264 1302
1265 ret = ieee80211_tx(odev, skb, &control); 1303 ret = ieee80211_tx(odev, skb, &control);
@@ -1409,10 +1447,17 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1409 goto fail; 1447 goto fail;
1410 } 1448 }
1411 1449
1412 sta = sta_info_get(local, hdr.addr1); 1450 /*
1413 if (sta) { 1451 * There's no need to try to look up the destination
1414 sta_flags = sta->flags; 1452 * if it is a multicast address (which can only happen
1415 sta_info_put(sta); 1453 * in AP mode)
1454 */
1455 if (!is_multicast_ether_addr(hdr.addr1)) {
1456 sta = sta_info_get(local, hdr.addr1);
1457 if (sta) {
1458 sta_flags = sta->flags;
1459 sta_info_put(sta);
1460 }
1416 } 1461 }
1417 1462
1418 /* receiver is QoS enabled, use a QoS type frame */ 1463 /* receiver is QoS enabled, use a QoS type frame */
@@ -1422,10 +1467,10 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1422 } 1467 }
1423 1468
1424 /* 1469 /*
1425 * If port access control is enabled, drop frames to unauthorised 1470 * Drop unicast frames to unauthorised stations unless they are
1426 * stations unless they are EAPOL frames from the local station. 1471 * EAPOL frames from the local station.
1427 */ 1472 */
1428 if (unlikely(sdata->ieee802_1x_pac && 1473 if (unlikely(!is_multicast_ether_addr(hdr.addr1) &&
1429 !(sta_flags & WLAN_STA_AUTHORIZED) && 1474 !(sta_flags & WLAN_STA_AUTHORIZED) &&
1430 !(ethertype == ETH_P_PAE && 1475 !(ethertype == ETH_P_PAE &&
1431 compare_ether_addr(dev->dev_addr, 1476 compare_ether_addr(dev->dev_addr,
@@ -1598,7 +1643,6 @@ void ieee80211_tx_pending(unsigned long data)
1598 tx.u.tx.control = &store->control; 1643 tx.u.tx.control = &store->control;
1599 tx.u.tx.extra_frag = store->extra_frag; 1644 tx.u.tx.extra_frag = store->extra_frag;
1600 tx.u.tx.num_extra_frag = store->num_extra_frag; 1645 tx.u.tx.num_extra_frag = store->num_extra_frag;
1601 tx.u.tx.last_frag_hwrate = store->last_frag_hwrate;
1602 tx.u.tx.last_frag_rate = store->last_frag_rate; 1646 tx.u.tx.last_frag_rate = store->last_frag_rate;
1603 tx.flags = 0; 1647 tx.flags = 0;
1604 if (store->last_frag_rate_ctrl_probe) 1648 if (store->last_frag_rate_ctrl_probe)
@@ -1701,6 +1745,9 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1701 struct ieee80211_if_ap *ap = NULL; 1745 struct ieee80211_if_ap *ap = NULL;
1702 struct rate_selection rsel; 1746 struct rate_selection rsel;
1703 struct beacon_data *beacon; 1747 struct beacon_data *beacon;
1748 struct ieee80211_supported_band *sband;
1749
1750 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1704 1751
1705 rcu_read_lock(); 1752 rcu_read_lock();
1706 1753
@@ -1739,8 +1786,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1739 beacon->tail_len); 1786 beacon->tail_len);
1740 1787
1741 if (control) { 1788 if (control) {
1742 rate_control_get_rate(local->mdev, local->oper_hw_mode, skb, 1789 rate_control_get_rate(local->mdev, sband, skb, &rsel);
1743 &rsel);
1744 if (!rsel.rate) { 1790 if (!rsel.rate) {
1745 if (net_ratelimit()) { 1791 if (net_ratelimit()) {
1746 printk(KERN_DEBUG "%s: ieee80211_beacon_get: " 1792 printk(KERN_DEBUG "%s: ieee80211_beacon_get: "
@@ -1753,15 +1799,14 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1753 } 1799 }
1754 1800
1755 control->vif = vif; 1801 control->vif = vif;
1756 control->tx_rate = 1802 control->tx_rate = rsel.rate;
1757 (sdata->bss_conf.use_short_preamble && 1803 if (sdata->bss_conf.use_short_preamble &&
1758 (rsel.rate->flags & IEEE80211_RATE_PREAMBLE2)) ? 1804 rsel.rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
1759 rsel.rate->val2 : rsel.rate->val; 1805 control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE;
1760 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; 1806 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
1761 control->power_level = local->hw.conf.power_level;
1762 control->flags |= IEEE80211_TXCTL_NO_ACK; 1807 control->flags |= IEEE80211_TXCTL_NO_ACK;
1763 control->retry_limit = 1; 1808 control->retry_limit = 1;
1764 control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; 1809 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT;
1765 } 1810 }
1766 1811
1767 ap->num_beacons++; 1812 ap->num_beacons++;
@@ -1815,7 +1860,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1815 struct sta_info *sta; 1860 struct sta_info *sta;
1816 ieee80211_tx_handler *handler; 1861 ieee80211_tx_handler *handler;
1817 struct ieee80211_txrx_data tx; 1862 struct ieee80211_txrx_data tx;
1818 ieee80211_txrx_result res = TXRX_DROP; 1863 ieee80211_tx_result res = TX_DROP;
1819 struct net_device *bdev; 1864 struct net_device *bdev;
1820 struct ieee80211_sub_if_data *sdata; 1865 struct ieee80211_sub_if_data *sdata;
1821 struct ieee80211_if_ap *bss = NULL; 1866 struct ieee80211_if_ap *bss = NULL;
@@ -1863,20 +1908,20 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
1863 } 1908 }
1864 sta = tx.sta; 1909 sta = tx.sta;
1865 tx.flags |= IEEE80211_TXRXD_TXPS_BUFFERED; 1910 tx.flags |= IEEE80211_TXRXD_TXPS_BUFFERED;
1866 tx.u.tx.mode = local->hw.conf.mode; 1911 tx.u.tx.channel = local->hw.conf.channel;
1867 1912
1868 for (handler = local->tx_handlers; *handler != NULL; handler++) { 1913 for (handler = ieee80211_tx_handlers; *handler != NULL; handler++) {
1869 res = (*handler)(&tx); 1914 res = (*handler)(&tx);
1870 if (res == TXRX_DROP || res == TXRX_QUEUED) 1915 if (res == TX_DROP || res == TX_QUEUED)
1871 break; 1916 break;
1872 } 1917 }
1873 skb = tx.skb; /* handlers are allowed to change skb */ 1918 skb = tx.skb; /* handlers are allowed to change skb */
1874 1919
1875 if (res == TXRX_DROP) { 1920 if (res == TX_DROP) {
1876 I802_DEBUG_INC(local->tx_handlers_drop); 1921 I802_DEBUG_INC(local->tx_handlers_drop);
1877 dev_kfree_skb(skb); 1922 dev_kfree_skb(skb);
1878 skb = NULL; 1923 skb = NULL;
1879 } else if (res == TXRX_QUEUED) { 1924 } else if (res == TX_QUEUED) {
1880 I802_DEBUG_INC(local->tx_handlers_queued); 1925 I802_DEBUG_INC(local->tx_handlers_queued);
1881 skb = NULL; 1926 skb = NULL;
1882 } 1927 }
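Throughout tx.c the old shared ieee80211_txrx_result is split into a TX-only ieee80211_tx_result, and the handler table becomes a private, statically defined array of function pointers that ieee80211_tx() walks until one of them stops the frame. The dispatch loop itself is tiny; below is a standalone sketch of that pattern, with the handlers trimmed to no-ops and the names invented for the example:

    #include <stdio.h>

    enum tx_result { TX_CONTINUE, TX_DROP, TX_QUEUED };

    struct tx_data { int dummy; };  /* stands in for struct ieee80211_txrx_data */

    static enum tx_result h_check_assoc(struct tx_data *tx) { (void)tx; return TX_CONTINUE; }
    static enum tx_result h_ps_buf(struct tx_data *tx)      { (void)tx; return TX_CONTINUE; }
    static enum tx_result h_rate_ctrl(struct tx_data *tx)   { (void)tx; return TX_CONTINUE; }

    typedef enum tx_result (*tx_handler)(struct tx_data *);

    static tx_handler handlers[] = {
            h_check_assoc,
            h_ps_buf,
            h_rate_ctrl,
            NULL,                   /* table is NULL-terminated, as in mac80211 */
    };

    int main(void)
    {
            struct tx_data tx = { 0 };
            enum tx_result res = TX_DROP;
            tx_handler *h;

            for (h = handlers; *h; h++) {
                    res = (*h)(&tx);
                    if (res != TX_CONTINUE) /* TX_DROP frees the frame, TX_QUEUED parks it */
                            break;
            }
            printf("result: %d\n", res);
            return 0;
    }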
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 5e631ce98d7e..f64804fed0a9 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -41,92 +41,6 @@ const unsigned char bridge_tunnel_header[] =
41 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; 41 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
42 42
43 43
44static int rate_list_match(const int *rate_list, int rate)
45{
46 int i;
47
48 if (!rate_list)
49 return 0;
50
51 for (i = 0; rate_list[i] >= 0; i++)
52 if (rate_list[i] == rate)
53 return 1;
54
55 return 0;
56}
57
58void ieee80211_prepare_rates(struct ieee80211_local *local,
59 struct ieee80211_hw_mode *mode)
60{
61 int i;
62
63 for (i = 0; i < mode->num_rates; i++) {
64 struct ieee80211_rate *rate = &mode->rates[i];
65
66 rate->flags &= ~(IEEE80211_RATE_SUPPORTED |
67 IEEE80211_RATE_BASIC);
68
69 if (local->supp_rates[mode->mode]) {
70 if (!rate_list_match(local->supp_rates[mode->mode],
71 rate->rate))
72 continue;
73 }
74
75 rate->flags |= IEEE80211_RATE_SUPPORTED;
76
77 /* Use configured basic rate set if it is available. If not,
78 * use defaults that are sane for most cases. */
79 if (local->basic_rates[mode->mode]) {
80 if (rate_list_match(local->basic_rates[mode->mode],
81 rate->rate))
82 rate->flags |= IEEE80211_RATE_BASIC;
83 } else switch (mode->mode) {
84 case MODE_IEEE80211A:
85 if (rate->rate == 60 || rate->rate == 120 ||
86 rate->rate == 240)
87 rate->flags |= IEEE80211_RATE_BASIC;
88 break;
89 case MODE_IEEE80211B:
90 if (rate->rate == 10 || rate->rate == 20)
91 rate->flags |= IEEE80211_RATE_BASIC;
92 break;
93 case MODE_IEEE80211G:
94 if (rate->rate == 10 || rate->rate == 20 ||
95 rate->rate == 55 || rate->rate == 110)
96 rate->flags |= IEEE80211_RATE_BASIC;
97 break;
98 case NUM_IEEE80211_MODES:
99 /* not useful */
100 break;
101 }
102
103 /* Set ERP and MANDATORY flags based on phymode */
104 switch (mode->mode) {
105 case MODE_IEEE80211A:
106 if (rate->rate == 60 || rate->rate == 120 ||
107 rate->rate == 240)
108 rate->flags |= IEEE80211_RATE_MANDATORY;
109 break;
110 case MODE_IEEE80211B:
111 if (rate->rate == 10)
112 rate->flags |= IEEE80211_RATE_MANDATORY;
113 break;
114 case MODE_IEEE80211G:
115 if (rate->rate == 10 || rate->rate == 20 ||
116 rate->rate == 55 || rate->rate == 110 ||
117 rate->rate == 60 || rate->rate == 120 ||
118 rate->rate == 240)
119 rate->flags |= IEEE80211_RATE_MANDATORY;
120 break;
121 case NUM_IEEE80211_MODES:
122 /* not useful */
123 break;
124 }
125 if (ieee80211_is_erp_rate(mode->mode, rate->rate))
126 rate->flags |= IEEE80211_RATE_ERP;
127 }
128}
129
130u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 44u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
131 enum ieee80211_if_types type) 45 enum ieee80211_if_types type)
132{ 46{
@@ -262,7 +176,7 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
262 * DIV_ROUND_UP() operations. 176 * DIV_ROUND_UP() operations.
263 */ 177 */
264 178
265 if (local->hw.conf.phymode == MODE_IEEE80211A || erp) { 179 if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ || erp) {
266 /* 180 /*
267 * OFDM: 181 * OFDM:
268 * 182 *
@@ -304,15 +218,19 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
304/* Exported duration function for driver use */ 218/* Exported duration function for driver use */
305__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, 219__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
306 struct ieee80211_vif *vif, 220 struct ieee80211_vif *vif,
307 size_t frame_len, int rate) 221 size_t frame_len,
222 struct ieee80211_rate *rate)
308{ 223{
309 struct ieee80211_local *local = hw_to_local(hw); 224 struct ieee80211_local *local = hw_to_local(hw);
310 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 225 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
311 u16 dur; 226 u16 dur;
312 int erp; 227 int erp;
313 228
314 erp = ieee80211_is_erp_rate(hw->conf.phymode, rate); 229 erp = 0;
315 dur = ieee80211_frame_duration(local, frame_len, rate, erp, 230 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
231 erp = rate->flags & IEEE80211_RATE_ERP_G;
232
233 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp,
316 sdata->bss_conf.use_short_preamble); 234 sdata->bss_conf.use_short_preamble);
317 235
318 return cpu_to_le16(dur); 236 return cpu_to_le16(dur);
@@ -332,17 +250,20 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
332 250
333 short_preamble = sdata->bss_conf.use_short_preamble; 251 short_preamble = sdata->bss_conf.use_short_preamble;
334 252
335 rate = frame_txctl->rts_rate; 253 rate = frame_txctl->rts_cts_rate;
336 erp = !!(rate->flags & IEEE80211_RATE_ERP); 254
255 erp = 0;
256 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
257 erp = rate->flags & IEEE80211_RATE_ERP_G;
337 258
338 /* CTS duration */ 259 /* CTS duration */
339 dur = ieee80211_frame_duration(local, 10, rate->rate, 260 dur = ieee80211_frame_duration(local, 10, rate->bitrate,
340 erp, short_preamble); 261 erp, short_preamble);
341 /* Data frame duration */ 262 /* Data frame duration */
342 dur += ieee80211_frame_duration(local, frame_len, rate->rate, 263 dur += ieee80211_frame_duration(local, frame_len, rate->bitrate,
343 erp, short_preamble); 264 erp, short_preamble);
344 /* ACK duration */ 265 /* ACK duration */
345 dur += ieee80211_frame_duration(local, 10, rate->rate, 266 dur += ieee80211_frame_duration(local, 10, rate->bitrate,
346 erp, short_preamble); 267 erp, short_preamble);
347 268
348 return cpu_to_le16(dur); 269 return cpu_to_le16(dur);
@@ -363,15 +284,17 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
363 284
364 short_preamble = sdata->bss_conf.use_short_preamble; 285 short_preamble = sdata->bss_conf.use_short_preamble;
365 286
366 rate = frame_txctl->rts_rate; 287 rate = frame_txctl->rts_cts_rate;
367 erp = !!(rate->flags & IEEE80211_RATE_ERP); 288 erp = 0;
289 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
290 erp = rate->flags & IEEE80211_RATE_ERP_G;
368 291
369 /* Data frame duration */ 292 /* Data frame duration */
370 dur = ieee80211_frame_duration(local, frame_len, rate->rate, 293 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate,
371 erp, short_preamble); 294 erp, short_preamble);
372 if (!(frame_txctl->flags & IEEE80211_TXCTL_NO_ACK)) { 295 if (!(frame_txctl->flags & IEEE80211_TXCTL_NO_ACK)) {
373 /* ACK duration */ 296 /* ACK duration */
374 dur += ieee80211_frame_duration(local, 10, rate->rate, 297 dur += ieee80211_frame_duration(local, 10, rate->bitrate,
375 erp, short_preamble); 298 erp, short_preamble);
376 } 299 }
377 300
@@ -379,27 +302,6 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
379} 302}
380EXPORT_SYMBOL(ieee80211_ctstoself_duration); 303EXPORT_SYMBOL(ieee80211_ctstoself_duration);
381 304
382struct ieee80211_rate *
383ieee80211_get_rate(struct ieee80211_local *local, int phymode, int hw_rate)
384{
385 struct ieee80211_hw_mode *mode;
386 int r;
387
388 list_for_each_entry(mode, &local->modes_list, list) {
389 if (mode->mode != phymode)
390 continue;
391 for (r = 0; r < mode->num_rates; r++) {
392 struct ieee80211_rate *rate = &mode->rates[r];
393 if (rate->val == hw_rate ||
394 (rate->flags & IEEE80211_RATE_PREAMBLE2 &&
395 rate->val2 == hw_rate))
396 return rate;
397 }
398 }
399
400 return NULL;
401}
402
403void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) 305void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue)
404{ 306{
405 struct ieee80211_local *local = hw_to_local(hw); 307 struct ieee80211_local *local = hw_to_local(hw);
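
The util.c hunks above drop the phymode-based ERP test and instead derive ERP from the interface's G-mode flag plus a per-rate ERP_G flag, and the duration helpers now take a struct ieee80211_rate rather than a raw rate value. A minimal standalone sketch of that selection logic, using trimmed stand-in structs (only the flag and field names follow the diff; the constants and the demo values are illustrative, not kernel definitions):

/* Standalone sketch (not kernel code): simplified stand-ins to show how
 * ERP is now derived from the interface's G-mode flag and the rate's
 * ERP_G flag instead of the global phymode. */
#include <stdio.h>

#define SDATA_OPERATING_GMODE  0x01   /* stand-in for IEEE80211_SDATA_OPERATING_GMODE */
#define RATE_ERP_G             0x02   /* stand-in for IEEE80211_RATE_ERP_G */

struct rate  { int bitrate; int flags; };   /* bitrate in units of 100 kbit/s */
struct sdata { int flags; };

/* Mirrors the new selection logic from the patch. */
static int rate_is_erp(const struct sdata *sdata, const struct rate *rate)
{
	int erp = 0;

	if (sdata->flags & SDATA_OPERATING_GMODE)
		erp = rate->flags & RATE_ERP_G;

	return !!erp;
}

int main(void)
{
	struct sdata bg  = { .flags = SDATA_OPERATING_GMODE };
	struct sdata b   = { .flags = 0 };
	struct rate  r54 = { .bitrate = 540, .flags = RATE_ERP_G };
	struct rate  r11 = { .bitrate = 110, .flags = 0 };

	/* 54 Mbit/s counts as ERP only when the interface operates in G mode. */
	printf("54M on G iface: erp=%d\n", rate_is_erp(&bg, &r54));
	printf("54M on B iface: erp=%d\n", rate_is_erp(&b,  &r54));
	printf("11M on G iface: erp=%d\n", rate_is_erp(&bg, &r11));
	return 0;
}

The same erp result is then fed into ieee80211_frame_duration() together with rate->bitrate, as the three duration helpers in the hunks above show.
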
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index a0cff72a580b..a33ef5cfa9ad 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -305,13 +305,13 @@ u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key)
305 return NULL; 305 return NULL;
306} 306}
307 307
308ieee80211_txrx_result 308ieee80211_rx_result
309ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx) 309ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx)
310{ 310{
311 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && 311 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA &&
312 ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 312 ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT ||
313 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) 313 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH))
314 return TXRX_CONTINUE; 314 return RX_CONTINUE;
315 315
316 if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) { 316 if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) {
317 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) { 317 if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) {
@@ -320,7 +320,7 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx)
320 printk(KERN_DEBUG "%s: RX WEP frame, decrypt " 320 printk(KERN_DEBUG "%s: RX WEP frame, decrypt "
321 "failed\n", rx->dev->name); 321 "failed\n", rx->dev->name);
322#endif /* CONFIG_MAC80211_DEBUG */ 322#endif /* CONFIG_MAC80211_DEBUG */
323 return TXRX_DROP; 323 return RX_DROP_UNUSABLE;
324 } 324 }
325 } else if (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) { 325 } else if (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) {
326 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); 326 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
@@ -328,7 +328,7 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx)
328 skb_trim(rx->skb, rx->skb->len - 4); 328 skb_trim(rx->skb, rx->skb->len - 4);
329 } 329 }
330 330
331 return TXRX_CONTINUE; 331 return RX_CONTINUE;
332} 332}
333 333
334static int wep_encrypt_skb(struct ieee80211_txrx_data *tx, struct sk_buff *skb) 334static int wep_encrypt_skb(struct ieee80211_txrx_data *tx, struct sk_buff *skb)
@@ -346,7 +346,7 @@ static int wep_encrypt_skb(struct ieee80211_txrx_data *tx, struct sk_buff *skb)
346 return 0; 346 return 0;
347} 347}
348 348
349ieee80211_txrx_result 349ieee80211_tx_result
350ieee80211_crypto_wep_encrypt(struct ieee80211_txrx_data *tx) 350ieee80211_crypto_wep_encrypt(struct ieee80211_txrx_data *tx)
351{ 351{
352 tx->u.tx.control->iv_len = WEP_IV_LEN; 352 tx->u.tx.control->iv_len = WEP_IV_LEN;
@@ -355,7 +355,7 @@ ieee80211_crypto_wep_encrypt(struct ieee80211_txrx_data *tx)
355 355
356 if (wep_encrypt_skb(tx, tx->skb) < 0) { 356 if (wep_encrypt_skb(tx, tx->skb) < 0) {
357 I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); 357 I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
358 return TXRX_DROP; 358 return TX_DROP;
359 } 359 }
360 360
361 if (tx->u.tx.extra_frag) { 361 if (tx->u.tx.extra_frag) {
@@ -364,10 +364,10 @@ ieee80211_crypto_wep_encrypt(struct ieee80211_txrx_data *tx)
364 if (wep_encrypt_skb(tx, tx->u.tx.extra_frag[i]) < 0) { 364 if (wep_encrypt_skb(tx, tx->u.tx.extra_frag[i]) < 0) {
365 I802_DEBUG_INC(tx->local-> 365 I802_DEBUG_INC(tx->local->
366 tx_handlers_drop_wep); 366 tx_handlers_drop_wep);
367 return TXRX_DROP; 367 return TX_DROP;
368 } 368 }
369 } 369 }
370 } 370 }
371 371
372 return TXRX_CONTINUE; 372 return TX_CONTINUE;
373} 373}
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index 785fbb4e0dd7..43aef50cd0d6 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -28,9 +28,9 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
28 struct ieee80211_key *key); 28 struct ieee80211_key *key);
29u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); 29u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key);
30 30
31ieee80211_txrx_result 31ieee80211_rx_result
32ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx); 32ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx);
33ieee80211_txrx_result 33ieee80211_tx_result
34ieee80211_crypto_wep_encrypt(struct ieee80211_txrx_data *tx); 34ieee80211_crypto_wep_encrypt(struct ieee80211_txrx_data *tx);
35 35
36#endif /* WEP_H */ 36#endif /* WEP_H */
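
The wep.c/wep.h hunks are part of a tree-wide change that splits the old ieee80211_txrx_result into separate TX and RX result types. A toy sketch of what the split buys (the value names TX_CONTINUE, TX_DROP, TX_QUEUED, RX_CONTINUE and RX_DROP_UNUSABLE come from the diff; the enums, structs and handlers below are illustrative stand-ins, not the mac80211 typedefs):

/* Standalone sketch of the split return types. */
#include <stdio.h>

typedef enum { TX_CONTINUE, TX_DROP, TX_QUEUED } tx_result;
typedef enum { RX_CONTINUE, RX_DROP_UNUSABLE } rx_result;

struct pkt { int encrypted; int mic_ok; };

/* A handler now returns a direction-specific result, so a TX handler can
 * no longer accidentally hand back an RX-only verdict (and vice versa). */
static rx_result rx_mic_verify(const struct pkt *p)
{
	if (!p->encrypted)
		return RX_CONTINUE;            /* nothing to verify */
	return p->mic_ok ? RX_CONTINUE : RX_DROP_UNUSABLE;
}

static tx_result tx_encrypt(struct pkt *p)
{
	p->encrypted = 1;
	return TX_CONTINUE;                    /* or TX_DROP on failure */
}

int main(void)
{
	struct pkt good = { 1, 1 }, bad = { 1, 0 };

	printf("tx_encrypt -> %d\n", tx_encrypt(&good));
	printf("rx_mic_verify(good) -> %d\n", rx_mic_verify(&good));
	printf("rx_mic_verify(bad)  -> %d\n", rx_mic_verify(&bad));
	return 0;
}
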
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 4e236599dd31..8cc036decc82 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -19,10 +19,13 @@
19#include "wme.h" 19#include "wme.h"
20 20
21/* maximum number of hardware queues we support. */ 21/* maximum number of hardware queues we support. */
22#define TC_80211_MAX_QUEUES 8 22#define TC_80211_MAX_QUEUES 16
23
24const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
23 25
24struct ieee80211_sched_data 26struct ieee80211_sched_data
25{ 27{
28 unsigned long qdisc_pool[BITS_TO_LONGS(TC_80211_MAX_QUEUES)];
26 struct tcf_proto *filter_list; 29 struct tcf_proto *filter_list;
27 struct Qdisc *queues[TC_80211_MAX_QUEUES]; 30 struct Qdisc *queues[TC_80211_MAX_QUEUES];
28 struct sk_buff_head requeued[TC_80211_MAX_QUEUES]; 31 struct sk_buff_head requeued[TC_80211_MAX_QUEUES];
@@ -98,7 +101,6 @@ static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd)
98 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 101 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
99 unsigned short fc = le16_to_cpu(hdr->frame_control); 102 unsigned short fc = le16_to_cpu(hdr->frame_control);
100 int qos; 103 int qos;
101 const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
102 104
103 /* see if frame is data or non data frame */ 105 /* see if frame is data or non data frame */
104 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) { 106 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) {
@@ -146,9 +148,25 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
146 unsigned short fc = le16_to_cpu(hdr->frame_control); 148 unsigned short fc = le16_to_cpu(hdr->frame_control);
147 struct Qdisc *qdisc; 149 struct Qdisc *qdisc;
148 int err, queue; 150 int err, queue;
151 struct sta_info *sta;
152 u8 tid;
149 153
150 if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) { 154 if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) {
151 skb_queue_tail(&q->requeued[pkt_data->queue], skb); 155 queue = pkt_data->queue;
156 sta = sta_info_get(local, hdr->addr1);
157 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
158 if (sta) {
159 int ampdu_queue = sta->tid_to_tx_q[tid];
160 if ((ampdu_queue < local->hw.queues) &&
161 test_bit(ampdu_queue, q->qdisc_pool)) {
162 queue = ampdu_queue;
163 pkt_data->flags |= IEEE80211_TXPD_AMPDU;
164 } else {
165 pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
166 }
167 sta_info_put(sta);
168 }
169 skb_queue_tail(&q->requeued[queue], skb);
152 qd->q.qlen++; 170 qd->q.qlen++;
153 return 0; 171 return 0;
154 } 172 }
@@ -159,14 +177,28 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
159 */ 177 */
160 if (WLAN_FC_IS_QOS_DATA(fc)) { 178 if (WLAN_FC_IS_QOS_DATA(fc)) {
161 u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2; 179 u8 *p = skb->data + ieee80211_get_hdrlen(fc) - 2;
162 u8 qos_hdr = skb->priority & QOS_CONTROL_TAG1D_MASK; 180 u8 ack_policy = 0;
181 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
163 if (local->wifi_wme_noack_test) 182 if (local->wifi_wme_noack_test)
164 qos_hdr |= QOS_CONTROL_ACK_POLICY_NOACK << 183 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
165 QOS_CONTROL_ACK_POLICY_SHIFT; 184 QOS_CONTROL_ACK_POLICY_SHIFT;
166 /* qos header is 2 bytes, second reserved */ 185 /* qos header is 2 bytes, second reserved */
167 *p = qos_hdr; 186 *p = ack_policy | tid;
168 p++; 187 p++;
169 *p = 0; 188 *p = 0;
189
190 sta = sta_info_get(local, hdr->addr1);
191 if (sta) {
192 int ampdu_queue = sta->tid_to_tx_q[tid];
193 if ((ampdu_queue < local->hw.queues) &&
194 test_bit(ampdu_queue, q->qdisc_pool)) {
195 queue = ampdu_queue;
196 pkt_data->flags |= IEEE80211_TXPD_AMPDU;
197 } else {
198 pkt_data->flags &= ~IEEE80211_TXPD_AMPDU;
199 }
200 sta_info_put(sta);
201 }
170 } 202 }
171 203
172 if (unlikely(queue >= local->hw.queues)) { 204 if (unlikely(queue >= local->hw.queues)) {
@@ -184,6 +216,7 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
184 kfree_skb(skb); 216 kfree_skb(skb);
185 err = NET_XMIT_DROP; 217 err = NET_XMIT_DROP;
186 } else { 218 } else {
219 tid = skb->priority & QOS_CONTROL_TAG1D_MASK;
187 pkt_data->queue = (unsigned int) queue; 220 pkt_data->queue = (unsigned int) queue;
188 qdisc = q->queues[queue]; 221 qdisc = q->queues[queue];
189 err = qdisc->enqueue(skb, qdisc); 222 err = qdisc->enqueue(skb, qdisc);
@@ -235,10 +268,11 @@ static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
235 /* check all the h/w queues in numeric/priority order */ 268 /* check all the h/w queues in numeric/priority order */
236 for (queue = 0; queue < hw->queues; queue++) { 269 for (queue = 0; queue < hw->queues; queue++) {
237 /* see if there is room in this hardware queue */ 270 /* see if there is room in this hardware queue */
238 if (test_bit(IEEE80211_LINK_STATE_XOFF, 271 if ((test_bit(IEEE80211_LINK_STATE_XOFF,
239 &local->state[queue]) || 272 &local->state[queue])) ||
240 test_bit(IEEE80211_LINK_STATE_PENDING, 273 (test_bit(IEEE80211_LINK_STATE_PENDING,
241 &local->state[queue])) 274 &local->state[queue])) ||
275 (!test_bit(queue, q->qdisc_pool)))
242 continue; 276 continue;
243 277
244 /* there is space - try and get a frame */ 278 /* there is space - try and get a frame */
@@ -360,6 +394,10 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
360 } 394 }
361 } 395 }
362 396
397 /* reserve all legacy QoS queues */
398 for (i = 0; i < min(IEEE80211_TX_QUEUE_DATA4, queues); i++)
399 set_bit(i, q->qdisc_pool);
400
363 return err; 401 return err;
364} 402}
365 403
@@ -605,3 +643,80 @@ void ieee80211_wme_unregister(void)
605{ 643{
606 unregister_qdisc(&wme_qdisc_ops); 644 unregister_qdisc(&wme_qdisc_ops);
607} 645}
646
647int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
648 struct sta_info *sta, u16 tid)
649{
650 int i;
651 struct ieee80211_sched_data *q =
652 qdisc_priv(local->mdev->qdisc_sleeping);
653 DECLARE_MAC_BUF(mac);
654
655 /* prepare the filter and save it for the SW queue
656 * matching the received HW queue */
657
658 /* try to get a Qdisc from the pool */
659 for (i = IEEE80211_TX_QUEUE_BEACON; i < local->hw.queues; i++)
660 if (!test_and_set_bit(i, q->qdisc_pool)) {
661 ieee80211_stop_queue(local_to_hw(local), i);
662 sta->tid_to_tx_q[tid] = i;
663
664 /* If there are already pending packets
665 * on this tid, we first need to drain them
666 * on the previous queue,
667 * since HT requires strict ordering */
668#ifdef CONFIG_MAC80211_HT_DEBUG
669 if (net_ratelimit())
670 printk(KERN_DEBUG "allocated aggregation queue"
671 " %d tid %d addr %s pool=0x%lX",
672 i, tid, print_mac(mac, sta->addr),
673 q->qdisc_pool[0]);
674#endif /* CONFIG_MAC80211_HT_DEBUG */
675 return 0;
676 }
677
678 return -EAGAIN;
679}
680
681/**
682 * The caller must hold local->mdev->queue_lock
683 */
684void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
685 struct sta_info *sta, u16 tid,
686 u8 requeue)
687{
688 struct ieee80211_sched_data *q =
689 qdisc_priv(local->mdev->qdisc_sleeping);
690 int agg_queue = sta->tid_to_tx_q[tid];
691
692 /* return the qdisc to the pool */
693 clear_bit(agg_queue, q->qdisc_pool);
694 sta->tid_to_tx_q[tid] = local->hw.queues;
695
696 if (requeue)
697 ieee80211_requeue(local, agg_queue);
698 else
699 q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
700}
701
702void ieee80211_requeue(struct ieee80211_local *local, int queue)
703{
704 struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
705 struct ieee80211_sched_data *q = qdisc_priv(root_qd);
706 struct Qdisc *qdisc = q->queues[queue];
707 struct sk_buff *skb = NULL;
708 u32 len = qdisc->q.qlen;
709
710 if (!qdisc || !qdisc->dequeue)
711 return;
712
713 printk(KERN_DEBUG "requeue: qlen = %d\n", qdisc->q.qlen);
714 for (len = qdisc->q.qlen; len > 0; len--) {
715 skb = qdisc->dequeue(qdisc);
716 root_qd->q.qlen--;
717 /* packet will be classified again and */
718 /* skb->packet_data->queue will be overridden if needed */
719 if (skb)
720 wme_qdiscop_enqueue(skb, root_qd);
721 }
722}
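
The wme.c hunks introduce a qdisc_pool bitmap: the legacy QoS queues are reserved at init time, and each aggregation (A-MPDU) session claims a free hardware queue for its TID. A userspace sketch of that idea follows; the bit helpers stand in for the kernel's test_and_set_bit()/clear_bit(), and the queue counts and the starting index of the scan are made up for the example:

/* Userspace sketch of the qdisc_pool idea: one bit per hardware queue. */
#include <stdio.h>

#define MAX_QUEUES     16
#define LEGACY_QUEUES   4      /* stand-in for the legacy QoS data queues */

static unsigned long qdisc_pool;

static int test_and_set(unsigned long *map, int bit)
{
	int was_set = !!(*map & (1UL << bit));
	*map |= 1UL << bit;
	return was_set;
}

static void clear_queue(unsigned long *map, int bit)
{
	*map &= ~(1UL << bit);
}

/* Mirrors the shape of ieee80211_ht_agg_queue_add(): scan for a free queue. */
static int agg_queue_add(int n_queues)
{
	int i;

	for (i = LEGACY_QUEUES; i < n_queues; i++)
		if (!test_and_set(&qdisc_pool, i))
			return i;              /* this TID now owns queue i */
	return -1;                              /* -EAGAIN in the kernel */
}

int main(void)
{
	int i, q;

	/* reserve all legacy QoS queues, as wme_qdiscop_init() now does */
	for (i = 0; i < LEGACY_QUEUES; i++)
		test_and_set(&qdisc_pool, i);

	q = agg_queue_add(MAX_QUEUES);
	printf("first aggregation queue: %d, pool=0x%lx\n", q, qdisc_pool);

	clear_queue(&qdisc_pool, q);            /* cf. ieee80211_ht_agg_queue_remove() */
	printf("after remove, pool=0x%lx\n", qdisc_pool);
	return 0;
}

The dequeue path then skips any hardware queue whose bit is not set in the pool, which is why wme_qdiscop_dequeue() gains the extra !test_bit(queue, q->qdisc_pool) check above.
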
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 76c713a6450c..fcc6b05508cc 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -24,6 +24,8 @@
24 24
25#define QOS_CONTROL_TAG1D_MASK 0x07 25#define QOS_CONTROL_TAG1D_MASK 0x07
26 26
27extern const int ieee802_1d_to_ac[8];
28
27static inline int WLAN_FC_IS_QOS_DATA(u16 fc) 29static inline int WLAN_FC_IS_QOS_DATA(u16 fc)
28{ 30{
29 return (fc & 0x8C) == 0x88; 31 return (fc & 0x8C) == 0x88;
@@ -32,7 +34,12 @@ static inline int WLAN_FC_IS_QOS_DATA(u16 fc)
32#ifdef CONFIG_NET_SCHED 34#ifdef CONFIG_NET_SCHED
33void ieee80211_install_qdisc(struct net_device *dev); 35void ieee80211_install_qdisc(struct net_device *dev);
34int ieee80211_qdisc_installed(struct net_device *dev); 36int ieee80211_qdisc_installed(struct net_device *dev);
35 37int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
38 struct sta_info *sta, u16 tid);
39void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
40 struct sta_info *sta, u16 tid,
41 u8 requeue);
42void ieee80211_requeue(struct ieee80211_local *local, int queue);
36int ieee80211_wme_register(void); 43int ieee80211_wme_register(void);
37void ieee80211_wme_unregister(void); 44void ieee80211_wme_unregister(void);
38#else 45#else
@@ -43,7 +50,19 @@ static inline int ieee80211_qdisc_installed(struct net_device *dev)
43{ 50{
44 return 0; 51 return 0;
45} 52}
46 53static inline int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
54 struct sta_info *sta, u16 tid)
55{
56 return -EAGAIN;
57}
58static inline void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
59 struct sta_info *sta, u16 tid,
60 u8 requeue)
61{
62}
63static inline void ieee80211_requeue(struct ieee80211_local *local, int queue)
64{
65}
47static inline int ieee80211_wme_register(void) 66static inline int ieee80211_wme_register(void)
48{ 67{
49 return 0; 68 return 0;
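
wme.h now exports ieee802_1d_to_ac so the qdisc and the aggregation code share a single 802.1D-priority-to-access-category table. A tiny lookup demo (the array values and the 0x07 mask are copied from the diff; reading a lower index as a higher-priority hardware queue is an assumption about this mac80211 revision, not something the diff states):

/* Illustrative 802.1D priority -> AC queue lookup. */
#include <stdio.h>

static const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
#define QOS_CONTROL_TAG1D_MASK 0x07

int main(void)
{
	unsigned int prio;

	for (prio = 0; prio < 8; prio++)
		printf("802.1D priority %u -> AC queue %d\n",
		       prio & QOS_CONTROL_TAG1D_MASK,
		       ieee802_1d_to_ac[prio & QOS_CONTROL_TAG1D_MASK]);
	return 0;
}
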
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 6f04311cf0a0..b35e51c6ce0c 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -70,7 +70,7 @@ static int ieee80211_get_hdr_info(const struct sk_buff *skb, u8 **sa, u8 **da,
70} 70}
71 71
72 72
73ieee80211_txrx_result 73ieee80211_tx_result
74ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx) 74ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx)
75{ 75{
76 u8 *data, *sa, *da, *key, *mic, qos_tid; 76 u8 *data, *sa, *da, *key, *mic, qos_tid;
@@ -84,10 +84,10 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx)
84 84
85 if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 || 85 if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 ||
86 !WLAN_FC_DATA_PRESENT(fc)) 86 !WLAN_FC_DATA_PRESENT(fc))
87 return TXRX_CONTINUE; 87 return TX_CONTINUE;
88 88
89 if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len)) 89 if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len))
90 return TXRX_DROP; 90 return TX_DROP;
91 91
92 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 92 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
93 !(tx->flags & IEEE80211_TXRXD_FRAGMENTED) && 93 !(tx->flags & IEEE80211_TXRXD_FRAGMENTED) &&
@@ -95,7 +95,7 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx)
95 !wpa_test) { 95 !wpa_test) {
96 /* hwaccel - with no need for preallocated room for Michael MIC 96 /* hwaccel - with no need for preallocated room for Michael MIC
97 */ 97 */
98 return TXRX_CONTINUE; 98 return TX_CONTINUE;
99 } 99 }
100 100
101 if (skb_tailroom(skb) < MICHAEL_MIC_LEN) { 101 if (skb_tailroom(skb) < MICHAEL_MIC_LEN) {
@@ -105,7 +105,7 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx)
105 GFP_ATOMIC))) { 105 GFP_ATOMIC))) {
106 printk(KERN_DEBUG "%s: failed to allocate more memory " 106 printk(KERN_DEBUG "%s: failed to allocate more memory "
107 "for Michael MIC\n", tx->dev->name); 107 "for Michael MIC\n", tx->dev->name);
108 return TXRX_DROP; 108 return TX_DROP;
109 } 109 }
110 } 110 }
111 111
@@ -119,11 +119,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx)
119 mic = skb_put(skb, MICHAEL_MIC_LEN); 119 mic = skb_put(skb, MICHAEL_MIC_LEN);
120 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); 120 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic);
121 121
122 return TXRX_CONTINUE; 122 return TX_CONTINUE;
123} 123}
124 124
125 125
126ieee80211_txrx_result 126ieee80211_rx_result
127ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx) 127ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx)
128{ 128{
129 u8 *data, *sa, *da, *key = NULL, qos_tid; 129 u8 *data, *sa, *da, *key = NULL, qos_tid;
@@ -140,15 +140,15 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx)
140 * No way to verify the MIC if the hardware stripped it 140 * No way to verify the MIC if the hardware stripped it
141 */ 141 */
142 if (rx->u.rx.status->flag & RX_FLAG_MMIC_STRIPPED) 142 if (rx->u.rx.status->flag & RX_FLAG_MMIC_STRIPPED)
143 return TXRX_CONTINUE; 143 return RX_CONTINUE;
144 144
145 if (!rx->key || rx->key->conf.alg != ALG_TKIP || 145 if (!rx->key || rx->key->conf.alg != ALG_TKIP ||
146 !(rx->fc & IEEE80211_FCTL_PROTECTED) || !WLAN_FC_DATA_PRESENT(fc)) 146 !(rx->fc & IEEE80211_FCTL_PROTECTED) || !WLAN_FC_DATA_PRESENT(fc))
147 return TXRX_CONTINUE; 147 return RX_CONTINUE;
148 148
149 if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len) 149 if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len)
150 || data_len < MICHAEL_MIC_LEN) 150 || data_len < MICHAEL_MIC_LEN)
151 return TXRX_DROP; 151 return RX_DROP_UNUSABLE;
152 152
153 data_len -= MICHAEL_MIC_LEN; 153 data_len -= MICHAEL_MIC_LEN;
154 154
@@ -162,14 +162,14 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx)
162 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); 162 michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic);
163 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) { 163 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) {
164 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) 164 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))
165 return TXRX_DROP; 165 return RX_DROP_UNUSABLE;
166 166
167 printk(KERN_DEBUG "%s: invalid Michael MIC in data frame from " 167 printk(KERN_DEBUG "%s: invalid Michael MIC in data frame from "
168 "%s\n", rx->dev->name, print_mac(mac, sa)); 168 "%s\n", rx->dev->name, print_mac(mac, sa));
169 169
170 mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, 170 mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx,
171 (void *) skb->data); 171 (void *) skb->data);
172 return TXRX_DROP; 172 return RX_DROP_UNUSABLE;
173 } 173 }
174 174
175 /* remove Michael MIC from payload */ 175 /* remove Michael MIC from payload */
@@ -179,7 +179,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx)
179 rx->key->u.tkip.iv32_rx[rx->u.rx.queue] = rx->u.rx.tkip_iv32; 179 rx->key->u.tkip.iv32_rx[rx->u.rx.queue] = rx->u.rx.tkip_iv32;
180 rx->key->u.tkip.iv16_rx[rx->u.rx.queue] = rx->u.rx.tkip_iv16; 180 rx->key->u.tkip.iv16_rx[rx->u.rx.queue] = rx->u.rx.tkip_iv16;
181 181
182 return TXRX_CONTINUE; 182 return RX_CONTINUE;
183} 183}
184 184
185 185
@@ -242,7 +242,7 @@ static int tkip_encrypt_skb(struct ieee80211_txrx_data *tx,
242} 242}
243 243
244 244
245ieee80211_txrx_result 245ieee80211_tx_result
246ieee80211_crypto_tkip_encrypt(struct ieee80211_txrx_data *tx) 246ieee80211_crypto_tkip_encrypt(struct ieee80211_txrx_data *tx)
247{ 247{
248 struct sk_buff *skb = tx->skb; 248 struct sk_buff *skb = tx->skb;
@@ -257,26 +257,26 @@ ieee80211_crypto_tkip_encrypt(struct ieee80211_txrx_data *tx)
257 !wpa_test) { 257 !wpa_test) {
258 /* hwaccel - with no need for preallocated room for IV/ICV */ 258 /* hwaccel - with no need for preallocated room for IV/ICV */
259 tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; 259 tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx;
260 return TXRX_CONTINUE; 260 return TX_CONTINUE;
261 } 261 }
262 262
263 if (tkip_encrypt_skb(tx, skb, test) < 0) 263 if (tkip_encrypt_skb(tx, skb, test) < 0)
264 return TXRX_DROP; 264 return TX_DROP;
265 265
266 if (tx->u.tx.extra_frag) { 266 if (tx->u.tx.extra_frag) {
267 int i; 267 int i;
268 for (i = 0; i < tx->u.tx.num_extra_frag; i++) { 268 for (i = 0; i < tx->u.tx.num_extra_frag; i++) {
269 if (tkip_encrypt_skb(tx, tx->u.tx.extra_frag[i], test) 269 if (tkip_encrypt_skb(tx, tx->u.tx.extra_frag[i], test)
270 < 0) 270 < 0)
271 return TXRX_DROP; 271 return TX_DROP;
272 } 272 }
273 } 273 }
274 274
275 return TXRX_CONTINUE; 275 return TX_CONTINUE;
276} 276}
277 277
278 278
279ieee80211_txrx_result 279ieee80211_rx_result
280ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx) 280ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx)
281{ 281{
282 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 282 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
@@ -290,10 +290,10 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx)
290 hdrlen = ieee80211_get_hdrlen(fc); 290 hdrlen = ieee80211_get_hdrlen(fc);
291 291
292 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) 292 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)
293 return TXRX_CONTINUE; 293 return RX_CONTINUE;
294 294
295 if (!rx->sta || skb->len - hdrlen < 12) 295 if (!rx->sta || skb->len - hdrlen < 12)
296 return TXRX_DROP; 296 return RX_DROP_UNUSABLE;
297 297
298 if (rx->u.rx.status->flag & RX_FLAG_DECRYPTED) { 298 if (rx->u.rx.status->flag & RX_FLAG_DECRYPTED) {
299 if (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED) { 299 if (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED) {
@@ -302,7 +302,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx)
302 * replay protection, and stripped the ICV/IV so 302 * replay protection, and stripped the ICV/IV so
303 * we cannot do any checks here. 303 * we cannot do any checks here.
304 */ 304 */
305 return TXRX_CONTINUE; 305 return RX_CONTINUE;
306 } 306 }
307 307
308 /* let TKIP code verify IV, but skip decryption */ 308 /* let TKIP code verify IV, but skip decryption */
@@ -322,7 +322,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx)
322 "frame from %s (res=%d)\n", rx->dev->name, 322 "frame from %s (res=%d)\n", rx->dev->name,
323 print_mac(mac, rx->sta->addr), res); 323 print_mac(mac, rx->sta->addr), res);
324#endif /* CONFIG_MAC80211_DEBUG */ 324#endif /* CONFIG_MAC80211_DEBUG */
325 return TXRX_DROP; 325 return RX_DROP_UNUSABLE;
326 } 326 }
327 327
328 /* Trim ICV */ 328 /* Trim ICV */
@@ -332,7 +332,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx)
332 memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen); 332 memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen);
333 skb_pull(skb, TKIP_IV_LEN); 333 skb_pull(skb, TKIP_IV_LEN);
334 334
335 return TXRX_CONTINUE; 335 return RX_CONTINUE;
336} 336}
337 337
338 338
@@ -491,7 +491,7 @@ static int ccmp_encrypt_skb(struct ieee80211_txrx_data *tx,
491} 491}
492 492
493 493
494ieee80211_txrx_result 494ieee80211_tx_result
495ieee80211_crypto_ccmp_encrypt(struct ieee80211_txrx_data *tx) 495ieee80211_crypto_ccmp_encrypt(struct ieee80211_txrx_data *tx)
496{ 496{
497 struct sk_buff *skb = tx->skb; 497 struct sk_buff *skb = tx->skb;
@@ -506,26 +506,26 @@ ieee80211_crypto_ccmp_encrypt(struct ieee80211_txrx_data *tx)
506 /* hwaccel - with no need for preallocated room for CCMP " 506 /* hwaccel - with no need for preallocated room for CCMP "
507 * header or MIC fields */ 507 * header or MIC fields */
508 tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; 508 tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx;
509 return TXRX_CONTINUE; 509 return TX_CONTINUE;
510 } 510 }
511 511
512 if (ccmp_encrypt_skb(tx, skb, test) < 0) 512 if (ccmp_encrypt_skb(tx, skb, test) < 0)
513 return TXRX_DROP; 513 return TX_DROP;
514 514
515 if (tx->u.tx.extra_frag) { 515 if (tx->u.tx.extra_frag) {
516 int i; 516 int i;
517 for (i = 0; i < tx->u.tx.num_extra_frag; i++) { 517 for (i = 0; i < tx->u.tx.num_extra_frag; i++) {
518 if (ccmp_encrypt_skb(tx, tx->u.tx.extra_frag[i], test) 518 if (ccmp_encrypt_skb(tx, tx->u.tx.extra_frag[i], test)
519 < 0) 519 < 0)
520 return TXRX_DROP; 520 return TX_DROP;
521 } 521 }
522 } 522 }
523 523
524 return TXRX_CONTINUE; 524 return TX_CONTINUE;
525} 525}
526 526
527 527
528ieee80211_txrx_result 528ieee80211_rx_result
529ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx) 529ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx)
530{ 530{
531 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 531 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
@@ -541,15 +541,15 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx)
541 hdrlen = ieee80211_get_hdrlen(fc); 541 hdrlen = ieee80211_get_hdrlen(fc);
542 542
543 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) 543 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)
544 return TXRX_CONTINUE; 544 return RX_CONTINUE;
545 545
546 data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; 546 data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN;
547 if (!rx->sta || data_len < 0) 547 if (!rx->sta || data_len < 0)
548 return TXRX_DROP; 548 return RX_DROP_UNUSABLE;
549 549
550 if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) && 550 if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) &&
551 (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) 551 (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED))
552 return TXRX_CONTINUE; 552 return RX_CONTINUE;
553 553
554 (void) ccmp_hdr2pn(pn, skb->data + hdrlen); 554 (void) ccmp_hdr2pn(pn, skb->data + hdrlen);
555 555
@@ -565,7 +565,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx)
565 ppn[0], ppn[1], ppn[2], ppn[3], ppn[4], ppn[5]); 565 ppn[0], ppn[1], ppn[2], ppn[3], ppn[4], ppn[5]);
566#endif /* CONFIG_MAC80211_DEBUG */ 566#endif /* CONFIG_MAC80211_DEBUG */
567 key->u.ccmp.replays++; 567 key->u.ccmp.replays++;
568 return TXRX_DROP; 568 return RX_DROP_UNUSABLE;
569 } 569 }
570 570
571 if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) { 571 if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) {
@@ -589,7 +589,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx)
589 "for RX frame from %s\n", rx->dev->name, 589 "for RX frame from %s\n", rx->dev->name,
590 print_mac(mac, rx->sta->addr)); 590 print_mac(mac, rx->sta->addr));
591#endif /* CONFIG_MAC80211_DEBUG */ 591#endif /* CONFIG_MAC80211_DEBUG */
592 return TXRX_DROP; 592 return RX_DROP_UNUSABLE;
593 } 593 }
594 } 594 }
595 595
@@ -600,5 +600,5 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx)
600 memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen); 600 memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen);
601 skb_pull(skb, CCMP_HDR_LEN); 601 skb_pull(skb, CCMP_HDR_LEN);
602 602
603 return TXRX_CONTINUE; 603 return RX_CONTINUE;
604} 604}
diff --git a/net/mac80211/wpa.h b/net/mac80211/wpa.h
index 49d80cf0cd75..16e4dba4aa70 100644
--- a/net/mac80211/wpa.h
+++ b/net/mac80211/wpa.h
@@ -13,19 +13,19 @@
13#include <linux/types.h> 13#include <linux/types.h>
14#include "ieee80211_i.h" 14#include "ieee80211_i.h"
15 15
16ieee80211_txrx_result 16ieee80211_tx_result
17ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx); 17ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx);
18ieee80211_txrx_result 18ieee80211_rx_result
19ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx); 19ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx);
20 20
21ieee80211_txrx_result 21ieee80211_tx_result
22ieee80211_crypto_tkip_encrypt(struct ieee80211_txrx_data *tx); 22ieee80211_crypto_tkip_encrypt(struct ieee80211_txrx_data *tx);
23ieee80211_txrx_result 23ieee80211_rx_result
24ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx); 24ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx);
25 25
26ieee80211_txrx_result 26ieee80211_tx_result
27ieee80211_crypto_ccmp_encrypt(struct ieee80211_txrx_data *tx); 27ieee80211_crypto_ccmp_encrypt(struct ieee80211_txrx_data *tx);
28ieee80211_txrx_result 28ieee80211_rx_result
29ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx); 29ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx);
30 30
31#endif /* WPA_H */ 31#endif /* WPA_H */
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index e88e96af613d..a9bf6e4fd0cc 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -293,7 +293,7 @@ static const struct file_operations ct_cpu_seq_fops = {
293 .open = ct_cpu_seq_open, 293 .open = ct_cpu_seq_open,
294 .read = seq_read, 294 .read = seq_read,
295 .llseek = seq_lseek, 295 .llseek = seq_lseek,
296 .release = seq_release_private, 296 .release = seq_release,
297}; 297};
298#endif /* CONFIG_PROC_FS */ 298#endif /* CONFIG_PROC_FS */
299 299
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1ab0da2632e1..524e826bb976 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1344,22 +1344,6 @@ static void netlink_data_ready(struct sock *sk, int len)
1344 * queueing. 1344 * queueing.
1345 */ 1345 */
1346 1346
1347static void __netlink_release(struct sock *sk)
1348{
1349 /*
1350 * Last sock_put should drop referrence to sk->sk_net. It has already
1351 * been dropped in netlink_kernel_create. Taking referrence to stopping
1352 * namespace is not an option.
1353 * Take referrence to a socket to remove it from netlink lookup table
1354 * _alive_ and after that destroy it in the context of init_net.
1355 */
1356
1357 sock_hold(sk);
1358 sock_release(sk->sk_socket);
1359 sk->sk_net = get_net(&init_net);
1360 sock_put(sk);
1361}
1362
1363struct sock * 1347struct sock *
1364netlink_kernel_create(struct net *net, int unit, unsigned int groups, 1348netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1365 void (*input)(struct sk_buff *skb), 1349 void (*input)(struct sk_buff *skb),
@@ -1388,8 +1372,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1388 goto out_sock_release_nosk; 1372 goto out_sock_release_nosk;
1389 1373
1390 sk = sock->sk; 1374 sk = sock->sk;
1391 put_net(sk->sk_net); 1375 sk_change_net(sk, net);
1392 sk->sk_net = net;
1393 1376
1394 if (groups < 32) 1377 if (groups < 32)
1395 groups = 32; 1378 groups = 32;
@@ -1424,7 +1407,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
1424 1407
1425out_sock_release: 1408out_sock_release:
1426 kfree(listeners); 1409 kfree(listeners);
1427 __netlink_release(sk); 1410 netlink_kernel_release(sk);
1428 return NULL; 1411 return NULL;
1429 1412
1430out_sock_release_nosk: 1413out_sock_release_nosk:
@@ -1437,10 +1420,7 @@ EXPORT_SYMBOL(netlink_kernel_create);
1437void 1420void
1438netlink_kernel_release(struct sock *sk) 1421netlink_kernel_release(struct sock *sk)
1439{ 1422{
1440 if (sk == NULL || sk->sk_socket == NULL) 1423 sk_release_kernel(sk);
1441 return;
1442
1443 __netlink_release(sk);
1444} 1424}
1445EXPORT_SYMBOL(netlink_kernel_release); 1425EXPORT_SYMBOL(netlink_kernel_release);
1446 1426
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
index 83eda247fe48..017322e2786d 100644
--- a/net/rxrpc/ar-proc.c
+++ b/net/rxrpc/ar-proc.c
@@ -103,7 +103,7 @@ const struct file_operations rxrpc_call_seq_fops = {
103 .open = rxrpc_call_seq_open, 103 .open = rxrpc_call_seq_open,
104 .read = seq_read, 104 .read = seq_read,
105 .llseek = seq_lseek, 105 .llseek = seq_lseek,
106 .release = seq_release_private, 106 .release = seq_release,
107}; 107};
108 108
109/* 109/*
@@ -188,5 +188,5 @@ const struct file_operations rxrpc_connection_seq_fops = {
188 .open = rxrpc_connection_seq_open, 188 .open = rxrpc_connection_seq_open,
189 .read = seq_read, 189 .read = seq_read,
190 .llseek = seq_lseek, 190 .llseek = seq_lseek,
191 .release = seq_release_private, 191 .release = seq_release,
192}; 192};
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 87f940587d5f..4862835b0c39 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -257,7 +257,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
257 NIP6(fl.fl6_src)); 257 NIP6(fl.fl6_src));
258 } 258 }
259 259
260 dst = ip6_route_output(NULL, &fl); 260 dst = ip6_route_output(&init_net, NULL, &fl);
261 if (!dst->error) { 261 if (!dst->error) {
262 struct rt6_info *rt; 262 struct rt6_info *rt;
263 rt = (struct rt6_info *)dst; 263 rt = (struct rt6_info *)dst;
@@ -313,7 +313,8 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
313 __FUNCTION__, asoc, dst, NIP6(daddr->v6.sin6_addr)); 313 __FUNCTION__, asoc, dst, NIP6(daddr->v6.sin6_addr));
314 314
315 if (!asoc) { 315 if (!asoc) {
316 ipv6_get_saddr(dst, &daddr->v6.sin6_addr,&saddr->v6.sin6_addr); 316 ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL,
317 &daddr->v6.sin6_addr, &saddr->v6.sin6_addr);
317 SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n", 318 SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n",
318 NIP6(saddr->v6.sin6_addr)); 319 NIP6(saddr->v6.sin6_addr));
319 return; 320 return;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 1bb3c5c35d2a..fd4deefab3cf 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -494,6 +494,8 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
494 */ 494 */
495 if (transport == transport->asoc->peer.retran_path) 495 if (transport == transport->asoc->peer.retran_path)
496 sctp_assoc_update_retran_path(transport->asoc); 496 sctp_assoc_update_retran_path(transport->asoc);
497 transport->asoc->rtx_data_chunks +=
498 transport->asoc->unack_data;
497 break; 499 break;
498 case SCTP_RTXR_FAST_RTX: 500 case SCTP_RTXR_FAST_RTX:
499 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS); 501 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
@@ -504,6 +506,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
504 break; 506 break;
505 case SCTP_RTXR_T1_RTX: 507 case SCTP_RTXR_T1_RTX:
506 SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS); 508 SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
509 transport->asoc->init_retries++;
507 break; 510 break;
508 default: 511 default:
509 BUG(); 512 BUG();
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 973f1dbc2ec3..ddca90e5e3a5 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -279,8 +279,10 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
279 *pos = 0; 279 *pos = 0;
280 280
281 if (*pos == 0) 281 if (*pos == 0)
282 seq_printf(seq, " ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT " 282 seq_printf(seq, " ASSOC SOCK STY SST ST HBKT "
283 "RPORT LADDRS <-> RADDRS\n"); 283 "ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT "
284 "RPORT LADDRS <-> RADDRS "
285 "HBINT INS OUTS MAXRT T1X T2X RTXC\n");
284 286
285 return (void *)pos; 287 return (void *)pos;
286} 288}
@@ -319,15 +321,21 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
319 assoc = sctp_assoc(epb); 321 assoc = sctp_assoc(epb);
320 sk = epb->sk; 322 sk = epb->sk;
321 seq_printf(seq, 323 seq_printf(seq,
322 "%8p %8p %-3d %-3d %-2d %-4d %4d %8d %8d %7d %5lu %-5d %5d ", 324 "%8p %8p %-3d %-3d %-2d %-4d "
325 "%4d %8d %8d %7d %5lu %-5d %5d "
326 "%8lu %5d %5d %4d %4d %4d %8d ",
323 assoc, sk, sctp_sk(sk)->type, sk->sk_state, 327 assoc, sk, sctp_sk(sk)->type, sk->sk_state,
324 assoc->state, hash, assoc->assoc_id, 328 assoc->state, hash,
329 assoc->assoc_id,
325 assoc->sndbuf_used, 330 assoc->sndbuf_used,
326 atomic_read(&assoc->rmem_alloc), 331 atomic_read(&assoc->rmem_alloc),
327 sock_i_uid(sk), sock_i_ino(sk), 332 sock_i_uid(sk), sock_i_ino(sk),
328 epb->bind_addr.port, 333 epb->bind_addr.port,
329 assoc->peer.port); 334 assoc->peer.port,
330 335 assoc->hbinterval, assoc->c.sinit_max_instreams,
336 assoc->c.sinit_num_ostreams, assoc->max_retrans,
337 assoc->init_retries, assoc->shutdown_retries,
338 assoc->rtx_data_chunks);
331 seq_printf(seq, " "); 339 seq_printf(seq, " ");
332 sctp_seq_dump_local_addrs(seq, epb); 340 sctp_seq_dump_local_addrs(seq, epb);
333 seq_printf(seq, "<-> "); 341 seq_printf(seq, "<-> ");
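
The sctp/proc.c hunks widen each /proc/net/sctp/assocs row with heartbeat interval, stream counts, retransmit limits and counters. A quick userspace approximation of the new layout, useful for seeing how the added HBINT/INS/OUTS/MAXRT/T1X/T2X/RTXC columns line up (header text and field order come from the diff; all values below are invented and the kernel's pointer columns are faked with constants):

/* Demo of the widened assocs row layout. */
#include <stdio.h>

int main(void)
{
	printf(" ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT "
	       "RPORT LADDRS <-> RADDRS "
	       "HBINT INS OUTS MAXRT T1X T2X RTXC\n");

	printf("%8s %8s %-3d %-3d %-2d %-4d "
	       "%4d %8d %8d %7d %5lu %-5d %5d "
	       "%8lu %5d %5d %4d %4d %4d %8d ",
	       "c4f1a000", "c4f20800",            /* assoc, sk (pointers in the kernel) */
	       2, 1, 3, 41,                        /* STY SST ST HBKT */
	       1, 0, 0, 0, 12345UL, 3260, 3261,    /* ASSOC-ID .. RPORT */
	       7500UL, 10, 10, 10, 0, 0, 0);       /* HBINT INS OUTS MAXRT T1X T2X RTXC */
	printf(" 10.0.0.1 <-> 10.0.0.2 \n");
	return 0;
}
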
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 688546dccd82..8d9d929f6cea 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -629,6 +629,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
629 struct sctp_sockaddr_entry *addr = NULL; 629 struct sctp_sockaddr_entry *addr = NULL;
630 struct sctp_sockaddr_entry *temp; 630 struct sctp_sockaddr_entry *temp;
631 631
632 if (ifa->ifa_dev->dev->nd_net != &init_net)
633 return NOTIFY_DONE;
634
632 switch (ev) { 635 switch (ev) {
633 case NETDEV_UP: 636 case NETDEV_UP:
634 addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); 637 addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index f2ed6473feef..ade0cbd3a52b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5312,6 +5312,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
5312 SCTP_DEBUG_PRINTK("Timer T2 expired.\n"); 5312 SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
5313 SCTP_INC_STATS(SCTP_MIB_T2_SHUTDOWN_EXPIREDS); 5313 SCTP_INC_STATS(SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
5314 5314
5315 ((struct sctp_association *)asoc)->shutdown_retries++;
5316
5315 if (asoc->overall_error_count >= asoc->max_retrans) { 5317 if (asoc->overall_error_count >= asoc->max_retrans) {
5316 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 5318 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
5317 SCTP_ERROR(ETIMEDOUT)); 5319 SCTP_ERROR(ETIMEDOUT));
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 22909036b9bc..9ae8e9f74028 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -43,7 +43,7 @@
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/poll.h> 44#include <linux/poll.h>
45#include <linux/fcntl.h> 45#include <linux/fcntl.h>
46#include <asm/semaphore.h> 46#include <linux/mutex.h>
47#include <asm/string.h> 47#include <asm/string.h>
48#include <asm/atomic.h> 48#include <asm/atomic.h>
49#include <net/sock.h> 49#include <net/sock.h>
@@ -63,7 +63,7 @@
63struct tipc_sock { 63struct tipc_sock {
64 struct sock sk; 64 struct sock sk;
65 struct tipc_port *p; 65 struct tipc_port *p;
66 struct semaphore sem; 66 struct mutex lock;
67}; 67};
68 68
69#define tipc_sk(sk) ((struct tipc_sock*)sk) 69#define tipc_sk(sk) ((struct tipc_sock*)sk)
@@ -217,7 +217,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
217 tsock->p = port; 217 tsock->p = port;
218 port->usr_handle = tsock; 218 port->usr_handle = tsock;
219 219
220 init_MUTEX(&tsock->sem); 220 mutex_init(&tsock->lock);
221 221
222 dbg("sock_create: %x\n",tsock); 222 dbg("sock_create: %x\n",tsock);
223 223
@@ -253,9 +253,9 @@ static int release(struct socket *sock)
253 dbg("sock_delete: %x\n",tsock); 253 dbg("sock_delete: %x\n",tsock);
254 if (!tsock) 254 if (!tsock)
255 return 0; 255 return 0;
256 down(&tsock->sem); 256 mutex_lock(&tsock->lock);
257 if (!sock->sk) { 257 if (!sock->sk) {
258 up(&tsock->sem); 258 mutex_unlock(&tsock->lock);
259 return 0; 259 return 0;
260 } 260 }
261 261
@@ -288,7 +288,7 @@ static int release(struct socket *sock)
288 atomic_dec(&tipc_queue_size); 288 atomic_dec(&tipc_queue_size);
289 } 289 }
290 290
291 up(&tsock->sem); 291 mutex_unlock(&tsock->lock);
292 292
293 sock_put(sk); 293 sock_put(sk);
294 294
@@ -315,7 +315,7 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
315 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 315 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
316 int res; 316 int res;
317 317
318 if (down_interruptible(&tsock->sem)) 318 if (mutex_lock_interruptible(&tsock->lock))
319 return -ERESTARTSYS; 319 return -ERESTARTSYS;
320 320
321 if (unlikely(!uaddr_len)) { 321 if (unlikely(!uaddr_len)) {
@@ -346,7 +346,7 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
346 res = tipc_withdraw(tsock->p->ref, -addr->scope, 346 res = tipc_withdraw(tsock->p->ref, -addr->scope,
347 &addr->addr.nameseq); 347 &addr->addr.nameseq);
348exit: 348exit:
349 up(&tsock->sem); 349 mutex_unlock(&tsock->lock);
350 return res; 350 return res;
351} 351}
352 352
@@ -367,7 +367,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
367 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 367 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
368 u32 res; 368 u32 res;
369 369
370 if (down_interruptible(&tsock->sem)) 370 if (mutex_lock_interruptible(&tsock->lock))
371 return -ERESTARTSYS; 371 return -ERESTARTSYS;
372 372
373 *uaddr_len = sizeof(*addr); 373 *uaddr_len = sizeof(*addr);
@@ -380,7 +380,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
380 res = tipc_ownidentity(tsock->p->ref, &addr->addr.id); 380 res = tipc_ownidentity(tsock->p->ref, &addr->addr.id);
381 addr->addr.name.domain = 0; 381 addr->addr.name.domain = 0;
382 382
383 up(&tsock->sem); 383 mutex_unlock(&tsock->lock);
384 return res; 384 return res;
385} 385}
386 386
@@ -477,7 +477,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
477 } 477 }
478 } 478 }
479 479
480 if (down_interruptible(&tsock->sem)) 480 if (mutex_lock_interruptible(&tsock->lock))
481 return -ERESTARTSYS; 481 return -ERESTARTSYS;
482 482
483 if (needs_conn) { 483 if (needs_conn) {
@@ -523,7 +523,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
523 } 523 }
524 if (likely(res != -ELINKCONG)) { 524 if (likely(res != -ELINKCONG)) {
525exit: 525exit:
526 up(&tsock->sem); 526 mutex_unlock(&tsock->lock);
527 return res; 527 return res;
528 } 528 }
529 if (m->msg_flags & MSG_DONTWAIT) { 529 if (m->msg_flags & MSG_DONTWAIT) {
@@ -562,7 +562,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
562 if (unlikely(dest)) 562 if (unlikely(dest))
563 return send_msg(iocb, sock, m, total_len); 563 return send_msg(iocb, sock, m, total_len);
564 564
565 if (down_interruptible(&tsock->sem)) { 565 if (mutex_lock_interruptible(&tsock->lock)) {
566 return -ERESTARTSYS; 566 return -ERESTARTSYS;
567 } 567 }
568 568
@@ -578,7 +578,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
578 res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov); 578 res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov);
579 if (likely(res != -ELINKCONG)) { 579 if (likely(res != -ELINKCONG)) {
580exit: 580exit:
581 up(&tsock->sem); 581 mutex_unlock(&tsock->lock);
582 return res; 582 return res;
583 } 583 }
584 if (m->msg_flags & MSG_DONTWAIT) { 584 if (m->msg_flags & MSG_DONTWAIT) {
@@ -846,7 +846,7 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
846 846
847 /* Look for a message in receive queue; wait if necessary */ 847 /* Look for a message in receive queue; wait if necessary */
848 848
849 if (unlikely(down_interruptible(&tsock->sem))) 849 if (unlikely(mutex_lock_interruptible(&tsock->lock)))
850 return -ERESTARTSYS; 850 return -ERESTARTSYS;
851 851
852restart: 852restart:
@@ -930,7 +930,7 @@ restart:
930 advance_queue(tsock); 930 advance_queue(tsock);
931 } 931 }
932exit: 932exit:
933 up(&tsock->sem); 933 mutex_unlock(&tsock->lock);
934 return res; 934 return res;
935} 935}
936 936
@@ -981,7 +981,7 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
981 981
982 /* Look for a message in receive queue; wait if necessary */ 982 /* Look for a message in receive queue; wait if necessary */
983 983
984 if (unlikely(down_interruptible(&tsock->sem))) 984 if (unlikely(mutex_lock_interruptible(&tsock->lock)))
985 return -ERESTARTSYS; 985 return -ERESTARTSYS;
986 986
987restart: 987restart:
@@ -1077,7 +1077,7 @@ restart:
1077 goto restart; 1077 goto restart;
1078 1078
1079exit: 1079exit:
1080 up(&tsock->sem); 1080 mutex_unlock(&tsock->lock);
1081 return sz_copied ? sz_copied : res; 1081 return sz_copied ? sz_copied : res;
1082} 1082}
1083 1083
@@ -1293,7 +1293,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1293 return res; 1293 return res;
1294 } 1294 }
1295 1295
1296 if (down_interruptible(&tsock->sem)) 1296 if (mutex_lock_interruptible(&tsock->lock))
1297 return -ERESTARTSYS; 1297 return -ERESTARTSYS;
1298 1298
1299 /* Wait for destination's 'ACK' response */ 1299 /* Wait for destination's 'ACK' response */
@@ -1317,7 +1317,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1317 sock->state = SS_DISCONNECTING; 1317 sock->state = SS_DISCONNECTING;
1318 } 1318 }
1319 1319
1320 up(&tsock->sem); 1320 mutex_unlock(&tsock->lock);
1321 return res; 1321 return res;
1322} 1322}
1323 1323
@@ -1365,7 +1365,7 @@ static int accept(struct socket *sock, struct socket *newsock, int flags)
1365 (flags & O_NONBLOCK))) 1365 (flags & O_NONBLOCK)))
1366 return -EWOULDBLOCK; 1366 return -EWOULDBLOCK;
1367 1367
1368 if (down_interruptible(&tsock->sem)) 1368 if (mutex_lock_interruptible(&tsock->lock))
1369 return -ERESTARTSYS; 1369 return -ERESTARTSYS;
1370 1370
1371 if (wait_event_interruptible(*sock->sk->sk_sleep, 1371 if (wait_event_interruptible(*sock->sk->sk_sleep,
@@ -1412,7 +1412,7 @@ static int accept(struct socket *sock, struct socket *newsock, int flags)
1412 } 1412 }
1413 } 1413 }
1414exit: 1414exit:
1415 up(&tsock->sem); 1415 mutex_unlock(&tsock->lock);
1416 return res; 1416 return res;
1417} 1417}
1418 1418
@@ -1434,7 +1434,7 @@ static int shutdown(struct socket *sock, int how)
1434 1434
1435 /* Could return -EINVAL for an invalid "how", but why bother? */ 1435 /* Could return -EINVAL for an invalid "how", but why bother? */
1436 1436
1437 if (down_interruptible(&tsock->sem)) 1437 if (mutex_lock_interruptible(&tsock->lock))
1438 return -ERESTARTSYS; 1438 return -ERESTARTSYS;
1439 1439
1440 sock_lock(tsock); 1440 sock_lock(tsock);
@@ -1484,7 +1484,7 @@ restart:
1484 1484
1485 sock_unlock(tsock); 1485 sock_unlock(tsock);
1486 1486
1487 up(&tsock->sem); 1487 mutex_unlock(&tsock->lock);
1488 return res; 1488 return res;
1489} 1489}
1490 1490
@@ -1518,7 +1518,7 @@ static int setsockopt(struct socket *sock,
1518 if ((res = get_user(value, (u32 __user *)ov))) 1518 if ((res = get_user(value, (u32 __user *)ov)))
1519 return res; 1519 return res;
1520 1520
1521 if (down_interruptible(&tsock->sem)) 1521 if (mutex_lock_interruptible(&tsock->lock))
1522 return -ERESTARTSYS; 1522 return -ERESTARTSYS;
1523 1523
1524 switch (opt) { 1524 switch (opt) {
@@ -1541,7 +1541,7 @@ static int setsockopt(struct socket *sock,
1541 res = -EINVAL; 1541 res = -EINVAL;
1542 } 1542 }
1543 1543
1544 up(&tsock->sem); 1544 mutex_unlock(&tsock->lock);
1545 return res; 1545 return res;
1546} 1546}
1547 1547
@@ -1574,7 +1574,7 @@ static int getsockopt(struct socket *sock,
1574 if ((res = get_user(len, ol))) 1574 if ((res = get_user(len, ol)))
1575 return res; 1575 return res;
1576 1576
1577 if (down_interruptible(&tsock->sem)) 1577 if (mutex_lock_interruptible(&tsock->lock))
1578 return -ERESTARTSYS; 1578 return -ERESTARTSYS;
1579 1579
1580 switch (opt) { 1580 switch (opt) {
@@ -1607,7 +1607,7 @@ static int getsockopt(struct socket *sock,
1607 res = put_user(sizeof(value), ol); 1607 res = put_user(sizeof(value), ol);
1608 } 1608 }
1609 1609
1610 up(&tsock->sem); 1610 mutex_unlock(&tsock->lock);
1611 return res; 1611 return res;
1612} 1612}
1613 1613
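
The TIPC change is a mechanical conversion from the per-socket semaphore to a mutex; the surrounding lock/goto-exit/unlock structure stays the same. A rough userspace analogue of that shape using pthreads (purely illustrative; the kernel code uses mutex_lock_interruptible() and returns -ERESTARTSYS when interrupted):

/* pthread analogue of the converted locking pattern. */
#include <pthread.h>
#include <stdio.h>

struct tsock {
	pthread_mutex_t lock;   /* plays the role of tipc_sock.lock */
	int bound;
};

/* Same shape as the converted bind(): take the lock, bail out through a
 * single exit label, release the lock exactly once. */
static int do_bind(struct tsock *t, int want_bind)
{
	int res = 0;

	if (pthread_mutex_lock(&t->lock))       /* mutex_lock_interruptible() in-kernel */
		return -1;

	if (!want_bind) {                        /* analogous to the withdraw path */
		t->bound = 0;
		goto exit;
	}
	t->bound = 1;
exit:
	pthread_mutex_unlock(&t->lock);
	return res;
}

int main(void)
{
	struct tsock t = { PTHREAD_MUTEX_INITIALIZER, 0 };

	do_bind(&t, 1);
	printf("bound=%d\n", t.bound);
	do_bind(&t, 0);
	printf("bound=%d\n", t.bound);
	return 0;
}
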
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 65710a42e5a7..b9f943c45f3b 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_WIRELESS_EXT) += wext.o 1obj-$(CONFIG_WIRELESS_EXT) += wext.o
2obj-$(CONFIG_CFG80211) += cfg80211.o 2obj-$(CONFIG_CFG80211) += cfg80211.o
3 3
4cfg80211-y += core.o sysfs.o radiotap.o 4cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o
5cfg80211-$(CONFIG_NL80211) += nl80211.o 5cfg80211-$(CONFIG_NL80211) += nl80211.o
diff --git a/net/wireless/core.c b/net/wireless/core.c
index cfc5fc5f9e75..80afacdae46c 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -232,6 +232,47 @@ int wiphy_register(struct wiphy *wiphy)
232{ 232{
233 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); 233 struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
234 int res; 234 int res;
235 enum ieee80211_band band;
236 struct ieee80211_supported_band *sband;
237 bool have_band = false;
238 int i;
239
240 /* sanity check supported bands/channels */
241 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
242 sband = wiphy->bands[band];
243 if (!sband)
244 continue;
245
246 sband->band = band;
247
248 if (!sband->n_channels || !sband->n_bitrates) {
249 WARN_ON(1);
250 return -EINVAL;
251 }
252
253 for (i = 0; i < sband->n_channels; i++) {
254 sband->channels[i].orig_flags =
255 sband->channels[i].flags;
256 sband->channels[i].orig_mag =
257 sband->channels[i].max_antenna_gain;
258 sband->channels[i].orig_mpwr =
259 sband->channels[i].max_power;
260 sband->channels[i].band = band;
261 }
262
263 have_band = true;
264 }
265
266 if (!have_band) {
267 WARN_ON(1);
268 return -EINVAL;
269 }
270
271 /* check and set up bitrates */
272 ieee80211_set_bitrate_flags(wiphy);
273
274 /* set up regulatory info */
275 wiphy_update_regulatory(wiphy);
235 276
236 mutex_lock(&cfg80211_drv_mutex); 277 mutex_lock(&cfg80211_drv_mutex);
237 278
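
wiphy_register() now sanity-checks the driver-supplied band tables before the device is added: every present band must carry channels and bitrates, and the original per-channel flags, antenna gain and power are snapshotted so regulatory updates can be undone. A standalone model of that pass with trimmed stand-in structs (not the cfg80211 definitions):

/* Standalone model of the new band sanity pass in wiphy_register(). */
#include <stdio.h>

#define NUM_BANDS 2

struct channel { int center_freq; int flags; int orig_flags; };
struct band    { int n_channels; int n_bitrates; struct channel *channels; };

static int check_bands(struct band *bands[NUM_BANDS])
{
	int b, i, have_band = 0;

	for (b = 0; b < NUM_BANDS; b++) {
		struct band *sband = bands[b];

		if (!sband)
			continue;                     /* band not supported */
		if (!sband->n_channels || !sband->n_bitrates)
			return -1;                    /* -EINVAL in the kernel */

		/* snapshot the original flags before regulatory code touches them */
		for (i = 0; i < sband->n_channels; i++)
			sband->channels[i].orig_flags =
				sband->channels[i].flags;

		have_band = 1;
	}
	return have_band ? 0 : -1;                    /* at least one band required */
}

int main(void)
{
	struct channel ch24[] = { { 2412, 0 }, { 2437, 0 } };
	struct band b24 = { 2, 4, ch24 };
	struct band *bands[NUM_BANDS] = { &b24, NULL };   /* 2.4 GHz only */

	printf("check_bands: %d\n", check_bands(bands));
	return 0;
}
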
diff --git a/net/wireless/core.h b/net/wireless/core.h
index eb0f846b40df..7a02c356d63d 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -78,4 +78,7 @@ extern void cfg80211_dev_free(struct cfg80211_registered_device *drv);
78extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv, 78extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv,
79 char *newname); 79 char *newname);
80 80
81void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
82void wiphy_update_regulatory(struct wiphy *wiphy);
83
81#endif /* __NET_WIRELESS_CORE_H */ 84#endif /* __NET_WIRELESS_CORE_H */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e3a214f63f91..5b3474798b8d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -82,6 +82,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
82 [NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY, 82 [NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY,
83 .len = NL80211_MAX_SUPP_RATES }, 83 .len = NL80211_MAX_SUPP_RATES },
84 [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 }, 84 [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 },
85 [NL80211_ATTR_MNTR_FLAGS] = { .type = NLA_NESTED },
85}; 86};
86 87
87/* message building helper */ 88/* message building helper */
@@ -98,6 +99,13 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
98 struct cfg80211_registered_device *dev) 99 struct cfg80211_registered_device *dev)
99{ 100{
100 void *hdr; 101 void *hdr;
102 struct nlattr *nl_bands, *nl_band;
103 struct nlattr *nl_freqs, *nl_freq;
104 struct nlattr *nl_rates, *nl_rate;
105 enum ieee80211_band band;
106 struct ieee80211_channel *chan;
107 struct ieee80211_rate *rate;
108 int i;
101 109
102 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); 110 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY);
103 if (!hdr) 111 if (!hdr)
@@ -105,6 +113,73 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
105 113
106 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); 114 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx);
107 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); 115 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
116
117 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS);
118 if (!nl_bands)
119 goto nla_put_failure;
120
121 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
122 if (!dev->wiphy.bands[band])
123 continue;
124
125 nl_band = nla_nest_start(msg, band);
126 if (!nl_band)
127 goto nla_put_failure;
128
129 /* add frequencies */
130 nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS);
131 if (!nl_freqs)
132 goto nla_put_failure;
133
134 for (i = 0; i < dev->wiphy.bands[band]->n_channels; i++) {
135 nl_freq = nla_nest_start(msg, i);
136 if (!nl_freq)
137 goto nla_put_failure;
138
139 chan = &dev->wiphy.bands[band]->channels[i];
140 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ,
141 chan->center_freq);
142
143 if (chan->flags & IEEE80211_CHAN_DISABLED)
144 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED);
145 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
146 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN);
147 if (chan->flags & IEEE80211_CHAN_NO_IBSS)
148 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS);
149 if (chan->flags & IEEE80211_CHAN_RADAR)
150 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR);
151
152 nla_nest_end(msg, nl_freq);
153 }
154
155 nla_nest_end(msg, nl_freqs);
156
157 /* add bitrates */
158 nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES);
159 if (!nl_rates)
160 goto nla_put_failure;
161
162 for (i = 0; i < dev->wiphy.bands[band]->n_bitrates; i++) {
163 nl_rate = nla_nest_start(msg, i);
164 if (!nl_rate)
165 goto nla_put_failure;
166
167 rate = &dev->wiphy.bands[band]->bitrates[i];
168 NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE,
169 rate->bitrate);
170 if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
171 NLA_PUT_FLAG(msg,
172 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE);
173
174 nla_nest_end(msg, nl_rate);
175 }
176
177 nla_nest_end(msg, nl_rates);
178
179 nla_nest_end(msg, nl_band);
180 }
181 nla_nest_end(msg, nl_bands);
182
108 return genlmsg_end(msg, hdr); 183 return genlmsg_end(msg, hdr);
109 184
110 nla_put_failure: 185 nla_put_failure:
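
For reference, a userspace consumer of the nested NL80211_ATTR_WIPHY_BANDS layout built above might walk it roughly as sketched below. This is not part of the patch; it assumes the libnl helpers nla_for_each_nested()/nla_parse_nested() and the NL80211_BAND_ATTR_MAX/NL80211_FREQUENCY_ATTR_MAX constants from nl80211.h, and print_wiphy_freqs() is a made-up name.

#include <stdio.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/* tb[] is assumed to hold the parsed attributes of an NL80211_CMD_NEW_WIPHY reply */
static void print_wiphy_freqs(struct nlattr *tb[])
{
	struct nlattr *nl_band, *nl_freq;
	int rem_band, rem_freq;

	if (!tb[NL80211_ATTR_WIPHY_BANDS])
		return;

	nla_for_each_nested(nl_band, tb[NL80211_ATTR_WIPHY_BANDS], rem_band) {
		struct nlattr *tb_band[NL80211_BAND_ATTR_MAX + 1];
		struct nlattr *tb_freq[NL80211_FREQUENCY_ATTR_MAX + 1];

		nla_parse_nested(tb_band, NL80211_BAND_ATTR_MAX, nl_band, NULL);
		if (!tb_band[NL80211_BAND_ATTR_FREQS])
			continue;

		nla_for_each_nested(nl_freq, tb_band[NL80211_BAND_ATTR_FREQS], rem_freq) {
			nla_parse_nested(tb_freq, NL80211_FREQUENCY_ATTR_MAX,
					 nl_freq, NULL);
			if (!tb_freq[NL80211_FREQUENCY_ATTR_FREQ])
				continue;
			printf("%u MHz%s\n",
			       nla_get_u32(tb_freq[NL80211_FREQUENCY_ATTR_FREQ]),
			       tb_freq[NL80211_FREQUENCY_ATTR_DISABLED] ?
					" (disabled)" : "");
		}
	}
}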
@@ -262,12 +337,42 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
262 return -ENOBUFS; 337 return -ENOBUFS;
263} 338}
264 339
340static const struct nla_policy mntr_flags_policy[NL80211_MNTR_FLAG_MAX + 1] = {
341 [NL80211_MNTR_FLAG_FCSFAIL] = { .type = NLA_FLAG },
342 [NL80211_MNTR_FLAG_PLCPFAIL] = { .type = NLA_FLAG },
343 [NL80211_MNTR_FLAG_CONTROL] = { .type = NLA_FLAG },
344 [NL80211_MNTR_FLAG_OTHER_BSS] = { .type = NLA_FLAG },
345 [NL80211_MNTR_FLAG_COOK_FRAMES] = { .type = NLA_FLAG },
346};
347
348static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
349{
350 struct nlattr *flags[NL80211_MNTR_FLAG_MAX + 1];
351 int flag;
352
353 *mntrflags = 0;
354
355 if (!nla)
356 return -EINVAL;
357
358 if (nla_parse_nested(flags, NL80211_MNTR_FLAG_MAX,
359 nla, mntr_flags_policy))
360 return -EINVAL;
361
362 for (flag = 1; flag <= NL80211_MNTR_FLAG_MAX; flag++)
363 if (flags[flag])
364 *mntrflags |= (1<<flag);
365
366 return 0;
367}
368
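
The userspace counterpart of parse_monitor_flags() builds the same nested attribute; a minimal sketch (again assuming libnl, with the helper name put_monitor_flags() purely illustrative) follows.

#include <errno.h>
#include <netlink/attr.h>
#include <netlink/msg.h>
#include <linux/nl80211.h>

/* Nest a couple of monitor flags into an NL80211_CMD_NEW_INTERFACE or
 * NL80211_CMD_SET_INTERFACE request being assembled in 'msg'. */
static int put_monitor_flags(struct nl_msg *msg)
{
	struct nlattr *mntr = nla_nest_start(msg, NL80211_ATTR_MNTR_FLAGS);

	if (!mntr)
		return -ENOBUFS;
	nla_put_flag(msg, NL80211_MNTR_FLAG_FCSFAIL);
	nla_put_flag(msg, NL80211_MNTR_FLAG_OTHER_BSS);
	nla_nest_end(msg, mntr);
	return 0;
}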
265static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) 369static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
266{ 370{
267 struct cfg80211_registered_device *drv; 371 struct cfg80211_registered_device *drv;
268 int err, ifindex; 372 int err, ifindex;
269 enum nl80211_iftype type; 373 enum nl80211_iftype type;
270 struct net_device *dev; 374 struct net_device *dev;
375 u32 flags;
271 376
272 if (info->attrs[NL80211_ATTR_IFTYPE]) { 377 if (info->attrs[NL80211_ATTR_IFTYPE]) {
273 type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); 378 type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
@@ -288,7 +393,11 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
288 } 393 }
289 394
290 rtnl_lock(); 395 rtnl_lock();
291 err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, type); 396 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
397 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
398 &flags);
399 err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex,
400 type, err ? NULL : &flags);
292 rtnl_unlock(); 401 rtnl_unlock();
293 402
294 unlock: 403 unlock:
@@ -301,6 +410,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
301 struct cfg80211_registered_device *drv; 410 struct cfg80211_registered_device *drv;
302 int err; 411 int err;
303 enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; 412 enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
413 u32 flags;
304 414
305 if (!info->attrs[NL80211_ATTR_IFNAME]) 415 if (!info->attrs[NL80211_ATTR_IFNAME])
306 return -EINVAL; 416 return -EINVAL;
@@ -321,8 +431,12 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
321 } 431 }
322 432
323 rtnl_lock(); 433 rtnl_lock();
434 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
435 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
436 &flags);
324 err = drv->ops->add_virtual_intf(&drv->wiphy, 437 err = drv->ops->add_virtual_intf(&drv->wiphy,
325 nla_data(info->attrs[NL80211_ATTR_IFNAME]), type); 438 nla_data(info->attrs[NL80211_ATTR_IFNAME]),
439 type, err ? NULL : &flags);
326 rtnl_unlock(); 440 rtnl_unlock();
327 441
328 unlock: 442 unlock:
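
On the driver side, change_virtual_intf()/add_virtual_intf() now receive the parsed flags, or NULL when none were supplied or parsing failed. The exact cfg80211_ops prototypes are defined outside this hunk, so the following kernel-style sketch is only an assumption; the mydrv_* names are hypothetical.

struct mydrv_priv {
	u32 monitor_flags;
};

static int mydrv_change_virtual_intf(struct wiphy *wiphy, int ifindex,
				     enum nl80211_iftype type, u32 *flags)
{
	struct mydrv_priv *priv = wiphy_priv(wiphy);

	if (type != NL80211_IFTYPE_MONITOR)
		return -EOPNOTSUPP;

	/* flags is a bitmask of (1 << NL80211_MNTR_FLAG_*); NULL means
	 * no monitor flags were requested */
	priv->monitor_flags = flags ? *flags : 0;
	return 0;
}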
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
new file mode 100644
index 000000000000..8cc6037eb2ae
--- /dev/null
+++ b/net/wireless/reg.c
@@ -0,0 +1,159 @@
1/*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/*
12 * This regulatory domain control implementation is highly incomplete; it
13 * only exists for the purpose of not regressing mac80211.
14 *
15 * For now, drivers can restrict the set of allowed channels by either
16 * not registering those channels or setting the IEEE80211_CHAN_DISABLED
17 * flag; that flag will only be *set* by this code, never *cleared*.
18 *
19 * The usual implementation is for a driver to read a device EEPROM to
20 * determine which regulatory domain it should be operating under, then
21 * look up the allowable channels in a driver-local table and finally
22 * register those channels in the wiphy structure.
23 *
24 * Alternatively, drivers that trust the regulatory domain control here
25 * will register a complete set of capabilities and the control code
26 * will restrict the set by setting the IEEE80211_CHAN_* flags.
27 */
28#include <linux/kernel.h>
29#include <net/wireless.h>
30#include "core.h"
31
32static char *ieee80211_regdom = "US";
33module_param(ieee80211_regdom, charp, 0444);
34MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
35
36struct ieee80211_channel_range {
37 short start_freq;
38 short end_freq;
39 int max_power;
40 int max_antenna_gain;
41 u32 flags;
42};
43
44struct ieee80211_regdomain {
45 const char *code;
46 const struct ieee80211_channel_range *ranges;
47 int n_ranges;
48};
49
50#define RANGE_PWR(_start, _end, _pwr, _ag, _flags) \
51 { _start, _end, _pwr, _ag, _flags }
52
53
54/*
55 * Ideally, in the future, these definitions will be loaded from a
56 * userspace table via some daemon.
57 */
58static const struct ieee80211_channel_range ieee80211_US_channels[] = {
59 /* IEEE 802.11b/g, channels 1..11 */
60 RANGE_PWR(2412, 2462, 27, 6, 0),
61 /* IEEE 802.11a, channel 36*/
62 RANGE_PWR(5180, 5180, 23, 6, 0),
63 /* IEEE 802.11a, channel 40*/
64 RANGE_PWR(5200, 5200, 23, 6, 0),
65 /* IEEE 802.11a, channel 44*/
66 RANGE_PWR(5220, 5220, 23, 6, 0),
67 /* IEEE 802.11a, channels 48..64 */
68 RANGE_PWR(5240, 5320, 23, 6, 0),
69 /* IEEE 802.11a, channels 149..165, outdoor */
70 RANGE_PWR(5745, 5825, 30, 6, 0),
71};
72
73static const struct ieee80211_channel_range ieee80211_JP_channels[] = {
74 /* IEEE 802.11b/g, channels 1..14 */
75 RANGE_PWR(2412, 2484, 20, 6, 0),
76 /* IEEE 802.11a, channels 34..48 */
77 RANGE_PWR(5170, 5240, 20, 6, IEEE80211_CHAN_PASSIVE_SCAN),
78 /* IEEE 802.11a, channels 52..64 */
79 RANGE_PWR(5260, 5320, 20, 6, IEEE80211_CHAN_NO_IBSS |
80 IEEE80211_CHAN_RADAR),
81};
82
83#define REGDOM(_code) \
84 { \
85 .code = __stringify(_code), \
86 .ranges = ieee80211_ ##_code## _channels, \
87 .n_ranges = ARRAY_SIZE(ieee80211_ ##_code## _channels), \
88 }
89
90static const struct ieee80211_regdomain ieee80211_regdoms[] = {
91 REGDOM(US),
92 REGDOM(JP),
93};
94
95
96static const struct ieee80211_regdomain *get_regdom(void)
97{
98 static const struct ieee80211_channel_range
99 ieee80211_world_channels[] = {
100 /* IEEE 802.11b/g, channels 1..11 */
101 RANGE_PWR(2412, 2462, 27, 6, 0),
102 };
103 static const struct ieee80211_regdomain regdom_world = REGDOM(world);
104 int i;
105
106 for (i = 0; i < ARRAY_SIZE(ieee80211_regdoms); i++)
107 if (strcmp(ieee80211_regdom, ieee80211_regdoms[i].code) == 0)
108 return &ieee80211_regdoms[i];
109
110 return &regdom_world;
111}
112
113
114static void handle_channel(struct ieee80211_channel *chan,
115 const struct ieee80211_regdomain *rd)
116{
117 int i;
118 u32 flags = chan->orig_flags;
119 const struct ieee80211_channel_range *rg = NULL;
120
121 for (i = 0; i < rd->n_ranges; i++) {
122 if (rd->ranges[i].start_freq <= chan->center_freq &&
123 chan->center_freq <= rd->ranges[i].end_freq) {
124 rg = &rd->ranges[i];
125 break;
126 }
127 }
128
129 if (!rg) {
130 /* not found */
131 flags |= IEEE80211_CHAN_DISABLED;
132 chan->flags = flags;
133 return;
134 }
135
136 chan->flags = flags;
137 chan->max_antenna_gain = min(chan->orig_mag,
138 rg->max_antenna_gain);
139 chan->max_power = min(chan->orig_mpwr, rg->max_power);
140}
141
142static void handle_band(struct ieee80211_supported_band *sband,
143 const struct ieee80211_regdomain *rd)
144{
145 int i;
146
147 for (i = 0; i < sband->n_channels; i++)
148 handle_channel(&sband->channels[i], rd);
149}
150
151void wiphy_update_regulatory(struct wiphy *wiphy)
152{
153 enum ieee80211_band band;
154 const struct ieee80211_regdomain *rd = get_regdom();
155
156 for (band = 0; band < IEEE80211_NUM_BANDS; band++)
157 if (wiphy->bands[band])
158 handle_band(wiphy->bands[band], rd);
159}
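
The "usual implementation" mentioned in the header comment, a driver consulting its own EEPROM-derived table instead of trusting this code, could look roughly like the kernel-style sketch below. Everything except IEEE80211_CHAN_DISABLED and the ieee80211_supported_band/ieee80211_channel fields is hypothetical; the table contents are invented for illustration.

#include <linux/kernel.h>
#include <net/wireless.h>

struct mydrv_freq_range {
	u16 start_freq, end_freq;	/* MHz, inclusive */
};

/* hypothetical table as decoded from the device EEPROM */
static const struct mydrv_freq_range mydrv_eeprom_ranges[] = {
	{ 2412, 2462 },
	{ 5180, 5320 },
};

/* call on each band before wiphy_register() */
static void mydrv_apply_eeprom_regdom(struct ieee80211_supported_band *sband)
{
	int i, r;

	for (i = 0; i < sband->n_channels; i++) {
		struct ieee80211_channel *chan = &sband->channels[i];
		bool allowed = false;

		for (r = 0; r < ARRAY_SIZE(mydrv_eeprom_ranges); r++)
			if (chan->center_freq >= mydrv_eeprom_ranges[r].start_freq &&
			    chan->center_freq <= mydrv_eeprom_ranges[r].end_freq)
				allowed = true;

		if (!allowed)
			chan->flags |= IEEE80211_CHAN_DISABLED;
	}
}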
diff --git a/net/wireless/util.c b/net/wireless/util.c
new file mode 100644
index 000000000000..77336c22fcf2
--- /dev/null
+++ b/net/wireless/util.c
@@ -0,0 +1,98 @@
1/*
2 * Wireless utility functions
3 *
4 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
5 */
6#include <net/wireless.h>
7#include <asm/bitops.h>
8#include "core.h"
9
10int ieee80211_channel_to_frequency(int chan)
11{
12 if (chan < 14)
13 return 2407 + chan * 5;
14
15 if (chan == 14)
16 return 2484;
17
18 /* FIXME: 802.11j 17.3.8.3.2 */
19 return (chan + 1000) * 5;
20}
21EXPORT_SYMBOL(ieee80211_channel_to_frequency);
22
23int ieee80211_frequency_to_channel(int freq)
24{
25 if (freq == 2484)
26 return 14;
27
28 if (freq < 2484)
29 return (freq - 2407) / 5;
30
31 /* FIXME: 802.11j 17.3.8.3.2 */
32 return freq/5 - 1000;
33}
34EXPORT_SYMBOL(ieee80211_frequency_to_channel);
35
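
A quick plausibility check of the two conversions above, written as an ordinary userspace program; the arithmetic mirrors the kernel helpers but is not kernel code.

#include <assert.h>
#include <stdio.h>

static int chan_to_freq(int chan)
{
	if (chan < 14)
		return 2407 + chan * 5;
	if (chan == 14)
		return 2484;
	return (chan + 1000) * 5;		/* 5 GHz channels */
}

static int freq_to_chan(int freq)
{
	if (freq == 2484)
		return 14;
	if (freq < 2484)
		return (freq - 2407) / 5;
	return freq / 5 - 1000;
}

int main(void)
{
	assert(chan_to_freq(1)  == 2412);	/* 2.4 GHz channel 1 */
	assert(chan_to_freq(14) == 2484);	/* Japanese channel 14 */
	assert(chan_to_freq(36) == 5180);	/* 5 GHz channel 36 */
	assert(freq_to_chan(5745) == 149);	/* round trip on 5 GHz */
	printf("all conversions match\n");
	return 0;
}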
36static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
37 enum ieee80211_band band)
38{
39 int i, want;
40
41 switch (band) {
42 case IEEE80211_BAND_5GHZ:
43 want = 3;
44 for (i = 0; i < sband->n_bitrates; i++) {
45 if (sband->bitrates[i].bitrate == 60 ||
46 sband->bitrates[i].bitrate == 120 ||
47 sband->bitrates[i].bitrate == 240) {
48 sband->bitrates[i].flags |=
49 IEEE80211_RATE_MANDATORY_A;
50 want--;
51 }
52 }
53 WARN_ON(want);
54 break;
55 case IEEE80211_BAND_2GHZ:
56 want = 7;
57 for (i = 0; i < sband->n_bitrates; i++) {
58 if (sband->bitrates[i].bitrate == 10) {
59 sband->bitrates[i].flags |=
60 IEEE80211_RATE_MANDATORY_B |
61 IEEE80211_RATE_MANDATORY_G;
62 want--;
63 }
64
65 if (sband->bitrates[i].bitrate == 20 ||
66 sband->bitrates[i].bitrate == 55 ||
67 sband->bitrates[i].bitrate == 110 ||
68 sband->bitrates[i].bitrate == 60 ||
69 sband->bitrates[i].bitrate == 120 ||
70 sband->bitrates[i].bitrate == 240) {
71 sband->bitrates[i].flags |=
72 IEEE80211_RATE_MANDATORY_G;
73 want--;
74 }
75
76 if (sband->bitrates[i].bitrate != 10 &&
77 sband->bitrates[i].bitrate != 20 &&
78 sband->bitrates[i].bitrate != 55 &&
79 sband->bitrates[i].bitrate != 110)
80 sband->bitrates[i].flags |=
81 IEEE80211_RATE_ERP_G;
82 }
83 WARN_ON(want != 0 && want != 3 && want != 6);
84 break;
85 case IEEE80211_NUM_BANDS:
86 WARN_ON(1);
87 break;
88 }
89}
90
91void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
92{
93 enum ieee80211_band band;
94
95 for (band = 0; band < IEEE80211_NUM_BANDS; band++)
96 if (wiphy->bands[band])
97 set_mandatory_flags_band(wiphy->bands[band], band);
98}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9fc4c315f6cd..bae94a8031a2 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -46,6 +46,7 @@ EXPORT_SYMBOL(xfrm_cfg_mutex);
46 46
47static DEFINE_RWLOCK(xfrm_policy_lock); 47static DEFINE_RWLOCK(xfrm_policy_lock);
48 48
49static struct list_head xfrm_policy_bytype[XFRM_POLICY_TYPE_MAX];
49unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2]; 50unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
50EXPORT_SYMBOL(xfrm_policy_count); 51EXPORT_SYMBOL(xfrm_policy_count);
51 52
@@ -208,6 +209,7 @@ struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
208 policy = kzalloc(sizeof(struct xfrm_policy), gfp); 209 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
209 210
210 if (policy) { 211 if (policy) {
212 INIT_LIST_HEAD(&policy->bytype);
211 INIT_HLIST_NODE(&policy->bydst); 213 INIT_HLIST_NODE(&policy->bydst);
212 INIT_HLIST_NODE(&policy->byidx); 214 INIT_HLIST_NODE(&policy->byidx);
213 rwlock_init(&policy->lock); 215 rwlock_init(&policy->lock);
@@ -230,6 +232,10 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
230 if (del_timer(&policy->timer)) 232 if (del_timer(&policy->timer))
231 BUG(); 233 BUG();
232 234
235 write_lock_bh(&xfrm_policy_lock);
236 list_del(&policy->bytype);
237 write_unlock_bh(&xfrm_policy_lock);
238
233 security_xfrm_policy_free(policy); 239 security_xfrm_policy_free(policy);
234 kfree(policy); 240 kfree(policy);
235} 241}
@@ -584,6 +590,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
584 policy->curlft.use_time = 0; 590 policy->curlft.use_time = 0;
585 if (!mod_timer(&policy->timer, jiffies + HZ)) 591 if (!mod_timer(&policy->timer, jiffies + HZ))
586 xfrm_pol_hold(policy); 592 xfrm_pol_hold(policy);
593 list_add_tail(&policy->bytype, &xfrm_policy_bytype[policy->type]);
587 write_unlock_bh(&xfrm_policy_lock); 594 write_unlock_bh(&xfrm_policy_lock);
588 595
589 if (delpol) 596 if (delpol)
@@ -822,57 +829,60 @@ out:
822} 829}
823EXPORT_SYMBOL(xfrm_policy_flush); 830EXPORT_SYMBOL(xfrm_policy_flush);
824 831
825int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), 832int xfrm_policy_walk(struct xfrm_policy_walk *walk,
833 int (*func)(struct xfrm_policy *, int, int, void*),
826 void *data) 834 void *data)
827{ 835{
828 struct xfrm_policy *pol, *last = NULL; 836 struct xfrm_policy *old, *pol, *last = NULL;
829 struct hlist_node *entry; 837 int error = 0;
830 int dir, last_dir = 0, count, error; 838
839 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
840 walk->type != XFRM_POLICY_TYPE_ANY)
841 return -EINVAL;
831 842
843 if (walk->policy == NULL && walk->count != 0)
844 return 0;
845
846 old = pol = walk->policy;
847 walk->policy = NULL;
832 read_lock_bh(&xfrm_policy_lock); 848 read_lock_bh(&xfrm_policy_lock);
833 count = 0;
834 849
835 for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) { 850 for (; walk->cur_type < XFRM_POLICY_TYPE_MAX; walk->cur_type++) {
836 struct hlist_head *table = xfrm_policy_bydst[dir].table; 851 if (walk->type != walk->cur_type &&
837 int i; 852 walk->type != XFRM_POLICY_TYPE_ANY)
853 continue;
838 854
839 hlist_for_each_entry(pol, entry, 855 if (pol == NULL) {
840 &xfrm_policy_inexact[dir], bydst) { 856 pol = list_first_entry(&xfrm_policy_bytype[walk->cur_type],
841 if (pol->type != type) 857 struct xfrm_policy, bytype);
858 }
859 list_for_each_entry_from(pol, &xfrm_policy_bytype[walk->cur_type], bytype) {
860 if (pol->dead)
842 continue; 861 continue;
843 if (last) { 862 if (last) {
844 error = func(last, last_dir % XFRM_POLICY_MAX, 863 error = func(last, xfrm_policy_id2dir(last->index),
845 count, data); 864 walk->count, data);
846 if (error) 865 if (error) {
866 xfrm_pol_hold(last);
867 walk->policy = last;
847 goto out; 868 goto out;
848 }
849 last = pol;
850 last_dir = dir;
851 count++;
852 }
853 for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
854 hlist_for_each_entry(pol, entry, table + i, bydst) {
855 if (pol->type != type)
856 continue;
857 if (last) {
858 error = func(last, last_dir % XFRM_POLICY_MAX,
859 count, data);
860 if (error)
861 goto out;
862 } 869 }
863 last = pol;
864 last_dir = dir;
865 count++;
866 } 870 }
871 last = pol;
872 walk->count++;
867 } 873 }
874 pol = NULL;
868 } 875 }
869 if (count == 0) { 876 if (walk->count == 0) {
870 error = -ENOENT; 877 error = -ENOENT;
871 goto out; 878 goto out;
872 } 879 }
873 error = func(last, last_dir % XFRM_POLICY_MAX, 0, data); 880 if (last)
881 error = func(last, xfrm_policy_id2dir(last->index), 0, data);
874out: 882out:
875 read_unlock_bh(&xfrm_policy_lock); 883 read_unlock_bh(&xfrm_policy_lock);
884 if (old != NULL)
885 xfrm_pol_put(old);
876 return error; 886 return error;
877} 887}
878EXPORT_SYMBOL(xfrm_policy_walk); 888EXPORT_SYMBOL(xfrm_policy_walk);
@@ -2365,6 +2375,9 @@ static void __init xfrm_policy_init(void)
2365 panic("XFRM: failed to allocate bydst hash\n"); 2375 panic("XFRM: failed to allocate bydst hash\n");
2366 } 2376 }
2367 2377
2378 for (dir = 0; dir < XFRM_POLICY_TYPE_MAX; dir++)
2379 INIT_LIST_HEAD(&xfrm_policy_bytype[dir]);
2380
2368 INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task); 2381 INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
2369 register_netdevice_notifier(&xfrm_dev_notifier); 2382 register_netdevice_notifier(&xfrm_dev_notifier);
2370} 2383}
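
The resumable policy walker relies on init/done helpers that are not part of this net/ diff (they presumably live in include/net/xfrm.h alongside struct xfrm_policy_walk). A plausible minimal form, inferred from how xfrm_policy_walk() saves and resumes its position rather than copied from the tree, would be:

static inline void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	walk->cur_type = XFRM_POLICY_TYPE_MAIN;
	walk->type = type;
	walk->policy = NULL;
	walk->count = 0;
}

static inline void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
	if (walk->policy != NULL) {
		xfrm_pol_put(walk->policy);	/* drop the ref taken when the dump paused */
		walk->policy = NULL;
	}
}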
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 7ba65e82941c..9880b792e6a5 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -50,6 +50,7 @@ static DEFINE_SPINLOCK(xfrm_state_lock);
50 * Main use is finding SA after policy selected tunnel or transport mode. 50 * Main use is finding SA after policy selected tunnel or transport mode.
51 * Also, it can be used by ah/esp icmp error handler to find offending SA. 51 * Also, it can be used by ah/esp icmp error handler to find offending SA.
52 */ 52 */
53static LIST_HEAD(xfrm_state_all);
53static struct hlist_head *xfrm_state_bydst __read_mostly; 54static struct hlist_head *xfrm_state_bydst __read_mostly;
54static struct hlist_head *xfrm_state_bysrc __read_mostly; 55static struct hlist_head *xfrm_state_bysrc __read_mostly;
55static struct hlist_head *xfrm_state_byspi __read_mostly; 56static struct hlist_head *xfrm_state_byspi __read_mostly;
@@ -510,6 +511,7 @@ struct xfrm_state *xfrm_state_alloc(void)
510 if (x) { 511 if (x) {
511 atomic_set(&x->refcnt, 1); 512 atomic_set(&x->refcnt, 1);
512 atomic_set(&x->tunnel_users, 0); 513 atomic_set(&x->tunnel_users, 0);
514 INIT_LIST_HEAD(&x->all);
513 INIT_HLIST_NODE(&x->bydst); 515 INIT_HLIST_NODE(&x->bydst);
514 INIT_HLIST_NODE(&x->bysrc); 516 INIT_HLIST_NODE(&x->bysrc);
515 INIT_HLIST_NODE(&x->byspi); 517 INIT_HLIST_NODE(&x->byspi);
@@ -533,6 +535,10 @@ void __xfrm_state_destroy(struct xfrm_state *x)
533{ 535{
534 BUG_TRAP(x->km.state == XFRM_STATE_DEAD); 536 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
535 537
538 spin_lock_bh(&xfrm_state_lock);
539 list_del(&x->all);
540 spin_unlock_bh(&xfrm_state_lock);
541
536 spin_lock_bh(&xfrm_state_gc_lock); 542 spin_lock_bh(&xfrm_state_gc_lock);
537 hlist_add_head(&x->bydst, &xfrm_state_gc_list); 543 hlist_add_head(&x->bydst, &xfrm_state_gc_list);
538 spin_unlock_bh(&xfrm_state_gc_lock); 544 spin_unlock_bh(&xfrm_state_gc_lock);
@@ -909,6 +915,8 @@ static void __xfrm_state_insert(struct xfrm_state *x)
909 915
910 x->genid = ++xfrm_state_genid; 916 x->genid = ++xfrm_state_genid;
911 917
918 list_add_tail(&x->all, &xfrm_state_all);
919
912 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr, 920 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
913 x->props.reqid, x->props.family); 921 x->props.reqid, x->props.family);
914 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 922 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
@@ -1518,36 +1526,47 @@ unlock:
1518} 1526}
1519EXPORT_SYMBOL(xfrm_alloc_spi); 1527EXPORT_SYMBOL(xfrm_alloc_spi);
1520 1528
1521int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), 1529int xfrm_state_walk(struct xfrm_state_walk *walk,
1530 int (*func)(struct xfrm_state *, int, void*),
1522 void *data) 1531 void *data)
1523{ 1532{
1524 int i; 1533 struct xfrm_state *old, *x, *last = NULL;
1525 struct xfrm_state *x, *last = NULL;
1526 struct hlist_node *entry;
1527 int count = 0;
1528 int err = 0; 1534 int err = 0;
1529 1535
1536 if (walk->state == NULL && walk->count != 0)
1537 return 0;
1538
1539 old = x = walk->state;
1540 walk->state = NULL;
1530 spin_lock_bh(&xfrm_state_lock); 1541 spin_lock_bh(&xfrm_state_lock);
1531 for (i = 0; i <= xfrm_state_hmask; i++) { 1542 if (x == NULL)
1532 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) { 1543 x = list_first_entry(&xfrm_state_all, struct xfrm_state, all);
1533 if (!xfrm_id_proto_match(x->id.proto, proto)) 1544 list_for_each_entry_from(x, &xfrm_state_all, all) {
1534 continue; 1545 if (x->km.state == XFRM_STATE_DEAD)
1535 if (last) { 1546 continue;
1536 err = func(last, count, data); 1547 if (!xfrm_id_proto_match(x->id.proto, walk->proto))
1537 if (err) 1548 continue;
1538 goto out; 1549 if (last) {
1550 err = func(last, walk->count, data);
1551 if (err) {
1552 xfrm_state_hold(last);
1553 walk->state = last;
1554 goto out;
1539 } 1555 }
1540 last = x;
1541 count++;
1542 } 1556 }
1557 last = x;
1558 walk->count++;
1543 } 1559 }
1544 if (count == 0) { 1560 if (walk->count == 0) {
1545 err = -ENOENT; 1561 err = -ENOENT;
1546 goto out; 1562 goto out;
1547 } 1563 }
1548 err = func(last, 0, data); 1564 if (last)
1565 err = func(last, 0, data);
1549out: 1566out:
1550 spin_unlock_bh(&xfrm_state_lock); 1567 spin_unlock_bh(&xfrm_state_lock);
1568 if (old != NULL)
1569 xfrm_state_put(old);
1551 return err; 1570 return err;
1552} 1571}
1553EXPORT_SYMBOL(xfrm_state_walk); 1572EXPORT_SYMBOL(xfrm_state_walk);
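
xfrm_state_walk() follows the same resumable pattern; its init/done helpers are likewise outside this diff, and an equivalent sketch, inferred from the walk logic above and not copied from the tree, would be:

static inline void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
{
	walk->proto = proto;
	walk->state = NULL;
	walk->count = 0;
}

static inline void xfrm_state_walk_done(struct xfrm_state_walk *walk)
{
	if (walk->state != NULL) {
		xfrm_state_put(walk->state);	/* ref taken via xfrm_state_hold() when pausing */
		walk->state = NULL;
	}
}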
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index f971ca5645f8..f5fd5b3147cc 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -532,8 +532,6 @@ struct xfrm_dump_info {
532 struct sk_buff *out_skb; 532 struct sk_buff *out_skb;
533 u32 nlmsg_seq; 533 u32 nlmsg_seq;
534 u16 nlmsg_flags; 534 u16 nlmsg_flags;
535 int start_idx;
536 int this_idx;
537}; 535};
538 536
539static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) 537static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
@@ -600,9 +598,6 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
600 struct nlmsghdr *nlh; 598 struct nlmsghdr *nlh;
601 int err; 599 int err;
602 600
603 if (sp->this_idx < sp->start_idx)
604 goto out;
605
606 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, 601 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
607 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); 602 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
608 if (nlh == NULL) 603 if (nlh == NULL)
@@ -615,8 +610,6 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
615 goto nla_put_failure; 610 goto nla_put_failure;
616 611
617 nlmsg_end(skb, nlh); 612 nlmsg_end(skb, nlh);
618out:
619 sp->this_idx++;
620 return 0; 613 return 0;
621 614
622nla_put_failure: 615nla_put_failure:
@@ -624,18 +617,32 @@ nla_put_failure:
624 return err; 617 return err;
625} 618}
626 619
620static int xfrm_dump_sa_done(struct netlink_callback *cb)
621{
622 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
623 xfrm_state_walk_done(walk);
624 return 0;
625}
626
627static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) 627static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
628{ 628{
629 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
629 struct xfrm_dump_info info; 630 struct xfrm_dump_info info;
630 631
632 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
633 sizeof(cb->args) - sizeof(cb->args[0]));
634
631 info.in_skb = cb->skb; 635 info.in_skb = cb->skb;
632 info.out_skb = skb; 636 info.out_skb = skb;
633 info.nlmsg_seq = cb->nlh->nlmsg_seq; 637 info.nlmsg_seq = cb->nlh->nlmsg_seq;
634 info.nlmsg_flags = NLM_F_MULTI; 638 info.nlmsg_flags = NLM_F_MULTI;
635 info.this_idx = 0; 639
636 info.start_idx = cb->args[0]; 640 if (!cb->args[0]) {
637 (void) xfrm_state_walk(0, dump_one_state, &info); 641 cb->args[0] = 1;
638 cb->args[0] = info.this_idx; 642 xfrm_state_walk_init(walk, 0);
643 }
644
645 (void) xfrm_state_walk(walk, dump_one_state, &info);
639 646
640 return skb->len; 647 return skb->len;
641} 648}
@@ -654,7 +661,6 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
654 info.out_skb = skb; 661 info.out_skb = skb;
655 info.nlmsg_seq = seq; 662 info.nlmsg_seq = seq;
656 info.nlmsg_flags = 0; 663 info.nlmsg_flags = 0;
657 info.this_idx = info.start_idx = 0;
658 664
659 if (dump_one_state(x, 0, &info)) { 665 if (dump_one_state(x, 0, &info)) {
660 kfree_skb(skb); 666 kfree_skb(skb);
@@ -1232,9 +1238,6 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1232 struct sk_buff *skb = sp->out_skb; 1238 struct sk_buff *skb = sp->out_skb;
1233 struct nlmsghdr *nlh; 1239 struct nlmsghdr *nlh;
1234 1240
1235 if (sp->this_idx < sp->start_idx)
1236 goto out;
1237
1238 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, 1241 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
1239 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); 1242 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1240 if (nlh == NULL) 1243 if (nlh == NULL)
@@ -1250,8 +1253,6 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1250 goto nlmsg_failure; 1253 goto nlmsg_failure;
1251 1254
1252 nlmsg_end(skb, nlh); 1255 nlmsg_end(skb, nlh);
1253out:
1254 sp->this_idx++;
1255 return 0; 1256 return 0;
1256 1257
1257nlmsg_failure: 1258nlmsg_failure:
@@ -1259,21 +1260,33 @@ nlmsg_failure:
1259 return -EMSGSIZE; 1260 return -EMSGSIZE;
1260} 1261}
1261 1262
1263static int xfrm_dump_policy_done(struct netlink_callback *cb)
1264{
1265 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1266
1267 xfrm_policy_walk_done(walk);
1268 return 0;
1269}
1270
1262static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) 1271static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1263{ 1272{
1273 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1264 struct xfrm_dump_info info; 1274 struct xfrm_dump_info info;
1265 1275
1276 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1277 sizeof(cb->args) - sizeof(cb->args[0]));
1278
1266 info.in_skb = cb->skb; 1279 info.in_skb = cb->skb;
1267 info.out_skb = skb; 1280 info.out_skb = skb;
1268 info.nlmsg_seq = cb->nlh->nlmsg_seq; 1281 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1269 info.nlmsg_flags = NLM_F_MULTI; 1282 info.nlmsg_flags = NLM_F_MULTI;
1270 info.this_idx = 0; 1283
1271 info.start_idx = cb->args[0]; 1284 if (!cb->args[0]) {
1272 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_one_policy, &info); 1285 cb->args[0] = 1;
1273#ifdef CONFIG_XFRM_SUB_POLICY 1286 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1274 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_SUB, dump_one_policy, &info); 1287 }
1275#endif 1288
1276 cb->args[0] = info.this_idx; 1289 (void) xfrm_policy_walk(walk, dump_one_policy, &info);
1277 1290
1278 return skb->len; 1291 return skb->len;
1279} 1292}
@@ -1293,7 +1306,6 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1293 info.out_skb = skb; 1306 info.out_skb = skb;
1294 info.nlmsg_seq = seq; 1307 info.nlmsg_seq = seq;
1295 info.nlmsg_flags = 0; 1308 info.nlmsg_flags = 0;
1296 info.this_idx = info.start_idx = 0;
1297 1309
1298 if (dump_one_policy(xp, dir, 0, &info) < 0) { 1310 if (dump_one_policy(xp, dir, 0, &info) < 0) {
1299 kfree_skb(skb); 1311 kfree_skb(skb);
@@ -1891,15 +1903,18 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
1891static struct xfrm_link { 1903static struct xfrm_link {
1892 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); 1904 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
1893 int (*dump)(struct sk_buff *, struct netlink_callback *); 1905 int (*dump)(struct sk_buff *, struct netlink_callback *);
1906 int (*done)(struct netlink_callback *);
1894} xfrm_dispatch[XFRM_NR_MSGTYPES] = { 1907} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
1895 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, 1908 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1896 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, 1909 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
1897 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa, 1910 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
1898 .dump = xfrm_dump_sa }, 1911 .dump = xfrm_dump_sa,
1912 .done = xfrm_dump_sa_done },
1899 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 1913 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1900 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, 1914 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
1901 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, 1915 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
1902 .dump = xfrm_dump_policy }, 1916 .dump = xfrm_dump_policy,
1917 .done = xfrm_dump_policy_done },
1903 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, 1918 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
1904 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire }, 1919 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
1905 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire }, 1920 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
@@ -1938,7 +1953,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1938 if (link->dump == NULL) 1953 if (link->dump == NULL)
1939 return -EINVAL; 1954 return -EINVAL;
1940 1955
1941 return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, NULL); 1956 return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, link->done);
1942 } 1957 }
1943 1958
1944 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX, 1959 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,