Diffstat (limited to 'net')
-rw-r--r--  net/Kconfig | 8
-rw-r--r--  net/bridge/br.c | 22
-rw-r--r--  net/bridge/br_device.c | 3
-rw-r--r--  net/bridge/br_if.c | 15
-rw-r--r--  net/bridge/br_ioctl.c | 20
-rw-r--r--  net/bridge/br_netlink.c | 15
-rw-r--r--  net/bridge/br_notify.c | 3
-rw-r--r--  net/bridge/br_private.h | 6
-rw-r--r--  net/bridge/br_stp_bpdu.c | 3
-rw-r--r--  net/core/dev.c | 6
-rw-r--r--  net/dccp/ccids/ccid2.c | 2
-rw-r--r--  net/dccp/ccids/ccid3.c | 2
-rw-r--r--  net/dccp/ccids/lib/loss_interval.c | 6
-rw-r--r--  net/dccp/ccids/lib/tfrc.c | 2
-rw-r--r--  net/dccp/input.c | 4
-rw-r--r--  net/dccp/options.c | 13
-rw-r--r--  net/dccp/proto.c | 4
-rw-r--r--  net/ieee80211/ieee80211_module.c | 8
-rw-r--r--  net/ipv4/inet_diag.c | 6
-rw-r--r--  net/ipv4/ipvs/Kconfig | 6
-rw-r--r--  net/ipv4/ipvs/Makefile | 3
-rw-r--r--  net/ipv4/ipvs/ip_vs_core.c | 8
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 896
-rw-r--r--  net/ipv4/ipvs/ip_vs_est.c | 18
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblc.c | 213
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblcr.c | 238
-rw-r--r--  net/ipv4/ipvs/ip_vs_lc.c | 21
-rw-r--r--  net/ipv4/ipvs/ip_vs_nq.c | 24
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_ah_esp.c (renamed from net/ipv4/ipvs/ip_vs_proto_ah.c) | 69
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_esp.c | 176
-rw-r--r--  net/ipv4/ipvs/ip_vs_rr.c | 7
-rw-r--r--  net/ipv4/ipvs/ip_vs_sed.c | 24
-rw-r--r--  net/ipv4/ipvs/ip_vs_wlc.c | 24
-rw-r--r--  net/ipv4/route.c | 14
-rw-r--r--  net/ipv4/tcp_input.c | 208
-rw-r--r--  net/ipv4/tcp_ipv4.c | 26
-rw-r--r--  net/mac80211/cfg.c | 63
-rw-r--r--  net/mac80211/debugfs_sta.c | 2
-rw-r--r--  net/mac80211/event.c | 5
-rw-r--r--  net/mac80211/ieee80211_i.h | 121
-rw-r--r--  net/mac80211/iface.c | 8
-rw-r--r--  net/mac80211/main.c | 93
-rw-r--r--  net/mac80211/mesh.c | 29
-rw-r--r--  net/mac80211/mesh.h | 61
-rw-r--r--  net/mac80211/mesh_hwmp.c | 120
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 76
-rw-r--r--  net/mac80211/mesh_plink.c | 56
-rw-r--r--  net/mac80211/mlme.c | 1213
-rw-r--r--  net/mac80211/rx.c | 142
-rw-r--r--  net/mac80211/sta_info.h | 1
-rw-r--r--  net/mac80211/tx.c | 46
-rw-r--r--  net/mac80211/util.c | 46
-rw-r--r--  net/mac80211/wep.c | 14
-rw-r--r--  net/mac80211/wext.c | 53
-rw-r--r--  net/mac80211/wme.c | 2
-rw-r--r--  net/mac80211/wme.h | 2
-rw-r--r--  net/mac80211/wpa.c | 2
-rw-r--r--  net/rfkill/rfkill-input.h | 1
-rw-r--r--  net/rfkill/rfkill.c | 238
-rw-r--r--  net/sched/sch_generic.c | 3
-rw-r--r--  net/sched/sch_netem.c | 18
-rw-r--r--  net/sched/sch_prio.c | 6
-rw-r--r--  net/wireless/core.c | 9
-rw-r--r--  net/wireless/nl80211.c | 84
-rw-r--r--  net/xfrm/xfrm_policy.c | 2
65 files changed, 2762 insertions(+), 1877 deletions(-)
diff --git a/net/Kconfig b/net/Kconfig
index 7612cc8c337c..d87de48ba656 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -236,14 +236,18 @@ source "net/rxrpc/Kconfig"
236config FIB_RULES 236config FIB_RULES
237 bool 237 bool
238 238
239menu "Wireless" 239menuconfig WIRELESS
240 bool "Wireless"
240 depends on !S390 241 depends on !S390
242 default y
243
244if WIRELESS
241 245
242source "net/wireless/Kconfig" 246source "net/wireless/Kconfig"
243source "net/mac80211/Kconfig" 247source "net/mac80211/Kconfig"
244source "net/ieee80211/Kconfig" 248source "net/ieee80211/Kconfig"
245 249
246endmenu 250endif # WIRELESS
247 251
248source "net/rfkill/Kconfig" 252source "net/rfkill/Kconfig"
249source "net/9p/Kconfig" 253source "net/9p/Kconfig"
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 573acdf6f9ff..4d2c1f1cb524 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -28,6 +28,10 @@ static const struct stp_proto br_stp_proto = {
28 .rcv = br_stp_rcv, 28 .rcv = br_stp_rcv,
29}; 29};
30 30
31static struct pernet_operations br_net_ops = {
32 .exit = br_net_exit,
33};
34
31static int __init br_init(void) 35static int __init br_init(void)
32{ 36{
33 int err; 37 int err;
@@ -42,18 +46,22 @@ static int __init br_init(void)
42 if (err) 46 if (err)
43 goto err_out; 47 goto err_out;
44 48
45 err = br_netfilter_init(); 49 err = register_pernet_subsys(&br_net_ops);
46 if (err) 50 if (err)
47 goto err_out1; 51 goto err_out1;
48 52
49 err = register_netdevice_notifier(&br_device_notifier); 53 err = br_netfilter_init();
50 if (err) 54 if (err)
51 goto err_out2; 55 goto err_out2;
52 56
53 err = br_netlink_init(); 57 err = register_netdevice_notifier(&br_device_notifier);
54 if (err) 58 if (err)
55 goto err_out3; 59 goto err_out3;
56 60
61 err = br_netlink_init();
62 if (err)
63 goto err_out4;
64
57 brioctl_set(br_ioctl_deviceless_stub); 65 brioctl_set(br_ioctl_deviceless_stub);
58 br_handle_frame_hook = br_handle_frame; 66 br_handle_frame_hook = br_handle_frame;
59 67
@@ -61,10 +69,12 @@ static int __init br_init(void)
61 br_fdb_put_hook = br_fdb_put; 69 br_fdb_put_hook = br_fdb_put;
62 70
63 return 0; 71 return 0;
64err_out3: 72err_out4:
65 unregister_netdevice_notifier(&br_device_notifier); 73 unregister_netdevice_notifier(&br_device_notifier);
66err_out2: 74err_out3:
67 br_netfilter_fini(); 75 br_netfilter_fini();
76err_out2:
77 unregister_pernet_subsys(&br_net_ops);
68err_out1: 78err_out1:
69 br_fdb_fini(); 79 br_fdb_fini();
70err_out: 80err_out:
@@ -80,7 +90,7 @@ static void __exit br_deinit(void)
80 unregister_netdevice_notifier(&br_device_notifier); 90 unregister_netdevice_notifier(&br_device_notifier);
81 brioctl_set(NULL); 91 brioctl_set(NULL);
82 92
83 br_cleanup_bridges(); 93 unregister_pernet_subsys(&br_net_ops);
84 94
85 synchronize_net(); 95 synchronize_net();
86 96
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 4f52c3d50ebe..22ba8632196f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -178,5 +178,6 @@ void br_dev_setup(struct net_device *dev)
178 dev->priv_flags = IFF_EBRIDGE; 178 dev->priv_flags = IFF_EBRIDGE;
179 179
180 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 180 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
181 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX; 181 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
182 NETIF_F_NETNS_LOCAL;
182} 183}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 63c18aacde8c..573e20f7dba4 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -168,7 +168,7 @@ static void del_br(struct net_bridge *br)
168 unregister_netdevice(br->dev); 168 unregister_netdevice(br->dev);
169} 169}
170 170
171static struct net_device *new_bridge_dev(const char *name) 171static struct net_device *new_bridge_dev(struct net *net, const char *name)
172{ 172{
173 struct net_bridge *br; 173 struct net_bridge *br;
174 struct net_device *dev; 174 struct net_device *dev;
@@ -178,6 +178,7 @@ static struct net_device *new_bridge_dev(const char *name)
178 178
179 if (!dev) 179 if (!dev)
180 return NULL; 180 return NULL;
181 dev_net_set(dev, net);
181 182
182 br = netdev_priv(dev); 183 br = netdev_priv(dev);
183 br->dev = dev; 184 br->dev = dev;
@@ -262,12 +263,12 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
262 return p; 263 return p;
263} 264}
264 265
265int br_add_bridge(const char *name) 266int br_add_bridge(struct net *net, const char *name)
266{ 267{
267 struct net_device *dev; 268 struct net_device *dev;
268 int ret; 269 int ret;
269 270
270 dev = new_bridge_dev(name); 271 dev = new_bridge_dev(net, name);
271 if (!dev) 272 if (!dev)
272 return -ENOMEM; 273 return -ENOMEM;
273 274
@@ -294,13 +295,13 @@ out_free:
294 goto out; 295 goto out;
295} 296}
296 297
297int br_del_bridge(const char *name) 298int br_del_bridge(struct net *net, const char *name)
298{ 299{
299 struct net_device *dev; 300 struct net_device *dev;
300 int ret = 0; 301 int ret = 0;
301 302
302 rtnl_lock(); 303 rtnl_lock();
303 dev = __dev_get_by_name(&init_net, name); 304 dev = __dev_get_by_name(net, name);
304 if (dev == NULL) 305 if (dev == NULL)
305 ret = -ENXIO; /* Could not find device */ 306 ret = -ENXIO; /* Could not find device */
306 307
@@ -445,13 +446,13 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
445 return 0; 446 return 0;
446} 447}
447 448
448void __exit br_cleanup_bridges(void) 449void br_net_exit(struct net *net)
449{ 450{
450 struct net_device *dev; 451 struct net_device *dev;
451 452
452 rtnl_lock(); 453 rtnl_lock();
453restart: 454restart:
454 for_each_netdev(&init_net, dev) { 455 for_each_netdev(net, dev) {
455 if (dev->priv_flags & IFF_EBRIDGE) { 456 if (dev->priv_flags & IFF_EBRIDGE) {
456 del_br(dev->priv); 457 del_br(dev->priv);
457 goto restart; 458 goto restart;
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 5bbf07362172..6a6433daaf27 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -21,12 +21,12 @@
21#include "br_private.h" 21#include "br_private.h"
22 22
23/* called with RTNL */ 23/* called with RTNL */
24static int get_bridge_ifindices(int *indices, int num) 24static int get_bridge_ifindices(struct net *net, int *indices, int num)
25{ 25{
26 struct net_device *dev; 26 struct net_device *dev;
27 int i = 0; 27 int i = 0;
28 28
29 for_each_netdev(&init_net, dev) { 29 for_each_netdev(net, dev) {
30 if (i >= num) 30 if (i >= num)
31 break; 31 break;
32 if (dev->priv_flags & IFF_EBRIDGE) 32 if (dev->priv_flags & IFF_EBRIDGE)
@@ -89,7 +89,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
89 if (!capable(CAP_NET_ADMIN)) 89 if (!capable(CAP_NET_ADMIN))
90 return -EPERM; 90 return -EPERM;
91 91
92 dev = dev_get_by_index(&init_net, ifindex); 92 dev = dev_get_by_index(dev_net(br->dev), ifindex);
93 if (dev == NULL) 93 if (dev == NULL)
94 return -EINVAL; 94 return -EINVAL;
95 95
@@ -315,7 +315,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
315 return -EOPNOTSUPP; 315 return -EOPNOTSUPP;
316} 316}
317 317
318static int old_deviceless(void __user *uarg) 318static int old_deviceless(struct net *net, void __user *uarg)
319{ 319{
320 unsigned long args[3]; 320 unsigned long args[3];
321 321
@@ -337,7 +337,7 @@ static int old_deviceless(void __user *uarg)
337 if (indices == NULL) 337 if (indices == NULL)
338 return -ENOMEM; 338 return -ENOMEM;
339 339
340 args[2] = get_bridge_ifindices(indices, args[2]); 340 args[2] = get_bridge_ifindices(net, indices, args[2]);
341 341
342 ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int)) 342 ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int))
343 ? -EFAULT : args[2]; 343 ? -EFAULT : args[2];
@@ -360,9 +360,9 @@ static int old_deviceless(void __user *uarg)
360 buf[IFNAMSIZ-1] = 0; 360 buf[IFNAMSIZ-1] = 0;
361 361
362 if (args[0] == BRCTL_ADD_BRIDGE) 362 if (args[0] == BRCTL_ADD_BRIDGE)
363 return br_add_bridge(buf); 363 return br_add_bridge(net, buf);
364 364
365 return br_del_bridge(buf); 365 return br_del_bridge(net, buf);
366 } 366 }
367 } 367 }
368 368
@@ -374,7 +374,7 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uar
374 switch (cmd) { 374 switch (cmd) {
375 case SIOCGIFBR: 375 case SIOCGIFBR:
376 case SIOCSIFBR: 376 case SIOCSIFBR:
377 return old_deviceless(uarg); 377 return old_deviceless(net, uarg);
378 378
379 case SIOCBRADDBR: 379 case SIOCBRADDBR:
380 case SIOCBRDELBR: 380 case SIOCBRDELBR:
@@ -389,9 +389,9 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uar
389 389
390 buf[IFNAMSIZ-1] = 0; 390 buf[IFNAMSIZ-1] = 0;
391 if (cmd == SIOCBRADDBR) 391 if (cmd == SIOCBRADDBR)
392 return br_add_bridge(buf); 392 return br_add_bridge(net, buf);
393 393
394 return br_del_bridge(buf); 394 return br_del_bridge(net, buf);
395 } 395 }
396 } 396 }
397 return -EOPNOTSUPP; 397 return -EOPNOTSUPP;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f155e6ce8a21..ba7be195803c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -82,6 +82,7 @@ nla_put_failure:
82 */ 82 */
83void br_ifinfo_notify(int event, struct net_bridge_port *port) 83void br_ifinfo_notify(int event, struct net_bridge_port *port)
84{ 84{
85 struct net *net = dev_net(port->dev);
85 struct sk_buff *skb; 86 struct sk_buff *skb;
86 int err = -ENOBUFS; 87 int err = -ENOBUFS;
87 88
@@ -97,10 +98,10 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
97 kfree_skb(skb); 98 kfree_skb(skb);
98 goto errout; 99 goto errout;
99 } 100 }
100 err = rtnl_notify(skb, &init_net,0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 101 err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
101errout: 102errout:
102 if (err < 0) 103 if (err < 0)
103 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err); 104 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
104} 105}
105 106
106/* 107/*
@@ -112,11 +113,8 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
112 struct net_device *dev; 113 struct net_device *dev;
113 int idx; 114 int idx;
114 115
115 if (net != &init_net)
116 return 0;
117
118 idx = 0; 116 idx = 0;
119 for_each_netdev(&init_net, dev) { 117 for_each_netdev(net, dev) {
120 /* not a bridge port */ 118 /* not a bridge port */
121 if (dev->br_port == NULL || idx < cb->args[0]) 119 if (dev->br_port == NULL || idx < cb->args[0])
122 goto skip; 120 goto skip;
@@ -147,9 +145,6 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
147 struct net_bridge_port *p; 145 struct net_bridge_port *p;
148 u8 new_state; 146 u8 new_state;
149 147
150 if (net != &init_net)
151 return -EINVAL;
152
153 if (nlmsg_len(nlh) < sizeof(*ifm)) 148 if (nlmsg_len(nlh) < sizeof(*ifm))
154 return -EINVAL; 149 return -EINVAL;
155 150
@@ -165,7 +160,7 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
165 if (new_state > BR_STATE_BLOCKING) 160 if (new_state > BR_STATE_BLOCKING)
166 return -EINVAL; 161 return -EINVAL;
167 162
168 dev = __dev_get_by_index(&init_net, ifm->ifi_index); 163 dev = __dev_get_by_index(net, ifm->ifi_index);
169 if (!dev) 164 if (!dev)
170 return -ENODEV; 165 return -ENODEV;
171 166
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 76340bdd052e..763a3ec292e5 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -35,9 +35,6 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
35 struct net_bridge_port *p = dev->br_port; 35 struct net_bridge_port *p = dev->br_port;
36 struct net_bridge *br; 36 struct net_bridge *br;
37 37
38 if (!net_eq(dev_net(dev), &init_net))
39 return NOTIFY_DONE;
40
41 /* not a port of a bridge */ 38 /* not a port of a bridge */
42 if (p == NULL) 39 if (p == NULL)
43 return NOTIFY_DONE; 40 return NOTIFY_DONE;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c3dc18ddc043..b6c3b71974dc 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -178,9 +178,9 @@ extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb);
178 178
179/* br_if.c */ 179/* br_if.c */
180extern void br_port_carrier_check(struct net_bridge_port *p); 180extern void br_port_carrier_check(struct net_bridge_port *p);
181extern int br_add_bridge(const char *name); 181extern int br_add_bridge(struct net *net, const char *name);
182extern int br_del_bridge(const char *name); 182extern int br_del_bridge(struct net *net, const char *name);
183extern void br_cleanup_bridges(void); 183extern void br_net_exit(struct net *net);
184extern int br_add_if(struct net_bridge *br, 184extern int br_add_if(struct net_bridge *br,
185 struct net_device *dev); 185 struct net_device *dev);
186extern int br_del_if(struct net_bridge *br, 186extern int br_del_if(struct net_bridge *br,
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 8b200f96f722..81ae40b3f655 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -140,9 +140,6 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
140 struct net_bridge *br; 140 struct net_bridge *br;
141 const unsigned char *buf; 141 const unsigned char *buf;
142 142
143 if (!net_eq(dev_net(dev), &init_net))
144 goto err;
145
146 if (!p) 143 if (!p)
147 goto err; 144 goto err;
148 145
diff --git a/net/core/dev.c b/net/core/dev.c
index e719ed29310f..f48d1b24f9ce 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4668,6 +4668,12 @@ int netdev_compute_features(unsigned long all, unsigned long one)
4668 one |= NETIF_F_GSO_SOFTWARE; 4668 one |= NETIF_F_GSO_SOFTWARE;
4669 one |= NETIF_F_GSO; 4669 one |= NETIF_F_GSO;
4670 4670
4671 /*
4672 * If even one device supports a GSO protocol with software fallback,
4673 * enable it for all.
4674 */
4675 all |= one & NETIF_F_GSO_SOFTWARE;
4676
4671 /* If even one device supports robust GSO, enable it for all. */ 4677 /* If even one device supports robust GSO, enable it for all. */
4672 if (one & NETIF_F_GSO_ROBUST) 4678 if (one & NETIF_F_GSO_ROBUST)
4673 all |= NETIF_F_GSO_ROBUST; 4679 all |= NETIF_F_GSO_ROBUST;
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 8e9580874216..9a430734530c 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -783,7 +783,7 @@ static struct ccid_operations ccid2 = {
783}; 783};
784 784
785#ifdef CONFIG_IP_DCCP_CCID2_DEBUG 785#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
786module_param(ccid2_debug, bool, 0444); 786module_param(ccid2_debug, bool, 0644);
787MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); 787MODULE_PARM_DESC(ccid2_debug, "Enable debug messages");
788#endif 788#endif
789 789
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index f6756e0c9e69..3b8bd7ca6761 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -963,7 +963,7 @@ static struct ccid_operations ccid3 = {
963}; 963};
964 964
965#ifdef CONFIG_IP_DCCP_CCID3_DEBUG 965#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
966module_param(ccid3_debug, bool, 0444); 966module_param(ccid3_debug, bool, 0644);
967MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); 967MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
968#endif 968#endif
969 969
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index bcd6ac415bb9..5b3ce0688c5c 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -67,7 +67,10 @@ static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh)
67 u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0; 67 u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0;
68 int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */ 68 int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */
69 69
70 for (i=0; i <= k; i++) { 70 if (k <= 0)
71 return;
72
73 for (i = 0; i <= k; i++) {
71 i_i = tfrc_lh_get_interval(lh, i); 74 i_i = tfrc_lh_get_interval(lh, i);
72 75
73 if (i < k) { 76 if (i < k) {
@@ -78,7 +81,6 @@ static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh)
78 i_tot1 += i_i * tfrc_lh_weights[i-1]; 81 i_tot1 += i_i * tfrc_lh_weights[i-1];
79 } 82 }
80 83
81 BUG_ON(w_tot == 0);
82 lh->i_mean = max(i_tot0, i_tot1) / w_tot; 84 lh->i_mean = max(i_tot0, i_tot1) / w_tot;
83} 85}
84 86
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 97ecec0a8e76..185916218e07 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -10,7 +10,7 @@
10 10
11#ifdef CONFIG_IP_DCCP_TFRC_DEBUG 11#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
12int tfrc_debug; 12int tfrc_debug;
13module_param(tfrc_debug, bool, 0444); 13module_param(tfrc_debug, bool, 0644);
14MODULE_PARM_DESC(tfrc_debug, "Enable debug messages"); 14MODULE_PARM_DESC(tfrc_debug, "Enable debug messages");
15#endif 15#endif
16 16
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 803933ab396d..779d0ed9ae94 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -370,7 +370,7 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
370 goto discard; 370 goto discard;
371 371
372 if (dccp_parse_options(sk, NULL, skb)) 372 if (dccp_parse_options(sk, NULL, skb))
373 goto discard; 373 return 1;
374 374
375 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 375 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
376 dccp_event_ack_recv(sk, skb); 376 dccp_event_ack_recv(sk, skb);
@@ -610,7 +610,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
610 * Step 8: Process options and mark acknowledgeable 610 * Step 8: Process options and mark acknowledgeable
611 */ 611 */
612 if (dccp_parse_options(sk, NULL, skb)) 612 if (dccp_parse_options(sk, NULL, skb))
613 goto discard; 613 return 1;
614 614
615 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 615 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
616 dccp_event_ack_recv(sk, skb); 616 dccp_event_ack_recv(sk, skb);
diff --git a/net/dccp/options.c b/net/dccp/options.c
index dc7c158a2f4b..0809b63cb055 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -81,11 +81,11 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
81 /* Check if this isn't a single byte option */ 81 /* Check if this isn't a single byte option */
82 if (opt > DCCPO_MAX_RESERVED) { 82 if (opt > DCCPO_MAX_RESERVED) {
83 if (opt_ptr == opt_end) 83 if (opt_ptr == opt_end)
84 goto out_invalid_option; 84 goto out_nonsensical_length;
85 85
86 len = *opt_ptr++; 86 len = *opt_ptr++;
87 if (len < 3) 87 if (len < 2)
88 goto out_invalid_option; 88 goto out_nonsensical_length;
89 /* 89 /*
90 * Remove the type and len fields, leaving 90 * Remove the type and len fields, leaving
91 * just the value size 91 * just the value size
@@ -95,7 +95,7 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
95 opt_ptr += len; 95 opt_ptr += len;
96 96
97 if (opt_ptr > opt_end) 97 if (opt_ptr > opt_end)
98 goto out_invalid_option; 98 goto out_nonsensical_length;
99 } 99 }
100 100
101 /* 101 /*
@@ -283,12 +283,17 @@ ignore_option:
283 if (mandatory) 283 if (mandatory)
284 goto out_invalid_option; 284 goto out_invalid_option;
285 285
286out_nonsensical_length:
287 /* RFC 4340, 5.8: ignore option and all remaining option space */
286 return 0; 288 return 0;
287 289
288out_invalid_option: 290out_invalid_option:
289 DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT); 291 DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT);
290 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR; 292 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR;
291 DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len); 293 DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len);
294 DCCP_SKB_CB(skb)->dccpd_reset_data[0] = opt;
295 DCCP_SKB_CB(skb)->dccpd_reset_data[1] = len > 0 ? value[0] : 0;
296 DCCP_SKB_CB(skb)->dccpd_reset_data[2] = len > 1 ? value[1] : 0;
292 return -1; 297 return -1;
293} 298}
294 299
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 1ca3b26eed0f..d0bd34819761 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -309,7 +309,9 @@ int dccp_disconnect(struct sock *sk, int flags)
309 sk->sk_err = ECONNRESET; 309 sk->sk_err = ECONNRESET;
310 310
311 dccp_clear_xmit_timers(sk); 311 dccp_clear_xmit_timers(sk);
312
312 __skb_queue_purge(&sk->sk_receive_queue); 313 __skb_queue_purge(&sk->sk_receive_queue);
314 __skb_queue_purge(&sk->sk_write_queue);
313 if (sk->sk_send_head != NULL) { 315 if (sk->sk_send_head != NULL) {
314 __kfree_skb(sk->sk_send_head); 316 __kfree_skb(sk->sk_send_head);
315 sk->sk_send_head = NULL; 317 sk->sk_send_head = NULL;
@@ -1028,7 +1030,7 @@ MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
1028 1030
1029#ifdef CONFIG_IP_DCCP_DEBUG 1031#ifdef CONFIG_IP_DCCP_DEBUG
1030int dccp_debug; 1032int dccp_debug;
1031module_param(dccp_debug, bool, 0444); 1033module_param(dccp_debug, bool, 0644);
1032MODULE_PARM_DESC(dccp_debug, "Enable debug messages"); 1034MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
1033 1035
1034EXPORT_SYMBOL_GPL(dccp_debug); 1036EXPORT_SYMBOL_GPL(dccp_debug);
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
index 3bca97f55d47..949772a5a7dc 100644
--- a/net/ieee80211/ieee80211_module.c
+++ b/net/ieee80211/ieee80211_module.c
@@ -157,7 +157,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
157 err = ieee80211_networks_allocate(ieee); 157 err = ieee80211_networks_allocate(ieee);
158 if (err) { 158 if (err) {
159 IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err); 159 IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err);
160 goto failed; 160 goto failed_free_netdev;
161 } 161 }
162 ieee80211_networks_initialize(ieee); 162 ieee80211_networks_initialize(ieee);
163 163
@@ -193,9 +193,9 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
193 193
194 return dev; 194 return dev;
195 195
196 failed: 196failed_free_netdev:
197 if (dev) 197 free_netdev(dev);
198 free_netdev(dev); 198failed:
199 return NULL; 199 return NULL;
200} 200}
201 201
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c10036e7a463..89cb047ab314 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -782,11 +782,15 @@ skip_listen_ht:
782 struct sock *sk; 782 struct sock *sk;
783 struct hlist_node *node; 783 struct hlist_node *node;
784 784
785 num = 0;
786
787 if (hlist_empty(&head->chain) && hlist_empty(&head->twchain))
788 continue;
789
785 if (i > s_i) 790 if (i > s_i)
786 s_num = 0; 791 s_num = 0;
787 792
788 read_lock_bh(lock); 793 read_lock_bh(lock);
789 num = 0;
790 sk_for_each(sk, node, &head->chain) { 794 sk_for_each(sk, node, &head->chain) {
791 struct inet_sock *inet = inet_sk(sk); 795 struct inet_sock *inet = inet_sk(sk);
792 796
diff --git a/net/ipv4/ipvs/Kconfig b/net/ipv4/ipvs/Kconfig
index 09d0c3f35669..2e48a7e27223 100644
--- a/net/ipv4/ipvs/Kconfig
+++ b/net/ipv4/ipvs/Kconfig
@@ -71,14 +71,20 @@ config IP_VS_PROTO_UDP
71 This option enables support for load balancing UDP transport 71 This option enables support for load balancing UDP transport
72 protocol. Say Y if unsure. 72 protocol. Say Y if unsure.
73 73
74config IP_VS_PROTO_AH_ESP
75 bool
76 depends on UNDEFINED
77
74config IP_VS_PROTO_ESP 78config IP_VS_PROTO_ESP
75 bool "ESP load balancing support" 79 bool "ESP load balancing support"
80 select IP_VS_PROTO_AH_ESP
76 ---help--- 81 ---help---
77 This option enables support for load balancing ESP (Encapsulation 82 This option enables support for load balancing ESP (Encapsulation
78 Security Payload) transport protocol. Say Y if unsure. 83 Security Payload) transport protocol. Say Y if unsure.
79 84
80config IP_VS_PROTO_AH 85config IP_VS_PROTO_AH
81 bool "AH load balancing support" 86 bool "AH load balancing support"
87 select IP_VS_PROTO_AH_ESP
82 ---help--- 88 ---help---
83 This option enables support for load balancing AH (Authentication 89 This option enables support for load balancing AH (Authentication
84 Header) transport protocol. Say Y if unsure. 90 Header) transport protocol. Say Y if unsure.
diff --git a/net/ipv4/ipvs/Makefile b/net/ipv4/ipvs/Makefile
index 30e85de9ffff..73a46fe1fe4c 100644
--- a/net/ipv4/ipvs/Makefile
+++ b/net/ipv4/ipvs/Makefile
@@ -6,8 +6,7 @@
6ip_vs_proto-objs-y := 6ip_vs_proto-objs-y :=
7ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o 7ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o
8ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o 8ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o
9ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_ESP) += ip_vs_proto_esp.o 9ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o
10ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH) += ip_vs_proto_ah.o
11 10
12ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \ 11ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \
13 ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \ 12 ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index a7879eafc3b5..9fbf0a6d7392 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -1070,10 +1070,12 @@ static int __init ip_vs_init(void)
1070{ 1070{
1071 int ret; 1071 int ret;
1072 1072
1073 ip_vs_estimator_init();
1074
1073 ret = ip_vs_control_init(); 1075 ret = ip_vs_control_init();
1074 if (ret < 0) { 1076 if (ret < 0) {
1075 IP_VS_ERR("can't setup control.\n"); 1077 IP_VS_ERR("can't setup control.\n");
1076 goto cleanup_nothing; 1078 goto cleanup_estimator;
1077 } 1079 }
1078 1080
1079 ip_vs_protocol_init(); 1081 ip_vs_protocol_init();
@@ -1106,7 +1108,8 @@ static int __init ip_vs_init(void)
1106 cleanup_protocol: 1108 cleanup_protocol:
1107 ip_vs_protocol_cleanup(); 1109 ip_vs_protocol_cleanup();
1108 ip_vs_control_cleanup(); 1110 ip_vs_control_cleanup();
1109 cleanup_nothing: 1111 cleanup_estimator:
1112 ip_vs_estimator_cleanup();
1110 return ret; 1113 return ret;
1111} 1114}
1112 1115
@@ -1117,6 +1120,7 @@ static void __exit ip_vs_cleanup(void)
1117 ip_vs_app_cleanup(); 1120 ip_vs_app_cleanup();
1118 ip_vs_protocol_cleanup(); 1121 ip_vs_protocol_cleanup();
1119 ip_vs_control_cleanup(); 1122 ip_vs_control_cleanup();
1123 ip_vs_estimator_cleanup();
1120 IP_VS_INFO("ipvs unloaded.\n"); 1124 IP_VS_INFO("ipvs unloaded.\n");
1121} 1125}
1122 1126
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 6379705a8dcb..ede101eeec17 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -37,6 +37,7 @@
37#include <net/ip.h> 37#include <net/ip.h>
38#include <net/route.h> 38#include <net/route.h>
39#include <net/sock.h> 39#include <net/sock.h>
40#include <net/genetlink.h>
40 41
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
42 43
@@ -868,7 +869,8 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
868 svc->num_dests++; 869 svc->num_dests++;
869 870
870 /* call the update_service function of its scheduler */ 871 /* call the update_service function of its scheduler */
871 svc->scheduler->update_service(svc); 872 if (svc->scheduler->update_service)
873 svc->scheduler->update_service(svc);
872 874
873 write_unlock_bh(&__ip_vs_svc_lock); 875 write_unlock_bh(&__ip_vs_svc_lock);
874 return 0; 876 return 0;
@@ -898,7 +900,8 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
898 svc->num_dests++; 900 svc->num_dests++;
899 901
900 /* call the update_service function of its scheduler */ 902 /* call the update_service function of its scheduler */
901 svc->scheduler->update_service(svc); 903 if (svc->scheduler->update_service)
904 svc->scheduler->update_service(svc);
902 905
903 write_unlock_bh(&__ip_vs_svc_lock); 906 write_unlock_bh(&__ip_vs_svc_lock);
904 907
@@ -948,7 +951,8 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
948 IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); 951 IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);
949 952
950 /* call the update_service, because server weight may be changed */ 953 /* call the update_service, because server weight may be changed */
951 svc->scheduler->update_service(svc); 954 if (svc->scheduler->update_service)
955 svc->scheduler->update_service(svc);
952 956
953 write_unlock_bh(&__ip_vs_svc_lock); 957 write_unlock_bh(&__ip_vs_svc_lock);
954 958
@@ -1011,12 +1015,12 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
1011 */ 1015 */
1012 list_del(&dest->n_list); 1016 list_del(&dest->n_list);
1013 svc->num_dests--; 1017 svc->num_dests--;
1014 if (svcupd) { 1018
1015 /* 1019 /*
1016 * Call the update_service function of its scheduler 1020 * Call the update_service function of its scheduler
1017 */ 1021 */
1018 svc->scheduler->update_service(svc); 1022 if (svcupd && svc->scheduler->update_service)
1019 } 1023 svc->scheduler->update_service(svc);
1020} 1024}
1021 1025
1022 1026
@@ -2320,6 +2324,872 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
2320 .owner = THIS_MODULE, 2324 .owner = THIS_MODULE,
2321}; 2325};
2322 2326
2327/*
2328 * Generic Netlink interface
2329 */
2330
2331/* IPVS genetlink family */
2332static struct genl_family ip_vs_genl_family = {
2333 .id = GENL_ID_GENERATE,
2334 .hdrsize = 0,
2335 .name = IPVS_GENL_NAME,
2336 .version = IPVS_GENL_VERSION,
2337 .maxattr = IPVS_CMD_MAX,
2338};
2339
2340/* Policy used for first-level command attributes */
2341static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
2342 [IPVS_CMD_ATTR_SERVICE] = { .type = NLA_NESTED },
2343 [IPVS_CMD_ATTR_DEST] = { .type = NLA_NESTED },
2344 [IPVS_CMD_ATTR_DAEMON] = { .type = NLA_NESTED },
2345 [IPVS_CMD_ATTR_TIMEOUT_TCP] = { .type = NLA_U32 },
2346 [IPVS_CMD_ATTR_TIMEOUT_TCP_FIN] = { .type = NLA_U32 },
2347 [IPVS_CMD_ATTR_TIMEOUT_UDP] = { .type = NLA_U32 },
2348};
2349
2350/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DAEMON */
2351static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
2352 [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
2353 [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
2354 .len = IP_VS_IFNAME_MAXLEN },
2355 [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
2356};
2357
2358/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */
2359static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
2360 [IPVS_SVC_ATTR_AF] = { .type = NLA_U16 },
2361 [IPVS_SVC_ATTR_PROTOCOL] = { .type = NLA_U16 },
2362 [IPVS_SVC_ATTR_ADDR] = { .type = NLA_BINARY,
2363 .len = sizeof(union nf_inet_addr) },
2364 [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
2365 [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
2366 [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
2367 .len = IP_VS_SCHEDNAME_MAXLEN },
2368 [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
2369 .len = sizeof(struct ip_vs_flags) },
2370 [IPVS_SVC_ATTR_TIMEOUT] = { .type = NLA_U32 },
2371 [IPVS_SVC_ATTR_NETMASK] = { .type = NLA_U32 },
2372 [IPVS_SVC_ATTR_STATS] = { .type = NLA_NESTED },
2373};
2374
2375/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DEST */
2376static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
2377 [IPVS_DEST_ATTR_ADDR] = { .type = NLA_BINARY,
2378 .len = sizeof(union nf_inet_addr) },
2379 [IPVS_DEST_ATTR_PORT] = { .type = NLA_U16 },
2380 [IPVS_DEST_ATTR_FWD_METHOD] = { .type = NLA_U32 },
2381 [IPVS_DEST_ATTR_WEIGHT] = { .type = NLA_U32 },
2382 [IPVS_DEST_ATTR_U_THRESH] = { .type = NLA_U32 },
2383 [IPVS_DEST_ATTR_L_THRESH] = { .type = NLA_U32 },
2384 [IPVS_DEST_ATTR_ACTIVE_CONNS] = { .type = NLA_U32 },
2385 [IPVS_DEST_ATTR_INACT_CONNS] = { .type = NLA_U32 },
2386 [IPVS_DEST_ATTR_PERSIST_CONNS] = { .type = NLA_U32 },
2387 [IPVS_DEST_ATTR_STATS] = { .type = NLA_NESTED },
2388};
2389
2390static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
2391 struct ip_vs_stats *stats)
2392{
2393 struct nlattr *nl_stats = nla_nest_start(skb, container_type);
2394 if (!nl_stats)
2395 return -EMSGSIZE;
2396
2397 spin_lock_bh(&stats->lock);
2398
2399 NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, stats->conns);
2400 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, stats->inpkts);
2401 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, stats->outpkts);
2402 NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, stats->inbytes);
2403 NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, stats->outbytes);
2404 NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, stats->cps);
2405 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, stats->inpps);
2406 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, stats->outpps);
2407 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, stats->inbps);
2408 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, stats->outbps);
2409
2410 spin_unlock_bh(&stats->lock);
2411
2412 nla_nest_end(skb, nl_stats);
2413
2414 return 0;
2415
2416nla_put_failure:
2417 spin_unlock_bh(&stats->lock);
2418 nla_nest_cancel(skb, nl_stats);
2419 return -EMSGSIZE;
2420}
2421
2422static int ip_vs_genl_fill_service(struct sk_buff *skb,
2423 struct ip_vs_service *svc)
2424{
2425 struct nlattr *nl_service;
2426 struct ip_vs_flags flags = { .flags = svc->flags,
2427 .mask = ~0 };
2428
2429 nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
2430 if (!nl_service)
2431 return -EMSGSIZE;
2432
2433 NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, AF_INET);
2434
2435 if (svc->fwmark) {
2436 NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark);
2437 } else {
2438 NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol);
2439 NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr);
2440 NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port);
2441 }
2442
2443 NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name);
2444 NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags);
2445 NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ);
2446 NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask);
2447
2448 if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
2449 goto nla_put_failure;
2450
2451 nla_nest_end(skb, nl_service);
2452
2453 return 0;
2454
2455nla_put_failure:
2456 nla_nest_cancel(skb, nl_service);
2457 return -EMSGSIZE;
2458}
2459
2460static int ip_vs_genl_dump_service(struct sk_buff *skb,
2461 struct ip_vs_service *svc,
2462 struct netlink_callback *cb)
2463{
2464 void *hdr;
2465
2466 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
2467 &ip_vs_genl_family, NLM_F_MULTI,
2468 IPVS_CMD_NEW_SERVICE);
2469 if (!hdr)
2470 return -EMSGSIZE;
2471
2472 if (ip_vs_genl_fill_service(skb, svc) < 0)
2473 goto nla_put_failure;
2474
2475 return genlmsg_end(skb, hdr);
2476
2477nla_put_failure:
2478 genlmsg_cancel(skb, hdr);
2479 return -EMSGSIZE;
2480}
2481
2482static int ip_vs_genl_dump_services(struct sk_buff *skb,
2483 struct netlink_callback *cb)
2484{
2485 int idx = 0, i;
2486 int start = cb->args[0];
2487 struct ip_vs_service *svc;
2488
2489 mutex_lock(&__ip_vs_mutex);
2490 for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
2491 list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
2492 if (++idx <= start)
2493 continue;
2494 if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
2495 idx--;
2496 goto nla_put_failure;
2497 }
2498 }
2499 }
2500
2501 for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
2502 list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
2503 if (++idx <= start)
2504 continue;
2505 if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
2506 idx--;
2507 goto nla_put_failure;
2508 }
2509 }
2510 }
2511
2512nla_put_failure:
2513 mutex_unlock(&__ip_vs_mutex);
2514 cb->args[0] = idx;
2515
2516 return skb->len;
2517}
2518
2519static int ip_vs_genl_parse_service(struct ip_vs_service_user *usvc,
2520 struct nlattr *nla, int full_entry)
2521{
2522 struct nlattr *attrs[IPVS_SVC_ATTR_MAX + 1];
2523 struct nlattr *nla_af, *nla_port, *nla_fwmark, *nla_protocol, *nla_addr;
2524
2525 /* Parse mandatory identifying service fields first */
2526 if (nla == NULL ||
2527 nla_parse_nested(attrs, IPVS_SVC_ATTR_MAX, nla, ip_vs_svc_policy))
2528 return -EINVAL;
2529
2530 nla_af = attrs[IPVS_SVC_ATTR_AF];
2531 nla_protocol = attrs[IPVS_SVC_ATTR_PROTOCOL];
2532 nla_addr = attrs[IPVS_SVC_ATTR_ADDR];
2533 nla_port = attrs[IPVS_SVC_ATTR_PORT];
2534 nla_fwmark = attrs[IPVS_SVC_ATTR_FWMARK];
2535
2536 if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
2537 return -EINVAL;
2538
2539 /* For now, only support IPv4 */
2540 if (nla_get_u16(nla_af) != AF_INET)
2541 return -EAFNOSUPPORT;
2542
2543 if (nla_fwmark) {
2544 usvc->protocol = IPPROTO_TCP;
2545 usvc->fwmark = nla_get_u32(nla_fwmark);
2546 } else {
2547 usvc->protocol = nla_get_u16(nla_protocol);
2548 nla_memcpy(&usvc->addr, nla_addr, sizeof(usvc->addr));
2549 usvc->port = nla_get_u16(nla_port);
2550 usvc->fwmark = 0;
2551 }
2552
2553 /* If a full entry was requested, check for the additional fields */
2554 if (full_entry) {
2555 struct nlattr *nla_sched, *nla_flags, *nla_timeout,
2556 *nla_netmask;
2557 struct ip_vs_flags flags;
2558 struct ip_vs_service *svc;
2559
2560 nla_sched = attrs[IPVS_SVC_ATTR_SCHED_NAME];
2561 nla_flags = attrs[IPVS_SVC_ATTR_FLAGS];
2562 nla_timeout = attrs[IPVS_SVC_ATTR_TIMEOUT];
2563 nla_netmask = attrs[IPVS_SVC_ATTR_NETMASK];
2564
2565 if (!(nla_sched && nla_flags && nla_timeout && nla_netmask))
2566 return -EINVAL;
2567
2568 nla_memcpy(&flags, nla_flags, sizeof(flags));
2569
2570 /* prefill flags from service if it already exists */
2571 if (usvc->fwmark)
2572 svc = __ip_vs_svc_fwm_get(usvc->fwmark);
2573 else
2574 svc = __ip_vs_service_get(usvc->protocol, usvc->addr,
2575 usvc->port);
2576 if (svc) {
2577 usvc->flags = svc->flags;
2578 ip_vs_service_put(svc);
2579 } else
2580 usvc->flags = 0;
2581
2582 /* set new flags from userland */
2583 usvc->flags = (usvc->flags & ~flags.mask) |
2584 (flags.flags & flags.mask);
2585
2586 strlcpy(usvc->sched_name, nla_data(nla_sched),
2587 sizeof(usvc->sched_name));
2588 usvc->timeout = nla_get_u32(nla_timeout);
2589 usvc->netmask = nla_get_u32(nla_netmask);
2590 }
2591
2592 return 0;
2593}
2594
2595static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla)
2596{
2597 struct ip_vs_service_user usvc;
2598 int ret;
2599
2600 ret = ip_vs_genl_parse_service(&usvc, nla, 0);
2601 if (ret)
2602 return ERR_PTR(ret);
2603
2604 if (usvc.fwmark)
2605 return __ip_vs_svc_fwm_get(usvc.fwmark);
2606 else
2607 return __ip_vs_service_get(usvc.protocol, usvc.addr,
2608 usvc.port);
2609}
2610
2611static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
2612{
2613 struct nlattr *nl_dest;
2614
2615 nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST);
2616 if (!nl_dest)
2617 return -EMSGSIZE;
2618
2619 NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr);
2620 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
2621
2622 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
2623 atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
2624 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
2625 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
2626 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
2627 NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
2628 atomic_read(&dest->activeconns));
2629 NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS,
2630 atomic_read(&dest->inactconns));
2631 NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
2632 atomic_read(&dest->persistconns));
2633
2634 if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
2635 goto nla_put_failure;
2636
2637 nla_nest_end(skb, nl_dest);
2638
2639 return 0;
2640
2641nla_put_failure:
2642 nla_nest_cancel(skb, nl_dest);
2643 return -EMSGSIZE;
2644}
2645
2646static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
2647 struct netlink_callback *cb)
2648{
2649 void *hdr;
2650
2651 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
2652 &ip_vs_genl_family, NLM_F_MULTI,
2653 IPVS_CMD_NEW_DEST);
2654 if (!hdr)
2655 return -EMSGSIZE;
2656
2657 if (ip_vs_genl_fill_dest(skb, dest) < 0)
2658 goto nla_put_failure;
2659
2660 return genlmsg_end(skb, hdr);
2661
2662nla_put_failure:
2663 genlmsg_cancel(skb, hdr);
2664 return -EMSGSIZE;
2665}
2666
2667static int ip_vs_genl_dump_dests(struct sk_buff *skb,
2668 struct netlink_callback *cb)
2669{
2670 int idx = 0;
2671 int start = cb->args[0];
2672 struct ip_vs_service *svc;
2673 struct ip_vs_dest *dest;
2674 struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
2675
2676 mutex_lock(&__ip_vs_mutex);
2677
2678 /* Try to find the service for which to dump destinations */
2679 if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs,
2680 IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy))
2681 goto out_err;
2682
2683 svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]);
2684 if (IS_ERR(svc) || svc == NULL)
2685 goto out_err;
2686
2687 /* Dump the destinations */
2688 list_for_each_entry(dest, &svc->destinations, n_list) {
2689 if (++idx <= start)
2690 continue;
2691 if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) {
2692 idx--;
2693 goto nla_put_failure;
2694 }
2695 }
2696
2697nla_put_failure:
2698 cb->args[0] = idx;
2699 ip_vs_service_put(svc);
2700
2701out_err:
2702 mutex_unlock(&__ip_vs_mutex);
2703
2704 return skb->len;
2705}
2706
2707static int ip_vs_genl_parse_dest(struct ip_vs_dest_user *udest,
2708 struct nlattr *nla, int full_entry)
2709{
2710 struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1];
2711 struct nlattr *nla_addr, *nla_port;
2712
2713 /* Parse mandatory identifying destination fields first */
2714 if (nla == NULL ||
2715 nla_parse_nested(attrs, IPVS_DEST_ATTR_MAX, nla, ip_vs_dest_policy))
2716 return -EINVAL;
2717
2718 nla_addr = attrs[IPVS_DEST_ATTR_ADDR];
2719 nla_port = attrs[IPVS_DEST_ATTR_PORT];
2720
2721 if (!(nla_addr && nla_port))
2722 return -EINVAL;
2723
2724 nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
2725 udest->port = nla_get_u16(nla_port);
2726
2727 /* If a full entry was requested, check for the additional fields */
2728 if (full_entry) {
2729 struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh,
2730 *nla_l_thresh;
2731
2732 nla_fwd = attrs[IPVS_DEST_ATTR_FWD_METHOD];
2733 nla_weight = attrs[IPVS_DEST_ATTR_WEIGHT];
2734 nla_u_thresh = attrs[IPVS_DEST_ATTR_U_THRESH];
2735 nla_l_thresh = attrs[IPVS_DEST_ATTR_L_THRESH];
2736
2737 if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh))
2738 return -EINVAL;
2739
2740 udest->conn_flags = nla_get_u32(nla_fwd)
2741 & IP_VS_CONN_F_FWD_MASK;
2742 udest->weight = nla_get_u32(nla_weight);
2743 udest->u_threshold = nla_get_u32(nla_u_thresh);
2744 udest->l_threshold = nla_get_u32(nla_l_thresh);
2745 }
2746
2747 return 0;
2748}
2749
2750static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state,
2751 const char *mcast_ifn, __be32 syncid)
2752{
2753 struct nlattr *nl_daemon;
2754
2755 nl_daemon = nla_nest_start(skb, IPVS_CMD_ATTR_DAEMON);
2756 if (!nl_daemon)
2757 return -EMSGSIZE;
2758
2759 NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state);
2760 NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn);
2761 NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid);
2762
2763 nla_nest_end(skb, nl_daemon);
2764
2765 return 0;
2766
2767nla_put_failure:
2768 nla_nest_cancel(skb, nl_daemon);
2769 return -EMSGSIZE;
2770}
2771
2772static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state,
2773 const char *mcast_ifn, __be32 syncid,
2774 struct netlink_callback *cb)
2775{
2776 void *hdr;
2777 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
2778 &ip_vs_genl_family, NLM_F_MULTI,
2779 IPVS_CMD_NEW_DAEMON);
2780 if (!hdr)
2781 return -EMSGSIZE;
2782
2783 if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid))
2784 goto nla_put_failure;
2785
2786 return genlmsg_end(skb, hdr);
2787
2788nla_put_failure:
2789 genlmsg_cancel(skb, hdr);
2790 return -EMSGSIZE;
2791}
2792
2793static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
2794 struct netlink_callback *cb)
2795{
2796 mutex_lock(&__ip_vs_mutex);
2797 if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
2798 if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
2799 ip_vs_master_mcast_ifn,
2800 ip_vs_master_syncid, cb) < 0)
2801 goto nla_put_failure;
2802
2803 cb->args[0] = 1;
2804 }
2805
2806 if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
2807 if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
2808 ip_vs_backup_mcast_ifn,
2809 ip_vs_backup_syncid, cb) < 0)
2810 goto nla_put_failure;
2811
2812 cb->args[1] = 1;
2813 }
2814
2815nla_put_failure:
2816 mutex_unlock(&__ip_vs_mutex);
2817
2818 return skb->len;
2819}
2820
2821static int ip_vs_genl_new_daemon(struct nlattr **attrs)
2822{
2823 if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
2824 attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
2825 attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
2826 return -EINVAL;
2827
2828 return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
2829 nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
2830 nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
2831}
2832
2833static int ip_vs_genl_del_daemon(struct nlattr **attrs)
2834{
2835 if (!attrs[IPVS_DAEMON_ATTR_STATE])
2836 return -EINVAL;
2837
2838 return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
2839}
2840
2841static int ip_vs_genl_set_config(struct nlattr **attrs)
2842{
2843 struct ip_vs_timeout_user t;
2844
2845 __ip_vs_get_timeouts(&t);
2846
2847 if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
2848 t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
2849
2850 if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN])
2851 t.tcp_fin_timeout =
2852 nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]);
2853
2854 if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
2855 t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
2856
2857 return ip_vs_set_timeout(&t);
2858}
2859
2860static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
2861{
2862 struct ip_vs_service *svc = NULL;
2863 struct ip_vs_service_user usvc;
2864 struct ip_vs_dest_user udest;
2865 int ret = 0, cmd;
2866 int need_full_svc = 0, need_full_dest = 0;
2867
2868 cmd = info->genlhdr->cmd;
2869
2870 mutex_lock(&__ip_vs_mutex);
2871
2872 if (cmd == IPVS_CMD_FLUSH) {
2873 ret = ip_vs_flush();
2874 goto out;
2875 } else if (cmd == IPVS_CMD_SET_CONFIG) {
2876 ret = ip_vs_genl_set_config(info->attrs);
2877 goto out;
2878 } else if (cmd == IPVS_CMD_NEW_DAEMON ||
2879 cmd == IPVS_CMD_DEL_DAEMON) {
2880
2881 struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
2882
2883 if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
2884 nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
2885 info->attrs[IPVS_CMD_ATTR_DAEMON],
2886 ip_vs_daemon_policy)) {
2887 ret = -EINVAL;
2888 goto out;
2889 }
2890
2891 if (cmd == IPVS_CMD_NEW_DAEMON)
2892 ret = ip_vs_genl_new_daemon(daemon_attrs);
2893 else
2894 ret = ip_vs_genl_del_daemon(daemon_attrs);
2895 goto out;
2896 } else if (cmd == IPVS_CMD_ZERO &&
2897 !info->attrs[IPVS_CMD_ATTR_SERVICE]) {
2898 ret = ip_vs_zero_all();
2899 goto out;
2900 }
2901
2902 /* All following commands require a service argument, so check if we
2903 * received a valid one. We need a full service specification when
2904 * adding / editing a service. Only identifying members otherwise. */
2905 if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
2906 need_full_svc = 1;
2907
2908 ret = ip_vs_genl_parse_service(&usvc,
2909 info->attrs[IPVS_CMD_ATTR_SERVICE],
2910 need_full_svc);
2911 if (ret)
2912 goto out;
2913
2914 /* Lookup the exact service by <protocol, addr, port> or fwmark */
2915 if (usvc.fwmark == 0)
2916 svc = __ip_vs_service_get(usvc.protocol, usvc.addr, usvc.port);
2917 else
2918 svc = __ip_vs_svc_fwm_get(usvc.fwmark);
2919
2920 /* Unless we're adding a new service, the service must already exist */
2921 if ((cmd != IPVS_CMD_NEW_SERVICE) && (svc == NULL)) {
2922 ret = -ESRCH;
2923 goto out;
2924 }
2925
2926 /* Destination commands require a valid destination argument. For
2927 * adding / editing a destination, we need a full destination
2928 * specification. */
2929 if (cmd == IPVS_CMD_NEW_DEST || cmd == IPVS_CMD_SET_DEST ||
2930 cmd == IPVS_CMD_DEL_DEST) {
2931 if (cmd != IPVS_CMD_DEL_DEST)
2932 need_full_dest = 1;
2933
2934 ret = ip_vs_genl_parse_dest(&udest,
2935 info->attrs[IPVS_CMD_ATTR_DEST],
2936 need_full_dest);
2937 if (ret)
2938 goto out;
2939 }
2940
2941 switch (cmd) {
2942 case IPVS_CMD_NEW_SERVICE:
2943 if (svc == NULL)
2944 ret = ip_vs_add_service(&usvc, &svc);
2945 else
2946 ret = -EEXIST;
2947 break;
2948 case IPVS_CMD_SET_SERVICE:
2949 ret = ip_vs_edit_service(svc, &usvc);
2950 break;
2951 case IPVS_CMD_DEL_SERVICE:
2952 ret = ip_vs_del_service(svc);
2953 break;
2954 case IPVS_CMD_NEW_DEST:
2955 ret = ip_vs_add_dest(svc, &udest);
2956 break;
2957 case IPVS_CMD_SET_DEST:
2958 ret = ip_vs_edit_dest(svc, &udest);
2959 break;
2960 case IPVS_CMD_DEL_DEST:
2961 ret = ip_vs_del_dest(svc, &udest);
2962 break;
2963 case IPVS_CMD_ZERO:
2964 ret = ip_vs_zero_service(svc);
2965 break;
2966 default:
2967 ret = -EINVAL;
2968 }
2969
2970out:
2971 if (svc)
2972 ip_vs_service_put(svc);
2973 mutex_unlock(&__ip_vs_mutex);
2974
2975 return ret;
2976}
2977
2978static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
2979{
2980 struct sk_buff *msg;
2981 void *reply;
2982 int ret, cmd, reply_cmd;
2983
2984 cmd = info->genlhdr->cmd;
2985
2986 if (cmd == IPVS_CMD_GET_SERVICE)
2987 reply_cmd = IPVS_CMD_NEW_SERVICE;
2988 else if (cmd == IPVS_CMD_GET_INFO)
2989 reply_cmd = IPVS_CMD_SET_INFO;
2990 else if (cmd == IPVS_CMD_GET_CONFIG)
2991 reply_cmd = IPVS_CMD_SET_CONFIG;
2992 else {
2993 IP_VS_ERR("unknown Generic Netlink command\n");
2994 return -EINVAL;
2995 }
2996
2997 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2998 if (!msg)
2999 return -ENOMEM;
3000
3001 mutex_lock(&__ip_vs_mutex);
3002
3003 reply = genlmsg_put_reply(msg, info, &ip_vs_genl_family, 0, reply_cmd);
3004 if (reply == NULL)
3005 goto nla_put_failure;
3006
3007 switch (cmd) {
3008 case IPVS_CMD_GET_SERVICE:
3009 {
3010 struct ip_vs_service *svc;
3011
3012 svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]);
3013 if (IS_ERR(svc)) {
3014 ret = PTR_ERR(svc);
3015 goto out_err;
3016 } else if (svc) {
3017 ret = ip_vs_genl_fill_service(msg, svc);
3018 ip_vs_service_put(svc);
3019 if (ret)
3020 goto nla_put_failure;
3021 } else {
3022 ret = -ESRCH;
3023 goto out_err;
3024 }
3025
3026 break;
3027 }
3028
3029 case IPVS_CMD_GET_CONFIG:
3030 {
3031 struct ip_vs_timeout_user t;
3032
3033 __ip_vs_get_timeouts(&t);
3034#ifdef CONFIG_IP_VS_PROTO_TCP
3035 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
3036 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
3037 t.tcp_fin_timeout);
3038#endif
3039#ifdef CONFIG_IP_VS_PROTO_UDP
3040 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout);
3041#endif
3042
3043 break;
3044 }
3045
3046 case IPVS_CMD_GET_INFO:
3047 NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
3048 NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
3049 IP_VS_CONN_TAB_SIZE);
3050 break;
3051 }
3052
3053 genlmsg_end(msg, reply);
3054 ret = genlmsg_unicast(msg, info->snd_pid);
3055 goto out;
3056
3057nla_put_failure:
3058 IP_VS_ERR("not enough space in Netlink message\n");
3059 ret = -EMSGSIZE;
3060
3061out_err:
3062 nlmsg_free(msg);
3063out:
3064 mutex_unlock(&__ip_vs_mutex);
3065
3066 return ret;
3067}
3068
3069
3070static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
3071 {
3072 .cmd = IPVS_CMD_NEW_SERVICE,
3073 .flags = GENL_ADMIN_PERM,
3074 .policy = ip_vs_cmd_policy,
3075 .doit = ip_vs_genl_set_cmd,
3076 },
3077 {
3078 .cmd = IPVS_CMD_SET_SERVICE,
3079 .flags = GENL_ADMIN_PERM,
3080 .policy = ip_vs_cmd_policy,
3081 .doit = ip_vs_genl_set_cmd,
3082 },
3083 {
3084 .cmd = IPVS_CMD_DEL_SERVICE,
3085 .flags = GENL_ADMIN_PERM,
3086 .policy = ip_vs_cmd_policy,
3087 .doit = ip_vs_genl_set_cmd,
3088 },
3089 {
3090 .cmd = IPVS_CMD_GET_SERVICE,
3091 .flags = GENL_ADMIN_PERM,
3092 .doit = ip_vs_genl_get_cmd,
3093 .dumpit = ip_vs_genl_dump_services,
3094 .policy = ip_vs_cmd_policy,
3095 },
3096 {
3097 .cmd = IPVS_CMD_NEW_DEST,
3098 .flags = GENL_ADMIN_PERM,
3099 .policy = ip_vs_cmd_policy,
3100 .doit = ip_vs_genl_set_cmd,
3101 },
3102 {
3103 .cmd = IPVS_CMD_SET_DEST,
3104 .flags = GENL_ADMIN_PERM,
3105 .policy = ip_vs_cmd_policy,
3106 .doit = ip_vs_genl_set_cmd,
3107 },
3108 {
3109 .cmd = IPVS_CMD_DEL_DEST,
3110 .flags = GENL_ADMIN_PERM,
3111 .policy = ip_vs_cmd_policy,
3112 .doit = ip_vs_genl_set_cmd,
3113 },
3114 {
3115 .cmd = IPVS_CMD_GET_DEST,
3116 .flags = GENL_ADMIN_PERM,
3117 .policy = ip_vs_cmd_policy,
3118 .dumpit = ip_vs_genl_dump_dests,
3119 },
3120 {
3121 .cmd = IPVS_CMD_NEW_DAEMON,
3122 .flags = GENL_ADMIN_PERM,
3123 .policy = ip_vs_cmd_policy,
3124 .doit = ip_vs_genl_set_cmd,
3125 },
3126 {
3127 .cmd = IPVS_CMD_DEL_DAEMON,
3128 .flags = GENL_ADMIN_PERM,
3129 .policy = ip_vs_cmd_policy,
3130 .doit = ip_vs_genl_set_cmd,
3131 },
3132 {
3133 .cmd = IPVS_CMD_GET_DAEMON,
3134 .flags = GENL_ADMIN_PERM,
3135 .dumpit = ip_vs_genl_dump_daemons,
3136 },
3137 {
3138 .cmd = IPVS_CMD_SET_CONFIG,
3139 .flags = GENL_ADMIN_PERM,
3140 .policy = ip_vs_cmd_policy,
3141 .doit = ip_vs_genl_set_cmd,
3142 },
3143 {
3144 .cmd = IPVS_CMD_GET_CONFIG,
3145 .flags = GENL_ADMIN_PERM,
3146 .doit = ip_vs_genl_get_cmd,
3147 },
3148 {
3149 .cmd = IPVS_CMD_GET_INFO,
3150 .flags = GENL_ADMIN_PERM,
3151 .doit = ip_vs_genl_get_cmd,
3152 },
3153 {
3154 .cmd = IPVS_CMD_ZERO,
3155 .flags = GENL_ADMIN_PERM,
3156 .policy = ip_vs_cmd_policy,
3157 .doit = ip_vs_genl_set_cmd,
3158 },
3159 {
3160 .cmd = IPVS_CMD_FLUSH,
3161 .flags = GENL_ADMIN_PERM,
3162 .doit = ip_vs_genl_set_cmd,
3163 },
3164};
3165
3166static int __init ip_vs_genl_register(void)
3167{
3168 int ret, i;
3169
3170 ret = genl_register_family(&ip_vs_genl_family);
3171 if (ret)
3172 return ret;
3173
3174 for (i = 0; i < ARRAY_SIZE(ip_vs_genl_ops); i++) {
3175 ret = genl_register_ops(&ip_vs_genl_family, &ip_vs_genl_ops[i]);
3176 if (ret)
3177 goto err_out;
3178 }
3179 return 0;
3180
3181err_out:
3182 genl_unregister_family(&ip_vs_genl_family);
3183 return ret;
3184}
3185
3186static void ip_vs_genl_unregister(void)
3187{
3188 genl_unregister_family(&ip_vs_genl_family);
3189}
3190
3191/* End of Generic Netlink interface definitions */
3192
2323 3193
2324int __init ip_vs_control_init(void) 3194int __init ip_vs_control_init(void)
2325{ 3195{
@@ -2334,6 +3204,13 @@ int __init ip_vs_control_init(void)
2334 return ret; 3204 return ret;
2335 } 3205 }
2336 3206
3207 ret = ip_vs_genl_register();
3208 if (ret) {
3209 IP_VS_ERR("cannot register Generic Netlink interface.\n");
3210 nf_unregister_sockopt(&ip_vs_sockopts);
3211 return ret;
3212 }
3213
2337 proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops); 3214 proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops);
2338 proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops); 3215 proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops);
2339 3216
@@ -2368,6 +3245,7 @@ void ip_vs_control_cleanup(void)
2368 unregister_sysctl_table(sysctl_header); 3245 unregister_sysctl_table(sysctl_header);
2369 proc_net_remove(&init_net, "ip_vs_stats"); 3246 proc_net_remove(&init_net, "ip_vs_stats");
2370 proc_net_remove(&init_net, "ip_vs"); 3247 proc_net_remove(&init_net, "ip_vs");
3248 ip_vs_genl_unregister();
2371 nf_unregister_sockopt(&ip_vs_sockopts); 3249 nf_unregister_sockopt(&ip_vs_sockopts);
2372 LeaveFunction(2); 3250 LeaveFunction(2);
2373} 3251}
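
The new ip_vs_genl_register() above uses the usual register-then-roll-back idiom: the family is registered once, each op is attached in a loop, and a failure anywhere unregisters the family (which also drops the ops already attached to it). A minimal userspace sketch of that idiom — register_family(), register_op() and unregister_family() are hypothetical stand-ins for the genetlink calls, not real APIs:

	#include <stddef.h>

	struct op { int cmd; };

	/* trivial stubs so the sketch compiles and runs */
	static int register_family(void)            { return 0; }
	static int register_op(const struct op *op) { return op->cmd < 0 ? -1 : 0; }
	static void unregister_family(void)         { }

	static const struct op ops[] = { { 1 }, { 2 }, { 3 } };

	static int register_all(void)
	{
		size_t i;
		int ret = register_family();

		if (ret)
			return ret;

		for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
			ret = register_op(&ops[i]);
			if (ret) {
				/* undo the family registration; ops already
				 * attached to it are dropped with it */
				unregister_family();
				return ret;
			}
		}
		return 0;
	}

	int main(void)
	{
		return register_all() ? 1 : 0;
	}

The cleanup side mirrors this: ip_vs_genl_unregister() only has to unregister the family, exactly as the error path does.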
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index 5a20f93bd7f9..4fb620ec2086 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -124,8 +124,6 @@ void ip_vs_new_estimator(struct ip_vs_stats *stats)
124 est->outbps = stats->outbps<<5; 124 est->outbps = stats->outbps<<5;
125 125
126 spin_lock_bh(&est_lock); 126 spin_lock_bh(&est_lock);
127 if (list_empty(&est_list))
128 mod_timer(&est_timer, jiffies + 2 * HZ);
129 list_add(&est->list, &est_list); 127 list_add(&est->list, &est_list);
130 spin_unlock_bh(&est_lock); 128 spin_unlock_bh(&est_lock);
131} 129}
@@ -136,11 +134,6 @@ void ip_vs_kill_estimator(struct ip_vs_stats *stats)
136 134
137 spin_lock_bh(&est_lock); 135 spin_lock_bh(&est_lock);
138 list_del(&est->list); 136 list_del(&est->list);
139 while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
140 spin_unlock_bh(&est_lock);
141 cpu_relax();
142 spin_lock_bh(&est_lock);
143 }
144 spin_unlock_bh(&est_lock); 137 spin_unlock_bh(&est_lock);
145} 138}
146 139
@@ -160,3 +153,14 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats)
160 est->inbps = 0; 153 est->inbps = 0;
161 est->outbps = 0; 154 est->outbps = 0;
162} 155}
156
157int __init ip_vs_estimator_init(void)
158{
159 mod_timer(&est_timer, jiffies + 2 * HZ);
160 return 0;
161}
162
163void ip_vs_estimator_cleanup(void)
164{
165 del_timer_sync(&est_timer);
166}
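
The estimator change replaces the arm-on-first-entry / try_to_del_timer_sync() dance with a timer that lives for the whole lifetime of the module: armed once in ip_vs_estimator_init() and stopped with del_timer_sync() on cleanup, while list add/del only needs est_lock. A minimal module-style sketch of that lifetime, using the timer API of this kernel generation (the callback takes an unsigned long); est_tick() and the 2-second period are illustrative only:

	#include <linux/module.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list est_timer;

	static void est_tick(unsigned long data)
	{
		/* walk the estimator list here ... */
		mod_timer(&est_timer, jiffies + 2 * HZ);	/* re-arm every 2 seconds */
	}

	static int __init est_example_init(void)
	{
		setup_timer(&est_timer, est_tick, 0UL);
		mod_timer(&est_timer, jiffies + 2 * HZ);	/* armed for the module's lifetime */
		return 0;
	}

	static void __exit est_example_exit(void)
	{
		del_timer_sync(&est_timer);	/* waits out a tick running on another CPU */
	}

	module_init(est_example_init);
	module_exit(est_example_exit);
	MODULE_LICENSE("GPL");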
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 7a6a319f544a..d2a43aa3fe4c 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -96,7 +96,6 @@ struct ip_vs_lblc_entry {
96 * IPVS lblc hash table 96 * IPVS lblc hash table
97 */ 97 */
98struct ip_vs_lblc_table { 98struct ip_vs_lblc_table {
99 rwlock_t lock; /* lock for this table */
100 struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ 99 struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */
101 atomic_t entries; /* number of entries */ 100 atomic_t entries; /* number of entries */
102 int max_size; /* maximum size of entries */ 101 int max_size; /* maximum size of entries */
@@ -123,31 +122,6 @@ static ctl_table vs_vars_table[] = {
123 122
124static struct ctl_table_header * sysctl_header; 123static struct ctl_table_header * sysctl_header;
125 124
126/*
127 * new/free a ip_vs_lblc_entry, which is a mapping of a destionation
128 * IP address to a server.
129 */
130static inline struct ip_vs_lblc_entry *
131ip_vs_lblc_new(__be32 daddr, struct ip_vs_dest *dest)
132{
133 struct ip_vs_lblc_entry *en;
134
135 en = kmalloc(sizeof(struct ip_vs_lblc_entry), GFP_ATOMIC);
136 if (en == NULL) {
137 IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
138 return NULL;
139 }
140
141 INIT_LIST_HEAD(&en->list);
142 en->addr = daddr;
143
144 atomic_inc(&dest->refcnt);
145 en->dest = dest;
146
147 return en;
148}
149
150
151static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) 125static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
152{ 126{
153 list_del(&en->list); 127 list_del(&en->list);
@@ -173,55 +147,66 @@ static inline unsigned ip_vs_lblc_hashkey(__be32 addr)
173 * Hash an entry in the ip_vs_lblc_table. 147 * Hash an entry in the ip_vs_lblc_table.
174 * returns bool success. 148 * returns bool success.
175 */ 149 */
176static int 150static void
177ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) 151ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
178{ 152{
179 unsigned hash; 153 unsigned hash = ip_vs_lblc_hashkey(en->addr);
180
181 if (!list_empty(&en->list)) {
182 IP_VS_ERR("ip_vs_lblc_hash(): request for already hashed, "
183 "called from %p\n", __builtin_return_address(0));
184 return 0;
185 }
186 154
187 /*
188 * Hash by destination IP address
189 */
190 hash = ip_vs_lblc_hashkey(en->addr);
191
192 write_lock(&tbl->lock);
193 list_add(&en->list, &tbl->bucket[hash]); 155 list_add(&en->list, &tbl->bucket[hash]);
194 atomic_inc(&tbl->entries); 156 atomic_inc(&tbl->entries);
195 write_unlock(&tbl->lock);
196
197 return 1;
198} 157}
199 158
200 159
201/* 160/*
202 * Get ip_vs_lblc_entry associated with supplied parameters. 161 * Get ip_vs_lblc_entry associated with supplied parameters. Called under read
162 * lock
203 */ 163 */
204static inline struct ip_vs_lblc_entry * 164static inline struct ip_vs_lblc_entry *
205ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) 165ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
206{ 166{
207 unsigned hash; 167 unsigned hash = ip_vs_lblc_hashkey(addr);
208 struct ip_vs_lblc_entry *en; 168 struct ip_vs_lblc_entry *en;
209 169
210 hash = ip_vs_lblc_hashkey(addr); 170 list_for_each_entry(en, &tbl->bucket[hash], list)
171 if (en->addr == addr)
172 return en;
211 173
212 read_lock(&tbl->lock); 174 return NULL;
175}
213 176
214 list_for_each_entry(en, &tbl->bucket[hash], list) { 177
215 if (en->addr == addr) { 178/*
216 /* HIT */ 179 * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
217 read_unlock(&tbl->lock); 180 * address to a server. Called under write lock.
218 return en; 181 */
182static inline struct ip_vs_lblc_entry *
183ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, __be32 daddr,
184 struct ip_vs_dest *dest)
185{
186 struct ip_vs_lblc_entry *en;
187
188 en = ip_vs_lblc_get(tbl, daddr);
189 if (!en) {
190 en = kmalloc(sizeof(*en), GFP_ATOMIC);
191 if (!en) {
192 IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
193 return NULL;
219 } 194 }
220 }
221 195
222 read_unlock(&tbl->lock); 196 en->addr = daddr;
197 en->lastuse = jiffies;
223 198
224 return NULL; 199 atomic_inc(&dest->refcnt);
200 en->dest = dest;
201
202 ip_vs_lblc_hash(tbl, en);
203 } else if (en->dest != dest) {
204 atomic_dec(&en->dest->refcnt);
205 atomic_inc(&dest->refcnt);
206 en->dest = dest;
207 }
208
209 return en;
225} 210}
226 211
227 212
@@ -230,30 +215,29 @@ ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
230 */ 215 */
231static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl) 216static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
232{ 217{
233 int i;
234 struct ip_vs_lblc_entry *en, *nxt; 218 struct ip_vs_lblc_entry *en, *nxt;
219 int i;
235 220
236 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { 221 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
237 write_lock(&tbl->lock);
238 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { 222 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
239 ip_vs_lblc_free(en); 223 ip_vs_lblc_free(en);
240 atomic_dec(&tbl->entries); 224 atomic_dec(&tbl->entries);
241 } 225 }
242 write_unlock(&tbl->lock);
243 } 226 }
244} 227}
245 228
246 229
247static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) 230static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
248{ 231{
232 struct ip_vs_lblc_table *tbl = svc->sched_data;
233 struct ip_vs_lblc_entry *en, *nxt;
249 unsigned long now = jiffies; 234 unsigned long now = jiffies;
250 int i, j; 235 int i, j;
251 struct ip_vs_lblc_entry *en, *nxt;
252 236
253 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { 237 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
254 j = (j + 1) & IP_VS_LBLC_TAB_MASK; 238 j = (j + 1) & IP_VS_LBLC_TAB_MASK;
255 239
256 write_lock(&tbl->lock); 240 write_lock(&svc->sched_lock);
257 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 241 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
258 if (time_before(now, 242 if (time_before(now,
259 en->lastuse + sysctl_ip_vs_lblc_expiration)) 243 en->lastuse + sysctl_ip_vs_lblc_expiration))
@@ -262,7 +246,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
262 ip_vs_lblc_free(en); 246 ip_vs_lblc_free(en);
263 atomic_dec(&tbl->entries); 247 atomic_dec(&tbl->entries);
264 } 248 }
265 write_unlock(&tbl->lock); 249 write_unlock(&svc->sched_lock);
266 } 250 }
267 tbl->rover = j; 251 tbl->rover = j;
268} 252}
@@ -281,17 +265,16 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
281 */ 265 */
282static void ip_vs_lblc_check_expire(unsigned long data) 266static void ip_vs_lblc_check_expire(unsigned long data)
283{ 267{
284 struct ip_vs_lblc_table *tbl; 268 struct ip_vs_service *svc = (struct ip_vs_service *) data;
269 struct ip_vs_lblc_table *tbl = svc->sched_data;
285 unsigned long now = jiffies; 270 unsigned long now = jiffies;
286 int goal; 271 int goal;
287 int i, j; 272 int i, j;
288 struct ip_vs_lblc_entry *en, *nxt; 273 struct ip_vs_lblc_entry *en, *nxt;
289 274
290 tbl = (struct ip_vs_lblc_table *)data;
291
292 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { 275 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
293 /* do full expiration check */ 276 /* do full expiration check */
294 ip_vs_lblc_full_check(tbl); 277 ip_vs_lblc_full_check(svc);
295 tbl->counter = 1; 278 tbl->counter = 1;
296 goto out; 279 goto out;
297 } 280 }
@@ -308,7 +291,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
308 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { 291 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
309 j = (j + 1) & IP_VS_LBLC_TAB_MASK; 292 j = (j + 1) & IP_VS_LBLC_TAB_MASK;
310 293
311 write_lock(&tbl->lock); 294 write_lock(&svc->sched_lock);
312 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 295 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
313 if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) 296 if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
314 continue; 297 continue;
@@ -317,7 +300,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
317 atomic_dec(&tbl->entries); 300 atomic_dec(&tbl->entries);
318 goal--; 301 goal--;
319 } 302 }
320 write_unlock(&tbl->lock); 303 write_unlock(&svc->sched_lock);
321 if (goal <= 0) 304 if (goal <= 0)
322 break; 305 break;
323 } 306 }
@@ -336,15 +319,14 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
336 /* 319 /*
337 * Allocate the ip_vs_lblc_table for this service 320 * Allocate the ip_vs_lblc_table for this service
338 */ 321 */
339 tbl = kmalloc(sizeof(struct ip_vs_lblc_table), GFP_ATOMIC); 322 tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
340 if (tbl == NULL) { 323 if (tbl == NULL) {
341 IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n"); 324 IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n");
342 return -ENOMEM; 325 return -ENOMEM;
343 } 326 }
344 svc->sched_data = tbl; 327 svc->sched_data = tbl;
345 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for " 328 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
346 "current service\n", 329 "current service\n", sizeof(*tbl));
347 sizeof(struct ip_vs_lblc_table));
348 330
349 /* 331 /*
350 * Initialize the hash buckets 332 * Initialize the hash buckets
@@ -352,7 +334,6 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
352 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { 334 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
353 INIT_LIST_HEAD(&tbl->bucket[i]); 335 INIT_LIST_HEAD(&tbl->bucket[i]);
354 } 336 }
355 rwlock_init(&tbl->lock);
356 tbl->max_size = IP_VS_LBLC_TAB_SIZE*16; 337 tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
357 tbl->rover = 0; 338 tbl->rover = 0;
358 tbl->counter = 1; 339 tbl->counter = 1;
@@ -361,9 +342,8 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
361 * Hook periodic timer for garbage collection 342 * Hook periodic timer for garbage collection
362 */ 343 */
363 setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire, 344 setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire,
364 (unsigned long)tbl); 345 (unsigned long)svc);
365 tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; 346 mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
366 add_timer(&tbl->periodic_timer);
367 347
368 return 0; 348 return 0;
369} 349}
@@ -380,22 +360,16 @@ static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
380 ip_vs_lblc_flush(tbl); 360 ip_vs_lblc_flush(tbl);
381 361
382 /* release the table itself */ 362 /* release the table itself */
383 kfree(svc->sched_data); 363 kfree(tbl);
384 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n", 364 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
385 sizeof(struct ip_vs_lblc_table)); 365 sizeof(*tbl));
386 366
387 return 0; 367 return 0;
388} 368}
389 369
390 370
391static int ip_vs_lblc_update_svc(struct ip_vs_service *svc)
392{
393 return 0;
394}
395
396
397static inline struct ip_vs_dest * 371static inline struct ip_vs_dest *
398__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) 372__ip_vs_lblc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
399{ 373{
400 struct ip_vs_dest *dest, *least; 374 struct ip_vs_dest *dest, *least;
401 int loh, doh; 375 int loh, doh;
@@ -484,46 +458,54 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
484static struct ip_vs_dest * 458static struct ip_vs_dest *
485ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) 459ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
486{ 460{
487 struct ip_vs_dest *dest; 461 struct ip_vs_lblc_table *tbl = svc->sched_data;
488 struct ip_vs_lblc_table *tbl;
489 struct ip_vs_lblc_entry *en;
490 struct iphdr *iph = ip_hdr(skb); 462 struct iphdr *iph = ip_hdr(skb);
463 struct ip_vs_dest *dest = NULL;
464 struct ip_vs_lblc_entry *en;
491 465
492 IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n"); 466 IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n");
493 467
494 tbl = (struct ip_vs_lblc_table *)svc->sched_data; 468 /* First look in our cache */
469 read_lock(&svc->sched_lock);
495 en = ip_vs_lblc_get(tbl, iph->daddr); 470 en = ip_vs_lblc_get(tbl, iph->daddr);
496 if (en == NULL) { 471 if (en) {
497 dest = __ip_vs_wlc_schedule(svc, iph); 472 /* We only hold a read lock, but this is atomic */
498 if (dest == NULL) { 473 en->lastuse = jiffies;
499 IP_VS_DBG(1, "no destination available\n"); 474
500 return NULL; 475 /*
501 } 476 * If the destination is not available, i.e. it's in the trash,
502 en = ip_vs_lblc_new(iph->daddr, dest); 477 * we must ignore it, as it may be removed from under our feet,
503 if (en == NULL) { 478 * if someone drops our reference count. Our caller only makes
504 return NULL; 479 * sure that destinations, that are not in the trash, are not
505 } 480 * moved to the trash, while we are scheduling. But anyone can
506 ip_vs_lblc_hash(tbl, en); 481 * free up entries from the trash at any time.
507 } else { 482 */
508 dest = en->dest; 483
509 if (!(dest->flags & IP_VS_DEST_F_AVAILABLE) 484 if (en->dest->flags & IP_VS_DEST_F_AVAILABLE)
510 || atomic_read(&dest->weight) <= 0 485 dest = en->dest;
511 || is_overloaded(dest, svc)) { 486 }
512 dest = __ip_vs_wlc_schedule(svc, iph); 487 read_unlock(&svc->sched_lock);
513 if (dest == NULL) { 488
514 IP_VS_DBG(1, "no destination available\n"); 489 /* If the destination has a weight and is not overloaded, use it */
515 return NULL; 490 if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
516 } 491 goto out;
517 atomic_dec(&en->dest->refcnt); 492
518 atomic_inc(&dest->refcnt); 493 /* No cache entry or it is invalid, time to schedule */
519 en->dest = dest; 494 dest = __ip_vs_lblc_schedule(svc, iph);
520 } 495 if (!dest) {
496 IP_VS_DBG(1, "no destination available\n");
497 return NULL;
521 } 498 }
522 en->lastuse = jiffies;
523 499
500 /* If we fail to create a cache entry, we'll just use the valid dest */
501 write_lock(&svc->sched_lock);
502 ip_vs_lblc_new(tbl, iph->daddr, dest);
503 write_unlock(&svc->sched_lock);
504
505out:
524 IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u " 506 IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u "
525 "--> server %u.%u.%u.%u:%d\n", 507 "--> server %u.%u.%u.%u:%d\n",
526 NIPQUAD(en->addr), 508 NIPQUAD(iph->daddr),
527 NIPQUAD(dest->addr), 509 NIPQUAD(dest->addr),
528 ntohs(dest->port)); 510 ntohs(dest->port));
529 511
@@ -542,7 +524,6 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
542 .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list), 524 .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
543 .init_service = ip_vs_lblc_init_svc, 525 .init_service = ip_vs_lblc_init_svc,
544 .done_service = ip_vs_lblc_done_svc, 526 .done_service = ip_vs_lblc_done_svc,
545 .update_service = ip_vs_lblc_update_svc,
546 .schedule = ip_vs_lblc_schedule, 527 .schedule = ip_vs_lblc_schedule,
547}; 528};
548 529
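
The reworked ip_vs_lblc_schedule() drops the per-table rwlock in favour of svc->sched_lock: the cache is probed under the read lock, and only a miss (or a stale destination) takes the write lock, where ip_vs_lblc_new() looks the entry up again before inserting, since the lock was dropped in between. A userspace sketch of that lookup-then-insert discipline, using a pthread rwlock and a hypothetical single-list table in place of the hashed buckets:

	#include <pthread.h>
	#include <stdlib.h>

	struct entry { unsigned int addr; struct entry *next; };

	static struct entry *table;
	static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

	static struct entry *lookup(unsigned int addr)	/* caller holds table_lock */
	{
		struct entry *e;

		for (e = table; e; e = e->next)
			if (e->addr == addr)
				return e;
		return NULL;
	}

	static struct entry *lookup_or_insert(unsigned int addr)
	{
		struct entry *e;

		pthread_rwlock_rdlock(&table_lock);	/* fast path: read lock only */
		e = lookup(addr);
		pthread_rwlock_unlock(&table_lock);
		if (e)
			return e;

		pthread_rwlock_wrlock(&table_lock);
		e = lookup(addr);			/* re-check: the lock was dropped */
		if (!e) {
			e = calloc(1, sizeof(*e));
			if (e) {
				e->addr = addr;
				e->next = table;
				table = e;
			}
		}
		pthread_rwlock_unlock(&table_lock);
		return e;
	}

	int main(void)
	{
		return lookup_or_insert(0x0a000001) ? 0 : 1;
	}

In the real scheduler the re-check also covers the "entry exists but points at another dest" case, which ip_vs_lblc_new() fixes up under the same write lock.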
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index c234e73968a6..375a1ffb6b65 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -106,7 +106,7 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
106 return NULL; 106 return NULL;
107 } 107 }
108 108
109 e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC); 109 e = kmalloc(sizeof(*e), GFP_ATOMIC);
110 if (e == NULL) { 110 if (e == NULL) {
111 IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n"); 111 IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
112 return NULL; 112 return NULL;
@@ -116,11 +116,9 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
116 e->dest = dest; 116 e->dest = dest;
117 117
118 /* link it to the list */ 118 /* link it to the list */
119 write_lock(&set->lock);
120 e->next = set->list; 119 e->next = set->list;
121 set->list = e; 120 set->list = e;
122 atomic_inc(&set->size); 121 atomic_inc(&set->size);
123 write_unlock(&set->lock);
124 122
125 set->lastmod = jiffies; 123 set->lastmod = jiffies;
126 return e; 124 return e;
@@ -131,7 +129,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
131{ 129{
132 struct ip_vs_dest_list *e, **ep; 130 struct ip_vs_dest_list *e, **ep;
133 131
134 write_lock(&set->lock);
135 for (ep=&set->list, e=*ep; e!=NULL; e=*ep) { 132 for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
136 if (e->dest == dest) { 133 if (e->dest == dest) {
137 /* HIT */ 134 /* HIT */
@@ -144,7 +141,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
144 } 141 }
145 ep = &e->next; 142 ep = &e->next;
146 } 143 }
147 write_unlock(&set->lock);
148} 144}
149 145
150static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set) 146static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
@@ -174,7 +170,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
174 if (set == NULL) 170 if (set == NULL)
175 return NULL; 171 return NULL;
176 172
177 read_lock(&set->lock);
178 /* select the first destination server, whose weight > 0 */ 173 /* select the first destination server, whose weight > 0 */
179 for (e=set->list; e!=NULL; e=e->next) { 174 for (e=set->list; e!=NULL; e=e->next) {
180 least = e->dest; 175 least = e->dest;
@@ -188,7 +183,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
188 goto nextstage; 183 goto nextstage;
189 } 184 }
190 } 185 }
191 read_unlock(&set->lock);
192 return NULL; 186 return NULL;
193 187
194 /* find the destination with the weighted least load */ 188 /* find the destination with the weighted least load */
@@ -207,7 +201,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
207 loh = doh; 201 loh = doh;
208 } 202 }
209 } 203 }
210 read_unlock(&set->lock);
211 204
212 IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d " 205 IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d "
213 "activeconns %d refcnt %d weight %d overhead %d\n", 206 "activeconns %d refcnt %d weight %d overhead %d\n",
@@ -229,7 +222,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
229 if (set == NULL) 222 if (set == NULL)
230 return NULL; 223 return NULL;
231 224
232 read_lock(&set->lock);
233 /* select the first destination server, whose weight > 0 */ 225 /* select the first destination server, whose weight > 0 */
234 for (e=set->list; e!=NULL; e=e->next) { 226 for (e=set->list; e!=NULL; e=e->next) {
235 most = e->dest; 227 most = e->dest;
@@ -239,7 +231,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
239 goto nextstage; 231 goto nextstage;
240 } 232 }
241 } 233 }
242 read_unlock(&set->lock);
243 return NULL; 234 return NULL;
244 235
245 /* find the destination with the weighted most load */ 236 /* find the destination with the weighted most load */
@@ -256,7 +247,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
256 moh = doh; 247 moh = doh;
257 } 248 }
258 } 249 }
259 read_unlock(&set->lock);
260 250
261 IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d " 251 IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d "
262 "activeconns %d refcnt %d weight %d overhead %d\n", 252 "activeconns %d refcnt %d weight %d overhead %d\n",
@@ -284,7 +274,6 @@ struct ip_vs_lblcr_entry {
284 * IPVS lblcr hash table 274 * IPVS lblcr hash table
285 */ 275 */
286struct ip_vs_lblcr_table { 276struct ip_vs_lblcr_table {
287 rwlock_t lock; /* lock for this table */
288 struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ 277 struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */
289 atomic_t entries; /* number of entries */ 278 atomic_t entries; /* number of entries */
290 int max_size; /* maximum size of entries */ 279 int max_size; /* maximum size of entries */
@@ -311,32 +300,6 @@ static ctl_table vs_vars_table[] = {
311 300
312static struct ctl_table_header * sysctl_header; 301static struct ctl_table_header * sysctl_header;
313 302
314/*
315 * new/free a ip_vs_lblcr_entry, which is a mapping of a destination
316 * IP address to a server.
317 */
318static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__be32 daddr)
319{
320 struct ip_vs_lblcr_entry *en;
321
322 en = kmalloc(sizeof(struct ip_vs_lblcr_entry), GFP_ATOMIC);
323 if (en == NULL) {
324 IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
325 return NULL;
326 }
327
328 INIT_LIST_HEAD(&en->list);
329 en->addr = daddr;
330
331 /* initilize its dest set */
332 atomic_set(&(en->set.size), 0);
333 en->set.list = NULL;
334 rwlock_init(&en->set.lock);
335
336 return en;
337}
338
339
340static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en) 303static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
341{ 304{
342 list_del(&en->list); 305 list_del(&en->list);
@@ -358,55 +321,68 @@ static inline unsigned ip_vs_lblcr_hashkey(__be32 addr)
358 * Hash an entry in the ip_vs_lblcr_table. 321 * Hash an entry in the ip_vs_lblcr_table.
359 * returns bool success. 322 * returns bool success.
360 */ 323 */
361static int 324static void
362ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) 325ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
363{ 326{
364 unsigned hash; 327 unsigned hash = ip_vs_lblcr_hashkey(en->addr);
365
366 if (!list_empty(&en->list)) {
367 IP_VS_ERR("ip_vs_lblcr_hash(): request for already hashed, "
368 "called from %p\n", __builtin_return_address(0));
369 return 0;
370 }
371 328
372 /*
373 * Hash by destination IP address
374 */
375 hash = ip_vs_lblcr_hashkey(en->addr);
376
377 write_lock(&tbl->lock);
378 list_add(&en->list, &tbl->bucket[hash]); 329 list_add(&en->list, &tbl->bucket[hash]);
379 atomic_inc(&tbl->entries); 330 atomic_inc(&tbl->entries);
380 write_unlock(&tbl->lock);
381
382 return 1;
383} 331}
384 332
385 333
386/* 334/*
387 * Get ip_vs_lblcr_entry associated with supplied parameters. 335 * Get ip_vs_lblcr_entry associated with supplied parameters. Called under
336 * read lock.
388 */ 337 */
389static inline struct ip_vs_lblcr_entry * 338static inline struct ip_vs_lblcr_entry *
390ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr) 339ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr)
391{ 340{
392 unsigned hash; 341 unsigned hash = ip_vs_lblcr_hashkey(addr);
393 struct ip_vs_lblcr_entry *en; 342 struct ip_vs_lblcr_entry *en;
394 343
395 hash = ip_vs_lblcr_hashkey(addr); 344 list_for_each_entry(en, &tbl->bucket[hash], list)
345 if (en->addr == addr)
346 return en;
396 347
397 read_lock(&tbl->lock); 348 return NULL;
349}
398 350
399 list_for_each_entry(en, &tbl->bucket[hash], list) { 351
400 if (en->addr == addr) { 352/*
401 /* HIT */ 353 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
402 read_unlock(&tbl->lock); 354 * IP address to a server. Called under write lock.
403 return en; 355 */
356static inline struct ip_vs_lblcr_entry *
357ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, __be32 daddr,
358 struct ip_vs_dest *dest)
359{
360 struct ip_vs_lblcr_entry *en;
361
362 en = ip_vs_lblcr_get(tbl, daddr);
363 if (!en) {
364 en = kmalloc(sizeof(*en), GFP_ATOMIC);
365 if (!en) {
366 IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
367 return NULL;
404 } 368 }
369
370 en->addr = daddr;
371 en->lastuse = jiffies;
372
373 /* initilize its dest set */
374 atomic_set(&(en->set.size), 0);
375 en->set.list = NULL;
376 rwlock_init(&en->set.lock);
377
378 ip_vs_lblcr_hash(tbl, en);
405 } 379 }
406 380
407 read_unlock(&tbl->lock); 381 write_lock(&en->set.lock);
382 ip_vs_dest_set_insert(&en->set, dest);
383 write_unlock(&en->set.lock);
408 384
409 return NULL; 385 return en;
410} 386}
411 387
412 388
@@ -418,19 +394,18 @@ static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
418 int i; 394 int i;
419 struct ip_vs_lblcr_entry *en, *nxt; 395 struct ip_vs_lblcr_entry *en, *nxt;
420 396
397 /* No locking required, only called during cleanup. */
421 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { 398 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
422 write_lock(&tbl->lock);
423 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { 399 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
424 ip_vs_lblcr_free(en); 400 ip_vs_lblcr_free(en);
425 atomic_dec(&tbl->entries);
426 } 401 }
427 write_unlock(&tbl->lock);
428 } 402 }
429} 403}
430 404
431 405
432static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) 406static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
433{ 407{
408 struct ip_vs_lblcr_table *tbl = svc->sched_data;
434 unsigned long now = jiffies; 409 unsigned long now = jiffies;
435 int i, j; 410 int i, j;
436 struct ip_vs_lblcr_entry *en, *nxt; 411 struct ip_vs_lblcr_entry *en, *nxt;
@@ -438,7 +413,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
438 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { 413 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
439 j = (j + 1) & IP_VS_LBLCR_TAB_MASK; 414 j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
440 415
441 write_lock(&tbl->lock); 416 write_lock(&svc->sched_lock);
442 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 417 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
443 if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration, 418 if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
444 now)) 419 now))
@@ -447,7 +422,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
447 ip_vs_lblcr_free(en); 422 ip_vs_lblcr_free(en);
448 atomic_dec(&tbl->entries); 423 atomic_dec(&tbl->entries);
449 } 424 }
450 write_unlock(&tbl->lock); 425 write_unlock(&svc->sched_lock);
451 } 426 }
452 tbl->rover = j; 427 tbl->rover = j;
453} 428}
@@ -466,17 +441,16 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
466 */ 441 */
467static void ip_vs_lblcr_check_expire(unsigned long data) 442static void ip_vs_lblcr_check_expire(unsigned long data)
468{ 443{
469 struct ip_vs_lblcr_table *tbl; 444 struct ip_vs_service *svc = (struct ip_vs_service *) data;
445 struct ip_vs_lblcr_table *tbl = svc->sched_data;
470 unsigned long now = jiffies; 446 unsigned long now = jiffies;
471 int goal; 447 int goal;
472 int i, j; 448 int i, j;
473 struct ip_vs_lblcr_entry *en, *nxt; 449 struct ip_vs_lblcr_entry *en, *nxt;
474 450
475 tbl = (struct ip_vs_lblcr_table *)data;
476
477 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { 451 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
478 /* do full expiration check */ 452 /* do full expiration check */
479 ip_vs_lblcr_full_check(tbl); 453 ip_vs_lblcr_full_check(svc);
480 tbl->counter = 1; 454 tbl->counter = 1;
481 goto out; 455 goto out;
482 } 456 }
@@ -493,7 +467,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
493 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { 467 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
494 j = (j + 1) & IP_VS_LBLCR_TAB_MASK; 468 j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
495 469
496 write_lock(&tbl->lock); 470 write_lock(&svc->sched_lock);
497 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 471 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
498 if (time_before(now, en->lastuse+ENTRY_TIMEOUT)) 472 if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
499 continue; 473 continue;
@@ -502,7 +476,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
502 atomic_dec(&tbl->entries); 476 atomic_dec(&tbl->entries);
503 goal--; 477 goal--;
504 } 478 }
505 write_unlock(&tbl->lock); 479 write_unlock(&svc->sched_lock);
506 if (goal <= 0) 480 if (goal <= 0)
507 break; 481 break;
508 } 482 }
@@ -520,15 +494,14 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
520 /* 494 /*
521 * Allocate the ip_vs_lblcr_table for this service 495 * Allocate the ip_vs_lblcr_table for this service
522 */ 496 */
523 tbl = kmalloc(sizeof(struct ip_vs_lblcr_table), GFP_ATOMIC); 497 tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
524 if (tbl == NULL) { 498 if (tbl == NULL) {
525 IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n"); 499 IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
526 return -ENOMEM; 500 return -ENOMEM;
527 } 501 }
528 svc->sched_data = tbl; 502 svc->sched_data = tbl;
529 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for " 503 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
530 "current service\n", 504 "current service\n", sizeof(*tbl));
531 sizeof(struct ip_vs_lblcr_table));
532 505
533 /* 506 /*
534 * Initialize the hash buckets 507 * Initialize the hash buckets
@@ -536,7 +509,6 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
536 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { 509 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
537 INIT_LIST_HEAD(&tbl->bucket[i]); 510 INIT_LIST_HEAD(&tbl->bucket[i]);
538 } 511 }
539 rwlock_init(&tbl->lock);
540 tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16; 512 tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
541 tbl->rover = 0; 513 tbl->rover = 0;
542 tbl->counter = 1; 514 tbl->counter = 1;
@@ -545,9 +517,8 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
545 * Hook periodic timer for garbage collection 517 * Hook periodic timer for garbage collection
546 */ 518 */
547 setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire, 519 setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
548 (unsigned long)tbl); 520 (unsigned long)svc);
549 tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; 521 mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
550 add_timer(&tbl->periodic_timer);
551 522
552 return 0; 523 return 0;
553} 524}
@@ -564,22 +535,16 @@ static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
564 ip_vs_lblcr_flush(tbl); 535 ip_vs_lblcr_flush(tbl);
565 536
566 /* release the table itself */ 537 /* release the table itself */
567 kfree(svc->sched_data); 538 kfree(tbl);
568 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n", 539 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
569 sizeof(struct ip_vs_lblcr_table)); 540 sizeof(*tbl));
570 541
571 return 0; 542 return 0;
572} 543}
573 544
574 545
575static int ip_vs_lblcr_update_svc(struct ip_vs_service *svc)
576{
577 return 0;
578}
579
580
581static inline struct ip_vs_dest * 546static inline struct ip_vs_dest *
582__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) 547__ip_vs_lblcr_schedule(struct ip_vs_service *svc, struct iphdr *iph)
583{ 548{
584 struct ip_vs_dest *dest, *least; 549 struct ip_vs_dest *dest, *least;
585 int loh, doh; 550 int loh, doh;
@@ -669,50 +634,78 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
669static struct ip_vs_dest * 634static struct ip_vs_dest *
670ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) 635ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
671{ 636{
672 struct ip_vs_dest *dest; 637 struct ip_vs_lblcr_table *tbl = svc->sched_data;
673 struct ip_vs_lblcr_table *tbl;
674 struct ip_vs_lblcr_entry *en;
675 struct iphdr *iph = ip_hdr(skb); 638 struct iphdr *iph = ip_hdr(skb);
639 struct ip_vs_dest *dest = NULL;
640 struct ip_vs_lblcr_entry *en;
676 641
677 IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n"); 642 IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");
678 643
679 tbl = (struct ip_vs_lblcr_table *)svc->sched_data; 644 /* First look in our cache */
645 read_lock(&svc->sched_lock);
680 en = ip_vs_lblcr_get(tbl, iph->daddr); 646 en = ip_vs_lblcr_get(tbl, iph->daddr);
681 if (en == NULL) { 647 if (en) {
682 dest = __ip_vs_wlc_schedule(svc, iph); 648 /* We only hold a read lock, but this is atomic */
683 if (dest == NULL) { 649 en->lastuse = jiffies;
684 IP_VS_DBG(1, "no destination available\n"); 650
685 return NULL; 651 /* Get the least loaded destination */
686 } 652 read_lock(&en->set.lock);
687 en = ip_vs_lblcr_new(iph->daddr);
688 if (en == NULL) {
689 return NULL;
690 }
691 ip_vs_dest_set_insert(&en->set, dest);
692 ip_vs_lblcr_hash(tbl, en);
693 } else {
694 dest = ip_vs_dest_set_min(&en->set); 653 dest = ip_vs_dest_set_min(&en->set);
695 if (!dest || is_overloaded(dest, svc)) { 654 read_unlock(&en->set.lock);
696 dest = __ip_vs_wlc_schedule(svc, iph); 655
697 if (dest == NULL) { 656 /* More than one destination + enough time passed by, cleanup */
698 IP_VS_DBG(1, "no destination available\n");
699 return NULL;
700 }
701 ip_vs_dest_set_insert(&en->set, dest);
702 }
703 if (atomic_read(&en->set.size) > 1 && 657 if (atomic_read(&en->set.size) > 1 &&
704 jiffies-en->set.lastmod > sysctl_ip_vs_lblcr_expiration) { 658 time_after(jiffies, en->set.lastmod +
659 sysctl_ip_vs_lblcr_expiration)) {
705 struct ip_vs_dest *m; 660 struct ip_vs_dest *m;
661
662 write_lock(&en->set.lock);
706 m = ip_vs_dest_set_max(&en->set); 663 m = ip_vs_dest_set_max(&en->set);
707 if (m) 664 if (m)
708 ip_vs_dest_set_erase(&en->set, m); 665 ip_vs_dest_set_erase(&en->set, m);
666 write_unlock(&en->set.lock);
667 }
668
669 /* If the destination is not overloaded, use it */
670 if (dest && !is_overloaded(dest, svc)) {
671 read_unlock(&svc->sched_lock);
672 goto out;
709 } 673 }
674
675 /* The cache entry is invalid, time to schedule */
676 dest = __ip_vs_lblcr_schedule(svc, iph);
677 if (!dest) {
678 IP_VS_DBG(1, "no destination available\n");
679 read_unlock(&svc->sched_lock);
680 return NULL;
681 }
682
683 /* Update our cache entry */
684 write_lock(&en->set.lock);
685 ip_vs_dest_set_insert(&en->set, dest);
686 write_unlock(&en->set.lock);
687 }
688 read_unlock(&svc->sched_lock);
689
690 if (dest)
691 goto out;
692
693 /* No cache entry, time to schedule */
694 dest = __ip_vs_lblcr_schedule(svc, iph);
695 if (!dest) {
696 IP_VS_DBG(1, "no destination available\n");
697 return NULL;
710 } 698 }
711 en->lastuse = jiffies;
712 699
700 /* If we fail to create a cache entry, we'll just use the valid dest */
701 write_lock(&svc->sched_lock);
702 ip_vs_lblcr_new(tbl, iph->daddr, dest);
703 write_unlock(&svc->sched_lock);
704
705out:
713 IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u " 706 IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u "
714 "--> server %u.%u.%u.%u:%d\n", 707 "--> server %u.%u.%u.%u:%d\n",
715 NIPQUAD(en->addr), 708 NIPQUAD(iph->daddr),
716 NIPQUAD(dest->addr), 709 NIPQUAD(dest->addr),
717 ntohs(dest->port)); 710 ntohs(dest->port));
718 711
@@ -731,7 +724,6 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
731 .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list), 724 .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
732 .init_service = ip_vs_lblcr_init_svc, 725 .init_service = ip_vs_lblcr_init_svc,
733 .done_service = ip_vs_lblcr_done_svc, 726 .done_service = ip_vs_lblcr_done_svc,
734 .update_service = ip_vs_lblcr_update_svc,
735 .schedule = ip_vs_lblcr_schedule, 727 .schedule = ip_vs_lblcr_schedule,
736}; 728};
737 729
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index ebcdbf75ac65..2c3de1b63518 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -20,24 +20,6 @@
20#include <net/ip_vs.h> 20#include <net/ip_vs.h>
21 21
22 22
23static int ip_vs_lc_init_svc(struct ip_vs_service *svc)
24{
25 return 0;
26}
27
28
29static int ip_vs_lc_done_svc(struct ip_vs_service *svc)
30{
31 return 0;
32}
33
34
35static int ip_vs_lc_update_svc(struct ip_vs_service *svc)
36{
37 return 0;
38}
39
40
41static inline unsigned int 23static inline unsigned int
42ip_vs_lc_dest_overhead(struct ip_vs_dest *dest) 24ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
43{ 25{
@@ -99,9 +81,6 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
99 .refcnt = ATOMIC_INIT(0), 81 .refcnt = ATOMIC_INIT(0),
100 .module = THIS_MODULE, 82 .module = THIS_MODULE,
101 .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list), 83 .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
102 .init_service = ip_vs_lc_init_svc,
103 .done_service = ip_vs_lc_done_svc,
104 .update_service = ip_vs_lc_update_svc,
105 .schedule = ip_vs_lc_schedule, 84 .schedule = ip_vs_lc_schedule,
106}; 85};
107 86
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index 92f3a6770031..5330d5a2de14 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -37,27 +37,6 @@
37#include <net/ip_vs.h> 37#include <net/ip_vs.h>
38 38
39 39
40static int
41ip_vs_nq_init_svc(struct ip_vs_service *svc)
42{
43 return 0;
44}
45
46
47static int
48ip_vs_nq_done_svc(struct ip_vs_service *svc)
49{
50 return 0;
51}
52
53
54static int
55ip_vs_nq_update_svc(struct ip_vs_service *svc)
56{
57 return 0;
58}
59
60
61static inline unsigned int 40static inline unsigned int
62ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) 41ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
63{ 42{
@@ -137,9 +116,6 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
137 .refcnt = ATOMIC_INIT(0), 116 .refcnt = ATOMIC_INIT(0),
138 .module = THIS_MODULE, 117 .module = THIS_MODULE,
139 .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list), 118 .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
140 .init_service = ip_vs_nq_init_svc,
141 .done_service = ip_vs_nq_done_svc,
142 .update_service = ip_vs_nq_update_svc,
143 .schedule = ip_vs_nq_schedule, 119 .schedule = ip_vs_nq_schedule,
144}; 120};
145 121
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah.c b/net/ipv4/ipvs/ip_vs_proto_ah_esp.c
index 73e0ea87c1f5..3f9ebd7639ae 100644
--- a/net/ipv4/ipvs/ip_vs_proto_ah.c
+++ b/net/ipv4/ipvs/ip_vs_proto_ah_esp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS 2 * ip_vs_proto_ah_esp.c: AH/ESP IPSec load balancing support for IPVS
3 * 3 *
4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
5 * Wensong Zhang <wensong@linuxvirtualserver.org> 5 * Wensong Zhang <wensong@linuxvirtualserver.org>
@@ -39,11 +39,11 @@ struct isakmp_hdr {
39 39
40 40
41static struct ip_vs_conn * 41static struct ip_vs_conn *
42ah_conn_in_get(const struct sk_buff *skb, 42ah_esp_conn_in_get(const struct sk_buff *skb,
43 struct ip_vs_protocol *pp, 43 struct ip_vs_protocol *pp,
44 const struct iphdr *iph, 44 const struct iphdr *iph,
45 unsigned int proto_off, 45 unsigned int proto_off,
46 int inverse) 46 int inverse)
47{ 47{
48 struct ip_vs_conn *cp; 48 struct ip_vs_conn *cp;
49 49
@@ -79,8 +79,8 @@ ah_conn_in_get(const struct sk_buff *skb,
79 79
80 80
81static struct ip_vs_conn * 81static struct ip_vs_conn *
82ah_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, 82ah_esp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
83 const struct iphdr *iph, unsigned int proto_off, int inverse) 83 const struct iphdr *iph, unsigned int proto_off, int inverse)
84{ 84{
85 struct ip_vs_conn *cp; 85 struct ip_vs_conn *cp;
86 86
@@ -112,12 +112,12 @@ ah_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
112 112
113 113
114static int 114static int
115ah_conn_schedule(struct sk_buff *skb, 115ah_esp_conn_schedule(struct sk_buff *skb,
116 struct ip_vs_protocol *pp, 116 struct ip_vs_protocol *pp,
117 int *verdict, struct ip_vs_conn **cpp) 117 int *verdict, struct ip_vs_conn **cpp)
118{ 118{
119 /* 119 /*
120 * AH is only related traffic. Pass the packet to IP stack. 120 * AH/ESP is only related traffic. Pass the packet to IP stack.
121 */ 121 */
122 *verdict = NF_ACCEPT; 122 *verdict = NF_ACCEPT;
123 return 0; 123 return 0;
@@ -125,8 +125,8 @@ ah_conn_schedule(struct sk_buff *skb,
125 125
126 126
127static void 127static void
128ah_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb, 128ah_esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
129 int offset, const char *msg) 129 int offset, const char *msg)
130{ 130{
131 char buf[256]; 131 char buf[256];
132 struct iphdr _iph, *ih; 132 struct iphdr _iph, *ih;
@@ -143,28 +143,29 @@ ah_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
143} 143}
144 144
145 145
146static void ah_init(struct ip_vs_protocol *pp) 146static void ah_esp_init(struct ip_vs_protocol *pp)
147{ 147{
148 /* nothing to do now */ 148 /* nothing to do now */
149} 149}
150 150
151 151
152static void ah_exit(struct ip_vs_protocol *pp) 152static void ah_esp_exit(struct ip_vs_protocol *pp)
153{ 153{
154 /* nothing to do now */ 154 /* nothing to do now */
155} 155}
156 156
157 157
158#ifdef CONFIG_IP_VS_PROTO_AH
158struct ip_vs_protocol ip_vs_protocol_ah = { 159struct ip_vs_protocol ip_vs_protocol_ah = {
159 .name = "AH", 160 .name = "AH",
160 .protocol = IPPROTO_AH, 161 .protocol = IPPROTO_AH,
161 .num_states = 1, 162 .num_states = 1,
162 .dont_defrag = 1, 163 .dont_defrag = 1,
163 .init = ah_init, 164 .init = ah_esp_init,
164 .exit = ah_exit, 165 .exit = ah_esp_exit,
165 .conn_schedule = ah_conn_schedule, 166 .conn_schedule = ah_esp_conn_schedule,
166 .conn_in_get = ah_conn_in_get, 167 .conn_in_get = ah_esp_conn_in_get,
167 .conn_out_get = ah_conn_out_get, 168 .conn_out_get = ah_esp_conn_out_get,
168 .snat_handler = NULL, 169 .snat_handler = NULL,
169 .dnat_handler = NULL, 170 .dnat_handler = NULL,
170 .csum_check = NULL, 171 .csum_check = NULL,
@@ -172,7 +173,31 @@ struct ip_vs_protocol ip_vs_protocol_ah = {
172 .register_app = NULL, 173 .register_app = NULL,
173 .unregister_app = NULL, 174 .unregister_app = NULL,
174 .app_conn_bind = NULL, 175 .app_conn_bind = NULL,
175 .debug_packet = ah_debug_packet, 176 .debug_packet = ah_esp_debug_packet,
176 .timeout_change = NULL, /* ISAKMP */ 177 .timeout_change = NULL, /* ISAKMP */
177 .set_state_timeout = NULL, 178 .set_state_timeout = NULL,
178}; 179};
180#endif
181
182#ifdef CONFIG_IP_VS_PROTO_ESP
183struct ip_vs_protocol ip_vs_protocol_esp = {
184 .name = "ESP",
185 .protocol = IPPROTO_ESP,
186 .num_states = 1,
187 .dont_defrag = 1,
188 .init = ah_esp_init,
189 .exit = ah_esp_exit,
190 .conn_schedule = ah_esp_conn_schedule,
191 .conn_in_get = ah_esp_conn_in_get,
192 .conn_out_get = ah_esp_conn_out_get,
193 .snat_handler = NULL,
194 .dnat_handler = NULL,
195 .csum_check = NULL,
196 .state_transition = NULL,
197 .register_app = NULL,
198 .unregister_app = NULL,
199 .app_conn_bind = NULL,
200 .debug_packet = ah_esp_debug_packet,
201 .timeout_change = NULL, /* ISAKMP */
202};
203#endif
diff --git a/net/ipv4/ipvs/ip_vs_proto_esp.c b/net/ipv4/ipvs/ip_vs_proto_esp.c
deleted file mode 100644
index 21d70c8ffa54..000000000000
--- a/net/ipv4/ipvs/ip_vs_proto_esp.c
+++ /dev/null
@@ -1,176 +0,0 @@
1/*
2 * ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS
3 *
4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
5 * Wensong Zhang <wensong@linuxvirtualserver.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation;
10 *
11 */
12
13#include <linux/in.h>
14#include <linux/ip.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/netfilter.h>
18#include <linux/netfilter_ipv4.h>
19
20#include <net/ip_vs.h>
21
22
23/* TODO:
24
25struct isakmp_hdr {
26 __u8 icookie[8];
27 __u8 rcookie[8];
28 __u8 np;
29 __u8 version;
30 __u8 xchgtype;
31 __u8 flags;
32 __u32 msgid;
33 __u32 length;
34};
35
36*/
37
38#define PORT_ISAKMP 500
39
40
41static struct ip_vs_conn *
42esp_conn_in_get(const struct sk_buff *skb,
43 struct ip_vs_protocol *pp,
44 const struct iphdr *iph,
45 unsigned int proto_off,
46 int inverse)
47{
48 struct ip_vs_conn *cp;
49
50 if (likely(!inverse)) {
51 cp = ip_vs_conn_in_get(IPPROTO_UDP,
52 iph->saddr,
53 htons(PORT_ISAKMP),
54 iph->daddr,
55 htons(PORT_ISAKMP));
56 } else {
57 cp = ip_vs_conn_in_get(IPPROTO_UDP,
58 iph->daddr,
59 htons(PORT_ISAKMP),
60 iph->saddr,
61 htons(PORT_ISAKMP));
62 }
63
64 if (!cp) {
65 /*
66 * We are not sure if the packet is from our
67 * service, so our conn_schedule hook should return NF_ACCEPT
68 */
69 IP_VS_DBG(12, "Unknown ISAKMP entry for outin packet "
70 "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
71 inverse ? "ICMP+" : "",
72 pp->name,
73 NIPQUAD(iph->saddr),
74 NIPQUAD(iph->daddr));
75 }
76
77 return cp;
78}
79
80
81static struct ip_vs_conn *
82esp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
83 const struct iphdr *iph, unsigned int proto_off, int inverse)
84{
85 struct ip_vs_conn *cp;
86
87 if (likely(!inverse)) {
88 cp = ip_vs_conn_out_get(IPPROTO_UDP,
89 iph->saddr,
90 htons(PORT_ISAKMP),
91 iph->daddr,
92 htons(PORT_ISAKMP));
93 } else {
94 cp = ip_vs_conn_out_get(IPPROTO_UDP,
95 iph->daddr,
96 htons(PORT_ISAKMP),
97 iph->saddr,
98 htons(PORT_ISAKMP));
99 }
100
101 if (!cp) {
102 IP_VS_DBG(12, "Unknown ISAKMP entry for inout packet "
103 "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
104 inverse ? "ICMP+" : "",
105 pp->name,
106 NIPQUAD(iph->saddr),
107 NIPQUAD(iph->daddr));
108 }
109
110 return cp;
111}
112
113
114static int
115esp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
116 int *verdict, struct ip_vs_conn **cpp)
117{
118 /*
119 * ESP is only related traffic. Pass the packet to IP stack.
120 */
121 *verdict = NF_ACCEPT;
122 return 0;
123}
124
125
126static void
127esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
128 int offset, const char *msg)
129{
130 char buf[256];
131 struct iphdr _iph, *ih;
132
133 ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
134 if (ih == NULL)
135 sprintf(buf, "%s TRUNCATED", pp->name);
136 else
137 sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u",
138 pp->name, NIPQUAD(ih->saddr),
139 NIPQUAD(ih->daddr));
140
141 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
142}
143
144
145static void esp_init(struct ip_vs_protocol *pp)
146{
147 /* nothing to do now */
148}
149
150
151static void esp_exit(struct ip_vs_protocol *pp)
152{
153 /* nothing to do now */
154}
155
156
157struct ip_vs_protocol ip_vs_protocol_esp = {
158 .name = "ESP",
159 .protocol = IPPROTO_ESP,
160 .num_states = 1,
161 .dont_defrag = 1,
162 .init = esp_init,
163 .exit = esp_exit,
164 .conn_schedule = esp_conn_schedule,
165 .conn_in_get = esp_conn_in_get,
166 .conn_out_get = esp_conn_out_get,
167 .snat_handler = NULL,
168 .dnat_handler = NULL,
169 .csum_check = NULL,
170 .state_transition = NULL,
171 .register_app = NULL,
172 .unregister_app = NULL,
173 .app_conn_bind = NULL,
174 .debug_packet = esp_debug_packet,
175 .timeout_change = NULL, /* ISAKMP */
176};
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index 358110d17e59..f74929117534 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -32,12 +32,6 @@ static int ip_vs_rr_init_svc(struct ip_vs_service *svc)
32} 32}
33 33
34 34
35static int ip_vs_rr_done_svc(struct ip_vs_service *svc)
36{
37 return 0;
38}
39
40
41static int ip_vs_rr_update_svc(struct ip_vs_service *svc) 35static int ip_vs_rr_update_svc(struct ip_vs_service *svc)
42{ 36{
43 svc->sched_data = &svc->destinations; 37 svc->sched_data = &svc->destinations;
@@ -96,7 +90,6 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
96 .module = THIS_MODULE, 90 .module = THIS_MODULE,
97 .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list), 91 .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
98 .init_service = ip_vs_rr_init_svc, 92 .init_service = ip_vs_rr_init_svc,
99 .done_service = ip_vs_rr_done_svc,
100 .update_service = ip_vs_rr_update_svc, 93 .update_service = ip_vs_rr_update_svc,
101 .schedule = ip_vs_rr_schedule, 94 .schedule = ip_vs_rr_schedule,
102}; 95};
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index 77663d84cbd1..53f73bea66ce 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -41,27 +41,6 @@
41#include <net/ip_vs.h> 41#include <net/ip_vs.h>
42 42
43 43
44static int
45ip_vs_sed_init_svc(struct ip_vs_service *svc)
46{
47 return 0;
48}
49
50
51static int
52ip_vs_sed_done_svc(struct ip_vs_service *svc)
53{
54 return 0;
55}
56
57
58static int
59ip_vs_sed_update_svc(struct ip_vs_service *svc)
60{
61 return 0;
62}
63
64
65static inline unsigned int 44static inline unsigned int
66ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) 45ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
67{ 46{
@@ -139,9 +118,6 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
139 .refcnt = ATOMIC_INIT(0), 118 .refcnt = ATOMIC_INIT(0),
140 .module = THIS_MODULE, 119 .module = THIS_MODULE,
141 .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list), 120 .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
142 .init_service = ip_vs_sed_init_svc,
143 .done_service = ip_vs_sed_done_svc,
144 .update_service = ip_vs_sed_update_svc,
145 .schedule = ip_vs_sed_schedule, 121 .schedule = ip_vs_sed_schedule,
146}; 122};
147 123
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 9b0ef86bb1f7..df7ad8d74766 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -25,27 +25,6 @@
25#include <net/ip_vs.h> 25#include <net/ip_vs.h>
26 26
27 27
28static int
29ip_vs_wlc_init_svc(struct ip_vs_service *svc)
30{
31 return 0;
32}
33
34
35static int
36ip_vs_wlc_done_svc(struct ip_vs_service *svc)
37{
38 return 0;
39}
40
41
42static int
43ip_vs_wlc_update_svc(struct ip_vs_service *svc)
44{
45 return 0;
46}
47
48
49static inline unsigned int 28static inline unsigned int
50ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest) 29ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
51{ 30{
@@ -127,9 +106,6 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
127 .refcnt = ATOMIC_INIT(0), 106 .refcnt = ATOMIC_INIT(0),
128 .module = THIS_MODULE, 107 .module = THIS_MODULE,
129 .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list), 108 .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
130 .init_service = ip_vs_wlc_init_svc,
131 .done_service = ip_vs_wlc_done_svc,
132 .update_service = ip_vs_wlc_update_svc,
133 .schedule = ip_vs_wlc_schedule, 109 .schedule = ip_vs_wlc_schedule,
134}; 110};
135 111
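
The lc, nq, sed and wlc schedulers lose their empty init_service/done_service/update_service stubs, which only makes sense if the IPVS core now treats those callbacks as optional and calls them only when non-NULL; that core-side change is outside the hunks shown here, so treat it as an assumption. A sketch of what such call sites look like under that assumption, with the scheduler and service types reduced to what the example needs:

	#include <stddef.h>

	struct ip_vs_service;	/* opaque here */

	struct ip_vs_scheduler_stub {
		int (*init_service)(struct ip_vs_service *svc);
		int (*done_service)(struct ip_vs_service *svc);
		int (*update_service)(struct ip_vs_service *svc);
	};

	static int bind_scheduler(struct ip_vs_service *svc,
				  const struct ip_vs_scheduler_stub *sched)
	{
		/* a NULL callback simply means "nothing to set up" */
		if (sched->init_service)
			return sched->init_service(svc);
		return 0;
	}

	static void unbind_scheduler(struct ip_vs_service *svc,
				     const struct ip_vs_scheduler_stub *sched)
	{
		if (sched->done_service)
			sched->done_service(svc);
	}

	int main(void)
	{
		static const struct ip_vs_scheduler_stub lc_like = { 0 };	/* all callbacks NULL */
		int ret = bind_scheduler(NULL, &lc_like);

		unbind_scheduler(NULL, &lc_like);
		return ret;
	}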
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6ee5354c9aa1..f62187bb6d08 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -282,6 +282,8 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
282 struct rtable *r = NULL; 282 struct rtable *r = NULL;
283 283
284 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { 284 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
285 if (!rt_hash_table[st->bucket].chain)
286 continue;
285 rcu_read_lock_bh(); 287 rcu_read_lock_bh();
286 r = rcu_dereference(rt_hash_table[st->bucket].chain); 288 r = rcu_dereference(rt_hash_table[st->bucket].chain);
287 while (r) { 289 while (r) {
@@ -299,11 +301,14 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
299 struct rtable *r) 301 struct rtable *r)
300{ 302{
301 struct rt_cache_iter_state *st = seq->private; 303 struct rt_cache_iter_state *st = seq->private;
304
302 r = r->u.dst.rt_next; 305 r = r->u.dst.rt_next;
303 while (!r) { 306 while (!r) {
304 rcu_read_unlock_bh(); 307 rcu_read_unlock_bh();
305 if (--st->bucket < 0) 308 do {
306 break; 309 if (--st->bucket < 0)
310 return NULL;
311 } while (!rt_hash_table[st->bucket].chain);
307 rcu_read_lock_bh(); 312 rcu_read_lock_bh();
308 r = rt_hash_table[st->bucket].chain; 313 r = rt_hash_table[st->bucket].chain;
309 } 314 }
@@ -2840,7 +2845,9 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2840 if (s_h < 0) 2845 if (s_h < 0)
2841 s_h = 0; 2846 s_h = 0;
2842 s_idx = idx = cb->args[1]; 2847 s_idx = idx = cb->args[1];
2843 for (h = s_h; h <= rt_hash_mask; h++) { 2848 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2849 if (!rt_hash_table[h].chain)
2850 continue;
2844 rcu_read_lock_bh(); 2851 rcu_read_lock_bh();
2845 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; 2852 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
2846 rt = rcu_dereference(rt->u.dst.rt_next), idx++) { 2853 rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
@@ -2859,7 +2866,6 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2859 dst_release(xchg(&skb->dst, NULL)); 2866 dst_release(xchg(&skb->dst, NULL));
2860 } 2867 }
2861 rcu_read_unlock_bh(); 2868 rcu_read_unlock_bh();
2862 s_idx = 0;
2863 } 2869 }
2864 2870
2865done: 2871done:
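
All three route-cache walkers changed here test rt_hash_table[bucket].chain before taking rcu_read_lock_bh(), so a sparsely populated cache no longer pays a lock/unlock pair per empty bucket; the unlocked read is only a hint, and the chain is still fetched again under the lock. A small userspace sketch of that shape, with read_lock()/read_unlock() as hypothetical stand-ins for the RCU-BH primitives:

	#include <stddef.h>

	struct rt { struct rt *next; };
	struct bucket { struct rt *chain; };

	static void read_lock(void)   { /* rcu_read_lock_bh() in the kernel */ }
	static void read_unlock(void) { /* rcu_read_unlock_bh() in the kernel */ }

	static int count_entries(const struct bucket *tab, int nbuckets)
	{
		const struct rt *r;
		int i, n = 0;

		for (i = nbuckets - 1; i >= 0; i--) {
			if (!tab[i].chain)	/* cheap unlocked check: skip empty buckets */
				continue;
			read_lock();
			for (r = tab[i].chain; r; r = r->next)	/* re-read under the lock */
				n++;
			read_unlock();
		}
		return n;
	}

	int main(void)
	{
		struct bucket tab[4] = { { 0 } };	/* all chains empty */

		return count_entries(tab, 4);
	}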
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 67ccce2a96bd..f79a51607292 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3442,6 +3442,22 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3442 } 3442 }
3443} 3443}
3444 3444
3445static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
3446{
3447 __be32 *ptr = (__be32 *)(th + 1);
3448
3449 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
3450 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
3451 tp->rx_opt.saw_tstamp = 1;
3452 ++ptr;
3453 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3454 ++ptr;
3455 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
3456 return 1;
3457 }
3458 return 0;
3459}
3460
3445/* Fast parse options. This hopes to only see timestamps. 3461/* Fast parse options. This hopes to only see timestamps.
3446 * If it is wrong it falls back on tcp_parse_options(). 3462 * If it is wrong it falls back on tcp_parse_options().
3447 */ 3463 */
@@ -3453,16 +3469,8 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
3453 return 0; 3469 return 0;
3454 } else if (tp->rx_opt.tstamp_ok && 3470 } else if (tp->rx_opt.tstamp_ok &&
3455 th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { 3471 th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
3456 __be32 *ptr = (__be32 *)(th + 1); 3472 if (tcp_parse_aligned_timestamp(tp, th))
3457 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
3458 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
3459 tp->rx_opt.saw_tstamp = 1;
3460 ++ptr;
3461 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3462 ++ptr;
3463 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
3464 return 1; 3473 return 1;
3465 }
3466 } 3474 }
3467 tcp_parse_options(skb, &tp->rx_opt, 1); 3475 tcp_parse_options(skb, &tp->rx_opt, 1);
3468 return 1; 3476 return 1;
@@ -4161,6 +4169,18 @@ add_sack:
4161 } 4169 }
4162} 4170}
4163 4171
4172static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
4173 struct sk_buff_head *list)
4174{
4175 struct sk_buff *next = skb->next;
4176
4177 __skb_unlink(skb, list);
4178 __kfree_skb(skb);
4179 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4180
4181 return next;
4182}
4183
4164/* Collapse contiguous sequence of skbs head..tail with 4184/* Collapse contiguous sequence of skbs head..tail with
4165 * sequence numbers start..end. 4185 * sequence numbers start..end.
4166 * Segments with FIN/SYN are not collapsed (only because this 4186 * Segments with FIN/SYN are not collapsed (only because this
@@ -4178,11 +4198,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4178 for (skb = head; skb != tail;) { 4198 for (skb = head; skb != tail;) {
4179 /* No new bits? It is possible on ofo queue. */ 4199 /* No new bits? It is possible on ofo queue. */
4180 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4200 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4181 struct sk_buff *next = skb->next; 4201 skb = tcp_collapse_one(sk, skb, list);
4182 __skb_unlink(skb, list);
4183 __kfree_skb(skb);
4184 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4185 skb = next;
4186 continue; 4202 continue;
4187 } 4203 }
4188 4204
@@ -4246,11 +4262,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4246 start += size; 4262 start += size;
4247 } 4263 }
4248 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4264 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4249 struct sk_buff *next = skb->next; 4265 skb = tcp_collapse_one(sk, skb, list);
4250 __skb_unlink(skb, list);
4251 __kfree_skb(skb);
4252 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4253 skb = next;
4254 if (skb == tail || 4266 if (skb == tail ||
4255 tcp_hdr(skb)->syn || 4267 tcp_hdr(skb)->syn ||
4256 tcp_hdr(skb)->fin) 4268 tcp_hdr(skb)->fin)
@@ -4691,6 +4703,67 @@ out:
4691} 4703}
4692#endif /* CONFIG_NET_DMA */ 4704#endif /* CONFIG_NET_DMA */
4693 4705
4706/* Does PAWS and seqno based validation of an incoming segment, flags will
4707 * play significant role here.
4708 */
4709static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
4710 struct tcphdr *th, int syn_inerr)
4711{
4712 struct tcp_sock *tp = tcp_sk(sk);
4713
4714 /* RFC1323: H1. Apply PAWS check first. */
4715 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4716 tcp_paws_discard(sk, skb)) {
4717 if (!th->rst) {
4718 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
4719 tcp_send_dupack(sk, skb);
4720 goto discard;
4721 }
4722 /* Reset is accepted even if it did not pass PAWS. */
4723 }
4724
4725 /* Step 1: check sequence number */
4726 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
4727 /* RFC793, page 37: "In all states except SYN-SENT, all reset
4728 * (RST) segments are validated by checking their SEQ-fields."
4729 * And page 69: "If an incoming segment is not acceptable,
4730 * an acknowledgment should be sent in reply (unless the RST
4731 * bit is set, if so drop the segment and return)".
4732 */
4733 if (!th->rst)
4734 tcp_send_dupack(sk, skb);
4735 goto discard;
4736 }
4737
4738 /* Step 2: check RST bit */
4739 if (th->rst) {
4740 tcp_reset(sk);
4741 goto discard;
4742 }
4743
4744 /* ts_recent update must be made after we are sure that the packet
4745 * is in window.
4746 */
4747 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4748
4749 /* step 3: check security and precedence [ignored] */
4750
4751 /* step 4: Check for a SYN in window. */
4752 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4753 if (syn_inerr)
4754 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
4755 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
4756 tcp_reset(sk);
4757 return -1;
4758 }
4759
4760 return 1;
4761
4762discard:
4763 __kfree_skb(skb);
4764 return 0;
4765}
4766
4694/* 4767/*
4695 * TCP receive function for the ESTABLISHED state. 4768 * TCP receive function for the ESTABLISHED state.
4696 * 4769 *
@@ -4718,6 +4791,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4718 struct tcphdr *th, unsigned len) 4791 struct tcphdr *th, unsigned len)
4719{ 4792{
4720 struct tcp_sock *tp = tcp_sk(sk); 4793 struct tcp_sock *tp = tcp_sk(sk);
4794 int res;
4721 4795
4722 /* 4796 /*
4723 * Header prediction. 4797 * Header prediction.
@@ -4756,19 +4830,10 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4756 4830
4757 /* Check timestamp */ 4831 /* Check timestamp */
4758 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { 4832 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
4759 __be32 *ptr = (__be32 *)(th + 1);
4760
4761 /* No? Slow path! */ 4833 /* No? Slow path! */
4762 if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 4834 if (!tcp_parse_aligned_timestamp(tp, th))
4763 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
4764 goto slow_path; 4835 goto slow_path;
4765 4836
4766 tp->rx_opt.saw_tstamp = 1;
4767 ++ptr;
4768 tp->rx_opt.rcv_tsval = ntohl(*ptr);
4769 ++ptr;
4770 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
4771
4772 /* If PAWS failed, check it more carefully in slow path */ 4837 /* If PAWS failed, check it more carefully in slow path */
4773 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) 4838 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
4774 goto slow_path; 4839 goto slow_path;
@@ -4899,51 +4964,12 @@ slow_path:
4899 goto csum_error; 4964 goto csum_error;
4900 4965
4901 /* 4966 /*
4902 * RFC1323: H1. Apply PAWS check first.
4903 */
4904 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4905 tcp_paws_discard(sk, skb)) {
4906 if (!th->rst) {
4907 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
4908 tcp_send_dupack(sk, skb);
4909 goto discard;
4910 }
4911 /* Resets are accepted even if PAWS failed.
4912
4913 ts_recent update must be made after we are sure
4914 that the packet is in window.
4915 */
4916 }
4917
4918 /*
4919 * Standard slow path. 4967 * Standard slow path.
4920 */ 4968 */
4921 4969
4922 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 4970 res = tcp_validate_incoming(sk, skb, th, 1);
4923 /* RFC793, page 37: "In all states except SYN-SENT, all reset 4971 if (res <= 0)
4924 * (RST) segments are validated by checking their SEQ-fields." 4972 return -res;
4925 * And page 69: "If an incoming segment is not acceptable,
4926 * an acknowledgment should be sent in reply (unless the RST bit
4927 * is set, if so drop the segment and return)".
4928 */
4929 if (!th->rst)
4930 tcp_send_dupack(sk, skb);
4931 goto discard;
4932 }
4933
4934 if (th->rst) {
4935 tcp_reset(sk);
4936 goto discard;
4937 }
4938
4939 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4940
4941 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4942 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
4943 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
4944 tcp_reset(sk);
4945 return 1;
4946 }
4947 4973
4948step5: 4974step5:
4949 if (th->ack) 4975 if (th->ack)
@@ -5225,6 +5251,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5225 struct tcp_sock *tp = tcp_sk(sk); 5251 struct tcp_sock *tp = tcp_sk(sk);
5226 struct inet_connection_sock *icsk = inet_csk(sk); 5252 struct inet_connection_sock *icsk = inet_csk(sk);
5227 int queued = 0; 5253 int queued = 0;
5254 int res;
5228 5255
5229 tp->rx_opt.saw_tstamp = 0; 5256 tp->rx_opt.saw_tstamp = 0;
5230 5257
@@ -5277,42 +5304,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5277 return 0; 5304 return 0;
5278 } 5305 }
5279 5306
5280 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 5307 res = tcp_validate_incoming(sk, skb, th, 0);
5281 tcp_paws_discard(sk, skb)) { 5308 if (res <= 0)
5282 if (!th->rst) { 5309 return -res;
5283 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
5284 tcp_send_dupack(sk, skb);
5285 goto discard;
5286 }
5287 /* Reset is accepted even if it did not pass PAWS. */
5288 }
5289
5290 /* step 1: check sequence number */
5291 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
5292 if (!th->rst)
5293 tcp_send_dupack(sk, skb);
5294 goto discard;
5295 }
5296
5297 /* step 2: check RST bit */
5298 if (th->rst) {
5299 tcp_reset(sk);
5300 goto discard;
5301 }
5302
5303 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
5304
5305 /* step 3: check security and precedence [ignored] */
5306
5307 /* step 4:
5308 *
5309 * Check for a SYN in window.
5310 */
5311 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5312 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
5313 tcp_reset(sk);
5314 return 1;
5315 }
5316 5310
5317 /* step 5: check the ACK field */ 5311 /* step 5: check the ACK field */
5318 if (th->ack) { 5312 if (th->ack) {
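The refactoring above leans on the fact that a segment carrying only the RFC 1323 timestamp option, padded to 32 bits, has an option block of exactly 12 bytes: NOP, NOP, kind 8, length 10, then TSval and TSecr. The standalone sketch below is plain userspace C, not kernel code; parse_aligned_ts and struct ts_opt are illustrative names of my own, but the single-word comparison mirrors what tcp_parse_aligned_timestamp() does before reading the two timestamps.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct ts_opt { uint32_t tsval, tsecr; };

/* Return 1 and fill *out if buf starts with the aligned timestamp
 * layout NOP(1) NOP(1) TIMESTAMP(8) LEN(10); otherwise return 0. */
static int parse_aligned_ts(const unsigned char *buf, struct ts_opt *out)
{
        uint32_t word;

        memcpy(&word, buf, 4);
        if (word != htonl((1 << 24) | (1 << 16) | (8 << 8) | 10))
                return 0;       /* some other option mix: take the slow path */
        memcpy(&out->tsval, buf + 4, 4);
        memcpy(&out->tsecr, buf + 8, 4);
        out->tsval = ntohl(out->tsval);
        out->tsecr = ntohl(out->tsecr);
        return 1;
}

int main(void)
{
        unsigned char opts[12] = { 1, 1, 8, 10, 0, 0, 0, 5, 0, 0, 0, 9 };
        struct ts_opt ts;

        return (parse_aligned_ts(opts, &ts) && ts.tsval == 5 && ts.tsecr == 9) ? 0 : 1;
}

After this change the same one-word test gates both the fast-parse helper and the header-prediction path in tcp_rcv_established(), so the byte layout is checked in exactly one place.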
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1b4fee20fc93..3dfbc21e555a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1946,6 +1946,12 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1946 return rc; 1946 return rc;
1947} 1947}
1948 1948
1949static inline int empty_bucket(struct tcp_iter_state *st)
1950{
1951 return hlist_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
1952 hlist_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
1953}
1954
1949static void *established_get_first(struct seq_file *seq) 1955static void *established_get_first(struct seq_file *seq)
1950{ 1956{
1951 struct tcp_iter_state* st = seq->private; 1957 struct tcp_iter_state* st = seq->private;
@@ -1958,6 +1964,10 @@ static void *established_get_first(struct seq_file *seq)
1958 struct inet_timewait_sock *tw; 1964 struct inet_timewait_sock *tw;
1959 rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); 1965 rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1960 1966
1967 /* Lockless fast path for the common case of empty buckets */
1968 if (empty_bucket(st))
1969 continue;
1970
1961 read_lock_bh(lock); 1971 read_lock_bh(lock);
1962 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { 1972 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1963 if (sk->sk_family != st->family || 1973 if (sk->sk_family != st->family ||
@@ -2008,13 +2018,15 @@ get_tw:
2008 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); 2018 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2009 st->state = TCP_SEQ_STATE_ESTABLISHED; 2019 st->state = TCP_SEQ_STATE_ESTABLISHED;
2010 2020
2011 if (++st->bucket < tcp_hashinfo.ehash_size) { 2021 /* Look for next non empty bucket */
2012 read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); 2022 while (++st->bucket < tcp_hashinfo.ehash_size &&
2013 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); 2023 empty_bucket(st))
2014 } else { 2024 ;
2015 cur = NULL; 2025 if (st->bucket >= tcp_hashinfo.ehash_size)
2016 goto out; 2026 return NULL;
2017 } 2027
2028 read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2029 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
2018 } else 2030 } else
2019 sk = sk_next(sk); 2031 sk = sk_next(sk);
2020 2032
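The /proc/net/tcp iterator change above avoids taking a bucket lock just to discover that the bucket is empty. Below is a minimal sketch of that skip-empty-buckets pattern over a plain array of chain heads; bucket and next_nonempty are illustrative names, not the tcp_hashinfo structures.

struct bucket { void *chain; };         /* stand-in for one ehash slot */

/* Return the index of the first non-empty bucket at or after idx,
 * or -1 if every remaining bucket is empty.  The emptiness test is
 * done locklessly; the caller locks only the bucket it actually hits. */
static int next_nonempty(const struct bucket *tbl, int nbuckets, int idx)
{
        while (idx < nbuckets && tbl[idx].chain == 0)
                idx++;
        return idx < nbuckets ? idx : -1;
}

int main(void)
{
        struct bucket tbl[4] = { { 0 }, { 0 }, { &tbl[2] }, { 0 } };

        return next_nonempty(tbl, 4, 0) == 2 ? 0 : 1;
}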
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 297c257864c7..928813ce08e2 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -66,13 +66,16 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
66static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) 66static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex)
67{ 67{
68 struct net_device *dev; 68 struct net_device *dev;
69 struct ieee80211_sub_if_data *sdata;
69 70
70 /* we're under RTNL */ 71 /* we're under RTNL */
71 dev = __dev_get_by_index(&init_net, ifindex); 72 dev = __dev_get_by_index(&init_net, ifindex);
72 if (!dev) 73 if (!dev)
73 return -ENODEV; 74 return -ENODEV;
74 75
75 ieee80211_if_remove(dev); 76 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
77
78 ieee80211_if_remove(sdata);
76 79
77 return 0; 80 return 0;
78} 81}
@@ -671,6 +674,11 @@ static void sta_apply_parameters(struct ieee80211_local *local,
671 sta->supp_rates[local->oper_channel->band] = rates; 674 sta->supp_rates[local->oper_channel->band] = rates;
672 } 675 }
673 676
677 if (params->ht_capa) {
678 ieee80211_ht_cap_ie_to_ht_info(params->ht_capa,
679 &sta->ht_info);
680 }
681
674 if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) { 682 if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) {
675 switch (params->plink_action) { 683 switch (params->plink_action) {
676 case PLINK_ACTION_OPEN: 684 case PLINK_ACTION_OPEN:
@@ -842,13 +850,13 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
842 return -ENOENT; 850 return -ENOENT;
843 } 851 }
844 852
845 err = mesh_path_add(dst, dev); 853 err = mesh_path_add(dst, sdata);
846 if (err) { 854 if (err) {
847 rcu_read_unlock(); 855 rcu_read_unlock();
848 return err; 856 return err;
849 } 857 }
850 858
851 mpath = mesh_path_lookup(dst, dev); 859 mpath = mesh_path_lookup(dst, sdata);
852 if (!mpath) { 860 if (!mpath) {
853 rcu_read_unlock(); 861 rcu_read_unlock();
854 return -ENXIO; 862 return -ENXIO;
@@ -862,10 +870,12 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
862static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, 870static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
863 u8 *dst) 871 u8 *dst)
864{ 872{
873 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
874
865 if (dst) 875 if (dst)
866 return mesh_path_del(dst, dev); 876 return mesh_path_del(dst, sdata);
867 877
868 mesh_path_flush(dev); 878 mesh_path_flush(sdata);
869 return 0; 879 return 0;
870} 880}
871 881
@@ -897,7 +907,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
897 return -ENOENT; 907 return -ENOENT;
898 } 908 }
899 909
900 mpath = mesh_path_lookup(dst, dev); 910 mpath = mesh_path_lookup(dst, sdata);
901 if (!mpath) { 911 if (!mpath) {
902 rcu_read_unlock(); 912 rcu_read_unlock();
903 return -ENOENT; 913 return -ENOENT;
@@ -965,7 +975,7 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
965 return -ENOTSUPP; 975 return -ENOTSUPP;
966 976
967 rcu_read_lock(); 977 rcu_read_lock();
968 mpath = mesh_path_lookup(dst, dev); 978 mpath = mesh_path_lookup(dst, sdata);
969 if (!mpath) { 979 if (!mpath) {
970 rcu_read_unlock(); 980 rcu_read_unlock();
971 return -ENOENT; 981 return -ENOENT;
@@ -993,7 +1003,7 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
993 return -ENOTSUPP; 1003 return -ENOTSUPP;
994 1004
995 rcu_read_lock(); 1005 rcu_read_lock();
996 mpath = mesh_path_lookup_by_idx(idx, dev); 1006 mpath = mesh_path_lookup_by_idx(idx, sdata);
997 if (!mpath) { 1007 if (!mpath) {
998 rcu_read_unlock(); 1008 rcu_read_unlock();
999 return -ENOENT; 1009 return -ENOENT;
@@ -1005,6 +1015,42 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
1005} 1015}
1006#endif 1016#endif
1007 1017
1018static int ieee80211_change_bss(struct wiphy *wiphy,
1019 struct net_device *dev,
1020 struct bss_parameters *params)
1021{
1022 struct ieee80211_local *local = wiphy_priv(wiphy);
1023 struct ieee80211_sub_if_data *sdata;
1024 u32 changed = 0;
1025
1026 if (dev == local->mdev)
1027 return -EOPNOTSUPP;
1028
1029 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1030
1031 if (sdata->vif.type != IEEE80211_IF_TYPE_AP)
1032 return -EINVAL;
1033
1034 if (params->use_cts_prot >= 0) {
1035 sdata->bss_conf.use_cts_prot = params->use_cts_prot;
1036 changed |= BSS_CHANGED_ERP_CTS_PROT;
1037 }
1038 if (params->use_short_preamble >= 0) {
1039 sdata->bss_conf.use_short_preamble =
1040 params->use_short_preamble;
1041 changed |= BSS_CHANGED_ERP_PREAMBLE;
1042 }
1043 if (params->use_short_slot_time >= 0) {
1044 sdata->bss_conf.use_short_slot =
1045 params->use_short_slot_time;
1046 changed |= BSS_CHANGED_ERP_SLOT;
1047 }
1048
1049 ieee80211_bss_info_change_notify(sdata, changed);
1050
1051 return 0;
1052}
1053
1008struct cfg80211_ops mac80211_config_ops = { 1054struct cfg80211_ops mac80211_config_ops = {
1009 .add_virtual_intf = ieee80211_add_iface, 1055 .add_virtual_intf = ieee80211_add_iface,
1010 .del_virtual_intf = ieee80211_del_iface, 1056 .del_virtual_intf = ieee80211_del_iface,
@@ -1028,4 +1074,5 @@ struct cfg80211_ops mac80211_config_ops = {
1028 .get_mpath = ieee80211_get_mpath, 1074 .get_mpath = ieee80211_get_mpath,
1029 .dump_mpath = ieee80211_dump_mpath, 1075 .dump_mpath = ieee80211_dump_mpath,
1030#endif 1076#endif
1077 .change_bss = ieee80211_change_bss,
1031}; 1078};
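The new ieee80211_change_bss() handler uses a signed tri-state for each bss_parameters field: a negative value means the caller did not set it, so only explicitly supplied fields change state, and every change is folded into one bitmask for a single ieee80211_bss_info_change_notify() call. A minimal sketch of that convention follows; the names and flag values are made up for illustration, not the mac80211 BSS_CHANGED_* definitions.

#define CHG_CTS_PROT   0x1
#define CHG_PREAMBLE   0x2
#define CHG_SLOT       0x4

struct erp_state { int cts_prot, short_preamble, short_slot; };

/* Each parameter is -1 ("leave alone"), 0 or 1.  The returned mask says
 * which fields actually changed, so the caller can notify exactly once. */
static unsigned int apply_erp_params(struct erp_state *bss,
                                     int cts_prot, int preamble, int slot)
{
        unsigned int changed = 0;

        if (cts_prot >= 0) {
                bss->cts_prot = cts_prot;
                changed |= CHG_CTS_PROT;
        }
        if (preamble >= 0) {
                bss->short_preamble = preamble;
                changed |= CHG_PREAMBLE;
        }
        if (slot >= 0) {
                bss->short_slot = slot;
                changed |= CHG_SLOT;
        }
        return changed;
}

int main(void)
{
        struct erp_state bss = { 0, 0, 0 };

        /* userspace asked only to enable CTS protection */
        return apply_erp_params(&bss, 1, -1, -1) == CHG_CTS_PROT ? 0 : 1;
}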
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 79a062782d52..6abe5427752b 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -201,7 +201,7 @@ static ssize_t sta_agg_status_write(struct file *file,
201 tid_num = tid_num - 100; 201 tid_num = tid_num - 100;
202 if (tid_static_rx[tid_num] == 1) { 202 if (tid_static_rx[tid_num] == 1) {
203 strcpy(state, "off "); 203 strcpy(state, "off ");
204 ieee80211_sta_stop_rx_ba_session(dev, da, tid_num, 0, 204 ieee80211_sta_stop_rx_ba_session(sta->sdata, da, tid_num, 0,
205 WLAN_REASON_QSTA_REQUIRE_SETUP); 205 WLAN_REASON_QSTA_REQUIRE_SETUP);
206 sta->ampdu_mlme.tid_state_rx[tid_num] |= 206 sta->ampdu_mlme.tid_state_rx[tid_num] |=
207 HT_AGG_STATE_DEBUGFS_CTL; 207 HT_AGG_STATE_DEBUGFS_CTL;
diff --git a/net/mac80211/event.c b/net/mac80211/event.c
index 2280f40b4560..8de60de70bc9 100644
--- a/net/mac80211/event.c
+++ b/net/mac80211/event.c
@@ -8,7 +8,6 @@
8 * mac80211 - events 8 * mac80211 - events
9 */ 9 */
10 10
11#include <linux/netdevice.h>
12#include <net/iw_handler.h> 11#include <net/iw_handler.h>
13#include "ieee80211_i.h" 12#include "ieee80211_i.h"
14 13
@@ -17,7 +16,7 @@
17 * (in the variable hdr) must be long enough to extract the TKIP 16 * (in the variable hdr) must be long enough to extract the TKIP
18 * fields like TSC 17 * fields like TSC
19 */ 18 */
20void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, 19void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
21 struct ieee80211_hdr *hdr) 20 struct ieee80211_hdr *hdr)
22{ 21{
23 union iwreq_data wrqu; 22 union iwreq_data wrqu;
@@ -32,7 +31,7 @@ void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx,
32 print_mac(mac, hdr->addr2)); 31 print_mac(mac, hdr->addr2));
33 memset(&wrqu, 0, sizeof(wrqu)); 32 memset(&wrqu, 0, sizeof(wrqu));
34 wrqu.data.length = strlen(buf); 33 wrqu.data.length = strlen(buf);
35 wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); 34 wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf);
36 kfree(buf); 35 kfree(buf);
37 } 36 }
38 37
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 4498d8713652..c68d4df11196 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -32,14 +32,6 @@
32/* ieee80211.o internal definitions, etc. These are not included into 32/* ieee80211.o internal definitions, etc. These are not included into
33 * low-level drivers. */ 33 * low-level drivers. */
34 34
35#ifndef ETH_P_PAE
36#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
37#endif /* ETH_P_PAE */
38
39#define WLAN_FC_DATA_PRESENT(fc) (((fc) & 0x4c) == 0x08)
40
41#define IEEE80211_FC(type, subtype) cpu_to_le16(type | subtype)
42
43struct ieee80211_local; 35struct ieee80211_local;
44 36
45/* Maximum number of broadcast/multicast frames to buffer when some of the 37/* Maximum number of broadcast/multicast frames to buffer when some of the
@@ -87,16 +79,11 @@ struct ieee80211_sta_bss {
87 enum ieee80211_band band; 79 enum ieee80211_band band;
88 int freq; 80 int freq;
89 int signal, noise, qual; 81 int signal, noise, qual;
90 u8 *wpa_ie; 82 u8 *ies; /* all information elements from the last Beacon or Probe
91 size_t wpa_ie_len; 83 * Response frames; note Beacon frame is not allowed to
92 u8 *rsn_ie; 84 * override values from Probe Response */
93 size_t rsn_ie_len; 85 size_t ies_len;
94 u8 *wmm_ie; 86 bool wmm_used;
95 size_t wmm_ie_len;
96 u8 *ht_ie;
97 size_t ht_ie_len;
98 u8 *ht_add_ie;
99 size_t ht_add_ie_len;
100#ifdef CONFIG_MAC80211_MESH 87#ifdef CONFIG_MAC80211_MESH
101 u8 *mesh_id; 88 u8 *mesh_id;
102 size_t mesh_id_len; 89 size_t mesh_id_len;
@@ -108,7 +95,7 @@ struct ieee80211_sta_bss {
108 u64 timestamp; 95 u64 timestamp;
109 int beacon_int; 96 int beacon_int;
110 97
111 bool probe_resp; 98 unsigned long last_probe_resp;
112 unsigned long last_update; 99 unsigned long last_update;
113 100
114 /* during association, we save an ERP value from a probe response so 101
@@ -174,7 +161,7 @@ struct ieee80211_tx_data {
174 struct sk_buff **extra_frag; 161 struct sk_buff **extra_frag;
175 int num_extra_frag; 162 int num_extra_frag;
176 163
177 u16 fc, ethertype; 164 u16 ethertype;
178 unsigned int flags; 165 unsigned int flags;
179}; 166};
180 167
@@ -202,7 +189,7 @@ struct ieee80211_rx_data {
202 struct ieee80211_rx_status *status; 189 struct ieee80211_rx_status *status;
203 struct ieee80211_rate *rate; 190 struct ieee80211_rate *rate;
204 191
205 u16 fc, ethertype; 192 u16 ethertype;
206 unsigned int flags; 193 unsigned int flags;
207 int sent_ps_buffered; 194 int sent_ps_buffered;
208 int queue; 195 int queue;
@@ -300,17 +287,35 @@ struct mesh_config {
300#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) 287#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11)
301#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) 288#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12)
302#define IEEE80211_STA_PRIVACY_INVOKED BIT(13) 289#define IEEE80211_STA_PRIVACY_INVOKED BIT(13)
290/* flags for MLME request*/
291#define IEEE80211_STA_REQ_SCAN 0
292#define IEEE80211_STA_REQ_DIRECT_PROBE 1
293#define IEEE80211_STA_REQ_AUTH 2
294#define IEEE80211_STA_REQ_RUN 3
295
296/* flags used for setting mlme state */
297enum ieee80211_sta_mlme_state {
298 IEEE80211_STA_MLME_DISABLED,
299 IEEE80211_STA_MLME_DIRECT_PROBE,
300 IEEE80211_STA_MLME_AUTHENTICATE,
301 IEEE80211_STA_MLME_ASSOCIATE,
302 IEEE80211_STA_MLME_ASSOCIATED,
303 IEEE80211_STA_MLME_IBSS_SEARCH,
304 IEEE80211_STA_MLME_IBSS_JOINED,
305 IEEE80211_STA_MLME_MESH_UP
306};
307
308/* bitfield of allowed auth algs */
309#define IEEE80211_AUTH_ALG_OPEN BIT(0)
310#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
311#define IEEE80211_AUTH_ALG_LEAP BIT(2)
312
303struct ieee80211_if_sta { 313struct ieee80211_if_sta {
304 struct timer_list timer; 314 struct timer_list timer;
305 struct work_struct work; 315 struct work_struct work;
306 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; 316 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
307 u8 ssid[IEEE80211_MAX_SSID_LEN]; 317 u8 ssid[IEEE80211_MAX_SSID_LEN];
308 enum { 318 enum ieee80211_sta_mlme_state state;
309 IEEE80211_DISABLED, IEEE80211_AUTHENTICATE,
310 IEEE80211_ASSOCIATE, IEEE80211_ASSOCIATED,
311 IEEE80211_IBSS_SEARCH, IEEE80211_IBSS_JOINED,
312 IEEE80211_MESH_UP
313 } state;
314 size_t ssid_len; 319 size_t ssid_len;
315 u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; 320 u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
316 size_t scan_ssid_len; 321 size_t scan_ssid_len;
@@ -353,20 +358,17 @@ struct ieee80211_if_sta {
353 358
354 struct sk_buff_head skb_queue; 359 struct sk_buff_head skb_queue;
355 360
356 int auth_tries, assoc_tries; 361 int assoc_scan_tries; /* number of scans done pre-association */
362 int direct_probe_tries; /* retries for direct probes */
363 int auth_tries; /* retries for auth req */
364 int assoc_tries; /* retries for assoc req */
357 365
358 unsigned long request; 366 unsigned long request;
359 367
360 unsigned long last_probe; 368 unsigned long last_probe;
361 369
362 unsigned int flags; 370 unsigned int flags;
363#define IEEE80211_STA_REQ_SCAN 0
364#define IEEE80211_STA_REQ_AUTH 1
365#define IEEE80211_STA_REQ_RUN 2
366 371
367#define IEEE80211_AUTH_ALG_OPEN BIT(0)
368#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
369#define IEEE80211_AUTH_ALG_LEAP BIT(2)
370 unsigned int auth_algs; /* bitfield of allowed auth algs */ 372 unsigned int auth_algs; /* bitfield of allowed auth algs */
371 int auth_alg; /* currently used IEEE 802.11 authentication algorithm */ 373 int auth_alg; /* currently used IEEE 802.11 authentication algorithm */
372 int auth_transaction; 374 int auth_transaction;
@@ -774,6 +776,9 @@ struct ieee80211_ra_tid {
774 776
775/* Parsed Information Elements */ 777/* Parsed Information Elements */
776struct ieee802_11_elems { 778struct ieee802_11_elems {
779 u8 *ie_start;
780 size_t total_len;
781
777 /* pointers to IEs */ 782 /* pointers to IEs */
778 u8 *ssid; 783 u8 *ssid;
779 u8 *supp_rates; 784 u8 *supp_rates;
@@ -867,65 +872,65 @@ u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
867 872
868/* ieee80211_ioctl.c */ 873/* ieee80211_ioctl.c */
869extern const struct iw_handler_def ieee80211_iw_handler_def; 874extern const struct iw_handler_def ieee80211_iw_handler_def;
870int ieee80211_set_freq(struct net_device *dev, int freq); 875int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq);
871 876
872/* ieee80211_sta.c */ 877/* ieee80211_sta.c */
873void ieee80211_sta_timer(unsigned long data); 878void ieee80211_sta_timer(unsigned long data);
874void ieee80211_sta_work(struct work_struct *work); 879void ieee80211_sta_work(struct work_struct *work);
875void ieee80211_sta_scan_work(struct work_struct *work); 880void ieee80211_sta_scan_work(struct work_struct *work);
876void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, 881void ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
877 struct ieee80211_rx_status *rx_status); 882 struct ieee80211_rx_status *rx_status);
878int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len); 883int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len);
879int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len); 884int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len);
880int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid); 885int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid);
881int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len); 886int ieee80211_sta_req_scan(struct ieee80211_sub_if_data *sdata, u8 *ssid, size_t ssid_len);
882void ieee80211_sta_req_auth(struct net_device *dev, 887void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata,
883 struct ieee80211_if_sta *ifsta); 888 struct ieee80211_if_sta *ifsta);
884int ieee80211_sta_scan_results(struct net_device *dev, 889int ieee80211_sta_scan_results(struct ieee80211_local *local,
885 struct iw_request_info *info, 890 struct iw_request_info *info,
886 char *buf, size_t len); 891 char *buf, size_t len);
887ieee80211_rx_result ieee80211_sta_rx_scan( 892ieee80211_rx_result ieee80211_sta_rx_scan(
888 struct net_device *dev, struct sk_buff *skb, 893 struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
889 struct ieee80211_rx_status *rx_status); 894 struct ieee80211_rx_status *rx_status);
890void ieee80211_rx_bss_list_init(struct ieee80211_local *local); 895void ieee80211_rx_bss_list_init(struct ieee80211_local *local);
891void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local); 896void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local);
892int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len); 897int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len);
893struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev, 898struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
894 struct sk_buff *skb, u8 *bssid, 899 struct sk_buff *skb, u8 *bssid,
895 u8 *addr, u64 supp_rates); 900 u8 *addr, u64 supp_rates);
896int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason); 901int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason);
897int ieee80211_sta_disassociate(struct net_device *dev, u16 reason); 902int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason);
898void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, 903void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
899 u32 changed); 904 u32 changed);
900u32 ieee80211_reset_erp_info(struct net_device *dev); 905u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
901int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie, 906int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
902 struct ieee80211_ht_info *ht_info); 907 struct ieee80211_ht_info *ht_info);
903int ieee80211_ht_addt_info_ie_to_ht_bss_info( 908int ieee80211_ht_addt_info_ie_to_ht_bss_info(
904 struct ieee80211_ht_addt_info *ht_add_info_ie, 909 struct ieee80211_ht_addt_info *ht_add_info_ie,
905 struct ieee80211_ht_bss_info *bss_info); 910 struct ieee80211_ht_bss_info *bss_info);
906void ieee80211_send_addba_request(struct net_device *dev, const u8 *da, 911void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, const u8 *da,
907 u16 tid, u8 dialog_token, u16 start_seq_num, 912 u16 tid, u8 dialog_token, u16 start_seq_num,
908 u16 agg_size, u16 timeout); 913 u16 agg_size, u16 timeout);
909void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, 914void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, const u8 *da, u16 tid,
910 u16 initiator, u16 reason_code); 915 u16 initiator, u16 reason_code);
911void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn); 916void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn);
912 917
913void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da, 918void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
914 u16 tid, u16 initiator, u16 reason); 919 u16 tid, u16 initiator, u16 reason);
915void sta_addba_resp_timer_expired(unsigned long data); 920void sta_addba_resp_timer_expired(unsigned long data);
916void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr); 921void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr);
917u64 ieee80211_sta_get_rates(struct ieee80211_local *local, 922u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
918 struct ieee802_11_elems *elems, 923 struct ieee802_11_elems *elems,
919 enum ieee80211_band band); 924 enum ieee80211_band band);
920void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, 925void ieee80211_sta_tx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
921 int encrypt); 926 int encrypt);
922void ieee802_11_parse_elems(u8 *start, size_t len, 927void ieee802_11_parse_elems(u8 *start, size_t len,
923 struct ieee802_11_elems *elems); 928 struct ieee802_11_elems *elems);
924 929
925#ifdef CONFIG_MAC80211_MESH 930#ifdef CONFIG_MAC80211_MESH
926void ieee80211_start_mesh(struct net_device *dev); 931void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
927#else 932#else
928static inline void ieee80211_start_mesh(struct net_device *dev) 933static inline void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
929{} 934{}
930#endif 935#endif
931 936
@@ -936,7 +941,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
936 struct vif_params *params); 941 struct vif_params *params);
937int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, 942int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
938 enum ieee80211_if_types type); 943 enum ieee80211_if_types type);
939void ieee80211_if_remove(struct net_device *dev); 944void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);
940void ieee80211_remove_interfaces(struct ieee80211_local *local); 945void ieee80211_remove_interfaces(struct ieee80211_local *local);
941 946
942/* tx handling */ 947/* tx handling */
@@ -954,7 +959,7 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
954 enum ieee80211_if_types type); 959 enum ieee80211_if_types type);
955int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, 960int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
956 int rate, int erp, int short_preamble); 961 int rate, int erp, int short_preamble);
957void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, 962void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
958 struct ieee80211_hdr *hdr); 963 struct ieee80211_hdr *hdr);
959 964
960#ifdef CONFIG_MAC80211_NOINLINE 965#ifdef CONFIG_MAC80211_NOINLINE
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 610ed1d9893a..4a623b8e91fd 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -56,7 +56,7 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
56 case IEEE80211_IF_TYPE_MESH_POINT: 56 case IEEE80211_IF_TYPE_MESH_POINT:
57 /* Allow compiler to elide mesh_rmc_free call. */ 57 /* Allow compiler to elide mesh_rmc_free call. */
58 if (ieee80211_vif_is_mesh(&sdata->vif)) 58 if (ieee80211_vif_is_mesh(&sdata->vif))
59 mesh_rmc_free(dev); 59 mesh_rmc_free(sdata);
60 /* fall through */ 60 /* fall through */
61 case IEEE80211_IF_TYPE_STA: 61 case IEEE80211_IF_TYPE_STA:
62 case IEEE80211_IF_TYPE_IBSS: 62 case IEEE80211_IF_TYPE_IBSS:
@@ -241,15 +241,13 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
241 return ret; 241 return ret;
242} 242}
243 243
244void ieee80211_if_remove(struct net_device *dev) 244void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
245{ 245{
246 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
247
248 ASSERT_RTNL(); 246 ASSERT_RTNL();
249 247
250 list_del_rcu(&sdata->list); 248 list_del_rcu(&sdata->list);
251 synchronize_rcu(); 249 synchronize_rcu();
252 unregister_netdevice(dev); 250 unregister_netdevice(sdata->dev);
253} 251}
254 252
255/* 253/*
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index aa5a191598c9..396cfb2d0f46 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -187,9 +187,15 @@ static int ieee80211_open(struct net_device *dev)
187 u32 changed = 0; 187 u32 changed = 0;
188 int res; 188 int res;
189 bool need_hw_reconfig = 0; 189 bool need_hw_reconfig = 0;
190 u8 null_addr[ETH_ALEN] = {0};
190 191
191 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 192 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
192 193
194 /* fail early if user set an invalid address */
195 if (compare_ether_addr(dev->dev_addr, null_addr) &&
196 !is_valid_ether_addr(dev->dev_addr))
197 return -EADDRNOTAVAIL;
198
193 /* we hold the RTNL here so can safely walk the list */ 199 /* we hold the RTNL here so can safely walk the list */
194 list_for_each_entry(nsdata, &local->interfaces, list) { 200 list_for_each_entry(nsdata, &local->interfaces, list) {
195 struct net_device *ndev = nsdata->dev; 201 struct net_device *ndev = nsdata->dev;
@@ -270,6 +276,36 @@ static int ieee80211_open(struct net_device *dev)
270 ieee80211_led_radio(local, local->hw.conf.radio_enabled); 276 ieee80211_led_radio(local, local->hw.conf.radio_enabled);
271 } 277 }
272 278
279 /*
280 * Check all interfaces and copy the hopefully now-present
281 * MAC address to those that have the special null one.
282 */
283 list_for_each_entry(nsdata, &local->interfaces, list) {
284 struct net_device *ndev = nsdata->dev;
285
286 /*
287 * No need to check netif_running since we do not allow
288 * it to start up with this invalid address.
289 */
290 if (compare_ether_addr(null_addr, ndev->dev_addr) == 0)
291 memcpy(ndev->dev_addr,
292 local->hw.wiphy->perm_addr,
293 ETH_ALEN);
294 }
295
296 if (compare_ether_addr(null_addr, local->mdev->dev_addr) == 0)
297 memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr,
298 ETH_ALEN);
299
300 /*
301 * Validate the MAC address for this device.
302 */
303 if (!is_valid_ether_addr(dev->dev_addr)) {
304 if (!local->open_count && local->ops->stop)
305 local->ops->stop(local_to_hw(local));
306 return -EADDRNOTAVAIL;
307 }
308
273 switch (sdata->vif.type) { 309 switch (sdata->vif.type) {
274 case IEEE80211_IF_TYPE_VLAN: 310 case IEEE80211_IF_TYPE_VLAN:
275 /* no need to tell driver */ 311 /* no need to tell driver */
@@ -311,8 +347,8 @@ static int ieee80211_open(struct net_device *dev)
311 goto err_stop; 347 goto err_stop;
312 348
313 if (ieee80211_vif_is_mesh(&sdata->vif)) 349 if (ieee80211_vif_is_mesh(&sdata->vif))
314 ieee80211_start_mesh(sdata->dev); 350 ieee80211_start_mesh(sdata);
315 changed |= ieee80211_reset_erp_info(dev); 351 changed |= ieee80211_reset_erp_info(sdata);
316 ieee80211_bss_info_change_notify(sdata, changed); 352 ieee80211_bss_info_change_notify(sdata, changed);
317 ieee80211_enable_keys(sdata); 353 ieee80211_enable_keys(sdata);
318 354
@@ -412,7 +448,7 @@ static int ieee80211_stop(struct net_device *dev)
412 448
413 list_for_each_entry_rcu(sta, &local->sta_list, list) { 449 list_for_each_entry_rcu(sta, &local->sta_list, list) {
414 if (sta->sdata == sdata) 450 if (sta->sdata == sdata)
415 ieee80211_sta_tear_down_BA_sessions(dev, sta->addr); 451 ieee80211_sta_tear_down_BA_sessions(sdata, sta->addr);
416 } 452 }
417 453
418 rcu_read_unlock(); 454 rcu_read_unlock();
@@ -503,7 +539,7 @@ static int ieee80211_stop(struct net_device *dev)
503 /* fall through */ 539 /* fall through */
504 case IEEE80211_IF_TYPE_STA: 540 case IEEE80211_IF_TYPE_STA:
505 case IEEE80211_IF_TYPE_IBSS: 541 case IEEE80211_IF_TYPE_IBSS:
506 sdata->u.sta.state = IEEE80211_DISABLED; 542 sdata->u.sta.state = IEEE80211_STA_MLME_DISABLED;
507 memset(sdata->u.sta.bssid, 0, ETH_ALEN); 543 memset(sdata->u.sta.bssid, 0, ETH_ALEN);
508 del_timer_sync(&sdata->u.sta.timer); 544 del_timer_sync(&sdata->u.sta.timer);
509 /* 545 /*
@@ -562,7 +598,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
562 struct ieee80211_local *local = hw_to_local(hw); 598 struct ieee80211_local *local = hw_to_local(hw);
563 struct sta_info *sta; 599 struct sta_info *sta;
564 struct ieee80211_sub_if_data *sdata; 600 struct ieee80211_sub_if_data *sdata;
565 u16 start_seq_num = 0; 601 u16 start_seq_num;
566 u8 *state; 602 u8 *state;
567 int ret; 603 int ret;
568 DECLARE_MAC_BUF(mac); 604 DECLARE_MAC_BUF(mac);
@@ -642,6 +678,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
642 * call back right away, it must see that the flow has begun */ 678 * call back right away, it must see that the flow has begun */
643 *state |= HT_ADDBA_REQUESTED_MSK; 679 *state |= HT_ADDBA_REQUESTED_MSK;
644 680
681 /* This is slightly racy because the queue isn't stopped */
682 start_seq_num = sta->tid_seq[tid];
683
645 if (local->ops->ampdu_action) 684 if (local->ops->ampdu_action)
646 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START, 685 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
647 ra, tid, &start_seq_num); 686 ra, tid, &start_seq_num);
@@ -670,7 +709,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
670 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; 709 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
671 710
672 711
673 ieee80211_send_addba_request(sta->sdata->dev, ra, tid, 712 ieee80211_send_addba_request(sta->sdata, ra, tid,
674 sta->ampdu_mlme.tid_tx[tid]->dialog_token, 713 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
675 sta->ampdu_mlme.tid_tx[tid]->ssn, 714 sta->ampdu_mlme.tid_tx[tid]->ssn,
676 0x40, 5000); 715 0x40, 5000);
@@ -853,7 +892,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
853 } 892 }
854 893
855 if (*state & HT_AGG_STATE_INITIATOR_MSK) 894 if (*state & HT_AGG_STATE_INITIATOR_MSK)
856 ieee80211_send_delba(sta->sdata->dev, ra, tid, 895 ieee80211_send_delba(sta->sdata, ra, tid,
857 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); 896 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
858 897
859 agg_queue = sta->tid_to_tx_q[tid]; 898 agg_queue = sta->tid_to_tx_q[tid];
@@ -975,6 +1014,8 @@ void ieee80211_if_setup(struct net_device *dev)
975 dev->open = ieee80211_open; 1014 dev->open = ieee80211_open;
976 dev->stop = ieee80211_stop; 1015 dev->stop = ieee80211_stop;
977 dev->destructor = free_netdev; 1016 dev->destructor = free_netdev;
1017 /* we will validate the address ourselves in ->open */
1018 dev->validate_addr = NULL;
978} 1019}
979 1020
980/* everything else */ 1021/* everything else */
@@ -1162,10 +1203,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
1162 changed); 1203 changed);
1163} 1204}
1164 1205
1165u32 ieee80211_reset_erp_info(struct net_device *dev) 1206u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
1166{ 1207{
1167 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1168
1169 sdata->bss_conf.use_cts_prot = 0; 1208 sdata->bss_conf.use_cts_prot = 0;
1170 sdata->bss_conf.use_short_preamble = 0; 1209 sdata->bss_conf.use_short_preamble = 0;
1171 return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE; 1210 return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE;
@@ -1244,9 +1283,10 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
1244 struct ieee80211_key *key, 1283 struct ieee80211_key *key,
1245 struct sk_buff *skb) 1284 struct sk_buff *skb)
1246{ 1285{
1247 int hdrlen, iv_len, mic_len; 1286 unsigned int hdrlen, iv_len, mic_len;
1287 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1248 1288
1249 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1289 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1250 1290
1251 if (!key) 1291 if (!key)
1252 goto no_key; 1292 goto no_key;
@@ -1268,24 +1308,20 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
1268 goto no_key; 1308 goto no_key;
1269 } 1309 }
1270 1310
1271 if (skb->len >= mic_len && 1311 if (skb->len >= hdrlen + mic_len &&
1272 !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 1312 !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
1273 skb_trim(skb, skb->len - mic_len); 1313 skb_trim(skb, skb->len - mic_len);
1274 if (skb->len >= iv_len && skb->len > hdrlen) { 1314 if (skb->len >= hdrlen + iv_len) {
1275 memmove(skb->data + iv_len, skb->data, hdrlen); 1315 memmove(skb->data + iv_len, skb->data, hdrlen);
1276 skb_pull(skb, iv_len); 1316 hdr = (struct ieee80211_hdr *)skb_pull(skb, iv_len);
1277 } 1317 }
1278 1318
1279no_key: 1319no_key:
1280 { 1320 if (ieee80211_is_data_qos(hdr->frame_control)) {
1281 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1321 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1282 u16 fc = le16_to_cpu(hdr->frame_control); 1322 memmove(skb->data + IEEE80211_QOS_CTL_LEN, skb->data,
1283 if ((fc & 0x8C) == 0x88) /* QoS Control Field */ { 1323 hdrlen - IEEE80211_QOS_CTL_LEN);
1284 fc &= ~IEEE80211_STYPE_QOS_DATA; 1324 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
1285 hdr->frame_control = cpu_to_le16(fc);
1286 memmove(skb->data + 2, skb->data, hdrlen - 2);
1287 skb_pull(skb, 2);
1288 }
1289 } 1325 }
1290} 1326}
1291 1327
@@ -1403,7 +1439,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
1403 tid = qc[0] & 0xf; 1439 tid = qc[0] & 0xf;
1404 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) 1440 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
1405 & IEEE80211_SCTL_SEQ); 1441 & IEEE80211_SCTL_SEQ);
1406 ieee80211_send_bar(sta->sdata->dev, hdr->addr1, 1442 ieee80211_send_bar(sta->sdata, hdr->addr1,
1407 tid, ssn); 1443 tid, ssn);
1408 } 1444 }
1409 } 1445 }
@@ -1639,6 +1675,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1639 } 1675 }
1640 } 1676 }
1641 1677
1678 /* if low-level driver supports AP, we also support VLAN */
1679 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP))
1680 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
1681
1682 /* mac80211 always supports monitor */
1683 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
1684
1642 result = wiphy_register(local->hw.wiphy); 1685 result = wiphy_register(local->hw.wiphy);
1643 if (result < 0) 1686 if (result < 0)
1644 return result; 1687 return result;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 35f2f95f2fa7..3ccb3599c04f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -39,14 +39,13 @@ void ieee80211s_stop(void)
39 * mesh_matches_local - check if the config of a mesh point matches ours 39 * mesh_matches_local - check if the config of a mesh point matches ours
40 * 40 *
41 * @ie: information elements of a management frame from the mesh peer 41 * @ie: information elements of a management frame from the mesh peer
42 * @dev: local mesh interface 42 * @sdata: local mesh subif
43 * 43 *
44 * This function checks if the mesh configuration of a mesh point matches the 44 * This function checks if the mesh configuration of a mesh point matches the
45 * local mesh configuration, i.e. if both nodes belong to the same mesh network. 45 * local mesh configuration, i.e. if both nodes belong to the same mesh network.
46 */ 46 */
47bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev) 47bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata)
48{ 48{
49 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
50 struct ieee80211_if_sta *sta = &sdata->u.sta; 49 struct ieee80211_if_sta *sta = &sdata->u.sta;
51 50
52 /* 51 /*
@@ -73,10 +72,8 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev)
73 * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links 72 * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links
74 * 73 *
75 * @ie: information elements of a management frame from the mesh peer 74 * @ie: information elements of a management frame from the mesh peer
76 * @dev: local mesh interface
77 */ 75 */
78bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie, 76bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
79 struct net_device *dev)
80{ 77{
81 return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0; 78 return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0;
82} 79}
@@ -111,9 +108,8 @@ void mesh_ids_set_default(struct ieee80211_if_sta *sta)
111 memcpy(sta->mesh_cc_id, def_id, 4); 108 memcpy(sta->mesh_cc_id, def_id, 4);
112} 109}
113 110
114int mesh_rmc_init(struct net_device *dev) 111int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
115{ 112{
116 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
117 int i; 113 int i;
118 114
119 sdata->u.sta.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); 115 sdata->u.sta.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL);
@@ -125,9 +121,8 @@ int mesh_rmc_init(struct net_device *dev)
125 return 0; 121 return 0;
126} 122}
127 123
128void mesh_rmc_free(struct net_device *dev) 124void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
129{ 125{
130 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
131 struct mesh_rmc *rmc = sdata->u.sta.rmc; 126 struct mesh_rmc *rmc = sdata->u.sta.rmc;
132 struct rmc_entry *p, *n; 127 struct rmc_entry *p, *n;
133 int i; 128 int i;
@@ -158,9 +153,8 @@ void mesh_rmc_free(struct net_device *dev)
158 * it. 153 * it.
159 */ 154 */
160int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, 155int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
161 struct net_device *dev) 156 struct ieee80211_sub_if_data *sdata)
162{ 157{
163 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
164 struct mesh_rmc *rmc = sdata->u.sta.rmc; 158 struct mesh_rmc *rmc = sdata->u.sta.rmc;
165 u32 seqnum = 0; 159 u32 seqnum = 0;
166 int entries = 0; 160 int entries = 0;
@@ -194,10 +188,9 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
194 return 0; 188 return 0;
195} 189}
196 190
197void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev) 191void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
198{ 192{
199 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 193 struct ieee80211_local *local = sdata->local;
200 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
201 struct ieee80211_supported_band *sband; 194 struct ieee80211_supported_band *sband;
202 u8 *pos; 195 u8 *pos;
203 int len, i, rate; 196 int len, i, rate;
@@ -262,10 +255,10 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev)
262 return; 255 return;
263} 256}
264 257
265u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl) 258u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl)
266{ 259{
267 /* Use last four bytes of hw addr and interface index as hash index */ 260 /* Use last four bytes of hw addr and interface index as hash index */
268 return jhash_2words(*(u32 *)(addr+2), dev->ifindex, tbl->hash_rnd) 261 return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
269 & tbl->hash_mask; 262 & tbl->hash_mask;
270} 263}
271 264
@@ -434,7 +427,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
434 ifsta->preq_id = 0; 427 ifsta->preq_id = 0;
435 ifsta->dsn = 0; 428 ifsta->dsn = 0;
436 atomic_set(&ifsta->mpaths, 0); 429 atomic_set(&ifsta->mpaths, 0);
437 mesh_rmc_init(sdata->dev); 430 mesh_rmc_init(sdata);
438 ifsta->last_preq = jiffies; 431 ifsta->last_preq = jiffies;
439 /* Allocate all mesh structures when creating the first mesh interface. */ 432 /* Allocate all mesh structures when creating the first mesh interface. */
440 if (!mesh_allocated) 433 if (!mesh_allocated)
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 7495fbb0d211..84ff5d828fdb 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -47,7 +47,7 @@ enum mesh_path_flags {
47 * struct mesh_path - mac80211 mesh path structure 47 * struct mesh_path - mac80211 mesh path structure
48 * 48 *
49 * @dst: mesh path destination mac address 49 * @dst: mesh path destination mac address
50 * @dev: mesh path device 50 * @sdata: mesh subif
51 * @next_hop: mesh neighbor to which frames for this destination will be 51 * @next_hop: mesh neighbor to which frames for this destination will be
52 * forwarded 52 * forwarded
53 * @timer: mesh path discovery timer 53 * @timer: mesh path discovery timer
@@ -64,14 +64,14 @@ enum mesh_path_flags {
64 * @state_lock: mesh path state lock 64
65 * 65 *
66 * 66 *
67 * The combination of dst and dev is unique in the mesh path table. Since the 67 * The combination of dst and sdata is unique in the mesh path table. Since the
68 * next_hop STA is only protected by RCU as well, deleting the STA must also 68 * next_hop STA is only protected by RCU as well, deleting the STA must also
69 * remove/substitute the mesh_path structure and wait until that is no longer 69 * remove/substitute the mesh_path structure and wait until that is no longer
70 * reachable before destroying the STA completely. 70 * reachable before destroying the STA completely.
71 */ 71 */
72struct mesh_path { 72struct mesh_path {
73 u8 dst[ETH_ALEN]; 73 u8 dst[ETH_ALEN];
74 struct net_device *dev; 74 struct ieee80211_sub_if_data *sdata;
75 struct sta_info *next_hop; 75 struct sta_info *next_hop;
76 struct timer_list timer; 76 struct timer_list timer;
77 struct sk_buff_head frame_queue; 77 struct sk_buff_head frame_queue;
@@ -203,59 +203,66 @@ int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
203int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, 203int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
204 struct ieee80211_sub_if_data *sdata); 204 struct ieee80211_sub_if_data *sdata);
205int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, 205int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
206 struct net_device *dev); 206 struct ieee80211_sub_if_data *sdata);
207bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev); 207bool mesh_matches_local(struct ieee802_11_elems *ie,
208 struct ieee80211_sub_if_data *sdata);
208void mesh_ids_set_default(struct ieee80211_if_sta *sta); 209void mesh_ids_set_default(struct ieee80211_if_sta *sta);
209void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev); 210void mesh_mgmt_ies_add(struct sk_buff *skb,
210void mesh_rmc_free(struct net_device *dev); 211 struct ieee80211_sub_if_data *sdata);
211int mesh_rmc_init(struct net_device *dev); 212void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
213int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
212void ieee80211s_init(void); 214void ieee80211s_init(void);
213void ieee80211s_stop(void); 215void ieee80211s_stop(void);
214void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); 216void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
215 217
216/* Mesh paths */ 218/* Mesh paths */
217int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev); 219int mesh_nexthop_lookup(struct sk_buff *skb,
218void mesh_path_start_discovery(struct net_device *dev); 220 struct ieee80211_sub_if_data *sdata);
219struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev); 221void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
220struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev); 222struct mesh_path *mesh_path_lookup(u8 *dst,
223 struct ieee80211_sub_if_data *sdata);
224struct mesh_path *mesh_path_lookup_by_idx(int idx,
225 struct ieee80211_sub_if_data *sdata);
221void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); 226void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
222void mesh_path_expire(struct net_device *dev); 227void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
223void mesh_path_flush(struct net_device *dev); 228void mesh_path_flush(struct ieee80211_sub_if_data *sdata);
224void mesh_rx_path_sel_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, 229void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
225 size_t len); 230 struct ieee80211_mgmt *mgmt, size_t len);
226int mesh_path_add(u8 *dst, struct net_device *dev); 231int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
227/* Mesh plinks */ 232/* Mesh plinks */
228void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, 233void mesh_neighbour_update(u8 *hw_addr, u64 rates,
229 bool add); 234 struct ieee80211_sub_if_data *sdata, bool add);
230bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie, 235bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
231 struct net_device *dev);
232void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); 236void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
233void mesh_plink_broken(struct sta_info *sta); 237void mesh_plink_broken(struct sta_info *sta);
234void mesh_plink_deactivate(struct sta_info *sta); 238void mesh_plink_deactivate(struct sta_info *sta);
235int mesh_plink_open(struct sta_info *sta); 239int mesh_plink_open(struct sta_info *sta);
236int mesh_plink_close(struct sta_info *sta); 240int mesh_plink_close(struct sta_info *sta);
237void mesh_plink_block(struct sta_info *sta); 241void mesh_plink_block(struct sta_info *sta);
238void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, 242void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
239 size_t len, struct ieee80211_rx_status *rx_status); 243 struct ieee80211_mgmt *mgmt, size_t len,
244 struct ieee80211_rx_status *rx_status);
240 245
241/* Private interfaces */ 246/* Private interfaces */
242/* Mesh tables */ 247/* Mesh tables */
243struct mesh_table *mesh_table_alloc(int size_order); 248struct mesh_table *mesh_table_alloc(int size_order);
244void mesh_table_free(struct mesh_table *tbl, bool free_leafs); 249void mesh_table_free(struct mesh_table *tbl, bool free_leafs);
245struct mesh_table *mesh_table_grow(struct mesh_table *tbl); 250struct mesh_table *mesh_table_grow(struct mesh_table *tbl);
246u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl); 251u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
252 struct mesh_table *tbl);
247/* Mesh paths */ 253/* Mesh paths */
248int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra, 254int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra,
249 struct net_device *dev); 255 struct ieee80211_sub_if_data *sdata);
250void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); 256void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
251void mesh_path_flush_pending(struct mesh_path *mpath); 257void mesh_path_flush_pending(struct mesh_path *mpath);
252void mesh_path_tx_pending(struct mesh_path *mpath); 258void mesh_path_tx_pending(struct mesh_path *mpath);
253int mesh_pathtbl_init(void); 259int mesh_pathtbl_init(void);
254void mesh_pathtbl_unregister(void); 260void mesh_pathtbl_unregister(void);
255int mesh_path_del(u8 *addr, struct net_device *dev); 261int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
256void mesh_path_timer(unsigned long data); 262void mesh_path_timer(unsigned long data);
257void mesh_path_flush_by_nexthop(struct sta_info *sta); 263void mesh_path_flush_by_nexthop(struct sta_info *sta);
258void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev); 264void mesh_path_discard_frame(struct sk_buff *skb,
265 struct ieee80211_sub_if_data *sdata);
259 266
260#ifdef CONFIG_MAC80211_MESH 267#ifdef CONFIG_MAC80211_MESH
261extern int mesh_allocated; 268extern int mesh_allocated;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 08aca446ca01..eeb0ce2d5d37 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -82,9 +82,9 @@ enum mpath_frame_type {
82static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, 82static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
83 u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst, 83 u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst,
84 __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime, 84 __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime,
85 __le32 metric, __le32 preq_id, struct net_device *dev) 85 __le32 metric, __le32 preq_id, struct ieee80211_sub_if_data *sdata)
86{ 86{
87 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 87 struct ieee80211_local *local = sdata->local;
88 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 88 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
89 struct ieee80211_mgmt *mgmt; 89 struct ieee80211_mgmt *mgmt;
90 u8 *pos; 90 u8 *pos;
@@ -99,11 +99,11 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
99 mgmt = (struct ieee80211_mgmt *) 99 mgmt = (struct ieee80211_mgmt *)
100 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); 100 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
101 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); 101 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
102 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 102 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
103 IEEE80211_STYPE_ACTION); 103 IEEE80211_STYPE_ACTION);
104 104
105 memcpy(mgmt->da, da, ETH_ALEN); 105 memcpy(mgmt->da, da, ETH_ALEN);
106 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 106 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
107 /* BSSID is left zeroed, wildcard value */ 107 /* BSSID is left zeroed, wildcard value */
108 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 108 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
109 mgmt->u.action.u.mesh_action.action_code = action; 109 mgmt->u.action.u.mesh_action.action_code = action;
@@ -149,7 +149,7 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
149 pos += ETH_ALEN; 149 pos += ETH_ALEN;
150 memcpy(pos, &dst_dsn, 4); 150 memcpy(pos, &dst_dsn, 4);
151 151
152 ieee80211_sta_tx(dev, skb, 0); 152 ieee80211_sta_tx(sdata, skb, 0);
153 return 0; 153 return 0;
154} 154}
155 155
@@ -161,9 +161,9 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
161 * @ra: node this frame is addressed to 161 * @ra: node this frame is addressed to
162 */ 162 */
163int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, 163int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
164 struct net_device *dev) 164 struct ieee80211_sub_if_data *sdata)
165{ 165{
166 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 166 struct ieee80211_local *local = sdata->local;
167 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 167 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
168 struct ieee80211_mgmt *mgmt; 168 struct ieee80211_mgmt *mgmt;
169 u8 *pos; 169 u8 *pos;
@@ -178,11 +178,11 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
178 mgmt = (struct ieee80211_mgmt *) 178 mgmt = (struct ieee80211_mgmt *)
179 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); 179 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
180 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); 180 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
181 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 181 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
182 IEEE80211_STYPE_ACTION); 182 IEEE80211_STYPE_ACTION);
183 183
184 memcpy(mgmt->da, ra, ETH_ALEN); 184 memcpy(mgmt->da, ra, ETH_ALEN);
185 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 185 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
186 /* BSSID is left zeroed, wildcard value */ 186 /* BSSID is left zeroed, wildcard value */
187 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 187 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
188 mgmt->u.action.u.mesh_action.action_code = MPATH_PERR; 188 mgmt->u.action.u.mesh_action.action_code = MPATH_PERR;
@@ -198,7 +198,7 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
198 pos += ETH_ALEN; 198 pos += ETH_ALEN;
199 memcpy(pos, &dst_dsn, 4); 199 memcpy(pos, &dst_dsn, 4);
200 200
201 ieee80211_sta_tx(dev, skb, 0); 201 ieee80211_sta_tx(sdata, skb, 0);
202 return 0; 202 return 0;
203} 203}
204 204
@@ -233,7 +233,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
233/** 233/**
234 * hwmp_route_info_get - Update routing info to originator and transmitter 234 * hwmp_route_info_get - Update routing info to originator and transmitter
235 * 235 *
236 * @dev: local mesh interface 236 * @sdata: local mesh subif
237 * @mgmt: mesh management frame 237 * @mgmt: mesh management frame
238 * @hwmp_ie: hwmp information element (PREP or PREQ) 238 * @hwmp_ie: hwmp information element (PREP or PREQ)
239 * 239 *
@@ -246,11 +246,11 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
246 * Notes: this function is the only place (besides user-provided info) where 246 * Notes: this function is the only place (besides user-provided info) where
247 * path routing information is updated. 247 * path routing information is updated.
248 */ 248 */
249static u32 hwmp_route_info_get(struct net_device *dev, 249static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
250 struct ieee80211_mgmt *mgmt, 250 struct ieee80211_mgmt *mgmt,
251 u8 *hwmp_ie) 251 u8 *hwmp_ie)
252{ 252{
253 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 253 struct ieee80211_local *local = sdata->local;
254 struct mesh_path *mpath; 254 struct mesh_path *mpath;
255 struct sta_info *sta; 255 struct sta_info *sta;
256 bool fresh_info; 256 bool fresh_info;
@@ -301,14 +301,14 @@ static u32 hwmp_route_info_get(struct net_device *dev,
301 new_metric = MAX_METRIC; 301 new_metric = MAX_METRIC;
302 exp_time = TU_TO_EXP_TIME(orig_lifetime); 302 exp_time = TU_TO_EXP_TIME(orig_lifetime);
303 303
304 if (memcmp(orig_addr, dev->dev_addr, ETH_ALEN) == 0) { 304 if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
305 /* This MP is the originator, we are not interested in this 305 /* This MP is the originator, we are not interested in this
306 * frame, except for updating transmitter's path info. 306 * frame, except for updating transmitter's path info.
307 */ 307 */
308 process = false; 308 process = false;
309 fresh_info = false; 309 fresh_info = false;
310 } else { 310 } else {
311 mpath = mesh_path_lookup(orig_addr, dev); 311 mpath = mesh_path_lookup(orig_addr, sdata);
312 if (mpath) { 312 if (mpath) {
313 spin_lock_bh(&mpath->state_lock); 313 spin_lock_bh(&mpath->state_lock);
314 if (mpath->flags & MESH_PATH_FIXED) 314 if (mpath->flags & MESH_PATH_FIXED)
@@ -324,8 +324,8 @@ static u32 hwmp_route_info_get(struct net_device *dev,
324 } 324 }
325 } 325 }
326 } else { 326 } else {
327 mesh_path_add(orig_addr, dev); 327 mesh_path_add(orig_addr, sdata);
328 mpath = mesh_path_lookup(orig_addr, dev); 328 mpath = mesh_path_lookup(orig_addr, sdata);
329 if (!mpath) { 329 if (!mpath) {
330 rcu_read_unlock(); 330 rcu_read_unlock();
331 return 0; 331 return 0;
@@ -357,7 +357,7 @@ static u32 hwmp_route_info_get(struct net_device *dev,
357 else { 357 else {
358 fresh_info = true; 358 fresh_info = true;
359 359
360 mpath = mesh_path_lookup(ta, dev); 360 mpath = mesh_path_lookup(ta, sdata);
361 if (mpath) { 361 if (mpath) {
362 spin_lock_bh(&mpath->state_lock); 362 spin_lock_bh(&mpath->state_lock);
363 if ((mpath->flags & MESH_PATH_FIXED) || 363 if ((mpath->flags & MESH_PATH_FIXED) ||
@@ -365,8 +365,8 @@ static u32 hwmp_route_info_get(struct net_device *dev,
365 (last_hop_metric > mpath->metric))) 365 (last_hop_metric > mpath->metric)))
366 fresh_info = false; 366 fresh_info = false;
367 } else { 367 } else {
368 mesh_path_add(ta, dev); 368 mesh_path_add(ta, sdata);
369 mpath = mesh_path_lookup(ta, dev); 369 mpath = mesh_path_lookup(ta, sdata);
370 if (!mpath) { 370 if (!mpath) {
371 rcu_read_unlock(); 371 rcu_read_unlock();
372 return 0; 372 return 0;
@@ -392,10 +392,9 @@ static u32 hwmp_route_info_get(struct net_device *dev,
392 return process ? new_metric : 0; 392 return process ? new_metric : 0;
393} 393}
394 394
395static void hwmp_preq_frame_process(struct net_device *dev, 395static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
396 struct ieee80211_mgmt *mgmt, 396 struct ieee80211_mgmt *mgmt,
397 u8 *preq_elem, u32 metric) { 397 u8 *preq_elem, u32 metric) {
398 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
399 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 398 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
400 struct mesh_path *mpath; 399 struct mesh_path *mpath;
401 u8 *dst_addr, *orig_addr; 400 u8 *dst_addr, *orig_addr;
@@ -411,7 +410,7 @@ static void hwmp_preq_frame_process(struct net_device *dev,
411 orig_dsn = PREQ_IE_ORIG_DSN(preq_elem); 410 orig_dsn = PREQ_IE_ORIG_DSN(preq_elem);
412 dst_flags = PREQ_IE_DST_F(preq_elem); 411 dst_flags = PREQ_IE_DST_F(preq_elem);
413 412
414 if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) { 413 if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
415 forward = false; 414 forward = false;
416 reply = true; 415 reply = true;
417 metric = 0; 416 metric = 0;
@@ -423,7 +422,7 @@ static void hwmp_preq_frame_process(struct net_device *dev,
423 } 422 }
424 } else { 423 } else {
425 rcu_read_lock(); 424 rcu_read_lock();
426 mpath = mesh_path_lookup(dst_addr, dev); 425 mpath = mesh_path_lookup(dst_addr, sdata);
427 if (mpath) { 426 if (mpath) {
428 if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || 427 if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
429 DSN_LT(mpath->dsn, dst_dsn)) { 428 DSN_LT(mpath->dsn, dst_dsn)) {
@@ -451,7 +450,7 @@ static void hwmp_preq_frame_process(struct net_device *dev,
451 cpu_to_le32(dst_dsn), 0, orig_addr, 450 cpu_to_le32(dst_dsn), 0, orig_addr,
452 cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl, 451 cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl,
453 cpu_to_le32(lifetime), cpu_to_le32(metric), 452 cpu_to_le32(lifetime), cpu_to_le32(metric),
454 0, dev); 453 0, sdata);
455 else 454 else
456 ifsta->mshstats.dropped_frames_ttl++; 455 ifsta->mshstats.dropped_frames_ttl++;
457 } 456 }
@@ -472,20 +471,19 @@ static void hwmp_preq_frame_process(struct net_device *dev,
472 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; 471 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
473 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, 472 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
474 cpu_to_le32(orig_dsn), dst_flags, dst_addr, 473 cpu_to_le32(orig_dsn), dst_flags, dst_addr,
475 cpu_to_le32(dst_dsn), dev->broadcast, 474 cpu_to_le32(dst_dsn), sdata->dev->broadcast,
476 hopcount, ttl, cpu_to_le32(lifetime), 475 hopcount, ttl, cpu_to_le32(lifetime),
477 cpu_to_le32(metric), cpu_to_le32(preq_id), 476 cpu_to_le32(metric), cpu_to_le32(preq_id),
478 dev); 477 sdata);
479 ifsta->mshstats.fwded_frames++; 478 ifsta->mshstats.fwded_frames++;
480 } 479 }
481} 480}
482 481
483 482
484static void hwmp_prep_frame_process(struct net_device *dev, 483static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
485 struct ieee80211_mgmt *mgmt, 484 struct ieee80211_mgmt *mgmt,
486 u8 *prep_elem, u32 metric) 485 u8 *prep_elem, u32 metric)
487{ 486{
488 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
489 struct mesh_path *mpath; 487 struct mesh_path *mpath;
490 u8 *dst_addr, *orig_addr; 488 u8 *dst_addr, *orig_addr;
491 u8 ttl, hopcount, flags; 489 u8 ttl, hopcount, flags;
@@ -499,7 +497,7 @@ static void hwmp_prep_frame_process(struct net_device *dev,
499 * replies 497 * replies
500 */ 498 */
501 dst_addr = PREP_IE_DST_ADDR(prep_elem); 499 dst_addr = PREP_IE_DST_ADDR(prep_elem);
502 if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) 500 if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0)
503 /* destination, no forwarding required */ 501 /* destination, no forwarding required */
504 return; 502 return;
505 503
@@ -510,7 +508,7 @@ static void hwmp_prep_frame_process(struct net_device *dev,
510 } 508 }
511 509
512 rcu_read_lock(); 510 rcu_read_lock();
513 mpath = mesh_path_lookup(dst_addr, dev); 511 mpath = mesh_path_lookup(dst_addr, sdata);
514 if (mpath) 512 if (mpath)
515 spin_lock_bh(&mpath->state_lock); 513 spin_lock_bh(&mpath->state_lock);
516 else 514 else
@@ -533,7 +531,7 @@ static void hwmp_prep_frame_process(struct net_device *dev,
533 cpu_to_le32(orig_dsn), 0, dst_addr, 531 cpu_to_le32(orig_dsn), 0, dst_addr,
534 cpu_to_le32(dst_dsn), mpath->next_hop->addr, hopcount, ttl, 532 cpu_to_le32(dst_dsn), mpath->next_hop->addr, hopcount, ttl,
535 cpu_to_le32(lifetime), cpu_to_le32(metric), 533 cpu_to_le32(lifetime), cpu_to_le32(metric),
536 0, dev); 534 0, sdata);
537 rcu_read_unlock(); 535 rcu_read_unlock();
538 sdata->u.sta.mshstats.fwded_frames++; 536 sdata->u.sta.mshstats.fwded_frames++;
539 return; 537 return;
@@ -544,7 +542,7 @@ fail:
544 return; 542 return;
545} 543}
546 544
547static void hwmp_perr_frame_process(struct net_device *dev, 545static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
548 struct ieee80211_mgmt *mgmt, u8 *perr_elem) 546 struct ieee80211_mgmt *mgmt, u8 *perr_elem)
549{ 547{
550 struct mesh_path *mpath; 548 struct mesh_path *mpath;
@@ -555,7 +553,7 @@ static void hwmp_perr_frame_process(struct net_device *dev,
555 dst_addr = PERR_IE_DST_ADDR(perr_elem); 553 dst_addr = PERR_IE_DST_ADDR(perr_elem);
556 dst_dsn = PERR_IE_DST_DSN(perr_elem); 554 dst_dsn = PERR_IE_DST_DSN(perr_elem);
557 rcu_read_lock(); 555 rcu_read_lock();
558 mpath = mesh_path_lookup(dst_addr, dev); 556 mpath = mesh_path_lookup(dst_addr, sdata);
559 if (mpath) { 557 if (mpath) {
560 spin_lock_bh(&mpath->state_lock); 558 spin_lock_bh(&mpath->state_lock);
561 if (mpath->flags & MESH_PATH_ACTIVE && 559 if (mpath->flags & MESH_PATH_ACTIVE &&
@@ -566,7 +564,7 @@ static void hwmp_perr_frame_process(struct net_device *dev,
566 mpath->dsn = dst_dsn; 564 mpath->dsn = dst_dsn;
567 spin_unlock_bh(&mpath->state_lock); 565 spin_unlock_bh(&mpath->state_lock);
568 mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn), 566 mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn),
569 dev->broadcast, dev); 567 sdata->dev->broadcast, sdata);
570 } else 568 } else
571 spin_unlock_bh(&mpath->state_lock); 569 spin_unlock_bh(&mpath->state_lock);
572 } 570 }
@@ -575,7 +573,7 @@ static void hwmp_perr_frame_process(struct net_device *dev,
575 573
576 574
577 575
578void mesh_rx_path_sel_frame(struct net_device *dev, 576void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
579 struct ieee80211_mgmt *mgmt, 577 struct ieee80211_mgmt *mgmt,
580 size_t len) 578 size_t len)
581{ 579{
@@ -592,25 +590,25 @@ void mesh_rx_path_sel_frame(struct net_device *dev,
592 if (!elems.preq || elems.preq_len != 37) 590 if (!elems.preq || elems.preq_len != 37)
593 /* Right now we support just 1 destination and no AE */ 591 /* Right now we support just 1 destination and no AE */
594 return; 592 return;
595 last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.preq); 593 last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq);
596 if (!last_hop_metric) 594 if (!last_hop_metric)
597 return; 595 return;
598 hwmp_preq_frame_process(dev, mgmt, elems.preq, last_hop_metric); 596 hwmp_preq_frame_process(sdata, mgmt, elems.preq, last_hop_metric);
599 break; 597 break;
600 case MPATH_PREP: 598 case MPATH_PREP:
601 if (!elems.prep || elems.prep_len != 31) 599 if (!elems.prep || elems.prep_len != 31)
602 /* Right now we support no AE */ 600 /* Right now we support no AE */
603 return; 601 return;
604 last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.prep); 602 last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep);
605 if (!last_hop_metric) 603 if (!last_hop_metric)
606 return; 604 return;
607 hwmp_prep_frame_process(dev, mgmt, elems.prep, last_hop_metric); 605 hwmp_prep_frame_process(sdata, mgmt, elems.prep, last_hop_metric);
608 break; 606 break;
609 case MPATH_PERR: 607 case MPATH_PERR:
610 if (!elems.perr || elems.perr_len != 12) 608 if (!elems.perr || elems.perr_len != 12)
611 /* Right now we support only one destination per PERR */ 609 /* Right now we support only one destination per PERR */
612 return; 610 return;
613 hwmp_perr_frame_process(dev, mgmt, elems.perr); 611 hwmp_perr_frame_process(sdata, mgmt, elems.perr);
614 default: 612 default:
615 return; 613 return;
616 } 614 }
@@ -628,8 +626,7 @@ void mesh_rx_path_sel_frame(struct net_device *dev,
628 */ 626 */
629static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) 627static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
630{ 628{
631 struct ieee80211_sub_if_data *sdata = 629 struct ieee80211_sub_if_data *sdata = mpath->sdata;
632 IEEE80211_DEV_TO_SUB_IF(mpath->dev);
633 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 630 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
634 struct mesh_preq_queue *preq_node; 631 struct mesh_preq_queue *preq_node;
635 632
@@ -672,12 +669,10 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
672/** 669/**
673 * mesh_path_start_discovery - launch a path discovery from the PREQ queue 670 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
674 * 671 *
675 * @dev: local mesh interface 672 * @sdata: local mesh subif
676 */ 673 */
677void mesh_path_start_discovery(struct net_device *dev) 674void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
678{ 675{
679 struct ieee80211_sub_if_data *sdata =
680 IEEE80211_DEV_TO_SUB_IF(dev);
681 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 676 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
682 struct mesh_preq_queue *preq_node; 677 struct mesh_preq_queue *preq_node;
683 struct mesh_path *mpath; 678 struct mesh_path *mpath;
@@ -699,7 +694,7 @@ void mesh_path_start_discovery(struct net_device *dev)
699 spin_unlock(&ifsta->mesh_preq_queue_lock); 694 spin_unlock(&ifsta->mesh_preq_queue_lock);
700 695
701 rcu_read_lock(); 696 rcu_read_lock();
702 mpath = mesh_path_lookup(preq_node->dst, dev); 697 mpath = mesh_path_lookup(preq_node->dst, sdata);
703 if (!mpath) 698 if (!mpath)
704 goto enddiscovery; 699 goto enddiscovery;
705 700
@@ -743,11 +738,11 @@ void mesh_path_start_discovery(struct net_device *dev)
743 dst_flags = MP_F_RF; 738 dst_flags = MP_F_RF;
744 739
745 spin_unlock_bh(&mpath->state_lock); 740 spin_unlock_bh(&mpath->state_lock);
746 mesh_path_sel_frame_tx(MPATH_PREQ, 0, dev->dev_addr, 741 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr,
747 cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst, 742 cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst,
748 cpu_to_le32(mpath->dsn), dev->broadcast, 0, 743 cpu_to_le32(mpath->dsn), sdata->dev->broadcast, 0,
749 ttl, cpu_to_le32(lifetime), 0, 744 ttl, cpu_to_le32(lifetime), 0,
750 cpu_to_le32(ifsta->preq_id++), dev); 745 cpu_to_le32(ifsta->preq_id++), sdata);
751 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); 746 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
752 747
753enddiscovery: 748enddiscovery:
@@ -759,7 +754,7 @@ enddiscovery:
759 * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame 754 * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame
760 * 755 *
761 * @skb: 802.11 frame to be sent 756 * @skb: 802.11 frame to be sent
762 * @dev: network device the frame will be sent through 757 * @sdata: network subif the frame will be sent through
763 * @fwd_frame: true if this frame was originally from a different host 758 * @fwd_frame: true if this frame was originally from a different host
764 * 759 *
765 * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is 760 * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
@@ -767,9 +762,9 @@ enddiscovery:
767 * sent when the path is resolved. This means the caller must not free the skb 762 * sent when the path is resolved. This means the caller must not free the skb
768 * in this case. 763 * in this case.
769 */ 764 */
770int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev) 765int mesh_nexthop_lookup(struct sk_buff *skb,
766 struct ieee80211_sub_if_data *sdata)
771{ 767{
772 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
773 struct sk_buff *skb_to_free = NULL; 768 struct sk_buff *skb_to_free = NULL;
774 struct mesh_path *mpath; 769 struct mesh_path *mpath;
775 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 770 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -777,11 +772,11 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev)
777 int err = 0; 772 int err = 0;
778 773
779 rcu_read_lock(); 774 rcu_read_lock();
780 mpath = mesh_path_lookup(dst_addr, dev); 775 mpath = mesh_path_lookup(dst_addr, sdata);
781 776
782 if (!mpath) { 777 if (!mpath) {
783 mesh_path_add(dst_addr, dev); 778 mesh_path_add(dst_addr, sdata);
784 mpath = mesh_path_lookup(dst_addr, dev); 779 mpath = mesh_path_lookup(dst_addr, sdata);
785 if (!mpath) { 780 if (!mpath) {
786 dev_kfree_skb(skb); 781 dev_kfree_skb(skb);
787 sdata->u.sta.mshstats.dropped_frames_no_route++; 782 sdata->u.sta.mshstats.dropped_frames_no_route++;
@@ -793,7 +788,8 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev)
793 if (mpath->flags & MESH_PATH_ACTIVE) { 788 if (mpath->flags & MESH_PATH_ACTIVE) {
794 if (time_after(jiffies, mpath->exp_time - 789 if (time_after(jiffies, mpath->exp_time -
795 msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time)) 790 msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time))
796 && !memcmp(dev->dev_addr, hdr->addr4, ETH_ALEN) 791 && !memcmp(sdata->dev->dev_addr, hdr->addr4,
792 ETH_ALEN)
797 && !(mpath->flags & MESH_PATH_RESOLVING) 793 && !(mpath->flags & MESH_PATH_RESOLVING)
798 && !(mpath->flags & MESH_PATH_FIXED)) { 794 && !(mpath->flags & MESH_PATH_FIXED)) {
799 mesh_queue_preq(mpath, 795 mesh_queue_preq(mpath,
@@ -815,7 +811,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev)
815 811
816 skb_queue_tail(&mpath->frame_queue, skb); 812 skb_queue_tail(&mpath->frame_queue, skb);
817 if (skb_to_free) 813 if (skb_to_free)
818 mesh_path_discard_frame(skb_to_free, dev); 814 mesh_path_discard_frame(skb_to_free, sdata);
819 err = -ENOENT; 815 err = -ENOENT;
820 } 816 }
821 817
@@ -835,7 +831,7 @@ void mesh_path_timer(unsigned long data)
835 if (!mpath) 831 if (!mpath)
836 goto endmpathtimer; 832 goto endmpathtimer;
837 spin_lock_bh(&mpath->state_lock); 833 spin_lock_bh(&mpath->state_lock);
838 sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev); 834 sdata = mpath->sdata;
839 if (mpath->flags & MESH_PATH_RESOLVED || 835 if (mpath->flags & MESH_PATH_RESOLVED ||
840 (!(mpath->flags & MESH_PATH_RESOLVING))) 836 (!(mpath->flags & MESH_PATH_RESOLVING)))
841 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); 837 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
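
The hunks above all follow the same shape: functions that used to take a struct net_device now take the ieee80211_sub_if_data, so the driver context is one dereference away (sdata->local, sdata->dev->dev_addr) instead of being recovered through wdev_priv(dev->ieee80211_ptr) on every call. A minimal stand-alone sketch of that call shape follows; the struct layouts are simplified stand-ins, not the real mac80211 definitions.

/*
 * Stand-in sketch of the net_device -> sdata parameter conversion.
 * Only the parameter-passing shape is the point here.
 */
#include <stdio.h>
#include <string.h>

struct local_stub { char phy_name[16]; };            /* stands in for ieee80211_local */
struct netdev_stub { unsigned char dev_addr[6]; };   /* stands in for net_device */

struct sdata_stub {                                  /* stands in for ieee80211_sub_if_data */
	struct local_stub *local;
	struct netdev_stub *dev;
};

/* New-style callee: everything it needs hangs off sdata. */
static void frame_tx(struct sdata_stub *sdata, const unsigned char *da)
{
	unsigned char sa[6];

	memcpy(sa, sdata->dev->dev_addr, sizeof(sa));   /* was dev->dev_addr */
	printf("tx on %s: %02x:... -> %02x:...\n",
	       sdata->local->phy_name, sa[0], da[0]);
}

int main(void)
{
	struct local_stub local = { "phy0" };
	struct netdev_stub ndev = { { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
	struct sdata_stub sdata = { &local, &ndev };
	const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	frame_tx(&sdata, bcast);
	return 0;
}
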
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 838ee60492ad..0a60f55f32ab 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -9,7 +9,6 @@
9 9
10#include <linux/etherdevice.h> 10#include <linux/etherdevice.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/netdevice.h>
13#include <linux/random.h> 12#include <linux/random.h>
14#include <linux/spinlock.h> 13#include <linux/spinlock.h>
15#include <linux/string.h> 14#include <linux/string.h>
@@ -62,13 +61,13 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
62/** 61/**
63 * mesh_path_lookup - look up a path in the mesh path table 62 * mesh_path_lookup - look up a path in the mesh path table
64 * @dst: hardware address (ETH_ALEN length) of destination 63 * @dst: hardware address (ETH_ALEN length) of destination
65 * @dev: local interface 64 * @sdata: local subif
66 * 65 *
67 * Returns: pointer to the mesh path structure, or NULL if not found 66 * Returns: pointer to the mesh path structure, or NULL if not found
68 * 67 *
 69 * Locking: must be called within an RCU read section. 68 * Locking: must be called within an RCU read section.
70 */ 69 */
71struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev) 70struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
72{ 71{
73 struct mesh_path *mpath; 72 struct mesh_path *mpath;
74 struct hlist_node *n; 73 struct hlist_node *n;
@@ -78,10 +77,10 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
78 77
79 tbl = rcu_dereference(mesh_paths); 78 tbl = rcu_dereference(mesh_paths);
80 79
81 bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)]; 80 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
82 hlist_for_each_entry_rcu(node, n, bucket, list) { 81 hlist_for_each_entry_rcu(node, n, bucket, list) {
83 mpath = node->mpath; 82 mpath = node->mpath;
84 if (mpath->dev == dev && 83 if (mpath->sdata == sdata &&
85 memcmp(dst, mpath->dst, ETH_ALEN) == 0) { 84 memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
86 if (MPATH_EXPIRED(mpath)) { 85 if (MPATH_EXPIRED(mpath)) {
87 spin_lock_bh(&mpath->state_lock); 86 spin_lock_bh(&mpath->state_lock);
@@ -98,13 +97,13 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
98/** 97/**
99 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index 98 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
100 * @idx: index 99 * @idx: index
101 * @dev: local interface, or NULL for all entries 100 * @sdata: local subif, or NULL for all entries
102 * 101 *
103 * Returns: pointer to the mesh path structure, or NULL if not found. 102 * Returns: pointer to the mesh path structure, or NULL if not found.
104 * 103 *
105 * Locking: must be called within an RCU read section. 104 * Locking: must be called within an RCU read section.
106 */ 105 */
107struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev) 106struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
108{ 107{
109 struct mpath_node *node; 108 struct mpath_node *node;
110 struct hlist_node *p; 109 struct hlist_node *p;
@@ -112,7 +111,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
112 int j = 0; 111 int j = 0;
113 112
114 for_each_mesh_entry(mesh_paths, p, node, i) { 113 for_each_mesh_entry(mesh_paths, p, node, i) {
115 if (dev && node->mpath->dev != dev) 114 if (sdata && node->mpath->sdata != sdata)
116 continue; 115 continue;
117 if (j++ == idx) { 116 if (j++ == idx) {
118 if (MPATH_EXPIRED(node->mpath)) { 117 if (MPATH_EXPIRED(node->mpath)) {
@@ -131,15 +130,14 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
131/** 130/**
132 * mesh_path_add - allocate and add a new path to the mesh path table 131 * mesh_path_add - allocate and add a new path to the mesh path table
133 * @addr: destination address of the path (ETH_ALEN length) 132 * @addr: destination address of the path (ETH_ALEN length)
134 * @dev: local interface 133 * @sdata: local subif
135 * 134 *
136 * Returns: 0 on success 135 * Returns: 0 on success
137 * 136 *
138 * State: the initial state of the new path is set to 0 137 * State: the initial state of the new path is set to 0
139 */ 138 */
140int mesh_path_add(u8 *dst, struct net_device *dev) 139int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
141{ 140{
142 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
143 struct mesh_path *mpath, *new_mpath; 141 struct mesh_path *mpath, *new_mpath;
144 struct mpath_node *node, *new_node; 142 struct mpath_node *node, *new_node;
145 struct hlist_head *bucket; 143 struct hlist_head *bucket;
@@ -148,7 +146,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
148 int err = 0; 146 int err = 0;
149 u32 hash_idx; 147 u32 hash_idx;
150 148
151 if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0) 149 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
152 /* never add ourselves as neighbours */ 150 /* never add ourselves as neighbours */
153 return -ENOTSUPP; 151 return -ENOTSUPP;
154 152
@@ -169,7 +167,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
169 167
170 read_lock(&pathtbl_resize_lock); 168 read_lock(&pathtbl_resize_lock);
171 memcpy(new_mpath->dst, dst, ETH_ALEN); 169 memcpy(new_mpath->dst, dst, ETH_ALEN);
172 new_mpath->dev = dev; 170 new_mpath->sdata = sdata;
173 new_mpath->flags = 0; 171 new_mpath->flags = 0;
174 skb_queue_head_init(&new_mpath->frame_queue); 172 skb_queue_head_init(&new_mpath->frame_queue);
175 new_node->mpath = new_mpath; 173 new_node->mpath = new_mpath;
@@ -179,7 +177,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
179 spin_lock_init(&new_mpath->state_lock); 177 spin_lock_init(&new_mpath->state_lock);
180 init_timer(&new_mpath->timer); 178 init_timer(&new_mpath->timer);
181 179
182 hash_idx = mesh_table_hash(dst, dev, mesh_paths); 180 hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
183 bucket = &mesh_paths->hash_buckets[hash_idx]; 181 bucket = &mesh_paths->hash_buckets[hash_idx];
184 182
185 spin_lock(&mesh_paths->hashwlock[hash_idx]); 183 spin_lock(&mesh_paths->hashwlock[hash_idx]);
@@ -187,7 +185,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
187 err = -EEXIST; 185 err = -EEXIST;
188 hlist_for_each_entry(node, n, bucket, list) { 186 hlist_for_each_entry(node, n, bucket, list) {
189 mpath = node->mpath; 187 mpath = node->mpath;
190 if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0) 188 if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
191 goto err_exists; 189 goto err_exists;
192 } 190 }
193 191
@@ -241,7 +239,7 @@ void mesh_plink_broken(struct sta_info *sta)
241 struct mesh_path *mpath; 239 struct mesh_path *mpath;
242 struct mpath_node *node; 240 struct mpath_node *node;
243 struct hlist_node *p; 241 struct hlist_node *p;
244 struct net_device *dev = sta->sdata->dev; 242 struct ieee80211_sub_if_data *sdata = sta->sdata;
245 int i; 243 int i;
246 244
247 rcu_read_lock(); 245 rcu_read_lock();
@@ -256,7 +254,7 @@ void mesh_plink_broken(struct sta_info *sta)
256 spin_unlock_bh(&mpath->state_lock); 254 spin_unlock_bh(&mpath->state_lock);
257 mesh_path_error_tx(mpath->dst, 255 mesh_path_error_tx(mpath->dst,
258 cpu_to_le32(mpath->dsn), 256 cpu_to_le32(mpath->dsn),
259 dev->broadcast, dev); 257 sdata->dev->broadcast, sdata);
260 } else 258 } else
261 spin_unlock_bh(&mpath->state_lock); 259 spin_unlock_bh(&mpath->state_lock);
262 } 260 }
@@ -284,11 +282,11 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
284 for_each_mesh_entry(mesh_paths, p, node, i) { 282 for_each_mesh_entry(mesh_paths, p, node, i) {
285 mpath = node->mpath; 283 mpath = node->mpath;
286 if (mpath->next_hop == sta) 284 if (mpath->next_hop == sta)
287 mesh_path_del(mpath->dst, mpath->dev); 285 mesh_path_del(mpath->dst, mpath->sdata);
288 } 286 }
289} 287}
290 288
291void mesh_path_flush(struct net_device *dev) 289void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
292{ 290{
293 struct mesh_path *mpath; 291 struct mesh_path *mpath;
294 struct mpath_node *node; 292 struct mpath_node *node;
@@ -297,16 +295,15 @@ void mesh_path_flush(struct net_device *dev)
297 295
298 for_each_mesh_entry(mesh_paths, p, node, i) { 296 for_each_mesh_entry(mesh_paths, p, node, i) {
299 mpath = node->mpath; 297 mpath = node->mpath;
300 if (mpath->dev == dev) 298 if (mpath->sdata == sdata)
301 mesh_path_del(mpath->dst, mpath->dev); 299 mesh_path_del(mpath->dst, mpath->sdata);
302 } 300 }
303} 301}
304 302
305static void mesh_path_node_reclaim(struct rcu_head *rp) 303static void mesh_path_node_reclaim(struct rcu_head *rp)
306{ 304{
307 struct mpath_node *node = container_of(rp, struct mpath_node, rcu); 305 struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
308 struct ieee80211_sub_if_data *sdata = 306 struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
309 IEEE80211_DEV_TO_SUB_IF(node->mpath->dev);
310 307
311 del_timer_sync(&node->mpath->timer); 308 del_timer_sync(&node->mpath->timer);
312 atomic_dec(&sdata->u.sta.mpaths); 309 atomic_dec(&sdata->u.sta.mpaths);
@@ -318,11 +315,11 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
318 * mesh_path_del - delete a mesh path from the table 315 * mesh_path_del - delete a mesh path from the table
319 * 316 *
320 * @addr: dst address (ETH_ALEN length) 317 * @addr: dst address (ETH_ALEN length)
321 * @dev: local interface 318 * @sdata: local subif
322 * 319 *
323 * Returns: 0 if successful 320 * Returns: 0 if successful
324 */ 321 */
325int mesh_path_del(u8 *addr, struct net_device *dev) 322int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
326{ 323{
327 struct mesh_path *mpath; 324 struct mesh_path *mpath;
328 struct mpath_node *node; 325 struct mpath_node *node;
@@ -332,13 +329,13 @@ int mesh_path_del(u8 *addr, struct net_device *dev)
332 int err = 0; 329 int err = 0;
333 330
334 read_lock(&pathtbl_resize_lock); 331 read_lock(&pathtbl_resize_lock);
335 hash_idx = mesh_table_hash(addr, dev, mesh_paths); 332 hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
336 bucket = &mesh_paths->hash_buckets[hash_idx]; 333 bucket = &mesh_paths->hash_buckets[hash_idx];
337 334
338 spin_lock(&mesh_paths->hashwlock[hash_idx]); 335 spin_lock(&mesh_paths->hashwlock[hash_idx]);
339 hlist_for_each_entry(node, n, bucket, list) { 336 hlist_for_each_entry(node, n, bucket, list) {
340 mpath = node->mpath; 337 mpath = node->mpath;
341 if (mpath->dev == dev && 338 if (mpath->sdata == sdata &&
342 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 339 memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
343 spin_lock_bh(&mpath->state_lock); 340 spin_lock_bh(&mpath->state_lock);
344 mpath->flags |= MESH_PATH_RESOLVING; 341 mpath->flags |= MESH_PATH_RESOLVING;
@@ -378,29 +375,29 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
378 * mesh_path_discard_frame - discard a frame whose path could not be resolved 375 * mesh_path_discard_frame - discard a frame whose path could not be resolved
379 * 376 *
380 * @skb: frame to discard 377 * @skb: frame to discard
381 * @dev: network device the frame was to be sent through 378 * @sdata: network subif the frame was to be sent through
382 * 379 *
383 * If the frame was being forwarded from another MP, a PERR frame will be sent 380 * If the frame was being forwarded from another MP, a PERR frame will be sent
384 * to the precursor. 381 * to the precursor.
385 * 382 *
386 * Locking: the function must be called within an rcu_read_lock region 383 * Locking: the function must be called within an rcu_read_lock region
387 */ 384 */
388void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev) 385void mesh_path_discard_frame(struct sk_buff *skb,
386 struct ieee80211_sub_if_data *sdata)
389{ 387{
390 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
391 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 388 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
392 struct mesh_path *mpath; 389 struct mesh_path *mpath;
393 u32 dsn = 0; 390 u32 dsn = 0;
394 391
395 if (memcmp(hdr->addr4, dev->dev_addr, ETH_ALEN) != 0) { 392 if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
396 u8 *ra, *da; 393 u8 *ra, *da;
397 394
398 da = hdr->addr3; 395 da = hdr->addr3;
399 ra = hdr->addr2; 396 ra = hdr->addr2;
400 mpath = mesh_path_lookup(da, dev); 397 mpath = mesh_path_lookup(da, sdata);
401 if (mpath) 398 if (mpath)
402 dsn = ++mpath->dsn; 399 dsn = ++mpath->dsn;
403 mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev); 400 mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata);
404 } 401 }
405 402
406 kfree_skb(skb); 403 kfree_skb(skb);
@@ -416,14 +413,11 @@ void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev)
416 */ 413 */
417void mesh_path_flush_pending(struct mesh_path *mpath) 414void mesh_path_flush_pending(struct mesh_path *mpath)
418{ 415{
419 struct ieee80211_sub_if_data *sdata;
420 struct sk_buff *skb; 416 struct sk_buff *skb;
421 417
422 sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);
423
424 while ((skb = skb_dequeue(&mpath->frame_queue)) && 418 while ((skb = skb_dequeue(&mpath->frame_queue)) &&
425 (mpath->flags & MESH_PATH_ACTIVE)) 419 (mpath->flags & MESH_PATH_ACTIVE))
426 mesh_path_discard_frame(skb, mpath->dev); 420 mesh_path_discard_frame(skb, mpath->sdata);
427} 421}
428 422
429/** 423/**
@@ -472,7 +466,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
472 node = hlist_entry(p, struct mpath_node, list); 466 node = hlist_entry(p, struct mpath_node, list);
473 mpath = node->mpath; 467 mpath = node->mpath;
474 new_node->mpath = mpath; 468 new_node->mpath = mpath;
475 hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl); 469 hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
476 hlist_add_head(&new_node->list, 470 hlist_add_head(&new_node->list,
477 &newtbl->hash_buckets[hash_idx]); 471 &newtbl->hash_buckets[hash_idx]);
478 return 0; 472 return 0;
@@ -489,7 +483,7 @@ int mesh_pathtbl_init(void)
489 return 0; 483 return 0;
490} 484}
491 485
492void mesh_path_expire(struct net_device *dev) 486void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
493{ 487{
494 struct mesh_path *mpath; 488 struct mesh_path *mpath;
495 struct mpath_node *node; 489 struct mpath_node *node;
@@ -498,7 +492,7 @@ void mesh_path_expire(struct net_device *dev)
498 492
499 read_lock(&pathtbl_resize_lock); 493 read_lock(&pathtbl_resize_lock);
500 for_each_mesh_entry(mesh_paths, p, node, i) { 494 for_each_mesh_entry(mesh_paths, p, node, i) {
501 if (node->mpath->dev != dev) 495 if (node->mpath->sdata != sdata)
502 continue; 496 continue;
503 mpath = node->mpath; 497 mpath = node->mpath;
504 spin_lock_bh(&mpath->state_lock); 498 spin_lock_bh(&mpath->state_lock);
@@ -507,7 +501,7 @@ void mesh_path_expire(struct net_device *dev)
507 time_after(jiffies, 501 time_after(jiffies,
508 mpath->exp_time + MESH_PATH_EXPIRE)) { 502 mpath->exp_time + MESH_PATH_EXPIRE)) {
509 spin_unlock_bh(&mpath->state_lock); 503 spin_unlock_bh(&mpath->state_lock);
510 mesh_path_del(mpath->dst, mpath->dev); 504 mesh_path_del(mpath->dst, mpath->sdata);
511 } else 505 } else
512 spin_unlock_bh(&mpath->state_lock); 506 spin_unlock_bh(&mpath->state_lock);
513 } 507 }
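
With the conversion, the path table above is keyed by the owning subinterface as well as the destination: mesh_table_hash() now takes sdata, and a lookup only matches when mpath->sdata == sdata and the addresses agree. A small stand-alone sketch of that two-part key follows; the types and the hash mix are placeholders, while the real code walks RCU-protected hlist buckets.

/*
 * Stand-in sketch of a (destination, owning subif) keyed bucket lookup.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define BUCKETS  8

struct mpath_stub {
	unsigned char dst[ETH_ALEN];
	void *sdata;                    /* owning subif is part of the key */
	struct mpath_stub *next;
};

static struct mpath_stub *buckets[BUCKETS];

static unsigned int hash_key(const unsigned char *dst, const void *sdata)
{
	unsigned int h = (unsigned int)(size_t)sdata;   /* placeholder mix */
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		h = h * 31 + dst[i];
	return h % BUCKETS;
}

static struct mpath_stub *path_lookup(const unsigned char *dst, void *sdata)
{
	struct mpath_stub *m;

	for (m = buckets[hash_key(dst, sdata)]; m; m = m->next)
		if (m->sdata == sdata && memcmp(m->dst, dst, ETH_ALEN) == 0)
			return m;       /* both subif and address matched */
	return NULL;
}

int main(void)
{
	static struct mpath_stub entry = { .dst = { 1, 2, 3, 4, 5, 6 } };
	int owner;                      /* stands in for an sdata pointer */

	entry.sdata = &owner;
	buckets[hash_key(entry.dst, &owner)] = &entry;
	printf("found: %s\n", path_lookup(entry.dst, &owner) ? "yes" : "no");
	return 0;
}
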
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 9efeb1f07025..7714b0e6e4d7 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -144,10 +144,10 @@ void mesh_plink_deactivate(struct sta_info *sta)
144 spin_unlock_bh(&sta->lock); 144 spin_unlock_bh(&sta->lock);
145} 145}
146 146
147static int mesh_plink_frame_tx(struct net_device *dev, 147static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
148 enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid, 148 enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid,
149 __le16 reason) { 149 __le16 reason) {
150 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 150 struct ieee80211_local *local = sdata->local;
151 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 151 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
152 struct ieee80211_mgmt *mgmt; 152 struct ieee80211_mgmt *mgmt;
153 bool include_plid = false; 153 bool include_plid = false;
@@ -163,10 +163,10 @@ static int mesh_plink_frame_tx(struct net_device *dev,
163 mgmt = (struct ieee80211_mgmt *) 163 mgmt = (struct ieee80211_mgmt *)
164 skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action)); 164 skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action));
165 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action)); 165 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action));
166 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 166 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
167 IEEE80211_STYPE_ACTION); 167 IEEE80211_STYPE_ACTION);
168 memcpy(mgmt->da, da, ETH_ALEN); 168 memcpy(mgmt->da, da, ETH_ALEN);
169 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 169 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
170 /* BSSID is left zeroed, wildcard value */ 170 /* BSSID is left zeroed, wildcard value */
171 mgmt->u.action.category = PLINK_CATEGORY; 171 mgmt->u.action.category = PLINK_CATEGORY;
172 mgmt->u.action.u.plink_action.action_code = action; 172 mgmt->u.action.u.plink_action.action_code = action;
@@ -180,7 +180,7 @@ static int mesh_plink_frame_tx(struct net_device *dev,
180 /* two-byte status code followed by two-byte AID */ 180 /* two-byte status code followed by two-byte AID */
181 memset(pos, 0, 4); 181 memset(pos, 0, 4);
182 } 182 }
183 mesh_mgmt_ies_add(skb, dev); 183 mesh_mgmt_ies_add(skb, sdata);
184 } 184 }
185 185
186 /* Add Peer Link Management element */ 186 /* Add Peer Link Management element */
@@ -217,15 +217,14 @@ static int mesh_plink_frame_tx(struct net_device *dev,
217 memcpy(pos, &reason, 2); 217 memcpy(pos, &reason, 2);
218 } 218 }
219 219
220 ieee80211_sta_tx(dev, skb, 0); 220 ieee80211_sta_tx(sdata, skb, 0);
221 return 0; 221 return 0;
222} 222}
223 223
224void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, 224void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct ieee80211_sub_if_data *sdata,
225 bool peer_accepting_plinks) 225 bool peer_accepting_plinks)
226{ 226{
227 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 227 struct ieee80211_local *local = sdata->local;
228 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
229 struct sta_info *sta; 228 struct sta_info *sta;
230 229
231 rcu_read_lock(); 230 rcu_read_lock();
@@ -257,7 +256,6 @@ static void mesh_plink_timer(unsigned long data)
257{ 256{
258 struct sta_info *sta; 257 struct sta_info *sta;
259 __le16 llid, plid, reason; 258 __le16 llid, plid, reason;
260 struct net_device *dev = NULL;
261 struct ieee80211_sub_if_data *sdata; 259 struct ieee80211_sub_if_data *sdata;
262#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG 260#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
263 DECLARE_MAC_BUF(mac); 261 DECLARE_MAC_BUF(mac);
@@ -282,7 +280,6 @@ static void mesh_plink_timer(unsigned long data)
282 llid = sta->llid; 280 llid = sta->llid;
283 plid = sta->plid; 281 plid = sta->plid;
284 sdata = sta->sdata; 282 sdata = sta->sdata;
285 dev = sdata->dev;
286 283
287 switch (sta->plink_state) { 284 switch (sta->plink_state) {
288 case PLINK_OPN_RCVD: 285 case PLINK_OPN_RCVD:
@@ -299,7 +296,7 @@ static void mesh_plink_timer(unsigned long data)
299 ++sta->plink_retries; 296 ++sta->plink_retries;
300 mod_plink_timer(sta, sta->plink_timeout); 297 mod_plink_timer(sta, sta->plink_timeout);
301 spin_unlock_bh(&sta->lock); 298 spin_unlock_bh(&sta->lock);
302 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, 299 mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->addr, llid,
303 0, 0); 300 0, 0);
304 break; 301 break;
305 } 302 }
@@ -312,7 +309,7 @@ static void mesh_plink_timer(unsigned long data)
312 sta->plink_state = PLINK_HOLDING; 309 sta->plink_state = PLINK_HOLDING;
313 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 310 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
314 spin_unlock_bh(&sta->lock); 311 spin_unlock_bh(&sta->lock);
315 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid, 312 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->addr, llid, plid,
316 reason); 313 reason);
317 break; 314 break;
318 case PLINK_HOLDING: 315 case PLINK_HOLDING:
@@ -357,7 +354,7 @@ int mesh_plink_open(struct sta_info *sta)
357 mpl_dbg("Mesh plink: starting establishment with %s\n", 354 mpl_dbg("Mesh plink: starting establishment with %s\n",
358 print_mac(mac, sta->addr)); 355 print_mac(mac, sta->addr));
359 356
360 return mesh_plink_frame_tx(sdata->dev, PLINK_OPEN, 357 return mesh_plink_frame_tx(sdata, PLINK_OPEN,
361 sta->addr, llid, 0, 0); 358 sta->addr, llid, 0, 0);
362} 359}
363 360
@@ -403,15 +400,14 @@ int mesh_plink_close(struct sta_info *sta)
403 llid = sta->llid; 400 llid = sta->llid;
404 plid = sta->plid; 401 plid = sta->plid;
405 spin_unlock_bh(&sta->lock); 402 spin_unlock_bh(&sta->lock);
406 mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid, 403 mesh_plink_frame_tx(sta->sdata, PLINK_CLOSE, sta->addr, llid,
407 plid, reason); 404 plid, reason);
408 return 0; 405 return 0;
409} 406}
410 407
411void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, 408void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
412 size_t len, struct ieee80211_rx_status *rx_status) 409 size_t len, struct ieee80211_rx_status *rx_status)
413{ 410{
414 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
415 struct ieee80211_local *local = sdata->local; 411 struct ieee80211_local *local = sdata->local;
416 struct ieee802_11_elems elems; 412 struct ieee802_11_elems elems;
417 struct sta_info *sta; 413 struct sta_info *sta;
@@ -478,7 +474,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
478 474
479 /* Now we will figure out the appropriate event... */ 475 /* Now we will figure out the appropriate event... */
480 event = PLINK_UNDEFINED; 476 event = PLINK_UNDEFINED;
481 if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, dev))) { 477 if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) {
482 switch (ftype) { 478 switch (ftype) {
483 case PLINK_OPEN: 479 case PLINK_OPEN:
484 event = OPN_RJCT; 480 event = OPN_RJCT;
@@ -577,9 +573,9 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
577 sta->llid = llid; 573 sta->llid = llid;
578 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 574 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
579 spin_unlock_bh(&sta->lock); 575 spin_unlock_bh(&sta->lock);
580 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, 576 mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->addr, llid,
581 0, 0); 577 0, 0);
582 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, 578 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->addr,
583 llid, plid, 0); 579 llid, plid, 0);
584 break; 580 break;
585 default: 581 default:
@@ -604,7 +600,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
604 600
605 llid = sta->llid; 601 llid = sta->llid;
606 spin_unlock_bh(&sta->lock); 602 spin_unlock_bh(&sta->lock);
607 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 603 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->addr, llid,
608 plid, reason); 604 plid, reason);
609 break; 605 break;
610 case OPN_ACPT: 606 case OPN_ACPT:
@@ -613,7 +609,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
613 sta->plid = plid; 609 sta->plid = plid;
614 llid = sta->llid; 610 llid = sta->llid;
615 spin_unlock_bh(&sta->lock); 611 spin_unlock_bh(&sta->lock);
616 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 612 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->addr, llid,
617 plid, 0); 613 plid, 0);
618 break; 614 break;
619 case CNF_ACPT: 615 case CNF_ACPT:
@@ -646,13 +642,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
646 642
647 llid = sta->llid; 643 llid = sta->llid;
648 spin_unlock_bh(&sta->lock); 644 spin_unlock_bh(&sta->lock);
649 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 645 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->addr, llid,
650 plid, reason); 646 plid, reason);
651 break; 647 break;
652 case OPN_ACPT: 648 case OPN_ACPT:
653 llid = sta->llid; 649 llid = sta->llid;
654 spin_unlock_bh(&sta->lock); 650 spin_unlock_bh(&sta->lock);
655 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 651 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->addr, llid,
656 plid, 0); 652 plid, 0);
657 break; 653 break;
658 case CNF_ACPT: 654 case CNF_ACPT:
@@ -685,7 +681,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
685 681
686 llid = sta->llid; 682 llid = sta->llid;
687 spin_unlock_bh(&sta->lock); 683 spin_unlock_bh(&sta->lock);
688 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 684 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->addr, llid,
689 plid, reason); 685 plid, reason);
690 break; 686 break;
691 case OPN_ACPT: 687 case OPN_ACPT:
@@ -695,7 +691,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
695 spin_unlock_bh(&sta->lock); 691 spin_unlock_bh(&sta->lock);
696 mpl_dbg("Mesh plink with %s ESTABLISHED\n", 692 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
697 print_mac(mac, sta->addr)); 693 print_mac(mac, sta->addr));
698 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 694 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->addr, llid,
699 plid, 0); 695 plid, 0);
700 break; 696 break;
701 default: 697 default:
@@ -714,13 +710,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
714 llid = sta->llid; 710 llid = sta->llid;
715 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 711 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
716 spin_unlock_bh(&sta->lock); 712 spin_unlock_bh(&sta->lock);
717 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 713 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->addr, llid,
718 plid, reason); 714 plid, reason);
719 break; 715 break;
720 case OPN_ACPT: 716 case OPN_ACPT:
721 llid = sta->llid; 717 llid = sta->llid;
722 spin_unlock_bh(&sta->lock); 718 spin_unlock_bh(&sta->lock);
723 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 719 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->addr, llid,
724 plid, 0); 720 plid, 0);
725 break; 721 break;
726 default: 722 default:
@@ -743,7 +739,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
743 llid = sta->llid; 739 llid = sta->llid;
744 reason = sta->reason; 740 reason = sta->reason;
745 spin_unlock_bh(&sta->lock); 741 spin_unlock_bh(&sta->lock);
746 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 742 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->addr, llid,
747 plid, reason); 743 plid, reason);
748 break; 744 break;
749 default: 745 default:
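
The plink (and HWMP) hunks also replace the IEEE80211_FC() helper with an explicit cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION) when filling frame_control. The sketch below only illustrates why the conversion exists: frame_control is a little-endian u16 on the wire, so the type/subtype bits are OR'd together and byte-swapped once. The constant values and the helper are assumptions local to this stand-alone example, not the kernel's definitions.

/*
 * Stand-in sketch of building a little-endian frame_control field.
 */
#include <stdint.h>
#include <stdio.h>

#define FTYPE_MGMT   0x0000u    /* 802.11 management type (assumed value) */
#define STYPE_ACTION 0x00d0u    /* action frame subtype (assumed value)   */

/* Portable stand-in for cpu_to_le16(): emit the low byte first. */
static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = (uint8_t)(v & 0xff);
	p[1] = (uint8_t)(v >> 8);
}

int main(void)
{
	uint8_t fc[2];

	put_le16(fc, FTYPE_MGMT | STYPE_ACTION);
	printf("frame_control on the wire: %02x %02x\n", fc[0], fc[1]);
	return 0;
}
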
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 902cac1bd246..df12e746b03e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -34,6 +34,7 @@
34#include "led.h" 34#include "led.h"
35#include "mesh.h" 35#include "mesh.h"
36 36
37#define IEEE80211_ASSOC_SCANS_MAX_TRIES 2
37#define IEEE80211_AUTH_TIMEOUT (HZ / 5) 38#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
38#define IEEE80211_AUTH_MAX_TRIES 3 39#define IEEE80211_AUTH_MAX_TRIES 3
39#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 40#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
@@ -73,19 +74,19 @@
73#define IEEE80211_MIN_AMPDU_BUF 0x8 74#define IEEE80211_MIN_AMPDU_BUF 0x8
74#define IEEE80211_MAX_AMPDU_BUF 0x40 75#define IEEE80211_MAX_AMPDU_BUF 0x40
75 76
76static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, 77static void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
77 u8 *ssid, size_t ssid_len); 78 u8 *ssid, size_t ssid_len);
78static struct ieee80211_sta_bss * 79static struct ieee80211_sta_bss *
79ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq, 80ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
80 u8 *ssid, u8 ssid_len); 81 u8 *ssid, u8 ssid_len);
81static void ieee80211_rx_bss_put(struct ieee80211_local *local, 82static void ieee80211_rx_bss_put(struct ieee80211_local *local,
82 struct ieee80211_sta_bss *bss); 83 struct ieee80211_sta_bss *bss);
83static int ieee80211_sta_find_ibss(struct net_device *dev, 84static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata,
84 struct ieee80211_if_sta *ifsta); 85 struct ieee80211_if_sta *ifsta);
85static int ieee80211_sta_wep_configured(struct net_device *dev); 86static int ieee80211_sta_wep_configured(struct ieee80211_sub_if_data *sdata);
86static int ieee80211_sta_start_scan(struct net_device *dev, 87static int ieee80211_sta_start_scan(struct ieee80211_sub_if_data *sdata,
87 u8 *ssid, size_t ssid_len); 88 u8 *ssid, size_t ssid_len);
88static int ieee80211_sta_config_auth(struct net_device *dev, 89static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata,
89 struct ieee80211_if_sta *ifsta); 90 struct ieee80211_if_sta *ifsta);
90static void sta_rx_agg_session_timer_expired(unsigned long data); 91static void sta_rx_agg_session_timer_expired(unsigned long data);
91 92
@@ -97,6 +98,8 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
97 u8 *pos = start; 98 u8 *pos = start;
98 99
99 memset(elems, 0, sizeof(*elems)); 100 memset(elems, 0, sizeof(*elems));
101 elems->ie_start = start;
102 elems->total_len = len;
100 103
101 while (left >= 2) { 104 while (left >= 2) {
102 u8 id, elen; 105 u8 id, elen;
@@ -233,17 +236,37 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
233} 236}
234 237
235 238
239static u8 * ieee80211_bss_get_ie(struct ieee80211_sta_bss *bss, u8 ie)
240{
241 u8 *end, *pos;
242
243 pos = bss->ies;
244 if (pos == NULL)
245 return NULL;
246 end = pos + bss->ies_len;
247
248 while (pos + 1 < end) {
249 if (pos + 2 + pos[1] > end)
250 break;
251 if (pos[0] == ie)
252 return pos;
253 pos += 2 + pos[1];
254 }
255
256 return NULL;
257}
258
259
236static int ecw2cw(int ecw) 260static int ecw2cw(int ecw)
237{ 261{
238 return (1 << ecw) - 1; 262 return (1 << ecw) - 1;
239} 263}
240 264
241 265
242static void ieee80211_sta_def_wmm_params(struct net_device *dev, 266static void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
243 struct ieee80211_sta_bss *bss, 267 struct ieee80211_sta_bss *bss,
244 int ibss) 268 int ibss)
245{ 269{
246 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
247 struct ieee80211_local *local = sdata->local; 270 struct ieee80211_local *local = sdata->local;
248 int i, have_higher_than_11mbit = 0; 271 int i, have_higher_than_11mbit = 0;
249 272
@@ -281,11 +304,10 @@ static void ieee80211_sta_def_wmm_params(struct net_device *dev,
281 } 304 }
282} 305}
283 306
284static void ieee80211_sta_wmm_params(struct net_device *dev, 307static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
285 struct ieee80211_if_sta *ifsta, 308 struct ieee80211_if_sta *ifsta,
286 u8 *wmm_param, size_t wmm_param_len) 309 u8 *wmm_param, size_t wmm_param_len)
287{ 310{
288 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
289 struct ieee80211_tx_queue_params params; 311 struct ieee80211_tx_queue_params params;
290 size_t left; 312 size_t left;
291 int count; 313 int count;
@@ -349,14 +371,14 @@ static void ieee80211_sta_wmm_params(struct net_device *dev,
349#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 371#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
350 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " 372 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
351 "cWmin=%d cWmax=%d txop=%d\n", 373 "cWmin=%d cWmax=%d txop=%d\n",
352 dev->name, queue, aci, acm, params.aifs, params.cw_min, 374 local->mdev->name, queue, aci, acm, params.aifs, params.cw_min,
353 params.cw_max, params.txop); 375 params.cw_max, params.txop);
354#endif 376#endif
355 /* TODO: handle ACM (block TX, fallback to next lowest allowed 377 /* TODO: handle ACM (block TX, fallback to next lowest allowed
356 * AC for now) */ 378 * AC for now) */
357 if (local->ops->conf_tx(local_to_hw(local), queue, &params)) { 379 if (local->ops->conf_tx(local_to_hw(local), queue, &params)) {
358 printk(KERN_DEBUG "%s: failed to set TX queue " 380 printk(KERN_DEBUG "%s: failed to set TX queue "
359 "parameters for queue %d\n", dev->name, queue); 381 "parameters for queue %d\n", local->mdev->name, queue);
360 } 382 }
361 } 383 }
362} 384}
@@ -475,62 +497,30 @@ int ieee80211_ht_addt_info_ie_to_ht_bss_info(
475 return 0; 497 return 0;
476} 498}
477 499
478static void ieee80211_sta_send_associnfo(struct net_device *dev, 500static void ieee80211_sta_send_associnfo(struct ieee80211_sub_if_data *sdata,
479 struct ieee80211_if_sta *ifsta) 501 struct ieee80211_if_sta *ifsta)
480{ 502{
481 char *buf;
482 size_t len;
483 int i;
484 union iwreq_data wrqu; 503 union iwreq_data wrqu;
485 504
486 if (!ifsta->assocreq_ies && !ifsta->assocresp_ies)
487 return;
488
489 buf = kmalloc(50 + 2 * (ifsta->assocreq_ies_len +
490 ifsta->assocresp_ies_len), GFP_KERNEL);
491 if (!buf)
492 return;
493
494 len = sprintf(buf, "ASSOCINFO(");
495 if (ifsta->assocreq_ies) { 505 if (ifsta->assocreq_ies) {
496 len += sprintf(buf + len, "ReqIEs="); 506 memset(&wrqu, 0, sizeof(wrqu));
497 for (i = 0; i < ifsta->assocreq_ies_len; i++) { 507 wrqu.data.length = ifsta->assocreq_ies_len;
498 len += sprintf(buf + len, "%02x", 508 wireless_send_event(sdata->dev, IWEVASSOCREQIE, &wrqu,
499 ifsta->assocreq_ies[i]); 509 ifsta->assocreq_ies);
500 }
501 } 510 }
502 if (ifsta->assocresp_ies) { 511 if (ifsta->assocresp_ies) {
503 if (ifsta->assocreq_ies) 512 memset(&wrqu, 0, sizeof(wrqu));
504 len += sprintf(buf + len, " "); 513 wrqu.data.length = ifsta->assocresp_ies_len;
505 len += sprintf(buf + len, "RespIEs="); 514 wireless_send_event(sdata->dev, IWEVASSOCRESPIE, &wrqu,
506 for (i = 0; i < ifsta->assocresp_ies_len; i++) { 515 ifsta->assocresp_ies);
507 len += sprintf(buf + len, "%02x",
508 ifsta->assocresp_ies[i]);
509 }
510 } 516 }
511 len += sprintf(buf + len, ")");
512
513 if (len > IW_CUSTOM_MAX) {
514 len = sprintf(buf, "ASSOCRESPIE=");
515 for (i = 0; i < ifsta->assocresp_ies_len; i++) {
516 len += sprintf(buf + len, "%02x",
517 ifsta->assocresp_ies[i]);
518 }
519 }
520
521 memset(&wrqu, 0, sizeof(wrqu));
522 wrqu.data.length = len;
523 wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
524
525 kfree(buf);
526} 517}
527 518
528 519
529static void ieee80211_set_associated(struct net_device *dev, 520static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
530 struct ieee80211_if_sta *ifsta, 521 struct ieee80211_if_sta *ifsta,
531 bool assoc) 522 bool assoc)
532{ 523{
533 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
534 struct ieee80211_local *local = sdata->local; 524 struct ieee80211_local *local = sdata->local;
535 struct ieee80211_conf *conf = &local_to_hw(local)->conf; 525 struct ieee80211_conf *conf = &local_to_hw(local)->conf;
536 union iwreq_data wrqu; 526 union iwreq_data wrqu;
@@ -544,7 +534,7 @@ static void ieee80211_set_associated(struct net_device *dev,
544 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 534 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
545 return; 535 return;
546 536
547 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, 537 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
548 conf->channel->center_freq, 538 conf->channel->center_freq,
549 ifsta->ssid, ifsta->ssid_len); 539 ifsta->ssid, ifsta->ssid_len);
550 if (bss) { 540 if (bss) {
@@ -568,12 +558,12 @@ static void ieee80211_set_associated(struct net_device *dev,
568 ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET; 558 ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET;
569 memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN); 559 memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN);
570 memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN); 560 memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN);
571 ieee80211_sta_send_associnfo(dev, ifsta); 561 ieee80211_sta_send_associnfo(sdata, ifsta);
572 } else { 562 } else {
573 netif_carrier_off(dev); 563 netif_carrier_off(sdata->dev);
574 ieee80211_sta_tear_down_BA_sessions(dev, ifsta->bssid); 564 ieee80211_sta_tear_down_BA_sessions(sdata, ifsta->bssid);
575 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; 565 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
576 changed |= ieee80211_reset_erp_info(dev); 566 changed |= ieee80211_reset_erp_info(sdata);
577 567
578 sdata->bss_conf.assoc_ht = 0; 568 sdata->bss_conf.assoc_ht = 0;
579 sdata->bss_conf.ht_conf = NULL; 569 sdata->bss_conf.ht_conf = NULL;
@@ -588,27 +578,27 @@ static void ieee80211_set_associated(struct net_device *dev,
588 ieee80211_bss_info_change_notify(sdata, changed); 578 ieee80211_bss_info_change_notify(sdata, changed);
589 579
590 if (assoc) 580 if (assoc)
591 netif_carrier_on(dev); 581 netif_carrier_on(sdata->dev);
592 582
593 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 583 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
594 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 584 wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL);
595} 585}
596 586
597static void ieee80211_set_disassoc(struct net_device *dev, 587static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
598 struct ieee80211_if_sta *ifsta, int deauth) 588 struct ieee80211_if_sta *ifsta, int deauth)
599{ 589{
600 if (deauth) 590 if (deauth) {
591 ifsta->direct_probe_tries = 0;
601 ifsta->auth_tries = 0; 592 ifsta->auth_tries = 0;
593 }
594 ifsta->assoc_scan_tries = 0;
602 ifsta->assoc_tries = 0; 595 ifsta->assoc_tries = 0;
603 ieee80211_set_associated(dev, ifsta, 0); 596 ieee80211_set_associated(sdata, ifsta, 0);
604} 597}
605 598
606void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, 599void ieee80211_sta_tx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
607 int encrypt) 600 int encrypt)
608{ 601{
609 struct ieee80211_sub_if_data *sdata;
610
611 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
612 skb->dev = sdata->local->mdev; 602 skb->dev = sdata->local->mdev;
613 skb_set_mac_header(skb, 0); 603 skb_set_mac_header(skb, 0);
614 skb_set_network_header(skb, 0); 604 skb_set_network_header(skb, 0);
@@ -621,12 +611,12 @@ void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
621} 611}
622 612
623 613
624static void ieee80211_send_auth(struct net_device *dev, 614static void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
625 struct ieee80211_if_sta *ifsta, 615 struct ieee80211_if_sta *ifsta,
626 int transaction, u8 *extra, size_t extra_len, 616 int transaction, u8 *extra, size_t extra_len,
627 int encrypt) 617 int encrypt)
628{ 618{
629 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 619 struct ieee80211_local *local = sdata->local;
630 struct sk_buff *skb; 620 struct sk_buff *skb;
631 struct ieee80211_mgmt *mgmt; 621 struct ieee80211_mgmt *mgmt;
632 622
@@ -634,19 +624,19 @@ static void ieee80211_send_auth(struct net_device *dev,
634 sizeof(*mgmt) + 6 + extra_len); 624 sizeof(*mgmt) + 6 + extra_len);
635 if (!skb) { 625 if (!skb) {
636 printk(KERN_DEBUG "%s: failed to allocate buffer for auth " 626 printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
637 "frame\n", dev->name); 627 "frame\n", sdata->dev->name);
638 return; 628 return;
639 } 629 }
640 skb_reserve(skb, local->hw.extra_tx_headroom); 630 skb_reserve(skb, local->hw.extra_tx_headroom);
641 631
642 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); 632 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
643 memset(mgmt, 0, 24 + 6); 633 memset(mgmt, 0, 24 + 6);
644 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 634 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
645 IEEE80211_STYPE_AUTH); 635 IEEE80211_STYPE_AUTH);
646 if (encrypt) 636 if (encrypt)
647 mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 637 mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
648 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); 638 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
649 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 639 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
650 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 640 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
651 mgmt->u.auth.auth_alg = cpu_to_le16(ifsta->auth_alg); 641 mgmt->u.auth.auth_alg = cpu_to_le16(ifsta->auth_alg);
652 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); 642 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
@@ -655,11 +645,41 @@ static void ieee80211_send_auth(struct net_device *dev,
655 if (extra) 645 if (extra)
656 memcpy(skb_put(skb, extra_len), extra, extra_len); 646 memcpy(skb_put(skb, extra_len), extra, extra_len);
657 647
658 ieee80211_sta_tx(dev, skb, encrypt); 648 ieee80211_sta_tx(sdata, skb, encrypt);
659} 649}
660 650
651static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata,
652 struct ieee80211_if_sta *ifsta)
653{
654 DECLARE_MAC_BUF(mac);
655
656 ifsta->direct_probe_tries++;
657 if (ifsta->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) {
658 printk(KERN_DEBUG "%s: direct probe to AP %s timed out\n",
659 sdata->dev->name, print_mac(mac, ifsta->bssid));
660 ifsta->state = IEEE80211_STA_MLME_DISABLED;
661 return;
662 }
663
664 printk(KERN_DEBUG "%s: direct probe to AP %s try %d\n",
665 sdata->dev->name, print_mac(mac, ifsta->bssid),
666 ifsta->direct_probe_tries);
667
668 ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
661 669
662static void ieee80211_authenticate(struct net_device *dev, 670 set_bit(IEEE80211_STA_REQ_DIRECT_PROBE, &ifsta->request);
671
672 /* Direct probe is sent to broadcast address as some APs
673 * will not answer a direct probe request in unassociated state.
674 */
675 ieee80211_send_probe_req(sdata, NULL,
676 ifsta->ssid, ifsta->ssid_len);
677
678 mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
679}
680
681
682static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
663 struct ieee80211_if_sta *ifsta) 683 struct ieee80211_if_sta *ifsta)
664{ 684{
665 DECLARE_MAC_BUF(mac); 685 DECLARE_MAC_BUF(mac);
@@ -668,16 +688,16 @@ static void ieee80211_authenticate(struct net_device *dev,
668 if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) { 688 if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) {
669 printk(KERN_DEBUG "%s: authentication with AP %s" 689 printk(KERN_DEBUG "%s: authentication with AP %s"
670 " timed out\n", 690 " timed out\n",
671 dev->name, print_mac(mac, ifsta->bssid)); 691 sdata->dev->name, print_mac(mac, ifsta->bssid));
672 ifsta->state = IEEE80211_DISABLED; 692 ifsta->state = IEEE80211_STA_MLME_DISABLED;
673 return; 693 return;
674 } 694 }
675 695
676 ifsta->state = IEEE80211_AUTHENTICATE; 696 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
677 printk(KERN_DEBUG "%s: authenticate with AP %s\n", 697 printk(KERN_DEBUG "%s: authenticate with AP %s\n",
678 dev->name, print_mac(mac, ifsta->bssid)); 698 sdata->dev->name, print_mac(mac, ifsta->bssid));
679 699
680 ieee80211_send_auth(dev, ifsta, 1, NULL, 0, 0); 700 ieee80211_send_auth(sdata, ifsta, 1, NULL, 0, 0);
681 701
682 mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT); 702 mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
683} 703}
@@ -703,13 +723,13 @@ static int ieee80211_compatible_rates(struct ieee80211_sta_bss *bss,
703 return count; 723 return count;
704} 724}
705 725
706static void ieee80211_send_assoc(struct net_device *dev, 726static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
707 struct ieee80211_if_sta *ifsta) 727 struct ieee80211_if_sta *ifsta)
708{ 728{
709 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 729 struct ieee80211_local *local = sdata->local;
710 struct sk_buff *skb; 730 struct sk_buff *skb;
711 struct ieee80211_mgmt *mgmt; 731 struct ieee80211_mgmt *mgmt;
712 u8 *pos, *ies; 732 u8 *pos, *ies, *ht_add_ie;
713 int i, len, count, rates_len, supp_rates_len; 733 int i, len, count, rates_len, supp_rates_len;
714 u16 capab; 734 u16 capab;
715 struct ieee80211_sta_bss *bss; 735 struct ieee80211_sta_bss *bss;
@@ -722,7 +742,7 @@ static void ieee80211_send_assoc(struct net_device *dev,
722 ifsta->ssid_len); 742 ifsta->ssid_len);
723 if (!skb) { 743 if (!skb) {
724 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " 744 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
725 "frame\n", dev->name); 745 "frame\n", sdata->dev->name);
726 return; 746 return;
727 } 747 }
728 skb_reserve(skb, local->hw.extra_tx_headroom); 748 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -738,13 +758,13 @@ static void ieee80211_send_assoc(struct net_device *dev,
738 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; 758 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
739 } 759 }
740 760
741 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, 761 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
742 local->hw.conf.channel->center_freq, 762 local->hw.conf.channel->center_freq,
743 ifsta->ssid, ifsta->ssid_len); 763 ifsta->ssid, ifsta->ssid_len);
744 if (bss) { 764 if (bss) {
745 if (bss->capability & WLAN_CAPABILITY_PRIVACY) 765 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
746 capab |= WLAN_CAPABILITY_PRIVACY; 766 capab |= WLAN_CAPABILITY_PRIVACY;
747 if (bss->wmm_ie) 767 if (bss->wmm_used)
748 wmm = 1; 768 wmm = 1;
749 769
750 /* get all rates supported by the device and the AP as 770 /* get all rates supported by the device and the AP as
@@ -766,13 +786,13 @@ static void ieee80211_send_assoc(struct net_device *dev,
766 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 786 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
767 memset(mgmt, 0, 24); 787 memset(mgmt, 0, 24);
768 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); 788 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
769 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 789 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
770 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 790 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
771 791
772 if (ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) { 792 if (ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) {
773 skb_put(skb, 10); 793 skb_put(skb, 10);
774 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 794 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
775 IEEE80211_STYPE_REASSOC_REQ); 795 IEEE80211_STYPE_REASSOC_REQ);
776 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); 796 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
777 mgmt->u.reassoc_req.listen_interval = 797 mgmt->u.reassoc_req.listen_interval =
778 cpu_to_le16(local->hw.conf.listen_interval); 798 cpu_to_le16(local->hw.conf.listen_interval);
@@ -780,8 +800,8 @@ static void ieee80211_send_assoc(struct net_device *dev,
780 ETH_ALEN); 800 ETH_ALEN);
781 } else { 801 } else {
782 skb_put(skb, 4); 802 skb_put(skb, 4);
783 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 803 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
784 IEEE80211_STYPE_ASSOC_REQ); 804 IEEE80211_STYPE_ASSOC_REQ);
785 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab); 805 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
786 mgmt->u.reassoc_req.listen_interval = 806 mgmt->u.reassoc_req.listen_interval =
787 cpu_to_le16(local->hw.conf.listen_interval); 807 cpu_to_le16(local->hw.conf.listen_interval);
@@ -866,9 +886,10 @@ static void ieee80211_send_assoc(struct net_device *dev,
866 886
867 /* wmm support is a must for HT */ 887 /* wmm support is a must for HT */
868 if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) && 888 if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) &&
869 sband->ht_info.ht_supported && bss->ht_add_ie) { 889 sband->ht_info.ht_supported &&
890 (ht_add_ie = ieee80211_bss_get_ie(bss, WLAN_EID_HT_EXTRA_INFO))) {
870 struct ieee80211_ht_addt_info *ht_add_info = 891 struct ieee80211_ht_addt_info *ht_add_info =
871 (struct ieee80211_ht_addt_info *)bss->ht_add_ie; 892 (struct ieee80211_ht_addt_info *)ht_add_ie;
872 u16 cap = sband->ht_info.cap; 893 u16 cap = sband->ht_info.cap;
873 __le16 tmp; 894 __le16 tmp;
874 u32 flags = local->hw.conf.channel->flags; 895 u32 flags = local->hw.conf.channel->flags;
@@ -907,21 +928,21 @@ static void ieee80211_send_assoc(struct net_device *dev,
907 if (ifsta->assocreq_ies) 928 if (ifsta->assocreq_ies)
908 memcpy(ifsta->assocreq_ies, ies, ifsta->assocreq_ies_len); 929 memcpy(ifsta->assocreq_ies, ies, ifsta->assocreq_ies_len);
909 930
910 ieee80211_sta_tx(dev, skb, 0); 931 ieee80211_sta_tx(sdata, skb, 0);
911} 932}
912 933
913 934
914static void ieee80211_send_deauth(struct net_device *dev, 935static void ieee80211_send_deauth(struct ieee80211_sub_if_data *sdata,
915 struct ieee80211_if_sta *ifsta, u16 reason) 936 struct ieee80211_if_sta *ifsta, u16 reason)
916{ 937{
917 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 938 struct ieee80211_local *local = sdata->local;
918 struct sk_buff *skb; 939 struct sk_buff *skb;
919 struct ieee80211_mgmt *mgmt; 940 struct ieee80211_mgmt *mgmt;
920 941
921 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 942 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
922 if (!skb) { 943 if (!skb) {
923 printk(KERN_DEBUG "%s: failed to allocate buffer for deauth " 944 printk(KERN_DEBUG "%s: failed to allocate buffer for deauth "
924 "frame\n", dev->name); 945 "frame\n", sdata->dev->name);
925 return; 946 return;
926 } 947 }
927 skb_reserve(skb, local->hw.extra_tx_headroom); 948 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -929,28 +950,28 @@ static void ieee80211_send_deauth(struct net_device *dev,
929 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 950 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
930 memset(mgmt, 0, 24); 951 memset(mgmt, 0, 24);
931 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); 952 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
932 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 953 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
933 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 954 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
934 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 955 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
935 IEEE80211_STYPE_DEAUTH); 956 IEEE80211_STYPE_DEAUTH);
936 skb_put(skb, 2); 957 skb_put(skb, 2);
937 mgmt->u.deauth.reason_code = cpu_to_le16(reason); 958 mgmt->u.deauth.reason_code = cpu_to_le16(reason);
938 959
939 ieee80211_sta_tx(dev, skb, 0); 960 ieee80211_sta_tx(sdata, skb, 0);
940} 961}
941 962
942 963
943static void ieee80211_send_disassoc(struct net_device *dev, 964static void ieee80211_send_disassoc(struct ieee80211_sub_if_data *sdata,
944 struct ieee80211_if_sta *ifsta, u16 reason) 965 struct ieee80211_if_sta *ifsta, u16 reason)
945{ 966{
946 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 967 struct ieee80211_local *local = sdata->local;
947 struct sk_buff *skb; 968 struct sk_buff *skb;
948 struct ieee80211_mgmt *mgmt; 969 struct ieee80211_mgmt *mgmt;
949 970
950 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 971 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
951 if (!skb) { 972 if (!skb) {
952 printk(KERN_DEBUG "%s: failed to allocate buffer for disassoc " 973 printk(KERN_DEBUG "%s: failed to allocate buffer for disassoc "
953 "frame\n", dev->name); 974 "frame\n", sdata->dev->name);
954 return; 975 return;
955 } 976 }
956 skb_reserve(skb, local->hw.extra_tx_headroom); 977 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -958,21 +979,21 @@ static void ieee80211_send_disassoc(struct net_device *dev,
958 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 979 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
959 memset(mgmt, 0, 24); 980 memset(mgmt, 0, 24);
960 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); 981 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
961 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 982 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
962 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 983 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
963 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 984 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
964 IEEE80211_STYPE_DISASSOC); 985 IEEE80211_STYPE_DISASSOC);
965 skb_put(skb, 2); 986 skb_put(skb, 2);
966 mgmt->u.disassoc.reason_code = cpu_to_le16(reason); 987 mgmt->u.disassoc.reason_code = cpu_to_le16(reason);
967 988
968 ieee80211_sta_tx(dev, skb, 0); 989 ieee80211_sta_tx(sdata, skb, 0);
969} 990}
970 991
971 992
972static int ieee80211_privacy_mismatch(struct net_device *dev, 993static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata,
973 struct ieee80211_if_sta *ifsta) 994 struct ieee80211_if_sta *ifsta)
974{ 995{
975 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 996 struct ieee80211_local *local = sdata->local;
976 struct ieee80211_sta_bss *bss; 997 struct ieee80211_sta_bss *bss;
977 int bss_privacy; 998 int bss_privacy;
978 int wep_privacy; 999 int wep_privacy;
@@ -981,14 +1002,14 @@ static int ieee80211_privacy_mismatch(struct net_device *dev,
981 if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL)) 1002 if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL))
982 return 0; 1003 return 0;
983 1004
984 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, 1005 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
985 local->hw.conf.channel->center_freq, 1006 local->hw.conf.channel->center_freq,
986 ifsta->ssid, ifsta->ssid_len); 1007 ifsta->ssid, ifsta->ssid_len);
987 if (!bss) 1008 if (!bss)
988 return 0; 1009 return 0;
989 1010
990 bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY); 1011 bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY);
991 wep_privacy = !!ieee80211_sta_wep_configured(dev); 1012 wep_privacy = !!ieee80211_sta_wep_configured(sdata);
992 privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED); 1013 privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED);
993 1014
994 ieee80211_rx_bss_put(local, bss); 1015 ieee80211_rx_bss_put(local, bss);
@@ -1000,7 +1021,7 @@ static int ieee80211_privacy_mismatch(struct net_device *dev,
1000} 1021}
1001 1022
1002 1023
1003static void ieee80211_associate(struct net_device *dev, 1024static void ieee80211_associate(struct ieee80211_sub_if_data *sdata,
1004 struct ieee80211_if_sta *ifsta) 1025 struct ieee80211_if_sta *ifsta)
1005{ 1026{
1006 DECLARE_MAC_BUF(mac); 1027 DECLARE_MAC_BUF(mac);
@@ -1009,31 +1030,31 @@ static void ieee80211_associate(struct net_device *dev,
1009 if (ifsta->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) { 1030 if (ifsta->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) {
1010 printk(KERN_DEBUG "%s: association with AP %s" 1031 printk(KERN_DEBUG "%s: association with AP %s"
1011 " timed out\n", 1032 " timed out\n",
1012 dev->name, print_mac(mac, ifsta->bssid)); 1033 sdata->dev->name, print_mac(mac, ifsta->bssid));
1013 ifsta->state = IEEE80211_DISABLED; 1034 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1014 return; 1035 return;
1015 } 1036 }
1016 1037
1017 ifsta->state = IEEE80211_ASSOCIATE; 1038 ifsta->state = IEEE80211_STA_MLME_ASSOCIATE;
1018 printk(KERN_DEBUG "%s: associate with AP %s\n", 1039 printk(KERN_DEBUG "%s: associate with AP %s\n",
1019 dev->name, print_mac(mac, ifsta->bssid)); 1040 sdata->dev->name, print_mac(mac, ifsta->bssid));
1020 if (ieee80211_privacy_mismatch(dev, ifsta)) { 1041 if (ieee80211_privacy_mismatch(sdata, ifsta)) {
1021 printk(KERN_DEBUG "%s: mismatch in privacy configuration and " 1042 printk(KERN_DEBUG "%s: mismatch in privacy configuration and "
1022 "mixed-cell disabled - abort association\n", dev->name); 1043 "mixed-cell disabled - abort association\n", sdata->dev->name);
1023 ifsta->state = IEEE80211_DISABLED; 1044 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1024 return; 1045 return;
1025 } 1046 }
1026 1047
1027 ieee80211_send_assoc(dev, ifsta); 1048 ieee80211_send_assoc(sdata, ifsta);
1028 1049
1029 mod_timer(&ifsta->timer, jiffies + IEEE80211_ASSOC_TIMEOUT); 1050 mod_timer(&ifsta->timer, jiffies + IEEE80211_ASSOC_TIMEOUT);
1030} 1051}
1031 1052
1032 1053
1033static void ieee80211_associated(struct net_device *dev, 1054static void ieee80211_associated(struct ieee80211_sub_if_data *sdata,
1034 struct ieee80211_if_sta *ifsta) 1055 struct ieee80211_if_sta *ifsta)
1035{ 1056{
1036 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1057 struct ieee80211_local *local = sdata->local;
1037 struct sta_info *sta; 1058 struct sta_info *sta;
1038 int disassoc; 1059 int disassoc;
1039 DECLARE_MAC_BUF(mac); 1060 DECLARE_MAC_BUF(mac);
@@ -1043,14 +1064,14 @@ static void ieee80211_associated(struct net_device *dev,
1043 * for better APs. */ 1064 * for better APs. */
1044 /* TODO: remove expired BSSes */ 1065 /* TODO: remove expired BSSes */
1045 1066
1046 ifsta->state = IEEE80211_ASSOCIATED; 1067 ifsta->state = IEEE80211_STA_MLME_ASSOCIATED;
1047 1068
1048 rcu_read_lock(); 1069 rcu_read_lock();
1049 1070
1050 sta = sta_info_get(local, ifsta->bssid); 1071 sta = sta_info_get(local, ifsta->bssid);
1051 if (!sta) { 1072 if (!sta) {
1052 printk(KERN_DEBUG "%s: No STA entry for own AP %s\n", 1073 printk(KERN_DEBUG "%s: No STA entry for own AP %s\n",
1053 dev->name, print_mac(mac, ifsta->bssid)); 1074 sdata->dev->name, print_mac(mac, ifsta->bssid));
1054 disassoc = 1; 1075 disassoc = 1;
1055 } else { 1076 } else {
1056 disassoc = 0; 1077 disassoc = 0;
@@ -1060,11 +1081,11 @@ static void ieee80211_associated(struct net_device *dev,
1060 printk(KERN_DEBUG "%s: No ProbeResp from " 1081 printk(KERN_DEBUG "%s: No ProbeResp from "
1061 "current AP %s - assume out of " 1082 "current AP %s - assume out of "
1062 "range\n", 1083 "range\n",
1063 dev->name, print_mac(mac, ifsta->bssid)); 1084 sdata->dev->name, print_mac(mac, ifsta->bssid));
1064 disassoc = 1; 1085 disassoc = 1;
1065 sta_info_unlink(&sta); 1086 sta_info_unlink(&sta);
1066 } else 1087 } else
1067 ieee80211_send_probe_req(dev, ifsta->bssid, 1088 ieee80211_send_probe_req(sdata, ifsta->bssid,
1068 local->scan_ssid, 1089 local->scan_ssid,
1069 local->scan_ssid_len); 1090 local->scan_ssid_len);
1070 ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL; 1091 ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL;
@@ -1073,7 +1094,7 @@ static void ieee80211_associated(struct net_device *dev,
1073 if (time_after(jiffies, ifsta->last_probe + 1094 if (time_after(jiffies, ifsta->last_probe +
1074 IEEE80211_PROBE_INTERVAL)) { 1095 IEEE80211_PROBE_INTERVAL)) {
1075 ifsta->last_probe = jiffies; 1096 ifsta->last_probe = jiffies;
1076 ieee80211_send_probe_req(dev, ifsta->bssid, 1097 ieee80211_send_probe_req(sdata, ifsta->bssid,
1077 ifsta->ssid, 1098 ifsta->ssid,
1078 ifsta->ssid_len); 1099 ifsta->ssid_len);
1079 } 1100 }
@@ -1086,8 +1107,8 @@ static void ieee80211_associated(struct net_device *dev,
1086 sta_info_destroy(sta); 1107 sta_info_destroy(sta);
1087 1108
1088 if (disassoc) { 1109 if (disassoc) {
1089 ifsta->state = IEEE80211_DISABLED; 1110 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1090 ieee80211_set_associated(dev, ifsta, 0); 1111 ieee80211_set_associated(sdata, ifsta, 0);
1091 } else { 1112 } else {
1092 mod_timer(&ifsta->timer, jiffies + 1113 mod_timer(&ifsta->timer, jiffies +
1093 IEEE80211_MONITORING_INTERVAL); 1114 IEEE80211_MONITORING_INTERVAL);
@@ -1095,10 +1116,10 @@ static void ieee80211_associated(struct net_device *dev,
1095} 1116}
1096 1117
1097 1118
1098static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, 1119static void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1099 u8 *ssid, size_t ssid_len) 1120 u8 *ssid, size_t ssid_len)
1100{ 1121{
1101 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1122 struct ieee80211_local *local = sdata->local;
1102 struct ieee80211_supported_band *sband; 1123 struct ieee80211_supported_band *sband;
1103 struct sk_buff *skb; 1124 struct sk_buff *skb;
1104 struct ieee80211_mgmt *mgmt; 1125 struct ieee80211_mgmt *mgmt;
@@ -1108,16 +1129,16 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
1108 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200); 1129 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200);
1109 if (!skb) { 1130 if (!skb) {
1110 printk(KERN_DEBUG "%s: failed to allocate buffer for probe " 1131 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
1111 "request\n", dev->name); 1132 "request\n", sdata->dev->name);
1112 return; 1133 return;
1113 } 1134 }
1114 skb_reserve(skb, local->hw.extra_tx_headroom); 1135 skb_reserve(skb, local->hw.extra_tx_headroom);
1115 1136
1116 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 1137 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
1117 memset(mgmt, 0, 24); 1138 memset(mgmt, 0, 24);
1118 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 1139 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1119 IEEE80211_STYPE_PROBE_REQ); 1140 IEEE80211_STYPE_PROBE_REQ);
1120 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 1141 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
1121 if (dst) { 1142 if (dst) {
1122 memcpy(mgmt->da, dst, ETH_ALEN); 1143 memcpy(mgmt->da, dst, ETH_ALEN);
1123 memcpy(mgmt->bssid, dst, ETH_ALEN); 1144 memcpy(mgmt->bssid, dst, ETH_ALEN);
@@ -1152,13 +1173,12 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
1152 *pos = rate->bitrate / 5; 1173 *pos = rate->bitrate / 5;
1153 } 1174 }
1154 1175
1155 ieee80211_sta_tx(dev, skb, 0); 1176 ieee80211_sta_tx(sdata, skb, 0);
1156} 1177}
1157 1178
1158 1179
1159static int ieee80211_sta_wep_configured(struct net_device *dev) 1180static int ieee80211_sta_wep_configured(struct ieee80211_sub_if_data *sdata)
1160{ 1181{
1161 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1162 if (!sdata || !sdata->default_key || 1182 if (!sdata || !sdata->default_key ||
1163 sdata->default_key->conf.alg != ALG_WEP) 1183 sdata->default_key->conf.alg != ALG_WEP)
1164 return 0; 1184 return 0;
@@ -1166,16 +1186,16 @@ static int ieee80211_sta_wep_configured(struct net_device *dev)
1166} 1186}
1167 1187
1168 1188
1169static void ieee80211_auth_completed(struct net_device *dev, 1189static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata,
1170 struct ieee80211_if_sta *ifsta) 1190 struct ieee80211_if_sta *ifsta)
1171{ 1191{
1172 printk(KERN_DEBUG "%s: authenticated\n", dev->name); 1192 printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
1173 ifsta->flags |= IEEE80211_STA_AUTHENTICATED; 1193 ifsta->flags |= IEEE80211_STA_AUTHENTICATED;
1174 ieee80211_associate(dev, ifsta); 1194 ieee80211_associate(sdata, ifsta);
1175} 1195}
1176 1196
1177 1197
1178static void ieee80211_auth_challenge(struct net_device *dev, 1198static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1179 struct ieee80211_if_sta *ifsta, 1199 struct ieee80211_if_sta *ifsta,
1180 struct ieee80211_mgmt *mgmt, 1200 struct ieee80211_mgmt *mgmt,
1181 size_t len) 1201 size_t len)
@@ -1187,17 +1207,16 @@ static void ieee80211_auth_challenge(struct net_device *dev,
1187 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); 1207 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1188 if (!elems.challenge) 1208 if (!elems.challenge)
1189 return; 1209 return;
1190 ieee80211_send_auth(dev, ifsta, 3, elems.challenge - 2, 1210 ieee80211_send_auth(sdata, ifsta, 3, elems.challenge - 2,
1191 elems.challenge_len + 2, 1); 1211 elems.challenge_len + 2, 1);
1192} 1212}
1193 1213
1194static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid, 1214static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
1195 u8 dialog_token, u16 status, u16 policy, 1215 u8 dialog_token, u16 status, u16 policy,
1196 u16 buf_size, u16 timeout) 1216 u16 buf_size, u16 timeout)
1197{ 1217{
1198 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1199 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 1218 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1200 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1219 struct ieee80211_local *local = sdata->local;
1201 struct sk_buff *skb; 1220 struct sk_buff *skb;
1202 struct ieee80211_mgmt *mgmt; 1221 struct ieee80211_mgmt *mgmt;
1203 u16 capab; 1222 u16 capab;
@@ -1206,7 +1225,7 @@ static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid,
1206 1225
1207 if (!skb) { 1226 if (!skb) {
1208 printk(KERN_DEBUG "%s: failed to allocate buffer " 1227 printk(KERN_DEBUG "%s: failed to allocate buffer "
1209 "for addba resp frame\n", dev->name); 1228 "for addba resp frame\n", sdata->dev->name);
1210 return; 1229 return;
1211 } 1230 }
1212 1231
@@ -1214,13 +1233,13 @@ static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid,
1214 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 1233 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
1215 memset(mgmt, 0, 24); 1234 memset(mgmt, 0, 24);
1216 memcpy(mgmt->da, da, ETH_ALEN); 1235 memcpy(mgmt->da, da, ETH_ALEN);
1217 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 1236 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
1218 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) 1237 if (sdata->vif.type == IEEE80211_IF_TYPE_AP)
1219 memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN); 1238 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
1220 else 1239 else
1221 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 1240 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
1222 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 1241 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1223 IEEE80211_STYPE_ACTION); 1242 IEEE80211_STYPE_ACTION);
1224 1243
1225 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp)); 1244 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp));
1226 mgmt->u.action.category = WLAN_CATEGORY_BACK; 1245 mgmt->u.action.category = WLAN_CATEGORY_BACK;
@@ -1235,17 +1254,16 @@ static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid,
1235 mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); 1254 mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
1236 mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); 1255 mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
1237 1256
1238 ieee80211_sta_tx(dev, skb, 0); 1257 ieee80211_sta_tx(sdata, skb, 0);
1239 1258
1240 return; 1259 return;
1241} 1260}
1242 1261
1243void ieee80211_send_addba_request(struct net_device *dev, const u8 *da, 1262void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, const u8 *da,
1244 u16 tid, u8 dialog_token, u16 start_seq_num, 1263 u16 tid, u8 dialog_token, u16 start_seq_num,
1245 u16 agg_size, u16 timeout) 1264 u16 agg_size, u16 timeout)
1246{ 1265{
1247 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1266 struct ieee80211_local *local = sdata->local;
1248 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1249 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 1267 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1250 struct sk_buff *skb; 1268 struct sk_buff *skb;
1251 struct ieee80211_mgmt *mgmt; 1269 struct ieee80211_mgmt *mgmt;
@@ -1255,21 +1273,21 @@ void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
1255 1273
1256 if (!skb) { 1274 if (!skb) {
1257 printk(KERN_ERR "%s: failed to allocate buffer " 1275 printk(KERN_ERR "%s: failed to allocate buffer "
1258 "for addba request frame\n", dev->name); 1276 "for addba request frame\n", sdata->dev->name);
1259 return; 1277 return;
1260 } 1278 }
1261 skb_reserve(skb, local->hw.extra_tx_headroom); 1279 skb_reserve(skb, local->hw.extra_tx_headroom);
1262 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 1280 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
1263 memset(mgmt, 0, 24); 1281 memset(mgmt, 0, 24);
1264 memcpy(mgmt->da, da, ETH_ALEN); 1282 memcpy(mgmt->da, da, ETH_ALEN);
1265 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 1283 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
1266 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) 1284 if (sdata->vif.type == IEEE80211_IF_TYPE_AP)
1267 memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN); 1285 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
1268 else 1286 else
1269 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 1287 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
1270 1288
1271 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 1289 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1272 IEEE80211_STYPE_ACTION); 1290 IEEE80211_STYPE_ACTION);
1273 1291
1274 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); 1292 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));
1275 1293
@@ -1287,14 +1305,13 @@ void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
1287 mgmt->u.action.u.addba_req.start_seq_num = 1305 mgmt->u.action.u.addba_req.start_seq_num =
1288 cpu_to_le16(start_seq_num << 4); 1306 cpu_to_le16(start_seq_num << 4);
1289 1307
1290 ieee80211_sta_tx(dev, skb, 0); 1308 ieee80211_sta_tx(sdata, skb, 0);
1291} 1309}
1292 1310
1293static void ieee80211_sta_process_addba_request(struct net_device *dev, 1311static void ieee80211_sta_process_addba_request(struct ieee80211_local *local,
1294 struct ieee80211_mgmt *mgmt, 1312 struct ieee80211_mgmt *mgmt,
1295 size_t len) 1313 size_t len)
1296{ 1314{
1297 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1298 struct ieee80211_hw *hw = &local->hw; 1315 struct ieee80211_hw *hw = &local->hw;
1299 struct ieee80211_conf *conf = &hw->conf; 1316 struct ieee80211_conf *conf = &hw->conf;
1300 struct sta_info *sta; 1317 struct sta_info *sta;
@@ -1426,16 +1443,15 @@ end:
1426 spin_unlock_bh(&sta->lock); 1443 spin_unlock_bh(&sta->lock);
1427 1444
1428end_no_lock: 1445end_no_lock:
1429 ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid, 1446 ieee80211_send_addba_resp(sta->sdata, sta->addr, tid,
1430 dialog_token, status, 1, buf_size, timeout); 1447 dialog_token, status, 1, buf_size, timeout);
1431 rcu_read_unlock(); 1448 rcu_read_unlock();
1432} 1449}
1433 1450
1434static void ieee80211_sta_process_addba_resp(struct net_device *dev, 1451static void ieee80211_sta_process_addba_resp(struct ieee80211_local *local,
1435 struct ieee80211_mgmt *mgmt, 1452 struct ieee80211_mgmt *mgmt,
1436 size_t len) 1453 size_t len)
1437{ 1454{
1438 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1439 struct ieee80211_hw *hw = &local->hw; 1455 struct ieee80211_hw *hw = &local->hw;
1440 struct sta_info *sta; 1456 struct sta_info *sta;
1441 u16 capab; 1457 u16 capab;
@@ -1497,11 +1513,10 @@ addba_resp_exit:
1497 rcu_read_unlock(); 1513 rcu_read_unlock();
1498} 1514}
1499 1515
1500void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, 1516void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, const u8 *da, u16 tid,
1501 u16 initiator, u16 reason_code) 1517 u16 initiator, u16 reason_code)
1502{ 1518{
1503 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1519 struct ieee80211_local *local = sdata->local;
1504 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1505 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 1520 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1506 struct sk_buff *skb; 1521 struct sk_buff *skb;
1507 struct ieee80211_mgmt *mgmt; 1522 struct ieee80211_mgmt *mgmt;
@@ -1511,7 +1526,7 @@ void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
1511 1526
1512 if (!skb) { 1527 if (!skb) {
1513 printk(KERN_ERR "%s: failed to allocate buffer " 1528 printk(KERN_ERR "%s: failed to allocate buffer "
1514 "for delba frame\n", dev->name); 1529 "for delba frame\n", sdata->dev->name);
1515 return; 1530 return;
1516 } 1531 }
1517 1532
@@ -1519,13 +1534,13 @@ void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
1519 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 1534 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
1520 memset(mgmt, 0, 24); 1535 memset(mgmt, 0, 24);
1521 memcpy(mgmt->da, da, ETH_ALEN); 1536 memcpy(mgmt->da, da, ETH_ALEN);
1522 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 1537 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
1523 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) 1538 if (sdata->vif.type == IEEE80211_IF_TYPE_AP)
1524 memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN); 1539 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
1525 else 1540 else
1526 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 1541 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
1527 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 1542 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1528 IEEE80211_STYPE_ACTION); 1543 IEEE80211_STYPE_ACTION);
1529 1544
1530 skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba)); 1545 skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba));
1531 1546
@@ -1537,12 +1552,12 @@ void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
1537 mgmt->u.action.u.delba.params = cpu_to_le16(params); 1552 mgmt->u.action.u.delba.params = cpu_to_le16(params);
1538 mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); 1553 mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
1539 1554
1540 ieee80211_sta_tx(dev, skb, 0); 1555 ieee80211_sta_tx(sdata, skb, 0);
1541} 1556}
1542 1557
1543void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn) 1558void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
1544{ 1559{
1545 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1560 struct ieee80211_local *local = sdata->local;
1546 struct sk_buff *skb; 1561 struct sk_buff *skb;
1547 struct ieee80211_bar *bar; 1562 struct ieee80211_bar *bar;
1548 u16 bar_control = 0; 1563 u16 bar_control = 0;
@@ -1550,29 +1565,29 @@ void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn)
1550 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); 1565 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
1551 if (!skb) { 1566 if (!skb) {
1552 printk(KERN_ERR "%s: failed to allocate buffer for " 1567 printk(KERN_ERR "%s: failed to allocate buffer for "
1553 "bar frame\n", dev->name); 1568 "bar frame\n", sdata->dev->name);
1554 return; 1569 return;
1555 } 1570 }
1556 skb_reserve(skb, local->hw.extra_tx_headroom); 1571 skb_reserve(skb, local->hw.extra_tx_headroom);
1557 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar)); 1572 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
1558 memset(bar, 0, sizeof(*bar)); 1573 memset(bar, 0, sizeof(*bar));
1559 bar->frame_control = IEEE80211_FC(IEEE80211_FTYPE_CTL, 1574 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
1560 IEEE80211_STYPE_BACK_REQ); 1575 IEEE80211_STYPE_BACK_REQ);
1561 memcpy(bar->ra, ra, ETH_ALEN); 1576 memcpy(bar->ra, ra, ETH_ALEN);
1562 memcpy(bar->ta, dev->dev_addr, ETH_ALEN); 1577 memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN);
1563 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; 1578 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
1564 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; 1579 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
1565 bar_control |= (u16)(tid << 12); 1580 bar_control |= (u16)(tid << 12);
1566 bar->control = cpu_to_le16(bar_control); 1581 bar->control = cpu_to_le16(bar_control);
1567 bar->start_seq_num = cpu_to_le16(ssn); 1582 bar->start_seq_num = cpu_to_le16(ssn);
1568 1583
1569 ieee80211_sta_tx(dev, skb, 0); 1584 ieee80211_sta_tx(sdata, skb, 0);
1570} 1585}
1571 1586
1572void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, 1587void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
1573 u16 initiator, u16 reason) 1588 u16 initiator, u16 reason)
1574{ 1589{
1575 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1590 struct ieee80211_local *local = sdata->local;
1576 struct ieee80211_hw *hw = &local->hw; 1591 struct ieee80211_hw *hw = &local->hw;
1577 struct sta_info *sta; 1592 struct sta_info *sta;
1578 int ret, i; 1593 int ret, i;
@@ -1620,7 +1635,7 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
1620 1635
1621 /* check if this is a self generated aggregation halt */ 1636 /* check if this is a self generated aggregation halt */
1622 if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER) 1637 if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
1623 ieee80211_send_delba(dev, ra, tid, 0, reason); 1638 ieee80211_send_delba(sdata, ra, tid, 0, reason);
1624 1639
1625 /* free the reordering buffer */ 1640 /* free the reordering buffer */
1626 for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) { 1641 for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
@@ -1641,10 +1656,10 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
1641} 1656}
1642 1657
1643 1658
1644static void ieee80211_sta_process_delba(struct net_device *dev, 1659static void ieee80211_sta_process_delba(struct ieee80211_sub_if_data *sdata,
1645 struct ieee80211_mgmt *mgmt, size_t len) 1660 struct ieee80211_mgmt *mgmt, size_t len)
1646{ 1661{
1647 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1662 struct ieee80211_local *local = sdata->local;
1648 struct sta_info *sta; 1663 struct sta_info *sta;
1649 u16 tid, params; 1664 u16 tid, params;
1650 u16 initiator; 1665 u16 initiator;
@@ -1671,7 +1686,7 @@ static void ieee80211_sta_process_delba(struct net_device *dev,
1671#endif /* CONFIG_MAC80211_HT_DEBUG */ 1686#endif /* CONFIG_MAC80211_HT_DEBUG */
1672 1687
1673 if (initiator == WLAN_BACK_INITIATOR) 1688 if (initiator == WLAN_BACK_INITIATOR)
1674 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid, 1689 ieee80211_sta_stop_rx_ba_session(sdata, sta->addr, tid,
1675 WLAN_BACK_INITIATOR, 0); 1690 WLAN_BACK_INITIATOR, 0);
1676 else { /* WLAN_BACK_RECIPIENT */ 1691 else { /* WLAN_BACK_RECIPIENT */
1677 spin_lock_bh(&sta->lock); 1692 spin_lock_bh(&sta->lock);
@@ -1758,31 +1773,31 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
1758#ifdef CONFIG_MAC80211_HT_DEBUG 1773#ifdef CONFIG_MAC80211_HT_DEBUG
1759 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); 1774 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
1760#endif 1775#endif
1761 ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, 1776 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->addr,
1762 (u16)*ptid, WLAN_BACK_TIMER, 1777 (u16)*ptid, WLAN_BACK_TIMER,
1763 WLAN_REASON_QSTA_TIMEOUT); 1778 WLAN_REASON_QSTA_TIMEOUT);
1764} 1779}
1765 1780
1766void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr) 1781void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr)
1767{ 1782{
1768 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1783 struct ieee80211_local *local = sdata->local;
1769 int i; 1784 int i;
1770 1785
1771 for (i = 0; i < STA_TID_NUM; i++) { 1786 for (i = 0; i < STA_TID_NUM; i++) {
1772 ieee80211_stop_tx_ba_session(&local->hw, addr, i, 1787 ieee80211_stop_tx_ba_session(&local->hw, addr, i,
1773 WLAN_BACK_INITIATOR); 1788 WLAN_BACK_INITIATOR);
1774 ieee80211_sta_stop_rx_ba_session(dev, addr, i, 1789 ieee80211_sta_stop_rx_ba_session(sdata, addr, i,
1775 WLAN_BACK_RECIPIENT, 1790 WLAN_BACK_RECIPIENT,
1776 WLAN_REASON_QSTA_LEAVE_QBSS); 1791 WLAN_REASON_QSTA_LEAVE_QBSS);
1777 } 1792 }
1778} 1793}
1779 1794
1780static void ieee80211_send_refuse_measurement_request(struct net_device *dev, 1795static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata,
1781 struct ieee80211_msrment_ie *request_ie, 1796 struct ieee80211_msrment_ie *request_ie,
1782 const u8 *da, const u8 *bssid, 1797 const u8 *da, const u8 *bssid,
1783 u8 dialog_token) 1798 u8 dialog_token)
1784{ 1799{
1785 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1800 struct ieee80211_local *local = sdata->local;
1786 struct sk_buff *skb; 1801 struct sk_buff *skb;
1787 struct ieee80211_mgmt *msr_report; 1802 struct ieee80211_mgmt *msr_report;
1788 1803
@@ -1791,7 +1806,7 @@ static void ieee80211_send_refuse_measurement_request(struct net_device *dev,
1791 1806
1792 if (!skb) { 1807 if (!skb) {
1793 printk(KERN_ERR "%s: failed to allocate buffer for " 1808 printk(KERN_ERR "%s: failed to allocate buffer for "
1794 "measurement report frame\n", dev->name); 1809 "measurement report frame\n", sdata->dev->name);
1795 return; 1810 return;
1796 } 1811 }
1797 1812
@@ -1799,9 +1814,9 @@ static void ieee80211_send_refuse_measurement_request(struct net_device *dev,
1799 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); 1814 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
1800 memset(msr_report, 0, 24); 1815 memset(msr_report, 0, 24);
1801 memcpy(msr_report->da, da, ETH_ALEN); 1816 memcpy(msr_report->da, da, ETH_ALEN);
1802 memcpy(msr_report->sa, dev->dev_addr, ETH_ALEN); 1817 memcpy(msr_report->sa, sdata->dev->dev_addr, ETH_ALEN);
1803 memcpy(msr_report->bssid, bssid, ETH_ALEN); 1818 memcpy(msr_report->bssid, bssid, ETH_ALEN);
1804 msr_report->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 1819 msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1805 IEEE80211_STYPE_ACTION); 1820 IEEE80211_STYPE_ACTION);
1806 1821
1807 skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement)); 1822 skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
@@ -1821,10 +1836,10 @@ static void ieee80211_send_refuse_measurement_request(struct net_device *dev,
1821 IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED; 1836 IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
1822 msr_report->u.action.u.measurement.msr_elem.type = request_ie->type; 1837 msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
1823 1838
1824 ieee80211_sta_tx(dev, skb, 0); 1839 ieee80211_sta_tx(sdata, skb, 0);
1825} 1840}
1826 1841
1827static void ieee80211_sta_process_measurement_req(struct net_device *dev, 1842static void ieee80211_sta_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1828 struct ieee80211_mgmt *mgmt, 1843 struct ieee80211_mgmt *mgmt,
1829 size_t len) 1844 size_t len)
1830{ 1845{
@@ -1835,23 +1850,22 @@ static void ieee80211_sta_process_measurement_req(struct net_device *dev,
1835 * For now just refuse 1850 * For now just refuse
1836 * TODO: Answer basic measurement as unmeasured 1851 * TODO: Answer basic measurement as unmeasured
1837 */ 1852 */
1838 ieee80211_send_refuse_measurement_request(dev, 1853 ieee80211_send_refuse_measurement_request(sdata,
1839 &mgmt->u.action.u.measurement.msr_elem, 1854 &mgmt->u.action.u.measurement.msr_elem,
1840 mgmt->sa, mgmt->bssid, 1855 mgmt->sa, mgmt->bssid,
1841 mgmt->u.action.u.measurement.dialog_token); 1856 mgmt->u.action.u.measurement.dialog_token);
1842} 1857}
1843 1858
1844 1859
1845static void ieee80211_rx_mgmt_auth(struct net_device *dev, 1860static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1846 struct ieee80211_if_sta *ifsta, 1861 struct ieee80211_if_sta *ifsta,
1847 struct ieee80211_mgmt *mgmt, 1862 struct ieee80211_mgmt *mgmt,
1848 size_t len) 1863 size_t len)
1849{ 1864{
1850 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1851 u16 auth_alg, auth_transaction, status_code; 1865 u16 auth_alg, auth_transaction, status_code;
1852 DECLARE_MAC_BUF(mac); 1866 DECLARE_MAC_BUF(mac);
1853 1867
1854 if (ifsta->state != IEEE80211_AUTHENTICATE && 1868 if (ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE &&
1855 sdata->vif.type != IEEE80211_IF_TYPE_IBSS) 1869 sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
1856 return; 1870 return;
1857 1871
@@ -1879,7 +1893,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1879 */ 1893 */
1880 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) 1894 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
1881 return; 1895 return;
1882 ieee80211_send_auth(dev, ifsta, 2, NULL, 0, 0); 1896 ieee80211_send_auth(sdata, ifsta, 2, NULL, 0, 0);
1883 } 1897 }
1884 1898
1885 if (auth_alg != ifsta->auth_alg || 1899 if (auth_alg != ifsta->auth_alg ||
@@ -1912,7 +1926,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1912 algs[pos] == 0xff) 1926 algs[pos] == 0xff)
1913 continue; 1927 continue;
1914 if (algs[pos] == WLAN_AUTH_SHARED_KEY && 1928 if (algs[pos] == WLAN_AUTH_SHARED_KEY &&
1915 !ieee80211_sta_wep_configured(dev)) 1929 !ieee80211_sta_wep_configured(sdata))
1916 continue; 1930 continue;
1917 ifsta->auth_alg = algs[pos]; 1931 ifsta->auth_alg = algs[pos];
1918 break; 1932 break;
@@ -1924,19 +1938,19 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1924 switch (ifsta->auth_alg) { 1938 switch (ifsta->auth_alg) {
1925 case WLAN_AUTH_OPEN: 1939 case WLAN_AUTH_OPEN:
1926 case WLAN_AUTH_LEAP: 1940 case WLAN_AUTH_LEAP:
1927 ieee80211_auth_completed(dev, ifsta); 1941 ieee80211_auth_completed(sdata, ifsta);
1928 break; 1942 break;
1929 case WLAN_AUTH_SHARED_KEY: 1943 case WLAN_AUTH_SHARED_KEY:
1930 if (ifsta->auth_transaction == 4) 1944 if (ifsta->auth_transaction == 4)
1931 ieee80211_auth_completed(dev, ifsta); 1945 ieee80211_auth_completed(sdata, ifsta);
1932 else 1946 else
1933 ieee80211_auth_challenge(dev, ifsta, mgmt, len); 1947 ieee80211_auth_challenge(sdata, ifsta, mgmt, len);
1934 break; 1948 break;
1935 } 1949 }
1936} 1950}
1937 1951
1938 1952
1939static void ieee80211_rx_mgmt_deauth(struct net_device *dev, 1953static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1940 struct ieee80211_if_sta *ifsta, 1954 struct ieee80211_if_sta *ifsta,
1941 struct ieee80211_mgmt *mgmt, 1955 struct ieee80211_mgmt *mgmt,
1942 size_t len) 1956 size_t len)
@@ -1953,22 +1967,22 @@ static void ieee80211_rx_mgmt_deauth(struct net_device *dev,
1953 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 1967 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
1954 1968
1955 if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) 1969 if (ifsta->flags & IEEE80211_STA_AUTHENTICATED)
1956 printk(KERN_DEBUG "%s: deauthenticated\n", dev->name); 1970 printk(KERN_DEBUG "%s: deauthenticated\n", sdata->dev->name);
1957 1971
1958 if (ifsta->state == IEEE80211_AUTHENTICATE || 1972 if (ifsta->state == IEEE80211_STA_MLME_AUTHENTICATE ||
1959 ifsta->state == IEEE80211_ASSOCIATE || 1973 ifsta->state == IEEE80211_STA_MLME_ASSOCIATE ||
1960 ifsta->state == IEEE80211_ASSOCIATED) { 1974 ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) {
1961 ifsta->state = IEEE80211_AUTHENTICATE; 1975 ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
1962 mod_timer(&ifsta->timer, jiffies + 1976 mod_timer(&ifsta->timer, jiffies +
1963 IEEE80211_RETRY_AUTH_INTERVAL); 1977 IEEE80211_RETRY_AUTH_INTERVAL);
1964 } 1978 }
1965 1979
1966 ieee80211_set_disassoc(dev, ifsta, 1); 1980 ieee80211_set_disassoc(sdata, ifsta, 1);
1967 ifsta->flags &= ~IEEE80211_STA_AUTHENTICATED; 1981 ifsta->flags &= ~IEEE80211_STA_AUTHENTICATED;
1968} 1982}
1969 1983
1970 1984
1971static void ieee80211_rx_mgmt_disassoc(struct net_device *dev, 1985static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1972 struct ieee80211_if_sta *ifsta, 1986 struct ieee80211_if_sta *ifsta,
1973 struct ieee80211_mgmt *mgmt, 1987 struct ieee80211_mgmt *mgmt,
1974 size_t len) 1988 size_t len)
@@ -1985,15 +1999,15 @@ static void ieee80211_rx_mgmt_disassoc(struct net_device *dev,
1985 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 1999 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
1986 2000
1987 if (ifsta->flags & IEEE80211_STA_ASSOCIATED) 2001 if (ifsta->flags & IEEE80211_STA_ASSOCIATED)
1988 printk(KERN_DEBUG "%s: disassociated\n", dev->name); 2002 printk(KERN_DEBUG "%s: disassociated\n", sdata->dev->name);
1989 2003
1990 if (ifsta->state == IEEE80211_ASSOCIATED) { 2004 if (ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) {
1991 ifsta->state = IEEE80211_ASSOCIATE; 2005 ifsta->state = IEEE80211_STA_MLME_ASSOCIATE;
1992 mod_timer(&ifsta->timer, jiffies + 2006 mod_timer(&ifsta->timer, jiffies +
1993 IEEE80211_RETRY_AUTH_INTERVAL); 2007 IEEE80211_RETRY_AUTH_INTERVAL);
1994 } 2008 }
1995 2009
1996 ieee80211_set_disassoc(dev, ifsta, 0); 2010 ieee80211_set_disassoc(sdata, ifsta, 0);
1997} 2011}
1998 2012
1999 2013
@@ -2004,7 +2018,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2004 int reassoc) 2018 int reassoc)
2005{ 2019{
2006 struct ieee80211_local *local = sdata->local; 2020 struct ieee80211_local *local = sdata->local;
2007 struct net_device *dev = sdata->dev;
2008 struct ieee80211_supported_band *sband; 2021 struct ieee80211_supported_band *sband;
2009 struct sta_info *sta; 2022 struct sta_info *sta;
2010 u64 rates, basic_rates; 2023 u64 rates, basic_rates;
@@ -2019,7 +2032,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2019 /* AssocResp and ReassocResp have identical structure, so process both 2032 /* AssocResp and ReassocResp have identical structure, so process both
2020 * of them in this function. */ 2033 * of them in this function. */
2021 2034
2022 if (ifsta->state != IEEE80211_ASSOCIATE) 2035 if (ifsta->state != IEEE80211_STA_MLME_ASSOCIATE)
2023 return; 2036 return;
2024 2037
2025 if (len < 24 + 6) 2038 if (len < 24 + 6)
@@ -2034,12 +2047,12 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2034 2047
2035 printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " 2048 printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x "
2036 "status=%d aid=%d)\n", 2049 "status=%d aid=%d)\n",
2037 dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), 2050 sdata->dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa),
2038 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); 2051 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
2039 2052
2040 if (status_code != WLAN_STATUS_SUCCESS) { 2053 if (status_code != WLAN_STATUS_SUCCESS) {
2041 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", 2054 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
2042 dev->name, status_code); 2055 sdata->dev->name, status_code);
2043 /* if this was a reassociation, ensure we try a "full" 2056 /* if this was a reassociation, ensure we try a "full"
2044 * association next time. This works around some broken APs 2057 * association next time. This works around some broken APs
2045 * which do not correctly reject reassociation requests. */ 2058 * which do not correctly reject reassociation requests. */
@@ -2049,7 +2062,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2049 2062
2050 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 2063 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
2051 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " 2064 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
2052 "set\n", dev->name, aid); 2065 "set\n", sdata->dev->name, aid);
2053 aid &= ~(BIT(15) | BIT(14)); 2066 aid &= ~(BIT(15) | BIT(14));
2054 2067
2055 pos = mgmt->u.assoc_resp.variable; 2068 pos = mgmt->u.assoc_resp.variable;
@@ -2057,11 +2070,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2057 2070
2058 if (!elems.supp_rates) { 2071 if (!elems.supp_rates) {
2059 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", 2072 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
2060 dev->name); 2073 sdata->dev->name);
2061 return; 2074 return;
2062 } 2075 }
2063 2076
2064 printk(KERN_DEBUG "%s: associated\n", dev->name); 2077 printk(KERN_DEBUG "%s: associated\n", sdata->dev->name);
2065 ifsta->aid = aid; 2078 ifsta->aid = aid;
2066 ifsta->ap_capab = capab_info; 2079 ifsta->ap_capab = capab_info;
2067 2080
@@ -2082,11 +2095,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2082 sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC); 2095 sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC);
2083 if (!sta) { 2096 if (!sta) {
2084 printk(KERN_DEBUG "%s: failed to alloc STA entry for" 2097 printk(KERN_DEBUG "%s: failed to alloc STA entry for"
2085 " the AP\n", dev->name); 2098 " the AP\n", sdata->dev->name);
2086 rcu_read_unlock(); 2099 rcu_read_unlock();
2087 return; 2100 return;
2088 } 2101 }
2089 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, 2102 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
2090 local->hw.conf.channel->center_freq, 2103 local->hw.conf.channel->center_freq,
2091 ifsta->ssid, ifsta->ssid_len); 2104 ifsta->ssid, ifsta->ssid_len);
2092 if (bss) { 2105 if (bss) {
@@ -2099,7 +2112,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2099 err = sta_info_insert(sta); 2112 err = sta_info_insert(sta);
2100 if (err) { 2113 if (err) {
2101 printk(KERN_DEBUG "%s: failed to insert STA entry for" 2114 printk(KERN_DEBUG "%s: failed to insert STA entry for"
2102 " the AP (error %d)\n", dev->name, err); 2115 " the AP (error %d)\n", sdata->dev->name, err);
2103 rcu_read_unlock(); 2116 rcu_read_unlock();
2104 return; 2117 return;
2105 } 2118 }
@@ -2179,7 +2192,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2179 if (elems.wmm_param) { 2192 if (elems.wmm_param) {
2180 set_sta_flags(sta, WLAN_STA_WME); 2193 set_sta_flags(sta, WLAN_STA_WME);
2181 rcu_read_unlock(); 2194 rcu_read_unlock();
2182 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, 2195 ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param,
2183 elems.wmm_param_len); 2196 elems.wmm_param_len);
2184 } else 2197 } else
2185 rcu_read_unlock(); 2198 rcu_read_unlock();
@@ -2188,17 +2201,16 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2188 * ieee80211_set_associated() will tell the driver */ 2201 * ieee80211_set_associated() will tell the driver */
2189 bss_conf->aid = aid; 2202 bss_conf->aid = aid;
2190 bss_conf->assoc_capability = capab_info; 2203 bss_conf->assoc_capability = capab_info;
2191 ieee80211_set_associated(dev, ifsta, 1); 2204 ieee80211_set_associated(sdata, ifsta, 1);
2192 2205
2193 ieee80211_associated(dev, ifsta); 2206 ieee80211_associated(sdata, ifsta);
2194} 2207}
2195 2208
2196 2209
2197/* Caller must hold local->sta_bss_lock */ 2210/* Caller must hold local->sta_bss_lock */
2198static void __ieee80211_rx_bss_hash_add(struct net_device *dev, 2211static void __ieee80211_rx_bss_hash_add(struct ieee80211_local *local,
2199 struct ieee80211_sta_bss *bss) 2212 struct ieee80211_sta_bss *bss)
2200{ 2213{
2201 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2202 u8 hash_idx; 2214 u8 hash_idx;
2203 2215
2204 if (bss_mesh_cfg(bss)) 2216 if (bss_mesh_cfg(bss))
@@ -2234,10 +2246,10 @@ static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local,
2234 2246
2235 2247
2236static struct ieee80211_sta_bss * 2248static struct ieee80211_sta_bss *
2237ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int freq, 2249ieee80211_rx_bss_add(struct ieee80211_sub_if_data *sdata, u8 *bssid, int freq,
2238 u8 *ssid, u8 ssid_len) 2250 u8 *ssid, u8 ssid_len)
2239{ 2251{
2240 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2252 struct ieee80211_local *local = sdata->local;
2241 struct ieee80211_sta_bss *bss; 2253 struct ieee80211_sta_bss *bss;
2242 2254
2243 bss = kzalloc(sizeof(*bss), GFP_ATOMIC); 2255 bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
@@ -2255,16 +2267,15 @@ ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int freq,
2255 spin_lock_bh(&local->sta_bss_lock); 2267 spin_lock_bh(&local->sta_bss_lock);
2256 /* TODO: order by RSSI? */ 2268 /* TODO: order by RSSI? */
2257 list_add_tail(&bss->list, &local->sta_bss_list); 2269 list_add_tail(&bss->list, &local->sta_bss_list);
2258 __ieee80211_rx_bss_hash_add(dev, bss); 2270 __ieee80211_rx_bss_hash_add(local, bss);
2259 spin_unlock_bh(&local->sta_bss_lock); 2271 spin_unlock_bh(&local->sta_bss_lock);
2260 return bss; 2272 return bss;
2261} 2273}
2262 2274
2263static struct ieee80211_sta_bss * 2275static struct ieee80211_sta_bss *
2264ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq, 2276ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
2265 u8 *ssid, u8 ssid_len) 2277 u8 *ssid, u8 ssid_len)
2266{ 2278{
2267 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2268 struct ieee80211_sta_bss *bss; 2279 struct ieee80211_sta_bss *bss;
2269 2280
2270 spin_lock_bh(&local->sta_bss_lock); 2281 spin_lock_bh(&local->sta_bss_lock);
@@ -2286,10 +2297,9 @@ ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq,
2286 2297
2287#ifdef CONFIG_MAC80211_MESH 2298#ifdef CONFIG_MAC80211_MESH
2288static struct ieee80211_sta_bss * 2299static struct ieee80211_sta_bss *
2289ieee80211_rx_mesh_bss_get(struct net_device *dev, u8 *mesh_id, int mesh_id_len, 2300ieee80211_rx_mesh_bss_get(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len,
2290 u8 *mesh_cfg, int freq) 2301 u8 *mesh_cfg, int freq)
2291{ 2302{
2292 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2293 struct ieee80211_sta_bss *bss; 2303 struct ieee80211_sta_bss *bss;
2294 2304
2295 spin_lock_bh(&local->sta_bss_lock); 2305 spin_lock_bh(&local->sta_bss_lock);
@@ -2311,10 +2321,9 @@ ieee80211_rx_mesh_bss_get(struct net_device *dev, u8 *mesh_id, int mesh_id_len,
2311} 2321}
2312 2322
2313static struct ieee80211_sta_bss * 2323static struct ieee80211_sta_bss *
2314ieee80211_rx_mesh_bss_add(struct net_device *dev, u8 *mesh_id, int mesh_id_len, 2324ieee80211_rx_mesh_bss_add(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len,
2315 u8 *mesh_cfg, int mesh_config_len, int freq) 2325 u8 *mesh_cfg, int mesh_config_len, int freq)
2316{ 2326{
2317 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2318 struct ieee80211_sta_bss *bss; 2327 struct ieee80211_sta_bss *bss;
2319 2328
2320 if (mesh_config_len != MESH_CFG_LEN) 2329 if (mesh_config_len != MESH_CFG_LEN)
@@ -2348,7 +2357,7 @@ ieee80211_rx_mesh_bss_add(struct net_device *dev, u8 *mesh_id, int mesh_id_len,
2348 spin_lock_bh(&local->sta_bss_lock); 2357 spin_lock_bh(&local->sta_bss_lock);
2349 /* TODO: order by RSSI? */ 2358 /* TODO: order by RSSI? */
2350 list_add_tail(&bss->list, &local->sta_bss_list); 2359 list_add_tail(&bss->list, &local->sta_bss_list);
2351 __ieee80211_rx_bss_hash_add(dev, bss); 2360 __ieee80211_rx_bss_hash_add(local, bss);
2352 spin_unlock_bh(&local->sta_bss_lock); 2361 spin_unlock_bh(&local->sta_bss_lock);
2353 return bss; 2362 return bss;
2354} 2363}
@@ -2356,11 +2365,7 @@ ieee80211_rx_mesh_bss_add(struct net_device *dev, u8 *mesh_id, int mesh_id_len,
2356 2365
2357static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss) 2366static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss)
2358{ 2367{
2359 kfree(bss->wpa_ie); 2368 kfree(bss->ies);
2360 kfree(bss->rsn_ie);
2361 kfree(bss->wmm_ie);
2362 kfree(bss->ht_ie);
2363 kfree(bss->ht_add_ie);
2364 kfree(bss_mesh_id(bss)); 2369 kfree(bss_mesh_id(bss));
2365 kfree(bss_mesh_cfg(bss)); 2370 kfree(bss_mesh_cfg(bss));
2366 kfree(bss); 2371 kfree(bss);
@@ -2399,23 +2404,20 @@ void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local)
2399} 2404}
2400 2405
2401 2406
2402static int ieee80211_sta_join_ibss(struct net_device *dev, 2407static int ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
2403 struct ieee80211_if_sta *ifsta, 2408 struct ieee80211_if_sta *ifsta,
2404 struct ieee80211_sta_bss *bss) 2409 struct ieee80211_sta_bss *bss)
2405{ 2410{
2406 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2411 struct ieee80211_local *local = sdata->local;
2407 int res, rates, i, j; 2412 int res, rates, i, j;
2408 struct sk_buff *skb; 2413 struct sk_buff *skb;
2409 struct ieee80211_mgmt *mgmt; 2414 struct ieee80211_mgmt *mgmt;
2410 u8 *pos; 2415 u8 *pos;
2411 struct ieee80211_sub_if_data *sdata;
2412 struct ieee80211_supported_band *sband; 2416 struct ieee80211_supported_band *sband;
2413 union iwreq_data wrqu; 2417 union iwreq_data wrqu;
2414 2418
2415 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 2419 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2416 2420
2417 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2418
2419 /* Remove possible STA entries from other IBSS networks. */ 2421 /* Remove possible STA entries from other IBSS networks. */
2420 sta_info_flush_delayed(sdata); 2422 sta_info_flush_delayed(sdata);
2421 2423
@@ -2433,7 +2435,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2433 sdata->drop_unencrypted = bss->capability & 2435 sdata->drop_unencrypted = bss->capability &
2434 WLAN_CAPABILITY_PRIVACY ? 1 : 0; 2436 WLAN_CAPABILITY_PRIVACY ? 1 : 0;
2435 2437
2436 res = ieee80211_set_freq(dev, bss->freq); 2438 res = ieee80211_set_freq(sdata, bss->freq);
2437 2439
2438 if (res) 2440 if (res)
2439 return res; 2441 return res;
@@ -2446,10 +2448,10 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2446 mgmt = (struct ieee80211_mgmt *) 2448 mgmt = (struct ieee80211_mgmt *)
2447 skb_put(skb, 24 + sizeof(mgmt->u.beacon)); 2449 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
2448 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); 2450 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
2449 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 2451 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2450 IEEE80211_STYPE_PROBE_RESP); 2452 IEEE80211_STYPE_PROBE_RESP);
2451 memset(mgmt->da, 0xff, ETH_ALEN); 2453 memset(mgmt->da, 0xff, ETH_ALEN);
2452 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 2454 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
2453 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 2455 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
2454 mgmt->u.beacon.beacon_int = 2456 mgmt->u.beacon.beacon_int =
2455 cpu_to_le16(local->hw.conf.beacon_int); 2457 cpu_to_le16(local->hw.conf.beacon_int);
@@ -2506,14 +2508,14 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2506 } 2508 }
2507 ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates; 2509 ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates;
2508 2510
2509 ieee80211_sta_def_wmm_params(dev, bss, 1); 2511 ieee80211_sta_def_wmm_params(sdata, bss, 1);
2510 2512
2511 ifsta->state = IEEE80211_IBSS_JOINED; 2513 ifsta->state = IEEE80211_STA_MLME_IBSS_JOINED;
2512 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); 2514 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
2513 2515
2514 memset(&wrqu, 0, sizeof(wrqu)); 2516 memset(&wrqu, 0, sizeof(wrqu));
2515 memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); 2517 memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN);
2516 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 2518 wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL);
2517 2519
2518 return res; 2520 return res;
2519} 2521}
@@ -2554,55 +2556,91 @@ u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
2554 return supp_rates; 2556 return supp_rates;
2555} 2557}
2556 2558
2559static u64 ieee80211_sta_get_mandatory_rates(struct ieee80211_local *local,
2560 enum ieee80211_band band)
2561{
2562 struct ieee80211_supported_band *sband;
2563 struct ieee80211_rate *bitrates;
2564 u64 mandatory_rates;
2565 enum ieee80211_rate_flags mandatory_flag;
2566 int i;
2567
2568 sband = local->hw.wiphy->bands[band];
2569 if (!sband) {
2570 WARN_ON(1);
2571 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2572 }
2557 2573
2558static void ieee80211_rx_bss_info(struct net_device *dev, 2574 if (band == IEEE80211_BAND_2GHZ)
2575 mandatory_flag = IEEE80211_RATE_MANDATORY_B;
2576 else
2577 mandatory_flag = IEEE80211_RATE_MANDATORY_A;
2578
2579 bitrates = sband->bitrates;
2580 mandatory_rates = 0;
2581 for (i = 0; i < sband->n_bitrates; i++)
2582 if (bitrates[i].flags & mandatory_flag)
2583 mandatory_rates |= BIT(i);
2584 return mandatory_rates;
2585}
2586
2587static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2559 struct ieee80211_mgmt *mgmt, 2588 struct ieee80211_mgmt *mgmt,
2560 size_t len, 2589 size_t len,
2561 struct ieee80211_rx_status *rx_status, 2590 struct ieee80211_rx_status *rx_status,
2562 struct ieee802_11_elems *elems, 2591 struct ieee802_11_elems *elems)
2563 int beacon)
2564{ 2592{
2565 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2593 struct ieee80211_local *local = sdata->local;
2566 int freq, clen; 2594 int freq, clen;
2567 struct ieee80211_sta_bss *bss; 2595 struct ieee80211_sta_bss *bss;
2568 struct sta_info *sta; 2596 struct sta_info *sta;
2569 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2570 u64 beacon_timestamp, rx_timestamp;
2571 struct ieee80211_channel *channel; 2597 struct ieee80211_channel *channel;
2598 u64 beacon_timestamp, rx_timestamp;
2599 u64 supp_rates = 0;
2600 bool beacon = ieee80211_is_beacon(mgmt->frame_control);
2601 enum ieee80211_band band = rx_status->band;
2572 DECLARE_MAC_BUF(mac); 2602 DECLARE_MAC_BUF(mac);
2573 DECLARE_MAC_BUF(mac2); 2603 DECLARE_MAC_BUF(mac2);
2574 2604
2575 if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN))
2576 return; /* ignore ProbeResp to foreign address */
2577
2578 beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); 2605 beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
2579 2606
2580 if (ieee80211_vif_is_mesh(&sdata->vif) && elems->mesh_id && 2607 if (ieee80211_vif_is_mesh(&sdata->vif) && elems->mesh_id &&
2581 elems->mesh_config && mesh_matches_local(elems, dev)) { 2608 elems->mesh_config && mesh_matches_local(elems, sdata)) {
2582 u64 rates = ieee80211_sta_get_rates(local, elems, 2609 supp_rates = ieee80211_sta_get_rates(local, elems, band);
2583 rx_status->band);
2584 2610
2585 mesh_neighbour_update(mgmt->sa, rates, dev, 2611 mesh_neighbour_update(mgmt->sa, supp_rates, sdata,
2586 mesh_peer_accepts_plinks(elems, dev)); 2612 mesh_peer_accepts_plinks(elems));
2587 } 2613 }
2588 2614
2589 rcu_read_lock(); 2615 rcu_read_lock();
2590 2616
2591 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems->supp_rates && 2617 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems->supp_rates &&
2592 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 && 2618 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0) {
2593 (sta = sta_info_get(local, mgmt->sa))) { 2619
2594 u64 prev_rates; 2620 supp_rates = ieee80211_sta_get_rates(local, elems, band);
2595 u64 supp_rates = ieee80211_sta_get_rates(local, elems, 2621
2596 rx_status->band); 2622 sta = sta_info_get(local, mgmt->sa);
2597 2623 if (sta) {
2598 prev_rates = sta->supp_rates[rx_status->band]; 2624 u64 prev_rates;
2599 sta->supp_rates[rx_status->band] &= supp_rates; 2625
2600 if (sta->supp_rates[rx_status->band] == 0) { 2626 prev_rates = sta->supp_rates[band];
2601 /* No matching rates - this should not really happen. 2627 /* make sure mandatory rates are always added */
2602 * Make sure that at least one rate is marked 2628 sta->supp_rates[band] = supp_rates |
2603 * supported to avoid issues with TX rate ctrl. */ 2629 ieee80211_sta_get_mandatory_rates(local, band);
2604 sta->supp_rates[rx_status->band] = 2630
2605 sdata->u.sta.supp_rates_bits[rx_status->band]; 2631#ifdef CONFIG_MAC80211_IBSS_DEBUG
2632 if (sta->supp_rates[band] != prev_rates)
2633 printk(KERN_DEBUG "%s: updated supp_rates set "
2634 "for %s based on beacon info (0x%llx | "
2635 "0x%llx -> 0x%llx)\n",
2636 sdata->dev->name, print_mac(mac, sta->addr),
2637 (unsigned long long) prev_rates,
2638 (unsigned long long) supp_rates,
2639 (unsigned long long) sta->supp_rates[band]);
2640#endif
2641 } else {
2642 ieee80211_ibss_add_sta(sdata, NULL, mgmt->bssid,
2643 mgmt->sa, supp_rates);
2606 } 2644 }
2607 } 2645 }
2608 2646
@@ -2620,21 +2658,21 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2620 2658
2621#ifdef CONFIG_MAC80211_MESH 2659#ifdef CONFIG_MAC80211_MESH
2622 if (elems->mesh_config) 2660 if (elems->mesh_config)
2623 bss = ieee80211_rx_mesh_bss_get(dev, elems->mesh_id, 2661 bss = ieee80211_rx_mesh_bss_get(local, elems->mesh_id,
2624 elems->mesh_id_len, elems->mesh_config, freq); 2662 elems->mesh_id_len, elems->mesh_config, freq);
2625 else 2663 else
2626#endif 2664#endif
2627 bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq, 2665 bss = ieee80211_rx_bss_get(local, mgmt->bssid, freq,
2628 elems->ssid, elems->ssid_len); 2666 elems->ssid, elems->ssid_len);
2629 if (!bss) { 2667 if (!bss) {
2630#ifdef CONFIG_MAC80211_MESH 2668#ifdef CONFIG_MAC80211_MESH
2631 if (elems->mesh_config) 2669 if (elems->mesh_config)
2632 bss = ieee80211_rx_mesh_bss_add(dev, elems->mesh_id, 2670 bss = ieee80211_rx_mesh_bss_add(local, elems->mesh_id,
2633 elems->mesh_id_len, elems->mesh_config, 2671 elems->mesh_id_len, elems->mesh_config,
2634 elems->mesh_config_len, freq); 2672 elems->mesh_config_len, freq);
2635 else 2673 else
2636#endif 2674#endif
2637 bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq, 2675 bss = ieee80211_rx_bss_add(sdata, mgmt->bssid, freq,
2638 elems->ssid, elems->ssid_len); 2676 elems->ssid, elems->ssid_len);
2639 if (!bss) 2677 if (!bss)
2640 return; 2678 return;
@@ -2653,43 +2691,6 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2653 bss->has_erp_value = 1; 2691 bss->has_erp_value = 1;
2654 } 2692 }
2655 2693
2656 if (elems->ht_cap_elem &&
2657 (!bss->ht_ie || bss->ht_ie_len != elems->ht_cap_elem_len ||
2658 memcmp(bss->ht_ie, elems->ht_cap_elem, elems->ht_cap_elem_len))) {
2659 kfree(bss->ht_ie);
2660 bss->ht_ie = kmalloc(elems->ht_cap_elem_len + 2, GFP_ATOMIC);
2661 if (bss->ht_ie) {
2662 memcpy(bss->ht_ie, elems->ht_cap_elem - 2,
2663 elems->ht_cap_elem_len + 2);
2664 bss->ht_ie_len = elems->ht_cap_elem_len + 2;
2665 } else
2666 bss->ht_ie_len = 0;
2667 } else if (!elems->ht_cap_elem && bss->ht_ie) {
2668 kfree(bss->ht_ie);
2669 bss->ht_ie = NULL;
2670 bss->ht_ie_len = 0;
2671 }
2672
2673 if (elems->ht_info_elem &&
2674 (!bss->ht_add_ie ||
2675 bss->ht_add_ie_len != elems->ht_info_elem_len ||
2676 memcmp(bss->ht_add_ie, elems->ht_info_elem,
2677 elems->ht_info_elem_len))) {
2678 kfree(bss->ht_add_ie);
2679 bss->ht_add_ie =
2680 kmalloc(elems->ht_info_elem_len + 2, GFP_ATOMIC);
2681 if (bss->ht_add_ie) {
2682 memcpy(bss->ht_add_ie, elems->ht_info_elem - 2,
2683 elems->ht_info_elem_len + 2);
2684 bss->ht_add_ie_len = elems->ht_info_elem_len + 2;
2685 } else
2686 bss->ht_add_ie_len = 0;
2687 } else if (!elems->ht_info_elem && bss->ht_add_ie) {
2688 kfree(bss->ht_add_ie);
2689 bss->ht_add_ie = NULL;
2690 bss->ht_add_ie_len = 0;
2691 }
2692
2693 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); 2694 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
2694 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); 2695 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info);
2695 2696
@@ -2721,108 +2722,36 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2721 bss->supp_rates_len += clen; 2722 bss->supp_rates_len += clen;
2722 } 2723 }
2723 2724
2724 bss->band = rx_status->band; 2725 bss->band = band;
2725 2726
2726 bss->timestamp = beacon_timestamp; 2727 bss->timestamp = beacon_timestamp;
2727 bss->last_update = jiffies; 2728 bss->last_update = jiffies;
2728 bss->signal = rx_status->signal; 2729 bss->signal = rx_status->signal;
2729 bss->noise = rx_status->noise; 2730 bss->noise = rx_status->noise;
2730 bss->qual = rx_status->qual; 2731 bss->qual = rx_status->qual;
2731 if (!beacon && !bss->probe_resp) 2732 if (!beacon)
2732 bss->probe_resp = true; 2733 bss->last_probe_resp = jiffies;
2733
2734 /* 2734 /*
2735 * In STA mode, the remaining parameters should not be overridden 2735 * In STA mode, the remaining parameters should not be overridden
2736 * by beacons because they're not necessarily accurate there. 2736 * by beacons because they're not necessarily accurate there.
2737 */ 2737 */
2738 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 2738 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
2739 bss->probe_resp && beacon) { 2739 bss->last_probe_resp && beacon) {
2740 ieee80211_rx_bss_put(local, bss); 2740 ieee80211_rx_bss_put(local, bss);
2741 return; 2741 return;
2742 } 2742 }
2743 2743
2744 if (elems->wpa && 2744 if (bss->ies == NULL || bss->ies_len < elems->total_len) {
2745 (!bss->wpa_ie || bss->wpa_ie_len != elems->wpa_len || 2745 kfree(bss->ies);
2746 memcmp(bss->wpa_ie, elems->wpa, elems->wpa_len))) { 2746 bss->ies = kmalloc(elems->total_len, GFP_ATOMIC);
2747 kfree(bss->wpa_ie);
2748 bss->wpa_ie = kmalloc(elems->wpa_len + 2, GFP_ATOMIC);
2749 if (bss->wpa_ie) {
2750 memcpy(bss->wpa_ie, elems->wpa - 2, elems->wpa_len + 2);
2751 bss->wpa_ie_len = elems->wpa_len + 2;
2752 } else
2753 bss->wpa_ie_len = 0;
2754 } else if (!elems->wpa && bss->wpa_ie) {
2755 kfree(bss->wpa_ie);
2756 bss->wpa_ie = NULL;
2757 bss->wpa_ie_len = 0;
2758 }
2759
2760 if (elems->rsn &&
2761 (!bss->rsn_ie || bss->rsn_ie_len != elems->rsn_len ||
2762 memcmp(bss->rsn_ie, elems->rsn, elems->rsn_len))) {
2763 kfree(bss->rsn_ie);
2764 bss->rsn_ie = kmalloc(elems->rsn_len + 2, GFP_ATOMIC);
2765 if (bss->rsn_ie) {
2766 memcpy(bss->rsn_ie, elems->rsn - 2, elems->rsn_len + 2);
2767 bss->rsn_ie_len = elems->rsn_len + 2;
2768 } else
2769 bss->rsn_ie_len = 0;
2770 } else if (!elems->rsn && bss->rsn_ie) {
2771 kfree(bss->rsn_ie);
2772 bss->rsn_ie = NULL;
2773 bss->rsn_ie_len = 0;
2774 } 2747 }
2748 if (bss->ies) {
2749 memcpy(bss->ies, elems->ie_start, elems->total_len);
2750 bss->ies_len = elems->total_len;
2751 } else
2752 bss->ies_len = 0;
2775 2753
2776 /* 2754 bss->wmm_used = elems->wmm_param || elems->wmm_info;
2777 * Cf.
2778 * http://www.wipo.int/pctdb/en/wo.jsp?wo=2007047181&IA=WO2007047181&DISPLAY=DESC
2779 *
2780 * quoting:
2781 *
2782 * In particular, "Wi-Fi CERTIFIED for WMM - Support for Multimedia
2783 * Applications with Quality of Service in Wi-Fi Networks," Wi- Fi
2784 * Alliance (September 1, 2004) is incorporated by reference herein.
2785 * The inclusion of the WMM Parameters in probe responses and
2786 * association responses is mandatory for WMM enabled networks. The
2787 * inclusion of the WMM Parameters in beacons, however, is optional.
2788 */
2789
2790 if (elems->wmm_param &&
2791 (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_param_len ||
2792 memcmp(bss->wmm_ie, elems->wmm_param, elems->wmm_param_len))) {
2793 kfree(bss->wmm_ie);
2794 bss->wmm_ie = kmalloc(elems->wmm_param_len + 2, GFP_ATOMIC);
2795 if (bss->wmm_ie) {
2796 memcpy(bss->wmm_ie, elems->wmm_param - 2,
2797 elems->wmm_param_len + 2);
2798 bss->wmm_ie_len = elems->wmm_param_len + 2;
2799 } else
2800 bss->wmm_ie_len = 0;
2801 } else if (elems->wmm_info &&
2802 (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_info_len ||
2803 memcmp(bss->wmm_ie, elems->wmm_info,
2804 elems->wmm_info_len))) {
2805 /* As for certain AP's Fifth bit is not set in WMM IE in
2806 * beacon frames.So while parsing the beacon frame the
2807 * wmm_info structure is used instead of wmm_param.
2808 * wmm_info structure was never used to set bss->wmm_ie.
2809 * This code fixes this problem by copying the WME
2810 * information from wmm_info to bss->wmm_ie and enabling
2811 * n-band association.
2812 */
2813 kfree(bss->wmm_ie);
2814 bss->wmm_ie = kmalloc(elems->wmm_info_len + 2, GFP_ATOMIC);
2815 if (bss->wmm_ie) {
2816 memcpy(bss->wmm_ie, elems->wmm_info - 2,
2817 elems->wmm_info_len + 2);
2818 bss->wmm_ie_len = elems->wmm_info_len + 2;
2819 } else
2820 bss->wmm_ie_len = 0;
2821 } else if (!elems->wmm_param && !elems->wmm_info && bss->wmm_ie) {
2822 kfree(bss->wmm_ie);
2823 bss->wmm_ie = NULL;
2824 bss->wmm_ie_len = 0;
2825 }
2826 2755
2827 /* check if we need to merge IBSS */ 2756 /* check if we need to merge IBSS */
2828 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon && 2757 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon &&
@@ -2848,7 +2777,7 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2848 * e.g: at 1 MBit that means mactime is 192 usec earlier 2777 * e.g: at 1 MBit that means mactime is 192 usec earlier
2849 * (=24 bytes * 8 usecs/byte) than the beacon timestamp. 2778 * (=24 bytes * 8 usecs/byte) than the beacon timestamp.
2850 */ 2779 */
2851 int rate = local->hw.wiphy->bands[rx_status->band]-> 2780 int rate = local->hw.wiphy->bands[band]->
2852 bitrates[rx_status->rate_idx].bitrate; 2781 bitrates[rx_status->rate_idx].bitrate;
2853 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate); 2782 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);
2854 } else if (local && local->ops && local->ops->get_tsf) 2783 } else if (local && local->ops && local->ops->get_tsf)
@@ -2871,12 +2800,12 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2871#ifdef CONFIG_MAC80211_IBSS_DEBUG 2800#ifdef CONFIG_MAC80211_IBSS_DEBUG
2872 printk(KERN_DEBUG "%s: beacon TSF higher than " 2801 printk(KERN_DEBUG "%s: beacon TSF higher than "
2873 "local TSF - IBSS merge with BSSID %s\n", 2802 "local TSF - IBSS merge with BSSID %s\n",
2874 dev->name, print_mac(mac, mgmt->bssid)); 2803 sdata->dev->name, print_mac(mac, mgmt->bssid));
2875#endif 2804#endif
2876 ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss); 2805 ieee80211_sta_join_ibss(sdata, &sdata->u.sta, bss);
2877 ieee80211_ibss_add_sta(dev, NULL, 2806 ieee80211_ibss_add_sta(sdata, NULL,
2878 mgmt->bssid, mgmt->sa, 2807 mgmt->bssid, mgmt->sa,
2879 BIT(rx_status->rate_idx)); 2808 supp_rates);
2880 } 2809 }
2881 } 2810 }
2882 2811
@@ -2884,13 +2813,17 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2884} 2813}
2885 2814
2886 2815
2887static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev, 2816static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
2888 struct ieee80211_mgmt *mgmt, 2817 struct ieee80211_mgmt *mgmt,
2889 size_t len, 2818 size_t len,
2890 struct ieee80211_rx_status *rx_status) 2819 struct ieee80211_rx_status *rx_status)
2891{ 2820{
2892 size_t baselen; 2821 size_t baselen;
2893 struct ieee802_11_elems elems; 2822 struct ieee802_11_elems elems;
2823 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
2824
2825 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
2826 return; /* ignore ProbeResp to foreign address */
2894 2827
2895 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 2828 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
2896 if (baselen > len) 2829 if (baselen > len)
@@ -2899,20 +2832,27 @@ static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev,
2899 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, 2832 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
2900 &elems); 2833 &elems);
2901 2834
2902 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 0); 2835 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
2836
2837 /* direct probe may be part of the association flow */
2838 if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE,
2839 &ifsta->request)) {
2840 printk(KERN_DEBUG "%s direct probe responded\n",
2841 sdata->dev->name);
2842 ieee80211_authenticate(sdata, ifsta);
2843 }
2903} 2844}
2904 2845
2905 2846
2906static void ieee80211_rx_mgmt_beacon(struct net_device *dev, 2847static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2907 struct ieee80211_mgmt *mgmt, 2848 struct ieee80211_mgmt *mgmt,
2908 size_t len, 2849 size_t len,
2909 struct ieee80211_rx_status *rx_status) 2850 struct ieee80211_rx_status *rx_status)
2910{ 2851{
2911 struct ieee80211_sub_if_data *sdata;
2912 struct ieee80211_if_sta *ifsta; 2852 struct ieee80211_if_sta *ifsta;
2913 size_t baselen; 2853 size_t baselen;
2914 struct ieee802_11_elems elems; 2854 struct ieee802_11_elems elems;
2915 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2855 struct ieee80211_local *local = sdata->local;
2916 struct ieee80211_conf *conf = &local->hw.conf; 2856 struct ieee80211_conf *conf = &local->hw.conf;
2917 u32 changed = 0; 2857 u32 changed = 0;
2918 2858
@@ -2923,9 +2863,8 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2923 2863
2924 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); 2864 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
2925 2865
2926 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 1); 2866 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
2927 2867
2928 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2929 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 2868 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
2930 return; 2869 return;
2931 ifsta = &sdata->u.sta; 2870 ifsta = &sdata->u.sta;
@@ -2934,7 +2873,7 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2934 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) 2873 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
2935 return; 2874 return;
2936 2875
2937 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, 2876 ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param,
2938 elems.wmm_param_len); 2877 elems.wmm_param_len);
2939 2878
2940 /* Do not send changes to driver if we are scanning. This removes 2879 /* Do not send changes to driver if we are scanning. This removes
@@ -2966,14 +2905,13 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2966} 2905}
2967 2906
2968 2907
2969static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, 2908static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
2970 struct ieee80211_if_sta *ifsta, 2909 struct ieee80211_if_sta *ifsta,
2971 struct ieee80211_mgmt *mgmt, 2910 struct ieee80211_mgmt *mgmt,
2972 size_t len, 2911 size_t len,
2973 struct ieee80211_rx_status *rx_status) 2912 struct ieee80211_rx_status *rx_status)
2974{ 2913{
2975 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2914 struct ieee80211_local *local = sdata->local;
2976 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2977 int tx_last_beacon; 2915 int tx_last_beacon;
2978 struct sk_buff *skb; 2916 struct sk_buff *skb;
2979 struct ieee80211_mgmt *resp; 2917 struct ieee80211_mgmt *resp;
@@ -2985,7 +2923,7 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
2985#endif 2923#endif
2986 2924
2987 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS || 2925 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS ||
2988 ifsta->state != IEEE80211_IBSS_JOINED || 2926 ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED ||
2989 len < 24 + 2 || !ifsta->probe_resp) 2927 len < 24 + 2 || !ifsta->probe_resp)
2990 return; 2928 return;
2991 2929
@@ -2997,7 +2935,7 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
2997#ifdef CONFIG_MAC80211_IBSS_DEBUG 2935#ifdef CONFIG_MAC80211_IBSS_DEBUG
2998 printk(KERN_DEBUG "%s: RX ProbeReq SA=%s DA=%s BSSID=" 2936 printk(KERN_DEBUG "%s: RX ProbeReq SA=%s DA=%s BSSID="
2999 "%s (tx_last_beacon=%d)\n", 2937 "%s (tx_last_beacon=%d)\n",
3000 dev->name, print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da), 2938 sdata->dev->name, print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da),
3001 print_mac(mac3, mgmt->bssid), tx_last_beacon); 2939 print_mac(mac3, mgmt->bssid), tx_last_beacon);
3002#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2940#endif /* CONFIG_MAC80211_IBSS_DEBUG */
3003 2941
@@ -3015,7 +2953,7 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
3015#ifdef CONFIG_MAC80211_IBSS_DEBUG 2953#ifdef CONFIG_MAC80211_IBSS_DEBUG
3016 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " 2954 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
3017 "from %s\n", 2955 "from %s\n",
3018 dev->name, print_mac(mac, mgmt->sa)); 2956 sdata->dev->name, print_mac(mac, mgmt->sa));
3019#endif 2957#endif
3020 return; 2958 return;
3021 } 2959 }
@@ -3035,19 +2973,18 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
3035 memcpy(resp->da, mgmt->sa, ETH_ALEN); 2973 memcpy(resp->da, mgmt->sa, ETH_ALEN);
3036#ifdef CONFIG_MAC80211_IBSS_DEBUG 2974#ifdef CONFIG_MAC80211_IBSS_DEBUG
3037 printk(KERN_DEBUG "%s: Sending ProbeResp to %s\n", 2975 printk(KERN_DEBUG "%s: Sending ProbeResp to %s\n",
3038 dev->name, print_mac(mac, resp->da)); 2976 sdata->dev->name, print_mac(mac, resp->da));
3039#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2977#endif /* CONFIG_MAC80211_IBSS_DEBUG */
3040 ieee80211_sta_tx(dev, skb, 0); 2978 ieee80211_sta_tx(sdata, skb, 0);
3041} 2979}
3042 2980
3043static void ieee80211_rx_mgmt_action(struct net_device *dev, 2981static void ieee80211_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
3044 struct ieee80211_if_sta *ifsta, 2982 struct ieee80211_if_sta *ifsta,
3045 struct ieee80211_mgmt *mgmt, 2983 struct ieee80211_mgmt *mgmt,
3046 size_t len, 2984 size_t len,
3047 struct ieee80211_rx_status *rx_status) 2985 struct ieee80211_rx_status *rx_status)
3048{ 2986{
3049 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2987 struct ieee80211_local *local = sdata->local;
3050 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3051 2988
3052 if (len < IEEE80211_MIN_ACTION_SIZE) 2989 if (len < IEEE80211_MIN_ACTION_SIZE)
3053 return; 2990 return;
@@ -3061,7 +2998,7 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev,
3061 if (len < (IEEE80211_MIN_ACTION_SIZE + 2998 if (len < (IEEE80211_MIN_ACTION_SIZE +
3062 sizeof(mgmt->u.action.u.measurement))) 2999 sizeof(mgmt->u.action.u.measurement)))
3063 break; 3000 break;
3064 ieee80211_sta_process_measurement_req(dev, mgmt, len); 3001 ieee80211_sta_process_measurement_req(sdata, mgmt, len);
3065 break; 3002 break;
3066 } 3003 }
3067 break; 3004 break;
@@ -3071,38 +3008,37 @@ static void ieee80211_rx_mgmt_action(struct net_device *dev,
3071 if (len < (IEEE80211_MIN_ACTION_SIZE + 3008 if (len < (IEEE80211_MIN_ACTION_SIZE +
3072 sizeof(mgmt->u.action.u.addba_req))) 3009 sizeof(mgmt->u.action.u.addba_req)))
3073 break; 3010 break;
3074 ieee80211_sta_process_addba_request(dev, mgmt, len); 3011 ieee80211_sta_process_addba_request(local, mgmt, len);
3075 break; 3012 break;
3076 case WLAN_ACTION_ADDBA_RESP: 3013 case WLAN_ACTION_ADDBA_RESP:
3077 if (len < (IEEE80211_MIN_ACTION_SIZE + 3014 if (len < (IEEE80211_MIN_ACTION_SIZE +
3078 sizeof(mgmt->u.action.u.addba_resp))) 3015 sizeof(mgmt->u.action.u.addba_resp)))
3079 break; 3016 break;
3080 ieee80211_sta_process_addba_resp(dev, mgmt, len); 3017 ieee80211_sta_process_addba_resp(local, mgmt, len);
3081 break; 3018 break;
3082 case WLAN_ACTION_DELBA: 3019 case WLAN_ACTION_DELBA:
3083 if (len < (IEEE80211_MIN_ACTION_SIZE + 3020 if (len < (IEEE80211_MIN_ACTION_SIZE +
3084 sizeof(mgmt->u.action.u.delba))) 3021 sizeof(mgmt->u.action.u.delba)))
3085 break; 3022 break;
3086 ieee80211_sta_process_delba(dev, mgmt, len); 3023 ieee80211_sta_process_delba(sdata, mgmt, len);
3087 break; 3024 break;
3088 } 3025 }
3089 break; 3026 break;
3090 case PLINK_CATEGORY: 3027 case PLINK_CATEGORY:
3091 if (ieee80211_vif_is_mesh(&sdata->vif)) 3028 if (ieee80211_vif_is_mesh(&sdata->vif))
3092 mesh_rx_plink_frame(dev, mgmt, len, rx_status); 3029 mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
3093 break; 3030 break;
3094 case MESH_PATH_SEL_CATEGORY: 3031 case MESH_PATH_SEL_CATEGORY:
3095 if (ieee80211_vif_is_mesh(&sdata->vif)) 3032 if (ieee80211_vif_is_mesh(&sdata->vif))
3096 mesh_rx_path_sel_frame(dev, mgmt, len); 3033 mesh_rx_path_sel_frame(sdata, mgmt, len);
3097 break; 3034 break;
3098 } 3035 }
3099} 3036}
3100 3037
3101void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, 3038void ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
3102 struct ieee80211_rx_status *rx_status) 3039 struct ieee80211_rx_status *rx_status)
3103{ 3040{
3104 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3041 struct ieee80211_local *local = sdata->local;
3105 struct ieee80211_sub_if_data *sdata;
3106 struct ieee80211_if_sta *ifsta; 3042 struct ieee80211_if_sta *ifsta;
3107 struct ieee80211_mgmt *mgmt; 3043 struct ieee80211_mgmt *mgmt;
3108 u16 fc; 3044 u16 fc;
@@ -3110,7 +3046,6 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
3110 if (skb->len < 24) 3046 if (skb->len < 24)
3111 goto fail; 3047 goto fail;
3112 3048
3113 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3114 ifsta = &sdata->u.sta; 3049 ifsta = &sdata->u.sta;
3115 3050
3116 mgmt = (struct ieee80211_mgmt *) skb->data; 3051 mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -3136,17 +3071,14 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
3136 kfree_skb(skb); 3071 kfree_skb(skb);
3137} 3072}
3138 3073
3139 3074static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
3140static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
3141 struct sk_buff *skb) 3075 struct sk_buff *skb)
3142{ 3076{
3143 struct ieee80211_rx_status *rx_status; 3077 struct ieee80211_rx_status *rx_status;
3144 struct ieee80211_sub_if_data *sdata;
3145 struct ieee80211_if_sta *ifsta; 3078 struct ieee80211_if_sta *ifsta;
3146 struct ieee80211_mgmt *mgmt; 3079 struct ieee80211_mgmt *mgmt;
3147 u16 fc; 3080 u16 fc;
3148 3081
3149 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3150 ifsta = &sdata->u.sta; 3082 ifsta = &sdata->u.sta;
3151 3083
3152 rx_status = (struct ieee80211_rx_status *) skb->cb; 3084 rx_status = (struct ieee80211_rx_status *) skb->cb;
@@ -3155,17 +3087,17 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
3155 3087
3156 switch (fc & IEEE80211_FCTL_STYPE) { 3088 switch (fc & IEEE80211_FCTL_STYPE) {
3157 case IEEE80211_STYPE_PROBE_REQ: 3089 case IEEE80211_STYPE_PROBE_REQ:
3158 ieee80211_rx_mgmt_probe_req(dev, ifsta, mgmt, skb->len, 3090 ieee80211_rx_mgmt_probe_req(sdata, ifsta, mgmt, skb->len,
3159 rx_status); 3091 rx_status);
3160 break; 3092 break;
3161 case IEEE80211_STYPE_PROBE_RESP: 3093 case IEEE80211_STYPE_PROBE_RESP:
3162 ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status); 3094 ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len, rx_status);
3163 break; 3095 break;
3164 case IEEE80211_STYPE_BEACON: 3096 case IEEE80211_STYPE_BEACON:
3165 ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status); 3097 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status);
3166 break; 3098 break;
3167 case IEEE80211_STYPE_AUTH: 3099 case IEEE80211_STYPE_AUTH:
3168 ieee80211_rx_mgmt_auth(dev, ifsta, mgmt, skb->len); 3100 ieee80211_rx_mgmt_auth(sdata, ifsta, mgmt, skb->len);
3169 break; 3101 break;
3170 case IEEE80211_STYPE_ASSOC_RESP: 3102 case IEEE80211_STYPE_ASSOC_RESP:
3171 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 0); 3103 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 0);
@@ -3174,13 +3106,13 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
3174 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 1); 3106 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 1);
3175 break; 3107 break;
3176 case IEEE80211_STYPE_DEAUTH: 3108 case IEEE80211_STYPE_DEAUTH:
3177 ieee80211_rx_mgmt_deauth(dev, ifsta, mgmt, skb->len); 3109 ieee80211_rx_mgmt_deauth(sdata, ifsta, mgmt, skb->len);
3178 break; 3110 break;
3179 case IEEE80211_STYPE_DISASSOC: 3111 case IEEE80211_STYPE_DISASSOC:
3180 ieee80211_rx_mgmt_disassoc(dev, ifsta, mgmt, skb->len); 3112 ieee80211_rx_mgmt_disassoc(sdata, ifsta, mgmt, skb->len);
3181 break; 3113 break;
3182 case IEEE80211_STYPE_ACTION: 3114 case IEEE80211_STYPE_ACTION:
3183 ieee80211_rx_mgmt_action(dev, ifsta, mgmt, skb->len, rx_status); 3115 ieee80211_rx_mgmt_action(sdata, ifsta, mgmt, skb->len, rx_status);
3184 break; 3116 break;
3185 } 3117 }
3186 3118
@@ -3189,7 +3121,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
3189 3121
3190 3122
3191ieee80211_rx_result 3123ieee80211_rx_result
3192ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb, 3124ieee80211_sta_rx_scan(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
3193 struct ieee80211_rx_status *rx_status) 3125 struct ieee80211_rx_status *rx_status)
3194{ 3126{
3195 struct ieee80211_mgmt *mgmt; 3127 struct ieee80211_mgmt *mgmt;
@@ -3208,13 +3140,13 @@ ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb,
3208 return RX_DROP_MONITOR; 3140 return RX_DROP_MONITOR;
3209 3141
3210 if (ieee80211_is_probe_resp(fc)) { 3142 if (ieee80211_is_probe_resp(fc)) {
3211 ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status); 3143 ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len, rx_status);
3212 dev_kfree_skb(skb); 3144 dev_kfree_skb(skb);
3213 return RX_QUEUED; 3145 return RX_QUEUED;
3214 } 3146 }
3215 3147
3216 if (ieee80211_is_beacon(fc)) { 3148 if (ieee80211_is_beacon(fc)) {
3217 ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status); 3149 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status);
3218 dev_kfree_skb(skb); 3150 dev_kfree_skb(skb);
3219 return RX_QUEUED; 3151 return RX_QUEUED;
3220 } 3152 }
@@ -3223,12 +3155,11 @@ ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb,
3223} 3155}
3224 3156
3225 3157
3226static int ieee80211_sta_active_ibss(struct net_device *dev) 3158static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
3227{ 3159{
3228 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3160 struct ieee80211_local *local = sdata->local;
3229 int active = 0; 3161 int active = 0;
3230 struct sta_info *sta; 3162 struct sta_info *sta;
3231 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3232 3163
3233 rcu_read_lock(); 3164 rcu_read_lock();
3234 3165
@@ -3247,9 +3178,9 @@ static int ieee80211_sta_active_ibss(struct net_device *dev)
3247} 3178}
3248 3179
3249 3180
3250static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time) 3181static void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, unsigned long exp_time)
3251{ 3182{
3252 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3183 struct ieee80211_local *local = sdata->local;
3253 struct sta_info *sta, *tmp; 3184 struct sta_info *sta, *tmp;
3254 LIST_HEAD(tmp_list); 3185 LIST_HEAD(tmp_list);
3255 DECLARE_MAC_BUF(mac); 3186 DECLARE_MAC_BUF(mac);
@@ -3260,7 +3191,7 @@ static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time)
3260 if (time_after(jiffies, sta->last_rx + exp_time)) { 3191 if (time_after(jiffies, sta->last_rx + exp_time)) {
3261#ifdef CONFIG_MAC80211_IBSS_DEBUG 3192#ifdef CONFIG_MAC80211_IBSS_DEBUG
3262 printk(KERN_DEBUG "%s: expiring inactive STA %s\n", 3193 printk(KERN_DEBUG "%s: expiring inactive STA %s\n",
3263 dev->name, print_mac(mac, sta->addr)); 3194 sdata->dev->name, print_mac(mac, sta->addr));
3264#endif 3195#endif
3265 __sta_info_unlink(&sta); 3196 __sta_info_unlink(&sta);
3266 if (sta) 3197 if (sta)
@@ -3273,30 +3204,29 @@ static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time)
3273} 3204}
3274 3205
3275 3206
3276static void ieee80211_sta_merge_ibss(struct net_device *dev, 3207static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata,
3277 struct ieee80211_if_sta *ifsta) 3208 struct ieee80211_if_sta *ifsta)
3278{ 3209{
3279 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); 3210 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
3280 3211
3281 ieee80211_sta_expire(dev, IEEE80211_IBSS_INACTIVITY_LIMIT); 3212 ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
3282 if (ieee80211_sta_active_ibss(dev)) 3213 if (ieee80211_sta_active_ibss(sdata))
3283 return; 3214 return;
3284 3215
3285 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " 3216 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
3286 "IBSS networks with same SSID (merge)\n", dev->name); 3217 "IBSS networks with same SSID (merge)\n", sdata->dev->name);
3287 ieee80211_sta_req_scan(dev, ifsta->ssid, ifsta->ssid_len); 3218 ieee80211_sta_req_scan(sdata, ifsta->ssid, ifsta->ssid_len);
3288} 3219}
3289 3220
3290 3221
3291#ifdef CONFIG_MAC80211_MESH 3222#ifdef CONFIG_MAC80211_MESH
3292static void ieee80211_mesh_housekeeping(struct net_device *dev, 3223static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
3293 struct ieee80211_if_sta *ifsta) 3224 struct ieee80211_if_sta *ifsta)
3294{ 3225{
3295 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3296 bool free_plinks; 3226 bool free_plinks;
3297 3227
3298 ieee80211_sta_expire(dev, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); 3228 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
3299 mesh_path_expire(dev); 3229 mesh_path_expire(sdata);
3300 3230
3301 free_plinks = mesh_plink_availables(sdata); 3231 free_plinks = mesh_plink_availables(sdata);
3302 if (free_plinks != sdata->u.sta.accepting_plinks) 3232 if (free_plinks != sdata->u.sta.accepting_plinks)
@@ -3307,12 +3237,11 @@ static void ieee80211_mesh_housekeeping(struct net_device *dev,
3307} 3237}
3308 3238
3309 3239
3310void ieee80211_start_mesh(struct net_device *dev) 3240void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
3311{ 3241{
3312 struct ieee80211_if_sta *ifsta; 3242 struct ieee80211_if_sta *ifsta;
3313 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3314 ifsta = &sdata->u.sta; 3243 ifsta = &sdata->u.sta;
3315 ifsta->state = IEEE80211_MESH_UP; 3244 ifsta->state = IEEE80211_STA_MLME_MESH_UP;
3316 ieee80211_sta_timer((unsigned long)sdata); 3245 ieee80211_sta_timer((unsigned long)sdata);
3317 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); 3246 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
3318} 3247}
@@ -3324,7 +3253,7 @@ void ieee80211_sta_timer(unsigned long data)
3324 struct ieee80211_sub_if_data *sdata = 3253 struct ieee80211_sub_if_data *sdata =
3325 (struct ieee80211_sub_if_data *) data; 3254 (struct ieee80211_sub_if_data *) data;
3326 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 3255 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
3327 struct ieee80211_local *local = wdev_priv(&sdata->wdev); 3256 struct ieee80211_local *local = sdata->local;
3328 3257
3329 set_bit(IEEE80211_STA_REQ_RUN, &ifsta->request); 3258 set_bit(IEEE80211_STA_REQ_RUN, &ifsta->request);
3330 queue_work(local->hw.workqueue, &ifsta->work); 3259 queue_work(local->hw.workqueue, &ifsta->work);
@@ -3334,12 +3263,11 @@ void ieee80211_sta_work(struct work_struct *work)
3334{ 3263{
3335 struct ieee80211_sub_if_data *sdata = 3264 struct ieee80211_sub_if_data *sdata =
3336 container_of(work, struct ieee80211_sub_if_data, u.sta.work); 3265 container_of(work, struct ieee80211_sub_if_data, u.sta.work);
3337 struct net_device *dev = sdata->dev; 3266 struct ieee80211_local *local = sdata->local;
3338 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3339 struct ieee80211_if_sta *ifsta; 3267 struct ieee80211_if_sta *ifsta;
3340 struct sk_buff *skb; 3268 struct sk_buff *skb;
3341 3269
3342 if (!netif_running(dev)) 3270 if (!netif_running(sdata->dev))
3343 return; 3271 return;
3344 3272
3345 if (local->sta_sw_scanning || local->sta_hw_scanning) 3273 if (local->sta_sw_scanning || local->sta_hw_scanning)
@@ -3352,53 +3280,57 @@ void ieee80211_sta_work(struct work_struct *work)
3352 ifsta = &sdata->u.sta; 3280 ifsta = &sdata->u.sta;
3353 3281
3354 while ((skb = skb_dequeue(&ifsta->skb_queue))) 3282 while ((skb = skb_dequeue(&ifsta->skb_queue)))
3355 ieee80211_sta_rx_queued_mgmt(dev, skb); 3283 ieee80211_sta_rx_queued_mgmt(sdata, skb);
3356 3284
3357#ifdef CONFIG_MAC80211_MESH 3285#ifdef CONFIG_MAC80211_MESH
3358 if (ifsta->preq_queue_len && 3286 if (ifsta->preq_queue_len &&
3359 time_after(jiffies, 3287 time_after(jiffies,
3360 ifsta->last_preq + msecs_to_jiffies(ifsta->mshcfg.dot11MeshHWMPpreqMinInterval))) 3288 ifsta->last_preq + msecs_to_jiffies(ifsta->mshcfg.dot11MeshHWMPpreqMinInterval)))
3361 mesh_path_start_discovery(dev); 3289 mesh_path_start_discovery(sdata);
3362#endif 3290#endif
3363 3291
3364 if (ifsta->state != IEEE80211_AUTHENTICATE && 3292 if (ifsta->state != IEEE80211_STA_MLME_DIRECT_PROBE &&
3365 ifsta->state != IEEE80211_ASSOCIATE && 3293 ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE &&
3294 ifsta->state != IEEE80211_STA_MLME_ASSOCIATE &&
3366 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) { 3295 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) {
3367 if (ifsta->scan_ssid_len) 3296 if (ifsta->scan_ssid_len)
3368 ieee80211_sta_start_scan(dev, ifsta->scan_ssid, ifsta->scan_ssid_len); 3297 ieee80211_sta_start_scan(sdata, ifsta->scan_ssid, ifsta->scan_ssid_len);
3369 else 3298 else
3370 ieee80211_sta_start_scan(dev, NULL, 0); 3299 ieee80211_sta_start_scan(sdata, NULL, 0);
3371 return; 3300 return;
3372 } 3301 }
3373 3302
3374 if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request)) { 3303 if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request)) {
3375 if (ieee80211_sta_config_auth(dev, ifsta)) 3304 if (ieee80211_sta_config_auth(sdata, ifsta))
3376 return; 3305 return;
3377 clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request); 3306 clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request);
3378 } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request)) 3307 } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request))
3379 return; 3308 return;
3380 3309
3381 switch (ifsta->state) { 3310 switch (ifsta->state) {
3382 case IEEE80211_DISABLED: 3311 case IEEE80211_STA_MLME_DISABLED:
3312 break;
3313 case IEEE80211_STA_MLME_DIRECT_PROBE:
3314 ieee80211_direct_probe(sdata, ifsta);
3383 break; 3315 break;
3384 case IEEE80211_AUTHENTICATE: 3316 case IEEE80211_STA_MLME_AUTHENTICATE:
3385 ieee80211_authenticate(dev, ifsta); 3317 ieee80211_authenticate(sdata, ifsta);
3386 break; 3318 break;
3387 case IEEE80211_ASSOCIATE: 3319 case IEEE80211_STA_MLME_ASSOCIATE:
3388 ieee80211_associate(dev, ifsta); 3320 ieee80211_associate(sdata, ifsta);
3389 break; 3321 break;
3390 case IEEE80211_ASSOCIATED: 3322 case IEEE80211_STA_MLME_ASSOCIATED:
3391 ieee80211_associated(dev, ifsta); 3323 ieee80211_associated(sdata, ifsta);
3392 break; 3324 break;
3393 case IEEE80211_IBSS_SEARCH: 3325 case IEEE80211_STA_MLME_IBSS_SEARCH:
3394 ieee80211_sta_find_ibss(dev, ifsta); 3326 ieee80211_sta_find_ibss(sdata, ifsta);
3395 break; 3327 break;
3396 case IEEE80211_IBSS_JOINED: 3328 case IEEE80211_STA_MLME_IBSS_JOINED:
3397 ieee80211_sta_merge_ibss(dev, ifsta); 3329 ieee80211_sta_merge_ibss(sdata, ifsta);
3398 break; 3330 break;
3399#ifdef CONFIG_MAC80211_MESH 3331#ifdef CONFIG_MAC80211_MESH
3400 case IEEE80211_MESH_UP: 3332 case IEEE80211_STA_MLME_MESH_UP:
3401 ieee80211_mesh_housekeeping(dev, ifsta); 3333 ieee80211_mesh_housekeeping(sdata, ifsta);
3402 break; 3334 break;
3403#endif 3335#endif
3404 default: 3336 default:
@@ -3406,20 +3338,20 @@ void ieee80211_sta_work(struct work_struct *work)
3406 break; 3338 break;
3407 } 3339 }
3408 3340
3409 if (ieee80211_privacy_mismatch(dev, ifsta)) { 3341 if (ieee80211_privacy_mismatch(sdata, ifsta)) {
3410 printk(KERN_DEBUG "%s: privacy configuration mismatch and " 3342 printk(KERN_DEBUG "%s: privacy configuration mismatch and "
3411 "mixed-cell disabled - disassociate\n", dev->name); 3343 "mixed-cell disabled - disassociate\n", sdata->dev->name);
3412 3344
3413 ieee80211_send_disassoc(dev, ifsta, WLAN_REASON_UNSPECIFIED); 3345 ieee80211_send_disassoc(sdata, ifsta, WLAN_REASON_UNSPECIFIED);
3414 ieee80211_set_disassoc(dev, ifsta, 0); 3346 ieee80211_set_disassoc(sdata, ifsta, 0);
3415 } 3347 }
3416} 3348}
3417 3349
3418 3350
3419static void ieee80211_sta_reset_auth(struct net_device *dev, 3351static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata,
3420 struct ieee80211_if_sta *ifsta) 3352 struct ieee80211_if_sta *ifsta)
3421{ 3353{
3422 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3354 struct ieee80211_local *local = sdata->local;
3423 3355
3424 if (local->ops->reset_tsf) { 3356 if (local->ops->reset_tsf) {
3425 /* Reset own TSF to allow time synchronization work. */ 3357 /* Reset own TSF to allow time synchronization work. */
@@ -3439,16 +3371,18 @@ static void ieee80211_sta_reset_auth(struct net_device *dev,
3439 ifsta->auth_alg = WLAN_AUTH_OPEN; 3371 ifsta->auth_alg = WLAN_AUTH_OPEN;
3440 ifsta->auth_transaction = -1; 3372 ifsta->auth_transaction = -1;
3441 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; 3373 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
3442 ifsta->auth_tries = ifsta->assoc_tries = 0; 3374 ifsta->assoc_scan_tries = 0;
3443 netif_carrier_off(dev); 3375 ifsta->direct_probe_tries = 0;
3376 ifsta->auth_tries = 0;
3377 ifsta->assoc_tries = 0;
3378 netif_carrier_off(sdata->dev);
3444} 3379}
3445 3380
3446 3381
3447void ieee80211_sta_req_auth(struct net_device *dev, 3382void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata,
3448 struct ieee80211_if_sta *ifsta) 3383 struct ieee80211_if_sta *ifsta)
3449{ 3384{
3450 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3385 struct ieee80211_local *local = sdata->local;
3451 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3452 3386
3453 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 3387 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
3454 return; 3388 return;
@@ -3492,11 +3426,10 @@ static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta,
3492 return 0; 3426 return 0;
3493} 3427}
3494 3428
3495static int ieee80211_sta_config_auth(struct net_device *dev, 3429static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata,
3496 struct ieee80211_if_sta *ifsta) 3430 struct ieee80211_if_sta *ifsta)
3497{ 3431{
3498 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3432 struct ieee80211_local *local = sdata->local;
3499 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3500 struct ieee80211_sta_bss *bss, *selected = NULL; 3433 struct ieee80211_sta_bss *bss, *selected = NULL;
3501 int top_rssi = 0, freq; 3434 int top_rssi = 0, freq;
3502 3435
@@ -3535,38 +3468,48 @@ static int ieee80211_sta_config_auth(struct net_device *dev,
3535 spin_unlock_bh(&local->sta_bss_lock); 3468 spin_unlock_bh(&local->sta_bss_lock);
3536 3469
3537 if (selected) { 3470 if (selected) {
3538 ieee80211_set_freq(dev, selected->freq); 3471 ieee80211_set_freq(sdata, selected->freq);
3539 if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) 3472 if (!(ifsta->flags & IEEE80211_STA_SSID_SET))
3540 ieee80211_sta_set_ssid(dev, selected->ssid, 3473 ieee80211_sta_set_ssid(sdata, selected->ssid,
3541 selected->ssid_len); 3474 selected->ssid_len);
3542 ieee80211_sta_set_bssid(dev, selected->bssid); 3475 ieee80211_sta_set_bssid(sdata, selected->bssid);
3543 ieee80211_sta_def_wmm_params(dev, selected, 0); 3476 ieee80211_sta_def_wmm_params(sdata, selected, 0);
3477
3478 /* Send out direct probe if no probe resp was received or
3479 * the one we have is outdated
3480 */
3481 if (!selected->last_probe_resp ||
3482 time_after(jiffies, selected->last_probe_resp
3483 + IEEE80211_SCAN_RESULT_EXPIRE))
3484 ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
3485 else
3486 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
3487
3544 ieee80211_rx_bss_put(local, selected); 3488 ieee80211_rx_bss_put(local, selected);
3545 ifsta->state = IEEE80211_AUTHENTICATE; 3489 ieee80211_sta_reset_auth(sdata, ifsta);
3546 ieee80211_sta_reset_auth(dev, ifsta);
3547 return 0; 3490 return 0;
3548 } else { 3491 } else {
3549 if (ifsta->state != IEEE80211_AUTHENTICATE) { 3492 if (ifsta->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) {
3493 ifsta->assoc_scan_tries++;
3550 if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) 3494 if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL)
3551 ieee80211_sta_start_scan(dev, NULL, 0); 3495 ieee80211_sta_start_scan(sdata, NULL, 0);
3552 else 3496 else
3553 ieee80211_sta_start_scan(dev, ifsta->ssid, 3497 ieee80211_sta_start_scan(sdata, ifsta->ssid,
3554 ifsta->ssid_len); 3498 ifsta->ssid_len);
3555 ifsta->state = IEEE80211_AUTHENTICATE; 3499 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
3556 set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request); 3500 set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
3557 } else 3501 } else
3558 ifsta->state = IEEE80211_DISABLED; 3502 ifsta->state = IEEE80211_STA_MLME_DISABLED;
3559 } 3503 }
3560 return -1; 3504 return -1;
3561} 3505}
3562 3506
3563 3507
3564static int ieee80211_sta_create_ibss(struct net_device *dev, 3508static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata,
3565 struct ieee80211_if_sta *ifsta) 3509 struct ieee80211_if_sta *ifsta)
3566{ 3510{
3567 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3511 struct ieee80211_local *local = sdata->local;
3568 struct ieee80211_sta_bss *bss; 3512 struct ieee80211_sta_bss *bss;
3569 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3570 struct ieee80211_supported_band *sband; 3513 struct ieee80211_supported_band *sband;
3571 u8 bssid[ETH_ALEN], *pos; 3514 u8 bssid[ETH_ALEN], *pos;
3572 int i; 3515 int i;
@@ -3582,15 +3525,15 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
3582 * random number generator get different BSSID. */ 3525 * random number generator get different BSSID. */
3583 get_random_bytes(bssid, ETH_ALEN); 3526 get_random_bytes(bssid, ETH_ALEN);
3584 for (i = 0; i < ETH_ALEN; i++) 3527 for (i = 0; i < ETH_ALEN; i++)
3585 bssid[i] ^= dev->dev_addr[i]; 3528 bssid[i] ^= sdata->dev->dev_addr[i];
3586 bssid[0] &= ~0x01; 3529 bssid[0] &= ~0x01;
3587 bssid[0] |= 0x02; 3530 bssid[0] |= 0x02;
3588#endif 3531#endif
3589 3532
3590 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n", 3533 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n",
3591 dev->name, print_mac(mac, bssid)); 3534 sdata->dev->name, print_mac(mac, bssid));
3592 3535
3593 bss = ieee80211_rx_bss_add(dev, bssid, 3536 bss = ieee80211_rx_bss_add(sdata, bssid,
3594 local->hw.conf.channel->center_freq, 3537 local->hw.conf.channel->center_freq,
3595 sdata->u.sta.ssid, sdata->u.sta.ssid_len); 3538 sdata->u.sta.ssid, sdata->u.sta.ssid_len);
3596 if (!bss) 3539 if (!bss)
@@ -3617,16 +3560,16 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
3617 *pos++ = (u8) (rate / 5); 3560 *pos++ = (u8) (rate / 5);
3618 } 3561 }
3619 3562
3620 ret = ieee80211_sta_join_ibss(dev, ifsta, bss); 3563 ret = ieee80211_sta_join_ibss(sdata, ifsta, bss);
3621 ieee80211_rx_bss_put(local, bss); 3564 ieee80211_rx_bss_put(local, bss);
3622 return ret; 3565 return ret;
3623} 3566}
3624 3567
3625 3568
3626static int ieee80211_sta_find_ibss(struct net_device *dev, 3569static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata,
3627 struct ieee80211_if_sta *ifsta) 3570 struct ieee80211_if_sta *ifsta)
3628{ 3571{
3629 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3572 struct ieee80211_local *local = sdata->local;
3630 struct ieee80211_sta_bss *bss; 3573 struct ieee80211_sta_bss *bss;
3631 int found = 0; 3574 int found = 0;
3632 u8 bssid[ETH_ALEN]; 3575 u8 bssid[ETH_ALEN];
@@ -3637,10 +3580,10 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
3637 if (ifsta->ssid_len == 0) 3580 if (ifsta->ssid_len == 0)
3638 return -EINVAL; 3581 return -EINVAL;
3639 3582
3640 active_ibss = ieee80211_sta_active_ibss(dev); 3583 active_ibss = ieee80211_sta_active_ibss(sdata);
3641#ifdef CONFIG_MAC80211_IBSS_DEBUG 3584#ifdef CONFIG_MAC80211_IBSS_DEBUG
3642 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", 3585 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
3643 dev->name, active_ibss); 3586 sdata->dev->name, active_ibss);
3644#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 3587#endif /* CONFIG_MAC80211_IBSS_DEBUG */
3645 spin_lock_bh(&local->sta_bss_lock); 3588 spin_lock_bh(&local->sta_bss_lock);
3646 list_for_each_entry(bss, &local->sta_bss_list, list) { 3589 list_for_each_entry(bss, &local->sta_bss_list, list) {
@@ -3675,15 +3618,15 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
3675 else 3618 else
3676 search_freq = local->hw.conf.channel->center_freq; 3619 search_freq = local->hw.conf.channel->center_freq;
3677 3620
3678 bss = ieee80211_rx_bss_get(dev, bssid, search_freq, 3621 bss = ieee80211_rx_bss_get(local, bssid, search_freq,
3679 ifsta->ssid, ifsta->ssid_len); 3622 ifsta->ssid, ifsta->ssid_len);
3680 if (!bss) 3623 if (!bss)
3681 goto dont_join; 3624 goto dont_join;
3682 3625
3683 printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" 3626 printk(KERN_DEBUG "%s: Selected IBSS BSSID %s"
3684 " based on configured SSID\n", 3627 " based on configured SSID\n",
3685 dev->name, print_mac(mac, bssid)); 3628 sdata->dev->name, print_mac(mac, bssid));
3686 ret = ieee80211_sta_join_ibss(dev, ifsta, bss); 3629 ret = ieee80211_sta_join_ibss(sdata, ifsta, bss);
3687 ieee80211_rx_bss_put(local, bss); 3630 ieee80211_rx_bss_put(local, bss);
3688 return ret; 3631 return ret;
3689 } 3632 }
@@ -3694,17 +3637,17 @@ dont_join:
3694#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 3637#endif /* CONFIG_MAC80211_IBSS_DEBUG */
3695 3638
3696 /* Selected IBSS not found in current scan results - try to scan */ 3639 /* Selected IBSS not found in current scan results - try to scan */
3697 if (ifsta->state == IEEE80211_IBSS_JOINED && 3640 if (ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED &&
3698 !ieee80211_sta_active_ibss(dev)) { 3641 !ieee80211_sta_active_ibss(sdata)) {
3699 mod_timer(&ifsta->timer, jiffies + 3642 mod_timer(&ifsta->timer, jiffies +
3700 IEEE80211_IBSS_MERGE_INTERVAL); 3643 IEEE80211_IBSS_MERGE_INTERVAL);
3701 } else if (time_after(jiffies, local->last_scan_completed + 3644 } else if (time_after(jiffies, local->last_scan_completed +
3702 IEEE80211_SCAN_INTERVAL)) { 3645 IEEE80211_SCAN_INTERVAL)) {
3703 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " 3646 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
3704 "join\n", dev->name); 3647 "join\n", sdata->dev->name);
3705 return ieee80211_sta_req_scan(dev, ifsta->ssid, 3648 return ieee80211_sta_req_scan(sdata, ifsta->ssid,
3706 ifsta->ssid_len); 3649 ifsta->ssid_len);
3707 } else if (ifsta->state != IEEE80211_IBSS_JOINED) { 3650 } else if (ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED) {
3708 int interval = IEEE80211_SCAN_INTERVAL; 3651 int interval = IEEE80211_SCAN_INTERVAL;
3709 3652
3710 if (time_after(jiffies, ifsta->ibss_join_req + 3653 if (time_after(jiffies, ifsta->ibss_join_req +
@@ -3712,10 +3655,10 @@ dont_join:
3712 if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) && 3655 if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) &&
3713 (!(local->oper_channel->flags & 3656 (!(local->oper_channel->flags &
3714 IEEE80211_CHAN_NO_IBSS))) 3657 IEEE80211_CHAN_NO_IBSS)))
3715 return ieee80211_sta_create_ibss(dev, ifsta); 3658 return ieee80211_sta_create_ibss(sdata, ifsta);
3716 if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) { 3659 if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) {
3717 printk(KERN_DEBUG "%s: IBSS not allowed on" 3660 printk(KERN_DEBUG "%s: IBSS not allowed on"
3718 " %d MHz\n", dev->name, 3661 " %d MHz\n", sdata->dev->name,
3719 local->hw.conf.channel->center_freq); 3662 local->hw.conf.channel->center_freq);
3720 } 3663 }
3721 3664
@@ -3724,7 +3667,7 @@ dont_join:
3724 interval = IEEE80211_SCAN_INTERVAL_SLOW; 3667 interval = IEEE80211_SCAN_INTERVAL_SLOW;
3725 } 3668 }
3726 3669
3727 ifsta->state = IEEE80211_IBSS_SEARCH; 3670 ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH;
3728 mod_timer(&ifsta->timer, jiffies + interval); 3671 mod_timer(&ifsta->timer, jiffies + interval);
3729 return 0; 3672 return 0;
3730 } 3673 }
@@ -3733,9 +3676,8 @@ dont_join:
3733} 3676}
3734 3677
3735 3678
3736int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) 3679int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len)
3737{ 3680{
3738 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3739 struct ieee80211_if_sta *ifsta; 3681 struct ieee80211_if_sta *ifsta;
3740 int res; 3682 int res;
3741 3683
@@ -3759,7 +3701,7 @@ int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len)
3759 res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID); 3701 res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID);
3760 if (res) { 3702 if (res) {
3761 printk(KERN_DEBUG "%s: Failed to config new SSID to " 3703 printk(KERN_DEBUG "%s: Failed to config new SSID to "
3762 "the low-level driver\n", dev->name); 3704 "the low-level driver\n", sdata->dev->name);
3763 return res; 3705 return res;
3764 } 3706 }
3765 } 3707 }
@@ -3772,17 +3714,16 @@ int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len)
3772 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && 3714 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
3773 !(ifsta->flags & IEEE80211_STA_BSSID_SET)) { 3715 !(ifsta->flags & IEEE80211_STA_BSSID_SET)) {
3774 ifsta->ibss_join_req = jiffies; 3716 ifsta->ibss_join_req = jiffies;
3775 ifsta->state = IEEE80211_IBSS_SEARCH; 3717 ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH;
3776 return ieee80211_sta_find_ibss(dev, ifsta); 3718 return ieee80211_sta_find_ibss(sdata, ifsta);
3777 } 3719 }
3778 3720
3779 return 0; 3721 return 0;
3780} 3722}
3781 3723
3782 3724
3783int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len) 3725int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len)
3784{ 3726{
3785 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3786 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 3727 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
3787 memcpy(ssid, ifsta->ssid, ifsta->ssid_len); 3728 memcpy(ssid, ifsta->ssid, ifsta->ssid_len);
3788 *len = ifsta->ssid_len; 3729 *len = ifsta->ssid_len;
@@ -3790,13 +3731,11 @@ int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len)
3790} 3731}
3791 3732
3792 3733
3793int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid) 3734int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
3794{ 3735{
3795 struct ieee80211_sub_if_data *sdata;
3796 struct ieee80211_if_sta *ifsta; 3736 struct ieee80211_if_sta *ifsta;
3797 int res; 3737 int res;
3798 3738
3799 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3800 ifsta = &sdata->u.sta; 3739 ifsta = &sdata->u.sta;
3801 3740
3802 if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) { 3741 if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) {
@@ -3809,7 +3748,7 @@ int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid)
3809 res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID); 3748 res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID);
3810 if (res) { 3749 if (res) {
3811 printk(KERN_DEBUG "%s: Failed to config new BSSID to " 3750 printk(KERN_DEBUG "%s: Failed to config new BSSID to "
3812 "the low-level driver\n", dev->name); 3751 "the low-level driver\n", sdata->dev->name);
3813 return res; 3752 return res;
3814 } 3753 }
3815 } 3754 }
@@ -3850,7 +3789,7 @@ static void ieee80211_send_nullfunc(struct ieee80211_local *local,
3850 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); 3789 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
3851 memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN); 3790 memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN);
3852 3791
3853 ieee80211_sta_tx(sdata->dev, skb, 0); 3792 ieee80211_sta_tx(sdata, skb, 0);
3854} 3793}
3855 3794
3856 3795
@@ -3922,9 +3861,9 @@ done:
3922 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 3861 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
3923 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 3862 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
3924 if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) || 3863 if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) ||
3925 (!(ifsta->state == IEEE80211_IBSS_JOINED) && 3864 (!(ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED) &&
3926 !ieee80211_sta_active_ibss(dev))) 3865 !ieee80211_sta_active_ibss(sdata)))
3927 ieee80211_sta_find_ibss(dev, ifsta); 3866 ieee80211_sta_find_ibss(sdata, ifsta);
3928 } 3867 }
3929} 3868}
3930EXPORT_SYMBOL(ieee80211_scan_completed); 3869EXPORT_SYMBOL(ieee80211_scan_completed);
@@ -4013,7 +3952,7 @@ void ieee80211_sta_scan_work(struct work_struct *work)
4013 3952
4014 if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN) 3953 if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN)
4015 break; 3954 break;
4016 ieee80211_send_probe_req(dev, NULL, local->scan_ssid, 3955 ieee80211_send_probe_req(sdata, NULL, local->scan_ssid,
4017 local->scan_ssid_len); 3956 local->scan_ssid_len);
4018 next_delay = IEEE80211_CHANNEL_TIME; 3957 next_delay = IEEE80211_CHANNEL_TIME;
4019 break; 3958 break;
@@ -4025,10 +3964,10 @@ void ieee80211_sta_scan_work(struct work_struct *work)
4025} 3964}
4026 3965
4027 3966
4028static int ieee80211_sta_start_scan(struct net_device *dev, 3967static int ieee80211_sta_start_scan(struct ieee80211_sub_if_data *scan_sdata,
4029 u8 *ssid, size_t ssid_len) 3968 u8 *ssid, size_t ssid_len)
4030{ 3969{
4031 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 3970 struct ieee80211_local *local = scan_sdata->local;
4032 struct ieee80211_sub_if_data *sdata; 3971 struct ieee80211_sub_if_data *sdata;
4033 3972
4034 if (ssid_len > IEEE80211_MAX_SSID_LEN) 3973 if (ssid_len > IEEE80211_MAX_SSID_LEN)
@@ -4052,7 +3991,7 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
4052 */ 3991 */
4053 3992
4054 if (local->sta_sw_scanning || local->sta_hw_scanning) { 3993 if (local->sta_sw_scanning || local->sta_hw_scanning) {
4055 if (local->scan_dev == dev) 3994 if (local->scan_dev == scan_sdata->dev)
4056 return 0; 3995 return 0;
4057 return -EBUSY; 3996 return -EBUSY;
4058 } 3997 }
@@ -4062,7 +4001,7 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
4062 ssid, ssid_len); 4001 ssid, ssid_len);
4063 if (!rc) { 4002 if (!rc) {
4064 local->sta_hw_scanning = 1; 4003 local->sta_hw_scanning = 1;
4065 local->scan_dev = dev; 4004 local->scan_dev = scan_sdata->dev;
4066 } 4005 }
4067 return rc; 4006 return rc;
4068 } 4007 }
@@ -4086,7 +4025,7 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
4086 local->scan_state = SCAN_SET_CHANNEL; 4025 local->scan_state = SCAN_SET_CHANNEL;
4087 local->scan_channel_idx = 0; 4026 local->scan_channel_idx = 0;
4088 local->scan_band = IEEE80211_BAND_2GHZ; 4027 local->scan_band = IEEE80211_BAND_2GHZ;
4089 local->scan_dev = dev; 4028 local->scan_dev = scan_sdata->dev;
4090 4029
4091 netif_addr_lock_bh(local->mdev); 4030 netif_addr_lock_bh(local->mdev);
4092 local->filter_flags |= FIF_BCN_PRBRESP_PROMISC; 4031 local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
@@ -4105,17 +4044,16 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
4105} 4044}
4106 4045
4107 4046
4108int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len) 4047int ieee80211_sta_req_scan(struct ieee80211_sub_if_data *sdata, u8 *ssid, size_t ssid_len)
4109{ 4048{
4110 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4111 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 4049 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4112 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 4050 struct ieee80211_local *local = sdata->local;
4113 4051
4114 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 4052 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
4115 return ieee80211_sta_start_scan(dev, ssid, ssid_len); 4053 return ieee80211_sta_start_scan(sdata, ssid, ssid_len);
4116 4054
4117 if (local->sta_sw_scanning || local->sta_hw_scanning) { 4055 if (local->sta_sw_scanning || local->sta_hw_scanning) {
4118 if (local->scan_dev == dev) 4056 if (local->scan_dev == sdata->dev)
4119 return 0; 4057 return 0;
4120 return -EBUSY; 4058 return -EBUSY;
4121 } 4059 }
@@ -4128,13 +4066,54 @@ int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len)
4128 return 0; 4066 return 0;
4129} 4067}
4130 4068
4069
4070static void ieee80211_sta_add_scan_ies(struct iw_request_info *info,
4071 struct ieee80211_sta_bss *bss,
4072 char **current_ev, char *end_buf)
4073{
4074 u8 *pos, *end, *next;
4075 struct iw_event iwe;
4076
4077 if (bss == NULL || bss->ies == NULL)
4078 return;
4079
4080 /*
4081 * If needed, fragment the IEs buffer (at IE boundaries) into short
4082 * enough fragments to fit into IW_GENERIC_IE_MAX octet messages.
4083 */
4084 pos = bss->ies;
4085 end = pos + bss->ies_len;
4086
4087 while (end - pos > IW_GENERIC_IE_MAX) {
4088 next = pos + 2 + pos[1];
4089 while (next + 2 + next[1] - pos < IW_GENERIC_IE_MAX)
4090 next = next + 2 + next[1];
4091
4092 memset(&iwe, 0, sizeof(iwe));
4093 iwe.cmd = IWEVGENIE;
4094 iwe.u.data.length = next - pos;
4095 *current_ev = iwe_stream_add_point(info, *current_ev,
4096 end_buf, &iwe, pos);
4097
4098 pos = next;
4099 }
4100
4101 if (end > pos) {
4102 memset(&iwe, 0, sizeof(iwe));
4103 iwe.cmd = IWEVGENIE;
4104 iwe.u.data.length = end - pos;
4105 *current_ev = iwe_stream_add_point(info, *current_ev,
4106 end_buf, &iwe, pos);
4107 }
4108}
4109
4110
4131static char * 4111static char *
4132ieee80211_sta_scan_result(struct net_device *dev, 4112ieee80211_sta_scan_result(struct ieee80211_local *local,
4133 struct iw_request_info *info, 4113 struct iw_request_info *info,
4134 struct ieee80211_sta_bss *bss, 4114 struct ieee80211_sta_bss *bss,
4135 char *current_ev, char *end_buf) 4115 char *current_ev, char *end_buf)
4136{ 4116{
4137 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
4138 struct iw_event iwe; 4117 struct iw_event iwe;
4139 4118
4140 if (time_after(jiffies, 4119 if (time_after(jiffies,
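The ieee80211_sta_add_scan_ies() helper added above walks the BSS's raw IE buffer and cuts it at IE boundaries so no single wireless-extensions event exceeds IW_GENERIC_IE_MAX. A standalone sketch of that chunking loop, not part of the patch; MAX_CHUNK and the dummy buffer below are placeholders:

/*
 * Standalone sketch: split a buffer of TLV-style IEs (1-byte id, 1-byte
 * length, payload) into chunks of at most MAX_CHUNK bytes, never cutting
 * an IE in half.  MAX_CHUNK stands in for IW_GENERIC_IE_MAX.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_CHUNK 64

static void emit_chunk(const uint8_t *start, size_t len)
{
	printf("chunk of %zu bytes, first IE id %u\n", len, (unsigned)start[0]);
}

static void chunk_ies(const uint8_t *ies, size_t ies_len)
{
	const uint8_t *pos = ies, *end = ies + ies_len, *next;

	while (end - pos > MAX_CHUNK) {
		/* advance IE by IE while the chunk still fits */
		next = pos + 2 + pos[1];
		while (next + 2 + next[1] - pos < MAX_CHUNK)
			next = next + 2 + next[1];
		emit_chunk(pos, next - pos);
		pos = next;
	}
	if (end > pos)
		emit_chunk(pos, end - pos);
}

int main(void)
{
	/* three dummy 32-byte IEs: ids 0, 1 and 2, each with 30 payload bytes */
	uint8_t buf[96] = { 0 };

	buf[0] = 0;  buf[1] = 30;
	buf[32] = 1; buf[33] = 30;
	buf[64] = 2; buf[65] = 30;

	chunk_ies(buf, sizeof(buf));
	return 0;
}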
@@ -4208,29 +4187,7 @@ ieee80211_sta_scan_result(struct net_device *dev,
4208 current_ev = iwe_stream_add_point(info, current_ev, end_buf, 4187 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4209 &iwe, ""); 4188 &iwe, "");
4210 4189
4211 if (bss && bss->wpa_ie) { 4190 ieee80211_sta_add_scan_ies(info, bss, &current_ev, end_buf);
4212 memset(&iwe, 0, sizeof(iwe));
4213 iwe.cmd = IWEVGENIE;
4214 iwe.u.data.length = bss->wpa_ie_len;
4215 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4216 &iwe, bss->wpa_ie);
4217 }
4218
4219 if (bss && bss->rsn_ie) {
4220 memset(&iwe, 0, sizeof(iwe));
4221 iwe.cmd = IWEVGENIE;
4222 iwe.u.data.length = bss->rsn_ie_len;
4223 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4224 &iwe, bss->rsn_ie);
4225 }
4226
4227 if (bss && bss->ht_ie) {
4228 memset(&iwe, 0, sizeof(iwe));
4229 iwe.cmd = IWEVGENIE;
4230 iwe.u.data.length = bss->ht_ie_len;
4231 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4232 &iwe, bss->ht_ie);
4233 }
4234 4191
4235 if (bss && bss->supp_rates_len > 0) { 4192 if (bss && bss->supp_rates_len > 0) {
4236 /* display all supported rates in readable format */ 4193 /* display all supported rates in readable format */
@@ -4321,11 +4278,10 @@ ieee80211_sta_scan_result(struct net_device *dev,
4321} 4278}
4322 4279
4323 4280
4324int ieee80211_sta_scan_results(struct net_device *dev, 4281int ieee80211_sta_scan_results(struct ieee80211_local *local,
4325 struct iw_request_info *info, 4282 struct iw_request_info *info,
4326 char *buf, size_t len) 4283 char *buf, size_t len)
4327{ 4284{
4328 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
4329 char *current_ev = buf; 4285 char *current_ev = buf;
4330 char *end_buf = buf + len; 4286 char *end_buf = buf + len;
4331 struct ieee80211_sta_bss *bss; 4287 struct ieee80211_sta_bss *bss;
@@ -4336,7 +4292,7 @@ int ieee80211_sta_scan_results(struct net_device *dev,
4336 spin_unlock_bh(&local->sta_bss_lock); 4292 spin_unlock_bh(&local->sta_bss_lock);
4337 return -E2BIG; 4293 return -E2BIG;
4338 } 4294 }
4339 current_ev = ieee80211_sta_scan_result(dev, info, bss, 4295 current_ev = ieee80211_sta_scan_result(local, info, bss,
4340 current_ev, end_buf); 4296 current_ev, end_buf);
4341 } 4297 }
4342 spin_unlock_bh(&local->sta_bss_lock); 4298 spin_unlock_bh(&local->sta_bss_lock);
@@ -4344,9 +4300,8 @@ int ieee80211_sta_scan_results(struct net_device *dev,
4344} 4300}
4345 4301
4346 4302
4347int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len) 4303int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len)
4348{ 4304{
4349 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4350 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 4305 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4351 4306
4352 kfree(ifsta->extra_ie); 4307 kfree(ifsta->extra_ie);
@@ -4366,13 +4321,12 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len)
4366} 4321}
4367 4322
4368 4323
4369struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev, 4324struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
4370 struct sk_buff *skb, u8 *bssid, 4325 struct sk_buff *skb, u8 *bssid,
4371 u8 *addr, u64 supp_rates) 4326 u8 *addr, u64 supp_rates)
4372{ 4327{
4373 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 4328 struct ieee80211_local *local = sdata->local;
4374 struct sta_info *sta; 4329 struct sta_info *sta;
4375 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4376 DECLARE_MAC_BUF(mac); 4330 DECLARE_MAC_BUF(mac);
4377 int band = local->hw.conf.channel->band; 4331 int band = local->hw.conf.channel->band;
4378 4332
@@ -4381,7 +4335,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
4381 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { 4335 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
4382 if (net_ratelimit()) { 4336 if (net_ratelimit()) {
4383 printk(KERN_DEBUG "%s: No room for a new IBSS STA " 4337 printk(KERN_DEBUG "%s: No room for a new IBSS STA "
4384 "entry %s\n", dev->name, print_mac(mac, addr)); 4338 "entry %s\n", sdata->dev->name, print_mac(mac, addr));
4385 } 4339 }
4386 return NULL; 4340 return NULL;
4387 } 4341 }
@@ -4391,7 +4345,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
4391 4345
4392#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 4346#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
4393 printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n", 4347 printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n",
4394 wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name); 4348 wiphy_name(local->hw.wiphy), print_mac(mac, addr), sdata->dev->name);
4395#endif 4349#endif
4396 4350
4397 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); 4351 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -4400,10 +4354,9 @@ struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
4400 4354
4401 set_sta_flags(sta, WLAN_STA_AUTHORIZED); 4355 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
4402 4356
4403 if (supp_rates) 4357 /* make sure mandatory rates are always added */
4404 sta->supp_rates[band] = supp_rates; 4358 sta->supp_rates[band] = supp_rates |
4405 else 4359 ieee80211_sta_get_mandatory_rates(local, band);
4406 sta->supp_rates[band] = sdata->u.sta.supp_rates_bits[band];
4407 4360
4408 rate_control_rate_init(sta, local); 4361 rate_control_rate_init(sta, local);
4409 4362
@@ -4414,31 +4367,29 @@ struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
4414} 4367}
4415 4368
4416 4369
4417int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason) 4370int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason)
4418{ 4371{
4419 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4420 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 4372 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4421 4373
4422 printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n", 4374 printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n",
4423 dev->name, reason); 4375 sdata->dev->name, reason);
4424 4376
4425 if (sdata->vif.type != IEEE80211_IF_TYPE_STA && 4377 if (sdata->vif.type != IEEE80211_IF_TYPE_STA &&
4426 sdata->vif.type != IEEE80211_IF_TYPE_IBSS) 4378 sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
4427 return -EINVAL; 4379 return -EINVAL;
4428 4380
4429 ieee80211_send_deauth(dev, ifsta, reason); 4381 ieee80211_send_deauth(sdata, ifsta, reason);
4430 ieee80211_set_disassoc(dev, ifsta, 1); 4382 ieee80211_set_disassoc(sdata, ifsta, 1);
4431 return 0; 4383 return 0;
4432} 4384}
4433 4385
4434 4386
4435int ieee80211_sta_disassociate(struct net_device *dev, u16 reason) 4387int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason)
4436{ 4388{
4437 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4438 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 4389 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4439 4390
4440 printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n", 4391 printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n",
4441 dev->name, reason); 4392 sdata->dev->name, reason);
4442 4393
4443 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 4394 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
4444 return -EINVAL; 4395 return -EINVAL;
@@ -4446,8 +4397,8 @@ int ieee80211_sta_disassociate(struct net_device *dev, u16 reason)
4446 if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED)) 4397 if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED))
4447 return -1; 4398 return -1;
4448 4399
4449 ieee80211_send_disassoc(dev, ifsta, reason); 4400 ieee80211_send_disassoc(sdata, ifsta, reason);
4450 ieee80211_set_disassoc(dev, ifsta, 0); 4401 ieee80211_set_disassoc(sdata, ifsta, 0);
4451 return 0; 4402 return 0;
4452} 4403}
4453 4404
@@ -4464,7 +4415,7 @@ void ieee80211_notify_mac(struct ieee80211_hw *hw,
4464 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 4415 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
4465 continue; 4416 continue;
4466 4417
4467 ieee80211_sta_req_auth(sdata->dev, &sdata->u.sta); 4418 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
4468 } 4419 }
4469 rcu_read_unlock(); 4420 rcu_read_unlock();
4470 break; 4421 break;
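Throughout the mlme.c changes above, helpers that used to take a struct net_device and re-derive the interface and driver data on every call now take the struct ieee80211_sub_if_data directly. A minimal userspace sketch of that pattern, with simplified stand-in structs; the names below are placeholders, not the kernel definitions:

/*
 * Standalone sketch: once the caller hands over the sub-interface data,
 * the local (hardware) data is one pointer away and the net_device is
 * only needed for log messages.
 */
#include <stdio.h>

struct local  { int scanning; };
struct netdev { const char *name; };

struct sub_if_data {
	struct local  *local;	/* was derived via wdev_priv(dev->ieee80211_ptr) */
	struct netdev *dev;	/* kept mainly for printk-style messages */
};

static void request_scan(struct sub_if_data *sdata)
{
	printf("%s: scan requested (scanning=%d)\n",
	       sdata->dev->name, sdata->local->scanning);
}

int main(void)
{
	struct local  l = { .scanning = 0 };
	struct netdev d = { .name = "wlan0" };
	struct sub_if_data s = { .local = &l, .dev = &d };

	request_scan(&s);
	return 0;
}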
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6db854505193..7e09b30dd393 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -143,6 +143,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
143 /* IEEE80211_RADIOTAP_FLAGS */ 143 /* IEEE80211_RADIOTAP_FLAGS */
144 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 144 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
145 *pos |= IEEE80211_RADIOTAP_F_FCS; 145 *pos |= IEEE80211_RADIOTAP_F_FCS;
146 if (status->flag & RX_FLAG_SHORTPRE)
147 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
146 pos++; 148 pos++;
147 149
148 /* IEEE80211_RADIOTAP_RATE */ 150 /* IEEE80211_RADIOTAP_RATE */
@@ -155,8 +157,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
155 if (status->band == IEEE80211_BAND_5GHZ) 157 if (status->band == IEEE80211_BAND_5GHZ)
156 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | 158 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
157 IEEE80211_CHAN_5GHZ); 159 IEEE80211_CHAN_5GHZ);
160 else if (rate->flags & IEEE80211_RATE_ERP_G)
161 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
162 IEEE80211_CHAN_2GHZ);
158 else 163 else
159 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_DYN | 164 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
160 IEEE80211_CHAN_2GHZ); 165 IEEE80211_CHAN_2GHZ);
161 pos += 2; 166 pos += 2;
162 167
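The radiotap hunk above now reports OFDM channel flags for ERP (802.11g) rates on 2.4 GHz and plain CCK otherwise, instead of the previous catch-all CHAN_DYN. A standalone sketch of that flag selection; the constants are local stand-ins for the radiotap definitions:

/*
 * Standalone sketch: pick radiotap channel flags from the band and the
 * kind of bitrate the frame was received at.
 */
#include <stdio.h>
#include <stdint.h>

#define CHAN_CCK   0x0020
#define CHAN_OFDM  0x0040
#define CHAN_2GHZ  0x0080
#define CHAN_5GHZ  0x0100

enum band { BAND_2GHZ, BAND_5GHZ };

static uint16_t radiotap_chan_flags(enum band band, int rate_is_erp_g)
{
	if (band == BAND_5GHZ)
		return CHAN_OFDM | CHAN_5GHZ;	/* 5 GHz is always OFDM */
	if (rate_is_erp_g)
		return CHAN_OFDM | CHAN_2GHZ;	/* 802.11g rate on 2.4 GHz */
	return CHAN_CCK | CHAN_2GHZ;		/* 802.11b rate on 2.4 GHz */
}

int main(void)
{
	printf("ERP-G on 2.4 GHz: 0x%04x\n", (unsigned)radiotap_chan_flags(BAND_2GHZ, 1));
	printf("CCK on 2.4 GHz:   0x%04x\n", (unsigned)radiotap_chan_flags(BAND_2GHZ, 0));
	return 0;
}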
@@ -399,11 +404,11 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
399 struct sk_buff *skb = rx->skb; 404 struct sk_buff *skb = rx->skb;
400 405
401 if (unlikely(local->sta_hw_scanning)) 406 if (unlikely(local->sta_hw_scanning))
402 return ieee80211_sta_rx_scan(rx->dev, skb, rx->status); 407 return ieee80211_sta_rx_scan(rx->sdata, skb, rx->status);
403 408
404 if (unlikely(local->sta_sw_scanning)) { 409 if (unlikely(local->sta_sw_scanning)) {
405 /* drop all the other packets during a software scan anyway */ 410 /* drop all the other packets during a software scan anyway */
406 if (ieee80211_sta_rx_scan(rx->dev, skb, rx->status) 411 if (ieee80211_sta_rx_scan(rx->sdata, skb, rx->status)
407 != RX_QUEUED) 412 != RX_QUEUED)
408 dev_kfree_skb(skb); 413 dev_kfree_skb(skb);
409 return RX_QUEUED; 414 return RX_QUEUED;
@@ -461,7 +466,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
461 466
462 if (ieee80211_is_data(hdr->frame_control) && 467 if (ieee80211_is_data(hdr->frame_control) &&
463 is_multicast_ether_addr(hdr->addr1) && 468 is_multicast_ether_addr(hdr->addr1) &&
464 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->dev)) 469 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
465 return RX_DROP_MONITOR; 470 return RX_DROP_MONITOR;
466#undef msh_h_get 471#undef msh_h_get
467 472
@@ -816,7 +821,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
816 821
817static inline struct ieee80211_fragment_entry * 822static inline struct ieee80211_fragment_entry *
818ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, 823ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
819 u16 fc, unsigned int frag, unsigned int seq, 824 unsigned int frag, unsigned int seq,
820 int rx_queue, struct ieee80211_hdr *hdr) 825 int rx_queue, struct ieee80211_hdr *hdr)
821{ 826{
822 struct ieee80211_fragment_entry *entry; 827 struct ieee80211_fragment_entry *entry;
@@ -825,7 +830,6 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
825 idx = sdata->fragment_next; 830 idx = sdata->fragment_next;
826 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 831 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
827 struct ieee80211_hdr *f_hdr; 832 struct ieee80211_hdr *f_hdr;
828 u16 f_fc;
829 833
830 idx--; 834 idx--;
831 if (idx < 0) 835 if (idx < 0)
@@ -837,10 +841,13 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
837 entry->last_frag + 1 != frag) 841 entry->last_frag + 1 != frag)
838 continue; 842 continue;
839 843
840 f_hdr = (struct ieee80211_hdr *) entry->skb_list.next->data; 844 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
841 f_fc = le16_to_cpu(f_hdr->frame_control);
842 845
843 if ((fc & IEEE80211_FCTL_FTYPE) != (f_fc & IEEE80211_FCTL_FTYPE) || 846 /*
847 * Check ftype and addresses are equal, else check next fragment
848 */
849 if (((hdr->frame_control ^ f_hdr->frame_control) &
850 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
844 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || 851 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
845 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) 852 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
846 continue; 853 continue;
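The rewritten check above compares the type bits of two little-endian frame_control words directly, XORing them against a cpu_to_le16() mask, so neither word has to be converted to host order first. A standalone sketch of why that works; cpu_to_le16() below is a local stand-in for the kernel macro:

/*
 * Standalone sketch: identical bytes XOR to zero no matter how they are
 * stored, so masking the XOR of two same-endian words isolates any
 * difference in the frame-type bits.
 */
#include <stdio.h>
#include <stdint.h>

#define IEEE80211_FCTL_FTYPE 0x000c

static uint16_t cpu_to_le16(uint16_t v)
{
	/* little-endian host assumed in this sketch; the kernel macro
	 * handles both byte orders */
	return v;
}

/* non-zero iff the two frames have different frame types */
static int ftype_differs(uint16_t fc_a, uint16_t fc_b)
{
	return (fc_a ^ fc_b) & cpu_to_le16(IEEE80211_FCTL_FTYPE);
}

int main(void)
{
	uint16_t data_fc = cpu_to_le16(0x0008);	/* type = data */
	uint16_t mgmt_fc = cpu_to_le16(0x0000);	/* type = management */

	printf("data vs mgmt differ: %d\n", !!ftype_differs(data_fc, mgmt_fc));
	printf("data vs data differ: %d\n", !!ftype_differs(data_fc, data_fc));
	return 0;
}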
@@ -860,16 +867,18 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
860{ 867{
861 struct ieee80211_hdr *hdr; 868 struct ieee80211_hdr *hdr;
862 u16 sc; 869 u16 sc;
870 __le16 fc;
863 unsigned int frag, seq; 871 unsigned int frag, seq;
864 struct ieee80211_fragment_entry *entry; 872 struct ieee80211_fragment_entry *entry;
865 struct sk_buff *skb; 873 struct sk_buff *skb;
866 DECLARE_MAC_BUF(mac); 874 DECLARE_MAC_BUF(mac);
867 875
868 hdr = (struct ieee80211_hdr *) rx->skb->data; 876 hdr = (struct ieee80211_hdr *)rx->skb->data;
877 fc = hdr->frame_control;
869 sc = le16_to_cpu(hdr->seq_ctrl); 878 sc = le16_to_cpu(hdr->seq_ctrl);
870 frag = sc & IEEE80211_SCTL_FRAG; 879 frag = sc & IEEE80211_SCTL_FRAG;
871 880
872 if (likely((!(rx->fc & IEEE80211_FCTL_MOREFRAGS) && frag == 0) || 881 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
873 (rx->skb)->len < 24 || 882 (rx->skb)->len < 24 ||
874 is_multicast_ether_addr(hdr->addr1))) { 883 is_multicast_ether_addr(hdr->addr1))) {
875 /* not fragmented */ 884 /* not fragmented */
@@ -884,7 +893,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
884 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 893 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
885 rx->queue, &(rx->skb)); 894 rx->queue, &(rx->skb));
886 if (rx->key && rx->key->conf.alg == ALG_CCMP && 895 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
887 (rx->fc & IEEE80211_FCTL_PROTECTED)) { 896 ieee80211_has_protected(fc)) {
888 /* Store CCMP PN so that we can verify that the next 897 /* Store CCMP PN so that we can verify that the next
889 * fragment has a sequential PN value. */ 898 * fragment has a sequential PN value. */
890 entry->ccmp = 1; 899 entry->ccmp = 1;
@@ -898,8 +907,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
898 /* This is a fragment for a frame that should already be pending in 907 /* This is a fragment for a frame that should already be pending in
899 * fragment cache. Add this fragment to the end of the pending entry. 908 * fragment cache. Add this fragment to the end of the pending entry.
900 */ 909 */
901 entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq, 910 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
902 rx->queue, hdr);
903 if (!entry) { 911 if (!entry) {
904 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 912 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
905 return RX_DROP_MONITOR; 913 return RX_DROP_MONITOR;
@@ -924,11 +932,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
924 memcpy(entry->last_pn, pn, CCMP_PN_LEN); 932 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
925 } 933 }
926 934
927 skb_pull(rx->skb, ieee80211_get_hdrlen(rx->fc)); 935 skb_pull(rx->skb, ieee80211_hdrlen(fc));
928 __skb_queue_tail(&entry->skb_list, rx->skb); 936 __skb_queue_tail(&entry->skb_list, rx->skb);
929 entry->last_frag = frag; 937 entry->last_frag = frag;
930 entry->extra_len += rx->skb->len; 938 entry->extra_len += rx->skb->len;
931 if (rx->fc & IEEE80211_FCTL_MOREFRAGS) { 939 if (ieee80211_has_morefrags(fc)) {
932 rx->skb = NULL; 940 rx->skb = NULL;
933 return RX_QUEUED; 941 return RX_QUEUED;
934 } 942 }
@@ -968,10 +976,9 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
968 struct sk_buff *skb; 976 struct sk_buff *skb;
969 int no_pending_pkts; 977 int no_pending_pkts;
970 DECLARE_MAC_BUF(mac); 978 DECLARE_MAC_BUF(mac);
979 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
971 980
972 if (likely(!rx->sta || 981 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
973 (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL ||
974 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL ||
975 !(rx->flags & IEEE80211_RX_RA_MATCH))) 982 !(rx->flags & IEEE80211_RX_RA_MATCH)))
976 return RX_CONTINUE; 983 return RX_CONTINUE;
977 984
@@ -1050,7 +1057,6 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1050 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN); 1057 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1051 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN); 1058 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1052 /* change frame type to non QOS */ 1059 /* change frame type to non QOS */
1053 rx->fc &= ~IEEE80211_STYPE_QOS_DATA;
1054 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1060 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1055 1061
1056 return RX_CONTINUE; 1062 return RX_CONTINUE;
@@ -1067,7 +1073,7 @@ ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1067} 1073}
1068 1074
1069static int 1075static int
1070ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx) 1076ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1071{ 1077{
1072 /* 1078 /*
1073 * Pass through unencrypted frames if the hardware has 1079 * Pass through unencrypted frames if the hardware has
@@ -1077,9 +1083,8 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx)
1077 return 0; 1083 return 0;
1078 1084
1079 /* Drop unencrypted frames if key is set. */ 1085 /* Drop unencrypted frames if key is set. */
1080 if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) && 1086 if (unlikely(!ieee80211_has_protected(fc) &&
1081 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && 1087 !ieee80211_is_nullfunc(fc) &&
1082 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC &&
1083 (rx->key || rx->sdata->drop_unencrypted))) 1088 (rx->key || rx->sdata->drop_unencrypted)))
1084 return -EACCES; 1089 return -EACCES;
1085 1090
@@ -1091,7 +1096,7 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1091{ 1096{
1092 struct net_device *dev = rx->dev; 1097 struct net_device *dev = rx->dev;
1093 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 1098 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1094 u16 fc, hdrlen, ethertype; 1099 u16 hdrlen, ethertype;
1095 u8 *payload; 1100 u8 *payload;
1096 u8 dst[ETH_ALEN]; 1101 u8 dst[ETH_ALEN];
1097 u8 src[ETH_ALEN] __aligned(2); 1102 u8 src[ETH_ALEN] __aligned(2);
@@ -1102,12 +1107,10 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1102 DECLARE_MAC_BUF(mac3); 1107 DECLARE_MAC_BUF(mac3);
1103 DECLARE_MAC_BUF(mac4); 1108 DECLARE_MAC_BUF(mac4);
1104 1109
1105 fc = rx->fc; 1110 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1106
1107 if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
1108 return -1; 1111 return -1;
1109 1112
1110 hdrlen = ieee80211_get_hdrlen(fc); 1113 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1111 1114
1112 if (ieee80211_vif_is_mesh(&sdata->vif)) 1115 if (ieee80211_vif_is_mesh(&sdata->vif))
1113 hdrlen += ieee80211_get_mesh_hdrlen( 1116 hdrlen += ieee80211_get_mesh_hdrlen(
@@ -1122,41 +1125,28 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1122 * 1 0 BSSID SA DA n/a 1125 * 1 0 BSSID SA DA n/a
1123 * 1 1 RA TA DA SA 1126 * 1 1 RA TA DA SA
1124 */ 1127 */
1128 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
1129 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
1125 1130
1126 switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { 1131 switch (hdr->frame_control &
1127 case IEEE80211_FCTL_TODS: 1132 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1128 /* BSSID SA DA */ 1133 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS):
1129 memcpy(dst, hdr->addr3, ETH_ALEN);
1130 memcpy(src, hdr->addr2, ETH_ALEN);
1131
1132 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP && 1134 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP &&
1133 sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) 1135 sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
1134 return -1; 1136 return -1;
1135 break; 1137 break;
1136 case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): 1138 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1137 /* RA TA DA SA */ 1139 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS &&
1138 memcpy(dst, hdr->addr3, ETH_ALEN);
1139 memcpy(src, hdr->addr4, ETH_ALEN);
1140
1141 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS &&
1142 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) 1140 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
1143 return -1; 1141 return -1;
1144 break; 1142 break;
1145 case IEEE80211_FCTL_FROMDS: 1143 case __constant_cpu_to_le16(IEEE80211_FCTL_FROMDS):
1146 /* DA BSSID SA */
1147 memcpy(dst, hdr->addr1, ETH_ALEN);
1148 memcpy(src, hdr->addr3, ETH_ALEN);
1149
1150 if (sdata->vif.type != IEEE80211_IF_TYPE_STA || 1144 if (sdata->vif.type != IEEE80211_IF_TYPE_STA ||
1151 (is_multicast_ether_addr(dst) && 1145 (is_multicast_ether_addr(dst) &&
1152 !compare_ether_addr(src, dev->dev_addr))) 1146 !compare_ether_addr(src, dev->dev_addr)))
1153 return -1; 1147 return -1;
1154 break; 1148 break;
1155 case 0: 1149 case __constant_cpu_to_le16(0):
1156 /* DA SA BSSID */
1157 memcpy(dst, hdr->addr1, ETH_ALEN);
1158 memcpy(src, hdr->addr2, ETH_ALEN);
1159
1160 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS) 1150 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
1161 return -1; 1151 return -1;
1162 break; 1152 break;
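The switch above now keys on the little-endian ToDS/FromDS bits and leaves the DA/SA extraction to ieee80211_get_DA()/ieee80211_get_SA(), which is why the per-case memcpy()s could be dropped. A standalone sketch of that address mapping, using a simplified stand-in header struct with a host-order fc field:

/*
 * Standalone sketch of the 802.11 ToDS/FromDS address mapping:
 *
 *   ToDS FromDS   DA      SA
 *    0    0      addr1   addr2   (IBSS)
 *    0    1      addr1   addr3   (AP -> STA)
 *    1    0      addr3   addr2   (STA -> AP)
 *    1    1      addr3   addr4   (WDS / mesh, 4-address header)
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FCTL_TODS   0x0100
#define FCTL_FROMDS 0x0200

struct hdr {
	uint16_t fc;		/* host order in this sketch */
	uint8_t addr1[6];	/* RA */
	uint8_t addr2[6];	/* TA */
	uint8_t addr3[6];
	uint8_t addr4[6];	/* only present when ToDS and FromDS are both set */
};

static const uint8_t *get_da(const struct hdr *h)
{
	return (h->fc & FCTL_TODS) ? h->addr3 : h->addr1;
}

static const uint8_t *get_sa(const struct hdr *h)
{
	if ((h->fc & (FCTL_TODS | FCTL_FROMDS)) == (FCTL_TODS | FCTL_FROMDS))
		return h->addr4;
	return (h->fc & FCTL_FROMDS) ? h->addr3 : h->addr2;
}

int main(void)
{
	struct hdr h;

	memset(&h, 0, sizeof(h));
	h.addr2[0] = 2;			/* tag the addresses so the */
	h.addr3[0] = 3;			/* output shows which one won */
	h.fc = FCTL_TODS;		/* STA -> AP: DA = addr3, SA = addr2 */

	printf("DA from addr%u, SA from addr%u\n",
	       (unsigned)get_da(&h)[0], (unsigned)get_sa(&h)[0]);
	return 0;
}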
@@ -1193,7 +1183,7 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1193/* 1183/*
1194 * requires that rx->skb is a frame with ethernet header 1184 * requires that rx->skb is a frame with ethernet header
1195 */ 1185 */
1196static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx) 1186static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1197{ 1187{
1198 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 1188 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1199 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 1189 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
@@ -1209,7 +1199,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx)
1209 return true; 1199 return true;
1210 1200
1211 if (ieee80211_802_1x_port_control(rx) || 1201 if (ieee80211_802_1x_port_control(rx) ||
1212 ieee80211_drop_unencrypted(rx)) 1202 ieee80211_drop_unencrypted(rx, fc))
1213 return false; 1203 return false;
1214 1204
1215 return true; 1205 return true;
@@ -1279,20 +1269,21 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1279{ 1269{
1280 struct net_device *dev = rx->dev; 1270 struct net_device *dev = rx->dev;
1281 struct ieee80211_local *local = rx->local; 1271 struct ieee80211_local *local = rx->local;
1282 u16 fc, ethertype; 1272 u16 ethertype;
1283 u8 *payload; 1273 u8 *payload;
1284 struct sk_buff *skb = rx->skb, *frame = NULL; 1274 struct sk_buff *skb = rx->skb, *frame = NULL;
1275 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1276 __le16 fc = hdr->frame_control;
1285 const struct ethhdr *eth; 1277 const struct ethhdr *eth;
1286 int remaining, err; 1278 int remaining, err;
1287 u8 dst[ETH_ALEN]; 1279 u8 dst[ETH_ALEN];
1288 u8 src[ETH_ALEN]; 1280 u8 src[ETH_ALEN];
1289 DECLARE_MAC_BUF(mac); 1281 DECLARE_MAC_BUF(mac);
1290 1282
1291 fc = rx->fc; 1283 if (unlikely(!ieee80211_is_data(fc)))
1292 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
1293 return RX_CONTINUE; 1284 return RX_CONTINUE;
1294 1285
1295 if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) 1286 if (unlikely(!ieee80211_is_data_present(fc)))
1296 return RX_DROP_MONITOR; 1287 return RX_DROP_MONITOR;
1297 1288
1298 if (!(rx->flags & IEEE80211_RX_AMSDU)) 1289 if (!(rx->flags & IEEE80211_RX_AMSDU))
@@ -1374,7 +1365,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1374 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); 1365 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1375 } 1366 }
1376 1367
1377 if (!ieee80211_frame_allowed(rx)) { 1368 if (!ieee80211_frame_allowed(rx, fc)) {
1378 if (skb == frame) /* last frame */ 1369 if (skb == frame) /* last frame */
1379 return RX_DROP_UNUSABLE; 1370 return RX_DROP_UNUSABLE;
1380 dev_kfree_skb(frame); 1371 dev_kfree_skb(frame);
@@ -1448,21 +1439,21 @@ static ieee80211_rx_result debug_noinline
1448ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1439ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1449{ 1440{
1450 struct net_device *dev = rx->dev; 1441 struct net_device *dev = rx->dev;
1451 u16 fc; 1442 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1443 __le16 fc = hdr->frame_control;
1452 int err; 1444 int err;
1453 1445
1454 fc = rx->fc; 1446 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1455 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
1456 return RX_CONTINUE; 1447 return RX_CONTINUE;
1457 1448
1458 if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) 1449 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1459 return RX_DROP_MONITOR; 1450 return RX_DROP_MONITOR;
1460 1451
1461 err = ieee80211_data_to_8023(rx); 1452 err = ieee80211_data_to_8023(rx);
1462 if (unlikely(err)) 1453 if (unlikely(err))
1463 return RX_DROP_UNUSABLE; 1454 return RX_DROP_UNUSABLE;
1464 1455
1465 if (!ieee80211_frame_allowed(rx)) 1456 if (!ieee80211_frame_allowed(rx, fc))
1466 return RX_DROP_MONITOR; 1457 return RX_DROP_MONITOR;
1467 1458
1468 rx->skb->dev = dev; 1459 rx->skb->dev = dev;
@@ -1532,7 +1523,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1532 sdata->vif.type == IEEE80211_IF_TYPE_IBSS || 1523 sdata->vif.type == IEEE80211_IF_TYPE_IBSS ||
1533 sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) && 1524 sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) &&
1534 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) 1525 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
1535 ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->status); 1526 ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
1536 else 1527 else
1537 return RX_DROP_MONITOR; 1528 return RX_DROP_MONITOR;
1538 1529
@@ -1579,7 +1570,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1579 !ieee80211_is_auth(hdr->frame_control)) 1570 !ieee80211_is_auth(hdr->frame_control))
1580 goto ignore; 1571 goto ignore;
1581 1572
1582 mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr); 1573 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr);
1583 ignore: 1574 ignore:
1584 dev_kfree_skb(rx->skb); 1575 dev_kfree_skb(rx->skb);
1585 rx->skb = NULL; 1576 rx->skb = NULL;
@@ -1752,10 +1743,6 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1752 if (!bssid) 1743 if (!bssid)
1753 return 0; 1744 return 0;
1754 if (ieee80211_is_beacon(hdr->frame_control)) { 1745 if (ieee80211_is_beacon(hdr->frame_control)) {
1755 if (!rx->sta)
1756 rx->sta = ieee80211_ibss_add_sta(sdata->dev,
1757 rx->skb, bssid, hdr->addr2,
1758 BIT(rx->status->rate_idx));
1759 return 1; 1746 return 1;
1760 } 1747 }
1761 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { 1748 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
@@ -1769,7 +1756,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1769 return 0; 1756 return 0;
1770 rx->flags &= ~IEEE80211_RX_RA_MATCH; 1757 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1771 } else if (!rx->sta) 1758 } else if (!rx->sta)
1772 rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, 1759 rx->sta = ieee80211_ibss_add_sta(sdata, rx->skb,
1773 bssid, hdr->addr2, 1760 bssid, hdr->addr2,
1774 BIT(rx->status->rate_idx)); 1761 BIT(rx->status->rate_idx));
1775 break; 1762 break;
@@ -1827,23 +1814,20 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1827 struct ieee80211_sub_if_data *sdata; 1814 struct ieee80211_sub_if_data *sdata;
1828 struct ieee80211_hdr *hdr; 1815 struct ieee80211_hdr *hdr;
1829 struct ieee80211_rx_data rx; 1816 struct ieee80211_rx_data rx;
1830 u16 type;
1831 int prepares; 1817 int prepares;
1832 struct ieee80211_sub_if_data *prev = NULL; 1818 struct ieee80211_sub_if_data *prev = NULL;
1833 struct sk_buff *skb_new; 1819 struct sk_buff *skb_new;
1834 u8 *bssid; 1820 u8 *bssid;
1835 1821
1836 hdr = (struct ieee80211_hdr *) skb->data; 1822 hdr = (struct ieee80211_hdr *)skb->data;
1837 memset(&rx, 0, sizeof(rx)); 1823 memset(&rx, 0, sizeof(rx));
1838 rx.skb = skb; 1824 rx.skb = skb;
1839 rx.local = local; 1825 rx.local = local;
1840 1826
1841 rx.status = status; 1827 rx.status = status;
1842 rx.rate = rate; 1828 rx.rate = rate;
1843 rx.fc = le16_to_cpu(hdr->frame_control);
1844 type = rx.fc & IEEE80211_FCTL_FTYPE;
1845 1829
1846 if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) 1830 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
1847 local->dot11ReceivedFragmentCount++; 1831 local->dot11ReceivedFragmentCount++;
1848 1832
1849 rx.sta = sta_info_get(local, hdr->addr2); 1833 rx.sta = sta_info_get(local, hdr->addr2);
@@ -1904,14 +1888,12 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1904 prev->dev->name); 1888 prev->dev->name);
1905 continue; 1889 continue;
1906 } 1890 }
1907 rx.fc = le16_to_cpu(hdr->frame_control);
1908 ieee80211_invoke_rx_handlers(prev, &rx, skb_new); 1891 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
1909 prev = sdata; 1892 prev = sdata;
1910 } 1893 }
1911 if (prev) { 1894 if (prev)
1912 rx.fc = le16_to_cpu(hdr->frame_control);
1913 ieee80211_invoke_rx_handlers(prev, &rx, skb); 1895 ieee80211_invoke_rx_handlers(prev, &rx, skb);
1914 } else 1896 else
1915 dev_kfree_skb(skb); 1897 dev_kfree_skb(skb);
1916} 1898}
1917 1899
@@ -2080,7 +2062,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2080 /* if this mpdu is fragmented - terminate rx aggregation session */ 2062 /* if this mpdu is fragmented - terminate rx aggregation session */
2081 sc = le16_to_cpu(hdr->seq_ctrl); 2063 sc = le16_to_cpu(hdr->seq_ctrl);
2082 if (sc & IEEE80211_SCTL_FRAG) { 2064 if (sc & IEEE80211_SCTL_FRAG) {
2083 ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, 2065 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->addr,
2084 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); 2066 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2085 ret = 1; 2067 ret = 1;
2086 goto end_reorder; 2068 goto end_reorder;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 109db787ccb7..4a581a5b5766 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -204,6 +204,7 @@ struct sta_ampdu_mlme {
204 * @tx_fragments: number of transmitted MPDUs 204 * @tx_fragments: number of transmitted MPDUs
205 * @txrate_idx: TBD 205 * @txrate_idx: TBD
206 * @last_txrate_idx: TBD 206 * @last_txrate_idx: TBD
207 * @tid_seq: TBD
207 * @wme_tx_queue: TBD 208 * @wme_tx_queue: TBD
208 * @ampdu_mlme: TBD 209 * @ampdu_mlme: TBD
209 * @timer_to_tid: identity mapping to ID timers 210 * @timer_to_tid: identity mapping to ID timers
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 4788f7b91f49..c413d4836afe 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -82,6 +82,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
82 struct ieee80211_rate *txrate; 82 struct ieee80211_rate *txrate;
83 struct ieee80211_local *local = tx->local; 83 struct ieee80211_local *local = tx->local;
84 struct ieee80211_supported_band *sband; 84 struct ieee80211_supported_band *sband;
85 struct ieee80211_hdr *hdr;
85 86
86 sband = local->hw.wiphy->bands[tx->channel->band]; 87 sband = local->hw.wiphy->bands[tx->channel->band];
87 txrate = &sband->bitrates[tx->rate_idx]; 88 txrate = &sband->bitrates[tx->rate_idx];
@@ -107,8 +108,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
107 * at the highest possible rate belonging to the PHY rates in the 108 * at the highest possible rate belonging to the PHY rates in the
108 * BSSBasicRateSet 109 * BSSBasicRateSet
109 */ 110 */
110 111 hdr = (struct ieee80211_hdr *)tx->skb->data;
111 if ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) { 112 if (ieee80211_is_ctl(hdr->frame_control)) {
112 /* TODO: These control frames are not currently sent by 113 /* TODO: These control frames are not currently sent by
113 * 80211.o, but should they be implemented, this function 114 * 80211.o, but should they be implemented, this function
114 * needs to be updated to support duration field calculation. 115 * needs to be updated to support duration field calculation.
@@ -213,9 +214,8 @@ static int inline is_ieee80211_device(struct net_device *dev,
213static ieee80211_tx_result debug_noinline 214static ieee80211_tx_result debug_noinline
214ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) 215ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
215{ 216{
216#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 217
217 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 218 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
218#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
219 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 219 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
220 u32 sta_flags; 220 u32 sta_flags;
221 221
@@ -223,8 +223,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
223 return TX_CONTINUE; 223 return TX_CONTINUE;
224 224
225 if (unlikely(tx->local->sta_sw_scanning) && 225 if (unlikely(tx->local->sta_sw_scanning) &&
226 ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 226 !ieee80211_is_probe_req(hdr->frame_control))
227 (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ))
228 return TX_DROP; 227 return TX_DROP;
229 228
230 if (tx->sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) 229 if (tx->sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT)
@@ -238,7 +237,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
238 if (likely(tx->flags & IEEE80211_TX_UNICAST)) { 237 if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
239 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && 238 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
240 tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 239 tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
241 (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { 240 ieee80211_is_data(hdr->frame_control))) {
242#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 241#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
243 DECLARE_MAC_BUF(mac); 242 DECLARE_MAC_BUF(mac);
244 printk(KERN_DEBUG "%s: dropped data frame to not " 243 printk(KERN_DEBUG "%s: dropped data frame to not "
@@ -249,7 +248,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
249 return TX_DROP; 248 return TX_DROP;
250 } 249 }
251 } else { 250 } else {
252 if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && 251 if (unlikely(ieee80211_is_data(hdr->frame_control) &&
253 tx->local->num_sta == 0 && 252 tx->local->num_sta == 0 &&
254 tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS)) { 253 tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS)) {
255 /* 254 /*
@@ -315,6 +314,7 @@ static ieee80211_tx_result
315ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) 314ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
316{ 315{
317 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 316 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
317 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
318 318
319 /* 319 /*
320 * broadcast/multicast frame 320 * broadcast/multicast frame
@@ -329,7 +329,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
329 return TX_CONTINUE; 329 return TX_CONTINUE;
330 330
331 /* no buffering for ordered frames */ 331 /* no buffering for ordered frames */
332 if (tx->fc & IEEE80211_FCTL_ORDER) 332 if (ieee80211_has_order(hdr->frame_control))
333 return TX_CONTINUE; 333 return TX_CONTINUE;
334 334
335 /* no stations in PS mode */ 335 /* no stations in PS mode */
@@ -367,12 +367,11 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
367{ 367{
368 struct sta_info *sta = tx->sta; 368 struct sta_info *sta = tx->sta;
369 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 369 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
370 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
370 u32 staflags; 371 u32 staflags;
371 DECLARE_MAC_BUF(mac); 372 DECLARE_MAC_BUF(mac);
372 373
373 if (unlikely(!sta || 374 if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)))
374 ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT &&
375 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)))
376 return TX_CONTINUE; 375 return TX_CONTINUE;
377 376
378 staflags = get_sta_flags(sta); 377 staflags = get_sta_flags(sta);
@@ -437,7 +436,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
437{ 436{
438 struct ieee80211_key *key; 437 struct ieee80211_key *key;
439 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 438 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
440 u16 fc = tx->fc; 439 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
441 440
442 if (unlikely(tx->skb->do_not_encrypt)) 441 if (unlikely(tx->skb->do_not_encrypt))
443 tx->key = NULL; 442 tx->key = NULL;
@@ -454,22 +453,16 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
454 tx->key = NULL; 453 tx->key = NULL;
455 454
456 if (tx->key) { 455 if (tx->key) {
457 u16 ftype, stype;
458
459 tx->key->tx_rx_count++; 456 tx->key->tx_rx_count++;
460 /* TODO: add threshold stuff again */ 457 /* TODO: add threshold stuff again */
461 458
462 switch (tx->key->conf.alg) { 459 switch (tx->key->conf.alg) {
463 case ALG_WEP: 460 case ALG_WEP:
464 ftype = fc & IEEE80211_FCTL_FTYPE; 461 if (ieee80211_is_auth(hdr->frame_control))
465 stype = fc & IEEE80211_FCTL_STYPE;
466
467 if (ftype == IEEE80211_FTYPE_MGMT &&
468 stype == IEEE80211_STYPE_AUTH)
469 break; 462 break;
470 case ALG_TKIP: 463 case ALG_TKIP:
471 case ALG_CCMP: 464 case ALG_CCMP:
472 if (!WLAN_FC_DATA_PRESENT(fc)) 465 if (!ieee80211_is_data_present(hdr->frame_control))
473 tx->key = NULL; 466 tx->key = NULL;
474 break; 467 break;
475 } 468 }
@@ -1000,7 +993,6 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1000 hdr = (struct ieee80211_hdr *) skb->data; 993 hdr = (struct ieee80211_hdr *) skb->data;
1001 994
1002 tx->sta = sta_info_get(local, hdr->addr1); 995 tx->sta = sta_info_get(local, hdr->addr1);
1003 tx->fc = le16_to_cpu(hdr->frame_control);
1004 996
1005 if (is_multicast_ether_addr(hdr->addr1)) { 997 if (is_multicast_ether_addr(hdr->addr1)) {
1006 tx->flags &= ~IEEE80211_TX_UNICAST; 998 tx->flags &= ~IEEE80211_TX_UNICAST;
@@ -1025,7 +1017,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1025 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT)) 1017 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
1026 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 1018 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1027 1019
1028 hdrlen = ieee80211_get_hdrlen(tx->fc); 1020 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1029 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { 1021 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
1030 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; 1022 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
1031 tx->ethertype = (pos[0] << 8) | pos[1]; 1023 tx->ethertype = (pos[0] << 8) | pos[1];
@@ -1335,7 +1327,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb,
1335 if (is_multicast_ether_addr(hdr->addr3)) 1327 if (is_multicast_ether_addr(hdr->addr3))
1336 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); 1328 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
1337 else 1329 else
1338 if (mesh_nexthop_lookup(skb, odev)) 1330 if (mesh_nexthop_lookup(skb, osdata))
1339 return 0; 1331 return 0;
1340 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) 1332 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
1341 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.sta, 1333 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.sta,
@@ -1889,8 +1881,8 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1889 goto out; 1881 goto out;
1890 1882
1891 hdr = (struct ieee80211_hdr *) skb->data; 1883 hdr = (struct ieee80211_hdr *) skb->data;
1892 hdr->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 1884 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1893 IEEE80211_STYPE_BEACON); 1885 IEEE80211_STYPE_BEACON);
1894 1886
1895 num_beacons = &ifsta->num_beacons; 1887 num_beacons = &ifsta->num_beacons;
1896 } else if (ieee80211_vif_is_mesh(&sdata->vif)) { 1888 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -1916,7 +1908,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1916 *pos++ = WLAN_EID_SSID; 1908 *pos++ = WLAN_EID_SSID;
1917 *pos++ = 0x0; 1909 *pos++ = 0x0;
1918 1910
1919 mesh_mgmt_ies_add(skb, sdata->dev); 1911 mesh_mgmt_ies_add(skb, sdata);
1920 1912
1921 num_beacons = &sdata->u.sta.num_beacons; 1913 num_beacons = &sdata->u.sta.num_beacons;
1922 } else { 1914 } else {
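Note on the tx.c hunks above: they drop the cached host-order tx->fc field in favour of testing hdr->frame_control directly through the __le16 helpers from linux/ieee80211.h (ieee80211_is_probe_resp(), ieee80211_is_auth(), ieee80211_is_data_present(), ieee80211_hdrlen()). A minimal sketch of how such a helper is typically built from the standard frame-control masks, for illustration only:

	static inline int is_probe_resp(__le16 fc)
	{
		/* compare type and subtype bits in little-endian form, so no
		 * byte swap is needed on the transmit fast path */
		return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
		       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
	}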
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0d463c80c404..f40c060341ae 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -91,45 +91,6 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
91 return NULL; 91 return NULL;
92} 92}
93 93
94int ieee80211_get_hdrlen(u16 fc)
95{
96 int hdrlen = 24;
97
98 switch (fc & IEEE80211_FCTL_FTYPE) {
99 case IEEE80211_FTYPE_DATA:
100 if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
101 hdrlen = 30; /* Addr4 */
102 /*
103 * The QoS Control field is two bytes and its presence is
104 * indicated by the IEEE80211_STYPE_QOS_DATA bit. Add 2 to
105 * hdrlen if that bit is set.
106 * This works by masking out the bit and shifting it to
107 * bit position 1 so the result has the value 0 or 2.
108 */
109 hdrlen += (fc & IEEE80211_STYPE_QOS_DATA)
110 >> (ilog2(IEEE80211_STYPE_QOS_DATA)-1);
111 break;
112 case IEEE80211_FTYPE_CTL:
113 /*
114 * ACK and CTS are 10 bytes, all others 16. To see how
115 * to get this condition consider
116 * subtype mask: 0b0000000011110000 (0x00F0)
117 * ACK subtype: 0b0000000011010000 (0x00D0)
118 * CTS subtype: 0b0000000011000000 (0x00C0)
119 * bits that matter: ^^^ (0x00E0)
120 * value of those: 0b0000000011000000 (0x00C0)
121 */
122 if ((fc & 0xE0) == 0xC0)
123 hdrlen = 10;
124 else
125 hdrlen = 16;
126 break;
127 }
128
129 return hdrlen;
130}
131EXPORT_SYMBOL(ieee80211_get_hdrlen);
132
133unsigned int ieee80211_hdrlen(__le16 fc) 94unsigned int ieee80211_hdrlen(__le16 fc)
134{ 95{
135 unsigned int hdrlen = 24; 96 unsigned int hdrlen = 24;
@@ -386,6 +347,13 @@ void ieee80211_stop_queues(struct ieee80211_hw *hw)
386} 347}
387EXPORT_SYMBOL(ieee80211_stop_queues); 348EXPORT_SYMBOL(ieee80211_stop_queues);
388 349
350int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
351{
352 struct ieee80211_local *local = hw_to_local(hw);
353 return __netif_subqueue_stopped(local->mdev, queue);
354}
355EXPORT_SYMBOL(ieee80211_queue_stopped);
356
389void ieee80211_wake_queues(struct ieee80211_hw *hw) 357void ieee80211_wake_queues(struct ieee80211_hw *hw)
390{ 358{
391 int i; 359 int i;
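For reference, the arithmetic described by the comments in the deleted ieee80211_get_hdrlen() works out as follows (constants as defined in linux/ieee80211.h): IEEE80211_STYPE_QOS_DATA is 0x0080, so ilog2(0x0080) - 1 = 6 and (fc & 0x0080) >> 6 evaluates to either 0 or 2, the size of the QoS Control field; among the control subtypes, only ACK (0x00D0) and CTS (0x00C0) satisfy (fc & 0xE0) == 0xC0, which is what selects the short 10-byte header. The remaining ieee80211_hdrlen(), which takes a __le16, covers the same cases.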
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 5c2bf0a3d4db..376c84987e4f 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -228,11 +228,10 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
228 return -1; 228 return -1;
229 229
230 hdrlen = ieee80211_hdrlen(hdr->frame_control); 230 hdrlen = ieee80211_hdrlen(hdr->frame_control);
231 231 if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN)
232 if (skb->len < 8 + hdrlen)
233 return -1; 232 return -1;
234 233
235 len = skb->len - hdrlen - 8; 234 len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN;
236 235
237 keyidx = skb->data[hdrlen + 3] >> 6; 236 keyidx = skb->data[hdrlen + 3] >> 6;
238 237
@@ -292,9 +291,10 @@ u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key)
292ieee80211_rx_result 291ieee80211_rx_result
293ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) 292ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
294{ 293{
295 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && 294 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
296 ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 295
297 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) 296 if (!ieee80211_is_data(hdr->frame_control) &&
297 !ieee80211_is_auth(hdr->frame_control))
298 return RX_CONTINUE; 298 return RX_CONTINUE;
299 299
300 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { 300 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
@@ -303,7 +303,7 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
303 } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { 303 } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) {
304 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); 304 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
305 /* remove ICV */ 305 /* remove ICV */
306 skb_trim(rx->skb, rx->skb->len - 4); 306 skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN);
307 } 307 }
308 308
309 return RX_CONTINUE; 309 return RX_CONTINUE;
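The wep.c bounds-check change above replaces the magic value 8 with WEP_IV_LEN + WEP_ICV_LEN; with the conventional WEP layout of a 4-byte IV (three IV octets plus the key-index byte) and a 4-byte CRC-32 ICV, the two expressions are numerically identical, so only the readability of the check improves. Likewise, skb_trim(..., len - WEP_ICV_LEN) still strips the same 4 trailing bytes as the old "- 4".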
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 34fa8ed1e784..beae664ab480 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -27,22 +27,19 @@
27#include "aes_ccm.h" 27#include "aes_ccm.h"
28 28
29 29
30static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr, 30static int ieee80211_set_encryption(struct ieee80211_sub_if_data *sdata, u8 *sta_addr,
31 int idx, int alg, int remove, 31 int idx, int alg, int remove,
32 int set_tx_key, const u8 *_key, 32 int set_tx_key, const u8 *_key,
33 size_t key_len) 33 size_t key_len)
34{ 34{
35 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 35 struct ieee80211_local *local = sdata->local;
36 struct sta_info *sta; 36 struct sta_info *sta;
37 struct ieee80211_key *key; 37 struct ieee80211_key *key;
38 struct ieee80211_sub_if_data *sdata;
39 int err; 38 int err;
40 39
41 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
42
43 if (idx < 0 || idx >= NUM_DEFAULT_KEYS) { 40 if (idx < 0 || idx >= NUM_DEFAULT_KEYS) {
44 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n", 41 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n",
45 dev->name, idx); 42 sdata->dev->name, idx);
46 return -EINVAL; 43 return -EINVAL;
47 } 44 }
48 45
@@ -127,11 +124,11 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev,
127 124
128 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 125 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
129 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 126 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
130 int ret = ieee80211_sta_set_extra_ie(dev, extra, data->length); 127 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length);
131 if (ret) 128 if (ret)
132 return ret; 129 return ret;
133 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; 130 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
134 ieee80211_sta_req_auth(dev, &sdata->u.sta); 131 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
135 return 0; 132 return 0;
136 } 133 }
137 134
@@ -333,12 +330,11 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev,
333 return 0; 330 return 0;
334} 331}
335 332
336int ieee80211_set_freq(struct net_device *dev, int freqMHz) 333int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freqMHz)
337{ 334{
338 int ret = -EINVAL; 335 int ret = -EINVAL;
339 struct ieee80211_channel *chan; 336 struct ieee80211_channel *chan;
340 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 337 struct ieee80211_local *local = sdata->local;
341 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
342 338
343 chan = ieee80211_get_channel(local->hw.wiphy, freqMHz); 339 chan = ieee80211_get_channel(local->hw.wiphy, freqMHz);
344 340
@@ -346,7 +342,7 @@ int ieee80211_set_freq(struct net_device *dev, int freqMHz)
346 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && 342 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
347 chan->flags & IEEE80211_CHAN_NO_IBSS) { 343 chan->flags & IEEE80211_CHAN_NO_IBSS) {
348 printk(KERN_DEBUG "%s: IBSS not allowed on frequency " 344 printk(KERN_DEBUG "%s: IBSS not allowed on frequency "
349 "%d MHz\n", dev->name, chan->center_freq); 345 "%d MHz\n", sdata->dev->name, chan->center_freq);
350 return ret; 346 return ret;
351 } 347 }
352 local->oper_channel = chan; 348 local->oper_channel = chan;
@@ -379,14 +375,14 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev,
379 IEEE80211_STA_AUTO_CHANNEL_SEL; 375 IEEE80211_STA_AUTO_CHANNEL_SEL;
380 return 0; 376 return 0;
381 } else 377 } else
382 return ieee80211_set_freq(dev, 378 return ieee80211_set_freq(sdata,
383 ieee80211_channel_to_frequency(freq->m)); 379 ieee80211_channel_to_frequency(freq->m));
384 } else { 380 } else {
385 int i, div = 1000000; 381 int i, div = 1000000;
386 for (i = 0; i < freq->e; i++) 382 for (i = 0; i < freq->e; i++)
387 div /= 10; 383 div /= 10;
388 if (div > 0) 384 if (div > 0)
389 return ieee80211_set_freq(dev, freq->m / div); 385 return ieee80211_set_freq(sdata, freq->m / div);
390 else 386 else
391 return -EINVAL; 387 return -EINVAL;
392 } 388 }
@@ -432,10 +428,10 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
432 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; 428 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
433 else 429 else
434 sdata->u.sta.flags |= IEEE80211_STA_AUTO_SSID_SEL; 430 sdata->u.sta.flags |= IEEE80211_STA_AUTO_SSID_SEL;
435 ret = ieee80211_sta_set_ssid(dev, ssid, len); 431 ret = ieee80211_sta_set_ssid(sdata, ssid, len);
436 if (ret) 432 if (ret)
437 return ret; 433 return ret;
438 ieee80211_sta_req_auth(dev, &sdata->u.sta); 434 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
439 return 0; 435 return 0;
440 } 436 }
441 437
@@ -460,7 +456,7 @@ static int ieee80211_ioctl_giwessid(struct net_device *dev,
460 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 456 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
461 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 457 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
462 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 458 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
463 int res = ieee80211_sta_get_ssid(dev, ssid, &len); 459 int res = ieee80211_sta_get_ssid(sdata, ssid, &len);
464 if (res == 0) { 460 if (res == 0) {
465 data->length = len; 461 data->length = len;
466 data->flags = 1; 462 data->flags = 1;
@@ -504,10 +500,10 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
504 sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL; 500 sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL;
505 else 501 else
506 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; 502 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
507 ret = ieee80211_sta_set_bssid(dev, (u8 *) &ap_addr->sa_data); 503 ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data);
508 if (ret) 504 if (ret)
509 return ret; 505 return ret;
510 ieee80211_sta_req_auth(dev, &sdata->u.sta); 506 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
511 return 0; 507 return 0;
512 } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { 508 } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
513 /* 509 /*
@@ -539,8 +535,8 @@ static int ieee80211_ioctl_giwap(struct net_device *dev,
539 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 535 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
540 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 536 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
541 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 537 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
542 if (sdata->u.sta.state == IEEE80211_ASSOCIATED || 538 if (sdata->u.sta.state == IEEE80211_STA_MLME_ASSOCIATED ||
543 sdata->u.sta.state == IEEE80211_IBSS_JOINED) { 539 sdata->u.sta.state == IEEE80211_STA_MLME_IBSS_JOINED) {
544 ap_addr->sa_family = ARPHRD_ETHER; 540 ap_addr->sa_family = ARPHRD_ETHER;
545 memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN); 541 memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN);
546 return 0; 542 return 0;
@@ -584,7 +580,7 @@ static int ieee80211_ioctl_siwscan(struct net_device *dev,
584 ssid_len = req->essid_len; 580 ssid_len = req->essid_len;
585 } 581 }
586 582
587 return ieee80211_sta_req_scan(dev, ssid, ssid_len); 583 return ieee80211_sta_req_scan(sdata, ssid, ssid_len);
588} 584}
589 585
590 586
@@ -594,11 +590,14 @@ static int ieee80211_ioctl_giwscan(struct net_device *dev,
594{ 590{
595 int res; 591 int res;
596 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 592 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
593 struct ieee80211_sub_if_data *sdata;
594
595 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
597 596
598 if (local->sta_sw_scanning || local->sta_hw_scanning) 597 if (local->sta_sw_scanning || local->sta_hw_scanning)
599 return -EAGAIN; 598 return -EAGAIN;
600 599
601 res = ieee80211_sta_scan_results(dev, info, extra, data->length); 600 res = ieee80211_sta_scan_results(local, info, extra, data->length);
602 if (res >= 0) { 601 if (res >= 0) {
603 data->length = res; 602 data->length = res;
604 return 0; 603 return 0;
@@ -894,10 +893,10 @@ static int ieee80211_ioctl_siwmlme(struct net_device *dev,
894 switch (mlme->cmd) { 893 switch (mlme->cmd) {
895 case IW_MLME_DEAUTH: 894 case IW_MLME_DEAUTH:
896 /* TODO: mlme->addr.sa_data */ 895 /* TODO: mlme->addr.sa_data */
897 return ieee80211_sta_deauthenticate(dev, mlme->reason_code); 896 return ieee80211_sta_deauthenticate(sdata, mlme->reason_code);
898 case IW_MLME_DISASSOC: 897 case IW_MLME_DISASSOC:
899 /* TODO: mlme->addr.sa_data */ 898 /* TODO: mlme->addr.sa_data */
900 return ieee80211_sta_disassociate(dev, mlme->reason_code); 899 return ieee80211_sta_disassociate(sdata, mlme->reason_code);
901 default: 900 default:
902 return -EOPNOTSUPP; 901 return -EOPNOTSUPP;
903 } 902 }
@@ -938,7 +937,7 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev,
938 } 937 }
939 938
940 return ieee80211_set_encryption( 939 return ieee80211_set_encryption(
941 dev, bcaddr, 940 sdata, bcaddr,
942 idx, alg, remove, 941 idx, alg, remove,
943 !sdata->default_key, 942 !sdata->default_key,
944 keybuf, erq->length); 943 keybuf, erq->length);
@@ -1184,7 +1183,7 @@ static int ieee80211_ioctl_siwencodeext(struct net_device *dev,
1184 } else 1183 } else
1185 idx--; 1184 idx--;
1186 1185
1187 return ieee80211_set_encryption(dev, ext->addr.sa_data, idx, alg, 1186 return ieee80211_set_encryption(sdata, ext->addr.sa_data, idx, alg,
1188 remove, 1187 remove,
1189 ext->ext_flags & 1188 ext->ext_flags &
1190 IW_ENCODE_EXT_SET_TX_KEY, 1189 IW_ENCODE_EXT_SET_TX_KEY,
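A worked example for the siwfreq hunk above, assuming the usual wireless-extensions encoding of a frequency as value = m * 10^e Hz: when e > 0 the loop reduces div from 10^6 by one decade per exponent step, so m / div yields MHz. With m = 241200000 and e = 1 (illustrative numbers only), div = 100000 and 241200000 / 100000 = 2412 MHz, which ieee80211_set_freq() then resolves via ieee80211_get_channel() to channel 1 in the 2.4 GHz band.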
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 4310e2f65661..7229e958879d 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -47,8 +47,6 @@ static unsigned int classify_1d(struct sk_buff *skb)
47 return 0; 47 return 0;
48 } 48 }
49 49
50 if (dscp & 0x1c)
51 return 0;
52 return dscp >> 5; 50 return dscp >> 5;
53} 51}
54 52
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 04de28c071a6..465e274df7c5 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -14,8 +14,6 @@
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include "ieee80211_i.h" 15#include "ieee80211_i.h"
16 16
17#define QOS_CONTROL_LEN 2
18
19#define QOS_CONTROL_ACK_POLICY_NORMAL 0 17#define QOS_CONTROL_ACK_POLICY_NORMAL 0
20#define QOS_CONTROL_ACK_POLICY_NOACK 1 18#define QOS_CONTROL_ACK_POLICY_NOACK 1
21 19
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 2f33df0dcccf..78021780b885 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -127,7 +127,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
127 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 127 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
128 return RX_DROP_UNUSABLE; 128 return RX_DROP_UNUSABLE;
129 129
130 mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, 130 mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
131 (void *) skb->data); 131 (void *) skb->data);
132 return RX_DROP_UNUSABLE; 132 return RX_DROP_UNUSABLE;
133 } 133 }
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill-input.h
index f63d05045685..bbfa646157c6 100644
--- a/net/rfkill/rfkill-input.h
+++ b/net/rfkill/rfkill-input.h
@@ -13,5 +13,6 @@
13 13
14void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); 14void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state);
15void rfkill_epo(void); 15void rfkill_epo(void);
16void rfkill_restore_states(void);
16 17
17#endif /* __RFKILL_INPUT_H */ 18#endif /* __RFKILL_INPUT_H */
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 74aecc098bad..d5735799ccd9 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -37,14 +37,20 @@ MODULE_DESCRIPTION("RF switch support");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
38 38
39static LIST_HEAD(rfkill_list); /* list of registered rf switches */ 39static LIST_HEAD(rfkill_list); /* list of registered rf switches */
40static DEFINE_MUTEX(rfkill_mutex); 40static DEFINE_MUTEX(rfkill_global_mutex);
41 41
42static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED; 42static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED;
43module_param_named(default_state, rfkill_default_state, uint, 0444); 43module_param_named(default_state, rfkill_default_state, uint, 0444);
44MODULE_PARM_DESC(default_state, 44MODULE_PARM_DESC(default_state,
45 "Default initial state for all radio types, 0 = radio off"); 45 "Default initial state for all radio types, 0 = radio off");
46 46
47static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX]; 47struct rfkill_gsw_state {
48 enum rfkill_state current_state;
49 enum rfkill_state default_state;
50};
51
52static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX];
53static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
48 54
49static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list); 55static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list);
50 56
@@ -70,6 +76,7 @@ static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list);
70 */ 76 */
71int register_rfkill_notifier(struct notifier_block *nb) 77int register_rfkill_notifier(struct notifier_block *nb)
72{ 78{
79 BUG_ON(!nb);
73 return blocking_notifier_chain_register(&rfkill_notifier_list, nb); 80 return blocking_notifier_chain_register(&rfkill_notifier_list, nb);
74} 81}
75EXPORT_SYMBOL_GPL(register_rfkill_notifier); 82EXPORT_SYMBOL_GPL(register_rfkill_notifier);
@@ -85,6 +92,7 @@ EXPORT_SYMBOL_GPL(register_rfkill_notifier);
85 */ 92 */
86int unregister_rfkill_notifier(struct notifier_block *nb) 93int unregister_rfkill_notifier(struct notifier_block *nb)
87{ 94{
95 BUG_ON(!nb);
88 return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb); 96 return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb);
89} 97}
90EXPORT_SYMBOL_GPL(unregister_rfkill_notifier); 98EXPORT_SYMBOL_GPL(unregister_rfkill_notifier);
@@ -195,6 +203,11 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
195 * BLOCK even a transmitter that is already in state 203 * BLOCK even a transmitter that is already in state
196 * RFKILL_STATE_HARD_BLOCKED */ 204 * RFKILL_STATE_HARD_BLOCKED */
197 break; 205 break;
206 default:
207 WARN(1, KERN_WARNING
208 "rfkill: illegal state %d passed as parameter "
209 "to rfkill_toggle_radio\n", state);
210 return -EINVAL;
198 } 211 }
199 212
200 if (force || state != rfkill->state) { 213 if (force || state != rfkill->state) {
@@ -213,22 +226,29 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
213} 226}
214 227
215/** 228/**
216 * rfkill_switch_all - Toggle state of all switches of given type 229 * __rfkill_switch_all - Toggle state of all switches of given type
217 * @type: type of interfaces to be affected 230 * @type: type of interfaces to be affected
218 * @state: the new state 231 * @state: the new state
219 * 232 *
220 * This function toggles the state of all switches of given type, 233 * This function toggles the state of all switches of given type,
221 * unless a specific switch is claimed by userspace (in which case, 234 * unless a specific switch is claimed by userspace (in which case,
222 * that switch is left alone) or suspended. 235 * that switch is left alone) or suspended.
236 *
237 * Caller must have acquired rfkill_global_mutex.
223 */ 238 */
224void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) 239static void __rfkill_switch_all(const enum rfkill_type type,
240 const enum rfkill_state state)
225{ 241{
226 struct rfkill *rfkill; 242 struct rfkill *rfkill;
227 243
228 mutex_lock(&rfkill_mutex); 244 if (WARN((state >= RFKILL_STATE_MAX || type >= RFKILL_TYPE_MAX),
229 245 KERN_WARNING
230 rfkill_states[type] = state; 246 "rfkill: illegal state %d or type %d "
247 "passed as parameter to __rfkill_switch_all\n",
248 state, type))
249 return;
231 250
251 rfkill_global_states[type].current_state = state;
232 list_for_each_entry(rfkill, &rfkill_list, node) { 252 list_for_each_entry(rfkill, &rfkill_list, node) {
233 if ((!rfkill->user_claim) && (rfkill->type == type)) { 253 if ((!rfkill->user_claim) && (rfkill->type == type)) {
234 mutex_lock(&rfkill->mutex); 254 mutex_lock(&rfkill->mutex);
@@ -236,8 +256,21 @@ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
236 mutex_unlock(&rfkill->mutex); 256 mutex_unlock(&rfkill->mutex);
237 } 257 }
238 } 258 }
259}
239 260
240 mutex_unlock(&rfkill_mutex); 261/**
262 * rfkill_switch_all - Toggle state of all switches of given type
263 * @type: type of interfaces to be affected
264 * @state: the new state
265 *
266 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
267 * Please refer to __rfkill_switch_all() for details.
268 */
269void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
270{
271 mutex_lock(&rfkill_global_mutex);
272 __rfkill_switch_all(type, state);
273 mutex_unlock(&rfkill_global_mutex);
241} 274}
242EXPORT_SYMBOL(rfkill_switch_all); 275EXPORT_SYMBOL(rfkill_switch_all);
243 276
@@ -245,23 +278,53 @@ EXPORT_SYMBOL(rfkill_switch_all);
245 * rfkill_epo - emergency power off all transmitters 278 * rfkill_epo - emergency power off all transmitters
246 * 279 *
247 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, 280 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
248 * ignoring everything in its path but rfkill_mutex and rfkill->mutex. 281 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
282 *
283 * The global state before the EPO is saved and can be restored later
284 * using rfkill_restore_states().
249 */ 285 */
250void rfkill_epo(void) 286void rfkill_epo(void)
251{ 287{
252 struct rfkill *rfkill; 288 struct rfkill *rfkill;
289 int i;
290
291 mutex_lock(&rfkill_global_mutex);
253 292
254 mutex_lock(&rfkill_mutex);
255 list_for_each_entry(rfkill, &rfkill_list, node) { 293 list_for_each_entry(rfkill, &rfkill_list, node) {
256 mutex_lock(&rfkill->mutex); 294 mutex_lock(&rfkill->mutex);
257 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); 295 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
258 mutex_unlock(&rfkill->mutex); 296 mutex_unlock(&rfkill->mutex);
259 } 297 }
260 mutex_unlock(&rfkill_mutex); 298 for (i = 0; i < RFKILL_TYPE_MAX; i++) {
299 rfkill_global_states[i].default_state =
300 rfkill_global_states[i].current_state;
301 rfkill_global_states[i].current_state =
302 RFKILL_STATE_SOFT_BLOCKED;
303 }
304 mutex_unlock(&rfkill_global_mutex);
261} 305}
262EXPORT_SYMBOL_GPL(rfkill_epo); 306EXPORT_SYMBOL_GPL(rfkill_epo);
263 307
264/** 308/**
309 * rfkill_restore_states - restore global states
310 *
311 * Restore (and sync switches to) the global state from the
312 * states in rfkill_default_states. This can undo the effects of
313 * a call to rfkill_epo().
314 */
315void rfkill_restore_states(void)
316{
317 int i;
318
319 mutex_lock(&rfkill_global_mutex);
320
321 for (i = 0; i < RFKILL_TYPE_MAX; i++)
322 __rfkill_switch_all(i, rfkill_global_states[i].default_state);
323 mutex_unlock(&rfkill_global_mutex);
324}
325EXPORT_SYMBOL_GPL(rfkill_restore_states);
326
327/**
265 * rfkill_force_state - Force the internal rfkill radio state 328 * rfkill_force_state - Force the internal rfkill radio state
266 * @rfkill: pointer to the rfkill class to modify. 329 * @rfkill: pointer to the rfkill class to modify.
267 * @state: the current radio state the class should be forced to. 330 * @state: the current radio state the class should be forced to.
@@ -282,9 +345,11 @@ int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state)
282{ 345{
283 enum rfkill_state oldstate; 346 enum rfkill_state oldstate;
284 347
285 if (state != RFKILL_STATE_SOFT_BLOCKED && 348 BUG_ON(!rfkill);
286 state != RFKILL_STATE_UNBLOCKED && 349 if (WARN((state >= RFKILL_STATE_MAX),
287 state != RFKILL_STATE_HARD_BLOCKED) 350 KERN_WARNING
351 "rfkill: illegal state %d passed as parameter "
352 "to rfkill_force_state\n", state))
288 return -EINVAL; 353 return -EINVAL;
289 354
290 mutex_lock(&rfkill->mutex); 355 mutex_lock(&rfkill->mutex);
@@ -352,12 +417,16 @@ static ssize_t rfkill_state_store(struct device *dev,
352 const char *buf, size_t count) 417 const char *buf, size_t count)
353{ 418{
354 struct rfkill *rfkill = to_rfkill(dev); 419 struct rfkill *rfkill = to_rfkill(dev);
355 unsigned int state = simple_strtoul(buf, NULL, 0); 420 unsigned long state;
356 int error; 421 int error;
357 422
358 if (!capable(CAP_NET_ADMIN)) 423 if (!capable(CAP_NET_ADMIN))
359 return -EPERM; 424 return -EPERM;
360 425
426 error = strict_strtoul(buf, 0, &state);
427 if (error)
428 return error;
429
361 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ 430 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
362 if (state != RFKILL_STATE_UNBLOCKED && 431 if (state != RFKILL_STATE_UNBLOCKED &&
363 state != RFKILL_STATE_SOFT_BLOCKED) 432 state != RFKILL_STATE_SOFT_BLOCKED)
@@ -385,7 +454,8 @@ static ssize_t rfkill_claim_store(struct device *dev,
385 const char *buf, size_t count) 454 const char *buf, size_t count)
386{ 455{
387 struct rfkill *rfkill = to_rfkill(dev); 456 struct rfkill *rfkill = to_rfkill(dev);
388 bool claim = !!simple_strtoul(buf, NULL, 0); 457 unsigned long claim_tmp;
458 bool claim;
389 int error; 459 int error;
390 460
391 if (!capable(CAP_NET_ADMIN)) 461 if (!capable(CAP_NET_ADMIN))
@@ -394,11 +464,16 @@ static ssize_t rfkill_claim_store(struct device *dev,
394 if (rfkill->user_claim_unsupported) 464 if (rfkill->user_claim_unsupported)
395 return -EOPNOTSUPP; 465 return -EOPNOTSUPP;
396 466
467 error = strict_strtoul(buf, 0, &claim_tmp);
468 if (error)
469 return error;
470 claim = !!claim_tmp;
471
397 /* 472 /*
398 * Take the global lock to make sure the kernel is not in 473 * Take the global lock to make sure the kernel is not in
399 * the middle of rfkill_switch_all 474 * the middle of rfkill_switch_all
400 */ 475 */
401 error = mutex_lock_interruptible(&rfkill_mutex); 476 error = mutex_lock_interruptible(&rfkill_global_mutex);
402 if (error) 477 if (error)
403 return error; 478 return error;
404 479
@@ -406,14 +481,14 @@ static ssize_t rfkill_claim_store(struct device *dev,
406 if (!claim) { 481 if (!claim) {
407 mutex_lock(&rfkill->mutex); 482 mutex_lock(&rfkill->mutex);
408 rfkill_toggle_radio(rfkill, 483 rfkill_toggle_radio(rfkill,
409 rfkill_states[rfkill->type], 484 rfkill_global_states[rfkill->type].current_state,
410 0); 485 0);
411 mutex_unlock(&rfkill->mutex); 486 mutex_unlock(&rfkill->mutex);
412 } 487 }
413 rfkill->user_claim = claim; 488 rfkill->user_claim = claim;
414 } 489 }
415 490
416 mutex_unlock(&rfkill_mutex); 491 mutex_unlock(&rfkill_global_mutex);
417 492
418 return error ? error : count; 493 return error ? error : count;
419} 494}
@@ -525,24 +600,60 @@ static struct class rfkill_class = {
525 .dev_uevent = rfkill_dev_uevent, 600 .dev_uevent = rfkill_dev_uevent,
526}; 601};
527 602
603static int rfkill_check_duplicity(const struct rfkill *rfkill)
604{
605 struct rfkill *p;
606 unsigned long seen[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
607
608 memset(seen, 0, sizeof(seen));
609
610 list_for_each_entry(p, &rfkill_list, node) {
611 if (WARN((p == rfkill), KERN_WARNING
612 "rfkill: illegal attempt to register "
613 "an already registered rfkill struct\n"))
614 return -EEXIST;
615 set_bit(p->type, seen);
616 }
617
618 /* 0: first switch of its kind */
619 return test_bit(rfkill->type, seen);
620}
621
528static int rfkill_add_switch(struct rfkill *rfkill) 622static int rfkill_add_switch(struct rfkill *rfkill)
529{ 623{
530 mutex_lock(&rfkill_mutex); 624 int error;
531 625
532 rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type], 0); 626 mutex_lock(&rfkill_global_mutex);
627
628 error = rfkill_check_duplicity(rfkill);
629 if (error < 0)
630 goto unlock_out;
631
632 if (!error) {
633 /* lock default after first use */
634 set_bit(rfkill->type, rfkill_states_lockdflt);
635 rfkill_global_states[rfkill->type].current_state =
636 rfkill_global_states[rfkill->type].default_state;
637 }
638
639 rfkill_toggle_radio(rfkill,
640 rfkill_global_states[rfkill->type].current_state,
641 0);
533 642
534 list_add_tail(&rfkill->node, &rfkill_list); 643 list_add_tail(&rfkill->node, &rfkill_list);
535 644
536 mutex_unlock(&rfkill_mutex); 645 error = 0;
646unlock_out:
647 mutex_unlock(&rfkill_global_mutex);
537 648
538 return 0; 649 return error;
539} 650}
540 651
541static void rfkill_remove_switch(struct rfkill *rfkill) 652static void rfkill_remove_switch(struct rfkill *rfkill)
542{ 653{
543 mutex_lock(&rfkill_mutex); 654 mutex_lock(&rfkill_global_mutex);
544 list_del_init(&rfkill->node); 655 list_del_init(&rfkill->node);
545 mutex_unlock(&rfkill_mutex); 656 mutex_unlock(&rfkill_global_mutex);
546 657
547 mutex_lock(&rfkill->mutex); 658 mutex_lock(&rfkill->mutex);
548 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); 659 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
@@ -562,11 +673,18 @@ static void rfkill_remove_switch(struct rfkill *rfkill)
562 * NOTE: If registration fails the structure should be freed by calling 673 * NOTE: If registration fails the structure should be freed by calling
563 * rfkill_free() otherwise rfkill_unregister() should be used. 674 * rfkill_free() otherwise rfkill_unregister() should be used.
564 */ 675 */
565struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type) 676struct rfkill * __must_check rfkill_allocate(struct device *parent,
677 enum rfkill_type type)
566{ 678{
567 struct rfkill *rfkill; 679 struct rfkill *rfkill;
568 struct device *dev; 680 struct device *dev;
569 681
682 if (WARN((type >= RFKILL_TYPE_MAX),
683 KERN_WARNING
684 "rfkill: illegal type %d passed as parameter "
685 "to rfkill_allocate\n", type))
686 return NULL;
687
570 rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL); 688 rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL);
571 if (!rfkill) 689 if (!rfkill)
572 return NULL; 690 return NULL;
@@ -633,15 +751,18 @@ static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
633 * structure needs to be registered. Immediately from registration the 751 * structure needs to be registered. Immediately from registration the
634 * switch driver should be able to service calls to toggle_radio. 752 * switch driver should be able to service calls to toggle_radio.
635 */ 753 */
636int rfkill_register(struct rfkill *rfkill) 754int __must_check rfkill_register(struct rfkill *rfkill)
637{ 755{
638 static atomic_t rfkill_no = ATOMIC_INIT(0); 756 static atomic_t rfkill_no = ATOMIC_INIT(0);
639 struct device *dev = &rfkill->dev; 757 struct device *dev = &rfkill->dev;
640 int error; 758 int error;
641 759
642 if (!rfkill->toggle_radio) 760 if (WARN((!rfkill || !rfkill->toggle_radio ||
643 return -EINVAL; 761 rfkill->type >= RFKILL_TYPE_MAX ||
644 if (rfkill->type >= RFKILL_TYPE_MAX) 762 rfkill->state >= RFKILL_STATE_MAX),
763 KERN_WARNING
764 "rfkill: attempt to register a "
765 "badly initialized rfkill struct\n"))
645 return -EINVAL; 766 return -EINVAL;
646 767
647 snprintf(dev->bus_id, sizeof(dev->bus_id), 768 snprintf(dev->bus_id, sizeof(dev->bus_id),
@@ -676,6 +797,7 @@ EXPORT_SYMBOL(rfkill_register);
676 */ 797 */
677void rfkill_unregister(struct rfkill *rfkill) 798void rfkill_unregister(struct rfkill *rfkill)
678{ 799{
800 BUG_ON(!rfkill);
679 device_del(&rfkill->dev); 801 device_del(&rfkill->dev);
680 rfkill_remove_switch(rfkill); 802 rfkill_remove_switch(rfkill);
681 rfkill_led_trigger_unregister(rfkill); 803 rfkill_led_trigger_unregister(rfkill);
@@ -683,6 +805,56 @@ void rfkill_unregister(struct rfkill *rfkill)
683} 805}
684EXPORT_SYMBOL(rfkill_unregister); 806EXPORT_SYMBOL(rfkill_unregister);
685 807
808/**
809 * rfkill_set_default - set initial value for a switch type
810 * @type - the type of switch to set the default state of
811 * @state - the new default state for that group of switches
812 *
813 * Sets the initial state rfkill should use for a given type.
814 * The following initial states are allowed: RFKILL_STATE_SOFT_BLOCKED
815 * and RFKILL_STATE_UNBLOCKED.
816 *
817 * This function is meant to be used by platform drivers for platforms
818 * that can save switch state across power down/reboot.
819 *
820 * The default state for each switch type can be changed exactly once.
821 * After a switch of that type is registered, the default state cannot
822 * be changed anymore. This guards against multiple drivers it the
823 * same platform trying to set the initial switch default state, which
824 * is not allowed.
825 *
826 * Returns -EPERM if the state has already been set once or is in use,
827 * so drivers likely want to either ignore or at most printk(KERN_NOTICE)
828 * if this function returns -EPERM.
829 *
830 * Returns 0 if the new default state was set, or an error if it
831 * could not be set.
832 */
833int rfkill_set_default(enum rfkill_type type, enum rfkill_state state)
834{
835 int error;
836
837 if (WARN((type >= RFKILL_TYPE_MAX ||
838 (state != RFKILL_STATE_SOFT_BLOCKED &&
839 state != RFKILL_STATE_UNBLOCKED)),
840 KERN_WARNING
841 "rfkill: illegal state %d or type %d passed as "
842 "parameter to rfkill_set_default\n", state, type))
843 return -EINVAL;
844
845 mutex_lock(&rfkill_global_mutex);
846
847 if (!test_and_set_bit(type, rfkill_states_lockdflt)) {
848 rfkill_global_states[type].default_state = state;
849 error = 0;
850 } else
851 error = -EPERM;
852
853 mutex_unlock(&rfkill_global_mutex);
854 return error;
855}
856EXPORT_SYMBOL_GPL(rfkill_set_default);
857
686/* 858/*
687 * Rfkill module initialization/deinitialization. 859 * Rfkill module initialization/deinitialization.
688 */ 860 */
@@ -696,8 +868,8 @@ static int __init rfkill_init(void)
696 rfkill_default_state != RFKILL_STATE_UNBLOCKED) 868 rfkill_default_state != RFKILL_STATE_UNBLOCKED)
697 return -EINVAL; 869 return -EINVAL;
698 870
699 for (i = 0; i < ARRAY_SIZE(rfkill_states); i++) 871 for (i = 0; i < RFKILL_TYPE_MAX; i++)
700 rfkill_states[i] = rfkill_default_state; 872 rfkill_global_states[i].default_state = rfkill_default_state;
701 873
702 error = class_register(&rfkill_class); 874 error = class_register(&rfkill_class);
703 if (error) { 875 if (error) {
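Taken together, the rfkill.c changes above introduce per-type global default states that a platform driver may seed exactly once via rfkill_set_default() before the first switch of that type registers. A minimal sketch of that usage, assuming a hypothetical example_toggle_radio() callback and the struct rfkill fields of this API generation (illustration only, error paths trimmed):

	#include <linux/rfkill.h>

	static int example_toggle_radio(void *data, enum rfkill_state state)
	{
		/* hypothetical hook: program the real hardware kill switch here */
		return 0;
	}

	static int example_platform_wlan_init(struct device *parent, bool saved_blocked)
	{
		struct rfkill *rf;
		int err;

		/* Only succeeds before the first RFKILL_TYPE_WLAN switch registers;
		 * -EPERM afterwards is expected and may be ignored. */
		rfkill_set_default(RFKILL_TYPE_WLAN,
				   saved_blocked ? RFKILL_STATE_SOFT_BLOCKED
						 : RFKILL_STATE_UNBLOCKED);

		rf = rfkill_allocate(parent, RFKILL_TYPE_WLAN);
		if (!rf)
			return -ENOMEM;

		rf->name = "example-wlan";		/* hypothetical name */
		rf->toggle_radio = example_toggle_radio;

		err = rfkill_register(rf);
		if (err)
			rfkill_free(rf);		/* per the NOTE above */
		return err;
	}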
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9634091ee2f0..ec0a0839ce51 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -215,10 +215,9 @@ static void dev_watchdog(unsigned long arg)
215 time_after(jiffies, (dev->trans_start + 215 time_after(jiffies, (dev->trans_start +
216 dev->watchdog_timeo))) { 216 dev->watchdog_timeo))) {
217 char drivername[64]; 217 char drivername[64];
218 printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n", 218 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
219 dev->name, netdev_drivername(dev, drivername, 64)); 219 dev->name, netdev_drivername(dev, drivername, 64));
220 dev->tx_timeout(dev); 220 dev->tx_timeout(dev);
221 WARN_ON_ONCE(1);
222 } 221 }
223 if (!mod_timer(&dev->watchdog_timer, 222 if (!mod_timer(&dev->watchdog_timer,
224 round_jiffies(jiffies + 223 round_jiffies(jiffies +
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 3781e55046d0..a11959908d9a 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -388,6 +388,20 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
388 [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, 388 [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
389}; 389};
390 390
391static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
392 const struct nla_policy *policy, int len)
393{
394 int nested_len = nla_len(nla) - NLA_ALIGN(len);
395
396 if (nested_len < 0)
397 return -EINVAL;
398 if (nested_len >= nla_attr_size(0))
399 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
400 nested_len, policy);
401 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
402 return 0;
403}
404
391/* Parse netlink message to set options */ 405/* Parse netlink message to set options */
392static int netem_change(struct Qdisc *sch, struct nlattr *opt) 406static int netem_change(struct Qdisc *sch, struct nlattr *opt)
393{ 407{
@@ -399,8 +413,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
399 if (opt == NULL) 413 if (opt == NULL)
400 return -EINVAL; 414 return -EINVAL;
401 415
402 ret = nla_parse_nested_compat(tb, TCA_NETEM_MAX, opt, netem_policy, 416 qopt = nla_data(opt);
403 qopt, sizeof(*qopt)); 417 ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
404 if (ret < 0) 418 if (ret < 0)
405 return ret; 419 return ret;
406 420
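The new parse_attr() helper above handles the compat layout netem uses for TCA_OPTIONS: a fixed struct tc_netem_qopt placed directly in the attribute payload, optionally followed (after NLA_ALIGN padding) by nested netlink attributes, roughly:

	| nlattr header | struct tc_netem_qopt | align pad | nested nlattrs ... |

nla_len(nla) - NLA_ALIGN(len) is therefore the number of bytes left for the nested part; when that is smaller than an empty attribute header (nla_attr_size(0)), tb[] is simply zeroed so callers uniformly see "no nested options", which keeps old userspace binaries that send only the bare struct working.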
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index a6697c686c7f..504a78cdb718 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -254,16 +254,12 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
254{ 254{
255 struct prio_sched_data *q = qdisc_priv(sch); 255 struct prio_sched_data *q = qdisc_priv(sch);
256 unsigned char *b = skb_tail_pointer(skb); 256 unsigned char *b = skb_tail_pointer(skb);
257 struct nlattr *nest;
258 struct tc_prio_qopt opt; 257 struct tc_prio_qopt opt;
259 258
260 opt.bands = q->bands; 259 opt.bands = q->bands;
261 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); 260 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
262 261
263 nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt); 262 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
264 if (nest == NULL)
265 goto nla_put_failure;
266 nla_nest_compat_end(skb, nest);
267 263
268 return skb->len; 264 return skb->len;
269 265
diff --git a/net/wireless/core.c b/net/wireless/core.c
index f1da0b93bc56..7e995ac06a0c 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This is the linux wireless configuration interface. 2 * This is the linux wireless configuration interface.
3 * 3 *
4 * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2008 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#include <linux/if.h> 7#include <linux/if.h>
@@ -259,6 +259,13 @@ int wiphy_register(struct wiphy *wiphy)
259 struct ieee80211_supported_band *sband; 259 struct ieee80211_supported_band *sband;
260 bool have_band = false; 260 bool have_band = false;
261 int i; 261 int i;
262 u16 ifmodes = wiphy->interface_modes;
263
264 /* sanity check ifmodes */
265 WARN_ON(!ifmodes);
266 ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1;
267 if (WARN_ON(ifmodes != wiphy->interface_modes))
268 wiphy->interface_modes = ifmodes;
262 269
263 /* sanity check supported bands/channels */ 270 /* sanity check supported bands/channels */
264 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 271 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
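The interface_modes sanity check added above can be read bit by bit: ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) builds a mask with one bit per defined nl80211 interface type, and & ~1 additionally clears bit 0, which corresponds to NL80211_IFTYPE_UNSPECIFIED and is never a valid advertised mode. A driver that sets stray high bits or the UNSPECIFIED bit triggers the WARN_ON and has wiphy->interface_modes corrected to the legal subset.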
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 59eb2cf42e5f..77880ba8b619 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -87,6 +87,13 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
87 [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, 87 [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
88 .len = IEEE80211_MAX_MESH_ID_LEN }, 88 .len = IEEE80211_MAX_MESH_ID_LEN },
89 [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, 89 [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
90
91 [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 },
92 [NL80211_ATTR_BSS_SHORT_PREAMBLE] = { .type = NLA_U8 },
93 [NL80211_ATTR_BSS_SHORT_SLOT_TIME] = { .type = NLA_U8 },
94
95 [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY,
96 .len = NL80211_HT_CAPABILITY_LEN },
90}; 97};
91 98
92/* message building helper */ 99/* message building helper */
@@ -106,10 +113,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
106 struct nlattr *nl_bands, *nl_band; 113 struct nlattr *nl_bands, *nl_band;
107 struct nlattr *nl_freqs, *nl_freq; 114 struct nlattr *nl_freqs, *nl_freq;
108 struct nlattr *nl_rates, *nl_rate; 115 struct nlattr *nl_rates, *nl_rate;
116 struct nlattr *nl_modes;
109 enum ieee80211_band band; 117 enum ieee80211_band band;
110 struct ieee80211_channel *chan; 118 struct ieee80211_channel *chan;
111 struct ieee80211_rate *rate; 119 struct ieee80211_rate *rate;
112 int i; 120 int i;
121 u16 ifmodes = dev->wiphy.interface_modes;
113 122
114 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); 123 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY);
115 if (!hdr) 124 if (!hdr)
@@ -118,6 +127,20 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
118 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); 127 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx);
119 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); 128 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
120 129
130 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
131 if (!nl_modes)
132 goto nla_put_failure;
133
134 i = 0;
135 while (ifmodes) {
136 if (ifmodes & 1)
137 NLA_PUT_FLAG(msg, i);
138 ifmodes >>= 1;
139 i++;
140 }
141
142 nla_nest_end(msg, nl_modes);
143
121 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); 144 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS);
122 if (!nl_bands) 145 if (!nl_bands)
123 goto nla_put_failure; 146 goto nla_put_failure;
@@ -408,7 +431,8 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
408 ifindex = dev->ifindex; 431 ifindex = dev->ifindex;
409 dev_put(dev); 432 dev_put(dev);
410 433
411 if (!drv->ops->change_virtual_intf) { 434 if (!drv->ops->change_virtual_intf ||
435 !(drv->wiphy.interface_modes & (1 << type))) {
412 err = -EOPNOTSUPP; 436 err = -EOPNOTSUPP;
413 goto unlock; 437 goto unlock;
414 } 438 }
@@ -455,7 +479,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
455 if (IS_ERR(drv)) 479 if (IS_ERR(drv))
456 return PTR_ERR(drv); 480 return PTR_ERR(drv);
457 481
458 if (!drv->ops->add_virtual_intf) { 482 if (!drv->ops->add_virtual_intf ||
483 !(drv->wiphy.interface_modes & (1 << type))) {
459 err = -EOPNOTSUPP; 484 err = -EOPNOTSUPP;
460 goto unlock; 485 goto unlock;
461 } 486 }
@@ -1125,6 +1150,10 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
1125 params.listen_interval = 1150 params.listen_interval =
1126 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 1151 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
1127 1152
1153 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
1154 params.ht_capa =
1155 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
1156
1128 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], 1157 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS],
1129 &params.station_flags)) 1158 &params.station_flags))
1130 return -EINVAL; 1159 return -EINVAL;
@@ -1188,6 +1217,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1188 params.listen_interval = 1217 params.listen_interval =
1189 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 1218 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
1190 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); 1219 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
1220 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
1221 params.ht_capa =
1222 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
1191 1223
1192 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], 1224 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS],
1193 &params.station_flags)) 1225 &params.station_flags))
@@ -1525,6 +1557,48 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
1525 return err; 1557 return err;
1526} 1558}
1527 1559
1560static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
1561{
1562 struct cfg80211_registered_device *drv;
1563 int err;
1564 struct net_device *dev;
1565 struct bss_parameters params;
1566
1567 memset(&params, 0, sizeof(params));
1568 /* default to not changing parameters */
1569 params.use_cts_prot = -1;
1570 params.use_short_preamble = -1;
1571 params.use_short_slot_time = -1;
1572
1573 if (info->attrs[NL80211_ATTR_BSS_CTS_PROT])
1574 params.use_cts_prot =
1575 nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]);
1576 if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE])
1577 params.use_short_preamble =
1578 nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]);
1579 if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME])
1580 params.use_short_slot_time =
1581 nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]);
1582
1583 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1584 if (err)
1585 return err;
1586
1587 if (!drv->ops->change_bss) {
1588 err = -EOPNOTSUPP;
1589 goto out;
1590 }
1591
1592 rtnl_lock();
1593 err = drv->ops->change_bss(&drv->wiphy, dev, &params);
1594 rtnl_unlock();
1595
1596 out:
1597 cfg80211_put_dev(drv);
1598 dev_put(dev);
1599 return err;
1600}
1601
1528static struct genl_ops nl80211_ops[] = { 1602static struct genl_ops nl80211_ops[] = {
1529 { 1603 {
1530 .cmd = NL80211_CMD_GET_WIPHY, 1604 .cmd = NL80211_CMD_GET_WIPHY,
@@ -1656,6 +1730,12 @@ static struct genl_ops nl80211_ops[] = {
1656 .policy = nl80211_policy, 1730 .policy = nl80211_policy,
1657 .flags = GENL_ADMIN_PERM, 1731 .flags = GENL_ADMIN_PERM,
1658 }, 1732 },
1733 {
1734 .cmd = NL80211_CMD_SET_BSS,
1735 .doit = nl80211_set_bss,
1736 .policy = nl80211_policy,
1737 .flags = GENL_ADMIN_PERM,
1738 },
1659}; 1739};
1660 1740
1661/* multicast groups */ 1741/* multicast groups */
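With the NL80211_ATTR_SUPPORTED_IFTYPES nest added above, each set bit in wiphy->interface_modes becomes one flag attribute whose type is the corresponding nl80211 interface type: for example, a wiphy advertising (1 << NL80211_IFTYPE_STATION) | (1 << NL80211_IFTYPE_MONITOR) produces a nest containing exactly two flags, NL80211_IFTYPE_STATION and NL80211_IFTYPE_MONITOR, which userspace can enumerate by walking the nest. The new NL80211_CMD_SET_BSS handler follows the same "absent attribute means unchanged" convention, with the -1 sentinels in struct bss_parameters marking fields the driver should leave untouched.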
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b7754b1b73a4..ef9ccbc38752 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -34,7 +34,7 @@
34 34
35#include "xfrm_hash.h" 35#include "xfrm_hash.h"
36 36
37int sysctl_xfrm_larval_drop __read_mostly; 37int sysctl_xfrm_larval_drop __read_mostly = 1;
38 38
39#ifdef CONFIG_XFRM_STATISTICS 39#ifdef CONFIG_XFRM_STATISTICS
40DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly; 40DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly;