Diffstat (limited to 'net')
84 files changed, 7874 insertions, 5206 deletions
diff --git a/net/Kconfig b/net/Kconfig
index 7612cc8c337c..d87de48ba656 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -236,14 +236,18 @@ source "net/rxrpc/Kconfig" | |||
236 | config FIB_RULES | 236 | config FIB_RULES |
237 | bool | 237 | bool |
238 | 238 | ||
239 | menu "Wireless" | 239 | menuconfig WIRELESS |
240 | bool "Wireless" | ||
240 | depends on !S390 | 241 | depends on !S390 |
242 | default y | ||
243 | |||
244 | if WIRELESS | ||
241 | 245 | ||
242 | source "net/wireless/Kconfig" | 246 | source "net/wireless/Kconfig" |
243 | source "net/mac80211/Kconfig" | 247 | source "net/mac80211/Kconfig" |
244 | source "net/ieee80211/Kconfig" | 248 | source "net/ieee80211/Kconfig" |
245 | 249 | ||
246 | endmenu | 250 | endif # WIRELESS |
247 | 251 | ||
248 | source "net/rfkill/Kconfig" | 252 | source "net/rfkill/Kconfig" |
249 | source "net/9p/Kconfig" | 253 | source "net/9p/Kconfig" |
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 573acdf6f9ff..4d2c1f1cb524 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -28,6 +28,10 @@ static const struct stp_proto br_stp_proto = { | |||
28 | .rcv = br_stp_rcv, | 28 | .rcv = br_stp_rcv, |
29 | }; | 29 | }; |
30 | 30 | ||
31 | static struct pernet_operations br_net_ops = { | ||
32 | .exit = br_net_exit, | ||
33 | }; | ||
34 | |||
31 | static int __init br_init(void) | 35 | static int __init br_init(void) |
32 | { | 36 | { |
33 | int err; | 37 | int err; |
@@ -42,18 +46,22 @@ static int __init br_init(void) | |||
42 | if (err) | 46 | if (err) |
43 | goto err_out; | 47 | goto err_out; |
44 | 48 | ||
45 | err = br_netfilter_init(); | 49 | err = register_pernet_subsys(&br_net_ops); |
46 | if (err) | 50 | if (err) |
47 | goto err_out1; | 51 | goto err_out1; |
48 | 52 | ||
49 | err = register_netdevice_notifier(&br_device_notifier); | 53 | err = br_netfilter_init(); |
50 | if (err) | 54 | if (err) |
51 | goto err_out2; | 55 | goto err_out2; |
52 | 56 | ||
53 | err = br_netlink_init(); | 57 | err = register_netdevice_notifier(&br_device_notifier); |
54 | if (err) | 58 | if (err) |
55 | goto err_out3; | 59 | goto err_out3; |
56 | 60 | ||
61 | err = br_netlink_init(); | ||
62 | if (err) | ||
63 | goto err_out4; | ||
64 | |||
57 | brioctl_set(br_ioctl_deviceless_stub); | 65 | brioctl_set(br_ioctl_deviceless_stub); |
58 | br_handle_frame_hook = br_handle_frame; | 66 | br_handle_frame_hook = br_handle_frame; |
59 | 67 | ||
@@ -61,10 +69,12 @@ static int __init br_init(void) | |||
61 | br_fdb_put_hook = br_fdb_put; | 69 | br_fdb_put_hook = br_fdb_put; |
62 | 70 | ||
63 | return 0; | 71 | return 0; |
64 | err_out3: | 72 | err_out4: |
65 | unregister_netdevice_notifier(&br_device_notifier); | 73 | unregister_netdevice_notifier(&br_device_notifier); |
66 | err_out2: | 74 | err_out3: |
67 | br_netfilter_fini(); | 75 | br_netfilter_fini(); |
76 | err_out2: | ||
77 | unregister_pernet_subsys(&br_net_ops); | ||
68 | err_out1: | 78 | err_out1: |
69 | br_fdb_fini(); | 79 | br_fdb_fini(); |
70 | err_out: | 80 | err_out: |
@@ -80,7 +90,7 @@ static void __exit br_deinit(void) | |||
80 | unregister_netdevice_notifier(&br_device_notifier); | 90 | unregister_netdevice_notifier(&br_device_notifier); |
81 | brioctl_set(NULL); | 91 | brioctl_set(NULL); |
82 | 92 | ||
83 | br_cleanup_bridges(); | 93 | unregister_pernet_subsys(&br_net_ops); |
84 | 94 | ||
85 | synchronize_net(); | 95 | synchronize_net(); |
86 | 96 | ||
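The br.c hunk above replaces the single global br_cleanup_bridges() teardown with a pernet_operations exit hook, so bridges are destroyed per network namespace when that namespace goes away. For readers unfamiliar with the pattern, a minimal sketch follows; the names below are illustrative and not part of the patch (the bridge code itself registers only an .exit handler):

	#include <net/net_namespace.h>

	static int __net_init example_net_init(struct net *net)
	{
		/* allocate any per-namespace state here */
		return 0;
	}

	static void __net_exit example_net_exit(struct net *net)
	{
		/* tear down everything owned by the dying namespace */
	}

	static struct pernet_operations example_net_ops = {
		.init = example_net_init,
		.exit = example_net_exit,
	};

	/* register_pernet_subsys(&example_net_ops) from module init,
	 * unregister_pernet_subsys(&example_net_ops) from module exit. */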
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 4f52c3d50ebe..22ba8632196f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -178,5 +178,6 @@ void br_dev_setup(struct net_device *dev) | |||
178 | dev->priv_flags = IFF_EBRIDGE; | 178 | dev->priv_flags = IFF_EBRIDGE; |
179 | 179 | ||
180 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | | 180 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | |
181 | NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX; | 181 | NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX | |
182 | NETIF_F_NETNS_LOCAL; | ||
182 | } | 183 | } |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 63c18aacde8c..573e20f7dba4 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -168,7 +168,7 @@ static void del_br(struct net_bridge *br) | |||
168 | unregister_netdevice(br->dev); | 168 | unregister_netdevice(br->dev); |
169 | } | 169 | } |
170 | 170 | ||
171 | static struct net_device *new_bridge_dev(const char *name) | 171 | static struct net_device *new_bridge_dev(struct net *net, const char *name) |
172 | { | 172 | { |
173 | struct net_bridge *br; | 173 | struct net_bridge *br; |
174 | struct net_device *dev; | 174 | struct net_device *dev; |
@@ -178,6 +178,7 @@ static struct net_device *new_bridge_dev(const char *name) | |||
178 | 178 | ||
179 | if (!dev) | 179 | if (!dev) |
180 | return NULL; | 180 | return NULL; |
181 | dev_net_set(dev, net); | ||
181 | 182 | ||
182 | br = netdev_priv(dev); | 183 | br = netdev_priv(dev); |
183 | br->dev = dev; | 184 | br->dev = dev; |
@@ -262,12 +263,12 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, | |||
262 | return p; | 263 | return p; |
263 | } | 264 | } |
264 | 265 | ||
265 | int br_add_bridge(const char *name) | 266 | int br_add_bridge(struct net *net, const char *name) |
266 | { | 267 | { |
267 | struct net_device *dev; | 268 | struct net_device *dev; |
268 | int ret; | 269 | int ret; |
269 | 270 | ||
270 | dev = new_bridge_dev(name); | 271 | dev = new_bridge_dev(net, name); |
271 | if (!dev) | 272 | if (!dev) |
272 | return -ENOMEM; | 273 | return -ENOMEM; |
273 | 274 | ||
@@ -294,13 +295,13 @@ out_free: | |||
294 | goto out; | 295 | goto out; |
295 | } | 296 | } |
296 | 297 | ||
297 | int br_del_bridge(const char *name) | 298 | int br_del_bridge(struct net *net, const char *name) |
298 | { | 299 | { |
299 | struct net_device *dev; | 300 | struct net_device *dev; |
300 | int ret = 0; | 301 | int ret = 0; |
301 | 302 | ||
302 | rtnl_lock(); | 303 | rtnl_lock(); |
303 | dev = __dev_get_by_name(&init_net, name); | 304 | dev = __dev_get_by_name(net, name); |
304 | if (dev == NULL) | 305 | if (dev == NULL) |
305 | ret = -ENXIO; /* Could not find device */ | 306 | ret = -ENXIO; /* Could not find device */ |
306 | 307 | ||
@@ -445,13 +446,13 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) | |||
445 | return 0; | 446 | return 0; |
446 | } | 447 | } |
447 | 448 | ||
448 | void __exit br_cleanup_bridges(void) | 449 | void br_net_exit(struct net *net) |
449 | { | 450 | { |
450 | struct net_device *dev; | 451 | struct net_device *dev; |
451 | 452 | ||
452 | rtnl_lock(); | 453 | rtnl_lock(); |
453 | restart: | 454 | restart: |
454 | for_each_netdev(&init_net, dev) { | 455 | for_each_netdev(net, dev) { |
455 | if (dev->priv_flags & IFF_EBRIDGE) { | 456 | if (dev->priv_flags & IFF_EBRIDGE) { |
456 | del_br(dev->priv); | 457 | del_br(dev->priv); |
457 | goto restart; | 458 | goto restart; |
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 5bbf07362172..6a6433daaf27 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -21,12 +21,12 @@ | |||
21 | #include "br_private.h" | 21 | #include "br_private.h" |
22 | 22 | ||
23 | /* called with RTNL */ | 23 | /* called with RTNL */ |
24 | static int get_bridge_ifindices(int *indices, int num) | 24 | static int get_bridge_ifindices(struct net *net, int *indices, int num) |
25 | { | 25 | { |
26 | struct net_device *dev; | 26 | struct net_device *dev; |
27 | int i = 0; | 27 | int i = 0; |
28 | 28 | ||
29 | for_each_netdev(&init_net, dev) { | 29 | for_each_netdev(net, dev) { |
30 | if (i >= num) | 30 | if (i >= num) |
31 | break; | 31 | break; |
32 | if (dev->priv_flags & IFF_EBRIDGE) | 32 | if (dev->priv_flags & IFF_EBRIDGE) |
@@ -89,7 +89,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd) | |||
89 | if (!capable(CAP_NET_ADMIN)) | 89 | if (!capable(CAP_NET_ADMIN)) |
90 | return -EPERM; | 90 | return -EPERM; |
91 | 91 | ||
92 | dev = dev_get_by_index(&init_net, ifindex); | 92 | dev = dev_get_by_index(dev_net(br->dev), ifindex); |
93 | if (dev == NULL) | 93 | if (dev == NULL) |
94 | return -EINVAL; | 94 | return -EINVAL; |
95 | 95 | ||
@@ -315,7 +315,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
315 | return -EOPNOTSUPP; | 315 | return -EOPNOTSUPP; |
316 | } | 316 | } |
317 | 317 | ||
318 | static int old_deviceless(void __user *uarg) | 318 | static int old_deviceless(struct net *net, void __user *uarg) |
319 | { | 319 | { |
320 | unsigned long args[3]; | 320 | unsigned long args[3]; |
321 | 321 | ||
@@ -337,7 +337,7 @@ static int old_deviceless(void __user *uarg) | |||
337 | if (indices == NULL) | 337 | if (indices == NULL) |
338 | return -ENOMEM; | 338 | return -ENOMEM; |
339 | 339 | ||
340 | args[2] = get_bridge_ifindices(indices, args[2]); | 340 | args[2] = get_bridge_ifindices(net, indices, args[2]); |
341 | 341 | ||
342 | ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int)) | 342 | ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int)) |
343 | ? -EFAULT : args[2]; | 343 | ? -EFAULT : args[2]; |
@@ -360,9 +360,9 @@ static int old_deviceless(void __user *uarg) | |||
360 | buf[IFNAMSIZ-1] = 0; | 360 | buf[IFNAMSIZ-1] = 0; |
361 | 361 | ||
362 | if (args[0] == BRCTL_ADD_BRIDGE) | 362 | if (args[0] == BRCTL_ADD_BRIDGE) |
363 | return br_add_bridge(buf); | 363 | return br_add_bridge(net, buf); |
364 | 364 | ||
365 | return br_del_bridge(buf); | 365 | return br_del_bridge(net, buf); |
366 | } | 366 | } |
367 | } | 367 | } |
368 | 368 | ||
@@ -374,7 +374,7 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uar | |||
374 | switch (cmd) { | 374 | switch (cmd) { |
375 | case SIOCGIFBR: | 375 | case SIOCGIFBR: |
376 | case SIOCSIFBR: | 376 | case SIOCSIFBR: |
377 | return old_deviceless(uarg); | 377 | return old_deviceless(net, uarg); |
378 | 378 | ||
379 | case SIOCBRADDBR: | 379 | case SIOCBRADDBR: |
380 | case SIOCBRDELBR: | 380 | case SIOCBRDELBR: |
@@ -389,9 +389,9 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uar | |||
389 | 389 | ||
390 | buf[IFNAMSIZ-1] = 0; | 390 | buf[IFNAMSIZ-1] = 0; |
391 | if (cmd == SIOCBRADDBR) | 391 | if (cmd == SIOCBRADDBR) |
392 | return br_add_bridge(buf); | 392 | return br_add_bridge(net, buf); |
393 | 393 | ||
394 | return br_del_bridge(buf); | 394 | return br_del_bridge(net, buf); |
395 | } | 395 | } |
396 | } | 396 | } |
397 | return -EOPNOTSUPP; | 397 | return -EOPNOTSUPP; |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f155e6ce8a21..ba7be195803c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -82,6 +82,7 @@ nla_put_failure: | |||
82 | */ | 82 | */ |
83 | void br_ifinfo_notify(int event, struct net_bridge_port *port) | 83 | void br_ifinfo_notify(int event, struct net_bridge_port *port) |
84 | { | 84 | { |
85 | struct net *net = dev_net(port->dev); | ||
85 | struct sk_buff *skb; | 86 | struct sk_buff *skb; |
86 | int err = -ENOBUFS; | 87 | int err = -ENOBUFS; |
87 | 88 | ||
@@ -97,10 +98,10 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port) | |||
97 | kfree_skb(skb); | 98 | kfree_skb(skb); |
98 | goto errout; | 99 | goto errout; |
99 | } | 100 | } |
100 | err = rtnl_notify(skb, &init_net,0, RTNLGRP_LINK, NULL, GFP_ATOMIC); | 101 | err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); |
101 | errout: | 102 | errout: |
102 | if (err < 0) | 103 | if (err < 0) |
103 | rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err); | 104 | rtnl_set_sk_err(net, RTNLGRP_LINK, err); |
104 | } | 105 | } |
105 | 106 | ||
106 | /* | 107 | /* |
@@ -112,11 +113,8 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
112 | struct net_device *dev; | 113 | struct net_device *dev; |
113 | int idx; | 114 | int idx; |
114 | 115 | ||
115 | if (net != &init_net) | ||
116 | return 0; | ||
117 | |||
118 | idx = 0; | 116 | idx = 0; |
119 | for_each_netdev(&init_net, dev) { | 117 | for_each_netdev(net, dev) { |
120 | /* not a bridge port */ | 118 | /* not a bridge port */ |
121 | if (dev->br_port == NULL || idx < cb->args[0]) | 119 | if (dev->br_port == NULL || idx < cb->args[0]) |
122 | goto skip; | 120 | goto skip; |
@@ -147,9 +145,6 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
147 | struct net_bridge_port *p; | 145 | struct net_bridge_port *p; |
148 | u8 new_state; | 146 | u8 new_state; |
149 | 147 | ||
150 | if (net != &init_net) | ||
151 | return -EINVAL; | ||
152 | |||
153 | if (nlmsg_len(nlh) < sizeof(*ifm)) | 148 | if (nlmsg_len(nlh) < sizeof(*ifm)) |
154 | return -EINVAL; | 149 | return -EINVAL; |
155 | 150 | ||
@@ -165,7 +160,7 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
165 | if (new_state > BR_STATE_BLOCKING) | 160 | if (new_state > BR_STATE_BLOCKING) |
166 | return -EINVAL; | 161 | return -EINVAL; |
167 | 162 | ||
168 | dev = __dev_get_by_index(&init_net, ifm->ifi_index); | 163 | dev = __dev_get_by_index(net, ifm->ifi_index); |
169 | if (!dev) | 164 | if (!dev) |
170 | return -ENODEV; | 165 | return -ENODEV; |
171 | 166 | ||
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 76340bdd052e..763a3ec292e5 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -35,9 +35,6 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v | |||
35 | struct net_bridge_port *p = dev->br_port; | 35 | struct net_bridge_port *p = dev->br_port; |
36 | struct net_bridge *br; | 36 | struct net_bridge *br; |
37 | 37 | ||
38 | if (!net_eq(dev_net(dev), &init_net)) | ||
39 | return NOTIFY_DONE; | ||
40 | |||
41 | /* not a port of a bridge */ | 38 | /* not a port of a bridge */ |
42 | if (p == NULL) | 39 | if (p == NULL) |
43 | return NOTIFY_DONE; | 40 | return NOTIFY_DONE; |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c3dc18ddc043..b6c3b71974dc 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -178,9 +178,9 @@ extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb); | |||
178 | 178 | ||
179 | /* br_if.c */ | 179 | /* br_if.c */ |
180 | extern void br_port_carrier_check(struct net_bridge_port *p); | 180 | extern void br_port_carrier_check(struct net_bridge_port *p); |
181 | extern int br_add_bridge(const char *name); | 181 | extern int br_add_bridge(struct net *net, const char *name); |
182 | extern int br_del_bridge(const char *name); | 182 | extern int br_del_bridge(struct net *net, const char *name); |
183 | extern void br_cleanup_bridges(void); | 183 | extern void br_net_exit(struct net *net); |
184 | extern int br_add_if(struct net_bridge *br, | 184 | extern int br_add_if(struct net_bridge *br, |
185 | struct net_device *dev); | 185 | struct net_device *dev); |
186 | extern int br_del_if(struct net_bridge *br, | 186 | extern int br_del_if(struct net_bridge *br, |
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 8b200f96f722..81ae40b3f655 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -140,9 +140,6 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, | |||
140 | struct net_bridge *br; | 140 | struct net_bridge *br; |
141 | const unsigned char *buf; | 141 | const unsigned char *buf; |
142 | 142 | ||
143 | if (!net_eq(dev_net(dev), &init_net)) | ||
144 | goto err; | ||
145 | |||
146 | if (!p) | 143 | if (!p) |
147 | goto err; | 144 | goto err; |
148 | 145 | ||
diff --git a/net/core/Makefile b/net/core/Makefile
index b1332f6d0042..26a37cb31923 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -6,6 +6,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ | |||
6 | gen_stats.o gen_estimator.o net_namespace.o | 6 | gen_stats.o gen_estimator.o net_namespace.o |
7 | 7 | ||
8 | obj-$(CONFIG_SYSCTL) += sysctl_net_core.o | 8 | obj-$(CONFIG_SYSCTL) += sysctl_net_core.o |
9 | obj-$(CONFIG_HAS_DMA) += skb_dma_map.o | ||
9 | 10 | ||
10 | obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ | 11 | obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ |
11 | neighbour.o rtnetlink.o utils.o link_watch.o filter.o | 12 | neighbour.o rtnetlink.o utils.o link_watch.o filter.o |
diff --git a/net/core/dev.c b/net/core/dev.c
index e719ed29310f..f48d1b24f9ce 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4668,6 +4668,12 @@ int netdev_compute_features(unsigned long all, unsigned long one) | |||
4668 | one |= NETIF_F_GSO_SOFTWARE; | 4668 | one |= NETIF_F_GSO_SOFTWARE; |
4669 | one |= NETIF_F_GSO; | 4669 | one |= NETIF_F_GSO; |
4670 | 4670 | ||
4671 | /* | ||
4672 | * If even one device supports a GSO protocol with software fallback, | ||
4673 | * enable it for all. | ||
4674 | */ | ||
4675 | all |= one & NETIF_F_GSO_SOFTWARE; | ||
4676 | |||
4671 | /* If even one device supports robust GSO, enable it for all. */ | 4677 | /* If even one device supports robust GSO, enable it for all. */ |
4672 | if (one & NETIF_F_GSO_ROBUST) | 4678 | if (one & NETIF_F_GSO_ROBUST) |
4673 | all |= NETIF_F_GSO_ROBUST; | 4679 | all |= NETIF_F_GSO_ROBUST; |
diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c
new file mode 100644
index 000000000000..1f49afcd8e86
--- /dev/null
+++ b/net/core/skb_dma_map.c
@@ -0,0 +1,66 @@ | |||
1 | /* skb_dma_map.c: DMA mapping helpers for socket buffers. | ||
2 | * | ||
3 | * Copyright (C) David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/dma-mapping.h> | ||
9 | #include <linux/skbuff.h> | ||
10 | |||
11 | int skb_dma_map(struct device *dev, struct sk_buff *skb, | ||
12 | enum dma_data_direction dir) | ||
13 | { | ||
14 | struct skb_shared_info *sp = skb_shinfo(skb); | ||
15 | dma_addr_t map; | ||
16 | int i; | ||
17 | |||
18 | map = dma_map_single(dev, skb->data, | ||
19 | skb_headlen(skb), dir); | ||
20 | if (dma_mapping_error(dev, map)) | ||
21 | goto out_err; | ||
22 | |||
23 | sp->dma_maps[0] = map; | ||
24 | for (i = 0; i < sp->nr_frags; i++) { | ||
25 | skb_frag_t *fp = &sp->frags[i]; | ||
26 | |||
27 | map = dma_map_page(dev, fp->page, fp->page_offset, | ||
28 | fp->size, dir); | ||
29 | if (dma_mapping_error(dev, map)) | ||
30 | goto unwind; | ||
31 | sp->dma_maps[i + 1] = map; | ||
32 | } | ||
33 | sp->num_dma_maps = i + 1; | ||
34 | |||
35 | return 0; | ||
36 | |||
37 | unwind: | ||
38 | while (i-- >= 0) { | ||
39 | skb_frag_t *fp = &sp->frags[i]; | ||
40 | |||
41 | dma_unmap_page(dev, sp->dma_maps[i + 1], | ||
42 | fp->size, dir); | ||
43 | } | ||
44 | dma_unmap_single(dev, sp->dma_maps[0], | ||
45 | skb_headlen(skb), dir); | ||
46 | out_err: | ||
47 | return -ENOMEM; | ||
48 | } | ||
49 | EXPORT_SYMBOL(skb_dma_map); | ||
50 | |||
51 | void skb_dma_unmap(struct device *dev, struct sk_buff *skb, | ||
52 | enum dma_data_direction dir) | ||
53 | { | ||
54 | struct skb_shared_info *sp = skb_shinfo(skb); | ||
55 | int i; | ||
56 | |||
57 | dma_unmap_single(dev, sp->dma_maps[0], | ||
58 | skb_headlen(skb), dir); | ||
59 | for (i = 0; i < sp->nr_frags; i++) { | ||
60 | skb_frag_t *fp = &sp->frags[i]; | ||
61 | |||
62 | dma_unmap_page(dev, sp->dma_maps[i + 1], | ||
63 | fp->size, dir); | ||
64 | } | ||
65 | } | ||
66 | EXPORT_SYMBOL(skb_dma_unmap); | ||
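The new skb_dma_map()/skb_dma_unmap() helpers above centralize DMA mapping of an skb's linear area and its page fragments (the dma_maps[]/num_dma_maps fields they fill live in skb_shared_info, added outside this net/ diffstat). A hedged sketch of how a driver transmit path might use them; the function name, device pointer and queueing steps are made-up placeholders, not from this patch:

	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>

	static int example_xmit(struct device *dma_dev, struct sk_buff *skb)
	{
		struct skb_shared_info *sp = skb_shinfo(skb);
		int i;

		if (skb_dma_map(dma_dev, skb, DMA_TO_DEVICE))
			return -ENOMEM;		/* nothing was left mapped */

		/* dma_maps[0] covers the linear header: queue it first,
		 * with length skb_headlen(skb). */
		for (i = 0; i < sp->nr_frags; i++) {
			/* queue sp->dma_maps[i + 1], length sp->frags[i].size */
		}

		/* once the hardware signals TX completion: */
		skb_dma_unmap(dma_dev, skb, DMA_TO_DEVICE);
		return 0;
	}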
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 8e9580874216..9a430734530c 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -783,7 +783,7 @@ static struct ccid_operations ccid2 = { | |||
783 | }; | 783 | }; |
784 | 784 | ||
785 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | 785 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG |
786 | module_param(ccid2_debug, bool, 0444); | 786 | module_param(ccid2_debug, bool, 0644); |
787 | MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); | 787 | MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); |
788 | #endif | 788 | #endif |
789 | 789 | ||
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index f6756e0c9e69..3b8bd7ca6761 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -963,7 +963,7 @@ static struct ccid_operations ccid3 = { | |||
963 | }; | 963 | }; |
964 | 964 | ||
965 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG | 965 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
966 | module_param(ccid3_debug, bool, 0444); | 966 | module_param(ccid3_debug, bool, 0644); |
967 | MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); | 967 | MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); |
968 | #endif | 968 | #endif |
969 | 969 | ||
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index bcd6ac415bb9..5b3ce0688c5c 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -67,7 +67,10 @@ static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh) | |||
67 | u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0; | 67 | u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0; |
68 | int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */ | 68 | int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */ |
69 | 69 | ||
70 | for (i=0; i <= k; i++) { | 70 | if (k <= 0) |
71 | return; | ||
72 | |||
73 | for (i = 0; i <= k; i++) { | ||
71 | i_i = tfrc_lh_get_interval(lh, i); | 74 | i_i = tfrc_lh_get_interval(lh, i); |
72 | 75 | ||
73 | if (i < k) { | 76 | if (i < k) { |
@@ -78,7 +81,6 @@ static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh) | |||
78 | i_tot1 += i_i * tfrc_lh_weights[i-1]; | 81 | i_tot1 += i_i * tfrc_lh_weights[i-1]; |
79 | } | 82 | } |
80 | 83 | ||
81 | BUG_ON(w_tot == 0); | ||
82 | lh->i_mean = max(i_tot0, i_tot1) / w_tot; | 84 | lh->i_mean = max(i_tot0, i_tot1) / w_tot; |
83 | } | 85 | } |
84 | 86 | ||
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 97ecec0a8e76..185916218e07 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -10,7 +10,7 @@ | |||
10 | 10 | ||
11 | #ifdef CONFIG_IP_DCCP_TFRC_DEBUG | 11 | #ifdef CONFIG_IP_DCCP_TFRC_DEBUG |
12 | int tfrc_debug; | 12 | int tfrc_debug; |
13 | module_param(tfrc_debug, bool, 0444); | 13 | module_param(tfrc_debug, bool, 0644); |
14 | MODULE_PARM_DESC(tfrc_debug, "Enable debug messages"); | 14 | MODULE_PARM_DESC(tfrc_debug, "Enable debug messages"); |
15 | #endif | 15 | #endif |
16 | 16 | ||
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 803933ab396d..779d0ed9ae94 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -370,7 +370,7 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
370 | goto discard; | 370 | goto discard; |
371 | 371 | ||
372 | if (dccp_parse_options(sk, NULL, skb)) | 372 | if (dccp_parse_options(sk, NULL, skb)) |
373 | goto discard; | 373 | return 1; |
374 | 374 | ||
375 | if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) | 375 | if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) |
376 | dccp_event_ack_recv(sk, skb); | 376 | dccp_event_ack_recv(sk, skb); |
@@ -610,7 +610,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
610 | * Step 8: Process options and mark acknowledgeable | 610 | * Step 8: Process options and mark acknowledgeable |
611 | */ | 611 | */ |
612 | if (dccp_parse_options(sk, NULL, skb)) | 612 | if (dccp_parse_options(sk, NULL, skb)) |
613 | goto discard; | 613 | return 1; |
614 | 614 | ||
615 | if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) | 615 | if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) |
616 | dccp_event_ack_recv(sk, skb); | 616 | dccp_event_ack_recv(sk, skb); |
diff --git a/net/dccp/options.c b/net/dccp/options.c
index dc7c158a2f4b..0809b63cb055 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -81,11 +81,11 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, | |||
81 | /* Check if this isn't a single byte option */ | 81 | /* Check if this isn't a single byte option */ |
82 | if (opt > DCCPO_MAX_RESERVED) { | 82 | if (opt > DCCPO_MAX_RESERVED) { |
83 | if (opt_ptr == opt_end) | 83 | if (opt_ptr == opt_end) |
84 | goto out_invalid_option; | 84 | goto out_nonsensical_length; |
85 | 85 | ||
86 | len = *opt_ptr++; | 86 | len = *opt_ptr++; |
87 | if (len < 3) | 87 | if (len < 2) |
88 | goto out_invalid_option; | 88 | goto out_nonsensical_length; |
89 | /* | 89 | /* |
90 | * Remove the type and len fields, leaving | 90 | * Remove the type and len fields, leaving |
91 | * just the value size | 91 | * just the value size |
@@ -95,7 +95,7 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, | |||
95 | opt_ptr += len; | 95 | opt_ptr += len; |
96 | 96 | ||
97 | if (opt_ptr > opt_end) | 97 | if (opt_ptr > opt_end) |
98 | goto out_invalid_option; | 98 | goto out_nonsensical_length; |
99 | } | 99 | } |
100 | 100 | ||
101 | /* | 101 | /* |
@@ -283,12 +283,17 @@ ignore_option: | |||
283 | if (mandatory) | 283 | if (mandatory) |
284 | goto out_invalid_option; | 284 | goto out_invalid_option; |
285 | 285 | ||
286 | out_nonsensical_length: | ||
287 | /* RFC 4340, 5.8: ignore option and all remaining option space */ | ||
286 | return 0; | 288 | return 0; |
287 | 289 | ||
288 | out_invalid_option: | 290 | out_invalid_option: |
289 | DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT); | 291 | DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT); |
290 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR; | 292 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR; |
291 | DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len); | 293 | DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len); |
294 | DCCP_SKB_CB(skb)->dccpd_reset_data[0] = opt; | ||
295 | DCCP_SKB_CB(skb)->dccpd_reset_data[1] = len > 0 ? value[0] : 0; | ||
296 | DCCP_SKB_CB(skb)->dccpd_reset_data[2] = len > 1 ? value[1] : 0; | ||
292 | return -1; | 297 | return -1; |
293 | } | 298 | } |
294 | 299 | ||
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 1ca3b26eed0f..d0bd34819761 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -309,7 +309,9 @@ int dccp_disconnect(struct sock *sk, int flags) | |||
309 | sk->sk_err = ECONNRESET; | 309 | sk->sk_err = ECONNRESET; |
310 | 310 | ||
311 | dccp_clear_xmit_timers(sk); | 311 | dccp_clear_xmit_timers(sk); |
312 | |||
312 | __skb_queue_purge(&sk->sk_receive_queue); | 313 | __skb_queue_purge(&sk->sk_receive_queue); |
314 | __skb_queue_purge(&sk->sk_write_queue); | ||
313 | if (sk->sk_send_head != NULL) { | 315 | if (sk->sk_send_head != NULL) { |
314 | __kfree_skb(sk->sk_send_head); | 316 | __kfree_skb(sk->sk_send_head); |
315 | sk->sk_send_head = NULL; | 317 | sk->sk_send_head = NULL; |
@@ -1028,7 +1030,7 @@ MODULE_PARM_DESC(thash_entries, "Number of ehash buckets"); | |||
1028 | 1030 | ||
1029 | #ifdef CONFIG_IP_DCCP_DEBUG | 1031 | #ifdef CONFIG_IP_DCCP_DEBUG |
1030 | int dccp_debug; | 1032 | int dccp_debug; |
1031 | module_param(dccp_debug, bool, 0444); | 1033 | module_param(dccp_debug, bool, 0644); |
1032 | MODULE_PARM_DESC(dccp_debug, "Enable debug messages"); | 1034 | MODULE_PARM_DESC(dccp_debug, "Enable debug messages"); |
1033 | 1035 | ||
1034 | EXPORT_SYMBOL_GPL(dccp_debug); | 1036 | EXPORT_SYMBOL_GPL(dccp_debug); |
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
index 3bca97f55d47..949772a5a7dc 100644
--- a/net/ieee80211/ieee80211_module.c
+++ b/net/ieee80211/ieee80211_module.c
@@ -157,7 +157,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv) | |||
157 | err = ieee80211_networks_allocate(ieee); | 157 | err = ieee80211_networks_allocate(ieee); |
158 | if (err) { | 158 | if (err) { |
159 | IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err); | 159 | IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err); |
160 | goto failed; | 160 | goto failed_free_netdev; |
161 | } | 161 | } |
162 | ieee80211_networks_initialize(ieee); | 162 | ieee80211_networks_initialize(ieee); |
163 | 163 | ||
@@ -193,9 +193,9 @@ struct net_device *alloc_ieee80211(int sizeof_priv) | |||
193 | 193 | ||
194 | return dev; | 194 | return dev; |
195 | 195 | ||
196 | failed: | 196 | failed_free_netdev: |
197 | if (dev) | 197 | free_netdev(dev); |
198 | free_netdev(dev); | 198 | failed: |
199 | return NULL; | 199 | return NULL; |
200 | } | 200 | } |
201 | 201 | ||
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c10036e7a463..89cb047ab314 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -782,11 +782,15 @@ skip_listen_ht: | |||
782 | struct sock *sk; | 782 | struct sock *sk; |
783 | struct hlist_node *node; | 783 | struct hlist_node *node; |
784 | 784 | ||
785 | num = 0; | ||
786 | |||
787 | if (hlist_empty(&head->chain) && hlist_empty(&head->twchain)) | ||
788 | continue; | ||
789 | |||
785 | if (i > s_i) | 790 | if (i > s_i) |
786 | s_num = 0; | 791 | s_num = 0; |
787 | 792 | ||
788 | read_lock_bh(lock); | 793 | read_lock_bh(lock); |
789 | num = 0; | ||
790 | sk_for_each(sk, node, &head->chain) { | 794 | sk_for_each(sk, node, &head->chain) { |
791 | struct inet_sock *inet = inet_sk(sk); | 795 | struct inet_sock *inet = inet_sk(sk); |
792 | 796 | ||
diff --git a/net/ipv4/ipvs/Kconfig b/net/ipv4/ipvs/Kconfig
index 09d0c3f35669..de6004de80bc 100644
--- a/net/ipv4/ipvs/Kconfig
+++ b/net/ipv4/ipvs/Kconfig
@@ -24,6 +24,14 @@ menuconfig IP_VS | |||
24 | 24 | ||
25 | if IP_VS | 25 | if IP_VS |
26 | 26 | ||
27 | config IP_VS_IPV6 | ||
28 | bool "IPv6 support for IPVS (DANGEROUS)" | ||
29 | depends on EXPERIMENTAL && (IPV6 = y || IP_VS = IPV6) | ||
30 | ---help--- | ||
31 | Add IPv6 support to IPVS. This is incomplete and might be dangerous. | ||
32 | |||
33 | Say N if unsure. | ||
34 | |||
27 | config IP_VS_DEBUG | 35 | config IP_VS_DEBUG |
28 | bool "IP virtual server debugging" | 36 | bool "IP virtual server debugging" |
29 | ---help--- | 37 | ---help--- |
@@ -33,7 +41,8 @@ config IP_VS_DEBUG | |||
33 | 41 | ||
34 | config IP_VS_TAB_BITS | 42 | config IP_VS_TAB_BITS |
35 | int "IPVS connection table size (the Nth power of 2)" | 43 | int "IPVS connection table size (the Nth power of 2)" |
36 | default "12" | 44 | range 8 20 |
45 | default 12 | ||
37 | ---help--- | 46 | ---help--- |
38 | The IPVS connection hash table uses the chaining scheme to handle | 47 | The IPVS connection hash table uses the chaining scheme to handle |
39 | hash collisions. Using a big IPVS connection hash table will greatly | 48 | hash collisions. Using a big IPVS connection hash table will greatly |
@@ -71,14 +80,20 @@ config IP_VS_PROTO_UDP | |||
71 | This option enables support for load balancing UDP transport | 80 | This option enables support for load balancing UDP transport |
72 | protocol. Say Y if unsure. | 81 | protocol. Say Y if unsure. |
73 | 82 | ||
83 | config IP_VS_PROTO_AH_ESP | ||
84 | bool | ||
85 | depends on UNDEFINED | ||
86 | |||
74 | config IP_VS_PROTO_ESP | 87 | config IP_VS_PROTO_ESP |
75 | bool "ESP load balancing support" | 88 | bool "ESP load balancing support" |
89 | select IP_VS_PROTO_AH_ESP | ||
76 | ---help--- | 90 | ---help--- |
77 | This option enables support for load balancing ESP (Encapsulation | 91 | This option enables support for load balancing ESP (Encapsulation |
78 | Security Payload) transport protocol. Say Y if unsure. | 92 | Security Payload) transport protocol. Say Y if unsure. |
79 | 93 | ||
80 | config IP_VS_PROTO_AH | 94 | config IP_VS_PROTO_AH |
81 | bool "AH load balancing support" | 95 | bool "AH load balancing support" |
96 | select IP_VS_PROTO_AH_ESP | ||
82 | ---help--- | 97 | ---help--- |
83 | This option enables support for load balancing AH (Authentication | 98 | This option enables support for load balancing AH (Authentication |
84 | Header) transport protocol. Say Y if unsure. | 99 | Header) transport protocol. Say Y if unsure. |
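For orientation, IP_VS_TAB_BITS feeds a power-of-two hash table, so the new range 8..20 bounds it between 256 and roughly one million buckets. Roughly how the value is consumed; the macro names below follow ip_vs_conn.c of this era but are quoted from memory, so treat them as illustrative:

	/* CONFIG_IP_VS_TAB_BITS (default 12) selects the number of buckets: */
	#define IP_VS_CONN_TAB_BITS	CONFIG_IP_VS_TAB_BITS
	#define IP_VS_CONN_TAB_SIZE	(1 << IP_VS_CONN_TAB_BITS)	/* 4096 for 12 */
	#define IP_VS_CONN_TAB_MASK	(IP_VS_CONN_TAB_SIZE - 1)	/* hash & mask */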
diff --git a/net/ipv4/ipvs/Makefile b/net/ipv4/ipvs/Makefile
index 30e85de9ffff..73a46fe1fe4c 100644
--- a/net/ipv4/ipvs/Makefile
+++ b/net/ipv4/ipvs/Makefile
@@ -6,8 +6,7 @@ | |||
6 | ip_vs_proto-objs-y := | 6 | ip_vs_proto-objs-y := |
7 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o | 7 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o |
8 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o | 8 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o |
9 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_ESP) += ip_vs_proto_esp.o | 9 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o |
10 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH) += ip_vs_proto_ah.o | ||
11 | 10 | ||
12 | ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \ | 11 | ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \ |
13 | ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \ | 12 | ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \ |
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 44a6872dc245..9a24332fbed8 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -114,9 +114,18 @@ static inline void ct_write_unlock_bh(unsigned key) | |||
114 | /* | 114 | /* |
115 | * Returns hash value for IPVS connection entry | 115 | * Returns hash value for IPVS connection entry |
116 | */ | 116 | */ |
117 | static unsigned int ip_vs_conn_hashkey(unsigned proto, __be32 addr, __be16 port) | 117 | static unsigned int ip_vs_conn_hashkey(int af, unsigned proto, |
118 | const union nf_inet_addr *addr, | ||
119 | __be16 port) | ||
118 | { | 120 | { |
119 | return jhash_3words((__force u32)addr, (__force u32)port, proto, ip_vs_conn_rnd) | 121 | #ifdef CONFIG_IP_VS_IPV6 |
122 | if (af == AF_INET6) | ||
123 | return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd), | ||
124 | (__force u32)port, proto, ip_vs_conn_rnd) | ||
125 | & IP_VS_CONN_TAB_MASK; | ||
126 | #endif | ||
127 | return jhash_3words((__force u32)addr->ip, (__force u32)port, proto, | ||
128 | ip_vs_conn_rnd) | ||
120 | & IP_VS_CONN_TAB_MASK; | 129 | & IP_VS_CONN_TAB_MASK; |
121 | } | 130 | } |
122 | 131 | ||
@@ -131,7 +140,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) | |||
131 | int ret; | 140 | int ret; |
132 | 141 | ||
133 | /* Hash by protocol, client address and port */ | 142 | /* Hash by protocol, client address and port */ |
134 | hash = ip_vs_conn_hashkey(cp->protocol, cp->caddr, cp->cport); | 143 | hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport); |
135 | 144 | ||
136 | ct_write_lock(hash); | 145 | ct_write_lock(hash); |
137 | 146 | ||
@@ -162,7 +171,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) | |||
162 | int ret; | 171 | int ret; |
163 | 172 | ||
164 | /* unhash it and decrease its reference counter */ | 173 | /* unhash it and decrease its reference counter */ |
165 | hash = ip_vs_conn_hashkey(cp->protocol, cp->caddr, cp->cport); | 174 | hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport); |
166 | 175 | ||
167 | ct_write_lock(hash); | 176 | ct_write_lock(hash); |
168 | 177 | ||
@@ -187,20 +196,23 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) | |||
187 | * d_addr, d_port: pkt dest address (load balancer) | 196 | * d_addr, d_port: pkt dest address (load balancer) |
188 | */ | 197 | */ |
189 | static inline struct ip_vs_conn *__ip_vs_conn_in_get | 198 | static inline struct ip_vs_conn *__ip_vs_conn_in_get |
190 | (int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) | 199 | (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, |
200 | const union nf_inet_addr *d_addr, __be16 d_port) | ||
191 | { | 201 | { |
192 | unsigned hash; | 202 | unsigned hash; |
193 | struct ip_vs_conn *cp; | 203 | struct ip_vs_conn *cp; |
194 | 204 | ||
195 | hash = ip_vs_conn_hashkey(protocol, s_addr, s_port); | 205 | hash = ip_vs_conn_hashkey(af, protocol, s_addr, s_port); |
196 | 206 | ||
197 | ct_read_lock(hash); | 207 | ct_read_lock(hash); |
198 | 208 | ||
199 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { | 209 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
200 | if (s_addr==cp->caddr && s_port==cp->cport && | 210 | if (cp->af == af && |
201 | d_port==cp->vport && d_addr==cp->vaddr && | 211 | ip_vs_addr_equal(af, s_addr, &cp->caddr) && |
212 | ip_vs_addr_equal(af, d_addr, &cp->vaddr) && | ||
213 | s_port == cp->cport && d_port == cp->vport && | ||
202 | ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) && | 214 | ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) && |
203 | protocol==cp->protocol) { | 215 | protocol == cp->protocol) { |
204 | /* HIT */ | 216 | /* HIT */ |
205 | atomic_inc(&cp->refcnt); | 217 | atomic_inc(&cp->refcnt); |
206 | ct_read_unlock(hash); | 218 | ct_read_unlock(hash); |
@@ -214,39 +226,44 @@ static inline struct ip_vs_conn *__ip_vs_conn_in_get | |||
214 | } | 226 | } |
215 | 227 | ||
216 | struct ip_vs_conn *ip_vs_conn_in_get | 228 | struct ip_vs_conn *ip_vs_conn_in_get |
217 | (int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) | 229 | (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, |
230 | const union nf_inet_addr *d_addr, __be16 d_port) | ||
218 | { | 231 | { |
219 | struct ip_vs_conn *cp; | 232 | struct ip_vs_conn *cp; |
220 | 233 | ||
221 | cp = __ip_vs_conn_in_get(protocol, s_addr, s_port, d_addr, d_port); | 234 | cp = __ip_vs_conn_in_get(af, protocol, s_addr, s_port, d_addr, d_port); |
222 | if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) | 235 | if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) |
223 | cp = __ip_vs_conn_in_get(protocol, s_addr, 0, d_addr, d_port); | 236 | cp = __ip_vs_conn_in_get(af, protocol, s_addr, 0, d_addr, |
237 | d_port); | ||
224 | 238 | ||
225 | IP_VS_DBG(9, "lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n", | 239 | IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n", |
226 | ip_vs_proto_name(protocol), | 240 | ip_vs_proto_name(protocol), |
227 | NIPQUAD(s_addr), ntohs(s_port), | 241 | IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port), |
228 | NIPQUAD(d_addr), ntohs(d_port), | 242 | IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port), |
229 | cp?"hit":"not hit"); | 243 | cp ? "hit" : "not hit"); |
230 | 244 | ||
231 | return cp; | 245 | return cp; |
232 | } | 246 | } |
233 | 247 | ||
234 | /* Get reference to connection template */ | 248 | /* Get reference to connection template */ |
235 | struct ip_vs_conn *ip_vs_ct_in_get | 249 | struct ip_vs_conn *ip_vs_ct_in_get |
236 | (int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) | 250 | (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, |
251 | const union nf_inet_addr *d_addr, __be16 d_port) | ||
237 | { | 252 | { |
238 | unsigned hash; | 253 | unsigned hash; |
239 | struct ip_vs_conn *cp; | 254 | struct ip_vs_conn *cp; |
240 | 255 | ||
241 | hash = ip_vs_conn_hashkey(protocol, s_addr, s_port); | 256 | hash = ip_vs_conn_hashkey(af, protocol, s_addr, s_port); |
242 | 257 | ||
243 | ct_read_lock(hash); | 258 | ct_read_lock(hash); |
244 | 259 | ||
245 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { | 260 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
246 | if (s_addr==cp->caddr && s_port==cp->cport && | 261 | if (cp->af == af && |
247 | d_port==cp->vport && d_addr==cp->vaddr && | 262 | ip_vs_addr_equal(af, s_addr, &cp->caddr) && |
263 | ip_vs_addr_equal(af, d_addr, &cp->vaddr) && | ||
264 | s_port == cp->cport && d_port == cp->vport && | ||
248 | cp->flags & IP_VS_CONN_F_TEMPLATE && | 265 | cp->flags & IP_VS_CONN_F_TEMPLATE && |
249 | protocol==cp->protocol) { | 266 | protocol == cp->protocol) { |
250 | /* HIT */ | 267 | /* HIT */ |
251 | atomic_inc(&cp->refcnt); | 268 | atomic_inc(&cp->refcnt); |
252 | goto out; | 269 | goto out; |
@@ -257,11 +274,11 @@ struct ip_vs_conn *ip_vs_ct_in_get | |||
257 | out: | 274 | out: |
258 | ct_read_unlock(hash); | 275 | ct_read_unlock(hash); |
259 | 276 | ||
260 | IP_VS_DBG(9, "template lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n", | 277 | IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n", |
261 | ip_vs_proto_name(protocol), | 278 | ip_vs_proto_name(protocol), |
262 | NIPQUAD(s_addr), ntohs(s_port), | 279 | IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port), |
263 | NIPQUAD(d_addr), ntohs(d_port), | 280 | IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port), |
264 | cp?"hit":"not hit"); | 281 | cp ? "hit" : "not hit"); |
265 | 282 | ||
266 | return cp; | 283 | return cp; |
267 | } | 284 | } |
@@ -273,7 +290,8 @@ struct ip_vs_conn *ip_vs_ct_in_get | |||
273 | * d_addr, d_port: pkt dest address (foreign host) | 290 | * d_addr, d_port: pkt dest address (foreign host) |
274 | */ | 291 | */ |
275 | struct ip_vs_conn *ip_vs_conn_out_get | 292 | struct ip_vs_conn *ip_vs_conn_out_get |
276 | (int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) | 293 | (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, |
294 | const union nf_inet_addr *d_addr, __be16 d_port) | ||
277 | { | 295 | { |
278 | unsigned hash; | 296 | unsigned hash; |
279 | struct ip_vs_conn *cp, *ret=NULL; | 297 | struct ip_vs_conn *cp, *ret=NULL; |
@@ -281,13 +299,15 @@ struct ip_vs_conn *ip_vs_conn_out_get | |||
281 | /* | 299 | /* |
282 | * Check for "full" addressed entries | 300 | * Check for "full" addressed entries |
283 | */ | 301 | */ |
284 | hash = ip_vs_conn_hashkey(protocol, d_addr, d_port); | 302 | hash = ip_vs_conn_hashkey(af, protocol, d_addr, d_port); |
285 | 303 | ||
286 | ct_read_lock(hash); | 304 | ct_read_lock(hash); |
287 | 305 | ||
288 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { | 306 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
289 | if (d_addr == cp->caddr && d_port == cp->cport && | 307 | if (cp->af == af && |
290 | s_port == cp->dport && s_addr == cp->daddr && | 308 | ip_vs_addr_equal(af, d_addr, &cp->caddr) && |
309 | ip_vs_addr_equal(af, s_addr, &cp->daddr) && | ||
310 | d_port == cp->cport && s_port == cp->dport && | ||
291 | protocol == cp->protocol) { | 311 | protocol == cp->protocol) { |
292 | /* HIT */ | 312 | /* HIT */ |
293 | atomic_inc(&cp->refcnt); | 313 | atomic_inc(&cp->refcnt); |
@@ -298,11 +318,11 @@ struct ip_vs_conn *ip_vs_conn_out_get | |||
298 | 318 | ||
299 | ct_read_unlock(hash); | 319 | ct_read_unlock(hash); |
300 | 320 | ||
301 | IP_VS_DBG(9, "lookup/out %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n", | 321 | IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n", |
302 | ip_vs_proto_name(protocol), | 322 | ip_vs_proto_name(protocol), |
303 | NIPQUAD(s_addr), ntohs(s_port), | 323 | IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port), |
304 | NIPQUAD(d_addr), ntohs(d_port), | 324 | IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port), |
305 | ret?"hit":"not hit"); | 325 | ret ? "hit" : "not hit"); |
306 | 326 | ||
307 | return ret; | 327 | return ret; |
308 | } | 328 | } |
@@ -369,6 +389,33 @@ static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp) | |||
369 | } | 389 | } |
370 | } | 390 | } |
371 | 391 | ||
392 | #ifdef CONFIG_IP_VS_IPV6 | ||
393 | static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp) | ||
394 | { | ||
395 | switch (IP_VS_FWD_METHOD(cp)) { | ||
396 | case IP_VS_CONN_F_MASQ: | ||
397 | cp->packet_xmit = ip_vs_nat_xmit_v6; | ||
398 | break; | ||
399 | |||
400 | case IP_VS_CONN_F_TUNNEL: | ||
401 | cp->packet_xmit = ip_vs_tunnel_xmit_v6; | ||
402 | break; | ||
403 | |||
404 | case IP_VS_CONN_F_DROUTE: | ||
405 | cp->packet_xmit = ip_vs_dr_xmit_v6; | ||
406 | break; | ||
407 | |||
408 | case IP_VS_CONN_F_LOCALNODE: | ||
409 | cp->packet_xmit = ip_vs_null_xmit; | ||
410 | break; | ||
411 | |||
412 | case IP_VS_CONN_F_BYPASS: | ||
413 | cp->packet_xmit = ip_vs_bypass_xmit_v6; | ||
414 | break; | ||
415 | } | ||
416 | } | ||
417 | #endif | ||
418 | |||
372 | 419 | ||
373 | static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest) | 420 | static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest) |
374 | { | 421 | { |
@@ -402,16 +449,16 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) | |||
402 | cp->flags |= atomic_read(&dest->conn_flags); | 449 | cp->flags |= atomic_read(&dest->conn_flags); |
403 | cp->dest = dest; | 450 | cp->dest = dest; |
404 | 451 | ||
405 | IP_VS_DBG(7, "Bind-dest %s c:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " | 452 | IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d " |
406 | "d:%u.%u.%u.%u:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d " | 453 | "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d " |
407 | "dest->refcnt:%d\n", | 454 | "dest->refcnt:%d\n", |
408 | ip_vs_proto_name(cp->protocol), | 455 | ip_vs_proto_name(cp->protocol), |
409 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 456 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), |
410 | NIPQUAD(cp->vaddr), ntohs(cp->vport), | 457 | IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), |
411 | NIPQUAD(cp->daddr), ntohs(cp->dport), | 458 | IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport), |
412 | ip_vs_fwd_tag(cp), cp->state, | 459 | ip_vs_fwd_tag(cp), cp->state, |
413 | cp->flags, atomic_read(&cp->refcnt), | 460 | cp->flags, atomic_read(&cp->refcnt), |
414 | atomic_read(&dest->refcnt)); | 461 | atomic_read(&dest->refcnt)); |
415 | 462 | ||
416 | /* Update the connection counters */ | 463 | /* Update the connection counters */ |
417 | if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { | 464 | if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { |
@@ -444,8 +491,9 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp) | |||
444 | struct ip_vs_dest *dest; | 491 | struct ip_vs_dest *dest; |
445 | 492 | ||
446 | if ((cp) && (!cp->dest)) { | 493 | if ((cp) && (!cp->dest)) { |
447 | dest = ip_vs_find_dest(cp->daddr, cp->dport, | 494 | dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport, |
448 | cp->vaddr, cp->vport, cp->protocol); | 495 | &cp->vaddr, cp->vport, |
496 | cp->protocol); | ||
449 | ip_vs_bind_dest(cp, dest); | 497 | ip_vs_bind_dest(cp, dest); |
450 | return dest; | 498 | return dest; |
451 | } else | 499 | } else |
@@ -464,16 +512,16 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp) | |||
464 | if (!dest) | 512 | if (!dest) |
465 | return; | 513 | return; |
466 | 514 | ||
467 | IP_VS_DBG(7, "Unbind-dest %s c:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " | 515 | IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d " |
468 | "d:%u.%u.%u.%u:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d " | 516 | "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d " |
469 | "dest->refcnt:%d\n", | 517 | "dest->refcnt:%d\n", |
470 | ip_vs_proto_name(cp->protocol), | 518 | ip_vs_proto_name(cp->protocol), |
471 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 519 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), |
472 | NIPQUAD(cp->vaddr), ntohs(cp->vport), | 520 | IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), |
473 | NIPQUAD(cp->daddr), ntohs(cp->dport), | 521 | IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport), |
474 | ip_vs_fwd_tag(cp), cp->state, | 522 | ip_vs_fwd_tag(cp), cp->state, |
475 | cp->flags, atomic_read(&cp->refcnt), | 523 | cp->flags, atomic_read(&cp->refcnt), |
476 | atomic_read(&dest->refcnt)); | 524 | atomic_read(&dest->refcnt)); |
477 | 525 | ||
478 | /* Update the connection counters */ | 526 | /* Update the connection counters */ |
479 | if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { | 527 | if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { |
@@ -526,13 +574,16 @@ int ip_vs_check_template(struct ip_vs_conn *ct) | |||
526 | !(dest->flags & IP_VS_DEST_F_AVAILABLE) || | 574 | !(dest->flags & IP_VS_DEST_F_AVAILABLE) || |
527 | (sysctl_ip_vs_expire_quiescent_template && | 575 | (sysctl_ip_vs_expire_quiescent_template && |
528 | (atomic_read(&dest->weight) == 0))) { | 576 | (atomic_read(&dest->weight) == 0))) { |
529 | IP_VS_DBG(9, "check_template: dest not available for " | 577 | IP_VS_DBG_BUF(9, "check_template: dest not available for " |
530 | "protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " | 578 | "protocol %s s:%s:%d v:%s:%d " |
531 | "-> d:%u.%u.%u.%u:%d\n", | 579 | "-> d:%s:%d\n", |
532 | ip_vs_proto_name(ct->protocol), | 580 | ip_vs_proto_name(ct->protocol), |
533 | NIPQUAD(ct->caddr), ntohs(ct->cport), | 581 | IP_VS_DBG_ADDR(ct->af, &ct->caddr), |
534 | NIPQUAD(ct->vaddr), ntohs(ct->vport), | 582 | ntohs(ct->cport), |
535 | NIPQUAD(ct->daddr), ntohs(ct->dport)); | 583 | IP_VS_DBG_ADDR(ct->af, &ct->vaddr), |
584 | ntohs(ct->vport), | ||
585 | IP_VS_DBG_ADDR(ct->af, &ct->daddr), | ||
586 | ntohs(ct->dport)); | ||
536 | 587 | ||
537 | /* | 588 | /* |
538 | * Invalidate the connection template | 589 | * Invalidate the connection template |
@@ -625,8 +676,9 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp) | |||
625 | * Create a new connection entry and hash it into the ip_vs_conn_tab | 676 | * Create a new connection entry and hash it into the ip_vs_conn_tab |
626 | */ | 677 | */ |
627 | struct ip_vs_conn * | 678 | struct ip_vs_conn * |
628 | ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport, | 679 | ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport, |
629 | __be32 daddr, __be16 dport, unsigned flags, | 680 | const union nf_inet_addr *vaddr, __be16 vport, |
681 | const union nf_inet_addr *daddr, __be16 dport, unsigned flags, | ||
630 | struct ip_vs_dest *dest) | 682 | struct ip_vs_dest *dest) |
631 | { | 683 | { |
632 | struct ip_vs_conn *cp; | 684 | struct ip_vs_conn *cp; |
@@ -640,12 +692,13 @@ ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport | |||
640 | 692 | ||
641 | INIT_LIST_HEAD(&cp->c_list); | 693 | INIT_LIST_HEAD(&cp->c_list); |
642 | setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp); | 694 | setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp); |
695 | cp->af = af; | ||
643 | cp->protocol = proto; | 696 | cp->protocol = proto; |
644 | cp->caddr = caddr; | 697 | ip_vs_addr_copy(af, &cp->caddr, caddr); |
645 | cp->cport = cport; | 698 | cp->cport = cport; |
646 | cp->vaddr = vaddr; | 699 | ip_vs_addr_copy(af, &cp->vaddr, vaddr); |
647 | cp->vport = vport; | 700 | cp->vport = vport; |
648 | cp->daddr = daddr; | 701 | ip_vs_addr_copy(af, &cp->daddr, daddr); |
649 | cp->dport = dport; | 702 | cp->dport = dport; |
650 | cp->flags = flags; | 703 | cp->flags = flags; |
651 | spin_lock_init(&cp->lock); | 704 | spin_lock_init(&cp->lock); |
@@ -672,7 +725,12 @@ ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport | |||
672 | cp->timeout = 3*HZ; | 725 | cp->timeout = 3*HZ; |
673 | 726 | ||
674 | /* Bind its packet transmitter */ | 727 | /* Bind its packet transmitter */ |
675 | ip_vs_bind_xmit(cp); | 728 | #ifdef CONFIG_IP_VS_IPV6 |
729 | if (af == AF_INET6) | ||
730 | ip_vs_bind_xmit_v6(cp); | ||
731 | else | ||
732 | #endif | ||
733 | ip_vs_bind_xmit(cp); | ||
676 | 734 | ||
677 | if (unlikely(pp && atomic_read(&pp->appcnt))) | 735 | if (unlikely(pp && atomic_read(&pp->appcnt))) |
678 | ip_vs_bind_app(cp, pp); | 736 | ip_vs_bind_app(cp, pp); |
@@ -760,12 +818,26 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v) | |||
760 | else { | 818 | else { |
761 | const struct ip_vs_conn *cp = v; | 819 | const struct ip_vs_conn *cp = v; |
762 | 820 | ||
763 | seq_printf(seq, | 821 | #ifdef CONFIG_IP_VS_IPV6 |
764 | "%-3s %08X %04X %08X %04X %08X %04X %-11s %7lu\n", | 822 | if (cp->af == AF_INET6) |
823 | seq_printf(seq, | ||
824 | "%-3s " NIP6_FMT " %04X " NIP6_FMT | ||
825 | " %04X " NIP6_FMT " %04X %-11s %7lu\n", | ||
826 | ip_vs_proto_name(cp->protocol), | ||
827 | NIP6(cp->caddr.in6), ntohs(cp->cport), | ||
828 | NIP6(cp->vaddr.in6), ntohs(cp->vport), | ||
829 | NIP6(cp->daddr.in6), ntohs(cp->dport), | ||
830 | ip_vs_state_name(cp->protocol, cp->state), | ||
831 | (cp->timer.expires-jiffies)/HZ); | ||
832 | else | ||
833 | #endif | ||
834 | seq_printf(seq, | ||
835 | "%-3s %08X %04X %08X %04X" | ||
836 | " %08X %04X %-11s %7lu\n", | ||
765 | ip_vs_proto_name(cp->protocol), | 837 | ip_vs_proto_name(cp->protocol), |
766 | ntohl(cp->caddr), ntohs(cp->cport), | 838 | ntohl(cp->caddr.ip), ntohs(cp->cport), |
767 | ntohl(cp->vaddr), ntohs(cp->vport), | 839 | ntohl(cp->vaddr.ip), ntohs(cp->vport), |
768 | ntohl(cp->daddr), ntohs(cp->dport), | 840 | ntohl(cp->daddr.ip), ntohs(cp->dport), |
769 | ip_vs_state_name(cp->protocol, cp->state), | 841 | ip_vs_state_name(cp->protocol, cp->state), |
770 | (cp->timer.expires-jiffies)/HZ); | 842 | (cp->timer.expires-jiffies)/HZ); |
771 | } | 843 | } |
@@ -809,12 +881,27 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v) | |||
809 | else { | 881 | else { |
810 | const struct ip_vs_conn *cp = v; | 882 | const struct ip_vs_conn *cp = v; |
811 | 883 | ||
812 | seq_printf(seq, | 884 | #ifdef CONFIG_IP_VS_IPV6 |
813 | "%-3s %08X %04X %08X %04X %08X %04X %-11s %-6s %7lu\n", | 885 | if (cp->af == AF_INET6) |
886 | seq_printf(seq, | ||
887 | "%-3s " NIP6_FMT " %04X " NIP6_FMT | ||
888 | " %04X " NIP6_FMT " %04X %-11s %-6s %7lu\n", | ||
889 | ip_vs_proto_name(cp->protocol), | ||
890 | NIP6(cp->caddr.in6), ntohs(cp->cport), | ||
891 | NIP6(cp->vaddr.in6), ntohs(cp->vport), | ||
892 | NIP6(cp->daddr.in6), ntohs(cp->dport), | ||
893 | ip_vs_state_name(cp->protocol, cp->state), | ||
894 | ip_vs_origin_name(cp->flags), | ||
895 | (cp->timer.expires-jiffies)/HZ); | ||
896 | else | ||
897 | #endif | ||
898 | seq_printf(seq, | ||
899 | "%-3s %08X %04X %08X %04X " | ||
900 | "%08X %04X %-11s %-6s %7lu\n", | ||
814 | ip_vs_proto_name(cp->protocol), | 901 | ip_vs_proto_name(cp->protocol), |
815 | ntohl(cp->caddr), ntohs(cp->cport), | 902 | ntohl(cp->caddr.ip), ntohs(cp->cport), |
816 | ntohl(cp->vaddr), ntohs(cp->vport), | 903 | ntohl(cp->vaddr.ip), ntohs(cp->vport), |
817 | ntohl(cp->daddr), ntohs(cp->dport), | 904 | ntohl(cp->daddr.ip), ntohs(cp->dport), |
818 | ip_vs_state_name(cp->protocol, cp->state), | 905 | ip_vs_state_name(cp->protocol, cp->state), |
819 | ip_vs_origin_name(cp->flags), | 906 | ip_vs_origin_name(cp->flags), |
820 | (cp->timer.expires-jiffies)/HZ); | 907 | (cp->timer.expires-jiffies)/HZ); |
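The ip_vs_conn.c conversion above switches connection lookups from bare __be32 addresses to union nf_inet_addr plus an address-family argument. The ip_vs_addr_copy()/ip_vs_addr_equal() helpers it relies on are added in include/net/ip_vs.h, outside this net/ diffstat; a sketch of their likely shape, for context only:

	#include <linux/netfilter.h>	/* union nf_inet_addr */
	#include <linux/socket.h>	/* AF_INET6 */
	#include <net/ipv6.h>

	static inline void sketch_addr_copy(int af, union nf_inet_addr *dst,
					    const union nf_inet_addr *src)
	{
	#ifdef CONFIG_IP_VS_IPV6
		if (af == AF_INET6) {
			ipv6_addr_copy(&dst->in6, &src->in6);
			return;
		}
	#endif
		dst->ip = src->ip;
	}

	static inline int sketch_addr_equal(int af, const union nf_inet_addr *a,
					    const union nf_inet_addr *b)
	{
	#ifdef CONFIG_IP_VS_IPV6
		if (af == AF_INET6)
			return ipv6_addr_equal(&a->in6, &b->in6);
	#endif
		return a->ip == b->ip;
	}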
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index a7879eafc3b5..80a4fcf33a54 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -39,6 +39,11 @@ | |||
39 | #include <linux/netfilter.h> | 39 | #include <linux/netfilter.h> |
40 | #include <linux/netfilter_ipv4.h> | 40 | #include <linux/netfilter_ipv4.h> |
41 | 41 | ||
42 | #ifdef CONFIG_IP_VS_IPV6 | ||
43 | #include <net/ipv6.h> | ||
44 | #include <linux/netfilter_ipv6.h> | ||
45 | #endif | ||
46 | |||
42 | #include <net/ip_vs.h> | 47 | #include <net/ip_vs.h> |
43 | 48 | ||
44 | 49 | ||
@@ -60,6 +65,7 @@ EXPORT_SYMBOL(ip_vs_get_debug_level); | |||
60 | 65 | ||
61 | /* ID used in ICMP lookups */ | 66 | /* ID used in ICMP lookups */ |
62 | #define icmp_id(icmph) (((icmph)->un).echo.id) | 67 | #define icmp_id(icmph) (((icmph)->un).echo.id) |
68 | #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier) | ||
63 | 69 | ||
64 | const char *ip_vs_proto_name(unsigned proto) | 70 | const char *ip_vs_proto_name(unsigned proto) |
65 | { | 71 | { |
@@ -74,6 +80,10 @@ const char *ip_vs_proto_name(unsigned proto) | |||
74 | return "TCP"; | 80 | return "TCP"; |
75 | case IPPROTO_ICMP: | 81 | case IPPROTO_ICMP: |
76 | return "ICMP"; | 82 | return "ICMP"; |
83 | #ifdef CONFIG_IP_VS_IPV6 | ||
84 | case IPPROTO_ICMPV6: | ||
85 | return "ICMPv6"; | ||
86 | #endif | ||
77 | default: | 87 | default: |
78 | sprintf(buf, "IP_%d", proto); | 88 | sprintf(buf, "IP_%d", proto); |
79 | return buf; | 89 | return buf; |
@@ -92,18 +102,18 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb) | |||
92 | struct ip_vs_dest *dest = cp->dest; | 102 | struct ip_vs_dest *dest = cp->dest; |
93 | if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { | 103 | if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { |
94 | spin_lock(&dest->stats.lock); | 104 | spin_lock(&dest->stats.lock); |
95 | dest->stats.inpkts++; | 105 | dest->stats.ustats.inpkts++; |
96 | dest->stats.inbytes += skb->len; | 106 | dest->stats.ustats.inbytes += skb->len; |
97 | spin_unlock(&dest->stats.lock); | 107 | spin_unlock(&dest->stats.lock); |
98 | 108 | ||
99 | spin_lock(&dest->svc->stats.lock); | 109 | spin_lock(&dest->svc->stats.lock); |
100 | dest->svc->stats.inpkts++; | 110 | dest->svc->stats.ustats.inpkts++; |
101 | dest->svc->stats.inbytes += skb->len; | 111 | dest->svc->stats.ustats.inbytes += skb->len; |
102 | spin_unlock(&dest->svc->stats.lock); | 112 | spin_unlock(&dest->svc->stats.lock); |
103 | 113 | ||
104 | spin_lock(&ip_vs_stats.lock); | 114 | spin_lock(&ip_vs_stats.lock); |
105 | ip_vs_stats.inpkts++; | 115 | ip_vs_stats.ustats.inpkts++; |
106 | ip_vs_stats.inbytes += skb->len; | 116 | ip_vs_stats.ustats.inbytes += skb->len; |
107 | spin_unlock(&ip_vs_stats.lock); | 117 | spin_unlock(&ip_vs_stats.lock); |
108 | } | 118 | } |
109 | } | 119 | } |
@@ -115,18 +125,18 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) | |||
115 | struct ip_vs_dest *dest = cp->dest; | 125 | struct ip_vs_dest *dest = cp->dest; |
116 | if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { | 126 | if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { |
117 | spin_lock(&dest->stats.lock); | 127 | spin_lock(&dest->stats.lock); |
118 | dest->stats.outpkts++; | 128 | dest->stats.ustats.outpkts++; |
119 | dest->stats.outbytes += skb->len; | 129 | dest->stats.ustats.outbytes += skb->len; |
120 | spin_unlock(&dest->stats.lock); | 130 | spin_unlock(&dest->stats.lock); |
121 | 131 | ||
122 | spin_lock(&dest->svc->stats.lock); | 132 | spin_lock(&dest->svc->stats.lock); |
123 | dest->svc->stats.outpkts++; | 133 | dest->svc->stats.ustats.outpkts++; |
124 | dest->svc->stats.outbytes += skb->len; | 134 | dest->svc->stats.ustats.outbytes += skb->len; |
125 | spin_unlock(&dest->svc->stats.lock); | 135 | spin_unlock(&dest->svc->stats.lock); |
126 | 136 | ||
127 | spin_lock(&ip_vs_stats.lock); | 137 | spin_lock(&ip_vs_stats.lock); |
128 | ip_vs_stats.outpkts++; | 138 | ip_vs_stats.ustats.outpkts++; |
129 | ip_vs_stats.outbytes += skb->len; | 139 | ip_vs_stats.ustats.outbytes += skb->len; |
130 | spin_unlock(&ip_vs_stats.lock); | 140 | spin_unlock(&ip_vs_stats.lock); |
131 | } | 141 | } |
132 | } | 142 | } |
@@ -136,15 +146,15 @@ static inline void | |||
136 | ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) | 146 | ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) |
137 | { | 147 | { |
138 | spin_lock(&cp->dest->stats.lock); | 148 | spin_lock(&cp->dest->stats.lock); |
139 | cp->dest->stats.conns++; | 149 | cp->dest->stats.ustats.conns++; |
140 | spin_unlock(&cp->dest->stats.lock); | 150 | spin_unlock(&cp->dest->stats.lock); |
141 | 151 | ||
142 | spin_lock(&svc->stats.lock); | 152 | spin_lock(&svc->stats.lock); |
143 | svc->stats.conns++; | 153 | svc->stats.ustats.conns++; |
144 | spin_unlock(&svc->stats.lock); | 154 | spin_unlock(&svc->stats.lock); |
145 | 155 | ||
146 | spin_lock(&ip_vs_stats.lock); | 156 | spin_lock(&ip_vs_stats.lock); |
147 | ip_vs_stats.conns++; | 157 | ip_vs_stats.ustats.conns++; |
148 | spin_unlock(&ip_vs_stats.lock); | 158 | spin_unlock(&ip_vs_stats.lock); |
149 | } | 159 | } |
150 | 160 | ||
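The three stats hunks above are mechanical: each counter moves from e.g. dest->stats.inpkts to dest->stats.ustats.inpkts, i.e. into a nested sub-structure, while the spinlock guarding it stays in the outer stats object. Presumably the nested block is the part handed to user space as one unit. A rough userspace sketch of that layout (the names and the split are illustrative, not the kernel's definitions):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* counters grouped so they can be copied out to user space as one blob */
struct vs_user_stats {
	uint32_t conns;
	uint32_t inpkts, outpkts;
	uint64_t inbytes, outbytes;
};

/* kernel-side wrapper: the lock lives next to, not inside, the blob */
struct vs_stats {
	struct vs_user_stats ustats;
	pthread_mutex_t lock;
};

static void account_in(struct vs_stats *s, size_t len)
{
	pthread_mutex_lock(&s->lock);
	s->ustats.inpkts++;
	s->ustats.inbytes += len;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct vs_stats s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	account_in(&s, 1500);
	printf("inpkts=%u inbytes=%llu\n", s.ustats.inpkts,
	       (unsigned long long)s.ustats.inbytes);
	return 0;
}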
@@ -173,20 +183,28 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
173 | __be16 ports[2]) | 183 | __be16 ports[2]) |
174 | { | 184 | { |
175 | struct ip_vs_conn *cp = NULL; | 185 | struct ip_vs_conn *cp = NULL; |
176 | struct iphdr *iph = ip_hdr(skb); | 186 | struct ip_vs_iphdr iph; |
177 | struct ip_vs_dest *dest; | 187 | struct ip_vs_dest *dest; |
178 | struct ip_vs_conn *ct; | 188 | struct ip_vs_conn *ct; |
179 | __be16 dport; /* destination port to forward */ | 189 | __be16 dport; /* destination port to forward */ |
180 | __be32 snet; /* source network of the client, after masking */ | 190 | union nf_inet_addr snet; /* source network of the client, |
191 | after masking */ | ||
192 | |||
193 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | ||
181 | 194 | ||
182 | /* Mask saddr with the netmask to adjust template granularity */ | 195 | /* Mask saddr with the netmask to adjust template granularity */ |
183 | snet = iph->saddr & svc->netmask; | 196 | #ifdef CONFIG_IP_VS_IPV6 |
197 | if (svc->af == AF_INET6) | ||
198 | ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask); | ||
199 | else | ||
200 | #endif | ||
201 | snet.ip = iph.saddr.ip & svc->netmask; | ||
184 | 202 | ||
185 | IP_VS_DBG(6, "p-schedule: src %u.%u.%u.%u:%u dest %u.%u.%u.%u:%u " | 203 | IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u " |
186 | "mnet %u.%u.%u.%u\n", | 204 | "mnet %s\n", |
187 | NIPQUAD(iph->saddr), ntohs(ports[0]), | 205 | IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]), |
188 | NIPQUAD(iph->daddr), ntohs(ports[1]), | 206 | IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]), |
189 | NIPQUAD(snet)); | 207 | IP_VS_DBG_ADDR(svc->af, &snet)); |
190 | 208 | ||
191 | /* | 209 | /* |
192 | * As far as we know, FTP is a very complicated network protocol, and | 210 | * As far as we know, FTP is a very complicated network protocol, and |
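The persistence template key is no longer a plain __be32: the client address goes into the address union, and the "source network" is derived per family — a 32-bit AND with svc->netmask for IPv4, ipv6_addr_prefix() with a prefix length for IPv6, as the hunk above shows. A small userspace analogue of the two masking styles (helper names here are made up for illustration):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

/* keep the first 'plen' bits of an IPv6 address and zero the rest,
 * roughly what the prefix mask does for the AF_INET6 branch above */
static void mask_prefix(struct in6_addr *dst, const struct in6_addr *src,
			int plen)
{
	int full = plen / 8, rem = plen % 8;

	memset(dst, 0, sizeof(*dst));
	memcpy(dst->s6_addr, src->s6_addr, full);
	if (rem)
		dst->s6_addr[full] = src->s6_addr[full] & (0xff << (8 - rem));
}

int main(void)
{
	struct in6_addr a, net6;
	struct in_addr v4;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET6, "2001:db8:1234:5678::42", &a);
	mask_prefix(&net6, &a, 64);	/* /64 client network */
	printf("v6 net: %s\n", inet_ntop(AF_INET6, &net6, buf, sizeof(buf)));

	/* the IPv4 branch stays a plain AND with the 32-bit netmask */
	inet_pton(AF_INET, "192.0.2.99", &v4);
	v4.s_addr &= inet_addr("255.255.255.0");
	printf("v4 net: %s\n", inet_ntop(AF_INET, &v4, buf, sizeof(buf)));
	return 0;
}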
@@ -204,11 +222,11 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
204 | if (ports[1] == svc->port) { | 222 | if (ports[1] == svc->port) { |
205 | /* Check if a template already exists */ | 223 | /* Check if a template already exists */ |
206 | if (svc->port != FTPPORT) | 224 | if (svc->port != FTPPORT) |
207 | ct = ip_vs_ct_in_get(iph->protocol, snet, 0, | 225 | ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0, |
208 | iph->daddr, ports[1]); | 226 | &iph.daddr, ports[1]); |
209 | else | 227 | else |
210 | ct = ip_vs_ct_in_get(iph->protocol, snet, 0, | 228 | ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0, |
211 | iph->daddr, 0); | 229 | &iph.daddr, 0); |
212 | 230 | ||
213 | if (!ct || !ip_vs_check_template(ct)) { | 231 | if (!ct || !ip_vs_check_template(ct)) { |
214 | /* | 232 | /* |
@@ -228,18 +246,18 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
228 | * for ftp service. | 246 | * for ftp service. |
229 | */ | 247 | */ |
230 | if (svc->port != FTPPORT) | 248 | if (svc->port != FTPPORT) |
231 | ct = ip_vs_conn_new(iph->protocol, | 249 | ct = ip_vs_conn_new(svc->af, iph.protocol, |
232 | snet, 0, | 250 | &snet, 0, |
233 | iph->daddr, | 251 | &iph.daddr, |
234 | ports[1], | 252 | ports[1], |
235 | dest->addr, dest->port, | 253 | &dest->addr, dest->port, |
236 | IP_VS_CONN_F_TEMPLATE, | 254 | IP_VS_CONN_F_TEMPLATE, |
237 | dest); | 255 | dest); |
238 | else | 256 | else |
239 | ct = ip_vs_conn_new(iph->protocol, | 257 | ct = ip_vs_conn_new(svc->af, iph.protocol, |
240 | snet, 0, | 258 | &snet, 0, |
241 | iph->daddr, 0, | 259 | &iph.daddr, 0, |
242 | dest->addr, 0, | 260 | &dest->addr, 0, |
243 | IP_VS_CONN_F_TEMPLATE, | 261 | IP_VS_CONN_F_TEMPLATE, |
244 | dest); | 262 | dest); |
245 | if (ct == NULL) | 263 | if (ct == NULL) |
@@ -258,12 +276,16 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
258 | * fwmark template: <IPPROTO_IP,caddr,0,fwmark,0,daddr,0> | 276 | * fwmark template: <IPPROTO_IP,caddr,0,fwmark,0,daddr,0> |
259 | * port zero template: <protocol,caddr,0,vaddr,0,daddr,0> | 277 | * port zero template: <protocol,caddr,0,vaddr,0,daddr,0> |
260 | */ | 278 | */ |
261 | if (svc->fwmark) | 279 | if (svc->fwmark) { |
262 | ct = ip_vs_ct_in_get(IPPROTO_IP, snet, 0, | 280 | union nf_inet_addr fwmark = { |
263 | htonl(svc->fwmark), 0); | 281 | .all = { 0, 0, 0, htonl(svc->fwmark) } |
264 | else | 282 | }; |
265 | ct = ip_vs_ct_in_get(iph->protocol, snet, 0, | 283 | |
266 | iph->daddr, 0); | 284 | ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0, |
285 | &fwmark, 0); | ||
286 | } else | ||
287 | ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0, | ||
288 | &iph.daddr, 0); | ||
267 | 289 | ||
268 | if (!ct || !ip_vs_check_template(ct)) { | 290 | if (!ct || !ip_vs_check_template(ct)) { |
269 | /* | 291 | /* |
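Fwmark-based templates used to pass htonl(svc->fwmark) directly where an address was expected; with the address union the mark is now stored in the last 32-bit word of an otherwise zeroed union, so the same address-keyed template lookup serves both families. A sketch of the idea (the union below is a stand-in for the kernel's nf_inet_addr):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's v4/v6 address union */
union vs_addr {
	uint32_t	all[4];
	struct in_addr	in;
	struct in6_addr	in6;
};

int main(void)
{
	uint32_t fwmark = 7;

	/* mark stored in the last word, network byte order, rest zeroed */
	union vs_addr key = { .all = { 0, 0, 0, htonl(fwmark) } };

	printf("recovered mark = %u\n", ntohl(key.all[3]));
	return 0;
}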
@@ -282,18 +304,22 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
282 | /* | 304 | /* |
283 | * Create a template according to the service | 305 | * Create a template according to the service |
284 | */ | 306 | */ |
285 | if (svc->fwmark) | 307 | if (svc->fwmark) { |
286 | ct = ip_vs_conn_new(IPPROTO_IP, | 308 | union nf_inet_addr fwmark = { |
287 | snet, 0, | 309 | .all = { 0, 0, 0, htonl(svc->fwmark) } |
288 | htonl(svc->fwmark), 0, | 310 | }; |
289 | dest->addr, 0, | 311 | |
312 | ct = ip_vs_conn_new(svc->af, IPPROTO_IP, | ||
313 | &snet, 0, | ||
314 | &fwmark, 0, | ||
315 | &dest->addr, 0, | ||
290 | IP_VS_CONN_F_TEMPLATE, | 316 | IP_VS_CONN_F_TEMPLATE, |
291 | dest); | 317 | dest); |
292 | else | 318 | } else |
293 | ct = ip_vs_conn_new(iph->protocol, | 319 | ct = ip_vs_conn_new(svc->af, iph.protocol, |
294 | snet, 0, | 320 | &snet, 0, |
295 | iph->daddr, 0, | 321 | &iph.daddr, 0, |
296 | dest->addr, 0, | 322 | &dest->addr, 0, |
297 | IP_VS_CONN_F_TEMPLATE, | 323 | IP_VS_CONN_F_TEMPLATE, |
298 | dest); | 324 | dest); |
299 | if (ct == NULL) | 325 | if (ct == NULL) |
@@ -310,10 +336,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
310 | /* | 336 | /* |
311 | * Create a new connection according to the template | 337 | * Create a new connection according to the template |
312 | */ | 338 | */ |
313 | cp = ip_vs_conn_new(iph->protocol, | 339 | cp = ip_vs_conn_new(svc->af, iph.protocol, |
314 | iph->saddr, ports[0], | 340 | &iph.saddr, ports[0], |
315 | iph->daddr, ports[1], | 341 | &iph.daddr, ports[1], |
316 | dest->addr, dport, | 342 | &dest->addr, dport, |
317 | 0, | 343 | 0, |
318 | dest); | 344 | dest); |
319 | if (cp == NULL) { | 345 | if (cp == NULL) { |
@@ -342,12 +368,12 @@ struct ip_vs_conn * | |||
342 | ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | 368 | ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) |
343 | { | 369 | { |
344 | struct ip_vs_conn *cp = NULL; | 370 | struct ip_vs_conn *cp = NULL; |
345 | struct iphdr *iph = ip_hdr(skb); | 371 | struct ip_vs_iphdr iph; |
346 | struct ip_vs_dest *dest; | 372 | struct ip_vs_dest *dest; |
347 | __be16 _ports[2], *pptr; | 373 | __be16 _ports[2], *pptr; |
348 | 374 | ||
349 | pptr = skb_header_pointer(skb, iph->ihl*4, | 375 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); |
350 | sizeof(_ports), _ports); | 376 | pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); |
351 | if (pptr == NULL) | 377 | if (pptr == NULL) |
352 | return NULL; | 378 | return NULL; |
353 | 379 | ||
@@ -377,22 +403,22 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
377 | /* | 403 | /* |
378 | * Create a connection entry. | 404 | * Create a connection entry. |
379 | */ | 405 | */ |
380 | cp = ip_vs_conn_new(iph->protocol, | 406 | cp = ip_vs_conn_new(svc->af, iph.protocol, |
381 | iph->saddr, pptr[0], | 407 | &iph.saddr, pptr[0], |
382 | iph->daddr, pptr[1], | 408 | &iph.daddr, pptr[1], |
383 | dest->addr, dest->port?dest->port:pptr[1], | 409 | &dest->addr, dest->port ? dest->port : pptr[1], |
384 | 0, | 410 | 0, |
385 | dest); | 411 | dest); |
386 | if (cp == NULL) | 412 | if (cp == NULL) |
387 | return NULL; | 413 | return NULL; |
388 | 414 | ||
389 | IP_VS_DBG(6, "Schedule fwd:%c c:%u.%u.%u.%u:%u v:%u.%u.%u.%u:%u " | 415 | IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u " |
390 | "d:%u.%u.%u.%u:%u conn->flags:%X conn->refcnt:%d\n", | 416 | "d:%s:%u conn->flags:%X conn->refcnt:%d\n", |
391 | ip_vs_fwd_tag(cp), | 417 | ip_vs_fwd_tag(cp), |
392 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 418 | IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport), |
393 | NIPQUAD(cp->vaddr), ntohs(cp->vport), | 419 | IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport), |
394 | NIPQUAD(cp->daddr), ntohs(cp->dport), | 420 | IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport), |
395 | cp->flags, atomic_read(&cp->refcnt)); | 421 | cp->flags, atomic_read(&cp->refcnt)); |
396 | 422 | ||
397 | ip_vs_conn_stats(cp, svc); | 423 | ip_vs_conn_stats(cp, svc); |
398 | return cp; | 424 | return cp; |
@@ -408,20 +434,27 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | |||
408 | struct ip_vs_protocol *pp) | 434 | struct ip_vs_protocol *pp) |
409 | { | 435 | { |
410 | __be16 _ports[2], *pptr; | 436 | __be16 _ports[2], *pptr; |
411 | struct iphdr *iph = ip_hdr(skb); | 437 | struct ip_vs_iphdr iph; |
438 | int unicast; | ||
439 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | ||
412 | 440 | ||
413 | pptr = skb_header_pointer(skb, iph->ihl*4, | 441 | pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); |
414 | sizeof(_ports), _ports); | ||
415 | if (pptr == NULL) { | 442 | if (pptr == NULL) { |
416 | ip_vs_service_put(svc); | 443 | ip_vs_service_put(svc); |
417 | return NF_DROP; | 444 | return NF_DROP; |
418 | } | 445 | } |
419 | 446 | ||
447 | #ifdef CONFIG_IP_VS_IPV6 | ||
448 | if (svc->af == AF_INET6) | ||
449 | unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST; | ||
450 | else | ||
451 | #endif | ||
452 | unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST); | ||
453 | |||
420 | /* if it is fwmark-based service, the cache_bypass sysctl is up | 454 | /* if it is fwmark-based service, the cache_bypass sysctl is up |
421 | and the destination is RTN_UNICAST (and not local), then create | 455 | and the destination is a non-local unicast, then create |
422 | a cache_bypass connection entry */ | 456 | a cache_bypass connection entry */ |
423 | if (sysctl_ip_vs_cache_bypass && svc->fwmark | 457 | if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) { |
424 | && (inet_addr_type(&init_net, iph->daddr) == RTN_UNICAST)) { | ||
425 | int ret, cs; | 458 | int ret, cs; |
426 | struct ip_vs_conn *cp; | 459 | struct ip_vs_conn *cp; |
427 | 460 | ||
@@ -429,9 +462,9 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | |||
429 | 462 | ||
430 | /* create a new connection entry */ | 463 | /* create a new connection entry */ |
431 | IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n"); | 464 | IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n"); |
432 | cp = ip_vs_conn_new(iph->protocol, | 465 | cp = ip_vs_conn_new(svc->af, iph.protocol, |
433 | iph->saddr, pptr[0], | 466 | &iph.saddr, pptr[0], |
434 | iph->daddr, pptr[1], | 467 | &iph.daddr, pptr[1], |
435 | 0, 0, | 468 | 0, 0, |
436 | IP_VS_CONN_F_BYPASS, | 469 | IP_VS_CONN_F_BYPASS, |
437 | NULL); | 470 | NULL); |
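ip_vs_leave() only creates a cache_bypass entry when the original destination is a non-local unicast address; the hunk above decides that with ipv6_addr_type() for IPv6 and the IPv4 routing table (inet_addr_type()) otherwise. A crude userspace stand-in for the IPv6 side of that test, for illustration only:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

/* very rough: anything neither unspecified nor multicast is treated as
 * unicast; the kernel's ipv6_addr_type() check is more precise */
static int looks_unicast_v6(const struct in6_addr *a)
{
	return !IN6_IS_ADDR_UNSPECIFIED(a) && !IN6_IS_ADDR_MULTICAST(a);
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "2001:db8::1", &a);
	printf("2001:db8::1 -> %d\n", looks_unicast_v6(&a));

	inet_pton(AF_INET6, "ff02::1", &a);
	printf("ff02::1     -> %d\n", looks_unicast_v6(&a));
	return 0;
}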
@@ -473,7 +506,14 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | |||
473 | * created, the TCP RST packet cannot be sent, instead that | 506 | * created, the TCP RST packet cannot be sent, instead that |
474 | * ICMP_PORT_UNREACH is sent here no matter it is TCP/UDP. --WZ | 507 | * ICMP_PORT_UNREACH is sent here no matter it is TCP/UDP. --WZ |
475 | */ | 508 | */ |
476 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); | 509 | #ifdef CONFIG_IP_VS_IPV6 |
510 | if (svc->af == AF_INET6) | ||
511 | icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, | ||
512 | skb->dev); | ||
513 | else | ||
514 | #endif | ||
515 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); | ||
516 | |||
477 | return NF_DROP; | 517 | return NF_DROP; |
478 | } | 518 | } |
479 | 519 | ||
@@ -512,6 +552,14 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user) | |||
512 | return err; | 552 | return err; |
513 | } | 553 | } |
514 | 554 | ||
555 | #ifdef CONFIG_IP_VS_IPV6 | ||
556 | static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user) | ||
557 | { | ||
558 | /* TODO IPv6: Find out what to do here for IPv6 */ | ||
559 | return 0; | ||
560 | } | ||
561 | #endif | ||
562 | |||
515 | /* | 563 | /* |
516 | * Packet has been made sufficiently writable in caller | 564 | * Packet has been made sufficiently writable in caller |
517 | * - inout: 1=in->out, 0=out->in | 565 | * - inout: 1=in->out, 0=out->in |
@@ -526,14 +574,14 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
526 | struct iphdr *ciph = (struct iphdr *)(icmph + 1); | 574 | struct iphdr *ciph = (struct iphdr *)(icmph + 1); |
527 | 575 | ||
528 | if (inout) { | 576 | if (inout) { |
529 | iph->saddr = cp->vaddr; | 577 | iph->saddr = cp->vaddr.ip; |
530 | ip_send_check(iph); | 578 | ip_send_check(iph); |
531 | ciph->daddr = cp->vaddr; | 579 | ciph->daddr = cp->vaddr.ip; |
532 | ip_send_check(ciph); | 580 | ip_send_check(ciph); |
533 | } else { | 581 | } else { |
534 | iph->daddr = cp->daddr; | 582 | iph->daddr = cp->daddr.ip; |
535 | ip_send_check(iph); | 583 | ip_send_check(iph); |
536 | ciph->saddr = cp->daddr; | 584 | ciph->saddr = cp->daddr.ip; |
537 | ip_send_check(ciph); | 585 | ip_send_check(ciph); |
538 | } | 586 | } |
539 | 587 | ||
@@ -560,21 +608,112 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
560 | "Forwarding altered incoming ICMP"); | 608 | "Forwarding altered incoming ICMP"); |
561 | } | 609 | } |
562 | 610 | ||
611 | #ifdef CONFIG_IP_VS_IPV6 | ||
612 | void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
613 | struct ip_vs_conn *cp, int inout) | ||
614 | { | ||
615 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
616 | unsigned int icmp_offset = sizeof(struct ipv6hdr); | ||
617 | struct icmp6hdr *icmph = (struct icmp6hdr *)(skb_network_header(skb) + | ||
618 | icmp_offset); | ||
619 | struct ipv6hdr *ciph = (struct ipv6hdr *)(icmph + 1); | ||
620 | |||
621 | if (inout) { | ||
622 | iph->saddr = cp->vaddr.in6; | ||
623 | ciph->daddr = cp->vaddr.in6; | ||
624 | } else { | ||
625 | iph->daddr = cp->daddr.in6; | ||
626 | ciph->saddr = cp->daddr.in6; | ||
627 | } | ||
628 | |||
629 | /* the TCP/UDP port */ | ||
630 | if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr) { | ||
631 | __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr); | ||
632 | |||
633 | if (inout) | ||
634 | ports[1] = cp->vport; | ||
635 | else | ||
636 | ports[0] = cp->dport; | ||
637 | } | ||
638 | |||
639 | /* And finally the ICMP checksum */ | ||
640 | icmph->icmp6_cksum = 0; | ||
641 | /* TODO IPv6: is this correct for ICMPv6? */ | ||
642 | ip_vs_checksum_complete(skb, icmp_offset); | ||
643 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
644 | |||
645 | if (inout) | ||
646 | IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph, | ||
647 | "Forwarding altered outgoing ICMPv6"); | ||
648 | else | ||
649 | IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph, | ||
650 | "Forwarding altered incoming ICMPv6"); | ||
651 | } | ||
652 | #endif | ||
653 | |||
654 | /* Handle relevant response ICMP messages - forward to the right | ||
655 | * destination host. Used for NAT and local client. | ||
656 | */ | ||
657 | static int handle_response_icmp(int af, struct sk_buff *skb, | ||
658 | union nf_inet_addr *snet, | ||
659 | __u8 protocol, struct ip_vs_conn *cp, | ||
660 | struct ip_vs_protocol *pp, | ||
661 | unsigned int offset, unsigned int ihl) | ||
662 | { | ||
663 | unsigned int verdict = NF_DROP; | ||
664 | |||
665 | if (IP_VS_FWD_METHOD(cp) != 0) { | ||
666 | IP_VS_ERR("shouldn't reach here, because the box is on the " | ||
667 | "half connection in the tun/dr module.\n"); | ||
668 | } | ||
669 | |||
670 | /* Ensure the checksum is correct */ | ||
671 | if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { | ||
672 | /* Failed checksum! */ | ||
673 | IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n", | ||
674 | IP_VS_DBG_ADDR(af, snet)); | ||
675 | goto out; | ||
676 | } | ||
677 | |||
678 | if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol) | ||
679 | offset += 2 * sizeof(__u16); | ||
680 | if (!skb_make_writable(skb, offset)) | ||
681 | goto out; | ||
682 | |||
683 | #ifdef CONFIG_IP_VS_IPV6 | ||
684 | if (af == AF_INET6) | ||
685 | ip_vs_nat_icmp_v6(skb, pp, cp, 1); | ||
686 | else | ||
687 | #endif | ||
688 | ip_vs_nat_icmp(skb, pp, cp, 1); | ||
689 | |||
690 | /* do the statistics and put it back */ | ||
691 | ip_vs_out_stats(cp, skb); | ||
692 | |||
693 | skb->ipvs_property = 1; | ||
694 | verdict = NF_ACCEPT; | ||
695 | |||
696 | out: | ||
697 | __ip_vs_conn_put(cp); | ||
698 | |||
699 | return verdict; | ||
700 | } | ||
701 | |||
563 | /* | 702 | /* |
564 | * Handle ICMP messages in the inside-to-outside direction (outgoing). | 703 | * Handle ICMP messages in the inside-to-outside direction (outgoing). |
565 | * Find any that might be relevant, check against existing connections, | 704 | * Find any that might be relevant, check against existing connections. |
566 | * forward to the right destination host if relevant. | ||
567 | * Currently handles error types - unreachable, quench, ttl exceeded. | 705 | * Currently handles error types - unreachable, quench, ttl exceeded. |
568 | * (Only used in VS/NAT) | ||
569 | */ | 706 | */ |
570 | static int ip_vs_out_icmp(struct sk_buff *skb, int *related) | 707 | static int ip_vs_out_icmp(struct sk_buff *skb, int *related) |
571 | { | 708 | { |
572 | struct iphdr *iph; | 709 | struct iphdr *iph; |
573 | struct icmphdr _icmph, *ic; | 710 | struct icmphdr _icmph, *ic; |
574 | struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ | 711 | struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ |
712 | struct ip_vs_iphdr ciph; | ||
575 | struct ip_vs_conn *cp; | 713 | struct ip_vs_conn *cp; |
576 | struct ip_vs_protocol *pp; | 714 | struct ip_vs_protocol *pp; |
577 | unsigned int offset, ihl, verdict; | 715 | unsigned int offset, ihl; |
716 | union nf_inet_addr snet; | ||
578 | 717 | ||
579 | *related = 1; | 718 | *related = 1; |
580 | 719 | ||
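ip_vs_nat_icmp_v6() above zeroes icmp6_cksum and reuses the generic checksum helper, with a TODO asking whether that is right for ICMPv6. Unlike ICMPv4, the ICMPv6 checksum is defined over a pseudo-header of source address, destination address, payload length and next-header 58 (RFC 4443), so a full recomputation looks roughly like this userspace sketch (illustrative, not the kernel's helper):

#include <arpa/inet.h>
#include <netinet/icmp6.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* RFC 1071 one's-complement accumulation over big-endian 16-bit words */
static uint32_t sum16(const void *data, size_t len, uint32_t sum)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += (p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += p[0] << 8;
	return sum;
}

/* ICMPv6 checksum: pseudo-header (src, dst, length, next header 58)
 * plus the ICMPv6 message itself */
static uint16_t icmp6_checksum(const struct in6_addr *src,
			       const struct in6_addr *dst,
			       const void *msg, size_t len)
{
	uint32_t sum = 0;

	sum = sum16(src, sizeof(*src), sum);
	sum = sum16(dst, sizeof(*dst), sum);
	sum += (uint32_t)len;
	sum += IPPROTO_ICMPV6;
	sum = sum16(msg, len, sum);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	struct in6_addr src, dst;
	struct icmp6_hdr h;

	inet_pton(AF_INET6, "2001:db8::1", &src);
	inet_pton(AF_INET6, "2001:db8::2", &dst);

	memset(&h, 0, sizeof(h));
	h.icmp6_type = ICMP6_ECHO_REQUEST;
	h.icmp6_cksum = htons(icmp6_checksum(&src, &dst, &h, sizeof(h)));

	printf("icmp6 checksum = 0x%04x\n", ntohs(h.icmp6_cksum));
	return 0;
}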
@@ -627,102 +766,231 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related) | |||
627 | 766 | ||
628 | offset += cih->ihl * 4; | 767 | offset += cih->ihl * 4; |
629 | 768 | ||
769 | ip_vs_fill_iphdr(AF_INET, cih, &ciph); | ||
630 | /* The embedded headers contain source and dest in reverse order */ | 770 | /* The embedded headers contain source and dest in reverse order */ |
631 | cp = pp->conn_out_get(skb, pp, cih, offset, 1); | 771 | cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1); |
632 | if (!cp) | 772 | if (!cp) |
633 | return NF_ACCEPT; | 773 | return NF_ACCEPT; |
634 | 774 | ||
635 | verdict = NF_DROP; | 775 | snet.ip = iph->saddr; |
776 | return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, | ||
777 | pp, offset, ihl); | ||
778 | } | ||
636 | 779 | ||
637 | if (IP_VS_FWD_METHOD(cp) != 0) { | 780 | #ifdef CONFIG_IP_VS_IPV6 |
638 | IP_VS_ERR("shouldn't reach here, because the box is on the " | 781 | static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related) |
639 | "half connection in the tun/dr module.\n"); | 782 | { |
783 | struct ipv6hdr *iph; | ||
784 | struct icmp6hdr _icmph, *ic; | ||
785 | struct ipv6hdr _ciph, *cih; /* The ip header contained | ||
786 | within the ICMP */ | ||
787 | struct ip_vs_iphdr ciph; | ||
788 | struct ip_vs_conn *cp; | ||
789 | struct ip_vs_protocol *pp; | ||
790 | unsigned int offset; | ||
791 | union nf_inet_addr snet; | ||
792 | |||
793 | *related = 1; | ||
794 | |||
795 | /* reassemble IP fragments */ | ||
796 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) { | ||
797 | if (ip_vs_gather_frags_v6(skb, IP_DEFRAG_VS_OUT)) | ||
798 | return NF_STOLEN; | ||
640 | } | 799 | } |
641 | 800 | ||
642 | /* Ensure the checksum is correct */ | 801 | iph = ipv6_hdr(skb); |
643 | if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { | 802 | offset = sizeof(struct ipv6hdr); |
644 | /* Failed checksum! */ | 803 | ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); |
645 | IP_VS_DBG(1, "Forward ICMP: failed checksum from %d.%d.%d.%d!\n", | 804 | if (ic == NULL) |
646 | NIPQUAD(iph->saddr)); | 805 | return NF_DROP; |
647 | goto out; | 806 | |
807 | IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) " NIP6_FMT "->" NIP6_FMT "\n", | ||
808 | ic->icmp6_type, ntohs(icmpv6_id(ic)), | ||
809 | NIP6(iph->saddr), NIP6(iph->daddr)); | ||
810 | |||
811 | /* | ||
812 | * Work through seeing if this is for us. | ||
813 | * These checks are supposed to be in an order that means easy | ||
814 | * things are checked first to speed up processing.... however | ||
815 | * this means that some packets will manage to get a long way | ||
816 | * down this stack and then be rejected, but that's life. | ||
817 | */ | ||
818 | if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) && | ||
819 | (ic->icmp6_type != ICMPV6_PKT_TOOBIG) && | ||
820 | (ic->icmp6_type != ICMPV6_TIME_EXCEED)) { | ||
821 | *related = 0; | ||
822 | return NF_ACCEPT; | ||
648 | } | 823 | } |
649 | 824 | ||
650 | if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) | 825 | /* Now find the contained IP header */ |
651 | offset += 2 * sizeof(__u16); | 826 | offset += sizeof(_icmph); |
652 | if (!skb_make_writable(skb, offset)) | 827 | cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); |
653 | goto out; | 828 | if (cih == NULL) |
829 | return NF_ACCEPT; /* The packet looks wrong, ignore */ | ||
654 | 830 | ||
655 | ip_vs_nat_icmp(skb, pp, cp, 1); | 831 | pp = ip_vs_proto_get(cih->nexthdr); |
832 | if (!pp) | ||
833 | return NF_ACCEPT; | ||
656 | 834 | ||
657 | /* do the statistics and put it back */ | 835 | /* Is the embedded protocol header present? */ |
658 | ip_vs_out_stats(cp, skb); | 836 | /* TODO: we don't support fragmentation at the moment anyways */ |
837 | if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag)) | ||
838 | return NF_ACCEPT; | ||
659 | 839 | ||
660 | skb->ipvs_property = 1; | 840 | IP_VS_DBG_PKT(11, pp, skb, offset, "Checking outgoing ICMPv6 for"); |
661 | verdict = NF_ACCEPT; | ||
662 | 841 | ||
663 | out: | 842 | offset += sizeof(struct ipv6hdr); |
664 | __ip_vs_conn_put(cp); | ||
665 | 843 | ||
666 | return verdict; | 844 | ip_vs_fill_iphdr(AF_INET6, cih, &ciph); |
845 | /* The embedded headers contain source and dest in reverse order */ | ||
846 | cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1); | ||
847 | if (!cp) | ||
848 | return NF_ACCEPT; | ||
849 | |||
850 | ipv6_addr_copy(&snet.in6, &iph->saddr); | ||
851 | return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp, | ||
852 | pp, offset, sizeof(struct ipv6hdr)); | ||
667 | } | 853 | } |
854 | #endif | ||
668 | 855 | ||
669 | static inline int is_tcp_reset(const struct sk_buff *skb) | 856 | static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len) |
670 | { | 857 | { |
671 | struct tcphdr _tcph, *th; | 858 | struct tcphdr _tcph, *th; |
672 | 859 | ||
673 | th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); | 860 | th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph); |
674 | if (th == NULL) | 861 | if (th == NULL) |
675 | return 0; | 862 | return 0; |
676 | return th->rst; | 863 | return th->rst; |
677 | } | 864 | } |
678 | 865 | ||
866 | /* Handle response packets: rewrite addresses and send away... | ||
867 | * Used for NAT and local client. | ||
868 | */ | ||
869 | static unsigned int | ||
870 | handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
871 | struct ip_vs_conn *cp, int ihl) | ||
872 | { | ||
873 | IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet"); | ||
874 | |||
875 | if (!skb_make_writable(skb, ihl)) | ||
876 | goto drop; | ||
877 | |||
878 | /* mangle the packet */ | ||
879 | if (pp->snat_handler && !pp->snat_handler(skb, pp, cp)) | ||
880 | goto drop; | ||
881 | |||
882 | #ifdef CONFIG_IP_VS_IPV6 | ||
883 | if (af == AF_INET6) | ||
884 | ipv6_hdr(skb)->saddr = cp->vaddr.in6; | ||
885 | else | ||
886 | #endif | ||
887 | { | ||
888 | ip_hdr(skb)->saddr = cp->vaddr.ip; | ||
889 | ip_send_check(ip_hdr(skb)); | ||
890 | } | ||
891 | |||
892 | /* For policy routing, packets originating from this | ||
893 | * machine itself may be routed differently to packets | ||
894 | * passing through. We want this packet to be routed as | ||
895 | * if it came from this machine itself. So re-compute | ||
896 | * the routing information. | ||
897 | */ | ||
898 | #ifdef CONFIG_IP_VS_IPV6 | ||
899 | if (af == AF_INET6) { | ||
900 | if (ip6_route_me_harder(skb) != 0) | ||
901 | goto drop; | ||
902 | } else | ||
903 | #endif | ||
904 | if (ip_route_me_harder(skb, RTN_LOCAL) != 0) | ||
905 | goto drop; | ||
906 | |||
907 | IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT"); | ||
908 | |||
909 | ip_vs_out_stats(cp, skb); | ||
910 | ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); | ||
911 | ip_vs_conn_put(cp); | ||
912 | |||
913 | skb->ipvs_property = 1; | ||
914 | |||
915 | LeaveFunction(11); | ||
916 | return NF_ACCEPT; | ||
917 | |||
918 | drop: | ||
919 | ip_vs_conn_put(cp); | ||
920 | kfree_skb(skb); | ||
921 | return NF_STOLEN; | ||
922 | } | ||
923 | |||
679 | /* | 924 | /* |
680 | * It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT. | 925 | * It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT. |
681 | * Check if outgoing packet belongs to the established ip_vs_conn, | 926 | * Check if outgoing packet belongs to the established ip_vs_conn. |
682 | * rewrite addresses of the packet and send it on its way... | ||
683 | */ | 927 | */ |
684 | static unsigned int | 928 | static unsigned int |
685 | ip_vs_out(unsigned int hooknum, struct sk_buff *skb, | 929 | ip_vs_out(unsigned int hooknum, struct sk_buff *skb, |
686 | const struct net_device *in, const struct net_device *out, | 930 | const struct net_device *in, const struct net_device *out, |
687 | int (*okfn)(struct sk_buff *)) | 931 | int (*okfn)(struct sk_buff *)) |
688 | { | 932 | { |
689 | struct iphdr *iph; | 933 | struct ip_vs_iphdr iph; |
690 | struct ip_vs_protocol *pp; | 934 | struct ip_vs_protocol *pp; |
691 | struct ip_vs_conn *cp; | 935 | struct ip_vs_conn *cp; |
692 | int ihl; | 936 | int af; |
693 | 937 | ||
694 | EnterFunction(11); | 938 | EnterFunction(11); |
695 | 939 | ||
940 | af = (skb->protocol == __constant_htons(ETH_P_IP)) ? AF_INET : AF_INET6; | ||
941 | |||
696 | if (skb->ipvs_property) | 942 | if (skb->ipvs_property) |
697 | return NF_ACCEPT; | 943 | return NF_ACCEPT; |
698 | 944 | ||
699 | iph = ip_hdr(skb); | 945 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
700 | if (unlikely(iph->protocol == IPPROTO_ICMP)) { | 946 | #ifdef CONFIG_IP_VS_IPV6 |
701 | int related, verdict = ip_vs_out_icmp(skb, &related); | 947 | if (af == AF_INET6) { |
948 | if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { | ||
949 | int related, verdict = ip_vs_out_icmp_v6(skb, &related); | ||
702 | 950 | ||
703 | if (related) | 951 | if (related) |
704 | return verdict; | 952 | return verdict; |
705 | iph = ip_hdr(skb); | 953 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
706 | } | 954 | } |
955 | } else | ||
956 | #endif | ||
957 | if (unlikely(iph.protocol == IPPROTO_ICMP)) { | ||
958 | int related, verdict = ip_vs_out_icmp(skb, &related); | ||
707 | 959 | ||
708 | pp = ip_vs_proto_get(iph->protocol); | 960 | if (related) |
961 | return verdict; | ||
962 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
963 | } | ||
964 | |||
965 | pp = ip_vs_proto_get(iph.protocol); | ||
709 | if (unlikely(!pp)) | 966 | if (unlikely(!pp)) |
710 | return NF_ACCEPT; | 967 | return NF_ACCEPT; |
711 | 968 | ||
712 | /* reassemble IP fragments */ | 969 | /* reassemble IP fragments */ |
713 | if (unlikely(iph->frag_off & htons(IP_MF|IP_OFFSET) && | 970 | #ifdef CONFIG_IP_VS_IPV6 |
714 | !pp->dont_defrag)) { | 971 | if (af == AF_INET6) { |
715 | if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT)) | 972 | if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { |
716 | return NF_STOLEN; | 973 | int related, verdict = ip_vs_out_icmp_v6(skb, &related); |
717 | iph = ip_hdr(skb); | 974 | |
718 | } | 975 | if (related) |
976 | return verdict; | ||
719 | 977 | ||
720 | ihl = iph->ihl << 2; | 978 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
979 | } | ||
980 | } else | ||
981 | #endif | ||
982 | if (unlikely(ip_hdr(skb)->frag_off & htons(IP_MF|IP_OFFSET) && | ||
983 | !pp->dont_defrag)) { | ||
984 | if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT)) | ||
985 | return NF_STOLEN; | ||
986 | |||
987 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
988 | } | ||
721 | 989 | ||
722 | /* | 990 | /* |
723 | * Check if the packet belongs to an existing entry | 991 | * Check if the packet belongs to an existing entry |
724 | */ | 992 | */ |
725 | cp = pp->conn_out_get(skb, pp, iph, ihl, 0); | 993 | cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0); |
726 | 994 | ||
727 | if (unlikely(!cp)) { | 995 | if (unlikely(!cp)) { |
728 | if (sysctl_ip_vs_nat_icmp_send && | 996 | if (sysctl_ip_vs_nat_icmp_send && |
@@ -730,21 +998,31 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, | |||
730 | pp->protocol == IPPROTO_UDP)) { | 998 | pp->protocol == IPPROTO_UDP)) { |
731 | __be16 _ports[2], *pptr; | 999 | __be16 _ports[2], *pptr; |
732 | 1000 | ||
733 | pptr = skb_header_pointer(skb, ihl, | 1001 | pptr = skb_header_pointer(skb, iph.len, |
734 | sizeof(_ports), _ports); | 1002 | sizeof(_ports), _ports); |
735 | if (pptr == NULL) | 1003 | if (pptr == NULL) |
736 | return NF_ACCEPT; /* Not for me */ | 1004 | return NF_ACCEPT; /* Not for me */ |
737 | if (ip_vs_lookup_real_service(iph->protocol, | 1005 | if (ip_vs_lookup_real_service(af, iph.protocol, |
738 | iph->saddr, pptr[0])) { | 1006 | &iph.saddr, |
1007 | pptr[0])) { | ||
739 | /* | 1008 | /* |
740 | * Notify the real server: there is no | 1009 | * Notify the real server: there is no |
741 | * existing entry if it is not RST | 1010 | * existing entry if it is not RST |
742 | * packet or not TCP packet. | 1011 | * packet or not TCP packet. |
743 | */ | 1012 | */ |
744 | if (iph->protocol != IPPROTO_TCP | 1013 | if (iph.protocol != IPPROTO_TCP |
745 | || !is_tcp_reset(skb)) { | 1014 | || !is_tcp_reset(skb, iph.len)) { |
746 | icmp_send(skb,ICMP_DEST_UNREACH, | 1015 | #ifdef CONFIG_IP_VS_IPV6 |
747 | ICMP_PORT_UNREACH, 0); | 1016 | if (af == AF_INET6) |
1017 | icmpv6_send(skb, | ||
1018 | ICMPV6_DEST_UNREACH, | ||
1019 | ICMPV6_PORT_UNREACH, | ||
1020 | 0, skb->dev); | ||
1021 | else | ||
1022 | #endif | ||
1023 | icmp_send(skb, | ||
1024 | ICMP_DEST_UNREACH, | ||
1025 | ICMP_PORT_UNREACH, 0); | ||
748 | return NF_DROP; | 1026 | return NF_DROP; |
749 | } | 1027 | } |
750 | } | 1028 | } |
@@ -754,41 +1032,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, | |||
754 | return NF_ACCEPT; | 1032 | return NF_ACCEPT; |
755 | } | 1033 | } |
756 | 1034 | ||
757 | IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet"); | 1035 | return handle_response(af, skb, pp, cp, iph.len); |
758 | |||
759 | if (!skb_make_writable(skb, ihl)) | ||
760 | goto drop; | ||
761 | |||
762 | /* mangle the packet */ | ||
763 | if (pp->snat_handler && !pp->snat_handler(skb, pp, cp)) | ||
764 | goto drop; | ||
765 | ip_hdr(skb)->saddr = cp->vaddr; | ||
766 | ip_send_check(ip_hdr(skb)); | ||
767 | |||
768 | /* For policy routing, packets originating from this | ||
769 | * machine itself may be routed differently to packets | ||
770 | * passing through. We want this packet to be routed as | ||
771 | * if it came from this machine itself. So re-compute | ||
772 | * the routing information. | ||
773 | */ | ||
774 | if (ip_route_me_harder(skb, RTN_LOCAL) != 0) | ||
775 | goto drop; | ||
776 | |||
777 | IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT"); | ||
778 | |||
779 | ip_vs_out_stats(cp, skb); | ||
780 | ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); | ||
781 | ip_vs_conn_put(cp); | ||
782 | |||
783 | skb->ipvs_property = 1; | ||
784 | |||
785 | LeaveFunction(11); | ||
786 | return NF_ACCEPT; | ||
787 | |||
788 | drop: | ||
789 | ip_vs_conn_put(cp); | ||
790 | kfree_skb(skb); | ||
791 | return NF_STOLEN; | ||
792 | } | 1036 | } |
793 | 1037 | ||
794 | 1038 | ||
@@ -804,9 +1048,11 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
804 | struct iphdr *iph; | 1048 | struct iphdr *iph; |
805 | struct icmphdr _icmph, *ic; | 1049 | struct icmphdr _icmph, *ic; |
806 | struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ | 1050 | struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ |
1051 | struct ip_vs_iphdr ciph; | ||
807 | struct ip_vs_conn *cp; | 1052 | struct ip_vs_conn *cp; |
808 | struct ip_vs_protocol *pp; | 1053 | struct ip_vs_protocol *pp; |
809 | unsigned int offset, ihl, verdict; | 1054 | unsigned int offset, ihl, verdict; |
1055 | union nf_inet_addr snet; | ||
810 | 1056 | ||
811 | *related = 1; | 1057 | *related = 1; |
812 | 1058 | ||
@@ -860,10 +1106,20 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
860 | 1106 | ||
861 | offset += cih->ihl * 4; | 1107 | offset += cih->ihl * 4; |
862 | 1108 | ||
1109 | ip_vs_fill_iphdr(AF_INET, cih, &ciph); | ||
863 | /* The embedded headers contain source and dest in reverse order */ | 1110 | /* The embedded headers contain source and dest in reverse order */ |
864 | cp = pp->conn_in_get(skb, pp, cih, offset, 1); | 1111 | cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1); |
865 | if (!cp) | 1112 | if (!cp) { |
1113 | /* The packet could also belong to a local client */ | ||
1114 | cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1); | ||
1115 | if (cp) { | ||
1116 | snet.ip = iph->saddr; | ||
1117 | return handle_response_icmp(AF_INET, skb, &snet, | ||
1118 | cih->protocol, cp, pp, | ||
1119 | offset, ihl); | ||
1120 | } | ||
866 | return NF_ACCEPT; | 1121 | return NF_ACCEPT; |
1122 | } | ||
867 | 1123 | ||
868 | verdict = NF_DROP; | 1124 | verdict = NF_DROP; |
869 | 1125 | ||
@@ -888,6 +1144,105 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
888 | return verdict; | 1144 | return verdict; |
889 | } | 1145 | } |
890 | 1146 | ||
1147 | #ifdef CONFIG_IP_VS_IPV6 | ||
1148 | static int | ||
1149 | ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) | ||
1150 | { | ||
1151 | struct ipv6hdr *iph; | ||
1152 | struct icmp6hdr _icmph, *ic; | ||
1153 | struct ipv6hdr _ciph, *cih; /* The ip header contained | ||
1154 | within the ICMP */ | ||
1155 | struct ip_vs_iphdr ciph; | ||
1156 | struct ip_vs_conn *cp; | ||
1157 | struct ip_vs_protocol *pp; | ||
1158 | unsigned int offset, verdict; | ||
1159 | union nf_inet_addr snet; | ||
1160 | |||
1161 | *related = 1; | ||
1162 | |||
1163 | /* reassemble IP fragments */ | ||
1164 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) { | ||
1165 | if (ip_vs_gather_frags_v6(skb, hooknum == NF_INET_LOCAL_IN ? | ||
1166 | IP_DEFRAG_VS_IN : | ||
1167 | IP_DEFRAG_VS_FWD)) | ||
1168 | return NF_STOLEN; | ||
1169 | } | ||
1170 | |||
1171 | iph = ipv6_hdr(skb); | ||
1172 | offset = sizeof(struct ipv6hdr); | ||
1173 | ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); | ||
1174 | if (ic == NULL) | ||
1175 | return NF_DROP; | ||
1176 | |||
1177 | IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) " NIP6_FMT "->" NIP6_FMT "\n", | ||
1178 | ic->icmp6_type, ntohs(icmpv6_id(ic)), | ||
1179 | NIP6(iph->saddr), NIP6(iph->daddr)); | ||
1180 | |||
1181 | /* | ||
1182 | * Work through seeing if this is for us. | ||
1183 | * These checks are supposed to be in an order that means easy | ||
1184 | * things are checked first to speed up processing.... however | ||
1185 | * this means that some packets will manage to get a long way | ||
1186 | * down this stack and then be rejected, but that's life. | ||
1187 | */ | ||
1188 | if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) && | ||
1189 | (ic->icmp6_type != ICMPV6_PKT_TOOBIG) && | ||
1190 | (ic->icmp6_type != ICMPV6_TIME_EXCEED)) { | ||
1191 | *related = 0; | ||
1192 | return NF_ACCEPT; | ||
1193 | } | ||
1194 | |||
1195 | /* Now find the contained IP header */ | ||
1196 | offset += sizeof(_icmph); | ||
1197 | cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); | ||
1198 | if (cih == NULL) | ||
1199 | return NF_ACCEPT; /* The packet looks wrong, ignore */ | ||
1200 | |||
1201 | pp = ip_vs_proto_get(cih->nexthdr); | ||
1202 | if (!pp) | ||
1203 | return NF_ACCEPT; | ||
1204 | |||
1205 | /* Is the embedded protocol header present? */ | ||
1206 | /* TODO: we don't support fragmentation at the moment anyways */ | ||
1207 | if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag)) | ||
1208 | return NF_ACCEPT; | ||
1209 | |||
1210 | IP_VS_DBG_PKT(11, pp, skb, offset, "Checking incoming ICMPv6 for"); | ||
1211 | |||
1212 | offset += sizeof(struct ipv6hdr); | ||
1213 | |||
1214 | ip_vs_fill_iphdr(AF_INET6, cih, &ciph); | ||
1215 | /* The embedded headers contain source and dest in reverse order */ | ||
1216 | cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1); | ||
1217 | if (!cp) { | ||
1218 | /* The packet could also belong to a local client */ | ||
1219 | cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1); | ||
1220 | if (cp) { | ||
1221 | ipv6_addr_copy(&snet.in6, &iph->saddr); | ||
1222 | return handle_response_icmp(AF_INET6, skb, &snet, | ||
1223 | cih->nexthdr, | ||
1224 | cp, pp, offset, | ||
1225 | sizeof(struct ipv6hdr)); | ||
1226 | } | ||
1227 | return NF_ACCEPT; | ||
1228 | } | ||
1229 | |||
1230 | verdict = NF_DROP; | ||
1231 | |||
1232 | /* do the statistics and put it back */ | ||
1233 | ip_vs_in_stats(cp, skb); | ||
1234 | if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr) | ||
1235 | offset += 2 * sizeof(__u16); | ||
1236 | verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset); | ||
1237 | /* do not touch skb anymore */ | ||
1238 | |||
1239 | __ip_vs_conn_put(cp); | ||
1240 | |||
1241 | return verdict; | ||
1242 | } | ||
1243 | #endif | ||
1244 | |||
1245 | |||
891 | /* | 1246 | /* |
892 | * Check if it's for virtual services, look it up, | 1247 | * Check if it's for virtual services, look it up, |
893 | * and send it on its way... | 1248 | * and send it on its way... |
@@ -897,50 +1252,54 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, | |||
897 | const struct net_device *in, const struct net_device *out, | 1252 | const struct net_device *in, const struct net_device *out, |
898 | int (*okfn)(struct sk_buff *)) | 1253 | int (*okfn)(struct sk_buff *)) |
899 | { | 1254 | { |
900 | struct iphdr *iph; | 1255 | struct ip_vs_iphdr iph; |
901 | struct ip_vs_protocol *pp; | 1256 | struct ip_vs_protocol *pp; |
902 | struct ip_vs_conn *cp; | 1257 | struct ip_vs_conn *cp; |
903 | int ret, restart; | 1258 | int ret, restart, af; |
904 | int ihl; | 1259 | |
1260 | af = (skb->protocol == __constant_htons(ETH_P_IP)) ? AF_INET : AF_INET6; | ||
1261 | |||
1262 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
905 | 1263 | ||
906 | /* | 1264 | /* |
907 | * Big tappo: only PACKET_HOST (neither loopback nor mcasts) | 1265 | * Big tappo: only PACKET_HOST, including loopback for local client |
908 | * ... don't know why 1st test DOES NOT include 2nd (?) | 1266 | * Don't handle local packets on IPv6 for now |
909 | */ | 1267 | */ |
910 | if (unlikely(skb->pkt_type != PACKET_HOST | 1268 | if (unlikely(skb->pkt_type != PACKET_HOST)) { |
911 | || skb->dev->flags & IFF_LOOPBACK || skb->sk)) { | 1269 | IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s ignored\n", |
912 | IP_VS_DBG(12, "packet type=%d proto=%d daddr=%d.%d.%d.%d ignored\n", | 1270 | skb->pkt_type, |
913 | skb->pkt_type, | 1271 | iph.protocol, |
914 | ip_hdr(skb)->protocol, | 1272 | IP_VS_DBG_ADDR(af, &iph.daddr)); |
915 | NIPQUAD(ip_hdr(skb)->daddr)); | ||
916 | return NF_ACCEPT; | 1273 | return NF_ACCEPT; |
917 | } | 1274 | } |
918 | 1275 | ||
919 | iph = ip_hdr(skb); | 1276 | if (unlikely(iph.protocol == IPPROTO_ICMP)) { |
920 | if (unlikely(iph->protocol == IPPROTO_ICMP)) { | ||
921 | int related, verdict = ip_vs_in_icmp(skb, &related, hooknum); | 1277 | int related, verdict = ip_vs_in_icmp(skb, &related, hooknum); |
922 | 1278 | ||
923 | if (related) | 1279 | if (related) |
924 | return verdict; | 1280 | return verdict; |
925 | iph = ip_hdr(skb); | 1281 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
926 | } | 1282 | } |
927 | 1283 | ||
928 | /* Protocol supported? */ | 1284 | /* Protocol supported? */ |
929 | pp = ip_vs_proto_get(iph->protocol); | 1285 | pp = ip_vs_proto_get(iph.protocol); |
930 | if (unlikely(!pp)) | 1286 | if (unlikely(!pp)) |
931 | return NF_ACCEPT; | 1287 | return NF_ACCEPT; |
932 | 1288 | ||
933 | ihl = iph->ihl << 2; | ||
934 | |||
935 | /* | 1289 | /* |
936 | * Check if the packet belongs to an existing connection entry | 1290 | * Check if the packet belongs to an existing connection entry |
937 | */ | 1291 | */ |
938 | cp = pp->conn_in_get(skb, pp, iph, ihl, 0); | 1292 | cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0); |
939 | 1293 | ||
940 | if (unlikely(!cp)) { | 1294 | if (unlikely(!cp)) { |
941 | int v; | 1295 | int v; |
942 | 1296 | ||
943 | if (!pp->conn_schedule(skb, pp, &v, &cp)) | 1297 | /* For local client packets, it could be a response */ |
1298 | cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0); | ||
1299 | if (cp) | ||
1300 | return handle_response(af, skb, pp, cp, iph.len); | ||
1301 | |||
1302 | if (!pp->conn_schedule(af, skb, pp, &v, &cp)) | ||
944 | return v; | 1303 | return v; |
945 | } | 1304 | } |
946 | 1305 | ||
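ip_vs_out() and ip_vs_in() now start by deriving the address family from the link-layer protocol (ETH_P_IP means AF_INET, anything else is taken as AF_INET6) and then fill a small family-agnostic header summary with ip_vs_fill_iphdr(), replacing the raw struct iphdr pointer that used to be threaded through. The real ip_vs_iphdr lives in a header that is not part of this diff; the sketch below only models the idea, with assumed field names:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* family-agnostic summary of the network header (field names assumed) */
struct vs_iphdr {
	int		af;
	uint8_t		protocol;
	unsigned int	len;		/* offset to the transport header */
	struct in6_addr	saddr, daddr;	/* v4 addresses use the first 4 bytes */
};

static void fill_iphdr(int af, const void *nh, struct vs_iphdr *h)
{
	memset(h, 0, sizeof(*h));
	h->af = af;
	if (af == AF_INET6) {
		const struct ip6_hdr *ip6 = nh;
		h->protocol = ip6->ip6_nxt;
		h->len = sizeof(*ip6);
		h->saddr = ip6->ip6_src;
		h->daddr = ip6->ip6_dst;
	} else {
		const struct ip *ip4 = nh;
		h->protocol = ip4->ip_p;
		h->len = ip4->ip_hl * 4;
		memcpy(&h->saddr, &ip4->ip_src, 4);
		memcpy(&h->daddr, &ip4->ip_dst, 4);
	}
}

int main(void)
{
	struct ip ip4;
	struct vs_iphdr h;
	char buf[INET6_ADDRSTRLEN];

	memset(&ip4, 0, sizeof(ip4));
	ip4.ip_hl = 5;
	ip4.ip_p = IPPROTO_TCP;
	inet_pton(AF_INET, "192.0.2.1", &ip4.ip_src);
	inet_pton(AF_INET, "192.0.2.2", &ip4.ip_dst);

	fill_iphdr(AF_INET, &ip4, &h);
	printf("af=%d proto=%u l4-offset=%u src=%s\n", h.af, h.protocol, h.len,
	       inet_ntop(AF_INET, &h.saddr, buf, sizeof(buf)));
	return 0;
}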
@@ -984,7 +1343,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, | |||
984 | * encorage the standby servers to update the connections timeout | 1343 | * encorage the standby servers to update the connections timeout |
984 | * encourage the standby servers to update the connection timeouts | 1343 |
985 | */ | 1344 | */ |
986 | atomic_inc(&cp->in_pkts); | 1345 | atomic_inc(&cp->in_pkts); |
987 | if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && | 1346 | if (af == AF_INET && |
1347 | (ip_vs_sync_state & IP_VS_STATE_MASTER) && | ||
988 | (((cp->protocol != IPPROTO_TCP || | 1348 | (((cp->protocol != IPPROTO_TCP || |
989 | cp->state == IP_VS_TCP_S_ESTABLISHED) && | 1349 | cp->state == IP_VS_TCP_S_ESTABLISHED) && |
990 | (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1] | 1350 | (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1] |
@@ -1023,6 +1383,21 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb, | |||
1023 | return ip_vs_in_icmp(skb, &r, hooknum); | 1383 | return ip_vs_in_icmp(skb, &r, hooknum); |
1024 | } | 1384 | } |
1025 | 1385 | ||
1386 | #ifdef CONFIG_IP_VS_IPV6 | ||
1387 | static unsigned int | ||
1388 | ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, | ||
1389 | const struct net_device *in, const struct net_device *out, | ||
1390 | int (*okfn)(struct sk_buff *)) | ||
1391 | { | ||
1392 | int r; | ||
1393 | |||
1394 | if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) | ||
1395 | return NF_ACCEPT; | ||
1396 | |||
1397 | return ip_vs_in_icmp_v6(skb, &r, hooknum); | ||
1398 | } | ||
1399 | #endif | ||
1400 | |||
1026 | 1401 | ||
1027 | static struct nf_hook_ops ip_vs_ops[] __read_mostly = { | 1402 | static struct nf_hook_ops ip_vs_ops[] __read_mostly = { |
1028 | /* After packet filtering, forward packet through VS/DR, VS/TUN, | 1403 | /* After packet filtering, forward packet through VS/DR, VS/TUN, |
@@ -1060,6 +1435,43 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = { | |||
1060 | .hooknum = NF_INET_POST_ROUTING, | 1435 | .hooknum = NF_INET_POST_ROUTING, |
1061 | .priority = NF_IP_PRI_NAT_SRC-1, | 1436 | .priority = NF_IP_PRI_NAT_SRC-1, |
1062 | }, | 1437 | }, |
1438 | #ifdef CONFIG_IP_VS_IPV6 | ||
1439 | /* After packet filtering, forward packet through VS/DR, VS/TUN, | ||
1440 | * or VS/NAT(change destination), so that filtering rules can be | ||
1441 | * applied to IPVS. */ | ||
1442 | { | ||
1443 | .hook = ip_vs_in, | ||
1444 | .owner = THIS_MODULE, | ||
1445 | .pf = PF_INET6, | ||
1446 | .hooknum = NF_INET_LOCAL_IN, | ||
1447 | .priority = 100, | ||
1448 | }, | ||
1449 | /* After packet filtering, change source only for VS/NAT */ | ||
1450 | { | ||
1451 | .hook = ip_vs_out, | ||
1452 | .owner = THIS_MODULE, | ||
1453 | .pf = PF_INET6, | ||
1454 | .hooknum = NF_INET_FORWARD, | ||
1455 | .priority = 100, | ||
1456 | }, | ||
1457 | /* After packet filtering (but before ip_vs_out_icmp), catch icmp | ||
1458 | * destined for 0.0.0.0/0, which is for incoming IPVS connections */ | ||
1459 | { | ||
1460 | .hook = ip_vs_forward_icmp_v6, | ||
1461 | .owner = THIS_MODULE, | ||
1462 | .pf = PF_INET6, | ||
1463 | .hooknum = NF_INET_FORWARD, | ||
1464 | .priority = 99, | ||
1465 | }, | ||
1466 | /* Before the netfilter connection tracking, exit from POST_ROUTING */ | ||
1467 | { | ||
1468 | .hook = ip_vs_post_routing, | ||
1469 | .owner = THIS_MODULE, | ||
1470 | .pf = PF_INET6, | ||
1471 | .hooknum = NF_INET_POST_ROUTING, | ||
1472 | .priority = NF_IP6_PRI_NAT_SRC-1, | ||
1473 | }, | ||
1474 | #endif | ||
1063 | }; | 1475 | }; |
1064 | 1476 | ||
1065 | 1477 | ||
@@ -1070,10 +1482,12 @@ static int __init ip_vs_init(void) | |||
1070 | { | 1482 | { |
1071 | int ret; | 1483 | int ret; |
1072 | 1484 | ||
1485 | ip_vs_estimator_init(); | ||
1486 | |||
1073 | ret = ip_vs_control_init(); | 1487 | ret = ip_vs_control_init(); |
1074 | if (ret < 0) { | 1488 | if (ret < 0) { |
1075 | IP_VS_ERR("can't setup control.\n"); | 1489 | IP_VS_ERR("can't setup control.\n"); |
1076 | goto cleanup_nothing; | 1490 | goto cleanup_estimator; |
1077 | } | 1491 | } |
1078 | 1492 | ||
1079 | ip_vs_protocol_init(); | 1493 | ip_vs_protocol_init(); |
@@ -1106,7 +1520,8 @@ static int __init ip_vs_init(void) | |||
1106 | cleanup_protocol: | 1520 | cleanup_protocol: |
1107 | ip_vs_protocol_cleanup(); | 1521 | ip_vs_protocol_cleanup(); |
1108 | ip_vs_control_cleanup(); | 1522 | ip_vs_control_cleanup(); |
1109 | cleanup_nothing: | 1523 | cleanup_estimator: |
1524 | ip_vs_estimator_cleanup(); | ||
1110 | return ret; | 1525 | return ret; |
1111 | } | 1526 | } |
1112 | 1527 | ||
@@ -1117,6 +1532,7 @@ static void __exit ip_vs_cleanup(void) | |||
1117 | ip_vs_app_cleanup(); | 1532 | ip_vs_app_cleanup(); |
1118 | ip_vs_protocol_cleanup(); | 1533 | ip_vs_protocol_cleanup(); |
1119 | ip_vs_control_cleanup(); | 1534 | ip_vs_control_cleanup(); |
1535 | ip_vs_estimator_cleanup(); | ||
1120 | IP_VS_INFO("ipvs unloaded.\n"); | 1536 | IP_VS_INFO("ipvs unloaded.\n"); |
1121 | } | 1537 | } |
1122 | 1538 | ||
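The last two hunks of ip_vs_core.c reorder initialisation so the estimator is brought up before the control code and torn down after it, with the error labels shifted to match. It is the usual symmetric register/unwind pattern; a toy version with stubbed subsystems (names made up) for illustration:

#include <stdio.h>

static void estimator_init(void)    { puts("estimator up"); }
static void estimator_cleanup(void) { puts("estimator down"); }
static int  control_init(void)      { puts("control up"); return 0; }
static void control_cleanup(void)   { puts("control down"); }
static int  app_init(void)          { puts("app up"); return -1; /* fail */ }

static int demo_init(void)
{
	int ret;

	estimator_init();		/* now first... */

	ret = control_init();
	if (ret < 0)
		goto cleanup_estimator;

	ret = app_init();
	if (ret < 0)
		goto cleanup_control;

	return 0;

	/* ...so it is also the last thing undone on failure or unload */
cleanup_control:
	control_cleanup();
cleanup_estimator:
	estimator_cleanup();
	return ret;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}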
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index 6379705a8dcb..993a83fb0d56 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c | |||
@@ -35,8 +35,13 @@ | |||
35 | 35 | ||
36 | #include <net/net_namespace.h> | 36 | #include <net/net_namespace.h> |
37 | #include <net/ip.h> | 37 | #include <net/ip.h> |
38 | #ifdef CONFIG_IP_VS_IPV6 | ||
39 | #include <net/ipv6.h> | ||
40 | #include <net/ip6_route.h> | ||
41 | #endif | ||
38 | #include <net/route.h> | 42 | #include <net/route.h> |
39 | #include <net/sock.h> | 43 | #include <net/sock.h> |
44 | #include <net/genetlink.h> | ||
40 | 45 | ||
41 | #include <asm/uaccess.h> | 46 | #include <asm/uaccess.h> |
42 | 47 | ||
@@ -90,6 +95,26 @@ int ip_vs_get_debug_level(void) | |||
90 | } | 95 | } |
91 | #endif | 96 | #endif |
92 | 97 | ||
98 | #ifdef CONFIG_IP_VS_IPV6 | ||
99 | /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ | ||
100 | static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr) | ||
101 | { | ||
102 | struct rt6_info *rt; | ||
103 | struct flowi fl = { | ||
104 | .oif = 0, | ||
105 | .nl_u = { | ||
106 | .ip6_u = { | ||
107 | .daddr = *addr, | ||
108 | .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } }, | ||
109 | }; | ||
110 | |||
111 | rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); | ||
112 | if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK)) | ||
113 | return 1; | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | #endif | ||
93 | /* | 118 | /* |
94 | * update_defense_level is called from keventd and from sysctl, | 119 | * update_defense_level is called from keventd and from sysctl, |
95 | * so it needs to protect itself from softirqs | 120 | * so it needs to protect itself from softirqs |
@@ -281,11 +306,19 @@ static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0); | |||
281 | * Returns hash value for virtual service | 306 | * Returns hash value for virtual service |
282 | */ | 307 | */ |
283 | static __inline__ unsigned | 308 | static __inline__ unsigned |
284 | ip_vs_svc_hashkey(unsigned proto, __be32 addr, __be16 port) | 309 | ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr, |
310 | __be16 port) | ||
285 | { | 311 | { |
286 | register unsigned porth = ntohs(port); | 312 | register unsigned porth = ntohs(port); |
313 | __be32 addr_fold = addr->ip; | ||
314 | |||
315 | #ifdef CONFIG_IP_VS_IPV6 | ||
316 | if (af == AF_INET6) | ||
317 | addr_fold = addr->ip6[0]^addr->ip6[1]^ | ||
318 | addr->ip6[2]^addr->ip6[3]; | ||
319 | #endif | ||
287 | 320 | ||
288 | return (proto^ntohl(addr)^(porth>>IP_VS_SVC_TAB_BITS)^porth) | 321 | return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth) |
289 | & IP_VS_SVC_TAB_MASK; | 322 | & IP_VS_SVC_TAB_MASK; |
290 | } | 323 | } |
291 | 324 | ||
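The service (and, further down, real-server) hash helpers keep their old 32-bit mixing: an IPv6 address is first folded to 32 bits by XOR-ing its four words (ip6[0]^ip6[1]^ip6[2]^ip6[3]) and then combined with protocol and port as before. A standalone sketch of that folding (the table size here is an arbitrary choice, not the kernel's IP_VS_SVC_TAB_BITS):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TAB_BITS 8
#define TAB_MASK ((1 << TAB_BITS) - 1)

/* fold an IPv6 address to 32 bits by XOR of its four words;
 * an IPv4 address (kept in the first word) folds to itself */
static uint32_t addr_fold(int af, const struct in6_addr *addr)
{
	uint32_t w[4];

	memcpy(w, addr, sizeof(w));
	return af == AF_INET6 ? w[0] ^ w[1] ^ w[2] ^ w[3] : w[0];
}

static unsigned svc_hashkey(int af, unsigned proto,
			    const struct in6_addr *addr, uint16_t port)
{
	unsigned porth = ntohs(port);

	return (proto ^ ntohl(addr_fold(af, addr)) ^
		(porth >> TAB_BITS) ^ porth) & TAB_MASK;
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "2001:db8::50", &a);
	printf("bucket = %u\n",
	       svc_hashkey(AF_INET6, IPPROTO_TCP, &a, htons(80)));
	return 0;
}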
@@ -316,7 +349,8 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc) | |||
316 | /* | 349 | /* |
317 | * Hash it by <protocol,addr,port> in ip_vs_svc_table | 350 | * Hash it by <protocol,addr,port> in ip_vs_svc_table |
318 | */ | 351 | */ |
319 | hash = ip_vs_svc_hashkey(svc->protocol, svc->addr, svc->port); | 352 | hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr, |
353 | svc->port); | ||
320 | list_add(&svc->s_list, &ip_vs_svc_table[hash]); | 354 | list_add(&svc->s_list, &ip_vs_svc_table[hash]); |
321 | } else { | 355 | } else { |
322 | /* | 356 | /* |
@@ -362,17 +396,19 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc) | |||
362 | /* | 396 | /* |
363 | * Get service by {proto,addr,port} in the service table. | 397 | * Get service by {proto,addr,port} in the service table. |
364 | */ | 398 | */ |
365 | static __inline__ struct ip_vs_service * | 399 | static inline struct ip_vs_service * |
366 | __ip_vs_service_get(__u16 protocol, __be32 vaddr, __be16 vport) | 400 | __ip_vs_service_get(int af, __u16 protocol, const union nf_inet_addr *vaddr, |
401 | __be16 vport) | ||
367 | { | 402 | { |
368 | unsigned hash; | 403 | unsigned hash; |
369 | struct ip_vs_service *svc; | 404 | struct ip_vs_service *svc; |
370 | 405 | ||
371 | /* Check for "full" addressed entries */ | 406 | /* Check for "full" addressed entries */ |
372 | hash = ip_vs_svc_hashkey(protocol, vaddr, vport); | 407 | hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport); |
373 | 408 | ||
374 | list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){ | 409 | list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){ |
375 | if ((svc->addr == vaddr) | 410 | if ((svc->af == af) |
411 | && ip_vs_addr_equal(af, &svc->addr, vaddr) | ||
376 | && (svc->port == vport) | 412 | && (svc->port == vport) |
377 | && (svc->protocol == protocol)) { | 413 | && (svc->protocol == protocol)) { |
378 | /* HIT */ | 414 | /* HIT */ |
@@ -388,7 +424,8 @@ __ip_vs_service_get(__u16 protocol, __be32 vaddr, __be16 vport) | |||
388 | /* | 424 | /* |
389 | * Get service by {fwmark} in the service table. | 425 | * Get service by {fwmark} in the service table. |
390 | */ | 426 | */ |
391 | static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark) | 427 | static inline struct ip_vs_service * |
428 | __ip_vs_svc_fwm_get(int af, __u32 fwmark) | ||
392 | { | 429 | { |
393 | unsigned hash; | 430 | unsigned hash; |
394 | struct ip_vs_service *svc; | 431 | struct ip_vs_service *svc; |
@@ -397,7 +434,7 @@ static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark) | |||
397 | hash = ip_vs_svc_fwm_hashkey(fwmark); | 434 | hash = ip_vs_svc_fwm_hashkey(fwmark); |
398 | 435 | ||
399 | list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) { | 436 | list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) { |
400 | if (svc->fwmark == fwmark) { | 437 | if (svc->fwmark == fwmark && svc->af == af) { |
401 | /* HIT */ | 438 | /* HIT */ |
402 | atomic_inc(&svc->usecnt); | 439 | atomic_inc(&svc->usecnt); |
403 | return svc; | 440 | return svc; |
@@ -408,7 +445,8 @@ static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark) | |||
408 | } | 445 | } |
409 | 446 | ||
410 | struct ip_vs_service * | 447 | struct ip_vs_service * |
411 | ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport) | 448 | ip_vs_service_get(int af, __u32 fwmark, __u16 protocol, |
449 | const union nf_inet_addr *vaddr, __be16 vport) | ||
412 | { | 450 | { |
413 | struct ip_vs_service *svc; | 451 | struct ip_vs_service *svc; |
414 | 452 | ||
@@ -417,14 +455,14 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport) | |||
417 | /* | 455 | /* |
418 | * Check the table hashed by fwmark first | 456 | * Check the table hashed by fwmark first |
419 | */ | 457 | */ |
420 | if (fwmark && (svc = __ip_vs_svc_fwm_get(fwmark))) | 458 | if (fwmark && (svc = __ip_vs_svc_fwm_get(af, fwmark))) |
421 | goto out; | 459 | goto out; |
422 | 460 | ||
423 | /* | 461 | /* |
424 | * Check the table hashed by <protocol,addr,port> | 462 | * Check the table hashed by <protocol,addr,port> |
425 | * for "full" addressed entries | 463 | * for "full" addressed entries |
426 | */ | 464 | */ |
427 | svc = __ip_vs_service_get(protocol, vaddr, vport); | 465 | svc = __ip_vs_service_get(af, protocol, vaddr, vport); |
428 | 466 | ||
429 | if (svc == NULL | 467 | if (svc == NULL |
430 | && protocol == IPPROTO_TCP | 468 | && protocol == IPPROTO_TCP |
@@ -434,7 +472,7 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport) | |||
434 | * Check if ftp service entry exists, the packet | 472 | * Check if ftp service entry exists, the packet |
435 | * might belong to FTP data connections. | 473 | * might belong to FTP data connections. |
436 | */ | 474 | */ |
437 | svc = __ip_vs_service_get(protocol, vaddr, FTPPORT); | 475 | svc = __ip_vs_service_get(af, protocol, vaddr, FTPPORT); |
438 | } | 476 | } |
439 | 477 | ||
440 | if (svc == NULL | 478 | if (svc == NULL |
@@ -442,16 +480,16 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport) | |||
442 | /* | 480 | /* |
443 | * Check if the catch-all port (port zero) exists | 481 | * Check if the catch-all port (port zero) exists |
444 | */ | 482 | */ |
445 | svc = __ip_vs_service_get(protocol, vaddr, 0); | 483 | svc = __ip_vs_service_get(af, protocol, vaddr, 0); |
446 | } | 484 | } |
447 | 485 | ||
448 | out: | 486 | out: |
449 | read_unlock(&__ip_vs_svc_lock); | 487 | read_unlock(&__ip_vs_svc_lock); |
450 | 488 | ||
451 | IP_VS_DBG(9, "lookup service: fwm %u %s %u.%u.%u.%u:%u %s\n", | 489 | IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n", |
452 | fwmark, ip_vs_proto_name(protocol), | 490 | fwmark, ip_vs_proto_name(protocol), |
453 | NIPQUAD(vaddr), ntohs(vport), | 491 | IP_VS_DBG_ADDR(af, vaddr), ntohs(vport), |
454 | svc?"hit":"not hit"); | 492 | svc ? "hit" : "not hit"); |
455 | 493 | ||
456 | return svc; | 494 | return svc; |
457 | } | 495 | } |
@@ -478,11 +516,20 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest) | |||
478 | /* | 516 | /* |
479 | * Returns hash value for real service | 517 | * Returns hash value for real service |
480 | */ | 518 | */ |
481 | static __inline__ unsigned ip_vs_rs_hashkey(__be32 addr, __be16 port) | 519 | static inline unsigned ip_vs_rs_hashkey(int af, |
520 | const union nf_inet_addr *addr, | ||
521 | __be16 port) | ||
482 | { | 522 | { |
483 | register unsigned porth = ntohs(port); | 523 | register unsigned porth = ntohs(port); |
524 | __be32 addr_fold = addr->ip; | ||
525 | |||
526 | #ifdef CONFIG_IP_VS_IPV6 | ||
527 | if (af == AF_INET6) | ||
528 | addr_fold = addr->ip6[0]^addr->ip6[1]^ | ||
529 | addr->ip6[2]^addr->ip6[3]; | ||
530 | #endif | ||
484 | 531 | ||
485 | return (ntohl(addr)^(porth>>IP_VS_RTAB_BITS)^porth) | 532 | return (ntohl(addr_fold)^(porth>>IP_VS_RTAB_BITS)^porth) |
486 | & IP_VS_RTAB_MASK; | 533 | & IP_VS_RTAB_MASK; |
487 | } | 534 | } |
488 | 535 | ||
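The hash above keeps its 32-bit arithmetic by folding the four words of an IPv6 address into a single value with XOR before mixing in the port. A small userland sketch of the same fold; the table size and mask are made-up values for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define RTAB_BITS 4
#define RTAB_MASK ((1u << RTAB_BITS) - 1)

static unsigned rs_hashkey6(const struct in6_addr *addr, uint16_t port)
{
	uint32_t w[4], fold;
	unsigned porth = ntohs(port);

	memcpy(w, addr, sizeof(w));        /* 128 bits as four 32-bit words */
	fold = w[0] ^ w[1] ^ w[2] ^ w[3];  /* XOR-fold down to 32 bits */

	return (ntohl(fold) ^ (porth >> RTAB_BITS) ^ porth) & RTAB_MASK;
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "2001:db8::1", &a);
	printf("bucket = %u\n", rs_hashkey6(&a, htons(80)));
	return 0;
}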
@@ -502,7 +549,8 @@ static int ip_vs_rs_hash(struct ip_vs_dest *dest) | |||
502 | * Hash by proto,addr,port, | 549 | * Hash by proto,addr,port, |
503 | * which are the parameters of the real service. | 550 | * which are the parameters of the real service. |
504 | */ | 551 | */ |
505 | hash = ip_vs_rs_hashkey(dest->addr, dest->port); | 552 | hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port); |
553 | |||
506 | list_add(&dest->d_list, &ip_vs_rtable[hash]); | 554 | list_add(&dest->d_list, &ip_vs_rtable[hash]); |
507 | 555 | ||
508 | return 1; | 556 | return 1; |
@@ -529,7 +577,9 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest) | |||
529 | * Lookup real service by <proto,addr,port> in the real service table. | 577 | * Lookup real service by <proto,addr,port> in the real service table. |
530 | */ | 578 | */ |
531 | struct ip_vs_dest * | 579 | struct ip_vs_dest * |
532 | ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport) | 580 | ip_vs_lookup_real_service(int af, __u16 protocol, |
581 | const union nf_inet_addr *daddr, | ||
582 | __be16 dport) | ||
533 | { | 583 | { |
534 | unsigned hash; | 584 | unsigned hash; |
535 | struct ip_vs_dest *dest; | 585 | struct ip_vs_dest *dest; |
@@ -538,11 +588,12 @@ ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport) | |||
538 | * Check for "full" addressed entries | 588 | * Check for "full" addressed entries |
539 | * Return the first found entry | 589 | * Return the first found entry |
540 | */ | 590 | */ |
541 | hash = ip_vs_rs_hashkey(daddr, dport); | 591 | hash = ip_vs_rs_hashkey(af, daddr, dport); |
542 | 592 | ||
543 | read_lock(&__ip_vs_rs_lock); | 593 | read_lock(&__ip_vs_rs_lock); |
544 | list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) { | 594 | list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) { |
545 | if ((dest->addr == daddr) | 595 | if ((dest->af == af) |
596 | && ip_vs_addr_equal(af, &dest->addr, daddr) | ||
546 | && (dest->port == dport) | 597 | && (dest->port == dport) |
547 | && ((dest->protocol == protocol) || | 598 | && ((dest->protocol == protocol) || |
548 | dest->vfwmark)) { | 599 | dest->vfwmark)) { |
@@ -560,7 +611,8 @@ ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport) | |||
560 | * Lookup destination by {addr,port} in the given service | 611 | * Lookup destination by {addr,port} in the given service |
561 | */ | 612 | */ |
562 | static struct ip_vs_dest * | 613 | static struct ip_vs_dest * |
563 | ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | 614 | ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, |
615 | __be16 dport) | ||
564 | { | 616 | { |
565 | struct ip_vs_dest *dest; | 617 | struct ip_vs_dest *dest; |
566 | 618 | ||
@@ -568,7 +620,9 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | |||
568 | * Find the destination for the given service | 620 | * Find the destination for the given service |
569 | */ | 621 | */ |
570 | list_for_each_entry(dest, &svc->destinations, n_list) { | 622 | list_for_each_entry(dest, &svc->destinations, n_list) { |
571 | if ((dest->addr == daddr) && (dest->port == dport)) { | 623 | if ((dest->af == svc->af) |
624 | && ip_vs_addr_equal(svc->af, &dest->addr, daddr) | ||
625 | && (dest->port == dport)) { | ||
572 | /* HIT */ | 626 | /* HIT */ |
573 | return dest; | 627 | return dest; |
574 | } | 628 | } |
@@ -587,13 +641,15 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | |||
587 | * ip_vs_lookup_real_service() looked promissing, but | 641 | * ip_vs_lookup_real_service() looked promissing, but |
588 | * seems not working as expected. | 642 | * seems not working as expected. |
589 | */ | 643 | */ |
590 | struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport, | 644 | struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr, |
591 | __be32 vaddr, __be16 vport, __u16 protocol) | 645 | __be16 dport, |
646 | const union nf_inet_addr *vaddr, | ||
647 | __be16 vport, __u16 protocol) | ||
592 | { | 648 | { |
593 | struct ip_vs_dest *dest; | 649 | struct ip_vs_dest *dest; |
594 | struct ip_vs_service *svc; | 650 | struct ip_vs_service *svc; |
595 | 651 | ||
596 | svc = ip_vs_service_get(0, protocol, vaddr, vport); | 652 | svc = ip_vs_service_get(af, 0, protocol, vaddr, vport); |
597 | if (!svc) | 653 | if (!svc) |
598 | return NULL; | 654 | return NULL; |
599 | dest = ip_vs_lookup_dest(svc, daddr, dport); | 655 | dest = ip_vs_lookup_dest(svc, daddr, dport); |
@@ -614,7 +670,8 @@ struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport, | |||
614 | * scheduling. | 670 | * scheduling. |
615 | */ | 671 | */ |
616 | static struct ip_vs_dest * | 672 | static struct ip_vs_dest * |
617 | ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | 673 | ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, |
674 | __be16 dport) | ||
618 | { | 675 | { |
619 | struct ip_vs_dest *dest, *nxt; | 676 | struct ip_vs_dest *dest, *nxt; |
620 | 677 | ||
@@ -622,17 +679,19 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | |||
622 | * Find the destination in trash | 679 | * Find the destination in trash |
623 | */ | 680 | */ |
624 | list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) { | 681 | list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) { |
625 | IP_VS_DBG(3, "Destination %u/%u.%u.%u.%u:%u still in trash, " | 682 | IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, " |
626 | "dest->refcnt=%d\n", | 683 | "dest->refcnt=%d\n", |
627 | dest->vfwmark, | 684 | dest->vfwmark, |
628 | NIPQUAD(dest->addr), ntohs(dest->port), | 685 | IP_VS_DBG_ADDR(svc->af, &dest->addr), |
629 | atomic_read(&dest->refcnt)); | 686 | ntohs(dest->port), |
630 | if (dest->addr == daddr && | 687 | atomic_read(&dest->refcnt)); |
688 | if (dest->af == svc->af && | ||
689 | ip_vs_addr_equal(svc->af, &dest->addr, daddr) && | ||
631 | dest->port == dport && | 690 | dest->port == dport && |
632 | dest->vfwmark == svc->fwmark && | 691 | dest->vfwmark == svc->fwmark && |
633 | dest->protocol == svc->protocol && | 692 | dest->protocol == svc->protocol && |
634 | (svc->fwmark || | 693 | (svc->fwmark || |
635 | (dest->vaddr == svc->addr && | 694 | (ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) && |
636 | dest->vport == svc->port))) { | 695 | dest->vport == svc->port))) { |
637 | /* HIT */ | 696 | /* HIT */ |
638 | return dest; | 697 | return dest; |
@@ -642,10 +701,11 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | |||
642 | * Try to purge the destination from trash if not referenced | 701 | * Try to purge the destination from trash if not referenced |
643 | */ | 702 | */ |
644 | if (atomic_read(&dest->refcnt) == 1) { | 703 | if (atomic_read(&dest->refcnt) == 1) { |
645 | IP_VS_DBG(3, "Removing destination %u/%u.%u.%u.%u:%u " | 704 | IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u " |
646 | "from trash\n", | 705 | "from trash\n", |
647 | dest->vfwmark, | 706 | dest->vfwmark, |
648 | NIPQUAD(dest->addr), ntohs(dest->port)); | 707 | IP_VS_DBG_ADDR(svc->af, &dest->addr), |
708 | ntohs(dest->port)); | ||
649 | list_del(&dest->n_list); | 709 | list_del(&dest->n_list); |
650 | ip_vs_dst_reset(dest); | 710 | ip_vs_dst_reset(dest); |
651 | __ip_vs_unbind_svc(dest); | 711 | __ip_vs_unbind_svc(dest); |
@@ -684,18 +744,7 @@ ip_vs_zero_stats(struct ip_vs_stats *stats) | |||
684 | { | 744 | { |
685 | spin_lock_bh(&stats->lock); | 745 | spin_lock_bh(&stats->lock); |
686 | 746 | ||
687 | stats->conns = 0; | 747 | memset(&stats->ustats, 0, sizeof(stats->ustats)); |
688 | stats->inpkts = 0; | ||
689 | stats->outpkts = 0; | ||
690 | stats->inbytes = 0; | ||
691 | stats->outbytes = 0; | ||
692 | |||
693 | stats->cps = 0; | ||
694 | stats->inpps = 0; | ||
695 | stats->outpps = 0; | ||
696 | stats->inbps = 0; | ||
697 | stats->outbps = 0; | ||
698 | |||
699 | ip_vs_zero_estimator(stats); | 748 | ip_vs_zero_estimator(stats); |
700 | 749 | ||
701 | spin_unlock_bh(&stats->lock); | 750 | spin_unlock_bh(&stats->lock); |
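Grouping the exported counters into an embedded ustats structure is what lets the zeroing above collapse into a single memset(), and the same layout later turns ip_vs_copy_stats() into a plain memcpy() of that block. A simplified sketch of the pattern; the field names follow the diff but the layout itself is illustrative:

#include <stdint.h>
#include <string.h>

struct stats_user {                    /* counters visible to userspace */
	uint32_t conns, inpkts, outpkts;
	uint64_t inbytes, outbytes;
	uint32_t cps, inpps, outpps, inbps, outbps;
};

struct stats {
	struct stats_user ustats;      /* zeroed/copied as one block */
	/* lock, estimator state, ... would follow in the real structure */
};

static void stats_zero(struct stats *s)
{
	memset(&s->ustats, 0, sizeof(s->ustats));
}

static void stats_copy(struct stats_user *dst, const struct stats *src)
{
	memcpy(dst, &src->ustats, sizeof(*dst));
}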
@@ -706,7 +755,7 @@ ip_vs_zero_stats(struct ip_vs_stats *stats) | |||
706 | */ | 755 | */ |
707 | static void | 756 | static void |
708 | __ip_vs_update_dest(struct ip_vs_service *svc, | 757 | __ip_vs_update_dest(struct ip_vs_service *svc, |
709 | struct ip_vs_dest *dest, struct ip_vs_dest_user *udest) | 758 | struct ip_vs_dest *dest, struct ip_vs_dest_user_kern *udest) |
710 | { | 759 | { |
711 | int conn_flags; | 760 | int conn_flags; |
712 | 761 | ||
@@ -715,10 +764,18 @@ __ip_vs_update_dest(struct ip_vs_service *svc, | |||
715 | conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE; | 764 | conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE; |
716 | 765 | ||
717 | /* check if local node and update the flags */ | 766 | /* check if local node and update the flags */ |
718 | if (inet_addr_type(&init_net, udest->addr) == RTN_LOCAL) { | 767 | #ifdef CONFIG_IP_VS_IPV6 |
719 | conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK) | 768 | if (svc->af == AF_INET6) { |
720 | | IP_VS_CONN_F_LOCALNODE; | 769 | if (__ip_vs_addr_is_local_v6(&udest->addr.in6)) { |
721 | } | 770 | conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK) |
771 | | IP_VS_CONN_F_LOCALNODE; | ||
772 | } | ||
773 | } else | ||
774 | #endif | ||
775 | if (inet_addr_type(&init_net, udest->addr.ip) == RTN_LOCAL) { | ||
776 | conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK) | ||
777 | | IP_VS_CONN_F_LOCALNODE; | ||
778 | } | ||
722 | 779 | ||
723 | /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */ | 780 | /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */ |
724 | if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) { | 781 | if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) { |
@@ -759,7 +816,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, | |||
759 | * Create a destination for the given service | 816 | * Create a destination for the given service |
760 | */ | 817 | */ |
761 | static int | 818 | static int |
762 | ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, | 819 | ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, |
763 | struct ip_vs_dest **dest_p) | 820 | struct ip_vs_dest **dest_p) |
764 | { | 821 | { |
765 | struct ip_vs_dest *dest; | 822 | struct ip_vs_dest *dest; |
@@ -767,9 +824,20 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, | |||
767 | 824 | ||
768 | EnterFunction(2); | 825 | EnterFunction(2); |
769 | 826 | ||
770 | atype = inet_addr_type(&init_net, udest->addr); | 827 | #ifdef CONFIG_IP_VS_IPV6 |
771 | if (atype != RTN_LOCAL && atype != RTN_UNICAST) | 828 | if (svc->af == AF_INET6) { |
772 | return -EINVAL; | 829 | atype = ipv6_addr_type(&udest->addr.in6); |
830 | if ((!(atype & IPV6_ADDR_UNICAST) || | ||
831 | atype & IPV6_ADDR_LINKLOCAL) && | ||
832 | !__ip_vs_addr_is_local_v6(&udest->addr.in6)) | ||
833 | return -EINVAL; | ||
834 | } else | ||
835 | #endif | ||
836 | { | ||
837 | atype = inet_addr_type(&init_net, udest->addr.ip); | ||
838 | if (atype != RTN_LOCAL && atype != RTN_UNICAST) | ||
839 | return -EINVAL; | ||
840 | } | ||
773 | 841 | ||
774 | dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); | 842 | dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); |
775 | if (dest == NULL) { | 843 | if (dest == NULL) { |
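For IPv6 destinations the hunk above accepts only unicast addresses and rejects link-local ones, unless the address is local to the host. A userland approximation of that check, using the standard IN6_IS_ADDR_* macros in place of the kernel's ipv6_addr_type() and leaving out the local-address exception:

#include <stdbool.h>
#include <netinet/in.h>

static bool dest_addr_ok_v6(const struct in6_addr *a)
{
	/* must be a usable unicast address ... */
	if (IN6_IS_ADDR_UNSPECIFIED(a) || IN6_IS_ADDR_MULTICAST(a))
		return false;
	/* ... and not of link-local scope */
	if (IN6_IS_ADDR_LINKLOCAL(a))
		return false;
	return true;
}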
@@ -777,11 +845,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, | |||
777 | return -ENOMEM; | 845 | return -ENOMEM; |
778 | } | 846 | } |
779 | 847 | ||
848 | dest->af = svc->af; | ||
780 | dest->protocol = svc->protocol; | 849 | dest->protocol = svc->protocol; |
781 | dest->vaddr = svc->addr; | 850 | dest->vaddr = svc->addr; |
782 | dest->vport = svc->port; | 851 | dest->vport = svc->port; |
783 | dest->vfwmark = svc->fwmark; | 852 | dest->vfwmark = svc->fwmark; |
784 | dest->addr = udest->addr; | 853 | ip_vs_addr_copy(svc->af, &dest->addr, &udest->addr); |
785 | dest->port = udest->port; | 854 | dest->port = udest->port; |
786 | 855 | ||
787 | atomic_set(&dest->activeconns, 0); | 856 | atomic_set(&dest->activeconns, 0); |
@@ -806,10 +875,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, | |||
806 | * Add a destination into an existing service | 875 | * Add a destination into an existing service |
807 | */ | 876 | */ |
808 | static int | 877 | static int |
809 | ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | 878 | ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) |
810 | { | 879 | { |
811 | struct ip_vs_dest *dest; | 880 | struct ip_vs_dest *dest; |
812 | __be32 daddr = udest->addr; | 881 | union nf_inet_addr daddr; |
813 | __be16 dport = udest->port; | 882 | __be16 dport = udest->port; |
814 | int ret; | 883 | int ret; |
815 | 884 | ||
@@ -826,10 +895,13 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
826 | return -ERANGE; | 895 | return -ERANGE; |
827 | } | 896 | } |
828 | 897 | ||
898 | ip_vs_addr_copy(svc->af, &daddr, &udest->addr); | ||
899 | |||
829 | /* | 900 | /* |
830 | * Check if the dest already exists in the list | 901 | * Check if the dest already exists in the list |
831 | */ | 902 | */ |
832 | dest = ip_vs_lookup_dest(svc, daddr, dport); | 903 | dest = ip_vs_lookup_dest(svc, &daddr, dport); |
904 | |||
833 | if (dest != NULL) { | 905 | if (dest != NULL) { |
834 | IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n"); | 906 | IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n"); |
835 | return -EEXIST; | 907 | return -EEXIST; |
@@ -839,15 +911,17 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
839 | * Check if the dest already exists in the trash and | 911 | * Check if the dest already exists in the trash and |
840 | * is from the same service | 912 | * is from the same service |
841 | */ | 913 | */ |
842 | dest = ip_vs_trash_get_dest(svc, daddr, dport); | 914 | dest = ip_vs_trash_get_dest(svc, &daddr, dport); |
915 | |||
843 | if (dest != NULL) { | 916 | if (dest != NULL) { |
844 | IP_VS_DBG(3, "Get destination %u.%u.%u.%u:%u from trash, " | 917 | IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, " |
845 | "dest->refcnt=%d, service %u/%u.%u.%u.%u:%u\n", | 918 | "dest->refcnt=%d, service %u/%s:%u\n", |
846 | NIPQUAD(daddr), ntohs(dport), | 919 | IP_VS_DBG_ADDR(svc->af, &daddr), ntohs(dport), |
847 | atomic_read(&dest->refcnt), | 920 | atomic_read(&dest->refcnt), |
848 | dest->vfwmark, | 921 | dest->vfwmark, |
849 | NIPQUAD(dest->vaddr), | 922 | IP_VS_DBG_ADDR(svc->af, &dest->vaddr), |
850 | ntohs(dest->vport)); | 923 | ntohs(dest->vport)); |
924 | |||
851 | __ip_vs_update_dest(svc, dest, udest); | 925 | __ip_vs_update_dest(svc, dest, udest); |
852 | 926 | ||
853 | /* | 927 | /* |
@@ -868,7 +942,8 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
868 | svc->num_dests++; | 942 | svc->num_dests++; |
869 | 943 | ||
870 | /* call the update_service function of its scheduler */ | 944 | /* call the update_service function of its scheduler */ |
871 | svc->scheduler->update_service(svc); | 945 | if (svc->scheduler->update_service) |
946 | svc->scheduler->update_service(svc); | ||
872 | 947 | ||
873 | write_unlock_bh(&__ip_vs_svc_lock); | 948 | write_unlock_bh(&__ip_vs_svc_lock); |
874 | return 0; | 949 | return 0; |
@@ -898,7 +973,8 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
898 | svc->num_dests++; | 973 | svc->num_dests++; |
899 | 974 | ||
900 | /* call the update_service function of its scheduler */ | 975 | /* call the update_service function of its scheduler */ |
901 | svc->scheduler->update_service(svc); | 976 | if (svc->scheduler->update_service) |
977 | svc->scheduler->update_service(svc); | ||
902 | 978 | ||
903 | write_unlock_bh(&__ip_vs_svc_lock); | 979 | write_unlock_bh(&__ip_vs_svc_lock); |
904 | 980 | ||
@@ -912,10 +988,10 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
912 | * Edit a destination in the given service | 988 | * Edit a destination in the given service |
913 | */ | 989 | */ |
914 | static int | 990 | static int |
915 | ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | 991 | ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) |
916 | { | 992 | { |
917 | struct ip_vs_dest *dest; | 993 | struct ip_vs_dest *dest; |
918 | __be32 daddr = udest->addr; | 994 | union nf_inet_addr daddr; |
919 | __be16 dport = udest->port; | 995 | __be16 dport = udest->port; |
920 | 996 | ||
921 | EnterFunction(2); | 997 | EnterFunction(2); |
@@ -931,10 +1007,13 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
931 | return -ERANGE; | 1007 | return -ERANGE; |
932 | } | 1008 | } |
933 | 1009 | ||
1010 | ip_vs_addr_copy(svc->af, &daddr, &udest->addr); | ||
1011 | |||
934 | /* | 1012 | /* |
935 | * Lookup the destination list | 1013 | * Lookup the destination list |
936 | */ | 1014 | */ |
937 | dest = ip_vs_lookup_dest(svc, daddr, dport); | 1015 | dest = ip_vs_lookup_dest(svc, &daddr, dport); |
1016 | |||
938 | if (dest == NULL) { | 1017 | if (dest == NULL) { |
939 | IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n"); | 1018 | IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n"); |
940 | return -ENOENT; | 1019 | return -ENOENT; |
@@ -948,7 +1027,8 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
948 | IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); | 1027 | IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); |
949 | 1028 | ||
950 | /* call the update_service, because server weight may be changed */ | 1029 | /* call the update_service, because server weight may be changed */ |
951 | svc->scheduler->update_service(svc); | 1030 | if (svc->scheduler->update_service) |
1031 | svc->scheduler->update_service(svc); | ||
952 | 1032 | ||
953 | write_unlock_bh(&__ip_vs_svc_lock); | 1033 | write_unlock_bh(&__ip_vs_svc_lock); |
954 | 1034 | ||
@@ -987,10 +1067,11 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest) | |||
987 | atomic_dec(&dest->svc->refcnt); | 1067 | atomic_dec(&dest->svc->refcnt); |
988 | kfree(dest); | 1068 | kfree(dest); |
989 | } else { | 1069 | } else { |
990 | IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, " | 1070 | IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, " |
991 | "dest->refcnt=%d\n", | 1071 | "dest->refcnt=%d\n", |
992 | NIPQUAD(dest->addr), ntohs(dest->port), | 1072 | IP_VS_DBG_ADDR(dest->af, &dest->addr), |
993 | atomic_read(&dest->refcnt)); | 1073 | ntohs(dest->port), |
1074 | atomic_read(&dest->refcnt)); | ||
994 | list_add(&dest->n_list, &ip_vs_dest_trash); | 1075 | list_add(&dest->n_list, &ip_vs_dest_trash); |
995 | atomic_inc(&dest->refcnt); | 1076 | atomic_inc(&dest->refcnt); |
996 | } | 1077 | } |
@@ -1011,12 +1092,12 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc, | |||
1011 | */ | 1092 | */ |
1012 | list_del(&dest->n_list); | 1093 | list_del(&dest->n_list); |
1013 | svc->num_dests--; | 1094 | svc->num_dests--; |
1014 | if (svcupd) { | 1095 | |
1015 | /* | 1096 | /* |
1016 | * Call the update_service function of its scheduler | 1097 | * Call the update_service function of its scheduler |
1017 | */ | 1098 | */ |
1018 | svc->scheduler->update_service(svc); | 1099 | if (svcupd && svc->scheduler->update_service) |
1019 | } | 1100 | svc->scheduler->update_service(svc); |
1020 | } | 1101 | } |
1021 | 1102 | ||
1022 | 1103 | ||
@@ -1024,15 +1105,15 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc, | |||
1024 | * Delete a destination server in the given service | 1105 | * Delete a destination server in the given service |
1025 | */ | 1106 | */ |
1026 | static int | 1107 | static int |
1027 | ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest) | 1108 | ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) |
1028 | { | 1109 | { |
1029 | struct ip_vs_dest *dest; | 1110 | struct ip_vs_dest *dest; |
1030 | __be32 daddr = udest->addr; | ||
1031 | __be16 dport = udest->port; | 1111 | __be16 dport = udest->port; |
1032 | 1112 | ||
1033 | EnterFunction(2); | 1113 | EnterFunction(2); |
1034 | 1114 | ||
1035 | dest = ip_vs_lookup_dest(svc, daddr, dport); | 1115 | dest = ip_vs_lookup_dest(svc, &udest->addr, dport); |
1116 | |||
1036 | if (dest == NULL) { | 1117 | if (dest == NULL) { |
1037 | IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n"); | 1118 | IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n"); |
1038 | return -ENOENT; | 1119 | return -ENOENT; |
@@ -1067,7 +1148,8 @@ ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest) | |||
1067 | * Add a service into the service hash table | 1148 | * Add a service into the service hash table |
1068 | */ | 1149 | */ |
1069 | static int | 1150 | static int |
1070 | ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) | 1151 | ip_vs_add_service(struct ip_vs_service_user_kern *u, |
1152 | struct ip_vs_service **svc_p) | ||
1071 | { | 1153 | { |
1072 | int ret = 0; | 1154 | int ret = 0; |
1073 | struct ip_vs_scheduler *sched = NULL; | 1155 | struct ip_vs_scheduler *sched = NULL; |
@@ -1085,6 +1167,19 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) | |||
1085 | goto out_mod_dec; | 1167 | goto out_mod_dec; |
1086 | } | 1168 | } |
1087 | 1169 | ||
1170 | #ifdef CONFIG_IP_VS_IPV6 | ||
1171 | if (u->af == AF_INET6) { | ||
1172 | if (!sched->supports_ipv6) { | ||
1173 | ret = -EAFNOSUPPORT; | ||
1174 | goto out_err; | ||
1175 | } | ||
1176 | if ((u->netmask < 1) || (u->netmask > 128)) { | ||
1177 | ret = -EINVAL; | ||
1178 | goto out_err; | ||
1179 | } | ||
1180 | } | ||
1181 | #endif | ||
1182 | |||
1088 | svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); | 1183 | svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); |
1089 | if (svc == NULL) { | 1184 | if (svc == NULL) { |
1090 | IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); | 1185 | IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); |
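The netmask bounds check added above (1..128 for AF_INET6) reflects that for IPv6 the field is treated as a CIDR prefix length rather than a 32-bit mask. A sketch of applying such a prefix to an in6_addr, which is roughly how a persistence netmask would be used; the helper name and usage are illustrative, not taken from the patch:

#include <string.h>
#include <netinet/in.h>

static void in6_apply_prefix(struct in6_addr *addr, unsigned int plen)
{
	unsigned int byte = plen / 8;   /* whole bytes that survive */
	unsigned int bits = plen % 8;   /* extra bits in the next byte */

	if (byte >= 16)
		return;                 /* /128 keeps the whole address */
	if (bits) {
		addr->s6_addr[byte] &= (unsigned char)(0xffu << (8 - bits));
		byte++;
	}
	memset(&addr->s6_addr[byte], 0, 16 - byte);
}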
@@ -1096,8 +1191,9 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) | |||
1096 | atomic_set(&svc->usecnt, 1); | 1191 | atomic_set(&svc->usecnt, 1); |
1097 | atomic_set(&svc->refcnt, 0); | 1192 | atomic_set(&svc->refcnt, 0); |
1098 | 1193 | ||
1194 | svc->af = u->af; | ||
1099 | svc->protocol = u->protocol; | 1195 | svc->protocol = u->protocol; |
1100 | svc->addr = u->addr; | 1196 | ip_vs_addr_copy(svc->af, &svc->addr, &u->addr); |
1101 | svc->port = u->port; | 1197 | svc->port = u->port; |
1102 | svc->fwmark = u->fwmark; | 1198 | svc->fwmark = u->fwmark; |
1103 | svc->flags = u->flags; | 1199 | svc->flags = u->flags; |
@@ -1121,7 +1217,10 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) | |||
1121 | atomic_inc(&ip_vs_nullsvc_counter); | 1217 | atomic_inc(&ip_vs_nullsvc_counter); |
1122 | 1218 | ||
1123 | ip_vs_new_estimator(&svc->stats); | 1219 | ip_vs_new_estimator(&svc->stats); |
1124 | ip_vs_num_services++; | 1220 | |
1221 | /* Count only IPv4 services for old get/setsockopt interface */ | ||
1222 | if (svc->af == AF_INET) | ||
1223 | ip_vs_num_services++; | ||
1125 | 1224 | ||
1126 | /* Hash the service into the service table */ | 1225 | /* Hash the service into the service table */ |
1127 | write_lock_bh(&__ip_vs_svc_lock); | 1226 | write_lock_bh(&__ip_vs_svc_lock); |
@@ -1156,7 +1255,7 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) | |||
1156 | * Edit a service and bind it with a new scheduler | 1255 | * Edit a service and bind it with a new scheduler |
1157 | */ | 1256 | */ |
1158 | static int | 1257 | static int |
1159 | ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u) | 1258 | ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) |
1160 | { | 1259 | { |
1161 | struct ip_vs_scheduler *sched, *old_sched; | 1260 | struct ip_vs_scheduler *sched, *old_sched; |
1162 | int ret = 0; | 1261 | int ret = 0; |
@@ -1172,6 +1271,19 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u) | |||
1172 | } | 1271 | } |
1173 | old_sched = sched; | 1272 | old_sched = sched; |
1174 | 1273 | ||
1274 | #ifdef CONFIG_IP_VS_IPV6 | ||
1275 | if (u->af == AF_INET6) { | ||
1276 | if (!sched->supports_ipv6) { | ||
1277 | ret = -EAFNOSUPPORT; | ||
1278 | goto out; | ||
1279 | } | ||
1280 | if ((u->netmask < 1) || (u->netmask > 128)) { | ||
1281 | ret = -EINVAL; | ||
1282 | goto out; | ||
1283 | } | ||
1284 | } | ||
1285 | #endif | ||
1286 | |||
1175 | write_lock_bh(&__ip_vs_svc_lock); | 1287 | write_lock_bh(&__ip_vs_svc_lock); |
1176 | 1288 | ||
1177 | /* | 1289 | /* |
@@ -1236,7 +1348,10 @@ static void __ip_vs_del_service(struct ip_vs_service *svc) | |||
1236 | struct ip_vs_dest *dest, *nxt; | 1348 | struct ip_vs_dest *dest, *nxt; |
1237 | struct ip_vs_scheduler *old_sched; | 1349 | struct ip_vs_scheduler *old_sched; |
1238 | 1350 | ||
1239 | ip_vs_num_services--; | 1351 | /* Count only IPv4 services for old get/setsockopt interface */ |
1352 | if (svc->af == AF_INET) | ||
1353 | ip_vs_num_services--; | ||
1354 | |||
1240 | ip_vs_kill_estimator(&svc->stats); | 1355 | ip_vs_kill_estimator(&svc->stats); |
1241 | 1356 | ||
1242 | /* Unbind scheduler */ | 1357 | /* Unbind scheduler */ |
@@ -1744,15 +1859,25 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) | |||
1744 | const struct ip_vs_iter *iter = seq->private; | 1859 | const struct ip_vs_iter *iter = seq->private; |
1745 | const struct ip_vs_dest *dest; | 1860 | const struct ip_vs_dest *dest; |
1746 | 1861 | ||
1747 | if (iter->table == ip_vs_svc_table) | 1862 | if (iter->table == ip_vs_svc_table) { |
1748 | seq_printf(seq, "%s %08X:%04X %s ", | 1863 | #ifdef CONFIG_IP_VS_IPV6 |
1749 | ip_vs_proto_name(svc->protocol), | 1864 | if (svc->af == AF_INET6) |
1750 | ntohl(svc->addr), | 1865 | seq_printf(seq, "%s [" NIP6_FMT "]:%04X %s ", |
1751 | ntohs(svc->port), | 1866 | ip_vs_proto_name(svc->protocol), |
1752 | svc->scheduler->name); | 1867 | NIP6(svc->addr.in6), |
1753 | else | 1868 | ntohs(svc->port), |
1869 | svc->scheduler->name); | ||
1870 | else | ||
1871 | #endif | ||
1872 | seq_printf(seq, "%s %08X:%04X %s ", | ||
1873 | ip_vs_proto_name(svc->protocol), | ||
1874 | ntohl(svc->addr.ip), | ||
1875 | ntohs(svc->port), | ||
1876 | svc->scheduler->name); | ||
1877 | } else { | ||
1754 | seq_printf(seq, "FWM %08X %s ", | 1878 | seq_printf(seq, "FWM %08X %s ", |
1755 | svc->fwmark, svc->scheduler->name); | 1879 | svc->fwmark, svc->scheduler->name); |
1880 | } | ||
1756 | 1881 | ||
1757 | if (svc->flags & IP_VS_SVC_F_PERSISTENT) | 1882 | if (svc->flags & IP_VS_SVC_F_PERSISTENT) |
1758 | seq_printf(seq, "persistent %d %08X\n", | 1883 | seq_printf(seq, "persistent %d %08X\n", |
@@ -1762,13 +1887,29 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) | |||
1762 | seq_putc(seq, '\n'); | 1887 | seq_putc(seq, '\n'); |
1763 | 1888 | ||
1764 | list_for_each_entry(dest, &svc->destinations, n_list) { | 1889 | list_for_each_entry(dest, &svc->destinations, n_list) { |
1765 | seq_printf(seq, | 1890 | #ifdef CONFIG_IP_VS_IPV6 |
1766 | " -> %08X:%04X %-7s %-6d %-10d %-10d\n", | 1891 | if (dest->af == AF_INET6) |
1767 | ntohl(dest->addr), ntohs(dest->port), | 1892 | seq_printf(seq, |
1768 | ip_vs_fwd_name(atomic_read(&dest->conn_flags)), | 1893 | " -> [" NIP6_FMT "]:%04X" |
1769 | atomic_read(&dest->weight), | 1894 | " %-7s %-6d %-10d %-10d\n", |
1770 | atomic_read(&dest->activeconns), | 1895 | NIP6(dest->addr.in6), |
1771 | atomic_read(&dest->inactconns)); | 1896 | ntohs(dest->port), |
1897 | ip_vs_fwd_name(atomic_read(&dest->conn_flags)), | ||
1898 | atomic_read(&dest->weight), | ||
1899 | atomic_read(&dest->activeconns), | ||
1900 | atomic_read(&dest->inactconns)); | ||
1901 | else | ||
1902 | #endif | ||
1903 | seq_printf(seq, | ||
1904 | " -> %08X:%04X " | ||
1905 | "%-7s %-6d %-10d %-10d\n", | ||
1906 | ntohl(dest->addr.ip), | ||
1907 | ntohs(dest->port), | ||
1908 | ip_vs_fwd_name(atomic_read(&dest->conn_flags)), | ||
1909 | atomic_read(&dest->weight), | ||
1910 | atomic_read(&dest->activeconns), | ||
1911 | atomic_read(&dest->inactconns)); | ||
1912 | |||
1772 | } | 1913 | } |
1773 | } | 1914 | } |
1774 | return 0; | 1915 | return 0; |
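The /proc output above keeps the historical hexadecimal ADDR:PORT form for IPv4 and switches to a bracketed textual form for IPv6. A userland sketch of the two formats, with inet_ntop() standing in for the kernel's NIP6 formatting; the port is assumed to be in host byte order already:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static void print_dest(int af, const void *addr, unsigned int port)
{
	char buf[INET6_ADDRSTRLEN];

	if (af == AF_INET6) {
		inet_ntop(AF_INET6, addr, buf, sizeof(buf));
		printf(" -> [%s]:%04X\n", buf, port);
	} else {
		printf(" -> %08X:%04X\n",
		       (unsigned)ntohl(*(const uint32_t *)addr), port);
	}
}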
@@ -1812,20 +1953,20 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v) | |||
1812 | " Conns Packets Packets Bytes Bytes\n"); | 1953 | " Conns Packets Packets Bytes Bytes\n"); |
1813 | 1954 | ||
1814 | spin_lock_bh(&ip_vs_stats.lock); | 1955 | spin_lock_bh(&ip_vs_stats.lock); |
1815 | seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.conns, | 1956 | seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns, |
1816 | ip_vs_stats.inpkts, ip_vs_stats.outpkts, | 1957 | ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts, |
1817 | (unsigned long long) ip_vs_stats.inbytes, | 1958 | (unsigned long long) ip_vs_stats.ustats.inbytes, |
1818 | (unsigned long long) ip_vs_stats.outbytes); | 1959 | (unsigned long long) ip_vs_stats.ustats.outbytes); |
1819 | 1960 | ||
1820 | /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ | 1961 | /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ |
1821 | seq_puts(seq, | 1962 | seq_puts(seq, |
1822 | " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); | 1963 | " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); |
1823 | seq_printf(seq,"%8X %8X %8X %16X %16X\n", | 1964 | seq_printf(seq,"%8X %8X %8X %16X %16X\n", |
1824 | ip_vs_stats.cps, | 1965 | ip_vs_stats.ustats.cps, |
1825 | ip_vs_stats.inpps, | 1966 | ip_vs_stats.ustats.inpps, |
1826 | ip_vs_stats.outpps, | 1967 | ip_vs_stats.ustats.outpps, |
1827 | ip_vs_stats.inbps, | 1968 | ip_vs_stats.ustats.inbps, |
1828 | ip_vs_stats.outbps); | 1969 | ip_vs_stats.ustats.outbps); |
1829 | spin_unlock_bh(&ip_vs_stats.lock); | 1970 | spin_unlock_bh(&ip_vs_stats.lock); |
1830 | 1971 | ||
1831 | return 0; | 1972 | return 0; |
@@ -1900,14 +2041,44 @@ static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = { | |||
1900 | [SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN, | 2041 | [SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN, |
1901 | }; | 2042 | }; |
1902 | 2043 | ||
2044 | static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc, | ||
2045 | struct ip_vs_service_user *usvc_compat) | ||
2046 | { | ||
2047 | usvc->af = AF_INET; | ||
2048 | usvc->protocol = usvc_compat->protocol; | ||
2049 | usvc->addr.ip = usvc_compat->addr; | ||
2050 | usvc->port = usvc_compat->port; | ||
2051 | usvc->fwmark = usvc_compat->fwmark; | ||
2052 | |||
2053 | /* Deep copy of sched_name is not needed here */ | ||
2054 | usvc->sched_name = usvc_compat->sched_name; | ||
2055 | |||
2056 | usvc->flags = usvc_compat->flags; | ||
2057 | usvc->timeout = usvc_compat->timeout; | ||
2058 | usvc->netmask = usvc_compat->netmask; | ||
2059 | } | ||
2060 | |||
2061 | static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest, | ||
2062 | struct ip_vs_dest_user *udest_compat) | ||
2063 | { | ||
2064 | udest->addr.ip = udest_compat->addr; | ||
2065 | udest->port = udest_compat->port; | ||
2066 | udest->conn_flags = udest_compat->conn_flags; | ||
2067 | udest->weight = udest_compat->weight; | ||
2068 | udest->u_threshold = udest_compat->u_threshold; | ||
2069 | udest->l_threshold = udest_compat->l_threshold; | ||
2070 | } | ||
2071 | |||
1903 | static int | 2072 | static int |
1904 | do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | 2073 | do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) |
1905 | { | 2074 | { |
1906 | int ret; | 2075 | int ret; |
1907 | unsigned char arg[MAX_ARG_LEN]; | 2076 | unsigned char arg[MAX_ARG_LEN]; |
1908 | struct ip_vs_service_user *usvc; | 2077 | struct ip_vs_service_user *usvc_compat; |
2078 | struct ip_vs_service_user_kern usvc; | ||
1909 | struct ip_vs_service *svc; | 2079 | struct ip_vs_service *svc; |
1910 | struct ip_vs_dest_user *udest; | 2080 | struct ip_vs_dest_user *udest_compat; |
2081 | struct ip_vs_dest_user_kern udest; | ||
1911 | 2082 | ||
1912 | if (!capable(CAP_NET_ADMIN)) | 2083 | if (!capable(CAP_NET_ADMIN)) |
1913 | return -EPERM; | 2084 | return -EPERM; |
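From here on, the setsockopt path converts the legacy IPv4-only userspace structures into the extended *_kern forms right at the entry point, so the rest of the code only ever sees an address family plus an address union. A simplified stand-in for that conversion; the struct names and fields below are illustrative, not the real uapi layout:

#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>

struct svc_user_compat {               /* old ABI: bare IPv4 address */
	uint16_t protocol;
	uint32_t addr;                 /* network byte order */
	uint16_t port;
};

struct svc_user_kern {                 /* widened internal form */
	int      af;
	uint16_t protocol;
	union {
		uint32_t        ip;
		struct in6_addr in6;
	} addr;
	uint16_t port;
};

static void copy_usvc_compat(struct svc_user_kern *u,
			     const struct svc_user_compat *c)
{
	u->af       = AF_INET;         /* the old interface is always IPv4 */
	u->protocol = c->protocol;
	u->addr.ip  = c->addr;
	u->port     = c->port;
}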
@@ -1947,35 +2118,40 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
1947 | goto out_unlock; | 2118 | goto out_unlock; |
1948 | } | 2119 | } |
1949 | 2120 | ||
1950 | usvc = (struct ip_vs_service_user *)arg; | 2121 | usvc_compat = (struct ip_vs_service_user *)arg; |
1951 | udest = (struct ip_vs_dest_user *)(usvc + 1); | 2122 | udest_compat = (struct ip_vs_dest_user *)(usvc_compat + 1); |
2123 | |||
2124 | /* We only use the new structs internally, so copy userspace compat | ||
2125 | * structs to extended internal versions */ | ||
2126 | ip_vs_copy_usvc_compat(&usvc, usvc_compat); | ||
2127 | ip_vs_copy_udest_compat(&udest, udest_compat); | ||
1952 | 2128 | ||
1953 | if (cmd == IP_VS_SO_SET_ZERO) { | 2129 | if (cmd == IP_VS_SO_SET_ZERO) { |
1954 | /* if no service address is set, zero counters in all */ | 2130 | /* if no service address is set, zero counters in all */ |
1955 | if (!usvc->fwmark && !usvc->addr && !usvc->port) { | 2131 | if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) { |
1956 | ret = ip_vs_zero_all(); | 2132 | ret = ip_vs_zero_all(); |
1957 | goto out_unlock; | 2133 | goto out_unlock; |
1958 | } | 2134 | } |
1959 | } | 2135 | } |
1960 | 2136 | ||
1961 | /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */ | 2137 | /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */ |
1962 | if (usvc->protocol!=IPPROTO_TCP && usvc->protocol!=IPPROTO_UDP) { | 2138 | if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) { |
1963 | IP_VS_ERR("set_ctl: invalid protocol: %d %d.%d.%d.%d:%d %s\n", | 2139 | IP_VS_ERR("set_ctl: invalid protocol: %d %d.%d.%d.%d:%d %s\n", |
1964 | usvc->protocol, NIPQUAD(usvc->addr), | 2140 | usvc.protocol, NIPQUAD(usvc.addr.ip), |
1965 | ntohs(usvc->port), usvc->sched_name); | 2141 | ntohs(usvc.port), usvc.sched_name); |
1966 | ret = -EFAULT; | 2142 | ret = -EFAULT; |
1967 | goto out_unlock; | 2143 | goto out_unlock; |
1968 | } | 2144 | } |
1969 | 2145 | ||
1970 | /* Lookup the exact service by <protocol, addr, port> or fwmark */ | 2146 | /* Lookup the exact service by <protocol, addr, port> or fwmark */ |
1971 | if (usvc->fwmark == 0) | 2147 | if (usvc.fwmark == 0) |
1972 | svc = __ip_vs_service_get(usvc->protocol, | 2148 | svc = __ip_vs_service_get(usvc.af, usvc.protocol, |
1973 | usvc->addr, usvc->port); | 2149 | &usvc.addr, usvc.port); |
1974 | else | 2150 | else |
1975 | svc = __ip_vs_svc_fwm_get(usvc->fwmark); | 2151 | svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark); |
1976 | 2152 | ||
1977 | if (cmd != IP_VS_SO_SET_ADD | 2153 | if (cmd != IP_VS_SO_SET_ADD |
1978 | && (svc == NULL || svc->protocol != usvc->protocol)) { | 2154 | && (svc == NULL || svc->protocol != usvc.protocol)) { |
1979 | ret = -ESRCH; | 2155 | ret = -ESRCH; |
1980 | goto out_unlock; | 2156 | goto out_unlock; |
1981 | } | 2157 | } |
@@ -1985,10 +2161,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
1985 | if (svc != NULL) | 2161 | if (svc != NULL) |
1986 | ret = -EEXIST; | 2162 | ret = -EEXIST; |
1987 | else | 2163 | else |
1988 | ret = ip_vs_add_service(usvc, &svc); | 2164 | ret = ip_vs_add_service(&usvc, &svc); |
1989 | break; | 2165 | break; |
1990 | case IP_VS_SO_SET_EDIT: | 2166 | case IP_VS_SO_SET_EDIT: |
1991 | ret = ip_vs_edit_service(svc, usvc); | 2167 | ret = ip_vs_edit_service(svc, &usvc); |
1992 | break; | 2168 | break; |
1993 | case IP_VS_SO_SET_DEL: | 2169 | case IP_VS_SO_SET_DEL: |
1994 | ret = ip_vs_del_service(svc); | 2170 | ret = ip_vs_del_service(svc); |
@@ -1999,13 +2175,13 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
1999 | ret = ip_vs_zero_service(svc); | 2175 | ret = ip_vs_zero_service(svc); |
2000 | break; | 2176 | break; |
2001 | case IP_VS_SO_SET_ADDDEST: | 2177 | case IP_VS_SO_SET_ADDDEST: |
2002 | ret = ip_vs_add_dest(svc, udest); | 2178 | ret = ip_vs_add_dest(svc, &udest); |
2003 | break; | 2179 | break; |
2004 | case IP_VS_SO_SET_EDITDEST: | 2180 | case IP_VS_SO_SET_EDITDEST: |
2005 | ret = ip_vs_edit_dest(svc, udest); | 2181 | ret = ip_vs_edit_dest(svc, &udest); |
2006 | break; | 2182 | break; |
2007 | case IP_VS_SO_SET_DELDEST: | 2183 | case IP_VS_SO_SET_DELDEST: |
2008 | ret = ip_vs_del_dest(svc, udest); | 2184 | ret = ip_vs_del_dest(svc, &udest); |
2009 | break; | 2185 | break; |
2010 | default: | 2186 | default: |
2011 | ret = -EINVAL; | 2187 | ret = -EINVAL; |
@@ -2028,7 +2204,7 @@ static void | |||
2028 | ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src) | 2204 | ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src) |
2029 | { | 2205 | { |
2030 | spin_lock_bh(&src->lock); | 2206 | spin_lock_bh(&src->lock); |
2031 | memcpy(dst, src, (char*)&src->lock - (char*)src); | 2207 | memcpy(dst, &src->ustats, sizeof(*dst)); |
2032 | spin_unlock_bh(&src->lock); | 2208 | spin_unlock_bh(&src->lock); |
2033 | } | 2209 | } |
2034 | 2210 | ||
@@ -2036,7 +2212,7 @@ static void | |||
2036 | ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) | 2212 | ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) |
2037 | { | 2213 | { |
2038 | dst->protocol = src->protocol; | 2214 | dst->protocol = src->protocol; |
2039 | dst->addr = src->addr; | 2215 | dst->addr = src->addr.ip; |
2040 | dst->port = src->port; | 2216 | dst->port = src->port; |
2041 | dst->fwmark = src->fwmark; | 2217 | dst->fwmark = src->fwmark; |
2042 | strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name)); | 2218 | strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name)); |
@@ -2058,6 +2234,10 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get, | |||
2058 | 2234 | ||
2059 | for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { | 2235 | for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { |
2060 | list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { | 2236 | list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { |
2237 | /* Only expose IPv4 entries to old interface */ | ||
2238 | if (svc->af != AF_INET) | ||
2239 | continue; | ||
2240 | |||
2061 | if (count >= get->num_services) | 2241 | if (count >= get->num_services) |
2062 | goto out; | 2242 | goto out; |
2063 | memset(&entry, 0, sizeof(entry)); | 2243 | memset(&entry, 0, sizeof(entry)); |
@@ -2073,6 +2253,10 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get, | |||
2073 | 2253 | ||
2074 | for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { | 2254 | for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { |
2075 | list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { | 2255 | list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { |
2256 | /* Only expose IPv4 entries to old interface */ | ||
2257 | if (svc->af != AF_INET) | ||
2258 | continue; | ||
2259 | |||
2076 | if (count >= get->num_services) | 2260 | if (count >= get->num_services) |
2077 | goto out; | 2261 | goto out; |
2078 | memset(&entry, 0, sizeof(entry)); | 2262 | memset(&entry, 0, sizeof(entry)); |
@@ -2094,13 +2278,15 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get, | |||
2094 | struct ip_vs_get_dests __user *uptr) | 2278 | struct ip_vs_get_dests __user *uptr) |
2095 | { | 2279 | { |
2096 | struct ip_vs_service *svc; | 2280 | struct ip_vs_service *svc; |
2281 | union nf_inet_addr addr = { .ip = get->addr }; | ||
2097 | int ret = 0; | 2282 | int ret = 0; |
2098 | 2283 | ||
2099 | if (get->fwmark) | 2284 | if (get->fwmark) |
2100 | svc = __ip_vs_svc_fwm_get(get->fwmark); | 2285 | svc = __ip_vs_svc_fwm_get(AF_INET, get->fwmark); |
2101 | else | 2286 | else |
2102 | svc = __ip_vs_service_get(get->protocol, | 2287 | svc = __ip_vs_service_get(AF_INET, get->protocol, &addr, |
2103 | get->addr, get->port); | 2288 | get->port); |
2289 | |||
2104 | if (svc) { | 2290 | if (svc) { |
2105 | int count = 0; | 2291 | int count = 0; |
2106 | struct ip_vs_dest *dest; | 2292 | struct ip_vs_dest *dest; |
@@ -2110,7 +2296,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get, | |||
2110 | if (count >= get->num_dests) | 2296 | if (count >= get->num_dests) |
2111 | break; | 2297 | break; |
2112 | 2298 | ||
2113 | entry.addr = dest->addr; | 2299 | entry.addr = dest->addr.ip; |
2114 | entry.port = dest->port; | 2300 | entry.port = dest->port; |
2115 | entry.conn_flags = atomic_read(&dest->conn_flags); | 2301 | entry.conn_flags = atomic_read(&dest->conn_flags); |
2116 | entry.weight = atomic_read(&dest->weight); | 2302 | entry.weight = atomic_read(&dest->weight); |
@@ -2235,13 +2421,15 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2235 | { | 2421 | { |
2236 | struct ip_vs_service_entry *entry; | 2422 | struct ip_vs_service_entry *entry; |
2237 | struct ip_vs_service *svc; | 2423 | struct ip_vs_service *svc; |
2424 | union nf_inet_addr addr; | ||
2238 | 2425 | ||
2239 | entry = (struct ip_vs_service_entry *)arg; | 2426 | entry = (struct ip_vs_service_entry *)arg; |
2427 | addr.ip = entry->addr; | ||
2240 | if (entry->fwmark) | 2428 | if (entry->fwmark) |
2241 | svc = __ip_vs_svc_fwm_get(entry->fwmark); | 2429 | svc = __ip_vs_svc_fwm_get(AF_INET, entry->fwmark); |
2242 | else | 2430 | else |
2243 | svc = __ip_vs_service_get(entry->protocol, | 2431 | svc = __ip_vs_service_get(AF_INET, entry->protocol, |
2244 | entry->addr, entry->port); | 2432 | &addr, entry->port); |
2245 | if (svc) { | 2433 | if (svc) { |
2246 | ip_vs_copy_service(entry, svc); | 2434 | ip_vs_copy_service(entry, svc); |
2247 | if (copy_to_user(user, entry, sizeof(*entry)) != 0) | 2435 | if (copy_to_user(user, entry, sizeof(*entry)) != 0) |
@@ -2320,6 +2508,875 @@ static struct nf_sockopt_ops ip_vs_sockopts = { | |||
2320 | .owner = THIS_MODULE, | 2508 | .owner = THIS_MODULE, |
2321 | }; | 2509 | }; |
2322 | 2510 | ||
2511 | /* | ||
2512 | * Generic Netlink interface | ||
2513 | */ | ||
2514 | |||
2515 | /* IPVS genetlink family */ | ||
2516 | static struct genl_family ip_vs_genl_family = { | ||
2517 | .id = GENL_ID_GENERATE, | ||
2518 | .hdrsize = 0, | ||
2519 | .name = IPVS_GENL_NAME, | ||
2520 | .version = IPVS_GENL_VERSION, | ||
2521 | .maxattr = IPVS_CMD_MAX, | ||
2522 | }; | ||
2523 | |||
2524 | /* Policy used for first-level command attributes */ | ||
2525 | static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = { | ||
2526 | [IPVS_CMD_ATTR_SERVICE] = { .type = NLA_NESTED }, | ||
2527 | [IPVS_CMD_ATTR_DEST] = { .type = NLA_NESTED }, | ||
2528 | [IPVS_CMD_ATTR_DAEMON] = { .type = NLA_NESTED }, | ||
2529 | [IPVS_CMD_ATTR_TIMEOUT_TCP] = { .type = NLA_U32 }, | ||
2530 | [IPVS_CMD_ATTR_TIMEOUT_TCP_FIN] = { .type = NLA_U32 }, | ||
2531 | [IPVS_CMD_ATTR_TIMEOUT_UDP] = { .type = NLA_U32 }, | ||
2532 | }; | ||
2533 | |||
2534 | /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DAEMON */ | ||
2535 | static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = { | ||
2536 | [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 }, | ||
2537 | [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING, | ||
2538 | .len = IP_VS_IFNAME_MAXLEN }, | ||
2539 | [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 }, | ||
2540 | }; | ||
2541 | |||
2542 | /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */ | ||
2543 | static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = { | ||
2544 | [IPVS_SVC_ATTR_AF] = { .type = NLA_U16 }, | ||
2545 | [IPVS_SVC_ATTR_PROTOCOL] = { .type = NLA_U16 }, | ||
2546 | [IPVS_SVC_ATTR_ADDR] = { .type = NLA_BINARY, | ||
2547 | .len = sizeof(union nf_inet_addr) }, | ||
2548 | [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 }, | ||
2549 | [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 }, | ||
2550 | [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING, | ||
2551 | .len = IP_VS_SCHEDNAME_MAXLEN }, | ||
2552 | [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY, | ||
2553 | .len = sizeof(struct ip_vs_flags) }, | ||
2554 | [IPVS_SVC_ATTR_TIMEOUT] = { .type = NLA_U32 }, | ||
2555 | [IPVS_SVC_ATTR_NETMASK] = { .type = NLA_U32 }, | ||
2556 | [IPVS_SVC_ATTR_STATS] = { .type = NLA_NESTED }, | ||
2557 | }; | ||
2558 | |||
2559 | /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DEST */ | ||
2560 | static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = { | ||
2561 | [IPVS_DEST_ATTR_ADDR] = { .type = NLA_BINARY, | ||
2562 | .len = sizeof(union nf_inet_addr) }, | ||
2563 | [IPVS_DEST_ATTR_PORT] = { .type = NLA_U16 }, | ||
2564 | [IPVS_DEST_ATTR_FWD_METHOD] = { .type = NLA_U32 }, | ||
2565 | [IPVS_DEST_ATTR_WEIGHT] = { .type = NLA_U32 }, | ||
2566 | [IPVS_DEST_ATTR_U_THRESH] = { .type = NLA_U32 }, | ||
2567 | [IPVS_DEST_ATTR_L_THRESH] = { .type = NLA_U32 }, | ||
2568 | [IPVS_DEST_ATTR_ACTIVE_CONNS] = { .type = NLA_U32 }, | ||
2569 | [IPVS_DEST_ATTR_INACT_CONNS] = { .type = NLA_U32 }, | ||
2570 | [IPVS_DEST_ATTR_PERSIST_CONNS] = { .type = NLA_U32 }, | ||
2571 | [IPVS_DEST_ATTR_STATS] = { .type = NLA_NESTED }, | ||
2572 | }; | ||
2573 | |||
2574 | static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type, | ||
2575 | struct ip_vs_stats *stats) | ||
2576 | { | ||
2577 | struct nlattr *nl_stats = nla_nest_start(skb, container_type); | ||
2578 | if (!nl_stats) | ||
2579 | return -EMSGSIZE; | ||
2580 | |||
2581 | spin_lock_bh(&stats->lock); | ||
2582 | |||
2583 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, stats->ustats.conns); | ||
2584 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, stats->ustats.inpkts); | ||
2585 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, stats->ustats.outpkts); | ||
2586 | NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, stats->ustats.inbytes); | ||
2587 | NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, stats->ustats.outbytes); | ||
2588 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, stats->ustats.cps); | ||
2589 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, stats->ustats.inpps); | ||
2590 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, stats->ustats.outpps); | ||
2591 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, stats->ustats.inbps); | ||
2592 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, stats->ustats.outbps); | ||
2593 | |||
2594 | spin_unlock_bh(&stats->lock); | ||
2595 | |||
2596 | nla_nest_end(skb, nl_stats); | ||
2597 | |||
2598 | return 0; | ||
2599 | |||
2600 | nla_put_failure: | ||
2601 | spin_unlock_bh(&stats->lock); | ||
2602 | nla_nest_cancel(skb, nl_stats); | ||
2603 | return -EMSGSIZE; | ||
2604 | } | ||
2605 | |||
2606 | static int ip_vs_genl_fill_service(struct sk_buff *skb, | ||
2607 | struct ip_vs_service *svc) | ||
2608 | { | ||
2609 | struct nlattr *nl_service; | ||
2610 | struct ip_vs_flags flags = { .flags = svc->flags, | ||
2611 | .mask = ~0 }; | ||
2612 | |||
2613 | nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE); | ||
2614 | if (!nl_service) | ||
2615 | return -EMSGSIZE; | ||
2616 | |||
2617 | NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af); | ||
2618 | |||
2619 | if (svc->fwmark) { | ||
2620 | NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark); | ||
2621 | } else { | ||
2622 | NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol); | ||
2623 | NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr); | ||
2624 | NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port); | ||
2625 | } | ||
2626 | |||
2627 | NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name); | ||
2628 | NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags); | ||
2629 | NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ); | ||
2630 | NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask); | ||
2631 | |||
2632 | if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats)) | ||
2633 | goto nla_put_failure; | ||
2634 | |||
2635 | nla_nest_end(skb, nl_service); | ||
2636 | |||
2637 | return 0; | ||
2638 | |||
2639 | nla_put_failure: | ||
2640 | nla_nest_cancel(skb, nl_service); | ||
2641 | return -EMSGSIZE; | ||
2642 | } | ||
2643 | |||
2644 | static int ip_vs_genl_dump_service(struct sk_buff *skb, | ||
2645 | struct ip_vs_service *svc, | ||
2646 | struct netlink_callback *cb) | ||
2647 | { | ||
2648 | void *hdr; | ||
2649 | |||
2650 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, | ||
2651 | &ip_vs_genl_family, NLM_F_MULTI, | ||
2652 | IPVS_CMD_NEW_SERVICE); | ||
2653 | if (!hdr) | ||
2654 | return -EMSGSIZE; | ||
2655 | |||
2656 | if (ip_vs_genl_fill_service(skb, svc) < 0) | ||
2657 | goto nla_put_failure; | ||
2658 | |||
2659 | return genlmsg_end(skb, hdr); | ||
2660 | |||
2661 | nla_put_failure: | ||
2662 | genlmsg_cancel(skb, hdr); | ||
2663 | return -EMSGSIZE; | ||
2664 | } | ||
2665 | |||
2666 | static int ip_vs_genl_dump_services(struct sk_buff *skb, | ||
2667 | struct netlink_callback *cb) | ||
2668 | { | ||
2669 | int idx = 0, i; | ||
2670 | int start = cb->args[0]; | ||
2671 | struct ip_vs_service *svc; | ||
2672 | |||
2673 | mutex_lock(&__ip_vs_mutex); | ||
2674 | for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) { | ||
2675 | list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) { | ||
2676 | if (++idx <= start) | ||
2677 | continue; | ||
2678 | if (ip_vs_genl_dump_service(skb, svc, cb) < 0) { | ||
2679 | idx--; | ||
2680 | goto nla_put_failure; | ||
2681 | } | ||
2682 | } | ||
2683 | } | ||
2684 | |||
2685 | for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) { | ||
2686 | list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) { | ||
2687 | if (++idx <= start) | ||
2688 | continue; | ||
2689 | if (ip_vs_genl_dump_service(skb, svc, cb) < 0) { | ||
2690 | idx--; | ||
2691 | goto nla_put_failure; | ||
2692 | } | ||
2693 | } | ||
2694 | } | ||
2695 | |||
2696 | nla_put_failure: | ||
2697 | mutex_unlock(&__ip_vs_mutex); | ||
2698 | cb->args[0] = idx; | ||
2699 | |||
2700 | return skb->len; | ||
2701 | } | ||
2702 | |||
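The dump callback above may be invoked several times for one request, so it keeps a cursor in cb->args[0], skips everything it already emitted, and stops (backing the cursor up by one) when the message buffer fills. The same resume pattern in plain C, detached from netlink; all names below are illustrative:

#include <stddef.h>

struct entry { int id; };

/* Copy as many entries as fit into 'out', skipping the first 'start'
 * ones, and return the new cursor so the caller can resume later. */
static size_t dump_entries(const struct entry *tbl, size_t n_entries,
			   struct entry *out, size_t out_room,
			   size_t start)
{
	size_t idx = 0, filled = 0;

	for (size_t i = 0; i < n_entries; i++) {
		if (++idx <= start)
			continue;       /* emitted on an earlier call */
		if (filled == out_room) {
			idx--;          /* buffer full: retry this entry */
			break;
		}
		out[filled++] = tbl[i];
	}
	return idx;                     /* new cursor value */
}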
2703 | static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc, | ||
2704 | struct nlattr *nla, int full_entry) | ||
2705 | { | ||
2706 | struct nlattr *attrs[IPVS_SVC_ATTR_MAX + 1]; | ||
2707 | struct nlattr *nla_af, *nla_port, *nla_fwmark, *nla_protocol, *nla_addr; | ||
2708 | |||
2709 | /* Parse mandatory identifying service fields first */ | ||
2710 | if (nla == NULL || | ||
2711 | nla_parse_nested(attrs, IPVS_SVC_ATTR_MAX, nla, ip_vs_svc_policy)) | ||
2712 | return -EINVAL; | ||
2713 | |||
2714 | nla_af = attrs[IPVS_SVC_ATTR_AF]; | ||
2715 | nla_protocol = attrs[IPVS_SVC_ATTR_PROTOCOL]; | ||
2716 | nla_addr = attrs[IPVS_SVC_ATTR_ADDR]; | ||
2717 | nla_port = attrs[IPVS_SVC_ATTR_PORT]; | ||
2718 | nla_fwmark = attrs[IPVS_SVC_ATTR_FWMARK]; | ||
2719 | |||
2720 | if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr)))) | ||
2721 | return -EINVAL; | ||
2722 | |||
2723 | usvc->af = nla_get_u16(nla_af); | ||
2724 | #ifdef CONFIG_IP_VS_IPV6 | ||
2725 | if (usvc->af != AF_INET && usvc->af != AF_INET6) | ||
2726 | #else | ||
2727 | if (usvc->af != AF_INET) | ||
2728 | #endif | ||
2729 | return -EAFNOSUPPORT; | ||
2730 | |||
2731 | if (nla_fwmark) { | ||
2732 | usvc->protocol = IPPROTO_TCP; | ||
2733 | usvc->fwmark = nla_get_u32(nla_fwmark); | ||
2734 | } else { | ||
2735 | usvc->protocol = nla_get_u16(nla_protocol); | ||
2736 | nla_memcpy(&usvc->addr, nla_addr, sizeof(usvc->addr)); | ||
2737 | usvc->port = nla_get_u16(nla_port); | ||
2738 | usvc->fwmark = 0; | ||
2739 | } | ||
2740 | |||
2741 | /* If a full entry was requested, check for the additional fields */ | ||
2742 | if (full_entry) { | ||
2743 | struct nlattr *nla_sched, *nla_flags, *nla_timeout, | ||
2744 | *nla_netmask; | ||
2745 | struct ip_vs_flags flags; | ||
2746 | struct ip_vs_service *svc; | ||
2747 | |||
2748 | nla_sched = attrs[IPVS_SVC_ATTR_SCHED_NAME]; | ||
2749 | nla_flags = attrs[IPVS_SVC_ATTR_FLAGS]; | ||
2750 | nla_timeout = attrs[IPVS_SVC_ATTR_TIMEOUT]; | ||
2751 | nla_netmask = attrs[IPVS_SVC_ATTR_NETMASK]; | ||
2752 | |||
2753 | if (!(nla_sched && nla_flags && nla_timeout && nla_netmask)) | ||
2754 | return -EINVAL; | ||
2755 | |||
2756 | nla_memcpy(&flags, nla_flags, sizeof(flags)); | ||
2757 | |||
2758 | /* prefill flags from service if it already exists */ | ||
2759 | if (usvc->fwmark) | ||
2760 | svc = __ip_vs_svc_fwm_get(usvc->af, usvc->fwmark); | ||
2761 | else | ||
2762 | svc = __ip_vs_service_get(usvc->af, usvc->protocol, | ||
2763 | &usvc->addr, usvc->port); | ||
2764 | if (svc) { | ||
2765 | usvc->flags = svc->flags; | ||
2766 | ip_vs_service_put(svc); | ||
2767 | } else | ||
2768 | usvc->flags = 0; | ||
2769 | |||
2770 | /* set new flags from userland */ | ||
2771 | usvc->flags = (usvc->flags & ~flags.mask) | | ||
2772 | (flags.flags & flags.mask); | ||
2773 | usvc->sched_name = nla_data(nla_sched); | ||
2774 | usvc->timeout = nla_get_u32(nla_timeout); | ||
2775 | usvc->netmask = nla_get_u32(nla_netmask); | ||
2776 | } | ||
2777 | |||
2778 | return 0; | ||
2779 | } | ||
2780 | |||
2781 | static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla) | ||
2782 | { | ||
2783 | struct ip_vs_service_user_kern usvc; | ||
2784 | int ret; | ||
2785 | |||
2786 | ret = ip_vs_genl_parse_service(&usvc, nla, 0); | ||
2787 | if (ret) | ||
2788 | return ERR_PTR(ret); | ||
2789 | |||
2790 | if (usvc.fwmark) | ||
2791 | return __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark); | ||
2792 | else | ||
2793 | return __ip_vs_service_get(usvc.af, usvc.protocol, | ||
2794 | &usvc.addr, usvc.port); | ||
2795 | } | ||
2796 | |||
2797 | static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) | ||
2798 | { | ||
2799 | struct nlattr *nl_dest; | ||
2800 | |||
2801 | nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST); | ||
2802 | if (!nl_dest) | ||
2803 | return -EMSGSIZE; | ||
2804 | |||
2805 | NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr); | ||
2806 | NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port); | ||
2807 | |||
2808 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD, | ||
2809 | atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); | ||
2810 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)); | ||
2811 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold); | ||
2812 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold); | ||
2813 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS, | ||
2814 | atomic_read(&dest->activeconns)); | ||
2815 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS, | ||
2816 | atomic_read(&dest->inactconns)); | ||
2817 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS, | ||
2818 | atomic_read(&dest->persistconns)); | ||
2819 | |||
2820 | if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats)) | ||
2821 | goto nla_put_failure; | ||
2822 | |||
2823 | nla_nest_end(skb, nl_dest); | ||
2824 | |||
2825 | return 0; | ||
2826 | |||
2827 | nla_put_failure: | ||
2828 | nla_nest_cancel(skb, nl_dest); | ||
2829 | return -EMSGSIZE; | ||
2830 | } | ||
2831 | |||
2832 | static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest, | ||
2833 | struct netlink_callback *cb) | ||
2834 | { | ||
2835 | void *hdr; | ||
2836 | |||
2837 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, | ||
2838 | &ip_vs_genl_family, NLM_F_MULTI, | ||
2839 | IPVS_CMD_NEW_DEST); | ||
2840 | if (!hdr) | ||
2841 | return -EMSGSIZE; | ||
2842 | |||
2843 | if (ip_vs_genl_fill_dest(skb, dest) < 0) | ||
2844 | goto nla_put_failure; | ||
2845 | |||
2846 | return genlmsg_end(skb, hdr); | ||
2847 | |||
2848 | nla_put_failure: | ||
2849 | genlmsg_cancel(skb, hdr); | ||
2850 | return -EMSGSIZE; | ||
2851 | } | ||
2852 | |||
2853 | static int ip_vs_genl_dump_dests(struct sk_buff *skb, | ||
2854 | struct netlink_callback *cb) | ||
2855 | { | ||
2856 | int idx = 0; | ||
2857 | int start = cb->args[0]; | ||
2858 | struct ip_vs_service *svc; | ||
2859 | struct ip_vs_dest *dest; | ||
2860 | struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1]; | ||
2861 | |||
2862 | mutex_lock(&__ip_vs_mutex); | ||
2863 | |||
2864 | /* Try to find the service for which to dump destinations */ | ||
2865 | if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, | ||
2866 | IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy)) | ||
2867 | goto out_err; | ||
2868 | |||
2869 | svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]); | ||
2870 | if (IS_ERR(svc) || svc == NULL) | ||
2871 | goto out_err; | ||
2872 | |||
2873 | /* Dump the destinations */ | ||
2874 | list_for_each_entry(dest, &svc->destinations, n_list) { | ||
2875 | if (++idx <= start) | ||
2876 | continue; | ||
2877 | if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) { | ||
2878 | idx--; | ||
2879 | goto nla_put_failure; | ||
2880 | } | ||
2881 | } | ||
2882 | |||
2883 | nla_put_failure: | ||
2884 | cb->args[0] = idx; | ||
2885 | ip_vs_service_put(svc); | ||
2886 | |||
2887 | out_err: | ||
2888 | mutex_unlock(&__ip_vs_mutex); | ||
2889 | |||
2890 | return skb->len; | ||
2891 | } | ||
2892 | |||
2893 | static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest, | ||
2894 | struct nlattr *nla, int full_entry) | ||
2895 | { | ||
2896 | struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1]; | ||
2897 | struct nlattr *nla_addr, *nla_port; | ||
2898 | |||
2899 | /* Parse mandatory identifying destination fields first */ | ||
2900 | if (nla == NULL || | ||
2901 | nla_parse_nested(attrs, IPVS_DEST_ATTR_MAX, nla, ip_vs_dest_policy)) | ||
2902 | return -EINVAL; | ||
2903 | |||
2904 | nla_addr = attrs[IPVS_DEST_ATTR_ADDR]; | ||
2905 | nla_port = attrs[IPVS_DEST_ATTR_PORT]; | ||
2906 | |||
2907 | if (!(nla_addr && nla_port)) | ||
2908 | return -EINVAL; | ||
2909 | |||
2910 | nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr)); | ||
2911 | udest->port = nla_get_u16(nla_port); | ||
2912 | |||
2913 | /* If a full entry was requested, check for the additional fields */ | ||
2914 | if (full_entry) { | ||
2915 | struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh, | ||
2916 | *nla_l_thresh; | ||
2917 | |||
2918 | nla_fwd = attrs[IPVS_DEST_ATTR_FWD_METHOD]; | ||
2919 | nla_weight = attrs[IPVS_DEST_ATTR_WEIGHT]; | ||
2920 | nla_u_thresh = attrs[IPVS_DEST_ATTR_U_THRESH]; | ||
2921 | nla_l_thresh = attrs[IPVS_DEST_ATTR_L_THRESH]; | ||
2922 | |||
2923 | if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh)) | ||
2924 | return -EINVAL; | ||
2925 | |||
2926 | udest->conn_flags = nla_get_u32(nla_fwd) | ||
2927 | & IP_VS_CONN_F_FWD_MASK; | ||
2928 | udest->weight = nla_get_u32(nla_weight); | ||
2929 | udest->u_threshold = nla_get_u32(nla_u_thresh); | ||
2930 | udest->l_threshold = nla_get_u32(nla_l_thresh); | ||
2931 | } | ||
2932 | |||
2933 | return 0; | ||
2934 | } | ||
2935 | |||
2936 | static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state, | ||
2937 | const char *mcast_ifn, __be32 syncid) | ||
2938 | { | ||
2939 | struct nlattr *nl_daemon; | ||
2940 | |||
2941 | nl_daemon = nla_nest_start(skb, IPVS_CMD_ATTR_DAEMON); | ||
2942 | if (!nl_daemon) | ||
2943 | return -EMSGSIZE; | ||
2944 | |||
2945 | NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state); | ||
2946 | NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn); | ||
2947 | NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid); | ||
2948 | |||
2949 | nla_nest_end(skb, nl_daemon); | ||
2950 | |||
2951 | return 0; | ||
2952 | |||
2953 | nla_put_failure: | ||
2954 | nla_nest_cancel(skb, nl_daemon); | ||
2955 | return -EMSGSIZE; | ||
2956 | } | ||
2957 | |||
2958 | static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state, | ||
2959 | const char *mcast_ifn, __be32 syncid, | ||
2960 | struct netlink_callback *cb) | ||
2961 | { | ||
2962 | void *hdr; | ||
2963 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, | ||
2964 | &ip_vs_genl_family, NLM_F_MULTI, | ||
2965 | IPVS_CMD_NEW_DAEMON); | ||
2966 | if (!hdr) | ||
2967 | return -EMSGSIZE; | ||
2968 | |||
2969 | if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid)) | ||
2970 | goto nla_put_failure; | ||
2971 | |||
2972 | return genlmsg_end(skb, hdr); | ||
2973 | |||
2974 | nla_put_failure: | ||
2975 | genlmsg_cancel(skb, hdr); | ||
2976 | return -EMSGSIZE; | ||
2977 | } | ||
2978 | |||
2979 | static int ip_vs_genl_dump_daemons(struct sk_buff *skb, | ||
2980 | struct netlink_callback *cb) | ||
2981 | { | ||
2982 | mutex_lock(&__ip_vs_mutex); | ||
2983 | if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) { | ||
2984 | if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER, | ||
2985 | ip_vs_master_mcast_ifn, | ||
2986 | ip_vs_master_syncid, cb) < 0) | ||
2987 | goto nla_put_failure; | ||
2988 | |||
2989 | cb->args[0] = 1; | ||
2990 | } | ||
2991 | |||
2992 | if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) { | ||
2993 | if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP, | ||
2994 | ip_vs_backup_mcast_ifn, | ||
2995 | ip_vs_backup_syncid, cb) < 0) | ||
2996 | goto nla_put_failure; | ||
2997 | |||
2998 | cb->args[1] = 1; | ||
2999 | } | ||
3000 | |||
3001 | nla_put_failure: | ||
3002 | mutex_unlock(&__ip_vs_mutex); | ||
3003 | |||
3004 | return skb->len; | ||
3005 | } | ||
3006 | |||
3007 | static int ip_vs_genl_new_daemon(struct nlattr **attrs) | ||
3008 | { | ||
3009 | if (!(attrs[IPVS_DAEMON_ATTR_STATE] && | ||
3010 | attrs[IPVS_DAEMON_ATTR_MCAST_IFN] && | ||
3011 | attrs[IPVS_DAEMON_ATTR_SYNC_ID])) | ||
3012 | return -EINVAL; | ||
3013 | |||
3014 | return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]), | ||
3015 | nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]), | ||
3016 | nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID])); | ||
3017 | } | ||
3018 | |||
3019 | static int ip_vs_genl_del_daemon(struct nlattr **attrs) | ||
3020 | { | ||
3021 | if (!attrs[IPVS_DAEMON_ATTR_STATE]) | ||
3022 | return -EINVAL; | ||
3023 | |||
3024 | return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); | ||
3025 | } | ||
3026 | |||
3027 | static int ip_vs_genl_set_config(struct nlattr **attrs) | ||
3028 | { | ||
3029 | struct ip_vs_timeout_user t; | ||
3030 | |||
3031 | __ip_vs_get_timeouts(&t); | ||
3032 | |||
3033 | if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]) | ||
3034 | t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]); | ||
3035 | |||
3036 | if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]) | ||
3037 | t.tcp_fin_timeout = | ||
3038 | nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]); | ||
3039 | |||
3040 | if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]) | ||
3041 | t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]); | ||
3042 | |||
3043 | return ip_vs_set_timeout(&t); | ||
3044 | } | ||
3045 | |||
3046 | static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) | ||
3047 | { | ||
3048 | struct ip_vs_service *svc = NULL; | ||
3049 | struct ip_vs_service_user_kern usvc; | ||
3050 | struct ip_vs_dest_user_kern udest; | ||
3051 | int ret = 0, cmd; | ||
3052 | int need_full_svc = 0, need_full_dest = 0; | ||
3053 | |||
3054 | cmd = info->genlhdr->cmd; | ||
3055 | |||
3056 | mutex_lock(&__ip_vs_mutex); | ||
3057 | |||
3058 | if (cmd == IPVS_CMD_FLUSH) { | ||
3059 | ret = ip_vs_flush(); | ||
3060 | goto out; | ||
3061 | } else if (cmd == IPVS_CMD_SET_CONFIG) { | ||
3062 | ret = ip_vs_genl_set_config(info->attrs); | ||
3063 | goto out; | ||
3064 | } else if (cmd == IPVS_CMD_NEW_DAEMON || | ||
3065 | cmd == IPVS_CMD_DEL_DAEMON) { | ||
3066 | |||
3067 | struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1]; | ||
3068 | |||
3069 | if (!info->attrs[IPVS_CMD_ATTR_DAEMON] || | ||
3070 | nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX, | ||
3071 | info->attrs[IPVS_CMD_ATTR_DAEMON], | ||
3072 | ip_vs_daemon_policy)) { | ||
3073 | ret = -EINVAL; | ||
3074 | goto out; | ||
3075 | } | ||
3076 | |||
3077 | if (cmd == IPVS_CMD_NEW_DAEMON) | ||
3078 | ret = ip_vs_genl_new_daemon(daemon_attrs); | ||
3079 | else | ||
3080 | ret = ip_vs_genl_del_daemon(daemon_attrs); | ||
3081 | goto out; | ||
3082 | } else if (cmd == IPVS_CMD_ZERO && | ||
3083 | !info->attrs[IPVS_CMD_ATTR_SERVICE]) { | ||
3084 | ret = ip_vs_zero_all(); | ||
3085 | goto out; | ||
3086 | } | ||
3087 | |||
3088 | /* All following commands require a service argument, so check if we | ||
3089 | * received a valid one. We need a full service specification when | ||
3090 | * adding / editing a service. Only identifying members otherwise. */ | ||
3091 | if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE) | ||
3092 | need_full_svc = 1; | ||
3093 | |||
3094 | ret = ip_vs_genl_parse_service(&usvc, | ||
3095 | info->attrs[IPVS_CMD_ATTR_SERVICE], | ||
3096 | need_full_svc); | ||
3097 | if (ret) | ||
3098 | goto out; | ||
3099 | |||
3100 | /* Lookup the exact service by <protocol, addr, port> or fwmark */ | ||
3101 | if (usvc.fwmark == 0) | ||
3102 | svc = __ip_vs_service_get(usvc.af, usvc.protocol, | ||
3103 | &usvc.addr, usvc.port); | ||
3104 | else | ||
3105 | svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark); | ||
3106 | |||
3107 | /* Unless we're adding a new service, the service must already exist */ | ||
3108 | if ((cmd != IPVS_CMD_NEW_SERVICE) && (svc == NULL)) { | ||
3109 | ret = -ESRCH; | ||
3110 | goto out; | ||
3111 | } | ||
3112 | |||
3113 | /* Destination commands require a valid destination argument. For | ||
3114 | * adding / editing a destination, we need a full destination | ||
3115 | * specification. */ | ||
3116 | if (cmd == IPVS_CMD_NEW_DEST || cmd == IPVS_CMD_SET_DEST || | ||
3117 | cmd == IPVS_CMD_DEL_DEST) { | ||
3118 | if (cmd != IPVS_CMD_DEL_DEST) | ||
3119 | need_full_dest = 1; | ||
3120 | |||
3121 | ret = ip_vs_genl_parse_dest(&udest, | ||
3122 | info->attrs[IPVS_CMD_ATTR_DEST], | ||
3123 | need_full_dest); | ||
3124 | if (ret) | ||
3125 | goto out; | ||
3126 | } | ||
3127 | |||
3128 | switch (cmd) { | ||
3129 | case IPVS_CMD_NEW_SERVICE: | ||
3130 | if (svc == NULL) | ||
3131 | ret = ip_vs_add_service(&usvc, &svc); | ||
3132 | else | ||
3133 | ret = -EEXIST; | ||
3134 | break; | ||
3135 | case IPVS_CMD_SET_SERVICE: | ||
3136 | ret = ip_vs_edit_service(svc, &usvc); | ||
3137 | break; | ||
3138 | case IPVS_CMD_DEL_SERVICE: | ||
3139 | ret = ip_vs_del_service(svc); | ||
3140 | break; | ||
3141 | case IPVS_CMD_NEW_DEST: | ||
3142 | ret = ip_vs_add_dest(svc, &udest); | ||
3143 | break; | ||
3144 | case IPVS_CMD_SET_DEST: | ||
3145 | ret = ip_vs_edit_dest(svc, &udest); | ||
3146 | break; | ||
3147 | case IPVS_CMD_DEL_DEST: | ||
3148 | ret = ip_vs_del_dest(svc, &udest); | ||
3149 | break; | ||
3150 | case IPVS_CMD_ZERO: | ||
3151 | ret = ip_vs_zero_service(svc); | ||
3152 | break; | ||
3153 | default: | ||
3154 | ret = -EINVAL; | ||
3155 | } | ||
3156 | |||
3157 | out: | ||
3158 | if (svc) | ||
3159 | ip_vs_service_put(svc); | ||
3160 | mutex_unlock(&__ip_vs_mutex); | ||
3161 | |||
3162 | return ret; | ||
3163 | } | ||
3164 | |||
3165 | static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) | ||
3166 | { | ||
3167 | struct sk_buff *msg; | ||
3168 | void *reply; | ||
3169 | int ret, cmd, reply_cmd; | ||
3170 | |||
3171 | cmd = info->genlhdr->cmd; | ||
3172 | |||
3173 | if (cmd == IPVS_CMD_GET_SERVICE) | ||
3174 | reply_cmd = IPVS_CMD_NEW_SERVICE; | ||
3175 | else if (cmd == IPVS_CMD_GET_INFO) | ||
3176 | reply_cmd = IPVS_CMD_SET_INFO; | ||
3177 | else if (cmd == IPVS_CMD_GET_CONFIG) | ||
3178 | reply_cmd = IPVS_CMD_SET_CONFIG; | ||
3179 | else { | ||
3180 | IP_VS_ERR("unknown Generic Netlink command\n"); | ||
3181 | return -EINVAL; | ||
3182 | } | ||
3183 | |||
3184 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
3185 | if (!msg) | ||
3186 | return -ENOMEM; | ||
3187 | |||
3188 | mutex_lock(&__ip_vs_mutex); | ||
3189 | |||
3190 | reply = genlmsg_put_reply(msg, info, &ip_vs_genl_family, 0, reply_cmd); | ||
3191 | if (reply == NULL) | ||
3192 | goto nla_put_failure; | ||
3193 | |||
3194 | switch (cmd) { | ||
3195 | case IPVS_CMD_GET_SERVICE: | ||
3196 | { | ||
3197 | struct ip_vs_service *svc; | ||
3198 | |||
3199 | svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]); | ||
3200 | if (IS_ERR(svc)) { | ||
3201 | ret = PTR_ERR(svc); | ||
3202 | goto out_err; | ||
3203 | } else if (svc) { | ||
3204 | ret = ip_vs_genl_fill_service(msg, svc); | ||
3205 | ip_vs_service_put(svc); | ||
3206 | if (ret) | ||
3207 | goto nla_put_failure; | ||
3208 | } else { | ||
3209 | ret = -ESRCH; | ||
3210 | goto out_err; | ||
3211 | } | ||
3212 | |||
3213 | break; | ||
3214 | } | ||
3215 | |||
3216 | case IPVS_CMD_GET_CONFIG: | ||
3217 | { | ||
3218 | struct ip_vs_timeout_user t; | ||
3219 | |||
3220 | __ip_vs_get_timeouts(&t); | ||
3221 | #ifdef CONFIG_IP_VS_PROTO_TCP | ||
3222 | NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout); | ||
3223 | NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, | ||
3224 | t.tcp_fin_timeout); | ||
3225 | #endif | ||
3226 | #ifdef CONFIG_IP_VS_PROTO_UDP | ||
3227 | NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout); | ||
3228 | #endif | ||
3229 | |||
3230 | break; | ||
3231 | } | ||
3232 | |||
3233 | case IPVS_CMD_GET_INFO: | ||
3234 | NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE); | ||
3235 | NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, | ||
3236 | IP_VS_CONN_TAB_SIZE); | ||
3237 | break; | ||
3238 | } | ||
3239 | |||
3240 | genlmsg_end(msg, reply); | ||
3241 | ret = genlmsg_unicast(msg, info->snd_pid); | ||
3242 | goto out; | ||
3243 | |||
3244 | nla_put_failure: | ||
3245 | IP_VS_ERR("not enough space in Netlink message\n"); | ||
3246 | ret = -EMSGSIZE; | ||
3247 | |||
3248 | out_err: | ||
3249 | nlmsg_free(msg); | ||
3250 | out: | ||
3251 | mutex_unlock(&__ip_vs_mutex); | ||
3252 | |||
3253 | return ret; | ||
3254 | } | ||
3255 | |||
3256 | |||
3257 | static struct genl_ops ip_vs_genl_ops[] __read_mostly = { | ||
3258 | { | ||
3259 | .cmd = IPVS_CMD_NEW_SERVICE, | ||
3260 | .flags = GENL_ADMIN_PERM, | ||
3261 | .policy = ip_vs_cmd_policy, | ||
3262 | .doit = ip_vs_genl_set_cmd, | ||
3263 | }, | ||
3264 | { | ||
3265 | .cmd = IPVS_CMD_SET_SERVICE, | ||
3266 | .flags = GENL_ADMIN_PERM, | ||
3267 | .policy = ip_vs_cmd_policy, | ||
3268 | .doit = ip_vs_genl_set_cmd, | ||
3269 | }, | ||
3270 | { | ||
3271 | .cmd = IPVS_CMD_DEL_SERVICE, | ||
3272 | .flags = GENL_ADMIN_PERM, | ||
3273 | .policy = ip_vs_cmd_policy, | ||
3274 | .doit = ip_vs_genl_set_cmd, | ||
3275 | }, | ||
3276 | { | ||
3277 | .cmd = IPVS_CMD_GET_SERVICE, | ||
3278 | .flags = GENL_ADMIN_PERM, | ||
3279 | .doit = ip_vs_genl_get_cmd, | ||
3280 | .dumpit = ip_vs_genl_dump_services, | ||
3281 | .policy = ip_vs_cmd_policy, | ||
3282 | }, | ||
3283 | { | ||
3284 | .cmd = IPVS_CMD_NEW_DEST, | ||
3285 | .flags = GENL_ADMIN_PERM, | ||
3286 | .policy = ip_vs_cmd_policy, | ||
3287 | .doit = ip_vs_genl_set_cmd, | ||
3288 | }, | ||
3289 | { | ||
3290 | .cmd = IPVS_CMD_SET_DEST, | ||
3291 | .flags = GENL_ADMIN_PERM, | ||
3292 | .policy = ip_vs_cmd_policy, | ||
3293 | .doit = ip_vs_genl_set_cmd, | ||
3294 | }, | ||
3295 | { | ||
3296 | .cmd = IPVS_CMD_DEL_DEST, | ||
3297 | .flags = GENL_ADMIN_PERM, | ||
3298 | .policy = ip_vs_cmd_policy, | ||
3299 | .doit = ip_vs_genl_set_cmd, | ||
3300 | }, | ||
3301 | { | ||
3302 | .cmd = IPVS_CMD_GET_DEST, | ||
3303 | .flags = GENL_ADMIN_PERM, | ||
3304 | .policy = ip_vs_cmd_policy, | ||
3305 | .dumpit = ip_vs_genl_dump_dests, | ||
3306 | }, | ||
3307 | { | ||
3308 | .cmd = IPVS_CMD_NEW_DAEMON, | ||
3309 | .flags = GENL_ADMIN_PERM, | ||
3310 | .policy = ip_vs_cmd_policy, | ||
3311 | .doit = ip_vs_genl_set_cmd, | ||
3312 | }, | ||
3313 | { | ||
3314 | .cmd = IPVS_CMD_DEL_DAEMON, | ||
3315 | .flags = GENL_ADMIN_PERM, | ||
3316 | .policy = ip_vs_cmd_policy, | ||
3317 | .doit = ip_vs_genl_set_cmd, | ||
3318 | }, | ||
3319 | { | ||
3320 | .cmd = IPVS_CMD_GET_DAEMON, | ||
3321 | .flags = GENL_ADMIN_PERM, | ||
3322 | .dumpit = ip_vs_genl_dump_daemons, | ||
3323 | }, | ||
3324 | { | ||
3325 | .cmd = IPVS_CMD_SET_CONFIG, | ||
3326 | .flags = GENL_ADMIN_PERM, | ||
3327 | .policy = ip_vs_cmd_policy, | ||
3328 | .doit = ip_vs_genl_set_cmd, | ||
3329 | }, | ||
3330 | { | ||
3331 | .cmd = IPVS_CMD_GET_CONFIG, | ||
3332 | .flags = GENL_ADMIN_PERM, | ||
3333 | .doit = ip_vs_genl_get_cmd, | ||
3334 | }, | ||
3335 | { | ||
3336 | .cmd = IPVS_CMD_GET_INFO, | ||
3337 | .flags = GENL_ADMIN_PERM, | ||
3338 | .doit = ip_vs_genl_get_cmd, | ||
3339 | }, | ||
3340 | { | ||
3341 | .cmd = IPVS_CMD_ZERO, | ||
3342 | .flags = GENL_ADMIN_PERM, | ||
3343 | .policy = ip_vs_cmd_policy, | ||
3344 | .doit = ip_vs_genl_set_cmd, | ||
3345 | }, | ||
3346 | { | ||
3347 | .cmd = IPVS_CMD_FLUSH, | ||
3348 | .flags = GENL_ADMIN_PERM, | ||
3349 | .doit = ip_vs_genl_set_cmd, | ||
3350 | }, | ||
3351 | }; | ||
3352 | |||
3353 | static int __init ip_vs_genl_register(void) | ||
3354 | { | ||
3355 | int ret, i; | ||
3356 | |||
3357 | ret = genl_register_family(&ip_vs_genl_family); | ||
3358 | if (ret) | ||
3359 | return ret; | ||
3360 | |||
3361 | for (i = 0; i < ARRAY_SIZE(ip_vs_genl_ops); i++) { | ||
3362 | ret = genl_register_ops(&ip_vs_genl_family, &ip_vs_genl_ops[i]); | ||
3363 | if (ret) | ||
3364 | goto err_out; | ||
3365 | } | ||
3366 | return 0; | ||
3367 | |||
3368 | err_out: | ||
3369 | genl_unregister_family(&ip_vs_genl_family); | ||
3370 | return ret; | ||
3371 | } | ||
3372 | |||
3373 | static void ip_vs_genl_unregister(void) | ||
3374 | { | ||
3375 | genl_unregister_family(&ip_vs_genl_family); | ||
3376 | } | ||
3377 | |||
3378 | /* End of Generic Netlink interface definitions */ | ||
3379 | |||
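
The dump callbacks above (ip_vs_genl_dump_dests(), ip_vs_genl_dump_daemons()) can be re-entered several times until everything has been sent: progress is parked in cb->args[] between calls, entries already emitted are skipped on the next pass, and an entry that does not fit is retried in the following message. A minimal stand-alone sketch of that resume-by-index idea, with made-up sizes standing in for the netlink message limit:

#include <stdio.h>

#define N_DESTS    7
#define PER_MSG    3	/* pretend only 3 entries fit per message */

static long args0;	/* stand-in for cb->args[0] */

/* returns the number of entries emitted into this "message" */
static int dump_dests(void)
{
	int idx = 0, emitted = 0;
	long start = args0;

	for (int d = 0; d < N_DESTS; d++) {
		if (++idx <= start)
			continue;	/* already sent in an earlier message */
		if (emitted == PER_MSG) {
			idx--;		/* no room: retry this entry next time */
			break;
		}
		printf("  dest %d\n", d);
		emitted++;
	}
	args0 = idx;		/* remember how far we got */
	return emitted;
}

int main(void)
{
	int msg = 0;

	while (1) {
		printf("message %d:\n", ++msg);
		if (dump_dests() == 0)
			break;	/* an empty message ends the dump */
	}
	return 0;
}
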
2323 | 3380 | ||
2324 | int __init ip_vs_control_init(void) | 3381 | int __init ip_vs_control_init(void) |
2325 | { | 3382 | { |
@@ -2334,6 +3391,13 @@ int __init ip_vs_control_init(void) | |||
2334 | return ret; | 3391 | return ret; |
2335 | } | 3392 | } |
2336 | 3393 | ||
3394 | ret = ip_vs_genl_register(); | ||
3395 | if (ret) { | ||
3396 | IP_VS_ERR("cannot register Generic Netlink interface.\n"); | ||
3397 | nf_unregister_sockopt(&ip_vs_sockopts); | ||
3398 | return ret; | ||
3399 | } | ||
3400 | |||
2337 | proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops); | 3401 | proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops); |
2338 | proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops); | 3402 | proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops); |
2339 | 3403 | ||
@@ -2368,6 +3432,7 @@ void ip_vs_control_cleanup(void) | |||
2368 | unregister_sysctl_table(sysctl_header); | 3432 | unregister_sysctl_table(sysctl_header); |
2369 | proc_net_remove(&init_net, "ip_vs_stats"); | 3433 | proc_net_remove(&init_net, "ip_vs_stats"); |
2370 | proc_net_remove(&init_net, "ip_vs"); | 3434 | proc_net_remove(&init_net, "ip_vs"); |
3435 | ip_vs_genl_unregister(); | ||
2371 | nf_unregister_sockopt(&ip_vs_sockopts); | 3436 | nf_unregister_sockopt(&ip_vs_sockopts); |
2372 | LeaveFunction(2); | 3437 | LeaveFunction(2); |
2373 | } | 3438 | } |
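
The flag handling in ip_vs_genl_parse_service() above only rewrites the bits named in flags.mask, so a userspace update leaves the service's other flag bits untouched. A minimal user-space sketch of the same masked update; the struct and the sample values are illustrative, not the kernel's struct ip_vs_flags:

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for struct ip_vs_flags: a value/mask pair */
struct flags_update {
	uint32_t flags;	/* new bit values */
	uint32_t mask;	/* which bits the caller wants to change */
};

static uint32_t apply_masked_flags(uint32_t current, struct flags_update u)
{
	/* keep bits outside the mask, take masked bits from the update */
	return (current & ~u.mask) | (u.flags & u.mask);
}

int main(void)
{
	struct flags_update u = { .flags = 0x1, .mask = 0x3 };
	uint32_t cur = 0xf2;	/* hypothetical existing service flags */

	printf("0x%x -> 0x%x\n", (unsigned)cur,
	       (unsigned)apply_masked_flags(cur, u));
	/* prints 0xf2 -> 0xf1: only the two masked low bits changed */
	return 0;
}
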
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c index fa66824d264f..a16943fd72f1 100644 --- a/net/ipv4/ipvs/ip_vs_dh.c +++ b/net/ipv4/ipvs/ip_vs_dh.c | |||
@@ -218,7 +218,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
218 | IP_VS_DBG(6, "DH: destination IP address %u.%u.%u.%u " | 218 | IP_VS_DBG(6, "DH: destination IP address %u.%u.%u.%u " |
219 | "--> server %u.%u.%u.%u:%d\n", | 219 | "--> server %u.%u.%u.%u:%d\n", |
220 | NIPQUAD(iph->daddr), | 220 | NIPQUAD(iph->daddr), |
221 | NIPQUAD(dest->addr), | 221 | NIPQUAD(dest->addr.ip), |
222 | ntohs(dest->port)); | 222 | ntohs(dest->port)); |
223 | 223 | ||
224 | return dest; | 224 | return dest; |
@@ -234,6 +234,9 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler = | |||
234 | .refcnt = ATOMIC_INIT(0), | 234 | .refcnt = ATOMIC_INIT(0), |
235 | .module = THIS_MODULE, | 235 | .module = THIS_MODULE, |
236 | .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list), | 236 | .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list), |
237 | #ifdef CONFIG_IP_VS_IPV6 | ||
238 | .supports_ipv6 = 0, | ||
239 | #endif | ||
237 | .init_service = ip_vs_dh_init_svc, | 240 | .init_service = ip_vs_dh_init_svc, |
238 | .done_service = ip_vs_dh_done_svc, | 241 | .done_service = ip_vs_dh_done_svc, |
239 | .update_service = ip_vs_dh_update_svc, | 242 | .update_service = ip_vs_dh_update_svc, |
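
For context, the DH scheduler picks a real server purely from a hash of the destination address, so the same destination keeps mapping to the same server while the server table is unchanged. A simplified stand-alone sketch of that idea; the multiplicative hash constant and the table size are assumptions for illustration, not the kernel's ip_vs_dh hash:

#include <stdint.h>
#include <stdio.h>

#define DH_TAB_SIZE 256		/* assumed table size for this sketch */

/* map an IPv4 address (host byte order) to a bucket index */
static unsigned dh_bucket(uint32_t daddr)
{
	/* simple multiplicative hash; the real scheduler's constant may differ */
	return (daddr * 2654435761u) % DH_TAB_SIZE;
}

int main(void)
{
	const char *servers[DH_TAB_SIZE];
	uint32_t dst = 0xc0a80a07;	/* 192.168.10.7, an example destination */

	for (int i = 0; i < DH_TAB_SIZE; i++)
		servers[i] = (i % 2) ? "rs-b" : "rs-a";	/* toy server table */

	/* the same destination always selects the same bucket, hence server */
	printf("dest %#x -> bucket %u -> %s\n", (unsigned)dst,
	       dh_bucket(dst), servers[dh_bucket(dst)]);
	return 0;
}
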
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c index 5a20f93bd7f9..2eb2860dabb5 100644 --- a/net/ipv4/ipvs/ip_vs_est.c +++ b/net/ipv4/ipvs/ip_vs_est.c | |||
@@ -65,37 +65,37 @@ static void estimation_timer(unsigned long arg) | |||
65 | s = container_of(e, struct ip_vs_stats, est); | 65 | s = container_of(e, struct ip_vs_stats, est); |
66 | 66 | ||
67 | spin_lock(&s->lock); | 67 | spin_lock(&s->lock); |
68 | n_conns = s->conns; | 68 | n_conns = s->ustats.conns; |
69 | n_inpkts = s->inpkts; | 69 | n_inpkts = s->ustats.inpkts; |
70 | n_outpkts = s->outpkts; | 70 | n_outpkts = s->ustats.outpkts; |
71 | n_inbytes = s->inbytes; | 71 | n_inbytes = s->ustats.inbytes; |
72 | n_outbytes = s->outbytes; | 72 | n_outbytes = s->ustats.outbytes; |
73 | 73 | ||
74 | /* scaled by 2^10, but divided 2 seconds */ | 74 | /* scaled by 2^10, but divided 2 seconds */ |
75 | rate = (n_conns - e->last_conns)<<9; | 75 | rate = (n_conns - e->last_conns)<<9; |
76 | e->last_conns = n_conns; | 76 | e->last_conns = n_conns; |
77 | e->cps += ((long)rate - (long)e->cps)>>2; | 77 | e->cps += ((long)rate - (long)e->cps)>>2; |
78 | s->cps = (e->cps+0x1FF)>>10; | 78 | s->ustats.cps = (e->cps+0x1FF)>>10; |
79 | 79 | ||
80 | rate = (n_inpkts - e->last_inpkts)<<9; | 80 | rate = (n_inpkts - e->last_inpkts)<<9; |
81 | e->last_inpkts = n_inpkts; | 81 | e->last_inpkts = n_inpkts; |
82 | e->inpps += ((long)rate - (long)e->inpps)>>2; | 82 | e->inpps += ((long)rate - (long)e->inpps)>>2; |
83 | s->inpps = (e->inpps+0x1FF)>>10; | 83 | s->ustats.inpps = (e->inpps+0x1FF)>>10; |
84 | 84 | ||
85 | rate = (n_outpkts - e->last_outpkts)<<9; | 85 | rate = (n_outpkts - e->last_outpkts)<<9; |
86 | e->last_outpkts = n_outpkts; | 86 | e->last_outpkts = n_outpkts; |
87 | e->outpps += ((long)rate - (long)e->outpps)>>2; | 87 | e->outpps += ((long)rate - (long)e->outpps)>>2; |
88 | s->outpps = (e->outpps+0x1FF)>>10; | 88 | s->ustats.outpps = (e->outpps+0x1FF)>>10; |
89 | 89 | ||
90 | rate = (n_inbytes - e->last_inbytes)<<4; | 90 | rate = (n_inbytes - e->last_inbytes)<<4; |
91 | e->last_inbytes = n_inbytes; | 91 | e->last_inbytes = n_inbytes; |
92 | e->inbps += ((long)rate - (long)e->inbps)>>2; | 92 | e->inbps += ((long)rate - (long)e->inbps)>>2; |
93 | s->inbps = (e->inbps+0xF)>>5; | 93 | s->ustats.inbps = (e->inbps+0xF)>>5; |
94 | 94 | ||
95 | rate = (n_outbytes - e->last_outbytes)<<4; | 95 | rate = (n_outbytes - e->last_outbytes)<<4; |
96 | e->last_outbytes = n_outbytes; | 96 | e->last_outbytes = n_outbytes; |
97 | e->outbps += ((long)rate - (long)e->outbps)>>2; | 97 | e->outbps += ((long)rate - (long)e->outbps)>>2; |
98 | s->outbps = (e->outbps+0xF)>>5; | 98 | s->ustats.outbps = (e->outbps+0xF)>>5; |
99 | spin_unlock(&s->lock); | 99 | spin_unlock(&s->lock); |
100 | } | 100 | } |
101 | spin_unlock(&est_lock); | 101 | spin_unlock(&est_lock); |
@@ -108,24 +108,22 @@ void ip_vs_new_estimator(struct ip_vs_stats *stats) | |||
108 | 108 | ||
109 | INIT_LIST_HEAD(&est->list); | 109 | INIT_LIST_HEAD(&est->list); |
110 | 110 | ||
111 | est->last_conns = stats->conns; | 111 | est->last_conns = stats->ustats.conns; |
112 | est->cps = stats->cps<<10; | 112 | est->cps = stats->ustats.cps<<10; |
113 | 113 | ||
114 | est->last_inpkts = stats->inpkts; | 114 | est->last_inpkts = stats->ustats.inpkts; |
115 | est->inpps = stats->inpps<<10; | 115 | est->inpps = stats->ustats.inpps<<10; |
116 | 116 | ||
117 | est->last_outpkts = stats->outpkts; | 117 | est->last_outpkts = stats->ustats.outpkts; |
118 | est->outpps = stats->outpps<<10; | 118 | est->outpps = stats->ustats.outpps<<10; |
119 | 119 | ||
120 | est->last_inbytes = stats->inbytes; | 120 | est->last_inbytes = stats->ustats.inbytes; |
121 | est->inbps = stats->inbps<<5; | 121 | est->inbps = stats->ustats.inbps<<5; |
122 | 122 | ||
123 | est->last_outbytes = stats->outbytes; | 123 | est->last_outbytes = stats->ustats.outbytes; |
124 | est->outbps = stats->outbps<<5; | 124 | est->outbps = stats->ustats.outbps<<5; |
125 | 125 | ||
126 | spin_lock_bh(&est_lock); | 126 | spin_lock_bh(&est_lock); |
127 | if (list_empty(&est_list)) | ||
128 | mod_timer(&est_timer, jiffies + 2 * HZ); | ||
129 | list_add(&est->list, &est_list); | 127 | list_add(&est->list, &est_list); |
130 | spin_unlock_bh(&est_lock); | 128 | spin_unlock_bh(&est_lock); |
131 | } | 129 | } |
@@ -136,11 +134,6 @@ void ip_vs_kill_estimator(struct ip_vs_stats *stats) | |||
136 | 134 | ||
137 | spin_lock_bh(&est_lock); | 135 | spin_lock_bh(&est_lock); |
138 | list_del(&est->list); | 136 | list_del(&est->list); |
139 | while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) { | ||
140 | spin_unlock_bh(&est_lock); | ||
141 | cpu_relax(); | ||
142 | spin_lock_bh(&est_lock); | ||
143 | } | ||
144 | spin_unlock_bh(&est_lock); | 137 | spin_unlock_bh(&est_lock); |
145 | } | 138 | } |
146 | 139 | ||
@@ -160,3 +153,14 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats) | |||
160 | est->inbps = 0; | 153 | est->inbps = 0; |
161 | est->outbps = 0; | 154 | est->outbps = 0; |
162 | } | 155 | } |
156 | |||
157 | int __init ip_vs_estimator_init(void) | ||
158 | { | ||
159 | mod_timer(&est_timer, jiffies + 2 * HZ); | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | void ip_vs_estimator_cleanup(void) | ||
164 | { | ||
165 | del_timer_sync(&est_timer); | ||
166 | } | ||
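
The estimator arithmetic above keeps its rates in fixed point: each 2-second tick turns the counter delta into a scaled sample (<<9 for packet and connection rates, <<4 for byte rates), smooths it with rate += (sample - rate) >> 2, and rounds the scaling back off when storing into ustats. A self-contained sketch of that smoothing step with made-up per-tick deltas:

#include <stdio.h>

/* fixed-point EWMA in the style of estimation_timer():
 * cps is kept scaled by 2^10; each 2-second tick feeds in the
 * connection delta scaled by 2^9 (i.e. delta per second << 10). */
static long ewma_cps;			/* scaled estimate */
static unsigned long last_conns;

static unsigned long tick(unsigned long n_conns)
{
	long rate = (long)(n_conns - last_conns) << 9;

	last_conns = n_conns;
	ewma_cps += (rate - ewma_cps) >> 2;	/* new = old + (sample - old)/4 */
	return (ewma_cps + 0x1FF) >> 10;	/* round and drop the scaling */
}

int main(void)
{
	unsigned long conns = 0, cps;

	for (int i = 1; i <= 10; i++) {
		conns += 100;		/* pretend 100 new connections per tick */
		cps = tick(conns);
		printf("tick %2d: ~%lu conns/s\n", i, cps);
	}
	/* converges toward 50 conns/s (100 connections per 2-second tick) */
	return 0;
}
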
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c index c1c758e4f733..2e7dbd8b73a4 100644 --- a/net/ipv4/ipvs/ip_vs_ftp.c +++ b/net/ipv4/ipvs/ip_vs_ftp.c | |||
@@ -140,13 +140,21 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
140 | struct tcphdr *th; | 140 | struct tcphdr *th; |
141 | char *data, *data_limit; | 141 | char *data, *data_limit; |
142 | char *start, *end; | 142 | char *start, *end; |
143 | __be32 from; | 143 | union nf_inet_addr from; |
144 | __be16 port; | 144 | __be16 port; |
145 | struct ip_vs_conn *n_cp; | 145 | struct ip_vs_conn *n_cp; |
146 | char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ | 146 | char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ |
147 | unsigned buf_len; | 147 | unsigned buf_len; |
148 | int ret; | 148 | int ret; |
149 | 149 | ||
150 | #ifdef CONFIG_IP_VS_IPV6 | ||
151 | /* This application helper doesn't work with IPv6 yet, | ||
152 | * so turn this into a no-op for IPv6 packets | ||
153 | */ | ||
154 | if (cp->af == AF_INET6) | ||
155 | return 1; | ||
156 | #endif | ||
157 | |||
150 | *diff = 0; | 158 | *diff = 0; |
151 | 159 | ||
152 | /* Only useful for established sessions */ | 160 | /* Only useful for established sessions */ |
@@ -166,24 +174,25 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
166 | if (ip_vs_ftp_get_addrport(data, data_limit, | 174 | if (ip_vs_ftp_get_addrport(data, data_limit, |
167 | SERVER_STRING, | 175 | SERVER_STRING, |
168 | sizeof(SERVER_STRING)-1, ')', | 176 | sizeof(SERVER_STRING)-1, ')', |
169 | &from, &port, | 177 | &from.ip, &port, |
170 | &start, &end) != 1) | 178 | &start, &end) != 1) |
171 | return 1; | 179 | return 1; |
172 | 180 | ||
173 | IP_VS_DBG(7, "PASV response (%u.%u.%u.%u:%d) -> " | 181 | IP_VS_DBG(7, "PASV response (%u.%u.%u.%u:%d) -> " |
174 | "%u.%u.%u.%u:%d detected\n", | 182 | "%u.%u.%u.%u:%d detected\n", |
175 | NIPQUAD(from), ntohs(port), NIPQUAD(cp->caddr), 0); | 183 | NIPQUAD(from.ip), ntohs(port), |
184 | NIPQUAD(cp->caddr.ip), 0); | ||
176 | 185 | ||
177 | /* | 186 | /* |
178 | * Now update or create a connection entry for it | 187 | * Now update or create a connection entry for it |
179 | */ | 188 | */ |
180 | n_cp = ip_vs_conn_out_get(iph->protocol, from, port, | 189 | n_cp = ip_vs_conn_out_get(AF_INET, iph->protocol, &from, port, |
181 | cp->caddr, 0); | 190 | &cp->caddr, 0); |
182 | if (!n_cp) { | 191 | if (!n_cp) { |
183 | n_cp = ip_vs_conn_new(IPPROTO_TCP, | 192 | n_cp = ip_vs_conn_new(AF_INET, IPPROTO_TCP, |
184 | cp->caddr, 0, | 193 | &cp->caddr, 0, |
185 | cp->vaddr, port, | 194 | &cp->vaddr, port, |
186 | from, port, | 195 | &from, port, |
187 | IP_VS_CONN_F_NO_CPORT, | 196 | IP_VS_CONN_F_NO_CPORT, |
188 | cp->dest); | 197 | cp->dest); |
189 | if (!n_cp) | 198 | if (!n_cp) |
@@ -196,9 +205,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
196 | /* | 205 | /* |
197 | * Replace the old passive address with the new one | 206 | * Replace the old passive address with the new one |
198 | */ | 207 | */ |
199 | from = n_cp->vaddr; | 208 | from.ip = n_cp->vaddr.ip; |
200 | port = n_cp->vport; | 209 | port = n_cp->vport; |
201 | sprintf(buf,"%d,%d,%d,%d,%d,%d", NIPQUAD(from), | 210 | sprintf(buf, "%d,%d,%d,%d,%d,%d", NIPQUAD(from.ip), |
202 | (ntohs(port)>>8)&255, ntohs(port)&255); | 211 | (ntohs(port)>>8)&255, ntohs(port)&255); |
203 | buf_len = strlen(buf); | 212 | buf_len = strlen(buf); |
204 | 213 | ||
@@ -243,10 +252,18 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
243 | struct tcphdr *th; | 252 | struct tcphdr *th; |
244 | char *data, *data_start, *data_limit; | 253 | char *data, *data_start, *data_limit; |
245 | char *start, *end; | 254 | char *start, *end; |
246 | __be32 to; | 255 | union nf_inet_addr to; |
247 | __be16 port; | 256 | __be16 port; |
248 | struct ip_vs_conn *n_cp; | 257 | struct ip_vs_conn *n_cp; |
249 | 258 | ||
259 | #ifdef CONFIG_IP_VS_IPV6 | ||
260 | /* This application helper doesn't work with IPv6 yet, | ||
261 | * so turn this into a no-op for IPv6 packets | ||
262 | */ | ||
263 | if (cp->af == AF_INET6) | ||
264 | return 1; | ||
265 | #endif | ||
266 | |||
250 | /* no diff required for incoming packets */ | 267 | /* no diff required for incoming packets */ |
251 | *diff = 0; | 268 | *diff = 0; |
252 | 269 | ||
@@ -291,12 +308,12 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
291 | */ | 308 | */ |
292 | if (ip_vs_ftp_get_addrport(data_start, data_limit, | 309 | if (ip_vs_ftp_get_addrport(data_start, data_limit, |
293 | CLIENT_STRING, sizeof(CLIENT_STRING)-1, | 310 | CLIENT_STRING, sizeof(CLIENT_STRING)-1, |
294 | '\r', &to, &port, | 311 | '\r', &to.ip, &port, |
295 | &start, &end) != 1) | 312 | &start, &end) != 1) |
296 | return 1; | 313 | return 1; |
297 | 314 | ||
298 | IP_VS_DBG(7, "PORT %u.%u.%u.%u:%d detected\n", | 315 | IP_VS_DBG(7, "PORT %u.%u.%u.%u:%d detected\n", |
299 | NIPQUAD(to), ntohs(port)); | 316 | NIPQUAD(to.ip), ntohs(port)); |
300 | 317 | ||
301 | /* Passive mode off */ | 318 | /* Passive mode off */ |
302 | cp->app_data = NULL; | 319 | cp->app_data = NULL; |
@@ -306,16 +323,16 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
306 | */ | 323 | */ |
307 | IP_VS_DBG(7, "protocol %s %u.%u.%u.%u:%d %u.%u.%u.%u:%d\n", | 324 | IP_VS_DBG(7, "protocol %s %u.%u.%u.%u:%d %u.%u.%u.%u:%d\n", |
308 | ip_vs_proto_name(iph->protocol), | 325 | ip_vs_proto_name(iph->protocol), |
309 | NIPQUAD(to), ntohs(port), NIPQUAD(cp->vaddr), 0); | 326 | NIPQUAD(to.ip), ntohs(port), NIPQUAD(cp->vaddr.ip), 0); |
310 | 327 | ||
311 | n_cp = ip_vs_conn_in_get(iph->protocol, | 328 | n_cp = ip_vs_conn_in_get(AF_INET, iph->protocol, |
312 | to, port, | 329 | &to, port, |
313 | cp->vaddr, htons(ntohs(cp->vport)-1)); | 330 | &cp->vaddr, htons(ntohs(cp->vport)-1)); |
314 | if (!n_cp) { | 331 | if (!n_cp) { |
315 | n_cp = ip_vs_conn_new(IPPROTO_TCP, | 332 | n_cp = ip_vs_conn_new(AF_INET, IPPROTO_TCP, |
316 | to, port, | 333 | &to, port, |
317 | cp->vaddr, htons(ntohs(cp->vport)-1), | 334 | &cp->vaddr, htons(ntohs(cp->vport)-1), |
318 | cp->daddr, htons(ntohs(cp->dport)-1), | 335 | &cp->daddr, htons(ntohs(cp->dport)-1), |
319 | 0, | 336 | 0, |
320 | cp->dest); | 337 | cp->dest); |
321 | if (!n_cp) | 338 | if (!n_cp) |
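
The FTP helper changes follow the pattern used throughout this series: addresses travel as a union that can hold either family, and IPv4-only paths read the .ip member. A rough stand-alone analogue of that layout; the union below is a simplified illustration, not the kernel's union nf_inet_addr definition:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* simplified analogue of a dual-family address union */
union inet_addr_any {
	uint32_t	ip;		/* IPv4, network byte order */
	uint32_t	ip6[4];		/* IPv6 as four 32-bit words */
};

static void print_addr(int af, const union inet_addr_any *a)
{
	char buf[INET6_ADDRSTRLEN];

	if (af == AF_INET)
		inet_ntop(AF_INET, &a->ip, buf, sizeof(buf));
	else
		inet_ntop(AF_INET6, a->ip6, buf, sizeof(buf));
	printf("%s\n", buf);
}

int main(void)
{
	union inet_addr_any a;

	memset(&a, 0, sizeof(a));
	inet_pton(AF_INET, "192.0.2.10", &a.ip);	/* v4-only helper path */
	print_addr(AF_INET, &a);

	inet_pton(AF_INET6, "2001:db8::1", a.ip6);
	print_addr(AF_INET6, &a);
	return 0;
}
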
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c index 7a6a319f544a..6ecef3518cac 100644 --- a/net/ipv4/ipvs/ip_vs_lblc.c +++ b/net/ipv4/ipvs/ip_vs_lblc.c | |||
@@ -96,7 +96,6 @@ struct ip_vs_lblc_entry { | |||
96 | * IPVS lblc hash table | 96 | * IPVS lblc hash table |
97 | */ | 97 | */ |
98 | struct ip_vs_lblc_table { | 98 | struct ip_vs_lblc_table { |
99 | rwlock_t lock; /* lock for this table */ | ||
100 | struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ | 99 | struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ |
101 | atomic_t entries; /* number of entries */ | 100 | atomic_t entries; /* number of entries */ |
102 | int max_size; /* maximum size of entries */ | 101 | int max_size; /* maximum size of entries */ |
@@ -123,31 +122,6 @@ static ctl_table vs_vars_table[] = { | |||
123 | 122 | ||
124 | static struct ctl_table_header * sysctl_header; | 123 | static struct ctl_table_header * sysctl_header; |
125 | 124 | ||
126 | /* | ||
127 | * new/free an ip_vs_lblc_entry, which is a mapping of a destination | ||
128 | * IP address to a server. | ||
129 | */ | ||
130 | static inline struct ip_vs_lblc_entry * | ||
131 | ip_vs_lblc_new(__be32 daddr, struct ip_vs_dest *dest) | ||
132 | { | ||
133 | struct ip_vs_lblc_entry *en; | ||
134 | |||
135 | en = kmalloc(sizeof(struct ip_vs_lblc_entry), GFP_ATOMIC); | ||
136 | if (en == NULL) { | ||
137 | IP_VS_ERR("ip_vs_lblc_new(): no memory\n"); | ||
138 | return NULL; | ||
139 | } | ||
140 | |||
141 | INIT_LIST_HEAD(&en->list); | ||
142 | en->addr = daddr; | ||
143 | |||
144 | atomic_inc(&dest->refcnt); | ||
145 | en->dest = dest; | ||
146 | |||
147 | return en; | ||
148 | } | ||
149 | |||
150 | |||
151 | static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) | 125 | static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) |
152 | { | 126 | { |
153 | list_del(&en->list); | 127 | list_del(&en->list); |
@@ -173,55 +147,66 @@ static inline unsigned ip_vs_lblc_hashkey(__be32 addr) | |||
173 | * Hash an entry in the ip_vs_lblc_table. | 147 | * Hash an entry in the ip_vs_lblc_table. |
174 | * returns bool success. | 148 | * returns bool success. |
175 | */ | 149 | */ |
176 | static int | 150 | static void |
177 | ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) | 151 | ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) |
178 | { | 152 | { |
179 | unsigned hash; | 153 | unsigned hash = ip_vs_lblc_hashkey(en->addr); |
180 | |||
181 | if (!list_empty(&en->list)) { | ||
182 | IP_VS_ERR("ip_vs_lblc_hash(): request for already hashed, " | ||
183 | "called from %p\n", __builtin_return_address(0)); | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * Hash by destination IP address | ||
189 | */ | ||
190 | hash = ip_vs_lblc_hashkey(en->addr); | ||
191 | 154 | ||
192 | write_lock(&tbl->lock); | ||
193 | list_add(&en->list, &tbl->bucket[hash]); | 155 | list_add(&en->list, &tbl->bucket[hash]); |
194 | atomic_inc(&tbl->entries); | 156 | atomic_inc(&tbl->entries); |
195 | write_unlock(&tbl->lock); | ||
196 | |||
197 | return 1; | ||
198 | } | 157 | } |
199 | 158 | ||
200 | 159 | ||
201 | /* | 160 | /* |
202 | * Get ip_vs_lblc_entry associated with supplied parameters. | 161 | * Get ip_vs_lblc_entry associated with supplied parameters. Called under read |
162 | * lock | ||
203 | */ | 163 | */ |
204 | static inline struct ip_vs_lblc_entry * | 164 | static inline struct ip_vs_lblc_entry * |
205 | ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) | 165 | ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) |
206 | { | 166 | { |
207 | unsigned hash; | 167 | unsigned hash = ip_vs_lblc_hashkey(addr); |
208 | struct ip_vs_lblc_entry *en; | 168 | struct ip_vs_lblc_entry *en; |
209 | 169 | ||
210 | hash = ip_vs_lblc_hashkey(addr); | 170 | list_for_each_entry(en, &tbl->bucket[hash], list) |
171 | if (en->addr == addr) | ||
172 | return en; | ||
211 | 173 | ||
212 | read_lock(&tbl->lock); | 174 | return NULL; |
175 | } | ||
213 | 176 | ||
214 | list_for_each_entry(en, &tbl->bucket[hash], list) { | 177 | |
215 | if (en->addr == addr) { | 178 | /* |
216 | /* HIT */ | 179 | * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP |
217 | read_unlock(&tbl->lock); | 180 | * address to a server. Called under write lock. |
218 | return en; | 181 | */ |
182 | static inline struct ip_vs_lblc_entry * | ||
183 | ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, __be32 daddr, | ||
184 | struct ip_vs_dest *dest) | ||
185 | { | ||
186 | struct ip_vs_lblc_entry *en; | ||
187 | |||
188 | en = ip_vs_lblc_get(tbl, daddr); | ||
189 | if (!en) { | ||
190 | en = kmalloc(sizeof(*en), GFP_ATOMIC); | ||
191 | if (!en) { | ||
192 | IP_VS_ERR("ip_vs_lblc_new(): no memory\n"); | ||
193 | return NULL; | ||
219 | } | 194 | } |
220 | } | ||
221 | 195 | ||
222 | read_unlock(&tbl->lock); | 196 | en->addr = daddr; |
197 | en->lastuse = jiffies; | ||
223 | 198 | ||
224 | return NULL; | 199 | atomic_inc(&dest->refcnt); |
200 | en->dest = dest; | ||
201 | |||
202 | ip_vs_lblc_hash(tbl, en); | ||
203 | } else if (en->dest != dest) { | ||
204 | atomic_dec(&en->dest->refcnt); | ||
205 | atomic_inc(&dest->refcnt); | ||
206 | en->dest = dest; | ||
207 | } | ||
208 | |||
209 | return en; | ||
225 | } | 210 | } |
226 | 211 | ||
227 | 212 | ||
@@ -230,30 +215,29 @@ ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) | |||
230 | */ | 215 | */ |
231 | static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl) | 216 | static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl) |
232 | { | 217 | { |
233 | int i; | ||
234 | struct ip_vs_lblc_entry *en, *nxt; | 218 | struct ip_vs_lblc_entry *en, *nxt; |
219 | int i; | ||
235 | 220 | ||
236 | for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { | 221 | for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { |
237 | write_lock(&tbl->lock); | ||
238 | list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { | 222 | list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { |
239 | ip_vs_lblc_free(en); | 223 | ip_vs_lblc_free(en); |
240 | atomic_dec(&tbl->entries); | 224 | atomic_dec(&tbl->entries); |
241 | } | 225 | } |
242 | write_unlock(&tbl->lock); | ||
243 | } | 226 | } |
244 | } | 227 | } |
245 | 228 | ||
246 | 229 | ||
247 | static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) | 230 | static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc) |
248 | { | 231 | { |
232 | struct ip_vs_lblc_table *tbl = svc->sched_data; | ||
233 | struct ip_vs_lblc_entry *en, *nxt; | ||
249 | unsigned long now = jiffies; | 234 | unsigned long now = jiffies; |
250 | int i, j; | 235 | int i, j; |
251 | struct ip_vs_lblc_entry *en, *nxt; | ||
252 | 236 | ||
253 | for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { | 237 | for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { |
254 | j = (j + 1) & IP_VS_LBLC_TAB_MASK; | 238 | j = (j + 1) & IP_VS_LBLC_TAB_MASK; |
255 | 239 | ||
256 | write_lock(&tbl->lock); | 240 | write_lock(&svc->sched_lock); |
257 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { | 241 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { |
258 | if (time_before(now, | 242 | if (time_before(now, |
259 | en->lastuse + sysctl_ip_vs_lblc_expiration)) | 243 | en->lastuse + sysctl_ip_vs_lblc_expiration)) |
@@ -262,7 +246,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) | |||
262 | ip_vs_lblc_free(en); | 246 | ip_vs_lblc_free(en); |
263 | atomic_dec(&tbl->entries); | 247 | atomic_dec(&tbl->entries); |
264 | } | 248 | } |
265 | write_unlock(&tbl->lock); | 249 | write_unlock(&svc->sched_lock); |
266 | } | 250 | } |
267 | tbl->rover = j; | 251 | tbl->rover = j; |
268 | } | 252 | } |
@@ -281,17 +265,16 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) | |||
281 | */ | 265 | */ |
282 | static void ip_vs_lblc_check_expire(unsigned long data) | 266 | static void ip_vs_lblc_check_expire(unsigned long data) |
283 | { | 267 | { |
284 | struct ip_vs_lblc_table *tbl; | 268 | struct ip_vs_service *svc = (struct ip_vs_service *) data; |
269 | struct ip_vs_lblc_table *tbl = svc->sched_data; | ||
285 | unsigned long now = jiffies; | 270 | unsigned long now = jiffies; |
286 | int goal; | 271 | int goal; |
287 | int i, j; | 272 | int i, j; |
288 | struct ip_vs_lblc_entry *en, *nxt; | 273 | struct ip_vs_lblc_entry *en, *nxt; |
289 | 274 | ||
290 | tbl = (struct ip_vs_lblc_table *)data; | ||
291 | |||
292 | if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { | 275 | if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { |
293 | /* do full expiration check */ | 276 | /* do full expiration check */ |
294 | ip_vs_lblc_full_check(tbl); | 277 | ip_vs_lblc_full_check(svc); |
295 | tbl->counter = 1; | 278 | tbl->counter = 1; |
296 | goto out; | 279 | goto out; |
297 | } | 280 | } |
@@ -308,7 +291,7 @@ static void ip_vs_lblc_check_expire(unsigned long data) | |||
308 | for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { | 291 | for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { |
309 | j = (j + 1) & IP_VS_LBLC_TAB_MASK; | 292 | j = (j + 1) & IP_VS_LBLC_TAB_MASK; |
310 | 293 | ||
311 | write_lock(&tbl->lock); | 294 | write_lock(&svc->sched_lock); |
312 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { | 295 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { |
313 | if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) | 296 | if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) |
314 | continue; | 297 | continue; |
@@ -317,7 +300,7 @@ static void ip_vs_lblc_check_expire(unsigned long data) | |||
317 | atomic_dec(&tbl->entries); | 300 | atomic_dec(&tbl->entries); |
318 | goal--; | 301 | goal--; |
319 | } | 302 | } |
320 | write_unlock(&tbl->lock); | 303 | write_unlock(&svc->sched_lock); |
321 | if (goal <= 0) | 304 | if (goal <= 0) |
322 | break; | 305 | break; |
323 | } | 306 | } |
@@ -336,15 +319,14 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) | |||
336 | /* | 319 | /* |
337 | * Allocate the ip_vs_lblc_table for this service | 320 | * Allocate the ip_vs_lblc_table for this service |
338 | */ | 321 | */ |
339 | tbl = kmalloc(sizeof(struct ip_vs_lblc_table), GFP_ATOMIC); | 322 | tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); |
340 | if (tbl == NULL) { | 323 | if (tbl == NULL) { |
341 | IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n"); | 324 | IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n"); |
342 | return -ENOMEM; | 325 | return -ENOMEM; |
343 | } | 326 | } |
344 | svc->sched_data = tbl; | 327 | svc->sched_data = tbl; |
345 | IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for " | 328 | IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for " |
346 | "current service\n", | 329 | "current service\n", sizeof(*tbl)); |
347 | sizeof(struct ip_vs_lblc_table)); | ||
348 | 330 | ||
349 | /* | 331 | /* |
350 | * Initialize the hash buckets | 332 | * Initialize the hash buckets |
@@ -352,7 +334,6 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) | |||
352 | for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { | 334 | for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { |
353 | INIT_LIST_HEAD(&tbl->bucket[i]); | 335 | INIT_LIST_HEAD(&tbl->bucket[i]); |
354 | } | 336 | } |
355 | rwlock_init(&tbl->lock); | ||
356 | tbl->max_size = IP_VS_LBLC_TAB_SIZE*16; | 337 | tbl->max_size = IP_VS_LBLC_TAB_SIZE*16; |
357 | tbl->rover = 0; | 338 | tbl->rover = 0; |
358 | tbl->counter = 1; | 339 | tbl->counter = 1; |
@@ -361,9 +342,8 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) | |||
361 | * Hook periodic timer for garbage collection | 342 | * Hook periodic timer for garbage collection |
362 | */ | 343 | */ |
363 | setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire, | 344 | setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire, |
364 | (unsigned long)tbl); | 345 | (unsigned long)svc); |
365 | tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; | 346 | mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL); |
366 | add_timer(&tbl->periodic_timer); | ||
367 | 347 | ||
368 | return 0; | 348 | return 0; |
369 | } | 349 | } |
@@ -380,22 +360,16 @@ static int ip_vs_lblc_done_svc(struct ip_vs_service *svc) | |||
380 | ip_vs_lblc_flush(tbl); | 360 | ip_vs_lblc_flush(tbl); |
381 | 361 | ||
382 | /* release the table itself */ | 362 | /* release the table itself */ |
383 | kfree(svc->sched_data); | 363 | kfree(tbl); |
384 | IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n", | 364 | IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n", |
385 | sizeof(struct ip_vs_lblc_table)); | 365 | sizeof(*tbl)); |
386 | 366 | ||
387 | return 0; | 367 | return 0; |
388 | } | 368 | } |
389 | 369 | ||
390 | 370 | ||
391 | static int ip_vs_lblc_update_svc(struct ip_vs_service *svc) | ||
392 | { | ||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | |||
397 | static inline struct ip_vs_dest * | 371 | static inline struct ip_vs_dest * |
398 | __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) | 372 | __ip_vs_lblc_schedule(struct ip_vs_service *svc, struct iphdr *iph) |
399 | { | 373 | { |
400 | struct ip_vs_dest *dest, *least; | 374 | struct ip_vs_dest *dest, *least; |
401 | int loh, doh; | 375 | int loh, doh; |
@@ -448,7 +422,7 @@ __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) | |||
448 | 422 | ||
449 | IP_VS_DBG(6, "LBLC: server %d.%d.%d.%d:%d " | 423 | IP_VS_DBG(6, "LBLC: server %d.%d.%d.%d:%d " |
450 | "activeconns %d refcnt %d weight %d overhead %d\n", | 424 | "activeconns %d refcnt %d weight %d overhead %d\n", |
451 | NIPQUAD(least->addr), ntohs(least->port), | 425 | NIPQUAD(least->addr.ip), ntohs(least->port), |
452 | atomic_read(&least->activeconns), | 426 | atomic_read(&least->activeconns), |
453 | atomic_read(&least->refcnt), | 427 | atomic_read(&least->refcnt), |
454 | atomic_read(&least->weight), loh); | 428 | atomic_read(&least->weight), loh); |
@@ -484,47 +458,55 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc) | |||
484 | static struct ip_vs_dest * | 458 | static struct ip_vs_dest * |
485 | ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | 459 | ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) |
486 | { | 460 | { |
487 | struct ip_vs_dest *dest; | 461 | struct ip_vs_lblc_table *tbl = svc->sched_data; |
488 | struct ip_vs_lblc_table *tbl; | ||
489 | struct ip_vs_lblc_entry *en; | ||
490 | struct iphdr *iph = ip_hdr(skb); | 462 | struct iphdr *iph = ip_hdr(skb); |
463 | struct ip_vs_dest *dest = NULL; | ||
464 | struct ip_vs_lblc_entry *en; | ||
491 | 465 | ||
492 | IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n"); | 466 | IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n"); |
493 | 467 | ||
494 | tbl = (struct ip_vs_lblc_table *)svc->sched_data; | 468 | /* First look in our cache */ |
469 | read_lock(&svc->sched_lock); | ||
495 | en = ip_vs_lblc_get(tbl, iph->daddr); | 470 | en = ip_vs_lblc_get(tbl, iph->daddr); |
496 | if (en == NULL) { | 471 | if (en) { |
497 | dest = __ip_vs_wlc_schedule(svc, iph); | 472 | /* We only hold a read lock, but this is atomic */ |
498 | if (dest == NULL) { | 473 | en->lastuse = jiffies; |
499 | IP_VS_DBG(1, "no destination available\n"); | 474 | |
500 | return NULL; | 475 | /* |
501 | } | 476 | * If the destination is not available, i.e. it's in the trash, |
502 | en = ip_vs_lblc_new(iph->daddr, dest); | 477 | * we must ignore it, as it may be removed from under our feet, |
503 | if (en == NULL) { | 478 | * if someone drops our reference count. Our caller only makes |
504 | return NULL; | 479 | * sure that destinations, that are not in the trash, are not |
505 | } | 480 | * moved to the trash, while we are scheduling. But anyone can |
506 | ip_vs_lblc_hash(tbl, en); | 481 | * free up entries from the trash at any time. |
507 | } else { | 482 | */ |
508 | dest = en->dest; | 483 | |
509 | if (!(dest->flags & IP_VS_DEST_F_AVAILABLE) | 484 | if (en->dest->flags & IP_VS_DEST_F_AVAILABLE) |
510 | || atomic_read(&dest->weight) <= 0 | 485 | dest = en->dest; |
511 | || is_overloaded(dest, svc)) { | 486 | } |
512 | dest = __ip_vs_wlc_schedule(svc, iph); | 487 | read_unlock(&svc->sched_lock); |
513 | if (dest == NULL) { | 488 | |
514 | IP_VS_DBG(1, "no destination available\n"); | 489 | /* If the destination has a weight and is not overloaded, use it */ |
515 | return NULL; | 490 | if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc)) |
516 | } | 491 | goto out; |
517 | atomic_dec(&en->dest->refcnt); | 492 | |
518 | atomic_inc(&dest->refcnt); | 493 | /* No cache entry or it is invalid, time to schedule */ |
519 | en->dest = dest; | 494 | dest = __ip_vs_lblc_schedule(svc, iph); |
520 | } | 495 | if (!dest) { |
496 | IP_VS_DBG(1, "no destination available\n"); | ||
497 | return NULL; | ||
521 | } | 498 | } |
522 | en->lastuse = jiffies; | ||
523 | 499 | ||
500 | /* If we fail to create a cache entry, we'll just use the valid dest */ | ||
501 | write_lock(&svc->sched_lock); | ||
502 | ip_vs_lblc_new(tbl, iph->daddr, dest); | ||
503 | write_unlock(&svc->sched_lock); | ||
504 | |||
505 | out: | ||
524 | IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u " | 506 | IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u " |
525 | "--> server %u.%u.%u.%u:%d\n", | 507 | "--> server %u.%u.%u.%u:%d\n", |
526 | NIPQUAD(en->addr), | 508 | NIPQUAD(iph->daddr), |
527 | NIPQUAD(dest->addr), | 509 | NIPQUAD(dest->addr.ip), |
528 | ntohs(dest->port)); | 510 | ntohs(dest->port)); |
529 | 511 | ||
530 | return dest; | 512 | return dest; |
@@ -540,9 +522,11 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler = | |||
540 | .refcnt = ATOMIC_INIT(0), | 522 | .refcnt = ATOMIC_INIT(0), |
541 | .module = THIS_MODULE, | 523 | .module = THIS_MODULE, |
542 | .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list), | 524 | .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list), |
525 | #ifdef CONFIG_IP_VS_IPV6 | ||
526 | .supports_ipv6 = 0, | ||
527 | #endif | ||
543 | .init_service = ip_vs_lblc_init_svc, | 528 | .init_service = ip_vs_lblc_init_svc, |
544 | .done_service = ip_vs_lblc_done_svc, | 529 | .done_service = ip_vs_lblc_done_svc, |
545 | .update_service = ip_vs_lblc_update_svc, | ||
546 | .schedule = ip_vs_lblc_schedule, | 530 | .schedule = ip_vs_lblc_schedule, |
547 | }; | 531 | }; |
548 | 532 | ||
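
The reworked ip_vs_lblc_schedule() above drops the per-table rwlock and leans on the service's sched_lock instead: the cache lookup runs under the read lock, and only a miss or a stale entry takes the write lock so ip_vs_lblc_new() can insert or update. A user-space sketch of the same read-mostly pattern using a pthread rwlock; the fixed-size array is a toy stand-in for the kernel hash table:

#include <pthread.h>
#include <stdio.h>

#define CACHE_SIZE 16

struct entry { unsigned key; const char *dest; int used; };

static struct entry cache[CACHE_SIZE];
static pthread_rwlock_t sched_lock = PTHREAD_RWLOCK_INITIALIZER;

static const char *cache_get(unsigned key)
{
	for (int i = 0; i < CACHE_SIZE; i++)
		if (cache[i].used && cache[i].key == key)
			return cache[i].dest;
	return NULL;
}

static const char *schedule(unsigned key, const char *(*pick)(unsigned))
{
	const char *dest;

	/* fast path: look in the cache under the read lock */
	pthread_rwlock_rdlock(&sched_lock);
	dest = cache_get(key);
	pthread_rwlock_unlock(&sched_lock);
	if (dest)
		return dest;

	dest = pick(key);	/* fall back to the real scheduler */

	/* slow path: insert under the write lock; losing a race is harmless,
	 * the slot is simply overwritten with an equivalent entry */
	pthread_rwlock_wrlock(&sched_lock);
	cache[key % CACHE_SIZE] = (struct entry){ key, dest, 1 };
	pthread_rwlock_unlock(&sched_lock);
	return dest;
}

static const char *pick_by_parity(unsigned key)
{
	return (key & 1) ? "rs-b" : "rs-a";
}

int main(void)
{
	printf("%s\n", schedule(7, pick_by_parity));	/* miss: picks and caches */
	printf("%s\n", schedule(7, pick_by_parity));	/* hit: served from cache */
	return 0;
}
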
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c index c234e73968a6..1f75ea83bcf8 100644 --- a/net/ipv4/ipvs/ip_vs_lblcr.c +++ b/net/ipv4/ipvs/ip_vs_lblcr.c | |||
@@ -106,7 +106,7 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) | |||
106 | return NULL; | 106 | return NULL; |
107 | } | 107 | } |
108 | 108 | ||
109 | e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC); | 109 | e = kmalloc(sizeof(*e), GFP_ATOMIC); |
110 | if (e == NULL) { | 110 | if (e == NULL) { |
111 | IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n"); | 111 | IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n"); |
112 | return NULL; | 112 | return NULL; |
@@ -116,11 +116,9 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) | |||
116 | e->dest = dest; | 116 | e->dest = dest; |
117 | 117 | ||
118 | /* link it to the list */ | 118 | /* link it to the list */ |
119 | write_lock(&set->lock); | ||
120 | e->next = set->list; | 119 | e->next = set->list; |
121 | set->list = e; | 120 | set->list = e; |
122 | atomic_inc(&set->size); | 121 | atomic_inc(&set->size); |
123 | write_unlock(&set->lock); | ||
124 | 122 | ||
125 | set->lastmod = jiffies; | 123 | set->lastmod = jiffies; |
126 | return e; | 124 | return e; |
@@ -131,7 +129,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) | |||
131 | { | 129 | { |
132 | struct ip_vs_dest_list *e, **ep; | 130 | struct ip_vs_dest_list *e, **ep; |
133 | 131 | ||
134 | write_lock(&set->lock); | ||
135 | for (ep=&set->list, e=*ep; e!=NULL; e=*ep) { | 132 | for (ep=&set->list, e=*ep; e!=NULL; e=*ep) { |
136 | if (e->dest == dest) { | 133 | if (e->dest == dest) { |
137 | /* HIT */ | 134 | /* HIT */ |
@@ -144,7 +141,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) | |||
144 | } | 141 | } |
145 | ep = &e->next; | 142 | ep = &e->next; |
146 | } | 143 | } |
147 | write_unlock(&set->lock); | ||
148 | } | 144 | } |
149 | 145 | ||
150 | static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set) | 146 | static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set) |
@@ -174,7 +170,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) | |||
174 | if (set == NULL) | 170 | if (set == NULL) |
175 | return NULL; | 171 | return NULL; |
176 | 172 | ||
177 | read_lock(&set->lock); | ||
178 | /* select the first destination server, whose weight > 0 */ | 173 | /* select the first destination server, whose weight > 0 */ |
179 | for (e=set->list; e!=NULL; e=e->next) { | 174 | for (e=set->list; e!=NULL; e=e->next) { |
180 | least = e->dest; | 175 | least = e->dest; |
@@ -188,7 +183,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) | |||
188 | goto nextstage; | 183 | goto nextstage; |
189 | } | 184 | } |
190 | } | 185 | } |
191 | read_unlock(&set->lock); | ||
192 | return NULL; | 186 | return NULL; |
193 | 187 | ||
194 | /* find the destination with the weighted least load */ | 188 | /* find the destination with the weighted least load */ |
@@ -207,11 +201,10 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) | |||
207 | loh = doh; | 201 | loh = doh; |
208 | } | 202 | } |
209 | } | 203 | } |
210 | read_unlock(&set->lock); | ||
211 | 204 | ||
212 | IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d " | 205 | IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d " |
213 | "activeconns %d refcnt %d weight %d overhead %d\n", | 206 | "activeconns %d refcnt %d weight %d overhead %d\n", |
214 | NIPQUAD(least->addr), ntohs(least->port), | 207 | NIPQUAD(least->addr.ip), ntohs(least->port), |
215 | atomic_read(&least->activeconns), | 208 | atomic_read(&least->activeconns), |
216 | atomic_read(&least->refcnt), | 209 | atomic_read(&least->refcnt), |
217 | atomic_read(&least->weight), loh); | 210 | atomic_read(&least->weight), loh); |
@@ -229,7 +222,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) | |||
229 | if (set == NULL) | 222 | if (set == NULL) |
230 | return NULL; | 223 | return NULL; |
231 | 224 | ||
232 | read_lock(&set->lock); | ||
233 | /* select the first destination server, whose weight > 0 */ | 225 | /* select the first destination server, whose weight > 0 */ |
234 | for (e=set->list; e!=NULL; e=e->next) { | 226 | for (e=set->list; e!=NULL; e=e->next) { |
235 | most = e->dest; | 227 | most = e->dest; |
@@ -239,7 +231,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) | |||
239 | goto nextstage; | 231 | goto nextstage; |
240 | } | 232 | } |
241 | } | 233 | } |
242 | read_unlock(&set->lock); | ||
243 | return NULL; | 234 | return NULL; |
244 | 235 | ||
245 | /* find the destination with the weighted most load */ | 236 | /* find the destination with the weighted most load */ |
@@ -256,11 +247,10 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) | |||
256 | moh = doh; | 247 | moh = doh; |
257 | } | 248 | } |
258 | } | 249 | } |
259 | read_unlock(&set->lock); | ||
260 | 250 | ||
261 | IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d " | 251 | IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d " |
262 | "activeconns %d refcnt %d weight %d overhead %d\n", | 252 | "activeconns %d refcnt %d weight %d overhead %d\n", |
263 | NIPQUAD(most->addr), ntohs(most->port), | 253 | NIPQUAD(most->addr.ip), ntohs(most->port), |
264 | atomic_read(&most->activeconns), | 254 | atomic_read(&most->activeconns), |
265 | atomic_read(&most->refcnt), | 255 | atomic_read(&most->refcnt), |
266 | atomic_read(&most->weight), moh); | 256 | atomic_read(&most->weight), moh); |
@@ -284,7 +274,6 @@ struct ip_vs_lblcr_entry { | |||
284 | * IPVS lblcr hash table | 274 | * IPVS lblcr hash table |
285 | */ | 275 | */ |
286 | struct ip_vs_lblcr_table { | 276 | struct ip_vs_lblcr_table { |
287 | rwlock_t lock; /* lock for this table */ | ||
288 | struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ | 277 | struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ |
289 | atomic_t entries; /* number of entries */ | 278 | atomic_t entries; /* number of entries */ |
290 | int max_size; /* maximum size of entries */ | 279 | int max_size; /* maximum size of entries */ |
@@ -311,32 +300,6 @@ static ctl_table vs_vars_table[] = { | |||
311 | 300 | ||
312 | static struct ctl_table_header * sysctl_header; | 301 | static struct ctl_table_header * sysctl_header; |
313 | 302 | ||
314 | /* | ||
315 | * new/free a ip_vs_lblcr_entry, which is a mapping of a destination | ||
316 | * IP address to a server. | ||
317 | */ | ||
318 | static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__be32 daddr) | ||
319 | { | ||
320 | struct ip_vs_lblcr_entry *en; | ||
321 | |||
322 | en = kmalloc(sizeof(struct ip_vs_lblcr_entry), GFP_ATOMIC); | ||
323 | if (en == NULL) { | ||
324 | IP_VS_ERR("ip_vs_lblcr_new(): no memory\n"); | ||
325 | return NULL; | ||
326 | } | ||
327 | |||
328 | INIT_LIST_HEAD(&en->list); | ||
329 | en->addr = daddr; | ||
330 | |||
331 | /* initilize its dest set */ | ||
332 | atomic_set(&(en->set.size), 0); | ||
333 | en->set.list = NULL; | ||
334 | rwlock_init(&en->set.lock); | ||
335 | |||
336 | return en; | ||
337 | } | ||
338 | |||
339 | |||
340 | static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en) | 303 | static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en) |
341 | { | 304 | { |
342 | list_del(&en->list); | 305 | list_del(&en->list); |
@@ -358,55 +321,68 @@ static inline unsigned ip_vs_lblcr_hashkey(__be32 addr) | |||
358 | * Hash an entry in the ip_vs_lblcr_table. | 321 | * Hash an entry in the ip_vs_lblcr_table. |
359 | * returns bool success. | 322 | * returns bool success. |
360 | */ | 323 | */ |
361 | static int | 324 | static void |
362 | ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) | 325 | ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) |
363 | { | 326 | { |
364 | unsigned hash; | 327 | unsigned hash = ip_vs_lblcr_hashkey(en->addr); |
365 | 328 | ||
366 | if (!list_empty(&en->list)) { | ||
367 | IP_VS_ERR("ip_vs_lblcr_hash(): request for already hashed, " | ||
368 | "called from %p\n", __builtin_return_address(0)); | ||
369 | return 0; | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * Hash by destination IP address | ||
374 | */ | ||
375 | hash = ip_vs_lblcr_hashkey(en->addr); | ||
376 | |||
377 | write_lock(&tbl->lock); | ||
378 | list_add(&en->list, &tbl->bucket[hash]); | 329 | list_add(&en->list, &tbl->bucket[hash]); |
379 | atomic_inc(&tbl->entries); | 330 | atomic_inc(&tbl->entries); |
380 | write_unlock(&tbl->lock); | ||
381 | |||
382 | return 1; | ||
383 | } | 331 | } |
384 | 332 | ||
385 | 333 | ||
386 | /* | 334 | /* |
387 | * Get ip_vs_lblcr_entry associated with supplied parameters. | 335 | * Get ip_vs_lblcr_entry associated with supplied parameters. Called under |
336 | * read lock. | ||
388 | */ | 337 | */ |
389 | static inline struct ip_vs_lblcr_entry * | 338 | static inline struct ip_vs_lblcr_entry * |
390 | ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr) | 339 | ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr) |
391 | { | 340 | { |
392 | unsigned hash; | 341 | unsigned hash = ip_vs_lblcr_hashkey(addr); |
393 | struct ip_vs_lblcr_entry *en; | 342 | struct ip_vs_lblcr_entry *en; |
394 | 343 | ||
395 | hash = ip_vs_lblcr_hashkey(addr); | 344 | list_for_each_entry(en, &tbl->bucket[hash], list) |
345 | if (en->addr == addr) | ||
346 | return en; | ||
396 | 347 | ||
397 | read_lock(&tbl->lock); | 348 | return NULL; |
349 | } | ||
398 | 350 | ||
399 | list_for_each_entry(en, &tbl->bucket[hash], list) { | 351 | |
400 | if (en->addr == addr) { | 352 | /* |
401 | /* HIT */ | 353 | * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination |
402 | read_unlock(&tbl->lock); | 354 | * IP address to a server. Called under write lock. |
403 | return en; | 355 | */ |
356 | static inline struct ip_vs_lblcr_entry * | ||
357 | ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, __be32 daddr, | ||
358 | struct ip_vs_dest *dest) | ||
359 | { | ||
360 | struct ip_vs_lblcr_entry *en; | ||
361 | |||
362 | en = ip_vs_lblcr_get(tbl, daddr); | ||
363 | if (!en) { | ||
364 | en = kmalloc(sizeof(*en), GFP_ATOMIC); | ||
365 | if (!en) { | ||
366 | IP_VS_ERR("ip_vs_lblcr_new(): no memory\n"); | ||
367 | return NULL; | ||
404 | } | 368 | } |
369 | |||
370 | en->addr = daddr; | ||
371 | en->lastuse = jiffies; | ||
372 | |||
373 | /* initialize its dest set */ | ||
374 | atomic_set(&(en->set.size), 0); | ||
375 | en->set.list = NULL; | ||
376 | rwlock_init(&en->set.lock); | ||
377 | |||
378 | ip_vs_lblcr_hash(tbl, en); | ||
405 | } | 379 | } |
406 | 380 | ||
407 | read_unlock(&tbl->lock); | 381 | write_lock(&en->set.lock); |
382 | ip_vs_dest_set_insert(&en->set, dest); | ||
383 | write_unlock(&en->set.lock); | ||
408 | 384 | ||
409 | return NULL; | 385 | return en; |
410 | } | 386 | } |
411 | 387 | ||
412 | 388 | ||
@@ -418,19 +394,18 @@ static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl) | |||
418 | int i; | 394 | int i; |
419 | struct ip_vs_lblcr_entry *en, *nxt; | 395 | struct ip_vs_lblcr_entry *en, *nxt; |
420 | 396 | ||
397 | /* No locking required, only called during cleanup. */ | ||
421 | for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { | 398 | for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { |
422 | write_lock(&tbl->lock); | ||
423 | list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { | 399 | list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { |
424 | ip_vs_lblcr_free(en); | 400 | ip_vs_lblcr_free(en); |
425 | atomic_dec(&tbl->entries); | ||
426 | } | 401 | } |
427 | write_unlock(&tbl->lock); | ||
428 | } | 402 | } |
429 | } | 403 | } |
430 | 404 | ||
431 | 405 | ||
432 | static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) | 406 | static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc) |
433 | { | 407 | { |
408 | struct ip_vs_lblcr_table *tbl = svc->sched_data; | ||
434 | unsigned long now = jiffies; | 409 | unsigned long now = jiffies; |
435 | int i, j; | 410 | int i, j; |
436 | struct ip_vs_lblcr_entry *en, *nxt; | 411 | struct ip_vs_lblcr_entry *en, *nxt; |
@@ -438,7 +413,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) | |||
438 | for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { | 413 | for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { |
439 | j = (j + 1) & IP_VS_LBLCR_TAB_MASK; | 414 | j = (j + 1) & IP_VS_LBLCR_TAB_MASK; |
440 | 415 | ||
441 | write_lock(&tbl->lock); | 416 | write_lock(&svc->sched_lock); |
442 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { | 417 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { |
443 | if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration, | 418 | if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration, |
444 | now)) | 419 | now)) |
@@ -447,7 +422,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) | |||
447 | ip_vs_lblcr_free(en); | 422 | ip_vs_lblcr_free(en); |
448 | atomic_dec(&tbl->entries); | 423 | atomic_dec(&tbl->entries); |
449 | } | 424 | } |
450 | write_unlock(&tbl->lock); | 425 | write_unlock(&svc->sched_lock); |
451 | } | 426 | } |
452 | tbl->rover = j; | 427 | tbl->rover = j; |
453 | } | 428 | } |
@@ -466,17 +441,16 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) | |||
466 | */ | 441 | */ |
467 | static void ip_vs_lblcr_check_expire(unsigned long data) | 442 | static void ip_vs_lblcr_check_expire(unsigned long data) |
468 | { | 443 | { |
469 | struct ip_vs_lblcr_table *tbl; | 444 | struct ip_vs_service *svc = (struct ip_vs_service *) data; |
445 | struct ip_vs_lblcr_table *tbl = svc->sched_data; | ||
470 | unsigned long now = jiffies; | 446 | unsigned long now = jiffies; |
471 | int goal; | 447 | int goal; |
472 | int i, j; | 448 | int i, j; |
473 | struct ip_vs_lblcr_entry *en, *nxt; | 449 | struct ip_vs_lblcr_entry *en, *nxt; |
474 | 450 | ||
475 | tbl = (struct ip_vs_lblcr_table *)data; | ||
476 | |||
477 | if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { | 451 | if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { |
478 | /* do full expiration check */ | 452 | /* do full expiration check */ |
479 | ip_vs_lblcr_full_check(tbl); | 453 | ip_vs_lblcr_full_check(svc); |
480 | tbl->counter = 1; | 454 | tbl->counter = 1; |
481 | goto out; | 455 | goto out; |
482 | } | 456 | } |
@@ -493,7 +467,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data) | |||
493 | for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { | 467 | for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { |
494 | j = (j + 1) & IP_VS_LBLCR_TAB_MASK; | 468 | j = (j + 1) & IP_VS_LBLCR_TAB_MASK; |
495 | 469 | ||
496 | write_lock(&tbl->lock); | 470 | write_lock(&svc->sched_lock); |
497 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { | 471 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { |
498 | if (time_before(now, en->lastuse+ENTRY_TIMEOUT)) | 472 | if (time_before(now, en->lastuse+ENTRY_TIMEOUT)) |
499 | continue; | 473 | continue; |
@@ -502,7 +476,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data) | |||
502 | atomic_dec(&tbl->entries); | 476 | atomic_dec(&tbl->entries); |
503 | goal--; | 477 | goal--; |
504 | } | 478 | } |
505 | write_unlock(&tbl->lock); | 479 | write_unlock(&svc->sched_lock); |
506 | if (goal <= 0) | 480 | if (goal <= 0) |
507 | break; | 481 | break; |
508 | } | 482 | } |
@@ -520,15 +494,14 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) | |||
520 | /* | 494 | /* |
521 | * Allocate the ip_vs_lblcr_table for this service | 495 | * Allocate the ip_vs_lblcr_table for this service |
522 | */ | 496 | */ |
523 | tbl = kmalloc(sizeof(struct ip_vs_lblcr_table), GFP_ATOMIC); | 497 | tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); |
524 | if (tbl == NULL) { | 498 | if (tbl == NULL) { |
525 | IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n"); | 499 | IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n"); |
526 | return -ENOMEM; | 500 | return -ENOMEM; |
527 | } | 501 | } |
528 | svc->sched_data = tbl; | 502 | svc->sched_data = tbl; |
529 | IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for " | 503 | IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for " |
530 | "current service\n", | 504 | "current service\n", sizeof(*tbl)); |
531 | sizeof(struct ip_vs_lblcr_table)); | ||
532 | 505 | ||
533 | /* | 506 | /* |
534 | * Initialize the hash buckets | 507 | * Initialize the hash buckets |
@@ -536,7 +509,6 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) | |||
536 | for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { | 509 | for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { |
537 | INIT_LIST_HEAD(&tbl->bucket[i]); | 510 | INIT_LIST_HEAD(&tbl->bucket[i]); |
538 | } | 511 | } |
539 | rwlock_init(&tbl->lock); | ||
540 | tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16; | 512 | tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16; |
541 | tbl->rover = 0; | 513 | tbl->rover = 0; |
542 | tbl->counter = 1; | 514 | tbl->counter = 1; |
@@ -545,9 +517,8 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) | |||
545 | * Hook periodic timer for garbage collection | 517 | * Hook periodic timer for garbage collection |
546 | */ | 518 | */ |
547 | setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire, | 519 | setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire, |
548 | (unsigned long)tbl); | 520 | (unsigned long)svc); |
549 | tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; | 521 | mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL); |
550 | add_timer(&tbl->periodic_timer); | ||
551 | 522 | ||
552 | return 0; | 523 | return 0; |
553 | } | 524 | } |
@@ -564,22 +535,16 @@ static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc) | |||
564 | ip_vs_lblcr_flush(tbl); | 535 | ip_vs_lblcr_flush(tbl); |
565 | 536 | ||
566 | /* release the table itself */ | 537 | /* release the table itself */ |
567 | kfree(svc->sched_data); | 538 | kfree(tbl); |
568 | IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n", | 539 | IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n", |
569 | sizeof(struct ip_vs_lblcr_table)); | 540 | sizeof(*tbl)); |
570 | 541 | ||
571 | return 0; | 542 | return 0; |
572 | } | 543 | } |
573 | 544 | ||
574 | 545 | ||
575 | static int ip_vs_lblcr_update_svc(struct ip_vs_service *svc) | ||
576 | { | ||
577 | return 0; | ||
578 | } | ||
579 | |||
580 | |||
581 | static inline struct ip_vs_dest * | 546 | static inline struct ip_vs_dest * |
582 | __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) | 547 | __ip_vs_lblcr_schedule(struct ip_vs_service *svc, struct iphdr *iph) |
583 | { | 548 | { |
584 | struct ip_vs_dest *dest, *least; | 549 | struct ip_vs_dest *dest, *least; |
585 | int loh, doh; | 550 | int loh, doh; |
@@ -633,7 +598,7 @@ __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) | |||
633 | 598 | ||
634 | IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d " | 599 | IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d " |
635 | "activeconns %d refcnt %d weight %d overhead %d\n", | 600 | "activeconns %d refcnt %d weight %d overhead %d\n", |
636 | NIPQUAD(least->addr), ntohs(least->port), | 601 | NIPQUAD(least->addr.ip), ntohs(least->port), |
637 | atomic_read(&least->activeconns), | 602 | atomic_read(&least->activeconns), |
638 | atomic_read(&least->refcnt), | 603 | atomic_read(&least->refcnt), |
639 | atomic_read(&least->weight), loh); | 604 | atomic_read(&least->weight), loh); |
@@ -669,51 +634,79 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc) | |||
669 | static struct ip_vs_dest * | 634 | static struct ip_vs_dest * |
670 | ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | 635 | ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) |
671 | { | 636 | { |
672 | struct ip_vs_dest *dest; | 637 | struct ip_vs_lblcr_table *tbl = svc->sched_data; |
673 | struct ip_vs_lblcr_table *tbl; | ||
674 | struct ip_vs_lblcr_entry *en; | ||
675 | struct iphdr *iph = ip_hdr(skb); | 638 | struct iphdr *iph = ip_hdr(skb); |
639 | struct ip_vs_dest *dest = NULL; | ||
640 | struct ip_vs_lblcr_entry *en; | ||
676 | 641 | ||
677 | IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n"); | 642 | IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n"); |
678 | 643 | ||
679 | tbl = (struct ip_vs_lblcr_table *)svc->sched_data; | 644 | /* First look in our cache */ |
645 | read_lock(&svc->sched_lock); | ||
680 | en = ip_vs_lblcr_get(tbl, iph->daddr); | 646 | en = ip_vs_lblcr_get(tbl, iph->daddr); |
681 | if (en == NULL) { | 647 | if (en) { |
682 | dest = __ip_vs_wlc_schedule(svc, iph); | 648 | /* We only hold a read lock, but this is atomic */ |
683 | if (dest == NULL) { | 649 | en->lastuse = jiffies; |
684 | IP_VS_DBG(1, "no destination available\n"); | 650 | |
685 | return NULL; | 651 | /* Get the least loaded destination */ |
686 | } | 652 | read_lock(&en->set.lock); |
687 | en = ip_vs_lblcr_new(iph->daddr); | ||
688 | if (en == NULL) { | ||
689 | return NULL; | ||
690 | } | ||
691 | ip_vs_dest_set_insert(&en->set, dest); | ||
692 | ip_vs_lblcr_hash(tbl, en); | ||
693 | } else { | ||
694 | dest = ip_vs_dest_set_min(&en->set); | 653 | dest = ip_vs_dest_set_min(&en->set); |
695 | if (!dest || is_overloaded(dest, svc)) { | 654 | read_unlock(&en->set.lock); |
696 | dest = __ip_vs_wlc_schedule(svc, iph); | 655 | |
697 | if (dest == NULL) { | 656 | /* More than one destination + enough time passed by, cleanup */ |
698 | IP_VS_DBG(1, "no destination available\n"); | ||
699 | return NULL; | ||
700 | } | ||
701 | ip_vs_dest_set_insert(&en->set, dest); | ||
702 | } | ||
703 | if (atomic_read(&en->set.size) > 1 && | 657 | if (atomic_read(&en->set.size) > 1 && |
704 | jiffies-en->set.lastmod > sysctl_ip_vs_lblcr_expiration) { | 658 | time_after(jiffies, en->set.lastmod + |
659 | sysctl_ip_vs_lblcr_expiration)) { | ||
705 | struct ip_vs_dest *m; | 660 | struct ip_vs_dest *m; |
661 | |||
662 | write_lock(&en->set.lock); | ||
706 | m = ip_vs_dest_set_max(&en->set); | 663 | m = ip_vs_dest_set_max(&en->set); |
707 | if (m) | 664 | if (m) |
708 | ip_vs_dest_set_erase(&en->set, m); | 665 | ip_vs_dest_set_erase(&en->set, m); |
666 | write_unlock(&en->set.lock); | ||
709 | } | 667 | } |
668 | |||
669 | /* If the destination is not overloaded, use it */ | ||
670 | if (dest && !is_overloaded(dest, svc)) { | ||
671 | read_unlock(&svc->sched_lock); | ||
672 | goto out; | ||
673 | } | ||
674 | |||
675 | /* The cache entry is invalid, time to schedule */ | ||
676 | dest = __ip_vs_lblcr_schedule(svc, iph); | ||
677 | if (!dest) { | ||
678 | IP_VS_DBG(1, "no destination available\n"); | ||
679 | read_unlock(&svc->sched_lock); | ||
680 | return NULL; | ||
681 | } | ||
682 | |||
683 | /* Update our cache entry */ | ||
684 | write_lock(&en->set.lock); | ||
685 | ip_vs_dest_set_insert(&en->set, dest); | ||
686 | write_unlock(&en->set.lock); | ||
687 | } | ||
688 | read_unlock(&svc->sched_lock); | ||
689 | |||
690 | if (dest) | ||
691 | goto out; | ||
692 | |||
693 | /* No cache entry, time to schedule */ | ||
694 | dest = __ip_vs_lblcr_schedule(svc, iph); | ||
695 | if (!dest) { | ||
696 | IP_VS_DBG(1, "no destination available\n"); | ||
697 | return NULL; | ||
710 | } | 698 | } |
711 | en->lastuse = jiffies; | ||
712 | 699 | ||
700 | /* If we fail to create a cache entry, we'll just use the valid dest */ | ||
701 | write_lock(&svc->sched_lock); | ||
702 | ip_vs_lblcr_new(tbl, iph->daddr, dest); | ||
703 | write_unlock(&svc->sched_lock); | ||
704 | |||
705 | out: | ||
713 | IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u " | 706 | IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u " |
714 | "--> server %u.%u.%u.%u:%d\n", | 707 | "--> server %u.%u.%u.%u:%d\n", |
715 | NIPQUAD(en->addr), | 708 | NIPQUAD(iph->daddr), |
716 | NIPQUAD(dest->addr), | 709 | NIPQUAD(dest->addr.ip), |
717 | ntohs(dest->port)); | 710 | ntohs(dest->port)); |
718 | 711 | ||
719 | return dest; | 712 | return dest; |
@@ -729,9 +722,11 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler = | |||
729 | .refcnt = ATOMIC_INIT(0), | 722 | .refcnt = ATOMIC_INIT(0), |
730 | .module = THIS_MODULE, | 723 | .module = THIS_MODULE, |
731 | .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list), | 724 | .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list), |
725 | #ifdef CONFIG_IP_VS_IPV6 | ||
726 | .supports_ipv6 = 0, | ||
727 | #endif | ||
732 | .init_service = ip_vs_lblcr_init_svc, | 728 | .init_service = ip_vs_lblcr_init_svc, |
733 | .done_service = ip_vs_lblcr_done_svc, | 729 | .done_service = ip_vs_lblcr_done_svc, |
734 | .update_service = ip_vs_lblcr_update_svc, | ||
735 | .schedule = ip_vs_lblcr_schedule, | 730 | .schedule = ip_vs_lblcr_schedule, |
736 | }; | 731 | }; |
737 | 732 | ||
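
The reworked ip_vs_lblcr_schedule() above looks its cache entry up under svc->sched_lock held for read and only takes the write lock when a new entry has to be created and hashed in; ip_vs_lblcr_new() re-checks the table before allocating, so a racing insert is harmless. A rough userspace analogy of that pattern, using pthread rwlocks and hypothetical names (a sketch, not the kernel code):

/* Userspace sketch of "look up under read lock, insert under write lock". */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define TAB_SIZE 16

struct entry {
	unsigned int addr;		/* key: destination address */
	int dest;			/* cached scheduling decision */
	struct entry *next;
};

static struct entry *bucket[TAB_SIZE];
static pthread_rwlock_t sched_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct entry *cache_get(unsigned int addr)
{
	struct entry *e;

	/* caller holds sched_lock for read or write */
	for (e = bucket[addr % TAB_SIZE]; e; e = e->next)
		if (e->addr == addr)
			return e;
	return NULL;
}

/* Create-or-update; called with sched_lock held for write.  Mirroring
 * ip_vs_lblcr_new() in the patch, it re-checks before allocating. */
static struct entry *cache_put(unsigned int addr, int dest)
{
	struct entry *e = cache_get(addr);

	if (!e) {
		e = calloc(1, sizeof(*e));
		if (!e)
			return NULL;
		e->addr = addr;
		e->next = bucket[addr % TAB_SIZE];
		bucket[addr % TAB_SIZE] = e;
	}
	e->dest = dest;
	return e;
}

static int schedule(unsigned int daddr)
{
	struct entry *e;
	int dest;

	pthread_rwlock_rdlock(&sched_lock);
	e = cache_get(daddr);
	if (e) {				/* fast path: cache hit */
		dest = e->dest;
		pthread_rwlock_unlock(&sched_lock);
		return dest;
	}
	pthread_rwlock_unlock(&sched_lock);

	dest = (int)(daddr % 3);		/* stand-in for __ip_vs_lblcr_schedule() */

	pthread_rwlock_wrlock(&sched_lock);
	cache_put(daddr, dest);
	pthread_rwlock_unlock(&sched_lock);
	return dest;
}

int main(void)
{
	printf("first:  %d\n", schedule(0x0a000001));
	printf("cached: %d\n", schedule(0x0a000001));
	return 0;
}
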
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c index ebcdbf75ac65..b69f808ac461 100644 --- a/net/ipv4/ipvs/ip_vs_lc.c +++ b/net/ipv4/ipvs/ip_vs_lc.c | |||
@@ -20,24 +20,6 @@ | |||
20 | #include <net/ip_vs.h> | 20 | #include <net/ip_vs.h> |
21 | 21 | ||
22 | 22 | ||
23 | static int ip_vs_lc_init_svc(struct ip_vs_service *svc) | ||
24 | { | ||
25 | return 0; | ||
26 | } | ||
27 | |||
28 | |||
29 | static int ip_vs_lc_done_svc(struct ip_vs_service *svc) | ||
30 | { | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | |||
35 | static int ip_vs_lc_update_svc(struct ip_vs_service *svc) | ||
36 | { | ||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | |||
41 | static inline unsigned int | 23 | static inline unsigned int |
42 | ip_vs_lc_dest_overhead(struct ip_vs_dest *dest) | 24 | ip_vs_lc_dest_overhead(struct ip_vs_dest *dest) |
43 | { | 25 | { |
@@ -85,10 +67,10 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
85 | } | 67 | } |
86 | 68 | ||
87 | if (least) | 69 | if (least) |
88 | IP_VS_DBG(6, "LC: server %u.%u.%u.%u:%u activeconns %d inactconns %d\n", | 70 | IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d inactconns %d\n", |
89 | NIPQUAD(least->addr), ntohs(least->port), | 71 | IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), |
90 | atomic_read(&least->activeconns), | 72 | atomic_read(&least->activeconns), |
91 | atomic_read(&least->inactconns)); | 73 | atomic_read(&least->inactconns)); |
92 | 74 | ||
93 | return least; | 75 | return least; |
94 | } | 76 | } |
@@ -99,9 +81,9 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = { | |||
99 | .refcnt = ATOMIC_INIT(0), | 81 | .refcnt = ATOMIC_INIT(0), |
100 | .module = THIS_MODULE, | 82 | .module = THIS_MODULE, |
101 | .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list), | 83 | .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list), |
102 | .init_service = ip_vs_lc_init_svc, | 84 | #ifdef CONFIG_IP_VS_IPV6 |
103 | .done_service = ip_vs_lc_done_svc, | 85 | .supports_ipv6 = 1, |
104 | .update_service = ip_vs_lc_update_svc, | 86 | #endif |
105 | .schedule = ip_vs_lc_schedule, | 87 | .schedule = ip_vs_lc_schedule, |
106 | }; | 88 | }; |
107 | 89 | ||
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c index 92f3a6770031..9a2d8033f08f 100644 --- a/net/ipv4/ipvs/ip_vs_nq.c +++ b/net/ipv4/ipvs/ip_vs_nq.c | |||
@@ -37,27 +37,6 @@ | |||
37 | #include <net/ip_vs.h> | 37 | #include <net/ip_vs.h> |
38 | 38 | ||
39 | 39 | ||
40 | static int | ||
41 | ip_vs_nq_init_svc(struct ip_vs_service *svc) | ||
42 | { | ||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | |||
47 | static int | ||
48 | ip_vs_nq_done_svc(struct ip_vs_service *svc) | ||
49 | { | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | |||
54 | static int | ||
55 | ip_vs_nq_update_svc(struct ip_vs_service *svc) | ||
56 | { | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | |||
61 | static inline unsigned int | 40 | static inline unsigned int |
62 | ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) | 41 | ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) |
63 | { | 42 | { |
@@ -120,12 +99,12 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
120 | return NULL; | 99 | return NULL; |
121 | 100 | ||
122 | out: | 101 | out: |
123 | IP_VS_DBG(6, "NQ: server %u.%u.%u.%u:%u " | 102 | IP_VS_DBG_BUF(6, "NQ: server %s:%u " |
124 | "activeconns %d refcnt %d weight %d overhead %d\n", | 103 | "activeconns %d refcnt %d weight %d overhead %d\n", |
125 | NIPQUAD(least->addr), ntohs(least->port), | 104 | IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), |
126 | atomic_read(&least->activeconns), | 105 | atomic_read(&least->activeconns), |
127 | atomic_read(&least->refcnt), | 106 | atomic_read(&least->refcnt), |
128 | atomic_read(&least->weight), loh); | 107 | atomic_read(&least->weight), loh); |
129 | 108 | ||
130 | return least; | 109 | return least; |
131 | } | 110 | } |
@@ -137,9 +116,9 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler = | |||
137 | .refcnt = ATOMIC_INIT(0), | 116 | .refcnt = ATOMIC_INIT(0), |
138 | .module = THIS_MODULE, | 117 | .module = THIS_MODULE, |
139 | .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list), | 118 | .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list), |
140 | .init_service = ip_vs_nq_init_svc, | 119 | #ifdef CONFIG_IP_VS_IPV6 |
141 | .done_service = ip_vs_nq_done_svc, | 120 | .supports_ipv6 = 1, |
142 | .update_service = ip_vs_nq_update_svc, | 121 | #endif |
143 | .schedule = ip_vs_nq_schedule, | 122 | .schedule = ip_vs_nq_schedule, |
144 | }; | 123 | }; |
145 | 124 | ||
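
Both ip_vs_lc and ip_vs_nq above drop their empty init_service/done_service/update_service stubs; that only works if the scheduler core treats a NULL callback as a no-op, which is the assumption this sketch illustrates (illustrative userspace C, names are not from the IPVS core):

/* "NULL callback means nothing to do" -- the convention the lc/nq
 * cleanups rely on.  All names here are hypothetical. */
#include <stdio.h>

struct service;				/* opaque for this sketch */

struct scheduler {
	const char *name;
	int (*init_service)(struct service *svc);	/* may be NULL */
	int (*done_service)(struct service *svc);	/* may be NULL */
};

static int bind_scheduler(struct service *svc, const struct scheduler *sched)
{
	if (sched->init_service) {
		int err = sched->init_service(svc);

		if (err)
			return err;
	}
	printf("bound to %s\n", sched->name);
	return 0;
}

static const struct scheduler lc_like = {
	.name = "lc",
	/* .init_service and .done_service stay NULL: nothing to set up */
};

int main(void)
{
	return bind_scheduler(NULL, &lc_like);
}
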
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c index 6099a88fc200..b06da1c3445a 100644 --- a/net/ipv4/ipvs/ip_vs_proto.c +++ b/net/ipv4/ipvs/ip_vs_proto.c | |||
@@ -151,11 +151,11 @@ const char * ip_vs_state_name(__u16 proto, int state) | |||
151 | } | 151 | } |
152 | 152 | ||
153 | 153 | ||
154 | void | 154 | static void |
155 | ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp, | 155 | ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp, |
156 | const struct sk_buff *skb, | 156 | const struct sk_buff *skb, |
157 | int offset, | 157 | int offset, |
158 | const char *msg) | 158 | const char *msg) |
159 | { | 159 | { |
160 | char buf[128]; | 160 | char buf[128]; |
161 | struct iphdr _iph, *ih; | 161 | struct iphdr _iph, *ih; |
@@ -189,6 +189,61 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp, | |||
189 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | 189 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); |
190 | } | 190 | } |
191 | 191 | ||
192 | #ifdef CONFIG_IP_VS_IPV6 | ||
193 | static void | ||
194 | ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp, | ||
195 | const struct sk_buff *skb, | ||
196 | int offset, | ||
197 | const char *msg) | ||
198 | { | ||
199 | char buf[192]; | ||
200 | struct ipv6hdr _iph, *ih; | ||
201 | |||
202 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | ||
203 | if (ih == NULL) | ||
204 | sprintf(buf, "%s TRUNCATED", pp->name); | ||
205 | else if (ih->nexthdr == IPPROTO_FRAGMENT) | ||
206 | sprintf(buf, "%s " NIP6_FMT "->" NIP6_FMT " frag", | ||
207 | pp->name, NIP6(ih->saddr), | ||
208 | NIP6(ih->daddr)); | ||
209 | else { | ||
210 | __be16 _ports[2], *pptr; | ||
211 | |||
212 | pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr), | ||
213 | sizeof(_ports), _ports); | ||
214 | if (pptr == NULL) | ||
215 | sprintf(buf, "%s TRUNCATED " NIP6_FMT "->" NIP6_FMT, | ||
216 | pp->name, | ||
217 | NIP6(ih->saddr), | ||
218 | NIP6(ih->daddr)); | ||
219 | else | ||
220 | sprintf(buf, "%s " NIP6_FMT ":%u->" NIP6_FMT ":%u", | ||
221 | pp->name, | ||
222 | NIP6(ih->saddr), | ||
223 | ntohs(pptr[0]), | ||
224 | NIP6(ih->daddr), | ||
225 | ntohs(pptr[1])); | ||
226 | } | ||
227 | |||
228 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | ||
229 | } | ||
230 | #endif | ||
231 | |||
232 | |||
233 | void | ||
234 | ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp, | ||
235 | const struct sk_buff *skb, | ||
236 | int offset, | ||
237 | const char *msg) | ||
238 | { | ||
239 | #ifdef CONFIG_IP_VS_IPV6 | ||
240 | if (skb->protocol == __constant_htons(ETH_P_IPV6)) | ||
241 | ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg); | ||
242 | else | ||
243 | #endif | ||
244 | ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg); | ||
245 | } | ||
246 | |||
192 | 247 | ||
193 | int __init ip_vs_protocol_init(void) | 248 | int __init ip_vs_protocol_init(void) |
194 | { | 249 | { |
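
The new ip_vs_tcpudp_debug_packet() dispatches on skb->protocol and prints IPv6 addresses with NIP6_FMT/NIP6, i.e. all eight 16-bit groups in hex without zero compression. A standalone userspace illustration of that formatting (hypothetical helper name, not the kernel macros):

/* Print an IPv6 address the way NIP6_FMT/NIP6 renders it. */
#include <arpa/inet.h>
#include <stdio.h>

static void print_nip6(const struct in6_addr *a)
{
	const unsigned char *b = a->s6_addr;
	int i;

	for (i = 0; i < 8; i++)
		printf("%04x%s", (b[2 * i] << 8) | b[2 * i + 1],
		       i < 7 ? ":" : "\n");
}

int main(void)
{
	struct in6_addr a;

	if (inet_pton(AF_INET6, "2001:db8::1", &a) != 1)
		return 1;
	print_nip6(&a);		/* 2001:0db8:0000:0000:0000:0000:0000:0001 */
	return 0;
}
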
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah.c b/net/ipv4/ipvs/ip_vs_proto_ah.c deleted file mode 100644 index 73e0ea87c1f5..000000000000 --- a/net/ipv4/ipvs/ip_vs_proto_ah.c +++ /dev/null | |||
@@ -1,178 +0,0 @@ | |||
1 | /* | ||
2 | * ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS | ||
3 | * | ||
4 | * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 | ||
5 | * Wensong Zhang <wensong@linuxvirtualserver.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation; | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/in.h> | ||
14 | #include <linux/ip.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/netfilter.h> | ||
18 | #include <linux/netfilter_ipv4.h> | ||
19 | |||
20 | #include <net/ip_vs.h> | ||
21 | |||
22 | |||
23 | /* TODO: | ||
24 | |||
25 | struct isakmp_hdr { | ||
26 | __u8 icookie[8]; | ||
27 | __u8 rcookie[8]; | ||
28 | __u8 np; | ||
29 | __u8 version; | ||
30 | __u8 xchgtype; | ||
31 | __u8 flags; | ||
32 | __u32 msgid; | ||
33 | __u32 length; | ||
34 | }; | ||
35 | |||
36 | */ | ||
37 | |||
38 | #define PORT_ISAKMP 500 | ||
39 | |||
40 | |||
41 | static struct ip_vs_conn * | ||
42 | ah_conn_in_get(const struct sk_buff *skb, | ||
43 | struct ip_vs_protocol *pp, | ||
44 | const struct iphdr *iph, | ||
45 | unsigned int proto_off, | ||
46 | int inverse) | ||
47 | { | ||
48 | struct ip_vs_conn *cp; | ||
49 | |||
50 | if (likely(!inverse)) { | ||
51 | cp = ip_vs_conn_in_get(IPPROTO_UDP, | ||
52 | iph->saddr, | ||
53 | htons(PORT_ISAKMP), | ||
54 | iph->daddr, | ||
55 | htons(PORT_ISAKMP)); | ||
56 | } else { | ||
57 | cp = ip_vs_conn_in_get(IPPROTO_UDP, | ||
58 | iph->daddr, | ||
59 | htons(PORT_ISAKMP), | ||
60 | iph->saddr, | ||
61 | htons(PORT_ISAKMP)); | ||
62 | } | ||
63 | |||
64 | if (!cp) { | ||
65 | /* | ||
66 | * We are not sure if the packet is from our | ||
67 | * service, so our conn_schedule hook should return NF_ACCEPT | ||
68 | */ | ||
69 | IP_VS_DBG(12, "Unknown ISAKMP entry for outin packet " | ||
70 | "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n", | ||
71 | inverse ? "ICMP+" : "", | ||
72 | pp->name, | ||
73 | NIPQUAD(iph->saddr), | ||
74 | NIPQUAD(iph->daddr)); | ||
75 | } | ||
76 | |||
77 | return cp; | ||
78 | } | ||
79 | |||
80 | |||
81 | static struct ip_vs_conn * | ||
82 | ah_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
83 | const struct iphdr *iph, unsigned int proto_off, int inverse) | ||
84 | { | ||
85 | struct ip_vs_conn *cp; | ||
86 | |||
87 | if (likely(!inverse)) { | ||
88 | cp = ip_vs_conn_out_get(IPPROTO_UDP, | ||
89 | iph->saddr, | ||
90 | htons(PORT_ISAKMP), | ||
91 | iph->daddr, | ||
92 | htons(PORT_ISAKMP)); | ||
93 | } else { | ||
94 | cp = ip_vs_conn_out_get(IPPROTO_UDP, | ||
95 | iph->daddr, | ||
96 | htons(PORT_ISAKMP), | ||
97 | iph->saddr, | ||
98 | htons(PORT_ISAKMP)); | ||
99 | } | ||
100 | |||
101 | if (!cp) { | ||
102 | IP_VS_DBG(12, "Unknown ISAKMP entry for inout packet " | ||
103 | "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n", | ||
104 | inverse ? "ICMP+" : "", | ||
105 | pp->name, | ||
106 | NIPQUAD(iph->saddr), | ||
107 | NIPQUAD(iph->daddr)); | ||
108 | } | ||
109 | |||
110 | return cp; | ||
111 | } | ||
112 | |||
113 | |||
114 | static int | ||
115 | ah_conn_schedule(struct sk_buff *skb, | ||
116 | struct ip_vs_protocol *pp, | ||
117 | int *verdict, struct ip_vs_conn **cpp) | ||
118 | { | ||
119 | /* | ||
120 | * AH is only related traffic. Pass the packet to IP stack. | ||
121 | */ | ||
122 | *verdict = NF_ACCEPT; | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | |||
127 | static void | ||
128 | ah_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb, | ||
129 | int offset, const char *msg) | ||
130 | { | ||
131 | char buf[256]; | ||
132 | struct iphdr _iph, *ih; | ||
133 | |||
134 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | ||
135 | if (ih == NULL) | ||
136 | sprintf(buf, "%s TRUNCATED", pp->name); | ||
137 | else | ||
138 | sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u", | ||
139 | pp->name, NIPQUAD(ih->saddr), | ||
140 | NIPQUAD(ih->daddr)); | ||
141 | |||
142 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | ||
143 | } | ||
144 | |||
145 | |||
146 | static void ah_init(struct ip_vs_protocol *pp) | ||
147 | { | ||
148 | /* nothing to do now */ | ||
149 | } | ||
150 | |||
151 | |||
152 | static void ah_exit(struct ip_vs_protocol *pp) | ||
153 | { | ||
154 | /* nothing to do now */ | ||
155 | } | ||
156 | |||
157 | |||
158 | struct ip_vs_protocol ip_vs_protocol_ah = { | ||
159 | .name = "AH", | ||
160 | .protocol = IPPROTO_AH, | ||
161 | .num_states = 1, | ||
162 | .dont_defrag = 1, | ||
163 | .init = ah_init, | ||
164 | .exit = ah_exit, | ||
165 | .conn_schedule = ah_conn_schedule, | ||
166 | .conn_in_get = ah_conn_in_get, | ||
167 | .conn_out_get = ah_conn_out_get, | ||
168 | .snat_handler = NULL, | ||
169 | .dnat_handler = NULL, | ||
170 | .csum_check = NULL, | ||
171 | .state_transition = NULL, | ||
172 | .register_app = NULL, | ||
173 | .unregister_app = NULL, | ||
174 | .app_conn_bind = NULL, | ||
175 | .debug_packet = ah_debug_packet, | ||
176 | .timeout_change = NULL, /* ISAKMP */ | ||
177 | .set_state_timeout = NULL, | ||
178 | }; | ||
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah_esp.c b/net/ipv4/ipvs/ip_vs_proto_ah_esp.c new file mode 100644 index 000000000000..2b18a78d0399 --- /dev/null +++ b/net/ipv4/ipvs/ip_vs_proto_ah_esp.c | |||
@@ -0,0 +1,235 @@ | |||
1 | /* | ||
2 | * ip_vs_proto_ah_esp.c: AH/ESP IPSec load balancing support for IPVS | ||
3 | * | ||
4 | * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 | ||
5 | * Wensong Zhang <wensong@linuxvirtualserver.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation; | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/in.h> | ||
14 | #include <linux/ip.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/netfilter.h> | ||
18 | #include <linux/netfilter_ipv4.h> | ||
19 | |||
20 | #include <net/ip_vs.h> | ||
21 | |||
22 | |||
23 | /* TODO: | ||
24 | |||
25 | struct isakmp_hdr { | ||
26 | __u8 icookie[8]; | ||
27 | __u8 rcookie[8]; | ||
28 | __u8 np; | ||
29 | __u8 version; | ||
30 | __u8 xchgtype; | ||
31 | __u8 flags; | ||
32 | __u32 msgid; | ||
33 | __u32 length; | ||
34 | }; | ||
35 | |||
36 | */ | ||
37 | |||
38 | #define PORT_ISAKMP 500 | ||
39 | |||
40 | |||
41 | static struct ip_vs_conn * | ||
42 | ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
43 | const struct ip_vs_iphdr *iph, unsigned int proto_off, | ||
44 | int inverse) | ||
45 | { | ||
46 | struct ip_vs_conn *cp; | ||
47 | |||
48 | if (likely(!inverse)) { | ||
49 | cp = ip_vs_conn_in_get(af, IPPROTO_UDP, | ||
50 | &iph->saddr, | ||
51 | htons(PORT_ISAKMP), | ||
52 | &iph->daddr, | ||
53 | htons(PORT_ISAKMP)); | ||
54 | } else { | ||
55 | cp = ip_vs_conn_in_get(af, IPPROTO_UDP, | ||
56 | &iph->daddr, | ||
57 | htons(PORT_ISAKMP), | ||
58 | &iph->saddr, | ||
59 | htons(PORT_ISAKMP)); | ||
60 | } | ||
61 | |||
62 | if (!cp) { | ||
63 | /* | ||
64 | * We are not sure if the packet is from our | ||
65 | * service, so our conn_schedule hook should return NF_ACCEPT | ||
66 | */ | ||
67 | IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet " | ||
68 | "%s%s %s->%s\n", | ||
69 | inverse ? "ICMP+" : "", | ||
70 | pp->name, | ||
71 | IP_VS_DBG_ADDR(af, &iph->saddr), | ||
72 | IP_VS_DBG_ADDR(af, &iph->daddr)); | ||
73 | } | ||
74 | |||
75 | return cp; | ||
76 | } | ||
77 | |||
78 | |||
79 | static struct ip_vs_conn * | ||
80 | ah_esp_conn_out_get(int af, const struct sk_buff *skb, | ||
81 | struct ip_vs_protocol *pp, | ||
82 | const struct ip_vs_iphdr *iph, | ||
83 | unsigned int proto_off, | ||
84 | int inverse) | ||
85 | { | ||
86 | struct ip_vs_conn *cp; | ||
87 | |||
88 | if (likely(!inverse)) { | ||
89 | cp = ip_vs_conn_out_get(af, IPPROTO_UDP, | ||
90 | &iph->saddr, | ||
91 | htons(PORT_ISAKMP), | ||
92 | &iph->daddr, | ||
93 | htons(PORT_ISAKMP)); | ||
94 | } else { | ||
95 | cp = ip_vs_conn_out_get(af, IPPROTO_UDP, | ||
96 | &iph->daddr, | ||
97 | htons(PORT_ISAKMP), | ||
98 | &iph->saddr, | ||
99 | htons(PORT_ISAKMP)); | ||
100 | } | ||
101 | |||
102 | if (!cp) { | ||
103 | IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet " | ||
104 | "%s%s %s->%s\n", | ||
105 | inverse ? "ICMP+" : "", | ||
106 | pp->name, | ||
107 | IP_VS_DBG_ADDR(af, &iph->saddr), | ||
108 | IP_VS_DBG_ADDR(af, &iph->daddr)); | ||
109 | } | ||
110 | |||
111 | return cp; | ||
112 | } | ||
113 | |||
114 | |||
115 | static int | ||
116 | ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
117 | int *verdict, struct ip_vs_conn **cpp) | ||
118 | { | ||
119 | /* | ||
120 | * AH/ESP is only related traffic. Pass the packet to IP stack. | ||
121 | */ | ||
122 | *verdict = NF_ACCEPT; | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | |||
127 | static void | ||
128 | ah_esp_debug_packet_v4(struct ip_vs_protocol *pp, const struct sk_buff *skb, | ||
129 | int offset, const char *msg) | ||
130 | { | ||
131 | char buf[256]; | ||
132 | struct iphdr _iph, *ih; | ||
133 | |||
134 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | ||
135 | if (ih == NULL) | ||
136 | sprintf(buf, "%s TRUNCATED", pp->name); | ||
137 | else | ||
138 | sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u", | ||
139 | pp->name, NIPQUAD(ih->saddr), | ||
140 | NIPQUAD(ih->daddr)); | ||
141 | |||
142 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | ||
143 | } | ||
144 | |||
145 | #ifdef CONFIG_IP_VS_IPV6 | ||
146 | static void | ||
147 | ah_esp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb, | ||
148 | int offset, const char *msg) | ||
149 | { | ||
150 | char buf[256]; | ||
151 | struct ipv6hdr _iph, *ih; | ||
152 | |||
153 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | ||
154 | if (ih == NULL) | ||
155 | sprintf(buf, "%s TRUNCATED", pp->name); | ||
156 | else | ||
157 | sprintf(buf, "%s " NIP6_FMT "->" NIP6_FMT, | ||
158 | pp->name, NIP6(ih->saddr), | ||
159 | NIP6(ih->daddr)); | ||
160 | |||
161 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | ||
162 | } | ||
163 | #endif | ||
164 | |||
165 | static void | ||
166 | ah_esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb, | ||
167 | int offset, const char *msg) | ||
168 | { | ||
169 | #ifdef CONFIG_IP_VS_IPV6 | ||
170 | if (skb->protocol == __constant_htons(ETH_P_IPV6)) | ||
171 | ah_esp_debug_packet_v6(pp, skb, offset, msg); | ||
172 | else | ||
173 | #endif | ||
174 | ah_esp_debug_packet_v4(pp, skb, offset, msg); | ||
175 | } | ||
176 | |||
177 | |||
178 | static void ah_esp_init(struct ip_vs_protocol *pp) | ||
179 | { | ||
180 | /* nothing to do now */ | ||
181 | } | ||
182 | |||
183 | |||
184 | static void ah_esp_exit(struct ip_vs_protocol *pp) | ||
185 | { | ||
186 | /* nothing to do now */ | ||
187 | } | ||
188 | |||
189 | |||
190 | #ifdef CONFIG_IP_VS_PROTO_AH | ||
191 | struct ip_vs_protocol ip_vs_protocol_ah = { | ||
192 | .name = "AH", | ||
193 | .protocol = IPPROTO_AH, | ||
194 | .num_states = 1, | ||
195 | .dont_defrag = 1, | ||
196 | .init = ah_esp_init, | ||
197 | .exit = ah_esp_exit, | ||
198 | .conn_schedule = ah_esp_conn_schedule, | ||
199 | .conn_in_get = ah_esp_conn_in_get, | ||
200 | .conn_out_get = ah_esp_conn_out_get, | ||
201 | .snat_handler = NULL, | ||
202 | .dnat_handler = NULL, | ||
203 | .csum_check = NULL, | ||
204 | .state_transition = NULL, | ||
205 | .register_app = NULL, | ||
206 | .unregister_app = NULL, | ||
207 | .app_conn_bind = NULL, | ||
208 | .debug_packet = ah_esp_debug_packet, | ||
209 | .timeout_change = NULL, /* ISAKMP */ | ||
210 | .set_state_timeout = NULL, | ||
211 | }; | ||
212 | #endif | ||
213 | |||
214 | #ifdef CONFIG_IP_VS_PROTO_ESP | ||
215 | struct ip_vs_protocol ip_vs_protocol_esp = { | ||
216 | .name = "ESP", | ||
217 | .protocol = IPPROTO_ESP, | ||
218 | .num_states = 1, | ||
219 | .dont_defrag = 1, | ||
220 | .init = ah_esp_init, | ||
221 | .exit = ah_esp_exit, | ||
222 | .conn_schedule = ah_esp_conn_schedule, | ||
223 | .conn_in_get = ah_esp_conn_in_get, | ||
224 | .conn_out_get = ah_esp_conn_out_get, | ||
225 | .snat_handler = NULL, | ||
226 | .dnat_handler = NULL, | ||
227 | .csum_check = NULL, | ||
228 | .state_transition = NULL, | ||
229 | .register_app = NULL, | ||
230 | .unregister_app = NULL, | ||
231 | .app_conn_bind = NULL, | ||
232 | .debug_packet = ah_esp_debug_packet, | ||
233 | .timeout_change = NULL, /* ISAKMP */ | ||
234 | }; | ||
235 | #endif | ||
diff --git a/net/ipv4/ipvs/ip_vs_proto_esp.c b/net/ipv4/ipvs/ip_vs_proto_esp.c deleted file mode 100644 index 21d70c8ffa54..000000000000 --- a/net/ipv4/ipvs/ip_vs_proto_esp.c +++ /dev/null | |||
@@ -1,176 +0,0 @@ | |||
1 | /* | ||
2 | * ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS | ||
3 | * | ||
4 | * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 | ||
5 | * Wensong Zhang <wensong@linuxvirtualserver.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation; | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/in.h> | ||
14 | #include <linux/ip.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/netfilter.h> | ||
18 | #include <linux/netfilter_ipv4.h> | ||
19 | |||
20 | #include <net/ip_vs.h> | ||
21 | |||
22 | |||
23 | /* TODO: | ||
24 | |||
25 | struct isakmp_hdr { | ||
26 | __u8 icookie[8]; | ||
27 | __u8 rcookie[8]; | ||
28 | __u8 np; | ||
29 | __u8 version; | ||
30 | __u8 xchgtype; | ||
31 | __u8 flags; | ||
32 | __u32 msgid; | ||
33 | __u32 length; | ||
34 | }; | ||
35 | |||
36 | */ | ||
37 | |||
38 | #define PORT_ISAKMP 500 | ||
39 | |||
40 | |||
41 | static struct ip_vs_conn * | ||
42 | esp_conn_in_get(const struct sk_buff *skb, | ||
43 | struct ip_vs_protocol *pp, | ||
44 | const struct iphdr *iph, | ||
45 | unsigned int proto_off, | ||
46 | int inverse) | ||
47 | { | ||
48 | struct ip_vs_conn *cp; | ||
49 | |||
50 | if (likely(!inverse)) { | ||
51 | cp = ip_vs_conn_in_get(IPPROTO_UDP, | ||
52 | iph->saddr, | ||
53 | htons(PORT_ISAKMP), | ||
54 | iph->daddr, | ||
55 | htons(PORT_ISAKMP)); | ||
56 | } else { | ||
57 | cp = ip_vs_conn_in_get(IPPROTO_UDP, | ||
58 | iph->daddr, | ||
59 | htons(PORT_ISAKMP), | ||
60 | iph->saddr, | ||
61 | htons(PORT_ISAKMP)); | ||
62 | } | ||
63 | |||
64 | if (!cp) { | ||
65 | /* | ||
66 | * We are not sure if the packet is from our | ||
67 | * service, so our conn_schedule hook should return NF_ACCEPT | ||
68 | */ | ||
69 | IP_VS_DBG(12, "Unknown ISAKMP entry for outin packet " | ||
70 | "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n", | ||
71 | inverse ? "ICMP+" : "", | ||
72 | pp->name, | ||
73 | NIPQUAD(iph->saddr), | ||
74 | NIPQUAD(iph->daddr)); | ||
75 | } | ||
76 | |||
77 | return cp; | ||
78 | } | ||
79 | |||
80 | |||
81 | static struct ip_vs_conn * | ||
82 | esp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
83 | const struct iphdr *iph, unsigned int proto_off, int inverse) | ||
84 | { | ||
85 | struct ip_vs_conn *cp; | ||
86 | |||
87 | if (likely(!inverse)) { | ||
88 | cp = ip_vs_conn_out_get(IPPROTO_UDP, | ||
89 | iph->saddr, | ||
90 | htons(PORT_ISAKMP), | ||
91 | iph->daddr, | ||
92 | htons(PORT_ISAKMP)); | ||
93 | } else { | ||
94 | cp = ip_vs_conn_out_get(IPPROTO_UDP, | ||
95 | iph->daddr, | ||
96 | htons(PORT_ISAKMP), | ||
97 | iph->saddr, | ||
98 | htons(PORT_ISAKMP)); | ||
99 | } | ||
100 | |||
101 | if (!cp) { | ||
102 | IP_VS_DBG(12, "Unknown ISAKMP entry for inout packet " | ||
103 | "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n", | ||
104 | inverse ? "ICMP+" : "", | ||
105 | pp->name, | ||
106 | NIPQUAD(iph->saddr), | ||
107 | NIPQUAD(iph->daddr)); | ||
108 | } | ||
109 | |||
110 | return cp; | ||
111 | } | ||
112 | |||
113 | |||
114 | static int | ||
115 | esp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
116 | int *verdict, struct ip_vs_conn **cpp) | ||
117 | { | ||
118 | /* | ||
119 | * ESP is only related traffic. Pass the packet to IP stack. | ||
120 | */ | ||
121 | *verdict = NF_ACCEPT; | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | |||
126 | static void | ||
127 | esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb, | ||
128 | int offset, const char *msg) | ||
129 | { | ||
130 | char buf[256]; | ||
131 | struct iphdr _iph, *ih; | ||
132 | |||
133 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | ||
134 | if (ih == NULL) | ||
135 | sprintf(buf, "%s TRUNCATED", pp->name); | ||
136 | else | ||
137 | sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u", | ||
138 | pp->name, NIPQUAD(ih->saddr), | ||
139 | NIPQUAD(ih->daddr)); | ||
140 | |||
141 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | ||
142 | } | ||
143 | |||
144 | |||
145 | static void esp_init(struct ip_vs_protocol *pp) | ||
146 | { | ||
147 | /* nothing to do now */ | ||
148 | } | ||
149 | |||
150 | |||
151 | static void esp_exit(struct ip_vs_protocol *pp) | ||
152 | { | ||
153 | /* nothing to do now */ | ||
154 | } | ||
155 | |||
156 | |||
157 | struct ip_vs_protocol ip_vs_protocol_esp = { | ||
158 | .name = "ESP", | ||
159 | .protocol = IPPROTO_ESP, | ||
160 | .num_states = 1, | ||
161 | .dont_defrag = 1, | ||
162 | .init = esp_init, | ||
163 | .exit = esp_exit, | ||
164 | .conn_schedule = esp_conn_schedule, | ||
165 | .conn_in_get = esp_conn_in_get, | ||
166 | .conn_out_get = esp_conn_out_get, | ||
167 | .snat_handler = NULL, | ||
168 | .dnat_handler = NULL, | ||
169 | .csum_check = NULL, | ||
170 | .state_transition = NULL, | ||
171 | .register_app = NULL, | ||
172 | .unregister_app = NULL, | ||
173 | .app_conn_bind = NULL, | ||
174 | .debug_packet = esp_debug_packet, | ||
175 | .timeout_change = NULL, /* ISAKMP */ | ||
176 | }; | ||
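
The ip_vs_proto_tcp.c changes below extend tcp_fast_csum_update() to both address families and add tcp_partial_csum_update() for CHECKSUM_PARTIAL skbs. Both rest on the incremental Internet-checksum update of RFC 1624: only the difference between the old and new 16-bit values is folded into the existing checksum, instead of recomputing it over the whole segment. A self-contained sketch of that arithmetic (userspace; not the kernel's csum_fold()/ip_vs_check_diff* helpers, though they perform the same folding):

/* RFC 1624 incremental checksum update, checked against a full recompute. */
#include <stdint.h>
#include <stdio.h>

static uint16_t csum(const uint16_t *p, int n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *p++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* HC' = ~(~HC + ~m + m')   (RFC 1624, eqn. 3) */
static uint16_t csum_update(uint16_t check, uint16_t old, uint16_t new)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old + new;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t pkt[4] = { 0x1234, 0x5678, 0x9abc, 0x0000 };
	uint16_t check = csum(pkt, 4);

	pkt[1] = 0x1f90;	/* e.g. rewrite a port field to 8080 */
	printf("incremental: %04x\n", csum_update(check, 0x5678, 0x1f90));
	printf("recomputed : %04x\n", csum(pkt, 4));
	return 0;
}

Both lines print the same value, which is the point: when only an address or port changes, the checksum can be patched without walking the payload.
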
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c index d0ea467986a0..537f616776da 100644 --- a/net/ipv4/ipvs/ip_vs_proto_tcp.c +++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c | |||
@@ -25,8 +25,9 @@ | |||
25 | 25 | ||
26 | 26 | ||
27 | static struct ip_vs_conn * | 27 | static struct ip_vs_conn * |
28 | tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | 28 | tcp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, |
29 | const struct iphdr *iph, unsigned int proto_off, int inverse) | 29 | const struct ip_vs_iphdr *iph, unsigned int proto_off, |
30 | int inverse) | ||
30 | { | 31 | { |
31 | __be16 _ports[2], *pptr; | 32 | __be16 _ports[2], *pptr; |
32 | 33 | ||
@@ -35,19 +36,20 @@ tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
35 | return NULL; | 36 | return NULL; |
36 | 37 | ||
37 | if (likely(!inverse)) { | 38 | if (likely(!inverse)) { |
38 | return ip_vs_conn_in_get(iph->protocol, | 39 | return ip_vs_conn_in_get(af, iph->protocol, |
39 | iph->saddr, pptr[0], | 40 | &iph->saddr, pptr[0], |
40 | iph->daddr, pptr[1]); | 41 | &iph->daddr, pptr[1]); |
41 | } else { | 42 | } else { |
42 | return ip_vs_conn_in_get(iph->protocol, | 43 | return ip_vs_conn_in_get(af, iph->protocol, |
43 | iph->daddr, pptr[1], | 44 | &iph->daddr, pptr[1], |
44 | iph->saddr, pptr[0]); | 45 | &iph->saddr, pptr[0]); |
45 | } | 46 | } |
46 | } | 47 | } |
47 | 48 | ||
48 | static struct ip_vs_conn * | 49 | static struct ip_vs_conn * |
49 | tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | 50 | tcp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, |
50 | const struct iphdr *iph, unsigned int proto_off, int inverse) | 51 | const struct ip_vs_iphdr *iph, unsigned int proto_off, |
52 | int inverse) | ||
51 | { | 53 | { |
52 | __be16 _ports[2], *pptr; | 54 | __be16 _ports[2], *pptr; |
53 | 55 | ||
@@ -56,34 +58,36 @@ tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
56 | return NULL; | 58 | return NULL; |
57 | 59 | ||
58 | if (likely(!inverse)) { | 60 | if (likely(!inverse)) { |
59 | return ip_vs_conn_out_get(iph->protocol, | 61 | return ip_vs_conn_out_get(af, iph->protocol, |
60 | iph->saddr, pptr[0], | 62 | &iph->saddr, pptr[0], |
61 | iph->daddr, pptr[1]); | 63 | &iph->daddr, pptr[1]); |
62 | } else { | 64 | } else { |
63 | return ip_vs_conn_out_get(iph->protocol, | 65 | return ip_vs_conn_out_get(af, iph->protocol, |
64 | iph->daddr, pptr[1], | 66 | &iph->daddr, pptr[1], |
65 | iph->saddr, pptr[0]); | 67 | &iph->saddr, pptr[0]); |
66 | } | 68 | } |
67 | } | 69 | } |
68 | 70 | ||
69 | 71 | ||
70 | static int | 72 | static int |
71 | tcp_conn_schedule(struct sk_buff *skb, | 73 | tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, |
72 | struct ip_vs_protocol *pp, | ||
73 | int *verdict, struct ip_vs_conn **cpp) | 74 | int *verdict, struct ip_vs_conn **cpp) |
74 | { | 75 | { |
75 | struct ip_vs_service *svc; | 76 | struct ip_vs_service *svc; |
76 | struct tcphdr _tcph, *th; | 77 | struct tcphdr _tcph, *th; |
78 | struct ip_vs_iphdr iph; | ||
77 | 79 | ||
78 | th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); | 80 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
81 | |||
82 | th = skb_header_pointer(skb, iph.len, sizeof(_tcph), &_tcph); | ||
79 | if (th == NULL) { | 83 | if (th == NULL) { |
80 | *verdict = NF_DROP; | 84 | *verdict = NF_DROP; |
81 | return 0; | 85 | return 0; |
82 | } | 86 | } |
83 | 87 | ||
84 | if (th->syn && | 88 | if (th->syn && |
85 | (svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol, | 89 | (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr, |
86 | ip_hdr(skb)->daddr, th->dest))) { | 90 | th->dest))) { |
87 | if (ip_vs_todrop()) { | 91 | if (ip_vs_todrop()) { |
88 | /* | 92 | /* |
89 | * It seems that we are very loaded. | 93 | * It seems that we are very loaded. |
@@ -110,22 +114,62 @@ tcp_conn_schedule(struct sk_buff *skb, | |||
110 | 114 | ||
111 | 115 | ||
112 | static inline void | 116 | static inline void |
113 | tcp_fast_csum_update(struct tcphdr *tcph, __be32 oldip, __be32 newip, | 117 | tcp_fast_csum_update(int af, struct tcphdr *tcph, |
118 | const union nf_inet_addr *oldip, | ||
119 | const union nf_inet_addr *newip, | ||
114 | __be16 oldport, __be16 newport) | 120 | __be16 oldport, __be16 newport) |
115 | { | 121 | { |
122 | #ifdef CONFIG_IP_VS_IPV6 | ||
123 | if (af == AF_INET6) | ||
124 | tcph->check = | ||
125 | csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, | ||
126 | ip_vs_check_diff2(oldport, newport, | ||
127 | ~csum_unfold(tcph->check)))); | ||
128 | else | ||
129 | #endif | ||
116 | tcph->check = | 130 | tcph->check = |
117 | csum_fold(ip_vs_check_diff4(oldip, newip, | 131 | csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, |
118 | ip_vs_check_diff2(oldport, newport, | 132 | ip_vs_check_diff2(oldport, newport, |
119 | ~csum_unfold(tcph->check)))); | 133 | ~csum_unfold(tcph->check)))); |
120 | } | 134 | } |
121 | 135 | ||
122 | 136 | ||
137 | static inline void | ||
138 | tcp_partial_csum_update(int af, struct tcphdr *tcph, | ||
139 | const union nf_inet_addr *oldip, | ||
140 | const union nf_inet_addr *newip, | ||
141 | __be16 oldlen, __be16 newlen) | ||
142 | { | ||
143 | #ifdef CONFIG_IP_VS_IPV6 | ||
144 | if (af == AF_INET6) | ||
145 | tcph->check = | ||
146 | csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, | ||
147 | ip_vs_check_diff2(oldlen, newlen, | ||
148 | ~csum_unfold(tcph->check)))); | ||
149 | else | ||
150 | #endif | ||
151 | tcph->check = | ||
152 | csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, | ||
153 | ip_vs_check_diff2(oldlen, newlen, | ||
154 | ~csum_unfold(tcph->check)))); | ||
155 | } | ||
156 | |||
157 | |||
123 | static int | 158 | static int |
124 | tcp_snat_handler(struct sk_buff *skb, | 159 | tcp_snat_handler(struct sk_buff *skb, |
125 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 160 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) |
126 | { | 161 | { |
127 | struct tcphdr *tcph; | 162 | struct tcphdr *tcph; |
128 | const unsigned int tcphoff = ip_hdrlen(skb); | 163 | unsigned int tcphoff; |
164 | int oldlen; | ||
165 | |||
166 | #ifdef CONFIG_IP_VS_IPV6 | ||
167 | if (cp->af == AF_INET6) | ||
168 | tcphoff = sizeof(struct ipv6hdr); | ||
169 | else | ||
170 | #endif | ||
171 | tcphoff = ip_hdrlen(skb); | ||
172 | oldlen = skb->len - tcphoff; | ||
129 | 173 | ||
130 | /* csum_check requires unshared skb */ | 174 | /* csum_check requires unshared skb */ |
131 | if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) | 175 | if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) |
@@ -133,7 +177,7 @@ tcp_snat_handler(struct sk_buff *skb, | |||
133 | 177 | ||
134 | if (unlikely(cp->app != NULL)) { | 178 | if (unlikely(cp->app != NULL)) { |
135 | /* Some checks before mangling */ | 179 | /* Some checks before mangling */ |
136 | if (pp->csum_check && !pp->csum_check(skb, pp)) | 180 | if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) |
137 | return 0; | 181 | return 0; |
138 | 182 | ||
139 | /* Call application helper if needed */ | 183 | /* Call application helper if needed */ |
@@ -141,13 +185,17 @@ tcp_snat_handler(struct sk_buff *skb, | |||
141 | return 0; | 185 | return 0; |
142 | } | 186 | } |
143 | 187 | ||
144 | tcph = (void *)ip_hdr(skb) + tcphoff; | 188 | tcph = (void *)skb_network_header(skb) + tcphoff; |
145 | tcph->source = cp->vport; | 189 | tcph->source = cp->vport; |
146 | 190 | ||
147 | /* Adjust TCP checksums */ | 191 | /* Adjust TCP checksums */ |
148 | if (!cp->app) { | 192 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
193 | tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, | ||
194 | htonl(oldlen), | ||
195 | htonl(skb->len - tcphoff)); | ||
196 | } else if (!cp->app) { | ||
149 | /* Only port and addr are changed, do fast csum update */ | 197 | /* Only port and addr are changed, do fast csum update */ |
150 | tcp_fast_csum_update(tcph, cp->daddr, cp->vaddr, | 198 | tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, |
151 | cp->dport, cp->vport); | 199 | cp->dport, cp->vport); |
152 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 200 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
153 | skb->ip_summed = CHECKSUM_NONE; | 201 | skb->ip_summed = CHECKSUM_NONE; |
@@ -155,9 +203,20 @@ tcp_snat_handler(struct sk_buff *skb, | |||
155 | /* full checksum calculation */ | 203 | /* full checksum calculation */ |
156 | tcph->check = 0; | 204 | tcph->check = 0; |
157 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); | 205 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); |
158 | tcph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, | 206 | #ifdef CONFIG_IP_VS_IPV6 |
159 | skb->len - tcphoff, | 207 | if (cp->af == AF_INET6) |
160 | cp->protocol, skb->csum); | 208 | tcph->check = csum_ipv6_magic(&cp->vaddr.in6, |
209 | &cp->caddr.in6, | ||
210 | skb->len - tcphoff, | ||
211 | cp->protocol, skb->csum); | ||
212 | else | ||
213 | #endif | ||
214 | tcph->check = csum_tcpudp_magic(cp->vaddr.ip, | ||
215 | cp->caddr.ip, | ||
216 | skb->len - tcphoff, | ||
217 | cp->protocol, | ||
218 | skb->csum); | ||
219 | |||
161 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", | 220 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", |
162 | pp->name, tcph->check, | 221 | pp->name, tcph->check, |
163 | (char*)&(tcph->check) - (char*)tcph); | 222 | (char*)&(tcph->check) - (char*)tcph); |
@@ -171,7 +230,16 @@ tcp_dnat_handler(struct sk_buff *skb, | |||
171 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 230 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) |
172 | { | 231 | { |
173 | struct tcphdr *tcph; | 232 | struct tcphdr *tcph; |
174 | const unsigned int tcphoff = ip_hdrlen(skb); | 233 | unsigned int tcphoff; |
234 | int oldlen; | ||
235 | |||
236 | #ifdef CONFIG_IP_VS_IPV6 | ||
237 | if (cp->af == AF_INET6) | ||
238 | tcphoff = sizeof(struct ipv6hdr); | ||
239 | else | ||
240 | #endif | ||
241 | tcphoff = ip_hdrlen(skb); | ||
242 | oldlen = skb->len - tcphoff; | ||
175 | 243 | ||
176 | /* csum_check requires unshared skb */ | 244 | /* csum_check requires unshared skb */ |
177 | if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) | 245 | if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) |
@@ -179,7 +247,7 @@ tcp_dnat_handler(struct sk_buff *skb, | |||
179 | 247 | ||
180 | if (unlikely(cp->app != NULL)) { | 248 | if (unlikely(cp->app != NULL)) { |
181 | /* Some checks before mangling */ | 249 | /* Some checks before mangling */ |
182 | if (pp->csum_check && !pp->csum_check(skb, pp)) | 250 | if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) |
183 | return 0; | 251 | return 0; |
184 | 252 | ||
185 | /* | 253 | /* |
@@ -190,15 +258,19 @@ tcp_dnat_handler(struct sk_buff *skb, | |||
190 | return 0; | 258 | return 0; |
191 | } | 259 | } |
192 | 260 | ||
193 | tcph = (void *)ip_hdr(skb) + tcphoff; | 261 | tcph = (void *)skb_network_header(skb) + tcphoff; |
194 | tcph->dest = cp->dport; | 262 | tcph->dest = cp->dport; |
195 | 263 | ||
196 | /* | 264 | /* |
197 | * Adjust TCP checksums | 265 | * Adjust TCP checksums |
198 | */ | 266 | */ |
199 | if (!cp->app) { | 267 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
268 | tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, | ||
269 | htonl(oldlen), | ||
270 | htonl(skb->len - tcphoff)); | ||
271 | } else if (!cp->app) { | ||
200 | /* Only port and addr are changed, do fast csum update */ | 272 | /* Only port and addr are changed, do fast csum update */ |
201 | tcp_fast_csum_update(tcph, cp->vaddr, cp->daddr, | 273 | tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr, |
202 | cp->vport, cp->dport); | 274 | cp->vport, cp->dport); |
203 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 275 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
204 | skb->ip_summed = CHECKSUM_NONE; | 276 | skb->ip_summed = CHECKSUM_NONE; |
@@ -206,9 +278,19 @@ tcp_dnat_handler(struct sk_buff *skb, | |||
206 | /* full checksum calculation */ | 278 | /* full checksum calculation */ |
207 | tcph->check = 0; | 279 | tcph->check = 0; |
208 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); | 280 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); |
209 | tcph->check = csum_tcpudp_magic(cp->caddr, cp->daddr, | 281 | #ifdef CONFIG_IP_VS_IPV6 |
210 | skb->len - tcphoff, | 282 | if (cp->af == AF_INET6) |
211 | cp->protocol, skb->csum); | 283 | tcph->check = csum_ipv6_magic(&cp->caddr.in6, |
284 | &cp->daddr.in6, | ||
285 | skb->len - tcphoff, | ||
286 | cp->protocol, skb->csum); | ||
287 | else | ||
288 | #endif | ||
289 | tcph->check = csum_tcpudp_magic(cp->caddr.ip, | ||
290 | cp->daddr.ip, | ||
291 | skb->len - tcphoff, | ||
292 | cp->protocol, | ||
293 | skb->csum); | ||
212 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 294 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
213 | } | 295 | } |
214 | return 1; | 296 | return 1; |
@@ -216,21 +298,43 @@ tcp_dnat_handler(struct sk_buff *skb, | |||
216 | 298 | ||
217 | 299 | ||
218 | static int | 300 | static int |
219 | tcp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) | 301 | tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) |
220 | { | 302 | { |
221 | const unsigned int tcphoff = ip_hdrlen(skb); | 303 | unsigned int tcphoff; |
304 | |||
305 | #ifdef CONFIG_IP_VS_IPV6 | ||
306 | if (af == AF_INET6) | ||
307 | tcphoff = sizeof(struct ipv6hdr); | ||
308 | else | ||
309 | #endif | ||
310 | tcphoff = ip_hdrlen(skb); | ||
222 | 311 | ||
223 | switch (skb->ip_summed) { | 312 | switch (skb->ip_summed) { |
224 | case CHECKSUM_NONE: | 313 | case CHECKSUM_NONE: |
225 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); | 314 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); |
226 | case CHECKSUM_COMPLETE: | 315 | case CHECKSUM_COMPLETE: |
227 | if (csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, | 316 | #ifdef CONFIG_IP_VS_IPV6 |
228 | skb->len - tcphoff, | 317 | if (af == AF_INET6) { |
229 | ip_hdr(skb)->protocol, skb->csum)) { | 318 | if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
230 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | 319 | &ipv6_hdr(skb)->daddr, |
231 | "Failed checksum for"); | 320 | skb->len - tcphoff, |
232 | return 0; | 321 | ipv6_hdr(skb)->nexthdr, |
233 | } | 322 | skb->csum)) { |
323 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | ||
324 | "Failed checksum for"); | ||
325 | return 0; | ||
326 | } | ||
327 | } else | ||
328 | #endif | ||
329 | if (csum_tcpudp_magic(ip_hdr(skb)->saddr, | ||
330 | ip_hdr(skb)->daddr, | ||
331 | skb->len - tcphoff, | ||
332 | ip_hdr(skb)->protocol, | ||
333 | skb->csum)) { | ||
334 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | ||
335 | "Failed checksum for"); | ||
336 | return 0; | ||
337 | } | ||
234 | break; | 338 | break; |
235 | default: | 339 | default: |
236 | /* No need to checksum. */ | 340 | /* No need to checksum. */ |
@@ -419,19 +523,23 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp, | |||
419 | if (new_state != cp->state) { | 523 | if (new_state != cp->state) { |
420 | struct ip_vs_dest *dest = cp->dest; | 524 | struct ip_vs_dest *dest = cp->dest; |
421 | 525 | ||
422 | IP_VS_DBG(8, "%s %s [%c%c%c%c] %u.%u.%u.%u:%d->" | 526 | IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->" |
423 | "%u.%u.%u.%u:%d state: %s->%s conn->refcnt:%d\n", | 527 | "%s:%d state: %s->%s conn->refcnt:%d\n", |
424 | pp->name, | 528 | pp->name, |
425 | (state_off==TCP_DIR_OUTPUT)?"output ":"input ", | 529 | ((state_off == TCP_DIR_OUTPUT) ? |
426 | th->syn? 'S' : '.', | 530 | "output " : "input "), |
427 | th->fin? 'F' : '.', | 531 | th->syn ? 'S' : '.', |
428 | th->ack? 'A' : '.', | 532 | th->fin ? 'F' : '.', |
429 | th->rst? 'R' : '.', | 533 | th->ack ? 'A' : '.', |
430 | NIPQUAD(cp->daddr), ntohs(cp->dport), | 534 | th->rst ? 'R' : '.', |
431 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 535 | IP_VS_DBG_ADDR(cp->af, &cp->daddr), |
432 | tcp_state_name(cp->state), | 536 | ntohs(cp->dport), |
433 | tcp_state_name(new_state), | 537 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), |
434 | atomic_read(&cp->refcnt)); | 538 | ntohs(cp->cport), |
539 | tcp_state_name(cp->state), | ||
540 | tcp_state_name(new_state), | ||
541 | atomic_read(&cp->refcnt)); | ||
542 | |||
435 | if (dest) { | 543 | if (dest) { |
436 | if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && | 544 | if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && |
437 | (new_state != IP_VS_TCP_S_ESTABLISHED)) { | 545 | (new_state != IP_VS_TCP_S_ESTABLISHED)) { |
@@ -461,7 +569,13 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction, | |||
461 | { | 569 | { |
462 | struct tcphdr _tcph, *th; | 570 | struct tcphdr _tcph, *th; |
463 | 571 | ||
464 | th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); | 572 | #ifdef CONFIG_IP_VS_IPV6 |
573 | int ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); | ||
574 | #else | ||
575 | int ihl = ip_hdrlen(skb); | ||
576 | #endif | ||
577 | |||
578 | th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph); | ||
465 | if (th == NULL) | 579 | if (th == NULL) |
466 | return 0; | 580 | return 0; |
467 | 581 | ||
@@ -546,12 +660,15 @@ tcp_app_conn_bind(struct ip_vs_conn *cp) | |||
546 | break; | 660 | break; |
547 | spin_unlock(&tcp_app_lock); | 661 | spin_unlock(&tcp_app_lock); |
548 | 662 | ||
549 | IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" | 663 | IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->" |
550 | "%u.%u.%u.%u:%u to app %s on port %u\n", | 664 | "%s:%u to app %s on port %u\n", |
551 | __func__, | 665 | __func__, |
552 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 666 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), |
553 | NIPQUAD(cp->vaddr), ntohs(cp->vport), | 667 | ntohs(cp->cport), |
554 | inc->name, ntohs(inc->port)); | 668 | IP_VS_DBG_ADDR(cp->af, &cp->vaddr), |
669 | ntohs(cp->vport), | ||
670 | inc->name, ntohs(inc->port)); | ||
671 | |||
555 | cp->app = inc; | 672 | cp->app = inc; |
556 | if (inc->init_conn) | 673 | if (inc->init_conn) |
557 | result = inc->init_conn(inc, cp); | 674 | result = inc->init_conn(inc, cp); |
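
The TCP handlers above all repeat one pattern: derive the transport-header offset from the address family (a fixed sizeof(struct ipv6hdr) for AF_INET6, ip_hdrlen() for AF_INET) and then pick the matching pseudo-header checksum helper. A minimal user-space sketch of the offset part only, assuming plain POSIX/netinet headers rather than the kernel's helpers:

    #include <netinet/ip.h>      /* struct iphdr */
    #include <netinet/ip6.h>     /* struct ip6_hdr */
    #include <sys/socket.h>      /* AF_INET, AF_INET6 */
    #include <stddef.h>

    /* Offset of the transport header behind the network header.
     * IPv6 has a fixed 40-byte base header (extension headers are
     * ignored here, as in the IPVS fast path); IPv4 carries its own
     * header length in ihl. */
    static size_t transport_offset(int af, const void *nh)
    {
        if (af == AF_INET6)
            return sizeof(struct ip6_hdr);          /* 40 bytes */
        return ((const struct iphdr *)nh)->ihl * 4; /* 20..60 bytes */
    }

In the kernel the same branch also selects csum_ipv6_magic() versus csum_tcpudp_magic() for the pseudo-header sum; only the offset logic is shown here.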
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c index c6be5d56823f..e3ee26bd1de7 100644 --- a/net/ipv4/ipvs/ip_vs_proto_udp.c +++ b/net/ipv4/ipvs/ip_vs_proto_udp.c | |||
@@ -24,8 +24,9 @@ | |||
24 | #include <net/ip.h> | 24 | #include <net/ip.h> |
25 | 25 | ||
26 | static struct ip_vs_conn * | 26 | static struct ip_vs_conn * |
27 | udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | 27 | udp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, |
28 | const struct iphdr *iph, unsigned int proto_off, int inverse) | 28 | const struct ip_vs_iphdr *iph, unsigned int proto_off, |
29 | int inverse) | ||
29 | { | 30 | { |
30 | struct ip_vs_conn *cp; | 31 | struct ip_vs_conn *cp; |
31 | __be16 _ports[2], *pptr; | 32 | __be16 _ports[2], *pptr; |
@@ -35,13 +36,13 @@ udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
35 | return NULL; | 36 | return NULL; |
36 | 37 | ||
37 | if (likely(!inverse)) { | 38 | if (likely(!inverse)) { |
38 | cp = ip_vs_conn_in_get(iph->protocol, | 39 | cp = ip_vs_conn_in_get(af, iph->protocol, |
39 | iph->saddr, pptr[0], | 40 | &iph->saddr, pptr[0], |
40 | iph->daddr, pptr[1]); | 41 | &iph->daddr, pptr[1]); |
41 | } else { | 42 | } else { |
42 | cp = ip_vs_conn_in_get(iph->protocol, | 43 | cp = ip_vs_conn_in_get(af, iph->protocol, |
43 | iph->daddr, pptr[1], | 44 | &iph->daddr, pptr[1], |
44 | iph->saddr, pptr[0]); | 45 | &iph->saddr, pptr[0]); |
45 | } | 46 | } |
46 | 47 | ||
47 | return cp; | 48 | return cp; |
@@ -49,25 +50,25 @@ udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
49 | 50 | ||
50 | 51 | ||
51 | static struct ip_vs_conn * | 52 | static struct ip_vs_conn * |
52 | udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | 53 | udp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, |
53 | const struct iphdr *iph, unsigned int proto_off, int inverse) | 54 | const struct ip_vs_iphdr *iph, unsigned int proto_off, |
55 | int inverse) | ||
54 | { | 56 | { |
55 | struct ip_vs_conn *cp; | 57 | struct ip_vs_conn *cp; |
56 | __be16 _ports[2], *pptr; | 58 | __be16 _ports[2], *pptr; |
57 | 59 | ||
58 | pptr = skb_header_pointer(skb, ip_hdrlen(skb), | 60 | pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); |
59 | sizeof(_ports), _ports); | ||
60 | if (pptr == NULL) | 61 | if (pptr == NULL) |
61 | return NULL; | 62 | return NULL; |
62 | 63 | ||
63 | if (likely(!inverse)) { | 64 | if (likely(!inverse)) { |
64 | cp = ip_vs_conn_out_get(iph->protocol, | 65 | cp = ip_vs_conn_out_get(af, iph->protocol, |
65 | iph->saddr, pptr[0], | 66 | &iph->saddr, pptr[0], |
66 | iph->daddr, pptr[1]); | 67 | &iph->daddr, pptr[1]); |
67 | } else { | 68 | } else { |
68 | cp = ip_vs_conn_out_get(iph->protocol, | 69 | cp = ip_vs_conn_out_get(af, iph->protocol, |
69 | iph->daddr, pptr[1], | 70 | &iph->daddr, pptr[1], |
70 | iph->saddr, pptr[0]); | 71 | &iph->saddr, pptr[0]); |
71 | } | 72 | } |
72 | 73 | ||
73 | return cp; | 74 | return cp; |
@@ -75,21 +76,24 @@ udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
75 | 76 | ||
76 | 77 | ||
77 | static int | 78 | static int |
78 | udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp, | 79 | udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, |
79 | int *verdict, struct ip_vs_conn **cpp) | 80 | int *verdict, struct ip_vs_conn **cpp) |
80 | { | 81 | { |
81 | struct ip_vs_service *svc; | 82 | struct ip_vs_service *svc; |
82 | struct udphdr _udph, *uh; | 83 | struct udphdr _udph, *uh; |
84 | struct ip_vs_iphdr iph; | ||
85 | |||
86 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
83 | 87 | ||
84 | uh = skb_header_pointer(skb, ip_hdrlen(skb), | 88 | uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph); |
85 | sizeof(_udph), &_udph); | ||
86 | if (uh == NULL) { | 89 | if (uh == NULL) { |
87 | *verdict = NF_DROP; | 90 | *verdict = NF_DROP; |
88 | return 0; | 91 | return 0; |
89 | } | 92 | } |
90 | 93 | ||
91 | if ((svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol, | 94 | svc = ip_vs_service_get(af, skb->mark, iph.protocol, |
92 | ip_hdr(skb)->daddr, uh->dest))) { | 95 | &iph.daddr, uh->dest); |
96 | if (svc) { | ||
93 | if (ip_vs_todrop()) { | 97 | if (ip_vs_todrop()) { |
94 | /* | 98 | /* |
95 | * It seems that we are very loaded. | 99 | * It seems that we are very loaded. |
@@ -116,23 +120,63 @@ udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
116 | 120 | ||
117 | 121 | ||
118 | static inline void | 122 | static inline void |
119 | udp_fast_csum_update(struct udphdr *uhdr, __be32 oldip, __be32 newip, | 123 | udp_fast_csum_update(int af, struct udphdr *uhdr, |
124 | const union nf_inet_addr *oldip, | ||
125 | const union nf_inet_addr *newip, | ||
120 | __be16 oldport, __be16 newport) | 126 | __be16 oldport, __be16 newport) |
121 | { | 127 | { |
122 | uhdr->check = | 128 | #ifdef CONFIG_IP_VS_IPV6 |
123 | csum_fold(ip_vs_check_diff4(oldip, newip, | 129 | if (af == AF_INET6) |
124 | ip_vs_check_diff2(oldport, newport, | 130 | uhdr->check = |
125 | ~csum_unfold(uhdr->check)))); | 131 | csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, |
132 | ip_vs_check_diff2(oldport, newport, | ||
133 | ~csum_unfold(uhdr->check)))); | ||
134 | else | ||
135 | #endif | ||
136 | uhdr->check = | ||
137 | csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, | ||
138 | ip_vs_check_diff2(oldport, newport, | ||
139 | ~csum_unfold(uhdr->check)))); | ||
126 | if (!uhdr->check) | 140 | if (!uhdr->check) |
127 | uhdr->check = CSUM_MANGLED_0; | 141 | uhdr->check = CSUM_MANGLED_0; |
128 | } | 142 | } |
129 | 143 | ||
144 | static inline void | ||
145 | udp_partial_csum_update(int af, struct udphdr *uhdr, | ||
146 | const union nf_inet_addr *oldip, | ||
147 | const union nf_inet_addr *newip, | ||
148 | __be16 oldlen, __be16 newlen) | ||
149 | { | ||
150 | #ifdef CONFIG_IP_VS_IPV6 | ||
151 | if (af == AF_INET6) | ||
152 | uhdr->check = | ||
153 | csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, | ||
154 | ip_vs_check_diff2(oldlen, newlen, | ||
155 | ~csum_unfold(uhdr->check)))); | ||
156 | else | ||
157 | #endif | ||
158 | uhdr->check = | ||
159 | csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, | ||
160 | ip_vs_check_diff2(oldlen, newlen, | ||
161 | ~csum_unfold(uhdr->check)))); | ||
162 | } | ||
163 | |||
164 | |||
130 | static int | 165 | static int |
131 | udp_snat_handler(struct sk_buff *skb, | 166 | udp_snat_handler(struct sk_buff *skb, |
132 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 167 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) |
133 | { | 168 | { |
134 | struct udphdr *udph; | 169 | struct udphdr *udph; |
135 | const unsigned int udphoff = ip_hdrlen(skb); | 170 | unsigned int udphoff; |
171 | int oldlen; | ||
172 | |||
173 | #ifdef CONFIG_IP_VS_IPV6 | ||
174 | if (cp->af == AF_INET6) | ||
175 | udphoff = sizeof(struct ipv6hdr); | ||
176 | else | ||
177 | #endif | ||
178 | udphoff = ip_hdrlen(skb); | ||
179 | oldlen = skb->len - udphoff; | ||
136 | 180 | ||
137 | /* csum_check requires unshared skb */ | 181 | /* csum_check requires unshared skb */ |
138 | if (!skb_make_writable(skb, udphoff+sizeof(*udph))) | 182 | if (!skb_make_writable(skb, udphoff+sizeof(*udph))) |
@@ -140,7 +184,7 @@ udp_snat_handler(struct sk_buff *skb, | |||
140 | 184 | ||
141 | if (unlikely(cp->app != NULL)) { | 185 | if (unlikely(cp->app != NULL)) { |
142 | /* Some checks before mangling */ | 186 | /* Some checks before mangling */ |
143 | if (pp->csum_check && !pp->csum_check(skb, pp)) | 187 | if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) |
144 | return 0; | 188 | return 0; |
145 | 189 | ||
146 | /* | 190 | /* |
@@ -150,15 +194,19 @@ udp_snat_handler(struct sk_buff *skb, | |||
150 | return 0; | 194 | return 0; |
151 | } | 195 | } |
152 | 196 | ||
153 | udph = (void *)ip_hdr(skb) + udphoff; | 197 | udph = (void *)skb_network_header(skb) + udphoff; |
154 | udph->source = cp->vport; | 198 | udph->source = cp->vport; |
155 | 199 | ||
156 | /* | 200 | /* |
157 | * Adjust UDP checksums | 201 | * Adjust UDP checksums |
158 | */ | 202 | */ |
159 | if (!cp->app && (udph->check != 0)) { | 203 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
204 | udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, | ||
205 | htonl(oldlen), | ||
206 | htonl(skb->len - udphoff)); | ||
207 | } else if (!cp->app && (udph->check != 0)) { | ||
160 | /* Only port and addr are changed, do fast csum update */ | 208 | /* Only port and addr are changed, do fast csum update */ |
161 | udp_fast_csum_update(udph, cp->daddr, cp->vaddr, | 209 | udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, |
162 | cp->dport, cp->vport); | 210 | cp->dport, cp->vport); |
163 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 211 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
164 | skb->ip_summed = CHECKSUM_NONE; | 212 | skb->ip_summed = CHECKSUM_NONE; |
@@ -166,9 +214,19 @@ udp_snat_handler(struct sk_buff *skb, | |||
166 | /* full checksum calculation */ | 214 | /* full checksum calculation */ |
167 | udph->check = 0; | 215 | udph->check = 0; |
168 | skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); | 216 | skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); |
169 | udph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, | 217 | #ifdef CONFIG_IP_VS_IPV6 |
170 | skb->len - udphoff, | 218 | if (cp->af == AF_INET6) |
171 | cp->protocol, skb->csum); | 219 | udph->check = csum_ipv6_magic(&cp->vaddr.in6, |
220 | &cp->caddr.in6, | ||
221 | skb->len - udphoff, | ||
222 | cp->protocol, skb->csum); | ||
223 | else | ||
224 | #endif | ||
225 | udph->check = csum_tcpudp_magic(cp->vaddr.ip, | ||
226 | cp->caddr.ip, | ||
227 | skb->len - udphoff, | ||
228 | cp->protocol, | ||
229 | skb->csum); | ||
172 | if (udph->check == 0) | 230 | if (udph->check == 0) |
173 | udph->check = CSUM_MANGLED_0; | 231 | udph->check = CSUM_MANGLED_0; |
174 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", | 232 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", |
@@ -184,7 +242,16 @@ udp_dnat_handler(struct sk_buff *skb, | |||
184 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 242 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) |
185 | { | 243 | { |
186 | struct udphdr *udph; | 244 | struct udphdr *udph; |
187 | unsigned int udphoff = ip_hdrlen(skb); | 245 | unsigned int udphoff; |
246 | int oldlen; | ||
247 | |||
248 | #ifdef CONFIG_IP_VS_IPV6 | ||
249 | if (cp->af == AF_INET6) | ||
250 | udphoff = sizeof(struct ipv6hdr); | ||
251 | else | ||
252 | #endif | ||
253 | udphoff = ip_hdrlen(skb); | ||
254 | oldlen = skb->len - udphoff; | ||
188 | 255 | ||
189 | /* csum_check requires unshared skb */ | 256 | /* csum_check requires unshared skb */ |
190 | if (!skb_make_writable(skb, udphoff+sizeof(*udph))) | 257 | if (!skb_make_writable(skb, udphoff+sizeof(*udph))) |
@@ -192,7 +259,7 @@ udp_dnat_handler(struct sk_buff *skb, | |||
192 | 259 | ||
193 | if (unlikely(cp->app != NULL)) { | 260 | if (unlikely(cp->app != NULL)) { |
194 | /* Some checks before mangling */ | 261 | /* Some checks before mangling */ |
195 | if (pp->csum_check && !pp->csum_check(skb, pp)) | 262 | if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) |
196 | return 0; | 263 | return 0; |
197 | 264 | ||
198 | /* | 265 | /* |
@@ -203,15 +270,19 @@ udp_dnat_handler(struct sk_buff *skb, | |||
203 | return 0; | 270 | return 0; |
204 | } | 271 | } |
205 | 272 | ||
206 | udph = (void *)ip_hdr(skb) + udphoff; | 273 | udph = (void *)skb_network_header(skb) + udphoff; |
207 | udph->dest = cp->dport; | 274 | udph->dest = cp->dport; |
208 | 275 | ||
209 | /* | 276 | /* |
210 | * Adjust UDP checksums | 277 | * Adjust UDP checksums |
211 | */ | 278 | */ |
212 | if (!cp->app && (udph->check != 0)) { | 279 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
280 | udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, | ||
281 | htonl(oldlen), | ||
282 | htonl(skb->len - udphoff)); | ||
283 | } else if (!cp->app && (udph->check != 0)) { | ||
213 | /* Only port and addr are changed, do fast csum update */ | 284 | /* Only port and addr are changed, do fast csum update */ |
214 | udp_fast_csum_update(udph, cp->vaddr, cp->daddr, | 285 | udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr, |
215 | cp->vport, cp->dport); | 286 | cp->vport, cp->dport); |
216 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 287 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
217 | skb->ip_summed = CHECKSUM_NONE; | 288 | skb->ip_summed = CHECKSUM_NONE; |
@@ -219,9 +290,19 @@ udp_dnat_handler(struct sk_buff *skb, | |||
219 | /* full checksum calculation */ | 290 | /* full checksum calculation */ |
220 | udph->check = 0; | 291 | udph->check = 0; |
221 | skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); | 292 | skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); |
222 | udph->check = csum_tcpudp_magic(cp->caddr, cp->daddr, | 293 | #ifdef CONFIG_IP_VS_IPV6 |
223 | skb->len - udphoff, | 294 | if (cp->af == AF_INET6) |
224 | cp->protocol, skb->csum); | 295 | udph->check = csum_ipv6_magic(&cp->caddr.in6, |
296 | &cp->daddr.in6, | ||
297 | skb->len - udphoff, | ||
298 | cp->protocol, skb->csum); | ||
299 | else | ||
300 | #endif | ||
301 | udph->check = csum_tcpudp_magic(cp->caddr.ip, | ||
302 | cp->daddr.ip, | ||
303 | skb->len - udphoff, | ||
304 | cp->protocol, | ||
305 | skb->csum); | ||
225 | if (udph->check == 0) | 306 | if (udph->check == 0) |
226 | udph->check = CSUM_MANGLED_0; | 307 | udph->check = CSUM_MANGLED_0; |
227 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 308 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -231,10 +312,17 @@ udp_dnat_handler(struct sk_buff *skb, | |||
231 | 312 | ||
232 | 313 | ||
233 | static int | 314 | static int |
234 | udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) | 315 | udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) |
235 | { | 316 | { |
236 | struct udphdr _udph, *uh; | 317 | struct udphdr _udph, *uh; |
237 | const unsigned int udphoff = ip_hdrlen(skb); | 318 | unsigned int udphoff; |
319 | |||
320 | #ifdef CONFIG_IP_VS_IPV6 | ||
321 | if (af == AF_INET6) | ||
322 | udphoff = sizeof(struct ipv6hdr); | ||
323 | else | ||
324 | #endif | ||
325 | udphoff = ip_hdrlen(skb); | ||
238 | 326 | ||
239 | uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph); | 327 | uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph); |
240 | if (uh == NULL) | 328 | if (uh == NULL) |
@@ -246,15 +334,28 @@ udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) | |||
246 | skb->csum = skb_checksum(skb, udphoff, | 334 | skb->csum = skb_checksum(skb, udphoff, |
247 | skb->len - udphoff, 0); | 335 | skb->len - udphoff, 0); |
248 | case CHECKSUM_COMPLETE: | 336 | case CHECKSUM_COMPLETE: |
249 | if (csum_tcpudp_magic(ip_hdr(skb)->saddr, | 337 | #ifdef CONFIG_IP_VS_IPV6 |
250 | ip_hdr(skb)->daddr, | 338 | if (af == AF_INET6) { |
251 | skb->len - udphoff, | 339 | if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
252 | ip_hdr(skb)->protocol, | 340 | &ipv6_hdr(skb)->daddr, |
253 | skb->csum)) { | 341 | skb->len - udphoff, |
254 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | 342 | ipv6_hdr(skb)->nexthdr, |
255 | "Failed checksum for"); | 343 | skb->csum)) { |
256 | return 0; | 344 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, |
257 | } | 345 | "Failed checksum for"); |
346 | return 0; | ||
347 | } | ||
348 | } else | ||
349 | #endif | ||
350 | if (csum_tcpudp_magic(ip_hdr(skb)->saddr, | ||
351 | ip_hdr(skb)->daddr, | ||
352 | skb->len - udphoff, | ||
353 | ip_hdr(skb)->protocol, | ||
354 | skb->csum)) { | ||
355 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | ||
356 | "Failed checksum for"); | ||
357 | return 0; | ||
358 | } | ||
258 | break; | 359 | break; |
259 | default: | 360 | default: |
260 | /* No need to checksum. */ | 361 | /* No need to checksum. */ |
@@ -340,12 +441,15 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp) | |||
340 | break; | 441 | break; |
341 | spin_unlock(&udp_app_lock); | 442 | spin_unlock(&udp_app_lock); |
342 | 443 | ||
343 | IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" | 444 | IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->" |
344 | "%u.%u.%u.%u:%u to app %s on port %u\n", | 445 | "%s:%u to app %s on port %u\n", |
345 | __func__, | 446 | __func__, |
346 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 447 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), |
347 | NIPQUAD(cp->vaddr), ntohs(cp->vport), | 448 | ntohs(cp->cport), |
348 | inc->name, ntohs(inc->port)); | 449 | IP_VS_DBG_ADDR(cp->af, &cp->vaddr), |
450 | ntohs(cp->vport), | ||
451 | inc->name, ntohs(inc->port)); | ||
452 | |||
349 | cp->app = inc; | 453 | cp->app = inc; |
350 | if (inc->init_conn) | 454 | if (inc->init_conn) |
351 | result = inc->init_conn(inc, cp); | 455 | result = inc->init_conn(inc, cp); |
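
udp_snat_handler and udp_dnat_handler keep the long-standing UDP rule that a computed checksum of zero must be transmitted as all-ones (the kernel's CSUM_MANGLED_0), because zero on the wire means "no checksum". A small sketch of that final folding step, assuming a ones'-complement sum has already been accumulated in sum:

    #include <stdint.h>

    /* Fold a 32-bit ones'-complement accumulator to 16 bits and apply
     * the UDP rule: 0x0000 is reserved for "checksum not computed",
     * so a result of zero is sent as 0xFFFF instead (RFC 768). */
    static uint16_t udp_finish_csum(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xFFFF) + (sum >> 16);
        uint16_t check = (uint16_t)~sum;
        return check ? check : 0xFFFF;
    }

The incremental udp_fast_csum_update()/udp_partial_csum_update() paths above apply the same substitution after patching only the changed address and port words.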
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c index 358110d17e59..a22195f68ac4 100644 --- a/net/ipv4/ipvs/ip_vs_rr.c +++ b/net/ipv4/ipvs/ip_vs_rr.c | |||
@@ -32,12 +32,6 @@ static int ip_vs_rr_init_svc(struct ip_vs_service *svc) | |||
32 | } | 32 | } |
33 | 33 | ||
34 | 34 | ||
35 | static int ip_vs_rr_done_svc(struct ip_vs_service *svc) | ||
36 | { | ||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | |||
41 | static int ip_vs_rr_update_svc(struct ip_vs_service *svc) | 35 | static int ip_vs_rr_update_svc(struct ip_vs_service *svc) |
42 | { | 36 | { |
43 | svc->sched_data = &svc->destinations; | 37 | svc->sched_data = &svc->destinations; |
@@ -80,11 +74,11 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
80 | out: | 74 | out: |
81 | svc->sched_data = q; | 75 | svc->sched_data = q; |
82 | write_unlock(&svc->sched_lock); | 76 | write_unlock(&svc->sched_lock); |
83 | IP_VS_DBG(6, "RR: server %u.%u.%u.%u:%u " | 77 | IP_VS_DBG_BUF(6, "RR: server %s:%u " |
84 | "activeconns %d refcnt %d weight %d\n", | 78 | "activeconns %d refcnt %d weight %d\n", |
85 | NIPQUAD(dest->addr), ntohs(dest->port), | 79 | IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port), |
86 | atomic_read(&dest->activeconns), | 80 | atomic_read(&dest->activeconns), |
87 | atomic_read(&dest->refcnt), atomic_read(&dest->weight)); | 81 | atomic_read(&dest->refcnt), atomic_read(&dest->weight)); |
88 | 82 | ||
89 | return dest; | 83 | return dest; |
90 | } | 84 | } |
@@ -95,8 +89,10 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = { | |||
95 | .refcnt = ATOMIC_INIT(0), | 89 | .refcnt = ATOMIC_INIT(0), |
96 | .module = THIS_MODULE, | 90 | .module = THIS_MODULE, |
97 | .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list), | 91 | .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list), |
92 | #ifdef CONFIG_IP_VS_IPV6 | ||
93 | .supports_ipv6 = 1, | ||
94 | #endif | ||
98 | .init_service = ip_vs_rr_init_svc, | 95 | .init_service = ip_vs_rr_init_svc, |
99 | .done_service = ip_vs_rr_done_svc, | ||
100 | .update_service = ip_vs_rr_update_svc, | 96 | .update_service = ip_vs_rr_update_svc, |
101 | .schedule = ip_vs_rr_schedule, | 97 | .schedule = ip_vs_rr_schedule, |
102 | }; | 98 | }; |
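
Dropping ip_vs_rr_done_svc (and, further down, the no-op init/done/update stubs in SED and WLC) relies on the registration core treating a NULL callback as "nothing to do". A hedged sketch of that optional-callback convention, using invented names rather than the real ip_vs_scheduler layout:

    #include <stddef.h>

    struct svc;                               /* opaque service, illustrative */

    struct sched_ops {                        /* hypothetical plugin vtable */
        const char *name;
        int  supports_ipv6;                   /* advertised capability flag */
        int  (*init_service)(struct svc *);   /* optional: may be NULL */
        int  (*done_service)(struct svc *);   /* optional: may be NULL */
    };

    static int bind_scheduler(struct svc *svc, const struct sched_ops *ops)
    {
        /* NULL means "no per-service setup needed" -- no stub required. */
        if (ops->init_service)
            return ops->init_service(svc);
        return 0;
    }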
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c index 77663d84cbd1..7d2f22f04b83 100644 --- a/net/ipv4/ipvs/ip_vs_sed.c +++ b/net/ipv4/ipvs/ip_vs_sed.c | |||
@@ -41,27 +41,6 @@ | |||
41 | #include <net/ip_vs.h> | 41 | #include <net/ip_vs.h> |
42 | 42 | ||
43 | 43 | ||
44 | static int | ||
45 | ip_vs_sed_init_svc(struct ip_vs_service *svc) | ||
46 | { | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | |||
51 | static int | ||
52 | ip_vs_sed_done_svc(struct ip_vs_service *svc) | ||
53 | { | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | |||
58 | static int | ||
59 | ip_vs_sed_update_svc(struct ip_vs_service *svc) | ||
60 | { | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | |||
65 | static inline unsigned int | 44 | static inline unsigned int |
66 | ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) | 45 | ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) |
67 | { | 46 | { |
@@ -122,12 +101,12 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
122 | } | 101 | } |
123 | } | 102 | } |
124 | 103 | ||
125 | IP_VS_DBG(6, "SED: server %u.%u.%u.%u:%u " | 104 | IP_VS_DBG_BUF(6, "SED: server %s:%u " |
126 | "activeconns %d refcnt %d weight %d overhead %d\n", | 105 | "activeconns %d refcnt %d weight %d overhead %d\n", |
127 | NIPQUAD(least->addr), ntohs(least->port), | 106 | IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), |
128 | atomic_read(&least->activeconns), | 107 | atomic_read(&least->activeconns), |
129 | atomic_read(&least->refcnt), | 108 | atomic_read(&least->refcnt), |
130 | atomic_read(&least->weight), loh); | 109 | atomic_read(&least->weight), loh); |
131 | 110 | ||
132 | return least; | 111 | return least; |
133 | } | 112 | } |
@@ -139,9 +118,9 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler = | |||
139 | .refcnt = ATOMIC_INIT(0), | 118 | .refcnt = ATOMIC_INIT(0), |
140 | .module = THIS_MODULE, | 119 | .module = THIS_MODULE, |
141 | .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list), | 120 | .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list), |
142 | .init_service = ip_vs_sed_init_svc, | 121 | #ifdef CONFIG_IP_VS_IPV6 |
143 | .done_service = ip_vs_sed_done_svc, | 122 | .supports_ipv6 = 1, |
144 | .update_service = ip_vs_sed_update_svc, | 123 | #endif |
145 | .schedule = ip_vs_sed_schedule, | 124 | .schedule = ip_vs_sed_schedule, |
146 | }; | 125 | }; |
147 | 126 | ||
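
SED (and WLC below) pick the destination with the smallest overhead-to-weight ratio. The in-tree loop avoids division by cross-multiplying, roughly loh * weight(dest) > doh * weight(least); a standalone sketch of that comparison, with plain ints in place of the atomic counters:

    /* Pick the server minimising overhead/weight without dividing:
     * a/b < c/d  <=>  a*d < c*b  for positive weights. */
    struct server { int overhead; int weight; };

    static int better_than(const struct server *cand, const struct server *best)
    {
        return (long long)cand->overhead * best->weight <
               (long long)best->overhead * cand->weight;
    }

Widening to long long guards the products against overflow when connection counts and weights are both large.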
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c index 7b979e228056..1d96de27fefd 100644 --- a/net/ipv4/ipvs/ip_vs_sh.c +++ b/net/ipv4/ipvs/ip_vs_sh.c | |||
@@ -215,7 +215,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
215 | IP_VS_DBG(6, "SH: source IP address %u.%u.%u.%u " | 215 | IP_VS_DBG(6, "SH: source IP address %u.%u.%u.%u " |
216 | "--> server %u.%u.%u.%u:%d\n", | 216 | "--> server %u.%u.%u.%u:%d\n", |
217 | NIPQUAD(iph->saddr), | 217 | NIPQUAD(iph->saddr), |
218 | NIPQUAD(dest->addr), | 218 | NIPQUAD(dest->addr.ip), |
219 | ntohs(dest->port)); | 219 | ntohs(dest->port)); |
220 | 220 | ||
221 | return dest; | 221 | return dest; |
@@ -231,6 +231,9 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler = | |||
231 | .refcnt = ATOMIC_INIT(0), | 231 | .refcnt = ATOMIC_INIT(0), |
232 | .module = THIS_MODULE, | 232 | .module = THIS_MODULE, |
233 | .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list), | 233 | .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list), |
234 | #ifdef CONFIG_IP_VS_IPV6 | ||
235 | .supports_ipv6 = 0, | ||
236 | #endif | ||
234 | .init_service = ip_vs_sh_init_svc, | 237 | .init_service = ip_vs_sh_init_svc, |
235 | .done_service = ip_vs_sh_done_svc, | 238 | .done_service = ip_vs_sh_done_svc, |
236 | .update_service = ip_vs_sh_update_svc, | 239 | .update_service = ip_vs_sh_update_svc, |
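
ip_vs_sh is the one scheduler here that explicitly sets supports_ipv6 = 0: its hash key is still derived from a 32-bit source address, so only AF_INET services can use it until the hash is widened. A simplified sketch of hashing a 32-bit address into a fixed-size bucket table (the multiplier is the usual golden-ratio constant; the real table size and key derivation may differ):

    #include <stdint.h>
    #include <arpa/inet.h>   /* ntohl */

    #define SH_TAB_BITS 8
    #define SH_TAB_SIZE (1 << SH_TAB_BITS)
    #define SH_TAB_MASK (SH_TAB_SIZE - 1)

    /* Map a network-order IPv4 source address to a bucket index. */
    static unsigned int sh_hashkey(uint32_t saddr_be)
    {
        return (ntohl(saddr_be) * 2654435761U) & SH_TAB_MASK;
    }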
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c index a652da2c3200..28237a5f62e2 100644 --- a/net/ipv4/ipvs/ip_vs_sync.c +++ b/net/ipv4/ipvs/ip_vs_sync.c | |||
@@ -256,9 +256,9 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp) | |||
256 | s->cport = cp->cport; | 256 | s->cport = cp->cport; |
257 | s->vport = cp->vport; | 257 | s->vport = cp->vport; |
258 | s->dport = cp->dport; | 258 | s->dport = cp->dport; |
259 | s->caddr = cp->caddr; | 259 | s->caddr = cp->caddr.ip; |
260 | s->vaddr = cp->vaddr; | 260 | s->vaddr = cp->vaddr.ip; |
261 | s->daddr = cp->daddr; | 261 | s->daddr = cp->daddr.ip; |
262 | s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED); | 262 | s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED); |
263 | s->state = htons(cp->state); | 263 | s->state = htons(cp->state); |
264 | if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { | 264 | if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { |
@@ -366,21 +366,28 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen) | |||
366 | } | 366 | } |
367 | 367 | ||
368 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) | 368 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) |
369 | cp = ip_vs_conn_in_get(s->protocol, | 369 | cp = ip_vs_conn_in_get(AF_INET, s->protocol, |
370 | s->caddr, s->cport, | 370 | (union nf_inet_addr *)&s->caddr, |
371 | s->vaddr, s->vport); | 371 | s->cport, |
372 | (union nf_inet_addr *)&s->vaddr, | ||
373 | s->vport); | ||
372 | else | 374 | else |
373 | cp = ip_vs_ct_in_get(s->protocol, | 375 | cp = ip_vs_ct_in_get(AF_INET, s->protocol, |
374 | s->caddr, s->cport, | 376 | (union nf_inet_addr *)&s->caddr, |
375 | s->vaddr, s->vport); | 377 | s->cport, |
378 | (union nf_inet_addr *)&s->vaddr, | ||
379 | s->vport); | ||
376 | if (!cp) { | 380 | if (!cp) { |
377 | /* | 381 | /* |
378 | * Find the appropriate destination for the connection. | 382 | * Find the appropriate destination for the connection. |
379 | * If it is not found the connection will remain unbound | 383 | * If it is not found the connection will remain unbound |
380 | * but still handled. | 384 | * but still handled. |
381 | */ | 385 | */ |
382 | dest = ip_vs_find_dest(s->daddr, s->dport, | 386 | dest = ip_vs_find_dest(AF_INET, |
383 | s->vaddr, s->vport, | 387 | (union nf_inet_addr *)&s->daddr, |
388 | s->dport, | ||
389 | (union nf_inet_addr *)&s->vaddr, | ||
390 | s->vport, | ||
384 | s->protocol); | 391 | s->protocol); |
385 | /* Set the appropriate activity flag */ | 392 | /* Set the appropriate activity flag */ |
386 | if (s->protocol == IPPROTO_TCP) { | 393 | if (s->protocol == IPPROTO_TCP) { |
@@ -389,10 +396,13 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen) | |||
389 | else | 396 | else |
390 | flags &= ~IP_VS_CONN_F_INACTIVE; | 397 | flags &= ~IP_VS_CONN_F_INACTIVE; |
391 | } | 398 | } |
392 | cp = ip_vs_conn_new(s->protocol, | 399 | cp = ip_vs_conn_new(AF_INET, s->protocol, |
393 | s->caddr, s->cport, | 400 | (union nf_inet_addr *)&s->caddr, |
394 | s->vaddr, s->vport, | 401 | s->cport, |
395 | s->daddr, s->dport, | 402 | (union nf_inet_addr *)&s->vaddr, |
403 | s->vport, | ||
404 | (union nf_inet_addr *)&s->daddr, | ||
405 | s->dport, | ||
396 | flags, dest); | 406 | flags, dest); |
397 | if (dest) | 407 | if (dest) |
398 | atomic_dec(&dest->refcnt); | 408 | atomic_dec(&dest->refcnt); |
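
The sync daemon still speaks an IPv4-only wire format, so the message fields stay __be32 and are handed to the now AF-aware lookup and creation helpers through casts to union nf_inet_addr *, with the family hard-wired to AF_INET. That only works because the union's IPv4 member occupies its first four bytes and nothing else is read; a simplified illustration of the layout being relied on (types and names approximate):

    #include <stdint.h>

    /* Simplified stand-in for the kernel's union nf_inet_addr. */
    union inet_addr {
        uint32_t ip;        /* IPv4, network order */
        uint32_t ip6[4];    /* IPv6, network order */
    };

    /* A v4-only sync record can be viewed through the union as long as
     * only the .ip member is touched; reading .ip6 would overrun the
     * 4-byte field, which is why these call sites stay AF_INET-only. */
    static uint32_t sync_caddr(const uint32_t *caddr_field)
    {
        const union inet_addr *a = (const union inet_addr *)caddr_field;
        return a->ip;
    }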
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c index 9b0ef86bb1f7..8c596e712599 100644 --- a/net/ipv4/ipvs/ip_vs_wlc.c +++ b/net/ipv4/ipvs/ip_vs_wlc.c | |||
@@ -25,27 +25,6 @@ | |||
25 | #include <net/ip_vs.h> | 25 | #include <net/ip_vs.h> |
26 | 26 | ||
27 | 27 | ||
28 | static int | ||
29 | ip_vs_wlc_init_svc(struct ip_vs_service *svc) | ||
30 | { | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | |||
35 | static int | ||
36 | ip_vs_wlc_done_svc(struct ip_vs_service *svc) | ||
37 | { | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | |||
42 | static int | ||
43 | ip_vs_wlc_update_svc(struct ip_vs_service *svc) | ||
44 | { | ||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | |||
49 | static inline unsigned int | 28 | static inline unsigned int |
50 | ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest) | 29 | ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest) |
51 | { | 30 | { |
@@ -110,12 +89,12 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
110 | } | 89 | } |
111 | } | 90 | } |
112 | 91 | ||
113 | IP_VS_DBG(6, "WLC: server %u.%u.%u.%u:%u " | 92 | IP_VS_DBG_BUF(6, "WLC: server %s:%u " |
114 | "activeconns %d refcnt %d weight %d overhead %d\n", | 93 | "activeconns %d refcnt %d weight %d overhead %d\n", |
115 | NIPQUAD(least->addr), ntohs(least->port), | 94 | IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), |
116 | atomic_read(&least->activeconns), | 95 | atomic_read(&least->activeconns), |
117 | atomic_read(&least->refcnt), | 96 | atomic_read(&least->refcnt), |
118 | atomic_read(&least->weight), loh); | 97 | atomic_read(&least->weight), loh); |
119 | 98 | ||
120 | return least; | 99 | return least; |
121 | } | 100 | } |
@@ -127,9 +106,9 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler = | |||
127 | .refcnt = ATOMIC_INIT(0), | 106 | .refcnt = ATOMIC_INIT(0), |
128 | .module = THIS_MODULE, | 107 | .module = THIS_MODULE, |
129 | .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list), | 108 | .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list), |
130 | .init_service = ip_vs_wlc_init_svc, | 109 | #ifdef CONFIG_IP_VS_IPV6 |
131 | .done_service = ip_vs_wlc_done_svc, | 110 | .supports_ipv6 = 1, |
132 | .update_service = ip_vs_wlc_update_svc, | 111 | #endif |
133 | .schedule = ip_vs_wlc_schedule, | 112 | .schedule = ip_vs_wlc_schedule, |
134 | }; | 113 | }; |
135 | 114 | ||
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c index 0d86a79b87b5..7ea92fed50bf 100644 --- a/net/ipv4/ipvs/ip_vs_wrr.c +++ b/net/ipv4/ipvs/ip_vs_wrr.c | |||
@@ -195,12 +195,12 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
195 | } | 195 | } |
196 | } | 196 | } |
197 | 197 | ||
198 | IP_VS_DBG(6, "WRR: server %u.%u.%u.%u:%u " | 198 | IP_VS_DBG_BUF(6, "WRR: server %s:%u " |
199 | "activeconns %d refcnt %d weight %d\n", | 199 | "activeconns %d refcnt %d weight %d\n", |
200 | NIPQUAD(dest->addr), ntohs(dest->port), | 200 | IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port), |
201 | atomic_read(&dest->activeconns), | 201 | atomic_read(&dest->activeconns), |
202 | atomic_read(&dest->refcnt), | 202 | atomic_read(&dest->refcnt), |
203 | atomic_read(&dest->weight)); | 203 | atomic_read(&dest->weight)); |
204 | 204 | ||
205 | out: | 205 | out: |
206 | write_unlock(&svc->sched_lock); | 206 | write_unlock(&svc->sched_lock); |
@@ -213,6 +213,9 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = { | |||
213 | .refcnt = ATOMIC_INIT(0), | 213 | .refcnt = ATOMIC_INIT(0), |
214 | .module = THIS_MODULE, | 214 | .module = THIS_MODULE, |
215 | .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list), | 215 | .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list), |
216 | #ifdef CONFIG_IP_VS_IPV6 | ||
217 | .supports_ipv6 = 1, | ||
218 | #endif | ||
216 | .init_service = ip_vs_wrr_init_svc, | 219 | .init_service = ip_vs_wrr_init_svc, |
217 | .done_service = ip_vs_wrr_done_svc, | 220 | .done_service = ip_vs_wrr_done_svc, |
218 | .update_service = ip_vs_wrr_update_svc, | 221 | .update_service = ip_vs_wrr_update_svc, |
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c index 9892d4aca42e..02ddc2b3ce2e 100644 --- a/net/ipv4/ipvs/ip_vs_xmit.c +++ b/net/ipv4/ipvs/ip_vs_xmit.c | |||
@@ -20,6 +20,9 @@ | |||
20 | #include <net/udp.h> | 20 | #include <net/udp.h> |
21 | #include <net/icmp.h> /* for icmp_send */ | 21 | #include <net/icmp.h> /* for icmp_send */ |
22 | #include <net/route.h> /* for ip_route_output */ | 22 | #include <net/route.h> /* for ip_route_output */ |
23 | #include <net/ipv6.h> | ||
24 | #include <net/ip6_route.h> | ||
25 | #include <linux/icmpv6.h> | ||
23 | #include <linux/netfilter.h> | 26 | #include <linux/netfilter.h> |
24 | #include <linux/netfilter_ipv4.h> | 27 | #include <linux/netfilter_ipv4.h> |
25 | 28 | ||
@@ -47,7 +50,8 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos, u32 cookie) | |||
47 | 50 | ||
48 | if (!dst) | 51 | if (!dst) |
49 | return NULL; | 52 | return NULL; |
50 | if ((dst->obsolete || rtos != dest->dst_rtos) && | 53 | if ((dst->obsolete |
54 | || (dest->af == AF_INET && rtos != dest->dst_rtos)) && | ||
51 | dst->ops->check(dst, cookie) == NULL) { | 55 | dst->ops->check(dst, cookie) == NULL) { |
52 | dest->dst_cache = NULL; | 56 | dest->dst_cache = NULL; |
53 | dst_release(dst); | 57 | dst_release(dst); |
@@ -71,7 +75,7 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos) | |||
71 | .oif = 0, | 75 | .oif = 0, |
72 | .nl_u = { | 76 | .nl_u = { |
73 | .ip4_u = { | 77 | .ip4_u = { |
74 | .daddr = dest->addr, | 78 | .daddr = dest->addr.ip, |
75 | .saddr = 0, | 79 | .saddr = 0, |
76 | .tos = rtos, } }, | 80 | .tos = rtos, } }, |
77 | }; | 81 | }; |
@@ -80,12 +84,12 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos) | |||
80 | spin_unlock(&dest->dst_lock); | 84 | spin_unlock(&dest->dst_lock); |
81 | IP_VS_DBG_RL("ip_route_output error, " | 85 | IP_VS_DBG_RL("ip_route_output error, " |
82 | "dest: %u.%u.%u.%u\n", | 86 | "dest: %u.%u.%u.%u\n", |
83 | NIPQUAD(dest->addr)); | 87 | NIPQUAD(dest->addr.ip)); |
84 | return NULL; | 88 | return NULL; |
85 | } | 89 | } |
86 | __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst)); | 90 | __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst)); |
87 | IP_VS_DBG(10, "new dst %u.%u.%u.%u, refcnt=%d, rtos=%X\n", | 91 | IP_VS_DBG(10, "new dst %u.%u.%u.%u, refcnt=%d, rtos=%X\n", |
88 | NIPQUAD(dest->addr), | 92 | NIPQUAD(dest->addr.ip), |
89 | atomic_read(&rt->u.dst.__refcnt), rtos); | 93 | atomic_read(&rt->u.dst.__refcnt), rtos); |
90 | } | 94 | } |
91 | spin_unlock(&dest->dst_lock); | 95 | spin_unlock(&dest->dst_lock); |
@@ -94,14 +98,14 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos) | |||
94 | .oif = 0, | 98 | .oif = 0, |
95 | .nl_u = { | 99 | .nl_u = { |
96 | .ip4_u = { | 100 | .ip4_u = { |
97 | .daddr = cp->daddr, | 101 | .daddr = cp->daddr.ip, |
98 | .saddr = 0, | 102 | .saddr = 0, |
99 | .tos = rtos, } }, | 103 | .tos = rtos, } }, |
100 | }; | 104 | }; |
101 | 105 | ||
102 | if (ip_route_output_key(&init_net, &rt, &fl)) { | 106 | if (ip_route_output_key(&init_net, &rt, &fl)) { |
103 | IP_VS_DBG_RL("ip_route_output error, dest: " | 107 | IP_VS_DBG_RL("ip_route_output error, dest: " |
104 | "%u.%u.%u.%u\n", NIPQUAD(cp->daddr)); | 108 | "%u.%u.%u.%u\n", NIPQUAD(cp->daddr.ip)); |
105 | return NULL; | 109 | return NULL; |
106 | } | 110 | } |
107 | } | 111 | } |
@@ -109,6 +113,70 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos) | |||
109 | return rt; | 113 | return rt; |
110 | } | 114 | } |
111 | 115 | ||
116 | #ifdef CONFIG_IP_VS_IPV6 | ||
117 | static struct rt6_info * | ||
118 | __ip_vs_get_out_rt_v6(struct ip_vs_conn *cp) | ||
119 | { | ||
120 | struct rt6_info *rt; /* Route to the other host */ | ||
121 | struct ip_vs_dest *dest = cp->dest; | ||
122 | |||
123 | if (dest) { | ||
124 | spin_lock(&dest->dst_lock); | ||
125 | rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0, 0); | ||
126 | if (!rt) { | ||
127 | struct flowi fl = { | ||
128 | .oif = 0, | ||
129 | .nl_u = { | ||
130 | .ip6_u = { | ||
131 | .daddr = dest->addr.in6, | ||
132 | .saddr = { | ||
133 | .s6_addr32 = | ||
134 | { 0, 0, 0, 0 }, | ||
135 | }, | ||
136 | }, | ||
137 | }, | ||
138 | }; | ||
139 | |||
140 | rt = (struct rt6_info *)ip6_route_output(&init_net, | ||
141 | NULL, &fl); | ||
142 | if (!rt) { | ||
143 | spin_unlock(&dest->dst_lock); | ||
144 | IP_VS_DBG_RL("ip6_route_output error, " | ||
145 | "dest: " NIP6_FMT "\n", | ||
146 | NIP6(dest->addr.in6)); | ||
147 | return NULL; | ||
148 | } | ||
149 | __ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst)); | ||
150 | IP_VS_DBG(10, "new dst " NIP6_FMT ", refcnt=%d\n", | ||
151 | NIP6(dest->addr.in6), | ||
152 | atomic_read(&rt->u.dst.__refcnt)); | ||
153 | } | ||
154 | spin_unlock(&dest->dst_lock); | ||
155 | } else { | ||
156 | struct flowi fl = { | ||
157 | .oif = 0, | ||
158 | .nl_u = { | ||
159 | .ip6_u = { | ||
160 | .daddr = cp->daddr.in6, | ||
161 | .saddr = { | ||
162 | .s6_addr32 = { 0, 0, 0, 0 }, | ||
163 | }, | ||
164 | }, | ||
165 | }, | ||
166 | }; | ||
167 | |||
168 | rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); | ||
169 | if (!rt) { | ||
170 | IP_VS_DBG_RL("ip6_route_output error, dest: " | ||
171 | NIP6_FMT "\n", NIP6(cp->daddr.in6)); | ||
172 | return NULL; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | return rt; | ||
177 | } | ||
178 | #endif | ||
179 | |||
112 | 180 | ||
113 | /* | 181 | /* |
114 | * Release dest->dst_cache before a dest is removed | 182 | * Release dest->dst_cache before a dest is removed |
@@ -123,11 +191,11 @@ ip_vs_dst_reset(struct ip_vs_dest *dest) | |||
123 | dst_release(old_dst); | 191 | dst_release(old_dst); |
124 | } | 192 | } |
125 | 193 | ||
126 | #define IP_VS_XMIT(skb, rt) \ | 194 | #define IP_VS_XMIT(pf, skb, rt) \ |
127 | do { \ | 195 | do { \ |
128 | (skb)->ipvs_property = 1; \ | 196 | (skb)->ipvs_property = 1; \ |
129 | skb_forward_csum(skb); \ | 197 | skb_forward_csum(skb); \ |
130 | NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, (skb), NULL, \ | 198 | NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \ |
131 | (rt)->u.dst.dev, dst_output); \ | 199 | (rt)->u.dst.dev, dst_output); \ |
132 | } while (0) | 200 | } while (0) |
133 | 201 | ||
@@ -200,7 +268,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
200 | /* Another hack: avoid icmp_send in ip_fragment */ | 268 | /* Another hack: avoid icmp_send in ip_fragment */ |
201 | skb->local_df = 1; | 269 | skb->local_df = 1; |
202 | 270 | ||
203 | IP_VS_XMIT(skb, rt); | 271 | IP_VS_XMIT(PF_INET, skb, rt); |
204 | 272 | ||
205 | LeaveFunction(10); | 273 | LeaveFunction(10); |
206 | return NF_STOLEN; | 274 | return NF_STOLEN; |
@@ -213,6 +281,70 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
213 | return NF_STOLEN; | 281 | return NF_STOLEN; |
214 | } | 282 | } |
215 | 283 | ||
284 | #ifdef CONFIG_IP_VS_IPV6 | ||
285 | int | ||
286 | ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
287 | struct ip_vs_protocol *pp) | ||
288 | { | ||
289 | struct rt6_info *rt; /* Route to the other host */ | ||
290 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
291 | int mtu; | ||
292 | struct flowi fl = { | ||
293 | .oif = 0, | ||
294 | .nl_u = { | ||
295 | .ip6_u = { | ||
296 | .daddr = iph->daddr, | ||
297 | .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } }, | ||
298 | }; | ||
299 | |||
300 | EnterFunction(10); | ||
301 | |||
302 | rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); | ||
303 | if (!rt) { | ||
304 | IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): ip6_route_output error, " | ||
305 | "dest: " NIP6_FMT "\n", NIP6(iph->daddr)); | ||
306 | goto tx_error_icmp; | ||
307 | } | ||
308 | |||
309 | /* MTU checking */ | ||
310 | mtu = dst_mtu(&rt->u.dst); | ||
311 | if (skb->len > mtu) { | ||
312 | dst_release(&rt->u.dst); | ||
313 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | ||
314 | IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): frag needed\n"); | ||
315 | goto tx_error; | ||
316 | } | ||
317 | |||
318 | /* | ||
319 | * Call ip_send_check because we are not sure it is called | ||
320 | * after ip_defrag. Is copy-on-write needed? | ||
321 | */ | ||
322 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
323 | if (unlikely(skb == NULL)) { | ||
324 | dst_release(&rt->u.dst); | ||
325 | return NF_STOLEN; | ||
326 | } | ||
327 | |||
328 | /* drop old route */ | ||
329 | dst_release(skb->dst); | ||
330 | skb->dst = &rt->u.dst; | ||
331 | |||
332 | /* Another hack: avoid icmp_send in ip_fragment */ | ||
333 | skb->local_df = 1; | ||
334 | |||
335 | IP_VS_XMIT(PF_INET6, skb, rt); | ||
336 | |||
337 | LeaveFunction(10); | ||
338 | return NF_STOLEN; | ||
339 | |||
340 | tx_error_icmp: | ||
341 | dst_link_failure(skb); | ||
342 | tx_error: | ||
343 | kfree_skb(skb); | ||
344 | LeaveFunction(10); | ||
345 | return NF_STOLEN; | ||
346 | } | ||
347 | #endif | ||
216 | 348 | ||
217 | /* | 349 | /* |
218 | * NAT transmitter (only for outside-to-inside nat forwarding) | 350 | * NAT transmitter (only for outside-to-inside nat forwarding) |
@@ -264,7 +396,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
264 | /* mangle the packet */ | 396 | /* mangle the packet */ |
265 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) | 397 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) |
266 | goto tx_error; | 398 | goto tx_error; |
267 | ip_hdr(skb)->daddr = cp->daddr; | 399 | ip_hdr(skb)->daddr = cp->daddr.ip; |
268 | ip_send_check(ip_hdr(skb)); | 400 | ip_send_check(ip_hdr(skb)); |
269 | 401 | ||
270 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); | 402 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); |
@@ -276,7 +408,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
276 | /* Another hack: avoid icmp_send in ip_fragment */ | 408 | /* Another hack: avoid icmp_send in ip_fragment */ |
277 | skb->local_df = 1; | 409 | skb->local_df = 1; |
278 | 410 | ||
279 | IP_VS_XMIT(skb, rt); | 411 | IP_VS_XMIT(PF_INET, skb, rt); |
280 | 412 | ||
281 | LeaveFunction(10); | 413 | LeaveFunction(10); |
282 | return NF_STOLEN; | 414 | return NF_STOLEN; |
@@ -292,6 +424,83 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
292 | goto tx_error; | 424 | goto tx_error; |
293 | } | 425 | } |
294 | 426 | ||
427 | #ifdef CONFIG_IP_VS_IPV6 | ||
428 | int | ||
429 | ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
430 | struct ip_vs_protocol *pp) | ||
431 | { | ||
432 | struct rt6_info *rt; /* Route to the other host */ | ||
433 | int mtu; | ||
434 | |||
435 | EnterFunction(10); | ||
436 | |||
437 | /* check if it is a connection of no-client-port */ | ||
438 | if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) { | ||
439 | __be16 _pt, *p; | ||
440 | p = skb_header_pointer(skb, sizeof(struct ipv6hdr), | ||
441 | sizeof(_pt), &_pt); | ||
442 | if (p == NULL) | ||
443 | goto tx_error; | ||
444 | ip_vs_conn_fill_cport(cp, *p); | ||
445 | IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p)); | ||
446 | } | ||
447 | |||
448 | rt = __ip_vs_get_out_rt_v6(cp); | ||
449 | if (!rt) | ||
450 | goto tx_error_icmp; | ||
451 | |||
452 | /* MTU checking */ | ||
453 | mtu = dst_mtu(&rt->u.dst); | ||
454 | if (skb->len > mtu) { | ||
455 | dst_release(&rt->u.dst); | ||
456 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | ||
457 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | ||
458 | "ip_vs_nat_xmit_v6(): frag needed for"); | ||
459 | goto tx_error; | ||
460 | } | ||
461 | |||
462 | /* copy-on-write the packet before mangling it */ | ||
463 | if (!skb_make_writable(skb, sizeof(struct ipv6hdr))) | ||
464 | goto tx_error_put; | ||
465 | |||
466 | if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) | ||
467 | goto tx_error_put; | ||
468 | |||
469 | /* drop old route */ | ||
470 | dst_release(skb->dst); | ||
471 | skb->dst = &rt->u.dst; | ||
472 | |||
473 | /* mangle the packet */ | ||
474 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) | ||
475 | goto tx_error; | ||
476 | ipv6_hdr(skb)->daddr = cp->daddr.in6; | ||
477 | |||
478 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); | ||
479 | |||
480 | /* FIXME: when application helper enlarges the packet and the length | ||
481 | is larger than the MTU of outgoing device, there will be still | ||
482 | MTU problem. */ | ||
483 | |||
484 | /* Another hack: avoid icmp_send in ip_fragment */ | ||
485 | skb->local_df = 1; | ||
486 | |||
487 | IP_VS_XMIT(PF_INET6, skb, rt); | ||
488 | |||
489 | LeaveFunction(10); | ||
490 | return NF_STOLEN; | ||
491 | |||
492 | tx_error_icmp: | ||
493 | dst_link_failure(skb); | ||
494 | tx_error: | ||
495 | LeaveFunction(10); | ||
496 | kfree_skb(skb); | ||
497 | return NF_STOLEN; | ||
498 | tx_error_put: | ||
499 | dst_release(&rt->u.dst); | ||
500 | goto tx_error; | ||
501 | } | ||
502 | #endif | ||
503 | |||
295 | 504 | ||
296 | /* | 505 | /* |
297 | * IP Tunneling transmitter | 506 | * IP Tunneling transmitter |
@@ -423,6 +632,112 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
423 | return NF_STOLEN; | 632 | return NF_STOLEN; |
424 | } | 633 | } |
425 | 634 | ||
635 | #ifdef CONFIG_IP_VS_IPV6 | ||
636 | int | ||
637 | ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
638 | struct ip_vs_protocol *pp) | ||
639 | { | ||
640 | struct rt6_info *rt; /* Route to the other host */ | ||
641 | struct net_device *tdev; /* Device to other host */ | ||
642 | struct ipv6hdr *old_iph = ipv6_hdr(skb); | ||
643 | sk_buff_data_t old_transport_header = skb->transport_header; | ||
644 | struct ipv6hdr *iph; /* Our new IP header */ | ||
645 | unsigned int max_headroom; /* The extra header space needed */ | ||
646 | int mtu; | ||
647 | |||
648 | EnterFunction(10); | ||
649 | |||
650 | if (skb->protocol != htons(ETH_P_IPV6)) { | ||
651 | IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): protocol error, " | ||
652 | "ETH_P_IPV6: %d, skb protocol: %d\n", | ||
653 | htons(ETH_P_IPV6), skb->protocol); | ||
654 | goto tx_error; | ||
655 | } | ||
656 | |||
657 | rt = __ip_vs_get_out_rt_v6(cp); | ||
658 | if (!rt) | ||
659 | goto tx_error_icmp; | ||
660 | |||
661 | tdev = rt->u.dst.dev; | ||
662 | |||
663 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr); | ||
664 | /* TODO IPv6: do we need this check in IPv6? */ | ||
665 | if (mtu < 1280) { | ||
666 | dst_release(&rt->u.dst); | ||
667 | IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): mtu less than 1280\n"); | ||
668 | goto tx_error; | ||
669 | } | ||
670 | if (skb->dst) | ||
671 | skb->dst->ops->update_pmtu(skb->dst, mtu); | ||
672 | |||
673 | if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { | ||
674 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | ||
675 | dst_release(&rt->u.dst); | ||
676 | IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): frag needed\n"); | ||
677 | goto tx_error; | ||
678 | } | ||
679 | |||
680 | /* | ||
681 | * Okay, now see if we can stuff it in the buffer as-is. | ||
682 | */ | ||
683 | max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr); | ||
684 | |||
685 | if (skb_headroom(skb) < max_headroom | ||
686 | || skb_cloned(skb) || skb_shared(skb)) { | ||
687 | struct sk_buff *new_skb = | ||
688 | skb_realloc_headroom(skb, max_headroom); | ||
689 | if (!new_skb) { | ||
690 | dst_release(&rt->u.dst); | ||
691 | kfree_skb(skb); | ||
692 | IP_VS_ERR_RL("ip_vs_tunnel_xmit_v6(): no memory\n"); | ||
693 | return NF_STOLEN; | ||
694 | } | ||
695 | kfree_skb(skb); | ||
696 | skb = new_skb; | ||
697 | old_iph = ipv6_hdr(skb); | ||
698 | } | ||
699 | |||
700 | skb->transport_header = old_transport_header; | ||
701 | |||
702 | skb_push(skb, sizeof(struct ipv6hdr)); | ||
703 | skb_reset_network_header(skb); | ||
704 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
705 | |||
706 | /* drop old route */ | ||
707 | dst_release(skb->dst); | ||
708 | skb->dst = &rt->u.dst; | ||
709 | |||
710 | /* | ||
711 | * Push down and install the IPIP header. | ||
712 | */ | ||
713 | iph = ipv6_hdr(skb); | ||
714 | iph->version = 6; | ||
715 | iph->nexthdr = IPPROTO_IPV6; | ||
716 | iph->payload_len = old_iph->payload_len + sizeof(old_iph); | ||
717 | iph->priority = old_iph->priority; | ||
718 | memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl)); | ||
719 | iph->daddr = rt->rt6i_dst.addr; | ||
720 | iph->saddr = cp->vaddr.in6; /* rt->rt6i_src.addr; */ | ||
721 | iph->hop_limit = old_iph->hop_limit; | ||
722 | |||
723 | /* Another hack: avoid icmp_send in ip_fragment */ | ||
724 | skb->local_df = 1; | ||
725 | |||
726 | ip6_local_out(skb); | ||
727 | |||
728 | LeaveFunction(10); | ||
729 | |||
730 | return NF_STOLEN; | ||
731 | |||
732 | tx_error_icmp: | ||
733 | dst_link_failure(skb); | ||
734 | tx_error: | ||
735 | kfree_skb(skb); | ||
736 | LeaveFunction(10); | ||
737 | return NF_STOLEN; | ||
738 | } | ||
739 | #endif | ||
740 | |||
426 | 741 | ||
427 | /* | 742 | /* |
428 | * Direct Routing transmitter | 743 | * Direct Routing transmitter |
@@ -467,7 +782,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
467 | /* Another hack: avoid icmp_send in ip_fragment */ | 782 | /* Another hack: avoid icmp_send in ip_fragment */ |
468 | skb->local_df = 1; | 783 | skb->local_df = 1; |
469 | 784 | ||
470 | IP_VS_XMIT(skb, rt); | 785 | IP_VS_XMIT(PF_INET, skb, rt); |
471 | 786 | ||
472 | LeaveFunction(10); | 787 | LeaveFunction(10); |
473 | return NF_STOLEN; | 788 | return NF_STOLEN; |
@@ -480,6 +795,60 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
480 | return NF_STOLEN; | 795 | return NF_STOLEN; |
481 | } | 796 | } |
482 | 797 | ||
798 | #ifdef CONFIG_IP_VS_IPV6 | ||
799 | int | ||
800 | ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
801 | struct ip_vs_protocol *pp) | ||
802 | { | ||
803 | struct rt6_info *rt; /* Route to the other host */ | ||
804 | int mtu; | ||
805 | |||
806 | EnterFunction(10); | ||
807 | |||
808 | rt = __ip_vs_get_out_rt_v6(cp); | ||
809 | if (!rt) | ||
810 | goto tx_error_icmp; | ||
811 | |||
812 | /* MTU checking */ | ||
813 | mtu = dst_mtu(&rt->u.dst); | ||
814 | if (skb->len > mtu) { | ||
815 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | ||
816 | dst_release(&rt->u.dst); | ||
817 | IP_VS_DBG_RL("ip_vs_dr_xmit_v6(): frag needed\n"); | ||
818 | goto tx_error; | ||
819 | } | ||
820 | |||
821 | /* | ||
822 | * Call ip_send_check because we are not sure it is called | ||
823 | * after ip_defrag. Is copy-on-write needed? | ||
824 | */ | ||
825 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
826 | if (unlikely(skb == NULL)) { | ||
827 | dst_release(&rt->u.dst); | ||
828 | return NF_STOLEN; | ||
829 | } | ||
830 | |||
831 | /* drop old route */ | ||
832 | dst_release(skb->dst); | ||
833 | skb->dst = &rt->u.dst; | ||
834 | |||
835 | /* Another hack: avoid icmp_send in ip_fragment */ | ||
836 | skb->local_df = 1; | ||
837 | |||
838 | IP_VS_XMIT(PF_INET6, skb, rt); | ||
839 | |||
840 | LeaveFunction(10); | ||
841 | return NF_STOLEN; | ||
842 | |||
843 | tx_error_icmp: | ||
844 | dst_link_failure(skb); | ||
845 | tx_error: | ||
846 | kfree_skb(skb); | ||
847 | LeaveFunction(10); | ||
848 | return NF_STOLEN; | ||
849 | } | ||
850 | #endif | ||
851 | |||
483 | 852 | ||
484 | /* | 853 | /* |
485 | * ICMP packet transmitter | 854 | * ICMP packet transmitter |
@@ -540,7 +909,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
540 | /* Another hack: avoid icmp_send in ip_fragment */ | 909 | /* Another hack: avoid icmp_send in ip_fragment */ |
541 | skb->local_df = 1; | 910 | skb->local_df = 1; |
542 | 911 | ||
543 | IP_VS_XMIT(skb, rt); | 912 | IP_VS_XMIT(PF_INET, skb, rt); |
544 | 913 | ||
545 | rc = NF_STOLEN; | 914 | rc = NF_STOLEN; |
546 | goto out; | 915 | goto out; |
@@ -557,3 +926,79 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
557 | ip_rt_put(rt); | 926 | ip_rt_put(rt); |
558 | goto tx_error; | 927 | goto tx_error; |
559 | } | 928 | } |
929 | |||
930 | #ifdef CONFIG_IP_VS_IPV6 | ||
931 | int | ||
932 | ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
933 | struct ip_vs_protocol *pp, int offset) | ||
934 | { | ||
935 | struct rt6_info *rt; /* Route to the other host */ | ||
936 | int mtu; | ||
937 | int rc; | ||
938 | |||
939 | EnterFunction(10); | ||
940 | |||
941 | /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be | ||
942 | forwarded directly here, because there is no need to | ||
943 | translate address/port back */ | ||
944 | if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { | ||
945 | if (cp->packet_xmit) | ||
946 | rc = cp->packet_xmit(skb, cp, pp); | ||
947 | else | ||
948 | rc = NF_ACCEPT; | ||
949 | /* do not touch skb anymore */ | ||
950 | atomic_inc(&cp->in_pkts); | ||
951 | goto out; | ||
952 | } | ||
953 | |||
954 | /* | ||
955 | * mangle and send the packet here (only for VS/NAT) | ||
956 | */ | ||
957 | |||
958 | rt = __ip_vs_get_out_rt_v6(cp); | ||
959 | if (!rt) | ||
960 | goto tx_error_icmp; | ||
961 | |||
962 | /* MTU checking */ | ||
963 | mtu = dst_mtu(&rt->u.dst); | ||
964 | if (skb->len > mtu) { | ||
965 | dst_release(&rt->u.dst); | ||
966 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | ||
967 | IP_VS_DBG_RL("ip_vs_icmp_xmit_v6(): frag needed\n"); | ||
968 | goto tx_error; | ||
969 | } | ||
970 | |||
971 | /* copy-on-write the packet before mangling it */ | ||
972 | if (!skb_make_writable(skb, offset)) | ||
973 | goto tx_error_put; | ||
974 | |||
975 | if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) | ||
976 | goto tx_error_put; | ||
977 | |||
978 | /* drop the old route when skb is not shared */ | ||
979 | dst_release(skb->dst); | ||
980 | skb->dst = &rt->u.dst; | ||
981 | |||
982 | ip_vs_nat_icmp_v6(skb, pp, cp, 0); | ||
983 | |||
984 | /* Another hack: avoid icmp_send in ip_fragment */ | ||
985 | skb->local_df = 1; | ||
986 | |||
987 | IP_VS_XMIT(PF_INET6, skb, rt); | ||
988 | |||
989 | rc = NF_STOLEN; | ||
990 | goto out; | ||
991 | |||
992 | tx_error_icmp: | ||
993 | dst_link_failure(skb); | ||
994 | tx_error: | ||
995 | dev_kfree_skb(skb); | ||
996 | rc = NF_STOLEN; | ||
997 | out: | ||
998 | LeaveFunction(10); | ||
999 | return rc; | ||
1000 | tx_error_put: | ||
1001 | dst_release(&rt->u.dst); | ||
1002 | goto tx_error; | ||
1003 | } | ||
1004 | #endif | ||
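The IPv6 transmitters above mirror their IPv4 counterparts and hand the finished skb to netfilter through IP_VS_XMIT(), which now takes the address family as its first argument (PF_INET or PF_INET6). A minimal sketch of what such a family-aware macro can look like, offered only as an assumed shape since the real definition lives in the IPVS headers and may differ:

    #define IP_VS_XMIT(pf, skb, rt)                                 \
    do {                                                            \
            (skb)->ipvs_property = 1;  /* don't re-enter IPVS on output */ \
            NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,             \
                    (rt)->u.dst.dev, dst_output);                   \
    } while (0)

With the family passed in, one macro body serves both the IPv4 and IPv6 paths; only the netfilter protocol family given to NF_HOOK() changes.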
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 6ee5354c9aa1..f62187bb6d08 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -282,6 +282,8 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq) | |||
282 | struct rtable *r = NULL; | 282 | struct rtable *r = NULL; |
283 | 283 | ||
284 | for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { | 284 | for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { |
285 | if (!rt_hash_table[st->bucket].chain) | ||
286 | continue; | ||
285 | rcu_read_lock_bh(); | 287 | rcu_read_lock_bh(); |
286 | r = rcu_dereference(rt_hash_table[st->bucket].chain); | 288 | r = rcu_dereference(rt_hash_table[st->bucket].chain); |
287 | while (r) { | 289 | while (r) { |
@@ -299,11 +301,14 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq, | |||
299 | struct rtable *r) | 301 | struct rtable *r) |
300 | { | 302 | { |
301 | struct rt_cache_iter_state *st = seq->private; | 303 | struct rt_cache_iter_state *st = seq->private; |
304 | |||
302 | r = r->u.dst.rt_next; | 305 | r = r->u.dst.rt_next; |
303 | while (!r) { | 306 | while (!r) { |
304 | rcu_read_unlock_bh(); | 307 | rcu_read_unlock_bh(); |
305 | if (--st->bucket < 0) | 308 | do { |
306 | break; | 309 | if (--st->bucket < 0) |
310 | return NULL; | ||
311 | } while (!rt_hash_table[st->bucket].chain); | ||
307 | rcu_read_lock_bh(); | 312 | rcu_read_lock_bh(); |
308 | r = rt_hash_table[st->bucket].chain; | 313 | r = rt_hash_table[st->bucket].chain; |
309 | } | 314 | } |
@@ -2840,7 +2845,9 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2840 | if (s_h < 0) | 2845 | if (s_h < 0) |
2841 | s_h = 0; | 2846 | s_h = 0; |
2842 | s_idx = idx = cb->args[1]; | 2847 | s_idx = idx = cb->args[1]; |
2843 | for (h = s_h; h <= rt_hash_mask; h++) { | 2848 | for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) { |
2849 | if (!rt_hash_table[h].chain) | ||
2850 | continue; | ||
2844 | rcu_read_lock_bh(); | 2851 | rcu_read_lock_bh(); |
2845 | for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; | 2852 | for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; |
2846 | rt = rcu_dereference(rt->u.dst.rt_next), idx++) { | 2853 | rt = rcu_dereference(rt->u.dst.rt_next), idx++) { |
@@ -2859,7 +2866,6 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2859 | dst_release(xchg(&skb->dst, NULL)); | 2866 | dst_release(xchg(&skb->dst, NULL)); |
2860 | } | 2867 | } |
2861 | rcu_read_unlock_bh(); | 2868 | rcu_read_unlock_bh(); |
2862 | s_idx = 0; | ||
2863 | } | 2869 | } |
2864 | 2870 | ||
2865 | done: | 2871 | done: |
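Both route-cache walkers above now peek at a bucket's chain pointer before taking any lock, so the common case of an empty bucket no longer costs an rcu_read_lock_bh()/unlock pair; an entry inserted into a bucket just after the peek is simply missed by that particular dump, which proc and netlink readers tolerate. Moving the s_idx reset into the for-loop header keeps ip_rt_dump() correct now that empty buckets are skipped with continue. Condensed, the pattern is:

    for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
            if (!rt_hash_table[h].chain)    /* unlocked peek: empty bucket */
                    continue;
            rcu_read_lock_bh();
            /* ... walk rcu_dereference(rt_hash_table[h].chain) ... */
            rcu_read_unlock_bh();
    }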
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 67ccce2a96bd..f79a51607292 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -3442,6 +3442,22 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
3442 | } | 3442 | } |
3443 | } | 3443 | } |
3444 | 3444 | ||
3445 | static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th) | ||
3446 | { | ||
3447 | __be32 *ptr = (__be32 *)(th + 1); | ||
3448 | |||
3449 | if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | ||
3450 | | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { | ||
3451 | tp->rx_opt.saw_tstamp = 1; | ||
3452 | ++ptr; | ||
3453 | tp->rx_opt.rcv_tsval = ntohl(*ptr); | ||
3454 | ++ptr; | ||
3455 | tp->rx_opt.rcv_tsecr = ntohl(*ptr); | ||
3456 | return 1; | ||
3457 | } | ||
3458 | return 0; | ||
3459 | } | ||
3460 | |||
3445 | /* Fast parse options. This hopes to only see timestamps. | 3461 | /* Fast parse options. This hopes to only see timestamps. |
3446 | * If it is wrong it falls back on tcp_parse_options(). | 3462 | * If it is wrong it falls back on tcp_parse_options(). |
3447 | */ | 3463 | */ |
@@ -3453,16 +3469,8 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, | |||
3453 | return 0; | 3469 | return 0; |
3454 | } else if (tp->rx_opt.tstamp_ok && | 3470 | } else if (tp->rx_opt.tstamp_ok && |
3455 | th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { | 3471 | th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { |
3456 | __be32 *ptr = (__be32 *)(th + 1); | 3472 | if (tcp_parse_aligned_timestamp(tp, th)) |
3457 | if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | ||
3458 | | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { | ||
3459 | tp->rx_opt.saw_tstamp = 1; | ||
3460 | ++ptr; | ||
3461 | tp->rx_opt.rcv_tsval = ntohl(*ptr); | ||
3462 | ++ptr; | ||
3463 | tp->rx_opt.rcv_tsecr = ntohl(*ptr); | ||
3464 | return 1; | 3473 | return 1; |
3465 | } | ||
3466 | } | 3474 | } |
3467 | tcp_parse_options(skb, &tp->rx_opt, 1); | 3475 | tcp_parse_options(skb, &tp->rx_opt, 1); |
3468 | return 1; | 3476 | return 1; |
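tcp_parse_aligned_timestamp() recognises the common "two NOPs plus timestamp" option layout with a single 32-bit compare. Using the standard option kinds (TCPOPT_NOP = 1, TCPOPT_TIMESTAMP = 8) and TCPOLEN_TIMESTAMP = 10, the constant it compares against works out to:

    /* (TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP */
    u32 expected = (1 << 24) | (1 << 16) | (8 << 8) | 10;  /* 0x0101080a */
    /* on the wire: 01 01 08 0a <4-byte TSval> <4-byte TSecr> */

so a header whose option area is exactly two NOP pads followed by a timestamp option is accepted in one compare, and the two ntohl() loads that follow pick up TSval and TSecr.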
@@ -4161,6 +4169,18 @@ add_sack: | |||
4161 | } | 4169 | } |
4162 | } | 4170 | } |
4163 | 4171 | ||
4172 | static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, | ||
4173 | struct sk_buff_head *list) | ||
4174 | { | ||
4175 | struct sk_buff *next = skb->next; | ||
4176 | |||
4177 | __skb_unlink(skb, list); | ||
4178 | __kfree_skb(skb); | ||
4179 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); | ||
4180 | |||
4181 | return next; | ||
4182 | } | ||
4183 | |||
4164 | /* Collapse contiguous sequence of skbs head..tail with | 4184 | /* Collapse contiguous sequence of skbs head..tail with |
4165 | * sequence numbers start..end. | 4185 | * sequence numbers start..end. |
4166 | * Segments with FIN/SYN are not collapsed (only because this | 4186 | * Segments with FIN/SYN are not collapsed (only because this |
@@ -4178,11 +4198,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4178 | for (skb = head; skb != tail;) { | 4198 | for (skb = head; skb != tail;) { |
4179 | /* No new bits? It is possible on ofo queue. */ | 4199 | /* No new bits? It is possible on ofo queue. */ |
4180 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { | 4200 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { |
4181 | struct sk_buff *next = skb->next; | 4201 | skb = tcp_collapse_one(sk, skb, list); |
4182 | __skb_unlink(skb, list); | ||
4183 | __kfree_skb(skb); | ||
4184 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); | ||
4185 | skb = next; | ||
4186 | continue; | 4202 | continue; |
4187 | } | 4203 | } |
4188 | 4204 | ||
@@ -4246,11 +4262,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4246 | start += size; | 4262 | start += size; |
4247 | } | 4263 | } |
4248 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { | 4264 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { |
4249 | struct sk_buff *next = skb->next; | 4265 | skb = tcp_collapse_one(sk, skb, list); |
4250 | __skb_unlink(skb, list); | ||
4251 | __kfree_skb(skb); | ||
4252 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); | ||
4253 | skb = next; | ||
4254 | if (skb == tail || | 4266 | if (skb == tail || |
4255 | tcp_hdr(skb)->syn || | 4267 | tcp_hdr(skb)->syn || |
4256 | tcp_hdr(skb)->fin) | 4268 | tcp_hdr(skb)->fin) |
@@ -4691,6 +4703,67 @@ out: | |||
4691 | } | 4703 | } |
4692 | #endif /* CONFIG_NET_DMA */ | 4704 | #endif /* CONFIG_NET_DMA */ |
4693 | 4705 | ||
4706 | /* Does PAWS and seqno based validation of an incoming segment, flags will | ||
4707 | * play significant role here. | ||
4708 | */ | ||
4709 | static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, | ||
4710 | struct tcphdr *th, int syn_inerr) | ||
4711 | { | ||
4712 | struct tcp_sock *tp = tcp_sk(sk); | ||
4713 | |||
4714 | /* RFC1323: H1. Apply PAWS check first. */ | ||
4715 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && | ||
4716 | tcp_paws_discard(sk, skb)) { | ||
4717 | if (!th->rst) { | ||
4718 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); | ||
4719 | tcp_send_dupack(sk, skb); | ||
4720 | goto discard; | ||
4721 | } | ||
4722 | /* Reset is accepted even if it did not pass PAWS. */ | ||
4723 | } | ||
4724 | |||
4725 | /* Step 1: check sequence number */ | ||
4726 | if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { | ||
4727 | /* RFC793, page 37: "In all states except SYN-SENT, all reset | ||
4728 | * (RST) segments are validated by checking their SEQ-fields." | ||
4729 | * And page 69: "If an incoming segment is not acceptable, | ||
4730 | * an acknowledgment should be sent in reply (unless the RST | ||
4731 | * bit is set, if so drop the segment and return)". | ||
4732 | */ | ||
4733 | if (!th->rst) | ||
4734 | tcp_send_dupack(sk, skb); | ||
4735 | goto discard; | ||
4736 | } | ||
4737 | |||
4738 | /* Step 2: check RST bit */ | ||
4739 | if (th->rst) { | ||
4740 | tcp_reset(sk); | ||
4741 | goto discard; | ||
4742 | } | ||
4743 | |||
4744 | /* ts_recent update must be made after we are sure that the packet | ||
4745 | * is in window. | ||
4746 | */ | ||
4747 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
4748 | |||
4749 | /* step 3: check security and precedence [ignored] */ | ||
4750 | |||
4751 | /* step 4: Check for a SYN in window. */ | ||
4752 | if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | ||
4753 | if (syn_inerr) | ||
4754 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); | ||
4755 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); | ||
4756 | tcp_reset(sk); | ||
4757 | return -1; | ||
4758 | } | ||
4759 | |||
4760 | return 1; | ||
4761 | |||
4762 | discard: | ||
4763 | __kfree_skb(skb); | ||
4764 | return 0; | ||
4765 | } | ||
4766 | |||
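tcp_validate_incoming() folds the PAWS, sequence-number, RST and in-window-SYN checks that tcp_rcv_established() and tcp_rcv_state_process() used to duplicate into one helper. Its return value tells the caller what happened: 1 means the segment passed and processing continues, 0 means the skb was discarded inside the helper, and -1 means a reset was sent. Both callers therefore use the same two-line pattern:

    res = tcp_validate_incoming(sk, skb, th, 1);   /* 1: also count an in-window SYN as TCP_MIB_INERRS */
    if (res <= 0)
            return -res;    /* 0: skb already freed; -1: return 1 to the caller after the reset */
    /* res == 1: fall through to ACK processing (step 5) */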
4694 | /* | 4767 | /* |
4695 | * TCP receive function for the ESTABLISHED state. | 4768 | * TCP receive function for the ESTABLISHED state. |
4696 | * | 4769 | * |
@@ -4718,6 +4791,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
4718 | struct tcphdr *th, unsigned len) | 4791 | struct tcphdr *th, unsigned len) |
4719 | { | 4792 | { |
4720 | struct tcp_sock *tp = tcp_sk(sk); | 4793 | struct tcp_sock *tp = tcp_sk(sk); |
4794 | int res; | ||
4721 | 4795 | ||
4722 | /* | 4796 | /* |
4723 | * Header prediction. | 4797 | * Header prediction. |
@@ -4756,19 +4830,10 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
4756 | 4830 | ||
4757 | /* Check timestamp */ | 4831 | /* Check timestamp */ |
4758 | if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { | 4832 | if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { |
4759 | __be32 *ptr = (__be32 *)(th + 1); | ||
4760 | |||
4761 | /* No? Slow path! */ | 4833 | /* No? Slow path! */ |
4762 | if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 4834 | if (!tcp_parse_aligned_timestamp(tp, th)) |
4763 | | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) | ||
4764 | goto slow_path; | 4835 | goto slow_path; |
4765 | 4836 | ||
4766 | tp->rx_opt.saw_tstamp = 1; | ||
4767 | ++ptr; | ||
4768 | tp->rx_opt.rcv_tsval = ntohl(*ptr); | ||
4769 | ++ptr; | ||
4770 | tp->rx_opt.rcv_tsecr = ntohl(*ptr); | ||
4771 | |||
4772 | /* If PAWS failed, check it more carefully in slow path */ | 4837 | /* If PAWS failed, check it more carefully in slow path */ |
4773 | if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) | 4838 | if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) |
4774 | goto slow_path; | 4839 | goto slow_path; |
@@ -4899,51 +4964,12 @@ slow_path: | |||
4899 | goto csum_error; | 4964 | goto csum_error; |
4900 | 4965 | ||
4901 | /* | 4966 | /* |
4902 | * RFC1323: H1. Apply PAWS check first. | ||
4903 | */ | ||
4904 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && | ||
4905 | tcp_paws_discard(sk, skb)) { | ||
4906 | if (!th->rst) { | ||
4907 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); | ||
4908 | tcp_send_dupack(sk, skb); | ||
4909 | goto discard; | ||
4910 | } | ||
4911 | /* Resets are accepted even if PAWS failed. | ||
4912 | |||
4913 | ts_recent update must be made after we are sure | ||
4914 | that the packet is in window. | ||
4915 | */ | ||
4916 | } | ||
4917 | |||
4918 | /* | ||
4919 | * Standard slow path. | 4967 | * Standard slow path. |
4920 | */ | 4968 | */ |
4921 | 4969 | ||
4922 | if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { | 4970 | res = tcp_validate_incoming(sk, skb, th, 1); |
4923 | /* RFC793, page 37: "In all states except SYN-SENT, all reset | 4971 | if (res <= 0) |
4924 | * (RST) segments are validated by checking their SEQ-fields." | 4972 | return -res; |
4925 | * And page 69: "If an incoming segment is not acceptable, | ||
4926 | * an acknowledgment should be sent in reply (unless the RST bit | ||
4927 | * is set, if so drop the segment and return)". | ||
4928 | */ | ||
4929 | if (!th->rst) | ||
4930 | tcp_send_dupack(sk, skb); | ||
4931 | goto discard; | ||
4932 | } | ||
4933 | |||
4934 | if (th->rst) { | ||
4935 | tcp_reset(sk); | ||
4936 | goto discard; | ||
4937 | } | ||
4938 | |||
4939 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
4940 | |||
4941 | if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | ||
4942 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); | ||
4943 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); | ||
4944 | tcp_reset(sk); | ||
4945 | return 1; | ||
4946 | } | ||
4947 | 4973 | ||
4948 | step5: | 4974 | step5: |
4949 | if (th->ack) | 4975 | if (th->ack) |
@@ -5225,6 +5251,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5225 | struct tcp_sock *tp = tcp_sk(sk); | 5251 | struct tcp_sock *tp = tcp_sk(sk); |
5226 | struct inet_connection_sock *icsk = inet_csk(sk); | 5252 | struct inet_connection_sock *icsk = inet_csk(sk); |
5227 | int queued = 0; | 5253 | int queued = 0; |
5254 | int res; | ||
5228 | 5255 | ||
5229 | tp->rx_opt.saw_tstamp = 0; | 5256 | tp->rx_opt.saw_tstamp = 0; |
5230 | 5257 | ||
@@ -5277,42 +5304,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5277 | return 0; | 5304 | return 0; |
5278 | } | 5305 | } |
5279 | 5306 | ||
5280 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && | 5307 | res = tcp_validate_incoming(sk, skb, th, 0); |
5281 | tcp_paws_discard(sk, skb)) { | 5308 | if (res <= 0) |
5282 | if (!th->rst) { | 5309 | return -res; |
5283 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); | ||
5284 | tcp_send_dupack(sk, skb); | ||
5285 | goto discard; | ||
5286 | } | ||
5287 | /* Reset is accepted even if it did not pass PAWS. */ | ||
5288 | } | ||
5289 | |||
5290 | /* step 1: check sequence number */ | ||
5291 | if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { | ||
5292 | if (!th->rst) | ||
5293 | tcp_send_dupack(sk, skb); | ||
5294 | goto discard; | ||
5295 | } | ||
5296 | |||
5297 | /* step 2: check RST bit */ | ||
5298 | if (th->rst) { | ||
5299 | tcp_reset(sk); | ||
5300 | goto discard; | ||
5301 | } | ||
5302 | |||
5303 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
5304 | |||
5305 | /* step 3: check security and precedence [ignored] */ | ||
5306 | |||
5307 | /* step 4: | ||
5308 | * | ||
5309 | * Check for a SYN in window. | ||
5310 | */ | ||
5311 | if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | ||
5312 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); | ||
5313 | tcp_reset(sk); | ||
5314 | return 1; | ||
5315 | } | ||
5316 | 5310 | ||
5317 | /* step 5: check the ACK field */ | 5311 | /* step 5: check the ACK field */ |
5318 | if (th->ack) { | 5312 | if (th->ack) { |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 1b4fee20fc93..3dfbc21e555a 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1946,6 +1946,12 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos) | |||
1946 | return rc; | 1946 | return rc; |
1947 | } | 1947 | } |
1948 | 1948 | ||
1949 | static inline int empty_bucket(struct tcp_iter_state *st) | ||
1950 | { | ||
1951 | return hlist_empty(&tcp_hashinfo.ehash[st->bucket].chain) && | ||
1952 | hlist_empty(&tcp_hashinfo.ehash[st->bucket].twchain); | ||
1953 | } | ||
1954 | |||
1949 | static void *established_get_first(struct seq_file *seq) | 1955 | static void *established_get_first(struct seq_file *seq) |
1950 | { | 1956 | { |
1951 | struct tcp_iter_state* st = seq->private; | 1957 | struct tcp_iter_state* st = seq->private; |
@@ -1958,6 +1964,10 @@ static void *established_get_first(struct seq_file *seq) | |||
1958 | struct inet_timewait_sock *tw; | 1964 | struct inet_timewait_sock *tw; |
1959 | rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); | 1965 | rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); |
1960 | 1966 | ||
1967 | /* Lockless fast path for the common case of empty buckets */ | ||
1968 | if (empty_bucket(st)) | ||
1969 | continue; | ||
1970 | |||
1961 | read_lock_bh(lock); | 1971 | read_lock_bh(lock); |
1962 | sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { | 1972 | sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { |
1963 | if (sk->sk_family != st->family || | 1973 | if (sk->sk_family != st->family || |
@@ -2008,13 +2018,15 @@ get_tw: | |||
2008 | read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); | 2018 | read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); |
2009 | st->state = TCP_SEQ_STATE_ESTABLISHED; | 2019 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
2010 | 2020 | ||
2011 | if (++st->bucket < tcp_hashinfo.ehash_size) { | 2021 | /* Look for next non empty bucket */ |
2012 | read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); | 2022 | while (++st->bucket < tcp_hashinfo.ehash_size && |
2013 | sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); | 2023 | empty_bucket(st)) |
2014 | } else { | 2024 | ; |
2015 | cur = NULL; | 2025 | if (st->bucket >= tcp_hashinfo.ehash_size) |
2016 | goto out; | 2026 | return NULL; |
2017 | } | 2027 | |
2028 | read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); | ||
2029 | sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); | ||
2018 | } else | 2030 | } else |
2019 | sk = sk_next(sk); | 2031 | sk = sk_next(sk); |
2020 | 2032 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9af6115f0f50..776871ee2288 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1003,6 +1003,25 @@ int icmp6_dst_gc(void) | |||
1003 | return more; | 1003 | return more; |
1004 | } | 1004 | } |
1005 | 1005 | ||
1006 | static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg), | ||
1007 | void *arg) | ||
1008 | { | ||
1009 | struct dst_entry *dst, **pprev; | ||
1010 | |||
1011 | spin_lock_bh(&icmp6_dst_lock); | ||
1012 | pprev = &icmp6_dst_gc_list; | ||
1013 | while ((dst = *pprev) != NULL) { | ||
1014 | struct rt6_info *rt = (struct rt6_info *) dst; | ||
1015 | if (func(rt, arg)) { | ||
1016 | *pprev = dst->next; | ||
1017 | dst_free(dst); | ||
1018 | } else { | ||
1019 | pprev = &dst->next; | ||
1020 | } | ||
1021 | } | ||
1022 | spin_unlock_bh(&icmp6_dst_lock); | ||
1023 | } | ||
1024 | |||
1006 | static int ip6_dst_gc(struct dst_ops *ops) | 1025 | static int ip6_dst_gc(struct dst_ops *ops) |
1007 | { | 1026 | { |
1008 | unsigned long now = jiffies; | 1027 | unsigned long now = jiffies; |
@@ -1930,6 +1949,7 @@ void rt6_ifdown(struct net *net, struct net_device *dev) | |||
1930 | }; | 1949 | }; |
1931 | 1950 | ||
1932 | fib6_clean_all(net, fib6_ifdown, 0, &adn); | 1951 | fib6_clean_all(net, fib6_ifdown, 0, &adn); |
1952 | icmp6_clean_all(fib6_ifdown, &adn); | ||
1933 | } | 1953 | } |
1934 | 1954 | ||
1935 | struct rt6_mtu_change_arg | 1955 | struct rt6_mtu_change_arg |
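dst entries created for ICMPv6/ndisc use (icmp6_dst_alloc()) sit on icmp6_dst_gc_list rather than in the FIB tree, so fib6_clean_all() never visits them. rt6_ifdown() now walks that list as well, with the same fib6_ifdown predicate: a nonzero return from func(rt, arg) makes icmp6_clean_all() unlink the entry and dst_free() it. A hypothetical callback following that contract, with invented names and an assumed argument layout, could look like:

    static int drop_entries_for_dev(struct rt6_info *rt, void *arg)
    {
            struct net_device *dev = arg;   /* assumed: caller passes the device */

            return rt->u.dst.dev == dev;    /* nonzero: unlink and free this entry */
    }

    icmp6_clean_all(drop_entries_for_dev, dev);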
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index a169b0201d61..2dc8f2bff27b 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile | |||
@@ -7,6 +7,8 @@ mac80211-y := \ | |||
7 | sta_info.o \ | 7 | sta_info.o \ |
8 | wep.o \ | 8 | wep.o \ |
9 | wpa.o \ | 9 | wpa.o \ |
10 | scan.o \ | ||
11 | ht.o \ | ||
10 | mlme.o \ | 12 | mlme.o \ |
11 | iface.o \ | 13 | iface.o \ |
12 | rate.o \ | 14 | rate.o \ |
@@ -15,6 +17,7 @@ mac80211-y := \ | |||
15 | aes_ccm.o \ | 17 | aes_ccm.o \ |
16 | cfg.o \ | 18 | cfg.o \ |
17 | rx.o \ | 19 | rx.o \ |
20 | spectmgmt.o \ | ||
18 | tx.o \ | 21 | tx.o \ |
19 | key.o \ | 22 | key.o \ |
20 | util.o \ | 23 | util.o \ |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 297c257864c7..928813ce08e2 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -66,13 +66,16 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name, | |||
66 | static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) | 66 | static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) |
67 | { | 67 | { |
68 | struct net_device *dev; | 68 | struct net_device *dev; |
69 | struct ieee80211_sub_if_data *sdata; | ||
69 | 70 | ||
70 | /* we're under RTNL */ | 71 | /* we're under RTNL */ |
71 | dev = __dev_get_by_index(&init_net, ifindex); | 72 | dev = __dev_get_by_index(&init_net, ifindex); |
72 | if (!dev) | 73 | if (!dev) |
73 | return -ENODEV; | 74 | return -ENODEV; |
74 | 75 | ||
75 | ieee80211_if_remove(dev); | 76 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
77 | |||
78 | ieee80211_if_remove(sdata); | ||
76 | 79 | ||
77 | return 0; | 80 | return 0; |
78 | } | 81 | } |
@@ -671,6 +674,11 @@ static void sta_apply_parameters(struct ieee80211_local *local, | |||
671 | sta->supp_rates[local->oper_channel->band] = rates; | 674 | sta->supp_rates[local->oper_channel->band] = rates; |
672 | } | 675 | } |
673 | 676 | ||
677 | if (params->ht_capa) { | ||
678 | ieee80211_ht_cap_ie_to_ht_info(params->ht_capa, | ||
679 | &sta->ht_info); | ||
680 | } | ||
681 | |||
674 | if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) { | 682 | if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) { |
675 | switch (params->plink_action) { | 683 | switch (params->plink_action) { |
676 | case PLINK_ACTION_OPEN: | 684 | case PLINK_ACTION_OPEN: |
@@ -842,13 +850,13 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, | |||
842 | return -ENOENT; | 850 | return -ENOENT; |
843 | } | 851 | } |
844 | 852 | ||
845 | err = mesh_path_add(dst, dev); | 853 | err = mesh_path_add(dst, sdata); |
846 | if (err) { | 854 | if (err) { |
847 | rcu_read_unlock(); | 855 | rcu_read_unlock(); |
848 | return err; | 856 | return err; |
849 | } | 857 | } |
850 | 858 | ||
851 | mpath = mesh_path_lookup(dst, dev); | 859 | mpath = mesh_path_lookup(dst, sdata); |
852 | if (!mpath) { | 860 | if (!mpath) { |
853 | rcu_read_unlock(); | 861 | rcu_read_unlock(); |
854 | return -ENXIO; | 862 | return -ENXIO; |
@@ -862,10 +870,12 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, | |||
862 | static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, | 870 | static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, |
863 | u8 *dst) | 871 | u8 *dst) |
864 | { | 872 | { |
873 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
874 | |||
865 | if (dst) | 875 | if (dst) |
866 | return mesh_path_del(dst, dev); | 876 | return mesh_path_del(dst, sdata); |
867 | 877 | ||
868 | mesh_path_flush(dev); | 878 | mesh_path_flush(sdata); |
869 | return 0; | 879 | return 0; |
870 | } | 880 | } |
871 | 881 | ||
@@ -897,7 +907,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy, | |||
897 | return -ENOENT; | 907 | return -ENOENT; |
898 | } | 908 | } |
899 | 909 | ||
900 | mpath = mesh_path_lookup(dst, dev); | 910 | mpath = mesh_path_lookup(dst, sdata); |
901 | if (!mpath) { | 911 | if (!mpath) { |
902 | rcu_read_unlock(); | 912 | rcu_read_unlock(); |
903 | return -ENOENT; | 913 | return -ENOENT; |
@@ -965,7 +975,7 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, | |||
965 | return -ENOTSUPP; | 975 | return -ENOTSUPP; |
966 | 976 | ||
967 | rcu_read_lock(); | 977 | rcu_read_lock(); |
968 | mpath = mesh_path_lookup(dst, dev); | 978 | mpath = mesh_path_lookup(dst, sdata); |
969 | if (!mpath) { | 979 | if (!mpath) { |
970 | rcu_read_unlock(); | 980 | rcu_read_unlock(); |
971 | return -ENOENT; | 981 | return -ENOENT; |
@@ -993,7 +1003,7 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, | |||
993 | return -ENOTSUPP; | 1003 | return -ENOTSUPP; |
994 | 1004 | ||
995 | rcu_read_lock(); | 1005 | rcu_read_lock(); |
996 | mpath = mesh_path_lookup_by_idx(idx, dev); | 1006 | mpath = mesh_path_lookup_by_idx(idx, sdata); |
997 | if (!mpath) { | 1007 | if (!mpath) { |
998 | rcu_read_unlock(); | 1008 | rcu_read_unlock(); |
999 | return -ENOENT; | 1009 | return -ENOENT; |
@@ -1005,6 +1015,42 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, | |||
1005 | } | 1015 | } |
1006 | #endif | 1016 | #endif |
1007 | 1017 | ||
1018 | static int ieee80211_change_bss(struct wiphy *wiphy, | ||
1019 | struct net_device *dev, | ||
1020 | struct bss_parameters *params) | ||
1021 | { | ||
1022 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
1023 | struct ieee80211_sub_if_data *sdata; | ||
1024 | u32 changed = 0; | ||
1025 | |||
1026 | if (dev == local->mdev) | ||
1027 | return -EOPNOTSUPP; | ||
1028 | |||
1029 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1030 | |||
1031 | if (sdata->vif.type != IEEE80211_IF_TYPE_AP) | ||
1032 | return -EINVAL; | ||
1033 | |||
1034 | if (params->use_cts_prot >= 0) { | ||
1035 | sdata->bss_conf.use_cts_prot = params->use_cts_prot; | ||
1036 | changed |= BSS_CHANGED_ERP_CTS_PROT; | ||
1037 | } | ||
1038 | if (params->use_short_preamble >= 0) { | ||
1039 | sdata->bss_conf.use_short_preamble = | ||
1040 | params->use_short_preamble; | ||
1041 | changed |= BSS_CHANGED_ERP_PREAMBLE; | ||
1042 | } | ||
1043 | if (params->use_short_slot_time >= 0) { | ||
1044 | sdata->bss_conf.use_short_slot = | ||
1045 | params->use_short_slot_time; | ||
1046 | changed |= BSS_CHANGED_ERP_SLOT; | ||
1047 | } | ||
1048 | |||
1049 | ieee80211_bss_info_change_notify(sdata, changed); | ||
1050 | |||
1051 | return 0; | ||
1052 | } | ||
1053 | |||
1008 | struct cfg80211_ops mac80211_config_ops = { | 1054 | struct cfg80211_ops mac80211_config_ops = { |
1009 | .add_virtual_intf = ieee80211_add_iface, | 1055 | .add_virtual_intf = ieee80211_add_iface, |
1010 | .del_virtual_intf = ieee80211_del_iface, | 1056 | .del_virtual_intf = ieee80211_del_iface, |
@@ -1028,4 +1074,5 @@ struct cfg80211_ops mac80211_config_ops = { | |||
1028 | .get_mpath = ieee80211_get_mpath, | 1074 | .get_mpath = ieee80211_get_mpath, |
1029 | .dump_mpath = ieee80211_dump_mpath, | 1075 | .dump_mpath = ieee80211_dump_mpath, |
1030 | #endif | 1076 | #endif |
1077 | .change_bss = ieee80211_change_bss, | ||
1031 | }; | 1078 | }; |
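The new change_bss handler only touches the ERP bits of the BSS configuration, and it treats a negative field in struct bss_parameters as "leave unchanged", so a caller sets only the knobs it cares about. An illustrative sketch (wiphy and dev are assumed to come from the caller's context; in practice nl80211 drives this op):

    struct bss_parameters params = {
            .use_cts_prot        = 1,       /* enable CTS protection */
            .use_short_preamble  = -1,      /* -1: no change */
            .use_short_slot_time = -1,      /* -1: no change */
    };
    int err = mac80211_config_ops.change_bss(wiphy, dev, &params);

Each field that does change sets the matching BSS_CHANGED_* flag, so ieee80211_bss_info_change_notify() can tell the driver exactly which ERP parameters moved.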
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 79a062782d52..6abe5427752b 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -201,7 +201,7 @@ static ssize_t sta_agg_status_write(struct file *file, | |||
201 | tid_num = tid_num - 100; | 201 | tid_num = tid_num - 100; |
202 | if (tid_static_rx[tid_num] == 1) { | 202 | if (tid_static_rx[tid_num] == 1) { |
203 | strcpy(state, "off "); | 203 | strcpy(state, "off "); |
204 | ieee80211_sta_stop_rx_ba_session(dev, da, tid_num, 0, | 204 | ieee80211_sta_stop_rx_ba_session(sta->sdata, da, tid_num, 0, |
205 | WLAN_REASON_QSTA_REQUIRE_SETUP); | 205 | WLAN_REASON_QSTA_REQUIRE_SETUP); |
206 | sta->ampdu_mlme.tid_state_rx[tid_num] |= | 206 | sta->ampdu_mlme.tid_state_rx[tid_num] |= |
207 | HT_AGG_STATE_DEBUGFS_CTL; | 207 | HT_AGG_STATE_DEBUGFS_CTL; |
diff --git a/net/mac80211/event.c b/net/mac80211/event.c index 2280f40b4560..8de60de70bc9 100644 --- a/net/mac80211/event.c +++ b/net/mac80211/event.c | |||
@@ -8,7 +8,6 @@ | |||
8 | * mac80211 - events | 8 | * mac80211 - events |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/netdevice.h> | ||
12 | #include <net/iw_handler.h> | 11 | #include <net/iw_handler.h> |
13 | #include "ieee80211_i.h" | 12 | #include "ieee80211_i.h" |
14 | 13 | ||
@@ -17,7 +16,7 @@ | |||
17 | * (in the variable hdr) must be long enough to extract the TKIP | 16 | * (in the variable hdr) must be long enough to extract the TKIP |
18 | * fields like TSC | 17 | * fields like TSC |
19 | */ | 18 | */ |
20 | void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, | 19 | void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, |
21 | struct ieee80211_hdr *hdr) | 20 | struct ieee80211_hdr *hdr) |
22 | { | 21 | { |
23 | union iwreq_data wrqu; | 22 | union iwreq_data wrqu; |
@@ -32,7 +31,7 @@ void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, | |||
32 | print_mac(mac, hdr->addr2)); | 31 | print_mac(mac, hdr->addr2)); |
33 | memset(&wrqu, 0, sizeof(wrqu)); | 32 | memset(&wrqu, 0, sizeof(wrqu)); |
34 | wrqu.data.length = strlen(buf); | 33 | wrqu.data.length = strlen(buf); |
35 | wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); | 34 | wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf); |
36 | kfree(buf); | 35 | kfree(buf); |
37 | } | 36 | } |
38 | 37 | ||
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c new file mode 100644 index 000000000000..4dc35c9dabc7 --- /dev/null +++ b/net/mac80211/ht.c | |||
@@ -0,0 +1,992 @@ | |||
1 | /* | ||
2 | * HT handling | ||
3 | * | ||
4 | * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> | ||
5 | * Copyright 2002-2005, Instant802 Networks, Inc. | ||
6 | * Copyright 2005-2006, Devicescape Software, Inc. | ||
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | ||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | ||
9 | * Copyright 2007-2008, Intel Corporation | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | */ | ||
15 | |||
16 | #include <linux/ieee80211.h> | ||
17 | #include <net/wireless.h> | ||
18 | #include <net/mac80211.h> | ||
19 | #include "ieee80211_i.h" | ||
20 | #include "sta_info.h" | ||
21 | #include "wme.h" | ||
22 | |||
23 | int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie, | ||
24 | struct ieee80211_ht_info *ht_info) | ||
25 | { | ||
26 | |||
27 | if (ht_info == NULL) | ||
28 | return -EINVAL; | ||
29 | |||
30 | memset(ht_info, 0, sizeof(*ht_info)); | ||
31 | |||
32 | if (ht_cap_ie) { | ||
33 | u8 ampdu_info = ht_cap_ie->ampdu_params_info; | ||
34 | |||
35 | ht_info->ht_supported = 1; | ||
36 | ht_info->cap = le16_to_cpu(ht_cap_ie->cap_info); | ||
37 | ht_info->ampdu_factor = | ||
38 | ampdu_info & IEEE80211_HT_CAP_AMPDU_FACTOR; | ||
39 | ht_info->ampdu_density = | ||
40 | (ampdu_info & IEEE80211_HT_CAP_AMPDU_DENSITY) >> 2; | ||
41 | memcpy(ht_info->supp_mcs_set, ht_cap_ie->supp_mcs_set, 16); | ||
42 | } else | ||
43 | ht_info->ht_supported = 0; | ||
44 | |||
45 | return 0; | ||
46 | } | ||
47 | |||
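The A-MPDU parameters octet packs the maximum A-MPDU length exponent into its two low bits and the minimum MPDU start spacing into bits 4:2, which is what the masking and the >> 2 above pull apart. As a worked example, ampdu_params_info = 0x17 (binary 0001 0111) yields ampdu_factor = 0x17 & 0x03 = 3 and ampdu_density = (0x17 & 0x1c) >> 2 = 5, assuming IEEE80211_HT_CAP_AMPDU_FACTOR and IEEE80211_HT_CAP_AMPDU_DENSITY are the 0x03 and 0x1c masks that layout implies.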
48 | int ieee80211_ht_addt_info_ie_to_ht_bss_info( | ||
49 | struct ieee80211_ht_addt_info *ht_add_info_ie, | ||
50 | struct ieee80211_ht_bss_info *bss_info) | ||
51 | { | ||
52 | if (bss_info == NULL) | ||
53 | return -EINVAL; | ||
54 | |||
55 | memset(bss_info, 0, sizeof(*bss_info)); | ||
56 | |||
57 | if (ht_add_info_ie) { | ||
58 | u16 op_mode; | ||
59 | op_mode = le16_to_cpu(ht_add_info_ie->operation_mode); | ||
60 | |||
61 | bss_info->primary_channel = ht_add_info_ie->control_chan; | ||
62 | bss_info->bss_cap = ht_add_info_ie->ht_param; | ||
63 | bss_info->bss_op_mode = (u8)(op_mode & 0xff); | ||
64 | } | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, | ||
70 | const u8 *da, u16 tid, | ||
71 | u8 dialog_token, u16 start_seq_num, | ||
72 | u16 agg_size, u16 timeout) | ||
73 | { | ||
74 | struct ieee80211_local *local = sdata->local; | ||
75 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
76 | struct sk_buff *skb; | ||
77 | struct ieee80211_mgmt *mgmt; | ||
78 | u16 capab; | ||
79 | |||
80 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | ||
81 | |||
82 | if (!skb) { | ||
83 | printk(KERN_ERR "%s: failed to allocate buffer " | ||
84 | "for addba request frame\n", sdata->dev->name); | ||
85 | return; | ||
86 | } | ||
87 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
88 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | ||
89 | memset(mgmt, 0, 24); | ||
90 | memcpy(mgmt->da, da, ETH_ALEN); | ||
91 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | ||
92 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) | ||
93 | memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); | ||
94 | else | ||
95 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
96 | |||
97 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | | ||
98 | IEEE80211_STYPE_ACTION); | ||
99 | |||
100 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); | ||
101 | |||
102 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | ||
103 | mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ; | ||
104 | |||
105 | mgmt->u.action.u.addba_req.dialog_token = dialog_token; | ||
106 | capab = (u16)(1 << 1); /* bit 1 aggregation policy */ | ||
107 | capab |= (u16)(tid << 2); /* bit 5:2 TID number */ | ||
108 | capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggregation */ | ||
109 | |||
110 | mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); | ||
111 | |||
112 | mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout); | ||
113 | mgmt->u.action.u.addba_req.start_seq_num = | ||
114 | cpu_to_le16(start_seq_num << 4); | ||
115 | |||
116 | ieee80211_tx_skb(sdata, skb, 0); | ||
117 | } | ||
118 | |||
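As a worked example of the capability field assembled just above (aggregation policy in bit 1, TID in bits 5:2, maximum aggregation size in bits 15:6): with the agg_size of 0x40 and timeout of 5000 that ieee80211_start_tx_ba_session() passes later in this file, an ADDBA request for tid 5 carries capab = (1 << 1) | (5 << 2) | (0x40 << 6) = 0x0002 | 0x0014 | 0x1000 = 0x1016, before the cpu_to_le16() conversion.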
119 | static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, | ||
120 | u8 dialog_token, u16 status, u16 policy, | ||
121 | u16 buf_size, u16 timeout) | ||
122 | { | ||
123 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
124 | struct ieee80211_local *local = sdata->local; | ||
125 | struct sk_buff *skb; | ||
126 | struct ieee80211_mgmt *mgmt; | ||
127 | u16 capab; | ||
128 | |||
129 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | ||
130 | |||
131 | if (!skb) { | ||
132 | printk(KERN_DEBUG "%s: failed to allocate buffer " | ||
133 | "for addba resp frame\n", sdata->dev->name); | ||
134 | return; | ||
135 | } | ||
136 | |||
137 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
138 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | ||
139 | memset(mgmt, 0, 24); | ||
140 | memcpy(mgmt->da, da, ETH_ALEN); | ||
141 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | ||
142 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) | ||
143 | memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); | ||
144 | else | ||
145 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
146 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | | ||
147 | IEEE80211_STYPE_ACTION); | ||
148 | |||
149 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp)); | ||
150 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | ||
151 | mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; | ||
152 | mgmt->u.action.u.addba_resp.dialog_token = dialog_token; | ||
153 | |||
154 | capab = (u16)(policy << 1); /* bit 1 aggregation policy */ | ||
155 | capab |= (u16)(tid << 2); /* bit 5:2 TID number */ | ||
156 | capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */ | ||
157 | |||
158 | mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab); | ||
159 | mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); | ||
160 | mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); | ||
161 | |||
162 | ieee80211_tx_skb(sdata, skb, 0); | ||
163 | } | ||
164 | |||
165 | static void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, | ||
166 | const u8 *da, u16 tid, | ||
167 | u16 initiator, u16 reason_code) | ||
168 | { | ||
169 | struct ieee80211_local *local = sdata->local; | ||
170 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
171 | struct sk_buff *skb; | ||
172 | struct ieee80211_mgmt *mgmt; | ||
173 | u16 params; | ||
174 | |||
175 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | ||
176 | |||
177 | if (!skb) { | ||
178 | printk(KERN_ERR "%s: failed to allocate buffer " | ||
179 | "for delba frame\n", sdata->dev->name); | ||
180 | return; | ||
181 | } | ||
182 | |||
183 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
184 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | ||
185 | memset(mgmt, 0, 24); | ||
186 | memcpy(mgmt->da, da, ETH_ALEN); | ||
187 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | ||
188 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) | ||
189 | memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); | ||
190 | else | ||
191 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
192 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | | ||
193 | IEEE80211_STYPE_ACTION); | ||
194 | |||
195 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba)); | ||
196 | |||
197 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | ||
198 | mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA; | ||
199 | params = (u16)(initiator << 11); /* bit 11 initiator */ | ||
200 | params |= (u16)(tid << 12); /* bit 15:12 TID number */ | ||
201 | |||
202 | mgmt->u.action.u.delba.params = cpu_to_le16(params); | ||
203 | mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); | ||
204 | |||
205 | ieee80211_tx_skb(sdata, skb, 0); | ||
206 | } | ||
207 | |||
208 | void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn) | ||
209 | { | ||
210 | struct ieee80211_local *local = sdata->local; | ||
211 | struct sk_buff *skb; | ||
212 | struct ieee80211_bar *bar; | ||
213 | u16 bar_control = 0; | ||
214 | |||
215 | skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); | ||
216 | if (!skb) { | ||
217 | printk(KERN_ERR "%s: failed to allocate buffer for " | ||
218 | "bar frame\n", sdata->dev->name); | ||
219 | return; | ||
220 | } | ||
221 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
222 | bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar)); | ||
223 | memset(bar, 0, sizeof(*bar)); | ||
224 | bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | | ||
225 | IEEE80211_STYPE_BACK_REQ); | ||
226 | memcpy(bar->ra, ra, ETH_ALEN); | ||
227 | memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN); | ||
228 | bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; | ||
229 | bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; | ||
230 | bar_control |= (u16)(tid << 12); | ||
231 | bar->control = cpu_to_le16(bar_control); | ||
232 | bar->start_seq_num = cpu_to_le16(ssn); | ||
233 | |||
234 | ieee80211_tx_skb(sdata, skb, 0); | ||
235 | } | ||
236 | |||
237 | void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, | ||
238 | u16 initiator, u16 reason) | ||
239 | { | ||
240 | struct ieee80211_local *local = sdata->local; | ||
241 | struct ieee80211_hw *hw = &local->hw; | ||
242 | struct sta_info *sta; | ||
243 | int ret, i; | ||
244 | DECLARE_MAC_BUF(mac); | ||
245 | |||
246 | rcu_read_lock(); | ||
247 | |||
248 | sta = sta_info_get(local, ra); | ||
249 | if (!sta) { | ||
250 | rcu_read_unlock(); | ||
251 | return; | ||
252 | } | ||
253 | |||
254 | /* check if TID is in operational state */ | ||
255 | spin_lock_bh(&sta->lock); | ||
256 | if (sta->ampdu_mlme.tid_state_rx[tid] | ||
257 | != HT_AGG_STATE_OPERATIONAL) { | ||
258 | spin_unlock_bh(&sta->lock); | ||
259 | rcu_read_unlock(); | ||
260 | return; | ||
261 | } | ||
262 | sta->ampdu_mlme.tid_state_rx[tid] = | ||
263 | HT_AGG_STATE_REQ_STOP_BA_MSK | | ||
264 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | ||
265 | spin_unlock_bh(&sta->lock); | ||
266 | |||
267 | /* stop HW Rx aggregation. ampdu_action existence | ||
268 | * already verified in session init so we add the BUG_ON */ | ||
269 | BUG_ON(!local->ops->ampdu_action); | ||
270 | |||
271 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
272 | printk(KERN_DEBUG "Rx BA session stop requested for %s tid %u\n", | ||
273 | print_mac(mac, ra), tid); | ||
274 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
275 | |||
276 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP, | ||
277 | ra, tid, NULL); | ||
278 | if (ret) | ||
279 | printk(KERN_DEBUG "HW problem - can not stop rx " | ||
280 | "aggregation for tid %d\n", tid); | ||
281 | |||
282 | /* shutdown timer has not expired */ | ||
283 | if (initiator != WLAN_BACK_TIMER) | ||
284 | del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer); | ||
285 | |||
286 | /* check if this is a self generated aggregation halt */ | ||
287 | if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER) | ||
288 | ieee80211_send_delba(sdata, ra, tid, 0, reason); | ||
289 | |||
290 | /* free the reordering buffer */ | ||
291 | for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) { | ||
292 | if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) { | ||
293 | /* release the reordered frames */ | ||
294 | dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]); | ||
295 | sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--; | ||
296 | sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL; | ||
297 | } | ||
298 | } | ||
299 | /* free resources */ | ||
300 | kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf); | ||
301 | kfree(sta->ampdu_mlme.tid_rx[tid]); | ||
302 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
303 | sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE; | ||
304 | |||
305 | rcu_read_unlock(); | ||
306 | } | ||
307 | |||
308 | |||
309 | /* | ||
310 | * After sending add Block Ack request we activated a timer until | ||
311 | * add Block Ack response will arrive from the recipient. | ||
312 | * If this timer expires sta_addba_resp_timer_expired will be executed. | ||
313 | */ | ||
314 | static void sta_addba_resp_timer_expired(unsigned long data) | ||
315 | { | ||
316 | /* not an elegant detour, but there is no choice as the timer passes | ||
317 | * only one argument, and both sta_info and TID are needed, so init | ||
318 | * flow in sta_info_create gives the TID as data, while the timer_to_tid | ||
319 | * array gives the sta through container_of */ | ||
320 | u16 tid = *(u8 *)data; | ||
321 | struct sta_info *temp_sta = container_of((void *)data, | ||
322 | struct sta_info, timer_to_tid[tid]); | ||
323 | |||
324 | struct ieee80211_local *local = temp_sta->local; | ||
325 | struct ieee80211_hw *hw = &local->hw; | ||
326 | struct sta_info *sta; | ||
327 | u8 *state; | ||
328 | |||
329 | rcu_read_lock(); | ||
330 | |||
331 | sta = sta_info_get(local, temp_sta->addr); | ||
332 | if (!sta) { | ||
333 | rcu_read_unlock(); | ||
334 | return; | ||
335 | } | ||
336 | |||
337 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
338 | /* check if the TID waits for addBA response */ | ||
339 | spin_lock_bh(&sta->lock); | ||
340 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | ||
341 | spin_unlock_bh(&sta->lock); | ||
342 | *state = HT_AGG_STATE_IDLE; | ||
343 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
344 | printk(KERN_DEBUG "timer expired on tid %d but we are not " | ||
345 | "expecting addBA response there", tid); | ||
346 | #endif | ||
347 | goto timer_expired_exit; | ||
348 | } | ||
349 | |||
350 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
351 | printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); | ||
352 | #endif | ||
353 | |||
354 | /* go through the state check in stop_BA_session */ | ||
355 | *state = HT_AGG_STATE_OPERATIONAL; | ||
356 | spin_unlock_bh(&sta->lock); | ||
357 | ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid, | ||
358 | WLAN_BACK_INITIATOR); | ||
359 | |||
360 | timer_expired_exit: | ||
361 | rcu_read_unlock(); | ||
362 | } | ||
363 | |||
364 | void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr) | ||
365 | { | ||
366 | struct ieee80211_local *local = sdata->local; | ||
367 | int i; | ||
368 | |||
369 | for (i = 0; i < STA_TID_NUM; i++) { | ||
370 | ieee80211_stop_tx_ba_session(&local->hw, addr, i, | ||
371 | WLAN_BACK_INITIATOR); | ||
372 | ieee80211_sta_stop_rx_ba_session(sdata, addr, i, | ||
373 | WLAN_BACK_RECIPIENT, | ||
374 | WLAN_REASON_QSTA_LEAVE_QBSS); | ||
375 | } | ||
376 | } | ||
377 | |||
378 | int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) | ||
379 | { | ||
380 | struct ieee80211_local *local = hw_to_local(hw); | ||
381 | struct sta_info *sta; | ||
382 | struct ieee80211_sub_if_data *sdata; | ||
383 | u16 start_seq_num; | ||
384 | u8 *state; | ||
385 | int ret; | ||
386 | DECLARE_MAC_BUF(mac); | ||
387 | |||
388 | if (tid >= STA_TID_NUM) | ||
389 | return -EINVAL; | ||
390 | |||
391 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
392 | printk(KERN_DEBUG "Open BA session requested for %s tid %u\n", | ||
393 | print_mac(mac, ra), tid); | ||
394 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
395 | |||
396 | rcu_read_lock(); | ||
397 | |||
398 | sta = sta_info_get(local, ra); | ||
399 | if (!sta) { | ||
400 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
401 | printk(KERN_DEBUG "Could not find the station\n"); | ||
402 | #endif | ||
403 | ret = -ENOENT; | ||
404 | goto exit; | ||
405 | } | ||
406 | |||
407 | spin_lock_bh(&sta->lock); | ||
408 | |||
409 | /* we have tried too many times, receiver does not want A-MPDU */ | ||
410 | if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { | ||
411 | ret = -EBUSY; | ||
412 | goto err_unlock_sta; | ||
413 | } | ||
414 | |||
415 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
416 | /* check if the TID is not in aggregation flow already */ | ||
417 | if (*state != HT_AGG_STATE_IDLE) { | ||
418 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
419 | printk(KERN_DEBUG "BA request denied - session is not " | ||
420 | "idle on tid %u\n", tid); | ||
421 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
422 | ret = -EAGAIN; | ||
423 | goto err_unlock_sta; | ||
424 | } | ||
425 | |||
426 | /* prepare A-MPDU MLME for Tx aggregation */ | ||
427 | sta->ampdu_mlme.tid_tx[tid] = | ||
428 | kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); | ||
429 | if (!sta->ampdu_mlme.tid_tx[tid]) { | ||
430 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
431 | if (net_ratelimit()) | ||
432 | printk(KERN_ERR "allocate tx mlme to tid %d failed\n", | ||
433 | tid); | ||
434 | #endif | ||
435 | ret = -ENOMEM; | ||
436 | goto err_unlock_sta; | ||
437 | } | ||
438 | /* Tx timer */ | ||
439 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = | ||
440 | sta_addba_resp_timer_expired; | ||
441 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data = | ||
442 | (unsigned long)&sta->timer_to_tid[tid]; | ||
443 | init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
444 | |||
445 | /* create a new queue for this aggregation */ | ||
446 | ret = ieee80211_ht_agg_queue_add(local, sta, tid); | ||
447 | |||
448 | /* if no queue is available for aggregation, | ||
449 | * don't switch to aggregation */ | ||
450 | if (ret) { | ||
451 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
452 | printk(KERN_DEBUG "BA request denied - queue unavailable for" | ||
453 | " tid %d\n", tid); | ||
454 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
455 | goto err_unlock_queue; | ||
456 | } | ||
457 | sdata = sta->sdata; | ||
458 | |||
459 | /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the | ||
460 | * callback right away, it must see that the flow has begun */ | ||
461 | *state |= HT_ADDBA_REQUESTED_MSK; | ||
462 | |||
463 | /* This is slightly racy because the queue isn't stopped */ | ||
464 | start_seq_num = sta->tid_seq[tid]; | ||
465 | |||
466 | if (local->ops->ampdu_action) | ||
467 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START, | ||
468 | ra, tid, &start_seq_num); | ||
469 | |||
470 | if (ret) { | ||
471 | /* No need to requeue the packets in the agg queue, since we | ||
472 | * held the tx lock: no packet could be enqueued to the newly | ||
473 | * allocated queue */ | ||
474 | ieee80211_ht_agg_queue_remove(local, sta, tid, 0); | ||
475 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
476 | printk(KERN_DEBUG "BA request denied - HW unavailable for" | ||
477 | " tid %d\n", tid); | ||
478 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
479 | *state = HT_AGG_STATE_IDLE; | ||
480 | goto err_unlock_queue; | ||
481 | } | ||
482 | |||
483 | /* Will put all the packets in the new SW queue */ | ||
484 | ieee80211_requeue(local, ieee802_1d_to_ac[tid]); | ||
485 | spin_unlock_bh(&sta->lock); | ||
486 | |||
487 | /* send an addBA request */ | ||
488 | sta->ampdu_mlme.dialog_token_allocator++; | ||
489 | sta->ampdu_mlme.tid_tx[tid]->dialog_token = | ||
490 | sta->ampdu_mlme.dialog_token_allocator; | ||
491 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; | ||
492 | |||
493 | |||
494 | ieee80211_send_addba_request(sta->sdata, ra, tid, | ||
495 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, | ||
496 | sta->ampdu_mlme.tid_tx[tid]->ssn, | ||
497 | 0x40, 5000); | ||
498 | /* activate the timer for the recipient's addBA response */ | ||
499 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = | ||
500 | jiffies + ADDBA_RESP_INTERVAL; | ||
501 | add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
502 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
503 | printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); | ||
504 | #endif | ||
505 | goto exit; | ||
506 | |||
507 | err_unlock_queue: | ||
508 | kfree(sta->ampdu_mlme.tid_tx[tid]); | ||
509 | sta->ampdu_mlme.tid_tx[tid] = NULL; | ||
510 | ret = -EBUSY; | ||
511 | err_unlock_sta: | ||
512 | spin_unlock_bh(&sta->lock); | ||
513 | exit: | ||
514 | rcu_read_unlock(); | ||
515 | return ret; | ||
516 | } | ||
517 | EXPORT_SYMBOL(ieee80211_start_tx_ba_session); | ||
518 | |||
519 | int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw, | ||
520 | u8 *ra, u16 tid, | ||
521 | enum ieee80211_back_parties initiator) | ||
522 | { | ||
523 | struct ieee80211_local *local = hw_to_local(hw); | ||
524 | struct sta_info *sta; | ||
525 | u8 *state; | ||
526 | int ret = 0; | ||
527 | DECLARE_MAC_BUF(mac); | ||
528 | |||
529 | if (tid >= STA_TID_NUM) | ||
530 | return -EINVAL; | ||
531 | |||
532 | rcu_read_lock(); | ||
533 | sta = sta_info_get(local, ra); | ||
534 | if (!sta) { | ||
535 | rcu_read_unlock(); | ||
536 | return -ENOENT; | ||
537 | } | ||
538 | |||
539 | /* check if the TID is in aggregation */ | ||
540 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
541 | spin_lock_bh(&sta->lock); | ||
542 | |||
543 | if (*state != HT_AGG_STATE_OPERATIONAL) { | ||
544 | ret = -ENOENT; | ||
545 | goto stop_BA_exit; | ||
546 | } | ||
547 | |||
548 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
549 | printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n", | ||
550 | print_mac(mac, ra), tid); | ||
551 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
552 | |||
553 | ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]); | ||
554 | |||
555 | *state = HT_AGG_STATE_REQ_STOP_BA_MSK | | ||
556 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | ||
557 | |||
558 | if (local->ops->ampdu_action) | ||
559 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP, | ||
560 | ra, tid, NULL); | ||
561 | |||
562 | /* case HW denied going back to legacy */ | ||
563 | if (ret) { | ||
564 | WARN_ON(ret != -EBUSY); | ||
565 | *state = HT_AGG_STATE_OPERATIONAL; | ||
566 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
567 | goto stop_BA_exit; | ||
568 | } | ||
569 | |||
570 | stop_BA_exit: | ||
571 | spin_unlock_bh(&sta->lock); | ||
572 | rcu_read_unlock(); | ||
573 | return ret; | ||
574 | } | ||
575 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); | ||
576 | |||
577 | void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) | ||
578 | { | ||
579 | struct ieee80211_local *local = hw_to_local(hw); | ||
580 | struct sta_info *sta; | ||
581 | u8 *state; | ||
582 | DECLARE_MAC_BUF(mac); | ||
583 | |||
584 | if (tid >= STA_TID_NUM) { | ||
585 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
586 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | ||
587 | tid, STA_TID_NUM); | ||
588 | #endif | ||
589 | return; | ||
590 | } | ||
591 | |||
592 | rcu_read_lock(); | ||
593 | sta = sta_info_get(local, ra); | ||
594 | if (!sta) { | ||
595 | rcu_read_unlock(); | ||
596 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
597 | printk(KERN_DEBUG "Could not find station: %s\n", | ||
598 | print_mac(mac, ra)); | ||
599 | #endif | ||
600 | return; | ||
601 | } | ||
602 | |||
603 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
604 | spin_lock_bh(&sta->lock); | ||
605 | |||
606 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | ||
607 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
608 | printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", | ||
609 | *state); | ||
610 | #endif | ||
611 | spin_unlock_bh(&sta->lock); | ||
612 | rcu_read_unlock(); | ||
613 | return; | ||
614 | } | ||
615 | |||
616 | WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK); | ||
617 | |||
618 | *state |= HT_ADDBA_DRV_READY_MSK; | ||
619 | |||
620 | if (*state == HT_AGG_STATE_OPERATIONAL) { | ||
621 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
622 | printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); | ||
623 | #endif | ||
624 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
625 | } | ||
626 | spin_unlock_bh(&sta->lock); | ||
627 | rcu_read_unlock(); | ||
628 | } | ||
629 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); | ||
630 | |||
631 | void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid) | ||
632 | { | ||
633 | struct ieee80211_local *local = hw_to_local(hw); | ||
634 | struct sta_info *sta; | ||
635 | u8 *state; | ||
636 | int agg_queue; | ||
637 | DECLARE_MAC_BUF(mac); | ||
638 | |||
639 | if (tid >= STA_TID_NUM) { | ||
640 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
641 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | ||
642 | tid, STA_TID_NUM); | ||
643 | #endif | ||
644 | return; | ||
645 | } | ||
646 | |||
647 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
648 | printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n", | ||
649 | print_mac(mac, ra), tid); | ||
650 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
651 | |||
652 | rcu_read_lock(); | ||
653 | sta = sta_info_get(local, ra); | ||
654 | if (!sta) { | ||
655 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
656 | printk(KERN_DEBUG "Could not find station: %s\n", | ||
657 | print_mac(mac, ra)); | ||
658 | #endif | ||
659 | rcu_read_unlock(); | ||
660 | return; | ||
661 | } | ||
662 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
663 | |||
664 | /* NOTE: no need to use sta->lock in this state check, as | ||
665 | * ieee80211_stop_tx_ba_session will let only one stop call to | ||
666 | * pass through per sta/tid | ||
667 | */ | ||
668 | if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { | ||
669 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
670 | printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); | ||
671 | #endif | ||
672 | rcu_read_unlock(); | ||
673 | return; | ||
674 | } | ||
675 | |||
676 | if (*state & HT_AGG_STATE_INITIATOR_MSK) | ||
677 | ieee80211_send_delba(sta->sdata, ra, tid, | ||
678 | WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); | ||
679 | |||
680 | agg_queue = sta->tid_to_tx_q[tid]; | ||
681 | |||
682 | ieee80211_ht_agg_queue_remove(local, sta, tid, 1); | ||
683 | |||
684 | /* We just requeued all the frames that were in the | ||
685 | * removed queue, and since we might miss a softirq we do | ||
686 | * netif_schedule_queue. ieee80211_wake_queue is not used | ||
687 | * here as this queue is not necessarily stopped | ||
688 | */ | ||
689 | netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue)); | ||
690 | spin_lock_bh(&sta->lock); | ||
691 | *state = HT_AGG_STATE_IDLE; | ||
692 | sta->ampdu_mlme.addba_req_num[tid] = 0; | ||
693 | kfree(sta->ampdu_mlme.tid_tx[tid]); | ||
694 | sta->ampdu_mlme.tid_tx[tid] = NULL; | ||
695 | spin_unlock_bh(&sta->lock); | ||
696 | |||
697 | rcu_read_unlock(); | ||
698 | } | ||
699 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb); | ||
700 | |||
701 | void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, | ||
702 | const u8 *ra, u16 tid) | ||
703 | { | ||
704 | struct ieee80211_local *local = hw_to_local(hw); | ||
705 | struct ieee80211_ra_tid *ra_tid; | ||
706 | struct sk_buff *skb = dev_alloc_skb(0); | ||
707 | |||
708 | if (unlikely(!skb)) { | ||
709 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
710 | if (net_ratelimit()) | ||
711 | printk(KERN_WARNING "Not enough memory, " | ||
712 | "dropping start BA session\n"); | ||
713 | #endif | ||
714 | return; | ||
715 | } | ||
716 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
717 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | ||
718 | ra_tid->tid = tid; | ||
719 | |||
720 | skb->pkt_type = IEEE80211_ADDBA_MSG; | ||
721 | skb_queue_tail(&local->skb_queue, skb); | ||
722 | tasklet_schedule(&local->tasklet); | ||
723 | } | ||
724 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); | ||
725 | |||
726 | void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, | ||
727 | const u8 *ra, u16 tid) | ||
728 | { | ||
729 | struct ieee80211_local *local = hw_to_local(hw); | ||
730 | struct ieee80211_ra_tid *ra_tid; | ||
731 | struct sk_buff *skb = dev_alloc_skb(0); | ||
732 | |||
733 | if (unlikely(!skb)) { | ||
734 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
735 | if (net_ratelimit()) | ||
736 | printk(KERN_WARNING "Not enough memory, " | ||
737 | "dropping stop BA session\n"); | ||
738 | #endif | ||
739 | return; | ||
740 | } | ||
741 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
742 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | ||
743 | ra_tid->tid = tid; | ||
744 | |||
745 | skb->pkt_type = IEEE80211_DELBA_MSG; | ||
746 | skb_queue_tail(&local->skb_queue, skb); | ||
747 | tasklet_schedule(&local->tasklet); | ||
748 | } | ||
749 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); | ||
750 | |||
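The two _irqsafe exports above exist so a low-level driver can report ADDBA/DELBA completion from interrupt or tasklet context: the receiver address and TID are stashed in skb->cb and the real work is deferred to mac80211's tasklet. A hedged driver-side sketch (the driver and function name are hypothetical; only the mac80211 call is real):

/* Hypothetical driver IRQ path: hardware reports that TX aggregation
 * for (sta, tid) may start, so notify mac80211 without sleeping. */
static void mydrv_agg_start_complete_irq(struct ieee80211_hw *hw,
                                         const u8 *sta_addr, u16 tid)
{
        ieee80211_start_tx_ba_cb_irqsafe(hw, sta_addr, tid);
}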
751 | /* | ||
752 | * After accepting the AddBA Request we activate a timer, | ||
753 | * resetting it after each frame that arrives from the originator. | ||
754 | * If this timer expires, ieee80211_sta_stop_rx_ba_session is executed. | ||
755 | */ | ||
756 | static void sta_rx_agg_session_timer_expired(unsigned long data) | ||
757 | { | ||
758 | /* not an elegant detour, but there is no choice as the timer passes | ||
759 | * only one argument, and several sta_info fields are needed here, so the | ||
760 | * init flow in sta_info_create gives the TID as data, while the | ||
761 | * timer_to_tid array gives the sta through container_of */ | ||
762 | u8 *ptid = (u8 *)data; | ||
763 | u8 *timer_to_id = ptid - *ptid; | ||
764 | struct sta_info *sta = container_of(timer_to_id, struct sta_info, | ||
765 | timer_to_tid[0]); | ||
766 | |||
767 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
768 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); | ||
769 | #endif | ||
770 | ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->addr, | ||
771 | (u16)*ptid, WLAN_BACK_TIMER, | ||
772 | WLAN_REASON_QSTA_TIMEOUT); | ||
773 | } | ||
774 | |||
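The timer callback above recovers the sta_info from the single unsigned long the timer API provides by exploiting the layout of the timer_to_tid array (entry i holds the value i). A simplified, standalone sketch of the same pointer arithmetic; the struct and helper are illustrative, not mac80211 code, and STA_TID_NUM is assumed to be 16:

#include <stddef.h>

#define STA_TID_NUM 16

struct sta_sketch {
        unsigned char timer_to_tid[STA_TID_NUM];  /* entry i is initialised to i */
        /* ... other per-station fields ... */
};

/* Given &timer_to_tid[tid], step back *ptid entries to reach element 0,
 * then subtract the field offset to reach the enclosing structure. */
static struct sta_sketch *sta_from_tid_pointer(unsigned char *ptid)
{
        unsigned char *base = ptid - *ptid;  /* &timer_to_tid[0] */
        return (struct sta_sketch *)
                ((char *)base - offsetof(struct sta_sketch, timer_to_tid));
}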
775 | void ieee80211_process_addba_request(struct ieee80211_local *local, | ||
776 | struct sta_info *sta, | ||
777 | struct ieee80211_mgmt *mgmt, | ||
778 | size_t len) | ||
779 | { | ||
780 | struct ieee80211_hw *hw = &local->hw; | ||
781 | struct ieee80211_conf *conf = &hw->conf; | ||
782 | struct tid_ampdu_rx *tid_agg_rx; | ||
783 | u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status; | ||
784 | u8 dialog_token; | ||
785 | int ret = -EOPNOTSUPP; | ||
786 | DECLARE_MAC_BUF(mac); | ||
787 | |||
788 | /* extract session parameters from addba request frame */ | ||
789 | dialog_token = mgmt->u.action.u.addba_req.dialog_token; | ||
790 | timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout); | ||
791 | start_seq_num = | ||
792 | le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4; | ||
793 | |||
794 | capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); | ||
795 | ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1; | ||
796 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | ||
797 | buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; | ||
798 | |||
799 | status = WLAN_STATUS_REQUEST_DECLINED; | ||
800 | |||
801 | /* sanity check for incoming parameters: | ||
802 | * check if configuration can support the BA policy | ||
803 | * and if buffer size does not exceed max value */ | ||
804 | if (((ba_policy != 1) | ||
805 | && (!(conf->ht_conf.cap & IEEE80211_HT_CAP_DELAY_BA))) | ||
806 | || (buf_size > IEEE80211_MAX_AMPDU_BUF)) { | ||
807 | status = WLAN_STATUS_INVALID_QOS_PARAM; | ||
808 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
809 | if (net_ratelimit()) | ||
810 | printk(KERN_DEBUG "AddBA Req with bad params from " | ||
811 | "%s on tid %u. policy %d, buffer size %d\n", | ||
812 | print_mac(mac, mgmt->sa), tid, ba_policy, | ||
813 | buf_size); | ||
814 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
815 | goto end_no_lock; | ||
816 | } | ||
817 | /* determine default buffer size */ | ||
818 | if (buf_size == 0) { | ||
819 | struct ieee80211_supported_band *sband; | ||
820 | |||
821 | sband = local->hw.wiphy->bands[conf->channel->band]; | ||
822 | buf_size = IEEE80211_MIN_AMPDU_BUF; | ||
823 | buf_size = buf_size << sband->ht_info.ampdu_factor; | ||
824 | } | ||
825 | |||
826 | |||
827 | /* examine state machine */ | ||
828 | spin_lock_bh(&sta->lock); | ||
829 | |||
830 | if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { | ||
831 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
832 | if (net_ratelimit()) | ||
833 | printk(KERN_DEBUG "unexpected AddBA Req from " | ||
834 | "%s on tid %u\n", | ||
835 | print_mac(mac, mgmt->sa), tid); | ||
836 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
837 | goto end; | ||
838 | } | ||
839 | |||
840 | /* prepare A-MPDU MLME for Rx aggregation */ | ||
841 | sta->ampdu_mlme.tid_rx[tid] = | ||
842 | kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); | ||
843 | if (!sta->ampdu_mlme.tid_rx[tid]) { | ||
844 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
845 | if (net_ratelimit()) | ||
846 | printk(KERN_ERR "allocate rx mlme to tid %d failed\n", | ||
847 | tid); | ||
848 | #endif | ||
849 | goto end; | ||
850 | } | ||
851 | /* rx timer */ | ||
852 | sta->ampdu_mlme.tid_rx[tid]->session_timer.function = | ||
853 | sta_rx_agg_session_timer_expired; | ||
854 | sta->ampdu_mlme.tid_rx[tid]->session_timer.data = | ||
855 | (unsigned long)&sta->timer_to_tid[tid]; | ||
856 | init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer); | ||
857 | |||
858 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; | ||
859 | |||
860 | /* prepare reordering buffer */ | ||
861 | tid_agg_rx->reorder_buf = | ||
862 | kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC); | ||
863 | if (!tid_agg_rx->reorder_buf) { | ||
864 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
865 | if (net_ratelimit()) | ||
866 | printk(KERN_ERR "can not allocate reordering buffer " | ||
867 | "to tid %d\n", tid); | ||
868 | #endif | ||
869 | kfree(sta->ampdu_mlme.tid_rx[tid]); | ||
870 | goto end; | ||
871 | } | ||
872 | memset(tid_agg_rx->reorder_buf, 0, | ||
873 | buf_size * sizeof(struct sk_buff *)); | ||
874 | |||
875 | if (local->ops->ampdu_action) | ||
876 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START, | ||
877 | sta->addr, tid, &start_seq_num); | ||
878 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
879 | printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); | ||
880 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
881 | |||
882 | if (ret) { | ||
883 | kfree(tid_agg_rx->reorder_buf); | ||
884 | kfree(tid_agg_rx); | ||
885 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
886 | goto end; | ||
887 | } | ||
888 | |||
889 | /* change state and send addba resp */ | ||
890 | sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL; | ||
891 | tid_agg_rx->dialog_token = dialog_token; | ||
892 | tid_agg_rx->ssn = start_seq_num; | ||
893 | tid_agg_rx->head_seq_num = start_seq_num; | ||
894 | tid_agg_rx->buf_size = buf_size; | ||
895 | tid_agg_rx->timeout = timeout; | ||
896 | tid_agg_rx->stored_mpdu_num = 0; | ||
897 | status = WLAN_STATUS_SUCCESS; | ||
898 | end: | ||
899 | spin_unlock_bh(&sta->lock); | ||
900 | |||
901 | end_no_lock: | ||
902 | ieee80211_send_addba_resp(sta->sdata, sta->addr, tid, | ||
903 | dialog_token, status, 1, buf_size, timeout); | ||
904 | } | ||
905 | |||
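The shifts used when parsing the ADDBA request above imply the standard layout of the 16-bit Block Ack Parameter Set: bit 1 carries the BA policy, bits 2-5 the TID and bits 6-15 the buffer size (bit 0 is A-MSDU support, unused here). A small sketch that packs and unpacks such a word with the same shifts; the mask values are written out as assumptions rather than copied from include/linux/ieee80211.h:

#include <stdint.h>

static uint16_t addba_build_capab(uint16_t policy, uint16_t tid, uint16_t buf_size)
{
        return ((policy & 0x1) << 1) |      /* 1 = immediate BA, 0 = delayed BA */
               ((tid & 0xf) << 2) |
               ((buf_size & 0x3ff) << 6);
}

static void addba_parse_capab(uint16_t capab, uint16_t *policy,
                              uint16_t *tid, uint16_t *buf_size)
{
        *policy   = (capab >> 1) & 0x1;
        *tid      = (capab >> 2) & 0xf;
        *buf_size = (capab >> 6) & 0x3ff;
}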
906 | void ieee80211_process_addba_resp(struct ieee80211_local *local, | ||
907 | struct sta_info *sta, | ||
908 | struct ieee80211_mgmt *mgmt, | ||
909 | size_t len) | ||
910 | { | ||
911 | struct ieee80211_hw *hw = &local->hw; | ||
912 | u16 capab; | ||
913 | u16 tid; | ||
914 | u8 *state; | ||
915 | |||
916 | capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); | ||
917 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | ||
918 | |||
919 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
920 | |||
921 | spin_lock_bh(&sta->lock); | ||
922 | |||
923 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | ||
924 | spin_unlock_bh(&sta->lock); | ||
925 | return; | ||
926 | } | ||
927 | |||
928 | if (mgmt->u.action.u.addba_resp.dialog_token != | ||
929 | sta->ampdu_mlme.tid_tx[tid]->dialog_token) { | ||
930 | spin_unlock_bh(&sta->lock); | ||
931 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
932 | printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); | ||
933 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
934 | return; | ||
935 | } | ||
936 | |||
937 | del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
938 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
939 | printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); | ||
940 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
941 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) | ||
942 | == WLAN_STATUS_SUCCESS) { | ||
943 | *state |= HT_ADDBA_RECEIVED_MSK; | ||
944 | sta->ampdu_mlme.addba_req_num[tid] = 0; | ||
945 | |||
946 | if (*state == HT_AGG_STATE_OPERATIONAL) | ||
947 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
948 | |||
949 | spin_unlock_bh(&sta->lock); | ||
950 | } else { | ||
951 | sta->ampdu_mlme.addba_req_num[tid]++; | ||
952 | /* this will allow the state check in stop_BA_session */ | ||
953 | *state = HT_AGG_STATE_OPERATIONAL; | ||
954 | spin_unlock_bh(&sta->lock); | ||
955 | ieee80211_stop_tx_ba_session(hw, sta->addr, tid, | ||
956 | WLAN_BACK_INITIATOR); | ||
957 | } | ||
958 | } | ||
959 | |||
960 | void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, | ||
961 | struct sta_info *sta, | ||
962 | struct ieee80211_mgmt *mgmt, size_t len) | ||
963 | { | ||
964 | struct ieee80211_local *local = sdata->local; | ||
965 | u16 tid, params; | ||
966 | u16 initiator; | ||
967 | DECLARE_MAC_BUF(mac); | ||
968 | |||
969 | params = le16_to_cpu(mgmt->u.action.u.delba.params); | ||
970 | tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; | ||
971 | initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; | ||
972 | |||
973 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
974 | if (net_ratelimit()) | ||
975 | printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n", | ||
976 | print_mac(mac, mgmt->sa), | ||
977 | initiator ? "initiator" : "recipient", tid, | ||
978 | mgmt->u.action.u.delba.reason_code); | ||
979 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
980 | |||
981 | if (initiator == WLAN_BACK_INITIATOR) | ||
982 | ieee80211_sta_stop_rx_ba_session(sdata, sta->addr, tid, | ||
983 | WLAN_BACK_INITIATOR, 0); | ||
984 | else { /* WLAN_BACK_RECIPIENT */ | ||
985 | spin_lock_bh(&sta->lock); | ||
986 | sta->ampdu_mlme.tid_state_tx[tid] = | ||
987 | HT_AGG_STATE_OPERATIONAL; | ||
988 | spin_unlock_bh(&sta->lock); | ||
989 | ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid, | ||
990 | WLAN_BACK_RECIPIENT); | ||
991 | } | ||
992 | } | ||
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 4498d8713652..6f334e4c3d66 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -32,14 +32,6 @@ | |||
32 | /* ieee80211.o internal definitions, etc. These are not included into | 32 | /* ieee80211.o internal definitions, etc. These are not included into |
33 | * low-level drivers. */ | 33 | * low-level drivers. */ |
34 | 34 | ||
35 | #ifndef ETH_P_PAE | ||
36 | #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ | ||
37 | #endif /* ETH_P_PAE */ | ||
38 | |||
39 | #define WLAN_FC_DATA_PRESENT(fc) (((fc) & 0x4c) == 0x08) | ||
40 | |||
41 | #define IEEE80211_FC(type, subtype) cpu_to_le16(type | subtype) | ||
42 | |||
43 | struct ieee80211_local; | 35 | struct ieee80211_local; |
44 | 36 | ||
45 | /* Maximum number of broadcast/multicast frames to buffer when some of the | 37 | /* Maximum number of broadcast/multicast frames to buffer when some of the |
@@ -61,6 +53,12 @@ struct ieee80211_local; | |||
61 | * increased memory use (about 2 kB of RAM per entry). */ | 53 | * increased memory use (about 2 kB of RAM per entry). */ |
62 | #define IEEE80211_FRAGMENT_MAX 4 | 54 | #define IEEE80211_FRAGMENT_MAX 4 |
63 | 55 | ||
56 | /* | ||
57 | * Time after which we ignore scan results and no longer report/use | ||
58 | * them in any way. | ||
59 | */ | ||
60 | #define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ) | ||
61 | |||
64 | struct ieee80211_fragment_entry { | 62 | struct ieee80211_fragment_entry { |
65 | unsigned long first_frag_time; | 63 | unsigned long first_frag_time; |
66 | unsigned int seq; | 64 | unsigned int seq; |
@@ -87,16 +85,11 @@ struct ieee80211_sta_bss { | |||
87 | enum ieee80211_band band; | 85 | enum ieee80211_band band; |
88 | int freq; | 86 | int freq; |
89 | int signal, noise, qual; | 87 | int signal, noise, qual; |
90 | u8 *wpa_ie; | 88 | u8 *ies; /* all information elements from the last Beacon or Probe |
91 | size_t wpa_ie_len; | 89 | * Response frames; note Beacon frame is not allowed to |
92 | u8 *rsn_ie; | 90 | * override values from Probe Response */ |
93 | size_t rsn_ie_len; | 91 | size_t ies_len; |
94 | u8 *wmm_ie; | 92 | bool wmm_used; |
95 | size_t wmm_ie_len; | ||
96 | u8 *ht_ie; | ||
97 | size_t ht_ie_len; | ||
98 | u8 *ht_add_ie; | ||
99 | size_t ht_add_ie_len; | ||
100 | #ifdef CONFIG_MAC80211_MESH | 93 | #ifdef CONFIG_MAC80211_MESH |
101 | u8 *mesh_id; | 94 | u8 *mesh_id; |
102 | size_t mesh_id_len; | 95 | size_t mesh_id_len; |
@@ -108,7 +101,7 @@ struct ieee80211_sta_bss { | |||
108 | u64 timestamp; | 101 | u64 timestamp; |
109 | int beacon_int; | 102 | int beacon_int; |
110 | 103 | ||
111 | bool probe_resp; | 104 | unsigned long last_probe_resp; |
112 | unsigned long last_update; | 105 | unsigned long last_update; |
113 | 106 | ||
114 | /* during association, we save an ERP value from a probe response so | 107
@@ -174,7 +167,7 @@ struct ieee80211_tx_data { | |||
174 | struct sk_buff **extra_frag; | 167 | struct sk_buff **extra_frag; |
175 | int num_extra_frag; | 168 | int num_extra_frag; |
176 | 169 | ||
177 | u16 fc, ethertype; | 170 | u16 ethertype; |
178 | unsigned int flags; | 171 | unsigned int flags; |
179 | }; | 172 | }; |
180 | 173 | ||
@@ -202,7 +195,7 @@ struct ieee80211_rx_data { | |||
202 | struct ieee80211_rx_status *status; | 195 | struct ieee80211_rx_status *status; |
203 | struct ieee80211_rate *rate; | 196 | struct ieee80211_rate *rate; |
204 | 197 | ||
205 | u16 fc, ethertype; | 198 | u16 ethertype; |
206 | unsigned int flags; | 199 | unsigned int flags; |
207 | int sent_ps_buffered; | 200 | int sent_ps_buffered; |
208 | int queue; | 201 | int queue; |
@@ -300,17 +293,35 @@ struct mesh_config { | |||
300 | #define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) | 293 | #define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) |
301 | #define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) | 294 | #define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) |
302 | #define IEEE80211_STA_PRIVACY_INVOKED BIT(13) | 295 | #define IEEE80211_STA_PRIVACY_INVOKED BIT(13) |
296 | /* flags for MLME request */ | ||
297 | #define IEEE80211_STA_REQ_SCAN 0 | ||
298 | #define IEEE80211_STA_REQ_DIRECT_PROBE 1 | ||
299 | #define IEEE80211_STA_REQ_AUTH 2 | ||
300 | #define IEEE80211_STA_REQ_RUN 3 | ||
301 | |||
302 | /* flags used for setting mlme state */ | ||
303 | enum ieee80211_sta_mlme_state { | ||
304 | IEEE80211_STA_MLME_DISABLED, | ||
305 | IEEE80211_STA_MLME_DIRECT_PROBE, | ||
306 | IEEE80211_STA_MLME_AUTHENTICATE, | ||
307 | IEEE80211_STA_MLME_ASSOCIATE, | ||
308 | IEEE80211_STA_MLME_ASSOCIATED, | ||
309 | IEEE80211_STA_MLME_IBSS_SEARCH, | ||
310 | IEEE80211_STA_MLME_IBSS_JOINED, | ||
311 | IEEE80211_STA_MLME_MESH_UP | ||
312 | }; | ||
313 | |||
314 | /* bitfield of allowed auth algs */ | ||
315 | #define IEEE80211_AUTH_ALG_OPEN BIT(0) | ||
316 | #define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1) | ||
317 | #define IEEE80211_AUTH_ALG_LEAP BIT(2) | ||
318 | |||
303 | struct ieee80211_if_sta { | 319 | struct ieee80211_if_sta { |
304 | struct timer_list timer; | 320 | struct timer_list timer; |
305 | struct work_struct work; | 321 | struct work_struct work; |
306 | u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; | 322 | u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; |
307 | u8 ssid[IEEE80211_MAX_SSID_LEN]; | 323 | u8 ssid[IEEE80211_MAX_SSID_LEN]; |
308 | enum { | 324 | enum ieee80211_sta_mlme_state state; |
309 | IEEE80211_DISABLED, IEEE80211_AUTHENTICATE, | ||
310 | IEEE80211_ASSOCIATE, IEEE80211_ASSOCIATED, | ||
311 | IEEE80211_IBSS_SEARCH, IEEE80211_IBSS_JOINED, | ||
312 | IEEE80211_MESH_UP | ||
313 | } state; | ||
314 | size_t ssid_len; | 325 | size_t ssid_len; |
315 | u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; | 326 | u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; |
316 | size_t scan_ssid_len; | 327 | size_t scan_ssid_len; |
@@ -353,20 +364,17 @@ struct ieee80211_if_sta { | |||
353 | 364 | ||
354 | struct sk_buff_head skb_queue; | 365 | struct sk_buff_head skb_queue; |
355 | 366 | ||
356 | int auth_tries, assoc_tries; | 367 | int assoc_scan_tries; /* number of scans done pre-association */ |
368 | int direct_probe_tries; /* retries for direct probes */ | ||
369 | int auth_tries; /* retries for auth req */ | ||
370 | int assoc_tries; /* retries for assoc req */ | ||
357 | 371 | ||
358 | unsigned long request; | 372 | unsigned long request; |
359 | 373 | ||
360 | unsigned long last_probe; | 374 | unsigned long last_probe; |
361 | 375 | ||
362 | unsigned int flags; | 376 | unsigned int flags; |
363 | #define IEEE80211_STA_REQ_SCAN 0 | ||
364 | #define IEEE80211_STA_REQ_AUTH 1 | ||
365 | #define IEEE80211_STA_REQ_RUN 2 | ||
366 | 377 | ||
367 | #define IEEE80211_AUTH_ALG_OPEN BIT(0) | ||
368 | #define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1) | ||
369 | #define IEEE80211_AUTH_ALG_LEAP BIT(2) | ||
370 | unsigned int auth_algs; /* bitfield of allowed auth algs */ | 378 | unsigned int auth_algs; /* bitfield of allowed auth algs */ |
371 | int auth_alg; /* currently used IEEE 802.11 authentication algorithm */ | 379 | int auth_alg; /* currently used IEEE 802.11 authentication algorithm */ |
372 | int auth_transaction; | 380 | int auth_transaction; |
@@ -634,7 +642,7 @@ struct ieee80211_local { | |||
634 | enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; | 642 | enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; |
635 | unsigned long last_scan_completed; | 643 | unsigned long last_scan_completed; |
636 | struct delayed_work scan_work; | 644 | struct delayed_work scan_work; |
637 | struct net_device *scan_dev; | 645 | struct ieee80211_sub_if_data *scan_sdata; |
638 | struct ieee80211_channel *oper_channel, *scan_channel; | 646 | struct ieee80211_channel *oper_channel, *scan_channel; |
639 | u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; | 647 | u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; |
640 | size_t scan_ssid_len; | 648 | size_t scan_ssid_len; |
@@ -774,6 +782,9 @@ struct ieee80211_ra_tid { | |||
774 | 782 | ||
775 | /* Parsed Information Elements */ | 783 | /* Parsed Information Elements */ |
776 | struct ieee802_11_elems { | 784 | struct ieee802_11_elems { |
785 | u8 *ie_start; | ||
786 | size_t total_len; | ||
787 | |||
777 | /* pointers to IEs */ | 788 | /* pointers to IEs */ |
778 | u8 *ssid; | 789 | u8 *ssid; |
779 | u8 *supp_rates; | 790 | u8 *supp_rates; |
@@ -867,65 +878,67 @@ u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht, | |||
867 | 878 | ||
868 | /* ieee80211_ioctl.c */ | 879 | /* ieee80211_ioctl.c */ |
869 | extern const struct iw_handler_def ieee80211_iw_handler_def; | 880 | extern const struct iw_handler_def ieee80211_iw_handler_def; |
870 | int ieee80211_set_freq(struct net_device *dev, int freq); | 881 | int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq); |
871 | 882 | ||
872 | /* ieee80211_sta.c */ | 883 | /* ieee80211_sta.c */ |
873 | void ieee80211_sta_timer(unsigned long data); | 884 | void ieee80211_sta_timer(unsigned long data); |
874 | void ieee80211_sta_work(struct work_struct *work); | 885 | void ieee80211_sta_work(struct work_struct *work); |
875 | void ieee80211_sta_scan_work(struct work_struct *work); | 886 | void ieee80211_sta_scan_work(struct work_struct *work); |
876 | void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, | 887 | void ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, |
877 | struct ieee80211_rx_status *rx_status); | 888 | struct ieee80211_rx_status *rx_status); |
878 | int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len); | 889 | int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len); |
879 | int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len); | 890 | int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len); |
880 | int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid); | 891 | int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid); |
881 | int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len); | 892 | int ieee80211_sta_req_scan(struct ieee80211_sub_if_data *sdata, u8 *ssid, size_t ssid_len); |
882 | void ieee80211_sta_req_auth(struct net_device *dev, | 893 | void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata, |
883 | struct ieee80211_if_sta *ifsta); | 894 | struct ieee80211_if_sta *ifsta); |
884 | int ieee80211_sta_scan_results(struct net_device *dev, | 895 | int ieee80211_sta_scan_results(struct ieee80211_local *local, |
885 | struct iw_request_info *info, | 896 | struct iw_request_info *info, |
886 | char *buf, size_t len); | 897 | char *buf, size_t len); |
887 | ieee80211_rx_result ieee80211_sta_rx_scan( | 898 | ieee80211_rx_result ieee80211_sta_rx_scan( |
888 | struct net_device *dev, struct sk_buff *skb, | 899 | struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, |
889 | struct ieee80211_rx_status *rx_status); | 900 | struct ieee80211_rx_status *rx_status); |
890 | void ieee80211_rx_bss_list_init(struct ieee80211_local *local); | 901 | void ieee80211_rx_bss_list_init(struct ieee80211_local *local); |
891 | void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local); | 902 | void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local); |
892 | int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len); | 903 | int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len); |
893 | struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev, | 904 | struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, |
894 | struct sk_buff *skb, u8 *bssid, | 905 | struct sk_buff *skb, u8 *bssid, |
895 | u8 *addr, u64 supp_rates); | 906 | u8 *addr, u64 supp_rates); |
896 | int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason); | 907 | int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason); |
897 | int ieee80211_sta_disassociate(struct net_device *dev, u16 reason); | 908 | int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason); |
898 | void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | 909 | void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, |
899 | u32 changed); | 910 | u32 changed); |
900 | u32 ieee80211_reset_erp_info(struct net_device *dev); | 911 | u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); |
901 | int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie, | ||
902 | struct ieee80211_ht_info *ht_info); | ||
903 | int ieee80211_ht_addt_info_ie_to_ht_bss_info( | ||
904 | struct ieee80211_ht_addt_info *ht_add_info_ie, | ||
905 | struct ieee80211_ht_bss_info *bss_info); | ||
906 | void ieee80211_send_addba_request(struct net_device *dev, const u8 *da, | ||
907 | u16 tid, u8 dialog_token, u16 start_seq_num, | ||
908 | u16 agg_size, u16 timeout); | ||
909 | void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, | ||
910 | u16 initiator, u16 reason_code); | ||
911 | void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn); | ||
912 | |||
913 | void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da, | ||
914 | u16 tid, u16 initiator, u16 reason); | ||
915 | void sta_addba_resp_timer_expired(unsigned long data); | ||
916 | void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr); | ||
917 | u64 ieee80211_sta_get_rates(struct ieee80211_local *local, | 912 | u64 ieee80211_sta_get_rates(struct ieee80211_local *local, |
918 | struct ieee802_11_elems *elems, | 913 | struct ieee802_11_elems *elems, |
919 | enum ieee80211_band band); | 914 | enum ieee80211_band band); |
920 | void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, | 915 | void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, |
921 | int encrypt); | 916 | u8 *ssid, size_t ssid_len); |
922 | void ieee802_11_parse_elems(u8 *start, size_t len, | 917 | void ieee802_11_parse_elems(u8 *start, size_t len, |
923 | struct ieee802_11_elems *elems); | 918 | struct ieee802_11_elems *elems); |
919 | void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); | ||
920 | int ieee80211_sta_start_scan(struct ieee80211_sub_if_data *scan_sdata, | ||
921 | u8 *ssid, size_t ssid_len); | ||
922 | struct ieee80211_sta_bss * | ||
923 | ieee80211_bss_info_update(struct ieee80211_local *local, | ||
924 | struct ieee80211_rx_status *rx_status, | ||
925 | struct ieee80211_mgmt *mgmt, | ||
926 | size_t len, | ||
927 | struct ieee802_11_elems *elems, | ||
928 | int freq, bool beacon); | ||
929 | struct ieee80211_sta_bss * | ||
930 | ieee80211_rx_bss_add(struct ieee80211_local *local, u8 *bssid, int freq, | ||
931 | u8 *ssid, u8 ssid_len); | ||
932 | struct ieee80211_sta_bss * | ||
933 | ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, | ||
934 | u8 *ssid, u8 ssid_len); | ||
935 | void ieee80211_rx_bss_put(struct ieee80211_local *local, | ||
936 | struct ieee80211_sta_bss *bss); | ||
924 | 937 | ||
925 | #ifdef CONFIG_MAC80211_MESH | 938 | #ifdef CONFIG_MAC80211_MESH |
926 | void ieee80211_start_mesh(struct net_device *dev); | 939 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); |
927 | #else | 940 | #else |
928 | static inline void ieee80211_start_mesh(struct net_device *dev) | 941 | static inline void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) |
929 | {} | 942 | {} |
930 | #endif | 943 | #endif |
931 | 944 | ||
@@ -936,7 +949,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
936 | struct vif_params *params); | 949 | struct vif_params *params); |
937 | int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | 950 | int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, |
938 | enum ieee80211_if_types type); | 951 | enum ieee80211_if_types type); |
939 | void ieee80211_if_remove(struct net_device *dev); | 952 | void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); |
940 | void ieee80211_remove_interfaces(struct ieee80211_local *local); | 953 | void ieee80211_remove_interfaces(struct ieee80211_local *local); |
941 | 954 | ||
942 | /* tx handling */ | 955 | /* tx handling */ |
@@ -946,6 +959,34 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev); | |||
946 | int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); | 959 | int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); |
947 | int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); | 960 | int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); |
948 | 961 | ||
962 | /* HT */ | ||
963 | int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie, | ||
964 | struct ieee80211_ht_info *ht_info); | ||
965 | int ieee80211_ht_addt_info_ie_to_ht_bss_info( | ||
966 | struct ieee80211_ht_addt_info *ht_add_info_ie, | ||
967 | struct ieee80211_ht_bss_info *bss_info); | ||
968 | void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn); | ||
969 | |||
970 | void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da, | ||
971 | u16 tid, u16 initiator, u16 reason); | ||
972 | void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr); | ||
973 | void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, | ||
974 | struct sta_info *sta, | ||
975 | struct ieee80211_mgmt *mgmt, size_t len); | ||
976 | void ieee80211_process_addba_resp(struct ieee80211_local *local, | ||
977 | struct sta_info *sta, | ||
978 | struct ieee80211_mgmt *mgmt, | ||
979 | size_t len); | ||
980 | void ieee80211_process_addba_request(struct ieee80211_local *local, | ||
981 | struct sta_info *sta, | ||
982 | struct ieee80211_mgmt *mgmt, | ||
983 | size_t len); | ||
984 | |||
985 | /* Spectrum management */ | ||
986 | void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, | ||
987 | struct ieee80211_mgmt *mgmt, | ||
988 | size_t len); | ||
989 | |||
949 | /* utility functions/constants */ | 990 | /* utility functions/constants */ |
950 | extern void *mac80211_wiphy_privid; /* for wiphy privid */ | 991 | extern void *mac80211_wiphy_privid; /* for wiphy privid */ |
951 | extern const unsigned char rfc1042_header[6]; | 992 | extern const unsigned char rfc1042_header[6]; |
@@ -954,8 +995,11 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, | |||
954 | enum ieee80211_if_types type); | 995 | enum ieee80211_if_types type); |
955 | int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, | 996 | int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, |
956 | int rate, int erp, int short_preamble); | 997 | int rate, int erp, int short_preamble); |
957 | void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, | 998 | void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, |
958 | struct ieee80211_hdr *hdr); | 999 | struct ieee80211_hdr *hdr); |
1000 | void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata); | ||
1001 | void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | ||
1002 | int encrypt); | ||
959 | 1003 | ||
960 | #ifdef CONFIG_MAC80211_NOINLINE | 1004 | #ifdef CONFIG_MAC80211_NOINLINE |
961 | #define debug_noinline noinline | 1005 | #define debug_noinline noinline |
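The ieee80211_sta_bss change above replaces the separate wpa_ie/rsn_ie/wmm_ie/ht_ie copies with a single raw ies buffer holding all information elements from the last Beacon or Probe Response. Consumers can then walk the standard TLV layout (one byte element ID, one byte length, payload). A minimal lookup sketch follows; find_ie is a hypothetical helper shown only to illustrate the idea, not a mac80211 API:

#include <stddef.h>

/* Return a pointer to the first element with the given ID, or NULL.
 * ies points at a sequence of (id, len, data[len]) records. */
static const unsigned char *find_ie(unsigned char eid,
                                    const unsigned char *ies, size_t len)
{
        while (len >= 2 && len >= 2u + ies[1]) {
                if (ies[0] == eid)
                        return ies;
                len -= 2u + ies[1];
                ies += 2u + ies[1];
        }
        return NULL;
}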
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 610ed1d9893a..672cec60a2fb 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -31,11 +31,11 @@ static void ieee80211_teardown_sdata(struct net_device *dev) | |||
31 | int flushed; | 31 | int flushed; |
32 | int i; | 32 | int i; |
33 | 33 | ||
34 | ieee80211_debugfs_remove_netdev(sdata); | ||
35 | |||
36 | /* free extra data */ | 34 | /* free extra data */ |
37 | ieee80211_free_keys(sdata); | 35 | ieee80211_free_keys(sdata); |
38 | 36 | ||
37 | ieee80211_debugfs_remove_netdev(sdata); | ||
38 | |||
39 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) | 39 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) |
40 | __skb_queue_purge(&sdata->fragments[i].skb_list); | 40 | __skb_queue_purge(&sdata->fragments[i].skb_list); |
41 | sdata->fragment_next = 0; | 41 | sdata->fragment_next = 0; |
@@ -56,7 +56,7 @@ static void ieee80211_teardown_sdata(struct net_device *dev) | |||
56 | case IEEE80211_IF_TYPE_MESH_POINT: | 56 | case IEEE80211_IF_TYPE_MESH_POINT: |
57 | /* Allow compiler to elide mesh_rmc_free call. */ | 57 | /* Allow compiler to elide mesh_rmc_free call. */ |
58 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 58 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
59 | mesh_rmc_free(dev); | 59 | mesh_rmc_free(sdata); |
60 | /* fall through */ | 60 | /* fall through */ |
61 | case IEEE80211_IF_TYPE_STA: | 61 | case IEEE80211_IF_TYPE_STA: |
62 | case IEEE80211_IF_TYPE_IBSS: | 62 | case IEEE80211_IF_TYPE_IBSS: |
@@ -241,15 +241,13 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
241 | return ret; | 241 | return ret; |
242 | } | 242 | } |
243 | 243 | ||
244 | void ieee80211_if_remove(struct net_device *dev) | 244 | void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) |
245 | { | 245 | { |
246 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
247 | |||
248 | ASSERT_RTNL(); | 246 | ASSERT_RTNL(); |
249 | 247 | ||
250 | list_del_rcu(&sdata->list); | 248 | list_del_rcu(&sdata->list); |
251 | synchronize_rcu(); | 249 | synchronize_rcu(); |
252 | unregister_netdevice(dev); | 250 | unregister_netdevice(sdata->dev); |
253 | } | 251 | } |
254 | 252 | ||
255 | /* | 253 | /* |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index aa5a191598c9..6a7f4fae18c2 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -187,9 +187,15 @@ static int ieee80211_open(struct net_device *dev) | |||
187 | u32 changed = 0; | 187 | u32 changed = 0; |
188 | int res; | 188 | int res; |
189 | bool need_hw_reconfig = 0; | 189 | bool need_hw_reconfig = 0; |
190 | u8 null_addr[ETH_ALEN] = {0}; | ||
190 | 191 | ||
191 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 192 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
192 | 193 | ||
194 | /* fail early if user set an invalid address */ | ||
195 | if (compare_ether_addr(dev->dev_addr, null_addr) && | ||
196 | !is_valid_ether_addr(dev->dev_addr)) | ||
197 | return -EADDRNOTAVAIL; | ||
198 | |||
193 | /* we hold the RTNL here so can safely walk the list */ | 199 | /* we hold the RTNL here so can safely walk the list */ |
194 | list_for_each_entry(nsdata, &local->interfaces, list) { | 200 | list_for_each_entry(nsdata, &local->interfaces, list) { |
195 | struct net_device *ndev = nsdata->dev; | 201 | struct net_device *ndev = nsdata->dev; |
@@ -270,6 +276,36 @@ static int ieee80211_open(struct net_device *dev) | |||
270 | ieee80211_led_radio(local, local->hw.conf.radio_enabled); | 276 | ieee80211_led_radio(local, local->hw.conf.radio_enabled); |
271 | } | 277 | } |
272 | 278 | ||
279 | /* | ||
280 | * Check all interfaces and copy the hopefully now-present | ||
281 | * MAC address to those that have the special null one. | ||
282 | */ | ||
283 | list_for_each_entry(nsdata, &local->interfaces, list) { | ||
284 | struct net_device *ndev = nsdata->dev; | ||
285 | |||
286 | /* | ||
287 | * No need to check netif_running since we do not allow | ||
288 | * it to start up with this invalid address. | ||
289 | */ | ||
290 | if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) | ||
291 | memcpy(ndev->dev_addr, | ||
292 | local->hw.wiphy->perm_addr, | ||
293 | ETH_ALEN); | ||
294 | } | ||
295 | |||
296 | if (compare_ether_addr(null_addr, local->mdev->dev_addr) == 0) | ||
297 | memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, | ||
298 | ETH_ALEN); | ||
299 | |||
300 | /* | ||
301 | * Validate the MAC address for this device. | ||
302 | */ | ||
303 | if (!is_valid_ether_addr(dev->dev_addr)) { | ||
304 | if (!local->open_count && local->ops->stop) | ||
305 | local->ops->stop(local_to_hw(local)); | ||
306 | return -EADDRNOTAVAIL; | ||
307 | } | ||
308 | |||
273 | switch (sdata->vif.type) { | 309 | switch (sdata->vif.type) { |
274 | case IEEE80211_IF_TYPE_VLAN: | 310 | case IEEE80211_IF_TYPE_VLAN: |
275 | /* no need to tell driver */ | 311 | /* no need to tell driver */ |
@@ -311,8 +347,8 @@ static int ieee80211_open(struct net_device *dev) | |||
311 | goto err_stop; | 347 | goto err_stop; |
312 | 348 | ||
313 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 349 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
314 | ieee80211_start_mesh(sdata->dev); | 350 | ieee80211_start_mesh(sdata); |
315 | changed |= ieee80211_reset_erp_info(dev); | 351 | changed |= ieee80211_reset_erp_info(sdata); |
316 | ieee80211_bss_info_change_notify(sdata, changed); | 352 | ieee80211_bss_info_change_notify(sdata, changed); |
317 | ieee80211_enable_keys(sdata); | 353 | ieee80211_enable_keys(sdata); |
318 | 354 | ||
@@ -363,8 +399,15 @@ static int ieee80211_open(struct net_device *dev) | |||
363 | atomic_inc(&local->iff_promiscs); | 399 | atomic_inc(&local->iff_promiscs); |
364 | 400 | ||
365 | local->open_count++; | 401 | local->open_count++; |
366 | if (need_hw_reconfig) | 402 | if (need_hw_reconfig) { |
367 | ieee80211_hw_config(local); | 403 | ieee80211_hw_config(local); |
404 | /* | ||
405 | * set default queue parameters so drivers don't | ||
406 | * need to initialise the hardware if the hardware | ||
407 | * doesn't start up with sane defaults | ||
408 | */ | ||
409 | ieee80211_set_wmm_default(sdata); | ||
410 | } | ||
368 | 411 | ||
369 | /* | 412 | /* |
370 | * ieee80211_sta_work is disabled while network interface | 413 | * ieee80211_sta_work is disabled while network interface |
@@ -412,7 +455,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
412 | 455 | ||
413 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | 456 | list_for_each_entry_rcu(sta, &local->sta_list, list) { |
414 | if (sta->sdata == sdata) | 457 | if (sta->sdata == sdata) |
415 | ieee80211_sta_tear_down_BA_sessions(dev, sta->addr); | 458 | ieee80211_sta_tear_down_BA_sessions(sdata, sta->addr); |
416 | } | 459 | } |
417 | 460 | ||
418 | rcu_read_unlock(); | 461 | rcu_read_unlock(); |
@@ -503,7 +546,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
503 | /* fall through */ | 546 | /* fall through */ |
504 | case IEEE80211_IF_TYPE_STA: | 547 | case IEEE80211_IF_TYPE_STA: |
505 | case IEEE80211_IF_TYPE_IBSS: | 548 | case IEEE80211_IF_TYPE_IBSS: |
506 | sdata->u.sta.state = IEEE80211_DISABLED; | 549 | sdata->u.sta.state = IEEE80211_STA_MLME_DISABLED; |
507 | memset(sdata->u.sta.bssid, 0, ETH_ALEN); | 550 | memset(sdata->u.sta.bssid, 0, ETH_ALEN); |
508 | del_timer_sync(&sdata->u.sta.timer); | 551 | del_timer_sync(&sdata->u.sta.timer); |
509 | /* | 552 | /* |
@@ -515,7 +558,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
515 | synchronize_rcu(); | 558 | synchronize_rcu(); |
516 | skb_queue_purge(&sdata->u.sta.skb_queue); | 559 | skb_queue_purge(&sdata->u.sta.skb_queue); |
517 | 560 | ||
518 | if (local->scan_dev == sdata->dev) { | 561 | if (local->scan_sdata == sdata) { |
519 | if (!local->ops->hw_scan) { | 562 | if (!local->ops->hw_scan) { |
520 | local->sta_sw_scanning = 0; | 563 | local->sta_sw_scanning = 0; |
521 | cancel_delayed_work(&local->scan_work); | 564 | cancel_delayed_work(&local->scan_work); |
@@ -557,376 +600,6 @@ static int ieee80211_stop(struct net_device *dev) | |||
557 | return 0; | 600 | return 0; |
558 | } | 601 | } |
559 | 602 | ||
560 | int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) | ||
561 | { | ||
562 | struct ieee80211_local *local = hw_to_local(hw); | ||
563 | struct sta_info *sta; | ||
564 | struct ieee80211_sub_if_data *sdata; | ||
565 | u16 start_seq_num = 0; | ||
566 | u8 *state; | ||
567 | int ret; | ||
568 | DECLARE_MAC_BUF(mac); | ||
569 | |||
570 | if (tid >= STA_TID_NUM) | ||
571 | return -EINVAL; | ||
572 | |||
573 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
574 | printk(KERN_DEBUG "Open BA session requested for %s tid %u\n", | ||
575 | print_mac(mac, ra), tid); | ||
576 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
577 | |||
578 | rcu_read_lock(); | ||
579 | |||
580 | sta = sta_info_get(local, ra); | ||
581 | if (!sta) { | ||
582 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
583 | printk(KERN_DEBUG "Could not find the station\n"); | ||
584 | #endif | ||
585 | ret = -ENOENT; | ||
586 | goto exit; | ||
587 | } | ||
588 | |||
589 | spin_lock_bh(&sta->lock); | ||
590 | |||
591 | /* we have tried too many times, receiver does not want A-MPDU */ | ||
592 | if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { | ||
593 | ret = -EBUSY; | ||
594 | goto err_unlock_sta; | ||
595 | } | ||
596 | |||
597 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
598 | /* check if the TID is not in aggregation flow already */ | ||
599 | if (*state != HT_AGG_STATE_IDLE) { | ||
600 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
601 | printk(KERN_DEBUG "BA request denied - session is not " | ||
602 | "idle on tid %u\n", tid); | ||
603 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
604 | ret = -EAGAIN; | ||
605 | goto err_unlock_sta; | ||
606 | } | ||
607 | |||
608 | /* prepare A-MPDU MLME for Tx aggregation */ | ||
609 | sta->ampdu_mlme.tid_tx[tid] = | ||
610 | kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); | ||
611 | if (!sta->ampdu_mlme.tid_tx[tid]) { | ||
612 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
613 | if (net_ratelimit()) | ||
614 | printk(KERN_ERR "allocate tx mlme to tid %d failed\n", | ||
615 | tid); | ||
616 | #endif | ||
617 | ret = -ENOMEM; | ||
618 | goto err_unlock_sta; | ||
619 | } | ||
620 | /* Tx timer */ | ||
621 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = | ||
622 | sta_addba_resp_timer_expired; | ||
623 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data = | ||
624 | (unsigned long)&sta->timer_to_tid[tid]; | ||
625 | init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
626 | |||
627 | /* create a new queue for this aggregation */ | ||
628 | ret = ieee80211_ht_agg_queue_add(local, sta, tid); | ||
629 | |||
630 | /* case no queue is available to aggregation | ||
631 | * don't switch to aggregation */ | ||
632 | if (ret) { | ||
633 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
634 | printk(KERN_DEBUG "BA request denied - queue unavailable for" | ||
635 | " tid %d\n", tid); | ||
636 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
637 | goto err_unlock_queue; | ||
638 | } | ||
639 | sdata = sta->sdata; | ||
640 | |||
641 | /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the | ||
642 | * call back right away, it must see that the flow has begun */ | ||
643 | *state |= HT_ADDBA_REQUESTED_MSK; | ||
644 | |||
645 | if (local->ops->ampdu_action) | ||
646 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START, | ||
647 | ra, tid, &start_seq_num); | ||
648 | |||
649 | if (ret) { | ||
650 | /* No need to requeue the packets in the agg queue, since we | ||
651 | * held the tx lock: no packet could be enqueued to the newly | ||
652 | * allocated queue */ | ||
653 | ieee80211_ht_agg_queue_remove(local, sta, tid, 0); | ||
654 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
655 | printk(KERN_DEBUG "BA request denied - HW unavailable for" | ||
656 | " tid %d\n", tid); | ||
657 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
658 | *state = HT_AGG_STATE_IDLE; | ||
659 | goto err_unlock_queue; | ||
660 | } | ||
661 | |||
662 | /* Will put all the packets in the new SW queue */ | ||
663 | ieee80211_requeue(local, ieee802_1d_to_ac[tid]); | ||
664 | spin_unlock_bh(&sta->lock); | ||
665 | |||
666 | /* send an addBA request */ | ||
667 | sta->ampdu_mlme.dialog_token_allocator++; | ||
668 | sta->ampdu_mlme.tid_tx[tid]->dialog_token = | ||
669 | sta->ampdu_mlme.dialog_token_allocator; | ||
670 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; | ||
671 | |||
672 | |||
673 | ieee80211_send_addba_request(sta->sdata->dev, ra, tid, | ||
674 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, | ||
675 | sta->ampdu_mlme.tid_tx[tid]->ssn, | ||
676 | 0x40, 5000); | ||
677 | /* activate the timer for the recipient's addBA response */ | ||
678 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = | ||
679 | jiffies + ADDBA_RESP_INTERVAL; | ||
680 | add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
681 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
682 | printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); | ||
683 | #endif | ||
684 | goto exit; | ||
685 | |||
686 | err_unlock_queue: | ||
687 | kfree(sta->ampdu_mlme.tid_tx[tid]); | ||
688 | sta->ampdu_mlme.tid_tx[tid] = NULL; | ||
689 | ret = -EBUSY; | ||
690 | err_unlock_sta: | ||
691 | spin_unlock_bh(&sta->lock); | ||
692 | exit: | ||
693 | rcu_read_unlock(); | ||
694 | return ret; | ||
695 | } | ||
696 | EXPORT_SYMBOL(ieee80211_start_tx_ba_session); | ||
697 | |||
698 | int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw, | ||
699 | u8 *ra, u16 tid, | ||
700 | enum ieee80211_back_parties initiator) | ||
701 | { | ||
702 | struct ieee80211_local *local = hw_to_local(hw); | ||
703 | struct sta_info *sta; | ||
704 | u8 *state; | ||
705 | int ret = 0; | ||
706 | DECLARE_MAC_BUF(mac); | ||
707 | |||
708 | if (tid >= STA_TID_NUM) | ||
709 | return -EINVAL; | ||
710 | |||
711 | rcu_read_lock(); | ||
712 | sta = sta_info_get(local, ra); | ||
713 | if (!sta) { | ||
714 | rcu_read_unlock(); | ||
715 | return -ENOENT; | ||
716 | } | ||
717 | |||
718 | /* check if the TID is in aggregation */ | ||
719 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
720 | spin_lock_bh(&sta->lock); | ||
721 | |||
722 | if (*state != HT_AGG_STATE_OPERATIONAL) { | ||
723 | ret = -ENOENT; | ||
724 | goto stop_BA_exit; | ||
725 | } | ||
726 | |||
727 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
728 | printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n", | ||
729 | print_mac(mac, ra), tid); | ||
730 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
731 | |||
732 | ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]); | ||
733 | |||
734 | *state = HT_AGG_STATE_REQ_STOP_BA_MSK | | ||
735 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | ||
736 | |||
737 | if (local->ops->ampdu_action) | ||
738 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP, | ||
739 | ra, tid, NULL); | ||
740 | |||
741 | /* case HW denied going back to legacy */ | ||
742 | if (ret) { | ||
743 | WARN_ON(ret != -EBUSY); | ||
744 | *state = HT_AGG_STATE_OPERATIONAL; | ||
745 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
746 | goto stop_BA_exit; | ||
747 | } | ||
748 | |||
749 | stop_BA_exit: | ||
750 | spin_unlock_bh(&sta->lock); | ||
751 | rcu_read_unlock(); | ||
752 | return ret; | ||
753 | } | ||
754 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); | ||
755 | |||
756 | void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) | ||
757 | { | ||
758 | struct ieee80211_local *local = hw_to_local(hw); | ||
759 | struct sta_info *sta; | ||
760 | u8 *state; | ||
761 | DECLARE_MAC_BUF(mac); | ||
762 | |||
763 | if (tid >= STA_TID_NUM) { | ||
764 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
765 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | ||
766 | tid, STA_TID_NUM); | ||
767 | #endif | ||
768 | return; | ||
769 | } | ||
770 | |||
771 | rcu_read_lock(); | ||
772 | sta = sta_info_get(local, ra); | ||
773 | if (!sta) { | ||
774 | rcu_read_unlock(); | ||
775 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
776 | printk(KERN_DEBUG "Could not find station: %s\n", | ||
777 | print_mac(mac, ra)); | ||
778 | #endif | ||
779 | return; | ||
780 | } | ||
781 | |||
782 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
783 | spin_lock_bh(&sta->lock); | ||
784 | |||
785 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | ||
786 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
787 | printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", | ||
788 | *state); | ||
789 | #endif | ||
790 | spin_unlock_bh(&sta->lock); | ||
791 | rcu_read_unlock(); | ||
792 | return; | ||
793 | } | ||
794 | |||
795 | WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK); | ||
796 | |||
797 | *state |= HT_ADDBA_DRV_READY_MSK; | ||
798 | |||
799 | if (*state == HT_AGG_STATE_OPERATIONAL) { | ||
800 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
801 | printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); | ||
802 | #endif | ||
803 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
804 | } | ||
805 | spin_unlock_bh(&sta->lock); | ||
806 | rcu_read_unlock(); | ||
807 | } | ||
808 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); | ||
809 | |||
810 | void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid) | ||
811 | { | ||
812 | struct ieee80211_local *local = hw_to_local(hw); | ||
813 | struct sta_info *sta; | ||
814 | u8 *state; | ||
815 | int agg_queue; | ||
816 | DECLARE_MAC_BUF(mac); | ||
817 | |||
818 | if (tid >= STA_TID_NUM) { | ||
819 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
820 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | ||
821 | tid, STA_TID_NUM); | ||
822 | #endif | ||
823 | return; | ||
824 | } | ||
825 | |||
826 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
827 | printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n", | ||
828 | print_mac(mac, ra), tid); | ||
829 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
830 | |||
831 | rcu_read_lock(); | ||
832 | sta = sta_info_get(local, ra); | ||
833 | if (!sta) { | ||
834 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
835 | printk(KERN_DEBUG "Could not find station: %s\n", | ||
836 | print_mac(mac, ra)); | ||
837 | #endif | ||
838 | rcu_read_unlock(); | ||
839 | return; | ||
840 | } | ||
841 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
842 | |||
843 | /* NOTE: no need to use sta->lock in this state check, as | ||
844 | * ieee80211_stop_tx_ba_session will let only one stop call to | ||
845 | * pass through per sta/tid | ||
846 | */ | ||
847 | if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { | ||
848 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
849 | printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); | ||
850 | #endif | ||
851 | rcu_read_unlock(); | ||
852 | return; | ||
853 | } | ||
854 | |||
855 | if (*state & HT_AGG_STATE_INITIATOR_MSK) | ||
856 | ieee80211_send_delba(sta->sdata->dev, ra, tid, | ||
857 | WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); | ||
858 | |||
859 | agg_queue = sta->tid_to_tx_q[tid]; | ||
860 | |||
861 | ieee80211_ht_agg_queue_remove(local, sta, tid, 1); | ||
862 | |||
863 | /* We just requeued the all the frames that were in the | ||
864 | * removed queue, and since we might miss a softirq we do | ||
865 | * netif_schedule_queue. ieee80211_wake_queue is not used | ||
866 | * here as this queue is not necessarily stopped | ||
867 | */ | ||
868 | netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue)); | ||
869 | spin_lock_bh(&sta->lock); | ||
870 | *state = HT_AGG_STATE_IDLE; | ||
871 | sta->ampdu_mlme.addba_req_num[tid] = 0; | ||
872 | kfree(sta->ampdu_mlme.tid_tx[tid]); | ||
873 | sta->ampdu_mlme.tid_tx[tid] = NULL; | ||
874 | spin_unlock_bh(&sta->lock); | ||
875 | |||
876 | rcu_read_unlock(); | ||
877 | } | ||
878 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb); | ||
879 | |||
880 | void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, | ||
881 | const u8 *ra, u16 tid) | ||
882 | { | ||
883 | struct ieee80211_local *local = hw_to_local(hw); | ||
884 | struct ieee80211_ra_tid *ra_tid; | ||
885 | struct sk_buff *skb = dev_alloc_skb(0); | ||
886 | |||
887 | if (unlikely(!skb)) { | ||
888 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
889 | if (net_ratelimit()) | ||
890 | printk(KERN_WARNING "%s: Not enough memory, " | ||
891 | "dropping start BA session", skb->dev->name); | ||
892 | #endif | ||
893 | return; | ||
894 | } | ||
895 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
896 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | ||
897 | ra_tid->tid = tid; | ||
898 | |||
899 | skb->pkt_type = IEEE80211_ADDBA_MSG; | ||
900 | skb_queue_tail(&local->skb_queue, skb); | ||
901 | tasklet_schedule(&local->tasklet); | ||
902 | } | ||
903 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); | ||
904 | |||
905 | void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, | ||
906 | const u8 *ra, u16 tid) | ||
907 | { | ||
908 | struct ieee80211_local *local = hw_to_local(hw); | ||
909 | struct ieee80211_ra_tid *ra_tid; | ||
910 | struct sk_buff *skb = dev_alloc_skb(0); | ||
911 | |||
912 | if (unlikely(!skb)) { | ||
913 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
914 | if (net_ratelimit()) | ||
915 | printk(KERN_WARNING "%s: Not enough memory, " | ||
916 | "dropping stop BA session", skb->dev->name); | ||
917 | #endif | ||
918 | return; | ||
919 | } | ||
920 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
921 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | ||
922 | ra_tid->tid = tid; | ||
923 | |||
924 | skb->pkt_type = IEEE80211_DELBA_MSG; | ||
925 | skb_queue_tail(&local->skb_queue, skb); | ||
926 | tasklet_schedule(&local->tasklet); | ||
927 | } | ||
928 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); | ||
929 | |||
930 | static void ieee80211_set_multicast_list(struct net_device *dev) | 603 | static void ieee80211_set_multicast_list(struct net_device *dev) |
931 | { | 604 | { |
932 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 605 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
@@ -975,6 +648,8 @@ void ieee80211_if_setup(struct net_device *dev) | |||
975 | dev->open = ieee80211_open; | 648 | dev->open = ieee80211_open; |
976 | dev->stop = ieee80211_stop; | 649 | dev->stop = ieee80211_stop; |
977 | dev->destructor = free_netdev; | 650 | dev->destructor = free_netdev; |
651 | /* we will validate the address ourselves in ->open */ | ||
652 | dev->validate_addr = NULL; | ||
978 | } | 653 | } |
979 | 654 | ||
980 | /* everything else */ | 655 | /* everything else */ |
@@ -1099,8 +774,8 @@ u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht, | |||
1099 | ht_conf.ht_supported = 1; | 774 | ht_conf.ht_supported = 1; |
1100 | 775 | ||
1101 | ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap; | 776 | ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap; |
1102 | ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); | 777 | ht_conf.cap &= ~(IEEE80211_HT_CAP_SM_PS); |
1103 | ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS; | 778 | ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_SM_PS; |
1104 | ht_bss_conf.primary_channel = req_bss_cap->primary_channel; | 779 | ht_bss_conf.primary_channel = req_bss_cap->primary_channel; |
1105 | ht_bss_conf.bss_cap = req_bss_cap->bss_cap; | 780 | ht_bss_conf.bss_cap = req_bss_cap->bss_cap; |
1106 | ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; | 781 | ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; |
@@ -1162,10 +837,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | |||
1162 | changed); | 837 | changed); |
1163 | } | 838 | } |
1164 | 839 | ||
1165 | u32 ieee80211_reset_erp_info(struct net_device *dev) | 840 | u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) |
1166 | { | 841 | { |
1167 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1168 | |||
1169 | sdata->bss_conf.use_cts_prot = 0; | 842 | sdata->bss_conf.use_cts_prot = 0; |
1170 | sdata->bss_conf.use_short_preamble = 0; | 843 | sdata->bss_conf.use_short_preamble = 0; |
1171 | return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE; | 844 | return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE; |
@@ -1244,9 +917,10 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local, | |||
1244 | struct ieee80211_key *key, | 917 | struct ieee80211_key *key, |
1245 | struct sk_buff *skb) | 918 | struct sk_buff *skb) |
1246 | { | 919 | { |
1247 | int hdrlen, iv_len, mic_len; | 920 | unsigned int hdrlen, iv_len, mic_len; |
921 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
1248 | 922 | ||
1249 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | 923 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
1250 | 924 | ||
1251 | if (!key) | 925 | if (!key) |
1252 | goto no_key; | 926 | goto no_key; |
@@ -1268,24 +942,20 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local, | |||
1268 | goto no_key; | 942 | goto no_key; |
1269 | } | 943 | } |
1270 | 944 | ||
1271 | if (skb->len >= mic_len && | 945 | if (skb->len >= hdrlen + mic_len && |
1272 | !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) | 946 | !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) |
1273 | skb_trim(skb, skb->len - mic_len); | 947 | skb_trim(skb, skb->len - mic_len); |
1274 | if (skb->len >= iv_len && skb->len > hdrlen) { | 948 | if (skb->len >= hdrlen + iv_len) { |
1275 | memmove(skb->data + iv_len, skb->data, hdrlen); | 949 | memmove(skb->data + iv_len, skb->data, hdrlen); |
1276 | skb_pull(skb, iv_len); | 950 | hdr = (struct ieee80211_hdr *)skb_pull(skb, iv_len); |
1277 | } | 951 | } |
1278 | 952 | ||
1279 | no_key: | 953 | no_key: |
1280 | { | 954 | if (ieee80211_is_data_qos(hdr->frame_control)) { |
1281 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 955 | hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); |
1282 | u16 fc = le16_to_cpu(hdr->frame_control); | 956 | memmove(skb->data + IEEE80211_QOS_CTL_LEN, skb->data, |
1283 | if ((fc & 0x8C) == 0x88) /* QoS Control Field */ { | 957 | hdrlen - IEEE80211_QOS_CTL_LEN); |
1284 | fc &= ~IEEE80211_STYPE_QOS_DATA; | 958 | skb_pull(skb, IEEE80211_QOS_CTL_LEN); |
1285 | hdr->frame_control = cpu_to_le16(fc); | ||
1286 | memmove(skb->data + 2, skb->data, hdrlen - 2); | ||
1287 | skb_pull(skb, 2); | ||
1288 | } | ||
1289 | } | 959 | } |
1290 | } | 960 | } |
1291 | 961 | ||
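Note: the rewritten tail of ieee80211_remove_tx_extra() replaces the hand-rolled (fc & 0x8C) == 0x88 test and the bare constant 2 with ieee80211_is_data_qos() and IEEE80211_QOS_CTL_LEN. A rough sketch of why the two forms are equivalent; this mirrors the usual shape of the helper and is not a definition taken from this patch:

static inline bool example_is_data_qos(__le16 fc)
{
	/* type == data and the QoS-data subtype bit set, i.e. the
	 * old (fc & 0x8C) == 0x88 check, applied to the
	 * little-endian frame_control word directly */
	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_STYPE_QOS_DATA)) ==
	       cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA);
}
/* IEEE80211_QOS_CTL_LEN names the 2-byte QoS Control field that the
 * old code pulled with the literal "2" */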
@@ -1403,7 +1073,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
1403 | tid = qc[0] & 0xf; | 1073 | tid = qc[0] & 0xf; |
1404 | ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) | 1074 | ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) |
1405 | & IEEE80211_SCTL_SEQ); | 1075 | & IEEE80211_SCTL_SEQ); |
1406 | ieee80211_send_bar(sta->sdata->dev, hdr->addr1, | 1076 | ieee80211_send_bar(sta->sdata, hdr->addr1, |
1407 | tid, ssn); | 1077 | tid, ssn); |
1408 | } | 1078 | } |
1409 | } | 1079 | } |
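Note: the BAR is now sent via the sdata, but the starting-sequence arithmetic is unchanged and worth spelling out. A worked example under the usual layout (sequence number in the top 12 bits of seq_ctrl, fragment number in the low 4); the values below are host-order for brevity, whereas the code above converts from the little-endian header field first:

u16 seq_ctrl = 0x0543;				/* sequence 0x054, fragment 3 */
u16 ssn = (seq_ctrl + 0x10) & IEEE80211_SCTL_SEQ;
/* 0x0543 + 0x10 = 0x0553, masked with 0xfff0 -> 0x0550, i.e. the
 * BAR asks the receiver to start its window at the next MSDU (0x055) */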
@@ -1639,6 +1309,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1639 | } | 1309 | } |
1640 | } | 1310 | } |
1641 | 1311 | ||
1312 | /* if low-level driver supports AP, we also support VLAN */ | ||
1313 | if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) | ||
1314 | local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); | ||
1315 | |||
1316 | /* mac80211 always supports monitor */ | ||
1317 | local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); | ||
1318 | |||
1642 | result = wiphy_register(local->hw.wiphy); | 1319 | result = wiphy_register(local->hw.wiphy); |
1643 | if (result < 0) | 1320 | if (result < 0) |
1644 | return result; | 1321 | return result; |
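Note: with the lines added above, a driver only has to advertise the interface types it actually implements and mac80211 fills in AP_VLAN and MONITOR at registration time. A hypothetical driver-side sketch; the hw and ret variables are illustrative, the NL80211_IFTYPE_* names are real:

hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
			     BIT(NL80211_IFTYPE_ADHOC) |
			     BIT(NL80211_IFTYPE_AP);

ret = ieee80211_register_hw(hw);
/* on success the wiphy now also reports AP_VLAN (because AP is set)
 * and MONITOR, without the driver having listed them */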
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 35f2f95f2fa7..3ccb3599c04f 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -39,14 +39,13 @@ void ieee80211s_stop(void) | |||
39 | * mesh_matches_local - check if the config of a mesh point matches ours | 39 | * mesh_matches_local - check if the config of a mesh point matches ours |
40 | * | 40 | * |
41 | * @ie: information elements of a management frame from the mesh peer | 41 | * @ie: information elements of a management frame from the mesh peer |
42 | * @dev: local mesh interface | 42 | * @sdata: local mesh subif |
43 | * | 43 | * |
44 | * This function checks if the mesh configuration of a mesh point matches the | 44 | * This function checks if the mesh configuration of a mesh point matches the |
45 | * local mesh configuration, i.e. if both nodes belong to the same mesh network. | 45 | * local mesh configuration, i.e. if both nodes belong to the same mesh network. |
46 | */ | 46 | */ |
47 | bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev) | 47 | bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata) |
48 | { | 48 | { |
49 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
50 | struct ieee80211_if_sta *sta = &sdata->u.sta; | 49 | struct ieee80211_if_sta *sta = &sdata->u.sta; |
51 | 50 | ||
52 | /* | 51 | /* |
@@ -73,10 +72,8 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev) | |||
73 | * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links | 72 | * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links |
74 | * | 73 | * |
75 | * @ie: information elements of a management frame from the mesh peer | 74 | * @ie: information elements of a management frame from the mesh peer |
76 | * @dev: local mesh interface | ||
77 | */ | 75 | */ |
78 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie, | 76 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) |
79 | struct net_device *dev) | ||
80 | { | 77 | { |
81 | return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0; | 78 | return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0; |
82 | } | 79 | } |
@@ -111,9 +108,8 @@ void mesh_ids_set_default(struct ieee80211_if_sta *sta) | |||
111 | memcpy(sta->mesh_cc_id, def_id, 4); | 108 | memcpy(sta->mesh_cc_id, def_id, 4); |
112 | } | 109 | } |
113 | 110 | ||
114 | int mesh_rmc_init(struct net_device *dev) | 111 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) |
115 | { | 112 | { |
116 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
117 | int i; | 113 | int i; |
118 | 114 | ||
119 | sdata->u.sta.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); | 115 | sdata->u.sta.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); |
@@ -125,9 +121,8 @@ int mesh_rmc_init(struct net_device *dev) | |||
125 | return 0; | 121 | return 0; |
126 | } | 122 | } |
127 | 123 | ||
128 | void mesh_rmc_free(struct net_device *dev) | 124 | void mesh_rmc_free(struct ieee80211_sub_if_data *sdata) |
129 | { | 125 | { |
130 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
131 | struct mesh_rmc *rmc = sdata->u.sta.rmc; | 126 | struct mesh_rmc *rmc = sdata->u.sta.rmc; |
132 | struct rmc_entry *p, *n; | 127 | struct rmc_entry *p, *n; |
133 | int i; | 128 | int i; |
@@ -158,9 +153,8 @@ void mesh_rmc_free(struct net_device *dev) | |||
158 | * it. | 153 | * it. |
159 | */ | 154 | */ |
160 | int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, | 155 | int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, |
161 | struct net_device *dev) | 156 | struct ieee80211_sub_if_data *sdata) |
162 | { | 157 | { |
163 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
164 | struct mesh_rmc *rmc = sdata->u.sta.rmc; | 158 | struct mesh_rmc *rmc = sdata->u.sta.rmc; |
165 | u32 seqnum = 0; | 159 | u32 seqnum = 0; |
166 | int entries = 0; | 160 | int entries = 0; |
@@ -194,10 +188,9 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, | |||
194 | return 0; | 188 | return 0; |
195 | } | 189 | } |
196 | 190 | ||
197 | void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev) | 191 | void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) |
198 | { | 192 | { |
199 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 193 | struct ieee80211_local *local = sdata->local; |
200 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
201 | struct ieee80211_supported_band *sband; | 194 | struct ieee80211_supported_band *sband; |
202 | u8 *pos; | 195 | u8 *pos; |
203 | int len, i, rate; | 196 | int len, i, rate; |
@@ -262,10 +255,10 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev) | |||
262 | return; | 255 | return; |
263 | } | 256 | } |
264 | 257 | ||
265 | u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl) | 258 | u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) |
266 | { | 259 | { |
267 | /* Use last four bytes of hw addr and interface index as hash index */ | 260 | /* Use last four bytes of hw addr and interface index as hash index */ |
268 | return jhash_2words(*(u32 *)(addr+2), dev->ifindex, tbl->hash_rnd) | 261 | return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd) |
269 | & tbl->hash_mask; | 262 | & tbl->hash_mask; |
270 | } | 263 | } |
271 | 264 | ||
@@ -434,7 +427,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) | |||
434 | ifsta->preq_id = 0; | 427 | ifsta->preq_id = 0; |
435 | ifsta->dsn = 0; | 428 | ifsta->dsn = 0; |
436 | atomic_set(&ifsta->mpaths, 0); | 429 | atomic_set(&ifsta->mpaths, 0); |
437 | mesh_rmc_init(sdata->dev); | 430 | mesh_rmc_init(sdata); |
438 | ifsta->last_preq = jiffies; | 431 | ifsta->last_preq = jiffies; |
439 | /* Allocate all mesh structures when creating the first mesh interface. */ | 432 | /* Allocate all mesh structures when creating the first mesh interface. */ |
440 | if (!mesh_allocated) | 433 | if (!mesh_allocated) |
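Note: every mesh.c hunk above follows the same mechanical conversion: callers hand in the sub-interface instead of the net_device, so each function drops its IEEE80211_DEV_TO_SUB_IF() lookup. A sketch of why that lookup is pure overhead; this is how the macro is commonly defined in ieee80211_i.h, stated from memory rather than from the patch:

static inline struct ieee80211_sub_if_data *
example_dev_to_sdata(struct net_device *dev)
{
	/* the sdata lives in the netdev's private area ... */
	return netdev_priv(dev);
}
/* ... and the reverse pointers already exist: sdata->dev for the
 * net_device and sdata->local for the hardware, so passing sdata
 * loses nothing and saves a dereference per call */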
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 7495fbb0d211..84ff5d828fdb 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -47,7 +47,7 @@ enum mesh_path_flags { | |||
47 | * struct mesh_path - mac80211 mesh path structure | 47 | * struct mesh_path - mac80211 mesh path structure |
48 | * | 48 | * |
49 | * @dst: mesh path destination mac address | 49 | * @dst: mesh path destination mac address |
50 | * @dev: mesh path device | 50 | * @sdata: mesh subif |
51 | * @next_hop: mesh neighbor to which frames for this destination will be | 51 | * @next_hop: mesh neighbor to which frames for this destination will be |
52 | * forwarded | 52 | * forwarded |
53 | * @timer: mesh path discovery timer | 53 | * @timer: mesh path discovery timer |
@@ -64,14 +64,14 @@ enum mesh_path_flags { | |||
64 | * @state_lock: mesh path state lock | 64 | * @state_lock: mesh path state lock |
65 | * | 65 | * |
66 | * | 66 | * |
67 | * The combination of dst and dev is unique in the mesh path table. Since the | 67 | * The combination of dst and sdata is unique in the mesh path table. Since the |
68 | * next_hop STA is only protected by RCU as well, deleting the STA must also | 68 | * next_hop STA is only protected by RCU as well, deleting the STA must also |
69 | * remove/substitute the mesh_path structure and wait until that is no longer | 69 | * remove/substitute the mesh_path structure and wait until that is no longer |
70 | * reachable before destroying the STA completely. | 70 | * reachable before destroying the STA completely. |
71 | */ | 71 | */ |
72 | struct mesh_path { | 72 | struct mesh_path { |
73 | u8 dst[ETH_ALEN]; | 73 | u8 dst[ETH_ALEN]; |
74 | struct net_device *dev; | 74 | struct ieee80211_sub_if_data *sdata; |
75 | struct sta_info *next_hop; | 75 | struct sta_info *next_hop; |
76 | struct timer_list timer; | 76 | struct timer_list timer; |
77 | struct sk_buff_head frame_queue; | 77 | struct sk_buff_head frame_queue; |
@@ -203,59 +203,66 @@ int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); | |||
203 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, | 203 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, |
204 | struct ieee80211_sub_if_data *sdata); | 204 | struct ieee80211_sub_if_data *sdata); |
205 | int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, | 205 | int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, |
206 | struct net_device *dev); | 206 | struct ieee80211_sub_if_data *sdata); |
207 | bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev); | 207 | bool mesh_matches_local(struct ieee802_11_elems *ie, |
208 | struct ieee80211_sub_if_data *sdata); | ||
208 | void mesh_ids_set_default(struct ieee80211_if_sta *sta); | 209 | void mesh_ids_set_default(struct ieee80211_if_sta *sta); |
209 | void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev); | 210 | void mesh_mgmt_ies_add(struct sk_buff *skb, |
210 | void mesh_rmc_free(struct net_device *dev); | 211 | struct ieee80211_sub_if_data *sdata); |
211 | int mesh_rmc_init(struct net_device *dev); | 212 | void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); |
213 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); | ||
212 | void ieee80211s_init(void); | 214 | void ieee80211s_init(void); |
213 | void ieee80211s_stop(void); | 215 | void ieee80211s_stop(void); |
214 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); | 216 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); |
215 | 217 | ||
216 | /* Mesh paths */ | 218 | /* Mesh paths */ |
217 | int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev); | 219 | int mesh_nexthop_lookup(struct sk_buff *skb, |
218 | void mesh_path_start_discovery(struct net_device *dev); | 220 | struct ieee80211_sub_if_data *sdata); |
219 | struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev); | 221 | void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata); |
220 | struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev); | 222 | struct mesh_path *mesh_path_lookup(u8 *dst, |
223 | struct ieee80211_sub_if_data *sdata); | ||
224 | struct mesh_path *mesh_path_lookup_by_idx(int idx, | ||
225 | struct ieee80211_sub_if_data *sdata); | ||
221 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); | 226 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); |
222 | void mesh_path_expire(struct net_device *dev); | 227 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata); |
223 | void mesh_path_flush(struct net_device *dev); | 228 | void mesh_path_flush(struct ieee80211_sub_if_data *sdata); |
224 | void mesh_rx_path_sel_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | 229 | void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, |
225 | size_t len); | 230 | struct ieee80211_mgmt *mgmt, size_t len); |
226 | int mesh_path_add(u8 *dst, struct net_device *dev); | 231 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); |
227 | /* Mesh plinks */ | 232 | /* Mesh plinks */ |
228 | void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, | 233 | void mesh_neighbour_update(u8 *hw_addr, u64 rates, |
229 | bool add); | 234 | struct ieee80211_sub_if_data *sdata, bool add); |
230 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie, | 235 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); |
231 | struct net_device *dev); | ||
232 | void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); | 236 | void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); |
233 | void mesh_plink_broken(struct sta_info *sta); | 237 | void mesh_plink_broken(struct sta_info *sta); |
234 | void mesh_plink_deactivate(struct sta_info *sta); | 238 | void mesh_plink_deactivate(struct sta_info *sta); |
235 | int mesh_plink_open(struct sta_info *sta); | 239 | int mesh_plink_open(struct sta_info *sta); |
236 | int mesh_plink_close(struct sta_info *sta); | 240 | int mesh_plink_close(struct sta_info *sta); |
237 | void mesh_plink_block(struct sta_info *sta); | 241 | void mesh_plink_block(struct sta_info *sta); |
238 | void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | 242 | void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, |
239 | size_t len, struct ieee80211_rx_status *rx_status); | 243 | struct ieee80211_mgmt *mgmt, size_t len, |
244 | struct ieee80211_rx_status *rx_status); | ||
240 | 245 | ||
241 | /* Private interfaces */ | 246 | /* Private interfaces */ |
242 | /* Mesh tables */ | 247 | /* Mesh tables */ |
243 | struct mesh_table *mesh_table_alloc(int size_order); | 248 | struct mesh_table *mesh_table_alloc(int size_order); |
244 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs); | 249 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs); |
245 | struct mesh_table *mesh_table_grow(struct mesh_table *tbl); | 250 | struct mesh_table *mesh_table_grow(struct mesh_table *tbl); |
246 | u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl); | 251 | u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, |
252 | struct mesh_table *tbl); | ||
247 | /* Mesh paths */ | 253 | /* Mesh paths */ |
248 | int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra, | 254 | int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra, |
249 | struct net_device *dev); | 255 | struct ieee80211_sub_if_data *sdata); |
250 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); | 256 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); |
251 | void mesh_path_flush_pending(struct mesh_path *mpath); | 257 | void mesh_path_flush_pending(struct mesh_path *mpath); |
252 | void mesh_path_tx_pending(struct mesh_path *mpath); | 258 | void mesh_path_tx_pending(struct mesh_path *mpath); |
253 | int mesh_pathtbl_init(void); | 259 | int mesh_pathtbl_init(void); |
254 | void mesh_pathtbl_unregister(void); | 260 | void mesh_pathtbl_unregister(void); |
255 | int mesh_path_del(u8 *addr, struct net_device *dev); | 261 | int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); |
256 | void mesh_path_timer(unsigned long data); | 262 | void mesh_path_timer(unsigned long data); |
257 | void mesh_path_flush_by_nexthop(struct sta_info *sta); | 263 | void mesh_path_flush_by_nexthop(struct sta_info *sta); |
258 | void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev); | 264 | void mesh_path_discard_frame(struct sk_buff *skb, |
265 | struct ieee80211_sub_if_data *sdata); | ||
259 | 266 | ||
260 | #ifdef CONFIG_MAC80211_MESH | 267 | #ifdef CONFIG_MAC80211_MESH |
261 | extern int mesh_allocated; | 268 | extern int mesh_allocated; |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 08aca446ca01..210d6b852406 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -82,9 +82,9 @@ enum mpath_frame_type { | |||
82 | static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | 82 | static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, |
83 | u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst, | 83 | u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst, |
84 | __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime, | 84 | __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime, |
85 | __le32 metric, __le32 preq_id, struct net_device *dev) | 85 | __le32 metric, __le32 preq_id, struct ieee80211_sub_if_data *sdata) |
86 | { | 86 | { |
87 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 87 | struct ieee80211_local *local = sdata->local; |
88 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | 88 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); |
89 | struct ieee80211_mgmt *mgmt; | 89 | struct ieee80211_mgmt *mgmt; |
90 | u8 *pos; | 90 | u8 *pos; |
@@ -99,11 +99,11 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | |||
99 | mgmt = (struct ieee80211_mgmt *) | 99 | mgmt = (struct ieee80211_mgmt *) |
100 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); | 100 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); |
101 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); | 101 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); |
102 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 102 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
103 | IEEE80211_STYPE_ACTION); | 103 | IEEE80211_STYPE_ACTION); |
104 | 104 | ||
105 | memcpy(mgmt->da, da, ETH_ALEN); | 105 | memcpy(mgmt->da, da, ETH_ALEN); |
106 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 106 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
107 | /* BSSID is left zeroed, wildcard value */ | 107 | /* BSSID is left zeroed, wildcard value */ |
108 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; | 108 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; |
109 | mgmt->u.action.u.mesh_action.action_code = action; | 109 | mgmt->u.action.u.mesh_action.action_code = action; |
@@ -149,7 +149,7 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | |||
149 | pos += ETH_ALEN; | 149 | pos += ETH_ALEN; |
150 | memcpy(pos, &dst_dsn, 4); | 150 | memcpy(pos, &dst_dsn, 4); |
151 | 151 | ||
152 | ieee80211_sta_tx(dev, skb, 0); | 152 | ieee80211_tx_skb(sdata, skb, 0); |
153 | return 0; | 153 | return 0; |
154 | } | 154 | } |
155 | 155 | ||
@@ -161,9 +161,9 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | |||
161 | * @ra: node this frame is addressed to | 161 | * @ra: node this frame is addressed to |
162 | */ | 162 | */ |
163 | int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, | 163 | int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, |
164 | struct net_device *dev) | 164 | struct ieee80211_sub_if_data *sdata) |
165 | { | 165 | { |
166 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 166 | struct ieee80211_local *local = sdata->local; |
167 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | 167 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); |
168 | struct ieee80211_mgmt *mgmt; | 168 | struct ieee80211_mgmt *mgmt; |
169 | u8 *pos; | 169 | u8 *pos; |
@@ -178,11 +178,11 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, | |||
178 | mgmt = (struct ieee80211_mgmt *) | 178 | mgmt = (struct ieee80211_mgmt *) |
179 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); | 179 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); |
180 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); | 180 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); |
181 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 181 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
182 | IEEE80211_STYPE_ACTION); | 182 | IEEE80211_STYPE_ACTION); |
183 | 183 | ||
184 | memcpy(mgmt->da, ra, ETH_ALEN); | 184 | memcpy(mgmt->da, ra, ETH_ALEN); |
185 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 185 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
186 | /* BSSID is left zeroed, wildcard value */ | 186 | /* BSSID is left zeroed, wildcard value */ |
187 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; | 187 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; |
188 | mgmt->u.action.u.mesh_action.action_code = MPATH_PERR; | 188 | mgmt->u.action.u.mesh_action.action_code = MPATH_PERR; |
@@ -198,7 +198,7 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, | |||
198 | pos += ETH_ALEN; | 198 | pos += ETH_ALEN; |
199 | memcpy(pos, &dst_dsn, 4); | 199 | memcpy(pos, &dst_dsn, 4); |
200 | 200 | ||
201 | ieee80211_sta_tx(dev, skb, 0); | 201 | ieee80211_tx_skb(sdata, skb, 0); |
202 | return 0; | 202 | return 0; |
203 | } | 203 | } |
204 | 204 | ||
@@ -233,7 +233,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local, | |||
233 | /** | 233 | /** |
234 | * hwmp_route_info_get - Update routing info to originator and transmitter | 234 | * hwmp_route_info_get - Update routing info to originator and transmitter |
235 | * | 235 | * |
236 | * @dev: local mesh interface | 236 | * @sdata: local mesh subif |
237 | * @mgmt: mesh management frame | 237 | * @mgmt: mesh management frame |
238 | * @hwmp_ie: hwmp information element (PREP or PREQ) | 238 | * @hwmp_ie: hwmp information element (PREP or PREQ) |
239 | * | 239 | * |
@@ -246,11 +246,11 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local, | |||
246 | * Notes: this function is the only place (besides user-provided info) where | 246 | * Notes: this function is the only place (besides user-provided info) where |
247 | * path routing information is updated. | 247 | * path routing information is updated. |
248 | */ | 248 | */ |
249 | static u32 hwmp_route_info_get(struct net_device *dev, | 249 | static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, |
250 | struct ieee80211_mgmt *mgmt, | 250 | struct ieee80211_mgmt *mgmt, |
251 | u8 *hwmp_ie) | 251 | u8 *hwmp_ie) |
252 | { | 252 | { |
253 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 253 | struct ieee80211_local *local = sdata->local; |
254 | struct mesh_path *mpath; | 254 | struct mesh_path *mpath; |
255 | struct sta_info *sta; | 255 | struct sta_info *sta; |
256 | bool fresh_info; | 256 | bool fresh_info; |
@@ -301,14 +301,14 @@ static u32 hwmp_route_info_get(struct net_device *dev, | |||
301 | new_metric = MAX_METRIC; | 301 | new_metric = MAX_METRIC; |
302 | exp_time = TU_TO_EXP_TIME(orig_lifetime); | 302 | exp_time = TU_TO_EXP_TIME(orig_lifetime); |
303 | 303 | ||
304 | if (memcmp(orig_addr, dev->dev_addr, ETH_ALEN) == 0) { | 304 | if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { |
305 | /* This MP is the originator, we are not interested in this | 305 | /* This MP is the originator, we are not interested in this |
306 | * frame, except for updating transmitter's path info. | 306 | * frame, except for updating transmitter's path info. |
307 | */ | 307 | */ |
308 | process = false; | 308 | process = false; |
309 | fresh_info = false; | 309 | fresh_info = false; |
310 | } else { | 310 | } else { |
311 | mpath = mesh_path_lookup(orig_addr, dev); | 311 | mpath = mesh_path_lookup(orig_addr, sdata); |
312 | if (mpath) { | 312 | if (mpath) { |
313 | spin_lock_bh(&mpath->state_lock); | 313 | spin_lock_bh(&mpath->state_lock); |
314 | if (mpath->flags & MESH_PATH_FIXED) | 314 | if (mpath->flags & MESH_PATH_FIXED) |
@@ -324,8 +324,8 @@ static u32 hwmp_route_info_get(struct net_device *dev, | |||
324 | } | 324 | } |
325 | } | 325 | } |
326 | } else { | 326 | } else { |
327 | mesh_path_add(orig_addr, dev); | 327 | mesh_path_add(orig_addr, sdata); |
328 | mpath = mesh_path_lookup(orig_addr, dev); | 328 | mpath = mesh_path_lookup(orig_addr, sdata); |
329 | if (!mpath) { | 329 | if (!mpath) { |
330 | rcu_read_unlock(); | 330 | rcu_read_unlock(); |
331 | return 0; | 331 | return 0; |
@@ -357,7 +357,7 @@ static u32 hwmp_route_info_get(struct net_device *dev, | |||
357 | else { | 357 | else { |
358 | fresh_info = true; | 358 | fresh_info = true; |
359 | 359 | ||
360 | mpath = mesh_path_lookup(ta, dev); | 360 | mpath = mesh_path_lookup(ta, sdata); |
361 | if (mpath) { | 361 | if (mpath) { |
362 | spin_lock_bh(&mpath->state_lock); | 362 | spin_lock_bh(&mpath->state_lock); |
363 | if ((mpath->flags & MESH_PATH_FIXED) || | 363 | if ((mpath->flags & MESH_PATH_FIXED) || |
@@ -365,8 +365,8 @@ static u32 hwmp_route_info_get(struct net_device *dev, | |||
365 | (last_hop_metric > mpath->metric))) | 365 | (last_hop_metric > mpath->metric))) |
366 | fresh_info = false; | 366 | fresh_info = false; |
367 | } else { | 367 | } else { |
368 | mesh_path_add(ta, dev); | 368 | mesh_path_add(ta, sdata); |
369 | mpath = mesh_path_lookup(ta, dev); | 369 | mpath = mesh_path_lookup(ta, sdata); |
370 | if (!mpath) { | 370 | if (!mpath) { |
371 | rcu_read_unlock(); | 371 | rcu_read_unlock(); |
372 | return 0; | 372 | return 0; |
@@ -392,10 +392,9 @@ static u32 hwmp_route_info_get(struct net_device *dev, | |||
392 | return process ? new_metric : 0; | 392 | return process ? new_metric : 0; |
393 | } | 393 | } |
394 | 394 | ||
395 | static void hwmp_preq_frame_process(struct net_device *dev, | 395 | static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, |
396 | struct ieee80211_mgmt *mgmt, | 396 | struct ieee80211_mgmt *mgmt, |
397 | u8 *preq_elem, u32 metric) { | 397 | u8 *preq_elem, u32 metric) { |
398 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
399 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 398 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
400 | struct mesh_path *mpath; | 399 | struct mesh_path *mpath; |
401 | u8 *dst_addr, *orig_addr; | 400 | u8 *dst_addr, *orig_addr; |
@@ -411,7 +410,7 @@ static void hwmp_preq_frame_process(struct net_device *dev, | |||
411 | orig_dsn = PREQ_IE_ORIG_DSN(preq_elem); | 410 | orig_dsn = PREQ_IE_ORIG_DSN(preq_elem); |
412 | dst_flags = PREQ_IE_DST_F(preq_elem); | 411 | dst_flags = PREQ_IE_DST_F(preq_elem); |
413 | 412 | ||
414 | if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) { | 413 | if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { |
415 | forward = false; | 414 | forward = false; |
416 | reply = true; | 415 | reply = true; |
417 | metric = 0; | 416 | metric = 0; |
@@ -423,7 +422,7 @@ static void hwmp_preq_frame_process(struct net_device *dev, | |||
423 | } | 422 | } |
424 | } else { | 423 | } else { |
425 | rcu_read_lock(); | 424 | rcu_read_lock(); |
426 | mpath = mesh_path_lookup(dst_addr, dev); | 425 | mpath = mesh_path_lookup(dst_addr, sdata); |
427 | if (mpath) { | 426 | if (mpath) { |
428 | if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || | 427 | if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || |
429 | DSN_LT(mpath->dsn, dst_dsn)) { | 428 | DSN_LT(mpath->dsn, dst_dsn)) { |
@@ -451,7 +450,7 @@ static void hwmp_preq_frame_process(struct net_device *dev, | |||
451 | cpu_to_le32(dst_dsn), 0, orig_addr, | 450 | cpu_to_le32(dst_dsn), 0, orig_addr, |
452 | cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl, | 451 | cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl, |
453 | cpu_to_le32(lifetime), cpu_to_le32(metric), | 452 | cpu_to_le32(lifetime), cpu_to_le32(metric), |
454 | 0, dev); | 453 | 0, sdata); |
455 | else | 454 | else |
456 | ifsta->mshstats.dropped_frames_ttl++; | 455 | ifsta->mshstats.dropped_frames_ttl++; |
457 | } | 456 | } |
@@ -472,20 +471,19 @@ static void hwmp_preq_frame_process(struct net_device *dev, | |||
472 | hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; | 471 | hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; |
473 | mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, | 472 | mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, |
474 | cpu_to_le32(orig_dsn), dst_flags, dst_addr, | 473 | cpu_to_le32(orig_dsn), dst_flags, dst_addr, |
475 | cpu_to_le32(dst_dsn), dev->broadcast, | 474 | cpu_to_le32(dst_dsn), sdata->dev->broadcast, |
476 | hopcount, ttl, cpu_to_le32(lifetime), | 475 | hopcount, ttl, cpu_to_le32(lifetime), |
477 | cpu_to_le32(metric), cpu_to_le32(preq_id), | 476 | cpu_to_le32(metric), cpu_to_le32(preq_id), |
478 | dev); | 477 | sdata); |
479 | ifsta->mshstats.fwded_frames++; | 478 | ifsta->mshstats.fwded_frames++; |
480 | } | 479 | } |
481 | } | 480 | } |
482 | 481 | ||
483 | 482 | ||
484 | static void hwmp_prep_frame_process(struct net_device *dev, | 483 | static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, |
485 | struct ieee80211_mgmt *mgmt, | 484 | struct ieee80211_mgmt *mgmt, |
486 | u8 *prep_elem, u32 metric) | 485 | u8 *prep_elem, u32 metric) |
487 | { | 486 | { |
488 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
489 | struct mesh_path *mpath; | 487 | struct mesh_path *mpath; |
490 | u8 *dst_addr, *orig_addr; | 488 | u8 *dst_addr, *orig_addr; |
491 | u8 ttl, hopcount, flags; | 489 | u8 ttl, hopcount, flags; |
@@ -499,7 +497,7 @@ static void hwmp_prep_frame_process(struct net_device *dev, | |||
499 | * replies | 497 | * replies |
500 | */ | 498 | */ |
501 | dst_addr = PREP_IE_DST_ADDR(prep_elem); | 499 | dst_addr = PREP_IE_DST_ADDR(prep_elem); |
502 | if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) | 500 | if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) |
503 | /* destination, no forwarding required */ | 501 | /* destination, no forwarding required */ |
504 | return; | 502 | return; |
505 | 503 | ||
@@ -510,7 +508,7 @@ static void hwmp_prep_frame_process(struct net_device *dev, | |||
510 | } | 508 | } |
511 | 509 | ||
512 | rcu_read_lock(); | 510 | rcu_read_lock(); |
513 | mpath = mesh_path_lookup(dst_addr, dev); | 511 | mpath = mesh_path_lookup(dst_addr, sdata); |
514 | if (mpath) | 512 | if (mpath) |
515 | spin_lock_bh(&mpath->state_lock); | 513 | spin_lock_bh(&mpath->state_lock); |
516 | else | 514 | else |
@@ -533,7 +531,7 @@ static void hwmp_prep_frame_process(struct net_device *dev, | |||
533 | cpu_to_le32(orig_dsn), 0, dst_addr, | 531 | cpu_to_le32(orig_dsn), 0, dst_addr, |
534 | cpu_to_le32(dst_dsn), mpath->next_hop->addr, hopcount, ttl, | 532 | cpu_to_le32(dst_dsn), mpath->next_hop->addr, hopcount, ttl, |
535 | cpu_to_le32(lifetime), cpu_to_le32(metric), | 533 | cpu_to_le32(lifetime), cpu_to_le32(metric), |
536 | 0, dev); | 534 | 0, sdata); |
537 | rcu_read_unlock(); | 535 | rcu_read_unlock(); |
538 | sdata->u.sta.mshstats.fwded_frames++; | 536 | sdata->u.sta.mshstats.fwded_frames++; |
539 | return; | 537 | return; |
@@ -544,7 +542,7 @@ fail: | |||
544 | return; | 542 | return; |
545 | } | 543 | } |
546 | 544 | ||
547 | static void hwmp_perr_frame_process(struct net_device *dev, | 545 | static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, |
548 | struct ieee80211_mgmt *mgmt, u8 *perr_elem) | 546 | struct ieee80211_mgmt *mgmt, u8 *perr_elem) |
549 | { | 547 | { |
550 | struct mesh_path *mpath; | 548 | struct mesh_path *mpath; |
@@ -555,7 +553,7 @@ static void hwmp_perr_frame_process(struct net_device *dev, | |||
555 | dst_addr = PERR_IE_DST_ADDR(perr_elem); | 553 | dst_addr = PERR_IE_DST_ADDR(perr_elem); |
556 | dst_dsn = PERR_IE_DST_DSN(perr_elem); | 554 | dst_dsn = PERR_IE_DST_DSN(perr_elem); |
557 | rcu_read_lock(); | 555 | rcu_read_lock(); |
558 | mpath = mesh_path_lookup(dst_addr, dev); | 556 | mpath = mesh_path_lookup(dst_addr, sdata); |
559 | if (mpath) { | 557 | if (mpath) { |
560 | spin_lock_bh(&mpath->state_lock); | 558 | spin_lock_bh(&mpath->state_lock); |
561 | if (mpath->flags & MESH_PATH_ACTIVE && | 559 | if (mpath->flags & MESH_PATH_ACTIVE && |
@@ -566,7 +564,7 @@ static void hwmp_perr_frame_process(struct net_device *dev, | |||
566 | mpath->dsn = dst_dsn; | 564 | mpath->dsn = dst_dsn; |
567 | spin_unlock_bh(&mpath->state_lock); | 565 | spin_unlock_bh(&mpath->state_lock); |
568 | mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn), | 566 | mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn), |
569 | dev->broadcast, dev); | 567 | sdata->dev->broadcast, sdata); |
570 | } else | 568 | } else |
571 | spin_unlock_bh(&mpath->state_lock); | 569 | spin_unlock_bh(&mpath->state_lock); |
572 | } | 570 | } |
@@ -575,7 +573,7 @@ static void hwmp_perr_frame_process(struct net_device *dev, | |||
575 | 573 | ||
576 | 574 | ||
577 | 575 | ||
578 | void mesh_rx_path_sel_frame(struct net_device *dev, | 576 | void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, |
579 | struct ieee80211_mgmt *mgmt, | 577 | struct ieee80211_mgmt *mgmt, |
580 | size_t len) | 578 | size_t len) |
581 | { | 579 | { |
@@ -583,6 +581,10 @@ void mesh_rx_path_sel_frame(struct net_device *dev, | |||
583 | size_t baselen; | 581 | size_t baselen; |
584 | u32 last_hop_metric; | 582 | u32 last_hop_metric; |
585 | 583 | ||
584 | /* need action_code */ | ||
585 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) | ||
586 | return; | ||
587 | |||
586 | baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; | 588 | baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; |
587 | ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, | 589 | ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, |
588 | len - baselen, &elems); | 590 | len - baselen, &elems); |
@@ -592,25 +594,25 @@ void mesh_rx_path_sel_frame(struct net_device *dev, | |||
592 | if (!elems.preq || elems.preq_len != 37) | 594 | if (!elems.preq || elems.preq_len != 37) |
593 | /* Right now we support just 1 destination and no AE */ | 595 | /* Right now we support just 1 destination and no AE */ |
594 | return; | 596 | return; |
595 | last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.preq); | 597 | last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq); |
596 | if (!last_hop_metric) | 598 | if (!last_hop_metric) |
597 | return; | 599 | return; |
598 | hwmp_preq_frame_process(dev, mgmt, elems.preq, last_hop_metric); | 600 | hwmp_preq_frame_process(sdata, mgmt, elems.preq, last_hop_metric); |
599 | break; | 601 | break; |
600 | case MPATH_PREP: | 602 | case MPATH_PREP: |
601 | if (!elems.prep || elems.prep_len != 31) | 603 | if (!elems.prep || elems.prep_len != 31) |
602 | /* Right now we support no AE */ | 604 | /* Right now we support no AE */ |
603 | return; | 605 | return; |
604 | last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.prep); | 606 | last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep); |
605 | if (!last_hop_metric) | 607 | if (!last_hop_metric) |
606 | return; | 608 | return; |
607 | hwmp_prep_frame_process(dev, mgmt, elems.prep, last_hop_metric); | 609 | hwmp_prep_frame_process(sdata, mgmt, elems.prep, last_hop_metric); |
608 | break; | 610 | break; |
609 | case MPATH_PERR: | 611 | case MPATH_PERR: |
610 | if (!elems.perr || elems.perr_len != 12) | 612 | if (!elems.perr || elems.perr_len != 12) |
611 | /* Right now we support only one destination per PERR */ | 613 | /* Right now we support only one destination per PERR */ |
612 | return; | 614 | return; |
613 | hwmp_perr_frame_process(dev, mgmt, elems.perr); | 615 | hwmp_perr_frame_process(sdata, mgmt, elems.perr); |
614 | default: | 616 | default: |
615 | return; | 617 | return; |
616 | } | 618 | } |
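Note: the new length guard at the top of mesh_rx_path_sel_frame() exists because the switch dereferences mgmt->u.action.u.mesh_action.action_code before any IE parsing can validate the frame. A sketch of the bound it enforces, assuming the usual value of the constant (24-byte management header plus the one-byte category):

#define EXAMPLE_MIN_ACTION_SIZE	(24 + 1)	/* hdr + category; assumed value */

/* reject anything too short to carry the 1-byte mesh action_code */
if (len < EXAMPLE_MIN_ACTION_SIZE + 1)
	return;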
@@ -628,8 +630,7 @@ void mesh_rx_path_sel_frame(struct net_device *dev, | |||
628 | */ | 630 | */ |
629 | static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) | 631 | static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) |
630 | { | 632 | { |
631 | struct ieee80211_sub_if_data *sdata = | 633 | struct ieee80211_sub_if_data *sdata = mpath->sdata; |
632 | IEEE80211_DEV_TO_SUB_IF(mpath->dev); | ||
633 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 634 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
634 | struct mesh_preq_queue *preq_node; | 635 | struct mesh_preq_queue *preq_node; |
635 | 636 | ||
@@ -672,12 +673,10 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) | |||
672 | /** | 673 | /** |
673 | * mesh_path_start_discovery - launch a path discovery from the PREQ queue | 674 | * mesh_path_start_discovery - launch a path discovery from the PREQ queue |
674 | * | 675 | * |
675 | * @dev: local mesh interface | 676 | * @sdata: local mesh subif |
676 | */ | 677 | */ |
677 | void mesh_path_start_discovery(struct net_device *dev) | 678 | void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) |
678 | { | 679 | { |
679 | struct ieee80211_sub_if_data *sdata = | ||
680 | IEEE80211_DEV_TO_SUB_IF(dev); | ||
681 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 680 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
682 | struct mesh_preq_queue *preq_node; | 681 | struct mesh_preq_queue *preq_node; |
683 | struct mesh_path *mpath; | 682 | struct mesh_path *mpath; |
@@ -699,7 +698,7 @@ void mesh_path_start_discovery(struct net_device *dev) | |||
699 | spin_unlock(&ifsta->mesh_preq_queue_lock); | 698 | spin_unlock(&ifsta->mesh_preq_queue_lock); |
700 | 699 | ||
701 | rcu_read_lock(); | 700 | rcu_read_lock(); |
702 | mpath = mesh_path_lookup(preq_node->dst, dev); | 701 | mpath = mesh_path_lookup(preq_node->dst, sdata); |
703 | if (!mpath) | 702 | if (!mpath) |
704 | goto enddiscovery; | 703 | goto enddiscovery; |
705 | 704 | ||
@@ -743,11 +742,11 @@ void mesh_path_start_discovery(struct net_device *dev) | |||
743 | dst_flags = MP_F_RF; | 742 | dst_flags = MP_F_RF; |
744 | 743 | ||
745 | spin_unlock_bh(&mpath->state_lock); | 744 | spin_unlock_bh(&mpath->state_lock); |
746 | mesh_path_sel_frame_tx(MPATH_PREQ, 0, dev->dev_addr, | 745 | mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr, |
747 | cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst, | 746 | cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst, |
748 | cpu_to_le32(mpath->dsn), dev->broadcast, 0, | 747 | cpu_to_le32(mpath->dsn), sdata->dev->broadcast, 0, |
749 | ttl, cpu_to_le32(lifetime), 0, | 748 | ttl, cpu_to_le32(lifetime), 0, |
750 | cpu_to_le32(ifsta->preq_id++), dev); | 749 | cpu_to_le32(ifsta->preq_id++), sdata); |
751 | mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); | 750 | mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); |
752 | 751 | ||
753 | enddiscovery: | 752 | enddiscovery: |
@@ -759,7 +758,7 @@ enddiscovery: | |||
759 | * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame | 758 | * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame |
760 | * | 759 | * |
761 | * @skb: 802.11 frame to be sent | 760 | * @skb: 802.11 frame to be sent |
762 | * @dev: network device the frame will be sent through | 761 | * @sdata: network subif the frame will be sent through |
763 | * @fwd_frame: true if this frame was originally from a different host | 762 | * @fwd_frame: true if this frame was originally from a different host |
764 | * | 763 | * |
765 | * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is | 764 | * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is |
@@ -767,9 +766,9 @@ enddiscovery: | |||
767 | * sent when the path is resolved. This means the caller must not free the skb | 766 | * sent when the path is resolved. This means the caller must not free the skb |
768 | * in this case. | 767 | * in this case. |
769 | */ | 768 | */ |
770 | int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev) | 769 | int mesh_nexthop_lookup(struct sk_buff *skb, |
770 | struct ieee80211_sub_if_data *sdata) | ||
771 | { | 771 | { |
772 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
773 | struct sk_buff *skb_to_free = NULL; | 772 | struct sk_buff *skb_to_free = NULL; |
774 | struct mesh_path *mpath; | 773 | struct mesh_path *mpath; |
775 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 774 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
@@ -777,11 +776,11 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev) | |||
777 | int err = 0; | 776 | int err = 0; |
778 | 777 | ||
779 | rcu_read_lock(); | 778 | rcu_read_lock(); |
780 | mpath = mesh_path_lookup(dst_addr, dev); | 779 | mpath = mesh_path_lookup(dst_addr, sdata); |
781 | 780 | ||
782 | if (!mpath) { | 781 | if (!mpath) { |
783 | mesh_path_add(dst_addr, dev); | 782 | mesh_path_add(dst_addr, sdata); |
784 | mpath = mesh_path_lookup(dst_addr, dev); | 783 | mpath = mesh_path_lookup(dst_addr, sdata); |
785 | if (!mpath) { | 784 | if (!mpath) { |
786 | dev_kfree_skb(skb); | 785 | dev_kfree_skb(skb); |
787 | sdata->u.sta.mshstats.dropped_frames_no_route++; | 786 | sdata->u.sta.mshstats.dropped_frames_no_route++; |
@@ -793,7 +792,8 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev) | |||
793 | if (mpath->flags & MESH_PATH_ACTIVE) { | 792 | if (mpath->flags & MESH_PATH_ACTIVE) { |
794 | if (time_after(jiffies, mpath->exp_time - | 793 | if (time_after(jiffies, mpath->exp_time - |
795 | msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time)) | 794 | msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time)) |
796 | && !memcmp(dev->dev_addr, hdr->addr4, ETH_ALEN) | 795 | && !memcmp(sdata->dev->dev_addr, hdr->addr4, |
796 | ETH_ALEN) | ||
797 | && !(mpath->flags & MESH_PATH_RESOLVING) | 797 | && !(mpath->flags & MESH_PATH_RESOLVING) |
798 | && !(mpath->flags & MESH_PATH_FIXED)) { | 798 | && !(mpath->flags & MESH_PATH_FIXED)) { |
799 | mesh_queue_preq(mpath, | 799 | mesh_queue_preq(mpath, |
@@ -815,7 +815,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev) | |||
815 | 815 | ||
816 | skb_queue_tail(&mpath->frame_queue, skb); | 816 | skb_queue_tail(&mpath->frame_queue, skb); |
817 | if (skb_to_free) | 817 | if (skb_to_free) |
818 | mesh_path_discard_frame(skb_to_free, dev); | 818 | mesh_path_discard_frame(skb_to_free, sdata); |
819 | err = -ENOENT; | 819 | err = -ENOENT; |
820 | } | 820 | } |
821 | 821 | ||
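Note: the kerneldoc above promises that a failed lookup queues the frame for path discovery instead of dropping it, which transfers skb ownership. A caller-side sketch of that contract; the real caller sits in the tx path outside this patch, so the shape below is assumed:

if (mesh_nexthop_lookup(skb, sdata)) {
	/* frame was queued on the mpath (or discarded internally);
	 * ownership passed to the mesh code - do not free skb here */
	return;
}
/* next hop resolved: the frame's RA now points at the peer, transmit normally */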
@@ -835,7 +835,7 @@ void mesh_path_timer(unsigned long data) | |||
835 | if (!mpath) | 835 | if (!mpath) |
836 | goto endmpathtimer; | 836 | goto endmpathtimer; |
837 | spin_lock_bh(&mpath->state_lock); | 837 | spin_lock_bh(&mpath->state_lock); |
838 | sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev); | 838 | sdata = mpath->sdata; |
839 | if (mpath->flags & MESH_PATH_RESOLVED || | 839 | if (mpath->flags & MESH_PATH_RESOLVED || |
840 | (!(mpath->flags & MESH_PATH_RESOLVING))) | 840 | (!(mpath->flags & MESH_PATH_RESOLVING))) |
841 | mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); | 841 | mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 838ee60492ad..0a60f55f32ab 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -9,7 +9,6 @@ | |||
9 | 9 | ||
10 | #include <linux/etherdevice.h> | 10 | #include <linux/etherdevice.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/netdevice.h> | ||
13 | #include <linux/random.h> | 12 | #include <linux/random.h> |
14 | #include <linux/spinlock.h> | 13 | #include <linux/spinlock.h> |
15 | #include <linux/string.h> | 14 | #include <linux/string.h> |
@@ -62,13 +61,13 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) | |||
62 | /** | 61 | /** |
63 | * mesh_path_lookup - look up a path in the mesh path table | 62 | * mesh_path_lookup - look up a path in the mesh path table |
64 | * @dst: hardware address (ETH_ALEN length) of destination | 63 | * @dst: hardware address (ETH_ALEN length) of destination |
65 | * @dev: local interface | 64 | * @sdata: local subif |
66 | * | 65 | * |
67 | * Returns: pointer to the mesh path structure, or NULL if not found | 66 | * Returns: pointer to the mesh path structure, or NULL if not found |
68 | * | 67 | * |
69 | * Locking: must be called within an rcu read section. | 68 | * Locking: must be called within an rcu read section. |
70 | */ | 69 | */ |
71 | struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev) | 70 | struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) |
72 | { | 71 | { |
73 | struct mesh_path *mpath; | 72 | struct mesh_path *mpath; |
74 | struct hlist_node *n; | 73 | struct hlist_node *n; |
@@ -78,10 +77,10 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev) | |||
78 | 77 | ||
79 | tbl = rcu_dereference(mesh_paths); | 78 | tbl = rcu_dereference(mesh_paths); |
80 | 79 | ||
81 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)]; | 80 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; |
82 | hlist_for_each_entry_rcu(node, n, bucket, list) { | 81 | hlist_for_each_entry_rcu(node, n, bucket, list) { |
83 | mpath = node->mpath; | 82 | mpath = node->mpath; |
84 | if (mpath->dev == dev && | 83 | if (mpath->sdata == sdata && |
85 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { | 84 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { |
86 | if (MPATH_EXPIRED(mpath)) { | 85 | if (MPATH_EXPIRED(mpath)) { |
87 | spin_lock_bh(&mpath->state_lock); | 86 | spin_lock_bh(&mpath->state_lock); |
@@ -98,13 +97,13 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev) | |||
98 | /** | 97 | /** |
99 | * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index | 98 | * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index |
100 | * @idx: index | 99 | * @idx: index |
101 | * @dev: local interface, or NULL for all entries | 100 | * @sdata: local subif, or NULL for all entries |
102 | * | 101 | * |
103 | * Returns: pointer to the mesh path structure, or NULL if not found. | 102 | * Returns: pointer to the mesh path structure, or NULL if not found. |
104 | * | 103 | * |
105 | * Locking: must be called within an rcu read section. | 104 | * Locking: must be called within an rcu read section. |
106 | */ | 105 | */ |
107 | struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev) | 106 | struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) |
108 | { | 107 | { |
109 | struct mpath_node *node; | 108 | struct mpath_node *node; |
110 | struct hlist_node *p; | 109 | struct hlist_node *p; |
@@ -112,7 +111,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev) | |||
112 | int j = 0; | 111 | int j = 0; |
113 | 112 | ||
114 | for_each_mesh_entry(mesh_paths, p, node, i) { | 113 | for_each_mesh_entry(mesh_paths, p, node, i) { |
115 | if (dev && node->mpath->dev != dev) | 114 | if (sdata && node->mpath->sdata != sdata) |
116 | continue; | 115 | continue; |
117 | if (j++ == idx) { | 116 | if (j++ == idx) { |
118 | if (MPATH_EXPIRED(node->mpath)) { | 117 | if (MPATH_EXPIRED(node->mpath)) { |
@@ -131,15 +130,14 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev) | |||
131 | /** | 130 | /** |
132 | * mesh_path_add - allocate and add a new path to the mesh path table | 131 | * mesh_path_add - allocate and add a new path to the mesh path table |
133 | * @addr: destination address of the path (ETH_ALEN length) | 132 | * @addr: destination address of the path (ETH_ALEN length) |
134 | * @dev: local interface | 133 | * @sdata: local subif |
135 | * | 134 | * |
136 | * Returns: 0 on success | 135 | * Returns: 0 on success |
137 | * | 136 | * |
138 | * State: the initial state of the new path is set to 0 | 137 | * State: the initial state of the new path is set to 0 |
139 | */ | 138 | */ |
140 | int mesh_path_add(u8 *dst, struct net_device *dev) | 139 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) |
141 | { | 140 | { |
142 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
143 | struct mesh_path *mpath, *new_mpath; | 141 | struct mesh_path *mpath, *new_mpath; |
144 | struct mpath_node *node, *new_node; | 142 | struct mpath_node *node, *new_node; |
145 | struct hlist_head *bucket; | 143 | struct hlist_head *bucket; |
@@ -148,7 +146,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev) | |||
148 | int err = 0; | 146 | int err = 0; |
149 | u32 hash_idx; | 147 | u32 hash_idx; |
150 | 148 | ||
151 | if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0) | 149 | if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) |
152 | /* never add ourselves as neighbours */ | 150 | /* never add ourselves as neighbours */ |
153 | return -ENOTSUPP; | 151 | return -ENOTSUPP; |
154 | 152 | ||
@@ -169,7 +167,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev) | |||
169 | 167 | ||
170 | read_lock(&pathtbl_resize_lock); | 168 | read_lock(&pathtbl_resize_lock); |
171 | memcpy(new_mpath->dst, dst, ETH_ALEN); | 169 | memcpy(new_mpath->dst, dst, ETH_ALEN); |
172 | new_mpath->dev = dev; | 170 | new_mpath->sdata = sdata; |
173 | new_mpath->flags = 0; | 171 | new_mpath->flags = 0; |
174 | skb_queue_head_init(&new_mpath->frame_queue); | 172 | skb_queue_head_init(&new_mpath->frame_queue); |
175 | new_node->mpath = new_mpath; | 173 | new_node->mpath = new_mpath; |
@@ -179,7 +177,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev) | |||
179 | spin_lock_init(&new_mpath->state_lock); | 177 | spin_lock_init(&new_mpath->state_lock); |
180 | init_timer(&new_mpath->timer); | 178 | init_timer(&new_mpath->timer); |
181 | 179 | ||
182 | hash_idx = mesh_table_hash(dst, dev, mesh_paths); | 180 | hash_idx = mesh_table_hash(dst, sdata, mesh_paths); |
183 | bucket = &mesh_paths->hash_buckets[hash_idx]; | 181 | bucket = &mesh_paths->hash_buckets[hash_idx]; |
184 | 182 | ||
185 | spin_lock(&mesh_paths->hashwlock[hash_idx]); | 183 | spin_lock(&mesh_paths->hashwlock[hash_idx]); |
@@ -187,7 +185,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev) | |||
187 | err = -EEXIST; | 185 | err = -EEXIST; |
188 | hlist_for_each_entry(node, n, bucket, list) { | 186 | hlist_for_each_entry(node, n, bucket, list) { |
189 | mpath = node->mpath; | 187 | mpath = node->mpath; |
190 | if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0) | 188 | if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) |
191 | goto err_exists; | 189 | goto err_exists; |
192 | } | 190 | } |
193 | 191 | ||
@@ -241,7 +239,7 @@ void mesh_plink_broken(struct sta_info *sta) | |||
241 | struct mesh_path *mpath; | 239 | struct mesh_path *mpath; |
242 | struct mpath_node *node; | 240 | struct mpath_node *node; |
243 | struct hlist_node *p; | 241 | struct hlist_node *p; |
244 | struct net_device *dev = sta->sdata->dev; | 242 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
245 | int i; | 243 | int i; |
246 | 244 | ||
247 | rcu_read_lock(); | 245 | rcu_read_lock(); |
@@ -256,7 +254,7 @@ void mesh_plink_broken(struct sta_info *sta) | |||
256 | spin_unlock_bh(&mpath->state_lock); | 254 | spin_unlock_bh(&mpath->state_lock); |
257 | mesh_path_error_tx(mpath->dst, | 255 | mesh_path_error_tx(mpath->dst, |
258 | cpu_to_le32(mpath->dsn), | 256 | cpu_to_le32(mpath->dsn), |
259 | dev->broadcast, dev); | 257 | sdata->dev->broadcast, sdata); |
260 | } else | 258 | } else |
261 | spin_unlock_bh(&mpath->state_lock); | 259 | spin_unlock_bh(&mpath->state_lock); |
262 | } | 260 | } |
@@ -284,11 +282,11 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) | |||
284 | for_each_mesh_entry(mesh_paths, p, node, i) { | 282 | for_each_mesh_entry(mesh_paths, p, node, i) { |
285 | mpath = node->mpath; | 283 | mpath = node->mpath; |
286 | if (mpath->next_hop == sta) | 284 | if (mpath->next_hop == sta) |
287 | mesh_path_del(mpath->dst, mpath->dev); | 285 | mesh_path_del(mpath->dst, mpath->sdata); |
288 | } | 286 | } |
289 | } | 287 | } |
290 | 288 | ||
291 | void mesh_path_flush(struct net_device *dev) | 289 | void mesh_path_flush(struct ieee80211_sub_if_data *sdata) |
292 | { | 290 | { |
293 | struct mesh_path *mpath; | 291 | struct mesh_path *mpath; |
294 | struct mpath_node *node; | 292 | struct mpath_node *node; |
@@ -297,16 +295,15 @@ void mesh_path_flush(struct net_device *dev) | |||
297 | 295 | ||
298 | for_each_mesh_entry(mesh_paths, p, node, i) { | 296 | for_each_mesh_entry(mesh_paths, p, node, i) { |
299 | mpath = node->mpath; | 297 | mpath = node->mpath; |
300 | if (mpath->dev == dev) | 298 | if (mpath->sdata == sdata) |
301 | mesh_path_del(mpath->dst, mpath->dev); | 299 | mesh_path_del(mpath->dst, mpath->sdata); |
302 | } | 300 | } |
303 | } | 301 | } |
304 | 302 | ||
305 | static void mesh_path_node_reclaim(struct rcu_head *rp) | 303 | static void mesh_path_node_reclaim(struct rcu_head *rp) |
306 | { | 304 | { |
307 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); | 305 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); |
308 | struct ieee80211_sub_if_data *sdata = | 306 | struct ieee80211_sub_if_data *sdata = node->mpath->sdata; |
309 | IEEE80211_DEV_TO_SUB_IF(node->mpath->dev); | ||
310 | 307 | ||
311 | del_timer_sync(&node->mpath->timer); | 308 | del_timer_sync(&node->mpath->timer); |
312 | atomic_dec(&sdata->u.sta.mpaths); | 309 | atomic_dec(&sdata->u.sta.mpaths); |
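Note: mesh_path_node_reclaim() is the second half of an RCU deferred-free: lookups walk the buckets under rcu_read_lock(), so a node must be unlinked first and freed only after a grace period. The unlink/call_rcu side lives in mesh_path_del() and is not part of this hunk, so the two lines below are a sketch from context rather than a quote:

hlist_del_rcu(&node->list);			/* readers can no longer find it */
call_rcu(&node->rcu, mesh_path_node_reclaim);	/* free once the grace period ends */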
@@ -318,11 +315,11 @@ static void mesh_path_node_reclaim(struct rcu_head *rp) | |||
318 | * mesh_path_del - delete a mesh path from the table | 315 | * mesh_path_del - delete a mesh path from the table |
319 | * | 316 | * |
320 | * @addr: dst address (ETH_ALEN length) | 317 | * @addr: dst address (ETH_ALEN length) |
321 | * @dev: local interface | 318 | * @sdata: local subif |
322 | * | 319 | * |
323 | * Returns: 0 if successful | 320 | * Returns: 0 if successful |
324 | */ | 321 | */ |
325 | int mesh_path_del(u8 *addr, struct net_device *dev) | 322 | int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) |
326 | { | 323 | { |
327 | struct mesh_path *mpath; | 324 | struct mesh_path *mpath; |
328 | struct mpath_node *node; | 325 | struct mpath_node *node; |
@@ -332,13 +329,13 @@ int mesh_path_del(u8 *addr, struct net_device *dev) | |||
332 | int err = 0; | 329 | int err = 0; |
333 | 330 | ||
334 | read_lock(&pathtbl_resize_lock); | 331 | read_lock(&pathtbl_resize_lock); |
335 | hash_idx = mesh_table_hash(addr, dev, mesh_paths); | 332 | hash_idx = mesh_table_hash(addr, sdata, mesh_paths); |
336 | bucket = &mesh_paths->hash_buckets[hash_idx]; | 333 | bucket = &mesh_paths->hash_buckets[hash_idx]; |
337 | 334 | ||
338 | spin_lock(&mesh_paths->hashwlock[hash_idx]); | 335 | spin_lock(&mesh_paths->hashwlock[hash_idx]); |
339 | hlist_for_each_entry(node, n, bucket, list) { | 336 | hlist_for_each_entry(node, n, bucket, list) { |
340 | mpath = node->mpath; | 337 | mpath = node->mpath; |
341 | if (mpath->dev == dev && | 338 | if (mpath->sdata == sdata && |
342 | memcmp(addr, mpath->dst, ETH_ALEN) == 0) { | 339 | memcmp(addr, mpath->dst, ETH_ALEN) == 0) { |
343 | spin_lock_bh(&mpath->state_lock); | 340 | spin_lock_bh(&mpath->state_lock); |
344 | mpath->flags |= MESH_PATH_RESOLVING; | 341 | mpath->flags |= MESH_PATH_RESOLVING; |
@@ -378,29 +375,29 @@ void mesh_path_tx_pending(struct mesh_path *mpath) | |||
378 | * mesh_path_discard_frame - discard a frame whose path could not be resolved | 375 | * mesh_path_discard_frame - discard a frame whose path could not be resolved |
379 | * | 376 | * |
380 | * @skb: frame to discard | 377 | * @skb: frame to discard |
381 | * @dev: network device the frame was to be sent through | 378 | * @sdata: network subif the frame was to be sent through |
382 | * | 379 | * |
383 | * If the frame was being forwarded from another MP, a PERR frame will be sent | 380 | * If the frame was being forwarded from another MP, a PERR frame will be sent |
384 | * to the precursor. | 381 | * to the precursor. |
385 | * | 382 | * |
386 | * Locking: the function must be called within an rcu_read_lock region | 383 | * Locking: the function must be called within an rcu_read_lock region |
387 | */ | 384 | */ |
388 | void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev) | 385 | void mesh_path_discard_frame(struct sk_buff *skb, |
386 | struct ieee80211_sub_if_data *sdata) | ||
389 | { | 387 | { |
390 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
391 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 388 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
392 | struct mesh_path *mpath; | 389 | struct mesh_path *mpath; |
393 | u32 dsn = 0; | 390 | u32 dsn = 0; |
394 | 391 | ||
395 | if (memcmp(hdr->addr4, dev->dev_addr, ETH_ALEN) != 0) { | 392 | if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) { |
396 | u8 *ra, *da; | 393 | u8 *ra, *da; |
397 | 394 | ||
398 | da = hdr->addr3; | 395 | da = hdr->addr3; |
399 | ra = hdr->addr2; | 396 | ra = hdr->addr2; |
400 | mpath = mesh_path_lookup(da, dev); | 397 | mpath = mesh_path_lookup(da, sdata); |
401 | if (mpath) | 398 | if (mpath) |
402 | dsn = ++mpath->dsn; | 399 | dsn = ++mpath->dsn; |
403 | mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev); | 400 | mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata); |
404 | } | 401 | } |
405 | 402 | ||
406 | kfree_skb(skb); | 403 | kfree_skb(skb); |
@@ -416,14 +413,11 @@ void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev) | |||
416 | */ | 413 | */ |
417 | void mesh_path_flush_pending(struct mesh_path *mpath) | 414 | void mesh_path_flush_pending(struct mesh_path *mpath) |
418 | { | 415 | { |
419 | struct ieee80211_sub_if_data *sdata; | ||
420 | struct sk_buff *skb; | 416 | struct sk_buff *skb; |
421 | 417 | ||
422 | sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev); | ||
423 | |||
424 | while ((skb = skb_dequeue(&mpath->frame_queue)) && | 418 | while ((skb = skb_dequeue(&mpath->frame_queue)) && |
425 | (mpath->flags & MESH_PATH_ACTIVE)) | 419 | (mpath->flags & MESH_PATH_ACTIVE)) |
426 | mesh_path_discard_frame(skb, mpath->dev); | 420 | mesh_path_discard_frame(skb, mpath->sdata); |
427 | } | 421 | } |
428 | 422 | ||
429 | /** | 423 | /** |
@@ -472,7 +466,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) | |||
472 | node = hlist_entry(p, struct mpath_node, list); | 466 | node = hlist_entry(p, struct mpath_node, list); |
473 | mpath = node->mpath; | 467 | mpath = node->mpath; |
474 | new_node->mpath = mpath; | 468 | new_node->mpath = mpath; |
475 | hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl); | 469 | hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl); |
476 | hlist_add_head(&new_node->list, | 470 | hlist_add_head(&new_node->list, |
477 | &newtbl->hash_buckets[hash_idx]); | 471 | &newtbl->hash_buckets[hash_idx]); |
478 | return 0; | 472 | return 0; |
@@ -489,7 +483,7 @@ int mesh_pathtbl_init(void) | |||
489 | return 0; | 483 | return 0; |
490 | } | 484 | } |
491 | 485 | ||
492 | void mesh_path_expire(struct net_device *dev) | 486 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) |
493 | { | 487 | { |
494 | struct mesh_path *mpath; | 488 | struct mesh_path *mpath; |
495 | struct mpath_node *node; | 489 | struct mpath_node *node; |
@@ -498,7 +492,7 @@ void mesh_path_expire(struct net_device *dev) | |||
498 | 492 | ||
499 | read_lock(&pathtbl_resize_lock); | 493 | read_lock(&pathtbl_resize_lock); |
500 | for_each_mesh_entry(mesh_paths, p, node, i) { | 494 | for_each_mesh_entry(mesh_paths, p, node, i) { |
501 | if (node->mpath->dev != dev) | 495 | if (node->mpath->sdata != sdata) |
502 | continue; | 496 | continue; |
503 | mpath = node->mpath; | 497 | mpath = node->mpath; |
504 | spin_lock_bh(&mpath->state_lock); | 498 | spin_lock_bh(&mpath->state_lock); |
@@ -507,7 +501,7 @@ void mesh_path_expire(struct net_device *dev) | |||
507 | time_after(jiffies, | 501 | time_after(jiffies, |
508 | mpath->exp_time + MESH_PATH_EXPIRE)) { | 502 | mpath->exp_time + MESH_PATH_EXPIRE)) { |
509 | spin_unlock_bh(&mpath->state_lock); | 503 | spin_unlock_bh(&mpath->state_lock); |
510 | mesh_path_del(mpath->dst, mpath->dev); | 504 | mesh_path_del(mpath->dst, mpath->sdata); |
511 | } else | 505 | } else |
512 | spin_unlock_bh(&mpath->state_lock); | 506 | spin_unlock_bh(&mpath->state_lock); |
513 | } | 507 | } |
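The mesh_pathtbl.c hunks above all apply the same conversion: functions that used to take a struct net_device and derive the sub-interface with IEEE80211_DEV_TO_SUB_IF() now take the struct ieee80211_sub_if_data directly, and struct mesh_path is keyed on mpath->sdata instead of mpath->dev. A minimal host-side sketch of that pattern follows; the structures and names are illustrative stand-ins, not the kernel's real layouts.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the kernel structures (not the real layouts). */
struct subif { char name[16]; unsigned char addr[6]; };
struct netdev { struct subif *priv; };          /* old code derived this on every call */
struct mesh_path { struct subif *sdata; unsigned char dst[6]; };

/* Old style: take the net_device, convert to the sub-interface internally. */
static int path_matches_old(struct mesh_path *mpath, struct netdev *dev,
                            const unsigned char *addr)
{
        struct subif *sdata = dev->priv;        /* IEEE80211_DEV_TO_SUB_IF(dev) */
        return mpath->sdata == sdata && memcmp(addr, mpath->dst, 6) == 0;
}

/* New style: callers already hold the sub-interface, so pass it directly. */
static int path_matches_new(struct mesh_path *mpath, struct subif *sdata,
                            const unsigned char *addr)
{
        return mpath->sdata == sdata && memcmp(addr, mpath->dst, 6) == 0;
}

int main(void)
{
        struct subif sta = { "mesh0", { 0x02, 0, 0, 0, 0, 1 } };
        struct netdev dev = { &sta };
        struct mesh_path mp = { &sta, { 0x02, 0, 0, 0, 0, 2 } };

        printf("old: %d new: %d\n",
               path_matches_old(&mp, &dev, mp.dst),
               path_matches_new(&mp, &sta, mp.dst));
        return 0;
}

The comparison itself is unchanged; the refactor only removes the redundant net_device-to-sdata lookup from each callee.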
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 9efeb1f07025..7356462dee96 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c | |||
@@ -144,10 +144,10 @@ void mesh_plink_deactivate(struct sta_info *sta) | |||
144 | spin_unlock_bh(&sta->lock); | 144 | spin_unlock_bh(&sta->lock); |
145 | } | 145 | } |
146 | 146 | ||
147 | static int mesh_plink_frame_tx(struct net_device *dev, | 147 | static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, |
148 | enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid, | 148 | enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid, |
149 | __le16 reason) { | 149 | __le16 reason) { |
150 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 150 | struct ieee80211_local *local = sdata->local; |
151 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | 151 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); |
152 | struct ieee80211_mgmt *mgmt; | 152 | struct ieee80211_mgmt *mgmt; |
153 | bool include_plid = false; | 153 | bool include_plid = false; |
@@ -163,10 +163,10 @@ static int mesh_plink_frame_tx(struct net_device *dev, | |||
163 | mgmt = (struct ieee80211_mgmt *) | 163 | mgmt = (struct ieee80211_mgmt *) |
164 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action)); | 164 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action)); |
165 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action)); | 165 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action)); |
166 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 166 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
167 | IEEE80211_STYPE_ACTION); | 167 | IEEE80211_STYPE_ACTION); |
168 | memcpy(mgmt->da, da, ETH_ALEN); | 168 | memcpy(mgmt->da, da, ETH_ALEN); |
169 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 169 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
170 | /* BSSID is left zeroed, wildcard value */ | 170 | /* BSSID is left zeroed, wildcard value */ |
171 | mgmt->u.action.category = PLINK_CATEGORY; | 171 | mgmt->u.action.category = PLINK_CATEGORY; |
172 | mgmt->u.action.u.plink_action.action_code = action; | 172 | mgmt->u.action.u.plink_action.action_code = action; |
@@ -180,7 +180,7 @@ static int mesh_plink_frame_tx(struct net_device *dev, | |||
180 | /* two-byte status code followed by two-byte AID */ | 180 | /* two-byte status code followed by two-byte AID */ |
181 | memset(pos, 0, 4); | 181 | memset(pos, 0, 4); |
182 | } | 182 | } |
183 | mesh_mgmt_ies_add(skb, dev); | 183 | mesh_mgmt_ies_add(skb, sdata); |
184 | } | 184 | } |
185 | 185 | ||
186 | /* Add Peer Link Management element */ | 186 | /* Add Peer Link Management element */ |
@@ -217,15 +217,14 @@ static int mesh_plink_frame_tx(struct net_device *dev, | |||
217 | memcpy(pos, &reason, 2); | 217 | memcpy(pos, &reason, 2); |
218 | } | 218 | } |
219 | 219 | ||
220 | ieee80211_sta_tx(dev, skb, 0); | 220 | ieee80211_tx_skb(sdata, skb, 0); |
221 | return 0; | 221 | return 0; |
222 | } | 222 | } |
223 | 223 | ||
224 | void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, | 224 | void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct ieee80211_sub_if_data *sdata, |
225 | bool peer_accepting_plinks) | 225 | bool peer_accepting_plinks) |
226 | { | 226 | { |
227 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 227 | struct ieee80211_local *local = sdata->local; |
228 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
229 | struct sta_info *sta; | 228 | struct sta_info *sta; |
230 | 229 | ||
231 | rcu_read_lock(); | 230 | rcu_read_lock(); |
@@ -257,7 +256,6 @@ static void mesh_plink_timer(unsigned long data) | |||
257 | { | 256 | { |
258 | struct sta_info *sta; | 257 | struct sta_info *sta; |
259 | __le16 llid, plid, reason; | 258 | __le16 llid, plid, reason; |
260 | struct net_device *dev = NULL; | ||
261 | struct ieee80211_sub_if_data *sdata; | 259 | struct ieee80211_sub_if_data *sdata; |
262 | #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG | 260 | #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG |
263 | DECLARE_MAC_BUF(mac); | 261 | DECLARE_MAC_BUF(mac); |
@@ -282,7 +280,6 @@ static void mesh_plink_timer(unsigned long data) | |||
282 | llid = sta->llid; | 280 | llid = sta->llid; |
283 | plid = sta->plid; | 281 | plid = sta->plid; |
284 | sdata = sta->sdata; | 282 | sdata = sta->sdata; |
285 | dev = sdata->dev; | ||
286 | 283 | ||
287 | switch (sta->plink_state) { | 284 | switch (sta->plink_state) { |
288 | case PLINK_OPN_RCVD: | 285 | case PLINK_OPN_RCVD: |
@@ -299,7 +296,7 @@ static void mesh_plink_timer(unsigned long data) | |||
299 | ++sta->plink_retries; | 296 | ++sta->plink_retries; |
300 | mod_plink_timer(sta, sta->plink_timeout); | 297 | mod_plink_timer(sta, sta->plink_timeout); |
301 | spin_unlock_bh(&sta->lock); | 298 | spin_unlock_bh(&sta->lock); |
302 | mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, | 299 | mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->addr, llid, |
303 | 0, 0); | 300 | 0, 0); |
304 | break; | 301 | break; |
305 | } | 302 | } |
@@ -312,7 +309,7 @@ static void mesh_plink_timer(unsigned long data) | |||
312 | sta->plink_state = PLINK_HOLDING; | 309 | sta->plink_state = PLINK_HOLDING; |
313 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); | 310 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); |
314 | spin_unlock_bh(&sta->lock); | 311 | spin_unlock_bh(&sta->lock); |
315 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid, | 312 | mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->addr, llid, plid, |
316 | reason); | 313 | reason); |
317 | break; | 314 | break; |
318 | case PLINK_HOLDING: | 315 | case PLINK_HOLDING: |
@@ -357,7 +354,7 @@ int mesh_plink_open(struct sta_info *sta) | |||
357 | mpl_dbg("Mesh plink: starting establishment with %s\n", | 354 | mpl_dbg("Mesh plink: starting establishment with %s\n", |
358 | print_mac(mac, sta->addr)); | 355 | print_mac(mac, sta->addr)); |
359 | 356 | ||
360 | return mesh_plink_frame_tx(sdata->dev, PLINK_OPEN, | 357 | return mesh_plink_frame_tx(sdata, PLINK_OPEN, |
361 | sta->addr, llid, 0, 0); | 358 | sta->addr, llid, 0, 0); |
362 | } | 359 | } |
363 | 360 | ||
@@ -403,15 +400,14 @@ int mesh_plink_close(struct sta_info *sta) | |||
403 | llid = sta->llid; | 400 | llid = sta->llid; |
404 | plid = sta->plid; | 401 | plid = sta->plid; |
405 | spin_unlock_bh(&sta->lock); | 402 | spin_unlock_bh(&sta->lock); |
406 | mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid, | 403 | mesh_plink_frame_tx(sta->sdata, PLINK_CLOSE, sta->addr, llid, |
407 | plid, reason); | 404 | plid, reason); |
408 | return 0; | 405 | return 0; |
409 | } | 406 | } |
410 | 407 | ||
411 | void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | 408 | void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, |
412 | size_t len, struct ieee80211_rx_status *rx_status) | 409 | size_t len, struct ieee80211_rx_status *rx_status) |
413 | { | 410 | { |
414 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
415 | struct ieee80211_local *local = sdata->local; | 411 | struct ieee80211_local *local = sdata->local; |
416 | struct ieee802_11_elems elems; | 412 | struct ieee802_11_elems elems; |
417 | struct sta_info *sta; | 413 | struct sta_info *sta; |
@@ -425,6 +421,10 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
425 | DECLARE_MAC_BUF(mac); | 421 | DECLARE_MAC_BUF(mac); |
426 | #endif | 422 | #endif |
427 | 423 | ||
424 | /* need action_code, aux */ | ||
425 | if (len < IEEE80211_MIN_ACTION_SIZE + 3) | ||
426 | return; | ||
427 | |||
428 | if (is_multicast_ether_addr(mgmt->da)) { | 428 | if (is_multicast_ether_addr(mgmt->da)) { |
429 | mpl_dbg("Mesh plink: ignore frame from multicast address"); | 429 | mpl_dbg("Mesh plink: ignore frame from multicast address"); |
430 | return; | 430 | return; |
@@ -478,7 +478,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
478 | 478 | ||
479 | /* Now we will figure out the appropriate event... */ | 479 | /* Now we will figure out the appropriate event... */ |
480 | event = PLINK_UNDEFINED; | 480 | event = PLINK_UNDEFINED; |
481 | if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, dev))) { | 481 | if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) { |
482 | switch (ftype) { | 482 | switch (ftype) { |
483 | case PLINK_OPEN: | 483 | case PLINK_OPEN: |
484 | event = OPN_RJCT; | 484 | event = OPN_RJCT; |
@@ -577,9 +577,9 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
577 | sta->llid = llid; | 577 | sta->llid = llid; |
578 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); | 578 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); |
579 | spin_unlock_bh(&sta->lock); | 579 | spin_unlock_bh(&sta->lock); |
580 | mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, | 580 | mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->addr, llid, |
581 | 0, 0); | 581 | 0, 0); |
582 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, | 582 | mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->addr, |
583 | llid, plid, 0); | 583 | llid, plid, 0); |
584 | break; | 584 | break; |
585 | default: | 585 | default: |
@@ -604,7 +604,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
604 | 604 | ||
605 | llid = sta->llid; | 605 | llid = sta->llid; |
606 | spin_unlock_bh(&sta->lock); | 606 | spin_unlock_bh(&sta->lock); |
607 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 607 | mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->addr, llid, |
608 | plid, reason); | 608 | plid, reason); |
609 | break; | 609 | break; |
610 | case OPN_ACPT: | 610 | case OPN_ACPT: |
@@ -613,7 +613,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
613 | sta->plid = plid; | 613 | sta->plid = plid; |
614 | llid = sta->llid; | 614 | llid = sta->llid; |
615 | spin_unlock_bh(&sta->lock); | 615 | spin_unlock_bh(&sta->lock); |
616 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | 616 | mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->addr, llid, |
617 | plid, 0); | 617 | plid, 0); |
618 | break; | 618 | break; |
619 | case CNF_ACPT: | 619 | case CNF_ACPT: |
@@ -646,13 +646,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
646 | 646 | ||
647 | llid = sta->llid; | 647 | llid = sta->llid; |
648 | spin_unlock_bh(&sta->lock); | 648 | spin_unlock_bh(&sta->lock); |
649 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 649 | mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->addr, llid, |
650 | plid, reason); | 650 | plid, reason); |
651 | break; | 651 | break; |
652 | case OPN_ACPT: | 652 | case OPN_ACPT: |
653 | llid = sta->llid; | 653 | llid = sta->llid; |
654 | spin_unlock_bh(&sta->lock); | 654 | spin_unlock_bh(&sta->lock); |
655 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | 655 | mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->addr, llid, |
656 | plid, 0); | 656 | plid, 0); |
657 | break; | 657 | break; |
658 | case CNF_ACPT: | 658 | case CNF_ACPT: |
@@ -685,7 +685,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
685 | 685 | ||
686 | llid = sta->llid; | 686 | llid = sta->llid; |
687 | spin_unlock_bh(&sta->lock); | 687 | spin_unlock_bh(&sta->lock); |
688 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 688 | mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->addr, llid, |
689 | plid, reason); | 689 | plid, reason); |
690 | break; | 690 | break; |
691 | case OPN_ACPT: | 691 | case OPN_ACPT: |
@@ -695,7 +695,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
695 | spin_unlock_bh(&sta->lock); | 695 | spin_unlock_bh(&sta->lock); |
696 | mpl_dbg("Mesh plink with %s ESTABLISHED\n", | 696 | mpl_dbg("Mesh plink with %s ESTABLISHED\n", |
697 | print_mac(mac, sta->addr)); | 697 | print_mac(mac, sta->addr)); |
698 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | 698 | mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->addr, llid, |
699 | plid, 0); | 699 | plid, 0); |
700 | break; | 700 | break; |
701 | default: | 701 | default: |
@@ -714,13 +714,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
714 | llid = sta->llid; | 714 | llid = sta->llid; |
715 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); | 715 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); |
716 | spin_unlock_bh(&sta->lock); | 716 | spin_unlock_bh(&sta->lock); |
717 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 717 | mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->addr, llid, |
718 | plid, reason); | 718 | plid, reason); |
719 | break; | 719 | break; |
720 | case OPN_ACPT: | 720 | case OPN_ACPT: |
721 | llid = sta->llid; | 721 | llid = sta->llid; |
722 | spin_unlock_bh(&sta->lock); | 722 | spin_unlock_bh(&sta->lock); |
723 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | 723 | mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->addr, llid, |
724 | plid, 0); | 724 | plid, 0); |
725 | break; | 725 | break; |
726 | default: | 726 | default: |
@@ -743,7 +743,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
743 | llid = sta->llid; | 743 | llid = sta->llid; |
744 | reason = sta->reason; | 744 | reason = sta->reason; |
745 | spin_unlock_bh(&sta->lock); | 745 | spin_unlock_bh(&sta->lock); |
746 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 746 | mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->addr, llid, |
747 | plid, reason); | 747 | plid, reason); |
748 | break; | 748 | break; |
749 | default: | 749 | default: |
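Several mesh_plink.c and mlme.c hunks replace the old IEEE80211_FC() helper with an explicit cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_...). The frame control field is little-endian on the air, so the type and subtype constants are OR'd together and byte-swapped once. A small host-side check of the values involved; the constants are copied from linux/ieee80211.h and the cpu_to_le16() stand-in assumes a little-endian host.

#include <stdio.h>
#include <stdint.h>

#define IEEE80211_FTYPE_MGMT    0x0000
#define IEEE80211_STYPE_AUTH    0x00B0
#define IEEE80211_STYPE_ACTION  0x00D0

/* On a little-endian host cpu_to_le16() is the identity; shown for clarity. */
static uint16_t cpu_to_le16(uint16_t v) { return v; }

int main(void)
{
        uint16_t auth   = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
        uint16_t action = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION);

        /* 0x00b0 and 0x00d0: management frames, subtypes Authentication and Action */
        printf("auth fc = 0x%04x, action fc = 0x%04x\n", auth, action);
        return 0;
}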
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 902cac1bd246..2c06f6965b7d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -11,11 +11,6 @@ | |||
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | /* TODO: | ||
15 | * order BSS list by RSSI(?) ("quality of AP") | ||
16 | * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE, | ||
17 | * SSID) | ||
18 | */ | ||
19 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
20 | #include <linux/if_ether.h> | 15 | #include <linux/if_ether.h> |
21 | #include <linux/skbuff.h> | 16 | #include <linux/skbuff.h> |
@@ -26,14 +21,14 @@ | |||
26 | #include <linux/etherdevice.h> | 21 | #include <linux/etherdevice.h> |
27 | #include <linux/rtnetlink.h> | 22 | #include <linux/rtnetlink.h> |
28 | #include <net/iw_handler.h> | 23 | #include <net/iw_handler.h> |
29 | #include <asm/types.h> | ||
30 | |||
31 | #include <net/mac80211.h> | 24 | #include <net/mac80211.h> |
25 | |||
32 | #include "ieee80211_i.h" | 26 | #include "ieee80211_i.h" |
33 | #include "rate.h" | 27 | #include "rate.h" |
34 | #include "led.h" | 28 | #include "led.h" |
35 | #include "mesh.h" | 29 | #include "mesh.h" |
36 | 30 | ||
31 | #define IEEE80211_ASSOC_SCANS_MAX_TRIES 2 | ||
37 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) | 32 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) |
38 | #define IEEE80211_AUTH_MAX_TRIES 3 | 33 | #define IEEE80211_AUTH_MAX_TRIES 3 |
39 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) | 34 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) |
@@ -46,10 +41,6 @@ | |||
46 | #define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) | 41 | #define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) |
47 | #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) | 42 | #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) |
48 | 43 | ||
49 | #define IEEE80211_PROBE_DELAY (HZ / 33) | ||
50 | #define IEEE80211_CHANNEL_TIME (HZ / 33) | ||
51 | #define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5) | ||
52 | #define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ) | ||
53 | #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) | 44 | #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) |
54 | #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) | 45 | #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) |
55 | #define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) | 46 | #define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) |
@@ -57,576 +48,60 @@ | |||
57 | #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 | 48 | #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 |
58 | 49 | ||
59 | 50 | ||
60 | #define ERP_INFO_USE_PROTECTION BIT(1) | 51 | /* utils */ |
61 | |||
62 | /* mgmt header + 1 byte action code */ | ||
63 | #define IEEE80211_MIN_ACTION_SIZE (24 + 1) | ||
64 | |||
65 | #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 | ||
66 | #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C | ||
67 | #define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 | ||
68 | #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 | ||
69 | #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 | ||
70 | |||
71 | /* next values represent the buffer size for A-MPDU frame. | ||
72 | * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) */ | ||
73 | #define IEEE80211_MIN_AMPDU_BUF 0x8 | ||
74 | #define IEEE80211_MAX_AMPDU_BUF 0x40 | ||
75 | |||
76 | static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, | ||
77 | u8 *ssid, size_t ssid_len); | ||
78 | static struct ieee80211_sta_bss * | ||
79 | ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq, | ||
80 | u8 *ssid, u8 ssid_len); | ||
81 | static void ieee80211_rx_bss_put(struct ieee80211_local *local, | ||
82 | struct ieee80211_sta_bss *bss); | ||
83 | static int ieee80211_sta_find_ibss(struct net_device *dev, | ||
84 | struct ieee80211_if_sta *ifsta); | ||
85 | static int ieee80211_sta_wep_configured(struct net_device *dev); | ||
86 | static int ieee80211_sta_start_scan(struct net_device *dev, | ||
87 | u8 *ssid, size_t ssid_len); | ||
88 | static int ieee80211_sta_config_auth(struct net_device *dev, | ||
89 | struct ieee80211_if_sta *ifsta); | ||
90 | static void sta_rx_agg_session_timer_expired(unsigned long data); | ||
91 | |||
92 | |||
93 | void ieee802_11_parse_elems(u8 *start, size_t len, | ||
94 | struct ieee802_11_elems *elems) | ||
95 | { | ||
96 | size_t left = len; | ||
97 | u8 *pos = start; | ||
98 | |||
99 | memset(elems, 0, sizeof(*elems)); | ||
100 | |||
101 | while (left >= 2) { | ||
102 | u8 id, elen; | ||
103 | |||
104 | id = *pos++; | ||
105 | elen = *pos++; | ||
106 | left -= 2; | ||
107 | |||
108 | if (elen > left) | ||
109 | return; | ||
110 | |||
111 | switch (id) { | ||
112 | case WLAN_EID_SSID: | ||
113 | elems->ssid = pos; | ||
114 | elems->ssid_len = elen; | ||
115 | break; | ||
116 | case WLAN_EID_SUPP_RATES: | ||
117 | elems->supp_rates = pos; | ||
118 | elems->supp_rates_len = elen; | ||
119 | break; | ||
120 | case WLAN_EID_FH_PARAMS: | ||
121 | elems->fh_params = pos; | ||
122 | elems->fh_params_len = elen; | ||
123 | break; | ||
124 | case WLAN_EID_DS_PARAMS: | ||
125 | elems->ds_params = pos; | ||
126 | elems->ds_params_len = elen; | ||
127 | break; | ||
128 | case WLAN_EID_CF_PARAMS: | ||
129 | elems->cf_params = pos; | ||
130 | elems->cf_params_len = elen; | ||
131 | break; | ||
132 | case WLAN_EID_TIM: | ||
133 | elems->tim = pos; | ||
134 | elems->tim_len = elen; | ||
135 | break; | ||
136 | case WLAN_EID_IBSS_PARAMS: | ||
137 | elems->ibss_params = pos; | ||
138 | elems->ibss_params_len = elen; | ||
139 | break; | ||
140 | case WLAN_EID_CHALLENGE: | ||
141 | elems->challenge = pos; | ||
142 | elems->challenge_len = elen; | ||
143 | break; | ||
144 | case WLAN_EID_WPA: | ||
145 | if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && | ||
146 | pos[2] == 0xf2) { | ||
147 | /* Microsoft OUI (00:50:F2) */ | ||
148 | if (pos[3] == 1) { | ||
149 | /* OUI Type 1 - WPA IE */ | ||
150 | elems->wpa = pos; | ||
151 | elems->wpa_len = elen; | ||
152 | } else if (elen >= 5 && pos[3] == 2) { | ||
153 | if (pos[4] == 0) { | ||
154 | elems->wmm_info = pos; | ||
155 | elems->wmm_info_len = elen; | ||
156 | } else if (pos[4] == 1) { | ||
157 | elems->wmm_param = pos; | ||
158 | elems->wmm_param_len = elen; | ||
159 | } | ||
160 | } | ||
161 | } | ||
162 | break; | ||
163 | case WLAN_EID_RSN: | ||
164 | elems->rsn = pos; | ||
165 | elems->rsn_len = elen; | ||
166 | break; | ||
167 | case WLAN_EID_ERP_INFO: | ||
168 | elems->erp_info = pos; | ||
169 | elems->erp_info_len = elen; | ||
170 | break; | ||
171 | case WLAN_EID_EXT_SUPP_RATES: | ||
172 | elems->ext_supp_rates = pos; | ||
173 | elems->ext_supp_rates_len = elen; | ||
174 | break; | ||
175 | case WLAN_EID_HT_CAPABILITY: | ||
176 | elems->ht_cap_elem = pos; | ||
177 | elems->ht_cap_elem_len = elen; | ||
178 | break; | ||
179 | case WLAN_EID_HT_EXTRA_INFO: | ||
180 | elems->ht_info_elem = pos; | ||
181 | elems->ht_info_elem_len = elen; | ||
182 | break; | ||
183 | case WLAN_EID_MESH_ID: | ||
184 | elems->mesh_id = pos; | ||
185 | elems->mesh_id_len = elen; | ||
186 | break; | ||
187 | case WLAN_EID_MESH_CONFIG: | ||
188 | elems->mesh_config = pos; | ||
189 | elems->mesh_config_len = elen; | ||
190 | break; | ||
191 | case WLAN_EID_PEER_LINK: | ||
192 | elems->peer_link = pos; | ||
193 | elems->peer_link_len = elen; | ||
194 | break; | ||
195 | case WLAN_EID_PREQ: | ||
196 | elems->preq = pos; | ||
197 | elems->preq_len = elen; | ||
198 | break; | ||
199 | case WLAN_EID_PREP: | ||
200 | elems->prep = pos; | ||
201 | elems->prep_len = elen; | ||
202 | break; | ||
203 | case WLAN_EID_PERR: | ||
204 | elems->perr = pos; | ||
205 | elems->perr_len = elen; | ||
206 | break; | ||
207 | case WLAN_EID_CHANNEL_SWITCH: | ||
208 | elems->ch_switch_elem = pos; | ||
209 | elems->ch_switch_elem_len = elen; | ||
210 | break; | ||
211 | case WLAN_EID_QUIET: | ||
212 | if (!elems->quiet_elem) { | ||
213 | elems->quiet_elem = pos; | ||
214 | elems->quiet_elem_len = elen; | ||
215 | } | ||
216 | elems->num_of_quiet_elem++; | ||
217 | break; | ||
218 | case WLAN_EID_COUNTRY: | ||
219 | elems->country_elem = pos; | ||
220 | elems->country_elem_len = elen; | ||
221 | break; | ||
222 | case WLAN_EID_PWR_CONSTRAINT: | ||
223 | elems->pwr_constr_elem = pos; | ||
224 | elems->pwr_constr_elem_len = elen; | ||
225 | break; | ||
226 | default: | ||
227 | break; | ||
228 | } | ||
229 | |||
230 | left -= elen; | ||
231 | pos += elen; | ||
232 | } | ||
233 | } | ||
234 | |||
235 | |||
236 | static int ecw2cw(int ecw) | 52 | static int ecw2cw(int ecw) |
237 | { | 53 | { |
238 | return (1 << ecw) - 1; | 54 | return (1 << ecw) - 1; |
239 | } | 55 | } |
240 | 56 | ||
241 | 57 | static u8 *ieee80211_bss_get_ie(struct ieee80211_sta_bss *bss, u8 ie) | |
242 | static void ieee80211_sta_def_wmm_params(struct net_device *dev, | ||
243 | struct ieee80211_sta_bss *bss, | ||
244 | int ibss) | ||
245 | { | ||
246 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
247 | struct ieee80211_local *local = sdata->local; | ||
248 | int i, have_higher_than_11mbit = 0; | ||
249 | |||
250 | |||
251 | /* cf. IEEE 802.11 9.2.12 */ | ||
252 | for (i = 0; i < bss->supp_rates_len; i++) | ||
253 | if ((bss->supp_rates[i] & 0x7f) * 5 > 110) | ||
254 | have_higher_than_11mbit = 1; | ||
255 | |||
256 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && | ||
257 | have_higher_than_11mbit) | ||
258 | sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; | ||
259 | else | ||
260 | sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; | ||
261 | |||
262 | |||
263 | if (local->ops->conf_tx) { | ||
264 | struct ieee80211_tx_queue_params qparam; | ||
265 | |||
266 | memset(&qparam, 0, sizeof(qparam)); | ||
267 | |||
268 | qparam.aifs = 2; | ||
269 | |||
270 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && | ||
271 | !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)) | ||
272 | qparam.cw_min = 31; | ||
273 | else | ||
274 | qparam.cw_min = 15; | ||
275 | |||
276 | qparam.cw_max = 1023; | ||
277 | qparam.txop = 0; | ||
278 | |||
279 | for (i = 0; i < local_to_hw(local)->queues; i++) | ||
280 | local->ops->conf_tx(local_to_hw(local), i, &qparam); | ||
281 | } | ||
282 | } | ||
283 | |||
284 | static void ieee80211_sta_wmm_params(struct net_device *dev, | ||
285 | struct ieee80211_if_sta *ifsta, | ||
286 | u8 *wmm_param, size_t wmm_param_len) | ||
287 | { | 58 | { |
288 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 59 | u8 *end, *pos; |
289 | struct ieee80211_tx_queue_params params; | ||
290 | size_t left; | ||
291 | int count; | ||
292 | u8 *pos; | ||
293 | |||
294 | if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED)) | ||
295 | return; | ||
296 | |||
297 | if (!wmm_param) | ||
298 | return; | ||
299 | |||
300 | if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) | ||
301 | return; | ||
302 | count = wmm_param[6] & 0x0f; | ||
303 | if (count == ifsta->wmm_last_param_set) | ||
304 | return; | ||
305 | ifsta->wmm_last_param_set = count; | ||
306 | 60 | ||
307 | pos = wmm_param + 8; | 61 | pos = bss->ies; |
308 | left = wmm_param_len - 8; | 62 | if (pos == NULL) |
309 | 63 | return NULL; | |
310 | memset(¶ms, 0, sizeof(params)); | 64 | end = pos + bss->ies_len; |
311 | |||
312 | if (!local->ops->conf_tx) | ||
313 | return; | ||
314 | |||
315 | local->wmm_acm = 0; | ||
316 | for (; left >= 4; left -= 4, pos += 4) { | ||
317 | int aci = (pos[0] >> 5) & 0x03; | ||
318 | int acm = (pos[0] >> 4) & 0x01; | ||
319 | int queue; | ||
320 | 65 | ||
321 | switch (aci) { | 66 | while (pos + 1 < end) { |
322 | case 1: | 67 | if (pos + 2 + pos[1] > end) |
323 | queue = 3; | ||
324 | if (acm) | ||
325 | local->wmm_acm |= BIT(0) | BIT(3); | ||
326 | break; | ||
327 | case 2: | ||
328 | queue = 1; | ||
329 | if (acm) | ||
330 | local->wmm_acm |= BIT(4) | BIT(5); | ||
331 | break; | ||
332 | case 3: | ||
333 | queue = 0; | ||
334 | if (acm) | ||
335 | local->wmm_acm |= BIT(6) | BIT(7); | ||
336 | break; | ||
337 | case 0: | ||
338 | default: | ||
339 | queue = 2; | ||
340 | if (acm) | ||
341 | local->wmm_acm |= BIT(1) | BIT(2); | ||
342 | break; | 68 | break; |
343 | } | 69 | if (pos[0] == ie) |
344 | 70 | return pos; | |
345 | params.aifs = pos[0] & 0x0f; | 71 | pos += 2 + pos[1]; |
346 | params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); | ||
347 | params.cw_min = ecw2cw(pos[1] & 0x0f); | ||
348 | params.txop = get_unaligned_le16(pos + 2); | ||
349 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
350 | printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " | ||
351 | "cWmin=%d cWmax=%d txop=%d\n", | ||
352 | dev->name, queue, aci, acm, params.aifs, params.cw_min, | ||
353 | params.cw_max, params.txop); | ||
354 | #endif | ||
355 | /* TODO: handle ACM (block TX, fallback to next lowest allowed | ||
356 | * AC for now) */ | ||
357 | if (local->ops->conf_tx(local_to_hw(local), queue, ¶ms)) { | ||
358 | printk(KERN_DEBUG "%s: failed to set TX queue " | ||
359 | "parameters for queue %d\n", dev->name, queue); | ||
360 | } | ||
361 | } | 72 | } |
362 | } | ||
363 | 73 | ||
364 | static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata, | 74 | return NULL; |
365 | bool use_protection, | ||
366 | bool use_short_preamble) | ||
367 | { | ||
368 | struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf; | ||
369 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
370 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
371 | DECLARE_MAC_BUF(mac); | ||
372 | #endif | ||
373 | u32 changed = 0; | ||
374 | |||
375 | if (use_protection != bss_conf->use_cts_prot) { | ||
376 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
377 | if (net_ratelimit()) { | ||
378 | printk(KERN_DEBUG "%s: CTS protection %s (BSSID=" | ||
379 | "%s)\n", | ||
380 | sdata->dev->name, | ||
381 | use_protection ? "enabled" : "disabled", | ||
382 | print_mac(mac, ifsta->bssid)); | ||
383 | } | ||
384 | #endif | ||
385 | bss_conf->use_cts_prot = use_protection; | ||
386 | changed |= BSS_CHANGED_ERP_CTS_PROT; | ||
387 | } | ||
388 | |||
389 | if (use_short_preamble != bss_conf->use_short_preamble) { | ||
390 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
391 | if (net_ratelimit()) { | ||
392 | printk(KERN_DEBUG "%s: switched to %s barker preamble" | ||
393 | " (BSSID=%s)\n", | ||
394 | sdata->dev->name, | ||
395 | use_short_preamble ? "short" : "long", | ||
396 | print_mac(mac, ifsta->bssid)); | ||
397 | } | ||
398 | #endif | ||
399 | bss_conf->use_short_preamble = use_short_preamble; | ||
400 | changed |= BSS_CHANGED_ERP_PREAMBLE; | ||
401 | } | ||
402 | |||
403 | return changed; | ||
404 | } | 75 | } |
405 | 76 | ||
406 | static u32 ieee80211_handle_erp_ie(struct ieee80211_sub_if_data *sdata, | 77 | static int ieee80211_compatible_rates(struct ieee80211_sta_bss *bss, |
407 | u8 erp_value) | 78 | struct ieee80211_supported_band *sband, |
408 | { | 79 | u64 *rates) |
409 | bool use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0; | ||
410 | bool use_short_preamble = (erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0; | ||
411 | |||
412 | return ieee80211_handle_protect_preamb(sdata, | ||
413 | use_protection, use_short_preamble); | ||
414 | } | ||
415 | |||
416 | static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, | ||
417 | struct ieee80211_sta_bss *bss) | ||
418 | { | ||
419 | u32 changed = 0; | ||
420 | |||
421 | if (bss->has_erp_value) | ||
422 | changed |= ieee80211_handle_erp_ie(sdata, bss->erp_value); | ||
423 | else { | ||
424 | u16 capab = bss->capability; | ||
425 | changed |= ieee80211_handle_protect_preamb(sdata, false, | ||
426 | (capab & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0); | ||
427 | } | ||
428 | |||
429 | return changed; | ||
430 | } | ||
431 | |||
432 | int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie, | ||
433 | struct ieee80211_ht_info *ht_info) | ||
434 | { | ||
435 | |||
436 | if (ht_info == NULL) | ||
437 | return -EINVAL; | ||
438 | |||
439 | memset(ht_info, 0, sizeof(*ht_info)); | ||
440 | |||
441 | if (ht_cap_ie) { | ||
442 | u8 ampdu_info = ht_cap_ie->ampdu_params_info; | ||
443 | |||
444 | ht_info->ht_supported = 1; | ||
445 | ht_info->cap = le16_to_cpu(ht_cap_ie->cap_info); | ||
446 | ht_info->ampdu_factor = | ||
447 | ampdu_info & IEEE80211_HT_CAP_AMPDU_FACTOR; | ||
448 | ht_info->ampdu_density = | ||
449 | (ampdu_info & IEEE80211_HT_CAP_AMPDU_DENSITY) >> 2; | ||
450 | memcpy(ht_info->supp_mcs_set, ht_cap_ie->supp_mcs_set, 16); | ||
451 | } else | ||
452 | ht_info->ht_supported = 0; | ||
453 | |||
454 | return 0; | ||
455 | } | ||
456 | |||
457 | int ieee80211_ht_addt_info_ie_to_ht_bss_info( | ||
458 | struct ieee80211_ht_addt_info *ht_add_info_ie, | ||
459 | struct ieee80211_ht_bss_info *bss_info) | ||
460 | { | ||
461 | if (bss_info == NULL) | ||
462 | return -EINVAL; | ||
463 | |||
464 | memset(bss_info, 0, sizeof(*bss_info)); | ||
465 | |||
466 | if (ht_add_info_ie) { | ||
467 | u16 op_mode; | ||
468 | op_mode = le16_to_cpu(ht_add_info_ie->operation_mode); | ||
469 | |||
470 | bss_info->primary_channel = ht_add_info_ie->control_chan; | ||
471 | bss_info->bss_cap = ht_add_info_ie->ht_param; | ||
472 | bss_info->bss_op_mode = (u8)(op_mode & 0xff); | ||
473 | } | ||
474 | |||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | static void ieee80211_sta_send_associnfo(struct net_device *dev, | ||
479 | struct ieee80211_if_sta *ifsta) | ||
480 | { | ||
481 | char *buf; | ||
482 | size_t len; | ||
483 | int i; | ||
484 | union iwreq_data wrqu; | ||
485 | |||
486 | if (!ifsta->assocreq_ies && !ifsta->assocresp_ies) | ||
487 | return; | ||
488 | |||
489 | buf = kmalloc(50 + 2 * (ifsta->assocreq_ies_len + | ||
490 | ifsta->assocresp_ies_len), GFP_KERNEL); | ||
491 | if (!buf) | ||
492 | return; | ||
493 | |||
494 | len = sprintf(buf, "ASSOCINFO("); | ||
495 | if (ifsta->assocreq_ies) { | ||
496 | len += sprintf(buf + len, "ReqIEs="); | ||
497 | for (i = 0; i < ifsta->assocreq_ies_len; i++) { | ||
498 | len += sprintf(buf + len, "%02x", | ||
499 | ifsta->assocreq_ies[i]); | ||
500 | } | ||
501 | } | ||
502 | if (ifsta->assocresp_ies) { | ||
503 | if (ifsta->assocreq_ies) | ||
504 | len += sprintf(buf + len, " "); | ||
505 | len += sprintf(buf + len, "RespIEs="); | ||
506 | for (i = 0; i < ifsta->assocresp_ies_len; i++) { | ||
507 | len += sprintf(buf + len, "%02x", | ||
508 | ifsta->assocresp_ies[i]); | ||
509 | } | ||
510 | } | ||
511 | len += sprintf(buf + len, ")"); | ||
512 | |||
513 | if (len > IW_CUSTOM_MAX) { | ||
514 | len = sprintf(buf, "ASSOCRESPIE="); | ||
515 | for (i = 0; i < ifsta->assocresp_ies_len; i++) { | ||
516 | len += sprintf(buf + len, "%02x", | ||
517 | ifsta->assocresp_ies[i]); | ||
518 | } | ||
519 | } | ||
520 | |||
521 | memset(&wrqu, 0, sizeof(wrqu)); | ||
522 | wrqu.data.length = len; | ||
523 | wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); | ||
524 | |||
525 | kfree(buf); | ||
526 | } | ||
527 | |||
528 | |||
529 | static void ieee80211_set_associated(struct net_device *dev, | ||
530 | struct ieee80211_if_sta *ifsta, | ||
531 | bool assoc) | ||
532 | { | 80 | { |
533 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 81 | int i, j, count; |
534 | struct ieee80211_local *local = sdata->local; | 82 | *rates = 0; |
535 | struct ieee80211_conf *conf = &local_to_hw(local)->conf; | 83 | count = 0; |
536 | union iwreq_data wrqu; | 84 | for (i = 0; i < bss->supp_rates_len; i++) { |
537 | u32 changed = BSS_CHANGED_ASSOC; | 85 | int rate = (bss->supp_rates[i] & 0x7F) * 5; |
538 | |||
539 | if (assoc) { | ||
540 | struct ieee80211_sta_bss *bss; | ||
541 | |||
542 | ifsta->flags |= IEEE80211_STA_ASSOCIATED; | ||
543 | |||
544 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | ||
545 | return; | ||
546 | |||
547 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, | ||
548 | conf->channel->center_freq, | ||
549 | ifsta->ssid, ifsta->ssid_len); | ||
550 | if (bss) { | ||
551 | /* set timing information */ | ||
552 | sdata->bss_conf.beacon_int = bss->beacon_int; | ||
553 | sdata->bss_conf.timestamp = bss->timestamp; | ||
554 | sdata->bss_conf.dtim_period = bss->dtim_period; | ||
555 | |||
556 | changed |= ieee80211_handle_bss_capability(sdata, bss); | ||
557 | |||
558 | ieee80211_rx_bss_put(local, bss); | ||
559 | } | ||
560 | |||
561 | if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) { | ||
562 | changed |= BSS_CHANGED_HT; | ||
563 | sdata->bss_conf.assoc_ht = 1; | ||
564 | sdata->bss_conf.ht_conf = &conf->ht_conf; | ||
565 | sdata->bss_conf.ht_bss_conf = &conf->ht_bss_conf; | ||
566 | } | ||
567 | |||
568 | ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET; | ||
569 | memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN); | ||
570 | memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN); | ||
571 | ieee80211_sta_send_associnfo(dev, ifsta); | ||
572 | } else { | ||
573 | netif_carrier_off(dev); | ||
574 | ieee80211_sta_tear_down_BA_sessions(dev, ifsta->bssid); | ||
575 | ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; | ||
576 | changed |= ieee80211_reset_erp_info(dev); | ||
577 | |||
578 | sdata->bss_conf.assoc_ht = 0; | ||
579 | sdata->bss_conf.ht_conf = NULL; | ||
580 | sdata->bss_conf.ht_bss_conf = NULL; | ||
581 | 86 | ||
582 | memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); | 87 | for (j = 0; j < sband->n_bitrates; j++) |
88 | if (sband->bitrates[j].bitrate == rate) { | ||
89 | *rates |= BIT(j); | ||
90 | count++; | ||
91 | break; | ||
92 | } | ||
583 | } | 93 | } |
584 | ifsta->last_probe = jiffies; | ||
585 | ieee80211_led_assoc(local, assoc); | ||
586 | |||
587 | sdata->bss_conf.assoc = assoc; | ||
588 | ieee80211_bss_info_change_notify(sdata, changed); | ||
589 | |||
590 | if (assoc) | ||
591 | netif_carrier_on(dev); | ||
592 | |||
593 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | ||
594 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | ||
595 | } | ||
596 | |||
597 | static void ieee80211_set_disassoc(struct net_device *dev, | ||
598 | struct ieee80211_if_sta *ifsta, int deauth) | ||
599 | { | ||
600 | if (deauth) | ||
601 | ifsta->auth_tries = 0; | ||
602 | ifsta->assoc_tries = 0; | ||
603 | ieee80211_set_associated(dev, ifsta, 0); | ||
604 | } | ||
605 | |||
606 | void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, | ||
607 | int encrypt) | ||
608 | { | ||
609 | struct ieee80211_sub_if_data *sdata; | ||
610 | |||
611 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
612 | skb->dev = sdata->local->mdev; | ||
613 | skb_set_mac_header(skb, 0); | ||
614 | skb_set_network_header(skb, 0); | ||
615 | skb_set_transport_header(skb, 0); | ||
616 | |||
617 | skb->iif = sdata->dev->ifindex; | ||
618 | skb->do_not_encrypt = !encrypt; | ||
619 | 94 | ||
620 | dev_queue_xmit(skb); | 95 | return count; |
621 | } | 96 | } |
622 | 97 | ||
623 | 98 | /* frame sending functions */ | |
624 | static void ieee80211_send_auth(struct net_device *dev, | 99 | static void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, |
625 | struct ieee80211_if_sta *ifsta, | 100 | struct ieee80211_if_sta *ifsta, |
626 | int transaction, u8 *extra, size_t extra_len, | 101 | int transaction, u8 *extra, size_t extra_len, |
627 | int encrypt) | 102 | int encrypt) |
628 | { | 103 | { |
629 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 104 | struct ieee80211_local *local = sdata->local; |
630 | struct sk_buff *skb; | 105 | struct sk_buff *skb; |
631 | struct ieee80211_mgmt *mgmt; | 106 | struct ieee80211_mgmt *mgmt; |
632 | 107 | ||
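The new ieee80211_bss_get_ie() helper introduced in the hunk above replaces per-IE pointers cached in struct ieee80211_sta_bss (such as bss->ht_add_ie) with a linear scan over the raw information-element buffer bss->ies: each element is a one-byte ID, a one-byte length, then the payload. A standalone sketch of that scan follows; the element IDs used in main() are stand-ins (0 is the SSID element, 61 stands in for the HT extra-information element).

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Scan a buffer of (id, len, payload...) information elements for "ie".
 * Returns a pointer to the element header, or NULL if absent or truncated. */
static uint8_t *find_ie(uint8_t *ies, size_t ies_len, uint8_t ie)
{
        uint8_t *pos = ies, *end = ies + ies_len;

        while (pos + 1 < end) {
                if (pos + 2 + pos[1] > end)     /* element runs past the buffer */
                        break;
                if (pos[0] == ie)
                        return pos;
                pos += 2 + pos[1];              /* skip header + payload */
        }
        return NULL;
}

int main(void)
{
        /* id 0 (len 4), then id 61 (len 2) as a stand-in HT extra-info element */
        uint8_t ies[] = { 0, 4, 'm', 'e', 's', 'h', 61, 2, 0x0b, 0x00 };
        uint8_t *ht = find_ie(ies, sizeof(ies), 61);

        if (ht)
                printf("found IE %u, length %u\n", ht[0], ht[1]);
        return 0;
}

Truncated elements make the scan stop early rather than read past the buffer, which is why the cached-pointer approach could be dropped.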
@@ -634,19 +109,19 @@ static void ieee80211_send_auth(struct net_device *dev, | |||
634 | sizeof(*mgmt) + 6 + extra_len); | 109 | sizeof(*mgmt) + 6 + extra_len); |
635 | if (!skb) { | 110 | if (!skb) { |
636 | printk(KERN_DEBUG "%s: failed to allocate buffer for auth " | 111 | printk(KERN_DEBUG "%s: failed to allocate buffer for auth " |
637 | "frame\n", dev->name); | 112 | "frame\n", sdata->dev->name); |
638 | return; | 113 | return; |
639 | } | 114 | } |
640 | skb_reserve(skb, local->hw.extra_tx_headroom); | 115 | skb_reserve(skb, local->hw.extra_tx_headroom); |
641 | 116 | ||
642 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); | 117 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); |
643 | memset(mgmt, 0, 24 + 6); | 118 | memset(mgmt, 0, 24 + 6); |
644 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 119 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
645 | IEEE80211_STYPE_AUTH); | 120 | IEEE80211_STYPE_AUTH); |
646 | if (encrypt) | 121 | if (encrypt) |
647 | mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); | 122 | mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); |
648 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); | 123 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); |
649 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 124 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
650 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | 125 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); |
651 | mgmt->u.auth.auth_alg = cpu_to_le16(ifsta->auth_alg); | 126 | mgmt->u.auth.auth_alg = cpu_to_le16(ifsta->auth_alg); |
652 | mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); | 127 | mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); |
@@ -655,61 +130,76 @@ static void ieee80211_send_auth(struct net_device *dev, | |||
655 | if (extra) | 130 | if (extra) |
656 | memcpy(skb_put(skb, extra_len), extra, extra_len); | 131 | memcpy(skb_put(skb, extra_len), extra, extra_len); |
657 | 132 | ||
658 | ieee80211_sta_tx(dev, skb, encrypt); | 133 | ieee80211_tx_skb(sdata, skb, encrypt); |
659 | } | 134 | } |
660 | 135 | ||
661 | 136 | void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, | |
662 | static void ieee80211_authenticate(struct net_device *dev, | 137 | u8 *ssid, size_t ssid_len) |
663 | struct ieee80211_if_sta *ifsta) | ||
664 | { | 138 | { |
665 | DECLARE_MAC_BUF(mac); | 139 | struct ieee80211_local *local = sdata->local; |
140 | struct ieee80211_supported_band *sband; | ||
141 | struct sk_buff *skb; | ||
142 | struct ieee80211_mgmt *mgmt; | ||
143 | u8 *pos, *supp_rates, *esupp_rates = NULL; | ||
144 | int i; | ||
666 | 145 | ||
667 | ifsta->auth_tries++; | 146 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200); |
668 | if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) { | 147 | if (!skb) { |
669 | printk(KERN_DEBUG "%s: authentication with AP %s" | 148 | printk(KERN_DEBUG "%s: failed to allocate buffer for probe " |
670 | " timed out\n", | 149 | "request\n", sdata->dev->name); |
671 | dev->name, print_mac(mac, ifsta->bssid)); | ||
672 | ifsta->state = IEEE80211_DISABLED; | ||
673 | return; | 150 | return; |
674 | } | 151 | } |
152 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
675 | 153 | ||
676 | ifsta->state = IEEE80211_AUTHENTICATE; | 154 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
677 | printk(KERN_DEBUG "%s: authenticate with AP %s\n", | 155 | memset(mgmt, 0, 24); |
678 | dev->name, print_mac(mac, ifsta->bssid)); | 156 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
679 | 157 | IEEE80211_STYPE_PROBE_REQ); | |
680 | ieee80211_send_auth(dev, ifsta, 1, NULL, 0, 0); | 158 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
681 | 159 | if (dst) { | |
682 | mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT); | 160 | memcpy(mgmt->da, dst, ETH_ALEN); |
683 | } | 161 | memcpy(mgmt->bssid, dst, ETH_ALEN); |
162 | } else { | ||
163 | memset(mgmt->da, 0xff, ETH_ALEN); | ||
164 | memset(mgmt->bssid, 0xff, ETH_ALEN); | ||
165 | } | ||
166 | pos = skb_put(skb, 2 + ssid_len); | ||
167 | *pos++ = WLAN_EID_SSID; | ||
168 | *pos++ = ssid_len; | ||
169 | memcpy(pos, ssid, ssid_len); | ||
684 | 170 | ||
685 | static int ieee80211_compatible_rates(struct ieee80211_sta_bss *bss, | 171 | supp_rates = skb_put(skb, 2); |
686 | struct ieee80211_supported_band *sband, | 172 | supp_rates[0] = WLAN_EID_SUPP_RATES; |
687 | u64 *rates) | 173 | supp_rates[1] = 0; |
688 | { | 174 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
689 | int i, j, count; | ||
690 | *rates = 0; | ||
691 | count = 0; | ||
692 | for (i = 0; i < bss->supp_rates_len; i++) { | ||
693 | int rate = (bss->supp_rates[i] & 0x7F) * 5; | ||
694 | 175 | ||
695 | for (j = 0; j < sband->n_bitrates; j++) | 176 | for (i = 0; i < sband->n_bitrates; i++) { |
696 | if (sband->bitrates[j].bitrate == rate) { | 177 | struct ieee80211_rate *rate = &sband->bitrates[i]; |
697 | *rates |= BIT(j); | 178 | if (esupp_rates) { |
698 | count++; | 179 | pos = skb_put(skb, 1); |
699 | break; | 180 | esupp_rates[1]++; |
700 | } | 181 | } else if (supp_rates[1] == 8) { |
182 | esupp_rates = skb_put(skb, 3); | ||
183 | esupp_rates[0] = WLAN_EID_EXT_SUPP_RATES; | ||
184 | esupp_rates[1] = 1; | ||
185 | pos = &esupp_rates[2]; | ||
186 | } else { | ||
187 | pos = skb_put(skb, 1); | ||
188 | supp_rates[1]++; | ||
189 | } | ||
190 | *pos = rate->bitrate / 5; | ||
701 | } | 191 | } |
702 | 192 | ||
703 | return count; | 193 | ieee80211_tx_skb(sdata, skb, 0); |
704 | } | 194 | } |
705 | 195 | ||
706 | static void ieee80211_send_assoc(struct net_device *dev, | 196 | static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, |
707 | struct ieee80211_if_sta *ifsta) | 197 | struct ieee80211_if_sta *ifsta) |
708 | { | 198 | { |
709 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 199 | struct ieee80211_local *local = sdata->local; |
710 | struct sk_buff *skb; | 200 | struct sk_buff *skb; |
711 | struct ieee80211_mgmt *mgmt; | 201 | struct ieee80211_mgmt *mgmt; |
712 | u8 *pos, *ies; | 202 | u8 *pos, *ies, *ht_add_ie; |
713 | int i, len, count, rates_len, supp_rates_len; | 203 | int i, len, count, rates_len, supp_rates_len; |
714 | u16 capab; | 204 | u16 capab; |
715 | struct ieee80211_sta_bss *bss; | 205 | struct ieee80211_sta_bss *bss; |
@@ -722,7 +212,7 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
722 | ifsta->ssid_len); | 212 | ifsta->ssid_len); |
723 | if (!skb) { | 213 | if (!skb) { |
724 | printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " | 214 | printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " |
725 | "frame\n", dev->name); | 215 | "frame\n", sdata->dev->name); |
726 | return; | 216 | return; |
727 | } | 217 | } |
728 | skb_reserve(skb, local->hw.extra_tx_headroom); | 218 | skb_reserve(skb, local->hw.extra_tx_headroom); |
@@ -738,13 +228,13 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
738 | capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; | 228 | capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; |
739 | } | 229 | } |
740 | 230 | ||
741 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, | 231 | bss = ieee80211_rx_bss_get(local, ifsta->bssid, |
742 | local->hw.conf.channel->center_freq, | 232 | local->hw.conf.channel->center_freq, |
743 | ifsta->ssid, ifsta->ssid_len); | 233 | ifsta->ssid, ifsta->ssid_len); |
744 | if (bss) { | 234 | if (bss) { |
745 | if (bss->capability & WLAN_CAPABILITY_PRIVACY) | 235 | if (bss->capability & WLAN_CAPABILITY_PRIVACY) |
746 | capab |= WLAN_CAPABILITY_PRIVACY; | 236 | capab |= WLAN_CAPABILITY_PRIVACY; |
747 | if (bss->wmm_ie) | 237 | if (bss->wmm_used) |
748 | wmm = 1; | 238 | wmm = 1; |
749 | 239 | ||
750 | /* get all rates supported by the device and the AP as | 240 | /* get all rates supported by the device and the AP as |
@@ -766,13 +256,13 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
766 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 256 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
767 | memset(mgmt, 0, 24); | 257 | memset(mgmt, 0, 24); |
768 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); | 258 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); |
769 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 259 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
770 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | 260 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); |
771 | 261 | ||
772 | if (ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) { | 262 | if (ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) { |
773 | skb_put(skb, 10); | 263 | skb_put(skb, 10); |
774 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 264 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
775 | IEEE80211_STYPE_REASSOC_REQ); | 265 | IEEE80211_STYPE_REASSOC_REQ); |
776 | mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); | 266 | mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); |
777 | mgmt->u.reassoc_req.listen_interval = | 267 | mgmt->u.reassoc_req.listen_interval = |
778 | cpu_to_le16(local->hw.conf.listen_interval); | 268 | cpu_to_le16(local->hw.conf.listen_interval); |
@@ -780,8 +270,8 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
780 | ETH_ALEN); | 270 | ETH_ALEN); |
781 | } else { | 271 | } else { |
782 | skb_put(skb, 4); | 272 | skb_put(skb, 4); |
783 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 273 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
784 | IEEE80211_STYPE_ASSOC_REQ); | 274 | IEEE80211_STYPE_ASSOC_REQ); |
785 | mgmt->u.assoc_req.capab_info = cpu_to_le16(capab); | 275 | mgmt->u.assoc_req.capab_info = cpu_to_le16(capab); |
786 | mgmt->u.reassoc_req.listen_interval = | 276 | mgmt->u.reassoc_req.listen_interval = |
787 | cpu_to_le16(local->hw.conf.listen_interval); | 277 | cpu_to_le16(local->hw.conf.listen_interval); |
@@ -866,9 +356,10 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
866 | 356 | ||
867 | /* wmm support is a must to HT */ | 357 | /* wmm support is a must to HT */ |
868 | if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) && | 358 | if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) && |
869 | sband->ht_info.ht_supported && bss->ht_add_ie) { | 359 | sband->ht_info.ht_supported && |
360 | (ht_add_ie = ieee80211_bss_get_ie(bss, WLAN_EID_HT_EXTRA_INFO))) { | ||
870 | struct ieee80211_ht_addt_info *ht_add_info = | 361 | struct ieee80211_ht_addt_info *ht_add_info = |
871 | (struct ieee80211_ht_addt_info *)bss->ht_add_ie; | 362 | (struct ieee80211_ht_addt_info *)ht_add_ie; |
872 | u16 cap = sband->ht_info.cap; | 363 | u16 cap = sband->ht_info.cap; |
873 | __le16 tmp; | 364 | __le16 tmp; |
874 | u32 flags = local->hw.conf.channel->flags; | 365 | u32 flags = local->hw.conf.channel->flags; |
@@ -907,21 +398,22 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
907 | if (ifsta->assocreq_ies) | 398 | if (ifsta->assocreq_ies) |
908 | memcpy(ifsta->assocreq_ies, ies, ifsta->assocreq_ies_len); | 399 | memcpy(ifsta->assocreq_ies, ies, ifsta->assocreq_ies_len); |
909 | 400 | ||
910 | ieee80211_sta_tx(dev, skb, 0); | 401 | ieee80211_tx_skb(sdata, skb, 0); |
911 | } | 402 | } |
912 | 403 | ||
913 | 404 | ||
914 | static void ieee80211_send_deauth(struct net_device *dev, | 405 | static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, |
915 | struct ieee80211_if_sta *ifsta, u16 reason) | 406 | u16 stype, u16 reason) |
916 | { | 407 | { |
917 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 408 | struct ieee80211_local *local = sdata->local; |
409 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
918 | struct sk_buff *skb; | 410 | struct sk_buff *skb; |
919 | struct ieee80211_mgmt *mgmt; | 411 | struct ieee80211_mgmt *mgmt; |
920 | 412 | ||
921 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); | 413 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); |
922 | if (!skb) { | 414 | if (!skb) { |
923 | printk(KERN_DEBUG "%s: failed to allocate buffer for deauth " | 415 | printk(KERN_DEBUG "%s: failed to allocate buffer for " |
924 | "frame\n", dev->name); | 416 | "deauth/disassoc frame\n", sdata->dev->name); |
925 | return; | 417 | return; |
926 | } | 418 | } |
927 | skb_reserve(skb, local->hw.extra_tx_headroom); | 419 | skb_reserve(skb, local->hw.extra_tx_headroom); |
@@ -929,929 +421,544 @@ static void ieee80211_send_deauth(struct net_device *dev, | |||
929 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 421 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
930 | memset(mgmt, 0, 24); | 422 | memset(mgmt, 0, 24); |
931 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); | 423 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); |
932 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 424 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
933 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | 425 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); |
934 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 426 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); |
935 | IEEE80211_STYPE_DEAUTH); | ||
936 | skb_put(skb, 2); | 427 | skb_put(skb, 2); |
428 | /* u.deauth.reason_code == u.disassoc.reason_code */ | ||
937 | mgmt->u.deauth.reason_code = cpu_to_le16(reason); | 429 | mgmt->u.deauth.reason_code = cpu_to_le16(reason); |
938 | 430 | ||
939 | ieee80211_sta_tx(dev, skb, 0); | 431 | ieee80211_tx_skb(sdata, skb, 0); |
940 | } | 432 | } |
941 | 433 | ||
942 | 434 | /* MLME */ | |
943 | static void ieee80211_send_disassoc(struct net_device *dev, | 435 | static void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, |
944 | struct ieee80211_if_sta *ifsta, u16 reason) | 436 | struct ieee80211_sta_bss *bss) |
945 | { | 437 | { |
946 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 438 | struct ieee80211_local *local = sdata->local; |
947 | struct sk_buff *skb; | 439 | int i, have_higher_than_11mbit = 0; |
948 | struct ieee80211_mgmt *mgmt; | ||
949 | 440 | ||
950 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); | 441 | /* cf. IEEE 802.11 9.2.12 */ |
951 | if (!skb) { | 442 | for (i = 0; i < bss->supp_rates_len; i++) |
952 | printk(KERN_DEBUG "%s: failed to allocate buffer for disassoc " | 443 | if ((bss->supp_rates[i] & 0x7f) * 5 > 110) |
953 | "frame\n", dev->name); | 444 | have_higher_than_11mbit = 1; |
954 | return; | ||
955 | } | ||
956 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
957 | 445 | ||
958 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 446 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && |
959 | memset(mgmt, 0, 24); | 447 | have_higher_than_11mbit) |
960 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); | 448 | sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; |
961 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 449 | else |
962 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | 450 | sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; |
963 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
964 | IEEE80211_STYPE_DISASSOC); | ||
965 | skb_put(skb, 2); | ||
966 | mgmt->u.disassoc.reason_code = cpu_to_le16(reason); | ||
967 | 451 | ||
968 | ieee80211_sta_tx(dev, skb, 0); | 452 | ieee80211_set_wmm_default(sdata); |
969 | } | 453 | } |
970 | 454 | ||
971 | 455 | static void ieee80211_sta_wmm_params(struct ieee80211_local *local, | |
972 | static int ieee80211_privacy_mismatch(struct net_device *dev, | 456 | struct ieee80211_if_sta *ifsta, |
973 | struct ieee80211_if_sta *ifsta) | 457 | u8 *wmm_param, size_t wmm_param_len) |
974 | { | 458 | { |
975 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 459 | struct ieee80211_tx_queue_params params; |
976 | struct ieee80211_sta_bss *bss; | 460 | size_t left; |
977 | int bss_privacy; | 461 | int count; |
978 | int wep_privacy; | 462 | u8 *pos; |
979 | int privacy_invoked; | ||
980 | |||
981 | if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL)) | ||
982 | return 0; | ||
983 | |||
984 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, | ||
985 | local->hw.conf.channel->center_freq, | ||
986 | ifsta->ssid, ifsta->ssid_len); | ||
987 | if (!bss) | ||
988 | return 0; | ||
989 | |||
990 | bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY); | ||
991 | wep_privacy = !!ieee80211_sta_wep_configured(dev); | ||
992 | privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED); | ||
993 | 463 | ||
994 | ieee80211_rx_bss_put(local, bss); | 464 | if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED)) |
465 | return; | ||
995 | 466 | ||
996 | if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked)) | 467 | if (!wmm_param) |
997 | return 0; | 468 | return; |
998 | 469 | ||
999 | return 1; | 470 | if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) |
1000 | } | 471 | return; |
472 | count = wmm_param[6] & 0x0f; | ||
473 | if (count == ifsta->wmm_last_param_set) | ||
474 | return; | ||
475 | ifsta->wmm_last_param_set = count; | ||
1001 | 476 | ||
477 | pos = wmm_param + 8; | ||
478 | left = wmm_param_len - 8; | ||
1002 | 479 | ||
1003 | static void ieee80211_associate(struct net_device *dev, | 480 | memset(¶ms, 0, sizeof(params)); |
1004 | struct ieee80211_if_sta *ifsta) | ||
1005 | { | ||
1006 | DECLARE_MAC_BUF(mac); | ||
1007 | 481 | ||
1008 | ifsta->assoc_tries++; | 482 | if (!local->ops->conf_tx) |
1009 | if (ifsta->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) { | ||
1010 | printk(KERN_DEBUG "%s: association with AP %s" | ||
1011 | " timed out\n", | ||
1012 | dev->name, print_mac(mac, ifsta->bssid)); | ||
1013 | ifsta->state = IEEE80211_DISABLED; | ||
1014 | return; | 483 | return; |
1015 | } | ||
1016 | 484 | ||
1017 | ifsta->state = IEEE80211_ASSOCIATE; | 485 | local->wmm_acm = 0; |
1018 | printk(KERN_DEBUG "%s: associate with AP %s\n", | 486 | for (; left >= 4; left -= 4, pos += 4) { |
1019 | dev->name, print_mac(mac, ifsta->bssid)); | 487 | int aci = (pos[0] >> 5) & 0x03; |
1020 | if (ieee80211_privacy_mismatch(dev, ifsta)) { | 488 | int acm = (pos[0] >> 4) & 0x01; |
1021 | printk(KERN_DEBUG "%s: mismatch in privacy configuration and " | 489 | int queue; |
1022 | "mixed-cell disabled - abort association\n", dev->name); | ||
1023 | ifsta->state = IEEE80211_DISABLED; | ||
1024 | return; | ||
1025 | } | ||
1026 | 490 | ||
1027 | ieee80211_send_assoc(dev, ifsta); | 491 | switch (aci) { |
492 | case 1: | ||
493 | queue = 3; | ||
494 | if (acm) | ||
495 | local->wmm_acm |= BIT(0) | BIT(3); | ||
496 | break; | ||
497 | case 2: | ||
498 | queue = 1; | ||
499 | if (acm) | ||
500 | local->wmm_acm |= BIT(4) | BIT(5); | ||
501 | break; | ||
502 | case 3: | ||
503 | queue = 0; | ||
504 | if (acm) | ||
505 | local->wmm_acm |= BIT(6) | BIT(7); | ||
506 | break; | ||
507 | case 0: | ||
508 | default: | ||
509 | queue = 2; | ||
510 | if (acm) | ||
511 | local->wmm_acm |= BIT(1) | BIT(2); | ||
512 | break; | ||
513 | } | ||
1028 | 514 | ||
1029 | mod_timer(&ifsta->timer, jiffies + IEEE80211_ASSOC_TIMEOUT); | 515 | params.aifs = pos[0] & 0x0f; |
516 | params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); | ||
517 | params.cw_min = ecw2cw(pos[1] & 0x0f); | ||
518 | params.txop = get_unaligned_le16(pos + 2); | ||
519 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
520 | printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " | ||
521 | "cWmin=%d cWmax=%d txop=%d\n", | ||
522 | local->mdev->name, queue, aci, acm, params.aifs, params.cw_min, | ||
523 | params.cw_max, params.txop); | ||
524 | #endif | ||
525 | /* TODO: handle ACM (block TX, fallback to next lowest allowed | ||
526 | * AC for now) */ | ||
527 | if (local->ops->conf_tx(local_to_hw(local), queue, ¶ms)) { | ||
528 | printk(KERN_DEBUG "%s: failed to set TX queue " | ||
529 | "parameters for queue %d\n", local->mdev->name, queue); | ||
530 | } | ||
531 | } | ||
1030 | } | 532 | } |
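For reference on the parsing loop above: each 4-byte AC record carries AIFSN/ACM/ACI in the first octet, the contention-window exponents in the second, and the TXOP limit in the last two; ecw2cw() expands the exponent form (CW = 2^ECW - 1). A sketch under those assumptions (the example_ prefix marks an illustrative name, not a symbol from the patch):

	/* Per-AC record layout consumed by ieee80211_sta_wmm_params():
	 *   pos[0]    bits 0-3 AIFSN, bit 4 ACM, bits 5-6 ACI
	 *   pos[1]    bits 0-3 ECWmin, bits 4-7 ECWmax
	 *   pos[2..3] TXOP limit (little endian, 32 us units)          */
	static inline int example_ecw2cw(int ecw)
	{
		return (1 << ecw) - 1;	/* ECW 4 -> CW 15, ECW 10 -> CW 1023 */
	}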
1031 | 533 | ||
1032 | 534 | static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata, | |
1033 | static void ieee80211_associated(struct net_device *dev, | 535 | bool use_protection, |
1034 | struct ieee80211_if_sta *ifsta) | 536 | bool use_short_preamble) |
1035 | { | 537 | { |
1036 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 538 | struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf; |
1037 | struct sta_info *sta; | 539 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
1038 | int disassoc; | 540 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
1039 | DECLARE_MAC_BUF(mac); | 541 | DECLARE_MAC_BUF(mac); |
542 | #endif | ||
543 | u32 changed = 0; | ||
1040 | 544 | ||
1041 | /* TODO: start monitoring current AP signal quality and number of | 545 | if (use_protection != bss_conf->use_cts_prot) { |
1042 | * missed beacons. Scan other channels every now and then and search | 546 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
1043 | * for better APs. */ | 547 | if (net_ratelimit()) { |
1044 | /* TODO: remove expired BSSes */ | 548 | printk(KERN_DEBUG "%s: CTS protection %s (BSSID=" |
1045 | 549 | "%s)\n", | |
1046 | ifsta->state = IEEE80211_ASSOCIATED; | 550 | sdata->dev->name, |
1047 | 551 | use_protection ? "enabled" : "disabled", | |
1048 | rcu_read_lock(); | 552 | print_mac(mac, ifsta->bssid)); |
1049 | |||
1050 | sta = sta_info_get(local, ifsta->bssid); | ||
1051 | if (!sta) { | ||
1052 | printk(KERN_DEBUG "%s: No STA entry for own AP %s\n", | ||
1053 | dev->name, print_mac(mac, ifsta->bssid)); | ||
1054 | disassoc = 1; | ||
1055 | } else { | ||
1056 | disassoc = 0; | ||
1057 | if (time_after(jiffies, | ||
1058 | sta->last_rx + IEEE80211_MONITORING_INTERVAL)) { | ||
1059 | if (ifsta->flags & IEEE80211_STA_PROBEREQ_POLL) { | ||
1060 | printk(KERN_DEBUG "%s: No ProbeResp from " | ||
1061 | "current AP %s - assume out of " | ||
1062 | "range\n", | ||
1063 | dev->name, print_mac(mac, ifsta->bssid)); | ||
1064 | disassoc = 1; | ||
1065 | sta_info_unlink(&sta); | ||
1066 | } else | ||
1067 | ieee80211_send_probe_req(dev, ifsta->bssid, | ||
1068 | local->scan_ssid, | ||
1069 | local->scan_ssid_len); | ||
1070 | ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL; | ||
1071 | } else { | ||
1072 | ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL; | ||
1073 | if (time_after(jiffies, ifsta->last_probe + | ||
1074 | IEEE80211_PROBE_INTERVAL)) { | ||
1075 | ifsta->last_probe = jiffies; | ||
1076 | ieee80211_send_probe_req(dev, ifsta->bssid, | ||
1077 | ifsta->ssid, | ||
1078 | ifsta->ssid_len); | ||
1079 | } | ||
1080 | } | 553 | } |
554 | #endif | ||
555 | bss_conf->use_cts_prot = use_protection; | ||
556 | changed |= BSS_CHANGED_ERP_CTS_PROT; | ||
1081 | } | 557 | } |
1082 | 558 | ||
1083 | rcu_read_unlock(); | 559 | if (use_short_preamble != bss_conf->use_short_preamble) { |
1084 | 560 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | |
1085 | if (disassoc && sta) | 561 | if (net_ratelimit()) { |
1086 | sta_info_destroy(sta); | 562 | printk(KERN_DEBUG "%s: switched to %s barker preamble" |
1087 | 563 | " (BSSID=%s)\n", | |
1088 | if (disassoc) { | 564 | sdata->dev->name, |
1089 | ifsta->state = IEEE80211_DISABLED; | 565 | use_short_preamble ? "short" : "long", |
1090 | ieee80211_set_associated(dev, ifsta, 0); | 566 | print_mac(mac, ifsta->bssid)); |
1091 | } else { | 567 | } |
1092 | mod_timer(&ifsta->timer, jiffies + | 568 | #endif |
1093 | IEEE80211_MONITORING_INTERVAL); | 569 | bss_conf->use_short_preamble = use_short_preamble; |
570 | changed |= BSS_CHANGED_ERP_PREAMBLE; | ||
1094 | } | 571 | } |
1095 | } | ||
1096 | 572 | ||
573 | return changed; | ||
574 | } | ||
1097 | 575 | ||
1098 | static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, | 576 | static u32 ieee80211_handle_erp_ie(struct ieee80211_sub_if_data *sdata, |
1099 | u8 *ssid, size_t ssid_len) | 577 | u8 erp_value) |
1100 | { | 578 | { |
1101 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 579 | bool use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0; |
1102 | struct ieee80211_supported_band *sband; | 580 | bool use_short_preamble = (erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0; |
1103 | struct sk_buff *skb; | ||
1104 | struct ieee80211_mgmt *mgmt; | ||
1105 | u8 *pos, *supp_rates, *esupp_rates = NULL; | ||
1106 | int i; | ||
1107 | |||
1108 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200); | ||
1109 | if (!skb) { | ||
1110 | printk(KERN_DEBUG "%s: failed to allocate buffer for probe " | ||
1111 | "request\n", dev->name); | ||
1112 | return; | ||
1113 | } | ||
1114 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1115 | 581 | ||
1116 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 582 | return ieee80211_handle_protect_preamb(sdata, |
1117 | memset(mgmt, 0, 24); | 583 | use_protection, use_short_preamble); |
1118 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 584 | } |
1119 | IEEE80211_STYPE_PROBE_REQ); | ||
1120 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | ||
1121 | if (dst) { | ||
1122 | memcpy(mgmt->da, dst, ETH_ALEN); | ||
1123 | memcpy(mgmt->bssid, dst, ETH_ALEN); | ||
1124 | } else { | ||
1125 | memset(mgmt->da, 0xff, ETH_ALEN); | ||
1126 | memset(mgmt->bssid, 0xff, ETH_ALEN); | ||
1127 | } | ||
1128 | pos = skb_put(skb, 2 + ssid_len); | ||
1129 | *pos++ = WLAN_EID_SSID; | ||
1130 | *pos++ = ssid_len; | ||
1131 | memcpy(pos, ssid, ssid_len); | ||
1132 | 585 | ||
1133 | supp_rates = skb_put(skb, 2); | 586 | static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, |
1134 | supp_rates[0] = WLAN_EID_SUPP_RATES; | 587 | struct ieee80211_sta_bss *bss) |
1135 | supp_rates[1] = 0; | 588 | { |
1136 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 589 | u32 changed = 0; |
1137 | 590 | ||
1138 | for (i = 0; i < sband->n_bitrates; i++) { | 591 | if (bss->has_erp_value) |
1139 | struct ieee80211_rate *rate = &sband->bitrates[i]; | 592 | changed |= ieee80211_handle_erp_ie(sdata, bss->erp_value); |
1140 | if (esupp_rates) { | 593 | else { |
1141 | pos = skb_put(skb, 1); | 594 | u16 capab = bss->capability; |
1142 | esupp_rates[1]++; | 595 | changed |= ieee80211_handle_protect_preamb(sdata, false, |
1143 | } else if (supp_rates[1] == 8) { | 596 | (capab & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0); |
1144 | esupp_rates = skb_put(skb, 3); | ||
1145 | esupp_rates[0] = WLAN_EID_EXT_SUPP_RATES; | ||
1146 | esupp_rates[1] = 1; | ||
1147 | pos = &esupp_rates[2]; | ||
1148 | } else { | ||
1149 | pos = skb_put(skb, 1); | ||
1150 | supp_rates[1]++; | ||
1151 | } | ||
1152 | *pos = rate->bitrate / 5; | ||
1153 | } | 597 | } |
1154 | 598 | ||
1155 | ieee80211_sta_tx(dev, skb, 0); | 599 | return changed; |
1156 | } | 600 | } |
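The two helpers above reduce to three inputs: the ERP element's Use_Protection and Barker_Preamble_Mode bits when the BSS carries an ERP IE, and the Short-Preamble capability bit otherwise. A compressed sketch of that decision (fragment only, variables assumed in scope; bit positions per the 802.11 ERP IE layout):

	/* ERP Information element: bit 0 NonERP_Present (ignored here),
	 * bit 1 Use_Protection, bit 2 Barker_Preamble_Mode.             */
	bool use_protection     = (erp_value & WLAN_ERP_USE_PROTECTION) != 0;
	bool use_short_preamble = (erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0;
	/* Without an ERP IE: never protect, short preamble only if the
	 * BSS capability advertises WLAN_CAPABILITY_SHORT_PREAMBLE.     */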
1157 | 601 | ||
602 | static void ieee80211_sta_send_apinfo(struct ieee80211_sub_if_data *sdata, | ||
603 | struct ieee80211_if_sta *ifsta) | ||
604 | { | ||
605 | union iwreq_data wrqu; | ||
606 | memset(&wrqu, 0, sizeof(wrqu)); | ||
607 | if (ifsta->flags & IEEE80211_STA_ASSOCIATED) | ||
608 | memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN); | ||
609 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | ||
610 | wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL); | ||
611 | } | ||
1158 | 612 | ||
1159 | static int ieee80211_sta_wep_configured(struct net_device *dev) | 613 | static void ieee80211_sta_send_associnfo(struct ieee80211_sub_if_data *sdata, |
614 | struct ieee80211_if_sta *ifsta) | ||
1160 | { | 615 | { |
1161 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 616 | union iwreq_data wrqu; |
1162 | if (!sdata || !sdata->default_key || | 617 | |
1163 | sdata->default_key->conf.alg != ALG_WEP) | 618 | if (ifsta->assocreq_ies) { |
1164 | return 0; | 619 | memset(&wrqu, 0, sizeof(wrqu)); |
1165 | return 1; | 620 | wrqu.data.length = ifsta->assocreq_ies_len; |
621 | wireless_send_event(sdata->dev, IWEVASSOCREQIE, &wrqu, | ||
622 | ifsta->assocreq_ies); | ||
623 | } | ||
624 | if (ifsta->assocresp_ies) { | ||
625 | memset(&wrqu, 0, sizeof(wrqu)); | ||
626 | wrqu.data.length = ifsta->assocresp_ies_len; | ||
627 | wireless_send_event(sdata->dev, IWEVASSOCRESPIE, &wrqu, | ||
628 | ifsta->assocresp_ies); | ||
629 | } | ||
1166 | } | 630 | } |
1167 | 631 | ||
1168 | 632 | ||
1169 | static void ieee80211_auth_completed(struct net_device *dev, | 633 | static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, |
1170 | struct ieee80211_if_sta *ifsta) | 634 | struct ieee80211_if_sta *ifsta) |
1171 | { | 635 | { |
1172 | printk(KERN_DEBUG "%s: authenticated\n", dev->name); | 636 | struct ieee80211_local *local = sdata->local; |
1173 | ifsta->flags |= IEEE80211_STA_AUTHENTICATED; | 637 | struct ieee80211_conf *conf = &local_to_hw(local)->conf; |
1174 | ieee80211_associate(dev, ifsta); | 638 | u32 changed = BSS_CHANGED_ASSOC; |
1175 | } | ||
1176 | 639 | ||
640 | struct ieee80211_sta_bss *bss; | ||
1177 | 641 | ||
1178 | static void ieee80211_auth_challenge(struct net_device *dev, | 642 | ifsta->flags |= IEEE80211_STA_ASSOCIATED; |
1179 | struct ieee80211_if_sta *ifsta, | ||
1180 | struct ieee80211_mgmt *mgmt, | ||
1181 | size_t len) | ||
1182 | { | ||
1183 | u8 *pos; | ||
1184 | struct ieee802_11_elems elems; | ||
1185 | 643 | ||
1186 | pos = mgmt->u.auth.variable; | 644 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) |
1187 | ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); | ||
1188 | if (!elems.challenge) | ||
1189 | return; | 645 | return; |
1190 | ieee80211_send_auth(dev, ifsta, 3, elems.challenge - 2, | ||
1191 | elems.challenge_len + 2, 1); | ||
1192 | } | ||
1193 | 646 | ||
1194 | static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid, | 647 | bss = ieee80211_rx_bss_get(local, ifsta->bssid, |
1195 | u8 dialog_token, u16 status, u16 policy, | 648 | conf->channel->center_freq, |
1196 | u16 buf_size, u16 timeout) | 649 | ifsta->ssid, ifsta->ssid_len); |
1197 | { | 650 | if (bss) { |
1198 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 651 | /* set timing information */ |
1199 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 652 | sdata->bss_conf.beacon_int = bss->beacon_int; |
1200 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 653 | sdata->bss_conf.timestamp = bss->timestamp; |
1201 | struct sk_buff *skb; | 654 | sdata->bss_conf.dtim_period = bss->dtim_period; |
1202 | struct ieee80211_mgmt *mgmt; | ||
1203 | u16 capab; | ||
1204 | 655 | ||
1205 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | 656 | changed |= ieee80211_handle_bss_capability(sdata, bss); |
1206 | 657 | ||
1207 | if (!skb) { | 658 | ieee80211_rx_bss_put(local, bss); |
1208 | printk(KERN_DEBUG "%s: failed to allocate buffer " | ||
1209 | "for addba resp frame\n", dev->name); | ||
1210 | return; | ||
1211 | } | 659 | } |
1212 | 660 | ||
1213 | skb_reserve(skb, local->hw.extra_tx_headroom); | 661 | if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) { |
1214 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 662 | changed |= BSS_CHANGED_HT; |
1215 | memset(mgmt, 0, 24); | 663 | sdata->bss_conf.assoc_ht = 1; |
1216 | memcpy(mgmt->da, da, ETH_ALEN); | 664 | sdata->bss_conf.ht_conf = &conf->ht_conf; |
1217 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 665 | sdata->bss_conf.ht_bss_conf = &conf->ht_bss_conf; |
1218 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) | 666 | } |
1219 | memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN); | ||
1220 | else | ||
1221 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
1222 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
1223 | IEEE80211_STYPE_ACTION); | ||
1224 | 667 | ||
1225 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp)); | 668 | ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET; |
1226 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | 669 | memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN); |
1227 | mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; | 670 | ieee80211_sta_send_associnfo(sdata, ifsta); |
1228 | mgmt->u.action.u.addba_resp.dialog_token = dialog_token; | ||
1229 | 671 | ||
1230 | capab = (u16)(policy << 1); /* bit 1 aggregation policy */ | 672 | ifsta->last_probe = jiffies; |
1231 | capab |= (u16)(tid << 2); /* bit 5:2 TID number */ | 673 | ieee80211_led_assoc(local, 1); |
1232 | capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */ | ||
1233 | 674 | ||
1234 | mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab); | 675 | sdata->bss_conf.assoc = 1; |
1235 | mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); | 676 | ieee80211_bss_info_change_notify(sdata, changed); |
1236 | mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); | ||
1237 | 677 | ||
1238 | ieee80211_sta_tx(dev, skb, 0); | 678 | netif_tx_start_all_queues(sdata->dev); |
679 | netif_carrier_on(sdata->dev); | ||
1239 | 680 | ||
1240 | return; | 681 | ieee80211_sta_send_apinfo(sdata, ifsta); |
1241 | } | 682 | } |
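ieee80211_set_associated() above follows mac80211's accumulate-then-notify pattern: every piece of BSS state that changed is OR-ed into one bitmask and the driver is told once at the end. A reduced sketch (fragment; ht_negotiated is a stand-in for the IEEE80211_CONF_SUPPORT_HT_MODE test in the patch):

	u32 changed = BSS_CHANGED_ASSOC;

	changed |= ieee80211_handle_bss_capability(sdata, bss);  /* ERP bits   */
	if (ht_negotiated)                                        /* assumption */
		changed |= BSS_CHANGED_HT;

	sdata->bss_conf.assoc = 1;
	ieee80211_bss_info_change_notify(sdata, changed);  /* one driver call */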
1242 | 683 | ||
1243 | void ieee80211_send_addba_request(struct net_device *dev, const u8 *da, | 684 | static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata, |
1244 | u16 tid, u8 dialog_token, u16 start_seq_num, | 685 | struct ieee80211_if_sta *ifsta) |
1245 | u16 agg_size, u16 timeout) | ||
1246 | { | 686 | { |
1247 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 687 | DECLARE_MAC_BUF(mac); |
1248 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1249 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
1250 | struct sk_buff *skb; | ||
1251 | struct ieee80211_mgmt *mgmt; | ||
1252 | u16 capab; | ||
1253 | |||
1254 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | ||
1255 | 688 | ||
1256 | if (!skb) { | 689 | ifsta->direct_probe_tries++; |
1257 | printk(KERN_ERR "%s: failed to allocate buffer " | 690 | if (ifsta->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) { |
1258 | "for addba request frame\n", dev->name); | 691 | printk(KERN_DEBUG "%s: direct probe to AP %s timed out\n", |
692 | sdata->dev->name, print_mac(mac, ifsta->bssid)); | ||
693 | ifsta->state = IEEE80211_STA_MLME_DISABLED; | ||
1259 | return; | 694 | return; |
1260 | } | 695 | } |
1261 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1262 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | ||
1263 | memset(mgmt, 0, 24); | ||
1264 | memcpy(mgmt->da, da, ETH_ALEN); | ||
1265 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | ||
1266 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) | ||
1267 | memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN); | ||
1268 | else | ||
1269 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
1270 | |||
1271 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
1272 | IEEE80211_STYPE_ACTION); | ||
1273 | 696 | ||
1274 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); | 697 | printk(KERN_DEBUG "%s: direct probe to AP %s try %d\n", |
698 | sdata->dev->name, print_mac(mac, ifsta->bssid), | ||
699 | ifsta->direct_probe_tries); | ||
1275 | 700 | ||
1276 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | 701 | ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE; |
1277 | mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ; | ||
1278 | 702 | ||
1279 | mgmt->u.action.u.addba_req.dialog_token = dialog_token; | 703 | set_bit(IEEE80211_STA_REQ_DIRECT_PROBE, &ifsta->request); |
1280 | capab = (u16)(1 << 1); /* bit 1 aggregation policy */ | ||
1281 | capab |= (u16)(tid << 2); /* bit 5:2 TID number */ | ||
1282 | capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggergation */ | ||
1283 | 704 | ||
1284 | mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); | 705 | /* Direct probe is sent to broadcast address as some APs |
1285 | 706 | * will not answer to direct packet in unassociated state. | |
1286 | mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout); | 707 | */ |
1287 | mgmt->u.action.u.addba_req.start_seq_num = | 708 | ieee80211_send_probe_req(sdata, NULL, |
1288 | cpu_to_le16(start_seq_num << 4); | 709 | ifsta->ssid, ifsta->ssid_len); |
1289 | 710 | ||
1290 | ieee80211_sta_tx(dev, skb, 0); | 711 | mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT); |
1291 | } | 712 | } |
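Both ADDBA builders above pack the Block Ack Parameter Set the same way; as the shifts imply, the layout is (bit 0, A-MSDU support, stays clear):

	/*   bit 1      BA policy (1 = immediate Block Ack)
	 *   bits 5:2   TID
	 *   bits 15:6  buffer size (MPDUs)                              */
	u16 capab = (u16)(1 << 1) | (u16)(tid << 2) | (u16)(buf_size << 6);
	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
	/* the starting sequence number occupies bits 15:4 of its field */
	mgmt->u.action.u.addba_req.start_seq_num =
		cpu_to_le16(start_seq_num << 4);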
1292 | 713 | ||
1293 | static void ieee80211_sta_process_addba_request(struct net_device *dev, | 714 | |
1294 | struct ieee80211_mgmt *mgmt, | 715 | static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata, |
1295 | size_t len) | 716 | struct ieee80211_if_sta *ifsta) |
1296 | { | 717 | { |
1297 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1298 | struct ieee80211_hw *hw = &local->hw; | ||
1299 | struct ieee80211_conf *conf = &hw->conf; | ||
1300 | struct sta_info *sta; | ||
1301 | struct tid_ampdu_rx *tid_agg_rx; | ||
1302 | u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status; | ||
1303 | u8 dialog_token; | ||
1304 | int ret = -EOPNOTSUPP; | ||
1305 | DECLARE_MAC_BUF(mac); | 718 | DECLARE_MAC_BUF(mac); |
1306 | 719 | ||
1307 | rcu_read_lock(); | 720 | ifsta->auth_tries++; |
1308 | 721 | if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) { | |
1309 | sta = sta_info_get(local, mgmt->sa); | 722 | printk(KERN_DEBUG "%s: authentication with AP %s" |
1310 | if (!sta) { | 723 | " timed out\n", |
1311 | rcu_read_unlock(); | 724 | sdata->dev->name, print_mac(mac, ifsta->bssid)); |
725 | ifsta->state = IEEE80211_STA_MLME_DISABLED; | ||
1312 | return; | 726 | return; |
1313 | } | 727 | } |
1314 | 728 | ||
1315 | /* extract session parameters from addba request frame */ | 729 | ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE; |
1316 | dialog_token = mgmt->u.action.u.addba_req.dialog_token; | 730 | printk(KERN_DEBUG "%s: authenticate with AP %s\n", |
1317 | timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout); | 731 | sdata->dev->name, print_mac(mac, ifsta->bssid)); |
1318 | start_seq_num = | ||
1319 | le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4; | ||
1320 | |||
1321 | capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); | ||
1322 | ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1; | ||
1323 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | ||
1324 | buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; | ||
1325 | |||
1326 | status = WLAN_STATUS_REQUEST_DECLINED; | ||
1327 | |||
1328 | /* sanity check for incoming parameters: | ||
1329 | * check if configuration can support the BA policy | ||
1330 | * and if buffer size does not exceeds max value */ | ||
1331 | if (((ba_policy != 1) | ||
1332 | && (!(conf->ht_conf.cap & IEEE80211_HT_CAP_DELAY_BA))) | ||
1333 | || (buf_size > IEEE80211_MAX_AMPDU_BUF)) { | ||
1334 | status = WLAN_STATUS_INVALID_QOS_PARAM; | ||
1335 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1336 | if (net_ratelimit()) | ||
1337 | printk(KERN_DEBUG "AddBA Req with bad params from " | ||
1338 | "%s on tid %u. policy %d, buffer size %d\n", | ||
1339 | print_mac(mac, mgmt->sa), tid, ba_policy, | ||
1340 | buf_size); | ||
1341 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1342 | goto end_no_lock; | ||
1343 | } | ||
1344 | /* determine default buffer size */ | ||
1345 | if (buf_size == 0) { | ||
1346 | struct ieee80211_supported_band *sband; | ||
1347 | |||
1348 | sband = local->hw.wiphy->bands[conf->channel->band]; | ||
1349 | buf_size = IEEE80211_MIN_AMPDU_BUF; | ||
1350 | buf_size = buf_size << sband->ht_info.ampdu_factor; | ||
1351 | } | ||
1352 | |||
1353 | |||
1354 | /* examine state machine */ | ||
1355 | spin_lock_bh(&sta->lock); | ||
1356 | 732 | ||
1357 | if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { | 733 | ieee80211_send_auth(sdata, ifsta, 1, NULL, 0, 0); |
1358 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1359 | if (net_ratelimit()) | ||
1360 | printk(KERN_DEBUG "unexpected AddBA Req from " | ||
1361 | "%s on tid %u\n", | ||
1362 | print_mac(mac, mgmt->sa), tid); | ||
1363 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1364 | goto end; | ||
1365 | } | ||
1366 | 734 | ||
1367 | /* prepare A-MPDU MLME for Rx aggregation */ | 735 | mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT); |
1368 | sta->ampdu_mlme.tid_rx[tid] = | ||
1369 | kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); | ||
1370 | if (!sta->ampdu_mlme.tid_rx[tid]) { | ||
1371 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1372 | if (net_ratelimit()) | ||
1373 | printk(KERN_ERR "allocate rx mlme to tid %d failed\n", | ||
1374 | tid); | ||
1375 | #endif | ||
1376 | goto end; | ||
1377 | } | ||
1378 | /* rx timer */ | ||
1379 | sta->ampdu_mlme.tid_rx[tid]->session_timer.function = | ||
1380 | sta_rx_agg_session_timer_expired; | ||
1381 | sta->ampdu_mlme.tid_rx[tid]->session_timer.data = | ||
1382 | (unsigned long)&sta->timer_to_tid[tid]; | ||
1383 | init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer); | ||
1384 | |||
1385 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; | ||
1386 | |||
1387 | /* prepare reordering buffer */ | ||
1388 | tid_agg_rx->reorder_buf = | ||
1389 | kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC); | ||
1390 | if (!tid_agg_rx->reorder_buf) { | ||
1391 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1392 | if (net_ratelimit()) | ||
1393 | printk(KERN_ERR "can not allocate reordering buffer " | ||
1394 | "to tid %d\n", tid); | ||
1395 | #endif | ||
1396 | kfree(sta->ampdu_mlme.tid_rx[tid]); | ||
1397 | goto end; | ||
1398 | } | ||
1399 | memset(tid_agg_rx->reorder_buf, 0, | ||
1400 | buf_size * sizeof(struct sk_buff *)); | ||
1401 | |||
1402 | if (local->ops->ampdu_action) | ||
1403 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START, | ||
1404 | sta->addr, tid, &start_seq_num); | ||
1405 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1406 | printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); | ||
1407 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1408 | |||
1409 | if (ret) { | ||
1410 | kfree(tid_agg_rx->reorder_buf); | ||
1411 | kfree(tid_agg_rx); | ||
1412 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
1413 | goto end; | ||
1414 | } | ||
1415 | |||
1416 | /* change state and send addba resp */ | ||
1417 | sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL; | ||
1418 | tid_agg_rx->dialog_token = dialog_token; | ||
1419 | tid_agg_rx->ssn = start_seq_num; | ||
1420 | tid_agg_rx->head_seq_num = start_seq_num; | ||
1421 | tid_agg_rx->buf_size = buf_size; | ||
1422 | tid_agg_rx->timeout = timeout; | ||
1423 | tid_agg_rx->stored_mpdu_num = 0; | ||
1424 | status = WLAN_STATUS_SUCCESS; | ||
1425 | end: | ||
1426 | spin_unlock_bh(&sta->lock); | ||
1427 | |||
1428 | end_no_lock: | ||
1429 | ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid, | ||
1430 | dialog_token, status, 1, buf_size, timeout); | ||
1431 | rcu_read_unlock(); | ||
1432 | } | 736 | } |
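On the receive side the same fields come back out through the IEEE80211_ADDBA_PARAM_* masks, and a requested buffer size of 0 means "recipient's choice", which the handler above fills from the band's A-MPDU factor. Fragment, variable names as in the function:

	ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
	tid       = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	buf_size  = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
	if (buf_size == 0)	/* peer left the choice to us */
		buf_size = IEEE80211_MIN_AMPDU_BUF <<
			   sband->ht_info.ampdu_factor;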
1433 | 737 | ||
1434 | static void ieee80211_sta_process_addba_resp(struct net_device *dev, | 738 | static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, |
1435 | struct ieee80211_mgmt *mgmt, | 739 | struct ieee80211_if_sta *ifsta, bool deauth, |
1436 | size_t len) | 740 | bool self_disconnected, u16 reason) |
1437 | { | 741 | { |
1438 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 742 | struct ieee80211_local *local = sdata->local; |
1439 | struct ieee80211_hw *hw = &local->hw; | ||
1440 | struct sta_info *sta; | 743 | struct sta_info *sta; |
1441 | u16 capab; | 744 | u32 changed = BSS_CHANGED_ASSOC; |
1442 | u16 tid; | ||
1443 | u8 *state; | ||
1444 | 745 | ||
1445 | rcu_read_lock(); | 746 | rcu_read_lock(); |
1446 | 747 | ||
1447 | sta = sta_info_get(local, mgmt->sa); | 748 | sta = sta_info_get(local, ifsta->bssid); |
1448 | if (!sta) { | 749 | if (!sta) { |
1449 | rcu_read_unlock(); | 750 | rcu_read_unlock(); |
1450 | return; | 751 | return; |
1451 | } | 752 | } |
1452 | 753 | ||
1453 | capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); | 754 | if (deauth) { |
1454 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | 755 | ifsta->direct_probe_tries = 0; |
756 | ifsta->auth_tries = 0; | ||
757 | } | ||
758 | ifsta->assoc_scan_tries = 0; | ||
759 | ifsta->assoc_tries = 0; | ||
1455 | 760 | ||
1456 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 761 | netif_tx_stop_all_queues(sdata->dev); |
762 | netif_carrier_off(sdata->dev); | ||
1457 | 763 | ||
1458 | spin_lock_bh(&sta->lock); | 764 | ieee80211_sta_tear_down_BA_sessions(sdata, sta->addr); |
1459 | 765 | ||
1460 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | 766 | if (self_disconnected) { |
1461 | spin_unlock_bh(&sta->lock); | 767 | if (deauth) |
1462 | goto addba_resp_exit; | 768 | ieee80211_send_deauth_disassoc(sdata, |
769 | IEEE80211_STYPE_DEAUTH, reason); | ||
770 | else | ||
771 | ieee80211_send_deauth_disassoc(sdata, | ||
772 | IEEE80211_STYPE_DISASSOC, reason); | ||
1463 | } | 773 | } |
1464 | 774 | ||
1465 | if (mgmt->u.action.u.addba_resp.dialog_token != | 775 | ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; |
1466 | sta->ampdu_mlme.tid_tx[tid]->dialog_token) { | 776 | changed |= ieee80211_reset_erp_info(sdata); |
1467 | spin_unlock_bh(&sta->lock); | ||
1468 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1469 | printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); | ||
1470 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1471 | goto addba_resp_exit; | ||
1472 | } | ||
1473 | 777 | ||
1474 | del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | 778 | if (sdata->bss_conf.assoc_ht) |
1475 | #ifdef CONFIG_MAC80211_HT_DEBUG | 779 | changed |= BSS_CHANGED_HT; |
1476 | printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); | ||
1477 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1478 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) | ||
1479 | == WLAN_STATUS_SUCCESS) { | ||
1480 | *state |= HT_ADDBA_RECEIVED_MSK; | ||
1481 | sta->ampdu_mlme.addba_req_num[tid] = 0; | ||
1482 | 780 | ||
1483 | if (*state == HT_AGG_STATE_OPERATIONAL) | 781 | sdata->bss_conf.assoc_ht = 0; |
1484 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | 782 | sdata->bss_conf.ht_conf = NULL; |
783 | sdata->bss_conf.ht_bss_conf = NULL; | ||
1485 | 784 | ||
1486 | spin_unlock_bh(&sta->lock); | 785 | ieee80211_led_assoc(local, 0); |
1487 | } else { | 786 | sdata->bss_conf.assoc = 0; |
1488 | sta->ampdu_mlme.addba_req_num[tid]++; | 787 | |
1489 | /* this will allow the state check in stop_BA_session */ | 788 | ieee80211_sta_send_apinfo(sdata, ifsta); |
1490 | *state = HT_AGG_STATE_OPERATIONAL; | 789 | |
1491 | spin_unlock_bh(&sta->lock); | 790 | if (self_disconnected) |
1492 | ieee80211_stop_tx_ba_session(hw, sta->addr, tid, | 791 | ifsta->state = IEEE80211_STA_MLME_DISABLED; |
1493 | WLAN_BACK_INITIATOR); | 792 | |
1494 | } | 793 | sta_info_unlink(&sta); |
1495 | 794 | ||
1496 | addba_resp_exit: | ||
1497 | rcu_read_unlock(); | 795 | rcu_read_unlock(); |
796 | |||
797 | sta_info_destroy(sta); | ||
1498 | } | 798 | } |
1499 | 799 | ||
1500 | void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, | 800 | static int ieee80211_sta_wep_configured(struct ieee80211_sub_if_data *sdata) |
1501 | u16 initiator, u16 reason_code) | ||
1502 | { | 801 | { |
1503 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 802 | if (!sdata || !sdata->default_key || |
1504 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 803 | sdata->default_key->conf.alg != ALG_WEP) |
1505 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 804 | return 0; |
1506 | struct sk_buff *skb; | 805 | return 1; |
1507 | struct ieee80211_mgmt *mgmt; | 806 | } |
1508 | u16 params; | ||
1509 | |||
1510 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | ||
1511 | |||
1512 | if (!skb) { | ||
1513 | printk(KERN_ERR "%s: failed to allocate buffer " | ||
1514 | "for delba frame\n", dev->name); | ||
1515 | return; | ||
1516 | } | ||
1517 | 807 | ||
1518 | skb_reserve(skb, local->hw.extra_tx_headroom); | 808 | static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata, |
1519 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 809 | struct ieee80211_if_sta *ifsta) |
1520 | memset(mgmt, 0, 24); | 810 | { |
1521 | memcpy(mgmt->da, da, ETH_ALEN); | 811 | struct ieee80211_local *local = sdata->local; |
1522 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 812 | struct ieee80211_sta_bss *bss; |
1523 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) | 813 | int bss_privacy; |
1524 | memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN); | 814 | int wep_privacy; |
1525 | else | 815 | int privacy_invoked; |
1526 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
1527 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
1528 | IEEE80211_STYPE_ACTION); | ||
1529 | 816 | ||
1530 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba)); | 817 | if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL)) |
818 | return 0; | ||
1531 | 819 | ||
1532 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | 820 | bss = ieee80211_rx_bss_get(local, ifsta->bssid, |
1533 | mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA; | 821 | local->hw.conf.channel->center_freq, |
1534 | params = (u16)(initiator << 11); /* bit 11 initiator */ | 822 | ifsta->ssid, ifsta->ssid_len); |
1535 | params |= (u16)(tid << 12); /* bit 15:12 TID number */ | 823 | if (!bss) |
824 | return 0; | ||
1536 | 825 | ||
1537 | mgmt->u.action.u.delba.params = cpu_to_le16(params); | 826 | bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY); |
1538 | mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); | 827 | wep_privacy = !!ieee80211_sta_wep_configured(sdata); |
828 | privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED); | ||
1539 | 829 | ||
1540 | ieee80211_sta_tx(dev, skb, 0); | 830 | ieee80211_rx_bss_put(local, bss); |
1541 | } | ||
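The DELBA parameter field built above carries only two pieces of information; the low bits are reserved and stay zero:

	/*   bit 11     initiator (1 = the side that set the session up)
	 *   bits 15:12 TID                                              */
	u16 params = (u16)(initiator << 11) | (u16)(tid << 12);
	mgmt->u.action.u.delba.params      = cpu_to_le16(params);
	mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);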
1542 | 831 | ||
1543 | void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn) | 832 | if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked)) |
1544 | { | 833 | return 0; |
1545 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1546 | struct sk_buff *skb; | ||
1547 | struct ieee80211_bar *bar; | ||
1548 | u16 bar_control = 0; | ||
1549 | 834 | ||
1550 | skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); | 835 | return 1; |
1551 | if (!skb) { | ||
1552 | printk(KERN_ERR "%s: failed to allocate buffer for " | ||
1553 | "bar frame\n", dev->name); | ||
1554 | return; | ||
1555 | } | ||
1556 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1557 | bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar)); | ||
1558 | memset(bar, 0, sizeof(*bar)); | ||
1559 | bar->frame_control = IEEE80211_FC(IEEE80211_FTYPE_CTL, | ||
1560 | IEEE80211_STYPE_BACK_REQ); | ||
1561 | memcpy(bar->ra, ra, ETH_ALEN); | ||
1562 | memcpy(bar->ta, dev->dev_addr, ETH_ALEN); | ||
1563 | bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; | ||
1564 | bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; | ||
1565 | bar_control |= (u16)(tid << 12); | ||
1566 | bar->control = cpu_to_le16(bar_control); | ||
1567 | bar->start_seq_num = cpu_to_le16(ssn); | ||
1568 | |||
1569 | ieee80211_sta_tx(dev, skb, 0); | ||
1570 | } | 836 | } |
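ieee80211_send_bar() assembles the BAR control word from two policy flags plus the TID in the top nibble, then stores the starting sequence number as given; a fragment mirroring the lines above:

	u16 bar_control = IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL |
			  IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA |
			  (u16)(tid << 12);		/* TID in bits 15:12 */
	bar->control       = cpu_to_le16(bar_control);
	bar->start_seq_num = cpu_to_le16(ssn);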
1571 | 837 | ||
1572 | void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, | 838 | static void ieee80211_associate(struct ieee80211_sub_if_data *sdata, |
1573 | u16 initiator, u16 reason) | 839 | struct ieee80211_if_sta *ifsta) |
1574 | { | 840 | { |
1575 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1576 | struct ieee80211_hw *hw = &local->hw; | ||
1577 | struct sta_info *sta; | ||
1578 | int ret, i; | ||
1579 | DECLARE_MAC_BUF(mac); | 841 | DECLARE_MAC_BUF(mac); |
1580 | 842 | ||
1581 | rcu_read_lock(); | 843 | ifsta->assoc_tries++; |
1582 | 844 | if (ifsta->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) { | |
1583 | sta = sta_info_get(local, ra); | 845 | printk(KERN_DEBUG "%s: association with AP %s" |
1584 | if (!sta) { | 846 | " timed out\n", |
1585 | rcu_read_unlock(); | 847 | sdata->dev->name, print_mac(mac, ifsta->bssid)); |
848 | ifsta->state = IEEE80211_STA_MLME_DISABLED; | ||
1586 | return; | 849 | return; |
1587 | } | 850 | } |
1588 | 851 | ||
1589 | /* check if TID is in operational state */ | 852 | ifsta->state = IEEE80211_STA_MLME_ASSOCIATE; |
1590 | spin_lock_bh(&sta->lock); | 853 | printk(KERN_DEBUG "%s: associate with AP %s\n", |
1591 | if (sta->ampdu_mlme.tid_state_rx[tid] | 854 | sdata->dev->name, print_mac(mac, ifsta->bssid)); |
1592 | != HT_AGG_STATE_OPERATIONAL) { | 855 | if (ieee80211_privacy_mismatch(sdata, ifsta)) { |
1593 | spin_unlock_bh(&sta->lock); | 856 | printk(KERN_DEBUG "%s: mismatch in privacy configuration and " |
1594 | rcu_read_unlock(); | 857 | "mixed-cell disabled - abort association\n", sdata->dev->name); |
858 | ifsta->state = IEEE80211_STA_MLME_DISABLED; | ||
1595 | return; | 859 | return; |
1596 | } | 860 | } |
1597 | sta->ampdu_mlme.tid_state_rx[tid] = | ||
1598 | HT_AGG_STATE_REQ_STOP_BA_MSK | | ||
1599 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | ||
1600 | spin_unlock_bh(&sta->lock); | ||
1601 | |||
1602 | /* stop HW Rx aggregation. ampdu_action existence | ||
1603 | * already verified in session init so we add the BUG_ON */ | ||
1604 | BUG_ON(!local->ops->ampdu_action); | ||
1605 | |||
1606 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1607 | printk(KERN_DEBUG "Rx BA session stop requested for %s tid %u\n", | ||
1608 | print_mac(mac, ra), tid); | ||
1609 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1610 | |||
1611 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP, | ||
1612 | ra, tid, NULL); | ||
1613 | if (ret) | ||
1614 | printk(KERN_DEBUG "HW problem - can not stop rx " | ||
1615 | "aggregation for tid %d\n", tid); | ||
1616 | |||
1617 | /* shutdown timer has not expired */ | ||
1618 | if (initiator != WLAN_BACK_TIMER) | ||
1619 | del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer); | ||
1620 | |||
1621 | /* check if this is a self generated aggregation halt */ | ||
1622 | if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER) | ||
1623 | ieee80211_send_delba(dev, ra, tid, 0, reason); | ||
1624 | |||
1625 | /* free the reordering buffer */ | ||
1626 | for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) { | ||
1627 | if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) { | ||
1628 | /* release the reordered frames */ | ||
1629 | dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]); | ||
1630 | sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--; | ||
1631 | sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL; | ||
1632 | } | ||
1633 | } | ||
1634 | /* free resources */ | ||
1635 | kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf); | ||
1636 | kfree(sta->ampdu_mlme.tid_rx[tid]); | ||
1637 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
1638 | sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE; | ||
1639 | 861 | ||
1640 | rcu_read_unlock(); | 862 | ieee80211_send_assoc(sdata, ifsta); |
863 | |||
864 | mod_timer(&ifsta->timer, jiffies + IEEE80211_ASSOC_TIMEOUT); | ||
1641 | } | 865 | } |
1642 | 866 | ||
1643 | 867 | ||
1644 | static void ieee80211_sta_process_delba(struct net_device *dev, | 868 | static void ieee80211_associated(struct ieee80211_sub_if_data *sdata, |
1645 | struct ieee80211_mgmt *mgmt, size_t len) | 869 | struct ieee80211_if_sta *ifsta) |
1646 | { | 870 | { |
1647 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 871 | struct ieee80211_local *local = sdata->local; |
1648 | struct sta_info *sta; | 872 | struct sta_info *sta; |
1649 | u16 tid, params; | 873 | int disassoc; |
1650 | u16 initiator; | ||
1651 | DECLARE_MAC_BUF(mac); | 874 | DECLARE_MAC_BUF(mac); |
1652 | 875 | ||
1653 | rcu_read_lock(); | 876 | /* TODO: start monitoring current AP signal quality and number of |
1654 | 877 | * missed beacons. Scan other channels every now and then and search | |
1655 | sta = sta_info_get(local, mgmt->sa); | 878 | * for better APs. */ |
1656 | if (!sta) { | 879 | /* TODO: remove expired BSSes */ |
1657 | rcu_read_unlock(); | ||
1658 | return; | ||
1659 | } | ||
1660 | |||
1661 | params = le16_to_cpu(mgmt->u.action.u.delba.params); | ||
1662 | tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; | ||
1663 | initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; | ||
1664 | |||
1665 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1666 | if (net_ratelimit()) | ||
1667 | printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n", | ||
1668 | print_mac(mac, mgmt->sa), | ||
1669 | initiator ? "initiator" : "recipient", tid, | ||
1670 | mgmt->u.action.u.delba.reason_code); | ||
1671 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1672 | |||
1673 | if (initiator == WLAN_BACK_INITIATOR) | ||
1674 | ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid, | ||
1675 | WLAN_BACK_INITIATOR, 0); | ||
1676 | else { /* WLAN_BACK_RECIPIENT */ | ||
1677 | spin_lock_bh(&sta->lock); | ||
1678 | sta->ampdu_mlme.tid_state_tx[tid] = | ||
1679 | HT_AGG_STATE_OPERATIONAL; | ||
1680 | spin_unlock_bh(&sta->lock); | ||
1681 | ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid, | ||
1682 | WLAN_BACK_RECIPIENT); | ||
1683 | } | ||
1684 | rcu_read_unlock(); | ||
1685 | } | ||
1686 | 880 | ||
1687 | /* | 881 | ifsta->state = IEEE80211_STA_MLME_ASSOCIATED; |
1688 | * After sending add Block Ack request we activated a timer until | ||
1689 | * add Block Ack response will arrive from the recipient. | ||
1690 | * If this timer expires sta_addba_resp_timer_expired will be executed. | ||
1691 | */ | ||
1692 | void sta_addba_resp_timer_expired(unsigned long data) | ||
1693 | { | ||
1694 | /* not an elegant detour, but there is no choice as the timer passes | ||
1695 | * only one argument, and both sta_info and TID are needed, so init | ||
1696 | * flow in sta_info_create gives the TID as data, while the timer_to_id | ||
1697 | * array gives the sta through container_of */ | ||
1698 | u16 tid = *(u8 *)data; | ||
1699 | struct sta_info *temp_sta = container_of((void *)data, | ||
1700 | struct sta_info, timer_to_tid[tid]); | ||
1701 | |||
1702 | struct ieee80211_local *local = temp_sta->local; | ||
1703 | struct ieee80211_hw *hw = &local->hw; | ||
1704 | struct sta_info *sta; | ||
1705 | u8 *state; | ||
1706 | 882 | ||
1707 | rcu_read_lock(); | 883 | rcu_read_lock(); |
1708 | 884 | ||
1709 | sta = sta_info_get(local, temp_sta->addr); | 885 | sta = sta_info_get(local, ifsta->bssid); |
1710 | if (!sta) { | 886 | if (!sta) { |
1711 | rcu_read_unlock(); | 887 | printk(KERN_DEBUG "%s: No STA entry for own AP %s\n", |
1712 | return; | 888 | sdata->dev->name, print_mac(mac, ifsta->bssid)); |
1713 | } | 889 | disassoc = 1; |
1714 | 890 | } else { | |
1715 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 891 | disassoc = 0; |
1716 | /* check if the TID waits for addBA response */ | 892 | if (time_after(jiffies, |
1717 | spin_lock_bh(&sta->lock); | 893 | sta->last_rx + IEEE80211_MONITORING_INTERVAL)) { |
1718 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | 894 | if (ifsta->flags & IEEE80211_STA_PROBEREQ_POLL) { |
1719 | spin_unlock_bh(&sta->lock); | 895 | printk(KERN_DEBUG "%s: No ProbeResp from " |
1720 | *state = HT_AGG_STATE_IDLE; | 896 | "current AP %s - assume out of " |
1721 | #ifdef CONFIG_MAC80211_HT_DEBUG | 897 | "range\n", |
1722 | printk(KERN_DEBUG "timer expired on tid %d but we are not " | 898 | sdata->dev->name, print_mac(mac, ifsta->bssid)); |
1723 | "expecting addBA response there", tid); | 899 | disassoc = 1; |
1724 | #endif | 900 | } else |
1725 | goto timer_expired_exit; | 901 | ieee80211_send_probe_req(sdata, ifsta->bssid, |
902 | local->scan_ssid, | ||
903 | local->scan_ssid_len); | ||
904 | ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL; | ||
905 | } else { | ||
906 | ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL; | ||
907 | if (time_after(jiffies, ifsta->last_probe + | ||
908 | IEEE80211_PROBE_INTERVAL)) { | ||
909 | ifsta->last_probe = jiffies; | ||
910 | ieee80211_send_probe_req(sdata, ifsta->bssid, | ||
911 | ifsta->ssid, | ||
912 | ifsta->ssid_len); | ||
913 | } | ||
914 | } | ||
1726 | } | 915 | } |
1727 | 916 | ||
1728 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1729 | printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); | ||
1730 | #endif | ||
1731 | |||
1732 | /* go through the state check in stop_BA_session */ | ||
1733 | *state = HT_AGG_STATE_OPERATIONAL; | ||
1734 | spin_unlock_bh(&sta->lock); | ||
1735 | ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid, | ||
1736 | WLAN_BACK_INITIATOR); | ||
1737 | |||
1738 | timer_expired_exit: | ||
1739 | rcu_read_unlock(); | 917 | rcu_read_unlock(); |
1740 | } | ||
1741 | 918 | ||
1742 | /* | 919 | if (disassoc) |
1743 | * After accepting the AddBA Request we activated a timer, | 920 | ieee80211_set_disassoc(sdata, ifsta, true, true, |
1744 | * resetting it after each frame that arrives from the originator. | 921 | WLAN_REASON_PREV_AUTH_NOT_VALID); |
1745 | * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. | 922 | else |
1746 | */ | 923 | mod_timer(&ifsta->timer, jiffies + |
1747 | static void sta_rx_agg_session_timer_expired(unsigned long data) | 924 | IEEE80211_MONITORING_INTERVAL); |
1748 | { | ||
1749 | /* not an elegant detour, but there is no choice as the timer passes | ||
1750 | * only one argument, and various sta_info are needed here, so init | ||
1751 | * flow in sta_info_create gives the TID as data, while the timer_to_id | ||
1752 | * array gives the sta through container_of */ | ||
1753 | u8 *ptid = (u8 *)data; | ||
1754 | u8 *timer_to_id = ptid - *ptid; | ||
1755 | struct sta_info *sta = container_of(timer_to_id, struct sta_info, | ||
1756 | timer_to_tid[0]); | ||
1757 | |||
1758 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1759 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); | ||
1760 | #endif | ||
1761 | ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, | ||
1762 | (u16)*ptid, WLAN_BACK_TIMER, | ||
1763 | WLAN_REASON_QSTA_TIMEOUT); | ||
1764 | } | 925 | } |
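Both expiry handlers above lean on the same trick for getting two values out of one timer argument: each slot of sta->timer_to_tid[] holds its own index, so the callback reads the TID from the slot's value and recovers the sta_info from the slot's address. A condensed sketch of that detour:

	u8 *ptid = (u8 *)data;		/* points into sta->timer_to_tid[] */
	u8 tid = *ptid;			/* slot value == slot index        */
	struct sta_info *sta = container_of(ptid - tid, struct sta_info,
					    timer_to_tid[0]);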
1765 | 926 | ||
1766 | void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr) | ||
1767 | { | ||
1768 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1769 | int i; | ||
1770 | 927 | ||
1771 | for (i = 0; i < STA_TID_NUM; i++) { | 928 | static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata, |
1772 | ieee80211_stop_tx_ba_session(&local->hw, addr, i, | 929 | struct ieee80211_if_sta *ifsta) |
1773 | WLAN_BACK_INITIATOR); | 930 | { |
1774 | ieee80211_sta_stop_rx_ba_session(dev, addr, i, | 931 | printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name); |
1775 | WLAN_BACK_RECIPIENT, | 932 | ifsta->flags |= IEEE80211_STA_AUTHENTICATED; |
1776 | WLAN_REASON_QSTA_LEAVE_QBSS); | 933 | ieee80211_associate(sdata, ifsta); |
1777 | } | ||
1778 | } | 934 | } |
1779 | 935 | ||
1780 | static void ieee80211_send_refuse_measurement_request(struct net_device *dev, | ||
1781 | struct ieee80211_msrment_ie *request_ie, | ||
1782 | const u8 *da, const u8 *bssid, | ||
1783 | u8 dialog_token) | ||
1784 | { | ||
1785 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1786 | struct sk_buff *skb; | ||
1787 | struct ieee80211_mgmt *msr_report; | ||
1788 | 936 | ||
1789 | skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + | 937 | static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, |
1790 | sizeof(struct ieee80211_msrment_ie)); | 938 | struct ieee80211_if_sta *ifsta, |
939 | struct ieee80211_mgmt *mgmt, | ||
940 | size_t len) | ||
941 | { | ||
942 | u8 *pos; | ||
943 | struct ieee802_11_elems elems; | ||
1791 | 944 | ||
1792 | if (!skb) { | 945 | pos = mgmt->u.auth.variable; |
1793 | printk(KERN_ERR "%s: failed to allocate buffer for " | 946 | ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); |
1794 | "measurement report frame\n", dev->name); | 947 | if (!elems.challenge) |
1795 | return; | 948 | return; |
1796 | } | 949 | ieee80211_send_auth(sdata, ifsta, 3, elems.challenge - 2, |
1797 | 950 | elems.challenge_len + 2, 1); | |
1798 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1799 | msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); | ||
1800 | memset(msr_report, 0, 24); | ||
1801 | memcpy(msr_report->da, da, ETH_ALEN); | ||
1802 | memcpy(msr_report->sa, dev->dev_addr, ETH_ALEN); | ||
1803 | memcpy(msr_report->bssid, bssid, ETH_ALEN); | ||
1804 | msr_report->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
1805 | IEEE80211_STYPE_ACTION); | ||
1806 | |||
1807 | skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement)); | ||
1808 | msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT; | ||
1809 | msr_report->u.action.u.measurement.action_code = | ||
1810 | WLAN_ACTION_SPCT_MSR_RPRT; | ||
1811 | msr_report->u.action.u.measurement.dialog_token = dialog_token; | ||
1812 | |||
1813 | msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT; | ||
1814 | msr_report->u.action.u.measurement.length = | ||
1815 | sizeof(struct ieee80211_msrment_ie); | ||
1816 | |||
1817 | memset(&msr_report->u.action.u.measurement.msr_elem, 0, | ||
1818 | sizeof(struct ieee80211_msrment_ie)); | ||
1819 | msr_report->u.action.u.measurement.msr_elem.token = request_ie->token; | ||
1820 | msr_report->u.action.u.measurement.msr_elem.mode |= | ||
1821 | IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED; | ||
1822 | msr_report->u.action.u.measurement.msr_elem.type = request_ie->type; | ||
1823 | |||
1824 | ieee80211_sta_tx(dev, skb, 0); | ||
1825 | } | ||
1826 | |||
1827 | static void ieee80211_sta_process_measurement_req(struct net_device *dev, | ||
1828 | struct ieee80211_mgmt *mgmt, | ||
1829 | size_t len) | ||
1830 | { | ||
1831 | /* | ||
1832 | * Ignoring measurement request is spec violation. | ||
1833 | * Mandatory measurements must be reported optional | ||
1834 | * measurements might be refused or reported incapable | ||
1835 | * For now just refuse | ||
1836 | * TODO: Answer basic measurement as unmeasured | ||
1837 | */ | ||
1838 | ieee80211_send_refuse_measurement_request(dev, | ||
1839 | &mgmt->u.action.u.measurement.msr_elem, | ||
1840 | mgmt->sa, mgmt->bssid, | ||
1841 | mgmt->u.action.u.measurement.dialog_token); | ||
1842 | } | 951 | } |
1843 | 952 | ||
1844 | 953 | static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, | |
1845 | static void ieee80211_rx_mgmt_auth(struct net_device *dev, | ||
1846 | struct ieee80211_if_sta *ifsta, | 954 | struct ieee80211_if_sta *ifsta, |
1847 | struct ieee80211_mgmt *mgmt, | 955 | struct ieee80211_mgmt *mgmt, |
1848 | size_t len) | 956 | size_t len) |
1849 | { | 957 | { |
1850 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1851 | u16 auth_alg, auth_transaction, status_code; | 958 | u16 auth_alg, auth_transaction, status_code; |
1852 | DECLARE_MAC_BUF(mac); | 959 | DECLARE_MAC_BUF(mac); |
1853 | 960 | ||
1854 | if (ifsta->state != IEEE80211_AUTHENTICATE && | 961 | if (ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE && |
1855 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS) | 962 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS) |
1856 | return; | 963 | return; |
1857 | 964 | ||
@@ -1879,7 +986,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev, | |||
1879 | */ | 986 | */ |
1880 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) | 987 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) |
1881 | return; | 988 | return; |
1882 | ieee80211_send_auth(dev, ifsta, 2, NULL, 0, 0); | 989 | ieee80211_send_auth(sdata, ifsta, 2, NULL, 0, 0); |
1883 | } | 990 | } |
1884 | 991 | ||
1885 | if (auth_alg != ifsta->auth_alg || | 992 | if (auth_alg != ifsta->auth_alg || |
@@ -1912,7 +1019,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev, | |||
1912 | algs[pos] == 0xff) | 1019 | algs[pos] == 0xff) |
1913 | continue; | 1020 | continue; |
1914 | if (algs[pos] == WLAN_AUTH_SHARED_KEY && | 1021 | if (algs[pos] == WLAN_AUTH_SHARED_KEY && |
1915 | !ieee80211_sta_wep_configured(dev)) | 1022 | !ieee80211_sta_wep_configured(sdata)) |
1916 | continue; | 1023 | continue; |
1917 | ifsta->auth_alg = algs[pos]; | 1024 | ifsta->auth_alg = algs[pos]; |
1918 | break; | 1025 | break; |
@@ -1924,19 +1031,19 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev, | |||
1924 | switch (ifsta->auth_alg) { | 1031 | switch (ifsta->auth_alg) { |
1925 | case WLAN_AUTH_OPEN: | 1032 | case WLAN_AUTH_OPEN: |
1926 | case WLAN_AUTH_LEAP: | 1033 | case WLAN_AUTH_LEAP: |
1927 | ieee80211_auth_completed(dev, ifsta); | 1034 | ieee80211_auth_completed(sdata, ifsta); |
1928 | break; | 1035 | break; |
1929 | case WLAN_AUTH_SHARED_KEY: | 1036 | case WLAN_AUTH_SHARED_KEY: |
1930 | if (ifsta->auth_transaction == 4) | 1037 | if (ifsta->auth_transaction == 4) |
1931 | ieee80211_auth_completed(dev, ifsta); | 1038 | ieee80211_auth_completed(sdata, ifsta); |
1932 | else | 1039 | else |
1933 | ieee80211_auth_challenge(dev, ifsta, mgmt, len); | 1040 | ieee80211_auth_challenge(sdata, ifsta, mgmt, len); |
1934 | break; | 1041 | break; |
1935 | } | 1042 | } |
1936 | } | 1043 | } |
1937 | 1044 | ||
1938 | 1045 | ||
1939 | static void ieee80211_rx_mgmt_deauth(struct net_device *dev, | 1046 | static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, |
1940 | struct ieee80211_if_sta *ifsta, | 1047 | struct ieee80211_if_sta *ifsta, |
1941 | struct ieee80211_mgmt *mgmt, | 1048 | struct ieee80211_mgmt *mgmt, |
1942 | size_t len) | 1049 | size_t len) |
@@ -1953,22 +1060,22 @@ static void ieee80211_rx_mgmt_deauth(struct net_device *dev, | |||
1953 | reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); | 1060 | reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); |
1954 | 1061 | ||
1955 | if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) | 1062 | if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) |
1956 | printk(KERN_DEBUG "%s: deauthenticated\n", dev->name); | 1063 | printk(KERN_DEBUG "%s: deauthenticated\n", sdata->dev->name); |
1957 | 1064 | ||
1958 | if (ifsta->state == IEEE80211_AUTHENTICATE || | 1065 | if (ifsta->state == IEEE80211_STA_MLME_AUTHENTICATE || |
1959 | ifsta->state == IEEE80211_ASSOCIATE || | 1066 | ifsta->state == IEEE80211_STA_MLME_ASSOCIATE || |
1960 | ifsta->state == IEEE80211_ASSOCIATED) { | 1067 | ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) { |
1961 | ifsta->state = IEEE80211_AUTHENTICATE; | 1068 | ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE; |
1962 | mod_timer(&ifsta->timer, jiffies + | 1069 | mod_timer(&ifsta->timer, jiffies + |
1963 | IEEE80211_RETRY_AUTH_INTERVAL); | 1070 | IEEE80211_RETRY_AUTH_INTERVAL); |
1964 | } | 1071 | } |
1965 | 1072 | ||
1966 | ieee80211_set_disassoc(dev, ifsta, 1); | 1073 | ieee80211_set_disassoc(sdata, ifsta, true, false, 0); |
1967 | ifsta->flags &= ~IEEE80211_STA_AUTHENTICATED; | 1074 | ifsta->flags &= ~IEEE80211_STA_AUTHENTICATED; |
1968 | } | 1075 | } |
1969 | 1076 | ||
1970 | 1077 | ||
1971 | static void ieee80211_rx_mgmt_disassoc(struct net_device *dev, | 1078 | static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, |
1972 | struct ieee80211_if_sta *ifsta, | 1079 | struct ieee80211_if_sta *ifsta, |
1973 | struct ieee80211_mgmt *mgmt, | 1080 | struct ieee80211_mgmt *mgmt, |
1974 | size_t len) | 1081 | size_t len) |
@@ -1985,15 +1092,15 @@ static void ieee80211_rx_mgmt_disassoc(struct net_device *dev, | |||
1985 | reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); | 1092 | reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); |
1986 | 1093 | ||
1987 | if (ifsta->flags & IEEE80211_STA_ASSOCIATED) | 1094 | if (ifsta->flags & IEEE80211_STA_ASSOCIATED) |
1988 | printk(KERN_DEBUG "%s: disassociated\n", dev->name); | 1095 | printk(KERN_DEBUG "%s: disassociated\n", sdata->dev->name); |
1989 | 1096 | ||
1990 | if (ifsta->state == IEEE80211_ASSOCIATED) { | 1097 | if (ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) { |
1991 | ifsta->state = IEEE80211_ASSOCIATE; | 1098 | ifsta->state = IEEE80211_STA_MLME_ASSOCIATE; |
1992 | mod_timer(&ifsta->timer, jiffies + | 1099 | mod_timer(&ifsta->timer, jiffies + |
1993 | IEEE80211_RETRY_AUTH_INTERVAL); | 1100 | IEEE80211_RETRY_AUTH_INTERVAL); |
1994 | } | 1101 | } |
1995 | 1102 | ||
1996 | ieee80211_set_disassoc(dev, ifsta, 0); | 1103 | ieee80211_set_disassoc(sdata, ifsta, false, false, 0); |
1997 | } | 1104 | } |
1998 | 1105 | ||
1999 | 1106 | ||
@@ -2004,7 +1111,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2004 | int reassoc) | 1111 | int reassoc) |
2005 | { | 1112 | { |
2006 | struct ieee80211_local *local = sdata->local; | 1113 | struct ieee80211_local *local = sdata->local; |
2007 | struct net_device *dev = sdata->dev; | ||
2008 | struct ieee80211_supported_band *sband; | 1114 | struct ieee80211_supported_band *sband; |
2009 | struct sta_info *sta; | 1115 | struct sta_info *sta; |
2010 | u64 rates, basic_rates; | 1116 | u64 rates, basic_rates; |
@@ -2019,7 +1125,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2019 | /* AssocResp and ReassocResp have identical structure, so process both | 1125 | /* AssocResp and ReassocResp have identical structure, so process both |
2020 | * of them in this function. */ | 1126 | * of them in this function. */ |
2021 | 1127 | ||
2022 | if (ifsta->state != IEEE80211_ASSOCIATE) | 1128 | if (ifsta->state != IEEE80211_STA_MLME_ASSOCIATE) |
2023 | return; | 1129 | return; |
2024 | 1130 | ||
2025 | if (len < 24 + 6) | 1131 | if (len < 24 + 6) |
@@ -2034,12 +1140,12 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2034 | 1140 | ||
2035 | printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " | 1141 | printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " |
2036 | "status=%d aid=%d)\n", | 1142 | "status=%d aid=%d)\n", |
2037 | dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), | 1143 | sdata->dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), |
2038 | capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); | 1144 | capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); |
2039 | 1145 | ||
2040 | if (status_code != WLAN_STATUS_SUCCESS) { | 1146 | if (status_code != WLAN_STATUS_SUCCESS) { |
2041 | printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", | 1147 | printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", |
2042 | dev->name, status_code); | 1148 | sdata->dev->name, status_code); |
2043 | /* if this was a reassociation, ensure we try a "full" | 1149 | /* if this was a reassociation, ensure we try a "full" |
2044 | * association next time. This works around some broken APs | 1150 | * association next time. This works around some broken APs |
2045 | * which do not correctly reject reassociation requests. */ | 1151 | * which do not correctly reject reassociation requests. */ |
@@ -2049,7 +1155,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2049 | 1155 | ||
2050 | if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) | 1156 | if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) |
2051 | printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " | 1157 | printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " |
2052 | "set\n", dev->name, aid); | 1158 | "set\n", sdata->dev->name, aid); |
2053 | aid &= ~(BIT(15) | BIT(14)); | 1159 | aid &= ~(BIT(15) | BIT(14)); |
2054 | 1160 | ||
2055 | pos = mgmt->u.assoc_resp.variable; | 1161 | pos = mgmt->u.assoc_resp.variable; |
@@ -2057,11 +1163,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2057 | 1163 | ||
2058 | if (!elems.supp_rates) { | 1164 | if (!elems.supp_rates) { |
2059 | printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", | 1165 | printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", |
2060 | dev->name); | 1166 | sdata->dev->name); |
2061 | return; | 1167 | return; |
2062 | } | 1168 | } |
2063 | 1169 | ||
2064 | printk(KERN_DEBUG "%s: associated\n", dev->name); | 1170 | printk(KERN_DEBUG "%s: associated\n", sdata->dev->name); |
2065 | ifsta->aid = aid; | 1171 | ifsta->aid = aid; |
2066 | ifsta->ap_capab = capab_info; | 1172 | ifsta->ap_capab = capab_info; |
2067 | 1173 | ||
@@ -2082,11 +1188,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2082 | sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC); | 1188 | sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC); |
2083 | if (!sta) { | 1189 | if (!sta) { |
2084 | printk(KERN_DEBUG "%s: failed to alloc STA entry for" | 1190 | printk(KERN_DEBUG "%s: failed to alloc STA entry for" |
2085 | " the AP\n", dev->name); | 1191 | " the AP\n", sdata->dev->name); |
2086 | rcu_read_unlock(); | 1192 | rcu_read_unlock(); |
2087 | return; | 1193 | return; |
2088 | } | 1194 | } |
2089 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, | 1195 | bss = ieee80211_rx_bss_get(local, ifsta->bssid, |
2090 | local->hw.conf.channel->center_freq, | 1196 | local->hw.conf.channel->center_freq, |
2091 | ifsta->ssid, ifsta->ssid_len); | 1197 | ifsta->ssid, ifsta->ssid_len); |
2092 | if (bss) { | 1198 | if (bss) { |
@@ -2099,7 +1205,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2099 | err = sta_info_insert(sta); | 1205 | err = sta_info_insert(sta); |
2100 | if (err) { | 1206 | if (err) { |
2101 | printk(KERN_DEBUG "%s: failed to insert STA entry for" | 1207 | printk(KERN_DEBUG "%s: failed to insert STA entry for" |
2102 | " the AP (error %d)\n", dev->name, err); | 1208 | " the AP (error %d)\n", sdata->dev->name, err); |
2103 | rcu_read_unlock(); | 1209 | rcu_read_unlock(); |
2104 | return; | 1210 | return; |
2105 | } | 1211 | } |
@@ -2179,7 +1285,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2179 | if (elems.wmm_param) { | 1285 | if (elems.wmm_param) { |
2180 | set_sta_flags(sta, WLAN_STA_WME); | 1286 | set_sta_flags(sta, WLAN_STA_WME); |
2181 | rcu_read_unlock(); | 1287 | rcu_read_unlock(); |
2182 | ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, | 1288 | ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param, |
2183 | elems.wmm_param_len); | 1289 | elems.wmm_param_len); |
2184 | } else | 1290 | } else |
2185 | rcu_read_unlock(); | 1291 | rcu_read_unlock(); |
@@ -2188,234 +1294,26 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2188 | * ieee80211_set_associated() will tell the driver */ | 1294 | * ieee80211_set_associated() will tell the driver */ |
2189 | bss_conf->aid = aid; | 1295 | bss_conf->aid = aid; |
2190 | bss_conf->assoc_capability = capab_info; | 1296 | bss_conf->assoc_capability = capab_info; |
2191 | ieee80211_set_associated(dev, ifsta, 1); | 1297 | ieee80211_set_associated(sdata, ifsta); |
2192 | |||
2193 | ieee80211_associated(dev, ifsta); | ||
2194 | } | ||
2195 | |||
2196 | |||
2197 | /* Caller must hold local->sta_bss_lock */ | ||
2198 | static void __ieee80211_rx_bss_hash_add(struct net_device *dev, | ||
2199 | struct ieee80211_sta_bss *bss) | ||
2200 | { | ||
2201 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2202 | u8 hash_idx; | ||
2203 | |||
2204 | if (bss_mesh_cfg(bss)) | ||
2205 | hash_idx = mesh_id_hash(bss_mesh_id(bss), | ||
2206 | bss_mesh_id_len(bss)); | ||
2207 | else | ||
2208 | hash_idx = STA_HASH(bss->bssid); | ||
2209 | |||
2210 | bss->hnext = local->sta_bss_hash[hash_idx]; | ||
2211 | local->sta_bss_hash[hash_idx] = bss; | ||
2212 | } | ||
2213 | 1298 | ||
2214 | 1299 | ieee80211_associated(sdata, ifsta); | |
2215 | /* Caller must hold local->sta_bss_lock */ | ||
2216 | static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local, | ||
2217 | struct ieee80211_sta_bss *bss) | ||
2218 | { | ||
2219 | struct ieee80211_sta_bss *b, *prev = NULL; | ||
2220 | b = local->sta_bss_hash[STA_HASH(bss->bssid)]; | ||
2221 | while (b) { | ||
2222 | if (b == bss) { | ||
2223 | if (!prev) | ||
2224 | local->sta_bss_hash[STA_HASH(bss->bssid)] = | ||
2225 | bss->hnext; | ||
2226 | else | ||
2227 | prev->hnext = bss->hnext; | ||
2228 | break; | ||
2229 | } | ||
2230 | prev = b; | ||
2231 | b = b->hnext; | ||
2232 | } | ||
2233 | } | 1300 | } |
2234 | 1301 | ||
2235 | 1302 | ||
2236 | static struct ieee80211_sta_bss * | 1303 | static int ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, |
2237 | ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int freq, | ||
2238 | u8 *ssid, u8 ssid_len) | ||
2239 | { | ||
2240 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2241 | struct ieee80211_sta_bss *bss; | ||
2242 | |||
2243 | bss = kzalloc(sizeof(*bss), GFP_ATOMIC); | ||
2244 | if (!bss) | ||
2245 | return NULL; | ||
2246 | atomic_inc(&bss->users); | ||
2247 | atomic_inc(&bss->users); | ||
2248 | memcpy(bss->bssid, bssid, ETH_ALEN); | ||
2249 | bss->freq = freq; | ||
2250 | if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) { | ||
2251 | memcpy(bss->ssid, ssid, ssid_len); | ||
2252 | bss->ssid_len = ssid_len; | ||
2253 | } | ||
2254 | |||
2255 | spin_lock_bh(&local->sta_bss_lock); | ||
2256 | /* TODO: order by RSSI? */ | ||
2257 | list_add_tail(&bss->list, &local->sta_bss_list); | ||
2258 | __ieee80211_rx_bss_hash_add(dev, bss); | ||
2259 | spin_unlock_bh(&local->sta_bss_lock); | ||
2260 | return bss; | ||
2261 | } | ||
2262 | |||
2263 | static struct ieee80211_sta_bss * | ||
2264 | ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq, | ||
2265 | u8 *ssid, u8 ssid_len) | ||
2266 | { | ||
2267 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2268 | struct ieee80211_sta_bss *bss; | ||
2269 | |||
2270 | spin_lock_bh(&local->sta_bss_lock); | ||
2271 | bss = local->sta_bss_hash[STA_HASH(bssid)]; | ||
2272 | while (bss) { | ||
2273 | if (!bss_mesh_cfg(bss) && | ||
2274 | !memcmp(bss->bssid, bssid, ETH_ALEN) && | ||
2275 | bss->freq == freq && | ||
2276 | bss->ssid_len == ssid_len && | ||
2277 | (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) { | ||
2278 | atomic_inc(&bss->users); | ||
2279 | break; | ||
2280 | } | ||
2281 | bss = bss->hnext; | ||
2282 | } | ||
2283 | spin_unlock_bh(&local->sta_bss_lock); | ||
2284 | return bss; | ||
2285 | } | ||
2286 | |||
2287 | #ifdef CONFIG_MAC80211_MESH | ||
2288 | static struct ieee80211_sta_bss * | ||
2289 | ieee80211_rx_mesh_bss_get(struct net_device *dev, u8 *mesh_id, int mesh_id_len, | ||
2290 | u8 *mesh_cfg, int freq) | ||
2291 | { | ||
2292 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2293 | struct ieee80211_sta_bss *bss; | ||
2294 | |||
2295 | spin_lock_bh(&local->sta_bss_lock); | ||
2296 | bss = local->sta_bss_hash[mesh_id_hash(mesh_id, mesh_id_len)]; | ||
2297 | while (bss) { | ||
2298 | if (bss_mesh_cfg(bss) && | ||
2299 | !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) && | ||
2300 | bss->freq == freq && | ||
2301 | mesh_id_len == bss->mesh_id_len && | ||
2302 | (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id, | ||
2303 | mesh_id_len))) { | ||
2304 | atomic_inc(&bss->users); | ||
2305 | break; | ||
2306 | } | ||
2307 | bss = bss->hnext; | ||
2308 | } | ||
2309 | spin_unlock_bh(&local->sta_bss_lock); | ||
2310 | return bss; | ||
2311 | } | ||
2312 | |||
2313 | static struct ieee80211_sta_bss * | ||
2314 | ieee80211_rx_mesh_bss_add(struct net_device *dev, u8 *mesh_id, int mesh_id_len, | ||
2315 | u8 *mesh_cfg, int mesh_config_len, int freq) | ||
2316 | { | ||
2317 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2318 | struct ieee80211_sta_bss *bss; | ||
2319 | |||
2320 | if (mesh_config_len != MESH_CFG_LEN) | ||
2321 | return NULL; | ||
2322 | |||
2323 | bss = kzalloc(sizeof(*bss), GFP_ATOMIC); | ||
2324 | if (!bss) | ||
2325 | return NULL; | ||
2326 | |||
2327 | bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC); | ||
2328 | if (!bss->mesh_cfg) { | ||
2329 | kfree(bss); | ||
2330 | return NULL; | ||
2331 | } | ||
2332 | |||
2333 | if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) { | ||
2334 | bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC); | ||
2335 | if (!bss->mesh_id) { | ||
2336 | kfree(bss->mesh_cfg); | ||
2337 | kfree(bss); | ||
2338 | return NULL; | ||
2339 | } | ||
2340 | memcpy(bss->mesh_id, mesh_id, mesh_id_len); | ||
2341 | } | ||
2342 | |||
2343 | atomic_inc(&bss->users); | ||
2344 | atomic_inc(&bss->users); | ||
2345 | memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN); | ||
2346 | bss->mesh_id_len = mesh_id_len; | ||
2347 | bss->freq = freq; | ||
2348 | spin_lock_bh(&local->sta_bss_lock); | ||
2349 | /* TODO: order by RSSI? */ | ||
2350 | list_add_tail(&bss->list, &local->sta_bss_list); | ||
2351 | __ieee80211_rx_bss_hash_add(dev, bss); | ||
2352 | spin_unlock_bh(&local->sta_bss_lock); | ||
2353 | return bss; | ||
2354 | } | ||
2355 | #endif | ||
2356 | |||
2357 | static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss) | ||
2358 | { | ||
2359 | kfree(bss->wpa_ie); | ||
2360 | kfree(bss->rsn_ie); | ||
2361 | kfree(bss->wmm_ie); | ||
2362 | kfree(bss->ht_ie); | ||
2363 | kfree(bss->ht_add_ie); | ||
2364 | kfree(bss_mesh_id(bss)); | ||
2365 | kfree(bss_mesh_cfg(bss)); | ||
2366 | kfree(bss); | ||
2367 | } | ||
2368 | |||
2369 | |||
2370 | static void ieee80211_rx_bss_put(struct ieee80211_local *local, | ||
2371 | struct ieee80211_sta_bss *bss) | ||
2372 | { | ||
2373 | local_bh_disable(); | ||
2374 | if (!atomic_dec_and_lock(&bss->users, &local->sta_bss_lock)) { | ||
2375 | local_bh_enable(); | ||
2376 | return; | ||
2377 | } | ||
2378 | |||
2379 | __ieee80211_rx_bss_hash_del(local, bss); | ||
2380 | list_del(&bss->list); | ||
2381 | spin_unlock_bh(&local->sta_bss_lock); | ||
2382 | ieee80211_rx_bss_free(bss); | ||
2383 | } | ||
2384 | |||
2385 | |||
2386 | void ieee80211_rx_bss_list_init(struct ieee80211_local *local) | ||
2387 | { | ||
2388 | spin_lock_init(&local->sta_bss_lock); | ||
2389 | INIT_LIST_HEAD(&local->sta_bss_list); | ||
2390 | } | ||
2391 | |||
2392 | |||
2393 | void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local) | ||
2394 | { | ||
2395 | struct ieee80211_sta_bss *bss, *tmp; | ||
2396 | |||
2397 | list_for_each_entry_safe(bss, tmp, &local->sta_bss_list, list) | ||
2398 | ieee80211_rx_bss_put(local, bss); | ||
2399 | } | ||
2400 | |||
2401 | |||
2402 | static int ieee80211_sta_join_ibss(struct net_device *dev, | ||
2403 | struct ieee80211_if_sta *ifsta, | 1304 | struct ieee80211_if_sta *ifsta, |
2404 | struct ieee80211_sta_bss *bss) | 1305 | struct ieee80211_sta_bss *bss) |
2405 | { | 1306 | { |
2406 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1307 | struct ieee80211_local *local = sdata->local; |
2407 | int res, rates, i, j; | 1308 | int res, rates, i, j; |
2408 | struct sk_buff *skb; | 1309 | struct sk_buff *skb; |
2409 | struct ieee80211_mgmt *mgmt; | 1310 | struct ieee80211_mgmt *mgmt; |
2410 | u8 *pos; | 1311 | u8 *pos; |
2411 | struct ieee80211_sub_if_data *sdata; | ||
2412 | struct ieee80211_supported_band *sband; | 1312 | struct ieee80211_supported_band *sband; |
2413 | union iwreq_data wrqu; | 1313 | union iwreq_data wrqu; |
2414 | 1314 | ||
2415 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 1315 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
2416 | 1316 | ||
2417 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
2418 | |||
2419 | /* Remove possible STA entries from other IBSS networks. */ | 1317 | /* Remove possible STA entries from other IBSS networks. */ |
2420 | sta_info_flush_delayed(sdata); | 1318 | sta_info_flush_delayed(sdata); |
2421 | 1319 | ||
@@ -2433,7 +1331,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2433 | sdata->drop_unencrypted = bss->capability & | 1331 | sdata->drop_unencrypted = bss->capability & |
2434 | WLAN_CAPABILITY_PRIVACY ? 1 : 0; | 1332 | WLAN_CAPABILITY_PRIVACY ? 1 : 0; |
2435 | 1333 | ||
2436 | res = ieee80211_set_freq(dev, bss->freq); | 1334 | res = ieee80211_set_freq(sdata, bss->freq); |
2437 | 1335 | ||
2438 | if (res) | 1336 | if (res) |
2439 | return res; | 1337 | return res; |
@@ -2446,10 +1344,10 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2446 | mgmt = (struct ieee80211_mgmt *) | 1344 | mgmt = (struct ieee80211_mgmt *) |
2447 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); | 1345 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); |
2448 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); | 1346 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); |
2449 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 1347 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
2450 | IEEE80211_STYPE_PROBE_RESP); | 1348 | IEEE80211_STYPE_PROBE_RESP); |
2451 | memset(mgmt->da, 0xff, ETH_ALEN); | 1349 | memset(mgmt->da, 0xff, ETH_ALEN); |
2452 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 1350 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
2453 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | 1351 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); |
2454 | mgmt->u.beacon.beacon_int = | 1352 | mgmt->u.beacon.beacon_int = |
2455 | cpu_to_le16(local->hw.conf.beacon_int); | 1353 | cpu_to_le16(local->hw.conf.beacon_int); |
@@ -2506,14 +1404,14 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2506 | } | 1404 | } |
2507 | ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates; | 1405 | ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates; |
2508 | 1406 | ||
2509 | ieee80211_sta_def_wmm_params(dev, bss, 1); | 1407 | ieee80211_sta_def_wmm_params(sdata, bss); |
2510 | 1408 | ||
2511 | ifsta->state = IEEE80211_IBSS_JOINED; | 1409 | ifsta->state = IEEE80211_STA_MLME_IBSS_JOINED; |
2512 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); | 1410 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); |
2513 | 1411 | ||
2514 | memset(&wrqu, 0, sizeof(wrqu)); | 1412 | memset(&wrqu, 0, sizeof(wrqu)); |
2515 | memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); | 1413 | memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); |
2516 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | 1414 | wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL); |
2517 | 1415 | ||
2518 | return res; | 1416 | return res; |
2519 | } | 1417 | } |
@@ -2554,60 +1452,52 @@ u64 ieee80211_sta_get_rates(struct ieee80211_local *local, | |||
2554 | return supp_rates; | 1452 | return supp_rates; |
2555 | } | 1453 | } |
2556 | 1454 | ||
1455 | static u64 ieee80211_sta_get_mandatory_rates(struct ieee80211_local *local, | ||
1456 | enum ieee80211_band band) | ||
1457 | { | ||
1458 | struct ieee80211_supported_band *sband; | ||
1459 | struct ieee80211_rate *bitrates; | ||
1460 | u64 mandatory_rates; | ||
1461 | enum ieee80211_rate_flags mandatory_flag; | ||
1462 | int i; | ||
1463 | |||
1464 | sband = local->hw.wiphy->bands[band]; | ||
1465 | if (!sband) { | ||
1466 | WARN_ON(1); | ||
1467 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
1468 | } | ||
1469 | |||
1470 | if (band == IEEE80211_BAND_2GHZ) | ||
1471 | mandatory_flag = IEEE80211_RATE_MANDATORY_B; | ||
1472 | else | ||
1473 | mandatory_flag = IEEE80211_RATE_MANDATORY_A; | ||
1474 | |||
1475 | bitrates = sband->bitrates; | ||
1476 | mandatory_rates = 0; | ||
1477 | for (i = 0; i < sband->n_bitrates; i++) | ||
1478 | if (bitrates[i].flags & mandatory_flag) | ||
1479 | mandatory_rates |= BIT(i); | ||
1480 | return mandatory_rates; | ||
1481 | } | ||
2557 | 1482 | ||
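Aside: the new helper above collects the band's mandatory rates into a bitmask (the 802.11b set on 2.4 GHz, the 802.11a set on 5 GHz); the IBSS code below then ORs that mask into each peer's supported-rate set so rate control always sees at least one usable rate. A hedged, standalone sketch of the same bitmap construction over a made-up rate table, with bitrates in 100 kbps units as in mac80211:

    #include <stdio.h>
    #include <stdint.h>

    #define RATE_MANDATORY_B 0x1 /* 11b mandatory set (2.4 GHz) */

    struct rate { int bitrate; unsigned flags; }; /* bitrate in 100 kbps units */

    /* Made-up 2.4 GHz table: 1/2/5.5/11 Mbit/s carry the mandatory flag. */
    static const struct rate rates_2ghz[] = {
        { 10,  RATE_MANDATORY_B },
        { 20,  RATE_MANDATORY_B },
        { 55,  RATE_MANDATORY_B },
        { 110, RATE_MANDATORY_B },
        { 60,  0 },  /* OFDM entries: not part of the 11b mandatory set */
        { 120, 0 },
        { 240, 0 },
    };

    static uint64_t mandatory_rates(const struct rate *r, int n, unsigned flag)
    {
        uint64_t mask = 0;
        int i;

        for (i = 0; i < n; i++)
            if (r[i].flags & flag)
                mask |= 1ULL << i; /* bit i = index into the band's rate table */
        return mask;
    }

    int main(void)
    {
        uint64_t m = mandatory_rates(rates_2ghz, 7, RATE_MANDATORY_B);
        uint64_t peer = 1ULL << 3; /* peer advertised only 11 Mbit/s */

        printf("mandatory mask 0x%llx, merged peer set 0x%llx\n",
               (unsigned long long)m, (unsigned long long)(peer | m));
        return 0;
    }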
2558 | static void ieee80211_rx_bss_info(struct net_device *dev, | 1483 | static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, |
2559 | struct ieee80211_mgmt *mgmt, | 1484 | struct ieee80211_mgmt *mgmt, |
2560 | size_t len, | 1485 | size_t len, |
2561 | struct ieee80211_rx_status *rx_status, | 1486 | struct ieee80211_rx_status *rx_status, |
2562 | struct ieee802_11_elems *elems, | 1487 | struct ieee802_11_elems *elems, |
2563 | int beacon) | 1488 | bool beacon) |
2564 | { | 1489 | { |
2565 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1490 | struct ieee80211_local *local = sdata->local; |
2566 | int freq, clen; | 1491 | int freq; |
2567 | struct ieee80211_sta_bss *bss; | 1492 | struct ieee80211_sta_bss *bss; |
2568 | struct sta_info *sta; | 1493 | struct sta_info *sta; |
2569 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
2570 | u64 beacon_timestamp, rx_timestamp; | ||
2571 | struct ieee80211_channel *channel; | 1494 | struct ieee80211_channel *channel; |
1495 | u64 beacon_timestamp, rx_timestamp; | ||
1496 | u64 supp_rates = 0; | ||
1497 | enum ieee80211_band band = rx_status->band; | ||
2572 | DECLARE_MAC_BUF(mac); | 1498 | DECLARE_MAC_BUF(mac); |
2573 | DECLARE_MAC_BUF(mac2); | 1499 | DECLARE_MAC_BUF(mac2); |
2574 | 1500 | ||
2575 | if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN)) | ||
2576 | return; /* ignore ProbeResp to foreign address */ | ||
2577 | |||
2578 | beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); | ||
2579 | |||
2580 | if (ieee80211_vif_is_mesh(&sdata->vif) && elems->mesh_id && | ||
2581 | elems->mesh_config && mesh_matches_local(elems, dev)) { | ||
2582 | u64 rates = ieee80211_sta_get_rates(local, elems, | ||
2583 | rx_status->band); | ||
2584 | |||
2585 | mesh_neighbour_update(mgmt->sa, rates, dev, | ||
2586 | mesh_peer_accepts_plinks(elems, dev)); | ||
2587 | } | ||
2588 | |||
2589 | rcu_read_lock(); | ||
2590 | |||
2591 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems->supp_rates && | ||
2592 | memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 && | ||
2593 | (sta = sta_info_get(local, mgmt->sa))) { | ||
2594 | u64 prev_rates; | ||
2595 | u64 supp_rates = ieee80211_sta_get_rates(local, elems, | ||
2596 | rx_status->band); | ||
2597 | |||
2598 | prev_rates = sta->supp_rates[rx_status->band]; | ||
2599 | sta->supp_rates[rx_status->band] &= supp_rates; | ||
2600 | if (sta->supp_rates[rx_status->band] == 0) { | ||
2601 | /* No matching rates - this should not really happen. | ||
2602 | * Make sure that at least one rate is marked | ||
2603 | * supported to avoid issues with TX rate ctrl. */ | ||
2604 | sta->supp_rates[rx_status->band] = | ||
2605 | sdata->u.sta.supp_rates_bits[rx_status->band]; | ||
2606 | } | ||
2607 | } | ||
2608 | |||
2609 | rcu_read_unlock(); | ||
2610 | |||
2611 | if (elems->ds_params && elems->ds_params_len == 1) | 1501 | if (elems->ds_params && elems->ds_params_len == 1) |
2612 | freq = ieee80211_channel_to_frequency(elems->ds_params[0]); | 1502 | freq = ieee80211_channel_to_frequency(elems->ds_params[0]); |
2613 | else | 1503 | else |
@@ -2618,215 +1508,67 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2618 | if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) | 1508 | if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) |
2619 | return; | 1509 | return; |
2620 | 1510 | ||
2621 | #ifdef CONFIG_MAC80211_MESH | 1511 | if (ieee80211_vif_is_mesh(&sdata->vif) && elems->mesh_id && |
2622 | if (elems->mesh_config) | 1512 | elems->mesh_config && mesh_matches_local(elems, sdata)) { |
2623 | bss = ieee80211_rx_mesh_bss_get(dev, elems->mesh_id, | 1513 | supp_rates = ieee80211_sta_get_rates(local, elems, band); |
2624 | elems->mesh_id_len, elems->mesh_config, freq); | ||
2625 | else | ||
2626 | #endif | ||
2627 | bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq, | ||
2628 | elems->ssid, elems->ssid_len); | ||
2629 | if (!bss) { | ||
2630 | #ifdef CONFIG_MAC80211_MESH | ||
2631 | if (elems->mesh_config) | ||
2632 | bss = ieee80211_rx_mesh_bss_add(dev, elems->mesh_id, | ||
2633 | elems->mesh_id_len, elems->mesh_config, | ||
2634 | elems->mesh_config_len, freq); | ||
2635 | else | ||
2636 | #endif | ||
2637 | bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq, | ||
2638 | elems->ssid, elems->ssid_len); | ||
2639 | if (!bss) | ||
2640 | return; | ||
2641 | } else { | ||
2642 | #if 0 | ||
2643 | /* TODO: order by RSSI? */ | ||
2644 | spin_lock_bh(&local->sta_bss_lock); | ||
2645 | list_move_tail(&bss->list, &local->sta_bss_list); | ||
2646 | spin_unlock_bh(&local->sta_bss_lock); | ||
2647 | #endif | ||
2648 | } | ||
2649 | 1514 | ||
2650 | /* save the ERP value so that it is available at association time */ | 1515 | mesh_neighbour_update(mgmt->sa, supp_rates, sdata, |
2651 | if (elems->erp_info && elems->erp_info_len >= 1) { | 1516 | mesh_peer_accepts_plinks(elems)); |
2652 | bss->erp_value = elems->erp_info[0]; | ||
2653 | bss->has_erp_value = 1; | ||
2654 | } | 1517 | } |
2655 | 1518 | ||
2656 | if (elems->ht_cap_elem && | 1519 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems->supp_rates && |
2657 | (!bss->ht_ie || bss->ht_ie_len != elems->ht_cap_elem_len || | 1520 | memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0) { |
2658 | memcmp(bss->ht_ie, elems->ht_cap_elem, elems->ht_cap_elem_len))) { | 1521 | supp_rates = ieee80211_sta_get_rates(local, elems, band); |
2659 | kfree(bss->ht_ie); | ||
2660 | bss->ht_ie = kmalloc(elems->ht_cap_elem_len + 2, GFP_ATOMIC); | ||
2661 | if (bss->ht_ie) { | ||
2662 | memcpy(bss->ht_ie, elems->ht_cap_elem - 2, | ||
2663 | elems->ht_cap_elem_len + 2); | ||
2664 | bss->ht_ie_len = elems->ht_cap_elem_len + 2; | ||
2665 | } else | ||
2666 | bss->ht_ie_len = 0; | ||
2667 | } else if (!elems->ht_cap_elem && bss->ht_ie) { | ||
2668 | kfree(bss->ht_ie); | ||
2669 | bss->ht_ie = NULL; | ||
2670 | bss->ht_ie_len = 0; | ||
2671 | } | ||
2672 | 1522 | ||
2673 | if (elems->ht_info_elem && | 1523 | rcu_read_lock(); |
2674 | (!bss->ht_add_ie || | ||
2675 | bss->ht_add_ie_len != elems->ht_info_elem_len || | ||
2676 | memcmp(bss->ht_add_ie, elems->ht_info_elem, | ||
2677 | elems->ht_info_elem_len))) { | ||
2678 | kfree(bss->ht_add_ie); | ||
2679 | bss->ht_add_ie = | ||
2680 | kmalloc(elems->ht_info_elem_len + 2, GFP_ATOMIC); | ||
2681 | if (bss->ht_add_ie) { | ||
2682 | memcpy(bss->ht_add_ie, elems->ht_info_elem - 2, | ||
2683 | elems->ht_info_elem_len + 2); | ||
2684 | bss->ht_add_ie_len = elems->ht_info_elem_len + 2; | ||
2685 | } else | ||
2686 | bss->ht_add_ie_len = 0; | ||
2687 | } else if (!elems->ht_info_elem && bss->ht_add_ie) { | ||
2688 | kfree(bss->ht_add_ie); | ||
2689 | bss->ht_add_ie = NULL; | ||
2690 | bss->ht_add_ie_len = 0; | ||
2691 | } | ||
2692 | 1524 | ||
2693 | bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); | 1525 | sta = sta_info_get(local, mgmt->sa); |
2694 | bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); | 1526 | if (sta) { |
1527 | u64 prev_rates; | ||
2695 | 1528 | ||
2696 | if (elems->tim) { | 1529 | prev_rates = sta->supp_rates[band]; |
2697 | struct ieee80211_tim_ie *tim_ie = | 1530 | /* make sure mandatory rates are always added */ |
2698 | (struct ieee80211_tim_ie *)elems->tim; | 1531 | sta->supp_rates[band] = supp_rates | |
2699 | bss->dtim_period = tim_ie->dtim_period; | 1532 | ieee80211_sta_get_mandatory_rates(local, band); |
2700 | } | ||
2701 | 1533 | ||
2702 | /* set default value for buggy APs */ | 1534 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
2703 | if (!elems->tim || bss->dtim_period == 0) | 1535 | if (sta->supp_rates[band] != prev_rates) |
2704 | bss->dtim_period = 1; | 1536 | printk(KERN_DEBUG "%s: updated supp_rates set " |
2705 | 1537 | "for %s based on beacon info (0x%llx | " | |
2706 | bss->supp_rates_len = 0; | 1538 | "0x%llx -> 0x%llx)\n", |
2707 | if (elems->supp_rates) { | 1539 | sdata->dev->name, print_mac(mac, sta->addr), |
2708 | clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; | 1540 | (unsigned long long) prev_rates, |
2709 | if (clen > elems->supp_rates_len) | 1541 | (unsigned long long) supp_rates, |
2710 | clen = elems->supp_rates_len; | 1542 | (unsigned long long) sta->supp_rates[band]); |
2711 | memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates, | 1543 | #endif |
2712 | clen); | 1544 | } else { |
2713 | bss->supp_rates_len += clen; | 1545 | ieee80211_ibss_add_sta(sdata, NULL, mgmt->bssid, |
2714 | } | 1546 | mgmt->sa, supp_rates); |
2715 | if (elems->ext_supp_rates) { | 1547 | } |
2716 | clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; | 1548 | |
2717 | if (clen > elems->ext_supp_rates_len) | 1549 | rcu_read_unlock(); |
2718 | clen = elems->ext_supp_rates_len; | ||
2719 | memcpy(&bss->supp_rates[bss->supp_rates_len], | ||
2720 | elems->ext_supp_rates, clen); | ||
2721 | bss->supp_rates_len += clen; | ||
2722 | } | 1550 | } |
2723 | 1551 | ||
2724 | bss->band = rx_status->band; | 1552 | bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, |
1553 | freq, beacon); | ||
1554 | if (!bss) | ||
1555 | return; | ||
2725 | 1556 | ||
2726 | bss->timestamp = beacon_timestamp; | 1557 | /* was just updated in ieee80211_bss_info_update */ |
2727 | bss->last_update = jiffies; | 1558 | beacon_timestamp = bss->timestamp; |
2728 | bss->signal = rx_status->signal; | ||
2729 | bss->noise = rx_status->noise; | ||
2730 | bss->qual = rx_status->qual; | ||
2731 | if (!beacon && !bss->probe_resp) | ||
2732 | bss->probe_resp = true; | ||
2733 | 1559 | ||
2734 | /* | 1560 | /* |
2735 | * In STA mode, the remaining parameters should not be overridden | 1561 | * In STA mode, the remaining parameters should not be overridden |
2736 | * by beacons because they're not necessarily accurate there. | 1562 | * by beacons because they're not necessarily accurate there. |
2737 | */ | 1563 | */ |
2738 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 1564 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && |
2739 | bss->probe_resp && beacon) { | 1565 | bss->last_probe_resp && beacon) { |
2740 | ieee80211_rx_bss_put(local, bss); | 1566 | ieee80211_rx_bss_put(local, bss); |
2741 | return; | 1567 | return; |
2742 | } | 1568 | } |
2743 | 1569 | ||
2744 | if (elems->wpa && | ||
2745 | (!bss->wpa_ie || bss->wpa_ie_len != elems->wpa_len || | ||
2746 | memcmp(bss->wpa_ie, elems->wpa, elems->wpa_len))) { | ||
2747 | kfree(bss->wpa_ie); | ||
2748 | bss->wpa_ie = kmalloc(elems->wpa_len + 2, GFP_ATOMIC); | ||
2749 | if (bss->wpa_ie) { | ||
2750 | memcpy(bss->wpa_ie, elems->wpa - 2, elems->wpa_len + 2); | ||
2751 | bss->wpa_ie_len = elems->wpa_len + 2; | ||
2752 | } else | ||
2753 | bss->wpa_ie_len = 0; | ||
2754 | } else if (!elems->wpa && bss->wpa_ie) { | ||
2755 | kfree(bss->wpa_ie); | ||
2756 | bss->wpa_ie = NULL; | ||
2757 | bss->wpa_ie_len = 0; | ||
2758 | } | ||
2759 | |||
2760 | if (elems->rsn && | ||
2761 | (!bss->rsn_ie || bss->rsn_ie_len != elems->rsn_len || | ||
2762 | memcmp(bss->rsn_ie, elems->rsn, elems->rsn_len))) { | ||
2763 | kfree(bss->rsn_ie); | ||
2764 | bss->rsn_ie = kmalloc(elems->rsn_len + 2, GFP_ATOMIC); | ||
2765 | if (bss->rsn_ie) { | ||
2766 | memcpy(bss->rsn_ie, elems->rsn - 2, elems->rsn_len + 2); | ||
2767 | bss->rsn_ie_len = elems->rsn_len + 2; | ||
2768 | } else | ||
2769 | bss->rsn_ie_len = 0; | ||
2770 | } else if (!elems->rsn && bss->rsn_ie) { | ||
2771 | kfree(bss->rsn_ie); | ||
2772 | bss->rsn_ie = NULL; | ||
2773 | bss->rsn_ie_len = 0; | ||
2774 | } | ||
2775 | |||
2776 | /* | ||
2777 | * Cf. | ||
2778 | * http://www.wipo.int/pctdb/en/wo.jsp?wo=2007047181&IA=WO2007047181&DISPLAY=DESC | ||
2779 | * | ||
2780 | * quoting: | ||
2781 | * | ||
2782 | * In particular, "Wi-Fi CERTIFIED for WMM - Support for Multimedia | ||
2783 | * Applications with Quality of Service in Wi-Fi Networks," Wi- Fi | ||
2784 | * Alliance (September 1, 2004) is incorporated by reference herein. | ||
2785 | * The inclusion of the WMM Parameters in probe responses and | ||
2786 | * association responses is mandatory for WMM enabled networks. The | ||
2787 | * inclusion of the WMM Parameters in beacons, however, is optional. | ||
2788 | */ | ||
2789 | |||
2790 | if (elems->wmm_param && | ||
2791 | (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_param_len || | ||
2792 | memcmp(bss->wmm_ie, elems->wmm_param, elems->wmm_param_len))) { | ||
2793 | kfree(bss->wmm_ie); | ||
2794 | bss->wmm_ie = kmalloc(elems->wmm_param_len + 2, GFP_ATOMIC); | ||
2795 | if (bss->wmm_ie) { | ||
2796 | memcpy(bss->wmm_ie, elems->wmm_param - 2, | ||
2797 | elems->wmm_param_len + 2); | ||
2798 | bss->wmm_ie_len = elems->wmm_param_len + 2; | ||
2799 | } else | ||
2800 | bss->wmm_ie_len = 0; | ||
2801 | } else if (elems->wmm_info && | ||
2802 | (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_info_len || | ||
2803 | memcmp(bss->wmm_ie, elems->wmm_info, | ||
2804 | elems->wmm_info_len))) { | ||
2805 | /* As for certain AP's Fifth bit is not set in WMM IE in | ||
2806 | * beacon frames.So while parsing the beacon frame the | ||
2807 | * wmm_info structure is used instead of wmm_param. | ||
2808 | * wmm_info structure was never used to set bss->wmm_ie. | ||
2809 | * This code fixes this problem by copying the WME | ||
2810 | * information from wmm_info to bss->wmm_ie and enabling | ||
2811 | * n-band association. | ||
2812 | */ | ||
2813 | kfree(bss->wmm_ie); | ||
2814 | bss->wmm_ie = kmalloc(elems->wmm_info_len + 2, GFP_ATOMIC); | ||
2815 | if (bss->wmm_ie) { | ||
2816 | memcpy(bss->wmm_ie, elems->wmm_info - 2, | ||
2817 | elems->wmm_info_len + 2); | ||
2818 | bss->wmm_ie_len = elems->wmm_info_len + 2; | ||
2819 | } else | ||
2820 | bss->wmm_ie_len = 0; | ||
2821 | } else if (!elems->wmm_param && !elems->wmm_info && bss->wmm_ie) { | ||
2822 | kfree(bss->wmm_ie); | ||
2823 | bss->wmm_ie = NULL; | ||
2824 | bss->wmm_ie_len = 0; | ||
2825 | } | ||
2826 | |||
2827 | /* check if we need to merge IBSS */ | 1570 | /* check if we need to merge IBSS */ |
2828 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon && | 1571 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon && |
2829 | !local->sta_sw_scanning && !local->sta_hw_scanning && | ||
2830 | bss->capability & WLAN_CAPABILITY_IBSS && | 1572 | bss->capability & WLAN_CAPABILITY_IBSS && |
2831 | bss->freq == local->oper_channel->center_freq && | 1573 | bss->freq == local->oper_channel->center_freq && |
2832 | elems->ssid_len == sdata->u.sta.ssid_len && | 1574 | elems->ssid_len == sdata->u.sta.ssid_len && |
@@ -2848,7 +1590,7 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2848 | * e.g: at 1 MBit that means mactime is 192 usec earlier | 1590 | * e.g: at 1 MBit that means mactime is 192 usec earlier |
2849 | * (=24 bytes * 8 usecs/byte) than the beacon timestamp. | 1591 | * (=24 bytes * 8 usecs/byte) than the beacon timestamp. |
2850 | */ | 1592 | */ |
2851 | int rate = local->hw.wiphy->bands[rx_status->band]-> | 1593 | int rate = local->hw.wiphy->bands[band]-> |
2852 | bitrates[rx_status->rate_idx].bitrate; | 1594 | bitrates[rx_status->rate_idx].bitrate; |
2853 | rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate); | 1595 | rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate); |
2854 | } else if (local && local->ops && local->ops->get_tsf) | 1596 | } else if (local && local->ops && local->ops->get_tsf) |
@@ -2871,12 +1613,12 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2871 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 1613 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
2872 | printk(KERN_DEBUG "%s: beacon TSF higher than " | 1614 | printk(KERN_DEBUG "%s: beacon TSF higher than " |
2873 | "local TSF - IBSS merge with BSSID %s\n", | 1615 | "local TSF - IBSS merge with BSSID %s\n", |
2874 | dev->name, print_mac(mac, mgmt->bssid)); | 1616 | sdata->dev->name, print_mac(mac, mgmt->bssid)); |
2875 | #endif | 1617 | #endif |
2876 | ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss); | 1618 | ieee80211_sta_join_ibss(sdata, &sdata->u.sta, bss); |
2877 | ieee80211_ibss_add_sta(dev, NULL, | 1619 | ieee80211_ibss_add_sta(sdata, NULL, |
2878 | mgmt->bssid, mgmt->sa, | 1620 | mgmt->bssid, mgmt->sa, |
2879 | BIT(rx_status->rate_idx)); | 1621 | supp_rates); |
2880 | } | 1622 | } |
2881 | } | 1623 | } |
2882 | 1624 | ||
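Aside: the mactime correction in the hunk above exists because rx_status->mactime marks the start of the received frame while, per the comment, the beacon timestamp refers to a point 24 MAC-header bytes later; with the bitrate in 100 kbps units, 24 * 8 * 10 / rate converts those 192 bits into microseconds (192 usec at 1 Mbit/s, 17 usec at 11 Mbit/s after integer division). A small standalone check of that arithmetic, assuming only what the comment states:

    #include <stdio.h>
    #include <stdint.h>

    /* Microseconds between frame start (mactime) and the 24-byte MAC header
     * boundary the beacon timestamp refers to; 'rate' is in 100 kbps units. */
    static uint64_t header_offset_usec(int rate)
    {
        return 24 * 8 * 10 / rate;
    }

    int main(void)
    {
        uint64_t mactime = 1000000;          /* made-up RX mactime in usec */
        int rates[] = { 10, 20, 55, 110 };   /* 1, 2, 5.5, 11 Mbit/s */
        int i;

        for (i = 0; i < 4; i++)
            printf("%2d.%d Mbit/s: offset %3llu usec, rx_timestamp %llu\n",
                   rates[i] / 10, rates[i] % 10,
                   (unsigned long long)header_offset_usec(rates[i]),
                   (unsigned long long)(mactime + header_offset_usec(rates[i])));
        return 0;
    }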
@@ -2884,13 +1626,17 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2884 | } | 1626 | } |
2885 | 1627 | ||
2886 | 1628 | ||
2887 | static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev, | 1629 | static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, |
2888 | struct ieee80211_mgmt *mgmt, | 1630 | struct ieee80211_mgmt *mgmt, |
2889 | size_t len, | 1631 | size_t len, |
2890 | struct ieee80211_rx_status *rx_status) | 1632 | struct ieee80211_rx_status *rx_status) |
2891 | { | 1633 | { |
2892 | size_t baselen; | 1634 | size_t baselen; |
2893 | struct ieee802_11_elems elems; | 1635 | struct ieee802_11_elems elems; |
1636 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
1637 | |||
1638 | if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) | ||
1639 | return; /* ignore ProbeResp to foreign address */ | ||
2894 | 1640 | ||
2895 | baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; | 1641 | baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; |
2896 | if (baselen > len) | 1642 | if (baselen > len) |
@@ -2899,20 +1645,27 @@ static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev, | |||
2899 | ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, | 1645 | ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, |
2900 | &elems); | 1646 | &elems); |
2901 | 1647 | ||
2902 | ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 0); | 1648 | ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); |
1649 | |||
1650 | /* direct probe may be part of the association flow */ | ||
1651 | if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE, | ||
1652 | &ifsta->request)) { | ||
1653 | printk(KERN_DEBUG "%s direct probe responded\n", | ||
1654 | sdata->dev->name); | ||
1655 | ieee80211_authenticate(sdata, ifsta); | ||
1656 | } | ||
2903 | } | 1657 | } |
2904 | 1658 | ||
2905 | 1659 | ||
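Aside: the probe-response handler above wires the new direct-probe step into the association flow: a probe response addressed to this interface clears the pending IEEE80211_STA_REQ_DIRECT_PROBE bit and only then triggers authentication. A simplified standalone model of that test-and-clear gating (names are illustrative, not the kernel API):

    #include <stdio.h>

    #define REQ_DIRECT_PROBE (1u << 0)

    struct sta_mlme {
        unsigned int request;   /* pending request bits, cf. ifsta->request */
        int authenticated;
    };

    /* Returns 1 and clears the bit if the request was outstanding. */
    static int test_and_clear_req(struct sta_mlme *m, unsigned int bit)
    {
        int was_set = !!(m->request & bit);

        m->request &= ~bit;
        return was_set;
    }

    static void rx_probe_resp(struct sta_mlme *m)
    {
        /* Only a response to our own direct probe advances the state machine. */
        if (test_and_clear_req(m, REQ_DIRECT_PROBE)) {
            printf("direct probe responded, authenticating\n");
            m->authenticated = 1; /* stands in for ieee80211_authenticate() */
        }
    }

    int main(void)
    {
        struct sta_mlme m = { .request = REQ_DIRECT_PROBE, .authenticated = 0 };

        rx_probe_resp(&m); /* first response advances the flow */
        rx_probe_resp(&m); /* a duplicate is ignored */
        return 0;
    }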
2906 | static void ieee80211_rx_mgmt_beacon(struct net_device *dev, | 1660 | static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, |
2907 | struct ieee80211_mgmt *mgmt, | 1661 | struct ieee80211_mgmt *mgmt, |
2908 | size_t len, | 1662 | size_t len, |
2909 | struct ieee80211_rx_status *rx_status) | 1663 | struct ieee80211_rx_status *rx_status) |
2910 | { | 1664 | { |
2911 | struct ieee80211_sub_if_data *sdata; | ||
2912 | struct ieee80211_if_sta *ifsta; | 1665 | struct ieee80211_if_sta *ifsta; |
2913 | size_t baselen; | 1666 | size_t baselen; |
2914 | struct ieee802_11_elems elems; | 1667 | struct ieee802_11_elems elems; |
2915 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1668 | struct ieee80211_local *local = sdata->local; |
2916 | struct ieee80211_conf *conf = &local->hw.conf; | 1669 | struct ieee80211_conf *conf = &local->hw.conf; |
2917 | u32 changed = 0; | 1670 | u32 changed = 0; |
2918 | 1671 | ||
@@ -2923,9 +1676,8 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev, | |||
2923 | 1676 | ||
2924 | ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); | 1677 | ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); |
2925 | 1678 | ||
2926 | ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 1); | 1679 | ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true); |
2927 | 1680 | ||
2928 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
2929 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | 1681 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) |
2930 | return; | 1682 | return; |
2931 | ifsta = &sdata->u.sta; | 1683 | ifsta = &sdata->u.sta; |
@@ -2934,15 +1686,9 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev, | |||
2934 | memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) | 1686 | memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) |
2935 | return; | 1687 | return; |
2936 | 1688 | ||
2937 | ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, | 1689 | ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param, |
2938 | elems.wmm_param_len); | 1690 | elems.wmm_param_len); |
2939 | 1691 | ||
2940 | /* Do not send changes to driver if we are scanning. This removes | ||
2941 | * requirement that driver's bss_info_changed function needs to be | ||
2942 | * atomic. */ | ||
2943 | if (local->sta_sw_scanning || local->sta_hw_scanning) | ||
2944 | return; | ||
2945 | |||
2946 | if (elems.erp_info && elems.erp_info_len >= 1) | 1692 | if (elems.erp_info && elems.erp_info_len >= 1) |
2947 | changed |= ieee80211_handle_erp_ie(sdata, elems.erp_info[0]); | 1693 | changed |= ieee80211_handle_erp_ie(sdata, elems.erp_info[0]); |
2948 | else { | 1694 | else { |
@@ -2966,14 +1712,13 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev, | |||
2966 | } | 1712 | } |
2967 | 1713 | ||
2968 | 1714 | ||
2969 | static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, | 1715 | static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, |
2970 | struct ieee80211_if_sta *ifsta, | 1716 | struct ieee80211_if_sta *ifsta, |
2971 | struct ieee80211_mgmt *mgmt, | 1717 | struct ieee80211_mgmt *mgmt, |
2972 | size_t len, | 1718 | size_t len, |
2973 | struct ieee80211_rx_status *rx_status) | 1719 | struct ieee80211_rx_status *rx_status) |
2974 | { | 1720 | { |
2975 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1721 | struct ieee80211_local *local = sdata->local; |
2976 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
2977 | int tx_last_beacon; | 1722 | int tx_last_beacon; |
2978 | struct sk_buff *skb; | 1723 | struct sk_buff *skb; |
2979 | struct ieee80211_mgmt *resp; | 1724 | struct ieee80211_mgmt *resp; |
@@ -2985,7 +1730,7 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, | |||
2985 | #endif | 1730 | #endif |
2986 | 1731 | ||
2987 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS || | 1732 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS || |
2988 | ifsta->state != IEEE80211_IBSS_JOINED || | 1733 | ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED || |
2989 | len < 24 + 2 || !ifsta->probe_resp) | 1734 | len < 24 + 2 || !ifsta->probe_resp) |
2990 | return; | 1735 | return; |
2991 | 1736 | ||
@@ -2997,7 +1742,7 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, | |||
2997 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 1742 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
2998 | printk(KERN_DEBUG "%s: RX ProbeReq SA=%s DA=%s BSSID=" | 1743 | printk(KERN_DEBUG "%s: RX ProbeReq SA=%s DA=%s BSSID=" |
2999 | "%s (tx_last_beacon=%d)\n", | 1744 | "%s (tx_last_beacon=%d)\n", |
3000 | dev->name, print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da), | 1745 | sdata->dev->name, print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da), |
3001 | print_mac(mac3, mgmt->bssid), tx_last_beacon); | 1746 | print_mac(mac3, mgmt->bssid), tx_last_beacon); |
3002 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | 1747 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ |
3003 | 1748 | ||
@@ -3015,7 +1760,7 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, | |||
3015 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 1760 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
3016 | printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " | 1761 | printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " |
3017 | "from %s\n", | 1762 | "from %s\n", |
3018 | dev->name, print_mac(mac, mgmt->sa)); | 1763 | sdata->dev->name, print_mac(mac, mgmt->sa)); |
3019 | #endif | 1764 | #endif |
3020 | return; | 1765 | return; |
3021 | } | 1766 | } |
@@ -3035,74 +1780,35 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, | |||
3035 | memcpy(resp->da, mgmt->sa, ETH_ALEN); | 1780 | memcpy(resp->da, mgmt->sa, ETH_ALEN); |
3036 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 1781 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
3037 | printk(KERN_DEBUG "%s: Sending ProbeResp to %s\n", | 1782 | printk(KERN_DEBUG "%s: Sending ProbeResp to %s\n", |
3038 | dev->name, print_mac(mac, resp->da)); | 1783 | sdata->dev->name, print_mac(mac, resp->da)); |
3039 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | 1784 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ |
3040 | ieee80211_sta_tx(dev, skb, 0); | 1785 | ieee80211_tx_skb(sdata, skb, 0); |
3041 | } | 1786 | } |
3042 | 1787 | ||
3043 | static void ieee80211_rx_mgmt_action(struct net_device *dev, | 1788 | static void ieee80211_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, |
3044 | struct ieee80211_if_sta *ifsta, | 1789 | struct ieee80211_if_sta *ifsta, |
3045 | struct ieee80211_mgmt *mgmt, | 1790 | struct ieee80211_mgmt *mgmt, |
3046 | size_t len, | 1791 | size_t len, |
3047 | struct ieee80211_rx_status *rx_status) | 1792 | struct ieee80211_rx_status *rx_status) |
3048 | { | 1793 | { |
3049 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1794 | /* currently we only handle mesh interface action frames here */ |
3050 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1795 | if (!ieee80211_vif_is_mesh(&sdata->vif)) |
3051 | |||
3052 | if (len < IEEE80211_MIN_ACTION_SIZE) | ||
3053 | return; | 1796 | return; |
3054 | 1797 | ||
3055 | switch (mgmt->u.action.category) { | 1798 | switch (mgmt->u.action.category) { |
3056 | case WLAN_CATEGORY_SPECTRUM_MGMT: | ||
3057 | if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) | ||
3058 | break; | ||
3059 | switch (mgmt->u.action.u.chan_switch.action_code) { | ||
3060 | case WLAN_ACTION_SPCT_MSR_REQ: | ||
3061 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
3062 | sizeof(mgmt->u.action.u.measurement))) | ||
3063 | break; | ||
3064 | ieee80211_sta_process_measurement_req(dev, mgmt, len); | ||
3065 | break; | ||
3066 | } | ||
3067 | break; | ||
3068 | case WLAN_CATEGORY_BACK: | ||
3069 | switch (mgmt->u.action.u.addba_req.action_code) { | ||
3070 | case WLAN_ACTION_ADDBA_REQ: | ||
3071 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
3072 | sizeof(mgmt->u.action.u.addba_req))) | ||
3073 | break; | ||
3074 | ieee80211_sta_process_addba_request(dev, mgmt, len); | ||
3075 | break; | ||
3076 | case WLAN_ACTION_ADDBA_RESP: | ||
3077 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
3078 | sizeof(mgmt->u.action.u.addba_resp))) | ||
3079 | break; | ||
3080 | ieee80211_sta_process_addba_resp(dev, mgmt, len); | ||
3081 | break; | ||
3082 | case WLAN_ACTION_DELBA: | ||
3083 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
3084 | sizeof(mgmt->u.action.u.delba))) | ||
3085 | break; | ||
3086 | ieee80211_sta_process_delba(dev, mgmt, len); | ||
3087 | break; | ||
3088 | } | ||
3089 | break; | ||
3090 | case PLINK_CATEGORY: | 1799 | case PLINK_CATEGORY: |
3091 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 1800 | mesh_rx_plink_frame(sdata, mgmt, len, rx_status); |
3092 | mesh_rx_plink_frame(dev, mgmt, len, rx_status); | ||
3093 | break; | 1801 | break; |
3094 | case MESH_PATH_SEL_CATEGORY: | 1802 | case MESH_PATH_SEL_CATEGORY: |
3095 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 1803 | mesh_rx_path_sel_frame(sdata, mgmt, len); |
3096 | mesh_rx_path_sel_frame(dev, mgmt, len); | ||
3097 | break; | 1804 | break; |
3098 | } | 1805 | } |
3099 | } | 1806 | } |
3100 | 1807 | ||
3101 | void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, | 1808 | void ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, |
3102 | struct ieee80211_rx_status *rx_status) | 1809 | struct ieee80211_rx_status *rx_status) |
3103 | { | 1810 | { |
3104 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1811 | struct ieee80211_local *local = sdata->local; |
3105 | struct ieee80211_sub_if_data *sdata; | ||
3106 | struct ieee80211_if_sta *ifsta; | 1812 | struct ieee80211_if_sta *ifsta; |
3107 | struct ieee80211_mgmt *mgmt; | 1813 | struct ieee80211_mgmt *mgmt; |
3108 | u16 fc; | 1814 | u16 fc; |
@@ -3110,7 +1816,6 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, | |||
3110 | if (skb->len < 24) | 1816 | if (skb->len < 24) |
3111 | goto fail; | 1817 | goto fail; |
3112 | 1818 | ||
3113 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3114 | ifsta = &sdata->u.sta; | 1819 | ifsta = &sdata->u.sta; |
3115 | 1820 | ||
3116 | mgmt = (struct ieee80211_mgmt *) skb->data; | 1821 | mgmt = (struct ieee80211_mgmt *) skb->data; |
@@ -3136,17 +1841,14 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, | |||
3136 | kfree_skb(skb); | 1841 | kfree_skb(skb); |
3137 | } | 1842 | } |
3138 | 1843 | ||
3139 | 1844 | static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |
3140 | static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev, | ||
3141 | struct sk_buff *skb) | 1845 | struct sk_buff *skb) |
3142 | { | 1846 | { |
3143 | struct ieee80211_rx_status *rx_status; | 1847 | struct ieee80211_rx_status *rx_status; |
3144 | struct ieee80211_sub_if_data *sdata; | ||
3145 | struct ieee80211_if_sta *ifsta; | 1848 | struct ieee80211_if_sta *ifsta; |
3146 | struct ieee80211_mgmt *mgmt; | 1849 | struct ieee80211_mgmt *mgmt; |
3147 | u16 fc; | 1850 | u16 fc; |
3148 | 1851 | ||
3149 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3150 | ifsta = &sdata->u.sta; | 1852 | ifsta = &sdata->u.sta; |
3151 | 1853 | ||
3152 | rx_status = (struct ieee80211_rx_status *) skb->cb; | 1854 | rx_status = (struct ieee80211_rx_status *) skb->cb; |
@@ -3155,17 +1857,17 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev, | |||
3155 | 1857 | ||
3156 | switch (fc & IEEE80211_FCTL_STYPE) { | 1858 | switch (fc & IEEE80211_FCTL_STYPE) { |
3157 | case IEEE80211_STYPE_PROBE_REQ: | 1859 | case IEEE80211_STYPE_PROBE_REQ: |
3158 | ieee80211_rx_mgmt_probe_req(dev, ifsta, mgmt, skb->len, | 1860 | ieee80211_rx_mgmt_probe_req(sdata, ifsta, mgmt, skb->len, |
3159 | rx_status); | 1861 | rx_status); |
3160 | break; | 1862 | break; |
3161 | case IEEE80211_STYPE_PROBE_RESP: | 1863 | case IEEE80211_STYPE_PROBE_RESP: |
3162 | ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status); | 1864 | ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len, rx_status); |
3163 | break; | 1865 | break; |
3164 | case IEEE80211_STYPE_BEACON: | 1866 | case IEEE80211_STYPE_BEACON: |
3165 | ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status); | 1867 | ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status); |
3166 | break; | 1868 | break; |
3167 | case IEEE80211_STYPE_AUTH: | 1869 | case IEEE80211_STYPE_AUTH: |
3168 | ieee80211_rx_mgmt_auth(dev, ifsta, mgmt, skb->len); | 1870 | ieee80211_rx_mgmt_auth(sdata, ifsta, mgmt, skb->len); |
3169 | break; | 1871 | break; |
3170 | case IEEE80211_STYPE_ASSOC_RESP: | 1872 | case IEEE80211_STYPE_ASSOC_RESP: |
3171 | ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 0); | 1873 | ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 0); |
@@ -3174,13 +1876,13 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev, | |||
3174 | ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 1); | 1876 | ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 1); |
3175 | break; | 1877 | break; |
3176 | case IEEE80211_STYPE_DEAUTH: | 1878 | case IEEE80211_STYPE_DEAUTH: |
3177 | ieee80211_rx_mgmt_deauth(dev, ifsta, mgmt, skb->len); | 1879 | ieee80211_rx_mgmt_deauth(sdata, ifsta, mgmt, skb->len); |
3178 | break; | 1880 | break; |
3179 | case IEEE80211_STYPE_DISASSOC: | 1881 | case IEEE80211_STYPE_DISASSOC: |
3180 | ieee80211_rx_mgmt_disassoc(dev, ifsta, mgmt, skb->len); | 1882 | ieee80211_rx_mgmt_disassoc(sdata, ifsta, mgmt, skb->len); |
3181 | break; | 1883 | break; |
3182 | case IEEE80211_STYPE_ACTION: | 1884 | case IEEE80211_STYPE_ACTION: |
3183 | ieee80211_rx_mgmt_action(dev, ifsta, mgmt, skb->len, rx_status); | 1885 | ieee80211_rx_mgmt_action(sdata, ifsta, mgmt, skb->len, rx_status); |
3184 | break; | 1886 | break; |
3185 | } | 1887 | } |
3186 | 1888 | ||
@@ -3188,47 +1890,11 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev, | |||
3188 | } | 1890 | } |
3189 | 1891 | ||
3190 | 1892 | ||
3191 | ieee80211_rx_result | 1893 | static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) |
3192 | ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb, | ||
3193 | struct ieee80211_rx_status *rx_status) | ||
3194 | { | 1894 | { |
3195 | struct ieee80211_mgmt *mgmt; | 1895 | struct ieee80211_local *local = sdata->local; |
3196 | __le16 fc; | ||
3197 | |||
3198 | if (skb->len < 2) | ||
3199 | return RX_DROP_UNUSABLE; | ||
3200 | |||
3201 | mgmt = (struct ieee80211_mgmt *) skb->data; | ||
3202 | fc = mgmt->frame_control; | ||
3203 | |||
3204 | if (ieee80211_is_ctl(fc)) | ||
3205 | return RX_CONTINUE; | ||
3206 | |||
3207 | if (skb->len < 24) | ||
3208 | return RX_DROP_MONITOR; | ||
3209 | |||
3210 | if (ieee80211_is_probe_resp(fc)) { | ||
3211 | ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status); | ||
3212 | dev_kfree_skb(skb); | ||
3213 | return RX_QUEUED; | ||
3214 | } | ||
3215 | |||
3216 | if (ieee80211_is_beacon(fc)) { | ||
3217 | ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status); | ||
3218 | dev_kfree_skb(skb); | ||
3219 | return RX_QUEUED; | ||
3220 | } | ||
3221 | |||
3222 | return RX_CONTINUE; | ||
3223 | } | ||
3224 | |||
3225 | |||
3226 | static int ieee80211_sta_active_ibss(struct net_device *dev) | ||
3227 | { | ||
3228 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
3229 | int active = 0; | 1896 | int active = 0; |
3230 | struct sta_info *sta; | 1897 | struct sta_info *sta; |
3231 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3232 | 1898 | ||
3233 | rcu_read_lock(); | 1899 | rcu_read_lock(); |
3234 | 1900 | ||
@@ -3247,9 +1913,9 @@ static int ieee80211_sta_active_ibss(struct net_device *dev) | |||
3247 | } | 1913 | } |
3248 | 1914 | ||
3249 | 1915 | ||
3250 | static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time) | 1916 | static void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, unsigned long exp_time) |
3251 | { | 1917 | { |
3252 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1918 | struct ieee80211_local *local = sdata->local; |
3253 | struct sta_info *sta, *tmp; | 1919 | struct sta_info *sta, *tmp; |
3254 | LIST_HEAD(tmp_list); | 1920 | LIST_HEAD(tmp_list); |
3255 | DECLARE_MAC_BUF(mac); | 1921 | DECLARE_MAC_BUF(mac); |
@@ -3260,7 +1926,7 @@ static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time) | |||
3260 | if (time_after(jiffies, sta->last_rx + exp_time)) { | 1926 | if (time_after(jiffies, sta->last_rx + exp_time)) { |
3261 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 1927 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
3262 | printk(KERN_DEBUG "%s: expiring inactive STA %s\n", | 1928 | printk(KERN_DEBUG "%s: expiring inactive STA %s\n", |
3263 | dev->name, print_mac(mac, sta->addr)); | 1929 | sdata->dev->name, print_mac(mac, sta->addr)); |
3264 | #endif | 1930 | #endif |
3265 | __sta_info_unlink(&sta); | 1931 | __sta_info_unlink(&sta); |
3266 | if (sta) | 1932 | if (sta) |
@@ -3273,30 +1939,29 @@ static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time) | |||
3273 | } | 1939 | } |
3274 | 1940 | ||
3275 | 1941 | ||
3276 | static void ieee80211_sta_merge_ibss(struct net_device *dev, | 1942 | static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata, |
3277 | struct ieee80211_if_sta *ifsta) | 1943 | struct ieee80211_if_sta *ifsta) |
3278 | { | 1944 | { |
3279 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); | 1945 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); |
3280 | 1946 | ||
3281 | ieee80211_sta_expire(dev, IEEE80211_IBSS_INACTIVITY_LIMIT); | 1947 | ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT); |
3282 | if (ieee80211_sta_active_ibss(dev)) | 1948 | if (ieee80211_sta_active_ibss(sdata)) |
3283 | return; | 1949 | return; |
3284 | 1950 | ||
3285 | printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " | 1951 | printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " |
3286 | "IBSS networks with same SSID (merge)\n", dev->name); | 1952 | "IBSS networks with same SSID (merge)\n", sdata->dev->name); |
3287 | ieee80211_sta_req_scan(dev, ifsta->ssid, ifsta->ssid_len); | 1953 | ieee80211_sta_req_scan(sdata, ifsta->ssid, ifsta->ssid_len); |
3288 | } | 1954 | } |
3289 | 1955 | ||
3290 | 1956 | ||
3291 | #ifdef CONFIG_MAC80211_MESH | 1957 | #ifdef CONFIG_MAC80211_MESH |
3292 | static void ieee80211_mesh_housekeeping(struct net_device *dev, | 1958 | static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata, |
3293 | struct ieee80211_if_sta *ifsta) | 1959 | struct ieee80211_if_sta *ifsta) |
3294 | { | 1960 | { |
3295 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3296 | bool free_plinks; | 1961 | bool free_plinks; |
3297 | 1962 | ||
3298 | ieee80211_sta_expire(dev, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); | 1963 | ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); |
3299 | mesh_path_expire(dev); | 1964 | mesh_path_expire(sdata); |
3300 | 1965 | ||
3301 | free_plinks = mesh_plink_availables(sdata); | 1966 | free_plinks = mesh_plink_availables(sdata); |
3302 | if (free_plinks != sdata->u.sta.accepting_plinks) | 1967 | if (free_plinks != sdata->u.sta.accepting_plinks) |
@@ -3307,12 +1972,11 @@ static void ieee80211_mesh_housekeeping(struct net_device *dev, | |||
3307 | } | 1972 | } |
3308 | 1973 | ||
3309 | 1974 | ||
3310 | void ieee80211_start_mesh(struct net_device *dev) | 1975 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) |
3311 | { | 1976 | { |
3312 | struct ieee80211_if_sta *ifsta; | 1977 | struct ieee80211_if_sta *ifsta; |
3313 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3314 | ifsta = &sdata->u.sta; | 1978 | ifsta = &sdata->u.sta; |
3315 | ifsta->state = IEEE80211_MESH_UP; | 1979 | ifsta->state = IEEE80211_STA_MLME_MESH_UP; |
3316 | ieee80211_sta_timer((unsigned long)sdata); | 1980 | ieee80211_sta_timer((unsigned long)sdata); |
3317 | ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); | 1981 | ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); |
3318 | } | 1982 | } |
@@ -3324,102 +1988,16 @@ void ieee80211_sta_timer(unsigned long data) | |||
3324 | struct ieee80211_sub_if_data *sdata = | 1988 | struct ieee80211_sub_if_data *sdata = |
3325 | (struct ieee80211_sub_if_data *) data; | 1989 | (struct ieee80211_sub_if_data *) data; |
3326 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 1990 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
3327 | struct ieee80211_local *local = wdev_priv(&sdata->wdev); | 1991 | struct ieee80211_local *local = sdata->local; |
3328 | 1992 | ||
3329 | set_bit(IEEE80211_STA_REQ_RUN, &ifsta->request); | 1993 | set_bit(IEEE80211_STA_REQ_RUN, &ifsta->request); |
3330 | queue_work(local->hw.workqueue, &ifsta->work); | 1994 | queue_work(local->hw.workqueue, &ifsta->work); |
3331 | } | 1995 | } |
3332 | 1996 | ||
3333 | void ieee80211_sta_work(struct work_struct *work) | 1997 | static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata, |
3334 | { | ||
3335 | struct ieee80211_sub_if_data *sdata = | ||
3336 | container_of(work, struct ieee80211_sub_if_data, u.sta.work); | ||
3337 | struct net_device *dev = sdata->dev; | ||
3338 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
3339 | struct ieee80211_if_sta *ifsta; | ||
3340 | struct sk_buff *skb; | ||
3341 | |||
3342 | if (!netif_running(dev)) | ||
3343 | return; | ||
3344 | |||
3345 | if (local->sta_sw_scanning || local->sta_hw_scanning) | ||
3346 | return; | ||
3347 | |||
3348 | if (WARN_ON(sdata->vif.type != IEEE80211_IF_TYPE_STA && | ||
3349 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | ||
3350 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) | ||
3351 | return; | ||
3352 | ifsta = &sdata->u.sta; | ||
3353 | |||
3354 | while ((skb = skb_dequeue(&ifsta->skb_queue))) | ||
3355 | ieee80211_sta_rx_queued_mgmt(dev, skb); | ||
3356 | |||
3357 | #ifdef CONFIG_MAC80211_MESH | ||
3358 | if (ifsta->preq_queue_len && | ||
3359 | time_after(jiffies, | ||
3360 | ifsta->last_preq + msecs_to_jiffies(ifsta->mshcfg.dot11MeshHWMPpreqMinInterval))) | ||
3361 | mesh_path_start_discovery(dev); | ||
3362 | #endif | ||
3363 | |||
3364 | if (ifsta->state != IEEE80211_AUTHENTICATE && | ||
3365 | ifsta->state != IEEE80211_ASSOCIATE && | ||
3366 | test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) { | ||
3367 | if (ifsta->scan_ssid_len) | ||
3368 | ieee80211_sta_start_scan(dev, ifsta->scan_ssid, ifsta->scan_ssid_len); | ||
3369 | else | ||
3370 | ieee80211_sta_start_scan(dev, NULL, 0); | ||
3371 | return; | ||
3372 | } | ||
3373 | |||
3374 | if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request)) { | ||
3375 | if (ieee80211_sta_config_auth(dev, ifsta)) | ||
3376 | return; | ||
3377 | clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request); | ||
3378 | } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request)) | ||
3379 | return; | ||
3380 | |||
3381 | switch (ifsta->state) { | ||
3382 | case IEEE80211_DISABLED: | ||
3383 | break; | ||
3384 | case IEEE80211_AUTHENTICATE: | ||
3385 | ieee80211_authenticate(dev, ifsta); | ||
3386 | break; | ||
3387 | case IEEE80211_ASSOCIATE: | ||
3388 | ieee80211_associate(dev, ifsta); | ||
3389 | break; | ||
3390 | case IEEE80211_ASSOCIATED: | ||
3391 | ieee80211_associated(dev, ifsta); | ||
3392 | break; | ||
3393 | case IEEE80211_IBSS_SEARCH: | ||
3394 | ieee80211_sta_find_ibss(dev, ifsta); | ||
3395 | break; | ||
3396 | case IEEE80211_IBSS_JOINED: | ||
3397 | ieee80211_sta_merge_ibss(dev, ifsta); | ||
3398 | break; | ||
3399 | #ifdef CONFIG_MAC80211_MESH | ||
3400 | case IEEE80211_MESH_UP: | ||
3401 | ieee80211_mesh_housekeeping(dev, ifsta); | ||
3402 | break; | ||
3403 | #endif | ||
3404 | default: | ||
3405 | WARN_ON(1); | ||
3406 | break; | ||
3407 | } | ||
3408 | |||
3409 | if (ieee80211_privacy_mismatch(dev, ifsta)) { | ||
3410 | printk(KERN_DEBUG "%s: privacy configuration mismatch and " | ||
3411 | "mixed-cell disabled - disassociate\n", dev->name); | ||
3412 | |||
3413 | ieee80211_send_disassoc(dev, ifsta, WLAN_REASON_UNSPECIFIED); | ||
3414 | ieee80211_set_disassoc(dev, ifsta, 0); | ||
3415 | } | ||
3416 | } | ||
3417 | |||
3418 | |||
3419 | static void ieee80211_sta_reset_auth(struct net_device *dev, | ||
3420 | struct ieee80211_if_sta *ifsta) | 1998 | struct ieee80211_if_sta *ifsta) |
3421 | { | 1999 | { |
3422 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2000 | struct ieee80211_local *local = sdata->local; |
3423 | 2001 | ||
3424 | if (local->ops->reset_tsf) { | 2002 | if (local->ops->reset_tsf) { |
3425 | /* Reset own TSF to allow time synchronization work. */ | 2003 | /* Reset own TSF to allow time synchronization work. */ |
@@ -3439,24 +2017,32 @@ static void ieee80211_sta_reset_auth(struct net_device *dev, | |||
3439 | ifsta->auth_alg = WLAN_AUTH_OPEN; | 2017 | ifsta->auth_alg = WLAN_AUTH_OPEN; |
3440 | ifsta->auth_transaction = -1; | 2018 | ifsta->auth_transaction = -1; |
3441 | ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; | 2019 | ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; |
3442 | ifsta->auth_tries = ifsta->assoc_tries = 0; | 2020 | ifsta->assoc_scan_tries = 0; |
3443 | netif_carrier_off(dev); | 2021 | ifsta->direct_probe_tries = 0; |
2022 | ifsta->auth_tries = 0; | ||
2023 | ifsta->assoc_tries = 0; | ||
2024 | netif_tx_stop_all_queues(sdata->dev); | ||
2025 | netif_carrier_off(sdata->dev); | ||
3444 | } | 2026 | } |
3445 | 2027 | ||
3446 | 2028 | ||
3447 | void ieee80211_sta_req_auth(struct net_device *dev, | 2029 | void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata, |
3448 | struct ieee80211_if_sta *ifsta) | 2030 | struct ieee80211_if_sta *ifsta) |
3449 | { | 2031 | { |
3450 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2032 | struct ieee80211_local *local = sdata->local; |
3451 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3452 | 2033 | ||
3453 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | 2034 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) |
3454 | return; | 2035 | return; |
3455 | 2036 | ||
3456 | if ((ifsta->flags & (IEEE80211_STA_BSSID_SET | | 2037 | if ((ifsta->flags & (IEEE80211_STA_BSSID_SET | |
3457 | IEEE80211_STA_AUTO_BSSID_SEL)) && | 2038 | IEEE80211_STA_AUTO_BSSID_SEL)) && |
3458 | (ifsta->flags & (IEEE80211_STA_SSID_SET | | 2039 | (ifsta->flags & (IEEE80211_STA_SSID_SET | |
3459 | IEEE80211_STA_AUTO_SSID_SEL))) { | 2040 | IEEE80211_STA_AUTO_SSID_SEL))) { |
2041 | |||
2042 | if (ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) | ||
2043 | ieee80211_set_disassoc(sdata, ifsta, true, true, | ||
2044 | WLAN_REASON_DEAUTH_LEAVING); | ||
2045 | |||
3460 | set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request); | 2046 | set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request); |
3461 | queue_work(local->hw.workqueue, &ifsta->work); | 2047 | queue_work(local->hw.workqueue, &ifsta->work); |
3462 | } | 2048 | } |
@@ -3492,81 +2078,11 @@ static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta, | |||
3492 | return 0; | 2078 | return 0; |
3493 | } | 2079 | } |
3494 | 2080 | ||
3495 | static int ieee80211_sta_config_auth(struct net_device *dev, | 2081 | static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata, |
3496 | struct ieee80211_if_sta *ifsta) | ||
3497 | { | ||
3498 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
3499 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3500 | struct ieee80211_sta_bss *bss, *selected = NULL; | ||
3501 | int top_rssi = 0, freq; | ||
3502 | |||
3503 | spin_lock_bh(&local->sta_bss_lock); | ||
3504 | freq = local->oper_channel->center_freq; | ||
3505 | list_for_each_entry(bss, &local->sta_bss_list, list) { | ||
3506 | if (!(bss->capability & WLAN_CAPABILITY_ESS)) | ||
3507 | continue; | ||
3508 | |||
3509 | if ((ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL | | ||
3510 | IEEE80211_STA_AUTO_BSSID_SEL | | ||
3511 | IEEE80211_STA_AUTO_CHANNEL_SEL)) && | ||
3512 | (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^ | ||
3513 | !!sdata->default_key)) | ||
3514 | continue; | ||
3515 | |||
3516 | if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) && | ||
3517 | bss->freq != freq) | ||
3518 | continue; | ||
3519 | |||
3520 | if (!(ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) && | ||
3521 | memcmp(bss->bssid, ifsta->bssid, ETH_ALEN)) | ||
3522 | continue; | ||
3523 | |||
3524 | if (!(ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) && | ||
3525 | !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len)) | ||
3526 | continue; | ||
3527 | |||
3528 | if (!selected || top_rssi < bss->signal) { | ||
3529 | selected = bss; | ||
3530 | top_rssi = bss->signal; | ||
3531 | } | ||
3532 | } | ||
3533 | if (selected) | ||
3534 | atomic_inc(&selected->users); | ||
3535 | spin_unlock_bh(&local->sta_bss_lock); | ||
3536 | |||
3537 | if (selected) { | ||
3538 | ieee80211_set_freq(dev, selected->freq); | ||
3539 | if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) | ||
3540 | ieee80211_sta_set_ssid(dev, selected->ssid, | ||
3541 | selected->ssid_len); | ||
3542 | ieee80211_sta_set_bssid(dev, selected->bssid); | ||
3543 | ieee80211_sta_def_wmm_params(dev, selected, 0); | ||
3544 | ieee80211_rx_bss_put(local, selected); | ||
3545 | ifsta->state = IEEE80211_AUTHENTICATE; | ||
3546 | ieee80211_sta_reset_auth(dev, ifsta); | ||
3547 | return 0; | ||
3548 | } else { | ||
3549 | if (ifsta->state != IEEE80211_AUTHENTICATE) { | ||
3550 | if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) | ||
3551 | ieee80211_sta_start_scan(dev, NULL, 0); | ||
3552 | else | ||
3553 | ieee80211_sta_start_scan(dev, ifsta->ssid, | ||
3554 | ifsta->ssid_len); | ||
3555 | ifsta->state = IEEE80211_AUTHENTICATE; | ||
3556 | set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request); | ||
3557 | } else | ||
3558 | ifsta->state = IEEE80211_DISABLED; | ||
3559 | } | ||
3560 | return -1; | ||
3561 | } | ||
3562 | |||
3563 | |||
3564 | static int ieee80211_sta_create_ibss(struct net_device *dev, | ||
3565 | struct ieee80211_if_sta *ifsta) | 2082 | struct ieee80211_if_sta *ifsta) |
3566 | { | 2083 | { |
3567 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2084 | struct ieee80211_local *local = sdata->local; |
3568 | struct ieee80211_sta_bss *bss; | 2085 | struct ieee80211_sta_bss *bss; |
3569 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3570 | struct ieee80211_supported_band *sband; | 2086 | struct ieee80211_supported_band *sband; |
3571 | u8 bssid[ETH_ALEN], *pos; | 2087 | u8 bssid[ETH_ALEN], *pos; |
3572 | int i; | 2088 | int i; |
@@ -3582,15 +2098,15 @@ static int ieee80211_sta_create_ibss(struct net_device *dev, | |||
3582 | * random number generator get different BSSID. */ | 2098 | * random number generator get different BSSID. */ |
3583 | get_random_bytes(bssid, ETH_ALEN); | 2099 | get_random_bytes(bssid, ETH_ALEN); |
3584 | for (i = 0; i < ETH_ALEN; i++) | 2100 | for (i = 0; i < ETH_ALEN; i++) |
3585 | bssid[i] ^= dev->dev_addr[i]; | 2101 | bssid[i] ^= sdata->dev->dev_addr[i]; |
3586 | bssid[0] &= ~0x01; | 2102 | bssid[0] &= ~0x01; |
3587 | bssid[0] |= 0x02; | 2103 | bssid[0] |= 0x02; |
3588 | #endif | 2104 | #endif |
3589 | 2105 | ||
3590 | printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n", | 2106 | printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n", |
3591 | dev->name, print_mac(mac, bssid)); | 2107 | sdata->dev->name, print_mac(mac, bssid)); |
3592 | 2108 | ||
3593 | bss = ieee80211_rx_bss_add(dev, bssid, | 2109 | bss = ieee80211_rx_bss_add(local, bssid, |
3594 | local->hw.conf.channel->center_freq, | 2110 | local->hw.conf.channel->center_freq, |
3595 | sdata->u.sta.ssid, sdata->u.sta.ssid_len); | 2111 | sdata->u.sta.ssid, sdata->u.sta.ssid_len); |
3596 | if (!bss) | 2112 | if (!bss) |
@@ -3617,16 +2133,16 @@ static int ieee80211_sta_create_ibss(struct net_device *dev, | |||
3617 | *pos++ = (u8) (rate / 5); | 2133 | *pos++ = (u8) (rate / 5); |
3618 | } | 2134 | } |
3619 | 2135 | ||
3620 | ret = ieee80211_sta_join_ibss(dev, ifsta, bss); | 2136 | ret = ieee80211_sta_join_ibss(sdata, ifsta, bss); |
3621 | ieee80211_rx_bss_put(local, bss); | 2137 | ieee80211_rx_bss_put(local, bss); |
3622 | return ret; | 2138 | return ret; |
3623 | } | 2139 | } |
3624 | 2140 | ||
3625 | 2141 | ||
3626 | static int ieee80211_sta_find_ibss(struct net_device *dev, | 2142 | static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata, |
3627 | struct ieee80211_if_sta *ifsta) | 2143 | struct ieee80211_if_sta *ifsta) |
3628 | { | 2144 | { |
3629 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2145 | struct ieee80211_local *local = sdata->local; |
3630 | struct ieee80211_sta_bss *bss; | 2146 | struct ieee80211_sta_bss *bss; |
3631 | int found = 0; | 2147 | int found = 0; |
3632 | u8 bssid[ETH_ALEN]; | 2148 | u8 bssid[ETH_ALEN]; |
@@ -3637,10 +2153,10 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, | |||
3637 | if (ifsta->ssid_len == 0) | 2153 | if (ifsta->ssid_len == 0) |
3638 | return -EINVAL; | 2154 | return -EINVAL; |
3639 | 2155 | ||
3640 | active_ibss = ieee80211_sta_active_ibss(dev); | 2156 | active_ibss = ieee80211_sta_active_ibss(sdata); |
3641 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 2157 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
3642 | printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", | 2158 | printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", |
3643 | dev->name, active_ibss); | 2159 | sdata->dev->name, active_ibss); |
3644 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | 2160 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ |
3645 | spin_lock_bh(&local->sta_bss_lock); | 2161 | spin_lock_bh(&local->sta_bss_lock); |
3646 | list_for_each_entry(bss, &local->sta_bss_list, list) { | 2162 | list_for_each_entry(bss, &local->sta_bss_list, list) { |
@@ -3675,15 +2191,15 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, | |||
3675 | else | 2191 | else |
3676 | search_freq = local->hw.conf.channel->center_freq; | 2192 | search_freq = local->hw.conf.channel->center_freq; |
3677 | 2193 | ||
3678 | bss = ieee80211_rx_bss_get(dev, bssid, search_freq, | 2194 | bss = ieee80211_rx_bss_get(local, bssid, search_freq, |
3679 | ifsta->ssid, ifsta->ssid_len); | 2195 | ifsta->ssid, ifsta->ssid_len); |
3680 | if (!bss) | 2196 | if (!bss) |
3681 | goto dont_join; | 2197 | goto dont_join; |
3682 | 2198 | ||
3683 | printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" | 2199 | printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" |
3684 | " based on configured SSID\n", | 2200 | " based on configured SSID\n", |
3685 | dev->name, print_mac(mac, bssid)); | 2201 | sdata->dev->name, print_mac(mac, bssid)); |
3686 | ret = ieee80211_sta_join_ibss(dev, ifsta, bss); | 2202 | ret = ieee80211_sta_join_ibss(sdata, ifsta, bss); |
3687 | ieee80211_rx_bss_put(local, bss); | 2203 | ieee80211_rx_bss_put(local, bss); |
3688 | return ret; | 2204 | return ret; |
3689 | } | 2205 | } |
@@ -3694,17 +2210,17 @@ dont_join: | |||
3694 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | 2210 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ |
3695 | 2211 | ||
3696 | /* Selected IBSS not found in current scan results - try to scan */ | 2212 | /* Selected IBSS not found in current scan results - try to scan */ |
3697 | if (ifsta->state == IEEE80211_IBSS_JOINED && | 2213 | if (ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED && |
3698 | !ieee80211_sta_active_ibss(dev)) { | 2214 | !ieee80211_sta_active_ibss(sdata)) { |
3699 | mod_timer(&ifsta->timer, jiffies + | 2215 | mod_timer(&ifsta->timer, jiffies + |
3700 | IEEE80211_IBSS_MERGE_INTERVAL); | 2216 | IEEE80211_IBSS_MERGE_INTERVAL); |
3701 | } else if (time_after(jiffies, local->last_scan_completed + | 2217 | } else if (time_after(jiffies, local->last_scan_completed + |
3702 | IEEE80211_SCAN_INTERVAL)) { | 2218 | IEEE80211_SCAN_INTERVAL)) { |
3703 | printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " | 2219 | printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " |
3704 | "join\n", dev->name); | 2220 | "join\n", sdata->dev->name); |
3705 | return ieee80211_sta_req_scan(dev, ifsta->ssid, | 2221 | return ieee80211_sta_req_scan(sdata, ifsta->ssid, |
3706 | ifsta->ssid_len); | 2222 | ifsta->ssid_len); |
3707 | } else if (ifsta->state != IEEE80211_IBSS_JOINED) { | 2223 | } else if (ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED) { |
3708 | int interval = IEEE80211_SCAN_INTERVAL; | 2224 | int interval = IEEE80211_SCAN_INTERVAL; |
3709 | 2225 | ||
3710 | if (time_after(jiffies, ifsta->ibss_join_req + | 2226 | if (time_after(jiffies, ifsta->ibss_join_req + |
@@ -3712,10 +2228,10 @@ dont_join: | |||
3712 | if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) && | 2228 | if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) && |
3713 | (!(local->oper_channel->flags & | 2229 | (!(local->oper_channel->flags & |
3714 | IEEE80211_CHAN_NO_IBSS))) | 2230 | IEEE80211_CHAN_NO_IBSS))) |
3715 | return ieee80211_sta_create_ibss(dev, ifsta); | 2231 | return ieee80211_sta_create_ibss(sdata, ifsta); |
3716 | if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) { | 2232 | if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) { |
3717 | printk(KERN_DEBUG "%s: IBSS not allowed on" | 2233 | printk(KERN_DEBUG "%s: IBSS not allowed on" |
3718 | " %d MHz\n", dev->name, | 2234 | " %d MHz\n", sdata->dev->name, |
3719 | local->hw.conf.channel->center_freq); | 2235 | local->hw.conf.channel->center_freq); |
3720 | } | 2236 | } |
3721 | 2237 | ||
@@ -3724,7 +2240,7 @@ dont_join: | |||
3724 | interval = IEEE80211_SCAN_INTERVAL_SLOW; | 2240 | interval = IEEE80211_SCAN_INTERVAL_SLOW; |
3725 | } | 2241 | } |
3726 | 2242 | ||
3727 | ifsta->state = IEEE80211_IBSS_SEARCH; | 2243 | ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH; |
3728 | mod_timer(&ifsta->timer, jiffies + interval); | 2244 | mod_timer(&ifsta->timer, jiffies + interval); |
3729 | return 0; | 2245 | return 0; |
3730 | } | 2246 | } |
@@ -3733,9 +2249,8 @@ dont_join: | |||
3733 | } | 2249 | } |
3734 | 2250 | ||
3735 | 2251 | ||
3736 | int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) | 2252 | int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len) |
3737 | { | 2253 | { |
3738 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3739 | struct ieee80211_if_sta *ifsta; | 2254 | struct ieee80211_if_sta *ifsta; |
3740 | int res; | 2255 | int res; |
3741 | 2256 | ||
@@ -3759,7 +2274,7 @@ int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) | |||
3759 | res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID); | 2274 | res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID); |
3760 | if (res) { | 2275 | if (res) { |
3761 | printk(KERN_DEBUG "%s: Failed to config new SSID to " | 2276 | printk(KERN_DEBUG "%s: Failed to config new SSID to " |
3762 | "the low-level driver\n", dev->name); | 2277 | "the low-level driver\n", sdata->dev->name); |
3763 | return res; | 2278 | return res; |
3764 | } | 2279 | } |
3765 | } | 2280 | } |
@@ -3772,17 +2287,16 @@ int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) | |||
3772 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && | 2287 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && |
3773 | !(ifsta->flags & IEEE80211_STA_BSSID_SET)) { | 2288 | !(ifsta->flags & IEEE80211_STA_BSSID_SET)) { |
3774 | ifsta->ibss_join_req = jiffies; | 2289 | ifsta->ibss_join_req = jiffies; |
3775 | ifsta->state = IEEE80211_IBSS_SEARCH; | 2290 | ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH; |
3776 | return ieee80211_sta_find_ibss(dev, ifsta); | 2291 | return ieee80211_sta_find_ibss(sdata, ifsta); |
3777 | } | 2292 | } |
3778 | 2293 | ||
3779 | return 0; | 2294 | return 0; |
3780 | } | 2295 | } |
3781 | 2296 | ||
3782 | 2297 | ||
3783 | int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len) | 2298 | int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len) |
3784 | { | 2299 | { |
3785 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3786 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 2300 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
3787 | memcpy(ssid, ifsta->ssid, ifsta->ssid_len); | 2301 | memcpy(ssid, ifsta->ssid, ifsta->ssid_len); |
3788 | *len = ifsta->ssid_len; | 2302 | *len = ifsta->ssid_len; |
@@ -3790,13 +2304,11 @@ int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len) | |||
3790 | } | 2304 | } |
3791 | 2305 | ||
3792 | 2306 | ||
3793 | int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid) | 2307 | int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid) |
3794 | { | 2308 | { |
3795 | struct ieee80211_sub_if_data *sdata; | ||
3796 | struct ieee80211_if_sta *ifsta; | 2309 | struct ieee80211_if_sta *ifsta; |
3797 | int res; | 2310 | int res; |
3798 | 2311 | ||
3799 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3800 | ifsta = &sdata->u.sta; | 2312 | ifsta = &sdata->u.sta; |
3801 | 2313 | ||
3802 | if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) { | 2314 | if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) { |
@@ -3809,7 +2321,7 @@ int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid) | |||
3809 | res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID); | 2321 | res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID); |
3810 | if (res) { | 2322 | if (res) { |
3811 | printk(KERN_DEBUG "%s: Failed to config new BSSID to " | 2323 | printk(KERN_DEBUG "%s: Failed to config new BSSID to " |
3812 | "the low-level driver\n", dev->name); | 2324 | "the low-level driver\n", sdata->dev->name); |
3813 | return res; | 2325 | return res; |
3814 | } | 2326 | } |
3815 | } | 2327 | } |
@@ -3823,530 +2335,8 @@ int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid) | |||
3823 | } | 2335 | } |
3824 | 2336 | ||
3825 | 2337 | ||
3826 | static void ieee80211_send_nullfunc(struct ieee80211_local *local, | 2338 | int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len) |
3827 | struct ieee80211_sub_if_data *sdata, | ||
3828 | int powersave) | ||
3829 | { | ||
3830 | struct sk_buff *skb; | ||
3831 | struct ieee80211_hdr *nullfunc; | ||
3832 | __le16 fc; | ||
3833 | |||
3834 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); | ||
3835 | if (!skb) { | ||
3836 | printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc " | ||
3837 | "frame\n", sdata->dev->name); | ||
3838 | return; | ||
3839 | } | ||
3840 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
3841 | |||
3842 | nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); | ||
3843 | memset(nullfunc, 0, 24); | ||
3844 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | | ||
3845 | IEEE80211_FCTL_TODS); | ||
3846 | if (powersave) | ||
3847 | fc |= cpu_to_le16(IEEE80211_FCTL_PM); | ||
3848 | nullfunc->frame_control = fc; | ||
3849 | memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN); | ||
3850 | memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); | ||
3851 | memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN); | ||
3852 | |||
3853 | ieee80211_sta_tx(sdata->dev, skb, 0); | ||
3854 | } | ||
3855 | |||
3856 | |||
3857 | static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) | ||
3858 | { | ||
3859 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | ||
3860 | ieee80211_vif_is_mesh(&sdata->vif)) | ||
3861 | ieee80211_sta_timer((unsigned long)sdata); | ||
3862 | } | ||
3863 | |||
3864 | void ieee80211_scan_completed(struct ieee80211_hw *hw) | ||
3865 | { | ||
3866 | struct ieee80211_local *local = hw_to_local(hw); | ||
3867 | struct net_device *dev = local->scan_dev; | ||
3868 | struct ieee80211_sub_if_data *sdata; | ||
3869 | union iwreq_data wrqu; | ||
3870 | |||
3871 | local->last_scan_completed = jiffies; | ||
3872 | memset(&wrqu, 0, sizeof(wrqu)); | ||
3873 | wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL); | ||
3874 | |||
3875 | if (local->sta_hw_scanning) { | ||
3876 | local->sta_hw_scanning = 0; | ||
3877 | if (ieee80211_hw_config(local)) | ||
3878 | printk(KERN_DEBUG "%s: failed to restore operational " | ||
3879 | "channel after scan\n", dev->name); | ||
3880 | /* Restart STA timer for HW scan case */ | ||
3881 | rcu_read_lock(); | ||
3882 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
3883 | ieee80211_restart_sta_timer(sdata); | ||
3884 | rcu_read_unlock(); | ||
3885 | |||
3886 | goto done; | ||
3887 | } | ||
3888 | |||
3889 | local->sta_sw_scanning = 0; | ||
3890 | if (ieee80211_hw_config(local)) | ||
3891 | printk(KERN_DEBUG "%s: failed to restore operational " | ||
3892 | "channel after scan\n", dev->name); | ||
3893 | |||
3894 | |||
3895 | netif_tx_lock_bh(local->mdev); | ||
3896 | netif_addr_lock(local->mdev); | ||
3897 | local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC; | ||
3898 | local->ops->configure_filter(local_to_hw(local), | ||
3899 | FIF_BCN_PRBRESP_PROMISC, | ||
3900 | &local->filter_flags, | ||
3901 | local->mdev->mc_count, | ||
3902 | local->mdev->mc_list); | ||
3903 | |||
3904 | netif_addr_unlock(local->mdev); | ||
3905 | netif_tx_unlock_bh(local->mdev); | ||
3906 | |||
3907 | rcu_read_lock(); | ||
3908 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | ||
3909 | /* Tell AP we're back */ | ||
3910 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA && | ||
3911 | sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) | ||
3912 | ieee80211_send_nullfunc(local, sdata, 0); | ||
3913 | |||
3914 | ieee80211_restart_sta_timer(sdata); | ||
3915 | |||
3916 | netif_wake_queue(sdata->dev); | ||
3917 | } | ||
3918 | rcu_read_unlock(); | ||
3919 | |||
3920 | done: | ||
3921 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3922 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | ||
3923 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
3924 | if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) || | ||
3925 | (!(ifsta->state == IEEE80211_IBSS_JOINED) && | ||
3926 | !ieee80211_sta_active_ibss(dev))) | ||
3927 | ieee80211_sta_find_ibss(dev, ifsta); | ||
3928 | } | ||
3929 | } | ||
3930 | EXPORT_SYMBOL(ieee80211_scan_completed); | ||
3931 | |||
3932 | void ieee80211_sta_scan_work(struct work_struct *work) | ||
3933 | { | ||
3934 | struct ieee80211_local *local = | ||
3935 | container_of(work, struct ieee80211_local, scan_work.work); | ||
3936 | struct net_device *dev = local->scan_dev; | ||
3937 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3938 | struct ieee80211_supported_band *sband; | ||
3939 | struct ieee80211_channel *chan; | ||
3940 | int skip; | ||
3941 | unsigned long next_delay = 0; | ||
3942 | |||
3943 | if (!local->sta_sw_scanning) | ||
3944 | return; | ||
3945 | |||
3946 | switch (local->scan_state) { | ||
3947 | case SCAN_SET_CHANNEL: | ||
3948 | /* | ||
3949 | * Get current scan band. scan_band may be IEEE80211_NUM_BANDS | ||
3950 | * after we successfully scanned the last channel of the last | ||
3951 | * band (and the last band is supported by the hw) | ||
3952 | */ | ||
3953 | if (local->scan_band < IEEE80211_NUM_BANDS) | ||
3954 | sband = local->hw.wiphy->bands[local->scan_band]; | ||
3955 | else | ||
3956 | sband = NULL; | ||
3957 | |||
3958 | /* | ||
3959 | * If we are at an unsupported band and have more bands | ||
3960 | * left to scan, advance to the next supported one. | ||
3961 | */ | ||
3962 | while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) { | ||
3963 | local->scan_band++; | ||
3964 | sband = local->hw.wiphy->bands[local->scan_band]; | ||
3965 | local->scan_channel_idx = 0; | ||
3966 | } | ||
3967 | |||
3968 | /* if no more bands/channels left, complete scan */ | ||
3969 | if (!sband || local->scan_channel_idx >= sband->n_channels) { | ||
3970 | ieee80211_scan_completed(local_to_hw(local)); | ||
3971 | return; | ||
3972 | } | ||
3973 | skip = 0; | ||
3974 | chan = &sband->channels[local->scan_channel_idx]; | ||
3975 | |||
3976 | if (chan->flags & IEEE80211_CHAN_DISABLED || | ||
3977 | (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && | ||
3978 | chan->flags & IEEE80211_CHAN_NO_IBSS)) | ||
3979 | skip = 1; | ||
3980 | |||
3981 | if (!skip) { | ||
3982 | local->scan_channel = chan; | ||
3983 | if (ieee80211_hw_config(local)) { | ||
3984 | printk(KERN_DEBUG "%s: failed to set freq to " | ||
3985 | "%d MHz for scan\n", dev->name, | ||
3986 | chan->center_freq); | ||
3987 | skip = 1; | ||
3988 | } | ||
3989 | } | ||
3990 | |||
3991 | /* advance state machine to next channel/band */ | ||
3992 | local->scan_channel_idx++; | ||
3993 | if (local->scan_channel_idx >= sband->n_channels) { | ||
3994 | /* | ||
3995 | * scan_band may end up == IEEE80211_NUM_BANDS, but | ||
3996 | * we'll catch that case above and complete the scan | ||
3997 | * if that is the case. | ||
3998 | */ | ||
3999 | local->scan_band++; | ||
4000 | local->scan_channel_idx = 0; | ||
4001 | } | ||
4002 | |||
4003 | if (skip) | ||
4004 | break; | ||
4005 | |||
4006 | next_delay = IEEE80211_PROBE_DELAY + | ||
4007 | usecs_to_jiffies(local->hw.channel_change_time); | ||
4008 | local->scan_state = SCAN_SEND_PROBE; | ||
4009 | break; | ||
4010 | case SCAN_SEND_PROBE: | ||
4011 | next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; | ||
4012 | local->scan_state = SCAN_SET_CHANNEL; | ||
4013 | |||
4014 | if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN) | ||
4015 | break; | ||
4016 | ieee80211_send_probe_req(dev, NULL, local->scan_ssid, | ||
4017 | local->scan_ssid_len); | ||
4018 | next_delay = IEEE80211_CHANNEL_TIME; | ||
4019 | break; | ||
4020 | } | ||
4021 | |||
4022 | if (local->sta_sw_scanning) | ||
4023 | queue_delayed_work(local->hw.workqueue, &local->scan_work, | ||
4024 | next_delay); | ||
4025 | } | ||
4026 | |||
4027 | |||
4028 | static int ieee80211_sta_start_scan(struct net_device *dev, | ||
4029 | u8 *ssid, size_t ssid_len) | ||
4030 | { | ||
4031 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
4032 | struct ieee80211_sub_if_data *sdata; | ||
4033 | |||
4034 | if (ssid_len > IEEE80211_MAX_SSID_LEN) | ||
4035 | return -EINVAL; | ||
4036 | |||
4037 | /* MLME-SCAN.request (page 118) page 144 (11.1.3.1) | ||
4038 | * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS | ||
4039 | * BSSID: MACAddress | ||
4040 | * SSID | ||
4041 | * ScanType: ACTIVE, PASSIVE | ||
4042 | * ProbeDelay: delay (in microseconds) to be used prior to transmitting | ||
4043 | * a Probe frame during active scanning | ||
4044 | * ChannelList | ||
4045 | * MinChannelTime (>= ProbeDelay), in TU | ||
4046 | * MaxChannelTime: (>= MinChannelTime), in TU | ||
4047 | */ | ||
4048 | |||
4049 | /* MLME-SCAN.confirm | ||
4050 | * BSSDescriptionSet | ||
4051 | * ResultCode: SUCCESS, INVALID_PARAMETERS | ||
4052 | */ | ||
4053 | |||
4054 | if (local->sta_sw_scanning || local->sta_hw_scanning) { | ||
4055 | if (local->scan_dev == dev) | ||
4056 | return 0; | ||
4057 | return -EBUSY; | ||
4058 | } | ||
4059 | |||
4060 | if (local->ops->hw_scan) { | ||
4061 | int rc = local->ops->hw_scan(local_to_hw(local), | ||
4062 | ssid, ssid_len); | ||
4063 | if (!rc) { | ||
4064 | local->sta_hw_scanning = 1; | ||
4065 | local->scan_dev = dev; | ||
4066 | } | ||
4067 | return rc; | ||
4068 | } | ||
4069 | |||
4070 | local->sta_sw_scanning = 1; | ||
4071 | |||
4072 | rcu_read_lock(); | ||
4073 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | ||
4074 | netif_stop_queue(sdata->dev); | ||
4075 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA && | ||
4076 | (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED)) | ||
4077 | ieee80211_send_nullfunc(local, sdata, 1); | ||
4078 | } | ||
4079 | rcu_read_unlock(); | ||
4080 | |||
4081 | if (ssid) { | ||
4082 | local->scan_ssid_len = ssid_len; | ||
4083 | memcpy(local->scan_ssid, ssid, ssid_len); | ||
4084 | } else | ||
4085 | local->scan_ssid_len = 0; | ||
4086 | local->scan_state = SCAN_SET_CHANNEL; | ||
4087 | local->scan_channel_idx = 0; | ||
4088 | local->scan_band = IEEE80211_BAND_2GHZ; | ||
4089 | local->scan_dev = dev; | ||
4090 | |||
4091 | netif_addr_lock_bh(local->mdev); | ||
4092 | local->filter_flags |= FIF_BCN_PRBRESP_PROMISC; | ||
4093 | local->ops->configure_filter(local_to_hw(local), | ||
4094 | FIF_BCN_PRBRESP_PROMISC, | ||
4095 | &local->filter_flags, | ||
4096 | local->mdev->mc_count, | ||
4097 | local->mdev->mc_list); | ||
4098 | netif_addr_unlock_bh(local->mdev); | ||
4099 | |||
4100 | /* TODO: start scan as soon as all nullfunc frames are ACKed */ | ||
4101 | queue_delayed_work(local->hw.workqueue, &local->scan_work, | ||
4102 | IEEE80211_CHANNEL_TIME); | ||
4103 | |||
4104 | return 0; | ||
4105 | } | ||
4106 | |||
4107 | |||
4108 | int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len) | ||
4109 | { | 2339 | { |
4110 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
4111 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
4112 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
4113 | |||
4114 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | ||
4115 | return ieee80211_sta_start_scan(dev, ssid, ssid_len); | ||
4116 | |||
4117 | if (local->sta_sw_scanning || local->sta_hw_scanning) { | ||
4118 | if (local->scan_dev == dev) | ||
4119 | return 0; | ||
4120 | return -EBUSY; | ||
4121 | } | ||
4122 | |||
4123 | ifsta->scan_ssid_len = ssid_len; | ||
4124 | if (ssid_len) | ||
4125 | memcpy(ifsta->scan_ssid, ssid, ssid_len); | ||
4126 | set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request); | ||
4127 | queue_work(local->hw.workqueue, &ifsta->work); | ||
4128 | return 0; | ||
4129 | } | ||
4130 | |||
4131 | static char * | ||
4132 | ieee80211_sta_scan_result(struct net_device *dev, | ||
4133 | struct iw_request_info *info, | ||
4134 | struct ieee80211_sta_bss *bss, | ||
4135 | char *current_ev, char *end_buf) | ||
4136 | { | ||
4137 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
4138 | struct iw_event iwe; | ||
4139 | |||
4140 | if (time_after(jiffies, | ||
4141 | bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE)) | ||
4142 | return current_ev; | ||
4143 | |||
4144 | memset(&iwe, 0, sizeof(iwe)); | ||
4145 | iwe.cmd = SIOCGIWAP; | ||
4146 | iwe.u.ap_addr.sa_family = ARPHRD_ETHER; | ||
4147 | memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN); | ||
4148 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
4149 | IW_EV_ADDR_LEN); | ||
4150 | |||
4151 | memset(&iwe, 0, sizeof(iwe)); | ||
4152 | iwe.cmd = SIOCGIWESSID; | ||
4153 | if (bss_mesh_cfg(bss)) { | ||
4154 | iwe.u.data.length = bss_mesh_id_len(bss); | ||
4155 | iwe.u.data.flags = 1; | ||
4156 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4157 | &iwe, bss_mesh_id(bss)); | ||
4158 | } else { | ||
4159 | iwe.u.data.length = bss->ssid_len; | ||
4160 | iwe.u.data.flags = 1; | ||
4161 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4162 | &iwe, bss->ssid); | ||
4163 | } | ||
4164 | |||
4165 | if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) | ||
4166 | || bss_mesh_cfg(bss)) { | ||
4167 | memset(&iwe, 0, sizeof(iwe)); | ||
4168 | iwe.cmd = SIOCGIWMODE; | ||
4169 | if (bss_mesh_cfg(bss)) | ||
4170 | iwe.u.mode = IW_MODE_MESH; | ||
4171 | else if (bss->capability & WLAN_CAPABILITY_ESS) | ||
4172 | iwe.u.mode = IW_MODE_MASTER; | ||
4173 | else | ||
4174 | iwe.u.mode = IW_MODE_ADHOC; | ||
4175 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, | ||
4176 | &iwe, IW_EV_UINT_LEN); | ||
4177 | } | ||
4178 | |||
4179 | memset(&iwe, 0, sizeof(iwe)); | ||
4180 | iwe.cmd = SIOCGIWFREQ; | ||
4181 | iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq); | ||
4182 | iwe.u.freq.e = 0; | ||
4183 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
4184 | IW_EV_FREQ_LEN); | ||
4185 | |||
4186 | memset(&iwe, 0, sizeof(iwe)); | ||
4187 | iwe.cmd = SIOCGIWFREQ; | ||
4188 | iwe.u.freq.m = bss->freq; | ||
4189 | iwe.u.freq.e = 6; | ||
4190 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
4191 | IW_EV_FREQ_LEN); | ||
4192 | memset(&iwe, 0, sizeof(iwe)); | ||
4193 | iwe.cmd = IWEVQUAL; | ||
4194 | iwe.u.qual.qual = bss->qual; | ||
4195 | iwe.u.qual.level = bss->signal; | ||
4196 | iwe.u.qual.noise = bss->noise; | ||
4197 | iwe.u.qual.updated = local->wstats_flags; | ||
4198 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
4199 | IW_EV_QUAL_LEN); | ||
4200 | |||
4201 | memset(&iwe, 0, sizeof(iwe)); | ||
4202 | iwe.cmd = SIOCGIWENCODE; | ||
4203 | if (bss->capability & WLAN_CAPABILITY_PRIVACY) | ||
4204 | iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; | ||
4205 | else | ||
4206 | iwe.u.data.flags = IW_ENCODE_DISABLED; | ||
4207 | iwe.u.data.length = 0; | ||
4208 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4209 | &iwe, ""); | ||
4210 | |||
4211 | if (bss && bss->wpa_ie) { | ||
4212 | memset(&iwe, 0, sizeof(iwe)); | ||
4213 | iwe.cmd = IWEVGENIE; | ||
4214 | iwe.u.data.length = bss->wpa_ie_len; | ||
4215 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4216 | &iwe, bss->wpa_ie); | ||
4217 | } | ||
4218 | |||
4219 | if (bss && bss->rsn_ie) { | ||
4220 | memset(&iwe, 0, sizeof(iwe)); | ||
4221 | iwe.cmd = IWEVGENIE; | ||
4222 | iwe.u.data.length = bss->rsn_ie_len; | ||
4223 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4224 | &iwe, bss->rsn_ie); | ||
4225 | } | ||
4226 | |||
4227 | if (bss && bss->ht_ie) { | ||
4228 | memset(&iwe, 0, sizeof(iwe)); | ||
4229 | iwe.cmd = IWEVGENIE; | ||
4230 | iwe.u.data.length = bss->ht_ie_len; | ||
4231 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4232 | &iwe, bss->ht_ie); | ||
4233 | } | ||
4234 | |||
4235 | if (bss && bss->supp_rates_len > 0) { | ||
4236 | /* display all supported rates in readable format */ | ||
4237 | char *p = current_ev + iwe_stream_lcp_len(info); | ||
4238 | int i; | ||
4239 | |||
4240 | memset(&iwe, 0, sizeof(iwe)); | ||
4241 | iwe.cmd = SIOCGIWRATE; | ||
4242 | /* Those two flags are ignored... */ | ||
4243 | iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; | ||
4244 | |||
4245 | for (i = 0; i < bss->supp_rates_len; i++) { | ||
4246 | iwe.u.bitrate.value = ((bss->supp_rates[i] & | ||
4247 | 0x7f) * 500000); | ||
4248 | p = iwe_stream_add_value(info, current_ev, p, | ||
4249 | end_buf, &iwe, IW_EV_PARAM_LEN); | ||
4250 | } | ||
4251 | current_ev = p; | ||
4252 | } | ||
4253 | |||
4254 | if (bss) { | ||
4255 | char *buf; | ||
4256 | buf = kmalloc(30, GFP_ATOMIC); | ||
4257 | if (buf) { | ||
4258 | memset(&iwe, 0, sizeof(iwe)); | ||
4259 | iwe.cmd = IWEVCUSTOM; | ||
4260 | sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp)); | ||
4261 | iwe.u.data.length = strlen(buf); | ||
4262 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4263 | end_buf, | ||
4264 | &iwe, buf); | ||
4265 | memset(&iwe, 0, sizeof(iwe)); | ||
4266 | iwe.cmd = IWEVCUSTOM; | ||
4267 | sprintf(buf, " Last beacon: %dms ago", | ||
4268 | jiffies_to_msecs(jiffies - bss->last_update)); | ||
4269 | iwe.u.data.length = strlen(buf); | ||
4270 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4271 | end_buf, &iwe, buf); | ||
4272 | kfree(buf); | ||
4273 | } | ||
4274 | } | ||
4275 | |||
4276 | if (bss_mesh_cfg(bss)) { | ||
4277 | char *buf; | ||
4278 | u8 *cfg = bss_mesh_cfg(bss); | ||
4279 | buf = kmalloc(50, GFP_ATOMIC); | ||
4280 | if (buf) { | ||
4281 | memset(&iwe, 0, sizeof(iwe)); | ||
4282 | iwe.cmd = IWEVCUSTOM; | ||
4283 | sprintf(buf, "Mesh network (version %d)", cfg[0]); | ||
4284 | iwe.u.data.length = strlen(buf); | ||
4285 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4286 | end_buf, | ||
4287 | &iwe, buf); | ||
4288 | sprintf(buf, "Path Selection Protocol ID: " | ||
4289 | "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3], | ||
4290 | cfg[4]); | ||
4291 | iwe.u.data.length = strlen(buf); | ||
4292 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4293 | end_buf, | ||
4294 | &iwe, buf); | ||
4295 | sprintf(buf, "Path Selection Metric ID: " | ||
4296 | "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7], | ||
4297 | cfg[8]); | ||
4298 | iwe.u.data.length = strlen(buf); | ||
4299 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4300 | end_buf, | ||
4301 | &iwe, buf); | ||
4302 | sprintf(buf, "Congestion Control Mode ID: " | ||
4303 | "0x%02X%02X%02X%02X", cfg[9], cfg[10], | ||
4304 | cfg[11], cfg[12]); | ||
4305 | iwe.u.data.length = strlen(buf); | ||
4306 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4307 | end_buf, | ||
4308 | &iwe, buf); | ||
4309 | sprintf(buf, "Channel Precedence: " | ||
4310 | "0x%02X%02X%02X%02X", cfg[13], cfg[14], | ||
4311 | cfg[15], cfg[16]); | ||
4312 | iwe.u.data.length = strlen(buf); | ||
4313 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4314 | end_buf, | ||
4315 | &iwe, buf); | ||
4316 | kfree(buf); | ||
4317 | } | ||
4318 | } | ||
4319 | |||
4320 | return current_ev; | ||
4321 | } | ||
4322 | |||
4323 | |||
4324 | int ieee80211_sta_scan_results(struct net_device *dev, | ||
4325 | struct iw_request_info *info, | ||
4326 | char *buf, size_t len) | ||
4327 | { | ||
4328 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
4329 | char *current_ev = buf; | ||
4330 | char *end_buf = buf + len; | ||
4331 | struct ieee80211_sta_bss *bss; | ||
4332 | |||
4333 | spin_lock_bh(&local->sta_bss_lock); | ||
4334 | list_for_each_entry(bss, &local->sta_bss_list, list) { | ||
4335 | if (buf + len - current_ev <= IW_EV_ADDR_LEN) { | ||
4336 | spin_unlock_bh(&local->sta_bss_lock); | ||
4337 | return -E2BIG; | ||
4338 | } | ||
4339 | current_ev = ieee80211_sta_scan_result(dev, info, bss, | ||
4340 | current_ev, end_buf); | ||
4341 | } | ||
4342 | spin_unlock_bh(&local->sta_bss_lock); | ||
4343 | return current_ev - buf; | ||
4344 | } | ||
4345 | |||
4346 | |||
4347 | int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len) | ||
4348 | { | ||
4349 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
4350 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 2340 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
4351 | 2341 | ||
4352 | kfree(ifsta->extra_ie); | 2342 | kfree(ifsta->extra_ie); |
@@ -4366,13 +2356,12 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len) | |||
4366 | } | 2356 | } |
4367 | 2357 | ||
4368 | 2358 | ||
4369 | struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev, | 2359 | struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, |
4370 | struct sk_buff *skb, u8 *bssid, | 2360 | struct sk_buff *skb, u8 *bssid, |
4371 | u8 *addr, u64 supp_rates) | 2361 | u8 *addr, u64 supp_rates) |
4372 | { | 2362 | { |
4373 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2363 | struct ieee80211_local *local = sdata->local; |
4374 | struct sta_info *sta; | 2364 | struct sta_info *sta; |
4375 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
4376 | DECLARE_MAC_BUF(mac); | 2365 | DECLARE_MAC_BUF(mac); |
4377 | int band = local->hw.conf.channel->band; | 2366 | int band = local->hw.conf.channel->band; |
4378 | 2367 | ||
@@ -4381,7 +2370,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev, | |||
4381 | if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { | 2370 | if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { |
4382 | if (net_ratelimit()) { | 2371 | if (net_ratelimit()) { |
4383 | printk(KERN_DEBUG "%s: No room for a new IBSS STA " | 2372 | printk(KERN_DEBUG "%s: No room for a new IBSS STA " |
4384 | "entry %s\n", dev->name, print_mac(mac, addr)); | 2373 | "entry %s\n", sdata->dev->name, print_mac(mac, addr)); |
4385 | } | 2374 | } |
4386 | return NULL; | 2375 | return NULL; |
4387 | } | 2376 | } |
@@ -4391,7 +2380,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev, | |||
4391 | 2380 | ||
4392 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 2381 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
4393 | printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n", | 2382 | printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n", |
4394 | wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name); | 2383 | wiphy_name(local->hw.wiphy), print_mac(mac, addr), sdata->dev->name); |
4395 | #endif | 2384 | #endif |
4396 | 2385 | ||
4397 | sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); | 2386 | sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); |
@@ -4400,10 +2389,9 @@ struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev, | |||
4400 | 2389 | ||
4401 | set_sta_flags(sta, WLAN_STA_AUTHORIZED); | 2390 | set_sta_flags(sta, WLAN_STA_AUTHORIZED); |
4402 | 2391 | ||
4403 | if (supp_rates) | 2392 | /* make sure mandatory rates are always added */ |
4404 | sta->supp_rates[band] = supp_rates; | 2393 | sta->supp_rates[band] = supp_rates | |
4405 | else | 2394 | ieee80211_sta_get_mandatory_rates(local, band); |
4406 | sta->supp_rates[band] = sdata->u.sta.supp_rates_bits[band]; | ||
4407 | 2395 | ||
4408 | rate_control_rate_init(sta, local); | 2396 | rate_control_rate_init(sta, local); |
4409 | 2397 | ||
@@ -4414,31 +2402,107 @@ struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev, | |||
4414 | } | 2402 | } |
4415 | 2403 | ||
4416 | 2404 | ||
4417 | int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason) | 2405 | static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata, |
2406 | struct ieee80211_if_sta *ifsta) | ||
2407 | { | ||
2408 | struct ieee80211_local *local = sdata->local; | ||
2409 | struct ieee80211_sta_bss *bss, *selected = NULL; | ||
2410 | int top_rssi = 0, freq; | ||
2411 | |||
2412 | spin_lock_bh(&local->sta_bss_lock); | ||
2413 | freq = local->oper_channel->center_freq; | ||
2414 | list_for_each_entry(bss, &local->sta_bss_list, list) { | ||
2415 | if (!(bss->capability & WLAN_CAPABILITY_ESS)) | ||
2416 | continue; | ||
2417 | |||
2418 | if ((ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL | | ||
2419 | IEEE80211_STA_AUTO_BSSID_SEL | | ||
2420 | IEEE80211_STA_AUTO_CHANNEL_SEL)) && | ||
2421 | (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^ | ||
2422 | !!sdata->default_key)) | ||
2423 | continue; | ||
2424 | |||
2425 | if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) && | ||
2426 | bss->freq != freq) | ||
2427 | continue; | ||
2428 | |||
2429 | if (!(ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) && | ||
2430 | memcmp(bss->bssid, ifsta->bssid, ETH_ALEN)) | ||
2431 | continue; | ||
2432 | |||
2433 | if (!(ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) && | ||
2434 | !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len)) | ||
2435 | continue; | ||
2436 | |||
2437 | if (!selected || top_rssi < bss->signal) { | ||
2438 | selected = bss; | ||
2439 | top_rssi = bss->signal; | ||
2440 | } | ||
2441 | } | ||
2442 | if (selected) | ||
2443 | atomic_inc(&selected->users); | ||
2444 | spin_unlock_bh(&local->sta_bss_lock); | ||
2445 | |||
2446 | if (selected) { | ||
2447 | ieee80211_set_freq(sdata, selected->freq); | ||
2448 | if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) | ||
2449 | ieee80211_sta_set_ssid(sdata, selected->ssid, | ||
2450 | selected->ssid_len); | ||
2451 | ieee80211_sta_set_bssid(sdata, selected->bssid); | ||
2452 | ieee80211_sta_def_wmm_params(sdata, selected); | ||
2453 | |||
2454 | /* Send out direct probe if no probe resp was received or | ||
2455 | * the one we have is outdated | ||
2456 | */ | ||
2457 | if (!selected->last_probe_resp || | ||
2458 | time_after(jiffies, selected->last_probe_resp | ||
2459 | + IEEE80211_SCAN_RESULT_EXPIRE)) | ||
2460 | ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE; | ||
2461 | else | ||
2462 | ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE; | ||
2463 | |||
2464 | ieee80211_rx_bss_put(local, selected); | ||
2465 | ieee80211_sta_reset_auth(sdata, ifsta); | ||
2466 | return 0; | ||
2467 | } else { | ||
2468 | if (ifsta->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) { | ||
2469 | ifsta->assoc_scan_tries++; | ||
2470 | if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) | ||
2471 | ieee80211_sta_start_scan(sdata, NULL, 0); | ||
2472 | else | ||
2473 | ieee80211_sta_start_scan(sdata, ifsta->ssid, | ||
2474 | ifsta->ssid_len); | ||
2475 | ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE; | ||
2476 | set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request); | ||
2477 | } else | ||
2478 | ifsta->state = IEEE80211_STA_MLME_DISABLED; | ||
2479 | } | ||
2480 | return -1; | ||
2481 | } | ||
2482 | |||
2483 | |||
2484 | int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason) | ||
4418 | { | 2485 | { |
4419 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
4420 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 2486 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
4421 | 2487 | ||
4422 | printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n", | 2488 | printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n", |
4423 | dev->name, reason); | 2489 | sdata->dev->name, reason); |
4424 | 2490 | ||
4425 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && | 2491 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && |
4426 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS) | 2492 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS) |
4427 | return -EINVAL; | 2493 | return -EINVAL; |
4428 | 2494 | ||
4429 | ieee80211_send_deauth(dev, ifsta, reason); | 2495 | ieee80211_set_disassoc(sdata, ifsta, true, true, reason); |
4430 | ieee80211_set_disassoc(dev, ifsta, 1); | ||
4431 | return 0; | 2496 | return 0; |
4432 | } | 2497 | } |
4433 | 2498 | ||
4434 | 2499 | ||
4435 | int ieee80211_sta_disassociate(struct net_device *dev, u16 reason) | 2500 | int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason) |
4436 | { | 2501 | { |
4437 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
4438 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 2502 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
4439 | 2503 | ||
4440 | printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n", | 2504 | printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n", |
4441 | dev->name, reason); | 2505 | sdata->dev->name, reason); |
4442 | 2506 | ||
4443 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | 2507 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) |
4444 | return -EINVAL; | 2508 | return -EINVAL; |
@@ -4446,8 +2510,7 @@ int ieee80211_sta_disassociate(struct net_device *dev, u16 reason) | |||
4446 | if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED)) | 2510 | if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED)) |
4447 | return -1; | 2511 | return -1; |
4448 | 2512 | ||
4449 | ieee80211_send_disassoc(dev, ifsta, reason); | 2513 | ieee80211_set_disassoc(sdata, ifsta, false, true, reason); |
4450 | ieee80211_set_disassoc(dev, ifsta, 0); | ||
4451 | return 0; | 2514 | return 0; |
4452 | } | 2515 | } |
4453 | 2516 | ||
@@ -4464,10 +2527,109 @@ void ieee80211_notify_mac(struct ieee80211_hw *hw, | |||
4464 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | 2527 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) |
4465 | continue; | 2528 | continue; |
4466 | 2529 | ||
4467 | ieee80211_sta_req_auth(sdata->dev, &sdata->u.sta); | 2530 | ieee80211_sta_req_auth(sdata, &sdata->u.sta); |
4468 | } | 2531 | } |
4469 | rcu_read_unlock(); | 2532 | rcu_read_unlock(); |
4470 | break; | 2533 | break; |
4471 | } | 2534 | } |
4472 | } | 2535 | } |
4473 | EXPORT_SYMBOL(ieee80211_notify_mac); | 2536 | EXPORT_SYMBOL(ieee80211_notify_mac); |
2537 | |||
2538 | void ieee80211_sta_work(struct work_struct *work) | ||
2539 | { | ||
2540 | struct ieee80211_sub_if_data *sdata = | ||
2541 | container_of(work, struct ieee80211_sub_if_data, u.sta.work); | ||
2542 | struct ieee80211_local *local = sdata->local; | ||
2543 | struct ieee80211_if_sta *ifsta; | ||
2544 | struct sk_buff *skb; | ||
2545 | |||
2546 | if (!netif_running(sdata->dev)) | ||
2547 | return; | ||
2548 | |||
2549 | if (local->sta_sw_scanning || local->sta_hw_scanning) | ||
2550 | return; | ||
2551 | |||
2552 | if (WARN_ON(sdata->vif.type != IEEE80211_IF_TYPE_STA && | ||
2553 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | ||
2554 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) | ||
2555 | return; | ||
2556 | ifsta = &sdata->u.sta; | ||
2557 | |||
2558 | while ((skb = skb_dequeue(&ifsta->skb_queue))) | ||
2559 | ieee80211_sta_rx_queued_mgmt(sdata, skb); | ||
2560 | |||
2561 | #ifdef CONFIG_MAC80211_MESH | ||
2562 | if (ifsta->preq_queue_len && | ||
2563 | time_after(jiffies, | ||
2564 | ifsta->last_preq + msecs_to_jiffies(ifsta->mshcfg.dot11MeshHWMPpreqMinInterval))) | ||
2565 | mesh_path_start_discovery(sdata); | ||
2566 | #endif | ||
2567 | |||
2568 | if (ifsta->state != IEEE80211_STA_MLME_DIRECT_PROBE && | ||
2569 | ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE && | ||
2570 | ifsta->state != IEEE80211_STA_MLME_ASSOCIATE && | ||
2571 | test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) { | ||
2572 | ieee80211_sta_start_scan(sdata, ifsta->scan_ssid, ifsta->scan_ssid_len); | ||
2573 | return; | ||
2574 | } | ||
2575 | |||
2576 | if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request)) { | ||
2577 | if (ieee80211_sta_config_auth(sdata, ifsta)) | ||
2578 | return; | ||
2579 | clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request); | ||
2580 | } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request)) | ||
2581 | return; | ||
2582 | |||
2583 | switch (ifsta->state) { | ||
2584 | case IEEE80211_STA_MLME_DISABLED: | ||
2585 | break; | ||
2586 | case IEEE80211_STA_MLME_DIRECT_PROBE: | ||
2587 | ieee80211_direct_probe(sdata, ifsta); | ||
2588 | break; | ||
2589 | case IEEE80211_STA_MLME_AUTHENTICATE: | ||
2590 | ieee80211_authenticate(sdata, ifsta); | ||
2591 | break; | ||
2592 | case IEEE80211_STA_MLME_ASSOCIATE: | ||
2593 | ieee80211_associate(sdata, ifsta); | ||
2594 | break; | ||
2595 | case IEEE80211_STA_MLME_ASSOCIATED: | ||
2596 | ieee80211_associated(sdata, ifsta); | ||
2597 | break; | ||
2598 | case IEEE80211_STA_MLME_IBSS_SEARCH: | ||
2599 | ieee80211_sta_find_ibss(sdata, ifsta); | ||
2600 | break; | ||
2601 | case IEEE80211_STA_MLME_IBSS_JOINED: | ||
2602 | ieee80211_sta_merge_ibss(sdata, ifsta); | ||
2603 | break; | ||
2604 | #ifdef CONFIG_MAC80211_MESH | ||
2605 | case IEEE80211_STA_MLME_MESH_UP: | ||
2606 | ieee80211_mesh_housekeeping(sdata, ifsta); | ||
2607 | break; | ||
2608 | #endif | ||
2609 | default: | ||
2610 | WARN_ON(1); | ||
2611 | break; | ||
2612 | } | ||
2613 | |||
2614 | if (ieee80211_privacy_mismatch(sdata, ifsta)) { | ||
2615 | printk(KERN_DEBUG "%s: privacy configuration mismatch and " | ||
2616 | "mixed-cell disabled - disassociate\n", sdata->dev->name); | ||
2617 | |||
2618 | ieee80211_set_disassoc(sdata, ifsta, false, true, | ||
2619 | WLAN_REASON_UNSPECIFIED); | ||
2620 | } | ||
2621 | } | ||
2622 | |||
2623 | void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) | ||
2624 | { | ||
2625 | struct ieee80211_sub_if_data *sdata = local->scan_sdata; | ||
2626 | struct ieee80211_if_sta *ifsta; | ||
2627 | |||
2628 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | ||
2629 | ifsta = &sdata->u.sta; | ||
2630 | if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) || | ||
2631 | (!(ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED) && | ||
2632 | !ieee80211_sta_active_ibss(sdata))) | ||
2633 | ieee80211_sta_find_ibss(sdata, ifsta); | ||
2634 | } | ||
2635 | } | ||
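[Editor's note] The mlme.c hunks above all apply one mechanical conversion: MLME helpers that previously took a struct net_device * now take the struct ieee80211_sub_if_data * directly, so the repeated IEEE80211_DEV_TO_SUB_IF()/wdev_priv() lookups disappear and callers reach the device and driver context through sdata->dev and sdata->local. Alongside that, the station MLME state names gain an IEEE80211_STA_MLME_ prefix and a new DIRECT_PROBE state is handled before AUTHENTICATE. The sketch below is illustrative only; example_old()/example_new() are not functions from this patch, they merely show the calling-convention change, assuming the usual mac80211 internal headers:

        /* illustrative sketch, not part of the patch: old calling convention */
        static void example_old(struct net_device *dev)
        {
                struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
                struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);

                printk(KERN_DEBUG "%s: queueing MLME work\n", dev->name);
                queue_work(local->hw.workqueue, &sdata->u.sta.work);
        }

        /* illustrative sketch: new calling convention used throughout mlme.c */
        static void example_new(struct ieee80211_sub_if_data *sdata)
        {
                struct ieee80211_local *local = sdata->local;

                printk(KERN_DEBUG "%s: queueing MLME work\n", sdata->dev->name);
                queue_work(local->hw.workqueue, &sdata->u.sta.work);
        }
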
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6db854505193..d0803797902b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -143,6 +143,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
143 | /* IEEE80211_RADIOTAP_FLAGS */ | 143 | /* IEEE80211_RADIOTAP_FLAGS */ |
144 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) | 144 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) |
145 | *pos |= IEEE80211_RADIOTAP_F_FCS; | 145 | *pos |= IEEE80211_RADIOTAP_F_FCS; |
146 | if (status->flag & RX_FLAG_SHORTPRE) | ||
147 | *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; | ||
146 | pos++; | 148 | pos++; |
147 | 149 | ||
148 | /* IEEE80211_RADIOTAP_RATE */ | 150 | /* IEEE80211_RADIOTAP_RATE */ |
@@ -155,8 +157,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
155 | if (status->band == IEEE80211_BAND_5GHZ) | 157 | if (status->band == IEEE80211_BAND_5GHZ) |
156 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | | 158 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | |
157 | IEEE80211_CHAN_5GHZ); | 159 | IEEE80211_CHAN_5GHZ); |
160 | else if (rate->flags & IEEE80211_RATE_ERP_G) | ||
161 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | | ||
162 | IEEE80211_CHAN_2GHZ); | ||
158 | else | 163 | else |
159 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_DYN | | 164 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK | |
160 | IEEE80211_CHAN_2GHZ); | 165 | IEEE80211_CHAN_2GHZ); |
161 | pos += 2; | 166 | pos += 2; |
162 | 167 | ||
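[Editor's note] The two radiotap hunks above add the short-preamble flag (RX_FLAG_SHORTPRE maps to IEEE80211_RADIOTAP_F_SHORTPRE) and stop tagging every 2.4 GHz frame as IEEE80211_CHAN_DYN. Because the side-by-side rendering splits the conditional, here is how the channel-flag selection reads after the patch; this is a restatement of the hunk, not new code:

        /* restated from the hunk above: per-rate radiotap channel flags */
        if (status->band == IEEE80211_BAND_5GHZ)
                *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
                                             IEEE80211_CHAN_5GHZ);
        else if (rate->flags & IEEE80211_RATE_ERP_G)
                *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
                                             IEEE80211_CHAN_2GHZ);
        else
                *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
                                             IEEE80211_CHAN_2GHZ);
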
@@ -399,11 +404,11 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) | |||
399 | struct sk_buff *skb = rx->skb; | 404 | struct sk_buff *skb = rx->skb; |
400 | 405 | ||
401 | if (unlikely(local->sta_hw_scanning)) | 406 | if (unlikely(local->sta_hw_scanning)) |
402 | return ieee80211_sta_rx_scan(rx->dev, skb, rx->status); | 407 | return ieee80211_sta_rx_scan(rx->sdata, skb, rx->status); |
403 | 408 | ||
404 | if (unlikely(local->sta_sw_scanning)) { | 409 | if (unlikely(local->sta_sw_scanning)) { |
405 | /* drop all the other packets during a software scan anyway */ | 410 | /* drop all the other packets during a software scan anyway */ |
406 | if (ieee80211_sta_rx_scan(rx->dev, skb, rx->status) | 411 | if (ieee80211_sta_rx_scan(rx->sdata, skb, rx->status) |
407 | != RX_QUEUED) | 412 | != RX_QUEUED) |
408 | dev_kfree_skb(skb); | 413 | dev_kfree_skb(skb); |
409 | return RX_QUEUED; | 414 | return RX_QUEUED; |
@@ -461,7 +466,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
461 | 466 | ||
462 | if (ieee80211_is_data(hdr->frame_control) && | 467 | if (ieee80211_is_data(hdr->frame_control) && |
463 | is_multicast_ether_addr(hdr->addr1) && | 468 | is_multicast_ether_addr(hdr->addr1) && |
464 | mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->dev)) | 469 | mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata)) |
465 | return RX_DROP_MONITOR; | 470 | return RX_DROP_MONITOR; |
466 | #undef msh_h_get | 471 | #undef msh_h_get |
467 | 472 | ||
@@ -816,7 +821,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, | |||
816 | 821 | ||
817 | static inline struct ieee80211_fragment_entry * | 822 | static inline struct ieee80211_fragment_entry * |
818 | ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, | 823 | ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, |
819 | u16 fc, unsigned int frag, unsigned int seq, | 824 | unsigned int frag, unsigned int seq, |
820 | int rx_queue, struct ieee80211_hdr *hdr) | 825 | int rx_queue, struct ieee80211_hdr *hdr) |
821 | { | 826 | { |
822 | struct ieee80211_fragment_entry *entry; | 827 | struct ieee80211_fragment_entry *entry; |
@@ -825,7 +830,6 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, | |||
825 | idx = sdata->fragment_next; | 830 | idx = sdata->fragment_next; |
826 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { | 831 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { |
827 | struct ieee80211_hdr *f_hdr; | 832 | struct ieee80211_hdr *f_hdr; |
828 | u16 f_fc; | ||
829 | 833 | ||
830 | idx--; | 834 | idx--; |
831 | if (idx < 0) | 835 | if (idx < 0) |
@@ -837,10 +841,13 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, | |||
837 | entry->last_frag + 1 != frag) | 841 | entry->last_frag + 1 != frag) |
838 | continue; | 842 | continue; |
839 | 843 | ||
840 | f_hdr = (struct ieee80211_hdr *) entry->skb_list.next->data; | 844 | f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data; |
841 | f_fc = le16_to_cpu(f_hdr->frame_control); | ||
842 | 845 | ||
843 | if ((fc & IEEE80211_FCTL_FTYPE) != (f_fc & IEEE80211_FCTL_FTYPE) || | 846 | /* |
847 | * Check ftype and addresses are equal, else check next fragment | ||
848 | */ | ||
849 | if (((hdr->frame_control ^ f_hdr->frame_control) & | ||
850 | cpu_to_le16(IEEE80211_FCTL_FTYPE)) || | ||
844 | compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || | 851 | compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || |
845 | compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) | 852 | compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) |
846 | continue; | 853 | continue; |
@@ -860,16 +867,18 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
860 | { | 867 | { |
861 | struct ieee80211_hdr *hdr; | 868 | struct ieee80211_hdr *hdr; |
862 | u16 sc; | 869 | u16 sc; |
870 | __le16 fc; | ||
863 | unsigned int frag, seq; | 871 | unsigned int frag, seq; |
864 | struct ieee80211_fragment_entry *entry; | 872 | struct ieee80211_fragment_entry *entry; |
865 | struct sk_buff *skb; | 873 | struct sk_buff *skb; |
866 | DECLARE_MAC_BUF(mac); | 874 | DECLARE_MAC_BUF(mac); |
867 | 875 | ||
868 | hdr = (struct ieee80211_hdr *) rx->skb->data; | 876 | hdr = (struct ieee80211_hdr *)rx->skb->data; |
877 | fc = hdr->frame_control; | ||
869 | sc = le16_to_cpu(hdr->seq_ctrl); | 878 | sc = le16_to_cpu(hdr->seq_ctrl); |
870 | frag = sc & IEEE80211_SCTL_FRAG; | 879 | frag = sc & IEEE80211_SCTL_FRAG; |
871 | 880 | ||
872 | if (likely((!(rx->fc & IEEE80211_FCTL_MOREFRAGS) && frag == 0) || | 881 | if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || |
873 | (rx->skb)->len < 24 || | 882 | (rx->skb)->len < 24 || |
874 | is_multicast_ether_addr(hdr->addr1))) { | 883 | is_multicast_ether_addr(hdr->addr1))) { |
875 | /* not fragmented */ | 884 | /* not fragmented */ |
@@ -884,7 +893,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
884 | entry = ieee80211_reassemble_add(rx->sdata, frag, seq, | 893 | entry = ieee80211_reassemble_add(rx->sdata, frag, seq, |
885 | rx->queue, &(rx->skb)); | 894 | rx->queue, &(rx->skb)); |
886 | if (rx->key && rx->key->conf.alg == ALG_CCMP && | 895 | if (rx->key && rx->key->conf.alg == ALG_CCMP && |
887 | (rx->fc & IEEE80211_FCTL_PROTECTED)) { | 896 | ieee80211_has_protected(fc)) { |
888 | /* Store CCMP PN so that we can verify that the next | 897 | /* Store CCMP PN so that we can verify that the next |
889 | * fragment has a sequential PN value. */ | 898 | * fragment has a sequential PN value. */ |
890 | entry->ccmp = 1; | 899 | entry->ccmp = 1; |
@@ -898,8 +907,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
898 | /* This is a fragment for a frame that should already be pending in | 907 | /* This is a fragment for a frame that should already be pending in |
899 | * fragment cache. Add this fragment to the end of the pending entry. | 908 | * fragment cache. Add this fragment to the end of the pending entry. |
900 | */ | 909 | */ |
901 | entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq, | 910 | entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr); |
902 | rx->queue, hdr); | ||
903 | if (!entry) { | 911 | if (!entry) { |
904 | I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); | 912 | I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); |
905 | return RX_DROP_MONITOR; | 913 | return RX_DROP_MONITOR; |
@@ -924,11 +932,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
924 | memcpy(entry->last_pn, pn, CCMP_PN_LEN); | 932 | memcpy(entry->last_pn, pn, CCMP_PN_LEN); |
925 | } | 933 | } |
926 | 934 | ||
927 | skb_pull(rx->skb, ieee80211_get_hdrlen(rx->fc)); | 935 | skb_pull(rx->skb, ieee80211_hdrlen(fc)); |
928 | __skb_queue_tail(&entry->skb_list, rx->skb); | 936 | __skb_queue_tail(&entry->skb_list, rx->skb); |
929 | entry->last_frag = frag; | 937 | entry->last_frag = frag; |
930 | entry->extra_len += rx->skb->len; | 938 | entry->extra_len += rx->skb->len; |
931 | if (rx->fc & IEEE80211_FCTL_MOREFRAGS) { | 939 | if (ieee80211_has_morefrags(fc)) { |
932 | rx->skb = NULL; | 940 | rx->skb = NULL; |
933 | return RX_QUEUED; | 941 | return RX_QUEUED; |
934 | } | 942 | } |
@@ -968,10 +976,9 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | |||
968 | struct sk_buff *skb; | 976 | struct sk_buff *skb; |
969 | int no_pending_pkts; | 977 | int no_pending_pkts; |
970 | DECLARE_MAC_BUF(mac); | 978 | DECLARE_MAC_BUF(mac); |
979 | __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control; | ||
971 | 980 | ||
972 | if (likely(!rx->sta || | 981 | if (likely(!rx->sta || !ieee80211_is_pspoll(fc) || |
973 | (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL || | ||
974 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL || | ||
975 | !(rx->flags & IEEE80211_RX_RA_MATCH))) | 982 | !(rx->flags & IEEE80211_RX_RA_MATCH))) |
976 | return RX_CONTINUE; | 983 | return RX_CONTINUE; |
977 | 984 | ||
@@ -1050,7 +1057,6 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx) | |||
1050 | ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN); | 1057 | ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN); |
1051 | hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN); | 1058 | hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN); |
1052 | /* change frame type to non QOS */ | 1059 | /* change frame type to non QOS */ |
1053 | rx->fc &= ~IEEE80211_STYPE_QOS_DATA; | ||
1054 | hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); | 1060 | hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); |
1055 | 1061 | ||
1056 | return RX_CONTINUE; | 1062 | return RX_CONTINUE; |
@@ -1067,7 +1073,7 @@ ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) | |||
1067 | } | 1073 | } |
1068 | 1074 | ||
1069 | static int | 1075 | static int |
1070 | ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx) | 1076 | ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) |
1071 | { | 1077 | { |
1072 | /* | 1078 | /* |
1073 | * Pass through unencrypted frames if the hardware has | 1079 | * Pass through unencrypted frames if the hardware has |
@@ -1077,9 +1083,8 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx) | |||
1077 | return 0; | 1083 | return 0; |
1078 | 1084 | ||
1079 | /* Drop unencrypted frames if key is set. */ | 1085 | /* Drop unencrypted frames if key is set. */ |
1080 | if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) && | 1086 | if (unlikely(!ieee80211_has_protected(fc) && |
1081 | (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && | 1087 | !ieee80211_is_nullfunc(fc) && |
1082 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC && | ||
1083 | (rx->key || rx->sdata->drop_unencrypted))) | 1088 | (rx->key || rx->sdata->drop_unencrypted))) |
1084 | return -EACCES; | 1089 | return -EACCES; |
1085 | 1090 | ||
@@ -1091,7 +1096,7 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1091 | { | 1096 | { |
1092 | struct net_device *dev = rx->dev; | 1097 | struct net_device *dev = rx->dev; |
1093 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 1098 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; |
1094 | u16 fc, hdrlen, ethertype; | 1099 | u16 hdrlen, ethertype; |
1095 | u8 *payload; | 1100 | u8 *payload; |
1096 | u8 dst[ETH_ALEN]; | 1101 | u8 dst[ETH_ALEN]; |
1097 | u8 src[ETH_ALEN] __aligned(2); | 1102 | u8 src[ETH_ALEN] __aligned(2); |
@@ -1102,12 +1107,10 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1102 | DECLARE_MAC_BUF(mac3); | 1107 | DECLARE_MAC_BUF(mac3); |
1103 | DECLARE_MAC_BUF(mac4); | 1108 | DECLARE_MAC_BUF(mac4); |
1104 | 1109 | ||
1105 | fc = rx->fc; | 1110 | if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) |
1106 | |||
1107 | if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) | ||
1108 | return -1; | 1111 | return -1; |
1109 | 1112 | ||
1110 | hdrlen = ieee80211_get_hdrlen(fc); | 1113 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
1111 | 1114 | ||
1112 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 1115 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
1113 | hdrlen += ieee80211_get_mesh_hdrlen( | 1116 | hdrlen += ieee80211_get_mesh_hdrlen( |
@@ -1122,41 +1125,28 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1122 | * 1 0 BSSID SA DA n/a | 1125 | * 1 0 BSSID SA DA n/a |
1123 | * 1 1 RA TA DA SA | 1126 | * 1 1 RA TA DA SA |
1124 | */ | 1127 | */ |
1128 | memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN); | ||
1129 | memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN); | ||
1125 | 1130 | ||
1126 | switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { | 1131 | switch (hdr->frame_control & |
1127 | case IEEE80211_FCTL_TODS: | 1132 | cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { |
1128 | /* BSSID SA DA */ | 1133 | case __constant_cpu_to_le16(IEEE80211_FCTL_TODS): |
1129 | memcpy(dst, hdr->addr3, ETH_ALEN); | ||
1130 | memcpy(src, hdr->addr2, ETH_ALEN); | ||
1131 | |||
1132 | if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP && | 1134 | if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP && |
1133 | sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) | 1135 | sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) |
1134 | return -1; | 1136 | return -1; |
1135 | break; | 1137 | break; |
1136 | case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): | 1138 | case __constant_cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): |
1137 | /* RA TA DA SA */ | 1139 | if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS && |
1138 | memcpy(dst, hdr->addr3, ETH_ALEN); | ||
1139 | memcpy(src, hdr->addr4, ETH_ALEN); | ||
1140 | |||
1141 | if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS && | ||
1142 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) | 1140 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) |
1143 | return -1; | 1141 | return -1; |
1144 | break; | 1142 | break; |
1145 | case IEEE80211_FCTL_FROMDS: | 1143 | case __constant_cpu_to_le16(IEEE80211_FCTL_FROMDS): |
1146 | /* DA BSSID SA */ | ||
1147 | memcpy(dst, hdr->addr1, ETH_ALEN); | ||
1148 | memcpy(src, hdr->addr3, ETH_ALEN); | ||
1149 | |||
1150 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA || | 1144 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA || |
1151 | (is_multicast_ether_addr(dst) && | 1145 | (is_multicast_ether_addr(dst) && |
1152 | !compare_ether_addr(src, dev->dev_addr))) | 1146 | !compare_ether_addr(src, dev->dev_addr))) |
1153 | return -1; | 1147 | return -1; |
1154 | break; | 1148 | break; |
1155 | case 0: | 1149 | case __constant_cpu_to_le16(0): |
1156 | /* DA SA BSSID */ | ||
1157 | memcpy(dst, hdr->addr1, ETH_ALEN); | ||
1158 | memcpy(src, hdr->addr2, ETH_ALEN); | ||
1159 | |||
1160 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS) | 1150 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS) |
1161 | return -1; | 1151 | return -1; |
1162 | break; | 1152 | break; |
@@ -1193,7 +1183,7 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1193 | /* | 1183 | /* |
1194 | * requires that rx->skb is a frame with ethernet header | 1184 | * requires that rx->skb is a frame with ethernet header |
1195 | */ | 1185 | */ |
1196 | static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx) | 1186 | static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) |
1197 | { | 1187 | { |
1198 | static const u8 pae_group_addr[ETH_ALEN] __aligned(2) | 1188 | static const u8 pae_group_addr[ETH_ALEN] __aligned(2) |
1199 | = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; | 1189 | = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; |
@@ -1209,7 +1199,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx) | |||
1209 | return true; | 1199 | return true; |
1210 | 1200 | ||
1211 | if (ieee80211_802_1x_port_control(rx) || | 1201 | if (ieee80211_802_1x_port_control(rx) || |
1212 | ieee80211_drop_unencrypted(rx)) | 1202 | ieee80211_drop_unencrypted(rx, fc)) |
1213 | return false; | 1203 | return false; |
1214 | 1204 | ||
1215 | return true; | 1205 | return true; |
@@ -1279,20 +1269,21 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
1279 | { | 1269 | { |
1280 | struct net_device *dev = rx->dev; | 1270 | struct net_device *dev = rx->dev; |
1281 | struct ieee80211_local *local = rx->local; | 1271 | struct ieee80211_local *local = rx->local; |
1282 | u16 fc, ethertype; | 1272 | u16 ethertype; |
1283 | u8 *payload; | 1273 | u8 *payload; |
1284 | struct sk_buff *skb = rx->skb, *frame = NULL; | 1274 | struct sk_buff *skb = rx->skb, *frame = NULL; |
1275 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
1276 | __le16 fc = hdr->frame_control; | ||
1285 | const struct ethhdr *eth; | 1277 | const struct ethhdr *eth; |
1286 | int remaining, err; | 1278 | int remaining, err; |
1287 | u8 dst[ETH_ALEN]; | 1279 | u8 dst[ETH_ALEN]; |
1288 | u8 src[ETH_ALEN]; | 1280 | u8 src[ETH_ALEN]; |
1289 | DECLARE_MAC_BUF(mac); | 1281 | DECLARE_MAC_BUF(mac); |
1290 | 1282 | ||
1291 | fc = rx->fc; | 1283 | if (unlikely(!ieee80211_is_data(fc))) |
1292 | if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) | ||
1293 | return RX_CONTINUE; | 1284 | return RX_CONTINUE; |
1294 | 1285 | ||
1295 | if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) | 1286 | if (unlikely(!ieee80211_is_data_present(fc))) |
1296 | return RX_DROP_MONITOR; | 1287 | return RX_DROP_MONITOR; |
1297 | 1288 | ||
1298 | if (!(rx->flags & IEEE80211_RX_AMSDU)) | 1289 | if (!(rx->flags & IEEE80211_RX_AMSDU)) |
@@ -1374,7 +1365,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
1374 | memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); | 1365 | memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); |
1375 | } | 1366 | } |
1376 | 1367 | ||
1377 | if (!ieee80211_frame_allowed(rx)) { | 1368 | if (!ieee80211_frame_allowed(rx, fc)) { |
1378 | if (skb == frame) /* last frame */ | 1369 | if (skb == frame) /* last frame */ |
1379 | return RX_DROP_UNUSABLE; | 1370 | return RX_DROP_UNUSABLE; |
1380 | dev_kfree_skb(frame); | 1371 | dev_kfree_skb(frame); |
@@ -1448,21 +1439,21 @@ static ieee80211_rx_result debug_noinline | |||
1448 | ieee80211_rx_h_data(struct ieee80211_rx_data *rx) | 1439 | ieee80211_rx_h_data(struct ieee80211_rx_data *rx) |
1449 | { | 1440 | { |
1450 | struct net_device *dev = rx->dev; | 1441 | struct net_device *dev = rx->dev; |
1451 | u16 fc; | 1442 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
1443 | __le16 fc = hdr->frame_control; | ||
1452 | int err; | 1444 | int err; |
1453 | 1445 | ||
1454 | fc = rx->fc; | 1446 | if (unlikely(!ieee80211_is_data(hdr->frame_control))) |
1455 | if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) | ||
1456 | return RX_CONTINUE; | 1447 | return RX_CONTINUE; |
1457 | 1448 | ||
1458 | if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) | 1449 | if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) |
1459 | return RX_DROP_MONITOR; | 1450 | return RX_DROP_MONITOR; |
1460 | 1451 | ||
1461 | err = ieee80211_data_to_8023(rx); | 1452 | err = ieee80211_data_to_8023(rx); |
1462 | if (unlikely(err)) | 1453 | if (unlikely(err)) |
1463 | return RX_DROP_UNUSABLE; | 1454 | return RX_DROP_UNUSABLE; |
1464 | 1455 | ||
1465 | if (!ieee80211_frame_allowed(rx)) | 1456 | if (!ieee80211_frame_allowed(rx, fc)) |
1466 | return RX_DROP_MONITOR; | 1457 | return RX_DROP_MONITOR; |
1467 | 1458 | ||
1468 | rx->skb->dev = dev; | 1459 | rx->skb->dev = dev; |
@@ -1520,22 +1511,95 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) | |||
1520 | } | 1511 | } |
1521 | 1512 | ||
1522 | static ieee80211_rx_result debug_noinline | 1513 | static ieee80211_rx_result debug_noinline |
1514 | ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | ||
1515 | { | ||
1516 | struct ieee80211_local *local = rx->local; | ||
1517 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | ||
1518 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; | ||
1519 | int len = rx->skb->len; | ||
1520 | |||
1521 | if (!ieee80211_is_action(mgmt->frame_control)) | ||
1522 | return RX_CONTINUE; | ||
1523 | |||
1524 | if (!rx->sta) | ||
1525 | return RX_DROP_MONITOR; | ||
1526 | |||
1527 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | ||
1528 | return RX_DROP_MONITOR; | ||
1529 | |||
1530 | /* all categories we currently handle have action_code */ | ||
1531 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) | ||
1532 | return RX_DROP_MONITOR; | ||
1533 | |||
1534 | /* | ||
1535 | * FIXME: revisit this, I'm sure we should handle most | ||
1536 | * of these frames in other modes as well! | ||
1537 | */ | ||
1538 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && | ||
1539 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS) | ||
1540 | return RX_DROP_MONITOR; | ||
1541 | |||
1542 | switch (mgmt->u.action.category) { | ||
1543 | case WLAN_CATEGORY_BACK: | ||
1544 | switch (mgmt->u.action.u.addba_req.action_code) { | ||
1545 | case WLAN_ACTION_ADDBA_REQ: | ||
1546 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1547 | sizeof(mgmt->u.action.u.addba_req))) | ||
1548 | return RX_DROP_MONITOR; | ||
1549 | ieee80211_process_addba_request(local, rx->sta, mgmt, len); | ||
1550 | break; | ||
1551 | case WLAN_ACTION_ADDBA_RESP: | ||
1552 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1553 | sizeof(mgmt->u.action.u.addba_resp))) | ||
1554 | return RX_DROP_MONITOR; | ||
1555 | ieee80211_process_addba_resp(local, rx->sta, mgmt, len); | ||
1556 | break; | ||
1557 | case WLAN_ACTION_DELBA: | ||
1558 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1559 | sizeof(mgmt->u.action.u.delba))) | ||
1560 | return RX_DROP_MONITOR; | ||
1561 | ieee80211_process_delba(sdata, rx->sta, mgmt, len); | ||
1562 | break; | ||
1563 | } | ||
1564 | break; | ||
1565 | case WLAN_CATEGORY_SPECTRUM_MGMT: | ||
1566 | if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) | ||
1567 | return RX_DROP_MONITOR; | ||
1568 | switch (mgmt->u.action.u.measurement.action_code) { | ||
1569 | case WLAN_ACTION_SPCT_MSR_REQ: | ||
1570 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1571 | sizeof(mgmt->u.action.u.measurement))) | ||
1572 | return RX_DROP_MONITOR; | ||
1573 | ieee80211_process_measurement_req(sdata, mgmt, len); | ||
1574 | break; | ||
1575 | } | ||
1576 | break; | ||
1577 | default: | ||
1578 | return RX_CONTINUE; | ||
1579 | } | ||
1580 | |||
1581 | rx->sta->rx_packets++; | ||
1582 | dev_kfree_skb(rx->skb); | ||
1583 | return RX_QUEUED; | ||
1584 | } | ||
1585 | |||
1586 | static ieee80211_rx_result debug_noinline | ||
1523 | ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) | 1587 | ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) |
1524 | { | 1588 | { |
1525 | struct ieee80211_sub_if_data *sdata; | 1589 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); |
1526 | 1590 | ||
1527 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | 1591 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) |
1528 | return RX_DROP_MONITOR; | 1592 | return RX_DROP_MONITOR; |
1529 | 1593 | ||
1530 | sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | 1594 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && |
1531 | if ((sdata->vif.type == IEEE80211_IF_TYPE_STA || | 1595 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS && |
1532 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS || | 1596 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) |
1533 | sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) && | ||
1534 | !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) | ||
1535 | ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->status); | ||
1536 | else | ||
1537 | return RX_DROP_MONITOR; | 1597 | return RX_DROP_MONITOR; |
1538 | 1598 | ||
1599 | if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) | ||
1600 | return RX_DROP_MONITOR; | ||
1601 | |||
1602 | ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status); | ||
1539 | return RX_QUEUED; | 1603 | return RX_QUEUED; |
1540 | } | 1604 | } |
1541 | 1605 | ||
@@ -1579,7 +1643,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev, | |||
1579 | !ieee80211_is_auth(hdr->frame_control)) | 1643 | !ieee80211_is_auth(hdr->frame_control)) |
1580 | goto ignore; | 1644 | goto ignore; |
1581 | 1645 | ||
1582 | mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr); | 1646 | mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr); |
1583 | ignore: | 1647 | ignore: |
1584 | dev_kfree_skb(rx->skb); | 1648 | dev_kfree_skb(rx->skb); |
1585 | rx->skb = NULL; | 1649 | rx->skb = NULL; |
@@ -1698,6 +1762,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, | |||
1698 | CALL_RXH(ieee80211_rx_h_mesh_fwding); | 1762 | CALL_RXH(ieee80211_rx_h_mesh_fwding); |
1699 | CALL_RXH(ieee80211_rx_h_data) | 1763 | CALL_RXH(ieee80211_rx_h_data) |
1700 | CALL_RXH(ieee80211_rx_h_ctrl) | 1764 | CALL_RXH(ieee80211_rx_h_ctrl) |
1765 | CALL_RXH(ieee80211_rx_h_action) | ||
1701 | CALL_RXH(ieee80211_rx_h_mgmt) | 1766 | CALL_RXH(ieee80211_rx_h_mgmt) |
1702 | 1767 | ||
1703 | #undef CALL_RXH | 1768 | #undef CALL_RXH |
@@ -1752,10 +1817,6 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1752 | if (!bssid) | 1817 | if (!bssid) |
1753 | return 0; | 1818 | return 0; |
1754 | if (ieee80211_is_beacon(hdr->frame_control)) { | 1819 | if (ieee80211_is_beacon(hdr->frame_control)) { |
1755 | if (!rx->sta) | ||
1756 | rx->sta = ieee80211_ibss_add_sta(sdata->dev, | ||
1757 | rx->skb, bssid, hdr->addr2, | ||
1758 | BIT(rx->status->rate_idx)); | ||
1759 | return 1; | 1820 | return 1; |
1760 | } | 1821 | } |
1761 | else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { | 1822 | else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { |
@@ -1769,7 +1830,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1769 | return 0; | 1830 | return 0; |
1770 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 1831 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
1771 | } else if (!rx->sta) | 1832 | } else if (!rx->sta) |
1772 | rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, | 1833 | rx->sta = ieee80211_ibss_add_sta(sdata, rx->skb, |
1773 | bssid, hdr->addr2, | 1834 | bssid, hdr->addr2, |
1774 | BIT(rx->status->rate_idx)); | 1835 | BIT(rx->status->rate_idx)); |
1775 | break; | 1836 | break; |
@@ -1827,23 +1888,20 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
1827 | struct ieee80211_sub_if_data *sdata; | 1888 | struct ieee80211_sub_if_data *sdata; |
1828 | struct ieee80211_hdr *hdr; | 1889 | struct ieee80211_hdr *hdr; |
1829 | struct ieee80211_rx_data rx; | 1890 | struct ieee80211_rx_data rx; |
1830 | u16 type; | ||
1831 | int prepares; | 1891 | int prepares; |
1832 | struct ieee80211_sub_if_data *prev = NULL; | 1892 | struct ieee80211_sub_if_data *prev = NULL; |
1833 | struct sk_buff *skb_new; | 1893 | struct sk_buff *skb_new; |
1834 | u8 *bssid; | 1894 | u8 *bssid; |
1835 | 1895 | ||
1836 | hdr = (struct ieee80211_hdr *) skb->data; | 1896 | hdr = (struct ieee80211_hdr *)skb->data; |
1837 | memset(&rx, 0, sizeof(rx)); | 1897 | memset(&rx, 0, sizeof(rx)); |
1838 | rx.skb = skb; | 1898 | rx.skb = skb; |
1839 | rx.local = local; | 1899 | rx.local = local; |
1840 | 1900 | ||
1841 | rx.status = status; | 1901 | rx.status = status; |
1842 | rx.rate = rate; | 1902 | rx.rate = rate; |
1843 | rx.fc = le16_to_cpu(hdr->frame_control); | ||
1844 | type = rx.fc & IEEE80211_FCTL_FTYPE; | ||
1845 | 1903 | ||
1846 | if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) | 1904 | if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control)) |
1847 | local->dot11ReceivedFragmentCount++; | 1905 | local->dot11ReceivedFragmentCount++; |
1848 | 1906 | ||
1849 | rx.sta = sta_info_get(local, hdr->addr2); | 1907 | rx.sta = sta_info_get(local, hdr->addr2); |
@@ -1904,14 +1962,12 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
1904 | prev->dev->name); | 1962 | prev->dev->name); |
1905 | continue; | 1963 | continue; |
1906 | } | 1964 | } |
1907 | rx.fc = le16_to_cpu(hdr->frame_control); | ||
1908 | ieee80211_invoke_rx_handlers(prev, &rx, skb_new); | 1965 | ieee80211_invoke_rx_handlers(prev, &rx, skb_new); |
1909 | prev = sdata; | 1966 | prev = sdata; |
1910 | } | 1967 | } |
1911 | if (prev) { | 1968 | if (prev) |
1912 | rx.fc = le16_to_cpu(hdr->frame_control); | ||
1913 | ieee80211_invoke_rx_handlers(prev, &rx, skb); | 1969 | ieee80211_invoke_rx_handlers(prev, &rx, skb); |
1914 | } else | 1970 | else |
1915 | dev_kfree_skb(skb); | 1971 | dev_kfree_skb(skb); |
1916 | } | 1972 | } |
1917 | 1973 | ||
@@ -2080,7 +2136,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, | |||
2080 | /* if this mpdu is fragmented - terminate rx aggregation session */ | 2136 | /* if this mpdu is fragmented - terminate rx aggregation session */ |
2081 | sc = le16_to_cpu(hdr->seq_ctrl); | 2137 | sc = le16_to_cpu(hdr->seq_ctrl); |
2082 | if (sc & IEEE80211_SCTL_FRAG) { | 2138 | if (sc & IEEE80211_SCTL_FRAG) { |
2083 | ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, | 2139 | ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->addr, |
2084 | tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); | 2140 | tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); |
2085 | ret = 1; | 2141 | ret = 1; |
2086 | goto end_reorder; | 2142 | goto end_reorder; |
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c new file mode 100644 index 000000000000..010781b806f3 --- /dev/null +++ b/net/mac80211/scan.c | |||
@@ -0,0 +1,933 @@ | |||
1 | /* | ||
2 | * Scanning implementation | ||
3 | * | ||
4 | * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> | ||
5 | * Copyright 2004, Instant802 Networks, Inc. | ||
6 | * Copyright 2005, Devicescape Software, Inc. | ||
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | ||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | /* TODO: | ||
16 | * order BSS list by RSSI(?) ("quality of AP") | ||
17 | * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE, | ||
18 | * SSID) | ||
19 | */ | ||
20 | |||
21 | #include <linux/wireless.h> | ||
22 | #include <linux/if_arp.h> | ||
23 | #include <net/mac80211.h> | ||
24 | #include <net/iw_handler.h> | ||
25 | |||
26 | #include "ieee80211_i.h" | ||
27 | #include "mesh.h" | ||
28 | |||
29 | #define IEEE80211_PROBE_DELAY (HZ / 33) | ||
30 | #define IEEE80211_CHANNEL_TIME (HZ / 33) | ||
31 | #define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5) | ||
32 | |||
33 | void ieee80211_rx_bss_list_init(struct ieee80211_local *local) | ||
34 | { | ||
35 | spin_lock_init(&local->sta_bss_lock); | ||
36 | INIT_LIST_HEAD(&local->sta_bss_list); | ||
37 | } | ||
38 | |||
39 | void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local) | ||
40 | { | ||
41 | struct ieee80211_sta_bss *bss, *tmp; | ||
42 | |||
43 | list_for_each_entry_safe(bss, tmp, &local->sta_bss_list, list) | ||
44 | ieee80211_rx_bss_put(local, bss); | ||
45 | } | ||
46 | |||
47 | struct ieee80211_sta_bss * | ||
48 | ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, | ||
49 | u8 *ssid, u8 ssid_len) | ||
50 | { | ||
51 | struct ieee80211_sta_bss *bss; | ||
52 | |||
53 | spin_lock_bh(&local->sta_bss_lock); | ||
54 | bss = local->sta_bss_hash[STA_HASH(bssid)]; | ||
55 | while (bss) { | ||
56 | if (!bss_mesh_cfg(bss) && | ||
57 | !memcmp(bss->bssid, bssid, ETH_ALEN) && | ||
58 | bss->freq == freq && | ||
59 | bss->ssid_len == ssid_len && | ||
60 | (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) { | ||
61 | atomic_inc(&bss->users); | ||
62 | break; | ||
63 | } | ||
64 | bss = bss->hnext; | ||
65 | } | ||
66 | spin_unlock_bh(&local->sta_bss_lock); | ||
67 | return bss; | ||
68 | } | ||
69 | |||
70 | /* Caller must hold local->sta_bss_lock */ | ||
71 | static void __ieee80211_rx_bss_hash_add(struct ieee80211_local *local, | ||
72 | struct ieee80211_sta_bss *bss) | ||
73 | { | ||
74 | u8 hash_idx; | ||
75 | |||
76 | if (bss_mesh_cfg(bss)) | ||
77 | hash_idx = mesh_id_hash(bss_mesh_id(bss), | ||
78 | bss_mesh_id_len(bss)); | ||
79 | else | ||
80 | hash_idx = STA_HASH(bss->bssid); | ||
81 | |||
82 | bss->hnext = local->sta_bss_hash[hash_idx]; | ||
83 | local->sta_bss_hash[hash_idx] = bss; | ||
84 | } | ||
85 | |||
86 | /* Caller must hold local->sta_bss_lock */ | ||
87 | static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local, | ||
88 | struct ieee80211_sta_bss *bss) | ||
89 | { | ||
90 | struct ieee80211_sta_bss *b, *prev = NULL; | ||
91 | b = local->sta_bss_hash[STA_HASH(bss->bssid)]; | ||
92 | while (b) { | ||
93 | if (b == bss) { | ||
94 | if (!prev) | ||
95 | local->sta_bss_hash[STA_HASH(bss->bssid)] = | ||
96 | bss->hnext; | ||
97 | else | ||
98 | prev->hnext = bss->hnext; | ||
99 | break; | ||
100 | } | ||
101 | prev = b; | ||
102 | b = b->hnext; | ||
103 | } | ||
104 | } | ||
105 | |||
106 | struct ieee80211_sta_bss * | ||
107 | ieee80211_rx_bss_add(struct ieee80211_local *local, u8 *bssid, int freq, | ||
108 | u8 *ssid, u8 ssid_len) | ||
109 | { | ||
110 | struct ieee80211_sta_bss *bss; | ||
111 | |||
112 | bss = kzalloc(sizeof(*bss), GFP_ATOMIC); | ||
113 | if (!bss) | ||
114 | return NULL; | ||
115 | atomic_set(&bss->users, 2); | ||
116 | memcpy(bss->bssid, bssid, ETH_ALEN); | ||
117 | bss->freq = freq; | ||
118 | if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) { | ||
119 | memcpy(bss->ssid, ssid, ssid_len); | ||
120 | bss->ssid_len = ssid_len; | ||
121 | } | ||
122 | |||
123 | spin_lock_bh(&local->sta_bss_lock); | ||
124 | /* TODO: order by RSSI? */ | ||
125 | list_add_tail(&bss->list, &local->sta_bss_list); | ||
126 | __ieee80211_rx_bss_hash_add(local, bss); | ||
127 | spin_unlock_bh(&local->sta_bss_lock); | ||
128 | return bss; | ||
129 | } | ||
130 | |||
131 | #ifdef CONFIG_MAC80211_MESH | ||
132 | static struct ieee80211_sta_bss * | ||
133 | ieee80211_rx_mesh_bss_get(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len, | ||
134 | u8 *mesh_cfg, int freq) | ||
135 | { | ||
136 | struct ieee80211_sta_bss *bss; | ||
137 | |||
138 | spin_lock_bh(&local->sta_bss_lock); | ||
139 | bss = local->sta_bss_hash[mesh_id_hash(mesh_id, mesh_id_len)]; | ||
140 | while (bss) { | ||
141 | if (bss_mesh_cfg(bss) && | ||
142 | !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) && | ||
143 | bss->freq == freq && | ||
144 | mesh_id_len == bss->mesh_id_len && | ||
145 | (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id, | ||
146 | mesh_id_len))) { | ||
147 | atomic_inc(&bss->users); | ||
148 | break; | ||
149 | } | ||
150 | bss = bss->hnext; | ||
151 | } | ||
152 | spin_unlock_bh(&local->sta_bss_lock); | ||
153 | return bss; | ||
154 | } | ||
155 | |||
156 | static struct ieee80211_sta_bss * | ||
157 | ieee80211_rx_mesh_bss_add(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len, | ||
158 | u8 *mesh_cfg, int mesh_config_len, int freq) | ||
159 | { | ||
160 | struct ieee80211_sta_bss *bss; | ||
161 | |||
162 | if (mesh_config_len != MESH_CFG_LEN) | ||
163 | return NULL; | ||
164 | |||
165 | bss = kzalloc(sizeof(*bss), GFP_ATOMIC); | ||
166 | if (!bss) | ||
167 | return NULL; | ||
168 | |||
169 | bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC); | ||
170 | if (!bss->mesh_cfg) { | ||
171 | kfree(bss); | ||
172 | return NULL; | ||
173 | } | ||
174 | |||
175 | if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) { | ||
176 | bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC); | ||
177 | if (!bss->mesh_id) { | ||
178 | kfree(bss->mesh_cfg); | ||
179 | kfree(bss); | ||
180 | return NULL; | ||
181 | } | ||
182 | memcpy(bss->mesh_id, mesh_id, mesh_id_len); | ||
183 | } | ||
184 | |||
185 | atomic_set(&bss->users, 2); | ||
186 | memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN); | ||
187 | bss->mesh_id_len = mesh_id_len; | ||
188 | bss->freq = freq; | ||
189 | spin_lock_bh(&local->sta_bss_lock); | ||
190 | /* TODO: order by RSSI? */ | ||
191 | list_add_tail(&bss->list, &local->sta_bss_list); | ||
192 | __ieee80211_rx_bss_hash_add(local, bss); | ||
193 | spin_unlock_bh(&local->sta_bss_lock); | ||
194 | return bss; | ||
195 | } | ||
196 | #endif | ||
197 | |||
198 | static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss) | ||
199 | { | ||
200 | kfree(bss->ies); | ||
201 | kfree(bss_mesh_id(bss)); | ||
202 | kfree(bss_mesh_cfg(bss)); | ||
203 | kfree(bss); | ||
204 | } | ||
205 | |||
206 | void ieee80211_rx_bss_put(struct ieee80211_local *local, | ||
207 | struct ieee80211_sta_bss *bss) | ||
208 | { | ||
209 | local_bh_disable(); | ||
210 | if (!atomic_dec_and_lock(&bss->users, &local->sta_bss_lock)) { | ||
211 | local_bh_enable(); | ||
212 | return; | ||
213 | } | ||
214 | |||
215 | __ieee80211_rx_bss_hash_del(local, bss); | ||
216 | list_del(&bss->list); | ||
217 | spin_unlock_bh(&local->sta_bss_lock); | ||
218 | ieee80211_rx_bss_free(bss); | ||
219 | } | ||
220 | |||
221 | struct ieee80211_sta_bss * | ||
222 | ieee80211_bss_info_update(struct ieee80211_local *local, | ||
223 | struct ieee80211_rx_status *rx_status, | ||
224 | struct ieee80211_mgmt *mgmt, | ||
225 | size_t len, | ||
226 | struct ieee802_11_elems *elems, | ||
227 | int freq, bool beacon) | ||
228 | { | ||
229 | struct ieee80211_sta_bss *bss; | ||
230 | int clen; | ||
231 | |||
232 | #ifdef CONFIG_MAC80211_MESH | ||
233 | if (elems->mesh_config) | ||
234 | bss = ieee80211_rx_mesh_bss_get(local, elems->mesh_id, | ||
235 | elems->mesh_id_len, elems->mesh_config, freq); | ||
236 | else | ||
237 | #endif | ||
238 | bss = ieee80211_rx_bss_get(local, mgmt->bssid, freq, | ||
239 | elems->ssid, elems->ssid_len); | ||
240 | if (!bss) { | ||
241 | #ifdef CONFIG_MAC80211_MESH | ||
242 | if (elems->mesh_config) | ||
243 | bss = ieee80211_rx_mesh_bss_add(local, elems->mesh_id, | ||
244 | elems->mesh_id_len, elems->mesh_config, | ||
245 | elems->mesh_config_len, freq); | ||
246 | else | ||
247 | #endif | ||
248 | bss = ieee80211_rx_bss_add(local, mgmt->bssid, freq, | ||
249 | elems->ssid, elems->ssid_len); | ||
250 | if (!bss) | ||
251 | return NULL; | ||
252 | } else { | ||
253 | #if 0 | ||
254 | /* TODO: order by RSSI? */ | ||
255 | spin_lock_bh(&local->sta_bss_lock); | ||
256 | list_move_tail(&bss->list, &local->sta_bss_list); | ||
257 | spin_unlock_bh(&local->sta_bss_lock); | ||
258 | #endif | ||
259 | } | ||
260 | |||
261 | /* save the ERP value so that it is available at association time */ | ||
262 | if (elems->erp_info && elems->erp_info_len >= 1) { | ||
263 | bss->erp_value = elems->erp_info[0]; | ||
264 | bss->has_erp_value = 1; | ||
265 | } | ||
266 | |||
267 | bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); | ||
268 | bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); | ||
269 | |||
270 | if (elems->tim) { | ||
271 | struct ieee80211_tim_ie *tim_ie = | ||
272 | (struct ieee80211_tim_ie *)elems->tim; | ||
273 | bss->dtim_period = tim_ie->dtim_period; | ||
274 | } | ||
275 | |||
276 | /* set default value for buggy APs */ | ||
277 | if (!elems->tim || bss->dtim_period == 0) | ||
278 | bss->dtim_period = 1; | ||
279 | |||
280 | bss->supp_rates_len = 0; | ||
281 | if (elems->supp_rates) { | ||
282 | clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; | ||
283 | if (clen > elems->supp_rates_len) | ||
284 | clen = elems->supp_rates_len; | ||
285 | memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates, | ||
286 | clen); | ||
287 | bss->supp_rates_len += clen; | ||
288 | } | ||
289 | if (elems->ext_supp_rates) { | ||
290 | clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; | ||
291 | if (clen > elems->ext_supp_rates_len) | ||
292 | clen = elems->ext_supp_rates_len; | ||
293 | memcpy(&bss->supp_rates[bss->supp_rates_len], | ||
294 | elems->ext_supp_rates, clen); | ||
295 | bss->supp_rates_len += clen; | ||
296 | } | ||
297 | |||
298 | bss->band = rx_status->band; | ||
299 | |||
300 | bss->timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); | ||
301 | bss->last_update = jiffies; | ||
302 | bss->signal = rx_status->signal; | ||
303 | bss->noise = rx_status->noise; | ||
304 | bss->qual = rx_status->qual; | ||
305 | bss->wmm_used = elems->wmm_param || elems->wmm_info; | ||
306 | |||
307 | if (!beacon) | ||
308 | bss->last_probe_resp = jiffies; | ||
309 | |||
310 | /* | ||
311 | * For probe responses, or if we don't have any information yet, | ||
312 | * use the IEs from the beacon. | ||
313 | */ | ||
314 | if (!bss->ies || !beacon) { | ||
315 | if (bss->ies == NULL || bss->ies_len < elems->total_len) { | ||
316 | kfree(bss->ies); | ||
317 | bss->ies = kmalloc(elems->total_len, GFP_ATOMIC); | ||
318 | } | ||
319 | if (bss->ies) { | ||
320 | memcpy(bss->ies, elems->ie_start, elems->total_len); | ||
321 | bss->ies_len = elems->total_len; | ||
322 | } else | ||
323 | bss->ies_len = 0; | ||
324 | } | ||
325 | |||
326 | return bss; | ||
327 | } | ||
328 | |||
329 | ieee80211_rx_result | ||
330 | ieee80211_sta_rx_scan(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | ||
331 | struct ieee80211_rx_status *rx_status) | ||
332 | { | ||
333 | struct ieee80211_mgmt *mgmt; | ||
334 | struct ieee80211_sta_bss *bss; | ||
335 | u8 *elements; | ||
336 | struct ieee80211_channel *channel; | ||
337 | size_t baselen; | ||
338 | int freq; | ||
339 | __le16 fc; | ||
340 | bool presp, beacon = false; | ||
341 | struct ieee802_11_elems elems; | ||
342 | |||
343 | if (skb->len < 2) | ||
344 | return RX_DROP_UNUSABLE; | ||
345 | |||
346 | mgmt = (struct ieee80211_mgmt *) skb->data; | ||
347 | fc = mgmt->frame_control; | ||
348 | |||
349 | if (ieee80211_is_ctl(fc)) | ||
350 | return RX_CONTINUE; | ||
351 | |||
352 | if (skb->len < 24) | ||
353 | return RX_DROP_MONITOR; | ||
354 | |||
355 | presp = ieee80211_is_probe_resp(fc); | ||
356 | if (presp) { | ||
357 | /* ignore ProbeResp to foreign address */ | ||
358 | if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) | ||
359 | return RX_DROP_MONITOR; | ||
360 | |||
361 | presp = true; | ||
362 | elements = mgmt->u.probe_resp.variable; | ||
363 | baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); | ||
364 | } else { | ||
365 | beacon = ieee80211_is_beacon(fc); | ||
366 | baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable); | ||
367 | elements = mgmt->u.beacon.variable; | ||
368 | } | ||
369 | |||
370 | if (!presp && !beacon) | ||
371 | return RX_CONTINUE; | ||
372 | |||
373 | if (baselen > skb->len) | ||
374 | return RX_DROP_MONITOR; | ||
375 | |||
376 | ieee802_11_parse_elems(elements, skb->len - baselen, &elems); | ||
377 | |||
378 | if (elems.ds_params && elems.ds_params_len == 1) | ||
379 | freq = ieee80211_channel_to_frequency(elems.ds_params[0]); | ||
380 | else | ||
381 | freq = rx_status->freq; | ||
382 | |||
383 | channel = ieee80211_get_channel(sdata->local->hw.wiphy, freq); | ||
384 | |||
385 | if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) | ||
386 | return RX_DROP_MONITOR; | ||
387 | |||
388 | bss = ieee80211_bss_info_update(sdata->local, rx_status, | ||
389 | mgmt, skb->len, &elems, | ||
390 | freq, beacon); | ||
391 | ieee80211_rx_bss_put(sdata->local, bss); | ||
392 | |||
393 | dev_kfree_skb(skb); | ||
394 | return RX_QUEUED; | ||
395 | } | ||
396 | |||
397 | static void ieee80211_send_nullfunc(struct ieee80211_local *local, | ||
398 | struct ieee80211_sub_if_data *sdata, | ||
399 | int powersave) | ||
400 | { | ||
401 | struct sk_buff *skb; | ||
402 | struct ieee80211_hdr *nullfunc; | ||
403 | __le16 fc; | ||
404 | |||
405 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); | ||
406 | if (!skb) { | ||
407 | printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc " | ||
408 | "frame\n", sdata->dev->name); | ||
409 | return; | ||
410 | } | ||
411 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
412 | |||
413 | nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); | ||
414 | memset(nullfunc, 0, 24); | ||
415 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | | ||
416 | IEEE80211_FCTL_TODS); | ||
417 | if (powersave) | ||
418 | fc |= cpu_to_le16(IEEE80211_FCTL_PM); | ||
419 | nullfunc->frame_control = fc; | ||
420 | memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN); | ||
421 | memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); | ||
422 | memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN); | ||
423 | |||
424 | ieee80211_tx_skb(sdata, skb, 0); | ||
425 | } | ||
426 | |||
427 | static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) | ||
428 | { | ||
429 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | ||
430 | ieee80211_vif_is_mesh(&sdata->vif)) | ||
431 | ieee80211_sta_timer((unsigned long)sdata); | ||
432 | } | ||
433 | |||
434 | void ieee80211_scan_completed(struct ieee80211_hw *hw) | ||
435 | { | ||
436 | struct ieee80211_local *local = hw_to_local(hw); | ||
437 | struct ieee80211_sub_if_data *sdata; | ||
438 | union iwreq_data wrqu; | ||
439 | |||
440 | local->last_scan_completed = jiffies; | ||
441 | memset(&wrqu, 0, sizeof(wrqu)); | ||
442 | wireless_send_event(local->scan_sdata->dev, SIOCGIWSCAN, &wrqu, NULL); | ||
443 | |||
444 | if (local->sta_hw_scanning) { | ||
445 | local->sta_hw_scanning = 0; | ||
446 | if (ieee80211_hw_config(local)) | ||
447 | printk(KERN_DEBUG "%s: failed to restore operational " | ||
448 | "channel after scan\n", wiphy_name(local->hw.wiphy)); | ||
449 | /* Restart STA timer for HW scan case */ | ||
450 | rcu_read_lock(); | ||
451 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
452 | ieee80211_restart_sta_timer(sdata); | ||
453 | rcu_read_unlock(); | ||
454 | |||
455 | goto done; | ||
456 | } | ||
457 | |||
458 | local->sta_sw_scanning = 0; | ||
459 | if (ieee80211_hw_config(local)) | ||
460 | printk(KERN_DEBUG "%s: failed to restore operational " | ||
461 | "channel after scan\n", wiphy_name(local->hw.wiphy)); | ||
462 | |||
463 | |||
464 | netif_tx_lock_bh(local->mdev); | ||
465 | netif_addr_lock(local->mdev); | ||
466 | local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC; | ||
467 | local->ops->configure_filter(local_to_hw(local), | ||
468 | FIF_BCN_PRBRESP_PROMISC, | ||
469 | &local->filter_flags, | ||
470 | local->mdev->mc_count, | ||
471 | local->mdev->mc_list); | ||
472 | |||
473 | netif_addr_unlock(local->mdev); | ||
474 | netif_tx_unlock_bh(local->mdev); | ||
475 | |||
476 | rcu_read_lock(); | ||
477 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | ||
478 | /* Tell AP we're back */ | ||
479 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { | ||
480 | if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) { | ||
481 | ieee80211_send_nullfunc(local, sdata, 0); | ||
482 | netif_tx_wake_all_queues(sdata->dev); | ||
483 | } | ||
484 | } else | ||
485 | netif_tx_wake_all_queues(sdata->dev); | ||
486 | |||
487 | ieee80211_restart_sta_timer(sdata); | ||
488 | } | ||
489 | rcu_read_unlock(); | ||
490 | |||
491 | done: | ||
492 | ieee80211_mlme_notify_scan_completed(local); | ||
493 | } | ||
494 | EXPORT_SYMBOL(ieee80211_scan_completed); | ||
495 | |||
496 | |||
497 | void ieee80211_sta_scan_work(struct work_struct *work) | ||
498 | { | ||
499 | struct ieee80211_local *local = | ||
500 | container_of(work, struct ieee80211_local, scan_work.work); | ||
501 | struct ieee80211_sub_if_data *sdata = local->scan_sdata; | ||
502 | struct ieee80211_supported_band *sband; | ||
503 | struct ieee80211_channel *chan; | ||
504 | int skip; | ||
505 | unsigned long next_delay = 0; | ||
506 | |||
507 | if (!local->sta_sw_scanning) | ||
508 | return; | ||
509 | |||
510 | switch (local->scan_state) { | ||
511 | case SCAN_SET_CHANNEL: | ||
512 | /* | ||
513 | * Get current scan band. scan_band may be IEEE80211_NUM_BANDS | ||
514 | * after we successfully scanned the last channel of the last | ||
515 | * band (and the last band is supported by the hw) | ||
516 | */ | ||
517 | if (local->scan_band < IEEE80211_NUM_BANDS) | ||
518 | sband = local->hw.wiphy->bands[local->scan_band]; | ||
519 | else | ||
520 | sband = NULL; | ||
521 | |||
522 | /* | ||
523 | * If we are at an unsupported band and have more bands | ||
524 | * left to scan, advance to the next supported one. | ||
525 | */ | ||
526 | while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) { | ||
527 | local->scan_band++; | ||
528 | sband = local->hw.wiphy->bands[local->scan_band]; | ||
529 | local->scan_channel_idx = 0; | ||
530 | } | ||
531 | |||
532 | /* if no more bands/channels left, complete scan */ | ||
533 | if (!sband || local->scan_channel_idx >= sband->n_channels) { | ||
534 | ieee80211_scan_completed(local_to_hw(local)); | ||
535 | return; | ||
536 | } | ||
537 | skip = 0; | ||
538 | chan = &sband->channels[local->scan_channel_idx]; | ||
539 | |||
540 | if (chan->flags & IEEE80211_CHAN_DISABLED || | ||
541 | (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && | ||
542 | chan->flags & IEEE80211_CHAN_NO_IBSS)) | ||
543 | skip = 1; | ||
544 | |||
545 | if (!skip) { | ||
546 | local->scan_channel = chan; | ||
547 | if (ieee80211_hw_config(local)) { | ||
548 | printk(KERN_DEBUG "%s: failed to set freq to " | ||
549 | "%d MHz for scan\n", wiphy_name(local->hw.wiphy), | ||
550 | chan->center_freq); | ||
551 | skip = 1; | ||
552 | } | ||
553 | } | ||
554 | |||
555 | /* advance state machine to next channel/band */ | ||
556 | local->scan_channel_idx++; | ||
557 | if (local->scan_channel_idx >= sband->n_channels) { | ||
558 | /* | ||
559 | * scan_band may end up == IEEE80211_NUM_BANDS, but | ||
560 | * we'll catch that case above and complete the scan | ||
561 | * if that is the case. | ||
562 | */ | ||
563 | local->scan_band++; | ||
564 | local->scan_channel_idx = 0; | ||
565 | } | ||
566 | |||
567 | if (skip) | ||
568 | break; | ||
569 | |||
570 | next_delay = IEEE80211_PROBE_DELAY + | ||
571 | usecs_to_jiffies(local->hw.channel_change_time); | ||
572 | local->scan_state = SCAN_SEND_PROBE; | ||
573 | break; | ||
574 | case SCAN_SEND_PROBE: | ||
575 | next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; | ||
576 | local->scan_state = SCAN_SET_CHANNEL; | ||
577 | |||
578 | if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN) | ||
579 | break; | ||
580 | ieee80211_send_probe_req(sdata, NULL, local->scan_ssid, | ||
581 | local->scan_ssid_len); | ||
582 | next_delay = IEEE80211_CHANNEL_TIME; | ||
583 | break; | ||
584 | } | ||
585 | |||
586 | if (local->sta_sw_scanning) | ||
587 | queue_delayed_work(local->hw.workqueue, &local->scan_work, | ||
588 | next_delay); | ||
589 | } | ||
590 | |||
591 | |||
592 | int ieee80211_sta_start_scan(struct ieee80211_sub_if_data *scan_sdata, | ||
593 | u8 *ssid, size_t ssid_len) | ||
594 | { | ||
595 | struct ieee80211_local *local = scan_sdata->local; | ||
596 | struct ieee80211_sub_if_data *sdata; | ||
597 | |||
598 | if (ssid_len > IEEE80211_MAX_SSID_LEN) | ||
599 | return -EINVAL; | ||
600 | |||
601 | /* MLME-SCAN.request (page 118) page 144 (11.1.3.1) | ||
602 | * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS | ||
603 | * BSSID: MACAddress | ||
604 | * SSID | ||
605 | * ScanType: ACTIVE, PASSIVE | ||
606 | * ProbeDelay: delay (in microseconds) to be used prior to transmitting | ||
607 | * a Probe frame during active scanning | ||
608 | * ChannelList | ||
609 | * MinChannelTime (>= ProbeDelay), in TU | ||
610 | * MaxChannelTime: (>= MinChannelTime), in TU | ||
611 | */ | ||
612 | |||
613 | /* MLME-SCAN.confirm | ||
614 | * BSSDescriptionSet | ||
615 | * ResultCode: SUCCESS, INVALID_PARAMETERS | ||
616 | */ | ||
617 | |||
618 | if (local->sta_sw_scanning || local->sta_hw_scanning) { | ||
619 | if (local->scan_sdata == scan_sdata) | ||
620 | return 0; | ||
621 | return -EBUSY; | ||
622 | } | ||
623 | |||
624 | if (local->ops->hw_scan) { | ||
625 | int rc = local->ops->hw_scan(local_to_hw(local), | ||
626 | ssid, ssid_len); | ||
627 | if (!rc) { | ||
628 | local->sta_hw_scanning = 1; | ||
629 | local->scan_sdata = scan_sdata; | ||
630 | } | ||
631 | return rc; | ||
632 | } | ||
633 | |||
634 | local->sta_sw_scanning = 1; | ||
635 | |||
636 | rcu_read_lock(); | ||
637 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | ||
638 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { | ||
639 | if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) { | ||
640 | netif_tx_stop_all_queues(sdata->dev); | ||
641 | ieee80211_send_nullfunc(local, sdata, 1); | ||
642 | } | ||
643 | } else | ||
644 | netif_tx_stop_all_queues(sdata->dev); | ||
645 | } | ||
646 | rcu_read_unlock(); | ||
647 | |||
648 | if (ssid) { | ||
649 | local->scan_ssid_len = ssid_len; | ||
650 | memcpy(local->scan_ssid, ssid, ssid_len); | ||
651 | } else | ||
652 | local->scan_ssid_len = 0; | ||
653 | local->scan_state = SCAN_SET_CHANNEL; | ||
654 | local->scan_channel_idx = 0; | ||
655 | local->scan_band = IEEE80211_BAND_2GHZ; | ||
656 | local->scan_sdata = scan_sdata; | ||
657 | |||
658 | netif_addr_lock_bh(local->mdev); | ||
659 | local->filter_flags |= FIF_BCN_PRBRESP_PROMISC; | ||
660 | local->ops->configure_filter(local_to_hw(local), | ||
661 | FIF_BCN_PRBRESP_PROMISC, | ||
662 | &local->filter_flags, | ||
663 | local->mdev->mc_count, | ||
664 | local->mdev->mc_list); | ||
665 | netif_addr_unlock_bh(local->mdev); | ||
666 | |||
667 | /* TODO: start scan as soon as all nullfunc frames are ACKed */ | ||
668 | queue_delayed_work(local->hw.workqueue, &local->scan_work, | ||
669 | IEEE80211_CHANNEL_TIME); | ||
670 | |||
671 | return 0; | ||
672 | } | ||
673 | |||
674 | |||
675 | int ieee80211_sta_req_scan(struct ieee80211_sub_if_data *sdata, u8 *ssid, size_t ssid_len) | ||
676 | { | ||
677 | struct ieee80211_local *local = sdata->local; | ||
678 | struct ieee80211_if_sta *ifsta; | ||
679 | |||
680 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | ||
681 | return ieee80211_sta_start_scan(sdata, ssid, ssid_len); | ||
682 | |||
683 | /* | ||
684 | * STA has a state machine that might need to defer scanning | ||
685 | * while it's trying to associate/authenticate, therefore we | ||
686 | * queue it up to the state machine in that case. | ||
687 | */ | ||
688 | |||
689 | if (local->sta_sw_scanning || local->sta_hw_scanning) { | ||
690 | if (local->scan_sdata == sdata) | ||
691 | return 0; | ||
692 | return -EBUSY; | ||
693 | } | ||
694 | |||
695 | ifsta = &sdata->u.sta; | ||
696 | |||
697 | ifsta->scan_ssid_len = ssid_len; | ||
698 | if (ssid_len) | ||
699 | memcpy(ifsta->scan_ssid, ssid, ssid_len); | ||
700 | set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request); | ||
701 | queue_work(local->hw.workqueue, &ifsta->work); | ||
702 | |||
703 | return 0; | ||
704 | } | ||
705 | |||
706 | |||
707 | static void ieee80211_sta_add_scan_ies(struct iw_request_info *info, | ||
708 | struct ieee80211_sta_bss *bss, | ||
709 | char **current_ev, char *end_buf) | ||
710 | { | ||
711 | u8 *pos, *end, *next; | ||
712 | struct iw_event iwe; | ||
713 | |||
714 | if (bss == NULL || bss->ies == NULL) | ||
715 | return; | ||
716 | |||
717 | /* | ||
718 | * If needed, fragment the IEs buffer (at IE boundaries) into short | ||
719 | * enough fragments to fit into IW_GENERIC_IE_MAX octet messages. | ||
720 | */ | ||
721 | pos = bss->ies; | ||
722 | end = pos + bss->ies_len; | ||
723 | |||
724 | while (end - pos > IW_GENERIC_IE_MAX) { | ||
725 | next = pos + 2 + pos[1]; | ||
726 | while (next + 2 + next[1] - pos < IW_GENERIC_IE_MAX) | ||
727 | next = next + 2 + next[1]; | ||
728 | |||
729 | memset(&iwe, 0, sizeof(iwe)); | ||
730 | iwe.cmd = IWEVGENIE; | ||
731 | iwe.u.data.length = next - pos; | ||
732 | *current_ev = iwe_stream_add_point(info, *current_ev, | ||
733 | end_buf, &iwe, pos); | ||
734 | |||
735 | pos = next; | ||
736 | } | ||
737 | |||
738 | if (end > pos) { | ||
739 | memset(&iwe, 0, sizeof(iwe)); | ||
740 | iwe.cmd = IWEVGENIE; | ||
741 | iwe.u.data.length = end - pos; | ||
742 | *current_ev = iwe_stream_add_point(info, *current_ev, | ||
743 | end_buf, &iwe, pos); | ||
744 | } | ||
745 | } | ||
746 | |||
747 | |||
748 | static char * | ||
749 | ieee80211_sta_scan_result(struct ieee80211_local *local, | ||
750 | struct iw_request_info *info, | ||
751 | struct ieee80211_sta_bss *bss, | ||
752 | char *current_ev, char *end_buf) | ||
753 | { | ||
754 | struct iw_event iwe; | ||
755 | char *buf; | ||
756 | |||
757 | if (time_after(jiffies, | ||
758 | bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE)) | ||
759 | return current_ev; | ||
760 | |||
761 | memset(&iwe, 0, sizeof(iwe)); | ||
762 | iwe.cmd = SIOCGIWAP; | ||
763 | iwe.u.ap_addr.sa_family = ARPHRD_ETHER; | ||
764 | memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN); | ||
765 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
766 | IW_EV_ADDR_LEN); | ||
767 | |||
768 | memset(&iwe, 0, sizeof(iwe)); | ||
769 | iwe.cmd = SIOCGIWESSID; | ||
770 | if (bss_mesh_cfg(bss)) { | ||
771 | iwe.u.data.length = bss_mesh_id_len(bss); | ||
772 | iwe.u.data.flags = 1; | ||
773 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
774 | &iwe, bss_mesh_id(bss)); | ||
775 | } else { | ||
776 | iwe.u.data.length = bss->ssid_len; | ||
777 | iwe.u.data.flags = 1; | ||
778 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
779 | &iwe, bss->ssid); | ||
780 | } | ||
781 | |||
782 | if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) | ||
783 | || bss_mesh_cfg(bss)) { | ||
784 | memset(&iwe, 0, sizeof(iwe)); | ||
785 | iwe.cmd = SIOCGIWMODE; | ||
786 | if (bss_mesh_cfg(bss)) | ||
787 | iwe.u.mode = IW_MODE_MESH; | ||
788 | else if (bss->capability & WLAN_CAPABILITY_ESS) | ||
789 | iwe.u.mode = IW_MODE_MASTER; | ||
790 | else | ||
791 | iwe.u.mode = IW_MODE_ADHOC; | ||
792 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, | ||
793 | &iwe, IW_EV_UINT_LEN); | ||
794 | } | ||
795 | |||
796 | memset(&iwe, 0, sizeof(iwe)); | ||
797 | iwe.cmd = SIOCGIWFREQ; | ||
798 | iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq); | ||
799 | iwe.u.freq.e = 0; | ||
800 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
801 | IW_EV_FREQ_LEN); | ||
802 | |||
803 | memset(&iwe, 0, sizeof(iwe)); | ||
804 | iwe.cmd = SIOCGIWFREQ; | ||
805 | iwe.u.freq.m = bss->freq; | ||
806 | iwe.u.freq.e = 6; | ||
807 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
808 | IW_EV_FREQ_LEN); | ||
809 | memset(&iwe, 0, sizeof(iwe)); | ||
810 | iwe.cmd = IWEVQUAL; | ||
811 | iwe.u.qual.qual = bss->qual; | ||
812 | iwe.u.qual.level = bss->signal; | ||
813 | iwe.u.qual.noise = bss->noise; | ||
814 | iwe.u.qual.updated = local->wstats_flags; | ||
815 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
816 | IW_EV_QUAL_LEN); | ||
817 | |||
818 | memset(&iwe, 0, sizeof(iwe)); | ||
819 | iwe.cmd = SIOCGIWENCODE; | ||
820 | if (bss->capability & WLAN_CAPABILITY_PRIVACY) | ||
821 | iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; | ||
822 | else | ||
823 | iwe.u.data.flags = IW_ENCODE_DISABLED; | ||
824 | iwe.u.data.length = 0; | ||
825 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
826 | &iwe, ""); | ||
827 | |||
828 | ieee80211_sta_add_scan_ies(info, bss, ¤t_ev, end_buf); | ||
829 | |||
830 | if (bss->supp_rates_len > 0) { | ||
831 | /* display all supported rates in readable format */ | ||
832 | char *p = current_ev + iwe_stream_lcp_len(info); | ||
833 | int i; | ||
834 | |||
835 | memset(&iwe, 0, sizeof(iwe)); | ||
836 | iwe.cmd = SIOCGIWRATE; | ||
837 | /* Those two flags are ignored... */ | ||
838 | iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; | ||
839 | |||
840 | for (i = 0; i < bss->supp_rates_len; i++) { | ||
841 | iwe.u.bitrate.value = ((bss->supp_rates[i] & | ||
842 | 0x7f) * 500000); | ||
843 | p = iwe_stream_add_value(info, current_ev, p, | ||
844 | end_buf, &iwe, IW_EV_PARAM_LEN); | ||
845 | } | ||
846 | current_ev = p; | ||
847 | } | ||
848 | |||
849 | buf = kmalloc(30, GFP_ATOMIC); | ||
850 | if (buf) { | ||
851 | memset(&iwe, 0, sizeof(iwe)); | ||
852 | iwe.cmd = IWEVCUSTOM; | ||
853 | sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp)); | ||
854 | iwe.u.data.length = strlen(buf); | ||
855 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
856 | &iwe, buf); | ||
857 | memset(&iwe, 0, sizeof(iwe)); | ||
858 | iwe.cmd = IWEVCUSTOM; | ||
859 | sprintf(buf, " Last beacon: %dms ago", | ||
860 | jiffies_to_msecs(jiffies - bss->last_update)); | ||
861 | iwe.u.data.length = strlen(buf); | ||
862 | current_ev = iwe_stream_add_point(info, current_ev, | ||
863 | end_buf, &iwe, buf); | ||
864 | kfree(buf); | ||
865 | } | ||
866 | |||
867 | if (bss_mesh_cfg(bss)) { | ||
868 | u8 *cfg = bss_mesh_cfg(bss); | ||
869 | buf = kmalloc(50, GFP_ATOMIC); | ||
870 | if (buf) { | ||
871 | memset(&iwe, 0, sizeof(iwe)); | ||
872 | iwe.cmd = IWEVCUSTOM; | ||
873 | sprintf(buf, "Mesh network (version %d)", cfg[0]); | ||
874 | iwe.u.data.length = strlen(buf); | ||
875 | current_ev = iwe_stream_add_point(info, current_ev, | ||
876 | end_buf, | ||
877 | &iwe, buf); | ||
878 | sprintf(buf, "Path Selection Protocol ID: " | ||
879 | "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3], | ||
880 | cfg[4]); | ||
881 | iwe.u.data.length = strlen(buf); | ||
882 | current_ev = iwe_stream_add_point(info, current_ev, | ||
883 | end_buf, | ||
884 | &iwe, buf); | ||
885 | sprintf(buf, "Path Selection Metric ID: " | ||
886 | "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7], | ||
887 | cfg[8]); | ||
888 | iwe.u.data.length = strlen(buf); | ||
889 | current_ev = iwe_stream_add_point(info, current_ev, | ||
890 | end_buf, | ||
891 | &iwe, buf); | ||
892 | sprintf(buf, "Congestion Control Mode ID: " | ||
893 | "0x%02X%02X%02X%02X", cfg[9], cfg[10], | ||
894 | cfg[11], cfg[12]); | ||
895 | iwe.u.data.length = strlen(buf); | ||
896 | current_ev = iwe_stream_add_point(info, current_ev, | ||
897 | end_buf, | ||
898 | &iwe, buf); | ||
899 | sprintf(buf, "Channel Precedence: " | ||
900 | "0x%02X%02X%02X%02X", cfg[13], cfg[14], | ||
901 | cfg[15], cfg[16]); | ||
902 | iwe.u.data.length = strlen(buf); | ||
903 | current_ev = iwe_stream_add_point(info, current_ev, | ||
904 | end_buf, | ||
905 | &iwe, buf); | ||
906 | kfree(buf); | ||
907 | } | ||
908 | } | ||
909 | |||
910 | return current_ev; | ||
911 | } | ||
912 | |||
913 | |||
914 | int ieee80211_sta_scan_results(struct ieee80211_local *local, | ||
915 | struct iw_request_info *info, | ||
916 | char *buf, size_t len) | ||
917 | { | ||
918 | char *current_ev = buf; | ||
919 | char *end_buf = buf + len; | ||
920 | struct ieee80211_sta_bss *bss; | ||
921 | |||
922 | spin_lock_bh(&local->sta_bss_lock); | ||
923 | list_for_each_entry(bss, &local->sta_bss_list, list) { | ||
924 | if (buf + len - current_ev <= IW_EV_ADDR_LEN) { | ||
925 | spin_unlock_bh(&local->sta_bss_lock); | ||
926 | return -E2BIG; | ||
927 | } | ||
928 | current_ev = ieee80211_sta_scan_result(local, info, bss, | ||
929 | current_ev, end_buf); | ||
930 | } | ||
931 | spin_unlock_bh(&local->sta_bss_lock); | ||
932 | return current_ev - buf; | ||
933 | } | ||
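The rate loop in ieee80211_sta_scan_result() above converts raw Supported Rates octets to bit/s: each octet encodes a rate in 500 kb/s units with the top bit flagging a basic rate, so masking with 0x7f and multiplying by 500000 recovers the value. A minimal standalone check of that arithmetic (sample octets per IEEE 802.11b; not part of this patch):

#include <stdio.h>

int main(void)
{
	/* 0x96: basic-rate bit set, 0x16 == 22 units of 500 kb/s == 11 Mbit/s */
	const unsigned char supp_rates[] = { 0x82, 0x84, 0x8b, 0x96 };
	unsigned int i;

	for (i = 0; i < sizeof(supp_rates); i++)
		printf("%u bit/s\n", (supp_rates[i] & 0x7f) * 500000);
	/* prints 1000000, 2000000, 5500000, 11000000 */
	return 0;
}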
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c new file mode 100644 index 000000000000..f72bad636d8e --- /dev/null +++ b/net/mac80211/spectmgmt.c | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * spectrum management | ||
3 | * | ||
4 | * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> | ||
5 | * Copyright 2002-2005, Instant802 Networks, Inc. | ||
6 | * Copyright 2005-2006, Devicescape Software, Inc. | ||
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | ||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | ||
9 | * Copyright 2007-2008, Intel Corporation | ||
10 | * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <linux/ieee80211.h> | ||
18 | #include <net/wireless.h> | ||
19 | #include <net/mac80211.h> | ||
20 | #include "ieee80211_i.h" | ||
21 | #include "sta_info.h" | ||
22 | #include "wme.h" | ||
23 | |||
24 | static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata, | ||
25 | struct ieee80211_msrment_ie *request_ie, | ||
26 | const u8 *da, const u8 *bssid, | ||
27 | u8 dialog_token) | ||
28 | { | ||
29 | struct ieee80211_local *local = sdata->local; | ||
30 | struct sk_buff *skb; | ||
31 | struct ieee80211_mgmt *msr_report; | ||
32 | |||
33 | skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + | ||
34 | sizeof(struct ieee80211_msrment_ie)); | ||
35 | |||
36 | if (!skb) { | ||
37 | printk(KERN_ERR "%s: failed to allocate buffer for " | ||
38 | "measurement report frame\n", sdata->dev->name); | ||
39 | return; | ||
40 | } | ||
41 | |||
42 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
43 | msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); | ||
44 | memset(msr_report, 0, 24); | ||
45 | memcpy(msr_report->da, da, ETH_ALEN); | ||
46 | memcpy(msr_report->sa, sdata->dev->dev_addr, ETH_ALEN); | ||
47 | memcpy(msr_report->bssid, bssid, ETH_ALEN); | ||
48 | msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | | ||
49 | IEEE80211_STYPE_ACTION); | ||
50 | |||
51 | skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement)); | ||
52 | msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT; | ||
53 | msr_report->u.action.u.measurement.action_code = | ||
54 | WLAN_ACTION_SPCT_MSR_RPRT; | ||
55 | msr_report->u.action.u.measurement.dialog_token = dialog_token; | ||
56 | |||
57 | msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT; | ||
58 | msr_report->u.action.u.measurement.length = | ||
59 | sizeof(struct ieee80211_msrment_ie); | ||
60 | |||
61 | memset(&msr_report->u.action.u.measurement.msr_elem, 0, | ||
62 | sizeof(struct ieee80211_msrment_ie)); | ||
63 | msr_report->u.action.u.measurement.msr_elem.token = request_ie->token; | ||
64 | msr_report->u.action.u.measurement.msr_elem.mode |= | ||
65 | IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED; | ||
66 | msr_report->u.action.u.measurement.msr_elem.type = request_ie->type; | ||
67 | |||
68 | ieee80211_tx_skb(sdata, skb, 0); | ||
69 | } | ||
70 | |||
71 | void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, | ||
72 | struct ieee80211_mgmt *mgmt, | ||
73 | size_t len) | ||
74 | { | ||
75 | /* | ||
76 | * Ignoring a measurement request is a spec violation. | ||
77 | * Mandatory measurements must be reported; optional | ||
78 | * measurements may be refused or reported as incapable. | ||
79 | * For now, just refuse any request. | ||
80 | * TODO: answer basic measurements as unmeasured. | ||
81 | */ | ||
82 | ieee80211_send_refuse_measurement_request(sdata, | ||
83 | &mgmt->u.action.u.measurement.msr_elem, | ||
84 | mgmt->sa, mgmt->bssid, | ||
85 | mgmt->u.action.u.measurement.dialog_token); | ||
86 | } | ||
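The refusal path in the new spectmgmt.c copies the request's dialog token, measurement token and type into the report and sets the REFUSED mode bit. A stripped-down sketch of that request-to-report mapping (struct and constant are illustrative, not the kernel definitions; the REFUSED bit value is assumed from ieee80211.h):

#include <stdio.h>

/* illustrative only; not the kernel's struct ieee80211_msrment_ie */
struct demo_msrment_ie {
	unsigned char token;
	unsigned char mode;
	unsigned char type;
};

#define DEMO_MSR_RPRT_MODE_REFUSED 0x04	/* assumed value of the REFUSED bit */

static void demo_refuse(const struct demo_msrment_ie *req,
			struct demo_msrment_ie *rpt)
{
	rpt->token = req->token;	/* echo the measurement token */
	rpt->type = req->type;		/* echo the measurement type */
	rpt->mode = DEMO_MSR_RPRT_MODE_REFUSED;
}

int main(void)
{
	struct demo_msrment_ie req = { .token = 7, .mode = 0, .type = 1 };
	struct demo_msrment_ie rpt;

	demo_refuse(&req, &rpt);
	printf("token=%u type=%u mode=0x%02x\n", rpt.token, rpt.type, rpt.mode);
	return 0;
}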
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 109db787ccb7..4a581a5b5766 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -204,6 +204,7 @@ struct sta_ampdu_mlme { | |||
204 | * @tx_fragments: number of transmitted MPDUs | 204 | * @tx_fragments: number of transmitted MPDUs |
205 | * @txrate_idx: TBD | 205 | * @txrate_idx: TBD |
206 | * @last_txrate_idx: TBD | 206 | * @last_txrate_idx: TBD |
207 | * @tid_seq: TBD | ||
207 | * @wme_tx_queue: TBD | 208 | * @wme_tx_queue: TBD |
208 | * @ampdu_mlme: TBD | 209 | * @ampdu_mlme: TBD |
209 | * @timer_to_tid: identity mapping to ID timers | 210 | * @timer_to_tid: identity mapping to ID timers |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 4788f7b91f49..c413d4836afe 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -82,6 +82,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, | |||
82 | struct ieee80211_rate *txrate; | 82 | struct ieee80211_rate *txrate; |
83 | struct ieee80211_local *local = tx->local; | 83 | struct ieee80211_local *local = tx->local; |
84 | struct ieee80211_supported_band *sband; | 84 | struct ieee80211_supported_band *sband; |
85 | struct ieee80211_hdr *hdr; | ||
85 | 86 | ||
86 | sband = local->hw.wiphy->bands[tx->channel->band]; | 87 | sband = local->hw.wiphy->bands[tx->channel->band]; |
87 | txrate = &sband->bitrates[tx->rate_idx]; | 88 | txrate = &sband->bitrates[tx->rate_idx]; |
@@ -107,8 +108,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, | |||
107 | * at the highest possible rate belonging to the PHY rates in the | 108 | * at the highest possible rate belonging to the PHY rates in the |
108 | * BSSBasicRateSet | 109 | * BSSBasicRateSet |
109 | */ | 110 | */ |
110 | 111 | hdr = (struct ieee80211_hdr *)tx->skb->data; | |
111 | if ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) { | 112 | if (ieee80211_is_ctl(hdr->frame_control)) { |
112 | /* TODO: These control frames are not currently sent by | 113 | /* TODO: These control frames are not currently sent by |
113 | * 80211.o, but should they be implemented, this function | 114 | * 80211.o, but should they be implemented, this function |
114 | * needs to be updated to support duration field calculation. | 115 | * needs to be updated to support duration field calculation. |
@@ -213,9 +214,8 @@ static int inline is_ieee80211_device(struct net_device *dev, | |||
213 | static ieee80211_tx_result debug_noinline | 214 | static ieee80211_tx_result debug_noinline |
214 | ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | 215 | ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) |
215 | { | 216 | { |
216 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 217 | |
217 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | 218 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
218 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | ||
219 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 219 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
220 | u32 sta_flags; | 220 | u32 sta_flags; |
221 | 221 | ||
@@ -223,8 +223,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | |||
223 | return TX_CONTINUE; | 223 | return TX_CONTINUE; |
224 | 224 | ||
225 | if (unlikely(tx->local->sta_sw_scanning) && | 225 | if (unlikely(tx->local->sta_sw_scanning) && |
226 | ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || | 226 | !ieee80211_is_probe_req(hdr->frame_control)) |
227 | (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ)) | ||
228 | return TX_DROP; | 227 | return TX_DROP; |
229 | 228 | ||
230 | if (tx->sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) | 229 | if (tx->sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) |
@@ -238,7 +237,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | |||
238 | if (likely(tx->flags & IEEE80211_TX_UNICAST)) { | 237 | if (likely(tx->flags & IEEE80211_TX_UNICAST)) { |
239 | if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && | 238 | if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && |
240 | tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 239 | tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && |
241 | (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { | 240 | ieee80211_is_data(hdr->frame_control))) { |
242 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 241 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
243 | DECLARE_MAC_BUF(mac); | 242 | DECLARE_MAC_BUF(mac); |
244 | printk(KERN_DEBUG "%s: dropped data frame to not " | 243 | printk(KERN_DEBUG "%s: dropped data frame to not " |
@@ -249,7 +248,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | |||
249 | return TX_DROP; | 248 | return TX_DROP; |
250 | } | 249 | } |
251 | } else { | 250 | } else { |
252 | if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && | 251 | if (unlikely(ieee80211_is_data(hdr->frame_control) && |
253 | tx->local->num_sta == 0 && | 252 | tx->local->num_sta == 0 && |
254 | tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS)) { | 253 | tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS)) { |
255 | /* | 254 | /* |
@@ -315,6 +314,7 @@ static ieee80211_tx_result | |||
315 | ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | 314 | ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) |
316 | { | 315 | { |
317 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 316 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
317 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
318 | 318 | ||
319 | /* | 319 | /* |
320 | * broadcast/multicast frame | 320 | * broadcast/multicast frame |
@@ -329,7 +329,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
329 | return TX_CONTINUE; | 329 | return TX_CONTINUE; |
330 | 330 | ||
331 | /* no buffering for ordered frames */ | 331 | /* no buffering for ordered frames */ |
332 | if (tx->fc & IEEE80211_FCTL_ORDER) | 332 | if (ieee80211_has_order(hdr->frame_control)) |
333 | return TX_CONTINUE; | 333 | return TX_CONTINUE; |
334 | 334 | ||
335 | /* no stations in PS mode */ | 335 | /* no stations in PS mode */ |
@@ -367,12 +367,11 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
367 | { | 367 | { |
368 | struct sta_info *sta = tx->sta; | 368 | struct sta_info *sta = tx->sta; |
369 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 369 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
370 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
370 | u32 staflags; | 371 | u32 staflags; |
371 | DECLARE_MAC_BUF(mac); | 372 | DECLARE_MAC_BUF(mac); |
372 | 373 | ||
373 | if (unlikely(!sta || | 374 | if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control))) |
374 | ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && | ||
375 | (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) | ||
376 | return TX_CONTINUE; | 375 | return TX_CONTINUE; |
377 | 376 | ||
378 | staflags = get_sta_flags(sta); | 377 | staflags = get_sta_flags(sta); |
@@ -437,7 +436,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) | |||
437 | { | 436 | { |
438 | struct ieee80211_key *key; | 437 | struct ieee80211_key *key; |
439 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 438 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
440 | u16 fc = tx->fc; | 439 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
441 | 440 | ||
442 | if (unlikely(tx->skb->do_not_encrypt)) | 441 | if (unlikely(tx->skb->do_not_encrypt)) |
443 | tx->key = NULL; | 442 | tx->key = NULL; |
@@ -454,22 +453,16 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) | |||
454 | tx->key = NULL; | 453 | tx->key = NULL; |
455 | 454 | ||
456 | if (tx->key) { | 455 | if (tx->key) { |
457 | u16 ftype, stype; | ||
458 | |||
459 | tx->key->tx_rx_count++; | 456 | tx->key->tx_rx_count++; |
460 | /* TODO: add threshold stuff again */ | 457 | /* TODO: add threshold stuff again */ |
461 | 458 | ||
462 | switch (tx->key->conf.alg) { | 459 | switch (tx->key->conf.alg) { |
463 | case ALG_WEP: | 460 | case ALG_WEP: |
464 | ftype = fc & IEEE80211_FCTL_FTYPE; | 461 | if (ieee80211_is_auth(hdr->frame_control)) |
465 | stype = fc & IEEE80211_FCTL_STYPE; | ||
466 | |||
467 | if (ftype == IEEE80211_FTYPE_MGMT && | ||
468 | stype == IEEE80211_STYPE_AUTH) | ||
469 | break; | 462 | break; |
470 | case ALG_TKIP: | 463 | case ALG_TKIP: |
471 | case ALG_CCMP: | 464 | case ALG_CCMP: |
472 | if (!WLAN_FC_DATA_PRESENT(fc)) | 465 | if (!ieee80211_is_data_present(hdr->frame_control)) |
473 | tx->key = NULL; | 466 | tx->key = NULL; |
474 | break; | 467 | break; |
475 | } | 468 | } |
@@ -1000,7 +993,6 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1000 | hdr = (struct ieee80211_hdr *) skb->data; | 993 | hdr = (struct ieee80211_hdr *) skb->data; |
1001 | 994 | ||
1002 | tx->sta = sta_info_get(local, hdr->addr1); | 995 | tx->sta = sta_info_get(local, hdr->addr1); |
1003 | tx->fc = le16_to_cpu(hdr->frame_control); | ||
1004 | 996 | ||
1005 | if (is_multicast_ether_addr(hdr->addr1)) { | 997 | if (is_multicast_ether_addr(hdr->addr1)) { |
1006 | tx->flags &= ~IEEE80211_TX_UNICAST; | 998 | tx->flags &= ~IEEE80211_TX_UNICAST; |
@@ -1025,7 +1017,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1025 | else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT)) | 1017 | else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT)) |
1026 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; | 1018 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; |
1027 | 1019 | ||
1028 | hdrlen = ieee80211_get_hdrlen(tx->fc); | 1020 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
1029 | if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { | 1021 | if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { |
1030 | u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; | 1022 | u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; |
1031 | tx->ethertype = (pos[0] << 8) | pos[1]; | 1023 | tx->ethertype = (pos[0] << 8) | pos[1]; |
@@ -1335,7 +1327,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, | |||
1335 | if (is_multicast_ether_addr(hdr->addr3)) | 1327 | if (is_multicast_ether_addr(hdr->addr3)) |
1336 | memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); | 1328 | memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); |
1337 | else | 1329 | else |
1338 | if (mesh_nexthop_lookup(skb, odev)) | 1330 | if (mesh_nexthop_lookup(skb, osdata)) |
1339 | return 0; | 1331 | return 0; |
1340 | if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) | 1332 | if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) |
1341 | IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.sta, | 1333 | IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.sta, |
@@ -1889,8 +1881,8 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1889 | goto out; | 1881 | goto out; |
1890 | 1882 | ||
1891 | hdr = (struct ieee80211_hdr *) skb->data; | 1883 | hdr = (struct ieee80211_hdr *) skb->data; |
1892 | hdr->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 1884 | hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
1893 | IEEE80211_STYPE_BEACON); | 1885 | IEEE80211_STYPE_BEACON); |
1894 | 1886 | ||
1895 | num_beacons = &ifsta->num_beacons; | 1887 | num_beacons = &ifsta->num_beacons; |
1896 | } else if (ieee80211_vif_is_mesh(&sdata->vif)) { | 1888 | } else if (ieee80211_vif_is_mesh(&sdata->vif)) { |
@@ -1916,7 +1908,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1916 | *pos++ = WLAN_EID_SSID; | 1908 | *pos++ = WLAN_EID_SSID; |
1917 | *pos++ = 0x0; | 1909 | *pos++ = 0x0; |
1918 | 1910 | ||
1919 | mesh_mgmt_ies_add(skb, sdata->dev); | 1911 | mesh_mgmt_ies_add(skb, sdata); |
1920 | 1912 | ||
1921 | num_beacons = &sdata->u.sta.num_beacons; | 1913 | num_beacons = &sdata->u.sta.num_beacons; |
1922 | } else { | 1914 | } else { |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 0d463c80c404..c3a22ab2ad2e 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -91,45 +91,6 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, | |||
91 | return NULL; | 91 | return NULL; |
92 | } | 92 | } |
93 | 93 | ||
94 | int ieee80211_get_hdrlen(u16 fc) | ||
95 | { | ||
96 | int hdrlen = 24; | ||
97 | |||
98 | switch (fc & IEEE80211_FCTL_FTYPE) { | ||
99 | case IEEE80211_FTYPE_DATA: | ||
100 | if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS)) | ||
101 | hdrlen = 30; /* Addr4 */ | ||
102 | /* | ||
103 | * The QoS Control field is two bytes and its presence is | ||
104 | * indicated by the IEEE80211_STYPE_QOS_DATA bit. Add 2 to | ||
105 | * hdrlen if that bit is set. | ||
106 | * This works by masking out the bit and shifting it to | ||
107 | * bit position 1 so the result has the value 0 or 2. | ||
108 | */ | ||
109 | hdrlen += (fc & IEEE80211_STYPE_QOS_DATA) | ||
110 | >> (ilog2(IEEE80211_STYPE_QOS_DATA)-1); | ||
111 | break; | ||
112 | case IEEE80211_FTYPE_CTL: | ||
113 | /* | ||
114 | * ACK and CTS are 10 bytes, all others 16. To see how | ||
115 | * to get this condition consider | ||
116 | * subtype mask: 0b0000000011110000 (0x00F0) | ||
117 | * ACK subtype: 0b0000000011010000 (0x00D0) | ||
118 | * CTS subtype: 0b0000000011000000 (0x00C0) | ||
119 | * bits that matter: ^^^ (0x00E0) | ||
120 | * value of those: 0b0000000011000000 (0x00C0) | ||
121 | */ | ||
122 | if ((fc & 0xE0) == 0xC0) | ||
123 | hdrlen = 10; | ||
124 | else | ||
125 | hdrlen = 16; | ||
126 | break; | ||
127 | } | ||
128 | |||
129 | return hdrlen; | ||
130 | } | ||
131 | EXPORT_SYMBOL(ieee80211_get_hdrlen); | ||
132 | |||
133 | unsigned int ieee80211_hdrlen(__le16 fc) | 94 | unsigned int ieee80211_hdrlen(__le16 fc) |
134 | { | 95 | { |
135 | unsigned int hdrlen = 24; | 96 | unsigned int hdrlen = 24; |
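The removed ieee80211_get_hdrlen() above documents two bit tricks that survive in ieee80211_hdrlen(): the QoS Control bit (0x0080) shifted down to add 0 or 2 bytes, and the 0xE0/0xC0 subtype mask that picks out ACK and CTS among control frames. A self-contained sketch of the same arithmetic on host-order frame-control values (constants mirror ieee80211.h; the helper name is hypothetical):

#include <stdio.h>

#define FCTL_FTYPE        0x000c
#define FCTL_TODS         0x0100
#define FCTL_FROMDS       0x0200
#define FTYPE_DATA        0x0008
#define FTYPE_CTL         0x0004
#define STYPE_QOS_DATA    0x0080

/* hypothetical re-implementation of the host-order helper removed above */
static int demo_hdrlen(unsigned int fc)
{
	int hdrlen = 24;

	switch (fc & FCTL_FTYPE) {
	case FTYPE_DATA:
		if ((fc & FCTL_FROMDS) && (fc & FCTL_TODS))
			hdrlen = 30;			/* 4-address frame */
		/* QoS bit is 0x0080; shifting it right by 6 yields 0 or 2 */
		hdrlen += (fc & STYPE_QOS_DATA) >> 6;
		break;
	case FTYPE_CTL:
		/* ACK (0xD0) and CTS (0xC0) match 0xC0 under the 0xE0 mask */
		if ((fc & 0xE0) == 0xC0)
			hdrlen = 10;
		else
			hdrlen = 16;
		break;
	}
	return hdrlen;
}

int main(void)
{
	printf("%d\n", demo_hdrlen(FTYPE_DATA | STYPE_QOS_DATA));	/* 26 */
	printf("%d\n", demo_hdrlen(FTYPE_CTL | 0xD0));			/* 10: ACK */
	return 0;
}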
@@ -386,6 +347,13 @@ void ieee80211_stop_queues(struct ieee80211_hw *hw) | |||
386 | } | 347 | } |
387 | EXPORT_SYMBOL(ieee80211_stop_queues); | 348 | EXPORT_SYMBOL(ieee80211_stop_queues); |
388 | 349 | ||
350 | int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) | ||
351 | { | ||
352 | struct ieee80211_local *local = hw_to_local(hw); | ||
353 | return __netif_subqueue_stopped(local->mdev, queue); | ||
354 | } | ||
355 | EXPORT_SYMBOL(ieee80211_queue_stopped); | ||
356 | |||
389 | void ieee80211_wake_queues(struct ieee80211_hw *hw) | 357 | void ieee80211_wake_queues(struct ieee80211_hw *hw) |
390 | { | 358 | { |
391 | int i; | 359 | int i; |
@@ -460,3 +428,187 @@ void ieee80211_iterate_active_interfaces_atomic( | |||
460 | rcu_read_unlock(); | 428 | rcu_read_unlock(); |
461 | } | 429 | } |
462 | EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); | 430 | EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); |
431 | |||
432 | void ieee802_11_parse_elems(u8 *start, size_t len, | ||
433 | struct ieee802_11_elems *elems) | ||
434 | { | ||
435 | size_t left = len; | ||
436 | u8 *pos = start; | ||
437 | |||
438 | memset(elems, 0, sizeof(*elems)); | ||
439 | elems->ie_start = start; | ||
440 | elems->total_len = len; | ||
441 | |||
442 | while (left >= 2) { | ||
443 | u8 id, elen; | ||
444 | |||
445 | id = *pos++; | ||
446 | elen = *pos++; | ||
447 | left -= 2; | ||
448 | |||
449 | if (elen > left) | ||
450 | return; | ||
451 | |||
452 | switch (id) { | ||
453 | case WLAN_EID_SSID: | ||
454 | elems->ssid = pos; | ||
455 | elems->ssid_len = elen; | ||
456 | break; | ||
457 | case WLAN_EID_SUPP_RATES: | ||
458 | elems->supp_rates = pos; | ||
459 | elems->supp_rates_len = elen; | ||
460 | break; | ||
461 | case WLAN_EID_FH_PARAMS: | ||
462 | elems->fh_params = pos; | ||
463 | elems->fh_params_len = elen; | ||
464 | break; | ||
465 | case WLAN_EID_DS_PARAMS: | ||
466 | elems->ds_params = pos; | ||
467 | elems->ds_params_len = elen; | ||
468 | break; | ||
469 | case WLAN_EID_CF_PARAMS: | ||
470 | elems->cf_params = pos; | ||
471 | elems->cf_params_len = elen; | ||
472 | break; | ||
473 | case WLAN_EID_TIM: | ||
474 | elems->tim = pos; | ||
475 | elems->tim_len = elen; | ||
476 | break; | ||
477 | case WLAN_EID_IBSS_PARAMS: | ||
478 | elems->ibss_params = pos; | ||
479 | elems->ibss_params_len = elen; | ||
480 | break; | ||
481 | case WLAN_EID_CHALLENGE: | ||
482 | elems->challenge = pos; | ||
483 | elems->challenge_len = elen; | ||
484 | break; | ||
485 | case WLAN_EID_WPA: | ||
486 | if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && | ||
487 | pos[2] == 0xf2) { | ||
488 | /* Microsoft OUI (00:50:F2) */ | ||
489 | if (pos[3] == 1) { | ||
490 | /* OUI Type 1 - WPA IE */ | ||
491 | elems->wpa = pos; | ||
492 | elems->wpa_len = elen; | ||
493 | } else if (elen >= 5 && pos[3] == 2) { | ||
494 | if (pos[4] == 0) { | ||
495 | elems->wmm_info = pos; | ||
496 | elems->wmm_info_len = elen; | ||
497 | } else if (pos[4] == 1) { | ||
498 | elems->wmm_param = pos; | ||
499 | elems->wmm_param_len = elen; | ||
500 | } | ||
501 | } | ||
502 | } | ||
503 | break; | ||
504 | case WLAN_EID_RSN: | ||
505 | elems->rsn = pos; | ||
506 | elems->rsn_len = elen; | ||
507 | break; | ||
508 | case WLAN_EID_ERP_INFO: | ||
509 | elems->erp_info = pos; | ||
510 | elems->erp_info_len = elen; | ||
511 | break; | ||
512 | case WLAN_EID_EXT_SUPP_RATES: | ||
513 | elems->ext_supp_rates = pos; | ||
514 | elems->ext_supp_rates_len = elen; | ||
515 | break; | ||
516 | case WLAN_EID_HT_CAPABILITY: | ||
517 | elems->ht_cap_elem = pos; | ||
518 | elems->ht_cap_elem_len = elen; | ||
519 | break; | ||
520 | case WLAN_EID_HT_EXTRA_INFO: | ||
521 | elems->ht_info_elem = pos; | ||
522 | elems->ht_info_elem_len = elen; | ||
523 | break; | ||
524 | case WLAN_EID_MESH_ID: | ||
525 | elems->mesh_id = pos; | ||
526 | elems->mesh_id_len = elen; | ||
527 | break; | ||
528 | case WLAN_EID_MESH_CONFIG: | ||
529 | elems->mesh_config = pos; | ||
530 | elems->mesh_config_len = elen; | ||
531 | break; | ||
532 | case WLAN_EID_PEER_LINK: | ||
533 | elems->peer_link = pos; | ||
534 | elems->peer_link_len = elen; | ||
535 | break; | ||
536 | case WLAN_EID_PREQ: | ||
537 | elems->preq = pos; | ||
538 | elems->preq_len = elen; | ||
539 | break; | ||
540 | case WLAN_EID_PREP: | ||
541 | elems->prep = pos; | ||
542 | elems->prep_len = elen; | ||
543 | break; | ||
544 | case WLAN_EID_PERR: | ||
545 | elems->perr = pos; | ||
546 | elems->perr_len = elen; | ||
547 | break; | ||
548 | case WLAN_EID_CHANNEL_SWITCH: | ||
549 | elems->ch_switch_elem = pos; | ||
550 | elems->ch_switch_elem_len = elen; | ||
551 | break; | ||
552 | case WLAN_EID_QUIET: | ||
553 | if (!elems->quiet_elem) { | ||
554 | elems->quiet_elem = pos; | ||
555 | elems->quiet_elem_len = elen; | ||
556 | } | ||
557 | elems->num_of_quiet_elem++; | ||
558 | break; | ||
559 | case WLAN_EID_COUNTRY: | ||
560 | elems->country_elem = pos; | ||
561 | elems->country_elem_len = elen; | ||
562 | break; | ||
563 | case WLAN_EID_PWR_CONSTRAINT: | ||
564 | elems->pwr_constr_elem = pos; | ||
565 | elems->pwr_constr_elem_len = elen; | ||
566 | break; | ||
567 | default: | ||
568 | break; | ||
569 | } | ||
570 | |||
571 | left -= elen; | ||
572 | pos += elen; | ||
573 | } | ||
574 | } | ||
575 | |||
576 | void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) | ||
577 | { | ||
578 | struct ieee80211_local *local = sdata->local; | ||
579 | struct ieee80211_tx_queue_params qparam; | ||
580 | int i; | ||
581 | |||
582 | if (!local->ops->conf_tx) | ||
583 | return; | ||
584 | |||
585 | memset(&qparam, 0, sizeof(qparam)); | ||
586 | |||
587 | qparam.aifs = 2; | ||
588 | |||
589 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && | ||
590 | !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)) | ||
591 | qparam.cw_min = 31; | ||
592 | else | ||
593 | qparam.cw_min = 15; | ||
594 | |||
595 | qparam.cw_max = 1023; | ||
596 | qparam.txop = 0; | ||
597 | |||
598 | for (i = 0; i < local_to_hw(local)->queues; i++) | ||
599 | local->ops->conf_tx(local_to_hw(local), i, &qparam); | ||
600 | } | ||
601 | |||
602 | void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | ||
603 | int encrypt) | ||
604 | { | ||
605 | skb->dev = sdata->local->mdev; | ||
606 | skb_set_mac_header(skb, 0); | ||
607 | skb_set_network_header(skb, 0); | ||
608 | skb_set_transport_header(skb, 0); | ||
609 | |||
610 | skb->iif = sdata->dev->ifindex; | ||
611 | skb->do_not_encrypt = !encrypt; | ||
612 | |||
613 | dev_queue_xmit(skb); | ||
614 | } | ||
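The new ieee802_11_parse_elems() walks a TLV buffer of information elements, recording a pointer and length per known element ID and bailing out if an element claims more bytes than remain. A reduced standalone sketch of that walk, with only two element IDs and simplified field names (not the kernel struct):

#include <stddef.h>
#include <stdio.h>

struct demo_elems {
	const unsigned char *ssid;
	size_t ssid_len;
	const unsigned char *supp_rates;
	size_t supp_rates_len;
};

/* simplified TLV walk in the spirit of ieee802_11_parse_elems() */
static void demo_parse(const unsigned char *pos, size_t left,
		       struct demo_elems *elems)
{
	while (left >= 2) {
		unsigned char id = *pos++;
		unsigned char elen = *pos++;

		left -= 2;
		if (elen > left)
			return;		/* truncated element: stop parsing */

		switch (id) {
		case 0:			/* WLAN_EID_SSID */
			elems->ssid = pos;
			elems->ssid_len = elen;
			break;
		case 1:			/* WLAN_EID_SUPP_RATES */
			elems->supp_rates = pos;
			elems->supp_rates_len = elen;
			break;
		}
		left -= elen;
		pos += elen;
	}
}

int main(void)
{
	/* SSID "abc" followed by two supported rates */
	const unsigned char ies[] = { 0, 3, 'a', 'b', 'c', 1, 2, 0x82, 0x84 };
	struct demo_elems elems = { 0 };

	demo_parse(ies, sizeof(ies), &elems);
	printf("ssid_len=%zu rates_len=%zu\n", elems.ssid_len,
	       elems.supp_rates_len);	/* ssid_len=3 rates_len=2 */
	return 0;
}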
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index 5c2bf0a3d4db..376c84987e4f 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c | |||
@@ -228,11 +228,10 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, | |||
228 | return -1; | 228 | return -1; |
229 | 229 | ||
230 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 230 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
231 | 231 | if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN) | |
232 | if (skb->len < 8 + hdrlen) | ||
233 | return -1; | 232 | return -1; |
234 | 233 | ||
235 | len = skb->len - hdrlen - 8; | 234 | len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN; |
236 | 235 | ||
237 | keyidx = skb->data[hdrlen + 3] >> 6; | 236 | keyidx = skb->data[hdrlen + 3] >> 6; |
238 | 237 | ||
@@ -292,9 +291,10 @@ u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key) | |||
292 | ieee80211_rx_result | 291 | ieee80211_rx_result |
293 | ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) | 292 | ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) |
294 | { | 293 | { |
295 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && | 294 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
296 | ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || | 295 | |
297 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) | 296 | if (!ieee80211_is_data(hdr->frame_control) && |
297 | !ieee80211_is_auth(hdr->frame_control)) | ||
298 | return RX_CONTINUE; | 298 | return RX_CONTINUE; |
299 | 299 | ||
300 | if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { | 300 | if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { |
@@ -303,7 +303,7 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) | |||
303 | } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { | 303 | } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { |
304 | ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); | 304 | ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); |
305 | /* remove ICV */ | 305 | /* remove ICV */ |
306 | skb_trim(rx->skb, rx->skb->len - 4); | 306 | skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN); |
307 | } | 307 | } |
308 | 308 | ||
309 | return RX_CONTINUE; | 309 | return RX_CONTINUE; |
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c index 34fa8ed1e784..beae664ab480 100644 --- a/net/mac80211/wext.c +++ b/net/mac80211/wext.c | |||
@@ -27,22 +27,19 @@ | |||
27 | #include "aes_ccm.h" | 27 | #include "aes_ccm.h" |
28 | 28 | ||
29 | 29 | ||
30 | static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr, | 30 | static int ieee80211_set_encryption(struct ieee80211_sub_if_data *sdata, u8 *sta_addr, |
31 | int idx, int alg, int remove, | 31 | int idx, int alg, int remove, |
32 | int set_tx_key, const u8 *_key, | 32 | int set_tx_key, const u8 *_key, |
33 | size_t key_len) | 33 | size_t key_len) |
34 | { | 34 | { |
35 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 35 | struct ieee80211_local *local = sdata->local; |
36 | struct sta_info *sta; | 36 | struct sta_info *sta; |
37 | struct ieee80211_key *key; | 37 | struct ieee80211_key *key; |
38 | struct ieee80211_sub_if_data *sdata; | ||
39 | int err; | 38 | int err; |
40 | 39 | ||
41 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
42 | |||
43 | if (idx < 0 || idx >= NUM_DEFAULT_KEYS) { | 40 | if (idx < 0 || idx >= NUM_DEFAULT_KEYS) { |
44 | printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n", | 41 | printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n", |
45 | dev->name, idx); | 42 | sdata->dev->name, idx); |
46 | return -EINVAL; | 43 | return -EINVAL; |
47 | } | 44 | } |
48 | 45 | ||
@@ -127,11 +124,11 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev, | |||
127 | 124 | ||
128 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 125 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || |
129 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 126 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { |
130 | int ret = ieee80211_sta_set_extra_ie(dev, extra, data->length); | 127 | int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length); |
131 | if (ret) | 128 | if (ret) |
132 | return ret; | 129 | return ret; |
133 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; | 130 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; |
134 | ieee80211_sta_req_auth(dev, &sdata->u.sta); | 131 | ieee80211_sta_req_auth(sdata, &sdata->u.sta); |
135 | return 0; | 132 | return 0; |
136 | } | 133 | } |
137 | 134 | ||
@@ -333,12 +330,11 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev, | |||
333 | return 0; | 330 | return 0; |
334 | } | 331 | } |
335 | 332 | ||
336 | int ieee80211_set_freq(struct net_device *dev, int freqMHz) | 333 | int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freqMHz) |
337 | { | 334 | { |
338 | int ret = -EINVAL; | 335 | int ret = -EINVAL; |
339 | struct ieee80211_channel *chan; | 336 | struct ieee80211_channel *chan; |
340 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 337 | struct ieee80211_local *local = sdata->local; |
341 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
342 | 338 | ||
343 | chan = ieee80211_get_channel(local->hw.wiphy, freqMHz); | 339 | chan = ieee80211_get_channel(local->hw.wiphy, freqMHz); |
344 | 340 | ||
@@ -346,7 +342,7 @@ int ieee80211_set_freq(struct net_device *dev, int freqMHz) | |||
346 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && | 342 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && |
347 | chan->flags & IEEE80211_CHAN_NO_IBSS) { | 343 | chan->flags & IEEE80211_CHAN_NO_IBSS) { |
348 | printk(KERN_DEBUG "%s: IBSS not allowed on frequency " | 344 | printk(KERN_DEBUG "%s: IBSS not allowed on frequency " |
349 | "%d MHz\n", dev->name, chan->center_freq); | 345 | "%d MHz\n", sdata->dev->name, chan->center_freq); |
350 | return ret; | 346 | return ret; |
351 | } | 347 | } |
352 | local->oper_channel = chan; | 348 | local->oper_channel = chan; |
@@ -379,14 +375,14 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev, | |||
379 | IEEE80211_STA_AUTO_CHANNEL_SEL; | 375 | IEEE80211_STA_AUTO_CHANNEL_SEL; |
380 | return 0; | 376 | return 0; |
381 | } else | 377 | } else |
382 | return ieee80211_set_freq(dev, | 378 | return ieee80211_set_freq(sdata, |
383 | ieee80211_channel_to_frequency(freq->m)); | 379 | ieee80211_channel_to_frequency(freq->m)); |
384 | } else { | 380 | } else { |
385 | int i, div = 1000000; | 381 | int i, div = 1000000; |
386 | for (i = 0; i < freq->e; i++) | 382 | for (i = 0; i < freq->e; i++) |
387 | div /= 10; | 383 | div /= 10; |
388 | if (div > 0) | 384 | if (div > 0) |
389 | return ieee80211_set_freq(dev, freq->m / div); | 385 | return ieee80211_set_freq(sdata, freq->m / div); |
390 | else | 386 | else |
391 | return -EINVAL; | 387 | return -EINVAL; |
392 | } | 388 | } |
@@ -432,10 +428,10 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev, | |||
432 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; | 428 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; |
433 | else | 429 | else |
434 | sdata->u.sta.flags |= IEEE80211_STA_AUTO_SSID_SEL; | 430 | sdata->u.sta.flags |= IEEE80211_STA_AUTO_SSID_SEL; |
435 | ret = ieee80211_sta_set_ssid(dev, ssid, len); | 431 | ret = ieee80211_sta_set_ssid(sdata, ssid, len); |
436 | if (ret) | 432 | if (ret) |
437 | return ret; | 433 | return ret; |
438 | ieee80211_sta_req_auth(dev, &sdata->u.sta); | 434 | ieee80211_sta_req_auth(sdata, &sdata->u.sta); |
439 | return 0; | 435 | return 0; |
440 | } | 436 | } |
441 | 437 | ||
@@ -460,7 +456,7 @@ static int ieee80211_ioctl_giwessid(struct net_device *dev, | |||
460 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 456 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
461 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 457 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || |
462 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 458 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { |
463 | int res = ieee80211_sta_get_ssid(dev, ssid, &len); | 459 | int res = ieee80211_sta_get_ssid(sdata, ssid, &len); |
464 | if (res == 0) { | 460 | if (res == 0) { |
465 | data->length = len; | 461 | data->length = len; |
466 | data->flags = 1; | 462 | data->flags = 1; |
@@ -504,10 +500,10 @@ static int ieee80211_ioctl_siwap(struct net_device *dev, | |||
504 | sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL; | 500 | sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL; |
505 | else | 501 | else |
506 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; | 502 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; |
507 | ret = ieee80211_sta_set_bssid(dev, (u8 *) &ap_addr->sa_data); | 503 | ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data); |
508 | if (ret) | 504 | if (ret) |
509 | return ret; | 505 | return ret; |
510 | ieee80211_sta_req_auth(dev, &sdata->u.sta); | 506 | ieee80211_sta_req_auth(sdata, &sdata->u.sta); |
511 | return 0; | 507 | return 0; |
512 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { | 508 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { |
513 | /* | 509 | /* |
@@ -539,8 +535,8 @@ static int ieee80211_ioctl_giwap(struct net_device *dev, | |||
539 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 535 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
540 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 536 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || |
541 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 537 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { |
542 | if (sdata->u.sta.state == IEEE80211_ASSOCIATED || | 538 | if (sdata->u.sta.state == IEEE80211_STA_MLME_ASSOCIATED || |
543 | sdata->u.sta.state == IEEE80211_IBSS_JOINED) { | 539 | sdata->u.sta.state == IEEE80211_STA_MLME_IBSS_JOINED) { |
544 | ap_addr->sa_family = ARPHRD_ETHER; | 540 | ap_addr->sa_family = ARPHRD_ETHER; |
545 | memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN); | 541 | memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN); |
546 | return 0; | 542 | return 0; |
@@ -584,7 +580,7 @@ static int ieee80211_ioctl_siwscan(struct net_device *dev, | |||
584 | ssid_len = req->essid_len; | 580 | ssid_len = req->essid_len; |
585 | } | 581 | } |
586 | 582 | ||
587 | return ieee80211_sta_req_scan(dev, ssid, ssid_len); | 583 | return ieee80211_sta_req_scan(sdata, ssid, ssid_len); |
588 | } | 584 | } |
589 | 585 | ||
590 | 586 | ||
@@ -594,11 +590,14 @@ static int ieee80211_ioctl_giwscan(struct net_device *dev, | |||
594 | { | 590 | { |
595 | int res; | 591 | int res; |
596 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 592 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
593 | struct ieee80211_sub_if_data *sdata; | ||
594 | |||
595 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
597 | 596 | ||
598 | if (local->sta_sw_scanning || local->sta_hw_scanning) | 597 | if (local->sta_sw_scanning || local->sta_hw_scanning) |
599 | return -EAGAIN; | 598 | return -EAGAIN; |
600 | 599 | ||
601 | res = ieee80211_sta_scan_results(dev, info, extra, data->length); | 600 | res = ieee80211_sta_scan_results(local, info, extra, data->length); |
602 | if (res >= 0) { | 601 | if (res >= 0) { |
603 | data->length = res; | 602 | data->length = res; |
604 | return 0; | 603 | return 0; |
@@ -894,10 +893,10 @@ static int ieee80211_ioctl_siwmlme(struct net_device *dev, | |||
894 | switch (mlme->cmd) { | 893 | switch (mlme->cmd) { |
895 | case IW_MLME_DEAUTH: | 894 | case IW_MLME_DEAUTH: |
896 | /* TODO: mlme->addr.sa_data */ | 895 | /* TODO: mlme->addr.sa_data */ |
897 | return ieee80211_sta_deauthenticate(dev, mlme->reason_code); | 896 | return ieee80211_sta_deauthenticate(sdata, mlme->reason_code); |
898 | case IW_MLME_DISASSOC: | 897 | case IW_MLME_DISASSOC: |
899 | /* TODO: mlme->addr.sa_data */ | 898 | /* TODO: mlme->addr.sa_data */ |
900 | return ieee80211_sta_disassociate(dev, mlme->reason_code); | 899 | return ieee80211_sta_disassociate(sdata, mlme->reason_code); |
901 | default: | 900 | default: |
902 | return -EOPNOTSUPP; | 901 | return -EOPNOTSUPP; |
903 | } | 902 | } |
@@ -938,7 +937,7 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev, | |||
938 | } | 937 | } |
939 | 938 | ||
940 | return ieee80211_set_encryption( | 939 | return ieee80211_set_encryption( |
941 | dev, bcaddr, | 940 | sdata, bcaddr, |
942 | idx, alg, remove, | 941 | idx, alg, remove, |
943 | !sdata->default_key, | 942 | !sdata->default_key, |
944 | keybuf, erq->length); | 943 | keybuf, erq->length); |
@@ -1184,7 +1183,7 @@ static int ieee80211_ioctl_siwencodeext(struct net_device *dev, | |||
1184 | } else | 1183 | } else |
1185 | idx--; | 1184 | idx--; |
1186 | 1185 | ||
1187 | return ieee80211_set_encryption(dev, ext->addr.sa_data, idx, alg, | 1186 | return ieee80211_set_encryption(sdata, ext->addr.sa_data, idx, alg, |
1188 | remove, | 1187 | remove, |
1189 | ext->ext_flags & | 1188 | ext->ext_flags & |
1190 | IW_ENCODE_EXT_SET_TX_KEY, | 1189 | IW_ENCODE_EXT_SET_TX_KEY, |
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 4310e2f65661..7229e958879d 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c | |||
@@ -47,8 +47,6 @@ static unsigned int classify_1d(struct sk_buff *skb) | |||
47 | return 0; | 47 | return 0; |
48 | } | 48 | } |
49 | 49 | ||
50 | if (dscp & 0x1c) | ||
51 | return 0; | ||
52 | return dscp >> 5; | 50 | return dscp >> 5; |
53 | } | 51 | } |
54 | 52 | ||
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h index 04de28c071a6..465e274df7c5 100644 --- a/net/mac80211/wme.h +++ b/net/mac80211/wme.h | |||
@@ -14,8 +14,6 @@ | |||
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
15 | #include "ieee80211_i.h" | 15 | #include "ieee80211_i.h" |
16 | 16 | ||
17 | #define QOS_CONTROL_LEN 2 | ||
18 | |||
19 | #define QOS_CONTROL_ACK_POLICY_NORMAL 0 | 17 | #define QOS_CONTROL_ACK_POLICY_NORMAL 0 |
20 | #define QOS_CONTROL_ACK_POLICY_NOACK 1 | 18 | #define QOS_CONTROL_ACK_POLICY_NOACK 1 |
21 | 19 | ||
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 2f33df0dcccf..78021780b885 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
@@ -127,7 +127,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) | |||
127 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | 127 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) |
128 | return RX_DROP_UNUSABLE; | 128 | return RX_DROP_UNUSABLE; |
129 | 129 | ||
130 | mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, | 130 | mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx, |
131 | (void *) skb->data); | 131 | (void *) skb->data); |
132 | return RX_DROP_UNUSABLE; | 132 | return RX_DROP_UNUSABLE; |
133 | } | 133 | } |
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill-input.h index f63d05045685..bbfa646157c6 100644 --- a/net/rfkill/rfkill-input.h +++ b/net/rfkill/rfkill-input.h | |||
@@ -13,5 +13,6 @@ | |||
13 | 13 | ||
14 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); | 14 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); |
15 | void rfkill_epo(void); | 15 | void rfkill_epo(void); |
16 | void rfkill_restore_states(void); | ||
16 | 17 | ||
17 | #endif /* __RFKILL_INPUT_H */ | 18 | #endif /* __RFKILL_INPUT_H */ |
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index 74aecc098bad..d5735799ccd9 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c | |||
@@ -37,14 +37,20 @@ MODULE_DESCRIPTION("RF switch support"); | |||
37 | MODULE_LICENSE("GPL"); | 37 | MODULE_LICENSE("GPL"); |
38 | 38 | ||
39 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ | 39 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ |
40 | static DEFINE_MUTEX(rfkill_mutex); | 40 | static DEFINE_MUTEX(rfkill_global_mutex); |
41 | 41 | ||
42 | static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED; | 42 | static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED; |
43 | module_param_named(default_state, rfkill_default_state, uint, 0444); | 43 | module_param_named(default_state, rfkill_default_state, uint, 0444); |
44 | MODULE_PARM_DESC(default_state, | 44 | MODULE_PARM_DESC(default_state, |
45 | "Default initial state for all radio types, 0 = radio off"); | 45 | "Default initial state for all radio types, 0 = radio off"); |
46 | 46 | ||
47 | static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX]; | 47 | struct rfkill_gsw_state { |
48 | enum rfkill_state current_state; | ||
49 | enum rfkill_state default_state; | ||
50 | }; | ||
51 | |||
52 | static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX]; | ||
53 | static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
48 | 54 | ||
49 | static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list); | 55 | static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list); |
50 | 56 | ||
@@ -70,6 +76,7 @@ static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list); | |||
70 | */ | 76 | */ |
71 | int register_rfkill_notifier(struct notifier_block *nb) | 77 | int register_rfkill_notifier(struct notifier_block *nb) |
72 | { | 78 | { |
79 | BUG_ON(!nb); | ||
73 | return blocking_notifier_chain_register(&rfkill_notifier_list, nb); | 80 | return blocking_notifier_chain_register(&rfkill_notifier_list, nb); |
74 | } | 81 | } |
75 | EXPORT_SYMBOL_GPL(register_rfkill_notifier); | 82 | EXPORT_SYMBOL_GPL(register_rfkill_notifier); |
@@ -85,6 +92,7 @@ EXPORT_SYMBOL_GPL(register_rfkill_notifier); | |||
85 | */ | 92 | */ |
86 | int unregister_rfkill_notifier(struct notifier_block *nb) | 93 | int unregister_rfkill_notifier(struct notifier_block *nb) |
87 | { | 94 | { |
95 | BUG_ON(!nb); | ||
88 | return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb); | 96 | return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb); |
89 | } | 97 | } |
90 | EXPORT_SYMBOL_GPL(unregister_rfkill_notifier); | 98 | EXPORT_SYMBOL_GPL(unregister_rfkill_notifier); |
@@ -195,6 +203,11 @@ static int rfkill_toggle_radio(struct rfkill *rfkill, | |||
195 | * BLOCK even a transmitter that is already in state | 203 | * BLOCK even a transmitter that is already in state |
196 | * RFKILL_STATE_HARD_BLOCKED */ | 204 | * RFKILL_STATE_HARD_BLOCKED */ |
197 | break; | 205 | break; |
206 | default: | ||
207 | WARN(1, KERN_WARNING | ||
208 | "rfkill: illegal state %d passed as parameter " | ||
209 | "to rfkill_toggle_radio\n", state); | ||
210 | return -EINVAL; | ||
198 | } | 211 | } |
199 | 212 | ||
200 | if (force || state != rfkill->state) { | 213 | if (force || state != rfkill->state) { |
@@ -213,22 +226,29 @@ static int rfkill_toggle_radio(struct rfkill *rfkill, | |||
213 | } | 226 | } |
214 | 227 | ||
215 | /** | 228 | /** |
216 | * rfkill_switch_all - Toggle state of all switches of given type | 229 | * __rfkill_switch_all - Toggle state of all switches of given type |
217 | * @type: type of interfaces to be affected | 230 | * @type: type of interfaces to be affected |
218 | * @state: the new state | 231 | * @state: the new state |
219 | * | 232 | * |
220 | * This function toggles the state of all switches of given type, | 233 | * This function toggles the state of all switches of given type, |
221 | * unless a specific switch is claimed by userspace (in which case, | 234 | * unless a specific switch is claimed by userspace (in which case, |
222 | * that switch is left alone) or suspended. | 235 | * that switch is left alone) or suspended. |
236 | * | ||
237 | * Caller must have acquired rfkill_global_mutex. | ||
223 | */ | 238 | */ |
224 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) | 239 | static void __rfkill_switch_all(const enum rfkill_type type, |
240 | const enum rfkill_state state) | ||
225 | { | 241 | { |
226 | struct rfkill *rfkill; | 242 | struct rfkill *rfkill; |
227 | 243 | ||
228 | mutex_lock(&rfkill_mutex); | 244 | if (WARN((state >= RFKILL_STATE_MAX || type >= RFKILL_TYPE_MAX), |
229 | 245 | KERN_WARNING | |
230 | rfkill_states[type] = state; | 246 | "rfkill: illegal state %d or type %d " |
247 | "passed as parameter to __rfkill_switch_all\n", | ||
248 | state, type)) | ||
249 | return; | ||
231 | 250 | ||
251 | rfkill_global_states[type].current_state = state; | ||
232 | list_for_each_entry(rfkill, &rfkill_list, node) { | 252 | list_for_each_entry(rfkill, &rfkill_list, node) { |
233 | if ((!rfkill->user_claim) && (rfkill->type == type)) { | 253 | if ((!rfkill->user_claim) && (rfkill->type == type)) { |
234 | mutex_lock(&rfkill->mutex); | 254 | mutex_lock(&rfkill->mutex); |
@@ -236,8 +256,21 @@ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) | |||
236 | mutex_unlock(&rfkill->mutex); | 256 | mutex_unlock(&rfkill->mutex); |
237 | } | 257 | } |
238 | } | 258 | } |
259 | } | ||
239 | 260 | ||
240 | mutex_unlock(&rfkill_mutex); | 261 | /** |
262 | * rfkill_switch_all - Toggle state of all switches of given type | ||
263 | * @type: type of interfaces to be affected | ||
264 | * @state: the new state | ||
265 | * | ||
266 | * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state). | ||
267 | * Please refer to __rfkill_switch_all() for details. | ||
268 | */ | ||
269 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) | ||
270 | { | ||
271 | mutex_lock(&rfkill_global_mutex); | ||
272 | __rfkill_switch_all(type, state); | ||
273 | mutex_unlock(&rfkill_global_mutex); | ||
241 | } | 274 | } |
242 | EXPORT_SYMBOL(rfkill_switch_all); | 275 | EXPORT_SYMBOL(rfkill_switch_all); |
243 | 276 | ||
@@ -245,23 +278,53 @@ EXPORT_SYMBOL(rfkill_switch_all); | |||
245 | * rfkill_epo - emergency power off all transmitters | 278 | * rfkill_epo - emergency power off all transmitters |
246 | * | 279 | * |
247 | * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, | 280 | * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, |
248 | * ignoring everything in its path but rfkill_mutex and rfkill->mutex. | 281 | * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex. |
282 | * | ||
283 | * The global state before the EPO is saved and can be restored later | ||
284 | * using rfkill_restore_states(). | ||
249 | */ | 285 | */ |
250 | void rfkill_epo(void) | 286 | void rfkill_epo(void) |
251 | { | 287 | { |
252 | struct rfkill *rfkill; | 288 | struct rfkill *rfkill; |
289 | int i; | ||
290 | |||
291 | mutex_lock(&rfkill_global_mutex); | ||
253 | 292 | ||
254 | mutex_lock(&rfkill_mutex); | ||
255 | list_for_each_entry(rfkill, &rfkill_list, node) { | 293 | list_for_each_entry(rfkill, &rfkill_list, node) { |
256 | mutex_lock(&rfkill->mutex); | 294 | mutex_lock(&rfkill->mutex); |
257 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); | 295 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); |
258 | mutex_unlock(&rfkill->mutex); | 296 | mutex_unlock(&rfkill->mutex); |
259 | } | 297 | } |
260 | mutex_unlock(&rfkill_mutex); | 298 | for (i = 0; i < RFKILL_TYPE_MAX; i++) { |
299 | rfkill_global_states[i].default_state = | ||
300 | rfkill_global_states[i].current_state; | ||
301 | rfkill_global_states[i].current_state = | ||
302 | RFKILL_STATE_SOFT_BLOCKED; | ||
303 | } | ||
304 | mutex_unlock(&rfkill_global_mutex); | ||
261 | } | 305 | } |
262 | EXPORT_SYMBOL_GPL(rfkill_epo); | 306 | EXPORT_SYMBOL_GPL(rfkill_epo); |
263 | 307 | ||
264 | /** | 308 | /** |
309 | * rfkill_restore_states - restore global states | ||
310 | * | ||
311 | * Restore (and sync switches to) the global state from the | ||
312 | * default states saved in rfkill_global_states. This can undo the effects of | ||
313 | * a call to rfkill_epo(). | ||
314 | */ | ||
315 | void rfkill_restore_states(void) | ||
316 | { | ||
317 | int i; | ||
318 | |||
319 | mutex_lock(&rfkill_global_mutex); | ||
320 | |||
321 | for (i = 0; i < RFKILL_TYPE_MAX; i++) | ||
322 | __rfkill_switch_all(i, rfkill_global_states[i].default_state); | ||
323 | mutex_unlock(&rfkill_global_mutex); | ||
324 | } | ||
325 | EXPORT_SYMBOL_GPL(rfkill_restore_states); | ||
326 | |||
327 | /** | ||
265 | * rfkill_force_state - Force the internal rfkill radio state | 328 | * rfkill_force_state - Force the internal rfkill radio state |
266 | * @rfkill: pointer to the rfkill class to modify. | 329 | * @rfkill: pointer to the rfkill class to modify. |
267 | * @state: the current radio state the class should be forced to. | 330 | * @state: the current radio state the class should be forced to. |
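rfkill_epo() above snapshots each type's current global state into default_state before forcing everything to SOFT_BLOCKED, and rfkill_restore_states() replays that snapshot. A toy illustration of the save-then-restore pattern, with hypothetical names and no locking:

#include <stdio.h>

enum demo_state { DEMO_SOFT_BLOCKED, DEMO_UNBLOCKED };

#define DEMO_TYPE_MAX 3

struct demo_gsw_state {
	enum demo_state current_state;
	enum demo_state default_state;
};

static struct demo_gsw_state demo_states[DEMO_TYPE_MAX];

/* mirror of the EPO flow above: remember each state, then block everything */
static void demo_epo(void)
{
	int i;

	for (i = 0; i < DEMO_TYPE_MAX; i++) {
		demo_states[i].default_state = demo_states[i].current_state;
		demo_states[i].current_state = DEMO_SOFT_BLOCKED;
	}
}

/* mirror of rfkill_restore_states(): undo the EPO */
static void demo_restore(void)
{
	int i;

	for (i = 0; i < DEMO_TYPE_MAX; i++)
		demo_states[i].current_state = demo_states[i].default_state;
}

int main(void)
{
	demo_states[0].current_state = DEMO_UNBLOCKED;
	demo_epo();
	printf("after epo: %d\n", demo_states[0].current_state);	/* 0 */
	demo_restore();
	printf("after restore: %d\n", demo_states[0].current_state);	/* 1 */
	return 0;
}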
@@ -282,9 +345,11 @@ int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state) | |||
282 | { | 345 | { |
283 | enum rfkill_state oldstate; | 346 | enum rfkill_state oldstate; |
284 | 347 | ||
285 | if (state != RFKILL_STATE_SOFT_BLOCKED && | 348 | BUG_ON(!rfkill); |
286 | state != RFKILL_STATE_UNBLOCKED && | 349 | if (WARN((state >= RFKILL_STATE_MAX), |
287 | state != RFKILL_STATE_HARD_BLOCKED) | 350 | KERN_WARNING |
351 | "rfkill: illegal state %d passed as parameter " | ||
352 | "to rfkill_force_state\n", state)) | ||
288 | return -EINVAL; | 353 | return -EINVAL; |
289 | 354 | ||
290 | mutex_lock(&rfkill->mutex); | 355 | mutex_lock(&rfkill->mutex); |
@@ -352,12 +417,16 @@ static ssize_t rfkill_state_store(struct device *dev, | |||
352 | const char *buf, size_t count) | 417 | const char *buf, size_t count) |
353 | { | 418 | { |
354 | struct rfkill *rfkill = to_rfkill(dev); | 419 | struct rfkill *rfkill = to_rfkill(dev); |
355 | unsigned int state = simple_strtoul(buf, NULL, 0); | 420 | unsigned long state; |
356 | int error; | 421 | int error; |
357 | 422 | ||
358 | if (!capable(CAP_NET_ADMIN)) | 423 | if (!capable(CAP_NET_ADMIN)) |
359 | return -EPERM; | 424 | return -EPERM; |
360 | 425 | ||
426 | error = strict_strtoul(buf, 0, &state); | ||
427 | if (error) | ||
428 | return error; | ||
429 | |||
361 | /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ | 430 | /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ |
362 | if (state != RFKILL_STATE_UNBLOCKED && | 431 | if (state != RFKILL_STATE_UNBLOCKED && |
363 | state != RFKILL_STATE_SOFT_BLOCKED) | 432 | state != RFKILL_STATE_SOFT_BLOCKED) |
@@ -385,7 +454,8 @@ static ssize_t rfkill_claim_store(struct device *dev, | |||
385 | const char *buf, size_t count) | 454 | const char *buf, size_t count) |
386 | { | 455 | { |
387 | struct rfkill *rfkill = to_rfkill(dev); | 456 | struct rfkill *rfkill = to_rfkill(dev); |
388 | bool claim = !!simple_strtoul(buf, NULL, 0); | 457 | unsigned long claim_tmp; |
458 | bool claim; | ||
389 | int error; | 459 | int error; |
390 | 460 | ||
391 | if (!capable(CAP_NET_ADMIN)) | 461 | if (!capable(CAP_NET_ADMIN)) |
@@ -394,11 +464,16 @@ static ssize_t rfkill_claim_store(struct device *dev, | |||
394 | if (rfkill->user_claim_unsupported) | 464 | if (rfkill->user_claim_unsupported) |
395 | return -EOPNOTSUPP; | 465 | return -EOPNOTSUPP; |
396 | 466 | ||
467 | error = strict_strtoul(buf, 0, &claim_tmp); | ||
468 | if (error) | ||
469 | return error; | ||
470 | claim = !!claim_tmp; | ||
471 | |||
397 | /* | 472 | /* |
398 | * Take the global lock to make sure the kernel is not in | 473 | * Take the global lock to make sure the kernel is not in |
399 | * the middle of rfkill_switch_all | 474 | * the middle of rfkill_switch_all |
400 | */ | 475 | */ |
401 | error = mutex_lock_interruptible(&rfkill_mutex); | 476 | error = mutex_lock_interruptible(&rfkill_global_mutex); |
402 | if (error) | 477 | if (error) |
403 | return error; | 478 | return error; |
404 | 479 | ||
@@ -406,14 +481,14 @@ static ssize_t rfkill_claim_store(struct device *dev, | |||
406 | if (!claim) { | 481 | if (!claim) { |
407 | mutex_lock(&rfkill->mutex); | 482 | mutex_lock(&rfkill->mutex); |
408 | rfkill_toggle_radio(rfkill, | 483 | rfkill_toggle_radio(rfkill, |
409 | rfkill_states[rfkill->type], | 484 | rfkill_global_states[rfkill->type].current_state, |
410 | 0); | 485 | 0); |
411 | mutex_unlock(&rfkill->mutex); | 486 | mutex_unlock(&rfkill->mutex); |
412 | } | 487 | } |
413 | rfkill->user_claim = claim; | 488 | rfkill->user_claim = claim; |
414 | } | 489 | } |
415 | 490 | ||
416 | mutex_unlock(&rfkill_mutex); | 491 | mutex_unlock(&rfkill_global_mutex); |
417 | 492 | ||
418 | return error ? error : count; | 493 | return error ? error : count; |
419 | } | 494 | } |
@@ -525,24 +600,60 @@ static struct class rfkill_class = { | |||
525 | .dev_uevent = rfkill_dev_uevent, | 600 | .dev_uevent = rfkill_dev_uevent, |
526 | }; | 601 | }; |
527 | 602 | ||
603 | static int rfkill_check_duplicity(const struct rfkill *rfkill) | ||
604 | { | ||
605 | struct rfkill *p; | ||
606 | unsigned long seen[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
607 | |||
608 | memset(seen, 0, sizeof(seen)); | ||
609 | |||
610 | list_for_each_entry(p, &rfkill_list, node) { | ||
611 | if (WARN((p == rfkill), KERN_WARNING | ||
612 | "rfkill: illegal attempt to register " | ||
613 | "an already registered rfkill struct\n")) | ||
614 | return -EEXIST; | ||
615 | set_bit(p->type, seen); | ||
616 | } | ||
617 | |||
618 | /* 0: first switch of its kind */ | ||
619 | return test_bit(rfkill->type, seen); | ||
620 | } | ||
621 | |||
528 | static int rfkill_add_switch(struct rfkill *rfkill) | 622 | static int rfkill_add_switch(struct rfkill *rfkill) |
529 | { | 623 | { |
530 | mutex_lock(&rfkill_mutex); | 624 | int error; |
531 | 625 | ||
532 | rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type], 0); | 626 | mutex_lock(&rfkill_global_mutex); |
627 | |||
628 | error = rfkill_check_duplicity(rfkill); | ||
629 | if (error < 0) | ||
630 | goto unlock_out; | ||
631 | |||
632 | if (!error) { | ||
633 | /* lock default after first use */ | ||
634 | set_bit(rfkill->type, rfkill_states_lockdflt); | ||
635 | rfkill_global_states[rfkill->type].current_state = | ||
636 | rfkill_global_states[rfkill->type].default_state; | ||
637 | } | ||
638 | |||
639 | rfkill_toggle_radio(rfkill, | ||
640 | rfkill_global_states[rfkill->type].current_state, | ||
641 | 0); | ||
533 | 642 | ||
534 | list_add_tail(&rfkill->node, &rfkill_list); | 643 | list_add_tail(&rfkill->node, &rfkill_list); |
535 | 644 | ||
536 | mutex_unlock(&rfkill_mutex); | 645 | error = 0; |
646 | unlock_out: | ||
647 | mutex_unlock(&rfkill_global_mutex); | ||
537 | 648 | ||
538 | return 0; | 649 | return error; |
539 | } | 650 | } |
540 | 651 | ||
541 | static void rfkill_remove_switch(struct rfkill *rfkill) | 652 | static void rfkill_remove_switch(struct rfkill *rfkill) |
542 | { | 653 | { |
543 | mutex_lock(&rfkill_mutex); | 654 | mutex_lock(&rfkill_global_mutex); |
544 | list_del_init(&rfkill->node); | 655 | list_del_init(&rfkill->node); |
545 | mutex_unlock(&rfkill_mutex); | 656 | mutex_unlock(&rfkill_global_mutex); |
546 | 657 | ||
547 | mutex_lock(&rfkill->mutex); | 658 | mutex_lock(&rfkill->mutex); |
548 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); | 659 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); |
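rfkill_check_duplicity() above does two jobs: it refuses to register the same struct twice, and its "seen" bitmap tells rfkill_add_switch() whether this is the first switch of its type (return value 0), so the type's default state can be locked at that point. A standalone, runnable illustration of that bitmap idiom with invented names (not the kernel's definitions):

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	/* illustrative type enum and switch struct */
	enum sw_type { SW_WLAN, SW_BLUETOOTH, SW_UWB, SW_TYPE_MAX };

	struct sw {
		enum sw_type type;
	};

	/* Mirrors the "seen" bitmap in rfkill_check_duplicity(): returns 0 when
	 * no registered switch of new->type exists yet, non-zero otherwise. */
	static int type_already_seen(const struct sw *list, size_t n,
				     const struct sw *new)
	{
		uint32_t seen = 0;
		size_t i;

		for (i = 0; i < n; i++)
			seen |= UINT32_C(1) << list[i].type;

		return (seen >> new->type) & 1;
	}

	int main(void)
	{
		struct sw registered[] = { { SW_WLAN } };
		struct sw bt = { SW_BLUETOOTH }, wlan = { SW_WLAN };

		/* prints "0 1": bluetooth is the first of its kind, wlan is not */
		printf("%d %d\n",
		       type_already_seen(registered, 1, &bt),
		       type_already_seen(registered, 1, &wlan));
		return 0;
	}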
@@ -562,11 +673,18 @@ static void rfkill_remove_switch(struct rfkill *rfkill) | |||
562 | * NOTE: If registration fails the structure should be freed by calling | 673 | * NOTE: If registration fails the structure should be freed by calling |
563 | * rfkill_free() otherwise rfkill_unregister() should be used. | 674 | * rfkill_free() otherwise rfkill_unregister() should be used. |
564 | */ | 675 | */ |
565 | struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type) | 676 | struct rfkill * __must_check rfkill_allocate(struct device *parent, |
677 | enum rfkill_type type) | ||
566 | { | 678 | { |
567 | struct rfkill *rfkill; | 679 | struct rfkill *rfkill; |
568 | struct device *dev; | 680 | struct device *dev; |
569 | 681 | ||
682 | if (WARN((type >= RFKILL_TYPE_MAX), | ||
683 | KERN_WARNING | ||
684 | "rfkill: illegal type %d passed as parameter " | ||
685 | "to rfkill_allocate\n", type)) | ||
686 | return NULL; | ||
687 | |||
570 | rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL); | 688 | rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL); |
571 | if (!rfkill) | 689 | if (!rfkill) |
572 | return NULL; | 690 | return NULL; |
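With rfkill_allocate() now returning NULL for an illegal type and rfkill_register() marked __must_check, callers are expected to handle both failures and, per the NOTE above, free (not unregister) on a failed registration. A hedged driver-side sketch; the mydrv_* names and the priv pointer are illustrative, not from this patch:

	#include <linux/rfkill.h>

	static int mydrv_toggle_radio(void *data, enum rfkill_state state)
	{
		/* power the radio up on RFKILL_STATE_UNBLOCKED,
		 * down on RFKILL_STATE_SOFT_BLOCKED */
		return 0;
	}

	static struct rfkill *mydrv_setup_rfkill(struct device *parent, void *priv)
	{
		struct rfkill *rf;

		rf = rfkill_allocate(parent, RFKILL_TYPE_WLAN);
		if (!rf)
			return NULL;		/* bad type or out of memory */

		rf->name = "mydrv-wlan";
		rf->data = priv;
		rf->toggle_radio = mydrv_toggle_radio;

		if (rfkill_register(rf)) {	/* return value must be checked now */
			rfkill_free(rf);	/* failed registration: free, don't unregister */
			return NULL;
		}
		return rf;
	}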
@@ -633,15 +751,18 @@ static void rfkill_led_trigger_unregister(struct rfkill *rfkill) | |||
633 | * structure needs to be registered. Immediately from registration the | 751 | * structure needs to be registered. Immediately from registration the |
634 | * switch driver should be able to service calls to toggle_radio. | 752 | * switch driver should be able to service calls to toggle_radio. |
635 | */ | 753 | */ |
636 | int rfkill_register(struct rfkill *rfkill) | 754 | int __must_check rfkill_register(struct rfkill *rfkill) |
637 | { | 755 | { |
638 | static atomic_t rfkill_no = ATOMIC_INIT(0); | 756 | static atomic_t rfkill_no = ATOMIC_INIT(0); |
639 | struct device *dev = &rfkill->dev; | 757 | struct device *dev = &rfkill->dev; |
640 | int error; | 758 | int error; |
641 | 759 | ||
642 | if (!rfkill->toggle_radio) | 760 | if (WARN((!rfkill || !rfkill->toggle_radio || |
643 | return -EINVAL; | 761 | rfkill->type >= RFKILL_TYPE_MAX || |
644 | if (rfkill->type >= RFKILL_TYPE_MAX) | 762 | rfkill->state >= RFKILL_STATE_MAX), |
763 | KERN_WARNING | ||
764 | "rfkill: attempt to register a " | ||
765 | "badly initialized rfkill struct\n")) | ||
645 | return -EINVAL; | 766 | return -EINVAL; |
646 | 767 | ||
647 | snprintf(dev->bus_id, sizeof(dev->bus_id), | 768 | snprintf(dev->bus_id, sizeof(dev->bus_id), |
@@ -676,6 +797,7 @@ EXPORT_SYMBOL(rfkill_register); | |||
676 | */ | 797 | */ |
677 | void rfkill_unregister(struct rfkill *rfkill) | 798 | void rfkill_unregister(struct rfkill *rfkill) |
678 | { | 799 | { |
800 | BUG_ON(!rfkill); | ||
679 | device_del(&rfkill->dev); | 801 | device_del(&rfkill->dev); |
680 | rfkill_remove_switch(rfkill); | 802 | rfkill_remove_switch(rfkill); |
681 | rfkill_led_trigger_unregister(rfkill); | 803 | rfkill_led_trigger_unregister(rfkill); |
@@ -683,6 +805,56 @@ void rfkill_unregister(struct rfkill *rfkill) | |||
683 | } | 805 | } |
684 | EXPORT_SYMBOL(rfkill_unregister); | 806 | EXPORT_SYMBOL(rfkill_unregister); |
685 | 807 | ||
808 | /** | ||
809 | * rfkill_set_default - set initial value for a switch type | ||
810 | * @type - the type of switch to set the default state of | ||
811 | * @state - the new default state for that group of switches | ||
812 | * | ||
813 | * Sets the initial state rfkill should use for a given type. | ||
814 | * The following initial states are allowed: RFKILL_STATE_SOFT_BLOCKED | ||
815 | * and RFKILL_STATE_UNBLOCKED. | ||
816 | * | ||
817 | * This function is meant to be used by platform drivers for platforms | ||
818 | * that can save switch state across power down/reboot. | ||
819 | * | ||
820 | * The default state for each switch type can be changed exactly once. | ||
821 | * After a switch of that type is registered, the default state cannot | ||
822 | * be changed anymore. This guards against multiple drivers in the | ||
823 | * same platform trying to set the initial switch default state, which | ||
824 | * is not allowed. | ||
825 | * | ||
826 | * Returns -EPERM if the state has already been set once or is in use, | ||
827 | * so drivers likely want to either ignore or at most printk(KERN_NOTICE) | ||
828 | * if this function returns -EPERM. | ||
829 | * | ||
830 | * Returns 0 if the new default state was set, or an error if it | ||
831 | * could not be set. | ||
832 | */ | ||
833 | int rfkill_set_default(enum rfkill_type type, enum rfkill_state state) | ||
834 | { | ||
835 | int error; | ||
836 | |||
837 | if (WARN((type >= RFKILL_TYPE_MAX || | ||
838 | (state != RFKILL_STATE_SOFT_BLOCKED && | ||
839 | state != RFKILL_STATE_UNBLOCKED)), | ||
840 | KERN_WARNING | ||
841 | "rfkill: illegal state %d or type %d passed as " | ||
842 | "parameter to rfkill_set_default\n", state, type)) | ||
843 | return -EINVAL; | ||
844 | |||
845 | mutex_lock(&rfkill_global_mutex); | ||
846 | |||
847 | if (!test_and_set_bit(type, rfkill_states_lockdflt)) { | ||
848 | rfkill_global_states[type].default_state = state; | ||
849 | error = 0; | ||
850 | } else | ||
851 | error = -EPERM; | ||
852 | |||
853 | mutex_unlock(&rfkill_global_mutex); | ||
854 | return error; | ||
855 | } | ||
856 | EXPORT_SYMBOL_GPL(rfkill_set_default); | ||
857 | |||
686 | /* | 858 | /* |
687 | * Rfkill module initialization/deinitialization. | 859 | * Rfkill module initialization/deinitialization. |
688 | */ | 860 | */ |
@@ -696,8 +868,8 @@ static int __init rfkill_init(void) | |||
696 | rfkill_default_state != RFKILL_STATE_UNBLOCKED) | 868 | rfkill_default_state != RFKILL_STATE_UNBLOCKED) |
697 | return -EINVAL; | 869 | return -EINVAL; |
698 | 870 | ||
699 | for (i = 0; i < ARRAY_SIZE(rfkill_states); i++) | 871 | for (i = 0; i < RFKILL_TYPE_MAX; i++) |
700 | rfkill_states[i] = rfkill_default_state; | 872 | rfkill_global_states[i].default_state = rfkill_default_state; |
701 | 873 | ||
702 | error = class_register(&rfkill_class); | 874 | error = class_register(&rfkill_class); |
703 | if (error) { | 875 | if (error) { |
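rfkill_set_default(), added above, is meant for platform drivers that restore radio state saved across reboot; per its kerneldoc it may legitimately fail with -EPERM once the default for that type is locked, and callers should at most log that. A hedged caller sketch; read_saved_wlan_state() is a made-up helper standing in for however the platform recovers its saved state:

	static void myplatform_restore_rfkill_default(void)
	{
		enum rfkill_state saved = read_saved_wlan_state() ?
				RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
		int err = rfkill_set_default(RFKILL_TYPE_WLAN, saved);

		if (err == -EPERM)	/* default already locked in: not an error worth failing on */
			printk(KERN_NOTICE "myplatform: wlan rfkill default already set\n");
	}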
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 9634091ee2f0..ec0a0839ce51 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -215,10 +215,9 @@ static void dev_watchdog(unsigned long arg) | |||
215 | time_after(jiffies, (dev->trans_start + | 215 | time_after(jiffies, (dev->trans_start + |
216 | dev->watchdog_timeo))) { | 216 | dev->watchdog_timeo))) { |
217 | char drivername[64]; | 217 | char drivername[64]; |
218 | printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n", | 218 | WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n", |
219 | dev->name, netdev_drivername(dev, drivername, 64)); | 219 | dev->name, netdev_drivername(dev, drivername, 64)); |
220 | dev->tx_timeout(dev); | 220 | dev->tx_timeout(dev); |
221 | WARN_ON_ONCE(1); | ||
222 | } | 221 | } |
223 | if (!mod_timer(&dev->watchdog_timer, | 222 | if (!mod_timer(&dev->watchdog_timer, |
224 | round_jiffies(jiffies + | 223 | round_jiffies(jiffies + |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 3781e55046d0..a11959908d9a 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -388,6 +388,20 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { | |||
388 | [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, | 388 | [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, |
389 | }; | 389 | }; |
390 | 390 | ||
391 | static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, | ||
392 | const struct nla_policy *policy, int len) | ||
393 | { | ||
394 | int nested_len = nla_len(nla) - NLA_ALIGN(len); | ||
395 | |||
396 | if (nested_len < 0) | ||
397 | return -EINVAL; | ||
398 | if (nested_len >= nla_attr_size(0)) | ||
399 | return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), | ||
400 | nested_len, policy); | ||
401 | memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); | ||
402 | return 0; | ||
403 | } | ||
404 | |||
391 | /* Parse netlink message to set options */ | 405 | /* Parse netlink message to set options */ |
392 | static int netem_change(struct Qdisc *sch, struct nlattr *opt) | 406 | static int netem_change(struct Qdisc *sch, struct nlattr *opt) |
393 | { | 407 | { |
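parse_attr() above open-codes what nla_parse_nested_compat() used to do for netem: the TCA_OPTIONS payload is a fixed struct tc_netem_qopt followed, after NLA_ALIGN(sizeof(qopt)), by optional nested attributes. A sketch of how a caller splits the two parts; netem_qopt_and_attrs() is an invented wrapper, and netem_change() in the next hunk does the same thing inline:

	/* payload layout assumed here:
	 *   nla_data(opt)                            -> struct tc_netem_qopt
	 *   nla_data(opt) + NLA_ALIGN(sizeof(qopt))  -> TCA_NETEM_CORR, ... (optional)
	 */
	static int netem_qopt_and_attrs(struct nlattr *tb[], struct nlattr *opt,
					struct tc_netem_qopt **qopt)
	{
		if (nla_len(opt) < sizeof(**qopt))
			return -EINVAL;		/* the fixed part must be present */
		*qopt = nla_data(opt);
		/* parse only the tail; tb[] is zeroed if there is no tail */
		return parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(**qopt));
	}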
@@ -399,8 +413,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt) | |||
399 | if (opt == NULL) | 413 | if (opt == NULL) |
400 | return -EINVAL; | 414 | return -EINVAL; |
401 | 415 | ||
402 | ret = nla_parse_nested_compat(tb, TCA_NETEM_MAX, opt, netem_policy, | 416 | qopt = nla_data(opt); |
403 | qopt, sizeof(*qopt)); | 417 | ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); |
404 | if (ret < 0) | 418 | if (ret < 0) |
405 | return ret; | 419 | return ret; |
406 | 420 | ||
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index a6697c686c7f..504a78cdb718 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -254,16 +254,12 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
254 | { | 254 | { |
255 | struct prio_sched_data *q = qdisc_priv(sch); | 255 | struct prio_sched_data *q = qdisc_priv(sch); |
256 | unsigned char *b = skb_tail_pointer(skb); | 256 | unsigned char *b = skb_tail_pointer(skb); |
257 | struct nlattr *nest; | ||
258 | struct tc_prio_qopt opt; | 257 | struct tc_prio_qopt opt; |
259 | 258 | ||
260 | opt.bands = q->bands; | 259 | opt.bands = q->bands; |
261 | memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); | 260 | memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); |
262 | 261 | ||
263 | nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt); | 262 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); |
264 | if (nest == NULL) | ||
265 | goto nla_put_failure; | ||
266 | nla_nest_compat_end(skb, nest); | ||
267 | 263 | ||
268 | return skb->len; | 264 | return skb->len; |
269 | 265 | ||
diff --git a/net/wireless/core.c b/net/wireless/core.c index f1da0b93bc56..7e995ac06a0c 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * This is the linux wireless configuration interface. | 2 | * This is the linux wireless configuration interface. |
3 | * | 3 | * |
4 | * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> | 4 | * Copyright 2006-2008 Johannes Berg <johannes@sipsolutions.net> |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/if.h> | 7 | #include <linux/if.h> |
@@ -259,6 +259,13 @@ int wiphy_register(struct wiphy *wiphy) | |||
259 | struct ieee80211_supported_band *sband; | 259 | struct ieee80211_supported_band *sband; |
260 | bool have_band = false; | 260 | bool have_band = false; |
261 | int i; | 261 | int i; |
262 | u16 ifmodes = wiphy->interface_modes; | ||
263 | |||
264 | /* sanity check ifmodes */ | ||
265 | WARN_ON(!ifmodes); | ||
266 | ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1; | ||
267 | if (WARN_ON(ifmodes != wiphy->interface_modes)) | ||
268 | wiphy->interface_modes = ifmodes; | ||
262 | 269 | ||
263 | /* sanity check supported bands/channels */ | 270 | /* sanity check supported bands/channels */ |
264 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 271 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
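wiphy_register() now sanity-checks wiphy->interface_modes, a bitmask of BIT(NL80211_IFTYPE_*) values: bit 0 (NL80211_IFTYPE_UNSPECIFIED) and anything at or past __NL80211_IFTYPE_AFTER_LAST is warned about and masked off. A hedged driver-side sketch of filling it in before registration; mydrv_cfg80211_ops and struct mydrv_priv are illustrative:

	static struct wiphy *mydrv_alloc_wiphy(void)
	{
		struct wiphy *wiphy;

		wiphy = wiphy_new(&mydrv_cfg80211_ops, sizeof(struct mydrv_priv));
		if (!wiphy)
			return NULL;

		/* advertise the interface types the hardware can actually run */
		wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
					 BIT(NL80211_IFTYPE_ADHOC) |
					 BIT(NL80211_IFTYPE_MONITOR);

		if (wiphy_register(wiphy)) {	/* WARNs on and masks out invalid bits */
			wiphy_free(wiphy);
			return NULL;
		}
		return wiphy;
	}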
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 59eb2cf42e5f..77880ba8b619 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -87,6 +87,13 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { | |||
87 | [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, | 87 | [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, |
88 | .len = IEEE80211_MAX_MESH_ID_LEN }, | 88 | .len = IEEE80211_MAX_MESH_ID_LEN }, |
89 | [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, | 89 | [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, |
90 | |||
91 | [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 }, | ||
92 | [NL80211_ATTR_BSS_SHORT_PREAMBLE] = { .type = NLA_U8 }, | ||
93 | [NL80211_ATTR_BSS_SHORT_SLOT_TIME] = { .type = NLA_U8 }, | ||
94 | |||
95 | [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY, | ||
96 | .len = NL80211_HT_CAPABILITY_LEN }, | ||
90 | }; | 97 | }; |
91 | 98 | ||
92 | /* message building helper */ | 99 | /* message building helper */ |
@@ -106,10 +113,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
106 | struct nlattr *nl_bands, *nl_band; | 113 | struct nlattr *nl_bands, *nl_band; |
107 | struct nlattr *nl_freqs, *nl_freq; | 114 | struct nlattr *nl_freqs, *nl_freq; |
108 | struct nlattr *nl_rates, *nl_rate; | 115 | struct nlattr *nl_rates, *nl_rate; |
116 | struct nlattr *nl_modes; | ||
109 | enum ieee80211_band band; | 117 | enum ieee80211_band band; |
110 | struct ieee80211_channel *chan; | 118 | struct ieee80211_channel *chan; |
111 | struct ieee80211_rate *rate; | 119 | struct ieee80211_rate *rate; |
112 | int i; | 120 | int i; |
121 | u16 ifmodes = dev->wiphy.interface_modes; | ||
113 | 122 | ||
114 | hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); | 123 | hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); |
115 | if (!hdr) | 124 | if (!hdr) |
@@ -118,6 +127,20 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
118 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); | 127 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); |
119 | NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); | 128 | NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); |
120 | 129 | ||
130 | nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES); | ||
131 | if (!nl_modes) | ||
132 | goto nla_put_failure; | ||
133 | |||
134 | i = 0; | ||
135 | while (ifmodes) { | ||
136 | if (ifmodes & 1) | ||
137 | NLA_PUT_FLAG(msg, i); | ||
138 | ifmodes >>= 1; | ||
139 | i++; | ||
140 | } | ||
141 | |||
142 | nla_nest_end(msg, nl_modes); | ||
143 | |||
121 | nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); | 144 | nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); |
122 | if (!nl_bands) | 145 | if (!nl_bands) |
123 | goto nla_put_failure; | 146 | goto nla_put_failure; |
@@ -408,7 +431,8 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) | |||
408 | ifindex = dev->ifindex; | 431 | ifindex = dev->ifindex; |
409 | dev_put(dev); | 432 | dev_put(dev); |
410 | 433 | ||
411 | if (!drv->ops->change_virtual_intf) { | 434 | if (!drv->ops->change_virtual_intf || |
435 | !(drv->wiphy.interface_modes & (1 << type))) { | ||
412 | err = -EOPNOTSUPP; | 436 | err = -EOPNOTSUPP; |
413 | goto unlock; | 437 | goto unlock; |
414 | } | 438 | } |
@@ -455,7 +479,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | |||
455 | if (IS_ERR(drv)) | 479 | if (IS_ERR(drv)) |
456 | return PTR_ERR(drv); | 480 | return PTR_ERR(drv); |
457 | 481 | ||
458 | if (!drv->ops->add_virtual_intf) { | 482 | if (!drv->ops->add_virtual_intf || |
483 | !(drv->wiphy.interface_modes & (1 << type))) { | ||
459 | err = -EOPNOTSUPP; | 484 | err = -EOPNOTSUPP; |
460 | goto unlock; | 485 | goto unlock; |
461 | } | 486 | } |
@@ -1125,6 +1150,10 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) | |||
1125 | params.listen_interval = | 1150 | params.listen_interval = |
1126 | nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); | 1151 | nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); |
1127 | 1152 | ||
1153 | if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) | ||
1154 | params.ht_capa = | ||
1155 | nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); | ||
1156 | |||
1128 | if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], | 1157 | if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], |
1129 | &params.station_flags)) | 1158 | &params.station_flags)) |
1130 | return -EINVAL; | 1159 | return -EINVAL; |
@@ -1188,6 +1217,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
1188 | params.listen_interval = | 1217 | params.listen_interval = |
1189 | nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); | 1218 | nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); |
1190 | params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); | 1219 | params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); |
1220 | if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) | ||
1221 | params.ht_capa = | ||
1222 | nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); | ||
1191 | 1223 | ||
1192 | if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], | 1224 | if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], |
1193 | &params.station_flags)) | 1225 | &params.station_flags)) |
@@ -1525,6 +1557,48 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) | |||
1525 | return err; | 1557 | return err; |
1526 | } | 1558 | } |
1527 | 1559 | ||
1560 | static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) | ||
1561 | { | ||
1562 | struct cfg80211_registered_device *drv; | ||
1563 | int err; | ||
1564 | struct net_device *dev; | ||
1565 | struct bss_parameters params; | ||
1566 | |||
1567 | memset(&params, 0, sizeof(params)); | ||
1568 | /* default to not changing parameters */ | ||
1569 | params.use_cts_prot = -1; | ||
1570 | params.use_short_preamble = -1; | ||
1571 | params.use_short_slot_time = -1; | ||
1572 | |||
1573 | if (info->attrs[NL80211_ATTR_BSS_CTS_PROT]) | ||
1574 | params.use_cts_prot = | ||
1575 | nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]); | ||
1576 | if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]) | ||
1577 | params.use_short_preamble = | ||
1578 | nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]); | ||
1579 | if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]) | ||
1580 | params.use_short_slot_time = | ||
1581 | nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]); | ||
1582 | |||
1583 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | ||
1584 | if (err) | ||
1585 | return err; | ||
1586 | |||
1587 | if (!drv->ops->change_bss) { | ||
1588 | err = -EOPNOTSUPP; | ||
1589 | goto out; | ||
1590 | } | ||
1591 | |||
1592 | rtnl_lock(); | ||
1593 | err = drv->ops->change_bss(&drv->wiphy, dev, &params); | ||
1594 | rtnl_unlock(); | ||
1595 | |||
1596 | out: | ||
1597 | cfg80211_put_dev(drv); | ||
1598 | dev_put(dev); | ||
1599 | return err; | ||
1600 | } | ||
1601 | |||
1528 | static struct genl_ops nl80211_ops[] = { | 1602 | static struct genl_ops nl80211_ops[] = { |
1529 | { | 1603 | { |
1530 | .cmd = NL80211_CMD_GET_WIPHY, | 1604 | .cmd = NL80211_CMD_GET_WIPHY, |
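nl80211_set_bss() above initializes every field of struct bss_parameters to -1 so the driver can tell "leave unchanged" apart from an explicit 0 or 1. A sketch of the driver side of that convention; mydrv_change_bss() and the mydrv_set_* helpers are illustrative, not from this patch:

	static int mydrv_change_bss(struct wiphy *wiphy, struct net_device *dev,
				    struct bss_parameters *params)
	{
		struct mydrv_priv *priv = wiphy_priv(wiphy);

		if (params->use_cts_prot >= 0)		/* -1 means "do not touch" */
			mydrv_set_cts_prot(priv, params->use_cts_prot);
		if (params->use_short_preamble >= 0)
			mydrv_set_short_preamble(priv, params->use_short_preamble);
		if (params->use_short_slot_time >= 0)
			mydrv_set_short_slot(priv, params->use_short_slot_time);
		return 0;
	}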
@@ -1656,6 +1730,12 @@ static struct genl_ops nl80211_ops[] = { | |||
1656 | .policy = nl80211_policy, | 1730 | .policy = nl80211_policy, |
1657 | .flags = GENL_ADMIN_PERM, | 1731 | .flags = GENL_ADMIN_PERM, |
1658 | }, | 1732 | }, |
1733 | { | ||
1734 | .cmd = NL80211_CMD_SET_BSS, | ||
1735 | .doit = nl80211_set_bss, | ||
1736 | .policy = nl80211_policy, | ||
1737 | .flags = GENL_ADMIN_PERM, | ||
1738 | }, | ||
1659 | }; | 1739 | }; |
1660 | 1740 | ||
1661 | /* multicast groups */ | 1741 | /* multicast groups */ |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index b7754b1b73a4..ef9ccbc38752 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -34,7 +34,7 @@ | |||
34 | 34 | ||
35 | #include "xfrm_hash.h" | 35 | #include "xfrm_hash.h" |
36 | 36 | ||
37 | int sysctl_xfrm_larval_drop __read_mostly; | 37 | int sysctl_xfrm_larval_drop __read_mostly = 1; |
38 | 38 | ||
39 | #ifdef CONFIG_XFRM_STATISTICS | 39 | #ifdef CONFIG_XFRM_STATISTICS |
40 | DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly; | 40 | DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly; |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 0a8f09c3144c..abbe2702c400 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -59,6 +59,11 @@ static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; | |||
59 | static unsigned int xfrm_state_num; | 59 | static unsigned int xfrm_state_num; |
60 | static unsigned int xfrm_state_genid; | 60 | static unsigned int xfrm_state_genid; |
61 | 61 | ||
62 | /* Counter indicating ongoing walk, protected by xfrm_state_lock. */ | ||
63 | static unsigned long xfrm_state_walk_ongoing; | ||
64 | /* Counter indicating walk completion, protected by xfrm_cfg_mutex. */ | ||
65 | static unsigned long xfrm_state_walk_completed; | ||
66 | |||
62 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); | 67 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); |
63 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); | 68 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); |
64 | 69 | ||
@@ -191,7 +196,8 @@ static DEFINE_RWLOCK(xfrm_state_afinfo_lock); | |||
191 | static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO]; | 196 | static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO]; |
192 | 197 | ||
193 | static struct work_struct xfrm_state_gc_work; | 198 | static struct work_struct xfrm_state_gc_work; |
194 | static HLIST_HEAD(xfrm_state_gc_list); | 199 | static LIST_HEAD(xfrm_state_gc_leftovers); |
200 | static LIST_HEAD(xfrm_state_gc_list); | ||
195 | static DEFINE_SPINLOCK(xfrm_state_gc_lock); | 201 | static DEFINE_SPINLOCK(xfrm_state_gc_lock); |
196 | 202 | ||
197 | int __xfrm_state_delete(struct xfrm_state *x); | 203 | int __xfrm_state_delete(struct xfrm_state *x); |
@@ -403,17 +409,23 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x) | |||
403 | 409 | ||
404 | static void xfrm_state_gc_task(struct work_struct *data) | 410 | static void xfrm_state_gc_task(struct work_struct *data) |
405 | { | 411 | { |
406 | struct xfrm_state *x; | 412 | struct xfrm_state *x, *tmp; |
407 | struct hlist_node *entry, *tmp; | 413 | unsigned long completed; |
408 | struct hlist_head gc_list; | ||
409 | 414 | ||
415 | mutex_lock(&xfrm_cfg_mutex); | ||
410 | spin_lock_bh(&xfrm_state_gc_lock); | 416 | spin_lock_bh(&xfrm_state_gc_lock); |
411 | gc_list.first = xfrm_state_gc_list.first; | 417 | list_splice_tail_init(&xfrm_state_gc_list, &xfrm_state_gc_leftovers); |
412 | INIT_HLIST_HEAD(&xfrm_state_gc_list); | ||
413 | spin_unlock_bh(&xfrm_state_gc_lock); | 418 | spin_unlock_bh(&xfrm_state_gc_lock); |
414 | 419 | ||
415 | hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst) | 420 | completed = xfrm_state_walk_completed; |
421 | mutex_unlock(&xfrm_cfg_mutex); | ||
422 | |||
423 | list_for_each_entry_safe(x, tmp, &xfrm_state_gc_leftovers, gclist) { | ||
424 | if ((long)(x->lastused - completed) > 0) | ||
425 | break; | ||
426 | list_del(&x->gclist); | ||
416 | xfrm_state_gc_destroy(x); | 427 | xfrm_state_gc_destroy(x); |
428 | } | ||
417 | 429 | ||
418 | wake_up(&km_waitq); | 430 | wake_up(&km_waitq); |
419 | } | 431 | } |
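The xfrm changes above defer destroying a state until every walk that might still reference it has finished: __xfrm_state_delete() stamps the state with the current "ongoing" walk counter, and the GC task frees an entry only once the "completed" counter has caught up, using wrap-safe signed subtraction. A standalone, runnable sketch of that comparison with invented names:

	#include <stdio.h>

	/* Wrap-safe "is this stamp no newer than the completed counter?" test,
	 * mirroring the (long)(x->lastused - completed) > 0 check above. */
	static int may_destroy(unsigned long stamped, unsigned long completed)
	{
		return (long)(stamped - completed) <= 0;
	}

	int main(void)
	{
		unsigned long completed = (unsigned long)-1;	/* counter about to wrap */

		/* stamped at or before the completed generation: destroy;
		 * stamped after it: keep waiting */
		printf("%d %d\n", may_destroy(completed, completed + 1),
				  may_destroy(completed + 2, completed + 1));
		return 0;	/* prints "1 0" even across the wrap */
	}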
@@ -540,12 +552,8 @@ void __xfrm_state_destroy(struct xfrm_state *x) | |||
540 | { | 552 | { |
541 | WARN_ON(x->km.state != XFRM_STATE_DEAD); | 553 | WARN_ON(x->km.state != XFRM_STATE_DEAD); |
542 | 554 | ||
543 | spin_lock_bh(&xfrm_state_lock); | ||
544 | list_del(&x->all); | ||
545 | spin_unlock_bh(&xfrm_state_lock); | ||
546 | |||
547 | spin_lock_bh(&xfrm_state_gc_lock); | 555 | spin_lock_bh(&xfrm_state_gc_lock); |
548 | hlist_add_head(&x->bydst, &xfrm_state_gc_list); | 556 | list_add_tail(&x->gclist, &xfrm_state_gc_list); |
549 | spin_unlock_bh(&xfrm_state_gc_lock); | 557 | spin_unlock_bh(&xfrm_state_gc_lock); |
550 | schedule_work(&xfrm_state_gc_work); | 558 | schedule_work(&xfrm_state_gc_work); |
551 | } | 559 | } |
@@ -558,6 +566,8 @@ int __xfrm_state_delete(struct xfrm_state *x) | |||
558 | if (x->km.state != XFRM_STATE_DEAD) { | 566 | if (x->km.state != XFRM_STATE_DEAD) { |
559 | x->km.state = XFRM_STATE_DEAD; | 567 | x->km.state = XFRM_STATE_DEAD; |
560 | spin_lock(&xfrm_state_lock); | 568 | spin_lock(&xfrm_state_lock); |
569 | x->lastused = xfrm_state_walk_ongoing; | ||
570 | list_del_rcu(&x->all); | ||
561 | hlist_del(&x->bydst); | 571 | hlist_del(&x->bydst); |
562 | hlist_del(&x->bysrc); | 572 | hlist_del(&x->bysrc); |
563 | if (x->id.spi) | 573 | if (x->id.spi) |
@@ -1574,6 +1584,7 @@ int xfrm_state_walk(struct xfrm_state_walk *walk, | |||
1574 | if (err) { | 1584 | if (err) { |
1575 | xfrm_state_hold(last); | 1585 | xfrm_state_hold(last); |
1576 | walk->state = last; | 1586 | walk->state = last; |
1587 | xfrm_state_walk_ongoing++; | ||
1577 | goto out; | 1588 | goto out; |
1578 | } | 1589 | } |
1579 | } | 1590 | } |
@@ -1588,12 +1599,28 @@ int xfrm_state_walk(struct xfrm_state_walk *walk, | |||
1588 | err = func(last, 0, data); | 1599 | err = func(last, 0, data); |
1589 | out: | 1600 | out: |
1590 | spin_unlock_bh(&xfrm_state_lock); | 1601 | spin_unlock_bh(&xfrm_state_lock); |
1591 | if (old != NULL) | 1602 | if (old != NULL) { |
1592 | xfrm_state_put(old); | 1603 | xfrm_state_put(old); |
1604 | xfrm_state_walk_completed++; | ||
1605 | if (!list_empty(&xfrm_state_gc_leftovers)) | ||
1606 | schedule_work(&xfrm_state_gc_work); | ||
1607 | } | ||
1593 | return err; | 1608 | return err; |
1594 | } | 1609 | } |
1595 | EXPORT_SYMBOL(xfrm_state_walk); | 1610 | EXPORT_SYMBOL(xfrm_state_walk); |
1596 | 1611 | ||
1612 | void xfrm_state_walk_done(struct xfrm_state_walk *walk) | ||
1613 | { | ||
1614 | if (walk->state != NULL) { | ||
1615 | xfrm_state_put(walk->state); | ||
1616 | walk->state = NULL; | ||
1617 | xfrm_state_walk_completed++; | ||
1618 | if (!list_empty(&xfrm_state_gc_leftovers)) | ||
1619 | schedule_work(&xfrm_state_gc_work); | ||
1620 | } | ||
1621 | } | ||
1622 | EXPORT_SYMBOL(xfrm_state_walk_done); | ||
1623 | |||
1597 | 1624 | ||
1598 | void xfrm_replay_notify(struct xfrm_state *x, int event) | 1625 | void xfrm_replay_notify(struct xfrm_state *x, int event) |
1599 | { | 1626 | { |