Diffstat (limited to 'net')
135 files changed, 13549 insertions, 7308 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index b661f47bf10a..f0e335aa20df 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -394,6 +394,7 @@ static void vlan_transfer_features(struct net_device *dev, | |||
394 | 394 | ||
395 | vlandev->features &= ~dev->vlan_features; | 395 | vlandev->features &= ~dev->vlan_features; |
396 | vlandev->features |= dev->features & dev->vlan_features; | 396 | vlandev->features |= dev->features & dev->vlan_features; |
397 | vlandev->gso_max_size = dev->gso_max_size; | ||
397 | 398 | ||
398 | if (old_features != vlandev->features) | 399 | if (old_features != vlandev->features) |
399 | netdev_features_change(vlandev); | 400 | netdev_features_change(vlandev); |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 4bf014e51f8c..8883e9c8a223 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -48,7 +48,7 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb) | |||
48 | 48 | ||
49 | switch (veth->h_vlan_encapsulated_proto) { | 49 | switch (veth->h_vlan_encapsulated_proto) { |
50 | #ifdef CONFIG_INET | 50 | #ifdef CONFIG_INET |
51 | case __constant_htons(ETH_P_IP): | 51 | case htons(ETH_P_IP): |
52 | 52 | ||
53 | /* TODO: Confirm this will work with VLAN headers... */ | 53 | /* TODO: Confirm this will work with VLAN headers... */ |
54 | return arp_find(veth->h_dest, skb); | 54 | return arp_find(veth->h_dest, skb); |
@@ -607,6 +607,7 @@ static int vlan_dev_init(struct net_device *dev) | |||
607 | (1<<__LINK_STATE_PRESENT); | 607 | (1<<__LINK_STATE_PRESENT); |
608 | 608 | ||
609 | dev->features |= real_dev->features & real_dev->vlan_features; | 609 | dev->features |= real_dev->features & real_dev->vlan_features; |
610 | dev->gso_max_size = real_dev->gso_max_size; | ||
610 | 611 | ||
611 | /* ipv6 shared card related stuff */ | 612 | /* ipv6 shared card related stuff */ |
612 | dev->dev_id = real_dev->dev_id; | 613 | dev->dev_id = real_dev->dev_id; |
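The two VLAN hunks above make a VLAN device inherit the lower device's gso_max_size instead of keeping the stack default, both when features change and at init time. For context only: a driver that cannot build full-size TSO frames would normally cap the limit on the real device with netif_set_gso_max_size(), and the hunks above then propagate that cap to any stacked VLAN device. The function name below exists in this kernel; the 16 KB limit and the helper are illustrative assumptions, not part of this diff.

#include <linux/netdevice.h>

/* Sketch of a hypothetical driver probe path: cap the TSO frame size
 * on the real device; the VLAN code above copies dev->gso_max_size
 * from the real device to the VLAN device.
 */
static void example_cap_tso(struct net_device *real_dev)
{
	netif_set_gso_max_size(real_dev, 16 * 1024);	/* assumed HW limit */
}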
diff --git a/net/Kconfig b/net/Kconfig index 7612cc8c337c..9103a16a77be 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
@@ -232,18 +232,23 @@ source "net/can/Kconfig" | |||
232 | source "net/irda/Kconfig" | 232 | source "net/irda/Kconfig" |
233 | source "net/bluetooth/Kconfig" | 233 | source "net/bluetooth/Kconfig" |
234 | source "net/rxrpc/Kconfig" | 234 | source "net/rxrpc/Kconfig" |
235 | source "net/phonet/Kconfig" | ||
235 | 236 | ||
236 | config FIB_RULES | 237 | config FIB_RULES |
237 | bool | 238 | bool |
238 | 239 | ||
239 | menu "Wireless" | 240 | menuconfig WIRELESS |
241 | bool "Wireless" | ||
240 | depends on !S390 | 242 | depends on !S390 |
243 | default y | ||
244 | |||
245 | if WIRELESS | ||
241 | 246 | ||
242 | source "net/wireless/Kconfig" | 247 | source "net/wireless/Kconfig" |
243 | source "net/mac80211/Kconfig" | 248 | source "net/mac80211/Kconfig" |
244 | source "net/ieee80211/Kconfig" | 249 | source "net/ieee80211/Kconfig" |
245 | 250 | ||
246 | endmenu | 251 | endif # WIRELESS |
247 | 252 | ||
248 | source "net/rfkill/Kconfig" | 253 | source "net/rfkill/Kconfig" |
249 | source "net/9p/Kconfig" | 254 | source "net/9p/Kconfig" |
diff --git a/net/Makefile b/net/Makefile index 4f43e7f874f3..acaf819f24aa 100644 --- a/net/Makefile +++ b/net/Makefile | |||
@@ -42,6 +42,7 @@ obj-$(CONFIG_AF_RXRPC) += rxrpc/ | |||
42 | obj-$(CONFIG_ATM) += atm/ | 42 | obj-$(CONFIG_ATM) += atm/ |
43 | obj-$(CONFIG_DECNET) += decnet/ | 43 | obj-$(CONFIG_DECNET) += decnet/ |
44 | obj-$(CONFIG_ECONET) += econet/ | 44 | obj-$(CONFIG_ECONET) += econet/ |
45 | obj-$(CONFIG_PHONET) += phonet/ | ||
45 | ifneq ($(CONFIG_VLAN_8021Q),) | 46 | ifneq ($(CONFIG_VLAN_8021Q),) |
46 | obj-y += 8021q/ | 47 | obj-y += 8021q/ |
47 | endif | 48 | endif |
diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 8d9a6f158880..280de481edc7 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c | |||
@@ -375,11 +375,11 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) | |||
375 | if (memcmp | 375 | if (memcmp |
376 | (skb->data + 6, ethertype_ipv6, | 376 | (skb->data + 6, ethertype_ipv6, |
377 | sizeof(ethertype_ipv6)) == 0) | 377 | sizeof(ethertype_ipv6)) == 0) |
378 | skb->protocol = __constant_htons(ETH_P_IPV6); | 378 | skb->protocol = htons(ETH_P_IPV6); |
379 | else if (memcmp | 379 | else if (memcmp |
380 | (skb->data + 6, ethertype_ipv4, | 380 | (skb->data + 6, ethertype_ipv4, |
381 | sizeof(ethertype_ipv4)) == 0) | 381 | sizeof(ethertype_ipv4)) == 0) |
382 | skb->protocol = __constant_htons(ETH_P_IP); | 382 | skb->protocol = htons(ETH_P_IP); |
383 | else | 383 | else |
384 | goto error; | 384 | goto error; |
385 | skb_pull(skb, sizeof(llc_oui_ipv4)); | 385 | skb_pull(skb, sizeof(llc_oui_ipv4)); |
@@ -404,9 +404,9 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) | |||
404 | skb_reset_network_header(skb); | 404 | skb_reset_network_header(skb); |
405 | iph = ip_hdr(skb); | 405 | iph = ip_hdr(skb); |
406 | if (iph->version == 4) | 406 | if (iph->version == 4) |
407 | skb->protocol = __constant_htons(ETH_P_IP); | 407 | skb->protocol = htons(ETH_P_IP); |
408 | else if (iph->version == 6) | 408 | else if (iph->version == 6) |
409 | skb->protocol = __constant_htons(ETH_P_IPV6); | 409 | skb->protocol = htons(ETH_P_IPV6); |
410 | else | 410 | else |
411 | goto error; | 411 | goto error; |
412 | skb->pkt_type = PACKET_HOST; | 412 | skb->pkt_type = PACKET_HOST; |
diff --git a/net/atm/lec.c b/net/atm/lec.c index 5799fb52365a..8f701cde5945 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -1931,7 +1931,6 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, | |||
1931 | switch (priv->lane_version) { | 1931 | switch (priv->lane_version) { |
1932 | case 1: | 1932 | case 1: |
1933 | return priv->mcast_vcc; | 1933 | return priv->mcast_vcc; |
1934 | break; | ||
1935 | case 2: /* LANE2 wants arp for multicast addresses */ | 1934 | case 2: /* LANE2 wants arp for multicast addresses */ |
1936 | if (!compare_ether_addr(mac_to_find, bus_mac)) | 1935 | if (!compare_ether_addr(mac_to_find, bus_mac)) |
1937 | return priv->mcast_vcc; | 1936 | return priv->mcast_vcc; |
diff --git a/net/bridge/br.c b/net/bridge/br.c index 573acdf6f9ff..4d2c1f1cb524 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c | |||
@@ -28,6 +28,10 @@ static const struct stp_proto br_stp_proto = { | |||
28 | .rcv = br_stp_rcv, | 28 | .rcv = br_stp_rcv, |
29 | }; | 29 | }; |
30 | 30 | ||
31 | static struct pernet_operations br_net_ops = { | ||
32 | .exit = br_net_exit, | ||
33 | }; | ||
34 | |||
31 | static int __init br_init(void) | 35 | static int __init br_init(void) |
32 | { | 36 | { |
33 | int err; | 37 | int err; |
@@ -42,18 +46,22 @@ static int __init br_init(void) | |||
42 | if (err) | 46 | if (err) |
43 | goto err_out; | 47 | goto err_out; |
44 | 48 | ||
45 | err = br_netfilter_init(); | 49 | err = register_pernet_subsys(&br_net_ops); |
46 | if (err) | 50 | if (err) |
47 | goto err_out1; | 51 | goto err_out1; |
48 | 52 | ||
49 | err = register_netdevice_notifier(&br_device_notifier); | 53 | err = br_netfilter_init(); |
50 | if (err) | 54 | if (err) |
51 | goto err_out2; | 55 | goto err_out2; |
52 | 56 | ||
53 | err = br_netlink_init(); | 57 | err = register_netdevice_notifier(&br_device_notifier); |
54 | if (err) | 58 | if (err) |
55 | goto err_out3; | 59 | goto err_out3; |
56 | 60 | ||
61 | err = br_netlink_init(); | ||
62 | if (err) | ||
63 | goto err_out4; | ||
64 | |||
57 | brioctl_set(br_ioctl_deviceless_stub); | 65 | brioctl_set(br_ioctl_deviceless_stub); |
58 | br_handle_frame_hook = br_handle_frame; | 66 | br_handle_frame_hook = br_handle_frame; |
59 | 67 | ||
@@ -61,10 +69,12 @@ static int __init br_init(void) | |||
61 | br_fdb_put_hook = br_fdb_put; | 69 | br_fdb_put_hook = br_fdb_put; |
62 | 70 | ||
63 | return 0; | 71 | return 0; |
64 | err_out3: | 72 | err_out4: |
65 | unregister_netdevice_notifier(&br_device_notifier); | 73 | unregister_netdevice_notifier(&br_device_notifier); |
66 | err_out2: | 74 | err_out3: |
67 | br_netfilter_fini(); | 75 | br_netfilter_fini(); |
76 | err_out2: | ||
77 | unregister_pernet_subsys(&br_net_ops); | ||
68 | err_out1: | 78 | err_out1: |
69 | br_fdb_fini(); | 79 | br_fdb_fini(); |
70 | err_out: | 80 | err_out: |
@@ -80,7 +90,7 @@ static void __exit br_deinit(void) | |||
80 | unregister_netdevice_notifier(&br_device_notifier); | 90 | unregister_netdevice_notifier(&br_device_notifier); |
81 | brioctl_set(NULL); | 91 | brioctl_set(NULL); |
82 | 92 | ||
83 | br_cleanup_bridges(); | 93 | unregister_pernet_subsys(&br_net_ops); |
84 | 94 | ||
85 | synchronize_net(); | 95 | synchronize_net(); |
86 | 96 | ||
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 4f52c3d50ebe..22ba8632196f 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -178,5 +178,6 @@ void br_dev_setup(struct net_device *dev) | |||
178 | dev->priv_flags = IFF_EBRIDGE; | 178 | dev->priv_flags = IFF_EBRIDGE; |
179 | 179 | ||
180 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | | 180 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | |
181 | NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX; | 181 | NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX | |
182 | NETIF_F_NETNS_LOCAL; | ||
182 | } | 183 | } |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 63c18aacde8c..573e20f7dba4 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -168,7 +168,7 @@ static void del_br(struct net_bridge *br) | |||
168 | unregister_netdevice(br->dev); | 168 | unregister_netdevice(br->dev); |
169 | } | 169 | } |
170 | 170 | ||
171 | static struct net_device *new_bridge_dev(const char *name) | 171 | static struct net_device *new_bridge_dev(struct net *net, const char *name) |
172 | { | 172 | { |
173 | struct net_bridge *br; | 173 | struct net_bridge *br; |
174 | struct net_device *dev; | 174 | struct net_device *dev; |
@@ -178,6 +178,7 @@ static struct net_device *new_bridge_dev(const char *name) | |||
178 | 178 | ||
179 | if (!dev) | 179 | if (!dev) |
180 | return NULL; | 180 | return NULL; |
181 | dev_net_set(dev, net); | ||
181 | 182 | ||
182 | br = netdev_priv(dev); | 183 | br = netdev_priv(dev); |
183 | br->dev = dev; | 184 | br->dev = dev; |
@@ -262,12 +263,12 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, | |||
262 | return p; | 263 | return p; |
263 | } | 264 | } |
264 | 265 | ||
265 | int br_add_bridge(const char *name) | 266 | int br_add_bridge(struct net *net, const char *name) |
266 | { | 267 | { |
267 | struct net_device *dev; | 268 | struct net_device *dev; |
268 | int ret; | 269 | int ret; |
269 | 270 | ||
270 | dev = new_bridge_dev(name); | 271 | dev = new_bridge_dev(net, name); |
271 | if (!dev) | 272 | if (!dev) |
272 | return -ENOMEM; | 273 | return -ENOMEM; |
273 | 274 | ||
@@ -294,13 +295,13 @@ out_free: | |||
294 | goto out; | 295 | goto out; |
295 | } | 296 | } |
296 | 297 | ||
297 | int br_del_bridge(const char *name) | 298 | int br_del_bridge(struct net *net, const char *name) |
298 | { | 299 | { |
299 | struct net_device *dev; | 300 | struct net_device *dev; |
300 | int ret = 0; | 301 | int ret = 0; |
301 | 302 | ||
302 | rtnl_lock(); | 303 | rtnl_lock(); |
303 | dev = __dev_get_by_name(&init_net, name); | 304 | dev = __dev_get_by_name(net, name); |
304 | if (dev == NULL) | 305 | if (dev == NULL) |
305 | ret = -ENXIO; /* Could not find device */ | 306 | ret = -ENXIO; /* Could not find device */ |
306 | 307 | ||
@@ -445,13 +446,13 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) | |||
445 | return 0; | 446 | return 0; |
446 | } | 447 | } |
447 | 448 | ||
448 | void __exit br_cleanup_bridges(void) | 449 | void br_net_exit(struct net *net) |
449 | { | 450 | { |
450 | struct net_device *dev; | 451 | struct net_device *dev; |
451 | 452 | ||
452 | rtnl_lock(); | 453 | rtnl_lock(); |
453 | restart: | 454 | restart: |
454 | for_each_netdev(&init_net, dev) { | 455 | for_each_netdev(net, dev) { |
455 | if (dev->priv_flags & IFF_EBRIDGE) { | 456 | if (dev->priv_flags & IFF_EBRIDGE) { |
456 | del_br(dev->priv); | 457 | del_br(dev->priv); |
457 | goto restart; | 458 | goto restart; |
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 5bbf07362172..6a6433daaf27 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c | |||
@@ -21,12 +21,12 @@ | |||
21 | #include "br_private.h" | 21 | #include "br_private.h" |
22 | 22 | ||
23 | /* called with RTNL */ | 23 | /* called with RTNL */ |
24 | static int get_bridge_ifindices(int *indices, int num) | 24 | static int get_bridge_ifindices(struct net *net, int *indices, int num) |
25 | { | 25 | { |
26 | struct net_device *dev; | 26 | struct net_device *dev; |
27 | int i = 0; | 27 | int i = 0; |
28 | 28 | ||
29 | for_each_netdev(&init_net, dev) { | 29 | for_each_netdev(net, dev) { |
30 | if (i >= num) | 30 | if (i >= num) |
31 | break; | 31 | break; |
32 | if (dev->priv_flags & IFF_EBRIDGE) | 32 | if (dev->priv_flags & IFF_EBRIDGE) |
@@ -89,7 +89,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd) | |||
89 | if (!capable(CAP_NET_ADMIN)) | 89 | if (!capable(CAP_NET_ADMIN)) |
90 | return -EPERM; | 90 | return -EPERM; |
91 | 91 | ||
92 | dev = dev_get_by_index(&init_net, ifindex); | 92 | dev = dev_get_by_index(dev_net(br->dev), ifindex); |
93 | if (dev == NULL) | 93 | if (dev == NULL) |
94 | return -EINVAL; | 94 | return -EINVAL; |
95 | 95 | ||
@@ -315,7 +315,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
315 | return -EOPNOTSUPP; | 315 | return -EOPNOTSUPP; |
316 | } | 316 | } |
317 | 317 | ||
318 | static int old_deviceless(void __user *uarg) | 318 | static int old_deviceless(struct net *net, void __user *uarg) |
319 | { | 319 | { |
320 | unsigned long args[3]; | 320 | unsigned long args[3]; |
321 | 321 | ||
@@ -337,7 +337,7 @@ static int old_deviceless(void __user *uarg) | |||
337 | if (indices == NULL) | 337 | if (indices == NULL) |
338 | return -ENOMEM; | 338 | return -ENOMEM; |
339 | 339 | ||
340 | args[2] = get_bridge_ifindices(indices, args[2]); | 340 | args[2] = get_bridge_ifindices(net, indices, args[2]); |
341 | 341 | ||
342 | ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int)) | 342 | ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int)) |
343 | ? -EFAULT : args[2]; | 343 | ? -EFAULT : args[2]; |
@@ -360,9 +360,9 @@ static int old_deviceless(void __user *uarg) | |||
360 | buf[IFNAMSIZ-1] = 0; | 360 | buf[IFNAMSIZ-1] = 0; |
361 | 361 | ||
362 | if (args[0] == BRCTL_ADD_BRIDGE) | 362 | if (args[0] == BRCTL_ADD_BRIDGE) |
363 | return br_add_bridge(buf); | 363 | return br_add_bridge(net, buf); |
364 | 364 | ||
365 | return br_del_bridge(buf); | 365 | return br_del_bridge(net, buf); |
366 | } | 366 | } |
367 | } | 367 | } |
368 | 368 | ||
@@ -374,7 +374,7 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uar | |||
374 | switch (cmd) { | 374 | switch (cmd) { |
375 | case SIOCGIFBR: | 375 | case SIOCGIFBR: |
376 | case SIOCSIFBR: | 376 | case SIOCSIFBR: |
377 | return old_deviceless(uarg); | 377 | return old_deviceless(net, uarg); |
378 | 378 | ||
379 | case SIOCBRADDBR: | 379 | case SIOCBRADDBR: |
380 | case SIOCBRDELBR: | 380 | case SIOCBRDELBR: |
@@ -389,9 +389,9 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uar | |||
389 | 389 | ||
390 | buf[IFNAMSIZ-1] = 0; | 390 | buf[IFNAMSIZ-1] = 0; |
391 | if (cmd == SIOCBRADDBR) | 391 | if (cmd == SIOCBRADDBR) |
392 | return br_add_bridge(buf); | 392 | return br_add_bridge(net, buf); |
393 | 393 | ||
394 | return br_del_bridge(buf); | 394 | return br_del_bridge(net, buf); |
395 | } | 395 | } |
396 | } | 396 | } |
397 | return -EOPNOTSUPP; | 397 | return -EOPNOTSUPP; |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index f155e6ce8a21..ba7be195803c 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -82,6 +82,7 @@ nla_put_failure: | |||
82 | */ | 82 | */ |
83 | void br_ifinfo_notify(int event, struct net_bridge_port *port) | 83 | void br_ifinfo_notify(int event, struct net_bridge_port *port) |
84 | { | 84 | { |
85 | struct net *net = dev_net(port->dev); | ||
85 | struct sk_buff *skb; | 86 | struct sk_buff *skb; |
86 | int err = -ENOBUFS; | 87 | int err = -ENOBUFS; |
87 | 88 | ||
@@ -97,10 +98,10 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port) | |||
97 | kfree_skb(skb); | 98 | kfree_skb(skb); |
98 | goto errout; | 99 | goto errout; |
99 | } | 100 | } |
100 | err = rtnl_notify(skb, &init_net,0, RTNLGRP_LINK, NULL, GFP_ATOMIC); | 101 | err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); |
101 | errout: | 102 | errout: |
102 | if (err < 0) | 103 | if (err < 0) |
103 | rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err); | 104 | rtnl_set_sk_err(net, RTNLGRP_LINK, err); |
104 | } | 105 | } |
105 | 106 | ||
106 | /* | 107 | /* |
@@ -112,11 +113,8 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
112 | struct net_device *dev; | 113 | struct net_device *dev; |
113 | int idx; | 114 | int idx; |
114 | 115 | ||
115 | if (net != &init_net) | ||
116 | return 0; | ||
117 | |||
118 | idx = 0; | 116 | idx = 0; |
119 | for_each_netdev(&init_net, dev) { | 117 | for_each_netdev(net, dev) { |
120 | /* not a bridge port */ | 118 | /* not a bridge port */ |
121 | if (dev->br_port == NULL || idx < cb->args[0]) | 119 | if (dev->br_port == NULL || idx < cb->args[0]) |
122 | goto skip; | 120 | goto skip; |
@@ -147,9 +145,6 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
147 | struct net_bridge_port *p; | 145 | struct net_bridge_port *p; |
148 | u8 new_state; | 146 | u8 new_state; |
149 | 147 | ||
150 | if (net != &init_net) | ||
151 | return -EINVAL; | ||
152 | |||
153 | if (nlmsg_len(nlh) < sizeof(*ifm)) | 148 | if (nlmsg_len(nlh) < sizeof(*ifm)) |
154 | return -EINVAL; | 149 | return -EINVAL; |
155 | 150 | ||
@@ -165,7 +160,7 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
165 | if (new_state > BR_STATE_BLOCKING) | 160 | if (new_state > BR_STATE_BLOCKING) |
166 | return -EINVAL; | 161 | return -EINVAL; |
167 | 162 | ||
168 | dev = __dev_get_by_index(&init_net, ifm->ifi_index); | 163 | dev = __dev_get_by_index(net, ifm->ifi_index); |
169 | if (!dev) | 164 | if (!dev) |
170 | return -ENODEV; | 165 | return -ENODEV; |
171 | 166 | ||
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 76340bdd052e..763a3ec292e5 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c | |||
@@ -35,9 +35,6 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v | |||
35 | struct net_bridge_port *p = dev->br_port; | 35 | struct net_bridge_port *p = dev->br_port; |
36 | struct net_bridge *br; | 36 | struct net_bridge *br; |
37 | 37 | ||
38 | if (!net_eq(dev_net(dev), &init_net)) | ||
39 | return NOTIFY_DONE; | ||
40 | |||
41 | /* not a port of a bridge */ | 38 | /* not a port of a bridge */ |
42 | if (p == NULL) | 39 | if (p == NULL) |
43 | return NOTIFY_DONE; | 40 | return NOTIFY_DONE; |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index c3dc18ddc043..b6c3b71974dc 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -178,9 +178,9 @@ extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb); | |||
178 | 178 | ||
179 | /* br_if.c */ | 179 | /* br_if.c */ |
180 | extern void br_port_carrier_check(struct net_bridge_port *p); | 180 | extern void br_port_carrier_check(struct net_bridge_port *p); |
181 | extern int br_add_bridge(const char *name); | 181 | extern int br_add_bridge(struct net *net, const char *name); |
182 | extern int br_del_bridge(const char *name); | 182 | extern int br_del_bridge(struct net *net, const char *name); |
183 | extern void br_cleanup_bridges(void); | 183 | extern void br_net_exit(struct net *net); |
184 | extern int br_add_if(struct net_bridge *br, | 184 | extern int br_add_if(struct net_bridge *br, |
185 | struct net_device *dev); | 185 | struct net_device *dev); |
186 | extern int br_del_if(struct net_bridge *br, | 186 | extern int br_del_if(struct net_bridge *br, |
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index 8b200f96f722..81ae40b3f655 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c | |||
@@ -140,9 +140,6 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, | |||
140 | struct net_bridge *br; | 140 | struct net_bridge *br; |
141 | const unsigned char *buf; | 141 | const unsigned char *buf; |
142 | 142 | ||
143 | if (!net_eq(dev_net(dev), &init_net)) | ||
144 | goto err; | ||
145 | |||
146 | if (!p) | 143 | if (!p) |
147 | goto err; | 144 | goto err; |
148 | 145 | ||
diff --git a/net/core/Makefile b/net/core/Makefile index b1332f6d0042..26a37cb31923 100644 --- a/net/core/Makefile +++ b/net/core/Makefile | |||
@@ -6,6 +6,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ | |||
6 | gen_stats.o gen_estimator.o net_namespace.o | 6 | gen_stats.o gen_estimator.o net_namespace.o |
7 | 7 | ||
8 | obj-$(CONFIG_SYSCTL) += sysctl_net_core.o | 8 | obj-$(CONFIG_SYSCTL) += sysctl_net_core.o |
9 | obj-$(CONFIG_HAS_DMA) += skb_dma_map.o | ||
9 | 10 | ||
10 | obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ | 11 | obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ |
11 | neighbour.o rtnetlink.o utils.o link_watch.o filter.o | 12 | neighbour.o rtnetlink.o utils.o link_watch.o filter.o |
diff --git a/net/core/dev.c b/net/core/dev.c index e8eb2b478344..7091040e32ac 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -891,7 +891,7 @@ int dev_alloc_name(struct net_device *dev, const char *name) | |||
891 | * Change name of a device, can pass format strings "eth%d". | 891 | * Change name of a device, can pass format strings "eth%d". |
892 | * for wildcarding. | 892 | * for wildcarding. |
893 | */ | 893 | */ |
894 | int dev_change_name(struct net_device *dev, char *newname) | 894 | int dev_change_name(struct net_device *dev, const char *newname) |
895 | { | 895 | { |
896 | char oldname[IFNAMSIZ]; | 896 | char oldname[IFNAMSIZ]; |
897 | int err = 0; | 897 | int err = 0; |
@@ -917,7 +917,6 @@ int dev_change_name(struct net_device *dev, char *newname) | |||
917 | err = dev_alloc_name(dev, newname); | 917 | err = dev_alloc_name(dev, newname); |
918 | if (err < 0) | 918 | if (err < 0) |
919 | return err; | 919 | return err; |
920 | strcpy(newname, dev->name); | ||
921 | } | 920 | } |
922 | else if (__dev_get_by_name(net, newname)) | 921 | else if (__dev_get_by_name(net, newname)) |
923 | return -EEXIST; | 922 | return -EEXIST; |
@@ -955,6 +954,38 @@ rollback: | |||
955 | } | 954 | } |
956 | 955 | ||
957 | /** | 956 | /** |
957 | * dev_set_alias - change ifalias of a device | ||
958 | * @dev: device | ||
959 | * @alias: name up to IFALIASZ | ||
960 | * @len: limit of bytes to copy from info | ||
961 | * | ||
962 | * Set ifalias for a device, | ||
963 | */ | ||
964 | int dev_set_alias(struct net_device *dev, const char *alias, size_t len) | ||
965 | { | ||
966 | ASSERT_RTNL(); | ||
967 | |||
968 | if (len >= IFALIASZ) | ||
969 | return -EINVAL; | ||
970 | |||
971 | if (!len) { | ||
972 | if (dev->ifalias) { | ||
973 | kfree(dev->ifalias); | ||
974 | dev->ifalias = NULL; | ||
975 | } | ||
976 | return 0; | ||
977 | } | ||
978 | |||
979 | dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL); | ||
980 | if (!dev->ifalias) | ||
981 | return -ENOMEM; | ||
982 | |||
983 | strlcpy(dev->ifalias, alias, len+1); | ||
984 | return len; | ||
985 | } | ||
986 | |||
987 | |||
988 | /** | ||
958 | * netdev_features_change - device changes features | 989 | * netdev_features_change - device changes features |
959 | * @dev: device to cause notification | 990 | * @dev: device to cause notification |
960 | * | 991 | * |
@@ -1676,14 +1707,14 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb) | |||
1676 | } | 1707 | } |
1677 | 1708 | ||
1678 | switch (skb->protocol) { | 1709 | switch (skb->protocol) { |
1679 | case __constant_htons(ETH_P_IP): | 1710 | case htons(ETH_P_IP): |
1680 | if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))) | 1711 | if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))) |
1681 | ip_proto = ip_hdr(skb)->protocol; | 1712 | ip_proto = ip_hdr(skb)->protocol; |
1682 | addr1 = ip_hdr(skb)->saddr; | 1713 | addr1 = ip_hdr(skb)->saddr; |
1683 | addr2 = ip_hdr(skb)->daddr; | 1714 | addr2 = ip_hdr(skb)->daddr; |
1684 | ihl = ip_hdr(skb)->ihl; | 1715 | ihl = ip_hdr(skb)->ihl; |
1685 | break; | 1716 | break; |
1686 | case __constant_htons(ETH_P_IPV6): | 1717 | case htons(ETH_P_IPV6): |
1687 | ip_proto = ipv6_hdr(skb)->nexthdr; | 1718 | ip_proto = ipv6_hdr(skb)->nexthdr; |
1688 | addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3]; | 1719 | addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3]; |
1689 | addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3]; | 1720 | addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3]; |
@@ -3302,6 +3333,12 @@ static void dev_addr_discard(struct net_device *dev) | |||
3302 | netif_addr_unlock_bh(dev); | 3333 | netif_addr_unlock_bh(dev); |
3303 | } | 3334 | } |
3304 | 3335 | ||
3336 | /** | ||
3337 | * dev_get_flags - get flags reported to userspace | ||
3338 | * @dev: device | ||
3339 | * | ||
3340 | * Get the combination of flag bits exported through APIs to userspace. | ||
3341 | */ | ||
3305 | unsigned dev_get_flags(const struct net_device *dev) | 3342 | unsigned dev_get_flags(const struct net_device *dev) |
3306 | { | 3343 | { |
3307 | unsigned flags; | 3344 | unsigned flags; |
@@ -3326,6 +3363,14 @@ unsigned dev_get_flags(const struct net_device *dev) | |||
3326 | return flags; | 3363 | return flags; |
3327 | } | 3364 | } |
3328 | 3365 | ||
3366 | /** | ||
3367 | * dev_change_flags - change device settings | ||
3368 | * @dev: device | ||
3369 | * @flags: device state flags | ||
3370 | * | ||
3371 | * Change settings on device based state flags. The flags are | ||
3372 | * in the userspace exported format. | ||
3373 | */ | ||
3329 | int dev_change_flags(struct net_device *dev, unsigned flags) | 3374 | int dev_change_flags(struct net_device *dev, unsigned flags) |
3330 | { | 3375 | { |
3331 | int ret, changes; | 3376 | int ret, changes; |
@@ -3395,6 +3440,13 @@ int dev_change_flags(struct net_device *dev, unsigned flags) | |||
3395 | return ret; | 3440 | return ret; |
3396 | } | 3441 | } |
3397 | 3442 | ||
3443 | /** | ||
3444 | * dev_set_mtu - Change maximum transfer unit | ||
3445 | * @dev: device | ||
3446 | * @new_mtu: new transfer unit | ||
3447 | * | ||
3448 | * Change the maximum transfer size of the network device. | ||
3449 | */ | ||
3398 | int dev_set_mtu(struct net_device *dev, int new_mtu) | 3450 | int dev_set_mtu(struct net_device *dev, int new_mtu) |
3399 | { | 3451 | { |
3400 | int err; | 3452 | int err; |
@@ -3419,6 +3471,13 @@ int dev_set_mtu(struct net_device *dev, int new_mtu) | |||
3419 | return err; | 3471 | return err; |
3420 | } | 3472 | } |
3421 | 3473 | ||
3474 | /** | ||
3475 | * dev_set_mac_address - Change Media Access Control Address | ||
3476 | * @dev: device | ||
3477 | * @sa: new address | ||
3478 | * | ||
3479 | * Change the hardware (MAC) address of the device | ||
3480 | */ | ||
3422 | int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) | 3481 | int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) |
3423 | { | 3482 | { |
3424 | int err; | 3483 | int err; |
@@ -4322,7 +4381,12 @@ void free_netdev(struct net_device *dev) | |||
4322 | put_device(&dev->dev); | 4381 | put_device(&dev->dev); |
4323 | } | 4382 | } |
4324 | 4383 | ||
4325 | /* Synchronize with packet receive processing. */ | 4384 | /** |
4385 | * synchronize_net - Synchronize with packet receive processing | ||
4386 | * | ||
4387 | * Wait for packets currently being received to be done. | ||
4388 | * Does not block later packets from starting. | ||
4389 | */ | ||
4326 | void synchronize_net(void) | 4390 | void synchronize_net(void) |
4327 | { | 4391 | { |
4328 | might_sleep(); | 4392 | might_sleep(); |
@@ -4624,7 +4688,7 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan, | |||
4624 | } | 4688 | } |
4625 | 4689 | ||
4626 | /** | 4690 | /** |
4627 | * netdev_dma_regiser - register the networking subsystem as a DMA client | 4691 | * netdev_dma_register - register the networking subsystem as a DMA client |
4628 | */ | 4692 | */ |
4629 | static int __init netdev_dma_register(void) | 4693 | static int __init netdev_dma_register(void) |
4630 | { | 4694 | { |
@@ -4670,6 +4734,12 @@ int netdev_compute_features(unsigned long all, unsigned long one) | |||
4670 | one |= NETIF_F_GSO_SOFTWARE; | 4734 | one |= NETIF_F_GSO_SOFTWARE; |
4671 | one |= NETIF_F_GSO; | 4735 | one |= NETIF_F_GSO; |
4672 | 4736 | ||
4737 | /* | ||
4738 | * If even one device supports a GSO protocol with software fallback, | ||
4739 | * enable it for all. | ||
4740 | */ | ||
4741 | all |= one & NETIF_F_GSO_SOFTWARE; | ||
4742 | |||
4673 | /* If even one device supports robust GSO, enable it for all. */ | 4743 | /* If even one device supports robust GSO, enable it for all. */ |
4674 | if (one & NETIF_F_GSO_ROBUST) | 4744 | if (one & NETIF_F_GSO_ROBUST) |
4675 | all |= NETIF_F_GSO_ROBUST; | 4745 | all |= NETIF_F_GSO_ROBUST; |
@@ -4719,10 +4789,18 @@ err_name: | |||
4719 | return -ENOMEM; | 4789 | return -ENOMEM; |
4720 | } | 4790 | } |
4721 | 4791 | ||
4722 | char *netdev_drivername(struct net_device *dev, char *buffer, int len) | 4792 | /** |
4793 | * netdev_drivername - network driver for the device | ||
4794 | * @dev: network device | ||
4795 | * @buffer: buffer for resulting name | ||
4796 | * @len: size of buffer | ||
4797 | * | ||
4798 | * Determine network driver for device. | ||
4799 | */ | ||
4800 | char *netdev_drivername(const struct net_device *dev, char *buffer, int len) | ||
4723 | { | 4801 | { |
4724 | struct device_driver *driver; | 4802 | const struct device_driver *driver; |
4725 | struct device *parent; | 4803 | const struct device *parent; |
4726 | 4804 | ||
4727 | if (len <= 0 || !buffer) | 4805 | if (len <= 0 || !buffer) |
4728 | return buffer; | 4806 | return buffer; |
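The dev_set_alias() helper added above is what the sysfs "ifalias" attribute (net-sysfs.c below) and the IFLA_IFALIAS netlink attribute (rtnetlink.c below) call into. As a minimal sketch of an in-kernel caller, assuming built-in code that already holds no locks: the alias string and the wrapper function are illustrative, not taken from this patch. The helper asserts RTNL and returns the stored length on success or a negative errno.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Sketch only: attach a human-readable alias to a device. */
static int example_label_device(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_alias(dev, "uplink", 6);	/* length of "uplink" */
	rtnl_unlock();

	return err < 0 ? err : 0;
}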
diff --git a/net/core/dst.c b/net/core/dst.c index fe03266130b6..09c1530f4681 100644 --- a/net/core/dst.c +++ b/net/core/dst.c | |||
@@ -203,6 +203,7 @@ void __dst_free(struct dst_entry * dst) | |||
203 | if (dst_garbage.timer_inc > DST_GC_INC) { | 203 | if (dst_garbage.timer_inc > DST_GC_INC) { |
204 | dst_garbage.timer_inc = DST_GC_INC; | 204 | dst_garbage.timer_inc = DST_GC_INC; |
205 | dst_garbage.timer_expires = DST_GC_MIN; | 205 | dst_garbage.timer_expires = DST_GC_MIN; |
206 | cancel_delayed_work(&dst_gc_work); | ||
206 | schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires); | 207 | schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires); |
207 | } | 208 | } |
208 | spin_unlock_bh(&dst_garbage.lock); | 209 | spin_unlock_bh(&dst_garbage.lock); |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 9d92e41826e7..1dc728b38589 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -927,8 +927,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) | |||
927 | if (skb_queue_len(&neigh->arp_queue) >= | 927 | if (skb_queue_len(&neigh->arp_queue) >= |
928 | neigh->parms->queue_len) { | 928 | neigh->parms->queue_len) { |
929 | struct sk_buff *buff; | 929 | struct sk_buff *buff; |
930 | buff = neigh->arp_queue.next; | 930 | buff = __skb_dequeue(&neigh->arp_queue); |
931 | __skb_unlink(buff, &neigh->arp_queue); | ||
932 | kfree_skb(buff); | 931 | kfree_skb(buff); |
933 | NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); | 932 | NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); |
934 | } | 933 | } |
@@ -1259,24 +1258,20 @@ static void neigh_proxy_process(unsigned long arg) | |||
1259 | struct neigh_table *tbl = (struct neigh_table *)arg; | 1258 | struct neigh_table *tbl = (struct neigh_table *)arg; |
1260 | long sched_next = 0; | 1259 | long sched_next = 0; |
1261 | unsigned long now = jiffies; | 1260 | unsigned long now = jiffies; |
1262 | struct sk_buff *skb; | 1261 | struct sk_buff *skb, *n; |
1263 | 1262 | ||
1264 | spin_lock(&tbl->proxy_queue.lock); | 1263 | spin_lock(&tbl->proxy_queue.lock); |
1265 | 1264 | ||
1266 | skb = tbl->proxy_queue.next; | 1265 | skb_queue_walk_safe(&tbl->proxy_queue, skb, n) { |
1267 | 1266 | long tdif = NEIGH_CB(skb)->sched_next - now; | |
1268 | while (skb != (struct sk_buff *)&tbl->proxy_queue) { | ||
1269 | struct sk_buff *back = skb; | ||
1270 | long tdif = NEIGH_CB(back)->sched_next - now; | ||
1271 | 1267 | ||
1272 | skb = skb->next; | ||
1273 | if (tdif <= 0) { | 1268 | if (tdif <= 0) { |
1274 | struct net_device *dev = back->dev; | 1269 | struct net_device *dev = skb->dev; |
1275 | __skb_unlink(back, &tbl->proxy_queue); | 1270 | __skb_unlink(skb, &tbl->proxy_queue); |
1276 | if (tbl->proxy_redo && netif_running(dev)) | 1271 | if (tbl->proxy_redo && netif_running(dev)) |
1277 | tbl->proxy_redo(back); | 1272 | tbl->proxy_redo(skb); |
1278 | else | 1273 | else |
1279 | kfree_skb(back); | 1274 | kfree_skb(skb); |
1280 | 1275 | ||
1281 | dev_put(dev); | 1276 | dev_put(dev); |
1282 | } else if (!sched_next || tdif < sched_next) | 1277 | } else if (!sched_next || tdif < sched_next) |
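The neighbour changes above replace open-coded list walking with __skb_dequeue() and skb_queue_walk_safe(), which keeps a lookahead pointer so the current entry can be unlinked during the walk. As a generic illustration of the idiom (not code from this patch; the predicate and function are assumptions):

#include <linux/skbuff.h>

/* Illustrative only: drop every zero-length skb from a queue the
 * caller has already locked.  skb_queue_walk_safe() makes it safe to
 * __skb_unlink() the entry currently being visited.
 */
static void example_purge_empty(struct sk_buff_head *list)
{
	struct sk_buff *skb, *next;

	skb_queue_walk_safe(list, skb, next) {
		if (skb->len == 0) {
			__skb_unlink(skb, list);
			kfree_skb(skb);
		}
	}
}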
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index c1f4e0d428c0..92d6b9467314 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -209,9 +209,44 @@ static ssize_t store_tx_queue_len(struct device *dev, | |||
209 | return netdev_store(dev, attr, buf, len, change_tx_queue_len); | 209 | return netdev_store(dev, attr, buf, len, change_tx_queue_len); |
210 | } | 210 | } |
211 | 211 | ||
212 | static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr, | ||
213 | const char *buf, size_t len) | ||
214 | { | ||
215 | struct net_device *netdev = to_net_dev(dev); | ||
216 | size_t count = len; | ||
217 | ssize_t ret; | ||
218 | |||
219 | if (!capable(CAP_NET_ADMIN)) | ||
220 | return -EPERM; | ||
221 | |||
222 | /* ignore trailing newline */ | ||
223 | if (len > 0 && buf[len - 1] == '\n') | ||
224 | --count; | ||
225 | |||
226 | rtnl_lock(); | ||
227 | ret = dev_set_alias(netdev, buf, count); | ||
228 | rtnl_unlock(); | ||
229 | |||
230 | return ret < 0 ? ret : len; | ||
231 | } | ||
232 | |||
233 | static ssize_t show_ifalias(struct device *dev, | ||
234 | struct device_attribute *attr, char *buf) | ||
235 | { | ||
236 | const struct net_device *netdev = to_net_dev(dev); | ||
237 | ssize_t ret = 0; | ||
238 | |||
239 | rtnl_lock(); | ||
240 | if (netdev->ifalias) | ||
241 | ret = sprintf(buf, "%s\n", netdev->ifalias); | ||
242 | rtnl_unlock(); | ||
243 | return ret; | ||
244 | } | ||
245 | |||
212 | static struct device_attribute net_class_attributes[] = { | 246 | static struct device_attribute net_class_attributes[] = { |
213 | __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), | 247 | __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), |
214 | __ATTR(dev_id, S_IRUGO, show_dev_id, NULL), | 248 | __ATTR(dev_id, S_IRUGO, show_dev_id, NULL), |
249 | __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias), | ||
215 | __ATTR(iflink, S_IRUGO, show_iflink, NULL), | 250 | __ATTR(iflink, S_IRUGO, show_iflink, NULL), |
216 | __ATTR(ifindex, S_IRUGO, show_ifindex, NULL), | 251 | __ATTR(ifindex, S_IRUGO, show_ifindex, NULL), |
217 | __ATTR(features, S_IRUGO, show_features, NULL), | 252 | __ATTR(features, S_IRUGO, show_features, NULL), |
@@ -418,6 +453,7 @@ static void netdev_release(struct device *d) | |||
418 | 453 | ||
419 | BUG_ON(dev->reg_state != NETREG_RELEASED); | 454 | BUG_ON(dev->reg_state != NETREG_RELEASED); |
420 | 455 | ||
456 | kfree(dev->ifalias); | ||
421 | kfree((char *)dev - dev->padded); | 457 | kfree((char *)dev - dev->padded); |
422 | } | 458 | } |
423 | 459 | ||
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 71edb8b36341..8862498fd4a6 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -586,6 +586,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev) | |||
586 | { | 586 | { |
587 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) | 587 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) |
588 | + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ | 588 | + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ |
589 | + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */ | ||
589 | + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ | 590 | + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ |
590 | + nla_total_size(sizeof(struct rtnl_link_ifmap)) | 591 | + nla_total_size(sizeof(struct rtnl_link_ifmap)) |
591 | + nla_total_size(sizeof(struct rtnl_link_stats)) | 592 | + nla_total_size(sizeof(struct rtnl_link_stats)) |
@@ -640,6 +641,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
640 | if (txq->qdisc_sleeping) | 641 | if (txq->qdisc_sleeping) |
641 | NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id); | 642 | NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id); |
642 | 643 | ||
644 | if (dev->ifalias) | ||
645 | NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias); | ||
646 | |||
643 | if (1) { | 647 | if (1) { |
644 | struct rtnl_link_ifmap map = { | 648 | struct rtnl_link_ifmap map = { |
645 | .mem_start = dev->mem_start, | 649 | .mem_start = dev->mem_start, |
@@ -713,6 +717,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { | |||
713 | [IFLA_LINKMODE] = { .type = NLA_U8 }, | 717 | [IFLA_LINKMODE] = { .type = NLA_U8 }, |
714 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, | 718 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, |
715 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, | 719 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, |
720 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, | ||
716 | }; | 721 | }; |
717 | 722 | ||
718 | static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { | 723 | static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { |
@@ -853,6 +858,14 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | |||
853 | modified = 1; | 858 | modified = 1; |
854 | } | 859 | } |
855 | 860 | ||
861 | if (tb[IFLA_IFALIAS]) { | ||
862 | err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), | ||
863 | nla_len(tb[IFLA_IFALIAS])); | ||
864 | if (err < 0) | ||
865 | goto errout; | ||
866 | modified = 1; | ||
867 | } | ||
868 | |||
856 | if (tb[IFLA_BROADCAST]) { | 869 | if (tb[IFLA_BROADCAST]) { |
857 | nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); | 870 | nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); |
858 | send_addr_notify = 1; | 871 | send_addr_notify = 1; |
diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c new file mode 100644 index 000000000000..1f49afcd8e86 --- /dev/null +++ b/net/core/skb_dma_map.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* skb_dma_map.c: DMA mapping helpers for socket buffers. | ||
2 | * | ||
3 | * Copyright (C) David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/dma-mapping.h> | ||
9 | #include <linux/skbuff.h> | ||
10 | |||
11 | int skb_dma_map(struct device *dev, struct sk_buff *skb, | ||
12 | enum dma_data_direction dir) | ||
13 | { | ||
14 | struct skb_shared_info *sp = skb_shinfo(skb); | ||
15 | dma_addr_t map; | ||
16 | int i; | ||
17 | |||
18 | map = dma_map_single(dev, skb->data, | ||
19 | skb_headlen(skb), dir); | ||
20 | if (dma_mapping_error(dev, map)) | ||
21 | goto out_err; | ||
22 | |||
23 | sp->dma_maps[0] = map; | ||
24 | for (i = 0; i < sp->nr_frags; i++) { | ||
25 | skb_frag_t *fp = &sp->frags[i]; | ||
26 | |||
27 | map = dma_map_page(dev, fp->page, fp->page_offset, | ||
28 | fp->size, dir); | ||
29 | if (dma_mapping_error(dev, map)) | ||
30 | goto unwind; | ||
31 | sp->dma_maps[i + 1] = map; | ||
32 | } | ||
33 | sp->num_dma_maps = i + 1; | ||
34 | |||
35 | return 0; | ||
36 | |||
37 | unwind: | ||
38 | while (--i >= 0) { | ||
39 | skb_frag_t *fp = &sp->frags[i]; | ||
40 | |||
41 | dma_unmap_page(dev, sp->dma_maps[i + 1], | ||
42 | fp->size, dir); | ||
43 | } | ||
44 | dma_unmap_single(dev, sp->dma_maps[0], | ||
45 | skb_headlen(skb), dir); | ||
46 | out_err: | ||
47 | return -ENOMEM; | ||
48 | } | ||
49 | EXPORT_SYMBOL(skb_dma_map); | ||
50 | |||
51 | void skb_dma_unmap(struct device *dev, struct sk_buff *skb, | ||
52 | enum dma_data_direction dir) | ||
53 | { | ||
54 | struct skb_shared_info *sp = skb_shinfo(skb); | ||
55 | int i; | ||
56 | |||
57 | dma_unmap_single(dev, sp->dma_maps[0], | ||
58 | skb_headlen(skb), dir); | ||
59 | for (i = 0; i < sp->nr_frags; i++) { | ||
60 | skb_frag_t *fp = &sp->frags[i]; | ||
61 | |||
62 | dma_unmap_page(dev, sp->dma_maps[i + 1], | ||
63 | fp->size, dir); | ||
64 | } | ||
65 | } | ||
66 | EXPORT_SYMBOL(skb_dma_unmap); | ||
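The new skb_dma_map()/skb_dma_unmap() helpers factor out the map-the-head-then-each-fragment pattern that drivers otherwise open-code in their transmit paths. A hedged sketch of the intended use follows; the example functions, the drop-on-failure policy, and the ring posting are assumptions for illustration, only the two helpers come from this file.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Sketch of a driver TX path using the new helpers. */
static int example_xmit(struct sk_buff *skb, struct device *dma_dev)
{
	if (skb_dma_map(dma_dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb_any(skb);	/* drop on mapping failure */
		return NETDEV_TX_OK;
	}

	/* ... post skb_shinfo(skb)->dma_maps[] to the hardware ring ... */

	return NETDEV_TX_OK;
}

/* Matching TX-completion side. */
static void example_tx_complete(struct device *dma_dev, struct sk_buff *skb)
{
	skb_dma_unmap(dma_dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
}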
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ca1ccdf1ef76..2c218a0808b4 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -363,8 +363,7 @@ static void kfree_skbmem(struct sk_buff *skb) | |||
363 | } | 363 | } |
364 | } | 364 | } |
365 | 365 | ||
366 | /* Free everything but the sk_buff shell. */ | 366 | static void skb_release_head_state(struct sk_buff *skb) |
367 | static void skb_release_all(struct sk_buff *skb) | ||
368 | { | 367 | { |
369 | dst_release(skb->dst); | 368 | dst_release(skb->dst); |
370 | #ifdef CONFIG_XFRM | 369 | #ifdef CONFIG_XFRM |
@@ -388,6 +387,12 @@ static void skb_release_all(struct sk_buff *skb) | |||
388 | skb->tc_verd = 0; | 387 | skb->tc_verd = 0; |
389 | #endif | 388 | #endif |
390 | #endif | 389 | #endif |
390 | } | ||
391 | |||
392 | /* Free everything but the sk_buff shell. */ | ||
393 | static void skb_release_all(struct sk_buff *skb) | ||
394 | { | ||
395 | skb_release_head_state(skb); | ||
391 | skb_release_data(skb); | 396 | skb_release_data(skb); |
392 | } | 397 | } |
393 | 398 | ||
@@ -424,6 +429,38 @@ void kfree_skb(struct sk_buff *skb) | |||
424 | __kfree_skb(skb); | 429 | __kfree_skb(skb); |
425 | } | 430 | } |
426 | 431 | ||
432 | int skb_recycle_check(struct sk_buff *skb, int skb_size) | ||
433 | { | ||
434 | struct skb_shared_info *shinfo; | ||
435 | |||
436 | if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) | ||
437 | return 0; | ||
438 | |||
439 | skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); | ||
440 | if (skb_end_pointer(skb) - skb->head < skb_size) | ||
441 | return 0; | ||
442 | |||
443 | if (skb_shared(skb) || skb_cloned(skb)) | ||
444 | return 0; | ||
445 | |||
446 | skb_release_head_state(skb); | ||
447 | shinfo = skb_shinfo(skb); | ||
448 | atomic_set(&shinfo->dataref, 1); | ||
449 | shinfo->nr_frags = 0; | ||
450 | shinfo->gso_size = 0; | ||
451 | shinfo->gso_segs = 0; | ||
452 | shinfo->gso_type = 0; | ||
453 | shinfo->ip6_frag_id = 0; | ||
454 | shinfo->frag_list = NULL; | ||
455 | |||
456 | memset(skb, 0, offsetof(struct sk_buff, tail)); | ||
457 | skb_reset_tail_pointer(skb); | ||
458 | skb->data = skb->head + NET_SKB_PAD; | ||
459 | |||
460 | return 1; | ||
461 | } | ||
462 | EXPORT_SYMBOL(skb_recycle_check); | ||
463 | |||
427 | static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | 464 | static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) |
428 | { | 465 | { |
429 | new->tstamp = old->tstamp; | 466 | new->tstamp = old->tstamp; |
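skb_recycle_check() added above lets a driver test whether a just-completed TX skb can be reset and reused as an RX buffer instead of being freed and reallocated; on success it clears the shared info and resets the data pointers itself. A sketch of the calling pattern, assuming a hypothetical driver-private recycle queue, queue depth, and buffer size:

#include <linux/skbuff.h>

#define EXAMPLE_RX_BUF_SIZE	1536	/* assumed RX buffer size */

/* Sketch only: recycle a completed TX skb into an RX pool, or free it. */
static void example_tx_done(struct sk_buff_head *recycle_q, struct sk_buff *skb)
{
	if (skb_queue_len(recycle_q) < 64 &&
	    skb_recycle_check(skb, EXAMPLE_RX_BUF_SIZE))
		__skb_queue_head(recycle_q, skb);
	else
		dev_kfree_skb(skb);
}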
diff --git a/net/core/sock.c b/net/core/sock.c index 91f8bbc93526..2d358dd8a03e 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -154,7 +154,8 @@ static const char *af_family_key_strings[AF_MAX+1] = { | |||
154 | "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" , | 154 | "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" , |
155 | "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , | 155 | "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , |
156 | "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , | 156 | "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , |
157 | "sk_lock-AF_RXRPC" , "sk_lock-AF_MAX" | 157 | "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , |
158 | "sk_lock-AF_MAX" | ||
158 | }; | 159 | }; |
159 | static const char *af_family_slock_key_strings[AF_MAX+1] = { | 160 | static const char *af_family_slock_key_strings[AF_MAX+1] = { |
160 | "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , | 161 | "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , |
@@ -168,7 +169,8 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = { | |||
168 | "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" , | 169 | "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" , |
169 | "slock-27" , "slock-28" , "slock-AF_CAN" , | 170 | "slock-27" , "slock-28" , "slock-AF_CAN" , |
170 | "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , | 171 | "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , |
171 | "slock-AF_RXRPC" , "slock-AF_MAX" | 172 | "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , |
173 | "slock-AF_MAX" | ||
172 | }; | 174 | }; |
173 | static const char *af_family_clock_key_strings[AF_MAX+1] = { | 175 | static const char *af_family_clock_key_strings[AF_MAX+1] = { |
174 | "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , | 176 | "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , |
@@ -182,7 +184,8 @@ static const char *af_family_clock_key_strings[AF_MAX+1] = { | |||
182 | "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" , | 184 | "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" , |
183 | "clock-27" , "clock-28" , "clock-AF_CAN" , | 185 | "clock-27" , "clock-28" , "clock-AF_CAN" , |
184 | "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , | 186 | "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , |
185 | "clock-AF_RXRPC" , "clock-AF_MAX" | 187 | "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , |
188 | "clock-AF_MAX" | ||
186 | }; | 189 | }; |
187 | #endif | 190 | #endif |
188 | 191 | ||
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index 8e9580874216..9a430734530c 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c | |||
@@ -783,7 +783,7 @@ static struct ccid_operations ccid2 = { | |||
783 | }; | 783 | }; |
784 | 784 | ||
785 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | 785 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG |
786 | module_param(ccid2_debug, bool, 0444); | 786 | module_param(ccid2_debug, bool, 0644); |
787 | MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); | 787 | MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); |
788 | #endif | 788 | #endif |
789 | 789 | ||
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index f6756e0c9e69..3b8bd7ca6761 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -963,7 +963,7 @@ static struct ccid_operations ccid3 = { | |||
963 | }; | 963 | }; |
964 | 964 | ||
965 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG | 965 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
966 | module_param(ccid3_debug, bool, 0444); | 966 | module_param(ccid3_debug, bool, 0644); |
967 | MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); | 967 | MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); |
968 | #endif | 968 | #endif |
969 | 969 | ||
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c index bcd6ac415bb9..5b3ce0688c5c 100644 --- a/net/dccp/ccids/lib/loss_interval.c +++ b/net/dccp/ccids/lib/loss_interval.c | |||
@@ -67,7 +67,10 @@ static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh) | |||
67 | u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0; | 67 | u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0; |
68 | int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */ | 68 | int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */ |
69 | 69 | ||
70 | for (i=0; i <= k; i++) { | 70 | if (k <= 0) |
71 | return; | ||
72 | |||
73 | for (i = 0; i <= k; i++) { | ||
71 | i_i = tfrc_lh_get_interval(lh, i); | 74 | i_i = tfrc_lh_get_interval(lh, i); |
72 | 75 | ||
73 | if (i < k) { | 76 | if (i < k) { |
@@ -78,7 +81,6 @@ static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh) | |||
78 | i_tot1 += i_i * tfrc_lh_weights[i-1]; | 81 | i_tot1 += i_i * tfrc_lh_weights[i-1]; |
79 | } | 82 | } |
80 | 83 | ||
81 | BUG_ON(w_tot == 0); | ||
82 | lh->i_mean = max(i_tot0, i_tot1) / w_tot; | 84 | lh->i_mean = max(i_tot0, i_tot1) / w_tot; |
83 | } | 85 | } |
84 | 86 | ||
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c index 97ecec0a8e76..185916218e07 100644 --- a/net/dccp/ccids/lib/tfrc.c +++ b/net/dccp/ccids/lib/tfrc.c | |||
@@ -10,7 +10,7 @@ | |||
10 | 10 | ||
11 | #ifdef CONFIG_IP_DCCP_TFRC_DEBUG | 11 | #ifdef CONFIG_IP_DCCP_TFRC_DEBUG |
12 | int tfrc_debug; | 12 | int tfrc_debug; |
13 | module_param(tfrc_debug, bool, 0444); | 13 | module_param(tfrc_debug, bool, 0644); |
14 | MODULE_PARM_DESC(tfrc_debug, "Enable debug messages"); | 14 | MODULE_PARM_DESC(tfrc_debug, "Enable debug messages"); |
15 | #endif | 15 | #endif |
16 | 16 | ||
diff --git a/net/dccp/input.c b/net/dccp/input.c index 803933ab396d..779d0ed9ae94 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -370,7 +370,7 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
370 | goto discard; | 370 | goto discard; |
371 | 371 | ||
372 | if (dccp_parse_options(sk, NULL, skb)) | 372 | if (dccp_parse_options(sk, NULL, skb)) |
373 | goto discard; | 373 | return 1; |
374 | 374 | ||
375 | if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) | 375 | if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) |
376 | dccp_event_ack_recv(sk, skb); | 376 | dccp_event_ack_recv(sk, skb); |
@@ -610,7 +610,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
610 | * Step 8: Process options and mark acknowledgeable | 610 | * Step 8: Process options and mark acknowledgeable |
611 | */ | 611 | */ |
612 | if (dccp_parse_options(sk, NULL, skb)) | 612 | if (dccp_parse_options(sk, NULL, skb)) |
613 | goto discard; | 613 | return 1; |
614 | 614 | ||
615 | if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) | 615 | if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) |
616 | dccp_event_ack_recv(sk, skb); | 616 | dccp_event_ack_recv(sk, skb); |
diff --git a/net/dccp/options.c b/net/dccp/options.c index dc7c158a2f4b..0809b63cb055 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c | |||
@@ -81,11 +81,11 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, | |||
81 | /* Check if this isn't a single byte option */ | 81 | /* Check if this isn't a single byte option */ |
82 | if (opt > DCCPO_MAX_RESERVED) { | 82 | if (opt > DCCPO_MAX_RESERVED) { |
83 | if (opt_ptr == opt_end) | 83 | if (opt_ptr == opt_end) |
84 | goto out_invalid_option; | 84 | goto out_nonsensical_length; |
85 | 85 | ||
86 | len = *opt_ptr++; | 86 | len = *opt_ptr++; |
87 | if (len < 3) | 87 | if (len < 2) |
88 | goto out_invalid_option; | 88 | goto out_nonsensical_length; |
89 | /* | 89 | /* |
90 | * Remove the type and len fields, leaving | 90 | * Remove the type and len fields, leaving |
91 | * just the value size | 91 | * just the value size |
@@ -95,7 +95,7 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, | |||
95 | opt_ptr += len; | 95 | opt_ptr += len; |
96 | 96 | ||
97 | if (opt_ptr > opt_end) | 97 | if (opt_ptr > opt_end) |
98 | goto out_invalid_option; | 98 | goto out_nonsensical_length; |
99 | } | 99 | } |
100 | 100 | ||
101 | /* | 101 | /* |
@@ -283,12 +283,17 @@ ignore_option: | |||
283 | if (mandatory) | 283 | if (mandatory) |
284 | goto out_invalid_option; | 284 | goto out_invalid_option; |
285 | 285 | ||
286 | out_nonsensical_length: | ||
287 | /* RFC 4340, 5.8: ignore option and all remaining option space */ | ||
286 | return 0; | 288 | return 0; |
287 | 289 | ||
288 | out_invalid_option: | 290 | out_invalid_option: |
289 | DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT); | 291 | DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT); |
290 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR; | 292 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR; |
291 | DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len); | 293 | DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len); |
294 | DCCP_SKB_CB(skb)->dccpd_reset_data[0] = opt; | ||
295 | DCCP_SKB_CB(skb)->dccpd_reset_data[1] = len > 0 ? value[0] : 0; | ||
296 | DCCP_SKB_CB(skb)->dccpd_reset_data[2] = len > 1 ? value[1] : 0; | ||
292 | return -1; | 297 | return -1; |
293 | } | 298 | } |
294 | 299 | ||
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 1ca3b26eed0f..d0bd34819761 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -309,7 +309,9 @@ int dccp_disconnect(struct sock *sk, int flags) | |||
309 | sk->sk_err = ECONNRESET; | 309 | sk->sk_err = ECONNRESET; |
310 | 310 | ||
311 | dccp_clear_xmit_timers(sk); | 311 | dccp_clear_xmit_timers(sk); |
312 | |||
312 | __skb_queue_purge(&sk->sk_receive_queue); | 313 | __skb_queue_purge(&sk->sk_receive_queue); |
314 | __skb_queue_purge(&sk->sk_write_queue); | ||
313 | if (sk->sk_send_head != NULL) { | 315 | if (sk->sk_send_head != NULL) { |
314 | __kfree_skb(sk->sk_send_head); | 316 | __kfree_skb(sk->sk_send_head); |
315 | sk->sk_send_head = NULL; | 317 | sk->sk_send_head = NULL; |
@@ -1028,7 +1030,7 @@ MODULE_PARM_DESC(thash_entries, "Number of ehash buckets"); | |||
1028 | 1030 | ||
1029 | #ifdef CONFIG_IP_DCCP_DEBUG | 1031 | #ifdef CONFIG_IP_DCCP_DEBUG |
1030 | int dccp_debug; | 1032 | int dccp_debug; |
1031 | module_param(dccp_debug, bool, 0444); | 1033 | module_param(dccp_debug, bool, 0644); |
1032 | MODULE_PARM_DESC(dccp_debug, "Enable debug messages"); | 1034 | MODULE_PARM_DESC(dccp_debug, "Enable debug messages"); |
1033 | 1035 | ||
1034 | EXPORT_SYMBOL_GPL(dccp_debug); | 1036 | EXPORT_SYMBOL_GPL(dccp_debug); |
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index a80839b02e3f..647a9edee375 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c | |||
@@ -129,7 +129,7 @@ int eth_rebuild_header(struct sk_buff *skb) | |||
129 | 129 | ||
130 | switch (eth->h_proto) { | 130 | switch (eth->h_proto) { |
131 | #ifdef CONFIG_INET | 131 | #ifdef CONFIG_INET |
132 | case __constant_htons(ETH_P_IP): | 132 | case htons(ETH_P_IP): |
133 | return arp_find(eth->h_dest, skb); | 133 | return arp_find(eth->h_dest, skb); |
134 | #endif | 134 | #endif |
135 | default: | 135 | default: |
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 3bca97f55d47..949772a5a7dc 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c | |||
@@ -157,7 +157,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv) | |||
157 | err = ieee80211_networks_allocate(ieee); | 157 | err = ieee80211_networks_allocate(ieee); |
158 | if (err) { | 158 | if (err) { |
159 | IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err); | 159 | IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err); |
160 | goto failed; | 160 | goto failed_free_netdev; |
161 | } | 161 | } |
162 | ieee80211_networks_initialize(ieee); | 162 | ieee80211_networks_initialize(ieee); |
163 | 163 | ||
@@ -193,9 +193,9 @@ struct net_device *alloc_ieee80211(int sizeof_priv) | |||
193 | 193 | ||
194 | return dev; | 194 | return dev; |
195 | 195 | ||
196 | failed: | 196 | failed_free_netdev: |
197 | if (dev) | 197 | free_netdev(dev); |
198 | free_netdev(dev); | 198 | failed: |
199 | return NULL; | 199 | return NULL; |
200 | } | 200 | } |
201 | 201 | ||
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index c10036e7a463..89cb047ab314 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -782,11 +782,15 @@ skip_listen_ht: | |||
782 | struct sock *sk; | 782 | struct sock *sk; |
783 | struct hlist_node *node; | 783 | struct hlist_node *node; |
784 | 784 | ||
785 | num = 0; | ||
786 | |||
787 | if (hlist_empty(&head->chain) && hlist_empty(&head->twchain)) | ||
788 | continue; | ||
789 | |||
785 | if (i > s_i) | 790 | if (i > s_i) |
786 | s_num = 0; | 791 | s_num = 0; |
787 | 792 | ||
788 | read_lock_bh(lock); | 793 | read_lock_bh(lock); |
789 | num = 0; | ||
790 | sk_for_each(sk, node, &head->chain) { | 794 | sk_for_each(sk, node, &head->chain) { |
791 | struct inet_sock *inet = inet_sk(sk); | 795 | struct inet_sock *inet = inet_sk(sk); |
792 | 796 | ||
diff --git a/net/ipv4/ipvs/Kconfig b/net/ipv4/ipvs/Kconfig index 09d0c3f35669..de6004de80bc 100644 --- a/net/ipv4/ipvs/Kconfig +++ b/net/ipv4/ipvs/Kconfig | |||
@@ -24,6 +24,14 @@ menuconfig IP_VS | |||
24 | 24 | ||
25 | if IP_VS | 25 | if IP_VS |
26 | 26 | ||
27 | config IP_VS_IPV6 | ||
28 | bool "IPv6 support for IPVS (DANGEROUS)" | ||
29 | depends on EXPERIMENTAL && (IPV6 = y || IP_VS = IPV6) | ||
30 | ---help--- | ||
31 | Add IPv6 support to IPVS. This is incomplete and might be dangerous. | ||
32 | |||
33 | Say N if unsure. | ||
34 | |||
27 | config IP_VS_DEBUG | 35 | config IP_VS_DEBUG |
28 | bool "IP virtual server debugging" | 36 | bool "IP virtual server debugging" |
29 | ---help--- | 37 | ---help--- |
@@ -33,7 +41,8 @@ config IP_VS_DEBUG | |||
33 | 41 | ||
34 | config IP_VS_TAB_BITS | 42 | config IP_VS_TAB_BITS |
35 | int "IPVS connection table size (the Nth power of 2)" | 43 | int "IPVS connection table size (the Nth power of 2)" |
36 | default "12" | 44 | range 8 20 |
45 | default 12 | ||
37 | ---help--- | 46 | ---help--- |
38 | The IPVS connection hash table uses the chaining scheme to handle | 47 | The IPVS connection hash table uses the chaining scheme to handle |
39 | hash collisions. Using a big IPVS connection hash table will greatly | 48 | hash collisions. Using a big IPVS connection hash table will greatly |
@@ -71,14 +80,20 @@ config IP_VS_PROTO_UDP | |||
71 | This option enables support for load balancing UDP transport | 80 | This option enables support for load balancing UDP transport |
72 | protocol. Say Y if unsure. | 81 | protocol. Say Y if unsure. |
73 | 82 | ||
83 | config IP_VS_PROTO_AH_ESP | ||
84 | bool | ||
85 | depends on UNDEFINED | ||
86 | |||
74 | config IP_VS_PROTO_ESP | 87 | config IP_VS_PROTO_ESP |
75 | bool "ESP load balancing support" | 88 | bool "ESP load balancing support" |
89 | select IP_VS_PROTO_AH_ESP | ||
76 | ---help--- | 90 | ---help--- |
77 | This option enables support for load balancing ESP (Encapsulation | 91 | This option enables support for load balancing ESP (Encapsulation |
78 | Security Payload) transport protocol. Say Y if unsure. | 92 | Security Payload) transport protocol. Say Y if unsure. |
79 | 93 | ||
80 | config IP_VS_PROTO_AH | 94 | config IP_VS_PROTO_AH |
81 | bool "AH load balancing support" | 95 | bool "AH load balancing support" |
96 | select IP_VS_PROTO_AH_ESP | ||
82 | ---help--- | 97 | ---help--- |
83 | This option enables support for load balancing AH (Authentication | 98 | This option enables support for load balancing AH (Authentication |
84 | Header) transport protocol. Say Y if unsure. | 99 | Header) transport protocol. Say Y if unsure. |
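With the new "range 8 20", IP_VS_TAB_BITS is constrained to 8..20, i.e. between 2^8 = 256 and 2^20 = 1048576 hash buckets; the default of 12 gives 4096. A small example of how the option typically turns into the table size and mask (the macro names follow the IP_VS_CONN_TAB_MASK pattern visible in the ip_vs_conn.c hunks below, but treat the exact definitions as an assumption of this sketch):

#include <stdio.h>

/* Assumed to come from Kconfig; 12 is the default shown above. */
#define CONFIG_IP_VS_TAB_BITS	12

#define IP_VS_CONN_TAB_BITS	CONFIG_IP_VS_TAB_BITS
#define IP_VS_CONN_TAB_SIZE	(1 << IP_VS_CONN_TAB_BITS)
#define IP_VS_CONN_TAB_MASK	(IP_VS_CONN_TAB_SIZE - 1)

int main(void)
{
	/* hash values are reduced with the mask, so the size must be a
	 * power of two: 12 bits -> 4096 buckets, mask 0xfff */
	printf("buckets: %d, mask: 0x%x\n",
	       IP_VS_CONN_TAB_SIZE, IP_VS_CONN_TAB_MASK);
	return 0;
}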
diff --git a/net/ipv4/ipvs/Makefile b/net/ipv4/ipvs/Makefile index 30e85de9ffff..73a46fe1fe4c 100644 --- a/net/ipv4/ipvs/Makefile +++ b/net/ipv4/ipvs/Makefile | |||
@@ -6,8 +6,7 @@ | |||
6 | ip_vs_proto-objs-y := | 6 | ip_vs_proto-objs-y := |
7 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o | 7 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o |
8 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o | 8 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o |
9 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_ESP) += ip_vs_proto_esp.o | 9 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o |
10 | ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH) += ip_vs_proto_ah.o | ||
11 | 10 | ||
12 | ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \ | 11 | ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \ |
13 | ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \ | 12 | ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \ |
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c index 44a6872dc245..9a24332fbed8 100644 --- a/net/ipv4/ipvs/ip_vs_conn.c +++ b/net/ipv4/ipvs/ip_vs_conn.c | |||
@@ -114,9 +114,18 @@ static inline void ct_write_unlock_bh(unsigned key) | |||
114 | /* | 114 | /* |
115 | * Returns hash value for IPVS connection entry | 115 | * Returns hash value for IPVS connection entry |
116 | */ | 116 | */ |
117 | static unsigned int ip_vs_conn_hashkey(unsigned proto, __be32 addr, __be16 port) | 117 | static unsigned int ip_vs_conn_hashkey(int af, unsigned proto, |
118 | const union nf_inet_addr *addr, | ||
119 | __be16 port) | ||
118 | { | 120 | { |
119 | return jhash_3words((__force u32)addr, (__force u32)port, proto, ip_vs_conn_rnd) | 121 | #ifdef CONFIG_IP_VS_IPV6 |
122 | if (af == AF_INET6) | ||
123 | return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd), | ||
124 | (__force u32)port, proto, ip_vs_conn_rnd) | ||
125 | & IP_VS_CONN_TAB_MASK; | ||
126 | #endif | ||
127 | return jhash_3words((__force u32)addr->ip, (__force u32)port, proto, | ||
128 | ip_vs_conn_rnd) | ||
120 | & IP_VS_CONN_TAB_MASK; | 129 | & IP_VS_CONN_TAB_MASK; |
121 | } | 130 | } |
122 | 131 | ||
@@ -131,7 +140,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) | |||
131 | int ret; | 140 | int ret; |
132 | 141 | ||
133 | /* Hash by protocol, client address and port */ | 142 | /* Hash by protocol, client address and port */ |
134 | hash = ip_vs_conn_hashkey(cp->protocol, cp->caddr, cp->cport); | 143 | hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport); |
135 | 144 | ||
136 | ct_write_lock(hash); | 145 | ct_write_lock(hash); |
137 | 146 | ||
@@ -162,7 +171,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) | |||
162 | int ret; | 171 | int ret; |
163 | 172 | ||
164 | /* unhash it and decrease its reference counter */ | 173 | /* unhash it and decrease its reference counter */ |
165 | hash = ip_vs_conn_hashkey(cp->protocol, cp->caddr, cp->cport); | 174 | hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport); |
166 | 175 | ||
167 | ct_write_lock(hash); | 176 | ct_write_lock(hash); |
168 | 177 | ||
@@ -187,20 +196,23 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) | |||
187 | * d_addr, d_port: pkt dest address (load balancer) | 196 | * d_addr, d_port: pkt dest address (load balancer) |
188 | */ | 197 | */ |
189 | static inline struct ip_vs_conn *__ip_vs_conn_in_get | 198 | static inline struct ip_vs_conn *__ip_vs_conn_in_get |
190 | (int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) | 199 | (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, |
200 | const union nf_inet_addr *d_addr, __be16 d_port) | ||
191 | { | 201 | { |
192 | unsigned hash; | 202 | unsigned hash; |
193 | struct ip_vs_conn *cp; | 203 | struct ip_vs_conn *cp; |
194 | 204 | ||
195 | hash = ip_vs_conn_hashkey(protocol, s_addr, s_port); | 205 | hash = ip_vs_conn_hashkey(af, protocol, s_addr, s_port); |
196 | 206 | ||
197 | ct_read_lock(hash); | 207 | ct_read_lock(hash); |
198 | 208 | ||
199 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { | 209 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
200 | if (s_addr==cp->caddr && s_port==cp->cport && | 210 | if (cp->af == af && |
201 | d_port==cp->vport && d_addr==cp->vaddr && | 211 | ip_vs_addr_equal(af, s_addr, &cp->caddr) && |
212 | ip_vs_addr_equal(af, d_addr, &cp->vaddr) && | ||
213 | s_port == cp->cport && d_port == cp->vport && | ||
202 | ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) && | 214 | ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) && |
203 | protocol==cp->protocol) { | 215 | protocol == cp->protocol) { |
204 | /* HIT */ | 216 | /* HIT */ |
205 | atomic_inc(&cp->refcnt); | 217 | atomic_inc(&cp->refcnt); |
206 | ct_read_unlock(hash); | 218 | ct_read_unlock(hash); |
@@ -214,39 +226,44 @@ static inline struct ip_vs_conn *__ip_vs_conn_in_get | |||
214 | } | 226 | } |
215 | 227 | ||
216 | struct ip_vs_conn *ip_vs_conn_in_get | 228 | struct ip_vs_conn *ip_vs_conn_in_get |
217 | (int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) | 229 | (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, |
230 | const union nf_inet_addr *d_addr, __be16 d_port) | ||
218 | { | 231 | { |
219 | struct ip_vs_conn *cp; | 232 | struct ip_vs_conn *cp; |
220 | 233 | ||
221 | cp = __ip_vs_conn_in_get(protocol, s_addr, s_port, d_addr, d_port); | 234 | cp = __ip_vs_conn_in_get(af, protocol, s_addr, s_port, d_addr, d_port); |
222 | if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) | 235 | if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) |
223 | cp = __ip_vs_conn_in_get(protocol, s_addr, 0, d_addr, d_port); | 236 | cp = __ip_vs_conn_in_get(af, protocol, s_addr, 0, d_addr, |
237 | d_port); | ||
224 | 238 | ||
225 | IP_VS_DBG(9, "lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n", | 239 | IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n", |
226 | ip_vs_proto_name(protocol), | 240 | ip_vs_proto_name(protocol), |
227 | NIPQUAD(s_addr), ntohs(s_port), | 241 | IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port), |
228 | NIPQUAD(d_addr), ntohs(d_port), | 242 | IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port), |
229 | cp?"hit":"not hit"); | 243 | cp ? "hit" : "not hit"); |
230 | 244 | ||
231 | return cp; | 245 | return cp; |
232 | } | 246 | } |
233 | 247 | ||
234 | /* Get reference to connection template */ | 248 | /* Get reference to connection template */ |
235 | struct ip_vs_conn *ip_vs_ct_in_get | 249 | struct ip_vs_conn *ip_vs_ct_in_get |
236 | (int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) | 250 | (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, |
251 | const union nf_inet_addr *d_addr, __be16 d_port) | ||
237 | { | 252 | { |
238 | unsigned hash; | 253 | unsigned hash; |
239 | struct ip_vs_conn *cp; | 254 | struct ip_vs_conn *cp; |
240 | 255 | ||
241 | hash = ip_vs_conn_hashkey(protocol, s_addr, s_port); | 256 | hash = ip_vs_conn_hashkey(af, protocol, s_addr, s_port); |
242 | 257 | ||
243 | ct_read_lock(hash); | 258 | ct_read_lock(hash); |
244 | 259 | ||
245 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { | 260 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
246 | if (s_addr==cp->caddr && s_port==cp->cport && | 261 | if (cp->af == af && |
247 | d_port==cp->vport && d_addr==cp->vaddr && | 262 | ip_vs_addr_equal(af, s_addr, &cp->caddr) && |
263 | ip_vs_addr_equal(af, d_addr, &cp->vaddr) && | ||
264 | s_port == cp->cport && d_port == cp->vport && | ||
248 | cp->flags & IP_VS_CONN_F_TEMPLATE && | 265 | cp->flags & IP_VS_CONN_F_TEMPLATE && |
249 | protocol==cp->protocol) { | 266 | protocol == cp->protocol) { |
250 | /* HIT */ | 267 | /* HIT */ |
251 | atomic_inc(&cp->refcnt); | 268 | atomic_inc(&cp->refcnt); |
252 | goto out; | 269 | goto out; |
@@ -257,11 +274,11 @@ struct ip_vs_conn *ip_vs_ct_in_get | |||
257 | out: | 274 | out: |
258 | ct_read_unlock(hash); | 275 | ct_read_unlock(hash); |
259 | 276 | ||
260 | IP_VS_DBG(9, "template lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n", | 277 | IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n", |
261 | ip_vs_proto_name(protocol), | 278 | ip_vs_proto_name(protocol), |
262 | NIPQUAD(s_addr), ntohs(s_port), | 279 | IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port), |
263 | NIPQUAD(d_addr), ntohs(d_port), | 280 | IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port), |
264 | cp?"hit":"not hit"); | 281 | cp ? "hit" : "not hit"); |
265 | 282 | ||
266 | return cp; | 283 | return cp; |
267 | } | 284 | } |
@@ -273,7 +290,8 @@ struct ip_vs_conn *ip_vs_ct_in_get | |||
273 | * d_addr, d_port: pkt dest address (foreign host) | 290 | * d_addr, d_port: pkt dest address (foreign host) |
274 | */ | 291 | */ |
275 | struct ip_vs_conn *ip_vs_conn_out_get | 292 | struct ip_vs_conn *ip_vs_conn_out_get |
276 | (int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) | 293 | (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, |
294 | const union nf_inet_addr *d_addr, __be16 d_port) | ||
277 | { | 295 | { |
278 | unsigned hash; | 296 | unsigned hash; |
279 | struct ip_vs_conn *cp, *ret=NULL; | 297 | struct ip_vs_conn *cp, *ret=NULL; |
@@ -281,13 +299,15 @@ struct ip_vs_conn *ip_vs_conn_out_get | |||
281 | /* | 299 | /* |
282 | * Check for "full" addressed entries | 300 | * Check for "full" addressed entries |
283 | */ | 301 | */ |
284 | hash = ip_vs_conn_hashkey(protocol, d_addr, d_port); | 302 | hash = ip_vs_conn_hashkey(af, protocol, d_addr, d_port); |
285 | 303 | ||
286 | ct_read_lock(hash); | 304 | ct_read_lock(hash); |
287 | 305 | ||
288 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { | 306 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
289 | if (d_addr == cp->caddr && d_port == cp->cport && | 307 | if (cp->af == af && |
290 | s_port == cp->dport && s_addr == cp->daddr && | 308 | ip_vs_addr_equal(af, d_addr, &cp->caddr) && |
309 | ip_vs_addr_equal(af, s_addr, &cp->daddr) && | ||
310 | d_port == cp->cport && s_port == cp->dport && | ||
291 | protocol == cp->protocol) { | 311 | protocol == cp->protocol) { |
292 | /* HIT */ | 312 | /* HIT */ |
293 | atomic_inc(&cp->refcnt); | 313 | atomic_inc(&cp->refcnt); |
@@ -298,11 +318,11 @@ struct ip_vs_conn *ip_vs_conn_out_get | |||
298 | 318 | ||
299 | ct_read_unlock(hash); | 319 | ct_read_unlock(hash); |
300 | 320 | ||
301 | IP_VS_DBG(9, "lookup/out %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n", | 321 | IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n", |
302 | ip_vs_proto_name(protocol), | 322 | ip_vs_proto_name(protocol), |
303 | NIPQUAD(s_addr), ntohs(s_port), | 323 | IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port), |
304 | NIPQUAD(d_addr), ntohs(d_port), | 324 | IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port), |
305 | ret?"hit":"not hit"); | 325 | ret ? "hit" : "not hit"); |
306 | 326 | ||
307 | return ret; | 327 | return ret; |
308 | } | 328 | } |
@@ -369,6 +389,33 @@ static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp) | |||
369 | } | 389 | } |
370 | } | 390 | } |
371 | 391 | ||
392 | #ifdef CONFIG_IP_VS_IPV6 | ||
393 | static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp) | ||
394 | { | ||
395 | switch (IP_VS_FWD_METHOD(cp)) { | ||
396 | case IP_VS_CONN_F_MASQ: | ||
397 | cp->packet_xmit = ip_vs_nat_xmit_v6; | ||
398 | break; | ||
399 | |||
400 | case IP_VS_CONN_F_TUNNEL: | ||
401 | cp->packet_xmit = ip_vs_tunnel_xmit_v6; | ||
402 | break; | ||
403 | |||
404 | case IP_VS_CONN_F_DROUTE: | ||
405 | cp->packet_xmit = ip_vs_dr_xmit_v6; | ||
406 | break; | ||
407 | |||
408 | case IP_VS_CONN_F_LOCALNODE: | ||
409 | cp->packet_xmit = ip_vs_null_xmit; | ||
410 | break; | ||
411 | |||
412 | case IP_VS_CONN_F_BYPASS: | ||
413 | cp->packet_xmit = ip_vs_bypass_xmit_v6; | ||
414 | break; | ||
415 | } | ||
416 | } | ||
417 | #endif | ||
418 | |||
372 | 419 | ||
373 | static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest) | 420 | static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest) |
374 | { | 421 | { |
@@ -402,16 +449,16 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest) | |||
402 | cp->flags |= atomic_read(&dest->conn_flags); | 449 | cp->flags |= atomic_read(&dest->conn_flags); |
403 | cp->dest = dest; | 450 | cp->dest = dest; |
404 | 451 | ||
405 | IP_VS_DBG(7, "Bind-dest %s c:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " | 452 | IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d " |
406 | "d:%u.%u.%u.%u:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d " | 453 | "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d " |
407 | "dest->refcnt:%d\n", | 454 | "dest->refcnt:%d\n", |
408 | ip_vs_proto_name(cp->protocol), | 455 | ip_vs_proto_name(cp->protocol), |
409 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 456 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), |
410 | NIPQUAD(cp->vaddr), ntohs(cp->vport), | 457 | IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), |
411 | NIPQUAD(cp->daddr), ntohs(cp->dport), | 458 | IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport), |
412 | ip_vs_fwd_tag(cp), cp->state, | 459 | ip_vs_fwd_tag(cp), cp->state, |
413 | cp->flags, atomic_read(&cp->refcnt), | 460 | cp->flags, atomic_read(&cp->refcnt), |
414 | atomic_read(&dest->refcnt)); | 461 | atomic_read(&dest->refcnt)); |
415 | 462 | ||
416 | /* Update the connection counters */ | 463 | /* Update the connection counters */ |
417 | if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { | 464 | if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { |
@@ -444,8 +491,9 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp) | |||
444 | struct ip_vs_dest *dest; | 491 | struct ip_vs_dest *dest; |
445 | 492 | ||
446 | if ((cp) && (!cp->dest)) { | 493 | if ((cp) && (!cp->dest)) { |
447 | dest = ip_vs_find_dest(cp->daddr, cp->dport, | 494 | dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport, |
448 | cp->vaddr, cp->vport, cp->protocol); | 495 | &cp->vaddr, cp->vport, |
496 | cp->protocol); | ||
449 | ip_vs_bind_dest(cp, dest); | 497 | ip_vs_bind_dest(cp, dest); |
450 | return dest; | 498 | return dest; |
451 | } else | 499 | } else |
@@ -464,16 +512,16 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp) | |||
464 | if (!dest) | 512 | if (!dest) |
465 | return; | 513 | return; |
466 | 514 | ||
467 | IP_VS_DBG(7, "Unbind-dest %s c:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " | 515 | IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d " |
468 | "d:%u.%u.%u.%u:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d " | 516 | "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d " |
469 | "dest->refcnt:%d\n", | 517 | "dest->refcnt:%d\n", |
470 | ip_vs_proto_name(cp->protocol), | 518 | ip_vs_proto_name(cp->protocol), |
471 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 519 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), |
472 | NIPQUAD(cp->vaddr), ntohs(cp->vport), | 520 | IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), |
473 | NIPQUAD(cp->daddr), ntohs(cp->dport), | 521 | IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport), |
474 | ip_vs_fwd_tag(cp), cp->state, | 522 | ip_vs_fwd_tag(cp), cp->state, |
475 | cp->flags, atomic_read(&cp->refcnt), | 523 | cp->flags, atomic_read(&cp->refcnt), |
476 | atomic_read(&dest->refcnt)); | 524 | atomic_read(&dest->refcnt)); |
477 | 525 | ||
478 | /* Update the connection counters */ | 526 | /* Update the connection counters */ |
479 | if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { | 527 | if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { |
@@ -526,13 +574,16 @@ int ip_vs_check_template(struct ip_vs_conn *ct) | |||
526 | !(dest->flags & IP_VS_DEST_F_AVAILABLE) || | 574 | !(dest->flags & IP_VS_DEST_F_AVAILABLE) || |
527 | (sysctl_ip_vs_expire_quiescent_template && | 575 | (sysctl_ip_vs_expire_quiescent_template && |
528 | (atomic_read(&dest->weight) == 0))) { | 576 | (atomic_read(&dest->weight) == 0))) { |
529 | IP_VS_DBG(9, "check_template: dest not available for " | 577 | IP_VS_DBG_BUF(9, "check_template: dest not available for " |
530 | "protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " | 578 | "protocol %s s:%s:%d v:%s:%d " |
531 | "-> d:%u.%u.%u.%u:%d\n", | 579 | "-> d:%s:%d\n", |
532 | ip_vs_proto_name(ct->protocol), | 580 | ip_vs_proto_name(ct->protocol), |
533 | NIPQUAD(ct->caddr), ntohs(ct->cport), | 581 | IP_VS_DBG_ADDR(ct->af, &ct->caddr), |
534 | NIPQUAD(ct->vaddr), ntohs(ct->vport), | 582 | ntohs(ct->cport), |
535 | NIPQUAD(ct->daddr), ntohs(ct->dport)); | 583 | IP_VS_DBG_ADDR(ct->af, &ct->vaddr), |
584 | ntohs(ct->vport), | ||
585 | IP_VS_DBG_ADDR(ct->af, &ct->daddr), | ||
586 | ntohs(ct->dport)); | ||
536 | 587 | ||
537 | /* | 588 | /* |
538 | * Invalidate the connection template | 589 | * Invalidate the connection template |
@@ -625,8 +676,9 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp) | |||
625 | * Create a new connection entry and hash it into the ip_vs_conn_tab | 676 | * Create a new connection entry and hash it into the ip_vs_conn_tab |
626 | */ | 677 | */ |
627 | struct ip_vs_conn * | 678 | struct ip_vs_conn * |
628 | ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport, | 679 | ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport, |
629 | __be32 daddr, __be16 dport, unsigned flags, | 680 | const union nf_inet_addr *vaddr, __be16 vport, |
681 | const union nf_inet_addr *daddr, __be16 dport, unsigned flags, | ||
630 | struct ip_vs_dest *dest) | 682 | struct ip_vs_dest *dest) |
631 | { | 683 | { |
632 | struct ip_vs_conn *cp; | 684 | struct ip_vs_conn *cp; |
@@ -640,12 +692,13 @@ ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport | |||
640 | 692 | ||
641 | INIT_LIST_HEAD(&cp->c_list); | 693 | INIT_LIST_HEAD(&cp->c_list); |
642 | setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp); | 694 | setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp); |
695 | cp->af = af; | ||
643 | cp->protocol = proto; | 696 | cp->protocol = proto; |
644 | cp->caddr = caddr; | 697 | ip_vs_addr_copy(af, &cp->caddr, caddr); |
645 | cp->cport = cport; | 698 | cp->cport = cport; |
646 | cp->vaddr = vaddr; | 699 | ip_vs_addr_copy(af, &cp->vaddr, vaddr); |
647 | cp->vport = vport; | 700 | cp->vport = vport; |
648 | cp->daddr = daddr; | 701 | ip_vs_addr_copy(af, &cp->daddr, daddr); |
649 | cp->dport = dport; | 702 | cp->dport = dport; |
650 | cp->flags = flags; | 703 | cp->flags = flags; |
651 | spin_lock_init(&cp->lock); | 704 | spin_lock_init(&cp->lock); |
@@ -672,7 +725,12 @@ ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport | |||
672 | cp->timeout = 3*HZ; | 725 | cp->timeout = 3*HZ; |
673 | 726 | ||
674 | /* Bind its packet transmitter */ | 727 | /* Bind its packet transmitter */ |
675 | ip_vs_bind_xmit(cp); | 728 | #ifdef CONFIG_IP_VS_IPV6 |
729 | if (af == AF_INET6) | ||
730 | ip_vs_bind_xmit_v6(cp); | ||
731 | else | ||
732 | #endif | ||
733 | ip_vs_bind_xmit(cp); | ||
676 | 734 | ||
677 | if (unlikely(pp && atomic_read(&pp->appcnt))) | 735 | if (unlikely(pp && atomic_read(&pp->appcnt))) |
678 | ip_vs_bind_app(cp, pp); | 736 | ip_vs_bind_app(cp, pp); |
@@ -760,12 +818,26 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v) | |||
760 | else { | 818 | else { |
761 | const struct ip_vs_conn *cp = v; | 819 | const struct ip_vs_conn *cp = v; |
762 | 820 | ||
763 | seq_printf(seq, | 821 | #ifdef CONFIG_IP_VS_IPV6 |
764 | "%-3s %08X %04X %08X %04X %08X %04X %-11s %7lu\n", | 822 | if (cp->af == AF_INET6) |
823 | seq_printf(seq, | ||
824 | "%-3s " NIP6_FMT " %04X " NIP6_FMT | ||
825 | " %04X " NIP6_FMT " %04X %-11s %7lu\n", | ||
826 | ip_vs_proto_name(cp->protocol), | ||
827 | NIP6(cp->caddr.in6), ntohs(cp->cport), | ||
828 | NIP6(cp->vaddr.in6), ntohs(cp->vport), | ||
829 | NIP6(cp->daddr.in6), ntohs(cp->dport), | ||
830 | ip_vs_state_name(cp->protocol, cp->state), | ||
831 | (cp->timer.expires-jiffies)/HZ); | ||
832 | else | ||
833 | #endif | ||
834 | seq_printf(seq, | ||
835 | "%-3s %08X %04X %08X %04X" | ||
836 | " %08X %04X %-11s %7lu\n", | ||
765 | ip_vs_proto_name(cp->protocol), | 837 | ip_vs_proto_name(cp->protocol), |
766 | ntohl(cp->caddr), ntohs(cp->cport), | 838 | ntohl(cp->caddr.ip), ntohs(cp->cport), |
767 | ntohl(cp->vaddr), ntohs(cp->vport), | 839 | ntohl(cp->vaddr.ip), ntohs(cp->vport), |
768 | ntohl(cp->daddr), ntohs(cp->dport), | 840 | ntohl(cp->daddr.ip), ntohs(cp->dport), |
769 | ip_vs_state_name(cp->protocol, cp->state), | 841 | ip_vs_state_name(cp->protocol, cp->state), |
770 | (cp->timer.expires-jiffies)/HZ); | 842 | (cp->timer.expires-jiffies)/HZ); |
771 | } | 843 | } |
@@ -809,12 +881,27 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v) | |||
809 | else { | 881 | else { |
810 | const struct ip_vs_conn *cp = v; | 882 | const struct ip_vs_conn *cp = v; |
811 | 883 | ||
812 | seq_printf(seq, | 884 | #ifdef CONFIG_IP_VS_IPV6 |
813 | "%-3s %08X %04X %08X %04X %08X %04X %-11s %-6s %7lu\n", | 885 | if (cp->af == AF_INET6) |
886 | seq_printf(seq, | ||
887 | "%-3s " NIP6_FMT " %04X " NIP6_FMT | ||
888 | " %04X " NIP6_FMT " %04X %-11s %-6s %7lu\n", | ||
889 | ip_vs_proto_name(cp->protocol), | ||
890 | NIP6(cp->caddr.in6), ntohs(cp->cport), | ||
891 | NIP6(cp->vaddr.in6), ntohs(cp->vport), | ||
892 | NIP6(cp->daddr.in6), ntohs(cp->dport), | ||
893 | ip_vs_state_name(cp->protocol, cp->state), | ||
894 | ip_vs_origin_name(cp->flags), | ||
895 | (cp->timer.expires-jiffies)/HZ); | ||
896 | else | ||
897 | #endif | ||
898 | seq_printf(seq, | ||
899 | "%-3s %08X %04X %08X %04X " | ||
900 | "%08X %04X %-11s %-6s %7lu\n", | ||
814 | ip_vs_proto_name(cp->protocol), | 901 | ip_vs_proto_name(cp->protocol), |
815 | ntohl(cp->caddr), ntohs(cp->cport), | 902 | ntohl(cp->caddr.ip), ntohs(cp->cport), |
816 | ntohl(cp->vaddr), ntohs(cp->vport), | 903 | ntohl(cp->vaddr.ip), ntohs(cp->vport), |
817 | ntohl(cp->daddr), ntohs(cp->dport), | 904 | ntohl(cp->daddr.ip), ntohs(cp->dport), |
818 | ip_vs_state_name(cp->protocol, cp->state), | 905 | ip_vs_state_name(cp->protocol, cp->state), |
819 | ip_vs_origin_name(cp->flags), | 906 | ip_vs_origin_name(cp->flags), |
820 | (cp->timer.expires-jiffies)/HZ); | 907 | (cp->timer.expires-jiffies)/HZ); |
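The connection hashing above becomes address-family aware: an IPv6 address (16 bytes) is first folded to 32 bits with jhash() and that value is mixed with port and protocol in jhash_3words(), while an IPv4 address feeds jhash_3words() directly as before; the result is masked down to a bucket index. A userspace sketch of that two-step reduction; the mixing functions below are simple stand-ins for the kernel's jhash routines and the constants are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CONN_TAB_BITS	12
#define CONN_TAB_MASK	((1u << CONN_TAB_BITS) - 1)

/* Stand-in for the kernel's jhash_3words(); only the shape of the
 * computation matters here, not the mixing quality. */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	uint32_t h = seed ^ a;

	h = (h * 0x9e3779b1u) ^ b;
	h = (h * 0x9e3779b1u) ^ c;
	return h;
}

/* Stand-in for jhash() over a byte range. */
static uint32_t mix_bytes(const void *p, size_t len, uint32_t seed)
{
	const uint8_t *b = p;
	uint32_t h = seed;

	for (size_t i = 0; i < len; i++)
		h = (h * 31) + b[i];
	return h;
}

/* af-aware key, mirroring ip_vs_conn_hashkey(): IPv6 addresses are
 * first folded to 32 bits, IPv4 addresses are used as-is. */
static unsigned int conn_hashkey(int is_v6, const uint8_t addr[16],
				 uint16_t port, uint8_t proto, uint32_t rnd)
{
	uint32_t a;

	if (is_v6)
		a = mix_bytes(addr, 16, rnd);
	else
		memcpy(&a, addr, 4);

	return mix3(a, port, proto, rnd) & CONN_TAB_MASK;
}

int main(void)
{
	uint8_t v4[16] = { 192, 0, 2, 1 };
	uint8_t v6[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 1 };

	printf("v4 bucket: %u\n", conn_hashkey(0, v4, 80, 6, 0x12345678));
	printf("v6 bucket: %u\n", conn_hashkey(1, v6, 80, 6, 0x12345678));
	return 0;
}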
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c index a7879eafc3b5..958abf3e5f8c 100644 --- a/net/ipv4/ipvs/ip_vs_core.c +++ b/net/ipv4/ipvs/ip_vs_core.c | |||
@@ -39,6 +39,11 @@ | |||
39 | #include <linux/netfilter.h> | 39 | #include <linux/netfilter.h> |
40 | #include <linux/netfilter_ipv4.h> | 40 | #include <linux/netfilter_ipv4.h> |
41 | 41 | ||
42 | #ifdef CONFIG_IP_VS_IPV6 | ||
43 | #include <net/ipv6.h> | ||
44 | #include <linux/netfilter_ipv6.h> | ||
45 | #endif | ||
46 | |||
42 | #include <net/ip_vs.h> | 47 | #include <net/ip_vs.h> |
43 | 48 | ||
44 | 49 | ||
@@ -60,6 +65,7 @@ EXPORT_SYMBOL(ip_vs_get_debug_level); | |||
60 | 65 | ||
61 | /* ID used in ICMP lookups */ | 66 | /* ID used in ICMP lookups */ |
62 | #define icmp_id(icmph) (((icmph)->un).echo.id) | 67 | #define icmp_id(icmph) (((icmph)->un).echo.id) |
68 | #define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier) | ||
63 | 69 | ||
64 | const char *ip_vs_proto_name(unsigned proto) | 70 | const char *ip_vs_proto_name(unsigned proto) |
65 | { | 71 | { |
@@ -74,6 +80,10 @@ const char *ip_vs_proto_name(unsigned proto) | |||
74 | return "TCP"; | 80 | return "TCP"; |
75 | case IPPROTO_ICMP: | 81 | case IPPROTO_ICMP: |
76 | return "ICMP"; | 82 | return "ICMP"; |
83 | #ifdef CONFIG_IP_VS_IPV6 | ||
84 | case IPPROTO_ICMPV6: | ||
85 | return "ICMPv6"; | ||
86 | #endif | ||
77 | default: | 87 | default: |
78 | sprintf(buf, "IP_%d", proto); | 88 | sprintf(buf, "IP_%d", proto); |
79 | return buf; | 89 | return buf; |
@@ -92,18 +102,18 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb) | |||
92 | struct ip_vs_dest *dest = cp->dest; | 102 | struct ip_vs_dest *dest = cp->dest; |
93 | if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { | 103 | if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { |
94 | spin_lock(&dest->stats.lock); | 104 | spin_lock(&dest->stats.lock); |
95 | dest->stats.inpkts++; | 105 | dest->stats.ustats.inpkts++; |
96 | dest->stats.inbytes += skb->len; | 106 | dest->stats.ustats.inbytes += skb->len; |
97 | spin_unlock(&dest->stats.lock); | 107 | spin_unlock(&dest->stats.lock); |
98 | 108 | ||
99 | spin_lock(&dest->svc->stats.lock); | 109 | spin_lock(&dest->svc->stats.lock); |
100 | dest->svc->stats.inpkts++; | 110 | dest->svc->stats.ustats.inpkts++; |
101 | dest->svc->stats.inbytes += skb->len; | 111 | dest->svc->stats.ustats.inbytes += skb->len; |
102 | spin_unlock(&dest->svc->stats.lock); | 112 | spin_unlock(&dest->svc->stats.lock); |
103 | 113 | ||
104 | spin_lock(&ip_vs_stats.lock); | 114 | spin_lock(&ip_vs_stats.lock); |
105 | ip_vs_stats.inpkts++; | 115 | ip_vs_stats.ustats.inpkts++; |
106 | ip_vs_stats.inbytes += skb->len; | 116 | ip_vs_stats.ustats.inbytes += skb->len; |
107 | spin_unlock(&ip_vs_stats.lock); | 117 | spin_unlock(&ip_vs_stats.lock); |
108 | } | 118 | } |
109 | } | 119 | } |
@@ -115,18 +125,18 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) | |||
115 | struct ip_vs_dest *dest = cp->dest; | 125 | struct ip_vs_dest *dest = cp->dest; |
116 | if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { | 126 | if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { |
117 | spin_lock(&dest->stats.lock); | 127 | spin_lock(&dest->stats.lock); |
118 | dest->stats.outpkts++; | 128 | dest->stats.ustats.outpkts++; |
119 | dest->stats.outbytes += skb->len; | 129 | dest->stats.ustats.outbytes += skb->len; |
120 | spin_unlock(&dest->stats.lock); | 130 | spin_unlock(&dest->stats.lock); |
121 | 131 | ||
122 | spin_lock(&dest->svc->stats.lock); | 132 | spin_lock(&dest->svc->stats.lock); |
123 | dest->svc->stats.outpkts++; | 133 | dest->svc->stats.ustats.outpkts++; |
124 | dest->svc->stats.outbytes += skb->len; | 134 | dest->svc->stats.ustats.outbytes += skb->len; |
125 | spin_unlock(&dest->svc->stats.lock); | 135 | spin_unlock(&dest->svc->stats.lock); |
126 | 136 | ||
127 | spin_lock(&ip_vs_stats.lock); | 137 | spin_lock(&ip_vs_stats.lock); |
128 | ip_vs_stats.outpkts++; | 138 | ip_vs_stats.ustats.outpkts++; |
129 | ip_vs_stats.outbytes += skb->len; | 139 | ip_vs_stats.ustats.outbytes += skb->len; |
130 | spin_unlock(&ip_vs_stats.lock); | 140 | spin_unlock(&ip_vs_stats.lock); |
131 | } | 141 | } |
132 | } | 142 | } |
@@ -136,15 +146,15 @@ static inline void | |||
136 | ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) | 146 | ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) |
137 | { | 147 | { |
138 | spin_lock(&cp->dest->stats.lock); | 148 | spin_lock(&cp->dest->stats.lock); |
139 | cp->dest->stats.conns++; | 149 | cp->dest->stats.ustats.conns++; |
140 | spin_unlock(&cp->dest->stats.lock); | 150 | spin_unlock(&cp->dest->stats.lock); |
141 | 151 | ||
142 | spin_lock(&svc->stats.lock); | 152 | spin_lock(&svc->stats.lock); |
143 | svc->stats.conns++; | 153 | svc->stats.ustats.conns++; |
144 | spin_unlock(&svc->stats.lock); | 154 | spin_unlock(&svc->stats.lock); |
145 | 155 | ||
146 | spin_lock(&ip_vs_stats.lock); | 156 | spin_lock(&ip_vs_stats.lock); |
147 | ip_vs_stats.conns++; | 157 | ip_vs_stats.ustats.conns++; |
148 | spin_unlock(&ip_vs_stats.lock); | 158 | spin_unlock(&ip_vs_stats.lock); |
149 | } | 159 | } |
150 | 160 | ||
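The counters above move into an embedded ustats block, so every update becomes stats.ustats.<field> under the existing stats.lock; the apparent intent is to keep the user-visible counters together as one structure, separate from kernel-internal state. A rough layout sketch, with the structure shape assumed and field names taken from the hunks above:

#include <stdio.h>

/* User-visible counters kept together as one block. */
struct stats_user {
	unsigned int conns;
	unsigned int inpkts;
	unsigned int outpkts;
	unsigned long long inbytes;
	unsigned long long outbytes;
};

/* Kernel-side wrapper: the user block is embedded next to
 * kernel-internal state (a stand-in lock here). */
struct stats {
	struct stats_user ustats;
	int lock;		/* stand-in for spinlock_t */
};

static void account_in(struct stats *s, unsigned int len)
{
	/* spin_lock(&s->lock); */
	s->ustats.inpkts++;
	s->ustats.inbytes += len;
	/* spin_unlock(&s->lock); */
}

int main(void)
{
	struct stats s = { { 0 } };

	account_in(&s, 1500);
	printf("inpkts=%u inbytes=%llu\n",
	       s.ustats.inpkts, s.ustats.inbytes);
	return 0;
}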
@@ -173,20 +183,28 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
173 | __be16 ports[2]) | 183 | __be16 ports[2]) |
174 | { | 184 | { |
175 | struct ip_vs_conn *cp = NULL; | 185 | struct ip_vs_conn *cp = NULL; |
176 | struct iphdr *iph = ip_hdr(skb); | 186 | struct ip_vs_iphdr iph; |
177 | struct ip_vs_dest *dest; | 187 | struct ip_vs_dest *dest; |
178 | struct ip_vs_conn *ct; | 188 | struct ip_vs_conn *ct; |
179 | __be16 dport; /* destination port to forward */ | 189 | __be16 dport; /* destination port to forward */ |
180 | __be32 snet; /* source network of the client, after masking */ | 190 | union nf_inet_addr snet; /* source network of the client, |
191 | after masking */ | ||
192 | |||
193 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | ||
181 | 194 | ||
182 | /* Mask saddr with the netmask to adjust template granularity */ | 195 | /* Mask saddr with the netmask to adjust template granularity */ |
183 | snet = iph->saddr & svc->netmask; | 196 | #ifdef CONFIG_IP_VS_IPV6 |
197 | if (svc->af == AF_INET6) | ||
198 | ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask); | ||
199 | else | ||
200 | #endif | ||
201 | snet.ip = iph.saddr.ip & svc->netmask; | ||
184 | 202 | ||
185 | IP_VS_DBG(6, "p-schedule: src %u.%u.%u.%u:%u dest %u.%u.%u.%u:%u " | 203 | IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u " |
186 | "mnet %u.%u.%u.%u\n", | 204 | "mnet %s\n", |
187 | NIPQUAD(iph->saddr), ntohs(ports[0]), | 205 | IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]), |
188 | NIPQUAD(iph->daddr), ntohs(ports[1]), | 206 | IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]), |
189 | NIPQUAD(snet)); | 207 | IP_VS_DBG_ADDR(svc->af, &snet)); |
190 | 208 | ||
191 | /* | 209 | /* |
192 | * As far as we know, FTP is a very complicated network protocol, and | 210 | * As far as we know, FTP is a very complicated network protocol, and |
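Persistence templates above are now keyed on the client's network rather than its full address: IPv4 masks the source address with svc->netmask, while IPv6 keeps only the first netmask bits via ipv6_addr_prefix(). A small worked example of the IPv4 masking, with illustrative addresses:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	struct in_addr saddr, netmask, snet;

	/* client 192.0.2.77, persistence granularity /24 */
	inet_pton(AF_INET, "192.0.2.77", &saddr);
	inet_pton(AF_INET, "255.255.255.0", &netmask);

	/* same operation as "snet.ip = iph.saddr.ip & svc->netmask":
	 * every client in 192.0.2.0/24 maps to the same template */
	snet.s_addr = saddr.s_addr & netmask.s_addr;

	char buf[INET_ADDRSTRLEN];
	printf("template source network: %s\n",
	       inet_ntop(AF_INET, &snet, buf, sizeof(buf)));
	return 0;
}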
@@ -204,11 +222,11 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
204 | if (ports[1] == svc->port) { | 222 | if (ports[1] == svc->port) { |
205 | /* Check if a template already exists */ | 223 | /* Check if a template already exists */ |
206 | if (svc->port != FTPPORT) | 224 | if (svc->port != FTPPORT) |
207 | ct = ip_vs_ct_in_get(iph->protocol, snet, 0, | 225 | ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0, |
208 | iph->daddr, ports[1]); | 226 | &iph.daddr, ports[1]); |
209 | else | 227 | else |
210 | ct = ip_vs_ct_in_get(iph->protocol, snet, 0, | 228 | ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0, |
211 | iph->daddr, 0); | 229 | &iph.daddr, 0); |
212 | 230 | ||
213 | if (!ct || !ip_vs_check_template(ct)) { | 231 | if (!ct || !ip_vs_check_template(ct)) { |
214 | /* | 232 | /* |
@@ -228,18 +246,18 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
228 | * for ftp service. | 246 | * for ftp service. |
229 | */ | 247 | */ |
230 | if (svc->port != FTPPORT) | 248 | if (svc->port != FTPPORT) |
231 | ct = ip_vs_conn_new(iph->protocol, | 249 | ct = ip_vs_conn_new(svc->af, iph.protocol, |
232 | snet, 0, | 250 | &snet, 0, |
233 | iph->daddr, | 251 | &iph.daddr, |
234 | ports[1], | 252 | ports[1], |
235 | dest->addr, dest->port, | 253 | &dest->addr, dest->port, |
236 | IP_VS_CONN_F_TEMPLATE, | 254 | IP_VS_CONN_F_TEMPLATE, |
237 | dest); | 255 | dest); |
238 | else | 256 | else |
239 | ct = ip_vs_conn_new(iph->protocol, | 257 | ct = ip_vs_conn_new(svc->af, iph.protocol, |
240 | snet, 0, | 258 | &snet, 0, |
241 | iph->daddr, 0, | 259 | &iph.daddr, 0, |
242 | dest->addr, 0, | 260 | &dest->addr, 0, |
243 | IP_VS_CONN_F_TEMPLATE, | 261 | IP_VS_CONN_F_TEMPLATE, |
244 | dest); | 262 | dest); |
245 | if (ct == NULL) | 263 | if (ct == NULL) |
@@ -258,12 +276,16 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
258 | * fwmark template: <IPPROTO_IP,caddr,0,fwmark,0,daddr,0> | 276 | * fwmark template: <IPPROTO_IP,caddr,0,fwmark,0,daddr,0> |
259 | * port zero template: <protocol,caddr,0,vaddr,0,daddr,0> | 277 | * port zero template: <protocol,caddr,0,vaddr,0,daddr,0> |
260 | */ | 278 | */ |
261 | if (svc->fwmark) | 279 | if (svc->fwmark) { |
262 | ct = ip_vs_ct_in_get(IPPROTO_IP, snet, 0, | 280 | union nf_inet_addr fwmark = { |
263 | htonl(svc->fwmark), 0); | 281 | .all = { 0, 0, 0, htonl(svc->fwmark) } |
264 | else | 282 | }; |
265 | ct = ip_vs_ct_in_get(iph->protocol, snet, 0, | 283 | |
266 | iph->daddr, 0); | 284 | ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0, |
285 | &fwmark, 0); | ||
286 | } else | ||
287 | ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0, | ||
288 | &iph.daddr, 0); | ||
267 | 289 | ||
268 | if (!ct || !ip_vs_check_template(ct)) { | 290 | if (!ct || !ip_vs_check_template(ct)) { |
269 | /* | 291 | /* |
@@ -282,18 +304,22 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
282 | /* | 304 | /* |
283 | * Create a template according to the service | 305 | * Create a template according to the service |
284 | */ | 306 | */ |
285 | if (svc->fwmark) | 307 | if (svc->fwmark) { |
286 | ct = ip_vs_conn_new(IPPROTO_IP, | 308 | union nf_inet_addr fwmark = { |
287 | snet, 0, | 309 | .all = { 0, 0, 0, htonl(svc->fwmark) } |
288 | htonl(svc->fwmark), 0, | 310 | }; |
289 | dest->addr, 0, | 311 | |
312 | ct = ip_vs_conn_new(svc->af, IPPROTO_IP, | ||
313 | &snet, 0, | ||
314 | &fwmark, 0, | ||
315 | &dest->addr, 0, | ||
290 | IP_VS_CONN_F_TEMPLATE, | 316 | IP_VS_CONN_F_TEMPLATE, |
291 | dest); | 317 | dest); |
292 | else | 318 | } else |
293 | ct = ip_vs_conn_new(iph->protocol, | 319 | ct = ip_vs_conn_new(svc->af, iph.protocol, |
294 | snet, 0, | 320 | &snet, 0, |
295 | iph->daddr, 0, | 321 | &iph.daddr, 0, |
296 | dest->addr, 0, | 322 | &dest->addr, 0, |
297 | IP_VS_CONN_F_TEMPLATE, | 323 | IP_VS_CONN_F_TEMPLATE, |
298 | dest); | 324 | dest); |
299 | if (ct == NULL) | 325 | if (ct == NULL) |
@@ -310,10 +336,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
310 | /* | 336 | /* |
311 | * Create a new connection according to the template | 337 | * Create a new connection according to the template |
312 | */ | 338 | */ |
313 | cp = ip_vs_conn_new(iph->protocol, | 339 | cp = ip_vs_conn_new(svc->af, iph.protocol, |
314 | iph->saddr, ports[0], | 340 | &iph.saddr, ports[0], |
315 | iph->daddr, ports[1], | 341 | &iph.daddr, ports[1], |
316 | dest->addr, dport, | 342 | &dest->addr, dport, |
317 | 0, | 343 | 0, |
318 | dest); | 344 | dest); |
319 | if (cp == NULL) { | 345 | if (cp == NULL) { |
@@ -342,12 +368,12 @@ struct ip_vs_conn * | |||
342 | ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | 368 | ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) |
343 | { | 369 | { |
344 | struct ip_vs_conn *cp = NULL; | 370 | struct ip_vs_conn *cp = NULL; |
345 | struct iphdr *iph = ip_hdr(skb); | 371 | struct ip_vs_iphdr iph; |
346 | struct ip_vs_dest *dest; | 372 | struct ip_vs_dest *dest; |
347 | __be16 _ports[2], *pptr; | 373 | __be16 _ports[2], *pptr; |
348 | 374 | ||
349 | pptr = skb_header_pointer(skb, iph->ihl*4, | 375 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); |
350 | sizeof(_ports), _ports); | 376 | pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); |
351 | if (pptr == NULL) | 377 | if (pptr == NULL) |
352 | return NULL; | 378 | return NULL; |
353 | 379 | ||
@@ -377,22 +403,22 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
377 | /* | 403 | /* |
378 | * Create a connection entry. | 404 | * Create a connection entry. |
379 | */ | 405 | */ |
380 | cp = ip_vs_conn_new(iph->protocol, | 406 | cp = ip_vs_conn_new(svc->af, iph.protocol, |
381 | iph->saddr, pptr[0], | 407 | &iph.saddr, pptr[0], |
382 | iph->daddr, pptr[1], | 408 | &iph.daddr, pptr[1], |
383 | dest->addr, dest->port?dest->port:pptr[1], | 409 | &dest->addr, dest->port ? dest->port : pptr[1], |
384 | 0, | 410 | 0, |
385 | dest); | 411 | dest); |
386 | if (cp == NULL) | 412 | if (cp == NULL) |
387 | return NULL; | 413 | return NULL; |
388 | 414 | ||
389 | IP_VS_DBG(6, "Schedule fwd:%c c:%u.%u.%u.%u:%u v:%u.%u.%u.%u:%u " | 415 | IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u " |
390 | "d:%u.%u.%u.%u:%u conn->flags:%X conn->refcnt:%d\n", | 416 | "d:%s:%u conn->flags:%X conn->refcnt:%d\n", |
391 | ip_vs_fwd_tag(cp), | 417 | ip_vs_fwd_tag(cp), |
392 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 418 | IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport), |
393 | NIPQUAD(cp->vaddr), ntohs(cp->vport), | 419 | IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport), |
394 | NIPQUAD(cp->daddr), ntohs(cp->dport), | 420 | IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport), |
395 | cp->flags, atomic_read(&cp->refcnt)); | 421 | cp->flags, atomic_read(&cp->refcnt)); |
396 | 422 | ||
397 | ip_vs_conn_stats(cp, svc); | 423 | ip_vs_conn_stats(cp, svc); |
398 | return cp; | 424 | return cp; |
@@ -408,31 +434,39 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | |||
408 | struct ip_vs_protocol *pp) | 434 | struct ip_vs_protocol *pp) |
409 | { | 435 | { |
410 | __be16 _ports[2], *pptr; | 436 | __be16 _ports[2], *pptr; |
411 | struct iphdr *iph = ip_hdr(skb); | 437 | struct ip_vs_iphdr iph; |
438 | int unicast; | ||
439 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | ||
412 | 440 | ||
413 | pptr = skb_header_pointer(skb, iph->ihl*4, | 441 | pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); |
414 | sizeof(_ports), _ports); | ||
415 | if (pptr == NULL) { | 442 | if (pptr == NULL) { |
416 | ip_vs_service_put(svc); | 443 | ip_vs_service_put(svc); |
417 | return NF_DROP; | 444 | return NF_DROP; |
418 | } | 445 | } |
419 | 446 | ||
447 | #ifdef CONFIG_IP_VS_IPV6 | ||
448 | if (svc->af == AF_INET6) | ||
449 | unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST; | ||
450 | else | ||
451 | #endif | ||
452 | unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST); | ||
453 | |||
420 | /* if it is fwmark-based service, the cache_bypass sysctl is up | 454 | /* if it is fwmark-based service, the cache_bypass sysctl is up |
421 | and the destination is RTN_UNICAST (and not local), then create | 455 | and the destination is a non-local unicast, then create |
422 | a cache_bypass connection entry */ | 456 | a cache_bypass connection entry */ |
423 | if (sysctl_ip_vs_cache_bypass && svc->fwmark | 457 | if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) { |
424 | && (inet_addr_type(&init_net, iph->daddr) == RTN_UNICAST)) { | ||
425 | int ret, cs; | 458 | int ret, cs; |
426 | struct ip_vs_conn *cp; | 459 | struct ip_vs_conn *cp; |
460 | union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } }; | ||
427 | 461 | ||
428 | ip_vs_service_put(svc); | 462 | ip_vs_service_put(svc); |
429 | 463 | ||
430 | /* create a new connection entry */ | 464 | /* create a new connection entry */ |
431 | IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n"); | 465 | IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n"); |
432 | cp = ip_vs_conn_new(iph->protocol, | 466 | cp = ip_vs_conn_new(svc->af, iph.protocol, |
433 | iph->saddr, pptr[0], | 467 | &iph.saddr, pptr[0], |
434 | iph->daddr, pptr[1], | 468 | &iph.daddr, pptr[1], |
435 | 0, 0, | 469 | &daddr, 0, |
436 | IP_VS_CONN_F_BYPASS, | 470 | IP_VS_CONN_F_BYPASS, |
437 | NULL); | 471 | NULL); |
438 | if (cp == NULL) | 472 | if (cp == NULL) |
@@ -473,7 +507,14 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | |||
473 | * created, the TCP RST packet cannot be sent, instead that | 507 | * created, the TCP RST packet cannot be sent, instead that |
474 | * ICMP_PORT_UNREACH is sent here no matter it is TCP/UDP. --WZ | 508 | * ICMP_PORT_UNREACH is sent here no matter it is TCP/UDP. --WZ |
475 | */ | 509 | */ |
476 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); | 510 | #ifdef CONFIG_IP_VS_IPV6 |
511 | if (svc->af == AF_INET6) | ||
512 | icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, | ||
513 | skb->dev); | ||
514 | else | ||
515 | #endif | ||
516 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); | ||
517 | |||
477 | return NF_DROP; | 518 | return NF_DROP; |
478 | } | 519 | } |
479 | 520 | ||
@@ -512,6 +553,14 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user) | |||
512 | return err; | 553 | return err; |
513 | } | 554 | } |
514 | 555 | ||
556 | #ifdef CONFIG_IP_VS_IPV6 | ||
557 | static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user) | ||
558 | { | ||
559 | /* TODO IPv6: Find out what to do here for IPv6 */ | ||
560 | return 0; | ||
561 | } | ||
562 | #endif | ||
563 | |||
515 | /* | 564 | /* |
516 | * Packet has been made sufficiently writable in caller | 565 | * Packet has been made sufficiently writable in caller |
517 | * - inout: 1=in->out, 0=out->in | 566 | * - inout: 1=in->out, 0=out->in |
@@ -526,14 +575,14 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
526 | struct iphdr *ciph = (struct iphdr *)(icmph + 1); | 575 | struct iphdr *ciph = (struct iphdr *)(icmph + 1); |
527 | 576 | ||
528 | if (inout) { | 577 | if (inout) { |
529 | iph->saddr = cp->vaddr; | 578 | iph->saddr = cp->vaddr.ip; |
530 | ip_send_check(iph); | 579 | ip_send_check(iph); |
531 | ciph->daddr = cp->vaddr; | 580 | ciph->daddr = cp->vaddr.ip; |
532 | ip_send_check(ciph); | 581 | ip_send_check(ciph); |
533 | } else { | 582 | } else { |
534 | iph->daddr = cp->daddr; | 583 | iph->daddr = cp->daddr.ip; |
535 | ip_send_check(iph); | 584 | ip_send_check(iph); |
536 | ciph->saddr = cp->daddr; | 585 | ciph->saddr = cp->daddr.ip; |
537 | ip_send_check(ciph); | 586 | ip_send_check(ciph); |
538 | } | 587 | } |
539 | 588 | ||
@@ -560,21 +609,112 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
560 | "Forwarding altered incoming ICMP"); | 609 | "Forwarding altered incoming ICMP"); |
561 | } | 610 | } |
562 | 611 | ||
612 | #ifdef CONFIG_IP_VS_IPV6 | ||
613 | void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
614 | struct ip_vs_conn *cp, int inout) | ||
615 | { | ||
616 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
617 | unsigned int icmp_offset = sizeof(struct ipv6hdr); | ||
618 | struct icmp6hdr *icmph = (struct icmp6hdr *)(skb_network_header(skb) + | ||
619 | icmp_offset); | ||
620 | struct ipv6hdr *ciph = (struct ipv6hdr *)(icmph + 1); | ||
621 | |||
622 | if (inout) { | ||
623 | iph->saddr = cp->vaddr.in6; | ||
624 | ciph->daddr = cp->vaddr.in6; | ||
625 | } else { | ||
626 | iph->daddr = cp->daddr.in6; | ||
627 | ciph->saddr = cp->daddr.in6; | ||
628 | } | ||
629 | |||
630 | /* the TCP/UDP port */ | ||
631 | if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr) { | ||
632 | __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr); | ||
633 | |||
634 | if (inout) | ||
635 | ports[1] = cp->vport; | ||
636 | else | ||
637 | ports[0] = cp->dport; | ||
638 | } | ||
639 | |||
640 | /* And finally the ICMP checksum */ | ||
641 | icmph->icmp6_cksum = 0; | ||
642 | /* TODO IPv6: is this correct for ICMPv6? */ | ||
643 | ip_vs_checksum_complete(skb, icmp_offset); | ||
644 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
645 | |||
646 | if (inout) | ||
647 | IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph, | ||
648 | "Forwarding altered outgoing ICMPv6"); | ||
649 | else | ||
650 | IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph, | ||
651 | "Forwarding altered incoming ICMPv6"); | ||
652 | } | ||
653 | #endif | ||
654 | |||
655 | /* Handle relevant response ICMP messages - forward to the right | ||
656 | * destination host. Used for NAT and local client. | ||
657 | */ | ||
658 | static int handle_response_icmp(int af, struct sk_buff *skb, | ||
659 | union nf_inet_addr *snet, | ||
660 | __u8 protocol, struct ip_vs_conn *cp, | ||
661 | struct ip_vs_protocol *pp, | ||
662 | unsigned int offset, unsigned int ihl) | ||
663 | { | ||
664 | unsigned int verdict = NF_DROP; | ||
665 | |||
666 | if (IP_VS_FWD_METHOD(cp) != 0) { | ||
667 | IP_VS_ERR("shouldn't reach here, because the box is on the " | ||
668 | "half connection in the tun/dr module.\n"); | ||
669 | } | ||
670 | |||
671 | /* Ensure the checksum is correct */ | ||
672 | if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { | ||
673 | /* Failed checksum! */ | ||
674 | IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n", | ||
675 | IP_VS_DBG_ADDR(af, snet)); | ||
676 | goto out; | ||
677 | } | ||
678 | |||
679 | if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol) | ||
680 | offset += 2 * sizeof(__u16); | ||
681 | if (!skb_make_writable(skb, offset)) | ||
682 | goto out; | ||
683 | |||
684 | #ifdef CONFIG_IP_VS_IPV6 | ||
685 | if (af == AF_INET6) | ||
686 | ip_vs_nat_icmp_v6(skb, pp, cp, 1); | ||
687 | else | ||
688 | #endif | ||
689 | ip_vs_nat_icmp(skb, pp, cp, 1); | ||
690 | |||
691 | /* do the statistics and put it back */ | ||
692 | ip_vs_out_stats(cp, skb); | ||
693 | |||
694 | skb->ipvs_property = 1; | ||
695 | verdict = NF_ACCEPT; | ||
696 | |||
697 | out: | ||
698 | __ip_vs_conn_put(cp); | ||
699 | |||
700 | return verdict; | ||
701 | } | ||
702 | |||
563 | /* | 703 | /* |
564 | * Handle ICMP messages in the inside-to-outside direction (outgoing). | 704 | * Handle ICMP messages in the inside-to-outside direction (outgoing). |
565 | * Find any that might be relevant, check against existing connections, | 705 | * Find any that might be relevant, check against existing connections. |
566 | * forward to the right destination host if relevant. | ||
567 | * Currently handles error types - unreachable, quench, ttl exceeded. | 706 | * Currently handles error types - unreachable, quench, ttl exceeded. |
568 | * (Only used in VS/NAT) | ||
569 | */ | 707 | */ |
570 | static int ip_vs_out_icmp(struct sk_buff *skb, int *related) | 708 | static int ip_vs_out_icmp(struct sk_buff *skb, int *related) |
571 | { | 709 | { |
572 | struct iphdr *iph; | 710 | struct iphdr *iph; |
573 | struct icmphdr _icmph, *ic; | 711 | struct icmphdr _icmph, *ic; |
574 | struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ | 712 | struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ |
713 | struct ip_vs_iphdr ciph; | ||
575 | struct ip_vs_conn *cp; | 714 | struct ip_vs_conn *cp; |
576 | struct ip_vs_protocol *pp; | 715 | struct ip_vs_protocol *pp; |
577 | unsigned int offset, ihl, verdict; | 716 | unsigned int offset, ihl; |
717 | union nf_inet_addr snet; | ||
578 | 718 | ||
579 | *related = 1; | 719 | *related = 1; |
580 | 720 | ||
@@ -627,102 +767,231 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related) | |||
627 | 767 | ||
628 | offset += cih->ihl * 4; | 768 | offset += cih->ihl * 4; |
629 | 769 | ||
770 | ip_vs_fill_iphdr(AF_INET, cih, &ciph); | ||
630 | /* The embedded headers contain source and dest in reverse order */ | 771 | /* The embedded headers contain source and dest in reverse order */ |
631 | cp = pp->conn_out_get(skb, pp, cih, offset, 1); | 772 | cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1); |
632 | if (!cp) | 773 | if (!cp) |
633 | return NF_ACCEPT; | 774 | return NF_ACCEPT; |
634 | 775 | ||
635 | verdict = NF_DROP; | 776 | snet.ip = iph->saddr; |
777 | return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, | ||
778 | pp, offset, ihl); | ||
779 | } | ||
636 | 780 | ||
637 | if (IP_VS_FWD_METHOD(cp) != 0) { | 781 | #ifdef CONFIG_IP_VS_IPV6 |
638 | IP_VS_ERR("shouldn't reach here, because the box is on the " | 782 | static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related) |
639 | "half connection in the tun/dr module.\n"); | 783 | { |
784 | struct ipv6hdr *iph; | ||
785 | struct icmp6hdr _icmph, *ic; | ||
786 | struct ipv6hdr _ciph, *cih; /* The ip header contained | ||
787 | within the ICMP */ | ||
788 | struct ip_vs_iphdr ciph; | ||
789 | struct ip_vs_conn *cp; | ||
790 | struct ip_vs_protocol *pp; | ||
791 | unsigned int offset; | ||
792 | union nf_inet_addr snet; | ||
793 | |||
794 | *related = 1; | ||
795 | |||
796 | /* reassemble IP fragments */ | ||
797 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) { | ||
798 | if (ip_vs_gather_frags_v6(skb, IP_DEFRAG_VS_OUT)) | ||
799 | return NF_STOLEN; | ||
640 | } | 800 | } |
641 | 801 | ||
642 | /* Ensure the checksum is correct */ | 802 | iph = ipv6_hdr(skb); |
643 | if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { | 803 | offset = sizeof(struct ipv6hdr); |
644 | /* Failed checksum! */ | 804 | ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); |
645 | IP_VS_DBG(1, "Forward ICMP: failed checksum from %d.%d.%d.%d!\n", | 805 | if (ic == NULL) |
646 | NIPQUAD(iph->saddr)); | 806 | return NF_DROP; |
647 | goto out; | 807 | |
808 | IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) " NIP6_FMT "->" NIP6_FMT "\n", | ||
809 | ic->icmp6_type, ntohs(icmpv6_id(ic)), | ||
810 | NIP6(iph->saddr), NIP6(iph->daddr)); | ||
811 | |||
812 | /* | ||
813 | * Work through seeing if this is for us. | ||
814 | * These checks are supposed to be in an order that means easy | ||
815 | * things are checked first to speed up processing.... however | ||
816 | * this means that some packets will manage to get a long way | ||
817 | * down this stack and then be rejected, but that's life. | ||
818 | */ | ||
819 | if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) && | ||
820 | (ic->icmp6_type != ICMPV6_PKT_TOOBIG) && | ||
821 | (ic->icmp6_type != ICMPV6_TIME_EXCEED)) { | ||
822 | *related = 0; | ||
823 | return NF_ACCEPT; | ||
648 | } | 824 | } |
649 | 825 | ||
650 | if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) | 826 | /* Now find the contained IP header */ |
651 | offset += 2 * sizeof(__u16); | 827 | offset += sizeof(_icmph); |
652 | if (!skb_make_writable(skb, offset)) | 828 | cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); |
653 | goto out; | 829 | if (cih == NULL) |
830 | return NF_ACCEPT; /* The packet looks wrong, ignore */ | ||
654 | 831 | ||
655 | ip_vs_nat_icmp(skb, pp, cp, 1); | 832 | pp = ip_vs_proto_get(cih->nexthdr); |
833 | if (!pp) | ||
834 | return NF_ACCEPT; | ||
656 | 835 | ||
657 | /* do the statistics and put it back */ | 836 | /* Is the embedded protocol header present? */ |
658 | ip_vs_out_stats(cp, skb); | 837 | /* TODO: we don't support fragmentation at the moment anyways */ |
838 | if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag)) | ||
839 | return NF_ACCEPT; | ||
659 | 840 | ||
660 | skb->ipvs_property = 1; | 841 | IP_VS_DBG_PKT(11, pp, skb, offset, "Checking outgoing ICMPv6 for"); |
661 | verdict = NF_ACCEPT; | ||
662 | 842 | ||
663 | out: | 843 | offset += sizeof(struct ipv6hdr); |
664 | __ip_vs_conn_put(cp); | ||
665 | 844 | ||
666 | return verdict; | 845 | ip_vs_fill_iphdr(AF_INET6, cih, &ciph); |
846 | /* The embedded headers contain source and dest in reverse order */ | ||
847 | cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1); | ||
848 | if (!cp) | ||
849 | return NF_ACCEPT; | ||
850 | |||
851 | ipv6_addr_copy(&snet.in6, &iph->saddr); | ||
852 | return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp, | ||
853 | pp, offset, sizeof(struct ipv6hdr)); | ||
667 | } | 854 | } |
855 | #endif | ||
668 | 856 | ||
669 | static inline int is_tcp_reset(const struct sk_buff *skb) | 857 | static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len) |
670 | { | 858 | { |
671 | struct tcphdr _tcph, *th; | 859 | struct tcphdr _tcph, *th; |
672 | 860 | ||
673 | th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); | 861 | th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph); |
674 | if (th == NULL) | 862 | if (th == NULL) |
675 | return 0; | 863 | return 0; |
676 | return th->rst; | 864 | return th->rst; |
677 | } | 865 | } |
678 | 866 | ||
867 | /* Handle response packets: rewrite addresses and send away... | ||
868 | * Used for NAT and local client. | ||
869 | */ | ||
870 | static unsigned int | ||
871 | handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
872 | struct ip_vs_conn *cp, int ihl) | ||
873 | { | ||
874 | IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet"); | ||
875 | |||
876 | if (!skb_make_writable(skb, ihl)) | ||
877 | goto drop; | ||
878 | |||
879 | /* mangle the packet */ | ||
880 | if (pp->snat_handler && !pp->snat_handler(skb, pp, cp)) | ||
881 | goto drop; | ||
882 | |||
883 | #ifdef CONFIG_IP_VS_IPV6 | ||
884 | if (af == AF_INET6) | ||
885 | ipv6_hdr(skb)->saddr = cp->vaddr.in6; | ||
886 | else | ||
887 | #endif | ||
888 | { | ||
889 | ip_hdr(skb)->saddr = cp->vaddr.ip; | ||
890 | ip_send_check(ip_hdr(skb)); | ||
891 | } | ||
892 | |||
893 | /* For policy routing, packets originating from this | ||
894 | * machine itself may be routed differently to packets | ||
895 | * passing through. We want this packet to be routed as | ||
896 | * if it came from this machine itself. So re-compute | ||
897 | * the routing information. | ||
898 | */ | ||
899 | #ifdef CONFIG_IP_VS_IPV6 | ||
900 | if (af == AF_INET6) { | ||
901 | if (ip6_route_me_harder(skb) != 0) | ||
902 | goto drop; | ||
903 | } else | ||
904 | #endif | ||
905 | if (ip_route_me_harder(skb, RTN_LOCAL) != 0) | ||
906 | goto drop; | ||
907 | |||
908 | IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT"); | ||
909 | |||
910 | ip_vs_out_stats(cp, skb); | ||
911 | ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); | ||
912 | ip_vs_conn_put(cp); | ||
913 | |||
914 | skb->ipvs_property = 1; | ||
915 | |||
916 | LeaveFunction(11); | ||
917 | return NF_ACCEPT; | ||
918 | |||
919 | drop: | ||
920 | ip_vs_conn_put(cp); | ||
921 | kfree_skb(skb); | ||
922 | return NF_STOLEN; | ||
923 | } | ||
924 | |||
679 | /* | 925 | /* |
680 | * It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT. | 926 | * It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT. |
681 | * Check if outgoing packet belongs to the established ip_vs_conn, | 927 | * Check if outgoing packet belongs to the established ip_vs_conn. |
682 | * rewrite addresses of the packet and send it on its way... | ||
683 | */ | 928 | */ |
684 | static unsigned int | 929 | static unsigned int |
685 | ip_vs_out(unsigned int hooknum, struct sk_buff *skb, | 930 | ip_vs_out(unsigned int hooknum, struct sk_buff *skb, |
686 | const struct net_device *in, const struct net_device *out, | 931 | const struct net_device *in, const struct net_device *out, |
687 | int (*okfn)(struct sk_buff *)) | 932 | int (*okfn)(struct sk_buff *)) |
688 | { | 933 | { |
689 | struct iphdr *iph; | 934 | struct ip_vs_iphdr iph; |
690 | struct ip_vs_protocol *pp; | 935 | struct ip_vs_protocol *pp; |
691 | struct ip_vs_conn *cp; | 936 | struct ip_vs_conn *cp; |
692 | int ihl; | 937 | int af; |
693 | 938 | ||
694 | EnterFunction(11); | 939 | EnterFunction(11); |
695 | 940 | ||
941 | af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6; | ||
942 | |||
696 | if (skb->ipvs_property) | 943 | if (skb->ipvs_property) |
697 | return NF_ACCEPT; | 944 | return NF_ACCEPT; |
698 | 945 | ||
699 | iph = ip_hdr(skb); | 946 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
700 | if (unlikely(iph->protocol == IPPROTO_ICMP)) { | 947 | #ifdef CONFIG_IP_VS_IPV6 |
701 | int related, verdict = ip_vs_out_icmp(skb, &related); | 948 | if (af == AF_INET6) { |
949 | if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { | ||
950 | int related, verdict = ip_vs_out_icmp_v6(skb, &related); | ||
702 | 951 | ||
703 | if (related) | 952 | if (related) |
704 | return verdict; | 953 | return verdict; |
705 | iph = ip_hdr(skb); | 954 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
706 | } | 955 | } |
956 | } else | ||
957 | #endif | ||
958 | if (unlikely(iph.protocol == IPPROTO_ICMP)) { | ||
959 | int related, verdict = ip_vs_out_icmp(skb, &related); | ||
707 | 960 | ||
708 | pp = ip_vs_proto_get(iph->protocol); | 961 | if (related) |
962 | return verdict; | ||
963 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
964 | } | ||
965 | |||
966 | pp = ip_vs_proto_get(iph.protocol); | ||
709 | if (unlikely(!pp)) | 967 | if (unlikely(!pp)) |
710 | return NF_ACCEPT; | 968 | return NF_ACCEPT; |
711 | 969 | ||
712 | /* reassemble IP fragments */ | 970 | /* reassemble IP fragments */ |
713 | if (unlikely(iph->frag_off & htons(IP_MF|IP_OFFSET) && | 971 | #ifdef CONFIG_IP_VS_IPV6 |
714 | !pp->dont_defrag)) { | 972 | if (af == AF_INET6) { |
715 | if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT)) | 973 | if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { |
716 | return NF_STOLEN; | 974 | int related, verdict = ip_vs_out_icmp_v6(skb, &related); |
717 | iph = ip_hdr(skb); | 975 | |
718 | } | 976 | if (related) |
977 | return verdict; | ||
719 | 978 | ||
720 | ihl = iph->ihl << 2; | 979 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
980 | } | ||
981 | } else | ||
982 | #endif | ||
983 | if (unlikely(ip_hdr(skb)->frag_off & htons(IP_MF|IP_OFFSET) && | ||
984 | !pp->dont_defrag)) { | ||
985 | if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT)) | ||
986 | return NF_STOLEN; | ||
987 | |||
988 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
989 | } | ||
721 | 990 | ||
722 | /* | 991 | /* |
723 | * Check if the packet belongs to an existing entry | 992 | * Check if the packet belongs to an existing entry |
724 | */ | 993 | */ |
725 | cp = pp->conn_out_get(skb, pp, iph, ihl, 0); | 994 | cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0); |
726 | 995 | ||
727 | if (unlikely(!cp)) { | 996 | if (unlikely(!cp)) { |
728 | if (sysctl_ip_vs_nat_icmp_send && | 997 | if (sysctl_ip_vs_nat_icmp_send && |
@@ -730,21 +999,31 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, | |||
730 | pp->protocol == IPPROTO_UDP)) { | 999 | pp->protocol == IPPROTO_UDP)) { |
731 | __be16 _ports[2], *pptr; | 1000 | __be16 _ports[2], *pptr; |
732 | 1001 | ||
733 | pptr = skb_header_pointer(skb, ihl, | 1002 | pptr = skb_header_pointer(skb, iph.len, |
734 | sizeof(_ports), _ports); | 1003 | sizeof(_ports), _ports); |
735 | if (pptr == NULL) | 1004 | if (pptr == NULL) |
736 | return NF_ACCEPT; /* Not for me */ | 1005 | return NF_ACCEPT; /* Not for me */ |
737 | if (ip_vs_lookup_real_service(iph->protocol, | 1006 | if (ip_vs_lookup_real_service(af, iph.protocol, |
738 | iph->saddr, pptr[0])) { | 1007 | &iph.saddr, |
1008 | pptr[0])) { | ||
739 | /* | 1009 | /* |
740 | * Notify the real server: there is no | 1010 | * Notify the real server: there is no |
741 | * existing entry if it is not RST | 1011 | * existing entry if it is not RST |
742 | * packet or not TCP packet. | 1012 | * packet or not TCP packet. |
743 | */ | 1013 | */ |
744 | if (iph->protocol != IPPROTO_TCP | 1014 | if (iph.protocol != IPPROTO_TCP |
745 | || !is_tcp_reset(skb)) { | 1015 | || !is_tcp_reset(skb, iph.len)) { |
746 | icmp_send(skb,ICMP_DEST_UNREACH, | 1016 | #ifdef CONFIG_IP_VS_IPV6 |
747 | ICMP_PORT_UNREACH, 0); | 1017 | if (af == AF_INET6) |
1018 | icmpv6_send(skb, | ||
1019 | ICMPV6_DEST_UNREACH, | ||
1020 | ICMPV6_PORT_UNREACH, | ||
1021 | 0, skb->dev); | ||
1022 | else | ||
1023 | #endif | ||
1024 | icmp_send(skb, | ||
1025 | ICMP_DEST_UNREACH, | ||
1026 | ICMP_PORT_UNREACH, 0); | ||
748 | return NF_DROP; | 1027 | return NF_DROP; |
749 | } | 1028 | } |
750 | } | 1029 | } |
@@ -754,41 +1033,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, | |||
754 | return NF_ACCEPT; | 1033 | return NF_ACCEPT; |
755 | } | 1034 | } |
756 | 1035 | ||
757 | IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet"); | 1036 | return handle_response(af, skb, pp, cp, iph.len); |
758 | |||
759 | if (!skb_make_writable(skb, ihl)) | ||
760 | goto drop; | ||
761 | |||
762 | /* mangle the packet */ | ||
763 | if (pp->snat_handler && !pp->snat_handler(skb, pp, cp)) | ||
764 | goto drop; | ||
765 | ip_hdr(skb)->saddr = cp->vaddr; | ||
766 | ip_send_check(ip_hdr(skb)); | ||
767 | |||
768 | /* For policy routing, packets originating from this | ||
769 | * machine itself may be routed differently to packets | ||
770 | * passing through. We want this packet to be routed as | ||
771 | * if it came from this machine itself. So re-compute | ||
772 | * the routing information. | ||
773 | */ | ||
774 | if (ip_route_me_harder(skb, RTN_LOCAL) != 0) | ||
775 | goto drop; | ||
776 | |||
777 | IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT"); | ||
778 | |||
779 | ip_vs_out_stats(cp, skb); | ||
780 | ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); | ||
781 | ip_vs_conn_put(cp); | ||
782 | |||
783 | skb->ipvs_property = 1; | ||
784 | |||
785 | LeaveFunction(11); | ||
786 | return NF_ACCEPT; | ||
787 | |||
788 | drop: | ||
789 | ip_vs_conn_put(cp); | ||
790 | kfree_skb(skb); | ||
791 | return NF_STOLEN; | ||
792 | } | 1037 | } |
793 | 1038 | ||
794 | 1039 | ||
@@ -804,9 +1049,11 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
804 | struct iphdr *iph; | 1049 | struct iphdr *iph; |
805 | struct icmphdr _icmph, *ic; | 1050 | struct icmphdr _icmph, *ic; |
806 | struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ | 1051 | struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ |
1052 | struct ip_vs_iphdr ciph; | ||
807 | struct ip_vs_conn *cp; | 1053 | struct ip_vs_conn *cp; |
808 | struct ip_vs_protocol *pp; | 1054 | struct ip_vs_protocol *pp; |
809 | unsigned int offset, ihl, verdict; | 1055 | unsigned int offset, ihl, verdict; |
1056 | union nf_inet_addr snet; | ||
810 | 1057 | ||
811 | *related = 1; | 1058 | *related = 1; |
812 | 1059 | ||
@@ -860,10 +1107,20 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
860 | 1107 | ||
861 | offset += cih->ihl * 4; | 1108 | offset += cih->ihl * 4; |
862 | 1109 | ||
1110 | ip_vs_fill_iphdr(AF_INET, cih, &ciph); | ||
863 | /* The embedded headers contain source and dest in reverse order */ | 1111 | /* The embedded headers contain source and dest in reverse order */ |
864 | cp = pp->conn_in_get(skb, pp, cih, offset, 1); | 1112 | cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1); |
865 | if (!cp) | 1113 | if (!cp) { |
1114 | /* The packet could also belong to a local client */ | ||
1115 | cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1); | ||
1116 | if (cp) { | ||
1117 | snet.ip = iph->saddr; | ||
1118 | return handle_response_icmp(AF_INET, skb, &snet, | ||
1119 | cih->protocol, cp, pp, | ||
1120 | offset, ihl); | ||
1121 | } | ||
866 | return NF_ACCEPT; | 1122 | return NF_ACCEPT; |
1123 | } | ||
867 | 1124 | ||
868 | verdict = NF_DROP; | 1125 | verdict = NF_DROP; |
869 | 1126 | ||
@@ -888,6 +1145,105 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
888 | return verdict; | 1145 | return verdict; |
889 | } | 1146 | } |
890 | 1147 | ||
1148 | #ifdef CONFIG_IP_VS_IPV6 | ||
1149 | static int | ||
1150 | ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) | ||
1151 | { | ||
1152 | struct ipv6hdr *iph; | ||
1153 | struct icmp6hdr _icmph, *ic; | ||
1154 | struct ipv6hdr _ciph, *cih; /* The ip header contained | ||
1155 | within the ICMP */ | ||
1156 | struct ip_vs_iphdr ciph; | ||
1157 | struct ip_vs_conn *cp; | ||
1158 | struct ip_vs_protocol *pp; | ||
1159 | unsigned int offset, verdict; | ||
1160 | union nf_inet_addr snet; | ||
1161 | |||
1162 | *related = 1; | ||
1163 | |||
1164 | /* reassemble IP fragments */ | ||
1165 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) { | ||
1166 | if (ip_vs_gather_frags_v6(skb, hooknum == NF_INET_LOCAL_IN ? | ||
1167 | IP_DEFRAG_VS_IN : | ||
1168 | IP_DEFRAG_VS_FWD)) | ||
1169 | return NF_STOLEN; | ||
1170 | } | ||
1171 | |||
1172 | iph = ipv6_hdr(skb); | ||
1173 | offset = sizeof(struct ipv6hdr); | ||
1174 | ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); | ||
1175 | if (ic == NULL) | ||
1176 | return NF_DROP; | ||
1177 | |||
1178 | IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) " NIP6_FMT "->" NIP6_FMT "\n", | ||
1179 | ic->icmp6_type, ntohs(icmpv6_id(ic)), | ||
1180 | NIP6(iph->saddr), NIP6(iph->daddr)); | ||
1181 | |||
1182 | /* | ||
1183 | * Work through seeing if this is for us. | ||
1184 | * These checks are supposed to be in an order that means easy | ||
1185 | * things are checked first to speed up processing.... however | ||
1186 | * this means that some packets will manage to get a long way | ||
1187 | * down this stack and then be rejected, but that's life. | ||
1188 | */ | ||
1189 | if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) && | ||
1190 | (ic->icmp6_type != ICMPV6_PKT_TOOBIG) && | ||
1191 | (ic->icmp6_type != ICMPV6_TIME_EXCEED)) { | ||
1192 | *related = 0; | ||
1193 | return NF_ACCEPT; | ||
1194 | } | ||
1195 | |||
1196 | /* Now find the contained IP header */ | ||
1197 | offset += sizeof(_icmph); | ||
1198 | cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); | ||
1199 | if (cih == NULL) | ||
1200 | return NF_ACCEPT; /* The packet looks wrong, ignore */ | ||
1201 | |||
1202 | pp = ip_vs_proto_get(cih->nexthdr); | ||
1203 | if (!pp) | ||
1204 | return NF_ACCEPT; | ||
1205 | |||
1206 | /* Is the embedded protocol header present? */ | ||
1207 | /* TODO: we don't support fragmentation at the moment anyways */ | ||
1208 | if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag)) | ||
1209 | return NF_ACCEPT; | ||
1210 | |||
1211 | IP_VS_DBG_PKT(11, pp, skb, offset, "Checking incoming ICMPv6 for"); | ||
1212 | |||
1213 | offset += sizeof(struct ipv6hdr); | ||
1214 | |||
1215 | ip_vs_fill_iphdr(AF_INET6, cih, &ciph); | ||
1216 | /* The embedded headers contain source and dest in reverse order */ | ||
1217 | cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1); | ||
1218 | if (!cp) { | ||
1219 | /* The packet could also belong to a local client */ | ||
1220 | cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1); | ||
1221 | if (cp) { | ||
1222 | ipv6_addr_copy(&snet.in6, &iph->saddr); | ||
1223 | return handle_response_icmp(AF_INET6, skb, &snet, | ||
1224 | cih->nexthdr, | ||
1225 | cp, pp, offset, | ||
1226 | sizeof(struct ipv6hdr)); | ||
1227 | } | ||
1228 | return NF_ACCEPT; | ||
1229 | } | ||
1230 | |||
1231 | verdict = NF_DROP; | ||
1232 | |||
1233 | /* do the statistics and put it back */ | ||
1234 | ip_vs_in_stats(cp, skb); | ||
1235 | if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr) | ||
1236 | offset += 2 * sizeof(__u16); | ||
1237 | verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset); | ||
1238 | /* do not touch skb anymore */ | ||
1239 | |||
1240 | __ip_vs_conn_put(cp); | ||
1241 | |||
1242 | return verdict; | ||
1243 | } | ||
1244 | #endif | ||
1245 | |||
1246 | |||
891 | /* | 1247 | /* |
892 | * Check if it's for virtual services, look it up, | 1248 | * Check if it's for virtual services, look it up, |
893 | * and send it on its way... | 1249 | * and send it on its way... |
@@ -897,50 +1253,54 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, | |||
897 | const struct net_device *in, const struct net_device *out, | 1253 | const struct net_device *in, const struct net_device *out, |
898 | int (*okfn)(struct sk_buff *)) | 1254 | int (*okfn)(struct sk_buff *)) |
899 | { | 1255 | { |
900 | struct iphdr *iph; | 1256 | struct ip_vs_iphdr iph; |
901 | struct ip_vs_protocol *pp; | 1257 | struct ip_vs_protocol *pp; |
902 | struct ip_vs_conn *cp; | 1258 | struct ip_vs_conn *cp; |
903 | int ret, restart; | 1259 | int ret, restart, af; |
904 | int ihl; | 1260 | |
1261 | af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6; | ||
1262 | |||
1263 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
905 | 1264 | ||
906 | /* | 1265 | /* |
907 | * Big tappo: only PACKET_HOST (neither loopback nor mcasts) | 1266 | * Big tappo: only PACKET_HOST, including loopback for local client |
908 | * ... don't know why 1st test DOES NOT include 2nd (?) | 1267 | * Don't handle local packets on IPv6 for now |
909 | */ | 1268 | */ |
910 | if (unlikely(skb->pkt_type != PACKET_HOST | 1269 | if (unlikely(skb->pkt_type != PACKET_HOST)) { |
911 | || skb->dev->flags & IFF_LOOPBACK || skb->sk)) { | 1270 | IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s ignored\n", |
912 | IP_VS_DBG(12, "packet type=%d proto=%d daddr=%d.%d.%d.%d ignored\n", | 1271 | skb->pkt_type, |
913 | skb->pkt_type, | 1272 | iph.protocol, |
914 | ip_hdr(skb)->protocol, | 1273 | IP_VS_DBG_ADDR(af, &iph.daddr)); |
915 | NIPQUAD(ip_hdr(skb)->daddr)); | ||
916 | return NF_ACCEPT; | 1274 | return NF_ACCEPT; |
917 | } | 1275 | } |
918 | 1276 | ||
919 | iph = ip_hdr(skb); | 1277 | if (unlikely(iph.protocol == IPPROTO_ICMP)) { |
920 | if (unlikely(iph->protocol == IPPROTO_ICMP)) { | ||
921 | int related, verdict = ip_vs_in_icmp(skb, &related, hooknum); | 1278 | int related, verdict = ip_vs_in_icmp(skb, &related, hooknum); |
922 | 1279 | ||
923 | if (related) | 1280 | if (related) |
924 | return verdict; | 1281 | return verdict; |
925 | iph = ip_hdr(skb); | 1282 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
926 | } | 1283 | } |
927 | 1284 | ||
928 | /* Protocol supported? */ | 1285 | /* Protocol supported? */ |
929 | pp = ip_vs_proto_get(iph->protocol); | 1286 | pp = ip_vs_proto_get(iph.protocol); |
930 | if (unlikely(!pp)) | 1287 | if (unlikely(!pp)) |
931 | return NF_ACCEPT; | 1288 | return NF_ACCEPT; |
932 | 1289 | ||
933 | ihl = iph->ihl << 2; | ||
934 | |||
935 | /* | 1290 | /* |
936 | * Check if the packet belongs to an existing connection entry | 1291 | * Check if the packet belongs to an existing connection entry |
937 | */ | 1292 | */ |
938 | cp = pp->conn_in_get(skb, pp, iph, ihl, 0); | 1293 | cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0); |
939 | 1294 | ||
940 | if (unlikely(!cp)) { | 1295 | if (unlikely(!cp)) { |
941 | int v; | 1296 | int v; |
942 | 1297 | ||
943 | if (!pp->conn_schedule(skb, pp, &v, &cp)) | 1298 | /* For local client packets, it could be a response */ |
1299 | cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0); | ||
1300 | if (cp) | ||
1301 | return handle_response(af, skb, pp, cp, iph.len); | ||
1302 | |||
1303 | if (!pp->conn_schedule(af, skb, pp, &v, &cp)) | ||
944 | return v; | 1304 | return v; |
945 | } | 1305 | } |
946 | 1306 | ||
@@ -984,7 +1344,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, | |||
984 | * encourage the standby servers to update the connection timeouts | 1344 |
985 | */ | 1345 | */ |
986 | atomic_inc(&cp->in_pkts); | 1346 | atomic_inc(&cp->in_pkts); |
987 | if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && | 1347 | if (af == AF_INET && |
1348 | (ip_vs_sync_state & IP_VS_STATE_MASTER) && | ||
988 | (((cp->protocol != IPPROTO_TCP || | 1349 | (((cp->protocol != IPPROTO_TCP || |
989 | cp->state == IP_VS_TCP_S_ESTABLISHED) && | 1350 | cp->state == IP_VS_TCP_S_ESTABLISHED) && |
990 | (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1] | 1351 | (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1] |
@@ -1023,6 +1384,21 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb, | |||
1023 | return ip_vs_in_icmp(skb, &r, hooknum); | 1384 | return ip_vs_in_icmp(skb, &r, hooknum); |
1024 | } | 1385 | } |
1025 | 1386 | ||
1387 | #ifdef CONFIG_IP_VS_IPV6 | ||
1388 | static unsigned int | ||
1389 | ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, | ||
1390 | const struct net_device *in, const struct net_device *out, | ||
1391 | int (*okfn)(struct sk_buff *)) | ||
1392 | { | ||
1393 | int r; | ||
1394 | |||
1395 | if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) | ||
1396 | return NF_ACCEPT; | ||
1397 | |||
1398 | return ip_vs_in_icmp_v6(skb, &r, hooknum); | ||
1399 | } | ||
1400 | #endif | ||
1401 | |||
1026 | 1402 | ||
1027 | static struct nf_hook_ops ip_vs_ops[] __read_mostly = { | 1403 | static struct nf_hook_ops ip_vs_ops[] __read_mostly = { |
1028 | /* After packet filtering, forward packet through VS/DR, VS/TUN, | 1404 | /* After packet filtering, forward packet through VS/DR, VS/TUN, |
@@ -1060,6 +1436,43 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = { | |||
1060 | .hooknum = NF_INET_POST_ROUTING, | 1436 | .hooknum = NF_INET_POST_ROUTING, |
1061 | .priority = NF_IP_PRI_NAT_SRC-1, | 1437 | .priority = NF_IP_PRI_NAT_SRC-1, |
1062 | }, | 1438 | }, |
1439 | #ifdef CONFIG_IP_VS_IPV6 | ||
1440 | /* After packet filtering, forward packet through VS/DR, VS/TUN, | ||
1441 | * or VS/NAT(change destination), so that filtering rules can be | ||
1442 | * applied to IPVS. */ | ||
1443 | { | ||
1444 | .hook = ip_vs_in, | ||
1445 | .owner = THIS_MODULE, | ||
1446 | .pf = PF_INET6, | ||
1447 | .hooknum = NF_INET_LOCAL_IN, | ||
1448 | .priority = 100, | ||
1449 | }, | ||
1450 | /* After packet filtering, change source only for VS/NAT */ | ||
1451 | { | ||
1452 | .hook = ip_vs_out, | ||
1453 | .owner = THIS_MODULE, | ||
1454 | .pf = PF_INET6, | ||
1455 | .hooknum = NF_INET_FORWARD, | ||
1456 | .priority = 100, | ||
1457 | }, | ||
1458 | /* After packet filtering (but before ip_vs_out_icmp), catch icmp | ||
1459 | * destined for 0.0.0.0/0, which is for incoming IPVS connections */ | ||
1460 | { | ||
1461 | .hook = ip_vs_forward_icmp_v6, | ||
1462 | .owner = THIS_MODULE, | ||
1463 | .pf = PF_INET6, | ||
1464 | .hooknum = NF_INET_FORWARD, | ||
1465 | .priority = 99, | ||
1466 | }, | ||
1467 | /* Before the netfilter connection tracking, exit from POST_ROUTING */ | ||
1468 | { | ||
1469 | .hook = ip_vs_post_routing, | ||
1470 | .owner = THIS_MODULE, | ||
1471 | .pf = PF_INET6, | ||
1472 | .hooknum = NF_INET_POST_ROUTING, | ||
1473 | .priority = NF_IP6_PRI_NAT_SRC-1, | ||
1474 | }, | ||
1475 | #endif | ||
1063 | }; | 1476 | }; |
1064 | 1477 | ||
1065 | 1478 | ||
@@ -1070,10 +1483,12 @@ static int __init ip_vs_init(void) | |||
1070 | { | 1483 | { |
1071 | int ret; | 1484 | int ret; |
1072 | 1485 | ||
1486 | ip_vs_estimator_init(); | ||
1487 | |||
1073 | ret = ip_vs_control_init(); | 1488 | ret = ip_vs_control_init(); |
1074 | if (ret < 0) { | 1489 | if (ret < 0) { |
1075 | IP_VS_ERR("can't setup control.\n"); | 1490 | IP_VS_ERR("can't setup control.\n"); |
1076 | goto cleanup_nothing; | 1491 | goto cleanup_estimator; |
1077 | } | 1492 | } |
1078 | 1493 | ||
1079 | ip_vs_protocol_init(); | 1494 | ip_vs_protocol_init(); |
@@ -1106,7 +1521,8 @@ static int __init ip_vs_init(void) | |||
1106 | cleanup_protocol: | 1521 | cleanup_protocol: |
1107 | ip_vs_protocol_cleanup(); | 1522 | ip_vs_protocol_cleanup(); |
1108 | ip_vs_control_cleanup(); | 1523 | ip_vs_control_cleanup(); |
1109 | cleanup_nothing: | 1524 | cleanup_estimator: |
1525 | ip_vs_estimator_cleanup(); | ||
1110 | return ret; | 1526 | return ret; |
1111 | } | 1527 | } |
1112 | 1528 | ||
@@ -1117,6 +1533,7 @@ static void __exit ip_vs_cleanup(void) | |||
1117 | ip_vs_app_cleanup(); | 1533 | ip_vs_app_cleanup(); |
1118 | ip_vs_protocol_cleanup(); | 1534 | ip_vs_protocol_cleanup(); |
1119 | ip_vs_control_cleanup(); | 1535 | ip_vs_control_cleanup(); |
1536 | ip_vs_estimator_cleanup(); | ||
1120 | IP_VS_INFO("ipvs unloaded.\n"); | 1537 | IP_VS_INFO("ipvs unloaded.\n"); |
1121 | } | 1538 | } |
1122 | 1539 | ||
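The ip_vs_core.c hunks above repeatedly dispatch on the address family and re-read a protocol-independent header summary (struct ip_vs_iphdr) after every operation that can reallocate or defragment the skb. A minimal userspace sketch of that fill step, using simplified structures and hypothetical field names rather than the kernel's own types:

#include <stdint.h>
#include <string.h>

#define AF_INET_SKETCH   2    /* stand-ins for the Linux AF_INET / AF_INET6 values */
#define AF_INET6_SKETCH 10

/* Simplified analogue of union nf_inet_addr. */
union inet_addr {
	uint32_t ip;        /* IPv4 address, network byte order */
	uint32_t ip6[4];    /* IPv6 address, network byte order */
};

/* Family-agnostic header summary, analogous to struct ip_vs_iphdr. */
struct ip_summary {
	int             len;       /* offset from network header to transport header */
	uint8_t         protocol;  /* transport protocol / next-header value         */
	union inet_addr saddr, daddr;
};

/* Fill the summary from a raw network header.  IPv6 extension headers are
 * ignored here, matching the simplification made in the patch. */
static void fill_summary(int af, const uint8_t *nh, struct ip_summary *s)
{
	if (af == AF_INET6_SKETCH) {
		s->len      = 40;           /* fixed IPv6 header size */
		s->protocol = nh[6];        /* "next header" field    */
		memcpy(s->saddr.ip6, nh + 8,  16);
		memcpy(s->daddr.ip6, nh + 24, 16);
	} else {
		s->len      = (nh[0] & 0x0f) * 4;  /* IHL, in 32-bit words */
		s->protocol = nh[9];
		memcpy(&s->saddr.ip, nh + 12, 4);
		memcpy(&s->daddr.ip, nh + 16, 4);
	}
}

Re-running this fill after ip_vs_gather_frags()/ip_vs_gather_frags_v6() or an ICMP handler is what keeps iph.saddr, iph.daddr and iph.len valid once the skb data may have moved.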
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index 6379705a8dcb..771551d8fba9 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c | |||
@@ -35,8 +35,13 @@ | |||
35 | 35 | ||
36 | #include <net/net_namespace.h> | 36 | #include <net/net_namespace.h> |
37 | #include <net/ip.h> | 37 | #include <net/ip.h> |
38 | #ifdef CONFIG_IP_VS_IPV6 | ||
39 | #include <net/ipv6.h> | ||
40 | #include <net/ip6_route.h> | ||
41 | #endif | ||
38 | #include <net/route.h> | 42 | #include <net/route.h> |
39 | #include <net/sock.h> | 43 | #include <net/sock.h> |
44 | #include <net/genetlink.h> | ||
40 | 45 | ||
41 | #include <asm/uaccess.h> | 46 | #include <asm/uaccess.h> |
42 | 47 | ||
@@ -90,6 +95,26 @@ int ip_vs_get_debug_level(void) | |||
90 | } | 95 | } |
91 | #endif | 96 | #endif |
92 | 97 | ||
98 | #ifdef CONFIG_IP_VS_IPV6 | ||
99 | /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */ | ||
100 | static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr) | ||
101 | { | ||
102 | struct rt6_info *rt; | ||
103 | struct flowi fl = { | ||
104 | .oif = 0, | ||
105 | .nl_u = { | ||
106 | .ip6_u = { | ||
107 | .daddr = *addr, | ||
108 | .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } }, | ||
109 | }; | ||
110 | |||
111 | rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); | ||
112 | if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK)) | ||
113 | return 1; | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | #endif | ||
93 | /* | 118 | /* |
94 | * update_defense_level is called from keventd and from sysctl, | 119 | * update_defense_level is called from keventd and from sysctl, |
95 | * so it needs to protect itself from softirqs | 120 | * so it needs to protect itself from softirqs |
@@ -281,11 +306,19 @@ static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0); | |||
281 | * Returns hash value for virtual service | 306 | * Returns hash value for virtual service |
282 | */ | 307 | */ |
283 | static __inline__ unsigned | 308 | static __inline__ unsigned |
284 | ip_vs_svc_hashkey(unsigned proto, __be32 addr, __be16 port) | 309 | ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr, |
310 | __be16 port) | ||
285 | { | 311 | { |
286 | register unsigned porth = ntohs(port); | 312 | register unsigned porth = ntohs(port); |
313 | __be32 addr_fold = addr->ip; | ||
314 | |||
315 | #ifdef CONFIG_IP_VS_IPV6 | ||
316 | if (af == AF_INET6) | ||
317 | addr_fold = addr->ip6[0]^addr->ip6[1]^ | ||
318 | addr->ip6[2]^addr->ip6[3]; | ||
319 | #endif | ||
287 | 320 | ||
288 | return (proto^ntohl(addr)^(porth>>IP_VS_SVC_TAB_BITS)^porth) | 321 | return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth) |
289 | & IP_VS_SVC_TAB_MASK; | 322 | & IP_VS_SVC_TAB_MASK; |
290 | } | 323 | } |
291 | 324 | ||
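For IPv6 entries the hash keys above first fold the 128-bit address into 32 bits by XOR-ing its four words, so the existing mixing of protocol, address and port still produces a small table index for both families. A self-contained sketch of the folding and mixing (byte-order conversions and the real table size are omitted; the names are illustrative only):

#include <stdint.h>

#define TAB_BITS 8
#define TAB_MASK ((1u << TAB_BITS) - 1)

/* Fold four 32-bit words of an IPv6 address down to one word. */
static uint32_t fold_addr6(const uint32_t a[4])
{
	return a[0] ^ a[1] ^ a[2] ^ a[3];
}

/* Mix protocol, folded address and port into a table index, mirroring the
 * (proto ^ addr_fold ^ (porth >> BITS) ^ porth) & MASK expression above. */
static unsigned hashkey(unsigned proto, uint32_t addr_fold, uint16_t porth)
{
	return (proto ^ addr_fold ^ (porth >> TAB_BITS) ^ porth) & TAB_MASK;
}

For IPv4 the fold is simply the address itself, which is why addr_fold is initialised from addr->ip before the CONFIG_IP_VS_IPV6 branch.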
@@ -316,7 +349,8 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc) | |||
316 | /* | 349 | /* |
317 | * Hash it by <protocol,addr,port> in ip_vs_svc_table | 350 | * Hash it by <protocol,addr,port> in ip_vs_svc_table |
318 | */ | 351 | */ |
319 | hash = ip_vs_svc_hashkey(svc->protocol, svc->addr, svc->port); | 352 | hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr, |
353 | svc->port); | ||
320 | list_add(&svc->s_list, &ip_vs_svc_table[hash]); | 354 | list_add(&svc->s_list, &ip_vs_svc_table[hash]); |
321 | } else { | 355 | } else { |
322 | /* | 356 | /* |
@@ -362,17 +396,19 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc) | |||
362 | /* | 396 | /* |
363 | * Get service by {proto,addr,port} in the service table. | 397 | * Get service by {proto,addr,port} in the service table. |
364 | */ | 398 | */ |
365 | static __inline__ struct ip_vs_service * | 399 | static inline struct ip_vs_service * |
366 | __ip_vs_service_get(__u16 protocol, __be32 vaddr, __be16 vport) | 400 | __ip_vs_service_get(int af, __u16 protocol, const union nf_inet_addr *vaddr, |
401 | __be16 vport) | ||
367 | { | 402 | { |
368 | unsigned hash; | 403 | unsigned hash; |
369 | struct ip_vs_service *svc; | 404 | struct ip_vs_service *svc; |
370 | 405 | ||
371 | /* Check for "full" addressed entries */ | 406 | /* Check for "full" addressed entries */ |
372 | hash = ip_vs_svc_hashkey(protocol, vaddr, vport); | 407 | hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport); |
373 | 408 | ||
374 | list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){ | 409 | list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){ |
375 | if ((svc->addr == vaddr) | 410 | if ((svc->af == af) |
411 | && ip_vs_addr_equal(af, &svc->addr, vaddr) | ||
376 | && (svc->port == vport) | 412 | && (svc->port == vport) |
377 | && (svc->protocol == protocol)) { | 413 | && (svc->protocol == protocol)) { |
378 | /* HIT */ | 414 | /* HIT */ |
@@ -388,7 +424,8 @@ __ip_vs_service_get(__u16 protocol, __be32 vaddr, __be16 vport) | |||
388 | /* | 424 | /* |
389 | * Get service by {fwmark} in the service table. | 425 | * Get service by {fwmark} in the service table. |
390 | */ | 426 | */ |
391 | static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark) | 427 | static inline struct ip_vs_service * |
428 | __ip_vs_svc_fwm_get(int af, __u32 fwmark) | ||
392 | { | 429 | { |
393 | unsigned hash; | 430 | unsigned hash; |
394 | struct ip_vs_service *svc; | 431 | struct ip_vs_service *svc; |
@@ -397,7 +434,7 @@ static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark) | |||
397 | hash = ip_vs_svc_fwm_hashkey(fwmark); | 434 | hash = ip_vs_svc_fwm_hashkey(fwmark); |
398 | 435 | ||
399 | list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) { | 436 | list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) { |
400 | if (svc->fwmark == fwmark) { | 437 | if (svc->fwmark == fwmark && svc->af == af) { |
401 | /* HIT */ | 438 | /* HIT */ |
402 | atomic_inc(&svc->usecnt); | 439 | atomic_inc(&svc->usecnt); |
403 | return svc; | 440 | return svc; |
@@ -408,7 +445,8 @@ static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark) | |||
408 | } | 445 | } |
409 | 446 | ||
410 | struct ip_vs_service * | 447 | struct ip_vs_service * |
411 | ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport) | 448 | ip_vs_service_get(int af, __u32 fwmark, __u16 protocol, |
449 | const union nf_inet_addr *vaddr, __be16 vport) | ||
412 | { | 450 | { |
413 | struct ip_vs_service *svc; | 451 | struct ip_vs_service *svc; |
414 | 452 | ||
@@ -417,14 +455,14 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport) | |||
417 | /* | 455 | /* |
418 | * Check the table hashed by fwmark first | 456 | * Check the table hashed by fwmark first |
419 | */ | 457 | */ |
420 | if (fwmark && (svc = __ip_vs_svc_fwm_get(fwmark))) | 458 | if (fwmark && (svc = __ip_vs_svc_fwm_get(af, fwmark))) |
421 | goto out; | 459 | goto out; |
422 | 460 | ||
423 | /* | 461 | /* |
424 | * Check the table hashed by <protocol,addr,port> | 462 | * Check the table hashed by <protocol,addr,port> |
425 | * for "full" addressed entries | 463 | * for "full" addressed entries |
426 | */ | 464 | */ |
427 | svc = __ip_vs_service_get(protocol, vaddr, vport); | 465 | svc = __ip_vs_service_get(af, protocol, vaddr, vport); |
428 | 466 | ||
429 | if (svc == NULL | 467 | if (svc == NULL |
430 | && protocol == IPPROTO_TCP | 468 | && protocol == IPPROTO_TCP |
@@ -434,7 +472,7 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport) | |||
434 | * Check if ftp service entry exists, the packet | 472 | * Check if ftp service entry exists, the packet |
435 | * might belong to FTP data connections. | 473 | * might belong to FTP data connections. |
436 | */ | 474 | */ |
437 | svc = __ip_vs_service_get(protocol, vaddr, FTPPORT); | 475 | svc = __ip_vs_service_get(af, protocol, vaddr, FTPPORT); |
438 | } | 476 | } |
439 | 477 | ||
440 | if (svc == NULL | 478 | if (svc == NULL |
@@ -442,16 +480,16 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport) | |||
442 | /* | 480 | /* |
443 | * Check if the catch-all port (port zero) exists | 481 | * Check if the catch-all port (port zero) exists |
444 | */ | 482 | */ |
445 | svc = __ip_vs_service_get(protocol, vaddr, 0); | 483 | svc = __ip_vs_service_get(af, protocol, vaddr, 0); |
446 | } | 484 | } |
447 | 485 | ||
448 | out: | 486 | out: |
449 | read_unlock(&__ip_vs_svc_lock); | 487 | read_unlock(&__ip_vs_svc_lock); |
450 | 488 | ||
451 | IP_VS_DBG(9, "lookup service: fwm %u %s %u.%u.%u.%u:%u %s\n", | 489 | IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n", |
452 | fwmark, ip_vs_proto_name(protocol), | 490 | fwmark, ip_vs_proto_name(protocol), |
453 | NIPQUAD(vaddr), ntohs(vport), | 491 | IP_VS_DBG_ADDR(af, vaddr), ntohs(vport), |
454 | svc?"hit":"not hit"); | 492 | svc ? "hit" : "not hit"); |
455 | 493 | ||
456 | return svc; | 494 | return svc; |
457 | } | 495 | } |
@@ -478,11 +516,20 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest) | |||
478 | /* | 516 | /* |
479 | * Returns hash value for real service | 517 | * Returns hash value for real service |
480 | */ | 518 | */ |
481 | static __inline__ unsigned ip_vs_rs_hashkey(__be32 addr, __be16 port) | 519 | static inline unsigned ip_vs_rs_hashkey(int af, |
520 | const union nf_inet_addr *addr, | ||
521 | __be16 port) | ||
482 | { | 522 | { |
483 | register unsigned porth = ntohs(port); | 523 | register unsigned porth = ntohs(port); |
524 | __be32 addr_fold = addr->ip; | ||
525 | |||
526 | #ifdef CONFIG_IP_VS_IPV6 | ||
527 | if (af == AF_INET6) | ||
528 | addr_fold = addr->ip6[0]^addr->ip6[1]^ | ||
529 | addr->ip6[2]^addr->ip6[3]; | ||
530 | #endif | ||
484 | 531 | ||
485 | return (ntohl(addr)^(porth>>IP_VS_RTAB_BITS)^porth) | 532 | return (ntohl(addr_fold)^(porth>>IP_VS_RTAB_BITS)^porth) |
486 | & IP_VS_RTAB_MASK; | 533 | & IP_VS_RTAB_MASK; |
487 | } | 534 | } |
488 | 535 | ||
@@ -502,7 +549,8 @@ static int ip_vs_rs_hash(struct ip_vs_dest *dest) | |||
502 | * Hash by proto,addr,port, | 549 | * Hash by proto,addr,port, |
503 | * which are the parameters of the real service. | 550 | * which are the parameters of the real service. |
504 | */ | 551 | */ |
505 | hash = ip_vs_rs_hashkey(dest->addr, dest->port); | 552 | hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port); |
553 | |||
506 | list_add(&dest->d_list, &ip_vs_rtable[hash]); | 554 | list_add(&dest->d_list, &ip_vs_rtable[hash]); |
507 | 555 | ||
508 | return 1; | 556 | return 1; |
@@ -529,7 +577,9 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest) | |||
529 | * Lookup real service by <proto,addr,port> in the real service table. | 577 | * Lookup real service by <proto,addr,port> in the real service table. |
530 | */ | 578 | */ |
531 | struct ip_vs_dest * | 579 | struct ip_vs_dest * |
532 | ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport) | 580 | ip_vs_lookup_real_service(int af, __u16 protocol, |
581 | const union nf_inet_addr *daddr, | ||
582 | __be16 dport) | ||
533 | { | 583 | { |
534 | unsigned hash; | 584 | unsigned hash; |
535 | struct ip_vs_dest *dest; | 585 | struct ip_vs_dest *dest; |
@@ -538,11 +588,12 @@ ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport) | |||
538 | * Check for "full" addressed entries | 588 | * Check for "full" addressed entries |
539 | * Return the first found entry | 589 | * Return the first found entry |
540 | */ | 590 | */ |
541 | hash = ip_vs_rs_hashkey(daddr, dport); | 591 | hash = ip_vs_rs_hashkey(af, daddr, dport); |
542 | 592 | ||
543 | read_lock(&__ip_vs_rs_lock); | 593 | read_lock(&__ip_vs_rs_lock); |
544 | list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) { | 594 | list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) { |
545 | if ((dest->addr == daddr) | 595 | if ((dest->af == af) |
596 | && ip_vs_addr_equal(af, &dest->addr, daddr) | ||
546 | && (dest->port == dport) | 597 | && (dest->port == dport) |
547 | && ((dest->protocol == protocol) || | 598 | && ((dest->protocol == protocol) || |
548 | dest->vfwmark)) { | 599 | dest->vfwmark)) { |
@@ -560,7 +611,8 @@ ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport) | |||
560 | * Lookup destination by {addr,port} in the given service | 611 | * Lookup destination by {addr,port} in the given service |
561 | */ | 612 | */ |
562 | static struct ip_vs_dest * | 613 | static struct ip_vs_dest * |
563 | ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | 614 | ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, |
615 | __be16 dport) | ||
564 | { | 616 | { |
565 | struct ip_vs_dest *dest; | 617 | struct ip_vs_dest *dest; |
566 | 618 | ||
@@ -568,7 +620,9 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | |||
568 | * Find the destination for the given service | 620 | * Find the destination for the given service |
569 | */ | 621 | */ |
570 | list_for_each_entry(dest, &svc->destinations, n_list) { | 622 | list_for_each_entry(dest, &svc->destinations, n_list) { |
571 | if ((dest->addr == daddr) && (dest->port == dport)) { | 623 | if ((dest->af == svc->af) |
624 | && ip_vs_addr_equal(svc->af, &dest->addr, daddr) | ||
625 | && (dest->port == dport)) { | ||
572 | /* HIT */ | 626 | /* HIT */ |
573 | return dest; | 627 | return dest; |
574 | } | 628 | } |
@@ -587,13 +641,15 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | |||
587 | * ip_vs_lookup_real_service() looked promising, but | 641 |
588 | * seems not working as expected. | 642 | * seems not working as expected. |
589 | */ | 643 | */ |
590 | struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport, | 644 | struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr, |
591 | __be32 vaddr, __be16 vport, __u16 protocol) | 645 | __be16 dport, |
646 | const union nf_inet_addr *vaddr, | ||
647 | __be16 vport, __u16 protocol) | ||
592 | { | 648 | { |
593 | struct ip_vs_dest *dest; | 649 | struct ip_vs_dest *dest; |
594 | struct ip_vs_service *svc; | 650 | struct ip_vs_service *svc; |
595 | 651 | ||
596 | svc = ip_vs_service_get(0, protocol, vaddr, vport); | 652 | svc = ip_vs_service_get(af, 0, protocol, vaddr, vport); |
597 | if (!svc) | 653 | if (!svc) |
598 | return NULL; | 654 | return NULL; |
599 | dest = ip_vs_lookup_dest(svc, daddr, dport); | 655 | dest = ip_vs_lookup_dest(svc, daddr, dport); |
@@ -614,7 +670,8 @@ struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport, | |||
614 | * scheduling. | 670 | * scheduling. |
615 | */ | 671 | */ |
616 | static struct ip_vs_dest * | 672 | static struct ip_vs_dest * |
617 | ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | 673 | ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, |
674 | __be16 dport) | ||
618 | { | 675 | { |
619 | struct ip_vs_dest *dest, *nxt; | 676 | struct ip_vs_dest *dest, *nxt; |
620 | 677 | ||
@@ -622,17 +679,19 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | |||
622 | * Find the destination in trash | 679 | * Find the destination in trash |
623 | */ | 680 | */ |
624 | list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) { | 681 | list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) { |
625 | IP_VS_DBG(3, "Destination %u/%u.%u.%u.%u:%u still in trash, " | 682 | IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, " |
626 | "dest->refcnt=%d\n", | 683 | "dest->refcnt=%d\n", |
627 | dest->vfwmark, | 684 | dest->vfwmark, |
628 | NIPQUAD(dest->addr), ntohs(dest->port), | 685 | IP_VS_DBG_ADDR(svc->af, &dest->addr), |
629 | atomic_read(&dest->refcnt)); | 686 | ntohs(dest->port), |
630 | if (dest->addr == daddr && | 687 | atomic_read(&dest->refcnt)); |
688 | if (dest->af == svc->af && | ||
689 | ip_vs_addr_equal(svc->af, &dest->addr, daddr) && | ||
631 | dest->port == dport && | 690 | dest->port == dport && |
632 | dest->vfwmark == svc->fwmark && | 691 | dest->vfwmark == svc->fwmark && |
633 | dest->protocol == svc->protocol && | 692 | dest->protocol == svc->protocol && |
634 | (svc->fwmark || | 693 | (svc->fwmark || |
635 | (dest->vaddr == svc->addr && | 694 | (ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) && |
636 | dest->vport == svc->port))) { | 695 | dest->vport == svc->port))) { |
637 | /* HIT */ | 696 | /* HIT */ |
638 | return dest; | 697 | return dest; |
@@ -642,10 +701,11 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) | |||
642 | * Try to purge the destination from trash if not referenced | 701 | * Try to purge the destination from trash if not referenced |
643 | */ | 702 | */ |
644 | if (atomic_read(&dest->refcnt) == 1) { | 703 | if (atomic_read(&dest->refcnt) == 1) { |
645 | IP_VS_DBG(3, "Removing destination %u/%u.%u.%u.%u:%u " | 704 | IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u " |
646 | "from trash\n", | 705 | "from trash\n", |
647 | dest->vfwmark, | 706 | dest->vfwmark, |
648 | NIPQUAD(dest->addr), ntohs(dest->port)); | 707 | IP_VS_DBG_ADDR(svc->af, &dest->addr), |
708 | ntohs(dest->port)); | ||
649 | list_del(&dest->n_list); | 709 | list_del(&dest->n_list); |
650 | ip_vs_dst_reset(dest); | 710 | ip_vs_dst_reset(dest); |
651 | __ip_vs_unbind_svc(dest); | 711 | __ip_vs_unbind_svc(dest); |
@@ -684,18 +744,7 @@ ip_vs_zero_stats(struct ip_vs_stats *stats) | |||
684 | { | 744 | { |
685 | spin_lock_bh(&stats->lock); | 745 | spin_lock_bh(&stats->lock); |
686 | 746 | ||
687 | stats->conns = 0; | 747 | memset(&stats->ustats, 0, sizeof(stats->ustats)); |
688 | stats->inpkts = 0; | ||
689 | stats->outpkts = 0; | ||
690 | stats->inbytes = 0; | ||
691 | stats->outbytes = 0; | ||
692 | |||
693 | stats->cps = 0; | ||
694 | stats->inpps = 0; | ||
695 | stats->outpps = 0; | ||
696 | stats->inbps = 0; | ||
697 | stats->outbps = 0; | ||
698 | |||
699 | ip_vs_zero_estimator(stats); | 748 | ip_vs_zero_estimator(stats); |
700 | 749 | ||
701 | spin_unlock_bh(&stats->lock); | 750 | spin_unlock_bh(&stats->lock); |
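ip_vs_zero_stats() can now clear everything with one memset because the user-visible counters have been gathered into a single embedded structure (stats->ustats); state that must survive a reset, such as the lock and estimator, lives outside it. A small sketch of the same layout idea, with illustrative field names only:

#include <stdint.h>
#include <string.h>

/* Counters exported to user space, grouped so they can be cleared at once. */
struct user_stats {
	uint32_t conns, inpkts, outpkts;
	uint64_t inbytes, outbytes;
	uint32_t cps, inpps, outpps, inbps, outbps;
};

struct stats {
	struct user_stats ustats;  /* resettable counters                        */
	/* lock, estimator state, ... kept outside the memset'd region          */
};

static void zero_stats(struct stats *s)
{
	memset(&s->ustats, 0, sizeof(s->ustats));  /* one call clears all counters */
}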
@@ -706,7 +755,7 @@ ip_vs_zero_stats(struct ip_vs_stats *stats) | |||
706 | */ | 755 | */ |
707 | static void | 756 | static void |
708 | __ip_vs_update_dest(struct ip_vs_service *svc, | 757 | __ip_vs_update_dest(struct ip_vs_service *svc, |
709 | struct ip_vs_dest *dest, struct ip_vs_dest_user *udest) | 758 | struct ip_vs_dest *dest, struct ip_vs_dest_user_kern *udest) |
710 | { | 759 | { |
711 | int conn_flags; | 760 | int conn_flags; |
712 | 761 | ||
@@ -715,10 +764,18 @@ __ip_vs_update_dest(struct ip_vs_service *svc, | |||
715 | conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE; | 764 | conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE; |
716 | 765 | ||
717 | /* check if local node and update the flags */ | 766 | /* check if local node and update the flags */ |
718 | if (inet_addr_type(&init_net, udest->addr) == RTN_LOCAL) { | 767 | #ifdef CONFIG_IP_VS_IPV6 |
719 | conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK) | 768 | if (svc->af == AF_INET6) { |
720 | | IP_VS_CONN_F_LOCALNODE; | 769 | if (__ip_vs_addr_is_local_v6(&udest->addr.in6)) { |
721 | } | 770 | conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK) |
771 | | IP_VS_CONN_F_LOCALNODE; | ||
772 | } | ||
773 | } else | ||
774 | #endif | ||
775 | if (inet_addr_type(&init_net, udest->addr.ip) == RTN_LOCAL) { | ||
776 | conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK) | ||
777 | | IP_VS_CONN_F_LOCALNODE; | ||
778 | } | ||
722 | 779 | ||
723 | /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */ | 780 | /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */ |
724 | if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) { | 781 | if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) { |
@@ -759,7 +816,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, | |||
759 | * Create a destination for the given service | 816 | * Create a destination for the given service |
760 | */ | 817 | */ |
761 | static int | 818 | static int |
762 | ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, | 819 | ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, |
763 | struct ip_vs_dest **dest_p) | 820 | struct ip_vs_dest **dest_p) |
764 | { | 821 | { |
765 | struct ip_vs_dest *dest; | 822 | struct ip_vs_dest *dest; |
@@ -767,9 +824,20 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, | |||
767 | 824 | ||
768 | EnterFunction(2); | 825 | EnterFunction(2); |
769 | 826 | ||
770 | atype = inet_addr_type(&init_net, udest->addr); | 827 | #ifdef CONFIG_IP_VS_IPV6 |
771 | if (atype != RTN_LOCAL && atype != RTN_UNICAST) | 828 | if (svc->af == AF_INET6) { |
772 | return -EINVAL; | 829 | atype = ipv6_addr_type(&udest->addr.in6); |
830 | if ((!(atype & IPV6_ADDR_UNICAST) || | ||
831 | atype & IPV6_ADDR_LINKLOCAL) && | ||
832 | !__ip_vs_addr_is_local_v6(&udest->addr.in6)) | ||
833 | return -EINVAL; | ||
834 | } else | ||
835 | #endif | ||
836 | { | ||
837 | atype = inet_addr_type(&init_net, udest->addr.ip); | ||
838 | if (atype != RTN_LOCAL && atype != RTN_UNICAST) | ||
839 | return -EINVAL; | ||
840 | } | ||
773 | 841 | ||
774 | dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); | 842 | dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); |
775 | if (dest == NULL) { | 843 | if (dest == NULL) { |
@@ -777,11 +845,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, | |||
777 | return -ENOMEM; | 845 | return -ENOMEM; |
778 | } | 846 | } |
779 | 847 | ||
848 | dest->af = svc->af; | ||
780 | dest->protocol = svc->protocol; | 849 | dest->protocol = svc->protocol; |
781 | dest->vaddr = svc->addr; | 850 | dest->vaddr = svc->addr; |
782 | dest->vport = svc->port; | 851 | dest->vport = svc->port; |
783 | dest->vfwmark = svc->fwmark; | 852 | dest->vfwmark = svc->fwmark; |
784 | dest->addr = udest->addr; | 853 | ip_vs_addr_copy(svc->af, &dest->addr, &udest->addr); |
785 | dest->port = udest->port; | 854 | dest->port = udest->port; |
786 | 855 | ||
787 | atomic_set(&dest->activeconns, 0); | 856 | atomic_set(&dest->activeconns, 0); |
@@ -806,10 +875,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, | |||
806 | * Add a destination into an existing service | 875 | * Add a destination into an existing service |
807 | */ | 876 | */ |
808 | static int | 877 | static int |
809 | ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | 878 | ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) |
810 | { | 879 | { |
811 | struct ip_vs_dest *dest; | 880 | struct ip_vs_dest *dest; |
812 | __be32 daddr = udest->addr; | 881 | union nf_inet_addr daddr; |
813 | __be16 dport = udest->port; | 882 | __be16 dport = udest->port; |
814 | int ret; | 883 | int ret; |
815 | 884 | ||
@@ -826,10 +895,13 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
826 | return -ERANGE; | 895 | return -ERANGE; |
827 | } | 896 | } |
828 | 897 | ||
898 | ip_vs_addr_copy(svc->af, &daddr, &udest->addr); | ||
899 | |||
829 | /* | 900 | /* |
830 | * Check if the dest already exists in the list | 901 | * Check if the dest already exists in the list |
831 | */ | 902 | */ |
832 | dest = ip_vs_lookup_dest(svc, daddr, dport); | 903 | dest = ip_vs_lookup_dest(svc, &daddr, dport); |
904 | |||
833 | if (dest != NULL) { | 905 | if (dest != NULL) { |
834 | IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n"); | 906 | IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n"); |
835 | return -EEXIST; | 907 | return -EEXIST; |
@@ -839,15 +911,17 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
839 | * Check if the dest already exists in the trash and | 911 | * Check if the dest already exists in the trash and |
840 | * is from the same service | 912 | * is from the same service |
841 | */ | 913 | */ |
842 | dest = ip_vs_trash_get_dest(svc, daddr, dport); | 914 | dest = ip_vs_trash_get_dest(svc, &daddr, dport); |
915 | |||
843 | if (dest != NULL) { | 916 | if (dest != NULL) { |
844 | IP_VS_DBG(3, "Get destination %u.%u.%u.%u:%u from trash, " | 917 | IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, " |
845 | "dest->refcnt=%d, service %u/%u.%u.%u.%u:%u\n", | 918 | "dest->refcnt=%d, service %u/%s:%u\n", |
846 | NIPQUAD(daddr), ntohs(dport), | 919 | IP_VS_DBG_ADDR(svc->af, &daddr), ntohs(dport), |
847 | atomic_read(&dest->refcnt), | 920 | atomic_read(&dest->refcnt), |
848 | dest->vfwmark, | 921 | dest->vfwmark, |
849 | NIPQUAD(dest->vaddr), | 922 | IP_VS_DBG_ADDR(svc->af, &dest->vaddr), |
850 | ntohs(dest->vport)); | 923 | ntohs(dest->vport)); |
924 | |||
851 | __ip_vs_update_dest(svc, dest, udest); | 925 | __ip_vs_update_dest(svc, dest, udest); |
852 | 926 | ||
853 | /* | 927 | /* |
@@ -868,7 +942,8 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
868 | svc->num_dests++; | 942 | svc->num_dests++; |
869 | 943 | ||
870 | /* call the update_service function of its scheduler */ | 944 | /* call the update_service function of its scheduler */ |
871 | svc->scheduler->update_service(svc); | 945 | if (svc->scheduler->update_service) |
946 | svc->scheduler->update_service(svc); | ||
872 | 947 | ||
873 | write_unlock_bh(&__ip_vs_svc_lock); | 948 | write_unlock_bh(&__ip_vs_svc_lock); |
874 | return 0; | 949 | return 0; |
@@ -898,7 +973,8 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
898 | svc->num_dests++; | 973 | svc->num_dests++; |
899 | 974 | ||
900 | /* call the update_service function of its scheduler */ | 975 | /* call the update_service function of its scheduler */ |
901 | svc->scheduler->update_service(svc); | 976 | if (svc->scheduler->update_service) |
977 | svc->scheduler->update_service(svc); | ||
902 | 978 | ||
903 | write_unlock_bh(&__ip_vs_svc_lock); | 979 | write_unlock_bh(&__ip_vs_svc_lock); |
904 | 980 | ||
@@ -912,10 +988,10 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
912 | * Edit a destination in the given service | 988 | * Edit a destination in the given service |
913 | */ | 989 | */ |
914 | static int | 990 | static int |
915 | ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | 991 | ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) |
916 | { | 992 | { |
917 | struct ip_vs_dest *dest; | 993 | struct ip_vs_dest *dest; |
918 | __be32 daddr = udest->addr; | 994 | union nf_inet_addr daddr; |
919 | __be16 dport = udest->port; | 995 | __be16 dport = udest->port; |
920 | 996 | ||
921 | EnterFunction(2); | 997 | EnterFunction(2); |
@@ -931,10 +1007,13 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
931 | return -ERANGE; | 1007 | return -ERANGE; |
932 | } | 1008 | } |
933 | 1009 | ||
1010 | ip_vs_addr_copy(svc->af, &daddr, &udest->addr); | ||
1011 | |||
934 | /* | 1012 | /* |
935 | * Lookup the destination list | 1013 | * Lookup the destination list |
936 | */ | 1014 | */ |
937 | dest = ip_vs_lookup_dest(svc, daddr, dport); | 1015 | dest = ip_vs_lookup_dest(svc, &daddr, dport); |
1016 | |||
938 | if (dest == NULL) { | 1017 | if (dest == NULL) { |
939 | IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n"); | 1018 | IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n"); |
940 | return -ENOENT; | 1019 | return -ENOENT; |
@@ -948,7 +1027,8 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) | |||
948 | IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); | 1027 | IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); |
949 | 1028 | ||
950 | /* call the update_service, because server weight may be changed */ | 1029 | /* call the update_service, because server weight may be changed */ |
951 | svc->scheduler->update_service(svc); | 1030 | if (svc->scheduler->update_service) |
1031 | svc->scheduler->update_service(svc); | ||
952 | 1032 | ||
953 | write_unlock_bh(&__ip_vs_svc_lock); | 1033 | write_unlock_bh(&__ip_vs_svc_lock); |
954 | 1034 | ||
@@ -987,10 +1067,11 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest) | |||
987 | atomic_dec(&dest->svc->refcnt); | 1067 | atomic_dec(&dest->svc->refcnt); |
988 | kfree(dest); | 1068 | kfree(dest); |
989 | } else { | 1069 | } else { |
990 | IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, " | 1070 | IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, " |
991 | "dest->refcnt=%d\n", | 1071 | "dest->refcnt=%d\n", |
992 | NIPQUAD(dest->addr), ntohs(dest->port), | 1072 | IP_VS_DBG_ADDR(dest->af, &dest->addr), |
993 | atomic_read(&dest->refcnt)); | 1073 | ntohs(dest->port), |
1074 | atomic_read(&dest->refcnt)); | ||
994 | list_add(&dest->n_list, &ip_vs_dest_trash); | 1075 | list_add(&dest->n_list, &ip_vs_dest_trash); |
995 | atomic_inc(&dest->refcnt); | 1076 | atomic_inc(&dest->refcnt); |
996 | } | 1077 | } |
@@ -1011,12 +1092,12 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc, | |||
1011 | */ | 1092 | */ |
1012 | list_del(&dest->n_list); | 1093 | list_del(&dest->n_list); |
1013 | svc->num_dests--; | 1094 | svc->num_dests--; |
1014 | if (svcupd) { | 1095 | |
1015 | /* | 1096 | /* |
1016 | * Call the update_service function of its scheduler | 1097 | * Call the update_service function of its scheduler |
1017 | */ | 1098 | */ |
1018 | svc->scheduler->update_service(svc); | 1099 | if (svcupd && svc->scheduler->update_service) |
1019 | } | 1100 | svc->scheduler->update_service(svc); |
1020 | } | 1101 | } |
1021 | 1102 | ||
1022 | 1103 | ||
@@ -1024,15 +1105,15 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc, | |||
1024 | * Delete a destination server in the given service | 1105 | * Delete a destination server in the given service |
1025 | */ | 1106 | */ |
1026 | static int | 1107 | static int |
1027 | ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest) | 1108 | ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) |
1028 | { | 1109 | { |
1029 | struct ip_vs_dest *dest; | 1110 | struct ip_vs_dest *dest; |
1030 | __be32 daddr = udest->addr; | ||
1031 | __be16 dport = udest->port; | 1111 | __be16 dport = udest->port; |
1032 | 1112 | ||
1033 | EnterFunction(2); | 1113 | EnterFunction(2); |
1034 | 1114 | ||
1035 | dest = ip_vs_lookup_dest(svc, daddr, dport); | 1115 | dest = ip_vs_lookup_dest(svc, &udest->addr, dport); |
1116 | |||
1036 | if (dest == NULL) { | 1117 | if (dest == NULL) { |
1037 | IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n"); | 1118 | IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n"); |
1038 | return -ENOENT; | 1119 | return -ENOENT; |
@@ -1067,7 +1148,8 @@ ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest) | |||
1067 | * Add a service into the service hash table | 1148 | * Add a service into the service hash table |
1068 | */ | 1149 | */ |
1069 | static int | 1150 | static int |
1070 | ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) | 1151 | ip_vs_add_service(struct ip_vs_service_user_kern *u, |
1152 | struct ip_vs_service **svc_p) | ||
1071 | { | 1153 | { |
1072 | int ret = 0; | 1154 | int ret = 0; |
1073 | struct ip_vs_scheduler *sched = NULL; | 1155 | struct ip_vs_scheduler *sched = NULL; |
@@ -1085,6 +1167,19 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) | |||
1085 | goto out_mod_dec; | 1167 | goto out_mod_dec; |
1086 | } | 1168 | } |
1087 | 1169 | ||
1170 | #ifdef CONFIG_IP_VS_IPV6 | ||
1171 | if (u->af == AF_INET6) { | ||
1172 | if (!sched->supports_ipv6) { | ||
1173 | ret = -EAFNOSUPPORT; | ||
1174 | goto out_err; | ||
1175 | } | ||
1176 | if ((u->netmask < 1) || (u->netmask > 128)) { | ||
1177 | ret = -EINVAL; | ||
1178 | goto out_err; | ||
1179 | } | ||
1180 | } | ||
1181 | #endif | ||
1182 | |||
1088 | svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); | 1183 | svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); |
1089 | if (svc == NULL) { | 1184 | if (svc == NULL) { |
1090 | IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); | 1185 | IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); |
@@ -1096,8 +1191,9 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) | |||
1096 | atomic_set(&svc->usecnt, 1); | 1191 | atomic_set(&svc->usecnt, 1); |
1097 | atomic_set(&svc->refcnt, 0); | 1192 | atomic_set(&svc->refcnt, 0); |
1098 | 1193 | ||
1194 | svc->af = u->af; | ||
1099 | svc->protocol = u->protocol; | 1195 | svc->protocol = u->protocol; |
1100 | svc->addr = u->addr; | 1196 | ip_vs_addr_copy(svc->af, &svc->addr, &u->addr); |
1101 | svc->port = u->port; | 1197 | svc->port = u->port; |
1102 | svc->fwmark = u->fwmark; | 1198 | svc->fwmark = u->fwmark; |
1103 | svc->flags = u->flags; | 1199 | svc->flags = u->flags; |
@@ -1121,7 +1217,10 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) | |||
1121 | atomic_inc(&ip_vs_nullsvc_counter); | 1217 | atomic_inc(&ip_vs_nullsvc_counter); |
1122 | 1218 | ||
1123 | ip_vs_new_estimator(&svc->stats); | 1219 | ip_vs_new_estimator(&svc->stats); |
1124 | ip_vs_num_services++; | 1220 | |
1221 | /* Count only IPv4 services for old get/setsockopt interface */ | ||
1222 | if (svc->af == AF_INET) | ||
1223 | ip_vs_num_services++; | ||
1125 | 1224 | ||
1126 | /* Hash the service into the service table */ | 1225 | /* Hash the service into the service table */ |
1127 | write_lock_bh(&__ip_vs_svc_lock); | 1226 | write_lock_bh(&__ip_vs_svc_lock); |
@@ -1156,7 +1255,7 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) | |||
1156 | * Edit a service and bind it with a new scheduler | 1255 | * Edit a service and bind it with a new scheduler |
1157 | */ | 1256 | */ |
1158 | static int | 1257 | static int |
1159 | ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u) | 1258 | ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) |
1160 | { | 1259 | { |
1161 | struct ip_vs_scheduler *sched, *old_sched; | 1260 | struct ip_vs_scheduler *sched, *old_sched; |
1162 | int ret = 0; | 1261 | int ret = 0; |
@@ -1172,6 +1271,19 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u) | |||
1172 | } | 1271 | } |
1173 | old_sched = sched; | 1272 | old_sched = sched; |
1174 | 1273 | ||
1274 | #ifdef CONFIG_IP_VS_IPV6 | ||
1275 | if (u->af == AF_INET6) { | ||
1276 | if (!sched->supports_ipv6) { | ||
1277 | ret = -EAFNOSUPPORT; | ||
1278 | goto out; | ||
1279 | } | ||
1280 | if ((u->netmask < 1) || (u->netmask > 128)) { | ||
1281 | ret = -EINVAL; | ||
1282 | goto out; | ||
1283 | } | ||
1284 | } | ||
1285 | #endif | ||
1286 | |||
1175 | write_lock_bh(&__ip_vs_svc_lock); | 1287 | write_lock_bh(&__ip_vs_svc_lock); |
1176 | 1288 | ||
1177 | /* | 1289 | /* |
@@ -1193,7 +1305,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u) | |||
1193 | */ | 1305 | */ |
1194 | if ((ret = ip_vs_unbind_scheduler(svc))) { | 1306 | if ((ret = ip_vs_unbind_scheduler(svc))) { |
1195 | old_sched = sched; | 1307 | old_sched = sched; |
1196 | goto out; | 1308 | goto out_unlock; |
1197 | } | 1309 | } |
1198 | 1310 | ||
1199 | /* | 1311 | /* |
@@ -1212,12 +1324,13 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u) | |||
1212 | */ | 1324 | */ |
1213 | ip_vs_bind_scheduler(svc, old_sched); | 1325 | ip_vs_bind_scheduler(svc, old_sched); |
1214 | old_sched = sched; | 1326 | old_sched = sched; |
1215 | goto out; | 1327 | goto out_unlock; |
1216 | } | 1328 | } |
1217 | } | 1329 | } |
1218 | 1330 | ||
1219 | out: | 1331 | out_unlock: |
1220 | write_unlock_bh(&__ip_vs_svc_lock); | 1332 | write_unlock_bh(&__ip_vs_svc_lock); |
1333 | out: | ||
1221 | 1334 | ||
1222 | if (old_sched) | 1335 | if (old_sched) |
1223 | ip_vs_scheduler_put(old_sched); | 1336 | ip_vs_scheduler_put(old_sched); |
@@ -1236,7 +1349,10 @@ static void __ip_vs_del_service(struct ip_vs_service *svc) | |||
1236 | struct ip_vs_dest *dest, *nxt; | 1349 | struct ip_vs_dest *dest, *nxt; |
1237 | struct ip_vs_scheduler *old_sched; | 1350 | struct ip_vs_scheduler *old_sched; |
1238 | 1351 | ||
1239 | ip_vs_num_services--; | 1352 | /* Count only IPv4 services for old get/setsockopt interface */ |
1353 | if (svc->af == AF_INET) | ||
1354 | ip_vs_num_services--; | ||
1355 | |||
1240 | ip_vs_kill_estimator(&svc->stats); | 1356 | ip_vs_kill_estimator(&svc->stats); |
1241 | 1357 | ||
1242 | /* Unbind scheduler */ | 1358 | /* Unbind scheduler */ |
@@ -1671,6 +1787,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos) | |||
1671 | } | 1787 | } |
1672 | 1788 | ||
1673 | static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos) | 1789 | static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos) |
1790 | __acquires(__ip_vs_svc_lock) | ||
1674 | { | 1791 | { |
1675 | 1792 | ||
1676 | read_lock_bh(&__ip_vs_svc_lock); | 1793 | read_lock_bh(&__ip_vs_svc_lock); |
@@ -1724,6 +1841,7 @@ static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1724 | } | 1841 | } |
1725 | 1842 | ||
1726 | static void ip_vs_info_seq_stop(struct seq_file *seq, void *v) | 1843 | static void ip_vs_info_seq_stop(struct seq_file *seq, void *v) |
1844 | __releases(__ip_vs_svc_lock) | ||
1727 | { | 1845 | { |
1728 | read_unlock_bh(&__ip_vs_svc_lock); | 1846 | read_unlock_bh(&__ip_vs_svc_lock); |
1729 | } | 1847 | } |
@@ -1744,15 +1862,25 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) | |||
1744 | const struct ip_vs_iter *iter = seq->private; | 1862 | const struct ip_vs_iter *iter = seq->private; |
1745 | const struct ip_vs_dest *dest; | 1863 | const struct ip_vs_dest *dest; |
1746 | 1864 | ||
1747 | if (iter->table == ip_vs_svc_table) | 1865 | if (iter->table == ip_vs_svc_table) { |
1748 | seq_printf(seq, "%s %08X:%04X %s ", | 1866 | #ifdef CONFIG_IP_VS_IPV6 |
1749 | ip_vs_proto_name(svc->protocol), | 1867 | if (svc->af == AF_INET6) |
1750 | ntohl(svc->addr), | 1868 | seq_printf(seq, "%s [" NIP6_FMT "]:%04X %s ", |
1751 | ntohs(svc->port), | 1869 | ip_vs_proto_name(svc->protocol), |
1752 | svc->scheduler->name); | 1870 | NIP6(svc->addr.in6), |
1753 | else | 1871 | ntohs(svc->port), |
1872 | svc->scheduler->name); | ||
1873 | else | ||
1874 | #endif | ||
1875 | seq_printf(seq, "%s %08X:%04X %s ", | ||
1876 | ip_vs_proto_name(svc->protocol), | ||
1877 | ntohl(svc->addr.ip), | ||
1878 | ntohs(svc->port), | ||
1879 | svc->scheduler->name); | ||
1880 | } else { | ||
1754 | seq_printf(seq, "FWM %08X %s ", | 1881 | seq_printf(seq, "FWM %08X %s ", |
1755 | svc->fwmark, svc->scheduler->name); | 1882 | svc->fwmark, svc->scheduler->name); |
1883 | } | ||
1756 | 1884 | ||
1757 | if (svc->flags & IP_VS_SVC_F_PERSISTENT) | 1885 | if (svc->flags & IP_VS_SVC_F_PERSISTENT) |
1758 | seq_printf(seq, "persistent %d %08X\n", | 1886 | seq_printf(seq, "persistent %d %08X\n", |
@@ -1762,13 +1890,29 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) | |||
1762 | seq_putc(seq, '\n'); | 1890 | seq_putc(seq, '\n'); |
1763 | 1891 | ||
1764 | list_for_each_entry(dest, &svc->destinations, n_list) { | 1892 | list_for_each_entry(dest, &svc->destinations, n_list) { |
1765 | seq_printf(seq, | 1893 | #ifdef CONFIG_IP_VS_IPV6 |
1766 | " -> %08X:%04X %-7s %-6d %-10d %-10d\n", | 1894 | if (dest->af == AF_INET6) |
1767 | ntohl(dest->addr), ntohs(dest->port), | 1895 | seq_printf(seq, |
1768 | ip_vs_fwd_name(atomic_read(&dest->conn_flags)), | 1896 | " -> [" NIP6_FMT "]:%04X" |
1769 | atomic_read(&dest->weight), | 1897 | " %-7s %-6d %-10d %-10d\n", |
1770 | atomic_read(&dest->activeconns), | 1898 | NIP6(dest->addr.in6), |
1771 | atomic_read(&dest->inactconns)); | 1899 | ntohs(dest->port), |
1900 | ip_vs_fwd_name(atomic_read(&dest->conn_flags)), | ||
1901 | atomic_read(&dest->weight), | ||
1902 | atomic_read(&dest->activeconns), | ||
1903 | atomic_read(&dest->inactconns)); | ||
1904 | else | ||
1905 | #endif | ||
1906 | seq_printf(seq, | ||
1907 | " -> %08X:%04X " | ||
1908 | "%-7s %-6d %-10d %-10d\n", | ||
1909 | ntohl(dest->addr.ip), | ||
1910 | ntohs(dest->port), | ||
1911 | ip_vs_fwd_name(atomic_read(&dest->conn_flags)), | ||
1912 | atomic_read(&dest->weight), | ||
1913 | atomic_read(&dest->activeconns), | ||
1914 | atomic_read(&dest->inactconns)); | ||
1915 | |||
1772 | } | 1916 | } |
1773 | } | 1917 | } |
1774 | return 0; | 1918 | return 0; |
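The bracketed IPv6 output above relies on the NIP6_FMT/NIP6 helpers of this kernel era (later superseded by the %pI6 format specifier). A rough sketch of rendering a struct in6_addr the same way, assuming the macro definitions from linux/kernel.h of the time:

/* Sketch: print an IPv6 address and port the way the /proc handler above does. */
#include <linux/kernel.h>
#include <linux/in6.h>
#include <linux/seq_file.h>

static void example_print_v6(struct seq_file *seq,
			     const struct in6_addr *a, __be16 port)
{
	/* NIP6_FMT expands to eight "%04x" groups; NIP6() supplies the pieces. */
	seq_printf(seq, "[" NIP6_FMT "]:%04X ", NIP6(*a), ntohs(port));
}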
@@ -1812,20 +1956,20 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v) | |||
1812 | " Conns Packets Packets Bytes Bytes\n"); | 1956 | " Conns Packets Packets Bytes Bytes\n"); |
1813 | 1957 | ||
1814 | spin_lock_bh(&ip_vs_stats.lock); | 1958 | spin_lock_bh(&ip_vs_stats.lock); |
1815 | seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.conns, | 1959 | seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns, |
1816 | ip_vs_stats.inpkts, ip_vs_stats.outpkts, | 1960 | ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts, |
1817 | (unsigned long long) ip_vs_stats.inbytes, | 1961 | (unsigned long long) ip_vs_stats.ustats.inbytes, |
1818 | (unsigned long long) ip_vs_stats.outbytes); | 1962 | (unsigned long long) ip_vs_stats.ustats.outbytes); |
1819 | 1963 | ||
1820 | /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ | 1964 | /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ |
1821 | seq_puts(seq, | 1965 | seq_puts(seq, |
1822 | " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); | 1966 | " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); |
1823 | seq_printf(seq,"%8X %8X %8X %16X %16X\n", | 1967 | seq_printf(seq,"%8X %8X %8X %16X %16X\n", |
1824 | ip_vs_stats.cps, | 1968 | ip_vs_stats.ustats.cps, |
1825 | ip_vs_stats.inpps, | 1969 | ip_vs_stats.ustats.inpps, |
1826 | ip_vs_stats.outpps, | 1970 | ip_vs_stats.ustats.outpps, |
1827 | ip_vs_stats.inbps, | 1971 | ip_vs_stats.ustats.inbps, |
1828 | ip_vs_stats.outbps); | 1972 | ip_vs_stats.ustats.outbps); |
1829 | spin_unlock_bh(&ip_vs_stats.lock); | 1973 | spin_unlock_bh(&ip_vs_stats.lock); |
1830 | 1974 | ||
1831 | return 0; | 1975 | return 0; |
@@ -1900,14 +2044,44 @@ static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = { | |||
1900 | [SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN, | 2044 | [SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN, |
1901 | }; | 2045 | }; |
1902 | 2046 | ||
2047 | static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc, | ||
2048 | struct ip_vs_service_user *usvc_compat) | ||
2049 | { | ||
2050 | usvc->af = AF_INET; | ||
2051 | usvc->protocol = usvc_compat->protocol; | ||
2052 | usvc->addr.ip = usvc_compat->addr; | ||
2053 | usvc->port = usvc_compat->port; | ||
2054 | usvc->fwmark = usvc_compat->fwmark; | ||
2055 | |||
2056 | /* Deep copy of sched_name is not needed here */ | ||
2057 | usvc->sched_name = usvc_compat->sched_name; | ||
2058 | |||
2059 | usvc->flags = usvc_compat->flags; | ||
2060 | usvc->timeout = usvc_compat->timeout; | ||
2061 | usvc->netmask = usvc_compat->netmask; | ||
2062 | } | ||
2063 | |||
2064 | static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest, | ||
2065 | struct ip_vs_dest_user *udest_compat) | ||
2066 | { | ||
2067 | udest->addr.ip = udest_compat->addr; | ||
2068 | udest->port = udest_compat->port; | ||
2069 | udest->conn_flags = udest_compat->conn_flags; | ||
2070 | udest->weight = udest_compat->weight; | ||
2071 | udest->u_threshold = udest_compat->u_threshold; | ||
2072 | udest->l_threshold = udest_compat->l_threshold; | ||
2073 | } | ||
2074 | |||
1903 | static int | 2075 | static int |
1904 | do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | 2076 | do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) |
1905 | { | 2077 | { |
1906 | int ret; | 2078 | int ret; |
1907 | unsigned char arg[MAX_ARG_LEN]; | 2079 | unsigned char arg[MAX_ARG_LEN]; |
1908 | struct ip_vs_service_user *usvc; | 2080 | struct ip_vs_service_user *usvc_compat; |
2081 | struct ip_vs_service_user_kern usvc; | ||
1909 | struct ip_vs_service *svc; | 2082 | struct ip_vs_service *svc; |
1910 | struct ip_vs_dest_user *udest; | 2083 | struct ip_vs_dest_user *udest_compat; |
2084 | struct ip_vs_dest_user_kern udest; | ||
1911 | 2085 | ||
1912 | if (!capable(CAP_NET_ADMIN)) | 2086 | if (!capable(CAP_NET_ADMIN)) |
1913 | return -EPERM; | 2087 | return -EPERM; |
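The .ip and .in6 members appearing throughout this patch belong to union nf_inet_addr, which lets the same service and destination structures hold either an IPv4 or an IPv6 address; only the af field says which view is valid. Approximately, per the netfilter headers of this period (shown for orientation, not part of this patch):

/* Approximate definition: one 128-bit blob, several typed views. */
union nf_inet_addr {
	__u32		all[4];
	__be32		ip;	/* IPv4, also viewable through .in */
	__be32		ip6[4];
	struct in_addr	in;
	struct in6_addr	in6;	/* IPv6 */
};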
@@ -1947,35 +2121,40 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
1947 | goto out_unlock; | 2121 | goto out_unlock; |
1948 | } | 2122 | } |
1949 | 2123 | ||
1950 | usvc = (struct ip_vs_service_user *)arg; | 2124 | usvc_compat = (struct ip_vs_service_user *)arg; |
1951 | udest = (struct ip_vs_dest_user *)(usvc + 1); | 2125 | udest_compat = (struct ip_vs_dest_user *)(usvc_compat + 1); |
2126 | |||
2127 | /* We only use the new structs internally, so copy userspace compat | ||
2128 | * structs to extended internal versions */ | ||
2129 | ip_vs_copy_usvc_compat(&usvc, usvc_compat); | ||
2130 | ip_vs_copy_udest_compat(&udest, udest_compat); | ||
1952 | 2131 | ||
1953 | if (cmd == IP_VS_SO_SET_ZERO) { | 2132 | if (cmd == IP_VS_SO_SET_ZERO) { |
1954 | /* if no service address is set, zero counters in all */ | 2133 | /* if no service address is set, zero counters in all */ |
1955 | if (!usvc->fwmark && !usvc->addr && !usvc->port) { | 2134 | if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) { |
1956 | ret = ip_vs_zero_all(); | 2135 | ret = ip_vs_zero_all(); |
1957 | goto out_unlock; | 2136 | goto out_unlock; |
1958 | } | 2137 | } |
1959 | } | 2138 | } |
1960 | 2139 | ||
1961 | /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */ | 2140 | /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */ |
1962 | if (usvc->protocol!=IPPROTO_TCP && usvc->protocol!=IPPROTO_UDP) { | 2141 | if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) { |
1963 | IP_VS_ERR("set_ctl: invalid protocol: %d %d.%d.%d.%d:%d %s\n", | 2142 | IP_VS_ERR("set_ctl: invalid protocol: %d %d.%d.%d.%d:%d %s\n", |
1964 | usvc->protocol, NIPQUAD(usvc->addr), | 2143 | usvc.protocol, NIPQUAD(usvc.addr.ip), |
1965 | ntohs(usvc->port), usvc->sched_name); | 2144 | ntohs(usvc.port), usvc.sched_name); |
1966 | ret = -EFAULT; | 2145 | ret = -EFAULT; |
1967 | goto out_unlock; | 2146 | goto out_unlock; |
1968 | } | 2147 | } |
1969 | 2148 | ||
1970 | /* Lookup the exact service by <protocol, addr, port> or fwmark */ | 2149 | /* Lookup the exact service by <protocol, addr, port> or fwmark */ |
1971 | if (usvc->fwmark == 0) | 2150 | if (usvc.fwmark == 0) |
1972 | svc = __ip_vs_service_get(usvc->protocol, | 2151 | svc = __ip_vs_service_get(usvc.af, usvc.protocol, |
1973 | usvc->addr, usvc->port); | 2152 | &usvc.addr, usvc.port); |
1974 | else | 2153 | else |
1975 | svc = __ip_vs_svc_fwm_get(usvc->fwmark); | 2154 | svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark); |
1976 | 2155 | ||
1977 | if (cmd != IP_VS_SO_SET_ADD | 2156 | if (cmd != IP_VS_SO_SET_ADD |
1978 | && (svc == NULL || svc->protocol != usvc->protocol)) { | 2157 | && (svc == NULL || svc->protocol != usvc.protocol)) { |
1979 | ret = -ESRCH; | 2158 | ret = -ESRCH; |
1980 | goto out_unlock; | 2159 | goto out_unlock; |
1981 | } | 2160 | } |
@@ -1985,10 +2164,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
1985 | if (svc != NULL) | 2164 | if (svc != NULL) |
1986 | ret = -EEXIST; | 2165 | ret = -EEXIST; |
1987 | else | 2166 | else |
1988 | ret = ip_vs_add_service(usvc, &svc); | 2167 | ret = ip_vs_add_service(&usvc, &svc); |
1989 | break; | 2168 | break; |
1990 | case IP_VS_SO_SET_EDIT: | 2169 | case IP_VS_SO_SET_EDIT: |
1991 | ret = ip_vs_edit_service(svc, usvc); | 2170 | ret = ip_vs_edit_service(svc, &usvc); |
1992 | break; | 2171 | break; |
1993 | case IP_VS_SO_SET_DEL: | 2172 | case IP_VS_SO_SET_DEL: |
1994 | ret = ip_vs_del_service(svc); | 2173 | ret = ip_vs_del_service(svc); |
@@ -1999,13 +2178,13 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
1999 | ret = ip_vs_zero_service(svc); | 2178 | ret = ip_vs_zero_service(svc); |
2000 | break; | 2179 | break; |
2001 | case IP_VS_SO_SET_ADDDEST: | 2180 | case IP_VS_SO_SET_ADDDEST: |
2002 | ret = ip_vs_add_dest(svc, udest); | 2181 | ret = ip_vs_add_dest(svc, &udest); |
2003 | break; | 2182 | break; |
2004 | case IP_VS_SO_SET_EDITDEST: | 2183 | case IP_VS_SO_SET_EDITDEST: |
2005 | ret = ip_vs_edit_dest(svc, udest); | 2184 | ret = ip_vs_edit_dest(svc, &udest); |
2006 | break; | 2185 | break; |
2007 | case IP_VS_SO_SET_DELDEST: | 2186 | case IP_VS_SO_SET_DELDEST: |
2008 | ret = ip_vs_del_dest(svc, udest); | 2187 | ret = ip_vs_del_dest(svc, &udest); |
2009 | break; | 2188 | break; |
2010 | default: | 2189 | default: |
2011 | ret = -EINVAL; | 2190 | ret = -EINVAL; |
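The do_ip_vs_set_ctl() path above is still driven by the legacy setsockopt interface used by ipvsadm; the compat-copy helpers only translate those fixed IPv4-only structures into the new address-family-aware kernel structs. A hedged userspace sketch of adding a service over that old interface (error handling trimmed; the header path and struct layout follow the ip_vs.h of this era and should be treated as assumptions):

/* Userspace sketch: add a TCP virtual service via the legacy sockopt API. */
#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ip_vs.h>	/* struct ip_vs_service_user, IP_VS_SO_SET_ADD */

int add_service(void)
{
	struct ip_vs_service_user svc;
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);	/* needs CAP_NET_ADMIN */

	memset(&svc, 0, sizeof(svc));
	svc.protocol = IPPROTO_TCP;
	svc.addr     = inet_addr("192.0.2.1");			/* example VIP */
	svc.port     = htons(80);
	svc.netmask  = htonl(0xffffffff);
	strncpy(svc.sched_name, "wlc", sizeof(svc.sched_name) - 1);

	/* Old get/setsockopt interface: IPv4 only, as the checks above enforce. */
	return setsockopt(fd, IPPROTO_IP, IP_VS_SO_SET_ADD, &svc, sizeof(svc));
}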
@@ -2028,7 +2207,7 @@ static void | |||
2028 | ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src) | 2207 | ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src) |
2029 | { | 2208 | { |
2030 | spin_lock_bh(&src->lock); | 2209 | spin_lock_bh(&src->lock); |
2031 | memcpy(dst, src, (char*)&src->lock - (char*)src); | 2210 | memcpy(dst, &src->ustats, sizeof(*dst)); |
2032 | spin_unlock_bh(&src->lock); | 2211 | spin_unlock_bh(&src->lock); |
2033 | } | 2212 | } |
2034 | 2213 | ||
@@ -2036,7 +2215,7 @@ static void | |||
2036 | ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) | 2215 | ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) |
2037 | { | 2216 | { |
2038 | dst->protocol = src->protocol; | 2217 | dst->protocol = src->protocol; |
2039 | dst->addr = src->addr; | 2218 | dst->addr = src->addr.ip; |
2040 | dst->port = src->port; | 2219 | dst->port = src->port; |
2041 | dst->fwmark = src->fwmark; | 2220 | dst->fwmark = src->fwmark; |
2042 | strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name)); | 2221 | strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name)); |
@@ -2058,6 +2237,10 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get, | |||
2058 | 2237 | ||
2059 | for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { | 2238 | for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { |
2060 | list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { | 2239 | list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { |
2240 | /* Only expose IPv4 entries to old interface */ | ||
2241 | if (svc->af != AF_INET) | ||
2242 | continue; | ||
2243 | |||
2061 | if (count >= get->num_services) | 2244 | if (count >= get->num_services) |
2062 | goto out; | 2245 | goto out; |
2063 | memset(&entry, 0, sizeof(entry)); | 2246 | memset(&entry, 0, sizeof(entry)); |
@@ -2073,6 +2256,10 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get, | |||
2073 | 2256 | ||
2074 | for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { | 2257 | for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { |
2075 | list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { | 2258 | list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { |
2259 | /* Only expose IPv4 entries to old interface */ | ||
2260 | if (svc->af != AF_INET) | ||
2261 | continue; | ||
2262 | |||
2076 | if (count >= get->num_services) | 2263 | if (count >= get->num_services) |
2077 | goto out; | 2264 | goto out; |
2078 | memset(&entry, 0, sizeof(entry)); | 2265 | memset(&entry, 0, sizeof(entry)); |
@@ -2094,13 +2281,15 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get, | |||
2094 | struct ip_vs_get_dests __user *uptr) | 2281 | struct ip_vs_get_dests __user *uptr) |
2095 | { | 2282 | { |
2096 | struct ip_vs_service *svc; | 2283 | struct ip_vs_service *svc; |
2284 | union nf_inet_addr addr = { .ip = get->addr }; | ||
2097 | int ret = 0; | 2285 | int ret = 0; |
2098 | 2286 | ||
2099 | if (get->fwmark) | 2287 | if (get->fwmark) |
2100 | svc = __ip_vs_svc_fwm_get(get->fwmark); | 2288 | svc = __ip_vs_svc_fwm_get(AF_INET, get->fwmark); |
2101 | else | 2289 | else |
2102 | svc = __ip_vs_service_get(get->protocol, | 2290 | svc = __ip_vs_service_get(AF_INET, get->protocol, &addr, |
2103 | get->addr, get->port); | 2291 | get->port); |
2292 | |||
2104 | if (svc) { | 2293 | if (svc) { |
2105 | int count = 0; | 2294 | int count = 0; |
2106 | struct ip_vs_dest *dest; | 2295 | struct ip_vs_dest *dest; |
@@ -2110,7 +2299,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get, | |||
2110 | if (count >= get->num_dests) | 2299 | if (count >= get->num_dests) |
2111 | break; | 2300 | break; |
2112 | 2301 | ||
2113 | entry.addr = dest->addr; | 2302 | entry.addr = dest->addr.ip; |
2114 | entry.port = dest->port; | 2303 | entry.port = dest->port; |
2115 | entry.conn_flags = atomic_read(&dest->conn_flags); | 2304 | entry.conn_flags = atomic_read(&dest->conn_flags); |
2116 | entry.weight = atomic_read(&dest->weight); | 2305 | entry.weight = atomic_read(&dest->weight); |
@@ -2235,13 +2424,15 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2235 | { | 2424 | { |
2236 | struct ip_vs_service_entry *entry; | 2425 | struct ip_vs_service_entry *entry; |
2237 | struct ip_vs_service *svc; | 2426 | struct ip_vs_service *svc; |
2427 | union nf_inet_addr addr; | ||
2238 | 2428 | ||
2239 | entry = (struct ip_vs_service_entry *)arg; | 2429 | entry = (struct ip_vs_service_entry *)arg; |
2430 | addr.ip = entry->addr; | ||
2240 | if (entry->fwmark) | 2431 | if (entry->fwmark) |
2241 | svc = __ip_vs_svc_fwm_get(entry->fwmark); | 2432 | svc = __ip_vs_svc_fwm_get(AF_INET, entry->fwmark); |
2242 | else | 2433 | else |
2243 | svc = __ip_vs_service_get(entry->protocol, | 2434 | svc = __ip_vs_service_get(AF_INET, entry->protocol, |
2244 | entry->addr, entry->port); | 2435 | &addr, entry->port); |
2245 | if (svc) { | 2436 | if (svc) { |
2246 | ip_vs_copy_service(entry, svc); | 2437 | ip_vs_copy_service(entry, svc); |
2247 | if (copy_to_user(user, entry, sizeof(*entry)) != 0) | 2438 | if (copy_to_user(user, entry, sizeof(*entry)) != 0) |
@@ -2320,6 +2511,875 @@ static struct nf_sockopt_ops ip_vs_sockopts = { | |||
2320 | .owner = THIS_MODULE, | 2511 | .owner = THIS_MODULE, |
2321 | }; | 2512 | }; |
2322 | 2513 | ||
2514 | /* | ||
2515 | * Generic Netlink interface | ||
2516 | */ | ||
2517 | |||
2518 | /* IPVS genetlink family */ | ||
2519 | static struct genl_family ip_vs_genl_family = { | ||
2520 | .id = GENL_ID_GENERATE, | ||
2521 | .hdrsize = 0, | ||
2522 | .name = IPVS_GENL_NAME, | ||
2523 | .version = IPVS_GENL_VERSION, | ||
2524 | .maxattr = IPVS_CMD_MAX, | ||
2525 | }; | ||
2526 | |||
2527 | /* Policy used for first-level command attributes */ | ||
2528 | static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = { | ||
2529 | [IPVS_CMD_ATTR_SERVICE] = { .type = NLA_NESTED }, | ||
2530 | [IPVS_CMD_ATTR_DEST] = { .type = NLA_NESTED }, | ||
2531 | [IPVS_CMD_ATTR_DAEMON] = { .type = NLA_NESTED }, | ||
2532 | [IPVS_CMD_ATTR_TIMEOUT_TCP] = { .type = NLA_U32 }, | ||
2533 | [IPVS_CMD_ATTR_TIMEOUT_TCP_FIN] = { .type = NLA_U32 }, | ||
2534 | [IPVS_CMD_ATTR_TIMEOUT_UDP] = { .type = NLA_U32 }, | ||
2535 | }; | ||
2536 | |||
2537 | /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DAEMON */ | ||
2538 | static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = { | ||
2539 | [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 }, | ||
2540 | [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING, | ||
2541 | .len = IP_VS_IFNAME_MAXLEN }, | ||
2542 | [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 }, | ||
2543 | }; | ||
2544 | |||
2545 | /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */ | ||
2546 | static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = { | ||
2547 | [IPVS_SVC_ATTR_AF] = { .type = NLA_U16 }, | ||
2548 | [IPVS_SVC_ATTR_PROTOCOL] = { .type = NLA_U16 }, | ||
2549 | [IPVS_SVC_ATTR_ADDR] = { .type = NLA_BINARY, | ||
2550 | .len = sizeof(union nf_inet_addr) }, | ||
2551 | [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 }, | ||
2552 | [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 }, | ||
2553 | [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING, | ||
2554 | .len = IP_VS_SCHEDNAME_MAXLEN }, | ||
2555 | [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY, | ||
2556 | .len = sizeof(struct ip_vs_flags) }, | ||
2557 | [IPVS_SVC_ATTR_TIMEOUT] = { .type = NLA_U32 }, | ||
2558 | [IPVS_SVC_ATTR_NETMASK] = { .type = NLA_U32 }, | ||
2559 | [IPVS_SVC_ATTR_STATS] = { .type = NLA_NESTED }, | ||
2560 | }; | ||
2561 | |||
2562 | /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DEST */ | ||
2563 | static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = { | ||
2564 | [IPVS_DEST_ATTR_ADDR] = { .type = NLA_BINARY, | ||
2565 | .len = sizeof(union nf_inet_addr) }, | ||
2566 | [IPVS_DEST_ATTR_PORT] = { .type = NLA_U16 }, | ||
2567 | [IPVS_DEST_ATTR_FWD_METHOD] = { .type = NLA_U32 }, | ||
2568 | [IPVS_DEST_ATTR_WEIGHT] = { .type = NLA_U32 }, | ||
2569 | [IPVS_DEST_ATTR_U_THRESH] = { .type = NLA_U32 }, | ||
2570 | [IPVS_DEST_ATTR_L_THRESH] = { .type = NLA_U32 }, | ||
2571 | [IPVS_DEST_ATTR_ACTIVE_CONNS] = { .type = NLA_U32 }, | ||
2572 | [IPVS_DEST_ATTR_INACT_CONNS] = { .type = NLA_U32 }, | ||
2573 | [IPVS_DEST_ATTR_PERSIST_CONNS] = { .type = NLA_U32 }, | ||
2574 | [IPVS_DEST_ATTR_STATS] = { .type = NLA_NESTED }, | ||
2575 | }; | ||
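The nla_policy tables above let the netlink core check attribute types and lengths before the command handlers touch them. A minimal sketch of parsing a nested attribute against such a policy (the function name is invented; the nla_parse_nested() call matches its use later in this patch and assumes the same includes as this file):

/* Sketch: validate and pick apart a nested destination attribute. */
static int example_parse(struct nlattr *nested)
{
	struct nlattr *tb[IPVS_DEST_ATTR_MAX + 1];

	/* Each known attribute is checked against ip_vs_dest_policy. */
	if (nla_parse_nested(tb, IPVS_DEST_ATTR_MAX, nested, ip_vs_dest_policy))
		return -EINVAL;

	if (tb[IPVS_DEST_ATTR_PORT])
		return ntohs(nla_get_u16(tb[IPVS_DEST_ATTR_PORT]));
	return -ENOENT;
}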
2576 | |||
2577 | static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type, | ||
2578 | struct ip_vs_stats *stats) | ||
2579 | { | ||
2580 | struct nlattr *nl_stats = nla_nest_start(skb, container_type); | ||
2581 | if (!nl_stats) | ||
2582 | return -EMSGSIZE; | ||
2583 | |||
2584 | spin_lock_bh(&stats->lock); | ||
2585 | |||
2586 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, stats->ustats.conns); | ||
2587 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, stats->ustats.inpkts); | ||
2588 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, stats->ustats.outpkts); | ||
2589 | NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, stats->ustats.inbytes); | ||
2590 | NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, stats->ustats.outbytes); | ||
2591 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, stats->ustats.cps); | ||
2592 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, stats->ustats.inpps); | ||
2593 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, stats->ustats.outpps); | ||
2594 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, stats->ustats.inbps); | ||
2595 | NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, stats->ustats.outbps); | ||
2596 | |||
2597 | spin_unlock_bh(&stats->lock); | ||
2598 | |||
2599 | nla_nest_end(skb, nl_stats); | ||
2600 | |||
2601 | return 0; | ||
2602 | |||
2603 | nla_put_failure: | ||
2604 | spin_unlock_bh(&stats->lock); | ||
2605 | nla_nest_cancel(skb, nl_stats); | ||
2606 | return -EMSGSIZE; | ||
2607 | } | ||
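The NLA_PUT_* calls in the fill helpers are macros around nla_put() that jump to a local nla_put_failure label when the skb runs out of tailroom, which is why every fill function above carries that label and cancels its nesting there. Roughly, per the net/netlink.h of this period (illustrative, not verbatim):

/* Simplified expansion of the NLA_PUT macro family. */
#define NLA_PUT(skb, attrtype, attrlen, data)			\
	do {							\
		if (nla_put(skb, attrtype, attrlen, data) < 0)	\
			goto nla_put_failure;			\
	} while (0)

#define NLA_PUT_U32(skb, attrtype, value)			\
	do {							\
		u32 __tmp = value;				\
		NLA_PUT(skb, attrtype, sizeof(u32), &__tmp);	\
	} while (0)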
2608 | |||
2609 | static int ip_vs_genl_fill_service(struct sk_buff *skb, | ||
2610 | struct ip_vs_service *svc) | ||
2611 | { | ||
2612 | struct nlattr *nl_service; | ||
2613 | struct ip_vs_flags flags = { .flags = svc->flags, | ||
2614 | .mask = ~0 }; | ||
2615 | |||
2616 | nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE); | ||
2617 | if (!nl_service) | ||
2618 | return -EMSGSIZE; | ||
2619 | |||
2620 | NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af); | ||
2621 | |||
2622 | if (svc->fwmark) { | ||
2623 | NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark); | ||
2624 | } else { | ||
2625 | NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol); | ||
2626 | NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr); | ||
2627 | NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port); | ||
2628 | } | ||
2629 | |||
2630 | NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name); | ||
2631 | NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags); | ||
2632 | NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ); | ||
2633 | NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask); | ||
2634 | |||
2635 | if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats)) | ||
2636 | goto nla_put_failure; | ||
2637 | |||
2638 | nla_nest_end(skb, nl_service); | ||
2639 | |||
2640 | return 0; | ||
2641 | |||
2642 | nla_put_failure: | ||
2643 | nla_nest_cancel(skb, nl_service); | ||
2644 | return -EMSGSIZE; | ||
2645 | } | ||
2646 | |||
2647 | static int ip_vs_genl_dump_service(struct sk_buff *skb, | ||
2648 | struct ip_vs_service *svc, | ||
2649 | struct netlink_callback *cb) | ||
2650 | { | ||
2651 | void *hdr; | ||
2652 | |||
2653 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, | ||
2654 | &ip_vs_genl_family, NLM_F_MULTI, | ||
2655 | IPVS_CMD_NEW_SERVICE); | ||
2656 | if (!hdr) | ||
2657 | return -EMSGSIZE; | ||
2658 | |||
2659 | if (ip_vs_genl_fill_service(skb, svc) < 0) | ||
2660 | goto nla_put_failure; | ||
2661 | |||
2662 | return genlmsg_end(skb, hdr); | ||
2663 | |||
2664 | nla_put_failure: | ||
2665 | genlmsg_cancel(skb, hdr); | ||
2666 | return -EMSGSIZE; | ||
2667 | } | ||
2668 | |||
2669 | static int ip_vs_genl_dump_services(struct sk_buff *skb, | ||
2670 | struct netlink_callback *cb) | ||
2671 | { | ||
2672 | int idx = 0, i; | ||
2673 | int start = cb->args[0]; | ||
2674 | struct ip_vs_service *svc; | ||
2675 | |||
2676 | mutex_lock(&__ip_vs_mutex); | ||
2677 | for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) { | ||
2678 | list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) { | ||
2679 | if (++idx <= start) | ||
2680 | continue; | ||
2681 | if (ip_vs_genl_dump_service(skb, svc, cb) < 0) { | ||
2682 | idx--; | ||
2683 | goto nla_put_failure; | ||
2684 | } | ||
2685 | } | ||
2686 | } | ||
2687 | |||
2688 | for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) { | ||
2689 | list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) { | ||
2690 | if (++idx <= start) | ||
2691 | continue; | ||
2692 | if (ip_vs_genl_dump_service(skb, svc, cb) < 0) { | ||
2693 | idx--; | ||
2694 | goto nla_put_failure; | ||
2695 | } | ||
2696 | } | ||
2697 | } | ||
2698 | |||
2699 | nla_put_failure: | ||
2700 | mutex_unlock(&__ip_vs_mutex); | ||
2701 | cb->args[0] = idx; | ||
2702 | |||
2703 | return skb->len; | ||
2704 | } | ||
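The dump callback above may be invoked several times while userspace reads the multipart reply; cb->args[0] records how many entries have already been emitted so each pass can skip them and resume where the previous skb filled up. The core of that idiom, stripped of the IPVS specifics (names illustrative):

/* Sketch of the netlink dump-resume idiom used above. */
static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx = 0, start = cb->args[0];
	struct example_entry *e;

	list_for_each_entry(e, &example_list, list) {
		if (++idx <= start)
			continue;		/* already sent in an earlier pass */
		if (example_fill(skb, e, cb) < 0) {
			idx--;			/* retry this entry on the next pass */
			break;
		}
	}
	cb->args[0] = idx;			/* resume point for the next call */
	return skb->len;			/* non-zero: dump not finished yet */
}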
2705 | |||
2706 | static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc, | ||
2707 | struct nlattr *nla, int full_entry) | ||
2708 | { | ||
2709 | struct nlattr *attrs[IPVS_SVC_ATTR_MAX + 1]; | ||
2710 | struct nlattr *nla_af, *nla_port, *nla_fwmark, *nla_protocol, *nla_addr; | ||
2711 | |||
2712 | /* Parse mandatory identifying service fields first */ | ||
2713 | if (nla == NULL || | ||
2714 | nla_parse_nested(attrs, IPVS_SVC_ATTR_MAX, nla, ip_vs_svc_policy)) | ||
2715 | return -EINVAL; | ||
2716 | |||
2717 | nla_af = attrs[IPVS_SVC_ATTR_AF]; | ||
2718 | nla_protocol = attrs[IPVS_SVC_ATTR_PROTOCOL]; | ||
2719 | nla_addr = attrs[IPVS_SVC_ATTR_ADDR]; | ||
2720 | nla_port = attrs[IPVS_SVC_ATTR_PORT]; | ||
2721 | nla_fwmark = attrs[IPVS_SVC_ATTR_FWMARK]; | ||
2722 | |||
2723 | if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr)))) | ||
2724 | return -EINVAL; | ||
2725 | |||
2726 | usvc->af = nla_get_u16(nla_af); | ||
2727 | #ifdef CONFIG_IP_VS_IPV6 | ||
2728 | if (usvc->af != AF_INET && usvc->af != AF_INET6) | ||
2729 | #else | ||
2730 | if (usvc->af != AF_INET) | ||
2731 | #endif | ||
2732 | return -EAFNOSUPPORT; | ||
2733 | |||
2734 | if (nla_fwmark) { | ||
2735 | usvc->protocol = IPPROTO_TCP; | ||
2736 | usvc->fwmark = nla_get_u32(nla_fwmark); | ||
2737 | } else { | ||
2738 | usvc->protocol = nla_get_u16(nla_protocol); | ||
2739 | nla_memcpy(&usvc->addr, nla_addr, sizeof(usvc->addr)); | ||
2740 | usvc->port = nla_get_u16(nla_port); | ||
2741 | usvc->fwmark = 0; | ||
2742 | } | ||
2743 | |||
2744 | /* If a full entry was requested, check for the additional fields */ | ||
2745 | if (full_entry) { | ||
2746 | struct nlattr *nla_sched, *nla_flags, *nla_timeout, | ||
2747 | *nla_netmask; | ||
2748 | struct ip_vs_flags flags; | ||
2749 | struct ip_vs_service *svc; | ||
2750 | |||
2751 | nla_sched = attrs[IPVS_SVC_ATTR_SCHED_NAME]; | ||
2752 | nla_flags = attrs[IPVS_SVC_ATTR_FLAGS]; | ||
2753 | nla_timeout = attrs[IPVS_SVC_ATTR_TIMEOUT]; | ||
2754 | nla_netmask = attrs[IPVS_SVC_ATTR_NETMASK]; | ||
2755 | |||
2756 | if (!(nla_sched && nla_flags && nla_timeout && nla_netmask)) | ||
2757 | return -EINVAL; | ||
2758 | |||
2759 | nla_memcpy(&flags, nla_flags, sizeof(flags)); | ||
2760 | |||
2761 | /* prefill flags from service if it already exists */ | ||
2762 | if (usvc->fwmark) | ||
2763 | svc = __ip_vs_svc_fwm_get(usvc->af, usvc->fwmark); | ||
2764 | else | ||
2765 | svc = __ip_vs_service_get(usvc->af, usvc->protocol, | ||
2766 | &usvc->addr, usvc->port); | ||
2767 | if (svc) { | ||
2768 | usvc->flags = svc->flags; | ||
2769 | ip_vs_service_put(svc); | ||
2770 | } else | ||
2771 | usvc->flags = 0; | ||
2772 | |||
2773 | /* set new flags from userland */ | ||
2774 | usvc->flags = (usvc->flags & ~flags.mask) | | ||
2775 | (flags.flags & flags.mask); | ||
2776 | usvc->sched_name = nla_data(nla_sched); | ||
2777 | usvc->timeout = nla_get_u32(nla_timeout); | ||
2778 | usvc->netmask = nla_get_u32(nla_netmask); | ||
2779 | } | ||
2780 | |||
2781 | return 0; | ||
2782 | } | ||
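The flags handling at the end of ip_vs_genl_parse_service() merges the userspace flags into the existing service flags under a mask, so a caller can change one flag without clobbering the rest. A small worked example of that expression (bit values invented for illustration):

/* Worked example of: flags = (old & ~mask) | (new & mask) */
unsigned int old_flags = 0x1;	/* e.g. persistence already set on the service */
unsigned int new_flags = 0x4;	/* bit userspace wants to set (.flags)          */
unsigned int mask      = 0x4;	/* ...and the only bit it asked to touch (.mask) */
unsigned int merged    = (old_flags & ~mask) | (new_flags & mask);
/* merged == 0x5: the requested bit is set, the untouched bit survives. */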
2783 | |||
2784 | static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla) | ||
2785 | { | ||
2786 | struct ip_vs_service_user_kern usvc; | ||
2787 | int ret; | ||
2788 | |||
2789 | ret = ip_vs_genl_parse_service(&usvc, nla, 0); | ||
2790 | if (ret) | ||
2791 | return ERR_PTR(ret); | ||
2792 | |||
2793 | if (usvc.fwmark) | ||
2794 | return __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark); | ||
2795 | else | ||
2796 | return __ip_vs_service_get(usvc.af, usvc.protocol, | ||
2797 | &usvc.addr, usvc.port); | ||
2798 | } | ||
2799 | |||
2800 | static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest) | ||
2801 | { | ||
2802 | struct nlattr *nl_dest; | ||
2803 | |||
2804 | nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST); | ||
2805 | if (!nl_dest) | ||
2806 | return -EMSGSIZE; | ||
2807 | |||
2808 | NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr); | ||
2809 | NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port); | ||
2810 | |||
2811 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD, | ||
2812 | atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); | ||
2813 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)); | ||
2814 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold); | ||
2815 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold); | ||
2816 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS, | ||
2817 | atomic_read(&dest->activeconns)); | ||
2818 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS, | ||
2819 | atomic_read(&dest->inactconns)); | ||
2820 | NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS, | ||
2821 | atomic_read(&dest->persistconns)); | ||
2822 | |||
2823 | if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats)) | ||
2824 | goto nla_put_failure; | ||
2825 | |||
2826 | nla_nest_end(skb, nl_dest); | ||
2827 | |||
2828 | return 0; | ||
2829 | |||
2830 | nla_put_failure: | ||
2831 | nla_nest_cancel(skb, nl_dest); | ||
2832 | return -EMSGSIZE; | ||
2833 | } | ||
2834 | |||
2835 | static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest, | ||
2836 | struct netlink_callback *cb) | ||
2837 | { | ||
2838 | void *hdr; | ||
2839 | |||
2840 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, | ||
2841 | &ip_vs_genl_family, NLM_F_MULTI, | ||
2842 | IPVS_CMD_NEW_DEST); | ||
2843 | if (!hdr) | ||
2844 | return -EMSGSIZE; | ||
2845 | |||
2846 | if (ip_vs_genl_fill_dest(skb, dest) < 0) | ||
2847 | goto nla_put_failure; | ||
2848 | |||
2849 | return genlmsg_end(skb, hdr); | ||
2850 | |||
2851 | nla_put_failure: | ||
2852 | genlmsg_cancel(skb, hdr); | ||
2853 | return -EMSGSIZE; | ||
2854 | } | ||
2855 | |||
2856 | static int ip_vs_genl_dump_dests(struct sk_buff *skb, | ||
2857 | struct netlink_callback *cb) | ||
2858 | { | ||
2859 | int idx = 0; | ||
2860 | int start = cb->args[0]; | ||
2861 | struct ip_vs_service *svc; | ||
2862 | struct ip_vs_dest *dest; | ||
2863 | struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1]; | ||
2864 | |||
2865 | mutex_lock(&__ip_vs_mutex); | ||
2866 | |||
2867 | /* Try to find the service for which to dump destinations */ | ||
2868 | if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, | ||
2869 | IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy)) | ||
2870 | goto out_err; | ||
2871 | |||
2872 | svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]); | ||
2873 | if (IS_ERR(svc) || svc == NULL) | ||
2874 | goto out_err; | ||
2875 | |||
2876 | /* Dump the destinations */ | ||
2877 | list_for_each_entry(dest, &svc->destinations, n_list) { | ||
2878 | if (++idx <= start) | ||
2879 | continue; | ||
2880 | if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) { | ||
2881 | idx--; | ||
2882 | goto nla_put_failure; | ||
2883 | } | ||
2884 | } | ||
2885 | |||
2886 | nla_put_failure: | ||
2887 | cb->args[0] = idx; | ||
2888 | ip_vs_service_put(svc); | ||
2889 | |||
2890 | out_err: | ||
2891 | mutex_unlock(&__ip_vs_mutex); | ||
2892 | |||
2893 | return skb->len; | ||
2894 | } | ||
2895 | |||
2896 | static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest, | ||
2897 | struct nlattr *nla, int full_entry) | ||
2898 | { | ||
2899 | struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1]; | ||
2900 | struct nlattr *nla_addr, *nla_port; | ||
2901 | |||
2902 | /* Parse mandatory identifying destination fields first */ | ||
2903 | if (nla == NULL || | ||
2904 | nla_parse_nested(attrs, IPVS_DEST_ATTR_MAX, nla, ip_vs_dest_policy)) | ||
2905 | return -EINVAL; | ||
2906 | |||
2907 | nla_addr = attrs[IPVS_DEST_ATTR_ADDR]; | ||
2908 | nla_port = attrs[IPVS_DEST_ATTR_PORT]; | ||
2909 | |||
2910 | if (!(nla_addr && nla_port)) | ||
2911 | return -EINVAL; | ||
2912 | |||
2913 | nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr)); | ||
2914 | udest->port = nla_get_u16(nla_port); | ||
2915 | |||
2916 | /* If a full entry was requested, check for the additional fields */ | ||
2917 | if (full_entry) { | ||
2918 | struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh, | ||
2919 | *nla_l_thresh; | ||
2920 | |||
2921 | nla_fwd = attrs[IPVS_DEST_ATTR_FWD_METHOD]; | ||
2922 | nla_weight = attrs[IPVS_DEST_ATTR_WEIGHT]; | ||
2923 | nla_u_thresh = attrs[IPVS_DEST_ATTR_U_THRESH]; | ||
2924 | nla_l_thresh = attrs[IPVS_DEST_ATTR_L_THRESH]; | ||
2925 | |||
2926 | if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh)) | ||
2927 | return -EINVAL; | ||
2928 | |||
2929 | udest->conn_flags = nla_get_u32(nla_fwd) | ||
2930 | & IP_VS_CONN_F_FWD_MASK; | ||
2931 | udest->weight = nla_get_u32(nla_weight); | ||
2932 | udest->u_threshold = nla_get_u32(nla_u_thresh); | ||
2933 | udest->l_threshold = nla_get_u32(nla_l_thresh); | ||
2934 | } | ||
2935 | |||
2936 | return 0; | ||
2937 | } | ||
2938 | |||
2939 | static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state, | ||
2940 | const char *mcast_ifn, __be32 syncid) | ||
2941 | { | ||
2942 | struct nlattr *nl_daemon; | ||
2943 | |||
2944 | nl_daemon = nla_nest_start(skb, IPVS_CMD_ATTR_DAEMON); | ||
2945 | if (!nl_daemon) | ||
2946 | return -EMSGSIZE; | ||
2947 | |||
2948 | NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state); | ||
2949 | NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn); | ||
2950 | NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid); | ||
2951 | |||
2952 | nla_nest_end(skb, nl_daemon); | ||
2953 | |||
2954 | return 0; | ||
2955 | |||
2956 | nla_put_failure: | ||
2957 | nla_nest_cancel(skb, nl_daemon); | ||
2958 | return -EMSGSIZE; | ||
2959 | } | ||
2960 | |||
2961 | static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state, | ||
2962 | const char *mcast_ifn, __be32 syncid, | ||
2963 | struct netlink_callback *cb) | ||
2964 | { | ||
2965 | void *hdr; | ||
2966 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, | ||
2967 | &ip_vs_genl_family, NLM_F_MULTI, | ||
2968 | IPVS_CMD_NEW_DAEMON); | ||
2969 | if (!hdr) | ||
2970 | return -EMSGSIZE; | ||
2971 | |||
2972 | if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid)) | ||
2973 | goto nla_put_failure; | ||
2974 | |||
2975 | return genlmsg_end(skb, hdr); | ||
2976 | |||
2977 | nla_put_failure: | ||
2978 | genlmsg_cancel(skb, hdr); | ||
2979 | return -EMSGSIZE; | ||
2980 | } | ||
2981 | |||
2982 | static int ip_vs_genl_dump_daemons(struct sk_buff *skb, | ||
2983 | struct netlink_callback *cb) | ||
2984 | { | ||
2985 | mutex_lock(&__ip_vs_mutex); | ||
2986 | if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) { | ||
2987 | if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER, | ||
2988 | ip_vs_master_mcast_ifn, | ||
2989 | ip_vs_master_syncid, cb) < 0) | ||
2990 | goto nla_put_failure; | ||
2991 | |||
2992 | cb->args[0] = 1; | ||
2993 | } | ||
2994 | |||
2995 | if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) { | ||
2996 | if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP, | ||
2997 | ip_vs_backup_mcast_ifn, | ||
2998 | ip_vs_backup_syncid, cb) < 0) | ||
2999 | goto nla_put_failure; | ||
3000 | |||
3001 | cb->args[1] = 1; | ||
3002 | } | ||
3003 | |||
3004 | nla_put_failure: | ||
3005 | mutex_unlock(&__ip_vs_mutex); | ||
3006 | |||
3007 | return skb->len; | ||
3008 | } | ||
3009 | |||
3010 | static int ip_vs_genl_new_daemon(struct nlattr **attrs) | ||
3011 | { | ||
3012 | if (!(attrs[IPVS_DAEMON_ATTR_STATE] && | ||
3013 | attrs[IPVS_DAEMON_ATTR_MCAST_IFN] && | ||
3014 | attrs[IPVS_DAEMON_ATTR_SYNC_ID])) | ||
3015 | return -EINVAL; | ||
3016 | |||
3017 | return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]), | ||
3018 | nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]), | ||
3019 | nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID])); | ||
3020 | } | ||
3021 | |||
3022 | static int ip_vs_genl_del_daemon(struct nlattr **attrs) | ||
3023 | { | ||
3024 | if (!attrs[IPVS_DAEMON_ATTR_STATE]) | ||
3025 | return -EINVAL; | ||
3026 | |||
3027 | return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); | ||
3028 | } | ||
3029 | |||
3030 | static int ip_vs_genl_set_config(struct nlattr **attrs) | ||
3031 | { | ||
3032 | struct ip_vs_timeout_user t; | ||
3033 | |||
3034 | __ip_vs_get_timeouts(&t); | ||
3035 | |||
3036 | if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]) | ||
3037 | t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]); | ||
3038 | |||
3039 | if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]) | ||
3040 | t.tcp_fin_timeout = | ||
3041 | nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]); | ||
3042 | |||
3043 | if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]) | ||
3044 | t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]); | ||
3045 | |||
3046 | return ip_vs_set_timeout(&t); | ||
3047 | } | ||
3048 | |||
3049 | static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) | ||
3050 | { | ||
3051 | struct ip_vs_service *svc = NULL; | ||
3052 | struct ip_vs_service_user_kern usvc; | ||
3053 | struct ip_vs_dest_user_kern udest; | ||
3054 | int ret = 0, cmd; | ||
3055 | int need_full_svc = 0, need_full_dest = 0; | ||
3056 | |||
3057 | cmd = info->genlhdr->cmd; | ||
3058 | |||
3059 | mutex_lock(&__ip_vs_mutex); | ||
3060 | |||
3061 | if (cmd == IPVS_CMD_FLUSH) { | ||
3062 | ret = ip_vs_flush(); | ||
3063 | goto out; | ||
3064 | } else if (cmd == IPVS_CMD_SET_CONFIG) { | ||
3065 | ret = ip_vs_genl_set_config(info->attrs); | ||
3066 | goto out; | ||
3067 | } else if (cmd == IPVS_CMD_NEW_DAEMON || | ||
3068 | cmd == IPVS_CMD_DEL_DAEMON) { | ||
3069 | |||
3070 | struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1]; | ||
3071 | |||
3072 | if (!info->attrs[IPVS_CMD_ATTR_DAEMON] || | ||
3073 | nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX, | ||
3074 | info->attrs[IPVS_CMD_ATTR_DAEMON], | ||
3075 | ip_vs_daemon_policy)) { | ||
3076 | ret = -EINVAL; | ||
3077 | goto out; | ||
3078 | } | ||
3079 | |||
3080 | if (cmd == IPVS_CMD_NEW_DAEMON) | ||
3081 | ret = ip_vs_genl_new_daemon(daemon_attrs); | ||
3082 | else | ||
3083 | ret = ip_vs_genl_del_daemon(daemon_attrs); | ||
3084 | goto out; | ||
3085 | } else if (cmd == IPVS_CMD_ZERO && | ||
3086 | !info->attrs[IPVS_CMD_ATTR_SERVICE]) { | ||
3087 | ret = ip_vs_zero_all(); | ||
3088 | goto out; | ||
3089 | } | ||
3090 | |||
3091 | /* All following commands require a service argument, so check if we | ||
3092 | * received a valid one. We need a full service specification when | ||
3093 | * adding / editing a service. Only identifying members otherwise. */ | ||
3094 | if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE) | ||
3095 | need_full_svc = 1; | ||
3096 | |||
3097 | ret = ip_vs_genl_parse_service(&usvc, | ||
3098 | info->attrs[IPVS_CMD_ATTR_SERVICE], | ||
3099 | need_full_svc); | ||
3100 | if (ret) | ||
3101 | goto out; | ||
3102 | |||
3103 | /* Lookup the exact service by <protocol, addr, port> or fwmark */ | ||
3104 | if (usvc.fwmark == 0) | ||
3105 | svc = __ip_vs_service_get(usvc.af, usvc.protocol, | ||
3106 | &usvc.addr, usvc.port); | ||
3107 | else | ||
3108 | svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark); | ||
3109 | |||
3110 | /* Unless we're adding a new service, the service must already exist */ | ||
3111 | if ((cmd != IPVS_CMD_NEW_SERVICE) && (svc == NULL)) { | ||
3112 | ret = -ESRCH; | ||
3113 | goto out; | ||
3114 | } | ||
3115 | |||
3116 | /* Destination commands require a valid destination argument. For | ||
3117 | * adding / editing a destination, we need a full destination | ||
3118 | * specification. */ | ||
3119 | if (cmd == IPVS_CMD_NEW_DEST || cmd == IPVS_CMD_SET_DEST || | ||
3120 | cmd == IPVS_CMD_DEL_DEST) { | ||
3121 | if (cmd != IPVS_CMD_DEL_DEST) | ||
3122 | need_full_dest = 1; | ||
3123 | |||
3124 | ret = ip_vs_genl_parse_dest(&udest, | ||
3125 | info->attrs[IPVS_CMD_ATTR_DEST], | ||
3126 | need_full_dest); | ||
3127 | if (ret) | ||
3128 | goto out; | ||
3129 | } | ||
3130 | |||
3131 | switch (cmd) { | ||
3132 | case IPVS_CMD_NEW_SERVICE: | ||
3133 | if (svc == NULL) | ||
3134 | ret = ip_vs_add_service(&usvc, &svc); | ||
3135 | else | ||
3136 | ret = -EEXIST; | ||
3137 | break; | ||
3138 | case IPVS_CMD_SET_SERVICE: | ||
3139 | ret = ip_vs_edit_service(svc, &usvc); | ||
3140 | break; | ||
3141 | case IPVS_CMD_DEL_SERVICE: | ||
3142 | ret = ip_vs_del_service(svc); | ||
3143 | break; | ||
3144 | case IPVS_CMD_NEW_DEST: | ||
3145 | ret = ip_vs_add_dest(svc, &udest); | ||
3146 | break; | ||
3147 | case IPVS_CMD_SET_DEST: | ||
3148 | ret = ip_vs_edit_dest(svc, &udest); | ||
3149 | break; | ||
3150 | case IPVS_CMD_DEL_DEST: | ||
3151 | ret = ip_vs_del_dest(svc, &udest); | ||
3152 | break; | ||
3153 | case IPVS_CMD_ZERO: | ||
3154 | ret = ip_vs_zero_service(svc); | ||
3155 | break; | ||
3156 | default: | ||
3157 | ret = -EINVAL; | ||
3158 | } | ||
3159 | |||
3160 | out: | ||
3161 | if (svc) | ||
3162 | ip_vs_service_put(svc); | ||
3163 | mutex_unlock(&__ip_vs_mutex); | ||
3164 | |||
3165 | return ret; | ||
3166 | } | ||
3167 | |||
3168 | static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) | ||
3169 | { | ||
3170 | struct sk_buff *msg; | ||
3171 | void *reply; | ||
3172 | int ret, cmd, reply_cmd; | ||
3173 | |||
3174 | cmd = info->genlhdr->cmd; | ||
3175 | |||
3176 | if (cmd == IPVS_CMD_GET_SERVICE) | ||
3177 | reply_cmd = IPVS_CMD_NEW_SERVICE; | ||
3178 | else if (cmd == IPVS_CMD_GET_INFO) | ||
3179 | reply_cmd = IPVS_CMD_SET_INFO; | ||
3180 | else if (cmd == IPVS_CMD_GET_CONFIG) | ||
3181 | reply_cmd = IPVS_CMD_SET_CONFIG; | ||
3182 | else { | ||
3183 | IP_VS_ERR("unknown Generic Netlink command\n"); | ||
3184 | return -EINVAL; | ||
3185 | } | ||
3186 | |||
3187 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
3188 | if (!msg) | ||
3189 | return -ENOMEM; | ||
3190 | |||
3191 | mutex_lock(&__ip_vs_mutex); | ||
3192 | |||
3193 | reply = genlmsg_put_reply(msg, info, &ip_vs_genl_family, 0, reply_cmd); | ||
3194 | if (reply == NULL) | ||
3195 | goto nla_put_failure; | ||
3196 | |||
3197 | switch (cmd) { | ||
3198 | case IPVS_CMD_GET_SERVICE: | ||
3199 | { | ||
3200 | struct ip_vs_service *svc; | ||
3201 | |||
3202 | svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]); | ||
3203 | if (IS_ERR(svc)) { | ||
3204 | ret = PTR_ERR(svc); | ||
3205 | goto out_err; | ||
3206 | } else if (svc) { | ||
3207 | ret = ip_vs_genl_fill_service(msg, svc); | ||
3208 | ip_vs_service_put(svc); | ||
3209 | if (ret) | ||
3210 | goto nla_put_failure; | ||
3211 | } else { | ||
3212 | ret = -ESRCH; | ||
3213 | goto out_err; | ||
3214 | } | ||
3215 | |||
3216 | break; | ||
3217 | } | ||
3218 | |||
3219 | case IPVS_CMD_GET_CONFIG: | ||
3220 | { | ||
3221 | struct ip_vs_timeout_user t; | ||
3222 | |||
3223 | __ip_vs_get_timeouts(&t); | ||
3224 | #ifdef CONFIG_IP_VS_PROTO_TCP | ||
3225 | NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout); | ||
3226 | NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, | ||
3227 | t.tcp_fin_timeout); | ||
3228 | #endif | ||
3229 | #ifdef CONFIG_IP_VS_PROTO_UDP | ||
3230 | NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout); | ||
3231 | #endif | ||
3232 | |||
3233 | break; | ||
3234 | } | ||
3235 | |||
3236 | case IPVS_CMD_GET_INFO: | ||
3237 | NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE); | ||
3238 | NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, | ||
3239 | IP_VS_CONN_TAB_SIZE); | ||
3240 | break; | ||
3241 | } | ||
3242 | |||
3243 | genlmsg_end(msg, reply); | ||
3244 | ret = genlmsg_unicast(msg, info->snd_pid); | ||
3245 | goto out; | ||
3246 | |||
3247 | nla_put_failure: | ||
3248 | IP_VS_ERR("not enough space in Netlink message\n"); | ||
3249 | ret = -EMSGSIZE; | ||
3250 | |||
3251 | out_err: | ||
3252 | nlmsg_free(msg); | ||
3253 | out: | ||
3254 | mutex_unlock(&__ip_vs_mutex); | ||
3255 | |||
3256 | return ret; | ||
3257 | } | ||
3258 | |||
3259 | |||
3260 | static struct genl_ops ip_vs_genl_ops[] __read_mostly = { | ||
3261 | { | ||
3262 | .cmd = IPVS_CMD_NEW_SERVICE, | ||
3263 | .flags = GENL_ADMIN_PERM, | ||
3264 | .policy = ip_vs_cmd_policy, | ||
3265 | .doit = ip_vs_genl_set_cmd, | ||
3266 | }, | ||
3267 | { | ||
3268 | .cmd = IPVS_CMD_SET_SERVICE, | ||
3269 | .flags = GENL_ADMIN_PERM, | ||
3270 | .policy = ip_vs_cmd_policy, | ||
3271 | .doit = ip_vs_genl_set_cmd, | ||
3272 | }, | ||
3273 | { | ||
3274 | .cmd = IPVS_CMD_DEL_SERVICE, | ||
3275 | .flags = GENL_ADMIN_PERM, | ||
3276 | .policy = ip_vs_cmd_policy, | ||
3277 | .doit = ip_vs_genl_set_cmd, | ||
3278 | }, | ||
3279 | { | ||
3280 | .cmd = IPVS_CMD_GET_SERVICE, | ||
3281 | .flags = GENL_ADMIN_PERM, | ||
3282 | .doit = ip_vs_genl_get_cmd, | ||
3283 | .dumpit = ip_vs_genl_dump_services, | ||
3284 | .policy = ip_vs_cmd_policy, | ||
3285 | }, | ||
3286 | { | ||
3287 | .cmd = IPVS_CMD_NEW_DEST, | ||
3288 | .flags = GENL_ADMIN_PERM, | ||
3289 | .policy = ip_vs_cmd_policy, | ||
3290 | .doit = ip_vs_genl_set_cmd, | ||
3291 | }, | ||
3292 | { | ||
3293 | .cmd = IPVS_CMD_SET_DEST, | ||
3294 | .flags = GENL_ADMIN_PERM, | ||
3295 | .policy = ip_vs_cmd_policy, | ||
3296 | .doit = ip_vs_genl_set_cmd, | ||
3297 | }, | ||
3298 | { | ||
3299 | .cmd = IPVS_CMD_DEL_DEST, | ||
3300 | .flags = GENL_ADMIN_PERM, | ||
3301 | .policy = ip_vs_cmd_policy, | ||
3302 | .doit = ip_vs_genl_set_cmd, | ||
3303 | }, | ||
3304 | { | ||
3305 | .cmd = IPVS_CMD_GET_DEST, | ||
3306 | .flags = GENL_ADMIN_PERM, | ||
3307 | .policy = ip_vs_cmd_policy, | ||
3308 | .dumpit = ip_vs_genl_dump_dests, | ||
3309 | }, | ||
3310 | { | ||
3311 | .cmd = IPVS_CMD_NEW_DAEMON, | ||
3312 | .flags = GENL_ADMIN_PERM, | ||
3313 | .policy = ip_vs_cmd_policy, | ||
3314 | .doit = ip_vs_genl_set_cmd, | ||
3315 | }, | ||
3316 | { | ||
3317 | .cmd = IPVS_CMD_DEL_DAEMON, | ||
3318 | .flags = GENL_ADMIN_PERM, | ||
3319 | .policy = ip_vs_cmd_policy, | ||
3320 | .doit = ip_vs_genl_set_cmd, | ||
3321 | }, | ||
3322 | { | ||
3323 | .cmd = IPVS_CMD_GET_DAEMON, | ||
3324 | .flags = GENL_ADMIN_PERM, | ||
3325 | .dumpit = ip_vs_genl_dump_daemons, | ||
3326 | }, | ||
3327 | { | ||
3328 | .cmd = IPVS_CMD_SET_CONFIG, | ||
3329 | .flags = GENL_ADMIN_PERM, | ||
3330 | .policy = ip_vs_cmd_policy, | ||
3331 | .doit = ip_vs_genl_set_cmd, | ||
3332 | }, | ||
3333 | { | ||
3334 | .cmd = IPVS_CMD_GET_CONFIG, | ||
3335 | .flags = GENL_ADMIN_PERM, | ||
3336 | .doit = ip_vs_genl_get_cmd, | ||
3337 | }, | ||
3338 | { | ||
3339 | .cmd = IPVS_CMD_GET_INFO, | ||
3340 | .flags = GENL_ADMIN_PERM, | ||
3341 | .doit = ip_vs_genl_get_cmd, | ||
3342 | }, | ||
3343 | { | ||
3344 | .cmd = IPVS_CMD_ZERO, | ||
3345 | .flags = GENL_ADMIN_PERM, | ||
3346 | .policy = ip_vs_cmd_policy, | ||
3347 | .doit = ip_vs_genl_set_cmd, | ||
3348 | }, | ||
3349 | { | ||
3350 | .cmd = IPVS_CMD_FLUSH, | ||
3351 | .flags = GENL_ADMIN_PERM, | ||
3352 | .doit = ip_vs_genl_set_cmd, | ||
3353 | }, | ||
3354 | }; | ||
3355 | |||
3356 | static int __init ip_vs_genl_register(void) | ||
3357 | { | ||
3358 | int ret, i; | ||
3359 | |||
3360 | ret = genl_register_family(&ip_vs_genl_family); | ||
3361 | if (ret) | ||
3362 | return ret; | ||
3363 | |||
3364 | for (i = 0; i < ARRAY_SIZE(ip_vs_genl_ops); i++) { | ||
3365 | ret = genl_register_ops(&ip_vs_genl_family, &ip_vs_genl_ops[i]); | ||
3366 | if (ret) | ||
3367 | goto err_out; | ||
3368 | } | ||
3369 | return 0; | ||
3370 | |||
3371 | err_out: | ||
3372 | genl_unregister_family(&ip_vs_genl_family); | ||
3373 | return ret; | ||
3374 | } | ||
3375 | |||
3376 | static void ip_vs_genl_unregister(void) | ||
3377 | { | ||
3378 | genl_unregister_family(&ip_vs_genl_family); | ||
3379 | } | ||
3380 | |||
3381 | /* End of Generic Netlink interface definitions */ | ||
3382 | |||
2323 | 3383 | ||
2324 | int __init ip_vs_control_init(void) | 3384 | int __init ip_vs_control_init(void) |
2325 | { | 3385 | { |
@@ -2334,6 +3394,13 @@ int __init ip_vs_control_init(void) | |||
2334 | return ret; | 3394 | return ret; |
2335 | } | 3395 | } |
2336 | 3396 | ||
3397 | ret = ip_vs_genl_register(); | ||
3398 | if (ret) { | ||
3399 | IP_VS_ERR("cannot register Generic Netlink interface.\n"); | ||
3400 | nf_unregister_sockopt(&ip_vs_sockopts); | ||
3401 | return ret; | ||
3402 | } | ||
3403 | |||
2337 | proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops); | 3404 | proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops); |
2338 | proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops); | 3405 | proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops); |
2339 | 3406 | ||
@@ -2368,6 +3435,7 @@ void ip_vs_control_cleanup(void) | |||
2368 | unregister_sysctl_table(sysctl_header); | 3435 | unregister_sysctl_table(sysctl_header); |
2369 | proc_net_remove(&init_net, "ip_vs_stats"); | 3436 | proc_net_remove(&init_net, "ip_vs_stats"); |
2370 | proc_net_remove(&init_net, "ip_vs"); | 3437 | proc_net_remove(&init_net, "ip_vs"); |
3438 | ip_vs_genl_unregister(); | ||
2371 | nf_unregister_sockopt(&ip_vs_sockopts); | 3439 | nf_unregister_sockopt(&ip_vs_sockopts); |
2372 | LeaveFunction(2); | 3440 | LeaveFunction(2); |
2373 | } | 3441 | } |
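With the Generic Netlink family registered during ip_vs_control_init(), userspace can resolve it by name instead of relying on a fixed protocol number. A rough sketch of querying IPVS_CMD_GET_INFO using the later libnl-3 API names (this commit predates libnl-3, and reply handling is omitted, so treat the details as assumptions):

/* Userspace sketch: resolve the IPVS genetlink family and request GET_INFO. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/ip_vs.h>		/* IPVS_GENL_NAME, IPVS_CMD_GET_INFO, ... */

int query_ipvs_info(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	genl_connect(sk);				/* bind to NETLINK_GENERIC */
	family = genl_ctrl_resolve(sk, IPVS_GENL_NAME);	/* dynamically assigned id */

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    IPVS_CMD_GET_INFO, IPVS_GENL_VERSION);
	nl_send_auto(sk, msg);			/* kernel answers with IPVS_CMD_SET_INFO */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return family < 0 ? family : 0;
}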
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c index fa66824d264f..a16943fd72f1 100644 --- a/net/ipv4/ipvs/ip_vs_dh.c +++ b/net/ipv4/ipvs/ip_vs_dh.c | |||
@@ -218,7 +218,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
218 | IP_VS_DBG(6, "DH: destination IP address %u.%u.%u.%u " | 218 | IP_VS_DBG(6, "DH: destination IP address %u.%u.%u.%u " |
219 | "--> server %u.%u.%u.%u:%d\n", | 219 | "--> server %u.%u.%u.%u:%d\n", |
220 | NIPQUAD(iph->daddr), | 220 | NIPQUAD(iph->daddr), |
221 | NIPQUAD(dest->addr), | 221 | NIPQUAD(dest->addr.ip), |
222 | ntohs(dest->port)); | 222 | ntohs(dest->port)); |
223 | 223 | ||
224 | return dest; | 224 | return dest; |
@@ -234,6 +234,9 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler = | |||
234 | .refcnt = ATOMIC_INIT(0), | 234 | .refcnt = ATOMIC_INIT(0), |
235 | .module = THIS_MODULE, | 235 | .module = THIS_MODULE, |
236 | .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list), | 236 | .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list), |
237 | #ifdef CONFIG_IP_VS_IPV6 | ||
238 | .supports_ipv6 = 0, | ||
239 | #endif | ||
237 | .init_service = ip_vs_dh_init_svc, | 240 | .init_service = ip_vs_dh_init_svc, |
238 | .done_service = ip_vs_dh_done_svc, | 241 | .done_service = ip_vs_dh_done_svc, |
239 | .update_service = ip_vs_dh_update_svc, | 242 | .update_service = ip_vs_dh_update_svc, |
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c index 5a20f93bd7f9..2eb2860dabb5 100644 --- a/net/ipv4/ipvs/ip_vs_est.c +++ b/net/ipv4/ipvs/ip_vs_est.c | |||
@@ -65,37 +65,37 @@ static void estimation_timer(unsigned long arg) | |||
65 | s = container_of(e, struct ip_vs_stats, est); | 65 | s = container_of(e, struct ip_vs_stats, est); |
66 | 66 | ||
67 | spin_lock(&s->lock); | 67 | spin_lock(&s->lock); |
68 | n_conns = s->conns; | 68 | n_conns = s->ustats.conns; |
69 | n_inpkts = s->inpkts; | 69 | n_inpkts = s->ustats.inpkts; |
70 | n_outpkts = s->outpkts; | 70 | n_outpkts = s->ustats.outpkts; |
71 | n_inbytes = s->inbytes; | 71 | n_inbytes = s->ustats.inbytes; |
72 | n_outbytes = s->outbytes; | 72 | n_outbytes = s->ustats.outbytes; |
73 | 73 | ||
74 | /* scaled by 2^10, but divided 2 seconds */ | 74 | /* scaled by 2^10, but divided 2 seconds */ |
75 | rate = (n_conns - e->last_conns)<<9; | 75 | rate = (n_conns - e->last_conns)<<9; |
76 | e->last_conns = n_conns; | 76 | e->last_conns = n_conns; |
77 | e->cps += ((long)rate - (long)e->cps)>>2; | 77 | e->cps += ((long)rate - (long)e->cps)>>2; |
78 | s->cps = (e->cps+0x1FF)>>10; | 78 | s->ustats.cps = (e->cps+0x1FF)>>10; |
79 | 79 | ||
80 | rate = (n_inpkts - e->last_inpkts)<<9; | 80 | rate = (n_inpkts - e->last_inpkts)<<9; |
81 | e->last_inpkts = n_inpkts; | 81 | e->last_inpkts = n_inpkts; |
82 | e->inpps += ((long)rate - (long)e->inpps)>>2; | 82 | e->inpps += ((long)rate - (long)e->inpps)>>2; |
83 | s->inpps = (e->inpps+0x1FF)>>10; | 83 | s->ustats.inpps = (e->inpps+0x1FF)>>10; |
84 | 84 | ||
85 | rate = (n_outpkts - e->last_outpkts)<<9; | 85 | rate = (n_outpkts - e->last_outpkts)<<9; |
86 | e->last_outpkts = n_outpkts; | 86 | e->last_outpkts = n_outpkts; |
87 | e->outpps += ((long)rate - (long)e->outpps)>>2; | 87 | e->outpps += ((long)rate - (long)e->outpps)>>2; |
88 | s->outpps = (e->outpps+0x1FF)>>10; | 88 | s->ustats.outpps = (e->outpps+0x1FF)>>10; |
89 | 89 | ||
90 | rate = (n_inbytes - e->last_inbytes)<<4; | 90 | rate = (n_inbytes - e->last_inbytes)<<4; |
91 | e->last_inbytes = n_inbytes; | 91 | e->last_inbytes = n_inbytes; |
92 | e->inbps += ((long)rate - (long)e->inbps)>>2; | 92 | e->inbps += ((long)rate - (long)e->inbps)>>2; |
93 | s->inbps = (e->inbps+0xF)>>5; | 93 | s->ustats.inbps = (e->inbps+0xF)>>5; |
94 | 94 | ||
95 | rate = (n_outbytes - e->last_outbytes)<<4; | 95 | rate = (n_outbytes - e->last_outbytes)<<4; |
96 | e->last_outbytes = n_outbytes; | 96 | e->last_outbytes = n_outbytes; |
97 | e->outbps += ((long)rate - (long)e->outbps)>>2; | 97 | e->outbps += ((long)rate - (long)e->outbps)>>2; |
98 | s->outbps = (e->outbps+0xF)>>5; | 98 | s->ustats.outbps = (e->outbps+0xF)>>5; |
99 | spin_unlock(&s->lock); | 99 | spin_unlock(&s->lock); |
100 | } | 100 | } |
101 | spin_unlock(&est_lock); | 101 | spin_unlock(&est_lock); |
@@ -108,24 +108,22 @@ void ip_vs_new_estimator(struct ip_vs_stats *stats) | |||
108 | 108 | ||
109 | INIT_LIST_HEAD(&est->list); | 109 | INIT_LIST_HEAD(&est->list); |
110 | 110 | ||
111 | est->last_conns = stats->conns; | 111 | est->last_conns = stats->ustats.conns; |
112 | est->cps = stats->cps<<10; | 112 | est->cps = stats->ustats.cps<<10; |
113 | 113 | ||
114 | est->last_inpkts = stats->inpkts; | 114 | est->last_inpkts = stats->ustats.inpkts; |
115 | est->inpps = stats->inpps<<10; | 115 | est->inpps = stats->ustats.inpps<<10; |
116 | 116 | ||
117 | est->last_outpkts = stats->outpkts; | 117 | est->last_outpkts = stats->ustats.outpkts; |
118 | est->outpps = stats->outpps<<10; | 118 | est->outpps = stats->ustats.outpps<<10; |
119 | 119 | ||
120 | est->last_inbytes = stats->inbytes; | 120 | est->last_inbytes = stats->ustats.inbytes; |
121 | est->inbps = stats->inbps<<5; | 121 | est->inbps = stats->ustats.inbps<<5; |
122 | 122 | ||
123 | est->last_outbytes = stats->outbytes; | 123 | est->last_outbytes = stats->ustats.outbytes; |
124 | est->outbps = stats->outbps<<5; | 124 | est->outbps = stats->ustats.outbps<<5; |
125 | 125 | ||
126 | spin_lock_bh(&est_lock); | 126 | spin_lock_bh(&est_lock); |
127 | if (list_empty(&est_list)) | ||
128 | mod_timer(&est_timer, jiffies + 2 * HZ); | ||
129 | list_add(&est->list, &est_list); | 127 | list_add(&est->list, &est_list); |
130 | spin_unlock_bh(&est_lock); | 128 | spin_unlock_bh(&est_lock); |
131 | } | 129 | } |
@@ -136,11 +134,6 @@ void ip_vs_kill_estimator(struct ip_vs_stats *stats) | |||
136 | 134 | ||
137 | spin_lock_bh(&est_lock); | 135 | spin_lock_bh(&est_lock); |
138 | list_del(&est->list); | 136 | list_del(&est->list); |
139 | while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) { | ||
140 | spin_unlock_bh(&est_lock); | ||
141 | cpu_relax(); | ||
142 | spin_lock_bh(&est_lock); | ||
143 | } | ||
144 | spin_unlock_bh(&est_lock); | 137 | spin_unlock_bh(&est_lock); |
145 | } | 138 | } |
146 | 139 | ||
@@ -160,3 +153,14 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats) | |||
160 | est->inbps = 0; | 153 | est->inbps = 0; |
161 | est->outbps = 0; | 154 | est->outbps = 0; |
162 | } | 155 | } |
156 | |||
157 | int __init ip_vs_estimator_init(void) | ||
158 | { | ||
159 | mod_timer(&est_timer, jiffies + 2 * HZ); | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | void ip_vs_estimator_cleanup(void) | ||
164 | { | ||
165 | del_timer_sync(&est_timer); | ||
166 | } | ||
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c index c1c758e4f733..2e7dbd8b73a4 100644 --- a/net/ipv4/ipvs/ip_vs_ftp.c +++ b/net/ipv4/ipvs/ip_vs_ftp.c | |||
@@ -140,13 +140,21 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
140 | struct tcphdr *th; | 140 | struct tcphdr *th; |
141 | char *data, *data_limit; | 141 | char *data, *data_limit; |
142 | char *start, *end; | 142 | char *start, *end; |
143 | __be32 from; | 143 | union nf_inet_addr from; |
144 | __be16 port; | 144 | __be16 port; |
145 | struct ip_vs_conn *n_cp; | 145 | struct ip_vs_conn *n_cp; |
146 | char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ | 146 | char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ |
147 | unsigned buf_len; | 147 | unsigned buf_len; |
148 | int ret; | 148 | int ret; |
149 | 149 | ||
150 | #ifdef CONFIG_IP_VS_IPV6 | ||
151 | /* This application helper doesn't work with IPv6 yet, | ||
152 | * so turn this into a no-op for IPv6 packets | ||
153 | */ | ||
154 | if (cp->af == AF_INET6) | ||
155 | return 1; | ||
156 | #endif | ||
157 | |||
150 | *diff = 0; | 158 | *diff = 0; |
151 | 159 | ||
152 | /* Only useful for established sessions */ | 160 | /* Only useful for established sessions */ |
@@ -166,24 +174,25 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
166 | if (ip_vs_ftp_get_addrport(data, data_limit, | 174 | if (ip_vs_ftp_get_addrport(data, data_limit, |
167 | SERVER_STRING, | 175 | SERVER_STRING, |
168 | sizeof(SERVER_STRING)-1, ')', | 176 | sizeof(SERVER_STRING)-1, ')', |
169 | &from, &port, | 177 | &from.ip, &port, |
170 | &start, &end) != 1) | 178 | &start, &end) != 1) |
171 | return 1; | 179 | return 1; |
172 | 180 | ||
173 | IP_VS_DBG(7, "PASV response (%u.%u.%u.%u:%d) -> " | 181 | IP_VS_DBG(7, "PASV response (%u.%u.%u.%u:%d) -> " |
174 | "%u.%u.%u.%u:%d detected\n", | 182 | "%u.%u.%u.%u:%d detected\n", |
175 | NIPQUAD(from), ntohs(port), NIPQUAD(cp->caddr), 0); | 183 | NIPQUAD(from.ip), ntohs(port), |
184 | NIPQUAD(cp->caddr.ip), 0); | ||
176 | 185 | ||
177 | /* | 186 | /* |
178 | * Now update or create an connection entry for it | 187 | * Now update or create an connection entry for it |
179 | */ | 188 | */ |
180 | n_cp = ip_vs_conn_out_get(iph->protocol, from, port, | 189 | n_cp = ip_vs_conn_out_get(AF_INET, iph->protocol, &from, port, |
181 | cp->caddr, 0); | 190 | &cp->caddr, 0); |
182 | if (!n_cp) { | 191 | if (!n_cp) { |
183 | n_cp = ip_vs_conn_new(IPPROTO_TCP, | 192 | n_cp = ip_vs_conn_new(AF_INET, IPPROTO_TCP, |
184 | cp->caddr, 0, | 193 | &cp->caddr, 0, |
185 | cp->vaddr, port, | 194 | &cp->vaddr, port, |
186 | from, port, | 195 | &from, port, |
187 | IP_VS_CONN_F_NO_CPORT, | 196 | IP_VS_CONN_F_NO_CPORT, |
188 | cp->dest); | 197 | cp->dest); |
189 | if (!n_cp) | 198 | if (!n_cp) |
@@ -196,9 +205,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
196 | /* | 205 | /* |
197 | * Replace the old passive address with the new one | 206 | * Replace the old passive address with the new one |
198 | */ | 207 | */ |
199 | from = n_cp->vaddr; | 208 | from.ip = n_cp->vaddr.ip; |
200 | port = n_cp->vport; | 209 | port = n_cp->vport; |
201 | sprintf(buf,"%d,%d,%d,%d,%d,%d", NIPQUAD(from), | 210 | sprintf(buf, "%d,%d,%d,%d,%d,%d", NIPQUAD(from.ip), |
202 | (ntohs(port)>>8)&255, ntohs(port)&255); | 211 | (ntohs(port)>>8)&255, ntohs(port)&255); |
203 | buf_len = strlen(buf); | 212 | buf_len = strlen(buf); |
204 | 213 | ||
@@ -243,10 +252,18 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
243 | struct tcphdr *th; | 252 | struct tcphdr *th; |
244 | char *data, *data_start, *data_limit; | 253 | char *data, *data_start, *data_limit; |
245 | char *start, *end; | 254 | char *start, *end; |
246 | __be32 to; | 255 | union nf_inet_addr to; |
247 | __be16 port; | 256 | __be16 port; |
248 | struct ip_vs_conn *n_cp; | 257 | struct ip_vs_conn *n_cp; |
249 | 258 | ||
259 | #ifdef CONFIG_IP_VS_IPV6 | ||
260 | /* This application helper doesn't work with IPv6 yet, | ||
261 | * so turn this into a no-op for IPv6 packets | ||
262 | */ | ||
263 | if (cp->af == AF_INET6) | ||
264 | return 1; | ||
265 | #endif | ||
266 | |||
250 | /* no diff required for incoming packets */ | 267 | /* no diff required for incoming packets */ |
251 | *diff = 0; | 268 | *diff = 0; |
252 | 269 | ||
@@ -291,12 +308,12 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
291 | */ | 308 | */ |
292 | if (ip_vs_ftp_get_addrport(data_start, data_limit, | 309 | if (ip_vs_ftp_get_addrport(data_start, data_limit, |
293 | CLIENT_STRING, sizeof(CLIENT_STRING)-1, | 310 | CLIENT_STRING, sizeof(CLIENT_STRING)-1, |
294 | '\r', &to, &port, | 311 | '\r', &to.ip, &port, |
295 | &start, &end) != 1) | 312 | &start, &end) != 1) |
296 | return 1; | 313 | return 1; |
297 | 314 | ||
298 | IP_VS_DBG(7, "PORT %u.%u.%u.%u:%d detected\n", | 315 | IP_VS_DBG(7, "PORT %u.%u.%u.%u:%d detected\n", |
299 | NIPQUAD(to), ntohs(port)); | 316 | NIPQUAD(to.ip), ntohs(port)); |
300 | 317 | ||
301 | /* Passive mode off */ | 318 | /* Passive mode off */ |
302 | cp->app_data = NULL; | 319 | cp->app_data = NULL; |
@@ -306,16 +323,16 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, | |||
306 | */ | 323 | */ |
307 | IP_VS_DBG(7, "protocol %s %u.%u.%u.%u:%d %u.%u.%u.%u:%d\n", | 324 | IP_VS_DBG(7, "protocol %s %u.%u.%u.%u:%d %u.%u.%u.%u:%d\n", |
308 | ip_vs_proto_name(iph->protocol), | 325 | ip_vs_proto_name(iph->protocol), |
309 | NIPQUAD(to), ntohs(port), NIPQUAD(cp->vaddr), 0); | 326 | NIPQUAD(to.ip), ntohs(port), NIPQUAD(cp->vaddr.ip), 0); |
310 | 327 | ||
311 | n_cp = ip_vs_conn_in_get(iph->protocol, | 328 | n_cp = ip_vs_conn_in_get(AF_INET, iph->protocol, |
312 | to, port, | 329 | &to, port, |
313 | cp->vaddr, htons(ntohs(cp->vport)-1)); | 330 | &cp->vaddr, htons(ntohs(cp->vport)-1)); |
314 | if (!n_cp) { | 331 | if (!n_cp) { |
315 | n_cp = ip_vs_conn_new(IPPROTO_TCP, | 332 | n_cp = ip_vs_conn_new(AF_INET, IPPROTO_TCP, |
316 | to, port, | 333 | &to, port, |
317 | cp->vaddr, htons(ntohs(cp->vport)-1), | 334 | &cp->vaddr, htons(ntohs(cp->vport)-1), |
318 | cp->daddr, htons(ntohs(cp->dport)-1), | 335 | &cp->daddr, htons(ntohs(cp->dport)-1), |
319 | 0, | 336 | 0, |
320 | cp->dest); | 337 | cp->dest); |
321 | if (!n_cp) | 338 | if (!n_cp) |
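In ip_vs_ftp.c the parsed addresses switch from __be32 to union nf_inet_addr, every connection-table call now passes an explicit address family, and IPv6 packets are passed through untouched until the helper learns to rewrite v6 payloads. A rough sketch of how a union-based address plus an af field covers both families; example_inet_addr, example_conn and example_handle are made-up names, not IPVS types.

#include <linux/in6.h>
#include <linux/socket.h>
#include <linux/types.h>

union example_inet_addr {
	__be32		ip;	/* IPv4, aliases the first 32 bits */
	struct in6_addr	in6;	/* IPv6 */
};

struct example_conn {
	int			af;	/* AF_INET or AF_INET6 */
	union example_inet_addr	addr;
};

/* Helpers that only understand IPv4 bail out early, in the same spirit as
 * the CONFIG_IP_VS_IPV6 guards added to ip_vs_ftp_out()/ip_vs_ftp_in(). */
static int example_handle(struct example_conn *cp)
{
	if (cp->af != AF_INET)
		return 1;	/* no-op for IPv6 for now */
	/* ... parse and rewrite the IPv4 payload via cp->addr.ip ... */
	return 0;
}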
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c index 7a6a319f544a..6ecef3518cac 100644 --- a/net/ipv4/ipvs/ip_vs_lblc.c +++ b/net/ipv4/ipvs/ip_vs_lblc.c | |||
@@ -96,7 +96,6 @@ struct ip_vs_lblc_entry { | |||
96 | * IPVS lblc hash table | 96 | * IPVS lblc hash table |
97 | */ | 97 | */ |
98 | struct ip_vs_lblc_table { | 98 | struct ip_vs_lblc_table { |
99 | rwlock_t lock; /* lock for this table */ | ||
100 | struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ | 99 | struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ |
101 | atomic_t entries; /* number of entries */ | 100 | atomic_t entries; /* number of entries */ |
102 | int max_size; /* maximum size of entries */ | 101 | int max_size; /* maximum size of entries */ |
@@ -123,31 +122,6 @@ static ctl_table vs_vars_table[] = { | |||
123 | 122 | ||
124 | static struct ctl_table_header * sysctl_header; | 123 | static struct ctl_table_header * sysctl_header; |
125 | 124 | ||
126 | /* | ||
127 |  * new/free a ip_vs_lblc_entry, which is a mapping of a destination | ||
128 | * IP address to a server. | ||
129 | */ | ||
130 | static inline struct ip_vs_lblc_entry * | ||
131 | ip_vs_lblc_new(__be32 daddr, struct ip_vs_dest *dest) | ||
132 | { | ||
133 | struct ip_vs_lblc_entry *en; | ||
134 | |||
135 | en = kmalloc(sizeof(struct ip_vs_lblc_entry), GFP_ATOMIC); | ||
136 | if (en == NULL) { | ||
137 | IP_VS_ERR("ip_vs_lblc_new(): no memory\n"); | ||
138 | return NULL; | ||
139 | } | ||
140 | |||
141 | INIT_LIST_HEAD(&en->list); | ||
142 | en->addr = daddr; | ||
143 | |||
144 | atomic_inc(&dest->refcnt); | ||
145 | en->dest = dest; | ||
146 | |||
147 | return en; | ||
148 | } | ||
149 | |||
150 | |||
151 | static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) | 125 | static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) |
152 | { | 126 | { |
153 | list_del(&en->list); | 127 | list_del(&en->list); |
@@ -173,55 +147,66 @@ static inline unsigned ip_vs_lblc_hashkey(__be32 addr) | |||
173 | * Hash an entry in the ip_vs_lblc_table. | 147 | * Hash an entry in the ip_vs_lblc_table. |
174 | * returns bool success. | 148 | * returns bool success. |
175 | */ | 149 | */ |
176 | static int | 150 | static void |
177 | ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) | 151 | ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) |
178 | { | 152 | { |
179 | unsigned hash; | 153 | unsigned hash = ip_vs_lblc_hashkey(en->addr); |
180 | |||
181 | if (!list_empty(&en->list)) { | ||
182 | IP_VS_ERR("ip_vs_lblc_hash(): request for already hashed, " | ||
183 | "called from %p\n", __builtin_return_address(0)); | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * Hash by destination IP address | ||
189 | */ | ||
190 | hash = ip_vs_lblc_hashkey(en->addr); | ||
191 | 154 | ||
192 | write_lock(&tbl->lock); | ||
193 | list_add(&en->list, &tbl->bucket[hash]); | 155 | list_add(&en->list, &tbl->bucket[hash]); |
194 | atomic_inc(&tbl->entries); | 156 | atomic_inc(&tbl->entries); |
195 | write_unlock(&tbl->lock); | ||
196 | |||
197 | return 1; | ||
198 | } | 157 | } |
199 | 158 | ||
200 | 159 | ||
201 | /* | 160 | /* |
202 | * Get ip_vs_lblc_entry associated with supplied parameters. | 161 | * Get ip_vs_lblc_entry associated with supplied parameters. Called under read |
162 | * lock | ||
203 | */ | 163 | */ |
204 | static inline struct ip_vs_lblc_entry * | 164 | static inline struct ip_vs_lblc_entry * |
205 | ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) | 165 | ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) |
206 | { | 166 | { |
207 | unsigned hash; | 167 | unsigned hash = ip_vs_lblc_hashkey(addr); |
208 | struct ip_vs_lblc_entry *en; | 168 | struct ip_vs_lblc_entry *en; |
209 | 169 | ||
210 | hash = ip_vs_lblc_hashkey(addr); | 170 | list_for_each_entry(en, &tbl->bucket[hash], list) |
171 | if (en->addr == addr) | ||
172 | return en; | ||
211 | 173 | ||
212 | read_lock(&tbl->lock); | 174 | return NULL; |
175 | } | ||
213 | 176 | ||
214 | list_for_each_entry(en, &tbl->bucket[hash], list) { | 177 | |
215 | if (en->addr == addr) { | 178 | /* |
216 | /* HIT */ | 179 | * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP |
217 | read_unlock(&tbl->lock); | 180 | * address to a server. Called under write lock. |
218 | return en; | 181 | */ |
182 | static inline struct ip_vs_lblc_entry * | ||
183 | ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, __be32 daddr, | ||
184 | struct ip_vs_dest *dest) | ||
185 | { | ||
186 | struct ip_vs_lblc_entry *en; | ||
187 | |||
188 | en = ip_vs_lblc_get(tbl, daddr); | ||
189 | if (!en) { | ||
190 | en = kmalloc(sizeof(*en), GFP_ATOMIC); | ||
191 | if (!en) { | ||
192 | IP_VS_ERR("ip_vs_lblc_new(): no memory\n"); | ||
193 | return NULL; | ||
219 | } | 194 | } |
220 | } | ||
221 | 195 | ||
222 | read_unlock(&tbl->lock); | 196 | en->addr = daddr; |
197 | en->lastuse = jiffies; | ||
223 | 198 | ||
224 | return NULL; | 199 | atomic_inc(&dest->refcnt); |
200 | en->dest = dest; | ||
201 | |||
202 | ip_vs_lblc_hash(tbl, en); | ||
203 | } else if (en->dest != dest) { | ||
204 | atomic_dec(&en->dest->refcnt); | ||
205 | atomic_inc(&dest->refcnt); | ||
206 | en->dest = dest; | ||
207 | } | ||
208 | |||
209 | return en; | ||
225 | } | 210 | } |
226 | 211 | ||
227 | 212 | ||
@@ -230,30 +215,29 @@ ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) | |||
230 | */ | 215 | */ |
231 | static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl) | 216 | static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl) |
232 | { | 217 | { |
233 | int i; | ||
234 | struct ip_vs_lblc_entry *en, *nxt; | 218 | struct ip_vs_lblc_entry *en, *nxt; |
219 | int i; | ||
235 | 220 | ||
236 | for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { | 221 | for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { |
237 | write_lock(&tbl->lock); | ||
238 | list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { | 222 | list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { |
239 | ip_vs_lblc_free(en); | 223 | ip_vs_lblc_free(en); |
240 | atomic_dec(&tbl->entries); | 224 | atomic_dec(&tbl->entries); |
241 | } | 225 | } |
242 | write_unlock(&tbl->lock); | ||
243 | } | 226 | } |
244 | } | 227 | } |
245 | 228 | ||
246 | 229 | ||
247 | static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) | 230 | static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc) |
248 | { | 231 | { |
232 | struct ip_vs_lblc_table *tbl = svc->sched_data; | ||
233 | struct ip_vs_lblc_entry *en, *nxt; | ||
249 | unsigned long now = jiffies; | 234 | unsigned long now = jiffies; |
250 | int i, j; | 235 | int i, j; |
251 | struct ip_vs_lblc_entry *en, *nxt; | ||
252 | 236 | ||
253 | for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { | 237 | for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { |
254 | j = (j + 1) & IP_VS_LBLC_TAB_MASK; | 238 | j = (j + 1) & IP_VS_LBLC_TAB_MASK; |
255 | 239 | ||
256 | write_lock(&tbl->lock); | 240 | write_lock(&svc->sched_lock); |
257 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { | 241 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { |
258 | if (time_before(now, | 242 | if (time_before(now, |
259 | en->lastuse + sysctl_ip_vs_lblc_expiration)) | 243 | en->lastuse + sysctl_ip_vs_lblc_expiration)) |
@@ -262,7 +246,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) | |||
262 | ip_vs_lblc_free(en); | 246 | ip_vs_lblc_free(en); |
263 | atomic_dec(&tbl->entries); | 247 | atomic_dec(&tbl->entries); |
264 | } | 248 | } |
265 | write_unlock(&tbl->lock); | 249 | write_unlock(&svc->sched_lock); |
266 | } | 250 | } |
267 | tbl->rover = j; | 251 | tbl->rover = j; |
268 | } | 252 | } |
@@ -281,17 +265,16 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) | |||
281 | */ | 265 | */ |
282 | static void ip_vs_lblc_check_expire(unsigned long data) | 266 | static void ip_vs_lblc_check_expire(unsigned long data) |
283 | { | 267 | { |
284 | struct ip_vs_lblc_table *tbl; | 268 | struct ip_vs_service *svc = (struct ip_vs_service *) data; |
269 | struct ip_vs_lblc_table *tbl = svc->sched_data; | ||
285 | unsigned long now = jiffies; | 270 | unsigned long now = jiffies; |
286 | int goal; | 271 | int goal; |
287 | int i, j; | 272 | int i, j; |
288 | struct ip_vs_lblc_entry *en, *nxt; | 273 | struct ip_vs_lblc_entry *en, *nxt; |
289 | 274 | ||
290 | tbl = (struct ip_vs_lblc_table *)data; | ||
291 | |||
292 | if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { | 275 | if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { |
293 | /* do full expiration check */ | 276 | /* do full expiration check */ |
294 | ip_vs_lblc_full_check(tbl); | 277 | ip_vs_lblc_full_check(svc); |
295 | tbl->counter = 1; | 278 | tbl->counter = 1; |
296 | goto out; | 279 | goto out; |
297 | } | 280 | } |
@@ -308,7 +291,7 @@ static void ip_vs_lblc_check_expire(unsigned long data) | |||
308 | for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { | 291 | for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { |
309 | j = (j + 1) & IP_VS_LBLC_TAB_MASK; | 292 | j = (j + 1) & IP_VS_LBLC_TAB_MASK; |
310 | 293 | ||
311 | write_lock(&tbl->lock); | 294 | write_lock(&svc->sched_lock); |
312 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { | 295 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { |
313 | if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) | 296 | if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) |
314 | continue; | 297 | continue; |
@@ -317,7 +300,7 @@ static void ip_vs_lblc_check_expire(unsigned long data) | |||
317 | atomic_dec(&tbl->entries); | 300 | atomic_dec(&tbl->entries); |
318 | goal--; | 301 | goal--; |
319 | } | 302 | } |
320 | write_unlock(&tbl->lock); | 303 | write_unlock(&svc->sched_lock); |
321 | if (goal <= 0) | 304 | if (goal <= 0) |
322 | break; | 305 | break; |
323 | } | 306 | } |
@@ -336,15 +319,14 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) | |||
336 | /* | 319 | /* |
337 | * Allocate the ip_vs_lblc_table for this service | 320 | * Allocate the ip_vs_lblc_table for this service |
338 | */ | 321 | */ |
339 | tbl = kmalloc(sizeof(struct ip_vs_lblc_table), GFP_ATOMIC); | 322 | tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); |
340 | if (tbl == NULL) { | 323 | if (tbl == NULL) { |
341 | IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n"); | 324 | IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n"); |
342 | return -ENOMEM; | 325 | return -ENOMEM; |
343 | } | 326 | } |
344 | svc->sched_data = tbl; | 327 | svc->sched_data = tbl; |
345 | IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for " | 328 | IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for " |
346 | "current service\n", | 329 | "current service\n", sizeof(*tbl)); |
347 | sizeof(struct ip_vs_lblc_table)); | ||
348 | 330 | ||
349 | /* | 331 | /* |
350 | * Initialize the hash buckets | 332 | * Initialize the hash buckets |
@@ -352,7 +334,6 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) | |||
352 | for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { | 334 | for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { |
353 | INIT_LIST_HEAD(&tbl->bucket[i]); | 335 | INIT_LIST_HEAD(&tbl->bucket[i]); |
354 | } | 336 | } |
355 | rwlock_init(&tbl->lock); | ||
356 | tbl->max_size = IP_VS_LBLC_TAB_SIZE*16; | 337 | tbl->max_size = IP_VS_LBLC_TAB_SIZE*16; |
357 | tbl->rover = 0; | 338 | tbl->rover = 0; |
358 | tbl->counter = 1; | 339 | tbl->counter = 1; |
@@ -361,9 +342,8 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) | |||
361 | * Hook periodic timer for garbage collection | 342 | * Hook periodic timer for garbage collection |
362 | */ | 343 | */ |
363 | setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire, | 344 | setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire, |
364 | (unsigned long)tbl); | 345 | (unsigned long)svc); |
365 | tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; | 346 | mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL); |
366 | add_timer(&tbl->periodic_timer); | ||
367 | 347 | ||
368 | return 0; | 348 | return 0; |
369 | } | 349 | } |
@@ -380,22 +360,16 @@ static int ip_vs_lblc_done_svc(struct ip_vs_service *svc) | |||
380 | ip_vs_lblc_flush(tbl); | 360 | ip_vs_lblc_flush(tbl); |
381 | 361 | ||
382 | /* release the table itself */ | 362 | /* release the table itself */ |
383 | kfree(svc->sched_data); | 363 | kfree(tbl); |
384 | IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n", | 364 | IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n", |
385 | sizeof(struct ip_vs_lblc_table)); | 365 | sizeof(*tbl)); |
386 | 366 | ||
387 | return 0; | 367 | return 0; |
388 | } | 368 | } |
389 | 369 | ||
390 | 370 | ||
391 | static int ip_vs_lblc_update_svc(struct ip_vs_service *svc) | ||
392 | { | ||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | |||
397 | static inline struct ip_vs_dest * | 371 | static inline struct ip_vs_dest * |
398 | __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) | 372 | __ip_vs_lblc_schedule(struct ip_vs_service *svc, struct iphdr *iph) |
399 | { | 373 | { |
400 | struct ip_vs_dest *dest, *least; | 374 | struct ip_vs_dest *dest, *least; |
401 | int loh, doh; | 375 | int loh, doh; |
@@ -448,7 +422,7 @@ __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) | |||
448 | 422 | ||
449 | IP_VS_DBG(6, "LBLC: server %d.%d.%d.%d:%d " | 423 | IP_VS_DBG(6, "LBLC: server %d.%d.%d.%d:%d " |
450 | "activeconns %d refcnt %d weight %d overhead %d\n", | 424 | "activeconns %d refcnt %d weight %d overhead %d\n", |
451 | NIPQUAD(least->addr), ntohs(least->port), | 425 | NIPQUAD(least->addr.ip), ntohs(least->port), |
452 | atomic_read(&least->activeconns), | 426 | atomic_read(&least->activeconns), |
453 | atomic_read(&least->refcnt), | 427 | atomic_read(&least->refcnt), |
454 | atomic_read(&least->weight), loh); | 428 | atomic_read(&least->weight), loh); |
@@ -484,47 +458,55 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc) | |||
484 | static struct ip_vs_dest * | 458 | static struct ip_vs_dest * |
485 | ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | 459 | ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) |
486 | { | 460 | { |
487 | struct ip_vs_dest *dest; | 461 | struct ip_vs_lblc_table *tbl = svc->sched_data; |
488 | struct ip_vs_lblc_table *tbl; | ||
489 | struct ip_vs_lblc_entry *en; | ||
490 | struct iphdr *iph = ip_hdr(skb); | 462 | struct iphdr *iph = ip_hdr(skb); |
463 | struct ip_vs_dest *dest = NULL; | ||
464 | struct ip_vs_lblc_entry *en; | ||
491 | 465 | ||
492 | IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n"); | 466 | IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n"); |
493 | 467 | ||
494 | tbl = (struct ip_vs_lblc_table *)svc->sched_data; | 468 | /* First look in our cache */ |
469 | read_lock(&svc->sched_lock); | ||
495 | en = ip_vs_lblc_get(tbl, iph->daddr); | 470 | en = ip_vs_lblc_get(tbl, iph->daddr); |
496 | if (en == NULL) { | 471 | if (en) { |
497 | dest = __ip_vs_wlc_schedule(svc, iph); | 472 | /* We only hold a read lock, but this is atomic */ |
498 | if (dest == NULL) { | 473 | en->lastuse = jiffies; |
499 | IP_VS_DBG(1, "no destination available\n"); | 474 | |
500 | return NULL; | 475 | /* |
501 | } | 476 | * If the destination is not available, i.e. it's in the trash, |
502 | en = ip_vs_lblc_new(iph->daddr, dest); | 477 | * we must ignore it, as it may be removed from under our feet, |
503 | if (en == NULL) { | 478 | * if someone drops our reference count. Our caller only makes |
504 | return NULL; | 479 | * sure that destinations, that are not in the trash, are not |
505 | } | 480 | * moved to the trash, while we are scheduling. But anyone can |
506 | ip_vs_lblc_hash(tbl, en); | 481 | * free up entries from the trash at any time. |
507 | } else { | 482 | */ |
508 | dest = en->dest; | 483 | |
509 | if (!(dest->flags & IP_VS_DEST_F_AVAILABLE) | 484 | if (en->dest->flags & IP_VS_DEST_F_AVAILABLE) |
510 | || atomic_read(&dest->weight) <= 0 | 485 | dest = en->dest; |
511 | || is_overloaded(dest, svc)) { | 486 | } |
512 | dest = __ip_vs_wlc_schedule(svc, iph); | 487 | read_unlock(&svc->sched_lock); |
513 | if (dest == NULL) { | 488 | |
514 | IP_VS_DBG(1, "no destination available\n"); | 489 | /* If the destination has a weight and is not overloaded, use it */ |
515 | return NULL; | 490 | if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc)) |
516 | } | 491 | goto out; |
517 | atomic_dec(&en->dest->refcnt); | 492 | |
518 | atomic_inc(&dest->refcnt); | 493 | /* No cache entry or it is invalid, time to schedule */ |
519 | en->dest = dest; | 494 | dest = __ip_vs_lblc_schedule(svc, iph); |
520 | } | 495 | if (!dest) { |
496 | IP_VS_DBG(1, "no destination available\n"); | ||
497 | return NULL; | ||
521 | } | 498 | } |
522 | en->lastuse = jiffies; | ||
523 | 499 | ||
500 | /* If we fail to create a cache entry, we'll just use the valid dest */ | ||
501 | write_lock(&svc->sched_lock); | ||
502 | ip_vs_lblc_new(tbl, iph->daddr, dest); | ||
503 | write_unlock(&svc->sched_lock); | ||
504 | |||
505 | out: | ||
524 | IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u " | 506 | IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u " |
525 | "--> server %u.%u.%u.%u:%d\n", | 507 | "--> server %u.%u.%u.%u:%d\n", |
526 | NIPQUAD(en->addr), | 508 | NIPQUAD(iph->daddr), |
527 | NIPQUAD(dest->addr), | 509 | NIPQUAD(dest->addr.ip), |
528 | ntohs(dest->port)); | 510 | ntohs(dest->port)); |
529 | 511 | ||
530 | return dest; | 512 | return dest; |
@@ -540,9 +522,11 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler = | |||
540 | .refcnt = ATOMIC_INIT(0), | 522 | .refcnt = ATOMIC_INIT(0), |
541 | .module = THIS_MODULE, | 523 | .module = THIS_MODULE, |
542 | .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list), | 524 | .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list), |
525 | #ifdef CONFIG_IP_VS_IPV6 | ||
526 | .supports_ipv6 = 0, | ||
527 | #endif | ||
543 | .init_service = ip_vs_lblc_init_svc, | 528 | .init_service = ip_vs_lblc_init_svc, |
544 | .done_service = ip_vs_lblc_done_svc, | 529 | .done_service = ip_vs_lblc_done_svc, |
545 | .update_service = ip_vs_lblc_update_svc, | ||
546 | .schedule = ip_vs_lblc_schedule, | 530 | .schedule = ip_vs_lblc_schedule, |
547 | }; | 531 | }; |
548 | 532 | ||
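The LBLC rework above drops the table's private rwlock and the already-hashed sanity check, folds lookup plus insert into a create-or-update ip_vs_lblc_new(), and protects the cache with the per-service svc->sched_lock: the scheduler takes the read lock for the common cache hit and only takes the write lock when a new entry (or a changed destination) has to be recorded. A condensed sketch of that read-mostly pattern, using illustrative names (demo_entry, demo_cache, demo_lock, demo_lookup/demo_record) rather than the patch's identifiers:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head	list;
	unsigned long		key;
	void			*dest;
};

static LIST_HEAD(demo_cache);
static DEFINE_RWLOCK(demo_lock);

/* Caller must hold demo_lock (read or write). */
static struct demo_entry *demo_find(unsigned long key)
{
	struct demo_entry *e;

	list_for_each_entry(e, &demo_cache, list)
		if (e->key == key)
			return e;
	return NULL;
}

void *demo_lookup(unsigned long key)
{
	struct demo_entry *e;
	void *dest = NULL;

	read_lock(&demo_lock);		/* cheap, common path */
	e = demo_find(key);
	if (e)
		dest = e->dest;
	read_unlock(&demo_lock);
	return dest;
}

void demo_record(unsigned long key, void *dest)
{
	struct demo_entry *e;

	write_lock(&demo_lock);		/* rare path: create or update */
	e = demo_find(key);
	if (!e) {
		e = kmalloc(sizeof(*e), GFP_ATOMIC);
		if (!e)
			goto out;	/* next lookup simply misses again */
		e->key = key;
		list_add(&e->list, &demo_cache);
	}
	e->dest = dest;
out:
	write_unlock(&demo_lock);
}

Because a failed allocation only means the cache misses on the next lookup, ip_vs_lblc_schedule() can ignore the return value of ip_vs_lblc_new(), as the "If we fail to create a cache entry" comment in the hunk notes.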
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c index c234e73968a6..1f75ea83bcf8 100644 --- a/net/ipv4/ipvs/ip_vs_lblcr.c +++ b/net/ipv4/ipvs/ip_vs_lblcr.c | |||
@@ -106,7 +106,7 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) | |||
106 | return NULL; | 106 | return NULL; |
107 | } | 107 | } |
108 | 108 | ||
109 | e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC); | 109 | e = kmalloc(sizeof(*e), GFP_ATOMIC); |
110 | if (e == NULL) { | 110 | if (e == NULL) { |
111 | IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n"); | 111 | IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n"); |
112 | return NULL; | 112 | return NULL; |
@@ -116,11 +116,9 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) | |||
116 | e->dest = dest; | 116 | e->dest = dest; |
117 | 117 | ||
118 | /* link it to the list */ | 118 | /* link it to the list */ |
119 | write_lock(&set->lock); | ||
120 | e->next = set->list; | 119 | e->next = set->list; |
121 | set->list = e; | 120 | set->list = e; |
122 | atomic_inc(&set->size); | 121 | atomic_inc(&set->size); |
123 | write_unlock(&set->lock); | ||
124 | 122 | ||
125 | set->lastmod = jiffies; | 123 | set->lastmod = jiffies; |
126 | return e; | 124 | return e; |
@@ -131,7 +129,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) | |||
131 | { | 129 | { |
132 | struct ip_vs_dest_list *e, **ep; | 130 | struct ip_vs_dest_list *e, **ep; |
133 | 131 | ||
134 | write_lock(&set->lock); | ||
135 | for (ep=&set->list, e=*ep; e!=NULL; e=*ep) { | 132 | for (ep=&set->list, e=*ep; e!=NULL; e=*ep) { |
136 | if (e->dest == dest) { | 133 | if (e->dest == dest) { |
137 | /* HIT */ | 134 | /* HIT */ |
@@ -144,7 +141,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) | |||
144 | } | 141 | } |
145 | ep = &e->next; | 142 | ep = &e->next; |
146 | } | 143 | } |
147 | write_unlock(&set->lock); | ||
148 | } | 144 | } |
149 | 145 | ||
150 | static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set) | 146 | static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set) |
@@ -174,7 +170,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) | |||
174 | if (set == NULL) | 170 | if (set == NULL) |
175 | return NULL; | 171 | return NULL; |
176 | 172 | ||
177 | read_lock(&set->lock); | ||
178 | /* select the first destination server, whose weight > 0 */ | 173 | /* select the first destination server, whose weight > 0 */ |
179 | for (e=set->list; e!=NULL; e=e->next) { | 174 | for (e=set->list; e!=NULL; e=e->next) { |
180 | least = e->dest; | 175 | least = e->dest; |
@@ -188,7 +183,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) | |||
188 | goto nextstage; | 183 | goto nextstage; |
189 | } | 184 | } |
190 | } | 185 | } |
191 | read_unlock(&set->lock); | ||
192 | return NULL; | 186 | return NULL; |
193 | 187 | ||
194 | /* find the destination with the weighted least load */ | 188 | /* find the destination with the weighted least load */ |
@@ -207,11 +201,10 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) | |||
207 | loh = doh; | 201 | loh = doh; |
208 | } | 202 | } |
209 | } | 203 | } |
210 | read_unlock(&set->lock); | ||
211 | 204 | ||
212 | IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d " | 205 | IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d " |
213 | "activeconns %d refcnt %d weight %d overhead %d\n", | 206 | "activeconns %d refcnt %d weight %d overhead %d\n", |
214 | NIPQUAD(least->addr), ntohs(least->port), | 207 | NIPQUAD(least->addr.ip), ntohs(least->port), |
215 | atomic_read(&least->activeconns), | 208 | atomic_read(&least->activeconns), |
216 | atomic_read(&least->refcnt), | 209 | atomic_read(&least->refcnt), |
217 | atomic_read(&least->weight), loh); | 210 | atomic_read(&least->weight), loh); |
@@ -229,7 +222,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) | |||
229 | if (set == NULL) | 222 | if (set == NULL) |
230 | return NULL; | 223 | return NULL; |
231 | 224 | ||
232 | read_lock(&set->lock); | ||
233 | /* select the first destination server, whose weight > 0 */ | 225 | /* select the first destination server, whose weight > 0 */ |
234 | for (e=set->list; e!=NULL; e=e->next) { | 226 | for (e=set->list; e!=NULL; e=e->next) { |
235 | most = e->dest; | 227 | most = e->dest; |
@@ -239,7 +231,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) | |||
239 | goto nextstage; | 231 | goto nextstage; |
240 | } | 232 | } |
241 | } | 233 | } |
242 | read_unlock(&set->lock); | ||
243 | return NULL; | 234 | return NULL; |
244 | 235 | ||
245 | /* find the destination with the weighted most load */ | 236 | /* find the destination with the weighted most load */ |
@@ -256,11 +247,10 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) | |||
256 | moh = doh; | 247 | moh = doh; |
257 | } | 248 | } |
258 | } | 249 | } |
259 | read_unlock(&set->lock); | ||
260 | 250 | ||
261 | IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d " | 251 | IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d " |
262 | "activeconns %d refcnt %d weight %d overhead %d\n", | 252 | "activeconns %d refcnt %d weight %d overhead %d\n", |
263 | NIPQUAD(most->addr), ntohs(most->port), | 253 | NIPQUAD(most->addr.ip), ntohs(most->port), |
264 | atomic_read(&most->activeconns), | 254 | atomic_read(&most->activeconns), |
265 | atomic_read(&most->refcnt), | 255 | atomic_read(&most->refcnt), |
266 | atomic_read(&most->weight), moh); | 256 | atomic_read(&most->weight), moh); |
@@ -284,7 +274,6 @@ struct ip_vs_lblcr_entry { | |||
284 | * IPVS lblcr hash table | 274 | * IPVS lblcr hash table |
285 | */ | 275 | */ |
286 | struct ip_vs_lblcr_table { | 276 | struct ip_vs_lblcr_table { |
287 | rwlock_t lock; /* lock for this table */ | ||
288 | struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ | 277 | struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ |
289 | atomic_t entries; /* number of entries */ | 278 | atomic_t entries; /* number of entries */ |
290 | int max_size; /* maximum size of entries */ | 279 | int max_size; /* maximum size of entries */ |
@@ -311,32 +300,6 @@ static ctl_table vs_vars_table[] = { | |||
311 | 300 | ||
312 | static struct ctl_table_header * sysctl_header; | 301 | static struct ctl_table_header * sysctl_header; |
313 | 302 | ||
314 | /* | ||
315 | * new/free a ip_vs_lblcr_entry, which is a mapping of a destination | ||
316 | * IP address to a server. | ||
317 | */ | ||
318 | static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__be32 daddr) | ||
319 | { | ||
320 | struct ip_vs_lblcr_entry *en; | ||
321 | |||
322 | en = kmalloc(sizeof(struct ip_vs_lblcr_entry), GFP_ATOMIC); | ||
323 | if (en == NULL) { | ||
324 | IP_VS_ERR("ip_vs_lblcr_new(): no memory\n"); | ||
325 | return NULL; | ||
326 | } | ||
327 | |||
328 | INIT_LIST_HEAD(&en->list); | ||
329 | en->addr = daddr; | ||
330 | |||
331 | /* initilize its dest set */ | ||
332 | atomic_set(&(en->set.size), 0); | ||
333 | en->set.list = NULL; | ||
334 | rwlock_init(&en->set.lock); | ||
335 | |||
336 | return en; | ||
337 | } | ||
338 | |||
339 | |||
340 | static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en) | 303 | static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en) |
341 | { | 304 | { |
342 | list_del(&en->list); | 305 | list_del(&en->list); |
@@ -358,55 +321,68 @@ static inline unsigned ip_vs_lblcr_hashkey(__be32 addr) | |||
358 | * Hash an entry in the ip_vs_lblcr_table. | 321 | * Hash an entry in the ip_vs_lblcr_table. |
359 | * returns bool success. | 322 | * returns bool success. |
360 | */ | 323 | */ |
361 | static int | 324 | static void |
362 | ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) | 325 | ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) |
363 | { | 326 | { |
364 | unsigned hash; | 327 | unsigned hash = ip_vs_lblcr_hashkey(en->addr); |
365 | 328 | ||
366 | if (!list_empty(&en->list)) { | ||
367 | IP_VS_ERR("ip_vs_lblcr_hash(): request for already hashed, " | ||
368 | "called from %p\n", __builtin_return_address(0)); | ||
369 | return 0; | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * Hash by destination IP address | ||
374 | */ | ||
375 | hash = ip_vs_lblcr_hashkey(en->addr); | ||
376 | |||
377 | write_lock(&tbl->lock); | ||
378 | list_add(&en->list, &tbl->bucket[hash]); | 329 | list_add(&en->list, &tbl->bucket[hash]); |
379 | atomic_inc(&tbl->entries); | 330 | atomic_inc(&tbl->entries); |
380 | write_unlock(&tbl->lock); | ||
381 | |||
382 | return 1; | ||
383 | } | 331 | } |
384 | 332 | ||
385 | 333 | ||
386 | /* | 334 | /* |
387 | * Get ip_vs_lblcr_entry associated with supplied parameters. | 335 | * Get ip_vs_lblcr_entry associated with supplied parameters. Called under |
336 | * read lock. | ||
388 | */ | 337 | */ |
389 | static inline struct ip_vs_lblcr_entry * | 338 | static inline struct ip_vs_lblcr_entry * |
390 | ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr) | 339 | ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr) |
391 | { | 340 | { |
392 | unsigned hash; | 341 | unsigned hash = ip_vs_lblcr_hashkey(addr); |
393 | struct ip_vs_lblcr_entry *en; | 342 | struct ip_vs_lblcr_entry *en; |
394 | 343 | ||
395 | hash = ip_vs_lblcr_hashkey(addr); | 344 | list_for_each_entry(en, &tbl->bucket[hash], list) |
345 | if (en->addr == addr) | ||
346 | return en; | ||
396 | 347 | ||
397 | read_lock(&tbl->lock); | 348 | return NULL; |
349 | } | ||
398 | 350 | ||
399 | list_for_each_entry(en, &tbl->bucket[hash], list) { | 351 | |
400 | if (en->addr == addr) { | 352 | /* |
401 | /* HIT */ | 353 | * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination |
402 | read_unlock(&tbl->lock); | 354 | * IP address to a server. Called under write lock. |
403 | return en; | 355 | */ |
356 | static inline struct ip_vs_lblcr_entry * | ||
357 | ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, __be32 daddr, | ||
358 | struct ip_vs_dest *dest) | ||
359 | { | ||
360 | struct ip_vs_lblcr_entry *en; | ||
361 | |||
362 | en = ip_vs_lblcr_get(tbl, daddr); | ||
363 | if (!en) { | ||
364 | en = kmalloc(sizeof(*en), GFP_ATOMIC); | ||
365 | if (!en) { | ||
366 | IP_VS_ERR("ip_vs_lblcr_new(): no memory\n"); | ||
367 | return NULL; | ||
404 | } | 368 | } |
369 | |||
370 | en->addr = daddr; | ||
371 | en->lastuse = jiffies; | ||
372 | |||
372 | 		/* initialize its dest set */ |
374 | atomic_set(&(en->set.size), 0); | ||
375 | en->set.list = NULL; | ||
376 | rwlock_init(&en->set.lock); | ||
377 | |||
378 | ip_vs_lblcr_hash(tbl, en); | ||
405 | } | 379 | } |
406 | 380 | ||
407 | read_unlock(&tbl->lock); | 381 | write_lock(&en->set.lock); |
382 | ip_vs_dest_set_insert(&en->set, dest); | ||
383 | write_unlock(&en->set.lock); | ||
408 | 384 | ||
409 | return NULL; | 385 | return en; |
410 | } | 386 | } |
411 | 387 | ||
412 | 388 | ||
@@ -418,19 +394,18 @@ static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl) | |||
418 | int i; | 394 | int i; |
419 | struct ip_vs_lblcr_entry *en, *nxt; | 395 | struct ip_vs_lblcr_entry *en, *nxt; |
420 | 396 | ||
397 | /* No locking required, only called during cleanup. */ | ||
421 | for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { | 398 | for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { |
422 | write_lock(&tbl->lock); | ||
423 | list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { | 399 | list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { |
424 | ip_vs_lblcr_free(en); | 400 | ip_vs_lblcr_free(en); |
425 | atomic_dec(&tbl->entries); | ||
426 | } | 401 | } |
427 | write_unlock(&tbl->lock); | ||
428 | } | 402 | } |
429 | } | 403 | } |
430 | 404 | ||
431 | 405 | ||
432 | static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) | 406 | static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc) |
433 | { | 407 | { |
408 | struct ip_vs_lblcr_table *tbl = svc->sched_data; | ||
434 | unsigned long now = jiffies; | 409 | unsigned long now = jiffies; |
435 | int i, j; | 410 | int i, j; |
436 | struct ip_vs_lblcr_entry *en, *nxt; | 411 | struct ip_vs_lblcr_entry *en, *nxt; |
@@ -438,7 +413,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) | |||
438 | for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { | 413 | for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { |
439 | j = (j + 1) & IP_VS_LBLCR_TAB_MASK; | 414 | j = (j + 1) & IP_VS_LBLCR_TAB_MASK; |
440 | 415 | ||
441 | write_lock(&tbl->lock); | 416 | write_lock(&svc->sched_lock); |
442 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { | 417 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { |
443 | if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration, | 418 | if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration, |
444 | now)) | 419 | now)) |
@@ -447,7 +422,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) | |||
447 | ip_vs_lblcr_free(en); | 422 | ip_vs_lblcr_free(en); |
448 | atomic_dec(&tbl->entries); | 423 | atomic_dec(&tbl->entries); |
449 | } | 424 | } |
450 | write_unlock(&tbl->lock); | 425 | write_unlock(&svc->sched_lock); |
451 | } | 426 | } |
452 | tbl->rover = j; | 427 | tbl->rover = j; |
453 | } | 428 | } |
@@ -466,17 +441,16 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) | |||
466 | */ | 441 | */ |
467 | static void ip_vs_lblcr_check_expire(unsigned long data) | 442 | static void ip_vs_lblcr_check_expire(unsigned long data) |
468 | { | 443 | { |
469 | struct ip_vs_lblcr_table *tbl; | 444 | struct ip_vs_service *svc = (struct ip_vs_service *) data; |
445 | struct ip_vs_lblcr_table *tbl = svc->sched_data; | ||
470 | unsigned long now = jiffies; | 446 | unsigned long now = jiffies; |
471 | int goal; | 447 | int goal; |
472 | int i, j; | 448 | int i, j; |
473 | struct ip_vs_lblcr_entry *en, *nxt; | 449 | struct ip_vs_lblcr_entry *en, *nxt; |
474 | 450 | ||
475 | tbl = (struct ip_vs_lblcr_table *)data; | ||
476 | |||
477 | if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { | 451 | if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { |
478 | /* do full expiration check */ | 452 | /* do full expiration check */ |
479 | ip_vs_lblcr_full_check(tbl); | 453 | ip_vs_lblcr_full_check(svc); |
480 | tbl->counter = 1; | 454 | tbl->counter = 1; |
481 | goto out; | 455 | goto out; |
482 | } | 456 | } |
@@ -493,7 +467,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data) | |||
493 | for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { | 467 | for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { |
494 | j = (j + 1) & IP_VS_LBLCR_TAB_MASK; | 468 | j = (j + 1) & IP_VS_LBLCR_TAB_MASK; |
495 | 469 | ||
496 | write_lock(&tbl->lock); | 470 | write_lock(&svc->sched_lock); |
497 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { | 471 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { |
498 | if (time_before(now, en->lastuse+ENTRY_TIMEOUT)) | 472 | if (time_before(now, en->lastuse+ENTRY_TIMEOUT)) |
499 | continue; | 473 | continue; |
@@ -502,7 +476,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data) | |||
502 | atomic_dec(&tbl->entries); | 476 | atomic_dec(&tbl->entries); |
503 | goal--; | 477 | goal--; |
504 | } | 478 | } |
505 | write_unlock(&tbl->lock); | 479 | write_unlock(&svc->sched_lock); |
506 | if (goal <= 0) | 480 | if (goal <= 0) |
507 | break; | 481 | break; |
508 | } | 482 | } |
@@ -520,15 +494,14 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) | |||
520 | /* | 494 | /* |
521 | * Allocate the ip_vs_lblcr_table for this service | 495 | * Allocate the ip_vs_lblcr_table for this service |
522 | */ | 496 | */ |
523 | tbl = kmalloc(sizeof(struct ip_vs_lblcr_table), GFP_ATOMIC); | 497 | tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); |
524 | if (tbl == NULL) { | 498 | if (tbl == NULL) { |
525 | IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n"); | 499 | IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n"); |
526 | return -ENOMEM; | 500 | return -ENOMEM; |
527 | } | 501 | } |
528 | svc->sched_data = tbl; | 502 | svc->sched_data = tbl; |
529 | IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for " | 503 | IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for " |
530 | "current service\n", | 504 | "current service\n", sizeof(*tbl)); |
531 | sizeof(struct ip_vs_lblcr_table)); | ||
532 | 505 | ||
533 | /* | 506 | /* |
534 | * Initialize the hash buckets | 507 | * Initialize the hash buckets |
@@ -536,7 +509,6 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) | |||
536 | for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { | 509 | for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { |
537 | INIT_LIST_HEAD(&tbl->bucket[i]); | 510 | INIT_LIST_HEAD(&tbl->bucket[i]); |
538 | } | 511 | } |
539 | rwlock_init(&tbl->lock); | ||
540 | tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16; | 512 | tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16; |
541 | tbl->rover = 0; | 513 | tbl->rover = 0; |
542 | tbl->counter = 1; | 514 | tbl->counter = 1; |
@@ -545,9 +517,8 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) | |||
545 | * Hook periodic timer for garbage collection | 517 | * Hook periodic timer for garbage collection |
546 | */ | 518 | */ |
547 | setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire, | 519 | setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire, |
548 | (unsigned long)tbl); | 520 | (unsigned long)svc); |
549 | tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; | 521 | mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL); |
550 | add_timer(&tbl->periodic_timer); | ||
551 | 522 | ||
552 | return 0; | 523 | return 0; |
553 | } | 524 | } |
@@ -564,22 +535,16 @@ static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc) | |||
564 | ip_vs_lblcr_flush(tbl); | 535 | ip_vs_lblcr_flush(tbl); |
565 | 536 | ||
566 | /* release the table itself */ | 537 | /* release the table itself */ |
567 | kfree(svc->sched_data); | 538 | kfree(tbl); |
568 | IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n", | 539 | IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n", |
569 | sizeof(struct ip_vs_lblcr_table)); | 540 | sizeof(*tbl)); |
570 | 541 | ||
571 | return 0; | 542 | return 0; |
572 | } | 543 | } |
573 | 544 | ||
574 | 545 | ||
575 | static int ip_vs_lblcr_update_svc(struct ip_vs_service *svc) | ||
576 | { | ||
577 | return 0; | ||
578 | } | ||
579 | |||
580 | |||
581 | static inline struct ip_vs_dest * | 546 | static inline struct ip_vs_dest * |
582 | __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) | 547 | __ip_vs_lblcr_schedule(struct ip_vs_service *svc, struct iphdr *iph) |
583 | { | 548 | { |
584 | struct ip_vs_dest *dest, *least; | 549 | struct ip_vs_dest *dest, *least; |
585 | int loh, doh; | 550 | int loh, doh; |
@@ -633,7 +598,7 @@ __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) | |||
633 | 598 | ||
634 | IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d " | 599 | IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d " |
635 | "activeconns %d refcnt %d weight %d overhead %d\n", | 600 | "activeconns %d refcnt %d weight %d overhead %d\n", |
636 | NIPQUAD(least->addr), ntohs(least->port), | 601 | NIPQUAD(least->addr.ip), ntohs(least->port), |
637 | atomic_read(&least->activeconns), | 602 | atomic_read(&least->activeconns), |
638 | atomic_read(&least->refcnt), | 603 | atomic_read(&least->refcnt), |
639 | atomic_read(&least->weight), loh); | 604 | atomic_read(&least->weight), loh); |
@@ -669,51 +634,79 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc) | |||
669 | static struct ip_vs_dest * | 634 | static struct ip_vs_dest * |
670 | ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | 635 | ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) |
671 | { | 636 | { |
672 | struct ip_vs_dest *dest; | 637 | struct ip_vs_lblcr_table *tbl = svc->sched_data; |
673 | struct ip_vs_lblcr_table *tbl; | ||
674 | struct ip_vs_lblcr_entry *en; | ||
675 | struct iphdr *iph = ip_hdr(skb); | 638 | struct iphdr *iph = ip_hdr(skb); |
639 | struct ip_vs_dest *dest = NULL; | ||
640 | struct ip_vs_lblcr_entry *en; | ||
676 | 641 | ||
677 | IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n"); | 642 | IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n"); |
678 | 643 | ||
679 | tbl = (struct ip_vs_lblcr_table *)svc->sched_data; | 644 | /* First look in our cache */ |
645 | read_lock(&svc->sched_lock); | ||
680 | en = ip_vs_lblcr_get(tbl, iph->daddr); | 646 | en = ip_vs_lblcr_get(tbl, iph->daddr); |
681 | if (en == NULL) { | 647 | if (en) { |
682 | dest = __ip_vs_wlc_schedule(svc, iph); | 648 | /* We only hold a read lock, but this is atomic */ |
683 | if (dest == NULL) { | 649 | en->lastuse = jiffies; |
684 | IP_VS_DBG(1, "no destination available\n"); | 650 | |
685 | return NULL; | 651 | /* Get the least loaded destination */ |
686 | } | 652 | read_lock(&en->set.lock); |
687 | en = ip_vs_lblcr_new(iph->daddr); | ||
688 | if (en == NULL) { | ||
689 | return NULL; | ||
690 | } | ||
691 | ip_vs_dest_set_insert(&en->set, dest); | ||
692 | ip_vs_lblcr_hash(tbl, en); | ||
693 | } else { | ||
694 | dest = ip_vs_dest_set_min(&en->set); | 653 | dest = ip_vs_dest_set_min(&en->set); |
695 | if (!dest || is_overloaded(dest, svc)) { | 654 | read_unlock(&en->set.lock); |
696 | dest = __ip_vs_wlc_schedule(svc, iph); | 655 | |
697 | if (dest == NULL) { | 656 | /* More than one destination + enough time passed by, cleanup */ |
698 | IP_VS_DBG(1, "no destination available\n"); | ||
699 | return NULL; | ||
700 | } | ||
701 | ip_vs_dest_set_insert(&en->set, dest); | ||
702 | } | ||
703 | if (atomic_read(&en->set.size) > 1 && | 657 | if (atomic_read(&en->set.size) > 1 && |
704 | jiffies-en->set.lastmod > sysctl_ip_vs_lblcr_expiration) { | 658 | time_after(jiffies, en->set.lastmod + |
659 | sysctl_ip_vs_lblcr_expiration)) { | ||
705 | struct ip_vs_dest *m; | 660 | struct ip_vs_dest *m; |
661 | |||
662 | write_lock(&en->set.lock); | ||
706 | m = ip_vs_dest_set_max(&en->set); | 663 | m = ip_vs_dest_set_max(&en->set); |
707 | if (m) | 664 | if (m) |
708 | ip_vs_dest_set_erase(&en->set, m); | 665 | ip_vs_dest_set_erase(&en->set, m); |
666 | write_unlock(&en->set.lock); | ||
709 | } | 667 | } |
668 | |||
669 | /* If the destination is not overloaded, use it */ | ||
670 | if (dest && !is_overloaded(dest, svc)) { | ||
671 | read_unlock(&svc->sched_lock); | ||
672 | goto out; | ||
673 | } | ||
674 | |||
675 | /* The cache entry is invalid, time to schedule */ | ||
676 | dest = __ip_vs_lblcr_schedule(svc, iph); | ||
677 | if (!dest) { | ||
678 | IP_VS_DBG(1, "no destination available\n"); | ||
679 | read_unlock(&svc->sched_lock); | ||
680 | return NULL; | ||
681 | } | ||
682 | |||
683 | /* Update our cache entry */ | ||
684 | write_lock(&en->set.lock); | ||
685 | ip_vs_dest_set_insert(&en->set, dest); | ||
686 | write_unlock(&en->set.lock); | ||
687 | } | ||
688 | read_unlock(&svc->sched_lock); | ||
689 | |||
690 | if (dest) | ||
691 | goto out; | ||
692 | |||
693 | /* No cache entry, time to schedule */ | ||
694 | dest = __ip_vs_lblcr_schedule(svc, iph); | ||
695 | if (!dest) { | ||
696 | IP_VS_DBG(1, "no destination available\n"); | ||
697 | return NULL; | ||
710 | } | 698 | } |
711 | en->lastuse = jiffies; | ||
712 | 699 | ||
700 | /* If we fail to create a cache entry, we'll just use the valid dest */ | ||
701 | write_lock(&svc->sched_lock); | ||
702 | ip_vs_lblcr_new(tbl, iph->daddr, dest); | ||
703 | write_unlock(&svc->sched_lock); | ||
704 | |||
705 | out: | ||
713 | IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u " | 706 | IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u " |
714 | "--> server %u.%u.%u.%u:%d\n", | 707 | "--> server %u.%u.%u.%u:%d\n", |
715 | NIPQUAD(en->addr), | 708 | NIPQUAD(iph->daddr), |
716 | NIPQUAD(dest->addr), | 709 | NIPQUAD(dest->addr.ip), |
717 | ntohs(dest->port)); | 710 | ntohs(dest->port)); |
718 | 711 | ||
719 | return dest; | 712 | return dest; |
@@ -729,9 +722,11 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler = | |||
729 | .refcnt = ATOMIC_INIT(0), | 722 | .refcnt = ATOMIC_INIT(0), |
730 | .module = THIS_MODULE, | 723 | .module = THIS_MODULE, |
731 | .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list), | 724 | .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list), |
725 | #ifdef CONFIG_IP_VS_IPV6 | ||
726 | .supports_ipv6 = 0, | ||
727 | #endif | ||
732 | .init_service = ip_vs_lblcr_init_svc, | 728 | .init_service = ip_vs_lblcr_init_svc, |
733 | .done_service = ip_vs_lblcr_done_svc, | 729 | .done_service = ip_vs_lblcr_done_svc, |
734 | .update_service = ip_vs_lblcr_update_svc, | ||
735 | .schedule = ip_vs_lblcr_schedule, | 730 | .schedule = ip_vs_lblcr_schedule, |
736 | }; | 731 | }; |
737 | 732 | ||
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c index ebcdbf75ac65..b69f808ac461 100644 --- a/net/ipv4/ipvs/ip_vs_lc.c +++ b/net/ipv4/ipvs/ip_vs_lc.c | |||
@@ -20,24 +20,6 @@ | |||
20 | #include <net/ip_vs.h> | 20 | #include <net/ip_vs.h> |
21 | 21 | ||
22 | 22 | ||
23 | static int ip_vs_lc_init_svc(struct ip_vs_service *svc) | ||
24 | { | ||
25 | return 0; | ||
26 | } | ||
27 | |||
28 | |||
29 | static int ip_vs_lc_done_svc(struct ip_vs_service *svc) | ||
30 | { | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | |||
35 | static int ip_vs_lc_update_svc(struct ip_vs_service *svc) | ||
36 | { | ||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | |||
41 | static inline unsigned int | 23 | static inline unsigned int |
42 | ip_vs_lc_dest_overhead(struct ip_vs_dest *dest) | 24 | ip_vs_lc_dest_overhead(struct ip_vs_dest *dest) |
43 | { | 25 | { |
@@ -85,10 +67,10 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
85 | } | 67 | } |
86 | 68 | ||
87 | if (least) | 69 | if (least) |
88 | IP_VS_DBG(6, "LC: server %u.%u.%u.%u:%u activeconns %d inactconns %d\n", | 70 | IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d inactconns %d\n", |
89 | NIPQUAD(least->addr), ntohs(least->port), | 71 | IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), |
90 | atomic_read(&least->activeconns), | 72 | atomic_read(&least->activeconns), |
91 | atomic_read(&least->inactconns)); | 73 | atomic_read(&least->inactconns)); |
92 | 74 | ||
93 | return least; | 75 | return least; |
94 | } | 76 | } |
@@ -99,9 +81,9 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = { | |||
99 | .refcnt = ATOMIC_INIT(0), | 81 | .refcnt = ATOMIC_INIT(0), |
100 | .module = THIS_MODULE, | 82 | .module = THIS_MODULE, |
101 | .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list), | 83 | .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list), |
102 | .init_service = ip_vs_lc_init_svc, | 84 | #ifdef CONFIG_IP_VS_IPV6 |
103 | .done_service = ip_vs_lc_done_svc, | 85 | .supports_ipv6 = 1, |
104 | .update_service = ip_vs_lc_update_svc, | 86 | #endif |
105 | .schedule = ip_vs_lc_schedule, | 87 | .schedule = ip_vs_lc_schedule, |
106 | }; | 88 | }; |
107 | 89 | ||
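ip_vs_lc.c (and ip_vs_nq.c just below) lose their empty init_service/done_service/update_service stubs, keep only .schedule plus the new supports_ipv6 flag, and switch the debug print to the address-family-aware IP_VS_DBG_BUF/IP_VS_DBG_ADDR helpers. Dropping the stubs implies the scheduler core now checks for NULL hooks before calling them; a tiny sketch of that optional-callback convention, with made-up names (demo_sched_ops, demo_bind_scheduler):

struct demo_sched_ops {
	int   (*init_service)(void *svc);	/* optional, may be left NULL */
	int   (*done_service)(void *svc);	/* optional */
	void *(*schedule)(void *svc);		/* mandatory */
};

static int demo_bind_scheduler(const struct demo_sched_ops *ops, void *svc)
{
	if (ops->init_service)			/* only call hooks that were provided */
		return ops->init_service(svc);
	return 0;
}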
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c index 92f3a6770031..9a2d8033f08f 100644 --- a/net/ipv4/ipvs/ip_vs_nq.c +++ b/net/ipv4/ipvs/ip_vs_nq.c | |||
@@ -37,27 +37,6 @@ | |||
37 | #include <net/ip_vs.h> | 37 | #include <net/ip_vs.h> |
38 | 38 | ||
39 | 39 | ||
40 | static int | ||
41 | ip_vs_nq_init_svc(struct ip_vs_service *svc) | ||
42 | { | ||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | |||
47 | static int | ||
48 | ip_vs_nq_done_svc(struct ip_vs_service *svc) | ||
49 | { | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | |||
54 | static int | ||
55 | ip_vs_nq_update_svc(struct ip_vs_service *svc) | ||
56 | { | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | |||
61 | static inline unsigned int | 40 | static inline unsigned int |
62 | ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) | 41 | ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) |
63 | { | 42 | { |
@@ -120,12 +99,12 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
120 | return NULL; | 99 | return NULL; |
121 | 100 | ||
122 | out: | 101 | out: |
123 | IP_VS_DBG(6, "NQ: server %u.%u.%u.%u:%u " | 102 | IP_VS_DBG_BUF(6, "NQ: server %s:%u " |
124 | "activeconns %d refcnt %d weight %d overhead %d\n", | 103 | "activeconns %d refcnt %d weight %d overhead %d\n", |
125 | NIPQUAD(least->addr), ntohs(least->port), | 104 | IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), |
126 | atomic_read(&least->activeconns), | 105 | atomic_read(&least->activeconns), |
127 | atomic_read(&least->refcnt), | 106 | atomic_read(&least->refcnt), |
128 | atomic_read(&least->weight), loh); | 107 | atomic_read(&least->weight), loh); |
129 | 108 | ||
130 | return least; | 109 | return least; |
131 | } | 110 | } |
@@ -137,9 +116,9 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler = | |||
137 | .refcnt = ATOMIC_INIT(0), | 116 | .refcnt = ATOMIC_INIT(0), |
138 | .module = THIS_MODULE, | 117 | .module = THIS_MODULE, |
139 | .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list), | 118 | .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list), |
140 | .init_service = ip_vs_nq_init_svc, | 119 | #ifdef CONFIG_IP_VS_IPV6 |
141 | .done_service = ip_vs_nq_done_svc, | 120 | .supports_ipv6 = 1, |
142 | .update_service = ip_vs_nq_update_svc, | 121 | #endif |
143 | .schedule = ip_vs_nq_schedule, | 122 | .schedule = ip_vs_nq_schedule, |
144 | }; | 123 | }; |
145 | 124 | ||
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c index 6099a88fc200..0791f9e08feb 100644 --- a/net/ipv4/ipvs/ip_vs_proto.c +++ b/net/ipv4/ipvs/ip_vs_proto.c | |||
@@ -151,11 +151,11 @@ const char * ip_vs_state_name(__u16 proto, int state) | |||
151 | } | 151 | } |
152 | 152 | ||
153 | 153 | ||
154 | void | 154 | static void |
155 | ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp, | 155 | ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp, |
156 | const struct sk_buff *skb, | 156 | const struct sk_buff *skb, |
157 | int offset, | 157 | int offset, |
158 | const char *msg) | 158 | const char *msg) |
159 | { | 159 | { |
160 | char buf[128]; | 160 | char buf[128]; |
161 | struct iphdr _iph, *ih; | 161 | struct iphdr _iph, *ih; |
@@ -189,6 +189,61 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp, | |||
189 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | 189 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); |
190 | } | 190 | } |
191 | 191 | ||
192 | #ifdef CONFIG_IP_VS_IPV6 | ||
193 | static void | ||
194 | ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp, | ||
195 | const struct sk_buff *skb, | ||
196 | int offset, | ||
197 | const char *msg) | ||
198 | { | ||
199 | char buf[192]; | ||
200 | struct ipv6hdr _iph, *ih; | ||
201 | |||
202 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | ||
203 | if (ih == NULL) | ||
204 | sprintf(buf, "%s TRUNCATED", pp->name); | ||
205 | else if (ih->nexthdr == IPPROTO_FRAGMENT) | ||
206 | sprintf(buf, "%s " NIP6_FMT "->" NIP6_FMT " frag", | ||
207 | pp->name, NIP6(ih->saddr), | ||
208 | NIP6(ih->daddr)); | ||
209 | else { | ||
210 | __be16 _ports[2], *pptr; | ||
211 | |||
212 | pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr), | ||
213 | sizeof(_ports), _ports); | ||
214 | if (pptr == NULL) | ||
215 | sprintf(buf, "%s TRUNCATED " NIP6_FMT "->" NIP6_FMT, | ||
216 | pp->name, | ||
217 | NIP6(ih->saddr), | ||
218 | NIP6(ih->daddr)); | ||
219 | else | ||
220 | sprintf(buf, "%s " NIP6_FMT ":%u->" NIP6_FMT ":%u", | ||
221 | pp->name, | ||
222 | NIP6(ih->saddr), | ||
223 | ntohs(pptr[0]), | ||
224 | NIP6(ih->daddr), | ||
225 | ntohs(pptr[1])); | ||
226 | } | ||
227 | |||
228 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | ||
229 | } | ||
230 | #endif | ||
231 | |||
232 | |||
233 | void | ||
234 | ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp, | ||
235 | const struct sk_buff *skb, | ||
236 | int offset, | ||
237 | const char *msg) | ||
238 | { | ||
239 | #ifdef CONFIG_IP_VS_IPV6 | ||
240 | if (skb->protocol == htons(ETH_P_IPV6)) | ||
241 | ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg); | ||
242 | else | ||
243 | #endif | ||
244 | ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg); | ||
245 | } | ||
246 | |||
192 | 247 | ||
193 | int __init ip_vs_protocol_init(void) | 248 | int __init ip_vs_protocol_init(void) |
194 | { | 249 | { |
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah.c b/net/ipv4/ipvs/ip_vs_proto_ah.c deleted file mode 100644 index 73e0ea87c1f5..000000000000 --- a/net/ipv4/ipvs/ip_vs_proto_ah.c +++ /dev/null | |||
@@ -1,178 +0,0 @@ | |||
1 | /* | ||
2 | * ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS | ||
3 | * | ||
4 | * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 | ||
5 | * Wensong Zhang <wensong@linuxvirtualserver.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation; | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/in.h> | ||
14 | #include <linux/ip.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/netfilter.h> | ||
18 | #include <linux/netfilter_ipv4.h> | ||
19 | |||
20 | #include <net/ip_vs.h> | ||
21 | |||
22 | |||
23 | /* TODO: | ||
24 | |||
25 | struct isakmp_hdr { | ||
26 | __u8 icookie[8]; | ||
27 | __u8 rcookie[8]; | ||
28 | __u8 np; | ||
29 | __u8 version; | ||
30 | __u8 xchgtype; | ||
31 | __u8 flags; | ||
32 | __u32 msgid; | ||
33 | __u32 length; | ||
34 | }; | ||
35 | |||
36 | */ | ||
37 | |||
38 | #define PORT_ISAKMP 500 | ||
39 | |||
40 | |||
41 | static struct ip_vs_conn * | ||
42 | ah_conn_in_get(const struct sk_buff *skb, | ||
43 | struct ip_vs_protocol *pp, | ||
44 | const struct iphdr *iph, | ||
45 | unsigned int proto_off, | ||
46 | int inverse) | ||
47 | { | ||
48 | struct ip_vs_conn *cp; | ||
49 | |||
50 | if (likely(!inverse)) { | ||
51 | cp = ip_vs_conn_in_get(IPPROTO_UDP, | ||
52 | iph->saddr, | ||
53 | htons(PORT_ISAKMP), | ||
54 | iph->daddr, | ||
55 | htons(PORT_ISAKMP)); | ||
56 | } else { | ||
57 | cp = ip_vs_conn_in_get(IPPROTO_UDP, | ||
58 | iph->daddr, | ||
59 | htons(PORT_ISAKMP), | ||
60 | iph->saddr, | ||
61 | htons(PORT_ISAKMP)); | ||
62 | } | ||
63 | |||
64 | if (!cp) { | ||
65 | /* | ||
66 | * We are not sure if the packet is from our | ||
67 | * service, so our conn_schedule hook should return NF_ACCEPT | ||
68 | */ | ||
69 | IP_VS_DBG(12, "Unknown ISAKMP entry for outin packet " | ||
70 | "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n", | ||
71 | inverse ? "ICMP+" : "", | ||
72 | pp->name, | ||
73 | NIPQUAD(iph->saddr), | ||
74 | NIPQUAD(iph->daddr)); | ||
75 | } | ||
76 | |||
77 | return cp; | ||
78 | } | ||
79 | |||
80 | |||
81 | static struct ip_vs_conn * | ||
82 | ah_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
83 | const struct iphdr *iph, unsigned int proto_off, int inverse) | ||
84 | { | ||
85 | struct ip_vs_conn *cp; | ||
86 | |||
87 | if (likely(!inverse)) { | ||
88 | cp = ip_vs_conn_out_get(IPPROTO_UDP, | ||
89 | iph->saddr, | ||
90 | htons(PORT_ISAKMP), | ||
91 | iph->daddr, | ||
92 | htons(PORT_ISAKMP)); | ||
93 | } else { | ||
94 | cp = ip_vs_conn_out_get(IPPROTO_UDP, | ||
95 | iph->daddr, | ||
96 | htons(PORT_ISAKMP), | ||
97 | iph->saddr, | ||
98 | htons(PORT_ISAKMP)); | ||
99 | } | ||
100 | |||
101 | if (!cp) { | ||
102 | IP_VS_DBG(12, "Unknown ISAKMP entry for inout packet " | ||
103 | "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n", | ||
104 | inverse ? "ICMP+" : "", | ||
105 | pp->name, | ||
106 | NIPQUAD(iph->saddr), | ||
107 | NIPQUAD(iph->daddr)); | ||
108 | } | ||
109 | |||
110 | return cp; | ||
111 | } | ||
112 | |||
113 | |||
114 | static int | ||
115 | ah_conn_schedule(struct sk_buff *skb, | ||
116 | struct ip_vs_protocol *pp, | ||
117 | int *verdict, struct ip_vs_conn **cpp) | ||
118 | { | ||
119 | /* | ||
120 | * AH is only related traffic. Pass the packet to IP stack. | ||
121 | */ | ||
122 | *verdict = NF_ACCEPT; | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | |||
127 | static void | ||
128 | ah_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb, | ||
129 | int offset, const char *msg) | ||
130 | { | ||
131 | char buf[256]; | ||
132 | struct iphdr _iph, *ih; | ||
133 | |||
134 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | ||
135 | if (ih == NULL) | ||
136 | sprintf(buf, "%s TRUNCATED", pp->name); | ||
137 | else | ||
138 | sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u", | ||
139 | pp->name, NIPQUAD(ih->saddr), | ||
140 | NIPQUAD(ih->daddr)); | ||
141 | |||
142 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | ||
143 | } | ||
144 | |||
145 | |||
146 | static void ah_init(struct ip_vs_protocol *pp) | ||
147 | { | ||
148 | /* nothing to do now */ | ||
149 | } | ||
150 | |||
151 | |||
152 | static void ah_exit(struct ip_vs_protocol *pp) | ||
153 | { | ||
154 | /* nothing to do now */ | ||
155 | } | ||
156 | |||
157 | |||
158 | struct ip_vs_protocol ip_vs_protocol_ah = { | ||
159 | .name = "AH", | ||
160 | .protocol = IPPROTO_AH, | ||
161 | .num_states = 1, | ||
162 | .dont_defrag = 1, | ||
163 | .init = ah_init, | ||
164 | .exit = ah_exit, | ||
165 | .conn_schedule = ah_conn_schedule, | ||
166 | .conn_in_get = ah_conn_in_get, | ||
167 | .conn_out_get = ah_conn_out_get, | ||
168 | .snat_handler = NULL, | ||
169 | .dnat_handler = NULL, | ||
170 | .csum_check = NULL, | ||
171 | .state_transition = NULL, | ||
172 | .register_app = NULL, | ||
173 | .unregister_app = NULL, | ||
174 | .app_conn_bind = NULL, | ||
175 | .debug_packet = ah_debug_packet, | ||
176 | .timeout_change = NULL, /* ISAKMP */ | ||
177 | .set_state_timeout = NULL, | ||
178 | }; | ||
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah_esp.c b/net/ipv4/ipvs/ip_vs_proto_ah_esp.c new file mode 100644 index 000000000000..80ab0c8e5b4a --- /dev/null +++ b/net/ipv4/ipvs/ip_vs_proto_ah_esp.c | |||
@@ -0,0 +1,235 @@ | |||
1 | /* | ||
2 | * ip_vs_proto_ah_esp.c: AH/ESP IPSec load balancing support for IPVS | ||
3 | * | ||
4 | * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 | ||
5 | * Wensong Zhang <wensong@linuxvirtualserver.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation; | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/in.h> | ||
14 | #include <linux/ip.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/netfilter.h> | ||
18 | #include <linux/netfilter_ipv4.h> | ||
19 | |||
20 | #include <net/ip_vs.h> | ||
21 | |||
22 | |||
23 | /* TODO: | ||
24 | |||
25 | struct isakmp_hdr { | ||
26 | __u8 icookie[8]; | ||
27 | __u8 rcookie[8]; | ||
28 | __u8 np; | ||
29 | __u8 version; | ||
30 | __u8 xchgtype; | ||
31 | __u8 flags; | ||
32 | __u32 msgid; | ||
33 | __u32 length; | ||
34 | }; | ||
35 | |||
36 | */ | ||
37 | |||
38 | #define PORT_ISAKMP 500 | ||
39 | |||
40 | |||
41 | static struct ip_vs_conn * | ||
42 | ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
43 | const struct ip_vs_iphdr *iph, unsigned int proto_off, | ||
44 | int inverse) | ||
45 | { | ||
46 | struct ip_vs_conn *cp; | ||
47 | |||
48 | if (likely(!inverse)) { | ||
49 | cp = ip_vs_conn_in_get(af, IPPROTO_UDP, | ||
50 | &iph->saddr, | ||
51 | htons(PORT_ISAKMP), | ||
52 | &iph->daddr, | ||
53 | htons(PORT_ISAKMP)); | ||
54 | } else { | ||
55 | cp = ip_vs_conn_in_get(af, IPPROTO_UDP, | ||
56 | &iph->daddr, | ||
57 | htons(PORT_ISAKMP), | ||
58 | &iph->saddr, | ||
59 | htons(PORT_ISAKMP)); | ||
60 | } | ||
61 | |||
62 | if (!cp) { | ||
63 | /* | ||
64 | * We are not sure if the packet is from our | ||
65 | * service, so our conn_schedule hook should return NF_ACCEPT | ||
66 | */ | ||
67 | IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet " | ||
68 | "%s%s %s->%s\n", | ||
69 | inverse ? "ICMP+" : "", | ||
70 | pp->name, | ||
71 | IP_VS_DBG_ADDR(af, &iph->saddr), | ||
72 | IP_VS_DBG_ADDR(af, &iph->daddr)); | ||
73 | } | ||
74 | |||
75 | return cp; | ||
76 | } | ||
77 | |||
78 | |||
79 | static struct ip_vs_conn * | ||
80 | ah_esp_conn_out_get(int af, const struct sk_buff *skb, | ||
81 | struct ip_vs_protocol *pp, | ||
82 | const struct ip_vs_iphdr *iph, | ||
83 | unsigned int proto_off, | ||
84 | int inverse) | ||
85 | { | ||
86 | struct ip_vs_conn *cp; | ||
87 | |||
88 | if (likely(!inverse)) { | ||
89 | cp = ip_vs_conn_out_get(af, IPPROTO_UDP, | ||
90 | &iph->saddr, | ||
91 | htons(PORT_ISAKMP), | ||
92 | &iph->daddr, | ||
93 | htons(PORT_ISAKMP)); | ||
94 | } else { | ||
95 | cp = ip_vs_conn_out_get(af, IPPROTO_UDP, | ||
96 | &iph->daddr, | ||
97 | htons(PORT_ISAKMP), | ||
98 | &iph->saddr, | ||
99 | htons(PORT_ISAKMP)); | ||
100 | } | ||
101 | |||
102 | if (!cp) { | ||
103 | IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet " | ||
104 | "%s%s %s->%s\n", | ||
105 | inverse ? "ICMP+" : "", | ||
106 | pp->name, | ||
107 | IP_VS_DBG_ADDR(af, &iph->saddr), | ||
108 | IP_VS_DBG_ADDR(af, &iph->daddr)); | ||
109 | } | ||
110 | |||
111 | return cp; | ||
112 | } | ||
113 | |||
114 | |||
115 | static int | ||
116 | ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
117 | int *verdict, struct ip_vs_conn **cpp) | ||
118 | { | ||
119 | /* | ||
120 | * AH/ESP is only related traffic. Pass the packet to IP stack. | ||
121 | */ | ||
122 | *verdict = NF_ACCEPT; | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | |||
127 | static void | ||
128 | ah_esp_debug_packet_v4(struct ip_vs_protocol *pp, const struct sk_buff *skb, | ||
129 | int offset, const char *msg) | ||
130 | { | ||
131 | char buf[256]; | ||
132 | struct iphdr _iph, *ih; | ||
133 | |||
134 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | ||
135 | if (ih == NULL) | ||
136 | sprintf(buf, "%s TRUNCATED", pp->name); | ||
137 | else | ||
138 | sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u", | ||
139 | pp->name, NIPQUAD(ih->saddr), | ||
140 | NIPQUAD(ih->daddr)); | ||
141 | |||
142 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | ||
143 | } | ||
144 | |||
145 | #ifdef CONFIG_IP_VS_IPV6 | ||
146 | static void | ||
147 | ah_esp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb, | ||
148 | int offset, const char *msg) | ||
149 | { | ||
150 | char buf[256]; | ||
151 | struct ipv6hdr _iph, *ih; | ||
152 | |||
153 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | ||
154 | if (ih == NULL) | ||
155 | sprintf(buf, "%s TRUNCATED", pp->name); | ||
156 | else | ||
157 | sprintf(buf, "%s " NIP6_FMT "->" NIP6_FMT, | ||
158 | pp->name, NIP6(ih->saddr), | ||
159 | NIP6(ih->daddr)); | ||
160 | |||
161 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | ||
162 | } | ||
163 | #endif | ||
164 | |||
165 | static void | ||
166 | ah_esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb, | ||
167 | int offset, const char *msg) | ||
168 | { | ||
169 | #ifdef CONFIG_IP_VS_IPV6 | ||
170 | if (skb->protocol == htons(ETH_P_IPV6)) | ||
171 | ah_esp_debug_packet_v6(pp, skb, offset, msg); | ||
172 | else | ||
173 | #endif | ||
174 | ah_esp_debug_packet_v4(pp, skb, offset, msg); | ||
175 | } | ||
176 | |||
177 | |||
178 | static void ah_esp_init(struct ip_vs_protocol *pp) | ||
179 | { | ||
180 | /* nothing to do now */ | ||
181 | } | ||
182 | |||
183 | |||
184 | static void ah_esp_exit(struct ip_vs_protocol *pp) | ||
185 | { | ||
186 | /* nothing to do now */ | ||
187 | } | ||
188 | |||
189 | |||
190 | #ifdef CONFIG_IP_VS_PROTO_AH | ||
191 | struct ip_vs_protocol ip_vs_protocol_ah = { | ||
192 | .name = "AH", | ||
193 | .protocol = IPPROTO_AH, | ||
194 | .num_states = 1, | ||
195 | .dont_defrag = 1, | ||
196 | .init = ah_esp_init, | ||
197 | .exit = ah_esp_exit, | ||
198 | .conn_schedule = ah_esp_conn_schedule, | ||
199 | .conn_in_get = ah_esp_conn_in_get, | ||
200 | .conn_out_get = ah_esp_conn_out_get, | ||
201 | .snat_handler = NULL, | ||
202 | .dnat_handler = NULL, | ||
203 | .csum_check = NULL, | ||
204 | .state_transition = NULL, | ||
205 | .register_app = NULL, | ||
206 | .unregister_app = NULL, | ||
207 | .app_conn_bind = NULL, | ||
208 | .debug_packet = ah_esp_debug_packet, | ||
209 | .timeout_change = NULL, /* ISAKMP */ | ||
210 | .set_state_timeout = NULL, | ||
211 | }; | ||
212 | #endif | ||
213 | |||
214 | #ifdef CONFIG_IP_VS_PROTO_ESP | ||
215 | struct ip_vs_protocol ip_vs_protocol_esp = { | ||
216 | .name = "ESP", | ||
217 | .protocol = IPPROTO_ESP, | ||
218 | .num_states = 1, | ||
219 | .dont_defrag = 1, | ||
220 | .init = ah_esp_init, | ||
221 | .exit = ah_esp_exit, | ||
222 | .conn_schedule = ah_esp_conn_schedule, | ||
223 | .conn_in_get = ah_esp_conn_in_get, | ||
224 | .conn_out_get = ah_esp_conn_out_get, | ||
225 | .snat_handler = NULL, | ||
226 | .dnat_handler = NULL, | ||
227 | .csum_check = NULL, | ||
228 | .state_transition = NULL, | ||
229 | .register_app = NULL, | ||
230 | .unregister_app = NULL, | ||
231 | .app_conn_bind = NULL, | ||
232 | .debug_packet = ah_esp_debug_packet, | ||
233 | .timeout_change = NULL, /* ISAKMP */ | ||
234 | }; | ||
235 | #endif | ||
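AH and ESP carry no transport ports of their own, so both conn_in_get/conn_out_get handlers above key the lookup on the ISAKMP control association by forcing both ports to UDP/500. A minimal userspace sketch of building such a lookup tuple, assuming an illustrative conn_key structure rather than the kernel's ip_vs types:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define PORT_ISAKMP 500

/* Illustrative lookup key: both ports are pinned to ISAKMP regardless
 * of what the AH/ESP packet itself carries. */
struct conn_key {
        int             af;
        struct in_addr  saddr, daddr;   /* IPv4 only, for brevity */
        uint16_t        sport, dport;   /* network byte order */
};

static struct conn_key ah_esp_key(struct in_addr saddr, struct in_addr daddr)
{
        struct conn_key k = {
                .af    = AF_INET,
                .saddr = saddr,
                .daddr = daddr,
                .sport = htons(PORT_ISAKMP),
                .dport = htons(PORT_ISAKMP),
        };
        return k;
}

int main(void)
{
        struct in_addr s, d;
        char sb[INET_ADDRSTRLEN], db[INET_ADDRSTRLEN];

        inet_pton(AF_INET, "192.0.2.1", &s);
        inet_pton(AF_INET, "198.51.100.2", &d);

        struct conn_key k = ah_esp_key(s, d);
        printf("lookup %s:%u -> %s:%u\n",
               inet_ntop(AF_INET, &k.saddr, sb, sizeof(sb)), ntohs(k.sport),
               inet_ntop(AF_INET, &k.daddr, db, sizeof(db)), ntohs(k.dport));
        return 0;
}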
diff --git a/net/ipv4/ipvs/ip_vs_proto_esp.c b/net/ipv4/ipvs/ip_vs_proto_esp.c deleted file mode 100644 index 21d70c8ffa54..000000000000 --- a/net/ipv4/ipvs/ip_vs_proto_esp.c +++ /dev/null | |||
@@ -1,176 +0,0 @@ | |||
1 | /* | ||
2 | * ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS | ||
3 | * | ||
4 | * Authors: Julian Anastasov <ja@ssi.bg>, February 2002 | ||
5 | * Wensong Zhang <wensong@linuxvirtualserver.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation; | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/in.h> | ||
14 | #include <linux/ip.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/netfilter.h> | ||
18 | #include <linux/netfilter_ipv4.h> | ||
19 | |||
20 | #include <net/ip_vs.h> | ||
21 | |||
22 | |||
23 | /* TODO: | ||
24 | |||
25 | struct isakmp_hdr { | ||
26 | __u8 icookie[8]; | ||
27 | __u8 rcookie[8]; | ||
28 | __u8 np; | ||
29 | __u8 version; | ||
30 | __u8 xchgtype; | ||
31 | __u8 flags; | ||
32 | __u32 msgid; | ||
33 | __u32 length; | ||
34 | }; | ||
35 | |||
36 | */ | ||
37 | |||
38 | #define PORT_ISAKMP 500 | ||
39 | |||
40 | |||
41 | static struct ip_vs_conn * | ||
42 | esp_conn_in_get(const struct sk_buff *skb, | ||
43 | struct ip_vs_protocol *pp, | ||
44 | const struct iphdr *iph, | ||
45 | unsigned int proto_off, | ||
46 | int inverse) | ||
47 | { | ||
48 | struct ip_vs_conn *cp; | ||
49 | |||
50 | if (likely(!inverse)) { | ||
51 | cp = ip_vs_conn_in_get(IPPROTO_UDP, | ||
52 | iph->saddr, | ||
53 | htons(PORT_ISAKMP), | ||
54 | iph->daddr, | ||
55 | htons(PORT_ISAKMP)); | ||
56 | } else { | ||
57 | cp = ip_vs_conn_in_get(IPPROTO_UDP, | ||
58 | iph->daddr, | ||
59 | htons(PORT_ISAKMP), | ||
60 | iph->saddr, | ||
61 | htons(PORT_ISAKMP)); | ||
62 | } | ||
63 | |||
64 | if (!cp) { | ||
65 | /* | ||
66 | * We are not sure if the packet is from our | ||
67 | * service, so our conn_schedule hook should return NF_ACCEPT | ||
68 | */ | ||
69 | IP_VS_DBG(12, "Unknown ISAKMP entry for outin packet " | ||
70 | "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n", | ||
71 | inverse ? "ICMP+" : "", | ||
72 | pp->name, | ||
73 | NIPQUAD(iph->saddr), | ||
74 | NIPQUAD(iph->daddr)); | ||
75 | } | ||
76 | |||
77 | return cp; | ||
78 | } | ||
79 | |||
80 | |||
81 | static struct ip_vs_conn * | ||
82 | esp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
83 | const struct iphdr *iph, unsigned int proto_off, int inverse) | ||
84 | { | ||
85 | struct ip_vs_conn *cp; | ||
86 | |||
87 | if (likely(!inverse)) { | ||
88 | cp = ip_vs_conn_out_get(IPPROTO_UDP, | ||
89 | iph->saddr, | ||
90 | htons(PORT_ISAKMP), | ||
91 | iph->daddr, | ||
92 | htons(PORT_ISAKMP)); | ||
93 | } else { | ||
94 | cp = ip_vs_conn_out_get(IPPROTO_UDP, | ||
95 | iph->daddr, | ||
96 | htons(PORT_ISAKMP), | ||
97 | iph->saddr, | ||
98 | htons(PORT_ISAKMP)); | ||
99 | } | ||
100 | |||
101 | if (!cp) { | ||
102 | IP_VS_DBG(12, "Unknown ISAKMP entry for inout packet " | ||
103 | "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n", | ||
104 | inverse ? "ICMP+" : "", | ||
105 | pp->name, | ||
106 | NIPQUAD(iph->saddr), | ||
107 | NIPQUAD(iph->daddr)); | ||
108 | } | ||
109 | |||
110 | return cp; | ||
111 | } | ||
112 | |||
113 | |||
114 | static int | ||
115 | esp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp, | ||
116 | int *verdict, struct ip_vs_conn **cpp) | ||
117 | { | ||
118 | /* | ||
119 | * ESP is only related traffic. Pass the packet to IP stack. | ||
120 | */ | ||
121 | *verdict = NF_ACCEPT; | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | |||
126 | static void | ||
127 | esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb, | ||
128 | int offset, const char *msg) | ||
129 | { | ||
130 | char buf[256]; | ||
131 | struct iphdr _iph, *ih; | ||
132 | |||
133 | ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); | ||
134 | if (ih == NULL) | ||
135 | sprintf(buf, "%s TRUNCATED", pp->name); | ||
136 | else | ||
137 | sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u", | ||
138 | pp->name, NIPQUAD(ih->saddr), | ||
139 | NIPQUAD(ih->daddr)); | ||
140 | |||
141 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | ||
142 | } | ||
143 | |||
144 | |||
145 | static void esp_init(struct ip_vs_protocol *pp) | ||
146 | { | ||
147 | /* nothing to do now */ | ||
148 | } | ||
149 | |||
150 | |||
151 | static void esp_exit(struct ip_vs_protocol *pp) | ||
152 | { | ||
153 | /* nothing to do now */ | ||
154 | } | ||
155 | |||
156 | |||
157 | struct ip_vs_protocol ip_vs_protocol_esp = { | ||
158 | .name = "ESP", | ||
159 | .protocol = IPPROTO_ESP, | ||
160 | .num_states = 1, | ||
161 | .dont_defrag = 1, | ||
162 | .init = esp_init, | ||
163 | .exit = esp_exit, | ||
164 | .conn_schedule = esp_conn_schedule, | ||
165 | .conn_in_get = esp_conn_in_get, | ||
166 | .conn_out_get = esp_conn_out_get, | ||
167 | .snat_handler = NULL, | ||
168 | .dnat_handler = NULL, | ||
169 | .csum_check = NULL, | ||
170 | .state_transition = NULL, | ||
171 | .register_app = NULL, | ||
172 | .unregister_app = NULL, | ||
173 | .app_conn_bind = NULL, | ||
174 | .debug_packet = esp_debug_packet, | ||
175 | .timeout_change = NULL, /* ISAKMP */ | ||
176 | }; | ||
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c index d0ea467986a0..dd4566ea2bff 100644 --- a/net/ipv4/ipvs/ip_vs_proto_tcp.c +++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/tcp.h> /* for tcphdr */ | 18 | #include <linux/tcp.h> /* for tcphdr */ |
19 | #include <net/ip.h> | 19 | #include <net/ip.h> |
20 | #include <net/tcp.h> /* for csum_tcpudp_magic */ | 20 | #include <net/tcp.h> /* for csum_tcpudp_magic */ |
21 | #include <net/ip6_checksum.h> | ||
21 | #include <linux/netfilter.h> | 22 | #include <linux/netfilter.h> |
22 | #include <linux/netfilter_ipv4.h> | 23 | #include <linux/netfilter_ipv4.h> |
23 | 24 | ||
@@ -25,8 +26,9 @@ | |||
25 | 26 | ||
26 | 27 | ||
27 | static struct ip_vs_conn * | 28 | static struct ip_vs_conn * |
28 | tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | 29 | tcp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, |
29 | const struct iphdr *iph, unsigned int proto_off, int inverse) | 30 | const struct ip_vs_iphdr *iph, unsigned int proto_off, |
31 | int inverse) | ||
30 | { | 32 | { |
31 | __be16 _ports[2], *pptr; | 33 | __be16 _ports[2], *pptr; |
32 | 34 | ||
@@ -35,19 +37,20 @@ tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
35 | return NULL; | 37 | return NULL; |
36 | 38 | ||
37 | if (likely(!inverse)) { | 39 | if (likely(!inverse)) { |
38 | return ip_vs_conn_in_get(iph->protocol, | 40 | return ip_vs_conn_in_get(af, iph->protocol, |
39 | iph->saddr, pptr[0], | 41 | &iph->saddr, pptr[0], |
40 | iph->daddr, pptr[1]); | 42 | &iph->daddr, pptr[1]); |
41 | } else { | 43 | } else { |
42 | return ip_vs_conn_in_get(iph->protocol, | 44 | return ip_vs_conn_in_get(af, iph->protocol, |
43 | iph->daddr, pptr[1], | 45 | &iph->daddr, pptr[1], |
44 | iph->saddr, pptr[0]); | 46 | &iph->saddr, pptr[0]); |
45 | } | 47 | } |
46 | } | 48 | } |
47 | 49 | ||
48 | static struct ip_vs_conn * | 50 | static struct ip_vs_conn * |
49 | tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | 51 | tcp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, |
50 | const struct iphdr *iph, unsigned int proto_off, int inverse) | 52 | const struct ip_vs_iphdr *iph, unsigned int proto_off, |
53 | int inverse) | ||
51 | { | 54 | { |
52 | __be16 _ports[2], *pptr; | 55 | __be16 _ports[2], *pptr; |
53 | 56 | ||
@@ -56,34 +59,36 @@ tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
56 | return NULL; | 59 | return NULL; |
57 | 60 | ||
58 | if (likely(!inverse)) { | 61 | if (likely(!inverse)) { |
59 | return ip_vs_conn_out_get(iph->protocol, | 62 | return ip_vs_conn_out_get(af, iph->protocol, |
60 | iph->saddr, pptr[0], | 63 | &iph->saddr, pptr[0], |
61 | iph->daddr, pptr[1]); | 64 | &iph->daddr, pptr[1]); |
62 | } else { | 65 | } else { |
63 | return ip_vs_conn_out_get(iph->protocol, | 66 | return ip_vs_conn_out_get(af, iph->protocol, |
64 | iph->daddr, pptr[1], | 67 | &iph->daddr, pptr[1], |
65 | iph->saddr, pptr[0]); | 68 | &iph->saddr, pptr[0]); |
66 | } | 69 | } |
67 | } | 70 | } |
68 | 71 | ||
69 | 72 | ||
70 | static int | 73 | static int |
71 | tcp_conn_schedule(struct sk_buff *skb, | 74 | tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, |
72 | struct ip_vs_protocol *pp, | ||
73 | int *verdict, struct ip_vs_conn **cpp) | 75 | int *verdict, struct ip_vs_conn **cpp) |
74 | { | 76 | { |
75 | struct ip_vs_service *svc; | 77 | struct ip_vs_service *svc; |
76 | struct tcphdr _tcph, *th; | 78 | struct tcphdr _tcph, *th; |
79 | struct ip_vs_iphdr iph; | ||
77 | 80 | ||
78 | th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); | 81 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
82 | |||
83 | th = skb_header_pointer(skb, iph.len, sizeof(_tcph), &_tcph); | ||
79 | if (th == NULL) { | 84 | if (th == NULL) { |
80 | *verdict = NF_DROP; | 85 | *verdict = NF_DROP; |
81 | return 0; | 86 | return 0; |
82 | } | 87 | } |
83 | 88 | ||
84 | if (th->syn && | 89 | if (th->syn && |
85 | (svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol, | 90 | (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr, |
86 | ip_hdr(skb)->daddr, th->dest))) { | 91 | th->dest))) { |
87 | if (ip_vs_todrop()) { | 92 | if (ip_vs_todrop()) { |
88 | /* | 93 | /* |
89 | * It seems that we are very loaded. | 94 | * It seems that we are very loaded. |
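tcp_conn_schedule() now reads the destination address and the transport-header offset through an address-family-neutral descriptor filled by ip_vs_fill_iphdr(), instead of poking ip_hdr(skb) directly. A rough userspace sketch of such a descriptor; the struct layout and helper name below are stand-ins, not the kernel's ip_vs_iphdr:

#include <arpa/inet.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <stdio.h>

struct iph_desc {
        int             len;            /* offset of the transport header */
        unsigned char   protocol;       /* L4 protocol / next header */
        union {
                struct in_addr  v4;
                struct in6_addr v6;
        } saddr, daddr;
};

static void fill_iphdr(int af, const void *nh, struct iph_desc *d)
{
        if (af == AF_INET6) {
                const struct ip6_hdr *h = nh;

                d->len = sizeof(*h);    /* no extension-header walk here */
                d->protocol = h->ip6_nxt;
                d->saddr.v6 = h->ip6_src;
                d->daddr.v6 = h->ip6_dst;
        } else {
                const struct ip *h = nh;

                d->len = h->ip_hl * 4;
                d->protocol = h->ip_p;
                d->saddr.v4 = h->ip_src;
                d->daddr.v4 = h->ip_dst;
        }
}

int main(void)
{
        struct ip h = { .ip_v = 4, .ip_hl = 5, .ip_p = IPPROTO_TCP };
        struct iph_desc d;

        inet_pton(AF_INET, "192.0.2.1", &h.ip_src);
        inet_pton(AF_INET, "198.51.100.2", &h.ip_dst);
        fill_iphdr(AF_INET, &h, &d);
        printf("proto %u, transport header at offset %d\n", d.protocol, d.len);
        return 0;
}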
@@ -110,22 +115,62 @@ tcp_conn_schedule(struct sk_buff *skb, | |||
110 | 115 | ||
111 | 116 | ||
112 | static inline void | 117 | static inline void |
113 | tcp_fast_csum_update(struct tcphdr *tcph, __be32 oldip, __be32 newip, | 118 | tcp_fast_csum_update(int af, struct tcphdr *tcph, |
119 | const union nf_inet_addr *oldip, | ||
120 | const union nf_inet_addr *newip, | ||
114 | __be16 oldport, __be16 newport) | 121 | __be16 oldport, __be16 newport) |
115 | { | 122 | { |
123 | #ifdef CONFIG_IP_VS_IPV6 | ||
124 | if (af == AF_INET6) | ||
125 | tcph->check = | ||
126 | csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, | ||
127 | ip_vs_check_diff2(oldport, newport, | ||
128 | ~csum_unfold(tcph->check)))); | ||
129 | else | ||
130 | #endif | ||
116 | tcph->check = | 131 | tcph->check = |
117 | csum_fold(ip_vs_check_diff4(oldip, newip, | 132 | csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, |
118 | ip_vs_check_diff2(oldport, newport, | 133 | ip_vs_check_diff2(oldport, newport, |
119 | ~csum_unfold(tcph->check)))); | 134 | ~csum_unfold(tcph->check)))); |
120 | } | 135 | } |
121 | 136 | ||
122 | 137 | ||
138 | static inline void | ||
139 | tcp_partial_csum_update(int af, struct tcphdr *tcph, | ||
140 | const union nf_inet_addr *oldip, | ||
141 | const union nf_inet_addr *newip, | ||
142 | __be16 oldlen, __be16 newlen) | ||
143 | { | ||
144 | #ifdef CONFIG_IP_VS_IPV6 | ||
145 | if (af == AF_INET6) | ||
146 | tcph->check = | ||
147 | csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, | ||
148 | ip_vs_check_diff2(oldlen, newlen, | ||
149 | ~csum_unfold(tcph->check)))); | ||
150 | else | ||
151 | #endif | ||
152 | tcph->check = | ||
153 | csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, | ||
154 | ip_vs_check_diff2(oldlen, newlen, | ||
155 | ~csum_unfold(tcph->check)))); | ||
156 | } | ||
157 | |||
158 | |||
123 | static int | 159 | static int |
124 | tcp_snat_handler(struct sk_buff *skb, | 160 | tcp_snat_handler(struct sk_buff *skb, |
125 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 161 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) |
126 | { | 162 | { |
127 | struct tcphdr *tcph; | 163 | struct tcphdr *tcph; |
128 | const unsigned int tcphoff = ip_hdrlen(skb); | 164 | unsigned int tcphoff; |
165 | int oldlen; | ||
166 | |||
167 | #ifdef CONFIG_IP_VS_IPV6 | ||
168 | if (cp->af == AF_INET6) | ||
169 | tcphoff = sizeof(struct ipv6hdr); | ||
170 | else | ||
171 | #endif | ||
172 | tcphoff = ip_hdrlen(skb); | ||
173 | oldlen = skb->len - tcphoff; | ||
129 | 174 | ||
130 | /* csum_check requires unshared skb */ | 175 | /* csum_check requires unshared skb */ |
131 | if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) | 176 | if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) |
@@ -133,7 +178,7 @@ tcp_snat_handler(struct sk_buff *skb, | |||
133 | 178 | ||
134 | if (unlikely(cp->app != NULL)) { | 179 | if (unlikely(cp->app != NULL)) { |
135 | /* Some checks before mangling */ | 180 | /* Some checks before mangling */ |
136 | if (pp->csum_check && !pp->csum_check(skb, pp)) | 181 | if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) |
137 | return 0; | 182 | return 0; |
138 | 183 | ||
139 | /* Call application helper if needed */ | 184 | /* Call application helper if needed */ |
@@ -141,13 +186,17 @@ tcp_snat_handler(struct sk_buff *skb, | |||
141 | return 0; | 186 | return 0; |
142 | } | 187 | } |
143 | 188 | ||
144 | tcph = (void *)ip_hdr(skb) + tcphoff; | 189 | tcph = (void *)skb_network_header(skb) + tcphoff; |
145 | tcph->source = cp->vport; | 190 | tcph->source = cp->vport; |
146 | 191 | ||
147 | /* Adjust TCP checksums */ | 192 | /* Adjust TCP checksums */ |
148 | if (!cp->app) { | 193 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
194 | tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, | ||
195 | htonl(oldlen), | ||
196 | htonl(skb->len - tcphoff)); | ||
197 | } else if (!cp->app) { | ||
149 | /* Only port and addr are changed, do fast csum update */ | 198 | /* Only port and addr are changed, do fast csum update */ |
150 | tcp_fast_csum_update(tcph, cp->daddr, cp->vaddr, | 199 | tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, |
151 | cp->dport, cp->vport); | 200 | cp->dport, cp->vport); |
152 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 201 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
153 | skb->ip_summed = CHECKSUM_NONE; | 202 | skb->ip_summed = CHECKSUM_NONE; |
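tcp_fast_csum_update() and tcp_partial_csum_update() are incremental updates in the RFC 1624 style: only the difference between the old and new words is folded into the stored checksum, so the payload never has to be re-summed. A self-contained sketch of that arithmetic for a single changed 16-bit word, using plain one's-complement math in place of the kernel's ip_vs_check_diff*/csum_fold helpers:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
static uint16_t fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* RFC 1624, eqn. 3: HC' = ~(~HC + ~m + m') for one changed word. */
static uint16_t csum_update16(uint16_t check, uint16_t old, uint16_t new_)
{
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~old;
        sum += new_;
        return (uint16_t)~fold(sum);
}

int main(void)
{
        uint16_t check = 0x1c46;                          /* example stored checksum */
        uint16_t updated = csum_update16(check, 0x01bb, 0x1f90);  /* port 443 -> 8080 */

        printf("old check 0x%04x, new check 0x%04x\n", check, updated);
        return 0;
}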
@@ -155,9 +204,20 @@ tcp_snat_handler(struct sk_buff *skb, | |||
155 | /* full checksum calculation */ | 204 | /* full checksum calculation */ |
156 | tcph->check = 0; | 205 | tcph->check = 0; |
157 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); | 206 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); |
158 | tcph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, | 207 | #ifdef CONFIG_IP_VS_IPV6 |
159 | skb->len - tcphoff, | 208 | if (cp->af == AF_INET6) |
160 | cp->protocol, skb->csum); | 209 | tcph->check = csum_ipv6_magic(&cp->vaddr.in6, |
210 | &cp->caddr.in6, | ||
211 | skb->len - tcphoff, | ||
212 | cp->protocol, skb->csum); | ||
213 | else | ||
214 | #endif | ||
215 | tcph->check = csum_tcpudp_magic(cp->vaddr.ip, | ||
216 | cp->caddr.ip, | ||
217 | skb->len - tcphoff, | ||
218 | cp->protocol, | ||
219 | skb->csum); | ||
220 | |||
161 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", | 221 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", |
162 | pp->name, tcph->check, | 222 | pp->name, tcph->check, |
163 | (char*)&(tcph->check) - (char*)tcph); | 223 | (char*)&(tcph->check) - (char*)tcph); |
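When an application helper has rewritten the payload, the checksum is rebuilt from scratch over the pseudo-header plus the whole segment; only the pseudo-header differs between csum_tcpudp_magic() for IPv4 and csum_ipv6_magic() for IPv6. A userspace sketch of the IPv4 pseudo-header sum, with illustrative helper names and example addresses:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One's-complement sum over a buffer, continuing from 'sum'. */
static uint32_t csum_partial(const void *buf, size_t len, uint32_t sum)
{
        const uint8_t *p = buf;

        while (len > 1) {
                sum += (uint32_t)p[0] << 8 | p[1];
                p += 2;
                len -= 2;
        }
        if (len)
                sum += (uint32_t)p[0] << 8;
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return sum;
}

/* IPv4 TCP/UDP pseudo-header: saddr, daddr, zero, protocol, length. */
static uint16_t tcpudp_csum_v4(struct in_addr saddr, struct in_addr daddr,
                               uint8_t proto, const void *seg, size_t len)
{
        uint8_t ph[12];

        memcpy(ph, &saddr, 4);
        memcpy(ph + 4, &daddr, 4);
        ph[8]  = 0;
        ph[9]  = proto;
        ph[10] = len >> 8;
        ph[11] = len & 0xff;

        return (uint16_t)~csum_partial(seg, len, csum_partial(ph, sizeof(ph), 0));
}

int main(void)
{
        struct in_addr s, d;
        uint8_t segment[20] = { 0 };    /* TCP header with its checksum field zeroed */

        inet_pton(AF_INET, "192.0.2.1", &s);
        inet_pton(AF_INET, "198.51.100.2", &d);
        printf("checksum: 0x%04x\n",
               tcpudp_csum_v4(s, d, 6 /* IPPROTO_TCP */, segment, sizeof(segment)));
        return 0;
}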
@@ -171,7 +231,16 @@ tcp_dnat_handler(struct sk_buff *skb, | |||
171 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 231 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) |
172 | { | 232 | { |
173 | struct tcphdr *tcph; | 233 | struct tcphdr *tcph; |
174 | const unsigned int tcphoff = ip_hdrlen(skb); | 234 | unsigned int tcphoff; |
235 | int oldlen; | ||
236 | |||
237 | #ifdef CONFIG_IP_VS_IPV6 | ||
238 | if (cp->af == AF_INET6) | ||
239 | tcphoff = sizeof(struct ipv6hdr); | ||
240 | else | ||
241 | #endif | ||
242 | tcphoff = ip_hdrlen(skb); | ||
243 | oldlen = skb->len - tcphoff; | ||
175 | 244 | ||
176 | /* csum_check requires unshared skb */ | 245 | /* csum_check requires unshared skb */ |
177 | if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) | 246 | if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) |
@@ -179,7 +248,7 @@ tcp_dnat_handler(struct sk_buff *skb, | |||
179 | 248 | ||
180 | if (unlikely(cp->app != NULL)) { | 249 | if (unlikely(cp->app != NULL)) { |
181 | /* Some checks before mangling */ | 250 | /* Some checks before mangling */ |
182 | if (pp->csum_check && !pp->csum_check(skb, pp)) | 251 | if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) |
183 | return 0; | 252 | return 0; |
184 | 253 | ||
185 | /* | 254 | /* |
@@ -190,15 +259,19 @@ tcp_dnat_handler(struct sk_buff *skb, | |||
190 | return 0; | 259 | return 0; |
191 | } | 260 | } |
192 | 261 | ||
193 | tcph = (void *)ip_hdr(skb) + tcphoff; | 262 | tcph = (void *)skb_network_header(skb) + tcphoff; |
194 | tcph->dest = cp->dport; | 263 | tcph->dest = cp->dport; |
195 | 264 | ||
196 | /* | 265 | /* |
197 | * Adjust TCP checksums | 266 | * Adjust TCP checksums |
198 | */ | 267 | */ |
199 | if (!cp->app) { | 268 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
269 | tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, | ||
270 | htonl(oldlen), | ||
271 | htonl(skb->len - tcphoff)); | ||
272 | } else if (!cp->app) { | ||
200 | /* Only port and addr are changed, do fast csum update */ | 273 | /* Only port and addr are changed, do fast csum update */ |
201 | tcp_fast_csum_update(tcph, cp->vaddr, cp->daddr, | 274 | tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr, |
202 | cp->vport, cp->dport); | 275 | cp->vport, cp->dport); |
203 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 276 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
204 | skb->ip_summed = CHECKSUM_NONE; | 277 | skb->ip_summed = CHECKSUM_NONE; |
@@ -206,9 +279,19 @@ tcp_dnat_handler(struct sk_buff *skb, | |||
206 | /* full checksum calculation */ | 279 | /* full checksum calculation */ |
207 | tcph->check = 0; | 280 | tcph->check = 0; |
208 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); | 281 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); |
209 | tcph->check = csum_tcpudp_magic(cp->caddr, cp->daddr, | 282 | #ifdef CONFIG_IP_VS_IPV6 |
210 | skb->len - tcphoff, | 283 | if (cp->af == AF_INET6) |
211 | cp->protocol, skb->csum); | 284 | tcph->check = csum_ipv6_magic(&cp->caddr.in6, |
285 | &cp->daddr.in6, | ||
286 | skb->len - tcphoff, | ||
287 | cp->protocol, skb->csum); | ||
288 | else | ||
289 | #endif | ||
290 | tcph->check = csum_tcpudp_magic(cp->caddr.ip, | ||
291 | cp->daddr.ip, | ||
292 | skb->len - tcphoff, | ||
293 | cp->protocol, | ||
294 | skb->csum); | ||
212 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 295 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
213 | } | 296 | } |
214 | return 1; | 297 | return 1; |
@@ -216,21 +299,43 @@ tcp_dnat_handler(struct sk_buff *skb, | |||
216 | 299 | ||
217 | 300 | ||
218 | static int | 301 | static int |
219 | tcp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) | 302 | tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) |
220 | { | 303 | { |
221 | const unsigned int tcphoff = ip_hdrlen(skb); | 304 | unsigned int tcphoff; |
305 | |||
306 | #ifdef CONFIG_IP_VS_IPV6 | ||
307 | if (af == AF_INET6) | ||
308 | tcphoff = sizeof(struct ipv6hdr); | ||
309 | else | ||
310 | #endif | ||
311 | tcphoff = ip_hdrlen(skb); | ||
222 | 312 | ||
223 | switch (skb->ip_summed) { | 313 | switch (skb->ip_summed) { |
224 | case CHECKSUM_NONE: | 314 | case CHECKSUM_NONE: |
225 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); | 315 | skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); |
226 | case CHECKSUM_COMPLETE: | 316 | case CHECKSUM_COMPLETE: |
227 | if (csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, | 317 | #ifdef CONFIG_IP_VS_IPV6 |
228 | skb->len - tcphoff, | 318 | if (af == AF_INET6) { |
229 | ip_hdr(skb)->protocol, skb->csum)) { | 319 | if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
230 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | 320 | &ipv6_hdr(skb)->daddr, |
231 | "Failed checksum for"); | 321 | skb->len - tcphoff, |
232 | return 0; | 322 | ipv6_hdr(skb)->nexthdr, |
233 | } | 323 | skb->csum)) { |
324 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | ||
325 | "Failed checksum for"); | ||
326 | return 0; | ||
327 | } | ||
328 | } else | ||
329 | #endif | ||
330 | if (csum_tcpudp_magic(ip_hdr(skb)->saddr, | ||
331 | ip_hdr(skb)->daddr, | ||
332 | skb->len - tcphoff, | ||
333 | ip_hdr(skb)->protocol, | ||
334 | skb->csum)) { | ||
335 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | ||
336 | "Failed checksum for"); | ||
337 | return 0; | ||
338 | } | ||
234 | break; | 339 | break; |
235 | default: | 340 | default: |
236 | /* No need to checksum. */ | 341 | /* No need to checksum. */ |
@@ -419,19 +524,23 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp, | |||
419 | if (new_state != cp->state) { | 524 | if (new_state != cp->state) { |
420 | struct ip_vs_dest *dest = cp->dest; | 525 | struct ip_vs_dest *dest = cp->dest; |
421 | 526 | ||
422 | IP_VS_DBG(8, "%s %s [%c%c%c%c] %u.%u.%u.%u:%d->" | 527 | IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->" |
423 | "%u.%u.%u.%u:%d state: %s->%s conn->refcnt:%d\n", | 528 | "%s:%d state: %s->%s conn->refcnt:%d\n", |
424 | pp->name, | 529 | pp->name, |
425 | (state_off==TCP_DIR_OUTPUT)?"output ":"input ", | 530 | ((state_off == TCP_DIR_OUTPUT) ? |
426 | th->syn? 'S' : '.', | 531 | "output " : "input "), |
427 | th->fin? 'F' : '.', | 532 | th->syn ? 'S' : '.', |
428 | th->ack? 'A' : '.', | 533 | th->fin ? 'F' : '.', |
429 | th->rst? 'R' : '.', | 534 | th->ack ? 'A' : '.', |
430 | NIPQUAD(cp->daddr), ntohs(cp->dport), | 535 | th->rst ? 'R' : '.', |
431 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 536 | IP_VS_DBG_ADDR(cp->af, &cp->daddr), |
432 | tcp_state_name(cp->state), | 537 | ntohs(cp->dport), |
433 | tcp_state_name(new_state), | 538 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), |
434 | atomic_read(&cp->refcnt)); | 539 | ntohs(cp->cport), |
540 | tcp_state_name(cp->state), | ||
541 | tcp_state_name(new_state), | ||
542 | atomic_read(&cp->refcnt)); | ||
543 | |||
435 | if (dest) { | 544 | if (dest) { |
436 | if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && | 545 | if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && |
437 | (new_state != IP_VS_TCP_S_ESTABLISHED)) { | 546 | (new_state != IP_VS_TCP_S_ESTABLISHED)) { |
@@ -461,7 +570,13 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction, | |||
461 | { | 570 | { |
462 | struct tcphdr _tcph, *th; | 571 | struct tcphdr _tcph, *th; |
463 | 572 | ||
464 | th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); | 573 | #ifdef CONFIG_IP_VS_IPV6 |
574 | int ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); | ||
575 | #else | ||
576 | int ihl = ip_hdrlen(skb); | ||
577 | #endif | ||
578 | |||
579 | th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph); | ||
465 | if (th == NULL) | 580 | if (th == NULL) |
466 | return 0; | 581 | return 0; |
467 | 582 | ||
@@ -546,12 +661,15 @@ tcp_app_conn_bind(struct ip_vs_conn *cp) | |||
546 | break; | 661 | break; |
547 | spin_unlock(&tcp_app_lock); | 662 | spin_unlock(&tcp_app_lock); |
548 | 663 | ||
549 | IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" | 664 | IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->" |
550 | "%u.%u.%u.%u:%u to app %s on port %u\n", | 665 | "%s:%u to app %s on port %u\n", |
551 | __func__, | 666 | __func__, |
552 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 667 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), |
553 | NIPQUAD(cp->vaddr), ntohs(cp->vport), | 668 | ntohs(cp->cport), |
554 | inc->name, ntohs(inc->port)); | 669 | IP_VS_DBG_ADDR(cp->af, &cp->vaddr), |
670 | ntohs(cp->vport), | ||
671 | inc->name, ntohs(inc->port)); | ||
672 | |||
555 | cp->app = inc; | 673 | cp->app = inc; |
556 | if (inc->init_conn) | 674 | if (inc->init_conn) |
557 | result = inc->init_conn(inc, cp); | 675 | result = inc->init_conn(inc, cp); |
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c index c6be5d56823f..6eb6039d6343 100644 --- a/net/ipv4/ipvs/ip_vs_proto_udp.c +++ b/net/ipv4/ipvs/ip_vs_proto_udp.c | |||
@@ -22,10 +22,12 @@ | |||
22 | 22 | ||
23 | #include <net/ip_vs.h> | 23 | #include <net/ip_vs.h> |
24 | #include <net/ip.h> | 24 | #include <net/ip.h> |
25 | #include <net/ip6_checksum.h> | ||
25 | 26 | ||
26 | static struct ip_vs_conn * | 27 | static struct ip_vs_conn * |
27 | udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | 28 | udp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, |
28 | const struct iphdr *iph, unsigned int proto_off, int inverse) | 29 | const struct ip_vs_iphdr *iph, unsigned int proto_off, |
30 | int inverse) | ||
29 | { | 31 | { |
30 | struct ip_vs_conn *cp; | 32 | struct ip_vs_conn *cp; |
31 | __be16 _ports[2], *pptr; | 33 | __be16 _ports[2], *pptr; |
@@ -35,13 +37,13 @@ udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
35 | return NULL; | 37 | return NULL; |
36 | 38 | ||
37 | if (likely(!inverse)) { | 39 | if (likely(!inverse)) { |
38 | cp = ip_vs_conn_in_get(iph->protocol, | 40 | cp = ip_vs_conn_in_get(af, iph->protocol, |
39 | iph->saddr, pptr[0], | 41 | &iph->saddr, pptr[0], |
40 | iph->daddr, pptr[1]); | 42 | &iph->daddr, pptr[1]); |
41 | } else { | 43 | } else { |
42 | cp = ip_vs_conn_in_get(iph->protocol, | 44 | cp = ip_vs_conn_in_get(af, iph->protocol, |
43 | iph->daddr, pptr[1], | 45 | &iph->daddr, pptr[1], |
44 | iph->saddr, pptr[0]); | 46 | &iph->saddr, pptr[0]); |
45 | } | 47 | } |
46 | 48 | ||
47 | return cp; | 49 | return cp; |
@@ -49,25 +51,25 @@ udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
49 | 51 | ||
50 | 52 | ||
51 | static struct ip_vs_conn * | 53 | static struct ip_vs_conn * |
52 | udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | 54 | udp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, |
53 | const struct iphdr *iph, unsigned int proto_off, int inverse) | 55 | const struct ip_vs_iphdr *iph, unsigned int proto_off, |
56 | int inverse) | ||
54 | { | 57 | { |
55 | struct ip_vs_conn *cp; | 58 | struct ip_vs_conn *cp; |
56 | __be16 _ports[2], *pptr; | 59 | __be16 _ports[2], *pptr; |
57 | 60 | ||
58 | pptr = skb_header_pointer(skb, ip_hdrlen(skb), | 61 | pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); |
59 | sizeof(_ports), _ports); | ||
60 | if (pptr == NULL) | 62 | if (pptr == NULL) |
61 | return NULL; | 63 | return NULL; |
62 | 64 | ||
63 | if (likely(!inverse)) { | 65 | if (likely(!inverse)) { |
64 | cp = ip_vs_conn_out_get(iph->protocol, | 66 | cp = ip_vs_conn_out_get(af, iph->protocol, |
65 | iph->saddr, pptr[0], | 67 | &iph->saddr, pptr[0], |
66 | iph->daddr, pptr[1]); | 68 | &iph->daddr, pptr[1]); |
67 | } else { | 69 | } else { |
68 | cp = ip_vs_conn_out_get(iph->protocol, | 70 | cp = ip_vs_conn_out_get(af, iph->protocol, |
69 | iph->daddr, pptr[1], | 71 | &iph->daddr, pptr[1], |
70 | iph->saddr, pptr[0]); | 72 | &iph->saddr, pptr[0]); |
71 | } | 73 | } |
72 | 74 | ||
73 | return cp; | 75 | return cp; |
@@ -75,21 +77,24 @@ udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
75 | 77 | ||
76 | 78 | ||
77 | static int | 79 | static int |
78 | udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp, | 80 | udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, |
79 | int *verdict, struct ip_vs_conn **cpp) | 81 | int *verdict, struct ip_vs_conn **cpp) |
80 | { | 82 | { |
81 | struct ip_vs_service *svc; | 83 | struct ip_vs_service *svc; |
82 | struct udphdr _udph, *uh; | 84 | struct udphdr _udph, *uh; |
85 | struct ip_vs_iphdr iph; | ||
86 | |||
87 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
83 | 88 | ||
84 | uh = skb_header_pointer(skb, ip_hdrlen(skb), | 89 | uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph); |
85 | sizeof(_udph), &_udph); | ||
86 | if (uh == NULL) { | 90 | if (uh == NULL) { |
87 | *verdict = NF_DROP; | 91 | *verdict = NF_DROP; |
88 | return 0; | 92 | return 0; |
89 | } | 93 | } |
90 | 94 | ||
91 | if ((svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol, | 95 | svc = ip_vs_service_get(af, skb->mark, iph.protocol, |
92 | ip_hdr(skb)->daddr, uh->dest))) { | 96 | &iph.daddr, uh->dest); |
97 | if (svc) { | ||
93 | if (ip_vs_todrop()) { | 98 | if (ip_vs_todrop()) { |
94 | /* | 99 | /* |
95 | * It seems that we are very loaded. | 100 | * It seems that we are very loaded. |
@@ -116,23 +121,63 @@ udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
116 | 121 | ||
117 | 122 | ||
118 | static inline void | 123 | static inline void |
119 | udp_fast_csum_update(struct udphdr *uhdr, __be32 oldip, __be32 newip, | 124 | udp_fast_csum_update(int af, struct udphdr *uhdr, |
125 | const union nf_inet_addr *oldip, | ||
126 | const union nf_inet_addr *newip, | ||
120 | __be16 oldport, __be16 newport) | 127 | __be16 oldport, __be16 newport) |
121 | { | 128 | { |
122 | uhdr->check = | 129 | #ifdef CONFIG_IP_VS_IPV6 |
123 | csum_fold(ip_vs_check_diff4(oldip, newip, | 130 | if (af == AF_INET6) |
124 | ip_vs_check_diff2(oldport, newport, | 131 | uhdr->check = |
125 | ~csum_unfold(uhdr->check)))); | 132 | csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, |
133 | ip_vs_check_diff2(oldport, newport, | ||
134 | ~csum_unfold(uhdr->check)))); | ||
135 | else | ||
136 | #endif | ||
137 | uhdr->check = | ||
138 | csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, | ||
139 | ip_vs_check_diff2(oldport, newport, | ||
140 | ~csum_unfold(uhdr->check)))); | ||
126 | if (!uhdr->check) | 141 | if (!uhdr->check) |
127 | uhdr->check = CSUM_MANGLED_0; | 142 | uhdr->check = CSUM_MANGLED_0; |
128 | } | 143 | } |
129 | 144 | ||
145 | static inline void | ||
146 | udp_partial_csum_update(int af, struct udphdr *uhdr, | ||
147 | const union nf_inet_addr *oldip, | ||
148 | const union nf_inet_addr *newip, | ||
149 | __be16 oldlen, __be16 newlen) | ||
150 | { | ||
151 | #ifdef CONFIG_IP_VS_IPV6 | ||
152 | if (af == AF_INET6) | ||
153 | uhdr->check = | ||
154 | csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, | ||
155 | ip_vs_check_diff2(oldlen, newlen, | ||
156 | ~csum_unfold(uhdr->check)))); | ||
157 | else | ||
158 | #endif | ||
159 | uhdr->check = | ||
160 | csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, | ||
161 | ip_vs_check_diff2(oldlen, newlen, | ||
162 | ~csum_unfold(uhdr->check)))); | ||
163 | } | ||
164 | |||
165 | |||
130 | static int | 166 | static int |
131 | udp_snat_handler(struct sk_buff *skb, | 167 | udp_snat_handler(struct sk_buff *skb, |
132 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 168 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) |
133 | { | 169 | { |
134 | struct udphdr *udph; | 170 | struct udphdr *udph; |
135 | const unsigned int udphoff = ip_hdrlen(skb); | 171 | unsigned int udphoff; |
172 | int oldlen; | ||
173 | |||
174 | #ifdef CONFIG_IP_VS_IPV6 | ||
175 | if (cp->af == AF_INET6) | ||
176 | udphoff = sizeof(struct ipv6hdr); | ||
177 | else | ||
178 | #endif | ||
179 | udphoff = ip_hdrlen(skb); | ||
180 | oldlen = skb->len - udphoff; | ||
136 | 181 | ||
137 | /* csum_check requires unshared skb */ | 182 | /* csum_check requires unshared skb */ |
138 | if (!skb_make_writable(skb, udphoff+sizeof(*udph))) | 183 | if (!skb_make_writable(skb, udphoff+sizeof(*udph))) |
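udp_fast_csum_update() keeps the special case "if (!uhdr->check) uhdr->check = CSUM_MANGLED_0;" because a UDP checksum of zero on the wire means "checksum absent" for IPv4, so a legitimately computed zero must be transmitted as its one's-complement twin 0xffff. A short standalone sketch of that rule (CSUM_MANGLED_0 is simply the kernel's name for 0xffff):

#include <stdint.h>
#include <stdio.h>

#define CSUM_MANGLED_0 0xffffU   /* substitute for an all-zero UDP checksum */

static uint16_t udp_finalize_csum(uint16_t computed)
{
        /* 0x0000 and 0xffff are the same value in one's-complement
         * arithmetic, but only 0xffff may appear on the wire, because
         * 0x0000 is reserved to mean "no checksum". */
        return computed == 0 ? CSUM_MANGLED_0 : computed;
}

int main(void)
{
        printf("0x0000 -> 0x%04x\n", udp_finalize_csum(0x0000));
        printf("0x1234 -> 0x%04x\n", udp_finalize_csum(0x1234));
        return 0;
}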
@@ -140,7 +185,7 @@ udp_snat_handler(struct sk_buff *skb, | |||
140 | 185 | ||
141 | if (unlikely(cp->app != NULL)) { | 186 | if (unlikely(cp->app != NULL)) { |
142 | /* Some checks before mangling */ | 187 | /* Some checks before mangling */ |
143 | if (pp->csum_check && !pp->csum_check(skb, pp)) | 188 | if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) |
144 | return 0; | 189 | return 0; |
145 | 190 | ||
146 | /* | 191 | /* |
@@ -150,15 +195,19 @@ udp_snat_handler(struct sk_buff *skb, | |||
150 | return 0; | 195 | return 0; |
151 | } | 196 | } |
152 | 197 | ||
153 | udph = (void *)ip_hdr(skb) + udphoff; | 198 | udph = (void *)skb_network_header(skb) + udphoff; |
154 | udph->source = cp->vport; | 199 | udph->source = cp->vport; |
155 | 200 | ||
156 | /* | 201 | /* |
157 | * Adjust UDP checksums | 202 | * Adjust UDP checksums |
158 | */ | 203 | */ |
159 | if (!cp->app && (udph->check != 0)) { | 204 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
205 | udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, | ||
206 | htonl(oldlen), | ||
207 | htonl(skb->len - udphoff)); | ||
208 | } else if (!cp->app && (udph->check != 0)) { | ||
160 | /* Only port and addr are changed, do fast csum update */ | 209 | /* Only port and addr are changed, do fast csum update */ |
161 | udp_fast_csum_update(udph, cp->daddr, cp->vaddr, | 210 | udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, |
162 | cp->dport, cp->vport); | 211 | cp->dport, cp->vport); |
163 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 212 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
164 | skb->ip_summed = CHECKSUM_NONE; | 213 | skb->ip_summed = CHECKSUM_NONE; |
@@ -166,9 +215,19 @@ udp_snat_handler(struct sk_buff *skb, | |||
166 | /* full checksum calculation */ | 215 | /* full checksum calculation */ |
167 | udph->check = 0; | 216 | udph->check = 0; |
168 | skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); | 217 | skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); |
169 | udph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, | 218 | #ifdef CONFIG_IP_VS_IPV6 |
170 | skb->len - udphoff, | 219 | if (cp->af == AF_INET6) |
171 | cp->protocol, skb->csum); | 220 | udph->check = csum_ipv6_magic(&cp->vaddr.in6, |
221 | &cp->caddr.in6, | ||
222 | skb->len - udphoff, | ||
223 | cp->protocol, skb->csum); | ||
224 | else | ||
225 | #endif | ||
226 | udph->check = csum_tcpudp_magic(cp->vaddr.ip, | ||
227 | cp->caddr.ip, | ||
228 | skb->len - udphoff, | ||
229 | cp->protocol, | ||
230 | skb->csum); | ||
172 | if (udph->check == 0) | 231 | if (udph->check == 0) |
173 | udph->check = CSUM_MANGLED_0; | 232 | udph->check = CSUM_MANGLED_0; |
174 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", | 233 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", |
@@ -184,7 +243,16 @@ udp_dnat_handler(struct sk_buff *skb, | |||
184 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 243 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) |
185 | { | 244 | { |
186 | struct udphdr *udph; | 245 | struct udphdr *udph; |
187 | unsigned int udphoff = ip_hdrlen(skb); | 246 | unsigned int udphoff; |
247 | int oldlen; | ||
248 | |||
249 | #ifdef CONFIG_IP_VS_IPV6 | ||
250 | if (cp->af == AF_INET6) | ||
251 | udphoff = sizeof(struct ipv6hdr); | ||
252 | else | ||
253 | #endif | ||
254 | udphoff = ip_hdrlen(skb); | ||
255 | oldlen = skb->len - udphoff; | ||
188 | 256 | ||
189 | /* csum_check requires unshared skb */ | 257 | /* csum_check requires unshared skb */ |
190 | if (!skb_make_writable(skb, udphoff+sizeof(*udph))) | 258 | if (!skb_make_writable(skb, udphoff+sizeof(*udph))) |
@@ -192,7 +260,7 @@ udp_dnat_handler(struct sk_buff *skb, | |||
192 | 260 | ||
193 | if (unlikely(cp->app != NULL)) { | 261 | if (unlikely(cp->app != NULL)) { |
194 | /* Some checks before mangling */ | 262 | /* Some checks before mangling */ |
195 | if (pp->csum_check && !pp->csum_check(skb, pp)) | 263 | if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) |
196 | return 0; | 264 | return 0; |
197 | 265 | ||
198 | /* | 266 | /* |
@@ -203,15 +271,19 @@ udp_dnat_handler(struct sk_buff *skb, | |||
203 | return 0; | 271 | return 0; |
204 | } | 272 | } |
205 | 273 | ||
206 | udph = (void *)ip_hdr(skb) + udphoff; | 274 | udph = (void *)skb_network_header(skb) + udphoff; |
207 | udph->dest = cp->dport; | 275 | udph->dest = cp->dport; |
208 | 276 | ||
209 | /* | 277 | /* |
210 | * Adjust UDP checksums | 278 | * Adjust UDP checksums |
211 | */ | 279 | */ |
212 | if (!cp->app && (udph->check != 0)) { | 280 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
281 | udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, | ||
282 | htonl(oldlen), | ||
283 | htonl(skb->len - udphoff)); | ||
284 | } else if (!cp->app && (udph->check != 0)) { | ||
213 | /* Only port and addr are changed, do fast csum update */ | 285 | /* Only port and addr are changed, do fast csum update */ |
214 | udp_fast_csum_update(udph, cp->vaddr, cp->daddr, | 286 | udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr, |
215 | cp->vport, cp->dport); | 287 | cp->vport, cp->dport); |
216 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 288 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
217 | skb->ip_summed = CHECKSUM_NONE; | 289 | skb->ip_summed = CHECKSUM_NONE; |
@@ -219,9 +291,19 @@ udp_dnat_handler(struct sk_buff *skb, | |||
219 | /* full checksum calculation */ | 291 | /* full checksum calculation */ |
220 | udph->check = 0; | 292 | udph->check = 0; |
221 | skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); | 293 | skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); |
222 | udph->check = csum_tcpudp_magic(cp->caddr, cp->daddr, | 294 | #ifdef CONFIG_IP_VS_IPV6 |
223 | skb->len - udphoff, | 295 | if (cp->af == AF_INET6) |
224 | cp->protocol, skb->csum); | 296 | udph->check = csum_ipv6_magic(&cp->caddr.in6, |
297 | &cp->daddr.in6, | ||
298 | skb->len - udphoff, | ||
299 | cp->protocol, skb->csum); | ||
300 | else | ||
301 | #endif | ||
302 | udph->check = csum_tcpudp_magic(cp->caddr.ip, | ||
303 | cp->daddr.ip, | ||
304 | skb->len - udphoff, | ||
305 | cp->protocol, | ||
306 | skb->csum); | ||
225 | if (udph->check == 0) | 307 | if (udph->check == 0) |
226 | udph->check = CSUM_MANGLED_0; | 308 | udph->check = CSUM_MANGLED_0; |
227 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 309 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -231,10 +313,17 @@ udp_dnat_handler(struct sk_buff *skb, | |||
231 | 313 | ||
232 | 314 | ||
233 | static int | 315 | static int |
234 | udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) | 316 | udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) |
235 | { | 317 | { |
236 | struct udphdr _udph, *uh; | 318 | struct udphdr _udph, *uh; |
237 | const unsigned int udphoff = ip_hdrlen(skb); | 319 | unsigned int udphoff; |
320 | |||
321 | #ifdef CONFIG_IP_VS_IPV6 | ||
322 | if (af == AF_INET6) | ||
323 | udphoff = sizeof(struct ipv6hdr); | ||
324 | else | ||
325 | #endif | ||
326 | udphoff = ip_hdrlen(skb); | ||
238 | 327 | ||
239 | uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph); | 328 | uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph); |
240 | if (uh == NULL) | 329 | if (uh == NULL) |
@@ -246,15 +335,28 @@ udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) | |||
246 | skb->csum = skb_checksum(skb, udphoff, | 335 | skb->csum = skb_checksum(skb, udphoff, |
247 | skb->len - udphoff, 0); | 336 | skb->len - udphoff, 0); |
248 | case CHECKSUM_COMPLETE: | 337 | case CHECKSUM_COMPLETE: |
249 | if (csum_tcpudp_magic(ip_hdr(skb)->saddr, | 338 | #ifdef CONFIG_IP_VS_IPV6 |
250 | ip_hdr(skb)->daddr, | 339 | if (af == AF_INET6) { |
251 | skb->len - udphoff, | 340 | if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
252 | ip_hdr(skb)->protocol, | 341 | &ipv6_hdr(skb)->daddr, |
253 | skb->csum)) { | 342 | skb->len - udphoff, |
254 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | 343 | ipv6_hdr(skb)->nexthdr, |
255 | "Failed checksum for"); | 344 | skb->csum)) { |
256 | return 0; | 345 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, |
257 | } | 346 | "Failed checksum for"); |
347 | return 0; | ||
348 | } | ||
349 | } else | ||
350 | #endif | ||
351 | if (csum_tcpudp_magic(ip_hdr(skb)->saddr, | ||
352 | ip_hdr(skb)->daddr, | ||
353 | skb->len - udphoff, | ||
354 | ip_hdr(skb)->protocol, | ||
355 | skb->csum)) { | ||
356 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | ||
357 | "Failed checksum for"); | ||
358 | return 0; | ||
359 | } | ||
258 | break; | 360 | break; |
259 | default: | 361 | default: |
260 | /* No need to checksum. */ | 362 | /* No need to checksum. */ |
@@ -340,12 +442,15 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp) | |||
340 | break; | 442 | break; |
341 | spin_unlock(&udp_app_lock); | 443 | spin_unlock(&udp_app_lock); |
342 | 444 | ||
343 | IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" | 445 | IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->" |
344 | "%u.%u.%u.%u:%u to app %s on port %u\n", | 446 | "%s:%u to app %s on port %u\n", |
345 | __func__, | 447 | __func__, |
346 | NIPQUAD(cp->caddr), ntohs(cp->cport), | 448 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), |
347 | NIPQUAD(cp->vaddr), ntohs(cp->vport), | 449 | ntohs(cp->cport), |
348 | inc->name, ntohs(inc->port)); | 450 | IP_VS_DBG_ADDR(cp->af, &cp->vaddr), |
451 | ntohs(cp->vport), | ||
452 | inc->name, ntohs(inc->port)); | ||
453 | |||
349 | cp->app = inc; | 454 | cp->app = inc; |
350 | if (inc->init_conn) | 455 | if (inc->init_conn) |
351 | result = inc->init_conn(inc, cp); | 456 | result = inc->init_conn(inc, cp); |
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c index 358110d17e59..a22195f68ac4 100644 --- a/net/ipv4/ipvs/ip_vs_rr.c +++ b/net/ipv4/ipvs/ip_vs_rr.c | |||
@@ -32,12 +32,6 @@ static int ip_vs_rr_init_svc(struct ip_vs_service *svc) | |||
32 | } | 32 | } |
33 | 33 | ||
34 | 34 | ||
35 | static int ip_vs_rr_done_svc(struct ip_vs_service *svc) | ||
36 | { | ||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | |||
41 | static int ip_vs_rr_update_svc(struct ip_vs_service *svc) | 35 | static int ip_vs_rr_update_svc(struct ip_vs_service *svc) |
42 | { | 36 | { |
43 | svc->sched_data = &svc->destinations; | 37 | svc->sched_data = &svc->destinations; |
@@ -80,11 +74,11 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
80 | out: | 74 | out: |
81 | svc->sched_data = q; | 75 | svc->sched_data = q; |
82 | write_unlock(&svc->sched_lock); | 76 | write_unlock(&svc->sched_lock); |
83 | IP_VS_DBG(6, "RR: server %u.%u.%u.%u:%u " | 77 | IP_VS_DBG_BUF(6, "RR: server %s:%u " |
84 | "activeconns %d refcnt %d weight %d\n", | 78 | "activeconns %d refcnt %d weight %d\n", |
85 | NIPQUAD(dest->addr), ntohs(dest->port), | 79 | IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port), |
86 | atomic_read(&dest->activeconns), | 80 | atomic_read(&dest->activeconns), |
87 | atomic_read(&dest->refcnt), atomic_read(&dest->weight)); | 81 | atomic_read(&dest->refcnt), atomic_read(&dest->weight)); |
88 | 82 | ||
89 | return dest; | 83 | return dest; |
90 | } | 84 | } |
@@ -95,8 +89,10 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = { | |||
95 | .refcnt = ATOMIC_INIT(0), | 89 | .refcnt = ATOMIC_INIT(0), |
96 | .module = THIS_MODULE, | 90 | .module = THIS_MODULE, |
97 | .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list), | 91 | .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list), |
92 | #ifdef CONFIG_IP_VS_IPV6 | ||
93 | .supports_ipv6 = 1, | ||
94 | #endif | ||
98 | .init_service = ip_vs_rr_init_svc, | 95 | .init_service = ip_vs_rr_init_svc, |
99 | .done_service = ip_vs_rr_done_svc, | ||
100 | .update_service = ip_vs_rr_update_svc, | 96 | .update_service = ip_vs_rr_update_svc, |
101 | .schedule = ip_vs_rr_schedule, | 97 | .schedule = ip_vs_rr_schedule, |
102 | }; | 98 | }; |
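ip_vs_rr_schedule() keeps a cursor (svc->sched_data) into the destination list and advances it one entry per scheduling decision, skipping unusable servers; the empty init/done callbacks dropped above were pure boilerplate. A userspace sketch of the same rotation over a plain array, with illustrative service/dest structures rather than the kernel's:

#include <stdio.h>

struct dest {
        const char *name;
        int weight;                     /* 0 means "do not use" */
};

struct service {
        struct dest *dests;
        int ndests;
        int cursor;                     /* plays the role of svc->sched_data */
};

static struct dest *rr_schedule(struct service *svc)
{
        for (int tried = 0; tried < svc->ndests; tried++) {
                struct dest *d = &svc->dests[svc->cursor];

                svc->cursor = (svc->cursor + 1) % svc->ndests;
                if (d->weight > 0)
                        return d;       /* first usable server after the cursor */
        }
        return NULL;                    /* every destination unavailable */
}

int main(void)
{
        struct dest dests[] = {
                { "10.0.0.1", 1 }, { "10.0.0.2", 0 }, { "10.0.0.3", 1 },
        };
        struct service svc = { dests, 3, 0 };

        for (int i = 0; i < 5; i++)
                printf("pick %d: %s\n", i, rr_schedule(&svc)->name);
        return 0;
}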
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c index 77663d84cbd1..7d2f22f04b83 100644 --- a/net/ipv4/ipvs/ip_vs_sed.c +++ b/net/ipv4/ipvs/ip_vs_sed.c | |||
@@ -41,27 +41,6 @@ | |||
41 | #include <net/ip_vs.h> | 41 | #include <net/ip_vs.h> |
42 | 42 | ||
43 | 43 | ||
44 | static int | ||
45 | ip_vs_sed_init_svc(struct ip_vs_service *svc) | ||
46 | { | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | |||
51 | static int | ||
52 | ip_vs_sed_done_svc(struct ip_vs_service *svc) | ||
53 | { | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | |||
58 | static int | ||
59 | ip_vs_sed_update_svc(struct ip_vs_service *svc) | ||
60 | { | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | |||
65 | static inline unsigned int | 44 | static inline unsigned int |
66 | ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) | 45 | ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) |
67 | { | 46 | { |
@@ -122,12 +101,12 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
122 | } | 101 | } |
123 | } | 102 | } |
124 | 103 | ||
125 | IP_VS_DBG(6, "SED: server %u.%u.%u.%u:%u " | 104 | IP_VS_DBG_BUF(6, "SED: server %s:%u " |
126 | "activeconns %d refcnt %d weight %d overhead %d\n", | 105 | "activeconns %d refcnt %d weight %d overhead %d\n", |
127 | NIPQUAD(least->addr), ntohs(least->port), | 106 | IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), |
128 | atomic_read(&least->activeconns), | 107 | atomic_read(&least->activeconns), |
129 | atomic_read(&least->refcnt), | 108 | atomic_read(&least->refcnt), |
130 | atomic_read(&least->weight), loh); | 109 | atomic_read(&least->weight), loh); |
131 | 110 | ||
132 | return least; | 111 | return least; |
133 | } | 112 | } |
@@ -139,9 +118,9 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler = | |||
139 | .refcnt = ATOMIC_INIT(0), | 118 | .refcnt = ATOMIC_INIT(0), |
140 | .module = THIS_MODULE, | 119 | .module = THIS_MODULE, |
141 | .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list), | 120 | .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list), |
142 | .init_service = ip_vs_sed_init_svc, | 121 | #ifdef CONFIG_IP_VS_IPV6 |
143 | .done_service = ip_vs_sed_done_svc, | 122 | .supports_ipv6 = 1, |
144 | .update_service = ip_vs_sed_update_svc, | 123 | #endif |
145 | .schedule = ip_vs_sed_schedule, | 124 | .schedule = ip_vs_sed_schedule, |
146 | }; | 125 | }; |
147 | 126 | ||
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c index 7b979e228056..1d96de27fefd 100644 --- a/net/ipv4/ipvs/ip_vs_sh.c +++ b/net/ipv4/ipvs/ip_vs_sh.c | |||
@@ -215,7 +215,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
215 | IP_VS_DBG(6, "SH: source IP address %u.%u.%u.%u " | 215 | IP_VS_DBG(6, "SH: source IP address %u.%u.%u.%u " |
216 | "--> server %u.%u.%u.%u:%d\n", | 216 | "--> server %u.%u.%u.%u:%d\n", |
217 | NIPQUAD(iph->saddr), | 217 | NIPQUAD(iph->saddr), |
218 | NIPQUAD(dest->addr), | 218 | NIPQUAD(dest->addr.ip), |
219 | ntohs(dest->port)); | 219 | ntohs(dest->port)); |
220 | 220 | ||
221 | return dest; | 221 | return dest; |
@@ -231,6 +231,9 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler = | |||
231 | .refcnt = ATOMIC_INIT(0), | 231 | .refcnt = ATOMIC_INIT(0), |
232 | .module = THIS_MODULE, | 232 | .module = THIS_MODULE, |
233 | .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list), | 233 | .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list), |
234 | #ifdef CONFIG_IP_VS_IPV6 | ||
235 | .supports_ipv6 = 0, | ||
236 | #endif | ||
234 | .init_service = ip_vs_sh_init_svc, | 237 | .init_service = ip_vs_sh_init_svc, |
235 | .done_service = ip_vs_sh_done_svc, | 238 | .done_service = ip_vs_sh_done_svc, |
236 | .update_service = ip_vs_sh_update_svc, | 239 | .update_service = ip_vs_sh_update_svc, |
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c index a652da2c3200..28237a5f62e2 100644 --- a/net/ipv4/ipvs/ip_vs_sync.c +++ b/net/ipv4/ipvs/ip_vs_sync.c | |||
@@ -256,9 +256,9 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp) | |||
256 | s->cport = cp->cport; | 256 | s->cport = cp->cport; |
257 | s->vport = cp->vport; | 257 | s->vport = cp->vport; |
258 | s->dport = cp->dport; | 258 | s->dport = cp->dport; |
259 | s->caddr = cp->caddr; | 259 | s->caddr = cp->caddr.ip; |
260 | s->vaddr = cp->vaddr; | 260 | s->vaddr = cp->vaddr.ip; |
261 | s->daddr = cp->daddr; | 261 | s->daddr = cp->daddr.ip; |
262 | s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED); | 262 | s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED); |
263 | s->state = htons(cp->state); | 263 | s->state = htons(cp->state); |
264 | if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { | 264 | if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { |
@@ -366,21 +366,28 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen) | |||
366 | } | 366 | } |
367 | 367 | ||
368 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) | 368 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) |
369 | cp = ip_vs_conn_in_get(s->protocol, | 369 | cp = ip_vs_conn_in_get(AF_INET, s->protocol, |
370 | s->caddr, s->cport, | 370 | (union nf_inet_addr *)&s->caddr, |
371 | s->vaddr, s->vport); | 371 | s->cport, |
372 | (union nf_inet_addr *)&s->vaddr, | ||
373 | s->vport); | ||
372 | else | 374 | else |
373 | cp = ip_vs_ct_in_get(s->protocol, | 375 | cp = ip_vs_ct_in_get(AF_INET, s->protocol, |
374 | s->caddr, s->cport, | 376 | (union nf_inet_addr *)&s->caddr, |
375 | s->vaddr, s->vport); | 377 | s->cport, |
378 | (union nf_inet_addr *)&s->vaddr, | ||
379 | s->vport); | ||
376 | if (!cp) { | 380 | if (!cp) { |
377 | /* | 381 | /* |
378 | * Find the appropriate destination for the connection. | 382 | * Find the appropriate destination for the connection. |
379 | * If it is not found the connection will remain unbound | 383 | * If it is not found the connection will remain unbound |
380 | * but still handled. | 384 | * but still handled. |
381 | */ | 385 | */ |
382 | dest = ip_vs_find_dest(s->daddr, s->dport, | 386 | dest = ip_vs_find_dest(AF_INET, |
383 | s->vaddr, s->vport, | 387 | (union nf_inet_addr *)&s->daddr, |
388 | s->dport, | ||
389 | (union nf_inet_addr *)&s->vaddr, | ||
390 | s->vport, | ||
384 | s->protocol); | 391 | s->protocol); |
385 | /* Set the appropriate activity flag */ | 392 | /* Set the appropriate activity flag */ |
386 | if (s->protocol == IPPROTO_TCP) { | 393 | if (s->protocol == IPPROTO_TCP) { |
@@ -389,10 +396,13 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen) | |||
389 | else | 396 | else |
390 | flags &= ~IP_VS_CONN_F_INACTIVE; | 397 | flags &= ~IP_VS_CONN_F_INACTIVE; |
391 | } | 398 | } |
392 | cp = ip_vs_conn_new(s->protocol, | 399 | cp = ip_vs_conn_new(AF_INET, s->protocol, |
393 | s->caddr, s->cport, | 400 | (union nf_inet_addr *)&s->caddr, |
394 | s->vaddr, s->vport, | 401 | s->cport, |
395 | s->daddr, s->dport, | 402 | (union nf_inet_addr *)&s->vaddr, |
403 | s->vport, | ||
404 | (union nf_inet_addr *)&s->daddr, | ||
405 | s->dport, | ||
396 | flags, dest); | 406 | flags, dest); |
397 | if (dest) | 407 | if (dest) |
398 | atomic_dec(&dest->refcnt); | 408 | atomic_dec(&dest->refcnt); |
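
The sync hunks keep the on-wire message format IPv4-only (bare __be32 caddr/vaddr/daddr fields) and simply view those fields through union nf_inet_addr so they can be passed, always with AF_INET, to the new address-family-aware connection and destination lookups. A small stand-alone sketch of that union view; inet_addr_u and print_addr are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Container big enough for either family, the IPv4 address aliasing the
 * first 32 bits, in the spirit of union nf_inet_addr. */
union inet_addr_u {
        uint32_t        ip;               /* IPv4, network byte order */
        struct in6_addr in6;              /* IPv6 */
};

static void print_addr(int af, const union inet_addr_u *a)
{
        char buf[INET6_ADDRSTRLEN];

        if (af == AF_INET)
                inet_ntop(AF_INET, &a->ip, buf, sizeof(buf));
        else
                inet_ntop(AF_INET6, &a->in6, buf, sizeof(buf));
        printf("%s\n", buf);
}

int main(void)
{
        /* the on-wire sync field is a bare 32-bit IPv4 address... */
        uint32_t wire_caddr = htonl(0xc0a80001);      /* 192.168.0.1 */

        /* ...which is viewed through the union for the AF-aware API,
         * mirroring the (union nf_inet_addr *)&s->caddr casts above */
        union inet_addr_u a;
        memset(&a, 0, sizeof(a));
        memcpy(&a, &wire_caddr, sizeof(wire_caddr));
        print_addr(AF_INET, &a);
        return 0;
}
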
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c index 9b0ef86bb1f7..8c596e712599 100644 --- a/net/ipv4/ipvs/ip_vs_wlc.c +++ b/net/ipv4/ipvs/ip_vs_wlc.c | |||
@@ -25,27 +25,6 @@ | |||
25 | #include <net/ip_vs.h> | 25 | #include <net/ip_vs.h> |
26 | 26 | ||
27 | 27 | ||
28 | static int | ||
29 | ip_vs_wlc_init_svc(struct ip_vs_service *svc) | ||
30 | { | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | |||
35 | static int | ||
36 | ip_vs_wlc_done_svc(struct ip_vs_service *svc) | ||
37 | { | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | |||
42 | static int | ||
43 | ip_vs_wlc_update_svc(struct ip_vs_service *svc) | ||
44 | { | ||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | |||
49 | static inline unsigned int | 28 | static inline unsigned int |
50 | ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest) | 29 | ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest) |
51 | { | 30 | { |
@@ -110,12 +89,12 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
110 | } | 89 | } |
111 | } | 90 | } |
112 | 91 | ||
113 | IP_VS_DBG(6, "WLC: server %u.%u.%u.%u:%u " | 92 | IP_VS_DBG_BUF(6, "WLC: server %s:%u " |
114 | "activeconns %d refcnt %d weight %d overhead %d\n", | 93 | "activeconns %d refcnt %d weight %d overhead %d\n", |
115 | NIPQUAD(least->addr), ntohs(least->port), | 94 | IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port), |
116 | atomic_read(&least->activeconns), | 95 | atomic_read(&least->activeconns), |
117 | atomic_read(&least->refcnt), | 96 | atomic_read(&least->refcnt), |
118 | atomic_read(&least->weight), loh); | 97 | atomic_read(&least->weight), loh); |
119 | 98 | ||
120 | return least; | 99 | return least; |
121 | } | 100 | } |
@@ -127,9 +106,9 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler = | |||
127 | .refcnt = ATOMIC_INIT(0), | 106 | .refcnt = ATOMIC_INIT(0), |
128 | .module = THIS_MODULE, | 107 | .module = THIS_MODULE, |
129 | .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list), | 108 | .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list), |
130 | .init_service = ip_vs_wlc_init_svc, | 109 | #ifdef CONFIG_IP_VS_IPV6 |
131 | .done_service = ip_vs_wlc_done_svc, | 110 | .supports_ipv6 = 1, |
132 | .update_service = ip_vs_wlc_update_svc, | 111 | #endif |
133 | .schedule = ip_vs_wlc_schedule, | 112 | .schedule = ip_vs_wlc_schedule, |
134 | }; | 113 | }; |
135 | 114 | ||
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c index 0d86a79b87b5..7ea92fed50bf 100644 --- a/net/ipv4/ipvs/ip_vs_wrr.c +++ b/net/ipv4/ipvs/ip_vs_wrr.c | |||
@@ -195,12 +195,12 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
195 | } | 195 | } |
196 | } | 196 | } |
197 | 197 | ||
198 | IP_VS_DBG(6, "WRR: server %u.%u.%u.%u:%u " | 198 | IP_VS_DBG_BUF(6, "WRR: server %s:%u " |
199 | "activeconns %d refcnt %d weight %d\n", | 199 | "activeconns %d refcnt %d weight %d\n", |
200 | NIPQUAD(dest->addr), ntohs(dest->port), | 200 | IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port), |
201 | atomic_read(&dest->activeconns), | 201 | atomic_read(&dest->activeconns), |
202 | atomic_read(&dest->refcnt), | 202 | atomic_read(&dest->refcnt), |
203 | atomic_read(&dest->weight)); | 203 | atomic_read(&dest->weight)); |
204 | 204 | ||
205 | out: | 205 | out: |
206 | write_unlock(&svc->sched_lock); | 206 | write_unlock(&svc->sched_lock); |
@@ -213,6 +213,9 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = { | |||
213 | .refcnt = ATOMIC_INIT(0), | 213 | .refcnt = ATOMIC_INIT(0), |
214 | .module = THIS_MODULE, | 214 | .module = THIS_MODULE, |
215 | .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list), | 215 | .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list), |
216 | #ifdef CONFIG_IP_VS_IPV6 | ||
217 | .supports_ipv6 = 1, | ||
218 | #endif | ||
216 | .init_service = ip_vs_wrr_init_svc, | 219 | .init_service = ip_vs_wrr_init_svc, |
217 | .done_service = ip_vs_wrr_done_svc, | 220 | .done_service = ip_vs_wrr_done_svc, |
218 | .update_service = ip_vs_wrr_update_svc, | 221 | .update_service = ip_vs_wrr_update_svc, |
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c index 9892d4aca42e..02ddc2b3ce2e 100644 --- a/net/ipv4/ipvs/ip_vs_xmit.c +++ b/net/ipv4/ipvs/ip_vs_xmit.c | |||
@@ -20,6 +20,9 @@ | |||
20 | #include <net/udp.h> | 20 | #include <net/udp.h> |
21 | #include <net/icmp.h> /* for icmp_send */ | 21 | #include <net/icmp.h> /* for icmp_send */ |
22 | #include <net/route.h> /* for ip_route_output */ | 22 | #include <net/route.h> /* for ip_route_output */ |
23 | #include <net/ipv6.h> | ||
24 | #include <net/ip6_route.h> | ||
25 | #include <linux/icmpv6.h> | ||
23 | #include <linux/netfilter.h> | 26 | #include <linux/netfilter.h> |
24 | #include <linux/netfilter_ipv4.h> | 27 | #include <linux/netfilter_ipv4.h> |
25 | 28 | ||
@@ -47,7 +50,8 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos, u32 cookie) | |||
47 | 50 | ||
48 | if (!dst) | 51 | if (!dst) |
49 | return NULL; | 52 | return NULL; |
50 | if ((dst->obsolete || rtos != dest->dst_rtos) && | 53 | if ((dst->obsolete |
54 | || (dest->af == AF_INET && rtos != dest->dst_rtos)) && | ||
51 | dst->ops->check(dst, cookie) == NULL) { | 55 | dst->ops->check(dst, cookie) == NULL) { |
52 | dest->dst_cache = NULL; | 56 | dest->dst_cache = NULL; |
53 | dst_release(dst); | 57 | dst_release(dst); |
@@ -71,7 +75,7 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos) | |||
71 | .oif = 0, | 75 | .oif = 0, |
72 | .nl_u = { | 76 | .nl_u = { |
73 | .ip4_u = { | 77 | .ip4_u = { |
74 | .daddr = dest->addr, | 78 | .daddr = dest->addr.ip, |
75 | .saddr = 0, | 79 | .saddr = 0, |
76 | .tos = rtos, } }, | 80 | .tos = rtos, } }, |
77 | }; | 81 | }; |
@@ -80,12 +84,12 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos) | |||
80 | spin_unlock(&dest->dst_lock); | 84 | spin_unlock(&dest->dst_lock); |
81 | IP_VS_DBG_RL("ip_route_output error, " | 85 | IP_VS_DBG_RL("ip_route_output error, " |
82 | "dest: %u.%u.%u.%u\n", | 86 | "dest: %u.%u.%u.%u\n", |
83 | NIPQUAD(dest->addr)); | 87 | NIPQUAD(dest->addr.ip)); |
84 | return NULL; | 88 | return NULL; |
85 | } | 89 | } |
86 | __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst)); | 90 | __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst)); |
87 | IP_VS_DBG(10, "new dst %u.%u.%u.%u, refcnt=%d, rtos=%X\n", | 91 | IP_VS_DBG(10, "new dst %u.%u.%u.%u, refcnt=%d, rtos=%X\n", |
88 | NIPQUAD(dest->addr), | 92 | NIPQUAD(dest->addr.ip), |
89 | atomic_read(&rt->u.dst.__refcnt), rtos); | 93 | atomic_read(&rt->u.dst.__refcnt), rtos); |
90 | } | 94 | } |
91 | spin_unlock(&dest->dst_lock); | 95 | spin_unlock(&dest->dst_lock); |
@@ -94,14 +98,14 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos) | |||
94 | .oif = 0, | 98 | .oif = 0, |
95 | .nl_u = { | 99 | .nl_u = { |
96 | .ip4_u = { | 100 | .ip4_u = { |
97 | .daddr = cp->daddr, | 101 | .daddr = cp->daddr.ip, |
98 | .saddr = 0, | 102 | .saddr = 0, |
99 | .tos = rtos, } }, | 103 | .tos = rtos, } }, |
100 | }; | 104 | }; |
101 | 105 | ||
102 | if (ip_route_output_key(&init_net, &rt, &fl)) { | 106 | if (ip_route_output_key(&init_net, &rt, &fl)) { |
103 | IP_VS_DBG_RL("ip_route_output error, dest: " | 107 | IP_VS_DBG_RL("ip_route_output error, dest: " |
104 | "%u.%u.%u.%u\n", NIPQUAD(cp->daddr)); | 108 | "%u.%u.%u.%u\n", NIPQUAD(cp->daddr.ip)); |
105 | return NULL; | 109 | return NULL; |
106 | } | 110 | } |
107 | } | 111 | } |
@@ -109,6 +113,70 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos) | |||
109 | return rt; | 113 | return rt; |
110 | } | 114 | } |
111 | 115 | ||
116 | #ifdef CONFIG_IP_VS_IPV6 | ||
117 | static struct rt6_info * | ||
118 | __ip_vs_get_out_rt_v6(struct ip_vs_conn *cp) | ||
119 | { | ||
120 | struct rt6_info *rt; /* Route to the other host */ | ||
121 | struct ip_vs_dest *dest = cp->dest; | ||
122 | |||
123 | if (dest) { | ||
124 | spin_lock(&dest->dst_lock); | ||
125 | rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0, 0); | ||
126 | if (!rt) { | ||
127 | struct flowi fl = { | ||
128 | .oif = 0, | ||
129 | .nl_u = { | ||
130 | .ip6_u = { | ||
131 | .daddr = dest->addr.in6, | ||
132 | .saddr = { | ||
133 | .s6_addr32 = | ||
134 | { 0, 0, 0, 0 }, | ||
135 | }, | ||
136 | }, | ||
137 | }, | ||
138 | }; | ||
139 | |||
140 | rt = (struct rt6_info *)ip6_route_output(&init_net, | ||
141 | NULL, &fl); | ||
142 | if (!rt) { | ||
143 | spin_unlock(&dest->dst_lock); | ||
144 | IP_VS_DBG_RL("ip6_route_output error, " | ||
145 | "dest: " NIP6_FMT "\n", | ||
146 | NIP6(dest->addr.in6)); | ||
147 | return NULL; | ||
148 | } | ||
149 | __ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst)); | ||
150 | IP_VS_DBG(10, "new dst " NIP6_FMT ", refcnt=%d\n", | ||
151 | NIP6(dest->addr.in6), | ||
152 | atomic_read(&rt->u.dst.__refcnt)); | ||
153 | } | ||
154 | spin_unlock(&dest->dst_lock); | ||
155 | } else { | ||
156 | struct flowi fl = { | ||
157 | .oif = 0, | ||
158 | .nl_u = { | ||
159 | .ip6_u = { | ||
160 | .daddr = cp->daddr.in6, | ||
161 | .saddr = { | ||
162 | .s6_addr32 = { 0, 0, 0, 0 }, | ||
163 | }, | ||
164 | }, | ||
165 | }, | ||
166 | }; | ||
167 | |||
168 | rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); | ||
169 | if (!rt) { | ||
170 | IP_VS_DBG_RL("ip6_route_output error, dest: " | ||
171 | NIP6_FMT "\n", NIP6(cp->daddr.in6)); | ||
172 | return NULL; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | return rt; | ||
177 | } | ||
178 | #endif | ||
179 | |||
112 | 180 | ||
113 | /* | 181 | /* |
114 | * Release dest->dst_cache before a dest is removed | 182 | * Release dest->dst_cache before a dest is removed |
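
__ip_vs_get_out_rt_v6() mirrors the IPv4 helper above it: under the destination's dst_lock it reuses a cached route when one is still valid, otherwise it calls ip6_route_output() and caches the result (connections without a bound destination resolve a route each time). A toy user-space sketch of that check-then-cache shape; struct dest, struct route and resolve_route() are placeholders rather than kernel APIs, and the obsolescence checks are left out.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct route { int id; };

struct dest {
        pthread_mutex_t lock;             /* stands in for dest->dst_lock */
        struct route   *cached;           /* stands in for dest->dst_cache */
};

static struct route *resolve_route(void)  /* stands in for ip6_route_output() */
{
        struct route *rt = malloc(sizeof(*rt));
        if (rt)
                rt->id = 42;
        return rt;
}

static struct route *dest_get_route(struct dest *d)
{
        struct route *rt;

        pthread_mutex_lock(&d->lock);
        rt = d->cached;
        if (!rt) {
                rt = resolve_route();
                d->cached = rt;           /* cache for the next packet */
        }
        pthread_mutex_unlock(&d->lock);
        return rt;
}

int main(void)
{
        struct dest d = { .lock = PTHREAD_MUTEX_INITIALIZER, .cached = NULL };
        struct route *first  = dest_get_route(&d);
        struct route *second = dest_get_route(&d);

        printf("route id %d, cache reused: %s\n",
               first->id, first == second ? "yes" : "no");
        free(d.cached);
        return 0;
}
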
@@ -123,11 +191,11 @@ ip_vs_dst_reset(struct ip_vs_dest *dest) | |||
123 | dst_release(old_dst); | 191 | dst_release(old_dst); |
124 | } | 192 | } |
125 | 193 | ||
126 | #define IP_VS_XMIT(skb, rt) \ | 194 | #define IP_VS_XMIT(pf, skb, rt) \ |
127 | do { \ | 195 | do { \ |
128 | (skb)->ipvs_property = 1; \ | 196 | (skb)->ipvs_property = 1; \ |
129 | skb_forward_csum(skb); \ | 197 | skb_forward_csum(skb); \ |
130 | NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, (skb), NULL, \ | 198 | NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \ |
131 | (rt)->u.dst.dev, dst_output); \ | 199 | (rt)->u.dst.dev, dst_output); \ |
132 | } while (0) | 200 | } while (0) |
133 | 201 | ||
@@ -200,7 +268,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
200 | /* Another hack: avoid icmp_send in ip_fragment */ | 268 | /* Another hack: avoid icmp_send in ip_fragment */ |
201 | skb->local_df = 1; | 269 | skb->local_df = 1; |
202 | 270 | ||
203 | IP_VS_XMIT(skb, rt); | 271 | IP_VS_XMIT(PF_INET, skb, rt); |
204 | 272 | ||
205 | LeaveFunction(10); | 273 | LeaveFunction(10); |
206 | return NF_STOLEN; | 274 | return NF_STOLEN; |
@@ -213,6 +281,70 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
213 | return NF_STOLEN; | 281 | return NF_STOLEN; |
214 | } | 282 | } |
215 | 283 | ||
284 | #ifdef CONFIG_IP_VS_IPV6 | ||
285 | int | ||
286 | ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
287 | struct ip_vs_protocol *pp) | ||
288 | { | ||
289 | struct rt6_info *rt; /* Route to the other host */ | ||
290 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
291 | int mtu; | ||
292 | struct flowi fl = { | ||
293 | .oif = 0, | ||
294 | .nl_u = { | ||
295 | .ip6_u = { | ||
296 | .daddr = iph->daddr, | ||
297 | .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } }, | ||
298 | }; | ||
299 | |||
300 | EnterFunction(10); | ||
301 | |||
302 | rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); | ||
303 | if (!rt) { | ||
304 | IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): ip6_route_output error, " | ||
305 | "dest: " NIP6_FMT "\n", NIP6(iph->daddr)); | ||
306 | goto tx_error_icmp; | ||
307 | } | ||
308 | |||
309 | /* MTU checking */ | ||
310 | mtu = dst_mtu(&rt->u.dst); | ||
311 | if (skb->len > mtu) { | ||
312 | dst_release(&rt->u.dst); | ||
313 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | ||
314 | IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): frag needed\n"); | ||
315 | goto tx_error; | ||
316 | } | ||
317 | |||
318 | /* | ||
319 | * Call ip_send_check because we are not sure it is called | ||
320 | * after ip_defrag. Is copy-on-write needed? | ||
321 | */ | ||
322 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
323 | if (unlikely(skb == NULL)) { | ||
324 | dst_release(&rt->u.dst); | ||
325 | return NF_STOLEN; | ||
326 | } | ||
327 | |||
328 | /* drop old route */ | ||
329 | dst_release(skb->dst); | ||
330 | skb->dst = &rt->u.dst; | ||
331 | |||
332 | /* Another hack: avoid icmp_send in ip_fragment */ | ||
333 | skb->local_df = 1; | ||
334 | |||
335 | IP_VS_XMIT(PF_INET6, skb, rt); | ||
336 | |||
337 | LeaveFunction(10); | ||
338 | return NF_STOLEN; | ||
339 | |||
340 | tx_error_icmp: | ||
341 | dst_link_failure(skb); | ||
342 | tx_error: | ||
343 | kfree_skb(skb); | ||
344 | LeaveFunction(10); | ||
345 | return NF_STOLEN; | ||
346 | } | ||
347 | #endif | ||
216 | 348 | ||
217 | /* | 349 | /* |
218 | * NAT transmitter (only for outside-to-inside nat forwarding) | 350 | * NAT transmitter (only for outside-to-inside nat forwarding) |
@@ -264,7 +396,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
264 | /* mangle the packet */ | 396 | /* mangle the packet */ |
265 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) | 397 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) |
266 | goto tx_error; | 398 | goto tx_error; |
267 | ip_hdr(skb)->daddr = cp->daddr; | 399 | ip_hdr(skb)->daddr = cp->daddr.ip; |
268 | ip_send_check(ip_hdr(skb)); | 400 | ip_send_check(ip_hdr(skb)); |
269 | 401 | ||
270 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); | 402 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); |
@@ -276,7 +408,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
276 | /* Another hack: avoid icmp_send in ip_fragment */ | 408 | /* Another hack: avoid icmp_send in ip_fragment */ |
277 | skb->local_df = 1; | 409 | skb->local_df = 1; |
278 | 410 | ||
279 | IP_VS_XMIT(skb, rt); | 411 | IP_VS_XMIT(PF_INET, skb, rt); |
280 | 412 | ||
281 | LeaveFunction(10); | 413 | LeaveFunction(10); |
282 | return NF_STOLEN; | 414 | return NF_STOLEN; |
@@ -292,6 +424,83 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
292 | goto tx_error; | 424 | goto tx_error; |
293 | } | 425 | } |
294 | 426 | ||
427 | #ifdef CONFIG_IP_VS_IPV6 | ||
428 | int | ||
429 | ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
430 | struct ip_vs_protocol *pp) | ||
431 | { | ||
432 | struct rt6_info *rt; /* Route to the other host */ | ||
433 | int mtu; | ||
434 | |||
435 | EnterFunction(10); | ||
436 | |||
437 | /* check if it is a connection of no-client-port */ | ||
438 | if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) { | ||
439 | __be16 _pt, *p; | ||
440 | p = skb_header_pointer(skb, sizeof(struct ipv6hdr), | ||
441 | sizeof(_pt), &_pt); | ||
442 | if (p == NULL) | ||
443 | goto tx_error; | ||
444 | ip_vs_conn_fill_cport(cp, *p); | ||
445 | IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p)); | ||
446 | } | ||
447 | |||
448 | rt = __ip_vs_get_out_rt_v6(cp); | ||
449 | if (!rt) | ||
450 | goto tx_error_icmp; | ||
451 | |||
452 | /* MTU checking */ | ||
453 | mtu = dst_mtu(&rt->u.dst); | ||
454 | if (skb->len > mtu) { | ||
455 | dst_release(&rt->u.dst); | ||
456 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | ||
457 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | ||
458 | "ip_vs_nat_xmit_v6(): frag needed for"); | ||
459 | goto tx_error; | ||
460 | } | ||
461 | |||
462 | /* copy-on-write the packet before mangling it */ | ||
463 | if (!skb_make_writable(skb, sizeof(struct ipv6hdr))) | ||
464 | goto tx_error_put; | ||
465 | |||
466 | if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) | ||
467 | goto tx_error_put; | ||
468 | |||
469 | /* drop old route */ | ||
470 | dst_release(skb->dst); | ||
471 | skb->dst = &rt->u.dst; | ||
472 | |||
473 | /* mangle the packet */ | ||
474 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) | ||
475 | goto tx_error; | ||
476 | ipv6_hdr(skb)->daddr = cp->daddr.in6; | ||
477 | |||
478 | IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); | ||
479 | |||
480 | /* FIXME: when application helper enlarges the packet and the length | ||
481 | is larger than the MTU of outgoing device, there will be still | ||
482 | MTU problem. */ | ||
483 | |||
484 | /* Another hack: avoid icmp_send in ip_fragment */ | ||
485 | skb->local_df = 1; | ||
486 | |||
487 | IP_VS_XMIT(PF_INET6, skb, rt); | ||
488 | |||
489 | LeaveFunction(10); | ||
490 | return NF_STOLEN; | ||
491 | |||
492 | tx_error_icmp: | ||
493 | dst_link_failure(skb); | ||
494 | tx_error: | ||
495 | LeaveFunction(10); | ||
496 | kfree_skb(skb); | ||
497 | return NF_STOLEN; | ||
498 | tx_error_put: | ||
499 | dst_release(&rt->u.dst); | ||
500 | goto tx_error; | ||
501 | } | ||
502 | #endif | ||
503 | |||
295 | 504 | ||
296 | /* | 505 | /* |
297 | * IP Tunneling transmitter | 506 | * IP Tunneling transmitter |
@@ -423,6 +632,112 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
423 | return NF_STOLEN; | 632 | return NF_STOLEN; |
424 | } | 633 | } |
425 | 634 | ||
635 | #ifdef CONFIG_IP_VS_IPV6 | ||
636 | int | ||
637 | ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
638 | struct ip_vs_protocol *pp) | ||
639 | { | ||
640 | struct rt6_info *rt; /* Route to the other host */ | ||
641 | struct net_device *tdev; /* Device to other host */ | ||
642 | struct ipv6hdr *old_iph = ipv6_hdr(skb); | ||
643 | sk_buff_data_t old_transport_header = skb->transport_header; | ||
644 | struct ipv6hdr *iph; /* Our new IP header */ | ||
645 | unsigned int max_headroom; /* The extra header space needed */ | ||
646 | int mtu; | ||
647 | |||
648 | EnterFunction(10); | ||
649 | |||
650 | if (skb->protocol != htons(ETH_P_IPV6)) { | ||
651 | IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): protocol error, " | ||
652 | "ETH_P_IPV6: %d, skb protocol: %d\n", | ||
653 | htons(ETH_P_IPV6), skb->protocol); | ||
654 | goto tx_error; | ||
655 | } | ||
656 | |||
657 | rt = __ip_vs_get_out_rt_v6(cp); | ||
658 | if (!rt) | ||
659 | goto tx_error_icmp; | ||
660 | |||
661 | tdev = rt->u.dst.dev; | ||
662 | |||
663 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr); | ||
664 | /* TODO IPv6: do we need this check in IPv6? */ | ||
665 | if (mtu < 1280) { | ||
666 | dst_release(&rt->u.dst); | ||
667 | IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): mtu less than 1280\n"); | ||
668 | goto tx_error; | ||
669 | } | ||
670 | if (skb->dst) | ||
671 | skb->dst->ops->update_pmtu(skb->dst, mtu); | ||
672 | |||
673 | if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { | ||
674 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | ||
675 | dst_release(&rt->u.dst); | ||
676 | IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): frag needed\n"); | ||
677 | goto tx_error; | ||
678 | } | ||
679 | |||
680 | /* | ||
681 | * Okay, now see if we can stuff it in the buffer as-is. | ||
682 | */ | ||
683 | max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr); | ||
684 | |||
685 | if (skb_headroom(skb) < max_headroom | ||
686 | || skb_cloned(skb) || skb_shared(skb)) { | ||
687 | struct sk_buff *new_skb = | ||
688 | skb_realloc_headroom(skb, max_headroom); | ||
689 | if (!new_skb) { | ||
690 | dst_release(&rt->u.dst); | ||
691 | kfree_skb(skb); | ||
692 | IP_VS_ERR_RL("ip_vs_tunnel_xmit_v6(): no memory\n"); | ||
693 | return NF_STOLEN; | ||
694 | } | ||
695 | kfree_skb(skb); | ||
696 | skb = new_skb; | ||
697 | old_iph = ipv6_hdr(skb); | ||
698 | } | ||
699 | |||
700 | skb->transport_header = old_transport_header; | ||
701 | |||
702 | skb_push(skb, sizeof(struct ipv6hdr)); | ||
703 | skb_reset_network_header(skb); | ||
704 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
705 | |||
706 | /* drop old route */ | ||
707 | dst_release(skb->dst); | ||
708 | skb->dst = &rt->u.dst; | ||
709 | |||
710 | /* | ||
711 | * Push down and install the IPIP header. | ||
712 | */ | ||
713 | iph = ipv6_hdr(skb); | ||
714 | iph->version = 6; | ||
715 | iph->nexthdr = IPPROTO_IPV6; | ||
716 | iph->payload_len = old_iph->payload_len + sizeof(old_iph); | ||
717 | iph->priority = old_iph->priority; | ||
718 | memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl)); | ||
719 | iph->daddr = rt->rt6i_dst.addr; | ||
720 | iph->saddr = cp->vaddr.in6; /* rt->rt6i_src.addr; */ | ||
721 | iph->hop_limit = old_iph->hop_limit; | ||
722 | |||
723 | /* Another hack: avoid icmp_send in ip_fragment */ | ||
724 | skb->local_df = 1; | ||
725 | |||
726 | ip6_local_out(skb); | ||
727 | |||
728 | LeaveFunction(10); | ||
729 | |||
730 | return NF_STOLEN; | ||
731 | |||
732 | tx_error_icmp: | ||
733 | dst_link_failure(skb); | ||
734 | tx_error: | ||
735 | kfree_skb(skb); | ||
736 | LeaveFunction(10); | ||
737 | return NF_STOLEN; | ||
738 | } | ||
739 | #endif | ||
740 | |||
426 | 741 | ||
427 | /* | 742 | /* |
428 | * Direct Routing transmitter | 743 | * Direct Routing transmitter |
@@ -467,7 +782,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
467 | /* Another hack: avoid icmp_send in ip_fragment */ | 782 | /* Another hack: avoid icmp_send in ip_fragment */ |
468 | skb->local_df = 1; | 783 | skb->local_df = 1; |
469 | 784 | ||
470 | IP_VS_XMIT(skb, rt); | 785 | IP_VS_XMIT(PF_INET, skb, rt); |
471 | 786 | ||
472 | LeaveFunction(10); | 787 | LeaveFunction(10); |
473 | return NF_STOLEN; | 788 | return NF_STOLEN; |
@@ -480,6 +795,60 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
480 | return NF_STOLEN; | 795 | return NF_STOLEN; |
481 | } | 796 | } |
482 | 797 | ||
798 | #ifdef CONFIG_IP_VS_IPV6 | ||
799 | int | ||
800 | ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
801 | struct ip_vs_protocol *pp) | ||
802 | { | ||
803 | struct rt6_info *rt; /* Route to the other host */ | ||
804 | int mtu; | ||
805 | |||
806 | EnterFunction(10); | ||
807 | |||
808 | rt = __ip_vs_get_out_rt_v6(cp); | ||
809 | if (!rt) | ||
810 | goto tx_error_icmp; | ||
811 | |||
812 | /* MTU checking */ | ||
813 | mtu = dst_mtu(&rt->u.dst); | ||
814 | if (skb->len > mtu) { | ||
815 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | ||
816 | dst_release(&rt->u.dst); | ||
817 | IP_VS_DBG_RL("ip_vs_dr_xmit_v6(): frag needed\n"); | ||
818 | goto tx_error; | ||
819 | } | ||
820 | |||
821 | /* | ||
822 | * Call ip_send_check because we are not sure it is called | ||
823 | * after ip_defrag. Is copy-on-write needed? | ||
824 | */ | ||
825 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
826 | if (unlikely(skb == NULL)) { | ||
827 | dst_release(&rt->u.dst); | ||
828 | return NF_STOLEN; | ||
829 | } | ||
830 | |||
831 | /* drop old route */ | ||
832 | dst_release(skb->dst); | ||
833 | skb->dst = &rt->u.dst; | ||
834 | |||
835 | /* Another hack: avoid icmp_send in ip_fragment */ | ||
836 | skb->local_df = 1; | ||
837 | |||
838 | IP_VS_XMIT(PF_INET6, skb, rt); | ||
839 | |||
840 | LeaveFunction(10); | ||
841 | return NF_STOLEN; | ||
842 | |||
843 | tx_error_icmp: | ||
844 | dst_link_failure(skb); | ||
845 | tx_error: | ||
846 | kfree_skb(skb); | ||
847 | LeaveFunction(10); | ||
848 | return NF_STOLEN; | ||
849 | } | ||
850 | #endif | ||
851 | |||
483 | 852 | ||
484 | /* | 853 | /* |
485 | * ICMP packet transmitter | 854 | * ICMP packet transmitter |
@@ -540,7 +909,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
540 | /* Another hack: avoid icmp_send in ip_fragment */ | 909 | /* Another hack: avoid icmp_send in ip_fragment */ |
541 | skb->local_df = 1; | 910 | skb->local_df = 1; |
542 | 911 | ||
543 | IP_VS_XMIT(skb, rt); | 912 | IP_VS_XMIT(PF_INET, skb, rt); |
544 | 913 | ||
545 | rc = NF_STOLEN; | 914 | rc = NF_STOLEN; |
546 | goto out; | 915 | goto out; |
@@ -557,3 +926,79 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
557 | ip_rt_put(rt); | 926 | ip_rt_put(rt); |
558 | goto tx_error; | 927 | goto tx_error; |
559 | } | 928 | } |
929 | |||
930 | #ifdef CONFIG_IP_VS_IPV6 | ||
931 | int | ||
932 | ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
933 | struct ip_vs_protocol *pp, int offset) | ||
934 | { | ||
935 | struct rt6_info *rt; /* Route to the other host */ | ||
936 | int mtu; | ||
937 | int rc; | ||
938 | |||
939 | EnterFunction(10); | ||
940 | |||
941 | /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be | ||
942 | forwarded directly here, because there is no need to | ||
943 | translate address/port back */ | ||
944 | if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { | ||
945 | if (cp->packet_xmit) | ||
946 | rc = cp->packet_xmit(skb, cp, pp); | ||
947 | else | ||
948 | rc = NF_ACCEPT; | ||
949 | /* do not touch skb anymore */ | ||
950 | atomic_inc(&cp->in_pkts); | ||
951 | goto out; | ||
952 | } | ||
953 | |||
954 | /* | ||
955 | * mangle and send the packet here (only for VS/NAT) | ||
956 | */ | ||
957 | |||
958 | rt = __ip_vs_get_out_rt_v6(cp); | ||
959 | if (!rt) | ||
960 | goto tx_error_icmp; | ||
961 | |||
962 | /* MTU checking */ | ||
963 | mtu = dst_mtu(&rt->u.dst); | ||
964 | if (skb->len > mtu) { | ||
965 | dst_release(&rt->u.dst); | ||
966 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | ||
967 | IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n"); | ||
968 | goto tx_error; | ||
969 | } | ||
970 | |||
971 | /* copy-on-write the packet before mangling it */ | ||
972 | if (!skb_make_writable(skb, offset)) | ||
973 | goto tx_error_put; | ||
974 | |||
975 | if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) | ||
976 | goto tx_error_put; | ||
977 | |||
978 | /* drop the old route when skb is not shared */ | ||
979 | dst_release(skb->dst); | ||
980 | skb->dst = &rt->u.dst; | ||
981 | |||
982 | ip_vs_nat_icmp_v6(skb, pp, cp, 0); | ||
983 | |||
984 | /* Another hack: avoid icmp_send in ip_fragment */ | ||
985 | skb->local_df = 1; | ||
986 | |||
987 | IP_VS_XMIT(PF_INET6, skb, rt); | ||
988 | |||
989 | rc = NF_STOLEN; | ||
990 | goto out; | ||
991 | |||
992 | tx_error_icmp: | ||
993 | dst_link_failure(skb); | ||
994 | tx_error: | ||
995 | dev_kfree_skb(skb); | ||
996 | rc = NF_STOLEN; | ||
997 | out: | ||
998 | LeaveFunction(10); | ||
999 | return rc; | ||
1000 | tx_error_put: | ||
1001 | dst_release(&rt->u.dst); | ||
1002 | goto tx_error; | ||
1003 | } | ||
1004 | #endif | ||
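
The new bypass, NAT, DR and ICMP transmitters for IPv6 all follow the same outline: resolve the route, compare the packet length against the path MTU and answer with ICMPV6_PKT_TOOBIG when it does not fit, make the skb writable, swap in the new dst and hand the packet to IP_VS_XMIT(PF_INET6, ...); only the tunnel path differs, building an outer IPv6 header and calling ip6_local_out(). A minimal sketch of just the MTU gate, with illustrative names and numbers.

#include <stdio.h>

enum verdict { FORWARD, TOO_BIG };

/* If the packet exceeds the path MTU of the resolved route, the sender is
 * told to fragment (ICMPv6 Packet Too Big) and the packet is dropped;
 * otherwise it is forwarded. */
static enum verdict mtu_gate(unsigned int pkt_len, unsigned int path_mtu)
{
        if (pkt_len > path_mtu)
                return TOO_BIG;           /* caller sends ICMPV6_PKT_TOOBIG */
        return FORWARD;                   /* caller hands the skb onward */
}

int main(void)
{
        printf("1400 vs mtu 1500: %s\n",
               mtu_gate(1400, 1500) == FORWARD ? "forward" : "too big");
        printf("1600 vs mtu 1500: %s\n",
               mtu_gate(1600, 1500) == FORWARD ? "forward" : "too big");
        return 0;
}
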
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 6ee5354c9aa1..f62187bb6d08 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -282,6 +282,8 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq) | |||
282 | struct rtable *r = NULL; | 282 | struct rtable *r = NULL; |
283 | 283 | ||
284 | for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { | 284 | for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { |
285 | if (!rt_hash_table[st->bucket].chain) | ||
286 | continue; | ||
285 | rcu_read_lock_bh(); | 287 | rcu_read_lock_bh(); |
286 | r = rcu_dereference(rt_hash_table[st->bucket].chain); | 288 | r = rcu_dereference(rt_hash_table[st->bucket].chain); |
287 | while (r) { | 289 | while (r) { |
@@ -299,11 +301,14 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq, | |||
299 | struct rtable *r) | 301 | struct rtable *r) |
300 | { | 302 | { |
301 | struct rt_cache_iter_state *st = seq->private; | 303 | struct rt_cache_iter_state *st = seq->private; |
304 | |||
302 | r = r->u.dst.rt_next; | 305 | r = r->u.dst.rt_next; |
303 | while (!r) { | 306 | while (!r) { |
304 | rcu_read_unlock_bh(); | 307 | rcu_read_unlock_bh(); |
305 | if (--st->bucket < 0) | 308 | do { |
306 | break; | 309 | if (--st->bucket < 0) |
310 | return NULL; | ||
311 | } while (!rt_hash_table[st->bucket].chain); | ||
307 | rcu_read_lock_bh(); | 312 | rcu_read_lock_bh(); |
308 | r = rt_hash_table[st->bucket].chain; | 313 | r = rt_hash_table[st->bucket].chain; |
309 | } | 314 | } |
@@ -2840,7 +2845,9 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2840 | if (s_h < 0) | 2845 | if (s_h < 0) |
2841 | s_h = 0; | 2846 | s_h = 0; |
2842 | s_idx = idx = cb->args[1]; | 2847 | s_idx = idx = cb->args[1]; |
2843 | for (h = s_h; h <= rt_hash_mask; h++) { | 2848 | for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) { |
2849 | if (!rt_hash_table[h].chain) | ||
2850 | continue; | ||
2844 | rcu_read_lock_bh(); | 2851 | rcu_read_lock_bh(); |
2845 | for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; | 2852 | for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; |
2846 | rt = rcu_dereference(rt->u.dst.rt_next), idx++) { | 2853 | rt = rcu_dereference(rt->u.dst.rt_next), idx++) { |
@@ -2859,7 +2866,6 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2859 | dst_release(xchg(&skb->dst, NULL)); | 2866 | dst_release(xchg(&skb->dst, NULL)); |
2860 | } | 2867 | } |
2861 | rcu_read_unlock_bh(); | 2868 | rcu_read_unlock_bh(); |
2862 | s_idx = 0; | ||
2863 | } | 2869 | } |
2864 | 2870 | ||
2865 | done: | 2871 | done: |
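
The route.c hunks make both the /proc rt_cache iterator and the netlink dump skip hash buckets whose chain is empty before taking the RCU read lock, and move the s_idx reset into the for-loop increment so it also happens when a bucket is skipped with continue. A generic user-space sketch of that bucket walk, using plain singly linked chains and no locking.

#include <stdio.h>
#include <stddef.h>

struct node { int val; struct node *next; };

#define NBUCKETS 8

static struct node *buckets[NBUCKETS];

static void walk(void)
{
        for (int b = NBUCKETS - 1; b >= 0; b--) {
                if (!buckets[b])
                        continue;         /* skip empty buckets cheaply */
                int idx = 0;              /* per-bucket index restarts here */
                for (struct node *n = buckets[b]; n; n = n->next, idx++)
                        printf("bucket %d idx %d val %d\n", b, idx, n->val);
        }
}

int main(void)
{
        static struct node a = { .val = 1 }, c = { .val = 2 };
        buckets[3] = &a;
        buckets[6] = &c;
        walk();                           /* only buckets 6 and 3 are visited */
        return 0;
}
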
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 67ccce2a96bd..3b76bce769dd 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -979,6 +979,39 @@ static void tcp_update_reordering(struct sock *sk, const int metric, | |||
979 | } | 979 | } |
980 | } | 980 | } |
981 | 981 | ||
982 | /* This must be called before lost_out is incremented */ | ||
983 | static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) | ||
984 | { | ||
985 | if ((tp->retransmit_skb_hint == NULL) || | ||
986 | before(TCP_SKB_CB(skb)->seq, | ||
987 | TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) | ||
988 | tp->retransmit_skb_hint = skb; | ||
989 | |||
990 | if (!tp->lost_out || | ||
991 | after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high)) | ||
992 | tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; | ||
993 | } | ||
994 | |||
995 | static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb) | ||
996 | { | ||
997 | if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { | ||
998 | tcp_verify_retransmit_hint(tp, skb); | ||
999 | |||
1000 | tp->lost_out += tcp_skb_pcount(skb); | ||
1001 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | ||
1002 | } | ||
1003 | } | ||
1004 | |||
1005 | void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb) | ||
1006 | { | ||
1007 | tcp_verify_retransmit_hint(tp, skb); | ||
1008 | |||
1009 | if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { | ||
1010 | tp->lost_out += tcp_skb_pcount(skb); | ||
1011 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | ||
1012 | } | ||
1013 | } | ||
1014 | |||
982 | /* This procedure tags the retransmission queue when SACKs arrive. | 1015 | /* This procedure tags the retransmission queue when SACKs arrive. |
983 | * | 1016 | * |
984 | * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). | 1017 | * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). |
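
tcp_verify_retransmit_hint() now keeps retransmit_skb_hint pointing at the lowest-sequence segment being marked lost and maintains retransmit_high, instead of clearing the hint as the copy removed further down used to do, and the new tcp_skb_mark_lost()/tcp_skb_mark_lost_uncond_verify() helpers centralize the lost_out accounting. A compact sketch of that bookkeeping built on the usual wrap-safe 32-bit sequence comparison; struct seg and the file-scope variables are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* wrap-safe "seq1 comes before seq2" test */
static int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

struct seg { uint32_t seq; uint32_t end_seq; };

static struct seg *retransmit_hint;       /* lowest-seq segment marked lost */
static uint32_t retransmit_high;          /* highest end_seq marked lost */
static unsigned int lost_out;

static void mark_lost(struct seg *s)
{
        /* hint update must run before lost_out is incremented */
        if (!retransmit_hint || before(s->seq, retransmit_hint->seq))
                retransmit_hint = s;
        if (!lost_out || before(retransmit_high, s->end_seq))
                retransmit_high = s->end_seq;
        lost_out++;
}

int main(void)
{
        struct seg a = { 2000, 3000 }, b = { 1000, 2000 };

        mark_lost(&a);
        mark_lost(&b);                    /* lower seq: hint moves back to b */
        printf("hint seq %u, retransmit_high %u, lost_out %u\n",
               retransmit_hint->seq, retransmit_high, lost_out);
        return 0;
}
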
@@ -1155,13 +1188,7 @@ static void tcp_mark_lost_retrans(struct sock *sk) | |||
1155 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; | 1188 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; |
1156 | tp->retrans_out -= tcp_skb_pcount(skb); | 1189 | tp->retrans_out -= tcp_skb_pcount(skb); |
1157 | 1190 | ||
1158 | /* clear lost hint */ | 1191 | tcp_skb_mark_lost_uncond_verify(tp, skb); |
1159 | tp->retransmit_skb_hint = NULL; | ||
1160 | |||
1161 | if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { | ||
1162 | tp->lost_out += tcp_skb_pcount(skb); | ||
1163 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | ||
1164 | } | ||
1165 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); | 1192 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); |
1166 | } else { | 1193 | } else { |
1167 | if (before(ack_seq, new_low_seq)) | 1194 | if (before(ack_seq, new_low_seq)) |
@@ -1271,9 +1298,6 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, | |||
1271 | ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); | 1298 | ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); |
1272 | tp->lost_out -= tcp_skb_pcount(skb); | 1299 | tp->lost_out -= tcp_skb_pcount(skb); |
1273 | tp->retrans_out -= tcp_skb_pcount(skb); | 1300 | tp->retrans_out -= tcp_skb_pcount(skb); |
1274 | |||
1275 | /* clear lost hint */ | ||
1276 | tp->retransmit_skb_hint = NULL; | ||
1277 | } | 1301 | } |
1278 | } else { | 1302 | } else { |
1279 | if (!(sacked & TCPCB_RETRANS)) { | 1303 | if (!(sacked & TCPCB_RETRANS)) { |
@@ -1292,9 +1316,6 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, | |||
1292 | if (sacked & TCPCB_LOST) { | 1316 | if (sacked & TCPCB_LOST) { |
1293 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; | 1317 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; |
1294 | tp->lost_out -= tcp_skb_pcount(skb); | 1318 | tp->lost_out -= tcp_skb_pcount(skb); |
1295 | |||
1296 | /* clear lost hint */ | ||
1297 | tp->retransmit_skb_hint = NULL; | ||
1298 | } | 1319 | } |
1299 | } | 1320 | } |
1300 | 1321 | ||
@@ -1324,7 +1345,6 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, | |||
1324 | if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) { | 1345 | if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) { |
1325 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; | 1346 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; |
1326 | tp->retrans_out -= tcp_skb_pcount(skb); | 1347 | tp->retrans_out -= tcp_skb_pcount(skb); |
1327 | tp->retransmit_skb_hint = NULL; | ||
1328 | } | 1348 | } |
1329 | 1349 | ||
1330 | return flag; | 1350 | return flag; |
@@ -1726,6 +1746,8 @@ int tcp_use_frto(struct sock *sk) | |||
1726 | return 0; | 1746 | return 0; |
1727 | 1747 | ||
1728 | skb = tcp_write_queue_head(sk); | 1748 | skb = tcp_write_queue_head(sk); |
1749 | if (tcp_skb_is_last(sk, skb)) | ||
1750 | return 1; | ||
1729 | skb = tcp_write_queue_next(sk, skb); /* Skips head */ | 1751 | skb = tcp_write_queue_next(sk, skb); /* Skips head */ |
1730 | tcp_for_write_queue_from(skb, sk) { | 1752 | tcp_for_write_queue_from(skb, sk) { |
1731 | if (skb == tcp_send_head(sk)) | 1753 | if (skb == tcp_send_head(sk)) |
@@ -1867,6 +1889,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) | |||
1867 | if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { | 1889 | if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { |
1868 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | 1890 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; |
1869 | tp->lost_out += tcp_skb_pcount(skb); | 1891 | tp->lost_out += tcp_skb_pcount(skb); |
1892 | tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; | ||
1870 | } | 1893 | } |
1871 | } | 1894 | } |
1872 | tcp_verify_left_out(tp); | 1895 | tcp_verify_left_out(tp); |
@@ -1883,7 +1906,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) | |||
1883 | tp->high_seq = tp->snd_nxt; | 1906 | tp->high_seq = tp->snd_nxt; |
1884 | TCP_ECN_queue_cwr(tp); | 1907 | TCP_ECN_queue_cwr(tp); |
1885 | 1908 | ||
1886 | tcp_clear_retrans_hints_partial(tp); | 1909 | tcp_clear_all_retrans_hints(tp); |
1887 | } | 1910 | } |
1888 | 1911 | ||
1889 | static void tcp_clear_retrans_partial(struct tcp_sock *tp) | 1912 | static void tcp_clear_retrans_partial(struct tcp_sock *tp) |
@@ -1934,12 +1957,11 @@ void tcp_enter_loss(struct sock *sk, int how) | |||
1934 | /* Push undo marker, if it was plain RTO and nothing | 1957 | /* Push undo marker, if it was plain RTO and nothing |
1935 | * was retransmitted. */ | 1958 | * was retransmitted. */ |
1936 | tp->undo_marker = tp->snd_una; | 1959 | tp->undo_marker = tp->snd_una; |
1937 | tcp_clear_retrans_hints_partial(tp); | ||
1938 | } else { | 1960 | } else { |
1939 | tp->sacked_out = 0; | 1961 | tp->sacked_out = 0; |
1940 | tp->fackets_out = 0; | 1962 | tp->fackets_out = 0; |
1941 | tcp_clear_all_retrans_hints(tp); | ||
1942 | } | 1963 | } |
1964 | tcp_clear_all_retrans_hints(tp); | ||
1943 | 1965 | ||
1944 | tcp_for_write_queue(skb, sk) { | 1966 | tcp_for_write_queue(skb, sk) { |
1945 | if (skb == tcp_send_head(sk)) | 1967 | if (skb == tcp_send_head(sk)) |
@@ -1952,6 +1974,7 @@ void tcp_enter_loss(struct sock *sk, int how) | |||
1952 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; | 1974 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; |
1953 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | 1975 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; |
1954 | tp->lost_out += tcp_skb_pcount(skb); | 1976 | tp->lost_out += tcp_skb_pcount(skb); |
1977 | tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; | ||
1955 | } | 1978 | } |
1956 | } | 1979 | } |
1957 | tcp_verify_left_out(tp); | 1980 | tcp_verify_left_out(tp); |
@@ -2157,19 +2180,6 @@ static int tcp_time_to_recover(struct sock *sk) | |||
2157 | return 0; | 2180 | return 0; |
2158 | } | 2181 | } |
2159 | 2182 | ||
2160 | /* RFC: This is from the original, I doubt that this is necessary at all: | ||
2161 | * clear xmit_retrans hint if seq of this skb is beyond hint. How could we | ||
2162 | * retransmitted past LOST markings in the first place? I'm not fully sure | ||
2163 | * about undo and end of connection cases, which can cause R without L? | ||
2164 | */ | ||
2165 | static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) | ||
2166 | { | ||
2167 | if ((tp->retransmit_skb_hint != NULL) && | ||
2168 | before(TCP_SKB_CB(skb)->seq, | ||
2169 | TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) | ||
2170 | tp->retransmit_skb_hint = NULL; | ||
2171 | } | ||
2172 | |||
2173 | /* Mark head of queue up as lost. With RFC3517 SACK, the packets is | 2183 | /* Mark head of queue up as lost. With RFC3517 SACK, the packets is |
2174 | * is against sacked "cnt", otherwise it's against facked "cnt" | 2184 | * is against sacked "cnt", otherwise it's against facked "cnt" |
2175 | */ | 2185 | */ |
@@ -2217,11 +2227,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets) | |||
2217 | cnt = packets; | 2227 | cnt = packets; |
2218 | } | 2228 | } |
2219 | 2229 | ||
2220 | if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { | 2230 | tcp_skb_mark_lost(tp, skb); |
2221 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | ||
2222 | tp->lost_out += tcp_skb_pcount(skb); | ||
2223 | tcp_verify_retransmit_hint(tp, skb); | ||
2224 | } | ||
2225 | } | 2231 | } |
2226 | tcp_verify_left_out(tp); | 2232 | tcp_verify_left_out(tp); |
2227 | } | 2233 | } |
@@ -2263,11 +2269,7 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) | |||
2263 | if (!tcp_skb_timedout(sk, skb)) | 2269 | if (!tcp_skb_timedout(sk, skb)) |
2264 | break; | 2270 | break; |
2265 | 2271 | ||
2266 | if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { | 2272 | tcp_skb_mark_lost(tp, skb); |
2267 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | ||
2268 | tp->lost_out += tcp_skb_pcount(skb); | ||
2269 | tcp_verify_retransmit_hint(tp, skb); | ||
2270 | } | ||
2271 | } | 2273 | } |
2272 | 2274 | ||
2273 | tp->scoreboard_skb_hint = skb; | 2275 | tp->scoreboard_skb_hint = skb; |
@@ -2378,10 +2380,6 @@ static void tcp_undo_cwr(struct sock *sk, const int undo) | |||
2378 | } | 2380 | } |
2379 | tcp_moderate_cwnd(tp); | 2381 | tcp_moderate_cwnd(tp); |
2380 | tp->snd_cwnd_stamp = tcp_time_stamp; | 2382 | tp->snd_cwnd_stamp = tcp_time_stamp; |
2381 | |||
2382 | /* There is something screwy going on with the retrans hints after | ||
2383 | an undo */ | ||
2384 | tcp_clear_all_retrans_hints(tp); | ||
2385 | } | 2383 | } |
2386 | 2384 | ||
2387 | static inline int tcp_may_undo(struct tcp_sock *tp) | 2385 | static inline int tcp_may_undo(struct tcp_sock *tp) |
@@ -2848,6 +2846,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets) | |||
2848 | int flag = 0; | 2846 | int flag = 0; |
2849 | u32 pkts_acked = 0; | 2847 | u32 pkts_acked = 0; |
2850 | u32 reord = tp->packets_out; | 2848 | u32 reord = tp->packets_out; |
2849 | u32 prior_sacked = tp->sacked_out; | ||
2851 | s32 seq_rtt = -1; | 2850 | s32 seq_rtt = -1; |
2852 | s32 ca_seq_rtt = -1; | 2851 | s32 ca_seq_rtt = -1; |
2853 | ktime_t last_ackt = net_invalid_timestamp(); | 2852 | ktime_t last_ackt = net_invalid_timestamp(); |
@@ -2929,7 +2928,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets) | |||
2929 | 2928 | ||
2930 | tcp_unlink_write_queue(skb, sk); | 2929 | tcp_unlink_write_queue(skb, sk); |
2931 | sk_wmem_free_skb(sk, skb); | 2930 | sk_wmem_free_skb(sk, skb); |
2932 | tcp_clear_all_retrans_hints(tp); | 2931 | tp->scoreboard_skb_hint = NULL; |
2932 | if (skb == tp->retransmit_skb_hint) | ||
2933 | tp->retransmit_skb_hint = NULL; | ||
2934 | if (skb == tp->lost_skb_hint) | ||
2935 | tp->lost_skb_hint = NULL; | ||
2933 | } | 2936 | } |
2934 | 2937 | ||
2935 | if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) | 2938 | if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) |
@@ -2948,6 +2951,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets) | |||
2948 | /* Non-retransmitted hole got filled? That's reordering */ | 2951 | /* Non-retransmitted hole got filled? That's reordering */ |
2949 | if (reord < prior_fackets) | 2952 | if (reord < prior_fackets) |
2950 | tcp_update_reordering(sk, tp->fackets_out - reord, 0); | 2953 | tcp_update_reordering(sk, tp->fackets_out - reord, 0); |
2954 | |||
2955 | /* No need to care for underflows here because | ||
2956 | * the lost_skb_hint gets NULLed if we're past it | ||
2957 | * (or something non-trivial happened) | ||
2958 | */ | ||
2959 | if (tcp_is_fack(tp)) | ||
2960 | tp->lost_cnt_hint -= pkts_acked; | ||
2961 | else | ||
2962 | tp->lost_cnt_hint -= prior_sacked - tp->sacked_out; | ||
2951 | } | 2963 | } |
2952 | 2964 | ||
2953 | tp->fackets_out -= min(pkts_acked, tp->fackets_out); | 2965 | tp->fackets_out -= min(pkts_acked, tp->fackets_out); |
@@ -3442,6 +3454,22 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
3442 | } | 3454 | } |
3443 | } | 3455 | } |
3444 | 3456 | ||
3457 | static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th) | ||
3458 | { | ||
3459 | __be32 *ptr = (__be32 *)(th + 1); | ||
3460 | |||
3461 | if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | ||
3462 | | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { | ||
3463 | tp->rx_opt.saw_tstamp = 1; | ||
3464 | ++ptr; | ||
3465 | tp->rx_opt.rcv_tsval = ntohl(*ptr); | ||
3466 | ++ptr; | ||
3467 | tp->rx_opt.rcv_tsecr = ntohl(*ptr); | ||
3468 | return 1; | ||
3469 | } | ||
3470 | return 0; | ||
3471 | } | ||
3472 | |||
3445 | /* Fast parse options. This hopes to only see timestamps. | 3473 | /* Fast parse options. This hopes to only see timestamps. |
3446 | * If it is wrong it falls back on tcp_parse_options(). | 3474 | * If it is wrong it falls back on tcp_parse_options(). |
3447 | */ | 3475 | */ |
@@ -3453,16 +3481,8 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, | |||
3453 | return 0; | 3481 | return 0; |
3454 | } else if (tp->rx_opt.tstamp_ok && | 3482 | } else if (tp->rx_opt.tstamp_ok && |
3455 | th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { | 3483 | th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { |
3456 | __be32 *ptr = (__be32 *)(th + 1); | 3484 | if (tcp_parse_aligned_timestamp(tp, th)) |
3457 | if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | ||
3458 | | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { | ||
3459 | tp->rx_opt.saw_tstamp = 1; | ||
3460 | ++ptr; | ||
3461 | tp->rx_opt.rcv_tsval = ntohl(*ptr); | ||
3462 | ++ptr; | ||
3463 | tp->rx_opt.rcv_tsecr = ntohl(*ptr); | ||
3464 | return 1; | 3485 | return 1; |
3465 | } | ||
3466 | } | 3486 | } |
3467 | tcp_parse_options(skb, &tp->rx_opt, 1); | 3487 | tcp_parse_options(skb, &tp->rx_opt, 1); |
3468 | return 1; | 3488 | return 1; |
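
tcp_parse_aligned_timestamp() factors the header-prediction fast path out of tcp_fast_parse_options(): the expected option block (NOP, NOP, TIMESTAMP, length 10) is matched with a single 32-bit comparison and tsval/tsecr are read directly behind it. A stand-alone sketch of the same check against a raw option buffer; the TCPOPT_*/TCPOLEN_* values are the standard ones, and the function here only borrows the kernel's name for illustration.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define TCPOPT_NOP        1
#define TCPOPT_TIMESTAMP  8
#define TCPOLEN_TIMESTAMP 10

static int parse_aligned_timestamp(const uint8_t *opt, uint32_t *tsval,
                                   uint32_t *tsecr)
{
        uint32_t word;

        memcpy(&word, opt, 4);
        if (word != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                          (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
                return 0;                 /* fall back to the slow parser */

        memcpy(&word, opt + 4, 4);
        *tsval = ntohl(word);
        memcpy(&word, opt + 8, 4);
        *tsecr = ntohl(word);
        return 1;
}

int main(void)
{
        uint8_t opt[12] = { TCPOPT_NOP, TCPOPT_NOP, TCPOPT_TIMESTAMP,
                            TCPOLEN_TIMESTAMP };
        uint32_t v, tsval = 0, tsecr = 0;

        v = htonl(111); memcpy(opt + 4, &v, 4);   /* TSval */
        v = htonl(222); memcpy(opt + 8, &v, 4);   /* TSecr */

        printf("matched=%d tsval=%u tsecr=%u\n",
               parse_aligned_timestamp(opt, &tsval, &tsecr), tsval, tsecr);
        return 0;
}
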
@@ -4138,7 +4158,7 @@ drop: | |||
4138 | skb1 = skb1->prev; | 4158 | skb1 = skb1->prev; |
4139 | } | 4159 | } |
4140 | } | 4160 | } |
4141 | __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue); | 4161 | __skb_queue_after(&tp->out_of_order_queue, skb1, skb); |
4142 | 4162 | ||
4143 | /* And clean segments covered by new one as whole. */ | 4163 | /* And clean segments covered by new one as whole. */ |
4144 | while ((skb1 = skb->next) != | 4164 | while ((skb1 = skb->next) != |
@@ -4161,6 +4181,18 @@ add_sack: | |||
4161 | } | 4181 | } |
4162 | } | 4182 | } |
4163 | 4183 | ||
4184 | static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, | ||
4185 | struct sk_buff_head *list) | ||
4186 | { | ||
4187 | struct sk_buff *next = skb->next; | ||
4188 | |||
4189 | __skb_unlink(skb, list); | ||
4190 | __kfree_skb(skb); | ||
4191 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); | ||
4192 | |||
4193 | return next; | ||
4194 | } | ||
4195 | |||
4164 | /* Collapse contiguous sequence of skbs head..tail with | 4196 | /* Collapse contiguous sequence of skbs head..tail with |
4165 | * sequence numbers start..end. | 4197 | * sequence numbers start..end. |
4166 | * Segments with FIN/SYN are not collapsed (only because this | 4198 | * Segments with FIN/SYN are not collapsed (only because this |
@@ -4178,11 +4210,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4178 | for (skb = head; skb != tail;) { | 4210 | for (skb = head; skb != tail;) { |
4179 | /* No new bits? It is possible on ofo queue. */ | 4211 | /* No new bits? It is possible on ofo queue. */ |
4180 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { | 4212 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { |
4181 | struct sk_buff *next = skb->next; | 4213 | skb = tcp_collapse_one(sk, skb, list); |
4182 | __skb_unlink(skb, list); | ||
4183 | __kfree_skb(skb); | ||
4184 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); | ||
4185 | skb = next; | ||
4186 | continue; | 4214 | continue; |
4187 | } | 4215 | } |
4188 | 4216 | ||
@@ -4228,7 +4256,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4228 | memcpy(nskb->head, skb->head, header); | 4256 | memcpy(nskb->head, skb->head, header); |
4229 | memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); | 4257 | memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); |
4230 | TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; | 4258 | TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; |
4231 | __skb_insert(nskb, skb->prev, skb, list); | 4259 | __skb_queue_before(list, skb, nskb); |
4232 | skb_set_owner_r(nskb, sk); | 4260 | skb_set_owner_r(nskb, sk); |
4233 | 4261 | ||
4234 | /* Copy data, releasing collapsed skbs. */ | 4262 | /* Copy data, releasing collapsed skbs. */ |
@@ -4246,11 +4274,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4246 | start += size; | 4274 | start += size; |
4247 | } | 4275 | } |
4248 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { | 4276 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { |
4249 | struct sk_buff *next = skb->next; | 4277 | skb = tcp_collapse_one(sk, skb, list); |
4250 | __skb_unlink(skb, list); | ||
4251 | __kfree_skb(skb); | ||
4252 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); | ||
4253 | skb = next; | ||
4254 | if (skb == tail || | 4278 | if (skb == tail || |
4255 | tcp_hdr(skb)->syn || | 4279 | tcp_hdr(skb)->syn || |
4256 | tcp_hdr(skb)->fin) | 4280 | tcp_hdr(skb)->fin) |
@@ -4691,6 +4715,67 @@ out: | |||
4691 | } | 4715 | } |
4692 | #endif /* CONFIG_NET_DMA */ | 4716 | #endif /* CONFIG_NET_DMA */ |
4693 | 4717 | ||
4718 | /* Does PAWS and seqno based validation of an incoming segment, flags will | ||
4719 | * play significant role here. | ||
4720 | */ | ||
4721 | static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, | ||
4722 | struct tcphdr *th, int syn_inerr) | ||
4723 | { | ||
4724 | struct tcp_sock *tp = tcp_sk(sk); | ||
4725 | |||
4726 | /* RFC1323: H1. Apply PAWS check first. */ | ||
4727 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && | ||
4728 | tcp_paws_discard(sk, skb)) { | ||
4729 | if (!th->rst) { | ||
4730 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); | ||
4731 | tcp_send_dupack(sk, skb); | ||
4732 | goto discard; | ||
4733 | } | ||
4734 | /* Reset is accepted even if it did not pass PAWS. */ | ||
4735 | } | ||
4736 | |||
4737 | /* Step 1: check sequence number */ | ||
4738 | if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { | ||
4739 | /* RFC793, page 37: "In all states except SYN-SENT, all reset | ||
4740 | * (RST) segments are validated by checking their SEQ-fields." | ||
4741 | * And page 69: "If an incoming segment is not acceptable, | ||
4742 | * an acknowledgment should be sent in reply (unless the RST | ||
4743 | * bit is set, if so drop the segment and return)". | ||
4744 | */ | ||
4745 | if (!th->rst) | ||
4746 | tcp_send_dupack(sk, skb); | ||
4747 | goto discard; | ||
4748 | } | ||
4749 | |||
4750 | /* Step 2: check RST bit */ | ||
4751 | if (th->rst) { | ||
4752 | tcp_reset(sk); | ||
4753 | goto discard; | ||
4754 | } | ||
4755 | |||
4756 | /* ts_recent update must be made after we are sure that the packet | ||
4757 | * is in window. | ||
4758 | */ | ||
4759 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
4760 | |||
4761 | /* step 3: check security and precedence [ignored] */ | ||
4762 | |||
4763 | /* step 4: Check for a SYN in window. */ | ||
4764 | if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | ||
4765 | if (syn_inerr) | ||
4766 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); | ||
4767 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); | ||
4768 | tcp_reset(sk); | ||
4769 | return -1; | ||
4770 | } | ||
4771 | |||
4772 | return 1; | ||
4773 | |||
4774 | discard: | ||
4775 | __kfree_skb(skb); | ||
4776 | return 0; | ||
4777 | } | ||
4778 | |||
4694 | /* | 4779 | /* |
4695 | * TCP receive function for the ESTABLISHED state. | 4780 | * TCP receive function for the ESTABLISHED state. |
4696 | * | 4781 | * |
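
tcp_validate_incoming() collects the PAWS, sequence-number, RST and in-window-SYN checks that were previously duplicated in tcp_rcv_established() and tcp_rcv_state_process() behind a three-way result: positive means keep processing the segment, zero means the segment was consumed (discarded), negative means the connection was reset, and both callers translate that with the res <= 0, return -res pattern. A toy sketch of that contract; the boolean parameters stand in for the real checks.

#include <stdio.h>

static int validate_incoming(int paws_ok, int seq_ok, int rst, int syn_in_window)
{
        if (!paws_ok || !seq_ok)
                return 0;                 /* dup-ack sent, segment dropped */
        if (rst)
                return 0;                 /* reset processed, segment dropped */
        if (syn_in_window)
                return -1;                /* connection aborted */
        return 1;                         /* carry on to the ACK/data steps */
}

static int rcv_established(int paws_ok, int seq_ok, int rst, int syn_in_window)
{
        int res = validate_incoming(paws_ok, seq_ok, rst, syn_in_window);
        if (res <= 0)
                return -res;              /* 0 -> done, -1 -> 1 back to caller */
        /* ... step 5 onwards would run here ... */
        return 0;
}

int main(void)
{
        printf("normal segment -> %d\n", rcv_established(1, 1, 0, 0));
        printf("out-of-window  -> %d\n", rcv_established(1, 0, 0, 0));
        printf("in-window SYN  -> %d\n", rcv_established(1, 1, 0, 1));
        return 0;
}
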
@@ -4718,6 +4803,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
4718 | struct tcphdr *th, unsigned len) | 4803 | struct tcphdr *th, unsigned len) |
4719 | { | 4804 | { |
4720 | struct tcp_sock *tp = tcp_sk(sk); | 4805 | struct tcp_sock *tp = tcp_sk(sk); |
4806 | int res; | ||
4721 | 4807 | ||
4722 | /* | 4808 | /* |
4723 | * Header prediction. | 4809 | * Header prediction. |
@@ -4756,19 +4842,10 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
4756 | 4842 | ||
4757 | /* Check timestamp */ | 4843 | /* Check timestamp */ |
4758 | if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { | 4844 | if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { |
4759 | __be32 *ptr = (__be32 *)(th + 1); | ||
4760 | |||
4761 | /* No? Slow path! */ | 4845 | /* No? Slow path! */ |
4762 | if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 4846 | if (!tcp_parse_aligned_timestamp(tp, th)) |
4763 | | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) | ||
4764 | goto slow_path; | 4847 | goto slow_path; |
4765 | 4848 | ||
4766 | tp->rx_opt.saw_tstamp = 1; | ||
4767 | ++ptr; | ||
4768 | tp->rx_opt.rcv_tsval = ntohl(*ptr); | ||
4769 | ++ptr; | ||
4770 | tp->rx_opt.rcv_tsecr = ntohl(*ptr); | ||
4771 | |||
4772 | /* If PAWS failed, check it more carefully in slow path */ | 4849 | /* If PAWS failed, check it more carefully in slow path */ |
4773 | if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) | 4850 | if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) |
4774 | goto slow_path; | 4851 | goto slow_path; |
@@ -4899,51 +4976,12 @@ slow_path: | |||
4899 | goto csum_error; | 4976 | goto csum_error; |
4900 | 4977 | ||
4901 | /* | 4978 | /* |
4902 | * RFC1323: H1. Apply PAWS check first. | ||
4903 | */ | ||
4904 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && | ||
4905 | tcp_paws_discard(sk, skb)) { | ||
4906 | if (!th->rst) { | ||
4907 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); | ||
4908 | tcp_send_dupack(sk, skb); | ||
4909 | goto discard; | ||
4910 | } | ||
4911 | /* Resets are accepted even if PAWS failed. | ||
4912 | |||
4913 | ts_recent update must be made after we are sure | ||
4914 | that the packet is in window. | ||
4915 | */ | ||
4916 | } | ||
4917 | |||
4918 | /* | ||
4919 | * Standard slow path. | 4979 | * Standard slow path. |
4920 | */ | 4980 | */ |
4921 | 4981 | ||
4922 | if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { | 4982 | res = tcp_validate_incoming(sk, skb, th, 1); |
4923 | /* RFC793, page 37: "In all states except SYN-SENT, all reset | 4983 | if (res <= 0) |
4924 | * (RST) segments are validated by checking their SEQ-fields." | 4984 | return -res; |
4925 | * And page 69: "If an incoming segment is not acceptable, | ||
4926 | * an acknowledgment should be sent in reply (unless the RST bit | ||
4927 | * is set, if so drop the segment and return)". | ||
4928 | */ | ||
4929 | if (!th->rst) | ||
4930 | tcp_send_dupack(sk, skb); | ||
4931 | goto discard; | ||
4932 | } | ||
4933 | |||
4934 | if (th->rst) { | ||
4935 | tcp_reset(sk); | ||
4936 | goto discard; | ||
4937 | } | ||
4938 | |||
4939 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
4940 | |||
4941 | if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | ||
4942 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); | ||
4943 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); | ||
4944 | tcp_reset(sk); | ||
4945 | return 1; | ||
4946 | } | ||
4947 | 4985 | ||
4948 | step5: | 4986 | step5: |
4949 | if (th->ack) | 4987 | if (th->ack) |
@@ -5225,6 +5263,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5225 | struct tcp_sock *tp = tcp_sk(sk); | 5263 | struct tcp_sock *tp = tcp_sk(sk); |
5226 | struct inet_connection_sock *icsk = inet_csk(sk); | 5264 | struct inet_connection_sock *icsk = inet_csk(sk); |
5227 | int queued = 0; | 5265 | int queued = 0; |
5266 | int res; | ||
5228 | 5267 | ||
5229 | tp->rx_opt.saw_tstamp = 0; | 5268 | tp->rx_opt.saw_tstamp = 0; |
5230 | 5269 | ||
@@ -5277,42 +5316,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5277 | return 0; | 5316 | return 0; |
5278 | } | 5317 | } |
5279 | 5318 | ||
5280 | if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && | 5319 | res = tcp_validate_incoming(sk, skb, th, 0); |
5281 | tcp_paws_discard(sk, skb)) { | 5320 | if (res <= 0) |
5282 | if (!th->rst) { | 5321 | return -res; |
5283 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); | ||
5284 | tcp_send_dupack(sk, skb); | ||
5285 | goto discard; | ||
5286 | } | ||
5287 | /* Reset is accepted even if it did not pass PAWS. */ | ||
5288 | } | ||
5289 | |||
5290 | /* step 1: check sequence number */ | ||
5291 | if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { | ||
5292 | if (!th->rst) | ||
5293 | tcp_send_dupack(sk, skb); | ||
5294 | goto discard; | ||
5295 | } | ||
5296 | |||
5297 | /* step 2: check RST bit */ | ||
5298 | if (th->rst) { | ||
5299 | tcp_reset(sk); | ||
5300 | goto discard; | ||
5301 | } | ||
5302 | |||
5303 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
5304 | |||
5305 | /* step 3: check security and precedence [ignored] */ | ||
5306 | |||
5307 | /* step 4: | ||
5308 | * | ||
5309 | * Check for a SYN in window. | ||
5310 | */ | ||
5311 | if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { | ||
5312 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); | ||
5313 | tcp_reset(sk); | ||
5314 | return 1; | ||
5315 | } | ||
5316 | 5322 | ||
5317 | /* step 5: check the ACK field */ | 5323 | /* step 5: check the ACK field */ |
5318 | if (th->ack) { | 5324 | if (th->ack) { |
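Note on the tcp_input.c hunks above: the PAWS, sequence, RST and in-window SYN checks that used to be open-coded in both tcp_rcv_established() and tcp_rcv_state_process() are consolidated into tcp_validate_incoming(), whose body is shown at the top of this section. A hedged sketch of the resulting caller pattern (names and return convention taken from the hunks; the wrapper function here is illustrative only):

/*
 * Illustrative caller, not part of the patch: tcp_validate_incoming()
 * returns 1 when the segment passed all checks, 0 when the skb was
 * consumed (discarded), and -1 when the connection was reset.
 */
static int rcv_validate_example(struct sock *sk, struct sk_buff *skb,
				struct tcphdr *th, int syn_inerr)
{
	int res;

	res = tcp_validate_incoming(sk, skb, th, syn_inerr);
	if (res <= 0)
		return -res;	/* 0: skb discarded, -1: connection reset */

	/* res == 1: segment acceptable, continue with ACK/URG/data steps */
	return 1;
}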
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 011478e46c40..d13688e3558d 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1364,6 +1364,10 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1364 | tcp_mtup_init(newsk); | 1364 | tcp_mtup_init(newsk); |
1365 | tcp_sync_mss(newsk, dst_mtu(dst)); | 1365 | tcp_sync_mss(newsk, dst_mtu(dst)); |
1366 | newtp->advmss = dst_metric(dst, RTAX_ADVMSS); | 1366 | newtp->advmss = dst_metric(dst, RTAX_ADVMSS); |
1367 | if (tcp_sk(sk)->rx_opt.user_mss && | ||
1368 | tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) | ||
1369 | newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; | ||
1370 | |||
1367 | tcp_initialize_rcv_mss(newsk); | 1371 | tcp_initialize_rcv_mss(newsk); |
1368 | 1372 | ||
1369 | #ifdef CONFIG_TCP_MD5SIG | 1373 | #ifdef CONFIG_TCP_MD5SIG |
@@ -1946,6 +1950,12 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos) | |||
1946 | return rc; | 1950 | return rc; |
1947 | } | 1951 | } |
1948 | 1952 | ||
1953 | static inline int empty_bucket(struct tcp_iter_state *st) | ||
1954 | { | ||
1955 | return hlist_empty(&tcp_hashinfo.ehash[st->bucket].chain) && | ||
1956 | hlist_empty(&tcp_hashinfo.ehash[st->bucket].twchain); | ||
1957 | } | ||
1958 | |||
1949 | static void *established_get_first(struct seq_file *seq) | 1959 | static void *established_get_first(struct seq_file *seq) |
1950 | { | 1960 | { |
1951 | struct tcp_iter_state* st = seq->private; | 1961 | struct tcp_iter_state* st = seq->private; |
@@ -1958,6 +1968,10 @@ static void *established_get_first(struct seq_file *seq) | |||
1958 | struct inet_timewait_sock *tw; | 1968 | struct inet_timewait_sock *tw; |
1959 | rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); | 1969 | rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); |
1960 | 1970 | ||
1971 | /* Lockless fast path for the common case of empty buckets */ | ||
1972 | if (empty_bucket(st)) | ||
1973 | continue; | ||
1974 | |||
1961 | read_lock_bh(lock); | 1975 | read_lock_bh(lock); |
1962 | sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { | 1976 | sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { |
1963 | if (sk->sk_family != st->family || | 1977 | if (sk->sk_family != st->family || |
@@ -2008,13 +2022,15 @@ get_tw: | |||
2008 | read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); | 2022 | read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); |
2009 | st->state = TCP_SEQ_STATE_ESTABLISHED; | 2023 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
2010 | 2024 | ||
2011 | if (++st->bucket < tcp_hashinfo.ehash_size) { | 2025 | /* Look for next non empty bucket */ |
2012 | read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); | 2026 | while (++st->bucket < tcp_hashinfo.ehash_size && |
2013 | sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); | 2027 | empty_bucket(st)) |
2014 | } else { | 2028 | ; |
2015 | cur = NULL; | 2029 | if (st->bucket >= tcp_hashinfo.ehash_size) |
2016 | goto out; | 2030 | return NULL; |
2017 | } | 2031 | |
2032 | read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); | ||
2033 | sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); | ||
2018 | } else | 2034 | } else |
2019 | sk = sk_next(sk); | 2035 | sk = sk_next(sk); |
2020 | 2036 | ||
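Note on the tcp_ipv4.c hunks above: tcp_v4_syn_recv_sock() now clamps the advertised MSS of an accepted socket to the listener's TCP_MAXSEG (user_mss) setting (a sketch of that clamp follows the tcp_output.c hunks below), and the /proc/net/tcp iterators skip ehash buckets that hold neither established nor timewait sockets without taking the per-bucket lock. A hedged sketch of that lockless skip, using the names from the hunks:

/*
 * Reading hlist_empty() without the bucket lock is only a heuristic;
 * the worst case is taking the lock on a bucket that has just emptied.
 */
while (st->bucket < tcp_hashinfo.ehash_size && empty_bucket(st))
	++st->bucket;

if (st->bucket < tcp_hashinfo.ehash_size) {
	read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	/* ... walk ehash[st->bucket].chain and .twchain as in the hunks ... */
	read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
}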
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8165f5aa8c71..a8499ef3234a 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -1824,6 +1824,8 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, | |||
1824 | 1824 | ||
1825 | /* changed transmit queue under us so clear hints */ | 1825 | /* changed transmit queue under us so clear hints */ |
1826 | tcp_clear_retrans_hints_partial(tp); | 1826 | tcp_clear_retrans_hints_partial(tp); |
1827 | if (next_skb == tp->retransmit_skb_hint) | ||
1828 | tp->retransmit_skb_hint = skb; | ||
1827 | 1829 | ||
1828 | sk_wmem_free_skb(sk, next_skb); | 1830 | sk_wmem_free_skb(sk, next_skb); |
1829 | } | 1831 | } |
@@ -1838,7 +1840,7 @@ void tcp_simple_retransmit(struct sock *sk) | |||
1838 | struct tcp_sock *tp = tcp_sk(sk); | 1840 | struct tcp_sock *tp = tcp_sk(sk); |
1839 | struct sk_buff *skb; | 1841 | struct sk_buff *skb; |
1840 | unsigned int mss = tcp_current_mss(sk, 0); | 1842 | unsigned int mss = tcp_current_mss(sk, 0); |
1841 | int lost = 0; | 1843 | u32 prior_lost = tp->lost_out; |
1842 | 1844 | ||
1843 | tcp_for_write_queue(skb, sk) { | 1845 | tcp_for_write_queue(skb, sk) { |
1844 | if (skb == tcp_send_head(sk)) | 1846 | if (skb == tcp_send_head(sk)) |
@@ -1849,17 +1851,13 @@ void tcp_simple_retransmit(struct sock *sk) | |||
1849 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; | 1851 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; |
1850 | tp->retrans_out -= tcp_skb_pcount(skb); | 1852 | tp->retrans_out -= tcp_skb_pcount(skb); |
1851 | } | 1853 | } |
1852 | if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST)) { | 1854 | tcp_skb_mark_lost_uncond_verify(tp, skb); |
1853 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | ||
1854 | tp->lost_out += tcp_skb_pcount(skb); | ||
1855 | lost = 1; | ||
1856 | } | ||
1857 | } | 1855 | } |
1858 | } | 1856 | } |
1859 | 1857 | ||
1860 | tcp_clear_all_retrans_hints(tp); | 1858 | tcp_clear_retrans_hints_partial(tp); |
1861 | 1859 | ||
1862 | if (!lost) | 1860 | if (prior_lost == tp->lost_out) |
1863 | return; | 1861 | return; |
1864 | 1862 | ||
1865 | if (tcp_is_reno(tp)) | 1863 | if (tcp_is_reno(tp)) |
@@ -1934,8 +1932,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1934 | /* Collapse two adjacent packets if worthwhile and we can. */ | 1932 | /* Collapse two adjacent packets if worthwhile and we can. */ |
1935 | if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && | 1933 | if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && |
1936 | (skb->len < (cur_mss >> 1)) && | 1934 | (skb->len < (cur_mss >> 1)) && |
1937 | (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) && | ||
1938 | (!tcp_skb_is_last(sk, skb)) && | 1935 | (!tcp_skb_is_last(sk, skb)) && |
1936 | (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) && | ||
1939 | (skb_shinfo(skb)->nr_frags == 0 && | 1937 | (skb_shinfo(skb)->nr_frags == 0 && |
1940 | skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) && | 1938 | skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) && |
1941 | (tcp_skb_pcount(skb) == 1 && | 1939 | (tcp_skb_pcount(skb) == 1 && |
@@ -1996,86 +1994,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1996 | return err; | 1994 | return err; |
1997 | } | 1995 | } |
1998 | 1996 | ||
1999 | /* This gets called after a retransmit timeout, and the initially | 1997 | static int tcp_can_forward_retransmit(struct sock *sk) |
2000 | * retransmitted data is acknowledged. It tries to continue | ||
2001 | * resending the rest of the retransmit queue, until either | ||
2002 | * we've sent it all or the congestion window limit is reached. | ||
2003 | * If doing SACK, the first ACK which comes back for a timeout | ||
2004 | * based retransmit packet might feed us FACK information again. | ||
2005 | * If so, we use it to avoid unnecessary retransmissions. | ||
2006 | */ | ||
2007 | void tcp_xmit_retransmit_queue(struct sock *sk) | ||
2008 | { | 1998 | { |
2009 | const struct inet_connection_sock *icsk = inet_csk(sk); | 1999 | const struct inet_connection_sock *icsk = inet_csk(sk); |
2010 | struct tcp_sock *tp = tcp_sk(sk); | 2000 | struct tcp_sock *tp = tcp_sk(sk); |
2011 | struct sk_buff *skb; | ||
2012 | int packet_cnt; | ||
2013 | |||
2014 | if (tp->retransmit_skb_hint) { | ||
2015 | skb = tp->retransmit_skb_hint; | ||
2016 | packet_cnt = tp->retransmit_cnt_hint; | ||
2017 | } else { | ||
2018 | skb = tcp_write_queue_head(sk); | ||
2019 | packet_cnt = 0; | ||
2020 | } | ||
2021 | |||
2022 | /* First pass: retransmit lost packets. */ | ||
2023 | if (tp->lost_out) { | ||
2024 | tcp_for_write_queue_from(skb, sk) { | ||
2025 | __u8 sacked = TCP_SKB_CB(skb)->sacked; | ||
2026 | |||
2027 | if (skb == tcp_send_head(sk)) | ||
2028 | break; | ||
2029 | /* we could do better than to assign each time */ | ||
2030 | tp->retransmit_skb_hint = skb; | ||
2031 | tp->retransmit_cnt_hint = packet_cnt; | ||
2032 | |||
2033 | /* Assume this retransmit will generate | ||
2034 | * only one packet for congestion window | ||
2035 | * calculation purposes. This works because | ||
2036 | * tcp_retransmit_skb() will chop up the | ||
2037 | * packet to be MSS sized and all the | ||
2038 | * packet counting works out. | ||
2039 | */ | ||
2040 | if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) | ||
2041 | return; | ||
2042 | |||
2043 | if (sacked & TCPCB_LOST) { | ||
2044 | if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { | ||
2045 | int mib_idx; | ||
2046 | |||
2047 | if (tcp_retransmit_skb(sk, skb)) { | ||
2048 | tp->retransmit_skb_hint = NULL; | ||
2049 | return; | ||
2050 | } | ||
2051 | if (icsk->icsk_ca_state != TCP_CA_Loss) | ||
2052 | mib_idx = LINUX_MIB_TCPFASTRETRANS; | ||
2053 | else | ||
2054 | mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; | ||
2055 | NET_INC_STATS_BH(sock_net(sk), mib_idx); | ||
2056 | |||
2057 | if (skb == tcp_write_queue_head(sk)) | ||
2058 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | ||
2059 | inet_csk(sk)->icsk_rto, | ||
2060 | TCP_RTO_MAX); | ||
2061 | } | ||
2062 | |||
2063 | packet_cnt += tcp_skb_pcount(skb); | ||
2064 | if (packet_cnt >= tp->lost_out) | ||
2065 | break; | ||
2066 | } | ||
2067 | } | ||
2068 | } | ||
2069 | |||
2070 | /* OK, demanded retransmission is finished. */ | ||
2071 | 2001 | ||
2072 | /* Forward retransmissions are possible only during Recovery. */ | 2002 | /* Forward retransmissions are possible only during Recovery. */ |
2073 | if (icsk->icsk_ca_state != TCP_CA_Recovery) | 2003 | if (icsk->icsk_ca_state != TCP_CA_Recovery) |
2074 | return; | 2004 | return 0; |
2075 | 2005 | ||
2076 | /* No forward retransmissions in Reno are possible. */ | 2006 | /* No forward retransmissions in Reno are possible. */ |
2077 | if (tcp_is_reno(tp)) | 2007 | if (tcp_is_reno(tp)) |
2078 | return; | 2008 | return 0; |
2079 | 2009 | ||
2080 | /* Yeah, we have to make difficult choice between forward transmission | 2010 | /* Yeah, we have to make difficult choice between forward transmission |
2081 | * and retransmission... Both ways have their merits... | 2011 | * and retransmission... Both ways have their merits... |
@@ -2086,43 +2016,104 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
2086 | */ | 2016 | */ |
2087 | 2017 | ||
2088 | if (tcp_may_send_now(sk)) | 2018 | if (tcp_may_send_now(sk)) |
2089 | return; | 2019 | return 0; |
2090 | 2020 | ||
2091 | /* If nothing is SACKed, highest_sack in the loop won't be valid */ | 2021 | return 1; |
2092 | if (!tp->sacked_out) | 2022 | } |
2093 | return; | ||
2094 | 2023 | ||
2095 | if (tp->forward_skb_hint) | 2024 | /* This gets called after a retransmit timeout, and the initially |
2096 | skb = tp->forward_skb_hint; | 2025 | * retransmitted data is acknowledged. It tries to continue |
2097 | else | 2026 | * resending the rest of the retransmit queue, until either |
2027 | * we've sent it all or the congestion window limit is reached. | ||
2028 | * If doing SACK, the first ACK which comes back for a timeout | ||
2029 | * based retransmit packet might feed us FACK information again. | ||
2030 | * If so, we use it to avoid unnecessary retransmissions. | ||
2031 | */ | ||
2032 | void tcp_xmit_retransmit_queue(struct sock *sk) | ||
2033 | { | ||
2034 | const struct inet_connection_sock *icsk = inet_csk(sk); | ||
2035 | struct tcp_sock *tp = tcp_sk(sk); | ||
2036 | struct sk_buff *skb; | ||
2037 | struct sk_buff *hole = NULL; | ||
2038 | u32 last_lost; | ||
2039 | int mib_idx; | ||
2040 | int fwd_rexmitting = 0; | ||
2041 | |||
2042 | if (!tp->lost_out) | ||
2043 | tp->retransmit_high = tp->snd_una; | ||
2044 | |||
2045 | if (tp->retransmit_skb_hint) { | ||
2046 | skb = tp->retransmit_skb_hint; | ||
2047 | last_lost = TCP_SKB_CB(skb)->end_seq; | ||
2048 | if (after(last_lost, tp->retransmit_high)) | ||
2049 | last_lost = tp->retransmit_high; | ||
2050 | } else { | ||
2098 | skb = tcp_write_queue_head(sk); | 2051 | skb = tcp_write_queue_head(sk); |
2052 | last_lost = tp->snd_una; | ||
2053 | } | ||
2099 | 2054 | ||
2055 | /* First pass: retransmit lost packets. */ | ||
2100 | tcp_for_write_queue_from(skb, sk) { | 2056 | tcp_for_write_queue_from(skb, sk) { |
2101 | if (skb == tcp_send_head(sk)) | 2057 | __u8 sacked = TCP_SKB_CB(skb)->sacked; |
2102 | break; | ||
2103 | tp->forward_skb_hint = skb; | ||
2104 | 2058 | ||
2105 | if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) | 2059 | if (skb == tcp_send_head(sk)) |
2106 | break; | 2060 | break; |
2061 | /* we could do better than to assign each time */ | ||
2062 | if (hole == NULL) | ||
2063 | tp->retransmit_skb_hint = skb; | ||
2107 | 2064 | ||
2065 | /* Assume this retransmit will generate | ||
2066 | * only one packet for congestion window | ||
2067 | * calculation purposes. This works because | ||
2068 | * tcp_retransmit_skb() will chop up the | ||
2069 | * packet to be MSS sized and all the | ||
2070 | * packet counting works out. | ||
2071 | */ | ||
2108 | if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) | 2072 | if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) |
2109 | break; | 2073 | return; |
2074 | |||
2075 | if (fwd_rexmitting) { | ||
2076 | begin_fwd: | ||
2077 | if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) | ||
2078 | break; | ||
2079 | mib_idx = LINUX_MIB_TCPFORWARDRETRANS; | ||
2080 | |||
2081 | } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { | ||
2082 | tp->retransmit_high = last_lost; | ||
2083 | if (!tcp_can_forward_retransmit(sk)) | ||
2084 | break; | ||
2085 | /* Backtrack if necessary to non-L'ed skb */ | ||
2086 | if (hole != NULL) { | ||
2087 | skb = hole; | ||
2088 | hole = NULL; | ||
2089 | } | ||
2090 | fwd_rexmitting = 1; | ||
2091 | goto begin_fwd; | ||
2110 | 2092 | ||
2111 | if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) | 2093 | } else if (!(sacked & TCPCB_LOST)) { |
2094 | if (hole == NULL && !(sacked & TCPCB_SACKED_RETRANS)) | ||
2095 | hole = skb; | ||
2112 | continue; | 2096 | continue; |
2113 | 2097 | ||
2114 | /* Ok, retransmit it. */ | 2098 | } else { |
2115 | if (tcp_retransmit_skb(sk, skb)) { | 2099 | last_lost = TCP_SKB_CB(skb)->end_seq; |
2116 | tp->forward_skb_hint = NULL; | 2100 | if (icsk->icsk_ca_state != TCP_CA_Loss) |
2117 | break; | 2101 | mib_idx = LINUX_MIB_TCPFASTRETRANS; |
2102 | else | ||
2103 | mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; | ||
2118 | } | 2104 | } |
2119 | 2105 | ||
2106 | if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) | ||
2107 | continue; | ||
2108 | |||
2109 | if (tcp_retransmit_skb(sk, skb)) | ||
2110 | return; | ||
2111 | NET_INC_STATS_BH(sock_net(sk), mib_idx); | ||
2112 | |||
2120 | if (skb == tcp_write_queue_head(sk)) | 2113 | if (skb == tcp_write_queue_head(sk)) |
2121 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | 2114 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
2122 | inet_csk(sk)->icsk_rto, | 2115 | inet_csk(sk)->icsk_rto, |
2123 | TCP_RTO_MAX); | 2116 | TCP_RTO_MAX); |
2124 | |||
2125 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFORWARDRETRANS); | ||
2126 | } | 2117 | } |
2127 | } | 2118 | } |
2128 | 2119 | ||
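Note on the hunk above: tcp_xmit_retransmit_queue() is rewritten as a single pass over the write queue, remembering the first un-retransmitted "hole" as the restart point for forward retransmissions and tracking retransmit_high/last_lost instead of a packet count. A hypothetical helper (not part of the patch) summarising how each skb is treated once the forward-rexmit state machine (fwd_rexmitting/begin_fwd) is set aside:

enum rexmit_action { REXMIT_NOW, REXMIT_HOLE, REXMIT_SKIP };

static enum rexmit_action classify_skb(__u8 sacked)
{
	if (!(sacked & TCPCB_LOST))
		/* not lost: the first skb that was never retransmitted is
		 * remembered as the hole to back up to for forward rexmit */
		return (sacked & TCPCB_SACKED_RETRANS) ? REXMIT_SKIP
						       : REXMIT_HOLE;

	/* lost: retransmit unless it is already SACKed or still in flight */
	if (sacked & (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS))
		return REXMIT_SKIP;
	return REXMIT_NOW;
}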
@@ -2241,6 +2232,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2241 | struct sk_buff *skb; | 2232 | struct sk_buff *skb; |
2242 | struct tcp_md5sig_key *md5; | 2233 | struct tcp_md5sig_key *md5; |
2243 | __u8 *md5_hash_location; | 2234 | __u8 *md5_hash_location; |
2235 | int mss; | ||
2244 | 2236 | ||
2245 | skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); | 2237 | skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); |
2246 | if (skb == NULL) | 2238 | if (skb == NULL) |
@@ -2251,13 +2243,17 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2251 | 2243 | ||
2252 | skb->dst = dst_clone(dst); | 2244 | skb->dst = dst_clone(dst); |
2253 | 2245 | ||
2246 | mss = dst_metric(dst, RTAX_ADVMSS); | ||
2247 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) | ||
2248 | mss = tp->rx_opt.user_mss; | ||
2249 | |||
2254 | if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ | 2250 | if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ |
2255 | __u8 rcv_wscale; | 2251 | __u8 rcv_wscale; |
2256 | /* Set this up on the first call only */ | 2252 | /* Set this up on the first call only */ |
2257 | req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); | 2253 | req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); |
2258 | /* tcp_full_space because it is guaranteed to be the first packet */ | 2254 | /* tcp_full_space because it is guaranteed to be the first packet */ |
2259 | tcp_select_initial_window(tcp_full_space(sk), | 2255 | tcp_select_initial_window(tcp_full_space(sk), |
2260 | dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), | 2256 | mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), |
2261 | &req->rcv_wnd, | 2257 | &req->rcv_wnd, |
2262 | &req->window_clamp, | 2258 | &req->window_clamp, |
2263 | ireq->wscale_ok, | 2259 | ireq->wscale_ok, |
@@ -2267,8 +2263,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2267 | 2263 | ||
2268 | memset(&opts, 0, sizeof(opts)); | 2264 | memset(&opts, 0, sizeof(opts)); |
2269 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | 2265 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
2270 | tcp_header_size = tcp_synack_options(sk, req, | 2266 | tcp_header_size = tcp_synack_options(sk, req, mss, |
2271 | dst_metric(dst, RTAX_ADVMSS), | ||
2272 | skb, &opts, &md5) + | 2267 | skb, &opts, &md5) + |
2273 | sizeof(struct tcphdr); | 2268 | sizeof(struct tcphdr); |
2274 | 2269 | ||
@@ -2342,6 +2337,9 @@ static void tcp_connect_init(struct sock *sk) | |||
2342 | if (!tp->window_clamp) | 2337 | if (!tp->window_clamp) |
2343 | tp->window_clamp = dst_metric(dst, RTAX_WINDOW); | 2338 | tp->window_clamp = dst_metric(dst, RTAX_WINDOW); |
2344 | tp->advmss = dst_metric(dst, RTAX_ADVMSS); | 2339 | tp->advmss = dst_metric(dst, RTAX_ADVMSS); |
2340 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss) | ||
2341 | tp->advmss = tp->rx_opt.user_mss; | ||
2342 | |||
2345 | tcp_initialize_rcv_mss(sk); | 2343 | tcp_initialize_rcv_mss(sk); |
2346 | 2344 | ||
2347 | tcp_select_initial_window(tcp_full_space(sk), | 2345 | tcp_select_initial_window(tcp_full_space(sk), |
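Note on the tcp_make_synack() and tcp_connect_init() hunks above (and the tcp_v4_syn_recv_sock() hunk earlier): a TCP_MAXSEG setting on the listening or connecting socket now caps the advertised MSS derived from the route metric. A minimal sketch of the clamp, as a hypothetical helper:

/*
 * Hypothetical helper (not in the patch): never advertise more than the
 * user-configured MSS, but keep the route metric when no TCP_MAXSEG was
 * set (user_mss == 0).
 */
static inline unsigned int clamp_advmss(unsigned int route_advmss,
					unsigned int user_mss)
{
	if (user_mss && user_mss < route_advmss)
		return user_mss;
	return route_advmss;
}

For example, the tcp_make_synack() hunk is equivalent to mss = clamp_advmss(dst_metric(dst, RTAX_ADVMSS), tp->rx_opt.user_mss).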
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 17c7b098cdb0..64ce3d33d9c6 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1050,10 +1050,10 @@ ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1050 | } | 1050 | } |
1051 | 1051 | ||
1052 | switch (skb->protocol) { | 1052 | switch (skb->protocol) { |
1053 | case __constant_htons(ETH_P_IP): | 1053 | case htons(ETH_P_IP): |
1054 | ret = ip4ip6_tnl_xmit(skb, dev); | 1054 | ret = ip4ip6_tnl_xmit(skb, dev); |
1055 | break; | 1055 | break; |
1056 | case __constant_htons(ETH_P_IPV6): | 1056 | case htons(ETH_P_IPV6): |
1057 | ret = ip6ip6_tnl_xmit(skb, dev); | 1057 | ret = ip6ip6_tnl_xmit(skb, dev); |
1058 | break; | 1058 | break; |
1059 | default: | 1059 | default: |
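Note on the ip6_tunnel.c hunk above: the switch now uses plain htons() in its case labels. This presumes htons() reduces to an integer constant expression when its argument is a compile-time constant, so the __constant_htons() spelling is no longer needed; a hedged illustration:

/* Both spellings fold to the same constant for a constant argument;
 * only the plain form is kept after this cleanup. */
switch (skb->protocol) {
case htons(ETH_P_IP):		/* constant-folded at compile time */
	/* IPv4-in-IPv6 path */
	break;
case htons(ETH_P_IPV6):
	/* IPv6-in-IPv6 path */
	break;
default:
	break;
}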
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 52d06dd4b817..9967ac7a01a8 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/ipv6.h> | 27 | #include <linux/ipv6.h> |
28 | #include <linux/icmpv6.h> | 28 | #include <linux/icmpv6.h> |
29 | #include <linux/random.h> | 29 | #include <linux/random.h> |
30 | #include <linux/jhash.h> | ||
31 | 30 | ||
32 | #include <net/sock.h> | 31 | #include <net/sock.h> |
33 | #include <net/snmp.h> | 32 | #include <net/snmp.h> |
@@ -103,39 +102,12 @@ struct ctl_table nf_ct_ipv6_sysctl_table[] = { | |||
103 | }; | 102 | }; |
104 | #endif | 103 | #endif |
105 | 104 | ||
106 | static unsigned int ip6qhashfn(__be32 id, const struct in6_addr *saddr, | ||
107 | const struct in6_addr *daddr) | ||
108 | { | ||
109 | u32 a, b, c; | ||
110 | |||
111 | a = (__force u32)saddr->s6_addr32[0]; | ||
112 | b = (__force u32)saddr->s6_addr32[1]; | ||
113 | c = (__force u32)saddr->s6_addr32[2]; | ||
114 | |||
115 | a += JHASH_GOLDEN_RATIO; | ||
116 | b += JHASH_GOLDEN_RATIO; | ||
117 | c += nf_frags.rnd; | ||
118 | __jhash_mix(a, b, c); | ||
119 | |||
120 | a += (__force u32)saddr->s6_addr32[3]; | ||
121 | b += (__force u32)daddr->s6_addr32[0]; | ||
122 | c += (__force u32)daddr->s6_addr32[1]; | ||
123 | __jhash_mix(a, b, c); | ||
124 | |||
125 | a += (__force u32)daddr->s6_addr32[2]; | ||
126 | b += (__force u32)daddr->s6_addr32[3]; | ||
127 | c += (__force u32)id; | ||
128 | __jhash_mix(a, b, c); | ||
129 | |||
130 | return c & (INETFRAGS_HASHSZ - 1); | ||
131 | } | ||
132 | |||
133 | static unsigned int nf_hashfn(struct inet_frag_queue *q) | 105 | static unsigned int nf_hashfn(struct inet_frag_queue *q) |
134 | { | 106 | { |
135 | const struct nf_ct_frag6_queue *nq; | 107 | const struct nf_ct_frag6_queue *nq; |
136 | 108 | ||
137 | nq = container_of(q, struct nf_ct_frag6_queue, q); | 109 | nq = container_of(q, struct nf_ct_frag6_queue, q); |
138 | return ip6qhashfn(nq->id, &nq->saddr, &nq->daddr); | 110 | return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd); |
139 | } | 111 | } |
140 | 112 | ||
141 | static void nf_skb_free(struct sk_buff *skb) | 113 | static void nf_skb_free(struct sk_buff *skb) |
@@ -209,7 +181,7 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst) | |||
209 | arg.dst = dst; | 181 | arg.dst = dst; |
210 | 182 | ||
211 | read_lock_bh(&nf_frags.lock); | 183 | read_lock_bh(&nf_frags.lock); |
212 | hash = ip6qhashfn(id, src, dst); | 184 | hash = inet6_hash_frag(id, src, dst, nf_frags.rnd); |
213 | 185 | ||
214 | q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash); | 186 | q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash); |
215 | local_bh_enable(); | 187 | local_bh_enable(); |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 89184b576e23..2eeadfa039cb 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -99,8 +99,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
99 | * callers should be careful not to use the hash value outside the ipfrag_lock | 99 | * callers should be careful not to use the hash value outside the ipfrag_lock |
100 | * as doing so could race with ipfrag_hash_rnd being recalculated. | 100 | * as doing so could race with ipfrag_hash_rnd being recalculated. |
101 | */ | 101 | */ |
102 | static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, | 102 | unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr, |
103 | struct in6_addr *daddr) | 103 | const struct in6_addr *daddr, u32 rnd) |
104 | { | 104 | { |
105 | u32 a, b, c; | 105 | u32 a, b, c; |
106 | 106 | ||
@@ -110,7 +110,7 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, | |||
110 | 110 | ||
111 | a += JHASH_GOLDEN_RATIO; | 111 | a += JHASH_GOLDEN_RATIO; |
112 | b += JHASH_GOLDEN_RATIO; | 112 | b += JHASH_GOLDEN_RATIO; |
113 | c += ip6_frags.rnd; | 113 | c += rnd; |
114 | __jhash_mix(a, b, c); | 114 | __jhash_mix(a, b, c); |
115 | 115 | ||
116 | a += (__force u32)saddr->s6_addr32[3]; | 116 | a += (__force u32)saddr->s6_addr32[3]; |
@@ -125,13 +125,14 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, | |||
125 | 125 | ||
126 | return c & (INETFRAGS_HASHSZ - 1); | 126 | return c & (INETFRAGS_HASHSZ - 1); |
127 | } | 127 | } |
128 | EXPORT_SYMBOL_GPL(inet6_hash_frag); | ||
128 | 129 | ||
129 | static unsigned int ip6_hashfn(struct inet_frag_queue *q) | 130 | static unsigned int ip6_hashfn(struct inet_frag_queue *q) |
130 | { | 131 | { |
131 | struct frag_queue *fq; | 132 | struct frag_queue *fq; |
132 | 133 | ||
133 | fq = container_of(q, struct frag_queue, q); | 134 | fq = container_of(q, struct frag_queue, q); |
134 | return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr); | 135 | return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd); |
135 | } | 136 | } |
136 | 137 | ||
137 | int ip6_frag_match(struct inet_frag_queue *q, void *a) | 138 | int ip6_frag_match(struct inet_frag_queue *q, void *a) |
@@ -247,7 +248,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst, | |||
247 | arg.dst = dst; | 248 | arg.dst = dst; |
248 | 249 | ||
249 | read_lock(&ip6_frags.lock); | 250 | read_lock(&ip6_frags.lock); |
250 | hash = ip6qhashfn(id, src, dst); | 251 | hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); |
251 | 252 | ||
252 | q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); | 253 | q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); |
253 | if (q == NULL) | 254 | if (q == NULL) |
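Note on the nf_conntrack_reasm.c and reassembly.c hunks above: the duplicated jhash-based fragment hash is removed from the netfilter reassembler, and the reassembly.c version is exported as inet6_hash_frag(), taking the per-subsystem random seed as a parameter instead of reading ip6_frags.rnd directly. A sketch of the shared interface (signature taken from the reassembly.c hunk; presumably declared in a net/ipv6 header):

unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd);

/* each caller passes its own seed */
hash = inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);	/* core reassembly */
hash = inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);	/* nf conntrack reassembly */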
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 63442a1e741c..f4385a6569c2 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1003,6 +1003,25 @@ int icmp6_dst_gc(void) | |||
1003 | return more; | 1003 | return more; |
1004 | } | 1004 | } |
1005 | 1005 | ||
1006 | static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg), | ||
1007 | void *arg) | ||
1008 | { | ||
1009 | struct dst_entry *dst, **pprev; | ||
1010 | |||
1011 | spin_lock_bh(&icmp6_dst_lock); | ||
1012 | pprev = &icmp6_dst_gc_list; | ||
1013 | while ((dst = *pprev) != NULL) { | ||
1014 | struct rt6_info *rt = (struct rt6_info *) dst; | ||
1015 | if (func(rt, arg)) { | ||
1016 | *pprev = dst->next; | ||
1017 | dst_free(dst); | ||
1018 | } else { | ||
1019 | pprev = &dst->next; | ||
1020 | } | ||
1021 | } | ||
1022 | spin_unlock_bh(&icmp6_dst_lock); | ||
1023 | } | ||
1024 | |||
1006 | static int ip6_dst_gc(struct dst_ops *ops) | 1025 | static int ip6_dst_gc(struct dst_ops *ops) |
1007 | { | 1026 | { |
1008 | unsigned long now = jiffies; | 1027 | unsigned long now = jiffies; |
@@ -1930,6 +1949,7 @@ void rt6_ifdown(struct net *net, struct net_device *dev) | |||
1930 | }; | 1949 | }; |
1931 | 1950 | ||
1932 | fib6_clean_all(net, fib6_ifdown, 0, &adn); | 1951 | fib6_clean_all(net, fib6_ifdown, 0, &adn); |
1952 | icmp6_clean_all(fib6_ifdown, &adn); | ||
1933 | } | 1953 | } |
1934 | 1954 | ||
1935 | struct rt6_mtu_change_arg | 1955 | struct rt6_mtu_change_arg |
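Note on the route.c hunks above: icmp6_clean_all() lets rt6_ifdown() also purge entries on icmp6_dst_gc_list when a device goes away, reusing the same fib6_ifdown predicate. The list surgery uses the pointer-to-pointer unlink idiom; a minimal generic sketch with hypothetical types (not kernel code):

struct node {
	struct node *next;
};

/* Remove every node for which dead() returns nonzero, without a
 * separate "previous" pointer: pprev always points at the link that
 * references the current node. */
static void filter_list(struct node **head, int (*dead)(struct node *n))
{
	struct node *n, **pprev = head;

	while ((n = *pprev) != NULL) {
		if (dead(n)) {
			*pprev = n->next;	/* unlink n */
			/* real code frees it here (dst_free() above) */
		} else {
			pprev = &n->next;	/* keep n, advance */
		}
	}
}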
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 10e22fd48222..df16b68644e7 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1286,7 +1286,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1286 | struct request_sock *req, | 1286 | struct request_sock *req, |
1287 | struct dst_entry *dst) | 1287 | struct dst_entry *dst) |
1288 | { | 1288 | { |
1289 | struct inet6_request_sock *treq = inet6_rsk(req); | 1289 | struct inet6_request_sock *treq; |
1290 | struct ipv6_pinfo *newnp, *np = inet6_sk(sk); | 1290 | struct ipv6_pinfo *newnp, *np = inet6_sk(sk); |
1291 | struct tcp6_sock *newtcp6sk; | 1291 | struct tcp6_sock *newtcp6sk; |
1292 | struct inet_sock *newinet; | 1292 | struct inet_sock *newinet; |
@@ -1350,6 +1350,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1350 | return newsk; | 1350 | return newsk; |
1351 | } | 1351 | } |
1352 | 1352 | ||
1353 | treq = inet6_rsk(req); | ||
1353 | opt = np->opt; | 1354 | opt = np->opt; |
1354 | 1355 | ||
1355 | if (sk_acceptq_is_full(sk)) | 1356 | if (sk_acceptq_is_full(sk)) |
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 80d693392b0f..8427518e4f20 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -179,19 +179,6 @@ config MAC80211_VERBOSE_MPL_DEBUG | |||
179 | 179 | ||
180 | Do not select this option. | 180 | Do not select this option. |
181 | 181 | ||
182 | config MAC80211_LOWTX_FRAME_DUMP | ||
183 | bool "Debug frame dumping" | ||
184 | depends on MAC80211_DEBUG_MENU | ||
185 | ---help--- | ||
186 | Selecting this option will cause the stack to | ||
187 | print a message for each frame that is handed | ||
188 | to the lowlevel driver for transmission. This | ||
189 | message includes all MAC addresses and the | ||
190 | frame control field. | ||
191 | |||
192 | If unsure, say N and insert the debugging code | ||
193 | you require into the driver you are debugging. | ||
194 | |||
195 | config MAC80211_DEBUG_COUNTERS | 182 | config MAC80211_DEBUG_COUNTERS |
196 | bool "Extra statistics for TX/RX debugging" | 183 | bool "Extra statistics for TX/RX debugging" |
197 | depends on MAC80211_DEBUG_MENU | 184 | depends on MAC80211_DEBUG_MENU |
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index a169b0201d61..2dc8f2bff27b 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile | |||
@@ -7,6 +7,8 @@ mac80211-y := \ | |||
7 | sta_info.o \ | 7 | sta_info.o \ |
8 | wep.o \ | 8 | wep.o \ |
9 | wpa.o \ | 9 | wpa.o \ |
10 | scan.o \ | ||
11 | ht.o \ | ||
10 | mlme.o \ | 12 | mlme.o \ |
11 | iface.o \ | 13 | iface.o \ |
12 | rate.o \ | 14 | rate.o \ |
@@ -15,6 +17,7 @@ mac80211-y := \ | |||
15 | aes_ccm.o \ | 17 | aes_ccm.o \ |
16 | cfg.o \ | 18 | cfg.o \ |
17 | rx.o \ | 19 | rx.o \ |
20 | spectmgmt.o \ | ||
18 | tx.o \ | 21 | tx.o \ |
19 | key.o \ | 22 | key.o \ |
20 | util.o \ | 23 | util.o \ |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 297c257864c7..855126a3039d 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -17,26 +17,26 @@ | |||
17 | #include "rate.h" | 17 | #include "rate.h" |
18 | #include "mesh.h" | 18 | #include "mesh.h" |
19 | 19 | ||
20 | static enum ieee80211_if_types | 20 | struct ieee80211_hw *wiphy_to_hw(struct wiphy *wiphy) |
21 | nl80211_type_to_mac80211_type(enum nl80211_iftype type) | 21 | { |
22 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
23 | return &local->hw; | ||
24 | } | ||
25 | EXPORT_SYMBOL(wiphy_to_hw); | ||
26 | |||
27 | static bool nl80211_type_check(enum nl80211_iftype type) | ||
22 | { | 28 | { |
23 | switch (type) { | 29 | switch (type) { |
24 | case NL80211_IFTYPE_UNSPECIFIED: | ||
25 | return IEEE80211_IF_TYPE_STA; | ||
26 | case NL80211_IFTYPE_ADHOC: | 30 | case NL80211_IFTYPE_ADHOC: |
27 | return IEEE80211_IF_TYPE_IBSS; | ||
28 | case NL80211_IFTYPE_STATION: | 31 | case NL80211_IFTYPE_STATION: |
29 | return IEEE80211_IF_TYPE_STA; | ||
30 | case NL80211_IFTYPE_MONITOR: | 32 | case NL80211_IFTYPE_MONITOR: |
31 | return IEEE80211_IF_TYPE_MNTR; | ||
32 | #ifdef CONFIG_MAC80211_MESH | 33 | #ifdef CONFIG_MAC80211_MESH |
33 | case NL80211_IFTYPE_MESH_POINT: | 34 | case NL80211_IFTYPE_MESH_POINT: |
34 | return IEEE80211_IF_TYPE_MESH_POINT; | ||
35 | #endif | 35 | #endif |
36 | case NL80211_IFTYPE_WDS: | 36 | case NL80211_IFTYPE_WDS: |
37 | return IEEE80211_IF_TYPE_WDS; | 37 | return true; |
38 | default: | 38 | default: |
39 | return IEEE80211_IF_TYPE_INVALID; | 39 | return false; |
40 | } | 40 | } |
41 | } | 41 | } |
42 | 42 | ||
@@ -45,17 +45,15 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name, | |||
45 | struct vif_params *params) | 45 | struct vif_params *params) |
46 | { | 46 | { |
47 | struct ieee80211_local *local = wiphy_priv(wiphy); | 47 | struct ieee80211_local *local = wiphy_priv(wiphy); |
48 | enum ieee80211_if_types itype; | ||
49 | struct net_device *dev; | 48 | struct net_device *dev; |
50 | struct ieee80211_sub_if_data *sdata; | 49 | struct ieee80211_sub_if_data *sdata; |
51 | int err; | 50 | int err; |
52 | 51 | ||
53 | itype = nl80211_type_to_mac80211_type(type); | 52 | if (!nl80211_type_check(type)) |
54 | if (itype == IEEE80211_IF_TYPE_INVALID) | ||
55 | return -EINVAL; | 53 | return -EINVAL; |
56 | 54 | ||
57 | err = ieee80211_if_add(local, name, &dev, itype, params); | 55 | err = ieee80211_if_add(local, name, &dev, type, params); |
58 | if (err || itype != IEEE80211_IF_TYPE_MNTR || !flags) | 56 | if (err || type != NL80211_IFTYPE_MONITOR || !flags) |
59 | return err; | 57 | return err; |
60 | 58 | ||
61 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 59 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
@@ -66,13 +64,16 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name, | |||
66 | static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) | 64 | static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) |
67 | { | 65 | { |
68 | struct net_device *dev; | 66 | struct net_device *dev; |
67 | struct ieee80211_sub_if_data *sdata; | ||
69 | 68 | ||
70 | /* we're under RTNL */ | 69 | /* we're under RTNL */ |
71 | dev = __dev_get_by_index(&init_net, ifindex); | 70 | dev = __dev_get_by_index(&init_net, ifindex); |
72 | if (!dev) | 71 | if (!dev) |
73 | return -ENODEV; | 72 | return -ENODEV; |
74 | 73 | ||
75 | ieee80211_if_remove(dev); | 74 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
75 | |||
76 | ieee80211_if_remove(sdata); | ||
76 | 77 | ||
77 | return 0; | 78 | return 0; |
78 | } | 79 | } |
@@ -81,9 +82,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex, | |||
81 | enum nl80211_iftype type, u32 *flags, | 82 | enum nl80211_iftype type, u32 *flags, |
82 | struct vif_params *params) | 83 | struct vif_params *params) |
83 | { | 84 | { |
84 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
85 | struct net_device *dev; | 85 | struct net_device *dev; |
86 | enum ieee80211_if_types itype; | ||
87 | struct ieee80211_sub_if_data *sdata; | 86 | struct ieee80211_sub_if_data *sdata; |
88 | int ret; | 87 | int ret; |
89 | 88 | ||
@@ -92,25 +91,24 @@ static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex, | |||
92 | if (!dev) | 91 | if (!dev) |
93 | return -ENODEV; | 92 | return -ENODEV; |
94 | 93 | ||
95 | itype = nl80211_type_to_mac80211_type(type); | 94 | if (!nl80211_type_check(type)) |
96 | if (itype == IEEE80211_IF_TYPE_INVALID) | ||
97 | return -EINVAL; | 95 | return -EINVAL; |
98 | 96 | ||
99 | if (dev == local->mdev) | ||
100 | return -EOPNOTSUPP; | ||
101 | |||
102 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 97 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
103 | 98 | ||
104 | ret = ieee80211_if_change_type(sdata, itype); | 99 | ret = ieee80211_if_change_type(sdata, type); |
105 | if (ret) | 100 | if (ret) |
106 | return ret; | 101 | return ret; |
107 | 102 | ||
103 | if (netif_running(sdata->dev)) | ||
104 | return -EBUSY; | ||
105 | |||
108 | if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len) | 106 | if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len) |
109 | ieee80211_if_sta_set_mesh_id(&sdata->u.sta, | 107 | ieee80211_sdata_set_mesh_id(sdata, |
110 | params->mesh_id_len, | 108 | params->mesh_id_len, |
111 | params->mesh_id); | 109 | params->mesh_id); |
112 | 110 | ||
113 | if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR || !flags) | 111 | if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags) |
114 | return 0; | 112 | return 0; |
115 | 113 | ||
116 | sdata->u.mntr_flags = *flags; | 114 | sdata->u.mntr_flags = *flags; |
@@ -121,16 +119,12 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
121 | u8 key_idx, u8 *mac_addr, | 119 | u8 key_idx, u8 *mac_addr, |
122 | struct key_params *params) | 120 | struct key_params *params) |
123 | { | 121 | { |
124 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
125 | struct ieee80211_sub_if_data *sdata; | 122 | struct ieee80211_sub_if_data *sdata; |
126 | struct sta_info *sta = NULL; | 123 | struct sta_info *sta = NULL; |
127 | enum ieee80211_key_alg alg; | 124 | enum ieee80211_key_alg alg; |
128 | struct ieee80211_key *key; | 125 | struct ieee80211_key *key; |
129 | int err; | 126 | int err; |
130 | 127 | ||
131 | if (dev == local->mdev) | ||
132 | return -EOPNOTSUPP; | ||
133 | |||
134 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 128 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
135 | 129 | ||
136 | switch (params->cipher) { | 130 | switch (params->cipher) { |
@@ -175,14 +169,10 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
175 | static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, | 169 | static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, |
176 | u8 key_idx, u8 *mac_addr) | 170 | u8 key_idx, u8 *mac_addr) |
177 | { | 171 | { |
178 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
179 | struct ieee80211_sub_if_data *sdata; | 172 | struct ieee80211_sub_if_data *sdata; |
180 | struct sta_info *sta; | 173 | struct sta_info *sta; |
181 | int ret; | 174 | int ret; |
182 | 175 | ||
183 | if (dev == local->mdev) | ||
184 | return -EOPNOTSUPP; | ||
185 | |||
186 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 176 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
187 | 177 | ||
188 | rcu_read_lock(); | 178 | rcu_read_lock(); |
@@ -223,7 +213,6 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, | |||
223 | void (*callback)(void *cookie, | 213 | void (*callback)(void *cookie, |
224 | struct key_params *params)) | 214 | struct key_params *params)) |
225 | { | 215 | { |
226 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
227 | struct ieee80211_sub_if_data *sdata; | 216 | struct ieee80211_sub_if_data *sdata; |
228 | struct sta_info *sta = NULL; | 217 | struct sta_info *sta = NULL; |
229 | u8 seq[6] = {0}; | 218 | u8 seq[6] = {0}; |
@@ -233,9 +222,6 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, | |||
233 | u16 iv16; | 222 | u16 iv16; |
234 | int err = -ENOENT; | 223 | int err = -ENOENT; |
235 | 224 | ||
236 | if (dev == local->mdev) | ||
237 | return -EOPNOTSUPP; | ||
238 | |||
239 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 225 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
240 | 226 | ||
241 | rcu_read_lock(); | 227 | rcu_read_lock(); |
@@ -311,12 +297,8 @@ static int ieee80211_config_default_key(struct wiphy *wiphy, | |||
311 | struct net_device *dev, | 297 | struct net_device *dev, |
312 | u8 key_idx) | 298 | u8 key_idx) |
313 | { | 299 | { |
314 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
315 | struct ieee80211_sub_if_data *sdata; | 300 | struct ieee80211_sub_if_data *sdata; |
316 | 301 | ||
317 | if (dev == local->mdev) | ||
318 | return -EOPNOTSUPP; | ||
319 | |||
320 | rcu_read_lock(); | 302 | rcu_read_lock(); |
321 | 303 | ||
322 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 304 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
@@ -365,7 +347,7 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, | |||
365 | sta = sta_info_get_by_idx(local, idx, dev); | 347 | sta = sta_info_get_by_idx(local, idx, dev); |
366 | if (sta) { | 348 | if (sta) { |
367 | ret = 0; | 349 | ret = 0; |
368 | memcpy(mac, sta->addr, ETH_ALEN); | 350 | memcpy(mac, sta->sta.addr, ETH_ALEN); |
369 | sta_set_sinfo(sta, sinfo); | 351 | sta_set_sinfo(sta, sinfo); |
370 | } | 352 | } |
371 | 353 | ||
@@ -497,16 +479,12 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata, | |||
497 | static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, | 479 | static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, |
498 | struct beacon_parameters *params) | 480 | struct beacon_parameters *params) |
499 | { | 481 | { |
500 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
501 | struct ieee80211_sub_if_data *sdata; | 482 | struct ieee80211_sub_if_data *sdata; |
502 | struct beacon_data *old; | 483 | struct beacon_data *old; |
503 | 484 | ||
504 | if (dev == local->mdev) | ||
505 | return -EOPNOTSUPP; | ||
506 | |||
507 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 485 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
508 | 486 | ||
509 | if (sdata->vif.type != IEEE80211_IF_TYPE_AP) | 487 | if (sdata->vif.type != NL80211_IFTYPE_AP) |
510 | return -EINVAL; | 488 | return -EINVAL; |
511 | 489 | ||
512 | old = sdata->u.ap.beacon; | 490 | old = sdata->u.ap.beacon; |
@@ -520,16 +498,12 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, | |||
520 | static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev, | 498 | static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev, |
521 | struct beacon_parameters *params) | 499 | struct beacon_parameters *params) |
522 | { | 500 | { |
523 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
524 | struct ieee80211_sub_if_data *sdata; | 501 | struct ieee80211_sub_if_data *sdata; |
525 | struct beacon_data *old; | 502 | struct beacon_data *old; |
526 | 503 | ||
527 | if (dev == local->mdev) | ||
528 | return -EOPNOTSUPP; | ||
529 | |||
530 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 504 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
531 | 505 | ||
532 | if (sdata->vif.type != IEEE80211_IF_TYPE_AP) | 506 | if (sdata->vif.type != NL80211_IFTYPE_AP) |
533 | return -EINVAL; | 507 | return -EINVAL; |
534 | 508 | ||
535 | old = sdata->u.ap.beacon; | 509 | old = sdata->u.ap.beacon; |
@@ -542,16 +516,12 @@ static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev, | |||
542 | 516 | ||
543 | static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev) | 517 | static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev) |
544 | { | 518 | { |
545 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
546 | struct ieee80211_sub_if_data *sdata; | 519 | struct ieee80211_sub_if_data *sdata; |
547 | struct beacon_data *old; | 520 | struct beacon_data *old; |
548 | 521 | ||
549 | if (dev == local->mdev) | ||
550 | return -EOPNOTSUPP; | ||
551 | |||
552 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 522 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
553 | 523 | ||
554 | if (sdata->vif.type != IEEE80211_IF_TYPE_AP) | 524 | if (sdata->vif.type != NL80211_IFTYPE_AP) |
555 | return -EINVAL; | 525 | return -EINVAL; |
556 | 526 | ||
557 | old = sdata->u.ap.beacon; | 527 | old = sdata->u.ap.beacon; |
@@ -594,7 +564,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta) | |||
594 | * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */ | 564 | * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */ |
595 | 565 | ||
596 | memset(msg->da, 0xff, ETH_ALEN); | 566 | memset(msg->da, 0xff, ETH_ALEN); |
597 | memcpy(msg->sa, sta->addr, ETH_ALEN); | 567 | memcpy(msg->sa, sta->sta.addr, ETH_ALEN); |
598 | msg->len = htons(6); | 568 | msg->len = htons(6); |
599 | msg->dsap = 0; | 569 | msg->dsap = 0; |
600 | msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */ | 570 | msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */ |
@@ -649,9 +619,9 @@ static void sta_apply_parameters(struct ieee80211_local *local, | |||
649 | */ | 619 | */ |
650 | 620 | ||
651 | if (params->aid) { | 621 | if (params->aid) { |
652 | sta->aid = params->aid; | 622 | sta->sta.aid = params->aid; |
653 | if (sta->aid > IEEE80211_MAX_AID) | 623 | if (sta->sta.aid > IEEE80211_MAX_AID) |
654 | sta->aid = 0; /* XXX: should this be an error? */ | 624 | sta->sta.aid = 0; /* XXX: should this be an error? */ |
655 | } | 625 | } |
656 | 626 | ||
657 | if (params->listen_interval >= 0) | 627 | if (params->listen_interval >= 0) |
@@ -668,7 +638,12 @@ static void sta_apply_parameters(struct ieee80211_local *local, | |||
668 | rates |= BIT(j); | 638 | rates |= BIT(j); |
669 | } | 639 | } |
670 | } | 640 | } |
671 | sta->supp_rates[local->oper_channel->band] = rates; | 641 | sta->sta.supp_rates[local->oper_channel->band] = rates; |
642 | } | ||
643 | |||
644 | if (params->ht_capa) { | ||
645 | ieee80211_ht_cap_ie_to_ht_info(params->ht_capa, | ||
646 | &sta->sta.ht_info); | ||
672 | } | 647 | } |
673 | 648 | ||
674 | if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) { | 649 | if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) { |
@@ -691,9 +666,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, | |||
691 | struct ieee80211_sub_if_data *sdata; | 666 | struct ieee80211_sub_if_data *sdata; |
692 | int err; | 667 | int err; |
693 | 668 | ||
694 | if (dev == local->mdev || params->vlan == local->mdev) | ||
695 | return -EOPNOTSUPP; | ||
696 | |||
697 | /* Prevent a race with changing the rate control algorithm */ | 669 | /* Prevent a race with changing the rate control algorithm */ |
698 | if (!netif_running(dev)) | 670 | if (!netif_running(dev)) |
699 | return -ENETDOWN; | 671 | return -ENETDOWN; |
@@ -701,8 +673,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, | |||
701 | if (params->vlan) { | 673 | if (params->vlan) { |
702 | sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); | 674 | sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); |
703 | 675 | ||
704 | if (sdata->vif.type != IEEE80211_IF_TYPE_VLAN && | 676 | if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && |
705 | sdata->vif.type != IEEE80211_IF_TYPE_AP) | 677 | sdata->vif.type != NL80211_IFTYPE_AP) |
706 | return -EINVAL; | 678 | return -EINVAL; |
707 | } else | 679 | } else |
708 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 680 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
@@ -721,7 +693,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, | |||
721 | 693 | ||
722 | sta_apply_parameters(local, sta, params); | 694 | sta_apply_parameters(local, sta, params); |
723 | 695 | ||
724 | rate_control_rate_init(sta, local); | 696 | rate_control_rate_init(sta); |
725 | 697 | ||
726 | rcu_read_lock(); | 698 | rcu_read_lock(); |
727 | 699 | ||
@@ -732,8 +704,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, | |||
732 | return err; | 704 | return err; |
733 | } | 705 | } |
734 | 706 | ||
735 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN || | 707 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN || |
736 | sdata->vif.type == IEEE80211_IF_TYPE_AP) | 708 | sdata->vif.type == NL80211_IFTYPE_AP) |
737 | ieee80211_send_layer2_update(sta); | 709 | ieee80211_send_layer2_update(sta); |
738 | 710 | ||
739 | rcu_read_unlock(); | 711 | rcu_read_unlock(); |
@@ -748,9 +720,6 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, | |||
748 | struct ieee80211_sub_if_data *sdata; | 720 | struct ieee80211_sub_if_data *sdata; |
749 | struct sta_info *sta; | 721 | struct sta_info *sta; |
750 | 722 | ||
751 | if (dev == local->mdev) | ||
752 | return -EOPNOTSUPP; | ||
753 | |||
754 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 723 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
755 | 724 | ||
756 | if (mac) { | 725 | if (mac) { |
@@ -782,9 +751,6 @@ static int ieee80211_change_station(struct wiphy *wiphy, | |||
782 | struct sta_info *sta; | 751 | struct sta_info *sta; |
783 | struct ieee80211_sub_if_data *vlansdata; | 752 | struct ieee80211_sub_if_data *vlansdata; |
784 | 753 | ||
785 | if (dev == local->mdev || params->vlan == local->mdev) | ||
786 | return -EOPNOTSUPP; | ||
787 | |||
788 | rcu_read_lock(); | 754 | rcu_read_lock(); |
789 | 755 | ||
790 | /* XXX: get sta belonging to dev */ | 756 | /* XXX: get sta belonging to dev */ |
@@ -797,8 +763,8 @@ static int ieee80211_change_station(struct wiphy *wiphy, | |||
797 | if (params->vlan && params->vlan != sta->sdata->dev) { | 763 | if (params->vlan && params->vlan != sta->sdata->dev) { |
798 | vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); | 764 | vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); |
799 | 765 | ||
800 | if (vlansdata->vif.type != IEEE80211_IF_TYPE_VLAN && | 766 | if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN && |
801 | vlansdata->vif.type != IEEE80211_IF_TYPE_AP) { | 767 | vlansdata->vif.type != NL80211_IFTYPE_AP) { |
802 | rcu_read_unlock(); | 768 | rcu_read_unlock(); |
803 | return -EINVAL; | 769 | return -EINVAL; |
804 | } | 770 | } |
@@ -824,15 +790,12 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, | |||
824 | struct sta_info *sta; | 790 | struct sta_info *sta; |
825 | int err; | 791 | int err; |
826 | 792 | ||
827 | if (dev == local->mdev) | ||
828 | return -EOPNOTSUPP; | ||
829 | |||
830 | if (!netif_running(dev)) | 793 | if (!netif_running(dev)) |
831 | return -ENETDOWN; | 794 | return -ENETDOWN; |
832 | 795 | ||
833 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 796 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
834 | 797 | ||
835 | if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) | 798 | if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT) |
836 | return -ENOTSUPP; | 799 | return -ENOTSUPP; |
837 | 800 | ||
838 | rcu_read_lock(); | 801 | rcu_read_lock(); |
@@ -842,13 +805,13 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, | |||
842 | return -ENOENT; | 805 | return -ENOENT; |
843 | } | 806 | } |
844 | 807 | ||
845 | err = mesh_path_add(dst, dev); | 808 | err = mesh_path_add(dst, sdata); |
846 | if (err) { | 809 | if (err) { |
847 | rcu_read_unlock(); | 810 | rcu_read_unlock(); |
848 | return err; | 811 | return err; |
849 | } | 812 | } |
850 | 813 | ||
851 | mpath = mesh_path_lookup(dst, dev); | 814 | mpath = mesh_path_lookup(dst, sdata); |
852 | if (!mpath) { | 815 | if (!mpath) { |
853 | rcu_read_unlock(); | 816 | rcu_read_unlock(); |
854 | return -ENXIO; | 817 | return -ENXIO; |
@@ -862,10 +825,12 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, | |||
862 | static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, | 825 | static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, |
863 | u8 *dst) | 826 | u8 *dst) |
864 | { | 827 | { |
828 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
829 | |||
865 | if (dst) | 830 | if (dst) |
866 | return mesh_path_del(dst, dev); | 831 | return mesh_path_del(dst, sdata); |
867 | 832 | ||
868 | mesh_path_flush(dev); | 833 | mesh_path_flush(sdata); |
869 | return 0; | 834 | return 0; |
870 | } | 835 | } |
871 | 836 | ||
@@ -878,15 +843,12 @@ static int ieee80211_change_mpath(struct wiphy *wiphy, | |||
878 | struct mesh_path *mpath; | 843 | struct mesh_path *mpath; |
879 | struct sta_info *sta; | 844 | struct sta_info *sta; |
880 | 845 | ||
881 | if (dev == local->mdev) | ||
882 | return -EOPNOTSUPP; | ||
883 | |||
884 | if (!netif_running(dev)) | 846 | if (!netif_running(dev)) |
885 | return -ENETDOWN; | 847 | return -ENETDOWN; |
886 | 848 | ||
887 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 849 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
888 | 850 | ||
889 | if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) | 851 | if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT) |
890 | return -ENOTSUPP; | 852 | return -ENOTSUPP; |
891 | 853 | ||
892 | rcu_read_lock(); | 854 | rcu_read_lock(); |
@@ -897,7 +859,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy, | |||
897 | return -ENOENT; | 859 | return -ENOENT; |
898 | } | 860 | } |
899 | 861 | ||
900 | mpath = mesh_path_lookup(dst, dev); | 862 | mpath = mesh_path_lookup(dst, sdata); |
901 | if (!mpath) { | 863 | if (!mpath) { |
902 | rcu_read_unlock(); | 864 | rcu_read_unlock(); |
903 | return -ENOENT; | 865 | return -ENOENT; |
@@ -913,7 +875,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, | |||
913 | struct mpath_info *pinfo) | 875 | struct mpath_info *pinfo) |
914 | { | 876 | { |
915 | if (mpath->next_hop) | 877 | if (mpath->next_hop) |
916 | memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN); | 878 | memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN); |
917 | else | 879 | else |
918 | memset(next_hop, 0, ETH_ALEN); | 880 | memset(next_hop, 0, ETH_ALEN); |
919 | 881 | ||
@@ -952,20 +914,16 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, | |||
952 | u8 *dst, u8 *next_hop, struct mpath_info *pinfo) | 914 | u8 *dst, u8 *next_hop, struct mpath_info *pinfo) |
953 | 915 | ||
954 | { | 916 | { |
955 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
956 | struct ieee80211_sub_if_data *sdata; | 917 | struct ieee80211_sub_if_data *sdata; |
957 | struct mesh_path *mpath; | 918 | struct mesh_path *mpath; |
958 | 919 | ||
959 | if (dev == local->mdev) | ||
960 | return -EOPNOTSUPP; | ||
961 | |||
962 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 920 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
963 | 921 | ||
964 | if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) | 922 | if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT) |
965 | return -ENOTSUPP; | 923 | return -ENOTSUPP; |
966 | 924 | ||
967 | rcu_read_lock(); | 925 | rcu_read_lock(); |
968 | mpath = mesh_path_lookup(dst, dev); | 926 | mpath = mesh_path_lookup(dst, sdata); |
969 | if (!mpath) { | 927 | if (!mpath) { |
970 | rcu_read_unlock(); | 928 | rcu_read_unlock(); |
971 | return -ENOENT; | 929 | return -ENOENT; |
@@ -980,20 +938,16 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, | |||
980 | int idx, u8 *dst, u8 *next_hop, | 938 | int idx, u8 *dst, u8 *next_hop, |
981 | struct mpath_info *pinfo) | 939 | struct mpath_info *pinfo) |
982 | { | 940 | { |
983 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
984 | struct ieee80211_sub_if_data *sdata; | 941 | struct ieee80211_sub_if_data *sdata; |
985 | struct mesh_path *mpath; | 942 | struct mesh_path *mpath; |
986 | 943 | ||
987 | if (dev == local->mdev) | ||
988 | return -EOPNOTSUPP; | ||
989 | |||
990 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 944 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
991 | 945 | ||
992 | if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) | 946 | if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT) |
993 | return -ENOTSUPP; | 947 | return -ENOTSUPP; |
994 | 948 | ||
995 | rcu_read_lock(); | 949 | rcu_read_lock(); |
996 | mpath = mesh_path_lookup_by_idx(idx, dev); | 950 | mpath = mesh_path_lookup_by_idx(idx, sdata); |
997 | if (!mpath) { | 951 | if (!mpath) { |
998 | rcu_read_unlock(); | 952 | rcu_read_unlock(); |
999 | return -ENOENT; | 953 | return -ENOENT; |
@@ -1005,6 +959,38 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, | |||
1005 | } | 959 | } |
1006 | #endif | 960 | #endif |
1007 | 961 | ||
962 | static int ieee80211_change_bss(struct wiphy *wiphy, | ||
963 | struct net_device *dev, | ||
964 | struct bss_parameters *params) | ||
965 | { | ||
966 | struct ieee80211_sub_if_data *sdata; | ||
967 | u32 changed = 0; | ||
968 | |||
969 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
970 | |||
971 | if (sdata->vif.type != NL80211_IFTYPE_AP) | ||
972 | return -EINVAL; | ||
973 | |||
974 | if (params->use_cts_prot >= 0) { | ||
975 | sdata->bss_conf.use_cts_prot = params->use_cts_prot; | ||
976 | changed |= BSS_CHANGED_ERP_CTS_PROT; | ||
977 | } | ||
978 | if (params->use_short_preamble >= 0) { | ||
979 | sdata->bss_conf.use_short_preamble = | ||
980 | params->use_short_preamble; | ||
981 | changed |= BSS_CHANGED_ERP_PREAMBLE; | ||
982 | } | ||
983 | if (params->use_short_slot_time >= 0) { | ||
984 | sdata->bss_conf.use_short_slot = | ||
985 | params->use_short_slot_time; | ||
986 | changed |= BSS_CHANGED_ERP_SLOT; | ||
987 | } | ||
988 | |||
989 | ieee80211_bss_info_change_notify(sdata, changed); | ||
990 | |||
991 | return 0; | ||
992 | } | ||
993 | |||
1008 | struct cfg80211_ops mac80211_config_ops = { | 994 | struct cfg80211_ops mac80211_config_ops = { |
1009 | .add_virtual_intf = ieee80211_add_iface, | 995 | .add_virtual_intf = ieee80211_add_iface, |
1010 | .del_virtual_intf = ieee80211_del_iface, | 996 | .del_virtual_intf = ieee80211_del_iface, |
@@ -1028,4 +1014,5 @@ struct cfg80211_ops mac80211_config_ops = { | |||
1028 | .get_mpath = ieee80211_get_mpath, | 1014 | .get_mpath = ieee80211_get_mpath, |
1029 | .dump_mpath = ieee80211_dump_mpath, | 1015 | .dump_mpath = ieee80211_dump_mpath, |
1030 | #endif | 1016 | #endif |
1017 | .change_bss = ieee80211_change_bss, | ||
1031 | }; | 1018 | }; |
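Note: the new ieee80211_change_bss() handler above only touches an ERP setting when the corresponding bss_parameters field is non-negative; cfg80211 passes a negative value to mean "leave this attribute as it is", so one request can flip a single flag without resetting the others. A minimal sketch of a hypothetical caller (not part of this patch; it assumes only the struct bss_parameters fields used above):

	/* Hypothetical example: enable CTS protection only, leaving the
	 * preamble and slot-time settings untouched (negative = no change). */
	static int example_enable_cts_prot(struct wiphy *wiphy,
					   struct net_device *dev)
	{
		struct bss_parameters params = {
			.use_cts_prot        = 1,
			.use_short_preamble  = -1,	/* unchanged */
			.use_short_slot_time = -1,	/* unchanged */
		};

		/* Only BSS_CHANGED_ERP_CTS_PROT is accumulated in 'changed'. */
		return mac80211_config_ops.change_bss(wiphy, dev, &params);
	}
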
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index ee509f1109e2..24ce54463310 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c | |||
@@ -51,8 +51,6 @@ DEBUGFS_READONLY_FILE(antenna_sel_tx, 20, "%d", | |||
51 | local->hw.conf.antenna_sel_tx); | 51 | local->hw.conf.antenna_sel_tx); |
52 | DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d", | 52 | DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d", |
53 | local->hw.conf.antenna_sel_rx); | 53 | local->hw.conf.antenna_sel_rx); |
54 | DEBUGFS_READONLY_FILE(bridge_packets, 20, "%d", | ||
55 | local->bridge_packets); | ||
56 | DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", | 54 | DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", |
57 | local->rts_threshold); | 55 | local->rts_threshold); |
58 | DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d", | 56 | DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d", |
@@ -206,7 +204,6 @@ void debugfs_hw_add(struct ieee80211_local *local) | |||
206 | DEBUGFS_ADD(frequency); | 204 | DEBUGFS_ADD(frequency); |
207 | DEBUGFS_ADD(antenna_sel_tx); | 205 | DEBUGFS_ADD(antenna_sel_tx); |
208 | DEBUGFS_ADD(antenna_sel_rx); | 206 | DEBUGFS_ADD(antenna_sel_rx); |
209 | DEBUGFS_ADD(bridge_packets); | ||
210 | DEBUGFS_ADD(rts_threshold); | 207 | DEBUGFS_ADD(rts_threshold); |
211 | DEBUGFS_ADD(fragmentation_threshold); | 208 | DEBUGFS_ADD(fragmentation_threshold); |
212 | DEBUGFS_ADD(short_retry_limit); | 209 | DEBUGFS_ADD(short_retry_limit); |
@@ -263,7 +260,6 @@ void debugfs_hw_del(struct ieee80211_local *local) | |||
263 | DEBUGFS_DEL(frequency); | 260 | DEBUGFS_DEL(frequency); |
264 | DEBUGFS_DEL(antenna_sel_tx); | 261 | DEBUGFS_DEL(antenna_sel_tx); |
265 | DEBUGFS_DEL(antenna_sel_rx); | 262 | DEBUGFS_DEL(antenna_sel_rx); |
266 | DEBUGFS_DEL(bridge_packets); | ||
267 | DEBUGFS_DEL(rts_threshold); | 263 | DEBUGFS_DEL(rts_threshold); |
268 | DEBUGFS_DEL(fragmentation_threshold); | 264 | DEBUGFS_DEL(fragmentation_threshold); |
269 | DEBUGFS_DEL(short_retry_limit); | 265 | DEBUGFS_DEL(short_retry_limit); |
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index cf82acec913a..a3294d109322 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c | |||
@@ -206,7 +206,8 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key) | |||
206 | rcu_read_lock(); | 206 | rcu_read_lock(); |
207 | sta = rcu_dereference(key->sta); | 207 | sta = rcu_dereference(key->sta); |
208 | if (sta) | 208 | if (sta) |
209 | sprintf(buf, "../../stations/%s", print_mac(mac, sta->addr)); | 209 | sprintf(buf, "../../stations/%s", |
210 | print_mac(mac, sta->sta.addr)); | ||
210 | rcu_read_unlock(); | 211 | rcu_read_unlock(); |
211 | 212 | ||
212 | /* using sta as a boolean is fine outside RCU lock */ | 213 | /* using sta as a boolean is fine outside RCU lock */ |
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 8165df578c92..2a4515623776 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c | |||
@@ -173,7 +173,6 @@ IEEE80211_IF_FILE(assoc_tries, u.sta.assoc_tries, DEC); | |||
173 | IEEE80211_IF_FILE(auth_algs, u.sta.auth_algs, HEX); | 173 | IEEE80211_IF_FILE(auth_algs, u.sta.auth_algs, HEX); |
174 | IEEE80211_IF_FILE(auth_alg, u.sta.auth_alg, DEC); | 174 | IEEE80211_IF_FILE(auth_alg, u.sta.auth_alg, DEC); |
175 | IEEE80211_IF_FILE(auth_transaction, u.sta.auth_transaction, DEC); | 175 | IEEE80211_IF_FILE(auth_transaction, u.sta.auth_transaction, DEC); |
176 | IEEE80211_IF_FILE(num_beacons_sta, u.sta.num_beacons, DEC); | ||
177 | 176 | ||
178 | static ssize_t ieee80211_if_fmt_flags( | 177 | static ssize_t ieee80211_if_fmt_flags( |
179 | const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) | 178 | const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) |
@@ -192,7 +191,6 @@ __IEEE80211_IF_FILE(flags); | |||
192 | /* AP attributes */ | 191 | /* AP attributes */ |
193 | IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); | 192 | IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); |
194 | IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); | 193 | IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); |
195 | IEEE80211_IF_FILE(num_beacons, u.ap.num_beacons, DEC); | ||
196 | 194 | ||
197 | static ssize_t ieee80211_if_fmt_num_buffered_multicast( | 195 | static ssize_t ieee80211_if_fmt_num_buffered_multicast( |
198 | const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) | 196 | const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) |
@@ -207,37 +205,37 @@ IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); | |||
207 | 205 | ||
208 | #ifdef CONFIG_MAC80211_MESH | 206 | #ifdef CONFIG_MAC80211_MESH |
209 | /* Mesh stats attributes */ | 207 | /* Mesh stats attributes */ |
210 | IEEE80211_IF_FILE(fwded_frames, u.sta.mshstats.fwded_frames, DEC); | 208 | IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); |
211 | IEEE80211_IF_FILE(dropped_frames_ttl, u.sta.mshstats.dropped_frames_ttl, DEC); | 209 | IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); |
212 | IEEE80211_IF_FILE(dropped_frames_no_route, | 210 | IEEE80211_IF_FILE(dropped_frames_no_route, |
213 | u.sta.mshstats.dropped_frames_no_route, DEC); | 211 | u.mesh.mshstats.dropped_frames_no_route, DEC); |
214 | IEEE80211_IF_FILE(estab_plinks, u.sta.mshstats.estab_plinks, ATOMIC); | 212 | IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC); |
215 | 213 | ||
216 | /* Mesh parameters */ | 214 | /* Mesh parameters */ |
217 | IEEE80211_IF_WFILE(dot11MeshMaxRetries, | 215 | IEEE80211_IF_WFILE(dot11MeshMaxRetries, |
218 | u.sta.mshcfg.dot11MeshMaxRetries, DEC, u8); | 216 | u.mesh.mshcfg.dot11MeshMaxRetries, DEC, u8); |
219 | IEEE80211_IF_WFILE(dot11MeshRetryTimeout, | 217 | IEEE80211_IF_WFILE(dot11MeshRetryTimeout, |
220 | u.sta.mshcfg.dot11MeshRetryTimeout, DEC, u16); | 218 | u.mesh.mshcfg.dot11MeshRetryTimeout, DEC, u16); |
221 | IEEE80211_IF_WFILE(dot11MeshConfirmTimeout, | 219 | IEEE80211_IF_WFILE(dot11MeshConfirmTimeout, |
222 | u.sta.mshcfg.dot11MeshConfirmTimeout, DEC, u16); | 220 | u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC, u16); |
223 | IEEE80211_IF_WFILE(dot11MeshHoldingTimeout, | 221 | IEEE80211_IF_WFILE(dot11MeshHoldingTimeout, |
224 | u.sta.mshcfg.dot11MeshHoldingTimeout, DEC, u16); | 222 | u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC, u16); |
225 | IEEE80211_IF_WFILE(dot11MeshTTL, u.sta.mshcfg.dot11MeshTTL, DEC, u8); | 223 | IEEE80211_IF_WFILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC, u8); |
226 | IEEE80211_IF_WFILE(auto_open_plinks, u.sta.mshcfg.auto_open_plinks, DEC, u8); | 224 | IEEE80211_IF_WFILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC, u8); |
227 | IEEE80211_IF_WFILE(dot11MeshMaxPeerLinks, | 225 | IEEE80211_IF_WFILE(dot11MeshMaxPeerLinks, |
228 | u.sta.mshcfg.dot11MeshMaxPeerLinks, DEC, u16); | 226 | u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC, u16); |
229 | IEEE80211_IF_WFILE(dot11MeshHWMPactivePathTimeout, | 227 | IEEE80211_IF_WFILE(dot11MeshHWMPactivePathTimeout, |
230 | u.sta.mshcfg.dot11MeshHWMPactivePathTimeout, DEC, u32); | 228 | u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC, u32); |
231 | IEEE80211_IF_WFILE(dot11MeshHWMPpreqMinInterval, | 229 | IEEE80211_IF_WFILE(dot11MeshHWMPpreqMinInterval, |
232 | u.sta.mshcfg.dot11MeshHWMPpreqMinInterval, DEC, u16); | 230 | u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC, u16); |
233 | IEEE80211_IF_WFILE(dot11MeshHWMPnetDiameterTraversalTime, | 231 | IEEE80211_IF_WFILE(dot11MeshHWMPnetDiameterTraversalTime, |
234 | u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC, u16); | 232 | u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC, u16); |
235 | IEEE80211_IF_WFILE(dot11MeshHWMPmaxPREQretries, | 233 | IEEE80211_IF_WFILE(dot11MeshHWMPmaxPREQretries, |
236 | u.sta.mshcfg.dot11MeshHWMPmaxPREQretries, DEC, u8); | 234 | u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC, u8); |
237 | IEEE80211_IF_WFILE(path_refresh_time, | 235 | IEEE80211_IF_WFILE(path_refresh_time, |
238 | u.sta.mshcfg.path_refresh_time, DEC, u32); | 236 | u.mesh.mshcfg.path_refresh_time, DEC, u32); |
239 | IEEE80211_IF_WFILE(min_discovery_timeout, | 237 | IEEE80211_IF_WFILE(min_discovery_timeout, |
240 | u.sta.mshcfg.min_discovery_timeout, DEC, u16); | 238 | u.mesh.mshcfg.min_discovery_timeout, DEC, u16); |
241 | #endif | 239 | #endif |
242 | 240 | ||
243 | 241 | ||
@@ -265,7 +263,6 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) | |||
265 | DEBUGFS_ADD(auth_alg, sta); | 263 | DEBUGFS_ADD(auth_alg, sta); |
266 | DEBUGFS_ADD(auth_transaction, sta); | 264 | DEBUGFS_ADD(auth_transaction, sta); |
267 | DEBUGFS_ADD(flags, sta); | 265 | DEBUGFS_ADD(flags, sta); |
268 | DEBUGFS_ADD(num_beacons_sta, sta); | ||
269 | } | 266 | } |
270 | 267 | ||
271 | static void add_ap_files(struct ieee80211_sub_if_data *sdata) | 268 | static void add_ap_files(struct ieee80211_sub_if_data *sdata) |
@@ -276,7 +273,6 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata) | |||
276 | 273 | ||
277 | DEBUGFS_ADD(num_sta_ps, ap); | 274 | DEBUGFS_ADD(num_sta_ps, ap); |
278 | DEBUGFS_ADD(dtim_count, ap); | 275 | DEBUGFS_ADD(dtim_count, ap); |
279 | DEBUGFS_ADD(num_beacons, ap); | ||
280 | DEBUGFS_ADD(num_buffered_multicast, ap); | 276 | DEBUGFS_ADD(num_buffered_multicast, ap); |
281 | } | 277 | } |
282 | 278 | ||
@@ -345,26 +341,26 @@ static void add_files(struct ieee80211_sub_if_data *sdata) | |||
345 | return; | 341 | return; |
346 | 342 | ||
347 | switch (sdata->vif.type) { | 343 | switch (sdata->vif.type) { |
348 | case IEEE80211_IF_TYPE_MESH_POINT: | 344 | case NL80211_IFTYPE_MESH_POINT: |
349 | #ifdef CONFIG_MAC80211_MESH | 345 | #ifdef CONFIG_MAC80211_MESH |
350 | add_mesh_stats(sdata); | 346 | add_mesh_stats(sdata); |
351 | add_mesh_config(sdata); | 347 | add_mesh_config(sdata); |
352 | #endif | 348 | #endif |
353 | /* fall through */ | 349 | break; |
354 | case IEEE80211_IF_TYPE_STA: | 350 | case NL80211_IFTYPE_STATION: |
355 | case IEEE80211_IF_TYPE_IBSS: | 351 | case NL80211_IFTYPE_ADHOC: |
356 | add_sta_files(sdata); | 352 | add_sta_files(sdata); |
357 | break; | 353 | break; |
358 | case IEEE80211_IF_TYPE_AP: | 354 | case NL80211_IFTYPE_AP: |
359 | add_ap_files(sdata); | 355 | add_ap_files(sdata); |
360 | break; | 356 | break; |
361 | case IEEE80211_IF_TYPE_WDS: | 357 | case NL80211_IFTYPE_WDS: |
362 | add_wds_files(sdata); | 358 | add_wds_files(sdata); |
363 | break; | 359 | break; |
364 | case IEEE80211_IF_TYPE_MNTR: | 360 | case NL80211_IFTYPE_MONITOR: |
365 | add_monitor_files(sdata); | 361 | add_monitor_files(sdata); |
366 | break; | 362 | break; |
367 | case IEEE80211_IF_TYPE_VLAN: | 363 | case NL80211_IFTYPE_AP_VLAN: |
368 | add_vlan_files(sdata); | 364 | add_vlan_files(sdata); |
369 | break; | 365 | break; |
370 | default: | 366 | default: |
@@ -398,7 +394,6 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata) | |||
398 | DEBUGFS_DEL(auth_alg, sta); | 394 | DEBUGFS_DEL(auth_alg, sta); |
399 | DEBUGFS_DEL(auth_transaction, sta); | 395 | DEBUGFS_DEL(auth_transaction, sta); |
400 | DEBUGFS_DEL(flags, sta); | 396 | DEBUGFS_DEL(flags, sta); |
401 | DEBUGFS_DEL(num_beacons_sta, sta); | ||
402 | } | 397 | } |
403 | 398 | ||
404 | static void del_ap_files(struct ieee80211_sub_if_data *sdata) | 399 | static void del_ap_files(struct ieee80211_sub_if_data *sdata) |
@@ -409,7 +404,6 @@ static void del_ap_files(struct ieee80211_sub_if_data *sdata) | |||
409 | 404 | ||
410 | DEBUGFS_DEL(num_sta_ps, ap); | 405 | DEBUGFS_DEL(num_sta_ps, ap); |
411 | DEBUGFS_DEL(dtim_count, ap); | 406 | DEBUGFS_DEL(dtim_count, ap); |
412 | DEBUGFS_DEL(num_beacons, ap); | ||
413 | DEBUGFS_DEL(num_buffered_multicast, ap); | 407 | DEBUGFS_DEL(num_buffered_multicast, ap); |
414 | } | 408 | } |
415 | 409 | ||
@@ -482,26 +476,26 @@ static void del_files(struct ieee80211_sub_if_data *sdata) | |||
482 | return; | 476 | return; |
483 | 477 | ||
484 | switch (sdata->vif.type) { | 478 | switch (sdata->vif.type) { |
485 | case IEEE80211_IF_TYPE_MESH_POINT: | 479 | case NL80211_IFTYPE_MESH_POINT: |
486 | #ifdef CONFIG_MAC80211_MESH | 480 | #ifdef CONFIG_MAC80211_MESH |
487 | del_mesh_stats(sdata); | 481 | del_mesh_stats(sdata); |
488 | del_mesh_config(sdata); | 482 | del_mesh_config(sdata); |
489 | #endif | 483 | #endif |
490 | /* fall through */ | 484 | break; |
491 | case IEEE80211_IF_TYPE_STA: | 485 | case NL80211_IFTYPE_STATION: |
492 | case IEEE80211_IF_TYPE_IBSS: | 486 | case NL80211_IFTYPE_ADHOC: |
493 | del_sta_files(sdata); | 487 | del_sta_files(sdata); |
494 | break; | 488 | break; |
495 | case IEEE80211_IF_TYPE_AP: | 489 | case NL80211_IFTYPE_AP: |
496 | del_ap_files(sdata); | 490 | del_ap_files(sdata); |
497 | break; | 491 | break; |
498 | case IEEE80211_IF_TYPE_WDS: | 492 | case NL80211_IFTYPE_WDS: |
499 | del_wds_files(sdata); | 493 | del_wds_files(sdata); |
500 | break; | 494 | break; |
501 | case IEEE80211_IF_TYPE_MNTR: | 495 | case NL80211_IFTYPE_MONITOR: |
502 | del_monitor_files(sdata); | 496 | del_monitor_files(sdata); |
503 | break; | 497 | break; |
504 | case IEEE80211_IF_TYPE_VLAN: | 498 | case NL80211_IFTYPE_AP_VLAN: |
505 | del_vlan_files(sdata); | 499 | del_vlan_files(sdata); |
506 | break; | 500 | break; |
507 | default: | 501 | default: |
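Note: two renames run through the debugfs_netdev.c hunks above: interface type checks move from mac80211's private IEEE80211_IF_TYPE_* constants to the shared NL80211_IFTYPE_* enum, and the mesh statistics/configuration move from u.sta.msh* to a dedicated u.mesh member, which is why the MESH_POINT case no longer falls through to the station files. A rough sketch of the sub_if_data union shape this implies (member names assumed from the accesses above, not the authoritative definition):

	struct ieee80211_sub_if_data {
		/* ... */
		union {
			struct ieee80211_if_ap ap;
			struct ieee80211_if_wds wds;
			struct ieee80211_if_vlan vlan;
			struct ieee80211_if_sta sta;	/* STA/IBSS only now */
	#ifdef CONFIG_MAC80211_MESH
			struct ieee80211_if_mesh mesh;	/* mshcfg, mshstats, ... */
	#endif
		} u;
	};
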
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 79a062782d52..b9902e425f09 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -50,7 +50,7 @@ static const struct file_operations sta_ ##name## _ops = { \ | |||
50 | STA_READ_##format(name, field) \ | 50 | STA_READ_##format(name, field) \ |
51 | STA_OPS(name) | 51 | STA_OPS(name) |
52 | 52 | ||
53 | STA_FILE(aid, aid, D); | 53 | STA_FILE(aid, sta.aid, D); |
54 | STA_FILE(dev, sdata->dev->name, S); | 54 | STA_FILE(dev, sdata->dev->name, S); |
55 | STA_FILE(rx_packets, rx_packets, LU); | 55 | STA_FILE(rx_packets, rx_packets, LU); |
56 | STA_FILE(tx_packets, tx_packets, LU); | 56 | STA_FILE(tx_packets, tx_packets, LU); |
@@ -173,10 +173,9 @@ static ssize_t sta_agg_status_write(struct file *file, | |||
173 | const char __user *user_buf, size_t count, loff_t *ppos) | 173 | const char __user *user_buf, size_t count, loff_t *ppos) |
174 | { | 174 | { |
175 | struct sta_info *sta = file->private_data; | 175 | struct sta_info *sta = file->private_data; |
176 | struct net_device *dev = sta->sdata->dev; | 176 | struct ieee80211_local *local = sta->sdata->local; |
177 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
178 | struct ieee80211_hw *hw = &local->hw; | 177 | struct ieee80211_hw *hw = &local->hw; |
179 | u8 *da = sta->addr; | 178 | u8 *da = sta->sta.addr; |
180 | static int tid_static_tx[16] = {0, 0, 0, 0, 0, 0, 0, 0, | 179 | static int tid_static_tx[16] = {0, 0, 0, 0, 0, 0, 0, 0, |
181 | 0, 0, 0, 0, 0, 0, 0, 0}; | 180 | 0, 0, 0, 0, 0, 0, 0, 0}; |
182 | static int tid_static_rx[16] = {1, 1, 1, 1, 1, 1, 1, 1, | 181 | static int tid_static_rx[16] = {1, 1, 1, 1, 1, 1, 1, 1, |
@@ -201,7 +200,7 @@ static ssize_t sta_agg_status_write(struct file *file, | |||
201 | tid_num = tid_num - 100; | 200 | tid_num = tid_num - 100; |
202 | if (tid_static_rx[tid_num] == 1) { | 201 | if (tid_static_rx[tid_num] == 1) { |
203 | strcpy(state, "off "); | 202 | strcpy(state, "off "); |
204 | ieee80211_sta_stop_rx_ba_session(dev, da, tid_num, 0, | 203 | ieee80211_sta_stop_rx_ba_session(sta->sdata, da, tid_num, 0, |
205 | WLAN_REASON_QSTA_REQUIRE_SETUP); | 204 | WLAN_REASON_QSTA_REQUIRE_SETUP); |
206 | sta->ampdu_mlme.tid_state_rx[tid_num] |= | 205 | sta->ampdu_mlme.tid_state_rx[tid_num] |= |
207 | HT_AGG_STATE_DEBUGFS_CTL; | 206 | HT_AGG_STATE_DEBUGFS_CTL; |
@@ -253,7 +252,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) | |||
253 | if (!stations_dir) | 252 | if (!stations_dir) |
254 | return; | 253 | return; |
255 | 254 | ||
256 | mac = print_mac(mbuf, sta->addr); | 255 | mac = print_mac(mbuf, sta->sta.addr); |
257 | 256 | ||
258 | sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); | 257 | sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); |
259 | if (!sta->debugfs.dir) | 258 | if (!sta->debugfs.dir) |
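Note: the sta->sta.aid and sta->sta.addr accesses above (and the &sta->sta arguments passed to ampdu_action in the ht.c hunks below) reflect that the driver-visible part of a station entry now lives in an embedded struct ieee80211_sta rather than directly in sta_info. A rough sketch of the layout this implies; the field list is illustrative, not exhaustive:

	struct ieee80211_sta {
		u8 addr[ETH_ALEN];	/* was sta_info.addr */
		u16 aid;		/* was sta_info.aid */
		/* ... rate/HT capabilities, driver-private area ... */
	};

	struct sta_info {
		/* mac80211-internal state: lock, ampdu_mlme, debugfs, ... */
		struct ieee80211_sta sta;	/* handed to drivers by pointer */
	};
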
diff --git a/net/mac80211/event.c b/net/mac80211/event.c index 2280f40b4560..8de60de70bc9 100644 --- a/net/mac80211/event.c +++ b/net/mac80211/event.c | |||
@@ -8,7 +8,6 @@ | |||
8 | * mac80211 - events | 8 | * mac80211 - events |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/netdevice.h> | ||
12 | #include <net/iw_handler.h> | 11 | #include <net/iw_handler.h> |
13 | #include "ieee80211_i.h" | 12 | #include "ieee80211_i.h" |
14 | 13 | ||
@@ -17,7 +16,7 @@ | |||
17 | * (in the variable hdr) must be long enough to extract the TKIP | 16 | * (in the variable hdr) must be long enough to extract the TKIP |
18 | * fields like TSC | 17 | * fields like TSC |
19 | */ | 18 | */ |
20 | void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, | 19 | void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, |
21 | struct ieee80211_hdr *hdr) | 20 | struct ieee80211_hdr *hdr) |
22 | { | 21 | { |
23 | union iwreq_data wrqu; | 22 | union iwreq_data wrqu; |
@@ -32,7 +31,7 @@ void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, | |||
32 | print_mac(mac, hdr->addr2)); | 31 | print_mac(mac, hdr->addr2)); |
33 | memset(&wrqu, 0, sizeof(wrqu)); | 32 | memset(&wrqu, 0, sizeof(wrqu)); |
34 | wrqu.data.length = strlen(buf); | 33 | wrqu.data.length = strlen(buf); |
35 | wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); | 34 | wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf); |
36 | kfree(buf); | 35 | kfree(buf); |
37 | } | 36 | } |
38 | 37 | ||
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c new file mode 100644 index 000000000000..dc7d9a3d70d5 --- /dev/null +++ b/net/mac80211/ht.c | |||
@@ -0,0 +1,992 @@ | |||
1 | /* | ||
2 | * HT handling | ||
3 | * | ||
4 | * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> | ||
5 | * Copyright 2002-2005, Instant802 Networks, Inc. | ||
6 | * Copyright 2005-2006, Devicescape Software, Inc. | ||
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | ||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | ||
9 | * Copyright 2007-2008, Intel Corporation | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | */ | ||
15 | |||
16 | #include <linux/ieee80211.h> | ||
17 | #include <net/wireless.h> | ||
18 | #include <net/mac80211.h> | ||
19 | #include "ieee80211_i.h" | ||
20 | #include "sta_info.h" | ||
21 | #include "wme.h" | ||
22 | |||
23 | int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie, | ||
24 | struct ieee80211_ht_info *ht_info) | ||
25 | { | ||
26 | |||
27 | if (ht_info == NULL) | ||
28 | return -EINVAL; | ||
29 | |||
30 | memset(ht_info, 0, sizeof(*ht_info)); | ||
31 | |||
32 | if (ht_cap_ie) { | ||
33 | u8 ampdu_info = ht_cap_ie->ampdu_params_info; | ||
34 | |||
35 | ht_info->ht_supported = 1; | ||
36 | ht_info->cap = le16_to_cpu(ht_cap_ie->cap_info); | ||
37 | ht_info->ampdu_factor = | ||
38 | ampdu_info & IEEE80211_HT_CAP_AMPDU_FACTOR; | ||
39 | ht_info->ampdu_density = | ||
40 | (ampdu_info & IEEE80211_HT_CAP_AMPDU_DENSITY) >> 2; | ||
41 | memcpy(ht_info->supp_mcs_set, ht_cap_ie->supp_mcs_set, 16); | ||
42 | } else | ||
43 | ht_info->ht_supported = 0; | ||
44 | |||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | int ieee80211_ht_addt_info_ie_to_ht_bss_info( | ||
49 | struct ieee80211_ht_addt_info *ht_add_info_ie, | ||
50 | struct ieee80211_ht_bss_info *bss_info) | ||
51 | { | ||
52 | if (bss_info == NULL) | ||
53 | return -EINVAL; | ||
54 | |||
55 | memset(bss_info, 0, sizeof(*bss_info)); | ||
56 | |||
57 | if (ht_add_info_ie) { | ||
58 | u16 op_mode; | ||
59 | op_mode = le16_to_cpu(ht_add_info_ie->operation_mode); | ||
60 | |||
61 | bss_info->primary_channel = ht_add_info_ie->control_chan; | ||
62 | bss_info->bss_cap = ht_add_info_ie->ht_param; | ||
63 | bss_info->bss_op_mode = (u8)(op_mode & 0xff); | ||
64 | } | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, | ||
70 | const u8 *da, u16 tid, | ||
71 | u8 dialog_token, u16 start_seq_num, | ||
72 | u16 agg_size, u16 timeout) | ||
73 | { | ||
74 | struct ieee80211_local *local = sdata->local; | ||
75 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
76 | struct sk_buff *skb; | ||
77 | struct ieee80211_mgmt *mgmt; | ||
78 | u16 capab; | ||
79 | |||
80 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | ||
81 | |||
82 | if (!skb) { | ||
83 | printk(KERN_ERR "%s: failed to allocate buffer " | ||
84 | "for addba request frame\n", sdata->dev->name); | ||
85 | return; | ||
86 | } | ||
87 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
88 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | ||
89 | memset(mgmt, 0, 24); | ||
90 | memcpy(mgmt->da, da, ETH_ALEN); | ||
91 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | ||
92 | if (sdata->vif.type == NL80211_IFTYPE_AP) | ||
93 | memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); | ||
94 | else | ||
95 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
96 | |||
97 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | | ||
98 | IEEE80211_STYPE_ACTION); | ||
99 | |||
100 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); | ||
101 | |||
102 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | ||
103 | mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ; | ||
104 | |||
105 | mgmt->u.action.u.addba_req.dialog_token = dialog_token; | ||
106 | capab = (u16)(1 << 1); /* bit 1 aggregation policy */ | ||
107 | capab |= (u16)(tid << 2); /* bit 5:2 TID number */ | ||
108 | capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggregation */ | ||
109 | |||
110 | mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); | ||
111 | |||
112 | mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout); | ||
113 | mgmt->u.action.u.addba_req.start_seq_num = | ||
114 | cpu_to_le16(start_seq_num << 4); | ||
115 | |||
116 | ieee80211_tx_skb(sdata, skb, 0); | ||
117 | } | ||
118 | |||
119 | static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, | ||
120 | u8 dialog_token, u16 status, u16 policy, | ||
121 | u16 buf_size, u16 timeout) | ||
122 | { | ||
123 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
124 | struct ieee80211_local *local = sdata->local; | ||
125 | struct sk_buff *skb; | ||
126 | struct ieee80211_mgmt *mgmt; | ||
127 | u16 capab; | ||
128 | |||
129 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | ||
130 | |||
131 | if (!skb) { | ||
132 | printk(KERN_DEBUG "%s: failed to allocate buffer " | ||
133 | "for addba resp frame\n", sdata->dev->name); | ||
134 | return; | ||
135 | } | ||
136 | |||
137 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
138 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | ||
139 | memset(mgmt, 0, 24); | ||
140 | memcpy(mgmt->da, da, ETH_ALEN); | ||
141 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | ||
142 | if (sdata->vif.type == NL80211_IFTYPE_AP) | ||
143 | memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); | ||
144 | else | ||
145 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
146 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | | ||
147 | IEEE80211_STYPE_ACTION); | ||
148 | |||
149 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp)); | ||
150 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | ||
151 | mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; | ||
152 | mgmt->u.action.u.addba_resp.dialog_token = dialog_token; | ||
153 | |||
154 | capab = (u16)(policy << 1); /* bit 1 aggregation policy */ | ||
155 | capab |= (u16)(tid << 2); /* bit 5:2 TID number */ | ||
156 | capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */ | ||
157 | |||
158 | mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab); | ||
159 | mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); | ||
160 | mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); | ||
161 | |||
162 | ieee80211_tx_skb(sdata, skb, 0); | ||
163 | } | ||
164 | |||
165 | static void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, | ||
166 | const u8 *da, u16 tid, | ||
167 | u16 initiator, u16 reason_code) | ||
168 | { | ||
169 | struct ieee80211_local *local = sdata->local; | ||
170 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
171 | struct sk_buff *skb; | ||
172 | struct ieee80211_mgmt *mgmt; | ||
173 | u16 params; | ||
174 | |||
175 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | ||
176 | |||
177 | if (!skb) { | ||
178 | printk(KERN_ERR "%s: failed to allocate buffer " | ||
179 | "for delba frame\n", sdata->dev->name); | ||
180 | return; | ||
181 | } | ||
182 | |||
183 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
184 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | ||
185 | memset(mgmt, 0, 24); | ||
186 | memcpy(mgmt->da, da, ETH_ALEN); | ||
187 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | ||
188 | if (sdata->vif.type == NL80211_IFTYPE_AP) | ||
189 | memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); | ||
190 | else | ||
191 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
192 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | | ||
193 | IEEE80211_STYPE_ACTION); | ||
194 | |||
195 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba)); | ||
196 | |||
197 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | ||
198 | mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA; | ||
199 | params = (u16)(initiator << 11); /* bit 11 initiator */ | ||
200 | params |= (u16)(tid << 12); /* bit 15:12 TID number */ | ||
201 | |||
202 | mgmt->u.action.u.delba.params = cpu_to_le16(params); | ||
203 | mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); | ||
204 | |||
205 | ieee80211_tx_skb(sdata, skb, 0); | ||
206 | } | ||
207 | |||
208 | void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn) | ||
209 | { | ||
210 | struct ieee80211_local *local = sdata->local; | ||
211 | struct sk_buff *skb; | ||
212 | struct ieee80211_bar *bar; | ||
213 | u16 bar_control = 0; | ||
214 | |||
215 | skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); | ||
216 | if (!skb) { | ||
217 | printk(KERN_ERR "%s: failed to allocate buffer for " | ||
218 | "bar frame\n", sdata->dev->name); | ||
219 | return; | ||
220 | } | ||
221 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
222 | bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar)); | ||
223 | memset(bar, 0, sizeof(*bar)); | ||
224 | bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | | ||
225 | IEEE80211_STYPE_BACK_REQ); | ||
226 | memcpy(bar->ra, ra, ETH_ALEN); | ||
227 | memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN); | ||
228 | bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; | ||
229 | bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; | ||
230 | bar_control |= (u16)(tid << 12); | ||
231 | bar->control = cpu_to_le16(bar_control); | ||
232 | bar->start_seq_num = cpu_to_le16(ssn); | ||
233 | |||
234 | ieee80211_tx_skb(sdata, skb, 0); | ||
235 | } | ||
236 | |||
237 | void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, | ||
238 | u16 initiator, u16 reason) | ||
239 | { | ||
240 | struct ieee80211_local *local = sdata->local; | ||
241 | struct ieee80211_hw *hw = &local->hw; | ||
242 | struct sta_info *sta; | ||
243 | int ret, i; | ||
244 | DECLARE_MAC_BUF(mac); | ||
245 | |||
246 | rcu_read_lock(); | ||
247 | |||
248 | sta = sta_info_get(local, ra); | ||
249 | if (!sta) { | ||
250 | rcu_read_unlock(); | ||
251 | return; | ||
252 | } | ||
253 | |||
254 | /* check if TID is in operational state */ | ||
255 | spin_lock_bh(&sta->lock); | ||
256 | if (sta->ampdu_mlme.tid_state_rx[tid] | ||
257 | != HT_AGG_STATE_OPERATIONAL) { | ||
258 | spin_unlock_bh(&sta->lock); | ||
259 | rcu_read_unlock(); | ||
260 | return; | ||
261 | } | ||
262 | sta->ampdu_mlme.tid_state_rx[tid] = | ||
263 | HT_AGG_STATE_REQ_STOP_BA_MSK | | ||
264 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | ||
265 | spin_unlock_bh(&sta->lock); | ||
266 | |||
267 | /* stop HW Rx aggregation. ampdu_action existence | ||
268 | * already verified in session init so we add the BUG_ON */ | ||
269 | BUG_ON(!local->ops->ampdu_action); | ||
270 | |||
271 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
272 | printk(KERN_DEBUG "Rx BA session stop requested for %s tid %u\n", | ||
273 | print_mac(mac, ra), tid); | ||
274 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
275 | |||
276 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP, | ||
277 | &sta->sta, tid, NULL); | ||
278 | if (ret) | ||
279 | printk(KERN_DEBUG "HW problem - can not stop rx " | ||
280 | "aggregation for tid %d\n", tid); | ||
281 | |||
282 | /* shutdown timer has not expired */ | ||
283 | if (initiator != WLAN_BACK_TIMER) | ||
284 | del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer); | ||
285 | |||
286 | /* check if this is a self generated aggregation halt */ | ||
287 | if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER) | ||
288 | ieee80211_send_delba(sdata, ra, tid, 0, reason); | ||
289 | |||
290 | /* free the reordering buffer */ | ||
291 | for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) { | ||
292 | if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) { | ||
293 | /* release the reordered frames */ | ||
294 | dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]); | ||
295 | sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--; | ||
296 | sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL; | ||
297 | } | ||
298 | } | ||
299 | /* free resources */ | ||
300 | kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf); | ||
301 | kfree(sta->ampdu_mlme.tid_rx[tid]); | ||
302 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
303 | sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE; | ||
304 | |||
305 | rcu_read_unlock(); | ||
306 | } | ||
307 | |||
308 | |||
309 | /* | ||
310 | * After sending an add Block Ack request we activate a timer that runs | ||
311 | * until the add Block Ack response arrives from the recipient. | ||
312 | * If this timer expires, sta_addba_resp_timer_expired will be executed. | ||
313 | */ | ||
314 | static void sta_addba_resp_timer_expired(unsigned long data) | ||
315 | { | ||
316 | /* not an elegant detour, but there is no choice as the timer passes | ||
317 | * only one argument, and both sta_info and TID are needed, so init | ||
318 | * flow in sta_info_create gives the TID as data, while the timer_to_tid | ||
319 | * array gives the sta through container_of */ | ||
320 | u16 tid = *(u8 *)data; | ||
321 | struct sta_info *temp_sta = container_of((void *)data, | ||
322 | struct sta_info, timer_to_tid[tid]); | ||
323 | |||
324 | struct ieee80211_local *local = temp_sta->local; | ||
325 | struct ieee80211_hw *hw = &local->hw; | ||
326 | struct sta_info *sta; | ||
327 | u8 *state; | ||
328 | |||
329 | rcu_read_lock(); | ||
330 | |||
331 | sta = sta_info_get(local, temp_sta->sta.addr); | ||
332 | if (!sta) { | ||
333 | rcu_read_unlock(); | ||
334 | return; | ||
335 | } | ||
336 | |||
337 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
338 | /* check if the TID waits for addBA response */ | ||
339 | spin_lock_bh(&sta->lock); | ||
340 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | ||
341 | spin_unlock_bh(&sta->lock); | ||
342 | *state = HT_AGG_STATE_IDLE; | ||
343 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
344 | printk(KERN_DEBUG "timer expired on tid %d but we are not " | ||
345 | "expecting addBA response there", tid); | ||
346 | #endif | ||
347 | goto timer_expired_exit; | ||
348 | } | ||
349 | |||
350 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
351 | printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); | ||
352 | #endif | ||
353 | |||
354 | /* go through the state check in stop_BA_session */ | ||
355 | *state = HT_AGG_STATE_OPERATIONAL; | ||
356 | spin_unlock_bh(&sta->lock); | ||
357 | ieee80211_stop_tx_ba_session(hw, temp_sta->sta.addr, tid, | ||
358 | WLAN_BACK_INITIATOR); | ||
359 | |||
360 | timer_expired_exit: | ||
361 | rcu_read_unlock(); | ||
362 | } | ||
363 | |||
364 | void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr) | ||
365 | { | ||
366 | struct ieee80211_local *local = sdata->local; | ||
367 | int i; | ||
368 | |||
369 | for (i = 0; i < STA_TID_NUM; i++) { | ||
370 | ieee80211_stop_tx_ba_session(&local->hw, addr, i, | ||
371 | WLAN_BACK_INITIATOR); | ||
372 | ieee80211_sta_stop_rx_ba_session(sdata, addr, i, | ||
373 | WLAN_BACK_RECIPIENT, | ||
374 | WLAN_REASON_QSTA_LEAVE_QBSS); | ||
375 | } | ||
376 | } | ||
377 | |||
378 | int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) | ||
379 | { | ||
380 | struct ieee80211_local *local = hw_to_local(hw); | ||
381 | struct sta_info *sta; | ||
382 | struct ieee80211_sub_if_data *sdata; | ||
383 | u16 start_seq_num; | ||
384 | u8 *state; | ||
385 | int ret; | ||
386 | DECLARE_MAC_BUF(mac); | ||
387 | |||
388 | if (tid >= STA_TID_NUM) | ||
389 | return -EINVAL; | ||
390 | |||
391 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
392 | printk(KERN_DEBUG "Open BA session requested for %s tid %u\n", | ||
393 | print_mac(mac, ra), tid); | ||
394 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
395 | |||
396 | rcu_read_lock(); | ||
397 | |||
398 | sta = sta_info_get(local, ra); | ||
399 | if (!sta) { | ||
400 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
401 | printk(KERN_DEBUG "Could not find the station\n"); | ||
402 | #endif | ||
403 | ret = -ENOENT; | ||
404 | goto exit; | ||
405 | } | ||
406 | |||
407 | spin_lock_bh(&sta->lock); | ||
408 | |||
409 | /* we have tried too many times, receiver does not want A-MPDU */ | ||
410 | if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { | ||
411 | ret = -EBUSY; | ||
412 | goto err_unlock_sta; | ||
413 | } | ||
414 | |||
415 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
416 | /* check if the TID is not in aggregation flow already */ | ||
417 | if (*state != HT_AGG_STATE_IDLE) { | ||
418 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
419 | printk(KERN_DEBUG "BA request denied - session is not " | ||
420 | "idle on tid %u\n", tid); | ||
421 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
422 | ret = -EAGAIN; | ||
423 | goto err_unlock_sta; | ||
424 | } | ||
425 | |||
426 | /* prepare A-MPDU MLME for Tx aggregation */ | ||
427 | sta->ampdu_mlme.tid_tx[tid] = | ||
428 | kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); | ||
429 | if (!sta->ampdu_mlme.tid_tx[tid]) { | ||
430 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
431 | if (net_ratelimit()) | ||
432 | printk(KERN_ERR "allocate tx mlme to tid %d failed\n", | ||
433 | tid); | ||
434 | #endif | ||
435 | ret = -ENOMEM; | ||
436 | goto err_unlock_sta; | ||
437 | } | ||
438 | /* Tx timer */ | ||
439 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = | ||
440 | sta_addba_resp_timer_expired; | ||
441 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data = | ||
442 | (unsigned long)&sta->timer_to_tid[tid]; | ||
443 | init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
444 | |||
445 | /* create a new queue for this aggregation */ | ||
446 | ret = ieee80211_ht_agg_queue_add(local, sta, tid); | ||
447 | |||
448 | /* if no queue is available for aggregation, | ||
449 | * don't switch to aggregation */ | ||
450 | if (ret) { | ||
451 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
452 | printk(KERN_DEBUG "BA request denied - queue unavailable for" | ||
453 | " tid %d\n", tid); | ||
454 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
455 | goto err_unlock_queue; | ||
456 | } | ||
457 | sdata = sta->sdata; | ||
458 | |||
459 | /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the | ||
460 | * callback right away, it must see that the flow has begun */ | ||
461 | *state |= HT_ADDBA_REQUESTED_MSK; | ||
462 | |||
463 | /* This is slightly racy because the queue isn't stopped */ | ||
464 | start_seq_num = sta->tid_seq[tid]; | ||
465 | |||
466 | if (local->ops->ampdu_action) | ||
467 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START, | ||
468 | &sta->sta, tid, &start_seq_num); | ||
469 | |||
470 | if (ret) { | ||
471 | /* No need to requeue the packets in the agg queue, since we | ||
472 | * held the tx lock: no packet could be enqueued to the newly | ||
473 | * allocated queue */ | ||
474 | ieee80211_ht_agg_queue_remove(local, sta, tid, 0); | ||
475 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
476 | printk(KERN_DEBUG "BA request denied - HW unavailable for" | ||
477 | " tid %d\n", tid); | ||
478 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
479 | *state = HT_AGG_STATE_IDLE; | ||
480 | goto err_unlock_queue; | ||
481 | } | ||
482 | |||
483 | /* Will put all the packets in the new SW queue */ | ||
484 | ieee80211_requeue(local, ieee802_1d_to_ac[tid]); | ||
485 | spin_unlock_bh(&sta->lock); | ||
486 | |||
487 | /* send an addBA request */ | ||
488 | sta->ampdu_mlme.dialog_token_allocator++; | ||
489 | sta->ampdu_mlme.tid_tx[tid]->dialog_token = | ||
490 | sta->ampdu_mlme.dialog_token_allocator; | ||
491 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; | ||
492 | |||
493 | |||
494 | ieee80211_send_addba_request(sta->sdata, ra, tid, | ||
495 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, | ||
496 | sta->ampdu_mlme.tid_tx[tid]->ssn, | ||
497 | 0x40, 5000); | ||
498 | /* activate the timer for the recipient's addBA response */ | ||
499 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = | ||
500 | jiffies + ADDBA_RESP_INTERVAL; | ||
501 | add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
502 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
503 | printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); | ||
504 | #endif | ||
505 | goto exit; | ||
506 | |||
507 | err_unlock_queue: | ||
508 | kfree(sta->ampdu_mlme.tid_tx[tid]); | ||
509 | sta->ampdu_mlme.tid_tx[tid] = NULL; | ||
510 | ret = -EBUSY; | ||
511 | err_unlock_sta: | ||
512 | spin_unlock_bh(&sta->lock); | ||
513 | exit: | ||
514 | rcu_read_unlock(); | ||
515 | return ret; | ||
516 | } | ||
517 | EXPORT_SYMBOL(ieee80211_start_tx_ba_session); | ||
518 | |||
519 | int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw, | ||
520 | u8 *ra, u16 tid, | ||
521 | enum ieee80211_back_parties initiator) | ||
522 | { | ||
523 | struct ieee80211_local *local = hw_to_local(hw); | ||
524 | struct sta_info *sta; | ||
525 | u8 *state; | ||
526 | int ret = 0; | ||
527 | DECLARE_MAC_BUF(mac); | ||
528 | |||
529 | if (tid >= STA_TID_NUM) | ||
530 | return -EINVAL; | ||
531 | |||
532 | rcu_read_lock(); | ||
533 | sta = sta_info_get(local, ra); | ||
534 | if (!sta) { | ||
535 | rcu_read_unlock(); | ||
536 | return -ENOENT; | ||
537 | } | ||
538 | |||
539 | /* check if the TID is in aggregation */ | ||
540 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
541 | spin_lock_bh(&sta->lock); | ||
542 | |||
543 | if (*state != HT_AGG_STATE_OPERATIONAL) { | ||
544 | ret = -ENOENT; | ||
545 | goto stop_BA_exit; | ||
546 | } | ||
547 | |||
548 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
549 | printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n", | ||
550 | print_mac(mac, ra), tid); | ||
551 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
552 | |||
553 | ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]); | ||
554 | |||
555 | *state = HT_AGG_STATE_REQ_STOP_BA_MSK | | ||
556 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | ||
557 | |||
558 | if (local->ops->ampdu_action) | ||
559 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP, | ||
560 | &sta->sta, tid, NULL); | ||
561 | |||
562 | /* in case HW denied going back to legacy */ | ||
563 | if (ret) { | ||
564 | WARN_ON(ret != -EBUSY); | ||
565 | *state = HT_AGG_STATE_OPERATIONAL; | ||
566 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
567 | goto stop_BA_exit; | ||
568 | } | ||
569 | |||
570 | stop_BA_exit: | ||
571 | spin_unlock_bh(&sta->lock); | ||
572 | rcu_read_unlock(); | ||
573 | return ret; | ||
574 | } | ||
575 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); | ||
576 | |||
577 | void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) | ||
578 | { | ||
579 | struct ieee80211_local *local = hw_to_local(hw); | ||
580 | struct sta_info *sta; | ||
581 | u8 *state; | ||
582 | DECLARE_MAC_BUF(mac); | ||
583 | |||
584 | if (tid >= STA_TID_NUM) { | ||
585 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
586 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | ||
587 | tid, STA_TID_NUM); | ||
588 | #endif | ||
589 | return; | ||
590 | } | ||
591 | |||
592 | rcu_read_lock(); | ||
593 | sta = sta_info_get(local, ra); | ||
594 | if (!sta) { | ||
595 | rcu_read_unlock(); | ||
596 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
597 | printk(KERN_DEBUG "Could not find station: %s\n", | ||
598 | print_mac(mac, ra)); | ||
599 | #endif | ||
600 | return; | ||
601 | } | ||
602 | |||
603 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
604 | spin_lock_bh(&sta->lock); | ||
605 | |||
606 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | ||
607 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
608 | printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", | ||
609 | *state); | ||
610 | #endif | ||
611 | spin_unlock_bh(&sta->lock); | ||
612 | rcu_read_unlock(); | ||
613 | return; | ||
614 | } | ||
615 | |||
616 | WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK); | ||
617 | |||
618 | *state |= HT_ADDBA_DRV_READY_MSK; | ||
619 | |||
620 | if (*state == HT_AGG_STATE_OPERATIONAL) { | ||
621 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
622 | printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); | ||
623 | #endif | ||
624 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
625 | } | ||
626 | spin_unlock_bh(&sta->lock); | ||
627 | rcu_read_unlock(); | ||
628 | } | ||
629 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); | ||
630 | |||
631 | void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid) | ||
632 | { | ||
633 | struct ieee80211_local *local = hw_to_local(hw); | ||
634 | struct sta_info *sta; | ||
635 | u8 *state; | ||
636 | int agg_queue; | ||
637 | DECLARE_MAC_BUF(mac); | ||
638 | |||
639 | if (tid >= STA_TID_NUM) { | ||
640 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
641 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | ||
642 | tid, STA_TID_NUM); | ||
643 | #endif | ||
644 | return; | ||
645 | } | ||
646 | |||
647 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
648 | printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n", | ||
649 | print_mac(mac, ra), tid); | ||
650 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
651 | |||
652 | rcu_read_lock(); | ||
653 | sta = sta_info_get(local, ra); | ||
654 | if (!sta) { | ||
655 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
656 | printk(KERN_DEBUG "Could not find station: %s\n", | ||
657 | print_mac(mac, ra)); | ||
658 | #endif | ||
659 | rcu_read_unlock(); | ||
660 | return; | ||
661 | } | ||
662 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
663 | |||
664 | /* NOTE: no need to use sta->lock in this state check, as | ||
665 | * ieee80211_stop_tx_ba_session will let only one stop call | ||
666 | * pass through per sta/tid | ||
667 | */ | ||
668 | if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { | ||
669 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
670 | printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); | ||
671 | #endif | ||
672 | rcu_read_unlock(); | ||
673 | return; | ||
674 | } | ||
675 | |||
676 | if (*state & HT_AGG_STATE_INITIATOR_MSK) | ||
677 | ieee80211_send_delba(sta->sdata, ra, tid, | ||
678 | WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); | ||
679 | |||
680 | agg_queue = sta->tid_to_tx_q[tid]; | ||
681 | |||
682 | ieee80211_ht_agg_queue_remove(local, sta, tid, 1); | ||
683 | |||
684 | /* We just requeued all the frames that were in the | ||
685 | * removed queue, and since we might miss a softirq we do | ||
686 | * netif_schedule_queue. ieee80211_wake_queue is not used | ||
687 | * here as this queue is not necessarily stopped | ||
688 | */ | ||
689 | netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue)); | ||
690 | spin_lock_bh(&sta->lock); | ||
691 | *state = HT_AGG_STATE_IDLE; | ||
692 | sta->ampdu_mlme.addba_req_num[tid] = 0; | ||
693 | kfree(sta->ampdu_mlme.tid_tx[tid]); | ||
694 | sta->ampdu_mlme.tid_tx[tid] = NULL; | ||
695 | spin_unlock_bh(&sta->lock); | ||
696 | |||
697 | rcu_read_unlock(); | ||
698 | } | ||
699 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb); | ||
700 | |||
701 | void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, | ||
702 | const u8 *ra, u16 tid) | ||
703 | { | ||
704 | struct ieee80211_local *local = hw_to_local(hw); | ||
705 | struct ieee80211_ra_tid *ra_tid; | ||
706 | struct sk_buff *skb = dev_alloc_skb(0); | ||
707 | |||
708 | if (unlikely(!skb)) { | ||
709 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
710 | if (net_ratelimit()) | ||
711 | printk(KERN_WARNING "%s: Not enough memory, " | ||
712 | "dropping start BA session", skb->dev->name); | ||
713 | #endif | ||
714 | return; | ||
715 | } | ||
716 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
717 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | ||
718 | ra_tid->tid = tid; | ||
719 | |||
720 | skb->pkt_type = IEEE80211_ADDBA_MSG; | ||
721 | skb_queue_tail(&local->skb_queue, skb); | ||
722 | tasklet_schedule(&local->tasklet); | ||
723 | } | ||
724 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); | ||
725 | |||
726 | void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, | ||
727 | const u8 *ra, u16 tid) | ||
728 | { | ||
729 | struct ieee80211_local *local = hw_to_local(hw); | ||
730 | struct ieee80211_ra_tid *ra_tid; | ||
731 | struct sk_buff *skb = dev_alloc_skb(0); | ||
732 | |||
733 | if (unlikely(!skb)) { | ||
734 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
735 | if (net_ratelimit()) | ||
736 | printk(KERN_WARNING "%s: Not enough memory, " | ||
737 | "dropping stop BA session", skb->dev->name); | ||
738 | #endif | ||
739 | return; | ||
740 | } | ||
741 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
742 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | ||
743 | ra_tid->tid = tid; | ||
744 | |||
745 | skb->pkt_type = IEEE80211_DELBA_MSG; | ||
746 | skb_queue_tail(&local->skb_queue, skb); | ||
747 | tasklet_schedule(&local->tasklet); | ||
748 | } | ||
749 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); | ||
750 | |||
751 | /* | ||
752 | * After accepting the AddBA Request we activate a timer, | ||
753 | * resetting it after each frame that arrives from the originator. | ||
754 | * If this timer expires, ieee80211_sta_stop_rx_ba_session will be executed. | ||
755 | */ | ||
756 | static void sta_rx_agg_session_timer_expired(unsigned long data) | ||
757 | { | ||
758 | /* not an elegant detour, but there is no choice as the timer passes | ||
759 | * only one argument, and both sta_info and TID are needed here, so init | ||
760 | * flow in sta_info_create gives the TID as data, while the timer_to_tid | ||
761 | * array gives the sta through container_of */ | ||
762 | u8 *ptid = (u8 *)data; | ||
763 | u8 *timer_to_id = ptid - *ptid; | ||
764 | struct sta_info *sta = container_of(timer_to_id, struct sta_info, | ||
765 | timer_to_tid[0]); | ||
766 | |||
767 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
768 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); | ||
769 | #endif | ||
770 | ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr, | ||
771 | (u16)*ptid, WLAN_BACK_TIMER, | ||
772 | WLAN_REASON_QSTA_TIMEOUT); | ||
773 | } | ||
774 | |||
775 | void ieee80211_process_addba_request(struct ieee80211_local *local, | ||
776 | struct sta_info *sta, | ||
777 | struct ieee80211_mgmt *mgmt, | ||
778 | size_t len) | ||
779 | { | ||
780 | struct ieee80211_hw *hw = &local->hw; | ||
781 | struct ieee80211_conf *conf = &hw->conf; | ||
782 | struct tid_ampdu_rx *tid_agg_rx; | ||
783 | u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status; | ||
784 | u8 dialog_token; | ||
785 | int ret = -EOPNOTSUPP; | ||
786 | DECLARE_MAC_BUF(mac); | ||
787 | |||
788 | /* extract session parameters from addba request frame */ | ||
789 | dialog_token = mgmt->u.action.u.addba_req.dialog_token; | ||
790 | timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout); | ||
791 | start_seq_num = | ||
792 | le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4; | ||
793 | |||
794 | capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); | ||
795 | ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1; | ||
796 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | ||
797 | buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; | ||
798 | |||
799 | status = WLAN_STATUS_REQUEST_DECLINED; | ||
800 | |||
801 | /* sanity check for incoming parameters: | ||
802 | * check if configuration can support the BA policy | ||
803 | * and if the buffer size does not exceed the max value */ | ||
804 | if (((ba_policy != 1) | ||
805 | && (!(conf->ht_conf.cap & IEEE80211_HT_CAP_DELAY_BA))) | ||
806 | || (buf_size > IEEE80211_MAX_AMPDU_BUF)) { | ||
807 | status = WLAN_STATUS_INVALID_QOS_PARAM; | ||
808 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
809 | if (net_ratelimit()) | ||
810 | printk(KERN_DEBUG "AddBA Req with bad params from " | ||
811 | "%s on tid %u. policy %d, buffer size %d\n", | ||
812 | print_mac(mac, mgmt->sa), tid, ba_policy, | ||
813 | buf_size); | ||
814 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
815 | goto end_no_lock; | ||
816 | } | ||
817 | /* determine default buffer size */ | ||
818 | if (buf_size == 0) { | ||
819 | struct ieee80211_supported_band *sband; | ||
820 | |||
821 | sband = local->hw.wiphy->bands[conf->channel->band]; | ||
822 | buf_size = IEEE80211_MIN_AMPDU_BUF; | ||
823 | buf_size = buf_size << sband->ht_info.ampdu_factor; | ||
824 | } | ||
825 | |||
826 | |||
827 | /* examine state machine */ | ||
828 | spin_lock_bh(&sta->lock); | ||
829 | |||
830 | if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { | ||
831 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
832 | if (net_ratelimit()) | ||
833 | printk(KERN_DEBUG "unexpected AddBA Req from " | ||
834 | "%s on tid %u\n", | ||
835 | print_mac(mac, mgmt->sa), tid); | ||
836 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
837 | goto end; | ||
838 | } | ||
839 | |||
840 | /* prepare A-MPDU MLME for Rx aggregation */ | ||
841 | sta->ampdu_mlme.tid_rx[tid] = | ||
842 | kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); | ||
843 | if (!sta->ampdu_mlme.tid_rx[tid]) { | ||
844 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
845 | if (net_ratelimit()) | ||
846 | printk(KERN_ERR "allocate rx mlme to tid %d failed\n", | ||
847 | tid); | ||
848 | #endif | ||
849 | goto end; | ||
850 | } | ||
851 | /* rx timer */ | ||
852 | sta->ampdu_mlme.tid_rx[tid]->session_timer.function = | ||
853 | sta_rx_agg_session_timer_expired; | ||
854 | sta->ampdu_mlme.tid_rx[tid]->session_timer.data = | ||
855 | (unsigned long)&sta->timer_to_tid[tid]; | ||
856 | init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer); | ||
857 | |||
858 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; | ||
859 | |||
860 | /* prepare reordering buffer */ | ||
861 | tid_agg_rx->reorder_buf = | ||
862 | kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC); | ||
863 | if (!tid_agg_rx->reorder_buf) { | ||
864 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
865 | if (net_ratelimit()) | ||
866 | printk(KERN_ERR "can not allocate reordering buffer " | ||
867 | "to tid %d\n", tid); | ||
868 | #endif | ||
869 | kfree(sta->ampdu_mlme.tid_rx[tid]); | ||
870 | goto end; | ||
871 | } | ||
872 | memset(tid_agg_rx->reorder_buf, 0, | ||
873 | buf_size * sizeof(struct sk_buff *)); | ||
874 | |||
875 | if (local->ops->ampdu_action) | ||
876 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START, | ||
877 | &sta->sta, tid, &start_seq_num); | ||
878 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
879 | printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); | ||
880 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
881 | |||
882 | if (ret) { | ||
883 | kfree(tid_agg_rx->reorder_buf); | ||
884 | kfree(tid_agg_rx); | ||
885 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
886 | goto end; | ||
887 | } | ||
888 | |||
889 | /* change state and send addba resp */ | ||
890 | sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL; | ||
891 | tid_agg_rx->dialog_token = dialog_token; | ||
892 | tid_agg_rx->ssn = start_seq_num; | ||
893 | tid_agg_rx->head_seq_num = start_seq_num; | ||
894 | tid_agg_rx->buf_size = buf_size; | ||
895 | tid_agg_rx->timeout = timeout; | ||
896 | tid_agg_rx->stored_mpdu_num = 0; | ||
897 | status = WLAN_STATUS_SUCCESS; | ||
898 | end: | ||
899 | spin_unlock_bh(&sta->lock); | ||
900 | |||
901 | end_no_lock: | ||
902 | ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid, | ||
903 | dialog_token, status, 1, buf_size, timeout); | ||
904 | } | ||
905 | |||
906 | void ieee80211_process_addba_resp(struct ieee80211_local *local, | ||
907 | struct sta_info *sta, | ||
908 | struct ieee80211_mgmt *mgmt, | ||
909 | size_t len) | ||
910 | { | ||
911 | struct ieee80211_hw *hw = &local->hw; | ||
912 | u16 capab; | ||
913 | u16 tid; | ||
914 | u8 *state; | ||
915 | |||
916 | capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); | ||
917 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | ||
918 | |||
919 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
920 | |||
921 | spin_lock_bh(&sta->lock); | ||
922 | |||
923 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | ||
924 | spin_unlock_bh(&sta->lock); | ||
925 | return; | ||
926 | } | ||
927 | |||
928 | if (mgmt->u.action.u.addba_resp.dialog_token != | ||
929 | sta->ampdu_mlme.tid_tx[tid]->dialog_token) { | ||
930 | spin_unlock_bh(&sta->lock); | ||
931 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
932 | printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); | ||
933 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
934 | return; | ||
935 | } | ||
936 | |||
937 | del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
938 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
939 | printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); | ||
940 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
941 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) | ||
942 | == WLAN_STATUS_SUCCESS) { | ||
943 | *state |= HT_ADDBA_RECEIVED_MSK; | ||
944 | sta->ampdu_mlme.addba_req_num[tid] = 0; | ||
945 | |||
946 | if (*state == HT_AGG_STATE_OPERATIONAL) | ||
947 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
948 | |||
949 | spin_unlock_bh(&sta->lock); | ||
950 | } else { | ||
951 | sta->ampdu_mlme.addba_req_num[tid]++; | ||
952 | /* this will allow the state check in stop_BA_session */ | ||
953 | *state = HT_AGG_STATE_OPERATIONAL; | ||
954 | spin_unlock_bh(&sta->lock); | ||
955 | ieee80211_stop_tx_ba_session(hw, sta->sta.addr, tid, | ||
956 | WLAN_BACK_INITIATOR); | ||
957 | } | ||
958 | } | ||
959 | |||
960 | void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, | ||
961 | struct sta_info *sta, | ||
962 | struct ieee80211_mgmt *mgmt, size_t len) | ||
963 | { | ||
964 | struct ieee80211_local *local = sdata->local; | ||
965 | u16 tid, params; | ||
966 | u16 initiator; | ||
967 | DECLARE_MAC_BUF(mac); | ||
968 | |||
969 | params = le16_to_cpu(mgmt->u.action.u.delba.params); | ||
970 | tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; | ||
971 | initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; | ||
972 | |||
973 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
974 | if (net_ratelimit()) | ||
975 | printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n", | ||
976 | print_mac(mac, mgmt->sa), | ||
977 | initiator ? "initiator" : "recipient", tid, | ||
978 | mgmt->u.action.u.delba.reason_code); | ||
979 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
980 | |||
981 | if (initiator == WLAN_BACK_INITIATOR) | ||
982 | ieee80211_sta_stop_rx_ba_session(sdata, sta->sta.addr, tid, | ||
983 | WLAN_BACK_INITIATOR, 0); | ||
984 | else { /* WLAN_BACK_RECIPIENT */ | ||
985 | spin_lock_bh(&sta->lock); | ||
986 | sta->ampdu_mlme.tid_state_tx[tid] = | ||
987 | HT_AGG_STATE_OPERATIONAL; | ||
988 | spin_unlock_bh(&sta->lock); | ||
989 | ieee80211_stop_tx_ba_session(&local->hw, sta->sta.addr, tid, | ||
990 | WLAN_BACK_RECIPIENT); | ||
991 | } | ||
992 | } | ||
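
ieee80211_process_delba() above decodes the DELBA parameter field with fixed shifts; the assumed layout, sketched for reference (mask values are assumptions consistent with the >>12 and >>11 shifts used there):

    /* DELBA parameter set:
     *   bit 11       initiator  -- IEEE80211_DELBA_PARAM_INITIATOR_MASK (0x0800)
     *   bits 12..15  TID        -- IEEE80211_DELBA_PARAM_TID_MASK       (0xF000)
     */
    tid       = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
    initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
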
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 4498d8713652..8025b294588b 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -29,17 +29,6 @@ | |||
29 | #include "key.h" | 29 | #include "key.h" |
30 | #include "sta_info.h" | 30 | #include "sta_info.h" |
31 | 31 | ||
32 | /* ieee80211.o internal definitions, etc. These are not included into | ||
33 | * low-level drivers. */ | ||
34 | |||
35 | #ifndef ETH_P_PAE | ||
36 | #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ | ||
37 | #endif /* ETH_P_PAE */ | ||
38 | |||
39 | #define WLAN_FC_DATA_PRESENT(fc) (((fc) & 0x4c) == 0x08) | ||
40 | |||
41 | #define IEEE80211_FC(type, subtype) cpu_to_le16(type | subtype) | ||
42 | |||
43 | struct ieee80211_local; | 32 | struct ieee80211_local; |
44 | 33 | ||
45 | /* Maximum number of broadcast/multicast frames to buffer when some of the | 34 | /* Maximum number of broadcast/multicast frames to buffer when some of the |
@@ -61,6 +50,12 @@ struct ieee80211_local; | |||
61 | * increased memory use (about 2 kB of RAM per entry). */ | 50 | * increased memory use (about 2 kB of RAM per entry). */ |
62 | #define IEEE80211_FRAGMENT_MAX 4 | 51 | #define IEEE80211_FRAGMENT_MAX 4 |
63 | 52 | ||
53 | /* | ||
54 | * Time after which we ignore scan results and no longer report/use | ||
55 | * them in any way. | ||
56 | */ | ||
57 | #define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ) | ||
58 | |||
64 | struct ieee80211_fragment_entry { | 59 | struct ieee80211_fragment_entry { |
65 | unsigned long first_frag_time; | 60 | unsigned long first_frag_time; |
66 | unsigned int seq; | 61 | unsigned int seq; |
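
IEEE80211_SCAN_RESULT_EXPIRE is a jiffies interval; a minimal sketch of the expiry test it implies, written against the bss->last_update timestamp kept in struct ieee80211_bss below (illustrative only, not a hunk from this patch):

    /* ignore a cached BSS entry once it is more than 10 s old */
    if (time_after(jiffies, bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE))
        return; /* stale scan result, do not report or use it */
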
@@ -73,9 +68,9 @@ struct ieee80211_fragment_entry { | |||
73 | }; | 68 | }; |
74 | 69 | ||
75 | 70 | ||
76 | struct ieee80211_sta_bss { | 71 | struct ieee80211_bss { |
77 | struct list_head list; | 72 | struct list_head list; |
78 | struct ieee80211_sta_bss *hnext; | 73 | struct ieee80211_bss *hnext; |
79 | size_t ssid_len; | 74 | size_t ssid_len; |
80 | 75 | ||
81 | atomic_t users; | 76 | atomic_t users; |
@@ -87,16 +82,11 @@ struct ieee80211_sta_bss { | |||
87 | enum ieee80211_band band; | 82 | enum ieee80211_band band; |
88 | int freq; | 83 | int freq; |
89 | int signal, noise, qual; | 84 | int signal, noise, qual; |
90 | u8 *wpa_ie; | 85 | u8 *ies; /* all information elements from the last Beacon or Probe |
91 | size_t wpa_ie_len; | 86 | * Response frame; note a Beacon frame is not allowed to
92 | u8 *rsn_ie; | 87 | * override values from Probe Response */ |
93 | size_t rsn_ie_len; | 88 | size_t ies_len; |
94 | u8 *wmm_ie; | 89 | bool wmm_used; |
95 | size_t wmm_ie_len; | ||
96 | u8 *ht_ie; | ||
97 | size_t ht_ie_len; | ||
98 | u8 *ht_add_ie; | ||
99 | size_t ht_add_ie_len; | ||
100 | #ifdef CONFIG_MAC80211_MESH | 90 | #ifdef CONFIG_MAC80211_MESH |
101 | u8 *mesh_id; | 91 | u8 *mesh_id; |
102 | size_t mesh_id_len; | 92 | size_t mesh_id_len; |
@@ -108,7 +98,7 @@ struct ieee80211_sta_bss { | |||
108 | u64 timestamp; | 98 | u64 timestamp; |
109 | int beacon_int; | 99 | int beacon_int; |
110 | 100 | ||
111 | bool probe_resp; | 101 | unsigned long last_probe_resp; |
112 | unsigned long last_update; | 102 | unsigned long last_update; |
113 | 103 | ||
114 | /* during association, we save an ERP value from a probe response so | 104 |
@@ -119,7 +109,7 @@ struct ieee80211_sta_bss { | |||
119 | u8 erp_value; | 109 | u8 erp_value; |
120 | }; | 110 | }; |
121 | 111 | ||
122 | static inline u8 *bss_mesh_cfg(struct ieee80211_sta_bss *bss) | 112 | static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss) |
123 | { | 113 | { |
124 | #ifdef CONFIG_MAC80211_MESH | 114 | #ifdef CONFIG_MAC80211_MESH |
125 | return bss->mesh_cfg; | 115 | return bss->mesh_cfg; |
@@ -127,7 +117,7 @@ static inline u8 *bss_mesh_cfg(struct ieee80211_sta_bss *bss) | |||
127 | return NULL; | 117 | return NULL; |
128 | } | 118 | } |
129 | 119 | ||
130 | static inline u8 *bss_mesh_id(struct ieee80211_sta_bss *bss) | 120 | static inline u8 *bss_mesh_id(struct ieee80211_bss *bss) |
131 | { | 121 | { |
132 | #ifdef CONFIG_MAC80211_MESH | 122 | #ifdef CONFIG_MAC80211_MESH |
133 | return bss->mesh_id; | 123 | return bss->mesh_id; |
@@ -135,7 +125,7 @@ static inline u8 *bss_mesh_id(struct ieee80211_sta_bss *bss) | |||
135 | return NULL; | 125 | return NULL; |
136 | } | 126 | } |
137 | 127 | ||
138 | static inline u8 bss_mesh_id_len(struct ieee80211_sta_bss *bss) | 128 | static inline u8 bss_mesh_id_len(struct ieee80211_bss *bss) |
139 | { | 129 | { |
140 | #ifdef CONFIG_MAC80211_MESH | 130 | #ifdef CONFIG_MAC80211_MESH |
141 | return bss->mesh_id_len; | 131 | return bss->mesh_id_len; |
@@ -174,7 +164,7 @@ struct ieee80211_tx_data { | |||
174 | struct sk_buff **extra_frag; | 164 | struct sk_buff **extra_frag; |
175 | int num_extra_frag; | 165 | int num_extra_frag; |
176 | 166 | ||
177 | u16 fc, ethertype; | 167 | u16 ethertype; |
178 | unsigned int flags; | 168 | unsigned int flags; |
179 | }; | 169 | }; |
180 | 170 | ||
@@ -202,7 +192,7 @@ struct ieee80211_rx_data { | |||
202 | struct ieee80211_rx_status *status; | 192 | struct ieee80211_rx_status *status; |
203 | struct ieee80211_rate *rate; | 193 | struct ieee80211_rate *rate; |
204 | 194 | ||
205 | u16 fc, ethertype; | 195 | u16 ethertype; |
206 | unsigned int flags; | 196 | unsigned int flags; |
207 | int sent_ps_buffered; | 197 | int sent_ps_buffered; |
208 | int queue; | 198 | int queue; |
@@ -239,7 +229,6 @@ struct ieee80211_if_ap { | |||
239 | struct sk_buff_head ps_bc_buf; | 229 | struct sk_buff_head ps_bc_buf; |
240 | atomic_t num_sta_ps; /* number of stations in PS mode */ | 230 | atomic_t num_sta_ps; /* number of stations in PS mode */ |
241 | int dtim_count; | 231 | int dtim_count; |
242 | int num_beacons; /* number of TXed beacon frames for this BSS */ | ||
243 | }; | 232 | }; |
244 | 233 | ||
245 | struct ieee80211_if_wds { | 234 | struct ieee80211_if_wds { |
@@ -300,48 +289,37 @@ struct mesh_config { | |||
300 | #define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) | 289 | #define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) |
301 | #define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) | 290 | #define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) |
302 | #define IEEE80211_STA_PRIVACY_INVOKED BIT(13) | 291 | #define IEEE80211_STA_PRIVACY_INVOKED BIT(13) |
292 | /* flags for MLME request */ | ||
293 | #define IEEE80211_STA_REQ_SCAN 0 | ||
294 | #define IEEE80211_STA_REQ_DIRECT_PROBE 1 | ||
295 | #define IEEE80211_STA_REQ_AUTH 2 | ||
296 | #define IEEE80211_STA_REQ_RUN 3 | ||
297 | |||
298 | /* STA/IBSS MLME states */ | ||
299 | enum ieee80211_sta_mlme_state { | ||
300 | IEEE80211_STA_MLME_DISABLED, | ||
301 | IEEE80211_STA_MLME_DIRECT_PROBE, | ||
302 | IEEE80211_STA_MLME_AUTHENTICATE, | ||
303 | IEEE80211_STA_MLME_ASSOCIATE, | ||
304 | IEEE80211_STA_MLME_ASSOCIATED, | ||
305 | IEEE80211_STA_MLME_IBSS_SEARCH, | ||
306 | IEEE80211_STA_MLME_IBSS_JOINED, | ||
307 | }; | ||
308 | |||
309 | /* bitfield of allowed auth algs */ | ||
310 | #define IEEE80211_AUTH_ALG_OPEN BIT(0) | ||
311 | #define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1) | ||
312 | #define IEEE80211_AUTH_ALG_LEAP BIT(2) | ||
313 | |||
303 | struct ieee80211_if_sta { | 314 | struct ieee80211_if_sta { |
304 | struct timer_list timer; | 315 | struct timer_list timer; |
305 | struct work_struct work; | 316 | struct work_struct work; |
306 | u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; | 317 | u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; |
307 | u8 ssid[IEEE80211_MAX_SSID_LEN]; | 318 | u8 ssid[IEEE80211_MAX_SSID_LEN]; |
308 | enum { | 319 | enum ieee80211_sta_mlme_state state; |
309 | IEEE80211_DISABLED, IEEE80211_AUTHENTICATE, | ||
310 | IEEE80211_ASSOCIATE, IEEE80211_ASSOCIATED, | ||
311 | IEEE80211_IBSS_SEARCH, IEEE80211_IBSS_JOINED, | ||
312 | IEEE80211_MESH_UP | ||
313 | } state; | ||
314 | size_t ssid_len; | 320 | size_t ssid_len; |
315 | u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; | 321 | u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; |
316 | size_t scan_ssid_len; | 322 | size_t scan_ssid_len; |
317 | #ifdef CONFIG_MAC80211_MESH | ||
318 | struct timer_list mesh_path_timer; | ||
319 | u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; | ||
320 | size_t mesh_id_len; | ||
321 | /* Active Path Selection Protocol Identifier */ | ||
322 | u8 mesh_pp_id[4]; | ||
323 | /* Active Path Selection Metric Identifier */ | ||
324 | u8 mesh_pm_id[4]; | ||
325 | /* Congestion Control Mode Identifier */ | ||
326 | u8 mesh_cc_id[4]; | ||
327 | /* Local mesh Destination Sequence Number */ | ||
328 | u32 dsn; | ||
329 | /* Last used PREQ ID */ | ||
330 | u32 preq_id; | ||
331 | atomic_t mpaths; | ||
332 | /* Timestamp of last DSN update */ | ||
333 | unsigned long last_dsn_update; | ||
334 | /* Timestamp of last DSN sent */ | ||
335 | unsigned long last_preq; | ||
336 | struct mesh_rmc *rmc; | ||
337 | spinlock_t mesh_preq_queue_lock; | ||
338 | struct mesh_preq_queue preq_queue; | ||
339 | int preq_queue_len; | ||
340 | struct mesh_stats mshstats; | ||
341 | struct mesh_config mshcfg; | ||
342 | u32 mesh_seqnum; | ||
343 | bool accepting_plinks; | ||
344 | #endif | ||
345 | u16 aid; | 323 | u16 aid; |
346 | u16 ap_capab, capab; | 324 | u16 ap_capab, capab; |
347 | u8 *extra_ie; /* to be added to the end of AssocReq */ | 325 | u8 *extra_ie; /* to be added to the end of AssocReq */ |
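
The named MLME states and the IEEE80211_STA_REQ_* bits above replace the old anonymous enum; a short sketch of how a step is expected to be kicked off (ifsta and local stand in for the usual locals; the work item is the same ifsta->work queued elsewhere in this patch):

    /* managed-mode progression: DIRECT_PROBE -> AUTHENTICATE ->
     * ASSOCIATE -> ASSOCIATED; each step is requested via a bit in
     * ifsta->request and handled from the MLME work item */
    set_bit(IEEE80211_STA_REQ_DIRECT_PROBE, &ifsta->request);
    queue_work(local->hw.workqueue, &ifsta->work);
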
@@ -353,20 +331,17 @@ struct ieee80211_if_sta { | |||
353 | 331 | ||
354 | struct sk_buff_head skb_queue; | 332 | struct sk_buff_head skb_queue; |
355 | 333 | ||
356 | int auth_tries, assoc_tries; | 334 | int assoc_scan_tries; /* number of scans done pre-association */ |
335 | int direct_probe_tries; /* retries for direct probes */ | ||
336 | int auth_tries; /* retries for auth req */ | ||
337 | int assoc_tries; /* retries for assoc req */ | ||
357 | 338 | ||
358 | unsigned long request; | 339 | unsigned long request; |
359 | 340 | ||
360 | unsigned long last_probe; | 341 | unsigned long last_probe; |
361 | 342 | ||
362 | unsigned int flags; | 343 | unsigned int flags; |
363 | #define IEEE80211_STA_REQ_SCAN 0 | ||
364 | #define IEEE80211_STA_REQ_AUTH 1 | ||
365 | #define IEEE80211_STA_REQ_RUN 2 | ||
366 | 344 | ||
367 | #define IEEE80211_AUTH_ALG_OPEN BIT(0) | ||
368 | #define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1) | ||
369 | #define IEEE80211_AUTH_ALG_LEAP BIT(2) | ||
370 | unsigned int auth_algs; /* bitfield of allowed auth algs */ | 345 | unsigned int auth_algs; /* bitfield of allowed auth algs */ |
371 | int auth_alg; /* currently used IEEE 802.11 authentication algorithm */ | 346 | int auth_alg; /* currently used IEEE 802.11 authentication algorithm */ |
372 | int auth_transaction; | 347 | int auth_transaction; |
@@ -376,31 +351,70 @@ struct ieee80211_if_sta { | |||
376 | u32 supp_rates_bits[IEEE80211_NUM_BANDS]; | 351 | u32 supp_rates_bits[IEEE80211_NUM_BANDS]; |
377 | 352 | ||
378 | int wmm_last_param_set; | 353 | int wmm_last_param_set; |
379 | int num_beacons; /* number of TXed beacon frames by this STA */ | ||
380 | }; | 354 | }; |
381 | 355 | ||
382 | static inline void ieee80211_if_sta_set_mesh_id(struct ieee80211_if_sta *ifsta, | 356 | struct ieee80211_if_mesh { |
383 | u8 mesh_id_len, u8 *mesh_id) | 357 | struct work_struct work; |
384 | { | 358 | struct timer_list housekeeping_timer; |
385 | #ifdef CONFIG_MAC80211_MESH | 359 | struct timer_list mesh_path_timer; |
386 | ifsta->mesh_id_len = mesh_id_len; | 360 | struct sk_buff_head skb_queue; |
387 | memcpy(ifsta->mesh_id, mesh_id, mesh_id_len); | 361 | |
388 | #endif | 362 | bool housekeeping; |
389 | } | 363 | |
364 | u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; | ||
365 | size_t mesh_id_len; | ||
366 | /* Active Path Selection Protocol Identifier */ | ||
367 | u8 mesh_pp_id[4]; | ||
368 | /* Active Path Selection Metric Identifier */ | ||
369 | u8 mesh_pm_id[4]; | ||
370 | /* Congestion Control Mode Identifier */ | ||
371 | u8 mesh_cc_id[4]; | ||
372 | /* Local mesh Destination Sequence Number */ | ||
373 | u32 dsn; | ||
374 | /* Last used PREQ ID */ | ||
375 | u32 preq_id; | ||
376 | atomic_t mpaths; | ||
377 | /* Timestamp of last DSN update */ | ||
378 | unsigned long last_dsn_update; | ||
379 | /* Timestamp of last DSN sent */ | ||
380 | unsigned long last_preq; | ||
381 | struct mesh_rmc *rmc; | ||
382 | spinlock_t mesh_preq_queue_lock; | ||
383 | struct mesh_preq_queue preq_queue; | ||
384 | int preq_queue_len; | ||
385 | struct mesh_stats mshstats; | ||
386 | struct mesh_config mshcfg; | ||
387 | u32 mesh_seqnum; | ||
388 | bool accepting_plinks; | ||
389 | }; | ||
390 | 390 | ||
391 | #ifdef CONFIG_MAC80211_MESH | 391 | #ifdef CONFIG_MAC80211_MESH |
392 | #define IEEE80211_IFSTA_MESH_CTR_INC(sta, name) \ | 392 | #define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \ |
393 | do { (sta)->mshstats.name++; } while (0) | 393 | do { (msh)->mshstats.name++; } while (0) |
394 | #else | 394 | #else |
395 | #define IEEE80211_IFSTA_MESH_CTR_INC(sta, name) \ | 395 | #define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \ |
396 | do { } while (0) | 396 | do { } while (0) |
397 | #endif | 397 | #endif |
398 | 398 | ||
399 | /* flags used in struct ieee80211_sub_if_data.flags */ | 399 | /** |
400 | #define IEEE80211_SDATA_ALLMULTI BIT(0) | 400 | * enum ieee80211_sub_if_data_flags - virtual interface flags |
401 | #define IEEE80211_SDATA_PROMISC BIT(1) | 401 | * |
402 | #define IEEE80211_SDATA_USERSPACE_MLME BIT(2) | 402 | * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets |
403 | #define IEEE80211_SDATA_OPERATING_GMODE BIT(3) | 403 | * @IEEE80211_SDATA_PROMISC: interface is promisc |
404 | * @IEEE80211_SDATA_USERSPACE_MLME: userspace MLME is active | ||
405 | * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode | ||
406 | * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between | ||
407 | * associated stations and deliver multicast frames both | ||
408 | * back to wireless media and to the local net stack. | ||
409 | */ | ||
410 | enum ieee80211_sub_if_data_flags { | ||
411 | IEEE80211_SDATA_ALLMULTI = BIT(0), | ||
412 | IEEE80211_SDATA_PROMISC = BIT(1), | ||
413 | IEEE80211_SDATA_USERSPACE_MLME = BIT(2), | ||
414 | IEEE80211_SDATA_OPERATING_GMODE = BIT(3), | ||
415 | IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(4), | ||
416 | }; | ||
417 | |||
404 | struct ieee80211_sub_if_data { | 418 | struct ieee80211_sub_if_data { |
405 | struct list_head list; | 419 | struct list_head list; |
406 | 420 | ||
@@ -416,11 +430,6 @@ struct ieee80211_sub_if_data { | |||
416 | 430 | ||
417 | int drop_unencrypted; | 431 | int drop_unencrypted; |
418 | 432 | ||
419 | /* | ||
420 | * basic rates of this AP or the AP we're associated to | ||
421 | */ | ||
422 | u64 basic_rates; | ||
423 | |||
424 | /* Fragment table for host-based reassembly */ | 433 | /* Fragment table for host-based reassembly */ |
425 | struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; | 434 | struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; |
426 | unsigned int fragment_next; | 435 | unsigned int fragment_next; |
@@ -447,6 +456,9 @@ struct ieee80211_sub_if_data { | |||
447 | struct ieee80211_if_wds wds; | 456 | struct ieee80211_if_wds wds; |
448 | struct ieee80211_if_vlan vlan; | 457 | struct ieee80211_if_vlan vlan; |
449 | struct ieee80211_if_sta sta; | 458 | struct ieee80211_if_sta sta; |
459 | #ifdef CONFIG_MAC80211_MESH | ||
460 | struct ieee80211_if_mesh mesh; | ||
461 | #endif | ||
450 | u32 mntr_flags; | 462 | u32 mntr_flags; |
451 | } u; | 463 | } u; |
452 | 464 | ||
@@ -469,7 +481,6 @@ struct ieee80211_sub_if_data { | |||
469 | struct dentry *auth_alg; | 481 | struct dentry *auth_alg; |
470 | struct dentry *auth_transaction; | 482 | struct dentry *auth_transaction; |
471 | struct dentry *flags; | 483 | struct dentry *flags; |
472 | struct dentry *num_beacons_sta; | ||
473 | struct dentry *force_unicast_rateidx; | 484 | struct dentry *force_unicast_rateidx; |
474 | struct dentry *max_ratectrl_rateidx; | 485 | struct dentry *max_ratectrl_rateidx; |
475 | } sta; | 486 | } sta; |
@@ -477,7 +488,6 @@ struct ieee80211_sub_if_data { | |||
477 | struct dentry *drop_unencrypted; | 488 | struct dentry *drop_unencrypted; |
478 | struct dentry *num_sta_ps; | 489 | struct dentry *num_sta_ps; |
479 | struct dentry *dtim_count; | 490 | struct dentry *dtim_count; |
480 | struct dentry *num_beacons; | ||
481 | struct dentry *force_unicast_rateidx; | 491 | struct dentry *force_unicast_rateidx; |
482 | struct dentry *max_ratectrl_rateidx; | 492 | struct dentry *max_ratectrl_rateidx; |
483 | struct dentry *num_buffered_multicast; | 493 | struct dentry *num_buffered_multicast; |
@@ -540,6 +550,19 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p) | |||
540 | return container_of(p, struct ieee80211_sub_if_data, vif); | 550 | return container_of(p, struct ieee80211_sub_if_data, vif); |
541 | } | 551 | } |
542 | 552 | ||
553 | static inline void | ||
554 | ieee80211_sdata_set_mesh_id(struct ieee80211_sub_if_data *sdata, | ||
555 | u8 mesh_id_len, u8 *mesh_id) | ||
556 | { | ||
557 | #ifdef CONFIG_MAC80211_MESH | ||
558 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
559 | ifmsh->mesh_id_len = mesh_id_len; | ||
560 | memcpy(ifmsh->mesh_id, mesh_id, mesh_id_len); | ||
561 | #else | ||
562 | WARN_ON(1); | ||
563 | #endif | ||
564 | } | ||
565 | |||
543 | enum { | 566 | enum { |
544 | IEEE80211_RX_MSG = 1, | 567 | IEEE80211_RX_MSG = 1, |
545 | IEEE80211_TX_STATUS_MSG = 2, | 568 | IEEE80211_TX_STATUS_MSG = 2, |
@@ -550,6 +573,10 @@ enum { | |||
550 | /* maximum number of hardware queues we support. */ | 573 | /* maximum number of hardware queues we support. */ |
551 | #define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES) | 574 | #define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES) |
552 | 575 | ||
576 | struct ieee80211_master_priv { | ||
577 | struct ieee80211_local *local; | ||
578 | }; | ||
579 | |||
553 | struct ieee80211_local { | 580 | struct ieee80211_local { |
554 | /* embed the driver visible part. | 581 | /* embed the driver visible part. |
555 | * don't cast (use the static inlines below), but we keep | 582 | * don't cast (use the static inlines below), but we keep |
@@ -613,10 +640,6 @@ struct ieee80211_local { | |||
613 | struct crypto_blkcipher *wep_rx_tfm; | 640 | struct crypto_blkcipher *wep_rx_tfm; |
614 | u32 wep_iv; | 641 | u32 wep_iv; |
615 | 642 | ||
616 | int bridge_packets; /* bridge packets between associated stations and | ||
617 | * deliver multicast frames both back to wireless | ||
618 | * media and to the local net stack */ | ||
619 | |||
620 | struct list_head interfaces; | 643 | struct list_head interfaces; |
621 | 644 | ||
622 | /* | 645 | /* |
@@ -626,21 +649,21 @@ struct ieee80211_local { | |||
626 | spinlock_t key_lock; | 649 | spinlock_t key_lock; |
627 | 650 | ||
628 | 651 | ||
629 | bool sta_sw_scanning; | 652 | /* Scanning and BSS list */ |
630 | bool sta_hw_scanning; | 653 | bool sw_scanning, hw_scanning; |
631 | int scan_channel_idx; | 654 | int scan_channel_idx; |
632 | enum ieee80211_band scan_band; | 655 | enum ieee80211_band scan_band; |
633 | 656 | ||
634 | enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; | 657 | enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; |
635 | unsigned long last_scan_completed; | 658 | unsigned long last_scan_completed; |
636 | struct delayed_work scan_work; | 659 | struct delayed_work scan_work; |
637 | struct net_device *scan_dev; | 660 | struct ieee80211_sub_if_data *scan_sdata; |
638 | struct ieee80211_channel *oper_channel, *scan_channel; | 661 | struct ieee80211_channel *oper_channel, *scan_channel; |
639 | u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; | 662 | u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; |
640 | size_t scan_ssid_len; | 663 | size_t scan_ssid_len; |
641 | struct list_head sta_bss_list; | 664 | struct list_head bss_list; |
642 | struct ieee80211_sta_bss *sta_bss_hash[STA_HASH_SIZE]; | 665 | struct ieee80211_bss *bss_hash[STA_HASH_SIZE]; |
643 | spinlock_t sta_bss_lock; | 666 | spinlock_t bss_lock; |
644 | 667 | ||
645 | /* SNMP counters */ | 668 | /* SNMP counters */ |
646 | /* dot11CountersTable */ | 669 | /* dot11CountersTable */ |
@@ -701,10 +724,11 @@ struct ieee80211_local { | |||
701 | 724 | ||
702 | #ifdef CONFIG_MAC80211_DEBUGFS | 725 | #ifdef CONFIG_MAC80211_DEBUGFS |
703 | struct local_debugfsdentries { | 726 | struct local_debugfsdentries { |
727 | struct dentry *rcdir; | ||
728 | struct dentry *rcname; | ||
704 | struct dentry *frequency; | 729 | struct dentry *frequency; |
705 | struct dentry *antenna_sel_tx; | 730 | struct dentry *antenna_sel_tx; |
706 | struct dentry *antenna_sel_rx; | 731 | struct dentry *antenna_sel_rx; |
707 | struct dentry *bridge_packets; | ||
708 | struct dentry *rts_threshold; | 732 | struct dentry *rts_threshold; |
709 | struct dentry *fragmentation_threshold; | 733 | struct dentry *fragmentation_threshold; |
710 | struct dentry *short_retry_limit; | 734 | struct dentry *short_retry_limit; |
@@ -774,6 +798,9 @@ struct ieee80211_ra_tid { | |||
774 | 798 | ||
775 | /* Parsed Information Elements */ | 799 | /* Parsed Information Elements */ |
776 | struct ieee802_11_elems { | 800 | struct ieee802_11_elems { |
801 | u8 *ie_start; | ||
802 | size_t total_len; | ||
803 | |||
777 | /* pointers to IEs */ | 804 | /* pointers to IEs */ |
778 | u8 *ssid; | 805 | u8 *ssid; |
779 | u8 *supp_rates; | 806 | u8 *supp_rates; |
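
The new ie_start/total_len members let callers keep the whole, unparsed IE area after parsing; a hedged usage sketch, assuming the parser records its input window in those fields (ie_buf and ie_len are hypothetical names for the variable part of a beacon or probe response):

    struct ieee802_11_elems elems;

    ieee802_11_parse_elems(ie_buf, ie_len, &elems);

    /* the raw IE blob can then be kept in one piece, e.g. in the
     * ieee80211_bss ->ies / ->ies_len buffer introduced above */
    bss->ies = kmemdup(elems.ie_start, elems.total_len, GFP_ATOMIC);
    if (bss->ies)
        bss->ies_len = elems.total_len;
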
@@ -857,86 +884,82 @@ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) | |||
857 | } | 884 | } |
858 | 885 | ||
859 | 886 | ||
860 | /* ieee80211.c */ | ||
861 | int ieee80211_hw_config(struct ieee80211_local *local); | 887 | int ieee80211_hw_config(struct ieee80211_local *local); |
862 | int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed); | 888 | int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed); |
863 | void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); | 889 | void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); |
864 | u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht, | 890 | u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht, |
865 | struct ieee80211_ht_info *req_ht_cap, | 891 | struct ieee80211_ht_info *req_ht_cap, |
866 | struct ieee80211_ht_bss_info *req_bss_cap); | 892 | struct ieee80211_ht_bss_info *req_bss_cap); |
893 | void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | ||
894 | u32 changed); | ||
895 | void ieee80211_configure_filter(struct ieee80211_local *local); | ||
867 | 896 | ||
868 | /* ieee80211_ioctl.c */ | 897 | /* wireless extensions */ |
869 | extern const struct iw_handler_def ieee80211_iw_handler_def; | 898 | extern const struct iw_handler_def ieee80211_iw_handler_def; |
870 | int ieee80211_set_freq(struct net_device *dev, int freq); | ||
871 | 899 | ||
872 | /* ieee80211_sta.c */ | 900 | /* STA/IBSS code */ |
873 | void ieee80211_sta_timer(unsigned long data); | 901 | void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata); |
874 | void ieee80211_sta_work(struct work_struct *work); | 902 | void ieee80211_scan_work(struct work_struct *work); |
875 | void ieee80211_sta_scan_work(struct work_struct *work); | 903 | void ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, |
876 | void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, | ||
877 | struct ieee80211_rx_status *rx_status); | 904 | struct ieee80211_rx_status *rx_status); |
878 | int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len); | 905 | int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len); |
879 | int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len); | 906 | int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len); |
880 | int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid); | 907 | int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid); |
881 | int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len); | 908 | void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata, |
882 | void ieee80211_sta_req_auth(struct net_device *dev, | ||
883 | struct ieee80211_if_sta *ifsta); | 909 | struct ieee80211_if_sta *ifsta); |
884 | int ieee80211_sta_scan_results(struct net_device *dev, | 910 | struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, |
885 | struct iw_request_info *info, | ||
886 | char *buf, size_t len); | ||
887 | ieee80211_rx_result ieee80211_sta_rx_scan( | ||
888 | struct net_device *dev, struct sk_buff *skb, | ||
889 | struct ieee80211_rx_status *rx_status); | ||
890 | void ieee80211_rx_bss_list_init(struct ieee80211_local *local); | ||
891 | void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local); | ||
892 | int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len); | ||
893 | struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev, | ||
894 | struct sk_buff *skb, u8 *bssid, | 911 | struct sk_buff *skb, u8 *bssid, |
895 | u8 *addr, u64 supp_rates); | 912 | u8 *addr, u64 supp_rates); |
896 | int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason); | 913 | int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason); |
897 | int ieee80211_sta_disassociate(struct net_device *dev, u16 reason); | 914 | int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason); |
898 | void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | 915 | u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); |
899 | u32 changed); | ||
900 | u32 ieee80211_reset_erp_info(struct net_device *dev); | ||
901 | int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie, | ||
902 | struct ieee80211_ht_info *ht_info); | ||
903 | int ieee80211_ht_addt_info_ie_to_ht_bss_info( | ||
904 | struct ieee80211_ht_addt_info *ht_add_info_ie, | ||
905 | struct ieee80211_ht_bss_info *bss_info); | ||
906 | void ieee80211_send_addba_request(struct net_device *dev, const u8 *da, | ||
907 | u16 tid, u8 dialog_token, u16 start_seq_num, | ||
908 | u16 agg_size, u16 timeout); | ||
909 | void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, | ||
910 | u16 initiator, u16 reason_code); | ||
911 | void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn); | ||
912 | |||
913 | void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da, | ||
914 | u16 tid, u16 initiator, u16 reason); | ||
915 | void sta_addba_resp_timer_expired(unsigned long data); | ||
916 | void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr); | ||
917 | u64 ieee80211_sta_get_rates(struct ieee80211_local *local, | 916 | u64 ieee80211_sta_get_rates(struct ieee80211_local *local, |
918 | struct ieee802_11_elems *elems, | 917 | struct ieee802_11_elems *elems, |
919 | enum ieee80211_band band); | 918 | enum ieee80211_band band); |
920 | void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, | 919 | void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, |
921 | int encrypt); | 920 | u8 *ssid, size_t ssid_len); |
922 | void ieee802_11_parse_elems(u8 *start, size_t len, | 921 | |
923 | struct ieee802_11_elems *elems); | 922 | /* scan/BSS handling */ |
924 | 923 | int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, | |
925 | #ifdef CONFIG_MAC80211_MESH | 924 | u8 *ssid, size_t ssid_len); |
926 | void ieee80211_start_mesh(struct net_device *dev); | 925 | int ieee80211_scan_results(struct ieee80211_local *local, |
927 | #else | 926 | struct iw_request_info *info, |
928 | static inline void ieee80211_start_mesh(struct net_device *dev) | 927 | char *buf, size_t len); |
929 | {} | 928 | ieee80211_rx_result |
930 | #endif | 929 | ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, |
930 | struct sk_buff *skb, | ||
931 | struct ieee80211_rx_status *rx_status); | ||
932 | void ieee80211_rx_bss_list_init(struct ieee80211_local *local); | ||
933 | void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local); | ||
934 | int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, | ||
935 | char *ie, size_t len); | ||
936 | |||
937 | void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); | ||
938 | int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata, | ||
939 | u8 *ssid, size_t ssid_len); | ||
940 | struct ieee80211_bss * | ||
941 | ieee80211_bss_info_update(struct ieee80211_local *local, | ||
942 | struct ieee80211_rx_status *rx_status, | ||
943 | struct ieee80211_mgmt *mgmt, | ||
944 | size_t len, | ||
945 | struct ieee802_11_elems *elems, | ||
946 | int freq, bool beacon); | ||
947 | struct ieee80211_bss * | ||
948 | ieee80211_rx_bss_add(struct ieee80211_local *local, u8 *bssid, int freq, | ||
949 | u8 *ssid, u8 ssid_len); | ||
950 | struct ieee80211_bss * | ||
951 | ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, | ||
952 | u8 *ssid, u8 ssid_len); | ||
953 | void ieee80211_rx_bss_put(struct ieee80211_local *local, | ||
954 | struct ieee80211_bss *bss); | ||
931 | 955 | ||
932 | /* interface handling */ | 956 | /* interface handling */ |
933 | void ieee80211_if_setup(struct net_device *dev); | ||
934 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, | 957 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, |
935 | struct net_device **new_dev, enum ieee80211_if_types type, | 958 | struct net_device **new_dev, enum nl80211_iftype type, |
936 | struct vif_params *params); | 959 | struct vif_params *params); |
937 | int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | 960 | int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, |
938 | enum ieee80211_if_types type); | 961 | enum nl80211_iftype type); |
939 | void ieee80211_if_remove(struct net_device *dev); | 962 | void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); |
940 | void ieee80211_remove_interfaces(struct ieee80211_local *local); | 963 | void ieee80211_remove_interfaces(struct ieee80211_local *local); |
941 | 964 | ||
942 | /* tx handling */ | 965 | /* tx handling */ |
@@ -946,16 +969,52 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev); | |||
946 | int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); | 969 | int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); |
947 | int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); | 970 | int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); |
948 | 971 | ||
972 | /* HT */ | ||
973 | int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie, | ||
974 | struct ieee80211_ht_info *ht_info); | ||
975 | int ieee80211_ht_addt_info_ie_to_ht_bss_info( | ||
976 | struct ieee80211_ht_addt_info *ht_add_info_ie, | ||
977 | struct ieee80211_ht_bss_info *bss_info); | ||
978 | void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn); | ||
979 | |||
980 | void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da, | ||
981 | u16 tid, u16 initiator, u16 reason); | ||
982 | void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr); | ||
983 | void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, | ||
984 | struct sta_info *sta, | ||
985 | struct ieee80211_mgmt *mgmt, size_t len); | ||
986 | void ieee80211_process_addba_resp(struct ieee80211_local *local, | ||
987 | struct sta_info *sta, | ||
988 | struct ieee80211_mgmt *mgmt, | ||
989 | size_t len); | ||
990 | void ieee80211_process_addba_request(struct ieee80211_local *local, | ||
991 | struct sta_info *sta, | ||
992 | struct ieee80211_mgmt *mgmt, | ||
993 | size_t len); | ||
994 | |||
995 | /* Spectrum management */ | ||
996 | void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, | ||
997 | struct ieee80211_mgmt *mgmt, | ||
998 | size_t len); | ||
999 | |||
949 | /* utility functions/constants */ | 1000 | /* utility functions/constants */ |
950 | extern void *mac80211_wiphy_privid; /* for wiphy privid */ | 1001 | extern void *mac80211_wiphy_privid; /* for wiphy privid */ |
951 | extern const unsigned char rfc1042_header[6]; | 1002 | extern const unsigned char rfc1042_header[6]; |
952 | extern const unsigned char bridge_tunnel_header[6]; | 1003 | extern const unsigned char bridge_tunnel_header[6]; |
953 | u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, | 1004 | u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, |
954 | enum ieee80211_if_types type); | 1005 | enum nl80211_iftype type); |
955 | int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, | 1006 | int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, |
956 | int rate, int erp, int short_preamble); | 1007 | int rate, int erp, int short_preamble); |
957 | void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, | 1008 | void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, |
958 | struct ieee80211_hdr *hdr); | 1009 | struct ieee80211_hdr *hdr); |
1010 | void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata); | ||
1011 | void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | ||
1012 | int encrypt); | ||
1013 | void ieee802_11_parse_elems(u8 *start, size_t len, | ||
1014 | struct ieee802_11_elems *elems); | ||
1015 | int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq); | ||
1016 | u64 ieee80211_mandatory_rates(struct ieee80211_local *local, | ||
1017 | enum ieee80211_band band); | ||
959 | 1018 | ||
960 | #ifdef CONFIG_MAC80211_NOINLINE | 1019 | #ifdef CONFIG_MAC80211_NOINLINE |
961 | #define debug_noinline noinline | 1020 | #define debug_noinline noinline |
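
Most prototypes in this header now take the sub-interface directly rather than a struct net_device; callers that still hold a netdev convert first, as the iface.c code below does. A one-line sketch (ssid/ssid_len are placeholder arguments):

    struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

    /* e.g. the old ieee80211_sta_set_ssid(dev, ...) call becomes: */
    ieee80211_sta_set_ssid(sdata, ssid, ssid_len);
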
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 610ed1d9893a..8336fee68d3e 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -1,4 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Interface handling (except master interface) | ||
3 | * | ||
2 | * Copyright 2002-2005, Instant802 Networks, Inc. | 4 | * Copyright 2002-2005, Instant802 Networks, Inc. |
3 | * Copyright 2005-2006, Devicescape Software, Inc. | 5 | * Copyright 2005-2006, Devicescape Software, Inc. |
4 | * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> | 6 | * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> |
@@ -17,7 +19,539 @@ | |||
17 | #include "sta_info.h" | 19 | #include "sta_info.h" |
18 | #include "debugfs_netdev.h" | 20 | #include "debugfs_netdev.h" |
19 | #include "mesh.h" | 21 | #include "mesh.h" |
22 | #include "led.h" | ||
23 | |||
24 | static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) | ||
25 | { | ||
26 | int meshhdrlen; | ||
27 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
28 | |||
29 | meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0; | ||
30 | |||
31 | /* FIX: what would be proper limits for MTU? | ||
32 | * This interface uses 802.3 frames. */ | ||
33 | if (new_mtu < 256 || | ||
34 | new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) { | ||
35 | return -EINVAL; | ||
36 | } | ||
37 | |||
38 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
39 | printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu); | ||
40 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | ||
41 | dev->mtu = new_mtu; | ||
42 | return 0; | ||
43 | } | ||
44 | |||
45 | static inline int identical_mac_addr_allowed(int type1, int type2) | ||
46 | { | ||
47 | return type1 == NL80211_IFTYPE_MONITOR || | ||
48 | type2 == NL80211_IFTYPE_MONITOR || | ||
49 | (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) || | ||
50 | (type1 == NL80211_IFTYPE_WDS && | ||
51 | (type2 == NL80211_IFTYPE_WDS || | ||
52 | type2 == NL80211_IFTYPE_AP)) || | ||
53 | (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) || | ||
54 | (type1 == NL80211_IFTYPE_AP_VLAN && | ||
55 | (type2 == NL80211_IFTYPE_AP || | ||
56 | type2 == NL80211_IFTYPE_AP_VLAN)); | ||
57 | } | ||
58 | |||
59 | static int ieee80211_open(struct net_device *dev) | ||
60 | { | ||
61 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
62 | struct ieee80211_sub_if_data *nsdata; | ||
63 | struct ieee80211_local *local = sdata->local; | ||
64 | struct sta_info *sta; | ||
65 | struct ieee80211_if_init_conf conf; | ||
66 | u32 changed = 0; | ||
67 | int res; | ||
68 | bool need_hw_reconfig = 0; | ||
69 | u8 null_addr[ETH_ALEN] = {0}; | ||
70 | |||
71 | /* fail early if user set an invalid address */ | ||
72 | if (compare_ether_addr(dev->dev_addr, null_addr) && | ||
73 | !is_valid_ether_addr(dev->dev_addr)) | ||
74 | return -EADDRNOTAVAIL; | ||
75 | |||
76 | /* we hold the RTNL here so can safely walk the list */ | ||
77 | list_for_each_entry(nsdata, &local->interfaces, list) { | ||
78 | struct net_device *ndev = nsdata->dev; | ||
79 | |||
80 | if (ndev != dev && netif_running(ndev)) { | ||
81 | /* | ||
82 | * Allow only a single IBSS interface to be up at any | ||
83 | * time. This is restricted because beacon distribution | ||
84 | * cannot work properly if both are in the same IBSS. | ||
85 | * | ||
86 | * To remove this restriction we'd have to disallow them | ||
87 | * from setting the same SSID on different IBSS interfaces | ||
88 | * belonging to the same hardware. Then, however, we're | ||
89 | * faced with having to adopt two different TSF timers... | ||
90 | */ | ||
91 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC && | ||
92 | nsdata->vif.type == NL80211_IFTYPE_ADHOC) | ||
93 | return -EBUSY; | ||
94 | |||
95 | /* | ||
96 | * The remaining checks are only performed for interfaces | ||
97 | * with the same MAC address. | ||
98 | */ | ||
99 | if (compare_ether_addr(dev->dev_addr, ndev->dev_addr)) | ||
100 | continue; | ||
101 | |||
102 | /* | ||
103 | * check whether it may have the same address | ||
104 | */ | ||
105 | if (!identical_mac_addr_allowed(sdata->vif.type, | ||
106 | nsdata->vif.type)) | ||
107 | return -ENOTUNIQ; | ||
108 | |||
109 | /* | ||
110 | * can only add VLANs to enabled APs | ||
111 | */ | ||
112 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && | ||
113 | nsdata->vif.type == NL80211_IFTYPE_AP) | ||
114 | sdata->bss = &nsdata->u.ap; | ||
115 | } | ||
116 | } | ||
117 | |||
118 | switch (sdata->vif.type) { | ||
119 | case NL80211_IFTYPE_WDS: | ||
120 | if (!is_valid_ether_addr(sdata->u.wds.remote_addr)) | ||
121 | return -ENOLINK; | ||
122 | break; | ||
123 | case NL80211_IFTYPE_AP_VLAN: | ||
124 | if (!sdata->bss) | ||
125 | return -ENOLINK; | ||
126 | list_add(&sdata->u.vlan.list, &sdata->bss->vlans); | ||
127 | break; | ||
128 | case NL80211_IFTYPE_AP: | ||
129 | sdata->bss = &sdata->u.ap; | ||
130 | break; | ||
131 | case NL80211_IFTYPE_MESH_POINT: | ||
132 | if (!ieee80211_vif_is_mesh(&sdata->vif)) | ||
133 | break; | ||
134 | /* mesh ifaces must set allmulti to forward mcast traffic */ | ||
135 | atomic_inc(&local->iff_allmultis); | ||
136 | break; | ||
137 | case NL80211_IFTYPE_STATION: | ||
138 | case NL80211_IFTYPE_MONITOR: | ||
139 | case NL80211_IFTYPE_ADHOC: | ||
140 | /* no special treatment */ | ||
141 | break; | ||
142 | case NL80211_IFTYPE_UNSPECIFIED: | ||
143 | case __NL80211_IFTYPE_AFTER_LAST: | ||
144 | /* cannot happen */ | ||
145 | WARN_ON(1); | ||
146 | break; | ||
147 | } | ||
148 | |||
149 | if (local->open_count == 0) { | ||
150 | res = 0; | ||
151 | if (local->ops->start) | ||
152 | res = local->ops->start(local_to_hw(local)); | ||
153 | if (res) | ||
154 | goto err_del_bss; | ||
155 | need_hw_reconfig = 1; | ||
156 | ieee80211_led_radio(local, local->hw.conf.radio_enabled); | ||
157 | } | ||
158 | |||
159 | /* | ||
160 | * Check all interfaces and copy the hopefully now-present | ||
161 | * MAC address to those that have the special null one. | ||
162 | */ | ||
163 | list_for_each_entry(nsdata, &local->interfaces, list) { | ||
164 | struct net_device *ndev = nsdata->dev; | ||
165 | |||
166 | /* | ||
167 | * No need to check netif_running since we do not allow | ||
168 | * it to start up with this invalid address. | ||
169 | */ | ||
170 | if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) | ||
171 | memcpy(ndev->dev_addr, | ||
172 | local->hw.wiphy->perm_addr, | ||
173 | ETH_ALEN); | ||
174 | } | ||
175 | |||
176 | if (compare_ether_addr(null_addr, local->mdev->dev_addr) == 0) | ||
177 | memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, | ||
178 | ETH_ALEN); | ||
179 | |||
180 | /* | ||
181 | * Validate the MAC address for this device. | ||
182 | */ | ||
183 | if (!is_valid_ether_addr(dev->dev_addr)) { | ||
184 | if (!local->open_count && local->ops->stop) | ||
185 | local->ops->stop(local_to_hw(local)); | ||
186 | return -EADDRNOTAVAIL; | ||
187 | } | ||
188 | |||
189 | switch (sdata->vif.type) { | ||
190 | case NL80211_IFTYPE_AP_VLAN: | ||
191 | /* no need to tell driver */ | ||
192 | break; | ||
193 | case NL80211_IFTYPE_MONITOR: | ||
194 | if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { | ||
195 | local->cooked_mntrs++; | ||
196 | break; | ||
197 | } | ||
198 | |||
199 | /* must be before the call to ieee80211_configure_filter */ | ||
200 | local->monitors++; | ||
201 | if (local->monitors == 1) | ||
202 | local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; | ||
203 | |||
204 | if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) | ||
205 | local->fif_fcsfail++; | ||
206 | if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL) | ||
207 | local->fif_plcpfail++; | ||
208 | if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) | ||
209 | local->fif_control++; | ||
210 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) | ||
211 | local->fif_other_bss++; | ||
212 | |||
213 | netif_addr_lock_bh(local->mdev); | ||
214 | ieee80211_configure_filter(local); | ||
215 | netif_addr_unlock_bh(local->mdev); | ||
216 | break; | ||
217 | case NL80211_IFTYPE_STATION: | ||
218 | case NL80211_IFTYPE_ADHOC: | ||
219 | sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET; | ||
220 | /* fall through */ | ||
221 | default: | ||
222 | conf.vif = &sdata->vif; | ||
223 | conf.type = sdata->vif.type; | ||
224 | conf.mac_addr = dev->dev_addr; | ||
225 | res = local->ops->add_interface(local_to_hw(local), &conf); | ||
226 | if (res) | ||
227 | goto err_stop; | ||
228 | |||
229 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
230 | ieee80211_start_mesh(sdata); | ||
231 | changed |= ieee80211_reset_erp_info(sdata); | ||
232 | ieee80211_bss_info_change_notify(sdata, changed); | ||
233 | ieee80211_enable_keys(sdata); | ||
234 | |||
235 | if (sdata->vif.type == NL80211_IFTYPE_STATION && | ||
236 | !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) | ||
237 | netif_carrier_off(dev); | ||
238 | else | ||
239 | netif_carrier_on(dev); | ||
240 | } | ||
241 | |||
242 | if (sdata->vif.type == NL80211_IFTYPE_WDS) { | ||
243 | /* Create STA entry for the WDS peer */ | ||
244 | sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr, | ||
245 | GFP_KERNEL); | ||
246 | if (!sta) { | ||
247 | res = -ENOMEM; | ||
248 | goto err_del_interface; | ||
249 | } | ||
250 | |||
251 | /* no locking required since STA is not live yet */ | ||
252 | sta->flags |= WLAN_STA_AUTHORIZED; | ||
253 | |||
254 | res = sta_info_insert(sta); | ||
255 | if (res) { | ||
256 | /* STA has been freed */ | ||
257 | goto err_del_interface; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | if (local->open_count == 0) { | ||
262 | res = dev_open(local->mdev); | ||
263 | WARN_ON(res); | ||
264 | if (res) | ||
265 | goto err_del_interface; | ||
266 | tasklet_enable(&local->tx_pending_tasklet); | ||
267 | tasklet_enable(&local->tasklet); | ||
268 | } | ||
269 | |||
270 | /* | ||
271 | * set_multicast_list will be invoked by the networking core | ||
272 | * which will check whether any increments here were done in | ||
273 | * error and sync them down to the hardware as filter flags. | ||
274 | */ | ||
275 | if (sdata->flags & IEEE80211_SDATA_ALLMULTI) | ||
276 | atomic_inc(&local->iff_allmultis); | ||
277 | |||
278 | if (sdata->flags & IEEE80211_SDATA_PROMISC) | ||
279 | atomic_inc(&local->iff_promiscs); | ||
280 | |||
281 | local->open_count++; | ||
282 | if (need_hw_reconfig) { | ||
283 | ieee80211_hw_config(local); | ||
284 | /* | ||
285 | * set default queue parameters so drivers don't | ||
286 | * need to initialise the hardware if the hardware | ||
287 | * doesn't start up with sane defaults | ||
288 | */ | ||
289 | ieee80211_set_wmm_default(sdata); | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * ieee80211_sta_work is disabled while network interface | ||
294 | * is down. Therefore, some configuration changes may not | ||
295 | * yet be effective. Trigger execution of ieee80211_sta_work | ||
296 | * to fix this. | ||
297 | */ | ||
298 | if (sdata->vif.type == NL80211_IFTYPE_STATION || | ||
299 | sdata->vif.type == NL80211_IFTYPE_ADHOC) { | ||
300 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
301 | queue_work(local->hw.workqueue, &ifsta->work); | ||
302 | } | ||
303 | |||
304 | netif_tx_start_all_queues(dev); | ||
305 | |||
306 | return 0; | ||
307 | err_del_interface: | ||
308 | local->ops->remove_interface(local_to_hw(local), &conf); | ||
309 | err_stop: | ||
310 | if (!local->open_count && local->ops->stop) | ||
311 | local->ops->stop(local_to_hw(local)); | ||
312 | err_del_bss: | ||
313 | sdata->bss = NULL; | ||
314 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | ||
315 | list_del(&sdata->u.vlan.list); | ||
316 | return res; | ||
317 | } | ||
318 | |||
319 | static int ieee80211_stop(struct net_device *dev) | ||
320 | { | ||
321 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
322 | struct ieee80211_local *local = sdata->local; | ||
323 | struct ieee80211_if_init_conf conf; | ||
324 | struct sta_info *sta; | ||
325 | |||
326 | /* | ||
327 | * Stop TX on this interface first. | ||
328 | */ | ||
329 | netif_tx_stop_all_queues(dev); | ||
330 | |||
331 | /* | ||
332 | * Now delete all active aggregation sessions. | ||
333 | */ | ||
334 | rcu_read_lock(); | ||
335 | |||
336 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | ||
337 | if (sta->sdata == sdata) | ||
338 | ieee80211_sta_tear_down_BA_sessions(sdata, | ||
339 | sta->sta.addr); | ||
340 | } | ||
341 | |||
342 | rcu_read_unlock(); | ||
343 | |||
344 | /* | ||
345 | * Remove all stations associated with this interface. | ||
346 | * | ||
347 | * This must be done before calling ops->remove_interface() | ||
348 | * because otherwise we can later invoke ops->sta_notify() | ||
349 | * whenever the STAs are removed, and that invalidates driver | ||
350 | * assumptions about always getting a vif pointer that is valid | ||
351 | * (because if we remove a STA after ops->remove_interface() | ||
352 | * the driver will have removed the vif info already!) | ||
353 | * | ||
354 | * We could relax this and only unlink the stations from the | ||
355 | * hash table and list but keep them on a per-sdata list that | ||
356 | * will be inserted back again when the interface is brought | ||
357 | * up again, but I don't currently see a use case for that, | ||
358 | * except with WDS which gets a STA entry created when it is | ||
359 | * brought up. | ||
360 | */ | ||
361 | sta_info_flush(local, sdata); | ||
362 | |||
363 | /* | ||
364 | * Don't count this interface for promisc/allmulti while it | ||
365 | * is down. dev_mc_unsync() will invoke set_multicast_list | ||
366 | * on the master interface which will sync these down to the | ||
367 | * hardware as filter flags. | ||
368 | */ | ||
369 | if (sdata->flags & IEEE80211_SDATA_ALLMULTI) | ||
370 | atomic_dec(&local->iff_allmultis); | ||
371 | |||
372 | if (sdata->flags & IEEE80211_SDATA_PROMISC) | ||
373 | atomic_dec(&local->iff_promiscs); | ||
374 | |||
375 | dev_mc_unsync(local->mdev, dev); | ||
376 | |||
377 | /* APs need special treatment */ | ||
378 | if (sdata->vif.type == NL80211_IFTYPE_AP) { | ||
379 | struct ieee80211_sub_if_data *vlan, *tmp; | ||
380 | struct beacon_data *old_beacon = sdata->u.ap.beacon; | ||
381 | |||
382 | /* remove beacon */ | ||
383 | rcu_assign_pointer(sdata->u.ap.beacon, NULL); | ||
384 | synchronize_rcu(); | ||
385 | kfree(old_beacon); | ||
386 | |||
387 | /* down all dependent devices, that is VLANs */ | ||
388 | list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans, | ||
389 | u.vlan.list) | ||
390 | dev_close(vlan->dev); | ||
391 | WARN_ON(!list_empty(&sdata->u.ap.vlans)); | ||
392 | } | ||
393 | |||
394 | local->open_count--; | ||
395 | |||
396 | switch (sdata->vif.type) { | ||
397 | case NL80211_IFTYPE_AP_VLAN: | ||
398 | list_del(&sdata->u.vlan.list); | ||
399 | /* no need to tell driver */ | ||
400 | break; | ||
401 | case NL80211_IFTYPE_MONITOR: | ||
402 | if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { | ||
403 | local->cooked_mntrs--; | ||
404 | break; | ||
405 | } | ||
406 | |||
407 | local->monitors--; | ||
408 | if (local->monitors == 0) | ||
409 | local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP; | ||
410 | |||
411 | if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) | ||
412 | local->fif_fcsfail--; | ||
413 | if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL) | ||
414 | local->fif_plcpfail--; | ||
415 | if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) | ||
416 | local->fif_control--; | ||
417 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) | ||
418 | local->fif_other_bss--; | ||
419 | |||
420 | netif_addr_lock_bh(local->mdev); | ||
421 | ieee80211_configure_filter(local); | ||
422 | netif_addr_unlock_bh(local->mdev); | ||
423 | break; | ||
424 | case NL80211_IFTYPE_STATION: | ||
425 | case NL80211_IFTYPE_ADHOC: | ||
426 | sdata->u.sta.state = IEEE80211_STA_MLME_DISABLED; | ||
427 | memset(sdata->u.sta.bssid, 0, ETH_ALEN); | ||
428 | del_timer_sync(&sdata->u.sta.timer); | ||
429 | /* | ||
430 | * If the timer fired while we waited for it, it will have | ||
431 | * requeued the work. Now the work will be running again | ||
432 | * but will not rearm the timer again because it checks | ||
433 | * whether the interface is running, which, at this point, | ||
434 | * it no longer is. | ||
435 | */ | ||
436 | cancel_work_sync(&sdata->u.sta.work); | ||
437 | /* | ||
438 | * When we get here, the interface is marked down. | ||
439 | * Call synchronize_rcu() to wait for the RX path | ||
440 | * should it be using the interface and enqueuing | ||
441 | * frames at this very time on another CPU. | ||
442 | */ | ||
443 | synchronize_rcu(); | ||
444 | skb_queue_purge(&sdata->u.sta.skb_queue); | ||
445 | |||
446 | sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED; | ||
447 | kfree(sdata->u.sta.extra_ie); | ||
448 | sdata->u.sta.extra_ie = NULL; | ||
449 | sdata->u.sta.extra_ie_len = 0; | ||
450 | /* fall through */ | ||
451 | case NL80211_IFTYPE_MESH_POINT: | ||
452 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | ||
453 | /* allmulti is always set on mesh ifaces */ | ||
454 | atomic_dec(&local->iff_allmultis); | ||
455 | ieee80211_stop_mesh(sdata); | ||
456 | } | ||
457 | /* fall through */ | ||
458 | default: | ||
459 | if (local->scan_sdata == sdata) { | ||
460 | if (!local->ops->hw_scan) | ||
461 | cancel_delayed_work_sync(&local->scan_work); | ||
462 | /* | ||
463 | * The software scan can no longer run now, so we can | ||
464 | * clear out the scan_sdata reference. However, the | ||
465 | * hardware scan may still be running. The complete | ||
466 | * function must be prepared to handle a NULL value. | ||
467 | */ | ||
468 | local->scan_sdata = NULL; | ||
469 | /* | ||
470 | * The memory barrier guarantees that another CPU | ||
471 | * that is hardware-scanning will now see the fact | ||
472 | * that this interface is gone. | ||
473 | */ | ||
474 | smp_mb(); | ||
475 | /* | ||
476 | * If software scanning, complete the scan but since | ||
477 | * the scan_sdata is NULL already don't send out a | ||
478 | * scan event to userspace -- the scan is incomplete. | ||
479 | */ | ||
480 | if (local->sw_scanning) | ||
481 | ieee80211_scan_completed(&local->hw); | ||
482 | } | ||
483 | |||
484 | conf.vif = &sdata->vif; | ||
485 | conf.type = sdata->vif.type; | ||
486 | conf.mac_addr = dev->dev_addr; | ||
487 | /* disable all keys for as long as this netdev is down */ | ||
488 | ieee80211_disable_keys(sdata); | ||
489 | local->ops->remove_interface(local_to_hw(local), &conf); | ||
490 | } | ||
491 | |||
492 | sdata->bss = NULL; | ||
493 | |||
494 | if (local->open_count == 0) { | ||
495 | if (netif_running(local->mdev)) | ||
496 | dev_close(local->mdev); | ||
497 | |||
498 | if (local->ops->stop) | ||
499 | local->ops->stop(local_to_hw(local)); | ||
500 | |||
501 | ieee80211_led_radio(local, 0); | ||
502 | |||
503 | flush_workqueue(local->hw.workqueue); | ||
504 | |||
505 | tasklet_disable(&local->tx_pending_tasklet); | ||
506 | tasklet_disable(&local->tasklet); | ||
507 | } | ||
508 | |||
509 | return 0; | ||
510 | } | ||
511 | |||
512 | static void ieee80211_set_multicast_list(struct net_device *dev) | ||
513 | { | ||
514 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
515 | struct ieee80211_local *local = sdata->local; | ||
516 | int allmulti, promisc, sdata_allmulti, sdata_promisc; | ||
517 | |||
518 | allmulti = !!(dev->flags & IFF_ALLMULTI); | ||
519 | promisc = !!(dev->flags & IFF_PROMISC); | ||
520 | sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI); | ||
521 | sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC); | ||
522 | |||
523 | if (allmulti != sdata_allmulti) { | ||
524 | if (dev->flags & IFF_ALLMULTI) | ||
525 | atomic_inc(&local->iff_allmultis); | ||
526 | else | ||
527 | atomic_dec(&local->iff_allmultis); | ||
528 | sdata->flags ^= IEEE80211_SDATA_ALLMULTI; | ||
529 | } | ||
530 | |||
531 | if (promisc != sdata_promisc) { | ||
532 | if (dev->flags & IFF_PROMISC) | ||
533 | atomic_inc(&local->iff_promiscs); | ||
534 | else | ||
535 | atomic_dec(&local->iff_promiscs); | ||
536 | sdata->flags ^= IEEE80211_SDATA_PROMISC; | ||
537 | } | ||
538 | |||
539 | dev_mc_sync(local->mdev, dev); | ||
540 | } | ||
20 | 541 | ||
542 | static void ieee80211_if_setup(struct net_device *dev) | ||
543 | { | ||
544 | ether_setup(dev); | ||
545 | dev->hard_start_xmit = ieee80211_subif_start_xmit; | ||
546 | dev->wireless_handlers = &ieee80211_iw_handler_def; | ||
547 | dev->set_multicast_list = ieee80211_set_multicast_list; | ||
548 | dev->change_mtu = ieee80211_change_mtu; | ||
549 | dev->open = ieee80211_open; | ||
550 | dev->stop = ieee80211_stop; | ||
551 | dev->destructor = free_netdev; | ||
552 | /* we will validate the address ourselves in ->open */ | ||
553 | dev->validate_addr = NULL; | ||
554 | } | ||
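
ieee80211_if_setup() assigns the per-netdev hooks directly on struct net_device, which is how interfaces were wired up before the net_device_ops table existed; purely as a hypothetical sketch (not part of this patch, and the ops-struct field names are assumptions about a later kernel API), the same wiring in ops-table form would look roughly like:

    static const struct net_device_ops ieee80211_dataif_ops = {
        .ndo_open               = ieee80211_open,
        .ndo_stop               = ieee80211_stop,
        .ndo_start_xmit         = ieee80211_subif_start_xmit,
        .ndo_set_multicast_list = ieee80211_set_multicast_list,
        .ndo_change_mtu         = ieee80211_change_mtu,
    };
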
21 | /* | 555 | /* |
22 | * Called when the netdev is removed or, by the code below, before | 556 | * Called when the netdev is removed or, by the code below, before |
23 | * the interface type changes. | 557 | * the interface type changes. |
@@ -31,17 +565,17 @@ static void ieee80211_teardown_sdata(struct net_device *dev) | |||
31 | int flushed; | 565 | int flushed; |
32 | int i; | 566 | int i; |
33 | 567 | ||
34 | ieee80211_debugfs_remove_netdev(sdata); | ||
35 | |||
36 | /* free extra data */ | 568 | /* free extra data */ |
37 | ieee80211_free_keys(sdata); | 569 | ieee80211_free_keys(sdata); |
38 | 570 | ||
571 | ieee80211_debugfs_remove_netdev(sdata); | ||
572 | |||
39 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) | 573 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) |
40 | __skb_queue_purge(&sdata->fragments[i].skb_list); | 574 | __skb_queue_purge(&sdata->fragments[i].skb_list); |
41 | sdata->fragment_next = 0; | 575 | sdata->fragment_next = 0; |
42 | 576 | ||
43 | switch (sdata->vif.type) { | 577 | switch (sdata->vif.type) { |
44 | case IEEE80211_IF_TYPE_AP: | 578 | case NL80211_IFTYPE_AP: |
45 | beacon = sdata->u.ap.beacon; | 579 | beacon = sdata->u.ap.beacon; |
46 | rcu_assign_pointer(sdata->u.ap.beacon, NULL); | 580 | rcu_assign_pointer(sdata->u.ap.beacon, NULL); |
47 | synchronize_rcu(); | 581 | synchronize_rcu(); |
@@ -53,23 +587,23 @@ static void ieee80211_teardown_sdata(struct net_device *dev) | |||
53 | } | 587 | } |
54 | 588 | ||
55 | break; | 589 | break; |
56 | case IEEE80211_IF_TYPE_MESH_POINT: | 590 | case NL80211_IFTYPE_MESH_POINT: |
57 | /* Allow compiler to elide mesh_rmc_free call. */ | ||
58 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 591 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
59 | mesh_rmc_free(dev); | 592 | mesh_rmc_free(sdata); |
60 | /* fall through */ | 593 | break; |
61 | case IEEE80211_IF_TYPE_STA: | 594 | case NL80211_IFTYPE_STATION: |
62 | case IEEE80211_IF_TYPE_IBSS: | 595 | case NL80211_IFTYPE_ADHOC: |
63 | kfree(sdata->u.sta.extra_ie); | 596 | kfree(sdata->u.sta.extra_ie); |
64 | kfree(sdata->u.sta.assocreq_ies); | 597 | kfree(sdata->u.sta.assocreq_ies); |
65 | kfree(sdata->u.sta.assocresp_ies); | 598 | kfree(sdata->u.sta.assocresp_ies); |
66 | kfree_skb(sdata->u.sta.probe_resp); | 599 | kfree_skb(sdata->u.sta.probe_resp); |
67 | break; | 600 | break; |
68 | case IEEE80211_IF_TYPE_WDS: | 601 | case NL80211_IFTYPE_WDS: |
69 | case IEEE80211_IF_TYPE_VLAN: | 602 | case NL80211_IFTYPE_AP_VLAN: |
70 | case IEEE80211_IF_TYPE_MNTR: | 603 | case NL80211_IFTYPE_MONITOR: |
71 | break; | 604 | break; |
72 | case IEEE80211_IF_TYPE_INVALID: | 605 | case NL80211_IFTYPE_UNSPECIFIED: |
606 | case __NL80211_IFTYPE_AFTER_LAST: | ||
73 | BUG(); | 607 | BUG(); |
74 | break; | 608 | break; |
75 | } | 609 | } |
@@ -82,55 +616,43 @@ static void ieee80211_teardown_sdata(struct net_device *dev) | |||
82 | * Helper function to initialise an interface to a specific type. | 616 | * Helper function to initialise an interface to a specific type. |
83 | */ | 617 | */ |
84 | static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, | 618 | static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, |
85 | enum ieee80211_if_types type) | 619 | enum nl80211_iftype type) |
86 | { | 620 | { |
87 | struct ieee80211_if_sta *ifsta; | ||
88 | |||
89 | /* clear type-dependent union */ | 621 | /* clear type-dependent union */ |
90 | memset(&sdata->u, 0, sizeof(sdata->u)); | 622 | memset(&sdata->u, 0, sizeof(sdata->u)); |
91 | 623 | ||
92 | /* and set some type-dependent values */ | 624 | /* and set some type-dependent values */ |
93 | sdata->vif.type = type; | 625 | sdata->vif.type = type; |
626 | sdata->dev->hard_start_xmit = ieee80211_subif_start_xmit; | ||
627 | sdata->wdev.iftype = type; | ||
94 | 628 | ||
95 | /* only monitor differs */ | 629 | /* only monitor differs */ |
96 | sdata->dev->type = ARPHRD_ETHER; | 630 | sdata->dev->type = ARPHRD_ETHER; |
97 | 631 | ||
98 | switch (type) { | 632 | switch (type) { |
99 | case IEEE80211_IF_TYPE_AP: | 633 | case NL80211_IFTYPE_AP: |
100 | skb_queue_head_init(&sdata->u.ap.ps_bc_buf); | 634 | skb_queue_head_init(&sdata->u.ap.ps_bc_buf); |
101 | INIT_LIST_HEAD(&sdata->u.ap.vlans); | 635 | INIT_LIST_HEAD(&sdata->u.ap.vlans); |
102 | break; | 636 | break; |
103 | case IEEE80211_IF_TYPE_MESH_POINT: | 637 | case NL80211_IFTYPE_STATION: |
104 | case IEEE80211_IF_TYPE_STA: | 638 | case NL80211_IFTYPE_ADHOC: |
105 | case IEEE80211_IF_TYPE_IBSS: | 639 | ieee80211_sta_setup_sdata(sdata); |
106 | ifsta = &sdata->u.sta; | 640 | break; |
107 | INIT_WORK(&ifsta->work, ieee80211_sta_work); | 641 | case NL80211_IFTYPE_MESH_POINT: |
108 | setup_timer(&ifsta->timer, ieee80211_sta_timer, | ||
109 | (unsigned long) sdata); | ||
110 | skb_queue_head_init(&ifsta->skb_queue); | ||
111 | |||
112 | ifsta->capab = WLAN_CAPABILITY_ESS; | ||
113 | ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN | | ||
114 | IEEE80211_AUTH_ALG_SHARED_KEY; | ||
115 | ifsta->flags |= IEEE80211_STA_CREATE_IBSS | | ||
116 | IEEE80211_STA_AUTO_BSSID_SEL | | ||
117 | IEEE80211_STA_AUTO_CHANNEL_SEL; | ||
118 | if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4) | ||
119 | ifsta->flags |= IEEE80211_STA_WMM_ENABLED; | ||
120 | |||
121 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 642 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
122 | ieee80211_mesh_init_sdata(sdata); | 643 | ieee80211_mesh_init_sdata(sdata); |
123 | break; | 644 | break; |
124 | case IEEE80211_IF_TYPE_MNTR: | 645 | case NL80211_IFTYPE_MONITOR: |
125 | sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; | 646 | sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; |
126 | sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit; | 647 | sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit; |
127 | sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | | 648 | sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | |
128 | MONITOR_FLAG_OTHER_BSS; | 649 | MONITOR_FLAG_OTHER_BSS; |
129 | break; | 650 | break; |
130 | case IEEE80211_IF_TYPE_WDS: | 651 | case NL80211_IFTYPE_WDS: |
131 | case IEEE80211_IF_TYPE_VLAN: | 652 | case NL80211_IFTYPE_AP_VLAN: |
132 | break; | 653 | break; |
133 | case IEEE80211_IF_TYPE_INVALID: | 654 | case NL80211_IFTYPE_UNSPECIFIED: |
655 | case __NL80211_IFTYPE_AFTER_LAST: | ||
134 | BUG(); | 656 | BUG(); |
135 | break; | 657 | break; |
136 | } | 658 | } |
@@ -139,7 +661,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, | |||
139 | } | 661 | } |
140 | 662 | ||
141 | int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | 663 | int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, |
142 | enum ieee80211_if_types type) | 664 | enum nl80211_iftype type) |
143 | { | 665 | { |
144 | ASSERT_RTNL(); | 666 | ASSERT_RTNL(); |
145 | 667 | ||
@@ -160,14 +682,16 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | |||
160 | ieee80211_setup_sdata(sdata, type); | 682 | ieee80211_setup_sdata(sdata, type); |
161 | 683 | ||
162 | /* reset some values that shouldn't be kept across type changes */ | 684 | /* reset some values that shouldn't be kept across type changes */ |
163 | sdata->basic_rates = 0; | 685 | sdata->bss_conf.basic_rates = |
686 | ieee80211_mandatory_rates(sdata->local, | ||
687 | sdata->local->hw.conf.channel->band); | ||
164 | sdata->drop_unencrypted = 0; | 688 | sdata->drop_unencrypted = 0; |
165 | 689 | ||
166 | return 0; | 690 | return 0; |
167 | } | 691 | } |
168 | 692 | ||
169 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, | 693 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, |
170 | struct net_device **new_dev, enum ieee80211_if_types type, | 694 | struct net_device **new_dev, enum nl80211_iftype type, |
171 | struct vif_params *params) | 695 | struct vif_params *params) |
172 | { | 696 | { |
173 | struct net_device *ndev; | 697 | struct net_device *ndev; |
@@ -225,9 +749,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
225 | 749 | ||
226 | if (ieee80211_vif_is_mesh(&sdata->vif) && | 750 | if (ieee80211_vif_is_mesh(&sdata->vif) && |
227 | params && params->mesh_id_len) | 751 | params && params->mesh_id_len) |
228 | ieee80211_if_sta_set_mesh_id(&sdata->u.sta, | 752 | ieee80211_sdata_set_mesh_id(sdata, |
229 | params->mesh_id_len, | 753 | params->mesh_id_len, |
230 | params->mesh_id); | 754 | params->mesh_id); |
231 | 755 | ||
232 | list_add_tail_rcu(&sdata->list, &local->interfaces); | 756 | list_add_tail_rcu(&sdata->list, &local->interfaces); |
233 | 757 | ||
@@ -241,15 +765,13 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
241 | return ret; | 765 | return ret; |
242 | } | 766 | } |
243 | 767 | ||
244 | void ieee80211_if_remove(struct net_device *dev) | 768 | void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) |
245 | { | 769 | { |
246 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
247 | |||
248 | ASSERT_RTNL(); | 770 | ASSERT_RTNL(); |
249 | 771 | ||
250 | list_del_rcu(&sdata->list); | 772 | list_del_rcu(&sdata->list); |
251 | synchronize_rcu(); | 773 | synchronize_rcu(); |
252 | unregister_netdevice(dev); | 774 | unregister_netdevice(sdata->dev); |
253 | } | 775 | } |
254 | 776 | ||
255 | /* | 777 | /* |
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 6597c779e35a..57afcd38cd9e 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -118,12 +118,12 @@ static const u8 *get_mac_for_key(struct ieee80211_key *key) | |||
118 | * address to indicate a transmit-only key. | 118 | * address to indicate a transmit-only key. |
119 | */ | 119 | */ |
120 | if (key->conf.alg != ALG_WEP && | 120 | if (key->conf.alg != ALG_WEP && |
121 | (key->sdata->vif.type == IEEE80211_IF_TYPE_AP || | 121 | (key->sdata->vif.type == NL80211_IFTYPE_AP || |
122 | key->sdata->vif.type == IEEE80211_IF_TYPE_VLAN)) | 122 | key->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) |
123 | addr = zero_addr; | 123 | addr = zero_addr; |
124 | 124 | ||
125 | if (key->sta) | 125 | if (key->sta) |
126 | addr = key->sta->addr; | 126 | addr = key->sta->sta.addr; |
127 | 127 | ||
128 | return addr; | 128 | return addr; |
129 | } | 129 | } |
@@ -331,7 +331,7 @@ void ieee80211_key_link(struct ieee80211_key *key, | |||
331 | */ | 331 | */ |
332 | key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE; | 332 | key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE; |
333 | } else { | 333 | } else { |
334 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { | 334 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
335 | struct sta_info *ap; | 335 | struct sta_info *ap; |
336 | 336 | ||
337 | /* | 337 | /* |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index aa5a191598c9..d608c44047c0 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -45,16 +45,9 @@ struct ieee80211_tx_status_rtap_hdr { | |||
45 | u8 data_retries; | 45 | u8 data_retries; |
46 | } __attribute__ ((packed)); | 46 | } __attribute__ ((packed)); |
47 | 47 | ||
48 | /* common interface routines */ | ||
49 | |||
50 | static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr) | ||
51 | { | ||
52 | memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ | ||
53 | return ETH_ALEN; | ||
54 | } | ||
55 | 48 | ||
56 | /* must be called under mdev tx lock */ | 49 | /* must be called under mdev tx lock */ |
57 | static void ieee80211_configure_filter(struct ieee80211_local *local) | 50 | void ieee80211_configure_filter(struct ieee80211_local *local) |
58 | { | 51 | { |
59 | unsigned int changed_flags; | 52 | unsigned int changed_flags; |
60 | unsigned int new_flags = 0; | 53 | unsigned int new_flags = 0; |
@@ -97,9 +90,24 @@ static void ieee80211_configure_filter(struct ieee80211_local *local) | |||
97 | 90 | ||
98 | /* master interface */ | 91 | /* master interface */ |
99 | 92 | ||
93 | static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr) | ||
94 | { | ||
95 | memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ | ||
96 | return ETH_ALEN; | ||
97 | } | ||
98 | |||
99 | static const struct header_ops ieee80211_header_ops = { | ||
100 | .create = eth_header, | ||
101 | .parse = header_parse_80211, | ||
102 | .rebuild = eth_rebuild_header, | ||
103 | .cache = eth_header_cache, | ||
104 | .cache_update = eth_header_cache_update, | ||
105 | }; | ||
106 | |||
100 | static int ieee80211_master_open(struct net_device *dev) | 107 | static int ieee80211_master_open(struct net_device *dev) |
101 | { | 108 | { |
102 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 109 | struct ieee80211_master_priv *mpriv = netdev_priv(dev); |
110 | struct ieee80211_local *local = mpriv->local; | ||
103 | struct ieee80211_sub_if_data *sdata; | 111 | struct ieee80211_sub_if_data *sdata; |
104 | int res = -EOPNOTSUPP; | 112 | int res = -EOPNOTSUPP; |
105 | 113 | ||
@@ -121,7 +129,8 @@ static int ieee80211_master_open(struct net_device *dev) | |||
121 | 129 | ||
122 | static int ieee80211_master_stop(struct net_device *dev) | 130 | static int ieee80211_master_stop(struct net_device *dev) |
123 | { | 131 | { |
124 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 132 | struct ieee80211_master_priv *mpriv = netdev_priv(dev); |
133 | struct ieee80211_local *local = mpriv->local; | ||
125 | struct ieee80211_sub_if_data *sdata; | 134 | struct ieee80211_sub_if_data *sdata; |
126 | 135 | ||
127 | /* we hold the RTNL here so can safely walk the list */ | 136 | /* we hold the RTNL here so can safely walk the list */ |
@@ -134,849 +143,12 @@ static int ieee80211_master_stop(struct net_device *dev) | |||
134 | 143 | ||
135 | static void ieee80211_master_set_multicast_list(struct net_device *dev) | 144 | static void ieee80211_master_set_multicast_list(struct net_device *dev) |
136 | { | 145 | { |
137 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 146 | struct ieee80211_master_priv *mpriv = netdev_priv(dev); |
147 | struct ieee80211_local *local = mpriv->local; | ||
138 | 148 | ||
139 | ieee80211_configure_filter(local); | 149 | ieee80211_configure_filter(local); |
140 | } | 150 | } |
141 | 151 | ||
142 | /* regular interfaces */ | ||
143 | |||
144 | static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) | ||
145 | { | ||
146 | int meshhdrlen; | ||
147 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
148 | |||
149 | meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0; | ||
150 | |||
151 | /* FIX: what would be proper limits for MTU? | ||
152 | * This interface uses 802.3 frames. */ | ||
153 | if (new_mtu < 256 || | ||
154 | new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) { | ||
155 | return -EINVAL; | ||
156 | } | ||
157 | |||
158 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
159 | printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu); | ||
160 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | ||
161 | dev->mtu = new_mtu; | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | static inline int identical_mac_addr_allowed(int type1, int type2) | ||
166 | { | ||
167 | return (type1 == IEEE80211_IF_TYPE_MNTR || | ||
168 | type2 == IEEE80211_IF_TYPE_MNTR || | ||
169 | (type1 == IEEE80211_IF_TYPE_AP && | ||
170 | type2 == IEEE80211_IF_TYPE_WDS) || | ||
171 | (type1 == IEEE80211_IF_TYPE_WDS && | ||
172 | (type2 == IEEE80211_IF_TYPE_WDS || | ||
173 | type2 == IEEE80211_IF_TYPE_AP)) || | ||
174 | (type1 == IEEE80211_IF_TYPE_AP && | ||
175 | type2 == IEEE80211_IF_TYPE_VLAN) || | ||
176 | (type1 == IEEE80211_IF_TYPE_VLAN && | ||
177 | (type2 == IEEE80211_IF_TYPE_AP || | ||
178 | type2 == IEEE80211_IF_TYPE_VLAN))); | ||
179 | } | ||
180 | |||
181 | static int ieee80211_open(struct net_device *dev) | ||
182 | { | ||
183 | struct ieee80211_sub_if_data *sdata, *nsdata; | ||
184 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
185 | struct sta_info *sta; | ||
186 | struct ieee80211_if_init_conf conf; | ||
187 | u32 changed = 0; | ||
188 | int res; | ||
189 | bool need_hw_reconfig = 0; | ||
190 | |||
191 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
192 | |||
193 | /* we hold the RTNL here so can safely walk the list */ | ||
194 | list_for_each_entry(nsdata, &local->interfaces, list) { | ||
195 | struct net_device *ndev = nsdata->dev; | ||
196 | |||
197 | if (ndev != dev && netif_running(ndev)) { | ||
198 | /* | ||
199 | * Allow only a single IBSS interface to be up at any | ||
200 | * time. This is restricted because beacon distribution | ||
201 | * cannot work properly if both are in the same IBSS. | ||
202 | * | ||
203 | * To remove this restriction we'd have to disallow them | ||
204 | * from setting the same SSID on different IBSS interfaces | ||
205 | * belonging to the same hardware. Then, however, we're | ||
206 | * faced with having to adopt two different TSF timers... | ||
207 | */ | ||
208 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && | ||
209 | nsdata->vif.type == IEEE80211_IF_TYPE_IBSS) | ||
210 | return -EBUSY; | ||
211 | |||
212 | /* | ||
213 | * The remaining checks are only performed for interfaces | ||
214 | * with the same MAC address. | ||
215 | */ | ||
216 | if (compare_ether_addr(dev->dev_addr, ndev->dev_addr)) | ||
217 | continue; | ||
218 | |||
219 | /* | ||
220 | * check whether it may have the same address | ||
221 | */ | ||
222 | if (!identical_mac_addr_allowed(sdata->vif.type, | ||
223 | nsdata->vif.type)) | ||
224 | return -ENOTUNIQ; | ||
225 | |||
226 | /* | ||
227 | * can only add VLANs to enabled APs | ||
228 | */ | ||
229 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN && | ||
230 | nsdata->vif.type == IEEE80211_IF_TYPE_AP) | ||
231 | sdata->bss = &nsdata->u.ap; | ||
232 | } | ||
233 | } | ||
234 | |||
235 | switch (sdata->vif.type) { | ||
236 | case IEEE80211_IF_TYPE_WDS: | ||
237 | if (!is_valid_ether_addr(sdata->u.wds.remote_addr)) | ||
238 | return -ENOLINK; | ||
239 | break; | ||
240 | case IEEE80211_IF_TYPE_VLAN: | ||
241 | if (!sdata->bss) | ||
242 | return -ENOLINK; | ||
243 | list_add(&sdata->u.vlan.list, &sdata->bss->vlans); | ||
244 | break; | ||
245 | case IEEE80211_IF_TYPE_AP: | ||
246 | sdata->bss = &sdata->u.ap; | ||
247 | break; | ||
248 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
249 | /* mesh ifaces must set allmulti to forward mcast traffic */ | ||
250 | atomic_inc(&local->iff_allmultis); | ||
251 | break; | ||
252 | case IEEE80211_IF_TYPE_STA: | ||
253 | case IEEE80211_IF_TYPE_MNTR: | ||
254 | case IEEE80211_IF_TYPE_IBSS: | ||
255 | /* no special treatment */ | ||
256 | break; | ||
257 | case IEEE80211_IF_TYPE_INVALID: | ||
258 | /* cannot happen */ | ||
259 | WARN_ON(1); | ||
260 | break; | ||
261 | } | ||
262 | |||
263 | if (local->open_count == 0) { | ||
264 | res = 0; | ||
265 | if (local->ops->start) | ||
266 | res = local->ops->start(local_to_hw(local)); | ||
267 | if (res) | ||
268 | goto err_del_bss; | ||
269 | need_hw_reconfig = 1; | ||
270 | ieee80211_led_radio(local, local->hw.conf.radio_enabled); | ||
271 | } | ||
272 | |||
273 | switch (sdata->vif.type) { | ||
274 | case IEEE80211_IF_TYPE_VLAN: | ||
275 | /* no need to tell driver */ | ||
276 | break; | ||
277 | case IEEE80211_IF_TYPE_MNTR: | ||
278 | if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { | ||
279 | local->cooked_mntrs++; | ||
280 | break; | ||
281 | } | ||
282 | |||
283 | /* must be before the call to ieee80211_configure_filter */ | ||
284 | local->monitors++; | ||
285 | if (local->monitors == 1) | ||
286 | local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; | ||
287 | |||
288 | if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) | ||
289 | local->fif_fcsfail++; | ||
290 | if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL) | ||
291 | local->fif_plcpfail++; | ||
292 | if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) | ||
293 | local->fif_control++; | ||
294 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) | ||
295 | local->fif_other_bss++; | ||
296 | |||
297 | netif_addr_lock_bh(local->mdev); | ||
298 | ieee80211_configure_filter(local); | ||
299 | netif_addr_unlock_bh(local->mdev); | ||
300 | break; | ||
301 | case IEEE80211_IF_TYPE_STA: | ||
302 | case IEEE80211_IF_TYPE_IBSS: | ||
303 | sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET; | ||
304 | /* fall through */ | ||
305 | default: | ||
306 | conf.vif = &sdata->vif; | ||
307 | conf.type = sdata->vif.type; | ||
308 | conf.mac_addr = dev->dev_addr; | ||
309 | res = local->ops->add_interface(local_to_hw(local), &conf); | ||
310 | if (res) | ||
311 | goto err_stop; | ||
312 | |||
313 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
314 | ieee80211_start_mesh(sdata->dev); | ||
315 | changed |= ieee80211_reset_erp_info(dev); | ||
316 | ieee80211_bss_info_change_notify(sdata, changed); | ||
317 | ieee80211_enable_keys(sdata); | ||
318 | |||
319 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA && | ||
320 | !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) | ||
321 | netif_carrier_off(dev); | ||
322 | else | ||
323 | netif_carrier_on(dev); | ||
324 | } | ||
325 | |||
326 | if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { | ||
327 | /* Create STA entry for the WDS peer */ | ||
328 | sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr, | ||
329 | GFP_KERNEL); | ||
330 | if (!sta) { | ||
331 | res = -ENOMEM; | ||
332 | goto err_del_interface; | ||
333 | } | ||
334 | |||
335 | /* no locking required since STA is not live yet */ | ||
336 | sta->flags |= WLAN_STA_AUTHORIZED; | ||
337 | |||
338 | res = sta_info_insert(sta); | ||
339 | if (res) { | ||
340 | /* STA has been freed */ | ||
341 | goto err_del_interface; | ||
342 | } | ||
343 | } | ||
344 | |||
345 | if (local->open_count == 0) { | ||
346 | res = dev_open(local->mdev); | ||
347 | WARN_ON(res); | ||
348 | if (res) | ||
349 | goto err_del_interface; | ||
350 | tasklet_enable(&local->tx_pending_tasklet); | ||
351 | tasklet_enable(&local->tasklet); | ||
352 | } | ||
353 | |||
354 | /* | ||
355 | * set_multicast_list will be invoked by the networking core | ||
356 | * which will check whether any increments here were done in | ||
357 | * error and sync them down to the hardware as filter flags. | ||
358 | */ | ||
359 | if (sdata->flags & IEEE80211_SDATA_ALLMULTI) | ||
360 | atomic_inc(&local->iff_allmultis); | ||
361 | |||
362 | if (sdata->flags & IEEE80211_SDATA_PROMISC) | ||
363 | atomic_inc(&local->iff_promiscs); | ||
364 | |||
365 | local->open_count++; | ||
366 | if (need_hw_reconfig) | ||
367 | ieee80211_hw_config(local); | ||
368 | |||
369 | /* | ||
370 | * ieee80211_sta_work is disabled while network interface | ||
371 | * is down. Therefore, some configuration changes may not | ||
372 | * yet be effective. Trigger execution of ieee80211_sta_work | ||
373 | * to fix this. | ||
374 | */ | ||
375 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | ||
376 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | ||
377 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
378 | queue_work(local->hw.workqueue, &ifsta->work); | ||
379 | } | ||
380 | |||
381 | netif_tx_start_all_queues(dev); | ||
382 | |||
383 | return 0; | ||
384 | err_del_interface: | ||
385 | local->ops->remove_interface(local_to_hw(local), &conf); | ||
386 | err_stop: | ||
387 | if (!local->open_count && local->ops->stop) | ||
388 | local->ops->stop(local_to_hw(local)); | ||
389 | err_del_bss: | ||
390 | sdata->bss = NULL; | ||
391 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) | ||
392 | list_del(&sdata->u.vlan.list); | ||
393 | return res; | ||
394 | } | ||
395 | |||
396 | static int ieee80211_stop(struct net_device *dev) | ||
397 | { | ||
398 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
399 | struct ieee80211_local *local = sdata->local; | ||
400 | struct ieee80211_if_init_conf conf; | ||
401 | struct sta_info *sta; | ||
402 | |||
403 | /* | ||
404 | * Stop TX on this interface first. | ||
405 | */ | ||
406 | netif_tx_stop_all_queues(dev); | ||
407 | |||
408 | /* | ||
409 | * Now delete all active aggregation sessions. | ||
410 | */ | ||
411 | rcu_read_lock(); | ||
412 | |||
413 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | ||
414 | if (sta->sdata == sdata) | ||
415 | ieee80211_sta_tear_down_BA_sessions(dev, sta->addr); | ||
416 | } | ||
417 | |||
418 | rcu_read_unlock(); | ||
419 | |||
420 | /* | ||
421 | * Remove all stations associated with this interface. | ||
422 | * | ||
423 | * This must be done before calling ops->remove_interface() | ||
424 | * because otherwise we can later invoke ops->sta_notify() | ||
425 | * whenever the STAs are removed, and that invalidates driver | ||
426 | * assumptions about always getting a vif pointer that is valid | ||
427 | * (because if we remove a STA after ops->remove_interface() | ||
428 | * the driver will have removed the vif info already!) | ||
429 | * | ||
430 | * We could relax this and only unlink the stations from the | ||
431 | * hash table and list but keep them on a per-sdata list that | ||
432 | * will be inserted back again when the interface is brought | ||
433 | * up again, but I don't currently see a use case for that, | ||
434 | * except with WDS which gets a STA entry created when it is | ||
435 | * brought up. | ||
436 | */ | ||
437 | sta_info_flush(local, sdata); | ||
438 | |||
439 | /* | ||
440 | * Don't count this interface for promisc/allmulti while it | ||
441 | * is down. dev_mc_unsync() will invoke set_multicast_list | ||
442 | * on the master interface which will sync these down to the | ||
443 | * hardware as filter flags. | ||
444 | */ | ||
445 | if (sdata->flags & IEEE80211_SDATA_ALLMULTI) | ||
446 | atomic_dec(&local->iff_allmultis); | ||
447 | |||
448 | if (sdata->flags & IEEE80211_SDATA_PROMISC) | ||
449 | atomic_dec(&local->iff_promiscs); | ||
450 | |||
451 | dev_mc_unsync(local->mdev, dev); | ||
452 | |||
453 | /* APs need special treatment */ | ||
454 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { | ||
455 | struct ieee80211_sub_if_data *vlan, *tmp; | ||
456 | struct beacon_data *old_beacon = sdata->u.ap.beacon; | ||
457 | |||
458 | /* remove beacon */ | ||
459 | rcu_assign_pointer(sdata->u.ap.beacon, NULL); | ||
460 | synchronize_rcu(); | ||
461 | kfree(old_beacon); | ||
462 | |||
463 | /* down all dependent devices, that is VLANs */ | ||
464 | list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans, | ||
465 | u.vlan.list) | ||
466 | dev_close(vlan->dev); | ||
467 | WARN_ON(!list_empty(&sdata->u.ap.vlans)); | ||
468 | } | ||
469 | |||
470 | local->open_count--; | ||
471 | |||
472 | switch (sdata->vif.type) { | ||
473 | case IEEE80211_IF_TYPE_VLAN: | ||
474 | list_del(&sdata->u.vlan.list); | ||
475 | /* no need to tell driver */ | ||
476 | break; | ||
477 | case IEEE80211_IF_TYPE_MNTR: | ||
478 | if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { | ||
479 | local->cooked_mntrs--; | ||
480 | break; | ||
481 | } | ||
482 | |||
483 | local->monitors--; | ||
484 | if (local->monitors == 0) | ||
485 | local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP; | ||
486 | |||
487 | if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL) | ||
488 | local->fif_fcsfail--; | ||
489 | if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL) | ||
490 | local->fif_plcpfail--; | ||
491 | if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) | ||
492 | local->fif_control--; | ||
493 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) | ||
494 | local->fif_other_bss--; | ||
495 | |||
496 | netif_addr_lock_bh(local->mdev); | ||
497 | ieee80211_configure_filter(local); | ||
498 | netif_addr_unlock_bh(local->mdev); | ||
499 | break; | ||
500 | case IEEE80211_IF_TYPE_MESH_POINT: | ||
501 | /* allmulti is always set on mesh ifaces */ | ||
502 | atomic_dec(&local->iff_allmultis); | ||
503 | /* fall through */ | ||
504 | case IEEE80211_IF_TYPE_STA: | ||
505 | case IEEE80211_IF_TYPE_IBSS: | ||
506 | sdata->u.sta.state = IEEE80211_DISABLED; | ||
507 | memset(sdata->u.sta.bssid, 0, ETH_ALEN); | ||
508 | del_timer_sync(&sdata->u.sta.timer); | ||
509 | /* | ||
510 | * When we get here, the interface is marked down. | ||
511 | * Call synchronize_rcu() to wait for the RX path | ||
512 | * should it be using the interface and enqueuing | ||
513 | * frames at this very time on another CPU. | ||
514 | */ | ||
515 | synchronize_rcu(); | ||
516 | skb_queue_purge(&sdata->u.sta.skb_queue); | ||
517 | |||
518 | if (local->scan_dev == sdata->dev) { | ||
519 | if (!local->ops->hw_scan) { | ||
520 | local->sta_sw_scanning = 0; | ||
521 | cancel_delayed_work(&local->scan_work); | ||
522 | } else | ||
523 | local->sta_hw_scanning = 0; | ||
524 | } | ||
525 | |||
526 | sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED; | ||
527 | kfree(sdata->u.sta.extra_ie); | ||
528 | sdata->u.sta.extra_ie = NULL; | ||
529 | sdata->u.sta.extra_ie_len = 0; | ||
530 | /* fall through */ | ||
531 | default: | ||
532 | conf.vif = &sdata->vif; | ||
533 | conf.type = sdata->vif.type; | ||
534 | conf.mac_addr = dev->dev_addr; | ||
535 | /* disable all keys for as long as this netdev is down */ | ||
536 | ieee80211_disable_keys(sdata); | ||
537 | local->ops->remove_interface(local_to_hw(local), &conf); | ||
538 | } | ||
539 | |||
540 | sdata->bss = NULL; | ||
541 | |||
542 | if (local->open_count == 0) { | ||
543 | if (netif_running(local->mdev)) | ||
544 | dev_close(local->mdev); | ||
545 | |||
546 | if (local->ops->stop) | ||
547 | local->ops->stop(local_to_hw(local)); | ||
548 | |||
549 | ieee80211_led_radio(local, 0); | ||
550 | |||
551 | flush_workqueue(local->hw.workqueue); | ||
552 | |||
553 | tasklet_disable(&local->tx_pending_tasklet); | ||
554 | tasklet_disable(&local->tasklet); | ||
555 | } | ||
556 | |||
557 | return 0; | ||
558 | } | ||
559 | |||
560 | int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid) | ||
561 | { | ||
562 | struct ieee80211_local *local = hw_to_local(hw); | ||
563 | struct sta_info *sta; | ||
564 | struct ieee80211_sub_if_data *sdata; | ||
565 | u16 start_seq_num = 0; | ||
566 | u8 *state; | ||
567 | int ret; | ||
568 | DECLARE_MAC_BUF(mac); | ||
569 | |||
570 | if (tid >= STA_TID_NUM) | ||
571 | return -EINVAL; | ||
572 | |||
573 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
574 | printk(KERN_DEBUG "Open BA session requested for %s tid %u\n", | ||
575 | print_mac(mac, ra), tid); | ||
576 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
577 | |||
578 | rcu_read_lock(); | ||
579 | |||
580 | sta = sta_info_get(local, ra); | ||
581 | if (!sta) { | ||
582 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
583 | printk(KERN_DEBUG "Could not find the station\n"); | ||
584 | #endif | ||
585 | ret = -ENOENT; | ||
586 | goto exit; | ||
587 | } | ||
588 | |||
589 | spin_lock_bh(&sta->lock); | ||
590 | |||
591 | /* we have tried too many times, receiver does not want A-MPDU */ | ||
592 | if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { | ||
593 | ret = -EBUSY; | ||
594 | goto err_unlock_sta; | ||
595 | } | ||
596 | |||
597 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
598 | /* check if the TID is not in aggregation flow already */ | ||
599 | if (*state != HT_AGG_STATE_IDLE) { | ||
600 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
601 | printk(KERN_DEBUG "BA request denied - session is not " | ||
602 | "idle on tid %u\n", tid); | ||
603 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
604 | ret = -EAGAIN; | ||
605 | goto err_unlock_sta; | ||
606 | } | ||
607 | |||
608 | /* prepare A-MPDU MLME for Tx aggregation */ | ||
609 | sta->ampdu_mlme.tid_tx[tid] = | ||
610 | kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); | ||
611 | if (!sta->ampdu_mlme.tid_tx[tid]) { | ||
612 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
613 | if (net_ratelimit()) | ||
614 | printk(KERN_ERR "allocate tx mlme to tid %d failed\n", | ||
615 | tid); | ||
616 | #endif | ||
617 | ret = -ENOMEM; | ||
618 | goto err_unlock_sta; | ||
619 | } | ||
620 | /* Tx timer */ | ||
621 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = | ||
622 | sta_addba_resp_timer_expired; | ||
623 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data = | ||
624 | (unsigned long)&sta->timer_to_tid[tid]; | ||
625 | init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
626 | |||
627 | /* create a new queue for this aggregation */ | ||
628 | ret = ieee80211_ht_agg_queue_add(local, sta, tid); | ||
629 | |||
630 | /* in case no queue is available for aggregation, | ||
631 | * don't switch to aggregation */ | ||
632 | if (ret) { | ||
633 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
634 | printk(KERN_DEBUG "BA request denied - queue unavailable for" | ||
635 | " tid %d\n", tid); | ||
636 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
637 | goto err_unlock_queue; | ||
638 | } | ||
639 | sdata = sta->sdata; | ||
640 | |||
641 | /* Ok, the Addba frame hasn't been sent yet, but if the driver calls | ||
642 | * the callback right away, it must see that the flow has begun */ | ||
643 | *state |= HT_ADDBA_REQUESTED_MSK; | ||
644 | |||
645 | if (local->ops->ampdu_action) | ||
646 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START, | ||
647 | ra, tid, &start_seq_num); | ||
648 | |||
649 | if (ret) { | ||
650 | /* No need to requeue the packets in the agg queue, since we | ||
651 | * held the tx lock: no packet could be enqueued to the newly | ||
652 | * allocated queue */ | ||
653 | ieee80211_ht_agg_queue_remove(local, sta, tid, 0); | ||
654 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
655 | printk(KERN_DEBUG "BA request denied - HW unavailable for" | ||
656 | " tid %d\n", tid); | ||
657 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
658 | *state = HT_AGG_STATE_IDLE; | ||
659 | goto err_unlock_queue; | ||
660 | } | ||
661 | |||
662 | /* Will put all the packets in the new SW queue */ | ||
663 | ieee80211_requeue(local, ieee802_1d_to_ac[tid]); | ||
664 | spin_unlock_bh(&sta->lock); | ||
665 | |||
666 | /* send an addBA request */ | ||
667 | sta->ampdu_mlme.dialog_token_allocator++; | ||
668 | sta->ampdu_mlme.tid_tx[tid]->dialog_token = | ||
669 | sta->ampdu_mlme.dialog_token_allocator; | ||
670 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; | ||
671 | |||
672 | |||
673 | ieee80211_send_addba_request(sta->sdata->dev, ra, tid, | ||
674 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, | ||
675 | sta->ampdu_mlme.tid_tx[tid]->ssn, | ||
676 | 0x40, 5000); | ||
677 | /* activate the timer for the recipient's addBA response */ | ||
678 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = | ||
679 | jiffies + ADDBA_RESP_INTERVAL; | ||
680 | add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
681 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
682 | printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); | ||
683 | #endif | ||
684 | goto exit; | ||
685 | |||
686 | err_unlock_queue: | ||
687 | kfree(sta->ampdu_mlme.tid_tx[tid]); | ||
688 | sta->ampdu_mlme.tid_tx[tid] = NULL; | ||
689 | ret = -EBUSY; | ||
690 | err_unlock_sta: | ||
691 | spin_unlock_bh(&sta->lock); | ||
692 | exit: | ||
693 | rcu_read_unlock(); | ||
694 | return ret; | ||
695 | } | ||
696 | EXPORT_SYMBOL(ieee80211_start_tx_ba_session); | ||
697 | |||
698 | int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw, | ||
699 | u8 *ra, u16 tid, | ||
700 | enum ieee80211_back_parties initiator) | ||
701 | { | ||
702 | struct ieee80211_local *local = hw_to_local(hw); | ||
703 | struct sta_info *sta; | ||
704 | u8 *state; | ||
705 | int ret = 0; | ||
706 | DECLARE_MAC_BUF(mac); | ||
707 | |||
708 | if (tid >= STA_TID_NUM) | ||
709 | return -EINVAL; | ||
710 | |||
711 | rcu_read_lock(); | ||
712 | sta = sta_info_get(local, ra); | ||
713 | if (!sta) { | ||
714 | rcu_read_unlock(); | ||
715 | return -ENOENT; | ||
716 | } | ||
717 | |||
718 | /* check if the TID is in aggregation */ | ||
719 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
720 | spin_lock_bh(&sta->lock); | ||
721 | |||
722 | if (*state != HT_AGG_STATE_OPERATIONAL) { | ||
723 | ret = -ENOENT; | ||
724 | goto stop_BA_exit; | ||
725 | } | ||
726 | |||
727 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
728 | printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n", | ||
729 | print_mac(mac, ra), tid); | ||
730 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
731 | |||
732 | ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]); | ||
733 | |||
734 | *state = HT_AGG_STATE_REQ_STOP_BA_MSK | | ||
735 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | ||
736 | |||
737 | if (local->ops->ampdu_action) | ||
738 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP, | ||
739 | ra, tid, NULL); | ||
740 | |||
741 | /* in case the HW denied going back to legacy */ | ||
742 | if (ret) { | ||
743 | WARN_ON(ret != -EBUSY); | ||
744 | *state = HT_AGG_STATE_OPERATIONAL; | ||
745 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
746 | goto stop_BA_exit; | ||
747 | } | ||
748 | |||
749 | stop_BA_exit: | ||
750 | spin_unlock_bh(&sta->lock); | ||
751 | rcu_read_unlock(); | ||
752 | return ret; | ||
753 | } | ||
754 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); | ||
755 | |||
756 | void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) | ||
757 | { | ||
758 | struct ieee80211_local *local = hw_to_local(hw); | ||
759 | struct sta_info *sta; | ||
760 | u8 *state; | ||
761 | DECLARE_MAC_BUF(mac); | ||
762 | |||
763 | if (tid >= STA_TID_NUM) { | ||
764 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
765 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | ||
766 | tid, STA_TID_NUM); | ||
767 | #endif | ||
768 | return; | ||
769 | } | ||
770 | |||
771 | rcu_read_lock(); | ||
772 | sta = sta_info_get(local, ra); | ||
773 | if (!sta) { | ||
774 | rcu_read_unlock(); | ||
775 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
776 | printk(KERN_DEBUG "Could not find station: %s\n", | ||
777 | print_mac(mac, ra)); | ||
778 | #endif | ||
779 | return; | ||
780 | } | ||
781 | |||
782 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
783 | spin_lock_bh(&sta->lock); | ||
784 | |||
785 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | ||
786 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
787 | printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", | ||
788 | *state); | ||
789 | #endif | ||
790 | spin_unlock_bh(&sta->lock); | ||
791 | rcu_read_unlock(); | ||
792 | return; | ||
793 | } | ||
794 | |||
795 | WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK); | ||
796 | |||
797 | *state |= HT_ADDBA_DRV_READY_MSK; | ||
798 | |||
799 | if (*state == HT_AGG_STATE_OPERATIONAL) { | ||
800 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
801 | printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); | ||
802 | #endif | ||
803 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | ||
804 | } | ||
805 | spin_unlock_bh(&sta->lock); | ||
806 | rcu_read_unlock(); | ||
807 | } | ||
808 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); | ||
809 | |||
810 | void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid) | ||
811 | { | ||
812 | struct ieee80211_local *local = hw_to_local(hw); | ||
813 | struct sta_info *sta; | ||
814 | u8 *state; | ||
815 | int agg_queue; | ||
816 | DECLARE_MAC_BUF(mac); | ||
817 | |||
818 | if (tid >= STA_TID_NUM) { | ||
819 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
820 | printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", | ||
821 | tid, STA_TID_NUM); | ||
822 | #endif | ||
823 | return; | ||
824 | } | ||
825 | |||
826 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
827 | printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n", | ||
828 | print_mac(mac, ra), tid); | ||
829 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
830 | |||
831 | rcu_read_lock(); | ||
832 | sta = sta_info_get(local, ra); | ||
833 | if (!sta) { | ||
834 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
835 | printk(KERN_DEBUG "Could not find station: %s\n", | ||
836 | print_mac(mac, ra)); | ||
837 | #endif | ||
838 | rcu_read_unlock(); | ||
839 | return; | ||
840 | } | ||
841 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
842 | |||
843 | /* NOTE: no need to use sta->lock in this state check, as | ||
844 | * ieee80211_stop_tx_ba_session allows only one stop call to | ||
845 | * pass through per sta/tid | ||
846 | */ | ||
847 | if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { | ||
848 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
849 | printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); | ||
850 | #endif | ||
851 | rcu_read_unlock(); | ||
852 | return; | ||
853 | } | ||
854 | |||
855 | if (*state & HT_AGG_STATE_INITIATOR_MSK) | ||
856 | ieee80211_send_delba(sta->sdata->dev, ra, tid, | ||
857 | WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); | ||
858 | |||
859 | agg_queue = sta->tid_to_tx_q[tid]; | ||
860 | |||
861 | ieee80211_ht_agg_queue_remove(local, sta, tid, 1); | ||
862 | |||
863 | /* We just requeued all the frames that were in the | ||
864 | * removed queue, and since we might miss a softirq we do | ||
865 | * netif_schedule_queue. ieee80211_wake_queue is not used | ||
866 | * here as this queue is not necessarily stopped | ||
867 | */ | ||
868 | netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue)); | ||
869 | spin_lock_bh(&sta->lock); | ||
870 | *state = HT_AGG_STATE_IDLE; | ||
871 | sta->ampdu_mlme.addba_req_num[tid] = 0; | ||
872 | kfree(sta->ampdu_mlme.tid_tx[tid]); | ||
873 | sta->ampdu_mlme.tid_tx[tid] = NULL; | ||
874 | spin_unlock_bh(&sta->lock); | ||
875 | |||
876 | rcu_read_unlock(); | ||
877 | } | ||
878 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb); | ||
879 | |||
880 | void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, | ||
881 | const u8 *ra, u16 tid) | ||
882 | { | ||
883 | struct ieee80211_local *local = hw_to_local(hw); | ||
884 | struct ieee80211_ra_tid *ra_tid; | ||
885 | struct sk_buff *skb = dev_alloc_skb(0); | ||
886 | |||
887 | if (unlikely(!skb)) { | ||
888 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
889 | if (net_ratelimit()) | ||
890 | printk(KERN_WARNING "%s: Not enough memory, " | ||
891 | "dropping start BA session", skb->dev->name); | ||
892 | #endif | ||
893 | return; | ||
894 | } | ||
895 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
896 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | ||
897 | ra_tid->tid = tid; | ||
898 | |||
899 | skb->pkt_type = IEEE80211_ADDBA_MSG; | ||
900 | skb_queue_tail(&local->skb_queue, skb); | ||
901 | tasklet_schedule(&local->tasklet); | ||
902 | } | ||
903 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); | ||
904 | |||
905 | void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, | ||
906 | const u8 *ra, u16 tid) | ||
907 | { | ||
908 | struct ieee80211_local *local = hw_to_local(hw); | ||
909 | struct ieee80211_ra_tid *ra_tid; | ||
910 | struct sk_buff *skb = dev_alloc_skb(0); | ||
911 | |||
912 | if (unlikely(!skb)) { | ||
913 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
914 | if (net_ratelimit()) | ||
915 | printk(KERN_WARNING "%s: Not enough memory, " | ||
916 | "dropping stop BA session", skb->dev->name); | ||
917 | #endif | ||
918 | return; | ||
919 | } | ||
920 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
921 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | ||
922 | ra_tid->tid = tid; | ||
923 | |||
924 | skb->pkt_type = IEEE80211_DELBA_MSG; | ||
925 | skb_queue_tail(&local->skb_queue, skb); | ||
926 | tasklet_schedule(&local->tasklet); | ||
927 | } | ||
928 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); | ||
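The two *_cb_irqsafe() helpers above are the driver-facing half of the aggregation handshake: a driver that completes ADDBA setup or teardown asynchronously (for example from its interrupt path) reports back to mac80211 through them. A hypothetical ampdu_action() sketch matching the ops->ampdu_action(hw, action, ra, tid, &ssn) call sites visible in this file; the driver function name and the immediate-completion behaviour are assumptions:

	/* Hypothetical driver ampdu_action() sketch: acknowledge TX aggregation
	 * start/stop back to mac80211 via the irqsafe callbacks above. */
	static int example_ampdu_action(struct ieee80211_hw *hw,
					enum ieee80211_ampdu_mlme_action action,
					const u8 *addr, u16 tid, u16 *ssn)
	{
		switch (action) {
		case IEEE80211_AMPDU_TX_START:
			/* ...program the hardware for A-MPDU TX on this TID... */
			ieee80211_start_tx_ba_cb_irqsafe(hw, addr, tid);
			return 0;
		case IEEE80211_AMPDU_TX_STOP:
			/* ...flush and disable A-MPDU TX on this TID... */
			ieee80211_stop_tx_ba_cb_irqsafe(hw, addr, tid);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}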
929 | |||
930 | static void ieee80211_set_multicast_list(struct net_device *dev) | ||
931 | { | ||
932 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
933 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
934 | int allmulti, promisc, sdata_allmulti, sdata_promisc; | ||
935 | |||
936 | allmulti = !!(dev->flags & IFF_ALLMULTI); | ||
937 | promisc = !!(dev->flags & IFF_PROMISC); | ||
938 | sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI); | ||
939 | sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC); | ||
940 | |||
941 | if (allmulti != sdata_allmulti) { | ||
942 | if (dev->flags & IFF_ALLMULTI) | ||
943 | atomic_inc(&local->iff_allmultis); | ||
944 | else | ||
945 | atomic_dec(&local->iff_allmultis); | ||
946 | sdata->flags ^= IEEE80211_SDATA_ALLMULTI; | ||
947 | } | ||
948 | |||
949 | if (promisc != sdata_promisc) { | ||
950 | if (dev->flags & IFF_PROMISC) | ||
951 | atomic_inc(&local->iff_promiscs); | ||
952 | else | ||
953 | atomic_dec(&local->iff_promiscs); | ||
954 | sdata->flags ^= IEEE80211_SDATA_PROMISC; | ||
955 | } | ||
956 | |||
957 | dev_mc_sync(local->mdev, dev); | ||
958 | } | ||
959 | |||
960 | static const struct header_ops ieee80211_header_ops = { | ||
961 | .create = eth_header, | ||
962 | .parse = header_parse_80211, | ||
963 | .rebuild = eth_rebuild_header, | ||
964 | .cache = eth_header_cache, | ||
965 | .cache_update = eth_header_cache_update, | ||
966 | }; | ||
967 | |||
968 | void ieee80211_if_setup(struct net_device *dev) | ||
969 | { | ||
970 | ether_setup(dev); | ||
971 | dev->hard_start_xmit = ieee80211_subif_start_xmit; | ||
972 | dev->wireless_handlers = &ieee80211_iw_handler_def; | ||
973 | dev->set_multicast_list = ieee80211_set_multicast_list; | ||
974 | dev->change_mtu = ieee80211_change_mtu; | ||
975 | dev->open = ieee80211_open; | ||
976 | dev->stop = ieee80211_stop; | ||
977 | dev->destructor = free_netdev; | ||
978 | } | ||
979 | |||
980 | /* everything else */ | 152 | /* everything else */ |
981 | 153 | ||
982 | int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed) | 154 | int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed) |
@@ -987,18 +159,21 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed) | |||
987 | if (WARN_ON(!netif_running(sdata->dev))) | 159 | if (WARN_ON(!netif_running(sdata->dev))) |
988 | return 0; | 160 | return 0; |
989 | 161 | ||
162 | if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) | ||
163 | return -EINVAL; | ||
164 | |||
990 | if (!local->ops->config_interface) | 165 | if (!local->ops->config_interface) |
991 | return 0; | 166 | return 0; |
992 | 167 | ||
993 | memset(&conf, 0, sizeof(conf)); | 168 | memset(&conf, 0, sizeof(conf)); |
994 | conf.changed = changed; | 169 | conf.changed = changed; |
995 | 170 | ||
996 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 171 | if (sdata->vif.type == NL80211_IFTYPE_STATION || |
997 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 172 | sdata->vif.type == NL80211_IFTYPE_ADHOC) { |
998 | conf.bssid = sdata->u.sta.bssid; | 173 | conf.bssid = sdata->u.sta.bssid; |
999 | conf.ssid = sdata->u.sta.ssid; | 174 | conf.ssid = sdata->u.sta.ssid; |
1000 | conf.ssid_len = sdata->u.sta.ssid_len; | 175 | conf.ssid_len = sdata->u.sta.ssid_len; |
1001 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { | 176 | } else if (sdata->vif.type == NL80211_IFTYPE_AP) { |
1002 | conf.bssid = sdata->dev->dev_addr; | 177 | conf.bssid = sdata->dev->dev_addr; |
1003 | conf.ssid = sdata->u.ap.ssid; | 178 | conf.ssid = sdata->u.ap.ssid; |
1004 | conf.ssid_len = sdata->u.ap.ssid_len; | 179 | conf.ssid_len = sdata->u.ap.ssid_len; |
@@ -1027,7 +202,7 @@ int ieee80211_hw_config(struct ieee80211_local *local) | |||
1027 | struct ieee80211_channel *chan; | 202 | struct ieee80211_channel *chan; |
1028 | int ret = 0; | 203 | int ret = 0; |
1029 | 204 | ||
1030 | if (local->sta_sw_scanning) | 205 | if (local->sw_scanning) |
1031 | chan = local->scan_channel; | 206 | chan = local->scan_channel; |
1032 | else | 207 | else |
1033 | chan = local->oper_channel; | 208 | chan = local->oper_channel; |
@@ -1099,8 +274,8 @@ u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht, | |||
1099 | ht_conf.ht_supported = 1; | 274 | ht_conf.ht_supported = 1; |
1100 | 275 | ||
1101 | ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap; | 276 | ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap; |
1102 | ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); | 277 | ht_conf.cap &= ~(IEEE80211_HT_CAP_SM_PS); |
1103 | ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS; | 278 | ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_SM_PS; |
1104 | ht_bss_conf.primary_channel = req_bss_cap->primary_channel; | 279 | ht_bss_conf.primary_channel = req_bss_cap->primary_channel; |
1105 | ht_bss_conf.bss_cap = req_bss_cap->bss_cap; | 280 | ht_bss_conf.bss_cap = req_bss_cap->bss_cap; |
1106 | ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; | 281 | ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; |
@@ -1152,6 +327,9 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | |||
1152 | { | 327 | { |
1153 | struct ieee80211_local *local = sdata->local; | 328 | struct ieee80211_local *local = sdata->local; |
1154 | 329 | ||
330 | if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) | ||
331 | return; | ||
332 | |||
1155 | if (!changed) | 333 | if (!changed) |
1156 | return; | 334 | return; |
1157 | 335 | ||
@@ -1162,10 +340,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | |||
1162 | changed); | 340 | changed); |
1163 | } | 341 | } |
1164 | 342 | ||
1165 | u32 ieee80211_reset_erp_info(struct net_device *dev) | 343 | u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) |
1166 | { | 344 | { |
1167 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1168 | |||
1169 | sdata->bss_conf.use_cts_prot = 0; | 345 | sdata->bss_conf.use_cts_prot = 0; |
1170 | sdata->bss_conf.use_short_preamble = 0; | 346 | sdata->bss_conf.use_short_preamble = 0; |
1171 | return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE; | 347 | return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE; |
@@ -1244,9 +420,10 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local, | |||
1244 | struct ieee80211_key *key, | 420 | struct ieee80211_key *key, |
1245 | struct sk_buff *skb) | 421 | struct sk_buff *skb) |
1246 | { | 422 | { |
1247 | int hdrlen, iv_len, mic_len; | 423 | unsigned int hdrlen, iv_len, mic_len; |
424 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
1248 | 425 | ||
1249 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | 426 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
1250 | 427 | ||
1251 | if (!key) | 428 | if (!key) |
1252 | goto no_key; | 429 | goto no_key; |
@@ -1268,24 +445,20 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local, | |||
1268 | goto no_key; | 445 | goto no_key; |
1269 | } | 446 | } |
1270 | 447 | ||
1271 | if (skb->len >= mic_len && | 448 | if (skb->len >= hdrlen + mic_len && |
1272 | !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) | 449 | !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) |
1273 | skb_trim(skb, skb->len - mic_len); | 450 | skb_trim(skb, skb->len - mic_len); |
1274 | if (skb->len >= iv_len && skb->len > hdrlen) { | 451 | if (skb->len >= hdrlen + iv_len) { |
1275 | memmove(skb->data + iv_len, skb->data, hdrlen); | 452 | memmove(skb->data + iv_len, skb->data, hdrlen); |
1276 | skb_pull(skb, iv_len); | 453 | hdr = (struct ieee80211_hdr *)skb_pull(skb, iv_len); |
1277 | } | 454 | } |
1278 | 455 | ||
1279 | no_key: | 456 | no_key: |
1280 | { | 457 | if (ieee80211_is_data_qos(hdr->frame_control)) { |
1281 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 458 | hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); |
1282 | u16 fc = le16_to_cpu(hdr->frame_control); | 459 | memmove(skb->data + IEEE80211_QOS_CTL_LEN, skb->data, |
1283 | if ((fc & 0x8C) == 0x88) /* QoS Control Field */ { | 460 | hdrlen - IEEE80211_QOS_CTL_LEN); |
1284 | fc &= ~IEEE80211_STYPE_QOS_DATA; | 461 | skb_pull(skb, IEEE80211_QOS_CTL_LEN); |
1285 | hdr->frame_control = cpu_to_le16(fc); | ||
1286 | memmove(skb->data + 2, skb->data, hdrlen - 2); | ||
1287 | skb_pull(skb, 2); | ||
1288 | } | ||
1289 | } | 462 | } |
1290 | } | 463 | } |
1291 | 464 | ||
@@ -1369,6 +542,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
1369 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 542 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1370 | u16 frag, type; | 543 | u16 frag, type; |
1371 | __le16 fc; | 544 | __le16 fc; |
545 | struct ieee80211_supported_band *sband; | ||
1372 | struct ieee80211_tx_status_rtap_hdr *rthdr; | 546 | struct ieee80211_tx_status_rtap_hdr *rthdr; |
1373 | struct ieee80211_sub_if_data *sdata; | 547 | struct ieee80211_sub_if_data *sdata; |
1374 | struct net_device *prev_dev = NULL; | 548 | struct net_device *prev_dev = NULL; |
@@ -1376,47 +550,48 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
1376 | 550 | ||
1377 | rcu_read_lock(); | 551 | rcu_read_lock(); |
1378 | 552 | ||
1379 | if (info->status.excessive_retries) { | 553 | sta = sta_info_get(local, hdr->addr1); |
1380 | sta = sta_info_get(local, hdr->addr1); | 554 | |
1381 | if (sta) { | 555 | if (sta) { |
1382 | if (test_sta_flags(sta, WLAN_STA_PS)) { | 556 | if (info->status.excessive_retries && |
1383 | /* | 557 | test_sta_flags(sta, WLAN_STA_PS)) { |
1384 | * The STA is in power save mode, so assume | 558 | /* |
1385 | * that this TX packet failed because of that. | 559 | * The STA is in power save mode, so assume |
1386 | */ | 560 | * that this TX packet failed because of that. |
1387 | ieee80211_handle_filtered_frame(local, sta, skb); | 561 | */ |
1388 | rcu_read_unlock(); | 562 | ieee80211_handle_filtered_frame(local, sta, skb); |
1389 | return; | 563 | rcu_read_unlock(); |
1390 | } | 564 | return; |
1391 | } | 565 | } |
1392 | } | ||
1393 | 566 | ||
1394 | fc = hdr->frame_control; | 567 | fc = hdr->frame_control; |
568 | |||
569 | if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) && | ||
570 | (ieee80211_is_data_qos(fc))) { | ||
571 | u16 tid, ssn; | ||
572 | u8 *qc; | ||
1395 | 573 | ||
1396 | if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) && | ||
1397 | (ieee80211_is_data_qos(fc))) { | ||
1398 | u16 tid, ssn; | ||
1399 | u8 *qc; | ||
1400 | sta = sta_info_get(local, hdr->addr1); | ||
1401 | if (sta) { | ||
1402 | qc = ieee80211_get_qos_ctl(hdr); | 574 | qc = ieee80211_get_qos_ctl(hdr); |
1403 | tid = qc[0] & 0xf; | 575 | tid = qc[0] & 0xf; |
1404 | ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) | 576 | ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) |
1405 | & IEEE80211_SCTL_SEQ); | 577 | & IEEE80211_SCTL_SEQ); |
1406 | ieee80211_send_bar(sta->sdata->dev, hdr->addr1, | 578 | ieee80211_send_bar(sta->sdata, hdr->addr1, |
1407 | tid, ssn); | 579 | tid, ssn); |
1408 | } | 580 | } |
1409 | } | ||
1410 | 581 | ||
1411 | if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) { | 582 | if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) { |
1412 | sta = sta_info_get(local, hdr->addr1); | ||
1413 | if (sta) { | ||
1414 | ieee80211_handle_filtered_frame(local, sta, skb); | 583 | ieee80211_handle_filtered_frame(local, sta, skb); |
1415 | rcu_read_unlock(); | 584 | rcu_read_unlock(); |
1416 | return; | 585 | return; |
586 | } else { | ||
587 | if (info->status.excessive_retries) | ||
588 | sta->tx_retry_failed++; | ||
589 | sta->tx_retry_count += info->status.retry_count; | ||
1417 | } | 590 | } |
1418 | } else | 591 | |
1419 | rate_control_tx_status(local->mdev, skb); | 592 | sband = local->hw.wiphy->bands[info->band]; |
593 | rate_control_tx_status(local, sband, sta, skb); | ||
594 | } | ||
1420 | 595 | ||
1421 | rcu_read_unlock(); | 596 | rcu_read_unlock(); |
1422 | 597 | ||
@@ -1504,7 +679,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
1504 | 679 | ||
1505 | rcu_read_lock(); | 680 | rcu_read_lock(); |
1506 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 681 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
1507 | if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) { | 682 | if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { |
1508 | if (!netif_running(sdata->dev)) | 683 | if (!netif_running(sdata->dev)) |
1509 | continue; | 684 | continue; |
1510 | 685 | ||
@@ -1580,8 +755,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
1580 | 755 | ||
1581 | local->hw.queues = 1; /* default */ | 756 | local->hw.queues = 1; /* default */ |
1582 | 757 | ||
1583 | local->bridge_packets = 1; | ||
1584 | |||
1585 | local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; | 758 | local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; |
1586 | local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD; | 759 | local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD; |
1587 | local->short_retry_limit = 7; | 760 | local->short_retry_limit = 7; |
@@ -1592,7 +765,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
1592 | 765 | ||
1593 | spin_lock_init(&local->key_lock); | 766 | spin_lock_init(&local->key_lock); |
1594 | 767 | ||
1595 | INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work); | 768 | INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); |
1596 | 769 | ||
1597 | sta_info_init(local); | 770 | sta_info_init(local); |
1598 | 771 | ||
@@ -1619,7 +792,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1619 | int result; | 792 | int result; |
1620 | enum ieee80211_band band; | 793 | enum ieee80211_band band; |
1621 | struct net_device *mdev; | 794 | struct net_device *mdev; |
1622 | struct wireless_dev *mwdev; | 795 | struct ieee80211_master_priv *mpriv; |
1623 | 796 | ||
1624 | /* | 797 | /* |
1625 | * generic code guarantees at least one band, | 798 | * generic code guarantees at least one band, |
@@ -1639,6 +812,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1639 | } | 812 | } |
1640 | } | 813 | } |
1641 | 814 | ||
815 | /* if low-level driver supports AP, we also support VLAN */ | ||
816 | if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) | ||
817 | local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); | ||
818 | |||
819 | /* mac80211 always supports monitor */ | ||
820 | local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); | ||
821 | |||
1642 | result = wiphy_register(local->hw.wiphy); | 822 | result = wiphy_register(local->hw.wiphy); |
1643 | if (result < 0) | 823 | if (result < 0) |
1644 | return result; | 824 | return result; |
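The interface_modes additions in this hunk mean a driver only needs to declare the modes its hardware natively supports; mac80211 then adds AP_VLAN (when AP is supported) and monitor before wiphy_register(). A hypothetical driver-side registration sketch (the function name and the particular mode set are assumptions):

	/* Hypothetical driver sketch: advertise natively supported interface
	 * types before registering; mac80211 adds AP_VLAN and monitor itself. */
	static int example_driver_register(struct ieee80211_hw *hw)
	{
		hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
					     BIT(NL80211_IFTYPE_ADHOC) |
					     BIT(NL80211_IFTYPE_AP);

		return ieee80211_register_hw(hw);
	}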
@@ -1654,16 +834,14 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1654 | if (hw->queues < 4) | 834 | if (hw->queues < 4) |
1655 | hw->ampdu_queues = 0; | 835 | hw->ampdu_queues = 0; |
1656 | 836 | ||
1657 | mdev = alloc_netdev_mq(sizeof(struct wireless_dev), | 837 | mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv), |
1658 | "wmaster%d", ether_setup, | 838 | "wmaster%d", ether_setup, |
1659 | ieee80211_num_queues(hw)); | 839 | ieee80211_num_queues(hw)); |
1660 | if (!mdev) | 840 | if (!mdev) |
1661 | goto fail_mdev_alloc; | 841 | goto fail_mdev_alloc; |
1662 | 842 | ||
1663 | mwdev = netdev_priv(mdev); | 843 | mpriv = netdev_priv(mdev); |
1664 | mdev->ieee80211_ptr = mwdev; | 844 | mpriv->local = local; |
1665 | mwdev->wiphy = local->hw.wiphy; | ||
1666 | |||
1667 | local->mdev = mdev; | 845 | local->mdev = mdev; |
1668 | 846 | ||
1669 | ieee80211_rx_bss_list_init(local); | 847 | ieee80211_rx_bss_list_init(local); |
@@ -1745,7 +923,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1745 | 923 | ||
1746 | /* add one default STA interface */ | 924 | /* add one default STA interface */ |
1747 | result = ieee80211_if_add(local, "wlan%d", NULL, | 925 | result = ieee80211_if_add(local, "wlan%d", NULL, |
1748 | IEEE80211_IF_TYPE_STA, NULL); | 926 | NL80211_IFTYPE_STATION, NULL); |
1749 | if (result) | 927 | if (result) |
1750 | printk(KERN_WARNING "%s: Failed to add default virtual iface\n", | 928 | printk(KERN_WARNING "%s: Failed to add default virtual iface\n", |
1751 | wiphy_name(local->hw.wiphy)); | 929 | wiphy_name(local->hw.wiphy)); |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 35f2f95f2fa7..8013277924f2 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -12,6 +12,9 @@ | |||
12 | #include "ieee80211_i.h" | 12 | #include "ieee80211_i.h" |
13 | #include "mesh.h" | 13 | #include "mesh.h" |
14 | 14 | ||
15 | #define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) | ||
16 | #define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) | ||
17 | |||
15 | #define PP_OFFSET 1 /* Path Selection Protocol */ | 18 | #define PP_OFFSET 1 /* Path Selection Protocol */ |
16 | #define PM_OFFSET 5 /* Path Selection Metric */ | 19 | #define PM_OFFSET 5 /* Path Selection Metric */ |
17 | #define CC_OFFSET 9 /* Congestion Control Mode */ | 20 | #define CC_OFFSET 9 /* Congestion Control Mode */ |
@@ -35,19 +38,28 @@ void ieee80211s_stop(void) | |||
35 | kmem_cache_destroy(rm_cache); | 38 | kmem_cache_destroy(rm_cache); |
36 | } | 39 | } |
37 | 40 | ||
41 | static void ieee80211_mesh_housekeeping_timer(unsigned long data) | ||
42 | { | ||
43 | struct ieee80211_sub_if_data *sdata = (void *) data; | ||
44 | struct ieee80211_local *local = sdata->local; | ||
45 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
46 | |||
47 | ifmsh->housekeeping = true; | ||
48 | queue_work(local->hw.workqueue, &ifmsh->work); | ||
49 | } | ||
50 | |||
38 | /** | 51 | /** |
39 | * mesh_matches_local - check if the config of a mesh point matches ours | 52 | * mesh_matches_local - check if the config of a mesh point matches ours |
40 | * | 53 | * |
41 | * @ie: information elements of a management frame from the mesh peer | 54 | * @ie: information elements of a management frame from the mesh peer |
42 | * @dev: local mesh interface | 55 | * @sdata: local mesh subif |
43 | * | 56 | * |
44 | * This function checks if the mesh configuration of a mesh point matches the | 57 | * This function checks if the mesh configuration of a mesh point matches the |
45 | * local mesh configuration, i.e. if both nodes belong to the same mesh network. | 58 | * local mesh configuration, i.e. if both nodes belong to the same mesh network. |
46 | */ | 59 | */ |
47 | bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev) | 60 | bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata) |
48 | { | 61 | { |
49 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 62 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
50 | struct ieee80211_if_sta *sta = &sdata->u.sta; | ||
51 | 63 | ||
52 | /* | 64 | /* |
53 | * As support for each feature is added, check for matching | 65 | * As support for each feature is added, check for matching |
@@ -59,11 +71,11 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev) | |||
59 | * - MDA enabled | 71 | * - MDA enabled |
60 | * - Power management control on fc | 72 | * - Power management control on fc |
61 | */ | 73 | */ |
62 | if (sta->mesh_id_len == ie->mesh_id_len && | 74 | if (ifmsh->mesh_id_len == ie->mesh_id_len && |
63 | memcmp(sta->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && | 75 | memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && |
64 | memcmp(sta->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 && | 76 | memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 && |
65 | memcmp(sta->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 && | 77 | memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 && |
66 | memcmp(sta->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0) | 78 | memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0) |
67 | return true; | 79 | return true; |
68 | 80 | ||
69 | return false; | 81 | return false; |
@@ -73,10 +85,8 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev) | |||
73 | * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links | 85 | * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links |
74 | * | 86 | * |
75 | * @ie: information elements of a management frame from the mesh peer | 87 | * @ie: information elements of a management frame from the mesh peer |
76 | * @dev: local mesh interface | ||
77 | */ | 88 | */ |
78 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie, | 89 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) |
79 | struct net_device *dev) | ||
80 | { | 90 | { |
81 | return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0; | 91 | return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0; |
82 | } | 92 | } |
@@ -98,11 +108,11 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) | |||
98 | */ | 108 | */ |
99 | free_plinks = mesh_plink_availables(sdata); | 109 | free_plinks = mesh_plink_availables(sdata); |
100 | 110 | ||
101 | if (free_plinks != sdata->u.sta.accepting_plinks) | 111 | if (free_plinks != sdata->u.mesh.accepting_plinks) |
102 | ieee80211_sta_timer((unsigned long) sdata); | 112 | ieee80211_mesh_housekeeping_timer((unsigned long) sdata); |
103 | } | 113 | } |
104 | 114 | ||
105 | void mesh_ids_set_default(struct ieee80211_if_sta *sta) | 115 | void mesh_ids_set_default(struct ieee80211_if_mesh *sta) |
106 | { | 116 | { |
107 | u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff}; | 117 | u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff}; |
108 | 118 | ||
@@ -111,28 +121,26 @@ void mesh_ids_set_default(struct ieee80211_if_sta *sta) | |||
111 | memcpy(sta->mesh_cc_id, def_id, 4); | 121 | memcpy(sta->mesh_cc_id, def_id, 4); |
112 | } | 122 | } |
113 | 123 | ||
114 | int mesh_rmc_init(struct net_device *dev) | 124 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) |
115 | { | 125 | { |
116 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
117 | int i; | 126 | int i; |
118 | 127 | ||
119 | sdata->u.sta.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); | 128 | sdata->u.mesh.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); |
120 | if (!sdata->u.sta.rmc) | 129 | if (!sdata->u.mesh.rmc) |
121 | return -ENOMEM; | 130 | return -ENOMEM; |
122 | sdata->u.sta.rmc->idx_mask = RMC_BUCKETS - 1; | 131 | sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1; |
123 | for (i = 0; i < RMC_BUCKETS; i++) | 132 | for (i = 0; i < RMC_BUCKETS; i++) |
124 | INIT_LIST_HEAD(&sdata->u.sta.rmc->bucket[i].list); | 133 | INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list); |
125 | return 0; | 134 | return 0; |
126 | } | 135 | } |
127 | 136 | ||
128 | void mesh_rmc_free(struct net_device *dev) | 137 | void mesh_rmc_free(struct ieee80211_sub_if_data *sdata) |
129 | { | 138 | { |
130 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 139 | struct mesh_rmc *rmc = sdata->u.mesh.rmc; |
131 | struct mesh_rmc *rmc = sdata->u.sta.rmc; | ||
132 | struct rmc_entry *p, *n; | 140 | struct rmc_entry *p, *n; |
133 | int i; | 141 | int i; |
134 | 142 | ||
135 | if (!sdata->u.sta.rmc) | 143 | if (!sdata->u.mesh.rmc) |
136 | return; | 144 | return; |
137 | 145 | ||
138 | for (i = 0; i < RMC_BUCKETS; i++) | 146 | for (i = 0; i < RMC_BUCKETS; i++) |
@@ -142,7 +150,7 @@ void mesh_rmc_free(struct net_device *dev) | |||
142 | } | 150 | } |
143 | 151 | ||
144 | kfree(rmc); | 152 | kfree(rmc); |
145 | sdata->u.sta.rmc = NULL; | 153 | sdata->u.mesh.rmc = NULL; |
146 | } | 154 | } |
147 | 155 | ||
148 | /** | 156 | /** |
@@ -158,10 +166,9 @@ void mesh_rmc_free(struct net_device *dev) | |||
158 | * it. | 166 | * it. |
159 | */ | 167 | */ |
160 | int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, | 168 | int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, |
161 | struct net_device *dev) | 169 | struct ieee80211_sub_if_data *sdata) |
162 | { | 170 | { |
163 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 171 | struct mesh_rmc *rmc = sdata->u.mesh.rmc; |
164 | struct mesh_rmc *rmc = sdata->u.sta.rmc; | ||
165 | u32 seqnum = 0; | 172 | u32 seqnum = 0; |
166 | int entries = 0; | 173 | int entries = 0; |
167 | u8 idx; | 174 | u8 idx; |
@@ -194,10 +201,9 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, | |||
194 | return 0; | 201 | return 0; |
195 | } | 202 | } |
196 | 203 | ||
197 | void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev) | 204 | void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) |
198 | { | 205 | { |
199 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 206 | struct ieee80211_local *local = sdata->local; |
200 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
201 | struct ieee80211_supported_band *sband; | 207 | struct ieee80211_supported_band *sband; |
202 | u8 *pos; | 208 | u8 *pos; |
203 | int len, i, rate; | 209 | int len, i, rate; |
@@ -224,11 +230,11 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev) | |||
224 | } | 230 | } |
225 | } | 231 | } |
226 | 232 | ||
227 | pos = skb_put(skb, 2 + sdata->u.sta.mesh_id_len); | 233 | pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len); |
228 | *pos++ = WLAN_EID_MESH_ID; | 234 | *pos++ = WLAN_EID_MESH_ID; |
229 | *pos++ = sdata->u.sta.mesh_id_len; | 235 | *pos++ = sdata->u.mesh.mesh_id_len; |
230 | if (sdata->u.sta.mesh_id_len) | 236 | if (sdata->u.mesh.mesh_id_len) |
231 | memcpy(pos, sdata->u.sta.mesh_id, sdata->u.sta.mesh_id_len); | 237 | memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len); |
232 | 238 | ||
233 | pos = skb_put(skb, 21); | 239 | pos = skb_put(skb, 21); |
234 | *pos++ = WLAN_EID_MESH_CONFIG; | 240 | *pos++ = WLAN_EID_MESH_CONFIG; |
@@ -237,15 +243,15 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev) | |||
237 | *pos++ = 1; | 243 | *pos++ = 1; |
238 | 244 | ||
239 | /* Active path selection protocol ID */ | 245 | /* Active path selection protocol ID */ |
240 | memcpy(pos, sdata->u.sta.mesh_pp_id, 4); | 246 | memcpy(pos, sdata->u.mesh.mesh_pp_id, 4); |
241 | pos += 4; | 247 | pos += 4; |
242 | 248 | ||
243 | /* Active path selection metric ID */ | 249 | /* Active path selection metric ID */ |
244 | memcpy(pos, sdata->u.sta.mesh_pm_id, 4); | 250 | memcpy(pos, sdata->u.mesh.mesh_pm_id, 4); |
245 | pos += 4; | 251 | pos += 4; |
246 | 252 | ||
247 | /* Congestion control mode identifier */ | 253 | /* Congestion control mode identifier */ |
248 | memcpy(pos, sdata->u.sta.mesh_cc_id, 4); | 254 | memcpy(pos, sdata->u.mesh.mesh_cc_id, 4); |
249 | pos += 4; | 255 | pos += 4; |
250 | 256 | ||
251 | /* Channel precedence: | 257 | /* Channel precedence: |
@@ -255,17 +261,17 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev) | |||
255 | pos += 4; | 261 | pos += 4; |
256 | 262 | ||
257 | /* Mesh capability */ | 263 | /* Mesh capability */ |
258 | sdata->u.sta.accepting_plinks = mesh_plink_availables(sdata); | 264 | sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata); |
259 | *pos++ = sdata->u.sta.accepting_plinks ? ACCEPT_PLINKS : 0x00; | 265 | *pos++ = sdata->u.mesh.accepting_plinks ? ACCEPT_PLINKS : 0x00; |
260 | *pos++ = 0x00; | 266 | *pos++ = 0x00; |
261 | 267 | ||
262 | return; | 268 | return; |
263 | } | 269 | } |
264 | 270 | ||
265 | u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl) | 271 | u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) |
266 | { | 272 | { |
267 | /* Use last four bytes of hw addr and interface index as hash index */ | 273 | /* Use last four bytes of hw addr and interface index as hash index */ |
268 | return jhash_2words(*(u32 *)(addr+2), dev->ifindex, tbl->hash_rnd) | 274 | return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd) |
269 | & tbl->hash_mask; | 275 | & tbl->hash_mask; |
270 | } | 276 | } |
271 | 277 | ||
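A worked sketch of the hash above with made-up inputs (the address, ifindex 5 and the table size are hypothetical; the jhash_2words call and the mask are exactly as in the hunk):

        u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
        u32 idx;

        /*
         * *(u32 *)(addr + 2) reads the last four address bytes
         * (00:aa:bb:cc); they are mixed with the interface index and the
         * table's random seed, then masked down to a bucket number
         * (with hash_mask == 7, one of 8 buckets).
         */
        idx = jhash_2words(*(u32 *)(addr + 2), 5, tbl->hash_rnd)
              & tbl->hash_mask;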
@@ -344,10 +350,10 @@ static void ieee80211_mesh_path_timer(unsigned long data) | |||
344 | { | 350 | { |
345 | struct ieee80211_sub_if_data *sdata = | 351 | struct ieee80211_sub_if_data *sdata = |
346 | (struct ieee80211_sub_if_data *) data; | 352 | (struct ieee80211_sub_if_data *) data; |
347 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 353 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
348 | struct ieee80211_local *local = wdev_priv(&sdata->wdev); | 354 | struct ieee80211_local *local = sdata->local; |
349 | 355 | ||
350 | queue_work(local->hw.workqueue, &ifsta->work); | 356 | queue_work(local->hw.workqueue, &ifmsh->work); |
351 | } | 357 | } |
352 | 358 | ||
353 | struct mesh_table *mesh_table_grow(struct mesh_table *tbl) | 359 | struct mesh_table *mesh_table_grow(struct mesh_table *tbl) |
@@ -399,50 +405,264 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, | |||
399 | struct ieee80211_sub_if_data *sdata) | 405 | struct ieee80211_sub_if_data *sdata) |
400 | { | 406 | { |
401 | meshhdr->flags = 0; | 407 | meshhdr->flags = 0; |
402 | meshhdr->ttl = sdata->u.sta.mshcfg.dot11MeshTTL; | 408 | meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; |
403 | put_unaligned(cpu_to_le32(sdata->u.sta.mesh_seqnum), &meshhdr->seqnum); | 409 | put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum); |
404 | sdata->u.sta.mesh_seqnum++; | 410 | sdata->u.mesh.mesh_seqnum++; |
405 | 411 | ||
406 | return 6; | 412 | return 6; |
407 | } | 413 | } |
408 | 414 | ||
415 | static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata, | ||
416 | struct ieee80211_if_mesh *ifmsh) | ||
417 | { | ||
418 | bool free_plinks; | ||
419 | |||
420 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
421 | printk(KERN_DEBUG "%s: running mesh housekeeping\n", | ||
422 | sdata->dev->name); | ||
423 | #endif | ||
424 | |||
425 | ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); | ||
426 | mesh_path_expire(sdata); | ||
427 | |||
428 | free_plinks = mesh_plink_availables(sdata); | ||
429 | if (free_plinks != sdata->u.mesh.accepting_plinks) | ||
430 | ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); | ||
431 | |||
432 | ifmsh->housekeeping = false; | ||
433 | mod_timer(&ifmsh->housekeeping_timer, | ||
434 | round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); | ||
435 | } | ||
436 | |||
437 | |||
438 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) | ||
439 | { | ||
440 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
441 | struct ieee80211_local *local = sdata->local; | ||
442 | |||
443 | ifmsh->housekeeping = true; | ||
444 | queue_work(local->hw.workqueue, &ifmsh->work); | ||
445 | ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); | ||
446 | } | ||
447 | |||
448 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) | ||
449 | { | ||
450 | del_timer_sync(&sdata->u.mesh.housekeeping_timer); | ||
451 | /* | ||
452 | * If the timer fired while we waited for it, it will have | ||
453 | * requeued the work. Now the work will be running again | ||
454 | * but will not rearm the timer again because it checks | ||
455 | * whether the interface is running, which, at this point, | ||
456 | * it no longer is. | ||
457 | */ | ||
458 | cancel_work_sync(&sdata->u.mesh.work); | ||
459 | |||
460 | /* | ||
461 | * When we get here, the interface is marked down. | ||
462 | * Call synchronize_rcu() to wait for the RX path | ||
463 | * should it be using the interface and enqueuing | ||
464 | * frames at this very time on another CPU. | ||
465 | */ | ||
466 | synchronize_rcu(); | ||
467 | skb_queue_purge(&sdata->u.mesh.skb_queue); | ||
468 | } | ||
469 | |||
470 | static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, | ||
471 | u16 stype, | ||
472 | struct ieee80211_mgmt *mgmt, | ||
473 | size_t len, | ||
474 | struct ieee80211_rx_status *rx_status) | ||
475 | { | ||
476 | struct ieee80211_local *local = sdata->local; | ||
477 | struct ieee802_11_elems elems; | ||
478 | struct ieee80211_channel *channel; | ||
479 | u64 supp_rates = 0; | ||
480 | size_t baselen; | ||
481 | int freq; | ||
482 | enum ieee80211_band band = rx_status->band; | ||
483 | |||
484 | /* ignore ProbeResp to foreign address */ | ||
485 | if (stype == IEEE80211_STYPE_PROBE_RESP && | ||
486 | compare_ether_addr(mgmt->da, sdata->dev->dev_addr)) | ||
487 | return; | ||
488 | |||
489 | baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; | ||
490 | if (baselen > len) | ||
491 | return; | ||
492 | |||
493 | ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, | ||
494 | &elems); | ||
495 | |||
496 | if (elems.ds_params && elems.ds_params_len == 1) | ||
497 | freq = ieee80211_channel_to_frequency(elems.ds_params[0]); | ||
498 | else | ||
499 | freq = rx_status->freq; | ||
500 | |||
501 | channel = ieee80211_get_channel(local->hw.wiphy, freq); | ||
502 | |||
503 | if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) | ||
504 | return; | ||
505 | |||
506 | if (elems.mesh_id && elems.mesh_config && | ||
507 | mesh_matches_local(&elems, sdata)) { | ||
508 | supp_rates = ieee80211_sta_get_rates(local, &elems, band); | ||
509 | |||
510 | mesh_neighbour_update(mgmt->sa, supp_rates, sdata, | ||
511 | mesh_peer_accepts_plinks(&elems)); | ||
512 | } | ||
513 | } | ||
514 | |||
515 | static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, | ||
516 | struct ieee80211_mgmt *mgmt, | ||
517 | size_t len, | ||
518 | struct ieee80211_rx_status *rx_status) | ||
519 | { | ||
520 | switch (mgmt->u.action.category) { | ||
521 | case PLINK_CATEGORY: | ||
522 | mesh_rx_plink_frame(sdata, mgmt, len, rx_status); | ||
523 | break; | ||
524 | case MESH_PATH_SEL_CATEGORY: | ||
525 | mesh_rx_path_sel_frame(sdata, mgmt, len); | ||
526 | break; | ||
527 | } | ||
528 | } | ||
529 | |||
530 | static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | ||
531 | struct sk_buff *skb) | ||
532 | { | ||
533 | struct ieee80211_rx_status *rx_status; | ||
534 | struct ieee80211_if_mesh *ifmsh; | ||
535 | struct ieee80211_mgmt *mgmt; | ||
536 | u16 stype; | ||
537 | |||
538 | ifmsh = &sdata->u.mesh; | ||
539 | |||
540 | rx_status = (struct ieee80211_rx_status *) skb->cb; | ||
541 | mgmt = (struct ieee80211_mgmt *) skb->data; | ||
542 | stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; | ||
543 | |||
544 | switch (stype) { | ||
545 | case IEEE80211_STYPE_PROBE_RESP: | ||
546 | case IEEE80211_STYPE_BEACON: | ||
547 | ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len, | ||
548 | rx_status); | ||
549 | break; | ||
550 | case IEEE80211_STYPE_ACTION: | ||
551 | ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); | ||
552 | break; | ||
553 | } | ||
554 | |||
555 | kfree_skb(skb); | ||
556 | } | ||
557 | |||
558 | static void ieee80211_mesh_work(struct work_struct *work) | ||
559 | { | ||
560 | struct ieee80211_sub_if_data *sdata = | ||
561 | container_of(work, struct ieee80211_sub_if_data, u.mesh.work); | ||
562 | struct ieee80211_local *local = sdata->local; | ||
563 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
564 | struct sk_buff *skb; | ||
565 | |||
566 | if (!netif_running(sdata->dev)) | ||
567 | return; | ||
568 | |||
569 | if (local->sw_scanning || local->hw_scanning) | ||
570 | return; | ||
571 | |||
572 | while ((skb = skb_dequeue(&ifmsh->skb_queue))) | ||
573 | ieee80211_mesh_rx_queued_mgmt(sdata, skb); | ||
574 | |||
575 | if (ifmsh->preq_queue_len && | ||
576 | time_after(jiffies, | ||
577 | ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval))) | ||
578 | mesh_path_start_discovery(sdata); | ||
579 | |||
580 | if (ifmsh->housekeeping) | ||
581 | ieee80211_mesh_housekeeping(sdata, ifmsh); | ||
582 | } | ||
583 | |||
584 | void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) | ||
585 | { | ||
586 | struct ieee80211_sub_if_data *sdata; | ||
587 | |||
588 | rcu_read_lock(); | ||
589 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
590 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
591 | queue_work(local->hw.workqueue, &sdata->u.mesh.work); | ||
592 | rcu_read_unlock(); | ||
593 | } | ||
594 | |||
409 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) | 595 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) |
410 | { | 596 | { |
411 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 597 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
412 | 598 | ||
413 | ifsta->mshcfg.dot11MeshRetryTimeout = MESH_RET_T; | 599 | INIT_WORK(&ifmsh->work, ieee80211_mesh_work); |
414 | ifsta->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T; | 600 | setup_timer(&ifmsh->housekeeping_timer, |
415 | ifsta->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T; | 601 | ieee80211_mesh_housekeeping_timer, |
416 | ifsta->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR; | 602 | (unsigned long) sdata); |
417 | ifsta->mshcfg.dot11MeshTTL = MESH_TTL; | 603 | skb_queue_head_init(&sdata->u.mesh.skb_queue); |
418 | ifsta->mshcfg.auto_open_plinks = true; | 604 | |
419 | ifsta->mshcfg.dot11MeshMaxPeerLinks = | 605 | ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T; |
606 | ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T; | ||
607 | ifmsh->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T; | ||
608 | ifmsh->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR; | ||
609 | ifmsh->mshcfg.dot11MeshTTL = MESH_TTL; | ||
610 | ifmsh->mshcfg.auto_open_plinks = true; | ||
611 | ifmsh->mshcfg.dot11MeshMaxPeerLinks = | ||
420 | MESH_MAX_ESTAB_PLINKS; | 612 | MESH_MAX_ESTAB_PLINKS; |
421 | ifsta->mshcfg.dot11MeshHWMPactivePathTimeout = | 613 | ifmsh->mshcfg.dot11MeshHWMPactivePathTimeout = |
422 | MESH_PATH_TIMEOUT; | 614 | MESH_PATH_TIMEOUT; |
423 | ifsta->mshcfg.dot11MeshHWMPpreqMinInterval = | 615 | ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval = |
424 | MESH_PREQ_MIN_INT; | 616 | MESH_PREQ_MIN_INT; |
425 | ifsta->mshcfg.dot11MeshHWMPnetDiameterTraversalTime = | 617 | ifmsh->mshcfg.dot11MeshHWMPnetDiameterTraversalTime = |
426 | MESH_DIAM_TRAVERSAL_TIME; | 618 | MESH_DIAM_TRAVERSAL_TIME; |
427 | ifsta->mshcfg.dot11MeshHWMPmaxPREQretries = | 619 | ifmsh->mshcfg.dot11MeshHWMPmaxPREQretries = |
428 | MESH_MAX_PREQ_RETRIES; | 620 | MESH_MAX_PREQ_RETRIES; |
429 | ifsta->mshcfg.path_refresh_time = | 621 | ifmsh->mshcfg.path_refresh_time = |
430 | MESH_PATH_REFRESH_TIME; | 622 | MESH_PATH_REFRESH_TIME; |
431 | ifsta->mshcfg.min_discovery_timeout = | 623 | ifmsh->mshcfg.min_discovery_timeout = |
432 | MESH_MIN_DISCOVERY_TIMEOUT; | 624 | MESH_MIN_DISCOVERY_TIMEOUT; |
433 | ifsta->accepting_plinks = true; | 625 | ifmsh->accepting_plinks = true; |
434 | ifsta->preq_id = 0; | 626 | ifmsh->preq_id = 0; |
435 | ifsta->dsn = 0; | 627 | ifmsh->dsn = 0; |
436 | atomic_set(&ifsta->mpaths, 0); | 628 | atomic_set(&ifmsh->mpaths, 0); |
437 | mesh_rmc_init(sdata->dev); | 629 | mesh_rmc_init(sdata); |
438 | ifsta->last_preq = jiffies; | 630 | ifmsh->last_preq = jiffies; |
439 | /* Allocate all mesh structures when creating the first mesh interface. */ | 631 | /* Allocate all mesh structures when creating the first mesh interface. */ |
440 | if (!mesh_allocated) | 632 | if (!mesh_allocated) |
441 | ieee80211s_init(); | 633 | ieee80211s_init(); |
442 | mesh_ids_set_default(ifsta); | 634 | mesh_ids_set_default(ifmsh); |
443 | setup_timer(&ifsta->mesh_path_timer, | 635 | setup_timer(&ifmsh->mesh_path_timer, |
444 | ieee80211_mesh_path_timer, | 636 | ieee80211_mesh_path_timer, |
445 | (unsigned long) sdata); | 637 | (unsigned long) sdata); |
446 | INIT_LIST_HEAD(&ifsta->preq_queue.list); | 638 | INIT_LIST_HEAD(&ifmsh->preq_queue.list); |
447 | spin_lock_init(&ifsta->mesh_preq_queue_lock); | 639 | spin_lock_init(&ifmsh->mesh_preq_queue_lock); |
640 | } | ||
641 | |||
642 | ieee80211_rx_result | ||
643 | ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | ||
644 | struct ieee80211_rx_status *rx_status) | ||
645 | { | ||
646 | struct ieee80211_local *local = sdata->local; | ||
647 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
648 | struct ieee80211_mgmt *mgmt; | ||
649 | u16 fc; | ||
650 | |||
651 | if (skb->len < 24) | ||
652 | return RX_DROP_MONITOR; | ||
653 | |||
654 | mgmt = (struct ieee80211_mgmt *) skb->data; | ||
655 | fc = le16_to_cpu(mgmt->frame_control); | ||
656 | |||
657 | switch (fc & IEEE80211_FCTL_STYPE) { | ||
658 | case IEEE80211_STYPE_PROBE_RESP: | ||
659 | case IEEE80211_STYPE_BEACON: | ||
660 | case IEEE80211_STYPE_ACTION: | ||
661 | memcpy(skb->cb, rx_status, sizeof(*rx_status)); | ||
662 | skb_queue_tail(&ifmsh->skb_queue, skb); | ||
663 | queue_work(local->hw.workqueue, &ifmsh->work); | ||
664 | return RX_QUEUED; | ||
665 | } | ||
666 | |||
667 | return RX_CONTINUE; | ||
448 | } | 668 | } |
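The ordering inside ieee80211_stop_mesh() above is what carries the correctness argument; a condensed sketch follows, with the consequences of reordering spelled out as comments (those annotations are editorial reasoning under the usual kernel timer/work/RCU rules, not additional mac80211 code):

        del_timer_sync(&ifmsh->housekeeping_timer);
        /* if cancel_work_sync() ran first, a late timer tick could
         * requeue the work after it had already been cancelled */
        cancel_work_sync(&ifmsh->work);
        /* if the purge ran before the grace period, an RX path still
         * executing on another CPU could enqueue a frame afterwards,
         * leaving it stranded on the queue of a torn-down interface */
        synchronize_rcu();
        skb_queue_purge(&ifmsh->skb_queue);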
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 7495fbb0d211..e10471c6ba42 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -47,7 +47,7 @@ enum mesh_path_flags { | |||
47 | * struct mesh_path - mac80211 mesh path structure | 47 | * struct mesh_path - mac80211 mesh path structure |
48 | * | 48 | * |
49 | * @dst: mesh path destination mac address | 49 | * @dst: mesh path destination mac address |
50 | * @dev: mesh path device | 50 | * @sdata: mesh subif |
51 | * @next_hop: mesh neighbor to which frames for this destination will be | 51 | * @next_hop: mesh neighbor to which frames for this destination will be |
52 | * forwarded | 52 | * forwarded |
53 | * @timer: mesh path discovery timer | 53 | * @timer: mesh path discovery timer |
@@ -64,14 +64,15 @@ enum mesh_path_flags { | |||
64 | * @state_lock: mesh path state lock | 64 | * @state_lock: mesh path state lock |
65 | * | 65 | * |
66 | * | 66 | * |
67 | * The combination of dst and dev is unique in the mesh path table. Since the | 67 | * The combination of dst and sdata is unique in the mesh path table. Since the |
68 | * next_hop STA is only protected by RCU as well, deleting the STA must also | 68 | * next_hop STA is only protected by RCU as well, deleting the STA must also |
69 | * remove/substitute the mesh_path structure and wait until that is no longer | 69 | * remove/substitute the mesh_path structure and wait until that is no longer |
70 | * reachable before destroying the STA completely. | 70 | * reachable before destroying the STA completely. |
71 | */ | 71 | */ |
72 | struct mesh_path { | 72 | struct mesh_path { |
73 | u8 dst[ETH_ALEN]; | 73 | u8 dst[ETH_ALEN]; |
74 | struct net_device *dev; | 74 | u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ |
75 | struct ieee80211_sub_if_data *sdata; | ||
75 | struct sta_info *next_hop; | 76 | struct sta_info *next_hop; |
76 | struct timer_list timer; | 77 | struct timer_list timer; |
77 | struct sk_buff_head frame_queue; | 78 | struct sk_buff_head frame_queue; |
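A hedged reader-side sketch of the rule described in the comment above; the local variables are hypothetical, and mesh_nexthop_lookup(), declared further down in this header, is the real consumer of next_hop:

        rcu_read_lock();
        mpath = mesh_path_lookup(dst, sdata);   /* RCU-protected lookup */
        if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
                /*
                 * next_hop stays valid for the whole read-side section:
                 * a writer must first unlink the mesh_path and wait out
                 * a grace period before it may free the STA it points to.
                 */
                memcpy(next_hop_addr, mpath->next_hop->sta.addr, ETH_ALEN);
        rcu_read_unlock();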
@@ -203,67 +204,82 @@ int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); | |||
203 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, | 204 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, |
204 | struct ieee80211_sub_if_data *sdata); | 205 | struct ieee80211_sub_if_data *sdata); |
205 | int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, | 206 | int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, |
206 | struct net_device *dev); | 207 | struct ieee80211_sub_if_data *sdata); |
207 | bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev); | 208 | bool mesh_matches_local(struct ieee802_11_elems *ie, |
208 | void mesh_ids_set_default(struct ieee80211_if_sta *sta); | 209 | struct ieee80211_sub_if_data *sdata); |
209 | void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev); | 210 | void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); |
210 | void mesh_rmc_free(struct net_device *dev); | 211 | void mesh_mgmt_ies_add(struct sk_buff *skb, |
211 | int mesh_rmc_init(struct net_device *dev); | 212 | struct ieee80211_sub_if_data *sdata); |
213 | void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); | ||
214 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); | ||
212 | void ieee80211s_init(void); | 215 | void ieee80211s_init(void); |
213 | void ieee80211s_stop(void); | 216 | void ieee80211s_stop(void); |
214 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); | 217 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); |
218 | ieee80211_rx_result | ||
219 | ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | ||
220 | struct ieee80211_rx_status *rx_status); | ||
221 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); | ||
222 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); | ||
215 | 223 | ||
216 | /* Mesh paths */ | 224 | /* Mesh paths */ |
217 | int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev); | 225 | int mesh_nexthop_lookup(struct sk_buff *skb, |
218 | void mesh_path_start_discovery(struct net_device *dev); | 226 | struct ieee80211_sub_if_data *sdata); |
219 | struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev); | 227 | void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata); |
220 | struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev); | 228 | struct mesh_path *mesh_path_lookup(u8 *dst, |
229 | struct ieee80211_sub_if_data *sdata); | ||
230 | struct mesh_path *mpp_path_lookup(u8 *dst, | ||
231 | struct ieee80211_sub_if_data *sdata); | ||
232 | int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata); | ||
233 | struct mesh_path *mesh_path_lookup_by_idx(int idx, | ||
234 | struct ieee80211_sub_if_data *sdata); | ||
221 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); | 235 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); |
222 | void mesh_path_expire(struct net_device *dev); | 236 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata); |
223 | void mesh_path_flush(struct net_device *dev); | 237 | void mesh_path_flush(struct ieee80211_sub_if_data *sdata); |
224 | void mesh_rx_path_sel_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | 238 | void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, |
225 | size_t len); | 239 | struct ieee80211_mgmt *mgmt, size_t len); |
226 | int mesh_path_add(u8 *dst, struct net_device *dev); | 240 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); |
227 | /* Mesh plinks */ | 241 | /* Mesh plinks */ |
228 | void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, | 242 | void mesh_neighbour_update(u8 *hw_addr, u64 rates, |
229 | bool add); | 243 | struct ieee80211_sub_if_data *sdata, bool add); |
230 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie, | 244 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); |
231 | struct net_device *dev); | ||
232 | void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); | 245 | void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); |
233 | void mesh_plink_broken(struct sta_info *sta); | 246 | void mesh_plink_broken(struct sta_info *sta); |
234 | void mesh_plink_deactivate(struct sta_info *sta); | 247 | void mesh_plink_deactivate(struct sta_info *sta); |
235 | int mesh_plink_open(struct sta_info *sta); | 248 | int mesh_plink_open(struct sta_info *sta); |
236 | int mesh_plink_close(struct sta_info *sta); | 249 | int mesh_plink_close(struct sta_info *sta); |
237 | void mesh_plink_block(struct sta_info *sta); | 250 | void mesh_plink_block(struct sta_info *sta); |
238 | void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | 251 | void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, |
239 | size_t len, struct ieee80211_rx_status *rx_status); | 252 | struct ieee80211_mgmt *mgmt, size_t len, |
253 | struct ieee80211_rx_status *rx_status); | ||
240 | 254 | ||
241 | /* Private interfaces */ | 255 | /* Private interfaces */ |
242 | /* Mesh tables */ | 256 | /* Mesh tables */ |
243 | struct mesh_table *mesh_table_alloc(int size_order); | 257 | struct mesh_table *mesh_table_alloc(int size_order); |
244 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs); | 258 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs); |
245 | struct mesh_table *mesh_table_grow(struct mesh_table *tbl); | 259 | struct mesh_table *mesh_table_grow(struct mesh_table *tbl); |
246 | u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl); | 260 | u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, |
261 | struct mesh_table *tbl); | ||
247 | /* Mesh paths */ | 262 | /* Mesh paths */ |
248 | int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra, | 263 | int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra, |
249 | struct net_device *dev); | 264 | struct ieee80211_sub_if_data *sdata); |
250 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); | 265 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); |
251 | void mesh_path_flush_pending(struct mesh_path *mpath); | 266 | void mesh_path_flush_pending(struct mesh_path *mpath); |
252 | void mesh_path_tx_pending(struct mesh_path *mpath); | 267 | void mesh_path_tx_pending(struct mesh_path *mpath); |
253 | int mesh_pathtbl_init(void); | 268 | int mesh_pathtbl_init(void); |
254 | void mesh_pathtbl_unregister(void); | 269 | void mesh_pathtbl_unregister(void); |
255 | int mesh_path_del(u8 *addr, struct net_device *dev); | 270 | int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); |
256 | void mesh_path_timer(unsigned long data); | 271 | void mesh_path_timer(unsigned long data); |
257 | void mesh_path_flush_by_nexthop(struct sta_info *sta); | 272 | void mesh_path_flush_by_nexthop(struct sta_info *sta); |
258 | void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev); | 273 | void mesh_path_discard_frame(struct sk_buff *skb, |
274 | struct ieee80211_sub_if_data *sdata); | ||
259 | 275 | ||
260 | #ifdef CONFIG_MAC80211_MESH | 276 | #ifdef CONFIG_MAC80211_MESH |
261 | extern int mesh_allocated; | 277 | extern int mesh_allocated; |
262 | 278 | ||
263 | static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) | 279 | static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) |
264 | { | 280 | { |
265 | return sdata->u.sta.mshcfg.dot11MeshMaxPeerLinks - | 281 | return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks - |
266 | atomic_read(&sdata->u.sta.mshstats.estab_plinks); | 282 | atomic_read(&sdata->u.mesh.mshstats.estab_plinks); |
267 | } | 283 | } |
268 | 284 | ||
269 | static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata) | 285 | static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata) |
@@ -281,8 +297,12 @@ static inline void mesh_path_activate(struct mesh_path *mpath) | |||
281 | for (i = 0; i <= x->hash_mask; i++) \ | 297 | for (i = 0; i <= x->hash_mask; i++) \ |
282 | hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list) | 298 | hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list) |
283 | 299 | ||
300 | void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); | ||
301 | |||
284 | #else | 302 | #else |
285 | #define mesh_allocated 0 | 303 | #define mesh_allocated 0 |
304 | static inline void | ||
305 | ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {} | ||
286 | #endif | 306 | #endif |
287 | 307 | ||
288 | #endif /* IEEE80211S_H */ | 308 | #endif /* IEEE80211S_H */ |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 08aca446ca01..501c7831adb4 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -64,14 +64,14 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae) | |||
64 | #define DSN_LT(x, y) ((long) (x) - (long) (y) < 0) | 64 | #define DSN_LT(x, y) ((long) (x) - (long) (y) < 0) |
65 | 65 | ||
66 | #define net_traversal_jiffies(s) \ | 66 | #define net_traversal_jiffies(s) \ |
67 | msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) | 67 | msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) |
68 | #define default_lifetime(s) \ | 68 | #define default_lifetime(s) \ |
69 | MSEC_TO_TU(s->u.sta.mshcfg.dot11MeshHWMPactivePathTimeout) | 69 | MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout) |
70 | #define min_preq_int_jiff(s) \ | 70 | #define min_preq_int_jiff(s) \ |
71 | (msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPpreqMinInterval)) | 71 | (msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval)) |
72 | #define max_preq_retries(s) (s->u.sta.mshcfg.dot11MeshHWMPmaxPREQretries) | 72 | #define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries) |
73 | #define disc_timeout_jiff(s) \ | 73 | #define disc_timeout_jiff(s) \ |
74 | msecs_to_jiffies(sdata->u.sta.mshcfg.min_discovery_timeout) | 74 | msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout) |
75 | 75 | ||
76 | enum mpath_frame_type { | 76 | enum mpath_frame_type { |
77 | MPATH_PREQ = 0, | 77 | MPATH_PREQ = 0, |
@@ -82,9 +82,9 @@ enum mpath_frame_type { | |||
82 | static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | 82 | static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, |
83 | u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst, | 83 | u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst, |
84 | __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime, | 84 | __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime, |
85 | __le32 metric, __le32 preq_id, struct net_device *dev) | 85 | __le32 metric, __le32 preq_id, struct ieee80211_sub_if_data *sdata) |
86 | { | 86 | { |
87 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 87 | struct ieee80211_local *local = sdata->local; |
88 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | 88 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); |
89 | struct ieee80211_mgmt *mgmt; | 89 | struct ieee80211_mgmt *mgmt; |
90 | u8 *pos; | 90 | u8 *pos; |
@@ -99,11 +99,11 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | |||
99 | mgmt = (struct ieee80211_mgmt *) | 99 | mgmt = (struct ieee80211_mgmt *) |
100 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); | 100 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); |
101 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); | 101 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); |
102 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 102 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
103 | IEEE80211_STYPE_ACTION); | 103 | IEEE80211_STYPE_ACTION); |
104 | 104 | ||
105 | memcpy(mgmt->da, da, ETH_ALEN); | 105 | memcpy(mgmt->da, da, ETH_ALEN); |
106 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 106 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
107 | /* BSSID is left zeroed, wildcard value */ | 107 | /* BSSID is left zeroed, wildcard value */ |
108 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; | 108 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; |
109 | mgmt->u.action.u.mesh_action.action_code = action; | 109 | mgmt->u.action.u.mesh_action.action_code = action; |
@@ -149,7 +149,7 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | |||
149 | pos += ETH_ALEN; | 149 | pos += ETH_ALEN; |
150 | memcpy(pos, &dst_dsn, 4); | 150 | memcpy(pos, &dst_dsn, 4); |
151 | 151 | ||
152 | ieee80211_sta_tx(dev, skb, 0); | 152 | ieee80211_tx_skb(sdata, skb, 0); |
153 | return 0; | 153 | return 0; |
154 | } | 154 | } |
155 | 155 | ||
@@ -161,9 +161,9 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | |||
161 | * @ra: node this frame is addressed to | 161 | * @ra: node this frame is addressed to |
162 | */ | 162 | */ |
163 | int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, | 163 | int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, |
164 | struct net_device *dev) | 164 | struct ieee80211_sub_if_data *sdata) |
165 | { | 165 | { |
166 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 166 | struct ieee80211_local *local = sdata->local; |
167 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | 167 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); |
168 | struct ieee80211_mgmt *mgmt; | 168 | struct ieee80211_mgmt *mgmt; |
169 | u8 *pos; | 169 | u8 *pos; |
@@ -178,11 +178,11 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, | |||
178 | mgmt = (struct ieee80211_mgmt *) | 178 | mgmt = (struct ieee80211_mgmt *) |
179 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); | 179 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); |
180 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); | 180 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); |
181 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 181 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
182 | IEEE80211_STYPE_ACTION); | 182 | IEEE80211_STYPE_ACTION); |
183 | 183 | ||
184 | memcpy(mgmt->da, ra, ETH_ALEN); | 184 | memcpy(mgmt->da, ra, ETH_ALEN); |
185 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 185 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
186 | /* BSSID is left zeroed, wildcard value */ | 186 | /* BSSID is left zeroed, wildcard value */ |
187 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; | 187 | mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; |
188 | mgmt->u.action.u.mesh_action.action_code = MPATH_PERR; | 188 | mgmt->u.action.u.mesh_action.action_code = MPATH_PERR; |
@@ -198,7 +198,7 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, | |||
198 | pos += ETH_ALEN; | 198 | pos += ETH_ALEN; |
199 | memcpy(pos, &dst_dsn, 4); | 199 | memcpy(pos, &dst_dsn, 4); |
200 | 200 | ||
201 | ieee80211_sta_tx(dev, skb, 0); | 201 | ieee80211_tx_skb(sdata, skb, 0); |
202 | return 0; | 202 | return 0; |
203 | } | 203 | } |
204 | 204 | ||
@@ -223,7 +223,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local, | |||
223 | /* bitrate is in units of 100 Kbps, while we need rate in units of | 223 | /* bitrate is in units of 100 Kbps, while we need rate in units of |
224 | * 1Mbps. This will be corrected on tx_time computation. | 224 | * 1Mbps. This will be corrected on tx_time computation. |
225 | */ | 225 | */ |
226 | rate = sband->bitrates[sta->txrate_idx].bitrate; | 226 | rate = sband->bitrates[sta->last_txrate_idx].bitrate; |
227 | tx_time = (device_constant + 10 * test_frame_len / rate); | 227 | tx_time = (device_constant + 10 * test_frame_len / rate); |
228 | estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); | 228 | estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); |
229 | result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT); | 229 | result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT); |
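Reading the three lines above as arithmetic; ARITH_SHIFT, s_unit and the error scaling are defined earlier in mesh_hwmp.c and are not visible here, so the concrete numbers below are assumptions:

        /*
         * estimated_retx = 2^(2*ARITH_SHIFT) / (s_unit - err)
         * result         = tx_time * estimated_retx >> (2*ARITH_SHIFT)
         *                = tx_time / (s_unit - err)   (ignoring truncation)
         *
         * With s_unit == 1 << ARITH_SHIFT and err assumed to be the
         * frame-error average scaled to the same unit, that is
         *
         *   (tx_time >> ARITH_SHIFT) / (1 - frame_error_rate)
         *
         * i.e. the expected airtime per successfully delivered test frame.
         * Assuming ARITH_SHIFT == 8, a 25 % error average gives err == 64,
         * so the metric becomes tx_time / (256 - 64) = tx_time / 192,
         * 4/3 of what a loss-free link at the same rate would report.
         */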
@@ -233,7 +233,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local, | |||
233 | /** | 233 | /** |
234 | * hwmp_route_info_get - Update routing info to originator and transmitter | 234 | * hwmp_route_info_get - Update routing info to originator and transmitter |
235 | * | 235 | * |
236 | * @dev: local mesh interface | 236 | * @sdata: local mesh subif |
237 | * @mgmt: mesh management frame | 237 | * @mgmt: mesh management frame |
238 | * @hwmp_ie: hwmp information element (PREP or PREQ) | 238 | * @hwmp_ie: hwmp information element (PREP or PREQ) |
239 | * | 239 | * |
@@ -246,11 +246,11 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local, | |||
246 | * Notes: this function is the only place (besides user-provided info) where | 246 | * Notes: this function is the only place (besides user-provided info) where |
247 | * path routing information is updated. | 247 | * path routing information is updated. |
248 | */ | 248 | */ |
249 | static u32 hwmp_route_info_get(struct net_device *dev, | 249 | static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, |
250 | struct ieee80211_mgmt *mgmt, | 250 | struct ieee80211_mgmt *mgmt, |
251 | u8 *hwmp_ie) | 251 | u8 *hwmp_ie) |
252 | { | 252 | { |
253 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 253 | struct ieee80211_local *local = sdata->local; |
254 | struct mesh_path *mpath; | 254 | struct mesh_path *mpath; |
255 | struct sta_info *sta; | 255 | struct sta_info *sta; |
256 | bool fresh_info; | 256 | bool fresh_info; |
@@ -301,14 +301,14 @@ static u32 hwmp_route_info_get(struct net_device *dev, | |||
301 | new_metric = MAX_METRIC; | 301 | new_metric = MAX_METRIC; |
302 | exp_time = TU_TO_EXP_TIME(orig_lifetime); | 302 | exp_time = TU_TO_EXP_TIME(orig_lifetime); |
303 | 303 | ||
304 | if (memcmp(orig_addr, dev->dev_addr, ETH_ALEN) == 0) { | 304 | if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { |
305 | /* This MP is the originator, we are not interested in this | 305 | /* This MP is the originator, we are not interested in this |
306 | * frame, except for updating transmitter's path info. | 306 | * frame, except for updating transmitter's path info. |
307 | */ | 307 | */ |
308 | process = false; | 308 | process = false; |
309 | fresh_info = false; | 309 | fresh_info = false; |
310 | } else { | 310 | } else { |
311 | mpath = mesh_path_lookup(orig_addr, dev); | 311 | mpath = mesh_path_lookup(orig_addr, sdata); |
312 | if (mpath) { | 312 | if (mpath) { |
313 | spin_lock_bh(&mpath->state_lock); | 313 | spin_lock_bh(&mpath->state_lock); |
314 | if (mpath->flags & MESH_PATH_FIXED) | 314 | if (mpath->flags & MESH_PATH_FIXED) |
@@ -324,8 +324,8 @@ static u32 hwmp_route_info_get(struct net_device *dev, | |||
324 | } | 324 | } |
325 | } | 325 | } |
326 | } else { | 326 | } else { |
327 | mesh_path_add(orig_addr, dev); | 327 | mesh_path_add(orig_addr, sdata); |
328 | mpath = mesh_path_lookup(orig_addr, dev); | 328 | mpath = mesh_path_lookup(orig_addr, sdata); |
329 | if (!mpath) { | 329 | if (!mpath) { |
330 | rcu_read_unlock(); | 330 | rcu_read_unlock(); |
331 | return 0; | 331 | return 0; |
@@ -357,7 +357,7 @@ static u32 hwmp_route_info_get(struct net_device *dev, | |||
357 | else { | 357 | else { |
358 | fresh_info = true; | 358 | fresh_info = true; |
359 | 359 | ||
360 | mpath = mesh_path_lookup(ta, dev); | 360 | mpath = mesh_path_lookup(ta, sdata); |
361 | if (mpath) { | 361 | if (mpath) { |
362 | spin_lock_bh(&mpath->state_lock); | 362 | spin_lock_bh(&mpath->state_lock); |
363 | if ((mpath->flags & MESH_PATH_FIXED) || | 363 | if ((mpath->flags & MESH_PATH_FIXED) || |
@@ -365,8 +365,8 @@ static u32 hwmp_route_info_get(struct net_device *dev, | |||
365 | (last_hop_metric > mpath->metric))) | 365 | (last_hop_metric > mpath->metric))) |
366 | fresh_info = false; | 366 | fresh_info = false; |
367 | } else { | 367 | } else { |
368 | mesh_path_add(ta, dev); | 368 | mesh_path_add(ta, sdata); |
369 | mpath = mesh_path_lookup(ta, dev); | 369 | mpath = mesh_path_lookup(ta, sdata); |
370 | if (!mpath) { | 370 | if (!mpath) { |
371 | rcu_read_unlock(); | 371 | rcu_read_unlock(); |
372 | return 0; | 372 | return 0; |
@@ -392,11 +392,10 @@ static u32 hwmp_route_info_get(struct net_device *dev, | |||
392 | return process ? new_metric : 0; | 392 | return process ? new_metric : 0; |
393 | } | 393 | } |
394 | 394 | ||
395 | static void hwmp_preq_frame_process(struct net_device *dev, | 395 | static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, |
396 | struct ieee80211_mgmt *mgmt, | 396 | struct ieee80211_mgmt *mgmt, |
397 | u8 *preq_elem, u32 metric) { | 397 | u8 *preq_elem, u32 metric) { |
398 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 398 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
399 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
400 | struct mesh_path *mpath; | 399 | struct mesh_path *mpath; |
401 | u8 *dst_addr, *orig_addr; | 400 | u8 *dst_addr, *orig_addr; |
402 | u8 dst_flags, ttl; | 401 | u8 dst_flags, ttl; |
@@ -411,19 +410,19 @@ static void hwmp_preq_frame_process(struct net_device *dev, | |||
411 | orig_dsn = PREQ_IE_ORIG_DSN(preq_elem); | 410 | orig_dsn = PREQ_IE_ORIG_DSN(preq_elem); |
412 | dst_flags = PREQ_IE_DST_F(preq_elem); | 411 | dst_flags = PREQ_IE_DST_F(preq_elem); |
413 | 412 | ||
414 | if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) { | 413 | if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { |
415 | forward = false; | 414 | forward = false; |
416 | reply = true; | 415 | reply = true; |
417 | metric = 0; | 416 | metric = 0; |
418 | if (time_after(jiffies, ifsta->last_dsn_update + | 417 | if (time_after(jiffies, ifmsh->last_dsn_update + |
419 | net_traversal_jiffies(sdata)) || | 418 | net_traversal_jiffies(sdata)) || |
420 | time_before(jiffies, ifsta->last_dsn_update)) { | 419 | time_before(jiffies, ifmsh->last_dsn_update)) { |
421 | dst_dsn = ++ifsta->dsn; | 420 | dst_dsn = ++ifmsh->dsn; |
422 | ifsta->last_dsn_update = jiffies; | 421 | ifmsh->last_dsn_update = jiffies; |
423 | } | 422 | } |
424 | } else { | 423 | } else { |
425 | rcu_read_lock(); | 424 | rcu_read_lock(); |
426 | mpath = mesh_path_lookup(dst_addr, dev); | 425 | mpath = mesh_path_lookup(dst_addr, sdata); |
427 | if (mpath) { | 426 | if (mpath) { |
428 | if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || | 427 | if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || |
429 | DSN_LT(mpath->dsn, dst_dsn)) { | 428 | DSN_LT(mpath->dsn, dst_dsn)) { |
@@ -445,15 +444,15 @@ static void hwmp_preq_frame_process(struct net_device *dev, | |||
445 | 444 | ||
446 | if (reply) { | 445 | if (reply) { |
447 | lifetime = PREQ_IE_LIFETIME(preq_elem); | 446 | lifetime = PREQ_IE_LIFETIME(preq_elem); |
448 | ttl = ifsta->mshcfg.dot11MeshTTL; | 447 | ttl = ifmsh->mshcfg.dot11MeshTTL; |
449 | if (ttl != 0) | 448 | if (ttl != 0) |
450 | mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr, | 449 | mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr, |
451 | cpu_to_le32(dst_dsn), 0, orig_addr, | 450 | cpu_to_le32(dst_dsn), 0, orig_addr, |
452 | cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl, | 451 | cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl, |
453 | cpu_to_le32(lifetime), cpu_to_le32(metric), | 452 | cpu_to_le32(lifetime), cpu_to_le32(metric), |
454 | 0, dev); | 453 | 0, sdata); |
455 | else | 454 | else |
456 | ifsta->mshstats.dropped_frames_ttl++; | 455 | ifmsh->mshstats.dropped_frames_ttl++; |
457 | } | 456 | } |
458 | 457 | ||
459 | if (forward) { | 458 | if (forward) { |
@@ -463,7 +462,7 @@ static void hwmp_preq_frame_process(struct net_device *dev, | |||
463 | ttl = PREQ_IE_TTL(preq_elem); | 462 | ttl = PREQ_IE_TTL(preq_elem); |
464 | lifetime = PREQ_IE_LIFETIME(preq_elem); | 463 | lifetime = PREQ_IE_LIFETIME(preq_elem); |
465 | if (ttl <= 1) { | 464 | if (ttl <= 1) { |
466 | ifsta->mshstats.dropped_frames_ttl++; | 465 | ifmsh->mshstats.dropped_frames_ttl++; |
467 | return; | 466 | return; |
468 | } | 467 | } |
469 | --ttl; | 468 | --ttl; |
@@ -472,20 +471,19 @@ static void hwmp_preq_frame_process(struct net_device *dev, | |||
472 | hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; | 471 | hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; |
473 | mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, | 472 | mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, |
474 | cpu_to_le32(orig_dsn), dst_flags, dst_addr, | 473 | cpu_to_le32(orig_dsn), dst_flags, dst_addr, |
475 | cpu_to_le32(dst_dsn), dev->broadcast, | 474 | cpu_to_le32(dst_dsn), sdata->dev->broadcast, |
476 | hopcount, ttl, cpu_to_le32(lifetime), | 475 | hopcount, ttl, cpu_to_le32(lifetime), |
477 | cpu_to_le32(metric), cpu_to_le32(preq_id), | 476 | cpu_to_le32(metric), cpu_to_le32(preq_id), |
478 | dev); | 477 | sdata); |
479 | ifsta->mshstats.fwded_frames++; | 478 | ifmsh->mshstats.fwded_frames++; |
480 | } | 479 | } |
481 | } | 480 | } |
482 | 481 | ||
483 | 482 | ||
484 | static void hwmp_prep_frame_process(struct net_device *dev, | 483 | static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, |
485 | struct ieee80211_mgmt *mgmt, | 484 | struct ieee80211_mgmt *mgmt, |
486 | u8 *prep_elem, u32 metric) | 485 | u8 *prep_elem, u32 metric) |
487 | { | 486 | { |
488 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
489 | struct mesh_path *mpath; | 487 | struct mesh_path *mpath; |
490 | u8 *dst_addr, *orig_addr; | 488 | u8 *dst_addr, *orig_addr; |
491 | u8 ttl, hopcount, flags; | 489 | u8 ttl, hopcount, flags; |
@@ -499,18 +497,18 @@ static void hwmp_prep_frame_process(struct net_device *dev, | |||
499 | * replies | 497 | * replies |
500 | */ | 498 | */ |
501 | dst_addr = PREP_IE_DST_ADDR(prep_elem); | 499 | dst_addr = PREP_IE_DST_ADDR(prep_elem); |
502 | if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) | 500 | if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) |
503 | /* destination, no forwarding required */ | 501 | /* destination, no forwarding required */ |
504 | return; | 502 | return; |
505 | 503 | ||
506 | ttl = PREP_IE_TTL(prep_elem); | 504 | ttl = PREP_IE_TTL(prep_elem); |
507 | if (ttl <= 1) { | 505 | if (ttl <= 1) { |
508 | sdata->u.sta.mshstats.dropped_frames_ttl++; | 506 | sdata->u.mesh.mshstats.dropped_frames_ttl++; |
509 | return; | 507 | return; |
510 | } | 508 | } |
511 | 509 | ||
512 | rcu_read_lock(); | 510 | rcu_read_lock(); |
513 | mpath = mesh_path_lookup(dst_addr, dev); | 511 | mpath = mesh_path_lookup(dst_addr, sdata); |
514 | if (mpath) | 512 | if (mpath) |
515 | spin_lock_bh(&mpath->state_lock); | 513 | spin_lock_bh(&mpath->state_lock); |
516 | else | 514 | else |
@@ -519,7 +517,7 @@ static void hwmp_prep_frame_process(struct net_device *dev, | |||
519 | spin_unlock_bh(&mpath->state_lock); | 517 | spin_unlock_bh(&mpath->state_lock); |
520 | goto fail; | 518 | goto fail; |
521 | } | 519 | } |
522 | memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN); | 520 | memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN); |
523 | spin_unlock_bh(&mpath->state_lock); | 521 | spin_unlock_bh(&mpath->state_lock); |
524 | --ttl; | 522 | --ttl; |
525 | flags = PREP_IE_FLAGS(prep_elem); | 523 | flags = PREP_IE_FLAGS(prep_elem); |
@@ -531,20 +529,20 @@ static void hwmp_prep_frame_process(struct net_device *dev, | |||
531 | 529 | ||
532 | mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, | 530 | mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, |
533 | cpu_to_le32(orig_dsn), 0, dst_addr, | 531 | cpu_to_le32(orig_dsn), 0, dst_addr, |
534 | cpu_to_le32(dst_dsn), mpath->next_hop->addr, hopcount, ttl, | 532 | cpu_to_le32(dst_dsn), mpath->next_hop->sta.addr, hopcount, ttl, |
535 | cpu_to_le32(lifetime), cpu_to_le32(metric), | 533 | cpu_to_le32(lifetime), cpu_to_le32(metric), |
536 | 0, dev); | 534 | 0, sdata); |
537 | rcu_read_unlock(); | 535 | rcu_read_unlock(); |
538 | sdata->u.sta.mshstats.fwded_frames++; | 536 | sdata->u.mesh.mshstats.fwded_frames++; |
539 | return; | 537 | return; |
540 | 538 | ||
541 | fail: | 539 | fail: |
542 | rcu_read_unlock(); | 540 | rcu_read_unlock(); |
543 | sdata->u.sta.mshstats.dropped_frames_no_route++; | 541 | sdata->u.mesh.mshstats.dropped_frames_no_route++; |
544 | return; | 542 | return; |
545 | } | 543 | } |
546 | 544 | ||
547 | static void hwmp_perr_frame_process(struct net_device *dev, | 545 | static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, |
548 | struct ieee80211_mgmt *mgmt, u8 *perr_elem) | 546 | struct ieee80211_mgmt *mgmt, u8 *perr_elem) |
549 | { | 547 | { |
550 | struct mesh_path *mpath; | 548 | struct mesh_path *mpath; |
@@ -555,18 +553,18 @@ static void hwmp_perr_frame_process(struct net_device *dev, | |||
555 | dst_addr = PERR_IE_DST_ADDR(perr_elem); | 553 | dst_addr = PERR_IE_DST_ADDR(perr_elem); |
556 | dst_dsn = PERR_IE_DST_DSN(perr_elem); | 554 | dst_dsn = PERR_IE_DST_DSN(perr_elem); |
557 | rcu_read_lock(); | 555 | rcu_read_lock(); |
558 | mpath = mesh_path_lookup(dst_addr, dev); | 556 | mpath = mesh_path_lookup(dst_addr, sdata); |
559 | if (mpath) { | 557 | if (mpath) { |
560 | spin_lock_bh(&mpath->state_lock); | 558 | spin_lock_bh(&mpath->state_lock); |
561 | if (mpath->flags & MESH_PATH_ACTIVE && | 559 | if (mpath->flags & MESH_PATH_ACTIVE && |
562 | memcmp(ta, mpath->next_hop->addr, ETH_ALEN) == 0 && | 560 | memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 && |
563 | (!(mpath->flags & MESH_PATH_DSN_VALID) || | 561 | (!(mpath->flags & MESH_PATH_DSN_VALID) || |
564 | DSN_GT(dst_dsn, mpath->dsn))) { | 562 | DSN_GT(dst_dsn, mpath->dsn))) { |
565 | mpath->flags &= ~MESH_PATH_ACTIVE; | 563 | mpath->flags &= ~MESH_PATH_ACTIVE; |
566 | mpath->dsn = dst_dsn; | 564 | mpath->dsn = dst_dsn; |
567 | spin_unlock_bh(&mpath->state_lock); | 565 | spin_unlock_bh(&mpath->state_lock); |
568 | mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn), | 566 | mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn), |
569 | dev->broadcast, dev); | 567 | sdata->dev->broadcast, sdata); |
570 | } else | 568 | } else |
571 | spin_unlock_bh(&mpath->state_lock); | 569 | spin_unlock_bh(&mpath->state_lock); |
572 | } | 570 | } |
@@ -575,7 +573,7 @@ static void hwmp_perr_frame_process(struct net_device *dev, | |||
575 | 573 | ||
576 | 574 | ||
577 | 575 | ||
578 | void mesh_rx_path_sel_frame(struct net_device *dev, | 576 | void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, |
579 | struct ieee80211_mgmt *mgmt, | 577 | struct ieee80211_mgmt *mgmt, |
580 | size_t len) | 578 | size_t len) |
581 | { | 579 | { |
@@ -583,6 +581,10 @@ void mesh_rx_path_sel_frame(struct net_device *dev, | |||
583 | size_t baselen; | 581 | size_t baselen; |
584 | u32 last_hop_metric; | 582 | u32 last_hop_metric; |
585 | 583 | ||
584 | /* need action_code */ | ||
585 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) | ||
586 | return; | ||
587 | |||
586 | baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; | 588 | baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; |
587 | ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, | 589 | ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, |
588 | len - baselen, &elems); | 590 | len - baselen, &elems); |
@@ -592,25 +594,25 @@ void mesh_rx_path_sel_frame(struct net_device *dev, | |||
592 | if (!elems.preq || elems.preq_len != 37) | 594 | if (!elems.preq || elems.preq_len != 37) |
593 | /* Right now we support just 1 destination and no AE */ | 595 | /* Right now we support just 1 destination and no AE */ |
594 | return; | 596 | return; |
595 | last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.preq); | 597 | last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq); |
596 | if (!last_hop_metric) | 598 | if (!last_hop_metric) |
597 | return; | 599 | return; |
598 | hwmp_preq_frame_process(dev, mgmt, elems.preq, last_hop_metric); | 600 | hwmp_preq_frame_process(sdata, mgmt, elems.preq, last_hop_metric); |
599 | break; | 601 | break; |
600 | case MPATH_PREP: | 602 | case MPATH_PREP: |
601 | if (!elems.prep || elems.prep_len != 31) | 603 | if (!elems.prep || elems.prep_len != 31) |
602 | /* Right now we support no AE */ | 604 | /* Right now we support no AE */ |
603 | return; | 605 | return; |
604 | last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.prep); | 606 | last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep); |
605 | if (!last_hop_metric) | 607 | if (!last_hop_metric) |
606 | return; | 608 | return; |
607 | hwmp_prep_frame_process(dev, mgmt, elems.prep, last_hop_metric); | 609 | hwmp_prep_frame_process(sdata, mgmt, elems.prep, last_hop_metric); |
608 | break; | 610 | break; |
609 | case MPATH_PERR: | 611 | case MPATH_PERR: |
610 | if (!elems.perr || elems.perr_len != 12) | 612 | if (!elems.perr || elems.perr_len != 12) |
611 | /* Right now we support only one destination per PERR */ | 613 | /* Right now we support only one destination per PERR */ |
612 | return; | 614 | return; |
613 | hwmp_perr_frame_process(dev, mgmt, elems.perr); | 615 | hwmp_perr_frame_process(sdata, mgmt, elems.perr); |
614 | default: | 616 | default: |
615 | return; | 617 | return; |
616 | } | 618 | } |
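The hunk above adds a minimum-length guard before the action frame is parsed, and the surrounding switch keeps fixed element sizes as the only accepted encodings: 37 octets for a PREQ (one destination, no AE), 31 for a PREP (no AE), 12 for a PERR (one destination). A minimal, self-contained sketch of that validate-then-dispatch shape follows; MIN_ACTION_SIZE and the helper name are illustrative stand-ins, not mac80211 symbols.

#include <stddef.h>
#include <stdint.h>

#define MIN_ACTION_SIZE 25      /* stand-in for IEEE80211_MIN_ACTION_SIZE */

enum mpath_frame_type { MPATH_PREQ = 0, MPATH_PREP, MPATH_PERR };

/* Returns 0 if the element may be processed, -1 if the frame is dropped. */
static int path_sel_validate(enum mpath_frame_type type, size_t frame_len,
                             const uint8_t *elem, size_t elem_len)
{
        if (frame_len < MIN_ACTION_SIZE + 1)    /* need the action code */
                return -1;
        if (!elem)
                return -1;
        switch (type) {
        case MPATH_PREQ:
                return elem_len == 37 ? 0 : -1; /* one destination, no AE */
        case MPATH_PREP:
                return elem_len == 31 ? 0 : -1; /* no AE */
        case MPATH_PERR:
                return elem_len == 12 ? 0 : -1; /* one destination per PERR */
        }
        return -1;
}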
@@ -628,9 +630,8 @@ void mesh_rx_path_sel_frame(struct net_device *dev, | |||
628 | */ | 630 | */ |
629 | static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) | 631 | static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) |
630 | { | 632 | { |
631 | struct ieee80211_sub_if_data *sdata = | 633 | struct ieee80211_sub_if_data *sdata = mpath->sdata; |
632 | IEEE80211_DEV_TO_SUB_IF(mpath->dev); | 634 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
633 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
634 | struct mesh_preq_queue *preq_node; | 635 | struct mesh_preq_queue *preq_node; |
635 | 636 | ||
636 | preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL); | 637 | preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL); |
@@ -639,9 +640,9 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) | |||
639 | return; | 640 | return; |
640 | } | 641 | } |
641 | 642 | ||
642 | spin_lock(&ifsta->mesh_preq_queue_lock); | 643 | spin_lock(&ifmsh->mesh_preq_queue_lock); |
643 | if (ifsta->preq_queue_len == MAX_PREQ_QUEUE_LEN) { | 644 | if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) { |
644 | spin_unlock(&ifsta->mesh_preq_queue_lock); | 645 | spin_unlock(&ifmsh->mesh_preq_queue_lock); |
645 | kfree(preq_node); | 646 | kfree(preq_node); |
646 | if (printk_ratelimit()) | 647 | if (printk_ratelimit()) |
647 | printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n"); | 648 | printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n"); |
@@ -651,55 +652,53 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) | |||
651 | memcpy(preq_node->dst, mpath->dst, ETH_ALEN); | 652 | memcpy(preq_node->dst, mpath->dst, ETH_ALEN); |
652 | preq_node->flags = flags; | 653 | preq_node->flags = flags; |
653 | 654 | ||
654 | list_add_tail(&preq_node->list, &ifsta->preq_queue.list); | 655 | list_add_tail(&preq_node->list, &ifmsh->preq_queue.list); |
655 | ++ifsta->preq_queue_len; | 656 | ++ifmsh->preq_queue_len; |
656 | spin_unlock(&ifsta->mesh_preq_queue_lock); | 657 | spin_unlock(&ifmsh->mesh_preq_queue_lock); |
657 | 658 | ||
658 | if (time_after(jiffies, ifsta->last_preq + min_preq_int_jiff(sdata))) | 659 | if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) |
659 | queue_work(sdata->local->hw.workqueue, &ifsta->work); | 660 | queue_work(sdata->local->hw.workqueue, &ifmsh->work); |
660 | 661 | ||
661 | else if (time_before(jiffies, ifsta->last_preq)) { | 662 | else if (time_before(jiffies, ifmsh->last_preq)) { |
662 | /* avoid long wait if we did not send preqs for a long time | 663 | /* avoid long wait if we did not send preqs for a long time |
663 | * and jiffies wrapped around | 664 | * and jiffies wrapped around |
664 | */ | 665 | */ |
665 | ifsta->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; | 666 | ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; |
666 | queue_work(sdata->local->hw.workqueue, &ifsta->work); | 667 | queue_work(sdata->local->hw.workqueue, &ifmsh->work); |
667 | } else | 668 | } else |
668 | mod_timer(&ifsta->mesh_path_timer, ifsta->last_preq + | 669 | mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + |
669 | min_preq_int_jiff(sdata)); | 670 | min_preq_int_jiff(sdata)); |
670 | } | 671 | } |
671 | 672 | ||
672 | /** | 673 | /** |
673 | * mesh_path_start_discovery - launch a path discovery from the PREQ queue | 674 | * mesh_path_start_discovery - launch a path discovery from the PREQ queue |
674 | * | 675 | * |
675 | * @dev: local mesh interface | 676 | * @sdata: local mesh subif |
676 | */ | 677 | */ |
677 | void mesh_path_start_discovery(struct net_device *dev) | 678 | void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) |
678 | { | 679 | { |
679 | struct ieee80211_sub_if_data *sdata = | 680 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
680 | IEEE80211_DEV_TO_SUB_IF(dev); | ||
681 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
682 | struct mesh_preq_queue *preq_node; | 681 | struct mesh_preq_queue *preq_node; |
683 | struct mesh_path *mpath; | 682 | struct mesh_path *mpath; |
684 | u8 ttl, dst_flags; | 683 | u8 ttl, dst_flags; |
685 | u32 lifetime; | 684 | u32 lifetime; |
686 | 685 | ||
687 | spin_lock(&ifsta->mesh_preq_queue_lock); | 686 | spin_lock(&ifmsh->mesh_preq_queue_lock); |
688 | if (!ifsta->preq_queue_len || | 687 | if (!ifmsh->preq_queue_len || |
689 | time_before(jiffies, ifsta->last_preq + | 688 | time_before(jiffies, ifmsh->last_preq + |
690 | min_preq_int_jiff(sdata))) { | 689 | min_preq_int_jiff(sdata))) { |
691 | spin_unlock(&ifsta->mesh_preq_queue_lock); | 690 | spin_unlock(&ifmsh->mesh_preq_queue_lock); |
692 | return; | 691 | return; |
693 | } | 692 | } |
694 | 693 | ||
695 | preq_node = list_first_entry(&ifsta->preq_queue.list, | 694 | preq_node = list_first_entry(&ifmsh->preq_queue.list, |
696 | struct mesh_preq_queue, list); | 695 | struct mesh_preq_queue, list); |
697 | list_del(&preq_node->list); | 696 | list_del(&preq_node->list); |
698 | --ifsta->preq_queue_len; | 697 | --ifmsh->preq_queue_len; |
699 | spin_unlock(&ifsta->mesh_preq_queue_lock); | 698 | spin_unlock(&ifmsh->mesh_preq_queue_lock); |
700 | 699 | ||
701 | rcu_read_lock(); | 700 | rcu_read_lock(); |
702 | mpath = mesh_path_lookup(preq_node->dst, dev); | 701 | mpath = mesh_path_lookup(preq_node->dst, sdata); |
703 | if (!mpath) | 702 | if (!mpath) |
704 | goto enddiscovery; | 703 | goto enddiscovery; |
705 | 704 | ||
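mesh_queue_preq() above caps the per-interface PREQ queue at MAX_PREQ_QUEUE_LEN and then either kicks the interface work item right away (if the minimum PREQ interval has elapsed, or if jiffies wrapped) or arms mesh_path_timer to fire when it does. A reduced, standalone sketch of that bounded-queue-plus-rate-limit idea; the names, the queue cap and the tick arithmetic are simplified stand-ins, not the mac80211 values.

#include <stddef.h>

#define MAX_QUEUE_LEN 64              /* stand-in for MAX_PREQ_QUEUE_LEN */

struct preq_state {
        size_t queue_len;
        unsigned long last_preq;      /* tick of the last PREQ sent      */
        unsigned long min_interval;   /* stand-in for min_preq_int_jiff  */
};

enum preq_action { PREQ_DROPPED, PREQ_SEND_NOW, PREQ_WAIT_TIMER };

static enum preq_action queue_preq(struct preq_state *s, unsigned long now)
{
        if (s->queue_len == MAX_QUEUE_LEN)
                return PREQ_DROPPED;              /* queue full: drop     */
        s->queue_len++;
        /* unsigned subtraction tolerates tick-counter wraparound */
        if (now - s->last_preq >= s->min_interval)
                return PREQ_SEND_NOW;             /* kick the work item   */
        return PREQ_WAIT_TIMER;                    /* arm the path timer   */
}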
@@ -721,18 +720,18 @@ void mesh_path_start_discovery(struct net_device *dev) | |||
721 | goto enddiscovery; | 720 | goto enddiscovery; |
722 | } | 721 | } |
723 | 722 | ||
724 | ifsta->last_preq = jiffies; | 723 | ifmsh->last_preq = jiffies; |
725 | 724 | ||
726 | if (time_after(jiffies, ifsta->last_dsn_update + | 725 | if (time_after(jiffies, ifmsh->last_dsn_update + |
727 | net_traversal_jiffies(sdata)) || | 726 | net_traversal_jiffies(sdata)) || |
728 | time_before(jiffies, ifsta->last_dsn_update)) { | 727 | time_before(jiffies, ifmsh->last_dsn_update)) { |
729 | ++ifsta->dsn; | 728 | ++ifmsh->dsn; |
730 | sdata->u.sta.last_dsn_update = jiffies; | 729 | sdata->u.mesh.last_dsn_update = jiffies; |
731 | } | 730 | } |
732 | lifetime = default_lifetime(sdata); | 731 | lifetime = default_lifetime(sdata); |
733 | ttl = sdata->u.sta.mshcfg.dot11MeshTTL; | 732 | ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; |
734 | if (ttl == 0) { | 733 | if (ttl == 0) { |
735 | sdata->u.sta.mshstats.dropped_frames_ttl++; | 734 | sdata->u.mesh.mshstats.dropped_frames_ttl++; |
736 | spin_unlock_bh(&mpath->state_lock); | 735 | spin_unlock_bh(&mpath->state_lock); |
737 | goto enddiscovery; | 736 | goto enddiscovery; |
738 | } | 737 | } |
@@ -743,11 +742,11 @@ void mesh_path_start_discovery(struct net_device *dev) | |||
743 | dst_flags = MP_F_RF; | 742 | dst_flags = MP_F_RF; |
744 | 743 | ||
745 | spin_unlock_bh(&mpath->state_lock); | 744 | spin_unlock_bh(&mpath->state_lock); |
746 | mesh_path_sel_frame_tx(MPATH_PREQ, 0, dev->dev_addr, | 745 | mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr, |
747 | cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst, | 746 | cpu_to_le32(ifmsh->dsn), dst_flags, mpath->dst, |
748 | cpu_to_le32(mpath->dsn), dev->broadcast, 0, | 747 | cpu_to_le32(mpath->dsn), sdata->dev->broadcast, 0, |
749 | ttl, cpu_to_le32(lifetime), 0, | 748 | ttl, cpu_to_le32(lifetime), 0, |
750 | cpu_to_le32(ifsta->preq_id++), dev); | 749 | cpu_to_le32(ifmsh->preq_id++), sdata); |
751 | mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); | 750 | mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); |
752 | 751 | ||
753 | enddiscovery: | 752 | enddiscovery: |
@@ -759,7 +758,7 @@ enddiscovery: | |||
759 | * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame | 758 | * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame |
760 | * | 759 | * |
761 | * @skb: 802.11 frame to be sent | 760 | * @skb: 802.11 frame to be sent |
762 | * @dev: network device the frame will be sent through | 761 | * @sdata: network subif the frame will be sent through |
763 | * @fwd_frame: true if this frame was originally from a different host | 762 | * @fwd_frame: true if this frame was originally from a different host |
764 | * | 763 | * |
765 | * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is | 764 | * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is |
@@ -767,9 +766,9 @@ enddiscovery: | |||
767 | * sent when the path is resolved. This means the caller must not free the skb | 766 | * sent when the path is resolved. This means the caller must not free the skb |
768 | * in this case. | 767 | * in this case. |
769 | */ | 768 | */ |
770 | int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev) | 769 | int mesh_nexthop_lookup(struct sk_buff *skb, |
770 | struct ieee80211_sub_if_data *sdata) | ||
771 | { | 771 | { |
772 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
773 | struct sk_buff *skb_to_free = NULL; | 772 | struct sk_buff *skb_to_free = NULL; |
774 | struct mesh_path *mpath; | 773 | struct mesh_path *mpath; |
775 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 774 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
@@ -777,14 +776,14 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev) | |||
777 | int err = 0; | 776 | int err = 0; |
778 | 777 | ||
779 | rcu_read_lock(); | 778 | rcu_read_lock(); |
780 | mpath = mesh_path_lookup(dst_addr, dev); | 779 | mpath = mesh_path_lookup(dst_addr, sdata); |
781 | 780 | ||
782 | if (!mpath) { | 781 | if (!mpath) { |
783 | mesh_path_add(dst_addr, dev); | 782 | mesh_path_add(dst_addr, sdata); |
784 | mpath = mesh_path_lookup(dst_addr, dev); | 783 | mpath = mesh_path_lookup(dst_addr, sdata); |
785 | if (!mpath) { | 784 | if (!mpath) { |
786 | dev_kfree_skb(skb); | 785 | dev_kfree_skb(skb); |
787 | sdata->u.sta.mshstats.dropped_frames_no_route++; | 786 | sdata->u.mesh.mshstats.dropped_frames_no_route++; |
788 | err = -ENOSPC; | 787 | err = -ENOSPC; |
789 | goto endlookup; | 788 | goto endlookup; |
790 | } | 789 | } |
@@ -792,14 +791,15 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev) | |||
792 | 791 | ||
793 | if (mpath->flags & MESH_PATH_ACTIVE) { | 792 | if (mpath->flags & MESH_PATH_ACTIVE) { |
794 | if (time_after(jiffies, mpath->exp_time - | 793 | if (time_after(jiffies, mpath->exp_time - |
795 | msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time)) | 794 | msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) |
796 | && !memcmp(dev->dev_addr, hdr->addr4, ETH_ALEN) | 795 | && !memcmp(sdata->dev->dev_addr, hdr->addr4, |
796 | ETH_ALEN) | ||
797 | && !(mpath->flags & MESH_PATH_RESOLVING) | 797 | && !(mpath->flags & MESH_PATH_RESOLVING) |
798 | && !(mpath->flags & MESH_PATH_FIXED)) { | 798 | && !(mpath->flags & MESH_PATH_FIXED)) { |
799 | mesh_queue_preq(mpath, | 799 | mesh_queue_preq(mpath, |
800 | PREQ_Q_F_START | PREQ_Q_F_REFRESH); | 800 | PREQ_Q_F_START | PREQ_Q_F_REFRESH); |
801 | } | 801 | } |
802 | memcpy(hdr->addr1, mpath->next_hop->addr, | 802 | memcpy(hdr->addr1, mpath->next_hop->sta.addr, |
803 | ETH_ALEN); | 803 | ETH_ALEN); |
804 | } else { | 804 | } else { |
805 | if (!(mpath->flags & MESH_PATH_RESOLVING)) { | 805 | if (!(mpath->flags & MESH_PATH_RESOLVING)) { |
@@ -815,7 +815,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev) | |||
815 | 815 | ||
816 | skb_queue_tail(&mpath->frame_queue, skb); | 816 | skb_queue_tail(&mpath->frame_queue, skb); |
817 | if (skb_to_free) | 817 | if (skb_to_free) |
818 | mesh_path_discard_frame(skb_to_free, dev); | 818 | mesh_path_discard_frame(skb_to_free, sdata); |
819 | err = -ENOENT; | 819 | err = -ENOENT; |
820 | } | 820 | } |
821 | 821 | ||
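The kernel-doc above spells out the contract of mesh_nexthop_lookup(): on 0 the next hop has been written into addr1 and the frame can go out; on nonzero the skb now sits on the path's frame_queue and will be transmitted or discarded when discovery finishes, so the caller must not free it. A hedged caller-side sketch of that contract, assuming the kernel context of this file; tx_placeholder() is not a mac80211 function and merely stands in for whatever finally sends the frame.

/* Caller-side sketch only; not a function added by this patch. */
extern void tx_placeholder(struct ieee80211_sub_if_data *sdata,
                           struct sk_buff *skb);

static void mesh_tx_or_defer(struct sk_buff *skb,
                             struct ieee80211_sub_if_data *sdata)
{
        if (mesh_nexthop_lookup(skb, sdata) == 0) {
                /* addr1 now carries the next hop; the frame can go out */
                tx_placeholder(sdata, skb);
                return;
        }
        /* Nonzero: the skb was queued on the path's frame_queue and will be
         * sent or discarded when discovery ends; do not free it here. */
}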
@@ -835,7 +835,7 @@ void mesh_path_timer(unsigned long data) | |||
835 | if (!mpath) | 835 | if (!mpath) |
836 | goto endmpathtimer; | 836 | goto endmpathtimer; |
837 | spin_lock_bh(&mpath->state_lock); | 837 | spin_lock_bh(&mpath->state_lock); |
838 | sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev); | 838 | sdata = mpath->sdata; |
839 | if (mpath->flags & MESH_PATH_RESOLVED || | 839 | if (mpath->flags & MESH_PATH_RESOLVED || |
840 | (!(mpath->flags & MESH_PATH_RESOLVING))) | 840 | (!(mpath->flags & MESH_PATH_RESOLVING))) |
841 | mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); | 841 | mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); |
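Taken together, the mesh_hwmp.c hunks above are one mechanical interface change: every HWMP entry point now takes the ieee80211_sub_if_data directly instead of the net_device, per-interface mesh state moves from sdata->u.sta to sdata->u.mesh, and station addresses are read via sta->sta.addr. A condensed before/after, drawn only from the hunks shown:

/* Shape of the interface change in this file (condensed, simplified):
 *
 *   before:  sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 *            mpath = mesh_path_lookup(dst, dev);
 *            sdata->u.sta.mshstats.fwded_frames++;
 *            memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN);
 *
 *   after:   mpath = mesh_path_lookup(dst, sdata);
 *            sdata->u.mesh.mshstats.fwded_frames++;
 *            memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
 *
 * The lookup keys and the statistics stay the same; only the handle that is
 * passed around (sdata instead of dev) and the union member (u.mesh instead
 * of u.sta) change.
 */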
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 838ee60492ad..3c72557df45a 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -9,7 +9,6 @@ | |||
9 | 9 | ||
10 | #include <linux/etherdevice.h> | 10 | #include <linux/etherdevice.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/netdevice.h> | ||
13 | #include <linux/random.h> | 12 | #include <linux/random.h> |
14 | #include <linux/spinlock.h> | 13 | #include <linux/spinlock.h> |
15 | #include <linux/string.h> | 14 | #include <linux/string.h> |
@@ -37,6 +36,7 @@ struct mpath_node { | |||
37 | }; | 36 | }; |
38 | 37 | ||
39 | static struct mesh_table *mesh_paths; | 38 | static struct mesh_table *mesh_paths; |
39 | static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ | ||
40 | 40 | ||
41 | /* This lock will have the grow table function as writer and add / delete nodes | 41 | /* This lock will have the grow table function as writer and add / delete nodes |
42 | * as readers. When reading the table (i.e. doing lookups) we are well protected | 42 | * as readers. When reading the table (i.e. doing lookups) we are well protected |
@@ -62,13 +62,13 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) | |||
62 | /** | 62 | /** |
63 | * mesh_path_lookup - look up a path in the mesh path table | 63 | * mesh_path_lookup - look up a path in the mesh path table |
64 | * @dst: hardware address (ETH_ALEN length) of destination | 64 | * @dst: hardware address (ETH_ALEN length) of destination |
65 | * @dev: local interface | 65 | * @sdata: local subif |
66 | * | 66 | * |
67 | * Returns: pointer to the mesh path structure, or NULL if not found | 67 | * Returns: pointer to the mesh path structure, or NULL if not found |
68 | * | 68 | * |
69 | * Locking: must be called within a read rcu section. | 69 | * Locking: must be called within a read rcu section. |
70 | */ | 70 | */ |
71 | struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev) | 71 | struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) |
72 | { | 72 | { |
73 | struct mesh_path *mpath; | 73 | struct mesh_path *mpath; |
74 | struct hlist_node *n; | 74 | struct hlist_node *n; |
@@ -78,10 +78,10 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev) | |||
78 | 78 | ||
79 | tbl = rcu_dereference(mesh_paths); | 79 | tbl = rcu_dereference(mesh_paths); |
80 | 80 | ||
81 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)]; | 81 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; |
82 | hlist_for_each_entry_rcu(node, n, bucket, list) { | 82 | hlist_for_each_entry_rcu(node, n, bucket, list) { |
83 | mpath = node->mpath; | 83 | mpath = node->mpath; |
84 | if (mpath->dev == dev && | 84 | if (mpath->sdata == sdata && |
85 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { | 85 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { |
86 | if (MPATH_EXPIRED(mpath)) { | 86 | if (MPATH_EXPIRED(mpath)) { |
87 | spin_lock_bh(&mpath->state_lock); | 87 | spin_lock_bh(&mpath->state_lock); |
@@ -95,16 +95,44 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev) | |||
95 | return NULL; | 95 | return NULL; |
96 | } | 96 | } |
97 | 97 | ||
98 | struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) | ||
99 | { | ||
100 | struct mesh_path *mpath; | ||
101 | struct hlist_node *n; | ||
102 | struct hlist_head *bucket; | ||
103 | struct mesh_table *tbl; | ||
104 | struct mpath_node *node; | ||
105 | |||
106 | tbl = rcu_dereference(mpp_paths); | ||
107 | |||
108 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; | ||
109 | hlist_for_each_entry_rcu(node, n, bucket, list) { | ||
110 | mpath = node->mpath; | ||
111 | if (mpath->sdata == sdata && | ||
112 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { | ||
113 | if (MPATH_EXPIRED(mpath)) { | ||
114 | spin_lock_bh(&mpath->state_lock); | ||
115 | if (MPATH_EXPIRED(mpath)) | ||
116 | mpath->flags &= ~MESH_PATH_ACTIVE; | ||
117 | spin_unlock_bh(&mpath->state_lock); | ||
118 | } | ||
119 | return mpath; | ||
120 | } | ||
121 | } | ||
122 | return NULL; | ||
123 | } | ||
124 | |||
125 | |||
98 | /** | 126 | /** |
99 | * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index | 127 | * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index |
100 | * @idx: index | 128 | * @idx: index |
101 | * @dev: local interface, or NULL for all entries | 129 | * @sdata: local subif, or NULL for all entries |
102 | * | 130 | * |
103 | * Returns: pointer to the mesh path structure, or NULL if not found. | 131 | * Returns: pointer to the mesh path structure, or NULL if not found. |
104 | * | 132 | * |
105 | * Locking: must be called within a read rcu section. | 133 | * Locking: must be called within a read rcu section. |
106 | */ | 134 | */ |
107 | struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev) | 135 | struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) |
108 | { | 136 | { |
109 | struct mpath_node *node; | 137 | struct mpath_node *node; |
110 | struct hlist_node *p; | 138 | struct hlist_node *p; |
@@ -112,7 +140,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev) | |||
112 | int j = 0; | 140 | int j = 0; |
113 | 141 | ||
114 | for_each_mesh_entry(mesh_paths, p, node, i) { | 142 | for_each_mesh_entry(mesh_paths, p, node, i) { |
115 | if (dev && node->mpath->dev != dev) | 143 | if (sdata && node->mpath->sdata != sdata) |
116 | continue; | 144 | continue; |
117 | if (j++ == idx) { | 145 | if (j++ == idx) { |
118 | if (MPATH_EXPIRED(node->mpath)) { | 146 | if (MPATH_EXPIRED(node->mpath)) { |
@@ -131,15 +159,14 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev) | |||
131 | /** | 159 | /** |
132 | * mesh_path_add - allocate and add a new path to the mesh path table | 160 | * mesh_path_add - allocate and add a new path to the mesh path table |
133 | * @addr: destination address of the path (ETH_ALEN length) | 161 | * @addr: destination address of the path (ETH_ALEN length) |
134 | * @dev: local interface | 162 | * @sdata: local subif |
135 | * | 163 | * |
136 | * Returns: 0 on success | 164 | * Returns: 0 on success |
137 | * | 165 | * |
138 | * State: the initial state of the new path is set to 0 | 166 | * State: the initial state of the new path is set to 0 |
139 | */ | 167 | */ |
140 | int mesh_path_add(u8 *dst, struct net_device *dev) | 168 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) |
141 | { | 169 | { |
142 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
143 | struct mesh_path *mpath, *new_mpath; | 170 | struct mesh_path *mpath, *new_mpath; |
144 | struct mpath_node *node, *new_node; | 171 | struct mpath_node *node, *new_node; |
145 | struct hlist_head *bucket; | 172 | struct hlist_head *bucket; |
@@ -148,14 +175,14 @@ int mesh_path_add(u8 *dst, struct net_device *dev) | |||
148 | int err = 0; | 175 | int err = 0; |
149 | u32 hash_idx; | 176 | u32 hash_idx; |
150 | 177 | ||
151 | if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0) | 178 | if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) |
152 | /* never add ourselves as neighbours */ | 179 | /* never add ourselves as neighbours */ |
153 | return -ENOTSUPP; | 180 | return -ENOTSUPP; |
154 | 181 | ||
155 | if (is_multicast_ether_addr(dst)) | 182 | if (is_multicast_ether_addr(dst)) |
156 | return -ENOTSUPP; | 183 | return -ENOTSUPP; |
157 | 184 | ||
158 | if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0) | 185 | if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) |
159 | return -ENOSPC; | 186 | return -ENOSPC; |
160 | 187 | ||
161 | err = -ENOMEM; | 188 | err = -ENOMEM; |
@@ -169,7 +196,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev) | |||
169 | 196 | ||
170 | read_lock(&pathtbl_resize_lock); | 197 | read_lock(&pathtbl_resize_lock); |
171 | memcpy(new_mpath->dst, dst, ETH_ALEN); | 198 | memcpy(new_mpath->dst, dst, ETH_ALEN); |
172 | new_mpath->dev = dev; | 199 | new_mpath->sdata = sdata; |
173 | new_mpath->flags = 0; | 200 | new_mpath->flags = 0; |
174 | skb_queue_head_init(&new_mpath->frame_queue); | 201 | skb_queue_head_init(&new_mpath->frame_queue); |
175 | new_node->mpath = new_mpath; | 202 | new_node->mpath = new_mpath; |
@@ -179,7 +206,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev) | |||
179 | spin_lock_init(&new_mpath->state_lock); | 206 | spin_lock_init(&new_mpath->state_lock); |
180 | init_timer(&new_mpath->timer); | 207 | init_timer(&new_mpath->timer); |
181 | 208 | ||
182 | hash_idx = mesh_table_hash(dst, dev, mesh_paths); | 209 | hash_idx = mesh_table_hash(dst, sdata, mesh_paths); |
183 | bucket = &mesh_paths->hash_buckets[hash_idx]; | 210 | bucket = &mesh_paths->hash_buckets[hash_idx]; |
184 | 211 | ||
185 | spin_lock(&mesh_paths->hashwlock[hash_idx]); | 212 | spin_lock(&mesh_paths->hashwlock[hash_idx]); |
@@ -187,7 +214,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev) | |||
187 | err = -EEXIST; | 214 | err = -EEXIST; |
188 | hlist_for_each_entry(node, n, bucket, list) { | 215 | hlist_for_each_entry(node, n, bucket, list) { |
189 | mpath = node->mpath; | 216 | mpath = node->mpath; |
190 | if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0) | 217 | if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) |
191 | goto err_exists; | 218 | goto err_exists; |
192 | } | 219 | } |
193 | 220 | ||
@@ -223,7 +250,92 @@ err_exists: | |||
223 | err_node_alloc: | 250 | err_node_alloc: |
224 | kfree(new_mpath); | 251 | kfree(new_mpath); |
225 | err_path_alloc: | 252 | err_path_alloc: |
226 | atomic_dec(&sdata->u.sta.mpaths); | 253 | atomic_dec(&sdata->u.mesh.mpaths); |
254 | return err; | ||
255 | } | ||
256 | |||
257 | |||
258 | int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) | ||
259 | { | ||
260 | struct mesh_path *mpath, *new_mpath; | ||
261 | struct mpath_node *node, *new_node; | ||
262 | struct hlist_head *bucket; | ||
263 | struct hlist_node *n; | ||
264 | int grow = 0; | ||
265 | int err = 0; | ||
266 | u32 hash_idx; | ||
267 | |||
268 | |||
269 | if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) | ||
270 | /* never add ourselves as neighbours */ | ||
271 | return -ENOTSUPP; | ||
272 | |||
273 | if (is_multicast_ether_addr(dst)) | ||
274 | return -ENOTSUPP; | ||
275 | |||
276 | err = -ENOMEM; | ||
277 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); | ||
278 | if (!new_mpath) | ||
279 | goto err_path_alloc; | ||
280 | |||
281 | new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); | ||
282 | if (!new_node) | ||
283 | goto err_node_alloc; | ||
284 | |||
285 | read_lock(&pathtbl_resize_lock); | ||
286 | memcpy(new_mpath->dst, dst, ETH_ALEN); | ||
287 | memcpy(new_mpath->mpp, mpp, ETH_ALEN); | ||
288 | new_mpath->sdata = sdata; | ||
289 | new_mpath->flags = 0; | ||
290 | skb_queue_head_init(&new_mpath->frame_queue); | ||
291 | new_node->mpath = new_mpath; | ||
292 | new_mpath->exp_time = jiffies; | ||
293 | spin_lock_init(&new_mpath->state_lock); | ||
294 | |||
295 | hash_idx = mesh_table_hash(dst, sdata, mpp_paths); | ||
296 | bucket = &mpp_paths->hash_buckets[hash_idx]; | ||
297 | |||
298 | spin_lock(&mpp_paths->hashwlock[hash_idx]); | ||
299 | |||
300 | err = -EEXIST; | ||
301 | hlist_for_each_entry(node, n, bucket, list) { | ||
302 | mpath = node->mpath; | ||
303 | if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) | ||
304 | goto err_exists; | ||
305 | } | ||
306 | |||
307 | hlist_add_head_rcu(&new_node->list, bucket); | ||
308 | if (atomic_inc_return(&mpp_paths->entries) >= | ||
309 | mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1)) | ||
310 | grow = 1; | ||
311 | |||
312 | spin_unlock(&mpp_paths->hashwlock[hash_idx]); | ||
313 | read_unlock(&pathtbl_resize_lock); | ||
314 | if (grow) { | ||
315 | struct mesh_table *oldtbl, *newtbl; | ||
316 | |||
317 | write_lock(&pathtbl_resize_lock); | ||
318 | oldtbl = mpp_paths; | ||
319 | newtbl = mesh_table_grow(mpp_paths); | ||
320 | if (!newtbl) { | ||
321 | write_unlock(&pathtbl_resize_lock); | ||
322 | return 0; | ||
323 | } | ||
324 | rcu_assign_pointer(mpp_paths, newtbl); | ||
325 | write_unlock(&pathtbl_resize_lock); | ||
326 | |||
327 | synchronize_rcu(); | ||
328 | mesh_table_free(oldtbl, false); | ||
329 | } | ||
330 | return 0; | ||
331 | |||
332 | err_exists: | ||
333 | spin_unlock(&mpp_paths->hashwlock[hash_idx]); | ||
334 | read_unlock(&pathtbl_resize_lock); | ||
335 | kfree(new_node); | ||
336 | err_node_alloc: | ||
337 | kfree(new_mpath); | ||
338 | err_path_alloc: | ||
227 | return err; | 339 | return err; |
228 | } | 340 | } |
229 | 341 | ||
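mpp_path_add() above records which mesh proxy (MPP) fronts an external destination, using the same hashing, duplicate-detection and table-grow logic as the plain mesh path table, and mpp_path_lookup() retrieves the entry under RCU. A hypothetical caller is sketched below; it is not part of this hunk and assumes the kernel context of this file (ETH_ALEN, RCU, the mesh_path layout shown above). The proxy address is copied out while the read lock is still held.

/* Illustrative caller only; not added by this patch. Returns 1 and fills
 * proxy_out with the proxy MP's address if an MPP entry exists for dst. */
static int proxy_for(u8 *dst, u8 *proxy_out,
                     struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpp;
        int found = 0;

        rcu_read_lock();
        mpp = mpp_path_lookup(dst, sdata);      /* lookup added above */
        if (mpp) {
                memcpy(proxy_out, mpp->mpp, ETH_ALEN);
                found = 1;
        }
        rcu_read_unlock();
        return found;
}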
@@ -241,7 +353,7 @@ void mesh_plink_broken(struct sta_info *sta) | |||
241 | struct mesh_path *mpath; | 353 | struct mesh_path *mpath; |
242 | struct mpath_node *node; | 354 | struct mpath_node *node; |
243 | struct hlist_node *p; | 355 | struct hlist_node *p; |
244 | struct net_device *dev = sta->sdata->dev; | 356 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
245 | int i; | 357 | int i; |
246 | 358 | ||
247 | rcu_read_lock(); | 359 | rcu_read_lock(); |
@@ -256,7 +368,7 @@ void mesh_plink_broken(struct sta_info *sta) | |||
256 | spin_unlock_bh(&mpath->state_lock); | 368 | spin_unlock_bh(&mpath->state_lock); |
257 | mesh_path_error_tx(mpath->dst, | 369 | mesh_path_error_tx(mpath->dst, |
258 | cpu_to_le32(mpath->dsn), | 370 | cpu_to_le32(mpath->dsn), |
259 | dev->broadcast, dev); | 371 | sdata->dev->broadcast, sdata); |
260 | } else | 372 | } else |
261 | spin_unlock_bh(&mpath->state_lock); | 373 | spin_unlock_bh(&mpath->state_lock); |
262 | } | 374 | } |
@@ -284,11 +396,11 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) | |||
284 | for_each_mesh_entry(mesh_paths, p, node, i) { | 396 | for_each_mesh_entry(mesh_paths, p, node, i) { |
285 | mpath = node->mpath; | 397 | mpath = node->mpath; |
286 | if (mpath->next_hop == sta) | 398 | if (mpath->next_hop == sta) |
287 | mesh_path_del(mpath->dst, mpath->dev); | 399 | mesh_path_del(mpath->dst, mpath->sdata); |
288 | } | 400 | } |
289 | } | 401 | } |
290 | 402 | ||
291 | void mesh_path_flush(struct net_device *dev) | 403 | void mesh_path_flush(struct ieee80211_sub_if_data *sdata) |
292 | { | 404 | { |
293 | struct mesh_path *mpath; | 405 | struct mesh_path *mpath; |
294 | struct mpath_node *node; | 406 | struct mpath_node *node; |
@@ -297,19 +409,18 @@ void mesh_path_flush(struct net_device *dev) | |||
297 | 409 | ||
298 | for_each_mesh_entry(mesh_paths, p, node, i) { | 410 | for_each_mesh_entry(mesh_paths, p, node, i) { |
299 | mpath = node->mpath; | 411 | mpath = node->mpath; |
300 | if (mpath->dev == dev) | 412 | if (mpath->sdata == sdata) |
301 | mesh_path_del(mpath->dst, mpath->dev); | 413 | mesh_path_del(mpath->dst, mpath->sdata); |
302 | } | 414 | } |
303 | } | 415 | } |
304 | 416 | ||
305 | static void mesh_path_node_reclaim(struct rcu_head *rp) | 417 | static void mesh_path_node_reclaim(struct rcu_head *rp) |
306 | { | 418 | { |
307 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); | 419 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); |
308 | struct ieee80211_sub_if_data *sdata = | 420 | struct ieee80211_sub_if_data *sdata = node->mpath->sdata; |
309 | IEEE80211_DEV_TO_SUB_IF(node->mpath->dev); | ||
310 | 421 | ||
311 | del_timer_sync(&node->mpath->timer); | 422 | del_timer_sync(&node->mpath->timer); |
312 | atomic_dec(&sdata->u.sta.mpaths); | 423 | atomic_dec(&sdata->u.mesh.mpaths); |
313 | kfree(node->mpath); | 424 | kfree(node->mpath); |
314 | kfree(node); | 425 | kfree(node); |
315 | } | 426 | } |
@@ -318,11 +429,11 @@ static void mesh_path_node_reclaim(struct rcu_head *rp) | |||
318 | * mesh_path_del - delete a mesh path from the table | 429 | * mesh_path_del - delete a mesh path from the table |
319 | * | 430 | * |
320 | * @addr: dst address (ETH_ALEN length) | 431 | * @addr: dst address (ETH_ALEN length) |
321 | * @dev: local interface | 432 | * @sdata: local subif |
322 | * | 433 | * |
323 | * Returns: 0 if successful | 434 | * Returns: 0 if successful |
324 | */ | 435 | */ |
325 | int mesh_path_del(u8 *addr, struct net_device *dev) | 436 | int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) |
326 | { | 437 | { |
327 | struct mesh_path *mpath; | 438 | struct mesh_path *mpath; |
328 | struct mpath_node *node; | 439 | struct mpath_node *node; |
@@ -332,13 +443,13 @@ int mesh_path_del(u8 *addr, struct net_device *dev) | |||
332 | int err = 0; | 443 | int err = 0; |
333 | 444 | ||
334 | read_lock(&pathtbl_resize_lock); | 445 | read_lock(&pathtbl_resize_lock); |
335 | hash_idx = mesh_table_hash(addr, dev, mesh_paths); | 446 | hash_idx = mesh_table_hash(addr, sdata, mesh_paths); |
336 | bucket = &mesh_paths->hash_buckets[hash_idx]; | 447 | bucket = &mesh_paths->hash_buckets[hash_idx]; |
337 | 448 | ||
338 | spin_lock(&mesh_paths->hashwlock[hash_idx]); | 449 | spin_lock(&mesh_paths->hashwlock[hash_idx]); |
339 | hlist_for_each_entry(node, n, bucket, list) { | 450 | hlist_for_each_entry(node, n, bucket, list) { |
340 | mpath = node->mpath; | 451 | mpath = node->mpath; |
341 | if (mpath->dev == dev && | 452 | if (mpath->sdata == sdata && |
342 | memcmp(addr, mpath->dst, ETH_ALEN) == 0) { | 453 | memcmp(addr, mpath->dst, ETH_ALEN) == 0) { |
343 | spin_lock_bh(&mpath->state_lock); | 454 | spin_lock_bh(&mpath->state_lock); |
344 | mpath->flags |= MESH_PATH_RESOLVING; | 455 | mpath->flags |= MESH_PATH_RESOLVING; |
@@ -378,33 +489,33 @@ void mesh_path_tx_pending(struct mesh_path *mpath) | |||
378 | * mesh_path_discard_frame - discard a frame whose path could not be resolved | 489 | * mesh_path_discard_frame - discard a frame whose path could not be resolved |
379 | * | 490 | * |
380 | * @skb: frame to discard | 491 | * @skb: frame to discard |
381 | * @dev: network device the frame was to be sent through | 492 | * @sdata: network subif the frame was to be sent through |
382 | * | 493 | * |
383 | * If the frame was being forwarded from another MP, a PERR frame will be sent | 494 | * If the frame was being forwarded from another MP, a PERR frame will be sent |
384 | * to the precursor. | 495 | * to the precursor. |
385 | * | 496 | * |
386 | * Locking: the function must be called within a rcu_read_lock region | 497 | * Locking: the function must be called within a rcu_read_lock region |
387 | */ | 498 | */ |
388 | void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev) | 499 | void mesh_path_discard_frame(struct sk_buff *skb, |
500 | struct ieee80211_sub_if_data *sdata) | ||
389 | { | 501 | { |
390 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
391 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 502 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
392 | struct mesh_path *mpath; | 503 | struct mesh_path *mpath; |
393 | u32 dsn = 0; | 504 | u32 dsn = 0; |
394 | 505 | ||
395 | if (memcmp(hdr->addr4, dev->dev_addr, ETH_ALEN) != 0) { | 506 | if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) { |
396 | u8 *ra, *da; | 507 | u8 *ra, *da; |
397 | 508 | ||
398 | da = hdr->addr3; | 509 | da = hdr->addr3; |
399 | ra = hdr->addr2; | 510 | ra = hdr->addr2; |
400 | mpath = mesh_path_lookup(da, dev); | 511 | mpath = mesh_path_lookup(da, sdata); |
401 | if (mpath) | 512 | if (mpath) |
402 | dsn = ++mpath->dsn; | 513 | dsn = ++mpath->dsn; |
403 | mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev); | 514 | mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata); |
404 | } | 515 | } |
405 | 516 | ||
406 | kfree_skb(skb); | 517 | kfree_skb(skb); |
407 | sdata->u.sta.mshstats.dropped_frames_no_route++; | 518 | sdata->u.mesh.mshstats.dropped_frames_no_route++; |
408 | } | 519 | } |
409 | 520 | ||
410 | /** | 521 | /** |
@@ -416,14 +527,11 @@ void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev) | |||
416 | */ | 527 | */ |
417 | void mesh_path_flush_pending(struct mesh_path *mpath) | 528 | void mesh_path_flush_pending(struct mesh_path *mpath) |
418 | { | 529 | { |
419 | struct ieee80211_sub_if_data *sdata; | ||
420 | struct sk_buff *skb; | 530 | struct sk_buff *skb; |
421 | 531 | ||
422 | sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev); | ||
423 | |||
424 | while ((skb = skb_dequeue(&mpath->frame_queue)) && | 532 | while ((skb = skb_dequeue(&mpath->frame_queue)) && |
425 | (mpath->flags & MESH_PATH_ACTIVE)) | 533 | (mpath->flags & MESH_PATH_ACTIVE)) |
426 | mesh_path_discard_frame(skb, mpath->dev); | 534 | mesh_path_discard_frame(skb, mpath->sdata); |
427 | } | 535 | } |
428 | 536 | ||
429 | /** | 537 | /** |
@@ -472,7 +580,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) | |||
472 | node = hlist_entry(p, struct mpath_node, list); | 580 | node = hlist_entry(p, struct mpath_node, list); |
473 | mpath = node->mpath; | 581 | mpath = node->mpath; |
474 | new_node->mpath = mpath; | 582 | new_node->mpath = mpath; |
475 | hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl); | 583 | hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl); |
476 | hlist_add_head(&new_node->list, | 584 | hlist_add_head(&new_node->list, |
477 | &newtbl->hash_buckets[hash_idx]); | 585 | &newtbl->hash_buckets[hash_idx]); |
478 | return 0; | 586 | return 0; |
@@ -481,15 +589,25 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) | |||
481 | int mesh_pathtbl_init(void) | 589 | int mesh_pathtbl_init(void) |
482 | { | 590 | { |
483 | mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); | 591 | mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); |
592 | if (!mesh_paths) | ||
593 | return -ENOMEM; | ||
484 | mesh_paths->free_node = &mesh_path_node_free; | 594 | mesh_paths->free_node = &mesh_path_node_free; |
485 | mesh_paths->copy_node = &mesh_path_node_copy; | 595 | mesh_paths->copy_node = &mesh_path_node_copy; |
486 | mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; | 596 | mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; |
487 | if (!mesh_paths) | 597 | |
598 | mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); | ||
599 | if (!mpp_paths) { | ||
600 | mesh_table_free(mesh_paths, true); | ||
488 | return -ENOMEM; | 601 | return -ENOMEM; |
602 | } | ||
603 | mpp_paths->free_node = &mesh_path_node_free; | ||
604 | mpp_paths->copy_node = &mesh_path_node_copy; | ||
605 | mpp_paths->mean_chain_len = MEAN_CHAIN_LEN; | ||
606 | |||
489 | return 0; | 607 | return 0; |
490 | } | 608 | } |
491 | 609 | ||
492 | void mesh_path_expire(struct net_device *dev) | 610 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) |
493 | { | 611 | { |
494 | struct mesh_path *mpath; | 612 | struct mesh_path *mpath; |
495 | struct mpath_node *node; | 613 | struct mpath_node *node; |
@@ -498,7 +616,7 @@ void mesh_path_expire(struct net_device *dev) | |||
498 | 616 | ||
499 | read_lock(&pathtbl_resize_lock); | 617 | read_lock(&pathtbl_resize_lock); |
500 | for_each_mesh_entry(mesh_paths, p, node, i) { | 618 | for_each_mesh_entry(mesh_paths, p, node, i) { |
501 | if (node->mpath->dev != dev) | 619 | if (node->mpath->sdata != sdata) |
502 | continue; | 620 | continue; |
503 | mpath = node->mpath; | 621 | mpath = node->mpath; |
504 | spin_lock_bh(&mpath->state_lock); | 622 | spin_lock_bh(&mpath->state_lock); |
@@ -507,7 +625,7 @@ void mesh_path_expire(struct net_device *dev) | |||
507 | time_after(jiffies, | 625 | time_after(jiffies, |
508 | mpath->exp_time + MESH_PATH_EXPIRE)) { | 626 | mpath->exp_time + MESH_PATH_EXPIRE)) { |
509 | spin_unlock_bh(&mpath->state_lock); | 627 | spin_unlock_bh(&mpath->state_lock); |
510 | mesh_path_del(mpath->dst, mpath->dev); | 628 | mesh_path_del(mpath->dst, mpath->sdata); |
511 | } else | 629 | } else |
512 | spin_unlock_bh(&mpath->state_lock); | 630 | spin_unlock_bh(&mpath->state_lock); |
513 | } | 631 | } |
@@ -517,4 +635,5 @@ void mesh_path_expire(struct net_device *dev) | |||
517 | void mesh_pathtbl_unregister(void) | 635 | void mesh_pathtbl_unregister(void) |
518 | { | 636 | { |
519 | mesh_table_free(mesh_paths, true); | 637 | mesh_table_free(mesh_paths, true); |
638 | mesh_table_free(mpp_paths, true); | ||
520 | } | 639 | } |
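mesh_pathtbl_init() above now allocates both tables and, if the second allocation fails, frees the first before returning -ENOMEM; mesh_pathtbl_unregister() releases both. The same unwind pattern, reduced to a standalone sketch with stand-in allocators (the names below are illustrative, not kernel symbols):

#include <stdlib.h>

struct table { int dummy; };

static struct table *table_alloc(void) { return calloc(1, sizeof(struct table)); }
static void table_free(struct table *t) { free(t); }

static struct table *paths, *mpp;

static int pathtbl_init(void)
{
        paths = table_alloc();
        if (!paths)
                return -1;
        mpp = table_alloc();
        if (!mpp) {
                table_free(paths);      /* unwind the first allocation */
                return -1;
        }
        return 0;
}

static void pathtbl_unregister(void)
{
        table_free(paths);
        table_free(mpp);
}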
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 9efeb1f07025..faac101c0f85 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c | |||
@@ -36,11 +36,11 @@ | |||
36 | #define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9 | 36 | #define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9 |
37 | #define MESH_SECURITY_FAILED_VERIFICATION 10 | 37 | #define MESH_SECURITY_FAILED_VERIFICATION 10 |
38 | 38 | ||
39 | #define dot11MeshMaxRetries(s) (s->u.sta.mshcfg.dot11MeshMaxRetries) | 39 | #define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries) |
40 | #define dot11MeshRetryTimeout(s) (s->u.sta.mshcfg.dot11MeshRetryTimeout) | 40 | #define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout) |
41 | #define dot11MeshConfirmTimeout(s) (s->u.sta.mshcfg.dot11MeshConfirmTimeout) | 41 | #define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout) |
42 | #define dot11MeshHoldingTimeout(s) (s->u.sta.mshcfg.dot11MeshHoldingTimeout) | 42 | #define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout) |
43 | #define dot11MeshMaxPeerLinks(s) (s->u.sta.mshcfg.dot11MeshMaxPeerLinks) | 43 | #define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks) |
44 | 44 | ||
45 | enum plink_frame_type { | 45 | enum plink_frame_type { |
46 | PLINK_OPEN = 0, | 46 | PLINK_OPEN = 0, |
@@ -63,14 +63,14 @@ enum plink_event { | |||
63 | static inline | 63 | static inline |
64 | void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) | 64 | void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) |
65 | { | 65 | { |
66 | atomic_inc(&sdata->u.sta.mshstats.estab_plinks); | 66 | atomic_inc(&sdata->u.mesh.mshstats.estab_plinks); |
67 | mesh_accept_plinks_update(sdata); | 67 | mesh_accept_plinks_update(sdata); |
68 | } | 68 | } |
69 | 69 | ||
70 | static inline | 70 | static inline |
71 | void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) | 71 | void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) |
72 | { | 72 | { |
73 | atomic_dec(&sdata->u.sta.mshstats.estab_plinks); | 73 | atomic_dec(&sdata->u.mesh.mshstats.estab_plinks); |
74 | mesh_accept_plinks_update(sdata); | 74 | mesh_accept_plinks_update(sdata); |
75 | } | 75 | } |
76 | 76 | ||
@@ -106,7 +106,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, | |||
106 | return NULL; | 106 | return NULL; |
107 | 107 | ||
108 | sta->flags = WLAN_STA_AUTHORIZED; | 108 | sta->flags = WLAN_STA_AUTHORIZED; |
109 | sta->supp_rates[local->hw.conf.channel->band] = rates; | 109 | sta->sta.supp_rates[local->hw.conf.channel->band] = rates; |
110 | 110 | ||
111 | return sta; | 111 | return sta; |
112 | } | 112 | } |
@@ -144,10 +144,10 @@ void mesh_plink_deactivate(struct sta_info *sta) | |||
144 | spin_unlock_bh(&sta->lock); | 144 | spin_unlock_bh(&sta->lock); |
145 | } | 145 | } |
146 | 146 | ||
147 | static int mesh_plink_frame_tx(struct net_device *dev, | 147 | static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, |
148 | enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid, | 148 | enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid, |
149 | __le16 reason) { | 149 | __le16 reason) { |
150 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 150 | struct ieee80211_local *local = sdata->local; |
151 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); | 151 | struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); |
152 | struct ieee80211_mgmt *mgmt; | 152 | struct ieee80211_mgmt *mgmt; |
153 | bool include_plid = false; | 153 | bool include_plid = false; |
@@ -163,10 +163,10 @@ static int mesh_plink_frame_tx(struct net_device *dev, | |||
163 | mgmt = (struct ieee80211_mgmt *) | 163 | mgmt = (struct ieee80211_mgmt *) |
164 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action)); | 164 | skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action)); |
165 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action)); | 165 | memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action)); |
166 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 166 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
167 | IEEE80211_STYPE_ACTION); | 167 | IEEE80211_STYPE_ACTION); |
168 | memcpy(mgmt->da, da, ETH_ALEN); | 168 | memcpy(mgmt->da, da, ETH_ALEN); |
169 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 169 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
170 | /* BSSID is left zeroed, wildcard value */ | 170 | /* BSSID is left zeroed, wildcard value */ |
171 | mgmt->u.action.category = PLINK_CATEGORY; | 171 | mgmt->u.action.category = PLINK_CATEGORY; |
172 | mgmt->u.action.u.plink_action.action_code = action; | 172 | mgmt->u.action.u.plink_action.action_code = action; |
@@ -180,7 +180,7 @@ static int mesh_plink_frame_tx(struct net_device *dev, | |||
180 | /* two-byte status code followed by two-byte AID */ | 180 | /* two-byte status code followed by two-byte AID */ |
181 | memset(pos, 0, 4); | 181 | memset(pos, 0, 4); |
182 | } | 182 | } |
183 | mesh_mgmt_ies_add(skb, dev); | 183 | mesh_mgmt_ies_add(skb, sdata); |
184 | } | 184 | } |
185 | 185 | ||
186 | /* Add Peer Link Management element */ | 186 | /* Add Peer Link Management element */ |
@@ -217,15 +217,14 @@ static int mesh_plink_frame_tx(struct net_device *dev, | |||
217 | memcpy(pos, &reason, 2); | 217 | memcpy(pos, &reason, 2); |
218 | } | 218 | } |
219 | 219 | ||
220 | ieee80211_sta_tx(dev, skb, 0); | 220 | ieee80211_tx_skb(sdata, skb, 0); |
221 | return 0; | 221 | return 0; |
222 | } | 222 | } |
223 | 223 | ||
224 | void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, | 224 | void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct ieee80211_sub_if_data *sdata, |
225 | bool peer_accepting_plinks) | 225 | bool peer_accepting_plinks) |
226 | { | 226 | { |
227 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 227 | struct ieee80211_local *local = sdata->local; |
228 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
229 | struct sta_info *sta; | 228 | struct sta_info *sta; |
230 | 229 | ||
231 | rcu_read_lock(); | 230 | rcu_read_lock(); |
@@ -244,10 +243,10 @@ void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, | |||
244 | } | 243 | } |
245 | 244 | ||
246 | sta->last_rx = jiffies; | 245 | sta->last_rx = jiffies; |
247 | sta->supp_rates[local->hw.conf.channel->band] = rates; | 246 | sta->sta.supp_rates[local->hw.conf.channel->band] = rates; |
248 | if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN && | 247 | if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN && |
249 | sdata->u.sta.accepting_plinks && | 248 | sdata->u.mesh.accepting_plinks && |
250 | sdata->u.sta.mshcfg.auto_open_plinks) | 249 | sdata->u.mesh.mshcfg.auto_open_plinks) |
251 | mesh_plink_open(sta); | 250 | mesh_plink_open(sta); |
252 | 251 | ||
253 | rcu_read_unlock(); | 252 | rcu_read_unlock(); |
@@ -257,7 +256,6 @@ static void mesh_plink_timer(unsigned long data) | |||
257 | { | 256 | { |
258 | struct sta_info *sta; | 257 | struct sta_info *sta; |
259 | __le16 llid, plid, reason; | 258 | __le16 llid, plid, reason; |
260 | struct net_device *dev = NULL; | ||
261 | struct ieee80211_sub_if_data *sdata; | 259 | struct ieee80211_sub_if_data *sdata; |
262 | #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG | 260 | #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG |
263 | DECLARE_MAC_BUF(mac); | 261 | DECLARE_MAC_BUF(mac); |
@@ -277,12 +275,11 @@ static void mesh_plink_timer(unsigned long data) | |||
277 | return; | 275 | return; |
278 | } | 276 | } |
279 | mpl_dbg("Mesh plink timer for %s fired on state %d\n", | 277 | mpl_dbg("Mesh plink timer for %s fired on state %d\n", |
280 | print_mac(mac, sta->addr), sta->plink_state); | 278 | print_mac(mac, sta->sta.addr), sta->plink_state); |
281 | reason = 0; | 279 | reason = 0; |
282 | llid = sta->llid; | 280 | llid = sta->llid; |
283 | plid = sta->plid; | 281 | plid = sta->plid; |
284 | sdata = sta->sdata; | 282 | sdata = sta->sdata; |
285 | dev = sdata->dev; | ||
286 | 283 | ||
287 | switch (sta->plink_state) { | 284 | switch (sta->plink_state) { |
288 | case PLINK_OPN_RCVD: | 285 | case PLINK_OPN_RCVD: |
@@ -291,7 +288,7 @@ static void mesh_plink_timer(unsigned long data) | |||
291 | if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { | 288 | if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { |
292 | u32 rand; | 289 | u32 rand; |
293 | mpl_dbg("Mesh plink for %s (retry, timeout): %d %d\n", | 290 | mpl_dbg("Mesh plink for %s (retry, timeout): %d %d\n", |
294 | print_mac(mac, sta->addr), | 291 | print_mac(mac, sta->sta.addr), |
295 | sta->plink_retries, sta->plink_timeout); | 292 | sta->plink_retries, sta->plink_timeout); |
296 | get_random_bytes(&rand, sizeof(u32)); | 293 | get_random_bytes(&rand, sizeof(u32)); |
297 | sta->plink_timeout = sta->plink_timeout + | 294 | sta->plink_timeout = sta->plink_timeout + |
@@ -299,7 +296,7 @@ static void mesh_plink_timer(unsigned long data) | |||
299 | ++sta->plink_retries; | 296 | ++sta->plink_retries; |
300 | mod_plink_timer(sta, sta->plink_timeout); | 297 | mod_plink_timer(sta, sta->plink_timeout); |
301 | spin_unlock_bh(&sta->lock); | 298 | spin_unlock_bh(&sta->lock); |
302 | mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, | 299 | mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid, |
303 | 0, 0); | 300 | 0, 0); |
304 | break; | 301 | break; |
305 | } | 302 | } |
@@ -312,7 +309,7 @@ static void mesh_plink_timer(unsigned long data) | |||
312 | sta->plink_state = PLINK_HOLDING; | 309 | sta->plink_state = PLINK_HOLDING; |
313 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); | 310 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); |
314 | spin_unlock_bh(&sta->lock); | 311 | spin_unlock_bh(&sta->lock); |
315 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid, | 312 | mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, |
316 | reason); | 313 | reason); |
317 | break; | 314 | break; |
318 | case PLINK_HOLDING: | 315 | case PLINK_HOLDING: |
@@ -355,10 +352,10 @@ int mesh_plink_open(struct sta_info *sta) | |||
355 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); | 352 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); |
356 | spin_unlock_bh(&sta->lock); | 353 | spin_unlock_bh(&sta->lock); |
357 | mpl_dbg("Mesh plink: starting establishment with %s\n", | 354 | mpl_dbg("Mesh plink: starting establishment with %s\n", |
358 | print_mac(mac, sta->addr)); | 355 | print_mac(mac, sta->sta.addr)); |
359 | 356 | ||
360 | return mesh_plink_frame_tx(sdata->dev, PLINK_OPEN, | 357 | return mesh_plink_frame_tx(sdata, PLINK_OPEN, |
361 | sta->addr, llid, 0, 0); | 358 | sta->sta.addr, llid, 0, 0); |
362 | } | 359 | } |
363 | 360 | ||
364 | void mesh_plink_block(struct sta_info *sta) | 361 | void mesh_plink_block(struct sta_info *sta) |
@@ -382,7 +379,7 @@ int mesh_plink_close(struct sta_info *sta) | |||
382 | #endif | 379 | #endif |
383 | 380 | ||
384 | mpl_dbg("Mesh plink: closing link with %s\n", | 381 | mpl_dbg("Mesh plink: closing link with %s\n", |
385 | print_mac(mac, sta->addr)); | 382 | print_mac(mac, sta->sta.addr)); |
386 | spin_lock_bh(&sta->lock); | 383 | spin_lock_bh(&sta->lock); |
387 | sta->reason = cpu_to_le16(MESH_LINK_CANCELLED); | 384 | sta->reason = cpu_to_le16(MESH_LINK_CANCELLED); |
388 | reason = sta->reason; | 385 | reason = sta->reason; |
@@ -403,15 +400,14 @@ int mesh_plink_close(struct sta_info *sta) | |||
403 | llid = sta->llid; | 400 | llid = sta->llid; |
404 | plid = sta->plid; | 401 | plid = sta->plid; |
405 | spin_unlock_bh(&sta->lock); | 402 | spin_unlock_bh(&sta->lock); |
406 | mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid, | 403 | mesh_plink_frame_tx(sta->sdata, PLINK_CLOSE, sta->sta.addr, llid, |
407 | plid, reason); | 404 | plid, reason); |
408 | return 0; | 405 | return 0; |
409 | } | 406 | } |
410 | 407 | ||
411 | void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | 408 | void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, |
412 | size_t len, struct ieee80211_rx_status *rx_status) | 409 | size_t len, struct ieee80211_rx_status *rx_status) |
413 | { | 410 | { |
414 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
415 | struct ieee80211_local *local = sdata->local; | 411 | struct ieee80211_local *local = sdata->local; |
416 | struct ieee802_11_elems elems; | 412 | struct ieee802_11_elems elems; |
417 | struct sta_info *sta; | 413 | struct sta_info *sta; |
@@ -425,6 +421,10 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
425 | DECLARE_MAC_BUF(mac); | 421 | DECLARE_MAC_BUF(mac); |
426 | #endif | 422 | #endif |
427 | 423 | ||
424 | /* need action_code, aux */ | ||
425 | if (len < IEEE80211_MIN_ACTION_SIZE + 3) | ||
426 | return; | ||
427 | |||
428 | if (is_multicast_ether_addr(mgmt->da)) { | 428 | if (is_multicast_ether_addr(mgmt->da)) { |
429 | mpl_dbg("Mesh plink: ignore frame from multicast address"); | 429 | mpl_dbg("Mesh plink: ignore frame from multicast address"); |
430 | return; | 430 | return; |
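The peer-link state-machine hunks that follow all share one locking shape: the link identifiers are copied while sta->lock is held, the lock is dropped, and only then is the peer-link frame transmitted. Condensed from the hunks below, with names as in this patch and the logic unchanged:

/* Locking shape used throughout mesh_rx_plink_frame() below (sketch only):
 *
 *   spin_lock_bh(&sta->lock);
 *   llid = sta->llid;            // copy what the frame needs ...
 *   plid = sta->plid;
 *   spin_unlock_bh(&sta->lock);  // ... drop the lock ...
 *   mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr,
 *                       llid, plid, reason);   // ... and only then transmit
 */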
@@ -478,7 +478,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
478 | 478 | ||
479 | /* Now we will figure out the appropriate event... */ | 479 | /* Now we will figure out the appropriate event... */ |
480 | event = PLINK_UNDEFINED; | 480 | event = PLINK_UNDEFINED; |
481 | if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, dev))) { | 481 | if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) { |
482 | switch (ftype) { | 482 | switch (ftype) { |
483 | case PLINK_OPEN: | 483 | case PLINK_OPEN: |
484 | event = OPN_RJCT; | 484 | event = OPN_RJCT; |
@@ -577,9 +577,9 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
577 | sta->llid = llid; | 577 | sta->llid = llid; |
578 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); | 578 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); |
579 | spin_unlock_bh(&sta->lock); | 579 | spin_unlock_bh(&sta->lock); |
580 | mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, | 580 | mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid, |
581 | 0, 0); | 581 | 0, 0); |
582 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, | 582 | mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, |
583 | llid, plid, 0); | 583 | llid, plid, 0); |
584 | break; | 584 | break; |
585 | default: | 585 | default: |
@@ -604,7 +604,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
604 | 604 | ||
605 | llid = sta->llid; | 605 | llid = sta->llid; |
606 | spin_unlock_bh(&sta->lock); | 606 | spin_unlock_bh(&sta->lock); |
607 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 607 | mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, |
608 | plid, reason); | 608 | plid, reason); |
609 | break; | 609 | break; |
610 | case OPN_ACPT: | 610 | case OPN_ACPT: |
@@ -613,7 +613,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
613 | sta->plid = plid; | 613 | sta->plid = plid; |
614 | llid = sta->llid; | 614 | llid = sta->llid; |
615 | spin_unlock_bh(&sta->lock); | 615 | spin_unlock_bh(&sta->lock); |
616 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | 616 | mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, |
617 | plid, 0); | 617 | plid, 0); |
618 | break; | 618 | break; |
619 | case CNF_ACPT: | 619 | case CNF_ACPT: |
@@ -646,13 +646,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
646 | 646 | ||
647 | llid = sta->llid; | 647 | llid = sta->llid; |
648 | spin_unlock_bh(&sta->lock); | 648 | spin_unlock_bh(&sta->lock); |
649 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 649 | mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, |
650 | plid, reason); | 650 | plid, reason); |
651 | break; | 651 | break; |
652 | case OPN_ACPT: | 652 | case OPN_ACPT: |
653 | llid = sta->llid; | 653 | llid = sta->llid; |
654 | spin_unlock_bh(&sta->lock); | 654 | spin_unlock_bh(&sta->lock); |
655 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | 655 | mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, |
656 | plid, 0); | 656 | plid, 0); |
657 | break; | 657 | break; |
658 | case CNF_ACPT: | 658 | case CNF_ACPT: |
@@ -661,7 +661,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
661 | mesh_plink_inc_estab_count(sdata); | 661 | mesh_plink_inc_estab_count(sdata); |
662 | spin_unlock_bh(&sta->lock); | 662 | spin_unlock_bh(&sta->lock); |
663 | mpl_dbg("Mesh plink with %s ESTABLISHED\n", | 663 | mpl_dbg("Mesh plink with %s ESTABLISHED\n", |
664 | print_mac(mac, sta->addr)); | 664 | print_mac(mac, sta->sta.addr)); |
665 | break; | 665 | break; |
666 | default: | 666 | default: |
667 | spin_unlock_bh(&sta->lock); | 667 | spin_unlock_bh(&sta->lock); |
@@ -685,7 +685,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
685 | 685 | ||
686 | llid = sta->llid; | 686 | llid = sta->llid; |
687 | spin_unlock_bh(&sta->lock); | 687 | spin_unlock_bh(&sta->lock); |
688 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 688 | mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, |
689 | plid, reason); | 689 | plid, reason); |
690 | break; | 690 | break; |
691 | case OPN_ACPT: | 691 | case OPN_ACPT: |
@@ -694,8 +694,8 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
694 | mesh_plink_inc_estab_count(sdata); | 694 | mesh_plink_inc_estab_count(sdata); |
695 | spin_unlock_bh(&sta->lock); | 695 | spin_unlock_bh(&sta->lock); |
696 | mpl_dbg("Mesh plink with %s ESTABLISHED\n", | 696 | mpl_dbg("Mesh plink with %s ESTABLISHED\n", |
697 | print_mac(mac, sta->addr)); | 697 | print_mac(mac, sta->sta.addr)); |
698 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | 698 | mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, |
699 | plid, 0); | 699 | plid, 0); |
700 | break; | 700 | break; |
701 | default: | 701 | default: |
@@ -714,13 +714,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
714 | llid = sta->llid; | 714 | llid = sta->llid; |
715 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); | 715 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); |
716 | spin_unlock_bh(&sta->lock); | 716 | spin_unlock_bh(&sta->lock); |
717 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 717 | mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, |
718 | plid, reason); | 718 | plid, reason); |
719 | break; | 719 | break; |
720 | case OPN_ACPT: | 720 | case OPN_ACPT: |
721 | llid = sta->llid; | 721 | llid = sta->llid; |
722 | spin_unlock_bh(&sta->lock); | 722 | spin_unlock_bh(&sta->lock); |
723 | mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, | 723 | mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, |
724 | plid, 0); | 724 | plid, 0); |
725 | break; | 725 | break; |
726 | default: | 726 | default: |
@@ -743,8 +743,8 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, | |||
743 | llid = sta->llid; | 743 | llid = sta->llid; |
744 | reason = sta->reason; | 744 | reason = sta->reason; |
745 | spin_unlock_bh(&sta->lock); | 745 | spin_unlock_bh(&sta->lock); |
746 | mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, | 746 | mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, |
747 | plid, reason); | 747 | llid, plid, reason); |
748 | break; | 748 | break; |
749 | default: | 749 | default: |
750 | spin_unlock_bh(&sta->lock); | 750 | spin_unlock_bh(&sta->lock); |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 902cac1bd246..e859a0ab6162 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -11,11 +11,6 @@ | |||
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | /* TODO: | ||
15 | * order BSS list by RSSI(?) ("quality of AP") | ||
16 | * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE, | ||
17 | * SSID) | ||
18 | */ | ||
19 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
20 | #include <linux/if_ether.h> | 15 | #include <linux/if_ether.h> |
21 | #include <linux/skbuff.h> | 16 | #include <linux/skbuff.h> |
@@ -26,607 +21,184 @@ | |||
26 | #include <linux/etherdevice.h> | 21 | #include <linux/etherdevice.h> |
27 | #include <linux/rtnetlink.h> | 22 | #include <linux/rtnetlink.h> |
28 | #include <net/iw_handler.h> | 23 | #include <net/iw_handler.h> |
29 | #include <asm/types.h> | ||
30 | |||
31 | #include <net/mac80211.h> | 24 | #include <net/mac80211.h> |
25 | #include <asm/unaligned.h> | ||
26 | |||
32 | #include "ieee80211_i.h" | 27 | #include "ieee80211_i.h" |
33 | #include "rate.h" | 28 | #include "rate.h" |
34 | #include "led.h" | 29 | #include "led.h" |
35 | #include "mesh.h" | ||
36 | 30 | ||
31 | #define IEEE80211_ASSOC_SCANS_MAX_TRIES 2 | ||
37 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) | 32 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) |
38 | #define IEEE80211_AUTH_MAX_TRIES 3 | 33 | #define IEEE80211_AUTH_MAX_TRIES 3 |
39 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) | 34 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) |
40 | #define IEEE80211_ASSOC_MAX_TRIES 3 | 35 | #define IEEE80211_ASSOC_MAX_TRIES 3 |
41 | #define IEEE80211_MONITORING_INTERVAL (2 * HZ) | 36 | #define IEEE80211_MONITORING_INTERVAL (2 * HZ) |
42 | #define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) | ||
43 | #define IEEE80211_PROBE_INTERVAL (60 * HZ) | 37 | #define IEEE80211_PROBE_INTERVAL (60 * HZ) |
44 | #define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) | 38 | #define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) |
45 | #define IEEE80211_SCAN_INTERVAL (2 * HZ) | 39 | #define IEEE80211_SCAN_INTERVAL (2 * HZ) |
46 | #define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) | 40 | #define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) |
47 | #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) | 41 | #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) |
48 | 42 | ||
49 | #define IEEE80211_PROBE_DELAY (HZ / 33) | ||
50 | #define IEEE80211_CHANNEL_TIME (HZ / 33) | ||
51 | #define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5) | ||
52 | #define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ) | ||
53 | #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) | 43 | #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) |
54 | #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) | 44 | #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) |
55 | #define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) | ||
56 | 45 | ||
57 | #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 | 46 | #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 |
58 | 47 | ||
59 | 48 | ||
60 | #define ERP_INFO_USE_PROTECTION BIT(1) | 49 | /* utils */ |
61 | |||
62 | /* mgmt header + 1 byte action code */ | ||
63 | #define IEEE80211_MIN_ACTION_SIZE (24 + 1) | ||
64 | |||
65 | #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 | ||
66 | #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C | ||
67 | #define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 | ||
68 | #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 | ||
69 | #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 | ||
70 | |||
71 | /* next values represent the buffer size for A-MPDU frame. | ||
72 | * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) */ | ||
73 | #define IEEE80211_MIN_AMPDU_BUF 0x8 | ||
74 | #define IEEE80211_MAX_AMPDU_BUF 0x40 | ||
75 | |||
76 | static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, | ||
77 | u8 *ssid, size_t ssid_len); | ||
78 | static struct ieee80211_sta_bss * | ||
79 | ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq, | ||
80 | u8 *ssid, u8 ssid_len); | ||
81 | static void ieee80211_rx_bss_put(struct ieee80211_local *local, | ||
82 | struct ieee80211_sta_bss *bss); | ||
83 | static int ieee80211_sta_find_ibss(struct net_device *dev, | ||
84 | struct ieee80211_if_sta *ifsta); | ||
85 | static int ieee80211_sta_wep_configured(struct net_device *dev); | ||
86 | static int ieee80211_sta_start_scan(struct net_device *dev, | ||
87 | u8 *ssid, size_t ssid_len); | ||
88 | static int ieee80211_sta_config_auth(struct net_device *dev, | ||
89 | struct ieee80211_if_sta *ifsta); | ||
90 | static void sta_rx_agg_session_timer_expired(unsigned long data); | ||
91 | |||
92 | |||
93 | void ieee802_11_parse_elems(u8 *start, size_t len, | ||
94 | struct ieee802_11_elems *elems) | ||
95 | { | ||
96 | size_t left = len; | ||
97 | u8 *pos = start; | ||
98 | |||
99 | memset(elems, 0, sizeof(*elems)); | ||
100 | |||
101 | while (left >= 2) { | ||
102 | u8 id, elen; | ||
103 | |||
104 | id = *pos++; | ||
105 | elen = *pos++; | ||
106 | left -= 2; | ||
107 | |||
108 | if (elen > left) | ||
109 | return; | ||
110 | |||
111 | switch (id) { | ||
112 | case WLAN_EID_SSID: | ||
113 | elems->ssid = pos; | ||
114 | elems->ssid_len = elen; | ||
115 | break; | ||
116 | case WLAN_EID_SUPP_RATES: | ||
117 | elems->supp_rates = pos; | ||
118 | elems->supp_rates_len = elen; | ||
119 | break; | ||
120 | case WLAN_EID_FH_PARAMS: | ||
121 | elems->fh_params = pos; | ||
122 | elems->fh_params_len = elen; | ||
123 | break; | ||
124 | case WLAN_EID_DS_PARAMS: | ||
125 | elems->ds_params = pos; | ||
126 | elems->ds_params_len = elen; | ||
127 | break; | ||
128 | case WLAN_EID_CF_PARAMS: | ||
129 | elems->cf_params = pos; | ||
130 | elems->cf_params_len = elen; | ||
131 | break; | ||
132 | case WLAN_EID_TIM: | ||
133 | elems->tim = pos; | ||
134 | elems->tim_len = elen; | ||
135 | break; | ||
136 | case WLAN_EID_IBSS_PARAMS: | ||
137 | elems->ibss_params = pos; | ||
138 | elems->ibss_params_len = elen; | ||
139 | break; | ||
140 | case WLAN_EID_CHALLENGE: | ||
141 | elems->challenge = pos; | ||
142 | elems->challenge_len = elen; | ||
143 | break; | ||
144 | case WLAN_EID_WPA: | ||
145 | if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && | ||
146 | pos[2] == 0xf2) { | ||
147 | /* Microsoft OUI (00:50:F2) */ | ||
148 | if (pos[3] == 1) { | ||
149 | /* OUI Type 1 - WPA IE */ | ||
150 | elems->wpa = pos; | ||
151 | elems->wpa_len = elen; | ||
152 | } else if (elen >= 5 && pos[3] == 2) { | ||
153 | if (pos[4] == 0) { | ||
154 | elems->wmm_info = pos; | ||
155 | elems->wmm_info_len = elen; | ||
156 | } else if (pos[4] == 1) { | ||
157 | elems->wmm_param = pos; | ||
158 | elems->wmm_param_len = elen; | ||
159 | } | ||
160 | } | ||
161 | } | ||
162 | break; | ||
163 | case WLAN_EID_RSN: | ||
164 | elems->rsn = pos; | ||
165 | elems->rsn_len = elen; | ||
166 | break; | ||
167 | case WLAN_EID_ERP_INFO: | ||
168 | elems->erp_info = pos; | ||
169 | elems->erp_info_len = elen; | ||
170 | break; | ||
171 | case WLAN_EID_EXT_SUPP_RATES: | ||
172 | elems->ext_supp_rates = pos; | ||
173 | elems->ext_supp_rates_len = elen; | ||
174 | break; | ||
175 | case WLAN_EID_HT_CAPABILITY: | ||
176 | elems->ht_cap_elem = pos; | ||
177 | elems->ht_cap_elem_len = elen; | ||
178 | break; | ||
179 | case WLAN_EID_HT_EXTRA_INFO: | ||
180 | elems->ht_info_elem = pos; | ||
181 | elems->ht_info_elem_len = elen; | ||
182 | break; | ||
183 | case WLAN_EID_MESH_ID: | ||
184 | elems->mesh_id = pos; | ||
185 | elems->mesh_id_len = elen; | ||
186 | break; | ||
187 | case WLAN_EID_MESH_CONFIG: | ||
188 | elems->mesh_config = pos; | ||
189 | elems->mesh_config_len = elen; | ||
190 | break; | ||
191 | case WLAN_EID_PEER_LINK: | ||
192 | elems->peer_link = pos; | ||
193 | elems->peer_link_len = elen; | ||
194 | break; | ||
195 | case WLAN_EID_PREQ: | ||
196 | elems->preq = pos; | ||
197 | elems->preq_len = elen; | ||
198 | break; | ||
199 | case WLAN_EID_PREP: | ||
200 | elems->prep = pos; | ||
201 | elems->prep_len = elen; | ||
202 | break; | ||
203 | case WLAN_EID_PERR: | ||
204 | elems->perr = pos; | ||
205 | elems->perr_len = elen; | ||
206 | break; | ||
207 | case WLAN_EID_CHANNEL_SWITCH: | ||
208 | elems->ch_switch_elem = pos; | ||
209 | elems->ch_switch_elem_len = elen; | ||
210 | break; | ||
211 | case WLAN_EID_QUIET: | ||
212 | if (!elems->quiet_elem) { | ||
213 | elems->quiet_elem = pos; | ||
214 | elems->quiet_elem_len = elen; | ||
215 | } | ||
216 | elems->num_of_quiet_elem++; | ||
217 | break; | ||
218 | case WLAN_EID_COUNTRY: | ||
219 | elems->country_elem = pos; | ||
220 | elems->country_elem_len = elen; | ||
221 | break; | ||
222 | case WLAN_EID_PWR_CONSTRAINT: | ||
223 | elems->pwr_constr_elem = pos; | ||
224 | elems->pwr_constr_elem_len = elen; | ||
225 | break; | ||
226 | default: | ||
227 | break; | ||
228 | } | ||
229 | |||
230 | left -= elen; | ||
231 | pos += elen; | ||
232 | } | ||
233 | } | ||
234 | |||
235 | |||
236 | static int ecw2cw(int ecw) | 50 | static int ecw2cw(int ecw) |
237 | { | 51 | { |
238 | return (1 << ecw) - 1; | 52 | return (1 << ecw) - 1; |
239 | } | 53 | } |
240 | 54 | ||
241 | 55 | static u8 *ieee80211_bss_get_ie(struct ieee80211_bss *bss, u8 ie) | |
242 | static void ieee80211_sta_def_wmm_params(struct net_device *dev, | ||
243 | struct ieee80211_sta_bss *bss, | ||
244 | int ibss) | ||
245 | { | 56 | { |
246 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 57 | u8 *end, *pos; |
247 | struct ieee80211_local *local = sdata->local; | ||
248 | int i, have_higher_than_11mbit = 0; | ||
249 | |||
250 | 58 | ||
251 | /* cf. IEEE 802.11 9.2.12 */ | 59 | pos = bss->ies; |
252 | for (i = 0; i < bss->supp_rates_len; i++) | 60 | if (pos == NULL) |
253 | if ((bss->supp_rates[i] & 0x7f) * 5 > 110) | 61 | return NULL; |
254 | have_higher_than_11mbit = 1; | 62 | end = pos + bss->ies_len; |
255 | |||
256 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && | ||
257 | have_higher_than_11mbit) | ||
258 | sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; | ||
259 | else | ||
260 | sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; | ||
261 | |||
262 | |||
263 | if (local->ops->conf_tx) { | ||
264 | struct ieee80211_tx_queue_params qparam; | ||
265 | |||
266 | memset(&qparam, 0, sizeof(qparam)); | ||
267 | |||
268 | qparam.aifs = 2; | ||
269 | |||
270 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && | ||
271 | !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)) | ||
272 | qparam.cw_min = 31; | ||
273 | else | ||
274 | qparam.cw_min = 15; | ||
275 | |||
276 | qparam.cw_max = 1023; | ||
277 | qparam.txop = 0; | ||
278 | |||
279 | for (i = 0; i < local_to_hw(local)->queues; i++) | ||
280 | local->ops->conf_tx(local_to_hw(local), i, &qparam); | ||
281 | } | ||
282 | } | ||
283 | |||
284 | static void ieee80211_sta_wmm_params(struct net_device *dev, | ||
285 | struct ieee80211_if_sta *ifsta, | ||
286 | u8 *wmm_param, size_t wmm_param_len) | ||
287 | { | ||
288 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
289 | struct ieee80211_tx_queue_params params; | ||
290 | size_t left; | ||
291 | int count; | ||
292 | u8 *pos; | ||
293 | |||
294 | if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED)) | ||
295 | return; | ||
296 | |||
297 | if (!wmm_param) | ||
298 | return; | ||
299 | |||
300 | if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) | ||
301 | return; | ||
302 | count = wmm_param[6] & 0x0f; | ||
303 | if (count == ifsta->wmm_last_param_set) | ||
304 | return; | ||
305 | ifsta->wmm_last_param_set = count; | ||
306 | |||
307 | pos = wmm_param + 8; | ||
308 | left = wmm_param_len - 8; | ||
309 | |||
310 | memset(¶ms, 0, sizeof(params)); | ||
311 | |||
312 | if (!local->ops->conf_tx) | ||
313 | return; | ||
314 | |||
315 | local->wmm_acm = 0; | ||
316 | for (; left >= 4; left -= 4, pos += 4) { | ||
317 | int aci = (pos[0] >> 5) & 0x03; | ||
318 | int acm = (pos[0] >> 4) & 0x01; | ||
319 | int queue; | ||
320 | 63 | ||
321 | switch (aci) { | 64 | while (pos + 1 < end) { |
322 | case 1: | 65 | if (pos + 2 + pos[1] > end) |
323 | queue = 3; | ||
324 | if (acm) | ||
325 | local->wmm_acm |= BIT(0) | BIT(3); | ||
326 | break; | ||
327 | case 2: | ||
328 | queue = 1; | ||
329 | if (acm) | ||
330 | local->wmm_acm |= BIT(4) | BIT(5); | ||
331 | break; | ||
332 | case 3: | ||
333 | queue = 0; | ||
334 | if (acm) | ||
335 | local->wmm_acm |= BIT(6) | BIT(7); | ||
336 | break; | 66 | break; |
337 | case 0: | 67 | if (pos[0] == ie) |
338 | default: | 68 | return pos; |
339 | queue = 2; | 69 | pos += 2 + pos[1]; |
340 | if (acm) | ||
341 | local->wmm_acm |= BIT(1) | BIT(2); | ||
342 | break; | ||
343 | } | ||
344 | |||
345 | params.aifs = pos[0] & 0x0f; | ||
346 | params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); | ||
347 | params.cw_min = ecw2cw(pos[1] & 0x0f); | ||
348 | params.txop = get_unaligned_le16(pos + 2); | ||
349 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
350 | printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " | ||
351 | "cWmin=%d cWmax=%d txop=%d\n", | ||
352 | dev->name, queue, aci, acm, params.aifs, params.cw_min, | ||
353 | params.cw_max, params.txop); | ||
354 | #endif | ||
355 | /* TODO: handle ACM (block TX, fallback to next lowest allowed | ||
356 | * AC for now) */ | ||
357 | if (local->ops->conf_tx(local_to_hw(local), queue, ¶ms)) { | ||
358 | printk(KERN_DEBUG "%s: failed to set TX queue " | ||
359 | "parameters for queue %d\n", dev->name, queue); | ||
360 | } | ||
361 | } | ||
362 | } | ||
363 | |||
364 | static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata, | ||
365 | bool use_protection, | ||
366 | bool use_short_preamble) | ||
367 | { | ||
368 | struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf; | ||
369 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
370 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
371 | DECLARE_MAC_BUF(mac); | ||
372 | #endif | ||
373 | u32 changed = 0; | ||
374 | |||
375 | if (use_protection != bss_conf->use_cts_prot) { | ||
376 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
377 | if (net_ratelimit()) { | ||
378 | printk(KERN_DEBUG "%s: CTS protection %s (BSSID=" | ||
379 | "%s)\n", | ||
380 | sdata->dev->name, | ||
381 | use_protection ? "enabled" : "disabled", | ||
382 | print_mac(mac, ifsta->bssid)); | ||
383 | } | ||
384 | #endif | ||
385 | bss_conf->use_cts_prot = use_protection; | ||
386 | changed |= BSS_CHANGED_ERP_CTS_PROT; | ||
387 | } | 70 | } |
388 | 71 | ||
389 | if (use_short_preamble != bss_conf->use_short_preamble) { | 72 | return NULL; |
390 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
391 | if (net_ratelimit()) { | ||
392 | printk(KERN_DEBUG "%s: switched to %s barker preamble" | ||
393 | " (BSSID=%s)\n", | ||
394 | sdata->dev->name, | ||
395 | use_short_preamble ? "short" : "long", | ||
396 | print_mac(mac, ifsta->bssid)); | ||
397 | } | ||
398 | #endif | ||
399 | bss_conf->use_short_preamble = use_short_preamble; | ||
400 | changed |= BSS_CHANGED_ERP_PREAMBLE; | ||
401 | } | ||
402 | |||
403 | return changed; | ||
404 | } | 73 | } |
405 | 74 | ||
406 | static u32 ieee80211_handle_erp_ie(struct ieee80211_sub_if_data *sdata, | 75 | static int ieee80211_compatible_rates(struct ieee80211_bss *bss, |
407 | u8 erp_value) | 76 | struct ieee80211_supported_band *sband, |
408 | { | 77 | u64 *rates) |
409 | bool use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0; | ||
410 | bool use_short_preamble = (erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0; | ||
411 | |||
412 | return ieee80211_handle_protect_preamb(sdata, | ||
413 | use_protection, use_short_preamble); | ||
414 | } | ||
415 | |||
416 | static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, | ||
417 | struct ieee80211_sta_bss *bss) | ||
418 | { | 78 | { |
419 | u32 changed = 0; | 79 | int i, j, count; |
80 | *rates = 0; | ||
81 | count = 0; | ||
82 | for (i = 0; i < bss->supp_rates_len; i++) { | ||
83 | int rate = (bss->supp_rates[i] & 0x7F) * 5; | ||
420 | 84 | ||
421 | if (bss->has_erp_value) | 85 | for (j = 0; j < sband->n_bitrates; j++) |
422 | changed |= ieee80211_handle_erp_ie(sdata, bss->erp_value); | 86 | if (sband->bitrates[j].bitrate == rate) { |
423 | else { | 87 | *rates |= BIT(j); |
424 | u16 capab = bss->capability; | 88 | count++; |
425 | changed |= ieee80211_handle_protect_preamb(sdata, false, | 89 | break; |
426 | (capab & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0); | 90 | } |
427 | } | 91 | } |
428 | 92 | ||
429 | return changed; | 93 | return count; |
430 | } | ||
431 | |||
432 | int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie, | ||
433 | struct ieee80211_ht_info *ht_info) | ||
434 | { | ||
435 | |||
436 | if (ht_info == NULL) | ||
437 | return -EINVAL; | ||
438 | |||
439 | memset(ht_info, 0, sizeof(*ht_info)); | ||
440 | |||
441 | if (ht_cap_ie) { | ||
442 | u8 ampdu_info = ht_cap_ie->ampdu_params_info; | ||
443 | |||
444 | ht_info->ht_supported = 1; | ||
445 | ht_info->cap = le16_to_cpu(ht_cap_ie->cap_info); | ||
446 | ht_info->ampdu_factor = | ||
447 | ampdu_info & IEEE80211_HT_CAP_AMPDU_FACTOR; | ||
448 | ht_info->ampdu_density = | ||
449 | (ampdu_info & IEEE80211_HT_CAP_AMPDU_DENSITY) >> 2; | ||
450 | memcpy(ht_info->supp_mcs_set, ht_cap_ie->supp_mcs_set, 16); | ||
451 | } else | ||
452 | ht_info->ht_supported = 0; | ||
453 | |||
454 | return 0; | ||
455 | } | 94 | } |
456 | 95 | ||
457 | int ieee80211_ht_addt_info_ie_to_ht_bss_info( | 96 | /* also used by mesh code */ |
458 | struct ieee80211_ht_addt_info *ht_add_info_ie, | 97 | u64 ieee80211_sta_get_rates(struct ieee80211_local *local, |
459 | struct ieee80211_ht_bss_info *bss_info) | 98 | struct ieee802_11_elems *elems, |
99 | enum ieee80211_band band) | ||
460 | { | 100 | { |
461 | if (bss_info == NULL) | 101 | struct ieee80211_supported_band *sband; |
462 | return -EINVAL; | 102 | struct ieee80211_rate *bitrates; |
463 | 103 | size_t num_rates; | |
464 | memset(bss_info, 0, sizeof(*bss_info)); | 104 | u64 supp_rates; |
465 | 105 | int i, j; | |
466 | if (ht_add_info_ie) { | 106 | sband = local->hw.wiphy->bands[band]; |
467 | u16 op_mode; | ||
468 | op_mode = le16_to_cpu(ht_add_info_ie->operation_mode); | ||
469 | 107 | ||
470 | bss_info->primary_channel = ht_add_info_ie->control_chan; | 108 | if (!sband) { |
471 | bss_info->bss_cap = ht_add_info_ie->ht_param; | 109 | WARN_ON(1); |
472 | bss_info->bss_op_mode = (u8)(op_mode & 0xff); | 110 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
473 | } | 111 | } |
474 | 112 | ||
475 | return 0; | 113 | bitrates = sband->bitrates; |
114 | num_rates = sband->n_bitrates; | ||
115 | supp_rates = 0; | ||
116 | for (i = 0; i < elems->supp_rates_len + | ||
117 | elems->ext_supp_rates_len; i++) { | ||
118 | u8 rate = 0; | ||
119 | int own_rate; | ||
120 | if (i < elems->supp_rates_len) | ||
121 | rate = elems->supp_rates[i]; | ||
122 | else if (elems->ext_supp_rates) | ||
123 | rate = elems->ext_supp_rates | ||
124 | [i - elems->supp_rates_len]; | ||
125 | own_rate = 5 * (rate & 0x7f); | ||
126 | for (j = 0; j < num_rates; j++) | ||
127 | if (bitrates[j].bitrate == own_rate) | ||
128 | supp_rates |= BIT(j); | ||
129 | } | ||
130 | return supp_rates; | ||
476 | } | 131 | } |
477 | 132 | ||
478 | static void ieee80211_sta_send_associnfo(struct net_device *dev, | 133 | /* frame sending functions */ |
479 | struct ieee80211_if_sta *ifsta) | 134 | |
135 | /* also used by scanning code */ | ||
136 | void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, | ||
137 | u8 *ssid, size_t ssid_len) | ||
480 | { | 138 | { |
481 | char *buf; | 139 | struct ieee80211_local *local = sdata->local; |
482 | size_t len; | 140 | struct ieee80211_supported_band *sband; |
141 | struct sk_buff *skb; | ||
142 | struct ieee80211_mgmt *mgmt; | ||
143 | u8 *pos, *supp_rates, *esupp_rates = NULL; | ||
483 | int i; | 144 | int i; |
484 | union iwreq_data wrqu; | ||
485 | |||
486 | if (!ifsta->assocreq_ies && !ifsta->assocresp_ies) | ||
487 | return; | ||
488 | 145 | ||
489 | buf = kmalloc(50 + 2 * (ifsta->assocreq_ies_len + | 146 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200); |
490 | ifsta->assocresp_ies_len), GFP_KERNEL); | 147 | if (!skb) { |
491 | if (!buf) | 148 | printk(KERN_DEBUG "%s: failed to allocate buffer for probe " |
149 | "request\n", sdata->dev->name); | ||
492 | return; | 150 | return; |
493 | |||
494 | len = sprintf(buf, "ASSOCINFO("); | ||
495 | if (ifsta->assocreq_ies) { | ||
496 | len += sprintf(buf + len, "ReqIEs="); | ||
497 | for (i = 0; i < ifsta->assocreq_ies_len; i++) { | ||
498 | len += sprintf(buf + len, "%02x", | ||
499 | ifsta->assocreq_ies[i]); | ||
500 | } | ||
501 | } | 151 | } |
502 | if (ifsta->assocresp_ies) { | 152 | skb_reserve(skb, local->hw.extra_tx_headroom); |
503 | if (ifsta->assocreq_ies) | ||
504 | len += sprintf(buf + len, " "); | ||
505 | len += sprintf(buf + len, "RespIEs="); | ||
506 | for (i = 0; i < ifsta->assocresp_ies_len; i++) { | ||
507 | len += sprintf(buf + len, "%02x", | ||
508 | ifsta->assocresp_ies[i]); | ||
509 | } | ||
510 | } | ||
511 | len += sprintf(buf + len, ")"); | ||
512 | 153 | ||
513 | if (len > IW_CUSTOM_MAX) { | 154 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
514 | len = sprintf(buf, "ASSOCRESPIE="); | 155 | memset(mgmt, 0, 24); |
515 | for (i = 0; i < ifsta->assocresp_ies_len; i++) { | 156 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
516 | len += sprintf(buf + len, "%02x", | 157 | IEEE80211_STYPE_PROBE_REQ); |
517 | ifsta->assocresp_ies[i]); | 158 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
518 | } | 159 | if (dst) { |
160 | memcpy(mgmt->da, dst, ETH_ALEN); | ||
161 | memcpy(mgmt->bssid, dst, ETH_ALEN); | ||
162 | } else { | ||
163 | memset(mgmt->da, 0xff, ETH_ALEN); | ||
164 | memset(mgmt->bssid, 0xff, ETH_ALEN); | ||
519 | } | 165 | } |
166 | pos = skb_put(skb, 2 + ssid_len); | ||
167 | *pos++ = WLAN_EID_SSID; | ||
168 | *pos++ = ssid_len; | ||
169 | memcpy(pos, ssid, ssid_len); | ||
520 | 170 | ||
521 | memset(&wrqu, 0, sizeof(wrqu)); | 171 | supp_rates = skb_put(skb, 2); |
522 | wrqu.data.length = len; | 172 | supp_rates[0] = WLAN_EID_SUPP_RATES; |
523 | wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); | 173 | supp_rates[1] = 0; |
524 | 174 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | |
525 | kfree(buf); | ||
526 | } | ||
527 | |||
528 | |||
529 | static void ieee80211_set_associated(struct net_device *dev, | ||
530 | struct ieee80211_if_sta *ifsta, | ||
531 | bool assoc) | ||
532 | { | ||
533 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
534 | struct ieee80211_local *local = sdata->local; | ||
535 | struct ieee80211_conf *conf = &local_to_hw(local)->conf; | ||
536 | union iwreq_data wrqu; | ||
537 | u32 changed = BSS_CHANGED_ASSOC; | ||
538 | |||
539 | if (assoc) { | ||
540 | struct ieee80211_sta_bss *bss; | ||
541 | |||
542 | ifsta->flags |= IEEE80211_STA_ASSOCIATED; | ||
543 | |||
544 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | ||
545 | return; | ||
546 | |||
547 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, | ||
548 | conf->channel->center_freq, | ||
549 | ifsta->ssid, ifsta->ssid_len); | ||
550 | if (bss) { | ||
551 | /* set timing information */ | ||
552 | sdata->bss_conf.beacon_int = bss->beacon_int; | ||
553 | sdata->bss_conf.timestamp = bss->timestamp; | ||
554 | sdata->bss_conf.dtim_period = bss->dtim_period; | ||
555 | |||
556 | changed |= ieee80211_handle_bss_capability(sdata, bss); | ||
557 | |||
558 | ieee80211_rx_bss_put(local, bss); | ||
559 | } | ||
560 | 175 | ||
561 | if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) { | 176 | for (i = 0; i < sband->n_bitrates; i++) { |
562 | changed |= BSS_CHANGED_HT; | 177 | struct ieee80211_rate *rate = &sband->bitrates[i]; |
563 | sdata->bss_conf.assoc_ht = 1; | 178 | if (esupp_rates) { |
564 | sdata->bss_conf.ht_conf = &conf->ht_conf; | 179 | pos = skb_put(skb, 1); |
565 | sdata->bss_conf.ht_bss_conf = &conf->ht_bss_conf; | 180 | esupp_rates[1]++; |
181 | } else if (supp_rates[1] == 8) { | ||
182 | esupp_rates = skb_put(skb, 3); | ||
183 | esupp_rates[0] = WLAN_EID_EXT_SUPP_RATES; | ||
184 | esupp_rates[1] = 1; | ||
185 | pos = &esupp_rates[2]; | ||
186 | } else { | ||
187 | pos = skb_put(skb, 1); | ||
188 | supp_rates[1]++; | ||
566 | } | 189 | } |
567 | 190 | *pos = rate->bitrate / 5; | |
568 | ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET; | ||
569 | memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN); | ||
570 | memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN); | ||
571 | ieee80211_sta_send_associnfo(dev, ifsta); | ||
572 | } else { | ||
573 | netif_carrier_off(dev); | ||
574 | ieee80211_sta_tear_down_BA_sessions(dev, ifsta->bssid); | ||
575 | ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; | ||
576 | changed |= ieee80211_reset_erp_info(dev); | ||
577 | |||
578 | sdata->bss_conf.assoc_ht = 0; | ||
579 | sdata->bss_conf.ht_conf = NULL; | ||
580 | sdata->bss_conf.ht_bss_conf = NULL; | ||
581 | |||
582 | memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); | ||
583 | } | 191 | } |
584 | ifsta->last_probe = jiffies; | ||
585 | ieee80211_led_assoc(local, assoc); | ||
586 | |||
587 | sdata->bss_conf.assoc = assoc; | ||
588 | ieee80211_bss_info_change_notify(sdata, changed); | ||
589 | 192 | ||
590 | if (assoc) | 193 | ieee80211_tx_skb(sdata, skb, 0); |
591 | netif_carrier_on(dev); | ||
592 | |||
593 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | ||
594 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | ||
595 | } | 194 | } |
596 | 195 | ||
597 | static void ieee80211_set_disassoc(struct net_device *dev, | 196 | static void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, |
598 | struct ieee80211_if_sta *ifsta, int deauth) | ||
599 | { | ||
600 | if (deauth) | ||
601 | ifsta->auth_tries = 0; | ||
602 | ifsta->assoc_tries = 0; | ||
603 | ieee80211_set_associated(dev, ifsta, 0); | ||
604 | } | ||
605 | |||
606 | void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, | ||
607 | int encrypt) | ||
608 | { | ||
609 | struct ieee80211_sub_if_data *sdata; | ||
610 | |||
611 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
612 | skb->dev = sdata->local->mdev; | ||
613 | skb_set_mac_header(skb, 0); | ||
614 | skb_set_network_header(skb, 0); | ||
615 | skb_set_transport_header(skb, 0); | ||
616 | |||
617 | skb->iif = sdata->dev->ifindex; | ||
618 | skb->do_not_encrypt = !encrypt; | ||
619 | |||
620 | dev_queue_xmit(skb); | ||
621 | } | ||
622 | |||
623 | |||
624 | static void ieee80211_send_auth(struct net_device *dev, | ||
625 | struct ieee80211_if_sta *ifsta, | 197 | struct ieee80211_if_sta *ifsta, |
626 | int transaction, u8 *extra, size_t extra_len, | 198 | int transaction, u8 *extra, size_t extra_len, |
627 | int encrypt) | 199 | int encrypt) |
628 | { | 200 | { |
629 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 201 | struct ieee80211_local *local = sdata->local; |
630 | struct sk_buff *skb; | 202 | struct sk_buff *skb; |
631 | struct ieee80211_mgmt *mgmt; | 203 | struct ieee80211_mgmt *mgmt; |
632 | 204 | ||
@@ -634,19 +206,19 @@ static void ieee80211_send_auth(struct net_device *dev, | |||
634 | sizeof(*mgmt) + 6 + extra_len); | 206 | sizeof(*mgmt) + 6 + extra_len); |
635 | if (!skb) { | 207 | if (!skb) { |
636 | printk(KERN_DEBUG "%s: failed to allocate buffer for auth " | 208 | printk(KERN_DEBUG "%s: failed to allocate buffer for auth " |
637 | "frame\n", dev->name); | 209 | "frame\n", sdata->dev->name); |
638 | return; | 210 | return; |
639 | } | 211 | } |
640 | skb_reserve(skb, local->hw.extra_tx_headroom); | 212 | skb_reserve(skb, local->hw.extra_tx_headroom); |
641 | 213 | ||
642 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); | 214 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); |
643 | memset(mgmt, 0, 24 + 6); | 215 | memset(mgmt, 0, 24 + 6); |
644 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 216 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
645 | IEEE80211_STYPE_AUTH); | 217 | IEEE80211_STYPE_AUTH); |
646 | if (encrypt) | 218 | if (encrypt) |
647 | mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); | 219 | mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); |
648 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); | 220 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); |
649 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 221 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
650 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | 222 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); |
651 | mgmt->u.auth.auth_alg = cpu_to_le16(ifsta->auth_alg); | 223 | mgmt->u.auth.auth_alg = cpu_to_le16(ifsta->auth_alg); |
652 | mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); | 224 | mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); |
@@ -655,64 +227,19 @@ static void ieee80211_send_auth(struct net_device *dev, | |||
655 | if (extra) | 227 | if (extra) |
656 | memcpy(skb_put(skb, extra_len), extra, extra_len); | 228 | memcpy(skb_put(skb, extra_len), extra, extra_len); |
657 | 229 | ||
658 | ieee80211_sta_tx(dev, skb, encrypt); | 230 | ieee80211_tx_skb(sdata, skb, encrypt); |
659 | } | ||
660 | |||
661 | |||
662 | static void ieee80211_authenticate(struct net_device *dev, | ||
663 | struct ieee80211_if_sta *ifsta) | ||
664 | { | ||
665 | DECLARE_MAC_BUF(mac); | ||
666 | |||
667 | ifsta->auth_tries++; | ||
668 | if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) { | ||
669 | printk(KERN_DEBUG "%s: authentication with AP %s" | ||
670 | " timed out\n", | ||
671 | dev->name, print_mac(mac, ifsta->bssid)); | ||
672 | ifsta->state = IEEE80211_DISABLED; | ||
673 | return; | ||
674 | } | ||
675 | |||
676 | ifsta->state = IEEE80211_AUTHENTICATE; | ||
677 | printk(KERN_DEBUG "%s: authenticate with AP %s\n", | ||
678 | dev->name, print_mac(mac, ifsta->bssid)); | ||
679 | |||
680 | ieee80211_send_auth(dev, ifsta, 1, NULL, 0, 0); | ||
681 | |||
682 | mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT); | ||
683 | } | 231 | } |
684 | 232 | ||
685 | static int ieee80211_compatible_rates(struct ieee80211_sta_bss *bss, | 233 | static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, |
686 | struct ieee80211_supported_band *sband, | ||
687 | u64 *rates) | ||
688 | { | ||
689 | int i, j, count; | ||
690 | *rates = 0; | ||
691 | count = 0; | ||
692 | for (i = 0; i < bss->supp_rates_len; i++) { | ||
693 | int rate = (bss->supp_rates[i] & 0x7F) * 5; | ||
694 | |||
695 | for (j = 0; j < sband->n_bitrates; j++) | ||
696 | if (sband->bitrates[j].bitrate == rate) { | ||
697 | *rates |= BIT(j); | ||
698 | count++; | ||
699 | break; | ||
700 | } | ||
701 | } | ||
702 | |||
703 | return count; | ||
704 | } | ||
705 | |||
706 | static void ieee80211_send_assoc(struct net_device *dev, | ||
707 | struct ieee80211_if_sta *ifsta) | 234 | struct ieee80211_if_sta *ifsta) |
708 | { | 235 | { |
709 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 236 | struct ieee80211_local *local = sdata->local; |
710 | struct sk_buff *skb; | 237 | struct sk_buff *skb; |
711 | struct ieee80211_mgmt *mgmt; | 238 | struct ieee80211_mgmt *mgmt; |
712 | u8 *pos, *ies; | 239 | u8 *pos, *ies, *ht_add_ie; |
713 | int i, len, count, rates_len, supp_rates_len; | 240 | int i, len, count, rates_len, supp_rates_len; |
714 | u16 capab; | 241 | u16 capab; |
715 | struct ieee80211_sta_bss *bss; | 242 | struct ieee80211_bss *bss; |
716 | int wmm = 0; | 243 | int wmm = 0; |
717 | struct ieee80211_supported_band *sband; | 244 | struct ieee80211_supported_band *sband; |
718 | u64 rates = 0; | 245 | u64 rates = 0; |
@@ -722,7 +249,7 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
722 | ifsta->ssid_len); | 249 | ifsta->ssid_len); |
723 | if (!skb) { | 250 | if (!skb) { |
724 | printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " | 251 | printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " |
725 | "frame\n", dev->name); | 252 | "frame\n", sdata->dev->name); |
726 | return; | 253 | return; |
727 | } | 254 | } |
728 | skb_reserve(skb, local->hw.extra_tx_headroom); | 255 | skb_reserve(skb, local->hw.extra_tx_headroom); |
@@ -738,13 +265,13 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
738 | capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; | 265 | capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; |
739 | } | 266 | } |
740 | 267 | ||
741 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, | 268 | bss = ieee80211_rx_bss_get(local, ifsta->bssid, |
742 | local->hw.conf.channel->center_freq, | 269 | local->hw.conf.channel->center_freq, |
743 | ifsta->ssid, ifsta->ssid_len); | 270 | ifsta->ssid, ifsta->ssid_len); |
744 | if (bss) { | 271 | if (bss) { |
745 | if (bss->capability & WLAN_CAPABILITY_PRIVACY) | 272 | if (bss->capability & WLAN_CAPABILITY_PRIVACY) |
746 | capab |= WLAN_CAPABILITY_PRIVACY; | 273 | capab |= WLAN_CAPABILITY_PRIVACY; |
747 | if (bss->wmm_ie) | 274 | if (bss->wmm_used) |
748 | wmm = 1; | 275 | wmm = 1; |
749 | 276 | ||
750 | /* get all rates supported by the device and the AP as | 277 | /* get all rates supported by the device and the AP as |
@@ -766,13 +293,13 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
766 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 293 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
767 | memset(mgmt, 0, 24); | 294 | memset(mgmt, 0, 24); |
768 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); | 295 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); |
769 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 296 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
770 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | 297 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); |
771 | 298 | ||
772 | if (ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) { | 299 | if (ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) { |
773 | skb_put(skb, 10); | 300 | skb_put(skb, 10); |
774 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 301 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
775 | IEEE80211_STYPE_REASSOC_REQ); | 302 | IEEE80211_STYPE_REASSOC_REQ); |
776 | mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); | 303 | mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); |
777 | mgmt->u.reassoc_req.listen_interval = | 304 | mgmt->u.reassoc_req.listen_interval = |
778 | cpu_to_le16(local->hw.conf.listen_interval); | 305 | cpu_to_le16(local->hw.conf.listen_interval); |
@@ -780,8 +307,8 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
780 | ETH_ALEN); | 307 | ETH_ALEN); |
781 | } else { | 308 | } else { |
782 | skb_put(skb, 4); | 309 | skb_put(skb, 4); |
783 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 310 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
784 | IEEE80211_STYPE_ASSOC_REQ); | 311 | IEEE80211_STYPE_ASSOC_REQ); |
785 | mgmt->u.assoc_req.capab_info = cpu_to_le16(capab); | 312 | mgmt->u.assoc_req.capab_info = cpu_to_le16(capab); |
786 | mgmt->u.reassoc_req.listen_interval = | 313 | mgmt->u.reassoc_req.listen_interval = |
787 | cpu_to_le16(local->hw.conf.listen_interval); | 314 | cpu_to_le16(local->hw.conf.listen_interval); |
@@ -866,9 +393,10 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
866 | 393 | ||
867 | /* wmm support is a must to HT */ | 394 | /* wmm support is a must to HT */ |
868 | if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) && | 395 | if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) && |
869 | sband->ht_info.ht_supported && bss->ht_add_ie) { | 396 | sband->ht_info.ht_supported && |
397 | (ht_add_ie = ieee80211_bss_get_ie(bss, WLAN_EID_HT_EXTRA_INFO))) { | ||
870 | struct ieee80211_ht_addt_info *ht_add_info = | 398 | struct ieee80211_ht_addt_info *ht_add_info = |
871 | (struct ieee80211_ht_addt_info *)bss->ht_add_ie; | 399 | (struct ieee80211_ht_addt_info *)ht_add_ie; |
872 | u16 cap = sband->ht_info.cap; | 400 | u16 cap = sband->ht_info.cap; |
873 | __le16 tmp; | 401 | __le16 tmp; |
874 | u32 flags = local->hw.conf.channel->flags; | 402 | u32 flags = local->hw.conf.channel->flags; |
@@ -907,21 +435,22 @@ static void ieee80211_send_assoc(struct net_device *dev, | |||
907 | if (ifsta->assocreq_ies) | 435 | if (ifsta->assocreq_ies) |
908 | memcpy(ifsta->assocreq_ies, ies, ifsta->assocreq_ies_len); | 436 | memcpy(ifsta->assocreq_ies, ies, ifsta->assocreq_ies_len); |
909 | 437 | ||
910 | ieee80211_sta_tx(dev, skb, 0); | 438 | ieee80211_tx_skb(sdata, skb, 0); |
911 | } | 439 | } |
912 | 440 | ||
913 | 441 | ||
914 | static void ieee80211_send_deauth(struct net_device *dev, | 442 | static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, |
915 | struct ieee80211_if_sta *ifsta, u16 reason) | 443 | u16 stype, u16 reason) |
916 | { | 444 | { |
917 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 445 | struct ieee80211_local *local = sdata->local; |
446 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
918 | struct sk_buff *skb; | 447 | struct sk_buff *skb; |
919 | struct ieee80211_mgmt *mgmt; | 448 | struct ieee80211_mgmt *mgmt; |
920 | 449 | ||
921 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); | 450 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); |
922 | if (!skb) { | 451 | if (!skb) { |
923 | printk(KERN_DEBUG "%s: failed to allocate buffer for deauth " | 452 | printk(KERN_DEBUG "%s: failed to allocate buffer for " |
924 | "frame\n", dev->name); | 453 | "deauth/disassoc frame\n", sdata->dev->name); |
925 | return; | 454 | return; |
926 | } | 455 | } |
927 | skb_reserve(skb, local->hw.extra_tx_headroom); | 456 | skb_reserve(skb, local->hw.extra_tx_headroom); |
@@ -929,940 +458,561 @@ static void ieee80211_send_deauth(struct net_device *dev, | |||
929 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 458 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
930 | memset(mgmt, 0, 24); | 459 | memset(mgmt, 0, 24); |
931 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); | 460 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); |
932 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 461 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
933 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | 462 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); |
934 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 463 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); |
935 | IEEE80211_STYPE_DEAUTH); | ||
936 | skb_put(skb, 2); | 464 | skb_put(skb, 2); |
465 | /* u.deauth.reason_code == u.disassoc.reason_code */ | ||
937 | mgmt->u.deauth.reason_code = cpu_to_le16(reason); | 466 | mgmt->u.deauth.reason_code = cpu_to_le16(reason); |
938 | 467 | ||
939 | ieee80211_sta_tx(dev, skb, 0); | 468 | ieee80211_tx_skb(sdata, skb, 0); |
940 | } | 469 | } |
941 | 470 | ||
942 | 471 | /* MLME */ | |
943 | static void ieee80211_send_disassoc(struct net_device *dev, | 472 | static void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, |
944 | struct ieee80211_if_sta *ifsta, u16 reason) | 473 | struct ieee80211_bss *bss) |
945 | { | 474 | { |
946 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 475 | struct ieee80211_local *local = sdata->local; |
947 | struct sk_buff *skb; | 476 | int i, have_higher_than_11mbit = 0; |
948 | struct ieee80211_mgmt *mgmt; | ||
949 | 477 | ||
950 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); | 478 | /* cf. IEEE 802.11 9.2.12 */ |
951 | if (!skb) { | 479 | for (i = 0; i < bss->supp_rates_len; i++) |
952 | printk(KERN_DEBUG "%s: failed to allocate buffer for disassoc " | 480 | if ((bss->supp_rates[i] & 0x7f) * 5 > 110) |
953 | "frame\n", dev->name); | 481 | have_higher_than_11mbit = 1; |
954 | return; | ||
955 | } | ||
956 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
957 | 482 | ||
958 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 483 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && |
959 | memset(mgmt, 0, 24); | 484 | have_higher_than_11mbit) |
960 | memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); | 485 | sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; |
961 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 486 | else |
962 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | 487 | sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; |
963 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
964 | IEEE80211_STYPE_DISASSOC); | ||
965 | skb_put(skb, 2); | ||
966 | mgmt->u.disassoc.reason_code = cpu_to_le16(reason); | ||
967 | 488 | ||
968 | ieee80211_sta_tx(dev, skb, 0); | 489 | ieee80211_set_wmm_default(sdata); |
969 | } | 490 | } |
970 | 491 | ||
971 | 492 | static void ieee80211_sta_wmm_params(struct ieee80211_local *local, | |
972 | static int ieee80211_privacy_mismatch(struct net_device *dev, | 493 | struct ieee80211_if_sta *ifsta, |
973 | struct ieee80211_if_sta *ifsta) | 494 | u8 *wmm_param, size_t wmm_param_len) |
974 | { | 495 | { |
975 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 496 | struct ieee80211_tx_queue_params params; |
976 | struct ieee80211_sta_bss *bss; | 497 | size_t left; |
977 | int bss_privacy; | 498 | int count; |
978 | int wep_privacy; | 499 | u8 *pos; |
979 | int privacy_invoked; | ||
980 | |||
981 | if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL)) | ||
982 | return 0; | ||
983 | |||
984 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, | ||
985 | local->hw.conf.channel->center_freq, | ||
986 | ifsta->ssid, ifsta->ssid_len); | ||
987 | if (!bss) | ||
988 | return 0; | ||
989 | |||
990 | bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY); | ||
991 | wep_privacy = !!ieee80211_sta_wep_configured(dev); | ||
992 | privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED); | ||
993 | 500 | ||
994 | ieee80211_rx_bss_put(local, bss); | 501 | if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED)) |
502 | return; | ||
995 | 503 | ||
996 | if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked)) | 504 | if (!wmm_param) |
997 | return 0; | 505 | return; |
998 | 506 | ||
999 | return 1; | 507 | if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) |
1000 | } | 508 | return; |
509 | count = wmm_param[6] & 0x0f; | ||
510 | if (count == ifsta->wmm_last_param_set) | ||
511 | return; | ||
512 | ifsta->wmm_last_param_set = count; | ||
1001 | 513 | ||
514 | pos = wmm_param + 8; | ||
515 | left = wmm_param_len - 8; | ||
1002 | 516 | ||
1003 | static void ieee80211_associate(struct net_device *dev, | 517 | memset(¶ms, 0, sizeof(params)); |
1004 | struct ieee80211_if_sta *ifsta) | ||
1005 | { | ||
1006 | DECLARE_MAC_BUF(mac); | ||
1007 | 518 | ||
1008 | ifsta->assoc_tries++; | 519 | if (!local->ops->conf_tx) |
1009 | if (ifsta->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) { | ||
1010 | printk(KERN_DEBUG "%s: association with AP %s" | ||
1011 | " timed out\n", | ||
1012 | dev->name, print_mac(mac, ifsta->bssid)); | ||
1013 | ifsta->state = IEEE80211_DISABLED; | ||
1014 | return; | 520 | return; |
1015 | } | ||
1016 | 521 | ||
1017 | ifsta->state = IEEE80211_ASSOCIATE; | 522 | local->wmm_acm = 0; |
1018 | printk(KERN_DEBUG "%s: associate with AP %s\n", | 523 | for (; left >= 4; left -= 4, pos += 4) { |
1019 | dev->name, print_mac(mac, ifsta->bssid)); | 524 | int aci = (pos[0] >> 5) & 0x03; |
1020 | if (ieee80211_privacy_mismatch(dev, ifsta)) { | 525 | int acm = (pos[0] >> 4) & 0x01; |
1021 | printk(KERN_DEBUG "%s: mismatch in privacy configuration and " | 526 | int queue; |
1022 | "mixed-cell disabled - abort association\n", dev->name); | ||
1023 | ifsta->state = IEEE80211_DISABLED; | ||
1024 | return; | ||
1025 | } | ||
1026 | 527 | ||
1027 | ieee80211_send_assoc(dev, ifsta); | 528 | switch (aci) { |
529 | case 1: | ||
530 | queue = 3; | ||
531 | if (acm) | ||
532 | local->wmm_acm |= BIT(0) | BIT(3); | ||
533 | break; | ||
534 | case 2: | ||
535 | queue = 1; | ||
536 | if (acm) | ||
537 | local->wmm_acm |= BIT(4) | BIT(5); | ||
538 | break; | ||
539 | case 3: | ||
540 | queue = 0; | ||
541 | if (acm) | ||
542 | local->wmm_acm |= BIT(6) | BIT(7); | ||
543 | break; | ||
544 | case 0: | ||
545 | default: | ||
546 | queue = 2; | ||
547 | if (acm) | ||
548 | local->wmm_acm |= BIT(1) | BIT(2); | ||
549 | break; | ||
550 | } | ||
1028 | 551 | ||
1029 | mod_timer(&ifsta->timer, jiffies + IEEE80211_ASSOC_TIMEOUT); | 552 | params.aifs = pos[0] & 0x0f; |
553 | params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); | ||
554 | params.cw_min = ecw2cw(pos[1] & 0x0f); | ||
555 | params.txop = get_unaligned_le16(pos + 2); | ||
556 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
557 | printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " | ||
558 | "cWmin=%d cWmax=%d txop=%d\n", | ||
559 | local->mdev->name, queue, aci, acm, params.aifs, params.cw_min, | ||
560 | params.cw_max, params.txop); | ||
561 | #endif | ||
562 | /* TODO: handle ACM (block TX, fallback to next lowest allowed | ||
563 | * AC for now) */ | ||
564 | if (local->ops->conf_tx(local_to_hw(local), queue, ¶ms)) { | ||
565 | printk(KERN_DEBUG "%s: failed to set TX queue " | ||
566 | "parameters for queue %d\n", local->mdev->name, queue); | ||
567 | } | ||
568 | } | ||
1030 | } | 569 | } |
1031 | 570 | ||
1032 | 571 | static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata, | |
1033 | static void ieee80211_associated(struct net_device *dev, | 572 | bool use_protection, |
1034 | struct ieee80211_if_sta *ifsta) | 573 | bool use_short_preamble) |
1035 | { | 574 | { |
1036 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 575 | struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf; |
1037 | struct sta_info *sta; | 576 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
1038 | int disassoc; | 577 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
1039 | DECLARE_MAC_BUF(mac); | 578 | DECLARE_MAC_BUF(mac); |
579 | #endif | ||
580 | u32 changed = 0; | ||
1040 | 581 | ||
1041 | /* TODO: start monitoring current AP signal quality and number of | 582 | if (use_protection != bss_conf->use_cts_prot) { |
1042 | * missed beacons. Scan other channels every now and then and search | 583 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
1043 | * for better APs. */ | 584 | if (net_ratelimit()) { |
1044 | /* TODO: remove expired BSSes */ | 585 | printk(KERN_DEBUG "%s: CTS protection %s (BSSID=" |
1045 | 586 | "%s)\n", | |
1046 | ifsta->state = IEEE80211_ASSOCIATED; | 587 | sdata->dev->name, |
1047 | 588 | use_protection ? "enabled" : "disabled", | |
1048 | rcu_read_lock(); | 589 | print_mac(mac, ifsta->bssid)); |
1049 | |||
1050 | sta = sta_info_get(local, ifsta->bssid); | ||
1051 | if (!sta) { | ||
1052 | printk(KERN_DEBUG "%s: No STA entry for own AP %s\n", | ||
1053 | dev->name, print_mac(mac, ifsta->bssid)); | ||
1054 | disassoc = 1; | ||
1055 | } else { | ||
1056 | disassoc = 0; | ||
1057 | if (time_after(jiffies, | ||
1058 | sta->last_rx + IEEE80211_MONITORING_INTERVAL)) { | ||
1059 | if (ifsta->flags & IEEE80211_STA_PROBEREQ_POLL) { | ||
1060 | printk(KERN_DEBUG "%s: No ProbeResp from " | ||
1061 | "current AP %s - assume out of " | ||
1062 | "range\n", | ||
1063 | dev->name, print_mac(mac, ifsta->bssid)); | ||
1064 | disassoc = 1; | ||
1065 | sta_info_unlink(&sta); | ||
1066 | } else | ||
1067 | ieee80211_send_probe_req(dev, ifsta->bssid, | ||
1068 | local->scan_ssid, | ||
1069 | local->scan_ssid_len); | ||
1070 | ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL; | ||
1071 | } else { | ||
1072 | ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL; | ||
1073 | if (time_after(jiffies, ifsta->last_probe + | ||
1074 | IEEE80211_PROBE_INTERVAL)) { | ||
1075 | ifsta->last_probe = jiffies; | ||
1076 | ieee80211_send_probe_req(dev, ifsta->bssid, | ||
1077 | ifsta->ssid, | ||
1078 | ifsta->ssid_len); | ||
1079 | } | ||
1080 | } | 590 | } |
591 | #endif | ||
592 | bss_conf->use_cts_prot = use_protection; | ||
593 | changed |= BSS_CHANGED_ERP_CTS_PROT; | ||
1081 | } | 594 | } |
1082 | 595 | ||
1083 | rcu_read_unlock(); | 596 | if (use_short_preamble != bss_conf->use_short_preamble) { |
1084 | 597 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | |
1085 | if (disassoc && sta) | 598 | if (net_ratelimit()) { |
1086 | sta_info_destroy(sta); | 599 | printk(KERN_DEBUG "%s: switched to %s barker preamble" |
1087 | 600 | " (BSSID=%s)\n", | |
1088 | if (disassoc) { | 601 | sdata->dev->name, |
1089 | ifsta->state = IEEE80211_DISABLED; | 602 | use_short_preamble ? "short" : "long", |
1090 | ieee80211_set_associated(dev, ifsta, 0); | 603 | print_mac(mac, ifsta->bssid)); |
1091 | } else { | 604 | } |
1092 | mod_timer(&ifsta->timer, jiffies + | 605 | #endif |
1093 | IEEE80211_MONITORING_INTERVAL); | 606 | bss_conf->use_short_preamble = use_short_preamble; |
607 | changed |= BSS_CHANGED_ERP_PREAMBLE; | ||
1094 | } | 608 | } |
1095 | } | ||
1096 | 609 | ||
610 | return changed; | ||
611 | } | ||
1097 | 612 | ||
1098 | static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, | 613 | static u32 ieee80211_handle_erp_ie(struct ieee80211_sub_if_data *sdata, |
1099 | u8 *ssid, size_t ssid_len) | 614 | u8 erp_value) |
1100 | { | 615 | { |
1101 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 616 | bool use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0; |
1102 | struct ieee80211_supported_band *sband; | 617 | bool use_short_preamble = (erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0; |
1103 | struct sk_buff *skb; | ||
1104 | struct ieee80211_mgmt *mgmt; | ||
1105 | u8 *pos, *supp_rates, *esupp_rates = NULL; | ||
1106 | int i; | ||
1107 | |||
1108 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200); | ||
1109 | if (!skb) { | ||
1110 | printk(KERN_DEBUG "%s: failed to allocate buffer for probe " | ||
1111 | "request\n", dev->name); | ||
1112 | return; | ||
1113 | } | ||
1114 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1115 | 618 | ||
1116 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 619 | return ieee80211_handle_protect_preamb(sdata, |
1117 | memset(mgmt, 0, 24); | 620 | use_protection, use_short_preamble); |
1118 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 621 | } |
1119 | IEEE80211_STYPE_PROBE_REQ); | ||
1120 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | ||
1121 | if (dst) { | ||
1122 | memcpy(mgmt->da, dst, ETH_ALEN); | ||
1123 | memcpy(mgmt->bssid, dst, ETH_ALEN); | ||
1124 | } else { | ||
1125 | memset(mgmt->da, 0xff, ETH_ALEN); | ||
1126 | memset(mgmt->bssid, 0xff, ETH_ALEN); | ||
1127 | } | ||
1128 | pos = skb_put(skb, 2 + ssid_len); | ||
1129 | *pos++ = WLAN_EID_SSID; | ||
1130 | *pos++ = ssid_len; | ||
1131 | memcpy(pos, ssid, ssid_len); | ||
1132 | 622 | ||
1133 | supp_rates = skb_put(skb, 2); | 623 | static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, |
1134 | supp_rates[0] = WLAN_EID_SUPP_RATES; | 624 | struct ieee80211_bss *bss) |
1135 | supp_rates[1] = 0; | 625 | { |
1136 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 626 | u32 changed = 0; |
1137 | 627 | ||
1138 | for (i = 0; i < sband->n_bitrates; i++) { | 628 | if (bss->has_erp_value) |
1139 | struct ieee80211_rate *rate = &sband->bitrates[i]; | 629 | changed |= ieee80211_handle_erp_ie(sdata, bss->erp_value); |
1140 | if (esupp_rates) { | 630 | else { |
1141 | pos = skb_put(skb, 1); | 631 | u16 capab = bss->capability; |
1142 | esupp_rates[1]++; | 632 | changed |= ieee80211_handle_protect_preamb(sdata, false, |
1143 | } else if (supp_rates[1] == 8) { | 633 | (capab & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0); |
1144 | esupp_rates = skb_put(skb, 3); | ||
1145 | esupp_rates[0] = WLAN_EID_EXT_SUPP_RATES; | ||
1146 | esupp_rates[1] = 1; | ||
1147 | pos = &esupp_rates[2]; | ||
1148 | } else { | ||
1149 | pos = skb_put(skb, 1); | ||
1150 | supp_rates[1]++; | ||
1151 | } | ||
1152 | *pos = rate->bitrate / 5; | ||
1153 | } | 634 | } |
1154 | 635 | ||
1155 | ieee80211_sta_tx(dev, skb, 0); | 636 | return changed; |
1156 | } | 637 | } |
1157 | 638 | ||
639 | static void ieee80211_sta_send_apinfo(struct ieee80211_sub_if_data *sdata, | ||
640 | struct ieee80211_if_sta *ifsta) | ||
641 | { | ||
642 | union iwreq_data wrqu; | ||
643 | memset(&wrqu, 0, sizeof(wrqu)); | ||
644 | if (ifsta->flags & IEEE80211_STA_ASSOCIATED) | ||
645 | memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN); | ||
646 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | ||
647 | wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL); | ||
648 | } | ||
1158 | 649 | ||
1159 | static int ieee80211_sta_wep_configured(struct net_device *dev) | 650 | static void ieee80211_sta_send_associnfo(struct ieee80211_sub_if_data *sdata, |
651 | struct ieee80211_if_sta *ifsta) | ||
1160 | { | 652 | { |
1161 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 653 | union iwreq_data wrqu; |
1162 | if (!sdata || !sdata->default_key || | 654 | |
1163 | sdata->default_key->conf.alg != ALG_WEP) | 655 | if (ifsta->assocreq_ies) { |
1164 | return 0; | 656 | memset(&wrqu, 0, sizeof(wrqu)); |
1165 | return 1; | 657 | wrqu.data.length = ifsta->assocreq_ies_len; |
658 | wireless_send_event(sdata->dev, IWEVASSOCREQIE, &wrqu, | ||
659 | ifsta->assocreq_ies); | ||
660 | } | ||
661 | if (ifsta->assocresp_ies) { | ||
662 | memset(&wrqu, 0, sizeof(wrqu)); | ||
663 | wrqu.data.length = ifsta->assocresp_ies_len; | ||
664 | wireless_send_event(sdata->dev, IWEVASSOCRESPIE, &wrqu, | ||
665 | ifsta->assocresp_ies); | ||
666 | } | ||
1166 | } | 667 | } |
1167 | 668 | ||
1168 | 669 | ||
1169 | static void ieee80211_auth_completed(struct net_device *dev, | 670 | static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, |
1170 | struct ieee80211_if_sta *ifsta) | 671 | struct ieee80211_if_sta *ifsta) |
1171 | { | 672 | { |
1172 | printk(KERN_DEBUG "%s: authenticated\n", dev->name); | 673 | struct ieee80211_local *local = sdata->local; |
1173 | ifsta->flags |= IEEE80211_STA_AUTHENTICATED; | 674 | struct ieee80211_conf *conf = &local_to_hw(local)->conf; |
1174 | ieee80211_associate(dev, ifsta); | 675 | u32 changed = BSS_CHANGED_ASSOC; |
1175 | } | ||
1176 | 676 | ||
677 | struct ieee80211_bss *bss; | ||
1177 | 678 | ||
1178 | static void ieee80211_auth_challenge(struct net_device *dev, | 679 | ifsta->flags |= IEEE80211_STA_ASSOCIATED; |
1179 | struct ieee80211_if_sta *ifsta, | ||
1180 | struct ieee80211_mgmt *mgmt, | ||
1181 | size_t len) | ||
1182 | { | ||
1183 | u8 *pos; | ||
1184 | struct ieee802_11_elems elems; | ||
1185 | 680 | ||
1186 | pos = mgmt->u.auth.variable; | 681 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
1187 | ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); | ||
1188 | if (!elems.challenge) | ||
1189 | return; | 682 | return; |
1190 | ieee80211_send_auth(dev, ifsta, 3, elems.challenge - 2, | ||
1191 | elems.challenge_len + 2, 1); | ||
1192 | } | ||
1193 | 683 | ||
1194 | static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid, | 684 | bss = ieee80211_rx_bss_get(local, ifsta->bssid, |
1195 | u8 dialog_token, u16 status, u16 policy, | 685 | conf->channel->center_freq, |
1196 | u16 buf_size, u16 timeout) | 686 | ifsta->ssid, ifsta->ssid_len); |
1197 | { | 687 | if (bss) { |
1198 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 688 | /* set timing information */ |
1199 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 689 | sdata->bss_conf.beacon_int = bss->beacon_int; |
1200 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 690 | sdata->bss_conf.timestamp = bss->timestamp; |
1201 | struct sk_buff *skb; | 691 | sdata->bss_conf.dtim_period = bss->dtim_period; |
1202 | struct ieee80211_mgmt *mgmt; | ||
1203 | u16 capab; | ||
1204 | 692 | ||
1205 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | 693 | changed |= ieee80211_handle_bss_capability(sdata, bss); |
1206 | 694 | ||
1207 | if (!skb) { | 695 | ieee80211_rx_bss_put(local, bss); |
1208 | printk(KERN_DEBUG "%s: failed to allocate buffer " | ||
1209 | "for addba resp frame\n", dev->name); | ||
1210 | return; | ||
1211 | } | 696 | } |
1212 | 697 | ||
1213 | skb_reserve(skb, local->hw.extra_tx_headroom); | 698 | if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) { |
1214 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 699 | changed |= BSS_CHANGED_HT; |
1215 | memset(mgmt, 0, 24); | 700 | sdata->bss_conf.assoc_ht = 1; |
1216 | memcpy(mgmt->da, da, ETH_ALEN); | 701 | sdata->bss_conf.ht_conf = &conf->ht_conf; |
1217 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 702 | sdata->bss_conf.ht_bss_conf = &conf->ht_bss_conf; |
1218 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) | 703 | } |
1219 | memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN); | ||
1220 | else | ||
1221 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
1222 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
1223 | IEEE80211_STYPE_ACTION); | ||
1224 | 704 | ||
1225 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp)); | 705 | ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET; |
1226 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | 706 | memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN); |
1227 | mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; | 707 | ieee80211_sta_send_associnfo(sdata, ifsta); |
1228 | mgmt->u.action.u.addba_resp.dialog_token = dialog_token; | ||
1229 | 708 | ||
1230 | capab = (u16)(policy << 1); /* bit 1 aggregation policy */ | 709 | ifsta->last_probe = jiffies; |
1231 | capab |= (u16)(tid << 2); /* bit 5:2 TID number */ | 710 | ieee80211_led_assoc(local, 1); |
1232 | capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */ | ||
1233 | 711 | ||
1234 | mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab); | 712 | sdata->bss_conf.assoc = 1; |
1235 | mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); | 713 | /* |
1236 | mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); | 714 | * For now just always ask the driver to update the basic rateset |
715 | * when we have associated, we aren't checking whether it actually | ||
716 | * changed or not. | ||
717 | */ | ||
718 | changed |= BSS_CHANGED_BASIC_RATES; | ||
719 | ieee80211_bss_info_change_notify(sdata, changed); | ||
1237 | 720 | ||
1238 | ieee80211_sta_tx(dev, skb, 0); | 721 | netif_tx_start_all_queues(sdata->dev); |
722 | netif_carrier_on(sdata->dev); | ||
1239 | 723 | ||
1240 | return; | 724 | ieee80211_sta_send_apinfo(sdata, ifsta); |
1241 | } | 725 | } |
1242 | 726 | ||
1243 | void ieee80211_send_addba_request(struct net_device *dev, const u8 *da, | 727 | static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata, |
1244 | u16 tid, u8 dialog_token, u16 start_seq_num, | 728 | struct ieee80211_if_sta *ifsta) |
1245 | u16 agg_size, u16 timeout) | ||
1246 | { | 729 | { |
1247 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 730 | DECLARE_MAC_BUF(mac); |
1248 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1249 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
1250 | struct sk_buff *skb; | ||
1251 | struct ieee80211_mgmt *mgmt; | ||
1252 | u16 capab; | ||
1253 | |||
1254 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | ||
1255 | 731 | ||
1256 | if (!skb) { | 732 | ifsta->direct_probe_tries++; |
1257 | printk(KERN_ERR "%s: failed to allocate buffer " | 733 | if (ifsta->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) { |
1258 | "for addba request frame\n", dev->name); | 734 | printk(KERN_DEBUG "%s: direct probe to AP %s timed out\n", |
735 | sdata->dev->name, print_mac(mac, ifsta->bssid)); | ||
736 | ifsta->state = IEEE80211_STA_MLME_DISABLED; | ||
1259 | return; | 737 | return; |
1260 | } | 738 | } |
1261 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1262 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | ||
1263 | memset(mgmt, 0, 24); | ||
1264 | memcpy(mgmt->da, da, ETH_ALEN); | ||
1265 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | ||
1266 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) | ||
1267 | memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN); | ||
1268 | else | ||
1269 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
1270 | 739 | ||
1271 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 740 | printk(KERN_DEBUG "%s: direct probe to AP %s try %d\n", |
1272 | IEEE80211_STYPE_ACTION); | 741 | sdata->dev->name, print_mac(mac, ifsta->bssid), |
742 | ifsta->direct_probe_tries); | ||
1273 | 743 | ||
1274 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); | 744 | ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE; |
1275 | 745 | ||
1276 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | 746 | set_bit(IEEE80211_STA_REQ_DIRECT_PROBE, &ifsta->request); |
1277 | mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ; | ||
1278 | 747 | ||
1279 | mgmt->u.action.u.addba_req.dialog_token = dialog_token; | 748 | /* Direct probe is sent to the broadcast address as some APs |
1280 | capab = (u16)(1 << 1); /* bit 1 aggregation policy */ | 749 | * will not answer to a directed probe in unassociated state. |
1281 | capab |= (u16)(tid << 2); /* bit 5:2 TID number */ | 750 | */ |
1282 | capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggregation */ | 751 | ieee80211_send_probe_req(sdata, NULL, |
1283 | 752 | ifsta->ssid, ifsta->ssid_len); | |
1284 | mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); | ||
1285 | |||
1286 | mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout); | ||
1287 | mgmt->u.action.u.addba_req.start_seq_num = | ||
1288 | cpu_to_le16(start_seq_num << 4); | ||
1289 | 753 | ||
1290 | ieee80211_sta_tx(dev, skb, 0); | 754 | mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT); |
1291 | } | 755 | } |
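
The two ADDBA frame builders above pack the session parameters into a single 16-bit capability field (bit 1 policy, bits 5:2 TID, bits 15:6 buffer size), and ieee80211_sta_process_addba_request() further down undoes that packing with the IEEE80211_ADDBA_PARAM_* masks. A self-contained sketch of the round trip; the mask values are spelled out from the bit layout in the comments rather than taken from a kernel header.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* ADDBA parameter-set layout as used above: bit 0 unused here,
 * bit 1 policy, bits 5:2 TID, bits 15:6 buffer size. */
#define ADDBA_PARAM_POLICY_MASK    0x0002
#define ADDBA_PARAM_TID_MASK       0x003C
#define ADDBA_PARAM_BUF_SIZE_MASK  0xFFC0

static uint16_t addba_pack(int policy, int tid, int buf_size)
{
    uint16_t capab = 0;

    capab |= (uint16_t)(policy << 1);    /* bit 1: BA policy */
    capab |= (uint16_t)(tid << 2);       /* bits 5:2: TID */
    capab |= (uint16_t)(buf_size << 6);  /* bits 15:6: reorder buffer size */
    return capab;
}

int main(void)
{
    uint16_t capab = addba_pack(1, 5, 64);

    /* same extraction the receive path performs on the incoming frame */
    int policy   = (capab & ADDBA_PARAM_POLICY_MASK) >> 1;
    int tid      = (capab & ADDBA_PARAM_TID_MASK) >> 2;
    int buf_size = (capab & ADDBA_PARAM_BUF_SIZE_MASK) >> 6;

    assert(policy == 1 && tid == 5 && buf_size == 64);
    printf("capab=0x%04x policy=%d tid=%d buf_size=%d\n",
           (unsigned)capab, policy, tid, buf_size);
    return 0;
}

Running it prints capab=0x1016 policy=1 tid=5 buf_size=64, matching what the receive-side masks recover.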
1292 | 756 | ||
1293 | static void ieee80211_sta_process_addba_request(struct net_device *dev, | 757 | |
1294 | struct ieee80211_mgmt *mgmt, | 758 | static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata, |
1295 | size_t len) | 759 | struct ieee80211_if_sta *ifsta) |
1296 | { | 760 | { |
1297 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1298 | struct ieee80211_hw *hw = &local->hw; | ||
1299 | struct ieee80211_conf *conf = &hw->conf; | ||
1300 | struct sta_info *sta; | ||
1301 | struct tid_ampdu_rx *tid_agg_rx; | ||
1302 | u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status; | ||
1303 | u8 dialog_token; | ||
1304 | int ret = -EOPNOTSUPP; | ||
1305 | DECLARE_MAC_BUF(mac); | 761 | DECLARE_MAC_BUF(mac); |
1306 | 762 | ||
1307 | rcu_read_lock(); | 763 | ifsta->auth_tries++; |
1308 | 764 | if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) { | |
1309 | sta = sta_info_get(local, mgmt->sa); | 765 | printk(KERN_DEBUG "%s: authentication with AP %s" |
1310 | if (!sta) { | 766 | " timed out\n", |
1311 | rcu_read_unlock(); | 767 | sdata->dev->name, print_mac(mac, ifsta->bssid)); |
768 | ifsta->state = IEEE80211_STA_MLME_DISABLED; | ||
1312 | return; | 769 | return; |
1313 | } | 770 | } |
1314 | 771 | ||
1315 | /* extract session parameters from addba request frame */ | 772 | ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE; |
1316 | dialog_token = mgmt->u.action.u.addba_req.dialog_token; | 773 | printk(KERN_DEBUG "%s: authenticate with AP %s\n", |
1317 | timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout); | 774 | sdata->dev->name, print_mac(mac, ifsta->bssid)); |
1318 | start_seq_num = | ||
1319 | le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4; | ||
1320 | |||
1321 | capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); | ||
1322 | ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1; | ||
1323 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | ||
1324 | buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; | ||
1325 | |||
1326 | status = WLAN_STATUS_REQUEST_DECLINED; | ||
1327 | |||
1328 | /* sanity check for incoming parameters: | ||
1329 | * check if configuration can support the BA policy | ||
1330 | * and if buffer size does not exceed max value */ | ||
1331 | if (((ba_policy != 1) | ||
1332 | && (!(conf->ht_conf.cap & IEEE80211_HT_CAP_DELAY_BA))) | ||
1333 | || (buf_size > IEEE80211_MAX_AMPDU_BUF)) { | ||
1334 | status = WLAN_STATUS_INVALID_QOS_PARAM; | ||
1335 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1336 | if (net_ratelimit()) | ||
1337 | printk(KERN_DEBUG "AddBA Req with bad params from " | ||
1338 | "%s on tid %u. policy %d, buffer size %d\n", | ||
1339 | print_mac(mac, mgmt->sa), tid, ba_policy, | ||
1340 | buf_size); | ||
1341 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1342 | goto end_no_lock; | ||
1343 | } | ||
1344 | /* determine default buffer size */ | ||
1345 | if (buf_size == 0) { | ||
1346 | struct ieee80211_supported_band *sband; | ||
1347 | |||
1348 | sband = local->hw.wiphy->bands[conf->channel->band]; | ||
1349 | buf_size = IEEE80211_MIN_AMPDU_BUF; | ||
1350 | buf_size = buf_size << sband->ht_info.ampdu_factor; | ||
1351 | } | ||
1352 | |||
1353 | |||
1354 | /* examine state machine */ | ||
1355 | spin_lock_bh(&sta->lock); | ||
1356 | 775 | ||
1357 | if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { | 776 | ieee80211_send_auth(sdata, ifsta, 1, NULL, 0, 0); |
1358 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1359 | if (net_ratelimit()) | ||
1360 | printk(KERN_DEBUG "unexpected AddBA Req from " | ||
1361 | "%s on tid %u\n", | ||
1362 | print_mac(mac, mgmt->sa), tid); | ||
1363 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1364 | goto end; | ||
1365 | } | ||
1366 | 777 | ||
1367 | /* prepare A-MPDU MLME for Rx aggregation */ | 778 | mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT); |
1368 | sta->ampdu_mlme.tid_rx[tid] = | ||
1369 | kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); | ||
1370 | if (!sta->ampdu_mlme.tid_rx[tid]) { | ||
1371 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1372 | if (net_ratelimit()) | ||
1373 | printk(KERN_ERR "allocate rx mlme to tid %d failed\n", | ||
1374 | tid); | ||
1375 | #endif | ||
1376 | goto end; | ||
1377 | } | ||
1378 | /* rx timer */ | ||
1379 | sta->ampdu_mlme.tid_rx[tid]->session_timer.function = | ||
1380 | sta_rx_agg_session_timer_expired; | ||
1381 | sta->ampdu_mlme.tid_rx[tid]->session_timer.data = | ||
1382 | (unsigned long)&sta->timer_to_tid[tid]; | ||
1383 | init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer); | ||
1384 | |||
1385 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; | ||
1386 | |||
1387 | /* prepare reordering buffer */ | ||
1388 | tid_agg_rx->reorder_buf = | ||
1389 | kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC); | ||
1390 | if (!tid_agg_rx->reorder_buf) { | ||
1391 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1392 | if (net_ratelimit()) | ||
1393 | printk(KERN_ERR "cannot allocate reordering buffer " | ||
1394 | "to tid %d\n", tid); | ||
1395 | #endif | ||
1396 | kfree(sta->ampdu_mlme.tid_rx[tid]); | ||
1397 | goto end; | ||
1398 | } | ||
1399 | memset(tid_agg_rx->reorder_buf, 0, | ||
1400 | buf_size * sizeof(struct sk_buff *)); | ||
1401 | |||
1402 | if (local->ops->ampdu_action) | ||
1403 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START, | ||
1404 | sta->addr, tid, &start_seq_num); | ||
1405 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1406 | printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); | ||
1407 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1408 | |||
1409 | if (ret) { | ||
1410 | kfree(tid_agg_rx->reorder_buf); | ||
1411 | kfree(tid_agg_rx); | ||
1412 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
1413 | goto end; | ||
1414 | } | ||
1415 | |||
1416 | /* change state and send addba resp */ | ||
1417 | sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL; | ||
1418 | tid_agg_rx->dialog_token = dialog_token; | ||
1419 | tid_agg_rx->ssn = start_seq_num; | ||
1420 | tid_agg_rx->head_seq_num = start_seq_num; | ||
1421 | tid_agg_rx->buf_size = buf_size; | ||
1422 | tid_agg_rx->timeout = timeout; | ||
1423 | tid_agg_rx->stored_mpdu_num = 0; | ||
1424 | status = WLAN_STATUS_SUCCESS; | ||
1425 | end: | ||
1426 | spin_unlock_bh(&sta->lock); | ||
1427 | |||
1428 | end_no_lock: | ||
1429 | ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid, | ||
1430 | dialog_token, status, 1, buf_size, timeout); | ||
1431 | rcu_read_unlock(); | ||
1432 | } | 779 | } |
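
One small piece of arithmetic worth spelling out from the handler above: when the AddBA request leaves the buffer size at zero, the default is the minimum A-MPDU buffer shifted left by the band's A-MPDU factor. A tiny sketch, assuming the usual 8-frame minimum, 64-frame maximum and a 2-bit factor (0 to 3).

#include <stdio.h>

#define MIN_AMPDU_BUF 8    /* frames; stands in for IEEE80211_MIN_AMPDU_BUF */
#define MAX_AMPDU_BUF 64   /* frames; stands in for IEEE80211_MAX_AMPDU_BUF */

int main(void)
{
    int factor;

    /* ampdu_factor comes from the band's HT capabilities (0..3) */
    for (factor = 0; factor <= 3; factor++) {
        int buf_size = MIN_AMPDU_BUF << factor;
        printf("ampdu_factor=%d -> default reorder buffer %d frames%s\n",
               factor, buf_size,
               buf_size > MAX_AMPDU_BUF ? " (would be rejected)" : "");
    }
    return 0;
}

With these values the default tops out at exactly 64 frames, so it never trips the buffer-size sanity check above.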
1433 | 780 | ||
1434 | static void ieee80211_sta_process_addba_resp(struct net_device *dev, | 781 | static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, |
1435 | struct ieee80211_mgmt *mgmt, | 782 | struct ieee80211_if_sta *ifsta, bool deauth, |
1436 | size_t len) | 783 | bool self_disconnected, u16 reason) |
1437 | { | 784 | { |
1438 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 785 | struct ieee80211_local *local = sdata->local; |
1439 | struct ieee80211_hw *hw = &local->hw; | ||
1440 | struct sta_info *sta; | 786 | struct sta_info *sta; |
1441 | u16 capab; | 787 | u32 changed = BSS_CHANGED_ASSOC; |
1442 | u16 tid; | ||
1443 | u8 *state; | ||
1444 | 788 | ||
1445 | rcu_read_lock(); | 789 | rcu_read_lock(); |
1446 | 790 | ||
1447 | sta = sta_info_get(local, mgmt->sa); | 791 | sta = sta_info_get(local, ifsta->bssid); |
1448 | if (!sta) { | 792 | if (!sta) { |
1449 | rcu_read_unlock(); | 793 | rcu_read_unlock(); |
1450 | return; | 794 | return; |
1451 | } | 795 | } |
1452 | 796 | ||
1453 | capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); | 797 | if (deauth) { |
1454 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | 798 | ifsta->direct_probe_tries = 0; |
799 | ifsta->auth_tries = 0; | ||
800 | } | ||
801 | ifsta->assoc_scan_tries = 0; | ||
802 | ifsta->assoc_tries = 0; | ||
1455 | 803 | ||
1456 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 804 | netif_tx_stop_all_queues(sdata->dev); |
805 | netif_carrier_off(sdata->dev); | ||
1457 | 806 | ||
1458 | spin_lock_bh(&sta->lock); | 807 | ieee80211_sta_tear_down_BA_sessions(sdata, sta->sta.addr); |
1459 | 808 | ||
1460 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | 809 | if (self_disconnected) { |
1461 | spin_unlock_bh(&sta->lock); | 810 | if (deauth) |
1462 | goto addba_resp_exit; | 811 | ieee80211_send_deauth_disassoc(sdata, |
812 | IEEE80211_STYPE_DEAUTH, reason); | ||
813 | else | ||
814 | ieee80211_send_deauth_disassoc(sdata, | ||
815 | IEEE80211_STYPE_DISASSOC, reason); | ||
1463 | } | 816 | } |
1464 | 817 | ||
1465 | if (mgmt->u.action.u.addba_resp.dialog_token != | 818 | ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; |
1466 | sta->ampdu_mlme.tid_tx[tid]->dialog_token) { | 819 | changed |= ieee80211_reset_erp_info(sdata); |
1467 | spin_unlock_bh(&sta->lock); | ||
1468 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1469 | printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); | ||
1470 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1471 | goto addba_resp_exit; | ||
1472 | } | ||
1473 | 820 | ||
1474 | del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | 821 | if (sdata->bss_conf.assoc_ht) |
1475 | #ifdef CONFIG_MAC80211_HT_DEBUG | 822 | changed |= BSS_CHANGED_HT; |
1476 | printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); | ||
1477 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1478 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) | ||
1479 | == WLAN_STATUS_SUCCESS) { | ||
1480 | *state |= HT_ADDBA_RECEIVED_MSK; | ||
1481 | sta->ampdu_mlme.addba_req_num[tid] = 0; | ||
1482 | 823 | ||
1483 | if (*state == HT_AGG_STATE_OPERATIONAL) | 824 | sdata->bss_conf.assoc_ht = 0; |
1484 | ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); | 825 | sdata->bss_conf.ht_conf = NULL; |
826 | sdata->bss_conf.ht_bss_conf = NULL; | ||
1485 | 827 | ||
1486 | spin_unlock_bh(&sta->lock); | 828 | ieee80211_led_assoc(local, 0); |
1487 | } else { | 829 | sdata->bss_conf.assoc = 0; |
1488 | sta->ampdu_mlme.addba_req_num[tid]++; | 830 | |
1489 | /* this will allow the state check in stop_BA_session */ | 831 | ieee80211_sta_send_apinfo(sdata, ifsta); |
1490 | *state = HT_AGG_STATE_OPERATIONAL; | 832 | |
1491 | spin_unlock_bh(&sta->lock); | 833 | if (self_disconnected) |
1492 | ieee80211_stop_tx_ba_session(hw, sta->addr, tid, | 834 | ifsta->state = IEEE80211_STA_MLME_DISABLED; |
1493 | WLAN_BACK_INITIATOR); | 835 | |
1494 | } | 836 | sta_info_unlink(&sta); |
1495 | 837 | ||
1496 | addba_resp_exit: | ||
1497 | rcu_read_unlock(); | 838 | rcu_read_unlock(); |
839 | |||
840 | sta_info_destroy(sta); | ||
1498 | } | 841 | } |
1499 | 842 | ||
1500 | void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, | 843 | static int ieee80211_sta_wep_configured(struct ieee80211_sub_if_data *sdata) |
1501 | u16 initiator, u16 reason_code) | ||
1502 | { | 844 | { |
1503 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 845 | if (!sdata || !sdata->default_key || |
1504 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 846 | sdata->default_key->conf.alg != ALG_WEP) |
1505 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 847 | return 0; |
1506 | struct sk_buff *skb; | 848 | return 1; |
1507 | struct ieee80211_mgmt *mgmt; | 849 | } |
1508 | u16 params; | ||
1509 | |||
1510 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | ||
1511 | |||
1512 | if (!skb) { | ||
1513 | printk(KERN_ERR "%s: failed to allocate buffer " | ||
1514 | "for delba frame\n", dev->name); | ||
1515 | return; | ||
1516 | } | ||
1517 | 850 | ||
1518 | skb_reserve(skb, local->hw.extra_tx_headroom); | 851 | static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata, |
1519 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 852 | struct ieee80211_if_sta *ifsta) |
1520 | memset(mgmt, 0, 24); | 853 | { |
1521 | memcpy(mgmt->da, da, ETH_ALEN); | 854 | struct ieee80211_local *local = sdata->local; |
1522 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 855 | struct ieee80211_bss *bss; |
1523 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) | 856 | int bss_privacy; |
1524 | memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN); | 857 | int wep_privacy; |
1525 | else | 858 | int privacy_invoked; |
1526 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | ||
1527 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
1528 | IEEE80211_STYPE_ACTION); | ||
1529 | 859 | ||
1530 | skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba)); | 860 | if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL)) |
861 | return 0; | ||
1531 | 862 | ||
1532 | mgmt->u.action.category = WLAN_CATEGORY_BACK; | 863 | bss = ieee80211_rx_bss_get(local, ifsta->bssid, |
1533 | mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA; | 864 | local->hw.conf.channel->center_freq, |
1534 | params = (u16)(initiator << 11); /* bit 11 initiator */ | 865 | ifsta->ssid, ifsta->ssid_len); |
1535 | params |= (u16)(tid << 12); /* bit 15:12 TID number */ | 866 | if (!bss) |
867 | return 0; | ||
1536 | 868 | ||
1537 | mgmt->u.action.u.delba.params = cpu_to_le16(params); | 869 | bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY); |
1538 | mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); | 870 | wep_privacy = !!ieee80211_sta_wep_configured(sdata); |
871 | privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED); | ||
1539 | 872 | ||
1540 | ieee80211_sta_tx(dev, skb, 0); | 873 | ieee80211_rx_bss_put(local, bss); |
1541 | } | ||
1542 | 874 | ||
1543 | void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn) | 875 | if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked)) |
1544 | { | 876 | return 0; |
1545 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1546 | struct sk_buff *skb; | ||
1547 | struct ieee80211_bar *bar; | ||
1548 | u16 bar_control = 0; | ||
1549 | 877 | ||
1550 | skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); | 878 | return 1; |
1551 | if (!skb) { | ||
1552 | printk(KERN_ERR "%s: failed to allocate buffer for " | ||
1553 | "bar frame\n", dev->name); | ||
1554 | return; | ||
1555 | } | ||
1556 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1557 | bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar)); | ||
1558 | memset(bar, 0, sizeof(*bar)); | ||
1559 | bar->frame_control = IEEE80211_FC(IEEE80211_FTYPE_CTL, | ||
1560 | IEEE80211_STYPE_BACK_REQ); | ||
1561 | memcpy(bar->ra, ra, ETH_ALEN); | ||
1562 | memcpy(bar->ta, dev->dev_addr, ETH_ALEN); | ||
1563 | bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; | ||
1564 | bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; | ||
1565 | bar_control |= (u16)(tid << 12); | ||
1566 | bar->control = cpu_to_le16(bar_control); | ||
1567 | bar->start_seq_num = cpu_to_le16(ssn); | ||
1568 | |||
1569 | ieee80211_sta_tx(dev, skb, 0); | ||
1570 | } | 879 | } |
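
ieee80211_privacy_mismatch() in the new code on the right reports a mismatch only when the BSS privacy capability bit agrees with neither the local WEP state nor the privacy_invoked flag. A short truth-table sketch of that rule (plain userspace C, with names chosen to mirror the variables above):

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the decision above: no mismatch if the BSS privacy bit matches
 * either our WEP configuration or the privacy_invoked flag. */
static bool privacy_mismatch(bool bss_privacy, bool wep_privacy,
                             bool privacy_invoked)
{
    if (bss_privacy == wep_privacy || bss_privacy == privacy_invoked)
        return false;
    return true;
}

int main(void)
{
    int bss, wep, inv;

    printf("bss_privacy wep_privacy privacy_invoked -> mismatch\n");
    for (bss = 0; bss <= 1; bss++)
        for (wep = 0; wep <= 1; wep++)
            for (inv = 0; inv <= 1; inv++)
                printf("     %d           %d            %d        ->    %d\n",
                       bss, wep, inv,
                       privacy_mismatch(bss, wep, inv));
    return 0;
}

The only rows that abort association are the two where the BSS advertises privacy that the station neither configures nor invokes, or vice versa.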
1571 | 880 | ||
1572 | void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, | 881 | static void ieee80211_associate(struct ieee80211_sub_if_data *sdata, |
1573 | u16 initiator, u16 reason) | 882 | struct ieee80211_if_sta *ifsta) |
1574 | { | 883 | { |
1575 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1576 | struct ieee80211_hw *hw = &local->hw; | ||
1577 | struct sta_info *sta; | ||
1578 | int ret, i; | ||
1579 | DECLARE_MAC_BUF(mac); | 884 | DECLARE_MAC_BUF(mac); |
1580 | 885 | ||
1581 | rcu_read_lock(); | 886 | ifsta->assoc_tries++; |
1582 | 887 | if (ifsta->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) { | |
1583 | sta = sta_info_get(local, ra); | 888 | printk(KERN_DEBUG "%s: association with AP %s" |
1584 | if (!sta) { | 889 | " timed out\n", |
1585 | rcu_read_unlock(); | 890 | sdata->dev->name, print_mac(mac, ifsta->bssid)); |
891 | ifsta->state = IEEE80211_STA_MLME_DISABLED; | ||
1586 | return; | 892 | return; |
1587 | } | 893 | } |
1588 | 894 | ||
1589 | /* check if TID is in operational state */ | 895 | ifsta->state = IEEE80211_STA_MLME_ASSOCIATE; |
1590 | spin_lock_bh(&sta->lock); | 896 | printk(KERN_DEBUG "%s: associate with AP %s\n", |
1591 | if (sta->ampdu_mlme.tid_state_rx[tid] | 897 | sdata->dev->name, print_mac(mac, ifsta->bssid)); |
1592 | != HT_AGG_STATE_OPERATIONAL) { | 898 | if (ieee80211_privacy_mismatch(sdata, ifsta)) { |
1593 | spin_unlock_bh(&sta->lock); | 899 | printk(KERN_DEBUG "%s: mismatch in privacy configuration and " |
1594 | rcu_read_unlock(); | 900 | "mixed-cell disabled - abort association\n", sdata->dev->name); |
901 | ifsta->state = IEEE80211_STA_MLME_DISABLED; | ||
1595 | return; | 902 | return; |
1596 | } | 903 | } |
1597 | sta->ampdu_mlme.tid_state_rx[tid] = | ||
1598 | HT_AGG_STATE_REQ_STOP_BA_MSK | | ||
1599 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | ||
1600 | spin_unlock_bh(&sta->lock); | ||
1601 | |||
1602 | /* stop HW Rx aggregation. ampdu_action existence | ||
1603 | * already verified in session init so we add the BUG_ON */ | ||
1604 | BUG_ON(!local->ops->ampdu_action); | ||
1605 | |||
1606 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1607 | printk(KERN_DEBUG "Rx BA session stop requested for %s tid %u\n", | ||
1608 | print_mac(mac, ra), tid); | ||
1609 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1610 | |||
1611 | ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP, | ||
1612 | ra, tid, NULL); | ||
1613 | if (ret) | ||
1614 | printk(KERN_DEBUG "HW problem - cannot stop rx " | ||
1615 | "aggregation for tid %d\n", tid); | ||
1616 | |||
1617 | /* shutdown timer has not expired */ | ||
1618 | if (initiator != WLAN_BACK_TIMER) | ||
1619 | del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer); | ||
1620 | |||
1621 | /* check if this is a self generated aggregation halt */ | ||
1622 | if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER) | ||
1623 | ieee80211_send_delba(dev, ra, tid, 0, reason); | ||
1624 | |||
1625 | /* free the reordering buffer */ | ||
1626 | for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) { | ||
1627 | if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) { | ||
1628 | /* release the reordered frames */ | ||
1629 | dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]); | ||
1630 | sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--; | ||
1631 | sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL; | ||
1632 | } | ||
1633 | } | ||
1634 | /* free resources */ | ||
1635 | kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf); | ||
1636 | kfree(sta->ampdu_mlme.tid_rx[tid]); | ||
1637 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
1638 | sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE; | ||
1639 | 904 | ||
1640 | rcu_read_unlock(); | 905 | ieee80211_send_assoc(sdata, ifsta); |
906 | |||
907 | mod_timer(&ifsta->timer, jiffies + IEEE80211_ASSOC_TIMEOUT); | ||
1641 | } | 908 | } |
1642 | 909 | ||
1643 | 910 | ||
1644 | static void ieee80211_sta_process_delba(struct net_device *dev, | 911 | static void ieee80211_associated(struct ieee80211_sub_if_data *sdata, |
1645 | struct ieee80211_mgmt *mgmt, size_t len) | 912 | struct ieee80211_if_sta *ifsta) |
1646 | { | 913 | { |
1647 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 914 | struct ieee80211_local *local = sdata->local; |
1648 | struct sta_info *sta; | 915 | struct sta_info *sta; |
1649 | u16 tid, params; | 916 | int disassoc; |
1650 | u16 initiator; | ||
1651 | DECLARE_MAC_BUF(mac); | 917 | DECLARE_MAC_BUF(mac); |
1652 | 918 | ||
1653 | rcu_read_lock(); | 919 | /* TODO: start monitoring current AP signal quality and number of |
1654 | 920 | * missed beacons. Scan other channels every now and then and search | |
1655 | sta = sta_info_get(local, mgmt->sa); | 921 | * for better APs. */ |
1656 | if (!sta) { | 922 | /* TODO: remove expired BSSes */ |
1657 | rcu_read_unlock(); | ||
1658 | return; | ||
1659 | } | ||
1660 | |||
1661 | params = le16_to_cpu(mgmt->u.action.u.delba.params); | ||
1662 | tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; | ||
1663 | initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11; | ||
1664 | |||
1665 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1666 | if (net_ratelimit()) | ||
1667 | printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n", | ||
1668 | print_mac(mac, mgmt->sa), | ||
1669 | initiator ? "initiator" : "recipient", tid, | ||
1670 | mgmt->u.action.u.delba.reason_code); | ||
1671 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
1672 | |||
1673 | if (initiator == WLAN_BACK_INITIATOR) | ||
1674 | ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid, | ||
1675 | WLAN_BACK_INITIATOR, 0); | ||
1676 | else { /* WLAN_BACK_RECIPIENT */ | ||
1677 | spin_lock_bh(&sta->lock); | ||
1678 | sta->ampdu_mlme.tid_state_tx[tid] = | ||
1679 | HT_AGG_STATE_OPERATIONAL; | ||
1680 | spin_unlock_bh(&sta->lock); | ||
1681 | ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid, | ||
1682 | WLAN_BACK_RECIPIENT); | ||
1683 | } | ||
1684 | rcu_read_unlock(); | ||
1685 | } | ||
1686 | 923 | ||
1687 | /* | 924 | ifsta->state = IEEE80211_STA_MLME_ASSOCIATED; |
1688 | * After sending add Block Ack request we activated a timer until | ||
1689 | * add Block Ack response will arrive from the recipient. | ||
1690 | * If this timer expires sta_addba_resp_timer_expired will be executed. | ||
1691 | */ | ||
1692 | void sta_addba_resp_timer_expired(unsigned long data) | ||
1693 | { | ||
1694 | /* not an elegant detour, but there is no choice as the timer passes | ||
1695 | * only one argument, and both sta_info and TID are needed, so init | ||
1696 | * flow in sta_info_create gives the TID as data, while the timer_to_tid | ||
1697 | * array gives the sta through container_of */ | ||
1698 | u16 tid = *(u8 *)data; | ||
1699 | struct sta_info *temp_sta = container_of((void *)data, | ||
1700 | struct sta_info, timer_to_tid[tid]); | ||
1701 | |||
1702 | struct ieee80211_local *local = temp_sta->local; | ||
1703 | struct ieee80211_hw *hw = &local->hw; | ||
1704 | struct sta_info *sta; | ||
1705 | u8 *state; | ||
1706 | 925 | ||
1707 | rcu_read_lock(); | 926 | rcu_read_lock(); |
1708 | 927 | ||
1709 | sta = sta_info_get(local, temp_sta->addr); | 928 | sta = sta_info_get(local, ifsta->bssid); |
1710 | if (!sta) { | 929 | if (!sta) { |
1711 | rcu_read_unlock(); | 930 | printk(KERN_DEBUG "%s: No STA entry for own AP %s\n", |
1712 | return; | 931 | sdata->dev->name, print_mac(mac, ifsta->bssid)); |
1713 | } | 932 | disassoc = 1; |
1714 | 933 | } else { | |
1715 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 934 | disassoc = 0; |
1716 | /* check if the TID waits for addBA response */ | 935 | if (time_after(jiffies, |
1717 | spin_lock_bh(&sta->lock); | 936 | sta->last_rx + IEEE80211_MONITORING_INTERVAL)) { |
1718 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) { | 937 | if (ifsta->flags & IEEE80211_STA_PROBEREQ_POLL) { |
1719 | spin_unlock_bh(&sta->lock); | 938 | printk(KERN_DEBUG "%s: No ProbeResp from " |
1720 | *state = HT_AGG_STATE_IDLE; | 939 | "current AP %s - assume out of " |
1721 | #ifdef CONFIG_MAC80211_HT_DEBUG | 940 | "range\n", |
1722 | printk(KERN_DEBUG "timer expired on tid %d but we are not " | 941 | sdata->dev->name, print_mac(mac, ifsta->bssid)); |
1723 | "expecting addBA response there\n", tid); | 942 | disassoc = 1; |
1724 | #endif | 943 | } else |
1725 | goto timer_expired_exit; | 944 | ieee80211_send_probe_req(sdata, ifsta->bssid, |
945 | ifsta->ssid, | ||
946 | ifsta->ssid_len); | ||
947 | ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL; | ||
948 | } else { | ||
949 | ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL; | ||
950 | if (time_after(jiffies, ifsta->last_probe + | ||
951 | IEEE80211_PROBE_INTERVAL)) { | ||
952 | ifsta->last_probe = jiffies; | ||
953 | ieee80211_send_probe_req(sdata, ifsta->bssid, | ||
954 | ifsta->ssid, | ||
955 | ifsta->ssid_len); | ||
956 | } | ||
957 | } | ||
1726 | } | 958 | } |
1727 | 959 | ||
1728 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1729 | printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); | ||
1730 | #endif | ||
1731 | |||
1732 | /* go through the state check in stop_BA_session */ | ||
1733 | *state = HT_AGG_STATE_OPERATIONAL; | ||
1734 | spin_unlock_bh(&sta->lock); | ||
1735 | ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid, | ||
1736 | WLAN_BACK_INITIATOR); | ||
1737 | |||
1738 | timer_expired_exit: | ||
1739 | rcu_read_unlock(); | 960 | rcu_read_unlock(); |
1740 | } | ||
1741 | 961 | ||
1742 | /* | 962 | if (disassoc) |
1743 | * After accepting the AddBA Request we activated a timer, | 963 | ieee80211_set_disassoc(sdata, ifsta, true, true, |
1744 | * resetting it after each frame that arrives from the originator. | 964 | WLAN_REASON_PREV_AUTH_NOT_VALID); |
1745 | * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. | 965 | else |
1746 | */ | 966 | mod_timer(&ifsta->timer, jiffies + |
1747 | static void sta_rx_agg_session_timer_expired(unsigned long data) | 967 | IEEE80211_MONITORING_INTERVAL); |
1748 | { | ||
1749 | /* not an elegant detour, but there is no choice as the timer passes | ||
1750 | * only one argument, and various sta_info are needed here, so init | ||
1751 | * flow in sta_info_create gives the TID as data, while the timer_to_tid | ||
1752 | * array gives the sta through container_of */ | ||
1753 | u8 *ptid = (u8 *)data; | ||
1754 | u8 *timer_to_id = ptid - *ptid; | ||
1755 | struct sta_info *sta = container_of(timer_to_id, struct sta_info, | ||
1756 | timer_to_tid[0]); | ||
1757 | |||
1758 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
1759 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); | ||
1760 | #endif | ||
1761 | ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, | ||
1762 | (u16)*ptid, WLAN_BACK_TIMER, | ||
1763 | WLAN_REASON_QSTA_TIMEOUT); | ||
1764 | } | 968 | } |
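
The two timer callbacks above rely on the same trick, described in their "not an elegant detour" comments: the timer carries only a pointer to one slot of the sta's timer_to_tid[] array, each slot stores its own index, so the callback can recover the TID from the slot's value and the sta_info from container_of() on the start of the array. A standalone sketch of that pattern (struct and names are invented; it assumes, like the old kernel timer API, that a pointer fits in an unsigned long):

#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

#define NUM_TIDS 16

struct fake_sta {
    char name[8];
    /* each slot stores its own index, so a pointer to one slot is enough
     * to recover both the TID and the enclosing structure */
    uint8_t timer_to_tid[NUM_TIDS];
};

/* stand-in for the timer callback: gets only one unsigned long of context */
static void timer_expired(unsigned long data)
{
    uint8_t *ptid = (uint8_t *)data;
    uint8_t tid = *ptid;                 /* the slot's value is its index */
    uint8_t *base = ptid - tid;          /* back to timer_to_tid[0] */
    struct fake_sta *sta =
        container_of(base, struct fake_sta, timer_to_tid[0]);

    printf("timer fired for sta %s, tid %d\n", sta->name, tid);
}

int main(void)
{
    struct fake_sta sta = { .name = "ap0" };
    int i;

    for (i = 0; i < NUM_TIDS; i++)
        sta.timer_to_tid[i] = i;         /* initialised once, as at sta alloc */

    /* "arm" a timer for TID 5: the only context passed is this pointer */
    timer_expired((unsigned long)&sta.timer_to_tid[5]);
    return 0;
}

The design choice is simply that one pointer encodes two facts: its value locates the sta, and the byte it points at names the TID.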
1765 | 969 | ||
1766 | void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr) | ||
1767 | { | ||
1768 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1769 | int i; | ||
1770 | 970 | ||
1771 | for (i = 0; i < STA_TID_NUM; i++) { | 971 | static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata, |
1772 | ieee80211_stop_tx_ba_session(&local->hw, addr, i, | 972 | struct ieee80211_if_sta *ifsta) |
1773 | WLAN_BACK_INITIATOR); | 973 | { |
1774 | ieee80211_sta_stop_rx_ba_session(dev, addr, i, | 974 | printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name); |
1775 | WLAN_BACK_RECIPIENT, | 975 | ifsta->flags |= IEEE80211_STA_AUTHENTICATED; |
1776 | WLAN_REASON_QSTA_LEAVE_QBSS); | 976 | ieee80211_associate(sdata, ifsta); |
1777 | } | ||
1778 | } | 977 | } |
1779 | 978 | ||
1780 | static void ieee80211_send_refuse_measurement_request(struct net_device *dev, | ||
1781 | struct ieee80211_msrment_ie *request_ie, | ||
1782 | const u8 *da, const u8 *bssid, | ||
1783 | u8 dialog_token) | ||
1784 | { | ||
1785 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1786 | struct sk_buff *skb; | ||
1787 | struct ieee80211_mgmt *msr_report; | ||
1788 | 979 | ||
1789 | skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + | 980 | static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, |
1790 | sizeof(struct ieee80211_msrment_ie)); | 981 | struct ieee80211_if_sta *ifsta, |
982 | struct ieee80211_mgmt *mgmt, | ||
983 | size_t len) | ||
984 | { | ||
985 | u8 *pos; | ||
986 | struct ieee802_11_elems elems; | ||
1791 | 987 | ||
1792 | if (!skb) { | 988 | pos = mgmt->u.auth.variable; |
1793 | printk(KERN_ERR "%s: failed to allocate buffer for " | 989 | ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); |
1794 | "measurement report frame\n", dev->name); | 990 | if (!elems.challenge) |
1795 | return; | 991 | return; |
1796 | } | 992 | ieee80211_send_auth(sdata, ifsta, 3, elems.challenge - 2, |
1797 | 993 | elems.challenge_len + 2, 1); | |
1798 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1799 | msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); | ||
1800 | memset(msr_report, 0, 24); | ||
1801 | memcpy(msr_report->da, da, ETH_ALEN); | ||
1802 | memcpy(msr_report->sa, dev->dev_addr, ETH_ALEN); | ||
1803 | memcpy(msr_report->bssid, bssid, ETH_ALEN); | ||
1804 | msr_report->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | ||
1805 | IEEE80211_STYPE_ACTION); | ||
1806 | |||
1807 | skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement)); | ||
1808 | msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT; | ||
1809 | msr_report->u.action.u.measurement.action_code = | ||
1810 | WLAN_ACTION_SPCT_MSR_RPRT; | ||
1811 | msr_report->u.action.u.measurement.dialog_token = dialog_token; | ||
1812 | |||
1813 | msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT; | ||
1814 | msr_report->u.action.u.measurement.length = | ||
1815 | sizeof(struct ieee80211_msrment_ie); | ||
1816 | |||
1817 | memset(&msr_report->u.action.u.measurement.msr_elem, 0, | ||
1818 | sizeof(struct ieee80211_msrment_ie)); | ||
1819 | msr_report->u.action.u.measurement.msr_elem.token = request_ie->token; | ||
1820 | msr_report->u.action.u.measurement.msr_elem.mode |= | ||
1821 | IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED; | ||
1822 | msr_report->u.action.u.measurement.msr_elem.type = request_ie->type; | ||
1823 | |||
1824 | ieee80211_sta_tx(dev, skb, 0); | ||
1825 | } | ||
1826 | |||
1827 | static void ieee80211_sta_process_measurement_req(struct net_device *dev, | ||
1828 | struct ieee80211_mgmt *mgmt, | ||
1829 | size_t len) | ||
1830 | { | ||
1831 | /* | ||
1832 | * Ignoring a measurement request is a spec violation. | ||
1833 | * Mandatory measurements must be reported; optional | ||
1834 | * measurements might be refused or reported as incapable. | ||
1835 | * For now, just refuse. | ||
1836 | * TODO: Answer basic measurement as unmeasured | ||
1837 | */ | ||
1838 | ieee80211_send_refuse_measurement_request(dev, | ||
1839 | &mgmt->u.action.u.measurement.msr_elem, | ||
1840 | mgmt->sa, mgmt->bssid, | ||
1841 | mgmt->u.action.u.measurement.dialog_token); | ||
1842 | } | 994 | } |
1843 | 995 | ||
1844 | 996 | static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, | |
1845 | static void ieee80211_rx_mgmt_auth(struct net_device *dev, | ||
1846 | struct ieee80211_if_sta *ifsta, | 997 | struct ieee80211_if_sta *ifsta, |
1847 | struct ieee80211_mgmt *mgmt, | 998 | struct ieee80211_mgmt *mgmt, |
1848 | size_t len) | 999 | size_t len) |
1849 | { | 1000 | { |
1850 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1851 | u16 auth_alg, auth_transaction, status_code; | 1001 | u16 auth_alg, auth_transaction, status_code; |
1852 | DECLARE_MAC_BUF(mac); | 1002 | DECLARE_MAC_BUF(mac); |
1853 | 1003 | ||
1854 | if (ifsta->state != IEEE80211_AUTHENTICATE && | 1004 | if (ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE && |
1855 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS) | 1005 | sdata->vif.type != NL80211_IFTYPE_ADHOC) |
1856 | return; | 1006 | return; |
1857 | 1007 | ||
1858 | if (len < 24 + 6) | 1008 | if (len < 24 + 6) |
1859 | return; | 1009 | return; |
1860 | 1010 | ||
1861 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 1011 | if (sdata->vif.type != NL80211_IFTYPE_ADHOC && |
1862 | memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) | 1012 | memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) |
1863 | return; | 1013 | return; |
1864 | 1014 | ||
1865 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 1015 | if (sdata->vif.type != NL80211_IFTYPE_ADHOC && |
1866 | memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) | 1016 | memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) |
1867 | return; | 1017 | return; |
1868 | 1018 | ||
@@ -1870,7 +1020,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev, | |||
1870 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); | 1020 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); |
1871 | status_code = le16_to_cpu(mgmt->u.auth.status_code); | 1021 | status_code = le16_to_cpu(mgmt->u.auth.status_code); |
1872 | 1022 | ||
1873 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 1023 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { |
1874 | /* | 1024 | /* |
1875 | * IEEE 802.11 standard does not require authentication in IBSS | 1025 | * IEEE 802.11 standard does not require authentication in IBSS |
1876 | * networks and most implementations do not seem to use it. | 1026 | * networks and most implementations do not seem to use it. |
@@ -1879,7 +1029,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev, | |||
1879 | */ | 1029 | */ |
1880 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) | 1030 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) |
1881 | return; | 1031 | return; |
1882 | ieee80211_send_auth(dev, ifsta, 2, NULL, 0, 0); | 1032 | ieee80211_send_auth(sdata, ifsta, 2, NULL, 0, 0); |
1883 | } | 1033 | } |
1884 | 1034 | ||
1885 | if (auth_alg != ifsta->auth_alg || | 1035 | if (auth_alg != ifsta->auth_alg || |
@@ -1912,7 +1062,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev, | |||
1912 | algs[pos] == 0xff) | 1062 | algs[pos] == 0xff) |
1913 | continue; | 1063 | continue; |
1914 | if (algs[pos] == WLAN_AUTH_SHARED_KEY && | 1064 | if (algs[pos] == WLAN_AUTH_SHARED_KEY && |
1915 | !ieee80211_sta_wep_configured(dev)) | 1065 | !ieee80211_sta_wep_configured(sdata)) |
1916 | continue; | 1066 | continue; |
1917 | ifsta->auth_alg = algs[pos]; | 1067 | ifsta->auth_alg = algs[pos]; |
1918 | break; | 1068 | break; |
@@ -1924,19 +1074,19 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev, | |||
1924 | switch (ifsta->auth_alg) { | 1074 | switch (ifsta->auth_alg) { |
1925 | case WLAN_AUTH_OPEN: | 1075 | case WLAN_AUTH_OPEN: |
1926 | case WLAN_AUTH_LEAP: | 1076 | case WLAN_AUTH_LEAP: |
1927 | ieee80211_auth_completed(dev, ifsta); | 1077 | ieee80211_auth_completed(sdata, ifsta); |
1928 | break; | 1078 | break; |
1929 | case WLAN_AUTH_SHARED_KEY: | 1079 | case WLAN_AUTH_SHARED_KEY: |
1930 | if (ifsta->auth_transaction == 4) | 1080 | if (ifsta->auth_transaction == 4) |
1931 | ieee80211_auth_completed(dev, ifsta); | 1081 | ieee80211_auth_completed(sdata, ifsta); |
1932 | else | 1082 | else |
1933 | ieee80211_auth_challenge(dev, ifsta, mgmt, len); | 1083 | ieee80211_auth_challenge(sdata, ifsta, mgmt, len); |
1934 | break; | 1084 | break; |
1935 | } | 1085 | } |
1936 | } | 1086 | } |
1937 | 1087 | ||
1938 | 1088 | ||
1939 | static void ieee80211_rx_mgmt_deauth(struct net_device *dev, | 1089 | static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, |
1940 | struct ieee80211_if_sta *ifsta, | 1090 | struct ieee80211_if_sta *ifsta, |
1941 | struct ieee80211_mgmt *mgmt, | 1091 | struct ieee80211_mgmt *mgmt, |
1942 | size_t len) | 1092 | size_t len) |
@@ -1953,22 +1103,22 @@ static void ieee80211_rx_mgmt_deauth(struct net_device *dev, | |||
1953 | reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); | 1103 | reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); |
1954 | 1104 | ||
1955 | if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) | 1105 | if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) |
1956 | printk(KERN_DEBUG "%s: deauthenticated\n", dev->name); | 1106 | printk(KERN_DEBUG "%s: deauthenticated\n", sdata->dev->name); |
1957 | 1107 | ||
1958 | if (ifsta->state == IEEE80211_AUTHENTICATE || | 1108 | if (ifsta->state == IEEE80211_STA_MLME_AUTHENTICATE || |
1959 | ifsta->state == IEEE80211_ASSOCIATE || | 1109 | ifsta->state == IEEE80211_STA_MLME_ASSOCIATE || |
1960 | ifsta->state == IEEE80211_ASSOCIATED) { | 1110 | ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) { |
1961 | ifsta->state = IEEE80211_AUTHENTICATE; | 1111 | ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE; |
1962 | mod_timer(&ifsta->timer, jiffies + | 1112 | mod_timer(&ifsta->timer, jiffies + |
1963 | IEEE80211_RETRY_AUTH_INTERVAL); | 1113 | IEEE80211_RETRY_AUTH_INTERVAL); |
1964 | } | 1114 | } |
1965 | 1115 | ||
1966 | ieee80211_set_disassoc(dev, ifsta, 1); | 1116 | ieee80211_set_disassoc(sdata, ifsta, true, false, 0); |
1967 | ifsta->flags &= ~IEEE80211_STA_AUTHENTICATED; | 1117 | ifsta->flags &= ~IEEE80211_STA_AUTHENTICATED; |
1968 | } | 1118 | } |
1969 | 1119 | ||
1970 | 1120 | ||
1971 | static void ieee80211_rx_mgmt_disassoc(struct net_device *dev, | 1121 | static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, |
1972 | struct ieee80211_if_sta *ifsta, | 1122 | struct ieee80211_if_sta *ifsta, |
1973 | struct ieee80211_mgmt *mgmt, | 1123 | struct ieee80211_mgmt *mgmt, |
1974 | size_t len) | 1124 | size_t len) |
@@ -1985,15 +1135,15 @@ static void ieee80211_rx_mgmt_disassoc(struct net_device *dev, | |||
1985 | reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); | 1135 | reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); |
1986 | 1136 | ||
1987 | if (ifsta->flags & IEEE80211_STA_ASSOCIATED) | 1137 | if (ifsta->flags & IEEE80211_STA_ASSOCIATED) |
1988 | printk(KERN_DEBUG "%s: disassociated\n", dev->name); | 1138 | printk(KERN_DEBUG "%s: disassociated\n", sdata->dev->name); |
1989 | 1139 | ||
1990 | if (ifsta->state == IEEE80211_ASSOCIATED) { | 1140 | if (ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) { |
1991 | ifsta->state = IEEE80211_ASSOCIATE; | 1141 | ifsta->state = IEEE80211_STA_MLME_ASSOCIATE; |
1992 | mod_timer(&ifsta->timer, jiffies + | 1142 | mod_timer(&ifsta->timer, jiffies + |
1993 | IEEE80211_RETRY_AUTH_INTERVAL); | 1143 | IEEE80211_RETRY_AUTH_INTERVAL); |
1994 | } | 1144 | } |
1995 | 1145 | ||
1996 | ieee80211_set_disassoc(dev, ifsta, 0); | 1146 | ieee80211_set_disassoc(sdata, ifsta, false, false, 0); |
1997 | } | 1147 | } |
1998 | 1148 | ||
1999 | 1149 | ||
@@ -2004,7 +1154,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2004 | int reassoc) | 1154 | int reassoc) |
2005 | { | 1155 | { |
2006 | struct ieee80211_local *local = sdata->local; | 1156 | struct ieee80211_local *local = sdata->local; |
2007 | struct net_device *dev = sdata->dev; | ||
2008 | struct ieee80211_supported_band *sband; | 1157 | struct ieee80211_supported_band *sband; |
2009 | struct sta_info *sta; | 1158 | struct sta_info *sta; |
2010 | u64 rates, basic_rates; | 1159 | u64 rates, basic_rates; |
@@ -2019,7 +1168,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2019 | /* AssocResp and ReassocResp have identical structure, so process both | 1168 | /* AssocResp and ReassocResp have identical structure, so process both |
2020 | * of them in this function. */ | 1169 | * of them in this function. */ |
2021 | 1170 | ||
2022 | if (ifsta->state != IEEE80211_ASSOCIATE) | 1171 | if (ifsta->state != IEEE80211_STA_MLME_ASSOCIATE) |
2023 | return; | 1172 | return; |
2024 | 1173 | ||
2025 | if (len < 24 + 6) | 1174 | if (len < 24 + 6) |
@@ -2034,12 +1183,12 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2034 | 1183 | ||
2035 | printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " | 1184 | printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " |
2036 | "status=%d aid=%d)\n", | 1185 | "status=%d aid=%d)\n", |
2037 | dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), | 1186 | sdata->dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), |
2038 | capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); | 1187 | capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); |
2039 | 1188 | ||
2040 | if (status_code != WLAN_STATUS_SUCCESS) { | 1189 | if (status_code != WLAN_STATUS_SUCCESS) { |
2041 | printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", | 1190 | printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", |
2042 | dev->name, status_code); | 1191 | sdata->dev->name, status_code); |
2043 | /* if this was a reassociation, ensure we try a "full" | 1192 | /* if this was a reassociation, ensure we try a "full" |
2044 | * association next time. This works around some broken APs | 1193 | * association next time. This works around some broken APs |
2045 | * which do not correctly reject reassociation requests. */ | 1194 | * which do not correctly reject reassociation requests. */ |
@@ -2049,7 +1198,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2049 | 1198 | ||
2050 | if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) | 1199 | if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) |
2051 | printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " | 1200 | printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " |
2052 | "set\n", dev->name, aid); | 1201 | "set\n", sdata->dev->name, aid); |
2053 | aid &= ~(BIT(15) | BIT(14)); | 1202 | aid &= ~(BIT(15) | BIT(14)); |
2054 | 1203 | ||
2055 | pos = mgmt->u.assoc_resp.variable; | 1204 | pos = mgmt->u.assoc_resp.variable; |
@@ -2057,11 +1206,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2057 | 1206 | ||
2058 | if (!elems.supp_rates) { | 1207 | if (!elems.supp_rates) { |
2059 | printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", | 1208 | printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", |
2060 | dev->name); | 1209 | sdata->dev->name); |
2061 | return; | 1210 | return; |
2062 | } | 1211 | } |
2063 | 1212 | ||
2064 | printk(KERN_DEBUG "%s: associated\n", dev->name); | 1213 | printk(KERN_DEBUG "%s: associated\n", sdata->dev->name); |
2065 | ifsta->aid = aid; | 1214 | ifsta->aid = aid; |
2066 | ifsta->ap_capab = capab_info; | 1215 | ifsta->ap_capab = capab_info; |
2067 | 1216 | ||
@@ -2076,17 +1225,17 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2076 | /* Add STA entry for the AP */ | 1225 | /* Add STA entry for the AP */ |
2077 | sta = sta_info_get(local, ifsta->bssid); | 1226 | sta = sta_info_get(local, ifsta->bssid); |
2078 | if (!sta) { | 1227 | if (!sta) { |
2079 | struct ieee80211_sta_bss *bss; | 1228 | struct ieee80211_bss *bss; |
2080 | int err; | 1229 | int err; |
2081 | 1230 | ||
2082 | sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC); | 1231 | sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC); |
2083 | if (!sta) { | 1232 | if (!sta) { |
2084 | printk(KERN_DEBUG "%s: failed to alloc STA entry for" | 1233 | printk(KERN_DEBUG "%s: failed to alloc STA entry for" |
2085 | " the AP\n", dev->name); | 1234 | " the AP\n", sdata->dev->name); |
2086 | rcu_read_unlock(); | 1235 | rcu_read_unlock(); |
2087 | return; | 1236 | return; |
2088 | } | 1237 | } |
2089 | bss = ieee80211_rx_bss_get(dev, ifsta->bssid, | 1238 | bss = ieee80211_rx_bss_get(local, ifsta->bssid, |
2090 | local->hw.conf.channel->center_freq, | 1239 | local->hw.conf.channel->center_freq, |
2091 | ifsta->ssid, ifsta->ssid_len); | 1240 | ifsta->ssid, ifsta->ssid_len); |
2092 | if (bss) { | 1241 | if (bss) { |
@@ -2099,7 +1248,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2099 | err = sta_info_insert(sta); | 1248 | err = sta_info_insert(sta); |
2100 | if (err) { | 1249 | if (err) { |
2101 | printk(KERN_DEBUG "%s: failed to insert STA entry for" | 1250 | printk(KERN_DEBUG "%s: failed to insert STA entry for" |
2102 | " the AP (error %d)\n", dev->name, err); | 1251 | " the AP (error %d)\n", sdata->dev->name, err); |
2103 | rcu_read_unlock(); | 1252 | rcu_read_unlock(); |
2104 | return; | 1253 | return; |
2105 | } | 1254 | } |
@@ -2152,8 +1301,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2152 | } | 1301 | } |
2153 | } | 1302 | } |
2154 | 1303 | ||
2155 | sta->supp_rates[local->hw.conf.channel->band] = rates; | 1304 | sta->sta.supp_rates[local->hw.conf.channel->band] = rates; |
2156 | sdata->basic_rates = basic_rates; | 1305 | sdata->bss_conf.basic_rates = basic_rates; |
2157 | 1306 | ||
2158 | /* cf. IEEE 802.11 9.2.12 */ | 1307 | /* cf. IEEE 802.11 9.2.12 */ |
2159 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && | 1308 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && |
@@ -2167,19 +1316,19 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2167 | struct ieee80211_ht_bss_info bss_info; | 1316 | struct ieee80211_ht_bss_info bss_info; |
2168 | ieee80211_ht_cap_ie_to_ht_info( | 1317 | ieee80211_ht_cap_ie_to_ht_info( |
2169 | (struct ieee80211_ht_cap *) | 1318 | (struct ieee80211_ht_cap *) |
2170 | elems.ht_cap_elem, &sta->ht_info); | 1319 | elems.ht_cap_elem, &sta->sta.ht_info); |
2171 | ieee80211_ht_addt_info_ie_to_ht_bss_info( | 1320 | ieee80211_ht_addt_info_ie_to_ht_bss_info( |
2172 | (struct ieee80211_ht_addt_info *) | 1321 | (struct ieee80211_ht_addt_info *) |
2173 | elems.ht_info_elem, &bss_info); | 1322 | elems.ht_info_elem, &bss_info); |
2174 | ieee80211_handle_ht(local, 1, &sta->ht_info, &bss_info); | 1323 | ieee80211_handle_ht(local, 1, &sta->sta.ht_info, &bss_info); |
2175 | } | 1324 | } |
2176 | 1325 | ||
2177 | rate_control_rate_init(sta, local); | 1326 | rate_control_rate_init(sta); |
2178 | 1327 | ||
2179 | if (elems.wmm_param) { | 1328 | if (elems.wmm_param) { |
2180 | set_sta_flags(sta, WLAN_STA_WME); | 1329 | set_sta_flags(sta, WLAN_STA_WME); |
2181 | rcu_read_unlock(); | 1330 | rcu_read_unlock(); |
2182 | ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, | 1331 | ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param, |
2183 | elems.wmm_param_len); | 1332 | elems.wmm_param_len); |
2184 | } else | 1333 | } else |
2185 | rcu_read_unlock(); | 1334 | rcu_read_unlock(); |
@@ -2188,234 +1337,26 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
2188 | * ieee80211_set_associated() will tell the driver */ | 1337 | * ieee80211_set_associated() will tell the driver */ |
2189 | bss_conf->aid = aid; | 1338 | bss_conf->aid = aid; |
2190 | bss_conf->assoc_capability = capab_info; | 1339 | bss_conf->assoc_capability = capab_info; |
2191 | ieee80211_set_associated(dev, ifsta, 1); | 1340 | ieee80211_set_associated(sdata, ifsta); |
2192 | 1341 | ||
2193 | ieee80211_associated(dev, ifsta); | 1342 | ieee80211_associated(sdata, ifsta); |
2194 | } | 1343 | } |
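
A small detail from the association response handler above: the AID field is transmitted with bits 15:14 set, so the code warns when a noncompliant AP leaves them clear and masks them off before storing the AID. Illustrated below with an invented field value.

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

int main(void)
{
    uint16_t aid_field = 0xC007;   /* invented example: AID 7, bits 15:14 set */
    uint16_t aid;

    if ((aid_field & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
        printf("invalid aid value: bits 15:14 not set\n");

    aid = aid_field & ~(BIT(15) | BIT(14));
    printf("association ID: %u\n", aid);   /* prints 7 */
    return 0;
}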
2195 | 1344 | ||
2196 | 1345 | ||
2197 | /* Caller must hold local->sta_bss_lock */ | 1346 | static int ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, |
2198 | static void __ieee80211_rx_bss_hash_add(struct net_device *dev, | ||
2199 | struct ieee80211_sta_bss *bss) | ||
2200 | { | ||
2201 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2202 | u8 hash_idx; | ||
2203 | |||
2204 | if (bss_mesh_cfg(bss)) | ||
2205 | hash_idx = mesh_id_hash(bss_mesh_id(bss), | ||
2206 | bss_mesh_id_len(bss)); | ||
2207 | else | ||
2208 | hash_idx = STA_HASH(bss->bssid); | ||
2209 | |||
2210 | bss->hnext = local->sta_bss_hash[hash_idx]; | ||
2211 | local->sta_bss_hash[hash_idx] = bss; | ||
2212 | } | ||
2213 | |||
2214 | |||
2215 | /* Caller must hold local->sta_bss_lock */ | ||
2216 | static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local, | ||
2217 | struct ieee80211_sta_bss *bss) | ||
2218 | { | ||
2219 | struct ieee80211_sta_bss *b, *prev = NULL; | ||
2220 | b = local->sta_bss_hash[STA_HASH(bss->bssid)]; | ||
2221 | while (b) { | ||
2222 | if (b == bss) { | ||
2223 | if (!prev) | ||
2224 | local->sta_bss_hash[STA_HASH(bss->bssid)] = | ||
2225 | bss->hnext; | ||
2226 | else | ||
2227 | prev->hnext = bss->hnext; | ||
2228 | break; | ||
2229 | } | ||
2230 | prev = b; | ||
2231 | b = b->hnext; | ||
2232 | } | ||
2233 | } | ||
2234 | |||
2235 | |||
2236 | static struct ieee80211_sta_bss * | ||
2237 | ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int freq, | ||
2238 | u8 *ssid, u8 ssid_len) | ||
2239 | { | ||
2240 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2241 | struct ieee80211_sta_bss *bss; | ||
2242 | |||
2243 | bss = kzalloc(sizeof(*bss), GFP_ATOMIC); | ||
2244 | if (!bss) | ||
2245 | return NULL; | ||
2246 | atomic_inc(&bss->users); | ||
2247 | atomic_inc(&bss->users); | ||
2248 | memcpy(bss->bssid, bssid, ETH_ALEN); | ||
2249 | bss->freq = freq; | ||
2250 | if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) { | ||
2251 | memcpy(bss->ssid, ssid, ssid_len); | ||
2252 | bss->ssid_len = ssid_len; | ||
2253 | } | ||
2254 | |||
2255 | spin_lock_bh(&local->sta_bss_lock); | ||
2256 | /* TODO: order by RSSI? */ | ||
2257 | list_add_tail(&bss->list, &local->sta_bss_list); | ||
2258 | __ieee80211_rx_bss_hash_add(dev, bss); | ||
2259 | spin_unlock_bh(&local->sta_bss_lock); | ||
2260 | return bss; | ||
2261 | } | ||
2262 | |||
2263 | static struct ieee80211_sta_bss * | ||
2264 | ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq, | ||
2265 | u8 *ssid, u8 ssid_len) | ||
2266 | { | ||
2267 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2268 | struct ieee80211_sta_bss *bss; | ||
2269 | |||
2270 | spin_lock_bh(&local->sta_bss_lock); | ||
2271 | bss = local->sta_bss_hash[STA_HASH(bssid)]; | ||
2272 | while (bss) { | ||
2273 | if (!bss_mesh_cfg(bss) && | ||
2274 | !memcmp(bss->bssid, bssid, ETH_ALEN) && | ||
2275 | bss->freq == freq && | ||
2276 | bss->ssid_len == ssid_len && | ||
2277 | (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) { | ||
2278 | atomic_inc(&bss->users); | ||
2279 | break; | ||
2280 | } | ||
2281 | bss = bss->hnext; | ||
2282 | } | ||
2283 | spin_unlock_bh(&local->sta_bss_lock); | ||
2284 | return bss; | ||
2285 | } | ||
2286 | |||
2287 | #ifdef CONFIG_MAC80211_MESH | ||
2288 | static struct ieee80211_sta_bss * | ||
2289 | ieee80211_rx_mesh_bss_get(struct net_device *dev, u8 *mesh_id, int mesh_id_len, | ||
2290 | u8 *mesh_cfg, int freq) | ||
2291 | { | ||
2292 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2293 | struct ieee80211_sta_bss *bss; | ||
2294 | |||
2295 | spin_lock_bh(&local->sta_bss_lock); | ||
2296 | bss = local->sta_bss_hash[mesh_id_hash(mesh_id, mesh_id_len)]; | ||
2297 | while (bss) { | ||
2298 | if (bss_mesh_cfg(bss) && | ||
2299 | !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) && | ||
2300 | bss->freq == freq && | ||
2301 | mesh_id_len == bss->mesh_id_len && | ||
2302 | (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id, | ||
2303 | mesh_id_len))) { | ||
2304 | atomic_inc(&bss->users); | ||
2305 | break; | ||
2306 | } | ||
2307 | bss = bss->hnext; | ||
2308 | } | ||
2309 | spin_unlock_bh(&local->sta_bss_lock); | ||
2310 | return bss; | ||
2311 | } | ||
2312 | |||
2313 | static struct ieee80211_sta_bss * | ||
2314 | ieee80211_rx_mesh_bss_add(struct net_device *dev, u8 *mesh_id, int mesh_id_len, | ||
2315 | u8 *mesh_cfg, int mesh_config_len, int freq) | ||
2316 | { | ||
2317 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
2318 | struct ieee80211_sta_bss *bss; | ||
2319 | |||
2320 | if (mesh_config_len != MESH_CFG_LEN) | ||
2321 | return NULL; | ||
2322 | |||
2323 | bss = kzalloc(sizeof(*bss), GFP_ATOMIC); | ||
2324 | if (!bss) | ||
2325 | return NULL; | ||
2326 | |||
2327 | bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC); | ||
2328 | if (!bss->mesh_cfg) { | ||
2329 | kfree(bss); | ||
2330 | return NULL; | ||
2331 | } | ||
2332 | |||
2333 | if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) { | ||
2334 | bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC); | ||
2335 | if (!bss->mesh_id) { | ||
2336 | kfree(bss->mesh_cfg); | ||
2337 | kfree(bss); | ||
2338 | return NULL; | ||
2339 | } | ||
2340 | memcpy(bss->mesh_id, mesh_id, mesh_id_len); | ||
2341 | } | ||
2342 | |||
2343 | atomic_inc(&bss->users); | ||
2344 | atomic_inc(&bss->users); | ||
2345 | memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN); | ||
2346 | bss->mesh_id_len = mesh_id_len; | ||
2347 | bss->freq = freq; | ||
2348 | spin_lock_bh(&local->sta_bss_lock); | ||
2349 | /* TODO: order by RSSI? */ | ||
2350 | list_add_tail(&bss->list, &local->sta_bss_list); | ||
2351 | __ieee80211_rx_bss_hash_add(dev, bss); | ||
2352 | spin_unlock_bh(&local->sta_bss_lock); | ||
2353 | return bss; | ||
2354 | } | ||
2355 | #endif | ||
2356 | |||
2357 | static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss) | ||
2358 | { | ||
2359 | kfree(bss->wpa_ie); | ||
2360 | kfree(bss->rsn_ie); | ||
2361 | kfree(bss->wmm_ie); | ||
2362 | kfree(bss->ht_ie); | ||
2363 | kfree(bss->ht_add_ie); | ||
2364 | kfree(bss_mesh_id(bss)); | ||
2365 | kfree(bss_mesh_cfg(bss)); | ||
2366 | kfree(bss); | ||
2367 | } | ||
2368 | |||
2369 | |||
2370 | static void ieee80211_rx_bss_put(struct ieee80211_local *local, | ||
2371 | struct ieee80211_sta_bss *bss) | ||
2372 | { | ||
2373 | local_bh_disable(); | ||
2374 | if (!atomic_dec_and_lock(&bss->users, &local->sta_bss_lock)) { | ||
2375 | local_bh_enable(); | ||
2376 | return; | ||
2377 | } | ||
2378 | |||
2379 | __ieee80211_rx_bss_hash_del(local, bss); | ||
2380 | list_del(&bss->list); | ||
2381 | spin_unlock_bh(&local->sta_bss_lock); | ||
2382 | ieee80211_rx_bss_free(bss); | ||
2383 | } | ||
2384 | |||
2385 | |||
2386 | void ieee80211_rx_bss_list_init(struct ieee80211_local *local) | ||
2387 | { | ||
2388 | spin_lock_init(&local->sta_bss_lock); | ||
2389 | INIT_LIST_HEAD(&local->sta_bss_list); | ||
2390 | } | ||
2391 | |||
2392 | |||
2393 | void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local) | ||
2394 | { | ||
2395 | struct ieee80211_sta_bss *bss, *tmp; | ||
2396 | |||
2397 | list_for_each_entry_safe(bss, tmp, &local->sta_bss_list, list) | ||
2398 | ieee80211_rx_bss_put(local, bss); | ||
2399 | } | ||
2400 | |||
2401 | |||
2402 | static int ieee80211_sta_join_ibss(struct net_device *dev, | ||
2403 | struct ieee80211_if_sta *ifsta, | 1347 | struct ieee80211_if_sta *ifsta, |
2404 | struct ieee80211_sta_bss *bss) | 1348 | struct ieee80211_bss *bss) |
2405 | { | 1349 | { |
2406 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1350 | struct ieee80211_local *local = sdata->local; |
2407 | int res, rates, i, j; | 1351 | int res, rates, i, j; |
2408 | struct sk_buff *skb; | 1352 | struct sk_buff *skb; |
2409 | struct ieee80211_mgmt *mgmt; | 1353 | struct ieee80211_mgmt *mgmt; |
2410 | u8 *pos; | 1354 | u8 *pos; |
2411 | struct ieee80211_sub_if_data *sdata; | ||
2412 | struct ieee80211_supported_band *sband; | 1355 | struct ieee80211_supported_band *sband; |
2413 | union iwreq_data wrqu; | 1356 | union iwreq_data wrqu; |
2414 | 1357 | ||
2415 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 1358 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
2416 | 1359 | ||
2417 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
2418 | |||
2419 | /* Remove possible STA entries from other IBSS networks. */ | 1360 | /* Remove possible STA entries from other IBSS networks. */ |
2420 | sta_info_flush_delayed(sdata); | 1361 | sta_info_flush_delayed(sdata); |
2421 | 1362 | ||
@@ -2433,7 +1374,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2433 | sdata->drop_unencrypted = bss->capability & | 1374 | sdata->drop_unencrypted = bss->capability & |
2434 | WLAN_CAPABILITY_PRIVACY ? 1 : 0; | 1375 | WLAN_CAPABILITY_PRIVACY ? 1 : 0; |
2435 | 1376 | ||
2436 | res = ieee80211_set_freq(dev, bss->freq); | 1377 | res = ieee80211_set_freq(sdata, bss->freq); |
2437 | 1378 | ||
2438 | if (res) | 1379 | if (res) |
2439 | return res; | 1380 | return res; |
@@ -2446,10 +1387,10 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2446 | mgmt = (struct ieee80211_mgmt *) | 1387 | mgmt = (struct ieee80211_mgmt *) |
2447 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); | 1388 | skb_put(skb, 24 + sizeof(mgmt->u.beacon)); |
2448 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); | 1389 | memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); |
2449 | mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 1390 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
2450 | IEEE80211_STYPE_PROBE_RESP); | 1391 | IEEE80211_STYPE_PROBE_RESP); |
2451 | memset(mgmt->da, 0xff, ETH_ALEN); | 1392 | memset(mgmt->da, 0xff, ETH_ALEN); |
2452 | memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); | 1393 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
2453 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); | 1394 | memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); |
2454 | mgmt->u.beacon.beacon_int = | 1395 | mgmt->u.beacon.beacon_int = |
2455 | cpu_to_le16(local->hw.conf.beacon_int); | 1396 | cpu_to_le16(local->hw.conf.beacon_int); |
@@ -2506,108 +1447,38 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, | |||
2506 | } | 1447 | } |
2507 | ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates; | 1448 | ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates; |
2508 | 1449 | ||
2509 | ieee80211_sta_def_wmm_params(dev, bss, 1); | 1450 | ieee80211_sta_def_wmm_params(sdata, bss); |
2510 | 1451 | ||
2511 | ifsta->state = IEEE80211_IBSS_JOINED; | 1452 | ifsta->state = IEEE80211_STA_MLME_IBSS_JOINED; |
2512 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); | 1453 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); |
2513 | 1454 | ||
1455 | ieee80211_led_assoc(local, true); | ||
1456 | |||
2514 | memset(&wrqu, 0, sizeof(wrqu)); | 1457 | memset(&wrqu, 0, sizeof(wrqu)); |
2515 | memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); | 1458 | memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); |
2516 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | 1459 | wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL); |
2517 | 1460 | ||
2518 | return res; | 1461 | return res; |
2519 | } | 1462 | } |
2520 | 1463 | ||
2521 | u64 ieee80211_sta_get_rates(struct ieee80211_local *local, | 1464 | static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, |
2522 | struct ieee802_11_elems *elems, | ||
2523 | enum ieee80211_band band) | ||
2524 | { | ||
2525 | struct ieee80211_supported_band *sband; | ||
2526 | struct ieee80211_rate *bitrates; | ||
2527 | size_t num_rates; | ||
2528 | u64 supp_rates; | ||
2529 | int i, j; | ||
2530 | sband = local->hw.wiphy->bands[band]; | ||
2531 | |||
2532 | if (!sband) { | ||
2533 | WARN_ON(1); | ||
2534 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
2535 | } | ||
2536 | |||
2537 | bitrates = sband->bitrates; | ||
2538 | num_rates = sband->n_bitrates; | ||
2539 | supp_rates = 0; | ||
2540 | for (i = 0; i < elems->supp_rates_len + | ||
2541 | elems->ext_supp_rates_len; i++) { | ||
2542 | u8 rate = 0; | ||
2543 | int own_rate; | ||
2544 | if (i < elems->supp_rates_len) | ||
2545 | rate = elems->supp_rates[i]; | ||
2546 | else if (elems->ext_supp_rates) | ||
2547 | rate = elems->ext_supp_rates | ||
2548 | [i - elems->supp_rates_len]; | ||
2549 | own_rate = 5 * (rate & 0x7f); | ||
2550 | for (j = 0; j < num_rates; j++) | ||
2551 | if (bitrates[j].bitrate == own_rate) | ||
2552 | supp_rates |= BIT(j); | ||
2553 | } | ||
2554 | return supp_rates; | ||
2555 | } | ||
2556 | |||
2557 | |||
2558 | static void ieee80211_rx_bss_info(struct net_device *dev, | ||
2559 | struct ieee80211_mgmt *mgmt, | 1465 | struct ieee80211_mgmt *mgmt, |
2560 | size_t len, | 1466 | size_t len, |
2561 | struct ieee80211_rx_status *rx_status, | 1467 | struct ieee80211_rx_status *rx_status, |
2562 | struct ieee802_11_elems *elems, | 1468 | struct ieee802_11_elems *elems, |
2563 | int beacon) | 1469 | bool beacon) |
2564 | { | 1470 | { |
2565 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1471 | struct ieee80211_local *local = sdata->local; |
2566 | int freq, clen; | 1472 | int freq; |
2567 | struct ieee80211_sta_bss *bss; | 1473 | struct ieee80211_bss *bss; |
2568 | struct sta_info *sta; | 1474 | struct sta_info *sta; |
2569 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
2570 | u64 beacon_timestamp, rx_timestamp; | ||
2571 | struct ieee80211_channel *channel; | 1475 | struct ieee80211_channel *channel; |
1476 | u64 beacon_timestamp, rx_timestamp; | ||
1477 | u64 supp_rates = 0; | ||
1478 | enum ieee80211_band band = rx_status->band; | ||
2572 | DECLARE_MAC_BUF(mac); | 1479 | DECLARE_MAC_BUF(mac); |
2573 | DECLARE_MAC_BUF(mac2); | 1480 | DECLARE_MAC_BUF(mac2); |
2574 | 1481 | ||
2575 | if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN)) | ||
2576 | return; /* ignore ProbeResp to foreign address */ | ||
2577 | |||
2578 | beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); | ||
2579 | |||
2580 | if (ieee80211_vif_is_mesh(&sdata->vif) && elems->mesh_id && | ||
2581 | elems->mesh_config && mesh_matches_local(elems, dev)) { | ||
2582 | u64 rates = ieee80211_sta_get_rates(local, elems, | ||
2583 | rx_status->band); | ||
2584 | |||
2585 | mesh_neighbour_update(mgmt->sa, rates, dev, | ||
2586 | mesh_peer_accepts_plinks(elems, dev)); | ||
2587 | } | ||
2588 | |||
2589 | rcu_read_lock(); | ||
2590 | |||
2591 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems->supp_rates && | ||
2592 | memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 && | ||
2593 | (sta = sta_info_get(local, mgmt->sa))) { | ||
2594 | u64 prev_rates; | ||
2595 | u64 supp_rates = ieee80211_sta_get_rates(local, elems, | ||
2596 | rx_status->band); | ||
2597 | |||
2598 | prev_rates = sta->supp_rates[rx_status->band]; | ||
2599 | sta->supp_rates[rx_status->band] &= supp_rates; | ||
2600 | if (sta->supp_rates[rx_status->band] == 0) { | ||
2601 | /* No matching rates - this should not really happen. | ||
2602 | * Make sure that at least one rate is marked | ||
2603 | * supported to avoid issues with TX rate ctrl. */ | ||
2604 | sta->supp_rates[rx_status->band] = | ||
2605 | sdata->u.sta.supp_rates_bits[rx_status->band]; | ||
2606 | } | ||
2607 | } | ||
2608 | |||
2609 | rcu_read_unlock(); | ||
2610 | |||
2611 | if (elems->ds_params && elems->ds_params_len == 1) | 1482 | if (elems->ds_params && elems->ds_params_len == 1) |
2612 | freq = ieee80211_channel_to_frequency(elems->ds_params[0]); | 1483 | freq = ieee80211_channel_to_frequency(elems->ds_params[0]); |
2613 | else | 1484 | else |
@@ -2618,215 +1489,60 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2618 | if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) | 1489 | if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) |
2619 | return; | 1490 | return; |
2620 | 1491 | ||
2621 | #ifdef CONFIG_MAC80211_MESH | 1492 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC && elems->supp_rates && |
2622 | if (elems->mesh_config) | 1493 | memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0) { |
2623 | bss = ieee80211_rx_mesh_bss_get(dev, elems->mesh_id, | 1494 | supp_rates = ieee80211_sta_get_rates(local, elems, band); |
2624 | elems->mesh_id_len, elems->mesh_config, freq); | ||
2625 | else | ||
2626 | #endif | ||
2627 | bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq, | ||
2628 | elems->ssid, elems->ssid_len); | ||
2629 | if (!bss) { | ||
2630 | #ifdef CONFIG_MAC80211_MESH | ||
2631 | if (elems->mesh_config) | ||
2632 | bss = ieee80211_rx_mesh_bss_add(dev, elems->mesh_id, | ||
2633 | elems->mesh_id_len, elems->mesh_config, | ||
2634 | elems->mesh_config_len, freq); | ||
2635 | else | ||
2636 | #endif | ||
2637 | bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq, | ||
2638 | elems->ssid, elems->ssid_len); | ||
2639 | if (!bss) | ||
2640 | return; | ||
2641 | } else { | ||
2642 | #if 0 | ||
2643 | /* TODO: order by RSSI? */ | ||
2644 | spin_lock_bh(&local->sta_bss_lock); | ||
2645 | list_move_tail(&bss->list, &local->sta_bss_list); | ||
2646 | spin_unlock_bh(&local->sta_bss_lock); | ||
2647 | #endif | ||
2648 | } | ||
2649 | 1495 | ||
2650 | /* save the ERP value so that it is available at association time */ | 1496 | rcu_read_lock(); |
2651 | if (elems->erp_info && elems->erp_info_len >= 1) { | ||
2652 | bss->erp_value = elems->erp_info[0]; | ||
2653 | bss->has_erp_value = 1; | ||
2654 | } | ||
2655 | |||
2656 | if (elems->ht_cap_elem && | ||
2657 | (!bss->ht_ie || bss->ht_ie_len != elems->ht_cap_elem_len || | ||
2658 | memcmp(bss->ht_ie, elems->ht_cap_elem, elems->ht_cap_elem_len))) { | ||
2659 | kfree(bss->ht_ie); | ||
2660 | bss->ht_ie = kmalloc(elems->ht_cap_elem_len + 2, GFP_ATOMIC); | ||
2661 | if (bss->ht_ie) { | ||
2662 | memcpy(bss->ht_ie, elems->ht_cap_elem - 2, | ||
2663 | elems->ht_cap_elem_len + 2); | ||
2664 | bss->ht_ie_len = elems->ht_cap_elem_len + 2; | ||
2665 | } else | ||
2666 | bss->ht_ie_len = 0; | ||
2667 | } else if (!elems->ht_cap_elem && bss->ht_ie) { | ||
2668 | kfree(bss->ht_ie); | ||
2669 | bss->ht_ie = NULL; | ||
2670 | bss->ht_ie_len = 0; | ||
2671 | } | ||
2672 | 1497 | ||
2673 | if (elems->ht_info_elem && | 1498 | sta = sta_info_get(local, mgmt->sa); |
2674 | (!bss->ht_add_ie || | 1499 | if (sta) { |
2675 | bss->ht_add_ie_len != elems->ht_info_elem_len || | 1500 | u64 prev_rates; |
2676 | memcmp(bss->ht_add_ie, elems->ht_info_elem, | ||
2677 | elems->ht_info_elem_len))) { | ||
2678 | kfree(bss->ht_add_ie); | ||
2679 | bss->ht_add_ie = | ||
2680 | kmalloc(elems->ht_info_elem_len + 2, GFP_ATOMIC); | ||
2681 | if (bss->ht_add_ie) { | ||
2682 | memcpy(bss->ht_add_ie, elems->ht_info_elem - 2, | ||
2683 | elems->ht_info_elem_len + 2); | ||
2684 | bss->ht_add_ie_len = elems->ht_info_elem_len + 2; | ||
2685 | } else | ||
2686 | bss->ht_add_ie_len = 0; | ||
2687 | } else if (!elems->ht_info_elem && bss->ht_add_ie) { | ||
2688 | kfree(bss->ht_add_ie); | ||
2689 | bss->ht_add_ie = NULL; | ||
2690 | bss->ht_add_ie_len = 0; | ||
2691 | } | ||
2692 | 1501 | ||
2693 | bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); | 1502 | prev_rates = sta->sta.supp_rates[band]; |
2694 | bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); | 1503 | /* make sure mandatory rates are always added */ |
1504 | sta->sta.supp_rates[band] = supp_rates | | ||
1505 | ieee80211_mandatory_rates(local, band); | ||
2695 | 1506 | ||
2696 | if (elems->tim) { | 1507 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
2697 | struct ieee80211_tim_ie *tim_ie = | 1508 | if (sta->sta.supp_rates[band] != prev_rates) |
2698 | (struct ieee80211_tim_ie *)elems->tim; | 1509 | printk(KERN_DEBUG "%s: updated supp_rates set " |
2699 | bss->dtim_period = tim_ie->dtim_period; | 1510 | "for %s based on beacon info (0x%llx | " |
2700 | } | 1511 | "0x%llx -> 0x%llx)\n", |
1512 | sdata->dev->name, | ||
1513 | print_mac(mac, sta->sta.addr), | ||
1514 | (unsigned long long) prev_rates, | ||
1515 | (unsigned long long) supp_rates, | ||
1516 | (unsigned long long) sta->sta.supp_rates[band]); | ||
1517 | #endif | ||
1518 | } else { | ||
1519 | ieee80211_ibss_add_sta(sdata, NULL, mgmt->bssid, | ||
1520 | mgmt->sa, supp_rates); | ||
1521 | } | ||
2701 | 1522 | ||
2702 | /* set default value for buggy APs */ | 1523 | rcu_read_unlock(); |
2703 | if (!elems->tim || bss->dtim_period == 0) | ||
2704 | bss->dtim_period = 1; | ||
2705 | |||
2706 | bss->supp_rates_len = 0; | ||
2707 | if (elems->supp_rates) { | ||
2708 | clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; | ||
2709 | if (clen > elems->supp_rates_len) | ||
2710 | clen = elems->supp_rates_len; | ||
2711 | memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates, | ||
2712 | clen); | ||
2713 | bss->supp_rates_len += clen; | ||
2714 | } | ||
2715 | if (elems->ext_supp_rates) { | ||
2716 | clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; | ||
2717 | if (clen > elems->ext_supp_rates_len) | ||
2718 | clen = elems->ext_supp_rates_len; | ||
2719 | memcpy(&bss->supp_rates[bss->supp_rates_len], | ||
2720 | elems->ext_supp_rates, clen); | ||
2721 | bss->supp_rates_len += clen; | ||
2722 | } | 1524 | } |
2723 | 1525 | ||
2724 | bss->band = rx_status->band; | 1526 | bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, |
1527 | freq, beacon); | ||
1528 | if (!bss) | ||
1529 | return; | ||
2725 | 1530 | ||
2726 | bss->timestamp = beacon_timestamp; | 1531 | /* was just updated in ieee80211_bss_info_update */ |
2727 | bss->last_update = jiffies; | 1532 | beacon_timestamp = bss->timestamp; |
2728 | bss->signal = rx_status->signal; | ||
2729 | bss->noise = rx_status->noise; | ||
2730 | bss->qual = rx_status->qual; | ||
2731 | if (!beacon && !bss->probe_resp) | ||
2732 | bss->probe_resp = true; | ||
2733 | 1533 | ||
2734 | /* | 1534 | /* |
2735 | * In STA mode, the remaining parameters should not be overridden | 1535 | * In STA mode, the remaining parameters should not be overridden |
2736 | * by beacons because they're not necessarily accurate there. | 1536 | * by beacons because they're not necessarily accurate there. |
2737 | */ | 1537 | */ |
2738 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 1538 | if (sdata->vif.type != NL80211_IFTYPE_ADHOC && |
2739 | bss->probe_resp && beacon) { | 1539 | bss->last_probe_resp && beacon) { |
2740 | ieee80211_rx_bss_put(local, bss); | 1540 | ieee80211_rx_bss_put(local, bss); |
2741 | return; | 1541 | return; |
2742 | } | 1542 | } |
2743 | 1543 | ||
2744 | if (elems->wpa && | ||
2745 | (!bss->wpa_ie || bss->wpa_ie_len != elems->wpa_len || | ||
2746 | memcmp(bss->wpa_ie, elems->wpa, elems->wpa_len))) { | ||
2747 | kfree(bss->wpa_ie); | ||
2748 | bss->wpa_ie = kmalloc(elems->wpa_len + 2, GFP_ATOMIC); | ||
2749 | if (bss->wpa_ie) { | ||
2750 | memcpy(bss->wpa_ie, elems->wpa - 2, elems->wpa_len + 2); | ||
2751 | bss->wpa_ie_len = elems->wpa_len + 2; | ||
2752 | } else | ||
2753 | bss->wpa_ie_len = 0; | ||
2754 | } else if (!elems->wpa && bss->wpa_ie) { | ||
2755 | kfree(bss->wpa_ie); | ||
2756 | bss->wpa_ie = NULL; | ||
2757 | bss->wpa_ie_len = 0; | ||
2758 | } | ||
2759 | |||
2760 | if (elems->rsn && | ||
2761 | (!bss->rsn_ie || bss->rsn_ie_len != elems->rsn_len || | ||
2762 | memcmp(bss->rsn_ie, elems->rsn, elems->rsn_len))) { | ||
2763 | kfree(bss->rsn_ie); | ||
2764 | bss->rsn_ie = kmalloc(elems->rsn_len + 2, GFP_ATOMIC); | ||
2765 | if (bss->rsn_ie) { | ||
2766 | memcpy(bss->rsn_ie, elems->rsn - 2, elems->rsn_len + 2); | ||
2767 | bss->rsn_ie_len = elems->rsn_len + 2; | ||
2768 | } else | ||
2769 | bss->rsn_ie_len = 0; | ||
2770 | } else if (!elems->rsn && bss->rsn_ie) { | ||
2771 | kfree(bss->rsn_ie); | ||
2772 | bss->rsn_ie = NULL; | ||
2773 | bss->rsn_ie_len = 0; | ||
2774 | } | ||
2775 | |||
2776 | /* | ||
2777 | * Cf. | ||
2778 | * http://www.wipo.int/pctdb/en/wo.jsp?wo=2007047181&IA=WO2007047181&DISPLAY=DESC | ||
2779 | * | ||
2780 | * quoting: | ||
2781 | * | ||
2782 | * In particular, "Wi-Fi CERTIFIED for WMM - Support for Multimedia | ||
2783 | * Applications with Quality of Service in Wi-Fi Networks," Wi- Fi | ||
2784 | * Alliance (September 1, 2004) is incorporated by reference herein. | ||
2785 | * The inclusion of the WMM Parameters in probe responses and | ||
2786 | * association responses is mandatory for WMM enabled networks. The | ||
2787 | * inclusion of the WMM Parameters in beacons, however, is optional. | ||
2788 | */ | ||
2789 | |||
2790 | if (elems->wmm_param && | ||
2791 | (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_param_len || | ||
2792 | memcmp(bss->wmm_ie, elems->wmm_param, elems->wmm_param_len))) { | ||
2793 | kfree(bss->wmm_ie); | ||
2794 | bss->wmm_ie = kmalloc(elems->wmm_param_len + 2, GFP_ATOMIC); | ||
2795 | if (bss->wmm_ie) { | ||
2796 | memcpy(bss->wmm_ie, elems->wmm_param - 2, | ||
2797 | elems->wmm_param_len + 2); | ||
2798 | bss->wmm_ie_len = elems->wmm_param_len + 2; | ||
2799 | } else | ||
2800 | bss->wmm_ie_len = 0; | ||
2801 | } else if (elems->wmm_info && | ||
2802 | (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_info_len || | ||
2803 | memcmp(bss->wmm_ie, elems->wmm_info, | ||
2804 | elems->wmm_info_len))) { | ||
2805 | * As for certain APs the fifth bit is not set in WMM IE in | ||
2806 | * beacon frames. So while parsing the beacon frame the | ||
2807 | * wmm_info structure is used instead of wmm_param. | ||
2808 | * wmm_info structure was never used to set bss->wmm_ie. | ||
2809 | * This code fixes this problem by copying the WME | ||
2810 | * information from wmm_info to bss->wmm_ie and enabling | ||
2811 | * n-band association. | ||
2812 | */ | ||
2813 | kfree(bss->wmm_ie); | ||
2814 | bss->wmm_ie = kmalloc(elems->wmm_info_len + 2, GFP_ATOMIC); | ||
2815 | if (bss->wmm_ie) { | ||
2816 | memcpy(bss->wmm_ie, elems->wmm_info - 2, | ||
2817 | elems->wmm_info_len + 2); | ||
2818 | bss->wmm_ie_len = elems->wmm_info_len + 2; | ||
2819 | } else | ||
2820 | bss->wmm_ie_len = 0; | ||
2821 | } else if (!elems->wmm_param && !elems->wmm_info && bss->wmm_ie) { | ||
2822 | kfree(bss->wmm_ie); | ||
2823 | bss->wmm_ie = NULL; | ||
2824 | bss->wmm_ie_len = 0; | ||
2825 | } | ||
2826 | |||
2827 | /* check if we need to merge IBSS */ | 1544 | /* check if we need to merge IBSS */ |
2828 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon && | 1545 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC && beacon && |
2829 | !local->sta_sw_scanning && !local->sta_hw_scanning && | ||
2830 | bss->capability & WLAN_CAPABILITY_IBSS && | 1546 | bss->capability & WLAN_CAPABILITY_IBSS && |
2831 | bss->freq == local->oper_channel->center_freq && | 1547 | bss->freq == local->oper_channel->center_freq && |
2832 | elems->ssid_len == sdata->u.sta.ssid_len && | 1548 | elems->ssid_len == sdata->u.sta.ssid_len && |
@@ -2848,7 +1564,7 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2848 | * e.g: at 1 MBit that means mactime is 192 usec earlier | 1564 | * e.g: at 1 MBit that means mactime is 192 usec earlier |
2849 | * (=24 bytes * 8 usecs/byte) than the beacon timestamp. | 1565 | * (=24 bytes * 8 usecs/byte) than the beacon timestamp. |
2850 | */ | 1566 | */ |
2851 | int rate = local->hw.wiphy->bands[rx_status->band]-> | 1567 | int rate = local->hw.wiphy->bands[band]-> |
2852 | bitrates[rx_status->rate_idx].bitrate; | 1568 | bitrates[rx_status->rate_idx].bitrate; |
2853 | rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate); | 1569 | rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate); |
2854 | } else if (local && local->ops && local->ops->get_tsf) | 1570 | } else if (local && local->ops && local->ops->get_tsf) |
@@ -2871,12 +1587,12 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2871 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 1587 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
2872 | printk(KERN_DEBUG "%s: beacon TSF higher than " | 1588 | printk(KERN_DEBUG "%s: beacon TSF higher than " |
2873 | "local TSF - IBSS merge with BSSID %s\n", | 1589 | "local TSF - IBSS merge with BSSID %s\n", |
2874 | dev->name, print_mac(mac, mgmt->bssid)); | 1590 | sdata->dev->name, print_mac(mac, mgmt->bssid)); |
2875 | #endif | 1591 | #endif |
2876 | ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss); | 1592 | ieee80211_sta_join_ibss(sdata, &sdata->u.sta, bss); |
2877 | ieee80211_ibss_add_sta(dev, NULL, | 1593 | ieee80211_ibss_add_sta(sdata, NULL, |
2878 | mgmt->bssid, mgmt->sa, | 1594 | mgmt->bssid, mgmt->sa, |
2879 | BIT(rx_status->rate_idx)); | 1595 | supp_rates); |
2880 | } | 1596 | } |
2881 | } | 1597 | } |
2882 | 1598 | ||
@@ -2884,13 +1600,17 @@ static void ieee80211_rx_bss_info(struct net_device *dev, | |||
2884 | } | 1600 | } |
2885 | 1601 | ||
2886 | 1602 | ||
2887 | static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev, | 1603 | static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, |
2888 | struct ieee80211_mgmt *mgmt, | 1604 | struct ieee80211_mgmt *mgmt, |
2889 | size_t len, | 1605 | size_t len, |
2890 | struct ieee80211_rx_status *rx_status) | 1606 | struct ieee80211_rx_status *rx_status) |
2891 | { | 1607 | { |
2892 | size_t baselen; | 1608 | size_t baselen; |
2893 | struct ieee802_11_elems elems; | 1609 | struct ieee802_11_elems elems; |
1610 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
1611 | |||
1612 | if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) | ||
1613 | return; /* ignore ProbeResp to foreign address */ | ||
2894 | 1614 | ||
2895 | baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; | 1615 | baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; |
2896 | if (baselen > len) | 1616 | if (baselen > len) |
@@ -2899,20 +1619,27 @@ static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev, | |||
2899 | ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, | 1619 | ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, |
2900 | &elems); | 1620 | &elems); |
2901 | 1621 | ||
2902 | ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 0); | 1622 | ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); |
1623 | |||
1624 | /* direct probe may be part of the association flow */ | ||
1625 | if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE, | ||
1626 | &ifsta->request)) { | ||
1627 | printk(KERN_DEBUG "%s direct probe responded\n", | ||
1628 | sdata->dev->name); | ||
1629 | ieee80211_authenticate(sdata, ifsta); | ||
1630 | } | ||
2903 | } | 1631 | } |
2904 | 1632 | ||
2905 | 1633 | ||
2906 | static void ieee80211_rx_mgmt_beacon(struct net_device *dev, | 1634 | static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, |
2907 | struct ieee80211_mgmt *mgmt, | 1635 | struct ieee80211_mgmt *mgmt, |
2908 | size_t len, | 1636 | size_t len, |
2909 | struct ieee80211_rx_status *rx_status) | 1637 | struct ieee80211_rx_status *rx_status) |
2910 | { | 1638 | { |
2911 | struct ieee80211_sub_if_data *sdata; | ||
2912 | struct ieee80211_if_sta *ifsta; | 1639 | struct ieee80211_if_sta *ifsta; |
2913 | size_t baselen; | 1640 | size_t baselen; |
2914 | struct ieee802_11_elems elems; | 1641 | struct ieee802_11_elems elems; |
2915 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1642 | struct ieee80211_local *local = sdata->local; |
2916 | struct ieee80211_conf *conf = &local->hw.conf; | 1643 | struct ieee80211_conf *conf = &local->hw.conf; |
2917 | u32 changed = 0; | 1644 | u32 changed = 0; |
2918 | 1645 | ||
@@ -2923,10 +1650,9 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev, | |||
2923 | 1650 | ||
2924 | ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); | 1651 | ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); |
2925 | 1652 | ||
2926 | ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 1); | 1653 | ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true); |
2927 | 1654 | ||
2928 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1655 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
2929 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | ||
2930 | return; | 1656 | return; |
2931 | ifsta = &sdata->u.sta; | 1657 | ifsta = &sdata->u.sta; |
2932 | 1658 | ||
@@ -2934,15 +1660,9 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev, | |||
2934 | memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) | 1660 | memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) |
2935 | return; | 1661 | return; |
2936 | 1662 | ||
2937 | ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, | 1663 | ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param, |
2938 | elems.wmm_param_len); | 1664 | elems.wmm_param_len); |
2939 | 1665 | ||
2940 | /* Do not send changes to driver if we are scanning. This removes | ||
2941 | * requirement that driver's bss_info_changed function needs to be | ||
2942 | * atomic. */ | ||
2943 | if (local->sta_sw_scanning || local->sta_hw_scanning) | ||
2944 | return; | ||
2945 | |||
2946 | if (elems.erp_info && elems.erp_info_len >= 1) | 1666 | if (elems.erp_info && elems.erp_info_len >= 1) |
2947 | changed |= ieee80211_handle_erp_ie(sdata, elems.erp_info[0]); | 1667 | changed |= ieee80211_handle_erp_ie(sdata, elems.erp_info[0]); |
2948 | else { | 1668 | else { |
@@ -2966,14 +1686,13 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev, | |||
2966 | } | 1686 | } |
2967 | 1687 | ||
2968 | 1688 | ||
2969 | static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, | 1689 | static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, |
2970 | struct ieee80211_if_sta *ifsta, | 1690 | struct ieee80211_if_sta *ifsta, |
2971 | struct ieee80211_mgmt *mgmt, | 1691 | struct ieee80211_mgmt *mgmt, |
2972 | size_t len, | 1692 | size_t len, |
2973 | struct ieee80211_rx_status *rx_status) | 1693 | struct ieee80211_rx_status *rx_status) |
2974 | { | 1694 | { |
2975 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1695 | struct ieee80211_local *local = sdata->local; |
2976 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
2977 | int tx_last_beacon; | 1696 | int tx_last_beacon; |
2978 | struct sk_buff *skb; | 1697 | struct sk_buff *skb; |
2979 | struct ieee80211_mgmt *resp; | 1698 | struct ieee80211_mgmt *resp; |
@@ -2984,8 +1703,8 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, | |||
2984 | DECLARE_MAC_BUF(mac3); | 1703 | DECLARE_MAC_BUF(mac3); |
2985 | #endif | 1704 | #endif |
2986 | 1705 | ||
2987 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS || | 1706 | if (sdata->vif.type != NL80211_IFTYPE_ADHOC || |
2988 | ifsta->state != IEEE80211_IBSS_JOINED || | 1707 | ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED || |
2989 | len < 24 + 2 || !ifsta->probe_resp) | 1708 | len < 24 + 2 || !ifsta->probe_resp) |
2990 | return; | 1709 | return; |
2991 | 1710 | ||
@@ -2997,7 +1716,7 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, | |||
2997 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 1716 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
2998 | printk(KERN_DEBUG "%s: RX ProbeReq SA=%s DA=%s BSSID=" | 1717 | printk(KERN_DEBUG "%s: RX ProbeReq SA=%s DA=%s BSSID=" |
2999 | "%s (tx_last_beacon=%d)\n", | 1718 | "%s (tx_last_beacon=%d)\n", |
3000 | dev->name, print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da), | 1719 | sdata->dev->name, print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da), |
3001 | print_mac(mac3, mgmt->bssid), tx_last_beacon); | 1720 | print_mac(mac3, mgmt->bssid), tx_last_beacon); |
3002 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | 1721 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ |
3003 | 1722 | ||
@@ -3015,7 +1734,7 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, | |||
3015 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 1734 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
3016 | printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " | 1735 | printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " |
3017 | "from %s\n", | 1736 | "from %s\n", |
3018 | dev->name, print_mac(mac, mgmt->sa)); | 1737 | sdata->dev->name, print_mac(mac, mgmt->sa)); |
3019 | #endif | 1738 | #endif |
3020 | return; | 1739 | return; |
3021 | } | 1740 | } |
@@ -3035,74 +1754,15 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, | |||
3035 | memcpy(resp->da, mgmt->sa, ETH_ALEN); | 1754 | memcpy(resp->da, mgmt->sa, ETH_ALEN); |
3036 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 1755 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
3037 | printk(KERN_DEBUG "%s: Sending ProbeResp to %s\n", | 1756 | printk(KERN_DEBUG "%s: Sending ProbeResp to %s\n", |
3038 | dev->name, print_mac(mac, resp->da)); | 1757 | sdata->dev->name, print_mac(mac, resp->da)); |
3039 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | 1758 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ |
3040 | ieee80211_sta_tx(dev, skb, 0); | 1759 | ieee80211_tx_skb(sdata, skb, 0); |
3041 | } | ||
3042 | |||
3043 | static void ieee80211_rx_mgmt_action(struct net_device *dev, | ||
3044 | struct ieee80211_if_sta *ifsta, | ||
3045 | struct ieee80211_mgmt *mgmt, | ||
3046 | size_t len, | ||
3047 | struct ieee80211_rx_status *rx_status) | ||
3048 | { | ||
3049 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3050 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
3051 | |||
3052 | if (len < IEEE80211_MIN_ACTION_SIZE) | ||
3053 | return; | ||
3054 | |||
3055 | switch (mgmt->u.action.category) { | ||
3056 | case WLAN_CATEGORY_SPECTRUM_MGMT: | ||
3057 | if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) | ||
3058 | break; | ||
3059 | switch (mgmt->u.action.u.chan_switch.action_code) { | ||
3060 | case WLAN_ACTION_SPCT_MSR_REQ: | ||
3061 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
3062 | sizeof(mgmt->u.action.u.measurement))) | ||
3063 | break; | ||
3064 | ieee80211_sta_process_measurement_req(dev, mgmt, len); | ||
3065 | break; | ||
3066 | } | ||
3067 | break; | ||
3068 | case WLAN_CATEGORY_BACK: | ||
3069 | switch (mgmt->u.action.u.addba_req.action_code) { | ||
3070 | case WLAN_ACTION_ADDBA_REQ: | ||
3071 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
3072 | sizeof(mgmt->u.action.u.addba_req))) | ||
3073 | break; | ||
3074 | ieee80211_sta_process_addba_request(dev, mgmt, len); | ||
3075 | break; | ||
3076 | case WLAN_ACTION_ADDBA_RESP: | ||
3077 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
3078 | sizeof(mgmt->u.action.u.addba_resp))) | ||
3079 | break; | ||
3080 | ieee80211_sta_process_addba_resp(dev, mgmt, len); | ||
3081 | break; | ||
3082 | case WLAN_ACTION_DELBA: | ||
3083 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
3084 | sizeof(mgmt->u.action.u.delba))) | ||
3085 | break; | ||
3086 | ieee80211_sta_process_delba(dev, mgmt, len); | ||
3087 | break; | ||
3088 | } | ||
3089 | break; | ||
3090 | case PLINK_CATEGORY: | ||
3091 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
3092 | mesh_rx_plink_frame(dev, mgmt, len, rx_status); | ||
3093 | break; | ||
3094 | case MESH_PATH_SEL_CATEGORY: | ||
3095 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
3096 | mesh_rx_path_sel_frame(dev, mgmt, len); | ||
3097 | break; | ||
3098 | } | ||
3099 | } | 1760 | } |
3100 | 1761 | ||
3101 | void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, | 1762 | void ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, |
3102 | struct ieee80211_rx_status *rx_status) | 1763 | struct ieee80211_rx_status *rx_status) |
3103 | { | 1764 | { |
3104 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1765 | struct ieee80211_local *local = sdata->local; |
3105 | struct ieee80211_sub_if_data *sdata; | ||
3106 | struct ieee80211_if_sta *ifsta; | 1766 | struct ieee80211_if_sta *ifsta; |
3107 | struct ieee80211_mgmt *mgmt; | 1767 | struct ieee80211_mgmt *mgmt; |
3108 | u16 fc; | 1768 | u16 fc; |
@@ -3110,7 +1770,6 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, | |||
3110 | if (skb->len < 24) | 1770 | if (skb->len < 24) |
3111 | goto fail; | 1771 | goto fail; |
3112 | 1772 | ||
3113 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3114 | ifsta = &sdata->u.sta; | 1773 | ifsta = &sdata->u.sta; |
3115 | 1774 | ||
3116 | mgmt = (struct ieee80211_mgmt *) skb->data; | 1775 | mgmt = (struct ieee80211_mgmt *) skb->data; |
@@ -3120,7 +1779,6 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, | |||
3120 | case IEEE80211_STYPE_PROBE_REQ: | 1779 | case IEEE80211_STYPE_PROBE_REQ: |
3121 | case IEEE80211_STYPE_PROBE_RESP: | 1780 | case IEEE80211_STYPE_PROBE_RESP: |
3122 | case IEEE80211_STYPE_BEACON: | 1781 | case IEEE80211_STYPE_BEACON: |
3123 | case IEEE80211_STYPE_ACTION: | ||
3124 | memcpy(skb->cb, rx_status, sizeof(*rx_status)); | 1782 | memcpy(skb->cb, rx_status, sizeof(*rx_status)); |
3125 | case IEEE80211_STYPE_AUTH: | 1783 | case IEEE80211_STYPE_AUTH: |
3126 | case IEEE80211_STYPE_ASSOC_RESP: | 1784 | case IEEE80211_STYPE_ASSOC_RESP: |
@@ -3136,17 +1794,14 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, | |||
3136 | kfree_skb(skb); | 1794 | kfree_skb(skb); |
3137 | } | 1795 | } |
3138 | 1796 | ||
3139 | 1797 | static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |
3140 | static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev, | ||
3141 | struct sk_buff *skb) | 1798 | struct sk_buff *skb) |
3142 | { | 1799 | { |
3143 | struct ieee80211_rx_status *rx_status; | 1800 | struct ieee80211_rx_status *rx_status; |
3144 | struct ieee80211_sub_if_data *sdata; | ||
3145 | struct ieee80211_if_sta *ifsta; | 1801 | struct ieee80211_if_sta *ifsta; |
3146 | struct ieee80211_mgmt *mgmt; | 1802 | struct ieee80211_mgmt *mgmt; |
3147 | u16 fc; | 1803 | u16 fc; |
3148 | 1804 | ||
3149 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3150 | ifsta = &sdata->u.sta; | 1805 | ifsta = &sdata->u.sta; |
3151 | 1806 | ||
3152 | rx_status = (struct ieee80211_rx_status *) skb->cb; | 1807 | rx_status = (struct ieee80211_rx_status *) skb->cb; |
@@ -3155,17 +1810,17 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev, | |||
3155 | 1810 | ||
3156 | switch (fc & IEEE80211_FCTL_STYPE) { | 1811 | switch (fc & IEEE80211_FCTL_STYPE) { |
3157 | case IEEE80211_STYPE_PROBE_REQ: | 1812 | case IEEE80211_STYPE_PROBE_REQ: |
3158 | ieee80211_rx_mgmt_probe_req(dev, ifsta, mgmt, skb->len, | 1813 | ieee80211_rx_mgmt_probe_req(sdata, ifsta, mgmt, skb->len, |
3159 | rx_status); | 1814 | rx_status); |
3160 | break; | 1815 | break; |
3161 | case IEEE80211_STYPE_PROBE_RESP: | 1816 | case IEEE80211_STYPE_PROBE_RESP: |
3162 | ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status); | 1817 | ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len, rx_status); |
3163 | break; | 1818 | break; |
3164 | case IEEE80211_STYPE_BEACON: | 1819 | case IEEE80211_STYPE_BEACON: |
3165 | ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status); | 1820 | ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status); |
3166 | break; | 1821 | break; |
3167 | case IEEE80211_STYPE_AUTH: | 1822 | case IEEE80211_STYPE_AUTH: |
3168 | ieee80211_rx_mgmt_auth(dev, ifsta, mgmt, skb->len); | 1823 | ieee80211_rx_mgmt_auth(sdata, ifsta, mgmt, skb->len); |
3169 | break; | 1824 | break; |
3170 | case IEEE80211_STYPE_ASSOC_RESP: | 1825 | case IEEE80211_STYPE_ASSOC_RESP: |
3171 | ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 0); | 1826 | ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 0); |
@@ -3174,13 +1829,10 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev, | |||
3174 | ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 1); | 1829 | ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 1); |
3175 | break; | 1830 | break; |
3176 | case IEEE80211_STYPE_DEAUTH: | 1831 | case IEEE80211_STYPE_DEAUTH: |
3177 | ieee80211_rx_mgmt_deauth(dev, ifsta, mgmt, skb->len); | 1832 | ieee80211_rx_mgmt_deauth(sdata, ifsta, mgmt, skb->len); |
3178 | break; | 1833 | break; |
3179 | case IEEE80211_STYPE_DISASSOC: | 1834 | case IEEE80211_STYPE_DISASSOC: |
3180 | ieee80211_rx_mgmt_disassoc(dev, ifsta, mgmt, skb->len); | 1835 | ieee80211_rx_mgmt_disassoc(sdata, ifsta, mgmt, skb->len); |
3181 | break; | ||
3182 | case IEEE80211_STYPE_ACTION: | ||
3183 | ieee80211_rx_mgmt_action(dev, ifsta, mgmt, skb->len, rx_status); | ||
3184 | break; | 1836 | break; |
3185 | } | 1837 | } |
3186 | 1838 | ||
@@ -3188,47 +1840,11 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev, | |||
3188 | } | 1840 | } |
3189 | 1841 | ||
3190 | 1842 | ||
3191 | ieee80211_rx_result | 1843 | static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) |
3192 | ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb, | ||
3193 | struct ieee80211_rx_status *rx_status) | ||
3194 | { | ||
3195 | struct ieee80211_mgmt *mgmt; | ||
3196 | __le16 fc; | ||
3197 | |||
3198 | if (skb->len < 2) | ||
3199 | return RX_DROP_UNUSABLE; | ||
3200 | |||
3201 | mgmt = (struct ieee80211_mgmt *) skb->data; | ||
3202 | fc = mgmt->frame_control; | ||
3203 | |||
3204 | if (ieee80211_is_ctl(fc)) | ||
3205 | return RX_CONTINUE; | ||
3206 | |||
3207 | if (skb->len < 24) | ||
3208 | return RX_DROP_MONITOR; | ||
3209 | |||
3210 | if (ieee80211_is_probe_resp(fc)) { | ||
3211 | ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status); | ||
3212 | dev_kfree_skb(skb); | ||
3213 | return RX_QUEUED; | ||
3214 | } | ||
3215 | |||
3216 | if (ieee80211_is_beacon(fc)) { | ||
3217 | ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status); | ||
3218 | dev_kfree_skb(skb); | ||
3219 | return RX_QUEUED; | ||
3220 | } | ||
3221 | |||
3222 | return RX_CONTINUE; | ||
3223 | } | ||
3224 | |||
3225 | |||
3226 | static int ieee80211_sta_active_ibss(struct net_device *dev) | ||
3227 | { | 1844 | { |
3228 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1845 | struct ieee80211_local *local = sdata->local; |
3229 | int active = 0; | 1846 | int active = 0; |
3230 | struct sta_info *sta; | 1847 | struct sta_info *sta; |
3231 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3232 | 1848 | ||
3233 | rcu_read_lock(); | 1849 | rcu_read_lock(); |
3234 | 1850 | ||
@@ -3247,179 +1863,36 @@ static int ieee80211_sta_active_ibss(struct net_device *dev) | |||
3247 | } | 1863 | } |
3248 | 1864 | ||
3249 | 1865 | ||
3250 | static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time) | 1866 | static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata, |
3251 | { | ||
3252 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
3253 | struct sta_info *sta, *tmp; | ||
3254 | LIST_HEAD(tmp_list); | ||
3255 | DECLARE_MAC_BUF(mac); | ||
3256 | unsigned long flags; | ||
3257 | |||
3258 | spin_lock_irqsave(&local->sta_lock, flags); | ||
3259 | list_for_each_entry_safe(sta, tmp, &local->sta_list, list) | ||
3260 | if (time_after(jiffies, sta->last_rx + exp_time)) { | ||
3261 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | ||
3262 | printk(KERN_DEBUG "%s: expiring inactive STA %s\n", | ||
3263 | dev->name, print_mac(mac, sta->addr)); | ||
3264 | #endif | ||
3265 | __sta_info_unlink(&sta); | ||
3266 | if (sta) | ||
3267 | list_add(&sta->list, &tmp_list); | ||
3268 | } | ||
3269 | spin_unlock_irqrestore(&local->sta_lock, flags); | ||
3270 | |||
3271 | list_for_each_entry_safe(sta, tmp, &tmp_list, list) | ||
3272 | sta_info_destroy(sta); | ||
3273 | } | ||
3274 | |||
3275 | |||
3276 | static void ieee80211_sta_merge_ibss(struct net_device *dev, | ||
3277 | struct ieee80211_if_sta *ifsta) | 1867 | struct ieee80211_if_sta *ifsta) |
3278 | { | 1868 | { |
3279 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); | 1869 | mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); |
3280 | 1870 | ||
3281 | ieee80211_sta_expire(dev, IEEE80211_IBSS_INACTIVITY_LIMIT); | 1871 | ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT); |
3282 | if (ieee80211_sta_active_ibss(dev)) | 1872 | if (ieee80211_sta_active_ibss(sdata)) |
3283 | return; | 1873 | return; |
3284 | 1874 | ||
3285 | printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " | 1875 | printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " |
3286 | "IBSS networks with same SSID (merge)\n", dev->name); | 1876 | "IBSS networks with same SSID (merge)\n", sdata->dev->name); |
3287 | ieee80211_sta_req_scan(dev, ifsta->ssid, ifsta->ssid_len); | 1877 | ieee80211_request_scan(sdata, ifsta->ssid, ifsta->ssid_len); |
3288 | } | 1878 | } |
3289 | 1879 | ||
3290 | 1880 | ||
3291 | #ifdef CONFIG_MAC80211_MESH | 1881 | static void ieee80211_sta_timer(unsigned long data) |
3292 | static void ieee80211_mesh_housekeeping(struct net_device *dev, | ||
3293 | struct ieee80211_if_sta *ifsta) | ||
3294 | { | ||
3295 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3296 | bool free_plinks; | ||
3297 | |||
3298 | ieee80211_sta_expire(dev, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); | ||
3299 | mesh_path_expire(dev); | ||
3300 | |||
3301 | free_plinks = mesh_plink_availables(sdata); | ||
3302 | if (free_plinks != sdata->u.sta.accepting_plinks) | ||
3303 | ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); | ||
3304 | |||
3305 | mod_timer(&ifsta->timer, jiffies + | ||
3306 | IEEE80211_MESH_HOUSEKEEPING_INTERVAL); | ||
3307 | } | ||
3308 | |||
3309 | |||
3310 | void ieee80211_start_mesh(struct net_device *dev) | ||
3311 | { | ||
3312 | struct ieee80211_if_sta *ifsta; | ||
3313 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3314 | ifsta = &sdata->u.sta; | ||
3315 | ifsta->state = IEEE80211_MESH_UP; | ||
3316 | ieee80211_sta_timer((unsigned long)sdata); | ||
3317 | ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON); | ||
3318 | } | ||
3319 | #endif | ||
3320 | |||
3321 | |||
3322 | void ieee80211_sta_timer(unsigned long data) | ||
3323 | { | 1882 | { |
3324 | struct ieee80211_sub_if_data *sdata = | 1883 | struct ieee80211_sub_if_data *sdata = |
3325 | (struct ieee80211_sub_if_data *) data; | 1884 | (struct ieee80211_sub_if_data *) data; |
3326 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 1885 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
3327 | struct ieee80211_local *local = wdev_priv(&sdata->wdev); | 1886 | struct ieee80211_local *local = sdata->local; |
3328 | 1887 | ||
3329 | set_bit(IEEE80211_STA_REQ_RUN, &ifsta->request); | 1888 | set_bit(IEEE80211_STA_REQ_RUN, &ifsta->request); |
3330 | queue_work(local->hw.workqueue, &ifsta->work); | 1889 | queue_work(local->hw.workqueue, &ifsta->work); |
3331 | } | 1890 | } |
3332 | 1891 | ||
3333 | void ieee80211_sta_work(struct work_struct *work) | 1892 | static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata, |
3334 | { | ||
3335 | struct ieee80211_sub_if_data *sdata = | ||
3336 | container_of(work, struct ieee80211_sub_if_data, u.sta.work); | ||
3337 | struct net_device *dev = sdata->dev; | ||
3338 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
3339 | struct ieee80211_if_sta *ifsta; | ||
3340 | struct sk_buff *skb; | ||
3341 | |||
3342 | if (!netif_running(dev)) | ||
3343 | return; | ||
3344 | |||
3345 | if (local->sta_sw_scanning || local->sta_hw_scanning) | ||
3346 | return; | ||
3347 | |||
3348 | if (WARN_ON(sdata->vif.type != IEEE80211_IF_TYPE_STA && | ||
3349 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | ||
3350 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) | ||
3351 | return; | ||
3352 | ifsta = &sdata->u.sta; | ||
3353 | |||
3354 | while ((skb = skb_dequeue(&ifsta->skb_queue))) | ||
3355 | ieee80211_sta_rx_queued_mgmt(dev, skb); | ||
3356 | |||
3357 | #ifdef CONFIG_MAC80211_MESH | ||
3358 | if (ifsta->preq_queue_len && | ||
3359 | time_after(jiffies, | ||
3360 | ifsta->last_preq + msecs_to_jiffies(ifsta->mshcfg.dot11MeshHWMPpreqMinInterval))) | ||
3361 | mesh_path_start_discovery(dev); | ||
3362 | #endif | ||
3363 | |||
3364 | if (ifsta->state != IEEE80211_AUTHENTICATE && | ||
3365 | ifsta->state != IEEE80211_ASSOCIATE && | ||
3366 | test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) { | ||
3367 | if (ifsta->scan_ssid_len) | ||
3368 | ieee80211_sta_start_scan(dev, ifsta->scan_ssid, ifsta->scan_ssid_len); | ||
3369 | else | ||
3370 | ieee80211_sta_start_scan(dev, NULL, 0); | ||
3371 | return; | ||
3372 | } | ||
3373 | |||
3374 | if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request)) { | ||
3375 | if (ieee80211_sta_config_auth(dev, ifsta)) | ||
3376 | return; | ||
3377 | clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request); | ||
3378 | } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request)) | ||
3379 | return; | ||
3380 | |||
3381 | switch (ifsta->state) { | ||
3382 | case IEEE80211_DISABLED: | ||
3383 | break; | ||
3384 | case IEEE80211_AUTHENTICATE: | ||
3385 | ieee80211_authenticate(dev, ifsta); | ||
3386 | break; | ||
3387 | case IEEE80211_ASSOCIATE: | ||
3388 | ieee80211_associate(dev, ifsta); | ||
3389 | break; | ||
3390 | case IEEE80211_ASSOCIATED: | ||
3391 | ieee80211_associated(dev, ifsta); | ||
3392 | break; | ||
3393 | case IEEE80211_IBSS_SEARCH: | ||
3394 | ieee80211_sta_find_ibss(dev, ifsta); | ||
3395 | break; | ||
3396 | case IEEE80211_IBSS_JOINED: | ||
3397 | ieee80211_sta_merge_ibss(dev, ifsta); | ||
3398 | break; | ||
3399 | #ifdef CONFIG_MAC80211_MESH | ||
3400 | case IEEE80211_MESH_UP: | ||
3401 | ieee80211_mesh_housekeeping(dev, ifsta); | ||
3402 | break; | ||
3403 | #endif | ||
3404 | default: | ||
3405 | WARN_ON(1); | ||
3406 | break; | ||
3407 | } | ||
3408 | |||
3409 | if (ieee80211_privacy_mismatch(dev, ifsta)) { | ||
3410 | printk(KERN_DEBUG "%s: privacy configuration mismatch and " | ||
3411 | "mixed-cell disabled - disassociate\n", dev->name); | ||
3412 | |||
3413 | ieee80211_send_disassoc(dev, ifsta, WLAN_REASON_UNSPECIFIED); | ||
3414 | ieee80211_set_disassoc(dev, ifsta, 0); | ||
3415 | } | ||
3416 | } | ||
3417 | |||
3418 | |||
3419 | static void ieee80211_sta_reset_auth(struct net_device *dev, | ||
3420 | struct ieee80211_if_sta *ifsta) | 1893 | struct ieee80211_if_sta *ifsta) |
3421 | { | 1894 | { |
3422 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1895 | struct ieee80211_local *local = sdata->local; |
3423 | 1896 | ||
3424 | if (local->ops->reset_tsf) { | 1897 | if (local->ops->reset_tsf) { |
3425 | /* Reset own TSF to allow time synchronization work. */ | 1898 | /* Reset own TSF to allow time synchronization work. */ |
@@ -3439,29 +1912,15 @@ static void ieee80211_sta_reset_auth(struct net_device *dev, | |||
3439 | ifsta->auth_alg = WLAN_AUTH_OPEN; | 1912 | ifsta->auth_alg = WLAN_AUTH_OPEN; |
3440 | ifsta->auth_transaction = -1; | 1913 | ifsta->auth_transaction = -1; |
3441 | ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; | 1914 | ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; |
3442 | ifsta->auth_tries = ifsta->assoc_tries = 0; | 1915 | ifsta->assoc_scan_tries = 0; |
3443 | netif_carrier_off(dev); | 1916 | ifsta->direct_probe_tries = 0; |
1917 | ifsta->auth_tries = 0; | ||
1918 | ifsta->assoc_tries = 0; | ||
1919 | netif_tx_stop_all_queues(sdata->dev); | ||
1920 | netif_carrier_off(sdata->dev); | ||
3444 | } | 1921 | } |
3445 | 1922 | ||
3446 | 1923 | ||
3447 | void ieee80211_sta_req_auth(struct net_device *dev, | ||
3448 | struct ieee80211_if_sta *ifsta) | ||
3449 | { | ||
3450 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
3451 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3452 | |||
3453 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | ||
3454 | return; | ||
3455 | |||
3456 | if ((ifsta->flags & (IEEE80211_STA_BSSID_SET | | ||
3457 | IEEE80211_STA_AUTO_BSSID_SEL)) && | ||
3458 | (ifsta->flags & (IEEE80211_STA_SSID_SET | | ||
3459 | IEEE80211_STA_AUTO_SSID_SEL))) { | ||
3460 | set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request); | ||
3461 | queue_work(local->hw.workqueue, &ifsta->work); | ||
3462 | } | ||
3463 | } | ||
3464 | |||
3465 | static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta, | 1924 | static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta, |
3466 | const char *ssid, int ssid_len) | 1925 | const char *ssid, int ssid_len) |
3467 | { | 1926 | { |
@@ -3492,81 +1951,11 @@ static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta, | |||
3492 | return 0; | 1951 | return 0; |
3493 | } | 1952 | } |
3494 | 1953 | ||
3495 | static int ieee80211_sta_config_auth(struct net_device *dev, | 1954 | static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata, |
3496 | struct ieee80211_if_sta *ifsta) | 1955 | struct ieee80211_if_sta *ifsta) |
3497 | { | 1956 | { |
3498 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1957 | struct ieee80211_local *local = sdata->local; |
3499 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1958 | struct ieee80211_bss *bss; |
3500 | struct ieee80211_sta_bss *bss, *selected = NULL; | ||
3501 | int top_rssi = 0, freq; | ||
3502 | |||
3503 | spin_lock_bh(&local->sta_bss_lock); | ||
3504 | freq = local->oper_channel->center_freq; | ||
3505 | list_for_each_entry(bss, &local->sta_bss_list, list) { | ||
3506 | if (!(bss->capability & WLAN_CAPABILITY_ESS)) | ||
3507 | continue; | ||
3508 | |||
3509 | if ((ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL | | ||
3510 | IEEE80211_STA_AUTO_BSSID_SEL | | ||
3511 | IEEE80211_STA_AUTO_CHANNEL_SEL)) && | ||
3512 | (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^ | ||
3513 | !!sdata->default_key)) | ||
3514 | continue; | ||
3515 | |||
3516 | if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) && | ||
3517 | bss->freq != freq) | ||
3518 | continue; | ||
3519 | |||
3520 | if (!(ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) && | ||
3521 | memcmp(bss->bssid, ifsta->bssid, ETH_ALEN)) | ||
3522 | continue; | ||
3523 | |||
3524 | if (!(ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) && | ||
3525 | !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len)) | ||
3526 | continue; | ||
3527 | |||
3528 | if (!selected || top_rssi < bss->signal) { | ||
3529 | selected = bss; | ||
3530 | top_rssi = bss->signal; | ||
3531 | } | ||
3532 | } | ||
3533 | if (selected) | ||
3534 | atomic_inc(&selected->users); | ||
3535 | spin_unlock_bh(&local->sta_bss_lock); | ||
3536 | |||
3537 | if (selected) { | ||
3538 | ieee80211_set_freq(dev, selected->freq); | ||
3539 | if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) | ||
3540 | ieee80211_sta_set_ssid(dev, selected->ssid, | ||
3541 | selected->ssid_len); | ||
3542 | ieee80211_sta_set_bssid(dev, selected->bssid); | ||
3543 | ieee80211_sta_def_wmm_params(dev, selected, 0); | ||
3544 | ieee80211_rx_bss_put(local, selected); | ||
3545 | ifsta->state = IEEE80211_AUTHENTICATE; | ||
3546 | ieee80211_sta_reset_auth(dev, ifsta); | ||
3547 | return 0; | ||
3548 | } else { | ||
3549 | if (ifsta->state != IEEE80211_AUTHENTICATE) { | ||
3550 | if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) | ||
3551 | ieee80211_sta_start_scan(dev, NULL, 0); | ||
3552 | else | ||
3553 | ieee80211_sta_start_scan(dev, ifsta->ssid, | ||
3554 | ifsta->ssid_len); | ||
3555 | ifsta->state = IEEE80211_AUTHENTICATE; | ||
3556 | set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request); | ||
3557 | } else | ||
3558 | ifsta->state = IEEE80211_DISABLED; | ||
3559 | } | ||
3560 | return -1; | ||
3561 | } | ||
3562 | |||
3563 | |||
3564 | static int ieee80211_sta_create_ibss(struct net_device *dev, | ||
3565 | struct ieee80211_if_sta *ifsta) | ||
3566 | { | ||
3567 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
3568 | struct ieee80211_sta_bss *bss; | ||
3569 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3570 | struct ieee80211_supported_band *sband; | 1959 | struct ieee80211_supported_band *sband; |
3571 | u8 bssid[ETH_ALEN], *pos; | 1960 | u8 bssid[ETH_ALEN], *pos; |
3572 | int i; | 1961 | int i; |
@@ -3582,15 +1971,15 @@ static int ieee80211_sta_create_ibss(struct net_device *dev, | |||
3582 | * random number generator get different BSSID. */ | 1971 | * random number generator get different BSSID. */ |
3583 | get_random_bytes(bssid, ETH_ALEN); | 1972 | get_random_bytes(bssid, ETH_ALEN); |
3584 | for (i = 0; i < ETH_ALEN; i++) | 1973 | for (i = 0; i < ETH_ALEN; i++) |
3585 | bssid[i] ^= dev->dev_addr[i]; | 1974 | bssid[i] ^= sdata->dev->dev_addr[i]; |
3586 | bssid[0] &= ~0x01; | 1975 | bssid[0] &= ~0x01; |
3587 | bssid[0] |= 0x02; | 1976 | bssid[0] |= 0x02; |
3588 | #endif | 1977 | #endif |
3589 | 1978 | ||
3590 | printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n", | 1979 | printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n", |
3591 | dev->name, print_mac(mac, bssid)); | 1980 | sdata->dev->name, print_mac(mac, bssid)); |
3592 | 1981 | ||
3593 | bss = ieee80211_rx_bss_add(dev, bssid, | 1982 | bss = ieee80211_rx_bss_add(local, bssid, |
3594 | local->hw.conf.channel->center_freq, | 1983 | local->hw.conf.channel->center_freq, |
3595 | sdata->u.sta.ssid, sdata->u.sta.ssid_len); | 1984 | sdata->u.sta.ssid, sdata->u.sta.ssid_len); |
3596 | if (!bss) | 1985 | if (!bss) |
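The block above derives the IBSS BSSID by XOR-ing freshly generated random bytes with the interface's own MAC address, then clearing bit 0 of the first octet (so the address is unicast) and setting bit 1 (marking it locally administered). A minimal userspace sketch of that derivation; the helper name and the example device address are invented for illustration and are not part of mac80211:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ETH_ALEN 6

/* Model of the BSSID derivation in ieee80211_sta_create_ibss(): start
 * from random bytes, mix in the device address, then force a unicast
 * (bit 0 clear), locally administered (bit 1 set) address. */
static void derive_ibss_bssid(const unsigned char dev_addr[ETH_ALEN],
                              unsigned char bssid[ETH_ALEN])
{
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                bssid[i] = (rand() & 0xff) ^ dev_addr[i];
        bssid[0] &= ~0x01;      /* not multicast/broadcast */
        bssid[0] |= 0x02;       /* locally administered */
}

int main(void)
{
        unsigned char dev_addr[ETH_ALEN] = { 0x00, 0x1b, 0x2c, 0x3d, 0x4e, 0x5f };
        unsigned char bssid[ETH_ALEN];
        int i;

        srand((unsigned) time(NULL));
        derive_ibss_bssid(dev_addr, bssid);
        for (i = 0; i < ETH_ALEN; i++)
                printf("%02x%s", bssid[i], i == ETH_ALEN - 1 ? "\n" : ":");
        return 0;
}

Mixing in the device address is what keeps two stations with identical random-number-generator state from ending up with the same BSSID.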
@@ -3617,17 +2006,17 @@ static int ieee80211_sta_create_ibss(struct net_device *dev, | |||
3617 | *pos++ = (u8) (rate / 5); | 2006 | *pos++ = (u8) (rate / 5); |
3618 | } | 2007 | } |
3619 | 2008 | ||
3620 | ret = ieee80211_sta_join_ibss(dev, ifsta, bss); | 2009 | ret = ieee80211_sta_join_ibss(sdata, ifsta, bss); |
3621 | ieee80211_rx_bss_put(local, bss); | 2010 | ieee80211_rx_bss_put(local, bss); |
3622 | return ret; | 2011 | return ret; |
3623 | } | 2012 | } |
3624 | 2013 | ||
3625 | 2014 | ||
3626 | static int ieee80211_sta_find_ibss(struct net_device *dev, | 2015 | static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata, |
3627 | struct ieee80211_if_sta *ifsta) | 2016 | struct ieee80211_if_sta *ifsta) |
3628 | { | 2017 | { |
3629 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2018 | struct ieee80211_local *local = sdata->local; |
3630 | struct ieee80211_sta_bss *bss; | 2019 | struct ieee80211_bss *bss; |
3631 | int found = 0; | 2020 | int found = 0; |
3632 | u8 bssid[ETH_ALEN]; | 2021 | u8 bssid[ETH_ALEN]; |
3633 | int active_ibss; | 2022 | int active_ibss; |
@@ -3637,13 +2026,13 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, | |||
3637 | if (ifsta->ssid_len == 0) | 2026 | if (ifsta->ssid_len == 0) |
3638 | return -EINVAL; | 2027 | return -EINVAL; |
3639 | 2028 | ||
3640 | active_ibss = ieee80211_sta_active_ibss(dev); | 2029 | active_ibss = ieee80211_sta_active_ibss(sdata); |
3641 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 2030 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
3642 | printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", | 2031 | printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", |
3643 | dev->name, active_ibss); | 2032 | sdata->dev->name, active_ibss); |
3644 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | 2033 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ |
3645 | spin_lock_bh(&local->sta_bss_lock); | 2034 | spin_lock_bh(&local->bss_lock); |
3646 | list_for_each_entry(bss, &local->sta_bss_list, list) { | 2035 | list_for_each_entry(bss, &local->bss_list, list) { |
3647 | if (ifsta->ssid_len != bss->ssid_len || | 2036 | if (ifsta->ssid_len != bss->ssid_len || |
3648 | memcmp(ifsta->ssid, bss->ssid, bss->ssid_len) != 0 | 2037 | memcmp(ifsta->ssid, bss->ssid, bss->ssid_len) != 0 |
3649 | || !(bss->capability & WLAN_CAPABILITY_IBSS)) | 2038 | || !(bss->capability & WLAN_CAPABILITY_IBSS)) |
@@ -3657,7 +2046,7 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, | |||
3657 | if (active_ibss || memcmp(bssid, ifsta->bssid, ETH_ALEN) != 0) | 2046 | if (active_ibss || memcmp(bssid, ifsta->bssid, ETH_ALEN) != 0) |
3658 | break; | 2047 | break; |
3659 | } | 2048 | } |
3660 | spin_unlock_bh(&local->sta_bss_lock); | 2049 | spin_unlock_bh(&local->bss_lock); |
3661 | 2050 | ||
3662 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | 2051 | #ifdef CONFIG_MAC80211_IBSS_DEBUG |
3663 | if (found) | 2052 | if (found) |
@@ -3675,15 +2064,15 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, | |||
3675 | else | 2064 | else |
3676 | search_freq = local->hw.conf.channel->center_freq; | 2065 | search_freq = local->hw.conf.channel->center_freq; |
3677 | 2066 | ||
3678 | bss = ieee80211_rx_bss_get(dev, bssid, search_freq, | 2067 | bss = ieee80211_rx_bss_get(local, bssid, search_freq, |
3679 | ifsta->ssid, ifsta->ssid_len); | 2068 | ifsta->ssid, ifsta->ssid_len); |
3680 | if (!bss) | 2069 | if (!bss) |
3681 | goto dont_join; | 2070 | goto dont_join; |
3682 | 2071 | ||
3683 | printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" | 2072 | printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" |
3684 | " based on configured SSID\n", | 2073 | " based on configured SSID\n", |
3685 | dev->name, print_mac(mac, bssid)); | 2074 | sdata->dev->name, print_mac(mac, bssid)); |
3686 | ret = ieee80211_sta_join_ibss(dev, ifsta, bss); | 2075 | ret = ieee80211_sta_join_ibss(sdata, ifsta, bss); |
3687 | ieee80211_rx_bss_put(local, bss); | 2076 | ieee80211_rx_bss_put(local, bss); |
3688 | return ret; | 2077 | return ret; |
3689 | } | 2078 | } |
@@ -3694,17 +2083,17 @@ dont_join: | |||
3694 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ | 2083 | #endif /* CONFIG_MAC80211_IBSS_DEBUG */ |
3695 | 2084 | ||
3696 | /* Selected IBSS not found in current scan results - try to scan */ | 2085 | /* Selected IBSS not found in current scan results - try to scan */ |
3697 | if (ifsta->state == IEEE80211_IBSS_JOINED && | 2086 | if (ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED && |
3698 | !ieee80211_sta_active_ibss(dev)) { | 2087 | !ieee80211_sta_active_ibss(sdata)) { |
3699 | mod_timer(&ifsta->timer, jiffies + | 2088 | mod_timer(&ifsta->timer, jiffies + |
3700 | IEEE80211_IBSS_MERGE_INTERVAL); | 2089 | IEEE80211_IBSS_MERGE_INTERVAL); |
3701 | } else if (time_after(jiffies, local->last_scan_completed + | 2090 | } else if (time_after(jiffies, local->last_scan_completed + |
3702 | IEEE80211_SCAN_INTERVAL)) { | 2091 | IEEE80211_SCAN_INTERVAL)) { |
3703 | printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " | 2092 | printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " |
3704 | "join\n", dev->name); | 2093 | "join\n", sdata->dev->name); |
3705 | return ieee80211_sta_req_scan(dev, ifsta->ssid, | 2094 | return ieee80211_request_scan(sdata, ifsta->ssid, |
3706 | ifsta->ssid_len); | 2095 | ifsta->ssid_len); |
3707 | } else if (ifsta->state != IEEE80211_IBSS_JOINED) { | 2096 | } else if (ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED) { |
3708 | int interval = IEEE80211_SCAN_INTERVAL; | 2097 | int interval = IEEE80211_SCAN_INTERVAL; |
3709 | 2098 | ||
3710 | if (time_after(jiffies, ifsta->ibss_join_req + | 2099 | if (time_after(jiffies, ifsta->ibss_join_req + |
@@ -3712,10 +2101,10 @@ dont_join: | |||
3712 | if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) && | 2101 | if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) && |
3713 | (!(local->oper_channel->flags & | 2102 | (!(local->oper_channel->flags & |
3714 | IEEE80211_CHAN_NO_IBSS))) | 2103 | IEEE80211_CHAN_NO_IBSS))) |
3715 | return ieee80211_sta_create_ibss(dev, ifsta); | 2104 | return ieee80211_sta_create_ibss(sdata, ifsta); |
3716 | if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) { | 2105 | if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) { |
3717 | printk(KERN_DEBUG "%s: IBSS not allowed on" | 2106 | printk(KERN_DEBUG "%s: IBSS not allowed on" |
3718 | " %d MHz\n", dev->name, | 2107 | " %d MHz\n", sdata->dev->name, |
3719 | local->hw.conf.channel->center_freq); | 2108 | local->hw.conf.channel->center_freq); |
3720 | } | 2109 | } |
3721 | 2110 | ||
@@ -3724,7 +2113,7 @@ dont_join: | |||
3724 | interval = IEEE80211_SCAN_INTERVAL_SLOW; | 2113 | interval = IEEE80211_SCAN_INTERVAL_SLOW; |
3725 | } | 2114 | } |
3726 | 2115 | ||
3727 | ifsta->state = IEEE80211_IBSS_SEARCH; | 2116 | ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH; |
3728 | mod_timer(&ifsta->timer, jiffies + interval); | 2117 | mod_timer(&ifsta->timer, jiffies + interval); |
3729 | return 0; | 2118 | return 0; |
3730 | } | 2119 | } |
@@ -3733,620 +2122,344 @@ dont_join: | |||
3733 | } | 2122 | } |
3734 | 2123 | ||
3735 | 2124 | ||
3736 | int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) | 2125 | static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata, |
2126 | struct ieee80211_if_sta *ifsta) | ||
3737 | { | 2127 | { |
3738 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 2128 | struct ieee80211_local *local = sdata->local; |
3739 | struct ieee80211_if_sta *ifsta; | 2129 | struct ieee80211_bss *bss, *selected = NULL; |
3740 | int res; | 2130 | int top_rssi = 0, freq; |
3741 | 2131 | ||
3742 | if (len > IEEE80211_MAX_SSID_LEN) | 2132 | spin_lock_bh(&local->bss_lock); |
3743 | return -EINVAL; | 2133 | freq = local->oper_channel->center_freq; |
2134 | list_for_each_entry(bss, &local->bss_list, list) { | ||
2135 | if (!(bss->capability & WLAN_CAPABILITY_ESS)) | ||
2136 | continue; | ||
3744 | 2137 | ||
3745 | ifsta = &sdata->u.sta; | 2138 | if ((ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL | |
2139 | IEEE80211_STA_AUTO_BSSID_SEL | | ||
2140 | IEEE80211_STA_AUTO_CHANNEL_SEL)) && | ||
2141 | (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^ | ||
2142 | !!sdata->default_key)) | ||
2143 | continue; | ||
3746 | 2144 | ||
3747 | if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) { | 2145 | if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) && |
3748 | memset(ifsta->ssid, 0, sizeof(ifsta->ssid)); | 2146 | bss->freq != freq) |
3749 | memcpy(ifsta->ssid, ssid, len); | 2147 | continue; |
3750 | ifsta->ssid_len = len; | ||
3751 | ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET; | ||
3752 | 2148 | ||
3753 | res = 0; | 2149 | if (!(ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) && |
3754 | /* | 2150 | memcmp(bss->bssid, ifsta->bssid, ETH_ALEN)) |
3755 | * Hack! MLME code needs to be cleaned up to have different | 2151 | continue; |
3756 | * entry points for configuration and internal selection change | ||
3757 | */ | ||
3758 | if (netif_running(sdata->dev)) | ||
3759 | res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID); | ||
3760 | if (res) { | ||
3761 | printk(KERN_DEBUG "%s: Failed to config new SSID to " | ||
3762 | "the low-level driver\n", dev->name); | ||
3763 | return res; | ||
3764 | } | ||
3765 | } | ||
3766 | 2152 | ||
3767 | if (len) | 2153 | if (!(ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) && |
3768 | ifsta->flags |= IEEE80211_STA_SSID_SET; | 2154 | !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len)) |
3769 | else | 2155 | continue; |
3770 | ifsta->flags &= ~IEEE80211_STA_SSID_SET; | ||
3771 | 2156 | ||
3772 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && | 2157 | if (!selected || top_rssi < bss->signal) { |
3773 | !(ifsta->flags & IEEE80211_STA_BSSID_SET)) { | 2158 | selected = bss; |
3774 | ifsta->ibss_join_req = jiffies; | 2159 | top_rssi = bss->signal; |
3775 | ifsta->state = IEEE80211_IBSS_SEARCH; | 2160 | } |
3776 | return ieee80211_sta_find_ibss(dev, ifsta); | ||
3777 | } | 2161 | } |
2162 | if (selected) | ||
2163 | atomic_inc(&selected->users); | ||
2164 | spin_unlock_bh(&local->bss_lock); | ||
3778 | 2165 | ||
3779 | return 0; | 2166 | if (selected) { |
3780 | } | 2167 | ieee80211_set_freq(sdata, selected->freq); |
2168 | if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) | ||
2169 | ieee80211_sta_set_ssid(sdata, selected->ssid, | ||
2170 | selected->ssid_len); | ||
2171 | ieee80211_sta_set_bssid(sdata, selected->bssid); | ||
2172 | ieee80211_sta_def_wmm_params(sdata, selected); | ||
3781 | 2173 | ||
2174 | /* Send out direct probe if no probe resp was received or | ||
2175 | * the one we have is outdated | ||
2176 | */ | ||
2177 | if (!selected->last_probe_resp || | ||
2178 | time_after(jiffies, selected->last_probe_resp | ||
2179 | + IEEE80211_SCAN_RESULT_EXPIRE)) | ||
2180 | ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE; | ||
2181 | else | ||
2182 | ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE; | ||
3782 | 2183 | ||
3783 | int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len) | 2184 | ieee80211_rx_bss_put(local, selected); |
3784 | { | 2185 | ieee80211_sta_reset_auth(sdata, ifsta); |
3785 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 2186 | return 0; |
3786 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 2187 | } else { |
3787 | memcpy(ssid, ifsta->ssid, ifsta->ssid_len); | 2188 | if (ifsta->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) { |
3788 | *len = ifsta->ssid_len; | 2189 | ifsta->assoc_scan_tries++; |
3789 | return 0; | 2190 | if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) |
2191 | ieee80211_start_scan(sdata, NULL, 0); | ||
2192 | else | ||
2193 | ieee80211_start_scan(sdata, ifsta->ssid, | ||
2194 | ifsta->ssid_len); | ||
2195 | ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE; | ||
2196 | set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request); | ||
2197 | } else | ||
2198 | ifsta->state = IEEE80211_STA_MLME_DISABLED; | ||
2199 | } | ||
2200 | return -1; | ||
3790 | } | 2201 | } |
3791 | 2202 | ||
3792 | 2203 | ||
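ieee80211_sta_config_auth() above walks local->bss_list, skips entries that are not ESS, that mismatch the configured channel, BSSID or SSID (unless the corresponding IEEE80211_STA_AUTO_*_SEL flag allows automatic selection) or whose privacy capability conflicts with the local key setup, and keeps the candidate with the strongest signal; if the winner has no recent probe response it enters IEEE80211_STA_MLME_DIRECT_PROBE first, otherwise it goes straight to authentication. A userspace model of the selection loop; the struct, flag names and sample data below are illustrative, not the kernel's:

#include <stdio.h>
#include <string.h>

#define F_AUTO_SSID    0x1
#define F_AUTO_BSSID   0x2
#define F_AUTO_CHANNEL 0x4

struct candidate {
        const char *ssid;
        const char *bssid;
        int freq;
        int signal;
        int is_ess;
};

/* Keep the strongest candidate that survives the configured filters;
 * an "auto select" flag disables the corresponding filter. */
static const struct candidate *
select_bss(const struct candidate *c, int n, unsigned flags,
           const char *want_ssid, const char *want_bssid, int want_freq)
{
        const struct candidate *best = NULL;
        int top_signal = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (!c[i].is_ess)
                        continue;
                if (!(flags & F_AUTO_CHANNEL) && c[i].freq != want_freq)
                        continue;
                if (!(flags & F_AUTO_BSSID) && strcmp(c[i].bssid, want_bssid))
                        continue;
                if (!(flags & F_AUTO_SSID) && strcmp(c[i].ssid, want_ssid))
                        continue;
                if (!best || c[i].signal > top_signal) {
                        best = &c[i];
                        top_signal = c[i].signal;
                }
        }
        return best;
}

int main(void)
{
        struct candidate list[] = {
                { "office", "00:11:22:33:44:55", 2412, 40, 1 },
                { "office", "00:11:22:33:44:66", 2437, 55, 1 },
                { "guest",  "00:11:22:33:44:77", 2462, 70, 1 },
        };
        const struct candidate *sel =
                select_bss(list, 3, F_AUTO_BSSID | F_AUTO_CHANNEL,
                           "office", NULL, 0);

        if (sel)
                printf("selected %s (%s, signal %d)\n",
                       sel->ssid, sel->bssid, sel->signal);
        return 0;
}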
3793 | int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid) | 2204 | static void ieee80211_sta_work(struct work_struct *work) |
3794 | { | 2205 | { |
3795 | struct ieee80211_sub_if_data *sdata; | 2206 | struct ieee80211_sub_if_data *sdata = |
2207 | container_of(work, struct ieee80211_sub_if_data, u.sta.work); | ||
2208 | struct ieee80211_local *local = sdata->local; | ||
3796 | struct ieee80211_if_sta *ifsta; | 2209 | struct ieee80211_if_sta *ifsta; |
3797 | int res; | 2210 | struct sk_buff *skb; |
3798 | |||
3799 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3800 | ifsta = &sdata->u.sta; | ||
3801 | 2211 | ||
3802 | if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) { | 2212 | if (!netif_running(sdata->dev)) |
3803 | memcpy(ifsta->bssid, bssid, ETH_ALEN); | 2213 | return; |
3804 | res = 0; | ||
3805 | /* | ||
3806 | * Hack! See also ieee80211_sta_set_ssid. | ||
3807 | */ | ||
3808 | if (netif_running(sdata->dev)) | ||
3809 | res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID); | ||
3810 | if (res) { | ||
3811 | printk(KERN_DEBUG "%s: Failed to config new BSSID to " | ||
3812 | "the low-level driver\n", dev->name); | ||
3813 | return res; | ||
3814 | } | ||
3815 | } | ||
3816 | 2214 | ||
3817 | if (is_valid_ether_addr(bssid)) | 2215 | if (local->sw_scanning || local->hw_scanning) |
3818 | ifsta->flags |= IEEE80211_STA_BSSID_SET; | 2216 | return; |
3819 | else | ||
3820 | ifsta->flags &= ~IEEE80211_STA_BSSID_SET; | ||
3821 | 2217 | ||
3822 | return 0; | 2218 | if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION && |
3823 | } | 2219 | sdata->vif.type != NL80211_IFTYPE_ADHOC)) |
2220 | return; | ||
2221 | ifsta = &sdata->u.sta; | ||
3824 | 2222 | ||
2223 | while ((skb = skb_dequeue(&ifsta->skb_queue))) | ||
2224 | ieee80211_sta_rx_queued_mgmt(sdata, skb); | ||
3825 | 2225 | ||
3826 | static void ieee80211_send_nullfunc(struct ieee80211_local *local, | 2226 | if (ifsta->state != IEEE80211_STA_MLME_DIRECT_PROBE && |
3827 | struct ieee80211_sub_if_data *sdata, | 2227 | ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE && |
3828 | int powersave) | 2228 | ifsta->state != IEEE80211_STA_MLME_ASSOCIATE && |
3829 | { | 2229 | test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) { |
3830 | struct sk_buff *skb; | 2230 | ieee80211_start_scan(sdata, ifsta->scan_ssid, |
3831 | struct ieee80211_hdr *nullfunc; | 2231 | ifsta->scan_ssid_len); |
3832 | __le16 fc; | 2232 | return; |
2233 | } | ||
3833 | 2234 | ||
3834 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); | 2235 | if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request)) { |
3835 | if (!skb) { | 2236 | if (ieee80211_sta_config_auth(sdata, ifsta)) |
3836 | printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc " | 2237 | return; |
3837 | "frame\n", sdata->dev->name); | 2238 | clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request); |
2239 | } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request)) | ||
3838 | return; | 2240 | return; |
2241 | |||
2242 | switch (ifsta->state) { | ||
2243 | case IEEE80211_STA_MLME_DISABLED: | ||
2244 | break; | ||
2245 | case IEEE80211_STA_MLME_DIRECT_PROBE: | ||
2246 | ieee80211_direct_probe(sdata, ifsta); | ||
2247 | break; | ||
2248 | case IEEE80211_STA_MLME_AUTHENTICATE: | ||
2249 | ieee80211_authenticate(sdata, ifsta); | ||
2250 | break; | ||
2251 | case IEEE80211_STA_MLME_ASSOCIATE: | ||
2252 | ieee80211_associate(sdata, ifsta); | ||
2253 | break; | ||
2254 | case IEEE80211_STA_MLME_ASSOCIATED: | ||
2255 | ieee80211_associated(sdata, ifsta); | ||
2256 | break; | ||
2257 | case IEEE80211_STA_MLME_IBSS_SEARCH: | ||
2258 | ieee80211_sta_find_ibss(sdata, ifsta); | ||
2259 | break; | ||
2260 | case IEEE80211_STA_MLME_IBSS_JOINED: | ||
2261 | ieee80211_sta_merge_ibss(sdata, ifsta); | ||
2262 | break; | ||
2263 | default: | ||
2264 | WARN_ON(1); | ||
2265 | break; | ||
3839 | } | 2266 | } |
3840 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
3841 | 2267 | ||
3842 | nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); | 2268 | if (ieee80211_privacy_mismatch(sdata, ifsta)) { |
3843 | memset(nullfunc, 0, 24); | 2269 | printk(KERN_DEBUG "%s: privacy configuration mismatch and " |
3844 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | | 2270 | "mixed-cell disabled - disassociate\n", sdata->dev->name); |
3845 | IEEE80211_FCTL_TODS); | ||
3846 | if (powersave) | ||
3847 | fc |= cpu_to_le16(IEEE80211_FCTL_PM); | ||
3848 | nullfunc->frame_control = fc; | ||
3849 | memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN); | ||
3850 | memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); | ||
3851 | memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN); | ||
3852 | |||
3853 | ieee80211_sta_tx(sdata->dev, skb, 0); | ||
3854 | } | ||
3855 | 2271 | ||
2272 | ieee80211_set_disassoc(sdata, ifsta, false, true, | ||
2273 | WLAN_REASON_UNSPECIFIED); | ||
2274 | } | ||
2275 | } | ||
3856 | 2276 | ||
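The reworked ieee80211_sta_work() first drains the queued management frames and then dispatches on ifsta->state, with one handler per MLME state (direct probe, authenticate, associate, associated, IBSS search, IBSS joined). The kernel uses an explicit switch; the sketch below shows the same dispatch idea with a handler table, and every enum value and function name in it is made up for illustration:

#include <stdio.h>

enum mlme_state {
        MLME_DISABLED,
        MLME_DIRECT_PROBE,
        MLME_AUTHENTICATE,
        MLME_ASSOCIATE,
        MLME_ASSOCIATED,
        MLME_STATE_MAX
};

static void do_direct_probe(void) { puts("send direct probe request"); }
static void do_authenticate(void) { puts("send authentication frame"); }
static void do_associate(void)    { puts("send association request"); }
static void do_associated(void)   { puts("monitor the association"); }

/* One handler per state; MLME_DISABLED intentionally has none. */
static void (*const handlers[MLME_STATE_MAX])(void) = {
        [MLME_DIRECT_PROBE] = do_direct_probe,
        [MLME_AUTHENTICATE] = do_authenticate,
        [MLME_ASSOCIATE]    = do_associate,
        [MLME_ASSOCIATED]   = do_associated,
};

static void mlme_work(enum mlme_state state)
{
        if (state < MLME_STATE_MAX && handlers[state])
                handlers[state]();
}

int main(void)
{
        mlme_work(MLME_AUTHENTICATE);
        mlme_work(MLME_ASSOCIATED);
        return 0;
}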
3857 | static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) | 2277 | static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) |
3858 | { | 2278 | { |
3859 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 2279 | if (sdata->vif.type == NL80211_IFTYPE_STATION) |
3860 | ieee80211_vif_is_mesh(&sdata->vif)) | 2280 | queue_work(sdata->local->hw.workqueue, |
3861 | ieee80211_sta_timer((unsigned long)sdata); | 2281 | &sdata->u.sta.work); |
3862 | } | 2282 | } |
3863 | 2283 | ||
3864 | void ieee80211_scan_completed(struct ieee80211_hw *hw) | 2284 | /* interface setup */ |
2285 | void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) | ||
3865 | { | 2286 | { |
3866 | struct ieee80211_local *local = hw_to_local(hw); | 2287 | struct ieee80211_if_sta *ifsta; |
3867 | struct net_device *dev = local->scan_dev; | ||
3868 | struct ieee80211_sub_if_data *sdata; | ||
3869 | union iwreq_data wrqu; | ||
3870 | 2288 | ||
3871 | local->last_scan_completed = jiffies; | 2289 | ifsta = &sdata->u.sta; |
3872 | memset(&wrqu, 0, sizeof(wrqu)); | 2290 | INIT_WORK(&ifsta->work, ieee80211_sta_work); |
3873 | wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL); | 2291 | setup_timer(&ifsta->timer, ieee80211_sta_timer, |
3874 | 2292 | (unsigned long) sdata); | |
3875 | if (local->sta_hw_scanning) { | 2293 | skb_queue_head_init(&ifsta->skb_queue); |
3876 | local->sta_hw_scanning = 0; | 2294 | |
3877 | if (ieee80211_hw_config(local)) | 2295 | ifsta->capab = WLAN_CAPABILITY_ESS; |
3878 | printk(KERN_DEBUG "%s: failed to restore operational " | 2296 | ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN | |
3879 | "channel after scan\n", dev->name); | 2297 | IEEE80211_AUTH_ALG_SHARED_KEY; |
3880 | /* Restart STA timer for HW scan case */ | 2298 | ifsta->flags |= IEEE80211_STA_CREATE_IBSS | |
3881 | rcu_read_lock(); | 2299 | IEEE80211_STA_AUTO_BSSID_SEL | |
3882 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 2300 | IEEE80211_STA_AUTO_CHANNEL_SEL; |
3883 | ieee80211_restart_sta_timer(sdata); | 2301 | if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4) |
3884 | rcu_read_unlock(); | 2302 | ifsta->flags |= IEEE80211_STA_WMM_ENABLED; |
2303 | } | ||
2304 | |||
2305 | /* | ||
2306 | * Add a new IBSS station, will also be called by the RX code when, | ||
2307 | * in IBSS mode, receiving a frame from a yet-unknown station, hence | ||
2308 | * must be callable in atomic context. | ||
2309 | */ | ||
2310 | struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, | ||
2311 | struct sk_buff *skb, u8 *bssid, | ||
2312 | u8 *addr, u64 supp_rates) | ||
2313 | { | ||
2314 | struct ieee80211_local *local = sdata->local; | ||
2315 | struct sta_info *sta; | ||
2316 | DECLARE_MAC_BUF(mac); | ||
2317 | int band = local->hw.conf.channel->band; | ||
3885 | 2318 | ||
3886 | goto done; | 2319 | /* TODO: Could consider removing the least recently used entry and |
2320 | * allow new one to be added. */ | ||
2321 | if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { | ||
2322 | if (net_ratelimit()) { | ||
2323 | printk(KERN_DEBUG "%s: No room for a new IBSS STA " | ||
2324 | "entry %s\n", sdata->dev->name, print_mac(mac, addr)); | ||
2325 | } | ||
2326 | return NULL; | ||
3887 | } | 2327 | } |
3888 | 2328 | ||
3889 | local->sta_sw_scanning = 0; | 2329 | if (compare_ether_addr(bssid, sdata->u.sta.bssid)) |
3890 | if (ieee80211_hw_config(local)) | 2330 | return NULL; |
3891 | printk(KERN_DEBUG "%s: failed to restore operational " | ||
3892 | "channel after scan\n", dev->name); | ||
3893 | 2331 | ||
2332 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
2333 | printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n", | ||
2334 | wiphy_name(local->hw.wiphy), print_mac(mac, addr), sdata->dev->name); | ||
2335 | #endif | ||
3894 | 2336 | ||
3895 | netif_tx_lock_bh(local->mdev); | 2337 | sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); |
3896 | netif_addr_lock(local->mdev); | 2338 | if (!sta) |
3897 | local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC; | 2339 | return NULL; |
3898 | local->ops->configure_filter(local_to_hw(local), | ||
3899 | FIF_BCN_PRBRESP_PROMISC, | ||
3900 | &local->filter_flags, | ||
3901 | local->mdev->mc_count, | ||
3902 | local->mdev->mc_list); | ||
3903 | 2340 | ||
3904 | netif_addr_unlock(local->mdev); | 2341 | set_sta_flags(sta, WLAN_STA_AUTHORIZED); |
3905 | netif_tx_unlock_bh(local->mdev); | ||
3906 | 2342 | ||
3907 | rcu_read_lock(); | 2343 | /* make sure mandatory rates are always added */ |
3908 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 2344 | sta->sta.supp_rates[band] = supp_rates | |
3909 | /* Tell AP we're back */ | 2345 | ieee80211_mandatory_rates(local, band); |
3910 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA && | ||
3911 | sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) | ||
3912 | ieee80211_send_nullfunc(local, sdata, 0); | ||
3913 | 2346 | ||
3914 | ieee80211_restart_sta_timer(sdata); | 2347 | rate_control_rate_init(sta); |
3915 | 2348 | ||
3916 | netif_wake_queue(sdata->dev); | 2349 | if (sta_info_insert(sta)) |
3917 | } | 2350 | return NULL; |
3918 | rcu_read_unlock(); | ||
3919 | 2351 | ||
3920 | done: | 2352 | return sta; |
3921 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3922 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | ||
3923 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | ||
3924 | if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) || | ||
3925 | (!(ifsta->state == IEEE80211_IBSS_JOINED) && | ||
3926 | !ieee80211_sta_active_ibss(dev))) | ||
3927 | ieee80211_sta_find_ibss(dev, ifsta); | ||
3928 | } | ||
3929 | } | 2353 | } |
3930 | EXPORT_SYMBOL(ieee80211_scan_completed); | ||
3931 | 2354 | ||
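ieee80211_ibss_add_sta() above has to work in atomic context (it can run from the RX path when a frame from an unknown peer arrives), caps the station table at IEEE80211_IBSS_MAX_STA_ENTRIES, and always ORs the band's mandatory rates into whatever the peer advertised so the new entry stays reachable. A toy model of that rate merge; the rate bit values and the 2.4 GHz mandatory set used here are illustrative, not mac80211's tables:

#include <stdio.h>
#include <stdint.h>

#define RATE_1M   (1u << 0)
#define RATE_2M   (1u << 1)
#define RATE_5M5  (1u << 2)
#define RATE_11M  (1u << 3)
#define RATE_6M   (1u << 4)

/* Toy mandatory set for 2.4 GHz in this model. */
static uint32_t mandatory_rates_2ghz(void)
{
        return RATE_1M | RATE_2M;
}

/* Whatever the peer advertised, the mandatory rates are always OR-ed in,
 * mirroring sta->sta.supp_rates[band] = supp_rates | mandatory. */
static uint32_t ibss_sta_rates(uint32_t advertised)
{
        return advertised | mandatory_rates_2ghz();
}

int main(void)
{
        uint32_t peer = RATE_11M | RATE_6M;     /* peer advertised only these */

        printf("supp_rates bitmap: 0x%x\n", ibss_sta_rates(peer));
        return 0;
}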
3932 | void ieee80211_sta_scan_work(struct work_struct *work) | 2355 | /* configuration hooks */ |
2356 | void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata, | ||
2357 | struct ieee80211_if_sta *ifsta) | ||
3933 | { | 2358 | { |
3934 | struct ieee80211_local *local = | 2359 | struct ieee80211_local *local = sdata->local; |
3935 | container_of(work, struct ieee80211_local, scan_work.work); | ||
3936 | struct net_device *dev = local->scan_dev; | ||
3937 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
3938 | struct ieee80211_supported_band *sband; | ||
3939 | struct ieee80211_channel *chan; | ||
3940 | int skip; | ||
3941 | unsigned long next_delay = 0; | ||
3942 | 2360 | ||
3943 | if (!local->sta_sw_scanning) | 2361 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
3944 | return; | 2362 | return; |
3945 | 2363 | ||
3946 | switch (local->scan_state) { | 2364 | if ((ifsta->flags & (IEEE80211_STA_BSSID_SET | |
3947 | case SCAN_SET_CHANNEL: | 2365 | IEEE80211_STA_AUTO_BSSID_SEL)) && |
3948 | /* | 2366 | (ifsta->flags & (IEEE80211_STA_SSID_SET | |
3949 | * Get current scan band. scan_band may be IEEE80211_NUM_BANDS | 2367 | IEEE80211_STA_AUTO_SSID_SEL))) { |
3950 | * after we successfully scanned the last channel of the last | ||
3951 | * band (and the last band is supported by the hw) | ||
3952 | */ | ||
3953 | if (local->scan_band < IEEE80211_NUM_BANDS) | ||
3954 | sband = local->hw.wiphy->bands[local->scan_band]; | ||
3955 | else | ||
3956 | sband = NULL; | ||
3957 | |||
3958 | /* | ||
3959 | * If we are at an unsupported band and have more bands | ||
3960 | * left to scan, advance to the next supported one. | ||
3961 | */ | ||
3962 | while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) { | ||
3963 | local->scan_band++; | ||
3964 | sband = local->hw.wiphy->bands[local->scan_band]; | ||
3965 | local->scan_channel_idx = 0; | ||
3966 | } | ||
3967 | |||
3968 | /* if no more bands/channels left, complete scan */ | ||
3969 | if (!sband || local->scan_channel_idx >= sband->n_channels) { | ||
3970 | ieee80211_scan_completed(local_to_hw(local)); | ||
3971 | return; | ||
3972 | } | ||
3973 | skip = 0; | ||
3974 | chan = &sband->channels[local->scan_channel_idx]; | ||
3975 | |||
3976 | if (chan->flags & IEEE80211_CHAN_DISABLED || | ||
3977 | (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && | ||
3978 | chan->flags & IEEE80211_CHAN_NO_IBSS)) | ||
3979 | skip = 1; | ||
3980 | |||
3981 | if (!skip) { | ||
3982 | local->scan_channel = chan; | ||
3983 | if (ieee80211_hw_config(local)) { | ||
3984 | printk(KERN_DEBUG "%s: failed to set freq to " | ||
3985 | "%d MHz for scan\n", dev->name, | ||
3986 | chan->center_freq); | ||
3987 | skip = 1; | ||
3988 | } | ||
3989 | } | ||
3990 | |||
3991 | /* advance state machine to next channel/band */ | ||
3992 | local->scan_channel_idx++; | ||
3993 | if (local->scan_channel_idx >= sband->n_channels) { | ||
3994 | /* | ||
3995 | * scan_band may end up == IEEE80211_NUM_BANDS, but | ||
3996 | * we'll catch that case above and complete the scan | ||
3997 | * if that is the case. | ||
3998 | */ | ||
3999 | local->scan_band++; | ||
4000 | local->scan_channel_idx = 0; | ||
4001 | } | ||
4002 | |||
4003 | if (skip) | ||
4004 | break; | ||
4005 | 2368 | ||
4006 | next_delay = IEEE80211_PROBE_DELAY + | 2369 | if (ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) |
4007 | usecs_to_jiffies(local->hw.channel_change_time); | 2370 | ieee80211_set_disassoc(sdata, ifsta, true, true, |
4008 | local->scan_state = SCAN_SEND_PROBE; | 2371 | WLAN_REASON_DEAUTH_LEAVING); |
4009 | break; | ||
4010 | case SCAN_SEND_PROBE: | ||
4011 | next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; | ||
4012 | local->scan_state = SCAN_SET_CHANNEL; | ||
4013 | 2372 | ||
4014 | if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN) | 2373 | set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request); |
4015 | break; | 2374 | queue_work(local->hw.workqueue, &ifsta->work); |
4016 | ieee80211_send_probe_req(dev, NULL, local->scan_ssid, | ||
4017 | local->scan_ssid_len); | ||
4018 | next_delay = IEEE80211_CHANNEL_TIME; | ||
4019 | break; | ||
4020 | } | 2375 | } |
4021 | |||
4022 | if (local->sta_sw_scanning) | ||
4023 | queue_delayed_work(local->hw.workqueue, &local->scan_work, | ||
4024 | next_delay); | ||
4025 | } | 2376 | } |
4026 | 2377 | ||
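The software-scan machinery removed above (the new MLME code calls ieee80211_start_scan()/ieee80211_request_scan() instead) alternated between two states: SCAN_SET_CHANNEL tunes to the next usable channel and waits the probe delay, SCAN_SEND_PROBE transmits a probe request unless the channel is passive-scan only and then dwells, after which control returns to SCAN_SET_CHANNEL. The loop below models that alternation in plain C with prints standing in for the delayed work; the channel data and field names are illustrative:

#include <stdio.h>

struct scan_chan {
        int freq_mhz;
        int disabled;
        int passive;
};

enum scan_state { SCAN_SET_CHANNEL, SCAN_SEND_PROBE };

static void run_scan(const struct scan_chan *chans, int n)
{
        enum scan_state state = SCAN_SET_CHANNEL;
        int idx = 0;

        while (idx < n) {
                switch (state) {
                case SCAN_SET_CHANNEL:
                        if (chans[idx].disabled) {
                                idx++;          /* skip, stay in SET_CHANNEL */
                                break;
                        }
                        printf("tune to %d MHz, wait probe delay\n",
                               chans[idx].freq_mhz);
                        state = SCAN_SEND_PROBE;
                        break;
                case SCAN_SEND_PROBE:
                        if (!chans[idx].passive)
                                printf("  send probe request on %d MHz\n",
                                       chans[idx].freq_mhz);
                        printf("  dwell on %d MHz\n", chans[idx].freq_mhz);
                        idx++;
                        state = SCAN_SET_CHANNEL;
                        break;
                }
        }
        printf("scan complete\n");
}

int main(void)
{
        const struct scan_chan chans[] = {
                { 2412, 0, 0 },
                { 2437, 1, 0 },   /* disabled: skipped */
                { 2462, 0, 1 },   /* passive: dwell without probing */
        };

        run_scan(chans, 3);
        return 0;
}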
4027 | 2378 | int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len) | |
4028 | static int ieee80211_sta_start_scan(struct net_device *dev, | ||
4029 | u8 *ssid, size_t ssid_len) | ||
4030 | { | 2379 | { |
4031 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2380 | struct ieee80211_if_sta *ifsta; |
4032 | struct ieee80211_sub_if_data *sdata; | 2381 | int res; |
4033 | 2382 | ||
4034 | if (ssid_len > IEEE80211_MAX_SSID_LEN) | 2383 | if (len > IEEE80211_MAX_SSID_LEN) |
4035 | return -EINVAL; | 2384 | return -EINVAL; |
4036 | 2385 | ||
4037 | /* MLME-SCAN.request (page 118) page 144 (11.1.3.1) | 2386 | ifsta = &sdata->u.sta; |
4038 | * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS | ||
4039 | * BSSID: MACAddress | ||
4040 | * SSID | ||
4041 | * ScanType: ACTIVE, PASSIVE | ||
4042 | * ProbeDelay: delay (in microseconds) to be used prior to transmitting | ||
4043 | * a Probe frame during active scanning | ||
4044 | * ChannelList | ||
4045 | * MinChannelTime (>= ProbeDelay), in TU | ||
4046 | * MaxChannelTime: (>= MinChannelTime), in TU | ||
4047 | */ | ||
4048 | |||
4049 | /* MLME-SCAN.confirm | ||
4050 | * BSSDescriptionSet | ||
4051 | * ResultCode: SUCCESS, INVALID_PARAMETERS | ||
4052 | */ | ||
4053 | 2387 | ||
4054 | if (local->sta_sw_scanning || local->sta_hw_scanning) { | 2388 | if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) { |
4055 | if (local->scan_dev == dev) | 2389 | memset(ifsta->ssid, 0, sizeof(ifsta->ssid)); |
4056 | return 0; | 2390 | memcpy(ifsta->ssid, ssid, len); |
4057 | return -EBUSY; | 2391 | ifsta->ssid_len = len; |
4058 | } | 2392 | ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET; |
4059 | 2393 | ||
4060 | if (local->ops->hw_scan) { | 2394 | res = 0; |
4061 | int rc = local->ops->hw_scan(local_to_hw(local), | 2395 | /* |
4062 | ssid, ssid_len); | 2396 | * Hack! MLME code needs to be cleaned up to have different |
4063 | if (!rc) { | 2397 | * entry points for configuration and internal selection change |
4064 | local->sta_hw_scanning = 1; | 2398 | */ |
4065 | local->scan_dev = dev; | 2399 | if (netif_running(sdata->dev)) |
2400 | res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID); | ||
2401 | if (res) { | ||
2402 | printk(KERN_DEBUG "%s: Failed to config new SSID to " | ||
2403 | "the low-level driver\n", sdata->dev->name); | ||
2404 | return res; | ||
4066 | } | 2405 | } |
4067 | return rc; | ||
4068 | } | 2406 | } |
4069 | 2407 | ||
4070 | local->sta_sw_scanning = 1; | 2408 | if (len) |
2409 | ifsta->flags |= IEEE80211_STA_SSID_SET; | ||
2410 | else | ||
2411 | ifsta->flags &= ~IEEE80211_STA_SSID_SET; | ||
4071 | 2412 | ||
4072 | rcu_read_lock(); | 2413 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC && |
4073 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 2414 | !(ifsta->flags & IEEE80211_STA_BSSID_SET)) { |
4074 | netif_stop_queue(sdata->dev); | 2415 | ifsta->ibss_join_req = jiffies; |
4075 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA && | 2416 | ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH; |
4076 | (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED)) | 2417 | return ieee80211_sta_find_ibss(sdata, ifsta); |
4077 | ieee80211_send_nullfunc(local, sdata, 1); | ||
4078 | } | 2418 | } |
4079 | rcu_read_unlock(); | ||
4080 | |||
4081 | if (ssid) { | ||
4082 | local->scan_ssid_len = ssid_len; | ||
4083 | memcpy(local->scan_ssid, ssid, ssid_len); | ||
4084 | } else | ||
4085 | local->scan_ssid_len = 0; | ||
4086 | local->scan_state = SCAN_SET_CHANNEL; | ||
4087 | local->scan_channel_idx = 0; | ||
4088 | local->scan_band = IEEE80211_BAND_2GHZ; | ||
4089 | local->scan_dev = dev; | ||
4090 | |||
4091 | netif_addr_lock_bh(local->mdev); | ||
4092 | local->filter_flags |= FIF_BCN_PRBRESP_PROMISC; | ||
4093 | local->ops->configure_filter(local_to_hw(local), | ||
4094 | FIF_BCN_PRBRESP_PROMISC, | ||
4095 | &local->filter_flags, | ||
4096 | local->mdev->mc_count, | ||
4097 | local->mdev->mc_list); | ||
4098 | netif_addr_unlock_bh(local->mdev); | ||
4099 | |||
4100 | /* TODO: start scan as soon as all nullfunc frames are ACKed */ | ||
4101 | queue_delayed_work(local->hw.workqueue, &local->scan_work, | ||
4102 | IEEE80211_CHANNEL_TIME); | ||
4103 | 2419 | ||
4104 | return 0; | 2420 | return 0; |
4105 | } | 2421 | } |
4106 | 2422 | ||
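ieee80211_sta_set_ssid() copies the new SSID into ifsta and reconfigures the driver only when the value actually changed, records whether a non-empty SSID is configured via IEEE80211_STA_SSID_SET, and for ad-hoc interfaces without a fixed BSSID kicks off the IBSS search. The store-only-on-change idiom in standalone form; the struct and return convention are illustrative:

#include <stdio.h>
#include <string.h>

#define MAX_SSID_LEN 32

struct ssid_cfg {
        char ssid[MAX_SSID_LEN];
        size_t ssid_len;
        int ssid_set;
};

/* Returns 1 if the stored value changed, 0 if unchanged, -1 on bad input;
 * a change is the point at which the kernel would reconfigure the driver. */
static int set_ssid(struct ssid_cfg *cfg, const char *ssid, size_t len)
{
        int changed = 0;

        if (len > MAX_SSID_LEN)
                return -1;

        if (cfg->ssid_len != len || memcmp(cfg->ssid, ssid, len) != 0) {
                memset(cfg->ssid, 0, sizeof(cfg->ssid));
                memcpy(cfg->ssid, ssid, len);
                cfg->ssid_len = len;
                changed = 1;
        }

        cfg->ssid_set = len != 0;
        return changed;
}

int main(void)
{
        struct ssid_cfg cfg = { .ssid_len = 0 };

        printf("changed: %d\n", set_ssid(&cfg, "office", 6));
        printf("changed: %d\n", set_ssid(&cfg, "office", 6));  /* no-op */
        return 0;
}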
4107 | 2423 | int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len) | |
4108 | int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len) | ||
4109 | { | 2424 | { |
4110 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
4111 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 2425 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
4112 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2426 | memcpy(ssid, ifsta->ssid, ifsta->ssid_len); |
4113 | 2427 | *len = ifsta->ssid_len; | |
4114 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | ||
4115 | return ieee80211_sta_start_scan(dev, ssid, ssid_len); | ||
4116 | |||
4117 | if (local->sta_sw_scanning || local->sta_hw_scanning) { | ||
4118 | if (local->scan_dev == dev) | ||
4119 | return 0; | ||
4120 | return -EBUSY; | ||
4121 | } | ||
4122 | |||
4123 | ifsta->scan_ssid_len = ssid_len; | ||
4124 | if (ssid_len) | ||
4125 | memcpy(ifsta->scan_ssid, ssid, ssid_len); | ||
4126 | set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request); | ||
4127 | queue_work(local->hw.workqueue, &ifsta->work); | ||
4128 | return 0; | 2428 | return 0; |
4129 | } | 2429 | } |
4130 | 2430 | ||
4131 | static char * | 2431 | int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid) |
4132 | ieee80211_sta_scan_result(struct net_device *dev, | ||
4133 | struct iw_request_info *info, | ||
4134 | struct ieee80211_sta_bss *bss, | ||
4135 | char *current_ev, char *end_buf) | ||
4136 | { | 2432 | { |
4137 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2433 | struct ieee80211_if_sta *ifsta; |
4138 | struct iw_event iwe; | 2434 | int res; |
4139 | |||
4140 | if (time_after(jiffies, | ||
4141 | bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE)) | ||
4142 | return current_ev; | ||
4143 | |||
4144 | memset(&iwe, 0, sizeof(iwe)); | ||
4145 | iwe.cmd = SIOCGIWAP; | ||
4146 | iwe.u.ap_addr.sa_family = ARPHRD_ETHER; | ||
4147 | memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN); | ||
4148 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
4149 | IW_EV_ADDR_LEN); | ||
4150 | |||
4151 | memset(&iwe, 0, sizeof(iwe)); | ||
4152 | iwe.cmd = SIOCGIWESSID; | ||
4153 | if (bss_mesh_cfg(bss)) { | ||
4154 | iwe.u.data.length = bss_mesh_id_len(bss); | ||
4155 | iwe.u.data.flags = 1; | ||
4156 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4157 | &iwe, bss_mesh_id(bss)); | ||
4158 | } else { | ||
4159 | iwe.u.data.length = bss->ssid_len; | ||
4160 | iwe.u.data.flags = 1; | ||
4161 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4162 | &iwe, bss->ssid); | ||
4163 | } | ||
4164 | |||
4165 | if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) | ||
4166 | || bss_mesh_cfg(bss)) { | ||
4167 | memset(&iwe, 0, sizeof(iwe)); | ||
4168 | iwe.cmd = SIOCGIWMODE; | ||
4169 | if (bss_mesh_cfg(bss)) | ||
4170 | iwe.u.mode = IW_MODE_MESH; | ||
4171 | else if (bss->capability & WLAN_CAPABILITY_ESS) | ||
4172 | iwe.u.mode = IW_MODE_MASTER; | ||
4173 | else | ||
4174 | iwe.u.mode = IW_MODE_ADHOC; | ||
4175 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, | ||
4176 | &iwe, IW_EV_UINT_LEN); | ||
4177 | } | ||
4178 | |||
4179 | memset(&iwe, 0, sizeof(iwe)); | ||
4180 | iwe.cmd = SIOCGIWFREQ; | ||
4181 | iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq); | ||
4182 | iwe.u.freq.e = 0; | ||
4183 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
4184 | IW_EV_FREQ_LEN); | ||
4185 | |||
4186 | memset(&iwe, 0, sizeof(iwe)); | ||
4187 | iwe.cmd = SIOCGIWFREQ; | ||
4188 | iwe.u.freq.m = bss->freq; | ||
4189 | iwe.u.freq.e = 6; | ||
4190 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
4191 | IW_EV_FREQ_LEN); | ||
4192 | memset(&iwe, 0, sizeof(iwe)); | ||
4193 | iwe.cmd = IWEVQUAL; | ||
4194 | iwe.u.qual.qual = bss->qual; | ||
4195 | iwe.u.qual.level = bss->signal; | ||
4196 | iwe.u.qual.noise = bss->noise; | ||
4197 | iwe.u.qual.updated = local->wstats_flags; | ||
4198 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
4199 | IW_EV_QUAL_LEN); | ||
4200 | |||
4201 | memset(&iwe, 0, sizeof(iwe)); | ||
4202 | iwe.cmd = SIOCGIWENCODE; | ||
4203 | if (bss->capability & WLAN_CAPABILITY_PRIVACY) | ||
4204 | iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; | ||
4205 | else | ||
4206 | iwe.u.data.flags = IW_ENCODE_DISABLED; | ||
4207 | iwe.u.data.length = 0; | ||
4208 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4209 | &iwe, ""); | ||
4210 | |||
4211 | if (bss && bss->wpa_ie) { | ||
4212 | memset(&iwe, 0, sizeof(iwe)); | ||
4213 | iwe.cmd = IWEVGENIE; | ||
4214 | iwe.u.data.length = bss->wpa_ie_len; | ||
4215 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4216 | &iwe, bss->wpa_ie); | ||
4217 | } | ||
4218 | |||
4219 | if (bss && bss->rsn_ie) { | ||
4220 | memset(&iwe, 0, sizeof(iwe)); | ||
4221 | iwe.cmd = IWEVGENIE; | ||
4222 | iwe.u.data.length = bss->rsn_ie_len; | ||
4223 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4224 | &iwe, bss->rsn_ie); | ||
4225 | } | ||
4226 | |||
4227 | if (bss && bss->ht_ie) { | ||
4228 | memset(&iwe, 0, sizeof(iwe)); | ||
4229 | iwe.cmd = IWEVGENIE; | ||
4230 | iwe.u.data.length = bss->ht_ie_len; | ||
4231 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
4232 | &iwe, bss->ht_ie); | ||
4233 | } | ||
4234 | |||
4235 | if (bss && bss->supp_rates_len > 0) { | ||
4236 | /* display all supported rates in readable format */ | ||
4237 | char *p = current_ev + iwe_stream_lcp_len(info); | ||
4238 | int i; | ||
4239 | |||
4240 | memset(&iwe, 0, sizeof(iwe)); | ||
4241 | iwe.cmd = SIOCGIWRATE; | ||
4242 | /* Those two flags are ignored... */ | ||
4243 | iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; | ||
4244 | |||
4245 | for (i = 0; i < bss->supp_rates_len; i++) { | ||
4246 | iwe.u.bitrate.value = ((bss->supp_rates[i] & | ||
4247 | 0x7f) * 500000); | ||
4248 | p = iwe_stream_add_value(info, current_ev, p, | ||
4249 | end_buf, &iwe, IW_EV_PARAM_LEN); | ||
4250 | } | ||
4251 | current_ev = p; | ||
4252 | } | ||
4253 | 2435 | ||
4254 | if (bss) { | 2436 | ifsta = &sdata->u.sta; |
4255 | char *buf; | ||
4256 | buf = kmalloc(30, GFP_ATOMIC); | ||
4257 | if (buf) { | ||
4258 | memset(&iwe, 0, sizeof(iwe)); | ||
4259 | iwe.cmd = IWEVCUSTOM; | ||
4260 | sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp)); | ||
4261 | iwe.u.data.length = strlen(buf); | ||
4262 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4263 | end_buf, | ||
4264 | &iwe, buf); | ||
4265 | memset(&iwe, 0, sizeof(iwe)); | ||
4266 | iwe.cmd = IWEVCUSTOM; | ||
4267 | sprintf(buf, " Last beacon: %dms ago", | ||
4268 | jiffies_to_msecs(jiffies - bss->last_update)); | ||
4269 | iwe.u.data.length = strlen(buf); | ||
4270 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4271 | end_buf, &iwe, buf); | ||
4272 | kfree(buf); | ||
4273 | } | ||
4274 | } | ||
4275 | 2437 | ||
4276 | if (bss_mesh_cfg(bss)) { | 2438 | if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) { |
4277 | char *buf; | 2439 | memcpy(ifsta->bssid, bssid, ETH_ALEN); |
4278 | u8 *cfg = bss_mesh_cfg(bss); | 2440 | res = 0; |
4279 | buf = kmalloc(50, GFP_ATOMIC); | 2441 | /* |
4280 | if (buf) { | 2442 | * Hack! See also ieee80211_sta_set_ssid. |
4281 | memset(&iwe, 0, sizeof(iwe)); | 2443 | */ |
4282 | iwe.cmd = IWEVCUSTOM; | 2444 | if (netif_running(sdata->dev)) |
4283 | sprintf(buf, "Mesh network (version %d)", cfg[0]); | 2445 | res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID); |
4284 | iwe.u.data.length = strlen(buf); | 2446 | if (res) { |
4285 | current_ev = iwe_stream_add_point(info, current_ev, | 2447 | printk(KERN_DEBUG "%s: Failed to config new BSSID to " |
4286 | end_buf, | 2448 | "the low-level driver\n", sdata->dev->name); |
4287 | &iwe, buf); | 2449 | return res; |
4288 | sprintf(buf, "Path Selection Protocol ID: " | ||
4289 | "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3], | ||
4290 | cfg[4]); | ||
4291 | iwe.u.data.length = strlen(buf); | ||
4292 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4293 | end_buf, | ||
4294 | &iwe, buf); | ||
4295 | sprintf(buf, "Path Selection Metric ID: " | ||
4296 | "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7], | ||
4297 | cfg[8]); | ||
4298 | iwe.u.data.length = strlen(buf); | ||
4299 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4300 | end_buf, | ||
4301 | &iwe, buf); | ||
4302 | sprintf(buf, "Congestion Control Mode ID: " | ||
4303 | "0x%02X%02X%02X%02X", cfg[9], cfg[10], | ||
4304 | cfg[11], cfg[12]); | ||
4305 | iwe.u.data.length = strlen(buf); | ||
4306 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4307 | end_buf, | ||
4308 | &iwe, buf); | ||
4309 | sprintf(buf, "Channel Precedence: " | ||
4310 | "0x%02X%02X%02X%02X", cfg[13], cfg[14], | ||
4311 | cfg[15], cfg[16]); | ||
4312 | iwe.u.data.length = strlen(buf); | ||
4313 | current_ev = iwe_stream_add_point(info, current_ev, | ||
4314 | end_buf, | ||
4315 | &iwe, buf); | ||
4316 | kfree(buf); | ||
4317 | } | 2450 | } |
4318 | } | 2451 | } |
4319 | 2452 | ||
4320 | return current_ev; | 2453 | if (is_valid_ether_addr(bssid)) |
4321 | } | 2454 | ifsta->flags |= IEEE80211_STA_BSSID_SET; |
4322 | 2455 | else | |
2456 | ifsta->flags &= ~IEEE80211_STA_BSSID_SET; | ||
4323 | 2457 | ||
4324 | int ieee80211_sta_scan_results(struct net_device *dev, | 2458 | return 0; |
4325 | struct iw_request_info *info, | ||
4326 | char *buf, size_t len) | ||
4327 | { | ||
4328 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
4329 | char *current_ev = buf; | ||
4330 | char *end_buf = buf + len; | ||
4331 | struct ieee80211_sta_bss *bss; | ||
4332 | |||
4333 | spin_lock_bh(&local->sta_bss_lock); | ||
4334 | list_for_each_entry(bss, &local->sta_bss_list, list) { | ||
4335 | if (buf + len - current_ev <= IW_EV_ADDR_LEN) { | ||
4336 | spin_unlock_bh(&local->sta_bss_lock); | ||
4337 | return -E2BIG; | ||
4338 | } | ||
4339 | current_ev = ieee80211_sta_scan_result(dev, info, bss, | ||
4340 | current_ev, end_buf); | ||
4341 | } | ||
4342 | spin_unlock_bh(&local->sta_bss_lock); | ||
4343 | return current_ev - buf; | ||
4344 | } | 2459 | } |
4345 | 2460 | ||
4346 | 2461 | int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len) | |
4347 | int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len) | ||
4348 | { | 2462 | { |
4349 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
4350 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 2463 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
4351 | 2464 | ||
4352 | kfree(ifsta->extra_ie); | 2465 | kfree(ifsta->extra_ie); |
@@ -4365,92 +2478,60 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len) | |||
4365 | return 0; | 2478 | return 0; |
4366 | } | 2479 | } |
4367 | 2480 | ||
4368 | 2481 | int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason) | |
4369 | struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev, | ||
4370 | struct sk_buff *skb, u8 *bssid, | ||
4371 | u8 *addr, u64 supp_rates) | ||
4372 | { | ||
4373 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
4374 | struct sta_info *sta; | ||
4375 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
4376 | DECLARE_MAC_BUF(mac); | ||
4377 | int band = local->hw.conf.channel->band; | ||
4378 | |||
4379 | /* TODO: Could consider removing the least recently used entry and | ||
4380 | * allow new one to be added. */ | ||
4381 | if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { | ||
4382 | if (net_ratelimit()) { | ||
4383 | printk(KERN_DEBUG "%s: No room for a new IBSS STA " | ||
4384 | "entry %s\n", dev->name, print_mac(mac, addr)); | ||
4385 | } | ||
4386 | return NULL; | ||
4387 | } | ||
4388 | |||
4389 | if (compare_ether_addr(bssid, sdata->u.sta.bssid)) | ||
4390 | return NULL; | ||
4391 | |||
4392 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
4393 | printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n", | ||
4394 | wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name); | ||
4395 | #endif | ||
4396 | |||
4397 | sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); | ||
4398 | if (!sta) | ||
4399 | return NULL; | ||
4400 | |||
4401 | set_sta_flags(sta, WLAN_STA_AUTHORIZED); | ||
4402 | |||
4403 | if (supp_rates) | ||
4404 | sta->supp_rates[band] = supp_rates; | ||
4405 | else | ||
4406 | sta->supp_rates[band] = sdata->u.sta.supp_rates_bits[band]; | ||
4407 | |||
4408 | rate_control_rate_init(sta, local); | ||
4409 | |||
4410 | if (sta_info_insert(sta)) | ||
4411 | return NULL; | ||
4412 | |||
4413 | return sta; | ||
4414 | } | ||
4415 | |||
4416 | |||
4417 | int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason) | ||
4418 | { | 2482 | { |
4419 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
4420 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 2483 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
4421 | 2484 | ||
4422 | printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n", | 2485 | printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n", |
4423 | dev->name, reason); | 2486 | sdata->dev->name, reason); |
4424 | 2487 | ||
4425 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && | 2488 | if (sdata->vif.type != NL80211_IFTYPE_STATION && |
4426 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS) | 2489 | sdata->vif.type != NL80211_IFTYPE_ADHOC) |
4427 | return -EINVAL; | 2490 | return -EINVAL; |
4428 | 2491 | ||
4429 | ieee80211_send_deauth(dev, ifsta, reason); | 2492 | ieee80211_set_disassoc(sdata, ifsta, true, true, reason); |
4430 | ieee80211_set_disassoc(dev, ifsta, 1); | ||
4431 | return 0; | 2493 | return 0; |
4432 | } | 2494 | } |
4433 | 2495 | ||
4434 | 2496 | int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason) | |
4435 | int ieee80211_sta_disassociate(struct net_device *dev, u16 reason) | ||
4436 | { | 2497 | { |
4437 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
4438 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 2498 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
4439 | 2499 | ||
4440 | printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n", | 2500 | printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n", |
4441 | dev->name, reason); | 2501 | sdata->dev->name, reason); |
4442 | 2502 | ||
4443 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | 2503 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
4444 | return -EINVAL; | 2504 | return -EINVAL; |
4445 | 2505 | ||
4446 | if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED)) | 2506 | if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED)) |
4447 | return -1; | 2507 | return -1; |
4448 | 2508 | ||
4449 | ieee80211_send_disassoc(dev, ifsta, reason); | 2509 | ieee80211_set_disassoc(sdata, ifsta, false, true, reason); |
4450 | ieee80211_set_disassoc(dev, ifsta, 0); | ||
4451 | return 0; | 2510 | return 0; |
4452 | } | 2511 | } |
4453 | 2512 | ||
2513 | /* scan finished notification */ | ||
2514 | void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) | ||
2515 | { | ||
2516 | struct ieee80211_sub_if_data *sdata = local->scan_sdata; | ||
2517 | struct ieee80211_if_sta *ifsta; | ||
2518 | |||
2519 | if (sdata && sdata->vif.type == NL80211_IFTYPE_ADHOC) { | ||
2520 | ifsta = &sdata->u.sta; | ||
2521 | if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) || | ||
2522 | (!(ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED) && | ||
2523 | !ieee80211_sta_active_ibss(sdata))) | ||
2524 | ieee80211_sta_find_ibss(sdata, ifsta); | ||
2525 | } | ||
2526 | |||
2527 | /* Restart STA timers */ | ||
2528 | rcu_read_lock(); | ||
2529 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
2530 | ieee80211_restart_sta_timer(sdata); | ||
2531 | rcu_read_unlock(); | ||
2532 | } | ||
2533 | |||
2534 | /* driver notification call */ | ||
4454 | void ieee80211_notify_mac(struct ieee80211_hw *hw, | 2535 | void ieee80211_notify_mac(struct ieee80211_hw *hw, |
4455 | enum ieee80211_notification_types notif_type) | 2536 | enum ieee80211_notification_types notif_type) |
4456 | { | 2537 | { |
@@ -4461,10 +2542,10 @@ void ieee80211_notify_mac(struct ieee80211_hw *hw, | |||
4461 | case IEEE80211_NOTIFY_RE_ASSOC: | 2542 | case IEEE80211_NOTIFY_RE_ASSOC: |
4462 | rcu_read_lock(); | 2543 | rcu_read_lock(); |
4463 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 2544 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
4464 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | 2545 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
4465 | continue; | 2546 | continue; |
4466 | 2547 | ||
4467 | ieee80211_sta_req_auth(sdata->dev, &sdata->u.sta); | 2548 | ieee80211_sta_req_auth(sdata, &sdata->u.sta); |
4468 | } | 2549 | } |
4469 | rcu_read_unlock(); | 2550 | rcu_read_unlock(); |
4470 | break; | 2551 | break; |
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 0388c090dfe9..5d786720d935 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/rtnetlink.h> | 12 | #include <linux/rtnetlink.h> |
13 | #include "rate.h" | 13 | #include "rate.h" |
14 | #include "ieee80211_i.h" | 14 | #include "ieee80211_i.h" |
15 | #include "debugfs.h" | ||
15 | 16 | ||
16 | struct rate_control_alg { | 17 | struct rate_control_alg { |
17 | struct list_head list; | 18 | struct list_head list; |
@@ -127,19 +128,46 @@ static void ieee80211_rate_control_ops_put(struct rate_control_ops *ops) | |||
127 | module_put(ops->module); | 128 | module_put(ops->module); |
128 | } | 129 | } |
129 | 130 | ||
131 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
132 | static ssize_t rcname_read(struct file *file, char __user *userbuf, | ||
133 | size_t count, loff_t *ppos) | ||
134 | { | ||
135 | struct rate_control_ref *ref = file->private_data; | ||
136 | int len = strlen(ref->ops->name); | ||
137 | |||
138 | return simple_read_from_buffer(userbuf, count, ppos, | ||
139 | ref->ops->name, len); | ||
140 | } | ||
141 | |||
142 | static const struct file_operations rcname_ops = { | ||
143 | .read = rcname_read, | ||
144 | .open = mac80211_open_file_generic, | ||
145 | }; | ||
146 | #endif | ||
147 | |||
130 | struct rate_control_ref *rate_control_alloc(const char *name, | 148 | struct rate_control_ref *rate_control_alloc(const char *name, |
131 | struct ieee80211_local *local) | 149 | struct ieee80211_local *local) |
132 | { | 150 | { |
151 | struct dentry *debugfsdir = NULL; | ||
133 | struct rate_control_ref *ref; | 152 | struct rate_control_ref *ref; |
134 | 153 | ||
135 | ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL); | 154 | ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL); |
136 | if (!ref) | 155 | if (!ref) |
137 | goto fail_ref; | 156 | goto fail_ref; |
138 | kref_init(&ref->kref); | 157 | kref_init(&ref->kref); |
158 | ref->local = local; | ||
139 | ref->ops = ieee80211_rate_control_ops_get(name); | 159 | ref->ops = ieee80211_rate_control_ops_get(name); |
140 | if (!ref->ops) | 160 | if (!ref->ops) |
141 | goto fail_ops; | 161 | goto fail_ops; |
142 | ref->priv = ref->ops->alloc(local); | 162 | |
163 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
164 | debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir); | ||
165 | local->debugfs.rcdir = debugfsdir; | ||
166 | local->debugfs.rcname = debugfs_create_file("name", 0400, debugfsdir, | ||
167 | ref, &rcname_ops); | ||
168 | #endif | ||
169 | |||
170 | ref->priv = ref->ops->alloc(&local->hw, debugfsdir); | ||
143 | if (!ref->priv) | 171 | if (!ref->priv) |
144 | goto fail_priv; | 172 | goto fail_priv; |
145 | return ref; | 173 | return ref; |
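The hunk above exposes the active rate-control algorithm through debugfs: a per-wiphy 'rc' directory with a read-only 'name' file (mode 0400) whose read handler hands ref->ops->name to simple_read_from_buffer(). That helper does an offset-aware, bounded copy to the user buffer and advances the file position. Below is a userspace re-implementation of those semantics for illustration only; it is a model, not the kernel symbol:

#include <stdio.h>
#include <string.h>

/* Copy at most 'count' bytes from 'from' (of size 'available') starting at
 * *ppos, advance *ppos, and return the number of bytes copied (0 at EOF). */
static long model_read_from_buffer(char *to, size_t count, long *ppos,
                                   const char *from, size_t available)
{
        long pos = *ppos;

        if (pos < 0)
                return -1;
        if ((size_t)pos >= available || !count)
                return 0;
        if (count > available - (size_t)pos)
                count = available - (size_t)pos;
        memcpy(to, from + pos, count);
        *ppos = pos + (long)count;
        return (long)count;
}

int main(void)
{
        const char *name = "pid";          /* e.g. a rate-control algorithm name */
        char buf[2];
        long pos = 0, n;

        /* Read the "file" two bytes at a time, as a pager might. */
        while ((n = model_read_from_buffer(buf, sizeof(buf), &pos,
                                           name, strlen(name))) > 0)
                printf("read %ld byte(s): %.*s\n", n, (int)n, buf);
        return 0;
}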
@@ -158,29 +186,46 @@ static void rate_control_release(struct kref *kref) | |||
158 | 186 | ||
159 | ctrl_ref = container_of(kref, struct rate_control_ref, kref); | 187 | ctrl_ref = container_of(kref, struct rate_control_ref, kref); |
160 | ctrl_ref->ops->free(ctrl_ref->priv); | 188 | ctrl_ref->ops->free(ctrl_ref->priv); |
189 | |||
190 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
191 | debugfs_remove(ctrl_ref->local->debugfs.rcname); | ||
192 | ctrl_ref->local->debugfs.rcname = NULL; | ||
193 | debugfs_remove(ctrl_ref->local->debugfs.rcdir); | ||
194 | ctrl_ref->local->debugfs.rcdir = NULL; | ||
195 | #endif | ||
196 | |||
161 | ieee80211_rate_control_ops_put(ctrl_ref->ops); | 197 | ieee80211_rate_control_ops_put(ctrl_ref->ops); |
162 | kfree(ctrl_ref); | 198 | kfree(ctrl_ref); |
163 | } | 199 | } |
164 | 200 | ||
165 | void rate_control_get_rate(struct net_device *dev, | 201 | void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, |
166 | struct ieee80211_supported_band *sband, | 202 | struct ieee80211_supported_band *sband, |
167 | struct sk_buff *skb, | 203 | struct sta_info *sta, struct sk_buff *skb, |
168 | struct rate_selection *sel) | 204 | struct rate_selection *sel) |
169 | { | 205 | { |
170 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 206 | struct rate_control_ref *ref = sdata->local->rate_ctrl; |
171 | struct rate_control_ref *ref = local->rate_ctrl; | 207 | void *priv_sta = NULL; |
172 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 208 | struct ieee80211_sta *ista = NULL; |
173 | struct sta_info *sta; | ||
174 | int i; | 209 | int i; |
175 | 210 | ||
176 | rcu_read_lock(); | ||
177 | sta = sta_info_get(local, hdr->addr1); | ||
178 | |||
179 | sel->rate_idx = -1; | 211 | sel->rate_idx = -1; |
180 | sel->nonerp_idx = -1; | 212 | sel->nonerp_idx = -1; |
181 | sel->probe_idx = -1; | 213 | sel->probe_idx = -1; |
214 | sel->max_rate_idx = sdata->max_ratectrl_rateidx; | ||
215 | |||
216 | if (sta) { | ||
217 | ista = &sta->sta; | ||
218 | priv_sta = sta->rate_ctrl_priv; | ||
219 | } | ||
220 | |||
221 | if (sta && sdata->force_unicast_rateidx > -1) | ||
222 | sel->rate_idx = sdata->force_unicast_rateidx; | ||
223 | else | ||
224 | ref->ops->get_rate(ref->priv, sband, ista, priv_sta, skb, sel); | ||
182 | 225 | ||
183 | ref->ops->get_rate(ref->priv, dev, sband, skb, sel); | 226 | if (sdata->max_ratectrl_rateidx > -1 && |
227 | sel->rate_idx > sdata->max_ratectrl_rateidx) | ||
228 | sel->rate_idx = sdata->max_ratectrl_rateidx; | ||
184 | 229 | ||
185 | BUG_ON(sel->rate_idx < 0); | 230 | BUG_ON(sel->rate_idx < 0); |
186 | 231 | ||
@@ -191,13 +236,11 @@ void rate_control_get_rate(struct net_device *dev, | |||
191 | if (sband->bitrates[sel->rate_idx].bitrate < rate->bitrate) | 236 | if (sband->bitrates[sel->rate_idx].bitrate < rate->bitrate) |
192 | break; | 237 | break; |
193 | 238 | ||
194 | if (rate_supported(sta, sband->band, i) && | 239 | if (rate_supported(ista, sband->band, i) && |
195 | !(rate->flags & IEEE80211_RATE_ERP_G)) | 240 | !(rate->flags & IEEE80211_RATE_ERP_G)) |
196 | sel->nonerp_idx = i; | 241 | sel->nonerp_idx = i; |
197 | } | 242 | } |
198 | } | 243 | } |
199 | |||
200 | rcu_read_unlock(); | ||
201 | } | 244 | } |
202 | 245 | ||
203 | struct rate_control_ref *rate_control_get(struct rate_control_ref *ref) | 246 | struct rate_control_ref *rate_control_get(struct rate_control_ref *ref) |
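rate_control_get_rate() now receives the sdata and sta directly instead of looking the station up by hdr->addr1 under RCU, and it layers two per-interface overrides on top of the algorithm's answer: force_unicast_rateidx replaces the chosen index for unicast traffic, and max_ratectrl_rateidx caps it. A small sketch of that override-then-clamp logic; the function itself is illustrative even though the parameter names mirror the fields above:

#include <stdio.h>

/* A value of -1 means "not configured", as in the sdata fields above. */
static int pick_rate_idx(int algo_choice, int have_sta,
                         int force_unicast_rateidx, int max_ratectrl_rateidx)
{
        int idx;

        if (have_sta && force_unicast_rateidx > -1)
                idx = force_unicast_rateidx;
        else
                idx = algo_choice;

        if (max_ratectrl_rateidx > -1 && idx > max_ratectrl_rateidx)
                idx = max_ratectrl_rateidx;

        return idx;
}

int main(void)
{
        /* Algorithm wanted index 7, interface caps rates at index 4. */
        printf("chosen index: %d\n", pick_rate_idx(7, 1, -1, 4));
        /* Forced unicast index 2 wins over the algorithm's choice. */
        printf("chosen index: %d\n", pick_rate_idx(7, 1, 2, 4));
        return 0;
}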
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index ede7ab56f65b..eb94e584d24e 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h | |||
@@ -19,77 +19,48 @@ | |||
19 | #include "ieee80211_i.h" | 19 | #include "ieee80211_i.h" |
20 | #include "sta_info.h" | 20 | #include "sta_info.h" |
21 | 21 | ||
22 | /** | ||
23 | * struct rate_selection - rate selection for rate control algos | ||
24 | * @rate: selected transmission rate index | ||
25 | * @nonerp: Non-ERP rate to use instead if ERP cannot be used | ||
26 | * @probe: rate for probing (or -1) | ||
27 | * | ||
28 | */ | ||
29 | struct rate_selection { | ||
30 | s8 rate_idx, nonerp_idx, probe_idx; | ||
31 | }; | ||
32 | |||
33 | struct rate_control_ops { | ||
34 | struct module *module; | ||
35 | const char *name; | ||
36 | void (*tx_status)(void *priv, struct net_device *dev, | ||
37 | struct sk_buff *skb); | ||
38 | void (*get_rate)(void *priv, struct net_device *dev, | ||
39 | struct ieee80211_supported_band *band, | ||
40 | struct sk_buff *skb, | ||
41 | struct rate_selection *sel); | ||
42 | void (*rate_init)(void *priv, void *priv_sta, | ||
43 | struct ieee80211_local *local, struct sta_info *sta); | ||
44 | void (*clear)(void *priv); | ||
45 | |||
46 | void *(*alloc)(struct ieee80211_local *local); | ||
47 | void (*free)(void *priv); | ||
48 | void *(*alloc_sta)(void *priv, gfp_t gfp); | ||
49 | void (*free_sta)(void *priv, void *priv_sta); | ||
50 | |||
51 | int (*add_attrs)(void *priv, struct kobject *kobj); | ||
52 | void (*remove_attrs)(void *priv, struct kobject *kobj); | ||
53 | void (*add_sta_debugfs)(void *priv, void *priv_sta, | ||
54 | struct dentry *dir); | ||
55 | void (*remove_sta_debugfs)(void *priv, void *priv_sta); | ||
56 | }; | ||
57 | |||
58 | struct rate_control_ref { | 22 | struct rate_control_ref { |
23 | struct ieee80211_local *local; | ||
59 | struct rate_control_ops *ops; | 24 | struct rate_control_ops *ops; |
60 | void *priv; | 25 | void *priv; |
61 | struct kref kref; | 26 | struct kref kref; |
62 | }; | 27 | }; |
63 | 28 | ||
64 | int ieee80211_rate_control_register(struct rate_control_ops *ops); | ||
65 | void ieee80211_rate_control_unregister(struct rate_control_ops *ops); | ||
66 | |||
67 | /* Get a reference to the rate control algorithm. If `name' is NULL, get the | 29 | /* Get a reference to the rate control algorithm. If `name' is NULL, get the |
68 | * first available algorithm. */ | 30 | * first available algorithm. */ |
69 | struct rate_control_ref *rate_control_alloc(const char *name, | 31 | struct rate_control_ref *rate_control_alloc(const char *name, |
70 | struct ieee80211_local *local); | 32 | struct ieee80211_local *local); |
71 | void rate_control_get_rate(struct net_device *dev, | 33 | void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, |
72 | struct ieee80211_supported_band *sband, | 34 | struct ieee80211_supported_band *sband, |
73 | struct sk_buff *skb, | 35 | struct sta_info *sta, struct sk_buff *skb, |
74 | struct rate_selection *sel); | 36 | struct rate_selection *sel); |
75 | struct rate_control_ref *rate_control_get(struct rate_control_ref *ref); | 37 | struct rate_control_ref *rate_control_get(struct rate_control_ref *ref); |
76 | void rate_control_put(struct rate_control_ref *ref); | 38 | void rate_control_put(struct rate_control_ref *ref); |
77 | 39 | ||
78 | static inline void rate_control_tx_status(struct net_device *dev, | 40 | static inline void rate_control_tx_status(struct ieee80211_local *local, |
41 | struct ieee80211_supported_band *sband, | ||
42 | struct sta_info *sta, | ||
79 | struct sk_buff *skb) | 43 | struct sk_buff *skb) |
80 | { | 44 | { |
81 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
82 | struct rate_control_ref *ref = local->rate_ctrl; | 45 | struct rate_control_ref *ref = local->rate_ctrl; |
46 | struct ieee80211_sta *ista = &sta->sta; | ||
47 | void *priv_sta = sta->rate_ctrl_priv; | ||
83 | 48 | ||
84 | ref->ops->tx_status(ref->priv, dev, skb); | 49 | ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); |
85 | } | 50 | } |
86 | 51 | ||
87 | 52 | ||
88 | static inline void rate_control_rate_init(struct sta_info *sta, | 53 | static inline void rate_control_rate_init(struct sta_info *sta) |
89 | struct ieee80211_local *local) | ||
90 | { | 54 | { |
55 | struct ieee80211_local *local = sta->sdata->local; | ||
91 | struct rate_control_ref *ref = sta->rate_ctrl; | 56 | struct rate_control_ref *ref = sta->rate_ctrl; |
92 | ref->ops->rate_init(ref->priv, sta->rate_ctrl_priv, local, sta); | 57 | struct ieee80211_sta *ista = &sta->sta; |
58 | void *priv_sta = sta->rate_ctrl_priv; | ||
59 | struct ieee80211_supported_band *sband; | ||
60 | |||
61 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
62 | |||
63 | ref->ops->rate_init(ref->priv, sband, ista, priv_sta); | ||
93 | } | 64 | } |
94 | 65 | ||
95 | 66 | ||
@@ -100,15 +71,19 @@ static inline void rate_control_clear(struct ieee80211_local *local) | |||
100 | } | 71 | } |
101 | 72 | ||
102 | static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, | 73 | static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, |
74 | struct ieee80211_sta *sta, | ||
103 | gfp_t gfp) | 75 | gfp_t gfp) |
104 | { | 76 | { |
105 | return ref->ops->alloc_sta(ref->priv, gfp); | 77 | return ref->ops->alloc_sta(ref->priv, sta, gfp); |
106 | } | 78 | } |
107 | 79 | ||
108 | static inline void rate_control_free_sta(struct rate_control_ref *ref, | 80 | static inline void rate_control_free_sta(struct sta_info *sta) |
109 | void *priv) | ||
110 | { | 81 | { |
111 | ref->ops->free_sta(ref->priv, priv); | 82 | struct rate_control_ref *ref = sta->rate_ctrl; |
83 | struct ieee80211_sta *ista = &sta->sta; | ||
84 | void *priv_sta = sta->rate_ctrl_priv; | ||
85 | |||
86 | ref->ops->free_sta(ref->priv, ista, priv_sta); | ||
112 | } | 87 | } |
113 | 88 | ||
114 | static inline void rate_control_add_sta_debugfs(struct sta_info *sta) | 89 | static inline void rate_control_add_sta_debugfs(struct sta_info *sta) |
@@ -130,31 +105,6 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta) | |||
130 | #endif | 105 | #endif |
131 | } | 106 | } |
132 | 107 | ||
133 | static inline int rate_supported(struct sta_info *sta, | ||
134 | enum ieee80211_band band, | ||
135 | int index) | ||
136 | { | ||
137 | return (sta == NULL || sta->supp_rates[band] & BIT(index)); | ||
138 | } | ||
139 | |||
140 | static inline s8 | ||
141 | rate_lowest_index(struct ieee80211_local *local, | ||
142 | struct ieee80211_supported_band *sband, | ||
143 | struct sta_info *sta) | ||
144 | { | ||
145 | int i; | ||
146 | |||
147 | for (i = 0; i < sband->n_bitrates; i++) | ||
148 | if (rate_supported(sta, sband->band, i)) | ||
149 | return i; | ||
150 | |||
151 | /* warn when we cannot find a rate. */ | ||
152 | WARN_ON(1); | ||
153 | |||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | |||
158 | /* functions for rate control related to a device */ | 108 | /* functions for rate control related to a device */ |
159 | int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, | 109 | int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, |
160 | const char *name); | 110 | const char *name); |
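The hunk above removes struct rate_control_ops together with the rate_supported() and rate_lowest_index() helpers from rate.h; their relocated definitions are not included in this section. For readability, the reworked callback signatures can be pieced together from the call sites visible in these hunks. The sketch below is that inference, not a copy of the relocated header, and the argument names are guesses.

	/* Reconstructed from the call sites above; debugfs hooks omitted
	 * because their new signatures are not visible in these hunks. */
	struct rate_control_ops {
		struct module *module;
		const char *name;

		void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir);
		void (*free)(void *priv);
		void (*clear)(void *priv);

		void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp);
		void (*free_sta)(void *priv, struct ieee80211_sta *sta,
				 void *priv_sta);
		void (*rate_init)(void *priv, struct ieee80211_supported_band *sband,
				  struct ieee80211_sta *sta, void *priv_sta);

		void (*tx_status)(void *priv, struct ieee80211_supported_band *sband,
				  struct ieee80211_sta *sta, void *priv_sta,
				  struct sk_buff *skb);
		void (*get_rate)(void *priv, struct ieee80211_supported_band *sband,
				 struct ieee80211_sta *sta, void *priv_sta,
				 struct sk_buff *skb, struct rate_selection *sel);
	};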
diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h index 0a9135b974b5..01d64d53f3b9 100644 --- a/net/mac80211/rc80211_pid.h +++ b/net/mac80211/rc80211_pid.h | |||
@@ -124,7 +124,6 @@ struct rc_pid_events_file_info { | |||
124 | * struct rc_pid_debugfs_entries - tunable parameters | 124 | * struct rc_pid_debugfs_entries - tunable parameters |
125 | * | 125 | * |
126 | * Algorithm parameters, tunable via debugfs. | 126 | * Algorithm parameters, tunable via debugfs. |
127 | * @dir: the debugfs directory for a specific phy | ||
128 | * @target: target percentage for failed frames | 127 | * @target: target percentage for failed frames |
129 | * @sampling_period: error sampling interval in milliseconds | 128 | * @sampling_period: error sampling interval in milliseconds |
130 | * @coeff_p: absolute value of the proportional coefficient | 129 | * @coeff_p: absolute value of the proportional coefficient |
@@ -143,7 +142,6 @@ struct rc_pid_events_file_info { | |||
143 | * ordering of rates) | 142 | * ordering of rates) |
144 | */ | 143 | */ |
145 | struct rc_pid_debugfs_entries { | 144 | struct rc_pid_debugfs_entries { |
146 | struct dentry *dir; | ||
147 | struct dentry *target; | 145 | struct dentry *target; |
148 | struct dentry *sampling_period; | 146 | struct dentry *sampling_period; |
149 | struct dentry *coeff_p; | 147 | struct dentry *coeff_p; |
@@ -180,6 +178,8 @@ struct rc_pid_sta_info { | |||
180 | u32 tx_num_failed; | 178 | u32 tx_num_failed; |
181 | u32 tx_num_xmit; | 179 | u32 tx_num_xmit; |
182 | 180 | ||
181 | int txrate_idx; | ||
182 | |||
183 | /* Average failed frames percentage error (i.e. actual vs. target | 183 | /* Average failed frames percentage error (i.e. actual vs. target |
184 | * percentage), scaled by RC_PID_SMOOTHING. This value is computed | 184 | * percentage), scaled by RC_PID_SMOOTHING. This value is computed |
185 | * using an exponential weighted average technique: | 185 | * using an exponential weighted average technique: |
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index a914ba73ccf5..86eb374e3b87 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c | |||
@@ -68,17 +68,14 @@ | |||
68 | * exhibited a worse failed frames behaviour and we'll choose the highest rate | 68 | * exhibited a worse failed frames behaviour and we'll choose the highest rate |
69 | * whose failed frames behaviour is not worse than the one of the original rate | 69 | * whose failed frames behaviour is not worse than the one of the original rate |
70 | * target. While at it, check that the new rate is valid. */ | 70 | * target. While at it, check that the new rate is valid. */ |
71 | static void rate_control_pid_adjust_rate(struct ieee80211_local *local, | 71 | static void rate_control_pid_adjust_rate(struct ieee80211_supported_band *sband, |
72 | struct sta_info *sta, int adj, | 72 | struct ieee80211_sta *sta, |
73 | struct rc_pid_sta_info *spinfo, int adj, | ||
73 | struct rc_pid_rateinfo *rinfo) | 74 | struct rc_pid_rateinfo *rinfo) |
74 | { | 75 | { |
75 | struct ieee80211_sub_if_data *sdata; | ||
76 | struct ieee80211_supported_band *sband; | ||
77 | int cur_sorted, new_sorted, probe, tmp, n_bitrates, band; | 76 | int cur_sorted, new_sorted, probe, tmp, n_bitrates, band; |
78 | int cur = sta->txrate_idx; | 77 | int cur = spinfo->txrate_idx; |
79 | 78 | ||
80 | sdata = sta->sdata; | ||
81 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
82 | band = sband->band; | 79 | band = sband->band; |
83 | n_bitrates = sband->n_bitrates; | 80 | n_bitrates = sband->n_bitrates; |
84 | 81 | ||
@@ -111,7 +108,7 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local, | |||
111 | /* Fit the rate found to the nearest supported rate. */ | 108 | /* Fit the rate found to the nearest supported rate. */ |
112 | do { | 109 | do { |
113 | if (rate_supported(sta, band, rinfo[tmp].index)) { | 110 | if (rate_supported(sta, band, rinfo[tmp].index)) { |
114 | sta->txrate_idx = rinfo[tmp].index; | 111 | spinfo->txrate_idx = rinfo[tmp].index; |
115 | break; | 112 | break; |
116 | } | 113 | } |
117 | if (adj < 0) | 114 | if (adj < 0) |
@@ -121,9 +118,9 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local, | |||
121 | } while (tmp < n_bitrates && tmp >= 0); | 118 | } while (tmp < n_bitrates && tmp >= 0); |
122 | 119 | ||
123 | #ifdef CONFIG_MAC80211_DEBUGFS | 120 | #ifdef CONFIG_MAC80211_DEBUGFS |
124 | rate_control_pid_event_rate_change( | 121 | rate_control_pid_event_rate_change(&spinfo->events, |
125 | &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events, | 122 | spinfo->txrate_idx, |
126 | sta->txrate_idx, sband->bitrates[sta->txrate_idx].bitrate); | 123 | sband->bitrates[spinfo->txrate_idx].bitrate); |
127 | #endif | 124 | #endif |
128 | } | 125 | } |
129 | 126 | ||
@@ -145,15 +142,11 @@ static void rate_control_pid_normalize(struct rc_pid_info *pinfo, int l) | |||
145 | } | 142 | } |
146 | 143 | ||
147 | static void rate_control_pid_sample(struct rc_pid_info *pinfo, | 144 | static void rate_control_pid_sample(struct rc_pid_info *pinfo, |
148 | struct ieee80211_local *local, | 145 | struct ieee80211_supported_band *sband, |
149 | struct sta_info *sta) | 146 | struct ieee80211_sta *sta, |
147 | struct rc_pid_sta_info *spinfo) | ||
150 | { | 148 | { |
151 | #ifdef CONFIG_MAC80211_MESH | ||
152 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
153 | #endif | ||
154 | struct rc_pid_sta_info *spinfo = sta->rate_ctrl_priv; | ||
155 | struct rc_pid_rateinfo *rinfo = pinfo->rinfo; | 149 | struct rc_pid_rateinfo *rinfo = pinfo->rinfo; |
156 | struct ieee80211_supported_band *sband; | ||
157 | u32 pf; | 150 | u32 pf; |
158 | s32 err_avg; | 151 | s32 err_avg; |
159 | u32 err_prop; | 152 | u32 err_prop; |
@@ -162,9 +155,6 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo, | |||
162 | int adj, i, j, tmp; | 155 | int adj, i, j, tmp; |
163 | unsigned long period; | 156 | unsigned long period; |
164 | 157 | ||
165 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
166 | spinfo = sta->rate_ctrl_priv; | ||
167 | |||
168 | /* In case nothing happened during the previous control interval, turn | 158 | /* In case nothing happened during the previous control interval, turn |
169 | * the sharpening factor on. */ | 159 | * the sharpening factor on. */ |
170 | period = (HZ * pinfo->sampling_period + 500) / 1000; | 160 | period = (HZ * pinfo->sampling_period + 500) / 1000; |
@@ -180,14 +170,15 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo, | |||
180 | if (unlikely(spinfo->tx_num_xmit == 0)) | 170 | if (unlikely(spinfo->tx_num_xmit == 0)) |
181 | pf = spinfo->last_pf; | 171 | pf = spinfo->last_pf; |
182 | else { | 172 | else { |
173 | /* XXX: BAD HACK!!! */ | ||
174 | struct sta_info *si = container_of(sta, struct sta_info, sta); | ||
175 | |||
183 | pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit; | 176 | pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit; |
184 | #ifdef CONFIG_MAC80211_MESH | 177 | |
185 | if (pf == 100 && | 178 | if (ieee80211_vif_is_mesh(&si->sdata->vif) && pf == 100) |
186 | sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) | 179 | mesh_plink_broken(si); |
187 | mesh_plink_broken(sta); | ||
188 | #endif | ||
189 | pf <<= RC_PID_ARITH_SHIFT; | 180 | pf <<= RC_PID_ARITH_SHIFT; |
190 | sta->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9) | 181 | si->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9) |
191 | >> RC_PID_ARITH_SHIFT; | 182 | >> RC_PID_ARITH_SHIFT; |
192 | } | 183 | } |
193 | 184 | ||
@@ -195,16 +186,16 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo, | |||
195 | spinfo->tx_num_failed = 0; | 186 | spinfo->tx_num_failed = 0; |
196 | 187 | ||
197 | /* If we just switched rate, update the rate behaviour info. */ | 188 | /* If we just switched rate, update the rate behaviour info. */ |
198 | if (pinfo->oldrate != sta->txrate_idx) { | 189 | if (pinfo->oldrate != spinfo->txrate_idx) { |
199 | 190 | ||
200 | i = rinfo[pinfo->oldrate].rev_index; | 191 | i = rinfo[pinfo->oldrate].rev_index; |
201 | j = rinfo[sta->txrate_idx].rev_index; | 192 | j = rinfo[spinfo->txrate_idx].rev_index; |
202 | 193 | ||
203 | tmp = (pf - spinfo->last_pf); | 194 | tmp = (pf - spinfo->last_pf); |
204 | tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT); | 195 | tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT); |
205 | 196 | ||
206 | rinfo[j].diff = rinfo[i].diff + tmp; | 197 | rinfo[j].diff = rinfo[i].diff + tmp; |
207 | pinfo->oldrate = sta->txrate_idx; | 198 | pinfo->oldrate = spinfo->txrate_idx; |
208 | } | 199 | } |
209 | rate_control_pid_normalize(pinfo, sband->n_bitrates); | 200 | rate_control_pid_normalize(pinfo, sband->n_bitrates); |
210 | 201 | ||
@@ -233,43 +224,26 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo, | |||
233 | 224 | ||
234 | /* Change rate. */ | 225 | /* Change rate. */ |
235 | if (adj) | 226 | if (adj) |
236 | rate_control_pid_adjust_rate(local, sta, adj, rinfo); | 227 | rate_control_pid_adjust_rate(sband, sta, spinfo, adj, rinfo); |
237 | } | 228 | } |
238 | 229 | ||
239 | static void rate_control_pid_tx_status(void *priv, struct net_device *dev, | 230 | static void rate_control_pid_tx_status(void *priv, struct ieee80211_supported_band *sband, |
231 | struct ieee80211_sta *sta, void *priv_sta, | ||
240 | struct sk_buff *skb) | 232 | struct sk_buff *skb) |
241 | { | 233 | { |
242 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
243 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
244 | struct ieee80211_sub_if_data *sdata; | ||
245 | struct rc_pid_info *pinfo = priv; | 234 | struct rc_pid_info *pinfo = priv; |
246 | struct sta_info *sta; | 235 | struct rc_pid_sta_info *spinfo = priv_sta; |
247 | struct rc_pid_sta_info *spinfo; | ||
248 | unsigned long period; | 236 | unsigned long period; |
249 | struct ieee80211_supported_band *sband; | ||
250 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 237 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
251 | 238 | ||
252 | rcu_read_lock(); | 239 | if (!spinfo) |
253 | 240 | return; | |
254 | sta = sta_info_get(local, hdr->addr1); | ||
255 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
256 | |||
257 | if (!sta) | ||
258 | goto unlock; | ||
259 | |||
260 | /* Don't update the state if we're not controlling the rate. */ | ||
261 | sdata = sta->sdata; | ||
262 | if (sdata->force_unicast_rateidx > -1) { | ||
263 | sta->txrate_idx = sdata->max_ratectrl_rateidx; | ||
264 | goto unlock; | ||
265 | } | ||
266 | 241 | ||
267 | /* Ignore all frames that were sent with a different rate than the rate | 242 | /* Ignore all frames that were sent with a different rate than the rate |
268 | * we currently advise mac80211 to use. */ | 243 | * we currently advise mac80211 to use. */ |
269 | if (info->tx_rate_idx != sta->txrate_idx) | 244 | if (info->tx_rate_idx != spinfo->txrate_idx) |
270 | goto unlock; | 245 | return; |
271 | 246 | ||
272 | spinfo = sta->rate_ctrl_priv; | ||
273 | spinfo->tx_num_xmit++; | 247 | spinfo->tx_num_xmit++; |
274 | 248 | ||
275 | #ifdef CONFIG_MAC80211_DEBUGFS | 249 | #ifdef CONFIG_MAC80211_DEBUGFS |
@@ -287,93 +261,68 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev, | |||
287 | spinfo->tx_num_xmit++; | 261 | spinfo->tx_num_xmit++; |
288 | } | 262 | } |
289 | 263 | ||
290 | if (info->status.excessive_retries) { | ||
291 | sta->tx_retry_failed++; | ||
292 | sta->tx_num_consecutive_failures++; | ||
293 | sta->tx_num_mpdu_fail++; | ||
294 | } else { | ||
295 | sta->tx_num_consecutive_failures = 0; | ||
296 | sta->tx_num_mpdu_ok++; | ||
297 | } | ||
298 | sta->tx_retry_count += info->status.retry_count; | ||
299 | sta->tx_num_mpdu_fail += info->status.retry_count; | ||
300 | |||
301 | /* Update PID controller state. */ | 264 | /* Update PID controller state. */ |
302 | period = (HZ * pinfo->sampling_period + 500) / 1000; | 265 | period = (HZ * pinfo->sampling_period + 500) / 1000; |
303 | if (!period) | 266 | if (!period) |
304 | period = 1; | 267 | period = 1; |
305 | if (time_after(jiffies, spinfo->last_sample + period)) | 268 | if (time_after(jiffies, spinfo->last_sample + period)) |
306 | rate_control_pid_sample(pinfo, local, sta); | 269 | rate_control_pid_sample(pinfo, sband, sta, spinfo); |
307 | |||
308 | unlock: | ||
309 | rcu_read_unlock(); | ||
310 | } | 270 | } |
311 | 271 | ||
312 | static void rate_control_pid_get_rate(void *priv, struct net_device *dev, | 272 | static void |
313 | struct ieee80211_supported_band *sband, | 273 | rate_control_pid_get_rate(void *priv, struct ieee80211_supported_band *sband, |
314 | struct sk_buff *skb, | 274 | struct ieee80211_sta *sta, void *priv_sta, |
315 | struct rate_selection *sel) | 275 | struct sk_buff *skb, |
276 | struct rate_selection *sel) | ||
316 | { | 277 | { |
317 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
318 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 278 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
319 | struct ieee80211_sub_if_data *sdata; | 279 | struct rc_pid_sta_info *spinfo = priv_sta; |
320 | struct sta_info *sta; | ||
321 | int rateidx; | 280 | int rateidx; |
322 | u16 fc; | 281 | u16 fc; |
323 | 282 | ||
324 | rcu_read_lock(); | ||
325 | |||
326 | sta = sta_info_get(local, hdr->addr1); | ||
327 | |||
328 | /* Send management frames and broadcast/multicast data using lowest | 283 | /* Send management frames and broadcast/multicast data using lowest |
329 | * rate. */ | 284 | * rate. */ |
330 | fc = le16_to_cpu(hdr->frame_control); | 285 | fc = le16_to_cpu(hdr->frame_control); |
331 | if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || | 286 | if (!sta || !spinfo || |
332 | is_multicast_ether_addr(hdr->addr1) || !sta) { | 287 | (fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || |
333 | sel->rate_idx = rate_lowest_index(local, sband, sta); | 288 | is_multicast_ether_addr(hdr->addr1)) { |
334 | rcu_read_unlock(); | 289 | sel->rate_idx = rate_lowest_index(sband, sta); |
335 | return; | 290 | return; |
336 | } | 291 | } |
337 | 292 | ||
338 | /* If a forced rate is in effect, select it. */ | 293 | rateidx = spinfo->txrate_idx; |
339 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
340 | if (sdata->force_unicast_rateidx > -1) | ||
341 | sta->txrate_idx = sdata->force_unicast_rateidx; | ||
342 | |||
343 | rateidx = sta->txrate_idx; | ||
344 | 294 | ||
345 | if (rateidx >= sband->n_bitrates) | 295 | if (rateidx >= sband->n_bitrates) |
346 | rateidx = sband->n_bitrates - 1; | 296 | rateidx = sband->n_bitrates - 1; |
347 | 297 | ||
348 | sta->last_txrate_idx = rateidx; | ||
349 | |||
350 | rcu_read_unlock(); | ||
351 | |||
352 | sel->rate_idx = rateidx; | 298 | sel->rate_idx = rateidx; |
353 | 299 | ||
354 | #ifdef CONFIG_MAC80211_DEBUGFS | 300 | #ifdef CONFIG_MAC80211_DEBUGFS |
355 | rate_control_pid_event_tx_rate( | 301 | rate_control_pid_event_tx_rate(&spinfo->events, |
356 | &((struct rc_pid_sta_info *) sta->rate_ctrl_priv)->events, | ||
357 | rateidx, sband->bitrates[rateidx].bitrate); | 302 | rateidx, sband->bitrates[rateidx].bitrate); |
358 | #endif | 303 | #endif |
359 | } | 304 | } |
360 | 305 | ||
361 | static void rate_control_pid_rate_init(void *priv, void *priv_sta, | 306 | static void |
362 | struct ieee80211_local *local, | 307 | rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband, |
363 | struct sta_info *sta) | 308 | struct ieee80211_sta *sta, void *priv_sta) |
364 | { | 309 | { |
310 | struct rc_pid_sta_info *spinfo = priv_sta; | ||
311 | struct sta_info *si; | ||
312 | |||
365 | /* TODO: This routine should consider using RSSI from previous packets | 313 | /* TODO: This routine should consider using RSSI from previous packets |
366 | * as we need to have IEEE 802.1X auth succeed immediately after assoc.. | 314 | * as we need to have IEEE 802.1X auth succeed immediately after assoc.. |
367 | * Until that method is implemented, we will use the lowest supported | 315 | * Until that method is implemented, we will use the lowest supported |
368 | * rate as a workaround. */ | 316 | * rate as a workaround. */ |
369 | struct ieee80211_supported_band *sband; | ||
370 | 317 | ||
371 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 318 | spinfo->txrate_idx = rate_lowest_index(sband, sta); |
372 | sta->txrate_idx = rate_lowest_index(local, sband, sta); | 319 | /* HACK */ |
373 | sta->fail_avg = 0; | 320 | si = container_of(sta, struct sta_info, sta); |
321 | si->fail_avg = 0; | ||
374 | } | 322 | } |
375 | 323 | ||
376 | static void *rate_control_pid_alloc(struct ieee80211_local *local) | 324 | static void *rate_control_pid_alloc(struct ieee80211_hw *hw, |
325 | struct dentry *debugfsdir) | ||
377 | { | 326 | { |
378 | struct rc_pid_info *pinfo; | 327 | struct rc_pid_info *pinfo; |
379 | struct rc_pid_rateinfo *rinfo; | 328 | struct rc_pid_rateinfo *rinfo; |
@@ -384,7 +333,7 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local) | |||
384 | struct rc_pid_debugfs_entries *de; | 333 | struct rc_pid_debugfs_entries *de; |
385 | #endif | 334 | #endif |
386 | 335 | ||
387 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 336 | sband = hw->wiphy->bands[hw->conf.channel->band]; |
388 | 337 | ||
389 | pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); | 338 | pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); |
390 | if (!pinfo) | 339 | if (!pinfo) |
@@ -439,30 +388,28 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local) | |||
439 | 388 | ||
440 | #ifdef CONFIG_MAC80211_DEBUGFS | 389 | #ifdef CONFIG_MAC80211_DEBUGFS |
441 | de = &pinfo->dentries; | 390 | de = &pinfo->dentries; |
442 | de->dir = debugfs_create_dir("rc80211_pid", | ||
443 | local->hw.wiphy->debugfsdir); | ||
444 | de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR, | 391 | de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR, |
445 | de->dir, &pinfo->target); | 392 | debugfsdir, &pinfo->target); |
446 | de->sampling_period = debugfs_create_u32("sampling_period", | 393 | de->sampling_period = debugfs_create_u32("sampling_period", |
447 | S_IRUSR | S_IWUSR, de->dir, | 394 | S_IRUSR | S_IWUSR, debugfsdir, |
448 | &pinfo->sampling_period); | 395 | &pinfo->sampling_period); |
449 | de->coeff_p = debugfs_create_u32("coeff_p", S_IRUSR | S_IWUSR, | 396 | de->coeff_p = debugfs_create_u32("coeff_p", S_IRUSR | S_IWUSR, |
450 | de->dir, &pinfo->coeff_p); | 397 | debugfsdir, &pinfo->coeff_p); |
451 | de->coeff_i = debugfs_create_u32("coeff_i", S_IRUSR | S_IWUSR, | 398 | de->coeff_i = debugfs_create_u32("coeff_i", S_IRUSR | S_IWUSR, |
452 | de->dir, &pinfo->coeff_i); | 399 | debugfsdir, &pinfo->coeff_i); |
453 | de->coeff_d = debugfs_create_u32("coeff_d", S_IRUSR | S_IWUSR, | 400 | de->coeff_d = debugfs_create_u32("coeff_d", S_IRUSR | S_IWUSR, |
454 | de->dir, &pinfo->coeff_d); | 401 | debugfsdir, &pinfo->coeff_d); |
455 | de->smoothing_shift = debugfs_create_u32("smoothing_shift", | 402 | de->smoothing_shift = debugfs_create_u32("smoothing_shift", |
456 | S_IRUSR | S_IWUSR, de->dir, | 403 | S_IRUSR | S_IWUSR, debugfsdir, |
457 | &pinfo->smoothing_shift); | 404 | &pinfo->smoothing_shift); |
458 | de->sharpen_factor = debugfs_create_u32("sharpen_factor", | 405 | de->sharpen_factor = debugfs_create_u32("sharpen_factor", |
459 | S_IRUSR | S_IWUSR, de->dir, | 406 | S_IRUSR | S_IWUSR, debugfsdir, |
460 | &pinfo->sharpen_factor); | 407 | &pinfo->sharpen_factor); |
461 | de->sharpen_duration = debugfs_create_u32("sharpen_duration", | 408 | de->sharpen_duration = debugfs_create_u32("sharpen_duration", |
462 | S_IRUSR | S_IWUSR, de->dir, | 409 | S_IRUSR | S_IWUSR, debugfsdir, |
463 | &pinfo->sharpen_duration); | 410 | &pinfo->sharpen_duration); |
464 | de->norm_offset = debugfs_create_u32("norm_offset", | 411 | de->norm_offset = debugfs_create_u32("norm_offset", |
465 | S_IRUSR | S_IWUSR, de->dir, | 412 | S_IRUSR | S_IWUSR, debugfsdir, |
466 | &pinfo->norm_offset); | 413 | &pinfo->norm_offset); |
467 | #endif | 414 | #endif |
468 | 415 | ||
@@ -484,7 +431,6 @@ static void rate_control_pid_free(void *priv) | |||
484 | debugfs_remove(de->coeff_p); | 431 | debugfs_remove(de->coeff_p); |
485 | debugfs_remove(de->sampling_period); | 432 | debugfs_remove(de->sampling_period); |
486 | debugfs_remove(de->target); | 433 | debugfs_remove(de->target); |
487 | debugfs_remove(de->dir); | ||
488 | #endif | 434 | #endif |
489 | 435 | ||
490 | kfree(pinfo->rinfo); | 436 | kfree(pinfo->rinfo); |
@@ -495,7 +441,8 @@ static void rate_control_pid_clear(void *priv) | |||
495 | { | 441 | { |
496 | } | 442 | } |
497 | 443 | ||
498 | static void *rate_control_pid_alloc_sta(void *priv, gfp_t gfp) | 444 | static void *rate_control_pid_alloc_sta(void *priv, struct ieee80211_sta *sta, |
445 | gfp_t gfp) | ||
499 | { | 446 | { |
500 | struct rc_pid_sta_info *spinfo; | 447 | struct rc_pid_sta_info *spinfo; |
501 | 448 | ||
@@ -513,10 +460,10 @@ static void *rate_control_pid_alloc_sta(void *priv, gfp_t gfp) | |||
513 | return spinfo; | 460 | return spinfo; |
514 | } | 461 | } |
515 | 462 | ||
516 | static void rate_control_pid_free_sta(void *priv, void *priv_sta) | 463 | static void rate_control_pid_free_sta(void *priv, struct ieee80211_sta *sta, |
464 | void *priv_sta) | ||
517 | { | 465 | { |
518 | struct rc_pid_sta_info *spinfo = priv_sta; | 466 | kfree(priv_sta); |
519 | kfree(spinfo); | ||
520 | } | 467 | } |
521 | 468 | ||
522 | static struct rate_control_ops mac80211_rcpid = { | 469 | static struct rate_control_ops mac80211_rcpid = { |
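The PID hunks above now call rate_supported() and rate_lowest_index() with a struct ieee80211_sta, yet the relocated helpers themselves sit outside this section. A minimal sketch consistent with those call sites follows; it assumes the usual mac80211 types are in scope, and the supp_rates member on ieee80211_sta is an assumption rather than something shown in this diff.

	/* Sketch only: assumes ieee80211_sta carries a per-band supp_rates
	 * bitmap, which is not visible anywhere in these hunks. */
	static inline bool rate_supported(struct ieee80211_sta *sta,
					  enum ieee80211_band band, int index)
	{
		return !sta || (sta->supp_rates[band] & BIT(index));
	}

	static inline s8 rate_lowest_index(struct ieee80211_supported_band *sband,
					   struct ieee80211_sta *sta)
	{
		int i;

		for (i = 0; i < sband->n_bitrates; i++)
			if (rate_supported(sta, sband->band, i))
				return i;

		/* warn when no usable rate can be found */
		WARN_ON(1);
		return 0;
	}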
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6db854505193..77e7b014872b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -143,6 +143,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
143 | /* IEEE80211_RADIOTAP_FLAGS */ | 143 | /* IEEE80211_RADIOTAP_FLAGS */ |
144 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) | 144 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) |
145 | *pos |= IEEE80211_RADIOTAP_F_FCS; | 145 | *pos |= IEEE80211_RADIOTAP_F_FCS; |
146 | if (status->flag & RX_FLAG_SHORTPRE) | ||
147 | *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; | ||
146 | pos++; | 148 | pos++; |
147 | 149 | ||
148 | /* IEEE80211_RADIOTAP_RATE */ | 150 | /* IEEE80211_RADIOTAP_RATE */ |
@@ -155,8 +157,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
155 | if (status->band == IEEE80211_BAND_5GHZ) | 157 | if (status->band == IEEE80211_BAND_5GHZ) |
156 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | | 158 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | |
157 | IEEE80211_CHAN_5GHZ); | 159 | IEEE80211_CHAN_5GHZ); |
160 | else if (rate->flags & IEEE80211_RATE_ERP_G) | ||
161 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | | ||
162 | IEEE80211_CHAN_2GHZ); | ||
158 | else | 163 | else |
159 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_DYN | | 164 | *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK | |
160 | IEEE80211_CHAN_2GHZ); | 165 | IEEE80211_CHAN_2GHZ); |
161 | pos += 2; | 166 | pos += 2; |
162 | 167 | ||
@@ -290,7 +295,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
290 | if (!netif_running(sdata->dev)) | 295 | if (!netif_running(sdata->dev)) |
291 | continue; | 296 | continue; |
292 | 297 | ||
293 | if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR) | 298 | if (sdata->vif.type != NL80211_IFTYPE_MONITOR) |
294 | continue; | 299 | continue; |
295 | 300 | ||
296 | if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) | 301 | if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) |
@@ -398,12 +403,12 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) | |||
398 | struct ieee80211_local *local = rx->local; | 403 | struct ieee80211_local *local = rx->local; |
399 | struct sk_buff *skb = rx->skb; | 404 | struct sk_buff *skb = rx->skb; |
400 | 405 | ||
401 | if (unlikely(local->sta_hw_scanning)) | 406 | if (unlikely(local->hw_scanning)) |
402 | return ieee80211_sta_rx_scan(rx->dev, skb, rx->status); | 407 | return ieee80211_scan_rx(rx->sdata, skb, rx->status); |
403 | 408 | ||
404 | if (unlikely(local->sta_sw_scanning)) { | 409 | if (unlikely(local->sw_scanning)) { |
405 | /* drop all the other packets during a software scan anyway */ | 410 | /* drop all the other packets during a software scan anyway */ |
406 | if (ieee80211_sta_rx_scan(rx->dev, skb, rx->status) | 411 | if (ieee80211_scan_rx(rx->sdata, skb, rx->status) |
407 | != RX_QUEUED) | 412 | != RX_QUEUED) |
408 | dev_kfree_skb(skb); | 413 | dev_kfree_skb(skb); |
409 | return RX_QUEUED; | 414 | return RX_QUEUED; |
@@ -461,7 +466,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
461 | 466 | ||
462 | if (ieee80211_is_data(hdr->frame_control) && | 467 | if (ieee80211_is_data(hdr->frame_control) && |
463 | is_multicast_ether_addr(hdr->addr1) && | 468 | is_multicast_ether_addr(hdr->addr1) && |
464 | mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->dev)) | 469 | mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata)) |
465 | return RX_DROP_MONITOR; | 470 | return RX_DROP_MONITOR; |
466 | #undef msh_h_get | 471 | #undef msh_h_get |
467 | 472 | ||
@@ -496,8 +501,8 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) | |||
496 | /* Drop disallowed frame classes based on STA auth/assoc state; | 501 | /* Drop disallowed frame classes based on STA auth/assoc state; |
497 | * IEEE 802.11, Chap 5.5. | 502 | * IEEE 802.11, Chap 5.5. |
498 | * | 503 | * |
499 | * 80211.o does filtering only based on association state, i.e., it | 504 | * mac80211 filters only based on association state, i.e. it drops |
500 | * drops Class 3 frames from not associated stations. hostapd sends | 505 | * Class 3 frames from not associated stations. hostapd sends |
501 | * deauth/disassoc frames when needed. In addition, hostapd is | 506 | * deauth/disassoc frames when needed. In addition, hostapd is |
502 | * responsible for filtering on both auth and assoc states. | 507 | * responsible for filtering on both auth and assoc states. |
503 | */ | 508 | */ |
@@ -507,7 +512,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) | |||
507 | 512 | ||
508 | if (unlikely((ieee80211_is_data(hdr->frame_control) || | 513 | if (unlikely((ieee80211_is_data(hdr->frame_control) || |
509 | ieee80211_is_pspoll(hdr->frame_control)) && | 514 | ieee80211_is_pspoll(hdr->frame_control)) && |
510 | rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 515 | rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && |
511 | (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) { | 516 | (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) { |
512 | if ((!ieee80211_has_fromds(hdr->frame_control) && | 517 | if ((!ieee80211_has_fromds(hdr->frame_control) && |
513 | !ieee80211_has_tods(hdr->frame_control) && | 518 | !ieee80211_has_tods(hdr->frame_control) && |
@@ -645,32 +650,28 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
645 | return result; | 650 | return result; |
646 | } | 651 | } |
647 | 652 | ||
648 | static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta) | 653 | static void ap_sta_ps_start(struct sta_info *sta) |
649 | { | 654 | { |
650 | struct ieee80211_sub_if_data *sdata; | 655 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
651 | DECLARE_MAC_BUF(mac); | 656 | DECLARE_MAC_BUF(mac); |
652 | 657 | ||
653 | sdata = sta->sdata; | ||
654 | |||
655 | atomic_inc(&sdata->bss->num_sta_ps); | 658 | atomic_inc(&sdata->bss->num_sta_ps); |
656 | set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL); | 659 | set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL); |
657 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 660 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
658 | printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", | 661 | printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", |
659 | dev->name, print_mac(mac, sta->addr), sta->aid); | 662 | sdata->dev->name, print_mac(mac, sta->sta.addr), sta->sta.aid); |
660 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 663 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
661 | } | 664 | } |
662 | 665 | ||
663 | static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta) | 666 | static int ap_sta_ps_end(struct sta_info *sta) |
664 | { | 667 | { |
665 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 668 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
669 | struct ieee80211_local *local = sdata->local; | ||
666 | struct sk_buff *skb; | 670 | struct sk_buff *skb; |
667 | int sent = 0; | 671 | int sent = 0; |
668 | struct ieee80211_sub_if_data *sdata; | ||
669 | struct ieee80211_tx_info *info; | 672 | struct ieee80211_tx_info *info; |
670 | DECLARE_MAC_BUF(mac); | 673 | DECLARE_MAC_BUF(mac); |
671 | 674 | ||
672 | sdata = sta->sdata; | ||
673 | |||
674 | atomic_dec(&sdata->bss->num_sta_ps); | 675 | atomic_dec(&sdata->bss->num_sta_ps); |
675 | 676 | ||
676 | clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL); | 677 | clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL); |
@@ -680,7 +681,7 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta) | |||
680 | 681 | ||
681 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 682 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
682 | printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n", | 683 | printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n", |
683 | dev->name, print_mac(mac, sta->addr), sta->aid); | 684 | sdata->dev->name, print_mac(mac, sta->sta.addr), sta->sta.aid); |
684 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 685 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
685 | 686 | ||
686 | /* Send all buffered frames to the station */ | 687 | /* Send all buffered frames to the station */ |
@@ -696,8 +697,8 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta) | |||
696 | sent++; | 697 | sent++; |
697 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 698 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
698 | printk(KERN_DEBUG "%s: STA %s aid %d send PS frame " | 699 | printk(KERN_DEBUG "%s: STA %s aid %d send PS frame " |
699 | "since STA not sleeping anymore\n", dev->name, | 700 | "since STA not sleeping anymore\n", sdata->dev->name, |
700 | print_mac(mac, sta->addr), sta->aid); | 701 | print_mac(mac, sta->sta.addr), sta->sta.aid); |
701 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 702 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
702 | info->flags |= IEEE80211_TX_CTL_REQUEUE; | 703 | info->flags |= IEEE80211_TX_CTL_REQUEUE; |
703 | dev_queue_xmit(skb); | 704 | dev_queue_xmit(skb); |
@@ -710,7 +711,6 @@ static ieee80211_rx_result debug_noinline | |||
710 | ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | 711 | ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) |
711 | { | 712 | { |
712 | struct sta_info *sta = rx->sta; | 713 | struct sta_info *sta = rx->sta; |
713 | struct net_device *dev = rx->dev; | ||
714 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | 714 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
715 | 715 | ||
716 | if (!sta) | 716 | if (!sta) |
@@ -719,14 +719,14 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
719 | /* Update last_rx only for IBSS packets which are for the current | 719 | /* Update last_rx only for IBSS packets which are for the current |
720 | * BSSID to avoid keeping the current IBSS network alive in cases where | 720 | * BSSID to avoid keeping the current IBSS network alive in cases where |
721 | * other STAs are using different BSSID. */ | 721 | * other STAs are using different BSSID. */ |
722 | if (rx->sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 722 | if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { |
723 | u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, | 723 | u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, |
724 | IEEE80211_IF_TYPE_IBSS); | 724 | NL80211_IFTYPE_ADHOC); |
725 | if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0) | 725 | if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0) |
726 | sta->last_rx = jiffies; | 726 | sta->last_rx = jiffies; |
727 | } else | 727 | } else |
728 | if (!is_multicast_ether_addr(hdr->addr1) || | 728 | if (!is_multicast_ether_addr(hdr->addr1) || |
729 | rx->sdata->vif.type == IEEE80211_IF_TYPE_STA) { | 729 | rx->sdata->vif.type == NL80211_IFTYPE_STATION) { |
730 | /* Update last_rx only for unicast frames in order to prevent | 730 | /* Update last_rx only for unicast frames in order to prevent |
731 | * the Probe Request frames (the only broadcast frames from a | 731 | * the Probe Request frames (the only broadcast frames from a |
732 | * STA in infrastructure mode) from keeping a connection alive. | 732 | * STA in infrastructure mode) from keeping a connection alive. |
@@ -746,16 +746,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
746 | sta->last_noise = rx->status->noise; | 746 | sta->last_noise = rx->status->noise; |
747 | 747 | ||
748 | if (!ieee80211_has_morefrags(hdr->frame_control) && | 748 | if (!ieee80211_has_morefrags(hdr->frame_control) && |
749 | (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP || | 749 | (rx->sdata->vif.type == NL80211_IFTYPE_AP || |
750 | rx->sdata->vif.type == IEEE80211_IF_TYPE_VLAN)) { | 750 | rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { |
751 | /* Change STA power saving mode only in the end of a frame | 751 | /* Change STA power saving mode only in the end of a frame |
752 | * exchange sequence */ | 752 | * exchange sequence */ |
753 | if (test_sta_flags(sta, WLAN_STA_PS) && | 753 | if (test_sta_flags(sta, WLAN_STA_PS) && |
754 | !ieee80211_has_pm(hdr->frame_control)) | 754 | !ieee80211_has_pm(hdr->frame_control)) |
755 | rx->sent_ps_buffered += ap_sta_ps_end(dev, sta); | 755 | rx->sent_ps_buffered += ap_sta_ps_end(sta); |
756 | else if (!test_sta_flags(sta, WLAN_STA_PS) && | 756 | else if (!test_sta_flags(sta, WLAN_STA_PS) && |
757 | ieee80211_has_pm(hdr->frame_control)) | 757 | ieee80211_has_pm(hdr->frame_control)) |
758 | ap_sta_ps_start(dev, sta); | 758 | ap_sta_ps_start(sta); |
759 | } | 759 | } |
760 | 760 | ||
761 | /* Drop data::nullfunc frames silently, since they are used only to | 761 | /* Drop data::nullfunc frames silently, since they are used only to |
@@ -816,7 +816,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, | |||
816 | 816 | ||
817 | static inline struct ieee80211_fragment_entry * | 817 | static inline struct ieee80211_fragment_entry * |
818 | ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, | 818 | ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, |
819 | u16 fc, unsigned int frag, unsigned int seq, | 819 | unsigned int frag, unsigned int seq, |
820 | int rx_queue, struct ieee80211_hdr *hdr) | 820 | int rx_queue, struct ieee80211_hdr *hdr) |
821 | { | 821 | { |
822 | struct ieee80211_fragment_entry *entry; | 822 | struct ieee80211_fragment_entry *entry; |
@@ -825,7 +825,6 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, | |||
825 | idx = sdata->fragment_next; | 825 | idx = sdata->fragment_next; |
826 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { | 826 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { |
827 | struct ieee80211_hdr *f_hdr; | 827 | struct ieee80211_hdr *f_hdr; |
828 | u16 f_fc; | ||
829 | 828 | ||
830 | idx--; | 829 | idx--; |
831 | if (idx < 0) | 830 | if (idx < 0) |
@@ -837,10 +836,13 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, | |||
837 | entry->last_frag + 1 != frag) | 836 | entry->last_frag + 1 != frag) |
838 | continue; | 837 | continue; |
839 | 838 | ||
840 | f_hdr = (struct ieee80211_hdr *) entry->skb_list.next->data; | 839 | f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data; |
841 | f_fc = le16_to_cpu(f_hdr->frame_control); | ||
842 | 840 | ||
843 | if ((fc & IEEE80211_FCTL_FTYPE) != (f_fc & IEEE80211_FCTL_FTYPE) || | 841 | /* |
842 | * Check ftype and addresses are equal, else check next fragment | ||
843 | */ | ||
844 | if (((hdr->frame_control ^ f_hdr->frame_control) & | ||
845 | cpu_to_le16(IEEE80211_FCTL_FTYPE)) || | ||
844 | compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || | 846 | compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || |
845 | compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) | 847 | compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) |
846 | continue; | 848 | continue; |
@@ -860,16 +862,18 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
860 | { | 862 | { |
861 | struct ieee80211_hdr *hdr; | 863 | struct ieee80211_hdr *hdr; |
862 | u16 sc; | 864 | u16 sc; |
865 | __le16 fc; | ||
863 | unsigned int frag, seq; | 866 | unsigned int frag, seq; |
864 | struct ieee80211_fragment_entry *entry; | 867 | struct ieee80211_fragment_entry *entry; |
865 | struct sk_buff *skb; | 868 | struct sk_buff *skb; |
866 | DECLARE_MAC_BUF(mac); | 869 | DECLARE_MAC_BUF(mac); |
867 | 870 | ||
868 | hdr = (struct ieee80211_hdr *) rx->skb->data; | 871 | hdr = (struct ieee80211_hdr *)rx->skb->data; |
872 | fc = hdr->frame_control; | ||
869 | sc = le16_to_cpu(hdr->seq_ctrl); | 873 | sc = le16_to_cpu(hdr->seq_ctrl); |
870 | frag = sc & IEEE80211_SCTL_FRAG; | 874 | frag = sc & IEEE80211_SCTL_FRAG; |
871 | 875 | ||
872 | if (likely((!(rx->fc & IEEE80211_FCTL_MOREFRAGS) && frag == 0) || | 876 | if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || |
873 | (rx->skb)->len < 24 || | 877 | (rx->skb)->len < 24 || |
874 | is_multicast_ether_addr(hdr->addr1))) { | 878 | is_multicast_ether_addr(hdr->addr1))) { |
875 | /* not fragmented */ | 879 | /* not fragmented */ |
@@ -884,7 +888,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
884 | entry = ieee80211_reassemble_add(rx->sdata, frag, seq, | 888 | entry = ieee80211_reassemble_add(rx->sdata, frag, seq, |
885 | rx->queue, &(rx->skb)); | 889 | rx->queue, &(rx->skb)); |
886 | if (rx->key && rx->key->conf.alg == ALG_CCMP && | 890 | if (rx->key && rx->key->conf.alg == ALG_CCMP && |
887 | (rx->fc & IEEE80211_FCTL_PROTECTED)) { | 891 | ieee80211_has_protected(fc)) { |
888 | /* Store CCMP PN so that we can verify that the next | 892 | /* Store CCMP PN so that we can verify that the next |
889 | * fragment has a sequential PN value. */ | 893 | * fragment has a sequential PN value. */ |
890 | entry->ccmp = 1; | 894 | entry->ccmp = 1; |
@@ -898,8 +902,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
898 | /* This is a fragment for a frame that should already be pending in | 902 | /* This is a fragment for a frame that should already be pending in |
899 | * fragment cache. Add this fragment to the end of the pending entry. | 903 | * fragment cache. Add this fragment to the end of the pending entry. |
900 | */ | 904 | */ |
901 | entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq, | 905 | entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr); |
902 | rx->queue, hdr); | ||
903 | if (!entry) { | 906 | if (!entry) { |
904 | I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); | 907 | I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); |
905 | return RX_DROP_MONITOR; | 908 | return RX_DROP_MONITOR; |
@@ -924,11 +927,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
924 | memcpy(entry->last_pn, pn, CCMP_PN_LEN); | 927 | memcpy(entry->last_pn, pn, CCMP_PN_LEN); |
925 | } | 928 | } |
926 | 929 | ||
927 | skb_pull(rx->skb, ieee80211_get_hdrlen(rx->fc)); | 930 | skb_pull(rx->skb, ieee80211_hdrlen(fc)); |
928 | __skb_queue_tail(&entry->skb_list, rx->skb); | 931 | __skb_queue_tail(&entry->skb_list, rx->skb); |
929 | entry->last_frag = frag; | 932 | entry->last_frag = frag; |
930 | entry->extra_len += rx->skb->len; | 933 | entry->extra_len += rx->skb->len; |
931 | if (rx->fc & IEEE80211_FCTL_MOREFRAGS) { | 934 | if (ieee80211_has_morefrags(fc)) { |
932 | rx->skb = NULL; | 935 | rx->skb = NULL; |
933 | return RX_QUEUED; | 936 | return RX_QUEUED; |
934 | } | 937 | } |
@@ -968,15 +971,14 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | |||
968 | struct sk_buff *skb; | 971 | struct sk_buff *skb; |
969 | int no_pending_pkts; | 972 | int no_pending_pkts; |
970 | DECLARE_MAC_BUF(mac); | 973 | DECLARE_MAC_BUF(mac); |
974 | __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control; | ||
971 | 975 | ||
972 | if (likely(!rx->sta || | 976 | if (likely(!rx->sta || !ieee80211_is_pspoll(fc) || |
973 | (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL || | ||
974 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL || | ||
975 | !(rx->flags & IEEE80211_RX_RA_MATCH))) | 977 | !(rx->flags & IEEE80211_RX_RA_MATCH))) |
976 | return RX_CONTINUE; | 978 | return RX_CONTINUE; |
977 | 979 | ||
978 | if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) && | 980 | if ((sdata->vif.type != NL80211_IFTYPE_AP) && |
979 | (sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) | 981 | (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)) |
980 | return RX_DROP_UNUSABLE; | 982 | return RX_DROP_UNUSABLE; |
981 | 983 | ||
982 | skb = skb_dequeue(&rx->sta->tx_filtered); | 984 | skb = skb_dequeue(&rx->sta->tx_filtered); |
@@ -1000,7 +1002,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | |||
1000 | 1002 | ||
1001 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 1003 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
1002 | printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", | 1004 | printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", |
1003 | print_mac(mac, rx->sta->addr), rx->sta->aid, | 1005 | print_mac(mac, rx->sta->sta.addr), rx->sta->sta.aid, |
1004 | skb_queue_len(&rx->sta->ps_tx_buf)); | 1006 | skb_queue_len(&rx->sta->ps_tx_buf)); |
1005 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 1007 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
1006 | 1008 | ||
@@ -1025,7 +1027,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | |||
1025 | */ | 1027 | */ |
1026 | printk(KERN_DEBUG "%s: STA %s sent PS Poll even " | 1028 | printk(KERN_DEBUG "%s: STA %s sent PS Poll even " |
1027 | "though there are no buffered frames for it\n", | 1029 | "though there are no buffered frames for it\n", |
1028 | rx->dev->name, print_mac(mac, rx->sta->addr)); | 1030 | rx->dev->name, print_mac(mac, rx->sta->sta.addr)); |
1029 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 1031 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
1030 | } | 1032 | } |
1031 | 1033 | ||
@@ -1050,7 +1052,6 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx) | |||
1050 | ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN); | 1052 | ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN); |
1051 | hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN); | 1053 | hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN); |
1052 | /* change frame type to non QOS */ | 1054 | /* change frame type to non QOS */ |
1053 | rx->fc &= ~IEEE80211_STYPE_QOS_DATA; | ||
1054 | hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); | 1055 | hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); |
1055 | 1056 | ||
1056 | return RX_CONTINUE; | 1057 | return RX_CONTINUE; |
@@ -1067,7 +1068,7 @@ ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) | |||
1067 | } | 1068 | } |
1068 | 1069 | ||
1069 | static int | 1070 | static int |
1070 | ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx) | 1071 | ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) |
1071 | { | 1072 | { |
1072 | /* | 1073 | /* |
1073 | * Pass through unencrypted frames if the hardware has | 1074 | * Pass through unencrypted frames if the hardware has |
@@ -1077,9 +1078,8 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx) | |||
1077 | return 0; | 1078 | return 0; |
1078 | 1079 | ||
1079 | /* Drop unencrypted frames if key is set. */ | 1080 | /* Drop unencrypted frames if key is set. */ |
1080 | if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) && | 1081 | if (unlikely(!ieee80211_has_protected(fc) && |
1081 | (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && | 1082 | !ieee80211_is_nullfunc(fc) && |
1082 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC && | ||
1083 | (rx->key || rx->sdata->drop_unencrypted))) | 1083 | (rx->key || rx->sdata->drop_unencrypted))) |
1084 | return -EACCES; | 1084 | return -EACCES; |
1085 | 1085 | ||
@@ -1091,7 +1091,7 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1091 | { | 1091 | { |
1092 | struct net_device *dev = rx->dev; | 1092 | struct net_device *dev = rx->dev; |
1093 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; | 1093 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; |
1094 | u16 fc, hdrlen, ethertype; | 1094 | u16 hdrlen, ethertype; |
1095 | u8 *payload; | 1095 | u8 *payload; |
1096 | u8 dst[ETH_ALEN]; | 1096 | u8 dst[ETH_ALEN]; |
1097 | u8 src[ETH_ALEN] __aligned(2); | 1097 | u8 src[ETH_ALEN] __aligned(2); |
@@ -1102,16 +1102,10 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1102 | DECLARE_MAC_BUF(mac3); | 1102 | DECLARE_MAC_BUF(mac3); |
1103 | DECLARE_MAC_BUF(mac4); | 1103 | DECLARE_MAC_BUF(mac4); |
1104 | 1104 | ||
1105 | fc = rx->fc; | 1105 | if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) |
1106 | |||
1107 | if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) | ||
1108 | return -1; | 1106 | return -1; |
1109 | 1107 | ||
1110 | hdrlen = ieee80211_get_hdrlen(fc); | 1108 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
1111 | |||
1112 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
1113 | hdrlen += ieee80211_get_mesh_hdrlen( | ||
1114 | (struct ieee80211s_hdr *) (skb->data + hdrlen)); | ||
1115 | 1109 | ||
1116 | /* convert IEEE 802.11 header + possible LLC headers into Ethernet | 1110 | /* convert IEEE 802.11 header + possible LLC headers into Ethernet |
1117 | * header | 1111 | * header |
@@ -1122,42 +1116,38 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1122 | * 1 0 BSSID SA DA n/a | 1116 | * 1 0 BSSID SA DA n/a |
1123 | * 1 1 RA TA DA SA | 1117 | * 1 1 RA TA DA SA |
1124 | */ | 1118 | */ |
1125 | 1119 | memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN); | |
1126 | switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { | 1120 | memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN); |
1127 | case IEEE80211_FCTL_TODS: | 1121 | |
1128 | /* BSSID SA DA */ | 1122 | switch (hdr->frame_control & |
1129 | memcpy(dst, hdr->addr3, ETH_ALEN); | 1123 | cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { |
1130 | memcpy(src, hdr->addr2, ETH_ALEN); | 1124 | case __constant_cpu_to_le16(IEEE80211_FCTL_TODS): |
1131 | 1125 | if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP && | |
1132 | if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP && | 1126 | sdata->vif.type != NL80211_IFTYPE_AP_VLAN)) |
1133 | sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) | ||
1134 | return -1; | 1127 | return -1; |
1135 | break; | 1128 | break; |
1136 | case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): | 1129 | case __constant_cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): |
1137 | /* RA TA DA SA */ | 1130 | if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS && |
1138 | memcpy(dst, hdr->addr3, ETH_ALEN); | 1131 | sdata->vif.type != NL80211_IFTYPE_MESH_POINT)) |
1139 | memcpy(src, hdr->addr4, ETH_ALEN); | ||
1140 | |||
1141 | if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS && | ||
1142 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT)) | ||
1143 | return -1; | 1132 | return -1; |
1133 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | ||
1134 | struct ieee80211s_hdr *meshdr = (struct ieee80211s_hdr *) | ||
1135 | (skb->data + hdrlen); | ||
1136 | hdrlen += ieee80211_get_mesh_hdrlen(meshdr); | ||
1137 | if (meshdr->flags & MESH_FLAGS_AE_A5_A6) { | ||
1138 | memcpy(dst, meshdr->eaddr1, ETH_ALEN); | ||
1139 | memcpy(src, meshdr->eaddr2, ETH_ALEN); | ||
1140 | } | ||
1141 | } | ||
1144 | break; | 1142 | break; |
1145 | case IEEE80211_FCTL_FROMDS: | 1143 | case __constant_cpu_to_le16(IEEE80211_FCTL_FROMDS): |
1146 | /* DA BSSID SA */ | 1144 | if (sdata->vif.type != NL80211_IFTYPE_STATION || |
1147 | memcpy(dst, hdr->addr1, ETH_ALEN); | ||
1148 | memcpy(src, hdr->addr3, ETH_ALEN); | ||
1149 | |||
1150 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA || | ||
1151 | (is_multicast_ether_addr(dst) && | 1145 | (is_multicast_ether_addr(dst) && |
1152 | !compare_ether_addr(src, dev->dev_addr))) | 1146 | !compare_ether_addr(src, dev->dev_addr))) |
1153 | return -1; | 1147 | return -1; |
1154 | break; | 1148 | break; |
1155 | case 0: | 1149 | case __constant_cpu_to_le16(0): |
1156 | /* DA SA BSSID */ | 1150 | if (sdata->vif.type != NL80211_IFTYPE_ADHOC) |
1157 | memcpy(dst, hdr->addr1, ETH_ALEN); | ||
1158 | memcpy(src, hdr->addr2, ETH_ALEN); | ||
1159 | |||
1160 | if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS) | ||
1161 | return -1; | 1151 | return -1; |
1162 | break; | 1152 | break; |
1163 | } | 1153 | } |
@@ -1193,7 +1183,7 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx) | |||
1193 | /* | 1183 | /* |
1194 | * requires that rx->skb is a frame with ethernet header | 1184 | * requires that rx->skb is a frame with ethernet header |
1195 | */ | 1185 | */ |
1196 | static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx) | 1186 | static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) |
1197 | { | 1187 | { |
1198 | static const u8 pae_group_addr[ETH_ALEN] __aligned(2) | 1188 | static const u8 pae_group_addr[ETH_ALEN] __aligned(2) |
1199 | = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; | 1189 | = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; |
@@ -1209,7 +1199,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx) | |||
1209 | return true; | 1199 | return true; |
1210 | 1200 | ||
1211 | if (ieee80211_802_1x_port_control(rx) || | 1201 | if (ieee80211_802_1x_port_control(rx) || |
1212 | ieee80211_drop_unencrypted(rx)) | 1202 | ieee80211_drop_unencrypted(rx, fc)) |
1213 | return false; | 1203 | return false; |
1214 | 1204 | ||
1215 | return true; | 1205 | return true; |
@@ -1231,8 +1221,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) | |||
1231 | skb = rx->skb; | 1221 | skb = rx->skb; |
1232 | xmit_skb = NULL; | 1222 | xmit_skb = NULL; |
1233 | 1223 | ||
1234 | if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP || | 1224 | if ((sdata->vif.type == NL80211_IFTYPE_AP || |
1235 | sdata->vif.type == IEEE80211_IF_TYPE_VLAN) && | 1225 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && |
1226 | !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && | ||
1236 | (rx->flags & IEEE80211_RX_RA_MATCH)) { | 1227 | (rx->flags & IEEE80211_RX_RA_MATCH)) { |
1237 | if (is_multicast_ether_addr(ehdr->h_dest)) { | 1228 | if (is_multicast_ether_addr(ehdr->h_dest)) { |
1238 | /* | 1229 | /* |
@@ -1279,20 +1270,21 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
1279 | { | 1270 | { |
1280 | struct net_device *dev = rx->dev; | 1271 | struct net_device *dev = rx->dev; |
1281 | struct ieee80211_local *local = rx->local; | 1272 | struct ieee80211_local *local = rx->local; |
1282 | u16 fc, ethertype; | 1273 | u16 ethertype; |
1283 | u8 *payload; | 1274 | u8 *payload; |
1284 | struct sk_buff *skb = rx->skb, *frame = NULL; | 1275 | struct sk_buff *skb = rx->skb, *frame = NULL; |
1276 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
1277 | __le16 fc = hdr->frame_control; | ||
1285 | const struct ethhdr *eth; | 1278 | const struct ethhdr *eth; |
1286 | int remaining, err; | 1279 | int remaining, err; |
1287 | u8 dst[ETH_ALEN]; | 1280 | u8 dst[ETH_ALEN]; |
1288 | u8 src[ETH_ALEN]; | 1281 | u8 src[ETH_ALEN]; |
1289 | DECLARE_MAC_BUF(mac); | 1282 | DECLARE_MAC_BUF(mac); |
1290 | 1283 | ||
1291 | fc = rx->fc; | 1284 | if (unlikely(!ieee80211_is_data(fc))) |
1292 | if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) | ||
1293 | return RX_CONTINUE; | 1285 | return RX_CONTINUE; |
1294 | 1286 | ||
1295 | if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) | 1287 | if (unlikely(!ieee80211_is_data_present(fc))) |
1296 | return RX_DROP_MONITOR; | 1288 | return RX_DROP_MONITOR; |
1297 | 1289 | ||
1298 | if (!(rx->flags & IEEE80211_RX_AMSDU)) | 1290 | if (!(rx->flags & IEEE80211_RX_AMSDU)) |
@@ -1374,7 +1366,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
1374 | memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); | 1366 | memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); |
1375 | } | 1367 | } |
1376 | 1368 | ||
1377 | if (!ieee80211_frame_allowed(rx)) { | 1369 | if (!ieee80211_frame_allowed(rx, fc)) { |
1378 | if (skb == frame) /* last frame */ | 1370 | if (skb == frame) /* last frame */ |
1379 | return RX_DROP_UNUSABLE; | 1371 | return RX_DROP_UNUSABLE; |
1380 | dev_kfree_skb(frame); | 1372 | dev_kfree_skb(frame); |
@@ -1387,7 +1379,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
1387 | return RX_QUEUED; | 1379 | return RX_QUEUED; |
1388 | } | 1380 | } |
1389 | 1381 | ||
1390 | static ieee80211_rx_result debug_noinline | 1382 | static ieee80211_rx_result |
1391 | ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | 1383 | ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) |
1392 | { | 1384 | { |
1393 | struct ieee80211_hdr *hdr; | 1385 | struct ieee80211_hdr *hdr; |
@@ -1406,6 +1398,25 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1406 | /* illegal frame */ | 1398 | /* illegal frame */ |
1407 | return RX_DROP_MONITOR; | 1399 | return RX_DROP_MONITOR; |
1408 | 1400 | ||
1401 | if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { | ||
1402 | struct ieee80211_sub_if_data *sdata; | ||
1403 | struct mesh_path *mppath; | ||
1404 | |||
1405 | sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | ||
1406 | rcu_read_lock(); | ||
1407 | mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata); | ||
1408 | if (!mppath) { | ||
1409 | mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata); | ||
1410 | } else { | ||
1411 | spin_lock_bh(&mppath->state_lock); | ||
1412 | mppath->exp_time = jiffies; | ||
1413 | if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0) | ||
1414 | memcpy(mppath->mpp, hdr->addr4, ETH_ALEN); | ||
1415 | spin_unlock_bh(&mppath->state_lock); | ||
1416 | } | ||
1417 | rcu_read_unlock(); | ||
1418 | } | ||
1419 | |||
1409 | if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0) | 1420 | if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0) |
1410 | return RX_CONTINUE; | 1421 | return RX_CONTINUE; |
1411 | 1422 | ||
@@ -1413,7 +1424,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1413 | 1424 | ||
1414 | if (rx->flags & IEEE80211_RX_RA_MATCH) { | 1425 | if (rx->flags & IEEE80211_RX_RA_MATCH) { |
1415 | if (!mesh_hdr->ttl) | 1426 | if (!mesh_hdr->ttl) |
1416 | IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.sta, | 1427 | IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh, |
1417 | dropped_frames_ttl); | 1428 | dropped_frames_ttl); |
1418 | else { | 1429 | else { |
1419 | struct ieee80211_hdr *fwd_hdr; | 1430 | struct ieee80211_hdr *fwd_hdr; |
@@ -1448,21 +1459,21 @@ static ieee80211_rx_result debug_noinline | |||
1448 | ieee80211_rx_h_data(struct ieee80211_rx_data *rx) | 1459 | ieee80211_rx_h_data(struct ieee80211_rx_data *rx) |
1449 | { | 1460 | { |
1450 | struct net_device *dev = rx->dev; | 1461 | struct net_device *dev = rx->dev; |
1451 | u16 fc; | 1462 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
1463 | __le16 fc = hdr->frame_control; | ||
1452 | int err; | 1464 | int err; |
1453 | 1465 | ||
1454 | fc = rx->fc; | 1466 | if (unlikely(!ieee80211_is_data(hdr->frame_control))) |
1455 | if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) | ||
1456 | return RX_CONTINUE; | 1467 | return RX_CONTINUE; |
1457 | 1468 | ||
1458 | if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) | 1469 | if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) |
1459 | return RX_DROP_MONITOR; | 1470 | return RX_DROP_MONITOR; |
1460 | 1471 | ||
1461 | err = ieee80211_data_to_8023(rx); | 1472 | err = ieee80211_data_to_8023(rx); |
1462 | if (unlikely(err)) | 1473 | if (unlikely(err)) |
1463 | return RX_DROP_UNUSABLE; | 1474 | return RX_DROP_UNUSABLE; |
1464 | 1475 | ||
1465 | if (!ieee80211_frame_allowed(rx)) | 1476 | if (!ieee80211_frame_allowed(rx, fc)) |
1466 | return RX_DROP_MONITOR; | 1477 | return RX_DROP_MONITOR; |
1467 | 1478 | ||
1468 | rx->skb->dev = dev; | 1479 | rx->skb->dev = dev; |
@@ -1520,22 +1531,97 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) | |||
1520 | } | 1531 | } |
1521 | 1532 | ||
1522 | static ieee80211_rx_result debug_noinline | 1533 | static ieee80211_rx_result debug_noinline |
1534 | ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | ||
1535 | { | ||
1536 | struct ieee80211_local *local = rx->local; | ||
1537 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | ||
1538 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; | ||
1539 | int len = rx->skb->len; | ||
1540 | |||
1541 | if (!ieee80211_is_action(mgmt->frame_control)) | ||
1542 | return RX_CONTINUE; | ||
1543 | |||
1544 | if (!rx->sta) | ||
1545 | return RX_DROP_MONITOR; | ||
1546 | |||
1547 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | ||
1548 | return RX_DROP_MONITOR; | ||
1549 | |||
1550 | /* all categories we currently handle have action_code */ | ||
1551 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) | ||
1552 | return RX_DROP_MONITOR; | ||
1553 | |||
1554 | /* | ||
1555 | * FIXME: revisit this, I'm sure we should handle most | ||
1556 | * of these frames in other modes as well! | ||
1557 | */ | ||
1558 | if (sdata->vif.type != NL80211_IFTYPE_STATION && | ||
1559 | sdata->vif.type != NL80211_IFTYPE_ADHOC) | ||
1560 | return RX_CONTINUE; | ||
1561 | |||
1562 | switch (mgmt->u.action.category) { | ||
1563 | case WLAN_CATEGORY_BACK: | ||
1564 | switch (mgmt->u.action.u.addba_req.action_code) { | ||
1565 | case WLAN_ACTION_ADDBA_REQ: | ||
1566 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1567 | sizeof(mgmt->u.action.u.addba_req))) | ||
1568 | return RX_DROP_MONITOR; | ||
1569 | ieee80211_process_addba_request(local, rx->sta, mgmt, len); | ||
1570 | break; | ||
1571 | case WLAN_ACTION_ADDBA_RESP: | ||
1572 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1573 | sizeof(mgmt->u.action.u.addba_resp))) | ||
1574 | return RX_DROP_MONITOR; | ||
1575 | ieee80211_process_addba_resp(local, rx->sta, mgmt, len); | ||
1576 | break; | ||
1577 | case WLAN_ACTION_DELBA: | ||
1578 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1579 | sizeof(mgmt->u.action.u.delba))) | ||
1580 | return RX_DROP_MONITOR; | ||
1581 | ieee80211_process_delba(sdata, rx->sta, mgmt, len); | ||
1582 | break; | ||
1583 | } | ||
1584 | break; | ||
1585 | case WLAN_CATEGORY_SPECTRUM_MGMT: | ||
1586 | if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) | ||
1587 | return RX_DROP_MONITOR; | ||
1588 | switch (mgmt->u.action.u.measurement.action_code) { | ||
1589 | case WLAN_ACTION_SPCT_MSR_REQ: | ||
1590 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1591 | sizeof(mgmt->u.action.u.measurement))) | ||
1592 | return RX_DROP_MONITOR; | ||
1593 | ieee80211_process_measurement_req(sdata, mgmt, len); | ||
1594 | break; | ||
1595 | } | ||
1596 | break; | ||
1597 | default: | ||
1598 | return RX_CONTINUE; | ||
1599 | } | ||
1600 | |||
1601 | rx->sta->rx_packets++; | ||
1602 | dev_kfree_skb(rx->skb); | ||
1603 | return RX_QUEUED; | ||
1604 | } | ||
1605 | |||
1606 | static ieee80211_rx_result debug_noinline | ||
1523 | ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) | 1607 | ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) |
1524 | { | 1608 | { |
1525 | struct ieee80211_sub_if_data *sdata; | 1609 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); |
1526 | 1610 | ||
1527 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | 1611 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) |
1528 | return RX_DROP_MONITOR; | 1612 | return RX_DROP_MONITOR; |
1529 | 1613 | ||
1530 | sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | 1614 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
1531 | if ((sdata->vif.type == IEEE80211_IF_TYPE_STA || | 1615 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status); |
1532 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS || | 1616 | |
1533 | sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) && | 1617 | if (sdata->vif.type != NL80211_IFTYPE_STATION && |
1534 | !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) | 1618 | sdata->vif.type != NL80211_IFTYPE_ADHOC) |
1535 | ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->status); | 1619 | return RX_DROP_MONITOR; |
1536 | else | 1620 | |
1621 | if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) | ||
1537 | return RX_DROP_MONITOR; | 1622 | return RX_DROP_MONITOR; |
1538 | 1623 | ||
1624 | ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status); | ||
1539 | return RX_QUEUED; | 1625 | return RX_QUEUED; |
1540 | } | 1626 | } |
1541 | 1627 | ||
@@ -1565,7 +1651,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev, | |||
1565 | if (!ieee80211_has_protected(hdr->frame_control)) | 1651 | if (!ieee80211_has_protected(hdr->frame_control)) |
1566 | goto ignore; | 1652 | goto ignore; |
1567 | 1653 | ||
1568 | if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) { | 1654 | if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) { |
1569 | /* | 1655 | /* |
1570 | * APs with pairwise keys should never receive Michael MIC | 1656 | * APs with pairwise keys should never receive Michael MIC |
1571 | * errors for non-zero keyidx because these are reserved for | 1657 | * errors for non-zero keyidx because these are reserved for |
@@ -1579,7 +1665,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev, | |||
1579 | !ieee80211_is_auth(hdr->frame_control)) | 1665 | !ieee80211_is_auth(hdr->frame_control)) |
1580 | goto ignore; | 1666 | goto ignore; |
1581 | 1667 | ||
1582 | mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr); | 1668 | mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr); |
1583 | ignore: | 1669 | ignore: |
1584 | dev_kfree_skb(rx->skb); | 1670 | dev_kfree_skb(rx->skb); |
1585 | rx->skb = NULL; | 1671 | rx->skb = NULL; |
@@ -1635,7 +1721,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx) | |||
1635 | if (!netif_running(sdata->dev)) | 1721 | if (!netif_running(sdata->dev)) |
1636 | continue; | 1722 | continue; |
1637 | 1723 | ||
1638 | if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR || | 1724 | if (sdata->vif.type != NL80211_IFTYPE_MONITOR || |
1639 | !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) | 1725 | !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) |
1640 | continue; | 1726 | continue; |
1641 | 1727 | ||
@@ -1698,6 +1784,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, | |||
1698 | CALL_RXH(ieee80211_rx_h_mesh_fwding); | 1784 | CALL_RXH(ieee80211_rx_h_mesh_fwding); |
1699 | CALL_RXH(ieee80211_rx_h_data) | 1785 | CALL_RXH(ieee80211_rx_h_data) |
1700 | CALL_RXH(ieee80211_rx_h_ctrl) | 1786 | CALL_RXH(ieee80211_rx_h_ctrl) |
1787 | CALL_RXH(ieee80211_rx_h_action) | ||
1701 | CALL_RXH(ieee80211_rx_h_mgmt) | 1788 | CALL_RXH(ieee80211_rx_h_mgmt) |
1702 | 1789 | ||
1703 | #undef CALL_RXH | 1790 | #undef CALL_RXH |
@@ -1733,7 +1820,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1733 | int multicast = is_multicast_ether_addr(hdr->addr1); | 1820 | int multicast = is_multicast_ether_addr(hdr->addr1); |
1734 | 1821 | ||
1735 | switch (sdata->vif.type) { | 1822 | switch (sdata->vif.type) { |
1736 | case IEEE80211_IF_TYPE_STA: | 1823 | case NL80211_IFTYPE_STATION: |
1737 | if (!bssid) | 1824 | if (!bssid) |
1738 | return 0; | 1825 | return 0; |
1739 | if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { | 1826 | if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { |
@@ -1748,14 +1835,10 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1748 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 1835 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
1749 | } | 1836 | } |
1750 | break; | 1837 | break; |
1751 | case IEEE80211_IF_TYPE_IBSS: | 1838 | case NL80211_IFTYPE_ADHOC: |
1752 | if (!bssid) | 1839 | if (!bssid) |
1753 | return 0; | 1840 | return 0; |
1754 | if (ieee80211_is_beacon(hdr->frame_control)) { | 1841 | if (ieee80211_is_beacon(hdr->frame_control)) { |
1755 | if (!rx->sta) | ||
1756 | rx->sta = ieee80211_ibss_add_sta(sdata->dev, | ||
1757 | rx->skb, bssid, hdr->addr2, | ||
1758 | BIT(rx->status->rate_idx)); | ||
1759 | return 1; | 1842 | return 1; |
1760 | } | 1843 | } |
1761 | else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { | 1844 | else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { |
@@ -1769,11 +1852,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1769 | return 0; | 1852 | return 0; |
1770 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 1853 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
1771 | } else if (!rx->sta) | 1854 | } else if (!rx->sta) |
1772 | rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, | 1855 | rx->sta = ieee80211_ibss_add_sta(sdata, rx->skb, |
1773 | bssid, hdr->addr2, | 1856 | bssid, hdr->addr2, |
1774 | BIT(rx->status->rate_idx)); | 1857 | BIT(rx->status->rate_idx)); |
1775 | break; | 1858 | break; |
1776 | case IEEE80211_IF_TYPE_MESH_POINT: | 1859 | case NL80211_IFTYPE_MESH_POINT: |
1777 | if (!multicast && | 1860 | if (!multicast && |
1778 | compare_ether_addr(sdata->dev->dev_addr, | 1861 | compare_ether_addr(sdata->dev->dev_addr, |
1779 | hdr->addr1) != 0) { | 1862 | hdr->addr1) != 0) { |
@@ -1783,8 +1866,8 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1783 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 1866 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
1784 | } | 1867 | } |
1785 | break; | 1868 | break; |
1786 | case IEEE80211_IF_TYPE_VLAN: | 1869 | case NL80211_IFTYPE_AP_VLAN: |
1787 | case IEEE80211_IF_TYPE_AP: | 1870 | case NL80211_IFTYPE_AP: |
1788 | if (!bssid) { | 1871 | if (!bssid) { |
1789 | if (compare_ether_addr(sdata->dev->dev_addr, | 1872 | if (compare_ether_addr(sdata->dev->dev_addr, |
1790 | hdr->addr1)) | 1873 | hdr->addr1)) |
@@ -1796,16 +1879,17 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
1796 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 1879 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
1797 | } | 1880 | } |
1798 | break; | 1881 | break; |
1799 | case IEEE80211_IF_TYPE_WDS: | 1882 | case NL80211_IFTYPE_WDS: |
1800 | if (bssid || !ieee80211_is_data(hdr->frame_control)) | 1883 | if (bssid || !ieee80211_is_data(hdr->frame_control)) |
1801 | return 0; | 1884 | return 0; |
1802 | if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) | 1885 | if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) |
1803 | return 0; | 1886 | return 0; |
1804 | break; | 1887 | break; |
1805 | case IEEE80211_IF_TYPE_MNTR: | 1888 | case NL80211_IFTYPE_MONITOR: |
1806 | /* take everything */ | 1889 | /* take everything */ |
1807 | break; | 1890 | break; |
1808 | case IEEE80211_IF_TYPE_INVALID: | 1891 | case NL80211_IFTYPE_UNSPECIFIED: |
1892 | case __NL80211_IFTYPE_AFTER_LAST: | ||
1809 | /* should never get here */ | 1893 | /* should never get here */ |
1810 | WARN_ON(1); | 1894 | WARN_ON(1); |
1811 | break; | 1895 | break; |
@@ -1827,23 +1911,20 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
1827 | struct ieee80211_sub_if_data *sdata; | 1911 | struct ieee80211_sub_if_data *sdata; |
1828 | struct ieee80211_hdr *hdr; | 1912 | struct ieee80211_hdr *hdr; |
1829 | struct ieee80211_rx_data rx; | 1913 | struct ieee80211_rx_data rx; |
1830 | u16 type; | ||
1831 | int prepares; | 1914 | int prepares; |
1832 | struct ieee80211_sub_if_data *prev = NULL; | 1915 | struct ieee80211_sub_if_data *prev = NULL; |
1833 | struct sk_buff *skb_new; | 1916 | struct sk_buff *skb_new; |
1834 | u8 *bssid; | 1917 | u8 *bssid; |
1835 | 1918 | ||
1836 | hdr = (struct ieee80211_hdr *) skb->data; | 1919 | hdr = (struct ieee80211_hdr *)skb->data; |
1837 | memset(&rx, 0, sizeof(rx)); | 1920 | memset(&rx, 0, sizeof(rx)); |
1838 | rx.skb = skb; | 1921 | rx.skb = skb; |
1839 | rx.local = local; | 1922 | rx.local = local; |
1840 | 1923 | ||
1841 | rx.status = status; | 1924 | rx.status = status; |
1842 | rx.rate = rate; | 1925 | rx.rate = rate; |
1843 | rx.fc = le16_to_cpu(hdr->frame_control); | ||
1844 | type = rx.fc & IEEE80211_FCTL_FTYPE; | ||
1845 | 1926 | ||
1846 | if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) | 1927 | if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control)) |
1847 | local->dot11ReceivedFragmentCount++; | 1928 | local->dot11ReceivedFragmentCount++; |
1848 | 1929 | ||
1849 | rx.sta = sta_info_get(local, hdr->addr2); | 1930 | rx.sta = sta_info_get(local, hdr->addr2); |
@@ -1857,7 +1938,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
1857 | return; | 1938 | return; |
1858 | } | 1939 | } |
1859 | 1940 | ||
1860 | if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning)) | 1941 | if (unlikely(local->sw_scanning || local->hw_scanning)) |
1861 | rx.flags |= IEEE80211_RX_IN_SCAN; | 1942 | rx.flags |= IEEE80211_RX_IN_SCAN; |
1862 | 1943 | ||
1863 | ieee80211_parse_qos(&rx); | 1944 | ieee80211_parse_qos(&rx); |
@@ -1869,7 +1950,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
1869 | if (!netif_running(sdata->dev)) | 1950 | if (!netif_running(sdata->dev)) |
1870 | continue; | 1951 | continue; |
1871 | 1952 | ||
1872 | if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) | 1953 | if (sdata->vif.type == NL80211_IFTYPE_MONITOR) |
1873 | continue; | 1954 | continue; |
1874 | 1955 | ||
1875 | bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); | 1956 | bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); |
@@ -1904,14 +1985,12 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
1904 | prev->dev->name); | 1985 | prev->dev->name); |
1905 | continue; | 1986 | continue; |
1906 | } | 1987 | } |
1907 | rx.fc = le16_to_cpu(hdr->frame_control); | ||
1908 | ieee80211_invoke_rx_handlers(prev, &rx, skb_new); | 1988 | ieee80211_invoke_rx_handlers(prev, &rx, skb_new); |
1909 | prev = sdata; | 1989 | prev = sdata; |
1910 | } | 1990 | } |
1911 | if (prev) { | 1991 | if (prev) |
1912 | rx.fc = le16_to_cpu(hdr->frame_control); | ||
1913 | ieee80211_invoke_rx_handlers(prev, &rx, skb); | 1992 | ieee80211_invoke_rx_handlers(prev, &rx, skb); |
1914 | } else | 1993 | else |
1915 | dev_kfree_skb(skb); | 1994 | dev_kfree_skb(skb); |
1916 | } | 1995 | } |
1917 | 1996 | ||
@@ -2080,7 +2159,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, | |||
2080 | /* if this mpdu is fragmented - terminate rx aggregation session */ | 2159 | /* if this mpdu is fragmented - terminate rx aggregation session */ |
2081 | sc = le16_to_cpu(hdr->seq_ctrl); | 2160 | sc = le16_to_cpu(hdr->seq_ctrl); |
2082 | if (sc & IEEE80211_SCTL_FRAG) { | 2161 | if (sc & IEEE80211_SCTL_FRAG) { |
2083 | ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, | 2162 | ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr, |
2084 | tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); | 2163 | tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); |
2085 | ret = 1; | 2164 | ret = 1; |
2086 | goto end_reorder; | 2165 | goto end_reorder; |
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c new file mode 100644 index 000000000000..8e6685e7ae85 --- /dev/null +++ b/net/mac80211/scan.c | |||
@@ -0,0 +1,937 @@ | |||
1 | /* | ||
2 | * Scanning implementation | ||
3 | * | ||
4 | * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> | ||
5 | * Copyright 2004, Instant802 Networks, Inc. | ||
6 | * Copyright 2005, Devicescape Software, Inc. | ||
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | ||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | /* TODO: | ||
16 | * order BSS list by RSSI(?) ("quality of AP") | ||
17 | * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE, | ||
18 | * SSID)) | ||
19 | */ | ||
20 | |||
21 | #include <linux/wireless.h> | ||
22 | #include <linux/if_arp.h> | ||
23 | #include <net/mac80211.h> | ||
24 | #include <net/iw_handler.h> | ||
25 | |||
26 | #include "ieee80211_i.h" | ||
27 | #include "mesh.h" | ||
28 | |||
29 | #define IEEE80211_PROBE_DELAY (HZ / 33) | ||
30 | #define IEEE80211_CHANNEL_TIME (HZ / 33) | ||
31 | #define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5) | ||
32 | |||
33 | void ieee80211_rx_bss_list_init(struct ieee80211_local *local) | ||
34 | { | ||
35 | spin_lock_init(&local->bss_lock); | ||
36 | INIT_LIST_HEAD(&local->bss_list); | ||
37 | } | ||
38 | |||
39 | void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local) | ||
40 | { | ||
41 | struct ieee80211_bss *bss, *tmp; | ||
42 | |||
43 | list_for_each_entry_safe(bss, tmp, &local->bss_list, list) | ||
44 | ieee80211_rx_bss_put(local, bss); | ||
45 | } | ||
46 | |||
47 | struct ieee80211_bss * | ||
48 | ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, | ||
49 | u8 *ssid, u8 ssid_len) | ||
50 | { | ||
51 | struct ieee80211_bss *bss; | ||
52 | |||
53 | spin_lock_bh(&local->bss_lock); | ||
54 | bss = local->bss_hash[STA_HASH(bssid)]; | ||
55 | while (bss) { | ||
56 | if (!bss_mesh_cfg(bss) && | ||
57 | !memcmp(bss->bssid, bssid, ETH_ALEN) && | ||
58 | bss->freq == freq && | ||
59 | bss->ssid_len == ssid_len && | ||
60 | (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) { | ||
61 | atomic_inc(&bss->users); | ||
62 | break; | ||
63 | } | ||
64 | bss = bss->hnext; | ||
65 | } | ||
66 | spin_unlock_bh(&local->bss_lock); | ||
67 | return bss; | ||
68 | } | ||
69 | |||
70 | /* Caller must hold local->bss_lock */ | ||
71 | static void __ieee80211_rx_bss_hash_add(struct ieee80211_local *local, | ||
72 | struct ieee80211_bss *bss) | ||
73 | { | ||
74 | u8 hash_idx; | ||
75 | |||
76 | if (bss_mesh_cfg(bss)) | ||
77 | hash_idx = mesh_id_hash(bss_mesh_id(bss), | ||
78 | bss_mesh_id_len(bss)); | ||
79 | else | ||
80 | hash_idx = STA_HASH(bss->bssid); | ||
81 | |||
82 | bss->hnext = local->bss_hash[hash_idx]; | ||
83 | local->bss_hash[hash_idx] = bss; | ||
84 | } | ||
85 | |||
86 | /* Caller must hold local->bss_lock */ | ||
87 | static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local, | ||
88 | struct ieee80211_bss *bss) | ||
89 | { | ||
90 | struct ieee80211_bss *b, *prev = NULL; | ||
91 | b = local->bss_hash[STA_HASH(bss->bssid)]; | ||
92 | while (b) { | ||
93 | if (b == bss) { | ||
94 | if (!prev) | ||
95 | local->bss_hash[STA_HASH(bss->bssid)] = | ||
96 | bss->hnext; | ||
97 | else | ||
98 | prev->hnext = bss->hnext; | ||
99 | break; | ||
100 | } | ||
101 | prev = b; | ||
102 | b = b->hnext; | ||
103 | } | ||
104 | } | ||
105 | |||
106 | struct ieee80211_bss * | ||
107 | ieee80211_rx_bss_add(struct ieee80211_local *local, u8 *bssid, int freq, | ||
108 | u8 *ssid, u8 ssid_len) | ||
109 | { | ||
110 | struct ieee80211_bss *bss; | ||
111 | |||
112 | bss = kzalloc(sizeof(*bss), GFP_ATOMIC); | ||
113 | if (!bss) | ||
114 | return NULL; | ||
115 | atomic_set(&bss->users, 2); | ||
116 | memcpy(bss->bssid, bssid, ETH_ALEN); | ||
117 | bss->freq = freq; | ||
118 | if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) { | ||
119 | memcpy(bss->ssid, ssid, ssid_len); | ||
120 | bss->ssid_len = ssid_len; | ||
121 | } | ||
122 | |||
123 | spin_lock_bh(&local->bss_lock); | ||
124 | /* TODO: order by RSSI? */ | ||
125 | list_add_tail(&bss->list, &local->bss_list); | ||
126 | __ieee80211_rx_bss_hash_add(local, bss); | ||
127 | spin_unlock_bh(&local->bss_lock); | ||
128 | return bss; | ||
129 | } | ||
130 | |||
131 | #ifdef CONFIG_MAC80211_MESH | ||
132 | static struct ieee80211_bss * | ||
133 | ieee80211_rx_mesh_bss_get(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len, | ||
134 | u8 *mesh_cfg, int freq) | ||
135 | { | ||
136 | struct ieee80211_bss *bss; | ||
137 | |||
138 | spin_lock_bh(&local->bss_lock); | ||
139 | bss = local->bss_hash[mesh_id_hash(mesh_id, mesh_id_len)]; | ||
140 | while (bss) { | ||
141 | if (bss_mesh_cfg(bss) && | ||
142 | !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) && | ||
143 | bss->freq == freq && | ||
144 | mesh_id_len == bss->mesh_id_len && | ||
145 | (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id, | ||
146 | mesh_id_len))) { | ||
147 | atomic_inc(&bss->users); | ||
148 | break; | ||
149 | } | ||
150 | bss = bss->hnext; | ||
151 | } | ||
152 | spin_unlock_bh(&local->bss_lock); | ||
153 | return bss; | ||
154 | } | ||
155 | |||
156 | static struct ieee80211_bss * | ||
157 | ieee80211_rx_mesh_bss_add(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len, | ||
158 | u8 *mesh_cfg, int mesh_config_len, int freq) | ||
159 | { | ||
160 | struct ieee80211_bss *bss; | ||
161 | |||
162 | if (mesh_config_len != MESH_CFG_LEN) | ||
163 | return NULL; | ||
164 | |||
165 | bss = kzalloc(sizeof(*bss), GFP_ATOMIC); | ||
166 | if (!bss) | ||
167 | return NULL; | ||
168 | |||
169 | bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC); | ||
170 | if (!bss->mesh_cfg) { | ||
171 | kfree(bss); | ||
172 | return NULL; | ||
173 | } | ||
174 | |||
175 | if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) { | ||
176 | bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC); | ||
177 | if (!bss->mesh_id) { | ||
178 | kfree(bss->mesh_cfg); | ||
179 | kfree(bss); | ||
180 | return NULL; | ||
181 | } | ||
182 | memcpy(bss->mesh_id, mesh_id, mesh_id_len); | ||
183 | } | ||
184 | |||
185 | atomic_set(&bss->users, 2); | ||
186 | memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN); | ||
187 | bss->mesh_id_len = mesh_id_len; | ||
188 | bss->freq = freq; | ||
189 | spin_lock_bh(&local->bss_lock); | ||
190 | /* TODO: order by RSSI? */ | ||
191 | list_add_tail(&bss->list, &local->bss_list); | ||
192 | __ieee80211_rx_bss_hash_add(local, bss); | ||
193 | spin_unlock_bh(&local->bss_lock); | ||
194 | return bss; | ||
195 | } | ||
196 | #endif | ||
197 | |||
198 | static void ieee80211_rx_bss_free(struct ieee80211_bss *bss) | ||
199 | { | ||
200 | kfree(bss->ies); | ||
201 | kfree(bss_mesh_id(bss)); | ||
202 | kfree(bss_mesh_cfg(bss)); | ||
203 | kfree(bss); | ||
204 | } | ||
205 | |||
206 | void ieee80211_rx_bss_put(struct ieee80211_local *local, | ||
207 | struct ieee80211_bss *bss) | ||
208 | { | ||
209 | local_bh_disable(); | ||
210 | if (!atomic_dec_and_lock(&bss->users, &local->bss_lock)) { | ||
211 | local_bh_enable(); | ||
212 | return; | ||
213 | } | ||
214 | |||
215 | __ieee80211_rx_bss_hash_del(local, bss); | ||
216 | list_del(&bss->list); | ||
217 | spin_unlock_bh(&local->bss_lock); | ||
218 | ieee80211_rx_bss_free(bss); | ||
219 | } | ||
220 | |||
221 | struct ieee80211_bss * | ||
222 | ieee80211_bss_info_update(struct ieee80211_local *local, | ||
223 | struct ieee80211_rx_status *rx_status, | ||
224 | struct ieee80211_mgmt *mgmt, | ||
225 | size_t len, | ||
226 | struct ieee802_11_elems *elems, | ||
227 | int freq, bool beacon) | ||
228 | { | ||
229 | struct ieee80211_bss *bss; | ||
230 | int clen; | ||
231 | |||
232 | #ifdef CONFIG_MAC80211_MESH | ||
233 | if (elems->mesh_config) | ||
234 | bss = ieee80211_rx_mesh_bss_get(local, elems->mesh_id, | ||
235 | elems->mesh_id_len, elems->mesh_config, freq); | ||
236 | else | ||
237 | #endif | ||
238 | bss = ieee80211_rx_bss_get(local, mgmt->bssid, freq, | ||
239 | elems->ssid, elems->ssid_len); | ||
240 | if (!bss) { | ||
241 | #ifdef CONFIG_MAC80211_MESH | ||
242 | if (elems->mesh_config) | ||
243 | bss = ieee80211_rx_mesh_bss_add(local, elems->mesh_id, | ||
244 | elems->mesh_id_len, elems->mesh_config, | ||
245 | elems->mesh_config_len, freq); | ||
246 | else | ||
247 | #endif | ||
248 | bss = ieee80211_rx_bss_add(local, mgmt->bssid, freq, | ||
249 | elems->ssid, elems->ssid_len); | ||
250 | if (!bss) | ||
251 | return NULL; | ||
252 | } else { | ||
253 | #if 0 | ||
254 | /* TODO: order by RSSI? */ | ||
255 | spin_lock_bh(&local->bss_lock); | ||
256 | list_move_tail(&bss->list, &local->bss_list); | ||
257 | spin_unlock_bh(&local->bss_lock); | ||
258 | #endif | ||
259 | } | ||
260 | |||
261 | /* save the ERP value so that it is available at association time */ | ||
262 | if (elems->erp_info && elems->erp_info_len >= 1) { | ||
263 | bss->erp_value = elems->erp_info[0]; | ||
264 | bss->has_erp_value = 1; | ||
265 | } | ||
266 | |||
267 | bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); | ||
268 | bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); | ||
269 | |||
270 | if (elems->tim) { | ||
271 | struct ieee80211_tim_ie *tim_ie = | ||
272 | (struct ieee80211_tim_ie *)elems->tim; | ||
273 | bss->dtim_period = tim_ie->dtim_period; | ||
274 | } | ||
275 | |||
276 | /* set default value for buggy APs */ | ||
277 | if (!elems->tim || bss->dtim_period == 0) | ||
278 | bss->dtim_period = 1; | ||
279 | |||
280 | bss->supp_rates_len = 0; | ||
281 | if (elems->supp_rates) { | ||
282 | clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; | ||
283 | if (clen > elems->supp_rates_len) | ||
284 | clen = elems->supp_rates_len; | ||
285 | memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates, | ||
286 | clen); | ||
287 | bss->supp_rates_len += clen; | ||
288 | } | ||
289 | if (elems->ext_supp_rates) { | ||
290 | clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; | ||
291 | if (clen > elems->ext_supp_rates_len) | ||
292 | clen = elems->ext_supp_rates_len; | ||
293 | memcpy(&bss->supp_rates[bss->supp_rates_len], | ||
294 | elems->ext_supp_rates, clen); | ||
295 | bss->supp_rates_len += clen; | ||
296 | } | ||
297 | |||
298 | bss->band = rx_status->band; | ||
299 | |||
300 | bss->timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); | ||
301 | bss->last_update = jiffies; | ||
302 | bss->signal = rx_status->signal; | ||
303 | bss->noise = rx_status->noise; | ||
304 | bss->qual = rx_status->qual; | ||
305 | bss->wmm_used = elems->wmm_param || elems->wmm_info; | ||
306 | |||
307 | if (!beacon) | ||
308 | bss->last_probe_resp = jiffies; | ||
309 | |||
310 | /* | ||
311 | * For probe responses, or if we don't have any information yet, | ||
312 | * use the IEs from the beacon. | ||
313 | */ | ||
314 | if (!bss->ies || !beacon) { | ||
315 | if (bss->ies == NULL || bss->ies_len < elems->total_len) { | ||
316 | kfree(bss->ies); | ||
317 | bss->ies = kmalloc(elems->total_len, GFP_ATOMIC); | ||
318 | } | ||
319 | if (bss->ies) { | ||
320 | memcpy(bss->ies, elems->ie_start, elems->total_len); | ||
321 | bss->ies_len = elems->total_len; | ||
322 | } else | ||
323 | bss->ies_len = 0; | ||
324 | } | ||
325 | |||
326 | return bss; | ||
327 | } | ||
328 | |||
329 | ieee80211_rx_result | ||
330 | ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | ||
331 | struct ieee80211_rx_status *rx_status) | ||
332 | { | ||
333 | struct ieee80211_mgmt *mgmt; | ||
334 | struct ieee80211_bss *bss; | ||
335 | u8 *elements; | ||
336 | struct ieee80211_channel *channel; | ||
337 | size_t baselen; | ||
338 | int freq; | ||
339 | __le16 fc; | ||
340 | bool presp, beacon = false; | ||
341 | struct ieee802_11_elems elems; | ||
342 | |||
343 | if (skb->len < 2) | ||
344 | return RX_DROP_UNUSABLE; | ||
345 | |||
346 | mgmt = (struct ieee80211_mgmt *) skb->data; | ||
347 | fc = mgmt->frame_control; | ||
348 | |||
349 | if (ieee80211_is_ctl(fc)) | ||
350 | return RX_CONTINUE; | ||
351 | |||
352 | if (skb->len < 24) | ||
353 | return RX_DROP_MONITOR; | ||
354 | |||
355 | presp = ieee80211_is_probe_resp(fc); | ||
356 | if (presp) { | ||
357 | /* ignore ProbeResp to foreign address */ | ||
358 | if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) | ||
359 | return RX_DROP_MONITOR; | ||
360 | |||
361 | presp = true; | ||
362 | elements = mgmt->u.probe_resp.variable; | ||
363 | baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); | ||
364 | } else { | ||
365 | beacon = ieee80211_is_beacon(fc); | ||
366 | baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable); | ||
367 | elements = mgmt->u.beacon.variable; | ||
368 | } | ||
369 | |||
370 | if (!presp && !beacon) | ||
371 | return RX_CONTINUE; | ||
372 | |||
373 | if (baselen > skb->len) | ||
374 | return RX_DROP_MONITOR; | ||
375 | |||
376 | ieee802_11_parse_elems(elements, skb->len - baselen, &elems); | ||
377 | |||
378 | if (elems.ds_params && elems.ds_params_len == 1) | ||
379 | freq = ieee80211_channel_to_frequency(elems.ds_params[0]); | ||
380 | else | ||
381 | freq = rx_status->freq; | ||
382 | |||
383 | channel = ieee80211_get_channel(sdata->local->hw.wiphy, freq); | ||
384 | |||
385 | if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) | ||
386 | return RX_DROP_MONITOR; | ||
387 | |||
388 | bss = ieee80211_bss_info_update(sdata->local, rx_status, | ||
389 | mgmt, skb->len, &elems, | ||
390 | freq, beacon); | ||
391 | ieee80211_rx_bss_put(sdata->local, bss); | ||
392 | |||
393 | dev_kfree_skb(skb); | ||
394 | return RX_QUEUED; | ||
395 | } | ||
396 | |||
397 | static void ieee80211_send_nullfunc(struct ieee80211_local *local, | ||
398 | struct ieee80211_sub_if_data *sdata, | ||
399 | int powersave) | ||
400 | { | ||
401 | struct sk_buff *skb; | ||
402 | struct ieee80211_hdr *nullfunc; | ||
403 | __le16 fc; | ||
404 | |||
405 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); | ||
406 | if (!skb) { | ||
407 | printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc " | ||
408 | "frame\n", sdata->dev->name); | ||
409 | return; | ||
410 | } | ||
411 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
412 | |||
413 | nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); | ||
414 | memset(nullfunc, 0, 24); | ||
415 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | | ||
416 | IEEE80211_FCTL_TODS); | ||
417 | if (powersave) | ||
418 | fc |= cpu_to_le16(IEEE80211_FCTL_PM); | ||
419 | nullfunc->frame_control = fc; | ||
420 | memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN); | ||
421 | memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); | ||
422 | memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN); | ||
423 | |||
424 | ieee80211_tx_skb(sdata, skb, 0); | ||
425 | } | ||
426 | |||
427 | void ieee80211_scan_completed(struct ieee80211_hw *hw) | ||
428 | { | ||
429 | struct ieee80211_local *local = hw_to_local(hw); | ||
430 | struct ieee80211_sub_if_data *sdata; | ||
431 | union iwreq_data wrqu; | ||
432 | |||
433 | if (WARN_ON(!local->hw_scanning && !local->sw_scanning)) | ||
434 | return; | ||
435 | |||
436 | local->last_scan_completed = jiffies; | ||
437 | memset(&wrqu, 0, sizeof(wrqu)); | ||
438 | |||
439 | /* | ||
440 | * local->scan_sdata could have been NULLed by the interface | ||
441 | * down code in case we were scanning on an interface that is | ||
442 | * being taken down. | ||
443 | */ | ||
444 | sdata = local->scan_sdata; | ||
445 | if (sdata) | ||
446 | wireless_send_event(sdata->dev, SIOCGIWSCAN, &wrqu, NULL); | ||
447 | |||
448 | if (local->hw_scanning) { | ||
449 | local->hw_scanning = false; | ||
450 | if (ieee80211_hw_config(local)) | ||
451 | printk(KERN_DEBUG "%s: failed to restore operational " | ||
452 | "channel after scan\n", wiphy_name(local->hw.wiphy)); | ||
453 | |||
454 | goto done; | ||
455 | } | ||
456 | |||
457 | local->sw_scanning = false; | ||
458 | if (ieee80211_hw_config(local)) | ||
459 | printk(KERN_DEBUG "%s: failed to restore operational " | ||
460 | "channel after scan\n", wiphy_name(local->hw.wiphy)); | ||
461 | |||
462 | |||
463 | netif_tx_lock_bh(local->mdev); | ||
464 | netif_addr_lock(local->mdev); | ||
465 | local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC; | ||
466 | local->ops->configure_filter(local_to_hw(local), | ||
467 | FIF_BCN_PRBRESP_PROMISC, | ||
468 | &local->filter_flags, | ||
469 | local->mdev->mc_count, | ||
470 | local->mdev->mc_list); | ||
471 | |||
472 | netif_addr_unlock(local->mdev); | ||
473 | netif_tx_unlock_bh(local->mdev); | ||
474 | |||
475 | rcu_read_lock(); | ||
476 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | ||
477 | /* Tell AP we're back */ | ||
478 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
479 | if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) { | ||
480 | ieee80211_send_nullfunc(local, sdata, 0); | ||
481 | netif_tx_wake_all_queues(sdata->dev); | ||
482 | } | ||
483 | } else | ||
484 | netif_tx_wake_all_queues(sdata->dev); | ||
485 | } | ||
486 | rcu_read_unlock(); | ||
487 | |||
488 | done: | ||
489 | ieee80211_mlme_notify_scan_completed(local); | ||
490 | ieee80211_mesh_notify_scan_completed(local); | ||
491 | } | ||
492 | EXPORT_SYMBOL(ieee80211_scan_completed); | ||
493 | |||
494 | |||
495 | void ieee80211_scan_work(struct work_struct *work) | ||
496 | { | ||
497 | struct ieee80211_local *local = | ||
498 | container_of(work, struct ieee80211_local, scan_work.work); | ||
499 | struct ieee80211_sub_if_data *sdata = local->scan_sdata; | ||
500 | struct ieee80211_supported_band *sband; | ||
501 | struct ieee80211_channel *chan; | ||
502 | int skip; | ||
503 | unsigned long next_delay = 0; | ||
504 | |||
505 | /* | ||
506 | * Avoid re-scheduling when the sdata is going away. | ||
507 | */ | ||
508 | if (!netif_running(sdata->dev)) | ||
509 | return; | ||
510 | |||
511 | switch (local->scan_state) { | ||
512 | case SCAN_SET_CHANNEL: | ||
513 | /* | ||
514 | * Get current scan band. scan_band may be IEEE80211_NUM_BANDS | ||
515 | * after we successfully scanned the last channel of the last | ||
516 | * band (and the last band is supported by the hw) | ||
517 | */ | ||
518 | if (local->scan_band < IEEE80211_NUM_BANDS) | ||
519 | sband = local->hw.wiphy->bands[local->scan_band]; | ||
520 | else | ||
521 | sband = NULL; | ||
522 | |||
523 | /* | ||
524 | * If we are at an unsupported band and have more bands | ||
525 | * left to scan, advance to the next supported one. | ||
526 | */ | ||
527 | while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) { | ||
528 | local->scan_band++; | ||
529 | sband = local->hw.wiphy->bands[local->scan_band]; | ||
530 | local->scan_channel_idx = 0; | ||
531 | } | ||
532 | |||
533 | /* if no more bands/channels left, complete scan */ | ||
534 | if (!sband || local->scan_channel_idx >= sband->n_channels) { | ||
535 | ieee80211_scan_completed(local_to_hw(local)); | ||
536 | return; | ||
537 | } | ||
538 | skip = 0; | ||
539 | chan = &sband->channels[local->scan_channel_idx]; | ||
540 | |||
541 | if (chan->flags & IEEE80211_CHAN_DISABLED || | ||
542 | (sdata->vif.type == NL80211_IFTYPE_ADHOC && | ||
543 | chan->flags & IEEE80211_CHAN_NO_IBSS)) | ||
544 | skip = 1; | ||
545 | |||
546 | if (!skip) { | ||
547 | local->scan_channel = chan; | ||
548 | if (ieee80211_hw_config(local)) { | ||
549 | printk(KERN_DEBUG "%s: failed to set freq to " | ||
550 | "%d MHz for scan\n", wiphy_name(local->hw.wiphy), | ||
551 | chan->center_freq); | ||
552 | skip = 1; | ||
553 | } | ||
554 | } | ||
555 | |||
556 | /* advance state machine to next channel/band */ | ||
557 | local->scan_channel_idx++; | ||
558 | if (local->scan_channel_idx >= sband->n_channels) { | ||
559 | /* | ||
560 | * scan_band may end up == IEEE80211_NUM_BANDS, but | ||
561 | * we'll catch that case above and complete the scan | ||
562 | * if that is the case. | ||
563 | */ | ||
564 | local->scan_band++; | ||
565 | local->scan_channel_idx = 0; | ||
566 | } | ||
567 | |||
568 | if (skip) | ||
569 | break; | ||
570 | |||
571 | next_delay = IEEE80211_PROBE_DELAY + | ||
572 | usecs_to_jiffies(local->hw.channel_change_time); | ||
573 | local->scan_state = SCAN_SEND_PROBE; | ||
574 | break; | ||
575 | case SCAN_SEND_PROBE: | ||
576 | next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; | ||
577 | local->scan_state = SCAN_SET_CHANNEL; | ||
578 | |||
579 | if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN) | ||
580 | break; | ||
581 | ieee80211_send_probe_req(sdata, NULL, local->scan_ssid, | ||
582 | local->scan_ssid_len); | ||
583 | next_delay = IEEE80211_CHANNEL_TIME; | ||
584 | break; | ||
585 | } | ||
586 | |||
587 | queue_delayed_work(local->hw.workqueue, &local->scan_work, | ||
588 | next_delay); | ||
589 | } | ||
590 | |||
591 | |||
592 | int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata, | ||
593 | u8 *ssid, size_t ssid_len) | ||
594 | { | ||
595 | struct ieee80211_local *local = scan_sdata->local; | ||
596 | struct ieee80211_sub_if_data *sdata; | ||
597 | |||
598 | if (ssid_len > IEEE80211_MAX_SSID_LEN) | ||
599 | return -EINVAL; | ||
600 | |||
601 | /* MLME-SCAN.request (page 118) page 144 (11.1.3.1) | ||
602 | * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS | ||
603 | * BSSID: MACAddress | ||
604 | * SSID | ||
605 | * ScanType: ACTIVE, PASSIVE | ||
606 | * ProbeDelay: delay (in microseconds) to be used prior to transmitting | ||
607 | * a Probe frame during active scanning | ||
608 | * ChannelList | ||
609 | * MinChannelTime (>= ProbeDelay), in TU | ||
610 | * MaxChannelTime: (>= MinChannelTime), in TU | ||
611 | */ | ||
612 | |||
613 | /* MLME-SCAN.confirm | ||
614 | * BSSDescriptionSet | ||
615 | * ResultCode: SUCCESS, INVALID_PARAMETERS | ||
616 | */ | ||
617 | |||
618 | if (local->sw_scanning || local->hw_scanning) { | ||
619 | if (local->scan_sdata == scan_sdata) | ||
620 | return 0; | ||
621 | return -EBUSY; | ||
622 | } | ||
623 | |||
624 | if (local->ops->hw_scan) { | ||
625 | int rc; | ||
626 | |||
627 | local->hw_scanning = true; | ||
628 | rc = local->ops->hw_scan(local_to_hw(local), ssid, ssid_len); | ||
629 | if (rc) { | ||
630 | local->hw_scanning = false; | ||
631 | return rc; | ||
632 | } | ||
633 | local->scan_sdata = scan_sdata; | ||
634 | return 0; | ||
635 | } | ||
636 | |||
637 | local->sw_scanning = true; | ||
638 | |||
639 | rcu_read_lock(); | ||
640 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | ||
641 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
642 | if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) { | ||
643 | netif_tx_stop_all_queues(sdata->dev); | ||
644 | ieee80211_send_nullfunc(local, sdata, 1); | ||
645 | } | ||
646 | } else | ||
647 | netif_tx_stop_all_queues(sdata->dev); | ||
648 | } | ||
649 | rcu_read_unlock(); | ||
650 | |||
651 | if (ssid) { | ||
652 | local->scan_ssid_len = ssid_len; | ||
653 | memcpy(local->scan_ssid, ssid, ssid_len); | ||
654 | } else | ||
655 | local->scan_ssid_len = 0; | ||
656 | local->scan_state = SCAN_SET_CHANNEL; | ||
657 | local->scan_channel_idx = 0; | ||
658 | local->scan_band = IEEE80211_BAND_2GHZ; | ||
659 | local->scan_sdata = scan_sdata; | ||
660 | |||
661 | netif_addr_lock_bh(local->mdev); | ||
662 | local->filter_flags |= FIF_BCN_PRBRESP_PROMISC; | ||
663 | local->ops->configure_filter(local_to_hw(local), | ||
664 | FIF_BCN_PRBRESP_PROMISC, | ||
665 | &local->filter_flags, | ||
666 | local->mdev->mc_count, | ||
667 | local->mdev->mc_list); | ||
668 | netif_addr_unlock_bh(local->mdev); | ||
669 | |||
670 | /* TODO: start scan as soon as all nullfunc frames are ACKed */ | ||
671 | queue_delayed_work(local->hw.workqueue, &local->scan_work, | ||
672 | IEEE80211_CHANNEL_TIME); | ||
673 | |||
674 | return 0; | ||
675 | } | ||
676 | |||
677 | |||
678 | int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, | ||
679 | u8 *ssid, size_t ssid_len) | ||
680 | { | ||
681 | struct ieee80211_local *local = sdata->local; | ||
682 | struct ieee80211_if_sta *ifsta; | ||
683 | |||
684 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | ||
685 | return ieee80211_start_scan(sdata, ssid, ssid_len); | ||
686 | |||
687 | /* | ||
688 | * STA has a state machine that might need to defer scanning | ||
689 | * while it's trying to associate/authenticate, therefore we | ||
690 | * queue it up to the state machine in that case. | ||
691 | */ | ||
692 | |||
693 | if (local->sw_scanning || local->hw_scanning) { | ||
694 | if (local->scan_sdata == sdata) | ||
695 | return 0; | ||
696 | return -EBUSY; | ||
697 | } | ||
698 | |||
699 | ifsta = &sdata->u.sta; | ||
700 | |||
701 | ifsta->scan_ssid_len = ssid_len; | ||
702 | if (ssid_len) | ||
703 | memcpy(ifsta->scan_ssid, ssid, ssid_len); | ||
704 | set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request); | ||
705 | queue_work(local->hw.workqueue, &ifsta->work); | ||
706 | |||
707 | return 0; | ||
708 | } | ||
709 | |||
710 | |||
711 | static void ieee80211_scan_add_ies(struct iw_request_info *info, | ||
712 | struct ieee80211_bss *bss, | ||
713 | char **current_ev, char *end_buf) | ||
714 | { | ||
715 | u8 *pos, *end, *next; | ||
716 | struct iw_event iwe; | ||
717 | |||
718 | if (bss == NULL || bss->ies == NULL) | ||
719 | return; | ||
720 | |||
721 | /* | ||
722 | * If needed, fragment the IEs buffer (at IE boundaries) into short | ||
723 | * enough fragments to fit into IW_GENERIC_IE_MAX octet messages. | ||
724 | */ | ||
725 | pos = bss->ies; | ||
726 | end = pos + bss->ies_len; | ||
727 | |||
728 | while (end - pos > IW_GENERIC_IE_MAX) { | ||
729 | next = pos + 2 + pos[1]; | ||
730 | while (next + 2 + next[1] - pos < IW_GENERIC_IE_MAX) | ||
731 | next = next + 2 + next[1]; | ||
732 | |||
733 | memset(&iwe, 0, sizeof(iwe)); | ||
734 | iwe.cmd = IWEVGENIE; | ||
735 | iwe.u.data.length = next - pos; | ||
736 | *current_ev = iwe_stream_add_point(info, *current_ev, | ||
737 | end_buf, &iwe, pos); | ||
738 | |||
739 | pos = next; | ||
740 | } | ||
741 | |||
742 | if (end > pos) { | ||
743 | memset(&iwe, 0, sizeof(iwe)); | ||
744 | iwe.cmd = IWEVGENIE; | ||
745 | iwe.u.data.length = end - pos; | ||
746 | *current_ev = iwe_stream_add_point(info, *current_ev, | ||
747 | end_buf, &iwe, pos); | ||
748 | } | ||
749 | } | ||
750 | |||
751 | |||
752 | static char * | ||
753 | ieee80211_scan_result(struct ieee80211_local *local, | ||
754 | struct iw_request_info *info, | ||
755 | struct ieee80211_bss *bss, | ||
756 | char *current_ev, char *end_buf) | ||
757 | { | ||
758 | struct iw_event iwe; | ||
759 | char *buf; | ||
760 | |||
761 | if (time_after(jiffies, | ||
762 | bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE)) | ||
763 | return current_ev; | ||
764 | |||
765 | memset(&iwe, 0, sizeof(iwe)); | ||
766 | iwe.cmd = SIOCGIWAP; | ||
767 | iwe.u.ap_addr.sa_family = ARPHRD_ETHER; | ||
768 | memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN); | ||
769 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
770 | IW_EV_ADDR_LEN); | ||
771 | |||
772 | memset(&iwe, 0, sizeof(iwe)); | ||
773 | iwe.cmd = SIOCGIWESSID; | ||
774 | if (bss_mesh_cfg(bss)) { | ||
775 | iwe.u.data.length = bss_mesh_id_len(bss); | ||
776 | iwe.u.data.flags = 1; | ||
777 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
778 | &iwe, bss_mesh_id(bss)); | ||
779 | } else { | ||
780 | iwe.u.data.length = bss->ssid_len; | ||
781 | iwe.u.data.flags = 1; | ||
782 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
783 | &iwe, bss->ssid); | ||
784 | } | ||
785 | |||
786 | if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) | ||
787 | || bss_mesh_cfg(bss)) { | ||
788 | memset(&iwe, 0, sizeof(iwe)); | ||
789 | iwe.cmd = SIOCGIWMODE; | ||
790 | if (bss_mesh_cfg(bss)) | ||
791 | iwe.u.mode = IW_MODE_MESH; | ||
792 | else if (bss->capability & WLAN_CAPABILITY_ESS) | ||
793 | iwe.u.mode = IW_MODE_MASTER; | ||
794 | else | ||
795 | iwe.u.mode = IW_MODE_ADHOC; | ||
796 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, | ||
797 | &iwe, IW_EV_UINT_LEN); | ||
798 | } | ||
799 | |||
800 | memset(&iwe, 0, sizeof(iwe)); | ||
801 | iwe.cmd = SIOCGIWFREQ; | ||
802 | iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq); | ||
803 | iwe.u.freq.e = 0; | ||
804 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
805 | IW_EV_FREQ_LEN); | ||
806 | |||
807 | memset(&iwe, 0, sizeof(iwe)); | ||
808 | iwe.cmd = SIOCGIWFREQ; | ||
809 | iwe.u.freq.m = bss->freq; | ||
810 | iwe.u.freq.e = 6; | ||
811 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
812 | IW_EV_FREQ_LEN); | ||
813 | memset(&iwe, 0, sizeof(iwe)); | ||
814 | iwe.cmd = IWEVQUAL; | ||
815 | iwe.u.qual.qual = bss->qual; | ||
816 | iwe.u.qual.level = bss->signal; | ||
817 | iwe.u.qual.noise = bss->noise; | ||
818 | iwe.u.qual.updated = local->wstats_flags; | ||
819 | current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, | ||
820 | IW_EV_QUAL_LEN); | ||
821 | |||
822 | memset(&iwe, 0, sizeof(iwe)); | ||
823 | iwe.cmd = SIOCGIWENCODE; | ||
824 | if (bss->capability & WLAN_CAPABILITY_PRIVACY) | ||
825 | iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; | ||
826 | else | ||
827 | iwe.u.data.flags = IW_ENCODE_DISABLED; | ||
828 | iwe.u.data.length = 0; | ||
829 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
830 | &iwe, ""); | ||
831 | |||
832 | ieee80211_scan_add_ies(info, bss, ¤t_ev, end_buf); | ||
833 | |||
834 | if (bss->supp_rates_len > 0) { | ||
835 | /* display all supported rates in readable format */ | ||
836 | char *p = current_ev + iwe_stream_lcp_len(info); | ||
837 | int i; | ||
838 | |||
839 | memset(&iwe, 0, sizeof(iwe)); | ||
840 | iwe.cmd = SIOCGIWRATE; | ||
841 | /* Those two flags are ignored... */ | ||
842 | iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; | ||
843 | |||
844 | for (i = 0; i < bss->supp_rates_len; i++) { | ||
845 | iwe.u.bitrate.value = ((bss->supp_rates[i] & | ||
846 | 0x7f) * 500000); | ||
847 | p = iwe_stream_add_value(info, current_ev, p, | ||
848 | end_buf, &iwe, IW_EV_PARAM_LEN); | ||
849 | } | ||
850 | current_ev = p; | ||
851 | } | ||
852 | |||
853 | buf = kmalloc(30, GFP_ATOMIC); | ||
854 | if (buf) { | ||
855 | memset(&iwe, 0, sizeof(iwe)); | ||
856 | iwe.cmd = IWEVCUSTOM; | ||
857 | sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp)); | ||
858 | iwe.u.data.length = strlen(buf); | ||
859 | current_ev = iwe_stream_add_point(info, current_ev, end_buf, | ||
860 | &iwe, buf); | ||
861 | memset(&iwe, 0, sizeof(iwe)); | ||
862 | iwe.cmd = IWEVCUSTOM; | ||
863 | sprintf(buf, " Last beacon: %dms ago", | ||
864 | jiffies_to_msecs(jiffies - bss->last_update)); | ||
865 | iwe.u.data.length = strlen(buf); | ||
866 | current_ev = iwe_stream_add_point(info, current_ev, | ||
867 | end_buf, &iwe, buf); | ||
868 | kfree(buf); | ||
869 | } | ||
870 | |||
871 | if (bss_mesh_cfg(bss)) { | ||
872 | u8 *cfg = bss_mesh_cfg(bss); | ||
873 | buf = kmalloc(50, GFP_ATOMIC); | ||
874 | if (buf) { | ||
875 | memset(&iwe, 0, sizeof(iwe)); | ||
876 | iwe.cmd = IWEVCUSTOM; | ||
877 | sprintf(buf, "Mesh network (version %d)", cfg[0]); | ||
878 | iwe.u.data.length = strlen(buf); | ||
879 | current_ev = iwe_stream_add_point(info, current_ev, | ||
880 | end_buf, | ||
881 | &iwe, buf); | ||
882 | sprintf(buf, "Path Selection Protocol ID: " | ||
883 | "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3], | ||
884 | cfg[4]); | ||
885 | iwe.u.data.length = strlen(buf); | ||
886 | current_ev = iwe_stream_add_point(info, current_ev, | ||
887 | end_buf, | ||
888 | &iwe, buf); | ||
889 | sprintf(buf, "Path Selection Metric ID: " | ||
890 | "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7], | ||
891 | cfg[8]); | ||
892 | iwe.u.data.length = strlen(buf); | ||
893 | current_ev = iwe_stream_add_point(info, current_ev, | ||
894 | end_buf, | ||
895 | &iwe, buf); | ||
896 | sprintf(buf, "Congestion Control Mode ID: " | ||
897 | "0x%02X%02X%02X%02X", cfg[9], cfg[10], | ||
898 | cfg[11], cfg[12]); | ||
899 | iwe.u.data.length = strlen(buf); | ||
900 | current_ev = iwe_stream_add_point(info, current_ev, | ||
901 | end_buf, | ||
902 | &iwe, buf); | ||
903 | sprintf(buf, "Channel Precedence: " | ||
904 | "0x%02X%02X%02X%02X", cfg[13], cfg[14], | ||
905 | cfg[15], cfg[16]); | ||
906 | iwe.u.data.length = strlen(buf); | ||
907 | current_ev = iwe_stream_add_point(info, current_ev, | ||
908 | end_buf, | ||
909 | &iwe, buf); | ||
910 | kfree(buf); | ||
911 | } | ||
912 | } | ||
913 | |||
914 | return current_ev; | ||
915 | } | ||
916 | |||
917 | |||
918 | int ieee80211_scan_results(struct ieee80211_local *local, | ||
919 | struct iw_request_info *info, | ||
920 | char *buf, size_t len) | ||
921 | { | ||
922 | char *current_ev = buf; | ||
923 | char *end_buf = buf + len; | ||
924 | struct ieee80211_bss *bss; | ||
925 | |||
926 | spin_lock_bh(&local->bss_lock); | ||
927 | list_for_each_entry(bss, &local->bss_list, list) { | ||
928 | if (buf + len - current_ev <= IW_EV_ADDR_LEN) { | ||
929 | spin_unlock_bh(&local->bss_lock); | ||
930 | return -E2BIG; | ||
931 | } | ||
932 | current_ev = ieee80211_scan_result(local, info, bss, | ||
933 | current_ev, end_buf); | ||
934 | } | ||
935 | spin_unlock_bh(&local->bss_lock); | ||
936 | return current_ev - buf; | ||
937 | } | ||
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c new file mode 100644 index 000000000000..f72bad636d8e --- /dev/null +++ b/net/mac80211/spectmgmt.c | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * spectrum management | ||
3 | * | ||
4 | * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> | ||
5 | * Copyright 2002-2005, Instant802 Networks, Inc. | ||
6 | * Copyright 2005-2006, Devicescape Software, Inc. | ||
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | ||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | ||
9 | * Copyright 2007-2008, Intel Corporation | ||
10 | * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <linux/ieee80211.h> | ||
18 | #include <net/wireless.h> | ||
19 | #include <net/mac80211.h> | ||
20 | #include "ieee80211_i.h" | ||
21 | #include "sta_info.h" | ||
22 | #include "wme.h" | ||
23 | |||
24 | static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata, | ||
25 | struct ieee80211_msrment_ie *request_ie, | ||
26 | const u8 *da, const u8 *bssid, | ||
27 | u8 dialog_token) | ||
28 | { | ||
29 | struct ieee80211_local *local = sdata->local; | ||
30 | struct sk_buff *skb; | ||
31 | struct ieee80211_mgmt *msr_report; | ||
32 | |||
33 | skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + | ||
34 | sizeof(struct ieee80211_msrment_ie)); | ||
35 | |||
36 | if (!skb) { | ||
37 | printk(KERN_ERR "%s: failed to allocate buffer for " | ||
38 | "measurement report frame\n", sdata->dev->name); | ||
39 | return; | ||
40 | } | ||
41 | |||
42 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
43 | msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); | ||
44 | memset(msr_report, 0, 24); | ||
45 | memcpy(msr_report->da, da, ETH_ALEN); | ||
46 | memcpy(msr_report->sa, sdata->dev->dev_addr, ETH_ALEN); | ||
47 | memcpy(msr_report->bssid, bssid, ETH_ALEN); | ||
48 | msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | | ||
49 | IEEE80211_STYPE_ACTION); | ||
50 | |||
51 | skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement)); | ||
52 | msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT; | ||
53 | msr_report->u.action.u.measurement.action_code = | ||
54 | WLAN_ACTION_SPCT_MSR_RPRT; | ||
55 | msr_report->u.action.u.measurement.dialog_token = dialog_token; | ||
56 | |||
57 | msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT; | ||
58 | msr_report->u.action.u.measurement.length = | ||
59 | sizeof(struct ieee80211_msrment_ie); | ||
60 | |||
61 | memset(&msr_report->u.action.u.measurement.msr_elem, 0, | ||
62 | sizeof(struct ieee80211_msrment_ie)); | ||
63 | msr_report->u.action.u.measurement.msr_elem.token = request_ie->token; | ||
64 | msr_report->u.action.u.measurement.msr_elem.mode |= | ||
65 | IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED; | ||
66 | msr_report->u.action.u.measurement.msr_elem.type = request_ie->type; | ||
67 | |||
68 | ieee80211_tx_skb(sdata, skb, 0); | ||
69 | } | ||
70 | |||
71 | void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, | ||
72 | struct ieee80211_mgmt *mgmt, | ||
73 | size_t len) | ||
74 | { | ||
75 | /* | ||
76 | * Ignoring a measurement request is a spec violation. | ||
77 | * Mandatory measurements must be reported; optional | ||
78 | * measurements may be refused or reported as incapable. | ||
79 | * For now, just refuse. | ||
80 | * TODO: Answer basic measurement as unmeasured | ||
81 | */ | ||
82 | ieee80211_send_refuse_measurement_request(sdata, | ||
83 | &mgmt->u.action.u.measurement.msr_elem, | ||
84 | mgmt->sa, mgmt->bssid, | ||
85 | mgmt->u.action.u.measurement.dialog_token); | ||
86 | } | ||
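
The new file only ever answers a measurement request by refusing it; how such a request reaches ieee80211_process_measurement_req() is decided in the management RX path. The sketch below shows a rough, assumed dispatch: only the WLAN_CATEGORY_SPECTRUM_MGMT / WLAN_ACTION_SPCT_MSR_REQ constants and the handler above come from the kernel, the surrounding function is illustrative.

/* Illustrative dispatch only -- not part of this patch. */
static void example_rx_spectrum_mgmt(struct ieee80211_sub_if_data *sdata,
				     struct ieee80211_mgmt *mgmt, size_t len)
{
	if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
		return;

	switch (mgmt->u.action.u.measurement.action_code) {
	case WLAN_ACTION_SPCT_MSR_REQ:
		/* currently always answered with a refusal report */
		ieee80211_process_measurement_req(sdata, mgmt, len);
		break;
	default:
		/* TPC and channel-switch actions are handled elsewhere */
		break;
	}
}

In practice a length check against the fixed part of the action frame would precede the dispatch.
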
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index f2ba653b9d69..9b72d15bc8dc 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -73,11 +73,11 @@ static int sta_info_hash_del(struct ieee80211_local *local, | |||
73 | { | 73 | { |
74 | struct sta_info *s; | 74 | struct sta_info *s; |
75 | 75 | ||
76 | s = local->sta_hash[STA_HASH(sta->addr)]; | 76 | s = local->sta_hash[STA_HASH(sta->sta.addr)]; |
77 | if (!s) | 77 | if (!s) |
78 | return -ENOENT; | 78 | return -ENOENT; |
79 | if (s == sta) { | 79 | if (s == sta) { |
80 | rcu_assign_pointer(local->sta_hash[STA_HASH(sta->addr)], | 80 | rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], |
81 | s->hnext); | 81 | s->hnext); |
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
@@ -93,26 +93,19 @@ static int sta_info_hash_del(struct ieee80211_local *local, | |||
93 | } | 93 | } |
94 | 94 | ||
95 | /* protected by RCU */ | 95 | /* protected by RCU */ |
96 | static struct sta_info *__sta_info_find(struct ieee80211_local *local, | 96 | struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr) |
97 | u8 *addr) | ||
98 | { | 97 | { |
99 | struct sta_info *sta; | 98 | struct sta_info *sta; |
100 | 99 | ||
101 | sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); | 100 | sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); |
102 | while (sta) { | 101 | while (sta) { |
103 | if (compare_ether_addr(sta->addr, addr) == 0) | 102 | if (compare_ether_addr(sta->sta.addr, addr) == 0) |
104 | break; | 103 | break; |
105 | sta = rcu_dereference(sta->hnext); | 104 | sta = rcu_dereference(sta->hnext); |
106 | } | 105 | } |
107 | return sta; | 106 | return sta; |
108 | } | 107 | } |
109 | 108 | ||
110 | struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr) | ||
111 | { | ||
112 | return __sta_info_find(local, addr); | ||
113 | } | ||
114 | EXPORT_SYMBOL(sta_info_get); | ||
115 | |||
116 | struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx, | 109 | struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx, |
117 | struct net_device *dev) | 110 | struct net_device *dev) |
118 | { | 111 | { |
@@ -146,12 +139,12 @@ static void __sta_info_free(struct ieee80211_local *local, | |||
146 | { | 139 | { |
147 | DECLARE_MAC_BUF(mbuf); | 140 | DECLARE_MAC_BUF(mbuf); |
148 | 141 | ||
149 | rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv); | 142 | rate_control_free_sta(sta); |
150 | rate_control_put(sta->rate_ctrl); | 143 | rate_control_put(sta->rate_ctrl); |
151 | 144 | ||
152 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 145 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
153 | printk(KERN_DEBUG "%s: Destroyed STA %s\n", | 146 | printk(KERN_DEBUG "%s: Destroyed STA %s\n", |
154 | wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->addr)); | 147 | wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->sta.addr)); |
155 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | 148 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ |
156 | 149 | ||
157 | kfree(sta); | 150 | kfree(sta); |
@@ -219,8 +212,8 @@ void sta_info_destroy(struct sta_info *sta) | |||
219 | static void sta_info_hash_add(struct ieee80211_local *local, | 212 | static void sta_info_hash_add(struct ieee80211_local *local, |
220 | struct sta_info *sta) | 213 | struct sta_info *sta) |
221 | { | 214 | { |
222 | sta->hnext = local->sta_hash[STA_HASH(sta->addr)]; | 215 | sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)]; |
223 | rcu_assign_pointer(local->sta_hash[STA_HASH(sta->addr)], sta); | 216 | rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta); |
224 | } | 217 | } |
225 | 218 | ||
226 | struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | 219 | struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, |
@@ -231,20 +224,20 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
231 | int i; | 224 | int i; |
232 | DECLARE_MAC_BUF(mbuf); | 225 | DECLARE_MAC_BUF(mbuf); |
233 | 226 | ||
234 | sta = kzalloc(sizeof(*sta), gfp); | 227 | sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp); |
235 | if (!sta) | 228 | if (!sta) |
236 | return NULL; | 229 | return NULL; |
237 | 230 | ||
238 | spin_lock_init(&sta->lock); | 231 | spin_lock_init(&sta->lock); |
239 | spin_lock_init(&sta->flaglock); | 232 | spin_lock_init(&sta->flaglock); |
240 | 233 | ||
241 | memcpy(sta->addr, addr, ETH_ALEN); | 234 | memcpy(sta->sta.addr, addr, ETH_ALEN); |
242 | sta->local = local; | 235 | sta->local = local; |
243 | sta->sdata = sdata; | 236 | sta->sdata = sdata; |
244 | 237 | ||
245 | sta->rate_ctrl = rate_control_get(local->rate_ctrl); | 238 | sta->rate_ctrl = rate_control_get(local->rate_ctrl); |
246 | sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, | 239 | sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, |
247 | gfp); | 240 | &sta->sta, gfp); |
248 | if (!sta->rate_ctrl_priv) { | 241 | if (!sta->rate_ctrl_priv) { |
249 | rate_control_put(sta->rate_ctrl); | 242 | rate_control_put(sta->rate_ctrl); |
250 | kfree(sta); | 243 | kfree(sta); |
@@ -271,7 +264,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
271 | 264 | ||
272 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 265 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
273 | printk(KERN_DEBUG "%s: Allocated STA %s\n", | 266 | printk(KERN_DEBUG "%s: Allocated STA %s\n", |
274 | wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->addr)); | 267 | wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->sta.addr)); |
275 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | 268 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ |
276 | 269 | ||
277 | #ifdef CONFIG_MAC80211_MESH | 270 | #ifdef CONFIG_MAC80211_MESH |
@@ -300,15 +293,15 @@ int sta_info_insert(struct sta_info *sta) | |||
300 | goto out_free; | 293 | goto out_free; |
301 | } | 294 | } |
302 | 295 | ||
303 | if (WARN_ON(compare_ether_addr(sta->addr, sdata->dev->dev_addr) == 0 || | 296 | if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->dev->dev_addr) == 0 || |
304 | is_multicast_ether_addr(sta->addr))) { | 297 | is_multicast_ether_addr(sta->sta.addr))) { |
305 | err = -EINVAL; | 298 | err = -EINVAL; |
306 | goto out_free; | 299 | goto out_free; |
307 | } | 300 | } |
308 | 301 | ||
309 | spin_lock_irqsave(&local->sta_lock, flags); | 302 | spin_lock_irqsave(&local->sta_lock, flags); |
310 | /* check if STA exists already */ | 303 | /* check if STA exists already */ |
311 | if (__sta_info_find(local, sta->addr)) { | 304 | if (sta_info_get(local, sta->sta.addr)) { |
312 | spin_unlock_irqrestore(&local->sta_lock, flags); | 305 | spin_unlock_irqrestore(&local->sta_lock, flags); |
313 | err = -EEXIST; | 306 | err = -EEXIST; |
314 | goto out_free; | 307 | goto out_free; |
@@ -319,18 +312,18 @@ int sta_info_insert(struct sta_info *sta) | |||
319 | 312 | ||
320 | /* notify driver */ | 313 | /* notify driver */ |
321 | if (local->ops->sta_notify) { | 314 | if (local->ops->sta_notify) { |
322 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) | 315 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
323 | sdata = container_of(sdata->bss, | 316 | sdata = container_of(sdata->bss, |
324 | struct ieee80211_sub_if_data, | 317 | struct ieee80211_sub_if_data, |
325 | u.ap); | 318 | u.ap); |
326 | 319 | ||
327 | local->ops->sta_notify(local_to_hw(local), &sdata->vif, | 320 | local->ops->sta_notify(local_to_hw(local), &sdata->vif, |
328 | STA_NOTIFY_ADD, sta->addr); | 321 | STA_NOTIFY_ADD, &sta->sta); |
329 | } | 322 | } |
330 | 323 | ||
331 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 324 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
332 | printk(KERN_DEBUG "%s: Inserted STA %s\n", | 325 | printk(KERN_DEBUG "%s: Inserted STA %s\n", |
333 | wiphy_name(local->hw.wiphy), print_mac(mac, sta->addr)); | 326 | wiphy_name(local->hw.wiphy), print_mac(mac, sta->sta.addr)); |
334 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | 327 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ |
335 | 328 | ||
336 | spin_unlock_irqrestore(&local->sta_lock, flags); | 329 | spin_unlock_irqrestore(&local->sta_lock, flags); |
@@ -379,11 +372,12 @@ static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss, | |||
379 | { | 372 | { |
380 | BUG_ON(!bss); | 373 | BUG_ON(!bss); |
381 | 374 | ||
382 | __bss_tim_set(bss, sta->aid); | 375 | __bss_tim_set(bss, sta->sta.aid); |
383 | 376 | ||
384 | if (sta->local->ops->set_tim) { | 377 | if (sta->local->ops->set_tim) { |
385 | sta->local->tim_in_locked_section = true; | 378 | sta->local->tim_in_locked_section = true; |
386 | sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 1); | 379 | sta->local->ops->set_tim(local_to_hw(sta->local), |
380 | &sta->sta, true); | ||
387 | sta->local->tim_in_locked_section = false; | 381 | sta->local->tim_in_locked_section = false; |
388 | } | 382 | } |
389 | } | 383 | } |
@@ -404,11 +398,12 @@ static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss, | |||
404 | { | 398 | { |
405 | BUG_ON(!bss); | 399 | BUG_ON(!bss); |
406 | 400 | ||
407 | __bss_tim_clear(bss, sta->aid); | 401 | __bss_tim_clear(bss, sta->sta.aid); |
408 | 402 | ||
409 | if (sta->local->ops->set_tim) { | 403 | if (sta->local->ops->set_tim) { |
410 | sta->local->tim_in_locked_section = true; | 404 | sta->local->tim_in_locked_section = true; |
411 | sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 0); | 405 | sta->local->ops->set_tim(local_to_hw(sta->local), |
406 | &sta->sta, false); | ||
412 | sta->local->tim_in_locked_section = false; | 407 | sta->local->tim_in_locked_section = false; |
413 | } | 408 | } |
414 | } | 409 | } |
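
Both TIM helpers now hand the driver the shared struct ieee80211_sta plus a bool instead of a bare AID. Judging only from the call sites above, a driver callback would look roughly like the following; the ops prototype and the driver-private structure are assumptions for illustration.

/* Sketch of a driver set_tim() implementation; everything named
 * "example_*" is hypothetical. */
struct example_priv {
	u8 tim_bitmap[256];		/* one bit per AID */
};

static int example_set_tim(struct ieee80211_hw *hw,
			   struct ieee80211_sta *sta, bool set)
{
	struct example_priv *priv = hw->priv;
	u16 aid = sta->aid;		/* AID now comes from the shared struct */

	if (set)
		priv->tim_bitmap[aid / 8] |= 1 << (aid % 8);
	else
		priv->tim_bitmap[aid / 8] &= ~(1 << (aid % 8));

	/* rebuilding/uploading the beacon TIM element is omitted */
	return 0;
}
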
@@ -424,7 +419,7 @@ void sta_info_clear_tim_bit(struct sta_info *sta) | |||
424 | spin_unlock_irqrestore(&sta->local->sta_lock, flags); | 419 | spin_unlock_irqrestore(&sta->local->sta_lock, flags); |
425 | } | 420 | } |
426 | 421 | ||
427 | void __sta_info_unlink(struct sta_info **sta) | 422 | static void __sta_info_unlink(struct sta_info **sta) |
428 | { | 423 | { |
429 | struct ieee80211_local *local = (*sta)->local; | 424 | struct ieee80211_local *local = (*sta)->local; |
430 | struct ieee80211_sub_if_data *sdata = (*sta)->sdata; | 425 | struct ieee80211_sub_if_data *sdata = (*sta)->sdata; |
@@ -456,13 +451,13 @@ void __sta_info_unlink(struct sta_info **sta) | |||
456 | local->num_sta--; | 451 | local->num_sta--; |
457 | 452 | ||
458 | if (local->ops->sta_notify) { | 453 | if (local->ops->sta_notify) { |
459 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) | 454 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
460 | sdata = container_of(sdata->bss, | 455 | sdata = container_of(sdata->bss, |
461 | struct ieee80211_sub_if_data, | 456 | struct ieee80211_sub_if_data, |
462 | u.ap); | 457 | u.ap); |
463 | 458 | ||
464 | local->ops->sta_notify(local_to_hw(local), &sdata->vif, | 459 | local->ops->sta_notify(local_to_hw(local), &sdata->vif, |
465 | STA_NOTIFY_REMOVE, (*sta)->addr); | 460 | STA_NOTIFY_REMOVE, &(*sta)->sta); |
466 | } | 461 | } |
467 | 462 | ||
468 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | 463 | if (ieee80211_vif_is_mesh(&sdata->vif)) { |
@@ -474,7 +469,7 @@ void __sta_info_unlink(struct sta_info **sta) | |||
474 | 469 | ||
475 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 470 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
476 | printk(KERN_DEBUG "%s: Removed STA %s\n", | 471 | printk(KERN_DEBUG "%s: Removed STA %s\n", |
477 | wiphy_name(local->hw.wiphy), print_mac(mbuf, (*sta)->addr)); | 472 | wiphy_name(local->hw.wiphy), print_mac(mbuf, (*sta)->sta.addr)); |
478 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | 473 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ |
479 | 474 | ||
480 | /* | 475 | /* |
@@ -570,7 +565,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local, | |||
570 | local->total_ps_buffered--; | 565 | local->total_ps_buffered--; |
571 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 566 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
572 | printk(KERN_DEBUG "Buffered frame expired (STA " | 567 | printk(KERN_DEBUG "Buffered frame expired (STA " |
573 | "%s)\n", print_mac(mac, sta->addr)); | 568 | "%s)\n", print_mac(mac, sta->sta.addr)); |
574 | #endif | 569 | #endif |
575 | dev_kfree_skb(skb); | 570 | dev_kfree_skb(skb); |
576 | 571 | ||
@@ -802,3 +797,40 @@ void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata) | |||
802 | schedule_work(&local->sta_flush_work); | 797 | schedule_work(&local->sta_flush_work); |
803 | spin_unlock_irqrestore(&local->sta_lock, flags); | 798 | spin_unlock_irqrestore(&local->sta_lock, flags); |
804 | } | 799 | } |
800 | |||
801 | void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, | ||
802 | unsigned long exp_time) | ||
803 | { | ||
804 | struct ieee80211_local *local = sdata->local; | ||
805 | struct sta_info *sta, *tmp; | ||
806 | LIST_HEAD(tmp_list); | ||
807 | DECLARE_MAC_BUF(mac); | ||
808 | unsigned long flags; | ||
809 | |||
810 | spin_lock_irqsave(&local->sta_lock, flags); | ||
811 | list_for_each_entry_safe(sta, tmp, &local->sta_list, list) | ||
812 | if (time_after(jiffies, sta->last_rx + exp_time)) { | ||
813 | #ifdef CONFIG_MAC80211_IBSS_DEBUG | ||
814 | printk(KERN_DEBUG "%s: expiring inactive STA %s\n", | ||
815 | sdata->dev->name, print_mac(mac, sta->sta.addr)); | ||
816 | #endif | ||
817 | __sta_info_unlink(&sta); | ||
818 | if (sta) | ||
819 | list_add(&sta->list, &tmp_list); | ||
820 | } | ||
821 | spin_unlock_irqrestore(&local->sta_lock, flags); | ||
822 | |||
823 | list_for_each_entry_safe(sta, tmp, &tmp_list, list) | ||
824 | sta_info_destroy(sta); | ||
825 | } | ||
826 | |||
827 | struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw, | ||
828 | const u8 *addr) | ||
829 | { | ||
830 | struct sta_info *sta = sta_info_get(hw_to_local(hw), addr); | ||
831 | |||
832 | if (!sta) | ||
833 | return NULL; | ||
834 | return &sta->sta; | ||
835 | } | ||
836 | EXPORT_SYMBOL(ieee80211_find_sta); | ||
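
ieee80211_find_sta() gives drivers a MAC-address lookup into the shared station table. Because sta_info_get() is RCU-protected, the returned pointer is only valid inside an RCU read-side critical section. A minimal driver-side sketch; only the mac80211 calls are real, the surrounding function and debug message are made up.

static void example_peer_lookup(struct ieee80211_hw *hw, const u8 *addr)
{
	struct ieee80211_sta *sta;

	rcu_read_lock();
	sta = ieee80211_find_sta(hw, addr);
	if (sta) {
		/* shared fields may be read while the RCU lock is held */
		pr_debug("found station, aid %d\n", sta->aid);
	}
	rcu_read_unlock();
	/* sta must not be dereferenced after rcu_read_unlock() */
}
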
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 109db787ccb7..a6b51862a89d 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -167,8 +167,6 @@ struct sta_ampdu_mlme { | |||
167 | * @lock: used for locking all fields that require locking, see comments | 167 | * @lock: used for locking all fields that require locking, see comments |
168 | * in the header file. | 168 | * in the header file. |
169 | * @flaglock: spinlock for flags accesses | 169 | * @flaglock: spinlock for flags accesses |
170 | * @ht_info: HT capabilities of this STA | ||
171 | * @supp_rates: Bitmap of supported rates (per band) | ||
172 | * @addr: MAC address of this STA | 170 | * @addr: MAC address of this STA |
173 | * @aid: STA's unique AID (1..2007, 0 = not assigned yet), | 171 | * @aid: STA's unique AID (1..2007, 0 = not assigned yet), |
174 | * only used in AP (and IBSS?) mode | 172 | * only used in AP (and IBSS?) mode |
@@ -191,20 +189,15 @@ struct sta_ampdu_mlme { | |||
191 | * @last_qual: qual of last received frame from this STA | 189 | * @last_qual: qual of last received frame from this STA |
192 | * @last_noise: noise of last received frame from this STA | 190 | * @last_noise: noise of last received frame from this STA |
193 | * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) | 191 | * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) |
194 | * @wme_rx_queue: TBD | ||
195 | * @tx_filtered_count: TBD | 192 | * @tx_filtered_count: TBD |
196 | * @tx_retry_failed: TBD | 193 | * @tx_retry_failed: TBD |
197 | * @tx_retry_count: TBD | 194 | * @tx_retry_count: TBD |
198 | * @tx_num_consecutive_failures: TBD | ||
199 | * @tx_num_mpdu_ok: TBD | ||
200 | * @tx_num_mpdu_fail: TBD | ||
201 | * @fail_avg: moving percentage of failed MSDUs | 195 | * @fail_avg: moving percentage of failed MSDUs |
202 | * @tx_packets: number of RX/TX MSDUs | 196 | * @tx_packets: number of RX/TX MSDUs |
203 | * @tx_bytes: TBD | 197 | * @tx_bytes: TBD |
204 | * @tx_fragments: number of transmitted MPDUs | 198 | * @tx_fragments: number of transmitted MPDUs |
205 | * @txrate_idx: TBD | 199 | * @last_txrate_idx: Index of the last used transmit rate |
206 | * @last_txrate_idx: TBD | 200 | * @tid_seq: TBD |
207 | * @wme_tx_queue: TBD | ||
208 | * @ampdu_mlme: TBD | 201 | * @ampdu_mlme: TBD |
209 | * @timer_to_tid: identity mapping to ID timers | 202 | * @timer_to_tid: identity mapping to ID timers |
210 | * @tid_to_tx_q: map tid to tx queue | 203 | * @tid_to_tx_q: map tid to tx queue |
@@ -217,6 +210,7 @@ struct sta_ampdu_mlme { | |||
217 | * @plink_timeout: TBD | 210 | * @plink_timeout: TBD |
218 | * @plink_timer: TBD | 211 | * @plink_timer: TBD |
219 | * @debugfs: debug filesystem info | 212 | * @debugfs: debug filesystem info |
213 | * @sta: station information we share with the driver | ||
220 | */ | 214 | */ |
221 | struct sta_info { | 215 | struct sta_info { |
222 | /* General information, mostly static */ | 216 | /* General information, mostly static */ |
@@ -229,10 +223,7 @@ struct sta_info { | |||
229 | void *rate_ctrl_priv; | 223 | void *rate_ctrl_priv; |
230 | spinlock_t lock; | 224 | spinlock_t lock; |
231 | spinlock_t flaglock; | 225 | spinlock_t flaglock; |
232 | struct ieee80211_ht_info ht_info; | 226 | |
233 | u64 supp_rates[IEEE80211_NUM_BANDS]; | ||
234 | u8 addr[ETH_ALEN]; | ||
235 | u16 aid; | ||
236 | u16 listen_interval; | 227 | u16 listen_interval; |
237 | 228 | ||
238 | /* | 229 | /* |
@@ -265,17 +256,10 @@ struct sta_info { | |||
265 | int last_qual; | 256 | int last_qual; |
266 | int last_noise; | 257 | int last_noise; |
267 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; | 258 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; |
268 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | ||
269 | unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES]; | ||
270 | #endif | ||
271 | 259 | ||
272 | /* Updated from TX status path only, no locking requirements */ | 260 | /* Updated from TX status path only, no locking requirements */ |
273 | unsigned long tx_filtered_count; | 261 | unsigned long tx_filtered_count; |
274 | unsigned long tx_retry_failed, tx_retry_count; | 262 | unsigned long tx_retry_failed, tx_retry_count; |
275 | /* TODO: update in generic code not rate control? */ | ||
276 | u32 tx_num_consecutive_failures; | ||
277 | u32 tx_num_mpdu_ok; | ||
278 | u32 tx_num_mpdu_fail; | ||
279 | /* moving percentage of failed MSDUs */ | 263 | /* moving percentage of failed MSDUs */ |
280 | unsigned int fail_avg; | 264 | unsigned int fail_avg; |
281 | 265 | ||
@@ -283,12 +267,8 @@ struct sta_info { | |||
283 | unsigned long tx_packets; | 267 | unsigned long tx_packets; |
284 | unsigned long tx_bytes; | 268 | unsigned long tx_bytes; |
285 | unsigned long tx_fragments; | 269 | unsigned long tx_fragments; |
286 | int txrate_idx; | 270 | unsigned int last_txrate_idx; |
287 | int last_txrate_idx; | ||
288 | u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; | 271 | u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; |
289 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | ||
290 | unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES]; | ||
291 | #endif | ||
292 | 272 | ||
293 | /* | 273 | /* |
294 | * Aggregation information, locked with lock. | 274 | * Aggregation information, locked with lock. |
@@ -319,13 +299,12 @@ struct sta_info { | |||
319 | struct dentry *num_ps_buf_frames; | 299 | struct dentry *num_ps_buf_frames; |
320 | struct dentry *inactive_ms; | 300 | struct dentry *inactive_ms; |
321 | struct dentry *last_seq_ctrl; | 301 | struct dentry *last_seq_ctrl; |
322 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | ||
323 | struct dentry *wme_rx_queue; | ||
324 | struct dentry *wme_tx_queue; | ||
325 | #endif | ||
326 | struct dentry *agg_status; | 302 | struct dentry *agg_status; |
327 | } debugfs; | 303 | } debugfs; |
328 | #endif | 304 | #endif |
305 | |||
306 | /* keep last! */ | ||
307 | struct ieee80211_sta sta; | ||
329 | }; | 308 | }; |
330 | 309 | ||
331 | static inline enum plink_state sta_plink_state(struct sta_info *sta) | 310 | static inline enum plink_state sta_plink_state(struct sta_info *sta) |
@@ -425,7 +404,7 @@ static inline u32 get_sta_flags(struct sta_info *sta) | |||
425 | /* | 404 | /* |
426 | * Get a STA info, must be under RCU read lock. | 405 | * Get a STA info, must be under RCU read lock.
427 | */ | 406 | */ |
428 | struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr); | 407 | struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr); |
429 | /* | 408 | /* |
430 | * Get STA info by index, BROKEN! | 409 | * Get STA info by index, BROKEN! |
431 | */ | 410 | */ |
@@ -451,7 +430,6 @@ int sta_info_insert(struct sta_info *sta); | |||
451 | * has already unlinked it. | 430 | * has already unlinked it. |
452 | */ | 431 | */ |
453 | void sta_info_unlink(struct sta_info **sta); | 432 | void sta_info_unlink(struct sta_info **sta); |
454 | void __sta_info_unlink(struct sta_info **sta); | ||
455 | 433 | ||
456 | void sta_info_destroy(struct sta_info *sta); | 434 | void sta_info_destroy(struct sta_info *sta); |
457 | void sta_info_set_tim_bit(struct sta_info *sta); | 435 | void sta_info_set_tim_bit(struct sta_info *sta); |
@@ -463,5 +441,7 @@ void sta_info_stop(struct ieee80211_local *local); | |||
463 | int sta_info_flush(struct ieee80211_local *local, | 441 | int sta_info_flush(struct ieee80211_local *local, |
464 | struct ieee80211_sub_if_data *sdata); | 442 | struct ieee80211_sub_if_data *sdata); |
465 | void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata); | 443 | void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata); |
444 | void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, | ||
445 | unsigned long exp_time); | ||
466 | 446 | ||
467 | #endif /* STA_INFO_H */ | 447 | #endif /* STA_INFO_H */ |
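
The reason struct ieee80211_sta must stay the last member (and why sta_info_alloc() above allocates sizeof(*sta) + hw.sta_data_size) is that the public struct, with a driver-private area right behind it, is embedded at the tail of the internal sta_info. A sketch of the pointer arithmetic this layout allows; the helper names are illustrative, not from this patch.

/* Layout sketch only. */
static inline struct sta_info *example_to_sta_info(struct ieee80211_sta *pubsta)
{
	/* the shared struct is embedded at the end of struct sta_info */
	return container_of(pubsta, struct sta_info, sta);
}

static inline void *example_drv_priv(struct ieee80211_sta *pubsta)
{
	/* hw.sta_data_size bytes were allocated directly behind it
	 * for the driver's per-station state */
	return (void *)(pubsta + 1);
}
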
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c index 995f7af3d25e..34b32bc8f609 100644 --- a/net/mac80211/tkip.c +++ b/net/mac80211/tkip.c | |||
@@ -304,7 +304,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, | |||
304 | key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { | 304 | key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { |
305 | u8 bcast[ETH_ALEN] = | 305 | u8 bcast[ETH_ALEN] = |
306 | {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | 306 | {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; |
307 | u8 *sta_addr = key->sta->addr; | 307 | u8 *sta_addr = key->sta->sta.addr; |
308 | 308 | ||
309 | if (is_multicast_ether_addr(ra)) | 309 | if (is_multicast_ether_addr(ra)) |
310 | sta_addr = bcast; | 310 | sta_addr = bcast; |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 4788f7b91f49..0cc2e23f082c 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -38,43 +38,6 @@ | |||
38 | 38 | ||
39 | /* misc utils */ | 39 | /* misc utils */ |
40 | 40 | ||
41 | #ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP | ||
42 | static void ieee80211_dump_frame(const char *ifname, const char *title, | ||
43 | const struct sk_buff *skb) | ||
44 | { | ||
45 | const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
46 | unsigned int hdrlen; | ||
47 | DECLARE_MAC_BUF(mac); | ||
48 | |||
49 | printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len); | ||
50 | if (skb->len < 4) { | ||
51 | printk("\n"); | ||
52 | return; | ||
53 | } | ||
54 | |||
55 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | ||
56 | if (hdrlen > skb->len) | ||
57 | hdrlen = skb->len; | ||
58 | if (hdrlen >= 4) | ||
59 | printk(" FC=0x%04x DUR=0x%04x", | ||
60 | le16_to_cpu(hdr->frame_control), le16_to_cpu(hdr->duration_id)); | ||
61 | if (hdrlen >= 10) | ||
62 | printk(" A1=%s", print_mac(mac, hdr->addr1)); | ||
63 | if (hdrlen >= 16) | ||
64 | printk(" A2=%s", print_mac(mac, hdr->addr2)); | ||
65 | if (hdrlen >= 24) | ||
66 | printk(" A3=%s", print_mac(mac, hdr->addr3)); | ||
67 | if (hdrlen >= 30) | ||
68 | printk(" A4=%s", print_mac(mac, hdr->addr4)); | ||
69 | printk("\n"); | ||
70 | } | ||
71 | #else /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ | ||
72 | static inline void ieee80211_dump_frame(const char *ifname, const char *title, | ||
73 | struct sk_buff *skb) | ||
74 | { | ||
75 | } | ||
76 | #endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ | ||
77 | |||
78 | static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, | 41 | static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, |
79 | int next_frag_len) | 42 | int next_frag_len) |
80 | { | 43 | { |
@@ -82,6 +45,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, | |||
82 | struct ieee80211_rate *txrate; | 45 | struct ieee80211_rate *txrate; |
83 | struct ieee80211_local *local = tx->local; | 46 | struct ieee80211_local *local = tx->local; |
84 | struct ieee80211_supported_band *sband; | 47 | struct ieee80211_supported_band *sband; |
48 | struct ieee80211_hdr *hdr; | ||
85 | 49 | ||
86 | sband = local->hw.wiphy->bands[tx->channel->band]; | 50 | sband = local->hw.wiphy->bands[tx->channel->band]; |
87 | txrate = &sband->bitrates[tx->rate_idx]; | 51 | txrate = &sband->bitrates[tx->rate_idx]; |
@@ -107,10 +71,10 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, | |||
107 | * at the highest possible rate belonging to the PHY rates in the | 71 | * at the highest possible rate belonging to the PHY rates in the |
108 | * BSSBasicRateSet | 72 | * BSSBasicRateSet |
109 | */ | 73 | */ |
110 | 74 | hdr = (struct ieee80211_hdr *)tx->skb->data; | |
111 | if ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) { | 75 | if (ieee80211_is_ctl(hdr->frame_control)) { |
112 | /* TODO: These control frames are not currently sent by | 76 | /* TODO: These control frames are not currently sent by |
113 | * 80211.o, but should they be implemented, this function | 77 | * mac80211, but should they be implemented, this function |
114 | * needs to be updated to support duration field calculation. | 78 | * needs to be updated to support duration field calculation. |
115 | * | 79 | * |
116 | * RTS: time needed to transmit pending data/mgmt frame plus | 80 | * RTS: time needed to transmit pending data/mgmt frame plus |
@@ -152,7 +116,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, | |||
152 | if (r->bitrate > txrate->bitrate) | 116 | if (r->bitrate > txrate->bitrate) |
153 | break; | 117 | break; |
154 | 118 | ||
155 | if (tx->sdata->basic_rates & BIT(i)) | 119 | if (tx->sdata->bss_conf.basic_rates & BIT(i)) |
156 | rate = r->bitrate; | 120 | rate = r->bitrate; |
157 | 121 | ||
158 | switch (sband->band) { | 122 | switch (sband->band) { |
@@ -201,11 +165,10 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, | |||
201 | return cpu_to_le16(dur); | 165 | return cpu_to_le16(dur); |
202 | } | 166 | } |
203 | 167 | ||
204 | static int inline is_ieee80211_device(struct net_device *dev, | 168 | static int inline is_ieee80211_device(struct ieee80211_local *local, |
205 | struct net_device *master) | 169 | struct net_device *dev) |
206 | { | 170 | { |
207 | return (wdev_priv(dev->ieee80211_ptr) == | 171 | return local == wdev_priv(dev->ieee80211_ptr); |
208 | wdev_priv(master->ieee80211_ptr)); | ||
209 | } | 172 | } |
210 | 173 | ||
211 | /* tx handlers */ | 174 | /* tx handlers */ |
@@ -213,21 +176,19 @@ static int inline is_ieee80211_device(struct net_device *dev, | |||
213 | static ieee80211_tx_result debug_noinline | 176 | static ieee80211_tx_result debug_noinline |
214 | ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | 177 | ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) |
215 | { | 178 | { |
216 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 179 | |
217 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | 180 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
218 | #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ | ||
219 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 181 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
220 | u32 sta_flags; | 182 | u32 sta_flags; |
221 | 183 | ||
222 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) | 184 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) |
223 | return TX_CONTINUE; | 185 | return TX_CONTINUE; |
224 | 186 | ||
225 | if (unlikely(tx->local->sta_sw_scanning) && | 187 | if (unlikely(tx->local->sw_scanning) && |
226 | ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || | 188 | !ieee80211_is_probe_req(hdr->frame_control)) |
227 | (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ)) | ||
228 | return TX_DROP; | 189 | return TX_DROP; |
229 | 190 | ||
230 | if (tx->sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) | 191 | if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT) |
231 | return TX_CONTINUE; | 192 | return TX_CONTINUE; |
232 | 193 | ||
233 | if (tx->flags & IEEE80211_TX_PS_BUFFERED) | 194 | if (tx->flags & IEEE80211_TX_PS_BUFFERED) |
@@ -237,8 +198,8 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | |||
237 | 198 | ||
238 | if (likely(tx->flags & IEEE80211_TX_UNICAST)) { | 199 | if (likely(tx->flags & IEEE80211_TX_UNICAST)) { |
239 | if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && | 200 | if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && |
240 | tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 201 | tx->sdata->vif.type != NL80211_IFTYPE_ADHOC && |
241 | (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { | 202 | ieee80211_is_data(hdr->frame_control))) { |
242 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 203 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
243 | DECLARE_MAC_BUF(mac); | 204 | DECLARE_MAC_BUF(mac); |
244 | printk(KERN_DEBUG "%s: dropped data frame to not " | 205 | printk(KERN_DEBUG "%s: dropped data frame to not " |
@@ -249,9 +210,9 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | |||
249 | return TX_DROP; | 210 | return TX_DROP; |
250 | } | 211 | } |
251 | } else { | 212 | } else { |
252 | if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && | 213 | if (unlikely(ieee80211_is_data(hdr->frame_control) && |
253 | tx->local->num_sta == 0 && | 214 | tx->local->num_sta == 0 && |
254 | tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS)) { | 215 | tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) { |
255 | /* | 216 | /* |
256 | * No associated STAs - no need to send multicast | 217 | * No associated STAs - no need to send multicast |
257 | * frames. | 218 | * frames. |
@@ -282,7 +243,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) | |||
282 | 243 | ||
283 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 244 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
284 | struct ieee80211_if_ap *ap; | 245 | struct ieee80211_if_ap *ap; |
285 | if (sdata->vif.type != IEEE80211_IF_TYPE_AP) | 246 | if (sdata->vif.type != NL80211_IFTYPE_AP) |
286 | continue; | 247 | continue; |
287 | ap = &sdata->u.ap; | 248 | ap = &sdata->u.ap; |
288 | skb = skb_dequeue(&ap->ps_bc_buf); | 249 | skb = skb_dequeue(&ap->ps_bc_buf); |
@@ -315,6 +276,7 @@ static ieee80211_tx_result | |||
315 | ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | 276 | ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) |
316 | { | 277 | { |
317 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 278 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
279 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
318 | 280 | ||
319 | /* | 281 | /* |
320 | * broadcast/multicast frame | 282 | * broadcast/multicast frame |
@@ -329,7 +291,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
329 | return TX_CONTINUE; | 291 | return TX_CONTINUE; |
330 | 292 | ||
331 | /* no buffering for ordered frames */ | 293 | /* no buffering for ordered frames */ |
332 | if (tx->fc & IEEE80211_FCTL_ORDER) | 294 | if (ieee80211_has_order(hdr->frame_control)) |
333 | return TX_CONTINUE; | 295 | return TX_CONTINUE; |
334 | 296 | ||
335 | /* no stations in PS mode */ | 297 | /* no stations in PS mode */ |
@@ -367,12 +329,11 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
367 | { | 329 | { |
368 | struct sta_info *sta = tx->sta; | 330 | struct sta_info *sta = tx->sta; |
369 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 331 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
332 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
370 | u32 staflags; | 333 | u32 staflags; |
371 | DECLARE_MAC_BUF(mac); | 334 | DECLARE_MAC_BUF(mac); |
372 | 335 | ||
373 | if (unlikely(!sta || | 336 | if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control))) |
374 | ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && | ||
375 | (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) | ||
376 | return TX_CONTINUE; | 337 | return TX_CONTINUE; |
377 | 338 | ||
378 | staflags = get_sta_flags(sta); | 339 | staflags = get_sta_flags(sta); |
@@ -382,7 +343,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
382 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 343 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
383 | printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " | 344 | printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " |
384 | "before %d)\n", | 345 | "before %d)\n", |
385 | print_mac(mac, sta->addr), sta->aid, | 346 | print_mac(mac, sta->sta.addr), sta->sta.aid, |
386 | skb_queue_len(&sta->ps_tx_buf)); | 347 | skb_queue_len(&sta->ps_tx_buf)); |
387 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 348 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
388 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) | 349 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) |
@@ -393,7 +354,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
393 | if (net_ratelimit()) { | 354 | if (net_ratelimit()) { |
394 | printk(KERN_DEBUG "%s: STA %s TX " | 355 | printk(KERN_DEBUG "%s: STA %s TX " |
395 | "buffer full - dropping oldest frame\n", | 356 | "buffer full - dropping oldest frame\n", |
396 | tx->dev->name, print_mac(mac, sta->addr)); | 357 | tx->dev->name, print_mac(mac, sta->sta.addr)); |
397 | } | 358 | } |
398 | #endif | 359 | #endif |
399 | dev_kfree_skb(old); | 360 | dev_kfree_skb(old); |
@@ -412,7 +373,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
412 | else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) { | 373 | else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) { |
413 | printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll " | 374 | printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll " |
414 | "set -> send frame\n", tx->dev->name, | 375 | "set -> send frame\n", tx->dev->name, |
415 | print_mac(mac, sta->addr)); | 376 | print_mac(mac, sta->sta.addr)); |
416 | } | 377 | } |
417 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 378 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
418 | clear_sta_flags(sta, WLAN_STA_PSPOLL); | 379 | clear_sta_flags(sta, WLAN_STA_PSPOLL); |
@@ -437,7 +398,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) | |||
437 | { | 398 | { |
438 | struct ieee80211_key *key; | 399 | struct ieee80211_key *key; |
439 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 400 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
440 | u16 fc = tx->fc; | 401 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
441 | 402 | ||
442 | if (unlikely(tx->skb->do_not_encrypt)) | 403 | if (unlikely(tx->skb->do_not_encrypt)) |
443 | tx->key = NULL; | 404 | tx->key = NULL; |
@@ -454,22 +415,16 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) | |||
454 | tx->key = NULL; | 415 | tx->key = NULL; |
455 | 416 | ||
456 | if (tx->key) { | 417 | if (tx->key) { |
457 | u16 ftype, stype; | ||
458 | |||
459 | tx->key->tx_rx_count++; | 418 | tx->key->tx_rx_count++; |
460 | /* TODO: add threshold stuff again */ | 419 | /* TODO: add threshold stuff again */ |
461 | 420 | ||
462 | switch (tx->key->conf.alg) { | 421 | switch (tx->key->conf.alg) { |
463 | case ALG_WEP: | 422 | case ALG_WEP: |
464 | ftype = fc & IEEE80211_FCTL_FTYPE; | 423 | if (ieee80211_is_auth(hdr->frame_control)) |
465 | stype = fc & IEEE80211_FCTL_STYPE; | ||
466 | |||
467 | if (ftype == IEEE80211_FTYPE_MGMT && | ||
468 | stype == IEEE80211_STYPE_AUTH) | ||
469 | break; | 424 | break; |
470 | case ALG_TKIP: | 425 | case ALG_TKIP: |
471 | case ALG_CCMP: | 426 | case ALG_CCMP: |
472 | if (!WLAN_FC_DATA_PRESENT(fc)) | 427 | if (!ieee80211_is_data_present(hdr->frame_control)) |
473 | tx->key = NULL; | 428 | tx->key = NULL; |
474 | break; | 429 | break; |
475 | } | 430 | } |
@@ -491,7 +446,10 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) | |||
491 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; | 446 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; |
492 | 447 | ||
493 | if (likely(tx->rate_idx < 0)) { | 448 | if (likely(tx->rate_idx < 0)) { |
494 | rate_control_get_rate(tx->dev, sband, tx->skb, &rsel); | 449 | rate_control_get_rate(tx->sdata, sband, tx->sta, |
450 | tx->skb, &rsel); | ||
451 | if (tx->sta) | ||
452 | tx->sta->last_txrate_idx = rsel.rate_idx; | ||
495 | tx->rate_idx = rsel.rate_idx; | 453 | tx->rate_idx = rsel.rate_idx; |
496 | if (unlikely(rsel.probe_idx >= 0)) { | 454 | if (unlikely(rsel.probe_idx >= 0)) { |
497 | info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; | 455 | info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; |
@@ -535,7 +493,7 @@ ieee80211_tx_h_misc(struct ieee80211_tx_data *tx) | |||
535 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; | 493 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; |
536 | 494 | ||
537 | if (tx->sta) | 495 | if (tx->sta) |
538 | info->control.aid = tx->sta->aid; | 496 | info->control.sta = &tx->sta->sta; |
539 | 497 | ||
540 | if (!info->control.retry_limit) { | 498 | if (!info->control.retry_limit) { |
541 | if (!is_multicast_ether_addr(hdr->addr1)) { | 499 | if (!is_multicast_ether_addr(hdr->addr1)) { |
@@ -601,7 +559,7 @@ ieee80211_tx_h_misc(struct ieee80211_tx_data *tx) | |||
601 | for (idx = 0; idx < sband->n_bitrates; idx++) { | 559 | for (idx = 0; idx < sband->n_bitrates; idx++) { |
602 | if (sband->bitrates[idx].bitrate > rate->bitrate) | 560 | if (sband->bitrates[idx].bitrate > rate->bitrate) |
603 | continue; | 561 | continue; |
604 | if (tx->sdata->basic_rates & BIT(idx) && | 562 | if (tx->sdata->bss_conf.basic_rates & BIT(idx) && |
605 | (baserate < 0 || | 563 | (baserate < 0 || |
606 | (sband->bitrates[baserate].bitrate | 564 | (sband->bitrates[baserate].bitrate |
607 | < sband->bitrates[idx].bitrate))) | 565 | < sband->bitrates[idx].bitrate))) |
@@ -615,7 +573,7 @@ ieee80211_tx_h_misc(struct ieee80211_tx_data *tx) | |||
615 | } | 573 | } |
616 | 574 | ||
617 | if (tx->sta) | 575 | if (tx->sta) |
618 | info->control.aid = tx->sta->aid; | 576 | info->control.sta = &tx->sta->sta; |
619 | 577 | ||
620 | return TX_CONTINUE; | 578 | return TX_CONTINUE; |
621 | } | 579 | } |
@@ -629,7 +587,14 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) | |||
629 | u8 *qc; | 587 | u8 *qc; |
630 | int tid; | 588 | int tid; |
631 | 589 | ||
632 | /* only for injected frames */ | 590 | /* |
591 | * Packet injection may want to control the sequence | ||
592 | * number, if we have no matching interface then we | ||
593 | * neither assign one ourselves nor ask the driver to. | ||
594 | */ | ||
595 | if (unlikely(!info->control.vif)) | ||
596 | return TX_CONTINUE; | ||
597 | |||
633 | if (unlikely(ieee80211_is_ctl(hdr->frame_control))) | 598 | if (unlikely(ieee80211_is_ctl(hdr->frame_control))) |
634 | return TX_CONTINUE; | 599 | return TX_CONTINUE; |
635 | 600 | ||
@@ -854,7 +819,6 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
854 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; | 819 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; |
855 | 820 | ||
856 | skb->do_not_encrypt = 1; | 821 | skb->do_not_encrypt = 1; |
857 | info->flags |= IEEE80211_TX_CTL_INJECTED; | ||
858 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; | 822 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; |
859 | 823 | ||
860 | /* | 824 | /* |
@@ -986,7 +950,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
986 | 950 | ||
987 | /* process and remove the injection radiotap header */ | 951 | /* process and remove the injection radiotap header */ |
988 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 952 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
989 | if (unlikely(sdata->vif.type == IEEE80211_IF_TYPE_MNTR)) { | 953 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) { |
990 | if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP) | 954 | if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP) |
991 | return TX_DROP; | 955 | return TX_DROP; |
992 | 956 | ||
@@ -1000,7 +964,6 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1000 | hdr = (struct ieee80211_hdr *) skb->data; | 964 | hdr = (struct ieee80211_hdr *) skb->data; |
1001 | 965 | ||
1002 | tx->sta = sta_info_get(local, hdr->addr1); | 966 | tx->sta = sta_info_get(local, hdr->addr1); |
1003 | tx->fc = le16_to_cpu(hdr->frame_control); | ||
1004 | 967 | ||
1005 | if (is_multicast_ether_addr(hdr->addr1)) { | 968 | if (is_multicast_ether_addr(hdr->addr1)) { |
1006 | tx->flags &= ~IEEE80211_TX_UNICAST; | 969 | tx->flags &= ~IEEE80211_TX_UNICAST; |
@@ -1025,7 +988,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1025 | else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT)) | 988 | else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT)) |
1026 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; | 989 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; |
1027 | 990 | ||
1028 | hdrlen = ieee80211_get_hdrlen(tx->fc); | 991 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
1029 | if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { | 992 | if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { |
1030 | u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; | 993 | u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; |
1031 | tx->ethertype = (pos[0] << 8) | pos[1]; | 994 | tx->ethertype = (pos[0] << 8) | pos[1]; |
@@ -1038,14 +1001,14 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1038 | /* | 1001 | /* |
1039 | * NB: @tx is uninitialised when passed in here | 1002 | * NB: @tx is uninitialised when passed in here |
1040 | */ | 1003 | */ |
1041 | static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | 1004 | static int ieee80211_tx_prepare(struct ieee80211_local *local, |
1042 | struct sk_buff *skb, | 1005 | struct ieee80211_tx_data *tx, |
1043 | struct net_device *mdev) | 1006 | struct sk_buff *skb) |
1044 | { | 1007 | { |
1045 | struct net_device *dev; | 1008 | struct net_device *dev; |
1046 | 1009 | ||
1047 | dev = dev_get_by_index(&init_net, skb->iif); | 1010 | dev = dev_get_by_index(&init_net, skb->iif); |
1048 | if (unlikely(dev && !is_ieee80211_device(dev, mdev))) { | 1011 | if (unlikely(dev && !is_ieee80211_device(local, dev))) { |
1049 | dev_put(dev); | 1012 | dev_put(dev); |
1050 | dev = NULL; | 1013 | dev = NULL; |
1051 | } | 1014 | } |
@@ -1068,8 +1031,6 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, | |||
1068 | return IEEE80211_TX_AGAIN; | 1031 | return IEEE80211_TX_AGAIN; |
1069 | info = IEEE80211_SKB_CB(skb); | 1032 | info = IEEE80211_SKB_CB(skb); |
1070 | 1033 | ||
1071 | ieee80211_dump_frame(wiphy_name(local->hw.wiphy), | ||
1072 | "TX to low-level driver", skb); | ||
1073 | ret = local->ops->tx(local_to_hw(local), skb); | 1034 | ret = local->ops->tx(local_to_hw(local), skb); |
1074 | if (ret) | 1035 | if (ret) |
1075 | return IEEE80211_TX_AGAIN; | 1036 | return IEEE80211_TX_AGAIN; |
@@ -1099,9 +1060,6 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, | |||
1099 | ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; | 1060 | ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; |
1100 | } | 1061 | } |
1101 | 1062 | ||
1102 | ieee80211_dump_frame(wiphy_name(local->hw.wiphy), | ||
1103 | "TX to low-level driver", | ||
1104 | tx->extra_frag[i]); | ||
1105 | ret = local->ops->tx(local_to_hw(local), | 1063 | ret = local->ops->tx(local_to_hw(local), |
1106 | tx->extra_frag[i]); | 1064 | tx->extra_frag[i]); |
1107 | if (ret) | 1065 | if (ret) |
@@ -1300,17 +1258,24 @@ static int ieee80211_skb_resize(struct ieee80211_local *local, | |||
1300 | int ieee80211_master_start_xmit(struct sk_buff *skb, | 1258 | int ieee80211_master_start_xmit(struct sk_buff *skb, |
1301 | struct net_device *dev) | 1259 | struct net_device *dev) |
1302 | { | 1260 | { |
1261 | struct ieee80211_master_priv *mpriv = netdev_priv(dev); | ||
1262 | struct ieee80211_local *local = mpriv->local; | ||
1303 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1263 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1304 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 1264 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
1305 | struct net_device *odev = NULL; | 1265 | struct net_device *odev = NULL; |
1306 | struct ieee80211_sub_if_data *osdata; | 1266 | struct ieee80211_sub_if_data *osdata; |
1307 | int headroom; | 1267 | int headroom; |
1308 | bool may_encrypt; | 1268 | bool may_encrypt; |
1269 | enum { | ||
1270 | NOT_MONITOR, | ||
1271 | FOUND_SDATA, | ||
1272 | UNKNOWN_ADDRESS, | ||
1273 | } monitor_iface = NOT_MONITOR; | ||
1309 | int ret; | 1274 | int ret; |
1310 | 1275 | ||
1311 | if (skb->iif) | 1276 | if (skb->iif) |
1312 | odev = dev_get_by_index(&init_net, skb->iif); | 1277 | odev = dev_get_by_index(&init_net, skb->iif); |
1313 | if (unlikely(odev && !is_ieee80211_device(odev, dev))) { | 1278 | if (unlikely(odev && !is_ieee80211_device(local, odev))) { |
1314 | dev_put(odev); | 1279 | dev_put(odev); |
1315 | odev = NULL; | 1280 | odev = NULL; |
1316 | } | 1281 | } |
@@ -1335,12 +1300,56 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, | |||
1335 | if (is_multicast_ether_addr(hdr->addr3)) | 1300 | if (is_multicast_ether_addr(hdr->addr3)) |
1336 | memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); | 1301 | memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); |
1337 | else | 1302 | else |
1338 | if (mesh_nexthop_lookup(skb, odev)) | 1303 | if (mesh_nexthop_lookup(skb, osdata)) |
1339 | return 0; | 1304 | return 0; |
1340 | if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) | 1305 | if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) |
1341 | IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.sta, | 1306 | IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh, |
1342 | fwded_frames); | 1307 | fwded_frames); |
1343 | } | 1308 | } |
1309 | } else if (unlikely(osdata->vif.type == NL80211_IFTYPE_MONITOR)) { | ||
1310 | struct ieee80211_sub_if_data *sdata; | ||
1311 | struct ieee80211_local *local = osdata->local; | ||
1312 | struct ieee80211_hdr *hdr; | ||
1313 | int hdrlen; | ||
1314 | u16 len_rthdr; | ||
1315 | |||
1316 | info->flags |= IEEE80211_TX_CTL_INJECTED; | ||
1317 | monitor_iface = UNKNOWN_ADDRESS; | ||
1318 | |||
1319 | len_rthdr = ieee80211_get_radiotap_len(skb->data); | ||
1320 | hdr = (struct ieee80211_hdr *)skb->data + len_rthdr; | ||
1321 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | ||
1322 | |||
1323 | /* check the header is complete in the frame */ | ||
1324 | if (likely(skb->len >= len_rthdr + hdrlen)) { | ||
1325 | /* | ||
1326 | * Outgoing injected frames that carry a local | ||
1327 | * sender address are handled as though they were | ||
1328 | * our own frames. | ||
1329 | * This isn't entirely correct: the local MAC | ||
1330 | * address is not necessarily enough to find the | ||
1331 | * interface to use; for proper VLAN/WDS support | ||
1332 | * we will need a different mechanism. | ||
1333 | */ | ||
1334 | |||
1335 | rcu_read_lock(); | ||
1336 | list_for_each_entry_rcu(sdata, &local->interfaces, | ||
1337 | list) { | ||
1338 | if (!netif_running(sdata->dev)) | ||
1339 | continue; | ||
1340 | if (compare_ether_addr(sdata->dev->dev_addr, | ||
1341 | hdr->addr2)) { | ||
1342 | dev_hold(sdata->dev); | ||
1343 | dev_put(odev); | ||
1344 | osdata = sdata; | ||
1345 | odev = osdata->dev; | ||
1346 | skb->iif = sdata->dev->ifindex; | ||
1347 | monitor_iface = FOUND_SDATA; | ||
1348 | break; | ||
1349 | } | ||
1350 | } | ||
1351 | rcu_read_unlock(); | ||
1352 | } | ||
1344 | } | 1353 | } |
1345 | 1354 | ||
1346 | may_encrypt = !skb->do_not_encrypt; | 1355 | may_encrypt = !skb->do_not_encrypt; |
@@ -1357,7 +1366,12 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, | |||
1357 | return 0; | 1366 | return 0; |
1358 | } | 1367 | } |
1359 | 1368 | ||
1360 | info->control.vif = &osdata->vif; | 1369 | if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
1370 | osdata = container_of(osdata->bss, | ||
1371 | struct ieee80211_sub_if_data, | ||
1372 | u.ap); | ||
1373 | if (likely(monitor_iface != UNKNOWN_ADDRESS)) | ||
1374 | info->control.vif = &osdata->vif; | ||
1361 | ret = ieee80211_tx(odev, skb); | 1375 | ret = ieee80211_tx(odev, skb); |
1362 | dev_put(odev); | 1376 | dev_put(odev); |
1363 | 1377 | ||
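
This is the branch that makes raw packet injection work: a frame handed to a monitor interface arrives here with a radiotap header in front of the 802.11 header, is flagged IEEE80211_TX_CTL_INJECTED, and is matched back to a real interface by its transmitter address. The userspace side therefore only has to prepend a radiotap header before sending on the monitor device; a hedged sketch follows, where the helper and the empty present bitmap are illustrative and the struct mirrors the standard radiotap layout.

/* Hypothetical userspace injection sketch: prepend a minimal radiotap
 * header to a ready-made 802.11 frame, then send the result on a
 * monitor interface (e.g. via a packet socket bound to it). */
#include <stdint.h>
#include <string.h>

struct radiotap_hdr {
	uint8_t  it_version;	/* always 0 */
	uint8_t  it_pad;
	uint16_t it_len;	/* little-endian total radiotap length */
	uint32_t it_present;	/* bitmap of optional fields, none here */
} __attribute__((packed));

static size_t build_injected_frame(uint8_t *out, size_t outlen,
				   const uint8_t *frame80211, size_t len)
{
	struct radiotap_hdr rt = {
		.it_version = 0,
		.it_len = sizeof(rt),	/* assumes a little-endian host */
		.it_present = 0,	/* no optional fields */
	};

	if (outlen < sizeof(rt) + len)
		return 0;
	memcpy(out, &rt, sizeof(rt));
	memcpy(out + sizeof(rt), frame80211, len);
	return sizeof(rt) + len;
}
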
@@ -1437,8 +1451,8 @@ fail: | |||
1437 | int ieee80211_subif_start_xmit(struct sk_buff *skb, | 1451 | int ieee80211_subif_start_xmit(struct sk_buff *skb, |
1438 | struct net_device *dev) | 1452 | struct net_device *dev) |
1439 | { | 1453 | { |
1440 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1454 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1441 | struct ieee80211_sub_if_data *sdata; | 1455 | struct ieee80211_local *local = sdata->local; |
1442 | int ret = 1, head_need; | 1456 | int ret = 1, head_need; |
1443 | u16 ethertype, hdrlen, meshhdrlen = 0; | 1457 | u16 ethertype, hdrlen, meshhdrlen = 0; |
1444 | __le16 fc; | 1458 | __le16 fc; |
@@ -1450,7 +1464,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1450 | struct sta_info *sta; | 1464 | struct sta_info *sta; |
1451 | u32 sta_flags = 0; | 1465 | u32 sta_flags = 0; |
1452 | 1466 | ||
1453 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1454 | if (unlikely(skb->len < ETH_HLEN)) { | 1467 | if (unlikely(skb->len < ETH_HLEN)) { |
1455 | ret = 0; | 1468 | ret = 0; |
1456 | goto fail; | 1469 | goto fail; |
@@ -1465,8 +1478,8 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1465 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); | 1478 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); |
1466 | 1479 | ||
1467 | switch (sdata->vif.type) { | 1480 | switch (sdata->vif.type) { |
1468 | case IEEE80211_IF_TYPE_AP: | 1481 | case NL80211_IFTYPE_AP: |
1469 | case IEEE80211_IF_TYPE_VLAN: | 1482 | case NL80211_IFTYPE_AP_VLAN: |
1470 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); | 1483 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); |
1471 | /* DA BSSID SA */ | 1484 | /* DA BSSID SA */ |
1472 | memcpy(hdr.addr1, skb->data, ETH_ALEN); | 1485 | memcpy(hdr.addr1, skb->data, ETH_ALEN); |
@@ -1474,7 +1487,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1474 | memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); | 1487 | memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); |
1475 | hdrlen = 24; | 1488 | hdrlen = 24; |
1476 | break; | 1489 | break; |
1477 | case IEEE80211_IF_TYPE_WDS: | 1490 | case NL80211_IFTYPE_WDS: |
1478 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); | 1491 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); |
1479 | /* RA TA DA SA */ | 1492 | /* RA TA DA SA */ |
1480 | memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); | 1493 | memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); |
@@ -1484,24 +1497,56 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1484 | hdrlen = 30; | 1497 | hdrlen = 30; |
1485 | break; | 1498 | break; |
1486 | #ifdef CONFIG_MAC80211_MESH | 1499 | #ifdef CONFIG_MAC80211_MESH |
1487 | case IEEE80211_IF_TYPE_MESH_POINT: | 1500 | case NL80211_IFTYPE_MESH_POINT: |
1488 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); | 1501 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); |
1489 | /* RA TA DA SA */ | 1502 | if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { |
1490 | memset(hdr.addr1, 0, ETH_ALEN); | ||
1491 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); | ||
1492 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | ||
1493 | memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); | ||
1494 | if (!sdata->u.sta.mshcfg.dot11MeshTTL) { | ||
1495 | /* Do not send frames with mesh_ttl == 0 */ | 1503 | /* Do not send frames with mesh_ttl == 0 */ |
1496 | sdata->u.sta.mshstats.dropped_frames_ttl++; | 1504 | sdata->u.mesh.mshstats.dropped_frames_ttl++; |
1497 | ret = 0; | 1505 | ret = 0; |
1498 | goto fail; | 1506 | goto fail; |
1499 | } | 1507 | } |
1500 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata); | 1508 | memset(&mesh_hdr, 0, sizeof(mesh_hdr)); |
1509 | |||
1510 | if (compare_ether_addr(dev->dev_addr, | ||
1511 | skb->data + ETH_ALEN) == 0) { | ||
1512 | /* RA TA DA SA */ | ||
1513 | memset(hdr.addr1, 0, ETH_ALEN); | ||
1514 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); | ||
1515 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | ||
1516 | memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); | ||
1517 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata); | ||
1518 | } else { | ||
1519 | /* packet from other interface */ | ||
1520 | struct mesh_path *mppath; | ||
1521 | |||
1522 | memset(hdr.addr1, 0, ETH_ALEN); | ||
1523 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); | ||
1524 | memcpy(hdr.addr4, dev->dev_addr, ETH_ALEN); | ||
1525 | |||
1526 | if (is_multicast_ether_addr(skb->data)) | ||
1527 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | ||
1528 | else { | ||
1529 | rcu_read_lock(); | ||
1530 | mppath = mpp_path_lookup(skb->data, sdata); | ||
1531 | if (mppath) | ||
1532 | memcpy(hdr.addr3, mppath->mpp, ETH_ALEN); | ||
1533 | else | ||
1534 | memset(hdr.addr3, 0xff, ETH_ALEN); | ||
1535 | rcu_read_unlock(); | ||
1536 | } | ||
1537 | |||
1538 | mesh_hdr.flags |= MESH_FLAGS_AE_A5_A6; | ||
1539 | mesh_hdr.ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; | ||
1540 | put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &mesh_hdr.seqnum); | ||
1541 | memcpy(mesh_hdr.eaddr1, skb->data, ETH_ALEN); | ||
1542 | memcpy(mesh_hdr.eaddr2, skb->data + ETH_ALEN, ETH_ALEN); | ||
1543 | sdata->u.mesh.mesh_seqnum++; | ||
1544 | meshhdrlen = 18; | ||
1545 | } | ||
1501 | hdrlen = 30; | 1546 | hdrlen = 30; |
1502 | break; | 1547 | break; |
1503 | #endif | 1548 | #endif |
1504 | case IEEE80211_IF_TYPE_STA: | 1549 | case NL80211_IFTYPE_STATION: |
1505 | fc |= cpu_to_le16(IEEE80211_FCTL_TODS); | 1550 | fc |= cpu_to_le16(IEEE80211_FCTL_TODS); |
1506 | /* BSSID SA DA */ | 1551 | /* BSSID SA DA */ |
1507 | memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN); | 1552 | memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN); |
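
In the mesh branch of the hunk above, frames originating from another interface get the address-extension form of the mesh header (MESH_FLAGS_AE_A5_A6 with eaddr1/eaddr2 filled in), which is where the hard-coded meshhdrlen = 18 comes from. A small sketch of that arithmetic, using the draft-802.11s field sizes assumed here; the helper itself is illustrative.

/* Where "meshhdrlen = 18" comes from; not part of this patch. */
static int example_mesh_hdr_len(int num_ext_addrs)
{
	/* flags (1) + TTL (1) + sequence number (4) */
	int len = 1 + 1 + 4;

	/* each address-extension address adds 6 bytes; the
	 * MESH_FLAGS_AE_A5_A6 case above carries two of them */
	return len + 6 * num_ext_addrs;		/* 2 -> 18 bytes */
}
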
@@ -1509,7 +1554,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1509 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | 1554 | memcpy(hdr.addr3, skb->data, ETH_ALEN); |
1510 | hdrlen = 24; | 1555 | hdrlen = 24; |
1511 | break; | 1556 | break; |
1512 | case IEEE80211_IF_TYPE_IBSS: | 1557 | case NL80211_IFTYPE_ADHOC: |
1513 | /* DA SA BSSID */ | 1558 | /* DA SA BSSID */ |
1514 | memcpy(hdr.addr1, skb->data, ETH_ALEN); | 1559 | memcpy(hdr.addr1, skb->data, ETH_ALEN); |
1515 | memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); | 1560 | memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); |
@@ -1588,19 +1633,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1588 | nh_pos -= skip_header_bytes; | 1633 | nh_pos -= skip_header_bytes; |
1589 | h_pos -= skip_header_bytes; | 1634 | h_pos -= skip_header_bytes; |
1590 | 1635 | ||
1591 | /* TODO: implement support for fragments so that there is no need to | ||
1592 | * reallocate and copy payload; it might be enough to support one | ||
1593 | * extra fragment that would be copied in the beginning of the frame | ||
1594 | * data.. anyway, it would be nice to include this into skb structure | ||
1595 | * somehow | ||
1596 | * | ||
1597 | * There are few options for this: | ||
1598 | * use skb->cb as an extra space for 802.11 header | ||
1599 | * allocate new buffer if not enough headroom | ||
1600 | * make sure that there is enough headroom in every skb by increasing | ||
1601 | * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and | ||
1602 | * alloc_skb() (net/core/skbuff.c) | ||
1603 | */ | ||
1604 | head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); | 1636 | head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); |
1605 | 1637 | ||
1606 | /* | 1638 | /* |
@@ -1823,10 +1855,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1823 | struct rate_selection rsel; | 1855 | struct rate_selection rsel; |
1824 | struct beacon_data *beacon; | 1856 | struct beacon_data *beacon; |
1825 | struct ieee80211_supported_band *sband; | 1857 | struct ieee80211_supported_band *sband; |
1826 | struct ieee80211_mgmt *mgmt; | ||
1827 | int *num_beacons; | ||
1828 | enum ieee80211_band band = local->hw.conf.channel->band; | 1858 | enum ieee80211_band band = local->hw.conf.channel->band; |
1829 | u8 *pos; | ||
1830 | 1859 | ||
1831 | sband = local->hw.wiphy->bands[band]; | 1860 | sband = local->hw.wiphy->bands[band]; |
1832 | 1861 | ||
@@ -1835,7 +1864,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1835 | sdata = vif_to_sdata(vif); | 1864 | sdata = vif_to_sdata(vif); |
1836 | bdev = sdata->dev; | 1865 | bdev = sdata->dev; |
1837 | 1866 | ||
1838 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { | 1867 | if (sdata->vif.type == NL80211_IFTYPE_AP) { |
1839 | ap = &sdata->u.ap; | 1868 | ap = &sdata->u.ap; |
1840 | beacon = rcu_dereference(ap->beacon); | 1869 | beacon = rcu_dereference(ap->beacon); |
1841 | if (ap && beacon) { | 1870 | if (ap && beacon) { |
@@ -1873,11 +1902,9 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1873 | if (beacon->tail) | 1902 | if (beacon->tail) |
1874 | memcpy(skb_put(skb, beacon->tail_len), | 1903 | memcpy(skb_put(skb, beacon->tail_len), |
1875 | beacon->tail, beacon->tail_len); | 1904 | beacon->tail, beacon->tail_len); |
1876 | |||
1877 | num_beacons = &ap->num_beacons; | ||
1878 | } else | 1905 | } else |
1879 | goto out; | 1906 | goto out; |
1880 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 1907 | } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { |
1881 | struct ieee80211_hdr *hdr; | 1908 | struct ieee80211_hdr *hdr; |
1882 | ifsta = &sdata->u.sta; | 1909 | ifsta = &sdata->u.sta; |
1883 | 1910 | ||
@@ -1889,11 +1916,13 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1889 | goto out; | 1916 | goto out; |
1890 | 1917 | ||
1891 | hdr = (struct ieee80211_hdr *) skb->data; | 1918 | hdr = (struct ieee80211_hdr *) skb->data; |
1892 | hdr->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, | 1919 | hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
1893 | IEEE80211_STYPE_BEACON); | 1920 | IEEE80211_STYPE_BEACON); |
1894 | 1921 | ||
1895 | num_beacons = &ifsta->num_beacons; | ||
1896 | } else if (ieee80211_vif_is_mesh(&sdata->vif)) { | 1922 | } else if (ieee80211_vif_is_mesh(&sdata->vif)) { |
1923 | struct ieee80211_mgmt *mgmt; | ||
1924 | u8 *pos; | ||
1925 | |||
1897 | /* headroom, head length, tail length and maximum TIM length */ | 1926 | /* headroom, head length, tail length and maximum TIM length */ |
1898 | skb = dev_alloc_skb(local->tx_headroom + 400); | 1927 | skb = dev_alloc_skb(local->tx_headroom + 400); |
1899 | if (!skb) | 1928 | if (!skb) |
@@ -1916,9 +1945,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1916 | *pos++ = WLAN_EID_SSID; | 1945 | *pos++ = WLAN_EID_SSID; |
1917 | *pos++ = 0x0; | 1946 | *pos++ = 0x0; |
1918 | 1947 | ||
1919 | mesh_mgmt_ies_add(skb, sdata->dev); | 1948 | mesh_mgmt_ies_add(skb, sdata); |
1920 | |||
1921 | num_beacons = &sdata->u.sta.num_beacons; | ||
1922 | } else { | 1949 | } else { |
1923 | WARN_ON(1); | 1950 | WARN_ON(1); |
1924 | goto out; | 1951 | goto out; |
@@ -1929,7 +1956,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1929 | skb->do_not_encrypt = 1; | 1956 | skb->do_not_encrypt = 1; |
1930 | 1957 | ||
1931 | info->band = band; | 1958 | info->band = band; |
1932 | rate_control_get_rate(local->mdev, sband, skb, &rsel); | 1959 | rate_control_get_rate(sdata, sband, NULL, skb, &rsel); |
1933 | 1960 | ||
1934 | if (unlikely(rsel.rate_idx < 0)) { | 1961 | if (unlikely(rsel.rate_idx < 0)) { |
1935 | if (net_ratelimit()) { | 1962 | if (net_ratelimit()) { |
@@ -1955,7 +1982,6 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
1955 | info->antenna_sel_tx = local->hw.conf.antenna_sel_tx; | 1982 | info->antenna_sel_tx = local->hw.conf.antenna_sel_tx; |
1956 | info->control.retry_limit = 1; | 1983 | info->control.retry_limit = 1; |
1957 | 1984 | ||
1958 | (*num_beacons)++; | ||
1959 | out: | 1985 | out: |
1960 | rcu_read_unlock(); | 1986 | rcu_read_unlock(); |
1961 | return skb; | 1987 | return skb; |
@@ -2017,7 +2043,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
2017 | rcu_read_lock(); | 2043 | rcu_read_lock(); |
2018 | beacon = rcu_dereference(bss->beacon); | 2044 | beacon = rcu_dereference(bss->beacon); |
2019 | 2045 | ||
2020 | if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || !beacon->head) | 2046 | if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head) |
2021 | goto out; | 2047 | goto out; |
2022 | 2048 | ||
2023 | if (bss->dtim_count != 0) | 2049 | if (bss->dtim_count != 0) |
@@ -2039,7 +2065,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
2039 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); | 2065 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); |
2040 | } | 2066 | } |
2041 | 2067 | ||
2042 | if (!ieee80211_tx_prepare(&tx, skb, local->mdev)) | 2068 | if (!ieee80211_tx_prepare(local, &tx, skb)) |
2043 | break; | 2069 | break; |
2044 | dev_kfree_skb_any(skb); | 2070 | dev_kfree_skb_any(skb); |
2045 | } | 2071 | } |
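Note on the mesh transmit path above: when a frame enters the mesh from another interface, the code builds an extended mesh header (flags with MESH_FLAGS_AE_A5_A6, TTL, a little-endian sequence number, and the two proxied addresses) and sets meshhdrlen to 18. The sketch below only illustrates that byte layout; the name mesh_ext_hdr_example is invented here for illustration, and the real structure definition lives in the mac80211 headers, not in this diff.

    #include <linux/types.h>
    #include <linux/if_ether.h>

    /* Hypothetical layout for illustration only. */
    struct mesh_ext_hdr_example {
            __u8   flags;            /* MESH_FLAGS_AE_A5_A6 when eaddr1/2 follow */
            __u8   ttl;              /* dot11MeshTTL at frame creation */
            __le32 seqnum;           /* mesh_seqnum, stored unaligned */
            __u8   eaddr1[ETH_ALEN]; /* proxied destination (original DA) */
            __u8   eaddr2[ETH_ALEN]; /* proxied source (original SA) */
    } __attribute__((packed));

    /* 1 + 1 + 4 + 6 + 6 = 18 bytes, matching meshhdrlen above. */
    _Static_assert(sizeof(struct mesh_ext_hdr_example) == 18,
                   "extended mesh header is 18 bytes");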
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 0d463c80c404..f32561ec224c 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -43,7 +43,7 @@ const unsigned char bridge_tunnel_header[] __aligned(2) = | |||
43 | 43 | ||
44 | 44 | ||
45 | u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, | 45 | u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, |
46 | enum ieee80211_if_types type) | 46 | enum nl80211_iftype type) |
47 | { | 47 | { |
48 | __le16 fc = hdr->frame_control; | 48 | __le16 fc = hdr->frame_control; |
49 | 49 | ||
@@ -77,10 +77,10 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, | |||
77 | 77 | ||
78 | if (ieee80211_is_back_req(fc)) { | 78 | if (ieee80211_is_back_req(fc)) { |
79 | switch (type) { | 79 | switch (type) { |
80 | case IEEE80211_IF_TYPE_STA: | 80 | case NL80211_IFTYPE_STATION: |
81 | return hdr->addr2; | 81 | return hdr->addr2; |
82 | case IEEE80211_IF_TYPE_AP: | 82 | case NL80211_IFTYPE_AP: |
83 | case IEEE80211_IF_TYPE_VLAN: | 83 | case NL80211_IFTYPE_AP_VLAN: |
84 | return hdr->addr1; | 84 | return hdr->addr1; |
85 | default: | 85 | default: |
86 | break; /* fall through to the return */ | 86 | break; /* fall through to the return */ |
@@ -91,45 +91,6 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, | |||
91 | return NULL; | 91 | return NULL; |
92 | } | 92 | } |
93 | 93 | ||
94 | int ieee80211_get_hdrlen(u16 fc) | ||
95 | { | ||
96 | int hdrlen = 24; | ||
97 | |||
98 | switch (fc & IEEE80211_FCTL_FTYPE) { | ||
99 | case IEEE80211_FTYPE_DATA: | ||
100 | if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS)) | ||
101 | hdrlen = 30; /* Addr4 */ | ||
102 | /* | ||
103 | * The QoS Control field is two bytes and its presence is | ||
104 | * indicated by the IEEE80211_STYPE_QOS_DATA bit. Add 2 to | ||
105 | * hdrlen if that bit is set. | ||
106 | * This works by masking out the bit and shifting it to | ||
107 | * bit position 1 so the result has the value 0 or 2. | ||
108 | */ | ||
109 | hdrlen += (fc & IEEE80211_STYPE_QOS_DATA) | ||
110 | >> (ilog2(IEEE80211_STYPE_QOS_DATA)-1); | ||
111 | break; | ||
112 | case IEEE80211_FTYPE_CTL: | ||
113 | /* | ||
114 | * ACK and CTS are 10 bytes, all others 16. To see how | ||
115 | * to get this condition consider | ||
116 | * subtype mask: 0b0000000011110000 (0x00F0) | ||
117 | * ACK subtype: 0b0000000011010000 (0x00D0) | ||
118 | * CTS subtype: 0b0000000011000000 (0x00C0) | ||
119 | * bits that matter: ^^^ (0x00E0) | ||
120 | * value of those: 0b0000000011000000 (0x00C0) | ||
121 | */ | ||
122 | if ((fc & 0xE0) == 0xC0) | ||
123 | hdrlen = 10; | ||
124 | else | ||
125 | hdrlen = 16; | ||
126 | break; | ||
127 | } | ||
128 | |||
129 | return hdrlen; | ||
130 | } | ||
131 | EXPORT_SYMBOL(ieee80211_get_hdrlen); | ||
132 | |||
133 | unsigned int ieee80211_hdrlen(__le16 fc) | 94 | unsigned int ieee80211_hdrlen(__le16 fc) |
134 | { | 95 | { |
135 | unsigned int hdrlen = 24; | 96 | unsigned int hdrlen = 24; |
@@ -270,16 +231,21 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, | |||
270 | struct ieee80211_rate *rate) | 231 | struct ieee80211_rate *rate) |
271 | { | 232 | { |
272 | struct ieee80211_local *local = hw_to_local(hw); | 233 | struct ieee80211_local *local = hw_to_local(hw); |
273 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 234 | struct ieee80211_sub_if_data *sdata; |
274 | u16 dur; | 235 | u16 dur; |
275 | int erp; | 236 | int erp; |
237 | bool short_preamble = false; | ||
276 | 238 | ||
277 | erp = 0; | 239 | erp = 0; |
278 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | 240 | if (vif) { |
279 | erp = rate->flags & IEEE80211_RATE_ERP_G; | 241 | sdata = vif_to_sdata(vif); |
242 | short_preamble = sdata->bss_conf.use_short_preamble; | ||
243 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | ||
244 | erp = rate->flags & IEEE80211_RATE_ERP_G; | ||
245 | } | ||
280 | 246 | ||
281 | dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp, | 247 | dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp, |
282 | sdata->bss_conf.use_short_preamble); | 248 | short_preamble); |
283 | 249 | ||
284 | return cpu_to_le16(dur); | 250 | return cpu_to_le16(dur); |
285 | } | 251 | } |
@@ -291,7 +257,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, | |||
291 | { | 257 | { |
292 | struct ieee80211_local *local = hw_to_local(hw); | 258 | struct ieee80211_local *local = hw_to_local(hw); |
293 | struct ieee80211_rate *rate; | 259 | struct ieee80211_rate *rate; |
294 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 260 | struct ieee80211_sub_if_data *sdata; |
295 | bool short_preamble; | 261 | bool short_preamble; |
296 | int erp; | 262 | int erp; |
297 | u16 dur; | 263 | u16 dur; |
@@ -299,13 +265,17 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, | |||
299 | 265 | ||
300 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 266 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
301 | 267 | ||
302 | short_preamble = sdata->bss_conf.use_short_preamble; | 268 | short_preamble = false; |
303 | 269 | ||
304 | rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; | 270 | rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; |
305 | 271 | ||
306 | erp = 0; | 272 | erp = 0; |
307 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | 273 | if (vif) { |
308 | erp = rate->flags & IEEE80211_RATE_ERP_G; | 274 | sdata = vif_to_sdata(vif); |
275 | short_preamble = sdata->bss_conf.use_short_preamble; | ||
276 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | ||
277 | erp = rate->flags & IEEE80211_RATE_ERP_G; | ||
278 | } | ||
309 | 279 | ||
310 | /* CTS duration */ | 280 | /* CTS duration */ |
311 | dur = ieee80211_frame_duration(local, 10, rate->bitrate, | 281 | dur = ieee80211_frame_duration(local, 10, rate->bitrate, |
@@ -328,7 +298,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, | |||
328 | { | 298 | { |
329 | struct ieee80211_local *local = hw_to_local(hw); | 299 | struct ieee80211_local *local = hw_to_local(hw); |
330 | struct ieee80211_rate *rate; | 300 | struct ieee80211_rate *rate; |
331 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 301 | struct ieee80211_sub_if_data *sdata; |
332 | bool short_preamble; | 302 | bool short_preamble; |
333 | int erp; | 303 | int erp; |
334 | u16 dur; | 304 | u16 dur; |
@@ -336,12 +306,16 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, | |||
336 | 306 | ||
337 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 307 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
338 | 308 | ||
339 | short_preamble = sdata->bss_conf.use_short_preamble; | 309 | short_preamble = false; |
340 | 310 | ||
341 | rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; | 311 | rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; |
342 | erp = 0; | 312 | erp = 0; |
343 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | 313 | if (vif) { |
344 | erp = rate->flags & IEEE80211_RATE_ERP_G; | 314 | sdata = vif_to_sdata(vif); |
315 | short_preamble = sdata->bss_conf.use_short_preamble; | ||
316 | if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) | ||
317 | erp = rate->flags & IEEE80211_RATE_ERP_G; | ||
318 | } | ||
345 | 319 | ||
346 | /* Data frame duration */ | 320 | /* Data frame duration */ |
347 | dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, | 321 | dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, |
@@ -386,6 +360,13 @@ void ieee80211_stop_queues(struct ieee80211_hw *hw) | |||
386 | } | 360 | } |
387 | EXPORT_SYMBOL(ieee80211_stop_queues); | 361 | EXPORT_SYMBOL(ieee80211_stop_queues); |
388 | 362 | ||
363 | int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) | ||
364 | { | ||
365 | struct ieee80211_local *local = hw_to_local(hw); | ||
366 | return __netif_subqueue_stopped(local->mdev, queue); | ||
367 | } | ||
368 | EXPORT_SYMBOL(ieee80211_queue_stopped); | ||
369 | |||
389 | void ieee80211_wake_queues(struct ieee80211_hw *hw) | 370 | void ieee80211_wake_queues(struct ieee80211_hw *hw) |
390 | { | 371 | { |
391 | int i; | 372 | int i; |
@@ -408,15 +389,16 @@ void ieee80211_iterate_active_interfaces( | |||
408 | 389 | ||
409 | list_for_each_entry(sdata, &local->interfaces, list) { | 390 | list_for_each_entry(sdata, &local->interfaces, list) { |
410 | switch (sdata->vif.type) { | 391 | switch (sdata->vif.type) { |
411 | case IEEE80211_IF_TYPE_INVALID: | 392 | case __NL80211_IFTYPE_AFTER_LAST: |
412 | case IEEE80211_IF_TYPE_MNTR: | 393 | case NL80211_IFTYPE_UNSPECIFIED: |
413 | case IEEE80211_IF_TYPE_VLAN: | 394 | case NL80211_IFTYPE_MONITOR: |
395 | case NL80211_IFTYPE_AP_VLAN: | ||
414 | continue; | 396 | continue; |
415 | case IEEE80211_IF_TYPE_AP: | 397 | case NL80211_IFTYPE_AP: |
416 | case IEEE80211_IF_TYPE_STA: | 398 | case NL80211_IFTYPE_STATION: |
417 | case IEEE80211_IF_TYPE_IBSS: | 399 | case NL80211_IFTYPE_ADHOC: |
418 | case IEEE80211_IF_TYPE_WDS: | 400 | case NL80211_IFTYPE_WDS: |
419 | case IEEE80211_IF_TYPE_MESH_POINT: | 401 | case NL80211_IFTYPE_MESH_POINT: |
420 | break; | 402 | break; |
421 | } | 403 | } |
422 | if (netif_running(sdata->dev)) | 404 | if (netif_running(sdata->dev)) |
@@ -441,15 +423,16 @@ void ieee80211_iterate_active_interfaces_atomic( | |||
441 | 423 | ||
442 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 424 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
443 | switch (sdata->vif.type) { | 425 | switch (sdata->vif.type) { |
444 | case IEEE80211_IF_TYPE_INVALID: | 426 | case __NL80211_IFTYPE_AFTER_LAST: |
445 | case IEEE80211_IF_TYPE_MNTR: | 427 | case NL80211_IFTYPE_UNSPECIFIED: |
446 | case IEEE80211_IF_TYPE_VLAN: | 428 | case NL80211_IFTYPE_MONITOR: |
429 | case NL80211_IFTYPE_AP_VLAN: | ||
447 | continue; | 430 | continue; |
448 | case IEEE80211_IF_TYPE_AP: | 431 | case NL80211_IFTYPE_AP: |
449 | case IEEE80211_IF_TYPE_STA: | 432 | case NL80211_IFTYPE_STATION: |
450 | case IEEE80211_IF_TYPE_IBSS: | 433 | case NL80211_IFTYPE_ADHOC: |
451 | case IEEE80211_IF_TYPE_WDS: | 434 | case NL80211_IFTYPE_WDS: |
452 | case IEEE80211_IF_TYPE_MESH_POINT: | 435 | case NL80211_IFTYPE_MESH_POINT: |
453 | break; | 436 | break; |
454 | } | 437 | } |
455 | if (netif_running(sdata->dev)) | 438 | if (netif_running(sdata->dev)) |
@@ -460,3 +443,243 @@ void ieee80211_iterate_active_interfaces_atomic( | |||
460 | rcu_read_unlock(); | 443 | rcu_read_unlock(); |
461 | } | 444 | } |
462 | EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); | 445 | EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); |
446 | |||
447 | void ieee802_11_parse_elems(u8 *start, size_t len, | ||
448 | struct ieee802_11_elems *elems) | ||
449 | { | ||
450 | size_t left = len; | ||
451 | u8 *pos = start; | ||
452 | |||
453 | memset(elems, 0, sizeof(*elems)); | ||
454 | elems->ie_start = start; | ||
455 | elems->total_len = len; | ||
456 | |||
457 | while (left >= 2) { | ||
458 | u8 id, elen; | ||
459 | |||
460 | id = *pos++; | ||
461 | elen = *pos++; | ||
462 | left -= 2; | ||
463 | |||
464 | if (elen > left) | ||
465 | return; | ||
466 | |||
467 | switch (id) { | ||
468 | case WLAN_EID_SSID: | ||
469 | elems->ssid = pos; | ||
470 | elems->ssid_len = elen; | ||
471 | break; | ||
472 | case WLAN_EID_SUPP_RATES: | ||
473 | elems->supp_rates = pos; | ||
474 | elems->supp_rates_len = elen; | ||
475 | break; | ||
476 | case WLAN_EID_FH_PARAMS: | ||
477 | elems->fh_params = pos; | ||
478 | elems->fh_params_len = elen; | ||
479 | break; | ||
480 | case WLAN_EID_DS_PARAMS: | ||
481 | elems->ds_params = pos; | ||
482 | elems->ds_params_len = elen; | ||
483 | break; | ||
484 | case WLAN_EID_CF_PARAMS: | ||
485 | elems->cf_params = pos; | ||
486 | elems->cf_params_len = elen; | ||
487 | break; | ||
488 | case WLAN_EID_TIM: | ||
489 | elems->tim = pos; | ||
490 | elems->tim_len = elen; | ||
491 | break; | ||
492 | case WLAN_EID_IBSS_PARAMS: | ||
493 | elems->ibss_params = pos; | ||
494 | elems->ibss_params_len = elen; | ||
495 | break; | ||
496 | case WLAN_EID_CHALLENGE: | ||
497 | elems->challenge = pos; | ||
498 | elems->challenge_len = elen; | ||
499 | break; | ||
500 | case WLAN_EID_WPA: | ||
501 | if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && | ||
502 | pos[2] == 0xf2) { | ||
503 | /* Microsoft OUI (00:50:F2) */ | ||
504 | if (pos[3] == 1) { | ||
505 | /* OUI Type 1 - WPA IE */ | ||
506 | elems->wpa = pos; | ||
507 | elems->wpa_len = elen; | ||
508 | } else if (elen >= 5 && pos[3] == 2) { | ||
509 | if (pos[4] == 0) { | ||
510 | elems->wmm_info = pos; | ||
511 | elems->wmm_info_len = elen; | ||
512 | } else if (pos[4] == 1) { | ||
513 | elems->wmm_param = pos; | ||
514 | elems->wmm_param_len = elen; | ||
515 | } | ||
516 | } | ||
517 | } | ||
518 | break; | ||
519 | case WLAN_EID_RSN: | ||
520 | elems->rsn = pos; | ||
521 | elems->rsn_len = elen; | ||
522 | break; | ||
523 | case WLAN_EID_ERP_INFO: | ||
524 | elems->erp_info = pos; | ||
525 | elems->erp_info_len = elen; | ||
526 | break; | ||
527 | case WLAN_EID_EXT_SUPP_RATES: | ||
528 | elems->ext_supp_rates = pos; | ||
529 | elems->ext_supp_rates_len = elen; | ||
530 | break; | ||
531 | case WLAN_EID_HT_CAPABILITY: | ||
532 | elems->ht_cap_elem = pos; | ||
533 | elems->ht_cap_elem_len = elen; | ||
534 | break; | ||
535 | case WLAN_EID_HT_EXTRA_INFO: | ||
536 | elems->ht_info_elem = pos; | ||
537 | elems->ht_info_elem_len = elen; | ||
538 | break; | ||
539 | case WLAN_EID_MESH_ID: | ||
540 | elems->mesh_id = pos; | ||
541 | elems->mesh_id_len = elen; | ||
542 | break; | ||
543 | case WLAN_EID_MESH_CONFIG: | ||
544 | elems->mesh_config = pos; | ||
545 | elems->mesh_config_len = elen; | ||
546 | break; | ||
547 | case WLAN_EID_PEER_LINK: | ||
548 | elems->peer_link = pos; | ||
549 | elems->peer_link_len = elen; | ||
550 | break; | ||
551 | case WLAN_EID_PREQ: | ||
552 | elems->preq = pos; | ||
553 | elems->preq_len = elen; | ||
554 | break; | ||
555 | case WLAN_EID_PREP: | ||
556 | elems->prep = pos; | ||
557 | elems->prep_len = elen; | ||
558 | break; | ||
559 | case WLAN_EID_PERR: | ||
560 | elems->perr = pos; | ||
561 | elems->perr_len = elen; | ||
562 | break; | ||
563 | case WLAN_EID_CHANNEL_SWITCH: | ||
564 | elems->ch_switch_elem = pos; | ||
565 | elems->ch_switch_elem_len = elen; | ||
566 | break; | ||
567 | case WLAN_EID_QUIET: | ||
568 | if (!elems->quiet_elem) { | ||
569 | elems->quiet_elem = pos; | ||
570 | elems->quiet_elem_len = elen; | ||
571 | } | ||
572 | elems->num_of_quiet_elem++; | ||
573 | break; | ||
574 | case WLAN_EID_COUNTRY: | ||
575 | elems->country_elem = pos; | ||
576 | elems->country_elem_len = elen; | ||
577 | break; | ||
578 | case WLAN_EID_PWR_CONSTRAINT: | ||
579 | elems->pwr_constr_elem = pos; | ||
580 | elems->pwr_constr_elem_len = elen; | ||
581 | break; | ||
582 | default: | ||
583 | break; | ||
584 | } | ||
585 | |||
586 | left -= elen; | ||
587 | pos += elen; | ||
588 | } | ||
589 | } | ||
590 | |||
591 | void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata) | ||
592 | { | ||
593 | struct ieee80211_local *local = sdata->local; | ||
594 | struct ieee80211_tx_queue_params qparam; | ||
595 | int i; | ||
596 | |||
597 | if (!local->ops->conf_tx) | ||
598 | return; | ||
599 | |||
600 | memset(&qparam, 0, sizeof(qparam)); | ||
601 | |||
602 | qparam.aifs = 2; | ||
603 | |||
604 | if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && | ||
605 | !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)) | ||
606 | qparam.cw_min = 31; | ||
607 | else | ||
608 | qparam.cw_min = 15; | ||
609 | |||
610 | qparam.cw_max = 1023; | ||
611 | qparam.txop = 0; | ||
612 | |||
613 | for (i = 0; i < local_to_hw(local)->queues; i++) | ||
614 | local->ops->conf_tx(local_to_hw(local), i, &qparam); | ||
615 | } | ||
616 | |||
617 | void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | ||
618 | int encrypt) | ||
619 | { | ||
620 | skb->dev = sdata->local->mdev; | ||
621 | skb_set_mac_header(skb, 0); | ||
622 | skb_set_network_header(skb, 0); | ||
623 | skb_set_transport_header(skb, 0); | ||
624 | |||
625 | skb->iif = sdata->dev->ifindex; | ||
626 | skb->do_not_encrypt = !encrypt; | ||
627 | |||
628 | dev_queue_xmit(skb); | ||
629 | } | ||
630 | |||
631 | int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freqMHz) | ||
632 | { | ||
633 | int ret = -EINVAL; | ||
634 | struct ieee80211_channel *chan; | ||
635 | struct ieee80211_local *local = sdata->local; | ||
636 | |||
637 | chan = ieee80211_get_channel(local->hw.wiphy, freqMHz); | ||
638 | |||
639 | if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) { | ||
640 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC && | ||
641 | chan->flags & IEEE80211_CHAN_NO_IBSS) { | ||
642 | printk(KERN_DEBUG "%s: IBSS not allowed on frequency " | ||
643 | "%d MHz\n", sdata->dev->name, chan->center_freq); | ||
644 | return ret; | ||
645 | } | ||
646 | local->oper_channel = chan; | ||
647 | |||
648 | if (local->sw_scanning || local->hw_scanning) | ||
649 | ret = 0; | ||
650 | else | ||
651 | ret = ieee80211_hw_config(local); | ||
652 | |||
653 | rate_control_clear(local); | ||
654 | } | ||
655 | |||
656 | return ret; | ||
657 | } | ||
658 | |||
659 | u64 ieee80211_mandatory_rates(struct ieee80211_local *local, | ||
660 | enum ieee80211_band band) | ||
661 | { | ||
662 | struct ieee80211_supported_band *sband; | ||
663 | struct ieee80211_rate *bitrates; | ||
664 | u64 mandatory_rates; | ||
665 | enum ieee80211_rate_flags mandatory_flag; | ||
666 | int i; | ||
667 | |||
668 | sband = local->hw.wiphy->bands[band]; | ||
669 | if (!sband) { | ||
670 | WARN_ON(1); | ||
671 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
672 | } | ||
673 | |||
674 | if (band == IEEE80211_BAND_2GHZ) | ||
675 | mandatory_flag = IEEE80211_RATE_MANDATORY_B; | ||
676 | else | ||
677 | mandatory_flag = IEEE80211_RATE_MANDATORY_A; | ||
678 | |||
679 | bitrates = sband->bitrates; | ||
680 | mandatory_rates = 0; | ||
681 | for (i = 0; i < sband->n_bitrates; i++) | ||
682 | if (bitrates[i].flags & mandatory_flag) | ||
683 | mandatory_rates |= BIT(i); | ||
684 | return mandatory_rates; | ||
685 | } | ||
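The ieee802_11_parse_elems() helper added above is a plain ID/length/value walk over the information elements of a management frame: read one ID byte and one length byte, stop if the element claims more bytes than remain, otherwise record the element and advance. A minimal userspace sketch of the same loop, using a made-up sample buffer (an SSID "ab" and a DS Parameter Set for channel 6), for illustration only:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Walk a buffer of ID/length/value information elements and print them. */
    static void walk_elems(const uint8_t *pos, size_t left)
    {
            while (left >= 2) {
                    uint8_t id = *pos++;
                    uint8_t elen = *pos++;
                    left -= 2;
                    if (elen > left)  /* truncated element: give up, as the parser does */
                            return;
                    printf("EID %u, %u byte(s)\n", id, elen);
                    pos += elen;
                    left -= elen;
            }
    }

    int main(void)
    {
            const uint8_t ies[] = { 0x00, 0x02, 'a', 'b', 0x03, 0x01, 0x06 };
            walk_elems(ies, sizeof(ies));
            return 0;
    }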
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index 5c2bf0a3d4db..376c84987e4f 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c | |||
@@ -228,11 +228,10 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, | |||
228 | return -1; | 228 | return -1; |
229 | 229 | ||
230 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 230 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
231 | 231 | if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN) | |
232 | if (skb->len < 8 + hdrlen) | ||
233 | return -1; | 232 | return -1; |
234 | 233 | ||
235 | len = skb->len - hdrlen - 8; | 234 | len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN; |
236 | 235 | ||
237 | keyidx = skb->data[hdrlen + 3] >> 6; | 236 | keyidx = skb->data[hdrlen + 3] >> 6; |
238 | 237 | ||
@@ -292,9 +291,10 @@ u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key) | |||
292 | ieee80211_rx_result | 291 | ieee80211_rx_result |
293 | ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) | 292 | ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) |
294 | { | 293 | { |
295 | if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && | 294 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
296 | ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || | 295 | |
297 | (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) | 296 | if (!ieee80211_is_data(hdr->frame_control) && |
297 | !ieee80211_is_auth(hdr->frame_control)) | ||
298 | return RX_CONTINUE; | 298 | return RX_CONTINUE; |
299 | 299 | ||
300 | if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { | 300 | if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { |
@@ -303,7 +303,7 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) | |||
303 | } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { | 303 | } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { |
304 | ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); | 304 | ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); |
305 | /* remove ICV */ | 305 | /* remove ICV */ |
306 | skb_trim(rx->skb, rx->skb->len - 4); | 306 | skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN); |
307 | } | 307 | } |
308 | 308 | ||
309 | return RX_CONTINUE; | 309 | return RX_CONTINUE; |
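The wep.c hunks above replace the magic constants 8 and 4 with WEP_IV_LEN and WEP_ICV_LEN. Assuming the conventional WEP sizes (a 4-byte IV field, i.e. 3 IV bytes plus the key-index byte, and a 4-byte CRC-32 ICV), the new length check and payload-length computation are equivalent to this standalone sketch:

    #include <stdbool.h>
    #include <stddef.h>

    #define WEP_IV_LEN  4   /* 3-byte RC4 IV + 1 byte key index */
    #define WEP_ICV_LEN 4   /* CRC-32 integrity check value */

    /* A frame is only decryptable if it holds the 802.11 header, the IV
     * and the ICV; the remaining bytes are the encrypted payload. */
    static bool wep_frame_long_enough(size_t skb_len, size_t hdrlen)
    {
            return skb_len >= hdrlen + WEP_IV_LEN + WEP_ICV_LEN;
    }

    static size_t wep_payload_len(size_t skb_len, size_t hdrlen)
    {
            return skb_len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN;
    }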
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c index 34fa8ed1e784..7e0d53abde24 100644 --- a/net/mac80211/wext.c +++ b/net/mac80211/wext.c | |||
@@ -27,22 +27,19 @@ | |||
27 | #include "aes_ccm.h" | 27 | #include "aes_ccm.h" |
28 | 28 | ||
29 | 29 | ||
30 | static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr, | 30 | static int ieee80211_set_encryption(struct ieee80211_sub_if_data *sdata, u8 *sta_addr, |
31 | int idx, int alg, int remove, | 31 | int idx, int alg, int remove, |
32 | int set_tx_key, const u8 *_key, | 32 | int set_tx_key, const u8 *_key, |
33 | size_t key_len) | 33 | size_t key_len) |
34 | { | 34 | { |
35 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 35 | struct ieee80211_local *local = sdata->local; |
36 | struct sta_info *sta; | 36 | struct sta_info *sta; |
37 | struct ieee80211_key *key; | 37 | struct ieee80211_key *key; |
38 | struct ieee80211_sub_if_data *sdata; | ||
39 | int err; | 38 | int err; |
40 | 39 | ||
41 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
42 | |||
43 | if (idx < 0 || idx >= NUM_DEFAULT_KEYS) { | 40 | if (idx < 0 || idx >= NUM_DEFAULT_KEYS) { |
44 | printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n", | 41 | printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n", |
45 | dev->name, idx); | 42 | sdata->dev->name, idx); |
46 | return -EINVAL; | 43 | return -EINVAL; |
47 | } | 44 | } |
48 | 45 | ||
@@ -125,13 +122,13 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev, | |||
125 | if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) | 122 | if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) |
126 | return -EOPNOTSUPP; | 123 | return -EOPNOTSUPP; |
127 | 124 | ||
128 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 125 | if (sdata->vif.type == NL80211_IFTYPE_STATION || |
129 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 126 | sdata->vif.type == NL80211_IFTYPE_ADHOC) { |
130 | int ret = ieee80211_sta_set_extra_ie(dev, extra, data->length); | 127 | int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length); |
131 | if (ret) | 128 | if (ret) |
132 | return ret; | 129 | return ret; |
133 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; | 130 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; |
134 | ieee80211_sta_req_auth(dev, &sdata->u.sta); | 131 | ieee80211_sta_req_auth(sdata, &sdata->u.sta); |
135 | return 0; | 132 | return 0; |
136 | } | 133 | } |
137 | 134 | ||
@@ -276,21 +273,21 @@ static int ieee80211_ioctl_siwmode(struct net_device *dev, | |||
276 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 273 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
277 | int type; | 274 | int type; |
278 | 275 | ||
279 | if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) | 276 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
280 | return -EOPNOTSUPP; | 277 | return -EOPNOTSUPP; |
281 | 278 | ||
282 | switch (*mode) { | 279 | switch (*mode) { |
283 | case IW_MODE_INFRA: | 280 | case IW_MODE_INFRA: |
284 | type = IEEE80211_IF_TYPE_STA; | 281 | type = NL80211_IFTYPE_STATION; |
285 | break; | 282 | break; |
286 | case IW_MODE_ADHOC: | 283 | case IW_MODE_ADHOC: |
287 | type = IEEE80211_IF_TYPE_IBSS; | 284 | type = NL80211_IFTYPE_ADHOC; |
288 | break; | 285 | break; |
289 | case IW_MODE_REPEAT: | 286 | case IW_MODE_REPEAT: |
290 | type = IEEE80211_IF_TYPE_WDS; | 287 | type = NL80211_IFTYPE_WDS; |
291 | break; | 288 | break; |
292 | case IW_MODE_MONITOR: | 289 | case IW_MODE_MONITOR: |
293 | type = IEEE80211_IF_TYPE_MNTR; | 290 | type = NL80211_IFTYPE_MONITOR; |
294 | break; | 291 | break; |
295 | default: | 292 | default: |
296 | return -EINVAL; | 293 | return -EINVAL; |
@@ -308,22 +305,22 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev, | |||
308 | 305 | ||
309 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 306 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
310 | switch (sdata->vif.type) { | 307 | switch (sdata->vif.type) { |
311 | case IEEE80211_IF_TYPE_AP: | 308 | case NL80211_IFTYPE_AP: |
312 | *mode = IW_MODE_MASTER; | 309 | *mode = IW_MODE_MASTER; |
313 | break; | 310 | break; |
314 | case IEEE80211_IF_TYPE_STA: | 311 | case NL80211_IFTYPE_STATION: |
315 | *mode = IW_MODE_INFRA; | 312 | *mode = IW_MODE_INFRA; |
316 | break; | 313 | break; |
317 | case IEEE80211_IF_TYPE_IBSS: | 314 | case NL80211_IFTYPE_ADHOC: |
318 | *mode = IW_MODE_ADHOC; | 315 | *mode = IW_MODE_ADHOC; |
319 | break; | 316 | break; |
320 | case IEEE80211_IF_TYPE_MNTR: | 317 | case NL80211_IFTYPE_MONITOR: |
321 | *mode = IW_MODE_MONITOR; | 318 | *mode = IW_MODE_MONITOR; |
322 | break; | 319 | break; |
323 | case IEEE80211_IF_TYPE_WDS: | 320 | case NL80211_IFTYPE_WDS: |
324 | *mode = IW_MODE_REPEAT; | 321 | *mode = IW_MODE_REPEAT; |
325 | break; | 322 | break; |
326 | case IEEE80211_IF_TYPE_VLAN: | 323 | case NL80211_IFTYPE_AP_VLAN: |
327 | *mode = IW_MODE_SECOND; /* FIXME */ | 324 | *mode = IW_MODE_SECOND; /* FIXME */ |
328 | break; | 325 | break; |
329 | default: | 326 | default: |
@@ -333,60 +330,31 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev, | |||
333 | return 0; | 330 | return 0; |
334 | } | 331 | } |
335 | 332 | ||
336 | int ieee80211_set_freq(struct net_device *dev, int freqMHz) | ||
337 | { | ||
338 | int ret = -EINVAL; | ||
339 | struct ieee80211_channel *chan; | ||
340 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
341 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
342 | |||
343 | chan = ieee80211_get_channel(local->hw.wiphy, freqMHz); | ||
344 | |||
345 | if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) { | ||
346 | if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && | ||
347 | chan->flags & IEEE80211_CHAN_NO_IBSS) { | ||
348 | printk(KERN_DEBUG "%s: IBSS not allowed on frequency " | ||
349 | "%d MHz\n", dev->name, chan->center_freq); | ||
350 | return ret; | ||
351 | } | ||
352 | local->oper_channel = chan; | ||
353 | |||
354 | if (local->sta_sw_scanning || local->sta_hw_scanning) | ||
355 | ret = 0; | ||
356 | else | ||
357 | ret = ieee80211_hw_config(local); | ||
358 | |||
359 | rate_control_clear(local); | ||
360 | } | ||
361 | |||
362 | return ret; | ||
363 | } | ||
364 | |||
365 | static int ieee80211_ioctl_siwfreq(struct net_device *dev, | 333 | static int ieee80211_ioctl_siwfreq(struct net_device *dev, |
366 | struct iw_request_info *info, | 334 | struct iw_request_info *info, |
367 | struct iw_freq *freq, char *extra) | 335 | struct iw_freq *freq, char *extra) |
368 | { | 336 | { |
369 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 337 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
370 | 338 | ||
371 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) | 339 | if (sdata->vif.type == NL80211_IFTYPE_STATION) |
372 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL; | 340 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL; |
373 | 341 | ||
374 | /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */ | 342 | /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */ |
375 | if (freq->e == 0) { | 343 | if (freq->e == 0) { |
376 | if (freq->m < 0) { | 344 | if (freq->m < 0) { |
377 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) | 345 | if (sdata->vif.type == NL80211_IFTYPE_STATION) |
378 | sdata->u.sta.flags |= | 346 | sdata->u.sta.flags |= |
379 | IEEE80211_STA_AUTO_CHANNEL_SEL; | 347 | IEEE80211_STA_AUTO_CHANNEL_SEL; |
380 | return 0; | 348 | return 0; |
381 | } else | 349 | } else |
382 | return ieee80211_set_freq(dev, | 350 | return ieee80211_set_freq(sdata, |
383 | ieee80211_channel_to_frequency(freq->m)); | 351 | ieee80211_channel_to_frequency(freq->m)); |
384 | } else { | 352 | } else { |
385 | int i, div = 1000000; | 353 | int i, div = 1000000; |
386 | for (i = 0; i < freq->e; i++) | 354 | for (i = 0; i < freq->e; i++) |
387 | div /= 10; | 355 | div /= 10; |
388 | if (div > 0) | 356 | if (div > 0) |
389 | return ieee80211_set_freq(dev, freq->m / div); | 357 | return ieee80211_set_freq(sdata, freq->m / div); |
390 | else | 358 | else |
391 | return -EINVAL; | 359 | return -EINVAL; |
392 | } | 360 | } |
@@ -418,8 +386,8 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev, | |||
418 | len--; | 386 | len--; |
419 | 387 | ||
420 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 388 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
421 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 389 | if (sdata->vif.type == NL80211_IFTYPE_STATION || |
422 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 390 | sdata->vif.type == NL80211_IFTYPE_ADHOC) { |
423 | int ret; | 391 | int ret; |
424 | if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { | 392 | if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { |
425 | if (len > IEEE80211_MAX_SSID_LEN) | 393 | if (len > IEEE80211_MAX_SSID_LEN) |
@@ -432,14 +400,14 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev, | |||
432 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; | 400 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; |
433 | else | 401 | else |
434 | sdata->u.sta.flags |= IEEE80211_STA_AUTO_SSID_SEL; | 402 | sdata->u.sta.flags |= IEEE80211_STA_AUTO_SSID_SEL; |
435 | ret = ieee80211_sta_set_ssid(dev, ssid, len); | 403 | ret = ieee80211_sta_set_ssid(sdata, ssid, len); |
436 | if (ret) | 404 | if (ret) |
437 | return ret; | 405 | return ret; |
438 | ieee80211_sta_req_auth(dev, &sdata->u.sta); | 406 | ieee80211_sta_req_auth(sdata, &sdata->u.sta); |
439 | return 0; | 407 | return 0; |
440 | } | 408 | } |
441 | 409 | ||
442 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { | 410 | if (sdata->vif.type == NL80211_IFTYPE_AP) { |
443 | memcpy(sdata->u.ap.ssid, ssid, len); | 411 | memcpy(sdata->u.ap.ssid, ssid, len); |
444 | memset(sdata->u.ap.ssid + len, 0, | 412 | memset(sdata->u.ap.ssid + len, 0, |
445 | IEEE80211_MAX_SSID_LEN - len); | 413 | IEEE80211_MAX_SSID_LEN - len); |
@@ -458,9 +426,9 @@ static int ieee80211_ioctl_giwessid(struct net_device *dev, | |||
458 | 426 | ||
459 | struct ieee80211_sub_if_data *sdata; | 427 | struct ieee80211_sub_if_data *sdata; |
460 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 428 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
461 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 429 | if (sdata->vif.type == NL80211_IFTYPE_STATION || |
462 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 430 | sdata->vif.type == NL80211_IFTYPE_ADHOC) { |
463 | int res = ieee80211_sta_get_ssid(dev, ssid, &len); | 431 | int res = ieee80211_sta_get_ssid(sdata, ssid, &len); |
464 | if (res == 0) { | 432 | if (res == 0) { |
465 | data->length = len; | 433 | data->length = len; |
466 | data->flags = 1; | 434 | data->flags = 1; |
@@ -469,7 +437,7 @@ static int ieee80211_ioctl_giwessid(struct net_device *dev, | |||
469 | return res; | 437 | return res; |
470 | } | 438 | } |
471 | 439 | ||
472 | if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { | 440 | if (sdata->vif.type == NL80211_IFTYPE_AP) { |
473 | len = sdata->u.ap.ssid_len; | 441 | len = sdata->u.ap.ssid_len; |
474 | if (len > IW_ESSID_MAX_SIZE) | 442 | if (len > IW_ESSID_MAX_SIZE) |
475 | len = IW_ESSID_MAX_SIZE; | 443 | len = IW_ESSID_MAX_SIZE; |
@@ -489,8 +457,8 @@ static int ieee80211_ioctl_siwap(struct net_device *dev, | |||
489 | struct ieee80211_sub_if_data *sdata; | 457 | struct ieee80211_sub_if_data *sdata; |
490 | 458 | ||
491 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 459 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
492 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 460 | if (sdata->vif.type == NL80211_IFTYPE_STATION || |
493 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 461 | sdata->vif.type == NL80211_IFTYPE_ADHOC) { |
494 | int ret; | 462 | int ret; |
495 | if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { | 463 | if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { |
496 | memcpy(sdata->u.sta.bssid, (u8 *) &ap_addr->sa_data, | 464 | memcpy(sdata->u.sta.bssid, (u8 *) &ap_addr->sa_data, |
@@ -504,12 +472,12 @@ static int ieee80211_ioctl_siwap(struct net_device *dev, | |||
504 | sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL; | 472 | sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL; |
505 | else | 473 | else |
506 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; | 474 | sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; |
507 | ret = ieee80211_sta_set_bssid(dev, (u8 *) &ap_addr->sa_data); | 475 | ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data); |
508 | if (ret) | 476 | if (ret) |
509 | return ret; | 477 | return ret; |
510 | ieee80211_sta_req_auth(dev, &sdata->u.sta); | 478 | ieee80211_sta_req_auth(sdata, &sdata->u.sta); |
511 | return 0; | 479 | return 0; |
512 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { | 480 | } else if (sdata->vif.type == NL80211_IFTYPE_WDS) { |
513 | /* | 481 | /* |
514 | * If it is necessary to update the WDS peer address | 482 | * If it is necessary to update the WDS peer address |
515 | * while the interface is running, then we need to do | 483 | * while the interface is running, then we need to do |
@@ -537,10 +505,10 @@ static int ieee80211_ioctl_giwap(struct net_device *dev, | |||
537 | struct ieee80211_sub_if_data *sdata; | 505 | struct ieee80211_sub_if_data *sdata; |
538 | 506 | ||
539 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 507 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
540 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 508 | if (sdata->vif.type == NL80211_IFTYPE_STATION || |
541 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { | 509 | sdata->vif.type == NL80211_IFTYPE_ADHOC) { |
542 | if (sdata->u.sta.state == IEEE80211_ASSOCIATED || | 510 | if (sdata->u.sta.state == IEEE80211_STA_MLME_ASSOCIATED || |
543 | sdata->u.sta.state == IEEE80211_IBSS_JOINED) { | 511 | sdata->u.sta.state == IEEE80211_STA_MLME_IBSS_JOINED) { |
544 | ap_addr->sa_family = ARPHRD_ETHER; | 512 | ap_addr->sa_family = ARPHRD_ETHER; |
545 | memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN); | 513 | memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN); |
546 | return 0; | 514 | return 0; |
@@ -548,7 +516,7 @@ static int ieee80211_ioctl_giwap(struct net_device *dev, | |||
548 | memset(&ap_addr->sa_data, 0, ETH_ALEN); | 516 | memset(&ap_addr->sa_data, 0, ETH_ALEN); |
549 | return 0; | 517 | return 0; |
550 | } | 518 | } |
551 | } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { | 519 | } else if (sdata->vif.type == NL80211_IFTYPE_WDS) { |
552 | ap_addr->sa_family = ARPHRD_ETHER; | 520 | ap_addr->sa_family = ARPHRD_ETHER; |
553 | memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN); | 521 | memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN); |
554 | return 0; | 522 | return 0; |
@@ -570,10 +538,10 @@ static int ieee80211_ioctl_siwscan(struct net_device *dev, | |||
570 | if (!netif_running(dev)) | 538 | if (!netif_running(dev)) |
571 | return -ENETDOWN; | 539 | return -ENETDOWN; |
572 | 540 | ||
573 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && | 541 | if (sdata->vif.type != NL80211_IFTYPE_STATION && |
574 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS && | 542 | sdata->vif.type != NL80211_IFTYPE_ADHOC && |
575 | sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT && | 543 | sdata->vif.type != NL80211_IFTYPE_MESH_POINT && |
576 | sdata->vif.type != IEEE80211_IF_TYPE_AP) | 544 | sdata->vif.type != NL80211_IFTYPE_AP) |
577 | return -EOPNOTSUPP; | 545 | return -EOPNOTSUPP; |
578 | 546 | ||
579 | /* if SSID was specified explicitly then use that */ | 547 | /* if SSID was specified explicitly then use that */ |
@@ -584,7 +552,7 @@ static int ieee80211_ioctl_siwscan(struct net_device *dev, | |||
584 | ssid_len = req->essid_len; | 552 | ssid_len = req->essid_len; |
585 | } | 553 | } |
586 | 554 | ||
587 | return ieee80211_sta_req_scan(dev, ssid, ssid_len); | 555 | return ieee80211_request_scan(sdata, ssid, ssid_len); |
588 | } | 556 | } |
589 | 557 | ||
590 | 558 | ||
@@ -594,11 +562,14 @@ static int ieee80211_ioctl_giwscan(struct net_device *dev, | |||
594 | { | 562 | { |
595 | int res; | 563 | int res; |
596 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 564 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
565 | struct ieee80211_sub_if_data *sdata; | ||
566 | |||
567 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
597 | 568 | ||
598 | if (local->sta_sw_scanning || local->sta_hw_scanning) | 569 | if (local->sw_scanning || local->hw_scanning) |
599 | return -EAGAIN; | 570 | return -EAGAIN; |
600 | 571 | ||
601 | res = ieee80211_sta_scan_results(dev, info, extra, data->length); | 572 | res = ieee80211_scan_results(local, info, extra, data->length); |
602 | if (res >= 0) { | 573 | if (res >= 0) { |
603 | data->length = res; | 574 | data->length = res; |
604 | return 0; | 575 | return 0; |
@@ -656,7 +627,7 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev, | |||
656 | 627 | ||
657 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 628 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
658 | 629 | ||
659 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | 630 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
660 | return -EOPNOTSUPP; | 631 | return -EOPNOTSUPP; |
661 | 632 | ||
662 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | 633 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; |
@@ -665,8 +636,8 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev, | |||
665 | 636 | ||
666 | sta = sta_info_get(local, sdata->u.sta.bssid); | 637 | sta = sta_info_get(local, sdata->u.sta.bssid); |
667 | 638 | ||
668 | if (sta && sta->txrate_idx < sband->n_bitrates) | 639 | if (sta && sta->last_txrate_idx < sband->n_bitrates) |
669 | rate->value = sband->bitrates[sta->txrate_idx].bitrate; | 640 | rate->value = sband->bitrates[sta->last_txrate_idx].bitrate; |
670 | else | 641 | else |
671 | rate->value = 0; | 642 | rate->value = 0; |
672 | 643 | ||
@@ -887,17 +858,17 @@ static int ieee80211_ioctl_siwmlme(struct net_device *dev, | |||
887 | struct iw_mlme *mlme = (struct iw_mlme *) extra; | 858 | struct iw_mlme *mlme = (struct iw_mlme *) extra; |
888 | 859 | ||
889 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 860 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
890 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA && | 861 | if (sdata->vif.type != NL80211_IFTYPE_STATION && |
891 | sdata->vif.type != IEEE80211_IF_TYPE_IBSS) | 862 | sdata->vif.type != NL80211_IFTYPE_ADHOC) |
892 | return -EINVAL; | 863 | return -EINVAL; |
893 | 864 | ||
894 | switch (mlme->cmd) { | 865 | switch (mlme->cmd) { |
895 | case IW_MLME_DEAUTH: | 866 | case IW_MLME_DEAUTH: |
896 | /* TODO: mlme->addr.sa_data */ | 867 | /* TODO: mlme->addr.sa_data */ |
897 | return ieee80211_sta_deauthenticate(dev, mlme->reason_code); | 868 | return ieee80211_sta_deauthenticate(sdata, mlme->reason_code); |
898 | case IW_MLME_DISASSOC: | 869 | case IW_MLME_DISASSOC: |
899 | /* TODO: mlme->addr.sa_data */ | 870 | /* TODO: mlme->addr.sa_data */ |
900 | return ieee80211_sta_disassociate(dev, mlme->reason_code); | 871 | return ieee80211_sta_disassociate(sdata, mlme->reason_code); |
901 | default: | 872 | default: |
902 | return -EOPNOTSUPP; | 873 | return -EOPNOTSUPP; |
903 | } | 874 | } |
@@ -938,7 +909,7 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev, | |||
938 | } | 909 | } |
939 | 910 | ||
940 | return ieee80211_set_encryption( | 911 | return ieee80211_set_encryption( |
941 | dev, bcaddr, | 912 | sdata, bcaddr, |
942 | idx, alg, remove, | 913 | idx, alg, remove, |
943 | !sdata->default_key, | 914 | !sdata->default_key, |
944 | keybuf, erq->length); | 915 | keybuf, erq->length); |
@@ -983,7 +954,7 @@ static int ieee80211_ioctl_giwencode(struct net_device *dev, | |||
983 | erq->length = sdata->keys[idx]->conf.keylen; | 954 | erq->length = sdata->keys[idx]->conf.keylen; |
984 | erq->flags |= IW_ENCODE_ENABLED; | 955 | erq->flags |= IW_ENCODE_ENABLED; |
985 | 956 | ||
986 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { | 957 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
987 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; | 958 | struct ieee80211_if_sta *ifsta = &sdata->u.sta; |
988 | switch (ifsta->auth_alg) { | 959 | switch (ifsta->auth_alg) { |
989 | case WLAN_AUTH_OPEN: | 960 | case WLAN_AUTH_OPEN: |
@@ -1057,7 +1028,7 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev, | |||
1057 | sdata->drop_unencrypted = !!data->value; | 1028 | sdata->drop_unencrypted = !!data->value; |
1058 | break; | 1029 | break; |
1059 | case IW_AUTH_PRIVACY_INVOKED: | 1030 | case IW_AUTH_PRIVACY_INVOKED: |
1060 | if (sdata->vif.type != IEEE80211_IF_TYPE_STA) | 1031 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
1061 | ret = -EINVAL; | 1032 | ret = -EINVAL; |
1062 | else { | 1033 | else { |
1063 | sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED; | 1034 | sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED; |
@@ -1072,8 +1043,8 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev, | |||
1072 | } | 1043 | } |
1073 | break; | 1044 | break; |
1074 | case IW_AUTH_80211_AUTH_ALG: | 1045 | case IW_AUTH_80211_AUTH_ALG: |
1075 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 1046 | if (sdata->vif.type == NL80211_IFTYPE_STATION || |
1076 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) | 1047 | sdata->vif.type == NL80211_IFTYPE_ADHOC) |
1077 | sdata->u.sta.auth_algs = data->value; | 1048 | sdata->u.sta.auth_algs = data->value; |
1078 | else | 1049 | else |
1079 | ret = -EOPNOTSUPP; | 1050 | ret = -EOPNOTSUPP; |
@@ -1095,8 +1066,8 @@ static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev | |||
1095 | 1066 | ||
1096 | rcu_read_lock(); | 1067 | rcu_read_lock(); |
1097 | 1068 | ||
1098 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 1069 | if (sdata->vif.type == NL80211_IFTYPE_STATION || |
1099 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) | 1070 | sdata->vif.type == NL80211_IFTYPE_ADHOC) |
1100 | sta = sta_info_get(local, sdata->u.sta.bssid); | 1071 | sta = sta_info_get(local, sdata->u.sta.bssid); |
1101 | if (!sta) { | 1072 | if (!sta) { |
1102 | wstats->discard.fragment = 0; | 1073 | wstats->discard.fragment = 0; |
@@ -1126,8 +1097,8 @@ static int ieee80211_ioctl_giwauth(struct net_device *dev, | |||
1126 | 1097 | ||
1127 | switch (data->flags & IW_AUTH_INDEX) { | 1098 | switch (data->flags & IW_AUTH_INDEX) { |
1128 | case IW_AUTH_80211_AUTH_ALG: | 1099 | case IW_AUTH_80211_AUTH_ALG: |
1129 | if (sdata->vif.type == IEEE80211_IF_TYPE_STA || | 1100 | if (sdata->vif.type == NL80211_IFTYPE_STATION || |
1130 | sdata->vif.type == IEEE80211_IF_TYPE_IBSS) | 1101 | sdata->vif.type == NL80211_IFTYPE_ADHOC) |
1131 | data->value = sdata->u.sta.auth_algs; | 1102 | data->value = sdata->u.sta.auth_algs; |
1132 | else | 1103 | else |
1133 | ret = -EOPNOTSUPP; | 1104 | ret = -EOPNOTSUPP; |
@@ -1184,7 +1155,7 @@ static int ieee80211_ioctl_siwencodeext(struct net_device *dev, | |||
1184 | } else | 1155 | } else |
1185 | idx--; | 1156 | idx--; |
1186 | 1157 | ||
1187 | return ieee80211_set_encryption(dev, ext->addr.sa_data, idx, alg, | 1158 | return ieee80211_set_encryption(sdata, ext->addr.sa_data, idx, alg, |
1188 | remove, | 1159 | remove, |
1189 | ext->ext_flags & | 1160 | ext->ext_flags & |
1190 | IW_ENCODE_EXT_SET_TX_KEY, | 1161 | IW_ENCODE_EXT_SET_TX_KEY, |
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 4310e2f65661..139b5f267b34 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c | |||
@@ -39,7 +39,7 @@ static unsigned int classify_1d(struct sk_buff *skb) | |||
39 | return skb->priority - 256; | 39 | return skb->priority - 256; |
40 | 40 | ||
41 | switch (skb->protocol) { | 41 | switch (skb->protocol) { |
42 | case __constant_htons(ETH_P_IP): | 42 | case htons(ETH_P_IP): |
43 | dscp = ip_hdr(skb)->tos & 0xfc; | 43 | dscp = ip_hdr(skb)->tos & 0xfc; |
44 | break; | 44 | break; |
45 | 45 | ||
@@ -47,8 +47,6 @@ static unsigned int classify_1d(struct sk_buff *skb) | |||
47 | return 0; | 47 | return 0; |
48 | } | 48 | } |
49 | 49 | ||
50 | if (dscp & 0x1c) | ||
51 | return 0; | ||
52 | return dscp >> 5; | 50 | return dscp >> 5; |
53 | } | 51 | } |
54 | 52 | ||
@@ -75,9 +73,8 @@ static int wme_downgrade_ac(struct sk_buff *skb) | |||
75 | 73 | ||
76 | 74 | ||
77 | /* Indicate which queue to use. */ | 75 | /* Indicate which queue to use. */ |
78 | static u16 classify80211(struct sk_buff *skb, struct net_device *dev) | 76 | static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb) |
79 | { | 77 | { |
80 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
81 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 78 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
82 | 79 | ||
83 | if (!ieee80211_is_data(hdr->frame_control)) { | 80 | if (!ieee80211_is_data(hdr->frame_control)) { |
@@ -115,14 +112,15 @@ static u16 classify80211(struct sk_buff *skb, struct net_device *dev) | |||
115 | 112 | ||
116 | u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb) | 113 | u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb) |
117 | { | 114 | { |
115 | struct ieee80211_master_priv *mpriv = netdev_priv(dev); | ||
116 | struct ieee80211_local *local = mpriv->local; | ||
118 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 117 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
119 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
120 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 118 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
121 | struct sta_info *sta; | 119 | struct sta_info *sta; |
122 | u16 queue; | 120 | u16 queue; |
123 | u8 tid; | 121 | u8 tid; |
124 | 122 | ||
125 | queue = classify80211(skb, dev); | 123 | queue = classify80211(local, skb); |
126 | if (unlikely(queue >= local->hw.queues)) | 124 | if (unlikely(queue >= local->hw.queues)) |
127 | queue = local->hw.queues - 1; | 125 | queue = local->hw.queues - 1; |
128 | 126 | ||
@@ -212,7 +210,7 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, | |||
212 | DECLARE_MAC_BUF(mac); | 210 | DECLARE_MAC_BUF(mac); |
213 | printk(KERN_DEBUG "allocated aggregation queue" | 211 | printk(KERN_DEBUG "allocated aggregation queue" |
214 | " %d tid %d addr %s pool=0x%lX\n", | 212 | " %d tid %d addr %s pool=0x%lX\n", |
215 | i, tid, print_mac(mac, sta->addr), | 213 | i, tid, print_mac(mac, sta->sta.addr), |
216 | local->queue_pool[0]); | 214 | local->queue_pool[0]); |
217 | } | 215 | } |
218 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 216 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
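classify_1d() above now derives the 802.1d priority directly from the top three bits of the masked TOS byte; the old "if (dscp & 0x1c) return 0;" special case is dropped, so frames with intermediate DSCP bits set keep their precedence-based priority. A small userspace sketch of the resulting mapping; the sample TOS values are illustrative, not taken from the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Mask off the two ECN bits, then keep the top three (precedence) bits. */
    static unsigned int tos_to_1d_priority(uint8_t tos)
    {
            uint8_t dscp = tos & 0xfc;
            return dscp >> 5;
    }

    int main(void)
    {
            printf("TOS 0xb8 -> priority %u\n", tos_to_1d_priority(0xb8)); /* 5 (was 0 before) */
            printf("TOS 0x20 -> priority %u\n", tos_to_1d_priority(0x20)); /* 1 */
            return 0;
    }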
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h index 04de28c071a6..bc62f28a4d3d 100644 --- a/net/mac80211/wme.h +++ b/net/mac80211/wme.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * IEEE 802.11 driver (80211.o) - QoS datatypes | ||
3 | * Copyright 2004, Instant802 Networks, Inc. | 2 | * Copyright 2004, Instant802 Networks, Inc. |
4 | * Copyright 2005, Devicescape Software, Inc. | 3 | * Copyright 2005, Devicescape Software, Inc. |
5 | * | 4 | * |
@@ -14,8 +13,6 @@ | |||
14 | #include <linux/netdevice.h> | 13 | #include <linux/netdevice.h> |
15 | #include "ieee80211_i.h" | 14 | #include "ieee80211_i.h" |
16 | 15 | ||
17 | #define QOS_CONTROL_LEN 2 | ||
18 | |||
19 | #define QOS_CONTROL_ACK_POLICY_NORMAL 0 | 16 | #define QOS_CONTROL_ACK_POLICY_NORMAL 0 |
20 | #define QOS_CONTROL_ACK_POLICY_NOACK 1 | 17 | #define QOS_CONTROL_ACK_POLICY_NOACK 1 |
21 | 18 | ||
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 2f33df0dcccf..37ae9a959f63 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
@@ -127,7 +127,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) | |||
127 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | 127 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) |
128 | return RX_DROP_UNUSABLE; | 128 | return RX_DROP_UNUSABLE; |
129 | 129 | ||
130 | mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, | 130 | mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx, |
131 | (void *) skb->data); | 131 | (void *) skb->data); |
132 | return RX_DROP_UNUSABLE; | 132 | return RX_DROP_UNUSABLE; |
133 | } | 133 | } |
@@ -256,7 +256,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) | |||
256 | 256 | ||
257 | res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, | 257 | res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, |
258 | key, skb->data + hdrlen, | 258 | key, skb->data + hdrlen, |
259 | skb->len - hdrlen, rx->sta->addr, | 259 | skb->len - hdrlen, rx->sta->sta.addr, |
260 | hdr->addr1, hwaccel, rx->queue, | 260 | hdr->addr1, hwaccel, rx->queue, |
261 | &rx->tkip_iv32, | 261 | &rx->tkip_iv32, |
262 | &rx->tkip_iv16); | 262 | &rx->tkip_iv16); |
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c index 9f328593287e..307a2c3c2df4 100644 --- a/net/netfilter/xt_time.c +++ b/net/netfilter/xt_time.c | |||
@@ -136,17 +136,19 @@ static void localtime_3(struct xtm *r, time_t time) | |||
136 | * from w repeatedly while counting.) | 136 | * from w repeatedly while counting.) |
137 | */ | 137 | */ |
138 | if (is_leap(year)) { | 138 | if (is_leap(year)) { |
139 | /* use days_since_leapyear[] in a leap year */ | ||
139 | for (i = ARRAY_SIZE(days_since_leapyear) - 1; | 140 | for (i = ARRAY_SIZE(days_since_leapyear) - 1; |
140 | i > 0 && days_since_year[i] > w; --i) | 141 | i > 0 && days_since_leapyear[i] > w; --i) |
141 | /* just loop */; | 142 | /* just loop */; |
143 | r->monthday = w - days_since_leapyear[i] + 1; | ||
142 | } else { | 144 | } else { |
143 | for (i = ARRAY_SIZE(days_since_year) - 1; | 145 | for (i = ARRAY_SIZE(days_since_year) - 1; |
144 | i > 0 && days_since_year[i] > w; --i) | 146 | i > 0 && days_since_year[i] > w; --i) |
145 | /* just loop */; | 147 | /* just loop */; |
148 | r->monthday = w - days_since_year[i] + 1; | ||
146 | } | 149 | } |
147 | 150 | ||
148 | r->month = i + 1; | 151 | r->month = i + 1; |
149 | r->monthday = w - days_since_year[i] + 1; | ||
150 | return; | 152 | return; |
151 | } | 153 | } |
152 | 154 | ||
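The xt_time.c hunk above fixes month/monthday resolution in leap years: the leap-year table is now both searched and used for the monthday offset, instead of only the normal-year table. A standalone sketch of the corrected lookup follows; the table contents are the standard cumulative day counts per month and are assumed here, not copied from the kernel source:

    #include <stdio.h>

    static const unsigned int days_since_year[] =
            { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
    static const unsigned int days_since_leapyear[] =
            { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335 };

    /* Resolve a zero-based day-of-year into month/monthday, picking the
     * leap-year table when appropriate - the behaviour the fix restores. */
    static void resolve(unsigned int w, int leap,
                        unsigned int *month, unsigned int *monthday)
    {
            const unsigned int *tbl = leap ? days_since_leapyear : days_since_year;
            int i;

            for (i = 11; i > 0 && tbl[i] > w; --i)
                    ;
            *month = i + 1;
            *monthday = w - tbl[i] + 1;
    }

    int main(void)
    {
            unsigned int m, d;

            resolve(59, 1, &m, &d);
            printf("leap year, day 59:   month %u day %u\n", m, d); /* Feb 29 */
            resolve(59, 0, &m, &d);
            printf("normal year, day 59: month %u day %u\n", m, d); /* Mar 1 */
            return 0;
    }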
diff --git a/net/phonet/Kconfig b/net/phonet/Kconfig new file mode 100644 index 000000000000..51a5669573f2 --- /dev/null +++ b/net/phonet/Kconfig | |||
@@ -0,0 +1,16 @@ | |||
1 | # | ||
2 | # Phonet protocol | ||
3 | # | ||
4 | |||
5 | config PHONET | ||
6 | tristate "Phonet protocols family" | ||
7 | help | ||
8 | The Phone Network protocol (PhoNet) is a packet-oriented | ||
8 | communication protocol developed by Nokia for use with its modems. | ||
10 | |||
11 | This is required for Maemo to use cellular data connectivity (if | ||
12 | supported). It can also be used to control Nokia phones | ||
13 | from a Linux computer, although AT commands may be easier to use. | ||
14 | |||
15 | To compile this driver as a module, choose M here: the module | ||
16 | will be called phonet. If unsure, say N. | ||
diff --git a/net/phonet/Makefile b/net/phonet/Makefile new file mode 100644 index 000000000000..ae9c3ed5be83 --- /dev/null +++ b/net/phonet/Makefile | |||
@@ -0,0 +1,9 @@ | |||
1 | obj-$(CONFIG_PHONET) += phonet.o | ||
2 | |||
3 | phonet-objs := \ | ||
4 | pn_dev.o \ | ||
5 | pn_netlink.o \ | ||
6 | socket.o \ | ||
7 | datagram.o \ | ||
8 | sysctl.o \ | ||
9 | af_phonet.o | ||
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c new file mode 100644 index 000000000000..1d8df6b7e3df --- /dev/null +++ b/net/phonet/af_phonet.c | |||
@@ -0,0 +1,468 @@ | |||
1 | /* | ||
2 | * File: af_phonet.c | ||
3 | * | ||
4 | * Phonet protocols family | ||
5 | * | ||
6 | * Copyright (C) 2008 Nokia Corporation. | ||
7 | * | ||
8 | * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> | ||
9 | * Original author: Sakari Ailus <sakari.ailus@nokia.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * version 2 as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
23 | * 02110-1301 USA | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <asm/unaligned.h> | ||
29 | #include <net/sock.h> | ||
30 | |||
31 | #include <linux/if_phonet.h> | ||
32 | #include <linux/phonet.h> | ||
33 | #include <net/phonet/phonet.h> | ||
34 | #include <net/phonet/pn_dev.h> | ||
35 | |||
36 | static struct net_proto_family phonet_proto_family; | ||
37 | static struct phonet_protocol *phonet_proto_get(int protocol); | ||
38 | static inline void phonet_proto_put(struct phonet_protocol *pp); | ||
39 | |||
40 | /* protocol family functions */ | ||
41 | |||
42 | static int pn_socket_create(struct net *net, struct socket *sock, int protocol) | ||
43 | { | ||
44 | struct sock *sk; | ||
45 | struct pn_sock *pn; | ||
46 | struct phonet_protocol *pnp; | ||
47 | int err; | ||
48 | |||
49 | if (net != &init_net) | ||
50 | return -EAFNOSUPPORT; | ||
51 | |||
52 | if (!capable(CAP_SYS_ADMIN)) | ||
53 | return -EPERM; | ||
54 | |||
55 | if (protocol == 0) { | ||
56 | /* Default protocol selection */ | ||
57 | switch (sock->type) { | ||
58 | case SOCK_DGRAM: | ||
59 | protocol = PN_PROTO_PHONET; | ||
60 | break; | ||
61 | default: | ||
62 | return -EPROTONOSUPPORT; | ||
63 | } | ||
64 | } | ||
65 | |||
66 | pnp = phonet_proto_get(protocol); | ||
67 | if (pnp == NULL) | ||
68 | return -EPROTONOSUPPORT; | ||
69 | if (sock->type != pnp->sock_type) { | ||
70 | err = -EPROTONOSUPPORT; | ||
71 | goto out; | ||
72 | } | ||
73 | |||
74 | sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot); | ||
75 | if (sk == NULL) { | ||
76 | err = -ENOMEM; | ||
77 | goto out; | ||
78 | } | ||
79 | |||
80 | sock_init_data(sock, sk); | ||
81 | sock->state = SS_UNCONNECTED; | ||
82 | sock->ops = pnp->ops; | ||
83 | sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; | ||
84 | sk->sk_protocol = protocol; | ||
85 | pn = pn_sk(sk); | ||
86 | pn->sobject = 0; | ||
87 | pn->resource = 0; | ||
88 | sk->sk_prot->init(sk); | ||
89 | err = 0; | ||
90 | |||
91 | out: | ||
92 | phonet_proto_put(pnp); | ||
93 | return err; | ||
94 | } | ||
95 | |||
96 | static struct net_proto_family phonet_proto_family = { | ||
97 | .family = AF_PHONET, | ||
98 | .create = pn_socket_create, | ||
99 | .owner = THIS_MODULE, | ||
100 | }; | ||
101 | |||
102 | /* Phonet device header operations */ | ||
103 | static int pn_header_create(struct sk_buff *skb, struct net_device *dev, | ||
104 | unsigned short type, const void *daddr, | ||
105 | const void *saddr, unsigned len) | ||
106 | { | ||
107 | u8 *media = skb_push(skb, 1); | ||
108 | |||
109 | if (type != ETH_P_PHONET) | ||
110 | return -1; | ||
111 | |||
112 | if (!saddr) | ||
113 | saddr = dev->dev_addr; | ||
114 | *media = *(const u8 *)saddr; | ||
115 | return 1; | ||
116 | } | ||
117 | |||
118 | static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr) | ||
119 | { | ||
120 | const u8 *media = skb_mac_header(skb); | ||
121 | *haddr = *media; | ||
122 | return 1; | ||
123 | } | ||
124 | |||
125 | struct header_ops phonet_header_ops = { | ||
126 | .create = pn_header_create, | ||
127 | .parse = pn_header_parse, | ||
128 | }; | ||
129 | EXPORT_SYMBOL(phonet_header_ops); | ||
130 | |||
131 | /* | ||
132 | * Prepends an ISI header and sends a datagram. | ||
133 | */ | ||
134 | static int pn_send(struct sk_buff *skb, struct net_device *dev, | ||
135 | u16 dst, u16 src, u8 res, u8 irq) | ||
136 | { | ||
137 | struct phonethdr *ph; | ||
138 | int err; | ||
139 | |||
140 | if (skb->len + 2 > 0xffff) { | ||
141 | /* Phonet length field would overflow */ | ||
142 | err = -EMSGSIZE; | ||
143 | goto drop; | ||
144 | } | ||
145 | |||
146 | skb_reset_transport_header(skb); | ||
147 | WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */ | ||
148 | skb_push(skb, sizeof(struct phonethdr)); | ||
149 | skb_reset_network_header(skb); | ||
150 | ph = pn_hdr(skb); | ||
151 | ph->pn_rdev = pn_dev(dst); | ||
152 | ph->pn_sdev = pn_dev(src); | ||
153 | ph->pn_res = res; | ||
154 | ph->pn_length = __cpu_to_be16(skb->len + 2 - sizeof(*ph)); | ||
155 | ph->pn_robj = pn_obj(dst); | ||
156 | ph->pn_sobj = pn_obj(src); | ||
157 | |||
158 | skb->protocol = htons(ETH_P_PHONET); | ||
159 | skb->priority = 0; | ||
160 | skb->dev = dev; | ||
161 | |||
162 | if (pn_addr(src) == pn_addr(dst)) { | ||
163 | skb_reset_mac_header(skb); | ||
164 | skb->pkt_type = PACKET_LOOPBACK; | ||
165 | skb_orphan(skb); | ||
166 | if (irq) | ||
167 | netif_rx(skb); | ||
168 | else | ||
169 | netif_rx_ni(skb); | ||
170 | err = 0; | ||
171 | } else { | ||
172 | err = dev_hard_header(skb, dev, ntohs(skb->protocol), | ||
173 | NULL, NULL, skb->len); | ||
174 | if (err < 0) { | ||
175 | err = -EHOSTUNREACH; | ||
176 | goto drop; | ||
177 | } | ||
178 | err = dev_queue_xmit(skb); | ||
179 | } | ||
180 | |||
181 | return err; | ||
182 | drop: | ||
183 | kfree_skb(skb); | ||
184 | return err; | ||
185 | } | ||
186 | |||
187 | static int pn_raw_send(const void *data, int len, struct net_device *dev, | ||
188 | u16 dst, u16 src, u8 res) | ||
189 | { | ||
190 | struct sk_buff *skb = alloc_skb(MAX_PHONET_HEADER + len, GFP_ATOMIC); | ||
191 | if (skb == NULL) | ||
192 | return -ENOMEM; | ||
193 | |||
194 | skb_reserve(skb, MAX_PHONET_HEADER); | ||
195 | __skb_put(skb, len); | ||
196 | skb_copy_to_linear_data(skb, data, len); | ||
197 | return pn_send(skb, dev, dst, src, res, 1); | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * Create a Phonet header for the skb and send it out. Returns a | ||
202 | * non-zero error code on failure, in which case the skb is freed. | ||
203 | */ | ||
204 | int pn_skb_send(struct sock *sk, struct sk_buff *skb, | ||
205 | const struct sockaddr_pn *target) | ||
206 | { | ||
207 | struct net_device *dev; | ||
208 | struct pn_sock *pn = pn_sk(sk); | ||
209 | int err; | ||
210 | u16 src; | ||
211 | u8 daddr = pn_sockaddr_get_addr(target), saddr = PN_NO_ADDR; | ||
212 | |||
213 | err = -EHOSTUNREACH; | ||
214 | if (sk->sk_bound_dev_if) | ||
215 | dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if); | ||
216 | else | ||
217 | dev = phonet_device_get(sock_net(sk)); | ||
218 | if (!dev || !(dev->flags & IFF_UP)) | ||
219 | goto drop; | ||
220 | |||
221 | saddr = phonet_address_get(dev, daddr); | ||
222 | if (saddr == PN_NO_ADDR) | ||
223 | goto drop; | ||
224 | |||
225 | src = pn->sobject; | ||
226 | if (!pn_addr(src)) | ||
227 | src = pn_object(saddr, pn_obj(src)); | ||
228 | |||
229 | err = pn_send(skb, dev, pn_sockaddr_get_object(target), | ||
230 | src, pn_sockaddr_get_resource(target), 0); | ||
231 | dev_put(dev); | ||
232 | return err; | ||
233 | |||
234 | drop: | ||
235 | kfree_skb(skb); | ||
236 | if (dev) | ||
237 | dev_put(dev); | ||
238 | return err; | ||
239 | } | ||
240 | EXPORT_SYMBOL(pn_skb_send); | ||
241 | |||
242 | /* Do not send an error message in response to an error message */ | ||
243 | static inline int can_respond(struct sk_buff *skb) | ||
244 | { | ||
245 | const struct phonethdr *ph; | ||
246 | const struct phonetmsg *pm; | ||
247 | u8 submsg_id; | ||
248 | |||
249 | if (!pskb_may_pull(skb, 3)) | ||
250 | return 0; | ||
251 | |||
252 | ph = pn_hdr(skb); | ||
253 | if (phonet_address_get(skb->dev, ph->pn_rdev) != ph->pn_rdev) | ||
254 | return 0; /* we are not the destination */ | ||
255 | if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5)) | ||
256 | return 0; | ||
257 | |||
258 | ph = pn_hdr(skb); /* re-acquires the pointer */ | ||
259 | pm = pn_msg(skb); | ||
260 | if (pm->pn_msg_id != PN_COMMON_MESSAGE) | ||
261 | return 1; | ||
262 | submsg_id = (ph->pn_res == PN_PREFIX) | ||
263 | ? pm->pn_e_submsg_id : pm->pn_submsg_id; | ||
264 | if (submsg_id != PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP && | ||
265 | pm->pn_e_submsg_id != PN_COMM_SERVICE_NOT_IDENTIFIED_RESP) | ||
266 | return 1; | ||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static int send_obj_unreachable(struct sk_buff *rskb) | ||
271 | { | ||
272 | const struct phonethdr *oph = pn_hdr(rskb); | ||
273 | const struct phonetmsg *opm = pn_msg(rskb); | ||
274 | struct phonetmsg resp; | ||
275 | |||
276 | memset(&resp, 0, sizeof(resp)); | ||
277 | resp.pn_trans_id = opm->pn_trans_id; | ||
278 | resp.pn_msg_id = PN_COMMON_MESSAGE; | ||
279 | if (oph->pn_res == PN_PREFIX) { | ||
280 | resp.pn_e_res_id = opm->pn_e_res_id; | ||
281 | resp.pn_e_submsg_id = PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP; | ||
282 | resp.pn_e_orig_msg_id = opm->pn_msg_id; | ||
283 | resp.pn_e_status = 0; | ||
284 | } else { | ||
285 | resp.pn_submsg_id = PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP; | ||
286 | resp.pn_orig_msg_id = opm->pn_msg_id; | ||
287 | resp.pn_status = 0; | ||
288 | } | ||
289 | return pn_raw_send(&resp, sizeof(resp), rskb->dev, | ||
290 | pn_object(oph->pn_sdev, oph->pn_sobj), | ||
291 | pn_object(oph->pn_rdev, oph->pn_robj), | ||
292 | oph->pn_res); | ||
293 | } | ||
294 | |||
295 | static int send_reset_indications(struct sk_buff *rskb) | ||
296 | { | ||
297 | struct phonethdr *oph = pn_hdr(rskb); | ||
298 | static const u8 data[4] = { | ||
299 | 0x00 /* trans ID */, 0x10 /* subscribe msg */, | ||
300 | 0x00 /* subscription count */, 0x00 /* dummy */ | ||
301 | }; | ||
302 | |||
303 | return pn_raw_send(data, sizeof(data), rskb->dev, | ||
304 | pn_object(oph->pn_sdev, 0x00), | ||
305 | pn_object(oph->pn_rdev, oph->pn_robj), 0x10); | ||
306 | } | ||
307 | |||
308 | |||
309 | /* packet type functions */ | ||
310 | |||
311 | /* | ||
312 | * Stuff received packets to associated sockets. | ||
313 | * On error, returns non-zero and releases the skb. | ||
314 | */ | ||
315 | static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, | ||
316 | struct packet_type *pkttype, | ||
317 | struct net_device *orig_dev) | ||
318 | { | ||
319 | struct phonethdr *ph; | ||
320 | struct sock *sk; | ||
321 | struct sockaddr_pn sa; | ||
322 | u16 len; | ||
323 | |||
324 | if (dev_net(dev) != &init_net) | ||
325 | goto out; | ||
326 | |||
327 | /* check we have at least a full Phonet header */ | ||
328 | if (!pskb_pull(skb, sizeof(struct phonethdr))) | ||
329 | goto out; | ||
330 | |||
331 | /* check that the advertised length is correct */ | ||
332 | ph = pn_hdr(skb); | ||
333 | len = get_unaligned_be16(&ph->pn_length); | ||
334 | if (len < 2) | ||
335 | goto out; | ||
336 | len -= 2; | ||
337 | if ((len > skb->len) || pskb_trim(skb, len)) | ||
338 | goto out; | ||
339 | skb_reset_transport_header(skb); | ||
340 | |||
341 | pn_skb_get_dst_sockaddr(skb, &sa); | ||
342 | if (pn_sockaddr_get_addr(&sa) == 0) | ||
343 | goto out; /* currently, we cannot be device 0 */ | ||
344 | |||
345 | sk = pn_find_sock_by_sa(&sa); | ||
346 | if (sk == NULL) { | ||
347 | if (can_respond(skb)) { | ||
348 | send_obj_unreachable(skb); | ||
349 | send_reset_indications(skb); | ||
350 | } | ||
351 | goto out; | ||
352 | } | ||
353 | |||
354 | /* Push data to the socket (or other sockets connected to it). */ | ||
355 | return sk_receive_skb(sk, skb, 0); | ||
356 | |||
357 | out: | ||
358 | kfree_skb(skb); | ||
359 | return NET_RX_DROP; | ||
360 | } | ||
361 | |||
362 | static struct packet_type phonet_packet_type = { | ||
363 | .type = __constant_htons(ETH_P_PHONET), | ||
364 | .dev = NULL, | ||
365 | .func = phonet_rcv, | ||
366 | }; | ||
367 | |||
368 | /* Transport protocol registration */ | ||
369 | static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; | ||
370 | static DEFINE_SPINLOCK(proto_tab_lock); | ||
371 | |||
372 | int __init_or_module phonet_proto_register(int protocol, | ||
373 | struct phonet_protocol *pp) | ||
374 | { | ||
375 | int err = 0; | ||
376 | |||
377 | if (protocol >= PHONET_NPROTO) | ||
378 | return -EINVAL; | ||
379 | |||
380 | err = proto_register(pp->prot, 1); | ||
381 | if (err) | ||
382 | return err; | ||
383 | |||
384 | spin_lock(&proto_tab_lock); | ||
385 | if (proto_tab[protocol]) | ||
386 | err = -EBUSY; | ||
387 | else | ||
388 | proto_tab[protocol] = pp; | ||
389 | spin_unlock(&proto_tab_lock); | ||
390 | |||
391 | return err; | ||
392 | } | ||
393 | EXPORT_SYMBOL(phonet_proto_register); | ||
394 | |||
395 | void phonet_proto_unregister(int protocol, struct phonet_protocol *pp) | ||
396 | { | ||
397 | spin_lock(&proto_tab_lock); | ||
398 | BUG_ON(proto_tab[protocol] != pp); | ||
399 | proto_tab[protocol] = NULL; | ||
400 | spin_unlock(&proto_tab_lock); | ||
401 | proto_unregister(pp->prot); | ||
402 | } | ||
403 | EXPORT_SYMBOL(phonet_proto_unregister); | ||
404 | |||
405 | static struct phonet_protocol *phonet_proto_get(int protocol) | ||
406 | { | ||
407 | struct phonet_protocol *pp; | ||
408 | |||
409 | if (protocol >= PHONET_NPROTO) | ||
410 | return NULL; | ||
411 | |||
412 | spin_lock(&proto_tab_lock); | ||
413 | pp = proto_tab[protocol]; | ||
414 | if (pp && !try_module_get(pp->prot->owner)) | ||
415 | pp = NULL; | ||
416 | spin_unlock(&proto_tab_lock); | ||
417 | |||
418 | return pp; | ||
419 | } | ||
420 | |||
421 | static inline void phonet_proto_put(struct phonet_protocol *pp) | ||
422 | { | ||
423 | module_put(pp->prot->owner); | ||
424 | } | ||
425 | |||
426 | /* Module registration */ | ||
427 | static int __init phonet_init(void) | ||
428 | { | ||
429 | int err; | ||
430 | |||
431 | err = sock_register(&phonet_proto_family); | ||
432 | if (err) { | ||
433 | printk(KERN_ALERT | ||
434 | "phonet protocol family initialization failed\n"); | ||
435 | return err; | ||
436 | } | ||
437 | |||
438 | phonet_device_init(); | ||
439 | dev_add_pack(&phonet_packet_type); | ||
440 | phonet_netlink_register(); | ||
441 | phonet_sysctl_init(); | ||
442 | |||
443 | err = isi_register(); | ||
444 | if (err) | ||
445 | goto err; | ||
446 | return 0; | ||
447 | |||
448 | err: | ||
449 | phonet_sysctl_exit(); | ||
450 | sock_unregister(AF_PHONET); | ||
451 | dev_remove_pack(&phonet_packet_type); | ||
452 | phonet_device_exit(); | ||
453 | return err; | ||
454 | } | ||
455 | |||
456 | static void __exit phonet_exit(void) | ||
457 | { | ||
458 | isi_unregister(); | ||
459 | phonet_sysctl_exit(); | ||
460 | sock_unregister(AF_PHONET); | ||
461 | dev_remove_pack(&phonet_packet_type); | ||
462 | phonet_device_exit(); | ||
463 | } | ||
464 | |||
465 | module_init(phonet_init); | ||
466 | module_exit(phonet_exit); | ||
467 | MODULE_DESCRIPTION("Phonet protocol stack for Linux"); | ||
468 | MODULE_LICENSE("GPL"); | ||
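Not part of the patch: a hedged userspace sketch that exercises pn_socket_create() above. AF_PHONET and PN_PROTO_PHONET normally come from <linux/phonet.h>; the fallback value 35 is an assumption for toolchains whose headers predate the family, and the call needs CAP_SYS_ADMIN plus the phonet module loaded, as enforced above.

/* Open a Phonet datagram socket; pn_socket_create() maps protocol 0
 * to PN_PROTO_PHONET. Needs CAP_SYS_ADMIN and the phonet module. */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/phonet.h>

#ifndef AF_PHONET
#define AF_PHONET 35	/* assumed value if the libc headers lack it */
#endif

int main(void)
{
	int fd = socket(AF_PHONET, SOCK_DGRAM, 0);

	if (fd < 0) {
		fprintf(stderr, "socket(AF_PHONET): %s\n", strerror(errno));
		return 1;
	}
	printf("Phonet datagram socket created (fd=%d)\n", fd);
	return 0;
}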
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c new file mode 100644 index 000000000000..e087862ed7e4 --- /dev/null +++ b/net/phonet/datagram.c | |||
@@ -0,0 +1,197 @@ | |||
1 | /* | ||
2 | * File: datagram.c | ||
3 | * | ||
4 | * Datagram (ISI) Phonet sockets | ||
5 | * | ||
6 | * Copyright (C) 2008 Nokia Corporation. | ||
7 | * | ||
8 | * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> | ||
9 | * Original author: Sakari Ailus <sakari.ailus@nokia.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * version 2 as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
23 | * 02110-1301 USA | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/socket.h> | ||
28 | #include <asm/ioctls.h> | ||
29 | #include <net/sock.h> | ||
30 | |||
31 | #include <linux/phonet.h> | ||
32 | #include <net/phonet/phonet.h> | ||
33 | |||
34 | static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb); | ||
35 | |||
36 | /* associated socket ceases to exist */ | ||
37 | static void pn_sock_close(struct sock *sk, long timeout) | ||
38 | { | ||
39 | sk_common_release(sk); | ||
40 | } | ||
41 | |||
42 | static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg) | ||
43 | { | ||
44 | struct sk_buff *skb; | ||
45 | int answ; | ||
46 | |||
47 | switch (cmd) { | ||
48 | case SIOCINQ: | ||
49 | lock_sock(sk); | ||
50 | skb = skb_peek(&sk->sk_receive_queue); | ||
51 | answ = skb ? skb->len : 0; | ||
52 | release_sock(sk); | ||
53 | return put_user(answ, (int __user *)arg); | ||
54 | } | ||
55 | |||
56 | return -ENOIOCTLCMD; | ||
57 | } | ||
58 | |||
59 | /* Destroy socket. All references are gone. */ | ||
60 | static void pn_destruct(struct sock *sk) | ||
61 | { | ||
62 | skb_queue_purge(&sk->sk_receive_queue); | ||
63 | } | ||
64 | |||
65 | static int pn_init(struct sock *sk) | ||
66 | { | ||
67 | sk->sk_destruct = pn_destruct; | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | static int pn_sendmsg(struct kiocb *iocb, struct sock *sk, | ||
72 | struct msghdr *msg, size_t len) | ||
73 | { | ||
74 | struct sockaddr_pn *target; | ||
75 | struct sk_buff *skb; | ||
76 | int err; | ||
77 | |||
78 | if (msg->msg_flags & MSG_OOB) | ||
79 | return -EOPNOTSUPP; | ||
80 | |||
81 | if (msg->msg_name == NULL) | ||
82 | return -EDESTADDRREQ; | ||
83 | |||
84 | if (msg->msg_namelen < sizeof(struct sockaddr_pn)) | ||
85 | return -EINVAL; | ||
86 | |||
87 | target = (struct sockaddr_pn *)msg->msg_name; | ||
88 | if (target->spn_family != AF_PHONET) | ||
89 | return -EAFNOSUPPORT; | ||
90 | |||
91 | skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len, | ||
92 | msg->msg_flags & MSG_DONTWAIT, &err); | ||
93 | if (skb == NULL) | ||
94 | return err; | ||
95 | skb_reserve(skb, MAX_PHONET_HEADER); | ||
96 | |||
97 | err = memcpy_fromiovec((void *)skb_put(skb, len), msg->msg_iov, len); | ||
98 | if (err < 0) { | ||
99 | kfree_skb(skb); | ||
100 | return err; | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * Fill in the Phonet header and | ||
105 | * finally pass the packet forwards. | ||
106 | */ | ||
107 | err = pn_skb_send(sk, skb, target); | ||
108 | |||
109 | /* If ok, return len. */ | ||
110 | return (err >= 0) ? len : err; | ||
111 | } | ||
112 | |||
113 | static int pn_recvmsg(struct kiocb *iocb, struct sock *sk, | ||
114 | struct msghdr *msg, size_t len, int noblock, | ||
115 | int flags, int *addr_len) | ||
116 | { | ||
117 | struct sk_buff *skb = NULL; | ||
118 | struct sockaddr_pn sa; | ||
119 | int rval = -EOPNOTSUPP; | ||
120 | int copylen; | ||
121 | |||
122 | if (flags & MSG_OOB) | ||
123 | goto out_nofree; | ||
124 | |||
125 | if (addr_len) | ||
126 | *addr_len = sizeof(sa); | ||
127 | |||
128 | skb = skb_recv_datagram(sk, flags, noblock, &rval); | ||
129 | if (skb == NULL) | ||
130 | goto out_nofree; | ||
131 | |||
132 | pn_skb_get_src_sockaddr(skb, &sa); | ||
133 | |||
134 | copylen = skb->len; | ||
135 | if (len < copylen) { | ||
136 | msg->msg_flags |= MSG_TRUNC; | ||
137 | copylen = len; | ||
138 | } | ||
139 | |||
140 | rval = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copylen); | ||
141 | if (rval) { | ||
142 | rval = -EFAULT; | ||
143 | goto out; | ||
144 | } | ||
145 | |||
146 | rval = (flags & MSG_TRUNC) ? skb->len : copylen; | ||
147 | |||
148 | if (msg->msg_name != NULL) | ||
149 | memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn)); | ||
150 | |||
151 | out: | ||
152 | skb_free_datagram(sk, skb); | ||
153 | |||
154 | out_nofree: | ||
155 | return rval; | ||
156 | } | ||
157 | |||
158 | /* Queue an skb for a sock. */ | ||
159 | static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb) | ||
160 | { | ||
161 | int err = sock_queue_rcv_skb(sk, skb); | ||
162 | if (err < 0) | ||
163 | kfree_skb(skb); | ||
164 | return err ? NET_RX_DROP : NET_RX_SUCCESS; | ||
165 | } | ||
166 | |||
167 | /* Module registration */ | ||
168 | static struct proto pn_proto = { | ||
169 | .close = pn_sock_close, | ||
170 | .ioctl = pn_ioctl, | ||
171 | .init = pn_init, | ||
172 | .sendmsg = pn_sendmsg, | ||
173 | .recvmsg = pn_recvmsg, | ||
174 | .backlog_rcv = pn_backlog_rcv, | ||
175 | .hash = pn_sock_hash, | ||
176 | .unhash = pn_sock_unhash, | ||
177 | .get_port = pn_sock_get_port, | ||
178 | .obj_size = sizeof(struct pn_sock), | ||
179 | .owner = THIS_MODULE, | ||
180 | .name = "PHONET", | ||
181 | }; | ||
182 | |||
183 | static struct phonet_protocol pn_dgram_proto = { | ||
184 | .ops = &phonet_dgram_ops, | ||
185 | .prot = &pn_proto, | ||
186 | .sock_type = SOCK_DGRAM, | ||
187 | }; | ||
188 | |||
189 | int __init isi_register(void) | ||
190 | { | ||
191 | return phonet_proto_register(PN_PROTO_PHONET, &pn_dgram_proto); | ||
192 | } | ||
193 | |||
194 | void __exit isi_unregister(void) | ||
195 | { | ||
196 | phonet_proto_unregister(PN_PROTO_PHONET, &pn_dgram_proto); | ||
197 | } | ||
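A hedged sketch of driving pn_sendmsg() above from userspace. The destination device, object, and resource values are placeholders; struct sockaddr_pn is taken from <linux/phonet.h>, and the kernel picks the source address in pn_skb_send().

/* Send one Phonet datagram; destination values are placeholders and
 * the kernel fills in the source address in pn_skb_send(). */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/phonet.h>

#ifndef AF_PHONET
#define AF_PHONET 35	/* assumed value if the libc headers lack it */
#endif

int main(void)
{
	static const unsigned char payload[] = { 0x00, 0x01 }; /* dummy bytes */
	struct sockaddr_pn dst;
	int fd = socket(AF_PHONET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&dst, 0, sizeof(dst));
	dst.spn_family = AF_PHONET;
	dst.spn_dev = 0x60;		/* placeholder remote device address */
	dst.spn_obj = 0x01;		/* placeholder remote object (port) */
	dst.spn_resource = 0x10;	/* placeholder resource */

	if (sendto(fd, payload, sizeof(payload), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}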
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c new file mode 100644 index 000000000000..53be9fc82aaa --- /dev/null +++ b/net/phonet/pn_dev.c | |||
@@ -0,0 +1,208 @@ | |||
1 | /* | ||
2 | * File: pn_dev.c | ||
3 | * | ||
4 | * Phonet network device | ||
5 | * | ||
6 | * Copyright (C) 2008 Nokia Corporation. | ||
7 | * | ||
8 | * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> | ||
9 | * Original author: Sakari Ailus <sakari.ailus@nokia.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * version 2 as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
23 | * 02110-1301 USA | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/net.h> | ||
28 | #include <linux/netdevice.h> | ||
29 | #include <linux/phonet.h> | ||
30 | #include <net/sock.h> | ||
31 | #include <net/phonet/pn_dev.h> | ||
32 | |||
33 | /* when accessing, remember to lock with spin_lock(&pndevs.lock); */ | ||
34 | struct phonet_device_list pndevs = { | ||
35 | .list = LIST_HEAD_INIT(pndevs.list), | ||
36 | .lock = __SPIN_LOCK_UNLOCKED(pndevs.lock), | ||
37 | }; | ||
38 | |||
39 | /* Allocate new Phonet device. */ | ||
40 | static struct phonet_device *__phonet_device_alloc(struct net_device *dev) | ||
41 | { | ||
42 | struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC); | ||
43 | if (pnd == NULL) | ||
44 | return NULL; | ||
45 | pnd->netdev = dev; | ||
46 | bitmap_zero(pnd->addrs, 64); | ||
47 | |||
48 | list_add(&pnd->list, &pndevs.list); | ||
49 | return pnd; | ||
50 | } | ||
51 | |||
52 | static struct phonet_device *__phonet_get(struct net_device *dev) | ||
53 | { | ||
54 | struct phonet_device *pnd; | ||
55 | |||
56 | list_for_each_entry(pnd, &pndevs.list, list) { | ||
57 | if (pnd->netdev == dev) | ||
58 | return pnd; | ||
59 | } | ||
60 | return NULL; | ||
61 | } | ||
62 | |||
63 | static void __phonet_device_free(struct phonet_device *pnd) | ||
64 | { | ||
65 | list_del(&pnd->list); | ||
66 | kfree(pnd); | ||
67 | } | ||
68 | |||
69 | struct net_device *phonet_device_get(struct net *net) | ||
70 | { | ||
71 | struct phonet_device *pnd; | ||
72 | struct net_device *dev; | ||
73 | |||
74 | spin_lock_bh(&pndevs.lock); | ||
75 | list_for_each_entry(pnd, &pndevs.list, list) { | ||
76 | dev = pnd->netdev; | ||
77 | BUG_ON(!dev); | ||
78 | |||
79 | if (dev_net(dev) == net && | ||
80 | (dev->reg_state == NETREG_REGISTERED) && | ||
81 | ((pnd->netdev->flags & IFF_UP)) == IFF_UP) | ||
82 | break; | ||
83 | dev = NULL; | ||
84 | } | ||
85 | if (dev) | ||
86 | dev_hold(dev); | ||
87 | spin_unlock_bh(&pndevs.lock); | ||
88 | return dev; | ||
89 | } | ||
90 | |||
91 | int phonet_address_add(struct net_device *dev, u8 addr) | ||
92 | { | ||
93 | struct phonet_device *pnd; | ||
94 | int err = 0; | ||
95 | |||
96 | spin_lock_bh(&pndevs.lock); | ||
97 | /* Find or create Phonet-specific device data */ | ||
98 | pnd = __phonet_get(dev); | ||
99 | if (pnd == NULL) | ||
100 | pnd = __phonet_device_alloc(dev); | ||
101 | if (unlikely(pnd == NULL)) | ||
102 | err = -ENOMEM; | ||
103 | else if (test_and_set_bit(addr >> 2, pnd->addrs)) | ||
104 | err = -EEXIST; | ||
105 | spin_unlock_bh(&pndevs.lock); | ||
106 | return err; | ||
107 | } | ||
108 | |||
109 | int phonet_address_del(struct net_device *dev, u8 addr) | ||
110 | { | ||
111 | struct phonet_device *pnd; | ||
112 | int err = 0; | ||
113 | |||
114 | spin_lock_bh(&pndevs.lock); | ||
115 | pnd = __phonet_get(dev); | ||
116 | if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) | ||
117 | err = -EADDRNOTAVAIL; | ||
118 | else if (bitmap_empty(pnd->addrs, 64)) | ||
119 | __phonet_device_free(pnd); | ||
120 | spin_unlock_bh(&pndevs.lock); | ||
121 | return err; | ||
122 | } | ||
123 | |||
124 | /* Gets a source address toward a destination, through an interface. */ | ||
125 | u8 phonet_address_get(struct net_device *dev, u8 addr) | ||
126 | { | ||
127 | struct phonet_device *pnd; | ||
128 | |||
129 | spin_lock_bh(&pndevs.lock); | ||
130 | pnd = __phonet_get(dev); | ||
131 | if (pnd) { | ||
132 | BUG_ON(bitmap_empty(pnd->addrs, 64)); | ||
133 | |||
134 | /* Use same source address as destination, if possible */ | ||
135 | if (!test_bit(addr >> 2, pnd->addrs)) | ||
136 | addr = find_first_bit(pnd->addrs, 64) << 2; | ||
137 | } else | ||
138 | addr = PN_NO_ADDR; | ||
139 | spin_unlock_bh(&pndevs.lock); | ||
140 | return addr; | ||
141 | } | ||
142 | |||
143 | int phonet_address_lookup(u8 addr) | ||
144 | { | ||
145 | struct phonet_device *pnd; | ||
146 | |||
147 | spin_lock_bh(&pndevs.lock); | ||
148 | list_for_each_entry(pnd, &pndevs.list, list) { | ||
149 | /* Don't allow unregistering devices! */ | ||
150 | if ((pnd->netdev->reg_state != NETREG_REGISTERED) || | ||
151 | ((pnd->netdev->flags & IFF_UP)) != IFF_UP) | ||
152 | continue; | ||
153 | |||
154 | if (test_bit(addr >> 2, pnd->addrs)) { | ||
155 | spin_unlock_bh(&pndevs.lock); | ||
156 | return 0; | ||
157 | } | ||
158 | } | ||
159 | spin_unlock_bh(&pndevs.lock); | ||
160 | return -EADDRNOTAVAIL; | ||
161 | } | ||
162 | |||
163 | /* notify Phonet of device events */ | ||
164 | static int phonet_device_notify(struct notifier_block *me, unsigned long what, | ||
165 | void *arg) | ||
166 | { | ||
167 | struct net_device *dev = arg; | ||
168 | |||
169 | if (what == NETDEV_UNREGISTER) { | ||
170 | struct phonet_device *pnd; | ||
171 | |||
172 | /* Destroy phonet-specific device data */ | ||
173 | spin_lock_bh(&pndevs.lock); | ||
174 | pnd = __phonet_get(dev); | ||
175 | if (pnd) | ||
176 | __phonet_device_free(pnd); | ||
177 | spin_unlock_bh(&pndevs.lock); | ||
178 | } | ||
179 | return 0; | ||
180 | |||
181 | } | ||
182 | |||
183 | static struct notifier_block phonet_device_notifier = { | ||
184 | .notifier_call = phonet_device_notify, | ||
185 | .priority = 0, | ||
186 | }; | ||
187 | |||
188 | /* Initialize Phonet devices list */ | ||
189 | void phonet_device_init(void) | ||
190 | { | ||
191 | register_netdevice_notifier(&phonet_device_notifier); | ||
192 | } | ||
193 | |||
194 | void phonet_device_exit(void) | ||
195 | { | ||
196 | struct phonet_device *pnd, *n; | ||
197 | |||
198 | rtnl_unregister_all(PF_PHONET); | ||
199 | rtnl_lock(); | ||
200 | spin_lock_bh(&pndevs.lock); | ||
201 | |||
202 | list_for_each_entry_safe(pnd, n, &pndevs.list, list) | ||
203 | __phonet_device_free(pnd); | ||
204 | |||
205 | spin_unlock_bh(&pndevs.lock); | ||
206 | rtnl_unlock(); | ||
207 | unregister_netdevice_notifier(&phonet_device_notifier); | ||
208 | } | ||
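For reference, the per-device address bitmap above keys on the 6 high-order bits of a Phonet address (the two low bits must be clear), so addr >> 2 indexes a 64-bit map. A small sketch of that packing; the names here are illustrative, not kernel API.

/* Address <-> bitmap packing mirroring pn_dev.c: only the 6 high-order
 * bits of a Phonet address are significant, so addr >> 2 indexes a
 * 64-bit per-device map. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t addr_map;	/* stands in for pnd->addrs */

static bool pn_addr_valid(uint8_t addr)
{
	return (addr & 3) == 0;	/* same check as addr_doit() */
}

static void addr_set(uint8_t addr)
{
	addr_map |= 1ULL << (addr >> 2);
}

static bool addr_test(uint8_t addr)
{
	return addr_map & (1ULL << (addr >> 2));
}

int main(void)
{
	addr_set(0x60);
	printf("0x60: valid=%d present=%d\n", pn_addr_valid(0x60), addr_test(0x60));
	printf("0x61: valid=%d\n", pn_addr_valid(0x61));	/* low bits set */
	return 0;
}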
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c new file mode 100644 index 000000000000..b1770d66bc8d --- /dev/null +++ b/net/phonet/pn_netlink.c | |||
@@ -0,0 +1,165 @@ | |||
1 | /* | ||
2 | * File: pn_netlink.c | ||
3 | * | ||
4 | * Phonet netlink interface | ||
5 | * | ||
6 | * Copyright (C) 2008 Nokia Corporation. | ||
7 | * | ||
8 | * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> | ||
9 | * Original author: Sakari Ailus <sakari.ailus@nokia.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * version 2 as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
23 | * 02110-1301 USA | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/netlink.h> | ||
28 | #include <linux/phonet.h> | ||
29 | #include <net/sock.h> | ||
30 | #include <net/phonet/pn_dev.h> | ||
31 | |||
32 | static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, | ||
33 | u32 pid, u32 seq, int event); | ||
34 | |||
35 | static void rtmsg_notify(int event, struct net_device *dev, u8 addr) | ||
36 | { | ||
37 | struct sk_buff *skb; | ||
38 | int err = -ENOBUFS; | ||
39 | |||
40 | skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + | ||
41 | nla_total_size(1), GFP_KERNEL); | ||
42 | if (skb == NULL) | ||
43 | goto errout; | ||
44 | err = fill_addr(skb, dev, addr, 0, 0, event); | ||
45 | if (err < 0) { | ||
46 | WARN_ON(err == -EMSGSIZE); | ||
47 | kfree_skb(skb); | ||
48 | goto errout; | ||
49 | } | ||
50 | err = rtnl_notify(skb, dev_net(dev), 0, | ||
51 | RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL); | ||
52 | errout: | ||
53 | if (err < 0) | ||
54 | rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err); | ||
55 | } | ||
56 | |||
57 | static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = { | ||
58 | [IFA_LOCAL] = { .type = NLA_U8 }, | ||
59 | }; | ||
60 | |||
61 | static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr) | ||
62 | { | ||
63 | struct net *net = sock_net(skb->sk); | ||
64 | struct nlattr *tb[IFA_MAX+1]; | ||
65 | struct net_device *dev; | ||
66 | struct ifaddrmsg *ifm; | ||
67 | int err; | ||
68 | u8 pnaddr; | ||
69 | |||
70 | if (!capable(CAP_SYS_ADMIN)) | ||
71 | return -EPERM; | ||
72 | |||
73 | ASSERT_RTNL(); | ||
74 | |||
75 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_phonet_policy); | ||
76 | if (err < 0) | ||
77 | return err; | ||
78 | |||
79 | ifm = nlmsg_data(nlh); | ||
80 | if (tb[IFA_LOCAL] == NULL) | ||
81 | return -EINVAL; | ||
82 | pnaddr = nla_get_u8(tb[IFA_LOCAL]); | ||
83 | if (pnaddr & 3) | ||
84 | /* Phonet addresses only have 6 high-order bits */ | ||
85 | return -EINVAL; | ||
86 | |||
87 | dev = __dev_get_by_index(net, ifm->ifa_index); | ||
88 | if (dev == NULL) | ||
89 | return -ENODEV; | ||
90 | |||
91 | if (nlh->nlmsg_type == RTM_NEWADDR) | ||
92 | err = phonet_address_add(dev, pnaddr); | ||
93 | else | ||
94 | err = phonet_address_del(dev, pnaddr); | ||
95 | if (!err) | ||
96 | rtmsg_notify(nlh->nlmsg_type, dev, pnaddr); | ||
97 | return err; | ||
98 | } | ||
99 | |||
100 | static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, | ||
101 | u32 pid, u32 seq, int event) | ||
102 | { | ||
103 | struct ifaddrmsg *ifm; | ||
104 | struct nlmsghdr *nlh; | ||
105 | |||
106 | nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), 0); | ||
107 | if (nlh == NULL) | ||
108 | return -EMSGSIZE; | ||
109 | |||
110 | ifm = nlmsg_data(nlh); | ||
111 | ifm->ifa_family = AF_PHONET; | ||
112 | ifm->ifa_prefixlen = 0; | ||
113 | ifm->ifa_flags = IFA_F_PERMANENT; | ||
114 | ifm->ifa_scope = RT_SCOPE_LINK; | ||
115 | ifm->ifa_index = dev->ifindex; | ||
116 | NLA_PUT_U8(skb, IFA_LOCAL, addr); | ||
117 | return nlmsg_end(skb, nlh); | ||
118 | |||
119 | nla_put_failure: | ||
120 | nlmsg_cancel(skb, nlh); | ||
121 | return -EMSGSIZE; | ||
122 | } | ||
123 | |||
124 | static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | ||
125 | { | ||
126 | struct phonet_device *pnd; | ||
127 | int dev_idx = 0, dev_start_idx = cb->args[0]; | ||
128 | int addr_idx = 0, addr_start_idx = cb->args[1]; | ||
129 | |||
130 | spin_lock_bh(&pndevs.lock); | ||
131 | list_for_each_entry(pnd, &pndevs.list, list) { | ||
132 | u8 addr; | ||
133 | |||
134 | if (dev_idx > dev_start_idx) | ||
135 | addr_start_idx = 0; | ||
136 | if (dev_idx++ < dev_start_idx) | ||
137 | continue; | ||
138 | |||
139 | addr_idx = 0; | ||
140 | for (addr = find_first_bit(pnd->addrs, 64); addr < 64; | ||
141 | addr = find_next_bit(pnd->addrs, 64, 1+addr)) { | ||
142 | if (addr_idx++ < addr_start_idx) | ||
143 | continue; | ||
144 | |||
145 | if (fill_addr(skb, pnd->netdev, addr << 2, | ||
146 | NETLINK_CB(cb->skb).pid, | ||
147 | cb->nlh->nlmsg_seq, RTM_NEWADDR)) | ||
148 | goto out; | ||
149 | } | ||
150 | } | ||
151 | |||
152 | out: | ||
153 | spin_unlock_bh(&pndevs.lock); | ||
154 | cb->args[0] = dev_idx; | ||
155 | cb->args[1] = addr_idx; | ||
156 | |||
157 | return skb->len; | ||
158 | } | ||
159 | |||
160 | void __init phonet_netlink_register(void) | ||
161 | { | ||
162 | rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); | ||
163 | rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); | ||
164 | rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); | ||
165 | } | ||
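A hedged sketch of what a userspace configurator would send to addr_doit() above: an RTM_NEWADDR rtnetlink request carrying an ifaddrmsg with ifa_family = AF_PHONET and a single IFA_LOCAL u8 attribute. The interface index and address are placeholders, CAP_SYS_ADMIN is required, and reading back the netlink ACK is omitted.

/* Add a Phonet address over rtnetlink, matching what addr_doit()
 * expects: ifaddrmsg (ifa_family = AF_PHONET) plus one IFA_LOCAL
 * u8 attribute. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#ifndef AF_PHONET
#define AF_PHONET 35	/* assumed value if the libc headers lack it */
#endif

int phonet_addr_add(int ifindex, unsigned char pnaddr)
{
	struct {
		struct nlmsghdr nlh;
		struct ifaddrmsg ifa;
		char attr[RTA_SPACE(sizeof(unsigned char))];
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct rtattr *rta;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
	req.nlh.nlmsg_type = RTM_NEWADDR;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifa.ifa_family = AF_PHONET;
	req.ifa.ifa_index = ifindex;

	/* append IFA_LOCAL: a single byte, low two bits must be clear */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = IFA_LOCAL;
	rta->rta_len = RTA_LENGTH(sizeof(unsigned char));
	memcpy(RTA_DATA(rta), &pnaddr, sizeof(unsigned char));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + rta->rta_len;

	fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	ret = sendto(fd, &req, req.nlh.nlmsg_len, 0,
		     (struct sockaddr *)&kernel, sizeof(kernel));
	close(fd);
	return ret < 0 ? -1 : 0;
}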
diff --git a/net/phonet/socket.c b/net/phonet/socket.c new file mode 100644 index 000000000000..dfd4061646db --- /dev/null +++ b/net/phonet/socket.c | |||
@@ -0,0 +1,312 @@ | |||
1 | /* | ||
2 | * File: socket.c | ||
3 | * | ||
4 | * Phonet sockets | ||
5 | * | ||
6 | * Copyright (C) 2008 Nokia Corporation. | ||
7 | * | ||
8 | * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> | ||
9 | * Original author: Sakari Ailus <sakari.ailus@nokia.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * version 2 as published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
23 | * 02110-1301 USA | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/net.h> | ||
28 | #include <net/sock.h> | ||
29 | #include <net/tcp_states.h> | ||
30 | |||
31 | #include <linux/phonet.h> | ||
32 | #include <net/phonet/phonet.h> | ||
33 | #include <net/phonet/pn_dev.h> | ||
34 | |||
35 | static int pn_socket_release(struct socket *sock) | ||
36 | { | ||
37 | struct sock *sk = sock->sk; | ||
38 | |||
39 | if (sk) { | ||
40 | sock->sk = NULL; | ||
41 | sk->sk_prot->close(sk, 0); | ||
42 | } | ||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | static struct { | ||
47 | struct hlist_head hlist; | ||
48 | spinlock_t lock; | ||
49 | } pnsocks = { | ||
50 | .hlist = HLIST_HEAD_INIT, | ||
51 | .lock = __SPIN_LOCK_UNLOCKED(pnsocks.lock), | ||
52 | }; | ||
53 | |||
54 | /* | ||
55 | * Find a socket matching the given socket address (only certain | ||
56 | * fields are compared). Grabs a reference; remember to sock_put it later. | ||
57 | */ | ||
58 | struct sock *pn_find_sock_by_sa(const struct sockaddr_pn *spn) | ||
59 | { | ||
60 | struct hlist_node *node; | ||
61 | struct sock *sknode; | ||
62 | struct sock *rval = NULL; | ||
63 | u16 obj = pn_sockaddr_get_object(spn); | ||
64 | u8 res = spn->spn_resource; | ||
65 | |||
66 | spin_lock_bh(&pnsocks.lock); | ||
67 | |||
68 | sk_for_each(sknode, node, &pnsocks.hlist) { | ||
69 | struct pn_sock *pn = pn_sk(sknode); | ||
70 | BUG_ON(!pn->sobject); /* unbound socket */ | ||
71 | |||
72 | if (pn_port(obj)) { | ||
73 | /* Look up socket by port */ | ||
74 | if (pn_port(pn->sobject) != pn_port(obj)) | ||
75 | continue; | ||
76 | } else { | ||
77 | /* If port is zero, look up by resource */ | ||
78 | if (pn->resource != res) | ||
79 | continue; | ||
80 | } | ||
81 | if (pn_addr(pn->sobject) | ||
82 | && pn_addr(pn->sobject) != pn_addr(obj)) | ||
83 | continue; | ||
84 | |||
85 | rval = sknode; | ||
86 | sock_hold(sknode); | ||
87 | break; | ||
88 | } | ||
89 | |||
90 | spin_unlock_bh(&pnsocks.lock); | ||
91 | |||
92 | return rval; | ||
93 | |||
94 | } | ||
95 | |||
96 | void pn_sock_hash(struct sock *sk) | ||
97 | { | ||
98 | spin_lock_bh(&pnsocks.lock); | ||
99 | sk_add_node(sk, &pnsocks.hlist); | ||
100 | spin_unlock_bh(&pnsocks.lock); | ||
101 | } | ||
102 | EXPORT_SYMBOL(pn_sock_hash); | ||
103 | |||
104 | void pn_sock_unhash(struct sock *sk) | ||
105 | { | ||
106 | spin_lock_bh(&pnsocks.lock); | ||
107 | sk_del_node_init(sk); | ||
108 | spin_unlock_bh(&pnsocks.lock); | ||
109 | } | ||
110 | EXPORT_SYMBOL(pn_sock_unhash); | ||
111 | |||
112 | static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len) | ||
113 | { | ||
114 | struct sock *sk = sock->sk; | ||
115 | struct pn_sock *pn = pn_sk(sk); | ||
116 | struct sockaddr_pn *spn = (struct sockaddr_pn *)addr; | ||
117 | int err; | ||
118 | u16 handle; | ||
119 | u8 saddr; | ||
120 | |||
121 | if (sk->sk_prot->bind) | ||
122 | return sk->sk_prot->bind(sk, addr, len); | ||
123 | |||
124 | if (len < sizeof(struct sockaddr_pn)) | ||
125 | return -EINVAL; | ||
126 | if (spn->spn_family != AF_PHONET) | ||
127 | return -EAFNOSUPPORT; | ||
128 | |||
129 | handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr); | ||
130 | saddr = pn_addr(handle); | ||
131 | if (saddr && phonet_address_lookup(saddr)) | ||
132 | return -EADDRNOTAVAIL; | ||
133 | |||
134 | lock_sock(sk); | ||
135 | if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) { | ||
136 | err = -EINVAL; /* attempt to rebind */ | ||
137 | goto out; | ||
138 | } | ||
139 | err = sk->sk_prot->get_port(sk, pn_port(handle)); | ||
140 | if (err) | ||
141 | goto out; | ||
142 | |||
143 | /* get_port() sets the port, bind() sets the address if applicable */ | ||
144 | pn->sobject = pn_object(saddr, pn_port(pn->sobject)); | ||
145 | pn->resource = spn->spn_resource; | ||
146 | |||
147 | /* Enable RX on the socket */ | ||
148 | sk->sk_prot->hash(sk); | ||
149 | out: | ||
150 | release_sock(sk); | ||
151 | return err; | ||
152 | } | ||
153 | |||
154 | static int pn_socket_autobind(struct socket *sock) | ||
155 | { | ||
156 | struct sockaddr_pn sa; | ||
157 | int err; | ||
158 | |||
159 | memset(&sa, 0, sizeof(sa)); | ||
160 | sa.spn_family = AF_PHONET; | ||
161 | err = pn_socket_bind(sock, (struct sockaddr *)&sa, | ||
162 | sizeof(struct sockaddr_pn)); | ||
163 | if (err != -EINVAL) | ||
164 | return err; | ||
165 | BUG_ON(!pn_port(pn_sk(sock->sk)->sobject)); | ||
166 | return 0; /* socket was already bound */ | ||
167 | } | ||
168 | |||
169 | static int pn_socket_getname(struct socket *sock, struct sockaddr *addr, | ||
170 | int *sockaddr_len, int peer) | ||
171 | { | ||
172 | struct sock *sk = sock->sk; | ||
173 | struct pn_sock *pn = pn_sk(sk); | ||
174 | |||
175 | memset(addr, 0, sizeof(struct sockaddr_pn)); | ||
176 | addr->sa_family = AF_PHONET; | ||
177 | if (!peer) /* Race with bind() here is userland's problem. */ | ||
178 | pn_sockaddr_set_object((struct sockaddr_pn *)addr, | ||
179 | pn->sobject); | ||
180 | |||
181 | *sockaddr_len = sizeof(struct sockaddr_pn); | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static int pn_socket_ioctl(struct socket *sock, unsigned int cmd, | ||
186 | unsigned long arg) | ||
187 | { | ||
188 | struct sock *sk = sock->sk; | ||
189 | struct pn_sock *pn = pn_sk(sk); | ||
190 | |||
191 | if (cmd == SIOCPNGETOBJECT) { | ||
192 | struct net_device *dev; | ||
193 | u16 handle; | ||
194 | u8 saddr; | ||
195 | |||
196 | if (get_user(handle, (__u16 __user *)arg)) | ||
197 | return -EFAULT; | ||
198 | |||
199 | lock_sock(sk); | ||
200 | if (sk->sk_bound_dev_if) | ||
201 | dev = dev_get_by_index(sock_net(sk), | ||
202 | sk->sk_bound_dev_if); | ||
203 | else | ||
204 | dev = phonet_device_get(sock_net(sk)); | ||
205 | if (dev && (dev->flags & IFF_UP)) | ||
206 | saddr = phonet_address_get(dev, pn_addr(handle)); | ||
207 | else | ||
208 | saddr = PN_NO_ADDR; | ||
209 | release_sock(sk); | ||
210 | |||
211 | if (dev) | ||
212 | dev_put(dev); | ||
213 | if (saddr == PN_NO_ADDR) | ||
214 | return -EHOSTUNREACH; | ||
215 | |||
216 | handle = pn_object(saddr, pn_port(pn->sobject)); | ||
217 | return put_user(handle, (__u16 __user *)arg); | ||
218 | } | ||
219 | |||
220 | return sk->sk_prot->ioctl(sk, cmd, arg); | ||
221 | } | ||
222 | |||
223 | static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock, | ||
224 | struct msghdr *m, size_t total_len) | ||
225 | { | ||
226 | struct sock *sk = sock->sk; | ||
227 | |||
228 | if (pn_socket_autobind(sock)) | ||
229 | return -EAGAIN; | ||
230 | |||
231 | return sk->sk_prot->sendmsg(iocb, sk, m, total_len); | ||
232 | } | ||
233 | |||
234 | const struct proto_ops phonet_dgram_ops = { | ||
235 | .family = AF_PHONET, | ||
236 | .owner = THIS_MODULE, | ||
237 | .release = pn_socket_release, | ||
238 | .bind = pn_socket_bind, | ||
239 | .connect = sock_no_connect, | ||
240 | .socketpair = sock_no_socketpair, | ||
241 | .accept = sock_no_accept, | ||
242 | .getname = pn_socket_getname, | ||
243 | .poll = datagram_poll, | ||
244 | .ioctl = pn_socket_ioctl, | ||
245 | .listen = sock_no_listen, | ||
246 | .shutdown = sock_no_shutdown, | ||
247 | .setsockopt = sock_no_setsockopt, | ||
248 | .getsockopt = sock_no_getsockopt, | ||
249 | #ifdef CONFIG_COMPAT | ||
250 | .compat_setsockopt = sock_no_setsockopt, | ||
251 | .compat_getsockopt = sock_no_getsockopt, | ||
252 | #endif | ||
253 | .sendmsg = pn_socket_sendmsg, | ||
254 | .recvmsg = sock_common_recvmsg, | ||
255 | .mmap = sock_no_mmap, | ||
256 | .sendpage = sock_no_sendpage, | ||
257 | }; | ||
258 | |||
259 | static DEFINE_MUTEX(port_mutex); | ||
260 | |||
261 | /* allocate port for a socket */ | ||
262 | int pn_sock_get_port(struct sock *sk, unsigned short sport) | ||
263 | { | ||
264 | static int port_cur; | ||
265 | struct pn_sock *pn = pn_sk(sk); | ||
266 | struct sockaddr_pn try_sa; | ||
267 | struct sock *tmpsk; | ||
268 | |||
269 | memset(&try_sa, 0, sizeof(struct sockaddr_pn)); | ||
270 | try_sa.spn_family = AF_PHONET; | ||
271 | |||
272 | mutex_lock(&port_mutex); | ||
273 | |||
274 | if (!sport) { | ||
275 | /* search free port */ | ||
276 | int port, pmin, pmax; | ||
277 | |||
278 | phonet_get_local_port_range(&pmin, &pmax); | ||
279 | for (port = pmin; port <= pmax; port++) { | ||
280 | port_cur++; | ||
281 | if (port_cur < pmin || port_cur > pmax) | ||
282 | port_cur = pmin; | ||
283 | |||
284 | pn_sockaddr_set_port(&try_sa, port_cur); | ||
285 | tmpsk = pn_find_sock_by_sa(&try_sa); | ||
286 | if (tmpsk == NULL) { | ||
287 | sport = port_cur; | ||
288 | goto found; | ||
289 | } else | ||
290 | sock_put(tmpsk); | ||
291 | } | ||
292 | } else { | ||
293 | /* try to find specific port */ | ||
294 | pn_sockaddr_set_port(&try_sa, sport); | ||
295 | tmpsk = pn_find_sock_by_sa(&try_sa); | ||
296 | if (tmpsk == NULL) | ||
297 | /* No sock there! We can use that port... */ | ||
298 | goto found; | ||
299 | else | ||
300 | sock_put(tmpsk); | ||
301 | } | ||
302 | mutex_unlock(&port_mutex); | ||
303 | |||
304 | /* the port must be in use already */ | ||
305 | return -EADDRINUSE; | ||
306 | |||
307 | found: | ||
308 | mutex_unlock(&port_mutex); | ||
309 | pn->sobject = pn_object(pn_addr(pn->sobject), sport); | ||
310 | return 0; | ||
311 | } | ||
312 | EXPORT_SYMBOL(pn_sock_get_port); | ||
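A hedged sketch of the bind path above: binding with a zeroed object asks pn_sock_get_port() to pick a free port from the dynamic range, and getsockname() (pn_socket_getname()) reports the bound handle. Values and error handling are kept minimal.

/* Bind a Phonet socket with a zeroed object so pn_sock_get_port()
 * picks a free dynamic port, then read the bound handle back. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/phonet.h>

#ifndef AF_PHONET
#define AF_PHONET 35	/* assumed value if the libc headers lack it */
#endif

int main(void)
{
	struct sockaddr_pn sa;
	socklen_t len = sizeof(sa);
	int fd = socket(AF_PHONET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&sa, 0, sizeof(sa));
	sa.spn_family = AF_PHONET;	/* spn_dev/spn_obj left 0: autoselect */
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("bind");
		return 1;
	}

	if (getsockname(fd, (struct sockaddr *)&sa, &len) == 0)
		printf("bound: dev=0x%02x obj=0x%02x\n", sa.spn_dev, sa.spn_obj);
	return 0;
}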
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c new file mode 100644 index 000000000000..600a4309b8c8 --- /dev/null +++ b/net/phonet/sysctl.c | |||
@@ -0,0 +1,113 @@ | |||
1 | /* | ||
2 | * File: sysctl.c | ||
3 | * | ||
4 | * Phonet /proc/sys/net/phonet interface implementation | ||
5 | * | ||
6 | * Copyright (C) 2008 Nokia Corporation. | ||
7 | * | ||
8 | * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * version 2 as published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | * General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
22 | * 02110-1301 USA | ||
23 | */ | ||
24 | |||
25 | #include <linux/seqlock.h> | ||
26 | #include <linux/sysctl.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/init.h> | ||
29 | |||
30 | #define DYNAMIC_PORT_MIN 0x40 | ||
31 | #define DYNAMIC_PORT_MAX 0x7f | ||
32 | |||
33 | static DEFINE_SEQLOCK(local_port_range_lock); | ||
34 | static int local_port_range_min[2] = {0, 0}; | ||
35 | static int local_port_range_max[2] = {1023, 1023}; | ||
36 | static int local_port_range[2] = {DYNAMIC_PORT_MIN, DYNAMIC_PORT_MAX}; | ||
37 | static struct ctl_table_header *phonet_table_hrd; | ||
38 | |||
39 | static void set_local_port_range(int range[2]) | ||
40 | { | ||
41 | write_seqlock(&local_port_range_lock); | ||
42 | local_port_range[0] = range[0]; | ||
43 | local_port_range[1] = range[1]; | ||
44 | write_sequnlock(&local_port_range_lock); | ||
45 | } | ||
46 | |||
47 | void phonet_get_local_port_range(int *min, int *max) | ||
48 | { | ||
49 | unsigned seq; | ||
50 | do { | ||
51 | seq = read_seqbegin(&local_port_range_lock); | ||
52 | if (min) | ||
53 | *min = local_port_range[0]; | ||
54 | if (max) | ||
55 | *max = local_port_range[1]; | ||
56 | } while (read_seqretry(&local_port_range_lock, seq)); | ||
57 | } | ||
58 | |||
59 | static int proc_local_port_range(ctl_table *table, int write, struct file *filp, | ||
60 | void __user *buffer, | ||
61 | size_t *lenp, loff_t *ppos) | ||
62 | { | ||
63 | int ret; | ||
64 | int range[2] = {local_port_range[0], local_port_range[1]}; | ||
65 | ctl_table tmp = { | ||
66 | .data = &range, | ||
67 | .maxlen = sizeof(range), | ||
68 | .mode = table->mode, | ||
69 | .extra1 = &local_port_range_min, | ||
70 | .extra2 = &local_port_range_max, | ||
71 | }; | ||
72 | |||
73 | ret = proc_dointvec_minmax(&tmp, write, filp, buffer, lenp, ppos); | ||
74 | |||
75 | if (write && ret == 0) { | ||
76 | if (range[1] < range[0]) | ||
77 | ret = -EINVAL; | ||
78 | else | ||
79 | set_local_port_range(range); | ||
80 | } | ||
81 | |||
82 | return ret; | ||
83 | } | ||
84 | |||
85 | static struct ctl_table phonet_table[] = { | ||
86 | { | ||
87 | .ctl_name = CTL_UNNUMBERED, | ||
88 | .procname = "local_port_range", | ||
89 | .data = &local_port_range, | ||
90 | .maxlen = sizeof(local_port_range), | ||
91 | .mode = 0644, | ||
92 | .proc_handler = &proc_local_port_range, | ||
93 | .strategy = NULL, | ||
94 | }, | ||
95 | { .ctl_name = 0 } | ||
96 | }; | ||
97 | |||
98 | struct ctl_path phonet_ctl_path[] = { | ||
99 | { .procname = "net", .ctl_name = CTL_NET, }, | ||
100 | { .procname = "phonet", .ctl_name = CTL_UNNUMBERED, }, | ||
101 | { }, | ||
102 | }; | ||
103 | |||
104 | int __init phonet_sysctl_init(void) | ||
105 | { | ||
106 | phonet_table_hrd = register_sysctl_paths(phonet_ctl_path, phonet_table); | ||
107 | return phonet_table_hrd == NULL ? -ENOMEM : 0; | ||
108 | } | ||
109 | |||
110 | void phonet_sysctl_exit(void) | ||
111 | { | ||
112 | unregister_sysctl_table(phonet_table_hrd); | ||
113 | } | ||
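The table above surfaces the dynamic port range as /proc/sys/net/phonet/local_port_range (the path follows from phonet_ctl_path). A small sketch reading it back:

/* Read the Phonet dynamic port range exported by the table above. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/phonet/local_port_range", "r");
	int lo, hi;

	if (f == NULL) {
		perror("local_port_range");
		return 1;
	}
	if (fscanf(f, "%d %d", &lo, &hi) == 2)
		printf("Phonet dynamic ports: %d-%d\n", lo, hi);
	fclose(f);
	return 0;
}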
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill-input.h index f63d05045685..bbfa646157c6 100644 --- a/net/rfkill/rfkill-input.h +++ b/net/rfkill/rfkill-input.h | |||
@@ -13,5 +13,6 @@ | |||
13 | 13 | ||
14 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); | 14 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); |
15 | void rfkill_epo(void); | 15 | void rfkill_epo(void); |
16 | void rfkill_restore_states(void); | ||
16 | 17 | ||
17 | #endif /* __RFKILL_INPUT_H */ | 18 | #endif /* __RFKILL_INPUT_H */ |
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index 74aecc098bad..ea0dc04b3c77 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c | |||
@@ -37,14 +37,20 @@ MODULE_DESCRIPTION("RF switch support"); | |||
37 | MODULE_LICENSE("GPL"); | 37 | MODULE_LICENSE("GPL"); |
38 | 38 | ||
39 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ | 39 | static LIST_HEAD(rfkill_list); /* list of registered rf switches */ |
40 | static DEFINE_MUTEX(rfkill_mutex); | 40 | static DEFINE_MUTEX(rfkill_global_mutex); |
41 | 41 | ||
42 | static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED; | 42 | static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED; |
43 | module_param_named(default_state, rfkill_default_state, uint, 0444); | 43 | module_param_named(default_state, rfkill_default_state, uint, 0444); |
44 | MODULE_PARM_DESC(default_state, | 44 | MODULE_PARM_DESC(default_state, |
45 | "Default initial state for all radio types, 0 = radio off"); | 45 | "Default initial state for all radio types, 0 = radio off"); |
46 | 46 | ||
47 | static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX]; | 47 | struct rfkill_gsw_state { |
48 | enum rfkill_state current_state; | ||
49 | enum rfkill_state default_state; | ||
50 | }; | ||
51 | |||
52 | static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX]; | ||
53 | static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
48 | 54 | ||
49 | static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list); | 55 | static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list); |
50 | 56 | ||
@@ -70,6 +76,7 @@ static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list); | |||
70 | */ | 76 | */ |
71 | int register_rfkill_notifier(struct notifier_block *nb) | 77 | int register_rfkill_notifier(struct notifier_block *nb) |
72 | { | 78 | { |
79 | BUG_ON(!nb); | ||
73 | return blocking_notifier_chain_register(&rfkill_notifier_list, nb); | 80 | return blocking_notifier_chain_register(&rfkill_notifier_list, nb); |
74 | } | 81 | } |
75 | EXPORT_SYMBOL_GPL(register_rfkill_notifier); | 82 | EXPORT_SYMBOL_GPL(register_rfkill_notifier); |
@@ -85,6 +92,7 @@ EXPORT_SYMBOL_GPL(register_rfkill_notifier); | |||
85 | */ | 92 | */ |
86 | int unregister_rfkill_notifier(struct notifier_block *nb) | 93 | int unregister_rfkill_notifier(struct notifier_block *nb) |
87 | { | 94 | { |
95 | BUG_ON(!nb); | ||
88 | return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb); | 96 | return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb); |
89 | } | 97 | } |
90 | EXPORT_SYMBOL_GPL(unregister_rfkill_notifier); | 98 | EXPORT_SYMBOL_GPL(unregister_rfkill_notifier); |
@@ -195,6 +203,11 @@ static int rfkill_toggle_radio(struct rfkill *rfkill, | |||
195 | * BLOCK even a transmitter that is already in state | 203 | * BLOCK even a transmitter that is already in state |
196 | * RFKILL_STATE_HARD_BLOCKED */ | 204 | * RFKILL_STATE_HARD_BLOCKED */ |
197 | break; | 205 | break; |
206 | default: | ||
207 | WARN(1, KERN_WARNING | ||
208 | "rfkill: illegal state %d passed as parameter " | ||
209 | "to rfkill_toggle_radio\n", state); | ||
210 | return -EINVAL; | ||
198 | } | 211 | } |
199 | 212 | ||
200 | if (force || state != rfkill->state) { | 213 | if (force || state != rfkill->state) { |
@@ -213,22 +226,29 @@ static int rfkill_toggle_radio(struct rfkill *rfkill, | |||
213 | } | 226 | } |
214 | 227 | ||
215 | /** | 228 | /** |
216 | * rfkill_switch_all - Toggle state of all switches of given type | 229 | * __rfkill_switch_all - Toggle state of all switches of given type |
217 | * @type: type of interfaces to be affected | 230 | * @type: type of interfaces to be affected |
218 | * @state: the new state | 231 | * @state: the new state |
219 | * | 232 | * |
220 | * This function toggles the state of all switches of given type, | 233 | * This function toggles the state of all switches of given type, |
221 | * unless a specific switch is claimed by userspace (in which case, | 234 | * unless a specific switch is claimed by userspace (in which case, |
222 | * that switch is left alone) or suspended. | 235 | * that switch is left alone) or suspended. |
236 | * | ||
237 | * Caller must have acquired rfkill_global_mutex. | ||
223 | */ | 238 | */ |
224 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) | 239 | static void __rfkill_switch_all(const enum rfkill_type type, |
240 | const enum rfkill_state state) | ||
225 | { | 241 | { |
226 | struct rfkill *rfkill; | 242 | struct rfkill *rfkill; |
227 | 243 | ||
228 | mutex_lock(&rfkill_mutex); | 244 | if (WARN((state >= RFKILL_STATE_MAX || type >= RFKILL_TYPE_MAX), |
229 | 245 | KERN_WARNING | |
230 | rfkill_states[type] = state; | 246 | "rfkill: illegal state %d or type %d " |
247 | "passed as parameter to __rfkill_switch_all\n", | ||
248 | state, type)) | ||
249 | return; | ||
231 | 250 | ||
251 | rfkill_global_states[type].current_state = state; | ||
232 | list_for_each_entry(rfkill, &rfkill_list, node) { | 252 | list_for_each_entry(rfkill, &rfkill_list, node) { |
233 | if ((!rfkill->user_claim) && (rfkill->type == type)) { | 253 | if ((!rfkill->user_claim) && (rfkill->type == type)) { |
234 | mutex_lock(&rfkill->mutex); | 254 | mutex_lock(&rfkill->mutex); |
@@ -236,8 +256,21 @@ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) | |||
236 | mutex_unlock(&rfkill->mutex); | 256 | mutex_unlock(&rfkill->mutex); |
237 | } | 257 | } |
238 | } | 258 | } |
259 | } | ||
239 | 260 | ||
240 | mutex_unlock(&rfkill_mutex); | 261 | /** |
262 | * rfkill_switch_all - Toggle state of all switches of given type | ||
263 | * @type: type of interfaces to be affected | ||
264 | * @state: the new state | ||
265 | * | ||
266 | * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state). | ||
267 | * Please refer to __rfkill_switch_all() for details. | ||
268 | */ | ||
269 | void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) | ||
270 | { | ||
271 | mutex_lock(&rfkill_global_mutex); | ||
272 | __rfkill_switch_all(type, state); | ||
273 | mutex_unlock(&rfkill_global_mutex); | ||
241 | } | 274 | } |
242 | EXPORT_SYMBOL(rfkill_switch_all); | 275 | EXPORT_SYMBOL(rfkill_switch_all); |
243 | 276 | ||
@@ -245,23 +278,53 @@ EXPORT_SYMBOL(rfkill_switch_all); | |||
245 | * rfkill_epo - emergency power off all transmitters | 278 | * rfkill_epo - emergency power off all transmitters |
246 | * | 279 | * |
247 | * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, | 280 | * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, |
248 | * ignoring everything in its path but rfkill_mutex and rfkill->mutex. | 281 | * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex. |
282 | * | ||
283 | * The global state before the EPO is saved and can be restored later | ||
284 | * using rfkill_restore_states(). | ||
249 | */ | 285 | */ |
250 | void rfkill_epo(void) | 286 | void rfkill_epo(void) |
251 | { | 287 | { |
252 | struct rfkill *rfkill; | 288 | struct rfkill *rfkill; |
289 | int i; | ||
290 | |||
291 | mutex_lock(&rfkill_global_mutex); | ||
253 | 292 | ||
254 | mutex_lock(&rfkill_mutex); | ||
255 | list_for_each_entry(rfkill, &rfkill_list, node) { | 293 | list_for_each_entry(rfkill, &rfkill_list, node) { |
256 | mutex_lock(&rfkill->mutex); | 294 | mutex_lock(&rfkill->mutex); |
257 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); | 295 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); |
258 | mutex_unlock(&rfkill->mutex); | 296 | mutex_unlock(&rfkill->mutex); |
259 | } | 297 | } |
260 | mutex_unlock(&rfkill_mutex); | 298 | for (i = 0; i < RFKILL_TYPE_MAX; i++) { |
299 | rfkill_global_states[i].default_state = | ||
300 | rfkill_global_states[i].current_state; | ||
301 | rfkill_global_states[i].current_state = | ||
302 | RFKILL_STATE_SOFT_BLOCKED; | ||
303 | } | ||
304 | mutex_unlock(&rfkill_global_mutex); | ||
261 | } | 305 | } |
262 | EXPORT_SYMBOL_GPL(rfkill_epo); | 306 | EXPORT_SYMBOL_GPL(rfkill_epo); |
263 | 307 | ||
264 | /** | 308 | /** |
309 | * rfkill_restore_states - restore global states | ||
310 | * | ||
311 | * Restore (and sync switches to) the global state from the | ||
312 | * states in rfkill_default_states. This can undo the effects of | ||
313 | * a call to rfkill_epo(). | ||
314 | */ | ||
315 | void rfkill_restore_states(void) | ||
316 | { | ||
317 | int i; | ||
318 | |||
319 | mutex_lock(&rfkill_global_mutex); | ||
320 | |||
321 | for (i = 0; i < RFKILL_TYPE_MAX; i++) | ||
322 | __rfkill_switch_all(i, rfkill_global_states[i].default_state); | ||
323 | mutex_unlock(&rfkill_global_mutex); | ||
324 | } | ||
325 | EXPORT_SYMBOL_GPL(rfkill_restore_states); | ||
326 | |||
327 | /** | ||
265 | * rfkill_force_state - Force the internal rfkill radio state | 328 | * rfkill_force_state - Force the internal rfkill radio state |
266 | * @rfkill: pointer to the rfkill class to modify. | 329 | * @rfkill: pointer to the rfkill class to modify. |
267 | * @state: the current radio state the class should be forced to. | 330 | * @state: the current radio state the class should be forced to. |
@@ -282,9 +345,11 @@ int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state) | |||
282 | { | 345 | { |
283 | enum rfkill_state oldstate; | 346 | enum rfkill_state oldstate; |
284 | 347 | ||
285 | if (state != RFKILL_STATE_SOFT_BLOCKED && | 348 | BUG_ON(!rfkill); |
286 | state != RFKILL_STATE_UNBLOCKED && | 349 | if (WARN((state >= RFKILL_STATE_MAX), |
287 | state != RFKILL_STATE_HARD_BLOCKED) | 350 | KERN_WARNING |
351 | "rfkill: illegal state %d passed as parameter " | ||
352 | "to rfkill_force_state\n", state)) | ||
288 | return -EINVAL; | 353 | return -EINVAL; |
289 | 354 | ||
290 | mutex_lock(&rfkill->mutex); | 355 | mutex_lock(&rfkill->mutex); |
@@ -352,12 +417,16 @@ static ssize_t rfkill_state_store(struct device *dev, | |||
352 | const char *buf, size_t count) | 417 | const char *buf, size_t count) |
353 | { | 418 | { |
354 | struct rfkill *rfkill = to_rfkill(dev); | 419 | struct rfkill *rfkill = to_rfkill(dev); |
355 | unsigned int state = simple_strtoul(buf, NULL, 0); | 420 | unsigned long state; |
356 | int error; | 421 | int error; |
357 | 422 | ||
358 | if (!capable(CAP_NET_ADMIN)) | 423 | if (!capable(CAP_NET_ADMIN)) |
359 | return -EPERM; | 424 | return -EPERM; |
360 | 425 | ||
426 | error = strict_strtoul(buf, 0, &state); | ||
427 | if (error) | ||
428 | return error; | ||
429 | |||
361 | /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ | 430 | /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ |
362 | if (state != RFKILL_STATE_UNBLOCKED && | 431 | if (state != RFKILL_STATE_UNBLOCKED && |
363 | state != RFKILL_STATE_SOFT_BLOCKED) | 432 | state != RFKILL_STATE_SOFT_BLOCKED) |
@@ -385,7 +454,8 @@ static ssize_t rfkill_claim_store(struct device *dev, | |||
385 | const char *buf, size_t count) | 454 | const char *buf, size_t count) |
386 | { | 455 | { |
387 | struct rfkill *rfkill = to_rfkill(dev); | 456 | struct rfkill *rfkill = to_rfkill(dev); |
388 | bool claim = !!simple_strtoul(buf, NULL, 0); | 457 | unsigned long claim_tmp; |
458 | bool claim; | ||
389 | int error; | 459 | int error; |
390 | 460 | ||
391 | if (!capable(CAP_NET_ADMIN)) | 461 | if (!capable(CAP_NET_ADMIN)) |
@@ -394,11 +464,16 @@ static ssize_t rfkill_claim_store(struct device *dev, | |||
394 | if (rfkill->user_claim_unsupported) | 464 | if (rfkill->user_claim_unsupported) |
395 | return -EOPNOTSUPP; | 465 | return -EOPNOTSUPP; |
396 | 466 | ||
467 | error = strict_strtoul(buf, 0, &claim_tmp); | ||
468 | if (error) | ||
469 | return error; | ||
470 | claim = !!claim_tmp; | ||
471 | |||
397 | /* | 472 | /* |
398 | * Take the global lock to make sure the kernel is not in | 473 | * Take the global lock to make sure the kernel is not in |
399 | * the middle of rfkill_switch_all | 474 | * the middle of rfkill_switch_all |
400 | */ | 475 | */ |
401 | error = mutex_lock_interruptible(&rfkill_mutex); | 476 | error = mutex_lock_interruptible(&rfkill_global_mutex); |
402 | if (error) | 477 | if (error) |
403 | return error; | 478 | return error; |
404 | 479 | ||
@@ -406,14 +481,14 @@ static ssize_t rfkill_claim_store(struct device *dev, | |||
406 | if (!claim) { | 481 | if (!claim) { |
407 | mutex_lock(&rfkill->mutex); | 482 | mutex_lock(&rfkill->mutex); |
408 | rfkill_toggle_radio(rfkill, | 483 | rfkill_toggle_radio(rfkill, |
409 | rfkill_states[rfkill->type], | 484 | rfkill_global_states[rfkill->type].current_state, |
410 | 0); | 485 | 0); |
411 | mutex_unlock(&rfkill->mutex); | 486 | mutex_unlock(&rfkill->mutex); |
412 | } | 487 | } |
413 | rfkill->user_claim = claim; | 488 | rfkill->user_claim = claim; |
414 | } | 489 | } |
415 | 490 | ||
416 | mutex_unlock(&rfkill_mutex); | 491 | mutex_unlock(&rfkill_global_mutex); |
417 | 492 | ||
418 | return error ? error : count; | 493 | return error ? error : count; |
419 | } | 494 | } |
@@ -437,21 +512,9 @@ static void rfkill_release(struct device *dev) | |||
437 | #ifdef CONFIG_PM | 512 | #ifdef CONFIG_PM |
438 | static int rfkill_suspend(struct device *dev, pm_message_t state) | 513 | static int rfkill_suspend(struct device *dev, pm_message_t state) |
439 | { | 514 | { |
440 | struct rfkill *rfkill = to_rfkill(dev); | 515 | /* mark class device as suspended */ |
441 | 516 | if (dev->power.power_state.event != state.event) | |
442 | if (dev->power.power_state.event != state.event) { | ||
443 | if (state.event & PM_EVENT_SLEEP) { | ||
444 | /* Stop transmitter, keep state, no notifies */ | ||
445 | update_rfkill_state(rfkill); | ||
446 | |||
447 | mutex_lock(&rfkill->mutex); | ||
448 | rfkill->toggle_radio(rfkill->data, | ||
449 | RFKILL_STATE_SOFT_BLOCKED); | ||
450 | mutex_unlock(&rfkill->mutex); | ||
451 | } | ||
452 | |||
453 | dev->power.power_state = state; | 517 | dev->power.power_state = state; |
454 | } | ||
455 | 518 | ||
456 | return 0; | 519 | return 0; |
457 | } | 520 | } |
@@ -525,24 +588,60 @@ static struct class rfkill_class = { | |||
525 | .dev_uevent = rfkill_dev_uevent, | 588 | .dev_uevent = rfkill_dev_uevent, |
526 | }; | 589 | }; |
527 | 590 | ||
591 | static int rfkill_check_duplicity(const struct rfkill *rfkill) | ||
592 | { | ||
593 | struct rfkill *p; | ||
594 | unsigned long seen[BITS_TO_LONGS(RFKILL_TYPE_MAX)]; | ||
595 | |||
596 | memset(seen, 0, sizeof(seen)); | ||
597 | |||
598 | list_for_each_entry(p, &rfkill_list, node) { | ||
599 | if (WARN((p == rfkill), KERN_WARNING | ||
600 | "rfkill: illegal attempt to register " | ||
601 | "an already registered rfkill struct\n")) | ||
602 | return -EEXIST; | ||
603 | set_bit(p->type, seen); | ||
604 | } | ||
605 | |||
606 | /* 0: first switch of its kind */ | ||
607 | return test_bit(rfkill->type, seen); | ||
608 | } | ||
609 | |||
528 | static int rfkill_add_switch(struct rfkill *rfkill) | 610 | static int rfkill_add_switch(struct rfkill *rfkill) |
529 | { | 611 | { |
530 | mutex_lock(&rfkill_mutex); | 612 | int error; |
613 | |||
614 | mutex_lock(&rfkill_global_mutex); | ||
615 | |||
616 | error = rfkill_check_duplicity(rfkill); | ||
617 | if (error < 0) | ||
618 | goto unlock_out; | ||
531 | 619 | ||
532 | rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type], 0); | 620 | if (!error) { |
621 | /* lock default after first use */ | ||
622 | set_bit(rfkill->type, rfkill_states_lockdflt); | ||
623 | rfkill_global_states[rfkill->type].current_state = | ||
624 | rfkill_global_states[rfkill->type].default_state; | ||
625 | } | ||
626 | |||
627 | rfkill_toggle_radio(rfkill, | ||
628 | rfkill_global_states[rfkill->type].current_state, | ||
629 | 0); | ||
533 | 630 | ||
534 | list_add_tail(&rfkill->node, &rfkill_list); | 631 | list_add_tail(&rfkill->node, &rfkill_list); |
535 | 632 | ||
536 | mutex_unlock(&rfkill_mutex); | 633 | error = 0; |
634 | unlock_out: | ||
635 | mutex_unlock(&rfkill_global_mutex); | ||
537 | 636 | ||
538 | return 0; | 637 | return error; |
539 | } | 638 | } |
540 | 639 | ||
541 | static void rfkill_remove_switch(struct rfkill *rfkill) | 640 | static void rfkill_remove_switch(struct rfkill *rfkill) |
542 | { | 641 | { |
543 | mutex_lock(&rfkill_mutex); | 642 | mutex_lock(&rfkill_global_mutex); |
544 | list_del_init(&rfkill->node); | 643 | list_del_init(&rfkill->node); |
545 | mutex_unlock(&rfkill_mutex); | 644 | mutex_unlock(&rfkill_global_mutex); |
546 | 645 | ||
547 | mutex_lock(&rfkill->mutex); | 646 | mutex_lock(&rfkill->mutex); |
548 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); | 647 | rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); |
@@ -562,11 +661,18 @@ static void rfkill_remove_switch(struct rfkill *rfkill) | |||
562 | * NOTE: If registration fails the structure should be freed by calling | 661 | * NOTE: If registration fails the structure should be freed by calling |
563 | * rfkill_free() otherwise rfkill_unregister() should be used. | 662 | * rfkill_free() otherwise rfkill_unregister() should be used. |
564 | */ | 663 | */ |
565 | struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type) | 664 | struct rfkill * __must_check rfkill_allocate(struct device *parent, |
665 | enum rfkill_type type) | ||
566 | { | 666 | { |
567 | struct rfkill *rfkill; | 667 | struct rfkill *rfkill; |
568 | struct device *dev; | 668 | struct device *dev; |
569 | 669 | ||
670 | if (WARN((type >= RFKILL_TYPE_MAX), | ||
671 | KERN_WARNING | ||
672 | "rfkill: illegal type %d passed as parameter " | ||
673 | "to rfkill_allocate\n", type)) | ||
674 | return NULL; | ||
675 | |||
570 | rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL); | 676 | rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL); |
571 | if (!rfkill) | 677 | if (!rfkill) |
572 | return NULL; | 678 | return NULL; |
@@ -633,15 +739,18 @@ static void rfkill_led_trigger_unregister(struct rfkill *rfkill) | |||
633 | * structure needs to be registered. Immediately from registration the | 739 | * structure needs to be registered. Immediately from registration the |
634 | * switch driver should be able to service calls to toggle_radio. | 740 | * switch driver should be able to service calls to toggle_radio. |
635 | */ | 741 | */ |
636 | int rfkill_register(struct rfkill *rfkill) | 742 | int __must_check rfkill_register(struct rfkill *rfkill) |
637 | { | 743 | { |
638 | static atomic_t rfkill_no = ATOMIC_INIT(0); | 744 | static atomic_t rfkill_no = ATOMIC_INIT(0); |
639 | struct device *dev = &rfkill->dev; | 745 | struct device *dev = &rfkill->dev; |
640 | int error; | 746 | int error; |
641 | 747 | ||
642 | if (!rfkill->toggle_radio) | 748 | if (WARN((!rfkill || !rfkill->toggle_radio || |
643 | return -EINVAL; | 749 | rfkill->type >= RFKILL_TYPE_MAX || |
644 | if (rfkill->type >= RFKILL_TYPE_MAX) | 750 | rfkill->state >= RFKILL_STATE_MAX), |
751 | KERN_WARNING | ||
752 | "rfkill: attempt to register a " | ||
753 | "badly initialized rfkill struct\n")) | ||
645 | return -EINVAL; | 754 | return -EINVAL; |
646 | 755 | ||
647 | snprintf(dev->bus_id, sizeof(dev->bus_id), | 756 | snprintf(dev->bus_id, sizeof(dev->bus_id), |
@@ -676,6 +785,7 @@ EXPORT_SYMBOL(rfkill_register); | |||
676 | */ | 785 | */ |
677 | void rfkill_unregister(struct rfkill *rfkill) | 786 | void rfkill_unregister(struct rfkill *rfkill) |
678 | { | 787 | { |
788 | BUG_ON(!rfkill); | ||
679 | device_del(&rfkill->dev); | 789 | device_del(&rfkill->dev); |
680 | rfkill_remove_switch(rfkill); | 790 | rfkill_remove_switch(rfkill); |
681 | rfkill_led_trigger_unregister(rfkill); | 791 | rfkill_led_trigger_unregister(rfkill); |
@@ -683,6 +793,56 @@ void rfkill_unregister(struct rfkill *rfkill) | |||
683 | } | 793 | } |
684 | EXPORT_SYMBOL(rfkill_unregister); | 794 | EXPORT_SYMBOL(rfkill_unregister); |
685 | 795 | ||
796 | /** | ||
797 | * rfkill_set_default - set initial value for a switch type | ||
798 | * @type - the type of switch to set the default state of | ||
799 | * @state - the new default state for that group of switches | ||
800 | * | ||
801 | * Sets the initial state rfkill should use for a given type. | ||
802 | * The following initial states are allowed: RFKILL_STATE_SOFT_BLOCKED | ||
803 | * and RFKILL_STATE_UNBLOCKED. | ||
804 | * | ||
805 | * This function is meant to be used by platform drivers for platforms | ||
806 | * that can save switch state across power down/reboot. | ||
807 | * | ||
808 | * The default state for each switch type can be changed exactly once. | ||
809 | * After a switch of that type is registered, the default state cannot | ||
810 | * be changed anymore. This guards against multiple drivers in the | ||
811 | * same platform trying to set the initial switch default state, which | ||
812 | * is not allowed. | ||
813 | * | ||
814 | * Returns -EPERM if the state has already been set once or is in use, | ||
815 | * so drivers likely want to either ignore or at most printk(KERN_NOTICE) | ||
816 | * if this function returns -EPERM. | ||
817 | * | ||
818 | * Returns 0 if the new default state was set, or an error if it | ||
819 | * could not be set. | ||
820 | */ | ||
821 | int rfkill_set_default(enum rfkill_type type, enum rfkill_state state) | ||
822 | { | ||
823 | int error; | ||
824 | |||
825 | if (WARN((type >= RFKILL_TYPE_MAX || | ||
826 | (state != RFKILL_STATE_SOFT_BLOCKED && | ||
827 | state != RFKILL_STATE_UNBLOCKED)), | ||
828 | KERN_WARNING | ||
829 | "rfkill: illegal state %d or type %d passed as " | ||
830 | "parameter to rfkill_set_default\n", state, type)) | ||
831 | return -EINVAL; | ||
832 | |||
833 | mutex_lock(&rfkill_global_mutex); | ||
834 | |||
835 | if (!test_and_set_bit(type, rfkill_states_lockdflt)) { | ||
836 | rfkill_global_states[type].default_state = state; | ||
837 | error = 0; | ||
838 | } else | ||
839 | error = -EPERM; | ||
840 | |||
841 | mutex_unlock(&rfkill_global_mutex); | ||
842 | return error; | ||
843 | } | ||
844 | EXPORT_SYMBOL_GPL(rfkill_set_default); | ||
845 | |||
686 | /* | 846 | /* |
687 | * Rfkill module initialization/deinitialization. | 847 | * Rfkill module initialization/deinitialization. |
688 | */ | 848 | */ |
@@ -696,8 +856,8 @@ static int __init rfkill_init(void) | |||
696 | rfkill_default_state != RFKILL_STATE_UNBLOCKED) | 856 | rfkill_default_state != RFKILL_STATE_UNBLOCKED) |
697 | return -EINVAL; | 857 | return -EINVAL; |
698 | 858 | ||
699 | for (i = 0; i < ARRAY_SIZE(rfkill_states); i++) | 859 | for (i = 0; i < RFKILL_TYPE_MAX; i++) |
700 | rfkill_states[i] = rfkill_default_state; | 860 | rfkill_global_states[i].default_state = rfkill_default_state; |
701 | 861 | ||
702 | error = class_register(&rfkill_class); | 862 | error = class_register(&rfkill_class); |
703 | if (error) { | 863 | if (error) { |
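For context, a minimal sketch of how a platform driver is expected to use the rfkill_set_default() interface added above, assuming the rfkill API of this kernel generation (rfkill_allocate()/rfkill_register()/rfkill_free() and the toggle_radio callback). The helpers my_read_saved_wlan_state() and my_wlan_set_power() are hypothetical placeholders for platform firmware accessors, not existing kernel functions.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/rfkill.h>

/* Hypothetical platform helpers, assumed only for this sketch. */
extern bool my_read_saved_wlan_state(void);
extern int my_wlan_set_power(void *data, bool on);

static int my_wlan_toggle_radio(void *data, enum rfkill_state state)
{
	return my_wlan_set_power(data, state == RFKILL_STATE_UNBLOCKED);
}

static int my_platform_rfkill_init(struct device *parent)
{
	struct rfkill *rf;
	int err;

	/* Must run before the first WLAN switch is registered; a later
	 * call for the same type returns -EPERM, which may be ignored. */
	err = rfkill_set_default(RFKILL_TYPE_WLAN,
				 my_read_saved_wlan_state() ?
					RFKILL_STATE_UNBLOCKED :
					RFKILL_STATE_SOFT_BLOCKED);
	if (err && err != -EPERM)
		return err;

	rf = rfkill_allocate(parent, RFKILL_TYPE_WLAN);
	if (!rf)
		return -ENOMEM;

	rf->name = "my-wlan";
	rf->toggle_radio = my_wlan_toggle_radio;

	/* rfkill_register() syncs the switch to the type's current
	 * global state via rfkill_toggle_radio(). */
	err = rfkill_register(rf);
	if (err)
		rfkill_free(rf);
	return err;
}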
diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 9437b27ff84d..6767e54155db 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig | |||
@@ -106,6 +106,15 @@ config NET_SCH_PRIO | |||
106 | To compile this code as a module, choose M here: the | 106 | To compile this code as a module, choose M here: the |
107 | module will be called sch_prio. | 107 | module will be called sch_prio. |
108 | 108 | ||
109 | config NET_SCH_MULTIQ | ||
110 | tristate "Hardware Multiqueue-aware Multi Band Queuing (MULTIQ)" | ||
111 | ---help--- | ||
112 | Say Y here if you want to use an n-band queue packet scheduler | ||
113 | to support devices that have multiple hardware transmit queues. | ||
114 | |||
115 | To compile this code as a module, choose M here: the | ||
116 | module will be called sch_multiq. | ||
117 | |||
109 | config NET_SCH_RED | 118 | config NET_SCH_RED |
110 | tristate "Random Early Detection (RED)" | 119 | tristate "Random Early Detection (RED)" |
111 | ---help--- | 120 | ---help--- |
@@ -476,6 +485,17 @@ config NET_ACT_SIMP | |||
476 | To compile this code as a module, choose M here: the | 485 | To compile this code as a module, choose M here: the |
477 | module will be called simple. | 486 | module will be called simple. |
478 | 487 | ||
488 | config NET_ACT_SKBEDIT | ||
489 | tristate "SKB Editing" | ||
490 | depends on NET_CLS_ACT | ||
491 | ---help--- | ||
492 | Say Y here to change skb priority or queue_mapping settings. | ||
493 | |||
494 | If unsure, say N. | ||
495 | |||
496 | To compile this code as a module, choose M here: the | ||
497 | module will be called skbedit. | ||
498 | |||
479 | config NET_CLS_IND | 499 | config NET_CLS_IND |
480 | bool "Incoming device classification" | 500 | bool "Incoming device classification" |
481 | depends on NET_CLS_U32 || NET_CLS_FW | 501 | depends on NET_CLS_U32 || NET_CLS_FW |
diff --git a/net/sched/Makefile b/net/sched/Makefile index 1d2b0f7df848..e60c9925b269 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile | |||
@@ -14,6 +14,7 @@ obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o | |||
14 | obj-$(CONFIG_NET_ACT_NAT) += act_nat.o | 14 | obj-$(CONFIG_NET_ACT_NAT) += act_nat.o |
15 | obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o | 15 | obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o |
16 | obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o | 16 | obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o |
17 | obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o | ||
17 | obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o | 18 | obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o |
18 | obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o | 19 | obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o |
19 | obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o | 20 | obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o |
@@ -26,6 +27,7 @@ obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o | |||
26 | obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o | 27 | obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o |
27 | obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o | 28 | obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o |
28 | obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o | 29 | obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o |
30 | obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o | ||
29 | obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o | 31 | obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o |
30 | obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o | 32 | obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o |
31 | obj-$(CONFIG_NET_CLS_U32) += cls_u32.o | 33 | obj-$(CONFIG_NET_CLS_U32) += cls_u32.o |
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c new file mode 100644 index 000000000000..fe9777e77f35 --- /dev/null +++ b/net/sched/act_skbedit.c | |||
@@ -0,0 +1,203 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008, Intel Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
16 | * | ||
17 | * Author: Alexander Duyck <alexander.h.duyck@intel.com> | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/skbuff.h> | ||
24 | #include <linux/rtnetlink.h> | ||
25 | #include <net/netlink.h> | ||
26 | #include <net/pkt_sched.h> | ||
27 | |||
28 | #include <linux/tc_act/tc_skbedit.h> | ||
29 | #include <net/tc_act/tc_skbedit.h> | ||
30 | |||
31 | #define SKBEDIT_TAB_MASK 15 | ||
32 | static struct tcf_common *tcf_skbedit_ht[SKBEDIT_TAB_MASK + 1]; | ||
33 | static u32 skbedit_idx_gen; | ||
34 | static DEFINE_RWLOCK(skbedit_lock); | ||
35 | |||
36 | static struct tcf_hashinfo skbedit_hash_info = { | ||
37 | .htab = tcf_skbedit_ht, | ||
38 | .hmask = SKBEDIT_TAB_MASK, | ||
39 | .lock = &skbedit_lock, | ||
40 | }; | ||
41 | |||
42 | static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a, | ||
43 | struct tcf_result *res) | ||
44 | { | ||
45 | struct tcf_skbedit *d = a->priv; | ||
46 | |||
47 | spin_lock(&d->tcf_lock); | ||
48 | d->tcf_tm.lastuse = jiffies; | ||
49 | d->tcf_bstats.bytes += qdisc_pkt_len(skb); | ||
50 | d->tcf_bstats.packets++; | ||
51 | |||
52 | if (d->flags & SKBEDIT_F_PRIORITY) | ||
53 | skb->priority = d->priority; | ||
54 | if (d->flags & SKBEDIT_F_QUEUE_MAPPING && | ||
55 | skb->dev->real_num_tx_queues > d->queue_mapping) | ||
56 | skb_set_queue_mapping(skb, d->queue_mapping); | ||
57 | |||
58 | spin_unlock(&d->tcf_lock); | ||
59 | return d->tcf_action; | ||
60 | } | ||
61 | |||
62 | static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { | ||
63 | [TCA_SKBEDIT_PARMS] = { .len = sizeof(struct tc_skbedit) }, | ||
64 | [TCA_SKBEDIT_PRIORITY] = { .len = sizeof(u32) }, | ||
65 | [TCA_SKBEDIT_QUEUE_MAPPING] = { .len = sizeof(u16) }, | ||
66 | }; | ||
67 | |||
68 | static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, | ||
69 | struct tc_action *a, int ovr, int bind) | ||
70 | { | ||
71 | struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; | ||
72 | struct tc_skbedit *parm; | ||
73 | struct tcf_skbedit *d; | ||
74 | struct tcf_common *pc; | ||
75 | u32 flags = 0, *priority = NULL; | ||
76 | u16 *queue_mapping = NULL; | ||
77 | int ret = 0, err; | ||
78 | |||
79 | if (nla == NULL) | ||
80 | return -EINVAL; | ||
81 | |||
82 | err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy); | ||
83 | if (err < 0) | ||
84 | return err; | ||
85 | |||
86 | if (tb[TCA_SKBEDIT_PARMS] == NULL) | ||
87 | return -EINVAL; | ||
88 | |||
89 | if (tb[TCA_SKBEDIT_PRIORITY] != NULL) { | ||
90 | flags |= SKBEDIT_F_PRIORITY; | ||
91 | priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]); | ||
92 | } | ||
93 | |||
94 | if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) { | ||
95 | flags |= SKBEDIT_F_QUEUE_MAPPING; | ||
96 | queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]); | ||
97 | } | ||
98 | if (!flags) | ||
99 | return -EINVAL; | ||
100 | |||
101 | parm = nla_data(tb[TCA_SKBEDIT_PARMS]); | ||
102 | |||
103 | pc = tcf_hash_check(parm->index, a, bind, &skbedit_hash_info); | ||
104 | if (!pc) { | ||
105 | pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind, | ||
106 | &skbedit_idx_gen, &skbedit_hash_info); | ||
107 | if (unlikely(!pc)) | ||
108 | return -ENOMEM; | ||
109 | |||
110 | d = to_skbedit(pc); | ||
111 | ret = ACT_P_CREATED; | ||
112 | } else { | ||
113 | d = to_skbedit(pc); | ||
114 | if (!ovr) { | ||
115 | tcf_hash_release(pc, bind, &skbedit_hash_info); | ||
116 | return -EEXIST; | ||
117 | } | ||
118 | } | ||
119 | |||
120 | spin_lock_bh(&d->tcf_lock); | ||
121 | |||
122 | d->flags = flags; | ||
123 | if (flags & SKBEDIT_F_PRIORITY) | ||
124 | d->priority = *priority; | ||
125 | if (flags & SKBEDIT_F_QUEUE_MAPPING) | ||
126 | d->queue_mapping = *queue_mapping; | ||
127 | d->tcf_action = parm->action; | ||
128 | |||
129 | spin_unlock_bh(&d->tcf_lock); | ||
130 | |||
131 | if (ret == ACT_P_CREATED) | ||
132 | tcf_hash_insert(pc, &skbedit_hash_info); | ||
133 | return ret; | ||
134 | } | ||
135 | |||
136 | static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind) | ||
137 | { | ||
138 | struct tcf_skbedit *d = a->priv; | ||
139 | |||
140 | if (d) | ||
141 | return tcf_hash_release(&d->common, bind, &skbedit_hash_info); | ||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, | ||
146 | int bind, int ref) | ||
147 | { | ||
148 | unsigned char *b = skb_tail_pointer(skb); | ||
149 | struct tcf_skbedit *d = a->priv; | ||
150 | struct tc_skbedit opt; | ||
151 | struct tcf_t t; | ||
152 | |||
153 | opt.index = d->tcf_index; | ||
154 | opt.refcnt = d->tcf_refcnt - ref; | ||
155 | opt.bindcnt = d->tcf_bindcnt - bind; | ||
156 | opt.action = d->tcf_action; | ||
157 | NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt); | ||
158 | if (d->flags & SKBEDIT_F_PRIORITY) | ||
159 | NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), | ||
160 | &d->priority); | ||
161 | if (d->flags & SKBEDIT_F_QUEUE_MAPPING) | ||
162 | NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING, | ||
163 | sizeof(d->queue_mapping), &d->queue_mapping); | ||
164 | t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); | ||
165 | t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); | ||
166 | t.expires = jiffies_to_clock_t(d->tcf_tm.expires); | ||
167 | NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t); | ||
168 | return skb->len; | ||
169 | |||
170 | nla_put_failure: | ||
171 | nlmsg_trim(skb, b); | ||
172 | return -1; | ||
173 | } | ||
174 | |||
175 | static struct tc_action_ops act_skbedit_ops = { | ||
176 | .kind = "skbedit", | ||
177 | .hinfo = &skbedit_hash_info, | ||
178 | .type = TCA_ACT_SKBEDIT, | ||
179 | .capab = TCA_CAP_NONE, | ||
180 | .owner = THIS_MODULE, | ||
181 | .act = tcf_skbedit, | ||
182 | .dump = tcf_skbedit_dump, | ||
183 | .cleanup = tcf_skbedit_cleanup, | ||
184 | .init = tcf_skbedit_init, | ||
185 | .walk = tcf_generic_walker, | ||
186 | }; | ||
187 | |||
188 | MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>"); | ||
189 | MODULE_DESCRIPTION("SKB Editing"); | ||
190 | MODULE_LICENSE("GPL"); | ||
191 | |||
192 | static int __init skbedit_init_module(void) | ||
193 | { | ||
194 | return tcf_register_action(&act_skbedit_ops); | ||
195 | } | ||
196 | |||
197 | static void __exit skbedit_cleanup_module(void) | ||
198 | { | ||
199 | tcf_unregister_action(&act_skbedit_ops); | ||
200 | } | ||
201 | |||
202 | module_init(skbedit_init_module); | ||
203 | module_exit(skbedit_cleanup_module); | ||
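As a standalone illustration (userspace C, not kernel code) of the per-packet edit tcf_skbedit() performs above: priority is overwritten only when SKBEDIT_F_PRIORITY was configured, and queue_mapping only when SKBEDIT_F_QUEUE_MAPPING was configured and the requested queue index exists on the transmitting device. The fake_skb struct and apply_skbedit() helper are invented for the example.

#include <stdint.h>
#include <stdio.h>

#define SKBEDIT_F_PRIORITY      0x1
#define SKBEDIT_F_QUEUE_MAPPING 0x2

struct fake_skb { uint32_t priority; uint16_t queue_mapping; };

static void apply_skbedit(struct fake_skb *skb, uint32_t flags,
			  uint32_t priority, uint16_t queue_mapping,
			  uint16_t real_num_tx_queues)
{
	if (flags & SKBEDIT_F_PRIORITY)
		skb->priority = priority;
	/* only remap to a queue the device actually has */
	if ((flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    queue_mapping < real_num_tx_queues)
		skb->queue_mapping = queue_mapping;
}

int main(void)
{
	struct fake_skb skb = { .priority = 0, .queue_mapping = 0 };

	apply_skbedit(&skb, SKBEDIT_F_PRIORITY | SKBEDIT_F_QUEUE_MAPPING,
		      6, 3, 8);
	printf("priority=%u queue=%u\n", skb.priority, skb.queue_mapping);
	return 0;
}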
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 8f63a1a94014..0ebaff637e31 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c | |||
@@ -67,9 +67,9 @@ static inline u32 addr_fold(void *addr) | |||
67 | static u32 flow_get_src(const struct sk_buff *skb) | 67 | static u32 flow_get_src(const struct sk_buff *skb) |
68 | { | 68 | { |
69 | switch (skb->protocol) { | 69 | switch (skb->protocol) { |
70 | case __constant_htons(ETH_P_IP): | 70 | case htons(ETH_P_IP): |
71 | return ntohl(ip_hdr(skb)->saddr); | 71 | return ntohl(ip_hdr(skb)->saddr); |
72 | case __constant_htons(ETH_P_IPV6): | 72 | case htons(ETH_P_IPV6): |
73 | return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]); | 73 | return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]); |
74 | default: | 74 | default: |
75 | return addr_fold(skb->sk); | 75 | return addr_fold(skb->sk); |
@@ -79,9 +79,9 @@ static u32 flow_get_src(const struct sk_buff *skb) | |||
79 | static u32 flow_get_dst(const struct sk_buff *skb) | 79 | static u32 flow_get_dst(const struct sk_buff *skb) |
80 | { | 80 | { |
81 | switch (skb->protocol) { | 81 | switch (skb->protocol) { |
82 | case __constant_htons(ETH_P_IP): | 82 | case htons(ETH_P_IP): |
83 | return ntohl(ip_hdr(skb)->daddr); | 83 | return ntohl(ip_hdr(skb)->daddr); |
84 | case __constant_htons(ETH_P_IPV6): | 84 | case htons(ETH_P_IPV6): |
85 | return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); | 85 | return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); |
86 | default: | 86 | default: |
87 | return addr_fold(skb->dst) ^ (__force u16)skb->protocol; | 87 | return addr_fold(skb->dst) ^ (__force u16)skb->protocol; |
@@ -91,9 +91,9 @@ static u32 flow_get_dst(const struct sk_buff *skb) | |||
91 | static u32 flow_get_proto(const struct sk_buff *skb) | 91 | static u32 flow_get_proto(const struct sk_buff *skb) |
92 | { | 92 | { |
93 | switch (skb->protocol) { | 93 | switch (skb->protocol) { |
94 | case __constant_htons(ETH_P_IP): | 94 | case htons(ETH_P_IP): |
95 | return ip_hdr(skb)->protocol; | 95 | return ip_hdr(skb)->protocol; |
96 | case __constant_htons(ETH_P_IPV6): | 96 | case htons(ETH_P_IPV6): |
97 | return ipv6_hdr(skb)->nexthdr; | 97 | return ipv6_hdr(skb)->nexthdr; |
98 | default: | 98 | default: |
99 | return 0; | 99 | return 0; |
@@ -120,7 +120,7 @@ static u32 flow_get_proto_src(const struct sk_buff *skb) | |||
120 | u32 res = 0; | 120 | u32 res = 0; |
121 | 121 | ||
122 | switch (skb->protocol) { | 122 | switch (skb->protocol) { |
123 | case __constant_htons(ETH_P_IP): { | 123 | case htons(ETH_P_IP): { |
124 | struct iphdr *iph = ip_hdr(skb); | 124 | struct iphdr *iph = ip_hdr(skb); |
125 | 125 | ||
126 | if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && | 126 | if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && |
@@ -128,7 +128,7 @@ static u32 flow_get_proto_src(const struct sk_buff *skb) | |||
128 | res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4)); | 128 | res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4)); |
129 | break; | 129 | break; |
130 | } | 130 | } |
131 | case __constant_htons(ETH_P_IPV6): { | 131 | case htons(ETH_P_IPV6): { |
132 | struct ipv6hdr *iph = ipv6_hdr(skb); | 132 | struct ipv6hdr *iph = ipv6_hdr(skb); |
133 | 133 | ||
134 | if (has_ports(iph->nexthdr)) | 134 | if (has_ports(iph->nexthdr)) |
@@ -147,7 +147,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb) | |||
147 | u32 res = 0; | 147 | u32 res = 0; |
148 | 148 | ||
149 | switch (skb->protocol) { | 149 | switch (skb->protocol) { |
150 | case __constant_htons(ETH_P_IP): { | 150 | case htons(ETH_P_IP): { |
151 | struct iphdr *iph = ip_hdr(skb); | 151 | struct iphdr *iph = ip_hdr(skb); |
152 | 152 | ||
153 | if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && | 153 | if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && |
@@ -155,7 +155,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb) | |||
155 | res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2)); | 155 | res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2)); |
156 | break; | 156 | break; |
157 | } | 157 | } |
158 | case __constant_htons(ETH_P_IPV6): { | 158 | case htons(ETH_P_IPV6): { |
159 | struct ipv6hdr *iph = ipv6_hdr(skb); | 159 | struct ipv6hdr *iph = ipv6_hdr(skb); |
160 | 160 | ||
161 | if (has_ports(iph->nexthdr)) | 161 | if (has_ports(iph->nexthdr)) |
@@ -213,9 +213,9 @@ static u32 flow_get_nfct(const struct sk_buff *skb) | |||
213 | static u32 flow_get_nfct_src(const struct sk_buff *skb) | 213 | static u32 flow_get_nfct_src(const struct sk_buff *skb) |
214 | { | 214 | { |
215 | switch (skb->protocol) { | 215 | switch (skb->protocol) { |
216 | case __constant_htons(ETH_P_IP): | 216 | case htons(ETH_P_IP): |
217 | return ntohl(CTTUPLE(skb, src.u3.ip)); | 217 | return ntohl(CTTUPLE(skb, src.u3.ip)); |
218 | case __constant_htons(ETH_P_IPV6): | 218 | case htons(ETH_P_IPV6): |
219 | return ntohl(CTTUPLE(skb, src.u3.ip6[3])); | 219 | return ntohl(CTTUPLE(skb, src.u3.ip6[3])); |
220 | } | 220 | } |
221 | fallback: | 221 | fallback: |
@@ -225,9 +225,9 @@ fallback: | |||
225 | static u32 flow_get_nfct_dst(const struct sk_buff *skb) | 225 | static u32 flow_get_nfct_dst(const struct sk_buff *skb) |
226 | { | 226 | { |
227 | switch (skb->protocol) { | 227 | switch (skb->protocol) { |
228 | case __constant_htons(ETH_P_IP): | 228 | case htons(ETH_P_IP): |
229 | return ntohl(CTTUPLE(skb, dst.u3.ip)); | 229 | return ntohl(CTTUPLE(skb, dst.u3.ip)); |
230 | case __constant_htons(ETH_P_IPV6): | 230 | case htons(ETH_P_IPV6): |
231 | return ntohl(CTTUPLE(skb, dst.u3.ip6[3])); | 231 | return ntohl(CTTUPLE(skb, dst.u3.ip6[3])); |
232 | } | 232 | } |
233 | fallback: | 233 | fallback: |
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c index cc49c932641d..bc450397487a 100644 --- a/net/sched/em_cmp.c +++ b/net/sched/em_cmp.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
16 | #include <linux/tc_ematch/tc_em_cmp.h> | 16 | #include <linux/tc_ematch/tc_em_cmp.h> |
17 | #include <asm/unaligned.h> | ||
17 | #include <net/pkt_cls.h> | 18 | #include <net/pkt_cls.h> |
18 | 19 | ||
19 | static inline int cmp_needs_transformation(struct tcf_em_cmp *cmp) | 20 | static inline int cmp_needs_transformation(struct tcf_em_cmp *cmp) |
@@ -37,8 +38,7 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em, | |||
37 | break; | 38 | break; |
38 | 39 | ||
39 | case TCF_EM_ALIGN_U16: | 40 | case TCF_EM_ALIGN_U16: |
40 | val = *ptr << 8; | 41 | val = get_unaligned_be16(ptr); |
41 | val |= *(ptr+1); | ||
42 | 42 | ||
43 | if (cmp_needs_transformation(cmp)) | 43 | if (cmp_needs_transformation(cmp)) |
44 | val = be16_to_cpu(val); | 44 | val = be16_to_cpu(val); |
@@ -47,10 +47,7 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em, | |||
47 | case TCF_EM_ALIGN_U32: | 47 | case TCF_EM_ALIGN_U32: |
48 | /* Worth checking boundaries? The branching seems | 48 | /* Worth checking boundaries? The branching seems |
49 | * to get worse. Visit again. */ | 49 | * to get worse. Visit again. */ |
50 | val = *ptr << 24; | 50 | val = get_unaligned_be32(ptr); |
51 | val |= *(ptr+1) << 16; | ||
52 | val |= *(ptr+2) << 8; | ||
53 | val |= *(ptr+3); | ||
54 | 51 | ||
55 | if (cmp_needs_transformation(cmp)) | 52 | if (cmp_needs_transformation(cmp)) |
56 | val = be32_to_cpu(val); | 53 | val = be32_to_cpu(val); |
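The em_cmp change above replaces open-coded byte shifting with get_unaligned_be16()/get_unaligned_be32(). A standalone sketch of the equivalent semantics (plain C, not kernel code): the bytes behind the pointer are combined as a big-endian value, regardless of host alignment or endianness.

#include <stdint.h>
#include <stdio.h>

/* Equivalent of what get_unaligned_be32() returns for a byte pointer:
 * the four bytes interpreted as a big-endian 32-bit value. */
static uint32_t be32_from_bytes(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	uint8_t buf[] = { 0x12, 0x34, 0x56, 0x78 };

	printf("0x%08x\n", be32_from_bytes(buf));	/* prints 0x12345678 */
	return 0;
}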
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index edd1298f85f6..ba43aab3a851 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c | |||
@@ -202,7 +202,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
202 | 202 | ||
203 | if (p->set_tc_index) { | 203 | if (p->set_tc_index) { |
204 | switch (skb->protocol) { | 204 | switch (skb->protocol) { |
205 | case __constant_htons(ETH_P_IP): | 205 | case htons(ETH_P_IP): |
206 | if (skb_cow_head(skb, sizeof(struct iphdr))) | 206 | if (skb_cow_head(skb, sizeof(struct iphdr))) |
207 | goto drop; | 207 | goto drop; |
208 | 208 | ||
@@ -210,7 +210,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
210 | & ~INET_ECN_MASK; | 210 | & ~INET_ECN_MASK; |
211 | break; | 211 | break; |
212 | 212 | ||
213 | case __constant_htons(ETH_P_IPV6): | 213 | case htons(ETH_P_IPV6): |
214 | if (skb_cow_head(skb, sizeof(struct ipv6hdr))) | 214 | if (skb_cow_head(skb, sizeof(struct ipv6hdr))) |
215 | goto drop; | 215 | goto drop; |
216 | 216 | ||
@@ -289,11 +289,11 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) | |||
289 | pr_debug("index %d->%d\n", skb->tc_index, index); | 289 | pr_debug("index %d->%d\n", skb->tc_index, index); |
290 | 290 | ||
291 | switch (skb->protocol) { | 291 | switch (skb->protocol) { |
292 | case __constant_htons(ETH_P_IP): | 292 | case htons(ETH_P_IP): |
293 | ipv4_change_dsfield(ip_hdr(skb), p->mask[index], | 293 | ipv4_change_dsfield(ip_hdr(skb), p->mask[index], |
294 | p->value[index]); | 294 | p->value[index]); |
295 | break; | 295 | break; |
296 | case __constant_htons(ETH_P_IPV6): | 296 | case htons(ETH_P_IPV6): |
297 | ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index], | 297 | ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index], |
298 | p->value[index]); | 298 | p->value[index]); |
299 | break; | 299 | break; |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index ec0a0839ce51..5e7e0bd38fe8 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -44,10 +44,7 @@ static inline int qdisc_qlen(struct Qdisc *q) | |||
44 | 44 | ||
45 | static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) | 45 | static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) |
46 | { | 46 | { |
47 | if (unlikely(skb->next)) | 47 | __skb_queue_head(&q->requeue, skb); |
48 | q->gso_skb = skb; | ||
49 | else | ||
50 | q->ops->requeue(skb, q); | ||
51 | 48 | ||
52 | __netif_schedule(q); | 49 | __netif_schedule(q); |
53 | return 0; | 50 | return 0; |
@@ -55,12 +52,21 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) | |||
55 | 52 | ||
56 | static inline struct sk_buff *dequeue_skb(struct Qdisc *q) | 53 | static inline struct sk_buff *dequeue_skb(struct Qdisc *q) |
57 | { | 54 | { |
58 | struct sk_buff *skb; | 55 | struct sk_buff *skb = skb_peek(&q->requeue); |
56 | |||
57 | if (unlikely(skb)) { | ||
58 | struct net_device *dev = qdisc_dev(q); | ||
59 | struct netdev_queue *txq; | ||
59 | 60 | ||
60 | if ((skb = q->gso_skb)) | 61 | /* check the reason for requeuing without tx lock first */ |
61 | q->gso_skb = NULL; | 62 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); |
62 | else | 63 | if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq)) |
64 | __skb_unlink(skb, &q->requeue); | ||
65 | else | ||
66 | skb = NULL; | ||
67 | } else { | ||
63 | skb = q->dequeue(q); | 68 | skb = q->dequeue(q); |
69 | } | ||
64 | 70 | ||
65 | return skb; | 71 | return skb; |
66 | } | 72 | } |
@@ -327,6 +333,7 @@ struct Qdisc noop_qdisc = { | |||
327 | .flags = TCQ_F_BUILTIN, | 333 | .flags = TCQ_F_BUILTIN, |
328 | .ops = &noop_qdisc_ops, | 334 | .ops = &noop_qdisc_ops, |
329 | .list = LIST_HEAD_INIT(noop_qdisc.list), | 335 | .list = LIST_HEAD_INIT(noop_qdisc.list), |
336 | .requeue.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), | ||
330 | .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), | 337 | .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), |
331 | .dev_queue = &noop_netdev_queue, | 338 | .dev_queue = &noop_netdev_queue, |
332 | }; | 339 | }; |
@@ -352,6 +359,7 @@ static struct Qdisc noqueue_qdisc = { | |||
352 | .flags = TCQ_F_BUILTIN, | 359 | .flags = TCQ_F_BUILTIN, |
353 | .ops = &noqueue_qdisc_ops, | 360 | .ops = &noqueue_qdisc_ops, |
354 | .list = LIST_HEAD_INIT(noqueue_qdisc.list), | 361 | .list = LIST_HEAD_INIT(noqueue_qdisc.list), |
362 | .requeue.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock), | ||
355 | .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock), | 363 | .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock), |
356 | .dev_queue = &noqueue_netdev_queue, | 364 | .dev_queue = &noqueue_netdev_queue, |
357 | }; | 365 | }; |
@@ -472,6 +480,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, | |||
472 | sch->padded = (char *) sch - (char *) p; | 480 | sch->padded = (char *) sch - (char *) p; |
473 | 481 | ||
474 | INIT_LIST_HEAD(&sch->list); | 482 | INIT_LIST_HEAD(&sch->list); |
483 | skb_queue_head_init(&sch->requeue); | ||
475 | skb_queue_head_init(&sch->q); | 484 | skb_queue_head_init(&sch->q); |
476 | sch->ops = ops; | 485 | sch->ops = ops; |
477 | sch->enqueue = ops->enqueue; | 486 | sch->enqueue = ops->enqueue; |
@@ -539,7 +548,7 @@ void qdisc_destroy(struct Qdisc *qdisc) | |||
539 | module_put(ops->owner); | 548 | module_put(ops->owner); |
540 | dev_put(qdisc_dev(qdisc)); | 549 | dev_put(qdisc_dev(qdisc)); |
541 | 550 | ||
542 | kfree_skb(qdisc->gso_skb); | 551 | __skb_queue_purge(&qdisc->requeue); |
543 | 552 | ||
544 | kfree((char *) qdisc - qdisc->padded); | 553 | kfree((char *) qdisc - qdisc->padded); |
545 | } | 554 | } |
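A condensed, standalone sketch (userspace C with invented types) of the dequeue decision the sch_generic changes above introduce: an skb parked on the qdisc's requeue list is handed out only if the TX queue it is mapped to is neither stopped nor frozen; otherwise dequeue yields NULL and the skb stays parked. In the kernel the checks are netif_tx_queue_stopped() and netif_tx_queue_frozen() on the queue returned by netdev_get_tx_queue().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_skb { int id; };

/* requeued stands in for skb_peek(&q->requeue); real_dequeue for q->dequeue(q). */
static struct fake_skb *dequeue(struct fake_skb *requeued,
				bool txq_stopped, bool txq_frozen,
				struct fake_skb *(*real_dequeue)(void))
{
	if (requeued) {
		if (!txq_stopped && !txq_frozen)
			return requeued;	/* unlink and hand it out */
		return NULL;			/* keep it parked */
	}
	return real_dequeue();
}

static struct fake_skb *empty_qdisc(void) { return NULL; }

int main(void)
{
	struct fake_skb parked = { .id = 1 };

	printf("%p\n", (void *)dequeue(&parked, true, false, empty_qdisc));
	printf("%p\n", (void *)dequeue(&parked, false, false, empty_qdisc));
	return 0;
}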
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c new file mode 100644 index 000000000000..915f3149dde2 --- /dev/null +++ b/net/sched/sch_multiq.c | |||
@@ -0,0 +1,477 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008, Intel Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along with | ||
14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | ||
15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | ||
16 | * | ||
17 | * Author: Alexander Duyck <alexander.h.duyck@intel.com> | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/skbuff.h> | ||
26 | #include <net/netlink.h> | ||
27 | #include <net/pkt_sched.h> | ||
28 | |||
29 | |||
30 | struct multiq_sched_data { | ||
31 | u16 bands; | ||
32 | u16 max_bands; | ||
33 | u16 curband; | ||
34 | struct tcf_proto *filter_list; | ||
35 | struct Qdisc **queues; | ||
36 | }; | ||
37 | |||
38 | |||
39 | static struct Qdisc * | ||
40 | multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) | ||
41 | { | ||
42 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
43 | u32 band; | ||
44 | struct tcf_result res; | ||
45 | int err; | ||
46 | |||
47 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; | ||
48 | err = tc_classify(skb, q->filter_list, &res); | ||
49 | #ifdef CONFIG_NET_CLS_ACT | ||
50 | switch (err) { | ||
51 | case TC_ACT_STOLEN: | ||
52 | case TC_ACT_QUEUED: | ||
53 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; | ||
54 | case TC_ACT_SHOT: | ||
55 | return NULL; | ||
56 | } | ||
57 | #endif | ||
58 | band = skb_get_queue_mapping(skb); | ||
59 | |||
60 | if (band >= q->bands) | ||
61 | return q->queues[0]; | ||
62 | |||
63 | return q->queues[band]; | ||
64 | } | ||
65 | |||
66 | static int | ||
67 | multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | ||
68 | { | ||
69 | struct Qdisc *qdisc; | ||
70 | int ret; | ||
71 | |||
72 | qdisc = multiq_classify(skb, sch, &ret); | ||
73 | #ifdef CONFIG_NET_CLS_ACT | ||
74 | if (qdisc == NULL) { | ||
75 | |||
76 | if (ret & __NET_XMIT_BYPASS) | ||
77 | sch->qstats.drops++; | ||
78 | kfree_skb(skb); | ||
79 | return ret; | ||
80 | } | ||
81 | #endif | ||
82 | |||
83 | ret = qdisc_enqueue(skb, qdisc); | ||
84 | if (ret == NET_XMIT_SUCCESS) { | ||
85 | sch->bstats.bytes += qdisc_pkt_len(skb); | ||
86 | sch->bstats.packets++; | ||
87 | sch->q.qlen++; | ||
88 | return NET_XMIT_SUCCESS; | ||
89 | } | ||
90 | if (net_xmit_drop_count(ret)) | ||
91 | sch->qstats.drops++; | ||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | |||
96 | static int | ||
97 | multiq_requeue(struct sk_buff *skb, struct Qdisc *sch) | ||
98 | { | ||
99 | struct Qdisc *qdisc; | ||
100 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
101 | int ret; | ||
102 | |||
103 | qdisc = multiq_classify(skb, sch, &ret); | ||
104 | #ifdef CONFIG_NET_CLS_ACT | ||
105 | if (qdisc == NULL) { | ||
106 | if (ret & __NET_XMIT_BYPASS) | ||
107 | sch->qstats.drops++; | ||
108 | kfree_skb(skb); | ||
109 | return ret; | ||
110 | } | ||
111 | #endif | ||
112 | |||
113 | ret = qdisc->ops->requeue(skb, qdisc); | ||
114 | if (ret == NET_XMIT_SUCCESS) { | ||
115 | sch->q.qlen++; | ||
116 | sch->qstats.requeues++; | ||
117 | if (q->curband) | ||
118 | q->curband--; | ||
119 | else | ||
120 | q->curband = q->bands - 1; | ||
121 | return NET_XMIT_SUCCESS; | ||
122 | } | ||
123 | if (net_xmit_drop_count(ret)) | ||
124 | sch->qstats.drops++; | ||
125 | return ret; | ||
126 | } | ||
127 | |||
128 | |||
129 | static struct sk_buff *multiq_dequeue(struct Qdisc *sch) | ||
130 | { | ||
131 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
132 | struct Qdisc *qdisc; | ||
133 | struct sk_buff *skb; | ||
134 | int band; | ||
135 | |||
136 | for (band = 0; band < q->bands; band++) { | ||
137 | /* cycle through bands to ensure fairness */ | ||
138 | q->curband++; | ||
139 | if (q->curband >= q->bands) | ||
140 | q->curband = 0; | ||
141 | |||
142 | /* Check that target subqueue is available before | ||
143 | * pulling an skb to avoid excessive requeues | ||
144 | */ | ||
145 | if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) { | ||
146 | qdisc = q->queues[q->curband]; | ||
147 | skb = qdisc->dequeue(qdisc); | ||
148 | if (skb) { | ||
149 | sch->q.qlen--; | ||
150 | return skb; | ||
151 | } | ||
152 | } | ||
153 | } | ||
154 | return NULL; | ||
155 | |||
156 | } | ||
157 | |||
158 | static unsigned int multiq_drop(struct Qdisc *sch) | ||
159 | { | ||
160 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
161 | int band; | ||
162 | unsigned int len; | ||
163 | struct Qdisc *qdisc; | ||
164 | |||
165 | for (band = q->bands-1; band >= 0; band--) { | ||
166 | qdisc = q->queues[band]; | ||
167 | if (qdisc->ops->drop) { | ||
168 | len = qdisc->ops->drop(qdisc); | ||
169 | if (len != 0) { | ||
170 | sch->q.qlen--; | ||
171 | return len; | ||
172 | } | ||
173 | } | ||
174 | } | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | |||
179 | static void | ||
180 | multiq_reset(struct Qdisc *sch) | ||
181 | { | ||
182 | u16 band; | ||
183 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
184 | |||
185 | for (band = 0; band < q->bands; band++) | ||
186 | qdisc_reset(q->queues[band]); | ||
187 | sch->q.qlen = 0; | ||
188 | q->curband = 0; | ||
189 | } | ||
190 | |||
191 | static void | ||
192 | multiq_destroy(struct Qdisc *sch) | ||
193 | { | ||
194 | int band; | ||
195 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
196 | |||
197 | tcf_destroy_chain(&q->filter_list); | ||
198 | for (band = 0; band < q->bands; band++) | ||
199 | qdisc_destroy(q->queues[band]); | ||
200 | |||
201 | kfree(q->queues); | ||
202 | } | ||
203 | |||
204 | static int multiq_tune(struct Qdisc *sch, struct nlattr *opt) | ||
205 | { | ||
206 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
207 | struct tc_multiq_qopt *qopt; | ||
208 | int i; | ||
209 | |||
210 | if (!netif_is_multiqueue(qdisc_dev(sch))) | ||
211 | return -EINVAL; | ||
212 | if (nla_len(opt) < sizeof(*qopt)) | ||
213 | return -EINVAL; | ||
214 | |||
215 | qopt = nla_data(opt); | ||
216 | |||
217 | qopt->bands = qdisc_dev(sch)->real_num_tx_queues; | ||
218 | |||
219 | sch_tree_lock(sch); | ||
220 | q->bands = qopt->bands; | ||
221 | for (i = q->bands; i < q->max_bands; i++) { | ||
222 | if (q->queues[i] != &noop_qdisc) { | ||
223 | struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc); | ||
224 | qdisc_tree_decrease_qlen(child, child->q.qlen); | ||
225 | qdisc_destroy(child); | ||
226 | } | ||
227 | } | ||
228 | |||
229 | sch_tree_unlock(sch); | ||
230 | |||
231 | for (i = 0; i < q->bands; i++) { | ||
232 | if (q->queues[i] == &noop_qdisc) { | ||
233 | struct Qdisc *child; | ||
234 | child = qdisc_create_dflt(qdisc_dev(sch), | ||
235 | sch->dev_queue, | ||
236 | &pfifo_qdisc_ops, | ||
237 | TC_H_MAKE(sch->handle, | ||
238 | i + 1)); | ||
239 | if (child) { | ||
240 | sch_tree_lock(sch); | ||
241 | child = xchg(&q->queues[i], child); | ||
242 | |||
243 | if (child != &noop_qdisc) { | ||
244 | qdisc_tree_decrease_qlen(child, | ||
245 | child->q.qlen); | ||
246 | qdisc_destroy(child); | ||
247 | } | ||
248 | sch_tree_unlock(sch); | ||
249 | } | ||
250 | } | ||
251 | } | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | static int multiq_init(struct Qdisc *sch, struct nlattr *opt) | ||
256 | { | ||
257 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
258 | int i, err; | ||
259 | |||
260 | q->queues = NULL; | ||
261 | |||
262 | if (opt == NULL) | ||
263 | return -EINVAL; | ||
264 | |||
265 | q->max_bands = qdisc_dev(sch)->num_tx_queues; | ||
266 | |||
267 | q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL); | ||
268 | if (!q->queues) | ||
269 | return -ENOBUFS; | ||
270 | for (i = 0; i < q->max_bands; i++) | ||
271 | q->queues[i] = &noop_qdisc; | ||
272 | |||
273 | err = multiq_tune(sch,opt); | ||
274 | |||
275 | if (err) | ||
276 | kfree(q->queues); | ||
277 | |||
278 | return err; | ||
279 | } | ||
280 | |||
281 | static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb) | ||
282 | { | ||
283 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
284 | unsigned char *b = skb_tail_pointer(skb); | ||
285 | struct tc_multiq_qopt opt; | ||
286 | |||
287 | opt.bands = q->bands; | ||
288 | opt.max_bands = q->max_bands; | ||
289 | |||
290 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); | ||
291 | |||
292 | return skb->len; | ||
293 | |||
294 | nla_put_failure: | ||
295 | nlmsg_trim(skb, b); | ||
296 | return -1; | ||
297 | } | ||
298 | |||
299 | static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | ||
300 | struct Qdisc **old) | ||
301 | { | ||
302 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
303 | unsigned long band = arg - 1; | ||
304 | |||
305 | if (band >= q->bands) | ||
306 | return -EINVAL; | ||
307 | |||
308 | if (new == NULL) | ||
309 | new = &noop_qdisc; | ||
310 | |||
311 | sch_tree_lock(sch); | ||
312 | *old = q->queues[band]; | ||
313 | q->queues[band] = new; | ||
314 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | ||
315 | qdisc_reset(*old); | ||
316 | sch_tree_unlock(sch); | ||
317 | |||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | static struct Qdisc * | ||
322 | multiq_leaf(struct Qdisc *sch, unsigned long arg) | ||
323 | { | ||
324 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
325 | unsigned long band = arg - 1; | ||
326 | |||
327 | if (band >= q->bands) | ||
328 | return NULL; | ||
329 | |||
330 | return q->queues[band]; | ||
331 | } | ||
332 | |||
333 | static unsigned long multiq_get(struct Qdisc *sch, u32 classid) | ||
334 | { | ||
335 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
336 | unsigned long band = TC_H_MIN(classid); | ||
337 | |||
338 | if (band - 1 >= q->bands) | ||
339 | return 0; | ||
340 | return band; | ||
341 | } | ||
342 | |||
343 | static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent, | ||
344 | u32 classid) | ||
345 | { | ||
346 | return multiq_get(sch, classid); | ||
347 | } | ||
348 | |||
349 | |||
350 | static void multiq_put(struct Qdisc *q, unsigned long cl) | ||
351 | { | ||
352 | return; | ||
353 | } | ||
354 | |||
355 | static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent, | ||
356 | struct nlattr **tca, unsigned long *arg) | ||
357 | { | ||
358 | unsigned long cl = *arg; | ||
359 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
360 | |||
361 | if (cl - 1 > q->bands) | ||
362 | return -ENOENT; | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | static int multiq_delete(struct Qdisc *sch, unsigned long cl) | ||
367 | { | ||
368 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
369 | if (cl - 1 > q->bands) | ||
370 | return -ENOENT; | ||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | |||
375 | static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, | ||
376 | struct sk_buff *skb, struct tcmsg *tcm) | ||
377 | { | ||
378 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
379 | |||
380 | if (cl - 1 > q->bands) | ||
381 | return -ENOENT; | ||
382 | tcm->tcm_handle |= TC_H_MIN(cl); | ||
383 | if (q->queues[cl-1]) | ||
384 | tcm->tcm_info = q->queues[cl-1]->handle; | ||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl, | ||
389 | struct gnet_dump *d) | ||
390 | { | ||
391 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
392 | struct Qdisc *cl_q; | ||
393 | |||
394 | cl_q = q->queues[cl - 1]; | ||
395 | if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 || | ||
396 | gnet_stats_copy_queue(d, &cl_q->qstats) < 0) | ||
397 | return -1; | ||
398 | |||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | ||
403 | { | ||
404 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
405 | int band; | ||
406 | |||
407 | if (arg->stop) | ||
408 | return; | ||
409 | |||
410 | for (band = 0; band < q->bands; band++) { | ||
411 | if (arg->count < arg->skip) { | ||
412 | arg->count++; | ||
413 | continue; | ||
414 | } | ||
415 | if (arg->fn(sch, band+1, arg) < 0) { | ||
416 | arg->stop = 1; | ||
417 | break; | ||
418 | } | ||
419 | arg->count++; | ||
420 | } | ||
421 | } | ||
422 | |||
423 | static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl) | ||
424 | { | ||
425 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
426 | |||
427 | if (cl) | ||
428 | return NULL; | ||
429 | return &q->filter_list; | ||
430 | } | ||
431 | |||
432 | static const struct Qdisc_class_ops multiq_class_ops = { | ||
433 | .graft = multiq_graft, | ||
434 | .leaf = multiq_leaf, | ||
435 | .get = multiq_get, | ||
436 | .put = multiq_put, | ||
437 | .change = multiq_change, | ||
438 | .delete = multiq_delete, | ||
439 | .walk = multiq_walk, | ||
440 | .tcf_chain = multiq_find_tcf, | ||
441 | .bind_tcf = multiq_bind, | ||
442 | .unbind_tcf = multiq_put, | ||
443 | .dump = multiq_dump_class, | ||
444 | .dump_stats = multiq_dump_class_stats, | ||
445 | }; | ||
446 | |||
447 | static struct Qdisc_ops multiq_qdisc_ops __read_mostly = { | ||
448 | .next = NULL, | ||
449 | .cl_ops = &multiq_class_ops, | ||
450 | .id = "multiq", | ||
451 | .priv_size = sizeof(struct multiq_sched_data), | ||
452 | .enqueue = multiq_enqueue, | ||
453 | .dequeue = multiq_dequeue, | ||
454 | .requeue = multiq_requeue, | ||
455 | .drop = multiq_drop, | ||
456 | .init = multiq_init, | ||
457 | .reset = multiq_reset, | ||
458 | .destroy = multiq_destroy, | ||
459 | .change = multiq_tune, | ||
460 | .dump = multiq_dump, | ||
461 | .owner = THIS_MODULE, | ||
462 | }; | ||
463 | |||
464 | static int __init multiq_module_init(void) | ||
465 | { | ||
466 | return register_qdisc(&multiq_qdisc_ops); | ||
467 | } | ||
468 | |||
469 | static void __exit multiq_module_exit(void) | ||
470 | { | ||
471 | unregister_qdisc(&multiq_qdisc_ops); | ||
472 | } | ||
473 | |||
474 | module_init(multiq_module_init) | ||
475 | module_exit(multiq_module_exit) | ||
476 | |||
477 | MODULE_LICENSE("GPL"); | ||
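A standalone sketch (userspace C with invented types) of the fairness loop in multiq_dequeue() above: curband advances round-robin and a band is only drained when its hardware subqueue is active, so each dequeue makes at most one pass over the bands. In the kernel the availability test is __netif_subqueue_stopped() and the per-band queues are child qdiscs.

#include <stdbool.h>
#include <stdio.h>

#define NBANDS 4

/* Each band is reduced to a packet count and a "subqueue stopped" flag. */
struct band { int qlen; bool stopped; };

static int multiq_dequeue_band(struct band *bands, int nbands, int *curband)
{
	int i;

	for (i = 0; i < nbands; i++) {
		*curband = (*curband + 1) % nbands;
		if (!bands[*curband].stopped && bands[*curband].qlen > 0) {
			bands[*curband].qlen--;
			return *curband;	/* band we dequeued from */
		}
	}
	return -1;				/* nothing available */
}

int main(void)
{
	struct band bands[NBANDS] = {
		{ .qlen = 2 }, { .qlen = 2, .stopped = true },
		{ .qlen = 2 }, { .qlen = 0 },
	};
	int curband = 0, b;

	while ((b = multiq_dequeue_band(bands, NBANDS, &curband)) >= 0)
		printf("dequeued from band %d\n", b);
	return 0;
}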
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 3781e55046d0..a11959908d9a 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -388,6 +388,20 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { | |||
388 | [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, | 388 | [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, |
389 | }; | 389 | }; |
390 | 390 | ||
391 | static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, | ||
392 | const struct nla_policy *policy, int len) | ||
393 | { | ||
394 | int nested_len = nla_len(nla) - NLA_ALIGN(len); | ||
395 | |||
396 | if (nested_len < 0) | ||
397 | return -EINVAL; | ||
398 | if (nested_len >= nla_attr_size(0)) | ||
399 | return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), | ||
400 | nested_len, policy); | ||
401 | memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); | ||
402 | return 0; | ||
403 | } | ||
404 | |||
391 | /* Parse netlink message to set options */ | 405 | /* Parse netlink message to set options */ |
392 | static int netem_change(struct Qdisc *sch, struct nlattr *opt) | 406 | static int netem_change(struct Qdisc *sch, struct nlattr *opt) |
393 | { | 407 | { |
@@ -399,8 +413,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt) | |||
399 | if (opt == NULL) | 413 | if (opt == NULL) |
400 | return -EINVAL; | 414 | return -EINVAL; |
401 | 415 | ||
402 | ret = nla_parse_nested_compat(tb, TCA_NETEM_MAX, opt, netem_policy, | 416 | qopt = nla_data(opt); |
403 | qopt, sizeof(*qopt)); | 417 | ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); |
404 | if (ret < 0) | 418 | if (ret < 0) |
405 | return ret; | 419 | return ret; |
406 | 420 | ||
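The parse_attr() helper added above open-codes what nla_parse_nested_compat() previously did for netem: the TCA_OPTIONS attribute carries a fixed struct tc_netem_qopt first, and any optional nested attributes follow after NLA_ALIGN(sizeof(struct tc_netem_qopt)). A rough sketch of that layout and of a caller, assuming it lives in sch_netem.c next to parse_attr() and netem_policy; example_parse_netem_opt() is a hypothetical name.

/* Sketch: layout of the netem TCA_OPTIONS payload and how it is split.
 *
 *   nla_data(opt): [ struct tc_netem_qopt ][ pad ][ nested nlattrs ... ]
 *                  |--- NLA_ALIGN(sizeof(struct tc_netem_qopt)) ---|
 */
static int example_parse_netem_opt(struct nlattr *opt)
{
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int err;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);		/* fixed, always-present part */
	err = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy,
			 sizeof(*qopt));
	if (err < 0)			/* optional nested attributes  */
		return err;

	if (tb[TCA_NETEM_CORR]) {
		const struct tc_netem_corr *c = nla_data(tb[TCA_NETEM_CORR]);

		pr_debug("delay correlation %u\n", c->delay_corr);
	}
	pr_debug("netem limit %u\n", qopt->limit);
	return 0;
}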
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index a6697c686c7f..504a78cdb718 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -254,16 +254,12 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
254 | { | 254 | { |
255 | struct prio_sched_data *q = qdisc_priv(sch); | 255 | struct prio_sched_data *q = qdisc_priv(sch); |
256 | unsigned char *b = skb_tail_pointer(skb); | 256 | unsigned char *b = skb_tail_pointer(skb); |
257 | struct nlattr *nest; | ||
258 | struct tc_prio_qopt opt; | 257 | struct tc_prio_qopt opt; |
259 | 258 | ||
260 | opt.bands = q->bands; | 259 | opt.bands = q->bands; |
261 | memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); | 260 | memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); |
262 | 261 | ||
263 | nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt); | 262 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); |
264 | if (nest == NULL) | ||
265 | goto nla_put_failure; | ||
266 | nla_nest_compat_end(skb, nest); | ||
267 | 263 | ||
268 | return skb->len; | 264 | return skb->len; |
269 | 265 | ||
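The prio_dump() hunk above drops the nla_nest_compat_start()/nla_nest_compat_end() pair and emits struct tc_prio_qopt directly as TCA_OPTIONS, which is the flat format tc expects for this qdisc. NLA_PUT() is the goto-on-failure wrapper around nla_put(); the sketch below shows the same dump with the plain function call, assuming it sits in sch_prio.c (prio_sched_data is file-local) and that example_prio_dump() is a hypothetical name.

/* What NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt) boils down to:
 * nla_put() returns a negative error when the skb runs out of
 * tailroom, and the dump path then rewinds to the saved tail. */
static int example_prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_prio_qopt opt;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt) < 0)
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}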
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 6e041d10dbdb..fe1508ef0d3d 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -119,7 +119,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
119 | u32 h, h2; | 119 | u32 h, h2; |
120 | 120 | ||
121 | switch (skb->protocol) { | 121 | switch (skb->protocol) { |
122 | case __constant_htons(ETH_P_IP): | 122 | case htons(ETH_P_IP): |
123 | { | 123 | { |
124 | const struct iphdr *iph = ip_hdr(skb); | 124 | const struct iphdr *iph = ip_hdr(skb); |
125 | h = iph->daddr; | 125 | h = iph->daddr; |
@@ -134,7 +134,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |||
134 | h2 ^= *(((u32*)iph) + iph->ihl); | 134 | h2 ^= *(((u32*)iph) + iph->ihl); |
135 | break; | 135 | break; |
136 | } | 136 | } |
137 | case __constant_htons(ETH_P_IPV6): | 137 | case htons(ETH_P_IPV6): |
138 | { | 138 | { |
139 | struct ipv6hdr *iph = ipv6_hdr(skb); | 139 | struct ipv6hdr *iph = ipv6_hdr(skb); |
140 | h = iph->daddr.s6_addr32[3]; | 140 | h = iph->daddr.s6_addr32[3]; |
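The __constant_htons() to htons() conversions in this hunk (and the __constant_htonl() ones in rpc_rdma.c further down) work because htons()/htonl() now fold to integer constant expressions when given compile-time constants, so the __constant_ spellings are no longer needed in case labels. A small standalone sketch of the pattern; example_classify() is a hypothetical name and not part of the patch.

/* Dispatch on skb->protocol with byte-order-converted case labels.
 * skb->protocol is in network byte order, so the labels must be too;
 * htons(ETH_P_IP) is a valid constant expression for a case label. */
#include <linux/if_ether.h>
#include <linux/skbuff.h>

static unsigned int example_classify(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return 1;	/* IPv4 */
	case htons(ETH_P_IPV6):
		return 2;	/* IPv6 */
	default:
		return 0;	/* anything else */
	}
}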
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 5061a26c5028..7b23803343cc 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
@@ -317,7 +317,7 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, | |||
317 | } | 317 | } |
318 | 318 | ||
319 | /* Insert before pos. */ | 319 | /* Insert before pos. */ |
320 | __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm); | 320 | __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event)); |
321 | 321 | ||
322 | } | 322 | } |
323 | 323 | ||
@@ -825,8 +825,7 @@ static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq, | |||
825 | 825 | ||
826 | 826 | ||
827 | /* Insert before pos. */ | 827 | /* Insert before pos. */ |
828 | __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby); | 828 | __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event)); |
829 | |||
830 | } | 829 | } |
831 | 830 | ||
832 | static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, | 831 | static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, |
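Both sctp hunks replace the open-coded __skb_insert(new, pos->prev, pos, list) with __skb_queue_before(list, pos, new), which states the intent ("put the new skb in front of pos") through the sk_buff_head helper instead of touching the prev/next pointers by hand. A short sketch of sorted insertion with that helper follows; it assumes the caller already holds whatever lock protects the queue (the double-underscore helpers take none), and example_insert_sorted() plus the key() callback are hypothetical.

/* Keep a queue ordered by a per-skb key: insert each new skb just
 * before the first element with a larger key, or append at the tail. */
#include <linux/skbuff.h>

static void example_insert_sorted(struct sk_buff_head *list,
				  struct sk_buff *new,
				  u32 (*key)(const struct sk_buff *))
{
	struct sk_buff *pos;

	skb_queue_walk(list, pos) {
		if (key(pos) > key(new)) {
			__skb_queue_before(list, pos, new);
			return;
		}
	}
	__skb_queue_tail(list, new);
}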
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index e55427f73dfe..5c1954d28d09 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
@@ -769,7 +769,7 @@ repost: | |||
769 | /* check for expected message types */ | 769 | /* check for expected message types */ |
770 | /* The order of some of these tests is important. */ | 770 | /* The order of some of these tests is important. */ |
771 | switch (headerp->rm_type) { | 771 | switch (headerp->rm_type) { |
772 | case __constant_htonl(RDMA_MSG): | 772 | case htonl(RDMA_MSG): |
773 | /* never expect read chunks */ | 773 | /* never expect read chunks */ |
774 | /* never expect reply chunks (two ways to check) */ | 774 | /* never expect reply chunks (two ways to check) */ |
775 | /* never expect write chunks without having offered RDMA */ | 775 | /* never expect write chunks without having offered RDMA */ |
@@ -802,7 +802,7 @@ repost: | |||
802 | rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len); | 802 | rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len); |
803 | break; | 803 | break; |
804 | 804 | ||
805 | case __constant_htonl(RDMA_NOMSG): | 805 | case htonl(RDMA_NOMSG): |
806 | /* never expect read or write chunks, always reply chunks */ | 806 | /* never expect read or write chunks, always reply chunks */ |
807 | if (headerp->rm_body.rm_chunks[0] != xdr_zero || | 807 | if (headerp->rm_body.rm_chunks[0] != xdr_zero || |
808 | headerp->rm_body.rm_chunks[1] != xdr_zero || | 808 | headerp->rm_body.rm_chunks[1] != xdr_zero || |
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 833b024f8f66..b97bd9fe6b79 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig | |||
@@ -14,6 +14,38 @@ config NL80211 | |||
14 | 14 | ||
15 | If unsure, say Y. | 15 | If unsure, say Y. |
16 | 16 | ||
17 | config WIRELESS_OLD_REGULATORY | ||
18 | bool "Old wireless static regulatory defintions" | ||
19 | default n | ||
20 | ---help--- | ||
21 | This option enables the old static regulatory information | ||
22 | and uses it within the new framework. This is available | ||
23 | temporarily as an option to help prevent immediate issues | ||
24 | due to the switch to the new regulatory framework, which | ||
25 | requires a new userspace application that has the | ||
26 | database of regulatory information (CRDA) and another for | ||
27 | setting regulatory domains (iw). | ||
28 | |||
29 | For more information see: | ||
30 | |||
31 | http://wireless.kernel.org/en/developers/Regulatory/CRDA | ||
32 | http://wireless.kernel.org/en/users/Documentation/iw | ||
33 | |||
34 | It is important to note though that if you *do* have CRDA present | ||
35 | and if this option is enabled CRDA *will* be called to update the | ||
36 | regulatory domain (for US and JP only). Letting the user set the | ||
37 | regulatory domain through iw is also supported. This option | ||
38 | mainly exists to leave around for a kernel release some old static | ||
39 | regulatory domains that were defined and to keep around the old | ||
40 | ieee80211_regdom module parameter. These are being phased out and you | ||
41 | should stop using them ASAP. | ||
42 | |||
43 | Say N unless you cannot install a new userspace application | ||
44 | or have one currently depending on the ieee80211_regdom module | ||
45 | parameter and cannot port it to use the new userspace interfaces. | ||
46 | |||
47 | This is scheduled for removal in 2.6.29. | ||
48 | |||
17 | config WIRELESS_EXT | 49 | config WIRELESS_EXT |
18 | bool "Wireless extensions" | 50 | bool "Wireless extensions" |
19 | default n | 51 | default n |
diff --git a/net/wireless/core.c b/net/wireless/core.c index f1da0b93bc56..5cadbeb76a14 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * This is the linux wireless configuration interface. | 2 | * This is the linux wireless configuration interface. |
3 | * | 3 | * |
4 | * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> | 4 | * Copyright 2006-2008 Johannes Berg <johannes@sipsolutions.net> |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/if.h> | 7 | #include <linux/if.h> |
@@ -13,12 +13,14 @@ | |||
13 | #include <linux/debugfs.h> | 13 | #include <linux/debugfs.h> |
14 | #include <linux/notifier.h> | 14 | #include <linux/notifier.h> |
15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
16 | #include <linux/list.h> | ||
16 | #include <net/genetlink.h> | 17 | #include <net/genetlink.h> |
17 | #include <net/cfg80211.h> | 18 | #include <net/cfg80211.h> |
18 | #include <net/wireless.h> | 19 | #include <net/wireless.h> |
19 | #include "nl80211.h" | 20 | #include "nl80211.h" |
20 | #include "core.h" | 21 | #include "core.h" |
21 | #include "sysfs.h" | 22 | #include "sysfs.h" |
23 | #include "reg.h" | ||
22 | 24 | ||
23 | /* name for sysfs, %d is appended */ | 25 | /* name for sysfs, %d is appended */ |
24 | #define PHY_NAME "phy" | 26 | #define PHY_NAME "phy" |
@@ -32,7 +34,6 @@ MODULE_DESCRIPTION("wireless configuration support"); | |||
32 | * often because we need to do it for each command */ | 34 | * often because we need to do it for each command */ |
33 | LIST_HEAD(cfg80211_drv_list); | 35 | LIST_HEAD(cfg80211_drv_list); |
34 | DEFINE_MUTEX(cfg80211_drv_mutex); | 36 | DEFINE_MUTEX(cfg80211_drv_mutex); |
35 | static int wiphy_counter; | ||
36 | 37 | ||
37 | /* for debugfs */ | 38 | /* for debugfs */ |
38 | static struct dentry *ieee80211_debugfs_dir; | 39 | static struct dentry *ieee80211_debugfs_dir; |
@@ -204,6 +205,8 @@ out_unlock: | |||
204 | 205 | ||
205 | struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv) | 206 | struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv) |
206 | { | 207 | { |
208 | static int wiphy_counter; | ||
209 | |||
207 | struct cfg80211_registered_device *drv; | 210 | struct cfg80211_registered_device *drv; |
208 | int alloc_size; | 211 | int alloc_size; |
209 | 212 | ||
@@ -220,21 +223,18 @@ struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv) | |||
220 | 223 | ||
221 | mutex_lock(&cfg80211_drv_mutex); | 224 | mutex_lock(&cfg80211_drv_mutex); |
222 | 225 | ||
223 | drv->idx = wiphy_counter; | 226 | drv->idx = wiphy_counter++; |
224 | |||
225 | /* now increase counter for the next device unless | ||
226 | * it has wrapped previously */ | ||
227 | if (wiphy_counter >= 0) | ||
228 | wiphy_counter++; | ||
229 | |||
230 | mutex_unlock(&cfg80211_drv_mutex); | ||
231 | 227 | ||
232 | if (unlikely(drv->idx < 0)) { | 228 | if (unlikely(drv->idx < 0)) { |
229 | wiphy_counter--; | ||
230 | mutex_unlock(&cfg80211_drv_mutex); | ||
233 | /* ugh, wrapped! */ | 231 | /* ugh, wrapped! */ |
234 | kfree(drv); | 232 | kfree(drv); |
235 | return NULL; | 233 | return NULL; |
236 | } | 234 | } |
237 | 235 | ||
236 | mutex_unlock(&cfg80211_drv_mutex); | ||
237 | |||
238 | /* give it a proper name */ | 238 | /* give it a proper name */ |
239 | snprintf(drv->wiphy.dev.bus_id, BUS_ID_SIZE, | 239 | snprintf(drv->wiphy.dev.bus_id, BUS_ID_SIZE, |
240 | PHY_NAME "%d", drv->idx); | 240 | PHY_NAME "%d", drv->idx); |
@@ -259,6 +259,13 @@ int wiphy_register(struct wiphy *wiphy) | |||
259 | struct ieee80211_supported_band *sband; | 259 | struct ieee80211_supported_band *sband; |
260 | bool have_band = false; | 260 | bool have_band = false; |
261 | int i; | 261 | int i; |
262 | u16 ifmodes = wiphy->interface_modes; | ||
263 | |||
264 | /* sanity check ifmodes */ | ||
265 | WARN_ON(!ifmodes); | ||
266 | ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1; | ||
267 | if (WARN_ON(ifmodes != wiphy->interface_modes)) | ||
268 | wiphy->interface_modes = ifmodes; | ||
262 | 269 | ||
263 | /* sanity check supported bands/channels */ | 270 | /* sanity check supported bands/channels */ |
264 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 271 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
@@ -295,7 +302,9 @@ int wiphy_register(struct wiphy *wiphy) | |||
295 | ieee80211_set_bitrate_flags(wiphy); | 302 | ieee80211_set_bitrate_flags(wiphy); |
296 | 303 | ||
297 | /* set up regulatory info */ | 304 | /* set up regulatory info */ |
298 | wiphy_update_regulatory(wiphy); | 305 | mutex_lock(&cfg80211_reg_mutex); |
306 | wiphy_update_regulatory(wiphy, REGDOM_SET_BY_CORE); | ||
307 | mutex_unlock(&cfg80211_reg_mutex); | ||
299 | 308 | ||
300 | mutex_lock(&cfg80211_drv_mutex); | 309 | mutex_lock(&cfg80211_drv_mutex); |
301 | 310 | ||
@@ -373,6 +382,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | |||
373 | 382 | ||
374 | rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); | 383 | rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); |
375 | 384 | ||
385 | WARN_ON(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_UNSPECIFIED); | ||
386 | |||
376 | switch (state) { | 387 | switch (state) { |
377 | case NETDEV_REGISTER: | 388 | case NETDEV_REGISTER: |
378 | mutex_lock(&rdev->devlist_mtx); | 389 | mutex_lock(&rdev->devlist_mtx); |
@@ -404,7 +415,9 @@ static struct notifier_block cfg80211_netdev_notifier = { | |||
404 | 415 | ||
405 | static int cfg80211_init(void) | 416 | static int cfg80211_init(void) |
406 | { | 417 | { |
407 | int err = wiphy_sysfs_init(); | 418 | int err; |
419 | |||
420 | err = wiphy_sysfs_init(); | ||
408 | if (err) | 421 | if (err) |
409 | goto out_fail_sysfs; | 422 | goto out_fail_sysfs; |
410 | 423 | ||
@@ -418,8 +431,14 @@ static int cfg80211_init(void) | |||
418 | 431 | ||
419 | ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL); | 432 | ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL); |
420 | 433 | ||
434 | err = regulatory_init(); | ||
435 | if (err) | ||
436 | goto out_fail_reg; | ||
437 | |||
421 | return 0; | 438 | return 0; |
422 | 439 | ||
440 | out_fail_reg: | ||
441 | debugfs_remove(ieee80211_debugfs_dir); | ||
423 | out_fail_nl80211: | 442 | out_fail_nl80211: |
424 | unregister_netdevice_notifier(&cfg80211_netdev_notifier); | 443 | unregister_netdevice_notifier(&cfg80211_netdev_notifier); |
425 | out_fail_notifier: | 444 | out_fail_notifier: |
@@ -427,6 +446,7 @@ out_fail_notifier: | |||
427 | out_fail_sysfs: | 446 | out_fail_sysfs: |
428 | return err; | 447 | return err; |
429 | } | 448 | } |
449 | |||
430 | subsys_initcall(cfg80211_init); | 450 | subsys_initcall(cfg80211_init); |
431 | 451 | ||
432 | static void cfg80211_exit(void) | 452 | static void cfg80211_exit(void) |
@@ -435,5 +455,6 @@ static void cfg80211_exit(void) | |||
435 | nl80211_exit(); | 455 | nl80211_exit(); |
436 | unregister_netdevice_notifier(&cfg80211_netdev_notifier); | 456 | unregister_netdevice_notifier(&cfg80211_netdev_notifier); |
437 | wiphy_sysfs_exit(); | 457 | wiphy_sysfs_exit(); |
458 | regulatory_exit(); | ||
438 | } | 459 | } |
439 | module_exit(cfg80211_exit); | 460 | module_exit(cfg80211_exit); |
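wiphy_register() now sanity-checks wiphy->interface_modes, a bitmap of the NL80211_IFTYPE_* values the hardware supports (with the UNSPECIFIED bit masked off), so drivers are expected to fill it in before registering. A rough driver-side sketch under those assumptions; example_cfg_ops/example_register() are hypothetical names, the cfg80211_ops contents and the mandatory wiphy->bands[] setup are elided.

/* Minimal registration sketch: advertise the supported interface
 * types before wiphy_register() runs its sanity checks. */
#include <linux/bitops.h>
#include <net/cfg80211.h>

static struct cfg80211_ops example_cfg_ops = {
	/* .add_virtual_intf, .change_virtual_intf, ... */
};

static struct wiphy *example_register(void)
{
	struct wiphy *wiphy;
	int err;

	wiphy = wiphy_new(&example_cfg_ops, 0 /* no private area */);
	if (!wiphy)
		return NULL;

	/* Checked by wiphy_register(); NL80211_IFTYPE_UNSPECIFIED
	 * (bit 0) must not be set. */
	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
				 BIT(NL80211_IFTYPE_MONITOR);

	/* wiphy->bands[] setup elided; wiphy_register() also requires
	 * at least one supported band. */
	err = wiphy_register(wiphy);
	if (err) {
		wiphy_free(wiphy);
		return NULL;
	}
	return wiphy;
}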
diff --git a/net/wireless/core.h b/net/wireless/core.h index 7a02c356d63d..771cc5cc7658 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -79,6 +79,6 @@ extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv, | |||
79 | char *newname); | 79 | char *newname); |
80 | 80 | ||
81 | void ieee80211_set_bitrate_flags(struct wiphy *wiphy); | 81 | void ieee80211_set_bitrate_flags(struct wiphy *wiphy); |
82 | void wiphy_update_regulatory(struct wiphy *wiphy); | 82 | void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby); |
83 | 83 | ||
84 | #endif /* __NET_WIRELESS_CORE_H */ | 84 | #endif /* __NET_WIRELESS_CORE_H */ |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 59eb2cf42e5f..572793c8c7ab 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <net/cfg80211.h> | 18 | #include <net/cfg80211.h> |
19 | #include "core.h" | 19 | #include "core.h" |
20 | #include "nl80211.h" | 20 | #include "nl80211.h" |
21 | #include "reg.h" | ||
21 | 22 | ||
22 | /* the netlink family */ | 23 | /* the netlink family */ |
23 | static struct genl_family nl80211_fam = { | 24 | static struct genl_family nl80211_fam = { |
@@ -87,6 +88,16 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { | |||
87 | [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, | 88 | [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, |
88 | .len = IEEE80211_MAX_MESH_ID_LEN }, | 89 | .len = IEEE80211_MAX_MESH_ID_LEN }, |
89 | [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, | 90 | [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, |
91 | |||
92 | [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 }, | ||
93 | [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED }, | ||
94 | |||
95 | [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 }, | ||
96 | [NL80211_ATTR_BSS_SHORT_PREAMBLE] = { .type = NLA_U8 }, | ||
97 | [NL80211_ATTR_BSS_SHORT_SLOT_TIME] = { .type = NLA_U8 }, | ||
98 | |||
99 | [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY, | ||
100 | .len = NL80211_HT_CAPABILITY_LEN }, | ||
90 | }; | 101 | }; |
91 | 102 | ||
92 | /* message building helper */ | 103 | /* message building helper */ |
@@ -106,10 +117,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
106 | struct nlattr *nl_bands, *nl_band; | 117 | struct nlattr *nl_bands, *nl_band; |
107 | struct nlattr *nl_freqs, *nl_freq; | 118 | struct nlattr *nl_freqs, *nl_freq; |
108 | struct nlattr *nl_rates, *nl_rate; | 119 | struct nlattr *nl_rates, *nl_rate; |
120 | struct nlattr *nl_modes; | ||
109 | enum ieee80211_band band; | 121 | enum ieee80211_band band; |
110 | struct ieee80211_channel *chan; | 122 | struct ieee80211_channel *chan; |
111 | struct ieee80211_rate *rate; | 123 | struct ieee80211_rate *rate; |
112 | int i; | 124 | int i; |
125 | u16 ifmodes = dev->wiphy.interface_modes; | ||
113 | 126 | ||
114 | hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); | 127 | hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); |
115 | if (!hdr) | 128 | if (!hdr) |
@@ -118,6 +131,20 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
118 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); | 131 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); |
119 | NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); | 132 | NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); |
120 | 133 | ||
134 | nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES); | ||
135 | if (!nl_modes) | ||
136 | goto nla_put_failure; | ||
137 | |||
138 | i = 0; | ||
139 | while (ifmodes) { | ||
140 | if (ifmodes & 1) | ||
141 | NLA_PUT_FLAG(msg, i); | ||
142 | ifmodes >>= 1; | ||
143 | i++; | ||
144 | } | ||
145 | |||
146 | nla_nest_end(msg, nl_modes); | ||
147 | |||
121 | nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); | 148 | nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); |
122 | if (!nl_bands) | 149 | if (!nl_bands) |
123 | goto nla_put_failure; | 150 | goto nla_put_failure; |
@@ -272,7 +299,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
272 | 299 | ||
273 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); | 300 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); |
274 | NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); | 301 | NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); |
275 | /* TODO: interface type */ | 302 | NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); |
276 | return genlmsg_end(msg, hdr); | 303 | return genlmsg_end(msg, hdr); |
277 | 304 | ||
278 | nla_put_failure: | 305 | nla_put_failure: |
@@ -391,40 +418,56 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) | |||
391 | int err, ifindex; | 418 | int err, ifindex; |
392 | enum nl80211_iftype type; | 419 | enum nl80211_iftype type; |
393 | struct net_device *dev; | 420 | struct net_device *dev; |
394 | u32 flags; | 421 | u32 _flags, *flags = NULL; |
395 | 422 | ||
396 | memset(¶ms, 0, sizeof(params)); | 423 | memset(¶ms, 0, sizeof(params)); |
397 | 424 | ||
398 | if (info->attrs[NL80211_ATTR_IFTYPE]) { | ||
399 | type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); | ||
400 | if (type > NL80211_IFTYPE_MAX) | ||
401 | return -EINVAL; | ||
402 | } else | ||
403 | return -EINVAL; | ||
404 | |||
405 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 425 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); |
406 | if (err) | 426 | if (err) |
407 | return err; | 427 | return err; |
408 | ifindex = dev->ifindex; | 428 | ifindex = dev->ifindex; |
429 | type = dev->ieee80211_ptr->iftype; | ||
409 | dev_put(dev); | 430 | dev_put(dev); |
410 | 431 | ||
411 | if (!drv->ops->change_virtual_intf) { | 432 | err = -EINVAL; |
433 | if (info->attrs[NL80211_ATTR_IFTYPE]) { | ||
434 | type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); | ||
435 | if (type > NL80211_IFTYPE_MAX) | ||
436 | goto unlock; | ||
437 | } | ||
438 | |||
439 | if (!drv->ops->change_virtual_intf || | ||
440 | !(drv->wiphy.interface_modes & (1 << type))) { | ||
412 | err = -EOPNOTSUPP; | 441 | err = -EOPNOTSUPP; |
413 | goto unlock; | 442 | goto unlock; |
414 | } | 443 | } |
415 | 444 | ||
416 | if (type == NL80211_IFTYPE_MESH_POINT && | 445 | if (info->attrs[NL80211_ATTR_MESH_ID]) { |
417 | info->attrs[NL80211_ATTR_MESH_ID]) { | 446 | if (type != NL80211_IFTYPE_MESH_POINT) { |
447 | err = -EINVAL; | ||
448 | goto unlock; | ||
449 | } | ||
418 | params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); | 450 | params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); |
419 | params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); | 451 | params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); |
420 | } | 452 | } |
421 | 453 | ||
454 | if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { | ||
455 | if (type != NL80211_IFTYPE_MONITOR) { | ||
456 | err = -EINVAL; | ||
457 | goto unlock; | ||
458 | } | ||
459 | err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS], | ||
460 | &_flags); | ||
461 | if (!err) | ||
462 | flags = &_flags; | ||
463 | } | ||
422 | rtnl_lock(); | 464 | rtnl_lock(); |
423 | err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? | ||
424 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, | ||
425 | &flags); | ||
426 | err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, | 465 | err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, |
427 | type, err ? NULL : &flags, ¶ms); | 466 | type, flags, ¶ms); |
467 | |||
468 | dev = __dev_get_by_index(&init_net, ifindex); | ||
469 | WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != type)); | ||
470 | |||
428 | rtnl_unlock(); | 471 | rtnl_unlock(); |
429 | 472 | ||
430 | unlock: | 473 | unlock: |
@@ -455,7 +498,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | |||
455 | if (IS_ERR(drv)) | 498 | if (IS_ERR(drv)) |
456 | return PTR_ERR(drv); | 499 | return PTR_ERR(drv); |
457 | 500 | ||
458 | if (!drv->ops->add_virtual_intf) { | 501 | if (!drv->ops->add_virtual_intf || |
502 | !(drv->wiphy.interface_modes & (1 << type))) { | ||
459 | err = -EOPNOTSUPP; | 503 | err = -EOPNOTSUPP; |
460 | goto unlock; | 504 | goto unlock; |
461 | } | 505 | } |
@@ -1125,6 +1169,10 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) | |||
1125 | params.listen_interval = | 1169 | params.listen_interval = |
1126 | nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); | 1170 | nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); |
1127 | 1171 | ||
1172 | if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) | ||
1173 | params.ht_capa = | ||
1174 | nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); | ||
1175 | |||
1128 | if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], | 1176 | if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], |
1129 | ¶ms.station_flags)) | 1177 | ¶ms.station_flags)) |
1130 | return -EINVAL; | 1178 | return -EINVAL; |
@@ -1188,6 +1236,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
1188 | params.listen_interval = | 1236 | params.listen_interval = |
1189 | nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); | 1237 | nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); |
1190 | params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); | 1238 | params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); |
1239 | if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) | ||
1240 | params.ht_capa = | ||
1241 | nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); | ||
1191 | 1242 | ||
1192 | if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], | 1243 | if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], |
1193 | ¶ms.station_flags)) | 1244 | ¶ms.station_flags)) |
@@ -1525,6 +1576,183 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) | |||
1525 | return err; | 1576 | return err; |
1526 | } | 1577 | } |
1527 | 1578 | ||
1579 | static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) | ||
1580 | { | ||
1581 | struct cfg80211_registered_device *drv; | ||
1582 | int err; | ||
1583 | struct net_device *dev; | ||
1584 | struct bss_parameters params; | ||
1585 | |||
1586 | memset(¶ms, 0, sizeof(params)); | ||
1587 | /* default to not changing parameters */ | ||
1588 | params.use_cts_prot = -1; | ||
1589 | params.use_short_preamble = -1; | ||
1590 | params.use_short_slot_time = -1; | ||
1591 | |||
1592 | if (info->attrs[NL80211_ATTR_BSS_CTS_PROT]) | ||
1593 | params.use_cts_prot = | ||
1594 | nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]); | ||
1595 | if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]) | ||
1596 | params.use_short_preamble = | ||
1597 | nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]); | ||
1598 | if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]) | ||
1599 | params.use_short_slot_time = | ||
1600 | nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]); | ||
1601 | |||
1602 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | ||
1603 | if (err) | ||
1604 | return err; | ||
1605 | |||
1606 | if (!drv->ops->change_bss) { | ||
1607 | err = -EOPNOTSUPP; | ||
1608 | goto out; | ||
1609 | } | ||
1610 | |||
1611 | rtnl_lock(); | ||
1612 | err = drv->ops->change_bss(&drv->wiphy, dev, ¶ms); | ||
1613 | rtnl_unlock(); | ||
1614 | |||
1615 | out: | ||
1616 | cfg80211_put_dev(drv); | ||
1617 | dev_put(dev); | ||
1618 | return err; | ||
1619 | } | ||
1620 | |||
1621 | static const struct nla_policy | ||
1622 | reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = { | ||
1623 | [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 }, | ||
1624 | [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 }, | ||
1625 | [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 }, | ||
1626 | [NL80211_ATTR_FREQ_RANGE_MAX_BW] = { .type = NLA_U32 }, | ||
1627 | [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN] = { .type = NLA_U32 }, | ||
1628 | [NL80211_ATTR_POWER_RULE_MAX_EIRP] = { .type = NLA_U32 }, | ||
1629 | }; | ||
1630 | |||
1631 | static int parse_reg_rule(struct nlattr *tb[], | ||
1632 | struct ieee80211_reg_rule *reg_rule) | ||
1633 | { | ||
1634 | struct ieee80211_freq_range *freq_range = ®_rule->freq_range; | ||
1635 | struct ieee80211_power_rule *power_rule = ®_rule->power_rule; | ||
1636 | |||
1637 | if (!tb[NL80211_ATTR_REG_RULE_FLAGS]) | ||
1638 | return -EINVAL; | ||
1639 | if (!tb[NL80211_ATTR_FREQ_RANGE_START]) | ||
1640 | return -EINVAL; | ||
1641 | if (!tb[NL80211_ATTR_FREQ_RANGE_END]) | ||
1642 | return -EINVAL; | ||
1643 | if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]) | ||
1644 | return -EINVAL; | ||
1645 | if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]) | ||
1646 | return -EINVAL; | ||
1647 | |||
1648 | reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]); | ||
1649 | |||
1650 | freq_range->start_freq_khz = | ||
1651 | nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]); | ||
1652 | freq_range->end_freq_khz = | ||
1653 | nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]); | ||
1654 | freq_range->max_bandwidth_khz = | ||
1655 | nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]); | ||
1656 | |||
1657 | power_rule->max_eirp = | ||
1658 | nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]); | ||
1659 | |||
1660 | if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]) | ||
1661 | power_rule->max_antenna_gain = | ||
1662 | nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]); | ||
1663 | |||
1664 | return 0; | ||
1665 | } | ||
1666 | |||
1667 | static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info) | ||
1668 | { | ||
1669 | int r; | ||
1670 | char *data = NULL; | ||
1671 | |||
1672 | if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) | ||
1673 | return -EINVAL; | ||
1674 | |||
1675 | data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); | ||
1676 | |||
1677 | #ifdef CONFIG_WIRELESS_OLD_REGULATORY | ||
1678 | /* We ignore world regdom requests with the old regdom setup */ | ||
1679 | if (is_world_regdom(data)) | ||
1680 | return -EINVAL; | ||
1681 | #endif | ||
1682 | mutex_lock(&cfg80211_drv_mutex); | ||
1683 | r = __regulatory_hint(NULL, REGDOM_SET_BY_USER, data, NULL); | ||
1684 | mutex_unlock(&cfg80211_drv_mutex); | ||
1685 | return r; | ||
1686 | } | ||
1687 | |||
1688 | static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) | ||
1689 | { | ||
1690 | struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1]; | ||
1691 | struct nlattr *nl_reg_rule; | ||
1692 | char *alpha2 = NULL; | ||
1693 | int rem_reg_rules = 0, r = 0; | ||
1694 | u32 num_rules = 0, rule_idx = 0, size_of_regd; | ||
1695 | struct ieee80211_regdomain *rd = NULL; | ||
1696 | |||
1697 | if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) | ||
1698 | return -EINVAL; | ||
1699 | |||
1700 | if (!info->attrs[NL80211_ATTR_REG_RULES]) | ||
1701 | return -EINVAL; | ||
1702 | |||
1703 | alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); | ||
1704 | |||
1705 | nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], | ||
1706 | rem_reg_rules) { | ||
1707 | num_rules++; | ||
1708 | if (num_rules > NL80211_MAX_SUPP_REG_RULES) | ||
1709 | goto bad_reg; | ||
1710 | } | ||
1711 | |||
1712 | if (!reg_is_valid_request(alpha2)) | ||
1713 | return -EINVAL; | ||
1714 | |||
1715 | size_of_regd = sizeof(struct ieee80211_regdomain) + | ||
1716 | (num_rules * sizeof(struct ieee80211_reg_rule)); | ||
1717 | |||
1718 | rd = kzalloc(size_of_regd, GFP_KERNEL); | ||
1719 | if (!rd) | ||
1720 | return -ENOMEM; | ||
1721 | |||
1722 | rd->n_reg_rules = num_rules; | ||
1723 | rd->alpha2[0] = alpha2[0]; | ||
1724 | rd->alpha2[1] = alpha2[1]; | ||
1725 | |||
1726 | nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], | ||
1727 | rem_reg_rules) { | ||
1728 | nla_parse(tb, NL80211_REG_RULE_ATTR_MAX, | ||
1729 | nla_data(nl_reg_rule), nla_len(nl_reg_rule), | ||
1730 | reg_rule_policy); | ||
1731 | r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]); | ||
1732 | if (r) | ||
1733 | goto bad_reg; | ||
1734 | |||
1735 | rule_idx++; | ||
1736 | |||
1737 | if (rule_idx > NL80211_MAX_SUPP_REG_RULES) | ||
1738 | goto bad_reg; | ||
1739 | } | ||
1740 | |||
1741 | BUG_ON(rule_idx != num_rules); | ||
1742 | |||
1743 | mutex_lock(&cfg80211_drv_mutex); | ||
1744 | r = set_regdom(rd); | ||
1745 | mutex_unlock(&cfg80211_drv_mutex); | ||
1746 | if (r) | ||
1747 | goto bad_reg; | ||
1748 | |||
1749 | return r; | ||
1750 | |||
1751 | bad_reg: | ||
1752 | kfree(rd); | ||
1753 | return -EINVAL; | ||
1754 | } | ||
1755 | |||
1528 | static struct genl_ops nl80211_ops[] = { | 1756 | static struct genl_ops nl80211_ops[] = { |
1529 | { | 1757 | { |
1530 | .cmd = NL80211_CMD_GET_WIPHY, | 1758 | .cmd = NL80211_CMD_GET_WIPHY, |
@@ -1656,6 +1884,24 @@ static struct genl_ops nl80211_ops[] = { | |||
1656 | .policy = nl80211_policy, | 1884 | .policy = nl80211_policy, |
1657 | .flags = GENL_ADMIN_PERM, | 1885 | .flags = GENL_ADMIN_PERM, |
1658 | }, | 1886 | }, |
1887 | { | ||
1888 | .cmd = NL80211_CMD_SET_BSS, | ||
1889 | .doit = nl80211_set_bss, | ||
1890 | .policy = nl80211_policy, | ||
1891 | .flags = GENL_ADMIN_PERM, | ||
1892 | }, | ||
1893 | { | ||
1894 | .cmd = NL80211_CMD_SET_REG, | ||
1895 | .doit = nl80211_set_reg, | ||
1896 | .policy = nl80211_policy, | ||
1897 | .flags = GENL_ADMIN_PERM, | ||
1898 | }, | ||
1899 | { | ||
1900 | .cmd = NL80211_CMD_REQ_SET_REG, | ||
1901 | .doit = nl80211_req_set_reg, | ||
1902 | .policy = nl80211_policy, | ||
1903 | .flags = GENL_ADMIN_PERM, | ||
1904 | }, | ||
1659 | }; | 1905 | }; |
1660 | 1906 | ||
1661 | /* multicast groups */ | 1907 | /* multicast groups */ |
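NL80211_CMD_SET_REG, handled by nl80211_set_reg() above, expects NL80211_ATTR_REG_ALPHA2 plus an NL80211_ATTR_REG_RULES container whose nested entries each carry the attributes parse_reg_rule() requires: flags, frequency range start/end, maximum bandwidth, maximum EIRP, and optionally the antenna gain. The sketch below only illustrates that attribute layout with the kernel's nla_nest_*/nla_put_* helpers; a real sender would be userspace (CRDA or iw) building the same layout over generic netlink, and example_put_reg_rule() is a hypothetical name. Values mirror the 2.4 GHz rule of world_regdom: kHz for frequencies and bandwidth, mBi for gain, mBm for EIRP.

/* One nested regulatory rule as consumed by nl80211_set_reg(). */
static int example_put_reg_rule(struct sk_buff *msg, int idx)
{
	struct nlattr *rule = nla_nest_start(msg, idx);

	if (!rule)
		return -ENOBUFS;

	if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS,
			NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS) ||
	    nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START, 2402000) ||
	    nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END, 2472000) ||
	    nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW, 40000) ||
	    nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, 600) ||
	    nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP, 2000))
		return -ENOBUFS;

	nla_nest_end(msg, rule);
	return 0;
}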
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 855bff4b3250..626dbb688499 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -2,179 +2,871 @@ | |||
2 | * Copyright 2002-2005, Instant802 Networks, Inc. | 2 | * Copyright 2002-2005, Instant802 Networks, Inc. |
3 | * Copyright 2005-2006, Devicescape Software, Inc. | 3 | * Copyright 2005-2006, Devicescape Software, Inc. |
4 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> | 4 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> |
5 | * Copyright 2008 Luis R. Rodriguez <lrodriguz@atheros.com> | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
9 | */ | 10 | */ |
10 | 11 | ||
11 | /* | 12 | /** |
12 | * This regulatory domain control implementation is highly incomplete, it | 13 | * DOC: Wireless regulatory infrastructure |
13 | * only exists for the purpose of not regressing mac80211. | ||
14 | * | ||
15 | * For now, drivers can restrict the set of allowed channels by either | ||
16 | * not registering those channels or setting the IEEE80211_CHAN_DISABLED | ||
17 | * flag; that flag will only be *set* by this code, never *cleared. | ||
18 | * | 14 | * |
19 | * The usual implementation is for a driver to read a device EEPROM to | 15 | * The usual implementation is for a driver to read a device EEPROM to |
20 | * determine which regulatory domain it should be operating under, then | 16 | * determine which regulatory domain it should be operating under, then |
21 | * looking up the allowable channels in a driver-local table and finally | 17 | * looking up the allowable channels in a driver-local table and finally |
22 | * registering those channels in the wiphy structure. | 18 | * registering those channels in the wiphy structure. |
23 | * | 19 | * |
24 | * Alternatively, drivers that trust the regulatory domain control here | 20 | * Another set of compliance enforcement is for drivers to use their |
25 | * will register a complete set of capabilities and the control code | 21 | * own compliance limits which can be stored on the EEPROM. The host |
26 | * will restrict the set by setting the IEEE80211_CHAN_* flags. | 22 | * driver or firmware may ensure these are used. |
23 | * | ||
24 | * In addition to all this we provide an extra layer of regulatory | ||
25 | * conformance. For drivers which do not have any regulatory | ||
26 | * information CRDA provides the complete regulatory solution. | ||
27 | * For others it provides a community effort on further restrictions | ||
28 | * to enhance compliance. | ||
29 | * | ||
30 | * Note: When number of rules --> infinity we will not be able to | ||
31 | * index on alpha2 any more; instead we'll probably have to | ||
32 | * rely on some SHA1 checksum of the regdomain for example. | ||
33 | * | ||
27 | */ | 34 | */ |
28 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/list.h> | ||
37 | #include <linux/random.h> | ||
38 | #include <linux/nl80211.h> | ||
39 | #include <linux/platform_device.h> | ||
29 | #include <net/wireless.h> | 40 | #include <net/wireless.h> |
41 | #include <net/cfg80211.h> | ||
30 | #include "core.h" | 42 | #include "core.h" |
43 | #include "reg.h" | ||
31 | 44 | ||
32 | static char *ieee80211_regdom = "US"; | 45 | /* wiphy is set if this request's initiator is REGDOM_SET_BY_DRIVER */ |
33 | module_param(ieee80211_regdom, charp, 0444); | 46 | struct regulatory_request { |
34 | MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); | 47 | struct list_head list; |
35 | 48 | struct wiphy *wiphy; | |
36 | struct ieee80211_channel_range { | 49 | int granted; |
37 | short start_freq; | 50 | enum reg_set_by initiator; |
38 | short end_freq; | 51 | char alpha2[2]; |
39 | int max_power; | ||
40 | int max_antenna_gain; | ||
41 | u32 flags; | ||
42 | }; | 52 | }; |
43 | 53 | ||
44 | struct ieee80211_regdomain { | 54 | static LIST_HEAD(regulatory_requests); |
45 | const char *code; | 55 | DEFINE_MUTEX(cfg80211_reg_mutex); |
46 | const struct ieee80211_channel_range *ranges; | 56 | |
47 | int n_ranges; | 57 | /* To trigger userspace events */ |
58 | static struct platform_device *reg_pdev; | ||
59 | |||
60 | /* Keep the ordering from large to small */ | ||
61 | static u32 supported_bandwidths[] = { | ||
62 | MHZ_TO_KHZ(40), | ||
63 | MHZ_TO_KHZ(20), | ||
48 | }; | 64 | }; |
49 | 65 | ||
50 | #define RANGE_PWR(_start, _end, _pwr, _ag, _flags) \ | 66 | static struct list_head regulatory_requests; |
51 | { _start, _end, _pwr, _ag, _flags } | ||
52 | 67 | ||
68 | /* Central wireless core regulatory domains; we only need two: | ||
69 | * the current one and a world regulatory domain in case we have no | ||
70 | * information to give us an alpha2 */ | ||
71 | static const struct ieee80211_regdomain *cfg80211_regdomain; | ||
53 | 72 | ||
54 | /* | 73 | /* We keep a static world regulatory domain in case of the absence of CRDA */ |
55 | * Ideally, in the future, these definitions will be loaded from a | 74 | static const struct ieee80211_regdomain world_regdom = { |
56 | * userspace table via some daemon. | 75 | .n_reg_rules = 1, |
57 | */ | 76 | .alpha2 = "00", |
58 | static const struct ieee80211_channel_range ieee80211_US_channels[] = { | 77 | .reg_rules = { |
59 | /* IEEE 802.11b/g, channels 1..11 */ | 78 | REG_RULE(2412-10, 2462+10, 40, 6, 20, |
60 | RANGE_PWR(2412, 2462, 27, 6, 0), | 79 | NL80211_RRF_PASSIVE_SCAN | |
61 | /* IEEE 802.11a, channel 36*/ | 80 | NL80211_RRF_NO_IBSS), |
62 | RANGE_PWR(5180, 5180, 23, 6, 0), | 81 | } |
63 | /* IEEE 802.11a, channel 40*/ | ||
64 | RANGE_PWR(5200, 5200, 23, 6, 0), | ||
65 | /* IEEE 802.11a, channel 44*/ | ||
66 | RANGE_PWR(5220, 5220, 23, 6, 0), | ||
67 | /* IEEE 802.11a, channels 48..64 */ | ||
68 | RANGE_PWR(5240, 5320, 23, 6, 0), | ||
69 | /* IEEE 802.11a, channels 149..165, outdoor */ | ||
70 | RANGE_PWR(5745, 5825, 30, 6, 0), | ||
71 | }; | 82 | }; |
72 | 83 | ||
73 | static const struct ieee80211_channel_range ieee80211_JP_channels[] = { | 84 | static const struct ieee80211_regdomain *cfg80211_world_regdom = |
74 | /* IEEE 802.11b/g, channels 1..14 */ | 85 | &world_regdom; |
75 | RANGE_PWR(2412, 2484, 20, 6, 0), | 86 | |
76 | /* IEEE 802.11a, channels 34..48 */ | 87 | #ifdef CONFIG_WIRELESS_OLD_REGULATORY |
77 | RANGE_PWR(5170, 5240, 20, 6, IEEE80211_CHAN_PASSIVE_SCAN), | 88 | static char *ieee80211_regdom = "US"; |
78 | /* IEEE 802.11a, channels 52..64 */ | 89 | module_param(ieee80211_regdom, charp, 0444); |
79 | RANGE_PWR(5260, 5320, 20, 6, IEEE80211_CHAN_NO_IBSS | | 90 | MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); |
80 | IEEE80211_CHAN_RADAR), | 91 | |
81 | }; | 92 | /* We assume 40 MHz bandwidth for the old regulatory work. |
93 | * We emphasize that we are using the exact same frequencies | ||
94 | * as before */ | ||
82 | 95 | ||
83 | static const struct ieee80211_channel_range ieee80211_EU_channels[] = { | 96 | static const struct ieee80211_regdomain us_regdom = { |
84 | /* IEEE 802.11b/g, channels 1..13 */ | 97 | .n_reg_rules = 6, |
85 | RANGE_PWR(2412, 2472, 20, 6, 0), | 98 | .alpha2 = "US", |
86 | /* IEEE 802.11a, channel 36*/ | 99 | .reg_rules = { |
87 | RANGE_PWR(5180, 5180, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN), | 100 | /* IEEE 802.11b/g, channels 1..11 */ |
88 | /* IEEE 802.11a, channel 40*/ | 101 | REG_RULE(2412-10, 2462+10, 40, 6, 27, 0), |
89 | RANGE_PWR(5200, 5200, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN), | 102 | /* IEEE 802.11a, channel 36 */ |
90 | /* IEEE 802.11a, channel 44*/ | 103 | REG_RULE(5180-10, 5180+10, 40, 6, 23, 0), |
91 | RANGE_PWR(5220, 5220, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN), | 104 | /* IEEE 802.11a, channel 40 */ |
92 | /* IEEE 802.11a, channels 48..64 */ | 105 | REG_RULE(5200-10, 5200+10, 40, 6, 23, 0), |
93 | RANGE_PWR(5240, 5320, 23, 6, IEEE80211_CHAN_NO_IBSS | | 106 | /* IEEE 802.11a, channel 44 */ |
94 | IEEE80211_CHAN_RADAR), | 107 | REG_RULE(5220-10, 5220+10, 40, 6, 23, 0), |
95 | /* IEEE 802.11a, channels 100..140 */ | 108 | /* IEEE 802.11a, channels 48..64 */ |
96 | RANGE_PWR(5500, 5700, 30, 6, IEEE80211_CHAN_NO_IBSS | | 109 | REG_RULE(5240-10, 5320+10, 40, 6, 23, 0), |
97 | IEEE80211_CHAN_RADAR), | 110 | /* IEEE 802.11a, channels 149..165, outdoor */ |
111 | REG_RULE(5745-10, 5825+10, 40, 6, 30, 0), | ||
112 | } | ||
98 | }; | 113 | }; |
99 | 114 | ||
100 | #define REGDOM(_code) \ | 115 | static const struct ieee80211_regdomain jp_regdom = { |
101 | { \ | 116 | .n_reg_rules = 3, |
102 | .code = __stringify(_code), \ | 117 | .alpha2 = "JP", |
103 | .ranges = ieee80211_ ##_code## _channels, \ | 118 | .reg_rules = { |
104 | .n_ranges = ARRAY_SIZE(ieee80211_ ##_code## _channels), \ | 119 | /* IEEE 802.11b/g, channels 1..14 */ |
120 | REG_RULE(2412-10, 2484+10, 40, 6, 20, 0), | ||
121 | /* IEEE 802.11a, channels 34..48 */ | ||
122 | REG_RULE(5170-10, 5240+10, 40, 6, 20, | ||
123 | NL80211_RRF_PASSIVE_SCAN), | ||
124 | /* IEEE 802.11a, channels 52..64 */ | ||
125 | REG_RULE(5260-10, 5320+10, 40, 6, 20, | ||
126 | NL80211_RRF_NO_IBSS | | ||
127 | NL80211_RRF_DFS), | ||
105 | } | 128 | } |
129 | }; | ||
106 | 130 | ||
107 | static const struct ieee80211_regdomain ieee80211_regdoms[] = { | 131 | static const struct ieee80211_regdomain eu_regdom = { |
108 | REGDOM(US), | 132 | .n_reg_rules = 6, |
109 | REGDOM(JP), | 133 | /* This alpha2 is bogus, we leave it here just for stupid |
110 | REGDOM(EU), | 134 | * backward compatibility */ |
135 | .alpha2 = "EU", | ||
136 | .reg_rules = { | ||
137 | /* IEEE 802.11b/g, channels 1..13 */ | ||
138 | REG_RULE(2412-10, 2472+10, 40, 6, 20, 0), | ||
139 | /* IEEE 802.11a, channel 36 */ | ||
140 | REG_RULE(5180-10, 5180+10, 40, 6, 23, | ||
141 | NL80211_RRF_PASSIVE_SCAN), | ||
142 | /* IEEE 802.11a, channel 40 */ | ||
143 | REG_RULE(5200-10, 5200+10, 40, 6, 23, | ||
144 | NL80211_RRF_PASSIVE_SCAN), | ||
145 | /* IEEE 802.11a, channel 44 */ | ||
146 | REG_RULE(5220-10, 5220+10, 40, 6, 23, | ||
147 | NL80211_RRF_PASSIVE_SCAN), | ||
148 | /* IEEE 802.11a, channels 48..64 */ | ||
149 | REG_RULE(5240-10, 5320+10, 40, 6, 20, | ||
150 | NL80211_RRF_NO_IBSS | | ||
151 | NL80211_RRF_DFS), | ||
152 | /* IEEE 802.11a, channels 100..140 */ | ||
153 | REG_RULE(5500-10, 5700+10, 40, 6, 30, | ||
154 | NL80211_RRF_NO_IBSS | | ||
155 | NL80211_RRF_DFS), | ||
156 | } | ||
111 | }; | 157 | }; |
112 | 158 | ||
159 | static const struct ieee80211_regdomain *static_regdom(char *alpha2) | ||
160 | { | ||
161 | if (alpha2[0] == 'U' && alpha2[1] == 'S') | ||
162 | return &us_regdom; | ||
163 | if (alpha2[0] == 'J' && alpha2[1] == 'P') | ||
164 | return &jp_regdom; | ||
165 | if (alpha2[0] == 'E' && alpha2[1] == 'U') | ||
166 | return &eu_regdom; | ||
167 | /* Default, as per the old rules */ | ||
168 | return &us_regdom; | ||
169 | } | ||
170 | |||
171 | static bool is_old_static_regdom(const struct ieee80211_regdomain *rd) | ||
172 | { | ||
173 | if (rd == &us_regdom || rd == &jp_regdom || rd == &eu_regdom) | ||
174 | return true; | ||
175 | return false; | ||
176 | } | ||
177 | #else | ||
178 | static inline bool is_old_static_regdom(const struct ieee80211_regdomain *rd) | ||
179 | { | ||
180 | return false; | ||
181 | } | ||
182 | #endif | ||
113 | 183 | ||
114 | static const struct ieee80211_regdomain *get_regdom(void) | 184 | static void reset_regdomains(void) |
115 | { | 185 | { |
116 | static const struct ieee80211_channel_range | 186 | /* avoid freeing static information or freeing something twice */ |
117 | ieee80211_world_channels[] = { | 187 | if (cfg80211_regdomain == cfg80211_world_regdom) |
118 | /* IEEE 802.11b/g, channels 1..11 */ | 188 | cfg80211_regdomain = NULL; |
119 | RANGE_PWR(2412, 2462, 27, 6, 0), | 189 | if (cfg80211_world_regdom == &world_regdom) |
190 | cfg80211_world_regdom = NULL; | ||
191 | if (cfg80211_regdomain == &world_regdom) | ||
192 | cfg80211_regdomain = NULL; | ||
193 | if (is_old_static_regdom(cfg80211_regdomain)) | ||
194 | cfg80211_regdomain = NULL; | ||
195 | |||
196 | kfree(cfg80211_regdomain); | ||
197 | kfree(cfg80211_world_regdom); | ||
198 | |||
199 | cfg80211_world_regdom = &world_regdom; | ||
200 | cfg80211_regdomain = NULL; | ||
201 | } | ||
202 | |||
203 | /* Dynamic world regulatory domain requested by the wireless | ||
204 | * core upon initialization */ | ||
205 | static void update_world_regdomain(const struct ieee80211_regdomain *rd) | ||
206 | { | ||
207 | BUG_ON(list_empty(®ulatory_requests)); | ||
208 | |||
209 | reset_regdomains(); | ||
210 | |||
211 | cfg80211_world_regdom = rd; | ||
212 | cfg80211_regdomain = rd; | ||
213 | } | ||
214 | |||
215 | bool is_world_regdom(const char *alpha2) | ||
216 | { | ||
217 | if (!alpha2) | ||
218 | return false; | ||
219 | if (alpha2[0] == '0' && alpha2[1] == '0') | ||
220 | return true; | ||
221 | return false; | ||
222 | } | ||
223 | |||
224 | static bool is_alpha2_set(const char *alpha2) | ||
225 | { | ||
226 | if (!alpha2) | ||
227 | return false; | ||
228 | if (alpha2[0] != 0 && alpha2[1] != 0) | ||
229 | return true; | ||
230 | return false; | ||
231 | } | ||
232 | |||
233 | static bool is_alpha_upper(char letter) | ||
234 | { | ||
235 | /* ASCII A - Z */ | ||
236 | if (letter >= 65 && letter <= 90) | ||
237 | return true; | ||
238 | return false; | ||
239 | } | ||
240 | |||
241 | static bool is_unknown_alpha2(const char *alpha2) | ||
242 | { | ||
243 | if (!alpha2) | ||
244 | return false; | ||
245 | /* Special case where regulatory domain was built by driver | ||
246 | * but a specific alpha2 cannot be determined */ | ||
247 | if (alpha2[0] == '9' && alpha2[1] == '9') | ||
248 | return true; | ||
249 | return false; | ||
250 | } | ||
251 | |||
252 | static bool is_an_alpha2(const char *alpha2) | ||
253 | { | ||
254 | if (!alpha2) | ||
255 | return false; | ||
256 | if (is_alpha_upper(alpha2[0]) && is_alpha_upper(alpha2[1])) | ||
257 | return true; | ||
258 | return false; | ||
259 | } | ||
260 | |||
261 | static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y) | ||
262 | { | ||
263 | if (!alpha2_x || !alpha2_y) | ||
264 | return false; | ||
265 | if (alpha2_x[0] == alpha2_y[0] && | ||
266 | alpha2_x[1] == alpha2_y[1]) | ||
267 | return true; | ||
268 | return false; | ||
269 | } | ||
270 | |||
271 | static bool regdom_changed(const char *alpha2) | ||
272 | { | ||
273 | if (!cfg80211_regdomain) | ||
274 | return true; | ||
275 | if (alpha2_equal(cfg80211_regdomain->alpha2, alpha2)) | ||
276 | return false; | ||
277 | return true; | ||
278 | } | ||
279 | |||
280 | /* This lets us keep regulatory code which is updated on a regulatory | ||
281 | * basis in userspace. */ | ||
282 | static int call_crda(const char *alpha2) | ||
283 | { | ||
284 | char country_env[9 + 2] = "COUNTRY="; | ||
285 | char *envp[] = { | ||
286 | country_env, | ||
287 | NULL | ||
120 | }; | 288 | }; |
121 | static const struct ieee80211_regdomain regdom_world = REGDOM(world); | ||
122 | int i; | ||
123 | 289 | ||
124 | for (i = 0; i < ARRAY_SIZE(ieee80211_regdoms); i++) | 290 | if (!is_world_regdom((char *) alpha2)) |
125 | if (strcmp(ieee80211_regdom, ieee80211_regdoms[i].code) == 0) | 291 | printk(KERN_INFO "cfg80211: Calling CRDA for country: %c%c\n", |
126 | return &ieee80211_regdoms[i]; | 292 | alpha2[0], alpha2[1]); |
293 | else | ||
294 | printk(KERN_INFO "cfg80211: Calling CRDA to update world " | ||
295 | "regulatory domain\n"); | ||
296 | |||
297 | country_env[8] = alpha2[0]; | ||
298 | country_env[9] = alpha2[1]; | ||
127 | 299 | ||
128 | return ®dom_world; | 300 | return kobject_uevent_env(®_pdev->dev.kobj, KOBJ_CHANGE, envp); |
129 | } | 301 | } |
130 | 302 | ||
303 | /* This has the logic which determines when a new request | ||
304 | * should be ignored. */ | ||
305 | static int ignore_request(struct wiphy *wiphy, enum reg_set_by set_by, | ||
306 | char *alpha2, struct ieee80211_regdomain *rd) | ||
307 | { | ||
308 | struct regulatory_request *last_request = NULL; | ||
309 | |||
310 | /* All initial requests are respected */ | ||
311 | if (list_empty(®ulatory_requests)) | ||
312 | return 0; | ||
313 | |||
314 | last_request = list_first_entry(®ulatory_requests, | ||
315 | struct regulatory_request, list); | ||
131 | 316 | ||
132 | static void handle_channel(struct ieee80211_channel *chan, | 317 | switch (set_by) { |
133 | const struct ieee80211_regdomain *rd) | 318 | case REGDOM_SET_BY_INIT: |
319 | return -EINVAL; | ||
320 | case REGDOM_SET_BY_CORE: | ||
321 | /* Always respect new wireless core hints, should only | ||
322 | * come in for updating the world regulatory domain at init | ||
323 | * anyway */ | ||
324 | return 0; | ||
325 | case REGDOM_SET_BY_COUNTRY_IE: | ||
326 | if (last_request->initiator == set_by) { | ||
327 | if (last_request->wiphy != wiphy) { | ||
328 | /* Two cards with two APs claiming | ||
329 | * different Country IE alpha2s! | ||
330 | * You're special!! */ | ||
331 | if (!alpha2_equal(last_request->alpha2, | ||
332 | cfg80211_regdomain->alpha2)) { | ||
333 | /* XXX: Deal with conflict, consider | ||
334 | * building a new one out of the | ||
335 | * intersection */ | ||
336 | WARN_ON(1); | ||
337 | return -EOPNOTSUPP; | ||
338 | } | ||
339 | return -EALREADY; | ||
340 | } | ||
341 | /* Two consecutive Country IE hints on the same wiphy */ | ||
342 | if (!alpha2_equal(cfg80211_regdomain->alpha2, alpha2)) | ||
343 | return 0; | ||
344 | return -EALREADY; | ||
345 | } | ||
346 | if (WARN(!is_alpha2_set(alpha2) || !is_an_alpha2(alpha2), | ||
347 | "Invalid Country IE regulatory hint passed " | ||
348 | "to the wireless core\n")) | ||
349 | return -EINVAL; | ||
350 | /* We ignore Country IE hints for now, as we haven't yet | ||
351 | * added the dot11MultiDomainCapabilityEnabled flag | ||
352 | * for wiphys */ | ||
353 | return 1; | ||
354 | case REGDOM_SET_BY_DRIVER: | ||
355 | BUG_ON(!wiphy); | ||
356 | if (last_request->initiator == set_by) { | ||
357 | /* Two separate drivers hinting different things, | ||
358 | * this is possible if you have two devices present | ||
359 | * on a system with different EEPROM regulatory | ||
360 | * readings. XXX: Do intersection, we support only | ||
361 | * the first regulatory hint for now */ | ||
362 | if (last_request->wiphy != wiphy) | ||
363 | return -EALREADY; | ||
364 | if (rd) | ||
365 | return -EALREADY; | ||
366 | /* Driver should not be trying to hint different | ||
367 | * regulatory domains! */ | ||
368 | BUG_ON(!alpha2_equal(alpha2, | ||
369 | cfg80211_regdomain->alpha2)); | ||
370 | return -EALREADY; | ||
371 | } | ||
372 | if (last_request->initiator == REGDOM_SET_BY_CORE) | ||
373 | return 0; | ||
374 | /* XXX: Handle intersection, and add the | ||
375 | * dot11MultiDomainCapabilityEnabled flag to wiphy. For now | ||
376 | * we assume the driver has this set to false, following the | ||
377 | * 802.11d dot11MultiDomainCapabilityEnabled documentation */ | ||
378 | if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE) | ||
379 | return 0; | ||
380 | return 0; | ||
381 | case REGDOM_SET_BY_USER: | ||
382 | if (last_request->initiator == set_by || | ||
383 | last_request->initiator == REGDOM_SET_BY_CORE) | ||
384 | return 0; | ||
385 | /* Drivers can use their wiphy's reg_notifier() | ||
386 | * to override any information */ | ||
387 | if (last_request->initiator == REGDOM_SET_BY_DRIVER) | ||
388 | return 0; | ||
389 | /* XXX: Handle intersection */ | ||
390 | if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE) | ||
391 | return -EOPNOTSUPP; | ||
392 | return 0; | ||
393 | default: | ||
394 | return -EINVAL; | ||
395 | } | ||
396 | } | ||
397 | |||
398 | static bool __reg_is_valid_request(const char *alpha2, | ||
399 | struct regulatory_request **request) | ||
400 | { | ||
401 | struct regulatory_request *req; | ||
402 | if (list_empty(®ulatory_requests)) | ||
403 | return false; | ||
404 | list_for_each_entry(req, ®ulatory_requests, list) { | ||
405 | if (alpha2_equal(req->alpha2, alpha2)) { | ||
406 | *request = req; | ||
407 | return true; | ||
408 | } | ||
409 | } | ||
410 | return false; | ||
411 | } | ||
412 | |||
413 | /* Used by nl80211 before kmalloc'ing our regulatory domain */ | ||
414 | bool reg_is_valid_request(const char *alpha2) | ||
415 | { | ||
416 | struct regulatory_request *request = NULL; | ||
417 | return __reg_is_valid_request(alpha2, &request); | ||
418 | } | ||
419 | |||
420 | /* Sanity check on a regulatory rule */ | ||
421 | static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule) | ||
422 | { | ||
423 | const struct ieee80211_freq_range *freq_range = &rule->freq_range; | ||
424 | u32 freq_diff; | ||
425 | |||
426 | if (freq_range->start_freq_khz == 0 || freq_range->end_freq_khz == 0) | ||
427 | return false; | ||
428 | |||
429 | if (freq_range->start_freq_khz > freq_range->end_freq_khz) | ||
430 | return false; | ||
431 | |||
432 | freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; | ||
433 | |||
434 | if (freq_range->max_bandwidth_khz > freq_diff) | ||
435 | return false; | ||
436 | |||
437 | return true; | ||
438 | } | ||
439 | |||
440 | static bool is_valid_rd(const struct ieee80211_regdomain *rd) | ||
441 | { | ||
442 | const struct ieee80211_reg_rule *reg_rule = NULL; | ||
443 | unsigned int i; | ||
444 | |||
445 | if (!rd->n_reg_rules) | ||
446 | return false; | ||
447 | |||
448 | for (i = 0; i < rd->n_reg_rules; i++) { | ||
449 | reg_rule = &rd->reg_rules[i]; | ||
450 | if (!is_valid_reg_rule(reg_rule)) | ||
451 | return false; | ||
452 | } | ||
453 | |||
454 | return true; | ||
455 | } | ||
456 | |||
457 | /* Returns value in KHz */ | ||
458 | static u32 freq_max_bandwidth(const struct ieee80211_freq_range *freq_range, | ||
459 | u32 freq) | ||
460 | { | ||
461 | unsigned int i; | ||
462 | for (i = 0; i < ARRAY_SIZE(supported_bandwidths); i++) { | ||
463 | u32 start_freq_khz = freq - supported_bandwidths[i]/2; | ||
464 | u32 end_freq_khz = freq + supported_bandwidths[i]/2; | ||
465 | if (start_freq_khz >= freq_range->start_freq_khz && | ||
466 | end_freq_khz <= freq_range->end_freq_khz) | ||
467 | return supported_bandwidths[i]; | ||
468 | } | ||
469 | return 0; | ||
470 | } | ||
471 | |||
472 | /* XXX: add support for the rest of enum nl80211_reg_rule_flags, we may | ||
473 | * want to just have the channel structure use these */ | ||
474 | static u32 map_regdom_flags(u32 rd_flags) | ||
475 | { | ||
476 | u32 channel_flags = 0; | ||
477 | if (rd_flags & NL80211_RRF_PASSIVE_SCAN) | ||
478 | channel_flags |= IEEE80211_CHAN_PASSIVE_SCAN; | ||
479 | if (rd_flags & NL80211_RRF_NO_IBSS) | ||
480 | channel_flags |= IEEE80211_CHAN_NO_IBSS; | ||
481 | if (rd_flags & NL80211_RRF_DFS) | ||
482 | channel_flags |= IEEE80211_CHAN_RADAR; | ||
483 | return channel_flags; | ||
484 | } | ||
485 | |||
486 | /** | ||
487 | * freq_reg_info - get regulatory information for the given frequency | ||
488 | * @center_freq: Frequency in KHz for which we want regulatory information for | ||
489 | * @bandwidth: the bandwidth requirement you have in KHz, if you do not have one | ||
490 | * you can set this to 0. If this frequency is allowed we then set | ||
491 | * this value to the maximum allowed bandwidth. | ||
492 | * @reg_rule: the regulatory rule which we have for this frequency | ||
493 | * | ||
494 | * Use this function to get the regulatory rule for a specific frequency. | ||
495 | */ | ||
496 | static int freq_reg_info(u32 center_freq, u32 *bandwidth, | ||
497 | const struct ieee80211_reg_rule **reg_rule) | ||
134 | { | 498 | { |
135 | int i; | 499 | int i; |
136 | u32 flags = chan->orig_flags; | 500 | u32 max_bandwidth = 0; |
137 | const struct ieee80211_channel_range *rg = NULL; | ||
138 | 501 | ||
139 | for (i = 0; i < rd->n_ranges; i++) { | 502 | if (!cfg80211_regdomain) |
140 | if (rd->ranges[i].start_freq <= chan->center_freq && | 503 | return -EINVAL; |
141 | chan->center_freq <= rd->ranges[i].end_freq) { | 504 | |
142 | rg = &rd->ranges[i]; | 505 | for (i = 0; i < cfg80211_regdomain->n_reg_rules; i++) { |
506 | const struct ieee80211_reg_rule *rr; | ||
507 | const struct ieee80211_freq_range *fr = NULL; | ||
508 | const struct ieee80211_power_rule *pr = NULL; | ||
509 | |||
510 | rr = &cfg80211_regdomain->reg_rules[i]; | ||
511 | fr = &rr->freq_range; | ||
512 | pr = &rr->power_rule; | ||
513 | max_bandwidth = freq_max_bandwidth(fr, center_freq); | ||
514 | if (max_bandwidth && *bandwidth <= max_bandwidth) { | ||
515 | *reg_rule = rr; | ||
516 | *bandwidth = max_bandwidth; | ||
143 | break; | 517 | break; |
144 | } | 518 | } |
145 | } | 519 | } |
146 | 520 | ||
147 | if (!rg) { | 521 | return !max_bandwidth; |
148 | /* not found */ | 522 | } |
523 | |||
524 | static void handle_channel(struct ieee80211_channel *chan) | ||
525 | { | ||
526 | int r; | ||
527 | u32 flags = chan->orig_flags; | ||
528 | u32 max_bandwidth = 0; | ||
529 | const struct ieee80211_reg_rule *reg_rule = NULL; | ||
530 | const struct ieee80211_power_rule *power_rule = NULL; | ||
531 | |||
532 | r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq), | ||
533 | &max_bandwidth, ®_rule); | ||
534 | |||
535 | if (r) { | ||
149 | flags |= IEEE80211_CHAN_DISABLED; | 536 | flags |= IEEE80211_CHAN_DISABLED; |
150 | chan->flags = flags; | 537 | chan->flags = flags; |
151 | return; | 538 | return; |
152 | } | 539 | } |
153 | 540 | ||
154 | chan->flags = flags; | 541 | power_rule = ®_rule->power_rule; |
542 | |||
543 | chan->flags = flags | map_regdom_flags(reg_rule->flags); | ||
155 | chan->max_antenna_gain = min(chan->orig_mag, | 544 | chan->max_antenna_gain = min(chan->orig_mag, |
156 | rg->max_antenna_gain); | 545 | (int) MBI_TO_DBI(power_rule->max_antenna_gain)); |
546 | chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth); | ||
157 | if (chan->orig_mpwr) | 547 | if (chan->orig_mpwr) |
158 | chan->max_power = min(chan->orig_mpwr, rg->max_power); | 548 | chan->max_power = min(chan->orig_mpwr, |
549 | (int) MBM_TO_DBM(power_rule->max_eirp)); | ||
159 | else | 550 | else |
160 | chan->max_power = rg->max_power; | 551 | chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); |
161 | } | 552 | } |
162 | 553 | ||
163 | static void handle_band(struct ieee80211_supported_band *sband, | 554 | static void handle_band(struct ieee80211_supported_band *sband) |
164 | const struct ieee80211_regdomain *rd) | ||
165 | { | 555 | { |
166 | int i; | 556 | int i; |
167 | 557 | ||
168 | for (i = 0; i < sband->n_channels; i++) | 558 | for (i = 0; i < sband->n_channels; i++) |
169 | handle_channel(&sband->channels[i], rd); | 559 | handle_channel(&sband->channels[i]); |
170 | } | 560 | } |
171 | 561 | ||
172 | void wiphy_update_regulatory(struct wiphy *wiphy) | 562 | static void update_all_wiphy_regulatory(enum reg_set_by setby) |
173 | { | 563 | { |
174 | enum ieee80211_band band; | 564 | struct cfg80211_registered_device *drv; |
175 | const struct ieee80211_regdomain *rd = get_regdom(); | 565 | |
566 | list_for_each_entry(drv, &cfg80211_drv_list, list) | ||
567 | wiphy_update_regulatory(&drv->wiphy, setby); | ||
568 | } | ||
176 | 569 | ||
177 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) | 570 | void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby) |
571 | { | ||
572 | enum ieee80211_band band; | ||
573 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | ||
178 | if (wiphy->bands[band]) | 574 | if (wiphy->bands[band]) |
179 | handle_band(wiphy->bands[band], rd); | 575 | handle_band(wiphy->bands[band]); |
576 | if (wiphy->reg_notifier) | ||
577 | wiphy->reg_notifier(wiphy, setby); | ||
578 | } | ||
579 | } | ||
580 | |||
581 | /* Caller must hold &cfg80211_drv_mutex */ | ||
582 | int __regulatory_hint(struct wiphy *wiphy, enum reg_set_by set_by, | ||
583 | const char *alpha2, struct ieee80211_regdomain *rd) | ||
584 | { | ||
585 | struct regulatory_request *request; | ||
586 | char *rd_alpha2; | ||
587 | int r = 0; | ||
588 | |||
589 | r = ignore_request(wiphy, set_by, (char *) alpha2, rd); | ||
590 | if (r) | ||
591 | return r; | ||
592 | |||
593 | if (rd) | ||
594 | rd_alpha2 = rd->alpha2; | ||
595 | else | ||
596 | rd_alpha2 = (char *) alpha2; | ||
597 | |||
598 | switch (set_by) { | ||
599 | case REGDOM_SET_BY_CORE: | ||
600 | case REGDOM_SET_BY_COUNTRY_IE: | ||
601 | case REGDOM_SET_BY_DRIVER: | ||
602 | case REGDOM_SET_BY_USER: | ||
603 | request = kzalloc(sizeof(struct regulatory_request), | ||
604 | GFP_KERNEL); | ||
605 | if (!request) | ||
606 | return -ENOMEM; | ||
607 | |||
608 | request->alpha2[0] = rd_alpha2[0]; | ||
609 | request->alpha2[1] = rd_alpha2[1]; | ||
610 | request->initiator = set_by; | ||
611 | request->wiphy = wiphy; | ||
612 | |||
613 | list_add_tail(&request->list, ®ulatory_requests); | ||
614 | if (rd) | ||
615 | break; | ||
616 | r = call_crda(alpha2); | ||
617 | #ifndef CONFIG_WIRELESS_OLD_REGULATORY | ||
618 | if (r) | ||
619 | printk(KERN_ERR "cfg80211: Failed calling CRDA\n"); | ||
620 | #endif | ||
621 | break; | ||
622 | default: | ||
623 | r = -ENOTSUPP; | ||
624 | break; | ||
625 | } | ||
626 | |||
627 | return r; | ||
628 | } | ||
629 | |||
630 | /* If rd is not NULL and if this call fails the caller must free it */ | ||
631 | int regulatory_hint(struct wiphy *wiphy, const char *alpha2, | ||
632 | struct ieee80211_regdomain *rd) | ||
633 | { | ||
634 | int r; | ||
635 | BUG_ON(!rd && !alpha2); | ||
636 | |||
637 | mutex_lock(&cfg80211_drv_mutex); | ||
638 | |||
639 | r = __regulatory_hint(wiphy, REGDOM_SET_BY_DRIVER, alpha2, rd); | ||
640 | if (r || !rd) | ||
641 | goto unlock_and_exit; | ||
642 | |||
643 | /* If the driver passed a regulatory domain we skipped asking | ||
644 | * userspace for one, so we can now go ahead and set it */ | ||
645 | r = set_regdom(rd); | ||
646 | |||
647 | unlock_and_exit: | ||
648 | mutex_unlock(&cfg80211_drv_mutex); | ||
649 | return r; | ||
650 | } | ||
651 | EXPORT_SYMBOL(regulatory_hint); | ||
652 | |||
653 | |||
654 | static void print_rd_rules(const struct ieee80211_regdomain *rd) | ||
655 | { | ||
656 | unsigned int i; | ||
657 | const struct ieee80211_reg_rule *reg_rule = NULL; | ||
658 | const struct ieee80211_freq_range *freq_range = NULL; | ||
659 | const struct ieee80211_power_rule *power_rule = NULL; | ||
660 | |||
661 | printk(KERN_INFO "\t(start_freq - end_freq @ bandwidth), " | ||
662 | "(max_antenna_gain, max_eirp)\n"); | ||
663 | |||
664 | for (i = 0; i < rd->n_reg_rules; i++) { | ||
665 | reg_rule = &rd->reg_rules[i]; | ||
666 | freq_range = ®_rule->freq_range; | ||
667 | power_rule = ®_rule->power_rule; | ||
668 | |||
669 | /* There may not be documentation for max antenna gain | ||
670 | * in certain regions */ | ||
671 | if (power_rule->max_antenna_gain) | ||
672 | printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), " | ||
673 | "(%d mBi, %d mBm)\n", | ||
674 | freq_range->start_freq_khz, | ||
675 | freq_range->end_freq_khz, | ||
676 | freq_range->max_bandwidth_khz, | ||
677 | power_rule->max_antenna_gain, | ||
678 | power_rule->max_eirp); | ||
679 | else | ||
680 | printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), " | ||
681 | "(N/A, %d mBm)\n", | ||
682 | freq_range->start_freq_khz, | ||
683 | freq_range->end_freq_khz, | ||
684 | freq_range->max_bandwidth_khz, | ||
685 | power_rule->max_eirp); | ||
686 | } | ||
687 | } | ||
688 | |||
689 | static void print_regdomain(const struct ieee80211_regdomain *rd) | ||
690 | { | ||
691 | |||
692 | if (is_world_regdom(rd->alpha2)) | ||
693 | printk(KERN_INFO "cfg80211: World regulatory " | ||
694 | "domain updated:\n"); | ||
695 | else { | ||
696 | if (is_unknown_alpha2(rd->alpha2)) | ||
697 | printk(KERN_INFO "cfg80211: Regulatory domain " | ||
698 | "changed to driver built-in settings " | ||
699 | "(unknown country)\n"); | ||
700 | else | ||
701 | printk(KERN_INFO "cfg80211: Regulatory domain " | ||
702 | "changed to country: %c%c\n", | ||
703 | rd->alpha2[0], rd->alpha2[1]); | ||
704 | } | ||
705 | print_rd_rules(rd); | ||
706 | } | ||
707 | |||
708 | void print_regdomain_info(const struct ieee80211_regdomain *rd) | ||
709 | { | ||
710 | printk(KERN_INFO "cfg80211: Regulatory domain: %c%c\n", | ||
711 | rd->alpha2[0], rd->alpha2[1]); | ||
712 | print_rd_rules(rd); | ||
713 | } | ||
714 | |||
715 | static int __set_regdom(const struct ieee80211_regdomain *rd) | ||
716 | { | ||
717 | struct regulatory_request *request = NULL; | ||
718 | |||
719 | /* Some basic sanity checks first */ | ||
720 | |||
721 | if (is_world_regdom(rd->alpha2)) { | ||
722 | if (WARN_ON(!__reg_is_valid_request(rd->alpha2, &request))) | ||
723 | return -EINVAL; | ||
724 | update_world_regdomain(rd); | ||
725 | return 0; | ||
726 | } | ||
727 | |||
728 | if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) && | ||
729 | !is_unknown_alpha2(rd->alpha2)) | ||
730 | return -EINVAL; | ||
731 | |||
732 | if (list_empty(®ulatory_requests)) | ||
733 | return -EINVAL; | ||
734 | |||
735 | /* allow overriding the static definitions if CRDA is present */ | ||
736 | if (!is_old_static_regdom(cfg80211_regdomain) && | ||
737 | !regdom_changed(rd->alpha2)) | ||
738 | return -EINVAL; | ||
739 | |||
740 | /* Now let's set the regulatory domain, update all driver channels | ||
741 | * and finally inform them of what we have done, in case they want | ||
742 | * to review or adjust their own settings based on their own | ||
743 | * internal EEPROM data */ | ||
744 | |||
745 | if (WARN_ON(!__reg_is_valid_request(rd->alpha2, &request))) | ||
746 | return -EINVAL; | ||
747 | |||
748 | reset_regdomains(); | ||
749 | |||
750 | /* Country IE parsing coming soon */ | ||
751 | switch (request->initiator) { | ||
752 | case REGDOM_SET_BY_CORE: | ||
753 | case REGDOM_SET_BY_DRIVER: | ||
754 | case REGDOM_SET_BY_USER: | ||
755 | if (!is_valid_rd(rd)) { | ||
756 | printk(KERN_ERR "cfg80211: Invalid " | ||
757 | "regulatory domain detected:\n"); | ||
758 | print_regdomain_info(rd); | ||
759 | return -EINVAL; | ||
760 | } | ||
761 | break; | ||
762 | case REGDOM_SET_BY_COUNTRY_IE: /* Not yet */ | ||
763 | WARN_ON(1); | ||
764 | default: | ||
765 | return -EOPNOTSUPP; | ||
766 | } | ||
767 | |||
768 | /* Tada! */ | ||
769 | cfg80211_regdomain = rd; | ||
770 | request->granted = 1; | ||
771 | |||
772 | return 0; | ||
773 | } | ||
774 | |||
775 | |||
776 | /* Use this call to set the current regulatory domain. Conflicts with | ||
777 | * multiple drivers can be ironed out later. Caller must've already | ||
778 | * kmalloc'd the rd structure. If this call fails you should kfree() | ||
779 | * the passed rd. Caller must hold cfg80211_drv_mutex */ | ||
780 | int set_regdom(const struct ieee80211_regdomain *rd) | ||
781 | { | ||
782 | struct regulatory_request *this_request = NULL, *prev_request = NULL; | ||
783 | int r; | ||
784 | |||
785 | if (!list_empty(®ulatory_requests)) | ||
786 | prev_request = list_first_entry(®ulatory_requests, | ||
787 | struct regulatory_request, list); | ||
788 | |||
789 | /* Note that this doesn't update the wiphys; that is done below */ | ||
790 | r = __set_regdom(rd); | ||
791 | if (r) | ||
792 | return r; | ||
793 | |||
794 | BUG_ON((!__reg_is_valid_request(rd->alpha2, &this_request))); | ||
795 | |||
796 | /* This was the initial standard core update of the world regulatory | ||
797 | * domain; no need to keep that request info around if it didn't fail. */ | ||
798 | if (is_world_regdom(rd->alpha2) && | ||
799 | this_request->initiator == REGDOM_SET_BY_CORE && | ||
800 | this_request->granted) { | ||
801 | list_del(&this_request->list); | ||
802 | kfree(this_request); | ||
803 | this_request = NULL; | ||
804 | } | ||
805 | |||
806 | /* Remove old requests; we only leave behind the last one */ | ||
807 | if (prev_request) { | ||
808 | list_del(&prev_request->list); | ||
809 | kfree(prev_request); | ||
810 | prev_request = NULL; | ||
811 | } | ||
812 | |||
813 | /* This would make this whole thing pointless */ | ||
814 | BUG_ON(rd != cfg80211_regdomain); | ||
815 | |||
816 | /* update all wiphys now with the newly established regulatory domain */ | ||
817 | update_all_wiphy_regulatory(this_request->initiator); | ||
818 | |||
819 | print_regdomain(rd); | ||
820 | |||
821 | return r; | ||
822 | } | ||
823 | |||
824 | int regulatory_init(void) | ||
825 | { | ||
826 | int err; | ||
827 | |||
828 | reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0); | ||
829 | if (IS_ERR(reg_pdev)) | ||
830 | return PTR_ERR(reg_pdev); | ||
831 | |||
832 | #ifdef CONFIG_WIRELESS_OLD_REGULATORY | ||
833 | cfg80211_regdomain = static_regdom(ieee80211_regdom); | ||
834 | |||
835 | printk(KERN_INFO "cfg80211: Using static regulatory domain info\n"); | ||
836 | print_regdomain_info(cfg80211_regdomain); | ||
837 | /* The old code still requests a new regdomain and if | ||
838 | * you have CRDA you get it updated, otherwise you get | ||
839 | * stuck with the static values. We ignore "EU" code as | ||
840 | * that is not a valid ISO / IEC 3166 alpha2 */ | ||
841 | if (ieee80211_regdom[0] != 'E' && ieee80211_regdom[1] != 'U') | ||
842 | err = __regulatory_hint(NULL, REGDOM_SET_BY_CORE, | ||
843 | ieee80211_regdom, NULL); | ||
844 | #else | ||
845 | cfg80211_regdomain = cfg80211_world_regdom; | ||
846 | |||
847 | err = __regulatory_hint(NULL, REGDOM_SET_BY_CORE, "00", NULL); | ||
848 | if (err) | ||
849 | printk(KERN_ERR "cfg80211: calling CRDA failed - " | ||
850 | "unable to update world regulatory domain, " | ||
851 | "using static definition\n"); | ||
852 | #endif | ||
853 | |||
854 | return 0; | ||
855 | } | ||
856 | |||
857 | void regulatory_exit(void) | ||
858 | { | ||
859 | struct regulatory_request *req, *req_tmp; | ||
860 | |||
861 | mutex_lock(&cfg80211_drv_mutex); | ||
862 | |||
863 | reset_regdomains(); | ||
864 | |||
865 | list_for_each_entry_safe(req, req_tmp, ®ulatory_requests, list) { | ||
866 | list_del(&req->list); | ||
867 | kfree(req); | ||
868 | } | ||
869 | platform_device_unregister(reg_pdev); | ||
870 | |||
871 | mutex_unlock(&cfg80211_drv_mutex); | ||
180 | } | 872 | } |
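For orientation, the rules that is_valid_rd() checks and handle_channel() applies come from an ieee80211_regdomain of the shape sketched below. This is an illustrative, hypothetical domain only: the field names, units (KHz, mBi, mBm) and the MHZ_TO_KHZ() helper follow the code above, while the "XX" alpha2, the frequency ranges and the power limits are made up for the example, and the static initialization of the flexible reg_rules[] array relies on a GCC extension.

/*
 * Hypothetical regulatory domain of the shape set_regdom() expects.
 * Values are illustrative; real domains come from CRDA or the driver.
 */
static const struct ieee80211_regdomain example_regdom = {
	.n_reg_rules = 2,
	.alpha2 = "XX",				/* made-up country code */
	.reg_rules = {
		{
			/* 2.4 GHz rule: 2402-2482 MHz, up to 40 MHz channels */
			.freq_range.start_freq_khz    = MHZ_TO_KHZ(2402),
			.freq_range.end_freq_khz      = MHZ_TO_KHZ(2482),
			.freq_range.max_bandwidth_khz = MHZ_TO_KHZ(40),
			.power_rule.max_antenna_gain  = 600,	/* mBi (6 dBi) */
			.power_rule.max_eirp          = 2000,	/* mBm (20 dBm) */
		},
		{
			/* 5 GHz DFS-style rule: passive scan, no IBSS, radar */
			.freq_range.start_freq_khz    = MHZ_TO_KHZ(5250),
			.freq_range.end_freq_khz      = MHZ_TO_KHZ(5330),
			.freq_range.max_bandwidth_khz = MHZ_TO_KHZ(40),
			.power_rule.max_eirp          = 2000,	/* mBm (20 dBm) */
			.flags = NL80211_RRF_PASSIVE_SCAN |
				 NL80211_RRF_NO_IBSS |
				 NL80211_RRF_DFS,
		},
	},
};

Both rules satisfy is_valid_reg_rule(): the start frequency is below the end frequency and the 40 MHz maximum bandwidth fits inside each 80 MHz range; a rule without documented antenna gain simply leaves max_antenna_gain at 0, which print_rd_rules() reports as "N/A".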
diff --git a/net/wireless/reg.h b/net/wireless/reg.h new file mode 100644 index 000000000000..a33362872f3c --- /dev/null +++ b/net/wireless/reg.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __NET_WIRELESS_REG_H | ||
2 | #define __NET_WIRELESS_REG_H | ||
3 | |||
4 | extern struct mutex cfg80211_reg_mutex; | ||
5 | bool is_world_regdom(const char *alpha2); | ||
6 | bool reg_is_valid_request(const char *alpha2); | ||
7 | |||
8 | int regulatory_init(void); | ||
9 | void regulatory_exit(void); | ||
10 | |||
11 | int set_regdom(const struct ieee80211_regdomain *rd); | ||
12 | |||
13 | #endif /* __NET_WIRELESS_REG_H */ | ||
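Taken together with the exported regulatory_hint() above, a driver that learns its country code from EEPROM would feed it to cfg80211 roughly as in the sketch below. The wiphy handle and the EEPROM helper are hypothetical; passing rd == NULL defers the actual rules to userspace (CRDA), as __regulatory_hint() describes.

/* Sketch of driver-side usage; my_read_eeprom_alpha2() is a placeholder. */
static int my_driver_set_regulatory(struct wiphy *wiphy)
{
	const char *alpha2 = my_read_eeprom_alpha2();	/* e.g. "US", "JP" */

	/*
	 * rd == NULL: queue a request for this alpha2 and let CRDA supply
	 * the actual ieee80211_regdomain.  A driver with built-in rules
	 * could instead pass a kmalloc'd regdomain as the third argument,
	 * freeing it itself only if the call fails.
	 */
	return regulatory_hint(wiphy, alpha2, NULL);
}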
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index b7754b1b73a4..ef9ccbc38752 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -34,7 +34,7 @@ | |||
34 | 34 | ||
35 | #include "xfrm_hash.h" | 35 | #include "xfrm_hash.h" |
36 | 36 | ||
37 | int sysctl_xfrm_larval_drop __read_mostly; | 37 | int sysctl_xfrm_larval_drop __read_mostly = 1; |
38 | 38 | ||
39 | #ifdef CONFIG_XFRM_STATISTICS | 39 | #ifdef CONFIG_XFRM_STATISTICS |
40 | DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly; | 40 | DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly; |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 0a8f09c3144c..053970e8765d 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -59,6 +59,14 @@ static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; | |||
59 | static unsigned int xfrm_state_num; | 59 | static unsigned int xfrm_state_num; |
60 | static unsigned int xfrm_state_genid; | 60 | static unsigned int xfrm_state_genid; |
61 | 61 | ||
62 | /* Counter indicating ongoing walk, protected by xfrm_state_lock. */ | ||
63 | static unsigned long xfrm_state_walk_ongoing; | ||
64 | /* Counter indicating walk completion, protected by xfrm_cfg_mutex. */ | ||
65 | static unsigned long xfrm_state_walk_completed; | ||
66 | |||
67 | /* List of outstanding state walks used to set the completed counter. */ | ||
68 | static LIST_HEAD(xfrm_state_walks); | ||
69 | |||
62 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); | 70 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); |
63 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); | 71 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); |
64 | 72 | ||
@@ -191,7 +199,8 @@ static DEFINE_RWLOCK(xfrm_state_afinfo_lock); | |||
191 | static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO]; | 199 | static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO]; |
192 | 200 | ||
193 | static struct work_struct xfrm_state_gc_work; | 201 | static struct work_struct xfrm_state_gc_work; |
194 | static HLIST_HEAD(xfrm_state_gc_list); | 202 | static LIST_HEAD(xfrm_state_gc_leftovers); |
203 | static LIST_HEAD(xfrm_state_gc_list); | ||
195 | static DEFINE_SPINLOCK(xfrm_state_gc_lock); | 204 | static DEFINE_SPINLOCK(xfrm_state_gc_lock); |
196 | 205 | ||
197 | int __xfrm_state_delete(struct xfrm_state *x); | 206 | int __xfrm_state_delete(struct xfrm_state *x); |
@@ -403,17 +412,23 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x) | |||
403 | 412 | ||
404 | static void xfrm_state_gc_task(struct work_struct *data) | 413 | static void xfrm_state_gc_task(struct work_struct *data) |
405 | { | 414 | { |
406 | struct xfrm_state *x; | 415 | struct xfrm_state *x, *tmp; |
407 | struct hlist_node *entry, *tmp; | 416 | unsigned long completed; |
408 | struct hlist_head gc_list; | ||
409 | 417 | ||
418 | mutex_lock(&xfrm_cfg_mutex); | ||
410 | spin_lock_bh(&xfrm_state_gc_lock); | 419 | spin_lock_bh(&xfrm_state_gc_lock); |
411 | gc_list.first = xfrm_state_gc_list.first; | 420 | list_splice_tail_init(&xfrm_state_gc_list, &xfrm_state_gc_leftovers); |
412 | INIT_HLIST_HEAD(&xfrm_state_gc_list); | ||
413 | spin_unlock_bh(&xfrm_state_gc_lock); | 421 | spin_unlock_bh(&xfrm_state_gc_lock); |
414 | 422 | ||
415 | hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst) | 423 | completed = xfrm_state_walk_completed; |
424 | mutex_unlock(&xfrm_cfg_mutex); | ||
425 | |||
426 | list_for_each_entry_safe(x, tmp, &xfrm_state_gc_leftovers, gclist) { | ||
427 | if ((long)(x->lastused - completed) > 0) | ||
428 | break; | ||
429 | list_del(&x->gclist); | ||
416 | xfrm_state_gc_destroy(x); | 430 | xfrm_state_gc_destroy(x); |
431 | } | ||
417 | 432 | ||
418 | wake_up(&km_waitq); | 433 | wake_up(&km_waitq); |
419 | } | 434 | } |
@@ -540,12 +555,8 @@ void __xfrm_state_destroy(struct xfrm_state *x) | |||
540 | { | 555 | { |
541 | WARN_ON(x->km.state != XFRM_STATE_DEAD); | 556 | WARN_ON(x->km.state != XFRM_STATE_DEAD); |
542 | 557 | ||
543 | spin_lock_bh(&xfrm_state_lock); | ||
544 | list_del(&x->all); | ||
545 | spin_unlock_bh(&xfrm_state_lock); | ||
546 | |||
547 | spin_lock_bh(&xfrm_state_gc_lock); | 558 | spin_lock_bh(&xfrm_state_gc_lock); |
548 | hlist_add_head(&x->bydst, &xfrm_state_gc_list); | 559 | list_add_tail(&x->gclist, &xfrm_state_gc_list); |
549 | spin_unlock_bh(&xfrm_state_gc_lock); | 560 | spin_unlock_bh(&xfrm_state_gc_lock); |
550 | schedule_work(&xfrm_state_gc_work); | 561 | schedule_work(&xfrm_state_gc_work); |
551 | } | 562 | } |
@@ -558,6 +569,8 @@ int __xfrm_state_delete(struct xfrm_state *x) | |||
558 | if (x->km.state != XFRM_STATE_DEAD) { | 569 | if (x->km.state != XFRM_STATE_DEAD) { |
559 | x->km.state = XFRM_STATE_DEAD; | 570 | x->km.state = XFRM_STATE_DEAD; |
560 | spin_lock(&xfrm_state_lock); | 571 | spin_lock(&xfrm_state_lock); |
572 | x->lastused = xfrm_state_walk_ongoing; | ||
573 | list_del_rcu(&x->all); | ||
561 | hlist_del(&x->bydst); | 574 | hlist_del(&x->bydst); |
562 | hlist_del(&x->bysrc); | 575 | hlist_del(&x->bysrc); |
563 | if (x->id.spi) | 576 | if (x->id.spi) |
@@ -1594,6 +1607,41 @@ out: | |||
1594 | } | 1607 | } |
1595 | EXPORT_SYMBOL(xfrm_state_walk); | 1608 | EXPORT_SYMBOL(xfrm_state_walk); |
1596 | 1609 | ||
1610 | void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto) | ||
1611 | { | ||
1612 | walk->proto = proto; | ||
1613 | walk->state = NULL; | ||
1614 | walk->count = 0; | ||
1615 | list_add_tail(&walk->list, &xfrm_state_walks); | ||
1616 | walk->genid = ++xfrm_state_walk_ongoing; | ||
1617 | } | ||
1618 | EXPORT_SYMBOL(xfrm_state_walk_init); | ||
1619 | |||
1620 | void xfrm_state_walk_done(struct xfrm_state_walk *walk) | ||
1621 | { | ||
1622 | struct list_head *prev; | ||
1623 | |||
1624 | if (walk->state != NULL) { | ||
1625 | xfrm_state_put(walk->state); | ||
1626 | walk->state = NULL; | ||
1627 | } | ||
1628 | |||
1629 | prev = walk->list.prev; | ||
1630 | list_del(&walk->list); | ||
1631 | |||
1632 | if (prev != &xfrm_state_walks) { | ||
1633 | list_entry(prev, struct xfrm_state_walk, list)->genid = | ||
1634 | walk->genid; | ||
1635 | return; | ||
1636 | } | ||
1637 | |||
1638 | xfrm_state_walk_completed = walk->genid; | ||
1639 | |||
1640 | if (!list_empty(&xfrm_state_gc_leftovers)) | ||
1641 | schedule_work(&xfrm_state_gc_work); | ||
1642 | } | ||
1643 | EXPORT_SYMBOL(xfrm_state_walk_done); | ||
1644 | |||
1597 | 1645 | ||
1598 | void xfrm_replay_notify(struct xfrm_state *x, int event) | 1646 | void xfrm_replay_notify(struct xfrm_state *x, int event) |
1599 | { | 1647 | { |
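The xfrm_state_walk_init()/xfrm_state_walk_done() pair added above is what lets the gc task defer destruction until all outstanding dumps have finished. A dump path would bracket its iteration roughly as sketched below; only the two walk helpers are from this patch, the dump body is a placeholder, and the locking of the walk list is assumed to be handled by the caller along the lines of the xfrm_cfg_mutex / xfrm_state_lock comments above.

/* Hypothetical dump path: only the walk bracketing reflects this patch. */
static int dump_all_esp_states(void *data)
{
	struct xfrm_state_walk walk;
	int err = 0;

	xfrm_state_walk_init(&walk, IPPROTO_ESP);	/* bumps walk_ongoing */

	/* ... iterate the ESP states with the existing dumper here ... */

	xfrm_state_walk_done(&walk);	/* may advance walk_completed and
					 * reschedule the gc work for any
					 * deferred xfrm_state_gc_leftovers */
	return err;
}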