author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-14 13:37:28 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-14 13:37:28 -0400
commit		d7e9660ad9d5e0845f52848bce31bcf5cdcdea6b
tree		c6c67d145771187b194d79d603742b31090a59d6 /net
parent		b8cb48aae1b8c50b37dcb7710363aa69a7a0d9ca
parent		13af7a6ea502fcdd4c0e3d7de6e332b102309491
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1623 commits)
netxen: update copyright
netxen: fix tx timeout recovery
netxen: fix file firmware leak
netxen: improve pci memory access
netxen: change firmware write size
tg3: Fix return ring size breakage
netxen: build fix for INET=n
cdc-phonet: autoconfigure Phonet address
Phonet: back-end for autoconfigured addresses
Phonet: fix netlink address dump error handling
ipv6: Add IFA_F_DADFAILED flag
net: Add DEVTYPE support for Ethernet based devices
mv643xx_eth.c: remove unused txq_set_wrr()
ucc_geth: Fix hangs after switching from full to half duplex
ucc_geth: Rearrange some code to avoid forward declarations
phy/marvell: Make non-aneg speed/duplex forcing work for 88E1111 PHYs
drivers/net/phy: introduce missing kfree
drivers/net/wan: introduce missing kfree
net: force bridge module(s) to be GPL
Subject: [PATCH] appletalk: Fix skb leak when ipddp interface is not loaded
...
Fixed up trivial conflicts:
- arch/x86/include/asm/socket.h
converted to <asm-generic/socket.h> in the x86 tree. The generic
header has the same new #define's, so that works out fine.
- drivers/net/tun.c
fix conflict between 89f56d1e9 ("tun: reuse struct sock fields") that
switched over to using 'tun->socket.sk' instead of the redundantly
available (and thus removed) 'tun->sk', and 2b980dbd ("lsm: Add hooks
to the TUN driver") which added a new 'tun->sk' use.
Noted in 'next' by Stephen Rothwell.
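
As a side note, the tun.c resolution boils down to pointing the surviving user at the embedded socket. A stand-in sketch with mock types (illustrative only, not the actual drivers/net/tun.c code or the real LSM hook):

/* illustrative stand-ins; only the field move matters here */
#include <stdio.h>

struct sock   { int id; };
struct socket { struct sock *sk; };

struct tun_struct {
	struct socket socket;	/* 89f56d1e9: replaces the removed 'struct sock *sk' member */
};

/* stand-in for the hook that 2b980dbd added against the old 'tun->sk' */
static int lsm_attach_hook(struct sock *sk)
{
	return sk ? 0 : -1;
}

int main(void)
{
	struct sock sk = { .id = 1 };
	struct tun_struct tun = { .socket = { .sk = &sk } };

	/* resolved call site: tun.socket.sk instead of the removed tun.sk */
	printf("attach hook: %d\n", lsm_attach_hook(tun.socket.sk));
	return 0;
}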
Diffstat (limited to 'net')
328 files changed, 16882 insertions, 8111 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index fe649081fbdc..8836575f9d79 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -225,12 +225,6 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 		return -EOPNOTSUPP;
 	}
 
-	/* The real device must be up and operating in order to
-	 * assosciate a VLAN device with it.
-	 */
-	if (!(real_dev->flags & IFF_UP))
-		return -ENETDOWN;
-
 	if (__find_vlan_dev(real_dev, vlan_id) != NULL)
 		return -EEXIST;
 
@@ -336,12 +330,13 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
 	}
 
-	new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name,
-			       vlan_setup);
+	new_dev = alloc_netdev_mq(sizeof(struct vlan_dev_info), name,
+				  vlan_setup, real_dev->num_tx_queues);
 
 	if (new_dev == NULL)
 		return -ENOBUFS;
 
+	new_dev->real_num_tx_queues = real_dev->real_num_tx_queues;
 	dev_net_set(new_dev, net);
 	/* need 4 bytes for extra VLAN header info,
 	 * hope the underlying device can handle it.
@@ -397,6 +392,9 @@ static void vlan_transfer_features(struct net_device *dev,
 	vlandev->features &= ~dev->vlan_features;
 	vlandev->features |= dev->features & dev->vlan_features;
 	vlandev->gso_max_size = dev->gso_max_size;
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
+#endif
 
 	if (old_features != vlandev->features)
 		netdev_features_change(vlandev);
@@ -468,6 +466,19 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		}
 		break;
 
+	case NETDEV_CHANGEMTU:
+		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+			vlandev = vlan_group_get_device(grp, i);
+			if (!vlandev)
+				continue;
+
+			if (vlandev->mtu <= dev->mtu)
+				continue;
+
+			dev_set_mtu(vlandev, dev->mtu);
+		}
+		break;
+
 	case NETDEV_FEAT_CHANGE:
 		/* Propagate device features to underlying device */
 		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 96bad8f233e2..4198ec5c8abc 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -288,10 +288,14 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 	return rc;
 }
 
-static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
+					    struct net_device *dev)
 {
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+	int i = skb_get_queue_mapping(skb);
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
+	unsigned int len;
+	int ret;
 
 	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
 	 *
@@ -317,29 +321,43 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
 	}
 
-	txq->tx_packets++;
-	txq->tx_bytes += skb->len;
 
 	skb->dev = vlan_dev_info(dev)->real_dev;
-	dev_queue_xmit(skb);
+	len = skb->len;
+	ret = dev_queue_xmit(skb);
+
+	if (likely(ret == NET_XMIT_SUCCESS)) {
+		txq->tx_packets++;
+		txq->tx_bytes += len;
+	} else
+		txq->tx_dropped++;
+
 	return NETDEV_TX_OK;
 }
 
-static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
+static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
 					    struct net_device *dev)
 {
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+	int i = skb_get_queue_mapping(skb);
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 	u16 vlan_tci;
+	unsigned int len;
+	int ret;
 
 	vlan_tci = vlan_dev_info(dev)->vlan_id;
 	vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
 	skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
 
-	txq->tx_packets++;
-	txq->tx_bytes += skb->len;
-
 	skb->dev = vlan_dev_info(dev)->real_dev;
-	dev_queue_xmit(skb);
+	len = skb->len;
+	ret = dev_queue_xmit(skb);
+
+	if (likely(ret == NET_XMIT_SUCCESS)) {
+		txq->tx_packets++;
+		txq->tx_bytes += len;
+	} else
+		txq->tx_dropped++;
+
 	return NETDEV_TX_OK;
 }
 
@@ -561,6 +579,55 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
 	return err;
 }
 
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
+				   struct scatterlist *sgl, unsigned int sgc)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = 0;
+
+	if (ops->ndo_fcoe_ddp_setup)
+		rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);
+
+	return rc;
+}
+
+static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int len = 0;
+
+	if (ops->ndo_fcoe_ddp_done)
+		len = ops->ndo_fcoe_ddp_done(real_dev, xid);
+
+	return len;
+}
+
+static int vlan_dev_fcoe_enable(struct net_device *dev)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = -EINVAL;
+
+	if (ops->ndo_fcoe_enable)
+		rc = ops->ndo_fcoe_enable(real_dev);
+	return rc;
+}
+
+static int vlan_dev_fcoe_disable(struct net_device *dev)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = -EINVAL;
+
+	if (ops->ndo_fcoe_disable)
+		rc = ops->ndo_fcoe_disable(real_dev);
+	return rc;
+}
+#endif
+
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 {
 	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
@@ -635,6 +702,10 @@ static int vlan_dev_init(struct net_device *dev)
 	if (is_zero_ether_addr(dev->broadcast))
 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
 
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+	dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
+#endif
+
 	if (real_dev->features & NETIF_F_HW_VLAN_TX) {
 		dev->header_ops = real_dev->header_ops;
 		dev->hard_header_len = real_dev->hard_header_len;
@@ -715,6 +786,12 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_change_rx_flags = vlan_dev_change_rx_flags,
 	.ndo_do_ioctl = vlan_dev_ioctl,
 	.ndo_neigh_setup = vlan_dev_neigh_setup,
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+	.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+	.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+	.ndo_fcoe_enable = vlan_dev_fcoe_enable,
+	.ndo_fcoe_disable = vlan_dev_fcoe_disable,
+#endif
 };
 
 static const struct net_device_ops vlan_netdev_accel_ops = {
@@ -731,6 +808,12 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
 	.ndo_change_rx_flags = vlan_dev_change_rx_flags,
 	.ndo_do_ioctl = vlan_dev_ioctl,
 	.ndo_neigh_setup = vlan_dev_neigh_setup,
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+	.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+	.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+	.ndo_fcoe_enable = vlan_dev_fcoe_enable,
+	.ndo_fcoe_disable = vlan_dev_fcoe_disable,
+#endif
 };
 
 void vlan_setup(struct net_device *dev)
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index e9c91dcecc9b..343146e1bceb 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -100,6 +100,25 @@ static int vlan_changelink(struct net_device *dev,
 	return 0;
 }
 
+static int vlan_get_tx_queues(struct net *net,
+			      struct nlattr *tb[],
+			      unsigned int *num_tx_queues,
+			      unsigned int *real_num_tx_queues)
+{
+	struct net_device *real_dev;
+
+	if (!tb[IFLA_LINK])
+		return -EINVAL;
+
+	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
+	if (!real_dev)
+		return -ENODEV;
+
+	*num_tx_queues = real_dev->num_tx_queues;
+	*real_num_tx_queues = real_dev->real_num_tx_queues;
+	return 0;
+}
+
 static int vlan_newlink(struct net_device *dev,
 			struct nlattr *tb[], struct nlattr *data[])
 {
@@ -216,6 +235,7 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
 	.maxtype = IFLA_VLAN_MAX,
 	.policy = vlan_policy,
 	.priv_size = sizeof(struct vlan_dev_info),
+	.get_tx_queues = vlan_get_tx_queues,
 	.setup = vlan_setup,
 	.validate = vlan_validate,
 	.newlink = vlan_newlink,
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index b55a091a33df..6262c335f3c2 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -107,7 +107,7 @@ static const struct file_operations vlandev_fops = {
  */
 
 /* Strings */
-static const char *vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = {
+static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = {
     [VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID",
    [VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD",
    [VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD",
diff --git a/net/Kconfig b/net/Kconfig
index 7051b9710675..041c35edb763 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -23,6 +23,26 @@ menuconfig NET
 
 if NET
 
+config WANT_COMPAT_NETLINK_MESSAGES
+	bool
+	help
+	  This option can be selected by other options that need compat
+	  netlink messages.
+
+config COMPAT_NETLINK_MESSAGES
+	def_bool y
+	depends on COMPAT
+	depends on WIRELESS_EXT || WANT_COMPAT_NETLINK_MESSAGES
+	help
+	  This option makes it possible to send different netlink messages
+	  to tasks depending on whether the task is a compat task or not. To
+	  achieve this, you need to set skb_shinfo(skb)->frag_list to the
+	  compat skb before sending the skb, the netlink code will sort out
+	  which message to actually pass to the task.
+
+	  Newly written code should NEVER need this option but do
+	  compat-independent messages instead!
+
 menu "Networking options"
 
 source "net/packet/Kconfig"
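
The COMPAT_NETLINK_MESSAGES help text above amounts to one move: hang the compat-formatted skb off the native skb's frag_list before sending, and let the netlink core pick the right layout per receiver. A minimal sketch of that shape, assuming kernel context; send_dual_layout() and its nl_sk/portid parameters are placeholders of this note, not APIs added by the merge:

#include <net/sock.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>

/* 'native' carries the normal layout, 'compat' the layout for compat tasks */
static int send_dual_layout(struct sock *nl_sk, u32 portid,
			    struct sk_buff *native, struct sk_buff *compat)
{
	/* as the help text says: attach the compat skb via frag_list ... */
	skb_shinfo(native)->frag_list = compat;
	/* ... and the netlink code sorts out which one each task receives */
	return netlink_unicast(nl_sk, native, portid, 0);
}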
diff --git a/net/Makefile b/net/Makefile
index ba324aefda73..1542e7268a7b 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -24,7 +24,6 @@ obj-y += ipv6/
 endif
 obj-$(CONFIG_PACKET) += packet/
 obj-$(CONFIG_NET_KEY) += key/
-obj-$(CONFIG_NET_SCHED) += sched/
 obj-$(CONFIG_BRIDGE) += bridge/
 obj-$(CONFIG_NET_DSA) += dsa/
 obj-$(CONFIG_IPX) += ipx/
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 89f99d3beb60..9d4adfd22757 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -599,7 +599,7 @@ int aarp_send_ddp(struct net_device *dev, struct sk_buff *skb,
 
 	/* Non ELAP we cannot do. */
 	if (dev->type != ARPHRD_ETHER)
-		return -1;
+		goto free_it;
 
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_ATALK);
@@ -634,7 +634,7 @@ int aarp_send_ddp(struct net_device *dev, struct sk_buff *skb,
 	if (!a) {
 		/* Whoops slipped... good job it's an unreliable protocol 8) */
 		write_unlock_bh(&aarp_lock);
-		return -1;
+		goto free_it;
 	}
 
 	/* Set up the queue */
@@ -663,15 +663,21 @@ out_unlock:
 	write_unlock_bh(&aarp_lock);
 
 	/* Tell the ddp layer we have taken over for this frame. */
-	return 0;
+	goto sent;
 
 sendit:
 	if (skb->sk)
 		skb->priority = skb->sk->sk_priority;
-	dev_queue_xmit(skb);
+	if (dev_queue_xmit(skb))
+		goto drop;
 sent:
-	return 1;
+	return NET_XMIT_SUCCESS;
+free_it:
+	kfree_skb(skb);
+drop:
+	return NET_XMIT_DROP;
 }
+EXPORT_SYMBOL(aarp_send_ddp);
 
 /*
  * An entry in the aarp unresolved queue has become resolved. Send
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 875eda5dbad7..4a6ff2ba4d07 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1270,8 +1270,10 @@ static int handle_ip_over_ddp(struct sk_buff *skb)
 	struct net_device_stats *stats;
 
 	/* This needs to be able to handle ipddp"N" devices */
-	if (!dev)
-		return -ENODEV;
+	if (!dev) {
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
 
 	skb->protocol = htons(ETH_P_IP);
 	skb_pull(skb, 13);
@@ -1281,8 +1283,7 @@ static int handle_ip_over_ddp(struct sk_buff *skb)
 	stats = netdev_priv(dev);
 	stats->rx_packets++;
 	stats->rx_bytes += skb->len + 13;
-	netif_rx(skb); /* Send the SKB up to a higher place. */
-	return 0;
+	return netif_rx(skb); /* Send the SKB up to a higher place. */
 }
 #else
 /* make it easy for gcc to optimize this test out, i.e. kill the code */
@@ -1290,9 +1291,8 @@ static int handle_ip_over_ddp(struct sk_buff *skb)
 #define handle_ip_over_ddp(skb) 0
 #endif
 
-static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
-			       struct ddpehdr *ddp, __u16 len_hops,
-			       int origlen)
+static int atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
+			      struct ddpehdr *ddp, __u16 len_hops, int origlen)
 {
 	struct atalk_route *rt;
 	struct atalk_addr ta;
@@ -1359,8 +1359,6 @@ static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
 		/* 22 bytes - 12 ether, 2 len, 3 802.2 5 snap */
 		struct sk_buff *nskb = skb_realloc_headroom(skb, 32);
 		kfree_skb(skb);
-		if (!nskb)
-			goto out;
 		skb = nskb;
 	} else
 		skb = skb_unshare(skb, GFP_ATOMIC);
@@ -1369,12 +1367,16 @@ static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
 	 * If the buffer didn't vanish into the lack of space bitbucket we can
 	 * send it.
 	 */
-	if (skb && aarp_send_ddp(rt->dev, skb, &ta, NULL) == -1)
-		goto free_it;
-out:
-	return;
+	if (skb == NULL)
+		goto drop;
+
+	if (aarp_send_ddp(rt->dev, skb, &ta, NULL) == NET_XMIT_DROP)
+		return NET_RX_DROP;
+	return NET_XMIT_SUCCESS;
 free_it:
 	kfree_skb(skb);
+drop:
+	return NET_RX_DROP;
 }
 
 /**
@@ -1400,7 +1402,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
 	__u16 len_hops;
 
 	if (!net_eq(dev_net(dev), &init_net))
-		goto freeit;
+		goto drop;
 
 	/* Don't mangle buffer if shared */
 	if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
@@ -1408,7 +1410,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	/* Size check and make sure header is contiguous */
 	if (!pskb_may_pull(skb, sizeof(*ddp)))
-		goto freeit;
+		goto drop;
 
 	ddp = ddp_hdr(skb);
 
@@ -1426,7 +1428,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (skb->len < sizeof(*ddp) || skb->len < (len_hops & 1023)) {
 		pr_debug("AppleTalk: dropping corrupted frame (deh_len=%u, "
 			 "skb->len=%u)\n", len_hops & 1023, skb->len);
-		goto freeit;
+		goto drop;
 	}
 
 	/*
@@ -1436,7 +1438,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (ddp->deh_sum &&
 	    atalk_checksum(skb, len_hops & 1023) != ddp->deh_sum)
 		/* Not a valid AppleTalk frame - dustbin time */
-		goto freeit;
+		goto drop;
 
 	/* Check the packet is aimed at us */
 	if (!ddp->deh_dnet) /* Net 0 is 'this network' */
@@ -1448,8 +1450,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
 		/* Not ours, so we route the packet via the correct
 		 * AppleTalk iface
 		 */
-		atalk_route_packet(skb, dev, ddp, len_hops, origlen);
-		goto out;
+		return atalk_route_packet(skb, dev, ddp, len_hops, origlen);
 	}
 
 	/* if IP over DDP is not selected this code will be optimized out */
@@ -1465,18 +1466,21 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	sock = atalk_search_socket(&tosat, atif);
 	if (!sock) /* But not one of our sockets */
-		goto freeit;
+		goto drop;
 
 	/* Queue packet (standard) */
 	skb->sk = sock;
 
 	if (sock_queue_rcv_skb(sock, skb) < 0)
-		goto freeit;
-out:
-	return 0;
-freeit:
+		goto drop;
+
+	return NET_RX_SUCCESS;
+
+drop:
 	kfree_skb(skb);
-	goto out;
+out:
+	return NET_RX_DROP;
+
 }
 
 /*
@@ -1652,10 +1656,10 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
 		if (skb2) {
 			loopback = 1;
 			SOCK_DEBUG(sk, "SK %p: send out(copy).\n", sk);
-			if (aarp_send_ddp(dev, skb2,
-					  &usat->sat_addr, NULL) == -1)
-				kfree_skb(skb2);
-			/* else queued/sent above in the aarp queue */
+			/*
+			 * If it fails it is queued/sent above in the aarp queue
+			 */
+			aarp_send_ddp(dev, skb2, &usat->sat_addr, NULL);
 		}
 	}
 
@@ -1685,9 +1689,10 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
 			usat = &gsat;
 		}
 
-		if (aarp_send_ddp(dev, skb, &usat->sat_addr, NULL) == -1)
-			kfree_skb(skb);
-		/* else queued/sent above in the aarp queue */
+		/*
+		 * If it fails it is queued/sent above in the aarp queue
+		 */
+		aarp_send_ddp(dev, skb, &usat->sat_addr, NULL);
 	}
 	SOCK_DEBUG(sk, "SK %p: Done write (%Zd).\n", sk, len);
 
@@ -1865,7 +1870,6 @@ static struct packet_type ppptalk_packet_type __read_mostly = {
 static unsigned char ddp_snap_id[] = { 0x08, 0x00, 0x07, 0x80, 0x9B };
 
 /* Export symbols for use by drivers when AppleTalk is a module */
-EXPORT_SYMBOL(aarp_send_ddp);
 EXPORT_SYMBOL(atrtr_get_dev);
 EXPORT_SYMBOL(atalk_find_dev_addr);
 
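
Beyond the return-code switch, the appletalk change above settles buffer ownership: aarp_send_ddp() now consumes the skb on every path and reports NET_XMIT_SUCCESS or NET_XMIT_DROP, so callers no longer free it themselves. A userspace toy model of that convention (stand-in types only, not kernel code):

#include <stdio.h>
#include <stdlib.h>

enum { NET_XMIT_SUCCESS = 0, NET_XMIT_DROP = 1 };

struct buf { void *data; };

/* consumes 'b' on every path, mirroring the reworked aarp_send_ddp() */
static int send_consuming(struct buf *b, int link_up)
{
	int verdict = link_up ? NET_XMIT_SUCCESS : NET_XMIT_DROP;

	/* a real stack would queue the buffer and free it later on success;
	 * the toy model frees it here on both paths */
	free(b->data);
	free(b);
	return verdict;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	b->data = malloc(16);
	/* the caller only reads the verdict; it must not free 'b' again */
	printf("verdict: %d\n", send_consuming(b, 0));
	return 0;
}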
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 2912665fc58c..26a646d4eb32 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -69,7 +69,7 @@ struct br2684_vcc {
 	struct net_device *device;
 	/* keep old push, pop functions for chaining */
 	void (*old_push) (struct atm_vcc * vcc, struct sk_buff * skb);
-	/* void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); */
+	void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
 	enum br2684_encaps encaps;
 	struct list_head brvccs;
 #ifdef CONFIG_ATM_BR2684_IPFILTER
@@ -142,6 +142,22 @@ static struct net_device *br2684_find_dev(const struct br2684_if_spec *s)
 	return NULL;
 }
 
+/* chained vcc->pop function. Check if we should wake the netif_queue */
+static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+	struct br2684_vcc *brvcc = BR2684_VCC(vcc);
+	struct net_device *net_dev = skb->dev;
+
+	pr_debug("br2684_pop(vcc %p ; net_dev %p )\n", vcc, net_dev);
+	brvcc->old_pop(vcc, skb);
+
+	if (!net_dev)
+		return;
+
+	if (atm_may_send(vcc, 0))
+		netif_wake_queue(net_dev);
+
+}
 /*
  * Send a packet out a particular vcc. Not to useful right now, but paves
  * the way for multiple vcc's per itf. Returns true if we can send,
@@ -200,20 +216,19 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
 
 	ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
 	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
-	if (!atm_may_send(atmvcc, skb->truesize)) {
-		/*
-		 * We free this here for now, because we cannot know in a higher
-		 * layer whether the skb pointer it supplied wasn't freed yet.
-		 * Now, it always is.
-		 */
-		dev_kfree_skb(skb);
-		return 0;
-	}
 	atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = atmvcc->atm_options;
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
 	atmvcc->send(atmvcc, skb);
+
+	if (!atm_may_send(atmvcc, 0)) {
+		netif_stop_queue(brvcc->device);
+		/*check for race with br2684_pop*/
+		if (atm_may_send(atmvcc, 0))
+			netif_start_queue(brvcc->device);
+	}
+
 	return 1;
 }
 
@@ -223,7 +238,8 @@ static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
 	return list_empty(&brdev->brvccs) ? NULL : list_entry_brvcc(brdev->brvccs.next); /* 1 vcc/dev right now */
 }
 
-static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
+				     struct net_device *dev)
 {
 	struct br2684_dev *brdev = BRPRIV(dev);
 	struct br2684_vcc *brvcc;
@@ -238,7 +254,7 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* netif_stop_queue(dev); */
 		dev_kfree_skb(skb);
 		read_unlock(&devs_lock);
-		return 0;
+		return NETDEV_TX_OK;
 	}
 	if (!br2684_xmit_vcc(skb, dev, brvcc)) {
 		/*
@@ -252,7 +268,7 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		dev->stats.tx_fifo_errors++;
 	}
 	read_unlock(&devs_lock);
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 /*
@@ -503,8 +519,10 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 	atmvcc->user_back = brvcc;
 	brvcc->encaps = (enum br2684_encaps)be.encaps;
 	brvcc->old_push = atmvcc->push;
+	brvcc->old_pop = atmvcc->pop;
 	barrier();
 	atmvcc->push = br2684_push;
+	atmvcc->pop = br2684_pop;
 
 	__skb_queue_head_init(&queue);
 	rq = &sk_atm(atmvcc)->sk_receive_queue;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index e65a3b1477f8..64629c354343 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -267,7 +267,7 @@ static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb)
 	kfree_skb(skb);
 }
 
-static struct neigh_ops clip_neigh_ops = {
+static const struct neigh_ops clip_neigh_ops = {
 	.family = AF_INET,
 	.solicit = clip_neigh_solicit,
 	.error_report = clip_neigh_error,
@@ -360,7 +360,8 @@ static int clip_encap(struct atm_vcc *vcc, int mode)
 	return 0;
 }
 
-static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
+				   struct net_device *dev)
 {
 	struct clip_priv *clip_priv = PRIV(dev);
 	struct atmarp_entry *entry;
@@ -373,7 +374,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		printk(KERN_ERR "clip_start_xmit: skb_dst(skb) == NULL\n");
 		dev_kfree_skb(skb);
 		dev->stats.tx_dropped++;
-		return 0;
+		return NETDEV_TX_OK;
 	}
 	if (!skb_dst(skb)->neighbour) {
 #if 0
@@ -387,7 +388,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n");
 		dev_kfree_skb(skb);
 		dev->stats.tx_dropped++;
-		return 0;
+		return NETDEV_TX_OK;
 	}
 	entry = NEIGH2ENTRY(skb_dst(skb)->neighbour);
 	if (!entry->vccs) {
@@ -402,7 +403,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			dev_kfree_skb(skb);
 			dev->stats.tx_dropped++;
 		}
-		return 0;
+		return NETDEV_TX_OK;
 	}
 	pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
 	ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
@@ -421,14 +422,14 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
 	if (old) {
 		printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
-		return 0;
+		return NETDEV_TX_OK;
 	}
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
 	vcc->send(vcc, skb);
 	if (atm_may_send(vcc, 0)) {
 		entry->vccs->xoff = 0;
-		return 0;
+		return NETDEV_TX_OK;
 	}
 	spin_lock_irqsave(&clip_priv->xoff_lock, flags);
 	netif_stop_queue(dev);	/* XOFF -> throttle immediately */
@@ -440,7 +441,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	   of the brief netif_stop_queue. If this isn't true or if it
 	   changes, use netif_wake_queue instead. */
 	spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 static int clip_mkip(struct atm_vcc *vcc, int timeout)
diff --git a/net/atm/lec.c b/net/atm/lec.c
index ff2e594dca9b..b2d644560323 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -59,7 +59,8 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
  */
 
 static int lec_open(struct net_device *dev);
-static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
+				  struct net_device *dev);
 static int lec_close(struct net_device *dev);
 static void lec_init(struct net_device *dev);
 static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
@@ -247,7 +248,8 @@ static void lec_tx_timeout(struct net_device *dev)
 	netif_wake_queue(dev);
 }
 
-static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
+				  struct net_device *dev)
 {
 	struct sk_buff *skb2;
 	struct lec_priv *priv = netdev_priv(dev);
@@ -289,7 +291,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
 		kfree_skb(skb);
 		if (skb2 == NULL)
-			return 0;
+			return NETDEV_TX_OK;
 		skb = skb2;
 	}
 	skb_push(skb, 2);
@@ -307,7 +309,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
 		kfree_skb(skb);
 		if (skb2 == NULL)
-			return 0;
+			return NETDEV_TX_OK;
 		skb = skb2;
 	}
 #endif
@@ -345,7 +347,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		dev_kfree_skb(skb);
 		if (skb2 == NULL) {
 			dev->stats.tx_dropped++;
-			return 0;
+			return NETDEV_TX_OK;
 		}
 		skb = skb2;
 	}
@@ -416,7 +418,7 @@ out:
 	if (entry)
 		lec_arp_put(entry);
 	dev->trans_start = jiffies;
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 /* The inverse routine to net_open(). */
@@ -935,9 +937,9 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
 }
 
 #ifdef CONFIG_PROC_FS
-static char *lec_arp_get_status_string(unsigned char status)
+static const char *lec_arp_get_status_string(unsigned char status)
 {
-	static char *lec_arp_status_string[] = {
+	static const char *const lec_arp_status_string[] = {
 		"ESI_UNKNOWN ",
 		"ESI_ARP_PENDING ",
 		"ESI_VC_PENDING ",
@@ -1121,7 +1123,8 @@ static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 static int lec_seq_show(struct seq_file *seq, void *v)
 {
-	static char lec_banner[] = "Itf MAC ATM destination"
+	static const char lec_banner[] =
+	    "Itf MAC ATM destination"
 	    " Status Flags "
 	    "VPI/VCI Recv VPI/VCI\n";
 
@@ -1505,7 +1508,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
 }
 
 #if DEBUG_ARP_TABLE
-static char *get_status_string(unsigned char st)
+static const char *get_status_string(unsigned char st)
 {
 	switch (st) {
 	case ESI_UNKNOWN:
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index e5bf11453a18..38a6cb0863f0 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -73,7 +73,8 @@ static void mpoad_close(struct atm_vcc *vcc);
 static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb);
 
 static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb);
-static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
+				   struct net_device *dev);
 static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev);
 static void mpc_timer_refresh(void);
 static void mpc_cache_check( unsigned long checking_time );
@@ -528,7 +529,8 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
 /*
  * Probably needs some error checks and locking, not sure...
  */
-static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
+				   struct net_device *dev)
 {
 	struct mpoa_client *mpc;
 	struct ethhdr *eth;
@@ -554,7 +556,7 @@ static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev)
 	while (i < mpc->number_of_mps_macs) {
 		if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN)))
 			if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */
-				return 0; /* success! */
+				return NETDEV_TX_OK; /* success! */
 		i++;
 
 
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 38de5ff61ecd..ab8419a324b6 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -151,8 +151,9 @@ static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
 {
-	static const char *class_name[] = { "off","UBR","CBR","VBR","ABR" };
-	static const char *aal_name[] = {
+	static const char *const class_name[] =
+		{"off","UBR","CBR","VBR","ABR"};
+	static const char *const aal_name[] = {
 		"---", "1", "2", "3/4", /* 0- 3 */
 		"???", "5", "???", "???", /* 4- 7 */
 		"???", "???", "???", "???", /* 8-11 */
@@ -178,7 +179,7 @@ static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
 
 static const char *vcc_state(struct atm_vcc *vcc)
 {
-	static const char *map[] = { ATM_VS2TXT_MAP };
+	static const char *const map[] = { ATM_VS2TXT_MAP };
 
 	return map[ATM_VF2VS(vcc->flags)];
 }
@@ -335,7 +336,7 @@ static const struct file_operations vcc_seq_fops = {
 
 static int svc_seq_show(struct seq_file *seq, void *v)
 {
-	static char atm_svc_banner[] =
+	static const char atm_svc_banner[] =
 		"Itf VPI VCI State Remote\n";
 
 	if (v == SEQ_START_TOKEN)
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 59fdb1d2e8ed..ed371684c133 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -34,6 +34,7 @@ menuconfig BT
 config BT_L2CAP
 	tristate "L2CAP protocol support"
 	depends on BT
+	select CRC16
 	help
 	  L2CAP (Logical Link Control and Adaptation Protocol) provides
 	  connection oriented and connection-less data transport. L2CAP
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 0250e0600150..8cfb5a849841 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -49,7 +49,7 @@ static struct net_proto_family *bt_proto[BT_MAX_PROTO];
 static DEFINE_RWLOCK(bt_proto_lock);
 
 static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
-static const char *bt_key_strings[BT_MAX_PROTO] = {
+static const char *const bt_key_strings[BT_MAX_PROTO] = {
 	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
 	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
 	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
@@ -61,7 +61,7 @@ static const char *bt_key_strings[BT_MAX_PROTO] = {
 };
 
 static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
-static const char *bt_slock_key_strings[BT_MAX_PROTO] = {
+static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
 	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
 	"slock-AF_BLUETOOTH-BTPROTO_HCI",
 	"slock-AF_BLUETOOTH-BTPROTO_SCO",
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 52a6ce0d772b..cafe9f54d841 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -533,6 +533,10 @@ static struct device *bnep_get_device(struct bnep_session *session)
 	return conn ? &conn->dev : NULL;
 }
 
+static struct device_type bnep_type = {
+	.name = "bluetooth",
+};
+
 int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 {
 	struct net_device *dev;
@@ -586,6 +590,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 #endif
 
 	SET_NETDEV_DEV(dev, bnep_get_device(s));
+	SET_NETDEV_DEVTYPE(dev, &bnep_type);
 
 	err = register_netdev(dev);
 	if (err) {
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index d7a0e9722def..26fb831ef7e0 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -165,7 +165,8 @@ static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session
 }
 #endif
 
-static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
+				 struct net_device *dev)
 {
 	struct bnep_session *s = netdev_priv(dev);
 	struct sock *sk = s->sock->sk;
@@ -175,14 +176,14 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
 #ifdef CONFIG_BT_BNEP_MC_FILTER
 	if (bnep_net_mc_filter(skb, s)) {
 		kfree_skb(skb);
-		return 0;
+		return NETDEV_TX_OK;
 	}
 #endif
 
 #ifdef CONFIG_BT_BNEP_PROTO_FILTER
 	if (bnep_net_proto_filter(skb, s)) {
 		kfree_skb(skb);
-		return 0;
+		return NETDEV_TX_OK;
 	}
 #endif
 
@@ -203,7 +204,7 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 	}
 
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 static const struct net_device_ops bnep_netdev_ops = {
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index fa47d5d84f5c..a9750984f772 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -246,6 +246,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
 
+	atomic_set(&conn->devref, 0);
+
 	hci_conn_init_sysfs(conn);
 
 	tasklet_enable(&hdev->tx_task);
@@ -288,7 +290,7 @@ int hci_conn_del(struct hci_conn *conn)
 
 	skb_queue_purge(&conn->data_q);
 
-	hci_conn_del_sysfs(conn);
+	hci_conn_put_device(conn);
 
 	hci_dev_put(hdev);
 
@@ -583,6 +585,19 @@ void hci_conn_check_pending(struct hci_dev *hdev)
 	hci_dev_unlock(hdev);
 }
 
+void hci_conn_hold_device(struct hci_conn *conn)
+{
+	atomic_inc(&conn->devref);
+}
+EXPORT_SYMBOL(hci_conn_hold_device);
+
+void hci_conn_put_device(struct hci_conn *conn)
+{
+	if (atomic_dec_and_test(&conn->devref))
+		hci_conn_del_sysfs(conn);
+}
+EXPORT_SYMBOL(hci_conn_put_device);
+
 int hci_get_conn_list(void __user *arg)
 {
 	struct hci_conn_list_req req, *cl;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 406ad07cdea1..e1da8f68759c 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -911,7 +911,7 @@ int hci_register_dev(struct hci_dev *hdev)
 		hdev->reassembly[i] = NULL;
 
 	init_waitqueue_head(&hdev->req_wait_q);
-	init_MUTEX(&hdev->req_lock);
+	mutex_init(&hdev->req_lock);
 
 	inquiry_cache_init(hdev);
 
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 184ba0a88ec0..e99fe385fba2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -887,6 +887,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 	} else
 		conn->state = BT_CONNECTED;
 
+	hci_conn_hold_device(conn);
 	hci_conn_add_sysfs(conn);
 
 	if (test_bit(HCI_AUTH, &hdev->flags))
@@ -1693,6 +1694,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
 		conn->handle = __le16_to_cpu(ev->handle);
 		conn->state = BT_CONNECTED;
 
+		hci_conn_hold_device(conn);
 		hci_conn_add_sysfs(conn);
 		break;
 
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index b18676870d55..09bedeb5579c 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c | |||
@@ -40,6 +40,7 @@ | |||
40 | 40 | ||
41 | #include <linux/input.h> | 41 | #include <linux/input.h> |
42 | #include <linux/hid.h> | 42 | #include <linux/hid.h> |
43 | #include <linux/hidraw.h> | ||
43 | 44 | ||
44 | #include <net/bluetooth/bluetooth.h> | 45 | #include <net/bluetooth/bluetooth.h> |
45 | #include <net/bluetooth/hci_core.h> | 46 | #include <net/bluetooth/hci_core.h> |
@@ -92,10 +93,14 @@ static void __hidp_link_session(struct hidp_session *session) | |||
92 | { | 93 | { |
93 | __module_get(THIS_MODULE); | 94 | __module_get(THIS_MODULE); |
94 | list_add(&session->list, &hidp_session_list); | 95 | list_add(&session->list, &hidp_session_list); |
96 | |||
97 | hci_conn_hold_device(session->conn); | ||
95 | } | 98 | } |
96 | 99 | ||
97 | static void __hidp_unlink_session(struct hidp_session *session) | 100 | static void __hidp_unlink_session(struct hidp_session *session) |
98 | { | 101 | { |
102 | hci_conn_put_device(session->conn); | ||
103 | |||
99 | list_del(&session->list); | 104 | list_del(&session->list); |
100 | module_put(THIS_MODULE); | 105 | module_put(THIS_MODULE); |
101 | } | 106 | } |
@@ -374,6 +379,7 @@ static void hidp_process_hid_control(struct hidp_session *session, | |||
374 | 379 | ||
375 | /* Kill session thread */ | 380 | /* Kill session thread */ |
376 | atomic_inc(&session->terminate); | 381 | atomic_inc(&session->terminate); |
382 | hidp_schedule(session); | ||
377 | } | 383 | } |
378 | } | 384 | } |
379 | 385 | ||
@@ -573,7 +579,11 @@ static int hidp_session(void *arg) | |||
573 | if (session->hid) { | 579 | if (session->hid) { |
574 | if (session->hid->claimed & HID_CLAIMED_INPUT) | 580 | if (session->hid->claimed & HID_CLAIMED_INPUT) |
575 | hidinput_disconnect(session->hid); | 581 | hidinput_disconnect(session->hid); |
582 | if (session->hid->claimed & HID_CLAIMED_HIDRAW) | ||
583 | hidraw_disconnect(session->hid); | ||
584 | |||
576 | hid_destroy_device(session->hid); | 585 | hid_destroy_device(session->hid); |
586 | session->hid = NULL; | ||
577 | } | 587 | } |
578 | 588 | ||
579 | /* Wakeup user-space polling for socket errors */ | 589 | /* Wakeup user-space polling for socket errors */ |
@@ -601,25 +611,27 @@ static struct device *hidp_get_device(struct hidp_session *session) | |||
601 | { | 611 | { |
602 | bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; | 612 | bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; |
603 | bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; | 613 | bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; |
614 | struct device *device = NULL; | ||
604 | struct hci_dev *hdev; | 615 | struct hci_dev *hdev; |
605 | struct hci_conn *conn; | ||
606 | 616 | ||
607 | hdev = hci_get_route(dst, src); | 617 | hdev = hci_get_route(dst, src); |
608 | if (!hdev) | 618 | if (!hdev) |
609 | return NULL; | 619 | return NULL; |
610 | 620 | ||
611 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); | 621 | session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); |
622 | if (session->conn) | ||
623 | device = &session->conn->dev; | ||
612 | 624 | ||
613 | hci_dev_put(hdev); | 625 | hci_dev_put(hdev); |
614 | 626 | ||
615 | return conn ? &conn->dev : NULL; | 627 | return device; |
616 | } | 628 | } |
617 | 629 | ||
618 | static int hidp_setup_input(struct hidp_session *session, | 630 | static int hidp_setup_input(struct hidp_session *session, |
619 | struct hidp_connadd_req *req) | 631 | struct hidp_connadd_req *req) |
620 | { | 632 | { |
621 | struct input_dev *input; | 633 | struct input_dev *input; |
622 | int i; | 634 | int err, i; |
623 | 635 | ||
624 | input = input_allocate_device(); | 636 | input = input_allocate_device(); |
625 | if (!input) | 637 | if (!input) |
@@ -666,7 +678,13 @@ static int hidp_setup_input(struct hidp_session *session, | |||
666 | 678 | ||
667 | input->event = hidp_input_event; | 679 | input->event = hidp_input_event; |
668 | 680 | ||
669 | return input_register_device(input); | 681 | err = input_register_device(input); |
682 | if (err < 0) { | ||
683 | hci_conn_put_device(session->conn); | ||
684 | return err; | ||
685 | } | ||
686 | |||
687 | return 0; | ||
670 | } | 688 | } |
671 | 689 | ||
672 | static int hidp_open(struct hid_device *hid) | 690 | static int hidp_open(struct hid_device *hid) |
@@ -748,13 +766,11 @@ static int hidp_setup_hid(struct hidp_session *session, | |||
748 | { | 766 | { |
749 | struct hid_device *hid; | 767 | struct hid_device *hid; |
750 | bdaddr_t src, dst; | 768 | bdaddr_t src, dst; |
751 | int ret; | 769 | int err; |
752 | 770 | ||
753 | hid = hid_allocate_device(); | 771 | hid = hid_allocate_device(); |
754 | if (IS_ERR(hid)) { | 772 | if (IS_ERR(hid)) |
755 | ret = PTR_ERR(session->hid); | 773 | return PTR_ERR(session->hid); |
756 | goto err; | ||
757 | } | ||
758 | 774 | ||
759 | session->hid = hid; | 775 | session->hid = hid; |
760 | session->req = req; | 776 | session->req = req; |
@@ -776,16 +792,17 @@ static int hidp_setup_hid(struct hidp_session *session, | |||
776 | hid->dev.parent = hidp_get_device(session); | 792 | hid->dev.parent = hidp_get_device(session); |
777 | hid->ll_driver = &hidp_hid_driver; | 793 | hid->ll_driver = &hidp_hid_driver; |
778 | 794 | ||
779 | ret = hid_add_device(hid); | 795 | err = hid_add_device(hid); |
780 | if (ret) | 796 | if (err < 0) |
781 | goto err_hid; | 797 | goto failed; |
782 | 798 | ||
783 | return 0; | 799 | return 0; |
784 | err_hid: | 800 | |
801 | failed: | ||
785 | hid_destroy_device(hid); | 802 | hid_destroy_device(hid); |
786 | session->hid = NULL; | 803 | session->hid = NULL; |
787 | err: | 804 | |
788 | return ret; | 805 | return err; |
789 | } | 806 | } |
790 | 807 | ||
791 | int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) | 808 | int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) |
@@ -835,13 +852,13 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, | |||
835 | if (req->rd_size > 0) { | 852 | if (req->rd_size > 0) { |
836 | err = hidp_setup_hid(session, req); | 853 | err = hidp_setup_hid(session, req); |
837 | if (err && err != -ENODEV) | 854 | if (err && err != -ENODEV) |
838 | goto err_skb; | 855 | goto purge; |
839 | } | 856 | } |
840 | 857 | ||
841 | if (!session->hid) { | 858 | if (!session->hid) { |
842 | err = hidp_setup_input(session, req); | 859 | err = hidp_setup_input(session, req); |
843 | if (err < 0) | 860 | if (err < 0) |
844 | goto err_skb; | 861 | goto purge; |
845 | } | 862 | } |
846 | 863 | ||
847 | __hidp_link_session(session); | 864 | __hidp_link_session(session); |
@@ -869,13 +886,20 @@ unlink: | |||
869 | 886 | ||
870 | __hidp_unlink_session(session); | 887 | __hidp_unlink_session(session); |
871 | 888 | ||
872 | if (session->input) | 889 | if (session->input) { |
873 | input_unregister_device(session->input); | 890 | input_unregister_device(session->input); |
874 | if (session->hid) | 891 | session->input = NULL; |
892 | } | ||
893 | |||
894 | if (session->hid) { | ||
875 | hid_destroy_device(session->hid); | 895 | hid_destroy_device(session->hid); |
876 | err_skb: | 896 | session->hid = NULL; |
897 | } | ||
898 | |||
899 | purge: | ||
877 | skb_queue_purge(&session->ctrl_transmit); | 900 | skb_queue_purge(&session->ctrl_transmit); |
878 | skb_queue_purge(&session->intr_transmit); | 901 | skb_queue_purge(&session->intr_transmit); |
902 | |||
879 | failed: | 903 | failed: |
880 | up_write(&hidp_session_sem); | 904 | up_write(&hidp_session_sem); |
881 | 905 | ||
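hidp_get_device() now caches the underlying ACL connection in session->conn so the HID and input devices can be parented on the connection's struct device, and __hidp_link_session()/__hidp_unlink_session() bracket the session with hci_conn_hold_device()/hci_conn_put_device() so that parent outlives the session. Schematically (a paraphrase of the hunks above, not additional code from the patch):

	/* Sketch: the HIDP session pins the ACL connection's device. */
	session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (session->conn)
		hid->dev.parent = &session->conn->dev;  /* stays valid because ... */

	__hidp_link_session(session);    /* ... linking takes hci_conn_hold_device() */
	/* ... session runs ... */
	__hidp_unlink_session(session);  /* and unlinking drops it again */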
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h index e503c89057ad..faf3d74c3586 100644 --- a/net/bluetooth/hidp/hidp.h +++ b/net/bluetooth/hidp/hidp.h | |||
@@ -126,6 +126,8 @@ int hidp_get_conninfo(struct hidp_conninfo *ci); | |||
126 | struct hidp_session { | 126 | struct hidp_session { |
127 | struct list_head list; | 127 | struct list_head list; |
128 | 128 | ||
129 | struct hci_conn *conn; | ||
130 | |||
129 | struct socket *ctrl_sock; | 131 | struct socket *ctrl_sock; |
130 | struct socket *intr_sock; | 132 | struct socket *intr_sock; |
131 | 133 | ||
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index bd0a4c1bced0..b03012564647 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/list.h> | 41 | #include <linux/list.h> |
42 | #include <linux/device.h> | 42 | #include <linux/device.h> |
43 | #include <linux/uaccess.h> | 43 | #include <linux/uaccess.h> |
44 | #include <linux/crc16.h> | ||
44 | #include <net/sock.h> | 45 | #include <net/sock.h> |
45 | 46 | ||
46 | #include <asm/system.h> | 47 | #include <asm/system.h> |
@@ -50,7 +51,9 @@ | |||
50 | #include <net/bluetooth/hci_core.h> | 51 | #include <net/bluetooth/hci_core.h> |
51 | #include <net/bluetooth/l2cap.h> | 52 | #include <net/bluetooth/l2cap.h> |
52 | 53 | ||
53 | #define VERSION "2.13" | 54 | #define VERSION "2.14" |
55 | |||
56 | static int enable_ertm = 0; | ||
54 | 57 | ||
55 | static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; | 58 | static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; |
56 | static u8 l2cap_fixed_chan[8] = { 0x02, }; | 59 | static u8 l2cap_fixed_chan[8] = { 0x02, }; |
@@ -331,6 +334,48 @@ static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 | |||
331 | return hci_send_acl(conn->hcon, skb, 0); | 334 | return hci_send_acl(conn->hcon, skb, 0); |
332 | } | 335 | } |
333 | 336 | ||
337 | static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control) | ||
338 | { | ||
339 | struct sk_buff *skb; | ||
340 | struct l2cap_hdr *lh; | ||
341 | struct l2cap_conn *conn = pi->conn; | ||
342 | int count, hlen = L2CAP_HDR_SIZE + 2; | ||
343 | |||
344 | if (pi->fcs == L2CAP_FCS_CRC16) | ||
345 | hlen += 2; | ||
346 | |||
347 | BT_DBG("pi %p, control 0x%2.2x", pi, control); | ||
348 | |||
349 | count = min_t(unsigned int, conn->mtu, hlen); | ||
350 | control |= L2CAP_CTRL_FRAME_TYPE; | ||
351 | |||
352 | skb = bt_skb_alloc(count, GFP_ATOMIC); | ||
353 | if (!skb) | ||
354 | return -ENOMEM; | ||
355 | |||
356 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); | ||
357 | lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); | ||
358 | lh->cid = cpu_to_le16(pi->dcid); | ||
359 | put_unaligned_le16(control, skb_put(skb, 2)); | ||
360 | |||
361 | if (pi->fcs == L2CAP_FCS_CRC16) { | ||
362 | u16 fcs = crc16(0, (u8 *)lh, count - 2); | ||
363 | put_unaligned_le16(fcs, skb_put(skb, 2)); | ||
364 | } | ||
365 | |||
366 | return hci_send_acl(pi->conn->hcon, skb, 0); | ||
367 | } | ||
368 | |||
369 | static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control) | ||
370 | { | ||
371 | if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) | ||
372 | control |= L2CAP_SUPER_RCV_NOT_READY; | ||
373 | else | ||
374 | control |= L2CAP_SUPER_RCV_READY; | ||
375 | |||
376 | return l2cap_send_sframe(pi, control); | ||
377 | } | ||
378 | |||
334 | static void l2cap_do_start(struct sock *sk) | 379 | static void l2cap_do_start(struct sock *sk) |
335 | { | 380 | { |
336 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | 381 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; |
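The new l2cap_send_sframe() helper builds an ERTM supervisory frame: basic L2CAP header, a 16-bit control field with L2CAP_CTRL_FRAME_TYPE set, and, when the channel negotiated L2CAP_FCS_CRC16, a trailing CRC-16 covering everything before it. l2cap_send_rr_or_rnr() simply picks RR or RNR depending on the local-busy flag. Wire layout, as the code above constructs it:

	/*
	 * S-frame as built by l2cap_send_sframe() (FCS only with L2CAP_FCS_CRC16):
	 *
	 *   +-----------+-----------+------------------+-----------+
	 *   | len (LE)  | CID (LE)  | control (16 bit) | FCS (LE)  |
	 *   +-----------+-----------+------------------+-----------+
	 *
	 * The FCS is crc16() over the L2CAP header plus control field, i.e.
	 * count - 2 bytes starting at lh, exactly as computed above.
	 */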
@@ -364,6 +409,16 @@ static void l2cap_do_start(struct sock *sk) | |||
364 | } | 409 | } |
365 | } | 410 | } |
366 | 411 | ||
412 | static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk) | ||
413 | { | ||
414 | struct l2cap_disconn_req req; | ||
415 | |||
416 | req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); | ||
417 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); | ||
418 | l2cap_send_cmd(conn, l2cap_get_ident(conn), | ||
419 | L2CAP_DISCONN_REQ, sizeof(req), &req); | ||
420 | } | ||
421 | |||
367 | /* ---- L2CAP connections ---- */ | 422 | /* ---- L2CAP connections ---- */ |
368 | static void l2cap_conn_start(struct l2cap_conn *conn) | 423 | static void l2cap_conn_start(struct l2cap_conn *conn) |
369 | { | 424 | { |
@@ -648,15 +703,10 @@ static void __l2cap_sock_close(struct sock *sk, int reason) | |||
648 | case BT_CONFIG: | 703 | case BT_CONFIG: |
649 | if (sk->sk_type == SOCK_SEQPACKET) { | 704 | if (sk->sk_type == SOCK_SEQPACKET) { |
650 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | 705 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; |
651 | struct l2cap_disconn_req req; | ||
652 | 706 | ||
653 | sk->sk_state = BT_DISCONN; | 707 | sk->sk_state = BT_DISCONN; |
654 | l2cap_sock_set_timer(sk, sk->sk_sndtimeo); | 708 | l2cap_sock_set_timer(sk, sk->sk_sndtimeo); |
655 | 709 | l2cap_send_disconn_req(conn, sk); | |
656 | req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); | ||
657 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); | ||
658 | l2cap_send_cmd(conn, l2cap_get_ident(conn), | ||
659 | L2CAP_DISCONN_REQ, sizeof(req), &req); | ||
660 | } else | 710 | } else |
661 | l2cap_chan_del(sk, reason); | 711 | l2cap_chan_del(sk, reason); |
662 | break; | 712 | break; |
@@ -715,12 +765,16 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) | |||
715 | 765 | ||
716 | pi->imtu = l2cap_pi(parent)->imtu; | 766 | pi->imtu = l2cap_pi(parent)->imtu; |
717 | pi->omtu = l2cap_pi(parent)->omtu; | 767 | pi->omtu = l2cap_pi(parent)->omtu; |
768 | pi->mode = l2cap_pi(parent)->mode; | ||
769 | pi->fcs = l2cap_pi(parent)->fcs; | ||
718 | pi->sec_level = l2cap_pi(parent)->sec_level; | 770 | pi->sec_level = l2cap_pi(parent)->sec_level; |
719 | pi->role_switch = l2cap_pi(parent)->role_switch; | 771 | pi->role_switch = l2cap_pi(parent)->role_switch; |
720 | pi->force_reliable = l2cap_pi(parent)->force_reliable; | 772 | pi->force_reliable = l2cap_pi(parent)->force_reliable; |
721 | } else { | 773 | } else { |
722 | pi->imtu = L2CAP_DEFAULT_MTU; | 774 | pi->imtu = L2CAP_DEFAULT_MTU; |
723 | pi->omtu = 0; | 775 | pi->omtu = 0; |
776 | pi->mode = L2CAP_MODE_BASIC; | ||
777 | pi->fcs = L2CAP_FCS_CRC16; | ||
724 | pi->sec_level = BT_SECURITY_LOW; | 778 | pi->sec_level = BT_SECURITY_LOW; |
725 | pi->role_switch = 0; | 779 | pi->role_switch = 0; |
726 | pi->force_reliable = 0; | 780 | pi->force_reliable = 0; |
@@ -956,6 +1010,19 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al | |||
956 | goto done; | 1010 | goto done; |
957 | } | 1011 | } |
958 | 1012 | ||
1013 | switch (l2cap_pi(sk)->mode) { | ||
1014 | case L2CAP_MODE_BASIC: | ||
1015 | break; | ||
1016 | case L2CAP_MODE_ERTM: | ||
1017 | case L2CAP_MODE_STREAMING: | ||
1018 | if (enable_ertm) | ||
1019 | break; | ||
1020 | /* fall through */ | ||
1021 | default: | ||
1022 | err = -ENOTSUPP; | ||
1023 | goto done; | ||
1024 | } | ||
1025 | |||
959 | switch (sk->sk_state) { | 1026 | switch (sk->sk_state) { |
960 | case BT_CONNECT: | 1027 | case BT_CONNECT: |
961 | case BT_CONNECT2: | 1028 | case BT_CONNECT2: |
@@ -1007,6 +1074,19 @@ static int l2cap_sock_listen(struct socket *sock, int backlog) | |||
1007 | goto done; | 1074 | goto done; |
1008 | } | 1075 | } |
1009 | 1076 | ||
1077 | switch (l2cap_pi(sk)->mode) { | ||
1078 | case L2CAP_MODE_BASIC: | ||
1079 | break; | ||
1080 | case L2CAP_MODE_ERTM: | ||
1081 | case L2CAP_MODE_STREAMING: | ||
1082 | if (enable_ertm) | ||
1083 | break; | ||
1084 | /* fall through */ | ||
1085 | default: | ||
1086 | err = -ENOTSUPP; | ||
1087 | goto done; | ||
1088 | } | ||
1089 | |||
1010 | if (!l2cap_pi(sk)->psm) { | 1090 | if (!l2cap_pi(sk)->psm) { |
1011 | bdaddr_t *src = &bt_sk(sk)->src; | 1091 | bdaddr_t *src = &bt_sk(sk)->src; |
1012 | u16 psm; | 1092 | u16 psm; |
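Both connect() and listen() now reject sockets configured for L2CAP_MODE_ERTM or L2CAP_MODE_STREAMING unless enable_ertm is set (it defaults to 0 near the top of the file); basic mode is always accepted. Assuming the switch is exposed as a module parameter, as in the released file but outside the hunks shown here, enabling it would look roughly like:

	/* Assumed sketch: enable_ertm as a writable module parameter. */
	module_param(enable_ertm, bool, 0644);
	MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

	/* at runtime (path assumed): echo 1 > /sys/module/l2cap/parameters/enable_ertm */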
@@ -1117,39 +1197,219 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l | |||
1117 | return 0; | 1197 | return 0; |
1118 | } | 1198 | } |
1119 | 1199 | ||
1120 | static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len) | 1200 | static void l2cap_monitor_timeout(unsigned long arg) |
1121 | { | 1201 | { |
1122 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | 1202 | struct sock *sk = (void *) arg; |
1123 | struct sk_buff *skb, **frag; | 1203 | u16 control; |
1124 | int err, hlen, count, sent = 0; | ||
1125 | struct l2cap_hdr *lh; | ||
1126 | 1204 | ||
1127 | BT_DBG("sk %p len %d", sk, len); | 1205 | bh_lock_sock(sk); |
1206 | if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) { | ||
1207 | l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk); | ||
1208 | return; | ||
1209 | } | ||
1128 | 1210 | ||
1129 | /* First fragment (with L2CAP header) */ | 1211 | l2cap_pi(sk)->retry_count++; |
1130 | if (sk->sk_type == SOCK_DGRAM) | 1212 | __mod_monitor_timer(); |
1131 | hlen = L2CAP_HDR_SIZE + 2; | ||
1132 | else | ||
1133 | hlen = L2CAP_HDR_SIZE; | ||
1134 | 1213 | ||
1135 | count = min_t(unsigned int, (conn->mtu - hlen), len); | 1214 | control = L2CAP_CTRL_POLL; |
1215 | l2cap_send_rr_or_rnr(l2cap_pi(sk), control); | ||
1216 | bh_unlock_sock(sk); | ||
1217 | } | ||
1136 | 1218 | ||
1137 | skb = bt_skb_send_alloc(sk, hlen + count, | 1219 | static void l2cap_retrans_timeout(unsigned long arg) |
1138 | msg->msg_flags & MSG_DONTWAIT, &err); | 1220 | { |
1139 | if (!skb) | 1221 | struct sock *sk = (void *) arg; |
1140 | return err; | 1222 | u16 control; |
1141 | 1223 | ||
1142 | /* Create L2CAP header */ | 1224 | bh_lock_sock(sk); |
1143 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); | 1225 | l2cap_pi(sk)->retry_count = 1; |
1144 | lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); | 1226 | __mod_monitor_timer(); |
1145 | lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); | 1227 | |
1228 | l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F; | ||
1229 | |||
1230 | control = L2CAP_CTRL_POLL; | ||
1231 | l2cap_send_rr_or_rnr(l2cap_pi(sk), control); | ||
1232 | bh_unlock_sock(sk); | ||
1233 | } | ||
1234 | |||
1235 | static void l2cap_drop_acked_frames(struct sock *sk) | ||
1236 | { | ||
1237 | struct sk_buff *skb; | ||
1238 | |||
1239 | while ((skb = skb_peek(TX_QUEUE(sk)))) { | ||
1240 | if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq) | ||
1241 | break; | ||
1242 | |||
1243 | skb = skb_dequeue(TX_QUEUE(sk)); | ||
1244 | kfree_skb(skb); | ||
1245 | |||
1246 | l2cap_pi(sk)->unacked_frames--; | ||
1247 | } | ||
1248 | |||
1249 | if (!l2cap_pi(sk)->unacked_frames) | ||
1250 | del_timer(&l2cap_pi(sk)->retrans_timer); | ||
1146 | 1251 | ||
1147 | if (sk->sk_type == SOCK_DGRAM) | 1252 | return; |
1148 | put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2)); | 1253 | } |
1254 | |||
1255 | static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb) | ||
1256 | { | ||
1257 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
1258 | int err; | ||
1259 | |||
1260 | BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len); | ||
1261 | |||
1262 | err = hci_send_acl(pi->conn->hcon, skb, 0); | ||
1263 | if (err < 0) | ||
1264 | kfree_skb(skb); | ||
1265 | |||
1266 | return err; | ||
1267 | } | ||
1268 | |||
1269 | static int l2cap_streaming_send(struct sock *sk) | ||
1270 | { | ||
1271 | struct sk_buff *skb, *tx_skb; | ||
1272 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
1273 | u16 control, fcs; | ||
1274 | int err; | ||
1275 | |||
1276 | while ((skb = sk->sk_send_head)) { | ||
1277 | tx_skb = skb_clone(skb, GFP_ATOMIC); | ||
1278 | |||
1279 | control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); | ||
1280 | control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; | ||
1281 | put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); | ||
1282 | |||
1283 | if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) { | ||
1284 | fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); | ||
1285 | put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); | ||
1286 | } | ||
1287 | |||
1288 | err = l2cap_do_send(sk, tx_skb); | ||
1289 | if (err < 0) { | ||
1290 | l2cap_send_disconn_req(pi->conn, sk); | ||
1291 | return err; | ||
1292 | } | ||
1293 | |||
1294 | pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; | ||
1295 | |||
1296 | if (skb_queue_is_last(TX_QUEUE(sk), skb)) | ||
1297 | sk->sk_send_head = NULL; | ||
1298 | else | ||
1299 | sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb); | ||
1300 | |||
1301 | skb = skb_dequeue(TX_QUEUE(sk)); | ||
1302 | kfree_skb(skb); | ||
1303 | } | ||
1304 | return 0; | ||
1305 | } | ||
1306 | |||
1307 | static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq) | ||
1308 | { | ||
1309 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
1310 | struct sk_buff *skb, *tx_skb; | ||
1311 | u16 control, fcs; | ||
1312 | int err; | ||
1313 | |||
1314 | skb = skb_peek(TX_QUEUE(sk)); | ||
1315 | do { | ||
1316 | if (bt_cb(skb)->tx_seq != tx_seq) { | ||
1317 | if (skb_queue_is_last(TX_QUEUE(sk), skb)) | ||
1318 | break; | ||
1319 | skb = skb_queue_next(TX_QUEUE(sk), skb); | ||
1320 | continue; | ||
1321 | } | ||
1322 | |||
1323 | if (pi->remote_max_tx && | ||
1324 | bt_cb(skb)->retries == pi->remote_max_tx) { | ||
1325 | l2cap_send_disconn_req(pi->conn, sk); | ||
1326 | break; | ||
1327 | } | ||
1328 | |||
1329 | tx_skb = skb_clone(skb, GFP_ATOMIC); | ||
1330 | bt_cb(skb)->retries++; | ||
1331 | control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); | ||
1332 | control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT) | ||
1333 | | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); | ||
1334 | put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); | ||
1335 | |||
1336 | if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) { | ||
1337 | fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); | ||
1338 | put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); | ||
1339 | } | ||
1340 | |||
1341 | err = l2cap_do_send(sk, tx_skb); | ||
1342 | if (err < 0) { | ||
1343 | l2cap_send_disconn_req(pi->conn, sk); | ||
1344 | return err; | ||
1345 | } | ||
1346 | break; | ||
1347 | } while(1); | ||
1348 | return 0; | ||
1349 | } | ||
1350 | |||
1351 | static int l2cap_ertm_send(struct sock *sk) | ||
1352 | { | ||
1353 | struct sk_buff *skb, *tx_skb; | ||
1354 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
1355 | u16 control, fcs; | ||
1356 | int err; | ||
1357 | |||
1358 | if (pi->conn_state & L2CAP_CONN_WAIT_F) | ||
1359 | return 0; | ||
1360 | |||
1361 | while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) | ||
1362 | && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) { | ||
1363 | tx_skb = skb_clone(skb, GFP_ATOMIC); | ||
1364 | |||
1365 | if (pi->remote_max_tx && | ||
1366 | bt_cb(skb)->retries == pi->remote_max_tx) { | ||
1367 | l2cap_send_disconn_req(pi->conn, sk); | ||
1368 | break; | ||
1369 | } | ||
1370 | |||
1371 | bt_cb(skb)->retries++; | ||
1372 | |||
1373 | control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); | ||
1374 | control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT) | ||
1375 | | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); | ||
1376 | put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); | ||
1377 | |||
1378 | |||
1379 | if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) { | ||
1380 | fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2); | ||
1381 | put_unaligned_le16(fcs, skb->data + tx_skb->len - 2); | ||
1382 | } | ||
1383 | |||
1384 | err = l2cap_do_send(sk, tx_skb); | ||
1385 | if (err < 0) { | ||
1386 | l2cap_send_disconn_req(pi->conn, sk); | ||
1387 | return err; | ||
1388 | } | ||
1389 | __mod_retrans_timer(); | ||
1390 | |||
1391 | bt_cb(skb)->tx_seq = pi->next_tx_seq; | ||
1392 | pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; | ||
1393 | |||
1394 | pi->unacked_frames++; | ||
1395 | |||
1396 | if (skb_queue_is_last(TX_QUEUE(sk), skb)) | ||
1397 | sk->sk_send_head = NULL; | ||
1398 | else | ||
1399 | sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb); | ||
1400 | } | ||
1401 | |||
1402 | return 0; | ||
1403 | } | ||
1404 | |||
1405 | static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb) | ||
1406 | { | ||
1407 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | ||
1408 | struct sk_buff **frag; | ||
1409 | int err, sent = 0; | ||
1149 | 1410 | ||
1150 | if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) { | 1411 | if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) { |
1151 | err = -EFAULT; | 1412 | return -EFAULT; |
1152 | goto fail; | ||
1153 | } | 1413 | } |
1154 | 1414 | ||
1155 | sent += count; | 1415 | sent += count; |
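l2cap_ertm_send() and l2cap_retransmit_frame() clone each queued I-frame and patch its 16-bit control word in place before transmission, OR-ing in the current ReqSeq and TxSeq (both modulo 64) and recomputing the CRC-16 when FCS is in use; unacked_frames and the retransmission timer track what is still outstanding. A minimal sketch of the control-word packing (the shift constants come from l2cap.h in this series and their values are assumptions here):

	/* Sketch: packing an ERTM I-frame control field, modulo-64 sequence space. */
	static u16 ertm_iframe_control(u8 req_seq, u8 tx_seq)
	{
		u16 control = 0;        /* I-frame: the frame-type bit stays clear */

		control |= (u16)(req_seq % 64) << L2CAP_CTRL_REQSEQ_SHIFT;  /* assumed 8 */
		control |= (u16)(tx_seq % 64) << L2CAP_CTRL_TXSEQ_SHIFT;    /* assumed 1 */

		return control;
	}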
@@ -1162,33 +1422,173 @@ static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len) | |||
1162 | 1422 | ||
1163 | *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err); | 1423 | *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err); |
1164 | if (!*frag) | 1424 | if (!*frag) |
1165 | goto fail; | 1425 | return -EFAULT; |
1166 | 1426 | if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) | |
1167 | if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) { | 1427 | return -EFAULT; |
1168 | err = -EFAULT; | ||
1169 | goto fail; | ||
1170 | } | ||
1171 | 1428 | ||
1172 | sent += count; | 1429 | sent += count; |
1173 | len -= count; | 1430 | len -= count; |
1174 | 1431 | ||
1175 | frag = &(*frag)->next; | 1432 | frag = &(*frag)->next; |
1176 | } | 1433 | } |
1177 | err = hci_send_acl(conn->hcon, skb, 0); | ||
1178 | if (err < 0) | ||
1179 | goto fail; | ||
1180 | 1434 | ||
1181 | return sent; | 1435 | return sent; |
1436 | } | ||
1182 | 1437 | ||
1183 | fail: | 1438 | static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len) |
1184 | kfree_skb(skb); | 1439 | { |
1185 | return err; | 1440 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; |
1441 | struct sk_buff *skb; | ||
1442 | int err, count, hlen = L2CAP_HDR_SIZE + 2; | ||
1443 | struct l2cap_hdr *lh; | ||
1444 | |||
1445 | BT_DBG("sk %p len %d", sk, (int)len); | ||
1446 | |||
1447 | count = min_t(unsigned int, (conn->mtu - hlen), len); | ||
1448 | skb = bt_skb_send_alloc(sk, count + hlen, | ||
1449 | msg->msg_flags & MSG_DONTWAIT, &err); | ||
1450 | if (!skb) | ||
1451 | return ERR_PTR(-ENOMEM); | ||
1452 | |||
1453 | /* Create L2CAP header */ | ||
1454 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); | ||
1455 | lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); | ||
1456 | lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); | ||
1457 | put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2)); | ||
1458 | |||
1459 | err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); | ||
1460 | if (unlikely(err < 0)) { | ||
1461 | kfree_skb(skb); | ||
1462 | return ERR_PTR(err); | ||
1463 | } | ||
1464 | return skb; | ||
1465 | } | ||
1466 | |||
1467 | static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len) | ||
1468 | { | ||
1469 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | ||
1470 | struct sk_buff *skb; | ||
1471 | int err, count, hlen = L2CAP_HDR_SIZE; | ||
1472 | struct l2cap_hdr *lh; | ||
1473 | |||
1474 | BT_DBG("sk %p len %d", sk, (int)len); | ||
1475 | |||
1476 | count = min_t(unsigned int, (conn->mtu - hlen), len); | ||
1477 | skb = bt_skb_send_alloc(sk, count + hlen, | ||
1478 | msg->msg_flags & MSG_DONTWAIT, &err); | ||
1479 | if (!skb) | ||
1480 | return ERR_PTR(-ENOMEM); | ||
1481 | |||
1482 | /* Create L2CAP header */ | ||
1483 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); | ||
1484 | lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); | ||
1485 | lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); | ||
1486 | |||
1487 | err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); | ||
1488 | if (unlikely(err < 0)) { | ||
1489 | kfree_skb(skb); | ||
1490 | return ERR_PTR(err); | ||
1491 | } | ||
1492 | return skb; | ||
1493 | } | ||
1494 | |||
1495 | static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen) | ||
1496 | { | ||
1497 | struct l2cap_conn *conn = l2cap_pi(sk)->conn; | ||
1498 | struct sk_buff *skb; | ||
1499 | int err, count, hlen = L2CAP_HDR_SIZE + 2; | ||
1500 | struct l2cap_hdr *lh; | ||
1501 | |||
1502 | BT_DBG("sk %p len %d", sk, (int)len); | ||
1503 | |||
1504 | if (sdulen) | ||
1505 | hlen += 2; | ||
1506 | |||
1507 | if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) | ||
1508 | hlen += 2; | ||
1509 | |||
1510 | count = min_t(unsigned int, (conn->mtu - hlen), len); | ||
1511 | skb = bt_skb_send_alloc(sk, count + hlen, | ||
1512 | msg->msg_flags & MSG_DONTWAIT, &err); | ||
1513 | if (!skb) | ||
1514 | return ERR_PTR(-ENOMEM); | ||
1515 | |||
1516 | /* Create L2CAP header */ | ||
1517 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); | ||
1518 | lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); | ||
1519 | lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); | ||
1520 | put_unaligned_le16(control, skb_put(skb, 2)); | ||
1521 | if (sdulen) | ||
1522 | put_unaligned_le16(sdulen, skb_put(skb, 2)); | ||
1523 | |||
1524 | err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); | ||
1525 | if (unlikely(err < 0)) { | ||
1526 | kfree_skb(skb); | ||
1527 | return ERR_PTR(err); | ||
1528 | } | ||
1529 | |||
1530 | if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) | ||
1531 | put_unaligned_le16(0, skb_put(skb, 2)); | ||
1532 | |||
1533 | bt_cb(skb)->retries = 0; | ||
1534 | return skb; | ||
1535 | } | ||
1536 | |||
1537 | static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len) | ||
1538 | { | ||
1539 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
1540 | struct sk_buff *skb; | ||
1541 | struct sk_buff_head sar_queue; | ||
1542 | u16 control; | ||
1543 | size_t size = 0; | ||
1544 | |||
1545 | __skb_queue_head_init(&sar_queue); | ||
1546 | control = L2CAP_SDU_START; | ||
1547 | skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len); | ||
1548 | if (IS_ERR(skb)) | ||
1549 | return PTR_ERR(skb); | ||
1550 | |||
1551 | __skb_queue_tail(&sar_queue, skb); | ||
1552 | len -= pi->max_pdu_size; | ||
1553 | size +=pi->max_pdu_size; | ||
1554 | control = 0; | ||
1555 | |||
1556 | while (len > 0) { | ||
1557 | size_t buflen; | ||
1558 | |||
1559 | if (len > pi->max_pdu_size) { | ||
1560 | control |= L2CAP_SDU_CONTINUE; | ||
1561 | buflen = pi->max_pdu_size; | ||
1562 | } else { | ||
1563 | control |= L2CAP_SDU_END; | ||
1564 | buflen = len; | ||
1565 | } | ||
1566 | |||
1567 | skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0); | ||
1568 | if (IS_ERR(skb)) { | ||
1569 | skb_queue_purge(&sar_queue); | ||
1570 | return PTR_ERR(skb); | ||
1571 | } | ||
1572 | |||
1573 | __skb_queue_tail(&sar_queue, skb); | ||
1574 | len -= buflen; | ||
1575 | size += buflen; | ||
1576 | control = 0; | ||
1577 | } | ||
1578 | skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk)); | ||
1579 | if (sk->sk_send_head == NULL) | ||
1580 | sk->sk_send_head = sar_queue.next; | ||
1581 | |||
1582 | return size; | ||
1186 | } | 1583 | } |
1187 | 1584 | ||
1188 | static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) | 1585 | static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) |
1189 | { | 1586 | { |
1190 | struct sock *sk = sock->sk; | 1587 | struct sock *sk = sock->sk; |
1191 | int err = 0; | 1588 | struct l2cap_pinfo *pi = l2cap_pi(sk); |
1589 | struct sk_buff *skb; | ||
1590 | u16 control; | ||
1591 | int err; | ||
1192 | 1592 | ||
1193 | BT_DBG("sock %p, sk %p", sock, sk); | 1593 | BT_DBG("sock %p, sk %p", sock, sk); |
1194 | 1594 | ||
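l2cap_sar_segment_sdu() splits an SDU larger than max_pdu_size into a START I-frame (which also carries the 2-byte total SDU length), zero or more CONTINUE frames and a final END frame, queueing them all on TX_QUEUE(sk) before anything is sent. A small worked sketch of the resulting PDU count (672 as max_pdu_size is an assumption here, not a value from these hunks):

	/* Sketch: how many I-frames an SDU becomes under SAR. */
	static unsigned int sar_pdu_count(size_t sdu_len, size_t max_pdu_size)
	{
		if (sdu_len <= max_pdu_size)
			return 1;       /* sent as L2CAP_SDU_UNSEGMENTED */

		/* one START carrying max_pdu_size bytes, then CONTINUE/END pieces */
		return 1 + DIV_ROUND_UP(sdu_len - max_pdu_size, max_pdu_size);
	}

	/* e.g. sar_pdu_count(1500, 672) == 3: START(672) + CONTINUE(672) + END(156) */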
@@ -1200,16 +1600,73 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms | |||
1200 | return -EOPNOTSUPP; | 1600 | return -EOPNOTSUPP; |
1201 | 1601 | ||
1202 | /* Check outgoing MTU */ | 1602 | /* Check outgoing MTU */ |
1203 | if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu) | 1603 | if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC |
1604 | && len > pi->omtu) | ||
1204 | return -EINVAL; | 1605 | return -EINVAL; |
1205 | 1606 | ||
1206 | lock_sock(sk); | 1607 | lock_sock(sk); |
1207 | 1608 | ||
1208 | if (sk->sk_state == BT_CONNECTED) | 1609 | if (sk->sk_state != BT_CONNECTED) { |
1209 | err = l2cap_do_send(sk, msg, len); | ||
1210 | else | ||
1211 | err = -ENOTCONN; | 1610 | err = -ENOTCONN; |
1611 | goto done; | ||
1612 | } | ||
1613 | |||
1614 | /* Connectionless channel */ | ||
1615 | if (sk->sk_type == SOCK_DGRAM) { | ||
1616 | skb = l2cap_create_connless_pdu(sk, msg, len); | ||
1617 | err = l2cap_do_send(sk, skb); | ||
1618 | goto done; | ||
1619 | } | ||
1620 | |||
1621 | switch (pi->mode) { | ||
1622 | case L2CAP_MODE_BASIC: | ||
1623 | /* Create a basic PDU */ | ||
1624 | skb = l2cap_create_basic_pdu(sk, msg, len); | ||
1625 | if (IS_ERR(skb)) { | ||
1626 | err = PTR_ERR(skb); | ||
1627 | goto done; | ||
1628 | } | ||
1629 | |||
1630 | err = l2cap_do_send(sk, skb); | ||
1631 | if (!err) | ||
1632 | err = len; | ||
1633 | break; | ||
1634 | |||
1635 | case L2CAP_MODE_ERTM: | ||
1636 | case L2CAP_MODE_STREAMING: | ||
1637 | /* Entire SDU fits into one PDU */ | ||
1638 | if (len <= pi->max_pdu_size) { | ||
1639 | control = L2CAP_SDU_UNSEGMENTED; | ||
1640 | skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0); | ||
1641 | if (IS_ERR(skb)) { | ||
1642 | err = PTR_ERR(skb); | ||
1643 | goto done; | ||
1644 | } | ||
1645 | __skb_queue_tail(TX_QUEUE(sk), skb); | ||
1646 | if (sk->sk_send_head == NULL) | ||
1647 | sk->sk_send_head = skb; | ||
1648 | } else { | ||
1649 | /* Segment SDU into multiples PDUs */ | ||
1650 | err = l2cap_sar_segment_sdu(sk, msg, len); | ||
1651 | if (err < 0) | ||
1652 | goto done; | ||
1653 | } | ||
1654 | |||
1655 | if (pi->mode == L2CAP_MODE_STREAMING) | ||
1656 | err = l2cap_streaming_send(sk); | ||
1657 | else | ||
1658 | err = l2cap_ertm_send(sk); | ||
1659 | |||
1660 | if (!err) | ||
1661 | err = len; | ||
1662 | break; | ||
1663 | |||
1664 | default: | ||
1665 | BT_DBG("bad state %1.1x", pi->mode); | ||
1666 | err = -EINVAL; | ||
1667 | } | ||
1212 | 1668 | ||
1669 | done: | ||
1213 | release_sock(sk); | 1670 | release_sock(sk); |
1214 | return err; | 1671 | return err; |
1215 | } | 1672 | } |
@@ -1257,7 +1714,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us | |||
1257 | opts.imtu = l2cap_pi(sk)->imtu; | 1714 | opts.imtu = l2cap_pi(sk)->imtu; |
1258 | opts.omtu = l2cap_pi(sk)->omtu; | 1715 | opts.omtu = l2cap_pi(sk)->omtu; |
1259 | opts.flush_to = l2cap_pi(sk)->flush_to; | 1716 | opts.flush_to = l2cap_pi(sk)->flush_to; |
1260 | opts.mode = L2CAP_MODE_BASIC; | 1717 | opts.mode = l2cap_pi(sk)->mode; |
1718 | opts.fcs = l2cap_pi(sk)->fcs; | ||
1261 | 1719 | ||
1262 | len = min_t(unsigned int, sizeof(opts), optlen); | 1720 | len = min_t(unsigned int, sizeof(opts), optlen); |
1263 | if (copy_from_user((char *) &opts, optval, len)) { | 1721 | if (copy_from_user((char *) &opts, optval, len)) { |
@@ -1265,8 +1723,10 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us | |||
1265 | break; | 1723 | break; |
1266 | } | 1724 | } |
1267 | 1725 | ||
1268 | l2cap_pi(sk)->imtu = opts.imtu; | 1726 | l2cap_pi(sk)->imtu = opts.imtu; |
1269 | l2cap_pi(sk)->omtu = opts.omtu; | 1727 | l2cap_pi(sk)->omtu = opts.omtu; |
1728 | l2cap_pi(sk)->mode = opts.mode; | ||
1729 | l2cap_pi(sk)->fcs = opts.fcs; | ||
1270 | break; | 1730 | break; |
1271 | 1731 | ||
1272 | case L2CAP_LM: | 1732 | case L2CAP_LM: |
@@ -1379,7 +1839,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us | |||
1379 | opts.imtu = l2cap_pi(sk)->imtu; | 1839 | opts.imtu = l2cap_pi(sk)->imtu; |
1380 | opts.omtu = l2cap_pi(sk)->omtu; | 1840 | opts.omtu = l2cap_pi(sk)->omtu; |
1381 | opts.flush_to = l2cap_pi(sk)->flush_to; | 1841 | opts.flush_to = l2cap_pi(sk)->flush_to; |
1382 | opts.mode = L2CAP_MODE_BASIC; | 1842 | opts.mode = l2cap_pi(sk)->mode; |
1843 | opts.fcs = l2cap_pi(sk)->fcs; | ||
1383 | 1844 | ||
1384 | len = min_t(unsigned int, len, sizeof(opts)); | 1845 | len = min_t(unsigned int, len, sizeof(opts)); |
1385 | if (copy_to_user(optval, (char *) &opts, len)) | 1846 | if (copy_to_user(optval, (char *) &opts, len)) |
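Since L2CAP_OPTIONS now round-trips mode and fcs instead of hard-coding basic mode, user space can ask for ERTM or streaming per socket (still gated by enable_ertm). A hedged user-space sketch, assuming Bluetooth headers that already expose the mode/fcs fields added in this series:

	#include <sys/socket.h>
	#include <bluetooth/bluetooth.h>
	#include <bluetooth/l2cap.h>

	/* Sketch (user space): request ERTM with CRC-16 FCS before connect(). */
	static int request_ertm(int sk)
	{
		struct l2cap_options opts;
		socklen_t optlen = sizeof(opts);

		if (getsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen) < 0)
			return -1;

		opts.mode = L2CAP_MODE_ERTM;    /* or L2CAP_MODE_STREAMING */
		opts.fcs  = L2CAP_FCS_CRC16;    /* L2CAP_FCS_NONE to offer no FCS */

		return setsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts));
	}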
@@ -1708,16 +2169,108 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val) | |||
1708 | *ptr += L2CAP_CONF_OPT_SIZE + len; | 2169 | *ptr += L2CAP_CONF_OPT_SIZE + len; |
1709 | } | 2170 | } |
1710 | 2171 | ||
2172 | static int l2cap_mode_supported(__u8 mode, __u32 feat_mask) | ||
2173 | { | ||
2174 | u32 local_feat_mask = l2cap_feat_mask; | ||
2175 | if (enable_ertm) | ||
2176 | local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING; | ||
2177 | |||
2178 | switch (mode) { | ||
2179 | case L2CAP_MODE_ERTM: | ||
2180 | return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask; | ||
2181 | case L2CAP_MODE_STREAMING: | ||
2182 | return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask; | ||
2183 | default: | ||
2184 | return 0x00; | ||
2185 | } | ||
2186 | } | ||
2187 | |||
2188 | static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) | ||
2189 | { | ||
2190 | switch (mode) { | ||
2191 | case L2CAP_MODE_STREAMING: | ||
2192 | case L2CAP_MODE_ERTM: | ||
2193 | if (l2cap_mode_supported(mode, remote_feat_mask)) | ||
2194 | return mode; | ||
2195 | /* fall through */ | ||
2196 | default: | ||
2197 | return L2CAP_MODE_BASIC; | ||
2198 | } | ||
2199 | } | ||
2200 | |||
1711 | static int l2cap_build_conf_req(struct sock *sk, void *data) | 2201 | static int l2cap_build_conf_req(struct sock *sk, void *data) |
1712 | { | 2202 | { |
1713 | struct l2cap_pinfo *pi = l2cap_pi(sk); | 2203 | struct l2cap_pinfo *pi = l2cap_pi(sk); |
1714 | struct l2cap_conf_req *req = data; | 2204 | struct l2cap_conf_req *req = data; |
2205 | struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM }; | ||
1715 | void *ptr = req->data; | 2206 | void *ptr = req->data; |
1716 | 2207 | ||
1717 | BT_DBG("sk %p", sk); | 2208 | BT_DBG("sk %p", sk); |
1718 | 2209 | ||
1719 | if (pi->imtu != L2CAP_DEFAULT_MTU) | 2210 | if (pi->num_conf_req || pi->num_conf_rsp) |
1720 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); | 2211 | goto done; |
2212 | |||
2213 | switch (pi->mode) { | ||
2214 | case L2CAP_MODE_STREAMING: | ||
2215 | case L2CAP_MODE_ERTM: | ||
2216 | pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; | ||
2217 | if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask)) | ||
2218 | l2cap_send_disconn_req(pi->conn, sk); | ||
2219 | break; | ||
2220 | default: | ||
2221 | pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask); | ||
2222 | break; | ||
2223 | } | ||
2224 | |||
2225 | done: | ||
2226 | switch (pi->mode) { | ||
2227 | case L2CAP_MODE_BASIC: | ||
2228 | if (pi->imtu != L2CAP_DEFAULT_MTU) | ||
2229 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); | ||
2230 | break; | ||
2231 | |||
2232 | case L2CAP_MODE_ERTM: | ||
2233 | rfc.mode = L2CAP_MODE_ERTM; | ||
2234 | rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; | ||
2235 | rfc.max_transmit = L2CAP_DEFAULT_MAX_TX; | ||
2236 | rfc.retrans_timeout = 0; | ||
2237 | rfc.monitor_timeout = 0; | ||
2238 | rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); | ||
2239 | |||
2240 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | ||
2241 | sizeof(rfc), (unsigned long) &rfc); | ||
2242 | |||
2243 | if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) | ||
2244 | break; | ||
2245 | |||
2246 | if (pi->fcs == L2CAP_FCS_NONE || | ||
2247 | pi->conf_state & L2CAP_CONF_NO_FCS_RECV) { | ||
2248 | pi->fcs = L2CAP_FCS_NONE; | ||
2249 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs); | ||
2250 | } | ||
2251 | break; | ||
2252 | |||
2253 | case L2CAP_MODE_STREAMING: | ||
2254 | rfc.mode = L2CAP_MODE_STREAMING; | ||
2255 | rfc.txwin_size = 0; | ||
2256 | rfc.max_transmit = 0; | ||
2257 | rfc.retrans_timeout = 0; | ||
2258 | rfc.monitor_timeout = 0; | ||
2259 | rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); | ||
2260 | |||
2261 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | ||
2262 | sizeof(rfc), (unsigned long) &rfc); | ||
2263 | |||
2264 | if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) | ||
2265 | break; | ||
2266 | |||
2267 | if (pi->fcs == L2CAP_FCS_NONE || | ||
2268 | pi->conf_state & L2CAP_CONF_NO_FCS_RECV) { | ||
2269 | pi->fcs = L2CAP_FCS_NONE; | ||
2270 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs); | ||
2271 | } | ||
2272 | break; | ||
2273 | } | ||
1721 | 2274 | ||
1722 | /* FIXME: Need actual value of the flush timeout */ | 2275 | /* FIXME: Need actual value of the flush timeout */ |
1723 | //if (flush_to != L2CAP_DEFAULT_FLUSH_TO) | 2276 | //if (flush_to != L2CAP_DEFAULT_FLUSH_TO) |
@@ -1767,6 +2320,12 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) | |||
1767 | memcpy(&rfc, (void *) val, olen); | 2320 | memcpy(&rfc, (void *) val, olen); |
1768 | break; | 2321 | break; |
1769 | 2322 | ||
2323 | case L2CAP_CONF_FCS: | ||
2324 | if (val == L2CAP_FCS_NONE) | ||
2325 | pi->conf_state |= L2CAP_CONF_NO_FCS_RECV; | ||
2326 | |||
2327 | break; | ||
2328 | |||
1770 | default: | 2329 | default: |
1771 | if (hint) | 2330 | if (hint) |
1772 | break; | 2331 | break; |
@@ -1777,30 +2336,83 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) | |||
1777 | } | 2336 | } |
1778 | } | 2337 | } |
1779 | 2338 | ||
2339 | if (pi->num_conf_rsp || pi->num_conf_req) | ||
2340 | goto done; | ||
2341 | |||
2342 | switch (pi->mode) { | ||
2343 | case L2CAP_MODE_STREAMING: | ||
2344 | case L2CAP_MODE_ERTM: | ||
2345 | pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; | ||
2346 | if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask)) | ||
2347 | return -ECONNREFUSED; | ||
2348 | break; | ||
2349 | default: | ||
2350 | pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask); | ||
2351 | break; | ||
2352 | } | ||
2353 | |||
2354 | done: | ||
2355 | if (pi->mode != rfc.mode) { | ||
2356 | result = L2CAP_CONF_UNACCEPT; | ||
2357 | rfc.mode = pi->mode; | ||
2358 | |||
2359 | if (pi->num_conf_rsp == 1) | ||
2360 | return -ECONNREFUSED; | ||
2361 | |||
2362 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | ||
2363 | sizeof(rfc), (unsigned long) &rfc); | ||
2364 | } | ||
2365 | |||
2366 | |||
1780 | if (result == L2CAP_CONF_SUCCESS) { | 2367 | if (result == L2CAP_CONF_SUCCESS) { |
1781 | /* Configure output options and let the other side know | 2368 | /* Configure output options and let the other side know |
1782 | * which ones we don't like. */ | 2369 | * which ones we don't like. */ |
1783 | 2370 | ||
1784 | if (rfc.mode == L2CAP_MODE_BASIC) { | 2371 | if (mtu < L2CAP_DEFAULT_MIN_MTU) |
1785 | if (mtu < pi->omtu) | 2372 | result = L2CAP_CONF_UNACCEPT; |
1786 | result = L2CAP_CONF_UNACCEPT; | 2373 | else { |
1787 | else { | 2374 | pi->omtu = mtu; |
1788 | pi->omtu = mtu; | 2375 | pi->conf_state |= L2CAP_CONF_MTU_DONE; |
1789 | pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; | 2376 | } |
1790 | } | 2377 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); |
1791 | 2378 | ||
1792 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); | 2379 | switch (rfc.mode) { |
1793 | } else { | 2380 | case L2CAP_MODE_BASIC: |
2381 | pi->fcs = L2CAP_FCS_NONE; | ||
2382 | pi->conf_state |= L2CAP_CONF_MODE_DONE; | ||
2383 | break; | ||
2384 | |||
2385 | case L2CAP_MODE_ERTM: | ||
2386 | pi->remote_tx_win = rfc.txwin_size; | ||
2387 | pi->remote_max_tx = rfc.max_transmit; | ||
2388 | pi->max_pdu_size = rfc.max_pdu_size; | ||
2389 | |||
2390 | rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; | ||
2391 | rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; | ||
2392 | |||
2393 | pi->conf_state |= L2CAP_CONF_MODE_DONE; | ||
2394 | break; | ||
2395 | |||
2396 | case L2CAP_MODE_STREAMING: | ||
2397 | pi->remote_tx_win = rfc.txwin_size; | ||
2398 | pi->max_pdu_size = rfc.max_pdu_size; | ||
2399 | |||
2400 | pi->conf_state |= L2CAP_CONF_MODE_DONE; | ||
2401 | break; | ||
2402 | |||
2403 | default: | ||
1794 | result = L2CAP_CONF_UNACCEPT; | 2404 | result = L2CAP_CONF_UNACCEPT; |
1795 | 2405 | ||
1796 | memset(&rfc, 0, sizeof(rfc)); | 2406 | memset(&rfc, 0, sizeof(rfc)); |
1797 | rfc.mode = L2CAP_MODE_BASIC; | 2407 | rfc.mode = pi->mode; |
1798 | |||
1799 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | ||
1800 | sizeof(rfc), (unsigned long) &rfc); | ||
1801 | } | 2408 | } |
1802 | } | ||
1803 | 2409 | ||
2410 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | ||
2411 | sizeof(rfc), (unsigned long) &rfc); | ||
2412 | |||
2413 | if (result == L2CAP_CONF_SUCCESS) | ||
2414 | pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; | ||
2415 | } | ||
1804 | rsp->scid = cpu_to_le16(pi->dcid); | 2416 | rsp->scid = cpu_to_le16(pi->dcid); |
1805 | rsp->result = cpu_to_le16(result); | 2417 | rsp->result = cpu_to_le16(result); |
1806 | rsp->flags = cpu_to_le16(0x0000); | 2418 | rsp->flags = cpu_to_le16(0x0000); |
@@ -1808,6 +2420,73 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data) | |||
1808 | return ptr - data; | 2420 | return ptr - data; |
1809 | } | 2421 | } |
1810 | 2422 | ||
2423 | static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result) | ||
2424 | { | ||
2425 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
2426 | struct l2cap_conf_req *req = data; | ||
2427 | void *ptr = req->data; | ||
2428 | int type, olen; | ||
2429 | unsigned long val; | ||
2430 | struct l2cap_conf_rfc rfc; | ||
2431 | |||
2432 | BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data); | ||
2433 | |||
2434 | while (len >= L2CAP_CONF_OPT_SIZE) { | ||
2435 | len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); | ||
2436 | |||
2437 | switch (type) { | ||
2438 | case L2CAP_CONF_MTU: | ||
2439 | if (val < L2CAP_DEFAULT_MIN_MTU) { | ||
2440 | *result = L2CAP_CONF_UNACCEPT; | ||
2441 | pi->omtu = L2CAP_DEFAULT_MIN_MTU; | ||
2442 | } else | ||
2443 | pi->omtu = val; | ||
2444 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); | ||
2445 | break; | ||
2446 | |||
2447 | case L2CAP_CONF_FLUSH_TO: | ||
2448 | pi->flush_to = val; | ||
2449 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, | ||
2450 | 2, pi->flush_to); | ||
2451 | break; | ||
2452 | |||
2453 | case L2CAP_CONF_RFC: | ||
2454 | if (olen == sizeof(rfc)) | ||
2455 | memcpy(&rfc, (void *)val, olen); | ||
2456 | |||
2457 | if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) && | ||
2458 | rfc.mode != pi->mode) | ||
2459 | return -ECONNREFUSED; | ||
2460 | |||
2461 | pi->mode = rfc.mode; | ||
2462 | pi->fcs = 0; | ||
2463 | |||
2464 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | ||
2465 | sizeof(rfc), (unsigned long) &rfc); | ||
2466 | break; | ||
2467 | } | ||
2468 | } | ||
2469 | |||
2470 | if (*result == L2CAP_CONF_SUCCESS) { | ||
2471 | switch (rfc.mode) { | ||
2472 | case L2CAP_MODE_ERTM: | ||
2473 | pi->remote_tx_win = rfc.txwin_size; | ||
2474 | pi->retrans_timeout = rfc.retrans_timeout; | ||
2475 | pi->monitor_timeout = rfc.monitor_timeout; | ||
2476 | pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size); | ||
2477 | break; | ||
2478 | case L2CAP_MODE_STREAMING: | ||
2479 | pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size); | ||
2480 | break; | ||
2481 | } | ||
2482 | } | ||
2483 | |||
2484 | req->dcid = cpu_to_le16(pi->dcid); | ||
2485 | req->flags = cpu_to_le16(0x0000); | ||
2486 | |||
2487 | return ptr - data; | ||
2488 | } | ||
2489 | |||
1811 | static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags) | 2490 | static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags) |
1812 | { | 2491 | { |
1813 | struct l2cap_conf_rsp *rsp = data; | 2492 | struct l2cap_conf_rsp *rsp = data; |
@@ -1994,6 +2673,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
1994 | 2673 | ||
1995 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, | 2674 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, |
1996 | l2cap_build_conf_req(sk, req), req); | 2675 | l2cap_build_conf_req(sk, req), req); |
2676 | l2cap_pi(sk)->num_conf_req++; | ||
1997 | break; | 2677 | break; |
1998 | 2678 | ||
1999 | case L2CAP_CR_PEND: | 2679 | case L2CAP_CR_PEND: |
@@ -2052,10 +2732,13 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
2052 | 2732 | ||
2053 | /* Complete config. */ | 2733 | /* Complete config. */ |
2054 | len = l2cap_parse_conf_req(sk, rsp); | 2734 | len = l2cap_parse_conf_req(sk, rsp); |
2055 | if (len < 0) | 2735 | if (len < 0) { |
2736 | l2cap_send_disconn_req(conn, sk); | ||
2056 | goto unlock; | 2737 | goto unlock; |
2738 | } | ||
2057 | 2739 | ||
2058 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); | 2740 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); |
2741 | l2cap_pi(sk)->num_conf_rsp++; | ||
2059 | 2742 | ||
2060 | /* Reset config buffer. */ | 2743 | /* Reset config buffer. */ |
2061 | l2cap_pi(sk)->conf_len = 0; | 2744 | l2cap_pi(sk)->conf_len = 0; |
@@ -2064,7 +2747,22 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
2064 | goto unlock; | 2747 | goto unlock; |
2065 | 2748 | ||
2066 | if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { | 2749 | if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { |
2750 | if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) | ||
2751 | || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE) | ||
2752 | l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16; | ||
2753 | |||
2067 | sk->sk_state = BT_CONNECTED; | 2754 | sk->sk_state = BT_CONNECTED; |
2755 | l2cap_pi(sk)->next_tx_seq = 0; | ||
2756 | l2cap_pi(sk)->expected_ack_seq = 0; | ||
2757 | l2cap_pi(sk)->unacked_frames = 0; | ||
2758 | |||
2759 | setup_timer(&l2cap_pi(sk)->retrans_timer, | ||
2760 | l2cap_retrans_timeout, (unsigned long) sk); | ||
2761 | setup_timer(&l2cap_pi(sk)->monitor_timer, | ||
2762 | l2cap_monitor_timeout, (unsigned long) sk); | ||
2763 | |||
2764 | __skb_queue_head_init(TX_QUEUE(sk)); | ||
2765 | __skb_queue_head_init(SREJ_QUEUE(sk)); | ||
2068 | l2cap_chan_ready(sk); | 2766 | l2cap_chan_ready(sk); |
2069 | goto unlock; | 2767 | goto unlock; |
2070 | } | 2768 | } |
@@ -2073,6 +2771,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
2073 | u8 buf[64]; | 2771 | u8 buf[64]; |
2074 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, | 2772 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, |
2075 | l2cap_build_conf_req(sk, buf), buf); | 2773 | l2cap_build_conf_req(sk, buf), buf); |
2774 | l2cap_pi(sk)->num_conf_req++; | ||
2076 | } | 2775 | } |
2077 | 2776 | ||
2078 | unlock: | 2777 | unlock: |
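Once both configuration directions are done the channel goes BT_CONNECTED, the ERTM sequence state is zeroed, the retransmission and monitor timers are set up, and the TX/SREJ queues are initialised. The FCS decision just above reduces to: CRC-16 is used unless the peer sent an FCS "none" option (L2CAP_CONF_NO_FCS_RECV) and the local side also configured L2CAP_FCS_NONE. A quick truth table (an illustration, not code from the patch):

	/* peer offered FCS=none  | local pi->fcs     | negotiated result      */
	/* ----------------------- | ----------------- | ---------------------- */
	/*          no             | anything          | L2CAP_FCS_CRC16        */
	/*          yes            | L2CAP_FCS_CRC16   | L2CAP_FCS_CRC16        */
	/*          yes            | L2CAP_FCS_NONE    | L2CAP_FCS_NONE kept    */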
@@ -2102,29 +2801,32 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
2102 | break; | 2801 | break; |
2103 | 2802 | ||
2104 | case L2CAP_CONF_UNACCEPT: | 2803 | case L2CAP_CONF_UNACCEPT: |
2105 | if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) { | 2804 | if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { |
2106 | char req[128]; | 2805 | int len = cmd->len - sizeof(*rsp); |
2107 | /* It does not make sense to adjust L2CAP parameters | 2806 | char req[64]; |
2108 | * that are currently defined in the spec. We simply | 2807 | |
2109 | * resend config request that we sent earlier. It is | 2808 | /* throw out any old stored conf requests */ |
2110 | * stupid, but it helps qualification testing which | 2809 | result = L2CAP_CONF_SUCCESS; |
2111 | * expects at least some response from us. */ | 2810 | len = l2cap_parse_conf_rsp(sk, rsp->data, |
2112 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, | 2811 | len, req, &result); |
2113 | l2cap_build_conf_req(sk, req), req); | 2812 | if (len < 0) { |
2114 | goto done; | 2813 | l2cap_send_disconn_req(conn, sk); |
2814 | goto done; | ||
2815 | } | ||
2816 | |||
2817 | l2cap_send_cmd(conn, l2cap_get_ident(conn), | ||
2818 | L2CAP_CONF_REQ, len, req); | ||
2819 | l2cap_pi(sk)->num_conf_req++; | ||
2820 | if (result != L2CAP_CONF_SUCCESS) | ||
2821 | goto done; | ||
2822 | break; | ||
2115 | } | 2823 | } |
2116 | 2824 | ||
2117 | default: | 2825 | default: |
2118 | sk->sk_state = BT_DISCONN; | 2826 | sk->sk_state = BT_DISCONN; |
2119 | sk->sk_err = ECONNRESET; | 2827 | sk->sk_err = ECONNRESET; |
2120 | l2cap_sock_set_timer(sk, HZ * 5); | 2828 | l2cap_sock_set_timer(sk, HZ * 5); |
2121 | { | 2829 | l2cap_send_disconn_req(conn, sk); |
2122 | struct l2cap_disconn_req req; | ||
2123 | req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); | ||
2124 | req.scid = cpu_to_le16(l2cap_pi(sk)->scid); | ||
2125 | l2cap_send_cmd(conn, l2cap_get_ident(conn), | ||
2126 | L2CAP_DISCONN_REQ, sizeof(req), &req); | ||
2127 | } | ||
2128 | goto done; | 2830 | goto done; |
2129 | } | 2831 | } |
2130 | 2832 | ||
@@ -2134,7 +2836,16 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
2134 | l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; | 2836 | l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; |
2135 | 2837 | ||
2136 | if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { | 2838 | if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { |
2839 | if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) | ||
2840 | || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE) | ||
2841 | l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16; | ||
2842 | |||
2137 | sk->sk_state = BT_CONNECTED; | 2843 | sk->sk_state = BT_CONNECTED; |
2844 | l2cap_pi(sk)->expected_tx_seq = 0; | ||
2845 | l2cap_pi(sk)->buffer_seq = 0; | ||
2846 | l2cap_pi(sk)->num_to_ack = 0; | ||
2847 | __skb_queue_head_init(TX_QUEUE(sk)); | ||
2848 | __skb_queue_head_init(SREJ_QUEUE(sk)); | ||
2138 | l2cap_chan_ready(sk); | 2849 | l2cap_chan_ready(sk); |
2139 | } | 2850 | } |
2140 | 2851 | ||
@@ -2165,6 +2876,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd | |||
2165 | 2876 | ||
2166 | sk->sk_shutdown = SHUTDOWN_MASK; | 2877 | sk->sk_shutdown = SHUTDOWN_MASK; |
2167 | 2878 | ||
2879 | skb_queue_purge(TX_QUEUE(sk)); | ||
2880 | skb_queue_purge(SREJ_QUEUE(sk)); | ||
2881 | del_timer(&l2cap_pi(sk)->retrans_timer); | ||
2882 | del_timer(&l2cap_pi(sk)->monitor_timer); | ||
2883 | |||
2168 | l2cap_chan_del(sk, ECONNRESET); | 2884 | l2cap_chan_del(sk, ECONNRESET); |
2169 | bh_unlock_sock(sk); | 2885 | bh_unlock_sock(sk); |
2170 | 2886 | ||
@@ -2187,6 +2903,11 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd | |||
2187 | if (!sk) | 2903 | if (!sk) |
2188 | return 0; | 2904 | return 0; |
2189 | 2905 | ||
2906 | skb_queue_purge(TX_QUEUE(sk)); | ||
2907 | skb_queue_purge(SREJ_QUEUE(sk)); | ||
2908 | del_timer(&l2cap_pi(sk)->retrans_timer); | ||
2909 | del_timer(&l2cap_pi(sk)->monitor_timer); | ||
2910 | |||
2190 | l2cap_chan_del(sk, 0); | 2911 | l2cap_chan_del(sk, 0); |
2191 | bh_unlock_sock(sk); | 2912 | bh_unlock_sock(sk); |
2192 | 2913 | ||
@@ -2205,10 +2926,14 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm | |||
2205 | 2926 | ||
2206 | if (type == L2CAP_IT_FEAT_MASK) { | 2927 | if (type == L2CAP_IT_FEAT_MASK) { |
2207 | u8 buf[8]; | 2928 | u8 buf[8]; |
2929 | u32 feat_mask = l2cap_feat_mask; | ||
2208 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; | 2930 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; |
2209 | rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); | 2931 | rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); |
2210 | rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); | 2932 | rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); |
2211 | put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data); | 2933 | if (enable_ertm) |
2934 | feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | ||
2935 | | L2CAP_FEAT_FCS; | ||
2936 | put_unaligned_le32(feat_mask, rsp->data); | ||
2212 | l2cap_send_cmd(conn, cmd->ident, | 2937 | l2cap_send_cmd(conn, cmd->ident, |
2213 | L2CAP_INFO_RSP, sizeof(buf), buf); | 2938 | L2CAP_INFO_RSP, sizeof(buf), buf); |
2214 | } else if (type == L2CAP_IT_FIXED_CHAN) { | 2939 | } else if (type == L2CAP_IT_FIXED_CHAN) { |
@@ -2359,9 +3084,374 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk | |||
2359 | kfree_skb(skb); | 3084 | kfree_skb(skb); |
2360 | } | 3085 | } |
2361 | 3086 | ||
3087 | static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb) | ||
3088 | { | ||
3089 | u16 our_fcs, rcv_fcs; | ||
3090 | int hdr_size = L2CAP_HDR_SIZE + 2; | ||
3091 | |||
3092 | if (pi->fcs == L2CAP_FCS_CRC16) { | ||
3093 | skb_trim(skb, skb->len - 2); | ||
3094 | rcv_fcs = get_unaligned_le16(skb->data + skb->len); | ||
3095 | our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); | ||
3096 | |||
3097 | if (our_fcs != rcv_fcs) | ||
3098 | return -EINVAL; | ||
3099 | } | ||
3100 | return 0; | ||
3101 | } | ||
3102 | |||
3103 | static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar) | ||
3104 | { | ||
3105 | struct sk_buff *next_skb; | ||
3106 | |||
3107 | bt_cb(skb)->tx_seq = tx_seq; | ||
3108 | bt_cb(skb)->sar = sar; | ||
3109 | |||
3110 | next_skb = skb_peek(SREJ_QUEUE(sk)); | ||
3111 | if (!next_skb) { | ||
3112 | __skb_queue_tail(SREJ_QUEUE(sk), skb); | ||
3113 | return; | ||
3114 | } | ||
3115 | |||
3116 | do { | ||
3117 | if (bt_cb(next_skb)->tx_seq > tx_seq) { | ||
3118 | __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb); | ||
3119 | return; | ||
3120 | } | ||
3121 | |||
3122 | if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb)) | ||
3123 | break; | ||
3124 | |||
3126 | } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb))); | ||
3126 | |||
3127 | __skb_queue_tail(SREJ_QUEUE(sk), skb); | ||
3128 | } | ||
3129 | |||
3130 | static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control) | ||
3131 | { | ||
3132 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
3133 | struct sk_buff *_skb; | ||
3134 | int err = -EINVAL; | ||
3135 | |||
3136 | switch (control & L2CAP_CTRL_SAR) { | ||
3137 | case L2CAP_SDU_UNSEGMENTED: | ||
3138 | if (pi->conn_state & L2CAP_CONN_SAR_SDU) { | ||
3139 | kfree_skb(pi->sdu); | ||
3140 | break; | ||
3141 | } | ||
3142 | |||
3143 | err = sock_queue_rcv_skb(sk, skb); | ||
3144 | if (!err) | ||
3145 | return 0; | ||
3146 | |||
3147 | break; | ||
3148 | |||
3149 | case L2CAP_SDU_START: | ||
3150 | if (pi->conn_state & L2CAP_CONN_SAR_SDU) { | ||
3151 | kfree_skb(pi->sdu); | ||
3152 | break; | ||
3153 | } | ||
3154 | |||
3155 | pi->sdu_len = get_unaligned_le16(skb->data); | ||
3156 | skb_pull(skb, 2); | ||
3157 | |||
3158 | pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC); | ||
3159 | if (!pi->sdu) { | ||
3160 | err = -ENOMEM; | ||
3161 | break; | ||
3162 | } | ||
3163 | |||
3164 | memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); | ||
3165 | |||
3166 | pi->conn_state |= L2CAP_CONN_SAR_SDU; | ||
3167 | pi->partial_sdu_len = skb->len; | ||
3168 | err = 0; | ||
3169 | break; | ||
3170 | |||
3171 | case L2CAP_SDU_CONTINUE: | ||
3172 | if (!(pi->conn_state & L2CAP_CONN_SAR_SDU)) | ||
3173 | break; | ||
3174 | |||
3175 | memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); | ||
3176 | |||
3177 | pi->partial_sdu_len += skb->len; | ||
3178 | if (pi->partial_sdu_len > pi->sdu_len) | ||
3179 | kfree_skb(pi->sdu); | ||
3180 | else | ||
3181 | err = 0; | ||
3182 | |||
3183 | break; | ||
3184 | |||
3185 | case L2CAP_SDU_END: | ||
3186 | if (!(pi->conn_state & L2CAP_CONN_SAR_SDU)) | ||
3187 | break; | ||
3188 | |||
3189 | memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); | ||
3190 | |||
3191 | pi->conn_state &= ~L2CAP_CONN_SAR_SDU; | ||
3192 | pi->partial_sdu_len += skb->len; | ||
3193 | |||
3194 | if (pi->partial_sdu_len == pi->sdu_len) { | ||
3195 | _skb = skb_clone(pi->sdu, GFP_ATOMIC); | ||
3196 | err = sock_queue_rcv_skb(sk, _skb); | ||
3197 | if (err < 0) | ||
3198 | kfree_skb(_skb); | ||
3199 | } | ||
3200 | kfree_skb(pi->sdu); | ||
3201 | err = 0; | ||
3202 | |||
3203 | break; | ||
3204 | } | ||
3205 | |||
3206 | kfree_skb(skb); | ||
3207 | return err; | ||
3208 | } | ||
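l2cap_sar_reassembly_sdu() above drives reassembly off the two SAR bits of the control field; a START fragment additionally carries the total SDU length in its first two bytes. A small illustrative decoder using the same constants (not part of the patch):

    /* Illustrative only: map the SAR bits of an ERTM/streaming control
     * field to a human-readable name. */
    static const char *example_sar_name(u16 control)
    {
            switch (control & L2CAP_CTRL_SAR) {
            case L2CAP_SDU_UNSEGMENTED:
                    return "unsegmented";
            case L2CAP_SDU_START:
                    return "start";
            case L2CAP_SDU_CONTINUE:
                    return "continue";
            case L2CAP_SDU_END:
                    return "end";
            }
            return "invalid";
    }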
3209 | |||
3210 | static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq) | ||
3211 | { | ||
3212 | struct sk_buff *skb; | ||
3213 | u16 control = 0; | ||
3214 | |||
3215 | while ((skb = skb_peek(SREJ_QUEUE(sk)))) { | ||
3216 | if (bt_cb(skb)->tx_seq != tx_seq) | ||
3217 | break; | ||
3218 | |||
3219 | skb = skb_dequeue(SREJ_QUEUE(sk)); | ||
3220 | control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; | ||
3221 | l2cap_sar_reassembly_sdu(sk, skb, control); | ||
3222 | l2cap_pi(sk)->buffer_seq_srej = | ||
3223 | (l2cap_pi(sk)->buffer_seq_srej + 1) % 64; | ||
3224 | tx_seq++; | ||
3225 | } | ||
3226 | } | ||
3227 | |||
3228 | static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq) | ||
3229 | { | ||
3230 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
3231 | struct srej_list *l, *tmp; | ||
3232 | u16 control; | ||
3233 | |||
3234 | list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) { | ||
3235 | if (l->tx_seq == tx_seq) { | ||
3236 | list_del(&l->list); | ||
3237 | kfree(l); | ||
3238 | return; | ||
3239 | } | ||
3240 | control = L2CAP_SUPER_SELECT_REJECT; | ||
3241 | control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; | ||
3242 | l2cap_send_sframe(pi, control); | ||
3243 | list_del(&l->list); | ||
3244 | list_add_tail(&l->list, SREJ_LIST(sk)); | ||
3245 | } | ||
3246 | } | ||
3247 | |||
3248 | static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq) | ||
3249 | { | ||
3250 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
3251 | struct srej_list *new; | ||
3252 | u16 control; | ||
3253 | |||
3254 | while (tx_seq != pi->expected_tx_seq) { | ||
3255 | control = L2CAP_SUPER_SELECT_REJECT; | ||
3256 | control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; | ||
3257 | if (pi->conn_state & L2CAP_CONN_SEND_PBIT) { | ||
3258 | control |= L2CAP_CTRL_POLL; | ||
3259 | pi->conn_state &= ~L2CAP_CONN_SEND_PBIT; | ||
3260 | } | ||
3261 | l2cap_send_sframe(pi, control); | ||
3262 | |||
3263 | new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); | ||
3264 | new->tx_seq = pi->expected_tx_seq++; | ||
3265 | list_add_tail(&new->list, SREJ_LIST(sk)); | ||
3266 | } | ||
3267 | pi->expected_tx_seq++; | ||
3268 | } | ||
3269 | |||
3270 | static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) | ||
3271 | { | ||
3272 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
3273 | u8 tx_seq = __get_txseq(rx_control); | ||
3274 | u16 tx_control = 0; | ||
3275 | u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT; | ||
3276 | int err = 0; | ||
3277 | |||
3278 | BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len); | ||
3279 | |||
3280 | if (tx_seq == pi->expected_tx_seq) | ||
3281 | goto expected; | ||
3282 | |||
3283 | if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { | ||
3284 | struct srej_list *first; | ||
3285 | |||
3286 | first = list_first_entry(SREJ_LIST(sk), | ||
3287 | struct srej_list, list); | ||
3288 | if (tx_seq == first->tx_seq) { | ||
3289 | l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); | ||
3290 | l2cap_check_srej_gap(sk, tx_seq); | ||
3291 | |||
3292 | list_del(&first->list); | ||
3293 | kfree(first); | ||
3294 | |||
3295 | if (list_empty(SREJ_LIST(sk))) { | ||
3296 | pi->buffer_seq = pi->buffer_seq_srej; | ||
3297 | pi->conn_state &= ~L2CAP_CONN_SREJ_SENT; | ||
3298 | } | ||
3299 | } else { | ||
3300 | struct srej_list *l; | ||
3301 | l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); | ||
3302 | |||
3303 | list_for_each_entry(l, SREJ_LIST(sk), list) { | ||
3304 | if (l->tx_seq == tx_seq) { | ||
3305 | l2cap_resend_srejframe(sk, tx_seq); | ||
3306 | return 0; | ||
3307 | } | ||
3308 | } | ||
3309 | l2cap_send_srejframe(sk, tx_seq); | ||
3310 | } | ||
3311 | } else { | ||
3312 | pi->conn_state |= L2CAP_CONN_SREJ_SENT; | ||
3313 | |||
3314 | INIT_LIST_HEAD(SREJ_LIST(sk)); | ||
3315 | pi->buffer_seq_srej = pi->buffer_seq; | ||
3316 | |||
3317 | __skb_queue_head_init(SREJ_QUEUE(sk)); | ||
3318 | l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); | ||
3319 | |||
3320 | pi->conn_state |= L2CAP_CONN_SEND_PBIT; | ||
3321 | |||
3322 | l2cap_send_srejframe(sk, tx_seq); | ||
3323 | } | ||
3324 | return 0; | ||
3325 | |||
3326 | expected: | ||
3327 | pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; | ||
3328 | |||
3329 | if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { | ||
3330 | l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); | ||
3331 | return 0; | ||
3332 | } | ||
3333 | |||
3334 | pi->buffer_seq = (pi->buffer_seq + 1) % 64; | ||
3335 | |||
3336 | err = l2cap_sar_reassembly_sdu(sk, skb, rx_control); | ||
3337 | if (err < 0) | ||
3338 | return err; | ||
3339 | |||
3340 | pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK; | ||
3341 | if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) { | ||
3342 | tx_control |= L2CAP_SUPER_RCV_READY; | ||
3343 | tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; | ||
3344 | l2cap_send_sframe(pi, tx_control); | ||
3345 | } | ||
3346 | return 0; | ||
3347 | } | ||
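Sequence numbers in the I-frame path above live in a 6-bit (modulo-64) space, which is why expected_tx_seq and buffer_seq are advanced with "% 64". A tiny hypothetical helper, shown only to make the wrap-around arithmetic explicit (it is not part of the patch):

    /* Hypothetical: forward distance from 'from' to 'to' in the 64-frame
     * ERTM sequence space. */
    static inline u8 example_seq_distance(u8 from, u8 to)
    {
            return (to - from) & 0x3f;      /* 0x3f == 63, i.e. modulo 64 */
    }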
3348 | |||
3349 | static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) | ||
3350 | { | ||
3351 | struct l2cap_pinfo *pi = l2cap_pi(sk); | ||
3352 | u8 tx_seq = __get_reqseq(rx_control); | ||
3353 | |||
3354 | BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len); | ||
3355 | |||
3356 | switch (rx_control & L2CAP_CTRL_SUPERVISE) { | ||
3357 | case L2CAP_SUPER_RCV_READY: | ||
3358 | if (rx_control & L2CAP_CTRL_POLL) { | ||
3359 | u16 control = L2CAP_CTRL_FINAL; | ||
3360 | control |= L2CAP_SUPER_RCV_READY | | ||
3361 | (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT); | ||
3362 | l2cap_send_sframe(l2cap_pi(sk), control); | ||
3363 | pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; | ||
3364 | |||
3365 | } else if (rx_control & L2CAP_CTRL_FINAL) { | ||
3366 | pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; | ||
3367 | pi->expected_ack_seq = tx_seq; | ||
3368 | l2cap_drop_acked_frames(sk); | ||
3369 | |||
3370 | if (!(pi->conn_state & L2CAP_CONN_WAIT_F)) | ||
3371 | break; | ||
3372 | |||
3373 | pi->conn_state &= ~L2CAP_CONN_WAIT_F; | ||
3374 | del_timer(&pi->monitor_timer); | ||
3375 | |||
3376 | if (pi->unacked_frames > 0) | ||
3377 | __mod_retrans_timer(); | ||
3378 | } else { | ||
3379 | pi->expected_ack_seq = tx_seq; | ||
3380 | l2cap_drop_acked_frames(sk); | ||
3381 | |||
3382 | if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) | ||
3383 | && (pi->unacked_frames > 0)) | ||
3384 | __mod_retrans_timer(); | ||
3385 | |||
3386 | l2cap_ertm_send(sk); | ||
3387 | pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; | ||
3388 | } | ||
3389 | break; | ||
3390 | |||
3391 | case L2CAP_SUPER_REJECT: | ||
3392 | pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; | ||
3393 | |||
3394 | pi->expected_ack_seq = __get_reqseq(rx_control); | ||
3395 | l2cap_drop_acked_frames(sk); | ||
3396 | |||
3397 | sk->sk_send_head = TX_QUEUE(sk)->next; | ||
3398 | pi->next_tx_seq = pi->expected_ack_seq; | ||
3399 | |||
3400 | l2cap_ertm_send(sk); | ||
3401 | |||
3402 | break; | ||
3403 | |||
3404 | case L2CAP_SUPER_SELECT_REJECT: | ||
3405 | pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; | ||
3406 | |||
3407 | if (rx_control & L2CAP_CTRL_POLL) { | ||
3408 | l2cap_retransmit_frame(sk, tx_seq); | ||
3409 | pi->expected_ack_seq = tx_seq; | ||
3410 | l2cap_drop_acked_frames(sk); | ||
3411 | l2cap_ertm_send(sk); | ||
3412 | if (pi->conn_state & L2CAP_CONN_WAIT_F) { | ||
3413 | pi->srej_save_reqseq = tx_seq; | ||
3414 | pi->conn_state |= L2CAP_CONN_SREJ_ACT; | ||
3415 | } | ||
3416 | } else if (rx_control & L2CAP_CTRL_FINAL) { | ||
3417 | if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) && | ||
3418 | pi->srej_save_reqseq == tx_seq) | ||
3419 | pi->conn_state &= ~L2CAP_CONN_SREJ_ACT; | ||
3420 | else | ||
3421 | l2cap_retransmit_frame(sk, tx_seq); | ||
3422 | } | ||
3423 | else { | ||
3424 | l2cap_retransmit_frame(sk, tx_seq); | ||
3425 | if (pi->conn_state & L2CAP_CONN_WAIT_F) { | ||
3426 | pi->srej_save_reqseq = tx_seq; | ||
3427 | pi->conn_state |= L2CAP_CONN_SREJ_ACT; | ||
3428 | } | ||
3429 | } | ||
3430 | break; | ||
3431 | |||
3432 | case L2CAP_SUPER_RCV_NOT_READY: | ||
3433 | pi->conn_state |= L2CAP_CONN_REMOTE_BUSY; | ||
3434 | pi->expected_ack_seq = tx_seq; | ||
3435 | l2cap_drop_acked_frames(sk); | ||
3436 | |||
3437 | del_timer(&l2cap_pi(sk)->retrans_timer); | ||
3438 | if (rx_control & L2CAP_CTRL_POLL) { | ||
3439 | u16 control = L2CAP_CTRL_FINAL; | ||
3440 | l2cap_send_rr_or_rnr(l2cap_pi(sk), control); | ||
3441 | } | ||
3442 | break; | ||
3443 | } | ||
3444 | |||
3445 | return 0; | ||
3446 | } | ||
3447 | |||
2362 | static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) | 3448 | static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) |
2363 | { | 3449 | { |
2364 | struct sock *sk; | 3450 | struct sock *sk; |
3451 | struct l2cap_pinfo *pi; | ||
3452 | u16 control, len; | ||
3453 | u8 tx_seq; | ||
3454 | int err; | ||
2365 | 3455 | ||
2366 | sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); | 3456 | sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); |
2367 | if (!sk) { | 3457 | if (!sk) { |
@@ -2369,22 +3459,91 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk | |||
2369 | goto drop; | 3459 | goto drop; |
2370 | } | 3460 | } |
2371 | 3461 | ||
3462 | pi = l2cap_pi(sk); | ||
3463 | |||
2372 | BT_DBG("sk %p, len %d", sk, skb->len); | 3464 | BT_DBG("sk %p, len %d", sk, skb->len); |
2373 | 3465 | ||
2374 | if (sk->sk_state != BT_CONNECTED) | 3466 | if (sk->sk_state != BT_CONNECTED) |
2375 | goto drop; | 3467 | goto drop; |
2376 | 3468 | ||
2377 | if (l2cap_pi(sk)->imtu < skb->len) | 3469 | switch (pi->mode) { |
2378 | goto drop; | 3470 | case L2CAP_MODE_BASIC: |
3471 | /* If socket recv buffers overflows we drop data here | ||
3472 | * which is *bad* because L2CAP has to be reliable. | ||
3473 | * But we don't have any other choice. L2CAP doesn't | ||
3474 | * provide flow control mechanism. */ | ||
2379 | 3475 | ||
2380 | /* If socket recv buffers overflows we drop data here | 3476 | if (pi->imtu < skb->len) |
2381 | * which is *bad* because L2CAP has to be reliable. | 3477 | goto drop; |
2382 | * But we don't have any other choice. L2CAP doesn't | 3478 | |
2383 | * provide flow control mechanism. */ | 3479 | if (!sock_queue_rcv_skb(sk, skb)) |
3480 | goto done; | ||
3481 | break; | ||
3482 | |||
3483 | case L2CAP_MODE_ERTM: | ||
3484 | control = get_unaligned_le16(skb->data); | ||
3485 | skb_pull(skb, 2); | ||
3486 | len = skb->len; | ||
3487 | |||
3488 | if (__is_sar_start(control)) | ||
3489 | len -= 2; | ||
3490 | |||
3491 | if (pi->fcs == L2CAP_FCS_CRC16) | ||
3492 | len -= 2; | ||
3493 | |||
3494 | /* | ||
3495 | * We can just drop the corrupted I-frame here. | ||
3496 | * Receiver will miss it and start proper recovery | ||
3497 | * procedures and ask retransmission. | ||
3498 | */ | ||
3499 | if (len > L2CAP_DEFAULT_MAX_PDU_SIZE) | ||
3500 | goto drop; | ||
3501 | |||
3502 | if (l2cap_check_fcs(pi, skb)) | ||
3503 | goto drop; | ||
3504 | |||
3505 | if (__is_iframe(control)) | ||
3506 | err = l2cap_data_channel_iframe(sk, control, skb); | ||
3507 | else | ||
3508 | err = l2cap_data_channel_sframe(sk, control, skb); | ||
3509 | |||
3510 | if (!err) | ||
3511 | goto done; | ||
3512 | break; | ||
3513 | |||
3514 | case L2CAP_MODE_STREAMING: | ||
3515 | control = get_unaligned_le16(skb->data); | ||
3516 | skb_pull(skb, 2); | ||
3517 | len = skb->len; | ||
3518 | |||
3519 | if (__is_sar_start(control)) | ||
3520 | len -= 2; | ||
3521 | |||
3522 | if (pi->fcs == L2CAP_FCS_CRC16) | ||
3523 | len -= 2; | ||
3524 | |||
3525 | if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control)) | ||
3526 | goto drop; | ||
3527 | |||
3528 | if (l2cap_check_fcs(pi, skb)) | ||
3529 | goto drop; | ||
3530 | |||
3531 | tx_seq = __get_txseq(control); | ||
3532 | |||
3533 | if (pi->expected_tx_seq == tx_seq) | ||
3534 | pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; | ||
3535 | else | ||
3536 | pi->expected_tx_seq = (tx_seq + 1) % 64; | ||
3537 | |||
3538 | err = l2cap_sar_reassembly_sdu(sk, skb, control); | ||
2384 | 3539 | ||
2385 | if (!sock_queue_rcv_skb(sk, skb)) | ||
2386 | goto done; | 3540 | goto done; |
2387 | 3541 | ||
3542 | default: | ||
3543 | BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode); | ||
3544 | break; | ||
3545 | } | ||
3546 | |||
2388 | drop: | 3547 | drop: |
2389 | kfree_skb(skb); | 3548 | kfree_skb(skb); |
2390 | 3549 | ||
@@ -2433,6 +3592,11 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) | |||
2433 | cid = __le16_to_cpu(lh->cid); | 3592 | cid = __le16_to_cpu(lh->cid); |
2434 | len = __le16_to_cpu(lh->len); | 3593 | len = __le16_to_cpu(lh->len); |
2435 | 3594 | ||
3595 | if (len != skb->len) { | ||
3596 | kfree_skb(skb); | ||
3597 | return; | ||
3598 | } | ||
3599 | |||
2436 | BT_DBG("len %d, cid 0x%4.4x", len, cid); | 3600 | BT_DBG("len %d, cid 0x%4.4x", len, cid); |
2437 | 3601 | ||
2438 | switch (cid) { | 3602 | switch (cid) { |
@@ -2441,7 +3605,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) | |||
2441 | break; | 3605 | break; |
2442 | 3606 | ||
2443 | case L2CAP_CID_CONN_LESS: | 3607 | case L2CAP_CID_CONN_LESS: |
2444 | psm = get_unaligned((__le16 *) skb->data); | 3608 | psm = get_unaligned_le16(skb->data); |
2445 | skb_pull(skb, 2); | 3609 | skb_pull(skb, 2); |
2446 | l2cap_conless_channel(conn, psm, skb); | 3610 | l2cap_conless_channel(conn, psm, skb); |
2447 | break; | 3611 | break; |
@@ -2828,6 +3992,9 @@ EXPORT_SYMBOL(l2cap_load); | |||
2828 | module_init(l2cap_init); | 3992 | module_init(l2cap_init); |
2829 | module_exit(l2cap_exit); | 3993 | module_exit(l2cap_exit); |
2830 | 3994 | ||
3995 | module_param(enable_ertm, bool, 0644); | ||
3996 | MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode"); | ||
3997 | |||
2831 | MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); | 3998 | MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); |
2832 | MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION); | 3999 | MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION); |
2833 | MODULE_VERSION(VERSION); | 4000 | MODULE_VERSION(VERSION); |
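Usage note on the new parameter: ERTM is only advertised and used when enable_ertm is set. It can be enabled at load time, for example with modprobe l2cap enable_ertm=1 (assuming the module is built as l2cap.ko), and because the parameter is registered with mode 0644 it can also be flipped at runtime through /sys/module/l2cap/parameters/enable_ertm.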
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 94b3388c188b..25692bc0a342 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -244,6 +244,33 @@ static inline int rfcomm_check_security(struct rfcomm_dlc *d) | |||
244 | auth_type); | 244 | auth_type); |
245 | } | 245 | } |
246 | 246 | ||
247 | static void rfcomm_session_timeout(unsigned long arg) | ||
248 | { | ||
249 | struct rfcomm_session *s = (void *) arg; | ||
250 | |||
251 | BT_DBG("session %p state %ld", s, s->state); | ||
252 | |||
253 | set_bit(RFCOMM_TIMED_OUT, &s->flags); | ||
254 | rfcomm_session_put(s); | ||
255 | rfcomm_schedule(RFCOMM_SCHED_TIMEO); | ||
256 | } | ||
257 | |||
258 | static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout) | ||
259 | { | ||
260 | BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout); | ||
261 | |||
262 | if (!mod_timer(&s->timer, jiffies + timeout)) | ||
263 | rfcomm_session_hold(s); | ||
264 | } | ||
265 | |||
266 | static void rfcomm_session_clear_timer(struct rfcomm_session *s) | ||
267 | { | ||
268 | BT_DBG("session %p state %ld", s, s->state); | ||
269 | |||
270 | if (timer_pending(&s->timer) && del_timer(&s->timer)) | ||
271 | rfcomm_session_put(s); | ||
272 | } | ||
273 | |||
247 | /* ---- RFCOMM DLCs ---- */ | 274 | /* ---- RFCOMM DLCs ---- */ |
248 | static void rfcomm_dlc_timeout(unsigned long arg) | 275 | static void rfcomm_dlc_timeout(unsigned long arg) |
249 | { | 276 | { |
@@ -320,6 +347,7 @@ static void rfcomm_dlc_link(struct rfcomm_session *s, struct rfcomm_dlc *d) | |||
320 | 347 | ||
321 | rfcomm_session_hold(s); | 348 | rfcomm_session_hold(s); |
322 | 349 | ||
350 | rfcomm_session_clear_timer(s); | ||
323 | rfcomm_dlc_hold(d); | 351 | rfcomm_dlc_hold(d); |
324 | list_add(&d->list, &s->dlcs); | 352 | list_add(&d->list, &s->dlcs); |
325 | d->session = s; | 353 | d->session = s; |
@@ -335,6 +363,9 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d) | |||
335 | d->session = NULL; | 363 | d->session = NULL; |
336 | rfcomm_dlc_put(d); | 364 | rfcomm_dlc_put(d); |
337 | 365 | ||
366 | if (list_empty(&s->dlcs)) | ||
367 | rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT); | ||
368 | |||
338 | rfcomm_session_put(s); | 369 | rfcomm_session_put(s); |
339 | } | 370 | } |
340 | 371 | ||
@@ -567,6 +598,8 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state) | |||
567 | 598 | ||
568 | BT_DBG("session %p sock %p", s, sock); | 599 | BT_DBG("session %p sock %p", s, sock); |
569 | 600 | ||
601 | setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long) s); | ||
602 | |||
570 | INIT_LIST_HEAD(&s->dlcs); | 603 | INIT_LIST_HEAD(&s->dlcs); |
571 | s->state = state; | 604 | s->state = state; |
572 | s->sock = sock; | 605 | s->sock = sock; |
@@ -598,6 +631,7 @@ static void rfcomm_session_del(struct rfcomm_session *s) | |||
598 | if (state == BT_CONNECTED) | 631 | if (state == BT_CONNECTED) |
599 | rfcomm_send_disc(s, 0); | 632 | rfcomm_send_disc(s, 0); |
600 | 633 | ||
634 | rfcomm_session_clear_timer(s); | ||
601 | sock_release(s->sock); | 635 | sock_release(s->sock); |
602 | kfree(s); | 636 | kfree(s); |
603 | 637 | ||
@@ -639,6 +673,7 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err) | |||
639 | __rfcomm_dlc_close(d, err); | 673 | __rfcomm_dlc_close(d, err); |
640 | } | 674 | } |
641 | 675 | ||
676 | rfcomm_session_clear_timer(s); | ||
642 | rfcomm_session_put(s); | 677 | rfcomm_session_put(s); |
643 | } | 678 | } |
644 | 679 | ||
@@ -1879,6 +1914,12 @@ static inline void rfcomm_process_sessions(void) | |||
1879 | struct rfcomm_session *s; | 1914 | struct rfcomm_session *s; |
1880 | s = list_entry(p, struct rfcomm_session, list); | 1915 | s = list_entry(p, struct rfcomm_session, list); |
1881 | 1916 | ||
1917 | if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { | ||
1918 | s->state = BT_DISCONN; | ||
1919 | rfcomm_send_disc(s, 0); | ||
1920 | continue; | ||
1921 | } | ||
1922 | |||
1882 | if (s->state == BT_LISTEN) { | 1923 | if (s->state == BT_LISTEN) { |
1883 | rfcomm_accept_connection(s); | 1924 | rfcomm_accept_connection(s); |
1884 | continue; | 1925 | continue; |
@@ -2080,7 +2121,7 @@ static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL); | |||
2080 | /* ---- Initialization ---- */ | 2121 | /* ---- Initialization ---- */ |
2081 | static int __init rfcomm_init(void) | 2122 | static int __init rfcomm_init(void) |
2082 | { | 2123 | { |
2083 | int ret; | 2124 | int err; |
2084 | 2125 | ||
2085 | l2cap_load(); | 2126 | l2cap_load(); |
2086 | 2127 | ||
@@ -2088,33 +2129,35 @@ static int __init rfcomm_init(void) | |||
2088 | 2129 | ||
2089 | rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); | 2130 | rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); |
2090 | if (IS_ERR(rfcomm_thread)) { | 2131 | if (IS_ERR(rfcomm_thread)) { |
2091 | ret = PTR_ERR(rfcomm_thread); | 2132 | err = PTR_ERR(rfcomm_thread); |
2092 | goto out_thread; | 2133 | goto unregister; |
2093 | } | 2134 | } |
2094 | 2135 | ||
2095 | if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) | 2136 | if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) |
2096 | BT_ERR("Failed to create RFCOMM info file"); | 2137 | BT_ERR("Failed to create RFCOMM info file"); |
2097 | 2138 | ||
2098 | ret = rfcomm_init_ttys(); | 2139 | err = rfcomm_init_ttys(); |
2099 | if (ret) | 2140 | if (err < 0) |
2100 | goto out_tty; | 2141 | goto stop; |
2101 | 2142 | ||
2102 | ret = rfcomm_init_sockets(); | 2143 | err = rfcomm_init_sockets(); |
2103 | if (ret) | 2144 | if (err < 0) |
2104 | goto out_sock; | 2145 | goto cleanup; |
2105 | 2146 | ||
2106 | BT_INFO("RFCOMM ver %s", VERSION); | 2147 | BT_INFO("RFCOMM ver %s", VERSION); |
2107 | 2148 | ||
2108 | return 0; | 2149 | return 0; |
2109 | 2150 | ||
2110 | out_sock: | 2151 | cleanup: |
2111 | rfcomm_cleanup_ttys(); | 2152 | rfcomm_cleanup_ttys(); |
2112 | out_tty: | 2153 | |
2154 | stop: | ||
2113 | kthread_stop(rfcomm_thread); | 2155 | kthread_stop(rfcomm_thread); |
2114 | out_thread: | 2156 | |
2157 | unregister: | ||
2115 | hci_unregister_cb(&rfcomm_cb); | 2158 | hci_unregister_cb(&rfcomm_cb); |
2116 | 2159 | ||
2117 | return ret; | 2160 | return err; |
2118 | } | 2161 | } |
2119 | 2162 | ||
2120 | static void __exit rfcomm_exit(void) | 2163 | static void __exit rfcomm_exit(void) |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 51ae0c3e470a..13c27f17192c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -359,20 +359,9 @@ static void sco_sock_kill(struct sock *sk) | |||
359 | sock_put(sk); | 359 | sock_put(sk); |
360 | } | 360 | } |
361 | 361 | ||
362 | /* Close socket. | 362 | static void __sco_sock_close(struct sock *sk) |
363 | * Must be called on unlocked socket. | ||
364 | */ | ||
365 | static void sco_sock_close(struct sock *sk) | ||
366 | { | 363 | { |
367 | struct sco_conn *conn; | 364 | BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); |
368 | |||
369 | sco_sock_clear_timer(sk); | ||
370 | |||
371 | lock_sock(sk); | ||
372 | |||
373 | conn = sco_pi(sk)->conn; | ||
374 | |||
375 | BT_DBG("sk %p state %d conn %p socket %p", sk, sk->sk_state, conn, sk->sk_socket); | ||
376 | 365 | ||
377 | switch (sk->sk_state) { | 366 | switch (sk->sk_state) { |
378 | case BT_LISTEN: | 367 | case BT_LISTEN: |
@@ -390,9 +379,15 @@ static void sco_sock_close(struct sock *sk) | |||
390 | sock_set_flag(sk, SOCK_ZAPPED); | 379 | sock_set_flag(sk, SOCK_ZAPPED); |
391 | break; | 380 | break; |
392 | } | 381 | } |
382 | } | ||
393 | 383 | ||
384 | /* Must be called on unlocked socket. */ | ||
385 | static void sco_sock_close(struct sock *sk) | ||
386 | { | ||
387 | sco_sock_clear_timer(sk); | ||
388 | lock_sock(sk); | ||
389 | __sco_sock_close(sk); | ||
394 | release_sock(sk); | 390 | release_sock(sk); |
395 | |||
396 | sco_sock_kill(sk); | 391 | sco_sock_kill(sk); |
397 | } | 392 | } |
398 | 393 | ||
@@ -748,6 +743,30 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char | |||
748 | return err; | 743 | return err; |
749 | } | 744 | } |
750 | 745 | ||
746 | static int sco_sock_shutdown(struct socket *sock, int how) | ||
747 | { | ||
748 | struct sock *sk = sock->sk; | ||
749 | int err = 0; | ||
750 | |||
751 | BT_DBG("sock %p, sk %p", sock, sk); | ||
752 | |||
753 | if (!sk) | ||
754 | return 0; | ||
755 | |||
756 | lock_sock(sk); | ||
757 | if (!sk->sk_shutdown) { | ||
758 | sk->sk_shutdown = SHUTDOWN_MASK; | ||
759 | sco_sock_clear_timer(sk); | ||
760 | __sco_sock_close(sk); | ||
761 | |||
762 | if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) | ||
763 | err = bt_sock_wait_state(sk, BT_CLOSED, | ||
764 | sk->sk_lingertime); | ||
765 | } | ||
766 | release_sock(sk); | ||
767 | return err; | ||
768 | } | ||
769 | |||
751 | static int sco_sock_release(struct socket *sock) | 770 | static int sco_sock_release(struct socket *sock) |
752 | { | 771 | { |
753 | struct sock *sk = sock->sk; | 772 | struct sock *sk = sock->sk; |
@@ -969,7 +988,7 @@ static const struct proto_ops sco_sock_ops = { | |||
969 | .ioctl = bt_sock_ioctl, | 988 | .ioctl = bt_sock_ioctl, |
970 | .mmap = sock_no_mmap, | 989 | .mmap = sock_no_mmap, |
971 | .socketpair = sock_no_socketpair, | 990 | .socketpair = sock_no_socketpair, |
972 | .shutdown = sock_no_shutdown, | 991 | .shutdown = sco_sock_shutdown, |
973 | .setsockopt = sco_sock_setsockopt, | 992 | .setsockopt = sco_sock_setsockopt, |
974 | .getsockopt = sco_sock_getsockopt | 993 | .getsockopt = sco_sock_getsockopt |
975 | }; | 994 | }; |
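Wiring .shutdown to the new sco_sock_shutdown() means a SCO socket can now be shut down from userspace instead of getting -EOPNOTSUPP from sock_no_shutdown(); if SO_LINGER is set, the call also waits up to the linger time for the channel to reach BT_CLOSED. A minimal illustrative userspace call (socket setup omitted, descriptor name hypothetical):

    #include <sys/socket.h>

    /* Illustrative userspace use of the newly supported operation on an
     * already-connected SCO socket descriptor. */
    static int example_shutdown_sco(int sco_fd)
    {
            return shutdown(sco_fd, SHUT_RDWR);
    }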
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 18538d7460d7..07a07770c8b6 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include "br_private.h" | 20 | #include "br_private.h" |
21 | 21 | ||
22 | /* net device transmit always called with no BH (preempt_disabled) */ | 22 | /* net device transmit always called with no BH (preempt_disabled) */ |
23 | int br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | 23 | netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) |
24 | { | 24 | { |
25 | struct net_bridge *br = netdev_priv(dev); | 25 | struct net_bridge *br = netdev_priv(dev); |
26 | const unsigned char *dest = skb->data; | 26 | const unsigned char *dest = skb->data; |
@@ -39,7 +39,7 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
39 | else | 39 | else |
40 | br_flood_deliver(br, skb); | 40 | br_flood_deliver(br, skb); |
41 | 41 | ||
42 | return 0; | 42 | return NETDEV_TX_OK; |
43 | } | 43 | } |
44 | 44 | ||
45 | static int br_dev_open(struct net_device *dev) | 45 | static int br_dev_open(struct net_device *dev) |
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index d2c27c808d3b..bc1704ac6cd9 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c | |||
@@ -22,7 +22,8 @@ | |||
22 | static inline int should_deliver(const struct net_bridge_port *p, | 22 | static inline int should_deliver(const struct net_bridge_port *p, |
23 | const struct sk_buff *skb) | 23 | const struct sk_buff *skb) |
24 | { | 24 | { |
25 | return (skb->dev != p->dev && p->state == BR_STATE_FORWARDING); | 25 | return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && |
26 | p->state == BR_STATE_FORWARDING); | ||
26 | } | 27 | } |
27 | 28 | ||
28 | static inline unsigned packet_length(const struct sk_buff *skb) | 29 | static inline unsigned packet_length(const struct sk_buff *skb) |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index eb404dc3ed6e..142ebac14176 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -256,6 +256,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, | |||
256 | p->path_cost = port_cost(dev); | 256 | p->path_cost = port_cost(dev); |
257 | p->priority = 0x8000 >> BR_PORT_BITS; | 257 | p->priority = 0x8000 >> BR_PORT_BITS; |
258 | p->port_no = index; | 258 | p->port_no = index; |
259 | p->flags = 0; | ||
259 | br_init_port(p); | 260 | br_init_port(p); |
260 | p->state = BR_STATE_DISABLED; | 261 | p->state = BR_STATE_DISABLED; |
261 | br_stp_port_timer_init(p); | 262 | br_stp_port_timer_init(p); |
@@ -263,6 +264,10 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, | |||
263 | return p; | 264 | return p; |
264 | } | 265 | } |
265 | 266 | ||
267 | static struct device_type br_type = { | ||
268 | .name = "bridge", | ||
269 | }; | ||
270 | |||
266 | int br_add_bridge(struct net *net, const char *name) | 271 | int br_add_bridge(struct net *net, const char *name) |
267 | { | 272 | { |
268 | struct net_device *dev; | 273 | struct net_device *dev; |
@@ -279,6 +284,8 @@ int br_add_bridge(struct net *net, const char *name) | |||
279 | goto out_free; | 284 | goto out_free; |
280 | } | 285 | } |
281 | 286 | ||
287 | SET_NETDEV_DEVTYPE(dev, &br_type); | ||
288 | |||
282 | ret = register_netdevice(dev); | 289 | ret = register_netdevice(dev); |
283 | if (ret) | 290 | if (ret) |
284 | goto out_free; | 291 | goto out_free; |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index d22f611e4004..907a82e9023d 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
@@ -359,7 +359,7 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb) | |||
359 | }, | 359 | }, |
360 | .proto = 0, | 360 | .proto = 0, |
361 | }; | 361 | }; |
362 | struct in_device *in_dev = in_dev_get(dev); | 362 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
363 | 363 | ||
364 | /* If err equals -EHOSTUNREACH the error is due to a | 364 | /* If err equals -EHOSTUNREACH the error is due to a |
365 | * martian destination or due to the fact that | 365 | * martian destination or due to the fact that |
@@ -905,46 +905,62 @@ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb, | |||
905 | * For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because | 905 | * For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because |
906 | * ip_refrag() can return NF_STOLEN. */ | 906 | * ip_refrag() can return NF_STOLEN. */ |
907 | static struct nf_hook_ops br_nf_ops[] __read_mostly = { | 907 | static struct nf_hook_ops br_nf_ops[] __read_mostly = { |
908 | { .hook = br_nf_pre_routing, | 908 | { |
909 | .owner = THIS_MODULE, | 909 | .hook = br_nf_pre_routing, |
910 | .pf = PF_BRIDGE, | 910 | .owner = THIS_MODULE, |
911 | .hooknum = NF_BR_PRE_ROUTING, | 911 | .pf = PF_BRIDGE, |
912 | .priority = NF_BR_PRI_BRNF, }, | 912 | .hooknum = NF_BR_PRE_ROUTING, |
913 | { .hook = br_nf_local_in, | 913 | .priority = NF_BR_PRI_BRNF, |
914 | .owner = THIS_MODULE, | 914 | }, |
915 | .pf = PF_BRIDGE, | 915 | { |
916 | .hooknum = NF_BR_LOCAL_IN, | 916 | .hook = br_nf_local_in, |
917 | .priority = NF_BR_PRI_BRNF, }, | 917 | .owner = THIS_MODULE, |
918 | { .hook = br_nf_forward_ip, | 918 | .pf = PF_BRIDGE, |
919 | .owner = THIS_MODULE, | 919 | .hooknum = NF_BR_LOCAL_IN, |
920 | .pf = PF_BRIDGE, | 920 | .priority = NF_BR_PRI_BRNF, |
921 | .hooknum = NF_BR_FORWARD, | 921 | }, |
922 | .priority = NF_BR_PRI_BRNF - 1, }, | 922 | { |
923 | { .hook = br_nf_forward_arp, | 923 | .hook = br_nf_forward_ip, |
924 | .owner = THIS_MODULE, | 924 | .owner = THIS_MODULE, |
925 | .pf = PF_BRIDGE, | 925 | .pf = PF_BRIDGE, |
926 | .hooknum = NF_BR_FORWARD, | 926 | .hooknum = NF_BR_FORWARD, |
927 | .priority = NF_BR_PRI_BRNF, }, | 927 | .priority = NF_BR_PRI_BRNF - 1, |
928 | { .hook = br_nf_local_out, | 928 | }, |
929 | .owner = THIS_MODULE, | 929 | { |
930 | .pf = PF_BRIDGE, | 930 | .hook = br_nf_forward_arp, |
931 | .hooknum = NF_BR_LOCAL_OUT, | 931 | .owner = THIS_MODULE, |
932 | .priority = NF_BR_PRI_FIRST, }, | 932 | .pf = PF_BRIDGE, |
933 | { .hook = br_nf_post_routing, | 933 | .hooknum = NF_BR_FORWARD, |
934 | .owner = THIS_MODULE, | 934 | .priority = NF_BR_PRI_BRNF, |
935 | .pf = PF_BRIDGE, | 935 | }, |
936 | .hooknum = NF_BR_POST_ROUTING, | 936 | { |
937 | .priority = NF_BR_PRI_LAST, }, | 937 | .hook = br_nf_local_out, |
938 | { .hook = ip_sabotage_in, | 938 | .owner = THIS_MODULE, |
939 | .owner = THIS_MODULE, | 939 | .pf = PF_BRIDGE, |
940 | .pf = PF_INET, | 940 | .hooknum = NF_BR_LOCAL_OUT, |
941 | .hooknum = NF_INET_PRE_ROUTING, | 941 | .priority = NF_BR_PRI_FIRST, |
942 | .priority = NF_IP_PRI_FIRST, }, | 942 | }, |
943 | { .hook = ip_sabotage_in, | 943 | { |
944 | .owner = THIS_MODULE, | 944 | .hook = br_nf_post_routing, |
945 | .pf = PF_INET6, | 945 | .owner = THIS_MODULE, |
946 | .hooknum = NF_INET_PRE_ROUTING, | 946 | .pf = PF_BRIDGE, |
947 | .priority = NF_IP6_PRI_FIRST, }, | 947 | .hooknum = NF_BR_POST_ROUTING, |
948 | .priority = NF_BR_PRI_LAST, | ||
949 | }, | ||
950 | { | ||
951 | .hook = ip_sabotage_in, | ||
952 | .owner = THIS_MODULE, | ||
953 | .pf = PF_INET, | ||
954 | .hooknum = NF_INET_PRE_ROUTING, | ||
955 | .priority = NF_IP_PRI_FIRST, | ||
956 | }, | ||
957 | { | ||
958 | .hook = ip_sabotage_in, | ||
959 | .owner = THIS_MODULE, | ||
960 | .pf = PF_INET6, | ||
961 | .hooknum = NF_INET_PRE_ROUTING, | ||
962 | .priority = NF_IP6_PRI_FIRST, | ||
963 | }, | ||
948 | }; | 964 | }; |
949 | 965 | ||
950 | #ifdef CONFIG_SYSCTL | 966 | #ifdef CONFIG_SYSCTL |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index d5b5537272b4..2114e45682ea 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -81,6 +81,9 @@ struct net_bridge_port | |||
81 | struct timer_list message_age_timer; | 81 | struct timer_list message_age_timer; |
82 | struct kobject kobj; | 82 | struct kobject kobj; |
83 | struct rcu_head rcu; | 83 | struct rcu_head rcu; |
84 | |||
85 | unsigned long flags; | ||
86 | #define BR_HAIRPIN_MODE 0x00000001 | ||
84 | }; | 87 | }; |
85 | 88 | ||
86 | struct net_bridge | 89 | struct net_bridge |
@@ -140,7 +143,8 @@ static inline int br_is_root_bridge(const struct net_bridge *br) | |||
140 | 143 | ||
141 | /* br_device.c */ | 144 | /* br_device.c */ |
142 | extern void br_dev_setup(struct net_device *dev); | 145 | extern void br_dev_setup(struct net_device *dev); |
143 | extern int br_dev_xmit(struct sk_buff *skb, struct net_device *dev); | 146 | extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, |
147 | struct net_device *dev); | ||
144 | 148 | ||
145 | /* br_fdb.c */ | 149 | /* br_fdb.c */ |
146 | extern int br_fdb_init(void); | 150 | extern int br_fdb_init(void); |
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 0660515f3992..fd3f8d6c0998 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c | |||
@@ -21,7 +21,7 @@ | |||
21 | */ | 21 | */ |
22 | #define MESSAGE_AGE_INCR ((HZ < 256) ? 1 : (HZ/256)) | 22 | #define MESSAGE_AGE_INCR ((HZ < 256) ? 1 : (HZ/256)) |
23 | 23 | ||
24 | static const char *br_port_state_names[] = { | 24 | static const char *const br_port_state_names[] = { |
25 | [BR_STATE_DISABLED] = "disabled", | 25 | [BR_STATE_DISABLED] = "disabled", |
26 | [BR_STATE_LISTENING] = "listening", | 26 | [BR_STATE_LISTENING] = "listening", |
27 | [BR_STATE_LEARNING] = "learning", | 27 | [BR_STATE_LEARNING] = "learning", |
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 4a3cdf8f3813..820643a3ba9c 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c | |||
@@ -143,6 +143,22 @@ static ssize_t store_flush(struct net_bridge_port *p, unsigned long v) | |||
143 | } | 143 | } |
144 | static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush); | 144 | static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush); |
145 | 145 | ||
146 | static ssize_t show_hairpin_mode(struct net_bridge_port *p, char *buf) | ||
147 | { | ||
148 | int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0; | ||
149 | return sprintf(buf, "%d\n", hairpin_mode); | ||
150 | } | ||
151 | static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v) | ||
152 | { | ||
153 | if (v) | ||
154 | p->flags |= BR_HAIRPIN_MODE; | ||
155 | else | ||
156 | p->flags &= ~BR_HAIRPIN_MODE; | ||
157 | return 0; | ||
158 | } | ||
159 | static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR, | ||
160 | show_hairpin_mode, store_hairpin_mode); | ||
161 | |||
146 | static struct brport_attribute *brport_attrs[] = { | 162 | static struct brport_attribute *brport_attrs[] = { |
147 | &brport_attr_path_cost, | 163 | &brport_attr_path_cost, |
148 | &brport_attr_priority, | 164 | &brport_attr_priority, |
@@ -159,6 +175,7 @@ static struct brport_attribute *brport_attrs[] = { | |||
159 | &brport_attr_forward_delay_timer, | 175 | &brport_attr_forward_delay_timer, |
160 | &brport_attr_hold_timer, | 176 | &brport_attr_hold_timer, |
161 | &brport_attr_flush, | 177 | &brport_attr_flush, |
178 | &brport_attr_hairpin_mode, | ||
162 | NULL | 179 | NULL |
163 | }; | 180 | }; |
164 | 181 | ||
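The new per-port flag is exposed read/write through sysfs, so hairpin (reflective relay) forwarding can be toggled per bridge port, for example by writing 1 to /sys/class/net/eth0/brport/hairpin_mode (the interface name here is just an example); should_deliver() then allows frames to be forwarded back out the port they arrived on.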
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c index a94f3cc377c0..e4ea3fdd1d41 100644 --- a/net/bridge/netfilter/ebt_log.c +++ b/net/bridge/netfilter/ebt_log.c | |||
@@ -50,14 +50,6 @@ struct arppayload | |||
50 | unsigned char ip_dst[4]; | 50 | unsigned char ip_dst[4]; |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static void print_MAC(const unsigned char *p) | ||
54 | { | ||
55 | int i; | ||
56 | |||
57 | for (i = 0; i < ETH_ALEN; i++, p++) | ||
58 | printk("%02x%c", *p, i == ETH_ALEN - 1 ? ' ':':'); | ||
59 | } | ||
60 | |||
61 | static void | 53 | static void |
62 | print_ports(const struct sk_buff *skb, uint8_t protocol, int offset) | 54 | print_ports(const struct sk_buff *skb, uint8_t protocol, int offset) |
63 | { | 55 | { |
@@ -88,14 +80,11 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum, | |||
88 | unsigned int bitmask; | 80 | unsigned int bitmask; |
89 | 81 | ||
90 | spin_lock_bh(&ebt_log_lock); | 82 | spin_lock_bh(&ebt_log_lock); |
91 | printk("<%c>%s IN=%s OUT=%s MAC source = ", '0' + loginfo->u.log.level, | 83 | printk("<%c>%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x", |
92 | prefix, in ? in->name : "", out ? out->name : ""); | 84 | '0' + loginfo->u.log.level, prefix, |
93 | 85 | in ? in->name : "", out ? out->name : "", | |
94 | print_MAC(eth_hdr(skb)->h_source); | 86 | eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, |
95 | printk("MAC dest = "); | 87 | ntohs(eth_hdr(skb)->h_proto)); |
96 | print_MAC(eth_hdr(skb)->h_dest); | ||
97 | |||
98 | printk("proto = 0x%04x", ntohs(eth_hdr(skb)->h_proto)); | ||
99 | 88 | ||
100 | if (loginfo->type == NF_LOG_TYPE_LOG) | 89 | if (loginfo->type == NF_LOG_TYPE_LOG) |
101 | bitmask = loginfo->u.log.logflags; | 90 | bitmask = loginfo->u.log.logflags; |
@@ -171,12 +160,8 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum, | |||
171 | printk(" INCOMPLETE ARP payload"); | 160 | printk(" INCOMPLETE ARP payload"); |
172 | goto out; | 161 | goto out; |
173 | } | 162 | } |
174 | printk(" ARP MAC SRC="); | 163 | printk(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4", |
175 | print_MAC(ap->mac_src); | 164 | ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst); |
176 | printk(" ARP IP SRC=%pI4", ap->ip_src); | ||
177 | printk(" ARP MAC DST="); | ||
178 | print_MAC(ap->mac_dst); | ||
179 | printk(" ARP IP DST=%pI4", ap->ip_dst); | ||
180 | } | 165 | } |
181 | } | 166 | } |
182 | out: | 167 | out: |
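The ebt_log changes drop the hand-rolled print_MAC() loop in favour of the %pM and %pI4 printk extensions, which take a pointer to a 6-byte MAC address and to a network-byte-order IPv4 address respectively. A standalone illustration (not from the patch):

    /* Illustrative only: %pM prints aa:bb:cc:dd:ee:ff, %pI4 a dotted quad. */
    static void example_log_addrs(const unsigned char *mac, const __be32 *addr)
    {
            printk(KERN_DEBUG "MAC=%pM IP=%pI4\n", mac, addr);
    }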
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index 133eeae45a4f..ce50688a6431 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c | |||
@@ -266,7 +266,7 @@ static bool ebt_ulog_tg_check(const struct xt_tgchk_param *par) | |||
266 | if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN) | 266 | if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN) |
267 | uloginfo->qthreshold = EBT_ULOG_MAX_QLEN; | 267 | uloginfo->qthreshold = EBT_ULOG_MAX_QLEN; |
268 | 268 | ||
269 | return 0; | 269 | return true; |
270 | } | 270 | } |
271 | 271 | ||
272 | static struct xt_target ebt_ulog_tg_reg __read_mostly = { | 272 | static struct xt_target ebt_ulog_tg_reg __read_mostly = { |
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index c751111440f8..d32ab13e728c 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c | |||
@@ -41,7 +41,7 @@ static int check(const struct ebt_table_info *info, unsigned int valid_hooks) | |||
41 | return 0; | 41 | return 0; |
42 | } | 42 | } |
43 | 43 | ||
44 | static struct ebt_table broute_table = | 44 | static const struct ebt_table broute_table = |
45 | { | 45 | { |
46 | .name = "broute", | 46 | .name = "broute", |
47 | .table = &initial_table, | 47 | .table = &initial_table, |
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index a5eea72938a6..60b1a6ca7185 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c | |||
@@ -50,7 +50,7 @@ static int check(const struct ebt_table_info *info, unsigned int valid_hooks) | |||
50 | return 0; | 50 | return 0; |
51 | } | 51 | } |
52 | 52 | ||
53 | static struct ebt_table frame_filter = | 53 | static const struct ebt_table frame_filter = |
54 | { | 54 | { |
55 | .name = "filter", | 55 | .name = "filter", |
56 | .table = &initial_table, | 56 | .table = &initial_table, |
@@ -77,21 +77,21 @@ static struct nf_hook_ops ebt_ops_filter[] __read_mostly = { | |||
77 | { | 77 | { |
78 | .hook = ebt_in_hook, | 78 | .hook = ebt_in_hook, |
79 | .owner = THIS_MODULE, | 79 | .owner = THIS_MODULE, |
80 | .pf = PF_BRIDGE, | 80 | .pf = NFPROTO_BRIDGE, |
81 | .hooknum = NF_BR_LOCAL_IN, | 81 | .hooknum = NF_BR_LOCAL_IN, |
82 | .priority = NF_BR_PRI_FILTER_BRIDGED, | 82 | .priority = NF_BR_PRI_FILTER_BRIDGED, |
83 | }, | 83 | }, |
84 | { | 84 | { |
85 | .hook = ebt_in_hook, | 85 | .hook = ebt_in_hook, |
86 | .owner = THIS_MODULE, | 86 | .owner = THIS_MODULE, |
87 | .pf = PF_BRIDGE, | 87 | .pf = NFPROTO_BRIDGE, |
88 | .hooknum = NF_BR_FORWARD, | 88 | .hooknum = NF_BR_FORWARD, |
89 | .priority = NF_BR_PRI_FILTER_BRIDGED, | 89 | .priority = NF_BR_PRI_FILTER_BRIDGED, |
90 | }, | 90 | }, |
91 | { | 91 | { |
92 | .hook = ebt_out_hook, | 92 | .hook = ebt_out_hook, |
93 | .owner = THIS_MODULE, | 93 | .owner = THIS_MODULE, |
94 | .pf = PF_BRIDGE, | 94 | .pf = NFPROTO_BRIDGE, |
95 | .hooknum = NF_BR_LOCAL_OUT, | 95 | .hooknum = NF_BR_LOCAL_OUT, |
96 | .priority = NF_BR_PRI_FILTER_OTHER, | 96 | .priority = NF_BR_PRI_FILTER_OTHER, |
97 | }, | 97 | }, |
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 6024c551f9a9..4a98804203b0 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c | |||
@@ -77,21 +77,21 @@ static struct nf_hook_ops ebt_ops_nat[] __read_mostly = { | |||
77 | { | 77 | { |
78 | .hook = ebt_nat_out, | 78 | .hook = ebt_nat_out, |
79 | .owner = THIS_MODULE, | 79 | .owner = THIS_MODULE, |
80 | .pf = PF_BRIDGE, | 80 | .pf = NFPROTO_BRIDGE, |
81 | .hooknum = NF_BR_LOCAL_OUT, | 81 | .hooknum = NF_BR_LOCAL_OUT, |
82 | .priority = NF_BR_PRI_NAT_DST_OTHER, | 82 | .priority = NF_BR_PRI_NAT_DST_OTHER, |
83 | }, | 83 | }, |
84 | { | 84 | { |
85 | .hook = ebt_nat_out, | 85 | .hook = ebt_nat_out, |
86 | .owner = THIS_MODULE, | 86 | .owner = THIS_MODULE, |
87 | .pf = PF_BRIDGE, | 87 | .pf = NFPROTO_BRIDGE, |
88 | .hooknum = NF_BR_POST_ROUTING, | 88 | .hooknum = NF_BR_POST_ROUTING, |
89 | .priority = NF_BR_PRI_NAT_SRC, | 89 | .priority = NF_BR_PRI_NAT_SRC, |
90 | }, | 90 | }, |
91 | { | 91 | { |
92 | .hook = ebt_nat_in, | 92 | .hook = ebt_nat_in, |
93 | .owner = THIS_MODULE, | 93 | .owner = THIS_MODULE, |
94 | .pf = PF_BRIDGE, | 94 | .pf = NFPROTO_BRIDGE, |
95 | .hooknum = NF_BR_PRE_ROUTING, | 95 | .hooknum = NF_BR_PRE_ROUTING, |
96 | .priority = NF_BR_PRI_NAT_DST_BRIDGED, | 96 | .priority = NF_BR_PRI_NAT_DST_BRIDGED, |
97 | }, | 97 | }, |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 37928d5f2840..bd1c65425d4f 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -1103,23 +1103,24 @@ free_newinfo: | |||
1103 | return ret; | 1103 | return ret; |
1104 | } | 1104 | } |
1105 | 1105 | ||
1106 | struct ebt_table *ebt_register_table(struct net *net, struct ebt_table *table) | 1106 | struct ebt_table * |
1107 | ebt_register_table(struct net *net, const struct ebt_table *input_table) | ||
1107 | { | 1108 | { |
1108 | struct ebt_table_info *newinfo; | 1109 | struct ebt_table_info *newinfo; |
1109 | struct ebt_table *t; | 1110 | struct ebt_table *t, *table; |
1110 | struct ebt_replace_kernel *repl; | 1111 | struct ebt_replace_kernel *repl; |
1111 | int ret, i, countersize; | 1112 | int ret, i, countersize; |
1112 | void *p; | 1113 | void *p; |
1113 | 1114 | ||
1114 | if (!table || !(repl = table->table) || !repl->entries || | 1115 | if (input_table == NULL || (repl = input_table->table) == NULL || |
1115 | repl->entries_size == 0 || | 1116 | repl->entries == 0 || repl->entries_size == 0 || |
1116 | repl->counters || table->private) { | 1117 | repl->counters != NULL || input_table->private != NULL) { |
1117 | BUGPRINT("Bad table data for ebt_register_table!!!\n"); | 1118 | BUGPRINT("Bad table data for ebt_register_table!!!\n"); |
1118 | return ERR_PTR(-EINVAL); | 1119 | return ERR_PTR(-EINVAL); |
1119 | } | 1120 | } |
1120 | 1121 | ||
1121 | /* Don't add one table to multiple lists. */ | 1122 | /* Don't add one table to multiple lists. */ |
1122 | table = kmemdup(table, sizeof(struct ebt_table), GFP_KERNEL); | 1123 | table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL); |
1123 | if (!table) { | 1124 | if (!table) { |
1124 | ret = -ENOMEM; | 1125 | ret = -ENOMEM; |
1125 | goto out; | 1126 | goto out; |
diff --git a/net/can/af_can.c b/net/can/af_can.c index e733725b11d4..ef1c43a2ed56 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -651,12 +651,16 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev, | |||
651 | struct can_frame *cf = (struct can_frame *)skb->data; | 651 | struct can_frame *cf = (struct can_frame *)skb->data; |
652 | int matches; | 652 | int matches; |
653 | 653 | ||
654 | if (dev->type != ARPHRD_CAN || !net_eq(dev_net(dev), &init_net)) { | 654 | if (!net_eq(dev_net(dev), &init_net)) |
655 | kfree_skb(skb); | 655 | goto drop; |
656 | return 0; | ||
657 | } | ||
658 | 656 | ||
659 | BUG_ON(skb->len != sizeof(struct can_frame) || cf->can_dlc > 8); | 657 | if (WARN_ONCE(dev->type != ARPHRD_CAN || |
658 | skb->len != sizeof(struct can_frame) || | ||
659 | cf->can_dlc > 8, | ||
660 | "PF_CAN: dropped non conform skbuf: " | ||
661 | "dev type %d, len %d, can_dlc %d\n", | ||
662 | dev->type, skb->len, cf->can_dlc)) | ||
663 | goto drop; | ||
660 | 664 | ||
661 | /* update statistics */ | 665 | /* update statistics */ |
662 | can_stats.rx_frames++; | 666 | can_stats.rx_frames++; |
@@ -682,7 +686,11 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev, | |||
682 | can_stats.matches_delta++; | 686 | can_stats.matches_delta++; |
683 | } | 687 | } |
684 | 688 | ||
685 | return 0; | 689 | return NET_RX_SUCCESS; |
690 | |||
691 | drop: | ||
692 | kfree_skb(skb); | ||
693 | return NET_RX_DROP; | ||
686 | } | 694 | } |
687 | 695 | ||
688 | /* | 696 | /* |
diff --git a/net/can/bcm.c b/net/can/bcm.c index 72720c710351..597da4f8f888 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/hrtimer.h> | 46 | #include <linux/hrtimer.h> |
47 | #include <linux/list.h> | 47 | #include <linux/list.h> |
48 | #include <linux/proc_fs.h> | 48 | #include <linux/proc_fs.h> |
49 | #include <linux/seq_file.h> | ||
49 | #include <linux/uio.h> | 50 | #include <linux/uio.h> |
50 | #include <linux/net.h> | 51 | #include <linux/net.h> |
51 | #include <linux/netdevice.h> | 52 | #include <linux/netdevice.h> |
@@ -146,23 +147,18 @@ static char *bcm_proc_getifname(int ifindex) | |||
146 | return "???"; | 147 | return "???"; |
147 | } | 148 | } |
148 | 149 | ||
149 | static int bcm_read_proc(char *page, char **start, off_t off, | 150 | static int bcm_proc_show(struct seq_file *m, void *v) |
150 | int count, int *eof, void *data) | ||
151 | { | 151 | { |
152 | int len = 0; | 152 | struct sock *sk = (struct sock *)m->private; |
153 | struct sock *sk = (struct sock *)data; | ||
154 | struct bcm_sock *bo = bcm_sk(sk); | 153 | struct bcm_sock *bo = bcm_sk(sk); |
155 | struct bcm_op *op; | 154 | struct bcm_op *op; |
156 | 155 | ||
157 | len += snprintf(page + len, PAGE_SIZE - len, ">>> socket %p", | 156 | seq_printf(m, ">>> socket %p", sk->sk_socket); |
158 | sk->sk_socket); | 157 | seq_printf(m, " / sk %p", sk); |
159 | len += snprintf(page + len, PAGE_SIZE - len, " / sk %p", sk); | 158 | seq_printf(m, " / bo %p", bo); |
160 | len += snprintf(page + len, PAGE_SIZE - len, " / bo %p", bo); | 159 | seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); |
161 | len += snprintf(page + len, PAGE_SIZE - len, " / dropped %lu", | 160 | seq_printf(m, " / bound %s", bcm_proc_getifname(bo->ifindex)); |
162 | bo->dropped_usr_msgs); | 161 | seq_printf(m, " <<<\n"); |
163 | len += snprintf(page + len, PAGE_SIZE - len, " / bound %s", | ||
164 | bcm_proc_getifname(bo->ifindex)); | ||
165 | len += snprintf(page + len, PAGE_SIZE - len, " <<<\n"); | ||
166 | 162 | ||
167 | list_for_each_entry(op, &bo->rx_ops, list) { | 163 | list_for_each_entry(op, &bo->rx_ops, list) { |
168 | 164 | ||
@@ -172,71 +168,62 @@ static int bcm_read_proc(char *page, char **start, off_t off, | |||
172 | if (!op->frames_abs) | 168 | if (!op->frames_abs) |
173 | continue; | 169 | continue; |
174 | 170 | ||
175 | len += snprintf(page + len, PAGE_SIZE - len, | 171 | seq_printf(m, "rx_op: %03X %-5s ", |
176 | "rx_op: %03X %-5s ", | ||
177 | op->can_id, bcm_proc_getifname(op->ifindex)); | 172 | op->can_id, bcm_proc_getifname(op->ifindex)); |
178 | len += snprintf(page + len, PAGE_SIZE - len, "[%d]%c ", | 173 | seq_printf(m, "[%d]%c ", op->nframes, |
179 | op->nframes, | ||
180 | (op->flags & RX_CHECK_DLC)?'d':' '); | 174 | (op->flags & RX_CHECK_DLC)?'d':' '); |
181 | if (op->kt_ival1.tv64) | 175 | if (op->kt_ival1.tv64) |
182 | len += snprintf(page + len, PAGE_SIZE - len, | 176 | seq_printf(m, "timeo=%lld ", |
183 | "timeo=%lld ", | ||
184 | (long long) | 177 | (long long) |
185 | ktime_to_us(op->kt_ival1)); | 178 | ktime_to_us(op->kt_ival1)); |
186 | 179 | ||
187 | if (op->kt_ival2.tv64) | 180 | if (op->kt_ival2.tv64) |
188 | len += snprintf(page + len, PAGE_SIZE - len, | 181 | seq_printf(m, "thr=%lld ", |
189 | "thr=%lld ", | ||
190 | (long long) | 182 | (long long) |
191 | ktime_to_us(op->kt_ival2)); | 183 | ktime_to_us(op->kt_ival2)); |
192 | 184 | ||
193 | len += snprintf(page + len, PAGE_SIZE - len, | 185 | seq_printf(m, "# recv %ld (%ld) => reduction: ", |
194 | "# recv %ld (%ld) => reduction: ", | ||
195 | op->frames_filtered, op->frames_abs); | 186 | op->frames_filtered, op->frames_abs); |
196 | 187 | ||
197 | reduction = 100 - (op->frames_filtered * 100) / op->frames_abs; | 188 | reduction = 100 - (op->frames_filtered * 100) / op->frames_abs; |
198 | 189 | ||
199 | len += snprintf(page + len, PAGE_SIZE - len, "%s%ld%%\n", | 190 | seq_printf(m, "%s%ld%%\n", |
200 | (reduction == 100)?"near ":"", reduction); | 191 | (reduction == 100)?"near ":"", reduction); |
201 | |||
202 | if (len > PAGE_SIZE - 200) { | ||
203 | /* mark output cut off */ | ||
204 | len += snprintf(page + len, PAGE_SIZE - len, "(..)\n"); | ||
205 | break; | ||
206 | } | ||
207 | } | 192 | } |
208 | 193 | ||
209 | list_for_each_entry(op, &bo->tx_ops, list) { | 194 | list_for_each_entry(op, &bo->tx_ops, list) { |
210 | 195 | ||
211 | len += snprintf(page + len, PAGE_SIZE - len, | 196 | seq_printf(m, "tx_op: %03X %s [%d] ", |
212 | "tx_op: %03X %s [%d] ", | ||
213 | op->can_id, bcm_proc_getifname(op->ifindex), | 197 | op->can_id, bcm_proc_getifname(op->ifindex), |
214 | op->nframes); | 198 | op->nframes); |
215 | 199 | ||
216 | if (op->kt_ival1.tv64) | 200 | if (op->kt_ival1.tv64) |
217 | len += snprintf(page + len, PAGE_SIZE - len, "t1=%lld ", | 201 | seq_printf(m, "t1=%lld ", |
218 | (long long) ktime_to_us(op->kt_ival1)); | 202 | (long long) ktime_to_us(op->kt_ival1)); |
219 | 203 | ||
220 | if (op->kt_ival2.tv64) | 204 | if (op->kt_ival2.tv64) |
221 | len += snprintf(page + len, PAGE_SIZE - len, "t2=%lld ", | 205 | seq_printf(m, "t2=%lld ", |
222 | (long long) ktime_to_us(op->kt_ival2)); | 206 | (long long) ktime_to_us(op->kt_ival2)); |
223 | 207 | ||
224 | len += snprintf(page + len, PAGE_SIZE - len, "# sent %ld\n", | 208 | seq_printf(m, "# sent %ld\n", op->frames_abs); |
225 | op->frames_abs); | ||
226 | |||
227 | if (len > PAGE_SIZE - 100) { | ||
228 | /* mark output cut off */ | ||
229 | len += snprintf(page + len, PAGE_SIZE - len, "(..)\n"); | ||
230 | break; | ||
231 | } | ||
232 | } | 209 | } |
210 | seq_putc(m, '\n'); | ||
211 | return 0; | ||
212 | } | ||
233 | 213 | ||
234 | len += snprintf(page + len, PAGE_SIZE - len, "\n"); | 214 | static int bcm_proc_open(struct inode *inode, struct file *file) |
235 | 215 | { | |
236 | *eof = 1; | 216 | return single_open(file, bcm_proc_show, PDE(inode)->data); |
237 | return len; | ||
238 | } | 217 | } |
239 | 218 | ||
219 | static const struct file_operations bcm_proc_fops = { | ||
220 | .owner = THIS_MODULE, | ||
221 | .open = bcm_proc_open, | ||
222 | .read = seq_read, | ||
223 | .llseek = seq_lseek, | ||
224 | .release = single_release, | ||
225 | }; | ||
226 | |||
240 | /* | 227 | /* |
241 | * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface | 228 | * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface |
242 | * of the given bcm tx op | 229 | * of the given bcm tx op |
@@ -1515,9 +1502,9 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, | |||
1515 | if (proc_dir) { | 1502 | if (proc_dir) { |
1516 | /* unique socket address as filename */ | 1503 | /* unique socket address as filename */ |
1517 | sprintf(bo->procname, "%p", sock); | 1504 | sprintf(bo->procname, "%p", sock); |
1518 | bo->bcm_proc_read = create_proc_read_entry(bo->procname, 0644, | 1505 | bo->bcm_proc_read = proc_create_data(bo->procname, 0644, |
1519 | proc_dir, | 1506 | proc_dir, |
1520 | bcm_read_proc, sk); | 1507 | &bcm_proc_fops, sk); |
1521 | } | 1508 | } |
1522 | 1509 | ||
1523 | return 0; | 1510 | return 0; |
diff --git a/net/can/proc.c b/net/can/proc.c index 1463653dbe34..9b9ad29be567 100644 --- a/net/can/proc.c +++ b/net/can/proc.c | |||
@@ -196,8 +196,8 @@ void can_stat_update(unsigned long data) | |||
196 | * | 196 | * |
197 | */ | 197 | */ |
198 | 198 | ||
199 | static int can_print_rcvlist(char *page, int len, struct hlist_head *rx_list, | 199 | static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list, |
200 | struct net_device *dev) | 200 | struct net_device *dev) |
201 | { | 201 | { |
202 | struct receiver *r; | 202 | struct receiver *r; |
203 | struct hlist_node *n; | 203 | struct hlist_node *n; |
@@ -208,199 +208,188 @@ static int can_print_rcvlist(char *page, int len, struct hlist_head *rx_list, | |||
208 | " %-5s %08X %08x %08x %08x %8ld %s\n" : | 208 | " %-5s %08X %08x %08x %08x %8ld %s\n" : |
209 | " %-5s %03X %08x %08lx %08lx %8ld %s\n"; | 209 | " %-5s %03X %08x %08lx %08lx %8ld %s\n"; |
210 | 210 | ||
211 | len += snprintf(page + len, PAGE_SIZE - len, fmt, | 211 | seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask, |
212 | DNAME(dev), r->can_id, r->mask, | ||
213 | (unsigned long)r->func, (unsigned long)r->data, | 212 | (unsigned long)r->func, (unsigned long)r->data, |
214 | r->matches, r->ident); | 213 | r->matches, r->ident); |
215 | |||
216 | /* does a typical line fit into the current buffer? */ | ||
217 | |||
218 | /* 100 Bytes before end of buffer */ | ||
219 | if (len > PAGE_SIZE - 100) { | ||
220 | /* mark output cut off */ | ||
221 | len += snprintf(page + len, PAGE_SIZE - len, | ||
222 | " (..)\n"); | ||
223 | break; | ||
224 | } | ||
225 | } | 214 | } |
226 | rcu_read_unlock(); | 215 | rcu_read_unlock(); |
227 | |||
228 | return len; | ||
229 | } | 216 | } |
230 | 217 | ||
231 | static int can_print_recv_banner(char *page, int len) | 218 | static void can_print_recv_banner(struct seq_file *m) |
232 | { | 219 | { |
233 | /* | 220 | /* |
234 | * can1. 00000000 00000000 00000000 | 221 | * can1. 00000000 00000000 00000000 |
235 | * ....... 0 tp20 | 222 | * ....... 0 tp20 |
236 | */ | 223 | */ |
237 | len += snprintf(page + len, PAGE_SIZE - len, | 224 | seq_puts(m, " device can_id can_mask function" |
238 | " device can_id can_mask function" | ||
239 | " userdata matches ident\n"); | 225 | " userdata matches ident\n"); |
240 | |||
241 | return len; | ||
242 | } | 226 | } |
243 | 227 | ||
244 | static int can_proc_read_stats(char *page, char **start, off_t off, | 228 | static int can_stats_proc_show(struct seq_file *m, void *v) |
245 | int count, int *eof, void *data) | ||
246 | { | 229 | { |
247 | int len = 0; | 230 | seq_putc(m, '\n'); |
231 | seq_printf(m, " %8ld transmitted frames (TXF)\n", can_stats.tx_frames); | ||
232 | seq_printf(m, " %8ld received frames (RXF)\n", can_stats.rx_frames); | ||
233 | seq_printf(m, " %8ld matched frames (RXMF)\n", can_stats.matches); | ||
248 | 234 | ||
249 | len += snprintf(page + len, PAGE_SIZE - len, "\n"); | 235 | seq_putc(m, '\n'); |
250 | len += snprintf(page + len, PAGE_SIZE - len, | ||
251 | " %8ld transmitted frames (TXF)\n", | ||
252 | can_stats.tx_frames); | ||
253 | len += snprintf(page + len, PAGE_SIZE - len, | ||
254 | " %8ld received frames (RXF)\n", can_stats.rx_frames); | ||
255 | len += snprintf(page + len, PAGE_SIZE - len, | ||
256 | " %8ld matched frames (RXMF)\n", can_stats.matches); | ||
257 | |||
258 | len += snprintf(page + len, PAGE_SIZE - len, "\n"); | ||
259 | 236 | ||
260 | if (can_stattimer.function == can_stat_update) { | 237 | if (can_stattimer.function == can_stat_update) { |
261 | len += snprintf(page + len, PAGE_SIZE - len, | 238 | seq_printf(m, " %8ld %% total match ratio (RXMR)\n", |
262 | " %8ld %% total match ratio (RXMR)\n", | ||
263 | can_stats.total_rx_match_ratio); | 239 | can_stats.total_rx_match_ratio); |
264 | 240 | ||
265 | len += snprintf(page + len, PAGE_SIZE - len, | 241 | seq_printf(m, " %8ld frames/s total tx rate (TXR)\n", |
266 | " %8ld frames/s total tx rate (TXR)\n", | ||
267 | can_stats.total_tx_rate); | 242 | can_stats.total_tx_rate); |
268 | len += snprintf(page + len, PAGE_SIZE - len, | 243 | seq_printf(m, " %8ld frames/s total rx rate (RXR)\n", |
269 | " %8ld frames/s total rx rate (RXR)\n", | ||
270 | can_stats.total_rx_rate); | 244 | can_stats.total_rx_rate); |
271 | 245 | ||
272 | len += snprintf(page + len, PAGE_SIZE - len, "\n"); | 246 | seq_putc(m, '\n'); |
273 | 247 | ||
274 | len += snprintf(page + len, PAGE_SIZE - len, | 248 | seq_printf(m, " %8ld %% current match ratio (CRXMR)\n", |
275 | " %8ld %% current match ratio (CRXMR)\n", | ||
276 | can_stats.current_rx_match_ratio); | 249 | can_stats.current_rx_match_ratio); |
277 | 250 | ||
278 | len += snprintf(page + len, PAGE_SIZE - len, | 251 | seq_printf(m, " %8ld frames/s current tx rate (CTXR)\n", |
279 | " %8ld frames/s current tx rate (CTXR)\n", | ||
280 | can_stats.current_tx_rate); | 252 | can_stats.current_tx_rate); |
281 | len += snprintf(page + len, PAGE_SIZE - len, | 253 | seq_printf(m, " %8ld frames/s current rx rate (CRXR)\n", |
282 | " %8ld frames/s current rx rate (CRXR)\n", | ||
283 | can_stats.current_rx_rate); | 254 | can_stats.current_rx_rate); |
284 | 255 | ||
285 | len += snprintf(page + len, PAGE_SIZE - len, "\n"); | 256 | seq_putc(m, '\n'); |
286 | 257 | ||
287 | len += snprintf(page + len, PAGE_SIZE - len, | 258 | seq_printf(m, " %8ld %% max match ratio (MRXMR)\n", |
288 | " %8ld %% max match ratio (MRXMR)\n", | ||
289 | can_stats.max_rx_match_ratio); | 259 | can_stats.max_rx_match_ratio); |
290 | 260 | ||
291 | len += snprintf(page + len, PAGE_SIZE - len, | 261 | seq_printf(m, " %8ld frames/s max tx rate (MTXR)\n", |
292 | " %8ld frames/s max tx rate (MTXR)\n", | ||
293 | can_stats.max_tx_rate); | 262 | can_stats.max_tx_rate); |
294 | len += snprintf(page + len, PAGE_SIZE - len, | 263 | seq_printf(m, " %8ld frames/s max rx rate (MRXR)\n", |
295 | " %8ld frames/s max rx rate (MRXR)\n", | ||
296 | can_stats.max_rx_rate); | 264 | can_stats.max_rx_rate); |
297 | 265 | ||
298 | len += snprintf(page + len, PAGE_SIZE - len, "\n"); | 266 | seq_putc(m, '\n'); |
299 | } | 267 | } |
300 | 268 | ||
301 | len += snprintf(page + len, PAGE_SIZE - len, | 269 | seq_printf(m, " %8ld current receive list entries (CRCV)\n", |
302 | " %8ld current receive list entries (CRCV)\n", | ||
303 | can_pstats.rcv_entries); | 270 | can_pstats.rcv_entries); |
304 | len += snprintf(page + len, PAGE_SIZE - len, | 271 | seq_printf(m, " %8ld maximum receive list entries (MRCV)\n", |
305 | " %8ld maximum receive list entries (MRCV)\n", | ||
306 | can_pstats.rcv_entries_max); | 272 | can_pstats.rcv_entries_max); |
307 | 273 | ||
308 | if (can_pstats.stats_reset) | 274 | if (can_pstats.stats_reset) |
309 | len += snprintf(page + len, PAGE_SIZE - len, | 275 | seq_printf(m, "\n %8ld statistic resets (STR)\n", |
310 | "\n %8ld statistic resets (STR)\n", | ||
311 | can_pstats.stats_reset); | 276 | can_pstats.stats_reset); |
312 | 277 | ||
313 | if (can_pstats.user_reset) | 278 | if (can_pstats.user_reset) |
314 | len += snprintf(page + len, PAGE_SIZE - len, | 279 | seq_printf(m, " %8ld user statistic resets (USTR)\n", |
315 | " %8ld user statistic resets (USTR)\n", | ||
316 | can_pstats.user_reset); | 280 | can_pstats.user_reset); |
317 | 281 | ||
318 | len += snprintf(page + len, PAGE_SIZE - len, "\n"); | 282 | seq_putc(m, '\n'); |
319 | 283 | return 0; | |
320 | *eof = 1; | ||
321 | return len; | ||
322 | } | 284 | } |
323 | 285 | ||
324 | static int can_proc_read_reset_stats(char *page, char **start, off_t off, | 286 | static int can_stats_proc_open(struct inode *inode, struct file *file) |
325 | int count, int *eof, void *data) | ||
326 | { | 287 | { |
327 | int len = 0; | 288 | return single_open(file, can_stats_proc_show, NULL); |
289 | } | ||
290 | |||
291 | static const struct file_operations can_stats_proc_fops = { | ||
292 | .owner = THIS_MODULE, | ||
293 | .open = can_stats_proc_open, | ||
294 | .read = seq_read, | ||
295 | .llseek = seq_lseek, | ||
296 | .release = single_release, | ||
297 | }; | ||
328 | 298 | ||
299 | static int can_reset_stats_proc_show(struct seq_file *m, void *v) | ||
300 | { | ||
329 | user_reset = 1; | 301 | user_reset = 1; |
330 | 302 | ||
331 | if (can_stattimer.function == can_stat_update) { | 303 | if (can_stattimer.function == can_stat_update) { |
332 | len += snprintf(page + len, PAGE_SIZE - len, | 304 | seq_printf(m, "Scheduled statistic reset #%ld.\n", |
333 | "Scheduled statistic reset #%ld.\n", | ||
334 | can_pstats.stats_reset + 1); | 305 | can_pstats.stats_reset + 1); |
335 | 306 | ||
336 | } else { | 307 | } else { |
337 | if (can_stats.jiffies_init != jiffies) | 308 | if (can_stats.jiffies_init != jiffies) |
338 | can_init_stats(); | 309 | can_init_stats(); |
339 | 310 | ||
340 | len += snprintf(page + len, PAGE_SIZE - len, | 311 | seq_printf(m, "Performed statistic reset #%ld.\n", |
341 | "Performed statistic reset #%ld.\n", | ||
342 | can_pstats.stats_reset); | 312 | can_pstats.stats_reset); |
343 | } | 313 | } |
314 | return 0; | ||
315 | } | ||
344 | 316 | ||
345 | *eof = 1; | 317 | static int can_reset_stats_proc_open(struct inode *inode, struct file *file) |
346 | return len; | 318 | { |
319 | return single_open(file, can_reset_stats_proc_show, NULL); | ||
347 | } | 320 | } |
348 | 321 | ||
349 | static int can_proc_read_version(char *page, char **start, off_t off, | 322 | static const struct file_operations can_reset_stats_proc_fops = { |
350 | int count, int *eof, void *data) | 323 | .owner = THIS_MODULE, |
324 | .open = can_reset_stats_proc_open, | ||
325 | .read = seq_read, | ||
326 | .llseek = seq_lseek, | ||
327 | .release = single_release, | ||
328 | }; | ||
329 | |||
330 | static int can_version_proc_show(struct seq_file *m, void *v) | ||
351 | { | 331 | { |
352 | int len = 0; | 332 | seq_printf(m, "%s\n", CAN_VERSION_STRING); |
333 | return 0; | ||
334 | } | ||
353 | 335 | ||
354 | len += snprintf(page + len, PAGE_SIZE - len, "%s\n", | 336 | static int can_version_proc_open(struct inode *inode, struct file *file) |
355 | CAN_VERSION_STRING); | 337 | { |
356 | *eof = 1; | 338 | return single_open(file, can_version_proc_show, NULL); |
357 | return len; | ||
358 | } | 339 | } |
359 | 340 | ||
360 | static int can_proc_read_rcvlist(char *page, char **start, off_t off, | 341 | static const struct file_operations can_version_proc_fops = { |
361 | int count, int *eof, void *data) | 342 | .owner = THIS_MODULE, |
343 | .open = can_version_proc_open, | ||
344 | .read = seq_read, | ||
345 | .llseek = seq_lseek, | ||
346 | .release = single_release, | ||
347 | }; | ||
348 | |||
349 | static int can_rcvlist_proc_show(struct seq_file *m, void *v) | ||
362 | { | 350 | { |
363 | /* double cast to prevent GCC warning */ | 351 | /* double cast to prevent GCC warning */ |
364 | int idx = (int)(long)data; | 352 | int idx = (int)(long)m->private; |
365 | int len = 0; | ||
366 | struct dev_rcv_lists *d; | 353 | struct dev_rcv_lists *d; |
367 | struct hlist_node *n; | 354 | struct hlist_node *n; |
368 | 355 | ||
369 | len += snprintf(page + len, PAGE_SIZE - len, | 356 | seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]); |
370 | "\nreceive list '%s':\n", rx_list_name[idx]); | ||
371 | 357 | ||
372 | rcu_read_lock(); | 358 | rcu_read_lock(); |
373 | hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) { | 359 | hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) { |
374 | 360 | ||
375 | if (!hlist_empty(&d->rx[idx])) { | 361 | if (!hlist_empty(&d->rx[idx])) { |
376 | len = can_print_recv_banner(page, len); | 362 | can_print_recv_banner(m); |
377 | len = can_print_rcvlist(page, len, &d->rx[idx], d->dev); | 363 | can_print_rcvlist(m, &d->rx[idx], d->dev); |
378 | } else | 364 | } else |
379 | len += snprintf(page + len, PAGE_SIZE - len, | 365 | seq_printf(m, " (%s: no entry)\n", DNAME(d->dev)); |
380 | " (%s: no entry)\n", DNAME(d->dev)); | ||
381 | |||
382 | /* exit on end of buffer? */ | ||
383 | if (len > PAGE_SIZE - 100) | ||
384 | break; | ||
385 | } | 366 | } |
386 | rcu_read_unlock(); | 367 | rcu_read_unlock(); |
387 | 368 | ||
388 | len += snprintf(page + len, PAGE_SIZE - len, "\n"); | 369 | seq_putc(m, '\n'); |
370 | return 0; | ||
371 | } | ||
389 | 372 | ||
390 | *eof = 1; | 373 | static int can_rcvlist_proc_open(struct inode *inode, struct file *file) |
391 | return len; | 374 | { |
375 | return single_open(file, can_rcvlist_proc_show, PDE(inode)->data); | ||
392 | } | 376 | } |
393 | 377 | ||
394 | static int can_proc_read_rcvlist_sff(char *page, char **start, off_t off, | 378 | static const struct file_operations can_rcvlist_proc_fops = { |
395 | int count, int *eof, void *data) | 379 | .owner = THIS_MODULE, |
380 | .open = can_rcvlist_proc_open, | ||
381 | .read = seq_read, | ||
382 | .llseek = seq_lseek, | ||
383 | .release = single_release, | ||
384 | }; | ||
385 | |||
386 | static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v) | ||
396 | { | 387 | { |
397 | int len = 0; | ||
398 | struct dev_rcv_lists *d; | 388 | struct dev_rcv_lists *d; |
399 | struct hlist_node *n; | 389 | struct hlist_node *n; |
400 | 390 | ||
401 | /* RX_SFF */ | 391 | /* RX_SFF */ |
402 | len += snprintf(page + len, PAGE_SIZE - len, | 392 | seq_puts(m, "\nreceive list 'rx_sff':\n"); |
403 | "\nreceive list 'rx_sff':\n"); | ||
404 | 393 | ||
405 | rcu_read_lock(); | 394 | rcu_read_lock(); |
406 | hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) { | 395 | hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) { |
@@ -413,46 +402,38 @@ static int can_proc_read_rcvlist_sff(char *page, char **start, off_t off, | |||
413 | } | 402 | } |
414 | 403 | ||
415 | if (!all_empty) { | 404 | if (!all_empty) { |
416 | len = can_print_recv_banner(page, len); | 405 | can_print_recv_banner(m); |
417 | for (i = 0; i < 0x800; i++) { | 406 | for (i = 0; i < 0x800; i++) { |
418 | if (!hlist_empty(&d->rx_sff[i]) && | 407 | if (!hlist_empty(&d->rx_sff[i])) |
419 | len < PAGE_SIZE - 100) | 408 | can_print_rcvlist(m, &d->rx_sff[i], |
420 | len = can_print_rcvlist(page, len, | 409 | d->dev); |
421 | &d->rx_sff[i], | ||
422 | d->dev); | ||
423 | } | 410 | } |
424 | } else | 411 | } else |
425 | len += snprintf(page + len, PAGE_SIZE - len, | 412 | seq_printf(m, " (%s: no entry)\n", DNAME(d->dev)); |
426 | " (%s: no entry)\n", DNAME(d->dev)); | ||
427 | |||
428 | /* exit on end of buffer? */ | ||
429 | if (len > PAGE_SIZE - 100) | ||
430 | break; | ||
431 | } | 413 | } |
432 | rcu_read_unlock(); | 414 | rcu_read_unlock(); |
433 | 415 | ||
434 | len += snprintf(page + len, PAGE_SIZE - len, "\n"); | 416 | seq_putc(m, '\n'); |
417 | return 0; | ||
418 | } | ||
435 | 419 | ||
436 | *eof = 1; | 420 | static int can_rcvlist_sff_proc_open(struct inode *inode, struct file *file) |
437 | return len; | 421 | { |
422 | return single_open(file, can_rcvlist_sff_proc_show, NULL); | ||
438 | } | 423 | } |
439 | 424 | ||
425 | static const struct file_operations can_rcvlist_sff_proc_fops = { | ||
426 | .owner = THIS_MODULE, | ||
427 | .open = can_rcvlist_sff_proc_open, | ||
428 | .read = seq_read, | ||
429 | .llseek = seq_lseek, | ||
430 | .release = single_release, | ||
431 | }; | ||
432 | |||
440 | /* | 433 | /* |
441 | * proc utility functions | 434 | * proc utility functions |
442 | */ | 435 | */ |
443 | 436 | ||
444 | static struct proc_dir_entry *can_create_proc_readentry(const char *name, | ||
445 | mode_t mode, | ||
446 | read_proc_t *read_proc, | ||
447 | void *data) | ||
448 | { | ||
449 | if (can_dir) | ||
450 | return create_proc_read_entry(name, mode, can_dir, read_proc, | ||
451 | data); | ||
452 | else | ||
453 | return NULL; | ||
454 | } | ||
455 | |||
456 | static void can_remove_proc_readentry(const char *name) | 437 | static void can_remove_proc_readentry(const char *name) |
457 | { | 438 | { |
458 | if (can_dir) | 439 | if (can_dir) |
@@ -474,24 +455,24 @@ void can_init_proc(void) | |||
474 | } | 455 | } |
475 | 456 | ||
476 | /* own procfs entries from the AF_CAN core */ | 457 | /* own procfs entries from the AF_CAN core */ |
477 | pde_version = can_create_proc_readentry(CAN_PROC_VERSION, 0644, | 458 | pde_version = proc_create(CAN_PROC_VERSION, 0644, can_dir, |
478 | can_proc_read_version, NULL); | 459 | &can_version_proc_fops); |
479 | pde_stats = can_create_proc_readentry(CAN_PROC_STATS, 0644, | 460 | pde_stats = proc_create(CAN_PROC_STATS, 0644, can_dir, |
480 | can_proc_read_stats, NULL); | 461 | &can_stats_proc_fops); |
481 | pde_reset_stats = can_create_proc_readentry(CAN_PROC_RESET_STATS, 0644, | 462 | pde_reset_stats = proc_create(CAN_PROC_RESET_STATS, 0644, can_dir, |
482 | can_proc_read_reset_stats, NULL); | 463 | &can_reset_stats_proc_fops); |
483 | pde_rcvlist_err = can_create_proc_readentry(CAN_PROC_RCVLIST_ERR, 0644, | 464 | pde_rcvlist_err = proc_create_data(CAN_PROC_RCVLIST_ERR, 0644, can_dir, |
484 | can_proc_read_rcvlist, (void *)RX_ERR); | 465 | &can_rcvlist_proc_fops, (void *)RX_ERR); |
485 | pde_rcvlist_all = can_create_proc_readentry(CAN_PROC_RCVLIST_ALL, 0644, | 466 | pde_rcvlist_all = proc_create_data(CAN_PROC_RCVLIST_ALL, 0644, can_dir, |
486 | can_proc_read_rcvlist, (void *)RX_ALL); | 467 | &can_rcvlist_proc_fops, (void *)RX_ALL); |
487 | pde_rcvlist_fil = can_create_proc_readentry(CAN_PROC_RCVLIST_FIL, 0644, | 468 | pde_rcvlist_fil = proc_create_data(CAN_PROC_RCVLIST_FIL, 0644, can_dir, |
488 | can_proc_read_rcvlist, (void *)RX_FIL); | 469 | &can_rcvlist_proc_fops, (void *)RX_FIL); |
489 | pde_rcvlist_inv = can_create_proc_readentry(CAN_PROC_RCVLIST_INV, 0644, | 470 | pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir, |
490 | can_proc_read_rcvlist, (void *)RX_INV); | 471 | &can_rcvlist_proc_fops, (void *)RX_INV); |
491 | pde_rcvlist_eff = can_create_proc_readentry(CAN_PROC_RCVLIST_EFF, 0644, | 472 | pde_rcvlist_eff = proc_create_data(CAN_PROC_RCVLIST_EFF, 0644, can_dir, |
492 | can_proc_read_rcvlist, (void *)RX_EFF); | 473 | &can_rcvlist_proc_fops, (void *)RX_EFF); |
493 | pde_rcvlist_sff = can_create_proc_readentry(CAN_PROC_RCVLIST_SFF, 0644, | 474 | pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir, |
494 | can_proc_read_rcvlist_sff, NULL); | 475 | &can_rcvlist_sff_proc_fops); |
495 | } | 476 | } |
496 | 477 | ||
497 | /* | 478 | /* |
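For userspace the seq_file conversion of net/can/proc.c is invisible, except that output is no longer cut off near PAGE_SIZE with a "(..)" marker; the seq_file core sizes its buffer and handles partial reads and llseek. A trivial reader, assuming the usual /proc/net/can/ location of can_dir (adjust the path if your configuration differs):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/can/stats", "r");	/* assumed path */

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}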
diff --git a/net/compat.c b/net/compat.c index 8d739053afe4..12728b17a226 100644 --- a/net/compat.c +++ b/net/compat.c | |||
@@ -743,6 +743,18 @@ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, uns | |||
743 | return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); | 743 | return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); |
744 | } | 744 | } |
745 | 745 | ||
746 | asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags) | ||
747 | { | ||
748 | return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT); | ||
749 | } | ||
750 | |||
751 | asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len, | ||
752 | unsigned flags, struct sockaddr __user *addr, | ||
753 | int __user *addrlen) | ||
754 | { | ||
755 | return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen); | ||
756 | } | ||
757 | |||
746 | asmlinkage long compat_sys_socketcall(int call, u32 __user *args) | 758 | asmlinkage long compat_sys_socketcall(int call, u32 __user *args) |
747 | { | 759 | { |
748 | int ret; | 760 | int ret; |
@@ -788,10 +800,11 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args) | |||
788 | ret = sys_sendto(a0, compat_ptr(a1), a[2], a[3], compat_ptr(a[4]), a[5]); | 800 | ret = sys_sendto(a0, compat_ptr(a1), a[2], a[3], compat_ptr(a[4]), a[5]); |
789 | break; | 801 | break; |
790 | case SYS_RECV: | 802 | case SYS_RECV: |
791 | ret = sys_recv(a0, compat_ptr(a1), a[2], a[3]); | 803 | ret = compat_sys_recv(a0, compat_ptr(a1), a[2], a[3]); |
792 | break; | 804 | break; |
793 | case SYS_RECVFROM: | 805 | case SYS_RECVFROM: |
794 | ret = sys_recvfrom(a0, compat_ptr(a1), a[2], a[3], compat_ptr(a[4]), compat_ptr(a[5])); | 806 | ret = compat_sys_recvfrom(a0, compat_ptr(a1), a[2], a[3], |
807 | compat_ptr(a[4]), compat_ptr(a[5])); | ||
795 | break; | 808 | break; |
796 | case SYS_SHUTDOWN: | 809 | case SYS_SHUTDOWN: |
797 | ret = sys_shutdown(a0,a1); | 810 | ret = sys_shutdown(a0,a1); |
diff --git a/net/core/datagram.c b/net/core/datagram.c index b0fe69211eef..1c6cf3a1a4f6 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -55,6 +55,7 @@ | |||
55 | #include <net/checksum.h> | 55 | #include <net/checksum.h> |
56 | #include <net/sock.h> | 56 | #include <net/sock.h> |
57 | #include <net/tcp_states.h> | 57 | #include <net/tcp_states.h> |
58 | #include <trace/events/skb.h> | ||
58 | 59 | ||
59 | /* | 60 | /* |
60 | * Is a socket 'connection oriented' ? | 61 | * Is a socket 'connection oriented' ? |
@@ -284,6 +285,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, | |||
284 | int i, copy = start - offset; | 285 | int i, copy = start - offset; |
285 | struct sk_buff *frag_iter; | 286 | struct sk_buff *frag_iter; |
286 | 287 | ||
288 | trace_skb_copy_datagram_iovec(skb, len); | ||
289 | |||
287 | /* Copy header. */ | 290 | /* Copy header. */ |
288 | if (copy > 0) { | 291 | if (copy > 0) { |
289 | if (copy > len) | 292 | if (copy > len) |
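The datagram.c change pulls in <trace/events/skb.h> and fires trace_skb_copy_datagram_iovec(skb, len) before the copy loop. When ftrace is built in, the event can be toggled at run time; the sketch below assumes debugfs is mounted at /sys/kernel/debug and that the event lives in the "skb" trace system, which may differ on your kernel.

#include <stdio.h>

int main(void)
{
	/* assumed ftrace layout; check /sys/kernel/debug/tracing/events */
	const char *path = "/sys/kernel/debug/tracing/events/"
			   "skb/skb_copy_datagram_iovec/enable";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* write "0" to switch the event back off */
	fclose(f);
	return 0;
}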
diff --git a/net/core/dev.c b/net/core/dev.c index 278d489aad3b..84945470ab38 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -191,7 +191,6 @@ static struct list_head ptype_all __read_mostly; /* Taps */ | |||
191 | * semaphore held. | 191 | * semaphore held. |
192 | */ | 192 | */ |
193 | DEFINE_RWLOCK(dev_base_lock); | 193 | DEFINE_RWLOCK(dev_base_lock); |
194 | |||
195 | EXPORT_SYMBOL(dev_base_lock); | 194 | EXPORT_SYMBOL(dev_base_lock); |
196 | 195 | ||
197 | #define NETDEV_HASHBITS 8 | 196 | #define NETDEV_HASHBITS 8 |
@@ -248,6 +247,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain); | |||
248 | */ | 247 | */ |
249 | 248 | ||
250 | DEFINE_PER_CPU(struct softnet_data, softnet_data); | 249 | DEFINE_PER_CPU(struct softnet_data, softnet_data); |
250 | EXPORT_PER_CPU_SYMBOL(softnet_data); | ||
251 | 251 | ||
252 | #ifdef CONFIG_LOCKDEP | 252 | #ifdef CONFIG_LOCKDEP |
253 | /* | 253 | /* |
@@ -269,10 +269,10 @@ static const unsigned short netdev_lock_type[] = | |||
269 | ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, | 269 | ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, |
270 | ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, | 270 | ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211, |
271 | ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, | 271 | ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, |
272 | ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY, | 272 | ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, |
273 | ARPHRD_VOID, ARPHRD_NONE}; | 273 | ARPHRD_VOID, ARPHRD_NONE}; |
274 | 274 | ||
275 | static const char *netdev_lock_name[] = | 275 | static const char *const netdev_lock_name[] = |
276 | {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", | 276 | {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", |
277 | "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", | 277 | "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", |
278 | "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", | 278 | "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", |
@@ -287,7 +287,7 @@ static const char *netdev_lock_name[] = | |||
287 | "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", | 287 | "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", |
288 | "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", | 288 | "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", |
289 | "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", | 289 | "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", |
290 | "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY", | 290 | "_xmit_PHONET_PIPE", "_xmit_IEEE802154", |
291 | "_xmit_VOID", "_xmit_NONE"}; | 291 | "_xmit_VOID", "_xmit_NONE"}; |
292 | 292 | ||
293 | static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; | 293 | static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; |
@@ -381,6 +381,7 @@ void dev_add_pack(struct packet_type *pt) | |||
381 | } | 381 | } |
382 | spin_unlock_bh(&ptype_lock); | 382 | spin_unlock_bh(&ptype_lock); |
383 | } | 383 | } |
384 | EXPORT_SYMBOL(dev_add_pack); | ||
384 | 385 | ||
385 | /** | 386 | /** |
386 | * __dev_remove_pack - remove packet handler | 387 | * __dev_remove_pack - remove packet handler |
@@ -418,6 +419,8 @@ void __dev_remove_pack(struct packet_type *pt) | |||
418 | out: | 419 | out: |
419 | spin_unlock_bh(&ptype_lock); | 420 | spin_unlock_bh(&ptype_lock); |
420 | } | 421 | } |
422 | EXPORT_SYMBOL(__dev_remove_pack); | ||
423 | |||
421 | /** | 424 | /** |
422 | * dev_remove_pack - remove packet handler | 425 | * dev_remove_pack - remove packet handler |
423 | * @pt: packet type declaration | 426 | * @pt: packet type declaration |
@@ -436,6 +439,7 @@ void dev_remove_pack(struct packet_type *pt) | |||
436 | 439 | ||
437 | synchronize_net(); | 440 | synchronize_net(); |
438 | } | 441 | } |
442 | EXPORT_SYMBOL(dev_remove_pack); | ||
439 | 443 | ||
440 | /****************************************************************************** | 444 | /****************************************************************************** |
441 | 445 | ||
@@ -499,6 +503,7 @@ int netdev_boot_setup_check(struct net_device *dev) | |||
499 | } | 503 | } |
500 | return 0; | 504 | return 0; |
501 | } | 505 | } |
506 | EXPORT_SYMBOL(netdev_boot_setup_check); | ||
502 | 507 | ||
503 | 508 | ||
504 | /** | 509 | /** |
@@ -591,6 +596,7 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name) | |||
591 | } | 596 | } |
592 | return NULL; | 597 | return NULL; |
593 | } | 598 | } |
599 | EXPORT_SYMBOL(__dev_get_by_name); | ||
594 | 600 | ||
595 | /** | 601 | /** |
596 | * dev_get_by_name - find a device by its name | 602 | * dev_get_by_name - find a device by its name |
@@ -615,6 +621,7 @@ struct net_device *dev_get_by_name(struct net *net, const char *name) | |||
615 | read_unlock(&dev_base_lock); | 621 | read_unlock(&dev_base_lock); |
616 | return dev; | 622 | return dev; |
617 | } | 623 | } |
624 | EXPORT_SYMBOL(dev_get_by_name); | ||
618 | 625 | ||
619 | /** | 626 | /** |
620 | * __dev_get_by_index - find a device by its ifindex | 627 | * __dev_get_by_index - find a device by its ifindex |
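A large share of the net/core/dev.c changes in this and the following hunks simply place EXPORT_SYMBOL() directly after each exported function, dev_get_by_name() among them. That helper takes a reference on the device it returns, so module code pairs it with dev_put(); a hedged fragment (the interface name is arbitrary):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

static int demo_check_mtu(void)
{
	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
	int mtu;

	if (!dev)
		return -ENODEV;
	mtu = dev->mtu;
	dev_put(dev);		/* drop the reference taken by the lookup */
	return mtu;
}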
@@ -640,6 +647,7 @@ struct net_device *__dev_get_by_index(struct net *net, int ifindex) | |||
640 | } | 647 | } |
641 | return NULL; | 648 | return NULL; |
642 | } | 649 | } |
650 | EXPORT_SYMBOL(__dev_get_by_index); | ||
643 | 651 | ||
644 | 652 | ||
645 | /** | 653 | /** |
@@ -664,6 +672,7 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex) | |||
664 | read_unlock(&dev_base_lock); | 672 | read_unlock(&dev_base_lock); |
665 | return dev; | 673 | return dev; |
666 | } | 674 | } |
675 | EXPORT_SYMBOL(dev_get_by_index); | ||
667 | 676 | ||
668 | /** | 677 | /** |
669 | * dev_getbyhwaddr - find a device by its hardware address | 678 | * dev_getbyhwaddr - find a device by its hardware address |
@@ -693,7 +702,6 @@ struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *h | |||
693 | 702 | ||
694 | return NULL; | 703 | return NULL; |
695 | } | 704 | } |
696 | |||
697 | EXPORT_SYMBOL(dev_getbyhwaddr); | 705 | EXPORT_SYMBOL(dev_getbyhwaddr); |
698 | 706 | ||
699 | struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) | 707 | struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) |
@@ -707,7 +715,6 @@ struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) | |||
707 | 715 | ||
708 | return NULL; | 716 | return NULL; |
709 | } | 717 | } |
710 | |||
711 | EXPORT_SYMBOL(__dev_getfirstbyhwtype); | 718 | EXPORT_SYMBOL(__dev_getfirstbyhwtype); |
712 | 719 | ||
713 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) | 720 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) |
@@ -721,7 +728,6 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) | |||
721 | rtnl_unlock(); | 728 | rtnl_unlock(); |
722 | return dev; | 729 | return dev; |
723 | } | 730 | } |
724 | |||
725 | EXPORT_SYMBOL(dev_getfirstbyhwtype); | 731 | EXPORT_SYMBOL(dev_getfirstbyhwtype); |
726 | 732 | ||
727 | /** | 733 | /** |
@@ -736,7 +742,8 @@ EXPORT_SYMBOL(dev_getfirstbyhwtype); | |||
736 | * dev_put to indicate they have finished with it. | 742 | * dev_put to indicate they have finished with it. |
737 | */ | 743 | */ |
738 | 744 | ||
739 | struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask) | 745 | struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, |
746 | unsigned short mask) | ||
740 | { | 747 | { |
741 | struct net_device *dev, *ret; | 748 | struct net_device *dev, *ret; |
742 | 749 | ||
@@ -752,6 +759,7 @@ struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, u | |||
752 | read_unlock(&dev_base_lock); | 759 | read_unlock(&dev_base_lock); |
753 | return ret; | 760 | return ret; |
754 | } | 761 | } |
762 | EXPORT_SYMBOL(dev_get_by_flags); | ||
755 | 763 | ||
756 | /** | 764 | /** |
757 | * dev_valid_name - check if name is okay for network device | 765 | * dev_valid_name - check if name is okay for network device |
@@ -777,6 +785,7 @@ int dev_valid_name(const char *name) | |||
777 | } | 785 | } |
778 | return 1; | 786 | return 1; |
779 | } | 787 | } |
788 | EXPORT_SYMBOL(dev_valid_name); | ||
780 | 789 | ||
781 | /** | 790 | /** |
782 | * __dev_alloc_name - allocate a name for a device | 791 | * __dev_alloc_name - allocate a name for a device |
@@ -870,6 +879,7 @@ int dev_alloc_name(struct net_device *dev, const char *name) | |||
870 | strlcpy(dev->name, buf, IFNAMSIZ); | 879 | strlcpy(dev->name, buf, IFNAMSIZ); |
871 | return ret; | 880 | return ret; |
872 | } | 881 | } |
882 | EXPORT_SYMBOL(dev_alloc_name); | ||
873 | 883 | ||
874 | 884 | ||
875 | /** | 885 | /** |
@@ -906,8 +916,7 @@ int dev_change_name(struct net_device *dev, const char *newname) | |||
906 | err = dev_alloc_name(dev, newname); | 916 | err = dev_alloc_name(dev, newname); |
907 | if (err < 0) | 917 | if (err < 0) |
908 | return err; | 918 | return err; |
909 | } | 919 | } else if (__dev_get_by_name(net, newname)) |
910 | else if (__dev_get_by_name(net, newname)) | ||
911 | return -EEXIST; | 920 | return -EEXIST; |
912 | else | 921 | else |
913 | strlcpy(dev->name, newname, IFNAMSIZ); | 922 | strlcpy(dev->name, newname, IFNAMSIZ); |
@@ -970,7 +979,7 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) | |||
970 | return 0; | 979 | return 0; |
971 | } | 980 | } |
972 | 981 | ||
973 | dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL); | 982 | dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); |
974 | if (!dev->ifalias) | 983 | if (!dev->ifalias) |
975 | return -ENOMEM; | 984 | return -ENOMEM; |
976 | 985 | ||
@@ -1006,6 +1015,7 @@ void netdev_state_change(struct net_device *dev) | |||
1006 | rtmsg_ifinfo(RTM_NEWLINK, dev, 0); | 1015 | rtmsg_ifinfo(RTM_NEWLINK, dev, 0); |
1007 | } | 1016 | } |
1008 | } | 1017 | } |
1018 | EXPORT_SYMBOL(netdev_state_change); | ||
1009 | 1019 | ||
1010 | void netdev_bonding_change(struct net_device *dev) | 1020 | void netdev_bonding_change(struct net_device *dev) |
1011 | { | 1021 | { |
@@ -1034,6 +1044,7 @@ void dev_load(struct net *net, const char *name) | |||
1034 | if (!dev && capable(CAP_NET_ADMIN)) | 1044 | if (!dev && capable(CAP_NET_ADMIN)) |
1035 | request_module("%s", name); | 1045 | request_module("%s", name); |
1036 | } | 1046 | } |
1047 | EXPORT_SYMBOL(dev_load); | ||
1037 | 1048 | ||
1038 | /** | 1049 | /** |
1039 | * dev_open - prepare an interface for use. | 1050 | * dev_open - prepare an interface for use. |
@@ -1118,6 +1129,7 @@ int dev_open(struct net_device *dev) | |||
1118 | 1129 | ||
1119 | return ret; | 1130 | return ret; |
1120 | } | 1131 | } |
1132 | EXPORT_SYMBOL(dev_open); | ||
1121 | 1133 | ||
1122 | /** | 1134 | /** |
1123 | * dev_close - shutdown an interface. | 1135 | * dev_close - shutdown an interface. |
@@ -1184,6 +1196,7 @@ int dev_close(struct net_device *dev) | |||
1184 | 1196 | ||
1185 | return 0; | 1197 | return 0; |
1186 | } | 1198 | } |
1199 | EXPORT_SYMBOL(dev_close); | ||
1187 | 1200 | ||
1188 | 1201 | ||
1189 | /** | 1202 | /** |
@@ -1279,6 +1292,7 @@ rollback: | |||
1279 | raw_notifier_chain_unregister(&netdev_chain, nb); | 1292 | raw_notifier_chain_unregister(&netdev_chain, nb); |
1280 | goto unlock; | 1293 | goto unlock; |
1281 | } | 1294 | } |
1295 | EXPORT_SYMBOL(register_netdevice_notifier); | ||
1282 | 1296 | ||
1283 | /** | 1297 | /** |
1284 | * unregister_netdevice_notifier - unregister a network notifier block | 1298 | * unregister_netdevice_notifier - unregister a network notifier block |
@@ -1299,6 +1313,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb) | |||
1299 | rtnl_unlock(); | 1313 | rtnl_unlock(); |
1300 | return err; | 1314 | return err; |
1301 | } | 1315 | } |
1316 | EXPORT_SYMBOL(unregister_netdevice_notifier); | ||
1302 | 1317 | ||
1303 | /** | 1318 | /** |
1304 | * call_netdevice_notifiers - call all network notifier blocks | 1319 | * call_netdevice_notifiers - call all network notifier blocks |
@@ -1321,11 +1336,13 @@ void net_enable_timestamp(void) | |||
1321 | { | 1336 | { |
1322 | atomic_inc(&netstamp_needed); | 1337 | atomic_inc(&netstamp_needed); |
1323 | } | 1338 | } |
1339 | EXPORT_SYMBOL(net_enable_timestamp); | ||
1324 | 1340 | ||
1325 | void net_disable_timestamp(void) | 1341 | void net_disable_timestamp(void) |
1326 | { | 1342 | { |
1327 | atomic_dec(&netstamp_needed); | 1343 | atomic_dec(&netstamp_needed); |
1328 | } | 1344 | } |
1345 | EXPORT_SYMBOL(net_disable_timestamp); | ||
1329 | 1346 | ||
1330 | static inline void net_timestamp(struct sk_buff *skb) | 1347 | static inline void net_timestamp(struct sk_buff *skb) |
1331 | { | 1348 | { |
@@ -1359,7 +1376,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) | |||
1359 | if ((ptype->dev == dev || !ptype->dev) && | 1376 | if ((ptype->dev == dev || !ptype->dev) && |
1360 | (ptype->af_packet_priv == NULL || | 1377 | (ptype->af_packet_priv == NULL || |
1361 | (struct sock *)ptype->af_packet_priv != skb->sk)) { | 1378 | (struct sock *)ptype->af_packet_priv != skb->sk)) { |
1362 | struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC); | 1379 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
1363 | if (!skb2) | 1380 | if (!skb2) |
1364 | break; | 1381 | break; |
1365 | 1382 | ||
@@ -1527,6 +1544,7 @@ out_set_summed: | |||
1527 | out: | 1544 | out: |
1528 | return ret; | 1545 | return ret; |
1529 | } | 1546 | } |
1547 | EXPORT_SYMBOL(skb_checksum_help); | ||
1530 | 1548 | ||
1531 | /** | 1549 | /** |
1532 | * skb_gso_segment - Perform segmentation on skb. | 1550 | * skb_gso_segment - Perform segmentation on skb. |
@@ -1589,7 +1607,6 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) | |||
1589 | 1607 | ||
1590 | return segs; | 1608 | return segs; |
1591 | } | 1609 | } |
1592 | |||
1593 | EXPORT_SYMBOL(skb_gso_segment); | 1610 | EXPORT_SYMBOL(skb_gso_segment); |
1594 | 1611 | ||
1595 | /* Take action when hardware reception checksum errors are detected. */ | 1612 | /* Take action when hardware reception checksum errors are detected. */ |
@@ -1704,7 +1721,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | |||
1704 | skb_dst_drop(skb); | 1721 | skb_dst_drop(skb); |
1705 | 1722 | ||
1706 | rc = ops->ndo_start_xmit(skb, dev); | 1723 | rc = ops->ndo_start_xmit(skb, dev); |
1707 | if (rc == 0) | 1724 | if (rc == NETDEV_TX_OK) |
1708 | txq_trans_update(txq); | 1725 | txq_trans_update(txq); |
1709 | /* | 1726 | /* |
1710 | * TODO: if skb_orphan() was called by | 1727 | * TODO: if skb_orphan() was called by |
@@ -1730,7 +1747,7 @@ gso: | |||
1730 | skb->next = nskb->next; | 1747 | skb->next = nskb->next; |
1731 | nskb->next = NULL; | 1748 | nskb->next = NULL; |
1732 | rc = ops->ndo_start_xmit(nskb, dev); | 1749 | rc = ops->ndo_start_xmit(nskb, dev); |
1733 | if (unlikely(rc)) { | 1750 | if (unlikely(rc != NETDEV_TX_OK)) { |
1734 | nskb->next = skb->next; | 1751 | nskb->next = skb->next; |
1735 | skb->next = nskb; | 1752 | skb->next = nskb; |
1736 | return rc; | 1753 | return rc; |
@@ -1744,7 +1761,7 @@ gso: | |||
1744 | 1761 | ||
1745 | out_kfree_skb: | 1762 | out_kfree_skb: |
1746 | kfree_skb(skb); | 1763 | kfree_skb(skb); |
1747 | return 0; | 1764 | return NETDEV_TX_OK; |
1748 | } | 1765 | } |
1749 | 1766 | ||
1750 | static u32 skb_tx_hashrnd; | 1767 | static u32 skb_tx_hashrnd; |
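dev_hard_start_xmit() now compares the driver's return value with NETDEV_TX_OK instead of a bare 0 and itself returns NETDEV_TX_OK on the kfree path. For context, a skeletal ndo_start_xmit() that follows the convention; the "demo" driver is hypothetical, not part of this patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* a real driver would check its TX ring here and return
	 * NETDEV_TX_BUSY (after netif_stop_queue()) when it is full */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);		/* pretend the hardware sent it */
	return NETDEV_TX_OK;		/* the skb has been consumed */
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_start_xmit	= demo_start_xmit,
};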
@@ -1755,7 +1772,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) | |||
1755 | 1772 | ||
1756 | if (skb_rx_queue_recorded(skb)) { | 1773 | if (skb_rx_queue_recorded(skb)) { |
1757 | hash = skb_get_rx_queue(skb); | 1774 | hash = skb_get_rx_queue(skb); |
1758 | while (unlikely (hash >= dev->real_num_tx_queues)) | 1775 | while (unlikely(hash >= dev->real_num_tx_queues)) |
1759 | hash -= dev->real_num_tx_queues; | 1776 | hash -= dev->real_num_tx_queues; |
1760 | return hash; | 1777 | return hash; |
1761 | } | 1778 | } |
@@ -1786,6 +1803,40 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, | |||
1786 | return netdev_get_tx_queue(dev, queue_index); | 1803 | return netdev_get_tx_queue(dev, queue_index); |
1787 | } | 1804 | } |
1788 | 1805 | ||
1806 | static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | ||
1807 | struct net_device *dev, | ||
1808 | struct netdev_queue *txq) | ||
1809 | { | ||
1810 | spinlock_t *root_lock = qdisc_lock(q); | ||
1811 | int rc; | ||
1812 | |||
1813 | spin_lock(root_lock); | ||
1814 | if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { | ||
1815 | kfree_skb(skb); | ||
1816 | rc = NET_XMIT_DROP; | ||
1817 | } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && | ||
1818 | !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) { | ||
1819 | /* | ||
1820 | * This is a work-conserving queue; there are no old skbs | ||
1821 | * waiting to be sent out; and the qdisc is not running - | ||
1822 | * xmit the skb directly. | ||
1823 | */ | ||
1824 | __qdisc_update_bstats(q, skb->len); | ||
1825 | if (sch_direct_xmit(skb, q, dev, txq, root_lock)) | ||
1826 | __qdisc_run(q); | ||
1827 | else | ||
1828 | clear_bit(__QDISC_STATE_RUNNING, &q->state); | ||
1829 | |||
1830 | rc = NET_XMIT_SUCCESS; | ||
1831 | } else { | ||
1832 | rc = qdisc_enqueue_root(skb, q); | ||
1833 | qdisc_run(q); | ||
1834 | } | ||
1835 | spin_unlock(root_lock); | ||
1836 | |||
1837 | return rc; | ||
1838 | } | ||
1839 | |||
1789 | /** | 1840 | /** |
1790 | * dev_queue_xmit - transmit a buffer | 1841 | * dev_queue_xmit - transmit a buffer |
1791 | * @skb: buffer to transmit | 1842 | * @skb: buffer to transmit |
@@ -1856,22 +1907,10 @@ gso: | |||
1856 | q = rcu_dereference(txq->qdisc); | 1907 | q = rcu_dereference(txq->qdisc); |
1857 | 1908 | ||
1858 | #ifdef CONFIG_NET_CLS_ACT | 1909 | #ifdef CONFIG_NET_CLS_ACT |
1859 | skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); | 1910 | skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); |
1860 | #endif | 1911 | #endif |
1861 | if (q->enqueue) { | 1912 | if (q->enqueue) { |
1862 | spinlock_t *root_lock = qdisc_lock(q); | 1913 | rc = __dev_xmit_skb(skb, q, dev, txq); |
1863 | |||
1864 | spin_lock(root_lock); | ||
1865 | |||
1866 | if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { | ||
1867 | kfree_skb(skb); | ||
1868 | rc = NET_XMIT_DROP; | ||
1869 | } else { | ||
1870 | rc = qdisc_enqueue_root(skb, q); | ||
1871 | qdisc_run(q); | ||
1872 | } | ||
1873 | spin_unlock(root_lock); | ||
1874 | |||
1875 | goto out; | 1914 | goto out; |
1876 | } | 1915 | } |
1877 | 1916 | ||
@@ -1895,7 +1934,7 @@ gso: | |||
1895 | HARD_TX_LOCK(dev, txq, cpu); | 1934 | HARD_TX_LOCK(dev, txq, cpu); |
1896 | 1935 | ||
1897 | if (!netif_tx_queue_stopped(txq)) { | 1936 | if (!netif_tx_queue_stopped(txq)) { |
1898 | rc = 0; | 1937 | rc = NET_XMIT_SUCCESS; |
1899 | if (!dev_hard_start_xmit(skb, dev, txq)) { | 1938 | if (!dev_hard_start_xmit(skb, dev, txq)) { |
1900 | HARD_TX_UNLOCK(dev, txq); | 1939 | HARD_TX_UNLOCK(dev, txq); |
1901 | goto out; | 1940 | goto out; |
@@ -1924,6 +1963,7 @@ out: | |||
1924 | rcu_read_unlock_bh(); | 1963 | rcu_read_unlock_bh(); |
1925 | return rc; | 1964 | return rc; |
1926 | } | 1965 | } |
1966 | EXPORT_SYMBOL(dev_queue_xmit); | ||
1927 | 1967 | ||
1928 | 1968 | ||
1929 | /*======================================================================= | 1969 | /*======================================================================= |
@@ -1990,6 +2030,7 @@ enqueue: | |||
1990 | kfree_skb(skb); | 2030 | kfree_skb(skb); |
1991 | return NET_RX_DROP; | 2031 | return NET_RX_DROP; |
1992 | } | 2032 | } |
2033 | EXPORT_SYMBOL(netif_rx); | ||
1993 | 2034 | ||
1994 | int netif_rx_ni(struct sk_buff *skb) | 2035 | int netif_rx_ni(struct sk_buff *skb) |
1995 | { | 2036 | { |
@@ -2003,7 +2044,6 @@ int netif_rx_ni(struct sk_buff *skb) | |||
2003 | 2044 | ||
2004 | return err; | 2045 | return err; |
2005 | } | 2046 | } |
2006 | |||
2007 | EXPORT_SYMBOL(netif_rx_ni); | 2047 | EXPORT_SYMBOL(netif_rx_ni); |
2008 | 2048 | ||
2009 | static void net_tx_action(struct softirq_action *h) | 2049 | static void net_tx_action(struct softirq_action *h) |
@@ -2076,7 +2116,7 @@ static inline int deliver_skb(struct sk_buff *skb, | |||
2076 | /* This hook is defined here for ATM LANE */ | 2116 | /* This hook is defined here for ATM LANE */ |
2077 | int (*br_fdb_test_addr_hook)(struct net_device *dev, | 2117 | int (*br_fdb_test_addr_hook)(struct net_device *dev, |
2078 | unsigned char *addr) __read_mostly; | 2118 | unsigned char *addr) __read_mostly; |
2079 | EXPORT_SYMBOL(br_fdb_test_addr_hook); | 2119 | EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); |
2080 | #endif | 2120 | #endif |
2081 | 2121 | ||
2082 | /* | 2122 | /* |
@@ -2085,7 +2125,7 @@ EXPORT_SYMBOL(br_fdb_test_addr_hook); | |||
2085 | */ | 2125 | */ |
2086 | struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, | 2126 | struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, |
2087 | struct sk_buff *skb) __read_mostly; | 2127 | struct sk_buff *skb) __read_mostly; |
2088 | EXPORT_SYMBOL(br_handle_frame_hook); | 2128 | EXPORT_SYMBOL_GPL(br_handle_frame_hook); |
2089 | 2129 | ||
2090 | static inline struct sk_buff *handle_bridge(struct sk_buff *skb, | 2130 | static inline struct sk_buff *handle_bridge(struct sk_buff *skb, |
2091 | struct packet_type **pt_prev, int *ret, | 2131 | struct packet_type **pt_prev, int *ret, |
@@ -2336,6 +2376,7 @@ out: | |||
2336 | rcu_read_unlock(); | 2376 | rcu_read_unlock(); |
2337 | return ret; | 2377 | return ret; |
2338 | } | 2378 | } |
2379 | EXPORT_SYMBOL(netif_receive_skb); | ||
2339 | 2380 | ||
2340 | /* Network device is going away, flush any packets still pending */ | 2381 | /* Network device is going away, flush any packets still pending */ |
2341 | static void flush_backlog(void *arg) | 2382 | static void flush_backlog(void *arg) |
@@ -2852,7 +2893,7 @@ softnet_break: | |||
2852 | goto out; | 2893 | goto out; |
2853 | } | 2894 | } |
2854 | 2895 | ||
2855 | static gifconf_func_t * gifconf_list [NPROTO]; | 2896 | static gifconf_func_t *gifconf_list[NPROTO]; |
2856 | 2897 | ||
2857 | /** | 2898 | /** |
2858 | * register_gifconf - register a SIOCGIF handler | 2899 | * register_gifconf - register a SIOCGIF handler |
@@ -2863,13 +2904,14 @@ static gifconf_func_t * gifconf_list [NPROTO]; | |||
2863 | * that is passed must not be freed or reused until it has been replaced | 2904 | * that is passed must not be freed or reused until it has been replaced |
2864 | * by another handler. | 2905 | * by another handler. |
2865 | */ | 2906 | */ |
2866 | int register_gifconf(unsigned int family, gifconf_func_t * gifconf) | 2907 | int register_gifconf(unsigned int family, gifconf_func_t *gifconf) |
2867 | { | 2908 | { |
2868 | if (family >= NPROTO) | 2909 | if (family >= NPROTO) |
2869 | return -EINVAL; | 2910 | return -EINVAL; |
2870 | gifconf_list[family] = gifconf; | 2911 | gifconf_list[family] = gifconf; |
2871 | return 0; | 2912 | return 0; |
2872 | } | 2913 | } |
2914 | EXPORT_SYMBOL(register_gifconf); | ||
2873 | 2915 | ||
2874 | 2916 | ||
2875 | /* | 2917 | /* |
@@ -3080,7 +3122,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v) | |||
3080 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", | 3122 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", |
3081 | s->total, s->dropped, s->time_squeeze, 0, | 3123 | s->total, s->dropped, s->time_squeeze, 0, |
3082 | 0, 0, 0, 0, /* was fastroute */ | 3124 | 0, 0, 0, 0, /* was fastroute */ |
3083 | s->cpu_collision ); | 3125 | s->cpu_collision); |
3084 | return 0; | 3126 | return 0; |
3085 | } | 3127 | } |
3086 | 3128 | ||
@@ -3316,6 +3358,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master) | |||
3316 | rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); | 3358 | rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE); |
3317 | return 0; | 3359 | return 0; |
3318 | } | 3360 | } |
3361 | EXPORT_SYMBOL(netdev_set_master); | ||
3319 | 3362 | ||
3320 | static void dev_change_rx_flags(struct net_device *dev, int flags) | 3363 | static void dev_change_rx_flags(struct net_device *dev, int flags) |
3321 | { | 3364 | { |
@@ -3394,6 +3437,7 @@ int dev_set_promiscuity(struct net_device *dev, int inc) | |||
3394 | dev_set_rx_mode(dev); | 3437 | dev_set_rx_mode(dev); |
3395 | return err; | 3438 | return err; |
3396 | } | 3439 | } |
3440 | EXPORT_SYMBOL(dev_set_promiscuity); | ||
3397 | 3441 | ||
3398 | /** | 3442 | /** |
3399 | * dev_set_allmulti - update allmulti count on a device | 3443 | * dev_set_allmulti - update allmulti count on a device |
@@ -3437,6 +3481,7 @@ int dev_set_allmulti(struct net_device *dev, int inc) | |||
3437 | } | 3481 | } |
3438 | return 0; | 3482 | return 0; |
3439 | } | 3483 | } |
3484 | EXPORT_SYMBOL(dev_set_allmulti); | ||
3440 | 3485 | ||
3441 | /* | 3486 | /* |
3442 | * Upload unicast and multicast address lists to device and | 3487 | * Upload unicast and multicast address lists to device and |
@@ -3927,6 +3972,7 @@ int __dev_addr_sync(struct dev_addr_list **to, int *to_count, | |||
3927 | } | 3972 | } |
3928 | return err; | 3973 | return err; |
3929 | } | 3974 | } |
3975 | EXPORT_SYMBOL_GPL(__dev_addr_sync); | ||
3930 | 3976 | ||
3931 | void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, | 3977 | void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, |
3932 | struct dev_addr_list **from, int *from_count) | 3978 | struct dev_addr_list **from, int *from_count) |
@@ -3946,6 +3992,7 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, | |||
3946 | da = next; | 3992 | da = next; |
3947 | } | 3993 | } |
3948 | } | 3994 | } |
3995 | EXPORT_SYMBOL_GPL(__dev_addr_unsync); | ||
3949 | 3996 | ||
3950 | /** | 3997 | /** |
3951 | * dev_unicast_sync - Synchronize device's unicast list to another device | 3998 | * dev_unicast_sync - Synchronize device's unicast list to another device |
@@ -4064,6 +4111,7 @@ unsigned dev_get_flags(const struct net_device *dev) | |||
4064 | 4111 | ||
4065 | return flags; | 4112 | return flags; |
4066 | } | 4113 | } |
4114 | EXPORT_SYMBOL(dev_get_flags); | ||
4067 | 4115 | ||
4068 | /** | 4116 | /** |
4069 | * dev_change_flags - change device settings | 4117 | * dev_change_flags - change device settings |
@@ -4114,12 +4162,13 @@ int dev_change_flags(struct net_device *dev, unsigned flags) | |||
4114 | } | 4162 | } |
4115 | 4163 | ||
4116 | if (dev->flags & IFF_UP && | 4164 | if (dev->flags & IFF_UP && |
4117 | ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI | | 4165 | ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | |
4118 | IFF_VOLATILE))) | 4166 | IFF_VOLATILE))) |
4119 | call_netdevice_notifiers(NETDEV_CHANGE, dev); | 4167 | call_netdevice_notifiers(NETDEV_CHANGE, dev); |
4120 | 4168 | ||
4121 | if ((flags ^ dev->gflags) & IFF_PROMISC) { | 4169 | if ((flags ^ dev->gflags) & IFF_PROMISC) { |
4122 | int inc = (flags & IFF_PROMISC) ? +1 : -1; | 4170 | int inc = (flags & IFF_PROMISC) ? 1 : -1; |
4171 | |||
4123 | dev->gflags ^= IFF_PROMISC; | 4172 | dev->gflags ^= IFF_PROMISC; |
4124 | dev_set_promiscuity(dev, inc); | 4173 | dev_set_promiscuity(dev, inc); |
4125 | } | 4174 | } |
@@ -4129,7 +4178,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags) | |||
4129 | IFF_ALLMULTI is requested not asking us and not reporting. | 4178 | IFF_ALLMULTI is requested not asking us and not reporting. |
4130 | */ | 4179 | */ |
4131 | if ((flags ^ dev->gflags) & IFF_ALLMULTI) { | 4180 | if ((flags ^ dev->gflags) & IFF_ALLMULTI) { |
4132 | int inc = (flags & IFF_ALLMULTI) ? +1 : -1; | 4181 | int inc = (flags & IFF_ALLMULTI) ? 1 : -1; |
4182 | |||
4133 | dev->gflags ^= IFF_ALLMULTI; | 4183 | dev->gflags ^= IFF_ALLMULTI; |
4134 | dev_set_allmulti(dev, inc); | 4184 | dev_set_allmulti(dev, inc); |
4135 | } | 4185 | } |
@@ -4141,6 +4191,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags) | |||
4141 | 4191 | ||
4142 | return ret; | 4192 | return ret; |
4143 | } | 4193 | } |
4194 | EXPORT_SYMBOL(dev_change_flags); | ||
4144 | 4195 | ||
4145 | /** | 4196 | /** |
4146 | * dev_set_mtu - Change maximum transfer unit | 4197 | * dev_set_mtu - Change maximum transfer unit |
@@ -4174,6 +4225,7 @@ int dev_set_mtu(struct net_device *dev, int new_mtu) | |||
4174 | call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); | 4225 | call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); |
4175 | return err; | 4226 | return err; |
4176 | } | 4227 | } |
4228 | EXPORT_SYMBOL(dev_set_mtu); | ||
4177 | 4229 | ||
4178 | /** | 4230 | /** |
4179 | * dev_set_mac_address - Change Media Access Control Address | 4231 | * dev_set_mac_address - Change Media Access Control Address |
@@ -4198,6 +4250,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) | |||
4198 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | 4250 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); |
4199 | return err; | 4251 | return err; |
4200 | } | 4252 | } |
4253 | EXPORT_SYMBOL(dev_set_mac_address); | ||
4201 | 4254 | ||
4202 | /* | 4255 | /* |
4203 | * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock) | 4256 | * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock) |
@@ -4211,56 +4264,56 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm | |||
4211 | return -ENODEV; | 4264 | return -ENODEV; |
4212 | 4265 | ||
4213 | switch (cmd) { | 4266 | switch (cmd) { |
4214 | case SIOCGIFFLAGS: /* Get interface flags */ | 4267 | case SIOCGIFFLAGS: /* Get interface flags */ |
4215 | ifr->ifr_flags = (short) dev_get_flags(dev); | 4268 | ifr->ifr_flags = (short) dev_get_flags(dev); |
4216 | return 0; | 4269 | return 0; |
4217 | 4270 | ||
4218 | case SIOCGIFMETRIC: /* Get the metric on the interface | 4271 | case SIOCGIFMETRIC: /* Get the metric on the interface |
4219 | (currently unused) */ | 4272 | (currently unused) */ |
4220 | ifr->ifr_metric = 0; | 4273 | ifr->ifr_metric = 0; |
4221 | return 0; | 4274 | return 0; |
4222 | 4275 | ||
4223 | case SIOCGIFMTU: /* Get the MTU of a device */ | 4276 | case SIOCGIFMTU: /* Get the MTU of a device */ |
4224 | ifr->ifr_mtu = dev->mtu; | 4277 | ifr->ifr_mtu = dev->mtu; |
4225 | return 0; | 4278 | return 0; |
4226 | 4279 | ||
4227 | case SIOCGIFHWADDR: | 4280 | case SIOCGIFHWADDR: |
4228 | if (!dev->addr_len) | 4281 | if (!dev->addr_len) |
4229 | memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); | 4282 | memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); |
4230 | else | 4283 | else |
4231 | memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, | 4284 | memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, |
4232 | min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); | 4285 | min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); |
4233 | ifr->ifr_hwaddr.sa_family = dev->type; | 4286 | ifr->ifr_hwaddr.sa_family = dev->type; |
4234 | return 0; | 4287 | return 0; |
4235 | 4288 | ||
4236 | case SIOCGIFSLAVE: | 4289 | case SIOCGIFSLAVE: |
4237 | err = -EINVAL; | 4290 | err = -EINVAL; |
4238 | break; | 4291 | break; |
4239 | 4292 | ||
4240 | case SIOCGIFMAP: | 4293 | case SIOCGIFMAP: |
4241 | ifr->ifr_map.mem_start = dev->mem_start; | 4294 | ifr->ifr_map.mem_start = dev->mem_start; |
4242 | ifr->ifr_map.mem_end = dev->mem_end; | 4295 | ifr->ifr_map.mem_end = dev->mem_end; |
4243 | ifr->ifr_map.base_addr = dev->base_addr; | 4296 | ifr->ifr_map.base_addr = dev->base_addr; |
4244 | ifr->ifr_map.irq = dev->irq; | 4297 | ifr->ifr_map.irq = dev->irq; |
4245 | ifr->ifr_map.dma = dev->dma; | 4298 | ifr->ifr_map.dma = dev->dma; |
4246 | ifr->ifr_map.port = dev->if_port; | 4299 | ifr->ifr_map.port = dev->if_port; |
4247 | return 0; | 4300 | return 0; |
4248 | 4301 | ||
4249 | case SIOCGIFINDEX: | 4302 | case SIOCGIFINDEX: |
4250 | ifr->ifr_ifindex = dev->ifindex; | 4303 | ifr->ifr_ifindex = dev->ifindex; |
4251 | return 0; | 4304 | return 0; |
4252 | 4305 | ||
4253 | case SIOCGIFTXQLEN: | 4306 | case SIOCGIFTXQLEN: |
4254 | ifr->ifr_qlen = dev->tx_queue_len; | 4307 | ifr->ifr_qlen = dev->tx_queue_len; |
4255 | return 0; | 4308 | return 0; |
4256 | 4309 | ||
4257 | default: | 4310 | default: |
4258 | /* dev_ioctl() should ensure this case | 4311 | /* dev_ioctl() should ensure this case |
4259 | * is never reached | 4312 | * is never reached |
4260 | */ | 4313 | */ |
4261 | WARN_ON(1); | 4314 | WARN_ON(1); |
4262 | err = -EINVAL; | 4315 | err = -EINVAL; |
4263 | break; | 4316 | break; |
4264 | 4317 | ||
4265 | } | 4318 | } |
4266 | return err; | 4319 | return err; |
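The re-indented dev_ifsioc_locked() switch continues to serve the classic SIOC* queries, which userspace reaches with a plain ioctl() on any socket. A minimal SIOCGIFMTU example; "eth0" is only a placeholder name.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
		printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
	else
		perror("SIOCGIFMTU");
	close(fd);
	return 0;
}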
@@ -4281,92 +4334,91 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | |||
4281 | ops = dev->netdev_ops; | 4334 | ops = dev->netdev_ops; |
4282 | 4335 | ||
4283 | switch (cmd) { | 4336 | switch (cmd) { |
4284 | case SIOCSIFFLAGS: /* Set interface flags */ | 4337 | case SIOCSIFFLAGS: /* Set interface flags */ |
4285 | return dev_change_flags(dev, ifr->ifr_flags); | 4338 | return dev_change_flags(dev, ifr->ifr_flags); |
4286 | |||
4287 | case SIOCSIFMETRIC: /* Set the metric on the interface | ||
4288 | (currently unused) */ | ||
4289 | return -EOPNOTSUPP; | ||
4290 | 4339 | ||
4291 | case SIOCSIFMTU: /* Set the MTU of a device */ | 4340 | case SIOCSIFMETRIC: /* Set the metric on the interface |
4292 | return dev_set_mtu(dev, ifr->ifr_mtu); | 4341 | (currently unused) */ |
4342 | return -EOPNOTSUPP; | ||
4293 | 4343 | ||
4294 | case SIOCSIFHWADDR: | 4344 | case SIOCSIFMTU: /* Set the MTU of a device */ |
4295 | return dev_set_mac_address(dev, &ifr->ifr_hwaddr); | 4345 | return dev_set_mtu(dev, ifr->ifr_mtu); |
4296 | 4346 | ||
4297 | case SIOCSIFHWBROADCAST: | 4347 | case SIOCSIFHWADDR: |
4298 | if (ifr->ifr_hwaddr.sa_family != dev->type) | 4348 | return dev_set_mac_address(dev, &ifr->ifr_hwaddr); |
4299 | return -EINVAL; | ||
4300 | memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, | ||
4301 | min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); | ||
4302 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
4303 | return 0; | ||
4304 | 4349 | ||
4305 | case SIOCSIFMAP: | 4350 | case SIOCSIFHWBROADCAST: |
4306 | if (ops->ndo_set_config) { | 4351 | if (ifr->ifr_hwaddr.sa_family != dev->type) |
4307 | if (!netif_device_present(dev)) | 4352 | return -EINVAL; |
4308 | return -ENODEV; | 4353 | memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, |
4309 | return ops->ndo_set_config(dev, &ifr->ifr_map); | 4354 | min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); |
4310 | } | 4355 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); |
4311 | return -EOPNOTSUPP; | 4356 | return 0; |
4312 | |||
4313 | case SIOCADDMULTI: | ||
4314 | if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || | ||
4315 | ifr->ifr_hwaddr.sa_family != AF_UNSPEC) | ||
4316 | return -EINVAL; | ||
4317 | if (!netif_device_present(dev)) | ||
4318 | return -ENODEV; | ||
4319 | return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, | ||
4320 | dev->addr_len, 1); | ||
4321 | 4357 | ||
4322 | case SIOCDELMULTI: | 4358 | case SIOCSIFMAP: |
4323 | if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || | 4359 | if (ops->ndo_set_config) { |
4324 | ifr->ifr_hwaddr.sa_family != AF_UNSPEC) | ||
4325 | return -EINVAL; | ||
4326 | if (!netif_device_present(dev)) | 4360 | if (!netif_device_present(dev)) |
4327 | return -ENODEV; | 4361 | return -ENODEV; |
4328 | return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, | 4362 | return ops->ndo_set_config(dev, &ifr->ifr_map); |
4329 | dev->addr_len, 1); | 4363 | } |
4364 | return -EOPNOTSUPP; | ||
4330 | 4365 | ||
4331 | case SIOCSIFTXQLEN: | 4366 | case SIOCADDMULTI: |
4332 | if (ifr->ifr_qlen < 0) | 4367 | if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || |
4333 | return -EINVAL; | 4368 | ifr->ifr_hwaddr.sa_family != AF_UNSPEC) |
4334 | dev->tx_queue_len = ifr->ifr_qlen; | 4369 | return -EINVAL; |
4335 | return 0; | 4370 | if (!netif_device_present(dev)) |
4371 | return -ENODEV; | ||
4372 | return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, | ||
4373 | dev->addr_len, 1); | ||
4374 | |||
4375 | case SIOCDELMULTI: | ||
4376 | if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || | ||
4377 | ifr->ifr_hwaddr.sa_family != AF_UNSPEC) | ||
4378 | return -EINVAL; | ||
4379 | if (!netif_device_present(dev)) | ||
4380 | return -ENODEV; | ||
4381 | return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, | ||
4382 | dev->addr_len, 1); | ||
4336 | 4383 | ||
4337 | case SIOCSIFNAME: | 4384 | case SIOCSIFTXQLEN: |
4338 | ifr->ifr_newname[IFNAMSIZ-1] = '\0'; | 4385 | if (ifr->ifr_qlen < 0) |
4339 | return dev_change_name(dev, ifr->ifr_newname); | 4386 | return -EINVAL; |
4387 | dev->tx_queue_len = ifr->ifr_qlen; | ||
4388 | return 0; | ||
4340 | 4389 | ||
4341 | /* | 4390 | case SIOCSIFNAME: |
4342 | * Unknown or private ioctl | 4391 | ifr->ifr_newname[IFNAMSIZ-1] = '\0'; |
4343 | */ | 4392 | return dev_change_name(dev, ifr->ifr_newname); |
4344 | 4393 | ||
4345 | default: | 4394 | /* |
4346 | if ((cmd >= SIOCDEVPRIVATE && | 4395 | * Unknown or private ioctl |
4347 | cmd <= SIOCDEVPRIVATE + 15) || | 4396 | */ |
4348 | cmd == SIOCBONDENSLAVE || | 4397 | default: |
4349 | cmd == SIOCBONDRELEASE || | 4398 | if ((cmd >= SIOCDEVPRIVATE && |
4350 | cmd == SIOCBONDSETHWADDR || | 4399 | cmd <= SIOCDEVPRIVATE + 15) || |
4351 | cmd == SIOCBONDSLAVEINFOQUERY || | 4400 | cmd == SIOCBONDENSLAVE || |
4352 | cmd == SIOCBONDINFOQUERY || | 4401 | cmd == SIOCBONDRELEASE || |
4353 | cmd == SIOCBONDCHANGEACTIVE || | 4402 | cmd == SIOCBONDSETHWADDR || |
4354 | cmd == SIOCGMIIPHY || | 4403 | cmd == SIOCBONDSLAVEINFOQUERY || |
4355 | cmd == SIOCGMIIREG || | 4404 | cmd == SIOCBONDINFOQUERY || |
4356 | cmd == SIOCSMIIREG || | 4405 | cmd == SIOCBONDCHANGEACTIVE || |
4357 | cmd == SIOCBRADDIF || | 4406 | cmd == SIOCGMIIPHY || |
4358 | cmd == SIOCBRDELIF || | 4407 | cmd == SIOCGMIIREG || |
4359 | cmd == SIOCSHWTSTAMP || | 4408 | cmd == SIOCSMIIREG || |
4360 | cmd == SIOCWANDEV) { | 4409 | cmd == SIOCBRADDIF || |
4361 | err = -EOPNOTSUPP; | 4410 | cmd == SIOCBRDELIF || |
4362 | if (ops->ndo_do_ioctl) { | 4411 | cmd == SIOCSHWTSTAMP || |
4363 | if (netif_device_present(dev)) | 4412 | cmd == SIOCWANDEV) { |
4364 | err = ops->ndo_do_ioctl(dev, ifr, cmd); | 4413 | err = -EOPNOTSUPP; |
4365 | else | 4414 | if (ops->ndo_do_ioctl) { |
4366 | err = -ENODEV; | 4415 | if (netif_device_present(dev)) |
4367 | } | 4416 | err = ops->ndo_do_ioctl(dev, ifr, cmd); |
4368 | } else | 4417 | else |
4369 | err = -EINVAL; | 4418 | err = -ENODEV; |
4419 | } | ||
4420 | } else | ||
4421 | err = -EINVAL; | ||
4370 | 4422 | ||
4371 | } | 4423 | } |
4372 | return err; | 4424 | return err; |
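The default arm of dev_ifsioc() above forwards bonding, MII, bridge, SIOCWANDEV and the SIOCDEVPRIVATE range to the driver's ndo_do_ioctl hook, provided the device is present. A minimal sketch of what such a hook can look like on the driver side; MYDRV_GET_COUNTER, struct mydrv_counter and mydrv_do_ioctl are made-up names for illustration, not symbols from this patch.

/* Sketch of a driver ndo_do_ioctl hook reached through the default arm of
 * dev_ifsioc(); the command number and private struct are hypothetical. */
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/sockios.h>
#include <linux/uaccess.h>

#define MYDRV_GET_COUNTER (SIOCDEVPRIVATE + 0)

struct mydrv_counter {
        u32 rx_fifo_overruns;
};

static int mydrv_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mydrv_counter c = {
                .rx_fifo_overruns = dev->stats.rx_fifo_errors,
        };

        if (cmd != MYDRV_GET_COUNTER)
                return -EOPNOTSUPP;     /* dev_ifsioc() hands this back to the caller */

        if (copy_to_user(ifr->ifr_data, &c, sizeof(c)))
                return -EFAULT;
        return 0;
}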
@@ -4423,135 +4475,135 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
4423 | */ | 4475 | */ |
4424 | 4476 | ||
4425 | switch (cmd) { | 4477 | switch (cmd) { |
4426 | /* | 4478 | /* |
4427 | * These ioctl calls: | 4479 | * These ioctl calls: |
4428 | * - can be done by all. | 4480 | * - can be done by all. |
4429 | * - atomic and do not require locking. | 4481 | * - atomic and do not require locking. |
4430 | * - return a value | 4482 | * - return a value |
4431 | */ | 4483 | */ |
4432 | case SIOCGIFFLAGS: | 4484 | case SIOCGIFFLAGS: |
4433 | case SIOCGIFMETRIC: | 4485 | case SIOCGIFMETRIC: |
4434 | case SIOCGIFMTU: | 4486 | case SIOCGIFMTU: |
4435 | case SIOCGIFHWADDR: | 4487 | case SIOCGIFHWADDR: |
4436 | case SIOCGIFSLAVE: | 4488 | case SIOCGIFSLAVE: |
4437 | case SIOCGIFMAP: | 4489 | case SIOCGIFMAP: |
4438 | case SIOCGIFINDEX: | 4490 | case SIOCGIFINDEX: |
4439 | case SIOCGIFTXQLEN: | 4491 | case SIOCGIFTXQLEN: |
4440 | dev_load(net, ifr.ifr_name); | 4492 | dev_load(net, ifr.ifr_name); |
4441 | read_lock(&dev_base_lock); | 4493 | read_lock(&dev_base_lock); |
4442 | ret = dev_ifsioc_locked(net, &ifr, cmd); | 4494 | ret = dev_ifsioc_locked(net, &ifr, cmd); |
4443 | read_unlock(&dev_base_lock); | 4495 | read_unlock(&dev_base_lock); |
4444 | if (!ret) { | 4496 | if (!ret) { |
4445 | if (colon) | 4497 | if (colon) |
4446 | *colon = ':'; | 4498 | *colon = ':'; |
4447 | if (copy_to_user(arg, &ifr, | 4499 | if (copy_to_user(arg, &ifr, |
4448 | sizeof(struct ifreq))) | 4500 | sizeof(struct ifreq))) |
4449 | ret = -EFAULT; | 4501 | ret = -EFAULT; |
4450 | } | 4502 | } |
4451 | return ret; | 4503 | return ret; |
4452 | 4504 | ||
4453 | case SIOCETHTOOL: | 4505 | case SIOCETHTOOL: |
4454 | dev_load(net, ifr.ifr_name); | 4506 | dev_load(net, ifr.ifr_name); |
4455 | rtnl_lock(); | 4507 | rtnl_lock(); |
4456 | ret = dev_ethtool(net, &ifr); | 4508 | ret = dev_ethtool(net, &ifr); |
4457 | rtnl_unlock(); | 4509 | rtnl_unlock(); |
4458 | if (!ret) { | 4510 | if (!ret) { |
4459 | if (colon) | 4511 | if (colon) |
4460 | *colon = ':'; | 4512 | *colon = ':'; |
4461 | if (copy_to_user(arg, &ifr, | 4513 | if (copy_to_user(arg, &ifr, |
4462 | sizeof(struct ifreq))) | 4514 | sizeof(struct ifreq))) |
4463 | ret = -EFAULT; | 4515 | ret = -EFAULT; |
4464 | } | 4516 | } |
4465 | return ret; | 4517 | return ret; |
4466 | 4518 | ||
4467 | /* | 4519 | /* |
4468 | * These ioctl calls: | 4520 | * These ioctl calls: |
4469 | * - require superuser power. | 4521 | * - require superuser power. |
4470 | * - require strict serialization. | 4522 | * - require strict serialization. |
4471 | * - return a value | 4523 | * - return a value |
4472 | */ | 4524 | */ |
4473 | case SIOCGMIIPHY: | 4525 | case SIOCGMIIPHY: |
4474 | case SIOCGMIIREG: | 4526 | case SIOCGMIIREG: |
4475 | case SIOCSIFNAME: | 4527 | case SIOCSIFNAME: |
4476 | if (!capable(CAP_NET_ADMIN)) | 4528 | if (!capable(CAP_NET_ADMIN)) |
4477 | return -EPERM; | 4529 | return -EPERM; |
4478 | dev_load(net, ifr.ifr_name); | 4530 | dev_load(net, ifr.ifr_name); |
4479 | rtnl_lock(); | 4531 | rtnl_lock(); |
4480 | ret = dev_ifsioc(net, &ifr, cmd); | 4532 | ret = dev_ifsioc(net, &ifr, cmd); |
4481 | rtnl_unlock(); | 4533 | rtnl_unlock(); |
4482 | if (!ret) { | 4534 | if (!ret) { |
4483 | if (colon) | 4535 | if (colon) |
4484 | *colon = ':'; | 4536 | *colon = ':'; |
4485 | if (copy_to_user(arg, &ifr, | 4537 | if (copy_to_user(arg, &ifr, |
4486 | sizeof(struct ifreq))) | 4538 | sizeof(struct ifreq))) |
4487 | ret = -EFAULT; | 4539 | ret = -EFAULT; |
4488 | } | 4540 | } |
4489 | return ret; | 4541 | return ret; |
4490 | 4542 | ||
4491 | /* | 4543 | /* |
4492 | * These ioctl calls: | 4544 | * These ioctl calls: |
4493 | * - require superuser power. | 4545 | * - require superuser power. |
4494 | * - require strict serialization. | 4546 | * - require strict serialization. |
4495 | * - do not return a value | 4547 | * - do not return a value |
4496 | */ | 4548 | */ |
4497 | case SIOCSIFFLAGS: | 4549 | case SIOCSIFFLAGS: |
4498 | case SIOCSIFMETRIC: | 4550 | case SIOCSIFMETRIC: |
4499 | case SIOCSIFMTU: | 4551 | case SIOCSIFMTU: |
4500 | case SIOCSIFMAP: | 4552 | case SIOCSIFMAP: |
4501 | case SIOCSIFHWADDR: | 4553 | case SIOCSIFHWADDR: |
4502 | case SIOCSIFSLAVE: | 4554 | case SIOCSIFSLAVE: |
4503 | case SIOCADDMULTI: | 4555 | case SIOCADDMULTI: |
4504 | case SIOCDELMULTI: | 4556 | case SIOCDELMULTI: |
4505 | case SIOCSIFHWBROADCAST: | 4557 | case SIOCSIFHWBROADCAST: |
4506 | case SIOCSIFTXQLEN: | 4558 | case SIOCSIFTXQLEN: |
4507 | case SIOCSMIIREG: | 4559 | case SIOCSMIIREG: |
4508 | case SIOCBONDENSLAVE: | 4560 | case SIOCBONDENSLAVE: |
4509 | case SIOCBONDRELEASE: | 4561 | case SIOCBONDRELEASE: |
4510 | case SIOCBONDSETHWADDR: | 4562 | case SIOCBONDSETHWADDR: |
4511 | case SIOCBONDCHANGEACTIVE: | 4563 | case SIOCBONDCHANGEACTIVE: |
4512 | case SIOCBRADDIF: | 4564 | case SIOCBRADDIF: |
4513 | case SIOCBRDELIF: | 4565 | case SIOCBRDELIF: |
4514 | case SIOCSHWTSTAMP: | 4566 | case SIOCSHWTSTAMP: |
4515 | if (!capable(CAP_NET_ADMIN)) | 4567 | if (!capable(CAP_NET_ADMIN)) |
4516 | return -EPERM; | 4568 | return -EPERM; |
4517 | /* fall through */ | 4569 | /* fall through */ |
4518 | case SIOCBONDSLAVEINFOQUERY: | 4570 | case SIOCBONDSLAVEINFOQUERY: |
4519 | case SIOCBONDINFOQUERY: | 4571 | case SIOCBONDINFOQUERY: |
4572 | dev_load(net, ifr.ifr_name); | ||
4573 | rtnl_lock(); | ||
4574 | ret = dev_ifsioc(net, &ifr, cmd); | ||
4575 | rtnl_unlock(); | ||
4576 | return ret; | ||
4577 | |||
4578 | case SIOCGIFMEM: | ||
4579 | /* Get the per device memory space. We can add this but | ||
4580 | * currently do not support it */ | ||
4581 | case SIOCSIFMEM: | ||
4582 | /* Set the per device memory buffer space. | ||
4583 | * Not applicable in our case */ | ||
4584 | case SIOCSIFLINK: | ||
4585 | return -EINVAL; | ||
4586 | |||
4587 | /* | ||
4588 | * Unknown or private ioctl. | ||
4589 | */ | ||
4590 | default: | ||
4591 | if (cmd == SIOCWANDEV || | ||
4592 | (cmd >= SIOCDEVPRIVATE && | ||
4593 | cmd <= SIOCDEVPRIVATE + 15)) { | ||
4520 | dev_load(net, ifr.ifr_name); | 4594 | dev_load(net, ifr.ifr_name); |
4521 | rtnl_lock(); | 4595 | rtnl_lock(); |
4522 | ret = dev_ifsioc(net, &ifr, cmd); | 4596 | ret = dev_ifsioc(net, &ifr, cmd); |
4523 | rtnl_unlock(); | 4597 | rtnl_unlock(); |
4598 | if (!ret && copy_to_user(arg, &ifr, | ||
4599 | sizeof(struct ifreq))) | ||
4600 | ret = -EFAULT; | ||
4524 | return ret; | 4601 | return ret; |
4525 | 4602 | } | |
4526 | case SIOCGIFMEM: | 4603 | /* Take care of Wireless Extensions */ |
4527 | /* Get the per device memory space. We can add this but | 4604 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) |
4528 | * currently do not support it */ | 4605 | return wext_handle_ioctl(net, &ifr, cmd, arg); |
4529 | case SIOCSIFMEM: | 4606 | return -EINVAL; |
4530 | /* Set the per device memory buffer space. | ||
4531 | * Not applicable in our case */ | ||
4532 | case SIOCSIFLINK: | ||
4533 | return -EINVAL; | ||
4534 | |||
4535 | /* | ||
4536 | * Unknown or private ioctl. | ||
4537 | */ | ||
4538 | default: | ||
4539 | if (cmd == SIOCWANDEV || | ||
4540 | (cmd >= SIOCDEVPRIVATE && | ||
4541 | cmd <= SIOCDEVPRIVATE + 15)) { | ||
4542 | dev_load(net, ifr.ifr_name); | ||
4543 | rtnl_lock(); | ||
4544 | ret = dev_ifsioc(net, &ifr, cmd); | ||
4545 | rtnl_unlock(); | ||
4546 | if (!ret && copy_to_user(arg, &ifr, | ||
4547 | sizeof(struct ifreq))) | ||
4548 | ret = -EFAULT; | ||
4549 | return ret; | ||
4550 | } | ||
4551 | /* Take care of Wireless Extensions */ | ||
4552 | if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) | ||
4553 | return wext_handle_ioctl(net, &ifr, cmd, arg); | ||
4554 | return -EINVAL; | ||
4555 | } | 4607 | } |
4556 | } | 4608 | } |
4557 | 4609 | ||
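The rewritten dev_ioctl() keeps its three classes of commands: read-only queries served under dev_base_lock, CAP_NET_ADMIN setters serialized by the RTNL, and a default arm that now also copies the ifreq back to user space for SIOCWANDEV and device-private commands. A small user-space sketch of the unprivileged query path, assuming an interface named "eth0" exists on the system:

/* Query interface flags through the read-only SIOCGIFFLAGS branch of
 * dev_ioctl(); "eth0" is an assumed interface name. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

int main(void)
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
                printf("eth0 is %s\n", (ifr.ifr_flags & IFF_UP) ? "up" : "down");

        close(fd);
        return 0;
}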
@@ -4816,6 +4868,7 @@ err_uninit: | |||
4816 | dev->netdev_ops->ndo_uninit(dev); | 4868 | dev->netdev_ops->ndo_uninit(dev); |
4817 | goto out; | 4869 | goto out; |
4818 | } | 4870 | } |
4871 | EXPORT_SYMBOL(register_netdevice); | ||
4819 | 4872 | ||
4820 | /** | 4873 | /** |
4821 | * init_dummy_netdev - init a dummy network device for NAPI | 4874 | * init_dummy_netdev - init a dummy network device for NAPI |
@@ -5168,6 +5221,7 @@ void free_netdev(struct net_device *dev) | |||
5168 | /* will free via device release */ | 5221 | /* will free via device release */ |
5169 | put_device(&dev->dev); | 5222 | put_device(&dev->dev); |
5170 | } | 5223 | } |
5224 | EXPORT_SYMBOL(free_netdev); | ||
5171 | 5225 | ||
5172 | /** | 5226 | /** |
5173 | * synchronize_net - Synchronize with packet receive processing | 5227 | * synchronize_net - Synchronize with packet receive processing |
@@ -5180,6 +5234,7 @@ void synchronize_net(void) | |||
5180 | might_sleep(); | 5234 | might_sleep(); |
5181 | synchronize_rcu(); | 5235 | synchronize_rcu(); |
5182 | } | 5236 | } |
5237 | EXPORT_SYMBOL(synchronize_net); | ||
5183 | 5238 | ||
5184 | /** | 5239 | /** |
5185 | * unregister_netdevice - remove device from the kernel | 5240 | * unregister_netdevice - remove device from the kernel |
@@ -5200,6 +5255,7 @@ void unregister_netdevice(struct net_device *dev) | |||
5200 | /* Finish processing unregister after unlock */ | 5255 | /* Finish processing unregister after unlock */ |
5201 | net_set_todo(dev); | 5256 | net_set_todo(dev); |
5202 | } | 5257 | } |
5258 | EXPORT_SYMBOL(unregister_netdevice); | ||
5203 | 5259 | ||
5204 | /** | 5260 | /** |
5205 | * unregister_netdev - remove device from the kernel | 5261 | * unregister_netdev - remove device from the kernel |
@@ -5218,7 +5274,6 @@ void unregister_netdev(struct net_device *dev) | |||
5218 | unregister_netdevice(dev); | 5274 | unregister_netdevice(dev); |
5219 | rtnl_unlock(); | 5275 | rtnl_unlock(); |
5220 | } | 5276 | } |
5221 | |||
5222 | EXPORT_SYMBOL(unregister_netdev); | 5277 | EXPORT_SYMBOL(unregister_netdev); |
5223 | 5278 | ||
5224 | /** | 5279 | /** |
@@ -5347,6 +5402,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
5347 | out: | 5402 | out: |
5348 | return err; | 5403 | return err; |
5349 | } | 5404 | } |
5405 | EXPORT_SYMBOL_GPL(dev_change_net_namespace); | ||
5350 | 5406 | ||
5351 | static int dev_cpu_callback(struct notifier_block *nfb, | 5407 | static int dev_cpu_callback(struct notifier_block *nfb, |
5352 | unsigned long action, | 5408 | unsigned long action, |
@@ -5407,7 +5463,7 @@ unsigned long netdev_increment_features(unsigned long all, unsigned long one, | |||
5407 | unsigned long mask) | 5463 | unsigned long mask) |
5408 | { | 5464 | { |
5409 | /* If device needs checksumming, downgrade to it. */ | 5465 | /* If device needs checksumming, downgrade to it. */ |
5410 | if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) | 5466 | if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) |
5411 | all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM); | 5467 | all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM); |
5412 | else if (mask & NETIF_F_ALL_CSUM) { | 5468 | else if (mask & NETIF_F_ALL_CSUM) { |
5413 | /* If one device supports v4/v6 checksumming, set for all. */ | 5469 | /* If one device supports v4/v6 checksumming, set for all. */ |
@@ -5633,41 +5689,3 @@ static int __init initialize_hashrnd(void) | |||
5633 | 5689 | ||
5634 | late_initcall_sync(initialize_hashrnd); | 5690 | late_initcall_sync(initialize_hashrnd); |
5635 | 5691 | ||
5636 | EXPORT_SYMBOL(__dev_get_by_index); | ||
5637 | EXPORT_SYMBOL(__dev_get_by_name); | ||
5638 | EXPORT_SYMBOL(__dev_remove_pack); | ||
5639 | EXPORT_SYMBOL(dev_valid_name); | ||
5640 | EXPORT_SYMBOL(dev_add_pack); | ||
5641 | EXPORT_SYMBOL(dev_alloc_name); | ||
5642 | EXPORT_SYMBOL(dev_close); | ||
5643 | EXPORT_SYMBOL(dev_get_by_flags); | ||
5644 | EXPORT_SYMBOL(dev_get_by_index); | ||
5645 | EXPORT_SYMBOL(dev_get_by_name); | ||
5646 | EXPORT_SYMBOL(dev_open); | ||
5647 | EXPORT_SYMBOL(dev_queue_xmit); | ||
5648 | EXPORT_SYMBOL(dev_remove_pack); | ||
5649 | EXPORT_SYMBOL(dev_set_allmulti); | ||
5650 | EXPORT_SYMBOL(dev_set_promiscuity); | ||
5651 | EXPORT_SYMBOL(dev_change_flags); | ||
5652 | EXPORT_SYMBOL(dev_set_mtu); | ||
5653 | EXPORT_SYMBOL(dev_set_mac_address); | ||
5654 | EXPORT_SYMBOL(free_netdev); | ||
5655 | EXPORT_SYMBOL(netdev_boot_setup_check); | ||
5656 | EXPORT_SYMBOL(netdev_set_master); | ||
5657 | EXPORT_SYMBOL(netdev_state_change); | ||
5658 | EXPORT_SYMBOL(netif_receive_skb); | ||
5659 | EXPORT_SYMBOL(netif_rx); | ||
5660 | EXPORT_SYMBOL(register_gifconf); | ||
5661 | EXPORT_SYMBOL(register_netdevice); | ||
5662 | EXPORT_SYMBOL(register_netdevice_notifier); | ||
5663 | EXPORT_SYMBOL(skb_checksum_help); | ||
5664 | EXPORT_SYMBOL(synchronize_net); | ||
5665 | EXPORT_SYMBOL(unregister_netdevice); | ||
5666 | EXPORT_SYMBOL(unregister_netdevice_notifier); | ||
5667 | EXPORT_SYMBOL(net_enable_timestamp); | ||
5668 | EXPORT_SYMBOL(net_disable_timestamp); | ||
5669 | EXPORT_SYMBOL(dev_get_flags); | ||
5670 | |||
5671 | EXPORT_SYMBOL(dev_load); | ||
5672 | |||
5673 | EXPORT_PER_CPU_SYMBOL(softnet_data); | ||
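The block of EXPORT_SYMBOL lines that used to close net/core/dev.c is removed; each export now sits immediately after the function it belongs to (register_netdevice, free_netdev, synchronize_net, unregister_netdevice, dev_change_net_namespace and so on), which is the usual kernel convention. A schematic of that placement; example_helper and example_gpl_helper are placeholder names:

/* Conventional placement: the export directly follows the definition. */
#include <linux/module.h>

int example_helper(int x)
{
        return x + 1;
}
EXPORT_SYMBOL(example_helper);          /* visible to any module */

int example_gpl_helper(int x)
{
        return x - 1;
}
EXPORT_SYMBOL_GPL(example_gpl_helper);  /* GPL-compatible modules only, as used
                                         * here for dev_change_net_namespace */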
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 9d66fa953ab7..0a113f26bc9f 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c | |||
@@ -52,6 +52,7 @@ struct per_cpu_dm_data { | |||
52 | 52 | ||
53 | struct dm_hw_stat_delta { | 53 | struct dm_hw_stat_delta { |
54 | struct net_device *dev; | 54 | struct net_device *dev; |
55 | unsigned long last_rx; | ||
55 | struct list_head list; | 56 | struct list_head list; |
56 | struct rcu_head rcu; | 57 | struct rcu_head rcu; |
57 | unsigned long last_drop_val; | 58 | unsigned long last_drop_val; |
@@ -180,17 +181,25 @@ static void trace_napi_poll_hit(struct napi_struct *napi) | |||
180 | struct dm_hw_stat_delta *new_stat; | 181 | struct dm_hw_stat_delta *new_stat; |
181 | 182 | ||
182 | /* | 183 | /* |
183 | * Ratelimit our check time to dm_hw_check_delta jiffies | 184 | * Don't check napi structures with no associated device |
184 | */ | 185 | */ |
185 | if (!time_after(jiffies, napi->dev->last_rx + dm_hw_check_delta)) | 186 | if (!napi->dev) |
186 | return; | 187 | return; |
187 | 188 | ||
188 | rcu_read_lock(); | 189 | rcu_read_lock(); |
189 | list_for_each_entry_rcu(new_stat, &hw_stats_list, list) { | 190 | list_for_each_entry_rcu(new_stat, &hw_stats_list, list) { |
191 | /* | ||
192 | * only add a note to our monitor buffer if: | ||
193 | * 1) this is the dev we received on | ||
194 | * 2) it's after the last_rx delta | ||
195 | * 3) our rx_dropped count has gone up | ||
196 | */ | ||
190 | if ((new_stat->dev == napi->dev) && | 197 | if ((new_stat->dev == napi->dev) && |
198 | (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) && | ||
191 | (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) { | 199 | (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) { |
192 | trace_drop_common(NULL, NULL); | 200 | trace_drop_common(NULL, NULL); |
193 | new_stat->last_drop_val = napi->dev->stats.rx_dropped; | 201 | new_stat->last_drop_val = napi->dev->stats.rx_dropped; |
202 | new_stat->last_rx = jiffies; | ||
194 | break; | 203 | break; |
195 | } | 204 | } |
196 | } | 205 | } |
@@ -286,6 +295,7 @@ static int dropmon_net_event(struct notifier_block *ev_block, | |||
286 | goto out; | 295 | goto out; |
287 | 296 | ||
288 | new_stat->dev = dev; | 297 | new_stat->dev = dev; |
298 | new_stat->last_rx = jiffies; | ||
289 | INIT_RCU_HEAD(&new_stat->rcu); | 299 | INIT_RCU_HEAD(&new_stat->rcu); |
290 | spin_lock(&trace_state_lock); | 300 | spin_lock(&trace_state_lock); |
291 | list_add_rcu(&new_stat->list, &hw_stats_list); | 301 | list_add_rcu(&new_stat->list, &hw_stats_list); |
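In drop_monitor the rate limit moves from the shared dev->last_rx to a per-entry last_rx in dm_hw_stat_delta, and NAPI contexts without an associated device are now skipped outright. The test added to the loop condenses to the predicate below; the struct is trimmed to the fields used here and should_note_drop() is an illustrative wrapper, not a function from the patch.

/* Illustrative condensation of the per-device ratelimit test added above. */
#include <linux/jiffies.h>
#include <linux/netdevice.h>

struct dm_hw_stat_delta {               /* trimmed to the fields used here */
        struct net_device *dev;
        unsigned long last_rx;
        unsigned long last_drop_val;
};

static bool should_note_drop(const struct dm_hw_stat_delta *stat,
                             const struct napi_struct *napi,
                             unsigned long check_delta)
{
        return stat->dev == napi->dev &&
               time_after(jiffies, stat->last_rx + check_delta) &&
               napi->dev->stats.rx_dropped != stat->last_drop_val;
}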
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index d9d5160610d5..4c12ddb5f5ee 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -30,10 +30,17 @@ u32 ethtool_op_get_link(struct net_device *dev) | |||
30 | return netif_carrier_ok(dev) ? 1 : 0; | 30 | return netif_carrier_ok(dev) ? 1 : 0; |
31 | } | 31 | } |
32 | 32 | ||
33 | u32 ethtool_op_get_rx_csum(struct net_device *dev) | ||
34 | { | ||
35 | return (dev->features & NETIF_F_ALL_CSUM) != 0; | ||
36 | } | ||
37 | EXPORT_SYMBOL(ethtool_op_get_rx_csum); | ||
38 | |||
33 | u32 ethtool_op_get_tx_csum(struct net_device *dev) | 39 | u32 ethtool_op_get_tx_csum(struct net_device *dev) |
34 | { | 40 | { |
35 | return (dev->features & NETIF_F_ALL_CSUM) != 0; | 41 | return (dev->features & NETIF_F_ALL_CSUM) != 0; |
36 | } | 42 | } |
43 | EXPORT_SYMBOL(ethtool_op_get_tx_csum); | ||
37 | 44 | ||
38 | int ethtool_op_set_tx_csum(struct net_device *dev, u32 data) | 45 | int ethtool_op_set_tx_csum(struct net_device *dev, u32 data) |
39 | { | 46 | { |
@@ -891,6 +898,19 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr, | |||
891 | return actor(dev, edata.data); | 898 | return actor(dev, edata.data); |
892 | } | 899 | } |
893 | 900 | ||
901 | static int ethtool_flash_device(struct net_device *dev, char __user *useraddr) | ||
902 | { | ||
903 | struct ethtool_flash efl; | ||
904 | |||
905 | if (copy_from_user(&efl, useraddr, sizeof(efl))) | ||
906 | return -EFAULT; | ||
907 | |||
908 | if (!dev->ethtool_ops->flash_device) | ||
909 | return -EOPNOTSUPP; | ||
910 | |||
911 | return dev->ethtool_ops->flash_device(dev, &efl); | ||
912 | } | ||
913 | |||
894 | /* The main entry point in this file. Called from net/core/dev.c */ | 914 | /* The main entry point in this file. Called from net/core/dev.c */ |
895 | 915 | ||
896 | int dev_ethtool(struct net *net, struct ifreq *ifr) | 916 | int dev_ethtool(struct net *net, struct ifreq *ifr) |
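ethtool_flash_device() above simply validates the request and forwards it to a new flash_device hook in ethtool_ops. A hedged sketch of a driver-side implementation; mydrv_flash_device is a placeholder, and treating efl->data as a firmware file name follows how drivers of this era (e.g. netxen) use the field.

/* Sketch of a driver hook for the new ETHTOOL_FLASHDEV path. */
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>

static int mydrv_flash_device(struct net_device *dev, struct ethtool_flash *efl)
{
        const struct firmware *fw;
        int rc;

        /* efl->data carries the firmware file name requested from user space */
        rc = request_firmware(&fw, efl->data, &dev->dev);
        if (rc)
                return rc;

        /* ... program fw->data (fw->size bytes) into the NIC here ... */

        release_firmware(fw);
        return 0;
}

static const struct ethtool_ops mydrv_ethtool_ops = {
        .flash_device = mydrv_flash_device,
};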
@@ -1004,7 +1024,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1004 | break; | 1024 | break; |
1005 | case ETHTOOL_GRXCSUM: | 1025 | case ETHTOOL_GRXCSUM: |
1006 | rc = ethtool_get_value(dev, useraddr, ethcmd, | 1026 | rc = ethtool_get_value(dev, useraddr, ethcmd, |
1007 | dev->ethtool_ops->get_rx_csum); | 1027 | (dev->ethtool_ops->get_rx_csum ? |
1028 | dev->ethtool_ops->get_rx_csum : | ||
1029 | ethtool_op_get_rx_csum)); | ||
1008 | break; | 1030 | break; |
1009 | case ETHTOOL_SRXCSUM: | 1031 | case ETHTOOL_SRXCSUM: |
1010 | rc = ethtool_set_rx_csum(dev, useraddr); | 1032 | rc = ethtool_set_rx_csum(dev, useraddr); |
@@ -1068,7 +1090,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1068 | break; | 1090 | break; |
1069 | case ETHTOOL_GFLAGS: | 1091 | case ETHTOOL_GFLAGS: |
1070 | rc = ethtool_get_value(dev, useraddr, ethcmd, | 1092 | rc = ethtool_get_value(dev, useraddr, ethcmd, |
1071 | dev->ethtool_ops->get_flags); | 1093 | (dev->ethtool_ops->get_flags ? |
1094 | dev->ethtool_ops->get_flags : | ||
1095 | ethtool_op_get_flags)); | ||
1072 | break; | 1096 | break; |
1073 | case ETHTOOL_SFLAGS: | 1097 | case ETHTOOL_SFLAGS: |
1074 | rc = ethtool_set_value(dev, useraddr, | 1098 | rc = ethtool_set_value(dev, useraddr, |
@@ -1100,6 +1124,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1100 | case ETHTOOL_SGRO: | 1124 | case ETHTOOL_SGRO: |
1101 | rc = ethtool_set_gro(dev, useraddr); | 1125 | rc = ethtool_set_gro(dev, useraddr); |
1102 | break; | 1126 | break; |
1127 | case ETHTOOL_FLASHDEV: | ||
1128 | rc = ethtool_flash_device(dev, useraddr); | ||
1129 | break; | ||
1103 | default: | 1130 | default: |
1104 | rc = -EOPNOTSUPP; | 1131 | rc = -EOPNOTSUPP; |
1105 | } | 1132 | } |
@@ -1116,7 +1143,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1116 | EXPORT_SYMBOL(ethtool_op_get_link); | 1143 | EXPORT_SYMBOL(ethtool_op_get_link); |
1117 | EXPORT_SYMBOL(ethtool_op_get_sg); | 1144 | EXPORT_SYMBOL(ethtool_op_get_sg); |
1118 | EXPORT_SYMBOL(ethtool_op_get_tso); | 1145 | EXPORT_SYMBOL(ethtool_op_get_tso); |
1119 | EXPORT_SYMBOL(ethtool_op_get_tx_csum); | ||
1120 | EXPORT_SYMBOL(ethtool_op_set_sg); | 1146 | EXPORT_SYMBOL(ethtool_op_set_sg); |
1121 | EXPORT_SYMBOL(ethtool_op_set_tso); | 1147 | EXPORT_SYMBOL(ethtool_op_set_tso); |
1122 | EXPORT_SYMBOL(ethtool_op_set_tx_csum); | 1148 | EXPORT_SYMBOL(ethtool_op_set_tx_csum); |
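Two changes land in ethtool.c: ETHTOOL_GRXCSUM and ETHTOOL_GFLAGS now fall back to the generic ethtool_op_get_rx_csum()/ethtool_op_get_flags() when a driver registers no hook, and ETHTOOL_FLASHDEV is wired up. From user space the fallback is transparent; a sketch of the ioctl round trip, assuming an interface named "eth0":

/* Read the RX checksum setting via ETHTOOL_GRXCSUM; with this change the
 * value comes from ethtool_op_get_rx_csum() when the driver has no hook. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
        struct ethtool_value eval = { .cmd = ETHTOOL_GRXCSUM };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&eval;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("rx checksumming: %s\n", eval.data ? "on" : "off");

        close(fd);
        return 0;
}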
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 163b4f5b0365..e587e6819698 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -692,75 +692,74 @@ static void neigh_connect(struct neighbour *neigh) | |||
692 | hh->hh_output = neigh->ops->hh_output; | 692 | hh->hh_output = neigh->ops->hh_output; |
693 | } | 693 | } |
694 | 694 | ||
695 | static void neigh_periodic_timer(unsigned long arg) | 695 | static void neigh_periodic_work(struct work_struct *work) |
696 | { | 696 | { |
697 | struct neigh_table *tbl = (struct neigh_table *)arg; | 697 | struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work); |
698 | struct neighbour *n, **np; | 698 | struct neighbour *n, **np; |
699 | unsigned long expire, now = jiffies; | 699 | unsigned int i; |
700 | 700 | ||
701 | NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs); | 701 | NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs); |
702 | 702 | ||
703 | write_lock(&tbl->lock); | 703 | write_lock_bh(&tbl->lock); |
704 | 704 | ||
705 | /* | 705 | /* |
706 | * periodically recompute ReachableTime from random function | 706 | * periodically recompute ReachableTime from random function |
707 | */ | 707 | */ |
708 | 708 | ||
709 | if (time_after(now, tbl->last_rand + 300 * HZ)) { | 709 | if (time_after(jiffies, tbl->last_rand + 300 * HZ)) { |
710 | struct neigh_parms *p; | 710 | struct neigh_parms *p; |
711 | tbl->last_rand = now; | 711 | tbl->last_rand = jiffies; |
712 | for (p = &tbl->parms; p; p = p->next) | 712 | for (p = &tbl->parms; p; p = p->next) |
713 | p->reachable_time = | 713 | p->reachable_time = |
714 | neigh_rand_reach_time(p->base_reachable_time); | 714 | neigh_rand_reach_time(p->base_reachable_time); |
715 | } | 715 | } |
716 | 716 | ||
717 | np = &tbl->hash_buckets[tbl->hash_chain_gc]; | 717 | for (i = 0 ; i <= tbl->hash_mask; i++) { |
718 | tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask); | 718 | np = &tbl->hash_buckets[i]; |
719 | 719 | ||
720 | while ((n = *np) != NULL) { | 720 | while ((n = *np) != NULL) { |
721 | unsigned int state; | 721 | unsigned int state; |
722 | 722 | ||
723 | write_lock(&n->lock); | 723 | write_lock(&n->lock); |
724 | 724 | ||
725 | state = n->nud_state; | 725 | state = n->nud_state; |
726 | if (state & (NUD_PERMANENT | NUD_IN_TIMER)) { | 726 | if (state & (NUD_PERMANENT | NUD_IN_TIMER)) { |
727 | write_unlock(&n->lock); | 727 | write_unlock(&n->lock); |
728 | goto next_elt; | 728 | goto next_elt; |
729 | } | 729 | } |
730 | 730 | ||
731 | if (time_before(n->used, n->confirmed)) | 731 | if (time_before(n->used, n->confirmed)) |
732 | n->used = n->confirmed; | 732 | n->used = n->confirmed; |
733 | 733 | ||
734 | if (atomic_read(&n->refcnt) == 1 && | 734 | if (atomic_read(&n->refcnt) == 1 && |
735 | (state == NUD_FAILED || | 735 | (state == NUD_FAILED || |
736 | time_after(now, n->used + n->parms->gc_staletime))) { | 736 | time_after(jiffies, n->used + n->parms->gc_staletime))) { |
737 | *np = n->next; | 737 | *np = n->next; |
738 | n->dead = 1; | 738 | n->dead = 1; |
739 | write_unlock(&n->lock); | ||
740 | neigh_cleanup_and_release(n); | ||
741 | continue; | ||
742 | } | ||
739 | write_unlock(&n->lock); | 743 | write_unlock(&n->lock); |
740 | neigh_cleanup_and_release(n); | ||
741 | continue; | ||
742 | } | ||
743 | write_unlock(&n->lock); | ||
744 | 744 | ||
745 | next_elt: | 745 | next_elt: |
746 | np = &n->next; | 746 | np = &n->next; |
747 | } | ||
748 | /* | ||
749 | * It's fine to release lock here, even if hash table | ||
750 | * grows while we are preempted. | ||
751 | */ | ||
752 | write_unlock_bh(&tbl->lock); | ||
753 | cond_resched(); | ||
754 | write_lock_bh(&tbl->lock); | ||
747 | } | 755 | } |
748 | |||
749 | /* Cycle through all hash buckets every base_reachable_time/2 ticks. | 756 | /* Cycle through all hash buckets every base_reachable_time/2 ticks. |
750 | * ARP entry timeouts range from 1/2 base_reachable_time to 3/2 | 757 | * ARP entry timeouts range from 1/2 base_reachable_time to 3/2 |
751 | * base_reachable_time. | 758 | * base_reachable_time. |
752 | */ | 759 | */ |
753 | expire = tbl->parms.base_reachable_time >> 1; | 760 | schedule_delayed_work(&tbl->gc_work, |
754 | expire /= (tbl->hash_mask + 1); | 761 | tbl->parms.base_reachable_time >> 1); |
755 | if (!expire) | 762 | write_unlock_bh(&tbl->lock); |
756 | expire = 1; | ||
757 | |||
758 | if (expire>HZ) | ||
759 | mod_timer(&tbl->gc_timer, round_jiffies(now + expire)); | ||
760 | else | ||
761 | mod_timer(&tbl->gc_timer, now + expire); | ||
762 | |||
763 | write_unlock(&tbl->lock); | ||
764 | } | 763 | } |
765 | 764 | ||
766 | static __inline__ int neigh_max_probes(struct neighbour *n) | 765 | static __inline__ int neigh_max_probes(struct neighbour *n) |
@@ -1316,7 +1315,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, | |||
1316 | } | 1315 | } |
1317 | EXPORT_SYMBOL(pneigh_enqueue); | 1316 | EXPORT_SYMBOL(pneigh_enqueue); |
1318 | 1317 | ||
1319 | static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl, | 1318 | static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl, |
1320 | struct net *net, int ifindex) | 1319 | struct net *net, int ifindex) |
1321 | { | 1320 | { |
1322 | struct neigh_parms *p; | 1321 | struct neigh_parms *p; |
@@ -1337,7 +1336,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, | |||
1337 | struct net *net = dev_net(dev); | 1336 | struct net *net = dev_net(dev); |
1338 | const struct net_device_ops *ops = dev->netdev_ops; | 1337 | const struct net_device_ops *ops = dev->netdev_ops; |
1339 | 1338 | ||
1340 | ref = lookup_neigh_params(tbl, net, 0); | 1339 | ref = lookup_neigh_parms(tbl, net, 0); |
1341 | if (!ref) | 1340 | if (!ref) |
1342 | return NULL; | 1341 | return NULL; |
1343 | 1342 | ||
@@ -1442,10 +1441,8 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl) | |||
1442 | get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); | 1441 | get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); |
1443 | 1442 | ||
1444 | rwlock_init(&tbl->lock); | 1443 | rwlock_init(&tbl->lock); |
1445 | setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl); | 1444 | INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work); |
1446 | tbl->gc_timer.expires = now + 1; | 1445 | schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time); |
1447 | add_timer(&tbl->gc_timer); | ||
1448 | |||
1449 | setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl); | 1446 | setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl); |
1450 | skb_queue_head_init_class(&tbl->proxy_queue, | 1447 | skb_queue_head_init_class(&tbl->proxy_queue, |
1451 | &neigh_table_proxy_queue_class); | 1448 | &neigh_table_proxy_queue_class); |
@@ -1482,7 +1479,8 @@ int neigh_table_clear(struct neigh_table *tbl) | |||
1482 | struct neigh_table **tp; | 1479 | struct neigh_table **tp; |
1483 | 1480 | ||
1484 | /* It is not clean... Fix it to unload IPv6 module safely */ | 1481 | /* It is not clean... Fix it to unload IPv6 module safely */ |
1485 | del_timer_sync(&tbl->gc_timer); | 1482 | cancel_delayed_work(&tbl->gc_work); |
1483 | flush_scheduled_work(); | ||
1486 | del_timer_sync(&tbl->proxy_timer); | 1484 | del_timer_sync(&tbl->proxy_timer); |
1487 | pneigh_queue_purge(&tbl->proxy_queue); | 1485 | pneigh_queue_purge(&tbl->proxy_queue); |
1488 | neigh_ifdown(tbl, NULL); | 1486 | neigh_ifdown(tbl, NULL); |
@@ -1752,7 +1750,6 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, | |||
1752 | .ndtc_last_rand = jiffies_to_msecs(rand_delta), | 1750 | .ndtc_last_rand = jiffies_to_msecs(rand_delta), |
1753 | .ndtc_hash_rnd = tbl->hash_rnd, | 1751 | .ndtc_hash_rnd = tbl->hash_rnd, |
1754 | .ndtc_hash_mask = tbl->hash_mask, | 1752 | .ndtc_hash_mask = tbl->hash_mask, |
1755 | .ndtc_hash_chain_gc = tbl->hash_chain_gc, | ||
1756 | .ndtc_proxy_qlen = tbl->proxy_queue.qlen, | 1753 | .ndtc_proxy_qlen = tbl->proxy_queue.qlen, |
1757 | }; | 1754 | }; |
1758 | 1755 | ||
@@ -1906,7 +1903,7 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
1906 | if (tbp[NDTPA_IFINDEX]) | 1903 | if (tbp[NDTPA_IFINDEX]) |
1907 | ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]); | 1904 | ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]); |
1908 | 1905 | ||
1909 | p = lookup_neigh_params(tbl, net, ifindex); | 1906 | p = lookup_neigh_parms(tbl, net, ifindex); |
1910 | if (p == NULL) { | 1907 | if (p == NULL) { |
1911 | err = -ENOENT; | 1908 | err = -ENOENT; |
1912 | goto errout_tbl_lock; | 1909 | goto errout_tbl_lock; |
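The neighbour garbage collector moves from a self-rearming timer that scanned one hash chain per run to a deferrable delayed work item that walks every bucket, releasing and re-taking the table lock between buckets so it can cond_resched(); hash_chain_gc and the ndtc_hash_chain_gc attribute disappear with it. The life cycle of such a work item, in outline; my_table, my_work_fn and the 30*HZ period are placeholders, and the calls mirror the hunk and the workqueue API of this kernel generation.

/* Outline of the delayed-work life cycle used by neigh_periodic_work(). */
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_table {
        struct delayed_work gc_work;
};

static void my_work_fn(struct work_struct *work)
{
        struct my_table *tbl = container_of(work, struct my_table, gc_work.work);

        /* ... scan the table ...; then re-arm ourselves */
        schedule_delayed_work(&tbl->gc_work, 30 * HZ);
}

static void my_table_init(struct my_table *tbl)
{
        /* deferrable: firing may be delayed rather than waking an idle CPU */
        INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, my_work_fn);
        schedule_delayed_work(&tbl->gc_work, HZ);
}

static void my_table_exit(struct my_table *tbl)
{
        cancel_delayed_work(&tbl->gc_work);
        flush_scheduled_work();         /* wait out a possibly running instance */
}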
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 3994680c08b9..ad91e9e5f475 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -141,7 +141,7 @@ static ssize_t show_dormant(struct device *dev, | |||
141 | return -EINVAL; | 141 | return -EINVAL; |
142 | } | 142 | } |
143 | 143 | ||
144 | static const char *operstates[] = { | 144 | static const char *const operstates[] = { |
145 | "unknown", | 145 | "unknown", |
146 | "notpresent", /* currently unused */ | 146 | "notpresent", /* currently unused */ |
147 | "down", | 147 | "down", |
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 197283072cc8..1c1af2756f38 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -6,6 +6,8 @@ | |||
6 | #include <linux/delay.h> | 6 | #include <linux/delay.h> |
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/idr.h> | 8 | #include <linux/idr.h> |
9 | #include <linux/rculist.h> | ||
10 | #include <linux/nsproxy.h> | ||
9 | #include <net/net_namespace.h> | 11 | #include <net/net_namespace.h> |
10 | #include <net/netns/generic.h> | 12 | #include <net/netns/generic.h> |
11 | 13 | ||
@@ -127,7 +129,7 @@ static struct net *net_create(void) | |||
127 | rv = setup_net(net); | 129 | rv = setup_net(net); |
128 | if (rv == 0) { | 130 | if (rv == 0) { |
129 | rtnl_lock(); | 131 | rtnl_lock(); |
130 | list_add_tail(&net->list, &net_namespace_list); | 132 | list_add_tail_rcu(&net->list, &net_namespace_list); |
131 | rtnl_unlock(); | 133 | rtnl_unlock(); |
132 | } | 134 | } |
133 | mutex_unlock(&net_mutex); | 135 | mutex_unlock(&net_mutex); |
@@ -156,9 +158,16 @@ static void cleanup_net(struct work_struct *work) | |||
156 | 158 | ||
157 | /* Don't let anyone else find us. */ | 159 | /* Don't let anyone else find us. */ |
158 | rtnl_lock(); | 160 | rtnl_lock(); |
159 | list_del(&net->list); | 161 | list_del_rcu(&net->list); |
160 | rtnl_unlock(); | 162 | rtnl_unlock(); |
161 | 163 | ||
164 | /* | ||
165 | * Another CPU might be rcu-iterating the list, wait for it. | ||
166 | * This needs to be before calling the exit() notifiers, so | ||
167 | * the rcu_barrier() below isn't sufficient alone. | ||
168 | */ | ||
169 | synchronize_rcu(); | ||
170 | |||
162 | /* Run all of the network namespace exit methods */ | 171 | /* Run all of the network namespace exit methods */ |
163 | list_for_each_entry_reverse(ops, &pernet_list, list) { | 172 | list_for_each_entry_reverse(ops, &pernet_list, list) { |
164 | if (ops->exit) | 173 | if (ops->exit) |
@@ -193,6 +202,26 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net) | |||
193 | } | 202 | } |
194 | #endif | 203 | #endif |
195 | 204 | ||
205 | struct net *get_net_ns_by_pid(pid_t pid) | ||
206 | { | ||
207 | struct task_struct *tsk; | ||
208 | struct net *net; | ||
209 | |||
210 | /* Lookup the network namespace */ | ||
211 | net = ERR_PTR(-ESRCH); | ||
212 | rcu_read_lock(); | ||
213 | tsk = find_task_by_vpid(pid); | ||
214 | if (tsk) { | ||
215 | struct nsproxy *nsproxy; | ||
216 | nsproxy = task_nsproxy(tsk); | ||
217 | if (nsproxy) | ||
218 | net = get_net(nsproxy->net_ns); | ||
219 | } | ||
220 | rcu_read_unlock(); | ||
221 | return net; | ||
222 | } | ||
223 | EXPORT_SYMBOL_GPL(get_net_ns_by_pid); | ||
224 | |||
196 | static int __init net_ns_init(void) | 225 | static int __init net_ns_init(void) |
197 | { | 226 | { |
198 | struct net_generic *ng; | 227 | struct net_generic *ng; |
@@ -219,7 +248,7 @@ static int __init net_ns_init(void) | |||
219 | panic("Could not setup the initial network namespace"); | 248 | panic("Could not setup the initial network namespace"); |
220 | 249 | ||
221 | rtnl_lock(); | 250 | rtnl_lock(); |
222 | list_add_tail(&init_net.list, &net_namespace_list); | 251 | list_add_tail_rcu(&init_net.list, &net_namespace_list); |
223 | rtnl_unlock(); | 252 | rtnl_unlock(); |
224 | 253 | ||
225 | mutex_unlock(&net_mutex); | 254 | mutex_unlock(&net_mutex); |
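net_namespace_list is now updated with list_add_tail_rcu()/list_del_rcu(), and cleanup_net() adds a synchronize_rcu() before running the exit notifiers so that RCU readers walking the list never see a namespace that is being torn down; get_net_ns_by_pid() is added alongside. A sketch of what such an RCU-side reader looks like; count_namespaces() is illustrative, the primitives are the standard rculist ones.

/* Sketch of an RCU reader matching the list_add_tail_rcu()/list_del_rcu()
 * writers above; count_namespaces() is not a function from the patch. */
#include <linux/rculist.h>
#include <net/net_namespace.h>

static int count_namespaces(void)
{
        struct net *net;
        int n = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(net, &net_namespace_list, list)
                n++;
        rcu_read_unlock();

        return n;
}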
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 1b76eb11deb4..0b4d0d35ef40 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * Copyright (C) 2002 Red Hat, Inc. | 9 | * Copyright (C) 2002 Red Hat, Inc. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/moduleparam.h> | ||
12 | #include <linux/netdevice.h> | 13 | #include <linux/netdevice.h> |
13 | #include <linux/etherdevice.h> | 14 | #include <linux/etherdevice.h> |
14 | #include <linux/string.h> | 15 | #include <linux/string.h> |
@@ -50,6 +51,9 @@ static atomic_t trapped; | |||
50 | static void zap_completion_queue(void); | 51 | static void zap_completion_queue(void); |
51 | static void arp_reply(struct sk_buff *skb); | 52 | static void arp_reply(struct sk_buff *skb); |
52 | 53 | ||
54 | static unsigned int carrier_timeout = 4; | ||
55 | module_param(carrier_timeout, uint, 0644); | ||
56 | |||
53 | static void queue_process(struct work_struct *work) | 57 | static void queue_process(struct work_struct *work) |
54 | { | 58 | { |
55 | struct netpoll_info *npinfo = | 59 | struct netpoll_info *npinfo = |
@@ -737,7 +741,7 @@ int netpoll_setup(struct netpoll *np) | |||
737 | } | 741 | } |
738 | 742 | ||
739 | atleast = jiffies + HZ/10; | 743 | atleast = jiffies + HZ/10; |
740 | atmost = jiffies + 4*HZ; | 744 | atmost = jiffies + carrier_timeout * HZ; |
741 | while (!netif_carrier_ok(ndev)) { | 745 | while (!netif_carrier_ok(ndev)) { |
742 | if (time_after(jiffies, atmost)) { | 746 | if (time_after(jiffies, atmost)) { |
743 | printk(KERN_NOTICE | 747 | printk(KERN_NOTICE |
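netpoll's hard-coded four-second carrier wait becomes the carrier_timeout module parameter. With permissions 0644 it should normally be adjustable at run time through sysfs (typically /sys/module/netpoll/parameters/carrier_timeout, though the exact path depends on how the object is built in). The idiom itself, with my_timeout as a placeholder name:

/* The module_param idiom used above, reduced to its parts. */
#include <linux/moduleparam.h>

static unsigned int my_timeout = 4;     /* seconds */
module_param(my_timeout, uint, 0644);   /* world-readable, root-writable via sysfs */
MODULE_PARM_DESC(my_timeout, "seconds to wait for carrier");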
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 19b8c20e98a4..0bcecbf06581 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -131,6 +131,7 @@ | |||
131 | #include <linux/ioport.h> | 131 | #include <linux/ioport.h> |
132 | #include <linux/interrupt.h> | 132 | #include <linux/interrupt.h> |
133 | #include <linux/capability.h> | 133 | #include <linux/capability.h> |
134 | #include <linux/hrtimer.h> | ||
134 | #include <linux/freezer.h> | 135 | #include <linux/freezer.h> |
135 | #include <linux/delay.h> | 136 | #include <linux/delay.h> |
136 | #include <linux/timer.h> | 137 | #include <linux/timer.h> |
@@ -162,14 +163,13 @@ | |||
162 | #include <asm/byteorder.h> | 163 | #include <asm/byteorder.h> |
163 | #include <linux/rcupdate.h> | 164 | #include <linux/rcupdate.h> |
164 | #include <linux/bitops.h> | 165 | #include <linux/bitops.h> |
165 | #include <asm/io.h> | 166 | #include <linux/io.h> |
167 | #include <linux/timex.h> | ||
168 | #include <linux/uaccess.h> | ||
166 | #include <asm/dma.h> | 169 | #include <asm/dma.h> |
167 | #include <asm/uaccess.h> | ||
168 | #include <asm/div64.h> /* do_div */ | 170 | #include <asm/div64.h> /* do_div */ |
169 | #include <asm/timex.h> | ||
170 | |||
171 | #define VERSION "pktgen v2.70: Packet Generator for packet performance testing.\n" | ||
172 | 171 | ||
172 | #define VERSION "2.72" | ||
173 | #define IP_NAME_SZ 32 | 173 | #define IP_NAME_SZ 32 |
174 | #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ | 174 | #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ |
175 | #define MPLS_STACK_BOTTOM htonl(0x00000100) | 175 | #define MPLS_STACK_BOTTOM htonl(0x00000100) |
@@ -206,7 +206,7 @@ | |||
206 | #define PKTGEN_MAGIC 0xbe9be955 | 206 | #define PKTGEN_MAGIC 0xbe9be955 |
207 | #define PG_PROC_DIR "pktgen" | 207 | #define PG_PROC_DIR "pktgen" |
208 | #define PGCTRL "pgctrl" | 208 | #define PGCTRL "pgctrl" |
209 | static struct proc_dir_entry *pg_proc_dir = NULL; | 209 | static struct proc_dir_entry *pg_proc_dir; |
210 | 210 | ||
211 | #define MAX_CFLOWS 65536 | 211 | #define MAX_CFLOWS 65536 |
212 | 212 | ||
@@ -231,9 +231,9 @@ struct pktgen_dev { | |||
231 | */ | 231 | */ |
232 | struct proc_dir_entry *entry; /* proc file */ | 232 | struct proc_dir_entry *entry; /* proc file */ |
233 | struct pktgen_thread *pg_thread;/* the owner */ | 233 | struct pktgen_thread *pg_thread;/* the owner */ |
234 | struct list_head list; /* Used for chaining in the thread's run-queue */ | 234 | struct list_head list; /* chaining in the thread's run-queue */ |
235 | 235 | ||
236 | int running; /* if this changes to false, the test will stop */ | 236 | int running; /* if false, the test will stop */ |
237 | 237 | ||
238 | /* If min != max, then we will either do a linear iteration, or | 238 | /* If min != max, then we will either do a linear iteration, or |
239 | * we will do a random selection from within the range. | 239 | * we will do a random selection from within the range. |
@@ -246,33 +246,37 @@ struct pktgen_dev { | |||
246 | int max_pkt_size; /* = ETH_ZLEN; */ | 246 | int max_pkt_size; /* = ETH_ZLEN; */ |
247 | int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */ | 247 | int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */ |
248 | int nfrags; | 248 | int nfrags; |
249 | __u32 delay_us; /* Default delay */ | 249 | u64 delay; /* nano-seconds */ |
250 | __u32 delay_ns; | 250 | |
251 | __u64 count; /* Default No packets to send */ | 251 | __u64 count; /* Default No packets to send */ |
252 | __u64 sofar; /* How many pkts we've sent so far */ | 252 | __u64 sofar; /* How many pkts we've sent so far */ |
253 | __u64 tx_bytes; /* How many bytes we've transmitted */ | 253 | __u64 tx_bytes; /* How many bytes we've transmitted */ |
254 | __u64 errors; /* Errors when trying to transmit, pkts will be re-sent */ | 254 | __u64 errors; /* Errors when trying to transmit, |
255 | pkts will be re-sent */ | ||
255 | 256 | ||
256 | /* runtime counters relating to clone_skb */ | 257 | /* runtime counters relating to clone_skb */ |
257 | __u64 next_tx_us; /* timestamp of when to tx next */ | ||
258 | __u32 next_tx_ns; | ||
259 | 258 | ||
260 | __u64 allocated_skbs; | 259 | __u64 allocated_skbs; |
261 | __u32 clone_count; | 260 | __u32 clone_count; |
262 | int last_ok; /* Was last skb sent? | 261 | int last_ok; /* Was last skb sent? |
263 | * Or a failed transmit of some sort? This will keep | 262 | * Or a failed transmit of some sort? |
264 | * sequence numbers in order, for example. | 263 | * This will keep sequence numbers in order |
265 | */ | 264 | */ |
266 | __u64 started_at; /* micro-seconds */ | 265 | ktime_t next_tx; |
267 | __u64 stopped_at; /* micro-seconds */ | 266 | ktime_t started_at; |
268 | __u64 idle_acc; /* micro-seconds */ | 267 | ktime_t stopped_at; |
268 | u64 idle_acc; /* nano-seconds */ | ||
269 | |||
269 | __u32 seq_num; | 270 | __u32 seq_num; |
270 | 271 | ||
271 | int clone_skb; /* Use multiple SKBs during packet gen. If this number | 272 | int clone_skb; /* |
272 | * is greater than 1, then that many copies of the same | 273 | * Use multiple SKBs during packet gen. |
273 | * packet will be sent before a new packet is allocated. | 274 | * If this number is greater than 1, then |
274 | * For instance, if you want to send 1024 identical packets | 275 | * that many copies of the same packet will be |
275 | * before creating a new packet, set clone_skb to 1024. | 276 | * sent before a new packet is allocated. |
277 | * If you want to send 1024 identical packets | ||
278 | * before creating a new packet, | ||
279 | * set clone_skb to 1024. | ||
276 | */ | 280 | */ |
277 | 281 | ||
278 | char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ | 282 | char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ |
@@ -304,8 +308,10 @@ struct pktgen_dev { | |||
304 | __u16 udp_dst_max; /* exclusive, dest UDP port */ | 308 | __u16 udp_dst_max; /* exclusive, dest UDP port */ |
305 | 309 | ||
306 | /* DSCP + ECN */ | 310 | /* DSCP + ECN */ |
307 | __u8 tos; /* six most significant bits of (former) IPv4 TOS are for dscp codepoint */ | 311 | __u8 tos; /* six MSB of (former) IPv4 TOS |
308 | __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6 (see RFC 3260, sec. 4) */ | 312 | are for dscp codepoint */ |
313 | __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6 | ||
314 | (see RFC 3260, sec. 4) */ | ||
309 | 315 | ||
310 | /* MPLS */ | 316 | /* MPLS */ |
311 | unsigned nr_labels; /* Depth of stack, 0 = no MPLS */ | 317 | unsigned nr_labels; /* Depth of stack, 0 = no MPLS */ |
@@ -346,15 +352,17 @@ struct pktgen_dev { | |||
346 | */ | 352 | */ |
347 | __u16 pad; /* pad out the hh struct to an even 16 bytes */ | 353 | __u16 pad; /* pad out the hh struct to an even 16 bytes */ |
348 | 354 | ||
349 | struct sk_buff *skb; /* skb we are to transmit next, mainly used for when we | 355 | struct sk_buff *skb; /* skb we are to transmit next, used for when we |
350 | * are transmitting the same one multiple times | 356 | * are transmitting the same one multiple times |
351 | */ | 357 | */ |
352 | struct net_device *odev; /* The out-going device. Note that the device should | 358 | struct net_device *odev; /* The out-going device. |
353 | * have it's pg_info pointer pointing back to this | 359 | * Note that the device should have it's |
354 | * device. This will be set when the user specifies | 360 | * pg_info pointer pointing back to this |
355 | * the out-going device name (not when the inject is | 361 | * device. |
356 | * started as it used to do.) | 362 | * Set when the user specifies the out-going |
357 | */ | 363 | * device name (not when the inject is |
364 | * started as it used to do.) | ||
365 | */ | ||
358 | struct flow_state *flows; | 366 | struct flow_state *flows; |
359 | unsigned cflows; /* Concurrent flows (config) */ | 367 | unsigned cflows; /* Concurrent flows (config) */ |
360 | unsigned lflow; /* Flow length (config) */ | 368 | unsigned lflow; /* Flow length (config) */ |
@@ -379,13 +387,14 @@ struct pktgen_hdr { | |||
379 | }; | 387 | }; |
380 | 388 | ||
381 | struct pktgen_thread { | 389 | struct pktgen_thread { |
382 | spinlock_t if_lock; | 390 | spinlock_t if_lock; /* for list of devices */ |
383 | struct list_head if_list; /* All device here */ | 391 | struct list_head if_list; /* All device here */ |
384 | struct list_head th_list; | 392 | struct list_head th_list; |
385 | struct task_struct *tsk; | 393 | struct task_struct *tsk; |
386 | char result[512]; | 394 | char result[512]; |
387 | 395 | ||
388 | /* Field for thread to receive "posted" events terminate, stop ifs etc. */ | 396 | /* Field for thread to receive "posted" events terminate, |
397 | stop ifs etc. */ | ||
389 | 398 | ||
390 | u32 control; | 399 | u32 control; |
391 | int cpu; | 400 | int cpu; |
@@ -397,24 +406,22 @@ struct pktgen_thread { | |||
397 | #define REMOVE 1 | 406 | #define REMOVE 1 |
398 | #define FIND 0 | 407 | #define FIND 0 |
399 | 408 | ||
400 | /** Convert to micro-seconds */ | 409 | static inline ktime_t ktime_now(void) |
401 | static inline __u64 tv_to_us(const struct timeval *tv) | ||
402 | { | 410 | { |
403 | __u64 us = tv->tv_usec; | 411 | struct timespec ts; |
404 | us += (__u64) tv->tv_sec * (__u64) 1000000; | 412 | ktime_get_ts(&ts); |
405 | return us; | 413 | |
414 | return timespec_to_ktime(ts); | ||
406 | } | 415 | } |
407 | 416 | ||
408 | static __u64 getCurUs(void) | 417 | /* This works even if 32 bit because of careful byte order choice */ |
418 | static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2) | ||
409 | { | 419 | { |
410 | struct timeval tv; | 420 | return cmp1.tv64 < cmp2.tv64; |
411 | do_gettimeofday(&tv); | ||
412 | return tv_to_us(&tv); | ||
413 | } | 421 | } |
414 | 422 | ||
415 | /* old include end */ | 423 | static const char version[] = |
416 | 424 | "pktgen " VERSION ": Packet Generator for packet performance testing.\n"; | |
417 | static char version[] __initdata = VERSION; | ||
418 | 425 | ||
419 | static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); | 426 | static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); |
420 | static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); | 427 | static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); |
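pktgen drops its homegrown timeval/microsecond helpers (tv_to_us, getCurUs) in favour of ktime_t plus the two small wrappers added above. Reproduced below together with a condensed illustration of how they are meant to be used for pacing against the new next_tx field; time_to_send() is not a function from the patch.

/* The ktime wrappers from the hunk above, plus an illustrative pacing check. */
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/time.h>

static inline ktime_t ktime_now(void)
{
        struct timespec ts;
        ktime_get_ts(&ts);

        return timespec_to_ktime(ts);
}

static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
{
        return cmp1.tv64 < cmp2.tv64;
}

/* Has next_tx been reached yet? (condensed illustration) */
static int time_to_send(ktime_t next_tx)
{
        return !ktime_lt(ktime_now(), next_tx);
}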
@@ -424,7 +431,7 @@ static int pktgen_device_event(struct notifier_block *, unsigned long, void *); | |||
424 | static void pktgen_run_all_threads(void); | 431 | static void pktgen_run_all_threads(void); |
425 | static void pktgen_reset_all_threads(void); | 432 | static void pktgen_reset_all_threads(void); |
426 | static void pktgen_stop_all_threads_ifs(void); | 433 | static void pktgen_stop_all_threads_ifs(void); |
427 | static int pktgen_stop_device(struct pktgen_dev *pkt_dev); | 434 | |
428 | static void pktgen_stop(struct pktgen_thread *t); | 435 | static void pktgen_stop(struct pktgen_thread *t); |
429 | static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); | 436 | static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); |
430 | 437 | ||
@@ -432,10 +439,10 @@ static unsigned int scan_ip6(const char *s, char ip[16]); | |||
432 | static unsigned int fmt_ip6(char *s, const char ip[16]); | 439 | static unsigned int fmt_ip6(char *s, const char ip[16]); |
433 | 440 | ||
434 | /* Module parameters, defaults. */ | 441 | /* Module parameters, defaults. */ |
435 | static int pg_count_d = 1000; /* 1000 pkts by default */ | 442 | static int pg_count_d __read_mostly = 1000; |
436 | static int pg_delay_d; | 443 | static int pg_delay_d __read_mostly; |
437 | static int pg_clone_skb_d; | 444 | static int pg_clone_skb_d __read_mostly; |
438 | static int debug; | 445 | static int debug __read_mostly; |
439 | 446 | ||
440 | static DEFINE_MUTEX(pktgen_thread_lock); | 447 | static DEFINE_MUTEX(pktgen_thread_lock); |
441 | static LIST_HEAD(pktgen_threads); | 448 | static LIST_HEAD(pktgen_threads); |
@@ -451,12 +458,12 @@ static struct notifier_block pktgen_notifier_block = { | |||
451 | 458 | ||
452 | static int pgctrl_show(struct seq_file *seq, void *v) | 459 | static int pgctrl_show(struct seq_file *seq, void *v) |
453 | { | 460 | { |
454 | seq_puts(seq, VERSION); | 461 | seq_puts(seq, version); |
455 | return 0; | 462 | return 0; |
456 | } | 463 | } |
457 | 464 | ||
458 | static ssize_t pgctrl_write(struct file *file, const char __user * buf, | 465 | static ssize_t pgctrl_write(struct file *file, const char __user *buf, |
459 | size_t count, loff_t * ppos) | 466 | size_t count, loff_t *ppos) |
460 | { | 467 | { |
461 | int err = 0; | 468 | int err = 0; |
462 | char data[128]; | 469 | char data[128]; |
@@ -509,10 +516,9 @@ static const struct file_operations pktgen_fops = { | |||
509 | 516 | ||
510 | static int pktgen_if_show(struct seq_file *seq, void *v) | 517 | static int pktgen_if_show(struct seq_file *seq, void *v) |
511 | { | 518 | { |
512 | struct pktgen_dev *pkt_dev = seq->private; | 519 | const struct pktgen_dev *pkt_dev = seq->private; |
513 | __u64 sa; | 520 | ktime_t stopped; |
514 | __u64 stopped; | 521 | u64 idle; |
515 | __u64 now = getCurUs(); | ||
516 | 522 | ||
517 | seq_printf(seq, | 523 | seq_printf(seq, |
518 | "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", | 524 | "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", |
@@ -520,9 +526,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
520 | pkt_dev->max_pkt_size); | 526 | pkt_dev->max_pkt_size); |
521 | 527 | ||
522 | seq_printf(seq, | 528 | seq_printf(seq, |
523 | " frags: %d delay: %u clone_skb: %d ifname: %s\n", | 529 | " frags: %d delay: %llu clone_skb: %d ifname: %s\n", |
524 | pkt_dev->nfrags, | 530 | pkt_dev->nfrags, (unsigned long long) pkt_dev->delay, |
525 | 1000 * pkt_dev->delay_us + pkt_dev->delay_ns, | ||
526 | pkt_dev->clone_skb, pkt_dev->odev->name); | 531 | pkt_dev->clone_skb, pkt_dev->odev->name); |
527 | 532 | ||
528 | seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, | 533 | seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, |
@@ -549,11 +554,14 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
549 | " daddr: %s min_daddr: %s max_daddr: %s\n", b1, | 554 | " daddr: %s min_daddr: %s max_daddr: %s\n", b1, |
550 | b2, b3); | 555 | b2, b3); |
551 | 556 | ||
552 | } else | 557 | } else { |
558 | seq_printf(seq, | ||
559 | " dst_min: %s dst_max: %s\n", | ||
560 | pkt_dev->dst_min, pkt_dev->dst_max); | ||
553 | seq_printf(seq, | 561 | seq_printf(seq, |
554 | " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", | 562 | " src_min: %s src_max: %s\n", |
555 | pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, | 563 | pkt_dev->src_min, pkt_dev->src_max); |
556 | pkt_dev->src_max); | 564 | } |
557 | 565 | ||
558 | seq_puts(seq, " src_mac: "); | 566 | seq_puts(seq, " src_mac: "); |
559 | 567 | ||
@@ -565,7 +573,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
565 | seq_printf(seq, "%pM\n", pkt_dev->dst_mac); | 573 | seq_printf(seq, "%pM\n", pkt_dev->dst_mac); |
566 | 574 | ||
567 | seq_printf(seq, | 575 | seq_printf(seq, |
568 | " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", | 576 | " udp_src_min: %d udp_src_max: %d" |
577 | " udp_dst_min: %d udp_dst_max: %d\n", | ||
569 | pkt_dev->udp_src_min, pkt_dev->udp_src_max, | 578 | pkt_dev->udp_src_min, pkt_dev->udp_src_max, |
570 | pkt_dev->udp_dst_min, pkt_dev->udp_dst_max); | 579 | pkt_dev->udp_dst_min, pkt_dev->udp_dst_max); |
571 | 580 | ||
@@ -581,23 +590,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
581 | i == pkt_dev->nr_labels-1 ? "\n" : ", "); | 590 | i == pkt_dev->nr_labels-1 ? "\n" : ", "); |
582 | } | 591 | } |
583 | 592 | ||
584 | if (pkt_dev->vlan_id != 0xffff) { | 593 | if (pkt_dev->vlan_id != 0xffff) |
585 | seq_printf(seq, " vlan_id: %u vlan_p: %u vlan_cfi: %u\n", | 594 | seq_printf(seq, " vlan_id: %u vlan_p: %u vlan_cfi: %u\n", |
586 | pkt_dev->vlan_id, pkt_dev->vlan_p, pkt_dev->vlan_cfi); | 595 | pkt_dev->vlan_id, pkt_dev->vlan_p, |
587 | } | 596 | pkt_dev->vlan_cfi); |
588 | 597 | ||
589 | if (pkt_dev->svlan_id != 0xffff) { | 598 | if (pkt_dev->svlan_id != 0xffff) |
590 | seq_printf(seq, " svlan_id: %u vlan_p: %u vlan_cfi: %u\n", | 599 | seq_printf(seq, " svlan_id: %u vlan_p: %u vlan_cfi: %u\n", |
591 | pkt_dev->svlan_id, pkt_dev->svlan_p, pkt_dev->svlan_cfi); | 600 | pkt_dev->svlan_id, pkt_dev->svlan_p, |
592 | } | 601 | pkt_dev->svlan_cfi); |
593 | 602 | ||
594 | if (pkt_dev->tos) { | 603 | if (pkt_dev->tos) |
595 | seq_printf(seq, " tos: 0x%02x\n", pkt_dev->tos); | 604 | seq_printf(seq, " tos: 0x%02x\n", pkt_dev->tos); |
596 | } | ||
597 | 605 | ||
598 | if (pkt_dev->traffic_class) { | 606 | if (pkt_dev->traffic_class) |
599 | seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); | 607 | seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); |
600 | } | ||
601 | 608 | ||
602 | seq_printf(seq, " Flags: "); | 609 | seq_printf(seq, " Flags: "); |
603 | 610 | ||
@@ -654,17 +661,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
654 | 661 | ||
655 | seq_puts(seq, "\n"); | 662 | seq_puts(seq, "\n"); |
656 | 663 | ||
657 | sa = pkt_dev->started_at; | 664 | /* not really stopped, more like last-running-at */ |
658 | stopped = pkt_dev->stopped_at; | 665 | stopped = pkt_dev->running ? ktime_now() : pkt_dev->stopped_at; |
659 | if (pkt_dev->running) | 666 | idle = pkt_dev->idle_acc; |
660 | stopped = now; /* not really stopped, more like last-running-at */ | 667 | do_div(idle, NSEC_PER_USEC); |
661 | 668 | ||
662 | seq_printf(seq, | 669 | seq_printf(seq, |
663 | "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", | 670 | "Current:\n pkts-sofar: %llu errors: %llu\n", |
664 | (unsigned long long)pkt_dev->sofar, | 671 | (unsigned long long)pkt_dev->sofar, |
665 | (unsigned long long)pkt_dev->errors, (unsigned long long)sa, | 672 | (unsigned long long)pkt_dev->errors); |
666 | (unsigned long long)stopped, | 673 | |
667 | (unsigned long long)pkt_dev->idle_acc); | 674 | seq_printf(seq, |
675 | " started: %lluus stopped: %lluus idle: %lluus\n", | ||
676 | (unsigned long long) ktime_to_us(pkt_dev->started_at), | ||
677 | (unsigned long long) ktime_to_us(stopped), | ||
678 | (unsigned long long) idle); | ||
668 | 679 | ||
669 | seq_printf(seq, | 680 | seq_printf(seq, |
670 | " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", | 681 | " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", |
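In the /proc output path above, idle_acc is now accumulated in nanoseconds and scaled for display with do_div(), the helper required for dividing a u64 by a 32-bit value on 32-bit architectures; note that do_div() divides its first argument in place and returns the remainder. A minimal sketch of that semantics, with ns_to_us_example as a placeholder:

/* do_div() semantics as used for the idle-time conversion above. */
#include <asm/div64.h>
#include <linux/time.h>

static unsigned long long ns_to_us_example(unsigned long long ns)
{
        unsigned long long us = ns;
        unsigned int rem;

        rem = do_div(us, NSEC_PER_USEC);        /* us /= 1000, rem = ns % 1000 */
        (void)rem;                              /* remainder unused here */
        return us;
}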
@@ -696,7 +707,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
696 | } | 707 | } |
697 | 708 | ||
698 | 709 | ||
699 | static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, __u32 *num) | 710 | static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, |
711 | __u32 *num) | ||
700 | { | 712 | { |
701 | int i = 0; | 713 | int i = 0; |
702 | *num = 0; | 714 | *num = 0; |
@@ -846,9 +858,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
846 | /* Read variable name */ | 858 | /* Read variable name */ |
847 | 859 | ||
848 | len = strn_len(&user_buffer[i], sizeof(name) - 1); | 860 | len = strn_len(&user_buffer[i], sizeof(name) - 1); |
849 | if (len < 0) { | 861 | if (len < 0) |
850 | return len; | 862 | return len; |
851 | } | 863 | |
852 | memset(name, 0, sizeof(name)); | 864 | memset(name, 0, sizeof(name)); |
853 | if (copy_from_user(name, &user_buffer[i], len)) | 865 | if (copy_from_user(name, &user_buffer[i], len)) |
854 | return -EFAULT; | 866 | return -EFAULT; |
@@ -872,9 +884,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
872 | 884 | ||
873 | if (!strcmp(name, "min_pkt_size")) { | 885 | if (!strcmp(name, "min_pkt_size")) { |
874 | len = num_arg(&user_buffer[i], 10, &value); | 886 | len = num_arg(&user_buffer[i], 10, &value); |
875 | if (len < 0) { | 887 | if (len < 0) |
876 | return len; | 888 | return len; |
877 | } | 889 | |
878 | i += len; | 890 | i += len; |
879 | if (value < 14 + 20 + 8) | 891 | if (value < 14 + 20 + 8) |
880 | value = 14 + 20 + 8; | 892 | value = 14 + 20 + 8; |
@@ -889,9 +901,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
889 | 901 | ||
890 | if (!strcmp(name, "max_pkt_size")) { | 902 | if (!strcmp(name, "max_pkt_size")) { |
891 | len = num_arg(&user_buffer[i], 10, &value); | 903 | len = num_arg(&user_buffer[i], 10, &value); |
892 | if (len < 0) { | 904 | if (len < 0) |
893 | return len; | 905 | return len; |
894 | } | 906 | |
895 | i += len; | 907 | i += len; |
896 | if (value < 14 + 20 + 8) | 908 | if (value < 14 + 20 + 8) |
897 | value = 14 + 20 + 8; | 909 | value = 14 + 20 + 8; |
@@ -908,9 +920,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
908 | 920 | ||
909 | if (!strcmp(name, "pkt_size")) { | 921 | if (!strcmp(name, "pkt_size")) { |
910 | len = num_arg(&user_buffer[i], 10, &value); | 922 | len = num_arg(&user_buffer[i], 10, &value); |
911 | if (len < 0) { | 923 | if (len < 0) |
912 | return len; | 924 | return len; |
913 | } | 925 | |
914 | i += len; | 926 | i += len; |
915 | if (value < 14 + 20 + 8) | 927 | if (value < 14 + 20 + 8) |
916 | value = 14 + 20 + 8; | 928 | value = 14 + 20 + 8; |
@@ -925,9 +937,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
925 | 937 | ||
926 | if (!strcmp(name, "debug")) { | 938 | if (!strcmp(name, "debug")) { |
927 | len = num_arg(&user_buffer[i], 10, &value); | 939 | len = num_arg(&user_buffer[i], 10, &value); |
928 | if (len < 0) { | 940 | if (len < 0) |
929 | return len; | 941 | return len; |
930 | } | 942 | |
931 | i += len; | 943 | i += len; |
932 | debug = value; | 944 | debug = value; |
933 | sprintf(pg_result, "OK: debug=%u", debug); | 945 | sprintf(pg_result, "OK: debug=%u", debug); |
@@ -936,9 +948,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
936 | 948 | ||
937 | if (!strcmp(name, "frags")) { | 949 | if (!strcmp(name, "frags")) { |
938 | len = num_arg(&user_buffer[i], 10, &value); | 950 | len = num_arg(&user_buffer[i], 10, &value); |
939 | if (len < 0) { | 951 | if (len < 0) |
940 | return len; | 952 | return len; |
941 | } | 953 | |
942 | i += len; | 954 | i += len; |
943 | pkt_dev->nfrags = value; | 955 | pkt_dev->nfrags = value; |
944 | sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags); | 956 | sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags); |
@@ -946,26 +958,24 @@ static ssize_t pktgen_if_write(struct file *file, | |||
946 | } | 958 | } |
947 | if (!strcmp(name, "delay")) { | 959 | if (!strcmp(name, "delay")) { |
948 | len = num_arg(&user_buffer[i], 10, &value); | 960 | len = num_arg(&user_buffer[i], 10, &value); |
949 | if (len < 0) { | 961 | if (len < 0) |
950 | return len; | 962 | return len; |
951 | } | 963 | |
952 | i += len; | 964 | i += len; |
953 | if (value == 0x7FFFFFFF) { | 965 | if (value == 0x7FFFFFFF) |
954 | pkt_dev->delay_us = 0x7FFFFFFF; | 966 | pkt_dev->delay = ULLONG_MAX; |
955 | pkt_dev->delay_ns = 0; | 967 | else |
956 | } else { | 968 | pkt_dev->delay = (u64)value * NSEC_PER_USEC; |
957 | pkt_dev->delay_us = value / 1000; | 969 | |
958 | pkt_dev->delay_ns = value % 1000; | 970 | sprintf(pg_result, "OK: delay=%llu", |
959 | } | 971 | (unsigned long long) pkt_dev->delay); |
960 | sprintf(pg_result, "OK: delay=%u", | ||
961 | 1000 * pkt_dev->delay_us + pkt_dev->delay_ns); | ||
962 | return count; | 972 | return count; |
963 | } | 973 | } |
964 | if (!strcmp(name, "udp_src_min")) { | 974 | if (!strcmp(name, "udp_src_min")) { |
965 | len = num_arg(&user_buffer[i], 10, &value); | 975 | len = num_arg(&user_buffer[i], 10, &value); |
966 | if (len < 0) { | 976 | if (len < 0) |
967 | return len; | 977 | return len; |
968 | } | 978 | |
969 | i += len; | 979 | i += len; |
970 | if (value != pkt_dev->udp_src_min) { | 980 | if (value != pkt_dev->udp_src_min) { |
971 | pkt_dev->udp_src_min = value; | 981 | pkt_dev->udp_src_min = value; |
@@ -976,9 +986,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
976 | } | 986 | } |
977 | if (!strcmp(name, "udp_dst_min")) { | 987 | if (!strcmp(name, "udp_dst_min")) { |
978 | len = num_arg(&user_buffer[i], 10, &value); | 988 | len = num_arg(&user_buffer[i], 10, &value); |
979 | if (len < 0) { | 989 | if (len < 0) |
980 | return len; | 990 | return len; |
981 | } | 991 | |
982 | i += len; | 992 | i += len; |
983 | if (value != pkt_dev->udp_dst_min) { | 993 | if (value != pkt_dev->udp_dst_min) { |
984 | pkt_dev->udp_dst_min = value; | 994 | pkt_dev->udp_dst_min = value; |
@@ -989,9 +999,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
989 | } | 999 | } |
990 | if (!strcmp(name, "udp_src_max")) { | 1000 | if (!strcmp(name, "udp_src_max")) { |
991 | len = num_arg(&user_buffer[i], 10, &value); | 1001 | len = num_arg(&user_buffer[i], 10, &value); |
992 | if (len < 0) { | 1002 | if (len < 0) |
993 | return len; | 1003 | return len; |
994 | } | 1004 | |
995 | i += len; | 1005 | i += len; |
996 | if (value != pkt_dev->udp_src_max) { | 1006 | if (value != pkt_dev->udp_src_max) { |
997 | pkt_dev->udp_src_max = value; | 1007 | pkt_dev->udp_src_max = value; |
@@ -1002,9 +1012,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1002 | } | 1012 | } |
1003 | if (!strcmp(name, "udp_dst_max")) { | 1013 | if (!strcmp(name, "udp_dst_max")) { |
1004 | len = num_arg(&user_buffer[i], 10, &value); | 1014 | len = num_arg(&user_buffer[i], 10, &value); |
1005 | if (len < 0) { | 1015 | if (len < 0) |
1006 | return len; | 1016 | return len; |
1007 | } | 1017 | |
1008 | i += len; | 1018 | i += len; |
1009 | if (value != pkt_dev->udp_dst_max) { | 1019 | if (value != pkt_dev->udp_dst_max) { |
1010 | pkt_dev->udp_dst_max = value; | 1020 | pkt_dev->udp_dst_max = value; |
@@ -1015,9 +1025,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1015 | } | 1025 | } |
1016 | if (!strcmp(name, "clone_skb")) { | 1026 | if (!strcmp(name, "clone_skb")) { |
1017 | len = num_arg(&user_buffer[i], 10, &value); | 1027 | len = num_arg(&user_buffer[i], 10, &value); |
1018 | if (len < 0) { | 1028 | if (len < 0) |
1019 | return len; | 1029 | return len; |
1020 | } | 1030 | |
1021 | i += len; | 1031 | i += len; |
1022 | pkt_dev->clone_skb = value; | 1032 | pkt_dev->clone_skb = value; |
1023 | 1033 | ||
@@ -1026,9 +1036,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1026 | } | 1036 | } |
1027 | if (!strcmp(name, "count")) { | 1037 | if (!strcmp(name, "count")) { |
1028 | len = num_arg(&user_buffer[i], 10, &value); | 1038 | len = num_arg(&user_buffer[i], 10, &value); |
1029 | if (len < 0) { | 1039 | if (len < 0) |
1030 | return len; | 1040 | return len; |
1031 | } | 1041 | |
1032 | i += len; | 1042 | i += len; |
1033 | pkt_dev->count = value; | 1043 | pkt_dev->count = value; |
1034 | sprintf(pg_result, "OK: count=%llu", | 1044 | sprintf(pg_result, "OK: count=%llu", |
@@ -1037,9 +1047,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1037 | } | 1047 | } |
1038 | if (!strcmp(name, "src_mac_count")) { | 1048 | if (!strcmp(name, "src_mac_count")) { |
1039 | len = num_arg(&user_buffer[i], 10, &value); | 1049 | len = num_arg(&user_buffer[i], 10, &value); |
1040 | if (len < 0) { | 1050 | if (len < 0) |
1041 | return len; | 1051 | return len; |
1042 | } | 1052 | |
1043 | i += len; | 1053 | i += len; |
1044 | if (pkt_dev->src_mac_count != value) { | 1054 | if (pkt_dev->src_mac_count != value) { |
1045 | pkt_dev->src_mac_count = value; | 1055 | pkt_dev->src_mac_count = value; |
@@ -1051,9 +1061,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1051 | } | 1061 | } |
1052 | if (!strcmp(name, "dst_mac_count")) { | 1062 | if (!strcmp(name, "dst_mac_count")) { |
1053 | len = num_arg(&user_buffer[i], 10, &value); | 1063 | len = num_arg(&user_buffer[i], 10, &value); |
1054 | if (len < 0) { | 1064 | if (len < 0) |
1055 | return len; | 1065 | return len; |
1056 | } | 1066 | |
1057 | i += len; | 1067 | i += len; |
1058 | if (pkt_dev->dst_mac_count != value) { | 1068 | if (pkt_dev->dst_mac_count != value) { |
1059 | pkt_dev->dst_mac_count = value; | 1069 | pkt_dev->dst_mac_count = value; |
@@ -1067,9 +1077,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1067 | char f[32]; | 1077 | char f[32]; |
1068 | memset(f, 0, 32); | 1078 | memset(f, 0, 32); |
1069 | len = strn_len(&user_buffer[i], sizeof(f) - 1); | 1079 | len = strn_len(&user_buffer[i], sizeof(f) - 1); |
1070 | if (len < 0) { | 1080 | if (len < 0) |
1071 | return len; | 1081 | return len; |
1072 | } | 1082 | |
1073 | if (copy_from_user(f, &user_buffer[i], len)) | 1083 | if (copy_from_user(f, &user_buffer[i], len)) |
1074 | return -EFAULT; | 1084 | return -EFAULT; |
1075 | i += len; | 1085 | i += len; |
@@ -1168,9 +1178,8 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1168 | } | 1178 | } |
1169 | if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) { | 1179 | if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) { |
1170 | len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1); | 1180 | len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1); |
1171 | if (len < 0) { | 1181 | if (len < 0) |
1172 | return len; | 1182 | return len; |
1173 | } | ||
1174 | 1183 | ||
1175 | if (copy_from_user(buf, &user_buffer[i], len)) | 1184 | if (copy_from_user(buf, &user_buffer[i], len)) |
1176 | return -EFAULT; | 1185 | return -EFAULT; |
@@ -1190,9 +1199,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1190 | } | 1199 | } |
1191 | if (!strcmp(name, "dst_max")) { | 1200 | if (!strcmp(name, "dst_max")) { |
1192 | len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1); | 1201 | len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1); |
1193 | if (len < 0) { | 1202 | if (len < 0) |
1194 | return len; | 1203 | return len; |
1195 | } | 1204 | |
1196 | 1205 | ||
1197 | if (copy_from_user(buf, &user_buffer[i], len)) | 1206 | if (copy_from_user(buf, &user_buffer[i], len)) |
1198 | return -EFAULT; | 1207 | return -EFAULT; |
@@ -1303,9 +1312,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1303 | } | 1312 | } |
1304 | if (!strcmp(name, "src_min")) { | 1313 | if (!strcmp(name, "src_min")) { |
1305 | len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1); | 1314 | len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1); |
1306 | if (len < 0) { | 1315 | if (len < 0) |
1307 | return len; | 1316 | return len; |
1308 | } | 1317 | |
1309 | if (copy_from_user(buf, &user_buffer[i], len)) | 1318 | if (copy_from_user(buf, &user_buffer[i], len)) |
1310 | return -EFAULT; | 1319 | return -EFAULT; |
1311 | buf[len] = 0; | 1320 | buf[len] = 0; |
@@ -1324,9 +1333,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1324 | } | 1333 | } |
1325 | if (!strcmp(name, "src_max")) { | 1334 | if (!strcmp(name, "src_max")) { |
1326 | len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1); | 1335 | len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1); |
1327 | if (len < 0) { | 1336 | if (len < 0) |
1328 | return len; | 1337 | return len; |
1329 | } | 1338 | |
1330 | if (copy_from_user(buf, &user_buffer[i], len)) | 1339 | if (copy_from_user(buf, &user_buffer[i], len)) |
1331 | return -EFAULT; | 1340 | return -EFAULT; |
1332 | buf[len] = 0; | 1341 | buf[len] = 0; |
@@ -1350,9 +1359,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1350 | memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN); | 1359 | memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN); |
1351 | 1360 | ||
1352 | len = strn_len(&user_buffer[i], sizeof(valstr) - 1); | 1361 | len = strn_len(&user_buffer[i], sizeof(valstr) - 1); |
1353 | if (len < 0) { | 1362 | if (len < 0) |
1354 | return len; | 1363 | return len; |
1355 | } | 1364 | |
1356 | memset(valstr, 0, sizeof(valstr)); | 1365 | memset(valstr, 0, sizeof(valstr)); |
1357 | if (copy_from_user(valstr, &user_buffer[i], len)) | 1366 | if (copy_from_user(valstr, &user_buffer[i], len)) |
1358 | return -EFAULT; | 1367 | return -EFAULT; |
@@ -1392,9 +1401,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1392 | memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN); | 1401 | memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN); |
1393 | 1402 | ||
1394 | len = strn_len(&user_buffer[i], sizeof(valstr) - 1); | 1403 | len = strn_len(&user_buffer[i], sizeof(valstr) - 1); |
1395 | if (len < 0) { | 1404 | if (len < 0) |
1396 | return len; | 1405 | return len; |
1397 | } | 1406 | |
1398 | memset(valstr, 0, sizeof(valstr)); | 1407 | memset(valstr, 0, sizeof(valstr)); |
1399 | if (copy_from_user(valstr, &user_buffer[i], len)) | 1408 | if (copy_from_user(valstr, &user_buffer[i], len)) |
1400 | return -EFAULT; | 1409 | return -EFAULT; |
@@ -1435,9 +1444,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1435 | 1444 | ||
1436 | if (!strcmp(name, "flows")) { | 1445 | if (!strcmp(name, "flows")) { |
1437 | len = num_arg(&user_buffer[i], 10, &value); | 1446 | len = num_arg(&user_buffer[i], 10, &value); |
1438 | if (len < 0) { | 1447 | if (len < 0) |
1439 | return len; | 1448 | return len; |
1440 | } | 1449 | |
1441 | i += len; | 1450 | i += len; |
1442 | if (value > MAX_CFLOWS) | 1451 | if (value > MAX_CFLOWS) |
1443 | value = MAX_CFLOWS; | 1452 | value = MAX_CFLOWS; |
@@ -1449,9 +1458,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1449 | 1458 | ||
1450 | if (!strcmp(name, "flowlen")) { | 1459 | if (!strcmp(name, "flowlen")) { |
1451 | len = num_arg(&user_buffer[i], 10, &value); | 1460 | len = num_arg(&user_buffer[i], 10, &value); |
1452 | if (len < 0) { | 1461 | if (len < 0) |
1453 | return len; | 1462 | return len; |
1454 | } | 1463 | |
1455 | i += len; | 1464 | i += len; |
1456 | pkt_dev->lflow = value; | 1465 | pkt_dev->lflow = value; |
1457 | sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow); | 1466 | sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow); |
@@ -1460,9 +1469,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1460 | 1469 | ||
1461 | if (!strcmp(name, "queue_map_min")) { | 1470 | if (!strcmp(name, "queue_map_min")) { |
1462 | len = num_arg(&user_buffer[i], 5, &value); | 1471 | len = num_arg(&user_buffer[i], 5, &value); |
1463 | if (len < 0) { | 1472 | if (len < 0) |
1464 | return len; | 1473 | return len; |
1465 | } | 1474 | |
1466 | i += len; | 1475 | i += len; |
1467 | pkt_dev->queue_map_min = value; | 1476 | pkt_dev->queue_map_min = value; |
1468 | sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min); | 1477 | sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min); |
@@ -1471,9 +1480,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1471 | 1480 | ||
1472 | if (!strcmp(name, "queue_map_max")) { | 1481 | if (!strcmp(name, "queue_map_max")) { |
1473 | len = num_arg(&user_buffer[i], 5, &value); | 1482 | len = num_arg(&user_buffer[i], 5, &value); |
1474 | if (len < 0) { | 1483 | if (len < 0) |
1475 | return len; | 1484 | return len; |
1476 | } | 1485 | |
1477 | i += len; | 1486 | i += len; |
1478 | pkt_dev->queue_map_max = value; | 1487 | pkt_dev->queue_map_max = value; |
1479 | sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max); | 1488 | sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max); |
@@ -1505,9 +1514,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1505 | 1514 | ||
1506 | if (!strcmp(name, "vlan_id")) { | 1515 | if (!strcmp(name, "vlan_id")) { |
1507 | len = num_arg(&user_buffer[i], 4, &value); | 1516 | len = num_arg(&user_buffer[i], 4, &value); |
1508 | if (len < 0) { | 1517 | if (len < 0) |
1509 | return len; | 1518 | return len; |
1510 | } | 1519 | |
1511 | i += len; | 1520 | i += len; |
1512 | if (value <= 4095) { | 1521 | if (value <= 4095) { |
1513 | pkt_dev->vlan_id = value; /* turn on VLAN */ | 1522 | pkt_dev->vlan_id = value; /* turn on VLAN */ |
@@ -1532,9 +1541,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1532 | 1541 | ||
1533 | if (!strcmp(name, "vlan_p")) { | 1542 | if (!strcmp(name, "vlan_p")) { |
1534 | len = num_arg(&user_buffer[i], 1, &value); | 1543 | len = num_arg(&user_buffer[i], 1, &value); |
1535 | if (len < 0) { | 1544 | if (len < 0) |
1536 | return len; | 1545 | return len; |
1537 | } | 1546 | |
1538 | i += len; | 1547 | i += len; |
1539 | if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) { | 1548 | if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) { |
1540 | pkt_dev->vlan_p = value; | 1549 | pkt_dev->vlan_p = value; |
@@ -1547,9 +1556,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1547 | 1556 | ||
1548 | if (!strcmp(name, "vlan_cfi")) { | 1557 | if (!strcmp(name, "vlan_cfi")) { |
1549 | len = num_arg(&user_buffer[i], 1, &value); | 1558 | len = num_arg(&user_buffer[i], 1, &value); |
1550 | if (len < 0) { | 1559 | if (len < 0) |
1551 | return len; | 1560 | return len; |
1552 | } | 1561 | |
1553 | i += len; | 1562 | i += len; |
1554 | if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) { | 1563 | if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) { |
1555 | pkt_dev->vlan_cfi = value; | 1564 | pkt_dev->vlan_cfi = value; |
@@ -1562,9 +1571,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1562 | 1571 | ||
1563 | if (!strcmp(name, "svlan_id")) { | 1572 | if (!strcmp(name, "svlan_id")) { |
1564 | len = num_arg(&user_buffer[i], 4, &value); | 1573 | len = num_arg(&user_buffer[i], 4, &value); |
1565 | if (len < 0) { | 1574 | if (len < 0) |
1566 | return len; | 1575 | return len; |
1567 | } | 1576 | |
1568 | i += len; | 1577 | i += len; |
1569 | if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) { | 1578 | if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) { |
1570 | pkt_dev->svlan_id = value; /* turn on SVLAN */ | 1579 | pkt_dev->svlan_id = value; /* turn on SVLAN */ |
@@ -1589,9 +1598,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1589 | 1598 | ||
1590 | if (!strcmp(name, "svlan_p")) { | 1599 | if (!strcmp(name, "svlan_p")) { |
1591 | len = num_arg(&user_buffer[i], 1, &value); | 1600 | len = num_arg(&user_buffer[i], 1, &value); |
1592 | if (len < 0) { | 1601 | if (len < 0) |
1593 | return len; | 1602 | return len; |
1594 | } | 1603 | |
1595 | i += len; | 1604 | i += len; |
1596 | if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) { | 1605 | if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) { |
1597 | pkt_dev->svlan_p = value; | 1606 | pkt_dev->svlan_p = value; |
@@ -1604,9 +1613,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1604 | 1613 | ||
1605 | if (!strcmp(name, "svlan_cfi")) { | 1614 | if (!strcmp(name, "svlan_cfi")) { |
1606 | len = num_arg(&user_buffer[i], 1, &value); | 1615 | len = num_arg(&user_buffer[i], 1, &value); |
1607 | if (len < 0) { | 1616 | if (len < 0) |
1608 | return len; | 1617 | return len; |
1609 | } | 1618 | |
1610 | i += len; | 1619 | i += len; |
1611 | if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) { | 1620 | if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) { |
1612 | pkt_dev->svlan_cfi = value; | 1621 | pkt_dev->svlan_cfi = value; |
@@ -1620,9 +1629,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1620 | if (!strcmp(name, "tos")) { | 1629 | if (!strcmp(name, "tos")) { |
1621 | __u32 tmp_value = 0; | 1630 | __u32 tmp_value = 0; |
1622 | len = hex32_arg(&user_buffer[i], 2, &tmp_value); | 1631 | len = hex32_arg(&user_buffer[i], 2, &tmp_value); |
1623 | if (len < 0) { | 1632 | if (len < 0) |
1624 | return len; | 1633 | return len; |
1625 | } | 1634 | |
1626 | i += len; | 1635 | i += len; |
1627 | if (len == 2) { | 1636 | if (len == 2) { |
1628 | pkt_dev->tos = tmp_value; | 1637 | pkt_dev->tos = tmp_value; |
@@ -1636,9 +1645,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1636 | if (!strcmp(name, "traffic_class")) { | 1645 | if (!strcmp(name, "traffic_class")) { |
1637 | __u32 tmp_value = 0; | 1646 | __u32 tmp_value = 0; |
1638 | len = hex32_arg(&user_buffer[i], 2, &tmp_value); | 1647 | len = hex32_arg(&user_buffer[i], 2, &tmp_value); |
1639 | if (len < 0) { | 1648 | if (len < 0) |
1640 | return len; | 1649 | return len; |
1641 | } | 1650 | |
1642 | i += len; | 1651 | i += len; |
1643 | if (len == 2) { | 1652 | if (len == 2) { |
1644 | pkt_dev->traffic_class = tmp_value; | 1653 | pkt_dev->traffic_class = tmp_value; |
@@ -1670,7 +1679,7 @@ static const struct file_operations pktgen_if_fops = { | |||
1670 | static int pktgen_thread_show(struct seq_file *seq, void *v) | 1679 | static int pktgen_thread_show(struct seq_file *seq, void *v) |
1671 | { | 1680 | { |
1672 | struct pktgen_thread *t = seq->private; | 1681 | struct pktgen_thread *t = seq->private; |
1673 | struct pktgen_dev *pkt_dev; | 1682 | const struct pktgen_dev *pkt_dev; |
1674 | 1683 | ||
1675 | BUG_ON(!t); | 1684 | BUG_ON(!t); |
1676 | 1685 | ||
@@ -1873,8 +1882,10 @@ static void pktgen_change_name(struct net_device *dev) | |||
1873 | 1882 | ||
1874 | remove_proc_entry(pkt_dev->entry->name, pg_proc_dir); | 1883 | remove_proc_entry(pkt_dev->entry->name, pg_proc_dir); |
1875 | 1884 | ||
1876 | pkt_dev->entry = create_proc_entry(dev->name, 0600, | 1885 | pkt_dev->entry = proc_create_data(dev->name, 0600, |
1877 | pg_proc_dir); | 1886 | pg_proc_dir, |
1887 | &pktgen_if_fops, | ||
1888 | pkt_dev); | ||
1878 | if (!pkt_dev->entry) | 1889 | if (!pkt_dev->entry) |
1879 | printk(KERN_ERR "pktgen: can't move proc " | 1890 | printk(KERN_ERR "pktgen: can't move proc " |
1880 | " entry for '%s'\n", dev->name); | 1891 | " entry for '%s'\n", dev->name); |
@@ -1908,13 +1919,14 @@ static int pktgen_device_event(struct notifier_block *unused, | |||
1908 | return NOTIFY_DONE; | 1919 | return NOTIFY_DONE; |
1909 | } | 1920 | } |
1910 | 1921 | ||
1911 | static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, const char *ifname) | 1922 | static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, |
1923 | const char *ifname) | ||
1912 | { | 1924 | { |
1913 | char b[IFNAMSIZ+5]; | 1925 | char b[IFNAMSIZ+5]; |
1914 | int i = 0; | 1926 | int i = 0; |
1915 | 1927 | ||
1916 | for(i=0; ifname[i] != '@'; i++) { | 1928 | for (i = 0; ifname[i] != '@'; i++) { |
1917 | if(i == IFNAMSIZ) | 1929 | if (i == IFNAMSIZ) |
1918 | break; | 1930 | break; |
1919 | 1931 | ||
1920 | b[i] = ifname[i]; | 1932 | b[i] = ifname[i]; |
@@ -1981,7 +1993,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) | |||
1981 | printk(KERN_WARNING "pktgen: WARNING: Requested " | 1993 | printk(KERN_WARNING "pktgen: WARNING: Requested " |
1982 | "queue_map_min (zero-based) (%d) exceeds valid range " | 1994 | "queue_map_min (zero-based) (%d) exceeds valid range " |
1983 | "[0 - %d] for (%d) queues on %s, resetting\n", | 1995 | "[0 - %d] for (%d) queues on %s, resetting\n", |
1984 | pkt_dev->queue_map_min, (ntxq ?: 1)- 1, ntxq, | 1996 | pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq, |
1985 | pkt_dev->odev->name); | 1997 | pkt_dev->odev->name); |
1986 | pkt_dev->queue_map_min = ntxq - 1; | 1998 | pkt_dev->queue_map_min = ntxq - 1; |
1987 | } | 1999 | } |
@@ -1989,7 +2001,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) | |||
1989 | printk(KERN_WARNING "pktgen: WARNING: Requested " | 2001 | printk(KERN_WARNING "pktgen: WARNING: Requested " |
1990 | "queue_map_max (zero-based) (%d) exceeds valid range " | 2002 | "queue_map_max (zero-based) (%d) exceeds valid range " |
1991 | "[0 - %d] for (%d) queues on %s, resetting\n", | 2003 | "[0 - %d] for (%d) queues on %s, resetting\n", |
1992 | pkt_dev->queue_map_max, (ntxq ?: 1)- 1, ntxq, | 2004 | pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq, |
1993 | pkt_dev->odev->name); | 2005 | pkt_dev->odev->name); |
1994 | pkt_dev->queue_map_max = ntxq - 1; | 2006 | pkt_dev->queue_map_max = ntxq - 1; |
1995 | } | 2007 | } |
@@ -2030,7 +2042,8 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) | |||
2030 | */ | 2042 | */ |
2031 | 2043 | ||
2032 | rcu_read_lock(); | 2044 | rcu_read_lock(); |
2033 | if ((idev = __in6_dev_get(pkt_dev->odev)) != NULL) { | 2045 | idev = __in6_dev_get(pkt_dev->odev); |
2046 | if (idev) { | ||
2034 | struct inet6_ifaddr *ifp; | 2047 | struct inet6_ifaddr *ifp; |
2035 | 2048 | ||
2036 | read_lock_bh(&idev->lock); | 2049 | read_lock_bh(&idev->lock); |
@@ -2089,27 +2102,40 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) | |||
2089 | pkt_dev->nflows = 0; | 2102 | pkt_dev->nflows = 0; |
2090 | } | 2103 | } |
2091 | 2104 | ||
2092 | static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us) | 2105 | |
2106 | static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) | ||
2093 | { | 2107 | { |
2094 | __u64 start; | 2108 | ktime_t start; |
2095 | __u64 now; | 2109 | s32 remaining; |
2110 | struct hrtimer_sleeper t; | ||
2111 | |||
2112 | hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
2113 | hrtimer_set_expires(&t.timer, spin_until); | ||
2114 | |||
2115 | remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer)); | ||
2116 | if (remaining <= 0) | ||
2117 | return; | ||
2096 | 2118 | ||
2097 | start = now = getCurUs(); | 2119 | start = ktime_now(); |
2098 | while (now < spin_until_us) { | 2120 | if (remaining < 100) |
2099 | /* TODO: optimize sleeping behavior */ | 2121 | udelay(remaining); /* really small just spin */ |
2100 | if (spin_until_us - now > jiffies_to_usecs(1) + 1) | 2122 | else { |
2101 | schedule_timeout_interruptible(1); | 2123 | /* see do_nanosleep */ |
2102 | else if (spin_until_us - now > 100) { | 2124 | hrtimer_init_sleeper(&t, current); |
2103 | if (!pkt_dev->running) | 2125 | do { |
2104 | return; | 2126 | set_current_state(TASK_INTERRUPTIBLE); |
2105 | if (need_resched()) | 2127 | hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); |
2128 | if (!hrtimer_active(&t.timer)) | ||
2129 | t.task = NULL; | ||
2130 | |||
2131 | if (likely(t.task)) | ||
2106 | schedule(); | 2132 | schedule(); |
2107 | } | ||
2108 | 2133 | ||
2109 | now = getCurUs(); | 2134 | hrtimer_cancel(&t.timer); |
2135 | } while (t.task && pkt_dev->running && !signal_pending(current)); | ||
2136 | __set_current_state(TASK_RUNNING); | ||
2110 | } | 2137 | } |
2111 | 2138 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start)); | |
2112 | pkt_dev->idle_acc += now - start; | ||
2113 | } | 2139 | } |
2114 | 2140 | ||
2115 | static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) | 2141 | static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) |
@@ -2120,13 +2146,9 @@ static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) | |||
2120 | pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev); | 2146 | pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev); |
2121 | } | 2147 | } |
2122 | 2148 | ||
2123 | static inline int f_seen(struct pktgen_dev *pkt_dev, int flow) | 2149 | static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow) |
2124 | { | 2150 | { |
2125 | 2151 | return !!(pkt_dev->flows[flow].flags & F_INIT); | |
2126 | if (pkt_dev->flows[flow].flags & F_INIT) | ||
2127 | return 1; | ||
2128 | else | ||
2129 | return 0; | ||
2130 | } | 2152 | } |
2131 | 2153 | ||
2132 | static inline int f_pick(struct pktgen_dev *pkt_dev) | 2154 | static inline int f_pick(struct pktgen_dev *pkt_dev) |
@@ -2174,7 +2196,7 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) | |||
2174 | if (x) { | 2196 | if (x) { |
2175 | pkt_dev->flows[flow].x = x; | 2197 | pkt_dev->flows[flow].x = x; |
2176 | set_pkt_overhead(pkt_dev); | 2198 | set_pkt_overhead(pkt_dev); |
2177 | pkt_dev->pkt_overhead+=x->props.header_len; | 2199 | pkt_dev->pkt_overhead += x->props.header_len; |
2178 | } | 2200 | } |
2179 | 2201 | ||
2180 | } | 2202 | } |
@@ -2313,18 +2335,18 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) | |||
2313 | 2335 | ||
2314 | if (!(pkt_dev->flags & F_IPV6)) { | 2336 | if (!(pkt_dev->flags & F_IPV6)) { |
2315 | 2337 | ||
2316 | if ((imn = ntohl(pkt_dev->saddr_min)) < (imx = | 2338 | imn = ntohl(pkt_dev->saddr_min); |
2317 | ntohl(pkt_dev-> | 2339 | imx = ntohl(pkt_dev->saddr_max); |
2318 | saddr_max))) { | 2340 | if (imn < imx) { |
2319 | __u32 t; | 2341 | __u32 t; |
2320 | if (pkt_dev->flags & F_IPSRC_RND) | 2342 | if (pkt_dev->flags & F_IPSRC_RND) |
2321 | t = random32() % (imx - imn) + imn; | 2343 | t = random32() % (imx - imn) + imn; |
2322 | else { | 2344 | else { |
2323 | t = ntohl(pkt_dev->cur_saddr); | 2345 | t = ntohl(pkt_dev->cur_saddr); |
2324 | t++; | 2346 | t++; |
2325 | if (t > imx) { | 2347 | if (t > imx) |
2326 | t = imn; | 2348 | t = imn; |
2327 | } | 2349 | |
2328 | } | 2350 | } |
2329 | pkt_dev->cur_saddr = htonl(t); | 2351 | pkt_dev->cur_saddr = htonl(t); |
2330 | } | 2352 | } |
@@ -2435,14 +2457,14 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev) | |||
2435 | if (err) | 2457 | if (err) |
2436 | goto error; | 2458 | goto error; |
2437 | 2459 | ||
2438 | x->curlft.bytes +=skb->len; | 2460 | x->curlft.bytes += skb->len; |
2439 | x->curlft.packets++; | 2461 | x->curlft.packets++; |
2440 | error: | 2462 | error: |
2441 | spin_unlock(&x->lock); | 2463 | spin_unlock(&x->lock); |
2442 | return err; | 2464 | return err; |
2443 | } | 2465 | } |
2444 | 2466 | ||
2445 | static inline void free_SAs(struct pktgen_dev *pkt_dev) | 2467 | static void free_SAs(struct pktgen_dev *pkt_dev) |
2446 | { | 2468 | { |
2447 | if (pkt_dev->cflows) { | 2469 | if (pkt_dev->cflows) { |
2448 | /* let go of the SAs if we have them */ | 2470 | /* let go of the SAs if we have them */ |
@@ -2457,7 +2479,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev) | |||
2457 | } | 2479 | } |
2458 | } | 2480 | } |
2459 | 2481 | ||
2460 | static inline int process_ipsec(struct pktgen_dev *pkt_dev, | 2482 | static int process_ipsec(struct pktgen_dev *pkt_dev, |
2461 | struct sk_buff *skb, __be16 protocol) | 2483 | struct sk_buff *skb, __be16 protocol) |
2462 | { | 2484 | { |
2463 | if (pkt_dev->flags & F_IPSEC_ON) { | 2485 | if (pkt_dev->flags & F_IPSEC_ON) { |
@@ -2467,11 +2489,11 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev, | |||
2467 | int ret; | 2489 | int ret; |
2468 | __u8 *eth; | 2490 | __u8 *eth; |
2469 | nhead = x->props.header_len - skb_headroom(skb); | 2491 | nhead = x->props.header_len - skb_headroom(skb); |
2470 | if (nhead >0) { | 2492 | if (nhead > 0) { |
2471 | ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); | 2493 | ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); |
2472 | if (ret < 0) { | 2494 | if (ret < 0) { |
2473 | printk(KERN_ERR "Error expanding " | 2495 | printk(KERN_ERR "Error expanding " |
2474 | "ipsec packet %d\n",ret); | 2496 | "ipsec packet %d\n", ret); |
2475 | goto err; | 2497 | goto err; |
2476 | } | 2498 | } |
2477 | } | 2499 | } |
@@ -2481,13 +2503,13 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev, | |||
2481 | ret = pktgen_output_ipsec(skb, pkt_dev); | 2503 | ret = pktgen_output_ipsec(skb, pkt_dev); |
2482 | if (ret) { | 2504 | if (ret) { |
2483 | printk(KERN_ERR "Error creating ipsec " | 2505 | printk(KERN_ERR "Error creating ipsec " |
2484 | "packet %d\n",ret); | 2506 | "packet %d\n", ret); |
2485 | goto err; | 2507 | goto err; |
2486 | } | 2508 | } |
2487 | /* restore ll */ | 2509 | /* restore ll */ |
2488 | eth = (__u8 *) skb_push(skb, ETH_HLEN); | 2510 | eth = (__u8 *) skb_push(skb, ETH_HLEN); |
2489 | memcpy(eth, pkt_dev->hh, 12); | 2511 | memcpy(eth, pkt_dev->hh, 12); |
2490 | *(u16 *) & eth[12] = protocol; | 2512 | *(u16 *) &eth[12] = protocol; |
2491 | } | 2513 | } |
2492 | } | 2514 | } |
2493 | return 1; | 2515 | return 1; |
@@ -2500,9 +2522,9 @@ err: | |||
2500 | static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) | 2522 | static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) |
2501 | { | 2523 | { |
2502 | unsigned i; | 2524 | unsigned i; |
2503 | for (i = 0; i < pkt_dev->nr_labels; i++) { | 2525 | for (i = 0; i < pkt_dev->nr_labels; i++) |
2504 | *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; | 2526 | *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; |
2505 | } | 2527 | |
2506 | mpls--; | 2528 | mpls--; |
2507 | *mpls |= MPLS_STACK_BOTTOM; | 2529 | *mpls |= MPLS_STACK_BOTTOM; |
2508 | } | 2530 | } |
@@ -2543,8 +2565,9 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2543 | mod_cur_headers(pkt_dev); | 2565 | mod_cur_headers(pkt_dev); |
2544 | 2566 | ||
2545 | datalen = (odev->hard_header_len + 16) & ~0xf; | 2567 | datalen = (odev->hard_header_len + 16) & ~0xf; |
2546 | skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen + | 2568 | skb = __netdev_alloc_skb(odev, |
2547 | pkt_dev->pkt_overhead, GFP_ATOMIC); | 2569 | pkt_dev->cur_pkt_size + 64 |
2570 | + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT); | ||
2548 | if (!skb) { | 2571 | if (!skb) { |
2549 | sprintf(pkt_dev->result, "No memory"); | 2572 | sprintf(pkt_dev->result, "No memory"); |
2550 | return NULL; | 2573 | return NULL; |
@@ -2668,8 +2691,9 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2668 | } | 2691 | } |
2669 | } | 2692 | } |
2670 | 2693 | ||
2671 | /* Stamp the time, and sequence number, convert them to network byte order */ | 2694 | /* Stamp the time, and sequence number, |
2672 | 2695 | * convert them to network byte order | |
2696 | */ | ||
2673 | if (pgh) { | 2697 | if (pgh) { |
2674 | struct timeval timestamp; | 2698 | struct timeval timestamp; |
2675 | 2699 | ||
@@ -2882,8 +2906,9 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
2882 | queue_map = pkt_dev->cur_queue_map; | 2906 | queue_map = pkt_dev->cur_queue_map; |
2883 | mod_cur_headers(pkt_dev); | 2907 | mod_cur_headers(pkt_dev); |
2884 | 2908 | ||
2885 | skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + | 2909 | skb = __netdev_alloc_skb(odev, |
2886 | pkt_dev->pkt_overhead, GFP_ATOMIC); | 2910 | pkt_dev->cur_pkt_size + 64 |
2911 | + 16 + pkt_dev->pkt_overhead, GFP_NOWAIT); | ||
2887 | if (!skb) { | 2912 | if (!skb) { |
2888 | sprintf(pkt_dev->result, "No memory"); | 2913 | sprintf(pkt_dev->result, "No memory"); |
2889 | return NULL; | 2914 | return NULL; |
@@ -2922,7 +2947,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
2922 | udph = udp_hdr(skb); | 2947 | udph = udp_hdr(skb); |
2923 | 2948 | ||
2924 | memcpy(eth, pkt_dev->hh, 12); | 2949 | memcpy(eth, pkt_dev->hh, 12); |
2925 | *(__be16 *) & eth[12] = protocol; | 2950 | *(__be16 *) &eth[12] = protocol; |
2926 | 2951 | ||
2927 | /* Eth + IPh + UDPh + mpls */ | 2952 | /* Eth + IPh + UDPh + mpls */ |
2928 | datalen = pkt_dev->cur_pkt_size - 14 - | 2953 | datalen = pkt_dev->cur_pkt_size - 14 - |
@@ -3016,8 +3041,10 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
3016 | } | 3041 | } |
3017 | } | 3042 | } |
3018 | 3043 | ||
3019 | /* Stamp the time, and sequence number, convert them to network byte order */ | 3044 | /* Stamp the time, and sequence number, |
3020 | /* should we update cloned packets too ? */ | 3045 | * convert them to network byte order |
3046 | * should we update cloned packets too ? | ||
3047 | */ | ||
3021 | if (pgh) { | 3048 | if (pgh) { |
3022 | struct timeval timestamp; | 3049 | struct timeval timestamp; |
3023 | 3050 | ||
@@ -3033,8 +3060,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
3033 | return skb; | 3060 | return skb; |
3034 | } | 3061 | } |
3035 | 3062 | ||
3036 | static inline struct sk_buff *fill_packet(struct net_device *odev, | 3063 | static struct sk_buff *fill_packet(struct net_device *odev, |
3037 | struct pktgen_dev *pkt_dev) | 3064 | struct pktgen_dev *pkt_dev) |
3038 | { | 3065 | { |
3039 | if (pkt_dev->flags & F_IPV6) | 3066 | if (pkt_dev->flags & F_IPV6) |
3040 | return fill_packet_ipv6(odev, pkt_dev); | 3067 | return fill_packet_ipv6(odev, pkt_dev); |
@@ -3072,9 +3099,9 @@ static void pktgen_run(struct pktgen_thread *t) | |||
3072 | pktgen_clear_counters(pkt_dev); | 3099 | pktgen_clear_counters(pkt_dev); |
3073 | pkt_dev->running = 1; /* Cranke yeself! */ | 3100 | pkt_dev->running = 1; /* Cranke yeself! */ |
3074 | pkt_dev->skb = NULL; | 3101 | pkt_dev->skb = NULL; |
3075 | pkt_dev->started_at = getCurUs(); | 3102 | pkt_dev->started_at = |
3076 | pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */ | 3103 | pkt_dev->next_tx = ktime_now(); |
3077 | pkt_dev->next_tx_ns = 0; | 3104 | |
3078 | set_pkt_overhead(pkt_dev); | 3105 | set_pkt_overhead(pkt_dev); |
3079 | 3106 | ||
3080 | strcpy(pkt_dev->result, "Starting"); | 3107 | strcpy(pkt_dev->result, "Starting"); |
@@ -3101,17 +3128,14 @@ static void pktgen_stop_all_threads_ifs(void) | |||
3101 | mutex_unlock(&pktgen_thread_lock); | 3128 | mutex_unlock(&pktgen_thread_lock); |
3102 | } | 3129 | } |
3103 | 3130 | ||
3104 | static int thread_is_running(struct pktgen_thread *t) | 3131 | static int thread_is_running(const struct pktgen_thread *t) |
3105 | { | 3132 | { |
3106 | struct pktgen_dev *pkt_dev; | 3133 | const struct pktgen_dev *pkt_dev; |
3107 | int res = 0; | ||
3108 | 3134 | ||
3109 | list_for_each_entry(pkt_dev, &t->if_list, list) | 3135 | list_for_each_entry(pkt_dev, &t->if_list, list) |
3110 | if (pkt_dev->running) { | 3136 | if (pkt_dev->running) |
3111 | res = 1; | 3137 | return 1; |
3112 | break; | 3138 | return 0; |
3113 | } | ||
3114 | return res; | ||
3115 | } | 3139 | } |
3116 | 3140 | ||
3117 | static int pktgen_wait_thread_run(struct pktgen_thread *t) | 3141 | static int pktgen_wait_thread_run(struct pktgen_thread *t) |
@@ -3168,7 +3192,8 @@ static void pktgen_run_all_threads(void) | |||
3168 | 3192 | ||
3169 | mutex_unlock(&pktgen_thread_lock); | 3193 | mutex_unlock(&pktgen_thread_lock); |
3170 | 3194 | ||
3171 | schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ | 3195 | /* Propagate thread->control */ |
3196 | schedule_timeout_interruptible(msecs_to_jiffies(125)); | ||
3172 | 3197 | ||
3173 | pktgen_wait_all_threads_run(); | 3198 | pktgen_wait_all_threads_run(); |
3174 | } | 3199 | } |
@@ -3186,35 +3211,29 @@ static void pktgen_reset_all_threads(void) | |||
3186 | 3211 | ||
3187 | mutex_unlock(&pktgen_thread_lock); | 3212 | mutex_unlock(&pktgen_thread_lock); |
3188 | 3213 | ||
3189 | schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ | 3214 | /* Propagate thread->control */ |
3215 | schedule_timeout_interruptible(msecs_to_jiffies(125)); | ||
3190 | 3216 | ||
3191 | pktgen_wait_all_threads_run(); | 3217 | pktgen_wait_all_threads_run(); |
3192 | } | 3218 | } |
3193 | 3219 | ||
3194 | static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) | 3220 | static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) |
3195 | { | 3221 | { |
3196 | __u64 total_us, bps, mbps, pps, idle; | 3222 | __u64 bps, mbps, pps; |
3197 | char *p = pkt_dev->result; | 3223 | char *p = pkt_dev->result; |
3198 | 3224 | ktime_t elapsed = ktime_sub(pkt_dev->stopped_at, | |
3199 | total_us = pkt_dev->stopped_at - pkt_dev->started_at; | 3225 | pkt_dev->started_at); |
3200 | 3226 | ktime_t idle = ns_to_ktime(pkt_dev->idle_acc); | |
3201 | idle = pkt_dev->idle_acc; | 3227 | |
3202 | 3228 | p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n", | |
3203 | p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n", | 3229 | (unsigned long long)ktime_to_us(elapsed), |
3204 | (unsigned long long)total_us, | 3230 | (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)), |
3205 | (unsigned long long)(total_us - idle), | 3231 | (unsigned long long)ktime_to_us(idle), |
3206 | (unsigned long long)idle, | ||
3207 | (unsigned long long)pkt_dev->sofar, | 3232 | (unsigned long long)pkt_dev->sofar, |
3208 | pkt_dev->cur_pkt_size, nr_frags); | 3233 | pkt_dev->cur_pkt_size, nr_frags); |
3209 | 3234 | ||
3210 | pps = pkt_dev->sofar * USEC_PER_SEC; | 3235 | pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC, |
3211 | 3236 | ktime_to_ns(elapsed)); | |
3212 | while ((total_us >> 32) != 0) { | ||
3213 | pps >>= 1; | ||
3214 | total_us >>= 1; | ||
3215 | } | ||
3216 | |||
3217 | do_div(pps, total_us); | ||
3218 | 3237 | ||
3219 | bps = pps * 8 * pkt_dev->cur_pkt_size; | 3238 | bps = pps * 8 * pkt_dev->cur_pkt_size; |
3220 | 3239 | ||
@@ -3228,7 +3247,6 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) | |||
3228 | } | 3247 | } |
3229 | 3248 | ||
3230 | /* Set stopped-at timer, remove from running list, do counters & statistics */ | 3249 | /* Set stopped-at timer, remove from running list, do counters & statistics */ |
3231 | |||
3232 | static int pktgen_stop_device(struct pktgen_dev *pkt_dev) | 3250 | static int pktgen_stop_device(struct pktgen_dev *pkt_dev) |
3233 | { | 3251 | { |
3234 | int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1; | 3252 | int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1; |
@@ -3239,7 +3257,9 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev) | |||
3239 | return -EINVAL; | 3257 | return -EINVAL; |
3240 | } | 3258 | } |
3241 | 3259 | ||
3242 | pkt_dev->stopped_at = getCurUs(); | 3260 | kfree_skb(pkt_dev->skb); |
3261 | pkt_dev->skb = NULL; | ||
3262 | pkt_dev->stopped_at = ktime_now(); | ||
3243 | pkt_dev->running = 0; | 3263 | pkt_dev->running = 0; |
3244 | 3264 | ||
3245 | show_results(pkt_dev, nr_frags); | 3265 | show_results(pkt_dev, nr_frags); |
@@ -3258,7 +3278,7 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t) | |||
3258 | continue; | 3278 | continue; |
3259 | if (best == NULL) | 3279 | if (best == NULL) |
3260 | best = pkt_dev; | 3280 | best = pkt_dev; |
3261 | else if (pkt_dev->next_tx_us < best->next_tx_us) | 3281 | else if (ktime_lt(pkt_dev->next_tx, best->next_tx)) |
3262 | best = pkt_dev; | 3282 | best = pkt_dev; |
3263 | } | 3283 | } |
3264 | if_unlock(t); | 3284 | if_unlock(t); |
@@ -3275,9 +3295,6 @@ static void pktgen_stop(struct pktgen_thread *t) | |||
3275 | 3295 | ||
3276 | list_for_each_entry(pkt_dev, &t->if_list, list) { | 3296 | list_for_each_entry(pkt_dev, &t->if_list, list) { |
3277 | pktgen_stop_device(pkt_dev); | 3297 | pktgen_stop_device(pkt_dev); |
3278 | kfree_skb(pkt_dev->skb); | ||
3279 | |||
3280 | pkt_dev->skb = NULL; | ||
3281 | } | 3298 | } |
3282 | 3299 | ||
3283 | if_unlock(t); | 3300 | if_unlock(t); |
@@ -3348,30 +3365,37 @@ static void pktgen_rem_thread(struct pktgen_thread *t) | |||
3348 | mutex_unlock(&pktgen_thread_lock); | 3365 | mutex_unlock(&pktgen_thread_lock); |
3349 | } | 3366 | } |
3350 | 3367 | ||
3351 | static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) | 3368 | static void idle(struct pktgen_dev *pkt_dev) |
3369 | { | ||
3370 | ktime_t idle_start = ktime_now(); | ||
3371 | |||
3372 | if (need_resched()) | ||
3373 | schedule(); | ||
3374 | else | ||
3375 | cpu_relax(); | ||
3376 | |||
3377 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start)); | ||
3378 | } | ||
3379 | |||
3380 | |||
3381 | static void pktgen_xmit(struct pktgen_dev *pkt_dev) | ||
3352 | { | 3382 | { |
3353 | struct net_device *odev = pkt_dev->odev; | 3383 | struct net_device *odev = pkt_dev->odev; |
3354 | int (*xmit)(struct sk_buff *, struct net_device *) | 3384 | netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *) |
3355 | = odev->netdev_ops->ndo_start_xmit; | 3385 | = odev->netdev_ops->ndo_start_xmit; |
3356 | struct netdev_queue *txq; | 3386 | struct netdev_queue *txq; |
3357 | __u64 idle_start = 0; | ||
3358 | u16 queue_map; | 3387 | u16 queue_map; |
3359 | int ret; | 3388 | int ret; |
3360 | 3389 | ||
3361 | if (pkt_dev->delay_us || pkt_dev->delay_ns) { | 3390 | if (pkt_dev->delay) { |
3362 | u64 now; | 3391 | spin(pkt_dev, pkt_dev->next_tx); |
3363 | |||
3364 | now = getCurUs(); | ||
3365 | if (now < pkt_dev->next_tx_us) | ||
3366 | spin(pkt_dev, pkt_dev->next_tx_us); | ||
3367 | 3392 | ||
3368 | /* This is max DELAY, this has special meaning of | 3393 | /* This is max DELAY, this has special meaning of |
3369 | * "never transmit" | 3394 | * "never transmit" |
3370 | */ | 3395 | */ |
3371 | if (pkt_dev->delay_us == 0x7FFFFFFF) { | 3396 | if (pkt_dev->delay == ULLONG_MAX) { |
3372 | pkt_dev->next_tx_us = getCurUs() + pkt_dev->delay_us; | 3397 | pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX); |
3373 | pkt_dev->next_tx_ns = pkt_dev->delay_ns; | 3398 | return; |
3374 | goto out; | ||
3375 | } | 3399 | } |
3376 | } | 3400 | } |
3377 | 3401 | ||
@@ -3383,47 +3407,32 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
3383 | } | 3407 | } |
3384 | 3408 | ||
3385 | txq = netdev_get_tx_queue(odev, queue_map); | 3409 | txq = netdev_get_tx_queue(odev, queue_map); |
3386 | if (netif_tx_queue_stopped(txq) || | 3410 | /* Did we saturate the queue already? */ |
3387 | netif_tx_queue_frozen(txq) || | 3411 | if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) { |
3388 | need_resched()) { | 3412 | /* If device is down, then all queues are permnantly frozen */ |
3389 | idle_start = getCurUs(); | 3413 | if (netif_running(odev)) |
3390 | 3414 | idle(pkt_dev); | |
3391 | if (!netif_running(odev)) { | 3415 | else |
3392 | pktgen_stop_device(pkt_dev); | 3416 | pktgen_stop_device(pkt_dev); |
3393 | kfree_skb(pkt_dev->skb); | 3417 | return; |
3394 | pkt_dev->skb = NULL; | ||
3395 | goto out; | ||
3396 | } | ||
3397 | if (need_resched()) | ||
3398 | schedule(); | ||
3399 | |||
3400 | pkt_dev->idle_acc += getCurUs() - idle_start; | ||
3401 | |||
3402 | if (netif_tx_queue_stopped(txq) || | ||
3403 | netif_tx_queue_frozen(txq)) { | ||
3404 | pkt_dev->next_tx_us = getCurUs(); /* TODO */ | ||
3405 | pkt_dev->next_tx_ns = 0; | ||
3406 | goto out; /* Try the next interface */ | ||
3407 | } | ||
3408 | } | 3418 | } |
3409 | 3419 | ||
3410 | if (pkt_dev->last_ok || !pkt_dev->skb) { | 3420 | if (!pkt_dev->skb || (pkt_dev->last_ok && |
3411 | if ((++pkt_dev->clone_count >= pkt_dev->clone_skb) | 3421 | ++pkt_dev->clone_count >= pkt_dev->clone_skb)) { |
3412 | || (!pkt_dev->skb)) { | 3422 | /* build a new pkt */ |
3413 | /* build a new pkt */ | 3423 | kfree_skb(pkt_dev->skb); |
3414 | kfree_skb(pkt_dev->skb); | ||
3415 | 3424 | ||
3416 | pkt_dev->skb = fill_packet(odev, pkt_dev); | 3425 | pkt_dev->skb = fill_packet(odev, pkt_dev); |
3417 | if (pkt_dev->skb == NULL) { | 3426 | if (pkt_dev->skb == NULL) { |
3418 | printk(KERN_ERR "pktgen: ERROR: couldn't " | 3427 | printk(KERN_ERR "pktgen: ERROR: couldn't " |
3419 | "allocate skb in fill_packet.\n"); | 3428 | "allocate skb in fill_packet.\n"); |
3420 | schedule(); | 3429 | schedule(); |
3421 | pkt_dev->clone_count--; /* back out increment, OOM */ | 3430 | pkt_dev->clone_count--; /* back out increment, OOM */ |
3422 | goto out; | 3431 | return; |
3423 | } | ||
3424 | pkt_dev->allocated_skbs++; | ||
3425 | pkt_dev->clone_count = 0; /* reset counter */ | ||
3426 | } | 3432 | } |
3433 | |||
3434 | pkt_dev->allocated_skbs++; | ||
3435 | pkt_dev->clone_count = 0; /* reset counter */ | ||
3427 | } | 3436 | } |
3428 | 3437 | ||
3429 | /* fill_packet() might have changed the queue */ | 3438 | /* fill_packet() might have changed the queue */ |
@@ -3431,73 +3440,53 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
3431 | txq = netdev_get_tx_queue(odev, queue_map); | 3440 | txq = netdev_get_tx_queue(odev, queue_map); |
3432 | 3441 | ||
3433 | __netif_tx_lock_bh(txq); | 3442 | __netif_tx_lock_bh(txq); |
3434 | if (!netif_tx_queue_stopped(txq) && | 3443 | if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) |
3435 | !netif_tx_queue_frozen(txq)) { | 3444 | pkt_dev->last_ok = 0; |
3436 | 3445 | else { | |
3437 | atomic_inc(&(pkt_dev->skb->users)); | 3446 | atomic_inc(&(pkt_dev->skb->users)); |
3438 | retry_now: | 3447 | |
3448 | retry_now: | ||
3439 | ret = (*xmit)(pkt_dev->skb, odev); | 3449 | ret = (*xmit)(pkt_dev->skb, odev); |
3440 | if (likely(ret == NETDEV_TX_OK)) { | 3450 | switch (ret) { |
3451 | case NETDEV_TX_OK: | ||
3441 | txq_trans_update(txq); | 3452 | txq_trans_update(txq); |
3442 | pkt_dev->last_ok = 1; | 3453 | pkt_dev->last_ok = 1; |
3443 | pkt_dev->sofar++; | 3454 | pkt_dev->sofar++; |
3444 | pkt_dev->seq_num++; | 3455 | pkt_dev->seq_num++; |
3445 | pkt_dev->tx_bytes += pkt_dev->cur_pkt_size; | 3456 | pkt_dev->tx_bytes += pkt_dev->cur_pkt_size; |
3446 | 3457 | break; | |
3447 | } else if (ret == NETDEV_TX_LOCKED | 3458 | case NETDEV_TX_LOCKED: |
3448 | && (odev->features & NETIF_F_LLTX)) { | ||
3449 | cpu_relax(); | 3459 | cpu_relax(); |
3450 | goto retry_now; | 3460 | goto retry_now; |
3451 | } else { /* Retry it next time */ | 3461 | default: /* Drivers are not supposed to return other values! */ |
3452 | 3462 | if (net_ratelimit()) | |
3453 | atomic_dec(&(pkt_dev->skb->users)); | 3463 | pr_info("pktgen: %s xmit error: %d\n", |
3454 | 3464 | odev->name, ret); | |
3455 | if (debug && net_ratelimit()) | ||
3456 | printk(KERN_INFO "pktgen: Hard xmit error\n"); | ||
3457 | |||
3458 | pkt_dev->errors++; | 3465 | pkt_dev->errors++; |
3466 | /* fallthru */ | ||
3467 | case NETDEV_TX_BUSY: | ||
3468 | /* Retry it next time */ | ||
3469 | atomic_dec(&(pkt_dev->skb->users)); | ||
3459 | pkt_dev->last_ok = 0; | 3470 | pkt_dev->last_ok = 0; |
3460 | } | 3471 | } |
3461 | 3472 | ||
3462 | pkt_dev->next_tx_us = getCurUs(); | 3473 | if (pkt_dev->delay) |
3463 | pkt_dev->next_tx_ns = 0; | 3474 | pkt_dev->next_tx = ktime_add_ns(ktime_now(), |
3464 | 3475 | pkt_dev->delay); | |
3465 | pkt_dev->next_tx_us += pkt_dev->delay_us; | ||
3466 | pkt_dev->next_tx_ns += pkt_dev->delay_ns; | ||
3467 | |||
3468 | if (pkt_dev->next_tx_ns > 1000) { | ||
3469 | pkt_dev->next_tx_us++; | ||
3470 | pkt_dev->next_tx_ns -= 1000; | ||
3471 | } | ||
3472 | } | 3476 | } |
3473 | |||
3474 | else { /* Retry it next time */ | ||
3475 | pkt_dev->last_ok = 0; | ||
3476 | pkt_dev->next_tx_us = getCurUs(); /* TODO */ | ||
3477 | pkt_dev->next_tx_ns = 0; | ||
3478 | } | ||
3479 | |||
3480 | __netif_tx_unlock_bh(txq); | 3477 | __netif_tx_unlock_bh(txq); |
3481 | 3478 | ||
3482 | /* If pkt_dev->count is zero, then run forever */ | 3479 | /* If pkt_dev->count is zero, then run forever */ |
3483 | if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { | 3480 | if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { |
3484 | if (atomic_read(&(pkt_dev->skb->users)) != 1) { | 3481 | while (atomic_read(&(pkt_dev->skb->users)) != 1) { |
3485 | idle_start = getCurUs(); | 3482 | if (signal_pending(current)) |
3486 | while (atomic_read(&(pkt_dev->skb->users)) != 1) { | 3483 | break; |
3487 | if (signal_pending(current)) { | 3484 | idle(pkt_dev); |
3488 | break; | ||
3489 | } | ||
3490 | schedule(); | ||
3491 | } | ||
3492 | pkt_dev->idle_acc += getCurUs() - idle_start; | ||
3493 | } | 3485 | } |
3494 | 3486 | ||
3495 | /* Done with this */ | 3487 | /* Done with this */ |
3496 | pktgen_stop_device(pkt_dev); | 3488 | pktgen_stop_device(pkt_dev); |
3497 | kfree_skb(pkt_dev->skb); | ||
3498 | pkt_dev->skb = NULL; | ||
3499 | } | 3489 | } |
3500 | out:; | ||
3501 | } | 3490 | } |
3502 | 3491 | ||
3503 | /* | 3492 | /* |
@@ -3516,7 +3505,8 @@ static int pktgen_thread_worker(void *arg) | |||
3516 | init_waitqueue_head(&t->queue); | 3505 | init_waitqueue_head(&t->queue); |
3517 | complete(&t->start_done); | 3506 | complete(&t->start_done); |
3518 | 3507 | ||
3519 | pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current)); | 3508 | pr_debug("pktgen: starting pktgen/%d: pid=%d\n", |
3509 | cpu, task_pid_nr(current)); | ||
3520 | 3510 | ||
3521 | set_current_state(TASK_INTERRUPTIBLE); | 3511 | set_current_state(TASK_INTERRUPTIBLE); |
3522 | 3512 | ||
@@ -3651,8 +3641,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) | |||
3651 | pkt_dev->max_pkt_size = ETH_ZLEN; | 3641 | pkt_dev->max_pkt_size = ETH_ZLEN; |
3652 | pkt_dev->nfrags = 0; | 3642 | pkt_dev->nfrags = 0; |
3653 | pkt_dev->clone_skb = pg_clone_skb_d; | 3643 | pkt_dev->clone_skb = pg_clone_skb_d; |
3654 | pkt_dev->delay_us = pg_delay_d / 1000; | 3644 | pkt_dev->delay = pg_delay_d; |
3655 | pkt_dev->delay_ns = pg_delay_d % 1000; | ||
3656 | pkt_dev->count = pg_count_d; | 3645 | pkt_dev->count = pg_count_d; |
3657 | pkt_dev->sofar = 0; | 3646 | pkt_dev->sofar = 0; |
3658 | pkt_dev->udp_src_min = 9; /* sink port */ | 3647 | pkt_dev->udp_src_min = 9; /* sink port */ |
@@ -3864,10 +3853,15 @@ static void __exit pg_cleanup(void) | |||
3864 | module_init(pg_init); | 3853 | module_init(pg_init); |
3865 | module_exit(pg_cleanup); | 3854 | module_exit(pg_cleanup); |
3866 | 3855 | ||
3867 | MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se"); | 3856 | MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>"); |
3868 | MODULE_DESCRIPTION("Packet Generator tool"); | 3857 | MODULE_DESCRIPTION("Packet Generator tool"); |
3869 | MODULE_LICENSE("GPL"); | 3858 | MODULE_LICENSE("GPL"); |
3859 | MODULE_VERSION(VERSION); | ||
3870 | module_param(pg_count_d, int, 0); | 3860 | module_param(pg_count_d, int, 0); |
3861 | MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject"); | ||
3871 | module_param(pg_delay_d, int, 0); | 3862 | module_param(pg_delay_d, int, 0); |
3863 | MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)"); | ||
3872 | module_param(pg_clone_skb_d, int, 0); | 3864 | module_param(pg_clone_skb_d, int, 0); |
3865 | MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet"); | ||
3873 | module_param(debug, int, 0); | 3866 | module_param(debug, int, 0); |
3867 | MODULE_PARM_DESC(debug, "Enable debugging of pktgen module"); | ||
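The pktgen hunks above replace the old microsecond bookkeeping (delay_us/delay_ns, getCurUs()) with a single nanosecond ktime_t delay, an hrtimer-backed spin(), and ktime-based elapsed/idle accounting in show_results(). A minimal userspace sketch of that accounting arithmetic follows; it is not kernel code, the names sofar/idle_ns/pkt_size are borrowed from the diff for illustration, and the loop body merely stands in for a transmitted packet.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Nanosecond-resolution throughput accounting, loosely mirroring the
 * ktime arithmetic the pktgen diff switches to (elapsed - idle, then
 * pps = sofar * NSEC_PER_SEC / elapsed, bps = pps * 8 * pkt_size). */
static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	uint64_t started = now_ns();
	uint64_t sofar = 0, idle_ns = 0;
	unsigned int pkt_size = 64;

	for (int i = 0; i < 1000000; i++)
		sofar++;	/* stand-in for one transmitted packet */

	uint64_t elapsed = now_ns() - started;
	uint64_t pps = elapsed ? sofar * 1000000000ull / elapsed : 0;
	uint64_t bps = pps * 8 * pkt_size;

	printf("%llu pkts in %llu ns (idle %llu ns): %llu pps, %llu Mb/s\n",
	       (unsigned long long)sofar, (unsigned long long)elapsed,
	       (unsigned long long)idle_ns, (unsigned long long)pps,
	       (unsigned long long)(bps / 1000000));
	return 0;
}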
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d78030f88bd0..eb42873f2a3a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/security.h> | 35 | #include <linux/security.h> |
36 | #include <linux/mutex.h> | 36 | #include <linux/mutex.h> |
37 | #include <linux/if_addr.h> | 37 | #include <linux/if_addr.h> |
38 | #include <linux/nsproxy.h> | ||
39 | 38 | ||
40 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
41 | #include <asm/system.h> | 40 | #include <asm/system.h> |
@@ -52,6 +51,7 @@ | |||
52 | #include <net/pkt_sched.h> | 51 | #include <net/pkt_sched.h> |
53 | #include <net/fib_rules.h> | 52 | #include <net/fib_rules.h> |
54 | #include <net/rtnetlink.h> | 53 | #include <net/rtnetlink.h> |
54 | #include <net/net_namespace.h> | ||
55 | 55 | ||
56 | struct rtnl_link | 56 | struct rtnl_link |
57 | { | 57 | { |
@@ -606,7 +606,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
606 | int type, u32 pid, u32 seq, u32 change, | 606 | int type, u32 pid, u32 seq, u32 change, |
607 | unsigned int flags) | 607 | unsigned int flags) |
608 | { | 608 | { |
609 | struct netdev_queue *txq; | ||
610 | struct ifinfomsg *ifm; | 609 | struct ifinfomsg *ifm; |
611 | struct nlmsghdr *nlh; | 610 | struct nlmsghdr *nlh; |
612 | const struct net_device_stats *stats; | 611 | const struct net_device_stats *stats; |
@@ -637,9 +636,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
637 | if (dev->master) | 636 | if (dev->master) |
638 | NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex); | 637 | NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex); |
639 | 638 | ||
640 | txq = netdev_get_tx_queue(dev, 0); | 639 | if (dev->qdisc) |
641 | if (txq->qdisc_sleeping) | 640 | NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id); |
642 | NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id); | ||
643 | 641 | ||
644 | if (dev->ifalias) | 642 | if (dev->ifalias) |
645 | NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias); | 643 | NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias); |
@@ -725,25 +723,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { | |||
725 | [IFLA_INFO_DATA] = { .type = NLA_NESTED }, | 723 | [IFLA_INFO_DATA] = { .type = NLA_NESTED }, |
726 | }; | 724 | }; |
727 | 725 | ||
728 | static struct net *get_net_ns_by_pid(pid_t pid) | ||
729 | { | ||
730 | struct task_struct *tsk; | ||
731 | struct net *net; | ||
732 | |||
733 | /* Lookup the network namespace */ | ||
734 | net = ERR_PTR(-ESRCH); | ||
735 | rcu_read_lock(); | ||
736 | tsk = find_task_by_vpid(pid); | ||
737 | if (tsk) { | ||
738 | struct nsproxy *nsproxy; | ||
739 | nsproxy = task_nsproxy(tsk); | ||
740 | if (nsproxy) | ||
741 | net = get_net(nsproxy->net_ns); | ||
742 | } | ||
743 | rcu_read_unlock(); | ||
744 | return net; | ||
745 | } | ||
746 | |||
747 | static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) | 726 | static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) |
748 | { | 727 | { |
749 | if (dev) { | 728 | if (dev) { |
@@ -993,12 +972,20 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname, | |||
993 | { | 972 | { |
994 | int err; | 973 | int err; |
995 | struct net_device *dev; | 974 | struct net_device *dev; |
975 | unsigned int num_queues = 1; | ||
976 | unsigned int real_num_queues = 1; | ||
996 | 977 | ||
978 | if (ops->get_tx_queues) { | ||
979 | err = ops->get_tx_queues(net, tb, &num_queues, &real_num_queues); | ||
980 | if (err) | ||
981 | goto err; | ||
982 | } | ||
997 | err = -ENOMEM; | 983 | err = -ENOMEM; |
998 | dev = alloc_netdev(ops->priv_size, ifname, ops->setup); | 984 | dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues); |
999 | if (!dev) | 985 | if (!dev) |
1000 | goto err; | 986 | goto err; |
1001 | 987 | ||
988 | dev->real_num_tx_queues = real_num_queues; | ||
1002 | if (strchr(dev->name, '%')) { | 989 | if (strchr(dev->name, '%')) { |
1003 | err = dev_alloc_name(dev, dev->name); | 990 | err = dev_alloc_name(dev, dev->name); |
1004 | if (err < 0) | 991 | if (err < 0) |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 9e0597d189b0..80a96166df39 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -559,9 +559,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
559 | #endif | 559 | #endif |
560 | #endif | 560 | #endif |
561 | new->vlan_tci = old->vlan_tci; | 561 | new->vlan_tci = old->vlan_tci; |
562 | #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) | ||
563 | new->do_not_encrypt = old->do_not_encrypt; | ||
564 | #endif | ||
565 | 562 | ||
566 | skb_copy_secmark(new, old); | 563 | skb_copy_secmark(new, old); |
567 | } | 564 | } |
diff --git a/net/core/sock.c b/net/core/sock.c index 76334228ed1c..30d5446512f9 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -142,7 +142,7 @@ static struct lock_class_key af_family_slock_keys[AF_MAX]; | |||
142 | * strings build-time, so that runtime initialization of socket | 142 | * strings build-time, so that runtime initialization of socket |
143 | * locks is fast): | 143 | * locks is fast): |
144 | */ | 144 | */ |
145 | static const char *af_family_key_strings[AF_MAX+1] = { | 145 | static const char *const af_family_key_strings[AF_MAX+1] = { |
146 | "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , | 146 | "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" , |
147 | "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK", | 147 | "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK", |
148 | "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" , | 148 | "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" , |
@@ -158,7 +158,7 @@ static const char *af_family_key_strings[AF_MAX+1] = { | |||
158 | "sk_lock-AF_IEEE802154", | 158 | "sk_lock-AF_IEEE802154", |
159 | "sk_lock-AF_MAX" | 159 | "sk_lock-AF_MAX" |
160 | }; | 160 | }; |
161 | static const char *af_family_slock_key_strings[AF_MAX+1] = { | 161 | static const char *const af_family_slock_key_strings[AF_MAX+1] = { |
162 | "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , | 162 | "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , |
163 | "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK", | 163 | "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK", |
164 | "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" , | 164 | "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" , |
@@ -174,7 +174,7 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = { | |||
174 | "slock-AF_IEEE802154", | 174 | "slock-AF_IEEE802154", |
175 | "slock-AF_MAX" | 175 | "slock-AF_MAX" |
176 | }; | 176 | }; |
177 | static const char *af_family_clock_key_strings[AF_MAX+1] = { | 177 | static const char *const af_family_clock_key_strings[AF_MAX+1] = { |
178 | "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , | 178 | "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , |
179 | "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK", | 179 | "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK", |
180 | "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" , | 180 | "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" , |
@@ -482,6 +482,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname, | |||
482 | sk->sk_reuse = valbool; | 482 | sk->sk_reuse = valbool; |
483 | break; | 483 | break; |
484 | case SO_TYPE: | 484 | case SO_TYPE: |
485 | case SO_PROTOCOL: | ||
486 | case SO_DOMAIN: | ||
485 | case SO_ERROR: | 487 | case SO_ERROR: |
486 | ret = -ENOPROTOOPT; | 488 | ret = -ENOPROTOOPT; |
487 | break; | 489 | break; |
@@ -764,6 +766,14 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
764 | v.val = sk->sk_type; | 766 | v.val = sk->sk_type; |
765 | break; | 767 | break; |
766 | 768 | ||
769 | case SO_PROTOCOL: | ||
770 | v.val = sk->sk_protocol; | ||
771 | break; | ||
772 | |||
773 | case SO_DOMAIN: | ||
774 | v.val = sk->sk_family; | ||
775 | break; | ||
776 | |||
767 | case SO_ERROR: | 777 | case SO_ERROR: |
768 | v.val = -sock_error(sk); | 778 | v.val = -sock_error(sk); |
769 | if (v.val == 0) | 779 | if (v.val == 0) |
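Editor's note: with the sock.c hunk above, userspace can read back a socket's protocol and address family via getsockopt(), while attempts to set them keep returning -ENOPROTOOPT. A small userspace sketch, assuming a libc whose socket headers already define SO_PROTOCOL and SO_DOMAIN (on older headers the numeric values would have to be supplied by hand):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	int domain, protocol;
	socklen_t len = sizeof(int);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len) == 0)
		printf("domain   = %d (AF_INET = %d)\n", domain, AF_INET);
	len = sizeof(int);
	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &len) == 0)
		printf("protocol = %d (IPPROTO_TCP = %d)\n", protocol, IPPROTO_TCP);
	return 0;
}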
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 8379496de82b..e0879bfb7dd5 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c | |||
@@ -64,6 +64,7 @@ static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = { | |||
64 | [DCB_ATTR_CAP] = {.type = NLA_NESTED}, | 64 | [DCB_ATTR_CAP] = {.type = NLA_NESTED}, |
65 | [DCB_ATTR_PFC_STATE] = {.type = NLA_U8}, | 65 | [DCB_ATTR_PFC_STATE] = {.type = NLA_U8}, |
66 | [DCB_ATTR_BCN] = {.type = NLA_NESTED}, | 66 | [DCB_ATTR_BCN] = {.type = NLA_NESTED}, |
67 | [DCB_ATTR_APP] = {.type = NLA_NESTED}, | ||
67 | }; | 68 | }; |
68 | 69 | ||
69 | /* DCB priority flow control to User Priority nested attributes */ | 70 | /* DCB priority flow control to User Priority nested attributes */ |
@@ -158,6 +159,13 @@ static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = { | |||
158 | [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG}, | 159 | [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG}, |
159 | }; | 160 | }; |
160 | 161 | ||
162 | /* DCB APP nested attributes. */ | ||
163 | static struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = { | ||
164 | [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8}, | ||
165 | [DCB_APP_ATTR_ID] = {.type = NLA_U16}, | ||
166 | [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8}, | ||
167 | }; | ||
168 | |||
161 | /* standard netlink reply call */ | 169 | /* standard netlink reply call */ |
162 | static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid, | 170 | static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid, |
163 | u32 seq, u16 flags) | 171 | u32 seq, u16 flags) |
@@ -536,6 +544,120 @@ static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb, | |||
536 | return ret; | 544 | return ret; |
537 | } | 545 | } |
538 | 546 | ||
547 | static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb, | ||
548 | u32 pid, u32 seq, u16 flags) | ||
549 | { | ||
550 | struct sk_buff *dcbnl_skb; | ||
551 | struct nlmsghdr *nlh; | ||
552 | struct dcbmsg *dcb; | ||
553 | struct nlattr *app_nest; | ||
554 | struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; | ||
555 | u16 id; | ||
556 | u8 up, idtype; | ||
557 | int ret = -EINVAL; | ||
558 | |||
559 | if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp) | ||
560 | goto out; | ||
561 | |||
562 | ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], | ||
563 | dcbnl_app_nest); | ||
564 | if (ret) | ||
565 | goto out; | ||
566 | |||
567 | ret = -EINVAL; | ||
568 | /* all must be non-null */ | ||
569 | if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || | ||
570 | (!app_tb[DCB_APP_ATTR_ID])) | ||
571 | goto out; | ||
572 | |||
573 | /* either by eth type or by socket number */ | ||
574 | idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); | ||
575 | if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && | ||
576 | (idtype != DCB_APP_IDTYPE_PORTNUM)) | ||
577 | goto out; | ||
578 | |||
579 | id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); | ||
580 | up = netdev->dcbnl_ops->getapp(netdev, idtype, id); | ||
581 | |||
582 | /* send this back */ | ||
583 | dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
584 | if (!dcbnl_skb) | ||
585 | goto out; | ||
586 | |||
587 | nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); | ||
588 | dcb = NLMSG_DATA(nlh); | ||
589 | dcb->dcb_family = AF_UNSPEC; | ||
590 | dcb->cmd = DCB_CMD_GAPP; | ||
591 | |||
592 | app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP); | ||
593 | ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); | ||
594 | if (ret) | ||
595 | goto out_cancel; | ||
596 | |||
597 | ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id); | ||
598 | if (ret) | ||
599 | goto out_cancel; | ||
600 | |||
601 | ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up); | ||
602 | if (ret) | ||
603 | goto out_cancel; | ||
604 | |||
605 | nla_nest_end(dcbnl_skb, app_nest); | ||
606 | nlmsg_end(dcbnl_skb, nlh); | ||
607 | |||
608 | ret = rtnl_unicast(dcbnl_skb, &init_net, pid); | ||
609 | if (ret) | ||
610 | goto nlmsg_failure; | ||
611 | |||
612 | goto out; | ||
613 | |||
614 | out_cancel: | ||
615 | nla_nest_cancel(dcbnl_skb, app_nest); | ||
616 | nlmsg_failure: | ||
617 | kfree_skb(dcbnl_skb); | ||
618 | out: | ||
619 | return ret; | ||
620 | } | ||
621 | |||
622 | static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb, | ||
623 | u32 pid, u32 seq, u16 flags) | ||
624 | { | ||
625 | int ret = -EINVAL; | ||
626 | u16 id; | ||
627 | u8 up, idtype; | ||
628 | struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; | ||
629 | |||
630 | if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->setapp) | ||
631 | goto out; | ||
632 | |||
633 | ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], | ||
634 | dcbnl_app_nest); | ||
635 | if (ret) | ||
636 | goto out; | ||
637 | |||
638 | ret = -EINVAL; | ||
639 | /* all must be non-null */ | ||
640 | if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || | ||
641 | (!app_tb[DCB_APP_ATTR_ID]) || | ||
642 | (!app_tb[DCB_APP_ATTR_PRIORITY])) | ||
643 | goto out; | ||
644 | |||
645 | /* either by eth type or by socket number */ | ||
646 | idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); | ||
647 | if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && | ||
648 | (idtype != DCB_APP_IDTYPE_PORTNUM)) | ||
649 | goto out; | ||
650 | |||
651 | id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); | ||
652 | up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]); | ||
653 | |||
654 | ret = dcbnl_reply(netdev->dcbnl_ops->setapp(netdev, idtype, id, up), | ||
655 | RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP, | ||
656 | pid, seq, flags); | ||
657 | out: | ||
658 | return ret; | ||
659 | } | ||
660 | |||
539 | static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb, | 661 | static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb, |
540 | u32 pid, u32 seq, u16 flags, int dir) | 662 | u32 pid, u32 seq, u16 flags, int dir) |
541 | { | 663 | { |
@@ -1093,6 +1215,14 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
1093 | ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq, | 1215 | ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq, |
1094 | nlh->nlmsg_flags); | 1216 | nlh->nlmsg_flags); |
1095 | goto out; | 1217 | goto out; |
1218 | case DCB_CMD_GAPP: | ||
1219 | ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq, | ||
1220 | nlh->nlmsg_flags); | ||
1221 | goto out; | ||
1222 | case DCB_CMD_SAPP: | ||
1223 | ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq, | ||
1224 | nlh->nlmsg_flags); | ||
1225 | goto out; | ||
1096 | default: | 1226 | default: |
1097 | goto errout; | 1227 | goto errout; |
1098 | } | 1228 | } |
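Editor's note: the new DCB_CMD_GAPP/DCB_CMD_SAPP handlers only forward to driver callbacks. From the call sites above, getapp() returns the user priority as a u8, and setapp() returns a u8 status that is handed straight to dcbnl_reply(). A hedged sketch of a driver-side implementation; the hyp_ names and the fixed FCoE mapping are illustrative, not taken from any real driver.

/* Illustrative callbacks; signatures inferred from the dcbnl_getapp()
 * and dcbnl_setapp() call sites above. */
static u8 hyp_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
	/* Example policy only: map the FCoE ethertype 0x8906 to priority 3. */
	if (idtype == DCB_APP_IDTYPE_ETHTYPE && id == 0x8906)
		return 3;
	return 0;
}

static u8 hyp_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 id, u8 up)
{
	/* A real driver would program its hardware app table here;
	 * return 0 on success, matching dcbnl_reply()'s u8 status. */
	return 0;
}

static struct dcbnl_rtnl_ops hyp_dcbnl_ops = {
	.getapp = hyp_dcbnl_getapp,
	.setapp = hyp_dcbnl_setapp,
	/* remaining DCB callbacks omitted in this sketch */
};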
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index a27b7f4c19c5..f596ce149c3c 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -52,7 +52,7 @@ static int ccid3_debug; | |||
52 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG | 52 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
53 | static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state) | 53 | static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state) |
54 | { | 54 | { |
55 | static char *ccid3_state_names[] = { | 55 | static const char *const ccid3_state_names[] = { |
56 | [TFRC_SSTATE_NO_SENT] = "NO_SENT", | 56 | [TFRC_SSTATE_NO_SENT] = "NO_SENT", |
57 | [TFRC_SSTATE_NO_FBACK] = "NO_FBACK", | 57 | [TFRC_SSTATE_NO_FBACK] = "NO_FBACK", |
58 | [TFRC_SSTATE_FBACK] = "FBACK", | 58 | [TFRC_SSTATE_FBACK] = "FBACK", |
@@ -646,7 +646,7 @@ enum ccid3_fback_type { | |||
646 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG | 646 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
647 | static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state) | 647 | static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state) |
648 | { | 648 | { |
649 | static char *ccid3_rx_state_names[] = { | 649 | static const char *const ccid3_rx_state_names[] = { |
650 | [TFRC_RSTATE_NO_DATA] = "NO_DATA", | 650 | [TFRC_RSTATE_NO_DATA] = "NO_DATA", |
651 | [TFRC_RSTATE_DATA] = "DATA", | 651 | [TFRC_RSTATE_DATA] = "DATA", |
652 | [TFRC_RSTATE_TERM] = "TERM", | 652 | [TFRC_RSTATE_TERM] = "TERM", |
diff --git a/net/dccp/feat.c b/net/dccp/feat.c index b04160a2eea5..972b8dc918d6 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c | |||
@@ -213,7 +213,7 @@ static int dccp_feat_default_value(u8 feat_num) | |||
213 | */ | 213 | */ |
214 | static const char *dccp_feat_fname(const u8 feat) | 214 | static const char *dccp_feat_fname(const u8 feat) |
215 | { | 215 | { |
216 | static const char *feature_names[] = { | 216 | static const char *const feature_names[] = { |
217 | [DCCPF_RESERVED] = "Reserved", | 217 | [DCCPF_RESERVED] = "Reserved", |
218 | [DCCPF_CCID] = "CCID", | 218 | [DCCPF_CCID] = "CCID", |
219 | [DCCPF_SHORT_SEQNOS] = "Allow Short Seqnos", | 219 | [DCCPF_SHORT_SEQNOS] = "Allow Short Seqnos", |
@@ -236,8 +236,9 @@ static const char *dccp_feat_fname(const u8 feat) | |||
236 | return feature_names[feat]; | 236 | return feature_names[feat]; |
237 | } | 237 | } |
238 | 238 | ||
239 | static const char *dccp_feat_sname[] = { "DEFAULT", "INITIALISING", "CHANGING", | 239 | static const char *const dccp_feat_sname[] = { |
240 | "UNSTABLE", "STABLE" }; | 240 | "DEFAULT", "INITIALISING", "CHANGING", "UNSTABLE", "STABLE", |
241 | }; | ||
241 | 242 | ||
242 | #ifdef CONFIG_IP_DCCP_DEBUG | 243 | #ifdef CONFIG_IP_DCCP_DEBUG |
243 | static const char *dccp_feat_oname(const u8 opt) | 244 | static const char *dccp_feat_oname(const u8 opt) |
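Editor's note: this merge repeatedly converts string tables from "static const char *" to "static const char *const" (sock.c, ccid3.c, feat.c, proto.c above, proto.c below). The extra const lets the pointer array itself live in read-only data. The distinction, for reference:

static const char *names_a[]       = { "a", "b" };  /* strings const, array writable */
static const char *const names_b[] = { "a", "b" };  /* strings and array both const  */

/* names_a[0] = "c";   still compiles: only the pointed-to chars are const   */
/* names_b[0] = "c";   rejected by the compiler: the pointers are const too  */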
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index a0a36c9e6cce..d01c00de1ad0 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -880,7 +880,7 @@ discard_and_relse: | |||
880 | goto discard_it; | 880 | goto discard_it; |
881 | } | 881 | } |
882 | 882 | ||
883 | static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { | 883 | static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { |
884 | .queue_xmit = ip_queue_xmit, | 884 | .queue_xmit = ip_queue_xmit, |
885 | .send_check = dccp_v4_send_check, | 885 | .send_check = dccp_v4_send_check, |
886 | .rebuild_header = inet_sk_rebuild_header, | 886 | .rebuild_header = inet_sk_rebuild_header, |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 3e70faab2989..64f011cc4491 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -35,8 +35,8 @@ | |||
35 | 35 | ||
36 | /* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */ | 36 | /* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */ |
37 | 37 | ||
38 | static struct inet_connection_sock_af_ops dccp_ipv6_mapped; | 38 | static const struct inet_connection_sock_af_ops dccp_ipv6_mapped; |
39 | static struct inet_connection_sock_af_ops dccp_ipv6_af_ops; | 39 | static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops; |
40 | 40 | ||
41 | static void dccp_v6_hash(struct sock *sk) | 41 | static void dccp_v6_hash(struct sock *sk) |
42 | { | 42 | { |
@@ -1055,7 +1055,7 @@ failure: | |||
1055 | return err; | 1055 | return err; |
1056 | } | 1056 | } |
1057 | 1057 | ||
1058 | static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { | 1058 | static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { |
1059 | .queue_xmit = inet6_csk_xmit, | 1059 | .queue_xmit = inet6_csk_xmit, |
1060 | .send_check = dccp_v6_send_check, | 1060 | .send_check = dccp_v6_send_check, |
1061 | .rebuild_header = inet6_sk_rebuild_header, | 1061 | .rebuild_header = inet6_sk_rebuild_header, |
@@ -1076,7 +1076,7 @@ static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { | |||
1076 | /* | 1076 | /* |
1077 | * DCCP over IPv4 via INET6 API | 1077 | * DCCP over IPv4 via INET6 API |
1078 | */ | 1078 | */ |
1079 | static struct inet_connection_sock_af_ops dccp_ipv6_mapped = { | 1079 | static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { |
1080 | .queue_xmit = ip_queue_xmit, | 1080 | .queue_xmit = ip_queue_xmit, |
1081 | .send_check = dccp_v4_send_check, | 1081 | .send_check = dccp_v4_send_check, |
1082 | .rebuild_header = inet_sk_rebuild_header, | 1082 | .rebuild_header = inet_sk_rebuild_header, |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 1bca9205104e..923db06c7e55 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -124,7 +124,7 @@ EXPORT_SYMBOL_GPL(dccp_done); | |||
124 | 124 | ||
125 | const char *dccp_packet_name(const int type) | 125 | const char *dccp_packet_name(const int type) |
126 | { | 126 | { |
127 | static const char *dccp_packet_names[] = { | 127 | static const char *const dccp_packet_names[] = { |
128 | [DCCP_PKT_REQUEST] = "REQUEST", | 128 | [DCCP_PKT_REQUEST] = "REQUEST", |
129 | [DCCP_PKT_RESPONSE] = "RESPONSE", | 129 | [DCCP_PKT_RESPONSE] = "RESPONSE", |
130 | [DCCP_PKT_DATA] = "DATA", | 130 | [DCCP_PKT_DATA] = "DATA", |
@@ -147,7 +147,7 @@ EXPORT_SYMBOL_GPL(dccp_packet_name); | |||
147 | 147 | ||
148 | const char *dccp_state_name(const int state) | 148 | const char *dccp_state_name(const int state) |
149 | { | 149 | { |
150 | static char *dccp_state_names[] = { | 150 | static const char *const dccp_state_names[] = { |
151 | [DCCP_OPEN] = "OPEN", | 151 | [DCCP_OPEN] = "OPEN", |
152 | [DCCP_REQUESTING] = "REQUESTING", | 152 | [DCCP_REQUESTING] = "REQUESTING", |
153 | [DCCP_PARTOPEN] = "PARTOPEN", | 153 | [DCCP_PARTOPEN] = "PARTOPEN", |
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index 923786bd6d01..794b5bf95af1 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c | |||
@@ -59,7 +59,7 @@ static int dn_phase3_output(struct sk_buff *); | |||
59 | /* | 59 | /* |
60 | * For talking to broadcast devices: Ethernet & PPP | 60 | * For talking to broadcast devices: Ethernet & PPP |
61 | */ | 61 | */ |
62 | static struct neigh_ops dn_long_ops = { | 62 | static const struct neigh_ops dn_long_ops = { |
63 | .family = AF_DECnet, | 63 | .family = AF_DECnet, |
64 | .error_report = dn_long_error_report, | 64 | .error_report = dn_long_error_report, |
65 | .output = dn_long_output, | 65 | .output = dn_long_output, |
@@ -71,7 +71,7 @@ static struct neigh_ops dn_long_ops = { | |||
71 | /* | 71 | /* |
72 | * For talking to pointopoint and multidrop devices: DDCMP and X.25 | 72 | * For talking to pointopoint and multidrop devices: DDCMP and X.25 |
73 | */ | 73 | */ |
74 | static struct neigh_ops dn_short_ops = { | 74 | static const struct neigh_ops dn_short_ops = { |
75 | .family = AF_DECnet, | 75 | .family = AF_DECnet, |
76 | .error_report = dn_short_error_report, | 76 | .error_report = dn_short_error_report, |
77 | .output = dn_short_output, | 77 | .output = dn_short_output, |
@@ -83,7 +83,7 @@ static struct neigh_ops dn_short_ops = { | |||
83 | /* | 83 | /* |
84 | * For talking to DECnet phase III nodes | 84 | * For talking to DECnet phase III nodes |
85 | */ | 85 | */ |
86 | static struct neigh_ops dn_phase3_ops = { | 86 | static const struct neigh_ops dn_phase3_ops = { |
87 | .family = AF_DECnet, | 87 | .family = AF_DECnet, |
88 | .error_report = dn_short_error_report, /* Can use short version here */ | 88 | .error_report = dn_short_error_report, /* Can use short version here */ |
89 | .output = dn_phase3_output, | 89 | .output = dn_phase3_output, |
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 1d6ca8a98dc6..9383d3e5a1ab 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -774,7 +774,7 @@ static int dn_rt_bug(struct sk_buff *skb) | |||
774 | 774 | ||
775 | kfree_skb(skb); | 775 | kfree_skb(skb); |
776 | 776 | ||
777 | return NET_RX_BAD; | 777 | return NET_RX_DROP; |
778 | } | 778 | } |
779 | 779 | ||
780 | static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) | 780 | static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) |
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 41055f33d28a..4b0ea0540442 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h | |||
@@ -169,13 +169,13 @@ struct net_device *dsa_slave_create(struct dsa_switch *ds, | |||
169 | int port, char *name); | 169 | int port, char *name); |
170 | 170 | ||
171 | /* tag_dsa.c */ | 171 | /* tag_dsa.c */ |
172 | int dsa_xmit(struct sk_buff *skb, struct net_device *dev); | 172 | netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev); |
173 | 173 | ||
174 | /* tag_edsa.c */ | 174 | /* tag_edsa.c */ |
175 | int edsa_xmit(struct sk_buff *skb, struct net_device *dev); | 175 | netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev); |
176 | 176 | ||
177 | /* tag_trailer.c */ | 177 | /* tag_trailer.c */ |
178 | int trailer_xmit(struct sk_buff *skb, struct net_device *dev); | 178 | netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev); |
179 | 179 | ||
180 | 180 | ||
181 | #endif | 181 | #endif |
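Editor's note: switching the DSA tag transmit hooks from int to netdev_tx_t documents that they must return the NETDEV_TX_* codes rather than arbitrary integers. A minimal sketch of the expected shape of such a function; it is hypothetical and deliberately omits the per-tag details of the three implementations that follow.

static netdev_tx_t hyp_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *nskb;

	nskb = skb_realloc_headroom(skb, 4);	/* room for a 4-byte tag */
	kfree_skb(skb);				/* original skb is always consumed */
	if (!nskb)
		return NETDEV_TX_OK;		/* dropped, but still "consumed" */

	/* ... insert the switch tag and queue nskb on the parent port ... */

	return NETDEV_TX_OK;			/* never an arbitrary int */
}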
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index 8fa25bafe6ca..cdf2d28a0297 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #define DSA_HLEN 4 | 16 | #define DSA_HLEN 4 |
17 | 17 | ||
18 | int dsa_xmit(struct sk_buff *skb, struct net_device *dev) | 18 | netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev) |
19 | { | 19 | { |
20 | struct dsa_slave_priv *p = netdev_priv(dev); | 20 | struct dsa_slave_priv *p = netdev_priv(dev); |
21 | u8 *dsa_header; | 21 | u8 *dsa_header; |
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 815607bd286f..8f53948cff4f 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #define DSA_HLEN 4 | 16 | #define DSA_HLEN 4 |
17 | #define EDSA_HLEN 8 | 17 | #define EDSA_HLEN 8 |
18 | 18 | ||
19 | int edsa_xmit(struct sk_buff *skb, struct net_device *dev) | 19 | netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev) |
20 | { | 20 | { |
21 | struct dsa_slave_priv *p = netdev_priv(dev); | 21 | struct dsa_slave_priv *p = netdev_priv(dev); |
22 | u8 *edsa_header; | 22 | u8 *edsa_header; |
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 1c3e30c38b86..a85c829853c0 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/netdevice.h> | 13 | #include <linux/netdevice.h> |
14 | #include "dsa_priv.h" | 14 | #include "dsa_priv.h" |
15 | 15 | ||
16 | int trailer_xmit(struct sk_buff *skb, struct net_device *dev) | 16 | netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) |
17 | { | 17 | { |
18 | struct dsa_slave_priv *p = netdev_priv(dev); | 18 | struct dsa_slave_priv *p = netdev_priv(dev); |
19 | struct sk_buff *nskb; | 19 | struct sk_buff *nskb; |
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index f0bbc57926cd..0e0254fd767d 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c | |||
@@ -1073,7 +1073,7 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet | |||
1073 | skb->protocol = htons(ETH_P_IP); | 1073 | skb->protocol = htons(ETH_P_IP); |
1074 | skb_pull(skb, sizeof(struct ec_framehdr)); | 1074 | skb_pull(skb, sizeof(struct ec_framehdr)); |
1075 | netif_rx(skb); | 1075 | netif_rx(skb); |
1076 | return 0; | 1076 | return NET_RX_SUCCESS; |
1077 | } | 1077 | } |
1078 | 1078 | ||
1079 | sk = ec_listening_socket(hdr->port, hdr->src_stn, hdr->src_net); | 1079 | sk = ec_listening_socket(hdr->port, hdr->src_stn, hdr->src_net); |
@@ -1084,7 +1084,7 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet | |||
1084 | hdr->port)) | 1084 | hdr->port)) |
1085 | goto drop; | 1085 | goto drop; |
1086 | 1086 | ||
1087 | return 0; | 1087 | return NET_RX_SUCCESS; |
1088 | 1088 | ||
1089 | drop: | 1089 | drop: |
1090 | kfree_skb(skb); | 1090 | kfree_skb(skb); |
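Editor's note: econet_rcv() is a packet_type handler, and these hunks (like the dn_route.c hunk earlier) replace literal return values with the symbolic NET_RX_* codes. The general shape of such a handler, with hyp_ placeholders standing in for protocol specifics:

static int hyp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	/* refuse frames too short to carry a 4-byte protocol header */
	if (!pskb_may_pull(skb, 4))
		goto drop;

	/* ... demultiplex to a bound socket or hand off via netif_rx() ... */
	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}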
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile index f99338a26100..4068a9f5113e 100644 --- a/net/ieee802154/Makefile +++ b/net/ieee802154/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | obj-$(CONFIG_IEEE802154) += nl802154.o af_802154.o | 1 | obj-$(CONFIG_IEEE802154) += nl802154.o af_802154.o wpan-class.o |
2 | nl802154-y := netlink.o nl_policy.o | 2 | nl802154-y := netlink.o nl_policy.o |
3 | af_802154-y := af_ieee802154.o raw.o dgram.o | 3 | af_802154-y := af_ieee802154.o raw.o dgram.o |
4 | 4 | ||
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c index af661805b9fa..cd949d5e451b 100644 --- a/net/ieee802154/af_ieee802154.c +++ b/net/ieee802154/af_ieee802154.c | |||
@@ -34,8 +34,8 @@ | |||
34 | #include <net/tcp_states.h> | 34 | #include <net/tcp_states.h> |
35 | #include <net/route.h> | 35 | #include <net/route.h> |
36 | 36 | ||
37 | #include <net/ieee802154/af_ieee802154.h> | 37 | #include <net/af_ieee802154.h> |
38 | #include <net/ieee802154/netdevice.h> | 38 | #include <net/ieee802154_netdev.h> |
39 | 39 | ||
40 | #include "af802154.h" | 40 | #include "af802154.h" |
41 | 41 | ||
@@ -147,9 +147,7 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg, | |||
147 | dev_load(sock_net(sk), ifr.ifr_name); | 147 | dev_load(sock_net(sk), ifr.ifr_name); |
148 | dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); | 148 | dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); |
149 | 149 | ||
150 | if ((dev->type == ARPHRD_IEEE802154 || | 150 | if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl) |
151 | dev->type == ARPHRD_IEEE802154_PHY) && | ||
152 | dev->netdev_ops->ndo_do_ioctl) | ||
153 | ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); | 151 | ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); |
154 | 152 | ||
155 | if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq))) | 153 | if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq))) |
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index ba8b214dda8f..77ae6852b93d 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c | |||
@@ -26,9 +26,9 @@ | |||
26 | #include <linux/if_arp.h> | 26 | #include <linux/if_arp.h> |
27 | #include <linux/list.h> | 27 | #include <linux/list.h> |
28 | #include <net/sock.h> | 28 | #include <net/sock.h> |
29 | #include <net/ieee802154/af_ieee802154.h> | 29 | #include <net/af_ieee802154.h> |
30 | #include <net/ieee802154/mac_def.h> | 30 | #include <net/ieee802154.h> |
31 | #include <net/ieee802154/netdevice.h> | 31 | #include <net/ieee802154_netdev.h> |
32 | 32 | ||
33 | #include <asm/ioctls.h> | 33 | #include <asm/ioctls.h> |
34 | 34 | ||
@@ -40,9 +40,11 @@ static DEFINE_RWLOCK(dgram_lock); | |||
40 | struct dgram_sock { | 40 | struct dgram_sock { |
41 | struct sock sk; | 41 | struct sock sk; |
42 | 42 | ||
43 | int bound; | ||
44 | struct ieee802154_addr src_addr; | 43 | struct ieee802154_addr src_addr; |
45 | struct ieee802154_addr dst_addr; | 44 | struct ieee802154_addr dst_addr; |
45 | |||
46 | unsigned bound:1; | ||
47 | unsigned want_ack:1; | ||
46 | }; | 48 | }; |
47 | 49 | ||
48 | static inline struct dgram_sock *dgram_sk(const struct sock *sk) | 50 | static inline struct dgram_sock *dgram_sk(const struct sock *sk) |
@@ -50,7 +52,6 @@ static inline struct dgram_sock *dgram_sk(const struct sock *sk) | |||
50 | return container_of(sk, struct dgram_sock, sk); | 52 | return container_of(sk, struct dgram_sock, sk); |
51 | } | 53 | } |
52 | 54 | ||
53 | |||
54 | static void dgram_hash(struct sock *sk) | 55 | static void dgram_hash(struct sock *sk) |
55 | { | 56 | { |
56 | write_lock_bh(&dgram_lock); | 57 | write_lock_bh(&dgram_lock); |
@@ -73,6 +74,7 @@ static int dgram_init(struct sock *sk) | |||
73 | 74 | ||
74 | ro->dst_addr.addr_type = IEEE802154_ADDR_LONG; | 75 | ro->dst_addr.addr_type = IEEE802154_ADDR_LONG; |
75 | ro->dst_addr.pan_id = 0xffff; | 76 | ro->dst_addr.pan_id = 0xffff; |
77 | ro->want_ack = 1; | ||
76 | memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr)); | 78 | memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr)); |
77 | return 0; | 79 | return 0; |
78 | } | 80 | } |
@@ -86,18 +88,18 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len) | |||
86 | { | 88 | { |
87 | struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr; | 89 | struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr; |
88 | struct dgram_sock *ro = dgram_sk(sk); | 90 | struct dgram_sock *ro = dgram_sk(sk); |
89 | int err = 0; | 91 | int err = -EINVAL; |
90 | struct net_device *dev; | 92 | struct net_device *dev; |
91 | 93 | ||
94 | lock_sock(sk); | ||
95 | |||
92 | ro->bound = 0; | 96 | ro->bound = 0; |
93 | 97 | ||
94 | if (len < sizeof(*addr)) | 98 | if (len < sizeof(*addr)) |
95 | return -EINVAL; | 99 | goto out; |
96 | 100 | ||
97 | if (addr->family != AF_IEEE802154) | 101 | if (addr->family != AF_IEEE802154) |
98 | return -EINVAL; | 102 | goto out; |
99 | |||
100 | lock_sock(sk); | ||
101 | 103 | ||
102 | dev = ieee802154_get_dev(sock_net(sk), &addr->addr); | 104 | dev = ieee802154_get_dev(sock_net(sk), &addr->addr); |
103 | if (!dev) { | 105 | if (!dev) { |
@@ -113,6 +115,7 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len) | |||
113 | memcpy(&ro->src_addr, &addr->addr, sizeof(struct ieee802154_addr)); | 115 | memcpy(&ro->src_addr, &addr->addr, sizeof(struct ieee802154_addr)); |
114 | 116 | ||
115 | ro->bound = 1; | 117 | ro->bound = 1; |
118 | err = 0; | ||
116 | out_put: | 119 | out_put: |
117 | dev_put(dev); | 120 | dev_put(dev); |
118 | out: | 121 | out: |
@@ -235,7 +238,10 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
235 | 238 | ||
236 | skb_reset_network_header(skb); | 239 | skb_reset_network_header(skb); |
237 | 240 | ||
238 | mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA | MAC_CB_FLAG_ACKREQ; | 241 | mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; |
242 | if (ro->want_ack) | ||
243 | mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ; | ||
244 | |||
239 | mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev); | 245 | mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev); |
240 | err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr, | 246 | err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr, |
241 | ro->bound ? &ro->src_addr : NULL, size); | 247 | ro->bound ? &ro->src_addr : NULL, size); |
@@ -380,13 +386,59 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb) | |||
380 | static int dgram_getsockopt(struct sock *sk, int level, int optname, | 386 | static int dgram_getsockopt(struct sock *sk, int level, int optname, |
381 | char __user *optval, int __user *optlen) | 387 | char __user *optval, int __user *optlen) |
382 | { | 388 | { |
383 | return -EOPNOTSUPP; | 389 | struct dgram_sock *ro = dgram_sk(sk); |
390 | |||
391 | int val, len; | ||
392 | |||
393 | if (level != SOL_IEEE802154) | ||
394 | return -EOPNOTSUPP; | ||
395 | |||
396 | if (get_user(len, optlen)) | ||
397 | return -EFAULT; | ||
398 | |||
399 | len = min_t(unsigned int, len, sizeof(int)); | ||
400 | |||
401 | switch (optname) { | ||
402 | case WPAN_WANTACK: | ||
403 | val = ro->want_ack; | ||
404 | break; | ||
405 | default: | ||
406 | return -ENOPROTOOPT; | ||
407 | } | ||
408 | |||
409 | if (put_user(len, optlen)) | ||
410 | return -EFAULT; | ||
411 | if (copy_to_user(optval, &val, len)) | ||
412 | return -EFAULT; | ||
413 | return 0; | ||
384 | } | 414 | } |
385 | 415 | ||
386 | static int dgram_setsockopt(struct sock *sk, int level, int optname, | 416 | static int dgram_setsockopt(struct sock *sk, int level, int optname, |
387 | char __user *optval, int __user optlen) | 417 | char __user *optval, int __user optlen) |
388 | { | 418 | { |
389 | return -EOPNOTSUPP; | 419 | struct dgram_sock *ro = dgram_sk(sk); |
420 | int val; | ||
421 | int err = 0; | ||
422 | |||
423 | if (optlen < sizeof(int)) | ||
424 | return -EINVAL; | ||
425 | |||
426 | if (get_user(val, (int __user *)optval)) | ||
427 | return -EFAULT; | ||
428 | |||
429 | lock_sock(sk); | ||
430 | |||
431 | switch (optname) { | ||
432 | case WPAN_WANTACK: | ||
433 | ro->want_ack = !!val; | ||
434 | break; | ||
435 | default: | ||
436 | err = -ENOPROTOOPT; | ||
437 | break; | ||
438 | } | ||
439 | |||
440 | release_sock(sk); | ||
441 | return err; | ||
390 | } | 442 | } |
391 | 443 | ||
392 | struct proto ieee802154_dgram_prot = { | 444 | struct proto ieee802154_dgram_prot = { |
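Editor's note: the datagram socket now carries a per-socket want_ack flag, exposed as the WPAN_WANTACK option at SOL_IEEE802154 and enabled by default in dgram_init(). A userspace sketch follows; the numeric values are taken from the kernel headers of this era (PF_IEEE802154 from linux/socket.h, SOL_IEEE802154 and WPAN_WANTACK from af_ieee802154.h, which was not yet a stable uapi header), so they should be checked against the tree in use.

#include <stdio.h>
#include <sys/socket.h>

#ifndef PF_IEEE802154
#define PF_IEEE802154	36	/* assumed: linux/socket.h of this era */
#endif
#define SOL_IEEE802154	0	/* assumed: net/af_ieee802154.h */
#define WPAN_WANTACK	0	/* assumed: net/af_ieee802154.h */

int main(void)
{
	int fd = socket(PF_IEEE802154, SOCK_DGRAM, 0);
	int off = 0, val;
	socklen_t len = sizeof(val);

	if (fd < 0) {
		perror("socket");	/* kernel built without 802.15.4 support */
		return 1;
	}
	/* turn off the per-frame acknowledgement request */
	if (setsockopt(fd, SOL_IEEE802154, WPAN_WANTACK, &off, sizeof(off)) < 0)
		perror("setsockopt");
	if (getsockopt(fd, SOL_IEEE802154, WPAN_WANTACK, &val, &len) == 0)
		printf("want_ack = %d\n", val);
	return 0;
}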
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c index 27eda9fdf3c2..2106ecbf0308 100644 --- a/net/ieee802154/netlink.c +++ b/net/ieee802154/netlink.c | |||
@@ -19,6 +19,7 @@ | |||
19 | * Written by: | 19 | * Written by: |
20 | * Sergey Lapin <slapin@ossfans.org> | 20 | * Sergey Lapin <slapin@ossfans.org> |
21 | * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> | 21 | * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> |
22 | * Maxim Osipov <maxim.osipov@siemens.com> | ||
22 | */ | 23 | */ |
23 | 24 | ||
24 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
@@ -26,10 +27,12 @@ | |||
26 | #include <linux/netdevice.h> | 27 | #include <linux/netdevice.h> |
27 | #include <net/netlink.h> | 28 | #include <net/netlink.h> |
28 | #include <net/genetlink.h> | 29 | #include <net/genetlink.h> |
30 | #include <net/sock.h> | ||
29 | #include <linux/nl802154.h> | 31 | #include <linux/nl802154.h> |
30 | #include <net/ieee802154/af_ieee802154.h> | 32 | #include <net/af_ieee802154.h> |
31 | #include <net/ieee802154/nl802154.h> | 33 | #include <net/nl802154.h> |
32 | #include <net/ieee802154/netdevice.h> | 34 | #include <net/ieee802154.h> |
35 | #include <net/ieee802154_netdev.h> | ||
33 | 36 | ||
34 | static unsigned int ieee802154_seq_num; | 37 | static unsigned int ieee802154_seq_num; |
35 | 38 | ||
@@ -73,7 +76,7 @@ static int ieee802154_nl_finish(struct sk_buff *msg) | |||
73 | /* XXX: nlh is right at the start of msg */ | 76 | /* XXX: nlh is right at the start of msg */ |
74 | void *hdr = genlmsg_data(NLMSG_DATA(msg->data)); | 77 | void *hdr = genlmsg_data(NLMSG_DATA(msg->data)); |
75 | 78 | ||
76 | if (!genlmsg_end(msg, hdr)) | 79 | if (genlmsg_end(msg, hdr) < 0) |
77 | goto out; | 80 | goto out; |
78 | 81 | ||
79 | return genlmsg_multicast(msg, 0, ieee802154_coord_mcgrp.id, | 82 | return genlmsg_multicast(msg, 0, ieee802154_coord_mcgrp.id, |
@@ -229,7 +232,7 @@ nla_put_failure: | |||
229 | EXPORT_SYMBOL(ieee802154_nl_beacon_indic); | 232 | EXPORT_SYMBOL(ieee802154_nl_beacon_indic); |
230 | 233 | ||
231 | int ieee802154_nl_scan_confirm(struct net_device *dev, | 234 | int ieee802154_nl_scan_confirm(struct net_device *dev, |
232 | u8 status, u8 scan_type, u32 unscanned, | 235 | u8 status, u8 scan_type, u32 unscanned, u8 page, |
233 | u8 *edl/* , struct list_head *pan_desc_list */) | 236 | u8 *edl/* , struct list_head *pan_desc_list */) |
234 | { | 237 | { |
235 | struct sk_buff *msg; | 238 | struct sk_buff *msg; |
@@ -248,6 +251,7 @@ int ieee802154_nl_scan_confirm(struct net_device *dev, | |||
248 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | 251 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); |
249 | NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type); | 252 | NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type); |
250 | NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned); | 253 | NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned); |
254 | NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page); | ||
251 | 255 | ||
252 | if (edl) | 256 | if (edl) |
253 | NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl); | 257 | NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl); |
@@ -260,6 +264,60 @@ nla_put_failure: | |||
260 | } | 264 | } |
261 | EXPORT_SYMBOL(ieee802154_nl_scan_confirm); | 265 | EXPORT_SYMBOL(ieee802154_nl_scan_confirm); |
262 | 266 | ||
267 | int ieee802154_nl_start_confirm(struct net_device *dev, u8 status) | ||
268 | { | ||
269 | struct sk_buff *msg; | ||
270 | |||
271 | pr_debug("%s\n", __func__); | ||
272 | |||
273 | msg = ieee802154_nl_create(0, IEEE802154_START_CONF); | ||
274 | if (!msg) | ||
275 | return -ENOBUFS; | ||
276 | |||
277 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
278 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
279 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
280 | dev->dev_addr); | ||
281 | |||
282 | NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); | ||
283 | |||
284 | return ieee802154_nl_finish(msg); | ||
285 | |||
286 | nla_put_failure: | ||
287 | nlmsg_free(msg); | ||
288 | return -ENOBUFS; | ||
289 | } | ||
290 | EXPORT_SYMBOL(ieee802154_nl_start_confirm); | ||
291 | |||
292 | static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid, | ||
293 | u32 seq, int flags, struct net_device *dev) | ||
294 | { | ||
295 | void *hdr; | ||
296 | |||
297 | pr_debug("%s\n", __func__); | ||
298 | |||
299 | hdr = genlmsg_put(msg, 0, seq, &ieee802154_coordinator_family, flags, | ||
300 | IEEE802154_LIST_IFACE); | ||
301 | if (!hdr) | ||
302 | goto out; | ||
303 | |||
304 | NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); | ||
305 | NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); | ||
306 | |||
307 | NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, | ||
308 | dev->dev_addr); | ||
309 | NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, | ||
310 | ieee802154_mlme_ops(dev)->get_short_addr(dev)); | ||
311 | NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID, | ||
312 | ieee802154_mlme_ops(dev)->get_pan_id(dev)); | ||
313 | return genlmsg_end(msg, hdr); | ||
314 | |||
315 | nla_put_failure: | ||
316 | genlmsg_cancel(msg, hdr); | ||
317 | out: | ||
318 | return -EMSGSIZE; | ||
319 | } | ||
320 | |||
263 | /* Requests from userspace */ | 321 | /* Requests from userspace */ |
264 | static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) | 322 | static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) |
265 | { | 323 | { |
@@ -272,7 +330,7 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) | |||
272 | dev = dev_get_by_name(&init_net, name); | 330 | dev = dev_get_by_name(&init_net, name); |
273 | } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) | 331 | } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) |
274 | dev = dev_get_by_index(&init_net, | 332 | dev = dev_get_by_index(&init_net, |
275 | nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX])); | 333 | nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX])); |
276 | else | 334 | else |
277 | return NULL; | 335 | return NULL; |
278 | 336 | ||
@@ -292,6 +350,7 @@ static int ieee802154_associate_req(struct sk_buff *skb, | |||
292 | { | 350 | { |
293 | struct net_device *dev; | 351 | struct net_device *dev; |
294 | struct ieee802154_addr addr; | 352 | struct ieee802154_addr addr; |
353 | u8 page; | ||
295 | int ret = -EINVAL; | 354 | int ret = -EINVAL; |
296 | 355 | ||
297 | if (!info->attrs[IEEE802154_ATTR_CHANNEL] || | 356 | if (!info->attrs[IEEE802154_ATTR_CHANNEL] || |
@@ -317,8 +376,14 @@ static int ieee802154_associate_req(struct sk_buff *skb, | |||
317 | } | 376 | } |
318 | addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); | 377 | addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); |
319 | 378 | ||
379 | if (info->attrs[IEEE802154_ATTR_PAGE]) | ||
380 | page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); | ||
381 | else | ||
382 | page = 0; | ||
383 | |||
320 | ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr, | 384 | ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr, |
321 | nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]), | 385 | nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]), |
386 | page, | ||
322 | nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY])); | 387 | nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY])); |
323 | 388 | ||
324 | dev_put(dev); | 389 | dev_put(dev); |
@@ -401,6 +466,7 @@ static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) | |||
401 | struct ieee802154_addr addr; | 466 | struct ieee802154_addr addr; |
402 | 467 | ||
403 | u8 channel, bcn_ord, sf_ord; | 468 | u8 channel, bcn_ord, sf_ord; |
469 | u8 page; | ||
404 | int pan_coord, blx, coord_realign; | 470 | int pan_coord, blx, coord_realign; |
405 | int ret; | 471 | int ret; |
406 | 472 | ||
@@ -431,7 +497,19 @@ static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) | |||
431 | blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]); | 497 | blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]); |
432 | coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]); | 498 | coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]); |
433 | 499 | ||
434 | ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, | 500 | if (info->attrs[IEEE802154_ATTR_PAGE]) |
501 | page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); | ||
502 | else | ||
503 | page = 0; | ||
504 | |||
505 | |||
506 | if (addr.short_addr == IEEE802154_ADDR_BROADCAST) { | ||
507 | ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS); | ||
508 | dev_put(dev); | ||
509 | return -EINVAL; | ||
510 | } | ||
511 | |||
512 | ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page, | ||
435 | bcn_ord, sf_ord, pan_coord, blx, coord_realign); | 513 | bcn_ord, sf_ord, pan_coord, blx, coord_realign); |
436 | 514 | ||
437 | dev_put(dev); | 515 | dev_put(dev); |
@@ -445,6 +523,7 @@ static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info) | |||
445 | u8 type; | 523 | u8 type; |
446 | u32 channels; | 524 | u32 channels; |
447 | u8 duration; | 525 | u8 duration; |
526 | u8 page; | ||
448 | 527 | ||
449 | if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] || | 528 | if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] || |
450 | !info->attrs[IEEE802154_ATTR_CHANNELS] || | 529 | !info->attrs[IEEE802154_ATTR_CHANNELS] || |
@@ -459,13 +538,80 @@ static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info) | |||
459 | channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]); | 538 | channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]); |
460 | duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]); | 539 | duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]); |
461 | 540 | ||
462 | ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, | 541 | if (info->attrs[IEEE802154_ATTR_PAGE]) |
542 | page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); | ||
543 | else | ||
544 | page = 0; | ||
545 | |||
546 | |||
547 | ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page, | ||
463 | duration); | 548 | duration); |
464 | 549 | ||
465 | dev_put(dev); | 550 | dev_put(dev); |
466 | return ret; | 551 | return ret; |
467 | } | 552 | } |
468 | 553 | ||
554 | static int ieee802154_list_iface(struct sk_buff *skb, | ||
555 | struct genl_info *info) | ||
556 | { | ||
557 | /* Request for interface name, index, type, IEEE address, | ||
558 | PAN Id, short address */ | ||
559 | struct sk_buff *msg; | ||
560 | struct net_device *dev = NULL; | ||
561 | int rc = -ENOBUFS; | ||
562 | |||
563 | pr_debug("%s\n", __func__); | ||
564 | |||
565 | dev = ieee802154_nl_get_dev(info); | ||
566 | if (!dev) | ||
567 | return -ENODEV; | ||
568 | |||
569 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
570 | if (!msg) | ||
571 | goto out_dev; | ||
572 | |||
573 | rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq, | ||
574 | 0, dev); | ||
575 | if (rc < 0) | ||
576 | goto out_free; | ||
577 | |||
578 | dev_put(dev); | ||
579 | |||
580 | return genlmsg_unicast(&init_net, msg, info->snd_pid); | ||
581 | out_free: | ||
582 | nlmsg_free(msg); | ||
583 | out_dev: | ||
584 | dev_put(dev); | ||
585 | return rc; | ||
586 | |||
587 | } | ||
588 | |||
589 | static int ieee802154_dump_iface(struct sk_buff *skb, | ||
590 | struct netlink_callback *cb) | ||
591 | { | ||
592 | struct net *net = sock_net(skb->sk); | ||
593 | struct net_device *dev; | ||
594 | int idx; | ||
595 | int s_idx = cb->args[0]; | ||
596 | |||
597 | pr_debug("%s\n", __func__); | ||
598 | |||
599 | idx = 0; | ||
600 | for_each_netdev(net, dev) { | ||
601 | if (idx < s_idx || (dev->type != ARPHRD_IEEE802154)) | ||
602 | goto cont; | ||
603 | |||
604 | if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid, | ||
605 | cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0) | ||
606 | break; | ||
607 | cont: | ||
608 | idx++; | ||
609 | } | ||
610 | cb->args[0] = idx; | ||
611 | |||
612 | return skb->len; | ||
613 | } | ||
614 | |||
469 | #define IEEE802154_OP(_cmd, _func) \ | 615 | #define IEEE802154_OP(_cmd, _func) \ |
470 | { \ | 616 | { \ |
471 | .cmd = _cmd, \ | 617 | .cmd = _cmd, \ |
@@ -475,12 +621,22 @@ static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info) | |||
475 | .flags = GENL_ADMIN_PERM, \ | 621 | .flags = GENL_ADMIN_PERM, \ |
476 | } | 622 | } |
477 | 623 | ||
624 | #define IEEE802154_DUMP(_cmd, _func, _dump) \ | ||
625 | { \ | ||
626 | .cmd = _cmd, \ | ||
627 | .policy = ieee802154_policy, \ | ||
628 | .doit = _func, \ | ||
629 | .dumpit = _dump, \ | ||
630 | } | ||
631 | |||
478 | static struct genl_ops ieee802154_coordinator_ops[] = { | 632 | static struct genl_ops ieee802154_coordinator_ops[] = { |
479 | IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req), | 633 | IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req), |
480 | IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp), | 634 | IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp), |
481 | IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req), | 635 | IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req), |
482 | IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req), | 636 | IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req), |
483 | IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req), | 637 | IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req), |
638 | IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface, | ||
639 | ieee802154_dump_iface), | ||
484 | }; | 640 | }; |
485 | 641 | ||
486 | static int __init ieee802154_nl_init(void) | 642 | static int __init ieee802154_nl_init(void) |
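Editor's note: several MLME requests (assoc_req, start_req, scan_req) now carry the channel page, defaulting to 0 when IEEE802154_ATTR_PAGE is absent. On the driver side the ieee802154_mlme_ops callbacks therefore grow a page argument; a hedged stub whose parameter list mirrors the scan_req call site above:

/* Illustrative stub only; parameters follow the call
 *   ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page, duration)
 * made by ieee802154_scan_req() above. */
static int hyp_scan_req(struct net_device *dev, u8 type, u32 channels,
			u8 page, u8 duration)
{
	if (page > 31)			/* IEEE 802.15.4 channel pages are 0..31 */
		return -EINVAL;

	/* ... start the scan on the given page and channel mask ... */
	return 0;
}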
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c index c7d71d1adcac..2363ebee02e7 100644 --- a/net/ieee802154/nl_policy.c +++ b/net/ieee802154/nl_policy.c | |||
@@ -24,7 +24,7 @@ | |||
24 | 24 | ||
25 | #define NLA_HW_ADDR NLA_U64 | 25 | #define NLA_HW_ADDR NLA_U64 |
26 | 26 | ||
27 | struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { | 27 | const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { |
28 | [IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, }, | 28 | [IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, }, |
29 | [IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, }, | 29 | [IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, }, |
30 | 30 | ||
@@ -33,6 +33,7 @@ struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { | |||
33 | [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, }, | 33 | [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, }, |
34 | [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, }, | 34 | [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, }, |
35 | [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, }, | 35 | [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, }, |
36 | [IEEE802154_ATTR_PAGE] = { .type = NLA_U8, }, | ||
36 | [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, }, | 37 | [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, }, |
37 | [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, }, | 38 | [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, }, |
38 | [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, }, | 39 | [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, }, |
@@ -50,3 +51,4 @@ struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = { | |||
50 | [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, }, | 51 | [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, }, |
51 | [IEEE802154_ATTR_ED_LIST] = { .len = 27 }, | 52 | [IEEE802154_ATTR_ED_LIST] = { .len = 27 }, |
52 | }; | 53 | }; |
54 | |||
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c index 9315977c4c61..4681501aae93 100644 --- a/net/ieee802154/raw.c +++ b/net/ieee802154/raw.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #include <linux/if_arp.h> | 26 | #include <linux/if_arp.h> |
27 | #include <linux/list.h> | 27 | #include <linux/list.h> |
28 | #include <net/sock.h> | 28 | #include <net/sock.h> |
29 | #include <net/ieee802154/af_ieee802154.h> | 29 | #include <net/af_ieee802154.h> |
30 | 30 | ||
31 | #include "af802154.h" | 31 | #include "af802154.h" |
32 | 32 | ||
@@ -74,8 +74,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int len) | |||
74 | goto out; | 74 | goto out; |
75 | } | 75 | } |
76 | 76 | ||
77 | if (dev->type != ARPHRD_IEEE802154_PHY && | 77 | if (dev->type != ARPHRD_IEEE802154) { |
78 | dev->type != ARPHRD_IEEE802154) { | ||
79 | err = -ENODEV; | 78 | err = -ENODEV; |
80 | goto out_put; | 79 | goto out_put; |
81 | } | 80 | } |
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c new file mode 100644 index 000000000000..f306604da67a --- /dev/null +++ b/net/ieee802154/wpan-class.c | |||
@@ -0,0 +1,159 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007, 2008, 2009 Siemens AG | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 | ||
6 | * as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
15 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/device.h> | ||
22 | |||
23 | #include <net/wpan-phy.h> | ||
24 | |||
25 | #define MASTER_SHOW_COMPLEX(name, format_string, args...) \ | ||
26 | static ssize_t name ## _show(struct device *dev, \ | ||
27 | struct device_attribute *attr, char *buf) \ | ||
28 | { \ | ||
29 | struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); \ | ||
30 | int ret; \ | ||
31 | \ | ||
32 | mutex_lock(&phy->pib_lock); \ | ||
33 | ret = sprintf(buf, format_string "\n", args); \ | ||
34 | mutex_unlock(&phy->pib_lock); \ | ||
35 | return ret; \ | ||
36 | } | ||
37 | |||
38 | #define MASTER_SHOW(field, format_string) \ | ||
39 | MASTER_SHOW_COMPLEX(field, format_string, phy->field) | ||
40 | |||
41 | MASTER_SHOW(current_channel, "%d"); | ||
42 | MASTER_SHOW(current_page, "%d"); | ||
43 | MASTER_SHOW(channels_supported, "%#x"); | ||
44 | MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB", | ||
45 | ((signed char) (phy->transmit_power << 2)) >> 2, | ||
46 | (phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1 ); | ||
47 | MASTER_SHOW(cca_mode, "%d"); | ||
48 | |||
49 | static struct device_attribute pmib_attrs[] = { | ||
50 | __ATTR_RO(current_channel), | ||
51 | __ATTR_RO(current_page), | ||
52 | __ATTR_RO(channels_supported), | ||
53 | __ATTR_RO(transmit_power), | ||
54 | __ATTR_RO(cca_mode), | ||
55 | {}, | ||
56 | }; | ||
57 | |||
58 | static void wpan_phy_release(struct device *d) | ||
59 | { | ||
60 | struct wpan_phy *phy = container_of(d, struct wpan_phy, dev); | ||
61 | kfree(phy); | ||
62 | } | ||
63 | |||
64 | static struct class wpan_phy_class = { | ||
65 | .name = "ieee802154", | ||
66 | .dev_release = wpan_phy_release, | ||
67 | .dev_attrs = pmib_attrs, | ||
68 | }; | ||
69 | |||
70 | static DEFINE_MUTEX(wpan_phy_mutex); | ||
71 | static int wpan_phy_idx; | ||
72 | |||
73 | static int wpan_phy_match(struct device *dev, void *data) | ||
74 | { | ||
75 | return !strcmp(dev_name(dev), (const char *)data); | ||
76 | } | ||
77 | |||
78 | struct wpan_phy *wpan_phy_find(const char *str) | ||
79 | { | ||
80 | struct device *dev; | ||
81 | |||
82 | if (WARN_ON(!str)) | ||
83 | return NULL; | ||
84 | |||
85 | dev = class_find_device(&wpan_phy_class, NULL, | ||
86 | (void *)str, wpan_phy_match); | ||
87 | if (!dev) | ||
88 | return NULL; | ||
89 | |||
90 | return container_of(dev, struct wpan_phy, dev); | ||
91 | } | ||
92 | EXPORT_SYMBOL(wpan_phy_find); | ||
93 | |||
94 | static int wpan_phy_idx_valid(int idx) | ||
95 | { | ||
96 | return idx >= 0; | ||
97 | } | ||
98 | |||
99 | struct wpan_phy *wpan_phy_alloc(size_t priv_size) | ||
100 | { | ||
101 | struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size, | ||
102 | GFP_KERNEL); | ||
103 | |||
104 | mutex_lock(&wpan_phy_mutex); | ||
105 | phy->idx = wpan_phy_idx++; | ||
106 | if (unlikely(!wpan_phy_idx_valid(phy->idx))) { | ||
107 | wpan_phy_idx--; | ||
108 | mutex_unlock(&wpan_phy_mutex); | ||
109 | kfree(phy); | ||
110 | return NULL; | ||
111 | } | ||
112 | mutex_unlock(&wpan_phy_mutex); | ||
113 | |||
114 | mutex_init(&phy->pib_lock); | ||
115 | |||
116 | device_initialize(&phy->dev); | ||
117 | dev_set_name(&phy->dev, "wpan-phy%d", phy->idx); | ||
118 | |||
119 | phy->dev.class = &wpan_phy_class; | ||
120 | |||
121 | return phy; | ||
122 | } | ||
123 | EXPORT_SYMBOL(wpan_phy_alloc); | ||
124 | |||
125 | int wpan_phy_register(struct device *parent, struct wpan_phy *phy) | ||
126 | { | ||
127 | phy->dev.parent = parent; | ||
128 | |||
129 | return device_add(&phy->dev); | ||
130 | } | ||
131 | EXPORT_SYMBOL(wpan_phy_register); | ||
132 | |||
133 | void wpan_phy_unregister(struct wpan_phy *phy) | ||
134 | { | ||
135 | device_del(&phy->dev); | ||
136 | } | ||
137 | EXPORT_SYMBOL(wpan_phy_unregister); | ||
138 | |||
139 | void wpan_phy_free(struct wpan_phy *phy) | ||
140 | { | ||
141 | put_device(&phy->dev); | ||
142 | } | ||
143 | EXPORT_SYMBOL(wpan_phy_free); | ||
144 | |||
145 | static int __init wpan_phy_class_init(void) | ||
146 | { | ||
147 | return class_register(&wpan_phy_class); | ||
148 | } | ||
149 | subsys_initcall(wpan_phy_class_init); | ||
150 | |||
151 | static void __exit wpan_phy_class_exit(void) | ||
152 | { | ||
153 | class_unregister(&wpan_phy_class); | ||
154 | } | ||
155 | module_exit(wpan_phy_class_exit); | ||
156 | |||
157 | MODULE_DESCRIPTION("IEEE 802.15.4 device class"); | ||
158 | MODULE_LICENSE("GPL v2"); | ||
159 | |||
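Editor's note: the new wpan-class.c exports a small lifecycle for PHY objects. The expected usage by a hypothetical radio driver, built only from the functions exported above; the hyp_ names, the private struct and the choice of channel 11 (the first 2.4 GHz channel) are illustrative.

struct hyp_radio_priv {
	int irq;
	/* ... driver state ... */
};

static int hyp_radio_probe(struct device *parent)
{
	struct wpan_phy *phy;
	int err;

	phy = wpan_phy_alloc(sizeof(struct hyp_radio_priv));
	if (!phy)
		return -ENOMEM;

	phy->current_channel = 11;	/* exposed via the sysfs attributes above */
	phy->current_page = 0;

	err = wpan_phy_register(parent, phy);
	if (err) {
		wpan_phy_free(phy);
		return err;
	}
	return 0;
}

static void hyp_radio_remove(struct wpan_phy *phy)
{
	wpan_phy_unregister(phy);
	wpan_phy_free(phy);
}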
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 566ea6c4321d..6c30a73f03f5 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -124,7 +124,6 @@ static struct list_head inetsw[SOCK_MAX]; | |||
124 | static DEFINE_SPINLOCK(inetsw_lock); | 124 | static DEFINE_SPINLOCK(inetsw_lock); |
125 | 125 | ||
126 | struct ipv4_config ipv4_config; | 126 | struct ipv4_config ipv4_config; |
127 | |||
128 | EXPORT_SYMBOL(ipv4_config); | 127 | EXPORT_SYMBOL(ipv4_config); |
129 | 128 | ||
130 | /* New destruction routine */ | 129 | /* New destruction routine */ |
@@ -139,12 +138,12 @@ void inet_sock_destruct(struct sock *sk) | |||
139 | sk_mem_reclaim(sk); | 138 | sk_mem_reclaim(sk); |
140 | 139 | ||
141 | if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) { | 140 | if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) { |
142 | printk("Attempt to release TCP socket in state %d %p\n", | 141 | pr_err("Attempt to release TCP socket in state %d %p\n", |
143 | sk->sk_state, sk); | 142 | sk->sk_state, sk); |
144 | return; | 143 | return; |
145 | } | 144 | } |
146 | if (!sock_flag(sk, SOCK_DEAD)) { | 145 | if (!sock_flag(sk, SOCK_DEAD)) { |
147 | printk("Attempt to release alive inet socket %p\n", sk); | 146 | pr_err("Attempt to release alive inet socket %p\n", sk); |
148 | return; | 147 | return; |
149 | } | 148 | } |
150 | 149 | ||
@@ -157,6 +156,7 @@ void inet_sock_destruct(struct sock *sk) | |||
157 | dst_release(sk->sk_dst_cache); | 156 | dst_release(sk->sk_dst_cache); |
158 | sk_refcnt_debug_dec(sk); | 157 | sk_refcnt_debug_dec(sk); |
159 | } | 158 | } |
159 | EXPORT_SYMBOL(inet_sock_destruct); | ||
160 | 160 | ||
161 | /* | 161 | /* |
162 | * The routines beyond this point handle the behaviour of an AF_INET | 162 | * The routines beyond this point handle the behaviour of an AF_INET |
@@ -219,6 +219,7 @@ out: | |||
219 | release_sock(sk); | 219 | release_sock(sk); |
220 | return err; | 220 | return err; |
221 | } | 221 | } |
222 | EXPORT_SYMBOL(inet_listen); | ||
222 | 223 | ||
223 | u32 inet_ehash_secret __read_mostly; | 224 | u32 inet_ehash_secret __read_mostly; |
224 | EXPORT_SYMBOL(inet_ehash_secret); | 225 | EXPORT_SYMBOL(inet_ehash_secret); |
@@ -435,9 +436,11 @@ int inet_release(struct socket *sock) | |||
435 | } | 436 | } |
436 | return 0; | 437 | return 0; |
437 | } | 438 | } |
439 | EXPORT_SYMBOL(inet_release); | ||
438 | 440 | ||
439 | /* It is off by default, see below. */ | 441 | /* It is off by default, see below. */ |
440 | int sysctl_ip_nonlocal_bind __read_mostly; | 442 | int sysctl_ip_nonlocal_bind __read_mostly; |
443 | EXPORT_SYMBOL(sysctl_ip_nonlocal_bind); | ||
441 | 444 | ||
442 | int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | 445 | int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
443 | { | 446 | { |
@@ -519,6 +522,7 @@ out_release_sock: | |||
519 | out: | 522 | out: |
520 | return err; | 523 | return err; |
521 | } | 524 | } |
525 | EXPORT_SYMBOL(inet_bind); | ||
522 | 526 | ||
523 | int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, | 527 | int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, |
524 | int addr_len, int flags) | 528 | int addr_len, int flags) |
@@ -532,6 +536,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, | |||
532 | return -EAGAIN; | 536 | return -EAGAIN; |
533 | return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len); | 537 | return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len); |
534 | } | 538 | } |
539 | EXPORT_SYMBOL(inet_dgram_connect); | ||
535 | 540 | ||
536 | static long inet_wait_for_connect(struct sock *sk, long timeo) | 541 | static long inet_wait_for_connect(struct sock *sk, long timeo) |
537 | { | 542 | { |
@@ -641,6 +646,7 @@ sock_error: | |||
641 | sock->state = SS_DISCONNECTING; | 646 | sock->state = SS_DISCONNECTING; |
642 | goto out; | 647 | goto out; |
643 | } | 648 | } |
649 | EXPORT_SYMBOL(inet_stream_connect); | ||
644 | 650 | ||
645 | /* | 651 | /* |
646 | * Accept a pending connection. The TCP layer now gives BSD semantics. | 652 | * Accept a pending connection. The TCP layer now gives BSD semantics. |
@@ -668,6 +674,7 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags) | |||
668 | do_err: | 674 | do_err: |
669 | return err; | 675 | return err; |
670 | } | 676 | } |
677 | EXPORT_SYMBOL(inet_accept); | ||
671 | 678 | ||
672 | 679 | ||
673 | /* | 680 | /* |
@@ -699,6 +706,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr, | |||
699 | *uaddr_len = sizeof(*sin); | 706 | *uaddr_len = sizeof(*sin); |
700 | return 0; | 707 | return 0; |
701 | } | 708 | } |
709 | EXPORT_SYMBOL(inet_getname); | ||
702 | 710 | ||
703 | int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | 711 | int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, |
704 | size_t size) | 712 | size_t size) |
@@ -711,9 +719,11 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
711 | 719 | ||
712 | return sk->sk_prot->sendmsg(iocb, sk, msg, size); | 720 | return sk->sk_prot->sendmsg(iocb, sk, msg, size); |
713 | } | 721 | } |
722 | EXPORT_SYMBOL(inet_sendmsg); | ||
714 | 723 | ||
715 | 724 | ||
716 | static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) | 725 | static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, |
726 | size_t size, int flags) | ||
717 | { | 727 | { |
718 | struct sock *sk = sock->sk; | 728 | struct sock *sk = sock->sk; |
719 | 729 | ||
@@ -780,6 +790,7 @@ int inet_shutdown(struct socket *sock, int how) | |||
780 | release_sock(sk); | 790 | release_sock(sk); |
781 | return err; | 791 | return err; |
782 | } | 792 | } |
793 | EXPORT_SYMBOL(inet_shutdown); | ||
783 | 794 | ||
784 | /* | 795 | /* |
785 | * ioctl() calls you can issue on an INET socket. Most of these are | 796 | * ioctl() calls you can issue on an INET socket. Most of these are |
@@ -798,44 +809,45 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
798 | struct net *net = sock_net(sk); | 809 | struct net *net = sock_net(sk); |
799 | 810 | ||
800 | switch (cmd) { | 811 | switch (cmd) { |
801 | case SIOCGSTAMP: | 812 | case SIOCGSTAMP: |
802 | err = sock_get_timestamp(sk, (struct timeval __user *)arg); | 813 | err = sock_get_timestamp(sk, (struct timeval __user *)arg); |
803 | break; | 814 | break; |
804 | case SIOCGSTAMPNS: | 815 | case SIOCGSTAMPNS: |
805 | err = sock_get_timestampns(sk, (struct timespec __user *)arg); | 816 | err = sock_get_timestampns(sk, (struct timespec __user *)arg); |
806 | break; | 817 | break; |
807 | case SIOCADDRT: | 818 | case SIOCADDRT: |
808 | case SIOCDELRT: | 819 | case SIOCDELRT: |
809 | case SIOCRTMSG: | 820 | case SIOCRTMSG: |
810 | err = ip_rt_ioctl(net, cmd, (void __user *)arg); | 821 | err = ip_rt_ioctl(net, cmd, (void __user *)arg); |
811 | break; | 822 | break; |
812 | case SIOCDARP: | 823 | case SIOCDARP: |
813 | case SIOCGARP: | 824 | case SIOCGARP: |
814 | case SIOCSARP: | 825 | case SIOCSARP: |
815 | err = arp_ioctl(net, cmd, (void __user *)arg); | 826 | err = arp_ioctl(net, cmd, (void __user *)arg); |
816 | break; | 827 | break; |
817 | case SIOCGIFADDR: | 828 | case SIOCGIFADDR: |
818 | case SIOCSIFADDR: | 829 | case SIOCSIFADDR: |
819 | case SIOCGIFBRDADDR: | 830 | case SIOCGIFBRDADDR: |
820 | case SIOCSIFBRDADDR: | 831 | case SIOCSIFBRDADDR: |
821 | case SIOCGIFNETMASK: | 832 | case SIOCGIFNETMASK: |
822 | case SIOCSIFNETMASK: | 833 | case SIOCSIFNETMASK: |
823 | case SIOCGIFDSTADDR: | 834 | case SIOCGIFDSTADDR: |
824 | case SIOCSIFDSTADDR: | 835 | case SIOCSIFDSTADDR: |
825 | case SIOCSIFPFLAGS: | 836 | case SIOCSIFPFLAGS: |
826 | case SIOCGIFPFLAGS: | 837 | case SIOCGIFPFLAGS: |
827 | case SIOCSIFFLAGS: | 838 | case SIOCSIFFLAGS: |
828 | err = devinet_ioctl(net, cmd, (void __user *)arg); | 839 | err = devinet_ioctl(net, cmd, (void __user *)arg); |
829 | break; | 840 | break; |
830 | default: | 841 | default: |
831 | if (sk->sk_prot->ioctl) | 842 | if (sk->sk_prot->ioctl) |
832 | err = sk->sk_prot->ioctl(sk, cmd, arg); | 843 | err = sk->sk_prot->ioctl(sk, cmd, arg); |
833 | else | 844 | else |
834 | err = -ENOIOCTLCMD; | 845 | err = -ENOIOCTLCMD; |
835 | break; | 846 | break; |
836 | } | 847 | } |
837 | return err; | 848 | return err; |
838 | } | 849 | } |
850 | EXPORT_SYMBOL(inet_ioctl); | ||
839 | 851 | ||
840 | const struct proto_ops inet_stream_ops = { | 852 | const struct proto_ops inet_stream_ops = { |
841 | .family = PF_INET, | 853 | .family = PF_INET, |
@@ -862,6 +874,7 @@ const struct proto_ops inet_stream_ops = { | |||
862 | .compat_getsockopt = compat_sock_common_getsockopt, | 874 | .compat_getsockopt = compat_sock_common_getsockopt, |
863 | #endif | 875 | #endif |
864 | }; | 876 | }; |
877 | EXPORT_SYMBOL(inet_stream_ops); | ||
865 | 878 | ||
866 | const struct proto_ops inet_dgram_ops = { | 879 | const struct proto_ops inet_dgram_ops = { |
867 | .family = PF_INET, | 880 | .family = PF_INET, |
@@ -887,6 +900,7 @@ const struct proto_ops inet_dgram_ops = { | |||
887 | .compat_getsockopt = compat_sock_common_getsockopt, | 900 | .compat_getsockopt = compat_sock_common_getsockopt, |
888 | #endif | 901 | #endif |
889 | }; | 902 | }; |
903 | EXPORT_SYMBOL(inet_dgram_ops); | ||
890 | 904 | ||
891 | /* | 905 | /* |
892 | * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without | 906 | * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without |
@@ -1016,6 +1030,7 @@ out_illegal: | |||
1016 | p->type); | 1030 | p->type); |
1017 | goto out; | 1031 | goto out; |
1018 | } | 1032 | } |
1033 | EXPORT_SYMBOL(inet_register_protosw); | ||
1019 | 1034 | ||
1020 | void inet_unregister_protosw(struct inet_protosw *p) | 1035 | void inet_unregister_protosw(struct inet_protosw *p) |
1021 | { | 1036 | { |
@@ -1031,6 +1046,7 @@ void inet_unregister_protosw(struct inet_protosw *p) | |||
1031 | synchronize_net(); | 1046 | synchronize_net(); |
1032 | } | 1047 | } |
1033 | } | 1048 | } |
1049 | EXPORT_SYMBOL(inet_unregister_protosw); | ||
1034 | 1050 | ||
1035 | /* | 1051 | /* |
1036 | * Shall we try to damage output packets if routing dev changes? | 1052 | * Shall we try to damage output packets if routing dev changes? |
@@ -1141,7 +1157,6 @@ int inet_sk_rebuild_header(struct sock *sk) | |||
1141 | 1157 | ||
1142 | return err; | 1158 | return err; |
1143 | } | 1159 | } |
1144 | |||
1145 | EXPORT_SYMBOL(inet_sk_rebuild_header); | 1160 | EXPORT_SYMBOL(inet_sk_rebuild_header); |
1146 | 1161 | ||
1147 | static int inet_gso_send_check(struct sk_buff *skb) | 1162 | static int inet_gso_send_check(struct sk_buff *skb) |
@@ -1187,6 +1202,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) | |||
1187 | int proto; | 1202 | int proto; |
1188 | int ihl; | 1203 | int ihl; |
1189 | int id; | 1204 | int id; |
1205 | unsigned int offset = 0; | ||
1190 | 1206 | ||
1191 | if (!(features & NETIF_F_V4_CSUM)) | 1207 | if (!(features & NETIF_F_V4_CSUM)) |
1192 | features &= ~NETIF_F_SG; | 1208 | features &= ~NETIF_F_SG; |
@@ -1229,7 +1245,14 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) | |||
1229 | skb = segs; | 1245 | skb = segs; |
1230 | do { | 1246 | do { |
1231 | iph = ip_hdr(skb); | 1247 | iph = ip_hdr(skb); |
1232 | iph->id = htons(id++); | 1248 | if (proto == IPPROTO_UDP) { |
1249 | iph->id = htons(id); | ||
1250 | iph->frag_off = htons(offset >> 3); | ||
1251 | if (skb->next != NULL) | ||
1252 | iph->frag_off |= htons(IP_MF); | ||
1253 | offset += (skb->len - skb->mac_len - iph->ihl * 4); | ||
1254 | } else | ||
1255 | iph->id = htons(id++); | ||
1233 | iph->tot_len = htons(skb->len - skb->mac_len); | 1256 | iph->tot_len = htons(skb->len - skb->mac_len); |
1234 | iph->check = 0; | 1257 | iph->check = 0; |
1235 | iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl); | 1258 | iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl); |
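The UDP branch added in the hunk above turns GSO segments into real IP fragments: each piece keeps the same IP ID, carries its running payload offset in the fragment-offset field (counted in 8-byte units), and sets IP_MF on every piece except the last, with the offset advanced by skb->len - skb->mac_len - ihl*4 per segment. A minimal standalone model of that bookkeeping, with made-up segment sizes and host-order flag handling (the kernel manipulates the network-order header fields directly):

        #include <stdio.h>

        #define IP_MF 0x2000u                          /* "more fragments" bit of frag_off */

        int main(void)
        {
                /* Hypothetical IP payload lengths of three GSO segments. */
                unsigned int seg_len[] = { 1472, 1472, 560 };
                unsigned int nsegs = sizeof(seg_len) / sizeof(seg_len[0]);
                unsigned int offset = 0;

                for (unsigned int i = 0; i < nsegs; i++) {
                        unsigned int frag_off = offset >> 3;   /* field counts 8-byte units */

                        if (i != nsegs - 1)                    /* the skb->next != NULL case */
                                frag_off |= IP_MF;
                        printf("fragment %u: frag_off=0x%04x len=%u\n",
                               i, frag_off, seg_len[i]);
                        offset += seg_len[i];                  /* advance past this payload */
                }
                return 0;
        }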
@@ -1361,7 +1384,6 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family, | |||
1361 | } | 1384 | } |
1362 | return rc; | 1385 | return rc; |
1363 | } | 1386 | } |
1364 | |||
1365 | EXPORT_SYMBOL_GPL(inet_ctl_sock_create); | 1387 | EXPORT_SYMBOL_GPL(inet_ctl_sock_create); |
1366 | 1388 | ||
1367 | unsigned long snmp_fold_field(void *mib[], int offt) | 1389 | unsigned long snmp_fold_field(void *mib[], int offt) |
@@ -1425,6 +1447,8 @@ static struct net_protocol tcp_protocol = { | |||
1425 | static struct net_protocol udp_protocol = { | 1447 | static struct net_protocol udp_protocol = { |
1426 | .handler = udp_rcv, | 1448 | .handler = udp_rcv, |
1427 | .err_handler = udp_err, | 1449 | .err_handler = udp_err, |
1450 | .gso_send_check = udp4_ufo_send_check, | ||
1451 | .gso_segment = udp4_ufo_fragment, | ||
1428 | .no_policy = 1, | 1452 | .no_policy = 1, |
1429 | .netns_ok = 1, | 1453 | .netns_ok = 1, |
1430 | }; | 1454 | }; |
@@ -1666,19 +1690,3 @@ static int __init ipv4_proc_init(void) | |||
1666 | 1690 | ||
1667 | MODULE_ALIAS_NETPROTO(PF_INET); | 1691 | MODULE_ALIAS_NETPROTO(PF_INET); |
1668 | 1692 | ||
1669 | EXPORT_SYMBOL(inet_accept); | ||
1670 | EXPORT_SYMBOL(inet_bind); | ||
1671 | EXPORT_SYMBOL(inet_dgram_connect); | ||
1672 | EXPORT_SYMBOL(inet_dgram_ops); | ||
1673 | EXPORT_SYMBOL(inet_getname); | ||
1674 | EXPORT_SYMBOL(inet_ioctl); | ||
1675 | EXPORT_SYMBOL(inet_listen); | ||
1676 | EXPORT_SYMBOL(inet_register_protosw); | ||
1677 | EXPORT_SYMBOL(inet_release); | ||
1678 | EXPORT_SYMBOL(inet_sendmsg); | ||
1679 | EXPORT_SYMBOL(inet_shutdown); | ||
1680 | EXPORT_SYMBOL(inet_sock_destruct); | ||
1681 | EXPORT_SYMBOL(inet_stream_connect); | ||
1682 | EXPORT_SYMBOL(inet_stream_ops); | ||
1683 | EXPORT_SYMBOL(inet_unregister_protosw); | ||
1684 | EXPORT_SYMBOL(sysctl_ip_nonlocal_bind); | ||
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 090e9991ac2a..4e80f336c0cf 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -130,7 +130,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb); | |||
130 | static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb); | 130 | static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb); |
131 | static void parp_redo(struct sk_buff *skb); | 131 | static void parp_redo(struct sk_buff *skb); |
132 | 132 | ||
133 | static struct neigh_ops arp_generic_ops = { | 133 | static const struct neigh_ops arp_generic_ops = { |
134 | .family = AF_INET, | 134 | .family = AF_INET, |
135 | .solicit = arp_solicit, | 135 | .solicit = arp_solicit, |
136 | .error_report = arp_error_report, | 136 | .error_report = arp_error_report, |
@@ -140,7 +140,7 @@ static struct neigh_ops arp_generic_ops = { | |||
140 | .queue_xmit = dev_queue_xmit, | 140 | .queue_xmit = dev_queue_xmit, |
141 | }; | 141 | }; |
142 | 142 | ||
143 | static struct neigh_ops arp_hh_ops = { | 143 | static const struct neigh_ops arp_hh_ops = { |
144 | .family = AF_INET, | 144 | .family = AF_INET, |
145 | .solicit = arp_solicit, | 145 | .solicit = arp_solicit, |
146 | .error_report = arp_error_report, | 146 | .error_report = arp_error_report, |
@@ -150,7 +150,7 @@ static struct neigh_ops arp_hh_ops = { | |||
150 | .queue_xmit = dev_queue_xmit, | 150 | .queue_xmit = dev_queue_xmit, |
151 | }; | 151 | }; |
152 | 152 | ||
153 | static struct neigh_ops arp_direct_ops = { | 153 | static const struct neigh_ops arp_direct_ops = { |
154 | .family = AF_INET, | 154 | .family = AF_INET, |
155 | .output = dev_queue_xmit, | 155 | .output = dev_queue_xmit, |
156 | .connected_output = dev_queue_xmit, | 156 | .connected_output = dev_queue_xmit, |
@@ -158,7 +158,7 @@ static struct neigh_ops arp_direct_ops = { | |||
158 | .queue_xmit = dev_queue_xmit, | 158 | .queue_xmit = dev_queue_xmit, |
159 | }; | 159 | }; |
160 | 160 | ||
161 | struct neigh_ops arp_broken_ops = { | 161 | const struct neigh_ops arp_broken_ops = { |
162 | .family = AF_INET, | 162 | .family = AF_INET, |
163 | .solicit = arp_solicit, | 163 | .solicit = arp_solicit, |
164 | .error_report = arp_error_report, | 164 | .error_report = arp_error_report, |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 63c2fa7b68c4..291bdf50a21f 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -48,7 +48,7 @@ | |||
48 | * Patrick McHardy <kaber@trash.net> | 48 | * Patrick McHardy <kaber@trash.net> |
49 | */ | 49 | */ |
50 | 50 | ||
51 | #define VERSION "0.408" | 51 | #define VERSION "0.409" |
52 | 52 | ||
53 | #include <asm/uaccess.h> | 53 | #include <asm/uaccess.h> |
54 | #include <asm/system.h> | 54 | #include <asm/system.h> |
@@ -164,6 +164,14 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn); | |||
164 | static struct tnode *halve(struct trie *t, struct tnode *tn); | 164 | static struct tnode *halve(struct trie *t, struct tnode *tn); |
165 | /* tnodes to free after resize(); protected by RTNL */ | 165 | /* tnodes to free after resize(); protected by RTNL */ |
166 | static struct tnode *tnode_free_head; | 166 | static struct tnode *tnode_free_head; |
167 | static size_t tnode_free_size; | ||
168 | |||
169 | /* | ||
170 | * synchronize_rcu after call_rcu for that many pages; it should be especially | ||
171 | * useful before resizing the root node with PREEMPT_NONE configs; the value was | ||
172 | * obtained experimentally, aiming to avoid visible slowdown. | ||
173 | */ | ||
174 | static const int sync_pages = 128; | ||
167 | 175 | ||
168 | static struct kmem_cache *fn_alias_kmem __read_mostly; | 176 | static struct kmem_cache *fn_alias_kmem __read_mostly; |
169 | static struct kmem_cache *trie_leaf_kmem __read_mostly; | 177 | static struct kmem_cache *trie_leaf_kmem __read_mostly; |
@@ -317,8 +325,7 @@ static inline void check_tnode(const struct tnode *tn) | |||
317 | static const int halve_threshold = 25; | 325 | static const int halve_threshold = 25; |
318 | static const int inflate_threshold = 50; | 326 | static const int inflate_threshold = 50; |
319 | static const int halve_threshold_root = 15; | 327 | static const int halve_threshold_root = 15; |
320 | static const int inflate_threshold_root = 25; | 328 | static const int inflate_threshold_root = 30; |
321 | |||
322 | 329 | ||
323 | static void __alias_free_mem(struct rcu_head *head) | 330 | static void __alias_free_mem(struct rcu_head *head) |
324 | { | 331 | { |
@@ -393,6 +400,8 @@ static void tnode_free_safe(struct tnode *tn) | |||
393 | BUG_ON(IS_LEAF(tn)); | 400 | BUG_ON(IS_LEAF(tn)); |
394 | tn->tnode_free = tnode_free_head; | 401 | tn->tnode_free = tnode_free_head; |
395 | tnode_free_head = tn; | 402 | tnode_free_head = tn; |
403 | tnode_free_size += sizeof(struct tnode) + | ||
404 | (sizeof(struct node *) << tn->bits); | ||
396 | } | 405 | } |
397 | 406 | ||
398 | static void tnode_free_flush(void) | 407 | static void tnode_free_flush(void) |
@@ -404,6 +413,11 @@ static void tnode_free_flush(void) | |||
404 | tn->tnode_free = NULL; | 413 | tn->tnode_free = NULL; |
405 | tnode_free(tn); | 414 | tnode_free(tn); |
406 | } | 415 | } |
416 | |||
417 | if (tnode_free_size >= PAGE_SIZE * sync_pages) { | ||
418 | tnode_free_size = 0; | ||
419 | synchronize_rcu(); | ||
420 | } | ||
407 | } | 421 | } |
408 | 422 | ||
409 | static struct leaf *leaf_new(void) | 423 | static struct leaf *leaf_new(void) |
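The two fib_trie hunks above make tnode_free_safe() account for the bytes it queues and let tnode_free_flush() force a synchronize_rcu() once the backlog reaches sync_pages pages, so long runs of deferred frees cannot pin unbounded memory behind RCU grace periods. A standalone model of that throttling idea, with a fake wait_for_grace_period() standing in for synchronize_rcu():

        #include <stdio.h>
        #include <stddef.h>

        #define PAGE_SIZE  4096u
        #define SYNC_PAGES 128u                 /* mirrors the sync_pages constant */

        static size_t backlog_bytes;            /* mirrors tnode_free_size */

        static void wait_for_grace_period(void) /* stand-in for synchronize_rcu() */
        {
                printf("grace period forced, %zu bytes were pending\n", backlog_bytes);
        }

        static void queue_deferred_free(size_t obj_size)
        {
                backlog_bytes += obj_size;      /* done in tnode_free_safe() above */
        }

        static void flush_deferred_frees(void)
        {
                /* ...actual freeing of the queued objects would happen here... */
                if (backlog_bytes >= (size_t)PAGE_SIZE * SYNC_PAGES) {
                        wait_for_grace_period();
                        backlog_bytes = 0;
                }
        }

        int main(void)
        {
                for (int i = 0; i < 20000; i++) {   /* made-up workload of 64-byte nodes */
                        queue_deferred_free(64);
                        flush_deferred_frees();
                }
                return 0;
        }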
@@ -499,14 +513,14 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, | |||
499 | rcu_assign_pointer(tn->child[i], n); | 513 | rcu_assign_pointer(tn->child[i], n); |
500 | } | 514 | } |
501 | 515 | ||
516 | #define MAX_WORK 10 | ||
502 | static struct node *resize(struct trie *t, struct tnode *tn) | 517 | static struct node *resize(struct trie *t, struct tnode *tn) |
503 | { | 518 | { |
504 | int i; | 519 | int i; |
505 | int err = 0; | ||
506 | struct tnode *old_tn; | 520 | struct tnode *old_tn; |
507 | int inflate_threshold_use; | 521 | int inflate_threshold_use; |
508 | int halve_threshold_use; | 522 | int halve_threshold_use; |
509 | int max_resize; | 523 | int max_work; |
510 | 524 | ||
511 | if (!tn) | 525 | if (!tn) |
512 | return NULL; | 526 | return NULL; |
@@ -521,18 +535,7 @@ static struct node *resize(struct trie *t, struct tnode *tn) | |||
521 | } | 535 | } |
522 | /* One child */ | 536 | /* One child */ |
523 | if (tn->empty_children == tnode_child_length(tn) - 1) | 537 | if (tn->empty_children == tnode_child_length(tn) - 1) |
524 | for (i = 0; i < tnode_child_length(tn); i++) { | 538 | goto one_child; |
525 | struct node *n; | ||
526 | |||
527 | n = tn->child[i]; | ||
528 | if (!n) | ||
529 | continue; | ||
530 | |||
531 | /* compress one level */ | ||
532 | node_set_parent(n, NULL); | ||
533 | tnode_free_safe(tn); | ||
534 | return n; | ||
535 | } | ||
536 | /* | 539 | /* |
537 | * Double as long as the resulting node has a number of | 540 | * Double as long as the resulting node has a number of |
538 | * nonempty nodes that are above the threshold. | 541 | * nonempty nodes that are above the threshold. |
@@ -601,14 +604,17 @@ static struct node *resize(struct trie *t, struct tnode *tn) | |||
601 | 604 | ||
602 | /* Keep root node larger */ | 605 | /* Keep root node larger */ |
603 | 606 | ||
604 | if (!tn->parent) | 607 | if (!node_parent((struct node*) tn)) { |
605 | inflate_threshold_use = inflate_threshold_root; | 608 | inflate_threshold_use = inflate_threshold_root; |
606 | else | 609 | halve_threshold_use = halve_threshold_root; |
610 | } | ||
611 | else { | ||
607 | inflate_threshold_use = inflate_threshold; | 612 | inflate_threshold_use = inflate_threshold; |
613 | halve_threshold_use = halve_threshold; | ||
614 | } | ||
608 | 615 | ||
609 | err = 0; | 616 | max_work = MAX_WORK; |
610 | max_resize = 10; | 617 | while ((tn->full_children > 0 && max_work-- && |
611 | while ((tn->full_children > 0 && max_resize-- && | ||
612 | 50 * (tn->full_children + tnode_child_length(tn) | 618 | 50 * (tn->full_children + tnode_child_length(tn) |
613 | - tn->empty_children) | 619 | - tn->empty_children) |
614 | >= inflate_threshold_use * tnode_child_length(tn))) { | 620 | >= inflate_threshold_use * tnode_child_length(tn))) { |
@@ -625,35 +631,19 @@ static struct node *resize(struct trie *t, struct tnode *tn) | |||
625 | } | 631 | } |
626 | } | 632 | } |
627 | 633 | ||
628 | if (max_resize < 0) { | ||
629 | if (!tn->parent) | ||
630 | pr_warning("Fix inflate_threshold_root." | ||
631 | " Now=%d size=%d bits\n", | ||
632 | inflate_threshold_root, tn->bits); | ||
633 | else | ||
634 | pr_warning("Fix inflate_threshold." | ||
635 | " Now=%d size=%d bits\n", | ||
636 | inflate_threshold, tn->bits); | ||
637 | } | ||
638 | |||
639 | check_tnode(tn); | 634 | check_tnode(tn); |
640 | 635 | ||
636 | /* Return if at least one inflate is run */ | ||
637 | if( max_work != MAX_WORK) | ||
638 | return (struct node *) tn; | ||
639 | |||
641 | /* | 640 | /* |
642 | * Halve as long as the number of empty children in this | 641 | * Halve as long as the number of empty children in this |
643 | * node is above threshold. | 642 | * node is above threshold. |
644 | */ | 643 | */ |
645 | 644 | ||
646 | 645 | max_work = MAX_WORK; | |
647 | /* Keep root node larger */ | 646 | while (tn->bits > 1 && max_work-- && |
648 | |||
649 | if (!tn->parent) | ||
650 | halve_threshold_use = halve_threshold_root; | ||
651 | else | ||
652 | halve_threshold_use = halve_threshold; | ||
653 | |||
654 | err = 0; | ||
655 | max_resize = 10; | ||
656 | while (tn->bits > 1 && max_resize-- && | ||
657 | 100 * (tnode_child_length(tn) - tn->empty_children) < | 647 | 100 * (tnode_child_length(tn) - tn->empty_children) < |
658 | halve_threshold_use * tnode_child_length(tn)) { | 648 | halve_threshold_use * tnode_child_length(tn)) { |
659 | 649 | ||
@@ -668,19 +658,10 @@ static struct node *resize(struct trie *t, struct tnode *tn) | |||
668 | } | 658 | } |
669 | } | 659 | } |
670 | 660 | ||
671 | if (max_resize < 0) { | ||
672 | if (!tn->parent) | ||
673 | pr_warning("Fix halve_threshold_root." | ||
674 | " Now=%d size=%d bits\n", | ||
675 | halve_threshold_root, tn->bits); | ||
676 | else | ||
677 | pr_warning("Fix halve_threshold." | ||
678 | " Now=%d size=%d bits\n", | ||
679 | halve_threshold, tn->bits); | ||
680 | } | ||
681 | 661 | ||
682 | /* Only one child remains */ | 662 | /* Only one child remains */ |
683 | if (tn->empty_children == tnode_child_length(tn) - 1) | 663 | if (tn->empty_children == tnode_child_length(tn) - 1) { |
664 | one_child: | ||
684 | for (i = 0; i < tnode_child_length(tn); i++) { | 665 | for (i = 0; i < tnode_child_length(tn); i++) { |
685 | struct node *n; | 666 | struct node *n; |
686 | 667 | ||
@@ -694,7 +675,7 @@ static struct node *resize(struct trie *t, struct tnode *tn) | |||
694 | tnode_free_safe(tn); | 675 | tnode_free_safe(tn); |
695 | return n; | 676 | return n; |
696 | } | 677 | } |
697 | 678 | } | |
698 | return (struct node *) tn; | 679 | return (struct node *) tn; |
699 | } | 680 | } |
700 | 681 | ||
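The resize() rework above caps each pass at MAX_WORK inflate or halve steps, returns early once any inflate has run, and routes the single-child case through the new one_child label. The inflate and halve decisions themselves are plain occupancy ratios; a small standalone check of the two inequalities with made-up tnode numbers:

        #include <stdio.h>

        /* Occupancy tests used by resize() above; "used" is the number of
         * non-empty children (child_length - empty_children). */
        static int should_inflate(int full, int used, int children, int threshold)
        {
                return 50 * (full + used) >= threshold * children;
        }

        static int should_halve(int used, int children, int threshold)
        {
                return 100 * used < threshold * children;
        }

        int main(void)
        {
                int children = 16, full = 6, used = 12;          /* hypothetical tnode */

                printf("inflate (threshold 50)? %d\n",
                       should_inflate(full, used, children, 50)); /* 900 >= 800 -> 1 */
                printf("halve   (threshold 25)? %d\n",
                       should_halve(used, children, 25));         /* 1200 < 400 -> 0 */
                return 0;
        }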
@@ -1435,7 +1416,7 @@ static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, | |||
1435 | cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length), | 1416 | cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length), |
1436 | pos, bits); | 1417 | pos, bits); |
1437 | 1418 | ||
1438 | n = tnode_get_child(pn, cindex); | 1419 | n = tnode_get_child_rcu(pn, cindex); |
1439 | 1420 | ||
1440 | if (n == NULL) { | 1421 | if (n == NULL) { |
1441 | #ifdef CONFIG_IP_FIB_TRIE_STATS | 1422 | #ifdef CONFIG_IP_FIB_TRIE_STATS |
@@ -1570,7 +1551,7 @@ backtrace: | |||
1570 | if (chopped_off <= pn->bits) { | 1551 | if (chopped_off <= pn->bits) { |
1571 | cindex &= ~(1 << (chopped_off-1)); | 1552 | cindex &= ~(1 << (chopped_off-1)); |
1572 | } else { | 1553 | } else { |
1573 | struct tnode *parent = node_parent((struct node *) pn); | 1554 | struct tnode *parent = node_parent_rcu((struct node *) pn); |
1574 | if (!parent) | 1555 | if (!parent) |
1575 | goto failed; | 1556 | goto failed; |
1576 | 1557 | ||
@@ -1783,7 +1764,7 @@ static struct leaf *trie_firstleaf(struct trie *t) | |||
1783 | static struct leaf *trie_nextleaf(struct leaf *l) | 1764 | static struct leaf *trie_nextleaf(struct leaf *l) |
1784 | { | 1765 | { |
1785 | struct node *c = (struct node *) l; | 1766 | struct node *c = (struct node *) l; |
1786 | struct tnode *p = node_parent(c); | 1767 | struct tnode *p = node_parent_rcu(c); |
1787 | 1768 | ||
1788 | if (!p) | 1769 | if (!p) |
1789 | return NULL; /* trie with just one leaf */ | 1770 | return NULL; /* trie with just one leaf */ |
@@ -2391,7 +2372,7 @@ static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s) | |||
2391 | } | 2372 | } |
2392 | } | 2373 | } |
2393 | 2374 | ||
2394 | static const char *rtn_type_names[__RTN_MAX] = { | 2375 | static const char *const rtn_type_names[__RTN_MAX] = { |
2395 | [RTN_UNSPEC] = "UNSPEC", | 2376 | [RTN_UNSPEC] = "UNSPEC", |
2396 | [RTN_UNICAST] = "UNICAST", | 2377 | [RTN_UNICAST] = "UNICAST", |
2397 | [RTN_LOCAL] = "LOCAL", | 2378 | [RTN_LOCAL] = "LOCAL", |
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 61283f928825..13f0781f35cd 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -218,8 +218,8 @@ void inet_twdr_hangman(unsigned long data) | |||
218 | /* We purged the entire slot, anything left? */ | 218 | /* We purged the entire slot, anything left? */ |
219 | if (twdr->tw_count) | 219 | if (twdr->tw_count) |
220 | need_timer = 1; | 220 | need_timer = 1; |
221 | twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1)); | ||
221 | } | 222 | } |
222 | twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1)); | ||
223 | if (need_timer) | 223 | if (need_timer) |
224 | mod_timer(&twdr->tw_timer, jiffies + twdr->period); | 224 | mod_timer(&twdr->tw_timer, jiffies + twdr->period); |
225 | out: | 225 | out: |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 82c11dd10a62..533afaadefd4 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -662,7 +662,7 @@ drop_nolock: | |||
662 | return(0); | 662 | return(0); |
663 | } | 663 | } |
664 | 664 | ||
665 | static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 665 | static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
666 | { | 666 | { |
667 | struct ip_tunnel *tunnel = netdev_priv(dev); | 667 | struct ip_tunnel *tunnel = netdev_priv(dev); |
668 | struct net_device_stats *stats = &tunnel->dev->stats; | 668 | struct net_device_stats *stats = &tunnel->dev->stats; |
@@ -821,7 +821,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
821 | stats->tx_dropped++; | 821 | stats->tx_dropped++; |
822 | dev_kfree_skb(skb); | 822 | dev_kfree_skb(skb); |
823 | tunnel->recursion--; | 823 | tunnel->recursion--; |
824 | return 0; | 824 | return NETDEV_TX_OK; |
825 | } | 825 | } |
826 | if (skb->sk) | 826 | if (skb->sk) |
827 | skb_set_owner_w(new_skb, skb->sk); | 827 | skb_set_owner_w(new_skb, skb->sk); |
@@ -889,7 +889,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
889 | 889 | ||
890 | IPTUNNEL_XMIT(); | 890 | IPTUNNEL_XMIT(); |
891 | tunnel->recursion--; | 891 | tunnel->recursion--; |
892 | return 0; | 892 | return NETDEV_TX_OK; |
893 | 893 | ||
894 | tx_error_icmp: | 894 | tx_error_icmp: |
895 | dst_link_failure(skb); | 895 | dst_link_failure(skb); |
@@ -898,7 +898,7 @@ tx_error: | |||
898 | stats->tx_errors++; | 898 | stats->tx_errors++; |
899 | dev_kfree_skb(skb); | 899 | dev_kfree_skb(skb); |
900 | tunnel->recursion--; | 900 | tunnel->recursion--; |
901 | return 0; | 901 | return NETDEV_TX_OK; |
902 | } | 902 | } |
903 | 903 | ||
904 | static int ipgre_tunnel_bind_dev(struct net_device *dev) | 904 | static int ipgre_tunnel_bind_dev(struct net_device *dev) |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 7ffcd96fe591..9fe5d7b81580 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -1304,7 +1304,7 @@ int ip_push_pending_frames(struct sock *sk) | |||
1304 | err = ip_local_out(skb); | 1304 | err = ip_local_out(skb); |
1305 | if (err) { | 1305 | if (err) { |
1306 | if (err > 0) | 1306 | if (err > 0) |
1307 | err = inet->recverr ? net_xmit_errno(err) : 0; | 1307 | err = net_xmit_errno(err); |
1308 | if (err) | 1308 | if (err) |
1309 | goto error; | 1309 | goto error; |
1310 | } | 1310 | } |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 93e2b787da20..62548cb0923c 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -387,7 +387,7 @@ static int ipip_rcv(struct sk_buff *skb) | |||
387 | * and that skb is filled properly by that function. | 387 | * and that skb is filled properly by that function. |
388 | */ | 388 | */ |
389 | 389 | ||
390 | static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 390 | static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
391 | { | 391 | { |
392 | struct ip_tunnel *tunnel = netdev_priv(dev); | 392 | struct ip_tunnel *tunnel = netdev_priv(dev); |
393 | struct net_device_stats *stats = &tunnel->dev->stats; | 393 | struct net_device_stats *stats = &tunnel->dev->stats; |
@@ -486,7 +486,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
486 | stats->tx_dropped++; | 486 | stats->tx_dropped++; |
487 | dev_kfree_skb(skb); | 487 | dev_kfree_skb(skb); |
488 | tunnel->recursion--; | 488 | tunnel->recursion--; |
489 | return 0; | 489 | return NETDEV_TX_OK; |
490 | } | 490 | } |
491 | if (skb->sk) | 491 | if (skb->sk) |
492 | skb_set_owner_w(new_skb, skb->sk); | 492 | skb_set_owner_w(new_skb, skb->sk); |
@@ -524,7 +524,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
524 | 524 | ||
525 | IPTUNNEL_XMIT(); | 525 | IPTUNNEL_XMIT(); |
526 | tunnel->recursion--; | 526 | tunnel->recursion--; |
527 | return 0; | 527 | return NETDEV_TX_OK; |
528 | 528 | ||
529 | tx_error_icmp: | 529 | tx_error_icmp: |
530 | dst_link_failure(skb); | 530 | dst_link_failure(skb); |
@@ -532,7 +532,7 @@ tx_error: | |||
532 | stats->tx_errors++; | 532 | stats->tx_errors++; |
533 | dev_kfree_skb(skb); | 533 | dev_kfree_skb(skb); |
534 | tunnel->recursion--; | 534 | tunnel->recursion--; |
535 | return 0; | 535 | return NETDEV_TX_OK; |
536 | } | 536 | } |
537 | 537 | ||
538 | static void ipip_tunnel_bind_dev(struct net_device *dev) | 538 | static void ipip_tunnel_bind_dev(struct net_device *dev) |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9a8da5ed92b7..65d421cf5bc7 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -201,7 +201,7 @@ failure: | |||
201 | 201 | ||
202 | #ifdef CONFIG_IP_PIMSM | 202 | #ifdef CONFIG_IP_PIMSM |
203 | 203 | ||
204 | static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | 204 | static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) |
205 | { | 205 | { |
206 | struct net *net = dev_net(dev); | 206 | struct net *net = dev_net(dev); |
207 | 207 | ||
@@ -212,7 +212,7 @@ static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | |||
212 | IGMPMSG_WHOLEPKT); | 212 | IGMPMSG_WHOLEPKT); |
213 | read_unlock(&mrt_lock); | 213 | read_unlock(&mrt_lock); |
214 | kfree_skb(skb); | 214 | kfree_skb(skb); |
215 | return 0; | 215 | return NETDEV_TX_OK; |
216 | } | 216 | } |
217 | 217 | ||
218 | static const struct net_device_ops reg_vif_netdev_ops = { | 218 | static const struct net_device_ops reg_vif_netdev_ops = { |
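The ip_gre, ipip and ipmr hunks above are part of the conversion of transmit handlers to the netdev_tx_t return type: once the handler has consumed the skb it reports NETDEV_TX_OK, even on paths that drop the packet. An illustrative-only skeleton of that convention (not one of the drivers in this diff):

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        /* Illustrative-only: a transmit handler that drops everything, showing
         * the return convention the hunks above adopt. */
        static netdev_tx_t example_discard_xmit(struct sk_buff *skb,
                                                struct net_device *dev)
        {
                dev->stats.tx_dropped++;        /* account for the drop */
                dev_kfree_skb(skb);             /* the skb is consumed here ... */
                return NETDEV_TX_OK;            /* ... so it is not reported as busy */
        }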
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 7505dff4ffdf..27774c99d888 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * Copyright (C) 2002 David S. Miller (davem@redhat.com) | 8 | * Copyright (C) 2002 David S. Miller (davem@redhat.com) |
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/skbuff.h> | 13 | #include <linux/skbuff.h> |
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
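The pr_fmt() definition added at the top of the file gives every pr_err()/pr_debug() call in it a consistent module-name prefix; it has to be defined before the printk declarations are included, which is why it sits above the #include lines. A userspace model of the effect, with KBUILD_MODNAME faked since the kernel build system normally provides it:

        #include <stdio.h>

        #define KBUILD_MODNAME "arp_tables"               /* faked; kbuild provides it */
        #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
        #define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

        int main(void)
        {
                pr_err("Underflows must be unconditional\n");   /* "arp_tables: ..." */
                return 0;
        }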
@@ -341,15 +341,11 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
341 | } | 341 | } |
342 | 342 | ||
343 | /* All zeroes == unconditional rule. */ | 343 | /* All zeroes == unconditional rule. */ |
344 | static inline int unconditional(const struct arpt_arp *arp) | 344 | static inline bool unconditional(const struct arpt_arp *arp) |
345 | { | 345 | { |
346 | unsigned int i; | 346 | static const struct arpt_arp uncond; |
347 | 347 | ||
348 | for (i = 0; i < sizeof(*arp)/sizeof(__u32); i++) | 348 | return memcmp(arp, &uncond, sizeof(uncond)) == 0; |
349 | if (((__u32 *)arp)[i]) | ||
350 | return 0; | ||
351 | |||
352 | return 1; | ||
353 | } | 349 | } |
354 | 350 | ||
355 | /* Figures out from what hook each rule can be called: returns 0 if | 351 | /* Figures out from what hook each rule can be called: returns 0 if |
@@ -537,12 +533,28 @@ out: | |||
537 | return ret; | 533 | return ret; |
538 | } | 534 | } |
539 | 535 | ||
536 | static bool check_underflow(struct arpt_entry *e) | ||
537 | { | ||
538 | const struct arpt_entry_target *t; | ||
539 | unsigned int verdict; | ||
540 | |||
541 | if (!unconditional(&e->arp)) | ||
542 | return false; | ||
543 | t = arpt_get_target(e); | ||
544 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | ||
545 | return false; | ||
546 | verdict = ((struct arpt_standard_target *)t)->verdict; | ||
547 | verdict = -verdict - 1; | ||
548 | return verdict == NF_DROP || verdict == NF_ACCEPT; | ||
549 | } | ||
550 | |||
540 | static inline int check_entry_size_and_hooks(struct arpt_entry *e, | 551 | static inline int check_entry_size_and_hooks(struct arpt_entry *e, |
541 | struct xt_table_info *newinfo, | 552 | struct xt_table_info *newinfo, |
542 | unsigned char *base, | 553 | unsigned char *base, |
543 | unsigned char *limit, | 554 | unsigned char *limit, |
544 | const unsigned int *hook_entries, | 555 | const unsigned int *hook_entries, |
545 | const unsigned int *underflows, | 556 | const unsigned int *underflows, |
557 | unsigned int valid_hooks, | ||
546 | unsigned int *i) | 558 | unsigned int *i) |
547 | { | 559 | { |
548 | unsigned int h; | 560 | unsigned int h; |
@@ -562,15 +574,21 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, | |||
562 | 574 | ||
563 | /* Check hooks & underflows */ | 575 | /* Check hooks & underflows */ |
564 | for (h = 0; h < NF_ARP_NUMHOOKS; h++) { | 576 | for (h = 0; h < NF_ARP_NUMHOOKS; h++) { |
577 | if (!(valid_hooks & (1 << h))) | ||
578 | continue; | ||
565 | if ((unsigned char *)e - base == hook_entries[h]) | 579 | if ((unsigned char *)e - base == hook_entries[h]) |
566 | newinfo->hook_entry[h] = hook_entries[h]; | 580 | newinfo->hook_entry[h] = hook_entries[h]; |
567 | if ((unsigned char *)e - base == underflows[h]) | 581 | if ((unsigned char *)e - base == underflows[h]) { |
582 | if (!check_underflow(e)) { | ||
583 | pr_err("Underflows must be unconditional and " | ||
584 | "use the STANDARD target with " | ||
585 | "ACCEPT/DROP\n"); | ||
586 | return -EINVAL; | ||
587 | } | ||
568 | newinfo->underflow[h] = underflows[h]; | 588 | newinfo->underflow[h] = underflows[h]; |
589 | } | ||
569 | } | 590 | } |
570 | 591 | ||
571 | /* FIXME: underflows must be unconditional, standard verdicts | ||
572 | < 0 (not ARPT_RETURN). --RR */ | ||
573 | |||
574 | /* Clear counters and comefrom */ | 592 | /* Clear counters and comefrom */ |
575 | e->counters = ((struct xt_counters) { 0, 0 }); | 593 | e->counters = ((struct xt_counters) { 0, 0 }); |
576 | e->comefrom = 0; | 594 | e->comefrom = 0; |
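The checks added above replace the old FIXME: a per-hook underflow entry must now be an unconditional rule (its match part compares equal to an all-zero struct) whose STANDARD target verdict decodes to ACCEPT or DROP, standard verdicts being stored in the blob as -verdict - 1. A standalone model of both tests, using a stand-in struct instead of struct arpt_arp and the NF_DROP/NF_ACCEPT values from the netfilter uapi header:

        #include <stdio.h>
        #include <string.h>

        #define NF_DROP   0
        #define NF_ACCEPT 1

        /* Stand-in for struct arpt_arp; only unsigned fields, so no padding. */
        struct match_spec {
                unsigned int src, dst, flags, invflags;
        };

        static int is_unconditional(const struct match_spec *m)
        {
                static const struct match_spec uncond;   /* all zeroes */

                return memcmp(m, &uncond, sizeof(uncond)) == 0;
        }

        static int underflow_verdict_ok(int stored)
        {
                unsigned int verdict = -stored - 1;      /* undo the -verdict - 1 encoding */

                return verdict == NF_DROP || verdict == NF_ACCEPT;
        }

        int main(void)
        {
                struct match_spec any = { 0 }, narrowed = { .dst = 0x0a000001 };

                printf("%d %d\n", is_unconditional(&any), is_unconditional(&narrowed));
                printf("%d %d %d\n",
                       underflow_verdict_ok(-NF_ACCEPT - 1),   /* 1 */
                       underflow_verdict_ok(-NF_DROP - 1),     /* 1 */
                       underflow_verdict_ok(3));               /* 0: a jump, not a verdict */
                return 0;
        }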
@@ -630,7 +648,7 @@ static int translate_table(const char *name, | |||
630 | newinfo, | 648 | newinfo, |
631 | entry0, | 649 | entry0, |
632 | entry0 + size, | 650 | entry0 + size, |
633 | hook_entries, underflows, &i); | 651 | hook_entries, underflows, valid_hooks, &i); |
634 | duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); | 652 | duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); |
635 | if (ret != 0) | 653 | if (ret != 0) |
636 | return ret; | 654 | return ret; |
@@ -1760,7 +1778,8 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len | |||
1760 | return ret; | 1778 | return ret; |
1761 | } | 1779 | } |
1762 | 1780 | ||
1763 | struct xt_table *arpt_register_table(struct net *net, struct xt_table *table, | 1781 | struct xt_table *arpt_register_table(struct net *net, |
1782 | const struct xt_table *table, | ||
1764 | const struct arpt_replace *repl) | 1783 | const struct arpt_replace *repl) |
1765 | { | 1784 | { |
1766 | int ret; | 1785 | int ret; |
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c index 6ecfdae7c589..97337601827a 100644 --- a/net/ipv4/netfilter/arptable_filter.c +++ b/net/ipv4/netfilter/arptable_filter.c | |||
@@ -15,7 +15,7 @@ MODULE_DESCRIPTION("arptables filter table"); | |||
15 | #define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \ | 15 | #define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \ |
16 | (1 << NF_ARP_FORWARD)) | 16 | (1 << NF_ARP_FORWARD)) |
17 | 17 | ||
18 | static struct | 18 | static const struct |
19 | { | 19 | { |
20 | struct arpt_replace repl; | 20 | struct arpt_replace repl; |
21 | struct arpt_standard entries[3]; | 21 | struct arpt_standard entries[3]; |
@@ -45,7 +45,7 @@ static struct | |||
45 | .term = ARPT_ERROR_INIT, | 45 | .term = ARPT_ERROR_INIT, |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static struct xt_table packet_filter = { | 48 | static const struct xt_table packet_filter = { |
49 | .name = "filter", | 49 | .name = "filter", |
50 | .valid_hooks = FILTER_VALID_HOOKS, | 50 | .valid_hooks = FILTER_VALID_HOOKS, |
51 | .me = THIS_MODULE, | 51 | .me = THIS_MODULE, |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index fdefae6b5dfc..cde755d5eeab 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
11 | #include <linux/cache.h> | 12 | #include <linux/cache.h> |
12 | #include <linux/capability.h> | 13 | #include <linux/capability.h> |
13 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
@@ -190,16 +191,11 @@ get_entry(void *base, unsigned int offset) | |||
190 | 191 | ||
191 | /* All zeroes == unconditional rule. */ | 192 | /* All zeroes == unconditional rule. */ |
192 | /* Mildly perf critical (only if packet tracing is on) */ | 193 | /* Mildly perf critical (only if packet tracing is on) */ |
193 | static inline int | 194 | static inline bool unconditional(const struct ipt_ip *ip) |
194 | unconditional(const struct ipt_ip *ip) | ||
195 | { | 195 | { |
196 | unsigned int i; | 196 | static const struct ipt_ip uncond; |
197 | |||
198 | for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++) | ||
199 | if (((__u32 *)ip)[i]) | ||
200 | return 0; | ||
201 | 197 | ||
202 | return 1; | 198 | return memcmp(ip, &uncond, sizeof(uncond)) == 0; |
203 | #undef FWINV | 199 | #undef FWINV |
204 | } | 200 | } |
205 | 201 | ||
@@ -315,7 +311,6 @@ ipt_do_table(struct sk_buff *skb, | |||
315 | 311 | ||
316 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); | 312 | static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); |
317 | const struct iphdr *ip; | 313 | const struct iphdr *ip; |
318 | u_int16_t datalen; | ||
319 | bool hotdrop = false; | 314 | bool hotdrop = false; |
320 | /* Initializing verdict to NF_DROP keeps gcc happy. */ | 315 | /* Initializing verdict to NF_DROP keeps gcc happy. */ |
321 | unsigned int verdict = NF_DROP; | 316 | unsigned int verdict = NF_DROP; |
@@ -328,7 +323,6 @@ ipt_do_table(struct sk_buff *skb, | |||
328 | 323 | ||
329 | /* Initialization */ | 324 | /* Initialization */ |
330 | ip = ip_hdr(skb); | 325 | ip = ip_hdr(skb); |
331 | datalen = skb->len - ip->ihl * 4; | ||
332 | indev = in ? in->name : nulldevname; | 326 | indev = in ? in->name : nulldevname; |
333 | outdev = out ? out->name : nulldevname; | 327 | outdev = out ? out->name : nulldevname; |
334 | /* We handle fragments by dealing with the first fragment as | 328 | /* We handle fragments by dealing with the first fragment as |
@@ -427,8 +421,6 @@ ipt_do_table(struct sk_buff *skb, | |||
427 | #endif | 421 | #endif |
428 | /* Target might have changed stuff. */ | 422 | /* Target might have changed stuff. */ |
429 | ip = ip_hdr(skb); | 423 | ip = ip_hdr(skb); |
430 | datalen = skb->len - ip->ihl * 4; | ||
431 | |||
432 | if (verdict == IPT_CONTINUE) | 424 | if (verdict == IPT_CONTINUE) |
433 | e = ipt_next_entry(e); | 425 | e = ipt_next_entry(e); |
434 | else | 426 | else |
@@ -716,6 +708,21 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size, | |||
716 | return ret; | 708 | return ret; |
717 | } | 709 | } |
718 | 710 | ||
711 | static bool check_underflow(struct ipt_entry *e) | ||
712 | { | ||
713 | const struct ipt_entry_target *t; | ||
714 | unsigned int verdict; | ||
715 | |||
716 | if (!unconditional(&e->ip)) | ||
717 | return false; | ||
718 | t = ipt_get_target(e); | ||
719 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | ||
720 | return false; | ||
721 | verdict = ((struct ipt_standard_target *)t)->verdict; | ||
722 | verdict = -verdict - 1; | ||
723 | return verdict == NF_DROP || verdict == NF_ACCEPT; | ||
724 | } | ||
725 | |||
719 | static int | 726 | static int |
720 | check_entry_size_and_hooks(struct ipt_entry *e, | 727 | check_entry_size_and_hooks(struct ipt_entry *e, |
721 | struct xt_table_info *newinfo, | 728 | struct xt_table_info *newinfo, |
@@ -723,6 +730,7 @@ check_entry_size_and_hooks(struct ipt_entry *e, | |||
723 | unsigned char *limit, | 730 | unsigned char *limit, |
724 | const unsigned int *hook_entries, | 731 | const unsigned int *hook_entries, |
725 | const unsigned int *underflows, | 732 | const unsigned int *underflows, |
733 | unsigned int valid_hooks, | ||
726 | unsigned int *i) | 734 | unsigned int *i) |
727 | { | 735 | { |
728 | unsigned int h; | 736 | unsigned int h; |
@@ -742,15 +750,21 @@ check_entry_size_and_hooks(struct ipt_entry *e, | |||
742 | 750 | ||
743 | /* Check hooks & underflows */ | 751 | /* Check hooks & underflows */ |
744 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { | 752 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { |
753 | if (!(valid_hooks & (1 << h))) | ||
754 | continue; | ||
745 | if ((unsigned char *)e - base == hook_entries[h]) | 755 | if ((unsigned char *)e - base == hook_entries[h]) |
746 | newinfo->hook_entry[h] = hook_entries[h]; | 756 | newinfo->hook_entry[h] = hook_entries[h]; |
747 | if ((unsigned char *)e - base == underflows[h]) | 757 | if ((unsigned char *)e - base == underflows[h]) { |
758 | if (!check_underflow(e)) { | ||
759 | pr_err("Underflows must be unconditional and " | ||
760 | "use the STANDARD target with " | ||
761 | "ACCEPT/DROP\n"); | ||
762 | return -EINVAL; | ||
763 | } | ||
748 | newinfo->underflow[h] = underflows[h]; | 764 | newinfo->underflow[h] = underflows[h]; |
765 | } | ||
749 | } | 766 | } |
750 | 767 | ||
751 | /* FIXME: underflows must be unconditional, standard verdicts | ||
752 | < 0 (not IPT_RETURN). --RR */ | ||
753 | |||
754 | /* Clear counters and comefrom */ | 768 | /* Clear counters and comefrom */ |
755 | e->counters = ((struct xt_counters) { 0, 0 }); | 769 | e->counters = ((struct xt_counters) { 0, 0 }); |
756 | e->comefrom = 0; | 770 | e->comefrom = 0; |
@@ -813,7 +827,7 @@ translate_table(const char *name, | |||
813 | newinfo, | 827 | newinfo, |
814 | entry0, | 828 | entry0, |
815 | entry0 + size, | 829 | entry0 + size, |
816 | hook_entries, underflows, &i); | 830 | hook_entries, underflows, valid_hooks, &i); |
817 | if (ret != 0) | 831 | if (ret != 0) |
818 | return ret; | 832 | return ret; |
819 | 833 | ||
@@ -2051,7 +2065,8 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2051 | return ret; | 2065 | return ret; |
2052 | } | 2066 | } |
2053 | 2067 | ||
2054 | struct xt_table *ipt_register_table(struct net *net, struct xt_table *table, | 2068 | struct xt_table *ipt_register_table(struct net *net, |
2069 | const struct xt_table *table, | ||
2055 | const struct ipt_replace *repl) | 2070 | const struct ipt_replace *repl) |
2056 | { | 2071 | { |
2057 | int ret; | 2072 | int ret; |
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c index c30a969724f8..df566cbd68e5 100644 --- a/net/ipv4/netfilter/iptable_filter.c +++ b/net/ipv4/netfilter/iptable_filter.c | |||
@@ -53,11 +53,11 @@ static struct | |||
53 | .term = IPT_ERROR_INIT, /* ERROR */ | 53 | .term = IPT_ERROR_INIT, /* ERROR */ |
54 | }; | 54 | }; |
55 | 55 | ||
56 | static struct xt_table packet_filter = { | 56 | static const struct xt_table packet_filter = { |
57 | .name = "filter", | 57 | .name = "filter", |
58 | .valid_hooks = FILTER_VALID_HOOKS, | 58 | .valid_hooks = FILTER_VALID_HOOKS, |
59 | .me = THIS_MODULE, | 59 | .me = THIS_MODULE, |
60 | .af = AF_INET, | 60 | .af = NFPROTO_IPV4, |
61 | }; | 61 | }; |
62 | 62 | ||
63 | /* The work comes in here from netfilter.c. */ | 63 | /* The work comes in here from netfilter.c. */ |
@@ -102,21 +102,21 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = { | |||
102 | { | 102 | { |
103 | .hook = ipt_local_in_hook, | 103 | .hook = ipt_local_in_hook, |
104 | .owner = THIS_MODULE, | 104 | .owner = THIS_MODULE, |
105 | .pf = PF_INET, | 105 | .pf = NFPROTO_IPV4, |
106 | .hooknum = NF_INET_LOCAL_IN, | 106 | .hooknum = NF_INET_LOCAL_IN, |
107 | .priority = NF_IP_PRI_FILTER, | 107 | .priority = NF_IP_PRI_FILTER, |
108 | }, | 108 | }, |
109 | { | 109 | { |
110 | .hook = ipt_hook, | 110 | .hook = ipt_hook, |
111 | .owner = THIS_MODULE, | 111 | .owner = THIS_MODULE, |
112 | .pf = PF_INET, | 112 | .pf = NFPROTO_IPV4, |
113 | .hooknum = NF_INET_FORWARD, | 113 | .hooknum = NF_INET_FORWARD, |
114 | .priority = NF_IP_PRI_FILTER, | 114 | .priority = NF_IP_PRI_FILTER, |
115 | }, | 115 | }, |
116 | { | 116 | { |
117 | .hook = ipt_local_out_hook, | 117 | .hook = ipt_local_out_hook, |
118 | .owner = THIS_MODULE, | 118 | .owner = THIS_MODULE, |
119 | .pf = PF_INET, | 119 | .pf = NFPROTO_IPV4, |
120 | .hooknum = NF_INET_LOCAL_OUT, | 120 | .hooknum = NF_INET_LOCAL_OUT, |
121 | .priority = NF_IP_PRI_FILTER, | 121 | .priority = NF_IP_PRI_FILTER, |
122 | }, | 122 | }, |
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index 4087614d9519..036047f9b0f2 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c | |||
@@ -28,7 +28,7 @@ MODULE_DESCRIPTION("iptables mangle table"); | |||
28 | (1 << NF_INET_POST_ROUTING)) | 28 | (1 << NF_INET_POST_ROUTING)) |
29 | 29 | ||
30 | /* Ouch - five different hooks? Maybe this should be a config option..... -- BC */ | 30 | /* Ouch - five different hooks? Maybe this should be a config option..... -- BC */ |
31 | static struct | 31 | static const struct |
32 | { | 32 | { |
33 | struct ipt_replace repl; | 33 | struct ipt_replace repl; |
34 | struct ipt_standard entries[5]; | 34 | struct ipt_standard entries[5]; |
@@ -64,11 +64,11 @@ static struct | |||
64 | .term = IPT_ERROR_INIT, /* ERROR */ | 64 | .term = IPT_ERROR_INIT, /* ERROR */ |
65 | }; | 65 | }; |
66 | 66 | ||
67 | static struct xt_table packet_mangler = { | 67 | static const struct xt_table packet_mangler = { |
68 | .name = "mangle", | 68 | .name = "mangle", |
69 | .valid_hooks = MANGLE_VALID_HOOKS, | 69 | .valid_hooks = MANGLE_VALID_HOOKS, |
70 | .me = THIS_MODULE, | 70 | .me = THIS_MODULE, |
71 | .af = AF_INET, | 71 | .af = NFPROTO_IPV4, |
72 | }; | 72 | }; |
73 | 73 | ||
74 | /* The work comes in here from netfilter.c. */ | 74 | /* The work comes in here from netfilter.c. */ |
@@ -162,35 +162,35 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = { | |||
162 | { | 162 | { |
163 | .hook = ipt_pre_routing_hook, | 163 | .hook = ipt_pre_routing_hook, |
164 | .owner = THIS_MODULE, | 164 | .owner = THIS_MODULE, |
165 | .pf = PF_INET, | 165 | .pf = NFPROTO_IPV4, |
166 | .hooknum = NF_INET_PRE_ROUTING, | 166 | .hooknum = NF_INET_PRE_ROUTING, |
167 | .priority = NF_IP_PRI_MANGLE, | 167 | .priority = NF_IP_PRI_MANGLE, |
168 | }, | 168 | }, |
169 | { | 169 | { |
170 | .hook = ipt_local_in_hook, | 170 | .hook = ipt_local_in_hook, |
171 | .owner = THIS_MODULE, | 171 | .owner = THIS_MODULE, |
172 | .pf = PF_INET, | 172 | .pf = NFPROTO_IPV4, |
173 | .hooknum = NF_INET_LOCAL_IN, | 173 | .hooknum = NF_INET_LOCAL_IN, |
174 | .priority = NF_IP_PRI_MANGLE, | 174 | .priority = NF_IP_PRI_MANGLE, |
175 | }, | 175 | }, |
176 | { | 176 | { |
177 | .hook = ipt_forward_hook, | 177 | .hook = ipt_forward_hook, |
178 | .owner = THIS_MODULE, | 178 | .owner = THIS_MODULE, |
179 | .pf = PF_INET, | 179 | .pf = NFPROTO_IPV4, |
180 | .hooknum = NF_INET_FORWARD, | 180 | .hooknum = NF_INET_FORWARD, |
181 | .priority = NF_IP_PRI_MANGLE, | 181 | .priority = NF_IP_PRI_MANGLE, |
182 | }, | 182 | }, |
183 | { | 183 | { |
184 | .hook = ipt_local_hook, | 184 | .hook = ipt_local_hook, |
185 | .owner = THIS_MODULE, | 185 | .owner = THIS_MODULE, |
186 | .pf = PF_INET, | 186 | .pf = NFPROTO_IPV4, |
187 | .hooknum = NF_INET_LOCAL_OUT, | 187 | .hooknum = NF_INET_LOCAL_OUT, |
188 | .priority = NF_IP_PRI_MANGLE, | 188 | .priority = NF_IP_PRI_MANGLE, |
189 | }, | 189 | }, |
190 | { | 190 | { |
191 | .hook = ipt_post_routing_hook, | 191 | .hook = ipt_post_routing_hook, |
192 | .owner = THIS_MODULE, | 192 | .owner = THIS_MODULE, |
193 | .pf = PF_INET, | 193 | .pf = NFPROTO_IPV4, |
194 | .hooknum = NF_INET_POST_ROUTING, | 194 | .hooknum = NF_INET_POST_ROUTING, |
195 | .priority = NF_IP_PRI_MANGLE, | 195 | .priority = NF_IP_PRI_MANGLE, |
196 | }, | 196 | }, |
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c index e5356da1fb54..993edc23be09 100644 --- a/net/ipv4/netfilter/iptable_raw.c +++ b/net/ipv4/netfilter/iptable_raw.c | |||
@@ -9,7 +9,7 @@ | |||
9 | 9 | ||
10 | #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) | 10 | #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) |
11 | 11 | ||
12 | static struct | 12 | static const struct |
13 | { | 13 | { |
14 | struct ipt_replace repl; | 14 | struct ipt_replace repl; |
15 | struct ipt_standard entries[2]; | 15 | struct ipt_standard entries[2]; |
@@ -36,11 +36,11 @@ static struct | |||
36 | .term = IPT_ERROR_INIT, /* ERROR */ | 36 | .term = IPT_ERROR_INIT, /* ERROR */ |
37 | }; | 37 | }; |
38 | 38 | ||
39 | static struct xt_table packet_raw = { | 39 | static const struct xt_table packet_raw = { |
40 | .name = "raw", | 40 | .name = "raw", |
41 | .valid_hooks = RAW_VALID_HOOKS, | 41 | .valid_hooks = RAW_VALID_HOOKS, |
42 | .me = THIS_MODULE, | 42 | .me = THIS_MODULE, |
43 | .af = AF_INET, | 43 | .af = NFPROTO_IPV4, |
44 | }; | 44 | }; |
45 | 45 | ||
46 | /* The work comes in here from netfilter.c. */ | 46 | /* The work comes in here from netfilter.c. */ |
@@ -74,14 +74,14 @@ ipt_local_hook(unsigned int hook, | |||
74 | static struct nf_hook_ops ipt_ops[] __read_mostly = { | 74 | static struct nf_hook_ops ipt_ops[] __read_mostly = { |
75 | { | 75 | { |
76 | .hook = ipt_hook, | 76 | .hook = ipt_hook, |
77 | .pf = PF_INET, | 77 | .pf = NFPROTO_IPV4, |
78 | .hooknum = NF_INET_PRE_ROUTING, | 78 | .hooknum = NF_INET_PRE_ROUTING, |
79 | .priority = NF_IP_PRI_RAW, | 79 | .priority = NF_IP_PRI_RAW, |
80 | .owner = THIS_MODULE, | 80 | .owner = THIS_MODULE, |
81 | }, | 81 | }, |
82 | { | 82 | { |
83 | .hook = ipt_local_hook, | 83 | .hook = ipt_local_hook, |
84 | .pf = PF_INET, | 84 | .pf = NFPROTO_IPV4, |
85 | .hooknum = NF_INET_LOCAL_OUT, | 85 | .hooknum = NF_INET_LOCAL_OUT, |
86 | .priority = NF_IP_PRI_RAW, | 86 | .priority = NF_IP_PRI_RAW, |
87 | .owner = THIS_MODULE, | 87 | .owner = THIS_MODULE, |
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c index 29ab630f240a..99eb76c65d25 100644 --- a/net/ipv4/netfilter/iptable_security.c +++ b/net/ipv4/netfilter/iptable_security.c | |||
@@ -27,7 +27,7 @@ MODULE_DESCRIPTION("iptables security table, for MAC rules"); | |||
27 | (1 << NF_INET_FORWARD) | \ | 27 | (1 << NF_INET_FORWARD) | \ |
28 | (1 << NF_INET_LOCAL_OUT) | 28 | (1 << NF_INET_LOCAL_OUT) |
29 | 29 | ||
30 | static struct | 30 | static const struct |
31 | { | 31 | { |
32 | struct ipt_replace repl; | 32 | struct ipt_replace repl; |
33 | struct ipt_standard entries[3]; | 33 | struct ipt_standard entries[3]; |
@@ -57,11 +57,11 @@ static struct | |||
57 | .term = IPT_ERROR_INIT, /* ERROR */ | 57 | .term = IPT_ERROR_INIT, /* ERROR */ |
58 | }; | 58 | }; |
59 | 59 | ||
60 | static struct xt_table security_table = { | 60 | static const struct xt_table security_table = { |
61 | .name = "security", | 61 | .name = "security", |
62 | .valid_hooks = SECURITY_VALID_HOOKS, | 62 | .valid_hooks = SECURITY_VALID_HOOKS, |
63 | .me = THIS_MODULE, | 63 | .me = THIS_MODULE, |
64 | .af = AF_INET, | 64 | .af = NFPROTO_IPV4, |
65 | }; | 65 | }; |
66 | 66 | ||
67 | static unsigned int | 67 | static unsigned int |
@@ -105,21 +105,21 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = { | |||
105 | { | 105 | { |
106 | .hook = ipt_local_in_hook, | 106 | .hook = ipt_local_in_hook, |
107 | .owner = THIS_MODULE, | 107 | .owner = THIS_MODULE, |
108 | .pf = PF_INET, | 108 | .pf = NFPROTO_IPV4, |
109 | .hooknum = NF_INET_LOCAL_IN, | 109 | .hooknum = NF_INET_LOCAL_IN, |
110 | .priority = NF_IP_PRI_SECURITY, | 110 | .priority = NF_IP_PRI_SECURITY, |
111 | }, | 111 | }, |
112 | { | 112 | { |
113 | .hook = ipt_forward_hook, | 113 | .hook = ipt_forward_hook, |
114 | .owner = THIS_MODULE, | 114 | .owner = THIS_MODULE, |
115 | .pf = PF_INET, | 115 | .pf = NFPROTO_IPV4, |
116 | .hooknum = NF_INET_FORWARD, | 116 | .hooknum = NF_INET_FORWARD, |
117 | .priority = NF_IP_PRI_SECURITY, | 117 | .priority = NF_IP_PRI_SECURITY, |
118 | }, | 118 | }, |
119 | { | 119 | { |
120 | .hook = ipt_local_out_hook, | 120 | .hook = ipt_local_out_hook, |
121 | .owner = THIS_MODULE, | 121 | .owner = THIS_MODULE, |
122 | .pf = PF_INET, | 122 | .pf = NFPROTO_IPV4, |
123 | .hooknum = NF_INET_LOCAL_OUT, | 123 | .hooknum = NF_INET_LOCAL_OUT, |
124 | .priority = NF_IP_PRI_SECURITY, | 124 | .priority = NF_IP_PRI_SECURITY, |
125 | }, | 125 | }, |
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 7d2ead7228ac..aa95bb82ee6c 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> | 26 | #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> |
27 | #include <net/netfilter/nf_nat_helper.h> | 27 | #include <net/netfilter/nf_nat_helper.h> |
28 | #include <net/netfilter/ipv4/nf_defrag_ipv4.h> | 28 | #include <net/netfilter/ipv4/nf_defrag_ipv4.h> |
29 | #include <net/netfilter/nf_log.h> | ||
29 | 30 | ||
30 | int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb, | 31 | int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb, |
31 | struct nf_conn *ct, | 32 | struct nf_conn *ct, |
@@ -113,8 +114,11 @@ static unsigned int ipv4_confirm(unsigned int hooknum, | |||
113 | 114 | ||
114 | ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb), | 115 | ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb), |
115 | ct, ctinfo); | 116 | ct, ctinfo); |
116 | if (ret != NF_ACCEPT) | 117 | if (ret != NF_ACCEPT) { |
118 | nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL, | ||
119 | "nf_ct_%s: dropping packet", helper->name); | ||
117 | return ret; | 120 | return ret; |
121 | } | ||
118 | 122 | ||
119 | if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { | 123 | if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { |
120 | typeof(nf_nat_seq_adjust_hook) seq_adjust; | 124 | typeof(nf_nat_seq_adjust_hook) seq_adjust; |
@@ -158,28 +162,28 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = { | |||
158 | { | 162 | { |
159 | .hook = ipv4_conntrack_in, | 163 | .hook = ipv4_conntrack_in, |
160 | .owner = THIS_MODULE, | 164 | .owner = THIS_MODULE, |
161 | .pf = PF_INET, | 165 | .pf = NFPROTO_IPV4, |
162 | .hooknum = NF_INET_PRE_ROUTING, | 166 | .hooknum = NF_INET_PRE_ROUTING, |
163 | .priority = NF_IP_PRI_CONNTRACK, | 167 | .priority = NF_IP_PRI_CONNTRACK, |
164 | }, | 168 | }, |
165 | { | 169 | { |
166 | .hook = ipv4_conntrack_local, | 170 | .hook = ipv4_conntrack_local, |
167 | .owner = THIS_MODULE, | 171 | .owner = THIS_MODULE, |
168 | .pf = PF_INET, | 172 | .pf = NFPROTO_IPV4, |
169 | .hooknum = NF_INET_LOCAL_OUT, | 173 | .hooknum = NF_INET_LOCAL_OUT, |
170 | .priority = NF_IP_PRI_CONNTRACK, | 174 | .priority = NF_IP_PRI_CONNTRACK, |
171 | }, | 175 | }, |
172 | { | 176 | { |
173 | .hook = ipv4_confirm, | 177 | .hook = ipv4_confirm, |
174 | .owner = THIS_MODULE, | 178 | .owner = THIS_MODULE, |
175 | .pf = PF_INET, | 179 | .pf = NFPROTO_IPV4, |
176 | .hooknum = NF_INET_POST_ROUTING, | 180 | .hooknum = NF_INET_POST_ROUTING, |
177 | .priority = NF_IP_PRI_CONNTRACK_CONFIRM, | 181 | .priority = NF_IP_PRI_CONNTRACK_CONFIRM, |
178 | }, | 182 | }, |
179 | { | 183 | { |
180 | .hook = ipv4_confirm, | 184 | .hook = ipv4_confirm, |
181 | .owner = THIS_MODULE, | 185 | .owner = THIS_MODULE, |
182 | .pf = PF_INET, | 186 | .pf = NFPROTO_IPV4, |
183 | .hooknum = NF_INET_LOCAL_IN, | 187 | .hooknum = NF_INET_LOCAL_IN, |
184 | .priority = NF_IP_PRI_CONNTRACK_CONFIRM, | 188 | .priority = NF_IP_PRI_CONNTRACK_CONFIRM, |
185 | }, | 189 | }, |
@@ -256,11 +260,11 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) | |||
256 | tuple.dst.u3.ip = inet->daddr; | 260 | tuple.dst.u3.ip = inet->daddr; |
257 | tuple.dst.u.tcp.port = inet->dport; | 261 | tuple.dst.u.tcp.port = inet->dport; |
258 | tuple.src.l3num = PF_INET; | 262 | tuple.src.l3num = PF_INET; |
259 | tuple.dst.protonum = IPPROTO_TCP; | 263 | tuple.dst.protonum = sk->sk_protocol; |
260 | 264 | ||
261 | /* We only do TCP at the moment: is there a better way? */ | 265 | /* We only do TCP and SCTP at the moment: is there a better way? */ |
262 | if (strcmp(sk->sk_prot->name, "TCP")) { | 266 | if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) { |
263 | pr_debug("SO_ORIGINAL_DST: Not a TCP socket\n"); | 267 | pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n"); |
264 | return -ENOPROTOOPT; | 268 | return -ENOPROTOOPT; |
265 | } | 269 | } |
266 | 270 | ||
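
The getorigdst() hunk above keys the conntrack lookup on sk->sk_protocol instead of hard-coding IPPROTO_TCP, so SO_ORIGINAL_DST now also works for SCTP sockets. For context, the sketch below shows how a user-space transparent proxy typically queries that option on an accepted connection; it is an illustrative fragment, not part of the patch, and assumes the usual Linux definition of SO_ORIGINAL_DST from <linux/netfilter_ipv4.h>.

/* Illustrative helper: ask conntrack for the pre-NAT destination of a
 * connection accepted on a REDIRECT/DNAT target socket. */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/netfilter_ipv4.h>	/* SO_ORIGINAL_DST */

int print_original_dst(int connfd)
{
	struct sockaddr_in orig;
	socklen_t len = sizeof(orig);

	memset(&orig, 0, sizeof(orig));
	/* Returns the tuple the client originally targeted, before the NAT
	 * rule rewrote it; fails with ENOPROTOOPT for unsupported protocols. */
	if (getsockopt(connfd, IPPROTO_IP, SO_ORIGINAL_DST, &orig, &len) < 0) {
		perror("getsockopt(SO_ORIGINAL_DST)");
		return -1;
	}
	printf("original destination: %s:%u\n",
	       inet_ntoa(orig.sin_addr), ntohs(orig.sin_port));
	return 0;
}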
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 3229e0a81ba6..68afc6ecd343 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c | |||
@@ -212,7 +212,7 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple, | |||
212 | maxip = ntohl(range->max_ip); | 212 | maxip = ntohl(range->max_ip); |
213 | j = jhash_2words((__force u32)tuple->src.u3.ip, | 213 | j = jhash_2words((__force u32)tuple->src.u3.ip, |
214 | range->flags & IP_NAT_RANGE_PERSISTENT ? | 214 | range->flags & IP_NAT_RANGE_PERSISTENT ? |
215 | (__force u32)tuple->dst.u3.ip : 0, 0); | 215 | 0 : (__force u32)tuple->dst.u3.ip, 0); |
216 | j = ((u64)j * (maxip - minip + 1)) >> 32; | 216 | j = ((u64)j * (maxip - minip + 1)) >> 32; |
217 | *var_ipp = htonl(minip + j); | 217 | *var_ipp = htonl(minip + j); |
218 | } | 218 | } |
@@ -620,7 +620,7 @@ static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = { | |||
620 | }; | 620 | }; |
621 | 621 | ||
622 | static int | 622 | static int |
623 | nfnetlink_parse_nat(struct nlattr *nat, | 623 | nfnetlink_parse_nat(const struct nlattr *nat, |
624 | const struct nf_conn *ct, struct nf_nat_range *range) | 624 | const struct nf_conn *ct, struct nf_nat_range *range) |
625 | { | 625 | { |
626 | struct nlattr *tb[CTA_NAT_MAX+1]; | 626 | struct nlattr *tb[CTA_NAT_MAX+1]; |
@@ -656,7 +656,7 @@ nfnetlink_parse_nat(struct nlattr *nat, | |||
656 | static int | 656 | static int |
657 | nfnetlink_parse_nat_setup(struct nf_conn *ct, | 657 | nfnetlink_parse_nat_setup(struct nf_conn *ct, |
658 | enum nf_nat_manip_type manip, | 658 | enum nf_nat_manip_type manip, |
659 | struct nlattr *attr) | 659 | const struct nlattr *attr) |
660 | { | 660 | { |
661 | struct nf_nat_range range; | 661 | struct nf_nat_range range; |
662 | 662 | ||
@@ -671,7 +671,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct, | |||
671 | static int | 671 | static int |
672 | nfnetlink_parse_nat_setup(struct nf_conn *ct, | 672 | nfnetlink_parse_nat_setup(struct nf_conn *ct, |
673 | enum nf_nat_manip_type manip, | 673 | enum nf_nat_manip_type manip, |
674 | struct nlattr *attr) | 674 | const struct nlattr *attr) |
675 | { | 675 | { |
676 | return -EOPNOTSUPP; | 676 | return -EOPNOTSUPP; |
677 | } | 677 | } |
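
The one-character fix in find_best_ips_proto() above matters because of how a source is mapped onto a NAT address range: with IP_NAT_RANGE_PERSISTENT the destination must be left out of the hash so a given client always lands on the same NAT IP. The user-space sketch below only illustrates the "hash, then scale into [minip, maxip]" step; mix32() is a stand-in for jhash_2words() and every name is invented for the example.

/* Illustrative sketch of mapping a hash value onto an IP range the way
 * find_best_ips_proto() does: j is scaled into [minip, maxip] with a
 * 64-bit multiply instead of a modulo. mix32() is a toy mixer, not jhash. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mix32(uint32_t a, uint32_t b)
{
	uint32_t h = a * 0x9e3779b1u ^ b;
	h ^= h >> 16;
	h *= 0x85ebca6bu;
	h ^= h >> 13;
	return h;
}

/* persistent != 0 mimics IP_NAT_RANGE_PERSISTENT: the destination is
 * ignored, so the same source always picks the same NAT address. */
static uint32_t pick_nat_ip(uint32_t src, uint32_t dst,
			    uint32_t minip, uint32_t maxip, int persistent)
{
	uint32_t j = mix32(src, persistent ? 0 : dst);

	return minip + (uint32_t)(((uint64_t)j * (maxip - minip + 1)) >> 32);
}

int main(void)
{
	uint32_t minip = 0xc0a80001u, maxip = 0xc0a800ffu;	/* 192.168.0.1..255 */
	uint32_t src = 0x0a000001u;				/* 10.0.0.1 */

	/* With persistence, both destinations yield the same NAT address. */
	printf("%#x %#x\n",
	       (unsigned)pick_nat_ip(src, 0x08080808u, minip, maxip, 1),
	       (unsigned)pick_nat_ip(src, 0x01010101u, minip, maxip, 1));
	return 0;
}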
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index 6348a793936e..9e81e0dfb4ec 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c | |||
@@ -28,7 +28,7 @@ | |||
28 | (1 << NF_INET_POST_ROUTING) | \ | 28 | (1 << NF_INET_POST_ROUTING) | \ |
29 | (1 << NF_INET_LOCAL_OUT)) | 29 | (1 << NF_INET_LOCAL_OUT)) |
30 | 30 | ||
31 | static struct | 31 | static const struct |
32 | { | 32 | { |
33 | struct ipt_replace repl; | 33 | struct ipt_replace repl; |
34 | struct ipt_standard entries[3]; | 34 | struct ipt_standard entries[3]; |
@@ -58,11 +58,11 @@ static struct | |||
58 | .term = IPT_ERROR_INIT, /* ERROR */ | 58 | .term = IPT_ERROR_INIT, /* ERROR */ |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static struct xt_table nat_table = { | 61 | static const struct xt_table nat_table = { |
62 | .name = "nat", | 62 | .name = "nat", |
63 | .valid_hooks = NAT_VALID_HOOKS, | 63 | .valid_hooks = NAT_VALID_HOOKS, |
64 | .me = THIS_MODULE, | 64 | .me = THIS_MODULE, |
65 | .af = AF_INET, | 65 | .af = NFPROTO_IPV4, |
66 | }; | 66 | }; |
67 | 67 | ||
68 | /* Source NAT */ | 68 | /* Source NAT */ |
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index 5567bd0d0750..5f41d017ddd8 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c | |||
@@ -251,7 +251,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = { | |||
251 | { | 251 | { |
252 | .hook = nf_nat_in, | 252 | .hook = nf_nat_in, |
253 | .owner = THIS_MODULE, | 253 | .owner = THIS_MODULE, |
254 | .pf = PF_INET, | 254 | .pf = NFPROTO_IPV4, |
255 | .hooknum = NF_INET_PRE_ROUTING, | 255 | .hooknum = NF_INET_PRE_ROUTING, |
256 | .priority = NF_IP_PRI_NAT_DST, | 256 | .priority = NF_IP_PRI_NAT_DST, |
257 | }, | 257 | }, |
@@ -259,7 +259,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = { | |||
259 | { | 259 | { |
260 | .hook = nf_nat_out, | 260 | .hook = nf_nat_out, |
261 | .owner = THIS_MODULE, | 261 | .owner = THIS_MODULE, |
262 | .pf = PF_INET, | 262 | .pf = NFPROTO_IPV4, |
263 | .hooknum = NF_INET_POST_ROUTING, | 263 | .hooknum = NF_INET_POST_ROUTING, |
264 | .priority = NF_IP_PRI_NAT_SRC, | 264 | .priority = NF_IP_PRI_NAT_SRC, |
265 | }, | 265 | }, |
@@ -267,7 +267,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = { | |||
267 | { | 267 | { |
268 | .hook = nf_nat_local_fn, | 268 | .hook = nf_nat_local_fn, |
269 | .owner = THIS_MODULE, | 269 | .owner = THIS_MODULE, |
270 | .pf = PF_INET, | 270 | .pf = NFPROTO_IPV4, |
271 | .hooknum = NF_INET_LOCAL_OUT, | 271 | .hooknum = NF_INET_LOCAL_OUT, |
272 | .priority = NF_IP_PRI_NAT_DST, | 272 | .priority = NF_IP_PRI_NAT_DST, |
273 | }, | 273 | }, |
@@ -275,7 +275,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = { | |||
275 | { | 275 | { |
276 | .hook = nf_nat_fn, | 276 | .hook = nf_nat_fn, |
277 | .owner = THIS_MODULE, | 277 | .owner = THIS_MODULE, |
278 | .pf = PF_INET, | 278 | .pf = NFPROTO_IPV4, |
279 | .hooknum = NF_INET_LOCAL_IN, | 279 | .hooknum = NF_INET_LOCAL_IN, |
280 | .priority = NF_IP_PRI_NAT_SRC, | 280 | .priority = NF_IP_PRI_NAT_SRC, |
281 | }, | 281 | }, |
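
The four hunks above only switch the .pf field of the NAT hooks from the generic socket constant PF_INET to the netfilter-specific NFPROTO_IPV4. As a reminder of what such a registration looks like in this era, here is a schematic, heavily trimmed kernel-style fragment; it is not from the patch, the hook body is a placeholder, and it assumes <linux/netfilter.h> and <linux/netfilter_ipv4.h>.

/* Schematic sketch of a netfilter hook registration using NFPROTO_IPV4;
 * my_hook_fn is a placeholder that simply accepts every packet. */
static unsigned int my_hook_fn(unsigned int hooknum, struct sk_buff *skb,
			       const struct net_device *in,
			       const struct net_device *out,
			       int (*okfn)(struct sk_buff *))
{
	return NF_ACCEPT;
}

static struct nf_hook_ops my_ops[] __read_mostly = {
	{
		.hook		= my_hook_fn,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV4,	/* was PF_INET before this series */
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP_PRI_FIRST,
	},
};

/* Module init would call nf_register_hooks(my_ops, ARRAY_SIZE(my_ops));
 * module exit would call nf_unregister_hooks(my_ops, ARRAY_SIZE(my_ops)). */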
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c index ea50da0649fd..a2e5fc0a15e1 100644 --- a/net/ipv4/protocol.c +++ b/net/ipv4/protocol.c | |||
@@ -22,26 +22,11 @@ | |||
22 | * as published by the Free Software Foundation; either version | 22 | * as published by the Free Software Foundation; either version |
23 | * 2 of the License, or (at your option) any later version. | 23 | * 2 of the License, or (at your option) any later version. |
24 | */ | 24 | */ |
25 | 25 | #include <linux/cache.h> | |
26 | #include <asm/uaccess.h> | ||
27 | #include <asm/system.h> | ||
28 | #include <linux/module.h> | 26 | #include <linux/module.h> |
29 | #include <linux/types.h> | ||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/string.h> | ||
32 | #include <linux/socket.h> | ||
33 | #include <linux/in.h> | ||
34 | #include <linux/inet.h> | ||
35 | #include <linux/netdevice.h> | 27 | #include <linux/netdevice.h> |
36 | #include <linux/timer.h> | 28 | #include <linux/spinlock.h> |
37 | #include <net/ip.h> | ||
38 | #include <net/protocol.h> | 29 | #include <net/protocol.h> |
39 | #include <linux/skbuff.h> | ||
40 | #include <net/sock.h> | ||
41 | #include <net/icmp.h> | ||
42 | #include <net/udp.h> | ||
43 | #include <net/ipip.h> | ||
44 | #include <linux/igmp.h> | ||
45 | 30 | ||
46 | struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp; | 31 | struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp; |
47 | static DEFINE_SPINLOCK(inet_proto_lock); | 32 | static DEFINE_SPINLOCK(inet_proto_lock); |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 2979f14bb188..ebb1e5848bc6 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -375,7 +375,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | |||
375 | err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, | 375 | err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, |
376 | dst_output); | 376 | dst_output); |
377 | if (err > 0) | 377 | if (err > 0) |
378 | err = inet->recverr ? net_xmit_errno(err) : 0; | 378 | err = net_xmit_errno(err); |
379 | if (err) | 379 | if (err) |
380 | goto error; | 380 | goto error; |
381 | out: | 381 | out: |
@@ -386,6 +386,8 @@ error_fault: | |||
386 | kfree_skb(skb); | 386 | kfree_skb(skb); |
387 | error: | 387 | error: |
388 | IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); | 388 | IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); |
389 | if (err == -ENOBUFS && !inet->recverr) | ||
390 | err = 0; | ||
389 | return err; | 391 | return err; |
390 | } | 392 | } |
391 | 393 | ||
@@ -576,8 +578,11 @@ back_from_confirm: | |||
576 | &ipc, &rt, msg->msg_flags); | 578 | &ipc, &rt, msg->msg_flags); |
577 | if (err) | 579 | if (err) |
578 | ip_flush_pending_frames(sk); | 580 | ip_flush_pending_frames(sk); |
579 | else if (!(msg->msg_flags & MSG_MORE)) | 581 | else if (!(msg->msg_flags & MSG_MORE)) { |
580 | err = ip_push_pending_frames(sk); | 582 | err = ip_push_pending_frames(sk); |
583 | if (err == -ENOBUFS && !inet->recverr) | ||
584 | err = 0; | ||
585 | } | ||
581 | release_sock(sk); | 586 | release_sock(sk); |
582 | } | 587 | } |
583 | done: | 588 | done: |
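
Both raw.c hunks above make a send-path -ENOBUFS visible to the application only when it opted into extended error reporting (inet->recverr); otherwise the error is swallowed as before. The user-space fragment below sketches that opt-in and the draining of the socket error queue; it is illustrative and not derived from this patch.

/* Illustrative: opt a socket into extended IP error reporting so send-path
 * errors (including ENOBUFS after this change) reach the application. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/errqueue.h>	/* struct sock_extended_err */

static int enable_recverr(int fd)
{
	int on = 1;

	return setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
}

static void drain_error_queue(int fd)
{
	char cbuf[512], dbuf[512];
	struct iovec iov = { .iov_base = dbuf, .iov_len = sizeof(dbuf) };
	struct msghdr msg;
	struct cmsghdr *cm;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
		return;				/* nothing queued */

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVERR) {
			struct sock_extended_err *ee = (void *)CMSG_DATA(cm);

			fprintf(stderr, "queued error: %s (origin %u)\n",
				strerror(ee->ee_errno), ee->ee_origin);
		}
	}
}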
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 278f46f5011b..91867d3e6328 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1514,13 +1514,17 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) | |||
1514 | void ip_rt_send_redirect(struct sk_buff *skb) | 1514 | void ip_rt_send_redirect(struct sk_buff *skb) |
1515 | { | 1515 | { |
1516 | struct rtable *rt = skb_rtable(skb); | 1516 | struct rtable *rt = skb_rtable(skb); |
1517 | struct in_device *in_dev = in_dev_get(rt->u.dst.dev); | 1517 | struct in_device *in_dev; |
1518 | int log_martians; | ||
1518 | 1519 | ||
1519 | if (!in_dev) | 1520 | rcu_read_lock(); |
1521 | in_dev = __in_dev_get_rcu(rt->u.dst.dev); | ||
1522 | if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) { | ||
1523 | rcu_read_unlock(); | ||
1520 | return; | 1524 | return; |
1521 | 1525 | } | |
1522 | if (!IN_DEV_TX_REDIRECTS(in_dev)) | 1526 | log_martians = IN_DEV_LOG_MARTIANS(in_dev); |
1523 | goto out; | 1527 | rcu_read_unlock(); |
1524 | 1528 | ||
1525 | /* No redirected packets during ip_rt_redirect_silence; | 1529 | /* No redirected packets during ip_rt_redirect_silence; |
1526 | * reset the algorithm. | 1530 | * reset the algorithm. |
@@ -1533,7 +1537,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
1533 | */ | 1537 | */ |
1534 | if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) { | 1538 | if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) { |
1535 | rt->u.dst.rate_last = jiffies; | 1539 | rt->u.dst.rate_last = jiffies; |
1536 | goto out; | 1540 | return; |
1537 | } | 1541 | } |
1538 | 1542 | ||
1539 | /* Check for load limit; set rate_last to the latest sent | 1543 | /* Check for load limit; set rate_last to the latest sent |
@@ -1547,7 +1551,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
1547 | rt->u.dst.rate_last = jiffies; | 1551 | rt->u.dst.rate_last = jiffies; |
1548 | ++rt->u.dst.rate_tokens; | 1552 | ++rt->u.dst.rate_tokens; |
1549 | #ifdef CONFIG_IP_ROUTE_VERBOSE | 1553 | #ifdef CONFIG_IP_ROUTE_VERBOSE |
1550 | if (IN_DEV_LOG_MARTIANS(in_dev) && | 1554 | if (log_martians && |
1551 | rt->u.dst.rate_tokens == ip_rt_redirect_number && | 1555 | rt->u.dst.rate_tokens == ip_rt_redirect_number && |
1552 | net_ratelimit()) | 1556 | net_ratelimit()) |
1553 | printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", | 1557 | printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", |
@@ -1555,8 +1559,6 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
1555 | &rt->rt_dst, &rt->rt_gateway); | 1559 | &rt->rt_dst, &rt->rt_gateway); |
1556 | #endif | 1560 | #endif |
1557 | } | 1561 | } |
1558 | out: | ||
1559 | in_dev_put(in_dev); | ||
1560 | } | 1562 | } |
1561 | 1563 | ||
1562 | static int ip_error(struct sk_buff *skb) | 1564 | static int ip_error(struct sk_buff *skb) |
@@ -3442,7 +3444,7 @@ int __init ip_rt_init(void) | |||
3442 | printk(KERN_ERR "Unable to create route proc files\n"); | 3444 | printk(KERN_ERR "Unable to create route proc files\n"); |
3443 | #ifdef CONFIG_XFRM | 3445 | #ifdef CONFIG_XFRM |
3444 | xfrm_init(); | 3446 | xfrm_init(); |
3445 | xfrm4_init(); | 3447 | xfrm4_init(ip_rt_max_size); |
3446 | #endif | 3448 | #endif |
3447 | rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL); | 3449 | rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL); |
3448 | 3450 | ||
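
The ip_rt_send_redirect() rework above drops the in_dev_get()/in_dev_put() reference counting in favour of an RCU read-side section: the in_device is only touched under rcu_read_lock(), the single flag needed later (log_martians) is copied out, and the lock is released before the rate-limiting logic runs. Schematically the pattern looks like the fragment below (generic names, not code from the patch).

/* Schematic RCU read-side pattern used by the ip_rt_send_redirect() rework:
 * dereference under the read lock, snapshot what you need, unlock early. */
static void example_reader(struct net_device *dev)
{
	struct in_device *in_dev;
	int log_martians;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);		/* valid only inside the section */
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);	/* snapshot, then unlock */
	rcu_read_unlock();

	/* The slow path below only uses the snapshot, never the in_dev
	 * pointer, so no reference count is needed. */
	(void)log_martians;
}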
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 91145244ea63..edeea060db44 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1839,7 +1839,7 @@ void tcp_close(struct sock *sk, long timeout) | |||
1839 | /* Unread data was tossed, zap the connection. */ | 1839 | /* Unread data was tossed, zap the connection. */ |
1840 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); | 1840 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); |
1841 | tcp_set_state(sk, TCP_CLOSE); | 1841 | tcp_set_state(sk, TCP_CLOSE); |
1842 | tcp_send_active_reset(sk, GFP_KERNEL); | 1842 | tcp_send_active_reset(sk, sk->sk_allocation); |
1843 | } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { | 1843 | } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { |
1844 | /* Check zero linger _after_ checking for unread data. */ | 1844 | /* Check zero linger _after_ checking for unread data. */ |
1845 | sk->sk_prot->disconnect(sk, 0); | 1845 | sk->sk_prot->disconnect(sk, 0); |
@@ -2336,13 +2336,13 @@ static int do_tcp_getsockopt(struct sock *sk, int level, | |||
2336 | val = !!(tp->nonagle&TCP_NAGLE_CORK); | 2336 | val = !!(tp->nonagle&TCP_NAGLE_CORK); |
2337 | break; | 2337 | break; |
2338 | case TCP_KEEPIDLE: | 2338 | case TCP_KEEPIDLE: |
2339 | val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ; | 2339 | val = keepalive_time_when(tp) / HZ; |
2340 | break; | 2340 | break; |
2341 | case TCP_KEEPINTVL: | 2341 | case TCP_KEEPINTVL: |
2342 | val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ; | 2342 | val = keepalive_intvl_when(tp) / HZ; |
2343 | break; | 2343 | break; |
2344 | case TCP_KEEPCNT: | 2344 | case TCP_KEEPCNT: |
2345 | val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; | 2345 | val = keepalive_probes(tp); |
2346 | break; | 2346 | break; |
2347 | case TCP_SYNCNT: | 2347 | case TCP_SYNCNT: |
2348 | val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; | 2348 | val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
@@ -2658,7 +2658,7 @@ void tcp_free_md5sig_pool(void) | |||
2658 | 2658 | ||
2659 | EXPORT_SYMBOL(tcp_free_md5sig_pool); | 2659 | EXPORT_SYMBOL(tcp_free_md5sig_pool); |
2660 | 2660 | ||
2661 | static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void) | 2661 | static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk) |
2662 | { | 2662 | { |
2663 | int cpu; | 2663 | int cpu; |
2664 | struct tcp_md5sig_pool **pool; | 2664 | struct tcp_md5sig_pool **pool; |
@@ -2671,7 +2671,7 @@ static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void) | |||
2671 | struct tcp_md5sig_pool *p; | 2671 | struct tcp_md5sig_pool *p; |
2672 | struct crypto_hash *hash; | 2672 | struct crypto_hash *hash; |
2673 | 2673 | ||
2674 | p = kzalloc(sizeof(*p), GFP_KERNEL); | 2674 | p = kzalloc(sizeof(*p), sk->sk_allocation); |
2675 | if (!p) | 2675 | if (!p) |
2676 | goto out_free; | 2676 | goto out_free; |
2677 | *per_cpu_ptr(pool, cpu) = p; | 2677 | *per_cpu_ptr(pool, cpu) = p; |
@@ -2688,7 +2688,7 @@ out_free: | |||
2688 | return NULL; | 2688 | return NULL; |
2689 | } | 2689 | } |
2690 | 2690 | ||
2691 | struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void) | 2691 | struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk) |
2692 | { | 2692 | { |
2693 | struct tcp_md5sig_pool **pool; | 2693 | struct tcp_md5sig_pool **pool; |
2694 | int alloc = 0; | 2694 | int alloc = 0; |
@@ -2709,7 +2709,7 @@ retry: | |||
2709 | 2709 | ||
2710 | if (alloc) { | 2710 | if (alloc) { |
2711 | /* we cannot hold spinlock here because this may sleep. */ | 2711 | /* we cannot hold spinlock here because this may sleep. */ |
2712 | struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(); | 2712 | struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk); |
2713 | spin_lock_bh(&tcp_md5sig_pool_lock); | 2713 | spin_lock_bh(&tcp_md5sig_pool_lock); |
2714 | if (!p) { | 2714 | if (!p) { |
2715 | tcp_md5sig_users--; | 2715 | tcp_md5sig_users--; |
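
The do_tcp_getsockopt() hunk folds the recurring "per-socket value, else sysctl default" expressions into keepalive_time_when(), keepalive_intvl_when() and keepalive_probes(). From user space these correspond to the TCP_KEEPIDLE, TCP_KEEPINTVL and TCP_KEEPCNT options; the sketch below sets them and reads one back through the getsockopt() path touched above. It is illustrative only.

/* Illustrative: configure TCP keepalive on a socket and read a value back
 * through the getsockopt() code patched above. */
#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int setup_keepalive(int fd)
{
	int on = 1, idle = 60, intvl = 10, cnt = 5, val;
	socklen_t len = sizeof(val);

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0 ||
	    setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) < 0 ||
	    setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) < 0 ||
	    setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)) < 0)
		return -1;

	/* Unset per-socket values fall back to the tcp_keepalive_* sysctls,
	 * which is exactly what the keepalive_*_when() helpers encode. */
	if (getsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &val, &len) == 0)
		printf("TCP_KEEPIDLE = %d s\n", val);
	return 0;
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd >= 0)
		setup_keepalive(fd);
	return 0;
}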
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2bdb0da237e6..af6d6fa00db1 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -685,7 +685,7 @@ static inline void tcp_set_rto(struct sock *sk) | |||
685 | * is invisible. Actually, Linux-2.4 also generates erratic | 685 | * is invisible. Actually, Linux-2.4 also generates erratic |
686 | * ACKs in some circumstances. | 686 | * ACKs in some circumstances. |
687 | */ | 687 | */ |
688 | inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar; | 688 | inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); |
689 | 689 | ||
690 | /* 2. Fixups made earlier cannot be right. | 690 | /* 2. Fixups made earlier cannot be right. |
691 | * If we do not estimate RTO correctly without them, | 691 | * If we do not estimate RTO correctly without them, |
@@ -696,8 +696,7 @@ static inline void tcp_set_rto(struct sock *sk) | |||
696 | /* NOTE: clamping at TCP_RTO_MIN is not required, current algo | 696 | /* NOTE: clamping at TCP_RTO_MIN is not required, current algo |
697 | * guarantees that rto is higher. | 697 | * guarantees that rto is higher. |
698 | */ | 698 | */ |
699 | if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) | 699 | tcp_bound_rto(sk); |
700 | inet_csk(sk)->icsk_rto = TCP_RTO_MAX; | ||
701 | } | 700 | } |
702 | 701 | ||
703 | /* Save metrics learned by this TCP session. | 702 | /* Save metrics learned by this TCP session. |
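
tcp_set_rto() now delegates to __tcp_set_rto() and tcp_bound_rto(), whose arithmetic is visible in the removed lines: RTO = (srtt >> 3) + rttvar, clamped at TCP_RTO_MAX (srtt is kept in fixed point as eight times the smoothed RTT). The small user-space program below works that arithmetic through a few samples; the estimator update and the constants are simplified stand-ins, not the kernel's exact code.

/* Worked example of the RTO arithmetic behind __tcp_set_rto()/tcp_bound_rto().
 * srtt8 holds 8*SRTT (fixed point); rttvar holds the mean deviation.
 * Constants and the update rule are illustrative, not the kernel's. */
#include <stdio.h>
#include <stdint.h>

#define RTO_MIN_MS	200u
#define RTO_MAX_MS	120000u

struct rtt_state {
	uint32_t srtt8;		/* 8 * smoothed RTT, like tp->srtt */
	uint32_t rttvar;	/* mean deviation, like tp->rttvar */
};

static void rtt_update(struct rtt_state *s, uint32_t sample_ms)
{
	if (!s->srtt8) {			/* first measurement */
		s->srtt8  = sample_ms << 3;
		s->rttvar = sample_ms / 2;
		return;
	}
	int32_t err = (int32_t)sample_ms - (int32_t)(s->srtt8 >> 3);
	int32_t dev = err < 0 ? -err : err;

	s->srtt8 = (uint32_t)((int32_t)s->srtt8 + err);		/* srtt += err/8 */
	s->rttvar = (uint32_t)((int32_t)s->rttvar +
			       (dev - (int32_t)s->rttvar) / 4);
}

static uint32_t rtt_rto(const struct rtt_state *s)
{
	uint32_t rto = (s->srtt8 >> 3) + s->rttvar;	/* as in __tcp_set_rto() */

	if (rto < RTO_MIN_MS)
		rto = RTO_MIN_MS;			/* floor for the example */
	if (rto > RTO_MAX_MS)
		rto = RTO_MAX_MS;			/* as in tcp_bound_rto() */
	return rto;
}

int main(void)
{
	struct rtt_state s = { 0, 0 };
	uint32_t samples[] = { 100, 110, 400, 95 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rtt_update(&s, samples[i]);
		printf("sample %3u ms -> rto %u ms\n", samples[i], rtt_rto(&s));
	}
	return 0;
}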
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 6d88219c5e22..0543561da999 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -328,26 +328,29 @@ static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu) | |||
328 | * | 328 | * |
329 | */ | 329 | */ |
330 | 330 | ||
331 | void tcp_v4_err(struct sk_buff *skb, u32 info) | 331 | void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) |
332 | { | 332 | { |
333 | struct iphdr *iph = (struct iphdr *)skb->data; | 333 | struct iphdr *iph = (struct iphdr *)icmp_skb->data; |
334 | struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); | 334 | struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2)); |
335 | struct inet_connection_sock *icsk; | ||
335 | struct tcp_sock *tp; | 336 | struct tcp_sock *tp; |
336 | struct inet_sock *inet; | 337 | struct inet_sock *inet; |
337 | const int type = icmp_hdr(skb)->type; | 338 | const int type = icmp_hdr(icmp_skb)->type; |
338 | const int code = icmp_hdr(skb)->code; | 339 | const int code = icmp_hdr(icmp_skb)->code; |
339 | struct sock *sk; | 340 | struct sock *sk; |
341 | struct sk_buff *skb; | ||
340 | __u32 seq; | 342 | __u32 seq; |
343 | __u32 remaining; | ||
341 | int err; | 344 | int err; |
342 | struct net *net = dev_net(skb->dev); | 345 | struct net *net = dev_net(icmp_skb->dev); |
343 | 346 | ||
344 | if (skb->len < (iph->ihl << 2) + 8) { | 347 | if (icmp_skb->len < (iph->ihl << 2) + 8) { |
345 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); | 348 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); |
346 | return; | 349 | return; |
347 | } | 350 | } |
348 | 351 | ||
349 | sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest, | 352 | sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest, |
350 | iph->saddr, th->source, inet_iif(skb)); | 353 | iph->saddr, th->source, inet_iif(icmp_skb)); |
351 | if (!sk) { | 354 | if (!sk) { |
352 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); | 355 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); |
353 | return; | 356 | return; |
@@ -367,6 +370,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) | |||
367 | if (sk->sk_state == TCP_CLOSE) | 370 | if (sk->sk_state == TCP_CLOSE) |
368 | goto out; | 371 | goto out; |
369 | 372 | ||
373 | icsk = inet_csk(sk); | ||
370 | tp = tcp_sk(sk); | 374 | tp = tcp_sk(sk); |
371 | seq = ntohl(th->seq); | 375 | seq = ntohl(th->seq); |
372 | if (sk->sk_state != TCP_LISTEN && | 376 | if (sk->sk_state != TCP_LISTEN && |
@@ -393,6 +397,39 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) | |||
393 | } | 397 | } |
394 | 398 | ||
395 | err = icmp_err_convert[code].errno; | 399 | err = icmp_err_convert[code].errno; |
400 | /* check if icmp_skb allows revert of backoff | ||
401 | * (see draft-zimmermann-tcp-lcd) */ | ||
402 | if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH) | ||
403 | break; | ||
404 | if (seq != tp->snd_una || !icsk->icsk_retransmits || | ||
405 | !icsk->icsk_backoff) | ||
406 | break; | ||
407 | |||
408 | icsk->icsk_backoff--; | ||
409 | inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) << | ||
410 | icsk->icsk_backoff; | ||
411 | tcp_bound_rto(sk); | ||
412 | |||
413 | skb = tcp_write_queue_head(sk); | ||
414 | BUG_ON(!skb); | ||
415 | |||
416 | remaining = icsk->icsk_rto - min(icsk->icsk_rto, | ||
417 | tcp_time_stamp - TCP_SKB_CB(skb)->when); | ||
418 | |||
419 | if (remaining) { | ||
420 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | ||
421 | remaining, TCP_RTO_MAX); | ||
422 | } else if (sock_owned_by_user(sk)) { | ||
423 | /* RTO revert clocked out retransmission, | ||
424 | * but socket is locked. Will defer. */ | ||
425 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | ||
426 | HZ/20, TCP_RTO_MAX); | ||
427 | } else { | ||
428 | /* RTO revert clocked out retransmission. | ||
429 | * Will retransmit now */ | ||
430 | tcp_retransmit_timer(sk); | ||
431 | } | ||
432 | |||
396 | break; | 433 | break; |
397 | case ICMP_TIME_EXCEEDED: | 434 | case ICMP_TIME_EXCEEDED: |
398 | err = EHOSTUNREACH; | 435 | err = EHOSTUNREACH; |
@@ -849,7 +886,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, | |||
849 | } | 886 | } |
850 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; | 887 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; |
851 | } | 888 | } |
852 | if (tcp_alloc_md5sig_pool() == NULL) { | 889 | if (tcp_alloc_md5sig_pool(sk) == NULL) { |
853 | kfree(newkey); | 890 | kfree(newkey); |
854 | return -ENOMEM; | 891 | return -ENOMEM; |
855 | } | 892 | } |
@@ -970,8 +1007,9 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, | |||
970 | 1007 | ||
971 | if (!tcp_sk(sk)->md5sig_info) { | 1008 | if (!tcp_sk(sk)->md5sig_info) { |
972 | struct tcp_sock *tp = tcp_sk(sk); | 1009 | struct tcp_sock *tp = tcp_sk(sk); |
973 | struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL); | 1010 | struct tcp_md5sig_info *p; |
974 | 1011 | ||
1012 | p = kzalloc(sizeof(*p), sk->sk_allocation); | ||
975 | if (!p) | 1013 | if (!p) |
976 | return -EINVAL; | 1014 | return -EINVAL; |
977 | 1015 | ||
@@ -979,7 +1017,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, | |||
979 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; | 1017 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; |
980 | } | 1018 | } |
981 | 1019 | ||
982 | newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); | 1020 | newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation); |
983 | if (!newkey) | 1021 | if (!newkey) |
984 | return -ENOMEM; | 1022 | return -ENOMEM; |
985 | return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr, | 1023 | return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr, |
@@ -1158,7 +1196,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = { | |||
1158 | }; | 1196 | }; |
1159 | 1197 | ||
1160 | #ifdef CONFIG_TCP_MD5SIG | 1198 | #ifdef CONFIG_TCP_MD5SIG |
1161 | static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { | 1199 | static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { |
1162 | .md5_lookup = tcp_v4_reqsk_md5_lookup, | 1200 | .md5_lookup = tcp_v4_reqsk_md5_lookup, |
1163 | .calc_md5_hash = tcp_v4_md5_hash_skb, | 1201 | .calc_md5_hash = tcp_v4_md5_hash_skb, |
1164 | }; | 1202 | }; |
@@ -1717,7 +1755,7 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw) | |||
1717 | return 0; | 1755 | return 0; |
1718 | } | 1756 | } |
1719 | 1757 | ||
1720 | struct inet_connection_sock_af_ops ipv4_specific = { | 1758 | const struct inet_connection_sock_af_ops ipv4_specific = { |
1721 | .queue_xmit = ip_queue_xmit, | 1759 | .queue_xmit = ip_queue_xmit, |
1722 | .send_check = tcp_v4_send_check, | 1760 | .send_check = tcp_v4_send_check, |
1723 | .rebuild_header = inet_sk_rebuild_header, | 1761 | .rebuild_header = inet_sk_rebuild_header, |
@@ -1737,7 +1775,7 @@ struct inet_connection_sock_af_ops ipv4_specific = { | |||
1737 | }; | 1775 | }; |
1738 | 1776 | ||
1739 | #ifdef CONFIG_TCP_MD5SIG | 1777 | #ifdef CONFIG_TCP_MD5SIG |
1740 | static struct tcp_sock_af_ops tcp_sock_ipv4_specific = { | 1778 | static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { |
1741 | .md5_lookup = tcp_v4_md5_lookup, | 1779 | .md5_lookup = tcp_v4_md5_lookup, |
1742 | .calc_md5_hash = tcp_v4_md5_hash_skb, | 1780 | .calc_md5_hash = tcp_v4_md5_hash_skb, |
1743 | .md5_add = tcp_v4_md5_add_func, | 1781 | .md5_add = tcp_v4_md5_add_func, |
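
The long tcp_v4_err() hunk implements the draft-zimmermann-tcp-lcd behaviour referenced in its comment: on an ICMP net/host unreachable for the oldest unacknowledged segment, one RTO backoff step is reverted and the retransmit timer is re-armed with whatever time remains, or the retransmission is issued immediately if that time has already passed. The user-space sketch below reproduces only that timer arithmetic; names and units are invented for the example.

/* Worked example of the "revert one RTO backoff step" arithmetic from the
 * tcp_v4_err() hunk above. All names are invented; times are in ms. */
#include <stdio.h>
#include <stdint.h>

#define RTO_MAX_MS	120000u

static uint32_t umin(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* base_rto: RTO without backoff; backoff: current exponent;
 * elapsed: time since the head-of-queue segment was (re)transmitted. */
static void icmp_revert(uint32_t base_rto, unsigned backoff, uint32_t elapsed)
{
	if (!backoff) {
		printf("no backoff in effect, nothing to revert\n");
		return;
	}
	backoff--;					/* undo one doubling */
	uint32_t rto = umin(base_rto << backoff, RTO_MAX_MS);
	uint32_t remaining = rto - umin(rto, elapsed);

	if (remaining)
		printf("re-arm retransmit timer for %u ms (rto now %u ms)\n",
		       remaining, rto);
	else
		printf("reverted RTO already expired: retransmit immediately\n");
}

int main(void)
{
	icmp_revert(300, 3, 500);	/* 300ms << 2 = 1200ms, 700ms left  */
	icmp_revert(300, 1, 900);	/* 300ms << 0 = 300ms, already late */
	return 0;
}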
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index f8d67ccc64f3..e48c37d74d77 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -322,7 +322,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) | |||
322 | if (key != NULL) { | 322 | if (key != NULL) { |
323 | memcpy(&tcptw->tw_md5_key, key->key, key->keylen); | 323 | memcpy(&tcptw->tw_md5_key, key->key, key->keylen); |
324 | tcptw->tw_md5_keylen = key->keylen; | 324 | tcptw->tw_md5_keylen = key->keylen; |
325 | if (tcp_alloc_md5sig_pool() == NULL) | 325 | if (tcp_alloc_md5sig_pool(sk) == NULL) |
326 | BUG(); | 326 | BUG(); |
327 | } | 327 | } |
328 | } while (0); | 328 | } while (0); |
@@ -657,29 +657,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
657 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); | 657 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); |
658 | if (child == NULL) | 658 | if (child == NULL) |
659 | goto listen_overflow; | 659 | goto listen_overflow; |
660 | #ifdef CONFIG_TCP_MD5SIG | ||
661 | else { | ||
662 | /* Copy over the MD5 key from the original socket */ | ||
663 | struct tcp_md5sig_key *key; | ||
664 | struct tcp_sock *tp = tcp_sk(sk); | ||
665 | key = tp->af_specific->md5_lookup(sk, child); | ||
666 | if (key != NULL) { | ||
667 | /* | ||
668 | * We're using one, so create a matching key on the | ||
669 | * newsk structure. If we fail to get memory then we | ||
670 | * end up not copying the key across. Shucks. | ||
671 | */ | ||
672 | char *newkey = kmemdup(key->key, key->keylen, | ||
673 | GFP_ATOMIC); | ||
674 | if (newkey) { | ||
675 | if (!tcp_alloc_md5sig_pool()) | ||
676 | BUG(); | ||
677 | tp->af_specific->md5_add(child, child, newkey, | ||
678 | key->keylen); | ||
679 | } | ||
680 | } | ||
681 | } | ||
682 | #endif | ||
683 | 660 | ||
684 | inet_csk_reqsk_queue_unlink(sk, req, prev); | 661 | inet_csk_reqsk_queue_unlink(sk, req, prev); |
685 | inet_csk_reqsk_queue_removed(sk, req); | 662 | inet_csk_reqsk_queue_removed(sk, req); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index bd62712848fa..5200aab0ca97 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -59,6 +59,7 @@ int sysctl_tcp_base_mss __read_mostly = 512; | |||
59 | /* By default, RFC2861 behavior. */ | 59 | /* By default, RFC2861 behavior. */ |
60 | int sysctl_tcp_slow_start_after_idle __read_mostly = 1; | 60 | int sysctl_tcp_slow_start_after_idle __read_mostly = 1; |
61 | 61 | ||
62 | /* Account for new data that has been sent to the network. */ | ||
62 | static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) | 63 | static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) |
63 | { | 64 | { |
64 | struct tcp_sock *tp = tcp_sk(sk); | 65 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -142,6 +143,7 @@ static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst) | |||
142 | tp->snd_cwnd_used = 0; | 143 | tp->snd_cwnd_used = 0; |
143 | } | 144 | } |
144 | 145 | ||
146 | /* Congestion state accounting after a packet has been sent. */ | ||
145 | static void tcp_event_data_sent(struct tcp_sock *tp, | 147 | static void tcp_event_data_sent(struct tcp_sock *tp, |
146 | struct sk_buff *skb, struct sock *sk) | 148 | struct sk_buff *skb, struct sock *sk) |
147 | { | 149 | { |
@@ -161,6 +163,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp, | |||
161 | icsk->icsk_ack.pingpong = 1; | 163 | icsk->icsk_ack.pingpong = 1; |
162 | } | 164 | } |
163 | 165 | ||
166 | /* Account for an ACK we sent. */ | ||
164 | static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) | 167 | static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) |
165 | { | 168 | { |
166 | tcp_dec_quickack_mode(sk, pkts); | 169 | tcp_dec_quickack_mode(sk, pkts); |
@@ -276,6 +279,7 @@ static u16 tcp_select_window(struct sock *sk) | |||
276 | return new_win; | 279 | return new_win; |
277 | } | 280 | } |
278 | 281 | ||
282 | /* Packet ECN state for a SYN-ACK */ | ||
279 | static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb) | 283 | static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb) |
280 | { | 284 | { |
281 | TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR; | 285 | TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR; |
@@ -283,6 +287,7 @@ static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb) | |||
283 | TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; | 287 | TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; |
284 | } | 288 | } |
285 | 289 | ||
290 | /* Packet ECN state for a SYN. */ | ||
286 | static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) | 291 | static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) |
287 | { | 292 | { |
288 | struct tcp_sock *tp = tcp_sk(sk); | 293 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -301,6 +306,9 @@ TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th) | |||
301 | th->ece = 1; | 306 | th->ece = 1; |
302 | } | 307 | } |
303 | 308 | ||
309 | /* Set up ECN state for a packet on an ESTABLISHED socket that is about to | ||
310 | * be sent. | ||
311 | */ | ||
304 | static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, | 312 | static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, |
305 | int tcp_header_len) | 313 | int tcp_header_len) |
306 | { | 314 | { |
@@ -362,7 +370,9 @@ struct tcp_out_options { | |||
362 | __u32 tsval, tsecr; /* need to include OPTION_TS */ | 370 | __u32 tsval, tsecr; /* need to include OPTION_TS */ |
363 | }; | 371 | }; |
364 | 372 | ||
365 | /* Beware: Something in the Internet is very sensitive to the ordering of | 373 | /* Write previously computed TCP options to the packet. |
374 | * | ||
375 | * Beware: Something in the Internet is very sensitive to the ordering of | ||
366 | * TCP options, we learned this the hard way, so be careful here. | 376 | * TCP options, we learned this the hard way, so be careful here. |
367 | * Luckily we can at least blame others for their non-compliance but from an | 377 | * Luckily we can at least blame others for their non-compliance but from an |
368 | * interoperability perspective it seems that we're somewhat stuck with | 378 | * interoperability perspective it seems that we're somewhat stuck with |
@@ -445,6 +455,9 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, | |||
445 | } | 455 | } |
446 | } | 456 | } |
447 | 457 | ||
458 | /* Compute TCP options for SYN packets. This is not the final | ||
459 | * network wire format yet. | ||
460 | */ | ||
448 | static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, | 461 | static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, |
449 | struct tcp_out_options *opts, | 462 | struct tcp_out_options *opts, |
450 | struct tcp_md5sig_key **md5) { | 463 | struct tcp_md5sig_key **md5) { |
@@ -493,6 +506,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
493 | return size; | 506 | return size; |
494 | } | 507 | } |
495 | 508 | ||
509 | /* Set up TCP options for SYN-ACKs. */ | ||
496 | static unsigned tcp_synack_options(struct sock *sk, | 510 | static unsigned tcp_synack_options(struct sock *sk, |
497 | struct request_sock *req, | 511 | struct request_sock *req, |
498 | unsigned mss, struct sk_buff *skb, | 512 | unsigned mss, struct sk_buff *skb, |
@@ -541,6 +555,9 @@ static unsigned tcp_synack_options(struct sock *sk, | |||
541 | return size; | 555 | return size; |
542 | } | 556 | } |
543 | 557 | ||
558 | /* Compute TCP options for ESTABLISHED sockets. This is not the | ||
559 | * final wire format yet. | ||
560 | */ | ||
544 | static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, | 561 | static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, |
545 | struct tcp_out_options *opts, | 562 | struct tcp_out_options *opts, |
546 | struct tcp_md5sig_key **md5) { | 563 | struct tcp_md5sig_key **md5) { |
@@ -705,7 +722,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
705 | return net_xmit_eval(err); | 722 | return net_xmit_eval(err); |
706 | } | 723 | } |
707 | 724 | ||
708 | /* This routine just queue's the buffer | 725 | /* This routine just queues the buffer for sending. |
709 | * | 726 | * |
710 | * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, | 727 | * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, |
711 | * otherwise socket can stall. | 728 | * otherwise socket can stall. |
@@ -722,6 +739,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) | |||
722 | sk_mem_charge(sk, skb->truesize); | 739 | sk_mem_charge(sk, skb->truesize); |
723 | } | 740 | } |
724 | 741 | ||
742 | /* Initialize TSO segments for a packet. */ | ||
725 | static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, | 743 | static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, |
726 | unsigned int mss_now) | 744 | unsigned int mss_now) |
727 | { | 745 | { |
@@ -909,6 +927,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) | |||
909 | skb->len = skb->data_len; | 927 | skb->len = skb->data_len; |
910 | } | 928 | } |
911 | 929 | ||
930 | /* Remove acked data from a packet in the transmit queue. */ | ||
912 | int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) | 931 | int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) |
913 | { | 932 | { |
914 | if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) | 933 | if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) |
@@ -937,7 +956,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) | |||
937 | return 0; | 956 | return 0; |
938 | } | 957 | } |
939 | 958 | ||
940 | /* Not accounting for SACKs here. */ | 959 | /* Calculate MSS. Not accounting for SACKs here. */ |
941 | int tcp_mtu_to_mss(struct sock *sk, int pmtu) | 960 | int tcp_mtu_to_mss(struct sock *sk, int pmtu) |
942 | { | 961 | { |
943 | struct tcp_sock *tp = tcp_sk(sk); | 962 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -981,6 +1000,7 @@ int tcp_mss_to_mtu(struct sock *sk, int mss) | |||
981 | return mtu; | 1000 | return mtu; |
982 | } | 1001 | } |
983 | 1002 | ||
1003 | /* MTU probing init per socket */ | ||
984 | void tcp_mtup_init(struct sock *sk) | 1004 | void tcp_mtup_init(struct sock *sk) |
985 | { | 1005 | { |
986 | struct tcp_sock *tp = tcp_sk(sk); | 1006 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -1143,7 +1163,8 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, | |||
1143 | return 0; | 1163 | return 0; |
1144 | } | 1164 | } |
1145 | 1165 | ||
1146 | /* This must be invoked the first time we consider transmitting | 1166 | /* Initialize TSO state of a skb. |
1167 | * This must be invoked the first time we consider transmitting | ||
1147 | * SKB onto the wire. | 1168 | * SKB onto the wire. |
1148 | */ | 1169 | */ |
1149 | static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, | 1170 | static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, |
@@ -1158,6 +1179,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, | |||
1158 | return tso_segs; | 1179 | return tso_segs; |
1159 | } | 1180 | } |
1160 | 1181 | ||
1182 | /* Minshall's variant of the Nagle send check. */ | ||
1161 | static inline int tcp_minshall_check(const struct tcp_sock *tp) | 1183 | static inline int tcp_minshall_check(const struct tcp_sock *tp) |
1162 | { | 1184 | { |
1163 | return after(tp->snd_sml, tp->snd_una) && | 1185 | return after(tp->snd_sml, tp->snd_una) && |
@@ -1242,6 +1264,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, | |||
1242 | return cwnd_quota; | 1264 | return cwnd_quota; |
1243 | } | 1265 | } |
1244 | 1266 | ||
1267 | /* Test if sending is allowed right now. */ | ||
1245 | int tcp_may_send_now(struct sock *sk) | 1268 | int tcp_may_send_now(struct sock *sk) |
1246 | { | 1269 | { |
1247 | struct tcp_sock *tp = tcp_sk(sk); | 1270 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -1378,6 +1401,10 @@ send_now: | |||
1378 | } | 1401 | } |
1379 | 1402 | ||
1380 | /* Create a new MTU probe if we are ready. | 1403 | /* Create a new MTU probe if we are ready. |
1404 | * MTU probing regularly attempts to increase the path MTU by | ||
1405 | * deliberately sending larger packets. This discovers routing | ||
1406 | * changes resulting in larger path MTUs. | ||
1407 | * | ||
1381 | * Returns 0 if we should wait to probe (no cwnd available), | 1408 | * Returns 0 if we should wait to probe (no cwnd available), |
1382 | * 1 if a probe was sent, | 1409 | * 1 if a probe was sent, |
1383 | * -1 otherwise | 1410 | * -1 otherwise |
@@ -1790,6 +1817,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) | |||
1790 | sk_wmem_free_skb(sk, next_skb); | 1817 | sk_wmem_free_skb(sk, next_skb); |
1791 | } | 1818 | } |
1792 | 1819 | ||
1820 | /* Check if coalescing SKBs is legal. */ | ||
1793 | static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb) | 1821 | static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb) |
1794 | { | 1822 | { |
1795 | if (tcp_skb_pcount(skb) > 1) | 1823 | if (tcp_skb_pcount(skb) > 1) |
@@ -1808,6 +1836,9 @@ static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb) | |||
1808 | return 1; | 1836 | return 1; |
1809 | } | 1837 | } |
1810 | 1838 | ||
1839 | /* Collapse packets in the retransmit queue to create fewer | ||
1840 | * packets on the wire. This is only done on retransmission. | ||
1841 | */ | ||
1811 | static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, | 1842 | static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, |
1812 | int space) | 1843 | int space) |
1813 | { | 1844 | { |
@@ -1957,6 +1988,9 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1957 | return err; | 1988 | return err; |
1958 | } | 1989 | } |
1959 | 1990 | ||
1991 | /* Check if forward retransmits are possible in the current | ||
1992 | * window/congestion state. | ||
1993 | */ | ||
1960 | static int tcp_can_forward_retransmit(struct sock *sk) | 1994 | static int tcp_can_forward_retransmit(struct sock *sk) |
1961 | { | 1995 | { |
1962 | const struct inet_connection_sock *icsk = inet_csk(sk); | 1996 | const struct inet_connection_sock *icsk = inet_csk(sk); |
@@ -2101,7 +2135,8 @@ void tcp_send_fin(struct sock *sk) | |||
2101 | } else { | 2135 | } else { |
2102 | /* Socket is locked, keep trying until memory is available. */ | 2136 | /* Socket is locked, keep trying until memory is available. */ |
2103 | for (;;) { | 2137 | for (;;) { |
2104 | skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL); | 2138 | skb = alloc_skb_fclone(MAX_TCP_HEADER, |
2139 | sk->sk_allocation); | ||
2105 | if (skb) | 2140 | if (skb) |
2106 | break; | 2141 | break; |
2107 | yield(); | 2142 | yield(); |
@@ -2145,7 +2180,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) | |||
2145 | TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); | 2180 | TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); |
2146 | } | 2181 | } |
2147 | 2182 | ||
2148 | /* WARNING: This routine must only be called when we have already sent | 2183 | /* Send a crossed SYN-ACK during socket establishment. |
2184 | * WARNING: This routine must only be called when we have already sent | ||
2149 | * a SYN packet that crossed the incoming SYN that caused this routine | 2185 | * a SYN packet that crossed the incoming SYN that caused this routine |
2150 | * to get called. If this assumption fails then the initial rcv_wnd | 2186 | * to get called. If this assumption fails then the initial rcv_wnd |
2151 | * and rcv_wscale values will not be correct. | 2187 | * and rcv_wscale values will not be correct. |
@@ -2180,9 +2216,7 @@ int tcp_send_synack(struct sock *sk) | |||
2180 | return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); | 2216 | return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); |
2181 | } | 2217 | } |
2182 | 2218 | ||
2183 | /* | 2219 | /* Prepare a SYN-ACK. */ |
2184 | * Prepare a SYN-ACK. | ||
2185 | */ | ||
2186 | struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | 2220 | struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, |
2187 | struct request_sock *req) | 2221 | struct request_sock *req) |
2188 | { | 2222 | { |
@@ -2269,9 +2303,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2269 | return skb; | 2303 | return skb; |
2270 | } | 2304 | } |
2271 | 2305 | ||
2272 | /* | 2306 | /* Do all connect socket setups that can be done AF independent. */ |
2273 | * Do all connect socket setups that can be done AF independent. | ||
2274 | */ | ||
2275 | static void tcp_connect_init(struct sock *sk) | 2307 | static void tcp_connect_init(struct sock *sk) |
2276 | { | 2308 | { |
2277 | struct dst_entry *dst = __sk_dst_get(sk); | 2309 | struct dst_entry *dst = __sk_dst_get(sk); |
@@ -2330,9 +2362,7 @@ static void tcp_connect_init(struct sock *sk) | |||
2330 | tcp_clear_retrans(tp); | 2362 | tcp_clear_retrans(tp); |
2331 | } | 2363 | } |
2332 | 2364 | ||
2333 | /* | 2365 | /* Build a SYN and send it off. */ |
2334 | * Build a SYN and send it off. | ||
2335 | */ | ||
2336 | int tcp_connect(struct sock *sk) | 2366 | int tcp_connect(struct sock *sk) |
2337 | { | 2367 | { |
2338 | struct tcp_sock *tp = tcp_sk(sk); | 2368 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -2359,7 +2389,7 @@ int tcp_connect(struct sock *sk) | |||
2359 | sk->sk_wmem_queued += buff->truesize; | 2389 | sk->sk_wmem_queued += buff->truesize; |
2360 | sk_mem_charge(sk, buff->truesize); | 2390 | sk_mem_charge(sk, buff->truesize); |
2361 | tp->packets_out += tcp_skb_pcount(buff); | 2391 | tp->packets_out += tcp_skb_pcount(buff); |
2362 | tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); | 2392 | tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); |
2363 | 2393 | ||
2364 | /* We change tp->snd_nxt after the tcp_transmit_skb() call | 2394 | /* We change tp->snd_nxt after the tcp_transmit_skb() call |
2365 | * in order to make this packet get counted in tcpOutSegs. | 2395 | * in order to make this packet get counted in tcpOutSegs. |
@@ -2493,6 +2523,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent) | |||
2493 | return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); | 2523 | return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); |
2494 | } | 2524 | } |
2495 | 2525 | ||
2526 | /* Initiate keepalive or window probe from timer. */ | ||
2496 | int tcp_write_wakeup(struct sock *sk) | 2527 | int tcp_write_wakeup(struct sock *sk) |
2497 | { | 2528 | { |
2498 | struct tcp_sock *tp = tcp_sk(sk); | 2529 | struct tcp_sock *tp = tcp_sk(sk); |
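
Several of the comments added above name small predicates such as tcp_minshall_check(), which asks whether the last small segment sent is still unacknowledged, using the wrap-safe after() comparison on 32-bit sequence numbers. The hunk shows only the first operand of that expression; the sketch below assumes the usual second operand, !after(snd_sml, snd_nxt), and the surrounding harness is invented.

/* Illustrative: wrap-safe TCP sequence comparison and the Minshall test
 * visible in the tcp_minshall_check() hunk above. */
#include <stdio.h>
#include <stdint.h>

/* True if a is later than b in 32-bit sequence space, even across wrap. */
static int seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

/* snd_sml: end of the last small segment sent; snd_una: oldest unacked;
 * snd_nxt: next new sequence to send. The small segment still counts as
 * outstanding if it is after snd_una but not after snd_nxt. */
static int minshall_check(uint32_t snd_sml, uint32_t snd_una, uint32_t snd_nxt)
{
	return seq_after(snd_sml, snd_una) && !seq_after(snd_sml, snd_nxt);
}

int main(void)
{
	/* Works across the 2^32 wrap: 0xffffff00 + 0x200 wraps to 0x100. */
	printf("%d\n", seq_after(0x00000100u, 0xffffff00u));	/* 1 */
	printf("%d\n", minshall_check(1500, 1000, 3000));	/* 1: runt in flight */
	printf("%d\n", minshall_check(900, 1000, 3000));	/* 0: already acked  */
	return 0;
}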
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index b144a26359bc..cdb2ca7684d4 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -137,13 +137,14 @@ static int tcp_write_timeout(struct sock *sk) | |||
137 | { | 137 | { |
138 | struct inet_connection_sock *icsk = inet_csk(sk); | 138 | struct inet_connection_sock *icsk = inet_csk(sk); |
139 | int retry_until; | 139 | int retry_until; |
140 | bool do_reset; | ||
140 | 141 | ||
141 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 142 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
142 | if (icsk->icsk_retransmits) | 143 | if (icsk->icsk_retransmits) |
143 | dst_negative_advice(&sk->sk_dst_cache); | 144 | dst_negative_advice(&sk->sk_dst_cache); |
144 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; | 145 | retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
145 | } else { | 146 | } else { |
146 | if (icsk->icsk_retransmits >= sysctl_tcp_retries1) { | 147 | if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { |
147 | /* Black hole detection */ | 148 | /* Black hole detection */ |
148 | tcp_mtu_probing(icsk, sk); | 149 | tcp_mtu_probing(icsk, sk); |
149 | 150 | ||
@@ -155,13 +156,15 @@ static int tcp_write_timeout(struct sock *sk) | |||
155 | const int alive = (icsk->icsk_rto < TCP_RTO_MAX); | 156 | const int alive = (icsk->icsk_rto < TCP_RTO_MAX); |
156 | 157 | ||
157 | retry_until = tcp_orphan_retries(sk, alive); | 158 | retry_until = tcp_orphan_retries(sk, alive); |
159 | do_reset = alive || | ||
160 | !retransmits_timed_out(sk, retry_until); | ||
158 | 161 | ||
159 | if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until)) | 162 | if (tcp_out_of_resources(sk, do_reset)) |
160 | return 1; | 163 | return 1; |
161 | } | 164 | } |
162 | } | 165 | } |
163 | 166 | ||
164 | if (icsk->icsk_retransmits >= retry_until) { | 167 | if (retransmits_timed_out(sk, retry_until)) { |
165 | /* Has it gone just too far? */ | 168 | /* Has it gone just too far? */ |
166 | tcp_write_err(sk); | 169 | tcp_write_err(sk); |
167 | return 1; | 170 | return 1; |
@@ -279,7 +282,7 @@ static void tcp_probe_timer(struct sock *sk) | |||
279 | * The TCP retransmit timer. | 282 | * The TCP retransmit timer. |
280 | */ | 283 | */ |
281 | 284 | ||
282 | static void tcp_retransmit_timer(struct sock *sk) | 285 | void tcp_retransmit_timer(struct sock *sk) |
283 | { | 286 | { |
284 | struct tcp_sock *tp = tcp_sk(sk); | 287 | struct tcp_sock *tp = tcp_sk(sk); |
285 | struct inet_connection_sock *icsk = inet_csk(sk); | 288 | struct inet_connection_sock *icsk = inet_csk(sk); |
@@ -385,7 +388,7 @@ static void tcp_retransmit_timer(struct sock *sk) | |||
385 | out_reset_timer: | 388 | out_reset_timer: |
386 | icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); | 389 | icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); |
387 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); | 390 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); |
388 | if (icsk->icsk_retransmits > sysctl_tcp_retries1) | 391 | if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1)) |
389 | __sk_dst_reset(sk); | 392 | __sk_dst_reset(sk); |
390 | 393 | ||
391 | out:; | 394 | out:; |
@@ -499,8 +502,7 @@ static void tcp_keepalive_timer (unsigned long data) | |||
499 | elapsed = tcp_time_stamp - tp->rcv_tstamp; | 502 | elapsed = tcp_time_stamp - tp->rcv_tstamp; |
500 | 503 | ||
501 | if (elapsed >= keepalive_time_when(tp)) { | 504 | if (elapsed >= keepalive_time_when(tp)) { |
502 | if ((!tp->keepalive_probes && icsk->icsk_probes_out >= sysctl_tcp_keepalive_probes) || | 505 | if (icsk->icsk_probes_out >= keepalive_probes(tp)) { |
503 | (tp->keepalive_probes && icsk->icsk_probes_out >= tp->keepalive_probes)) { | ||
504 | tcp_send_active_reset(sk, GFP_ATOMIC); | 506 | tcp_send_active_reset(sk, GFP_ATOMIC); |
505 | tcp_write_err(sk); | 507 | tcp_write_err(sk); |
506 | goto out; | 508 | goto out; |
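
tcp_write_timeout() and the retransmit timer now consult retransmits_timed_out(sk, boundary), which converts a retransmission-count boundary into an elapsed-time test, so adjusting icsk_rto elsewhere (for instance the ICMP-driven revert in tcp_ipv4.c) can no longer stretch or shrink the effective timeout. The sketch below only illustrates the count-to-time idea by summing a doubling, capped RTO; it is not the kernel's exact formula.

/* Illustrative: turn "N retransmissions" into an elapsed-time budget by
 * summing a doubling RTO capped at rto_max -- the general idea behind a
 * retransmits_timed_out()-style check (the kernel's formula may differ). */
#include <stdio.h>
#include <stdint.h>

static uint64_t budget_ms(unsigned boundary, uint32_t rto_base, uint32_t rto_max)
{
	uint64_t total = 0;
	uint32_t rto = rto_base;

	for (unsigned i = 0; i < boundary; i++) {
		total += rto;
		if (rto < rto_max / 2)
			rto *= 2;
		else
			rto = rto_max;
	}
	return total;
}

int main(void)
{
	/* e.g. a tcp_retries1-like boundary of 3, 200ms initial RTO, 120s cap */
	printf("boundary 3  -> %llu ms\n",
	       (unsigned long long)budget_ms(3, 200, 120000));
	printf("boundary 15 -> %llu ms\n",
	       (unsigned long long)budget_ms(15, 200, 120000));
	return 0;
}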
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 80e3812837ad..ebaaa7f973d7 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -110,11 +110,12 @@ struct udp_table udp_table; | |||
110 | EXPORT_SYMBOL(udp_table); | 110 | EXPORT_SYMBOL(udp_table); |
111 | 111 | ||
112 | int sysctl_udp_mem[3] __read_mostly; | 112 | int sysctl_udp_mem[3] __read_mostly; |
113 | int sysctl_udp_rmem_min __read_mostly; | ||
114 | int sysctl_udp_wmem_min __read_mostly; | ||
115 | |||
116 | EXPORT_SYMBOL(sysctl_udp_mem); | 113 | EXPORT_SYMBOL(sysctl_udp_mem); |
114 | |||
115 | int sysctl_udp_rmem_min __read_mostly; | ||
117 | EXPORT_SYMBOL(sysctl_udp_rmem_min); | 116 | EXPORT_SYMBOL(sysctl_udp_rmem_min); |
117 | |||
118 | int sysctl_udp_wmem_min __read_mostly; | ||
118 | EXPORT_SYMBOL(sysctl_udp_wmem_min); | 119 | EXPORT_SYMBOL(sysctl_udp_wmem_min); |
119 | 120 | ||
120 | atomic_t udp_memory_allocated; | 121 | atomic_t udp_memory_allocated; |
@@ -158,7 +159,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, | |||
158 | */ | 159 | */ |
159 | int udp_lib_get_port(struct sock *sk, unsigned short snum, | 160 | int udp_lib_get_port(struct sock *sk, unsigned short snum, |
160 | int (*saddr_comp)(const struct sock *sk1, | 161 | int (*saddr_comp)(const struct sock *sk1, |
161 | const struct sock *sk2 ) ) | 162 | const struct sock *sk2)) |
162 | { | 163 | { |
163 | struct udp_hslot *hslot; | 164 | struct udp_hslot *hslot; |
164 | struct udp_table *udptable = sk->sk_prot->h.udp_table; | 165 | struct udp_table *udptable = sk->sk_prot->h.udp_table; |
@@ -221,14 +222,15 @@ fail_unlock: | |||
221 | fail: | 222 | fail: |
222 | return error; | 223 | return error; |
223 | } | 224 | } |
225 | EXPORT_SYMBOL(udp_lib_get_port); | ||
224 | 226 | ||
225 | static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) | 227 | static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) |
226 | { | 228 | { |
227 | struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); | 229 | struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); |
228 | 230 | ||
229 | return ( !ipv6_only_sock(sk2) && | 231 | return (!ipv6_only_sock(sk2) && |
230 | (!inet1->rcv_saddr || !inet2->rcv_saddr || | 232 | (!inet1->rcv_saddr || !inet2->rcv_saddr || |
231 | inet1->rcv_saddr == inet2->rcv_saddr )); | 233 | inet1->rcv_saddr == inet2->rcv_saddr)); |
232 | } | 234 | } |
233 | 235 | ||
234 | int udp_v4_get_port(struct sock *sk, unsigned short snum) | 236 | int udp_v4_get_port(struct sock *sk, unsigned short snum) |
@@ -383,8 +385,8 @@ found: | |||
383 | void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) | 385 | void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) |
384 | { | 386 | { |
385 | struct inet_sock *inet; | 387 | struct inet_sock *inet; |
386 | struct iphdr *iph = (struct iphdr*)skb->data; | 388 | struct iphdr *iph = (struct iphdr *)skb->data; |
387 | struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2)); | 389 | struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); |
388 | const int type = icmp_hdr(skb)->type; | 390 | const int type = icmp_hdr(skb)->type; |
389 | const int code = icmp_hdr(skb)->code; | 391 | const int code = icmp_hdr(skb)->code; |
390 | struct sock *sk; | 392 | struct sock *sk; |
@@ -439,7 +441,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) | |||
439 | if (!harderr || sk->sk_state != TCP_ESTABLISHED) | 441 | if (!harderr || sk->sk_state != TCP_ESTABLISHED) |
440 | goto out; | 442 | goto out; |
441 | } else { | 443 | } else { |
442 | ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1)); | 444 | ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); |
443 | } | 445 | } |
444 | sk->sk_err = err; | 446 | sk->sk_err = err; |
445 | sk->sk_error_report(sk); | 447 | sk->sk_error_report(sk); |
@@ -474,7 +476,7 @@ EXPORT_SYMBOL(udp_flush_pending_frames); | |||
474 | * (checksum field must be zeroed out) | 476 | * (checksum field must be zeroed out) |
475 | */ | 477 | */ |
476 | static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, | 478 | static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, |
477 | __be32 src, __be32 dst, int len ) | 479 | __be32 src, __be32 dst, int len) |
478 | { | 480 | { |
479 | unsigned int offset; | 481 | unsigned int offset; |
480 | struct udphdr *uh = udp_hdr(skb); | 482 | struct udphdr *uh = udp_hdr(skb); |
@@ -545,7 +547,7 @@ static int udp_push_pending_frames(struct sock *sk) | |||
545 | 547 | ||
546 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ | 548 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ |
547 | 549 | ||
548 | udp4_hwcsum_outgoing(sk, skb, fl->fl4_src,fl->fl4_dst, up->len); | 550 | udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len); |
549 | goto send; | 551 | goto send; |
550 | 552 | ||
551 | } else /* `normal' UDP */ | 553 | } else /* `normal' UDP */ |
@@ -553,18 +555,24 @@ static int udp_push_pending_frames(struct sock *sk) | |||
553 | 555 | ||
554 | /* add protocol-dependent pseudo-header */ | 556 | /* add protocol-dependent pseudo-header */ |
555 | uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len, | 557 | uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len, |
556 | sk->sk_protocol, csum ); | 558 | sk->sk_protocol, csum); |
557 | if (uh->check == 0) | 559 | if (uh->check == 0) |
558 | uh->check = CSUM_MANGLED_0; | 560 | uh->check = CSUM_MANGLED_0; |
559 | 561 | ||
560 | send: | 562 | send: |
561 | err = ip_push_pending_frames(sk); | 563 | err = ip_push_pending_frames(sk); |
564 | if (err) { | ||
565 | if (err == -ENOBUFS && !inet->recverr) { | ||
566 | UDP_INC_STATS_USER(sock_net(sk), | ||
567 | UDP_MIB_SNDBUFERRORS, is_udplite); | ||
568 | err = 0; | ||
569 | } | ||
570 | } else | ||
571 | UDP_INC_STATS_USER(sock_net(sk), | ||
572 | UDP_MIB_OUTDATAGRAMS, is_udplite); | ||
562 | out: | 573 | out: |
563 | up->len = 0; | 574 | up->len = 0; |
564 | up->pending = 0; | 575 | up->pending = 0; |
565 | if (!err) | ||
566 | UDP_INC_STATS_USER(sock_net(sk), | ||
567 | UDP_MIB_OUTDATAGRAMS, is_udplite); | ||
568 | return err; | 576 | return err; |
569 | } | 577 | } |
570 | 578 | ||
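
The udp_push_pending_frames() hunk above charges a suppressed -ENOBUFS to UDP_MIB_SNDBUFERRORS and counts UDP_MIB_OUTDATAGRAMS only on success. Those MIB counters are exported through the "Udp:" rows of /proc/net/snmp; the fragment below is an illustrative reader that looks the column up by name rather than assuming its position.

/* Illustrative: print a named counter (e.g. SndbufErrors) from the two
 * "Udp:" lines of /proc/net/snmp, where the UDP MIB values appear. */
#include <stdio.h>
#include <string.h>

static long udp_snmp_counter(const char *name)
{
	char hdr[512], val[512];
	FILE *f = fopen("/proc/net/snmp", "r");
	long result = -1;

	if (!f)
		return -1;
	/* The file alternates a header line and a value line per protocol;
	 * find the "Udp:" pair and match the requested column by name. */
	while (fgets(hdr, sizeof(hdr), f) && fgets(val, sizeof(val), f)) {
		char *hs = NULL, *vs = NULL, *h, *v;

		if (strncmp(hdr, "Udp:", 4) != 0)
			continue;
		h = strtok_r(hdr + 4, " \n", &hs);
		v = strtok_r(val + 4, " \n", &vs);
		while (h && v) {
			if (strcmp(h, name) == 0) {
				sscanf(v, "%ld", &result);
				break;
			}
			h = strtok_r(NULL, " \n", &hs);
			v = strtok_r(NULL, " \n", &vs);
		}
		break;
	}
	fclose(f);
	return result;
}

int main(void)
{
	printf("Udp SndbufErrors = %ld\n", udp_snmp_counter("SndbufErrors"));
	printf("Udp OutDatagrams = %ld\n", udp_snmp_counter("OutDatagrams"));
	return 0;
}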
@@ -592,7 +600,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
592 | * Check the flags. | 600 | * Check the flags. |
593 | */ | 601 | */ |
594 | 602 | ||
595 | if (msg->msg_flags&MSG_OOB) /* Mirror BSD error message compatibility */ | 603 | if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */ |
596 | return -EOPNOTSUPP; | 604 | return -EOPNOTSUPP; |
597 | 605 | ||
598 | ipc.opt = NULL; | 606 | ipc.opt = NULL; |
@@ -619,7 +627,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
619 | * Get and verify the address. | 627 | * Get and verify the address. |
620 | */ | 628 | */ |
621 | if (msg->msg_name) { | 629 | if (msg->msg_name) { |
622 | struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name; | 630 | struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name; |
623 | if (msg->msg_namelen < sizeof(*usin)) | 631 | if (msg->msg_namelen < sizeof(*usin)) |
624 | return -EINVAL; | 632 | return -EINVAL; |
625 | if (usin->sin_family != AF_INET) { | 633 | if (usin->sin_family != AF_INET) { |
@@ -684,7 +692,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
684 | } | 692 | } |
685 | 693 | ||
686 | if (connected) | 694 | if (connected) |
687 | rt = (struct rtable*)sk_dst_check(sk, 0); | 695 | rt = (struct rtable *)sk_dst_check(sk, 0); |
688 | 696 | ||
689 | if (rt == NULL) { | 697 | if (rt == NULL) { |
690 | struct flowi fl = { .oif = ipc.oif, | 698 | struct flowi fl = { .oif = ipc.oif, |
@@ -782,6 +790,7 @@ do_confirm: | |||
782 | err = 0; | 790 | err = 0; |
783 | goto out; | 791 | goto out; |
784 | } | 792 | } |
793 | EXPORT_SYMBOL(udp_sendmsg); | ||
785 | 794 | ||
786 | int udp_sendpage(struct sock *sk, struct page *page, int offset, | 795 | int udp_sendpage(struct sock *sk, struct page *page, int offset, |
787 | size_t size, int flags) | 796 | size_t size, int flags) |
@@ -871,6 +880,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
871 | 880 | ||
872 | return 0; | 881 | return 0; |
873 | } | 882 | } |
883 | EXPORT_SYMBOL(udp_ioctl); | ||
874 | 884 | ||
875 | /* | 885 | /* |
876 | * This should be easy, if there is something there we | 886 | * This should be easy, if there is something there we |
@@ -892,7 +902,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
892 | * Check any passed addresses | 902 | * Check any passed addresses |
893 | */ | 903 | */ |
894 | if (addr_len) | 904 | if (addr_len) |
895 | *addr_len=sizeof(*sin); | 905 | *addr_len = sizeof(*sin); |
896 | 906 | ||
897 | if (flags & MSG_ERRQUEUE) | 907 | if (flags & MSG_ERRQUEUE) |
898 | return ip_recv_error(sk, msg, len); | 908 | return ip_recv_error(sk, msg, len); |
@@ -923,9 +933,11 @@ try_again: | |||
923 | 933 | ||
924 | if (skb_csum_unnecessary(skb)) | 934 | if (skb_csum_unnecessary(skb)) |
925 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), | 935 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), |
926 | msg->msg_iov, copied ); | 936 | msg->msg_iov, copied); |
927 | else { | 937 | else { |
928 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); | 938 | err = skb_copy_and_csum_datagram_iovec(skb, |
939 | sizeof(struct udphdr), | ||
940 | msg->msg_iov); | ||
929 | 941 | ||
930 | if (err == -EINVAL) | 942 | if (err == -EINVAL) |
931 | goto csum_copy_err; | 943 | goto csum_copy_err; |
@@ -941,8 +953,7 @@ try_again: | |||
941 | sock_recv_timestamp(msg, sk, skb); | 953 | sock_recv_timestamp(msg, sk, skb); |
942 | 954 | ||
943 | /* Copy the address. */ | 955 | /* Copy the address. */ |
944 | if (sin) | 956 | if (sin) { |
945 | { | ||
946 | sin->sin_family = AF_INET; | 957 | sin->sin_family = AF_INET; |
947 | sin->sin_port = udp_hdr(skb)->source; | 958 | sin->sin_port = udp_hdr(skb)->source; |
948 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; | 959 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; |
@@ -995,6 +1006,7 @@ int udp_disconnect(struct sock *sk, int flags) | |||
995 | sk_dst_reset(sk); | 1006 | sk_dst_reset(sk); |
996 | return 0; | 1007 | return 0; |
997 | } | 1008 | } |
1009 | EXPORT_SYMBOL(udp_disconnect); | ||
998 | 1010 | ||
999 | void udp_lib_unhash(struct sock *sk) | 1011 | void udp_lib_unhash(struct sock *sk) |
1000 | { | 1012 | { |
@@ -1044,7 +1056,7 @@ drop: | |||
1044 | * Note that in the success and error cases, the skb is assumed to | 1056 | * Note that in the success and error cases, the skb is assumed to |
1045 | * have either been requeued or freed. | 1057 | * have either been requeued or freed. |
1046 | */ | 1058 | */ |
1047 | int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | 1059 | int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1048 | { | 1060 | { |
1049 | struct udp_sock *up = udp_sk(sk); | 1061 | struct udp_sock *up = udp_sk(sk); |
1050 | int rc; | 1062 | int rc; |
@@ -1214,7 +1226,7 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, | |||
1214 | if (uh->check == 0) { | 1226 | if (uh->check == 0) { |
1215 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1227 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1216 | } else if (skb->ip_summed == CHECKSUM_COMPLETE) { | 1228 | } else if (skb->ip_summed == CHECKSUM_COMPLETE) { |
1217 | if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, | 1229 | if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, |
1218 | proto, skb->csum)) | 1230 | proto, skb->csum)) |
1219 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1231 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1220 | } | 1232 | } |
@@ -1355,7 +1367,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |||
1355 | int err = 0; | 1367 | int err = 0; |
1356 | int is_udplite = IS_UDPLITE(sk); | 1368 | int is_udplite = IS_UDPLITE(sk); |
1357 | 1369 | ||
1358 | if (optlen<sizeof(int)) | 1370 | if (optlen < sizeof(int)) |
1359 | return -EINVAL; | 1371 | return -EINVAL; |
1360 | 1372 | ||
1361 | if (get_user(val, (int __user *)optval)) | 1373 | if (get_user(val, (int __user *)optval)) |
@@ -1426,6 +1438,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |||
1426 | 1438 | ||
1427 | return err; | 1439 | return err; |
1428 | } | 1440 | } |
1441 | EXPORT_SYMBOL(udp_lib_setsockopt); | ||
1429 | 1442 | ||
1430 | int udp_setsockopt(struct sock *sk, int level, int optname, | 1443 | int udp_setsockopt(struct sock *sk, int level, int optname, |
1431 | char __user *optval, int optlen) | 1444 | char __user *optval, int optlen) |
@@ -1453,7 +1466,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname, | |||
1453 | struct udp_sock *up = udp_sk(sk); | 1466 | struct udp_sock *up = udp_sk(sk); |
1454 | int val, len; | 1467 | int val, len; |
1455 | 1468 | ||
1456 | if (get_user(len,optlen)) | 1469 | if (get_user(len, optlen)) |
1457 | return -EFAULT; | 1470 | return -EFAULT; |
1458 | 1471 | ||
1459 | len = min_t(unsigned int, len, sizeof(int)); | 1472 | len = min_t(unsigned int, len, sizeof(int)); |
@@ -1486,10 +1499,11 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname, | |||
1486 | 1499 | ||
1487 | if (put_user(len, optlen)) | 1500 | if (put_user(len, optlen)) |
1488 | return -EFAULT; | 1501 | return -EFAULT; |
1489 | if (copy_to_user(optval, &val,len)) | 1502 | if (copy_to_user(optval, &val, len)) |
1490 | return -EFAULT; | 1503 | return -EFAULT; |
1491 | return 0; | 1504 | return 0; |
1492 | } | 1505 | } |
1506 | EXPORT_SYMBOL(udp_lib_getsockopt); | ||
1493 | 1507 | ||
1494 | int udp_getsockopt(struct sock *sk, int level, int optname, | 1508 | int udp_getsockopt(struct sock *sk, int level, int optname, |
1495 | char __user *optval, int __user *optlen) | 1509 | char __user *optval, int __user *optlen) |
@@ -1528,9 +1542,9 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
1528 | int is_lite = IS_UDPLITE(sk); | 1542 | int is_lite = IS_UDPLITE(sk); |
1529 | 1543 | ||
1530 | /* Check for false positives due to checksum errors */ | 1544 | /* Check for false positives due to checksum errors */ |
1531 | if ( (mask & POLLRDNORM) && | 1545 | if ((mask & POLLRDNORM) && |
1532 | !(file->f_flags & O_NONBLOCK) && | 1546 | !(file->f_flags & O_NONBLOCK) && |
1533 | !(sk->sk_shutdown & RCV_SHUTDOWN)){ | 1547 | !(sk->sk_shutdown & RCV_SHUTDOWN)) { |
1534 | struct sk_buff_head *rcvq = &sk->sk_receive_queue; | 1548 | struct sk_buff_head *rcvq = &sk->sk_receive_queue; |
1535 | struct sk_buff *skb; | 1549 | struct sk_buff *skb; |
1536 | 1550 | ||
@@ -1552,6 +1566,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
1552 | return mask; | 1566 | return mask; |
1553 | 1567 | ||
1554 | } | 1568 | } |
1569 | EXPORT_SYMBOL(udp_poll); | ||
1555 | 1570 | ||
1556 | struct proto udp_prot = { | 1571 | struct proto udp_prot = { |
1557 | .name = "UDP", | 1572 | .name = "UDP", |
@@ -1582,6 +1597,7 @@ struct proto udp_prot = { | |||
1582 | .compat_getsockopt = compat_udp_getsockopt, | 1597 | .compat_getsockopt = compat_udp_getsockopt, |
1583 | #endif | 1598 | #endif |
1584 | }; | 1599 | }; |
1600 | EXPORT_SYMBOL(udp_prot); | ||
1585 | 1601 | ||
1586 | /* ------------------------------------------------------------------------ */ | 1602 | /* ------------------------------------------------------------------------ */ |
1587 | #ifdef CONFIG_PROC_FS | 1603 | #ifdef CONFIG_PROC_FS |
@@ -1703,11 +1719,13 @@ int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo) | |||
1703 | rc = -ENOMEM; | 1719 | rc = -ENOMEM; |
1704 | return rc; | 1720 | return rc; |
1705 | } | 1721 | } |
1722 | EXPORT_SYMBOL(udp_proc_register); | ||
1706 | 1723 | ||
1707 | void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo) | 1724 | void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo) |
1708 | { | 1725 | { |
1709 | proc_net_remove(net, afinfo->name); | 1726 | proc_net_remove(net, afinfo->name); |
1710 | } | 1727 | } |
1728 | EXPORT_SYMBOL(udp_proc_unregister); | ||
1711 | 1729 | ||
1712 | /* ------------------------------------------------------------------------ */ | 1730 | /* ------------------------------------------------------------------------ */ |
1713 | static void udp4_format_sock(struct sock *sp, struct seq_file *f, | 1731 | static void udp4_format_sock(struct sock *sp, struct seq_file *f, |
@@ -1741,7 +1759,7 @@ int udp4_seq_show(struct seq_file *seq, void *v) | |||
1741 | int len; | 1759 | int len; |
1742 | 1760 | ||
1743 | udp4_format_sock(v, seq, state->bucket, &len); | 1761 | udp4_format_sock(v, seq, state->bucket, &len); |
1744 | seq_printf(seq, "%*s\n", 127 - len ,""); | 1762 | seq_printf(seq, "%*s\n", 127 - len, ""); |
1745 | } | 1763 | } |
1746 | return 0; | 1764 | return 0; |
1747 | } | 1765 | } |
@@ -1816,16 +1834,64 @@ void __init udp_init(void) | |||
1816 | sysctl_udp_wmem_min = SK_MEM_QUANTUM; | 1834 | sysctl_udp_wmem_min = SK_MEM_QUANTUM; |
1817 | } | 1835 | } |
1818 | 1836 | ||
1819 | EXPORT_SYMBOL(udp_disconnect); | 1837 | int udp4_ufo_send_check(struct sk_buff *skb) |
1820 | EXPORT_SYMBOL(udp_ioctl); | 1838 | { |
1821 | EXPORT_SYMBOL(udp_prot); | 1839 | const struct iphdr *iph; |
1822 | EXPORT_SYMBOL(udp_sendmsg); | 1840 | struct udphdr *uh; |
1823 | EXPORT_SYMBOL(udp_lib_getsockopt); | 1841 | |
1824 | EXPORT_SYMBOL(udp_lib_setsockopt); | 1842 | if (!pskb_may_pull(skb, sizeof(*uh))) |
1825 | EXPORT_SYMBOL(udp_poll); | 1843 | return -EINVAL; |
1826 | EXPORT_SYMBOL(udp_lib_get_port); | 1844 | |
1845 | iph = ip_hdr(skb); | ||
1846 | uh = udp_hdr(skb); | ||
1847 | |||
1848 | uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, | ||
1849 | IPPROTO_UDP, 0); | ||
1850 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
1851 | skb->csum_offset = offsetof(struct udphdr, check); | ||
1852 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
1853 | return 0; | ||
1854 | } | ||
1855 | |||
1856 | struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features) | ||
1857 | { | ||
1858 | struct sk_buff *segs = ERR_PTR(-EINVAL); | ||
1859 | unsigned int mss; | ||
1860 | int offset; | ||
1861 | __wsum csum; | ||
1862 | |||
1863 | mss = skb_shinfo(skb)->gso_size; | ||
1864 | if (unlikely(skb->len <= mss)) | ||
1865 | goto out; | ||
1866 | |||
1867 | if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { | ||
1868 | /* Packet is from an untrusted source, reset gso_segs. */ | ||
1869 | int type = skb_shinfo(skb)->gso_type; | ||
1870 | |||
1871 | if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) || | ||
1872 | !(type & (SKB_GSO_UDP)))) | ||
1873 | goto out; | ||
1874 | |||
1875 | skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); | ||
1876 | |||
1877 | segs = NULL; | ||
1878 | goto out; | ||
1879 | } | ||
1880 | |||
1881 | /* Do software UFO. Complete and fill in the UDP checksum, as hardware | ||
1882 | * cannot checksum UDP packets sent as multiple IP fragments. | ||
1883 | */ | ||
1884 | offset = skb->csum_start - skb_headroom(skb); | ||
1885 | csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
1886 | offset += skb->csum_offset; | ||
1887 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | ||
1888 | skb->ip_summed = CHECKSUM_NONE; | ||
1889 | |||
1890 | /* Fragment the skb. IP headers of the fragments are updated in | ||
1891 | * inet_gso_segment() | ||
1892 | */ | ||
1893 | segs = skb_segment(skb, features); | ||
1894 | out: | ||
1895 | return segs; | ||
1896 | } | ||
1827 | 1897 | ||
1828 | #ifdef CONFIG_PROC_FS | ||
1829 | EXPORT_SYMBOL(udp_proc_register); | ||
1830 | EXPORT_SYMBOL(udp_proc_unregister); | ||
1831 | #endif | ||
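The new udp4_ufo_fragment() above completes the UDP checksum in software before handing the skb to skb_segment(): skb_checksum() produces a 32-bit one's-complement accumulator, csum_fold() collapses it to 16 bits, and gso_segs is recomputed with DIV_ROUND_UP() when the packet comes from an untrusted source. A minimal userspace sketch of that arithmetic (plain integers, not the kernel helpers themselves):

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit one's-complement accumulator into a 16-bit checksum,
 * mirroring what csum_fold() does with the value skb_checksum() returns. */
static uint16_t fold_csum(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add the carries back in */
	sum = (sum & 0xffff) + (sum >> 16);	/* once more for the new carry */
	return (uint16_t)~sum;
}

/* The same rounding DIV_ROUND_UP() performs when gso_segs is reset. */
static unsigned int div_round_up(unsigned int len, unsigned int mss)
{
	return (len + mss - 1) / mss;
}

int main(void)
{
	printf("folded csum for 0x0001fffe: 0x%04x\n", fold_csum(0x0001fffeu));
	printf("gso_segs for 3000 bytes at mss 1448: %u\n", div_round_up(3000, 1448));
	return 0;
}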
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 0071ee6f441f..74fb2eb833ec 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -264,6 +264,22 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { | |||
264 | .fill_dst = xfrm4_fill_dst, | 264 | .fill_dst = xfrm4_fill_dst, |
265 | }; | 265 | }; |
266 | 266 | ||
267 | #ifdef CONFIG_SYSCTL | ||
268 | static struct ctl_table xfrm4_policy_table[] = { | ||
269 | { | ||
270 | .ctl_name = CTL_UNNUMBERED, | ||
271 | .procname = "xfrm4_gc_thresh", | ||
272 | .data = &xfrm4_dst_ops.gc_thresh, | ||
273 | .maxlen = sizeof(int), | ||
274 | .mode = 0644, | ||
275 | .proc_handler = proc_dointvec, | ||
276 | }, | ||
277 | { } | ||
278 | }; | ||
279 | |||
280 | static struct ctl_table_header *sysctl_hdr; | ||
281 | #endif | ||
282 | |||
267 | static void __init xfrm4_policy_init(void) | 283 | static void __init xfrm4_policy_init(void) |
268 | { | 284 | { |
269 | xfrm_policy_register_afinfo(&xfrm4_policy_afinfo); | 285 | xfrm_policy_register_afinfo(&xfrm4_policy_afinfo); |
@@ -271,12 +287,31 @@ static void __init xfrm4_policy_init(void) | |||
271 | 287 | ||
272 | static void __exit xfrm4_policy_fini(void) | 288 | static void __exit xfrm4_policy_fini(void) |
273 | { | 289 | { |
290 | #ifdef CONFIG_SYSCTL | ||
291 | if (sysctl_hdr) | ||
292 | unregister_net_sysctl_table(sysctl_hdr); | ||
293 | #endif | ||
274 | xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo); | 294 | xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo); |
275 | } | 295 | } |
276 | 296 | ||
277 | void __init xfrm4_init(void) | 297 | void __init xfrm4_init(int rt_max_size) |
278 | { | 298 | { |
279 | xfrm4_state_init(); | 299 | xfrm4_state_init(); |
280 | xfrm4_policy_init(); | 300 | xfrm4_policy_init(); |
301 | /* | ||
302 | * Select a default value for the gc_thresh based on the main route | ||
303 | * table hash size. It seems to me the worst-case scenario is when | ||
304 | * we have ipsec operating in transport mode, in which case we create a | ||
305 | * dst_entry per socket. The xfrm gc algorithm starts trying to remove | ||
306 | * entries at gc_thresh, and prevents new allocations at 2*gc_thresh, | ||
307 | * so let's set the initial xfrm gc_thresh value to rt_max_size/2. | ||
308 | * That will let us store an ipsec connection per route table entry, | ||
309 | * and start cleaning when we're 1/2 full. | ||
310 | */ | ||
311 | xfrm4_dst_ops.gc_thresh = rt_max_size/2; | ||
312 | #ifdef CONFIG_SYSCTL | ||
313 | sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, | ||
314 | xfrm4_policy_table); | ||
315 | #endif | ||
281 | } | 316 | } |
282 | 317 | ||
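The xfrm4_policy.c hunk registers a new tunable, xfrm4_gc_thresh, under the net.ipv4 sysctl path and seeds it with rt_max_size/2. Assuming the usual procfs mapping of net_ipv4_ctl_path (the /proc path below is an assumption, not something stated in the patch), the value can be inspected from userspace with a few lines of C:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/xfrm4_gc_thresh", "r");
	int thresh;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &thresh) == 1)
		printf("xfrm4_gc_thresh = %d\n", thresh);
	fclose(f);
	return 0;
}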
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 43b3c9f89c12..c9b369034a40 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1371,12 +1371,14 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add | |||
1371 | 1371 | ||
1372 | /* Gets referenced address, destroys ifaddr */ | 1372 | /* Gets referenced address, destroys ifaddr */ |
1373 | 1373 | ||
1374 | static void addrconf_dad_stop(struct inet6_ifaddr *ifp) | 1374 | static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) |
1375 | { | 1375 | { |
1376 | if (ifp->flags&IFA_F_PERMANENT) { | 1376 | if (ifp->flags&IFA_F_PERMANENT) { |
1377 | spin_lock_bh(&ifp->lock); | 1377 | spin_lock_bh(&ifp->lock); |
1378 | addrconf_del_timer(ifp); | 1378 | addrconf_del_timer(ifp); |
1379 | ifp->flags |= IFA_F_TENTATIVE; | 1379 | ifp->flags |= IFA_F_TENTATIVE; |
1380 | if (dad_failed) | ||
1381 | ifp->flags |= IFA_F_DADFAILED; | ||
1380 | spin_unlock_bh(&ifp->lock); | 1382 | spin_unlock_bh(&ifp->lock); |
1381 | in6_ifa_put(ifp); | 1383 | in6_ifa_put(ifp); |
1382 | #ifdef CONFIG_IPV6_PRIVACY | 1384 | #ifdef CONFIG_IPV6_PRIVACY |
@@ -1422,7 +1424,7 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) | |||
1422 | } | 1424 | } |
1423 | } | 1425 | } |
1424 | 1426 | ||
1425 | addrconf_dad_stop(ifp); | 1427 | addrconf_dad_stop(ifp, 1); |
1426 | } | 1428 | } |
1427 | 1429 | ||
1428 | /* Join to solicited addr multicast group. */ | 1430 | /* Join to solicited addr multicast group. */ |
@@ -2778,7 +2780,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) | |||
2778 | idev->cnf.accept_dad < 1 || | 2780 | idev->cnf.accept_dad < 1 || |
2779 | !(ifp->flags&IFA_F_TENTATIVE) || | 2781 | !(ifp->flags&IFA_F_TENTATIVE) || |
2780 | ifp->flags & IFA_F_NODAD) { | 2782 | ifp->flags & IFA_F_NODAD) { |
2781 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC); | 2783 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); |
2782 | spin_unlock_bh(&ifp->lock); | 2784 | spin_unlock_bh(&ifp->lock); |
2783 | read_unlock_bh(&idev->lock); | 2785 | read_unlock_bh(&idev->lock); |
2784 | 2786 | ||
@@ -2795,7 +2797,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) | |||
2795 | * - otherwise, kill it. | 2797 | * - otherwise, kill it. |
2796 | */ | 2798 | */ |
2797 | in6_ifa_hold(ifp); | 2799 | in6_ifa_hold(ifp); |
2798 | addrconf_dad_stop(ifp); | 2800 | addrconf_dad_stop(ifp, 0); |
2799 | return; | 2801 | return; |
2800 | } | 2802 | } |
2801 | 2803 | ||
@@ -2829,7 +2831,7 @@ static void addrconf_dad_timer(unsigned long data) | |||
2829 | * DAD was successful | 2831 | * DAD was successful |
2830 | */ | 2832 | */ |
2831 | 2833 | ||
2832 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC); | 2834 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); |
2833 | spin_unlock_bh(&ifp->lock); | 2835 | spin_unlock_bh(&ifp->lock); |
2834 | read_unlock_bh(&idev->lock); | 2836 | read_unlock_bh(&idev->lock); |
2835 | 2837 | ||
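After this change a failed duplicate address detection leaves both IFA_F_TENTATIVE and the new IFA_F_DADFAILED set on the address, and both bits are cleared again when DAD is restarted or succeeds. A userspace tool that already receives ifa_flags over rtnetlink only needs to test the new bit; a minimal sketch, assuming installed kernel headers that carry the new define (the sample flags value is made up for illustration):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_addr.h>	/* IFA_F_TENTATIVE, IFA_F_DADFAILED */

int main(void)
{
	/* ifa_flags as it would arrive in an RTM_NEWADDR message;
	 * this particular value is fabricated for the example. */
	unsigned int ifa_flags = IFA_F_TENTATIVE | IFA_F_DADFAILED;

	if (ifa_flags & IFA_F_DADFAILED)
		printf("duplicate address detection failed for this address\n");
	else if (ifa_flags & IFA_F_TENTATIVE)
		printf("address is still tentative (DAD in progress)\n");
	else
		printf("address is usable\n");
	return 0;
}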
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 45f9a2a42d56..a123a328aeb3 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -774,6 +774,11 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | |||
774 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 774 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
775 | struct ipv6hdr *ipv6h; | 775 | struct ipv6hdr *ipv6h; |
776 | struct inet6_protocol *ops; | 776 | struct inet6_protocol *ops; |
777 | int proto; | ||
778 | struct frag_hdr *fptr; | ||
779 | unsigned int unfrag_ip6hlen; | ||
780 | u8 *prevhdr; | ||
781 | int offset = 0; | ||
777 | 782 | ||
778 | if (!(features & NETIF_F_V6_CSUM)) | 783 | if (!(features & NETIF_F_V6_CSUM)) |
779 | features &= ~NETIF_F_SG; | 784 | features &= ~NETIF_F_SG; |
@@ -793,10 +798,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | |||
793 | __skb_pull(skb, sizeof(*ipv6h)); | 798 | __skb_pull(skb, sizeof(*ipv6h)); |
794 | segs = ERR_PTR(-EPROTONOSUPPORT); | 799 | segs = ERR_PTR(-EPROTONOSUPPORT); |
795 | 800 | ||
801 | proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | ||
796 | rcu_read_lock(); | 802 | rcu_read_lock(); |
797 | ops = rcu_dereference(inet6_protos[ | 803 | ops = rcu_dereference(inet6_protos[proto]); |
798 | ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); | ||
799 | |||
800 | if (likely(ops && ops->gso_segment)) { | 804 | if (likely(ops && ops->gso_segment)) { |
801 | skb_reset_transport_header(skb); | 805 | skb_reset_transport_header(skb); |
802 | segs = ops->gso_segment(skb, features); | 806 | segs = ops->gso_segment(skb, features); |
@@ -810,6 +814,16 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | |||
810 | ipv6h = ipv6_hdr(skb); | 814 | ipv6h = ipv6_hdr(skb); |
811 | ipv6h->payload_len = htons(skb->len - skb->mac_len - | 815 | ipv6h->payload_len = htons(skb->len - skb->mac_len - |
812 | sizeof(*ipv6h)); | 816 | sizeof(*ipv6h)); |
817 | if (proto == IPPROTO_UDP) { | ||
818 | unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); | ||
819 | fptr = (struct frag_hdr *)(skb_network_header(skb) + | ||
820 | unfrag_ip6hlen); | ||
821 | fptr->frag_off = htons(offset); | ||
822 | if (skb->next != NULL) | ||
823 | fptr->frag_off |= htons(IP6_MF); | ||
824 | offset += (ntohs(ipv6h->payload_len) - | ||
825 | sizeof(struct frag_hdr)); | ||
826 | } | ||
813 | } | 827 | } |
814 | 828 | ||
815 | out: | 829 | out: |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index eab62a7a8f06..e2325f6a05fb 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -323,7 +323,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, | |||
323 | int iif = 0; | 323 | int iif = 0; |
324 | int addr_type = 0; | 324 | int addr_type = 0; |
325 | int len; | 325 | int len; |
326 | int hlimit, tclass; | 326 | int hlimit; |
327 | int err = 0; | 327 | int err = 0; |
328 | 328 | ||
329 | if ((u8 *)hdr < skb->head || | 329 | if ((u8 *)hdr < skb->head || |
@@ -469,10 +469,6 @@ route_done: | |||
469 | if (hlimit < 0) | 469 | if (hlimit < 0) |
470 | hlimit = ip6_dst_hoplimit(dst); | 470 | hlimit = ip6_dst_hoplimit(dst); |
471 | 471 | ||
472 | tclass = np->tclass; | ||
473 | if (tclass < 0) | ||
474 | tclass = 0; | ||
475 | |||
476 | msg.skb = skb; | 472 | msg.skb = skb; |
477 | msg.offset = skb_network_offset(skb); | 473 | msg.offset = skb_network_offset(skb); |
478 | msg.type = type; | 474 | msg.type = type; |
@@ -488,8 +484,8 @@ route_done: | |||
488 | 484 | ||
489 | err = ip6_append_data(sk, icmpv6_getfrag, &msg, | 485 | err = ip6_append_data(sk, icmpv6_getfrag, &msg, |
490 | len + sizeof(struct icmp6hdr), | 486 | len + sizeof(struct icmp6hdr), |
491 | sizeof(struct icmp6hdr), | 487 | sizeof(struct icmp6hdr), hlimit, |
492 | hlimit, tclass, NULL, &fl, (struct rt6_info*)dst, | 488 | np->tclass, NULL, &fl, (struct rt6_info*)dst, |
493 | MSG_DONTWAIT); | 489 | MSG_DONTWAIT); |
494 | if (err) { | 490 | if (err) { |
495 | ip6_flush_pending_frames(sk); | 491 | ip6_flush_pending_frames(sk); |
@@ -522,7 +518,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb) | |||
522 | struct dst_entry *dst; | 518 | struct dst_entry *dst; |
523 | int err = 0; | 519 | int err = 0; |
524 | int hlimit; | 520 | int hlimit; |
525 | int tclass; | ||
526 | 521 | ||
527 | saddr = &ipv6_hdr(skb)->daddr; | 522 | saddr = &ipv6_hdr(skb)->daddr; |
528 | 523 | ||
@@ -562,10 +557,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb) | |||
562 | if (hlimit < 0) | 557 | if (hlimit < 0) |
563 | hlimit = ip6_dst_hoplimit(dst); | 558 | hlimit = ip6_dst_hoplimit(dst); |
564 | 559 | ||
565 | tclass = np->tclass; | ||
566 | if (tclass < 0) | ||
567 | tclass = 0; | ||
568 | |||
569 | idev = in6_dev_get(skb->dev); | 560 | idev = in6_dev_get(skb->dev); |
570 | 561 | ||
571 | msg.skb = skb; | 562 | msg.skb = skb; |
@@ -573,7 +564,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) | |||
573 | msg.type = ICMPV6_ECHO_REPLY; | 564 | msg.type = ICMPV6_ECHO_REPLY; |
574 | 565 | ||
575 | err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), | 566 | err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), |
576 | sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl, | 567 | sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl, |
577 | (struct rt6_info*)dst, MSG_DONTWAIT); | 568 | (struct rt6_info*)dst, MSG_DONTWAIT); |
578 | 569 | ||
579 | if (err) { | 570 | if (err) { |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 52ee1dced2ff..0e93ca56eb69 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -164,12 +164,6 @@ static __inline__ void rt6_release(struct rt6_info *rt) | |||
164 | dst_free(&rt->u.dst); | 164 | dst_free(&rt->u.dst); |
165 | } | 165 | } |
166 | 166 | ||
167 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | ||
168 | #define FIB_TABLE_HASHSZ 256 | ||
169 | #else | ||
170 | #define FIB_TABLE_HASHSZ 1 | ||
171 | #endif | ||
172 | |||
173 | static void fib6_link_table(struct net *net, struct fib6_table *tb) | 167 | static void fib6_link_table(struct net *net, struct fib6_table *tb) |
174 | { | 168 | { |
175 | unsigned int h; | 169 | unsigned int h; |
@@ -180,7 +174,7 @@ static void fib6_link_table(struct net *net, struct fib6_table *tb) | |||
180 | */ | 174 | */ |
181 | rwlock_init(&tb->tb6_lock); | 175 | rwlock_init(&tb->tb6_lock); |
182 | 176 | ||
183 | h = tb->tb6_id & (FIB_TABLE_HASHSZ - 1); | 177 | h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1); |
184 | 178 | ||
185 | /* | 179 | /* |
186 | * No protection necessary, this is the only list mutation | 180 | * No protection necessary, this is the only list mutation |
@@ -231,7 +225,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id) | |||
231 | 225 | ||
232 | if (id == 0) | 226 | if (id == 0) |
233 | id = RT6_TABLE_MAIN; | 227 | id = RT6_TABLE_MAIN; |
234 | h = id & (FIB_TABLE_HASHSZ - 1); | 228 | h = id & (FIB6_TABLE_HASHSZ - 1); |
235 | rcu_read_lock(); | 229 | rcu_read_lock(); |
236 | head = &net->ipv6.fib_table_hash[h]; | 230 | head = &net->ipv6.fib_table_hash[h]; |
237 | hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) { | 231 | hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) { |
@@ -382,7 +376,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) | |||
382 | arg.net = net; | 376 | arg.net = net; |
383 | w->args = &arg; | 377 | w->args = &arg; |
384 | 378 | ||
385 | for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) { | 379 | for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) { |
386 | e = 0; | 380 | e = 0; |
387 | head = &net->ipv6.fib_table_hash[h]; | 381 | head = &net->ipv6.fib_table_hash[h]; |
388 | hlist_for_each_entry(tb, node, head, tb6_hlist) { | 382 | hlist_for_each_entry(tb, node, head, tb6_hlist) { |
@@ -1368,7 +1362,7 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), | |||
1368 | unsigned int h; | 1362 | unsigned int h; |
1369 | 1363 | ||
1370 | rcu_read_lock(); | 1364 | rcu_read_lock(); |
1371 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { | 1365 | for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { |
1372 | head = &net->ipv6.fib_table_hash[h]; | 1366 | head = &net->ipv6.fib_table_hash[h]; |
1373 | hlist_for_each_entry_rcu(table, node, head, tb6_hlist) { | 1367 | hlist_for_each_entry_rcu(table, node, head, tb6_hlist) { |
1374 | write_lock_bh(&table->tb6_lock); | 1368 | write_lock_bh(&table->tb6_lock); |
@@ -1483,7 +1477,7 @@ static int fib6_net_init(struct net *net) | |||
1483 | if (!net->ipv6.rt6_stats) | 1477 | if (!net->ipv6.rt6_stats) |
1484 | goto out_timer; | 1478 | goto out_timer; |
1485 | 1479 | ||
1486 | net->ipv6.fib_table_hash = kcalloc(FIB_TABLE_HASHSZ, | 1480 | net->ipv6.fib_table_hash = kcalloc(FIB6_TABLE_HASHSZ, |
1487 | sizeof(*net->ipv6.fib_table_hash), | 1481 | sizeof(*net->ipv6.fib_table_hash), |
1488 | GFP_KERNEL); | 1482 | GFP_KERNEL); |
1489 | if (!net->ipv6.fib_table_hash) | 1483 | if (!net->ipv6.fib_table_hash) |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 6d6a4277c677..2d9cbaa67edb 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -63,7 +63,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
63 | 63 | ||
64 | if (skb->pkt_type == PACKET_OTHERHOST) { | 64 | if (skb->pkt_type == PACKET_OTHERHOST) { |
65 | kfree_skb(skb); | 65 | kfree_skb(skb); |
66 | return 0; | 66 | return NET_RX_DROP; |
67 | } | 67 | } |
68 | 68 | ||
69 | rcu_read_lock(); | 69 | rcu_read_lock(); |
@@ -133,7 +133,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
133 | if (ipv6_parse_hopopts(skb) < 0) { | 133 | if (ipv6_parse_hopopts(skb) < 0) { |
134 | IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); | 134 | IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); |
135 | rcu_read_unlock(); | 135 | rcu_read_unlock(); |
136 | return 0; | 136 | return NET_RX_DROP; |
137 | } | 137 | } |
138 | } | 138 | } |
139 | 139 | ||
@@ -149,7 +149,7 @@ err: | |||
149 | drop: | 149 | drop: |
150 | rcu_read_unlock(); | 150 | rcu_read_unlock(); |
151 | kfree_skb(skb); | 151 | kfree_skb(skb); |
152 | return 0; | 152 | return NET_RX_DROP; |
153 | } | 153 | } |
154 | 154 | ||
155 | /* | 155 | /* |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 87f8419a68fd..cd48801a8d6f 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -57,18 +57,6 @@ | |||
57 | 57 | ||
58 | static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); | 58 | static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); |
59 | 59 | ||
60 | static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr) | ||
61 | { | ||
62 | static u32 ipv6_fragmentation_id = 1; | ||
63 | static DEFINE_SPINLOCK(ip6_id_lock); | ||
64 | |||
65 | spin_lock_bh(&ip6_id_lock); | ||
66 | fhdr->identification = htonl(ipv6_fragmentation_id); | ||
67 | if (++ipv6_fragmentation_id == 0) | ||
68 | ipv6_fragmentation_id = 1; | ||
69 | spin_unlock_bh(&ip6_id_lock); | ||
70 | } | ||
71 | |||
72 | int __ip6_local_out(struct sk_buff *skb) | 60 | int __ip6_local_out(struct sk_buff *skb) |
73 | { | 61 | { |
74 | int len; | 62 | int len; |
@@ -206,7 +194,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
206 | struct ipv6hdr *hdr; | 194 | struct ipv6hdr *hdr; |
207 | u8 proto = fl->proto; | 195 | u8 proto = fl->proto; |
208 | int seg_len = skb->len; | 196 | int seg_len = skb->len; |
209 | int hlimit, tclass; | 197 | int hlimit = -1; |
198 | int tclass = 0; | ||
210 | u32 mtu; | 199 | u32 mtu; |
211 | 200 | ||
212 | if (opt) { | 201 | if (opt) { |
@@ -249,19 +238,13 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
249 | /* | 238 | /* |
250 | * Fill in the IPv6 header | 239 | * Fill in the IPv6 header |
251 | */ | 240 | */ |
252 | 241 | if (np) { | |
253 | hlimit = -1; | 242 | tclass = np->tclass; |
254 | if (np) | ||
255 | hlimit = np->hop_limit; | 243 | hlimit = np->hop_limit; |
244 | } | ||
256 | if (hlimit < 0) | 245 | if (hlimit < 0) |
257 | hlimit = ip6_dst_hoplimit(dst); | 246 | hlimit = ip6_dst_hoplimit(dst); |
258 | 247 | ||
259 | tclass = -1; | ||
260 | if (np) | ||
261 | tclass = np->tclass; | ||
262 | if (tclass < 0) | ||
263 | tclass = 0; | ||
264 | |||
265 | *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel; | 248 | *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel; |
266 | 249 | ||
267 | hdr->payload_len = htons(seg_len); | 250 | hdr->payload_len = htons(seg_len); |
@@ -706,7 +689,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
706 | skb_reset_network_header(skb); | 689 | skb_reset_network_header(skb); |
707 | memcpy(skb_network_header(skb), tmp_hdr, hlen); | 690 | memcpy(skb_network_header(skb), tmp_hdr, hlen); |
708 | 691 | ||
709 | ipv6_select_ident(skb, fh); | 692 | ipv6_select_ident(fh); |
710 | fh->nexthdr = nexthdr; | 693 | fh->nexthdr = nexthdr; |
711 | fh->reserved = 0; | 694 | fh->reserved = 0; |
712 | fh->frag_off = htons(IP6_MF); | 695 | fh->frag_off = htons(IP6_MF); |
@@ -844,7 +827,7 @@ slow_path: | |||
844 | fh->nexthdr = nexthdr; | 827 | fh->nexthdr = nexthdr; |
845 | fh->reserved = 0; | 828 | fh->reserved = 0; |
846 | if (!frag_id) { | 829 | if (!frag_id) { |
847 | ipv6_select_ident(skb, fh); | 830 | ipv6_select_ident(fh); |
848 | frag_id = fh->identification; | 831 | frag_id = fh->identification; |
849 | } else | 832 | } else |
850 | fh->identification = frag_id; | 833 | fh->identification = frag_id; |
@@ -1087,11 +1070,13 @@ static inline int ip6_ufo_append_data(struct sock *sk, | |||
1087 | if (!err) { | 1070 | if (!err) { |
1088 | struct frag_hdr fhdr; | 1071 | struct frag_hdr fhdr; |
1089 | 1072 | ||
1090 | /* specify the length of each IP datagram fragment*/ | 1073 | /* Specify the length of each IPv6 datagram fragment. |
1091 | skb_shinfo(skb)->gso_size = mtu - fragheaderlen - | 1074 | * It has to be a multiple of 8. |
1092 | sizeof(struct frag_hdr); | 1075 | */ |
1076 | skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - | ||
1077 | sizeof(struct frag_hdr)) & ~7; | ||
1093 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; | 1078 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; |
1094 | ipv6_select_ident(skb, &fhdr); | 1079 | ipv6_select_ident(&fhdr); |
1095 | skb_shinfo(skb)->ip6_frag_id = fhdr.identification; | 1080 | skb_shinfo(skb)->ip6_frag_id = fhdr.identification; |
1096 | __skb_queue_tail(&sk->sk_write_queue, skb); | 1081 | __skb_queue_tail(&sk->sk_write_queue, skb); |
1097 | 1082 | ||
@@ -1526,7 +1511,7 @@ int ip6_push_pending_frames(struct sock *sk) | |||
1526 | err = ip6_local_out(skb); | 1511 | err = ip6_local_out(skb); |
1527 | if (err) { | 1512 | if (err) { |
1528 | if (err > 0) | 1513 | if (err > 0) |
1529 | err = np->recverr ? net_xmit_errno(err) : 0; | 1514 | err = net_xmit_errno(err); |
1530 | if (err) | 1515 | if (err) |
1531 | goto error; | 1516 | goto error; |
1532 | } | 1517 | } |
@@ -1535,6 +1520,7 @@ out: | |||
1535 | ip6_cork_release(inet, np); | 1520 | ip6_cork_release(inet, np); |
1536 | return err; | 1521 | return err; |
1537 | error: | 1522 | error: |
1523 | IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); | ||
1538 | goto out; | 1524 | goto out; |
1539 | } | 1525 | } |
1540 | 1526 | ||
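The ip6_ufo_append_data() change masks the per-fragment payload down to a multiple of 8, so every fragment offset stays a multiple of 8 as the fragment header requires. A quick check of that arithmetic for a standard Ethernet MTU (the header sizes are hard-coded here instead of taken from the kernel structs):

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int fragheaderlen = 40;	/* sizeof(struct ipv6hdr) */
	unsigned int frag_hdr_len = 8;		/* sizeof(struct frag_hdr) */
	unsigned int old_gso = mtu - fragheaderlen - frag_hdr_len;
	unsigned int new_gso = (mtu - fragheaderlen - frag_hdr_len) & ~7u;

	printf("gso_size before the fix: %u\n", old_gso);			/* 1452 */
	printf("gso_size after the fix:  %u (multiple of 8)\n", new_gso);	/* 1448 */
	return 0;
}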
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 51f410e7775a..7d25bbe32110 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1036,7 +1036,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1036 | return 0; | 1036 | return 0; |
1037 | } | 1037 | } |
1038 | 1038 | ||
1039 | static int | 1039 | static netdev_tx_t |
1040 | ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | 1040 | ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) |
1041 | { | 1041 | { |
1042 | struct ip6_tnl *t = netdev_priv(dev); | 1042 | struct ip6_tnl *t = netdev_priv(dev); |
@@ -1063,14 +1063,14 @@ ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1063 | goto tx_err; | 1063 | goto tx_err; |
1064 | 1064 | ||
1065 | t->recursion--; | 1065 | t->recursion--; |
1066 | return 0; | 1066 | return NETDEV_TX_OK; |
1067 | 1067 | ||
1068 | tx_err: | 1068 | tx_err: |
1069 | stats->tx_errors++; | 1069 | stats->tx_errors++; |
1070 | stats->tx_dropped++; | 1070 | stats->tx_dropped++; |
1071 | kfree_skb(skb); | 1071 | kfree_skb(skb); |
1072 | t->recursion--; | 1072 | t->recursion--; |
1073 | return 0; | 1073 | return NETDEV_TX_OK; |
1074 | } | 1074 | } |
1075 | 1075 | ||
1076 | static void ip6_tnl_set_cap(struct ip6_tnl *t) | 1076 | static void ip6_tnl_set_cap(struct ip6_tnl *t) |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index c769f155c698..5c8d73730c75 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -204,7 +204,7 @@ static int ip6mr_vif_seq_show(struct seq_file *seq, void *v) | |||
204 | return 0; | 204 | return 0; |
205 | } | 205 | } |
206 | 206 | ||
207 | static struct seq_operations ip6mr_vif_seq_ops = { | 207 | static const struct seq_operations ip6mr_vif_seq_ops = { |
208 | .start = ip6mr_vif_seq_start, | 208 | .start = ip6mr_vif_seq_start, |
209 | .next = ip6mr_vif_seq_next, | 209 | .next = ip6mr_vif_seq_next, |
210 | .stop = ip6mr_vif_seq_stop, | 210 | .stop = ip6mr_vif_seq_stop, |
@@ -217,7 +217,7 @@ static int ip6mr_vif_open(struct inode *inode, struct file *file) | |||
217 | sizeof(struct ipmr_vif_iter)); | 217 | sizeof(struct ipmr_vif_iter)); |
218 | } | 218 | } |
219 | 219 | ||
220 | static struct file_operations ip6mr_vif_fops = { | 220 | static const struct file_operations ip6mr_vif_fops = { |
221 | .owner = THIS_MODULE, | 221 | .owner = THIS_MODULE, |
222 | .open = ip6mr_vif_open, | 222 | .open = ip6mr_vif_open, |
223 | .read = seq_read, | 223 | .read = seq_read, |
@@ -341,7 +341,7 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file) | |||
341 | sizeof(struct ipmr_mfc_iter)); | 341 | sizeof(struct ipmr_mfc_iter)); |
342 | } | 342 | } |
343 | 343 | ||
344 | static struct file_operations ip6mr_mfc_fops = { | 344 | static const struct file_operations ip6mr_mfc_fops = { |
345 | .owner = THIS_MODULE, | 345 | .owner = THIS_MODULE, |
346 | .open = ipmr_mfc_open, | 346 | .open = ipmr_mfc_open, |
347 | .read = seq_read, | 347 | .read = seq_read, |
@@ -416,7 +416,8 @@ static struct inet6_protocol pim6_protocol = { | |||
416 | 416 | ||
417 | /* Service routines creating virtual interfaces: PIMREG */ | 417 | /* Service routines creating virtual interfaces: PIMREG */ |
418 | 418 | ||
419 | static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | 419 | static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, |
420 | struct net_device *dev) | ||
420 | { | 421 | { |
421 | struct net *net = dev_net(dev); | 422 | struct net *net = dev_net(dev); |
422 | 423 | ||
@@ -427,7 +428,7 @@ static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) | |||
427 | MRT6MSG_WHOLEPKT); | 428 | MRT6MSG_WHOLEPKT); |
428 | read_unlock(&mrt_lock); | 429 | read_unlock(&mrt_lock); |
429 | kfree_skb(skb); | 430 | kfree_skb(skb); |
430 | return 0; | 431 | return NETDEV_TX_OK; |
431 | } | 432 | } |
432 | 433 | ||
433 | static const struct net_device_ops reg_vif_netdev_ops = { | 434 | static const struct net_device_ops reg_vif_netdev_ops = { |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index a7fdf9a27f15..f5e0682b402d 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -315,6 +315,9 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
315 | goto e_inval; | 315 | goto e_inval; |
316 | if (val < -1 || val > 0xff) | 316 | if (val < -1 || val > 0xff) |
317 | goto e_inval; | 317 | goto e_inval; |
318 | /* RFC 3542, 6.5: default traffic class of 0x0 */ | ||
319 | if (val == -1) | ||
320 | val = 0; | ||
318 | np->tclass = val; | 321 | np->tclass = val; |
319 | retv = 0; | 322 | retv = 0; |
320 | break; | 323 | break; |
@@ -1037,8 +1040,6 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1037 | 1040 | ||
1038 | case IPV6_TCLASS: | 1041 | case IPV6_TCLASS: |
1039 | val = np->tclass; | 1042 | val = np->tclass; |
1040 | if (val < 0) | ||
1041 | val = 0; | ||
1042 | break; | 1043 | break; |
1043 | 1044 | ||
1044 | case IPV6_RECVTCLASS: | 1045 | case IPV6_RECVTCLASS: |
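With do_ipv6_setsockopt() now folding -1 into the RFC 3542 default of 0, the getsockopt() side no longer needs its own clamp. From an application's point of view, passing -1 simply restores the default traffic class; a short sketch using the IPV6_TCLASS option as exposed by the libc headers:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	int tclass = -1;	/* RFC 3542: -1 means "use the kernel default" */
	socklen_t len = sizeof(tclass);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &tclass, sizeof(tclass)) < 0)
		perror("setsockopt(IPV6_TCLASS)");
	if (getsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &tclass, &len) == 0)
		printf("traffic class now reads back as %d\n", tclass);	/* 0 */
	return 0;
}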
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 4b264ed40a8c..71c3dacec1ed 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -2107,7 +2107,6 @@ static int ip6_mc_add_src(struct inet6_dev *idev, struct in6_addr *pmca, | |||
2107 | for (j=0; j<i; j++) | 2107 | for (j=0; j<i; j++) |
2108 | (void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]); | 2108 | (void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]); |
2109 | } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) { | 2109 | } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) { |
2110 | struct inet6_dev *idev = pmc->idev; | ||
2111 | struct ip6_sf_list *psf; | 2110 | struct ip6_sf_list *psf; |
2112 | 2111 | ||
2113 | /* filter mode change */ | 2112 | /* filter mode change */ |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 9eb68e92cc18..7015478797f6 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -98,7 +98,7 @@ static int pndisc_constructor(struct pneigh_entry *n); | |||
98 | static void pndisc_destructor(struct pneigh_entry *n); | 98 | static void pndisc_destructor(struct pneigh_entry *n); |
99 | static void pndisc_redo(struct sk_buff *skb); | 99 | static void pndisc_redo(struct sk_buff *skb); |
100 | 100 | ||
101 | static struct neigh_ops ndisc_generic_ops = { | 101 | static const struct neigh_ops ndisc_generic_ops = { |
102 | .family = AF_INET6, | 102 | .family = AF_INET6, |
103 | .solicit = ndisc_solicit, | 103 | .solicit = ndisc_solicit, |
104 | .error_report = ndisc_error_report, | 104 | .error_report = ndisc_error_report, |
@@ -108,7 +108,7 @@ static struct neigh_ops ndisc_generic_ops = { | |||
108 | .queue_xmit = dev_queue_xmit, | 108 | .queue_xmit = dev_queue_xmit, |
109 | }; | 109 | }; |
110 | 110 | ||
111 | static struct neigh_ops ndisc_hh_ops = { | 111 | static const struct neigh_ops ndisc_hh_ops = { |
112 | .family = AF_INET6, | 112 | .family = AF_INET6, |
113 | .solicit = ndisc_solicit, | 113 | .solicit = ndisc_solicit, |
114 | .error_report = ndisc_error_report, | 114 | .error_report = ndisc_error_report, |
@@ -119,7 +119,7 @@ static struct neigh_ops ndisc_hh_ops = { | |||
119 | }; | 119 | }; |
120 | 120 | ||
121 | 121 | ||
122 | static struct neigh_ops ndisc_direct_ops = { | 122 | static const struct neigh_ops ndisc_direct_ops = { |
123 | .family = AF_INET6, | 123 | .family = AF_INET6, |
124 | .output = dev_queue_xmit, | 124 | .output = dev_queue_xmit, |
125 | .connected_output = dev_queue_xmit, | 125 | .connected_output = dev_queue_xmit, |
@@ -955,8 +955,8 @@ static void ndisc_recv_na(struct sk_buff *skb) | |||
955 | */ | 955 | */ |
956 | if (skb->pkt_type != PACKET_LOOPBACK) | 956 | if (skb->pkt_type != PACKET_LOOPBACK) |
957 | ND_PRINTK1(KERN_WARNING | 957 | ND_PRINTK1(KERN_WARNING |
958 | "ICMPv6 NA: someone advertises our address on %s!\n", | 958 | "ICMPv6 NA: someone advertises our address %pI6 on %s!\n", |
959 | ifp->idev->dev->name); | 959 | &ifp->addr, ifp->idev->dev->name); |
960 | in6_ifa_put(ifp); | 960 | in6_ifa_put(ifp); |
961 | return; | 961 | return; |
962 | } | 962 | } |
@@ -1151,10 +1151,6 @@ static void ndisc_router_discovery(struct sk_buff *skb) | |||
1151 | skb->dev->name); | 1151 | skb->dev->name); |
1152 | return; | 1152 | return; |
1153 | } | 1153 | } |
1154 | if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra) { | ||
1155 | in6_dev_put(in6_dev); | ||
1156 | return; | ||
1157 | } | ||
1158 | 1154 | ||
1159 | if (!ndisc_parse_options(opt, optlen, &ndopts)) { | 1155 | if (!ndisc_parse_options(opt, optlen, &ndopts)) { |
1160 | in6_dev_put(in6_dev); | 1156 | in6_dev_put(in6_dev); |
@@ -1163,6 +1159,10 @@ static void ndisc_router_discovery(struct sk_buff *skb) | |||
1163 | return; | 1159 | return; |
1164 | } | 1160 | } |
1165 | 1161 | ||
1162 | /* skip route and link configuration on routers */ | ||
1163 | if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra) | ||
1164 | goto skip_linkparms; | ||
1165 | |||
1166 | #ifdef CONFIG_IPV6_NDISC_NODETYPE | 1166 | #ifdef CONFIG_IPV6_NDISC_NODETYPE |
1167 | /* skip link-specific parameters from interior routers */ | 1167 | /* skip link-specific parameters from interior routers */ |
1168 | if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) | 1168 | if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) |
@@ -1283,9 +1283,7 @@ skip_defrtr: | |||
1283 | } | 1283 | } |
1284 | } | 1284 | } |
1285 | 1285 | ||
1286 | #ifdef CONFIG_IPV6_NDISC_NODETYPE | ||
1287 | skip_linkparms: | 1286 | skip_linkparms: |
1288 | #endif | ||
1289 | 1287 | ||
1290 | /* | 1288 | /* |
1291 | * Process options. | 1289 | * Process options. |
@@ -1312,6 +1310,10 @@ skip_linkparms: | |||
1312 | NEIGH_UPDATE_F_ISROUTER); | 1310 | NEIGH_UPDATE_F_ISROUTER); |
1313 | } | 1311 | } |
1314 | 1312 | ||
1313 | /* skip route and link configuration on routers */ | ||
1314 | if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra) | ||
1315 | goto out; | ||
1316 | |||
1315 | #ifdef CONFIG_IPV6_ROUTE_INFO | 1317 | #ifdef CONFIG_IPV6_ROUTE_INFO |
1316 | if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) { | 1318 | if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) { |
1317 | struct nd_opt_hdr *p; | 1319 | struct nd_opt_hdr *p; |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index ced1f2c0cb65..cc9f8ef303fd 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
12 | #include <linux/capability.h> | 12 | #include <linux/capability.h> |
13 | #include <linux/in.h> | 13 | #include <linux/in.h> |
14 | #include <linux/skbuff.h> | 14 | #include <linux/skbuff.h> |
@@ -222,16 +222,11 @@ get_entry(void *base, unsigned int offset) | |||
222 | 222 | ||
223 | /* All zeroes == unconditional rule. */ | 223 | /* All zeroes == unconditional rule. */ |
224 | /* Mildly perf critical (only if packet tracing is on) */ | 224 | /* Mildly perf critical (only if packet tracing is on) */ |
225 | static inline int | 225 | static inline bool unconditional(const struct ip6t_ip6 *ipv6) |
226 | unconditional(const struct ip6t_ip6 *ipv6) | ||
227 | { | 226 | { |
228 | unsigned int i; | 227 | static const struct ip6t_ip6 uncond; |
229 | |||
230 | for (i = 0; i < sizeof(*ipv6); i++) | ||
231 | if (((char *)ipv6)[i]) | ||
232 | break; | ||
233 | 228 | ||
234 | return (i == sizeof(*ipv6)); | 229 | return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; |
235 | } | 230 | } |
236 | 231 | ||
237 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 232 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ |
@@ -745,6 +740,21 @@ find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size, | |||
745 | return ret; | 740 | return ret; |
746 | } | 741 | } |
747 | 742 | ||
743 | static bool check_underflow(struct ip6t_entry *e) | ||
744 | { | ||
745 | const struct ip6t_entry_target *t; | ||
746 | unsigned int verdict; | ||
747 | |||
748 | if (!unconditional(&e->ipv6)) | ||
749 | return false; | ||
750 | t = ip6t_get_target(e); | ||
751 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | ||
752 | return false; | ||
753 | verdict = ((struct ip6t_standard_target *)t)->verdict; | ||
754 | verdict = -verdict - 1; | ||
755 | return verdict == NF_DROP || verdict == NF_ACCEPT; | ||
756 | } | ||
757 | |||
748 | static int | 758 | static int |
749 | check_entry_size_and_hooks(struct ip6t_entry *e, | 759 | check_entry_size_and_hooks(struct ip6t_entry *e, |
750 | struct xt_table_info *newinfo, | 760 | struct xt_table_info *newinfo, |
@@ -752,6 +762,7 @@ check_entry_size_and_hooks(struct ip6t_entry *e, | |||
752 | unsigned char *limit, | 762 | unsigned char *limit, |
753 | const unsigned int *hook_entries, | 763 | const unsigned int *hook_entries, |
754 | const unsigned int *underflows, | 764 | const unsigned int *underflows, |
765 | unsigned int valid_hooks, | ||
755 | unsigned int *i) | 766 | unsigned int *i) |
756 | { | 767 | { |
757 | unsigned int h; | 768 | unsigned int h; |
@@ -771,15 +782,21 @@ check_entry_size_and_hooks(struct ip6t_entry *e, | |||
771 | 782 | ||
772 | /* Check hooks & underflows */ | 783 | /* Check hooks & underflows */ |
773 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { | 784 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { |
785 | if (!(valid_hooks & (1 << h))) | ||
786 | continue; | ||
774 | if ((unsigned char *)e - base == hook_entries[h]) | 787 | if ((unsigned char *)e - base == hook_entries[h]) |
775 | newinfo->hook_entry[h] = hook_entries[h]; | 788 | newinfo->hook_entry[h] = hook_entries[h]; |
776 | if ((unsigned char *)e - base == underflows[h]) | 789 | if ((unsigned char *)e - base == underflows[h]) { |
790 | if (!check_underflow(e)) { | ||
791 | pr_err("Underflows must be unconditional and " | ||
792 | "use the STANDARD target with " | ||
793 | "ACCEPT/DROP\n"); | ||
794 | return -EINVAL; | ||
795 | } | ||
777 | newinfo->underflow[h] = underflows[h]; | 796 | newinfo->underflow[h] = underflows[h]; |
797 | } | ||
778 | } | 798 | } |
779 | 799 | ||
780 | /* FIXME: underflows must be unconditional, standard verdicts | ||
781 | < 0 (not IP6T_RETURN). --RR */ | ||
782 | |||
783 | /* Clear counters and comefrom */ | 800 | /* Clear counters and comefrom */ |
784 | e->counters = ((struct xt_counters) { 0, 0 }); | 801 | e->counters = ((struct xt_counters) { 0, 0 }); |
785 | e->comefrom = 0; | 802 | e->comefrom = 0; |
@@ -842,7 +859,7 @@ translate_table(const char *name, | |||
842 | newinfo, | 859 | newinfo, |
843 | entry0, | 860 | entry0, |
844 | entry0 + size, | 861 | entry0 + size, |
845 | hook_entries, underflows, &i); | 862 | hook_entries, underflows, valid_hooks, &i); |
846 | if (ret != 0) | 863 | if (ret != 0) |
847 | return ret; | 864 | return ret; |
848 | 865 | ||
@@ -2083,7 +2100,8 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2083 | return ret; | 2100 | return ret; |
2084 | } | 2101 | } |
2085 | 2102 | ||
2086 | struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table, | 2103 | struct xt_table *ip6t_register_table(struct net *net, |
2104 | const struct xt_table *table, | ||
2087 | const struct ip6t_replace *repl) | 2105 | const struct ip6t_replace *repl) |
2088 | { | 2106 | { |
2089 | int ret; | 2107 | int ret; |
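check_underflow() recovers the real netfilter verdict from what the standard target stores: builtin verdicts are kept as -verdict - 1, so applying the same transform again decodes them. A tiny round-trip demonstration (NF_DROP/NF_ACCEPT re-declared with the values from <linux/netfilter.h>):

#include <stdio.h>

#define NF_DROP   0	/* values as in <linux/netfilter.h> */
#define NF_ACCEPT 1

/* The standard target stores builtin verdicts as -verdict - 1;
 * check_underflow() undoes the transform before comparing. */
static int encode_verdict(int verdict) { return -verdict - 1; }
static int decode_verdict(int stored)  { return -stored - 1; }

int main(void)
{
	int stored_accept = encode_verdict(NF_ACCEPT);	/* -2 */
	int stored_drop   = encode_verdict(NF_DROP);	/* -1 */

	printf("ACCEPT stored as %d, decoded back to %d\n",
	       stored_accept, decode_verdict(stored_accept));
	printf("DROP   stored as %d, decoded back to %d\n",
	       stored_drop, decode_verdict(stored_drop));
	return 0;
}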
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c index db610bacbcce..ca287f6d2bce 100644 --- a/net/ipv6/netfilter/ip6t_eui64.c +++ b/net/ipv6/netfilter/ip6t_eui64.c | |||
@@ -23,7 +23,6 @@ static bool | |||
23 | eui64_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | 23 | eui64_mt6(const struct sk_buff *skb, const struct xt_match_param *par) |
24 | { | 24 | { |
25 | unsigned char eui64[8]; | 25 | unsigned char eui64[8]; |
26 | int i = 0; | ||
27 | 26 | ||
28 | if (!(skb_mac_header(skb) >= skb->head && | 27 | if (!(skb_mac_header(skb) >= skb->head && |
29 | skb_mac_header(skb) + ETH_HLEN <= skb->data) && | 28 | skb_mac_header(skb) + ETH_HLEN <= skb->data) && |
@@ -42,12 +41,8 @@ eui64_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
42 | eui64[4] = 0xfe; | 41 | eui64[4] = 0xfe; |
43 | eui64[0] ^= 0x02; | 42 | eui64[0] ^= 0x02; |
44 | 43 | ||
45 | i = 0; | 44 | if (!memcmp(ipv6_hdr(skb)->saddr.s6_addr + 8, eui64, |
46 | while (ipv6_hdr(skb)->saddr.s6_addr[8 + i] == eui64[i] | 45 | sizeof(eui64))) |
47 | && i < 8) | ||
48 | i++; | ||
49 | |||
50 | if (i == 8) | ||
51 | return true; | 46 | return true; |
52 | } | 47 | } |
53 | } | 48 | } |
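The rewritten eui64 match builds a modified EUI-64 from the Ethernet source MAC (split the 48-bit address in half, insert ff:fe, flip the universal/local bit) and now compares it against the interface identifier of the IPv6 source address with a single memcmp() instead of the old byte-by-byte loop. A standalone sketch of that derivation (the sample MAC is arbitrary):

#include <stdio.h>

/* Build a modified EUI-64 interface identifier from a 48-bit MAC:
 * copy the two halves, insert ff:fe, and flip the U/L bit. */
static void mac_to_eui64(const unsigned char mac[6], unsigned char eui64[8])
{
	eui64[0] = mac[0] ^ 0x02;	/* flip the universal/local bit */
	eui64[1] = mac[1];
	eui64[2] = mac[2];
	eui64[3] = 0xff;
	eui64[4] = 0xfe;
	eui64[5] = mac[3];
	eui64[6] = mac[4];
	eui64[7] = mac[5];
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0x3a, 0x4b, 0x5c };
	unsigned char id[8];
	int i;

	mac_to_eui64(mac, id);
	for (i = 0; i < 8; i++)
		printf("%02x%s", id[i], i == 7 ? "\n" : ":");
	return 0;
}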
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c index ef5a0a32bf8e..6f4383ad86f9 100644 --- a/net/ipv6/netfilter/ip6table_filter.c +++ b/net/ipv6/netfilter/ip6table_filter.c | |||
@@ -51,11 +51,11 @@ static struct | |||
51 | .term = IP6T_ERROR_INIT, /* ERROR */ | 51 | .term = IP6T_ERROR_INIT, /* ERROR */ |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static struct xt_table packet_filter = { | 54 | static const struct xt_table packet_filter = { |
55 | .name = "filter", | 55 | .name = "filter", |
56 | .valid_hooks = FILTER_VALID_HOOKS, | 56 | .valid_hooks = FILTER_VALID_HOOKS, |
57 | .me = THIS_MODULE, | 57 | .me = THIS_MODULE, |
58 | .af = AF_INET6, | 58 | .af = NFPROTO_IPV6, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | /* The work comes in here from netfilter.c. */ | 61 | /* The work comes in here from netfilter.c. */ |
@@ -95,21 +95,21 @@ static struct nf_hook_ops ip6t_ops[] __read_mostly = { | |||
95 | { | 95 | { |
96 | .hook = ip6t_in_hook, | 96 | .hook = ip6t_in_hook, |
97 | .owner = THIS_MODULE, | 97 | .owner = THIS_MODULE, |
98 | .pf = PF_INET6, | 98 | .pf = NFPROTO_IPV6, |
99 | .hooknum = NF_INET_LOCAL_IN, | 99 | .hooknum = NF_INET_LOCAL_IN, |
100 | .priority = NF_IP6_PRI_FILTER, | 100 | .priority = NF_IP6_PRI_FILTER, |
101 | }, | 101 | }, |
102 | { | 102 | { |
103 | .hook = ip6t_in_hook, | 103 | .hook = ip6t_in_hook, |
104 | .owner = THIS_MODULE, | 104 | .owner = THIS_MODULE, |
105 | .pf = PF_INET6, | 105 | .pf = NFPROTO_IPV6, |
106 | .hooknum = NF_INET_FORWARD, | 106 | .hooknum = NF_INET_FORWARD, |
107 | .priority = NF_IP6_PRI_FILTER, | 107 | .priority = NF_IP6_PRI_FILTER, |
108 | }, | 108 | }, |
109 | { | 109 | { |
110 | .hook = ip6t_local_out_hook, | 110 | .hook = ip6t_local_out_hook, |
111 | .owner = THIS_MODULE, | 111 | .owner = THIS_MODULE, |
112 | .pf = PF_INET6, | 112 | .pf = NFPROTO_IPV6, |
113 | .hooknum = NF_INET_LOCAL_OUT, | 113 | .hooknum = NF_INET_LOCAL_OUT, |
114 | .priority = NF_IP6_PRI_FILTER, | 114 | .priority = NF_IP6_PRI_FILTER, |
115 | }, | 115 | }, |
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index ab0d398a2ba7..0ad91433ed61 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c | |||
@@ -21,7 +21,7 @@ MODULE_DESCRIPTION("ip6tables mangle table"); | |||
21 | (1 << NF_INET_LOCAL_OUT) | \ | 21 | (1 << NF_INET_LOCAL_OUT) | \ |
22 | (1 << NF_INET_POST_ROUTING)) | 22 | (1 << NF_INET_POST_ROUTING)) |
23 | 23 | ||
24 | static struct | 24 | static const struct |
25 | { | 25 | { |
26 | struct ip6t_replace repl; | 26 | struct ip6t_replace repl; |
27 | struct ip6t_standard entries[5]; | 27 | struct ip6t_standard entries[5]; |
@@ -57,11 +57,11 @@ static struct | |||
57 | .term = IP6T_ERROR_INIT, /* ERROR */ | 57 | .term = IP6T_ERROR_INIT, /* ERROR */ |
58 | }; | 58 | }; |
59 | 59 | ||
60 | static struct xt_table packet_mangler = { | 60 | static const struct xt_table packet_mangler = { |
61 | .name = "mangle", | 61 | .name = "mangle", |
62 | .valid_hooks = MANGLE_VALID_HOOKS, | 62 | .valid_hooks = MANGLE_VALID_HOOKS, |
63 | .me = THIS_MODULE, | 63 | .me = THIS_MODULE, |
64 | .af = AF_INET6, | 64 | .af = NFPROTO_IPV6, |
65 | }; | 65 | }; |
66 | 66 | ||
67 | /* The work comes in here from netfilter.c. */ | 67 | /* The work comes in here from netfilter.c. */ |
@@ -136,35 +136,35 @@ static struct nf_hook_ops ip6t_ops[] __read_mostly = { | |||
136 | { | 136 | { |
137 | .hook = ip6t_in_hook, | 137 | .hook = ip6t_in_hook, |
138 | .owner = THIS_MODULE, | 138 | .owner = THIS_MODULE, |
139 | .pf = PF_INET6, | 139 | .pf = NFPROTO_IPV6, |
140 | .hooknum = NF_INET_PRE_ROUTING, | 140 | .hooknum = NF_INET_PRE_ROUTING, |
141 | .priority = NF_IP6_PRI_MANGLE, | 141 | .priority = NF_IP6_PRI_MANGLE, |
142 | }, | 142 | }, |
143 | { | 143 | { |
144 | .hook = ip6t_in_hook, | 144 | .hook = ip6t_in_hook, |
145 | .owner = THIS_MODULE, | 145 | .owner = THIS_MODULE, |
146 | .pf = PF_INET6, | 146 | .pf = NFPROTO_IPV6, |
147 | .hooknum = NF_INET_LOCAL_IN, | 147 | .hooknum = NF_INET_LOCAL_IN, |
148 | .priority = NF_IP6_PRI_MANGLE, | 148 | .priority = NF_IP6_PRI_MANGLE, |
149 | }, | 149 | }, |
150 | { | 150 | { |
151 | .hook = ip6t_in_hook, | 151 | .hook = ip6t_in_hook, |
152 | .owner = THIS_MODULE, | 152 | .owner = THIS_MODULE, |
153 | .pf = PF_INET6, | 153 | .pf = NFPROTO_IPV6, |
154 | .hooknum = NF_INET_FORWARD, | 154 | .hooknum = NF_INET_FORWARD, |
155 | .priority = NF_IP6_PRI_MANGLE, | 155 | .priority = NF_IP6_PRI_MANGLE, |
156 | }, | 156 | }, |
157 | { | 157 | { |
158 | .hook = ip6t_local_out_hook, | 158 | .hook = ip6t_local_out_hook, |
159 | .owner = THIS_MODULE, | 159 | .owner = THIS_MODULE, |
160 | .pf = PF_INET6, | 160 | .pf = NFPROTO_IPV6, |
161 | .hooknum = NF_INET_LOCAL_OUT, | 161 | .hooknum = NF_INET_LOCAL_OUT, |
162 | .priority = NF_IP6_PRI_MANGLE, | 162 | .priority = NF_IP6_PRI_MANGLE, |
163 | }, | 163 | }, |
164 | { | 164 | { |
165 | .hook = ip6t_post_routing_hook, | 165 | .hook = ip6t_post_routing_hook, |
166 | .owner = THIS_MODULE, | 166 | .owner = THIS_MODULE, |
167 | .pf = PF_INET6, | 167 | .pf = NFPROTO_IPV6, |
168 | .hooknum = NF_INET_POST_ROUTING, | 168 | .hooknum = NF_INET_POST_ROUTING, |
169 | .priority = NF_IP6_PRI_MANGLE, | 169 | .priority = NF_IP6_PRI_MANGLE, |
170 | }, | 170 | }, |
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c index 4b792b6ca321..ed1a1180f3b3 100644 --- a/net/ipv6/netfilter/ip6table_raw.c +++ b/net/ipv6/netfilter/ip6table_raw.c | |||
@@ -8,7 +8,7 @@ | |||
8 | 8 | ||
9 | #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) | 9 | #define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) |
10 | 10 | ||
11 | static struct | 11 | static const struct |
12 | { | 12 | { |
13 | struct ip6t_replace repl; | 13 | struct ip6t_replace repl; |
14 | struct ip6t_standard entries[2]; | 14 | struct ip6t_standard entries[2]; |
@@ -35,11 +35,11 @@ static struct | |||
35 | .term = IP6T_ERROR_INIT, /* ERROR */ | 35 | .term = IP6T_ERROR_INIT, /* ERROR */ |
36 | }; | 36 | }; |
37 | 37 | ||
38 | static struct xt_table packet_raw = { | 38 | static const struct xt_table packet_raw = { |
39 | .name = "raw", | 39 | .name = "raw", |
40 | .valid_hooks = RAW_VALID_HOOKS, | 40 | .valid_hooks = RAW_VALID_HOOKS, |
41 | .me = THIS_MODULE, | 41 | .me = THIS_MODULE, |
42 | .af = AF_INET6, | 42 | .af = NFPROTO_IPV6, |
43 | }; | 43 | }; |
44 | 44 | ||
45 | /* The work comes in here from netfilter.c. */ | 45 | /* The work comes in here from netfilter.c. */ |
@@ -68,14 +68,14 @@ ip6t_local_out_hook(unsigned int hook, | |||
68 | static struct nf_hook_ops ip6t_ops[] __read_mostly = { | 68 | static struct nf_hook_ops ip6t_ops[] __read_mostly = { |
69 | { | 69 | { |
70 | .hook = ip6t_pre_routing_hook, | 70 | .hook = ip6t_pre_routing_hook, |
71 | .pf = PF_INET6, | 71 | .pf = NFPROTO_IPV6, |
72 | .hooknum = NF_INET_PRE_ROUTING, | 72 | .hooknum = NF_INET_PRE_ROUTING, |
73 | .priority = NF_IP6_PRI_FIRST, | 73 | .priority = NF_IP6_PRI_FIRST, |
74 | .owner = THIS_MODULE, | 74 | .owner = THIS_MODULE, |
75 | }, | 75 | }, |
76 | { | 76 | { |
77 | .hook = ip6t_local_out_hook, | 77 | .hook = ip6t_local_out_hook, |
78 | .pf = PF_INET6, | 78 | .pf = NFPROTO_IPV6, |
79 | .hooknum = NF_INET_LOCAL_OUT, | 79 | .hooknum = NF_INET_LOCAL_OUT, |
80 | .priority = NF_IP6_PRI_FIRST, | 80 | .priority = NF_IP6_PRI_FIRST, |
81 | .owner = THIS_MODULE, | 81 | .owner = THIS_MODULE, |
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c index 0ea37ff15d56..41b444c60934 100644 --- a/net/ipv6/netfilter/ip6table_security.c +++ b/net/ipv6/netfilter/ip6table_security.c | |||
@@ -26,7 +26,7 @@ MODULE_DESCRIPTION("ip6tables security table, for MAC rules"); | |||
26 | (1 << NF_INET_FORWARD) | \ | 26 | (1 << NF_INET_FORWARD) | \ |
27 | (1 << NF_INET_LOCAL_OUT) | 27 | (1 << NF_INET_LOCAL_OUT) |
28 | 28 | ||
29 | static struct | 29 | static const struct |
30 | { | 30 | { |
31 | struct ip6t_replace repl; | 31 | struct ip6t_replace repl; |
32 | struct ip6t_standard entries[3]; | 32 | struct ip6t_standard entries[3]; |
@@ -56,11 +56,11 @@ static struct | |||
56 | .term = IP6T_ERROR_INIT, /* ERROR */ | 56 | .term = IP6T_ERROR_INIT, /* ERROR */ |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static struct xt_table security_table = { | 59 | static const struct xt_table security_table = { |
60 | .name = "security", | 60 | .name = "security", |
61 | .valid_hooks = SECURITY_VALID_HOOKS, | 61 | .valid_hooks = SECURITY_VALID_HOOKS, |
62 | .me = THIS_MODULE, | 62 | .me = THIS_MODULE, |
63 | .af = AF_INET6, | 63 | .af = NFPROTO_IPV6, |
64 | }; | 64 | }; |
65 | 65 | ||
66 | static unsigned int | 66 | static unsigned int |
@@ -101,21 +101,21 @@ static struct nf_hook_ops ip6t_ops[] __read_mostly = { | |||
101 | { | 101 | { |
102 | .hook = ip6t_local_in_hook, | 102 | .hook = ip6t_local_in_hook, |
103 | .owner = THIS_MODULE, | 103 | .owner = THIS_MODULE, |
104 | .pf = PF_INET6, | 104 | .pf = NFPROTO_IPV6, |
105 | .hooknum = NF_INET_LOCAL_IN, | 105 | .hooknum = NF_INET_LOCAL_IN, |
106 | .priority = NF_IP6_PRI_SECURITY, | 106 | .priority = NF_IP6_PRI_SECURITY, |
107 | }, | 107 | }, |
108 | { | 108 | { |
109 | .hook = ip6t_forward_hook, | 109 | .hook = ip6t_forward_hook, |
110 | .owner = THIS_MODULE, | 110 | .owner = THIS_MODULE, |
111 | .pf = PF_INET6, | 111 | .pf = NFPROTO_IPV6, |
112 | .hooknum = NF_INET_FORWARD, | 112 | .hooknum = NF_INET_FORWARD, |
113 | .priority = NF_IP6_PRI_SECURITY, | 113 | .priority = NF_IP6_PRI_SECURITY, |
114 | }, | 114 | }, |
115 | { | 115 | { |
116 | .hook = ip6t_local_out_hook, | 116 | .hook = ip6t_local_out_hook, |
117 | .owner = THIS_MODULE, | 117 | .owner = THIS_MODULE, |
118 | .pf = PF_INET6, | 118 | .pf = NFPROTO_IPV6, |
119 | .hooknum = NF_INET_LOCAL_OUT, | 119 | .hooknum = NF_INET_LOCAL_OUT, |
120 | .priority = NF_IP6_PRI_SECURITY, | 120 | .priority = NF_IP6_PRI_SECURITY, |
121 | }, | 121 | }, |
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 2a15c2d66c69..5f2ec208a8c3 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <net/netfilter/nf_conntrack_l3proto.h> | 27 | #include <net/netfilter/nf_conntrack_l3proto.h> |
28 | #include <net/netfilter/nf_conntrack_core.h> | 28 | #include <net/netfilter/nf_conntrack_core.h> |
29 | #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> | 29 | #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> |
30 | #include <net/netfilter/nf_log.h> | ||
30 | 31 | ||
31 | static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, | 32 | static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, |
32 | struct nf_conntrack_tuple *tuple) | 33 | struct nf_conntrack_tuple *tuple) |
@@ -176,8 +177,11 @@ static unsigned int ipv6_confirm(unsigned int hooknum, | |||
176 | } | 177 | } |
177 | 178 | ||
178 | ret = helper->help(skb, protoff, ct, ctinfo); | 179 | ret = helper->help(skb, protoff, ct, ctinfo); |
179 | if (ret != NF_ACCEPT) | 180 | if (ret != NF_ACCEPT) { |
181 | nf_log_packet(NFPROTO_IPV6, hooknum, skb, in, out, NULL, | ||
182 | "nf_ct_%s: dropping packet", helper->name); | ||
180 | return ret; | 183 | return ret; |
184 | } | ||
181 | out: | 185 | out: |
182 | /* We've seen it coming out the other side: confirm it */ | 186 | /* We've seen it coming out the other side: confirm it */ |
183 | return nf_conntrack_confirm(skb); | 187 | return nf_conntrack_confirm(skb); |
@@ -265,42 +269,42 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = { | |||
265 | { | 269 | { |
266 | .hook = ipv6_defrag, | 270 | .hook = ipv6_defrag, |
267 | .owner = THIS_MODULE, | 271 | .owner = THIS_MODULE, |
268 | .pf = PF_INET6, | 272 | .pf = NFPROTO_IPV6, |
269 | .hooknum = NF_INET_PRE_ROUTING, | 273 | .hooknum = NF_INET_PRE_ROUTING, |
270 | .priority = NF_IP6_PRI_CONNTRACK_DEFRAG, | 274 | .priority = NF_IP6_PRI_CONNTRACK_DEFRAG, |
271 | }, | 275 | }, |
272 | { | 276 | { |
273 | .hook = ipv6_conntrack_in, | 277 | .hook = ipv6_conntrack_in, |
274 | .owner = THIS_MODULE, | 278 | .owner = THIS_MODULE, |
275 | .pf = PF_INET6, | 279 | .pf = NFPROTO_IPV6, |
276 | .hooknum = NF_INET_PRE_ROUTING, | 280 | .hooknum = NF_INET_PRE_ROUTING, |
277 | .priority = NF_IP6_PRI_CONNTRACK, | 281 | .priority = NF_IP6_PRI_CONNTRACK, |
278 | }, | 282 | }, |
279 | { | 283 | { |
280 | .hook = ipv6_conntrack_local, | 284 | .hook = ipv6_conntrack_local, |
281 | .owner = THIS_MODULE, | 285 | .owner = THIS_MODULE, |
282 | .pf = PF_INET6, | 286 | .pf = NFPROTO_IPV6, |
283 | .hooknum = NF_INET_LOCAL_OUT, | 287 | .hooknum = NF_INET_LOCAL_OUT, |
284 | .priority = NF_IP6_PRI_CONNTRACK, | 288 | .priority = NF_IP6_PRI_CONNTRACK, |
285 | }, | 289 | }, |
286 | { | 290 | { |
287 | .hook = ipv6_defrag, | 291 | .hook = ipv6_defrag, |
288 | .owner = THIS_MODULE, | 292 | .owner = THIS_MODULE, |
289 | .pf = PF_INET6, | 293 | .pf = NFPROTO_IPV6, |
290 | .hooknum = NF_INET_LOCAL_OUT, | 294 | .hooknum = NF_INET_LOCAL_OUT, |
291 | .priority = NF_IP6_PRI_CONNTRACK_DEFRAG, | 295 | .priority = NF_IP6_PRI_CONNTRACK_DEFRAG, |
292 | }, | 296 | }, |
293 | { | 297 | { |
294 | .hook = ipv6_confirm, | 298 | .hook = ipv6_confirm, |
295 | .owner = THIS_MODULE, | 299 | .owner = THIS_MODULE, |
296 | .pf = PF_INET6, | 300 | .pf = NFPROTO_IPV6, |
297 | .hooknum = NF_INET_POST_ROUTING, | 301 | .hooknum = NF_INET_POST_ROUTING, |
298 | .priority = NF_IP6_PRI_LAST, | 302 | .priority = NF_IP6_PRI_LAST, |
299 | }, | 303 | }, |
300 | { | 304 | { |
301 | .hook = ipv6_confirm, | 305 | .hook = ipv6_confirm, |
302 | .owner = THIS_MODULE, | 306 | .owner = THIS_MODULE, |
303 | .pf = PF_INET6, | 307 | .pf = NFPROTO_IPV6, |
304 | .hooknum = NF_INET_LOCAL_IN, | 308 | .hooknum = NF_INET_LOCAL_IN, |
305 | .priority = NF_IP6_PRI_LAST-1, | 309 | .priority = NF_IP6_PRI_LAST-1, |
306 | }, | 310 | }, |
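The conntrack hunks above switch every hook registration from the socket-family constant PF_INET6 to the netfilter-specific NFPROTO_IPV6 and add an nf_log_packet() call when a conntrack helper rejects a packet. Below is a minimal sketch of a hook using the same registration fields, assuming the 2.6.31-era nf_hookfn signature; the hook name, hook point and priority are illustrative only, not part of the patch.

    #include <linux/module.h>
    #include <linux/netfilter.h>
    #include <linux/netfilter_ipv6.h>
    #include <net/netfilter/nf_log.h>

    /* Log-and-accept hook; mirrors the fields used by the tables above. */
    static unsigned int demo_in_hook(unsigned int hooknum, struct sk_buff *skb,
                                     const struct net_device *in,
                                     const struct net_device *out,
                                     int (*okfn)(struct sk_buff *))
    {
            nf_log_packet(NFPROTO_IPV6, hooknum, skb, in, out, NULL,
                          "demo: packet seen on LOCAL_IN");
            return NF_ACCEPT;
    }

    static struct nf_hook_ops demo_ops __read_mostly = {
            .hook     = demo_in_hook,
            .owner    = THIS_MODULE,
            .pf       = NFPROTO_IPV6,       /* rather than PF_INET6 */
            .hooknum  = NF_INET_LOCAL_IN,
            .priority = NF_IP6_PRI_LAST,
    };

    /* registered from module init with nf_register_hook(&demo_ops) */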
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 590ddefb7ffc..c9605c3ad91f 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -101,7 +101,7 @@ static struct snmp_mib snmp6_icmp6_list[] = { | |||
101 | }; | 101 | }; |
102 | 102 | ||
103 | /* RFC 4293 v6 ICMPMsgStatsTable; named items for RFC 2466 compatibility */ | 103 | /* RFC 4293 v6 ICMPMsgStatsTable; named items for RFC 2466 compatibility */ |
104 | static char *icmp6type2name[256] = { | 104 | static const char *const icmp6type2name[256] = { |
105 | [ICMPV6_DEST_UNREACH] = "DestUnreachs", | 105 | [ICMPV6_DEST_UNREACH] = "DestUnreachs", |
106 | [ICMPV6_PKT_TOOBIG] = "PktTooBigs", | 106 | [ICMPV6_PKT_TOOBIG] = "PktTooBigs", |
107 | [ICMPV6_TIME_EXCEED] = "TimeExcds", | 107 | [ICMPV6_TIME_EXCEED] = "TimeExcds", |
@@ -144,7 +144,7 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib) | |||
144 | /* print by name -- deprecated items */ | 144 | /* print by name -- deprecated items */ |
145 | for (i = 0; i < ICMP6MSG_MIB_MAX; i++) { | 145 | for (i = 0; i < ICMP6MSG_MIB_MAX; i++) { |
146 | int icmptype; | 146 | int icmptype; |
147 | char *p; | 147 | const char *p; |
148 | 148 | ||
149 | icmptype = i & 0xff; | 149 | icmptype = i & 0xff; |
150 | p = icmp6type2name[icmptype]; | 150 | p = icmp6type2name[icmptype]; |
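The icmp6type2name table (and the local pointer that walks it) gains a second const, so the array of pointers itself becomes read-only rather than just the strings it points to. A small userspace illustration of the difference; the table contents are example strings only.

    #include <stdio.h>

    static const char *mutable_table[] = { "DestUnreachs", "PktTooBigs" };
    static const char *const ro_table[] = { "DestUnreachs", "PktTooBigs" };

    int main(void)
    {
            mutable_table[0] = "Oops";      /* compiles: only the chars are const */
            /* ro_table[0] = "Oops"; */     /* would not compile: pointer slots are const too */
            printf("%s %s\n", mutable_table[0], ro_table[0]);
            return 0;
    }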
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c index 9ab789159913..568864f722ca 100644 --- a/net/ipv6/protocol.c +++ b/net/ipv6/protocol.c | |||
@@ -20,20 +20,9 @@ | |||
20 | * - Removed unused variable 'inet6_protocol_base' | 20 | * - Removed unused variable 'inet6_protocol_base' |
21 | * - Modified inet6_del_protocol() to correctly maintain copy bit. | 21 | * - Modified inet6_del_protocol() to correctly maintain copy bit. |
22 | */ | 22 | */ |
23 | 23 | #include <linux/module.h> | |
24 | #include <linux/errno.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/socket.h> | ||
27 | #include <linux/sockios.h> | ||
28 | #include <linux/net.h> | ||
29 | #include <linux/in6.h> | ||
30 | #include <linux/netdevice.h> | 24 | #include <linux/netdevice.h> |
31 | #include <linux/if_arp.h> | 25 | #include <linux/spinlock.h> |
32 | |||
33 | #include <net/sock.h> | ||
34 | #include <net/snmp.h> | ||
35 | |||
36 | #include <net/ipv6.h> | ||
37 | #include <net/protocol.h> | 26 | #include <net/protocol.h> |
38 | 27 | ||
39 | struct inet6_protocol *inet6_protos[MAX_INET_PROTOS]; | 28 | struct inet6_protocol *inet6_protos[MAX_INET_PROTOS]; |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index d6c3c1c34b2d..7d675b8d82d3 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -642,7 +642,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, | |||
642 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, | 642 | err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, |
643 | dst_output); | 643 | dst_output); |
644 | if (err > 0) | 644 | if (err > 0) |
645 | err = np->recverr ? net_xmit_errno(err) : 0; | 645 | err = net_xmit_errno(err); |
646 | if (err) | 646 | if (err) |
647 | goto error; | 647 | goto error; |
648 | out: | 648 | out: |
@@ -653,6 +653,8 @@ error_fault: | |||
653 | kfree_skb(skb); | 653 | kfree_skb(skb); |
654 | error: | 654 | error: |
655 | IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); | 655 | IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); |
656 | if (err == -ENOBUFS && !np->recverr) | ||
657 | err = 0; | ||
656 | return err; | 658 | return err; |
657 | } | 659 | } |
658 | 660 | ||
@@ -877,11 +879,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
877 | hlimit = ip6_dst_hoplimit(dst); | 879 | hlimit = ip6_dst_hoplimit(dst); |
878 | } | 880 | } |
879 | 881 | ||
880 | if (tclass < 0) { | 882 | if (tclass < 0) |
881 | tclass = np->tclass; | 883 | tclass = np->tclass; |
882 | if (tclass < 0) | ||
883 | tclass = 0; | ||
884 | } | ||
885 | 884 | ||
886 | if (msg->msg_flags&MSG_CONFIRM) | 885 | if (msg->msg_flags&MSG_CONFIRM) |
887 | goto do_confirm; | 886 | goto do_confirm; |
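With this change a raw socket that has not enabled IPV6_RECVERR no longer sees -ENOBUFS from the local output path; applications that do want such errors opt in explicitly and then read the details from the socket error queue (MSG_ERRQUEUE). A hedged userspace sketch of that opt-in follows; the socket type is just an example and raw sockets need CAP_NET_RAW.

    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            int on = 1;
            int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            /* ask the kernel to queue extended send errors for this socket */
            if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on)) < 0)
                    perror("setsockopt(IPV6_RECVERR)");
            return 0;
    }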
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 1473ee0a1f51..9ccfef345560 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -665,7 +665,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad | |||
665 | net->ipv6.sysctl.ip6_rt_gc_elasticity = 1; | 665 | net->ipv6.sysctl.ip6_rt_gc_elasticity = 1; |
666 | net->ipv6.sysctl.ip6_rt_gc_min_interval = 0; | 666 | net->ipv6.sysctl.ip6_rt_gc_min_interval = 0; |
667 | 667 | ||
668 | ip6_dst_gc(net->ipv6.ip6_dst_ops); | 668 | ip6_dst_gc(&net->ipv6.ip6_dst_ops); |
669 | 669 | ||
670 | net->ipv6.sysctl.ip6_rt_gc_elasticity = | 670 | net->ipv6.sysctl.ip6_rt_gc_elasticity = |
671 | saved_rt_elasticity; | 671 | saved_rt_elasticity; |
@@ -970,7 +970,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, | |||
970 | if (unlikely(idev == NULL)) | 970 | if (unlikely(idev == NULL)) |
971 | return NULL; | 971 | return NULL; |
972 | 972 | ||
973 | rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); | 973 | rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); |
974 | if (unlikely(rt == NULL)) { | 974 | if (unlikely(rt == NULL)) { |
975 | in6_dev_put(idev); | 975 | in6_dev_put(idev); |
976 | goto out; | 976 | goto out; |
@@ -1060,7 +1060,7 @@ static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg), | |||
1060 | static int ip6_dst_gc(struct dst_ops *ops) | 1060 | static int ip6_dst_gc(struct dst_ops *ops) |
1061 | { | 1061 | { |
1062 | unsigned long now = jiffies; | 1062 | unsigned long now = jiffies; |
1063 | struct net *net = ops->dst_net; | 1063 | struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops); |
1064 | int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval; | 1064 | int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval; |
1065 | int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size; | 1065 | int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size; |
1066 | int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity; | 1066 | int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity; |
@@ -1154,7 +1154,7 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1154 | goto out; | 1154 | goto out; |
1155 | } | 1155 | } |
1156 | 1156 | ||
1157 | rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); | 1157 | rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); |
1158 | 1158 | ||
1159 | if (rt == NULL) { | 1159 | if (rt == NULL) { |
1160 | err = -ENOMEM; | 1160 | err = -ENOMEM; |
@@ -1643,7 +1643,7 @@ out: | |||
1643 | static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) | 1643 | static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) |
1644 | { | 1644 | { |
1645 | struct net *net = dev_net(ort->rt6i_dev); | 1645 | struct net *net = dev_net(ort->rt6i_dev); |
1646 | struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); | 1646 | struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); |
1647 | 1647 | ||
1648 | if (rt) { | 1648 | if (rt) { |
1649 | rt->u.dst.input = ort->u.dst.input; | 1649 | rt->u.dst.input = ort->u.dst.input; |
@@ -1923,7 +1923,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1923 | int anycast) | 1923 | int anycast) |
1924 | { | 1924 | { |
1925 | struct net *net = dev_net(idev->dev); | 1925 | struct net *net = dev_net(idev->dev); |
1926 | struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops); | 1926 | struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); |
1927 | struct neighbour *neigh; | 1927 | struct neighbour *neigh; |
1928 | 1928 | ||
1929 | if (rt == NULL) | 1929 | if (rt == NULL) |
@@ -2501,7 +2501,7 @@ static int rt6_stats_seq_show(struct seq_file *seq, void *v) | |||
2501 | net->ipv6.rt6_stats->fib_rt_alloc, | 2501 | net->ipv6.rt6_stats->fib_rt_alloc, |
2502 | net->ipv6.rt6_stats->fib_rt_entries, | 2502 | net->ipv6.rt6_stats->fib_rt_entries, |
2503 | net->ipv6.rt6_stats->fib_rt_cache, | 2503 | net->ipv6.rt6_stats->fib_rt_cache, |
2504 | atomic_read(&net->ipv6.ip6_dst_ops->entries), | 2504 | atomic_read(&net->ipv6.ip6_dst_ops.entries), |
2505 | net->ipv6.rt6_stats->fib_discarded_routes); | 2505 | net->ipv6.rt6_stats->fib_discarded_routes); |
2506 | 2506 | ||
2507 | return 0; | 2507 | return 0; |
@@ -2637,7 +2637,7 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net) | |||
2637 | 2637 | ||
2638 | if (table) { | 2638 | if (table) { |
2639 | table[0].data = &net->ipv6.sysctl.flush_delay; | 2639 | table[0].data = &net->ipv6.sysctl.flush_delay; |
2640 | table[1].data = &net->ipv6.ip6_dst_ops->gc_thresh; | 2640 | table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; |
2641 | table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; | 2641 | table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; |
2642 | table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; | 2642 | table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; |
2643 | table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout; | 2643 | table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout; |
@@ -2655,12 +2655,8 @@ static int ip6_route_net_init(struct net *net) | |||
2655 | { | 2655 | { |
2656 | int ret = -ENOMEM; | 2656 | int ret = -ENOMEM; |
2657 | 2657 | ||
2658 | net->ipv6.ip6_dst_ops = kmemdup(&ip6_dst_ops_template, | 2658 | memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template, |
2659 | sizeof(*net->ipv6.ip6_dst_ops), | 2659 | sizeof(net->ipv6.ip6_dst_ops)); |
2660 | GFP_KERNEL); | ||
2661 | if (!net->ipv6.ip6_dst_ops) | ||
2662 | goto out; | ||
2663 | net->ipv6.ip6_dst_ops->dst_net = hold_net(net); | ||
2664 | 2660 | ||
2665 | net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template, | 2661 | net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template, |
2666 | sizeof(*net->ipv6.ip6_null_entry), | 2662 | sizeof(*net->ipv6.ip6_null_entry), |
@@ -2669,7 +2665,7 @@ static int ip6_route_net_init(struct net *net) | |||
2669 | goto out_ip6_dst_ops; | 2665 | goto out_ip6_dst_ops; |
2670 | net->ipv6.ip6_null_entry->u.dst.path = | 2666 | net->ipv6.ip6_null_entry->u.dst.path = |
2671 | (struct dst_entry *)net->ipv6.ip6_null_entry; | 2667 | (struct dst_entry *)net->ipv6.ip6_null_entry; |
2672 | net->ipv6.ip6_null_entry->u.dst.ops = net->ipv6.ip6_dst_ops; | 2668 | net->ipv6.ip6_null_entry->u.dst.ops = &net->ipv6.ip6_dst_ops; |
2673 | 2669 | ||
2674 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 2670 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
2675 | net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, | 2671 | net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, |
@@ -2679,7 +2675,7 @@ static int ip6_route_net_init(struct net *net) | |||
2679 | goto out_ip6_null_entry; | 2675 | goto out_ip6_null_entry; |
2680 | net->ipv6.ip6_prohibit_entry->u.dst.path = | 2676 | net->ipv6.ip6_prohibit_entry->u.dst.path = |
2681 | (struct dst_entry *)net->ipv6.ip6_prohibit_entry; | 2677 | (struct dst_entry *)net->ipv6.ip6_prohibit_entry; |
2682 | net->ipv6.ip6_prohibit_entry->u.dst.ops = net->ipv6.ip6_dst_ops; | 2678 | net->ipv6.ip6_prohibit_entry->u.dst.ops = &net->ipv6.ip6_dst_ops; |
2683 | 2679 | ||
2684 | net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, | 2680 | net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, |
2685 | sizeof(*net->ipv6.ip6_blk_hole_entry), | 2681 | sizeof(*net->ipv6.ip6_blk_hole_entry), |
@@ -2688,7 +2684,7 @@ static int ip6_route_net_init(struct net *net) | |||
2688 | goto out_ip6_prohibit_entry; | 2684 | goto out_ip6_prohibit_entry; |
2689 | net->ipv6.ip6_blk_hole_entry->u.dst.path = | 2685 | net->ipv6.ip6_blk_hole_entry->u.dst.path = |
2690 | (struct dst_entry *)net->ipv6.ip6_blk_hole_entry; | 2686 | (struct dst_entry *)net->ipv6.ip6_blk_hole_entry; |
2691 | net->ipv6.ip6_blk_hole_entry->u.dst.ops = net->ipv6.ip6_dst_ops; | 2687 | net->ipv6.ip6_blk_hole_entry->u.dst.ops = &net->ipv6.ip6_dst_ops; |
2692 | #endif | 2688 | #endif |
2693 | 2689 | ||
2694 | net->ipv6.sysctl.flush_delay = 0; | 2690 | net->ipv6.sysctl.flush_delay = 0; |
@@ -2717,8 +2713,6 @@ out_ip6_null_entry: | |||
2717 | kfree(net->ipv6.ip6_null_entry); | 2713 | kfree(net->ipv6.ip6_null_entry); |
2718 | #endif | 2714 | #endif |
2719 | out_ip6_dst_ops: | 2715 | out_ip6_dst_ops: |
2720 | release_net(net->ipv6.ip6_dst_ops->dst_net); | ||
2721 | kfree(net->ipv6.ip6_dst_ops); | ||
2722 | goto out; | 2716 | goto out; |
2723 | } | 2717 | } |
2724 | 2718 | ||
@@ -2733,8 +2727,6 @@ static void ip6_route_net_exit(struct net *net) | |||
2733 | kfree(net->ipv6.ip6_prohibit_entry); | 2727 | kfree(net->ipv6.ip6_prohibit_entry); |
2734 | kfree(net->ipv6.ip6_blk_hole_entry); | 2728 | kfree(net->ipv6.ip6_blk_hole_entry); |
2735 | #endif | 2729 | #endif |
2736 | release_net(net->ipv6.ip6_dst_ops->dst_net); | ||
2737 | kfree(net->ipv6.ip6_dst_ops); | ||
2738 | } | 2730 | } |
2739 | 2731 | ||
2740 | static struct pernet_operations ip6_route_net_ops = { | 2732 | static struct pernet_operations ip6_route_net_ops = { |
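Because ip6_dst_ops is now embedded in struct net instead of being allocated separately, ip6_dst_gc() recovers its struct net with container_of() rather than through a dst_net back-pointer. A standalone illustration of that idiom with simplified stand-in types (these are not the kernel definitions):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct dst_ops { int gc_thresh; };
    struct ipv6_ctx { struct dst_ops ip6_dst_ops; };
    struct net { int id; struct ipv6_ctx ipv6; };

    /* Given only the embedded ops pointer, recover the enclosing net. */
    static void gc(struct dst_ops *ops)
    {
            struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
            printf("gc for net %d (gc_thresh %d)\n", net->id, ops->gc_thresh);
    }

    int main(void)
    {
            struct net n = { .id = 7, .ipv6 = { .ip6_dst_ops = { .gc_thresh = 1024 } } };
            gc(&n.ipv6.ip6_dst_ops);
            return 0;
    }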
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 98b7327d0949..0ae4f6448187 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -609,7 +609,8 @@ static inline __be32 try_6to4(struct in6_addr *v6dst) | |||
609 | * and that skb is filled properly by that function. | 609 | * and that skb is filled properly by that function. |
610 | */ | 610 | */ |
611 | 611 | ||
612 | static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 612 | static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, |
613 | struct net_device *dev) | ||
613 | { | 614 | { |
614 | struct ip_tunnel *tunnel = netdev_priv(dev); | 615 | struct ip_tunnel *tunnel = netdev_priv(dev); |
615 | struct net_device_stats *stats = &tunnel->dev->stats; | 616 | struct net_device_stats *stats = &tunnel->dev->stats; |
@@ -753,7 +754,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
753 | stats->tx_dropped++; | 754 | stats->tx_dropped++; |
754 | dev_kfree_skb(skb); | 755 | dev_kfree_skb(skb); |
755 | tunnel->recursion--; | 756 | tunnel->recursion--; |
756 | return 0; | 757 | return NETDEV_TX_OK; |
757 | } | 758 | } |
758 | if (skb->sk) | 759 | if (skb->sk) |
759 | skb_set_owner_w(new_skb, skb->sk); | 760 | skb_set_owner_w(new_skb, skb->sk); |
@@ -778,7 +779,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
778 | iph->version = 4; | 779 | iph->version = 4; |
779 | iph->ihl = sizeof(struct iphdr)>>2; | 780 | iph->ihl = sizeof(struct iphdr)>>2; |
780 | if (mtu > IPV6_MIN_MTU) | 781 | if (mtu > IPV6_MIN_MTU) |
781 | iph->frag_off = htons(IP_DF); | 782 | iph->frag_off = tiph->frag_off; |
782 | else | 783 | else |
783 | iph->frag_off = 0; | 784 | iph->frag_off = 0; |
784 | 785 | ||
@@ -794,7 +795,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
794 | 795 | ||
795 | IPTUNNEL_XMIT(); | 796 | IPTUNNEL_XMIT(); |
796 | tunnel->recursion--; | 797 | tunnel->recursion--; |
797 | return 0; | 798 | return NETDEV_TX_OK; |
798 | 799 | ||
799 | tx_error_icmp: | 800 | tx_error_icmp: |
800 | dst_link_failure(skb); | 801 | dst_link_failure(skb); |
@@ -802,7 +803,7 @@ tx_error: | |||
802 | stats->tx_errors++; | 803 | stats->tx_errors++; |
803 | dev_kfree_skb(skb); | 804 | dev_kfree_skb(skb); |
804 | tunnel->recursion--; | 805 | tunnel->recursion--; |
805 | return 0; | 806 | return NETDEV_TX_OK; |
806 | } | 807 | } |
807 | 808 | ||
808 | static void ipip6_tunnel_bind_dev(struct net_device *dev) | 809 | static void ipip6_tunnel_bind_dev(struct net_device *dev) |
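ipip6_tunnel_xmit() (like the irlan_eth conversion later in this section) now returns the dedicated netdev_tx_t type and NETDEV_TX_OK instead of a bare 0. A minimal sketch of an xmit handler in the new style; the device name and queueing details are placeholders, not taken from the patch.

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* ... hand the skb to hardware here ... */
            dev->stats.tx_packets++;
            dev->stats.tx_bytes += skb->len;
            dev_kfree_skb(skb);             /* consumed either way in this sketch */
            return NETDEV_TX_OK;            /* no longer a plain 0 / -errno */
    }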
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index a031034720b4..0dc6a4e5ed4a 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c | |||
@@ -40,7 +40,7 @@ static ctl_table ipv6_table_template[] = { | |||
40 | { .ctl_name = 0 } | 40 | { .ctl_name = 0 } |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static ctl_table ipv6_table[] = { | 43 | static ctl_table ipv6_rotable[] = { |
44 | { | 44 | { |
45 | .ctl_name = NET_IPV6_MLD_MAX_MSF, | 45 | .ctl_name = NET_IPV6_MLD_MAX_MSF, |
46 | .procname = "mld_max_msf", | 46 | .procname = "mld_max_msf", |
@@ -130,7 +130,7 @@ int ipv6_sysctl_register(void) | |||
130 | { | 130 | { |
131 | int err = -ENOMEM; | 131 | int err = -ENOMEM; |
132 | 132 | ||
133 | ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table); | 133 | ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_rotable); |
134 | if (ip6_header == NULL) | 134 | if (ip6_header == NULL) |
135 | goto out; | 135 | goto out; |
136 | 136 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index d849dd53b788..3aae0f217d61 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -75,11 +75,11 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, | |||
75 | 75 | ||
76 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); | 76 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); |
77 | 77 | ||
78 | static struct inet_connection_sock_af_ops ipv6_mapped; | 78 | static const struct inet_connection_sock_af_ops ipv6_mapped; |
79 | static struct inet_connection_sock_af_ops ipv6_specific; | 79 | static const struct inet_connection_sock_af_ops ipv6_specific; |
80 | #ifdef CONFIG_TCP_MD5SIG | 80 | #ifdef CONFIG_TCP_MD5SIG |
81 | static struct tcp_sock_af_ops tcp_sock_ipv6_specific; | 81 | static const struct tcp_sock_af_ops tcp_sock_ipv6_specific; |
82 | static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; | 82 | static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; |
83 | #else | 83 | #else |
84 | static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, | 84 | static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, |
85 | struct in6_addr *addr) | 85 | struct in6_addr *addr) |
@@ -591,7 +591,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, | |||
591 | } | 591 | } |
592 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; | 592 | sk->sk_route_caps &= ~NETIF_F_GSO_MASK; |
593 | } | 593 | } |
594 | if (tcp_alloc_md5sig_pool() == NULL) { | 594 | if (tcp_alloc_md5sig_pool(sk) == NULL) { |
595 | kfree(newkey); | 595 | kfree(newkey); |
596 | return -ENOMEM; | 596 | return -ENOMEM; |
597 | } | 597 | } |
@@ -894,7 +894,7 @@ struct request_sock_ops tcp6_request_sock_ops __read_mostly = { | |||
894 | }; | 894 | }; |
895 | 895 | ||
896 | #ifdef CONFIG_TCP_MD5SIG | 896 | #ifdef CONFIG_TCP_MD5SIG |
897 | static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { | 897 | static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { |
898 | .md5_lookup = tcp_v6_reqsk_md5_lookup, | 898 | .md5_lookup = tcp_v6_reqsk_md5_lookup, |
899 | .calc_md5_hash = tcp_v6_md5_hash_skb, | 899 | .calc_md5_hash = tcp_v6_md5_hash_skb, |
900 | }; | 900 | }; |
@@ -1003,6 +1003,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
1003 | skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); | 1003 | skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); |
1004 | 1004 | ||
1005 | t1 = (struct tcphdr *) skb_push(buff, tot_len); | 1005 | t1 = (struct tcphdr *) skb_push(buff, tot_len); |
1006 | skb_reset_transport_header(skb); | ||
1006 | 1007 | ||
1007 | /* Swap the send and the receive. */ | 1008 | /* Swap the send and the receive. */ |
1008 | memset(t1, 0, sizeof(*t1)); | 1009 | memset(t1, 0, sizeof(*t1)); |
@@ -1760,7 +1761,7 @@ static int tcp_v6_remember_stamp(struct sock *sk) | |||
1760 | return 0; | 1761 | return 0; |
1761 | } | 1762 | } |
1762 | 1763 | ||
1763 | static struct inet_connection_sock_af_ops ipv6_specific = { | 1764 | static const struct inet_connection_sock_af_ops ipv6_specific = { |
1764 | .queue_xmit = inet6_csk_xmit, | 1765 | .queue_xmit = inet6_csk_xmit, |
1765 | .send_check = tcp_v6_send_check, | 1766 | .send_check = tcp_v6_send_check, |
1766 | .rebuild_header = inet6_sk_rebuild_header, | 1767 | .rebuild_header = inet6_sk_rebuild_header, |
@@ -1780,7 +1781,7 @@ static struct inet_connection_sock_af_ops ipv6_specific = { | |||
1780 | }; | 1781 | }; |
1781 | 1782 | ||
1782 | #ifdef CONFIG_TCP_MD5SIG | 1783 | #ifdef CONFIG_TCP_MD5SIG |
1783 | static struct tcp_sock_af_ops tcp_sock_ipv6_specific = { | 1784 | static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { |
1784 | .md5_lookup = tcp_v6_md5_lookup, | 1785 | .md5_lookup = tcp_v6_md5_lookup, |
1785 | .calc_md5_hash = tcp_v6_md5_hash_skb, | 1786 | .calc_md5_hash = tcp_v6_md5_hash_skb, |
1786 | .md5_add = tcp_v6_md5_add_func, | 1787 | .md5_add = tcp_v6_md5_add_func, |
@@ -1792,7 +1793,7 @@ static struct tcp_sock_af_ops tcp_sock_ipv6_specific = { | |||
1792 | * TCP over IPv4 via INET6 API | 1793 | * TCP over IPv4 via INET6 API |
1793 | */ | 1794 | */ |
1794 | 1795 | ||
1795 | static struct inet_connection_sock_af_ops ipv6_mapped = { | 1796 | static const struct inet_connection_sock_af_ops ipv6_mapped = { |
1796 | .queue_xmit = ip_queue_xmit, | 1797 | .queue_xmit = ip_queue_xmit, |
1797 | .send_check = tcp_v4_send_check, | 1798 | .send_check = tcp_v4_send_check, |
1798 | .rebuild_header = inet_sk_rebuild_header, | 1799 | .rebuild_header = inet_sk_rebuild_header, |
@@ -1812,7 +1813,7 @@ static struct inet_connection_sock_af_ops ipv6_mapped = { | |||
1812 | }; | 1813 | }; |
1813 | 1814 | ||
1814 | #ifdef CONFIG_TCP_MD5SIG | 1815 | #ifdef CONFIG_TCP_MD5SIG |
1815 | static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { | 1816 | static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { |
1816 | .md5_lookup = tcp_v4_md5_lookup, | 1817 | .md5_lookup = tcp_v4_md5_lookup, |
1817 | .calc_md5_hash = tcp_v4_md5_hash_skb, | 1818 | .calc_md5_hash = tcp_v4_md5_hash_skb, |
1818 | .md5_add = tcp_v6_md5_add_func, | 1819 | .md5_add = tcp_v6_md5_add_func, |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 33b59bd92c4d..164040613c2e 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -638,6 +638,47 @@ static void udp_v6_flush_pending_frames(struct sock *sk) | |||
638 | } | 638 | } |
639 | } | 639 | } |
640 | 640 | ||
641 | /** | ||
642 | * udp6_hwcsum_outgoing - handle outgoing HW checksumming | ||
643 | * @sk: socket we are sending on | ||
644 | * @skb: sk_buff containing the filled-in UDP header | ||
645 | * (checksum field must be zeroed out) | ||
646 | */ | ||
647 | static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, | ||
648 | const struct in6_addr *saddr, | ||
649 | const struct in6_addr *daddr, int len) | ||
650 | { | ||
651 | unsigned int offset; | ||
652 | struct udphdr *uh = udp_hdr(skb); | ||
653 | __wsum csum = 0; | ||
654 | |||
655 | if (skb_queue_len(&sk->sk_write_queue) == 1) { | ||
656 | /* Only one fragment on the socket. */ | ||
657 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
658 | skb->csum_offset = offsetof(struct udphdr, check); | ||
659 | uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0); | ||
660 | } else { | ||
661 | /* | ||
662 | * HW checksum won't work as there are two or more | ||
663 | * fragments on the socket, so the checksums of all | ||
664 | * sk_buffs have to be combined in software | ||
665 | */ | ||
666 | offset = skb_transport_offset(skb); | ||
667 | skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
668 | |||
669 | skb->ip_summed = CHECKSUM_NONE; | ||
670 | |||
671 | skb_queue_walk(&sk->sk_write_queue, skb) { | ||
672 | csum = csum_add(csum, skb->csum); | ||
673 | } | ||
674 | |||
675 | uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, | ||
676 | csum); | ||
677 | if (uh->check == 0) | ||
678 | uh->check = CSUM_MANGLED_0; | ||
679 | } | ||
680 | } | ||
681 | |||
641 | /* | 682 | /* |
642 | * Sending | 683 | * Sending |
643 | */ | 684 | */ |
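udp6_hwcsum_outgoing() above seeds the checksum field with the negated IPv6 pseudo-header sum when hardware offload can be used, and on the software path substitutes CSUM_MANGLED_0 (0xffff) for a computed checksum of zero, since an on-the-wire UDP checksum of zero means "no checksum was computed", which IPv6 does not allow. A small userspace check of that fold-and-substitute rule; the accumulator value is contrived to hit the zero case.

    #include <stdint.h>
    #include <stdio.h>

    /* One's-complement fold of a 32-bit accumulator into 16 bits. */
    static uint16_t csum_fold16(uint32_t sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);
            sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            uint32_t acc = 0xffff;          /* a sum that folds to zero */
            uint16_t check = csum_fold16(acc);

            if (check == 0)
                    check = 0xffff;         /* the kernel's CSUM_MANGLED_0 */
            printf("transmitted checksum: 0x%04x\n", check);
            return 0;
    }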
@@ -668,7 +709,11 @@ static int udp_v6_push_pending_frames(struct sock *sk) | |||
668 | 709 | ||
669 | if (is_udplite) | 710 | if (is_udplite) |
670 | csum = udplite_csum_outgoing(sk, skb); | 711 | csum = udplite_csum_outgoing(sk, skb); |
671 | else | 712 | else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ |
713 | udp6_hwcsum_outgoing(sk, skb, &fl->fl6_src, &fl->fl6_dst, | ||
714 | up->len); | ||
715 | goto send; | ||
716 | } else | ||
672 | csum = udp_csum_outgoing(sk, skb); | 717 | csum = udp_csum_outgoing(sk, skb); |
673 | 718 | ||
674 | /* add protocol-dependent pseudo-header */ | 719 | /* add protocol-dependent pseudo-header */ |
@@ -677,13 +722,20 @@ static int udp_v6_push_pending_frames(struct sock *sk) | |||
677 | if (uh->check == 0) | 722 | if (uh->check == 0) |
678 | uh->check = CSUM_MANGLED_0; | 723 | uh->check = CSUM_MANGLED_0; |
679 | 724 | ||
725 | send: | ||
680 | err = ip6_push_pending_frames(sk); | 726 | err = ip6_push_pending_frames(sk); |
727 | if (err) { | ||
728 | if (err == -ENOBUFS && !inet6_sk(sk)->recverr) { | ||
729 | UDP6_INC_STATS_USER(sock_net(sk), | ||
730 | UDP_MIB_SNDBUFERRORS, is_udplite); | ||
731 | err = 0; | ||
732 | } | ||
733 | } else | ||
734 | UDP6_INC_STATS_USER(sock_net(sk), | ||
735 | UDP_MIB_OUTDATAGRAMS, is_udplite); | ||
681 | out: | 736 | out: |
682 | up->len = 0; | 737 | up->len = 0; |
683 | up->pending = 0; | 738 | up->pending = 0; |
684 | if (!err) | ||
685 | UDP6_INC_STATS_USER(sock_net(sk), | ||
686 | UDP_MIB_OUTDATAGRAMS, is_udplite); | ||
687 | return err; | 739 | return err; |
688 | } | 740 | } |
689 | 741 | ||
@@ -900,11 +952,8 @@ do_udp_sendmsg: | |||
900 | hlimit = ip6_dst_hoplimit(dst); | 952 | hlimit = ip6_dst_hoplimit(dst); |
901 | } | 953 | } |
902 | 954 | ||
903 | if (tclass < 0) { | 955 | if (tclass < 0) |
904 | tclass = np->tclass; | 956 | tclass = np->tclass; |
905 | if (tclass < 0) | ||
906 | tclass = 0; | ||
907 | } | ||
908 | 957 | ||
909 | if (msg->msg_flags&MSG_CONFIRM) | 958 | if (msg->msg_flags&MSG_CONFIRM) |
910 | goto do_confirm; | 959 | goto do_confirm; |
@@ -1032,9 +1081,102 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, | |||
1032 | } | 1081 | } |
1033 | #endif | 1082 | #endif |
1034 | 1083 | ||
1084 | static int udp6_ufo_send_check(struct sk_buff *skb) | ||
1085 | { | ||
1086 | struct ipv6hdr *ipv6h; | ||
1087 | struct udphdr *uh; | ||
1088 | |||
1089 | if (!pskb_may_pull(skb, sizeof(*uh))) | ||
1090 | return -EINVAL; | ||
1091 | |||
1092 | ipv6h = ipv6_hdr(skb); | ||
1093 | uh = udp_hdr(skb); | ||
1094 | |||
1095 | uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, | ||
1096 | IPPROTO_UDP, 0); | ||
1097 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
1098 | skb->csum_offset = offsetof(struct udphdr, check); | ||
1099 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
1100 | return 0; | ||
1101 | } | ||
1102 | |||
1103 | static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features) | ||
1104 | { | ||
1105 | struct sk_buff *segs = ERR_PTR(-EINVAL); | ||
1106 | unsigned int mss; | ||
1107 | unsigned int unfrag_ip6hlen, unfrag_len; | ||
1108 | struct frag_hdr *fptr; | ||
1109 | u8 *mac_start, *prevhdr; | ||
1110 | u8 nexthdr; | ||
1111 | u8 frag_hdr_sz = sizeof(struct frag_hdr); | ||
1112 | int offset; | ||
1113 | __wsum csum; | ||
1114 | |||
1115 | mss = skb_shinfo(skb)->gso_size; | ||
1116 | if (unlikely(skb->len <= mss)) | ||
1117 | goto out; | ||
1118 | |||
1119 | if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { | ||
1120 | /* Packet is from an untrusted source, reset gso_segs. */ | ||
1121 | int type = skb_shinfo(skb)->gso_type; | ||
1122 | |||
1123 | if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) || | ||
1124 | !(type & (SKB_GSO_UDP)))) | ||
1125 | goto out; | ||
1126 | |||
1127 | skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); | ||
1128 | |||
1129 | segs = NULL; | ||
1130 | goto out; | ||
1131 | } | ||
1132 | |||
1133 | /* Do software UFO. Complete and fill in the UDP checksum as HW cannot | ||
1134 | * do checksum of UDP packets sent as multiple IP fragments. | ||
1135 | */ | ||
1136 | offset = skb->csum_start - skb_headroom(skb); | ||
1137 | csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
1138 | offset += skb->csum_offset; | ||
1139 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | ||
1140 | skb->ip_summed = CHECKSUM_NONE; | ||
1141 | |||
1142 | /* Check if there is enough headroom to insert fragment header. */ | ||
1143 | if ((skb_headroom(skb) < frag_hdr_sz) && | ||
1144 | pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) | ||
1145 | goto out; | ||
1146 | |||
1147 | /* Find the unfragmentable header and shift it left by frag_hdr_sz | ||
1148 | * bytes to insert fragment header. | ||
1149 | */ | ||
1150 | unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); | ||
1151 | nexthdr = *prevhdr; | ||
1152 | *prevhdr = NEXTHDR_FRAGMENT; | ||
1153 | unfrag_len = skb_network_header(skb) - skb_mac_header(skb) + | ||
1154 | unfrag_ip6hlen; | ||
1155 | mac_start = skb_mac_header(skb); | ||
1156 | memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len); | ||
1157 | |||
1158 | skb->mac_header -= frag_hdr_sz; | ||
1159 | skb->network_header -= frag_hdr_sz; | ||
1160 | |||
1161 | fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); | ||
1162 | fptr->nexthdr = nexthdr; | ||
1163 | fptr->reserved = 0; | ||
1164 | ipv6_select_ident(fptr); | ||
1165 | |||
1166 | /* Fragment the skb. ipv6 header and the remaining fields of the | ||
1167 | * fragment header are updated in ipv6_gso_segment() | ||
1168 | */ | ||
1169 | segs = skb_segment(skb, features); | ||
1170 | |||
1171 | out: | ||
1172 | return segs; | ||
1173 | } | ||
1174 | |||
1035 | static struct inet6_protocol udpv6_protocol = { | 1175 | static struct inet6_protocol udpv6_protocol = { |
1036 | .handler = udpv6_rcv, | 1176 | .handler = udpv6_rcv, |
1037 | .err_handler = udpv6_err, | 1177 | .err_handler = udpv6_err, |
1178 | .gso_send_check = udp6_ufo_send_check, | ||
1179 | .gso_segment = udp6_ufo_fragment, | ||
1038 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, | 1180 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, |
1039 | }; | 1181 | }; |
1040 | 1182 | ||
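udp6_ufo_fragment() above either just recomputes gso_segs as a ceiling division of the packet length by the MSS (when the device can segment the datagram itself) or completes the UDP checksum in software and inserts an IPv6 fragment header before calling skb_segment(). A quick userspace check of the ceiling-division arithmetic, with assumed example numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int len = 65000, mss = 1432;   /* example datagram and MSS */

            printf("%u byte datagram, %u byte mss -> %u segments\n",
                   len, mss, DIV_ROUND_UP(len, mss));
            return 0;
    }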
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 3a3c677bc0f2..8ec3d45cd1d9 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -306,9 +306,26 @@ static void xfrm6_policy_fini(void) | |||
306 | xfrm_policy_unregister_afinfo(&xfrm6_policy_afinfo); | 306 | xfrm_policy_unregister_afinfo(&xfrm6_policy_afinfo); |
307 | } | 307 | } |
308 | 308 | ||
309 | #ifdef CONFIG_SYSCTL | ||
310 | static struct ctl_table xfrm6_policy_table[] = { | ||
311 | { | ||
312 | .ctl_name = CTL_UNNUMBERED, | ||
313 | .procname = "xfrm6_gc_thresh", | ||
314 | .data = &xfrm6_dst_ops.gc_thresh, | ||
315 | .maxlen = sizeof(int), | ||
316 | .mode = 0644, | ||
317 | .proc_handler = proc_dointvec, | ||
318 | }, | ||
319 | { } | ||
320 | }; | ||
321 | |||
322 | static struct ctl_table_header *sysctl_hdr; | ||
323 | #endif | ||
324 | |||
309 | int __init xfrm6_init(void) | 325 | int __init xfrm6_init(void) |
310 | { | 326 | { |
311 | int ret; | 327 | int ret; |
328 | unsigned int gc_thresh; | ||
312 | 329 | ||
313 | ret = xfrm6_policy_init(); | 330 | ret = xfrm6_policy_init(); |
314 | if (ret) | 331 | if (ret) |
@@ -317,6 +334,23 @@ int __init xfrm6_init(void) | |||
317 | ret = xfrm6_state_init(); | 334 | ret = xfrm6_state_init(); |
318 | if (ret) | 335 | if (ret) |
319 | goto out_policy; | 336 | goto out_policy; |
337 | /* | ||
338 | * We need a good default value for the xfrm6 gc threshold. | ||
339 | * In ipv4 we set it to the route hash table size * 8, which | ||
340 | * is half the size of the maximum route cache for ipv4. It | ||
341 | * would be good to do the same thing for v6, except the table is | ||
342 | * constructed differently here. Here each table for a net namespace | ||
343 | * can have FIB6_TABLE_HASHSZ entries, so let's go with the same | ||
344 | * computation that we used for ipv4 here. Also, let's keep the initial | ||
345 | * gc_thresh to a minimum of 1024, since the ipv6 route cache defaults | ||
346 | * to that as a minimum as well | ||
347 | */ | ||
348 | gc_thresh = FIB6_TABLE_HASHSZ * 8; | ||
349 | xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh; | ||
350 | #ifdef CONFIG_SYSCTL | ||
351 | sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, | ||
352 | xfrm6_policy_table); | ||
353 | #endif | ||
320 | out: | 354 | out: |
321 | return ret; | 355 | return ret; |
322 | out_policy: | 356 | out_policy: |
@@ -326,6 +360,10 @@ out_policy: | |||
326 | 360 | ||
327 | void xfrm6_fini(void) | 361 | void xfrm6_fini(void) |
328 | { | 362 | { |
363 | #ifdef CONFIG_SYSCTL | ||
364 | if (sysctl_hdr) | ||
365 | unregister_net_sysctl_table(sysctl_hdr); | ||
366 | #endif | ||
329 | //xfrm6_input_fini(); | 367 | //xfrm6_input_fini(); |
330 | xfrm6_policy_fini(); | 368 | xfrm6_policy_fini(); |
331 | xfrm6_state_fini(); | 369 | xfrm6_state_fini(); |
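xfrm6_init() now derives a default garbage-collection threshold from the per-namespace FIB table hash size, clamps it to a floor of 1024 to match the IPv6 route-cache minimum, and exposes it through the new xfrm6_gc_thresh sysctl. A worked example of that computation; FIB6_TABLE_HASHSZ is an assumed stand-in value here, the real constant depends on the kernel configuration.

    #include <stdio.h>

    #define FIB6_TABLE_HASHSZ 256   /* assumed example value */

    int main(void)
    {
            unsigned int gc_thresh = FIB6_TABLE_HASHSZ * 8;   /* 2048 */

            if (gc_thresh < 1024)
                    gc_thresh = 1024;       /* keep the ipv6 route-cache floor */
            printf("xfrm6 gc_thresh default: %u\n", gc_thresh);
            return 0;
    }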
diff --git a/net/irda/ircomm/ircomm_event.c b/net/irda/ircomm/ircomm_event.c index c35b3ef5c2f0..d78554fedbac 100644 --- a/net/irda/ircomm/ircomm_event.c +++ b/net/irda/ircomm/ircomm_event.c | |||
@@ -49,7 +49,7 @@ static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event, | |||
49 | static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event, | 49 | static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event, |
50 | struct sk_buff *skb, struct ircomm_info *info); | 50 | struct sk_buff *skb, struct ircomm_info *info); |
51 | 51 | ||
52 | char *ircomm_state[] = { | 52 | const char *const ircomm_state[] = { |
53 | "IRCOMM_IDLE", | 53 | "IRCOMM_IDLE", |
54 | "IRCOMM_WAITI", | 54 | "IRCOMM_WAITI", |
55 | "IRCOMM_WAITR", | 55 | "IRCOMM_WAITR", |
@@ -57,7 +57,7 @@ char *ircomm_state[] = { | |||
57 | }; | 57 | }; |
58 | 58 | ||
59 | #ifdef CONFIG_IRDA_DEBUG | 59 | #ifdef CONFIG_IRDA_DEBUG |
60 | static char *ircomm_event[] = { | 60 | static const char *const ircomm_event[] = { |
61 | "IRCOMM_CONNECT_REQUEST", | 61 | "IRCOMM_CONNECT_REQUEST", |
62 | "IRCOMM_CONNECT_RESPONSE", | 62 | "IRCOMM_CONNECT_RESPONSE", |
63 | "IRCOMM_TTP_CONNECT_INDICATION", | 63 | "IRCOMM_TTP_CONNECT_INDICATION", |
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c index 9032a1d1190d..eafc010907c2 100644 --- a/net/irda/ircomm/ircomm_tty_attach.c +++ b/net/irda/ircomm/ircomm_tty_attach.c | |||
@@ -80,7 +80,7 @@ static int ircomm_tty_state_ready(struct ircomm_tty_cb *self, | |||
80 | struct sk_buff *skb, | 80 | struct sk_buff *skb, |
81 | struct ircomm_tty_info *info); | 81 | struct ircomm_tty_info *info); |
82 | 82 | ||
83 | char *ircomm_tty_state[] = { | 83 | const char *const ircomm_tty_state[] = { |
84 | "IRCOMM_TTY_IDLE", | 84 | "IRCOMM_TTY_IDLE", |
85 | "IRCOMM_TTY_SEARCH", | 85 | "IRCOMM_TTY_SEARCH", |
86 | "IRCOMM_TTY_QUERY_PARAMETERS", | 86 | "IRCOMM_TTY_QUERY_PARAMETERS", |
@@ -91,7 +91,7 @@ char *ircomm_tty_state[] = { | |||
91 | }; | 91 | }; |
92 | 92 | ||
93 | #ifdef CONFIG_IRDA_DEBUG | 93 | #ifdef CONFIG_IRDA_DEBUG |
94 | static char *ircomm_tty_event[] = { | 94 | static const char *const ircomm_tty_event[] = { |
95 | "IRCOMM_TTY_ATTACH_CABLE", | 95 | "IRCOMM_TTY_ATTACH_CABLE", |
96 | "IRCOMM_TTY_DETACH_CABLE", | 96 | "IRCOMM_TTY_DETACH_CABLE", |
97 | "IRCOMM_TTY_DATA_REQUEST", | 97 | "IRCOMM_TTY_DATA_REQUEST", |
diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 4a105dc32dcd..294e34d3517c 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c | |||
@@ -44,7 +44,7 @@ | |||
44 | 44 | ||
45 | #ifdef CONFIG_IRDA_DEBUG | 45 | #ifdef CONFIG_IRDA_DEBUG |
46 | /* FIXME: This one should go in irlmp.c */ | 46 | /* FIXME: This one should go in irlmp.c */ |
47 | static const char *ias_charset_types[] = { | 47 | static const char *const ias_charset_types[] = { |
48 | "CS_ASCII", | 48 | "CS_ASCII", |
49 | "CS_ISO_8859_1", | 49 | "CS_ISO_8859_1", |
50 | "CS_ISO_8859_2", | 50 | "CS_ISO_8859_2", |
@@ -966,7 +966,7 @@ static void iriap_watchdog_timer_expired(void *data) | |||
966 | 966 | ||
967 | #ifdef CONFIG_PROC_FS | 967 | #ifdef CONFIG_PROC_FS |
968 | 968 | ||
969 | static const char *ias_value_types[] = { | 969 | static const char *const ias_value_types[] = { |
970 | "IAS_MISSING", | 970 | "IAS_MISSING", |
971 | "IAS_INTEGER", | 971 | "IAS_INTEGER", |
972 | "IAS_OCT_SEQ", | 972 | "IAS_OCT_SEQ", |
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index 774d73a76852..62116829b817 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c | |||
@@ -69,14 +69,14 @@ static int eth; /* Use "eth" or "irlan" name for devices */ | |||
69 | static int access = ACCESS_PEER; /* PEER, DIRECT or HOSTED */ | 69 | static int access = ACCESS_PEER; /* PEER, DIRECT or HOSTED */ |
70 | 70 | ||
71 | #ifdef CONFIG_PROC_FS | 71 | #ifdef CONFIG_PROC_FS |
72 | static const char *irlan_access[] = { | 72 | static const char *const irlan_access[] = { |
73 | "UNKNOWN", | 73 | "UNKNOWN", |
74 | "DIRECT", | 74 | "DIRECT", |
75 | "PEER", | 75 | "PEER", |
76 | "HOSTED" | 76 | "HOSTED" |
77 | }; | 77 | }; |
78 | 78 | ||
79 | static const char *irlan_media[] = { | 79 | static const char *const irlan_media[] = { |
80 | "UNKNOWN", | 80 | "UNKNOWN", |
81 | "802.3", | 81 | "802.3", |
82 | "802.5" | 82 | "802.5" |
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index 724bcf951b80..7b6b631f647f 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c | |||
@@ -41,7 +41,8 @@ | |||
41 | 41 | ||
42 | static int irlan_eth_open(struct net_device *dev); | 42 | static int irlan_eth_open(struct net_device *dev); |
43 | static int irlan_eth_close(struct net_device *dev); | 43 | static int irlan_eth_close(struct net_device *dev); |
44 | static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev); | 44 | static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, |
45 | struct net_device *dev); | ||
45 | static void irlan_eth_set_multicast_list( struct net_device *dev); | 46 | static void irlan_eth_set_multicast_list( struct net_device *dev); |
46 | static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev); | 47 | static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev); |
47 | 48 | ||
@@ -162,7 +163,8 @@ static int irlan_eth_close(struct net_device *dev) | |||
162 | * Transmits ethernet frames over IrDA link. | 163 | * Transmits ethernet frames over IrDA link. |
163 | * | 164 | * |
164 | */ | 165 | */ |
165 | static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev) | 166 | static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, |
167 | struct net_device *dev) | ||
166 | { | 168 | { |
167 | struct irlan_cb *self = netdev_priv(dev); | 169 | struct irlan_cb *self = netdev_priv(dev); |
168 | int ret; | 170 | int ret; |
@@ -177,7 +179,7 @@ static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
177 | 179 | ||
178 | /* Did the realloc succeed? */ | 180 | /* Did the realloc succeed? */ |
179 | if (new_skb == NULL) | 181 | if (new_skb == NULL) |
180 | return 0; | 182 | return NETDEV_TX_OK; |
181 | 183 | ||
182 | /* Use the new skb instead */ | 184 | /* Use the new skb instead */ |
183 | skb = new_skb; | 185 | skb = new_skb; |
@@ -209,7 +211,7 @@ static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
209 | self->stats.tx_bytes += skb->len; | 211 | self->stats.tx_bytes += skb->len; |
210 | } | 212 | } |
211 | 213 | ||
212 | return 0; | 214 | return NETDEV_TX_OK; |
213 | } | 215 | } |
214 | 216 | ||
215 | /* | 217 | /* |
diff --git a/net/irda/irlap.c b/net/irda/irlap.c index e4965b764b9b..356e65b1dc42 100644 --- a/net/irda/irlap.c +++ b/net/irda/irlap.c | |||
@@ -63,7 +63,7 @@ static void irlap_init_qos_capabilities(struct irlap_cb *self, | |||
63 | struct qos_info *qos_user); | 63 | struct qos_info *qos_user); |
64 | 64 | ||
65 | #ifdef CONFIG_IRDA_DEBUG | 65 | #ifdef CONFIG_IRDA_DEBUG |
66 | static char *lap_reasons[] = { | 66 | static const char *const lap_reasons[] = { |
67 | "ERROR, NOT USED", | 67 | "ERROR, NOT USED", |
68 | "LAP_DISC_INDICATION", | 68 | "LAP_DISC_INDICATION", |
69 | "LAP_NO_RESPONSE", | 69 | "LAP_NO_RESPONSE", |
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c index 16c4ef0f5c1a..c5c51959e3ce 100644 --- a/net/irda/irlap_event.c +++ b/net/irda/irlap_event.c | |||
@@ -78,7 +78,7 @@ static int irlap_state_reset_check(struct irlap_cb *, IRLAP_EVENT event, | |||
78 | struct sk_buff *, struct irlap_info *); | 78 | struct sk_buff *, struct irlap_info *); |
79 | 79 | ||
80 | #ifdef CONFIG_IRDA_DEBUG | 80 | #ifdef CONFIG_IRDA_DEBUG |
81 | static const char *irlap_event[] = { | 81 | static const char *const irlap_event[] = { |
82 | "DISCOVERY_REQUEST", | 82 | "DISCOVERY_REQUEST", |
83 | "CONNECT_REQUEST", | 83 | "CONNECT_REQUEST", |
84 | "CONNECT_RESPONSE", | 84 | "CONNECT_RESPONSE", |
@@ -120,7 +120,7 @@ static const char *irlap_event[] = { | |||
120 | }; | 120 | }; |
121 | #endif /* CONFIG_IRDA_DEBUG */ | 121 | #endif /* CONFIG_IRDA_DEBUG */ |
122 | 122 | ||
123 | const char *irlap_state[] = { | 123 | const char *const irlap_state[] = { |
124 | "LAP_NDM", | 124 | "LAP_NDM", |
125 | "LAP_QUERY", | 125 | "LAP_QUERY", |
126 | "LAP_REPLY", | 126 | "LAP_REPLY", |
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c index 78cce0cb073f..c1fb5db81042 100644 --- a/net/irda/irlmp_event.c +++ b/net/irda/irlmp_event.c | |||
@@ -33,13 +33,13 @@ | |||
33 | #include <net/irda/irlmp_frame.h> | 33 | #include <net/irda/irlmp_frame.h> |
34 | #include <net/irda/irlmp_event.h> | 34 | #include <net/irda/irlmp_event.h> |
35 | 35 | ||
36 | const char *irlmp_state[] = { | 36 | const char *const irlmp_state[] = { |
37 | "LAP_STANDBY", | 37 | "LAP_STANDBY", |
38 | "LAP_U_CONNECT", | 38 | "LAP_U_CONNECT", |
39 | "LAP_ACTIVE", | 39 | "LAP_ACTIVE", |
40 | }; | 40 | }; |
41 | 41 | ||
42 | const char *irlsap_state[] = { | 42 | const char *const irlsap_state[] = { |
43 | "LSAP_DISCONNECTED", | 43 | "LSAP_DISCONNECTED", |
44 | "LSAP_CONNECT", | 44 | "LSAP_CONNECT", |
45 | "LSAP_CONNECT_PEND", | 45 | "LSAP_CONNECT_PEND", |
@@ -49,7 +49,7 @@ const char *irlsap_state[] = { | |||
49 | }; | 49 | }; |
50 | 50 | ||
51 | #ifdef CONFIG_IRDA_DEBUG | 51 | #ifdef CONFIG_IRDA_DEBUG |
52 | static const char *irlmp_event[] = { | 52 | static const char *const irlmp_event[] = { |
53 | "LM_CONNECT_REQUEST", | 53 | "LM_CONNECT_REQUEST", |
54 | "LM_CONNECT_CONFIRM", | 54 | "LM_CONNECT_CONFIRM", |
55 | "LM_CONNECT_RESPONSE", | 55 | "LM_CONNECT_RESPONSE", |
diff --git a/net/irda/irnet/irnet_ppp.h b/net/irda/irnet/irnet_ppp.h index d9f8bd4ebd05..b5df2418f90c 100644 --- a/net/irda/irnet/irnet_ppp.h +++ b/net/irda/irnet/irnet_ppp.h | |||
@@ -95,7 +95,7 @@ static int | |||
95 | /**************************** VARIABLES ****************************/ | 95 | /**************************** VARIABLES ****************************/ |
96 | 96 | ||
97 | /* Filesystem callbacks (to call us) */ | 97 | /* Filesystem callbacks (to call us) */ |
98 | static struct file_operations irnet_device_fops = | 98 | static const struct file_operations irnet_device_fops = |
99 | { | 99 | { |
100 | .owner = THIS_MODULE, | 100 | .owner = THIS_MODULE, |
101 | .read = dev_irnet_read, | 101 | .read = dev_irnet_read, |
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c index 8dd7ed7e7c1f..476b307bd801 100644 --- a/net/irda/irnetlink.c +++ b/net/irda/irnetlink.c | |||
@@ -115,7 +115,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info) | |||
115 | 115 | ||
116 | genlmsg_end(msg, hdr); | 116 | genlmsg_end(msg, hdr); |
117 | 117 | ||
118 | return genlmsg_unicast(msg, info->snd_pid); | 118 | return genlmsg_reply(msg, info); |
119 | 119 | ||
120 | err_out: | 120 | err_out: |
121 | nlmsg_free(msg); | 121 | nlmsg_free(msg); |
diff --git a/net/irda/irproc.c b/net/irda/irproc.c index 8ff1861649e8..318766e5dbdf 100644 --- a/net/irda/irproc.c +++ b/net/irda/irproc.c | |||
@@ -34,21 +34,21 @@ | |||
34 | #include <net/irda/irlap.h> | 34 | #include <net/irda/irlap.h> |
35 | #include <net/irda/irlmp.h> | 35 | #include <net/irda/irlmp.h> |
36 | 36 | ||
37 | extern struct file_operations discovery_seq_fops; | 37 | extern const struct file_operations discovery_seq_fops; |
38 | extern struct file_operations irlap_seq_fops; | 38 | extern const struct file_operations irlap_seq_fops; |
39 | extern struct file_operations irlmp_seq_fops; | 39 | extern const struct file_operations irlmp_seq_fops; |
40 | extern struct file_operations irttp_seq_fops; | 40 | extern const struct file_operations irttp_seq_fops; |
41 | extern struct file_operations irias_seq_fops; | 41 | extern const struct file_operations irias_seq_fops; |
42 | 42 | ||
43 | struct irda_entry { | 43 | struct irda_entry { |
44 | const char *name; | 44 | const char *name; |
45 | struct file_operations *fops; | 45 | const struct file_operations *fops; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | struct proc_dir_entry *proc_irda; | 48 | struct proc_dir_entry *proc_irda; |
49 | EXPORT_SYMBOL(proc_irda); | 49 | EXPORT_SYMBOL(proc_irda); |
50 | 50 | ||
51 | static struct irda_entry irda_dirs[] = { | 51 | static const struct irda_entry irda_dirs[] = { |
52 | {"discovery", &discovery_seq_fops}, | 52 | {"discovery", &discovery_seq_fops}, |
53 | {"irttp", &irttp_seq_fops}, | 53 | {"irttp", &irttp_seq_fops}, |
54 | {"irlmp", &irlmp_seq_fops}, | 54 | {"irlmp", &irlmp_seq_fops}, |
diff --git a/net/key/af_key.c b/net/key/af_key.c index dba9abd27f90..4e98193dfa0f 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -3705,7 +3705,7 @@ static void pfkey_seq_stop(struct seq_file *f, void *v) | |||
3705 | read_unlock(&pfkey_table_lock); | 3705 | read_unlock(&pfkey_table_lock); |
3706 | } | 3706 | } |
3707 | 3707 | ||
3708 | static struct seq_operations pfkey_seq_ops = { | 3708 | static const struct seq_operations pfkey_seq_ops = { |
3709 | .start = pfkey_seq_start, | 3709 | .start = pfkey_seq_start, |
3710 | .next = pfkey_seq_next, | 3710 | .next = pfkey_seq_next, |
3711 | .stop = pfkey_seq_stop, | 3711 | .stop = pfkey_seq_stop, |
@@ -3718,7 +3718,7 @@ static int pfkey_seq_open(struct inode *inode, struct file *file) | |||
3718 | sizeof(struct seq_net_private)); | 3718 | sizeof(struct seq_net_private)); |
3719 | } | 3719 | } |
3720 | 3720 | ||
3721 | static struct file_operations pfkey_proc_ops = { | 3721 | static const struct file_operations pfkey_proc_ops = { |
3722 | .open = pfkey_seq_open, | 3722 | .open = pfkey_seq_open, |
3723 | .read = seq_read, | 3723 | .read = seq_read, |
3724 | .llseek = seq_lseek, | 3724 | .llseek = seq_lseek, |
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index 2ba1bc4f3c3a..bda96d18fd98 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c | |||
@@ -407,7 +407,7 @@ int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb) | |||
407 | return lapb->callbacks.data_indication(lapb->dev, skb); | 407 | return lapb->callbacks.data_indication(lapb->dev, skb); |
408 | 408 | ||
409 | kfree_skb(skb); | 409 | kfree_skb(skb); |
410 | return NET_RX_CN_HIGH; /* For now; must be != NET_RX_DROP */ | 410 | return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */ |
411 | } | 411 | } |
412 | 412 | ||
413 | int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb) | 413 | int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb) |
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c index f97be471fe2e..be47ac427f6b 100644 --- a/net/llc/llc_proc.c +++ b/net/llc/llc_proc.c | |||
@@ -143,7 +143,7 @@ out: | |||
143 | return 0; | 143 | return 0; |
144 | } | 144 | } |
145 | 145 | ||
146 | static char *llc_conn_state_names[] = { | 146 | static const char *const llc_conn_state_names[] = { |
147 | [LLC_CONN_STATE_ADM] = "adm", | 147 | [LLC_CONN_STATE_ADM] = "adm", |
148 | [LLC_CONN_STATE_SETUP] = "setup", | 148 | [LLC_CONN_STATE_SETUP] = "setup", |
149 | [LLC_CONN_STATE_NORMAL] = "normal", | 149 | [LLC_CONN_STATE_NORMAL] = "normal", |
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 7836ee928983..4d5543af3123 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -6,7 +6,6 @@ config MAC80211 | |||
6 | select CRYPTO_ARC4 | 6 | select CRYPTO_ARC4 |
7 | select CRYPTO_AES | 7 | select CRYPTO_AES |
8 | select CRC32 | 8 | select CRC32 |
9 | select WIRELESS_EXT | ||
10 | ---help--- | 9 | ---help--- |
11 | This option enables the hardware independent IEEE 802.11 | 10 | This option enables the hardware independent IEEE 802.11 |
12 | networking stack. | 11 | networking stack. |
@@ -14,24 +13,7 @@ config MAC80211 | |||
14 | comment "CFG80211 needs to be enabled for MAC80211" | 13 | comment "CFG80211 needs to be enabled for MAC80211" |
15 | depends on CFG80211=n | 14 | depends on CFG80211=n |
16 | 15 | ||
17 | config MAC80211_DEFAULT_PS | 16 | if MAC80211 != n |
18 | bool "enable powersave by default" | ||
19 | depends on MAC80211 | ||
20 | default y | ||
21 | help | ||
22 | This option enables powersave mode by default. | ||
23 | |||
24 | If this causes your applications to misbehave you should fix your | ||
25 | applications instead -- they need to register their network | ||
26 | latency requirement, see Documentation/power/pm_qos_interface.txt. | ||
27 | |||
28 | config MAC80211_DEFAULT_PS_VALUE | ||
29 | int | ||
30 | default 1 if MAC80211_DEFAULT_PS | ||
31 | default 0 | ||
32 | |||
33 | menu "Rate control algorithm selection" | ||
34 | depends on MAC80211 != n | ||
35 | 17 | ||
36 | config MAC80211_RC_PID | 18 | config MAC80211_RC_PID |
37 | bool "PID controller based rate control algorithm" if EMBEDDED | 19 | bool "PID controller based rate control algorithm" if EMBEDDED |
@@ -78,17 +60,17 @@ config MAC80211_RC_DEFAULT | |||
78 | default "pid" if MAC80211_RC_DEFAULT_PID | 60 | default "pid" if MAC80211_RC_DEFAULT_PID |
79 | default "" | 61 | default "" |
80 | 62 | ||
81 | endmenu | 63 | endif |
82 | 64 | ||
83 | config MAC80211_MESH | 65 | config MAC80211_MESH |
84 | bool "Enable mac80211 mesh networking (pre-802.11s) support" | 66 | bool "Enable mac80211 mesh networking (pre-802.11s) support" |
85 | depends on MAC80211 && EXPERIMENTAL | 67 | depends on MAC80211 && EXPERIMENTAL |
86 | depends on BROKEN | ||
87 | ---help--- | 68 | ---help--- |
88 | This option enables support of Draft 802.11s mesh networking. | 69 | This option enables support of Draft 802.11s mesh networking. |
89 | The implementation is based on Draft 1.08 of the Mesh Networking | 70 | The implementation is based on Draft 2.08 of the Mesh Networking |
90 | amendment. For more information visit http://o11s.org/. | 71 | amendment. However, no compliance with that draft is claimed or even |
91 | 72 | possible, as drafts leave a number of identifiers to be defined after | |
73 | ratification. For more information visit http://o11s.org/. | ||
92 | 74 | ||
93 | config MAC80211_LEDS | 75 | config MAC80211_LEDS |
94 | bool "Enable LED triggers" | 76 | bool "Enable LED triggers" |
@@ -222,3 +204,15 @@ config MAC80211_DEBUG_COUNTERS | |||
222 | and show them in debugfs. | 204 | and show them in debugfs. |
223 | 205 | ||
224 | If unsure, say N. | 206 | If unsure, say N. |
207 | |||
208 | config MAC80211_DRIVER_API_TRACER | ||
209 | bool "Driver API tracer" | ||
210 | depends on MAC80211_DEBUG_MENU | ||
211 | depends on EVENT_TRACING | ||
212 | help | ||
213 | Say Y here to make mac80211 register with the ftrace | ||
214 | framework for the driver API -- you can then see which | ||
215 | driver methods it is calling by looking at the | ||
216 | trace. | ||
217 | |||
218 | If unsure, say N. | ||
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 0e3ab88bb706..9f3cf7129324 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile | |||
@@ -3,7 +3,6 @@ obj-$(CONFIG_MAC80211) += mac80211.o | |||
3 | # mac80211 objects | 3 | # mac80211 objects |
4 | mac80211-y := \ | 4 | mac80211-y := \ |
5 | main.o \ | 5 | main.o \ |
6 | wext.o \ | ||
7 | sta_info.o \ | 6 | sta_info.o \ |
8 | wep.o \ | 7 | wep.o \ |
9 | wpa.o \ | 8 | wpa.o \ |
@@ -41,6 +40,9 @@ mac80211-$(CONFIG_MAC80211_MESH) += \ | |||
41 | 40 | ||
42 | mac80211-$(CONFIG_PM) += pm.o | 41 | mac80211-$(CONFIG_PM) += pm.o |
43 | 42 | ||
43 | mac80211-$(CONFIG_MAC80211_DRIVER_API_TRACER) += driver-trace.o | ||
44 | CFLAGS_driver-trace.o := -I$(src) | ||
45 | |||
44 | # objects for PID algorithm | 46 | # objects for PID algorithm |
45 | rc80211_pid-y := rc80211_pid_algo.o | 47 | rc80211_pid-y := rc80211_pid_algo.o |
46 | rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o | 48 | rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index a24e59816b93..bd765f30dba2 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -391,9 +391,6 @@ static void ieee80211_agg_splice_packets(struct ieee80211_local *local, | |||
391 | 391 | ||
392 | if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) { | 392 | if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) { |
393 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | 393 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); |
394 | /* mark queue as pending, it is stopped already */ | ||
395 | __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING, | ||
396 | &local->queue_stop_reasons[queue]); | ||
397 | /* copy over remaining packets */ | 394 | /* copy over remaining packets */ |
398 | skb_queue_splice_tail_init( | 395 | skb_queue_splice_tail_init( |
399 | &sta->ampdu_mlme.tid_tx[tid]->pending, | 396 | &sta->ampdu_mlme.tid_tx[tid]->pending, |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 3f47276caeb8..5608f6c68413 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -57,36 +57,21 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name, | |||
57 | return 0; | 57 | return 0; |
58 | } | 58 | } |
59 | 59 | ||
60 | static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) | 60 | static int ieee80211_del_iface(struct wiphy *wiphy, struct net_device *dev) |
61 | { | 61 | { |
62 | struct net_device *dev; | 62 | ieee80211_if_remove(IEEE80211_DEV_TO_SUB_IF(dev)); |
63 | struct ieee80211_sub_if_data *sdata; | ||
64 | |||
65 | /* we're under RTNL */ | ||
66 | dev = __dev_get_by_index(&init_net, ifindex); | ||
67 | if (!dev) | ||
68 | return -ENODEV; | ||
69 | |||
70 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
71 | |||
72 | ieee80211_if_remove(sdata); | ||
73 | 63 | ||
74 | return 0; | 64 | return 0; |
75 | } | 65 | } |
76 | 66 | ||
77 | static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex, | 67 | static int ieee80211_change_iface(struct wiphy *wiphy, |
68 | struct net_device *dev, | ||
78 | enum nl80211_iftype type, u32 *flags, | 69 | enum nl80211_iftype type, u32 *flags, |
79 | struct vif_params *params) | 70 | struct vif_params *params) |
80 | { | 71 | { |
81 | struct net_device *dev; | ||
82 | struct ieee80211_sub_if_data *sdata; | 72 | struct ieee80211_sub_if_data *sdata; |
83 | int ret; | 73 | int ret; |
84 | 74 | ||
85 | /* we're under RTNL */ | ||
86 | dev = __dev_get_by_index(&init_net, ifindex); | ||
87 | if (!dev) | ||
88 | return -ENODEV; | ||
89 | |||
90 | if (!nl80211_type_check(type)) | 75 | if (!nl80211_type_check(type)) |
91 | return -EINVAL; | 76 | return -EINVAL; |
92 | 77 | ||
@@ -338,6 +323,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) | |||
338 | { | 323 | { |
339 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 324 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
340 | 325 | ||
326 | sinfo->generation = sdata->local->sta_generation; | ||
327 | |||
341 | sinfo->filled = STATION_INFO_INACTIVE_TIME | | 328 | sinfo->filled = STATION_INFO_INACTIVE_TIME | |
342 | STATION_INFO_RX_BYTES | | 329 | STATION_INFO_RX_BYTES | |
343 | STATION_INFO_TX_BYTES | | 330 | STATION_INFO_TX_BYTES | |
@@ -924,6 +911,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, | |||
924 | else | 911 | else |
925 | memset(next_hop, 0, ETH_ALEN); | 912 | memset(next_hop, 0, ETH_ALEN); |
926 | 913 | ||
914 | pinfo->generation = mesh_paths_generation; | ||
915 | |||
927 | pinfo->filled = MPATH_INFO_FRAME_QLEN | | 916 | pinfo->filled = MPATH_INFO_FRAME_QLEN | |
928 | MPATH_INFO_DSN | | 917 | MPATH_INFO_DSN | |
929 | MPATH_INFO_METRIC | | 918 | MPATH_INFO_METRIC | |
@@ -1177,123 +1166,29 @@ static int ieee80211_scan(struct wiphy *wiphy, | |||
1177 | static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, | 1166 | static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, |
1178 | struct cfg80211_auth_request *req) | 1167 | struct cfg80211_auth_request *req) |
1179 | { | 1168 | { |
1180 | struct ieee80211_sub_if_data *sdata; | 1169 | return ieee80211_mgd_auth(IEEE80211_DEV_TO_SUB_IF(dev), req); |
1181 | |||
1182 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1183 | |||
1184 | switch (req->auth_type) { | ||
1185 | case NL80211_AUTHTYPE_OPEN_SYSTEM: | ||
1186 | sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_OPEN; | ||
1187 | break; | ||
1188 | case NL80211_AUTHTYPE_SHARED_KEY: | ||
1189 | sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_SHARED_KEY; | ||
1190 | break; | ||
1191 | case NL80211_AUTHTYPE_FT: | ||
1192 | sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_FT; | ||
1193 | break; | ||
1194 | case NL80211_AUTHTYPE_NETWORK_EAP: | ||
1195 | sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_LEAP; | ||
1196 | break; | ||
1197 | default: | ||
1198 | return -EOPNOTSUPP; | ||
1199 | } | ||
1200 | |||
1201 | memcpy(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN); | ||
1202 | sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; | ||
1203 | sdata->u.mgd.flags |= IEEE80211_STA_BSSID_SET; | ||
1204 | |||
1205 | /* TODO: req->chan */ | ||
1206 | sdata->u.mgd.flags |= IEEE80211_STA_AUTO_CHANNEL_SEL; | ||
1207 | |||
1208 | if (req->ssid) { | ||
1209 | sdata->u.mgd.flags |= IEEE80211_STA_SSID_SET; | ||
1210 | memcpy(sdata->u.mgd.ssid, req->ssid, req->ssid_len); | ||
1211 | sdata->u.mgd.ssid_len = req->ssid_len; | ||
1212 | sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; | ||
1213 | } | ||
1214 | |||
1215 | kfree(sdata->u.mgd.sme_auth_ie); | ||
1216 | sdata->u.mgd.sme_auth_ie = NULL; | ||
1217 | sdata->u.mgd.sme_auth_ie_len = 0; | ||
1218 | if (req->ie) { | ||
1219 | sdata->u.mgd.sme_auth_ie = kmalloc(req->ie_len, GFP_KERNEL); | ||
1220 | if (sdata->u.mgd.sme_auth_ie == NULL) | ||
1221 | return -ENOMEM; | ||
1222 | memcpy(sdata->u.mgd.sme_auth_ie, req->ie, req->ie_len); | ||
1223 | sdata->u.mgd.sme_auth_ie_len = req->ie_len; | ||
1224 | } | ||
1225 | |||
1226 | sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME; | ||
1227 | sdata->u.mgd.state = IEEE80211_STA_MLME_DIRECT_PROBE; | ||
1228 | ieee80211_sta_req_auth(sdata); | ||
1229 | return 0; | ||
1230 | } | 1170 | } |
1231 | 1171 | ||
1232 | static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, | 1172 | static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, |
1233 | struct cfg80211_assoc_request *req) | 1173 | struct cfg80211_assoc_request *req) |
1234 | { | 1174 | { |
1235 | struct ieee80211_sub_if_data *sdata; | 1175 | return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req); |
1236 | int ret; | ||
1237 | |||
1238 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1239 | |||
1240 | if (memcmp(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN) != 0 || | ||
1241 | !(sdata->u.mgd.flags & IEEE80211_STA_AUTHENTICATED)) | ||
1242 | return -ENOLINK; /* not authenticated */ | ||
1243 | |||
1244 | sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; | ||
1245 | sdata->u.mgd.flags |= IEEE80211_STA_BSSID_SET; | ||
1246 | |||
1247 | /* TODO: req->chan */ | ||
1248 | sdata->u.mgd.flags |= IEEE80211_STA_AUTO_CHANNEL_SEL; | ||
1249 | |||
1250 | if (req->ssid) { | ||
1251 | sdata->u.mgd.flags |= IEEE80211_STA_SSID_SET; | ||
1252 | memcpy(sdata->u.mgd.ssid, req->ssid, req->ssid_len); | ||
1253 | sdata->u.mgd.ssid_len = req->ssid_len; | ||
1254 | sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; | ||
1255 | } else | ||
1256 | sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL; | ||
1257 | |||
1258 | ret = ieee80211_sta_set_extra_ie(sdata, req->ie, req->ie_len); | ||
1259 | if (ret && ret != -EALREADY) | ||
1260 | return ret; | ||
1261 | |||
1262 | if (req->use_mfp) { | ||
1263 | sdata->u.mgd.mfp = IEEE80211_MFP_REQUIRED; | ||
1264 | sdata->u.mgd.flags |= IEEE80211_STA_MFP_ENABLED; | ||
1265 | } else { | ||
1266 | sdata->u.mgd.mfp = IEEE80211_MFP_DISABLED; | ||
1267 | sdata->u.mgd.flags &= ~IEEE80211_STA_MFP_ENABLED; | ||
1268 | } | ||
1269 | |||
1270 | if (req->control_port) | ||
1271 | sdata->u.mgd.flags |= IEEE80211_STA_CONTROL_PORT; | ||
1272 | else | ||
1273 | sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT; | ||
1274 | |||
1275 | sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME; | ||
1276 | sdata->u.mgd.state = IEEE80211_STA_MLME_ASSOCIATE; | ||
1277 | ieee80211_sta_req_auth(sdata); | ||
1278 | return 0; | ||
1279 | } | 1176 | } |
1280 | 1177 | ||
1281 | static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, | 1178 | static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, |
1282 | struct cfg80211_deauth_request *req) | 1179 | struct cfg80211_deauth_request *req, |
1180 | void *cookie) | ||
1283 | { | 1181 | { |
1284 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1182 | return ieee80211_mgd_deauth(IEEE80211_DEV_TO_SUB_IF(dev), |
1285 | 1183 | req, cookie); | |
1286 | /* TODO: req->ie, req->peer_addr */ | ||
1287 | return ieee80211_sta_deauthenticate(sdata, req->reason_code); | ||
1288 | } | 1184 | } |
1289 | 1185 | ||
1290 | static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, | 1186 | static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, |
1291 | struct cfg80211_disassoc_request *req) | 1187 | struct cfg80211_disassoc_request *req, |
1188 | void *cookie) | ||
1292 | { | 1189 | { |
1293 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1190 | return ieee80211_mgd_disassoc(IEEE80211_DEV_TO_SUB_IF(dev), |
1294 | 1191 | req, cookie); | |
1295 | /* TODO: req->ie, req->peer_addr */ | ||
1296 | return ieee80211_sta_disassociate(sdata, req->reason_code); | ||
1297 | } | 1192 | } |
1298 | 1193 | ||
1299 | static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, | 1194 | static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, |
@@ -1374,6 +1269,16 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy, int *dbm) | |||
1374 | return 0; | 1269 | return 0; |
1375 | } | 1270 | } |
1376 | 1271 | ||
1272 | static int ieee80211_set_wds_peer(struct wiphy *wiphy, struct net_device *dev, | ||
1273 | u8 *addr) | ||
1274 | { | ||
1275 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1276 | |||
1277 | memcpy(&sdata->u.wds.remote_addr, addr, ETH_ALEN); | ||
1278 | |||
1279 | return 0; | ||
1280 | } | ||
1281 | |||
1377 | static void ieee80211_rfkill_poll(struct wiphy *wiphy) | 1282 | static void ieee80211_rfkill_poll(struct wiphy *wiphy) |
1378 | { | 1283 | { |
1379 | struct ieee80211_local *local = wiphy_priv(wiphy); | 1284 | struct ieee80211_local *local = wiphy_priv(wiphy); |
@@ -1381,6 +1286,85 @@ static void ieee80211_rfkill_poll(struct wiphy *wiphy) | |||
1381 | drv_rfkill_poll(local); | 1286 | drv_rfkill_poll(local); |
1382 | } | 1287 | } |
1383 | 1288 | ||
1289 | #ifdef CONFIG_NL80211_TESTMODE | ||
1290 | static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len) | ||
1291 | { | ||
1292 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
1293 | |||
1294 | if (!local->ops->testmode_cmd) | ||
1295 | return -EOPNOTSUPP; | ||
1296 | |||
1297 | return local->ops->testmode_cmd(&local->hw, data, len); | ||
1298 | } | ||
1299 | #endif | ||
1300 | |||
1301 | static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | ||
1302 | bool enabled, int timeout) | ||
1303 | { | ||
1304 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1305 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1306 | struct ieee80211_conf *conf = &local->hw.conf; | ||
1307 | |||
1308 | if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) | ||
1309 | return -EOPNOTSUPP; | ||
1310 | |||
1311 | if (enabled == sdata->u.mgd.powersave && | ||
1312 | timeout == conf->dynamic_ps_timeout) | ||
1313 | return 0; | ||
1314 | |||
1315 | sdata->u.mgd.powersave = enabled; | ||
1316 | conf->dynamic_ps_timeout = timeout; | ||
1317 | |||
1318 | if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) | ||
1319 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); | ||
1320 | |||
1321 | ieee80211_recalc_ps(local, -1); | ||
1322 | |||
1323 | return 0; | ||
1324 | } | ||
1325 | |||
1326 | static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, | ||
1327 | struct net_device *dev, | ||
1328 | const u8 *addr, | ||
1329 | const struct cfg80211_bitrate_mask *mask) | ||
1330 | { | ||
1331 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1332 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
1333 | int i, err = -EINVAL; | ||
1334 | u32 target_rate; | ||
1335 | struct ieee80211_supported_band *sband; | ||
1336 | |||
1337 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
1338 | |||
1339 | /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates | ||
1340 | * target_rate = X, rate->fixed = 1 means only rate X | ||
1341 | * target_rate = X, rate->fixed = 0 means all rates <= X */ | ||
1342 | sdata->max_ratectrl_rateidx = -1; | ||
1343 | sdata->force_unicast_rateidx = -1; | ||
1344 | |||
1345 | if (mask->fixed) | ||
1346 | target_rate = mask->fixed / 100; | ||
1347 | else if (mask->maxrate) | ||
1348 | target_rate = mask->maxrate / 100; | ||
1349 | else | ||
1350 | return 0; | ||
1351 | |||
1352 | for (i = 0; i < sband->n_bitrates; i++) { | ||
1353 | struct ieee80211_rate *brate = &sband->bitrates[i]; | ||
1354 | int this_rate = brate->bitrate; | ||
1355 | |||
1356 | if (target_rate == this_rate) { | ||
1357 | sdata->max_ratectrl_rateidx = i; | ||
1358 | if (mask->fixed) | ||
1359 | sdata->force_unicast_rateidx = i; | ||
1360 | err = 0; | ||
1361 | break; | ||
1362 | } | ||
1363 | } | ||
1364 | |||
1365 | return err; | ||
1366 | } | ||
1367 | |||
1384 | struct cfg80211_ops mac80211_config_ops = { | 1368 | struct cfg80211_ops mac80211_config_ops = { |
1385 | .add_virtual_intf = ieee80211_add_iface, | 1369 | .add_virtual_intf = ieee80211_add_iface, |
1386 | .del_virtual_intf = ieee80211_del_iface, | 1370 | .del_virtual_intf = ieee80211_del_iface, |
@@ -1422,5 +1406,9 @@ struct cfg80211_ops mac80211_config_ops = { | |||
1422 | .set_wiphy_params = ieee80211_set_wiphy_params, | 1406 | .set_wiphy_params = ieee80211_set_wiphy_params, |
1423 | .set_tx_power = ieee80211_set_tx_power, | 1407 | .set_tx_power = ieee80211_set_tx_power, |
1424 | .get_tx_power = ieee80211_get_tx_power, | 1408 | .get_tx_power = ieee80211_get_tx_power, |
1409 | .set_wds_peer = ieee80211_set_wds_peer, | ||
1425 | .rfkill_poll = ieee80211_rfkill_poll, | 1410 | .rfkill_poll = ieee80211_rfkill_poll, |
1411 | CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd) | ||
1412 | .set_power_mgmt = ieee80211_set_power_mgmt, | ||
1413 | .set_bitrate_mask = ieee80211_set_bitrate_mask, | ||
1426 | }; | 1414 | }; |
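The new ieee80211_set_bitrate_mask() hook above maps the requested rate onto an index in the band's bitrate table: an exact match for a fixed rate sets both max_ratectrl_rateidx and force_unicast_rateidx, a maximum rate only caps max_ratectrl_rateidx, and no match leaves -EINVAL. A minimal stand-alone sketch of that lookup follows, with an invented rate table in the 100 kbps units used by struct ieee80211_rate; it illustrates the loop above and is not mac80211 code.

    #include <stdio.h>

    int main(void)
    {
            /* bitrates in units of 100 kbps, as in struct ieee80211_rate */
            const int bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180 };
            const int n = sizeof(bitrates) / sizeof(bitrates[0]);
            int target_rate = 11000 / 100;  /* a fixed 11 Mb/s request */
            int max_idx = -1, force_idx = -1;
            int i;

            for (i = 0; i < n; i++) {
                    if (bitrates[i] == target_rate) {
                            max_idx = i;    /* cap for rate control */
                            force_idx = i;  /* fixed rate: also force unicast */
                            break;
                    }
            }

            printf("max_ratectrl_rateidx=%d force_unicast_rateidx=%d\n",
                   max_idx, force_idx);
            return max_idx < 0;  /* no exact match would be -EINVAL above */
    }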
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 6c439cd5ccea..96991b68f048 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c | |||
@@ -175,7 +175,7 @@ static ssize_t queues_read(struct file *file, char __user *user_buf, | |||
175 | for (q = 0; q < local->hw.queues; q++) | 175 | for (q = 0; q < local->hw.queues; q++) |
176 | res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q, | 176 | res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q, |
177 | local->queue_stop_reasons[q], | 177 | local->queue_stop_reasons[q], |
178 | __netif_subqueue_stopped(local->mdev, q)); | 178 | skb_queue_len(&local->pending[q])); |
179 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | 179 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); |
180 | 180 | ||
181 | return simple_read_from_buffer(user_buf, count, ppos, buf, res); | 181 | return simple_read_from_buffer(user_buf, count, ppos, buf, res); |
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index e3420329f4e6..61234e79022b 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c | |||
@@ -95,33 +95,9 @@ IEEE80211_IF_FILE(force_unicast_rateidx, force_unicast_rateidx, DEC); | |||
95 | IEEE80211_IF_FILE(max_ratectrl_rateidx, max_ratectrl_rateidx, DEC); | 95 | IEEE80211_IF_FILE(max_ratectrl_rateidx, max_ratectrl_rateidx, DEC); |
96 | 96 | ||
97 | /* STA attributes */ | 97 | /* STA attributes */ |
98 | IEEE80211_IF_FILE(state, u.mgd.state, DEC); | ||
99 | IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); | 98 | IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); |
100 | IEEE80211_IF_FILE(prev_bssid, u.mgd.prev_bssid, MAC); | ||
101 | IEEE80211_IF_FILE(ssid_len, u.mgd.ssid_len, SIZE); | ||
102 | IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); | 99 | IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); |
103 | IEEE80211_IF_FILE(ap_capab, u.mgd.ap_capab, HEX); | ||
104 | IEEE80211_IF_FILE(capab, u.mgd.capab, HEX); | 100 | IEEE80211_IF_FILE(capab, u.mgd.capab, HEX); |
105 | IEEE80211_IF_FILE(extra_ie_len, u.mgd.extra_ie_len, SIZE); | ||
106 | IEEE80211_IF_FILE(auth_tries, u.mgd.auth_tries, DEC); | ||
107 | IEEE80211_IF_FILE(assoc_tries, u.mgd.assoc_tries, DEC); | ||
108 | IEEE80211_IF_FILE(auth_algs, u.mgd.auth_algs, HEX); | ||
109 | IEEE80211_IF_FILE(auth_alg, u.mgd.auth_alg, DEC); | ||
110 | IEEE80211_IF_FILE(auth_transaction, u.mgd.auth_transaction, DEC); | ||
111 | |||
112 | static ssize_t ieee80211_if_fmt_flags( | ||
113 | const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) | ||
114 | { | ||
115 | return scnprintf(buf, buflen, "%s%s%s%s%s%s%s\n", | ||
116 | sdata->u.mgd.flags & IEEE80211_STA_SSID_SET ? "SSID\n" : "", | ||
117 | sdata->u.mgd.flags & IEEE80211_STA_BSSID_SET ? "BSSID\n" : "", | ||
118 | sdata->u.mgd.flags & IEEE80211_STA_PREV_BSSID_SET ? "prev BSSID\n" : "", | ||
119 | sdata->u.mgd.flags & IEEE80211_STA_AUTHENTICATED ? "AUTH\n" : "", | ||
120 | sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED ? "ASSOC\n" : "", | ||
121 | sdata->u.mgd.flags & IEEE80211_STA_PROBEREQ_POLL ? "PROBEREQ POLL\n" : "", | ||
122 | sdata->vif.bss_conf.use_cts_prot ? "CTS prot\n" : ""); | ||
123 | } | ||
124 | __IEEE80211_IF_FILE(flags); | ||
125 | 101 | ||
126 | /* AP attributes */ | 102 | /* AP attributes */ |
127 | IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); | 103 | IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); |
@@ -140,6 +116,8 @@ IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); | |||
140 | 116 | ||
141 | #ifdef CONFIG_MAC80211_MESH | 117 | #ifdef CONFIG_MAC80211_MESH |
142 | /* Mesh stats attributes */ | 118 | /* Mesh stats attributes */ |
119 | IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC); | ||
120 | IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC); | ||
143 | IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); | 121 | IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); |
144 | IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); | 122 | IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); |
145 | IEEE80211_IF_FILE(dropped_frames_no_route, | 123 | IEEE80211_IF_FILE(dropped_frames_no_route, |
@@ -184,20 +162,9 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) | |||
184 | DEBUGFS_ADD(force_unicast_rateidx, sta); | 162 | DEBUGFS_ADD(force_unicast_rateidx, sta); |
185 | DEBUGFS_ADD(max_ratectrl_rateidx, sta); | 163 | DEBUGFS_ADD(max_ratectrl_rateidx, sta); |
186 | 164 | ||
187 | DEBUGFS_ADD(state, sta); | ||
188 | DEBUGFS_ADD(bssid, sta); | 165 | DEBUGFS_ADD(bssid, sta); |
189 | DEBUGFS_ADD(prev_bssid, sta); | ||
190 | DEBUGFS_ADD(ssid_len, sta); | ||
191 | DEBUGFS_ADD(aid, sta); | 166 | DEBUGFS_ADD(aid, sta); |
192 | DEBUGFS_ADD(ap_capab, sta); | ||
193 | DEBUGFS_ADD(capab, sta); | 167 | DEBUGFS_ADD(capab, sta); |
194 | DEBUGFS_ADD(extra_ie_len, sta); | ||
195 | DEBUGFS_ADD(auth_tries, sta); | ||
196 | DEBUGFS_ADD(assoc_tries, sta); | ||
197 | DEBUGFS_ADD(auth_algs, sta); | ||
198 | DEBUGFS_ADD(auth_alg, sta); | ||
199 | DEBUGFS_ADD(auth_transaction, sta); | ||
200 | DEBUGFS_ADD(flags, sta); | ||
201 | } | 168 | } |
202 | 169 | ||
203 | static void add_ap_files(struct ieee80211_sub_if_data *sdata) | 170 | static void add_ap_files(struct ieee80211_sub_if_data *sdata) |
@@ -240,6 +207,8 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) | |||
240 | { | 207 | { |
241 | sdata->mesh_stats_dir = debugfs_create_dir("mesh_stats", | 208 | sdata->mesh_stats_dir = debugfs_create_dir("mesh_stats", |
242 | sdata->debugfsdir); | 209 | sdata->debugfsdir); |
210 | MESHSTATS_ADD(fwded_mcast); | ||
211 | MESHSTATS_ADD(fwded_unicast); | ||
243 | MESHSTATS_ADD(fwded_frames); | 212 | MESHSTATS_ADD(fwded_frames); |
244 | MESHSTATS_ADD(dropped_frames_ttl); | 213 | MESHSTATS_ADD(dropped_frames_ttl); |
245 | MESHSTATS_ADD(dropped_frames_no_route); | 214 | MESHSTATS_ADD(dropped_frames_no_route); |
@@ -317,20 +286,9 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata) | |||
317 | DEBUGFS_DEL(force_unicast_rateidx, sta); | 286 | DEBUGFS_DEL(force_unicast_rateidx, sta); |
318 | DEBUGFS_DEL(max_ratectrl_rateidx, sta); | 287 | DEBUGFS_DEL(max_ratectrl_rateidx, sta); |
319 | 288 | ||
320 | DEBUGFS_DEL(state, sta); | ||
321 | DEBUGFS_DEL(bssid, sta); | 289 | DEBUGFS_DEL(bssid, sta); |
322 | DEBUGFS_DEL(prev_bssid, sta); | ||
323 | DEBUGFS_DEL(ssid_len, sta); | ||
324 | DEBUGFS_DEL(aid, sta); | 290 | DEBUGFS_DEL(aid, sta); |
325 | DEBUGFS_DEL(ap_capab, sta); | ||
326 | DEBUGFS_DEL(capab, sta); | 291 | DEBUGFS_DEL(capab, sta); |
327 | DEBUGFS_DEL(extra_ie_len, sta); | ||
328 | DEBUGFS_DEL(auth_tries, sta); | ||
329 | DEBUGFS_DEL(assoc_tries, sta); | ||
330 | DEBUGFS_DEL(auth_algs, sta); | ||
331 | DEBUGFS_DEL(auth_alg, sta); | ||
332 | DEBUGFS_DEL(auth_transaction, sta); | ||
333 | DEBUGFS_DEL(flags, sta); | ||
334 | } | 292 | } |
335 | 293 | ||
336 | static void del_ap_files(struct ieee80211_sub_if_data *sdata) | 294 | static void del_ap_files(struct ieee80211_sub_if_data *sdata) |
@@ -373,6 +331,8 @@ static void del_monitor_files(struct ieee80211_sub_if_data *sdata) | |||
373 | 331 | ||
374 | static void del_mesh_stats(struct ieee80211_sub_if_data *sdata) | 332 | static void del_mesh_stats(struct ieee80211_sub_if_data *sdata) |
375 | { | 333 | { |
334 | MESHSTATS_DEL(fwded_mcast); | ||
335 | MESHSTATS_DEL(fwded_unicast); | ||
376 | MESHSTATS_DEL(fwded_frames); | 336 | MESHSTATS_DEL(fwded_frames); |
377 | MESHSTATS_DEL(dropped_frames_ttl); | 337 | MESHSTATS_DEL(dropped_frames_ttl); |
378 | MESHSTATS_DEL(dropped_frames_no_route); | 338 | MESHSTATS_DEL(dropped_frames_no_route); |
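The two new forwarding counters are wired up through the same declarative helpers as the existing mesh statistics: one IEEE80211_IF_FILE()/MESHSTATS_ADD()/MESHSTATS_DEL() invocation per field generates its formatter and debugfs entry. A small user-space sketch of that one-macro-per-field idea follows; the real macro bodies in debugfs_netdev.c differ in detail, so the names and shapes here are illustrative only.

    #include <stdio.h>

    struct mesh_stats {
            unsigned int fwded_mcast, fwded_unicast, fwded_frames;
    };

    /* one invocation per exported counter generates fmt_<name>() */
    #define MESH_STAT_FMT(name)                                             \
    static int fmt_##name(const struct mesh_stats *s, char *buf, int len)   \
    {                                                                       \
            return snprintf(buf, len, "%u\n", s->name);                     \
    }

    MESH_STAT_FMT(fwded_mcast)      /* counter added by this hunk */
    MESH_STAT_FMT(fwded_unicast)    /* counter added by this hunk */
    MESH_STAT_FMT(fwded_frames)     /* pre-existing counter */

    int main(void)
    {
            struct mesh_stats s = { .fwded_mcast = 3, .fwded_unicast = 7,
                                    .fwded_frames = 10 };
            char buf[32];

            fmt_fwded_mcast(&s, buf, sizeof(buf));
            fputs(buf, stdout);
            fmt_fwded_unicast(&s, buf, sizeof(buf));
            fputs(buf, stdout);
            fmt_fwded_frames(&s, buf, sizeof(buf));
            fputs(buf, stdout);
            return 0;
    }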
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 90230c718b5b..33a2e892115b 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -120,45 +120,38 @@ STA_OPS(last_seq_ctrl); | |||
120 | static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, | 120 | static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, |
121 | size_t count, loff_t *ppos) | 121 | size_t count, loff_t *ppos) |
122 | { | 122 | { |
123 | char buf[768], *p = buf; | 123 | char buf[30 + STA_TID_NUM * 70], *p = buf; |
124 | int i; | 124 | int i; |
125 | struct sta_info *sta = file->private_data; | 125 | struct sta_info *sta = file->private_data; |
126 | p += scnprintf(p, sizeof(buf)+buf-p, "Agg state for STA is:\n"); | ||
127 | p += scnprintf(p, sizeof(buf)+buf-p, " STA next dialog_token is %d \n " | ||
128 | "TIDs info is: \n TID :", | ||
129 | (sta->ampdu_mlme.dialog_token_allocator + 1)); | ||
130 | for (i = 0; i < STA_TID_NUM; i++) | ||
131 | p += scnprintf(p, sizeof(buf)+buf-p, "%5d", i); | ||
132 | |||
133 | p += scnprintf(p, sizeof(buf)+buf-p, "\n RX :"); | ||
134 | for (i = 0; i < STA_TID_NUM; i++) | ||
135 | p += scnprintf(p, sizeof(buf)+buf-p, "%5d", | ||
136 | sta->ampdu_mlme.tid_state_rx[i]); | ||
137 | |||
138 | p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:"); | ||
139 | for (i = 0; i < STA_TID_NUM; i++) | ||
140 | p += scnprintf(p, sizeof(buf)+buf-p, "%5d", | ||
141 | sta->ampdu_mlme.tid_state_rx[i] ? | ||
142 | sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); | ||
143 | |||
144 | p += scnprintf(p, sizeof(buf)+buf-p, "\n TX :"); | ||
145 | for (i = 0; i < STA_TID_NUM; i++) | ||
146 | p += scnprintf(p, sizeof(buf)+buf-p, "%5d", | ||
147 | sta->ampdu_mlme.tid_state_tx[i]); | ||
148 | |||
149 | p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:"); | ||
150 | for (i = 0; i < STA_TID_NUM; i++) | ||
151 | p += scnprintf(p, sizeof(buf)+buf-p, "%5d", | ||
152 | sta->ampdu_mlme.tid_state_tx[i] ? | ||
153 | sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); | ||
154 | |||
155 | p += scnprintf(p, sizeof(buf)+buf-p, "\n SSN :"); | ||
156 | for (i = 0; i < STA_TID_NUM; i++) | ||
157 | p += scnprintf(p, sizeof(buf)+buf-p, "%5d", | ||
158 | sta->ampdu_mlme.tid_state_tx[i] ? | ||
159 | sta->ampdu_mlme.tid_tx[i]->ssn : 0); | ||
160 | 126 | ||
161 | p += scnprintf(p, sizeof(buf)+buf-p, "\n"); | 127 | spin_lock_bh(&sta->lock); |
128 | p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n", | ||
129 | sta->ampdu_mlme.dialog_token_allocator + 1); | ||
130 | for (i = 0; i < STA_TID_NUM; i++) { | ||
131 | p += scnprintf(p, sizeof(buf)+buf-p, "TID %02d:", i); | ||
132 | p += scnprintf(p, sizeof(buf)+buf-p, " RX=%x", | ||
133 | sta->ampdu_mlme.tid_state_rx[i]); | ||
134 | p += scnprintf(p, sizeof(buf)+buf-p, "/DTKN=%#.2x", | ||
135 | sta->ampdu_mlme.tid_state_rx[i] ? | ||
136 | sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); | ||
137 | p += scnprintf(p, sizeof(buf)+buf-p, "/SSN=%#.3x", | ||
138 | sta->ampdu_mlme.tid_state_rx[i] ? | ||
139 | sta->ampdu_mlme.tid_rx[i]->ssn : 0); | ||
140 | |||
141 | p += scnprintf(p, sizeof(buf)+buf-p, " TX=%x", | ||
142 | sta->ampdu_mlme.tid_state_tx[i]); | ||
143 | p += scnprintf(p, sizeof(buf)+buf-p, "/DTKN=%#.2x", | ||
144 | sta->ampdu_mlme.tid_state_tx[i] ? | ||
145 | sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); | ||
146 | p += scnprintf(p, sizeof(buf)+buf-p, "/SSN=%#.3x", | ||
147 | sta->ampdu_mlme.tid_state_tx[i] ? | ||
148 | sta->ampdu_mlme.tid_tx[i]->ssn : 0); | ||
149 | p += scnprintf(p, sizeof(buf)+buf-p, "/pending=%03d", | ||
150 | sta->ampdu_mlme.tid_state_tx[i] ? | ||
151 | skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0); | ||
152 | p += scnprintf(p, sizeof(buf)+buf-p, "\n"); | ||
153 | } | ||
154 | spin_unlock_bh(&sta->lock); | ||
162 | 155 | ||
163 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); | 156 | return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); |
164 | } | 157 | } |
@@ -203,6 +196,22 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) | |||
203 | DEBUGFS_ADD(inactive_ms); | 196 | DEBUGFS_ADD(inactive_ms); |
204 | DEBUGFS_ADD(last_seq_ctrl); | 197 | DEBUGFS_ADD(last_seq_ctrl); |
205 | DEBUGFS_ADD(agg_status); | 198 | DEBUGFS_ADD(agg_status); |
199 | DEBUGFS_ADD(dev); | ||
200 | DEBUGFS_ADD(rx_packets); | ||
201 | DEBUGFS_ADD(tx_packets); | ||
202 | DEBUGFS_ADD(rx_bytes); | ||
203 | DEBUGFS_ADD(tx_bytes); | ||
204 | DEBUGFS_ADD(rx_duplicates); | ||
205 | DEBUGFS_ADD(rx_fragments); | ||
206 | DEBUGFS_ADD(rx_dropped); | ||
207 | DEBUGFS_ADD(tx_fragments); | ||
208 | DEBUGFS_ADD(tx_filtered); | ||
209 | DEBUGFS_ADD(tx_retry_failed); | ||
210 | DEBUGFS_ADD(tx_retry_count); | ||
211 | DEBUGFS_ADD(last_signal); | ||
212 | DEBUGFS_ADD(last_qual); | ||
213 | DEBUGFS_ADD(last_noise); | ||
214 | DEBUGFS_ADD(wep_weak_iv_count); | ||
206 | } | 215 | } |
207 | 216 | ||
208 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) | 217 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) |
@@ -212,6 +221,23 @@ void ieee80211_sta_debugfs_remove(struct sta_info *sta) | |||
212 | DEBUGFS_DEL(inactive_ms); | 221 | DEBUGFS_DEL(inactive_ms); |
213 | DEBUGFS_DEL(last_seq_ctrl); | 222 | DEBUGFS_DEL(last_seq_ctrl); |
214 | DEBUGFS_DEL(agg_status); | 223 | DEBUGFS_DEL(agg_status); |
224 | DEBUGFS_DEL(aid); | ||
225 | DEBUGFS_DEL(dev); | ||
226 | DEBUGFS_DEL(rx_packets); | ||
227 | DEBUGFS_DEL(tx_packets); | ||
228 | DEBUGFS_DEL(rx_bytes); | ||
229 | DEBUGFS_DEL(tx_bytes); | ||
230 | DEBUGFS_DEL(rx_duplicates); | ||
231 | DEBUGFS_DEL(rx_fragments); | ||
232 | DEBUGFS_DEL(rx_dropped); | ||
233 | DEBUGFS_DEL(tx_fragments); | ||
234 | DEBUGFS_DEL(tx_filtered); | ||
235 | DEBUGFS_DEL(tx_retry_failed); | ||
236 | DEBUGFS_DEL(tx_retry_count); | ||
237 | DEBUGFS_DEL(last_signal); | ||
238 | DEBUGFS_DEL(last_qual); | ||
239 | DEBUGFS_DEL(last_noise); | ||
240 | DEBUGFS_DEL(wep_weak_iv_count); | ||
215 | 241 | ||
216 | debugfs_remove(sta->debugfs.dir); | 242 | debugfs_remove(sta->debugfs.dir); |
217 | sta->debugfs.dir = NULL; | 243 | sta->debugfs.dir = NULL; |
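The rewritten sta_agg_status_read() above sizes its buffer as 30 + STA_TID_NUM * 70 bytes and builds the output with the usual "p += scnprintf(p, sizeof(buf)+buf-p, ...)" accumulation, which stays in bounds because the kernel's scnprintf() returns the number of bytes actually written. The stand-alone sketch below mimics that accumulation; the wrapper only approximates the kernel helper (plain snprintf() returns the would-be length and would push p past the end of the buffer on truncation).

    #include <stdarg.h>
    #include <stdio.h>

    /* user-space approximation of the kernel's scnprintf(): bytes written */
    static int scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
            va_list ap;
            int n;

            if (size == 0)
                    return 0;
            va_start(ap, fmt);
            n = vsnprintf(buf, size, fmt, ap);
            va_end(ap);
            if (n < 0)
                    return 0;
            return n >= (int)size ? (int)size - 1 : n;
    }

    int main(void)
    {
            enum { STA_TID_NUM = 16 };
            char buf[30 + STA_TID_NUM * 70], *p = buf;  /* same sizing idea */
            int i;

            p += scnprintf(p, sizeof(buf) + buf - p,
                           "next dialog_token is %#02x\n", 0x1);
            for (i = 0; i < STA_TID_NUM; i++)
                    p += scnprintf(p, sizeof(buf) + buf - p,
                                   "TID %02d: RX=%x TX=%x\n", i, 0, 0);

            fwrite(buf, 1, p - buf, stdout);
            return 0;
    }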
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index b13446afd48f..020a94a31106 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <net/mac80211.h> | 4 | #include <net/mac80211.h> |
5 | #include "ieee80211_i.h" | 5 | #include "ieee80211_i.h" |
6 | #include "driver-trace.h" | ||
6 | 7 | ||
7 | static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb) | 8 | static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb) |
8 | { | 9 | { |
@@ -11,29 +12,49 @@ static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb) | |||
11 | 12 | ||
12 | static inline int drv_start(struct ieee80211_local *local) | 13 | static inline int drv_start(struct ieee80211_local *local) |
13 | { | 14 | { |
14 | return local->ops->start(&local->hw); | 15 | int ret; |
16 | |||
17 | local->started = true; | ||
18 | smp_mb(); | ||
19 | ret = local->ops->start(&local->hw); | ||
20 | trace_drv_start(local, ret); | ||
21 | return ret; | ||
15 | } | 22 | } |
16 | 23 | ||
17 | static inline void drv_stop(struct ieee80211_local *local) | 24 | static inline void drv_stop(struct ieee80211_local *local) |
18 | { | 25 | { |
19 | local->ops->stop(&local->hw); | 26 | local->ops->stop(&local->hw); |
27 | trace_drv_stop(local); | ||
28 | |||
29 | /* sync away all work on the tasklet before clearing started */ | ||
30 | tasklet_disable(&local->tasklet); | ||
31 | tasklet_enable(&local->tasklet); | ||
32 | |||
33 | barrier(); | ||
34 | |||
35 | local->started = false; | ||
20 | } | 36 | } |
21 | 37 | ||
22 | static inline int drv_add_interface(struct ieee80211_local *local, | 38 | static inline int drv_add_interface(struct ieee80211_local *local, |
23 | struct ieee80211_if_init_conf *conf) | 39 | struct ieee80211_if_init_conf *conf) |
24 | { | 40 | { |
25 | return local->ops->add_interface(&local->hw, conf); | 41 | int ret = local->ops->add_interface(&local->hw, conf); |
42 | trace_drv_add_interface(local, conf->mac_addr, conf->vif, ret); | ||
43 | return ret; | ||
26 | } | 44 | } |
27 | 45 | ||
28 | static inline void drv_remove_interface(struct ieee80211_local *local, | 46 | static inline void drv_remove_interface(struct ieee80211_local *local, |
29 | struct ieee80211_if_init_conf *conf) | 47 | struct ieee80211_if_init_conf *conf) |
30 | { | 48 | { |
31 | local->ops->remove_interface(&local->hw, conf); | 49 | local->ops->remove_interface(&local->hw, conf); |
50 | trace_drv_remove_interface(local, conf->mac_addr, conf->vif); | ||
32 | } | 51 | } |
33 | 52 | ||
34 | static inline int drv_config(struct ieee80211_local *local, u32 changed) | 53 | static inline int drv_config(struct ieee80211_local *local, u32 changed) |
35 | { | 54 | { |
36 | return local->ops->config(&local->hw, changed); | 55 | int ret = local->ops->config(&local->hw, changed); |
56 | trace_drv_config(local, changed, ret); | ||
57 | return ret; | ||
37 | } | 58 | } |
38 | 59 | ||
39 | static inline void drv_bss_info_changed(struct ieee80211_local *local, | 60 | static inline void drv_bss_info_changed(struct ieee80211_local *local, |
@@ -43,24 +64,45 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, | |||
43 | { | 64 | { |
44 | if (local->ops->bss_info_changed) | 65 | if (local->ops->bss_info_changed) |
45 | local->ops->bss_info_changed(&local->hw, vif, info, changed); | 66 | local->ops->bss_info_changed(&local->hw, vif, info, changed); |
67 | trace_drv_bss_info_changed(local, vif, info, changed); | ||
68 | } | ||
69 | |||
70 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, | ||
71 | int mc_count, | ||
72 | struct dev_addr_list *mc_list) | ||
73 | { | ||
74 | u64 ret = 0; | ||
75 | |||
76 | if (local->ops->prepare_multicast) | ||
77 | ret = local->ops->prepare_multicast(&local->hw, mc_count, | ||
78 | mc_list); | ||
79 | |||
80 | trace_drv_prepare_multicast(local, mc_count, ret); | ||
81 | |||
82 | return ret; | ||
46 | } | 83 | } |
47 | 84 | ||
48 | static inline void drv_configure_filter(struct ieee80211_local *local, | 85 | static inline void drv_configure_filter(struct ieee80211_local *local, |
49 | unsigned int changed_flags, | 86 | unsigned int changed_flags, |
50 | unsigned int *total_flags, | 87 | unsigned int *total_flags, |
51 | int mc_count, | 88 | u64 multicast) |
52 | struct dev_addr_list *mc_list) | ||
53 | { | 89 | { |
90 | might_sleep(); | ||
91 | |||
54 | local->ops->configure_filter(&local->hw, changed_flags, total_flags, | 92 | local->ops->configure_filter(&local->hw, changed_flags, total_flags, |
55 | mc_count, mc_list); | 93 | multicast); |
94 | trace_drv_configure_filter(local, changed_flags, total_flags, | ||
95 | multicast); | ||
56 | } | 96 | } |
57 | 97 | ||
58 | static inline int drv_set_tim(struct ieee80211_local *local, | 98 | static inline int drv_set_tim(struct ieee80211_local *local, |
59 | struct ieee80211_sta *sta, bool set) | 99 | struct ieee80211_sta *sta, bool set) |
60 | { | 100 | { |
101 | int ret = 0; | ||
61 | if (local->ops->set_tim) | 102 | if (local->ops->set_tim) |
62 | return local->ops->set_tim(&local->hw, sta, set); | 103 | ret = local->ops->set_tim(&local->hw, sta, set); |
63 | return 0; | 104 | trace_drv_set_tim(local, sta, set, ret); |
105 | return ret; | ||
64 | } | 106 | } |
65 | 107 | ||
66 | static inline int drv_set_key(struct ieee80211_local *local, | 108 | static inline int drv_set_key(struct ieee80211_local *local, |
@@ -68,7 +110,9 @@ static inline int drv_set_key(struct ieee80211_local *local, | |||
68 | struct ieee80211_sta *sta, | 110 | struct ieee80211_sta *sta, |
69 | struct ieee80211_key_conf *key) | 111 | struct ieee80211_key_conf *key) |
70 | { | 112 | { |
71 | return local->ops->set_key(&local->hw, cmd, vif, sta, key); | 113 | int ret = local->ops->set_key(&local->hw, cmd, vif, sta, key); |
114 | trace_drv_set_key(local, cmd, vif, sta, key, ret); | ||
115 | return ret; | ||
72 | } | 116 | } |
73 | 117 | ||
74 | static inline void drv_update_tkip_key(struct ieee80211_local *local, | 118 | static inline void drv_update_tkip_key(struct ieee80211_local *local, |
@@ -79,32 +123,41 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local, | |||
79 | if (local->ops->update_tkip_key) | 123 | if (local->ops->update_tkip_key) |
80 | local->ops->update_tkip_key(&local->hw, conf, address, | 124 | local->ops->update_tkip_key(&local->hw, conf, address, |
81 | iv32, phase1key); | 125 | iv32, phase1key); |
126 | trace_drv_update_tkip_key(local, conf, address, iv32); | ||
82 | } | 127 | } |
83 | 128 | ||
84 | static inline int drv_hw_scan(struct ieee80211_local *local, | 129 | static inline int drv_hw_scan(struct ieee80211_local *local, |
85 | struct cfg80211_scan_request *req) | 130 | struct cfg80211_scan_request *req) |
86 | { | 131 | { |
87 | return local->ops->hw_scan(&local->hw, req); | 132 | int ret = local->ops->hw_scan(&local->hw, req); |
133 | trace_drv_hw_scan(local, req, ret); | ||
134 | return ret; | ||
88 | } | 135 | } |
89 | 136 | ||
90 | static inline void drv_sw_scan_start(struct ieee80211_local *local) | 137 | static inline void drv_sw_scan_start(struct ieee80211_local *local) |
91 | { | 138 | { |
92 | if (local->ops->sw_scan_start) | 139 | if (local->ops->sw_scan_start) |
93 | local->ops->sw_scan_start(&local->hw); | 140 | local->ops->sw_scan_start(&local->hw); |
141 | trace_drv_sw_scan_start(local); | ||
94 | } | 142 | } |
95 | 143 | ||
96 | static inline void drv_sw_scan_complete(struct ieee80211_local *local) | 144 | static inline void drv_sw_scan_complete(struct ieee80211_local *local) |
97 | { | 145 | { |
98 | if (local->ops->sw_scan_complete) | 146 | if (local->ops->sw_scan_complete) |
99 | local->ops->sw_scan_complete(&local->hw); | 147 | local->ops->sw_scan_complete(&local->hw); |
148 | trace_drv_sw_scan_complete(local); | ||
100 | } | 149 | } |
101 | 150 | ||
102 | static inline int drv_get_stats(struct ieee80211_local *local, | 151 | static inline int drv_get_stats(struct ieee80211_local *local, |
103 | struct ieee80211_low_level_stats *stats) | 152 | struct ieee80211_low_level_stats *stats) |
104 | { | 153 | { |
105 | if (!local->ops->get_stats) | 154 | int ret = -EOPNOTSUPP; |
106 | return -EOPNOTSUPP; | 155 | |
107 | return local->ops->get_stats(&local->hw, stats); | 156 | if (local->ops->get_stats) |
157 | ret = local->ops->get_stats(&local->hw, stats); | ||
158 | trace_drv_get_stats(local, stats, ret); | ||
159 | |||
160 | return ret; | ||
108 | } | 161 | } |
109 | 162 | ||
110 | static inline void drv_get_tkip_seq(struct ieee80211_local *local, | 163 | static inline void drv_get_tkip_seq(struct ieee80211_local *local, |
@@ -112,14 +165,17 @@ static inline void drv_get_tkip_seq(struct ieee80211_local *local, | |||
112 | { | 165 | { |
113 | if (local->ops->get_tkip_seq) | 166 | if (local->ops->get_tkip_seq) |
114 | local->ops->get_tkip_seq(&local->hw, hw_key_idx, iv32, iv16); | 167 | local->ops->get_tkip_seq(&local->hw, hw_key_idx, iv32, iv16); |
168 | trace_drv_get_tkip_seq(local, hw_key_idx, iv32, iv16); | ||
115 | } | 169 | } |
116 | 170 | ||
117 | static inline int drv_set_rts_threshold(struct ieee80211_local *local, | 171 | static inline int drv_set_rts_threshold(struct ieee80211_local *local, |
118 | u32 value) | 172 | u32 value) |
119 | { | 173 | { |
174 | int ret = 0; | ||
120 | if (local->ops->set_rts_threshold) | 175 | if (local->ops->set_rts_threshold) |
121 | return local->ops->set_rts_threshold(&local->hw, value); | 176 | ret = local->ops->set_rts_threshold(&local->hw, value); |
122 | return 0; | 177 | trace_drv_set_rts_threshold(local, value, ret); |
178 | return ret; | ||
123 | } | 179 | } |
124 | 180 | ||
125 | static inline void drv_sta_notify(struct ieee80211_local *local, | 181 | static inline void drv_sta_notify(struct ieee80211_local *local, |
@@ -129,46 +185,57 @@ static inline void drv_sta_notify(struct ieee80211_local *local, | |||
129 | { | 185 | { |
130 | if (local->ops->sta_notify) | 186 | if (local->ops->sta_notify) |
131 | local->ops->sta_notify(&local->hw, vif, cmd, sta); | 187 | local->ops->sta_notify(&local->hw, vif, cmd, sta); |
188 | trace_drv_sta_notify(local, vif, cmd, sta); | ||
132 | } | 189 | } |
133 | 190 | ||
134 | static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, | 191 | static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, |
135 | const struct ieee80211_tx_queue_params *params) | 192 | const struct ieee80211_tx_queue_params *params) |
136 | { | 193 | { |
194 | int ret = -EOPNOTSUPP; | ||
137 | if (local->ops->conf_tx) | 195 | if (local->ops->conf_tx) |
138 | return local->ops->conf_tx(&local->hw, queue, params); | 196 | ret = local->ops->conf_tx(&local->hw, queue, params); |
139 | return -EOPNOTSUPP; | 197 | trace_drv_conf_tx(local, queue, params, ret); |
198 | return ret; | ||
140 | } | 199 | } |
141 | 200 | ||
142 | static inline int drv_get_tx_stats(struct ieee80211_local *local, | 201 | static inline int drv_get_tx_stats(struct ieee80211_local *local, |
143 | struct ieee80211_tx_queue_stats *stats) | 202 | struct ieee80211_tx_queue_stats *stats) |
144 | { | 203 | { |
145 | return local->ops->get_tx_stats(&local->hw, stats); | 204 | int ret = local->ops->get_tx_stats(&local->hw, stats); |
205 | trace_drv_get_tx_stats(local, stats, ret); | ||
206 | return ret; | ||
146 | } | 207 | } |
147 | 208 | ||
148 | static inline u64 drv_get_tsf(struct ieee80211_local *local) | 209 | static inline u64 drv_get_tsf(struct ieee80211_local *local) |
149 | { | 210 | { |
211 | u64 ret = -1ULL; | ||
150 | if (local->ops->get_tsf) | 212 | if (local->ops->get_tsf) |
151 | return local->ops->get_tsf(&local->hw); | 213 | ret = local->ops->get_tsf(&local->hw); |
152 | return -1ULL; | 214 | trace_drv_get_tsf(local, ret); |
215 | return ret; | ||
153 | } | 216 | } |
154 | 217 | ||
155 | static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf) | 218 | static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf) |
156 | { | 219 | { |
157 | if (local->ops->set_tsf) | 220 | if (local->ops->set_tsf) |
158 | local->ops->set_tsf(&local->hw, tsf); | 221 | local->ops->set_tsf(&local->hw, tsf); |
222 | trace_drv_set_tsf(local, tsf); | ||
159 | } | 223 | } |
160 | 224 | ||
161 | static inline void drv_reset_tsf(struct ieee80211_local *local) | 225 | static inline void drv_reset_tsf(struct ieee80211_local *local) |
162 | { | 226 | { |
163 | if (local->ops->reset_tsf) | 227 | if (local->ops->reset_tsf) |
164 | local->ops->reset_tsf(&local->hw); | 228 | local->ops->reset_tsf(&local->hw); |
229 | trace_drv_reset_tsf(local); | ||
165 | } | 230 | } |
166 | 231 | ||
167 | static inline int drv_tx_last_beacon(struct ieee80211_local *local) | 232 | static inline int drv_tx_last_beacon(struct ieee80211_local *local) |
168 | { | 233 | { |
234 | int ret = 1; | ||
169 | if (local->ops->tx_last_beacon) | 235 | if (local->ops->tx_last_beacon) |
170 | return local->ops->tx_last_beacon(&local->hw); | 236 | ret = local->ops->tx_last_beacon(&local->hw); |
171 | return 1; | 237 | trace_drv_tx_last_beacon(local, ret); |
238 | return ret; | ||
172 | } | 239 | } |
173 | 240 | ||
174 | static inline int drv_ampdu_action(struct ieee80211_local *local, | 241 | static inline int drv_ampdu_action(struct ieee80211_local *local, |
@@ -176,10 +243,12 @@ static inline int drv_ampdu_action(struct ieee80211_local *local, | |||
176 | struct ieee80211_sta *sta, u16 tid, | 243 | struct ieee80211_sta *sta, u16 tid, |
177 | u16 *ssn) | 244 | u16 *ssn) |
178 | { | 245 | { |
246 | int ret = -EOPNOTSUPP; | ||
179 | if (local->ops->ampdu_action) | 247 | if (local->ops->ampdu_action) |
180 | return local->ops->ampdu_action(&local->hw, action, | 248 | ret = local->ops->ampdu_action(&local->hw, action, |
181 | sta, tid, ssn); | 249 | sta, tid, ssn); |
182 | return -EOPNOTSUPP; | 250 | trace_drv_ampdu_action(local, action, sta, tid, ssn, ret); |
251 | return ret; | ||
183 | } | 252 | } |
184 | 253 | ||
185 | 254 | ||
diff --git a/net/mac80211/driver-trace.c b/net/mac80211/driver-trace.c new file mode 100644 index 000000000000..8ed8711b1a6d --- /dev/null +++ b/net/mac80211/driver-trace.c | |||
@@ -0,0 +1,9 @@ | |||
1 | /* bug in tracepoint.h, it should include this */ | ||
2 | #include <linux/module.h> | ||
3 | |||
4 | /* sparse isn't too happy with all macros... */ | ||
5 | #ifndef __CHECKER__ | ||
6 | #include "driver-ops.h" | ||
7 | #define CREATE_TRACE_POINTS | ||
8 | #include "driver-trace.h" | ||
9 | #endif | ||
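driver-trace.c relies on the trace header being read more than once: the normal include (via driver-ops.h) declares the trace_drv_*() calls, and re-including it with CREATE_TRACE_POINTS defined makes the tracepoint machinery emit their definitions in exactly one object file. A minimal stand-alone sketch of that multiple-expansion ("X-macro") idea follows, with a toy EVENT_LIST standing in for <linux/tracepoint.h>; it illustrates the technique, not the actual tracepoint internals.

    #include <stdio.h>

    /* the "trace header": one entry per event, expanded with different EVENT() */
    #define EVENT_LIST       \
            EVENT(drv_start) \
            EVENT(drv_stop)

    /* first expansion: declarations only (what a normal include provides) */
    #define EVENT(name) static void trace_##name(void);
    EVENT_LIST
    #undef EVENT

    /* second expansion: definitions (the CREATE_TRACE_POINTS analogue) */
    #define EVENT(name) static void trace_##name(void) { puts("event: " #name); }
    EVENT_LIST
    #undef EVENT

    int main(void)
    {
            trace_drv_start();
            trace_drv_stop();
            return 0;
    }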
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h new file mode 100644 index 000000000000..37b9051afcf3 --- /dev/null +++ b/net/mac80211/driver-trace.h | |||
@@ -0,0 +1,672 @@ | |||
1 | #if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define __MAC80211_DRIVER_TRACE | ||
3 | |||
4 | #include <linux/tracepoint.h> | ||
5 | #include <net/mac80211.h> | ||
6 | #include "ieee80211_i.h" | ||
7 | |||
8 | #if !defined(CONFIG_MAC80211_DRIVER_API_TRACER) || defined(__CHECKER__) | ||
9 | #undef TRACE_EVENT | ||
10 | #define TRACE_EVENT(name, proto, ...) \ | ||
11 | static inline void trace_ ## name(proto) {} | ||
12 | #endif | ||
13 | |||
14 | #undef TRACE_SYSTEM | ||
15 | #define TRACE_SYSTEM mac80211 | ||
16 | |||
17 | #define MAXNAME 32 | ||
18 | #define LOCAL_ENTRY __array(char, wiphy_name, 32) | ||
19 | #define LOCAL_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(local->hw.wiphy), MAXNAME) | ||
20 | #define LOCAL_PR_FMT "%s" | ||
21 | #define LOCAL_PR_ARG __entry->wiphy_name | ||
22 | |||
23 | #define STA_ENTRY __array(char, sta_addr, ETH_ALEN) | ||
24 | #define STA_ASSIGN (sta ? memcpy(__entry->sta_addr, sta->addr, ETH_ALEN) : memset(__entry->sta_addr, 0, ETH_ALEN)) | ||
25 | #define STA_PR_FMT " sta:%pM" | ||
26 | #define STA_PR_ARG __entry->sta_addr | ||
27 | |||
28 | #define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, vif) | ||
29 | #define VIF_ASSIGN __entry->vif_type = vif ? vif->type : 0; __entry->vif = vif | ||
30 | #define VIF_PR_FMT " vif:%p(%d)" | ||
31 | #define VIF_PR_ARG __entry->vif, __entry->vif_type | ||
32 | |||
33 | TRACE_EVENT(drv_start, | ||
34 | TP_PROTO(struct ieee80211_local *local, int ret), | ||
35 | |||
36 | TP_ARGS(local, ret), | ||
37 | |||
38 | TP_STRUCT__entry( | ||
39 | LOCAL_ENTRY | ||
40 | __field(int, ret) | ||
41 | ), | ||
42 | |||
43 | TP_fast_assign( | ||
44 | LOCAL_ASSIGN; | ||
45 | __entry->ret = ret; | ||
46 | ), | ||
47 | |||
48 | TP_printk( | ||
49 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
50 | ) | ||
51 | ); | ||
52 | |||
53 | TRACE_EVENT(drv_stop, | ||
54 | TP_PROTO(struct ieee80211_local *local), | ||
55 | |||
56 | TP_ARGS(local), | ||
57 | |||
58 | TP_STRUCT__entry( | ||
59 | LOCAL_ENTRY | ||
60 | ), | ||
61 | |||
62 | TP_fast_assign( | ||
63 | LOCAL_ASSIGN; | ||
64 | ), | ||
65 | |||
66 | TP_printk( | ||
67 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
68 | ) | ||
69 | ); | ||
70 | |||
71 | TRACE_EVENT(drv_add_interface, | ||
72 | TP_PROTO(struct ieee80211_local *local, | ||
73 | const u8 *addr, | ||
74 | struct ieee80211_vif *vif, | ||
75 | int ret), | ||
76 | |||
77 | TP_ARGS(local, addr, vif, ret), | ||
78 | |||
79 | TP_STRUCT__entry( | ||
80 | LOCAL_ENTRY | ||
81 | VIF_ENTRY | ||
82 | __array(char, addr, 6) | ||
83 | __field(int, ret) | ||
84 | ), | ||
85 | |||
86 | TP_fast_assign( | ||
87 | LOCAL_ASSIGN; | ||
88 | VIF_ASSIGN; | ||
89 | memcpy(__entry->addr, addr, 6); | ||
90 | __entry->ret = ret; | ||
91 | ), | ||
92 | |||
93 | TP_printk( | ||
94 | LOCAL_PR_FMT VIF_PR_FMT " addr:%pM ret:%d", | ||
95 | LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr, __entry->ret | ||
96 | ) | ||
97 | ); | ||
98 | |||
99 | TRACE_EVENT(drv_remove_interface, | ||
100 | TP_PROTO(struct ieee80211_local *local, | ||
101 | const u8 *addr, struct ieee80211_vif *vif), | ||
102 | |||
103 | TP_ARGS(local, addr, vif), | ||
104 | |||
105 | TP_STRUCT__entry( | ||
106 | LOCAL_ENTRY | ||
107 | VIF_ENTRY | ||
108 | __array(char, addr, 6) | ||
109 | ), | ||
110 | |||
111 | TP_fast_assign( | ||
112 | LOCAL_ASSIGN; | ||
113 | VIF_ASSIGN; | ||
114 | memcpy(__entry->addr, addr, 6); | ||
115 | ), | ||
116 | |||
117 | TP_printk( | ||
118 | LOCAL_PR_FMT VIF_PR_FMT " addr:%pM", | ||
119 | LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr | ||
120 | ) | ||
121 | ); | ||
122 | |||
123 | TRACE_EVENT(drv_config, | ||
124 | TP_PROTO(struct ieee80211_local *local, | ||
125 | u32 changed, | ||
126 | int ret), | ||
127 | |||
128 | TP_ARGS(local, changed, ret), | ||
129 | |||
130 | TP_STRUCT__entry( | ||
131 | LOCAL_ENTRY | ||
132 | __field(u32, changed) | ||
133 | __field(int, ret) | ||
134 | ), | ||
135 | |||
136 | TP_fast_assign( | ||
137 | LOCAL_ASSIGN; | ||
138 | __entry->changed = changed; | ||
139 | __entry->ret = ret; | ||
140 | ), | ||
141 | |||
142 | TP_printk( | ||
143 | LOCAL_PR_FMT " ch:%#x ret:%d", | ||
144 | LOCAL_PR_ARG, __entry->changed, __entry->ret | ||
145 | ) | ||
146 | ); | ||
147 | |||
148 | TRACE_EVENT(drv_bss_info_changed, | ||
149 | TP_PROTO(struct ieee80211_local *local, | ||
150 | struct ieee80211_vif *vif, | ||
151 | struct ieee80211_bss_conf *info, | ||
152 | u32 changed), | ||
153 | |||
154 | TP_ARGS(local, vif, info, changed), | ||
155 | |||
156 | TP_STRUCT__entry( | ||
157 | LOCAL_ENTRY | ||
158 | VIF_ENTRY | ||
159 | __field(bool, assoc) | ||
160 | __field(u16, aid) | ||
161 | __field(bool, cts) | ||
162 | __field(bool, shortpre) | ||
163 | __field(bool, shortslot) | ||
164 | __field(u8, dtimper) | ||
165 | __field(u16, bcnint) | ||
166 | __field(u16, assoc_cap) | ||
167 | __field(u64, timestamp) | ||
168 | __field(u32, basic_rates) | ||
169 | __field(u32, changed) | ||
170 | ), | ||
171 | |||
172 | TP_fast_assign( | ||
173 | LOCAL_ASSIGN; | ||
174 | VIF_ASSIGN; | ||
175 | __entry->changed = changed; | ||
176 | __entry->aid = info->aid; | ||
177 | __entry->assoc = info->assoc; | ||
178 | __entry->shortpre = info->use_short_preamble; | ||
179 | __entry->cts = info->use_cts_prot; | ||
180 | __entry->shortslot = info->use_short_slot; | ||
181 | __entry->dtimper = info->dtim_period; | ||
182 | __entry->bcnint = info->beacon_int; | ||
183 | __entry->assoc_cap = info->assoc_capability; | ||
184 | __entry->timestamp = info->timestamp; | ||
185 | __entry->basic_rates = info->basic_rates; | ||
186 | ), | ||
187 | |||
188 | TP_printk( | ||
189 | LOCAL_PR_FMT VIF_PR_FMT " changed:%#x", | ||
190 | LOCAL_PR_ARG, VIF_PR_ARG, __entry->changed | ||
191 | ) | ||
192 | ); | ||
193 | |||
194 | TRACE_EVENT(drv_prepare_multicast, | ||
195 | TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret), | ||
196 | |||
197 | TP_ARGS(local, mc_count, ret), | ||
198 | |||
199 | TP_STRUCT__entry( | ||
200 | LOCAL_ENTRY | ||
201 | __field(int, mc_count) | ||
202 | __field(u64, ret) | ||
203 | ), | ||
204 | |||
205 | TP_fast_assign( | ||
206 | LOCAL_ASSIGN; | ||
207 | __entry->mc_count = mc_count; | ||
208 | __entry->ret = ret; | ||
209 | ), | ||
210 | |||
211 | TP_printk( | ||
212 | LOCAL_PR_FMT " prepare mc (%d): %llx", | ||
213 | LOCAL_PR_ARG, __entry->mc_count, | ||
214 | (unsigned long long) __entry->ret | ||
215 | ) | ||
216 | ); | ||
217 | |||
218 | TRACE_EVENT(drv_configure_filter, | ||
219 | TP_PROTO(struct ieee80211_local *local, | ||
220 | unsigned int changed_flags, | ||
221 | unsigned int *total_flags, | ||
222 | u64 multicast), | ||
223 | |||
224 | TP_ARGS(local, changed_flags, total_flags, multicast), | ||
225 | |||
226 | TP_STRUCT__entry( | ||
227 | LOCAL_ENTRY | ||
228 | __field(unsigned int, changed) | ||
229 | __field(unsigned int, total) | ||
230 | __field(u64, multicast) | ||
231 | ), | ||
232 | |||
233 | TP_fast_assign( | ||
234 | LOCAL_ASSIGN; | ||
235 | __entry->changed = changed_flags; | ||
236 | __entry->total = *total_flags; | ||
237 | __entry->multicast = multicast; | ||
238 | ), | ||
239 | |||
240 | TP_printk( | ||
241 | LOCAL_PR_FMT " changed:%#x total:%#x", | ||
242 | LOCAL_PR_ARG, __entry->changed, __entry->total | ||
243 | ) | ||
244 | ); | ||
245 | |||
246 | TRACE_EVENT(drv_set_tim, | ||
247 | TP_PROTO(struct ieee80211_local *local, | ||
248 | struct ieee80211_sta *sta, bool set, int ret), | ||
249 | |||
250 | TP_ARGS(local, sta, set, ret), | ||
251 | |||
252 | TP_STRUCT__entry( | ||
253 | LOCAL_ENTRY | ||
254 | STA_ENTRY | ||
255 | __field(bool, set) | ||
256 | __field(int, ret) | ||
257 | ), | ||
258 | |||
259 | TP_fast_assign( | ||
260 | LOCAL_ASSIGN; | ||
261 | STA_ASSIGN; | ||
262 | __entry->set = set; | ||
263 | __entry->ret = ret; | ||
264 | ), | ||
265 | |||
266 | TP_printk( | ||
267 | LOCAL_PR_FMT STA_PR_FMT " set:%d ret:%d", | ||
267 | LOCAL_PR_ARG, STA_PR_ARG, __entry->set, __entry->ret | ||
269 | ) | ||
270 | ); | ||
271 | |||
272 | TRACE_EVENT(drv_set_key, | ||
273 | TP_PROTO(struct ieee80211_local *local, | ||
274 | enum set_key_cmd cmd, struct ieee80211_vif *vif, | ||
275 | struct ieee80211_sta *sta, | ||
276 | struct ieee80211_key_conf *key, int ret), | ||
277 | |||
278 | TP_ARGS(local, cmd, vif, sta, key, ret), | ||
279 | |||
280 | TP_STRUCT__entry( | ||
281 | LOCAL_ENTRY | ||
282 | VIF_ENTRY | ||
283 | STA_ENTRY | ||
284 | __field(enum ieee80211_key_alg, alg) | ||
285 | __field(u8, hw_key_idx) | ||
286 | __field(u8, flags) | ||
287 | __field(s8, keyidx) | ||
288 | __field(int, ret) | ||
289 | ), | ||
290 | |||
291 | TP_fast_assign( | ||
292 | LOCAL_ASSIGN; | ||
293 | VIF_ASSIGN; | ||
294 | STA_ASSIGN; | ||
295 | __entry->alg = key->alg; | ||
296 | __entry->flags = key->flags; | ||
297 | __entry->keyidx = key->keyidx; | ||
298 | __entry->hw_key_idx = key->hw_key_idx; | ||
299 | __entry->ret = ret; | ||
300 | ), | ||
301 | |||
302 | TP_printk( | ||
303 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d", | ||
304 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret | ||
305 | ) | ||
306 | ); | ||
307 | |||
308 | TRACE_EVENT(drv_update_tkip_key, | ||
309 | TP_PROTO(struct ieee80211_local *local, | ||
310 | struct ieee80211_key_conf *conf, | ||
311 | const u8 *address, u32 iv32), | ||
312 | |||
313 | TP_ARGS(local, conf, address, iv32), | ||
314 | |||
315 | TP_STRUCT__entry( | ||
316 | LOCAL_ENTRY | ||
317 | __array(u8, addr, 6) | ||
318 | __field(u32, iv32) | ||
319 | ), | ||
320 | |||
321 | TP_fast_assign( | ||
322 | LOCAL_ASSIGN; | ||
323 | memcpy(__entry->addr, address, 6); | ||
324 | __entry->iv32 = iv32; | ||
325 | ), | ||
326 | |||
327 | TP_printk( | ||
328 | LOCAL_PR_FMT " addr:%pM iv32:%#x", | ||
329 | LOCAL_PR_ARG, __entry->addr, __entry->iv32 | ||
330 | ) | ||
331 | ); | ||
332 | |||
333 | TRACE_EVENT(drv_hw_scan, | ||
334 | TP_PROTO(struct ieee80211_local *local, | ||
335 | struct cfg80211_scan_request *req, int ret), | ||
336 | |||
337 | TP_ARGS(local, req, ret), | ||
338 | |||
339 | TP_STRUCT__entry( | ||
340 | LOCAL_ENTRY | ||
341 | __field(int, ret) | ||
342 | ), | ||
343 | |||
344 | TP_fast_assign( | ||
345 | LOCAL_ASSIGN; | ||
346 | __entry->ret = ret; | ||
347 | ), | ||
348 | |||
349 | TP_printk( | ||
350 | LOCAL_PR_FMT " ret:%d", | ||
351 | LOCAL_PR_ARG, __entry->ret | ||
352 | ) | ||
353 | ); | ||
354 | |||
355 | TRACE_EVENT(drv_sw_scan_start, | ||
356 | TP_PROTO(struct ieee80211_local *local), | ||
357 | |||
358 | TP_ARGS(local), | ||
359 | |||
360 | TP_STRUCT__entry( | ||
361 | LOCAL_ENTRY | ||
362 | ), | ||
363 | |||
364 | TP_fast_assign( | ||
365 | LOCAL_ASSIGN; | ||
366 | ), | ||
367 | |||
368 | TP_printk( | ||
369 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
370 | ) | ||
371 | ); | ||
372 | |||
373 | TRACE_EVENT(drv_sw_scan_complete, | ||
374 | TP_PROTO(struct ieee80211_local *local), | ||
375 | |||
376 | TP_ARGS(local), | ||
377 | |||
378 | TP_STRUCT__entry( | ||
379 | LOCAL_ENTRY | ||
380 | ), | ||
381 | |||
382 | TP_fast_assign( | ||
383 | LOCAL_ASSIGN; | ||
384 | ), | ||
385 | |||
386 | TP_printk( | ||
387 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
388 | ) | ||
389 | ); | ||
390 | |||
391 | TRACE_EVENT(drv_get_stats, | ||
392 | TP_PROTO(struct ieee80211_local *local, | ||
393 | struct ieee80211_low_level_stats *stats, | ||
394 | int ret), | ||
395 | |||
396 | TP_ARGS(local, stats, ret), | ||
397 | |||
398 | TP_STRUCT__entry( | ||
399 | LOCAL_ENTRY | ||
400 | __field(int, ret) | ||
401 | __field(unsigned int, ackfail) | ||
402 | __field(unsigned int, rtsfail) | ||
403 | __field(unsigned int, fcserr) | ||
404 | __field(unsigned int, rtssucc) | ||
405 | ), | ||
406 | |||
407 | TP_fast_assign( | ||
408 | LOCAL_ASSIGN; | ||
409 | __entry->ret = ret; | ||
410 | __entry->ackfail = stats->dot11ACKFailureCount; | ||
411 | __entry->rtsfail = stats->dot11RTSFailureCount; | ||
412 | __entry->fcserr = stats->dot11FCSErrorCount; | ||
413 | __entry->rtssucc = stats->dot11RTSSuccessCount; | ||
414 | ), | ||
415 | |||
416 | TP_printk( | ||
417 | LOCAL_PR_FMT " ret:%d", | ||
418 | LOCAL_PR_ARG, __entry->ret | ||
419 | ) | ||
420 | ); | ||
421 | |||
422 | TRACE_EVENT(drv_get_tkip_seq, | ||
423 | TP_PROTO(struct ieee80211_local *local, | ||
424 | u8 hw_key_idx, u32 *iv32, u16 *iv16), | ||
425 | |||
426 | TP_ARGS(local, hw_key_idx, iv32, iv16), | ||
427 | |||
428 | TP_STRUCT__entry( | ||
429 | LOCAL_ENTRY | ||
430 | __field(u8, hw_key_idx) | ||
431 | __field(u32, iv32) | ||
432 | __field(u16, iv16) | ||
433 | ), | ||
434 | |||
435 | TP_fast_assign( | ||
436 | LOCAL_ASSIGN; | ||
437 | __entry->hw_key_idx = hw_key_idx; | ||
438 | __entry->iv32 = *iv32; | ||
439 | __entry->iv16 = *iv16; | ||
440 | ), | ||
441 | |||
442 | TP_printk( | ||
443 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
444 | ) | ||
445 | ); | ||
446 | |||
447 | TRACE_EVENT(drv_set_rts_threshold, | ||
448 | TP_PROTO(struct ieee80211_local *local, u32 value, int ret), | ||
449 | |||
450 | TP_ARGS(local, value, ret), | ||
451 | |||
452 | TP_STRUCT__entry( | ||
453 | LOCAL_ENTRY | ||
454 | __field(u32, value) | ||
455 | __field(int, ret) | ||
456 | ), | ||
457 | |||
458 | TP_fast_assign( | ||
459 | LOCAL_ASSIGN; | ||
460 | __entry->ret = ret; | ||
461 | __entry->value = value; | ||
462 | ), | ||
463 | |||
464 | TP_printk( | ||
465 | LOCAL_PR_FMT " value:%d ret:%d", | ||
466 | LOCAL_PR_ARG, __entry->value, __entry->ret | ||
467 | ) | ||
468 | ); | ||
469 | |||
470 | TRACE_EVENT(drv_sta_notify, | ||
471 | TP_PROTO(struct ieee80211_local *local, | ||
472 | struct ieee80211_vif *vif, | ||
473 | enum sta_notify_cmd cmd, | ||
474 | struct ieee80211_sta *sta), | ||
475 | |||
476 | TP_ARGS(local, vif, cmd, sta), | ||
477 | |||
478 | TP_STRUCT__entry( | ||
479 | LOCAL_ENTRY | ||
480 | VIF_ENTRY | ||
481 | STA_ENTRY | ||
482 | __field(u32, cmd) | ||
483 | ), | ||
484 | |||
485 | TP_fast_assign( | ||
486 | LOCAL_ASSIGN; | ||
487 | VIF_ASSIGN; | ||
488 | STA_ASSIGN; | ||
489 | __entry->cmd = cmd; | ||
490 | ), | ||
491 | |||
492 | TP_printk( | ||
493 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " cmd:%d", | ||
494 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->cmd | ||
495 | ) | ||
496 | ); | ||
497 | |||
498 | TRACE_EVENT(drv_conf_tx, | ||
499 | TP_PROTO(struct ieee80211_local *local, u16 queue, | ||
500 | const struct ieee80211_tx_queue_params *params, | ||
501 | int ret), | ||
502 | |||
503 | TP_ARGS(local, queue, params, ret), | ||
504 | |||
505 | TP_STRUCT__entry( | ||
506 | LOCAL_ENTRY | ||
507 | __field(u16, queue) | ||
508 | __field(u16, txop) | ||
509 | __field(u16, cw_min) | ||
510 | __field(u16, cw_max) | ||
511 | __field(u8, aifs) | ||
512 | __field(int, ret) | ||
513 | ), | ||
514 | |||
515 | TP_fast_assign( | ||
516 | LOCAL_ASSIGN; | ||
517 | __entry->queue = queue; | ||
518 | __entry->ret = ret; | ||
519 | __entry->txop = params->txop; | ||
520 | __entry->cw_max = params->cw_max; | ||
521 | __entry->cw_min = params->cw_min; | ||
522 | __entry->aifs = params->aifs; | ||
523 | ), | ||
524 | |||
525 | TP_printk( | ||
526 | LOCAL_PR_FMT " queue:%d ret:%d", | ||
527 | LOCAL_PR_ARG, __entry->queue, __entry->ret | ||
528 | ) | ||
529 | ); | ||
530 | |||
531 | TRACE_EVENT(drv_get_tx_stats, | ||
532 | TP_PROTO(struct ieee80211_local *local, | ||
533 | struct ieee80211_tx_queue_stats *stats, | ||
534 | int ret), | ||
535 | |||
536 | TP_ARGS(local, stats, ret), | ||
537 | |||
538 | TP_STRUCT__entry( | ||
539 | LOCAL_ENTRY | ||
540 | __field(int, ret) | ||
541 | ), | ||
542 | |||
543 | TP_fast_assign( | ||
544 | LOCAL_ASSIGN; | ||
545 | __entry->ret = ret; | ||
546 | ), | ||
547 | |||
548 | TP_printk( | ||
549 | LOCAL_PR_FMT " ret:%d", | ||
550 | LOCAL_PR_ARG, __entry->ret | ||
551 | ) | ||
552 | ); | ||
553 | |||
554 | TRACE_EVENT(drv_get_tsf, | ||
555 | TP_PROTO(struct ieee80211_local *local, u64 ret), | ||
556 | |||
557 | TP_ARGS(local, ret), | ||
558 | |||
559 | TP_STRUCT__entry( | ||
560 | LOCAL_ENTRY | ||
561 | __field(u64, ret) | ||
562 | ), | ||
563 | |||
564 | TP_fast_assign( | ||
565 | LOCAL_ASSIGN; | ||
566 | __entry->ret = ret; | ||
567 | ), | ||
568 | |||
569 | TP_printk( | ||
570 | LOCAL_PR_FMT " ret:%llu", | ||
571 | LOCAL_PR_ARG, (unsigned long long)__entry->ret | ||
572 | ) | ||
573 | ); | ||
574 | |||
575 | TRACE_EVENT(drv_set_tsf, | ||
576 | TP_PROTO(struct ieee80211_local *local, u64 tsf), | ||
577 | |||
578 | TP_ARGS(local, tsf), | ||
579 | |||
580 | TP_STRUCT__entry( | ||
581 | LOCAL_ENTRY | ||
582 | __field(u64, tsf) | ||
583 | ), | ||
584 | |||
585 | TP_fast_assign( | ||
586 | LOCAL_ASSIGN; | ||
587 | __entry->tsf = tsf; | ||
588 | ), | ||
589 | |||
590 | TP_printk( | ||
591 | LOCAL_PR_FMT " tsf:%llu", | ||
592 | LOCAL_PR_ARG, (unsigned long long)__entry->tsf | ||
593 | ) | ||
594 | ); | ||
595 | |||
596 | TRACE_EVENT(drv_reset_tsf, | ||
597 | TP_PROTO(struct ieee80211_local *local), | ||
598 | |||
599 | TP_ARGS(local), | ||
600 | |||
601 | TP_STRUCT__entry( | ||
602 | LOCAL_ENTRY | ||
603 | ), | ||
604 | |||
605 | TP_fast_assign( | ||
606 | LOCAL_ASSIGN; | ||
607 | ), | ||
608 | |||
609 | TP_printk( | ||
610 | LOCAL_PR_FMT, LOCAL_PR_ARG | ||
611 | ) | ||
612 | ); | ||
613 | |||
614 | TRACE_EVENT(drv_tx_last_beacon, | ||
615 | TP_PROTO(struct ieee80211_local *local, int ret), | ||
616 | |||
617 | TP_ARGS(local, ret), | ||
618 | |||
619 | TP_STRUCT__entry( | ||
620 | LOCAL_ENTRY | ||
621 | __field(int, ret) | ||
622 | ), | ||
623 | |||
624 | TP_fast_assign( | ||
625 | LOCAL_ASSIGN; | ||
626 | __entry->ret = ret; | ||
627 | ), | ||
628 | |||
629 | TP_printk( | ||
630 | LOCAL_PR_FMT " ret:%d", | ||
631 | LOCAL_PR_ARG, __entry->ret | ||
632 | ) | ||
633 | ); | ||
634 | |||
635 | TRACE_EVENT(drv_ampdu_action, | ||
636 | TP_PROTO(struct ieee80211_local *local, | ||
637 | enum ieee80211_ampdu_mlme_action action, | ||
638 | struct ieee80211_sta *sta, u16 tid, | ||
639 | u16 *ssn, int ret), | ||
640 | |||
641 | TP_ARGS(local, action, sta, tid, ssn, ret), | ||
642 | |||
643 | TP_STRUCT__entry( | ||
644 | LOCAL_ENTRY | ||
645 | STA_ENTRY | ||
646 | __field(u32, action) | ||
647 | __field(u16, tid) | ||
648 | __field(u16, ssn) | ||
649 | __field(int, ret) | ||
650 | ), | ||
651 | |||
652 | TP_fast_assign( | ||
653 | LOCAL_ASSIGN; | ||
654 | STA_ASSIGN; | ||
655 | __entry->ret = ret; | ||
656 | __entry->action = action; | ||
657 | __entry->tid = tid; | ||
658 | __entry->ssn = *ssn; | ||
659 | ), | ||
660 | |||
661 | TP_printk( | ||
662 | LOCAL_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d", | ||
663 | LOCAL_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret | ||
664 | ) | ||
665 | ); | ||
666 | #endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ | ||
667 | |||
668 | #undef TRACE_INCLUDE_PATH | ||
669 | #define TRACE_INCLUDE_PATH . | ||
670 | #undef TRACE_INCLUDE_FILE | ||
671 | #define TRACE_INCLUDE_FILE driver-trace | ||
672 | #include <trace/define_trace.h> | ||
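Editor's note: every drv_* entry above follows the same TRACE_EVENT layout, TP_PROTO/TP_ARGS declare the tracepoint arguments, TP_STRUCT__entry/TP_fast_assign copy them into the trace record (via the file-local LOCAL_ENTRY/LOCAL_ASSIGN helpers), and TP_printk formats the record for output. A minimal sketch of that pattern follows; "drv_example" and its "value" argument are illustrative names only, not part of this commit.

	/* hypothetical tracepoint, shown only to illustrate the layout above */
	TRACE_EVENT(drv_example,
		TP_PROTO(struct ieee80211_local *local, u32 value),

		TP_ARGS(local, value),

		TP_STRUCT__entry(
			LOCAL_ENTRY
			__field(u32, value)
		),

		TP_fast_assign(
			LOCAL_ASSIGN;
			__entry->value = value;
		),

		TP_printk(
			LOCAL_PR_FMT " value:%u",
			LOCAL_PR_ARG, __entry->value
		)
	);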
diff --git a/net/mac80211/event.c b/net/mac80211/event.c index f288d01a6344..01ae759518f6 100644 --- a/net/mac80211/event.c +++ b/net/mac80211/event.c | |||
@@ -7,8 +7,7 @@ | |||
7 | * | 7 | * |
8 | * mac80211 - events | 8 | * mac80211 - events |
9 | */ | 9 | */ |
10 | 10 | #include <net/cfg80211.h> | |
11 | #include <net/iw_handler.h> | ||
12 | #include "ieee80211_i.h" | 11 | #include "ieee80211_i.h" |
13 | 12 | ||
14 | /* | 13 | /* |
@@ -17,26 +16,12 @@ | |||
17 | * driver or is still in the frame), it should provide that information. | 16 | * driver or is still in the frame), it should provide that information. |
18 | */ | 17 | */ |
19 | void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, | 18 | void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, |
20 | struct ieee80211_hdr *hdr, const u8 *tsc) | 19 | struct ieee80211_hdr *hdr, const u8 *tsc, |
20 | gfp_t gfp) | ||
21 | { | 21 | { |
22 | union iwreq_data wrqu; | ||
23 | char *buf = kmalloc(128, GFP_ATOMIC); | ||
24 | |||
25 | if (buf) { | ||
26 | /* TODO: needed parameters: count, key type, TSC */ | ||
27 | sprintf(buf, "MLME-MICHAELMICFAILURE.indication(" | ||
28 | "keyid=%d %scast addr=%pM)", | ||
29 | keyidx, hdr->addr1[0] & 0x01 ? "broad" : "uni", | ||
30 | hdr->addr2); | ||
31 | memset(&wrqu, 0, sizeof(wrqu)); | ||
32 | wrqu.data.length = strlen(buf); | ||
33 | wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf); | ||
34 | kfree(buf); | ||
35 | } | ||
36 | |||
37 | cfg80211_michael_mic_failure(sdata->dev, hdr->addr2, | 22 | cfg80211_michael_mic_failure(sdata->dev, hdr->addr2, |
38 | (hdr->addr1[0] & 0x01) ? | 23 | (hdr->addr1[0] & 0x01) ? |
39 | NL80211_KEYTYPE_GROUP : | 24 | NL80211_KEYTYPE_GROUP : |
40 | NL80211_KEYTYPE_PAIRWISE, | 25 | NL80211_KEYTYPE_PAIRWISE, |
41 | keyidx, tsc); | 26 | keyidx, tsc, gfp); |
42 | } | 27 | } |
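Editor's note: with the wireless-extensions custom event gone, mac80211_ev_michael_mic_failure() is now a thin wrapper around cfg80211_michael_mic_failure(), and the caller chooses the allocation context through the new gfp argument. A minimal sketch of a call site, assuming an RX-path caller that has already parsed the 802.11 header; example_report_mic_failure is an illustrative name, and passing a NULL tsc means the TKIP sequence counter is not provided.

	static void example_report_mic_failure(struct ieee80211_sub_if_data *sdata,
					       int keyidx,
					       struct ieee80211_hdr *hdr)
	{
		/* RX handlers run in atomic context, hence GFP_ATOMIC */
		mac80211_ev_michael_mic_failure(sdata, keyidx, hdr, NULL,
						GFP_ATOMIC);
	}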
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 0b30277eb366..920ec8792f4b 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -57,7 +57,7 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, | |||
57 | */ | 57 | */ |
58 | if (auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1) | 58 | if (auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1) |
59 | ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0, | 59 | ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0, |
60 | sdata->u.ibss.bssid, 0); | 60 | sdata->u.ibss.bssid, NULL, 0, 0); |
61 | } | 61 | } |
62 | 62 | ||
63 | static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | 63 | static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, |
@@ -494,7 +494,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) | |||
494 | 494 | ||
495 | capability = WLAN_CAPABILITY_IBSS; | 495 | capability = WLAN_CAPABILITY_IBSS; |
496 | 496 | ||
497 | if (sdata->default_key) | 497 | if (ifibss->privacy) |
498 | capability |= WLAN_CAPABILITY_PRIVACY; | 498 | capability |= WLAN_CAPABILITY_PRIVACY; |
499 | else | 499 | else |
500 | sdata->drop_unencrypted = 0; | 500 | sdata->drop_unencrypted = 0; |
@@ -524,9 +524,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) | |||
524 | return; | 524 | return; |
525 | 525 | ||
526 | capability = WLAN_CAPABILITY_IBSS; | 526 | capability = WLAN_CAPABILITY_IBSS; |
527 | if (sdata->default_key) | 527 | if (ifibss->privacy) |
528 | capability |= WLAN_CAPABILITY_PRIVACY; | 528 | capability |= WLAN_CAPABILITY_PRIVACY; |
529 | |||
530 | if (ifibss->fixed_bssid) | 529 | if (ifibss->fixed_bssid) |
531 | bssid = ifibss->bssid; | 530 | bssid = ifibss->bssid; |
532 | if (ifibss->fixed_channel) | 531 | if (ifibss->fixed_channel) |
@@ -705,7 +704,7 @@ static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
705 | struct ieee80211_mgmt *mgmt; | 704 | struct ieee80211_mgmt *mgmt; |
706 | u16 fc; | 705 | u16 fc; |
707 | 706 | ||
708 | rx_status = (struct ieee80211_rx_status *) skb->cb; | 707 | rx_status = IEEE80211_SKB_RXCB(skb); |
709 | mgmt = (struct ieee80211_mgmt *) skb->data; | 708 | mgmt = (struct ieee80211_mgmt *) skb->data; |
710 | fc = le16_to_cpu(mgmt->frame_control); | 709 | fc = le16_to_cpu(mgmt->frame_control); |
711 | 710 | ||
@@ -743,7 +742,7 @@ static void ieee80211_ibss_work(struct work_struct *work) | |||
743 | if (!netif_running(sdata->dev)) | 742 | if (!netif_running(sdata->dev)) |
744 | return; | 743 | return; |
745 | 744 | ||
746 | if (local->sw_scanning || local->hw_scanning) | 745 | if (local->scanning) |
747 | return; | 746 | return; |
748 | 747 | ||
749 | if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_ADHOC)) | 748 | if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_ADHOC)) |
@@ -782,7 +781,7 @@ static void ieee80211_ibss_timer(unsigned long data) | |||
782 | } | 781 | } |
783 | 782 | ||
784 | set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); | 783 | set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); |
785 | queue_work(local->hw.workqueue, &ifibss->work); | 784 | ieee80211_queue_work(&local->hw, &ifibss->work); |
786 | } | 785 | } |
787 | 786 | ||
788 | #ifdef CONFIG_PM | 787 | #ifdef CONFIG_PM |
@@ -836,8 +835,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) | |||
836 | } | 835 | } |
837 | 836 | ||
838 | ieee80211_rx_result | 837 | ieee80211_rx_result |
839 | ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | 838 | ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) |
840 | struct ieee80211_rx_status *rx_status) | ||
841 | { | 839 | { |
842 | struct ieee80211_local *local = sdata->local; | 840 | struct ieee80211_local *local = sdata->local; |
843 | struct ieee80211_mgmt *mgmt; | 841 | struct ieee80211_mgmt *mgmt; |
@@ -852,11 +850,10 @@ ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | |||
852 | switch (fc & IEEE80211_FCTL_STYPE) { | 850 | switch (fc & IEEE80211_FCTL_STYPE) { |
853 | case IEEE80211_STYPE_PROBE_RESP: | 851 | case IEEE80211_STYPE_PROBE_RESP: |
854 | case IEEE80211_STYPE_BEACON: | 852 | case IEEE80211_STYPE_BEACON: |
855 | memcpy(skb->cb, rx_status, sizeof(*rx_status)); | ||
856 | case IEEE80211_STYPE_PROBE_REQ: | 853 | case IEEE80211_STYPE_PROBE_REQ: |
857 | case IEEE80211_STYPE_AUTH: | 854 | case IEEE80211_STYPE_AUTH: |
858 | skb_queue_tail(&sdata->u.ibss.skb_queue, skb); | 855 | skb_queue_tail(&sdata->u.ibss.skb_queue, skb); |
859 | queue_work(local->hw.workqueue, &sdata->u.ibss.work); | 856 | ieee80211_queue_work(&local->hw, &sdata->u.ibss.work); |
860 | return RX_QUEUED; | 857 | return RX_QUEUED; |
861 | } | 858 | } |
862 | 859 | ||
@@ -874,6 +871,8 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
874 | } else | 871 | } else |
875 | sdata->u.ibss.fixed_bssid = false; | 872 | sdata->u.ibss.fixed_bssid = false; |
876 | 873 | ||
874 | sdata->u.ibss.privacy = params->privacy; | ||
875 | |||
877 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; | 876 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; |
878 | 877 | ||
879 | sdata->u.ibss.channel = params->channel; | 878 | sdata->u.ibss.channel = params->channel; |
@@ -913,7 +912,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
913 | ieee80211_recalc_idle(sdata->local); | 912 | ieee80211_recalc_idle(sdata->local); |
914 | 913 | ||
915 | set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); | 914 | set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); |
916 | queue_work(sdata->local->hw.workqueue, &sdata->u.ibss.work); | 915 | ieee80211_queue_work(&sdata->local->hw, &sdata->u.ibss.work); |
917 | 916 | ||
918 | return 0; | 917 | return 0; |
919 | } | 918 | } |
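Editor's note: two conversions repeat throughout this file, the RX status is read back from skb->cb through the IEEE80211_SKB_RXCB() helper instead of an open-coded cast, and deferred work is queued with ieee80211_queue_work() on mac80211's private workqueue rather than directly on local->hw.workqueue. A minimal sketch of both, under a hypothetical example_rx_defer() helper:

	static void example_rx_defer(struct ieee80211_sub_if_data *sdata,
				     struct sk_buff *skb)
	{
		struct ieee80211_local *local = sdata->local;
		struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);

		/* the RX status still travels in skb->cb; the helper hides the cast */
		if (rx_status->flag & RX_FLAG_MMIC_ERROR)
			return;

		skb_queue_tail(&sdata->u.ibss.skb_queue, skb);
		ieee80211_queue_work(&local->hw, &sdata->u.ibss.work);
	}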
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 68eb5052179a..588005c84a6d 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | #include <linux/etherdevice.h> | 25 | #include <linux/etherdevice.h> |
26 | #include <net/cfg80211.h> | 26 | #include <net/cfg80211.h> |
27 | #include <net/iw_handler.h> | ||
28 | #include <net/mac80211.h> | 27 | #include <net/mac80211.h> |
29 | #include "key.h" | 28 | #include "key.h" |
30 | #include "sta_info.h" | 29 | #include "sta_info.h" |
@@ -213,7 +212,9 @@ struct ieee80211_if_vlan { | |||
213 | }; | 212 | }; |
214 | 213 | ||
215 | struct mesh_stats { | 214 | struct mesh_stats { |
216 | __u32 fwded_frames; /* Mesh forwarded frames */ | 215 | __u32 fwded_mcast; /* Mesh forwarded multicast frames */ |
216 | __u32 fwded_unicast; /* Mesh forwarded unicast frames */ | ||
217 | __u32 fwded_frames; /* Mesh total forwarded frames */ | ||
217 | __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/ | 218 | __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/ |
218 | __u32 dropped_frames_no_route; /* Not transmitted, no route found */ | 219 | __u32 dropped_frames_no_route; /* Not transmitted, no route found */ |
219 | atomic_t estab_plinks; | 220 | atomic_t estab_plinks; |
@@ -227,86 +228,81 @@ struct mesh_preq_queue { | |||
227 | u8 flags; | 228 | u8 flags; |
228 | }; | 229 | }; |
229 | 230 | ||
231 | enum ieee80211_mgd_state { | ||
232 | IEEE80211_MGD_STATE_IDLE, | ||
233 | IEEE80211_MGD_STATE_PROBE, | ||
234 | IEEE80211_MGD_STATE_AUTH, | ||
235 | IEEE80211_MGD_STATE_ASSOC, | ||
236 | }; | ||
237 | |||
238 | struct ieee80211_mgd_work { | ||
239 | struct list_head list; | ||
240 | struct ieee80211_bss *bss; | ||
241 | int ie_len; | ||
242 | u8 prev_bssid[ETH_ALEN]; | ||
243 | u8 ssid[IEEE80211_MAX_SSID_LEN]; | ||
244 | u8 ssid_len; | ||
245 | unsigned long timeout; | ||
246 | enum ieee80211_mgd_state state; | ||
247 | u16 auth_alg, auth_transaction; | ||
248 | |||
249 | int tries; | ||
250 | |||
251 | u8 key[WLAN_KEY_LEN_WEP104]; | ||
252 | u8 key_len, key_idx; | ||
253 | |||
254 | /* must be last */ | ||
255 | u8 ie[0]; /* for auth or assoc frame, not probe */ | ||
256 | }; | ||
257 | |||
230 | /* flags used in struct ieee80211_if_managed.flags */ | 258 | /* flags used in struct ieee80211_if_managed.flags */ |
231 | #define IEEE80211_STA_SSID_SET BIT(0) | 259 | enum ieee80211_sta_flags { |
232 | #define IEEE80211_STA_BSSID_SET BIT(1) | 260 | IEEE80211_STA_BEACON_POLL = BIT(0), |
233 | #define IEEE80211_STA_PREV_BSSID_SET BIT(2) | 261 | IEEE80211_STA_CONNECTION_POLL = BIT(1), |
234 | #define IEEE80211_STA_AUTHENTICATED BIT(3) | 262 | IEEE80211_STA_CONTROL_PORT = BIT(2), |
235 | #define IEEE80211_STA_ASSOCIATED BIT(4) | 263 | IEEE80211_STA_WMM_ENABLED = BIT(3), |
236 | #define IEEE80211_STA_PROBEREQ_POLL BIT(5) | 264 | IEEE80211_STA_DISABLE_11N = BIT(4), |
237 | #define IEEE80211_STA_CREATE_IBSS BIT(6) | 265 | IEEE80211_STA_CSA_RECEIVED = BIT(5), |
238 | #define IEEE80211_STA_CONTROL_PORT BIT(7) | 266 | IEEE80211_STA_MFP_ENABLED = BIT(6), |
239 | #define IEEE80211_STA_WMM_ENABLED BIT(8) | 267 | }; |
240 | /* hole at 9, please re-use */ | ||
241 | #define IEEE80211_STA_AUTO_SSID_SEL BIT(10) | ||
242 | #define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) | ||
243 | #define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) | ||
244 | #define IEEE80211_STA_PRIVACY_INVOKED BIT(13) | ||
245 | #define IEEE80211_STA_TKIP_WEP_USED BIT(14) | ||
246 | #define IEEE80211_STA_CSA_RECEIVED BIT(15) | ||
247 | #define IEEE80211_STA_MFP_ENABLED BIT(16) | ||
248 | #define IEEE80211_STA_EXT_SME BIT(17) | ||
249 | /* flags for MLME request */ | ||
250 | #define IEEE80211_STA_REQ_SCAN 0 | ||
251 | #define IEEE80211_STA_REQ_AUTH 1 | ||
252 | #define IEEE80211_STA_REQ_RUN 2 | ||
253 | 268 | ||
254 | /* bitfield of allowed auth algs */ | 269 | /* flags for MLME request */ |
255 | #define IEEE80211_AUTH_ALG_OPEN BIT(0) | 270 | enum ieee80211_sta_request { |
256 | #define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1) | 271 | IEEE80211_STA_REQ_SCAN, |
257 | #define IEEE80211_AUTH_ALG_LEAP BIT(2) | 272 | }; |
258 | #define IEEE80211_AUTH_ALG_FT BIT(3) | ||
259 | 273 | ||
260 | struct ieee80211_if_managed { | 274 | struct ieee80211_if_managed { |
261 | struct timer_list timer; | 275 | struct timer_list timer; |
276 | struct timer_list conn_mon_timer; | ||
277 | struct timer_list bcn_mon_timer; | ||
262 | struct timer_list chswitch_timer; | 278 | struct timer_list chswitch_timer; |
263 | struct work_struct work; | 279 | struct work_struct work; |
280 | struct work_struct monitor_work; | ||
264 | struct work_struct chswitch_work; | 281 | struct work_struct chswitch_work; |
265 | struct work_struct beacon_loss_work; | 282 | struct work_struct beacon_loss_work; |
266 | 283 | ||
267 | u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; | 284 | unsigned long probe_timeout; |
285 | int probe_send_count; | ||
268 | 286 | ||
269 | u8 ssid[IEEE80211_MAX_SSID_LEN]; | 287 | struct mutex mtx; |
270 | size_t ssid_len; | 288 | struct ieee80211_bss *associated; |
289 | struct ieee80211_mgd_work *old_associate_work; | ||
290 | struct list_head work_list; | ||
271 | 291 | ||
272 | enum { | 292 | u8 bssid[ETH_ALEN]; |
273 | IEEE80211_STA_MLME_DISABLED, | ||
274 | IEEE80211_STA_MLME_DIRECT_PROBE, | ||
275 | IEEE80211_STA_MLME_AUTHENTICATE, | ||
276 | IEEE80211_STA_MLME_ASSOCIATE, | ||
277 | IEEE80211_STA_MLME_ASSOCIATED, | ||
278 | } state; | ||
279 | 293 | ||
280 | u16 aid; | 294 | u16 aid; |
281 | u16 ap_capab, capab; | 295 | u16 capab; |
282 | u8 *extra_ie; /* to be added to the end of AssocReq */ | ||
283 | size_t extra_ie_len; | ||
284 | |||
285 | /* The last AssocReq/Resp IEs */ | ||
286 | u8 *assocreq_ies, *assocresp_ies; | ||
287 | size_t assocreq_ies_len, assocresp_ies_len; | ||
288 | 296 | ||
289 | struct sk_buff_head skb_queue; | 297 | struct sk_buff_head skb_queue; |
290 | 298 | ||
291 | int assoc_scan_tries; /* number of scans done pre-association */ | ||
292 | int direct_probe_tries; /* retries for direct probes */ | ||
293 | int auth_tries; /* retries for auth req */ | ||
294 | int assoc_tries; /* retries for assoc req */ | ||
295 | |||
296 | unsigned long timers_running; /* used for quiesce/restart */ | 299 | unsigned long timers_running; /* used for quiesce/restart */ |
297 | bool powersave; /* powersave requested for this iface */ | 300 | bool powersave; /* powersave requested for this iface */ |
298 | 301 | ||
299 | unsigned long request; | 302 | unsigned long request; |
300 | 303 | ||
301 | unsigned long last_probe; | ||
302 | unsigned long last_beacon; | ||
303 | |||
304 | unsigned int flags; | 304 | unsigned int flags; |
305 | 305 | ||
306 | unsigned int auth_algs; /* bitfield of allowed auth algs */ | ||
307 | int auth_alg; /* currently used IEEE 802.11 authentication algorithm */ | ||
308 | int auth_transaction; | ||
309 | |||
310 | u32 beacon_crc; | 306 | u32 beacon_crc; |
311 | 307 | ||
312 | enum { | 308 | enum { |
@@ -316,10 +312,6 @@ struct ieee80211_if_managed { | |||
316 | } mfp; /* management frame protection */ | 312 | } mfp; /* management frame protection */ |
317 | 313 | ||
318 | int wmm_last_param_set; | 314 | int wmm_last_param_set; |
319 | |||
320 | /* Extra IE data for management frames */ | ||
321 | u8 *sme_auth_ie; | ||
322 | size_t sme_auth_ie_len; | ||
323 | }; | 315 | }; |
324 | 316 | ||
325 | enum ieee80211_ibss_request { | 317 | enum ieee80211_ibss_request { |
@@ -339,6 +331,7 @@ struct ieee80211_if_ibss { | |||
339 | 331 | ||
340 | bool fixed_bssid; | 332 | bool fixed_bssid; |
341 | bool fixed_channel; | 333 | bool fixed_channel; |
334 | bool privacy; | ||
342 | 335 | ||
343 | u8 bssid[ETH_ALEN]; | 336 | u8 bssid[ETH_ALEN]; |
344 | u8 ssid[IEEE80211_MAX_SSID_LEN]; | 337 | u8 ssid[IEEE80211_MAX_SSID_LEN]; |
@@ -364,7 +357,7 @@ struct ieee80211_if_mesh { | |||
364 | 357 | ||
365 | unsigned long timers_running; | 358 | unsigned long timers_running; |
366 | 359 | ||
367 | bool housekeeping; | 360 | unsigned long wrkq_flags; |
368 | 361 | ||
369 | u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; | 362 | u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; |
370 | size_t mesh_id_len; | 363 | size_t mesh_id_len; |
@@ -374,6 +367,10 @@ struct ieee80211_if_mesh { | |||
374 | u8 mesh_pm_id[4]; | 367 | u8 mesh_pm_id[4]; |
375 | /* Congestion Control Mode Identifier */ | 368 | /* Congestion Control Mode Identifier */ |
376 | u8 mesh_cc_id[4]; | 369 | u8 mesh_cc_id[4]; |
370 | /* Synchronization Protocol Identifier */ | ||
371 | u8 mesh_sp_id[4]; | ||
372 | /* Authentication Protocol Identifier */ | ||
373 | u8 mesh_auth_id[4]; | ||
377 | /* Local mesh Destination Sequence Number */ | 374 | /* Local mesh Destination Sequence Number */ |
378 | u32 dsn; | 375 | u32 dsn; |
379 | /* Last used PREQ ID */ | 376 | /* Last used PREQ ID */ |
@@ -478,20 +475,9 @@ struct ieee80211_sub_if_data { | |||
478 | union { | 475 | union { |
479 | struct { | 476 | struct { |
480 | struct dentry *drop_unencrypted; | 477 | struct dentry *drop_unencrypted; |
481 | struct dentry *state; | ||
482 | struct dentry *bssid; | 478 | struct dentry *bssid; |
483 | struct dentry *prev_bssid; | ||
484 | struct dentry *ssid_len; | ||
485 | struct dentry *aid; | 479 | struct dentry *aid; |
486 | struct dentry *ap_capab; | ||
487 | struct dentry *capab; | 480 | struct dentry *capab; |
488 | struct dentry *extra_ie_len; | ||
489 | struct dentry *auth_tries; | ||
490 | struct dentry *assoc_tries; | ||
491 | struct dentry *auth_algs; | ||
492 | struct dentry *auth_alg; | ||
493 | struct dentry *auth_transaction; | ||
494 | struct dentry *flags; | ||
495 | struct dentry *force_unicast_rateidx; | 481 | struct dentry *force_unicast_rateidx; |
496 | struct dentry *max_ratectrl_rateidx; | 482 | struct dentry *max_ratectrl_rateidx; |
497 | } sta; | 483 | } sta; |
@@ -526,6 +512,8 @@ struct ieee80211_sub_if_data { | |||
526 | #ifdef CONFIG_MAC80211_MESH | 512 | #ifdef CONFIG_MAC80211_MESH |
527 | struct dentry *mesh_stats_dir; | 513 | struct dentry *mesh_stats_dir; |
528 | struct { | 514 | struct { |
515 | struct dentry *fwded_mcast; | ||
516 | struct dentry *fwded_unicast; | ||
529 | struct dentry *fwded_frames; | 517 | struct dentry *fwded_frames; |
530 | struct dentry *dropped_frames_ttl; | 518 | struct dentry *dropped_frames_ttl; |
531 | struct dentry *dropped_frames_no_route; | 519 | struct dentry *dropped_frames_no_route; |
@@ -588,12 +576,44 @@ enum queue_stop_reason { | |||
588 | IEEE80211_QUEUE_STOP_REASON_CSA, | 576 | IEEE80211_QUEUE_STOP_REASON_CSA, |
589 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION, | 577 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION, |
590 | IEEE80211_QUEUE_STOP_REASON_SUSPEND, | 578 | IEEE80211_QUEUE_STOP_REASON_SUSPEND, |
591 | IEEE80211_QUEUE_STOP_REASON_PENDING, | ||
592 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD, | 579 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD, |
593 | }; | 580 | }; |
594 | 581 | ||
595 | struct ieee80211_master_priv { | 582 | /** |
596 | struct ieee80211_local *local; | 583 | * mac80211 scan flags - currently active scan mode |
584 | * | ||
585 | * @SCAN_SW_SCANNING: We're currently in the process of scanning but may as | ||
586 | * well be on the operating channel | ||
587 | * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to | ||
588 | * determine if we are on the operating channel or not | ||
589 | * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning, | ||
590 | * gets only set in conjunction with SCAN_SW_SCANNING | ||
591 | */ | ||
592 | enum { | ||
593 | SCAN_SW_SCANNING, | ||
594 | SCAN_HW_SCANNING, | ||
595 | SCAN_OFF_CHANNEL, | ||
596 | }; | ||
597 | |||
598 | /** | ||
599 | * enum mac80211_scan_state - scan state machine states | ||
600 | * | ||
601 | * @SCAN_DECISION: Main entry point to the scan state machine, this state | ||
602 | * determines if we should keep on scanning or switch back to the | ||
603 | * operating channel | ||
604 | * @SCAN_SET_CHANNEL: Set the next channel to be scanned | ||
605 | * @SCAN_SEND_PROBE: Send probe requests and wait for probe responses | ||
606 | * @SCAN_LEAVE_OPER_CHANNEL: Leave the operating channel, notify the AP | ||
607 | * about us leaving the channel and stop all associated STA interfaces | ||
608 | * @SCAN_ENTER_OPER_CHANNEL: Enter the operating channel again, notify the | ||
609 | * AP about us being back and restart all associated STA interfaces | ||
610 | */ | ||
611 | enum mac80211_scan_state { | ||
612 | SCAN_DECISION, | ||
613 | SCAN_SET_CHANNEL, | ||
614 | SCAN_SEND_PROBE, | ||
615 | SCAN_LEAVE_OPER_CHANNEL, | ||
616 | SCAN_ENTER_OPER_CHANNEL, | ||
597 | }; | 617 | }; |
598 | 618 | ||
599 | struct ieee80211_local { | 619 | struct ieee80211_local { |
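Editor's note: the SCAN_SW_SCANNING/SCAN_HW_SCANNING/SCAN_OFF_CHANNEL values documented above are bit numbers in the new local->scanning bitfield, introduced further down in place of the removed sw_scanning/hw_scanning booleans. A minimal sketch of how a caller could check them with the standard bitops; sites that only care whether any scan is running can simply test local->scanning, as the ibss.c hunk earlier in this diff does.

	static bool example_scan_in_progress(struct ieee80211_local *local)
	{
		/* either a software or a hardware scan counts as scanning */
		return test_bit(SCAN_SW_SCANNING, &local->scanning) ||
		       test_bit(SCAN_HW_SCANNING, &local->scanning);
	}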
@@ -604,17 +624,33 @@ struct ieee80211_local { | |||
604 | 624 | ||
605 | const struct ieee80211_ops *ops; | 625 | const struct ieee80211_ops *ops; |
606 | 626 | ||
627 | /* | ||
628 | * private workqueue to mac80211. mac80211 makes this accessible | ||
629 | * via ieee80211_queue_work() | ||
630 | */ | ||
631 | struct workqueue_struct *workqueue; | ||
632 | |||
607 | unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES]; | 633 | unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES]; |
608 | /* also used to protect ampdu_ac_queue and ampdu_ac_stop_refcnt */ | 634 | /* also used to protect ampdu_ac_queue and ampdu_ac_stop_refcnt */ |
609 | spinlock_t queue_stop_reason_lock; | 635 | spinlock_t queue_stop_reason_lock; |
610 | 636 | ||
611 | struct net_device *mdev; /* wmaster# - "master" 802.11 device */ | ||
612 | int open_count; | 637 | int open_count; |
613 | int monitors, cooked_mntrs; | 638 | int monitors, cooked_mntrs; |
614 | /* number of interfaces with corresponding FIF_ flags */ | 639 | /* number of interfaces with corresponding FIF_ flags */ |
615 | int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss; | 640 | int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll; |
616 | unsigned int filter_flags; /* FIF_* */ | 641 | unsigned int filter_flags; /* FIF_* */ |
617 | struct iw_statistics wstats; | 642 | struct iw_statistics wstats; |
643 | |||
644 | /* protects the aggregated multicast list and filter calls */ | ||
645 | spinlock_t filter_lock; | ||
646 | |||
647 | /* used for uploading changed mc list */ | ||
648 | struct work_struct reconfig_filter; | ||
649 | |||
650 | /* aggregated multicast list */ | ||
651 | struct dev_addr_list *mc_list; | ||
652 | int mc_count; | ||
653 | |||
618 | bool tim_in_locked_section; /* see ieee80211_beacon_get() */ | 654 | bool tim_in_locked_section; /* see ieee80211_beacon_get() */ |
619 | 655 | ||
620 | /* | 656 | /* |
@@ -631,6 +667,9 @@ struct ieee80211_local { | |||
631 | */ | 667 | */ |
632 | bool quiescing; | 668 | bool quiescing; |
633 | 669 | ||
670 | /* device is started */ | ||
671 | bool started; | ||
672 | |||
634 | int tx_headroom; /* required headroom for hardware/radiotap */ | 673 | int tx_headroom; /* required headroom for hardware/radiotap */ |
635 | 674 | ||
636 | /* Tasklet and skb queue to process calls from IRQ mode. All frames | 675 | /* Tasklet and skb queue to process calls from IRQ mode. All frames |
@@ -653,6 +692,7 @@ struct ieee80211_local { | |||
653 | struct list_head sta_list; | 692 | struct list_head sta_list; |
654 | struct sta_info *sta_hash[STA_HASH_SIZE]; | 693 | struct sta_info *sta_hash[STA_HASH_SIZE]; |
655 | struct timer_list sta_cleanup; | 694 | struct timer_list sta_cleanup; |
695 | int sta_generation; | ||
656 | 696 | ||
657 | struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; | 697 | struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; |
658 | struct tasklet_struct tx_pending_tasklet; | 698 | struct tasklet_struct tx_pending_tasklet; |
@@ -687,9 +727,9 @@ struct ieee80211_local { | |||
687 | 727 | ||
688 | /* Scanning and BSS list */ | 728 | /* Scanning and BSS list */ |
689 | struct mutex scan_mtx; | 729 | struct mutex scan_mtx; |
690 | bool sw_scanning, hw_scanning; | 730 | unsigned long scanning; |
691 | struct cfg80211_ssid scan_ssid; | 731 | struct cfg80211_ssid scan_ssid; |
692 | struct cfg80211_scan_request int_scan_req; | 732 | struct cfg80211_scan_request *int_scan_req; |
693 | struct cfg80211_scan_request *scan_req; | 733 | struct cfg80211_scan_request *scan_req; |
694 | struct ieee80211_channel *scan_channel; | 734 | struct ieee80211_channel *scan_channel; |
695 | const u8 *orig_ies; | 735 | const u8 *orig_ies; |
@@ -697,7 +737,7 @@ struct ieee80211_local { | |||
697 | int scan_channel_idx; | 737 | int scan_channel_idx; |
698 | int scan_ies_len; | 738 | int scan_ies_len; |
699 | 739 | ||
700 | enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; | 740 | enum mac80211_scan_state next_scan_state; |
701 | struct delayed_work scan_work; | 741 | struct delayed_work scan_work; |
702 | struct ieee80211_sub_if_data *scan_sdata; | 742 | struct ieee80211_sub_if_data *scan_sdata; |
703 | enum nl80211_channel_type oper_channel_type; | 743 | enum nl80211_channel_type oper_channel_type; |
@@ -834,10 +874,6 @@ struct ieee80211_local { | |||
834 | static inline struct ieee80211_sub_if_data * | 874 | static inline struct ieee80211_sub_if_data * |
835 | IEEE80211_DEV_TO_SUB_IF(struct net_device *dev) | 875 | IEEE80211_DEV_TO_SUB_IF(struct net_device *dev) |
836 | { | 876 | { |
837 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
838 | |||
839 | BUG_ON(!local || local->mdev == dev); | ||
840 | |||
841 | return netdev_priv(dev); | 877 | return netdev_priv(dev); |
842 | } | 878 | } |
843 | 879 | ||
@@ -937,21 +973,20 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | |||
937 | void ieee80211_configure_filter(struct ieee80211_local *local); | 973 | void ieee80211_configure_filter(struct ieee80211_local *local); |
938 | u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); | 974 | u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); |
939 | 975 | ||
940 | /* wireless extensions */ | ||
941 | extern const struct iw_handler_def ieee80211_iw_handler_def; | ||
942 | |||
943 | /* STA code */ | 976 | /* STA code */ |
944 | void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata); | 977 | void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata); |
978 | int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, | ||
979 | struct cfg80211_auth_request *req); | ||
980 | int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | ||
981 | struct cfg80211_assoc_request *req); | ||
982 | int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | ||
983 | struct cfg80211_deauth_request *req, | ||
984 | void *cookie); | ||
985 | int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, | ||
986 | struct cfg80211_disassoc_request *req, | ||
987 | void *cookie); | ||
945 | ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, | 988 | ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, |
946 | struct sk_buff *skb, | 989 | struct sk_buff *skb); |
947 | struct ieee80211_rx_status *rx_status); | ||
948 | int ieee80211_sta_commit(struct ieee80211_sub_if_data *sdata); | ||
949 | int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len); | ||
950 | int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len); | ||
951 | int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid); | ||
952 | void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata); | ||
953 | int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason); | ||
954 | int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason); | ||
955 | void ieee80211_send_pspoll(struct ieee80211_local *local, | 990 | void ieee80211_send_pspoll(struct ieee80211_local *local, |
956 | struct ieee80211_sub_if_data *sdata); | 991 | struct ieee80211_sub_if_data *sdata); |
957 | void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); | 992 | void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); |
@@ -967,8 +1002,7 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); | |||
967 | void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); | 1002 | void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); |
968 | void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); | 1003 | void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); |
969 | ieee80211_rx_result | 1004 | ieee80211_rx_result |
970 | ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | 1005 | ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); |
971 | struct ieee80211_rx_status *rx_status); | ||
972 | struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, | 1006 | struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, |
973 | u8 *bssid, u8 *addr, u32 supp_rates); | 1007 | u8 *bssid, u8 *addr, u32 supp_rates); |
974 | int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | 1008 | int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, |
@@ -983,16 +1017,9 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, | |||
983 | const u8 *ssid, u8 ssid_len); | 1017 | const u8 *ssid, u8 ssid_len); |
984 | int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, | 1018 | int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, |
985 | struct cfg80211_scan_request *req); | 1019 | struct cfg80211_scan_request *req); |
986 | int ieee80211_scan_results(struct ieee80211_local *local, | ||
987 | struct iw_request_info *info, | ||
988 | char *buf, size_t len); | ||
989 | void ieee80211_scan_cancel(struct ieee80211_local *local); | 1020 | void ieee80211_scan_cancel(struct ieee80211_local *local); |
990 | ieee80211_rx_result | 1021 | ieee80211_rx_result |
991 | ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, | 1022 | ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); |
992 | struct sk_buff *skb, | ||
993 | struct ieee80211_rx_status *rx_status); | ||
994 | int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, | ||
995 | const char *ie, size_t len); | ||
996 | 1023 | ||
997 | void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); | 1024 | void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); |
998 | struct ieee80211_bss * | 1025 | struct ieee80211_bss * |
@@ -1008,8 +1035,6 @@ ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, | |||
1008 | u8 *ssid, u8 ssid_len); | 1035 | u8 *ssid, u8 ssid_len); |
1009 | void ieee80211_rx_bss_put(struct ieee80211_local *local, | 1036 | void ieee80211_rx_bss_put(struct ieee80211_local *local, |
1010 | struct ieee80211_bss *bss); | 1037 | struct ieee80211_bss *bss); |
1011 | void ieee80211_rx_bss_remove(struct ieee80211_sub_if_data *sdata, u8 *bssid, | ||
1012 | int freq, u8 *ssid, u8 ssid_len); | ||
1013 | 1038 | ||
1014 | /* interface handling */ | 1039 | /* interface handling */ |
1015 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, | 1040 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, |
@@ -1025,9 +1050,10 @@ void ieee80211_recalc_idle(struct ieee80211_local *local); | |||
1025 | /* tx handling */ | 1050 | /* tx handling */ |
1026 | void ieee80211_clear_tx_pending(struct ieee80211_local *local); | 1051 | void ieee80211_clear_tx_pending(struct ieee80211_local *local); |
1027 | void ieee80211_tx_pending(unsigned long data); | 1052 | void ieee80211_tx_pending(unsigned long data); |
1028 | int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev); | 1053 | netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, |
1029 | int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); | 1054 | struct net_device *dev); |
1030 | int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); | 1055 | netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, |
1056 | struct net_device *dev); | ||
1031 | 1057 | ||
1032 | /* HT */ | 1058 | /* HT */ |
1033 | void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, | 1059 | void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, |
@@ -1065,6 +1091,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, | |||
1065 | 1091 | ||
1066 | /* Suspend/resume and hw reconfiguration */ | 1092 | /* Suspend/resume and hw reconfiguration */ |
1067 | int ieee80211_reconfig(struct ieee80211_local *local); | 1093 | int ieee80211_reconfig(struct ieee80211_local *local); |
1094 | void ieee80211_stop_device(struct ieee80211_local *local); | ||
1068 | 1095 | ||
1069 | #ifdef CONFIG_PM | 1096 | #ifdef CONFIG_PM |
1070 | int __ieee80211_suspend(struct ieee80211_hw *hw); | 1097 | int __ieee80211_suspend(struct ieee80211_hw *hw); |
@@ -1092,7 +1119,8 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, | |||
1092 | int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, | 1119 | int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, |
1093 | int rate, int erp, int short_preamble); | 1120 | int rate, int erp, int short_preamble); |
1094 | void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, | 1121 | void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, |
1095 | struct ieee80211_hdr *hdr, const u8 *tsc); | 1122 | struct ieee80211_hdr *hdr, const u8 *tsc, |
1123 | gfp_t gfp); | ||
1096 | void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata); | 1124 | void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata); |
1097 | void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | 1125 | void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, |
1098 | int encrypt); | 1126 | int encrypt); |
@@ -1129,8 +1157,8 @@ int ieee80211_add_pending_skbs(struct ieee80211_local *local, | |||
1129 | 1157 | ||
1130 | void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, | 1158 | void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, |
1131 | u16 transaction, u16 auth_alg, | 1159 | u16 transaction, u16 auth_alg, |
1132 | u8 *extra, size_t extra_len, | 1160 | u8 *extra, size_t extra_len, const u8 *bssid, |
1133 | const u8 *bssid, int encrypt); | 1161 | const u8 *key, u8 key_len, u8 key_idx); |
1134 | int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, | 1162 | int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, |
1135 | const u8 *ie, size_t ie_len); | 1163 | const u8 *ie, size_t ie_len); |
1136 | void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, | 1164 | void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, |
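Editor's note: ieee80211_send_auth() now takes the WEP key material (key, key_len, key_idx) directly instead of the old bare encrypt flag, so a shared-key caller passes the key it wants the challenge handled with, while open-system callers pass no key at all. A minimal sketch of an open-system call site matching the IBSS conversion earlier in this diff; example_send_open_auth and the transaction number 1 (an initiating STA) are illustrative.

	static void example_send_open_auth(struct ieee80211_sub_if_data *sdata,
					   const u8 *bssid)
	{
		/* open authentication, first frame: no extra IEs, no WEP key */
		ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0,
				    bssid, NULL, 0, 0);
	}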
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index b7c8a4484298..b8295cbd7e8f 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -190,10 +190,6 @@ static int ieee80211_open(struct net_device *dev) | |||
190 | ETH_ALEN); | 190 | ETH_ALEN); |
191 | } | 191 | } |
192 | 192 | ||
193 | if (compare_ether_addr(null_addr, local->mdev->dev_addr) == 0) | ||
194 | memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, | ||
195 | ETH_ALEN); | ||
196 | |||
197 | /* | 193 | /* |
198 | * Validate the MAC address for this device. | 194 | * Validate the MAC address for this device. |
199 | */ | 195 | */ |
@@ -224,18 +220,15 @@ static int ieee80211_open(struct net_device *dev) | |||
224 | local->fif_fcsfail++; | 220 | local->fif_fcsfail++; |
225 | if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL) | 221 | if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL) |
226 | local->fif_plcpfail++; | 222 | local->fif_plcpfail++; |
227 | if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) | 223 | if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) { |
228 | local->fif_control++; | 224 | local->fif_control++; |
225 | local->fif_pspoll++; | ||
226 | } | ||
229 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) | 227 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) |
230 | local->fif_other_bss++; | 228 | local->fif_other_bss++; |
231 | 229 | ||
232 | netif_addr_lock_bh(local->mdev); | ||
233 | ieee80211_configure_filter(local); | 230 | ieee80211_configure_filter(local); |
234 | netif_addr_unlock_bh(local->mdev); | ||
235 | break; | 231 | break; |
236 | case NL80211_IFTYPE_STATION: | ||
237 | sdata->u.mgd.flags &= ~IEEE80211_STA_PREV_BSSID_SET; | ||
238 | /* fall through */ | ||
239 | default: | 232 | default: |
240 | conf.vif = &sdata->vif; | 233 | conf.vif = &sdata->vif; |
241 | conf.type = sdata->vif.type; | 234 | conf.type = sdata->vif.type; |
@@ -246,12 +239,15 @@ static int ieee80211_open(struct net_device *dev) | |||
246 | 239 | ||
247 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | 240 | if (ieee80211_vif_is_mesh(&sdata->vif)) { |
248 | local->fif_other_bss++; | 241 | local->fif_other_bss++; |
249 | netif_addr_lock_bh(local->mdev); | ||
250 | ieee80211_configure_filter(local); | 242 | ieee80211_configure_filter(local); |
251 | netif_addr_unlock_bh(local->mdev); | ||
252 | 243 | ||
253 | ieee80211_start_mesh(sdata); | 244 | ieee80211_start_mesh(sdata); |
245 | } else if (sdata->vif.type == NL80211_IFTYPE_AP) { | ||
246 | local->fif_pspoll++; | ||
247 | |||
248 | ieee80211_configure_filter(local); | ||
254 | } | 249 | } |
250 | |||
255 | changed |= ieee80211_reset_erp_info(sdata); | 251 | changed |= ieee80211_reset_erp_info(sdata); |
256 | ieee80211_bss_info_change_notify(sdata, changed); | 252 | ieee80211_bss_info_change_notify(sdata, changed); |
257 | ieee80211_enable_keys(sdata); | 253 | ieee80211_enable_keys(sdata); |
@@ -281,15 +277,6 @@ static int ieee80211_open(struct net_device *dev) | |||
281 | } | 277 | } |
282 | } | 278 | } |
283 | 279 | ||
284 | if (local->open_count == 0) { | ||
285 | res = dev_open(local->mdev); | ||
286 | WARN_ON(res); | ||
287 | if (res) | ||
288 | goto err_del_interface; | ||
289 | tasklet_enable(&local->tx_pending_tasklet); | ||
290 | tasklet_enable(&local->tasklet); | ||
291 | } | ||
292 | |||
293 | /* | 280 | /* |
294 | * set_multicast_list will be invoked by the networking core | 281 | * set_multicast_list will be invoked by the networking core |
295 | * which will check whether any increments here were done in | 282 | * which will check whether any increments here were done in |
@@ -323,7 +310,7 @@ static int ieee80211_open(struct net_device *dev) | |||
323 | * to fix this. | 310 | * to fix this. |
324 | */ | 311 | */ |
325 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | 312 | if (sdata->vif.type == NL80211_IFTYPE_STATION) |
326 | queue_work(local->hw.workqueue, &sdata->u.mgd.work); | 313 | ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); |
327 | 314 | ||
328 | netif_tx_start_all_queues(dev); | 315 | netif_tx_start_all_queues(dev); |
329 | 316 | ||
@@ -346,7 +333,10 @@ static int ieee80211_stop(struct net_device *dev) | |||
346 | struct ieee80211_local *local = sdata->local; | 333 | struct ieee80211_local *local = sdata->local; |
347 | struct ieee80211_if_init_conf conf; | 334 | struct ieee80211_if_init_conf conf; |
348 | struct sta_info *sta; | 335 | struct sta_info *sta; |
336 | unsigned long flags; | ||
337 | struct sk_buff *skb, *tmp; | ||
349 | u32 hw_reconf_flags = 0; | 338 | u32 hw_reconf_flags = 0; |
339 | int i; | ||
350 | 340 | ||
351 | /* | 341 | /* |
352 | * Stop TX on this interface first. | 342 | * Stop TX on this interface first. |
@@ -366,18 +356,6 @@ static int ieee80211_stop(struct net_device *dev) | |||
366 | rcu_read_unlock(); | 356 | rcu_read_unlock(); |
367 | 357 | ||
368 | /* | 358 | /* |
369 | * Announce that we are leaving the network, in case we are a | ||
370 | * station interface type. This must be done before removing | ||
371 | * all stations associated with sta_info_flush, otherwise STA | ||
372 | * information will be gone and no announce being done. | ||
373 | */ | ||
374 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
375 | if (sdata->u.mgd.state != IEEE80211_STA_MLME_DISABLED) | ||
376 | ieee80211_sta_deauthenticate(sdata, | ||
377 | WLAN_REASON_DEAUTH_LEAVING); | ||
378 | } | ||
379 | |||
380 | /* | ||
381 | * Remove all stations associated with this interface. | 359 | * Remove all stations associated with this interface. |
382 | * | 360 | * |
383 | * This must be done before calling ops->remove_interface() | 361 | * This must be done before calling ops->remove_interface() |
@@ -408,13 +386,24 @@ static int ieee80211_stop(struct net_device *dev) | |||
408 | if (sdata->flags & IEEE80211_SDATA_PROMISC) | 386 | if (sdata->flags & IEEE80211_SDATA_PROMISC) |
409 | atomic_dec(&local->iff_promiscs); | 387 | atomic_dec(&local->iff_promiscs); |
410 | 388 | ||
411 | dev_mc_unsync(local->mdev, dev); | 389 | if (sdata->vif.type == NL80211_IFTYPE_AP) |
390 | local->fif_pspoll--; | ||
391 | |||
392 | netif_addr_lock_bh(dev); | ||
393 | spin_lock_bh(&local->filter_lock); | ||
394 | __dev_addr_unsync(&local->mc_list, &local->mc_count, | ||
395 | &dev->mc_list, &dev->mc_count); | ||
396 | spin_unlock_bh(&local->filter_lock); | ||
397 | netif_addr_unlock_bh(dev); | ||
398 | |||
399 | ieee80211_configure_filter(local); | ||
400 | |||
412 | del_timer_sync(&local->dynamic_ps_timer); | 401 | del_timer_sync(&local->dynamic_ps_timer); |
413 | cancel_work_sync(&local->dynamic_ps_enable_work); | 402 | cancel_work_sync(&local->dynamic_ps_enable_work); |
414 | 403 | ||
415 | /* APs need special treatment */ | 404 | /* APs need special treatment */ |
416 | if (sdata->vif.type == NL80211_IFTYPE_AP) { | 405 | if (sdata->vif.type == NL80211_IFTYPE_AP) { |
417 | struct ieee80211_sub_if_data *vlan, *tmp; | 406 | struct ieee80211_sub_if_data *vlan, *tmpsdata; |
418 | struct beacon_data *old_beacon = sdata->u.ap.beacon; | 407 | struct beacon_data *old_beacon = sdata->u.ap.beacon; |
419 | 408 | ||
420 | /* remove beacon */ | 409 | /* remove beacon */ |
@@ -423,7 +412,7 @@ static int ieee80211_stop(struct net_device *dev) | |||
423 | kfree(old_beacon); | 412 | kfree(old_beacon); |
424 | 413 | ||
425 | /* down all dependent devices, that is VLANs */ | 414 | /* down all dependent devices, that is VLANs */ |
426 | list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans, | 415 | list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, |
427 | u.vlan.list) | 416 | u.vlan.list) |
428 | dev_close(vlan->dev); | 417 | dev_close(vlan->dev); |
429 | WARN_ON(!list_empty(&sdata->u.ap.vlans)); | 418 | WARN_ON(!list_empty(&sdata->u.ap.vlans)); |
@@ -452,29 +441,30 @@ static int ieee80211_stop(struct net_device *dev) | |||
452 | local->fif_fcsfail--; | 441 | local->fif_fcsfail--; |
453 | if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL) | 442 | if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL) |
454 | local->fif_plcpfail--; | 443 | local->fif_plcpfail--; |
455 | if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) | 444 | if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) { |
445 | local->fif_pspoll--; | ||
456 | local->fif_control--; | 446 | local->fif_control--; |
447 | } | ||
457 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) | 448 | if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS) |
458 | local->fif_other_bss--; | 449 | local->fif_other_bss--; |
459 | 450 | ||
460 | netif_addr_lock_bh(local->mdev); | ||
461 | ieee80211_configure_filter(local); | 451 | ieee80211_configure_filter(local); |
462 | netif_addr_unlock_bh(local->mdev); | ||
463 | break; | 452 | break; |
464 | case NL80211_IFTYPE_STATION: | 453 | case NL80211_IFTYPE_STATION: |
465 | memset(sdata->u.mgd.bssid, 0, ETH_ALEN); | ||
466 | del_timer_sync(&sdata->u.mgd.chswitch_timer); | 454 | del_timer_sync(&sdata->u.mgd.chswitch_timer); |
467 | del_timer_sync(&sdata->u.mgd.timer); | 455 | del_timer_sync(&sdata->u.mgd.timer); |
456 | del_timer_sync(&sdata->u.mgd.conn_mon_timer); | ||
457 | del_timer_sync(&sdata->u.mgd.bcn_mon_timer); | ||
468 | /* | 458 | /* |
469 | * If the timer fired while we waited for it, it will have | 459 | * If any of the timers fired while we waited for it, it will |
470 | * requeued the work. Now the work will be running again | 460 | * have queued its work. Now the work will be running again |
471 | * but will not rearm the timer again because it checks | 461 | * but will not rearm the timer again because it checks |
472 | * whether the interface is running, which, at this point, | 462 | * whether the interface is running, which, at this point, |
473 | * it no longer is. | 463 | * it no longer is. |
474 | */ | 464 | */ |
475 | cancel_work_sync(&sdata->u.mgd.work); | 465 | cancel_work_sync(&sdata->u.mgd.work); |
476 | cancel_work_sync(&sdata->u.mgd.chswitch_work); | 466 | cancel_work_sync(&sdata->u.mgd.chswitch_work); |
477 | 467 | cancel_work_sync(&sdata->u.mgd.monitor_work); | |
478 | cancel_work_sync(&sdata->u.mgd.beacon_loss_work); | 468 | cancel_work_sync(&sdata->u.mgd.beacon_loss_work); |
479 | 469 | ||
480 | /* | 470 | /* |
@@ -485,12 +475,6 @@ static int ieee80211_stop(struct net_device *dev) | |||
485 | */ | 475 | */ |
486 | synchronize_rcu(); | 476 | synchronize_rcu(); |
487 | skb_queue_purge(&sdata->u.mgd.skb_queue); | 477 | skb_queue_purge(&sdata->u.mgd.skb_queue); |
488 | |||
489 | sdata->u.mgd.flags &= ~(IEEE80211_STA_PRIVACY_INVOKED | | ||
490 | IEEE80211_STA_TKIP_WEP_USED); | ||
491 | kfree(sdata->u.mgd.extra_ie); | ||
492 | sdata->u.mgd.extra_ie = NULL; | ||
493 | sdata->u.mgd.extra_ie_len = 0; | ||
494 | /* fall through */ | 478 | /* fall through */ |
495 | case NL80211_IFTYPE_ADHOC: | 479 | case NL80211_IFTYPE_ADHOC: |
496 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { | 480 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { |
@@ -507,37 +491,23 @@ static int ieee80211_stop(struct net_device *dev) | |||
507 | local->fif_other_bss--; | 491 | local->fif_other_bss--; |
508 | atomic_dec(&local->iff_allmultis); | 492 | atomic_dec(&local->iff_allmultis); |
509 | 493 | ||
510 | netif_addr_lock_bh(local->mdev); | ||
511 | ieee80211_configure_filter(local); | 494 | ieee80211_configure_filter(local); |
512 | netif_addr_unlock_bh(local->mdev); | ||
513 | 495 | ||
514 | ieee80211_stop_mesh(sdata); | 496 | ieee80211_stop_mesh(sdata); |
515 | } | 497 | } |
516 | /* fall through */ | 498 | /* fall through */ |
517 | default: | 499 | default: |
518 | if (local->scan_sdata == sdata) { | 500 | if (local->scan_sdata == sdata) |
519 | if (!local->ops->hw_scan) | 501 | ieee80211_scan_cancel(local); |
520 | cancel_delayed_work_sync(&local->scan_work); | 502 | |
521 | /* | 503 | /* |
522 | * The software scan can no longer run now, so we can | 504 | * Disable beaconing for AP and mesh, IBSS can't |
523 | * clear out the scan_sdata reference. However, the | 505 | * still be joined to a network at this point. |
524 | * hardware scan may still be running. The complete | 506 | */ |
525 | * function must be prepared to handle a NULL value. | 507 | if (sdata->vif.type == NL80211_IFTYPE_AP || |
526 | */ | 508 | sdata->vif.type == NL80211_IFTYPE_MESH_POINT) { |
527 | local->scan_sdata = NULL; | 509 | ieee80211_bss_info_change_notify(sdata, |
528 | /* | 510 | BSS_CHANGED_BEACON_ENABLED); |
529 | * The memory barrier guarantees that another CPU | ||
530 | * that is hardware-scanning will now see the fact | ||
531 | * that this interface is gone. | ||
532 | */ | ||
533 | smp_mb(); | ||
534 | /* | ||
535 | * If software scanning, complete the scan but since | ||
536 | * the scan_sdata is NULL already don't send out a | ||
537 | * scan event to userspace -- the scan is incomplete. | ||
538 | */ | ||
539 | if (local->sw_scanning) | ||
540 | ieee80211_scan_completed(&local->hw, true); | ||
541 | } | 511 | } |
542 | 512 | ||
543 | conf.vif = &sdata->vif; | 513 | conf.vif = &sdata->vif; |
@@ -555,17 +525,8 @@ static int ieee80211_stop(struct net_device *dev) | |||
555 | ieee80211_recalc_ps(local, -1); | 525 | ieee80211_recalc_ps(local, -1); |
556 | 526 | ||
557 | if (local->open_count == 0) { | 527 | if (local->open_count == 0) { |
558 | if (netif_running(local->mdev)) | 528 | ieee80211_clear_tx_pending(local); |
559 | dev_close(local->mdev); | 529 | ieee80211_stop_device(local); |
560 | |||
561 | drv_stop(local); | ||
562 | |||
563 | ieee80211_led_radio(local, false); | ||
564 | |||
565 | flush_workqueue(local->hw.workqueue); | ||
566 | |||
567 | tasklet_disable(&local->tx_pending_tasklet); | ||
568 | tasklet_disable(&local->tasklet); | ||
569 | 530 | ||
570 | /* no reconfiguring after stop! */ | 531 | /* no reconfiguring after stop! */ |
571 | hw_reconf_flags = 0; | 532 | hw_reconf_flags = 0; |
@@ -575,6 +536,18 @@ static int ieee80211_stop(struct net_device *dev) | |||
575 | if (hw_reconf_flags) | 536 | if (hw_reconf_flags) |
576 | ieee80211_hw_config(local, hw_reconf_flags); | 537 | ieee80211_hw_config(local, hw_reconf_flags); |
577 | 538 | ||
539 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | ||
540 | for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { | ||
541 | skb_queue_walk_safe(&local->pending[i], skb, tmp) { | ||
542 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
543 | if (info->control.vif == &sdata->vif) { | ||
544 | __skb_unlink(skb, &local->pending[i]); | ||
545 | dev_kfree_skb_irq(skb); | ||
546 | } | ||
547 | } | ||
548 | } | ||
549 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | ||
550 | |||
578 | return 0; | 551 | return 0; |
579 | } | 552 | } |
580 | 553 | ||
@@ -604,8 +577,11 @@ static void ieee80211_set_multicast_list(struct net_device *dev) | |||
604 | atomic_dec(&local->iff_promiscs); | 577 | atomic_dec(&local->iff_promiscs); |
605 | sdata->flags ^= IEEE80211_SDATA_PROMISC; | 578 | sdata->flags ^= IEEE80211_SDATA_PROMISC; |
606 | } | 579 | } |
607 | 580 | spin_lock_bh(&local->filter_lock); | |
608 | dev_mc_sync(local->mdev, dev); | 581 | __dev_addr_sync(&local->mc_list, &local->mc_count, |
582 | &dev->mc_list, &dev->mc_count); | ||
583 | spin_unlock_bh(&local->filter_lock); | ||
584 | ieee80211_queue_work(&local->hw, &local->reconfig_filter); | ||
609 | } | 585 | } |
610 | 586 | ||
611 | /* | 587 | /* |
@@ -652,11 +628,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev) | |||
652 | kfree_skb(sdata->u.ibss.presp); | 628 | kfree_skb(sdata->u.ibss.presp); |
653 | break; | 629 | break; |
654 | case NL80211_IFTYPE_STATION: | 630 | case NL80211_IFTYPE_STATION: |
655 | kfree(sdata->u.mgd.extra_ie); | ||
656 | kfree(sdata->u.mgd.assocreq_ies); | ||
657 | kfree(sdata->u.mgd.assocresp_ies); | ||
658 | kfree(sdata->u.mgd.sme_auth_ie); | ||
659 | break; | ||
660 | case NL80211_IFTYPE_WDS: | 631 | case NL80211_IFTYPE_WDS: |
661 | case NL80211_IFTYPE_AP_VLAN: | 632 | case NL80211_IFTYPE_AP_VLAN: |
662 | case NL80211_IFTYPE_MONITOR: | 633 | case NL80211_IFTYPE_MONITOR: |
@@ -695,7 +666,6 @@ static void ieee80211_if_setup(struct net_device *dev) | |||
695 | { | 666 | { |
696 | ether_setup(dev); | 667 | ether_setup(dev); |
697 | dev->netdev_ops = &ieee80211_dataif_ops; | 668 | dev->netdev_ops = &ieee80211_dataif_ops; |
698 | dev->wireless_handlers = &ieee80211_iw_handler_def; | ||
699 | dev->destructor = free_netdev; | 669 | dev->destructor = free_netdev; |
700 | } | 670 | } |
701 | 671 | ||
@@ -784,6 +754,10 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | |||
784 | return 0; | 754 | return 0; |
785 | } | 755 | } |
786 | 756 | ||
757 | static struct device_type wiphy_type = { | ||
758 | .name = "wlan", | ||
759 | }; | ||
760 | |||
787 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, | 761 | int ieee80211_if_add(struct ieee80211_local *local, const char *name, |
788 | struct net_device **new_dev, enum nl80211_iftype type, | 762 | struct net_device **new_dev, enum nl80211_iftype type, |
789 | struct vif_params *params) | 763 | struct vif_params *params) |
@@ -798,6 +772,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
798 | name, ieee80211_if_setup); | 772 | name, ieee80211_if_setup); |
799 | if (!ndev) | 773 | if (!ndev) |
800 | return -ENOMEM; | 774 | return -ENOMEM; |
775 | dev_net_set(ndev, wiphy_net(local->hw.wiphy)); | ||
801 | 776 | ||
802 | ndev->needed_headroom = local->tx_headroom + | 777 | ndev->needed_headroom = local->tx_headroom + |
803 | 4*6 /* four MAC addresses */ | 778 | 4*6 /* four MAC addresses */ |
@@ -814,7 +789,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
814 | 789 | ||
815 | memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); | 790 | memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); |
816 | SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); | 791 | SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); |
817 | ndev->features |= NETIF_F_NETNS_LOCAL; | 792 | SET_NETDEV_DEVTYPE(ndev, &wiphy_type); |
818 | 793 | ||
819 | /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ | 794 | /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ |
820 | sdata = netdev_priv(ndev); | 795 | sdata = netdev_priv(ndev); |
@@ -931,7 +906,7 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local) | |||
931 | struct ieee80211_sub_if_data *sdata; | 906 | struct ieee80211_sub_if_data *sdata; |
932 | int count = 0; | 907 | int count = 0; |
933 | 908 | ||
934 | if (local->hw_scanning || local->sw_scanning) | 909 | if (local->scanning) |
935 | return ieee80211_idle_off(local, "scanning"); | 910 | return ieee80211_idle_off(local, "scanning"); |
936 | 911 | ||
937 | list_for_each_entry(sdata, &local->interfaces, list) { | 912 | list_for_each_entry(sdata, &local->interfaces, list) { |
@@ -939,7 +914,8 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local) | |||
939 | continue; | 914 | continue; |
940 | /* do not count disabled managed interfaces */ | 915 | /* do not count disabled managed interfaces */ |
941 | if (sdata->vif.type == NL80211_IFTYPE_STATION && | 916 | if (sdata->vif.type == NL80211_IFTYPE_STATION && |
942 | sdata->u.mgd.state == IEEE80211_STA_MLME_DISABLED) | 917 | !sdata->u.mgd.associated && |
918 | list_empty(&sdata->u.mgd.work_list)) | ||
943 | continue; | 919 | continue; |
944 | /* do not count unused IBSS interfaces */ | 920 | /* do not count unused IBSS interfaces */ |
945 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC && | 921 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC && |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 092a017b237e..797f53942e5f 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -50,9 +50,9 @@ struct ieee80211_tx_status_rtap_hdr { | |||
50 | } __attribute__ ((packed)); | 50 | } __attribute__ ((packed)); |
51 | 51 | ||
52 | 52 | ||
53 | /* must be called under mdev tx lock */ | ||
54 | void ieee80211_configure_filter(struct ieee80211_local *local) | 53 | void ieee80211_configure_filter(struct ieee80211_local *local) |
55 | { | 54 | { |
55 | u64 mc; | ||
56 | unsigned int changed_flags; | 56 | unsigned int changed_flags; |
57 | unsigned int new_flags = 0; | 57 | unsigned int new_flags = 0; |
58 | 58 | ||
@@ -62,7 +62,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local) | |||
62 | if (atomic_read(&local->iff_allmultis)) | 62 | if (atomic_read(&local->iff_allmultis)) |
63 | new_flags |= FIF_ALLMULTI; | 63 | new_flags |= FIF_ALLMULTI; |
64 | 64 | ||
65 | if (local->monitors) | 65 | if (local->monitors || local->scanning) |
66 | new_flags |= FIF_BCN_PRBRESP_PROMISC; | 66 | new_flags |= FIF_BCN_PRBRESP_PROMISC; |
67 | 67 | ||
68 | if (local->fif_fcsfail) | 68 | if (local->fif_fcsfail) |
@@ -77,77 +77,29 @@ void ieee80211_configure_filter(struct ieee80211_local *local) | |||
77 | if (local->fif_other_bss) | 77 | if (local->fif_other_bss) |
78 | new_flags |= FIF_OTHER_BSS; | 78 | new_flags |= FIF_OTHER_BSS; |
79 | 79 | ||
80 | if (local->fif_pspoll) | ||
81 | new_flags |= FIF_PSPOLL; | ||
82 | |||
83 | spin_lock_bh(&local->filter_lock); | ||
80 | changed_flags = local->filter_flags ^ new_flags; | 84 | changed_flags = local->filter_flags ^ new_flags; |
81 | 85 | ||
86 | mc = drv_prepare_multicast(local, local->mc_count, local->mc_list); | ||
87 | spin_unlock_bh(&local->filter_lock); | ||
88 | |||
82 | /* be a bit nasty */ | 89 | /* be a bit nasty */ |
83 | new_flags |= (1<<31); | 90 | new_flags |= (1<<31); |
84 | 91 | ||
85 | drv_configure_filter(local, changed_flags, &new_flags, | 92 | drv_configure_filter(local, changed_flags, &new_flags, mc); |
86 | local->mdev->mc_count, | ||
87 | local->mdev->mc_list); | ||
88 | 93 | ||
89 | WARN_ON(new_flags & (1<<31)); | 94 | WARN_ON(new_flags & (1<<31)); |
90 | 95 | ||
91 | local->filter_flags = new_flags & ~(1<<31); | 96 | local->filter_flags = new_flags & ~(1<<31); |
92 | } | 97 | } |
93 | 98 | ||
94 | /* master interface */ | 99 | static void ieee80211_reconfig_filter(struct work_struct *work) |
95 | |||
96 | static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr) | ||
97 | { | ||
98 | memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ | ||
99 | return ETH_ALEN; | ||
100 | } | ||
101 | |||
102 | static const struct header_ops ieee80211_header_ops = { | ||
103 | .create = eth_header, | ||
104 | .parse = header_parse_80211, | ||
105 | .rebuild = eth_rebuild_header, | ||
106 | .cache = eth_header_cache, | ||
107 | .cache_update = eth_header_cache_update, | ||
108 | }; | ||
109 | |||
110 | static int ieee80211_master_open(struct net_device *dev) | ||
111 | { | 100 | { |
112 | struct ieee80211_master_priv *mpriv = netdev_priv(dev); | 101 | struct ieee80211_local *local = |
113 | struct ieee80211_local *local = mpriv->local; | 102 | container_of(work, struct ieee80211_local, reconfig_filter); |
114 | struct ieee80211_sub_if_data *sdata; | ||
115 | int res = -EOPNOTSUPP; | ||
116 | |||
117 | /* we hold the RTNL here so can safely walk the list */ | ||
118 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
119 | if (netif_running(sdata->dev)) { | ||
120 | res = 0; | ||
121 | break; | ||
122 | } | ||
123 | } | ||
124 | |||
125 | if (res) | ||
126 | return res; | ||
127 | |||
128 | netif_tx_start_all_queues(local->mdev); | ||
129 | |||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | static int ieee80211_master_stop(struct net_device *dev) | ||
134 | { | ||
135 | struct ieee80211_master_priv *mpriv = netdev_priv(dev); | ||
136 | struct ieee80211_local *local = mpriv->local; | ||
137 | struct ieee80211_sub_if_data *sdata; | ||
138 | |||
139 | /* we hold the RTNL here so can safely walk the list */ | ||
140 | list_for_each_entry(sdata, &local->interfaces, list) | ||
141 | if (netif_running(sdata->dev)) | ||
142 | dev_close(sdata->dev); | ||
143 | |||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | static void ieee80211_master_set_multicast_list(struct net_device *dev) | ||
148 | { | ||
149 | struct ieee80211_master_priv *mpriv = netdev_priv(dev); | ||
150 | struct ieee80211_local *local = mpriv->local; | ||
151 | 103 | ||
152 | ieee80211_configure_filter(local); | 104 | ieee80211_configure_filter(local); |
153 | } | 105 | } |
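
The hunk above folds multicast handling into ieee80211_configure_filter() and keeps the (1<<31) sentinel: mac80211 plants a bit it never defines before calling drv_configure_filter(), and the WARN_ON fires if a driver hands the flags back without restricting them to the filter bits it actually supports. A minimal user-space sketch of that sentinel-bit check (the "driver" callbacks and the capability mask are illustrative assumptions, not the mac80211 driver API):

#include <stdio.h>

#define SENTINEL (1u << 31)            /* a bit the core never defines */
#define SUPPORTED_FLAGS 0x0000ffffu    /* illustrative driver capability mask */

/* A well-behaved "driver": keeps only the filter flags it supports. */
static void good_configure_filter(unsigned int changed, unsigned int *total)
{
    (void)changed;
    *total &= SUPPORTED_FLAGS;
}

/* A broken "driver": passes the flags back untouched. */
static void broken_configure_filter(unsigned int changed, unsigned int *total)
{
    (void)changed;
    (void)total;
}

int main(void)
{
    unsigned int flags;

    flags = 0x3u | SENTINEL;           /* plant the sentinel before the call */
    good_configure_filter(0, &flags);
    printf("good driver leaked sentinel: %s\n", (flags & SENTINEL) ? "yes" : "no");

    flags = 0x3u | SENTINEL;
    broken_configure_filter(0, &flags);
    printf("broken driver leaked sentinel: %s\n", (flags & SENTINEL) ? "yes" : "no");
    return 0;
}
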
@@ -259,7 +211,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | |||
259 | } | 211 | } |
260 | 212 | ||
261 | if (changed & BSS_CHANGED_BEACON_ENABLED) { | 213 | if (changed & BSS_CHANGED_BEACON_ENABLED) { |
262 | if (local->sw_scanning) { | 214 | if (local->quiescing || !netif_running(sdata->dev) || |
215 | test_bit(SCAN_SW_SCANNING, &local->scanning)) { | ||
263 | sdata->vif.bss_conf.enable_beacon = false; | 216 | sdata->vif.bss_conf.enable_beacon = false; |
264 | } else { | 217 | } else { |
265 | /* | 218 | /* |
@@ -288,9 +241,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | |||
288 | 241 | ||
289 | drv_bss_info_changed(local, &sdata->vif, | 242 | drv_bss_info_changed(local, &sdata->vif, |
290 | &sdata->vif.bss_conf, changed); | 243 | &sdata->vif.bss_conf, changed); |
291 | |||
292 | /* DEPRECATED */ | ||
293 | local->hw.conf.beacon_int = sdata->vif.bss_conf.beacon_int; | ||
294 | } | 244 | } |
295 | 245 | ||
296 | u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) | 246 | u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) |
@@ -310,7 +260,6 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, | |||
310 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 260 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
311 | int tmp; | 261 | int tmp; |
312 | 262 | ||
313 | skb->dev = local->mdev; | ||
314 | skb->pkt_type = IEEE80211_TX_STATUS_MSG; | 263 | skb->pkt_type = IEEE80211_TX_STATUS_MSG; |
315 | skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ? | 264 | skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ? |
316 | &local->skb_queue : &local->skb_queue_unreliable, skb); | 265 | &local->skb_queue : &local->skb_queue_unreliable, skb); |
@@ -330,19 +279,16 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
330 | { | 279 | { |
331 | struct ieee80211_local *local = (struct ieee80211_local *) data; | 280 | struct ieee80211_local *local = (struct ieee80211_local *) data; |
332 | struct sk_buff *skb; | 281 | struct sk_buff *skb; |
333 | struct ieee80211_rx_status rx_status; | ||
334 | struct ieee80211_ra_tid *ra_tid; | 282 | struct ieee80211_ra_tid *ra_tid; |
335 | 283 | ||
336 | while ((skb = skb_dequeue(&local->skb_queue)) || | 284 | while ((skb = skb_dequeue(&local->skb_queue)) || |
337 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { | 285 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { |
338 | switch (skb->pkt_type) { | 286 | switch (skb->pkt_type) { |
339 | case IEEE80211_RX_MSG: | 287 | case IEEE80211_RX_MSG: |
340 | /* status is in skb->cb */ | ||
341 | memcpy(&rx_status, skb->cb, sizeof(rx_status)); | ||
342 | /* Clear skb->pkt_type in order to not confuse kernel | 288 | /* Clear skb->pkt_type in order to not confuse kernel |
343 | * netstack. */ | 289 | * netstack. */ |
344 | skb->pkt_type = 0; | 290 | skb->pkt_type = 0; |
345 | __ieee80211_rx(local_to_hw(local), skb, &rx_status); | 291 | ieee80211_rx(local_to_hw(local), skb); |
346 | break; | 292 | break; |
347 | case IEEE80211_TX_STATUS_MSG: | 293 | case IEEE80211_TX_STATUS_MSG: |
348 | skb->pkt_type = 0; | 294 | skb->pkt_type = 0; |
@@ -375,6 +321,31 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
375 | { | 321 | { |
376 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 322 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
377 | 323 | ||
324 | /* | ||
325 | * XXX: This is temporary! | ||
326 | * | ||
327 | * The problem here is that when we get here, the driver will | ||
328 | * quite likely have pretty much overwritten info->control by | ||
329 | * using info->driver_data or info->rate_driver_data. Thus, | ||
330 | * when passing out the frame to the driver again, we would be | ||
331 | * passing completely bogus data since the driver would then | ||
332 | * expect a properly filled info->control. In mac80211 itself | ||
333 | * the same problem occurs, since we need info->control.vif | ||
334 | * internally. | ||
335 | * | ||
336 | * To fix this, we should send the frame through TX processing | ||
337 | * again. However, it's not that simple, since the frame will | ||
338 | * have been software-encrypted (if applicable) already, and | ||
339 | * encrypting it again doesn't do much good. So to properly do | ||
340 | * that, we not only have to skip the actual 'raw' encryption | ||
341 | * (key selection etc. still has to be done!) but also the | ||
342 | * sequence number assignment since that impacts the crypto | ||
343 | * encapsulation, of course. | ||
344 | * | ||
345 | * Hence, for now, fix the bug by just dropping the frame. | ||
346 | */ | ||
347 | goto drop; | ||
348 | |||
378 | sta->tx_filtered_count++; | 349 | sta->tx_filtered_count++; |
379 | 350 | ||
380 | /* | 351 | /* |
@@ -428,6 +399,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
428 | return; | 399 | return; |
429 | } | 400 | } |
430 | 401 | ||
402 | drop: | ||
431 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 403 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
432 | if (net_ratelimit()) | 404 | if (net_ratelimit()) |
433 | printk(KERN_DEBUG "%s: dropped TX filtered frame, " | 405 | printk(KERN_DEBUG "%s: dropped TX filtered frame, " |
@@ -510,6 +482,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
510 | } | 482 | } |
511 | 483 | ||
512 | rate_control_tx_status(local, sband, sta, skb); | 484 | rate_control_tx_status(local, sband, sta, skb); |
485 | if (ieee80211_vif_is_mesh(&sta->sdata->vif)) | ||
486 | ieee80211s_update_metric(local, sta, skb); | ||
513 | } | 487 | } |
514 | 488 | ||
515 | rcu_read_unlock(); | 489 | rcu_read_unlock(); |
@@ -685,6 +659,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
685 | if (!wiphy) | 659 | if (!wiphy) |
686 | return NULL; | 660 | return NULL; |
687 | 661 | ||
662 | wiphy->netnsok = true; | ||
688 | wiphy->privid = mac80211_wiphy_privid; | 663 | wiphy->privid = mac80211_wiphy_privid; |
689 | 664 | ||
690 | /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */ | 665 | /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */ |
@@ -711,7 +686,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
711 | local->hw.max_rates = 1; | 686 | local->hw.max_rates = 1; |
712 | local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; | 687 | local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; |
713 | local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; | 688 | local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; |
714 | local->hw.conf.radio_enabled = true; | ||
715 | local->user_power_level = -1; | 689 | local->user_power_level = -1; |
716 | 690 | ||
717 | INIT_LIST_HEAD(&local->interfaces); | 691 | INIT_LIST_HEAD(&local->interfaces); |
@@ -719,13 +693,15 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
719 | mutex_init(&local->scan_mtx); | 693 | mutex_init(&local->scan_mtx); |
720 | 694 | ||
721 | spin_lock_init(&local->key_lock); | 695 | spin_lock_init(&local->key_lock); |
722 | 696 | spin_lock_init(&local->filter_lock); | |
723 | spin_lock_init(&local->queue_stop_reason_lock); | 697 | spin_lock_init(&local->queue_stop_reason_lock); |
724 | 698 | ||
725 | INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); | 699 | INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); |
726 | 700 | ||
727 | INIT_WORK(&local->restart_work, ieee80211_restart_work); | 701 | INIT_WORK(&local->restart_work, ieee80211_restart_work); |
728 | 702 | ||
703 | INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); | ||
704 | |||
729 | INIT_WORK(&local->dynamic_ps_enable_work, | 705 | INIT_WORK(&local->dynamic_ps_enable_work, |
730 | ieee80211_dynamic_ps_enable_work); | 706 | ieee80211_dynamic_ps_enable_work); |
731 | INIT_WORK(&local->dynamic_ps_disable_work, | 707 | INIT_WORK(&local->dynamic_ps_disable_work, |
@@ -739,12 +715,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
739 | skb_queue_head_init(&local->pending[i]); | 715 | skb_queue_head_init(&local->pending[i]); |
740 | tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, | 716 | tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, |
741 | (unsigned long)local); | 717 | (unsigned long)local); |
742 | tasklet_disable(&local->tx_pending_tasklet); | ||
743 | 718 | ||
744 | tasklet_init(&local->tasklet, | 719 | tasklet_init(&local->tasklet, |
745 | ieee80211_tasklet_handler, | 720 | ieee80211_tasklet_handler, |
746 | (unsigned long) local); | 721 | (unsigned long) local); |
747 | tasklet_disable(&local->tasklet); | ||
748 | 722 | ||
749 | skb_queue_head_init(&local->skb_queue); | 723 | skb_queue_head_init(&local->skb_queue); |
750 | skb_queue_head_init(&local->skb_queue_unreliable); | 724 | skb_queue_head_init(&local->skb_queue_unreliable); |
@@ -755,30 +729,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
755 | } | 729 | } |
756 | EXPORT_SYMBOL(ieee80211_alloc_hw); | 730 | EXPORT_SYMBOL(ieee80211_alloc_hw); |
757 | 731 | ||
758 | static const struct net_device_ops ieee80211_master_ops = { | ||
759 | .ndo_start_xmit = ieee80211_master_start_xmit, | ||
760 | .ndo_open = ieee80211_master_open, | ||
761 | .ndo_stop = ieee80211_master_stop, | ||
762 | .ndo_set_multicast_list = ieee80211_master_set_multicast_list, | ||
763 | .ndo_select_queue = ieee80211_select_queue, | ||
764 | }; | ||
765 | |||
766 | static void ieee80211_master_setup(struct net_device *mdev) | ||
767 | { | ||
768 | mdev->type = ARPHRD_IEEE80211; | ||
769 | mdev->netdev_ops = &ieee80211_master_ops; | ||
770 | mdev->header_ops = &ieee80211_header_ops; | ||
771 | mdev->tx_queue_len = 1000; | ||
772 | mdev->addr_len = ETH_ALEN; | ||
773 | } | ||
774 | |||
775 | int ieee80211_register_hw(struct ieee80211_hw *hw) | 732 | int ieee80211_register_hw(struct ieee80211_hw *hw) |
776 | { | 733 | { |
777 | struct ieee80211_local *local = hw_to_local(hw); | 734 | struct ieee80211_local *local = hw_to_local(hw); |
778 | int result; | 735 | int result; |
779 | enum ieee80211_band band; | 736 | enum ieee80211_band band; |
780 | struct net_device *mdev; | ||
781 | struct ieee80211_master_priv *mpriv; | ||
782 | int channels, i, j, max_bitrates; | 737 | int channels, i, j, max_bitrates; |
783 | bool supp_ht; | 738 | bool supp_ht; |
784 | static const u32 cipher_suites[] = { | 739 | static const u32 cipher_suites[] = { |
@@ -818,9 +773,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
818 | supp_ht = supp_ht || sband->ht_cap.ht_supported; | 773 | supp_ht = supp_ht || sband->ht_cap.ht_supported; |
819 | } | 774 | } |
820 | 775 | ||
821 | local->int_scan_req.n_channels = channels; | 776 | local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) + |
822 | local->int_scan_req.channels = kzalloc(sizeof(void *) * channels, GFP_KERNEL); | 777 | sizeof(void *) * channels, GFP_KERNEL); |
823 | if (!local->int_scan_req.channels) | 778 | if (!local->int_scan_req) |
824 | return -ENOMEM; | 779 | return -ENOMEM; |
825 | 780 | ||
826 | /* if low-level driver supports AP, we also support VLAN */ | 781 | /* if low-level driver supports AP, we also support VLAN */ |
@@ -877,19 +832,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
877 | if (hw->queues > IEEE80211_MAX_QUEUES) | 832 | if (hw->queues > IEEE80211_MAX_QUEUES) |
878 | hw->queues = IEEE80211_MAX_QUEUES; | 833 | hw->queues = IEEE80211_MAX_QUEUES; |
879 | 834 | ||
880 | mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv), | 835 | local->workqueue = |
881 | "wmaster%d", ieee80211_master_setup, | ||
882 | hw->queues); | ||
883 | if (!mdev) | ||
884 | goto fail_mdev_alloc; | ||
885 | |||
886 | mpriv = netdev_priv(mdev); | ||
887 | mpriv->local = local; | ||
888 | local->mdev = mdev; | ||
889 | |||
890 | local->hw.workqueue = | ||
891 | create_singlethread_workqueue(wiphy_name(local->hw.wiphy)); | 836 | create_singlethread_workqueue(wiphy_name(local->hw.wiphy)); |
892 | if (!local->hw.workqueue) { | 837 | if (!local->workqueue) { |
893 | result = -ENOMEM; | 838 | result = -ENOMEM; |
894 | goto fail_workqueue; | 839 | goto fail_workqueue; |
895 | } | 840 | } |
@@ -921,17 +866,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
921 | } | 866 | } |
922 | 867 | ||
923 | rtnl_lock(); | 868 | rtnl_lock(); |
924 | result = dev_alloc_name(local->mdev, local->mdev->name); | ||
925 | if (result < 0) | ||
926 | goto fail_dev; | ||
927 | |||
928 | memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); | ||
929 | SET_NETDEV_DEV(local->mdev, wiphy_dev(local->hw.wiphy)); | ||
930 | local->mdev->features |= NETIF_F_NETNS_LOCAL; | ||
931 | |||
932 | result = register_netdevice(local->mdev); | ||
933 | if (result < 0) | ||
934 | goto fail_dev; | ||
935 | 869 | ||
936 | result = ieee80211_init_rate_ctrl_alg(local, | 870 | result = ieee80211_init_rate_ctrl_alg(local, |
937 | hw->rate_control_algorithm); | 871 | hw->rate_control_algorithm); |
@@ -956,13 +890,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
956 | 890 | ||
957 | /* alloc internal scan request */ | 891 | /* alloc internal scan request */ |
958 | i = 0; | 892 | i = 0; |
959 | local->int_scan_req.ssids = &local->scan_ssid; | 893 | local->int_scan_req->ssids = &local->scan_ssid; |
960 | local->int_scan_req.n_ssids = 1; | 894 | local->int_scan_req->n_ssids = 1; |
961 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 895 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
962 | if (!hw->wiphy->bands[band]) | 896 | if (!hw->wiphy->bands[band]) |
963 | continue; | 897 | continue; |
964 | for (j = 0; j < hw->wiphy->bands[band]->n_channels; j++) { | 898 | for (j = 0; j < hw->wiphy->bands[band]->n_channels; j++) { |
965 | local->int_scan_req.channels[i] = | 899 | local->int_scan_req->channels[i] = |
966 | &hw->wiphy->bands[band]->channels[j]; | 900 | &hw->wiphy->bands[band]->channels[j]; |
967 | i++; | 901 | i++; |
968 | } | 902 | } |
@@ -984,23 +918,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
984 | ieee80211_led_exit(local); | 918 | ieee80211_led_exit(local); |
985 | ieee80211_remove_interfaces(local); | 919 | ieee80211_remove_interfaces(local); |
986 | fail_rate: | 920 | fail_rate: |
987 | unregister_netdevice(local->mdev); | ||
988 | local->mdev = NULL; | ||
989 | fail_dev: | ||
990 | rtnl_unlock(); | 921 | rtnl_unlock(); |
991 | ieee80211_wep_free(local); | 922 | ieee80211_wep_free(local); |
992 | fail_wep: | 923 | fail_wep: |
993 | sta_info_stop(local); | 924 | sta_info_stop(local); |
994 | fail_sta_info: | 925 | fail_sta_info: |
995 | debugfs_hw_del(local); | 926 | debugfs_hw_del(local); |
996 | destroy_workqueue(local->hw.workqueue); | 927 | destroy_workqueue(local->workqueue); |
997 | fail_workqueue: | 928 | fail_workqueue: |
998 | if (local->mdev) | ||
999 | free_netdev(local->mdev); | ||
1000 | fail_mdev_alloc: | ||
1001 | wiphy_unregister(local->hw.wiphy); | 929 | wiphy_unregister(local->hw.wiphy); |
1002 | fail_wiphy_register: | 930 | fail_wiphy_register: |
1003 | kfree(local->int_scan_req.channels); | 931 | kfree(local->int_scan_req); |
1004 | return result; | 932 | return result; |
1005 | } | 933 | } |
1006 | EXPORT_SYMBOL(ieee80211_register_hw); | 934 | EXPORT_SYMBOL(ieee80211_register_hw); |
@@ -1022,15 +950,12 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1022 | * because the driver cannot be handing us frames any | 950 | * because the driver cannot be handing us frames any |
1023 | * more and the tasklet is killed. | 951 | * more and the tasklet is killed. |
1024 | */ | 952 | */ |
1025 | |||
1026 | /* First, we remove all virtual interfaces. */ | ||
1027 | ieee80211_remove_interfaces(local); | 953 | ieee80211_remove_interfaces(local); |
1028 | 954 | ||
1029 | /* then, finally, remove the master interface */ | ||
1030 | unregister_netdevice(local->mdev); | ||
1031 | |||
1032 | rtnl_unlock(); | 955 | rtnl_unlock(); |
1033 | 956 | ||
957 | cancel_work_sync(&local->reconfig_filter); | ||
958 | |||
1034 | ieee80211_clear_tx_pending(local); | 959 | ieee80211_clear_tx_pending(local); |
1035 | sta_info_stop(local); | 960 | sta_info_stop(local); |
1036 | rate_control_deinitialize(local); | 961 | rate_control_deinitialize(local); |
@@ -1043,12 +968,11 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1043 | skb_queue_purge(&local->skb_queue); | 968 | skb_queue_purge(&local->skb_queue); |
1044 | skb_queue_purge(&local->skb_queue_unreliable); | 969 | skb_queue_purge(&local->skb_queue_unreliable); |
1045 | 970 | ||
1046 | destroy_workqueue(local->hw.workqueue); | 971 | destroy_workqueue(local->workqueue); |
1047 | wiphy_unregister(local->hw.wiphy); | 972 | wiphy_unregister(local->hw.wiphy); |
1048 | ieee80211_wep_free(local); | 973 | ieee80211_wep_free(local); |
1049 | ieee80211_led_exit(local); | 974 | ieee80211_led_exit(local); |
1050 | free_netdev(local->mdev); | 975 | kfree(local->int_scan_req); |
1051 | kfree(local->int_scan_req.channels); | ||
1052 | } | 976 | } |
1053 | EXPORT_SYMBOL(ieee80211_unregister_hw); | 977 | EXPORT_SYMBOL(ieee80211_unregister_hw); |
1054 | 978 | ||
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 11cf45bce38a..f7364e56f1ee 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -18,8 +18,11 @@ | |||
18 | #define PP_OFFSET 1 /* Path Selection Protocol */ | 18 | #define PP_OFFSET 1 /* Path Selection Protocol */ |
19 | #define PM_OFFSET 5 /* Path Selection Metric */ | 19 | #define PM_OFFSET 5 /* Path Selection Metric */ |
20 | #define CC_OFFSET 9 /* Congestion Control Mode */ | 20 | #define CC_OFFSET 9 /* Congestion Control Mode */ |
21 | #define CAPAB_OFFSET 17 | 21 | #define SP_OFFSET 13 /* Synchronization Protocol */ |
22 | #define ACCEPT_PLINKS 0x80 | 22 | #define AUTH_OFFSET 17 /* Authentication Protocol */ |
23 | #define CAPAB_OFFSET 22 | ||
24 | #define CAPAB_ACCEPT_PLINKS 0x80 | ||
25 | #define CAPAB_FORWARDING 0x10 | ||
23 | 26 | ||
24 | #define TMR_RUNNING_HK 0 | 27 | #define TMR_RUNNING_HK 0 |
25 | #define TMR_RUNNING_MP 1 | 28 | #define TMR_RUNNING_MP 1 |
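
The new offsets follow directly from the Mesh Configuration IE body that mesh_mgmt_ies_add() now builds: a one-byte version, five 4-byte protocol identifiers (path selection, metric, congestion control, synchronization, authentication), a one-byte Mesh Formation Info field, and the capability bytes starting at offset 22. A small standalone sketch that re-derives the offsets (plain C, not kernel code; the printed total assumes the two trailing capability bytes written at the end of mesh_mgmt_ies_add()):

#include <assert.h>
#include <stdio.h>

int main(void)
{
    int pos = 0;

    pos += 1;             /* version byte                        */
    assert(pos == 1);     /* PP_OFFSET   - path selection proto  */
    pos += 4;
    assert(pos == 5);     /* PM_OFFSET   - path selection metric */
    pos += 4;
    assert(pos == 9);     /* CC_OFFSET   - congestion control    */
    pos += 4;
    assert(pos == 13);    /* SP_OFFSET   - synchronization       */
    pos += 4;
    assert(pos == 17);    /* AUTH_OFFSET - authentication        */
    pos += 4;
    pos += 1;             /* Mesh Formation Info byte            */
    assert(pos == 22);    /* CAPAB_OFFSET                        */
    pos += 2;             /* capability bytes written last       */

    printf("IE body written: %d bytes\n", pos);
    return 0;
}
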
@@ -47,14 +50,14 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data) | |||
47 | struct ieee80211_local *local = sdata->local; | 50 | struct ieee80211_local *local = sdata->local; |
48 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 51 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
49 | 52 | ||
50 | ifmsh->housekeeping = true; | 53 | ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING; |
51 | 54 | ||
52 | if (local->quiescing) { | 55 | if (local->quiescing) { |
53 | set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); | 56 | set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); |
54 | return; | 57 | return; |
55 | } | 58 | } |
56 | 59 | ||
57 | queue_work(local->hw.workqueue, &ifmsh->work); | 60 | ieee80211_queue_work(&local->hw, &ifmsh->work); |
58 | } | 61 | } |
59 | 62 | ||
60 | /** | 63 | /** |
@@ -84,7 +87,9 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat | |||
84 | memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && | 87 | memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && |
85 | memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 && | 88 | memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 && |
86 | memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 && | 89 | memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 && |
87 | memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0) | 90 | memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0 && |
91 | memcmp(ifmsh->mesh_sp_id, ie->mesh_config + SP_OFFSET, 4) == 0 && | ||
92 | memcmp(ifmsh->mesh_auth_id, ie->mesh_config + AUTH_OFFSET, 4) == 0) | ||
88 | return true; | 93 | return true; |
89 | 94 | ||
90 | return false; | 95 | return false; |
@@ -97,7 +102,7 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat | |||
97 | */ | 102 | */ |
98 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) | 103 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) |
99 | { | 104 | { |
100 | return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0; | 105 | return (*(ie->mesh_config + CAPAB_OFFSET) & CAPAB_ACCEPT_PLINKS) != 0; |
101 | } | 106 | } |
102 | 107 | ||
103 | /** | 108 | /** |
@@ -123,11 +128,18 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) | |||
123 | 128 | ||
124 | void mesh_ids_set_default(struct ieee80211_if_mesh *sta) | 129 | void mesh_ids_set_default(struct ieee80211_if_mesh *sta) |
125 | { | 130 | { |
126 | u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff}; | 131 | u8 oui[3] = {0x00, 0x0F, 0xAC}; |
127 | 132 | ||
128 | memcpy(sta->mesh_pp_id, def_id, 4); | 133 | memcpy(sta->mesh_pp_id, oui, sizeof(oui)); |
129 | memcpy(sta->mesh_pm_id, def_id, 4); | 134 | memcpy(sta->mesh_pm_id, oui, sizeof(oui)); |
130 | memcpy(sta->mesh_cc_id, def_id, 4); | 135 | memcpy(sta->mesh_cc_id, oui, sizeof(oui)); |
136 | memcpy(sta->mesh_sp_id, oui, sizeof(oui)); | ||
137 | memcpy(sta->mesh_auth_id, oui, sizeof(oui)); | ||
138 | sta->mesh_pp_id[sizeof(oui)] = 0; | ||
139 | sta->mesh_pm_id[sizeof(oui)] = 0; | ||
140 | sta->mesh_cc_id[sizeof(oui)] = 0xff; | ||
141 | sta->mesh_sp_id[sizeof(oui)] = 0xff; | ||
142 | sta->mesh_auth_id[sizeof(oui)] = 0x0; | ||
131 | } | 143 | } |
132 | 144 | ||
133 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) | 145 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) |
@@ -245,7 +257,7 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) | |||
245 | if (sdata->u.mesh.mesh_id_len) | 257 | if (sdata->u.mesh.mesh_id_len) |
246 | memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len); | 258 | memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len); |
247 | 259 | ||
248 | pos = skb_put(skb, 21); | 260 | pos = skb_put(skb, 2 + IEEE80211_MESH_CONFIG_LEN); |
249 | *pos++ = WLAN_EID_MESH_CONFIG; | 261 | *pos++ = WLAN_EID_MESH_CONFIG; |
250 | *pos++ = IEEE80211_MESH_CONFIG_LEN; | 262 | *pos++ = IEEE80211_MESH_CONFIG_LEN; |
251 | /* Version */ | 263 | /* Version */ |
@@ -263,15 +275,22 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) | |||
263 | memcpy(pos, sdata->u.mesh.mesh_cc_id, 4); | 275 | memcpy(pos, sdata->u.mesh.mesh_cc_id, 4); |
264 | pos += 4; | 276 | pos += 4; |
265 | 277 | ||
266 | /* Channel precedence: | 278 | /* Synchronization protocol identifier */ |
267 | * Not running simple channel unification protocol | 279 | memcpy(pos, sdata->u.mesh.mesh_sp_id, 4); |
268 | */ | ||
269 | memset(pos, 0x00, 4); | ||
270 | pos += 4; | 280 | pos += 4; |
271 | 281 | ||
282 | /* Authentication Protocol identifier */ | ||
283 | memcpy(pos, sdata->u.mesh.mesh_auth_id, 4); | ||
284 | pos += 4; | ||
285 | |||
286 | /* Mesh Formation Info */ | ||
287 | memset(pos, 0x00, 1); | ||
288 | pos += 1; | ||
289 | |||
272 | /* Mesh capability */ | 290 | /* Mesh capability */ |
273 | sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata); | 291 | sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata); |
274 | *pos++ = sdata->u.mesh.accepting_plinks ? ACCEPT_PLINKS : 0x00; | 292 | *pos = CAPAB_FORWARDING; |
293 | *pos++ |= sdata->u.mesh.accepting_plinks ? CAPAB_ACCEPT_PLINKS : 0x00; | ||
275 | *pos++ = 0x00; | 294 | *pos++ = 0x00; |
276 | 295 | ||
277 | return; | 296 | return; |
@@ -320,30 +339,6 @@ struct mesh_table *mesh_table_alloc(int size_order) | |||
320 | return newtbl; | 339 | return newtbl; |
321 | } | 340 | } |
322 | 341 | ||
323 | static void __mesh_table_free(struct mesh_table *tbl) | ||
324 | { | ||
325 | kfree(tbl->hash_buckets); | ||
326 | kfree(tbl->hashwlock); | ||
327 | kfree(tbl); | ||
328 | } | ||
329 | |||
330 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs) | ||
331 | { | ||
332 | struct hlist_head *mesh_hash; | ||
333 | struct hlist_node *p, *q; | ||
334 | int i; | ||
335 | |||
336 | mesh_hash = tbl->hash_buckets; | ||
337 | for (i = 0; i <= tbl->hash_mask; i++) { | ||
338 | spin_lock(&tbl->hashwlock[i]); | ||
339 | hlist_for_each_safe(p, q, &mesh_hash[i]) { | ||
340 | tbl->free_node(p, free_leafs); | ||
341 | atomic_dec(&tbl->entries); | ||
342 | } | ||
343 | spin_unlock(&tbl->hashwlock[i]); | ||
344 | } | ||
345 | __mesh_table_free(tbl); | ||
346 | } | ||
347 | 342 | ||
348 | static void ieee80211_mesh_path_timer(unsigned long data) | 343 | static void ieee80211_mesh_path_timer(unsigned long data) |
349 | { | 344 | { |
@@ -357,63 +352,79 @@ static void ieee80211_mesh_path_timer(unsigned long data) | |||
357 | return; | 352 | return; |
358 | } | 353 | } |
359 | 354 | ||
360 | queue_work(local->hw.workqueue, &ifmsh->work); | 355 | ieee80211_queue_work(&local->hw, &ifmsh->work); |
361 | } | 356 | } |
362 | 357 | ||
363 | struct mesh_table *mesh_table_grow(struct mesh_table *tbl) | 358 | /** |
364 | { | 359 | * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame |
365 | struct mesh_table *newtbl; | 360 | * @hdr: 802.11 frame header |
366 | struct hlist_head *oldhash; | 361 | * @fc: frame control field |
367 | struct hlist_node *p, *q; | 362 | * @meshda: destination address in the mesh |
368 | int i; | 363 | * @meshsa: source address in the mesh. Same as TA, as frame is |
369 | 364 | * locally originated. | |
370 | if (atomic_read(&tbl->entries) | 365 | * |
371 | < tbl->mean_chain_len * (tbl->hash_mask + 1)) | 366 | * Return the length of the 802.11 header (does not include the mesh control header) |
372 | goto endgrow; | 367 | */ |
373 | 368 | int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, char | |
374 | newtbl = mesh_table_alloc(tbl->size_order + 1); | 369 | *meshda, char *meshsa) { |
375 | if (!newtbl) | 370 | if (is_multicast_ether_addr(meshda)) { |
376 | goto endgrow; | 371 | *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); |
377 | 372 | /* DA TA SA */ | |
378 | newtbl->free_node = tbl->free_node; | 373 | memcpy(hdr->addr1, meshda, ETH_ALEN); |
379 | newtbl->mean_chain_len = tbl->mean_chain_len; | 374 | memcpy(hdr->addr2, meshsa, ETH_ALEN); |
380 | newtbl->copy_node = tbl->copy_node; | 375 | memcpy(hdr->addr3, meshsa, ETH_ALEN); |
381 | atomic_set(&newtbl->entries, atomic_read(&tbl->entries)); | 376 | return 24; |
382 | 377 | } else { | |
383 | oldhash = tbl->hash_buckets; | 378 | *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | |
384 | for (i = 0; i <= tbl->hash_mask; i++) | 379 | IEEE80211_FCTL_TODS); |
385 | hlist_for_each(p, &oldhash[i]) | 380 | /* RA TA DA SA */ |
386 | if (tbl->copy_node(p, newtbl) < 0) | 381 | memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */ |
387 | goto errcopy; | 382 | memcpy(hdr->addr2, meshsa, ETH_ALEN); |
388 | 383 | memcpy(hdr->addr3, meshda, ETH_ALEN); | |
389 | return newtbl; | 384 | memcpy(hdr->addr4, meshsa, ETH_ALEN); |
390 | 385 | return 30; | |
391 | errcopy: | ||
392 | for (i = 0; i <= newtbl->hash_mask; i++) { | ||
393 | hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) | ||
394 | tbl->free_node(p, 0); | ||
395 | } | 386 | } |
396 | __mesh_table_free(newtbl); | ||
397 | endgrow: | ||
398 | return NULL; | ||
399 | } | 387 | } |
400 | 388 | ||
401 | /** | 389 | /** |
402 | * ieee80211_new_mesh_header - create a new mesh header | 390 | * ieee80211_new_mesh_header - create a new mesh header |
403 | * @meshhdr: uninitialized mesh header | 391 | * @meshhdr: uninitialized mesh header |
404 | * @sdata: mesh interface to be used | 392 | * @sdata: mesh interface to be used |
393 | * @addr4: addr4 of the mesh frame (1st in ae header) | ||
394 | * may be NULL | ||
395 | * @addr5: addr5 of the mesh frame (1st or 2nd in ae header) | ||
396 | * may be NULL unless addr6 is present | ||
397 | * @addr6: addr6 of the mesh frame (2nd or 3rd in ae header) | ||
398 | * may be NULL unless addr5 is present | ||
405 | * | 399 | * |
406 | * Return the header length. | 400 | * Return the header length. |
407 | */ | 401 | */ |
408 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, | 402 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, |
409 | struct ieee80211_sub_if_data *sdata) | 403 | struct ieee80211_sub_if_data *sdata, char *addr4, |
404 | char *addr5, char *addr6) | ||
410 | { | 405 | { |
411 | meshhdr->flags = 0; | 406 | int aelen = 0; |
407 | memset(meshhdr, 0, sizeof(meshhdr)); | ||
412 | meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; | 408 | meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; |
413 | put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum); | 409 | put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum); |
414 | sdata->u.mesh.mesh_seqnum++; | 410 | sdata->u.mesh.mesh_seqnum++; |
415 | 411 | if (addr4) { | |
416 | return 6; | 412 | meshhdr->flags |= MESH_FLAGS_AE_A4; |
413 | aelen += ETH_ALEN; | ||
414 | memcpy(meshhdr->eaddr1, addr4, ETH_ALEN); | ||
415 | } | ||
416 | if (addr5 && addr6) { | ||
417 | meshhdr->flags |= MESH_FLAGS_AE_A5_A6; | ||
418 | aelen += 2 * ETH_ALEN; | ||
419 | if (!addr4) { | ||
420 | memcpy(meshhdr->eaddr1, addr5, ETH_ALEN); | ||
421 | memcpy(meshhdr->eaddr2, addr6, ETH_ALEN); | ||
422 | } else { | ||
423 | memcpy(meshhdr->eaddr2, addr5, ETH_ALEN); | ||
424 | memcpy(meshhdr->eaddr3, addr6, ETH_ALEN); | ||
425 | } | ||
426 | } | ||
427 | return 6 + aelen; | ||
417 | } | 428 | } |
418 | 429 | ||
419 | static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata, | 430 | static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata, |
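
Together, the two helpers added above fix the per-frame overhead of a locally originated mesh data frame: 24 or 30 bytes of 802.11 header depending on whether the mesh DA is group-addressed, plus a 6-byte mesh control header extended by 6 or 12 bytes when address-extension fields are carried. A standalone sketch of that arithmetic (it mirrors only the return values; the helper names here are illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

#define ETH_ALEN 6

/* 802.11 header: 24 bytes (DA TA SA) for group-addressed mesh DAs,
 * 30 bytes (RA TA DA SA) otherwise, as in ieee80211_fill_mesh_addresses(). */
static int mesh_80211_hdr_len(bool mesh_da_is_multicast)
{
    return mesh_da_is_multicast ? 24 : 30;
}

/* Mesh control header: fixed 6 bytes plus 6 bytes for addr4 and/or
 * 12 bytes for addr5/addr6, as in ieee80211_new_mesh_header(). */
static int mesh_ctrl_hdr_len(bool have_addr4, bool have_addr5_addr6)
{
    int aelen = 0;

    if (have_addr4)
        aelen += ETH_ALEN;
    if (have_addr5_addr6)
        aelen += 2 * ETH_ALEN;
    return 6 + aelen;
}

int main(void)
{
    /* Unicast frame with proxied source and destination (AE = A5/A6). */
    printf("802.11: %d bytes, mesh control: %d bytes\n",
           mesh_80211_hdr_len(false), mesh_ctrl_hdr_len(false, true));
    return 0;
}
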
@@ -433,7 +444,6 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata, | |||
433 | if (free_plinks != sdata->u.mesh.accepting_plinks) | 444 | if (free_plinks != sdata->u.mesh.accepting_plinks) |
434 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); | 445 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); |
435 | 446 | ||
436 | ifmsh->housekeeping = false; | ||
437 | mod_timer(&ifmsh->housekeeping_timer, | 447 | mod_timer(&ifmsh->housekeeping_timer, |
438 | round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); | 448 | round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL)); |
439 | } | 449 | } |
@@ -470,10 +480,12 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) | |||
470 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 480 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
471 | struct ieee80211_local *local = sdata->local; | 481 | struct ieee80211_local *local = sdata->local; |
472 | 482 | ||
473 | ifmsh->housekeeping = true; | 483 | ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING; |
474 | queue_work(local->hw.workqueue, &ifmsh->work); | 484 | ieee80211_queue_work(&local->hw, &ifmsh->work); |
485 | sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; | ||
475 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | | 486 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | |
476 | BSS_CHANGED_BEACON_ENABLED); | 487 | BSS_CHANGED_BEACON_ENABLED | |
488 | BSS_CHANGED_BEACON_INT); | ||
477 | } | 489 | } |
478 | 490 | ||
479 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) | 491 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) |
@@ -568,7 +580,7 @@ static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
568 | 580 | ||
569 | ifmsh = &sdata->u.mesh; | 581 | ifmsh = &sdata->u.mesh; |
570 | 582 | ||
571 | rx_status = (struct ieee80211_rx_status *) skb->cb; | 583 | rx_status = IEEE80211_SKB_RXCB(skb); |
572 | mgmt = (struct ieee80211_mgmt *) skb->data; | 584 | mgmt = (struct ieee80211_mgmt *) skb->data; |
573 | stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; | 585 | stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; |
574 | 586 | ||
@@ -597,7 +609,7 @@ static void ieee80211_mesh_work(struct work_struct *work) | |||
597 | if (!netif_running(sdata->dev)) | 609 | if (!netif_running(sdata->dev)) |
598 | return; | 610 | return; |
599 | 611 | ||
600 | if (local->sw_scanning || local->hw_scanning) | 612 | if (local->scanning) |
601 | return; | 613 | return; |
602 | 614 | ||
603 | while ((skb = skb_dequeue(&ifmsh->skb_queue))) | 615 | while ((skb = skb_dequeue(&ifmsh->skb_queue))) |
@@ -608,7 +620,13 @@ static void ieee80211_mesh_work(struct work_struct *work) | |||
608 | ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval))) | 620 | ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval))) |
609 | mesh_path_start_discovery(sdata); | 621 | mesh_path_start_discovery(sdata); |
610 | 622 | ||
611 | if (ifmsh->housekeeping) | 623 | if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags)) |
624 | mesh_mpath_table_grow(); | ||
625 | |||
626 | if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags)) | ||
627 | mesh_mpp_table_grow(); | ||
628 | |||
629 | if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) | ||
612 | ieee80211_mesh_housekeeping(sdata, ifmsh); | 630 | ieee80211_mesh_housekeeping(sdata, ifmsh); |
613 | } | 631 | } |
614 | 632 | ||
@@ -619,7 +637,7 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) | |||
619 | rcu_read_lock(); | 637 | rcu_read_lock(); |
620 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 638 | list_for_each_entry_rcu(sdata, &local->interfaces, list) |
621 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 639 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
622 | queue_work(local->hw.workqueue, &sdata->u.mesh.work); | 640 | ieee80211_queue_work(&local->hw, &sdata->u.mesh.work); |
623 | rcu_read_unlock(); | 641 | rcu_read_unlock(); |
624 | } | 642 | } |
625 | 643 | ||
@@ -671,8 +689,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) | |||
671 | } | 689 | } |
672 | 690 | ||
673 | ieee80211_rx_result | 691 | ieee80211_rx_result |
674 | ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | 692 | ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) |
675 | struct ieee80211_rx_status *rx_status) | ||
676 | { | 693 | { |
677 | struct ieee80211_local *local = sdata->local; | 694 | struct ieee80211_local *local = sdata->local; |
678 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 695 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
@@ -686,12 +703,14 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | |||
686 | fc = le16_to_cpu(mgmt->frame_control); | 703 | fc = le16_to_cpu(mgmt->frame_control); |
687 | 704 | ||
688 | switch (fc & IEEE80211_FCTL_STYPE) { | 705 | switch (fc & IEEE80211_FCTL_STYPE) { |
706 | case IEEE80211_STYPE_ACTION: | ||
707 | if (skb->len < IEEE80211_MIN_ACTION_SIZE) | ||
708 | return RX_DROP_MONITOR; | ||
709 | /* fall through */ | ||
689 | case IEEE80211_STYPE_PROBE_RESP: | 710 | case IEEE80211_STYPE_PROBE_RESP: |
690 | case IEEE80211_STYPE_BEACON: | 711 | case IEEE80211_STYPE_BEACON: |
691 | case IEEE80211_STYPE_ACTION: | ||
692 | memcpy(skb->cb, rx_status, sizeof(*rx_status)); | ||
693 | skb_queue_tail(&ifmsh->skb_queue, skb); | 712 | skb_queue_tail(&ifmsh->skb_queue, skb); |
694 | queue_work(local->hw.workqueue, &ifmsh->work); | 713 | ieee80211_queue_work(&local->hw, &ifmsh->work); |
695 | return RX_QUEUED; | 714 | return RX_QUEUED; |
696 | } | 715 | } |
697 | 716 | ||
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index c7d72819cdd2..dd1c19319f0a 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -44,6 +44,23 @@ enum mesh_path_flags { | |||
44 | }; | 44 | }; |
45 | 45 | ||
46 | /** | 46 | /** |
47 | * enum mesh_deferred_task_flags - mac80211 mesh deferred tasks | ||
48 | * | ||
49 | * | ||
50 | * | ||
51 | * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks | ||
52 | * @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs | ||
53 | * to grow. | ||
54 | * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to | ||
55 | * grow | ||
56 | */ | ||
57 | enum mesh_deferred_task_flags { | ||
58 | MESH_WORK_HOUSEKEEPING, | ||
59 | MESH_WORK_GROW_MPATH_TABLE, | ||
60 | MESH_WORK_GROW_MPP_TABLE, | ||
61 | }; | ||
62 | |||
63 | /** | ||
47 | * struct mesh_path - mac80211 mesh path structure | 64 | * struct mesh_path - mac80211 mesh path structure |
48 | * | 65 | * |
49 | * @dst: mesh path destination mac address | 66 | * @dst: mesh path destination mac address |
@@ -61,7 +78,7 @@ enum mesh_path_flags { | |||
61 | * retry | 78 | * retry |
62 | * @discovery_retries: number of discovery retries | 79 | * @discovery_retries: number of discovery retries |
63 | * @flags: mesh path flags, as specified on &enum mesh_path_flags | 80 | * @flags: mesh path flags, as specified on &enum mesh_path_flags |
64 | * @state_lock: mesh pat state lock | 81 | * @state_lock: mesh path state lock |
65 | * | 82 | * |
66 | * | 83 | * |
67 | * The combination of dst and sdata is unique in the mesh path table. Since the | 84 | * The combination of dst and sdata is unique in the mesh path table. Since the |
@@ -174,6 +191,7 @@ struct mesh_rmc { | |||
174 | */ | 191 | */ |
175 | #define MESH_PATH_REFRESH_TIME 1000 | 192 | #define MESH_PATH_REFRESH_TIME 1000 |
176 | #define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) | 193 | #define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) |
194 | #define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */ | ||
177 | 195 | ||
178 | #define MESH_MAX_PREQ_RETRIES 4 | 196 | #define MESH_MAX_PREQ_RETRIES 4 |
179 | #define MESH_PATH_EXPIRE (600 * HZ) | 197 | #define MESH_PATH_EXPIRE (600 * HZ) |
@@ -193,8 +211,11 @@ struct mesh_rmc { | |||
193 | 211 | ||
194 | /* Public interfaces */ | 212 | /* Public interfaces */ |
195 | /* Various */ | 213 | /* Various */ |
214 | int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, | ||
215 | char *da, char *sa); | ||
196 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, | 216 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, |
197 | struct ieee80211_sub_if_data *sdata); | 217 | struct ieee80211_sub_if_data *sdata, char *addr4, |
218 | char *addr5, char *addr6); | ||
198 | int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, | 219 | int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, |
199 | struct ieee80211_sub_if_data *sdata); | 220 | struct ieee80211_sub_if_data *sdata); |
200 | bool mesh_matches_local(struct ieee802_11_elems *ie, | 221 | bool mesh_matches_local(struct ieee802_11_elems *ie, |
@@ -205,11 +226,12 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, | |||
205 | void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); | 226 | void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); |
206 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); | 227 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); |
207 | void ieee80211s_init(void); | 228 | void ieee80211s_init(void); |
229 | void ieee80211s_update_metric(struct ieee80211_local *local, | ||
230 | struct sta_info *stainfo, struct sk_buff *skb); | ||
208 | void ieee80211s_stop(void); | 231 | void ieee80211s_stop(void); |
209 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); | 232 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); |
210 | ieee80211_rx_result | 233 | ieee80211_rx_result |
211 | ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | 234 | ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); |
212 | struct ieee80211_rx_status *rx_status); | ||
213 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); | 235 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); |
214 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); | 236 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); |
215 | 237 | ||
@@ -247,7 +269,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, | |||
247 | /* Mesh tables */ | 269 | /* Mesh tables */ |
248 | struct mesh_table *mesh_table_alloc(int size_order); | 270 | struct mesh_table *mesh_table_alloc(int size_order); |
249 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs); | 271 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs); |
250 | struct mesh_table *mesh_table_grow(struct mesh_table *tbl); | 272 | void mesh_mpath_table_grow(void); |
273 | void mesh_mpp_table_grow(void); | ||
251 | u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, | 274 | u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, |
252 | struct mesh_table *tbl); | 275 | struct mesh_table *tbl); |
253 | /* Mesh paths */ | 276 | /* Mesh paths */ |
@@ -266,6 +289,8 @@ void mesh_path_discard_frame(struct sk_buff *skb, | |||
266 | void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); | 289 | void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); |
267 | void mesh_path_restart(struct ieee80211_sub_if_data *sdata); | 290 | void mesh_path_restart(struct ieee80211_sub_if_data *sdata); |
268 | 291 | ||
292 | extern int mesh_paths_generation; | ||
293 | |||
269 | #ifdef CONFIG_MAC80211_MESH | 294 | #ifdef CONFIG_MAC80211_MESH |
270 | extern int mesh_allocated; | 295 | extern int mesh_allocated; |
271 | 296 | ||
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index f49ef288e2e2..e12a786e26b8 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -201,6 +201,24 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, | |||
201 | return 0; | 201 | return 0; |
202 | } | 202 | } |
203 | 203 | ||
204 | void ieee80211s_update_metric(struct ieee80211_local *local, | ||
205 | struct sta_info *stainfo, struct sk_buff *skb) | ||
206 | { | ||
207 | struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); | ||
208 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
209 | int failed; | ||
210 | |||
211 | if (!ieee80211_is_data(hdr->frame_control)) | ||
212 | return; | ||
213 | |||
214 | failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK); | ||
215 | |||
216 | /* moving average, scaled to 100 */ | ||
217 | stainfo->fail_avg = ((80 * stainfo->fail_avg + 5) / 100 + 20 * failed); | ||
218 | if (stainfo->fail_avg > 95) | ||
219 | mesh_plink_broken(stainfo); | ||
220 | } | ||
221 | |||
204 | static u32 airtime_link_metric_get(struct ieee80211_local *local, | 222 | static u32 airtime_link_metric_get(struct ieee80211_local *local, |
205 | struct sta_info *sta) | 223 | struct sta_info *sta) |
206 | { | 224 | { |
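
The fail_avg update added above is an integer exponential moving average scaled to 0..100: each transmission keeps 80% of the previous average and adds 20 for a failure (the +5 is rounding), and mesh_plink_broken() is called once the average exceeds 95. A quick standalone check of how many consecutive failures that takes from a clean history (illustrative user-space code, not part of mac80211):

#include <stdio.h>

/* Same arithmetic as ieee80211s_update_metric(). */
static int update_fail_avg(int fail_avg, int failed)
{
    return (80 * fail_avg + 5) / 100 + 20 * failed;
}

int main(void)
{
    int fail_avg = 0;
    int tx;

    /* Feed nothing but failures until the > 95 threshold would fire. */
    for (tx = 1; ; tx++) {
        fail_avg = update_fail_avg(fail_avg, 1);
        if (fail_avg > 95) {
            printf("threshold crossed after %d consecutive failures\n", tx);
            break;
        }
    }
    return 0;
}
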
@@ -397,7 +415,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, | |||
397 | 415 | ||
398 | static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, | 416 | static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, |
399 | struct ieee80211_mgmt *mgmt, | 417 | struct ieee80211_mgmt *mgmt, |
400 | u8 *preq_elem, u32 metric) { | 418 | u8 *preq_elem, u32 metric) |
419 | { | ||
401 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 420 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
402 | struct mesh_path *mpath; | 421 | struct mesh_path *mpath; |
403 | u8 *dst_addr, *orig_addr; | 422 | u8 *dst_addr, *orig_addr; |
@@ -430,7 +449,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, | |||
430 | if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || | 449 | if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || |
431 | DSN_LT(mpath->dsn, dst_dsn)) { | 450 | DSN_LT(mpath->dsn, dst_dsn)) { |
432 | mpath->dsn = dst_dsn; | 451 | mpath->dsn = dst_dsn; |
433 | mpath->flags &= MESH_PATH_DSN_VALID; | 452 | mpath->flags |= MESH_PATH_DSN_VALID; |
434 | } else if ((!(dst_flags & MP_F_DO)) && | 453 | } else if ((!(dst_flags & MP_F_DO)) && |
435 | (mpath->flags & MESH_PATH_ACTIVE)) { | 454 | (mpath->flags & MESH_PATH_ACTIVE)) { |
436 | reply = true; | 455 | reply = true; |
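
The one-character change from &= to |= above matters: AND-ing with a single flag can only clear bits, so the old code wiped every other path flag and still never marked the destination sequence number valid. A tiny standalone demonstration (the bit positions are made up for illustration; the real MESH_PATH_* values may differ):

#include <assert.h>

#define MESH_PATH_ACTIVE    (1u << 0)   /* illustrative bit positions */
#define MESH_PATH_DSN_VALID (1u << 2)

int main(void)
{
    unsigned int old_style = MESH_PATH_ACTIVE;  /* DSN bit not set yet */
    unsigned int new_style = MESH_PATH_ACTIVE;

    old_style &= MESH_PATH_DSN_VALID;   /* old code: clears ACTIVE, sets nothing */
    new_style |= MESH_PATH_DSN_VALID;   /* fixed code: marks the DSN valid       */

    assert(old_style == 0);
    assert(new_style == (MESH_PATH_ACTIVE | MESH_PATH_DSN_VALID));
    return 0;
}
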
@@ -478,6 +497,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, | |||
478 | hopcount, ttl, cpu_to_le32(lifetime), | 497 | hopcount, ttl, cpu_to_le32(lifetime), |
479 | cpu_to_le32(metric), cpu_to_le32(preq_id), | 498 | cpu_to_le32(metric), cpu_to_le32(preq_id), |
480 | sdata); | 499 | sdata); |
500 | ifmsh->mshstats.fwded_mcast++; | ||
481 | ifmsh->mshstats.fwded_frames++; | 501 | ifmsh->mshstats.fwded_frames++; |
482 | } | 502 | } |
483 | } | 503 | } |
@@ -536,6 +556,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, | |||
536 | cpu_to_le32(lifetime), cpu_to_le32(metric), | 556 | cpu_to_le32(lifetime), cpu_to_le32(metric), |
537 | 0, sdata); | 557 | 0, sdata); |
538 | rcu_read_unlock(); | 558 | rcu_read_unlock(); |
559 | |||
560 | sdata->u.mesh.mshstats.fwded_unicast++; | ||
539 | sdata->u.mesh.mshstats.fwded_frames++; | 561 | sdata->u.mesh.mshstats.fwded_frames++; |
540 | return; | 562 | return; |
541 | 563 | ||
@@ -660,14 +682,14 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) | |||
660 | spin_unlock(&ifmsh->mesh_preq_queue_lock); | 682 | spin_unlock(&ifmsh->mesh_preq_queue_lock); |
661 | 683 | ||
662 | if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) | 684 | if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) |
663 | queue_work(sdata->local->hw.workqueue, &ifmsh->work); | 685 | ieee80211_queue_work(&sdata->local->hw, &ifmsh->work); |
664 | 686 | ||
665 | else if (time_before(jiffies, ifmsh->last_preq)) { | 687 | else if (time_before(jiffies, ifmsh->last_preq)) { |
666 | /* avoid long wait if did not send preqs for a long time | 688 | /* avoid long wait if did not send preqs for a long time |
667 | * and jiffies wrapped around | 689 | * and jiffies wrapped around |
668 | */ | 690 | */ |
669 | ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; | 691 | ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; |
670 | queue_work(sdata->local->hw.workqueue, &ifmsh->work); | 692 | ieee80211_queue_work(&sdata->local->hw, &ifmsh->work); |
671 | } else | 693 | } else |
672 | mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + | 694 | mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + |
673 | min_preq_int_jiff(sdata)); | 695 | min_preq_int_jiff(sdata)); |
@@ -686,11 +708,11 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) | |||
686 | u8 ttl, dst_flags; | 708 | u8 ttl, dst_flags; |
687 | u32 lifetime; | 709 | u32 lifetime; |
688 | 710 | ||
689 | spin_lock(&ifmsh->mesh_preq_queue_lock); | 711 | spin_lock_bh(&ifmsh->mesh_preq_queue_lock); |
690 | if (!ifmsh->preq_queue_len || | 712 | if (!ifmsh->preq_queue_len || |
691 | time_before(jiffies, ifmsh->last_preq + | 713 | time_before(jiffies, ifmsh->last_preq + |
692 | min_preq_int_jiff(sdata))) { | 714 | min_preq_int_jiff(sdata))) { |
693 | spin_unlock(&ifmsh->mesh_preq_queue_lock); | 715 | spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); |
694 | return; | 716 | return; |
695 | } | 717 | } |
696 | 718 | ||
@@ -698,7 +720,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) | |||
698 | struct mesh_preq_queue, list); | 720 | struct mesh_preq_queue, list); |
699 | list_del(&preq_node->list); | 721 | list_del(&preq_node->list); |
700 | --ifmsh->preq_queue_len; | 722 | --ifmsh->preq_queue_len; |
701 | spin_unlock(&ifmsh->mesh_preq_queue_lock); | 723 | spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); |
702 | 724 | ||
703 | rcu_read_lock(); | 725 | rcu_read_lock(); |
704 | mpath = mesh_path_lookup(preq_node->dst, sdata); | 726 | mpath = mesh_path_lookup(preq_node->dst, sdata); |
@@ -784,7 +806,6 @@ int mesh_nexthop_lookup(struct sk_buff *skb, | |||
784 | mesh_path_add(dst_addr, sdata); | 806 | mesh_path_add(dst_addr, sdata); |
785 | mpath = mesh_path_lookup(dst_addr, sdata); | 807 | mpath = mesh_path_lookup(dst_addr, sdata); |
786 | if (!mpath) { | 808 | if (!mpath) { |
787 | dev_kfree_skb(skb); | ||
788 | sdata->u.mesh.mshstats.dropped_frames_no_route++; | 809 | sdata->u.mesh.mshstats.dropped_frames_no_route++; |
789 | err = -ENOSPC; | 810 | err = -ENOSPC; |
790 | goto endlookup; | 811 | goto endlookup; |
@@ -792,7 +813,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb, | |||
792 | } | 813 | } |
793 | 814 | ||
794 | if (mpath->flags & MESH_PATH_ACTIVE) { | 815 | if (mpath->flags & MESH_PATH_ACTIVE) { |
795 | if (time_after(jiffies, mpath->exp_time - | 816 | if (time_after(jiffies, mpath->exp_time + |
796 | msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) | 817 | msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) |
797 | && !memcmp(sdata->dev->dev_addr, hdr->addr4, | 818 | && !memcmp(sdata->dev->dev_addr, hdr->addr4, |
798 | ETH_ALEN) | 819 | ETH_ALEN) |
@@ -804,17 +825,17 @@ int mesh_nexthop_lookup(struct sk_buff *skb, | |||
804 | memcpy(hdr->addr1, mpath->next_hop->sta.addr, | 825 | memcpy(hdr->addr1, mpath->next_hop->sta.addr, |
805 | ETH_ALEN); | 826 | ETH_ALEN); |
806 | } else { | 827 | } else { |
828 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
807 | if (!(mpath->flags & MESH_PATH_RESOLVING)) { | 829 | if (!(mpath->flags & MESH_PATH_RESOLVING)) { |
808 | /* Start discovery only if it is not running yet */ | 830 | /* Start discovery only if it is not running yet */ |
809 | mesh_queue_preq(mpath, PREQ_Q_F_START); | 831 | mesh_queue_preq(mpath, PREQ_Q_F_START); |
810 | } | 832 | } |
811 | 833 | ||
812 | if (skb_queue_len(&mpath->frame_queue) >= | 834 | if (skb_queue_len(&mpath->frame_queue) >= |
813 | MESH_FRAME_QUEUE_LEN) { | 835 | MESH_FRAME_QUEUE_LEN) |
814 | skb_to_free = mpath->frame_queue.next; | 836 | skb_to_free = skb_dequeue(&mpath->frame_queue); |
815 | skb_unlink(skb_to_free, &mpath->frame_queue); | ||
816 | } | ||
817 | 837 | ||
838 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | ||
818 | skb_queue_tail(&mpath->frame_queue, skb); | 839 | skb_queue_tail(&mpath->frame_queue, skb); |
819 | if (skb_to_free) | 840 | if (skb_to_free) |
820 | mesh_path_discard_frame(skb_to_free, sdata); | 841 | mesh_path_discard_frame(skb_to_free, sdata); |
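
This hunk bounds the per-path pending-frame queue: once MESH_FRAME_QUEUE_LEN frames are queued, the oldest one is removed with skb_dequeue() (which takes the queue lock itself, replacing the open-coded peek of frame_queue.next plus skb_unlink()), the new frame is flagged IEEE80211_TX_INTFL_NEED_TXPROCESSING and appended, and the evicted frame is discarded. A hedged sketch of the drop-oldest policy; the limit and helper name are stand-ins:

#include <linux/skbuff.h>

#define PENDING_QUEUE_LEN 10            /* illustrative limit */

/*
 * Append @skb to @q, evicting the oldest entry first if the queue is
 * already full.  Returns the evicted skb (to be freed/accounted by the
 * caller) or NULL if nothing was dropped.
 */
static struct sk_buff *queue_bounded_tail(struct sk_buff_head *q,
                                          struct sk_buff *skb)
{
        struct sk_buff *evicted = NULL;

        if (skb_queue_len(q) >= PENDING_QUEUE_LEN)
                evicted = skb_dequeue(q);       /* locked dequeue of the head */

        skb_queue_tail(q, skb);                 /* locked append */
        return evicted;
}
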
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 479597e88583..751c4d0e2b36 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -38,6 +38,71 @@ struct mpath_node { | |||
38 | static struct mesh_table *mesh_paths; | 38 | static struct mesh_table *mesh_paths; |
39 | static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ | 39 | static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ |
40 | 40 | ||
41 | int mesh_paths_generation; | ||
42 | static void __mesh_table_free(struct mesh_table *tbl) | ||
43 | { | ||
44 | kfree(tbl->hash_buckets); | ||
45 | kfree(tbl->hashwlock); | ||
46 | kfree(tbl); | ||
47 | } | ||
48 | |||
49 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs) | ||
50 | { | ||
51 | struct hlist_head *mesh_hash; | ||
52 | struct hlist_node *p, *q; | ||
53 | int i; | ||
54 | |||
55 | mesh_hash = tbl->hash_buckets; | ||
56 | for (i = 0; i <= tbl->hash_mask; i++) { | ||
57 | spin_lock(&tbl->hashwlock[i]); | ||
58 | hlist_for_each_safe(p, q, &mesh_hash[i]) { | ||
59 | tbl->free_node(p, free_leafs); | ||
60 | atomic_dec(&tbl->entries); | ||
61 | } | ||
62 | spin_unlock(&tbl->hashwlock[i]); | ||
63 | } | ||
64 | __mesh_table_free(tbl); | ||
65 | } | ||
66 | |||
67 | static struct mesh_table *mesh_table_grow(struct mesh_table *tbl) | ||
68 | { | ||
69 | struct mesh_table *newtbl; | ||
70 | struct hlist_head *oldhash; | ||
71 | struct hlist_node *p, *q; | ||
72 | int i; | ||
73 | |||
74 | if (atomic_read(&tbl->entries) | ||
75 | < tbl->mean_chain_len * (tbl->hash_mask + 1)) | ||
76 | goto endgrow; | ||
77 | |||
78 | newtbl = mesh_table_alloc(tbl->size_order + 1); | ||
79 | if (!newtbl) | ||
80 | goto endgrow; | ||
81 | |||
82 | newtbl->free_node = tbl->free_node; | ||
83 | newtbl->mean_chain_len = tbl->mean_chain_len; | ||
84 | newtbl->copy_node = tbl->copy_node; | ||
85 | atomic_set(&newtbl->entries, atomic_read(&tbl->entries)); | ||
86 | |||
87 | oldhash = tbl->hash_buckets; | ||
88 | for (i = 0; i <= tbl->hash_mask; i++) | ||
89 | hlist_for_each(p, &oldhash[i]) | ||
90 | if (tbl->copy_node(p, newtbl) < 0) | ||
91 | goto errcopy; | ||
92 | |||
93 | return newtbl; | ||
94 | |||
95 | errcopy: | ||
96 | for (i = 0; i <= newtbl->hash_mask; i++) { | ||
97 | hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) | ||
98 | tbl->free_node(p, 0); | ||
99 | } | ||
100 | __mesh_table_free(newtbl); | ||
101 | endgrow: | ||
102 | return NULL; | ||
103 | } | ||
104 | |||
105 | |||
41 | /* This lock will have the grow table function as writer and add / delete nodes | 106 | /* This lock will have the grow table function as writer and add / delete nodes |
42 | * as readers. When reading the table (i.e. doing lookups) we are well protected | 107 | * as readers. When reading the table (i.e. doing lookups) we are well protected |
43 | * by RCU | 108 | * by RCU |
@@ -55,7 +120,25 @@ static DEFINE_RWLOCK(pathtbl_resize_lock); | |||
55 | */ | 120 | */ |
56 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) | 121 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) |
57 | { | 122 | { |
123 | struct sk_buff *skb; | ||
124 | struct ieee80211_hdr *hdr; | ||
125 | struct sk_buff_head tmpq; | ||
126 | unsigned long flags; | ||
127 | |||
58 | rcu_assign_pointer(mpath->next_hop, sta); | 128 | rcu_assign_pointer(mpath->next_hop, sta); |
129 | |||
130 | __skb_queue_head_init(&tmpq); | ||
131 | |||
132 | spin_lock_irqsave(&mpath->frame_queue.lock, flags); | ||
133 | |||
134 | while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { | ||
135 | hdr = (struct ieee80211_hdr *) skb->data; | ||
136 | memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); | ||
137 | __skb_queue_tail(&tmpq, skb); | ||
138 | } | ||
139 | |||
140 | skb_queue_splice(&tmpq, &mpath->frame_queue); | ||
141 | spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); | ||
59 | } | 142 | } |
60 | 143 | ||
61 | 144 | ||
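
mesh_path_assign_nexthop() now does more than publish the next hop with rcu_assign_pointer(): frames already sitting on the path's queue still carry a stale receiver address, so they are walked under the queue's own lock, addr1 is rewritten to the new next hop, and the frames are spliced back in their original order. The __-prefixed skb helpers are used because the lock is already held. A sketch of the same pattern, with the function name and parameters as stand-ins:

#include <linux/skbuff.h>
#include <linux/ieee80211.h>
#include <linux/if_ether.h>
#include <linux/string.h>

/* Rewrite the receiver address of every frame queued on @pending.
 * Done under the queue's own lock so concurrent enqueuers are excluded;
 * the __-prefixed helpers skip the per-call locking. */
static void requeue_with_new_ra(struct sk_buff_head *pending, const u8 *new_ra)
{
        struct sk_buff_head tmpq;
        struct ieee80211_hdr *hdr;
        struct sk_buff *skb;
        unsigned long flags;

        __skb_queue_head_init(&tmpq);

        spin_lock_irqsave(&pending->lock, flags);
        while ((skb = __skb_dequeue(pending)) != NULL) {
                hdr = (struct ieee80211_hdr *)skb->data;
                memcpy(hdr->addr1, new_ra, ETH_ALEN);
                __skb_queue_tail(&tmpq, skb);
        }
        skb_queue_splice(&tmpq, pending);       /* preserve original order */
        spin_unlock_irqrestore(&pending->lock, flags);
}
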
@@ -167,6 +250,8 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data | |||
167 | */ | 250 | */ |
168 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) | 251 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) |
169 | { | 252 | { |
253 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
254 | struct ieee80211_local *local = sdata->local; | ||
170 | struct mesh_path *mpath, *new_mpath; | 255 | struct mesh_path *mpath, *new_mpath; |
171 | struct mpath_node *node, *new_node; | 256 | struct mpath_node *node, *new_node; |
172 | struct hlist_head *bucket; | 257 | struct hlist_head *bucket; |
@@ -175,8 +260,6 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) | |||
175 | int err = 0; | 260 | int err = 0; |
176 | u32 hash_idx; | 261 | u32 hash_idx; |
177 | 262 | ||
178 | might_sleep(); | ||
179 | |||
180 | if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) | 263 | if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) |
181 | /* never add ourselves as neighbours */ | 264 | /* never add ourselves as neighbours */ |
182 | return -ENOTSUPP; | 265 | return -ENOTSUPP; |
@@ -188,11 +271,11 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) | |||
188 | return -ENOSPC; | 271 | return -ENOSPC; |
189 | 272 | ||
190 | err = -ENOMEM; | 273 | err = -ENOMEM; |
191 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); | 274 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); |
192 | if (!new_mpath) | 275 | if (!new_mpath) |
193 | goto err_path_alloc; | 276 | goto err_path_alloc; |
194 | 277 | ||
195 | new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); | 278 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); |
196 | if (!new_node) | 279 | if (!new_node) |
197 | goto err_node_alloc; | 280 | goto err_node_alloc; |
198 | 281 | ||
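
Together with the removed might_sleep() annotations above, these hunks make path creation safe in atomic context by switching the allocations from GFP_KERNEL to GFP_ATOMIC: the allocator will not sleep or enter reclaim, so failure becomes more likely and every caller must handle NULL. A minimal sketch, assuming a hypothetical payload struct:

#include <linux/types.h>
#include <linux/slab.h>

struct path_entry {                     /* illustrative payload */
        u8      dst[6];
        void    *next_hop;
};

/* Called under RCU read lock / spinlock: sleeping is not allowed,
 * so use GFP_ATOMIC and be prepared for allocation failure. */
static struct path_entry *path_entry_alloc_atomic(void)
{
        struct path_entry *pe;

        pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
        if (!pe)
                return NULL;            /* caller must cope, e.g. -ENOMEM */

        return pe;
}
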
@@ -225,23 +308,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) | |||
225 | mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) | 308 | mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) |
226 | grow = 1; | 309 | grow = 1; |
227 | 310 | ||
311 | mesh_paths_generation++; | ||
312 | |||
228 | spin_unlock(&mesh_paths->hashwlock[hash_idx]); | 313 | spin_unlock(&mesh_paths->hashwlock[hash_idx]); |
229 | read_unlock(&pathtbl_resize_lock); | 314 | read_unlock(&pathtbl_resize_lock); |
230 | if (grow) { | 315 | if (grow) { |
231 | struct mesh_table *oldtbl, *newtbl; | 316 | set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); |
232 | 317 | ieee80211_queue_work(&local->hw, &ifmsh->work); | |
233 | write_lock(&pathtbl_resize_lock); | ||
234 | oldtbl = mesh_paths; | ||
235 | newtbl = mesh_table_grow(mesh_paths); | ||
236 | if (!newtbl) { | ||
237 | write_unlock(&pathtbl_resize_lock); | ||
238 | return 0; | ||
239 | } | ||
240 | rcu_assign_pointer(mesh_paths, newtbl); | ||
241 | write_unlock(&pathtbl_resize_lock); | ||
242 | |||
243 | synchronize_rcu(); | ||
244 | mesh_table_free(oldtbl, false); | ||
245 | } | 318 | } |
246 | return 0; | 319 | return 0; |
247 | 320 | ||
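
Because mesh_path_add() can no longer sleep, the table resize (which needs the writer lock and synchronize_rcu()) is no longer done inline; the hunk above just sets MESH_WORK_GROW_MPATH_TABLE in the interface's work flags and queues the mesh work item, and the grow runs later in process context. A hedged sketch of the flag-plus-workqueue deferral; the flag bit, context struct and handler are stand-ins, and mac80211 itself queues through ieee80211_queue_work() rather than schedule_work():

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#define WORK_GROW_TABLE 0               /* hypothetical flag bit */

struct resize_ctx {
        unsigned long           flags;  /* atomic flag bits */
        struct work_struct      work;   /* set up with INIT_WORK(&ctx->work,
                                         * resize_work) at init time */
};

/* Fast path, may run in atomic context: just flag the work. */
static void request_grow(struct resize_ctx *ctx)
{
        set_bit(WORK_GROW_TABLE, &ctx->flags);
        schedule_work(&ctx->work);
}

/* Work handler, process context: safe to sleep, take rwlocks, call
 * synchronize_rcu(), and so on. */
static void resize_work(struct work_struct *work)
{
        struct resize_ctx *ctx = container_of(work, struct resize_ctx, work);

        if (!test_and_clear_bit(WORK_GROW_TABLE, &ctx->flags))
                return;

        /* grow the table here: allocate the copy, rcu_assign_pointer() the
         * new table in, synchronize_rcu(), then free the old buckets */
}
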
@@ -256,9 +329,46 @@ err_path_alloc: | |||
256 | return err; | 329 | return err; |
257 | } | 330 | } |
258 | 331 | ||
332 | void mesh_mpath_table_grow(void) | ||
333 | { | ||
334 | struct mesh_table *oldtbl, *newtbl; | ||
335 | |||
336 | write_lock(&pathtbl_resize_lock); | ||
337 | oldtbl = mesh_paths; | ||
338 | newtbl = mesh_table_grow(mesh_paths); | ||
339 | if (!newtbl) { | ||
340 | write_unlock(&pathtbl_resize_lock); | ||
341 | return; | ||
342 | } | ||
343 | rcu_assign_pointer(mesh_paths, newtbl); | ||
344 | write_unlock(&pathtbl_resize_lock); | ||
345 | |||
346 | synchronize_rcu(); | ||
347 | mesh_table_free(oldtbl, false); | ||
348 | } | ||
349 | |||
350 | void mesh_mpp_table_grow(void) | ||
351 | { | ||
352 | struct mesh_table *oldtbl, *newtbl; | ||
353 | |||
354 | write_lock(&pathtbl_resize_lock); | ||
355 | oldtbl = mpp_paths; | ||
356 | newtbl = mesh_table_grow(mpp_paths); | ||
357 | if (!newtbl) { | ||
358 | write_unlock(&pathtbl_resize_lock); | ||
359 | return; | ||
360 | } | ||
361 | rcu_assign_pointer(mpp_paths, newtbl); | ||
362 | write_unlock(&pathtbl_resize_lock); | ||
363 | |||
364 | synchronize_rcu(); | ||
365 | mesh_table_free(oldtbl, false); | ||
366 | } | ||
259 | 367 | ||
260 | int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) | 368 | int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) |
261 | { | 369 | { |
370 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
371 | struct ieee80211_local *local = sdata->local; | ||
262 | struct mesh_path *mpath, *new_mpath; | 372 | struct mesh_path *mpath, *new_mpath; |
263 | struct mpath_node *node, *new_node; | 373 | struct mpath_node *node, *new_node; |
264 | struct hlist_head *bucket; | 374 | struct hlist_head *bucket; |
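
mesh_mpath_table_grow() and mesh_mpp_table_grow() follow the usual RCU resize sequence: build a larger copy (mesh_table_grow() earlier in the file), publish it with rcu_assign_pointer() under the writer lock, wait out all pre-existing readers with synchronize_rcu(), and only then free the old table without freeing the leaf entries that the new table still references. A sketch of that sequence with generic names; table_grow_copy() and table_free() are assumed helpers:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

/* Hypothetical table head; 'cur_table' is what RCU readers dereference. */
struct path_table {
        struct hlist_head *buckets;
        int size_order;
};

static struct path_table *cur_table;
static DEFINE_RWLOCK(resize_lock);      /* serialises writers of cur_table */

/* Assumed helpers: allocate a table twice the size and copy every node
 * into it / free a table, optionally keeping the leaf payloads. */
static struct path_table *table_grow_copy(struct path_table *old);
static void table_free(struct path_table *tbl, bool free_leafs);

/* Process-context resize, mirroring mesh_mpath_table_grow(). */
static void table_grow(void)
{
        struct path_table *oldtbl, *newtbl;

        write_lock(&resize_lock);
        oldtbl = cur_table;
        newtbl = table_grow_copy(oldtbl);
        if (!newtbl) {
                write_unlock(&resize_lock);
                return;                 /* below threshold or OOM: keep old */
        }
        rcu_assign_pointer(cur_table, newtbl);  /* publish the bigger copy */
        write_unlock(&resize_lock);

        synchronize_rcu();              /* wait for readers of the old table */
        table_free(oldtbl, false);      /* buckets only: leafs live in newtbl */
}
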
@@ -267,8 +377,6 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) | |||
267 | int err = 0; | 377 | int err = 0; |
268 | u32 hash_idx; | 378 | u32 hash_idx; |
269 | 379 | ||
270 | might_sleep(); | ||
271 | |||
272 | if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) | 380 | if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) |
273 | /* never add ourselves as neighbours */ | 381 | /* never add ourselves as neighbours */ |
274 | return -ENOTSUPP; | 382 | return -ENOTSUPP; |
@@ -277,11 +385,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) | |||
277 | return -ENOTSUPP; | 385 | return -ENOTSUPP; |
278 | 386 | ||
279 | err = -ENOMEM; | 387 | err = -ENOMEM; |
280 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); | 388 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); |
281 | if (!new_mpath) | 389 | if (!new_mpath) |
282 | goto err_path_alloc; | 390 | goto err_path_alloc; |
283 | 391 | ||
284 | new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); | 392 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); |
285 | if (!new_node) | 393 | if (!new_node) |
286 | goto err_node_alloc; | 394 | goto err_node_alloc; |
287 | 395 | ||
@@ -315,20 +423,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) | |||
315 | spin_unlock(&mpp_paths->hashwlock[hash_idx]); | 423 | spin_unlock(&mpp_paths->hashwlock[hash_idx]); |
316 | read_unlock(&pathtbl_resize_lock); | 424 | read_unlock(&pathtbl_resize_lock); |
317 | if (grow) { | 425 | if (grow) { |
318 | struct mesh_table *oldtbl, *newtbl; | 426 | set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); |
319 | 427 | ieee80211_queue_work(&local->hw, &ifmsh->work); | |
320 | write_lock(&pathtbl_resize_lock); | ||
321 | oldtbl = mpp_paths; | ||
322 | newtbl = mesh_table_grow(mpp_paths); | ||
323 | if (!newtbl) { | ||
324 | write_unlock(&pathtbl_resize_lock); | ||
325 | return 0; | ||
326 | } | ||
327 | rcu_assign_pointer(mpp_paths, newtbl); | ||
328 | write_unlock(&pathtbl_resize_lock); | ||
329 | |||
330 | synchronize_rcu(); | ||
331 | mesh_table_free(oldtbl, false); | ||
332 | } | 428 | } |
333 | return 0; | 429 | return 0; |
334 | 430 | ||
@@ -466,6 +562,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) | |||
466 | 562 | ||
467 | err = -ENXIO; | 563 | err = -ENXIO; |
468 | enddel: | 564 | enddel: |
565 | mesh_paths_generation++; | ||
469 | spin_unlock(&mesh_paths->hashwlock[hash_idx]); | 566 | spin_unlock(&mesh_paths->hashwlock[hash_idx]); |
470 | read_unlock(&pathtbl_resize_lock); | 567 | read_unlock(&pathtbl_resize_lock); |
471 | return err; | 568 | return err; |
@@ -481,11 +578,9 @@ enddel: | |||
481 | */ | 578 | */ |
482 | void mesh_path_tx_pending(struct mesh_path *mpath) | 579 | void mesh_path_tx_pending(struct mesh_path *mpath) |
483 | { | 580 | { |
484 | struct sk_buff *skb; | 581 | if (mpath->flags & MESH_PATH_ACTIVE) |
485 | 582 | ieee80211_add_pending_skbs(mpath->sdata->local, | |
486 | while ((skb = skb_dequeue(&mpath->frame_queue)) && | 583 | &mpath->frame_queue); |
487 | (mpath->flags & MESH_PATH_ACTIVE)) | ||
488 | dev_queue_xmit(skb); | ||
489 | } | 584 | } |
490 | 585 | ||
491 | /** | 586 | /** |
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index cb14253587f1..ffcbad75e09b 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c | |||
@@ -409,7 +409,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m | |||
409 | baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt; | 409 | baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt; |
410 | if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) { | 410 | if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) { |
411 | baseaddr += 4; | 411 | baseaddr += 4; |
412 | baselen -= 4; | 412 | baselen += 4; |
413 | } | 413 | } |
414 | ieee802_11_parse_elems(baseaddr, len - baselen, &elems); | 414 | ieee802_11_parse_elems(baseaddr, len - baselen, &elems); |
415 | if (!elems.peer_link) { | 415 | if (!elems.peer_link) { |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 07e7e41816be..97a278a2f48e 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -27,43 +27,99 @@ | |||
27 | #include "rate.h" | 27 | #include "rate.h" |
28 | #include "led.h" | 28 | #include "led.h" |
29 | 29 | ||
30 | #define IEEE80211_ASSOC_SCANS_MAX_TRIES 2 | ||
31 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) | 30 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) |
32 | #define IEEE80211_AUTH_MAX_TRIES 3 | 31 | #define IEEE80211_AUTH_MAX_TRIES 3 |
33 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) | 32 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) |
34 | #define IEEE80211_ASSOC_MAX_TRIES 3 | 33 | #define IEEE80211_ASSOC_MAX_TRIES 3 |
35 | #define IEEE80211_MONITORING_INTERVAL (2 * HZ) | 34 | #define IEEE80211_MAX_PROBE_TRIES 5 |
36 | #define IEEE80211_PROBE_WAIT (HZ / 5) | 35 | |
37 | #define IEEE80211_PROBE_IDLE_TIME (60 * HZ) | 36 | /* |
38 | #define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) | 37 | * beacon loss detection timeout |
38 | * XXX: should depend on beacon interval | ||
39 | */ | ||
40 | #define IEEE80211_BEACON_LOSS_TIME (2 * HZ) | ||
41 | /* | ||
42 | * Time the connection can be idle before we probe | ||
43 | * it to see if we can still talk to the AP. | ||
44 | */ | ||
45 | #define IEEE80211_CONNECTION_IDLE_TIME (30 * HZ) | ||
46 | /* | ||
47 | * Time we wait for a probe response after sending | ||
48 | * a probe request because of beacon loss or for | ||
49 | * checking the connection still works. | ||
50 | */ | ||
51 | #define IEEE80211_PROBE_WAIT (HZ / 2) | ||
39 | 52 | ||
40 | #define TMR_RUNNING_TIMER 0 | 53 | #define TMR_RUNNING_TIMER 0 |
41 | #define TMR_RUNNING_CHANSW 1 | 54 | #define TMR_RUNNING_CHANSW 1 |
42 | 55 | ||
56 | /* | ||
57 | * All cfg80211 functions have to be called outside a locked | ||
58 | * section so that they can acquire a lock themselves... This | ||
59 | * is much simpler than queuing up things in cfg80211, but we | ||
60 | * do need some indirection for that here. | ||
61 | */ | ||
62 | enum rx_mgmt_action { | ||
63 | /* no action required */ | ||
64 | RX_MGMT_NONE, | ||
65 | |||
66 | /* caller must call cfg80211_send_rx_auth() */ | ||
67 | RX_MGMT_CFG80211_AUTH, | ||
68 | |||
69 | /* caller must call cfg80211_send_rx_assoc() */ | ||
70 | RX_MGMT_CFG80211_ASSOC, | ||
71 | |||
72 | /* caller must call cfg80211_send_deauth() */ | ||
73 | RX_MGMT_CFG80211_DEAUTH, | ||
74 | |||
75 | /* caller must call cfg80211_send_disassoc() */ | ||
76 | RX_MGMT_CFG80211_DISASSOC, | ||
77 | |||
78 | /* caller must call cfg80211_auth_timeout() & free work */ | ||
79 | RX_MGMT_CFG80211_AUTH_TO, | ||
80 | |||
81 | /* caller must call cfg80211_assoc_timeout() & free work */ | ||
82 | RX_MGMT_CFG80211_ASSOC_TO, | ||
83 | }; | ||
84 | |||
43 | /* utils */ | 85 | /* utils */ |
44 | static int ecw2cw(int ecw) | 86 | static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd) |
45 | { | 87 | { |
46 | return (1 << ecw) - 1; | 88 | WARN_ON(!mutex_is_locked(&ifmgd->mtx)); |
47 | } | 89 | } |
48 | 90 | ||
49 | static u8 *ieee80211_bss_get_ie(struct ieee80211_bss *bss, u8 ie) | 91 | /* |
92 | * We can have multiple work items (and connection probing) | ||
93 | * scheduling this timer, but we need to take care to only | ||
94 | * reschedule it when it should fire _earlier_ than it was | ||
95 | * asked for before, or if it's not pending right now. This | ||
96 | * function ensures that. Note that it then is required to | ||
97 | * run this function for all timeouts after the first one | ||
98 | * has happened -- the work that runs from this timer will | ||
99 | * do that. | ||
100 | */ | ||
101 | static void run_again(struct ieee80211_if_managed *ifmgd, | ||
102 | unsigned long timeout) | ||
50 | { | 103 | { |
51 | u8 *end, *pos; | 104 | ASSERT_MGD_MTX(ifmgd); |
52 | 105 | ||
53 | pos = bss->cbss.information_elements; | 106 | if (!timer_pending(&ifmgd->timer) || |
54 | if (pos == NULL) | 107 | time_before(timeout, ifmgd->timer.expires)) |
55 | return NULL; | 108 | mod_timer(&ifmgd->timer, timeout); |
56 | end = pos + bss->cbss.len_information_elements; | 109 | } |
57 | 110 | ||
58 | while (pos + 1 < end) { | 111 | static void mod_beacon_timer(struct ieee80211_sub_if_data *sdata) |
59 | if (pos + 2 + pos[1] > end) | 112 | { |
60 | break; | 113 | if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER) |
61 | if (pos[0] == ie) | 114 | return; |
62 | return pos; | 115 | |
63 | pos += 2 + pos[1]; | 116 | mod_timer(&sdata->u.mgd.bcn_mon_timer, |
64 | } | 117 | round_jiffies_up(jiffies + IEEE80211_BEACON_LOSS_TIME)); |
118 | } | ||
65 | 119 | ||
66 | return NULL; | 120 | static int ecw2cw(int ecw) |
121 | { | ||
122 | return (1 << ecw) - 1; | ||
67 | } | 123 | } |
68 | 124 | ||
69 | static int ieee80211_compatible_rates(struct ieee80211_bss *bss, | 125 | static int ieee80211_compatible_rates(struct ieee80211_bss *bss, |
@@ -94,11 +150,10 @@ static int ieee80211_compatible_rates(struct ieee80211_bss *bss, | |||
94 | */ | 150 | */ |
95 | static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, | 151 | static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, |
96 | struct ieee80211_ht_info *hti, | 152 | struct ieee80211_ht_info *hti, |
97 | u16 ap_ht_cap_flags) | 153 | const u8 *bssid, u16 ap_ht_cap_flags) |
98 | { | 154 | { |
99 | struct ieee80211_local *local = sdata->local; | 155 | struct ieee80211_local *local = sdata->local; |
100 | struct ieee80211_supported_band *sband; | 156 | struct ieee80211_supported_band *sband; |
101 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
102 | struct sta_info *sta; | 157 | struct sta_info *sta; |
103 | u32 changed = 0; | 158 | u32 changed = 0; |
104 | u16 ht_opmode; | 159 | u16 ht_opmode; |
@@ -147,12 +202,10 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, | |||
147 | ieee80211_hw_config(local, 0); | 202 | ieee80211_hw_config(local, 0); |
148 | 203 | ||
149 | rcu_read_lock(); | 204 | rcu_read_lock(); |
150 | 205 | sta = sta_info_get(local, bssid); | |
151 | sta = sta_info_get(local, ifmgd->bssid); | ||
152 | if (sta) | 206 | if (sta) |
153 | rate_control_rate_update(local, sband, sta, | 207 | rate_control_rate_update(local, sband, sta, |
154 | IEEE80211_RC_HT_CHANGED); | 208 | IEEE80211_RC_HT_CHANGED); |
155 | |||
156 | rcu_read_unlock(); | 209 | rcu_read_unlock(); |
157 | } | 210 | } |
158 | 211 | ||
@@ -175,23 +228,24 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, | |||
175 | 228 | ||
176 | /* frame sending functions */ | 229 | /* frame sending functions */ |
177 | 230 | ||
178 | static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | 231 | static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, |
232 | struct ieee80211_mgd_work *wk) | ||
179 | { | 233 | { |
180 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 234 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
181 | struct ieee80211_local *local = sdata->local; | 235 | struct ieee80211_local *local = sdata->local; |
182 | struct sk_buff *skb; | 236 | struct sk_buff *skb; |
183 | struct ieee80211_mgmt *mgmt; | 237 | struct ieee80211_mgmt *mgmt; |
184 | u8 *pos, *ies, *ht_ie; | 238 | u8 *pos; |
239 | const u8 *ies, *ht_ie; | ||
185 | int i, len, count, rates_len, supp_rates_len; | 240 | int i, len, count, rates_len, supp_rates_len; |
186 | u16 capab; | 241 | u16 capab; |
187 | struct ieee80211_bss *bss; | ||
188 | int wmm = 0; | 242 | int wmm = 0; |
189 | struct ieee80211_supported_band *sband; | 243 | struct ieee80211_supported_band *sband; |
190 | u32 rates = 0; | 244 | u32 rates = 0; |
191 | 245 | ||
192 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + | 246 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + |
193 | sizeof(*mgmt) + 200 + ifmgd->extra_ie_len + | 247 | sizeof(*mgmt) + 200 + wk->ie_len + |
194 | ifmgd->ssid_len); | 248 | wk->ssid_len); |
195 | if (!skb) { | 249 | if (!skb) { |
196 | printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " | 250 | printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " |
197 | "frame\n", sdata->dev->name); | 251 | "frame\n", sdata->dev->name); |
@@ -210,45 +264,35 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | |||
210 | capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; | 264 | capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; |
211 | } | 265 | } |
212 | 266 | ||
213 | bss = ieee80211_rx_bss_get(local, ifmgd->bssid, | 267 | if (wk->bss->cbss.capability & WLAN_CAPABILITY_PRIVACY) |
214 | local->hw.conf.channel->center_freq, | 268 | capab |= WLAN_CAPABILITY_PRIVACY; |
215 | ifmgd->ssid, ifmgd->ssid_len); | 269 | if (wk->bss->wmm_used) |
216 | if (bss) { | 270 | wmm = 1; |
217 | if (bss->cbss.capability & WLAN_CAPABILITY_PRIVACY) | ||
218 | capab |= WLAN_CAPABILITY_PRIVACY; | ||
219 | if (bss->wmm_used) | ||
220 | wmm = 1; | ||
221 | 271 | ||
222 | /* get all rates supported by the device and the AP as | 272 | /* get all rates supported by the device and the AP as |
223 | * some APs don't like getting a superset of their rates | 273 | * some APs don't like getting a superset of their rates |
224 | * in the association request (e.g. D-Link DAP 1353 in | 274 | * in the association request (e.g. D-Link DAP 1353 in |
225 | * b-only mode) */ | 275 | * b-only mode) */ |
226 | rates_len = ieee80211_compatible_rates(bss, sband, &rates); | 276 | rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates); |
227 | 277 | ||
228 | if ((bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) && | 278 | if ((wk->bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) && |
229 | (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)) | 279 | (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)) |
230 | capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; | 280 | capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; |
231 | |||
232 | ieee80211_rx_bss_put(local, bss); | ||
233 | } else { | ||
234 | rates = ~0; | ||
235 | rates_len = sband->n_bitrates; | ||
236 | } | ||
237 | 281 | ||
238 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 282 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
239 | memset(mgmt, 0, 24); | 283 | memset(mgmt, 0, 24); |
240 | memcpy(mgmt->da, ifmgd->bssid, ETH_ALEN); | 284 | memcpy(mgmt->da, wk->bss->cbss.bssid, ETH_ALEN); |
241 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | 285 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
242 | memcpy(mgmt->bssid, ifmgd->bssid, ETH_ALEN); | 286 | memcpy(mgmt->bssid, wk->bss->cbss.bssid, ETH_ALEN); |
243 | 287 | ||
244 | if (ifmgd->flags & IEEE80211_STA_PREV_BSSID_SET) { | 288 | if (!is_zero_ether_addr(wk->prev_bssid)) { |
245 | skb_put(skb, 10); | 289 | skb_put(skb, 10); |
246 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | | 290 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
247 | IEEE80211_STYPE_REASSOC_REQ); | 291 | IEEE80211_STYPE_REASSOC_REQ); |
248 | mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); | 292 | mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); |
249 | mgmt->u.reassoc_req.listen_interval = | 293 | mgmt->u.reassoc_req.listen_interval = |
250 | cpu_to_le16(local->hw.conf.listen_interval); | 294 | cpu_to_le16(local->hw.conf.listen_interval); |
251 | memcpy(mgmt->u.reassoc_req.current_ap, ifmgd->prev_bssid, | 295 | memcpy(mgmt->u.reassoc_req.current_ap, wk->prev_bssid, |
252 | ETH_ALEN); | 296 | ETH_ALEN); |
253 | } else { | 297 | } else { |
254 | skb_put(skb, 4); | 298 | skb_put(skb, 4); |
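
The (re)association request is assembled by hand: the skb is sized for the driver's extra_tx_headroom plus a generous body, the fixed 24-byte management header is written first, and the capability, listen-interval and (for a reassociation) previous-BSSID fields follow via skb_put(). A hedged sketch of allocating and starting such a frame; the function and its parameters are illustrative, not a mac80211 API:

#include <linux/skbuff.h>
#include <linux/ieee80211.h>
#include <linux/if_ether.h>
#include <linux/string.h>

/* Allocate a management frame and fill the fixed 24-byte header.
 * @headroom: space to leave for the driver (e.g. hw->extra_tx_headroom)
 * @max_body: upper bound on the body the caller will append with skb_put()
 */
static struct sk_buff *alloc_mgmt_frame(unsigned int headroom,
                                        unsigned int max_body,
                                        const u8 *da, const u8 *sa,
                                        const u8 *bssid, u16 stype)
{
        struct ieee80211_mgmt *mgmt;
        struct sk_buff *skb;

        skb = dev_alloc_skb(headroom + 24 + max_body);
        if (!skb)
                return NULL;
        skb_reserve(skb, headroom);     /* frame data starts after headroom */

        mgmt = (struct ieee80211_mgmt *)skb_put(skb, 24);
        memset(mgmt, 0, 24);
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
        memcpy(mgmt->da, da, ETH_ALEN);
        memcpy(mgmt->sa, sa, ETH_ALEN);
        memcpy(mgmt->bssid, bssid, ETH_ALEN);

        return skb;                     /* caller appends fixed fields, IEs */
}
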
@@ -260,10 +304,10 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | |||
260 | } | 304 | } |
261 | 305 | ||
262 | /* SSID */ | 306 | /* SSID */ |
263 | ies = pos = skb_put(skb, 2 + ifmgd->ssid_len); | 307 | ies = pos = skb_put(skb, 2 + wk->ssid_len); |
264 | *pos++ = WLAN_EID_SSID; | 308 | *pos++ = WLAN_EID_SSID; |
265 | *pos++ = ifmgd->ssid_len; | 309 | *pos++ = wk->ssid_len; |
266 | memcpy(pos, ifmgd->ssid, ifmgd->ssid_len); | 310 | memcpy(pos, wk->ssid, wk->ssid_len); |
267 | 311 | ||
268 | /* add all rates which were marked to be used above */ | 312 | /* add all rates which were marked to be used above */ |
269 | supp_rates_len = rates_len; | 313 | supp_rates_len = rates_len; |
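
Information elements are appended the same way throughout this function: reserve 2 + len bytes with skb_put(), write the element ID and length, then copy the payload (here the SSID now comes from the work item rather than ifmgd). A small hedged sketch of appending one such TLV; the helper name is a stand-in:

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ieee80211.h>
#include <linux/string.h>

/* Append one information element (EID, length, payload) to @skb.
 * The caller must have reserved enough tailroom when allocating. */
static u8 *append_ie(struct sk_buff *skb, u8 eid, const u8 *data, u8 len)
{
        u8 *pos = skb_put(skb, 2 + len);

        *pos++ = eid;                   /* e.g. WLAN_EID_SSID */
        *pos++ = len;
        memcpy(pos, data, len);
        return pos + len;
}
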
@@ -318,9 +362,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | |||
318 | } | 362 | } |
319 | } | 363 | } |
320 | 364 | ||
321 | if (ifmgd->extra_ie) { | 365 | if (wk->ie_len && wk->ie) { |
322 | pos = skb_put(skb, ifmgd->extra_ie_len); | 366 | pos = skb_put(skb, wk->ie_len); |
323 | memcpy(pos, ifmgd->extra_ie, ifmgd->extra_ie_len); | 367 | memcpy(pos, wk->ie, wk->ie_len); |
324 | } | 368 | } |
325 | 369 | ||
326 | if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) { | 370 | if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) { |
@@ -345,9 +389,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | |||
345 | */ | 389 | */ |
346 | if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) && | 390 | if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) && |
347 | sband->ht_cap.ht_supported && | 391 | sband->ht_cap.ht_supported && |
348 | (ht_ie = ieee80211_bss_get_ie(bss, WLAN_EID_HT_INFORMATION)) && | 392 | (ht_ie = ieee80211_bss_get_ie(&wk->bss->cbss, WLAN_EID_HT_INFORMATION)) && |
349 | ht_ie[1] >= sizeof(struct ieee80211_ht_info) && | 393 | ht_ie[1] >= sizeof(struct ieee80211_ht_info) && |
350 | (!(ifmgd->flags & IEEE80211_STA_TKIP_WEP_USED))) { | 394 | (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))) { |
351 | struct ieee80211_ht_info *ht_info = | 395 | struct ieee80211_ht_info *ht_info = |
352 | (struct ieee80211_ht_info *)(ht_ie + 2); | 396 | (struct ieee80211_ht_info *)(ht_ie + 2); |
353 | u16 cap = sband->ht_cap.cap; | 397 | u16 cap = sband->ht_cap.cap; |
@@ -382,18 +426,13 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | |||
382 | memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); | 426 | memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); |
383 | } | 427 | } |
384 | 428 | ||
385 | kfree(ifmgd->assocreq_ies); | ||
386 | ifmgd->assocreq_ies_len = (skb->data + skb->len) - ies; | ||
387 | ifmgd->assocreq_ies = kmalloc(ifmgd->assocreq_ies_len, GFP_KERNEL); | ||
388 | if (ifmgd->assocreq_ies) | ||
389 | memcpy(ifmgd->assocreq_ies, ies, ifmgd->assocreq_ies_len); | ||
390 | |||
391 | ieee80211_tx_skb(sdata, skb, 0); | 429 | ieee80211_tx_skb(sdata, skb, 0); |
392 | } | 430 | } |
393 | 431 | ||
394 | 432 | ||
395 | static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, | 433 | static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, |
396 | u16 stype, u16 reason) | 434 | const u8 *bssid, u16 stype, u16 reason, |
435 | void *cookie) | ||
397 | { | 436 | { |
398 | struct ieee80211_local *local = sdata->local; | 437 | struct ieee80211_local *local = sdata->local; |
399 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 438 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
@@ -410,18 +449,18 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, | |||
410 | 449 | ||
411 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 450 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
412 | memset(mgmt, 0, 24); | 451 | memset(mgmt, 0, 24); |
413 | memcpy(mgmt->da, ifmgd->bssid, ETH_ALEN); | 452 | memcpy(mgmt->da, bssid, ETH_ALEN); |
414 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | 453 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
415 | memcpy(mgmt->bssid, ifmgd->bssid, ETH_ALEN); | 454 | memcpy(mgmt->bssid, bssid, ETH_ALEN); |
416 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); | 455 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); |
417 | skb_put(skb, 2); | 456 | skb_put(skb, 2); |
418 | /* u.deauth.reason_code == u.disassoc.reason_code */ | 457 | /* u.deauth.reason_code == u.disassoc.reason_code */ |
419 | mgmt->u.deauth.reason_code = cpu_to_le16(reason); | 458 | mgmt->u.deauth.reason_code = cpu_to_le16(reason); |
420 | 459 | ||
421 | if (stype == IEEE80211_STYPE_DEAUTH) | 460 | if (stype == IEEE80211_STYPE_DEAUTH) |
422 | cfg80211_send_deauth(sdata->dev, (u8 *) mgmt, skb->len); | 461 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, cookie); |
423 | else | 462 | else |
424 | cfg80211_send_disassoc(sdata->dev, (u8 *) mgmt, skb->len); | 463 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len, cookie); |
425 | ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED); | 464 | ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED); |
426 | } | 465 | } |
427 | 466 | ||
@@ -494,28 +533,26 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
494 | { | 533 | { |
495 | struct ieee80211_sub_if_data *sdata = | 534 | struct ieee80211_sub_if_data *sdata = |
496 | container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work); | 535 | container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work); |
497 | struct ieee80211_bss *bss; | ||
498 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 536 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
499 | 537 | ||
500 | if (!netif_running(sdata->dev)) | 538 | if (!netif_running(sdata->dev)) |
501 | return; | 539 | return; |
502 | 540 | ||
503 | bss = ieee80211_rx_bss_get(sdata->local, ifmgd->bssid, | 541 | mutex_lock(&ifmgd->mtx); |
504 | sdata->local->hw.conf.channel->center_freq, | 542 | if (!ifmgd->associated) |
505 | ifmgd->ssid, ifmgd->ssid_len); | 543 | goto out; |
506 | if (!bss) | ||
507 | goto exit; | ||
508 | 544 | ||
509 | sdata->local->oper_channel = sdata->local->csa_channel; | 545 | sdata->local->oper_channel = sdata->local->csa_channel; |
546 | ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL); | ||
547 | |||
510 | /* XXX: shouldn't really modify cfg80211-owned data! */ | 548 | /* XXX: shouldn't really modify cfg80211-owned data! */ |
511 | if (!ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL)) | 549 | ifmgd->associated->cbss.channel = sdata->local->oper_channel; |
512 | bss->cbss.channel = sdata->local->oper_channel; | ||
513 | 550 | ||
514 | ieee80211_rx_bss_put(sdata->local, bss); | ||
515 | exit: | ||
516 | ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; | ||
517 | ieee80211_wake_queues_by_reason(&sdata->local->hw, | 551 | ieee80211_wake_queues_by_reason(&sdata->local->hw, |
518 | IEEE80211_QUEUE_STOP_REASON_CSA); | 552 | IEEE80211_QUEUE_STOP_REASON_CSA); |
553 | out: | ||
554 | ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; | ||
555 | mutex_unlock(&ifmgd->mtx); | ||
519 | } | 556 | } |
520 | 557 | ||
521 | static void ieee80211_chswitch_timer(unsigned long data) | 558 | static void ieee80211_chswitch_timer(unsigned long data) |
@@ -529,7 +566,7 @@ static void ieee80211_chswitch_timer(unsigned long data) | |||
529 | return; | 566 | return; |
530 | } | 567 | } |
531 | 568 | ||
532 | queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work); | 569 | ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); |
533 | } | 570 | } |
534 | 571 | ||
535 | void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | 572 | void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, |
@@ -540,10 +577,12 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
540 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 577 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
541 | int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num); | 578 | int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num); |
542 | 579 | ||
543 | if (ifmgd->state != IEEE80211_STA_MLME_ASSOCIATED) | 580 | ASSERT_MGD_MTX(ifmgd); |
581 | |||
582 | if (!ifmgd->associated) | ||
544 | return; | 583 | return; |
545 | 584 | ||
546 | if (sdata->local->sw_scanning || sdata->local->hw_scanning) | 585 | if (sdata->local->scanning) |
547 | return; | 586 | return; |
548 | 587 | ||
549 | /* Disregard subsequent beacons if we are already running a timer | 588 | /* Disregard subsequent beacons if we are already running a timer |
@@ -559,7 +598,7 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
559 | sdata->local->csa_channel = new_ch; | 598 | sdata->local->csa_channel = new_ch; |
560 | 599 | ||
561 | if (sw_elem->count <= 1) { | 600 | if (sw_elem->count <= 1) { |
562 | queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work); | 601 | ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work); |
563 | } else { | 602 | } else { |
564 | ieee80211_stop_queues_by_reason(&sdata->local->hw, | 603 | ieee80211_stop_queues_by_reason(&sdata->local->hw, |
565 | IEEE80211_QUEUE_STOP_REASON_CSA); | 604 | IEEE80211_QUEUE_STOP_REASON_CSA); |
@@ -601,7 +640,7 @@ static void ieee80211_enable_ps(struct ieee80211_local *local, | |||
601 | * If we are scanning right now then the parameters will | 640 | * If we are scanning right now then the parameters will |
602 | * take effect when scan finishes. | 641 | * take effect when scan finishes. |
603 | */ | 642 | */ |
604 | if (local->hw_scanning || local->sw_scanning) | 643 | if (local->scanning) |
605 | return; | 644 | return; |
606 | 645 | ||
607 | if (conf->dynamic_ps_timeout > 0 && | 646 | if (conf->dynamic_ps_timeout > 0 && |
@@ -651,8 +690,9 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) | |||
651 | } | 690 | } |
652 | 691 | ||
653 | if (count == 1 && found->u.mgd.powersave && | 692 | if (count == 1 && found->u.mgd.powersave && |
654 | (found->u.mgd.flags & IEEE80211_STA_ASSOCIATED) && | 693 | found->u.mgd.associated && list_empty(&found->u.mgd.work_list) && |
655 | !(found->u.mgd.flags & IEEE80211_STA_PROBEREQ_POLL)) { | 694 | !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL | |
695 | IEEE80211_STA_CONNECTION_POLL))) { | ||
656 | s32 beaconint_us; | 696 | s32 beaconint_us; |
657 | 697 | ||
658 | if (latency < 0) | 698 | if (latency < 0) |
@@ -724,7 +764,7 @@ void ieee80211_dynamic_ps_timer(unsigned long data) | |||
724 | if (local->quiescing || local->suspended) | 764 | if (local->quiescing || local->suspended) |
725 | return; | 765 | return; |
726 | 766 | ||
727 | queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work); | 767 | ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work); |
728 | } | 768 | } |
729 | 769 | ||
730 | /* MLME */ | 770 | /* MLME */ |
@@ -806,9 +846,6 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, | |||
806 | u16 capab, bool erp_valid, u8 erp) | 846 | u16 capab, bool erp_valid, u8 erp) |
807 | { | 847 | { |
808 | struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; | 848 | struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; |
809 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
810 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
811 | #endif | ||
812 | u32 changed = 0; | 849 | u32 changed = 0; |
813 | bool use_protection; | 850 | bool use_protection; |
814 | bool use_short_preamble; | 851 | bool use_short_preamble; |
@@ -825,42 +862,16 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, | |||
825 | use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); | 862 | use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); |
826 | 863 | ||
827 | if (use_protection != bss_conf->use_cts_prot) { | 864 | if (use_protection != bss_conf->use_cts_prot) { |
828 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
829 | if (net_ratelimit()) { | ||
830 | printk(KERN_DEBUG "%s: CTS protection %s (BSSID=%pM)\n", | ||
831 | sdata->dev->name, | ||
832 | use_protection ? "enabled" : "disabled", | ||
833 | ifmgd->bssid); | ||
834 | } | ||
835 | #endif | ||
836 | bss_conf->use_cts_prot = use_protection; | 865 | bss_conf->use_cts_prot = use_protection; |
837 | changed |= BSS_CHANGED_ERP_CTS_PROT; | 866 | changed |= BSS_CHANGED_ERP_CTS_PROT; |
838 | } | 867 | } |
839 | 868 | ||
840 | if (use_short_preamble != bss_conf->use_short_preamble) { | 869 | if (use_short_preamble != bss_conf->use_short_preamble) { |
841 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
842 | if (net_ratelimit()) { | ||
843 | printk(KERN_DEBUG "%s: switched to %s barker preamble" | ||
844 | " (BSSID=%pM)\n", | ||
845 | sdata->dev->name, | ||
846 | use_short_preamble ? "short" : "long", | ||
847 | ifmgd->bssid); | ||
848 | } | ||
849 | #endif | ||
850 | bss_conf->use_short_preamble = use_short_preamble; | 870 | bss_conf->use_short_preamble = use_short_preamble; |
851 | changed |= BSS_CHANGED_ERP_PREAMBLE; | 871 | changed |= BSS_CHANGED_ERP_PREAMBLE; |
852 | } | 872 | } |
853 | 873 | ||
854 | if (use_short_slot != bss_conf->use_short_slot) { | 874 | if (use_short_slot != bss_conf->use_short_slot) { |
855 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
856 | if (net_ratelimit()) { | ||
857 | printk(KERN_DEBUG "%s: switched to %s slot time" | ||
858 | " (BSSID=%pM)\n", | ||
859 | sdata->dev->name, | ||
860 | use_short_slot ? "short" : "long", | ||
861 | ifmgd->bssid); | ||
862 | } | ||
863 | #endif | ||
864 | bss_conf->use_short_slot = use_short_slot; | 875 | bss_conf->use_short_slot = use_short_slot; |
865 | changed |= BSS_CHANGED_ERP_SLOT; | 876 | changed |= BSS_CHANGED_ERP_SLOT; |
866 | } | 877 | } |
@@ -868,105 +879,31 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, | |||
868 | return changed; | 879 | return changed; |
869 | } | 880 | } |
870 | 881 | ||
871 | static void ieee80211_sta_send_apinfo(struct ieee80211_sub_if_data *sdata) | ||
872 | { | ||
873 | union iwreq_data wrqu; | ||
874 | |||
875 | memset(&wrqu, 0, sizeof(wrqu)); | ||
876 | if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) | ||
877 | memcpy(wrqu.ap_addr.sa_data, sdata->u.mgd.bssid, ETH_ALEN); | ||
878 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | ||
879 | wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL); | ||
880 | } | ||
881 | |||
882 | static void ieee80211_sta_send_associnfo(struct ieee80211_sub_if_data *sdata) | ||
883 | { | ||
884 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
885 | char *buf; | ||
886 | size_t len; | ||
887 | int i; | ||
888 | union iwreq_data wrqu; | ||
889 | |||
890 | if (!ifmgd->assocreq_ies && !ifmgd->assocresp_ies) | ||
891 | return; | ||
892 | |||
893 | buf = kmalloc(50 + 2 * (ifmgd->assocreq_ies_len + | ||
894 | ifmgd->assocresp_ies_len), GFP_KERNEL); | ||
895 | if (!buf) | ||
896 | return; | ||
897 | |||
898 | len = sprintf(buf, "ASSOCINFO("); | ||
899 | if (ifmgd->assocreq_ies) { | ||
900 | len += sprintf(buf + len, "ReqIEs="); | ||
901 | for (i = 0; i < ifmgd->assocreq_ies_len; i++) { | ||
902 | len += sprintf(buf + len, "%02x", | ||
903 | ifmgd->assocreq_ies[i]); | ||
904 | } | ||
905 | } | ||
906 | if (ifmgd->assocresp_ies) { | ||
907 | if (ifmgd->assocreq_ies) | ||
908 | len += sprintf(buf + len, " "); | ||
909 | len += sprintf(buf + len, "RespIEs="); | ||
910 | for (i = 0; i < ifmgd->assocresp_ies_len; i++) { | ||
911 | len += sprintf(buf + len, "%02x", | ||
912 | ifmgd->assocresp_ies[i]); | ||
913 | } | ||
914 | } | ||
915 | len += sprintf(buf + len, ")"); | ||
916 | |||
917 | if (len > IW_CUSTOM_MAX) { | ||
918 | len = sprintf(buf, "ASSOCRESPIE="); | ||
919 | for (i = 0; i < ifmgd->assocresp_ies_len; i++) { | ||
920 | len += sprintf(buf + len, "%02x", | ||
921 | ifmgd->assocresp_ies[i]); | ||
922 | } | ||
923 | } | ||
924 | |||
925 | if (len <= IW_CUSTOM_MAX) { | ||
926 | memset(&wrqu, 0, sizeof(wrqu)); | ||
927 | wrqu.data.length = len; | ||
928 | wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf); | ||
929 | } | ||
930 | |||
931 | kfree(buf); | ||
932 | } | ||
933 | |||
934 | |||
935 | static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | 882 | static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, |
883 | struct ieee80211_mgd_work *wk, | ||
936 | u32 bss_info_changed) | 884 | u32 bss_info_changed) |
937 | { | 885 | { |
938 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
939 | struct ieee80211_local *local = sdata->local; | 886 | struct ieee80211_local *local = sdata->local; |
940 | struct ieee80211_conf *conf = &local_to_hw(local)->conf; | 887 | struct ieee80211_bss *bss = wk->bss; |
941 | |||
942 | struct ieee80211_bss *bss; | ||
943 | 888 | ||
944 | bss_info_changed |= BSS_CHANGED_ASSOC; | 889 | bss_info_changed |= BSS_CHANGED_ASSOC; |
945 | ifmgd->flags |= IEEE80211_STA_ASSOCIATED; | 890 | /* set timing information */ |
891 | sdata->vif.bss_conf.beacon_int = bss->cbss.beacon_interval; | ||
892 | sdata->vif.bss_conf.timestamp = bss->cbss.tsf; | ||
893 | sdata->vif.bss_conf.dtim_period = bss->dtim_period; | ||
946 | 894 | ||
947 | bss = ieee80211_rx_bss_get(local, ifmgd->bssid, | 895 | bss_info_changed |= BSS_CHANGED_BEACON_INT; |
948 | conf->channel->center_freq, | 896 | bss_info_changed |= ieee80211_handle_bss_capability(sdata, |
949 | ifmgd->ssid, ifmgd->ssid_len); | 897 | bss->cbss.capability, bss->has_erp_value, bss->erp_value); |
950 | if (bss) { | ||
951 | /* set timing information */ | ||
952 | sdata->vif.bss_conf.beacon_int = bss->cbss.beacon_interval; | ||
953 | sdata->vif.bss_conf.timestamp = bss->cbss.tsf; | ||
954 | sdata->vif.bss_conf.dtim_period = bss->dtim_period; | ||
955 | 898 | ||
956 | bss_info_changed |= BSS_CHANGED_BEACON_INT; | 899 | sdata->u.mgd.associated = bss; |
957 | bss_info_changed |= ieee80211_handle_bss_capability(sdata, | 900 | sdata->u.mgd.old_associate_work = wk; |
958 | bss->cbss.capability, bss->has_erp_value, bss->erp_value); | 901 | memcpy(sdata->u.mgd.bssid, bss->cbss.bssid, ETH_ALEN); |
959 | |||
960 | cfg80211_hold_bss(&bss->cbss); | ||
961 | |||
962 | ieee80211_rx_bss_put(local, bss); | ||
963 | } | ||
964 | 902 | ||
965 | ifmgd->flags |= IEEE80211_STA_PREV_BSSID_SET; | 903 | /* just to be sure */ |
966 | memcpy(ifmgd->prev_bssid, sdata->u.mgd.bssid, ETH_ALEN); | 904 | sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | |
967 | ieee80211_sta_send_associnfo(sdata); | 905 | IEEE80211_STA_BEACON_POLL); |
968 | 906 | ||
969 | ifmgd->last_probe = jiffies; | ||
970 | ieee80211_led_assoc(local, 1); | 907 | ieee80211_led_assoc(local, 1); |
971 | 908 | ||
972 | sdata->vif.bss_conf.assoc = 1; | 909 | sdata->vif.bss_conf.assoc = 1; |
@@ -982,176 +919,157 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
982 | 919 | ||
983 | ieee80211_bss_info_change_notify(sdata, bss_info_changed); | 920 | ieee80211_bss_info_change_notify(sdata, bss_info_changed); |
984 | 921 | ||
985 | /* will be same as sdata */ | 922 | mutex_lock(&local->iflist_mtx); |
986 | if (local->ps_sdata) { | 923 | ieee80211_recalc_ps(local, -1); |
987 | mutex_lock(&local->iflist_mtx); | 924 | mutex_unlock(&local->iflist_mtx); |
988 | ieee80211_recalc_ps(local, -1); | ||
989 | mutex_unlock(&local->iflist_mtx); | ||
990 | } | ||
991 | 925 | ||
992 | netif_tx_start_all_queues(sdata->dev); | 926 | netif_tx_start_all_queues(sdata->dev); |
993 | netif_carrier_on(sdata->dev); | 927 | netif_carrier_on(sdata->dev); |
994 | |||
995 | ieee80211_sta_send_apinfo(sdata); | ||
996 | } | 928 | } |
997 | 929 | ||
998 | static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata) | 930 | static enum rx_mgmt_action __must_check |
931 | ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata, | ||
932 | struct ieee80211_mgd_work *wk) | ||
999 | { | 933 | { |
1000 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 934 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1001 | struct ieee80211_local *local = sdata->local; | 935 | struct ieee80211_local *local = sdata->local; |
1002 | 936 | ||
1003 | ifmgd->direct_probe_tries++; | 937 | wk->tries++; |
1004 | if (ifmgd->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) { | 938 | if (wk->tries > IEEE80211_AUTH_MAX_TRIES) { |
1005 | printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n", | 939 | printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n", |
1006 | sdata->dev->name, ifmgd->bssid); | 940 | sdata->dev->name, wk->bss->cbss.bssid); |
1007 | ifmgd->state = IEEE80211_STA_MLME_DISABLED; | ||
1008 | ieee80211_recalc_idle(local); | ||
1009 | cfg80211_send_auth_timeout(sdata->dev, ifmgd->bssid); | ||
1010 | 941 | ||
1011 | /* | 942 | /* |
1012 | * Most likely AP is not in the range so remove the | 943 | * Most likely AP is not in the range so remove the |
1013 | * bss information associated to the AP | 944 | * bss struct for that AP. |
1014 | */ | 945 | */ |
1015 | ieee80211_rx_bss_remove(sdata, ifmgd->bssid, | 946 | cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss); |
1016 | sdata->local->hw.conf.channel->center_freq, | ||
1017 | ifmgd->ssid, ifmgd->ssid_len); | ||
1018 | 947 | ||
1019 | /* | 948 | /* |
1020 | * We might have a pending scan which had no chance to run yet | 949 | * We might have a pending scan which had no chance to run yet |
1021 | * due to state == IEEE80211_STA_MLME_DIRECT_PROBE. | 950 | * due to work needing to be done. Hence, queue the STAs work |
1022 | * Hence, queue the STAs work again | 951 | * again for that. |
1023 | */ | 952 | */ |
1024 | queue_work(local->hw.workqueue, &ifmgd->work); | 953 | ieee80211_queue_work(&local->hw, &ifmgd->work); |
1025 | return; | 954 | return RX_MGMT_CFG80211_AUTH_TO; |
1026 | } | 955 | } |
1027 | 956 | ||
1028 | printk(KERN_DEBUG "%s: direct probe to AP %pM try %d\n", | 957 | printk(KERN_DEBUG "%s: direct probe to AP %pM (try %d)\n", |
1029 | sdata->dev->name, ifmgd->bssid, | 958 | sdata->dev->name, wk->bss->cbss.bssid, |
1030 | ifmgd->direct_probe_tries); | 959 | wk->tries); |
1031 | 960 | ||
1032 | ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE; | 961 | /* |
1033 | 962 | * Direct probe is sent to broadcast address as some APs | |
1034 | /* Direct probe is sent to broadcast address as some APs | ||
1035 | * will not answer to direct packet in unassociated state. | 963 | * will not answer to direct packet in unassociated state. |
1036 | */ | 964 | */ |
1037 | ieee80211_send_probe_req(sdata, NULL, | 965 | ieee80211_send_probe_req(sdata, NULL, wk->ssid, wk->ssid_len, NULL, 0); |
1038 | ifmgd->ssid, ifmgd->ssid_len, NULL, 0); | 966 | |
967 | wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; | ||
968 | run_again(ifmgd, wk->timeout); | ||
1039 | 969 | ||
1040 | mod_timer(&ifmgd->timer, jiffies + IEEE80211_AUTH_TIMEOUT); | 970 | return RX_MGMT_NONE; |
1041 | } | 971 | } |
1042 | 972 | ||
1043 | 973 | ||
1044 | static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata) | 974 | static enum rx_mgmt_action __must_check |
975 | ieee80211_authenticate(struct ieee80211_sub_if_data *sdata, | ||
976 | struct ieee80211_mgd_work *wk) | ||
1045 | { | 977 | { |
1046 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 978 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1047 | struct ieee80211_local *local = sdata->local; | 979 | struct ieee80211_local *local = sdata->local; |
1048 | u8 *ies; | ||
1049 | size_t ies_len; | ||
1050 | 980 | ||
1051 | ifmgd->auth_tries++; | 981 | wk->tries++; |
1052 | if (ifmgd->auth_tries > IEEE80211_AUTH_MAX_TRIES) { | 982 | if (wk->tries > IEEE80211_AUTH_MAX_TRIES) { |
1053 | printk(KERN_DEBUG "%s: authentication with AP %pM" | 983 | printk(KERN_DEBUG "%s: authentication with AP %pM" |
1054 | " timed out\n", | 984 | " timed out\n", |
1055 | sdata->dev->name, ifmgd->bssid); | 985 | sdata->dev->name, wk->bss->cbss.bssid); |
1056 | ifmgd->state = IEEE80211_STA_MLME_DISABLED; | 986 | |
1057 | ieee80211_recalc_idle(local); | 987 | /* |
1058 | cfg80211_send_auth_timeout(sdata->dev, ifmgd->bssid); | 988 | * Most likely AP is not in the range so remove the |
1059 | ieee80211_rx_bss_remove(sdata, ifmgd->bssid, | 989 | * bss struct for that AP. |
1060 | sdata->local->hw.conf.channel->center_freq, | 990 | */ |
1061 | ifmgd->ssid, ifmgd->ssid_len); | 991 | cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss); |
1062 | 992 | ||
1063 | /* | 993 | /* |
1064 | * We might have a pending scan which had no chance to run yet | 994 | * We might have a pending scan which had no chance to run yet |
1065 | * due to state == IEEE80211_STA_MLME_AUTHENTICATE. | 995 | * due to work needing to be done. Hence, queue the STAs work |
1066 | * Hence, queue the STAs work again | 996 | * again for that. |
1067 | */ | 997 | */ |
1068 | queue_work(local->hw.workqueue, &ifmgd->work); | 998 | ieee80211_queue_work(&local->hw, &ifmgd->work); |
1069 | return; | 999 | return RX_MGMT_CFG80211_AUTH_TO; |
1070 | } | 1000 | } |
1071 | 1001 | ||
1072 | ifmgd->state = IEEE80211_STA_MLME_AUTHENTICATE; | 1002 | printk(KERN_DEBUG "%s: authenticate with AP %pM (try %d)\n", |
1073 | printk(KERN_DEBUG "%s: authenticate with AP %pM\n", | 1003 | sdata->dev->name, wk->bss->cbss.bssid, wk->tries); |
1074 | sdata->dev->name, ifmgd->bssid); | ||
1075 | 1004 | ||
1076 | if (ifmgd->flags & IEEE80211_STA_EXT_SME) { | 1005 | ieee80211_send_auth(sdata, 1, wk->auth_alg, wk->ie, wk->ie_len, |
1077 | ies = ifmgd->sme_auth_ie; | 1006 | wk->bss->cbss.bssid, NULL, 0, 0); |
1078 | ies_len = ifmgd->sme_auth_ie_len; | 1007 | wk->auth_transaction = 2; |
1079 | } else { | 1008 | |
1080 | ies = NULL; | 1009 | wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; |
1081 | ies_len = 0; | 1010 | run_again(ifmgd, wk->timeout); |
1082 | } | ||
1083 | ieee80211_send_auth(sdata, 1, ifmgd->auth_alg, ies, ies_len, | ||
1084 | ifmgd->bssid, 0); | ||
1085 | ifmgd->auth_transaction = 2; | ||
1086 | 1011 | ||
1087 | mod_timer(&ifmgd->timer, jiffies + IEEE80211_AUTH_TIMEOUT); | 1012 | return RX_MGMT_NONE; |
1088 | } | 1013 | } |
1089 | 1014 | ||
1090 | /* | ||
1091 | * The disassoc 'reason' argument can be either our own reason | ||
1092 | * if self disconnected or a reason code from the AP. | ||
1093 | */ | ||
1094 | static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | 1015 | static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, |
1095 | bool deauth, bool self_disconnected, | 1016 | bool deauth) |
1096 | u16 reason) | ||
1097 | { | 1017 | { |
1098 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 1018 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1099 | struct ieee80211_local *local = sdata->local; | 1019 | struct ieee80211_local *local = sdata->local; |
1100 | struct ieee80211_conf *conf = &local_to_hw(local)->conf; | ||
1101 | struct ieee80211_bss *bss; | ||
1102 | struct sta_info *sta; | 1020 | struct sta_info *sta; |
1103 | u32 changed = 0, config_changed = 0; | 1021 | u32 changed = 0, config_changed = 0; |
1022 | u8 bssid[ETH_ALEN]; | ||
1023 | |||
1024 | ASSERT_MGD_MTX(ifmgd); | ||
1025 | |||
1026 | if (WARN_ON(!ifmgd->associated)) | ||
1027 | return; | ||
1028 | |||
1029 | memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN); | ||
1030 | |||
1031 | ifmgd->associated = NULL; | ||
1032 | memset(ifmgd->bssid, 0, ETH_ALEN); | ||
1104 | 1033 | ||
1105 | if (deauth) { | 1034 | if (deauth) { |
1106 | ifmgd->direct_probe_tries = 0; | 1035 | kfree(ifmgd->old_associate_work); |
1107 | ifmgd->auth_tries = 0; | 1036 | ifmgd->old_associate_work = NULL; |
1037 | } else { | ||
1038 | struct ieee80211_mgd_work *wk = ifmgd->old_associate_work; | ||
1039 | |||
1040 | wk->state = IEEE80211_MGD_STATE_IDLE; | ||
1041 | list_add(&wk->list, &ifmgd->work_list); | ||
1108 | } | 1042 | } |
1109 | ifmgd->assoc_scan_tries = 0; | 1043 | |
1110 | ifmgd->assoc_tries = 0; | 1044 | /* |
1045 | * we need to commit the associated = NULL change because the | ||
1046 | * scan code uses that to determine whether this iface should | ||
1047 | * go to/wake up from powersave or not -- and could otherwise | ||
1048 | * wake the queues erroneously. | ||
1049 | */ | ||
1050 | smp_mb(); | ||
1051 | |||
1052 | /* | ||
1053 | * Thus, we can only afterwards stop the queues -- to account | ||
1054 | * for the case where another CPU is finishing a scan at this | ||
1055 | * time -- we don't want the scan code to enable queues. | ||
1056 | */ | ||
1111 | 1057 | ||
1112 | netif_tx_stop_all_queues(sdata->dev); | 1058 | netif_tx_stop_all_queues(sdata->dev); |
1113 | netif_carrier_off(sdata->dev); | 1059 | netif_carrier_off(sdata->dev); |
1114 | 1060 | ||
1115 | rcu_read_lock(); | 1061 | rcu_read_lock(); |
1116 | sta = sta_info_get(local, ifmgd->bssid); | 1062 | sta = sta_info_get(local, bssid); |
1117 | if (sta) | 1063 | if (sta) |
1118 | ieee80211_sta_tear_down_BA_sessions(sta); | 1064 | ieee80211_sta_tear_down_BA_sessions(sta); |
1119 | rcu_read_unlock(); | 1065 | rcu_read_unlock(); |
1120 | 1066 | ||
1121 | bss = ieee80211_rx_bss_get(local, ifmgd->bssid, | ||
1122 | conf->channel->center_freq, | ||
1123 | ifmgd->ssid, ifmgd->ssid_len); | ||
1124 | |||
1125 | if (bss) { | ||
1126 | cfg80211_unhold_bss(&bss->cbss); | ||
1127 | ieee80211_rx_bss_put(local, bss); | ||
1128 | } | ||
1129 | |||
1130 | if (self_disconnected) { | ||
1131 | if (deauth) | ||
1132 | ieee80211_send_deauth_disassoc(sdata, | ||
1133 | IEEE80211_STYPE_DEAUTH, reason); | ||
1134 | else | ||
1135 | ieee80211_send_deauth_disassoc(sdata, | ||
1136 | IEEE80211_STYPE_DISASSOC, reason); | ||
1137 | } | ||
1138 | |||
1139 | ifmgd->flags &= ~IEEE80211_STA_ASSOCIATED; | ||
1140 | changed |= ieee80211_reset_erp_info(sdata); | 1067 | changed |= ieee80211_reset_erp_info(sdata); |
1141 | 1068 | ||
1142 | ieee80211_led_assoc(local, 0); | 1069 | ieee80211_led_assoc(local, 0); |
1143 | changed |= BSS_CHANGED_ASSOC; | 1070 | changed |= BSS_CHANGED_ASSOC; |
1144 | sdata->vif.bss_conf.assoc = false; | 1071 | sdata->vif.bss_conf.assoc = false; |
1145 | 1072 | ||
1146 | ieee80211_sta_send_apinfo(sdata); | ||
1147 | |||
1148 | if (self_disconnected || reason == WLAN_REASON_DISASSOC_STA_HAS_LEFT) { | ||
1149 | ifmgd->state = IEEE80211_STA_MLME_DISABLED; | ||
1150 | ieee80211_rx_bss_remove(sdata, ifmgd->bssid, | ||
1151 | sdata->local->hw.conf.channel->center_freq, | ||
1152 | ifmgd->ssid, ifmgd->ssid_len); | ||
1153 | } | ||
1154 | |||
1155 | ieee80211_set_wmm_default(sdata); | 1073 | ieee80211_set_wmm_default(sdata); |
1156 | 1074 | ||
1157 | ieee80211_recalc_idle(local); | 1075 | ieee80211_recalc_idle(local); |
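
Each step of the new work-item state machine above (direct probe, authenticate, associate) has the same bounded-retry shape: bump wk->tries, give up after IEEE80211_AUTH/ASSOC_MAX_TRIES by unlinking the stale BSS and returning the matching RX_MGMT_CFG80211_*_TO action, otherwise transmit the frame again and arm the shared timer for another timeout period. A hedged, generic sketch of that shape; the struct, limits and callback are stand-ins:

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

#define STEP_MAX_TRIES  3               /* illustrative; the patch uses the
                                         * IEEE80211_*_MAX_TRIES defines */
#define STEP_TIMEOUT    (HZ / 5)        /* illustrative retry period */

struct step_work {
        int             tries;
        unsigned long   timeout;        /* absolute jiffies of next deadline */
};

/*
 * One bounded-retry step.  Returns true while the step is still being
 * retried; false once the caller should give up, unlink the stale BSS
 * and report a timeout to cfg80211.
 */
static bool step_try_again(struct step_work *wk,
                           void (*send_frame)(struct step_work *wk),
                           struct timer_list *timer)
{
        if (++wk->tries > STEP_MAX_TRIES)
                return false;

        send_frame(wk);                 /* (re)send probe/auth/assoc frame */

        wk->timeout = jiffies + STEP_TIMEOUT;
        mod_timer(timer, wk->timeout);  /* simplified: mac80211 re-arms via
                                         * run_again() so an already earlier
                                         * pending deadline is kept */
        return true;
}
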
@@ -1180,7 +1098,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1180 | 1098 | ||
1181 | rcu_read_lock(); | 1099 | rcu_read_lock(); |
1182 | 1100 | ||
1183 | sta = sta_info_get(local, ifmgd->bssid); | 1101 | sta = sta_info_get(local, bssid); |
1184 | if (!sta) { | 1102 | if (!sta) { |
1185 | rcu_read_unlock(); | 1103 | rcu_read_unlock(); |
1186 | return; | 1104 | return; |
@@ -1193,83 +1111,42 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1193 | sta_info_destroy(sta); | 1111 | sta_info_destroy(sta); |
1194 | } | 1112 | } |
1195 | 1113 | ||
1196 | static int ieee80211_sta_wep_configured(struct ieee80211_sub_if_data *sdata) | 1114 | static enum rx_mgmt_action __must_check |
1197 | { | 1115 | ieee80211_associate(struct ieee80211_sub_if_data *sdata, |
1198 | if (!sdata || !sdata->default_key || | 1116 | struct ieee80211_mgd_work *wk) |
1199 | sdata->default_key->conf.alg != ALG_WEP) | ||
1200 | return 0; | ||
1201 | return 1; | ||
1202 | } | ||
1203 | |||
1204 | static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata) | ||
1205 | { | ||
1206 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
1207 | struct ieee80211_local *local = sdata->local; | ||
1208 | struct ieee80211_bss *bss; | ||
1209 | int bss_privacy; | ||
1210 | int wep_privacy; | ||
1211 | int privacy_invoked; | ||
1212 | |||
1213 | if (!ifmgd || (ifmgd->flags & IEEE80211_STA_EXT_SME)) | ||
1214 | return 0; | ||
1215 | |||
1216 | bss = ieee80211_rx_bss_get(local, ifmgd->bssid, | ||
1217 | local->hw.conf.channel->center_freq, | ||
1218 | ifmgd->ssid, ifmgd->ssid_len); | ||
1219 | if (!bss) | ||
1220 | return 0; | ||
1221 | |||
1222 | bss_privacy = !!(bss->cbss.capability & WLAN_CAPABILITY_PRIVACY); | ||
1223 | wep_privacy = !!ieee80211_sta_wep_configured(sdata); | ||
1224 | privacy_invoked = !!(ifmgd->flags & IEEE80211_STA_PRIVACY_INVOKED); | ||
1225 | |||
1226 | ieee80211_rx_bss_put(local, bss); | ||
1227 | |||
1228 | if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked)) | ||
1229 | return 0; | ||
1230 | |||
1231 | return 1; | ||
1232 | } | ||
1233 | |||
1234 | static void ieee80211_associate(struct ieee80211_sub_if_data *sdata) | ||
1235 | { | 1117 | { |
1236 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 1118 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1237 | struct ieee80211_local *local = sdata->local; | 1119 | struct ieee80211_local *local = sdata->local; |
1238 | 1120 | ||
1239 | ifmgd->assoc_tries++; | 1121 | wk->tries++; |
1240 | if (ifmgd->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) { | 1122 | if (wk->tries > IEEE80211_ASSOC_MAX_TRIES) { |
1241 | printk(KERN_DEBUG "%s: association with AP %pM" | 1123 | printk(KERN_DEBUG "%s: association with AP %pM" |
1242 | " timed out\n", | 1124 | " timed out\n", |
1243 | sdata->dev->name, ifmgd->bssid); | 1125 | sdata->dev->name, wk->bss->cbss.bssid); |
1244 | ifmgd->state = IEEE80211_STA_MLME_DISABLED; | 1126 | |
1245 | ieee80211_recalc_idle(local); | 1127 | /* |
1246 | cfg80211_send_assoc_timeout(sdata->dev, ifmgd->bssid); | 1128 | * Most likely AP is not in the range so remove the |
1247 | ieee80211_rx_bss_remove(sdata, ifmgd->bssid, | 1129 | * bss struct for that AP. |
1248 | sdata->local->hw.conf.channel->center_freq, | 1130 | */ |
1249 | ifmgd->ssid, ifmgd->ssid_len); | 1131 | cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss); |
1132 | |||
1250 | /* | 1133 | /* |
1251 | * We might have a pending scan which had no chance to run yet | 1134 | * We might have a pending scan which had no chance to run yet |
1252 | * due to state == IEEE80211_STA_MLME_ASSOCIATE. | 1135 | * due to work needing to be done. Hence, queue the STA's work |
1253 | * Hence, queue the STA's work again | 1136 | * again for that. |
1254 | */ | 1137 | */ |
1255 | queue_work(local->hw.workqueue, &ifmgd->work); | 1138 | ieee80211_queue_work(&local->hw, &ifmgd->work); |
1256 | return; | 1139 | return RX_MGMT_CFG80211_ASSOC_TO; |
1257 | } | 1140 | } |
1258 | 1141 | ||
1259 | ifmgd->state = IEEE80211_STA_MLME_ASSOCIATE; | 1142 | printk(KERN_DEBUG "%s: associate with AP %pM (try %d)\n", |
1260 | printk(KERN_DEBUG "%s: associate with AP %pM\n", | 1143 | sdata->dev->name, wk->bss->cbss.bssid, wk->tries); |
1261 | sdata->dev->name, ifmgd->bssid); | 1144 | ieee80211_send_assoc(sdata, wk); |
1262 | if (ieee80211_privacy_mismatch(sdata)) { | ||
1263 | printk(KERN_DEBUG "%s: mismatch in privacy configuration and " | ||
1264 | "mixed-cell disabled - abort association\n", sdata->dev->name); | ||
1265 | ifmgd->state = IEEE80211_STA_MLME_DISABLED; | ||
1266 | ieee80211_recalc_idle(local); | ||
1267 | return; | ||
1268 | } | ||
1269 | 1145 | ||
1270 | ieee80211_send_assoc(sdata); | 1146 | wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; |
1147 | run_again(ifmgd, wk->timeout); | ||
1271 | 1148 | ||
1272 | mod_timer(&ifmgd->timer, jiffies + IEEE80211_ASSOC_TIMEOUT); | 1149 | return RX_MGMT_NONE; |
1273 | } | 1150 | } |
1274 | 1151 | ||
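The reworked ieee80211_associate() above caps the number of (re)association attempts and re-arms a per-try deadline each time it transmits. A minimal standalone sketch of that bounded-retry pattern, using hypothetical plain-C stand-ins (MAX_TRIES, a time_t deadline) rather than the real mac80211 work/timer machinery:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MAX_TRIES   3          /* stand-in for IEEE80211_ASSOC_MAX_TRIES */
#define TIMEOUT_SEC 1          /* stand-in for IEEE80211_ASSOC_TIMEOUT   */

struct assoc_work {
    int tries;                 /* attempts made so far */
    time_t deadline;           /* when the current attempt expires */
};

/* Returns true while the attempt is still pending, false once we give up. */
static bool try_associate(struct assoc_work *wk)
{
    wk->tries++;
    if (wk->tries > MAX_TRIES) {
        printf("association timed out after %d tries\n", MAX_TRIES);
        return false;          /* caller reports the timeout upward */
    }

    printf("associate attempt %d\n", wk->tries);
    /* ... the (re)association request would be sent here ... */

    /* Re-arm the per-attempt deadline; a real driver would check it
     * from a timer instead of looping. */
    wk->deadline = time(NULL) + TIMEOUT_SEC;
    return true;
}

int main(void)
{
    struct assoc_work wk = { 0 };

    while (try_associate(&wk))
        ;                      /* a real driver would wait for a response */
    return 0;
}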
1275 | void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, | 1152 | void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, |
@@ -1280,160 +1157,113 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, | |||
1280 | * from AP because we know that the connection is working both ways | 1157 | * from AP because we know that the connection is working both ways |
1281 | * at that time. But multicast frames (and hence also beacons) must | 1158 | * at that time. But multicast frames (and hence also beacons) must |
1282 | * be ignored here, because we need to trigger the timer during | 1159 | * be ignored here, because we need to trigger the timer during |
1283 | * data idle periods for sending the periodical probe request to | 1160 | * data idle periods for sending the periodic probe request to the |
1284 | * the AP. | 1161 | * AP we're connected to. |
1285 | */ | 1162 | */ |
1286 | if (!is_multicast_ether_addr(hdr->addr1)) | 1163 | if (is_multicast_ether_addr(hdr->addr1)) |
1287 | mod_timer(&sdata->u.mgd.timer, | ||
1288 | jiffies + IEEE80211_MONITORING_INTERVAL); | ||
1289 | } | ||
1290 | |||
1291 | void ieee80211_beacon_loss_work(struct work_struct *work) | ||
1292 | { | ||
1293 | struct ieee80211_sub_if_data *sdata = | ||
1294 | container_of(work, struct ieee80211_sub_if_data, | ||
1295 | u.mgd.beacon_loss_work); | ||
1296 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
1297 | |||
1298 | /* | ||
1299 | * The driver has already reported this event and we have | ||
1300 | * already sent a probe request. Maybe the AP died and the | ||
1301 | * driver keeps reporting until we disassociate... We have | ||
1302 | * to ignore that because otherwise we would continually | ||
1303 | * reset the timer and never check whether we received a | ||
1304 | * probe response! | ||
1305 | */ | ||
1306 | if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) | ||
1307 | return; | 1164 | return; |
1308 | 1165 | ||
1309 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 1166 | mod_timer(&sdata->u.mgd.conn_mon_timer, |
1310 | if (net_ratelimit()) { | 1167 | round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); |
1311 | printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM " | ||
1312 | "- sending probe request\n", sdata->dev->name, | ||
1313 | sdata->u.mgd.bssid); | ||
1314 | } | ||
1315 | #endif | ||
1316 | |||
1317 | ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; | ||
1318 | |||
1319 | mutex_lock(&sdata->local->iflist_mtx); | ||
1320 | ieee80211_recalc_ps(sdata->local, -1); | ||
1321 | mutex_unlock(&sdata->local->iflist_mtx); | ||
1322 | |||
1323 | ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, | ||
1324 | ifmgd->ssid_len, NULL, 0); | ||
1325 | |||
1326 | mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT); | ||
1327 | } | 1168 | } |
1328 | 1169 | ||
1329 | void ieee80211_beacon_loss(struct ieee80211_vif *vif) | 1170 | static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) |
1330 | { | 1171 | { |
1331 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 1172 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1173 | const u8 *ssid; | ||
1174 | |||
1175 | ssid = ieee80211_bss_get_ie(&ifmgd->associated->cbss, WLAN_EID_SSID); | ||
1176 | ieee80211_send_probe_req(sdata, ifmgd->associated->cbss.bssid, | ||
1177 | ssid + 2, ssid[1], NULL, 0); | ||
1332 | 1178 | ||
1333 | queue_work(sdata->local->hw.workqueue, | 1179 | ifmgd->probe_send_count++; |
1334 | &sdata->u.mgd.beacon_loss_work); | 1180 | ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT; |
1181 | run_again(ifmgd, ifmgd->probe_timeout); | ||
1335 | } | 1182 | } |
1336 | EXPORT_SYMBOL(ieee80211_beacon_loss); | ||
1337 | 1183 | ||
1338 | static void ieee80211_associated(struct ieee80211_sub_if_data *sdata) | 1184 | static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, |
1185 | bool beacon) | ||
1339 | { | 1186 | { |
1340 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 1187 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1341 | struct ieee80211_local *local = sdata->local; | 1188 | bool already = false; |
1342 | struct sta_info *sta; | ||
1343 | unsigned long last_rx; | ||
1344 | bool disassoc = false; | ||
1345 | 1189 | ||
1346 | /* TODO: start monitoring current AP signal quality and number of | 1190 | if (!netif_running(sdata->dev)) |
1347 | * missed beacons. Scan other channels every now and then and search | 1191 | return; |
1348 | * for better APs. */ | ||
1349 | /* TODO: remove expired BSSes */ | ||
1350 | 1192 | ||
1351 | ifmgd->state = IEEE80211_STA_MLME_ASSOCIATED; | 1193 | if (sdata->local->scanning) |
1194 | return; | ||
1352 | 1195 | ||
1353 | rcu_read_lock(); | 1196 | mutex_lock(&ifmgd->mtx); |
1354 | 1197 | ||
1355 | sta = sta_info_get(local, ifmgd->bssid); | 1198 | if (!ifmgd->associated) |
1356 | if (!sta) { | ||
1357 | printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n", | ||
1358 | sdata->dev->name, ifmgd->bssid); | ||
1359 | disassoc = true; | ||
1360 | rcu_read_unlock(); | ||
1361 | goto out; | 1199 | goto out; |
1362 | } | ||
1363 | 1200 | ||
1364 | last_rx = sta->last_rx; | 1201 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
1365 | rcu_read_unlock(); | 1202 | if (beacon && net_ratelimit()) |
1366 | 1203 | printk(KERN_DEBUG "%s: detected beacon loss from AP " | |
1367 | if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) && | 1204 | "- sending probe request\n", sdata->dev->name); |
1368 | time_after(jiffies, last_rx + IEEE80211_PROBE_WAIT)) { | 1205 | #endif |
1369 | printk(KERN_DEBUG "%s: no probe response from AP %pM " | ||
1370 | "- disassociating\n", | ||
1371 | sdata->dev->name, ifmgd->bssid); | ||
1372 | disassoc = true; | ||
1373 | ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; | ||
1374 | goto out; | ||
1375 | } | ||
1376 | 1206 | ||
1377 | /* | 1207 | /* |
1378 | * Beacon filtering is only enabled with power save and then the | 1208 | * The driver/our work has already reported this event or the |
1379 | * stack should not check for beacon loss. | 1209 | * connection monitoring has kicked in and we have already sent |
1210 | * a probe request. Or maybe the AP died and the driver keeps | ||
1211 | * reporting until we disassociate... | ||
1212 | * | ||
1213 | * In either case we have to ignore the current call to this | ||
1214 | * function (except for setting the correct probe reason bit) | ||
1215 | * because otherwise we would reset the timer every time and | ||
1216 | * never check whether we received a probe response! | ||
1380 | */ | 1217 | */ |
1381 | if (!((local->hw.flags & IEEE80211_HW_BEACON_FILTER) && | 1218 | if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | |
1382 | (local->hw.conf.flags & IEEE80211_CONF_PS)) && | 1219 | IEEE80211_STA_CONNECTION_POLL)) |
1383 | time_after(jiffies, | 1220 | already = true; |
1384 | ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) { | 1221 | |
1385 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 1222 | if (beacon) |
1386 | if (net_ratelimit()) { | 1223 | ifmgd->flags |= IEEE80211_STA_BEACON_POLL; |
1387 | printk(KERN_DEBUG "%s: beacon loss from AP %pM " | 1224 | else |
1388 | "- sending probe request\n", | 1225 | ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL; |
1389 | sdata->dev->name, ifmgd->bssid); | 1226 | |
1390 | } | 1227 | if (already) |
1391 | #endif | ||
1392 | ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; | ||
1393 | mutex_lock(&local->iflist_mtx); | ||
1394 | ieee80211_recalc_ps(local, -1); | ||
1395 | mutex_unlock(&local->iflist_mtx); | ||
1396 | ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, | ||
1397 | ifmgd->ssid_len, NULL, 0); | ||
1398 | mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT); | ||
1399 | goto out; | 1228 | goto out; |
1400 | } | ||
1401 | 1229 | ||
1402 | if (time_after(jiffies, last_rx + IEEE80211_PROBE_IDLE_TIME)) { | 1230 | mutex_lock(&sdata->local->iflist_mtx); |
1403 | ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; | 1231 | ieee80211_recalc_ps(sdata->local, -1); |
1404 | mutex_lock(&local->iflist_mtx); | 1232 | mutex_unlock(&sdata->local->iflist_mtx); |
1405 | ieee80211_recalc_ps(local, -1); | ||
1406 | mutex_unlock(&local->iflist_mtx); | ||
1407 | ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, | ||
1408 | ifmgd->ssid_len, NULL, 0); | ||
1409 | } | ||
1410 | 1233 | ||
1234 | ifmgd->probe_send_count = 0; | ||
1235 | ieee80211_mgd_probe_ap_send(sdata); | ||
1411 | out: | 1236 | out: |
1412 | if (!disassoc) | 1237 | mutex_unlock(&ifmgd->mtx); |
1413 | mod_timer(&ifmgd->timer, | ||
1414 | jiffies + IEEE80211_MONITORING_INTERVAL); | ||
1415 | else | ||
1416 | ieee80211_set_disassoc(sdata, true, true, | ||
1417 | WLAN_REASON_PREV_AUTH_NOT_VALID); | ||
1418 | } | 1238 | } |
1419 | 1239 | ||
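The probe helpers above record why the AP is being polled (beacon loss vs. connection monitoring) but deliberately avoid restarting the probe cycle when a poll is already in flight, so the probe timeout cannot be pushed back forever. A small standalone sketch of that "set the flag, but don't re-arm" guard, with simplified flags and counters in place of the real ifmgd state:

#include <stdbool.h>
#include <stdio.h>

#define POLL_BEACON      0x1   /* stand-in for IEEE80211_STA_BEACON_POLL     */
#define POLL_CONNECTION  0x2   /* stand-in for IEEE80211_STA_CONNECTION_POLL */

struct mgd_state {
    unsigned int flags;
    int probe_send_count;
};

static void send_probe(struct mgd_state *st)
{
    st->probe_send_count++;
    printf("probe request #%d sent, timeout armed\n", st->probe_send_count);
}

static void probe_ap(struct mgd_state *st, bool beacon_loss)
{
    /* Remember whether some poll was already running before this call. */
    bool already = (st->flags & (POLL_BEACON | POLL_CONNECTION)) != 0;

    /* Always record the reason for polling ... */
    st->flags |= beacon_loss ? POLL_BEACON : POLL_CONNECTION;

    /* ... but only start a new probe cycle if none was pending, so
     * repeated triggers cannot keep resetting the probe timeout. */
    if (already)
        return;

    st->probe_send_count = 0;
    send_probe(st);
}

int main(void)
{
    struct mgd_state st = { 0 };

    probe_ap(&st, true);   /* beacon loss starts a probe cycle   */
    probe_ap(&st, false);  /* a second trigger only sets the bit */
    return 0;
}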
1240 | void ieee80211_beacon_loss_work(struct work_struct *work) | ||
1241 | { | ||
1242 | struct ieee80211_sub_if_data *sdata = | ||
1243 | container_of(work, struct ieee80211_sub_if_data, | ||
1244 | u.mgd.beacon_loss_work); | ||
1245 | |||
1246 | ieee80211_mgd_probe_ap(sdata, true); | ||
1247 | } | ||
1420 | 1248 | ||
1421 | static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata) | 1249 | void ieee80211_beacon_loss(struct ieee80211_vif *vif) |
1422 | { | 1250 | { |
1423 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 1251 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); |
1424 | 1252 | ||
1253 | ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work); | ||
1254 | } | ||
1255 | EXPORT_SYMBOL(ieee80211_beacon_loss); | ||
1256 | |||
1257 | static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata, | ||
1258 | struct ieee80211_mgd_work *wk) | ||
1259 | { | ||
1260 | wk->state = IEEE80211_MGD_STATE_IDLE; | ||
1425 | printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name); | 1261 | printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name); |
1426 | ifmgd->flags |= IEEE80211_STA_AUTHENTICATED; | ||
1427 | if (ifmgd->flags & IEEE80211_STA_EXT_SME) { | ||
1428 | /* Wait for SME to request association */ | ||
1429 | ifmgd->state = IEEE80211_STA_MLME_DISABLED; | ||
1430 | ieee80211_recalc_idle(sdata->local); | ||
1431 | } else | ||
1432 | ieee80211_associate(sdata); | ||
1433 | } | 1262 | } |
1434 | 1263 | ||
1435 | 1264 | ||
1436 | static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, | 1265 | static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, |
1266 | struct ieee80211_mgd_work *wk, | ||
1437 | struct ieee80211_mgmt *mgmt, | 1267 | struct ieee80211_mgmt *mgmt, |
1438 | size_t len) | 1268 | size_t len) |
1439 | { | 1269 | { |
@@ -1444,161 +1274,133 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, | |||
1444 | ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); | 1274 | ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); |
1445 | if (!elems.challenge) | 1275 | if (!elems.challenge) |
1446 | return; | 1276 | return; |
1447 | ieee80211_send_auth(sdata, 3, sdata->u.mgd.auth_alg, | 1277 | ieee80211_send_auth(sdata, 3, wk->auth_alg, |
1448 | elems.challenge - 2, elems.challenge_len + 2, | 1278 | elems.challenge - 2, elems.challenge_len + 2, |
1449 | sdata->u.mgd.bssid, 1); | 1279 | wk->bss->cbss.bssid, |
1450 | sdata->u.mgd.auth_transaction = 4; | 1280 | wk->key, wk->key_len, wk->key_idx); |
1281 | wk->auth_transaction = 4; | ||
1451 | } | 1282 | } |
1452 | 1283 | ||
1453 | static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, | 1284 | static enum rx_mgmt_action __must_check |
1454 | struct ieee80211_mgmt *mgmt, | 1285 | ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, |
1455 | size_t len) | 1286 | struct ieee80211_mgd_work *wk, |
1287 | struct ieee80211_mgmt *mgmt, size_t len) | ||
1456 | { | 1288 | { |
1457 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
1458 | u16 auth_alg, auth_transaction, status_code; | 1289 | u16 auth_alg, auth_transaction, status_code; |
1459 | 1290 | ||
1460 | if (ifmgd->state != IEEE80211_STA_MLME_AUTHENTICATE) | 1291 | if (wk->state != IEEE80211_MGD_STATE_AUTH) |
1461 | return; | 1292 | return RX_MGMT_NONE; |
1462 | 1293 | ||
1463 | if (len < 24 + 6) | 1294 | if (len < 24 + 6) |
1464 | return; | 1295 | return RX_MGMT_NONE; |
1465 | 1296 | ||
1466 | if (memcmp(ifmgd->bssid, mgmt->sa, ETH_ALEN) != 0) | 1297 | if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0) |
1467 | return; | 1298 | return RX_MGMT_NONE; |
1468 | 1299 | ||
1469 | if (memcmp(ifmgd->bssid, mgmt->bssid, ETH_ALEN) != 0) | 1300 | if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0) |
1470 | return; | 1301 | return RX_MGMT_NONE; |
1471 | 1302 | ||
1472 | auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); | 1303 | auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); |
1473 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); | 1304 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); |
1474 | status_code = le16_to_cpu(mgmt->u.auth.status_code); | 1305 | status_code = le16_to_cpu(mgmt->u.auth.status_code); |
1475 | 1306 | ||
1476 | if (auth_alg != ifmgd->auth_alg || | 1307 | if (auth_alg != wk->auth_alg || |
1477 | auth_transaction != ifmgd->auth_transaction) | 1308 | auth_transaction != wk->auth_transaction) |
1478 | return; | 1309 | return RX_MGMT_NONE; |
1479 | 1310 | ||
1480 | if (status_code != WLAN_STATUS_SUCCESS) { | 1311 | if (status_code != WLAN_STATUS_SUCCESS) { |
1481 | if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { | 1312 | list_del(&wk->list); |
1482 | u8 algs[3]; | 1313 | kfree(wk); |
1483 | const int num_algs = ARRAY_SIZE(algs); | 1314 | return RX_MGMT_CFG80211_AUTH; |
1484 | int i, pos; | ||
1485 | algs[0] = algs[1] = algs[2] = 0xff; | ||
1486 | if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_OPEN) | ||
1487 | algs[0] = WLAN_AUTH_OPEN; | ||
1488 | if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_SHARED_KEY) | ||
1489 | algs[1] = WLAN_AUTH_SHARED_KEY; | ||
1490 | if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_LEAP) | ||
1491 | algs[2] = WLAN_AUTH_LEAP; | ||
1492 | if (ifmgd->auth_alg == WLAN_AUTH_OPEN) | ||
1493 | pos = 0; | ||
1494 | else if (ifmgd->auth_alg == WLAN_AUTH_SHARED_KEY) | ||
1495 | pos = 1; | ||
1496 | else | ||
1497 | pos = 2; | ||
1498 | for (i = 0; i < num_algs; i++) { | ||
1499 | pos++; | ||
1500 | if (pos >= num_algs) | ||
1501 | pos = 0; | ||
1502 | if (algs[pos] == ifmgd->auth_alg || | ||
1503 | algs[pos] == 0xff) | ||
1504 | continue; | ||
1505 | if (algs[pos] == WLAN_AUTH_SHARED_KEY && | ||
1506 | !ieee80211_sta_wep_configured(sdata)) | ||
1507 | continue; | ||
1508 | ifmgd->auth_alg = algs[pos]; | ||
1509 | break; | ||
1510 | } | ||
1511 | } | ||
1512 | return; | ||
1513 | } | 1315 | } |
1514 | 1316 | ||
1515 | switch (ifmgd->auth_alg) { | 1317 | switch (wk->auth_alg) { |
1516 | case WLAN_AUTH_OPEN: | 1318 | case WLAN_AUTH_OPEN: |
1517 | case WLAN_AUTH_LEAP: | 1319 | case WLAN_AUTH_LEAP: |
1518 | case WLAN_AUTH_FT: | 1320 | case WLAN_AUTH_FT: |
1519 | ieee80211_auth_completed(sdata); | 1321 | ieee80211_auth_completed(sdata, wk); |
1520 | cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len); | 1322 | return RX_MGMT_CFG80211_AUTH; |
1521 | break; | ||
1522 | case WLAN_AUTH_SHARED_KEY: | 1323 | case WLAN_AUTH_SHARED_KEY: |
1523 | if (ifmgd->auth_transaction == 4) { | 1324 | if (wk->auth_transaction == 4) { |
1524 | ieee80211_auth_completed(sdata); | 1325 | ieee80211_auth_completed(sdata, wk); |
1525 | cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len); | 1326 | return RX_MGMT_CFG80211_AUTH; |
1526 | } else | 1327 | } else |
1527 | ieee80211_auth_challenge(sdata, mgmt, len); | 1328 | ieee80211_auth_challenge(sdata, wk, mgmt, len); |
1528 | break; | 1329 | break; |
1529 | } | 1330 | } |
1331 | |||
1332 | return RX_MGMT_NONE; | ||
1530 | } | 1333 | } |
1531 | 1334 | ||
1532 | 1335 | ||
1533 | static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, | 1336 | static enum rx_mgmt_action __must_check |
1534 | struct ieee80211_mgmt *mgmt, | 1337 | ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, |
1535 | size_t len) | 1338 | struct ieee80211_mgd_work *wk, |
1339 | struct ieee80211_mgmt *mgmt, size_t len) | ||
1536 | { | 1340 | { |
1537 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 1341 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1342 | const u8 *bssid = NULL; | ||
1538 | u16 reason_code; | 1343 | u16 reason_code; |
1539 | 1344 | ||
1540 | if (len < 24 + 2) | 1345 | if (len < 24 + 2) |
1541 | return; | 1346 | return RX_MGMT_NONE; |
1542 | 1347 | ||
1543 | if (memcmp(ifmgd->bssid, mgmt->sa, ETH_ALEN)) | 1348 | ASSERT_MGD_MTX(ifmgd); |
1544 | return; | 1349 | |
1350 | if (wk) | ||
1351 | bssid = wk->bss->cbss.bssid; | ||
1352 | else | ||
1353 | bssid = ifmgd->associated->cbss.bssid; | ||
1545 | 1354 | ||
1546 | reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); | 1355 | reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); |
1547 | 1356 | ||
1548 | if (ifmgd->flags & IEEE80211_STA_AUTHENTICATED) | 1357 | printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", |
1549 | printk(KERN_DEBUG "%s: deauthenticated (Reason: %u)\n", | 1358 | sdata->dev->name, bssid, reason_code); |
1550 | sdata->dev->name, reason_code); | ||
1551 | 1359 | ||
1552 | if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) && | 1360 | if (!wk) { |
1553 | (ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE || | 1361 | ieee80211_set_disassoc(sdata, true); |
1554 | ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE || | 1362 | } else { |
1555 | ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED)) { | 1363 | list_del(&wk->list); |
1556 | ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE; | 1364 | kfree(wk); |
1557 | mod_timer(&ifmgd->timer, jiffies + | ||
1558 | IEEE80211_RETRY_AUTH_INTERVAL); | ||
1559 | } | 1365 | } |
1560 | 1366 | ||
1561 | ieee80211_set_disassoc(sdata, true, false, 0); | 1367 | return RX_MGMT_CFG80211_DEAUTH; |
1562 | ifmgd->flags &= ~IEEE80211_STA_AUTHENTICATED; | ||
1563 | cfg80211_send_deauth(sdata->dev, (u8 *) mgmt, len); | ||
1564 | } | 1368 | } |
1565 | 1369 | ||
1566 | 1370 | ||
1567 | static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, | 1371 | static enum rx_mgmt_action __must_check |
1568 | struct ieee80211_mgmt *mgmt, | 1372 | ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, |
1569 | size_t len) | 1373 | struct ieee80211_mgmt *mgmt, size_t len) |
1570 | { | 1374 | { |
1571 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 1375 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1572 | u16 reason_code; | 1376 | u16 reason_code; |
1573 | 1377 | ||
1574 | if (len < 24 + 2) | 1378 | if (len < 24 + 2) |
1575 | return; | 1379 | return RX_MGMT_NONE; |
1576 | 1380 | ||
1577 | if (memcmp(ifmgd->bssid, mgmt->sa, ETH_ALEN)) | 1381 | ASSERT_MGD_MTX(ifmgd); |
1578 | return; | ||
1579 | 1382 | ||
1580 | reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); | 1383 | if (WARN_ON(!ifmgd->associated)) |
1384 | return RX_MGMT_NONE; | ||
1581 | 1385 | ||
1582 | if (ifmgd->flags & IEEE80211_STA_ASSOCIATED) | 1386 | if (WARN_ON(memcmp(ifmgd->associated->cbss.bssid, mgmt->sa, ETH_ALEN))) |
1583 | printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n", | 1387 | return RX_MGMT_NONE; |
1584 | sdata->dev->name, reason_code); | ||
1585 | 1388 | ||
1586 | if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) && | 1389 | reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); |
1587 | ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) { | ||
1588 | ifmgd->state = IEEE80211_STA_MLME_ASSOCIATE; | ||
1589 | mod_timer(&ifmgd->timer, jiffies + | ||
1590 | IEEE80211_RETRY_AUTH_INTERVAL); | ||
1591 | } | ||
1592 | 1390 | ||
1593 | ieee80211_set_disassoc(sdata, false, false, reason_code); | 1391 | printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n", |
1594 | cfg80211_send_disassoc(sdata->dev, (u8 *) mgmt, len); | 1392 | sdata->dev->name, reason_code); |
1393 | |||
1394 | ieee80211_set_disassoc(sdata, false); | ||
1395 | return RX_MGMT_CFG80211_DISASSOC; | ||
1595 | } | 1396 | } |
1596 | 1397 | ||
1597 | 1398 | ||
1598 | static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | 1399 | static enum rx_mgmt_action __must_check |
1599 | struct ieee80211_mgmt *mgmt, | 1400 | ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, |
1600 | size_t len, | 1401 | struct ieee80211_mgd_work *wk, |
1601 | int reassoc) | 1402 | struct ieee80211_mgmt *mgmt, size_t len, |
1403 | bool reassoc) | ||
1602 | { | 1404 | { |
1603 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 1405 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1604 | struct ieee80211_local *local = sdata->local; | 1406 | struct ieee80211_local *local = sdata->local; |
@@ -1614,17 +1416,16 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
1614 | bool have_higher_than_11mbit = false, newsta = false; | 1416 | bool have_higher_than_11mbit = false, newsta = false; |
1615 | u16 ap_ht_cap_flags; | 1417 | u16 ap_ht_cap_flags; |
1616 | 1418 | ||
1617 | /* AssocResp and ReassocResp have identical structure, so process both | 1419 | /* |
1618 | * of them in this function. */ | 1420 | * AssocResp and ReassocResp have identical structure, so process both |
1619 | 1421 | * of them in this function. | |
1620 | if (ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE) | 1422 | */ |
1621 | return; | ||
1622 | 1423 | ||
1623 | if (len < 24 + 6) | 1424 | if (len < 24 + 6) |
1624 | return; | 1425 | return RX_MGMT_NONE; |
1625 | 1426 | ||
1626 | if (memcmp(ifmgd->bssid, mgmt->sa, ETH_ALEN) != 0) | 1427 | if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0) |
1627 | return; | 1428 | return RX_MGMT_NONE; |
1628 | 1429 | ||
1629 | capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); | 1430 | capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); |
1630 | status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); | 1431 | status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); |
@@ -1647,26 +1448,18 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
1647 | printk(KERN_DEBUG "%s: AP rejected association temporarily; " | 1448 | printk(KERN_DEBUG "%s: AP rejected association temporarily; " |
1648 | "comeback duration %u TU (%u ms)\n", | 1449 | "comeback duration %u TU (%u ms)\n", |
1649 | sdata->dev->name, tu, ms); | 1450 | sdata->dev->name, tu, ms); |
1451 | wk->timeout = jiffies + msecs_to_jiffies(ms); | ||
1650 | if (ms > IEEE80211_ASSOC_TIMEOUT) | 1452 | if (ms > IEEE80211_ASSOC_TIMEOUT) |
1651 | mod_timer(&ifmgd->timer, | 1453 | run_again(ifmgd, jiffies + msecs_to_jiffies(ms)); |
1652 | jiffies + msecs_to_jiffies(ms)); | 1454 | return RX_MGMT_NONE; |
1653 | return; | ||
1654 | } | 1455 | } |
1655 | 1456 | ||
1656 | if (status_code != WLAN_STATUS_SUCCESS) { | 1457 | if (status_code != WLAN_STATUS_SUCCESS) { |
1657 | printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", | 1458 | printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", |
1658 | sdata->dev->name, status_code); | 1459 | sdata->dev->name, status_code); |
1659 | /* if this was a reassociation, ensure we try a "full" | 1460 | list_del(&wk->list); |
1660 | * association next time. This works around some broken APs | 1461 | kfree(wk); |
1661 | * which do not correctly reject reassociation requests. */ | 1462 | return RX_MGMT_CFG80211_ASSOC; |
1662 | ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET; | ||
1663 | cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, len); | ||
1664 | if (ifmgd->flags & IEEE80211_STA_EXT_SME) { | ||
1665 | /* Wait for SME to decide what to do next */ | ||
1666 | ifmgd->state = IEEE80211_STA_MLME_DISABLED; | ||
1667 | ieee80211_recalc_idle(local); | ||
1668 | } | ||
1669 | return; | ||
1670 | } | 1463 | } |
1671 | 1464 | ||
1672 | if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) | 1465 | if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) |
@@ -1677,51 +1470,35 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
1677 | if (!elems.supp_rates) { | 1470 | if (!elems.supp_rates) { |
1678 | printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", | 1471 | printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", |
1679 | sdata->dev->name); | 1472 | sdata->dev->name); |
1680 | return; | 1473 | return RX_MGMT_NONE; |
1681 | } | 1474 | } |
1682 | 1475 | ||
1683 | printk(KERN_DEBUG "%s: associated\n", sdata->dev->name); | 1476 | printk(KERN_DEBUG "%s: associated\n", sdata->dev->name); |
1684 | ifmgd->aid = aid; | 1477 | ifmgd->aid = aid; |
1685 | ifmgd->ap_capab = capab_info; | ||
1686 | |||
1687 | kfree(ifmgd->assocresp_ies); | ||
1688 | ifmgd->assocresp_ies_len = len - (pos - (u8 *) mgmt); | ||
1689 | ifmgd->assocresp_ies = kmalloc(ifmgd->assocresp_ies_len, GFP_KERNEL); | ||
1690 | if (ifmgd->assocresp_ies) | ||
1691 | memcpy(ifmgd->assocresp_ies, pos, ifmgd->assocresp_ies_len); | ||
1692 | 1478 | ||
1693 | rcu_read_lock(); | 1479 | rcu_read_lock(); |
1694 | 1480 | ||
1695 | /* Add STA entry for the AP */ | 1481 | /* Add STA entry for the AP */ |
1696 | sta = sta_info_get(local, ifmgd->bssid); | 1482 | sta = sta_info_get(local, wk->bss->cbss.bssid); |
1697 | if (!sta) { | 1483 | if (!sta) { |
1698 | newsta = true; | 1484 | newsta = true; |
1699 | 1485 | ||
1700 | sta = sta_info_alloc(sdata, ifmgd->bssid, GFP_ATOMIC); | 1486 | rcu_read_unlock(); |
1487 | |||
1488 | sta = sta_info_alloc(sdata, wk->bss->cbss.bssid, GFP_KERNEL); | ||
1701 | if (!sta) { | 1489 | if (!sta) { |
1702 | printk(KERN_DEBUG "%s: failed to alloc STA entry for" | 1490 | printk(KERN_DEBUG "%s: failed to alloc STA entry for" |
1703 | " the AP\n", sdata->dev->name); | 1491 | " the AP\n", sdata->dev->name); |
1704 | rcu_read_unlock(); | 1492 | return RX_MGMT_NONE; |
1705 | return; | ||
1706 | } | 1493 | } |
1707 | 1494 | ||
1708 | /* update new sta with its last rx activity */ | 1495 | set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | |
1709 | sta->last_rx = jiffies; | 1496 | WLAN_STA_ASSOC_AP); |
1710 | } | 1497 | if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) |
1498 | set_sta_flags(sta, WLAN_STA_AUTHORIZED); | ||
1711 | 1499 | ||
1712 | /* | 1500 | rcu_read_lock(); |
1713 | * FIXME: Do we really need to update the sta_info's information here? | 1501 | } |
1714 | * We already know about the AP (we found it in our list) so it | ||
1715 | * should already be filled with the right info, no? | ||
1716 | * As it stands, all this is racy because typically we assume | ||
1717 | * the information that is filled in here (except flags) doesn't | ||
1718 | * change while a STA structure is alive. As such, it should move | ||
1719 | * to between the sta_info_alloc() and sta_info_insert() above. | ||
1720 | */ | ||
1721 | |||
1722 | set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP); | ||
1723 | if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) | ||
1724 | set_sta_flags(sta, WLAN_STA_AUTHORIZED); | ||
1725 | 1502 | ||
1726 | rates = 0; | 1503 | rates = 0; |
1727 | basic_rates = 0; | 1504 | basic_rates = 0; |
@@ -1771,8 +1548,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
1771 | else | 1548 | else |
1772 | sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; | 1549 | sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; |
1773 | 1550 | ||
1774 | /* If TKIP/WEP is used, no need to parse AP's HT capabilities */ | 1551 | if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) |
1775 | if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_TKIP_WEP_USED)) | ||
1776 | ieee80211_ht_cap_ie_to_sta_ht_cap(sband, | 1552 | ieee80211_ht_cap_ie_to_sta_ht_cap(sband, |
1777 | elems.ht_cap_elem, &sta->sta.ht_cap); | 1553 | elems.ht_cap_elem, &sta->sta.ht_cap); |
1778 | 1554 | ||
@@ -1792,7 +1568,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
1792 | printk(KERN_DEBUG "%s: failed to insert STA entry for" | 1568 | printk(KERN_DEBUG "%s: failed to insert STA entry for" |
1793 | " the AP (error %d)\n", sdata->dev->name, err); | 1569 | " the AP (error %d)\n", sdata->dev->name, err); |
1794 | rcu_read_unlock(); | 1570 | rcu_read_unlock(); |
1795 | return; | 1571 | return RX_MGMT_NONE; |
1796 | } | 1572 | } |
1797 | } | 1573 | } |
1798 | 1574 | ||
@@ -1806,24 +1582,29 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | |||
1806 | 1582 | ||
1807 | if (elems.ht_info_elem && elems.wmm_param && | 1583 | if (elems.ht_info_elem && elems.wmm_param && |
1808 | (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) && | 1584 | (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) && |
1809 | !(ifmgd->flags & IEEE80211_STA_TKIP_WEP_USED)) | 1585 | !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) |
1810 | changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, | 1586 | changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, |
1587 | wk->bss->cbss.bssid, | ||
1811 | ap_ht_cap_flags); | 1588 | ap_ht_cap_flags); |
1812 | 1589 | ||
1590 | /* delete work item -- must be before set_associated for PS */ | ||
1591 | list_del(&wk->list); | ||
1592 | |||
1813 | /* set AID and assoc capability, | 1593 | /* set AID and assoc capability, |
1814 | * ieee80211_set_associated() will tell the driver */ | 1594 | * ieee80211_set_associated() will tell the driver */ |
1815 | bss_conf->aid = aid; | 1595 | bss_conf->aid = aid; |
1816 | bss_conf->assoc_capability = capab_info; | 1596 | bss_conf->assoc_capability = capab_info; |
1817 | ieee80211_set_associated(sdata, changed); | 1597 | /* this will take ownership of wk */ |
1598 | ieee80211_set_associated(sdata, wk, changed); | ||
1818 | 1599 | ||
1819 | /* | 1600 | /* |
1820 | * initialise the time of last beacon to be the association time, | 1601 | * Start timer to probe the connection to the AP now. |
1821 | * otherwise beacon loss check will trigger immediately | 1602 | * Also start the timer that will detect beacon loss. |
1822 | */ | 1603 | */ |
1823 | ifmgd->last_beacon = jiffies; | 1604 | ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); |
1605 | mod_beacon_timer(sdata); | ||
1824 | 1606 | ||
1825 | ieee80211_associated(sdata); | 1607 | return RX_MGMT_CFG80211_ASSOC; |
1826 | cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, len); | ||
1827 | } | 1608 | } |
1828 | 1609 | ||
1829 | 1610 | ||
@@ -1851,23 +1632,25 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
1851 | 1632 | ||
1852 | bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, | 1633 | bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, |
1853 | channel, beacon); | 1634 | channel, beacon); |
1854 | if (!bss) | 1635 | if (bss) |
1636 | ieee80211_rx_bss_put(local, bss); | ||
1637 | |||
1638 | if (!sdata->u.mgd.associated) | ||
1855 | return; | 1639 | return; |
1856 | 1640 | ||
1857 | if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) && | 1641 | if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) && |
1858 | (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN) == 0)) { | 1642 | (memcmp(mgmt->bssid, sdata->u.mgd.associated->cbss.bssid, |
1643 | ETH_ALEN) == 0)) { | ||
1859 | struct ieee80211_channel_sw_ie *sw_elem = | 1644 | struct ieee80211_channel_sw_ie *sw_elem = |
1860 | (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; | 1645 | (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; |
1861 | ieee80211_sta_process_chanswitch(sdata, sw_elem, bss); | 1646 | ieee80211_sta_process_chanswitch(sdata, sw_elem, bss); |
1862 | } | 1647 | } |
1863 | |||
1864 | ieee80211_rx_bss_put(local, bss); | ||
1865 | } | 1648 | } |
1866 | 1649 | ||
1867 | 1650 | ||
1868 | static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, | 1651 | static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, |
1869 | struct ieee80211_mgmt *mgmt, | 1652 | struct ieee80211_mgd_work *wk, |
1870 | size_t len, | 1653 | struct ieee80211_mgmt *mgmt, size_t len, |
1871 | struct ieee80211_rx_status *rx_status) | 1654 | struct ieee80211_rx_status *rx_status) |
1872 | { | 1655 | { |
1873 | struct ieee80211_if_managed *ifmgd; | 1656 | struct ieee80211_if_managed *ifmgd; |
@@ -1876,6 +1659,8 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, | |||
1876 | 1659 | ||
1877 | ifmgd = &sdata->u.mgd; | 1660 | ifmgd = &sdata->u.mgd; |
1878 | 1661 | ||
1662 | ASSERT_MGD_MTX(ifmgd); | ||
1663 | |||
1879 | if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) | 1664 | if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) |
1880 | return; /* ignore ProbeResp to foreign address */ | 1665 | return; /* ignore ProbeResp to foreign address */ |
1881 | 1666 | ||
@@ -1889,17 +1674,32 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, | |||
1889 | ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); | 1674 | ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); |
1890 | 1675 | ||
1891 | /* direct probe may be part of the association flow */ | 1676 | /* direct probe may be part of the association flow */ |
1892 | if (ifmgd->state == IEEE80211_STA_MLME_DIRECT_PROBE) { | 1677 | if (wk && wk->state == IEEE80211_MGD_STATE_PROBE) { |
1893 | printk(KERN_DEBUG "%s direct probe responded\n", | 1678 | printk(KERN_DEBUG "%s direct probe responded\n", |
1894 | sdata->dev->name); | 1679 | sdata->dev->name); |
1895 | ieee80211_authenticate(sdata); | 1680 | wk->tries = 0; |
1681 | wk->state = IEEE80211_MGD_STATE_AUTH; | ||
1682 | WARN_ON(ieee80211_authenticate(sdata, wk) != RX_MGMT_NONE); | ||
1896 | } | 1683 | } |
1897 | 1684 | ||
1898 | if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) { | 1685 | if (ifmgd->associated && |
1899 | ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; | 1686 | memcmp(mgmt->bssid, ifmgd->associated->cbss.bssid, ETH_ALEN) == 0 && |
1687 | ifmgd->flags & (IEEE80211_STA_BEACON_POLL | | ||
1688 | IEEE80211_STA_CONNECTION_POLL)) { | ||
1689 | ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | | ||
1690 | IEEE80211_STA_BEACON_POLL); | ||
1900 | mutex_lock(&sdata->local->iflist_mtx); | 1691 | mutex_lock(&sdata->local->iflist_mtx); |
1901 | ieee80211_recalc_ps(sdata->local, -1); | 1692 | ieee80211_recalc_ps(sdata->local, -1); |
1902 | mutex_unlock(&sdata->local->iflist_mtx); | 1693 | mutex_unlock(&sdata->local->iflist_mtx); |
1694 | /* | ||
1695 | * We've received a probe response, but are not sure whether | ||
1696 | * we have or will be receiving any beacons or data, so let's | ||
1697 | * schedule the timers again, just in case. | ||
1698 | */ | ||
1699 | mod_beacon_timer(sdata); | ||
1700 | mod_timer(&ifmgd->conn_mon_timer, | ||
1701 | round_jiffies_up(jiffies + | ||
1702 | IEEE80211_CONNECTION_IDLE_TIME)); | ||
1903 | } | 1703 | } |
1904 | } | 1704 | } |
1905 | 1705 | ||
@@ -1937,6 +1737,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
1937 | bool erp_valid, directed_tim = false; | 1737 | bool erp_valid, directed_tim = false; |
1938 | u8 erp_value = 0; | 1738 | u8 erp_value = 0; |
1939 | u32 ncrc; | 1739 | u32 ncrc; |
1740 | u8 *bssid; | ||
1741 | |||
1742 | ASSERT_MGD_MTX(ifmgd); | ||
1940 | 1743 | ||
1941 | /* Process beacon from the current BSS */ | 1744 | /* Process beacon from the current BSS */ |
1942 | baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; | 1745 | baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; |
@@ -1946,23 +1749,41 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
1946 | if (rx_status->freq != local->hw.conf.channel->center_freq) | 1749 | if (rx_status->freq != local->hw.conf.channel->center_freq) |
1947 | return; | 1750 | return; |
1948 | 1751 | ||
1949 | if (!(ifmgd->flags & IEEE80211_STA_ASSOCIATED) || | 1752 | /* |
1950 | memcmp(ifmgd->bssid, mgmt->bssid, ETH_ALEN) != 0) | 1753 | * We might have received a number of frames, among them a |
1754 | * disassoc frame and a beacon... | ||
1755 | */ | ||
1756 | if (!ifmgd->associated) | ||
1757 | return; | ||
1758 | |||
1759 | bssid = ifmgd->associated->cbss.bssid; | ||
1760 | |||
1761 | /* | ||
1762 | * And in theory even frames from a different AP we were just | ||
1763 | * associated to a split-second ago! | ||
1764 | */ | ||
1765 | if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0) | ||
1951 | return; | 1766 | return; |
1952 | 1767 | ||
1953 | if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) { | 1768 | if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) { |
1954 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 1769 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
1955 | if (net_ratelimit()) { | 1770 | if (net_ratelimit()) { |
1956 | printk(KERN_DEBUG "%s: cancelling probereq poll due " | 1771 | printk(KERN_DEBUG "%s: cancelling probereq poll due " |
1957 | "to a received beacon\n", sdata->dev->name); | 1772 | "to a received beacon\n", sdata->dev->name); |
1958 | } | 1773 | } |
1959 | #endif | 1774 | #endif |
1960 | ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL; | 1775 | ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; |
1961 | mutex_lock(&local->iflist_mtx); | 1776 | mutex_lock(&local->iflist_mtx); |
1962 | ieee80211_recalc_ps(local, -1); | 1777 | ieee80211_recalc_ps(local, -1); |
1963 | mutex_unlock(&local->iflist_mtx); | 1778 | mutex_unlock(&local->iflist_mtx); |
1964 | } | 1779 | } |
1965 | 1780 | ||
1781 | /* | ||
1782 | * Push the beacon loss detection into the future since | ||
1783 | * we are processing a beacon from the AP just now. | ||
1784 | */ | ||
1785 | mod_beacon_timer(sdata); | ||
1786 | |||
1966 | ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); | 1787 | ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); |
1967 | ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable, | 1788 | ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable, |
1968 | len - baselen, &elems, | 1789 | len - baselen, &elems, |
@@ -2019,15 +1840,15 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
2019 | 1840 | ||
2020 | 1841 | ||
2021 | if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param && | 1842 | if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param && |
2022 | !(ifmgd->flags & IEEE80211_STA_TKIP_WEP_USED)) { | 1843 | !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) { |
2023 | struct sta_info *sta; | 1844 | struct sta_info *sta; |
2024 | struct ieee80211_supported_band *sband; | 1845 | struct ieee80211_supported_band *sband; |
2025 | u16 ap_ht_cap_flags; | 1846 | u16 ap_ht_cap_flags; |
2026 | 1847 | ||
2027 | rcu_read_lock(); | 1848 | rcu_read_lock(); |
2028 | 1849 | ||
2029 | sta = sta_info_get(local, ifmgd->bssid); | 1850 | sta = sta_info_get(local, bssid); |
2030 | if (!sta) { | 1851 | if (WARN_ON(!sta)) { |
2031 | rcu_read_unlock(); | 1852 | rcu_read_unlock(); |
2032 | return; | 1853 | return; |
2033 | } | 1854 | } |
@@ -2042,15 +1863,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
2042 | rcu_read_unlock(); | 1863 | rcu_read_unlock(); |
2043 | 1864 | ||
2044 | changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, | 1865 | changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, |
2045 | ap_ht_cap_flags); | 1866 | bssid, ap_ht_cap_flags); |
2046 | } | 1867 | } |
2047 | 1868 | ||
1869 | /* Note: country IE parsing is done for us by cfg80211 */ | ||
2048 | if (elems.country_elem) { | 1870 | if (elems.country_elem) { |
2049 | /* Note we are only reviewing this on beacons | ||
2050 | * for the BSSID we are associated to */ | ||
2051 | regulatory_hint_11d(local->hw.wiphy, | ||
2052 | elems.country_elem, elems.country_elem_len); | ||
2053 | |||
2054 | /* TODO: IBSS also needs this */ | 1871 | /* TODO: IBSS also needs this */ |
2055 | if (elems.pwr_constr_elem) | 1872 | if (elems.pwr_constr_elem) |
2056 | ieee80211_handle_pwr_constr(sdata, | 1873 | ieee80211_handle_pwr_constr(sdata, |
@@ -2063,8 +1880,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
2063 | } | 1880 | } |
2064 | 1881 | ||
2065 | ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, | 1882 | ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, |
2066 | struct sk_buff *skb, | 1883 | struct sk_buff *skb) |
2067 | struct ieee80211_rx_status *rx_status) | ||
2068 | { | 1884 | { |
2069 | struct ieee80211_local *local = sdata->local; | 1885 | struct ieee80211_local *local = sdata->local; |
2070 | struct ieee80211_mgmt *mgmt; | 1886 | struct ieee80211_mgmt *mgmt; |
@@ -2080,14 +1896,14 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, | |||
2080 | case IEEE80211_STYPE_PROBE_REQ: | 1896 | case IEEE80211_STYPE_PROBE_REQ: |
2081 | case IEEE80211_STYPE_PROBE_RESP: | 1897 | case IEEE80211_STYPE_PROBE_RESP: |
2082 | case IEEE80211_STYPE_BEACON: | 1898 | case IEEE80211_STYPE_BEACON: |
2083 | memcpy(skb->cb, rx_status, sizeof(*rx_status)); | ||
2084 | case IEEE80211_STYPE_AUTH: | 1899 | case IEEE80211_STYPE_AUTH: |
2085 | case IEEE80211_STYPE_ASSOC_RESP: | 1900 | case IEEE80211_STYPE_ASSOC_RESP: |
2086 | case IEEE80211_STYPE_REASSOC_RESP: | 1901 | case IEEE80211_STYPE_REASSOC_RESP: |
2087 | case IEEE80211_STYPE_DEAUTH: | 1902 | case IEEE80211_STYPE_DEAUTH: |
2088 | case IEEE80211_STYPE_DISASSOC: | 1903 | case IEEE80211_STYPE_DISASSOC: |
1904 | case IEEE80211_STYPE_ACTION: | ||
2089 | skb_queue_tail(&sdata->u.mgd.skb_queue, skb); | 1905 | skb_queue_tail(&sdata->u.mgd.skb_queue, skb); |
2090 | queue_work(local->hw.workqueue, &sdata->u.mgd.work); | 1906 | ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); |
2091 | return RX_QUEUED; | 1907 | return RX_QUEUED; |
2092 | } | 1908 | } |
2093 | 1909 | ||
@@ -2097,40 +1913,119 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, | |||
2097 | static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | 1913 | static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, |
2098 | struct sk_buff *skb) | 1914 | struct sk_buff *skb) |
2099 | { | 1915 | { |
1916 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
2100 | struct ieee80211_rx_status *rx_status; | 1917 | struct ieee80211_rx_status *rx_status; |
2101 | struct ieee80211_mgmt *mgmt; | 1918 | struct ieee80211_mgmt *mgmt; |
1919 | struct ieee80211_mgd_work *wk; | ||
1920 | enum rx_mgmt_action rma = RX_MGMT_NONE; | ||
2102 | u16 fc; | 1921 | u16 fc; |
2103 | 1922 | ||
2104 | rx_status = (struct ieee80211_rx_status *) skb->cb; | 1923 | rx_status = (struct ieee80211_rx_status *) skb->cb; |
2105 | mgmt = (struct ieee80211_mgmt *) skb->data; | 1924 | mgmt = (struct ieee80211_mgmt *) skb->data; |
2106 | fc = le16_to_cpu(mgmt->frame_control); | 1925 | fc = le16_to_cpu(mgmt->frame_control); |
2107 | 1926 | ||
2108 | switch (fc & IEEE80211_FCTL_STYPE) { | 1927 | mutex_lock(&ifmgd->mtx); |
2109 | case IEEE80211_STYPE_PROBE_RESP: | 1928 | |
2110 | ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len, | 1929 | if (ifmgd->associated && |
2111 | rx_status); | 1930 | memcmp(ifmgd->associated->cbss.bssid, mgmt->bssid, |
2112 | break; | 1931 | ETH_ALEN) == 0) { |
2113 | case IEEE80211_STYPE_BEACON: | 1932 | switch (fc & IEEE80211_FCTL_STYPE) { |
2114 | ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, | 1933 | case IEEE80211_STYPE_BEACON: |
2115 | rx_status); | 1934 | ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, |
2116 | break; | 1935 | rx_status); |
2117 | case IEEE80211_STYPE_AUTH: | 1936 | break; |
2118 | ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len); | 1937 | case IEEE80211_STYPE_PROBE_RESP: |
1938 | ieee80211_rx_mgmt_probe_resp(sdata, NULL, mgmt, | ||
1939 | skb->len, rx_status); | ||
1940 | break; | ||
1941 | case IEEE80211_STYPE_DEAUTH: | ||
1942 | rma = ieee80211_rx_mgmt_deauth(sdata, NULL, | ||
1943 | mgmt, skb->len); | ||
1944 | break; | ||
1945 | case IEEE80211_STYPE_DISASSOC: | ||
1946 | rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); | ||
1947 | break; | ||
1948 | case IEEE80211_STYPE_ACTION: | ||
1949 | /* XXX: differentiate, can only happen for CSA now! */ | ||
1950 | ieee80211_sta_process_chanswitch(sdata, | ||
1951 | &mgmt->u.action.u.chan_switch.sw_elem, | ||
1952 | ifmgd->associated); | ||
1953 | break; | ||
1954 | } | ||
1955 | mutex_unlock(&ifmgd->mtx); | ||
1956 | |||
1957 | switch (rma) { | ||
1958 | case RX_MGMT_NONE: | ||
1959 | /* no action */ | ||
1960 | break; | ||
1961 | case RX_MGMT_CFG80211_DEAUTH: | ||
1962 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, | ||
1963 | NULL); | ||
1964 | break; | ||
1965 | case RX_MGMT_CFG80211_DISASSOC: | ||
1966 | cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len, | ||
1967 | NULL); | ||
1968 | break; | ||
1969 | default: | ||
1970 | WARN(1, "unexpected: %d", rma); | ||
1971 | } | ||
1972 | goto out; | ||
1973 | } | ||
1974 | |||
1975 | list_for_each_entry(wk, &ifmgd->work_list, list) { | ||
1976 | if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0) | ||
1977 | continue; | ||
1978 | |||
1979 | switch (fc & IEEE80211_FCTL_STYPE) { | ||
1980 | case IEEE80211_STYPE_PROBE_RESP: | ||
1981 | ieee80211_rx_mgmt_probe_resp(sdata, wk, mgmt, skb->len, | ||
1982 | rx_status); | ||
1983 | break; | ||
1984 | case IEEE80211_STYPE_AUTH: | ||
1985 | rma = ieee80211_rx_mgmt_auth(sdata, wk, mgmt, skb->len); | ||
1986 | break; | ||
1987 | case IEEE80211_STYPE_ASSOC_RESP: | ||
1988 | rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt, | ||
1989 | skb->len, false); | ||
1990 | break; | ||
1991 | case IEEE80211_STYPE_REASSOC_RESP: | ||
1992 | rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt, | ||
1993 | skb->len, true); | ||
1994 | break; | ||
1995 | case IEEE80211_STYPE_DEAUTH: | ||
1996 | rma = ieee80211_rx_mgmt_deauth(sdata, wk, mgmt, | ||
1997 | skb->len); | ||
1998 | break; | ||
1999 | } | ||
2000 | /* | ||
2001 | * We've processed this frame for that work, so it can't | ||
2002 | * belong to another work struct. | ||
2003 | * NB: this is also required for correctness because the | ||
2004 | * called functions can free 'wk', and for 'rma'! | ||
2005 | */ | ||
2119 | break; | 2006 | break; |
2120 | case IEEE80211_STYPE_ASSOC_RESP: | 2007 | } |
2121 | ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, 0); | 2008 | |
2009 | mutex_unlock(&ifmgd->mtx); | ||
2010 | |||
2011 | switch (rma) { | ||
2012 | case RX_MGMT_NONE: | ||
2013 | /* no action */ | ||
2122 | break; | 2014 | break; |
2123 | case IEEE80211_STYPE_REASSOC_RESP: | 2015 | case RX_MGMT_CFG80211_AUTH: |
2124 | ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, 1); | 2016 | cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, skb->len); |
2125 | break; | 2017 | break; |
2126 | case IEEE80211_STYPE_DEAUTH: | 2018 | case RX_MGMT_CFG80211_ASSOC: |
2127 | ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); | 2019 | cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len); |
2128 | break; | 2020 | break; |
2129 | case IEEE80211_STYPE_DISASSOC: | 2021 | case RX_MGMT_CFG80211_DEAUTH: |
2130 | ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); | 2022 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, NULL); |
2131 | break; | 2023 | break; |
2024 | default: | ||
2025 | WARN(1, "unexpected: %d", rma); | ||
2132 | } | 2026 | } |
2133 | 2027 | ||
2028 | out: | ||
2134 | kfree_skb(skb); | 2029 | kfree_skb(skb); |
2135 | } | 2030 | } |
2136 | 2031 | ||
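The handlers above now return an rx_mgmt_action instead of calling cfg80211 directly, so ieee80211_sta_rx_queued_mgmt() can drop ifmgd->mtx before delivering the notification. A minimal sketch of that "decide under the lock, report outside it" shape, using a pthread mutex and hypothetical names in place of the mac80211 structures and cfg80211 calls:

#include <pthread.h>
#include <stdio.h>

enum rx_action { RX_NONE, RX_REPORT_DEAUTH, RX_REPORT_DISASSOC };

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs with state_lock held: updates local state and only *decides*
 * what should be reported; it never calls the notifier itself. */
static enum rx_action handle_frame(int is_deauth)
{
    /* ... association state would be torn down here ... */
    return is_deauth ? RX_REPORT_DEAUTH : RX_REPORT_DISASSOC;
}

/* Stand-in for cfg80211_send_deauth()/cfg80211_send_disassoc(). */
static void notify_userspace(enum rx_action act)
{
    printf("reporting action %d without holding state_lock\n", (int)act);
}

static void rx_queued_mgmt(int is_deauth)
{
    enum rx_action act;

    pthread_mutex_lock(&state_lock);
    act = handle_frame(is_deauth);
    pthread_mutex_unlock(&state_lock);

    /* The notifier may call back into this layer, so it must not run
     * under the lock -- the chosen action is carried out here instead. */
    if (act != RX_NONE)
        notify_userspace(act);
}

int main(void)
{
    rx_queued_mgmt(1);
    rx_queued_mgmt(0);
    return 0;
}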
@@ -2146,215 +2041,216 @@ static void ieee80211_sta_timer(unsigned long data) | |||
2146 | return; | 2041 | return; |
2147 | } | 2042 | } |
2148 | 2043 | ||
2149 | set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request); | 2044 | ieee80211_queue_work(&local->hw, &ifmgd->work); |
2150 | queue_work(local->hw.workqueue, &ifmgd->work); | ||
2151 | } | 2045 | } |
2152 | 2046 | ||
2153 | static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata) | 2047 | static void ieee80211_sta_work(struct work_struct *work) |
2154 | { | 2048 | { |
2155 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2049 | struct ieee80211_sub_if_data *sdata = |
2050 | container_of(work, struct ieee80211_sub_if_data, u.mgd.work); | ||
2156 | struct ieee80211_local *local = sdata->local; | 2051 | struct ieee80211_local *local = sdata->local; |
2052 | struct ieee80211_if_managed *ifmgd; | ||
2053 | struct sk_buff *skb; | ||
2054 | struct ieee80211_mgd_work *wk, *tmp; | ||
2055 | LIST_HEAD(free_work); | ||
2056 | enum rx_mgmt_action rma; | ||
2057 | bool anybusy = false; | ||
2157 | 2058 | ||
2158 | /* Reset own TSF to allow time synchronization work. */ | 2059 | if (!netif_running(sdata->dev)) |
2159 | drv_reset_tsf(local); | 2060 | return; |
2160 | 2061 | ||
2161 | ifmgd->wmm_last_param_set = -1; /* allow any WMM update */ | 2062 | if (local->scanning) |
2063 | return; | ||
2162 | 2064 | ||
2065 | if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) | ||
2066 | return; | ||
2163 | 2067 | ||
2164 | if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_OPEN) | 2068 | /* |
2165 | ifmgd->auth_alg = WLAN_AUTH_OPEN; | 2069 | * ieee80211_queue_work() should have picked up most cases, |
2166 | else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_SHARED_KEY) | 2070 | * here we'll pick up the rest. |
2167 | ifmgd->auth_alg = WLAN_AUTH_SHARED_KEY; | 2071 | */ |
2168 | else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_LEAP) | 2072 | if (WARN(local->suspended, "STA MLME work scheduled while " |
2169 | ifmgd->auth_alg = WLAN_AUTH_LEAP; | 2073 | "going to suspend\n")) |
2170 | else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_FT) | 2074 | return; |
2171 | ifmgd->auth_alg = WLAN_AUTH_FT; | ||
2172 | else | ||
2173 | ifmgd->auth_alg = WLAN_AUTH_OPEN; | ||
2174 | ifmgd->auth_transaction = -1; | ||
2175 | ifmgd->flags &= ~IEEE80211_STA_ASSOCIATED; | ||
2176 | ifmgd->assoc_scan_tries = 0; | ||
2177 | ifmgd->direct_probe_tries = 0; | ||
2178 | ifmgd->auth_tries = 0; | ||
2179 | ifmgd->assoc_tries = 0; | ||
2180 | netif_tx_stop_all_queues(sdata->dev); | ||
2181 | netif_carrier_off(sdata->dev); | ||
2182 | } | ||
2183 | 2075 | ||
2184 | static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata) | 2076 | ifmgd = &sdata->u.mgd; |
2185 | { | ||
2186 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
2187 | struct ieee80211_local *local = sdata->local; | ||
2188 | struct ieee80211_bss *bss; | ||
2189 | u8 *bssid = ifmgd->bssid, *ssid = ifmgd->ssid; | ||
2190 | u8 ssid_len = ifmgd->ssid_len; | ||
2191 | u16 capa_mask = WLAN_CAPABILITY_ESS; | ||
2192 | u16 capa_val = WLAN_CAPABILITY_ESS; | ||
2193 | struct ieee80211_channel *chan = local->oper_channel; | ||
2194 | 2077 | ||
2195 | if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) && | 2078 | /* first process frames to avoid timing out while a frame is pending */ |
2196 | ifmgd->flags & (IEEE80211_STA_AUTO_SSID_SEL | | 2079 | while ((skb = skb_dequeue(&ifmgd->skb_queue))) |
2197 | IEEE80211_STA_AUTO_BSSID_SEL | | 2080 | ieee80211_sta_rx_queued_mgmt(sdata, skb); |
2198 | IEEE80211_STA_AUTO_CHANNEL_SEL)) { | 2081 | |
2199 | capa_mask |= WLAN_CAPABILITY_PRIVACY; | 2082 | /* then process the rest of the work */ |
2200 | if (sdata->default_key) | 2083 | mutex_lock(&ifmgd->mtx); |
2201 | capa_val |= WLAN_CAPABILITY_PRIVACY; | ||
2202 | } | ||
2203 | 2084 | ||
2204 | if (ifmgd->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) | 2085 | if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | |
2205 | chan = NULL; | 2086 | IEEE80211_STA_CONNECTION_POLL) && |
2087 | ifmgd->associated) { | ||
2088 | u8 bssid[ETH_ALEN]; | ||
2206 | 2089 | ||
2207 | if (ifmgd->flags & IEEE80211_STA_AUTO_BSSID_SEL) | 2090 | memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN); |
2208 | bssid = NULL; | 2091 | if (time_is_after_jiffies(ifmgd->probe_timeout)) |
2092 | run_again(ifmgd, ifmgd->probe_timeout); | ||
2209 | 2093 | ||
2210 | if (ifmgd->flags & IEEE80211_STA_AUTO_SSID_SEL) { | 2094 | else if (ifmgd->probe_send_count < IEEE80211_MAX_PROBE_TRIES) { |
2211 | ssid = NULL; | 2095 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
2212 | ssid_len = 0; | 2096 | printk(KERN_DEBUG "No probe response from AP %pM" |
2097 | " after %dms, try %d\n", bssid, | ||
2098 | (1000 * IEEE80211_PROBE_WAIT)/HZ, | ||
2099 | ifmgd->probe_send_count); | ||
2100 | #endif | ||
2101 | ieee80211_mgd_probe_ap_send(sdata); | ||
2102 | } else { | ||
2103 | /* | ||
2104 | * We actually lost the connection ... or did we? | ||
2105 | * Let's make sure! | ||
2106 | */ | ||
2107 | ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | | ||
2108 | IEEE80211_STA_BEACON_POLL); | ||
2109 | printk(KERN_DEBUG "No probe response from AP %pM" | ||
2110 | " after %dms, disconnecting.\n", | ||
2111 | bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); | ||
2112 | ieee80211_set_disassoc(sdata, true); | ||
2113 | mutex_unlock(&ifmgd->mtx); | ||
2114 | /* | ||
2115 | * must be outside lock due to cfg80211, | ||
2116 | * but that's not a problem. | ||
2117 | */ | ||
2118 | ieee80211_send_deauth_disassoc(sdata, bssid, | ||
2119 | IEEE80211_STYPE_DEAUTH, | ||
2120 | WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, | ||
2121 | NULL); | ||
2122 | mutex_lock(&ifmgd->mtx); | ||
2123 | } | ||
2213 | } | 2124 | } |
2214 | 2125 | ||
2215 | bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan, | ||
2216 | bssid, ssid, ssid_len, | ||
2217 | capa_mask, capa_val); | ||
2218 | 2126 | ||
2219 | if (bss) { | 2127 | ieee80211_recalc_idle(local); |
2220 | local->oper_channel = bss->cbss.channel; | ||
2221 | local->oper_channel_type = NL80211_CHAN_NO_HT; | ||
2222 | ieee80211_hw_config(local, 0); | ||
2223 | 2128 | ||
2224 | if (!(ifmgd->flags & IEEE80211_STA_SSID_SET)) | 2129 | list_for_each_entry_safe(wk, tmp, &ifmgd->work_list, list) { |
2225 | ieee80211_sta_set_ssid(sdata, bss->ssid, | 2130 | if (time_is_after_jiffies(wk->timeout)) { |
2226 | bss->ssid_len); | 2131 | /* |
2227 | ieee80211_sta_set_bssid(sdata, bss->cbss.bssid); | 2132 | * This work item isn't supposed to be worked on |
2228 | ieee80211_sta_def_wmm_params(sdata, bss->supp_rates_len, | 2133 | * right now, but take care to adjust the timer |
2229 | bss->supp_rates); | 2134 | * properly. |
2230 | if (sdata->u.mgd.mfp == IEEE80211_MFP_REQUIRED) | 2135 | */ |
2231 | sdata->u.mgd.flags |= IEEE80211_STA_MFP_ENABLED; | 2136 | run_again(ifmgd, wk->timeout); |
2232 | else | 2137 | continue; |
2233 | sdata->u.mgd.flags &= ~IEEE80211_STA_MFP_ENABLED; | 2138 | } |
2234 | |||
2235 | /* Send out direct probe if no probe resp was received or | ||
2236 | * the one we have is outdated | ||
2237 | */ | ||
2238 | if (!bss->last_probe_resp || | ||
2239 | time_after(jiffies, bss->last_probe_resp | ||
2240 | + IEEE80211_SCAN_RESULT_EXPIRE)) | ||
2241 | ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE; | ||
2242 | else | ||
2243 | ifmgd->state = IEEE80211_STA_MLME_AUTHENTICATE; | ||
2244 | 2139 | ||
2245 | ieee80211_rx_bss_put(local, bss); | 2140 | switch (wk->state) { |
2246 | ieee80211_sta_reset_auth(sdata); | 2141 | default: |
2247 | return 0; | 2142 | WARN_ON(1); |
2248 | } else { | 2143 | /* fall through */ |
2249 | if (ifmgd->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) { | 2144 | case IEEE80211_MGD_STATE_IDLE: |
2145 | /* nothing */ | ||
2146 | rma = RX_MGMT_NONE; | ||
2147 | break; | ||
2148 | case IEEE80211_MGD_STATE_PROBE: | ||
2149 | rma = ieee80211_direct_probe(sdata, wk); | ||
2150 | break; | ||
2151 | case IEEE80211_MGD_STATE_AUTH: | ||
2152 | rma = ieee80211_authenticate(sdata, wk); | ||
2153 | break; | ||
2154 | case IEEE80211_MGD_STATE_ASSOC: | ||
2155 | rma = ieee80211_associate(sdata, wk); | ||
2156 | break; | ||
2157 | } | ||
2158 | |||
2159 | switch (rma) { | ||
2160 | case RX_MGMT_NONE: | ||
2161 | /* no action required */ | ||
2162 | break; | ||
2163 | case RX_MGMT_CFG80211_AUTH_TO: | ||
2164 | case RX_MGMT_CFG80211_ASSOC_TO: | ||
2165 | list_del(&wk->list); | ||
2166 | list_add(&wk->list, &free_work); | ||
2167 | wk->tries = rma; /* small abuse but only local */ | ||
2168 | break; | ||
2169 | default: | ||
2170 | WARN(1, "unexpected: %d", rma); | ||
2171 | } | ||
2172 | } | ||
2250 | 2173 | ||
2251 | ifmgd->assoc_scan_tries++; | 2174 | list_for_each_entry(wk, &ifmgd->work_list, list) { |
2175 | if (wk->state != IEEE80211_MGD_STATE_IDLE) { | ||
2176 | anybusy = true; | ||
2177 | break; | ||
2178 | } | ||
2179 | } | ||
2180 | if (!anybusy && | ||
2181 | test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request)) | ||
2182 | ieee80211_queue_delayed_work(&local->hw, | ||
2183 | &local->scan_work, | ||
2184 | round_jiffies_relative(0)); | ||
2252 | 2185 | ||
2253 | ieee80211_request_internal_scan(sdata, ifmgd->ssid, | 2186 | mutex_unlock(&ifmgd->mtx); |
2254 | ssid_len); | ||
2255 | 2187 | ||
2256 | ifmgd->state = IEEE80211_STA_MLME_AUTHENTICATE; | 2188 | list_for_each_entry_safe(wk, tmp, &free_work, list) { |
2257 | set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request); | 2189 | switch (wk->tries) { |
2258 | } else { | 2190 | case RX_MGMT_CFG80211_AUTH_TO: |
2259 | ifmgd->assoc_scan_tries = 0; | 2191 | cfg80211_send_auth_timeout(sdata->dev, |
2260 | ifmgd->state = IEEE80211_STA_MLME_DISABLED; | 2192 | wk->bss->cbss.bssid); |
2261 | ieee80211_recalc_idle(local); | 2193 | break; |
2194 | case RX_MGMT_CFG80211_ASSOC_TO: | ||
2195 | cfg80211_send_assoc_timeout(sdata->dev, | ||
2196 | wk->bss->cbss.bssid); | ||
2197 | break; | ||
2198 | default: | ||
2199 | WARN(1, "unexpected: %d", wk->tries); | ||
2262 | } | 2200 | } |
2201 | |||
2202 | list_del(&wk->list); | ||
2203 | kfree(wk); | ||
2263 | } | 2204 | } |
2264 | return -1; | ||
2265 | } | ||
2266 | 2205 | ||
2206 | ieee80211_recalc_idle(local); | ||
2207 | } | ||
2267 | 2208 | ||
2268 | static void ieee80211_sta_work(struct work_struct *work) | 2209 | static void ieee80211_sta_bcn_mon_timer(unsigned long data) |
2269 | { | 2210 | { |
2270 | struct ieee80211_sub_if_data *sdata = | 2211 | struct ieee80211_sub_if_data *sdata = |
2271 | container_of(work, struct ieee80211_sub_if_data, u.mgd.work); | 2212 | (struct ieee80211_sub_if_data *) data; |
2272 | struct ieee80211_local *local = sdata->local; | 2213 | struct ieee80211_local *local = sdata->local; |
2273 | struct ieee80211_if_managed *ifmgd; | ||
2274 | struct sk_buff *skb; | ||
2275 | |||
2276 | if (!netif_running(sdata->dev)) | ||
2277 | return; | ||
2278 | 2214 | ||
2279 | if (local->sw_scanning || local->hw_scanning) | 2215 | if (local->quiescing) |
2280 | return; | 2216 | return; |
2281 | 2217 | ||
2282 | if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) | 2218 | ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work); |
2283 | return; | 2219 | } |
2284 | |||
2285 | /* | ||
2286 | * Nothing should have been stuffed into the workqueue during | ||
2287 | * the suspend->resume cycle. If this WARN is seen then there | ||
2288 | * is a bug with either the driver suspend or something in | ||
2289 | * mac80211 stuffing into the workqueue which we haven't yet | ||
2290 | * cleared during mac80211's suspend cycle. | ||
2291 | */ | ||
2292 | if (WARN_ON(local->suspended)) | ||
2293 | return; | ||
2294 | |||
2295 | ifmgd = &sdata->u.mgd; | ||
2296 | |||
2297 | while ((skb = skb_dequeue(&ifmgd->skb_queue))) | ||
2298 | ieee80211_sta_rx_queued_mgmt(sdata, skb); | ||
2299 | 2220 | ||
2300 | if (ifmgd->state != IEEE80211_STA_MLME_DIRECT_PROBE && | 2221 | static void ieee80211_sta_conn_mon_timer(unsigned long data) |
2301 | ifmgd->state != IEEE80211_STA_MLME_AUTHENTICATE && | 2222 | { |
2302 | ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE && | 2223 | struct ieee80211_sub_if_data *sdata = |
2303 | test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request)) { | 2224 | (struct ieee80211_sub_if_data *) data; |
2304 | queue_delayed_work(local->hw.workqueue, &local->scan_work, | 2225 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2305 | round_jiffies_relative(0)); | 2226 | struct ieee80211_local *local = sdata->local; |
2306 | return; | ||
2307 | } | ||
2308 | 2227 | ||
2309 | if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request)) { | 2228 | if (local->quiescing) |
2310 | if (ieee80211_sta_config_auth(sdata)) | ||
2311 | return; | ||
2312 | clear_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request); | ||
2313 | } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request)) | ||
2314 | return; | 2229 | return; |
2315 | 2230 | ||
2316 | ieee80211_recalc_idle(local); | 2231 | ieee80211_queue_work(&local->hw, &ifmgd->monitor_work); |
2317 | 2232 | } | |
2318 | switch (ifmgd->state) { | ||
2319 | case IEEE80211_STA_MLME_DISABLED: | ||
2320 | break; | ||
2321 | case IEEE80211_STA_MLME_DIRECT_PROBE: | ||
2322 | ieee80211_direct_probe(sdata); | ||
2323 | break; | ||
2324 | case IEEE80211_STA_MLME_AUTHENTICATE: | ||
2325 | ieee80211_authenticate(sdata); | ||
2326 | break; | ||
2327 | case IEEE80211_STA_MLME_ASSOCIATE: | ||
2328 | ieee80211_associate(sdata); | ||
2329 | break; | ||
2330 | case IEEE80211_STA_MLME_ASSOCIATED: | ||
2331 | ieee80211_associated(sdata); | ||
2332 | break; | ||
2333 | default: | ||
2334 | WARN_ON(1); | ||
2335 | break; | ||
2336 | } | ||
2337 | 2233 | ||
2338 | if (ieee80211_privacy_mismatch(sdata)) { | 2234 | static void ieee80211_sta_monitor_work(struct work_struct *work) |
2339 | printk(KERN_DEBUG "%s: privacy configuration mismatch and " | 2235 | { |
2340 | "mixed-cell disabled - disassociate\n", sdata->dev->name); | 2236 | struct ieee80211_sub_if_data *sdata = |
2237 | container_of(work, struct ieee80211_sub_if_data, | ||
2238 | u.mgd.monitor_work); | ||
2341 | 2239 | ||
2342 | ieee80211_set_disassoc(sdata, false, true, | 2240 | ieee80211_mgd_probe_ap(sdata, false); |
2343 | WLAN_REASON_UNSPECIFIED); | ||
2344 | } | ||
2345 | } | 2241 | } |
2346 | 2242 | ||
2347 | static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) | 2243 | static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) |
2348 | { | 2244 | { |
2349 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 2245 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
2350 | /* | 2246 | sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL | |
2351 | * Need to update last_beacon to avoid beacon loss | 2247 | IEEE80211_STA_CONNECTION_POLL); |
2352 | * test to trigger. | 2248 | |
2353 | */ | 2249 | /* let's probe the connection once */ |
2354 | sdata->u.mgd.last_beacon = jiffies; | 2250 | ieee80211_queue_work(&sdata->local->hw, |
2355 | 2251 | &sdata->u.mgd.monitor_work); | |
2356 | 2252 | /* and do all the other regular work too */ | |
2357 | queue_work(sdata->local->hw.workqueue, | 2253 | ieee80211_queue_work(&sdata->local->hw, |
2358 | &sdata->u.mgd.work); | 2254 | &sdata->u.mgd.work); |
2359 | } | 2255 | } |
2360 | } | 2256 | } |
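
The hunk above replaces the old periodic association check with explicit beacon/connection polling: while either poll flag is set, the work function re-arms a probe timeout, retries up to IEEE80211_MAX_PROBE_TRIES, and only then tears the association down (sending the deauth after dropping ifmgd->mtx). Below is a minimal userspace sketch of that retry-then-disconnect loop; the names (probe_state, probe_tick, PROBE_WAIT_MS, MAX_PROBE_TRIES value) and the millisecond arithmetic standing in for jiffies are invented for illustration, this is not mac80211 code.

    /* sketch: retry-then-disconnect probe logic, userspace model */
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_PROBE_TRIES 5      /* stands in for IEEE80211_MAX_PROBE_TRIES (assumed value) */
    #define PROBE_WAIT_MS   200    /* stands in for IEEE80211_PROBE_WAIT */

    struct probe_state {
        unsigned long timeout_ms;  /* absolute deadline for the current probe */
        int send_count;            /* probes sent without a response */
        bool associated;
    };

    /* returns true if the caller should re-arm its timer for st->timeout_ms */
    static bool probe_tick(struct probe_state *st, unsigned long now_ms)
    {
        if (!st->associated)
            return false;

        if (now_ms < st->timeout_ms)
            return true;                     /* deadline not reached: just re-arm */

        if (st->send_count < MAX_PROBE_TRIES) {
            st->send_count++;                /* send another probe request */
            st->timeout_ms = now_ms + PROBE_WAIT_MS;
            printf("probe #%d sent, next check at %lums\n",
                   st->send_count, st->timeout_ms);
            return true;
        }

        /* no response after MAX_PROBE_TRIES: give up and disassociate */
        printf("no probe response after %d tries, disconnecting\n", st->send_count);
        st->associated = false;
        return false;
    }

    int main(void)
    {
        struct probe_state st = { .timeout_ms = 0, .send_count = 0, .associated = true };

        /* a received probe response would reset send_count; not modelled here */
        for (unsigned long t = 0; st.associated; t += PROBE_WAIT_MS)
            probe_tick(&st, t);
        return 0;
    }
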
@@ -2378,6 +2274,11 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) | |||
2378 | cancel_work_sync(&ifmgd->chswitch_work); | 2274 | cancel_work_sync(&ifmgd->chswitch_work); |
2379 | if (del_timer_sync(&ifmgd->chswitch_timer)) | 2275 | if (del_timer_sync(&ifmgd->chswitch_timer)) |
2380 | set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); | 2276 | set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); |
2277 | |||
2278 | cancel_work_sync(&ifmgd->monitor_work); | ||
2279 | /* these will just be re-established on connection */ | ||
2280 | del_timer_sync(&ifmgd->conn_mon_timer); | ||
2281 | del_timer_sync(&ifmgd->bcn_mon_timer); | ||
2381 | } | 2282 | } |
2382 | 2283 | ||
2383 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) | 2284 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) |
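
ieee80211_sta_quiesce now also cancels the monitor work and deletes the two new monitor timers outright, since they are simply re-armed on the next association; the channel-switch timer instead records whether it was pending so the restart path can re-arm it. A tiny model of that "remember which timers were live across suspend" idea follows, with made-up names (stop_timer, TMR_CHANSW); it only illustrates the bookkeeping, not the kernel timer API.

    /* sketch: remember which timers were pending so resume can re-arm them */
    #include <stdbool.h>
    #include <stdio.h>

    #define TMR_CHANSW  0x1
    #define TMR_GENERAL 0x2

    struct soft_timer { bool pending; };

    /* stand-in for del_timer_sync(): stop the timer, report whether it was armed */
    static bool stop_timer(struct soft_timer *t)
    {
        bool was_pending = t->pending;
        t->pending = false;
        return was_pending;
    }

    int main(void)
    {
        struct soft_timer chansw  = { .pending = true };
        struct soft_timer general = { .pending = false };
        unsigned running = 0;

        if (stop_timer(&chansw))
            running |= TMR_CHANSW;
        if (stop_timer(&general))
            running |= TMR_GENERAL;

        /* on resume, only re-arm what was actually live before suspend */
        printf("re-arm chansw: %s, general: %s\n",
               running & TMR_CHANSW ? "yes" : "no",
               running & TMR_GENERAL ? "yes" : "no");
        return 0;
    }
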
@@ -2395,210 +2296,277 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) | |||
2395 | void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) | 2296 | void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) |
2396 | { | 2297 | { |
2397 | struct ieee80211_if_managed *ifmgd; | 2298 | struct ieee80211_if_managed *ifmgd; |
2398 | u32 hw_flags; | ||
2399 | 2299 | ||
2400 | ifmgd = &sdata->u.mgd; | 2300 | ifmgd = &sdata->u.mgd; |
2401 | INIT_WORK(&ifmgd->work, ieee80211_sta_work); | 2301 | INIT_WORK(&ifmgd->work, ieee80211_sta_work); |
2302 | INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); | ||
2402 | INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); | 2303 | INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); |
2403 | INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work); | 2304 | INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work); |
2404 | setup_timer(&ifmgd->timer, ieee80211_sta_timer, | 2305 | setup_timer(&ifmgd->timer, ieee80211_sta_timer, |
2405 | (unsigned long) sdata); | 2306 | (unsigned long) sdata); |
2307 | setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, | ||
2308 | (unsigned long) sdata); | ||
2309 | setup_timer(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, | ||
2310 | (unsigned long) sdata); | ||
2406 | setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, | 2311 | setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, |
2407 | (unsigned long) sdata); | 2312 | (unsigned long) sdata); |
2408 | skb_queue_head_init(&ifmgd->skb_queue); | 2313 | skb_queue_head_init(&ifmgd->skb_queue); |
2409 | 2314 | ||
2315 | INIT_LIST_HEAD(&ifmgd->work_list); | ||
2316 | |||
2410 | ifmgd->capab = WLAN_CAPABILITY_ESS; | 2317 | ifmgd->capab = WLAN_CAPABILITY_ESS; |
2411 | ifmgd->auth_algs = IEEE80211_AUTH_ALG_OPEN | | 2318 | ifmgd->flags = 0; |
2412 | IEEE80211_AUTH_ALG_SHARED_KEY; | ||
2413 | ifmgd->flags |= IEEE80211_STA_CREATE_IBSS | | ||
2414 | IEEE80211_STA_AUTO_BSSID_SEL | | ||
2415 | IEEE80211_STA_AUTO_CHANNEL_SEL; | ||
2416 | if (sdata->local->hw.queues >= 4) | 2319 | if (sdata->local->hw.queues >= 4) |
2417 | ifmgd->flags |= IEEE80211_STA_WMM_ENABLED; | 2320 | ifmgd->flags |= IEEE80211_STA_WMM_ENABLED; |
2418 | 2321 | ||
2419 | hw_flags = sdata->local->hw.flags; | 2322 | mutex_init(&ifmgd->mtx); |
2420 | |||
2421 | if (hw_flags & IEEE80211_HW_SUPPORTS_PS) { | ||
2422 | ifmgd->powersave = CONFIG_MAC80211_DEFAULT_PS_VALUE; | ||
2423 | sdata->local->hw.conf.dynamic_ps_timeout = 500; | ||
2424 | } | ||
2425 | } | 2323 | } |
2426 | 2324 | ||
2427 | /* configuration hooks */ | 2325 | /* scan finished notification */ |
2428 | void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata) | 2326 | void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) |
2429 | { | 2327 | { |
2430 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2328 | struct ieee80211_sub_if_data *sdata = local->scan_sdata; |
2431 | struct ieee80211_local *local = sdata->local; | ||
2432 | |||
2433 | if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) | ||
2434 | return; | ||
2435 | |||
2436 | if ((ifmgd->flags & (IEEE80211_STA_BSSID_SET | | ||
2437 | IEEE80211_STA_AUTO_BSSID_SEL)) && | ||
2438 | (ifmgd->flags & (IEEE80211_STA_SSID_SET | | ||
2439 | IEEE80211_STA_AUTO_SSID_SEL))) { | ||
2440 | |||
2441 | if (ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) | ||
2442 | ieee80211_set_disassoc(sdata, true, true, | ||
2443 | WLAN_REASON_DEAUTH_LEAVING); | ||
2444 | |||
2445 | if (ifmgd->ssid_len == 0) { | ||
2446 | /* | ||
2447 | * Only allow association to be started if a valid SSID | ||
2448 | * is configured. | ||
2449 | */ | ||
2450 | return; | ||
2451 | } | ||
2452 | 2329 | ||
2453 | if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) || | 2330 | /* Restart STA timers */ |
2454 | ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE) | 2331 | rcu_read_lock(); |
2455 | set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request); | 2332 | list_for_each_entry_rcu(sdata, &local->interfaces, list) |
2456 | else if (ifmgd->flags & IEEE80211_STA_EXT_SME) | 2333 | ieee80211_restart_sta_timer(sdata); |
2457 | set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request); | 2334 | rcu_read_unlock(); |
2458 | queue_work(local->hw.workqueue, &ifmgd->work); | ||
2459 | } | ||
2460 | } | 2335 | } |
2461 | 2336 | ||
2462 | int ieee80211_sta_commit(struct ieee80211_sub_if_data *sdata) | 2337 | int ieee80211_max_network_latency(struct notifier_block *nb, |
2338 | unsigned long data, void *dummy) | ||
2463 | { | 2339 | { |
2464 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2340 | s32 latency_usec = (s32) data; |
2341 | struct ieee80211_local *local = | ||
2342 | container_of(nb, struct ieee80211_local, | ||
2343 | network_latency_notifier); | ||
2465 | 2344 | ||
2466 | if (ifmgd->ssid_len) | 2345 | mutex_lock(&local->iflist_mtx); |
2467 | ifmgd->flags |= IEEE80211_STA_SSID_SET; | 2346 | ieee80211_recalc_ps(local, latency_usec); |
2468 | else | 2347 | mutex_unlock(&local->iflist_mtx); |
2469 | ifmgd->flags &= ~IEEE80211_STA_SSID_SET; | ||
2470 | 2348 | ||
2471 | return 0; | 2349 | return 0; |
2472 | } | 2350 | } |
2473 | 2351 | ||
2474 | int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len) | 2352 | /* config hooks */ |
2353 | int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, | ||
2354 | struct cfg80211_auth_request *req) | ||
2475 | { | 2355 | { |
2476 | struct ieee80211_if_managed *ifmgd; | 2356 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2357 | const u8 *ssid; | ||
2358 | struct ieee80211_mgd_work *wk; | ||
2359 | u16 auth_alg; | ||
2477 | 2360 | ||
2478 | if (len > IEEE80211_MAX_SSID_LEN) | 2361 | switch (req->auth_type) { |
2479 | return -EINVAL; | 2362 | case NL80211_AUTHTYPE_OPEN_SYSTEM: |
2363 | auth_alg = WLAN_AUTH_OPEN; | ||
2364 | break; | ||
2365 | case NL80211_AUTHTYPE_SHARED_KEY: | ||
2366 | auth_alg = WLAN_AUTH_SHARED_KEY; | ||
2367 | break; | ||
2368 | case NL80211_AUTHTYPE_FT: | ||
2369 | auth_alg = WLAN_AUTH_FT; | ||
2370 | break; | ||
2371 | case NL80211_AUTHTYPE_NETWORK_EAP: | ||
2372 | auth_alg = WLAN_AUTH_LEAP; | ||
2373 | break; | ||
2374 | default: | ||
2375 | return -EOPNOTSUPP; | ||
2376 | } | ||
2480 | 2377 | ||
2481 | ifmgd = &sdata->u.mgd; | 2378 | wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL); |
2379 | if (!wk) | ||
2380 | return -ENOMEM; | ||
2482 | 2381 | ||
2483 | if (ifmgd->ssid_len != len || memcmp(ifmgd->ssid, ssid, len) != 0) { | 2382 | wk->bss = (void *)req->bss; |
2484 | if (ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) | ||
2485 | ieee80211_set_disassoc(sdata, true, true, | ||
2486 | WLAN_REASON_DEAUTH_LEAVING); | ||
2487 | 2383 | ||
2488 | /* | 2384 | if (req->ie && req->ie_len) { |
2489 | * Do not use reassociation if SSID is changed (different ESS). | 2385 | memcpy(wk->ie, req->ie, req->ie_len); |
2490 | */ | 2386 | wk->ie_len = req->ie_len; |
2491 | ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET; | ||
2492 | memset(ifmgd->ssid, 0, sizeof(ifmgd->ssid)); | ||
2493 | memcpy(ifmgd->ssid, ssid, len); | ||
2494 | ifmgd->ssid_len = len; | ||
2495 | } | 2387 | } |
2496 | 2388 | ||
2497 | return ieee80211_sta_commit(sdata); | 2389 | if (req->key && req->key_len) { |
2498 | } | 2390 | wk->key_len = req->key_len; |
2391 | wk->key_idx = req->key_idx; | ||
2392 | memcpy(wk->key, req->key, req->key_len); | ||
2393 | } | ||
2499 | 2394 | ||
2500 | int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len) | 2395 | ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); |
2501 | { | 2396 | memcpy(wk->ssid, ssid + 2, ssid[1]); |
2502 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2397 | wk->ssid_len = ssid[1]; |
2503 | memcpy(ssid, ifmgd->ssid, ifmgd->ssid_len); | 2398 | |
2504 | *len = ifmgd->ssid_len; | 2399 | wk->state = IEEE80211_MGD_STATE_PROBE; |
2400 | wk->auth_alg = auth_alg; | ||
2401 | wk->timeout = jiffies; /* run right away */ | ||
2402 | |||
2403 | /* | ||
2404 | * XXX: if still associated need to tell AP that we're going | ||
2405 | * to sleep and then change channel etc. | ||
2406 | */ | ||
2407 | sdata->local->oper_channel = req->bss->channel; | ||
2408 | ieee80211_hw_config(sdata->local, 0); | ||
2409 | |||
2410 | mutex_lock(&ifmgd->mtx); | ||
2411 | list_add(&wk->list, &sdata->u.mgd.work_list); | ||
2412 | mutex_unlock(&ifmgd->mtx); | ||
2413 | |||
2414 | ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work); | ||
2505 | return 0; | 2415 | return 0; |
2506 | } | 2416 | } |
2507 | 2417 | ||
2508 | int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid) | 2418 | int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, |
2419 | struct cfg80211_assoc_request *req) | ||
2509 | { | 2420 | { |
2510 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2421 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2422 | struct ieee80211_mgd_work *wk, *found = NULL; | ||
2423 | int i, err; | ||
2511 | 2424 | ||
2512 | if (compare_ether_addr(bssid, ifmgd->bssid) != 0 && | 2425 | mutex_lock(&ifmgd->mtx); |
2513 | ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) | ||
2514 | ieee80211_set_disassoc(sdata, true, true, | ||
2515 | WLAN_REASON_DEAUTH_LEAVING); | ||
2516 | 2426 | ||
2517 | if (is_valid_ether_addr(bssid)) { | 2427 | list_for_each_entry(wk, &ifmgd->work_list, list) { |
2518 | memcpy(ifmgd->bssid, bssid, ETH_ALEN); | 2428 | if (&wk->bss->cbss == req->bss && |
2519 | ifmgd->flags |= IEEE80211_STA_BSSID_SET; | 2429 | wk->state == IEEE80211_MGD_STATE_IDLE) { |
2520 | } else { | 2430 | found = wk; |
2521 | memset(ifmgd->bssid, 0, ETH_ALEN); | 2431 | break; |
2522 | ifmgd->flags &= ~IEEE80211_STA_BSSID_SET; | 2432 | } |
2523 | } | 2433 | } |
2524 | 2434 | ||
2525 | return ieee80211_sta_commit(sdata); | 2435 | if (!found) { |
2526 | } | 2436 | err = -ENOLINK; |
2437 | goto out; | ||
2438 | } | ||
2527 | 2439 | ||
2528 | int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, | 2440 | list_del(&found->list); |
2529 | const char *ie, size_t len) | ||
2530 | { | ||
2531 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
2532 | 2441 | ||
2533 | if (len == 0 && ifmgd->extra_ie_len == 0) | 2442 | wk = krealloc(found, sizeof(*wk) + req->ie_len, GFP_KERNEL); |
2534 | return -EALREADY; | 2443 | if (!wk) { |
2444 | list_add(&found->list, &ifmgd->work_list); | ||
2445 | err = -ENOMEM; | ||
2446 | goto out; | ||
2447 | } | ||
2535 | 2448 | ||
2536 | if (len == ifmgd->extra_ie_len && ifmgd->extra_ie && | 2449 | list_add(&wk->list, &ifmgd->work_list); |
2537 | memcmp(ifmgd->extra_ie, ie, len) == 0) | ||
2538 | return -EALREADY; | ||
2539 | 2450 | ||
2540 | kfree(ifmgd->extra_ie); | 2451 | ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; |
2541 | if (len == 0) { | 2452 | |
2542 | ifmgd->extra_ie = NULL; | 2453 | for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) |
2543 | ifmgd->extra_ie_len = 0; | 2454 | if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || |
2544 | return 0; | 2455 | req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || |
2545 | } | 2456 | req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) |
2546 | ifmgd->extra_ie = kmalloc(len, GFP_KERNEL); | 2457 | ifmgd->flags |= IEEE80211_STA_DISABLE_11N; |
2547 | if (!ifmgd->extra_ie) { | 2458 | |
2548 | ifmgd->extra_ie_len = 0; | 2459 | sdata->local->oper_channel = req->bss->channel; |
2549 | return -ENOMEM; | 2460 | ieee80211_hw_config(sdata->local, 0); |
2461 | |||
2462 | if (req->ie && req->ie_len) { | ||
2463 | memcpy(wk->ie, req->ie, req->ie_len); | ||
2464 | wk->ie_len = req->ie_len; | ||
2465 | } else | ||
2466 | wk->ie_len = 0; | ||
2467 | |||
2468 | if (req->prev_bssid) | ||
2469 | memcpy(wk->prev_bssid, req->prev_bssid, ETH_ALEN); | ||
2470 | |||
2471 | wk->state = IEEE80211_MGD_STATE_ASSOC; | ||
2472 | wk->tries = 0; | ||
2473 | wk->timeout = jiffies; /* run right away */ | ||
2474 | |||
2475 | if (req->use_mfp) { | ||
2476 | ifmgd->mfp = IEEE80211_MFP_REQUIRED; | ||
2477 | ifmgd->flags |= IEEE80211_STA_MFP_ENABLED; | ||
2478 | } else { | ||
2479 | ifmgd->mfp = IEEE80211_MFP_DISABLED; | ||
2480 | ifmgd->flags &= ~IEEE80211_STA_MFP_ENABLED; | ||
2550 | } | 2481 | } |
2551 | memcpy(ifmgd->extra_ie, ie, len); | ||
2552 | ifmgd->extra_ie_len = len; | ||
2553 | return 0; | ||
2554 | } | ||
2555 | 2482 | ||
2556 | int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason) | 2483 | if (req->crypto.control_port) |
2557 | { | 2484 | ifmgd->flags |= IEEE80211_STA_CONTROL_PORT; |
2558 | printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n", | 2485 | else |
2559 | sdata->dev->name, reason); | 2486 | ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT; |
2560 | 2487 | ||
2561 | ieee80211_set_disassoc(sdata, true, true, reason); | 2488 | ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work); |
2562 | return 0; | 2489 | |
2490 | err = 0; | ||
2491 | |||
2492 | out: | ||
2493 | mutex_unlock(&ifmgd->mtx); | ||
2494 | return err; | ||
2563 | } | 2495 | } |
2564 | 2496 | ||
2565 | int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason) | 2497 | int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, |
2498 | struct cfg80211_deauth_request *req, | ||
2499 | void *cookie) | ||
2566 | { | 2500 | { |
2567 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2501 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2502 | struct ieee80211_mgd_work *wk; | ||
2503 | const u8 *bssid = NULL; | ||
2568 | 2504 | ||
2569 | printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n", | 2505 | printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n", |
2570 | sdata->dev->name, reason); | 2506 | sdata->dev->name, req->reason_code); |
2507 | |||
2508 | mutex_lock(&ifmgd->mtx); | ||
2509 | |||
2510 | if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) { | ||
2511 | bssid = req->bss->bssid; | ||
2512 | ieee80211_set_disassoc(sdata, true); | ||
2513 | } else list_for_each_entry(wk, &ifmgd->work_list, list) { | ||
2514 | if (&wk->bss->cbss == req->bss) { | ||
2515 | bssid = req->bss->bssid; | ||
2516 | list_del(&wk->list); | ||
2517 | kfree(wk); | ||
2518 | break; | ||
2519 | } | ||
2520 | } | ||
2571 | 2521 | ||
2572 | if (!(ifmgd->flags & IEEE80211_STA_ASSOCIATED)) | 2522 | /* |
2523 | * cfg80211 should catch this ... but it's racy since | ||
2524 | * we can receive a deauth frame, process it, hand it | ||
2525 | * to cfg80211 while that's in a locked section already | ||
2526 | * trying to tell us that the user wants to disconnect. | ||
2527 | */ | ||
2528 | if (!bssid) { | ||
2529 | mutex_unlock(&ifmgd->mtx); | ||
2573 | return -ENOLINK; | 2530 | return -ENOLINK; |
2531 | } | ||
2532 | |||
2533 | mutex_unlock(&ifmgd->mtx); | ||
2534 | |||
2535 | ieee80211_send_deauth_disassoc(sdata, bssid, | ||
2536 | IEEE80211_STYPE_DEAUTH, req->reason_code, | ||
2537 | cookie); | ||
2574 | 2538 | ||
2575 | ieee80211_set_disassoc(sdata, false, true, reason); | ||
2576 | return 0; | 2539 | return 0; |
2577 | } | 2540 | } |
2578 | 2541 | ||
2579 | /* scan finished notification */ | 2542 | int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, |
2580 | void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) | 2543 | struct cfg80211_disassoc_request *req, |
2544 | void *cookie) | ||
2581 | { | 2545 | { |
2582 | struct ieee80211_sub_if_data *sdata = local->scan_sdata; | 2546 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2583 | 2547 | ||
2584 | /* Restart STA timers */ | 2548 | printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n", |
2585 | rcu_read_lock(); | 2549 | sdata->dev->name, req->reason_code); |
2586 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
2587 | ieee80211_restart_sta_timer(sdata); | ||
2588 | rcu_read_unlock(); | ||
2589 | } | ||
2590 | 2550 | ||
2591 | int ieee80211_max_network_latency(struct notifier_block *nb, | 2551 | mutex_lock(&ifmgd->mtx); |
2592 | unsigned long data, void *dummy) | ||
2593 | { | ||
2594 | s32 latency_usec = (s32) data; | ||
2595 | struct ieee80211_local *local = | ||
2596 | container_of(nb, struct ieee80211_local, | ||
2597 | network_latency_notifier); | ||
2598 | 2552 | ||
2599 | mutex_lock(&local->iflist_mtx); | 2553 | /* |
2600 | ieee80211_recalc_ps(local, latency_usec); | 2554 | * cfg80211 should catch this ... but it's racy since |
2601 | mutex_unlock(&local->iflist_mtx); | 2555 | * we can receive a disassoc frame, process it, hand it |
2556 | * to cfg80211 while that's in a locked section already | ||
2557 | * trying to tell us that the user wants to disconnect. | ||
2558 | */ | ||
2559 | if (&ifmgd->associated->cbss != req->bss) { | ||
2560 | mutex_unlock(&ifmgd->mtx); | ||
2561 | return -ENOLINK; | ||
2562 | } | ||
2563 | |||
2564 | ieee80211_set_disassoc(sdata, false); | ||
2565 | |||
2566 | mutex_unlock(&ifmgd->mtx); | ||
2602 | 2567 | ||
2568 | ieee80211_send_deauth_disassoc(sdata, req->bss->bssid, | ||
2569 | IEEE80211_STYPE_DISASSOC, req->reason_code, | ||
2570 | cookie); | ||
2603 | return 0; | 2571 | return 0; |
2604 | } | 2572 | } |
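
Taken together, the mlme.c changes in this merge replace the old SSID/BSSID state machine with per-BSS work items (struct ieee80211_mgd_work) kept on ifmgd->work_list and processed under ifmgd->mtx; items that time out are collected on a local list so the cfg80211 auth/assoc timeout notifications run only after the mutex is dropped. Below is a hedged, userspace-only sketch of that "collect under the lock, notify outside it" pattern; the names (mgd_work, work_process, notify_timeout) and the three-try limit are invented for illustration.

    /* sketch: process work items under a lock, defer notifications until after unlock */
    /* build: cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum wk_state { WK_AUTH, WK_ASSOC };

    struct mgd_work {
        enum wk_state state;
        int tries;
        struct mgd_work *next;
    };

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static struct mgd_work *work_list;

    static void notify_timeout(struct mgd_work *wk)  /* stands in for cfg80211_send_*_timeout() */
    {
        printf("timeout reported for work item in state %d\n", wk->state);
    }

    static void work_process(void)
    {
        struct mgd_work *wk, **pp, *timed_out = NULL;

        pthread_mutex_lock(&mtx);
        pp = &work_list;
        while ((wk = *pp)) {
            if (++wk->tries > 3) {
                *pp = wk->next;          /* unlink: this one has run out of tries */
                wk->next = timed_out;
                timed_out = wk;
                continue;
            }
            printf("still working on item (state %d, try %d)\n", wk->state, wk->tries);
            pp = &wk->next;
        }
        pthread_mutex_unlock(&mtx);

        /* notifications may call back into us, so run them without the lock held */
        while ((wk = timed_out)) {
            timed_out = wk->next;
            notify_timeout(wk);
            free(wk);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 2; i++) {
            struct mgd_work *wk = calloc(1, sizeof(*wk));
            wk->state = i ? WK_ASSOC : WK_AUTH;
            wk->next = work_list;
            work_list = wk;
        }
        for (int round = 0; round < 5; round++)
            work_process();
        return 0;
    }

The design point mirrored here is the one spelled out in the diff's own comments: the cfg80211 calls must not run under ifmgd->mtx, so the lock is only held while deciding which items are done.
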
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 5e3d476972f9..e535f1c988fe 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c | |||
@@ -26,7 +26,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) | |||
26 | /* make quiescing visible to timers everywhere */ | 26 | /* make quiescing visible to timers everywhere */ |
27 | mb(); | 27 | mb(); |
28 | 28 | ||
29 | flush_workqueue(local->hw.workqueue); | 29 | flush_workqueue(local->workqueue); |
30 | 30 | ||
31 | /* Don't try to run timers while suspended. */ | 31 | /* Don't try to run timers while suspended. */ |
32 | del_timer_sync(&local->sta_cleanup); | 32 | del_timer_sync(&local->sta_cleanup); |
@@ -96,6 +96,10 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) | |||
96 | if (!netif_running(sdata->dev)) | 96 | if (!netif_running(sdata->dev)) |
97 | continue; | 97 | continue; |
98 | 98 | ||
99 | /* disable beaconing */ | ||
100 | ieee80211_bss_info_change_notify(sdata, | ||
101 | BSS_CHANGED_BEACON_ENABLED); | ||
102 | |||
99 | conf.vif = &sdata->vif; | 103 | conf.vif = &sdata->vif; |
100 | conf.type = sdata->vif.type; | 104 | conf.type = sdata->vif.type; |
101 | conf.mac_addr = sdata->dev->dev_addr; | 105 | conf.mac_addr = sdata->dev->dev_addr; |
@@ -103,17 +107,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) | |||
103 | } | 107 | } |
104 | 108 | ||
105 | /* stop hardware - this must stop RX */ | 109 | /* stop hardware - this must stop RX */ |
106 | if (local->open_count) { | 110 | if (local->open_count) |
107 | ieee80211_led_radio(local, false); | 111 | ieee80211_stop_device(local); |
108 | drv_stop(local); | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * flush again, in case driver queued work -- it | ||
113 | * shouldn't be doing (or cancel everything in the | ||
114 | * stop callback) that but better safe than sorry. | ||
115 | */ | ||
116 | flush_workqueue(local->hw.workqueue); | ||
117 | 112 | ||
118 | local->suspended = true; | 113 | local->suspended = true; |
119 | /* need suspended to be visible before quiescing is false */ | 114 | /* need suspended to be visible before quiescing is false */ |
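
The pm.c hunks disable beaconing per interface and fold the LED/driver-stop calls into ieee80211_stop_device(); the ordering that remains essential is publishing the quiescing flag first, then flushing deferred work so nothing re-arms after the flush. That ordering is sketched below with C11 atomics; quiesce_flag, timer_cb and drain_pending are invented stand-ins, and a real workqueue flush is of course more involved than a counter.

    /* sketch: publish a quiescing flag before draining deferred work */
    /* build: cc -std=c11 sketch.c */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool quiesce_flag;
    static int pending_work = 3;       /* pretend three deferred items are queued */

    /* deferred work and timer callbacks bail out once quiescing is visible */
    static void timer_cb(void)
    {
        if (atomic_load(&quiesce_flag))
            return;                    /* mirrors the local->quiescing checks in mlme.c */
        pending_work++;                /* would otherwise queue more work */
    }

    static void drain_pending(void)
    {
        while (pending_work > 0) {
            pending_work--;
            printf("ran one deferred item, %d left\n", pending_work);
        }
    }

    int main(void)
    {
        /* 1. make quiescing visible everywhere (the kernel uses mb() for this) */
        atomic_store(&quiesce_flag, true);

        /* 2. anything that fires now refuses to queue more work */
        timer_cb();

        /* 3. flush what was already queued, then the device can be stopped */
        drain_pending();
        printf("device stopped\n");
        return 0;
    }
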
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 4641f00a1e5c..b33efc4fc267 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c | |||
@@ -198,6 +198,35 @@ static void rate_control_release(struct kref *kref) | |||
198 | kfree(ctrl_ref); | 198 | kfree(ctrl_ref); |
199 | } | 199 | } |
200 | 200 | ||
201 | static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc) | ||
202 | { | ||
203 | struct sk_buff *skb = txrc->skb; | ||
204 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
205 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
206 | __le16 fc; | ||
207 | |||
208 | fc = hdr->frame_control; | ||
209 | |||
210 | return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc)); | ||
211 | } | ||
212 | |||
213 | bool rate_control_send_low(struct ieee80211_sta *sta, | ||
214 | void *priv_sta, | ||
215 | struct ieee80211_tx_rate_control *txrc) | ||
216 | { | ||
217 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); | ||
218 | |||
219 | if (!sta || !priv_sta || rc_no_data_or_no_ack(txrc)) { | ||
220 | info->control.rates[0].idx = rate_lowest_index(txrc->sband, sta); | ||
221 | info->control.rates[0].count = | ||
222 | (info->flags & IEEE80211_TX_CTL_NO_ACK) ? | ||
223 | 1 : txrc->hw->max_rate_tries; | ||
224 | return true; | ||
225 | } | ||
226 | return false; | ||
227 | } | ||
228 | EXPORT_SYMBOL(rate_control_send_low); | ||
229 | |||
201 | void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, | 230 | void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, |
202 | struct sta_info *sta, | 231 | struct sta_info *sta, |
203 | struct ieee80211_tx_rate_control *txrc) | 232 | struct ieee80211_tx_rate_control *txrc) |
@@ -258,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, | |||
258 | struct rate_control_ref *ref, *old; | 287 | struct rate_control_ref *ref, *old; |
259 | 288 | ||
260 | ASSERT_RTNL(); | 289 | ASSERT_RTNL(); |
261 | if (local->open_count || netif_running(local->mdev)) | 290 | if (local->open_count) |
262 | return -EBUSY; | 291 | return -EBUSY; |
263 | 292 | ||
264 | ref = rate_control_alloc(name, local); | 293 | ref = rate_control_alloc(name, local); |
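
rate.c grows a shared rate_control_send_low() helper so the minstrel and PID hunks further down no longer each special-case management and NO_ACK frames. A rough userspace rendering of that factoring follows; struct txinfo, pick_lowest_rate and algorithm_get_rate are made-up stand-ins for the mac80211 structures and rate-control callbacks.

    /* sketch: shared lowest-rate fallback, factored out of individual algorithms */
    #include <stdbool.h>
    #include <stdio.h>

    struct txinfo {
        bool is_data;       /* data frame vs management/control */
        bool no_ack;        /* equivalent of IEEE80211_TX_CTL_NO_ACK */
        bool have_sta;      /* do we have per-station rate state? */
        int rate_idx;       /* chosen rate index (output) */
        int tries;          /* retry count for that rate (output) */
    };

    static int pick_lowest_rate(void) { return 0; }  /* index 0 = most robust rate */

    /* returns true if it already picked a rate and the algorithm should not run */
    static bool rate_control_send_low(struct txinfo *tx, int max_tries)
    {
        if (!tx->have_sta || tx->no_ack || !tx->is_data) {
            tx->rate_idx = pick_lowest_rate();
            tx->tries = tx->no_ack ? 1 : max_tries;
            return true;
        }
        return false;
    }

    static void algorithm_get_rate(struct txinfo *tx)
    {
        if (rate_control_send_low(tx, 3))
            return;             /* fallback handled it, as minstrel/PID now do first */
        tx->rate_idx = 7;       /* pretend the algorithm picked a data rate */
        tx->tries = 3;
    }

    int main(void)
    {
        struct txinfo mgmt = { .is_data = false, .have_sta = true };
        struct txinfo data = { .is_data = true,  .have_sta = true };

        algorithm_get_rate(&mgmt);
        algorithm_get_rate(&data);
        printf("mgmt -> rate %d (%d tries), data -> rate %d (%d tries)\n",
               mgmt.rate_idx, mgmt.tries, data.rate_idx, data.tries);
        return 0;
    }

The usage convention is visible in the minstrel and PID diffs below: each get_rate handler calls the helper first and returns immediately when it handled the frame.
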
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 37771abd8f5a..7c5142988bbb 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c | |||
@@ -70,20 +70,6 @@ rix_to_ndx(struct minstrel_sta_info *mi, int rix) | |||
70 | return i; | 70 | return i; |
71 | } | 71 | } |
72 | 72 | ||
73 | static inline bool | ||
74 | use_low_rate(struct sk_buff *skb) | ||
75 | { | ||
76 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
77 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
78 | u16 fc; | ||
79 | |||
80 | fc = le16_to_cpu(hdr->frame_control); | ||
81 | |||
82 | return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || | ||
83 | (fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA); | ||
84 | } | ||
85 | |||
86 | |||
87 | static void | 73 | static void |
88 | minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi) | 74 | minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi) |
89 | { | 75 | { |
@@ -232,7 +218,6 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, | |||
232 | void *priv_sta, struct ieee80211_tx_rate_control *txrc) | 218 | void *priv_sta, struct ieee80211_tx_rate_control *txrc) |
233 | { | 219 | { |
234 | struct sk_buff *skb = txrc->skb; | 220 | struct sk_buff *skb = txrc->skb; |
235 | struct ieee80211_supported_band *sband = txrc->sband; | ||
236 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 221 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
237 | struct minstrel_sta_info *mi = priv_sta; | 222 | struct minstrel_sta_info *mi = priv_sta; |
238 | struct minstrel_priv *mp = priv; | 223 | struct minstrel_priv *mp = priv; |
@@ -245,14 +230,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, | |||
245 | int mrr_ndx[3]; | 230 | int mrr_ndx[3]; |
246 | int sample_rate; | 231 | int sample_rate; |
247 | 232 | ||
248 | if (!sta || !mi || use_low_rate(skb)) { | 233 | if (rate_control_send_low(sta, priv_sta, txrc)) |
249 | ar[0].idx = rate_lowest_index(sband, sta); | ||
250 | if (info->flags & IEEE80211_TX_CTL_NO_ACK) | ||
251 | ar[0].count = 1; | ||
252 | else | ||
253 | ar[0].count = mp->max_retry; | ||
254 | return; | 234 | return; |
255 | } | ||
256 | 235 | ||
257 | mrr = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot; | 236 | mrr = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot; |
258 | 237 | ||
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h index 869fe0ef951d..38bf4168fc3a 100644 --- a/net/mac80211/rc80211_minstrel.h +++ b/net/mac80211/rc80211_minstrel.h | |||
@@ -33,7 +33,6 @@ struct minstrel_rate { | |||
33 | 33 | ||
34 | /* per-rate throughput */ | 34 | /* per-rate throughput */ |
35 | u32 cur_tp; | 35 | u32 cur_tp; |
36 | u32 throughput; | ||
37 | 36 | ||
38 | u64 succ_hist; | 37 | u64 succ_hist; |
39 | u64 att_hist; | 38 | u64 att_hist; |
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index 98f480708050..a715d9454f64 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c | |||
@@ -83,7 +83,7 @@ minstrel_stats_open(struct inode *inode, struct file *file) | |||
83 | p += sprintf(p, "%3u%s", mr->bitrate / 2, | 83 | p += sprintf(p, "%3u%s", mr->bitrate / 2, |
84 | (mr->bitrate & 1 ? ".5" : " ")); | 84 | (mr->bitrate & 1 ? ".5" : " ")); |
85 | 85 | ||
86 | tp = ((mr->cur_tp * 96) / 18000) >> 10; | 86 | tp = mr->cur_tp / ((18000 << 10) / 96); |
87 | prob = mr->cur_prob / 18; | 87 | prob = mr->cur_prob / 18; |
88 | eprob = mr->probability / 18; | 88 | eprob = mr->probability / 18; |
89 | 89 | ||
@@ -139,7 +139,7 @@ minstrel_stats_release(struct inode *inode, struct file *file) | |||
139 | return 0; | 139 | return 0; |
140 | } | 140 | } |
141 | 141 | ||
142 | static struct file_operations minstrel_stat_fops = { | 142 | static const struct file_operations minstrel_stat_fops = { |
143 | .owner = THIS_MODULE, | 143 | .owner = THIS_MODULE, |
144 | .open = minstrel_stats_open, | 144 | .open = minstrel_stats_open, |
145 | .read = minstrel_stats_read, | 145 | .read = minstrel_stats_read, |
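
The one-line rc80211_minstrel_debugfs.c change reorders the fixed-point conversion of cur_tp: instead of multiplying cur_tp by 96 first, it divides by the precomputed constant (18000 << 10) / 96 == 192000, which sidesteps a possible 32-bit overflow of the intermediate product for large throughput values while giving essentially the same figure. A self-contained check of the two expressions, using an assumed (deliberately large) cur_tp:

    /* sketch: compare the old and new fixed-point throughput conversions */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t cur_tp = 90000000u;   /* assumed large internal throughput value */

        /* old form: the 32-bit product cur_tp * 96 can wrap for large cur_tp */
        uint32_t old_tp = ((cur_tp * 96) / 18000) >> 10;

        /* new form: divide by the constant 192000 == (18000 << 10) / 96, no overflow */
        uint32_t new_tp = cur_tp / ((18000u << 10) / 96);

        /* same computation done in 64 bits, as a reference */
        uint64_t ref = ((uint64_t)cur_tp * 96 / 18000) >> 10;

        printf("old=%u new=%u reference=%llu\n",
               old_tp, new_tp, (unsigned long long)ref);
        return 0;
    }

For this input the old expression wraps and prints a bogus value, while the new one matches the 64-bit reference.
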
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index a0bef767ceb5..699d3ed869c4 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c | |||
@@ -169,19 +169,9 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo, | |||
169 | * still a good measurement and copy it. */ | 169 | * still a good measurement and copy it. */ |
170 | if (unlikely(spinfo->tx_num_xmit == 0)) | 170 | if (unlikely(spinfo->tx_num_xmit == 0)) |
171 | pf = spinfo->last_pf; | 171 | pf = spinfo->last_pf; |
172 | else { | 172 | else |
173 | /* XXX: BAD HACK!!! */ | ||
174 | struct sta_info *si = container_of(sta, struct sta_info, sta); | ||
175 | |||
176 | pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit; | 173 | pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit; |
177 | 174 | ||
178 | if (ieee80211_vif_is_mesh(&si->sdata->vif) && pf == 100) | ||
179 | mesh_plink_broken(si); | ||
180 | pf <<= RC_PID_ARITH_SHIFT; | ||
181 | si->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9) | ||
182 | >> RC_PID_ARITH_SHIFT; | ||
183 | } | ||
184 | |||
185 | spinfo->tx_num_xmit = 0; | 175 | spinfo->tx_num_xmit = 0; |
186 | spinfo->tx_num_failed = 0; | 176 | spinfo->tx_num_failed = 0; |
187 | 177 | ||
@@ -276,11 +266,9 @@ rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta, | |||
276 | { | 266 | { |
277 | struct sk_buff *skb = txrc->skb; | 267 | struct sk_buff *skb = txrc->skb; |
278 | struct ieee80211_supported_band *sband = txrc->sband; | 268 | struct ieee80211_supported_band *sband = txrc->sband; |
279 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
280 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 269 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
281 | struct rc_pid_sta_info *spinfo = priv_sta; | 270 | struct rc_pid_sta_info *spinfo = priv_sta; |
282 | int rateidx; | 271 | int rateidx; |
283 | u16 fc; | ||
284 | 272 | ||
285 | if (txrc->rts) | 273 | if (txrc->rts) |
286 | info->control.rates[0].count = | 274 | info->control.rates[0].count = |
@@ -290,16 +278,8 @@ rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta, | |||
290 | txrc->hw->conf.short_frame_max_tx_count; | 278 | txrc->hw->conf.short_frame_max_tx_count; |
291 | 279 | ||
292 | /* Send management frames and NO_ACK data using lowest rate. */ | 280 | /* Send management frames and NO_ACK data using lowest rate. */ |
293 | fc = le16_to_cpu(hdr->frame_control); | 281 | if (rate_control_send_low(sta, priv_sta, txrc)) |
294 | if (!sta || !spinfo || | ||
295 | (fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || | ||
296 | info->flags & IEEE80211_TX_CTL_NO_ACK) { | ||
297 | info->control.rates[0].idx = rate_lowest_index(sband, sta); | ||
298 | if (info->flags & IEEE80211_TX_CTL_NO_ACK) | ||
299 | info->control.rates[0].count = 1; | ||
300 | |||
301 | return; | 282 | return; |
302 | } | ||
303 | 283 | ||
304 | rateidx = spinfo->txrate_idx; | 284 | rateidx = spinfo->txrate_idx; |
305 | 285 | ||
@@ -321,7 +301,6 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband, | |||
321 | struct rc_pid_sta_info *spinfo = priv_sta; | 301 | struct rc_pid_sta_info *spinfo = priv_sta; |
322 | struct rc_pid_info *pinfo = priv; | 302 | struct rc_pid_info *pinfo = priv; |
323 | struct rc_pid_rateinfo *rinfo = pinfo->rinfo; | 303 | struct rc_pid_rateinfo *rinfo = pinfo->rinfo; |
324 | struct sta_info *si; | ||
325 | int i, j, tmp; | 304 | int i, j, tmp; |
326 | bool s; | 305 | bool s; |
327 | 306 | ||
@@ -358,9 +337,6 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband, | |||
358 | } | 337 | } |
359 | 338 | ||
360 | spinfo->txrate_idx = rate_lowest_index(sband, sta); | 339 | spinfo->txrate_idx = rate_lowest_index(sband, sta); |
361 | /* HACK */ | ||
362 | si = container_of(sta, struct sta_info, sta); | ||
363 | si->fail_avg = 0; | ||
364 | } | 340 | } |
365 | 341 | ||
366 | static void *rate_control_pid_alloc(struct ieee80211_hw *hw, | 342 | static void *rate_control_pid_alloc(struct ieee80211_hw *hw, |
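
With the mesh fail_avg hack removed, the PID sampler above is left with the plain per-interval failure percentage, reusing the previous sample when nothing was transmitted in the interval. A compact illustration of that sampling step follows, with invented field and function names (pid_sample, sample_pf); it is a model of the arithmetic, not the driver code.

    /* sketch: per-interval failure percentage as used by a PID-style sampler */
    #include <stdio.h>

    struct pid_sample {
        unsigned tx_num_xmit;    /* frames attempted this interval */
        unsigned tx_num_failed;  /* frames that ultimately failed */
        unsigned last_pf;        /* failure percentage of the previous interval */
    };

    static unsigned sample_pf(struct pid_sample *s)
    {
        unsigned pf;

        if (s->tx_num_xmit == 0)
            pf = s->last_pf;     /* nothing sent: keep the old measurement */
        else
            pf = s->tx_num_failed * 100 / s->tx_num_xmit;

        /* reset the counters for the next interval */
        s->tx_num_xmit = 0;
        s->tx_num_failed = 0;
        s->last_pf = pf;
        return pf;
    }

    int main(void)
    {
        struct pid_sample s = { .tx_num_xmit = 40, .tx_num_failed = 6, .last_pf = 0 };

        printf("pf = %u%%\n", sample_pf(&s));                             /* 15%% */
        printf("pf = %u%% (idle interval, reuses previous)\n", sample_pf(&s));
        return 0;
    }
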
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c index a08a9b530347..a59043fbb0ff 100644 --- a/net/mac80211/rc80211_pid_debugfs.c +++ b/net/mac80211/rc80211_pid_debugfs.c | |||
@@ -198,7 +198,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf, | |||
198 | 198 | ||
199 | #undef RC_PID_PRINT_BUF_SIZE | 199 | #undef RC_PID_PRINT_BUF_SIZE |
200 | 200 | ||
201 | static struct file_operations rc_pid_fop_events = { | 201 | static const struct file_operations rc_pid_fop_events = { |
202 | .owner = THIS_MODULE, | 202 | .owner = THIS_MODULE, |
203 | .read = rate_control_pid_events_read, | 203 | .read = rate_control_pid_events_read, |
204 | .poll = rate_control_pid_events_poll, | 204 | .poll = rate_control_pid_events_poll, |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 0936fc24942d..c01588f9d453 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -30,7 +30,6 @@ | |||
30 | static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | 30 | static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, |
31 | struct tid_ampdu_rx *tid_agg_rx, | 31 | struct tid_ampdu_rx *tid_agg_rx, |
32 | struct sk_buff *skb, | 32 | struct sk_buff *skb, |
33 | struct ieee80211_rx_status *status, | ||
34 | u16 mpdu_seq_num, | 33 | u16 mpdu_seq_num, |
35 | int bar_req); | 34 | int bar_req); |
36 | /* | 35 | /* |
@@ -59,11 +58,11 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, | |||
59 | return skb; | 58 | return skb; |
60 | } | 59 | } |
61 | 60 | ||
62 | static inline int should_drop_frame(struct ieee80211_rx_status *status, | 61 | static inline int should_drop_frame(struct sk_buff *skb, |
63 | struct sk_buff *skb, | ||
64 | int present_fcs_len, | 62 | int present_fcs_len, |
65 | int radiotap_len) | 63 | int radiotap_len) |
66 | { | 64 | { |
65 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | ||
67 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 66 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
68 | 67 | ||
69 | if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) | 68 | if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) |
@@ -111,10 +110,10 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local, | |||
111 | static void | 110 | static void |
112 | ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | 111 | ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, |
113 | struct sk_buff *skb, | 112 | struct sk_buff *skb, |
114 | struct ieee80211_rx_status *status, | ||
115 | struct ieee80211_rate *rate, | 113 | struct ieee80211_rate *rate, |
116 | int rtap_len) | 114 | int rtap_len) |
117 | { | 115 | { |
116 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | ||
118 | struct ieee80211_radiotap_header *rthdr; | 117 | struct ieee80211_radiotap_header *rthdr; |
119 | unsigned char *pos; | 118 | unsigned char *pos; |
120 | 119 | ||
@@ -220,9 +219,9 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
220 | */ | 219 | */ |
221 | static struct sk_buff * | 220 | static struct sk_buff * |
222 | ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | 221 | ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, |
223 | struct ieee80211_rx_status *status, | ||
224 | struct ieee80211_rate *rate) | 222 | struct ieee80211_rate *rate) |
225 | { | 223 | { |
224 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); | ||
226 | struct ieee80211_sub_if_data *sdata; | 225 | struct ieee80211_sub_if_data *sdata; |
227 | int needed_headroom = 0; | 226 | int needed_headroom = 0; |
228 | struct sk_buff *skb, *skb2; | 227 | struct sk_buff *skb, *skb2; |
@@ -248,8 +247,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
248 | present_fcs_len = FCS_LEN; | 247 | present_fcs_len = FCS_LEN; |
249 | 248 | ||
250 | if (!local->monitors) { | 249 | if (!local->monitors) { |
251 | if (should_drop_frame(status, origskb, present_fcs_len, | 250 | if (should_drop_frame(origskb, present_fcs_len, rtap_len)) { |
252 | rtap_len)) { | ||
253 | dev_kfree_skb(origskb); | 251 | dev_kfree_skb(origskb); |
254 | return NULL; | 252 | return NULL; |
255 | } | 253 | } |
@@ -257,7 +255,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
257 | return remove_monitor_info(local, origskb, rtap_len); | 255 | return remove_monitor_info(local, origskb, rtap_len); |
258 | } | 256 | } |
259 | 257 | ||
260 | if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) { | 258 | if (should_drop_frame(origskb, present_fcs_len, rtap_len)) { |
261 | /* only need to expand headroom if necessary */ | 259 | /* only need to expand headroom if necessary */ |
262 | skb = origskb; | 260 | skb = origskb; |
263 | origskb = NULL; | 261 | origskb = NULL; |
@@ -289,7 +287,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
289 | 287 | ||
290 | /* if necessary, prepend radiotap information */ | 288 | /* if necessary, prepend radiotap information */ |
291 | if (!(status->flag & RX_FLAG_RADIOTAP)) | 289 | if (!(status->flag & RX_FLAG_RADIOTAP)) |
292 | ieee80211_add_rx_radiotap_header(local, skb, status, rate, | 290 | ieee80211_add_rx_radiotap_header(local, skb, rate, |
293 | needed_headroom); | 291 | needed_headroom); |
294 | 292 | ||
295 | skb_reset_mac_header(skb); | 293 | skb_reset_mac_header(skb); |
@@ -420,13 +418,13 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) | |||
420 | struct ieee80211_local *local = rx->local; | 418 | struct ieee80211_local *local = rx->local; |
421 | struct sk_buff *skb = rx->skb; | 419 | struct sk_buff *skb = rx->skb; |
422 | 420 | ||
423 | if (unlikely(local->hw_scanning)) | 421 | if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning))) |
424 | return ieee80211_scan_rx(rx->sdata, skb, rx->status); | 422 | return ieee80211_scan_rx(rx->sdata, skb); |
425 | 423 | ||
426 | if (unlikely(local->sw_scanning)) { | 424 | if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning) && |
425 | (rx->flags & IEEE80211_RX_IN_SCAN))) { | ||
427 | /* drop all the other packets during a software scan anyway */ | 426 | /* drop all the other packets during a software scan anyway */ |
428 | if (ieee80211_scan_rx(rx->sdata, skb, rx->status) | 427 | if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED) |
429 | != RX_QUEUED) | ||
430 | dev_kfree_skb(skb); | 428 | dev_kfree_skb(skb); |
431 | return RX_QUEUED; | 429 | return RX_QUEUED; |
432 | } | 430 | } |
@@ -491,12 +489,21 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
491 | { | 489 | { |
492 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | 490 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
493 | unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); | 491 | unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); |
492 | char *dev_addr = rx->dev->dev_addr; | ||
494 | 493 | ||
495 | if (ieee80211_is_data(hdr->frame_control)) { | 494 | if (ieee80211_is_data(hdr->frame_control)) { |
496 | if (!ieee80211_has_a4(hdr->frame_control)) | 495 | if (is_multicast_ether_addr(hdr->addr1)) { |
497 | return RX_DROP_MONITOR; | 496 | if (ieee80211_has_tods(hdr->frame_control) || |
498 | if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0) | 497 | !ieee80211_has_fromds(hdr->frame_control)) |
499 | return RX_DROP_MONITOR; | 498 | return RX_DROP_MONITOR; |
499 | if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0) | ||
500 | return RX_DROP_MONITOR; | ||
501 | } else { | ||
502 | if (!ieee80211_has_a4(hdr->frame_control)) | ||
503 | return RX_DROP_MONITOR; | ||
504 | if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0) | ||
505 | return RX_DROP_MONITOR; | ||
506 | } | ||
500 | } | 507 | } |
501 | 508 | ||
502 | /* If there is not an established peer link and this is not a peer link | 509 | /* If there is not an established peer link and this is not a peer link |
@@ -529,7 +536,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
529 | 536 | ||
530 | if (ieee80211_is_data(hdr->frame_control) && | 537 | if (ieee80211_is_data(hdr->frame_control) && |
531 | is_multicast_ether_addr(hdr->addr1) && | 538 | is_multicast_ether_addr(hdr->addr1) && |
532 | mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata)) | 539 | mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata)) |
533 | return RX_DROP_MONITOR; | 540 | return RX_DROP_MONITOR; |
534 | #undef msh_h_get | 541 | #undef msh_h_get |
535 | 542 | ||
@@ -785,7 +792,7 @@ static void ap_sta_ps_start(struct sta_info *sta) | |||
785 | struct ieee80211_local *local = sdata->local; | 792 | struct ieee80211_local *local = sdata->local; |
786 | 793 | ||
787 | atomic_inc(&sdata->bss->num_sta_ps); | 794 | atomic_inc(&sdata->bss->num_sta_ps); |
788 | set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL); | 795 | set_sta_flags(sta, WLAN_STA_PS); |
789 | drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta); | 796 | drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta); |
790 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 797 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
791 | printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", | 798 | printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", |
@@ -801,7 +808,7 @@ static int ap_sta_ps_end(struct sta_info *sta) | |||
801 | 808 | ||
802 | atomic_dec(&sdata->bss->num_sta_ps); | 809 | atomic_dec(&sdata->bss->num_sta_ps); |
803 | 810 | ||
804 | clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL); | 811 | clear_sta_flags(sta, WLAN_STA_PS); |
805 | drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta); | 812 | drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta); |
806 | 813 | ||
807 | if (!skb_queue_empty(&sta->ps_tx_buf)) | 814 | if (!skb_queue_empty(&sta->ps_tx_buf)) |
@@ -836,28 +843,22 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
836 | if (!sta) | 843 | if (!sta) |
837 | return RX_CONTINUE; | 844 | return RX_CONTINUE; |
838 | 845 | ||
839 | /* Update last_rx only for IBSS packets which are for the current | 846 | /* |
840 | * BSSID to avoid keeping the current IBSS network alive in cases where | 847 | * Update last_rx only for IBSS packets which are for the current |
841 | * other STAs are using different BSSID. */ | 848 | * BSSID to avoid keeping the current IBSS network alive in cases |
849 | * where other STAs start using different BSSID. | ||
850 | */ | ||
842 | if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { | 851 | if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { |
843 | u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, | 852 | u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, |
844 | NL80211_IFTYPE_ADHOC); | 853 | NL80211_IFTYPE_ADHOC); |
845 | if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) | 854 | if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) |
846 | sta->last_rx = jiffies; | 855 | sta->last_rx = jiffies; |
847 | } else | 856 | } else if (!is_multicast_ether_addr(hdr->addr1)) { |
848 | if (!is_multicast_ether_addr(hdr->addr1) || | 857 | /* |
849 | rx->sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
850 | /* Update last_rx only for unicast frames in order to prevent | ||
851 | * the Probe Request frames (the only broadcast frames from a | ||
852 | * STA in infrastructure mode) from keeping a connection alive. | ||
853 | * Mesh beacons will update last_rx when if they are found to | 858 | * Mesh beacons will update last_rx when if they are found to |
854 | * match the current local configuration when processed. | 859 | * match the current local configuration when processed. |
855 | */ | 860 | */ |
856 | if (rx->sdata->vif.type == NL80211_IFTYPE_STATION && | 861 | sta->last_rx = jiffies; |
857 | ieee80211_is_beacon(hdr->frame_control)) { | ||
858 | rx->sdata->u.mgd.last_beacon = jiffies; | ||
859 | } else | ||
860 | sta->last_rx = jiffies; | ||
861 | } | 862 | } |
862 | 863 | ||
863 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | 864 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) |
@@ -1125,14 +1126,15 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | |||
1125 | skb_queue_empty(&rx->sta->ps_tx_buf); | 1126 | skb_queue_empty(&rx->sta->ps_tx_buf); |
1126 | 1127 | ||
1127 | if (skb) { | 1128 | if (skb) { |
1129 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1128 | struct ieee80211_hdr *hdr = | 1130 | struct ieee80211_hdr *hdr = |
1129 | (struct ieee80211_hdr *) skb->data; | 1131 | (struct ieee80211_hdr *) skb->data; |
1130 | 1132 | ||
1131 | /* | 1133 | /* |
1132 | * Tell TX path to send one frame even though the STA may | 1134 | * Tell TX path to send this frame even though the STA may |
1133 | * still remain in PS mode after this frame exchange. | 1135 | * still remain in PS mode after this frame exchange. |

1134 | */ | 1136 | */ |
1135 | set_sta_flags(rx->sta, WLAN_STA_PSPOLL); | 1137 | info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE; |
1136 | 1138 | ||
1137 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 1139 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
1138 | printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n", | 1140 | printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n", |
@@ -1147,7 +1149,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | |||
1147 | else | 1149 | else |
1148 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); | 1150 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); |
1149 | 1151 | ||
1150 | dev_queue_xmit(skb); | 1152 | ieee80211_add_pending_skb(rx->local, skb); |
1151 | 1153 | ||
1152 | if (no_pending_pkts) | 1154 | if (no_pending_pkts) |
1153 | sta_info_clear_tim_bit(rx->sta); | 1155 | sta_info_clear_tim_bit(rx->sta); |
@@ -1487,10 +1489,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1487 | struct ieee80211s_hdr *mesh_hdr; | 1489 | struct ieee80211s_hdr *mesh_hdr; |
1488 | unsigned int hdrlen; | 1490 | unsigned int hdrlen; |
1489 | struct sk_buff *skb = rx->skb, *fwd_skb; | 1491 | struct sk_buff *skb = rx->skb, *fwd_skb; |
1492 | struct ieee80211_local *local = rx->local; | ||
1493 | struct ieee80211_sub_if_data *sdata; | ||
1490 | 1494 | ||
1491 | hdr = (struct ieee80211_hdr *) skb->data; | 1495 | hdr = (struct ieee80211_hdr *) skb->data; |
1492 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 1496 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
1493 | mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); | 1497 | mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); |
1498 | sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | ||
1494 | 1499 | ||
1495 | if (!ieee80211_is_data(hdr->frame_control)) | 1500 | if (!ieee80211_is_data(hdr->frame_control)) |
1496 | return RX_CONTINUE; | 1501 | return RX_CONTINUE; |
@@ -1499,11 +1504,10 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1499 | /* illegal frame */ | 1504 | /* illegal frame */ |
1500 | return RX_DROP_MONITOR; | 1505 | return RX_DROP_MONITOR; |
1501 | 1506 | ||
1502 | if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6){ | 1507 | if (!is_multicast_ether_addr(hdr->addr1) && |
1503 | struct ieee80211_sub_if_data *sdata; | 1508 | (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6)) { |
1504 | struct mesh_path *mppath; | 1509 | struct mesh_path *mppath; |
1505 | 1510 | ||
1506 | sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | ||
1507 | rcu_read_lock(); | 1511 | rcu_read_lock(); |
1508 | mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata); | 1512 | mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata); |
1509 | if (!mppath) { | 1513 | if (!mppath) { |
@@ -1518,7 +1522,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1518 | rcu_read_unlock(); | 1522 | rcu_read_unlock(); |
1519 | } | 1523 | } |
1520 | 1524 | ||
1521 | if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0) | 1525 | /* Frame has reached destination. Don't forward */ |
1526 | if (!is_multicast_ether_addr(hdr->addr1) && | ||
1527 | compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0) | ||
1522 | return RX_CONTINUE; | 1528 | return RX_CONTINUE; |
1523 | 1529 | ||
1524 | mesh_hdr->ttl--; | 1530 | mesh_hdr->ttl--; |
@@ -1529,6 +1535,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1529 | dropped_frames_ttl); | 1535 | dropped_frames_ttl); |
1530 | else { | 1536 | else { |
1531 | struct ieee80211_hdr *fwd_hdr; | 1537 | struct ieee80211_hdr *fwd_hdr; |
1538 | struct ieee80211_tx_info *info; | ||
1539 | |||
1532 | fwd_skb = skb_copy(skb, GFP_ATOMIC); | 1540 | fwd_skb = skb_copy(skb, GFP_ATOMIC); |
1533 | 1541 | ||
1534 | if (!fwd_skb && net_ratelimit()) | 1542 | if (!fwd_skb && net_ratelimit()) |
@@ -1536,19 +1544,40 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1536 | rx->dev->name); | 1544 | rx->dev->name); |
1537 | 1545 | ||
1538 | fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; | 1546 | fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; |
1539 | /* | ||
1540 | * Save TA to addr1 to send TA a path error if a | ||
1541 | * suitable next hop is not found | ||
1542 | */ | ||
1543 | memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN); | ||
1544 | memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN); | 1547 | memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN); |
1545 | fwd_skb->dev = rx->local->mdev; | 1548 | info = IEEE80211_SKB_CB(fwd_skb); |
1546 | fwd_skb->iif = rx->dev->ifindex; | 1549 | memset(info, 0, sizeof(*info)); |
1547 | dev_queue_xmit(fwd_skb); | 1550 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; |
1551 | info->control.vif = &rx->sdata->vif; | ||
1552 | ieee80211_select_queue(local, fwd_skb); | ||
1553 | if (is_multicast_ether_addr(fwd_hdr->addr1)) | ||
1554 | IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, | ||
1555 | fwded_mcast); | ||
1556 | else { | ||
1557 | int err; | ||
1558 | /* | ||
1559 | * Save TA to addr1 to send TA a path error if a | ||
1560 | * suitable next hop is not found | ||
1561 | */ | ||
1562 | memcpy(fwd_hdr->addr1, fwd_hdr->addr2, | ||
1563 | ETH_ALEN); | ||
1564 | err = mesh_nexthop_lookup(fwd_skb, sdata); | ||
1565 | /* Failed to immediately resolve next hop: | ||
1566 | * fwded frame was dropped or will be added | ||
1567 | * later to the pending skb queue. */ | ||
1568 | if (err) | ||
1569 | return RX_DROP_MONITOR; | ||
1570 | |||
1571 | IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, | ||
1572 | fwded_unicast); | ||
1573 | } | ||
1574 | IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, | ||
1575 | fwded_frames); | ||
1576 | ieee80211_add_pending_skb(local, fwd_skb); | ||
1548 | } | 1577 | } |
1549 | } | 1578 | } |
1550 | 1579 | ||
1551 | if (is_multicast_ether_addr(hdr->addr3) || | 1580 | if (is_multicast_ether_addr(hdr->addr1) || |
1552 | rx->dev->flags & IFF_PROMISC) | 1581 | rx->dev->flags & IFF_PROMISC) |
1553 | return RX_CONTINUE; | 1582 | return RX_CONTINUE; |
1554 | else | 1583 | else |
@@ -1620,7 +1649,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) | |||
1620 | /* manage reordering buffer according to requested */ | 1649 | /* manage reordering buffer according to requested */ |
1621 | /* sequence number */ | 1650 | /* sequence number */ |
1622 | rcu_read_lock(); | 1651 | rcu_read_lock(); |
1623 | ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL, | 1652 | ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, |
1624 | start_seq_num, 1); | 1653 | start_seq_num, 1); |
1625 | rcu_read_unlock(); | 1654 | rcu_read_unlock(); |
1626 | return RX_DROP_UNUSABLE; | 1655 | return RX_DROP_UNUSABLE; |
@@ -1644,12 +1673,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, | |||
1644 | 1673 | ||
1645 | if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 || | 1674 | if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 || |
1646 | compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) { | 1675 | compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) { |
1647 | /* Not from the current AP. */ | 1676 | /* Not from the current AP or not associated yet. */ |
1648 | return; | ||
1649 | } | ||
1650 | |||
1651 | if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATE) { | ||
1652 | /* Association in progress; ignore SA Query */ | ||
1653 | return; | 1677 | return; |
1654 | } | 1678 | } |
1655 | 1679 | ||
@@ -1686,7 +1710,6 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
1686 | struct ieee80211_local *local = rx->local; | 1710 | struct ieee80211_local *local = rx->local; |
1687 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); | 1711 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); |
1688 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; | 1712 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; |
1689 | struct ieee80211_bss *bss; | ||
1690 | int len = rx->skb->len; | 1713 | int len = rx->skb->len; |
1691 | 1714 | ||
1692 | if (!ieee80211_is_action(mgmt->frame_control)) | 1715 | if (!ieee80211_is_action(mgmt->frame_control)) |
@@ -1764,17 +1787,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
1764 | if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) | 1787 | if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) |
1765 | return RX_DROP_MONITOR; | 1788 | return RX_DROP_MONITOR; |
1766 | 1789 | ||
1767 | bss = ieee80211_rx_bss_get(local, sdata->u.mgd.bssid, | 1790 | return ieee80211_sta_rx_mgmt(sdata, rx->skb); |
1768 | local->hw.conf.channel->center_freq, | ||
1769 | sdata->u.mgd.ssid, | ||
1770 | sdata->u.mgd.ssid_len); | ||
1771 | if (!bss) | ||
1772 | return RX_DROP_MONITOR; | ||
1773 | |||
1774 | ieee80211_sta_process_chanswitch(sdata, | ||
1775 | &mgmt->u.action.u.chan_switch.sw_elem, bss); | ||
1776 | ieee80211_rx_bss_put(local, bss); | ||
1777 | break; | ||
1778 | } | 1791 | } |
1779 | break; | 1792 | break; |
1780 | case WLAN_CATEGORY_SA_QUERY: | 1793 | case WLAN_CATEGORY_SA_QUERY: |
@@ -1817,19 +1830,18 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) | |||
1817 | return RX_DROP_MONITOR; | 1830 | return RX_DROP_MONITOR; |
1818 | 1831 | ||
1819 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 1832 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
1820 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status); | 1833 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb); |
1821 | 1834 | ||
1822 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) | 1835 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) |
1823 | return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status); | 1836 | return ieee80211_ibss_rx_mgmt(sdata, rx->skb); |
1824 | 1837 | ||
1825 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | 1838 | if (sdata->vif.type == NL80211_IFTYPE_STATION) |
1826 | return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status); | 1839 | return ieee80211_sta_rx_mgmt(sdata, rx->skb); |
1827 | 1840 | ||
1828 | return RX_DROP_MONITOR; | 1841 | return RX_DROP_MONITOR; |
1829 | } | 1842 | } |
1830 | 1843 | ||
1831 | static void ieee80211_rx_michael_mic_report(struct net_device *dev, | 1844 | static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr, |
1832 | struct ieee80211_hdr *hdr, | ||
1833 | struct ieee80211_rx_data *rx) | 1845 | struct ieee80211_rx_data *rx) |
1834 | { | 1846 | { |
1835 | int keyidx; | 1847 | int keyidx; |
@@ -1866,7 +1878,8 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev, | |||
1866 | !ieee80211_is_auth(hdr->frame_control)) | 1878 | !ieee80211_is_auth(hdr->frame_control)) |
1867 | goto ignore; | 1879 | goto ignore; |
1868 | 1880 | ||
1869 | mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL); | 1881 | mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL, |
1882 | GFP_ATOMIC); | ||
1870 | ignore: | 1883 | ignore: |
1871 | dev_kfree_skb(rx->skb); | 1884 | dev_kfree_skb(rx->skb); |
1872 | rx->skb = NULL; | 1885 | rx->skb = NULL; |
@@ -2028,13 +2041,8 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
2028 | case NL80211_IFTYPE_STATION: | 2041 | case NL80211_IFTYPE_STATION: |
2029 | if (!bssid) | 2042 | if (!bssid) |
2030 | return 0; | 2043 | return 0; |
2031 | if (!ieee80211_bssid_match(bssid, sdata->u.mgd.bssid)) { | 2044 | if (!multicast && |
2032 | if (!(rx->flags & IEEE80211_RX_IN_SCAN)) | 2045 | compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) { |
2033 | return 0; | ||
2034 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | ||
2035 | } else if (!multicast && | ||
2036 | compare_ether_addr(sdata->dev->dev_addr, | ||
2037 | hdr->addr1) != 0) { | ||
2038 | if (!(sdata->dev->flags & IFF_PROMISC)) | 2046 | if (!(sdata->dev->flags & IFF_PROMISC)) |
2039 | return 0; | 2047 | return 0; |
2040 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 2048 | rx->flags &= ~IEEE80211_RX_RA_MATCH; |
@@ -2114,9 +2122,9 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
2114 | */ | 2122 | */ |
2115 | static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | 2123 | static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, |
2116 | struct sk_buff *skb, | 2124 | struct sk_buff *skb, |
2117 | struct ieee80211_rx_status *status, | ||
2118 | struct ieee80211_rate *rate) | 2125 | struct ieee80211_rate *rate) |
2119 | { | 2126 | { |
2127 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | ||
2120 | struct ieee80211_local *local = hw_to_local(hw); | 2128 | struct ieee80211_local *local = hw_to_local(hw); |
2121 | struct ieee80211_sub_if_data *sdata; | 2129 | struct ieee80211_sub_if_data *sdata; |
2122 | struct ieee80211_hdr *hdr; | 2130 | struct ieee80211_hdr *hdr; |
@@ -2143,11 +2151,12 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
2143 | } | 2151 | } |
2144 | 2152 | ||
2145 | if ((status->flag & RX_FLAG_MMIC_ERROR)) { | 2153 | if ((status->flag & RX_FLAG_MMIC_ERROR)) { |
2146 | ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx); | 2154 | ieee80211_rx_michael_mic_report(hdr, &rx); |
2147 | return; | 2155 | return; |
2148 | } | 2156 | } |
2149 | 2157 | ||
2150 | if (unlikely(local->sw_scanning || local->hw_scanning)) | 2158 | if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || |
2159 | test_bit(SCAN_OFF_CHANNEL, &local->scanning))) | ||
2151 | rx.flags |= IEEE80211_RX_IN_SCAN; | 2160 | rx.flags |= IEEE80211_RX_IN_SCAN; |
2152 | 2161 | ||
2153 | ieee80211_parse_qos(&rx); | 2162 | ieee80211_parse_qos(&rx); |
@@ -2227,20 +2236,21 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw, | |||
2227 | { | 2236 | { |
2228 | struct ieee80211_supported_band *sband; | 2237 | struct ieee80211_supported_band *sband; |
2229 | struct ieee80211_rate *rate; | 2238 | struct ieee80211_rate *rate; |
2230 | struct ieee80211_rx_status status; | 2239 | struct sk_buff *skb = tid_agg_rx->reorder_buf[index]; |
2240 | struct ieee80211_rx_status *status; | ||
2231 | 2241 | ||
2232 | if (!tid_agg_rx->reorder_buf[index]) | 2242 | if (!skb) |
2233 | goto no_frame; | 2243 | goto no_frame; |
2234 | 2244 | ||
2245 | status = IEEE80211_SKB_RXCB(skb); | ||
2246 | |||
2235 | /* release the reordered frames to stack */ | 2247 | /* release the reordered frames to stack */ |
2236 | memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, sizeof(status)); | 2248 | sband = hw->wiphy->bands[status->band]; |
2237 | sband = hw->wiphy->bands[status.band]; | 2249 | if (status->flag & RX_FLAG_HT) |
2238 | if (status.flag & RX_FLAG_HT) | ||
2239 | rate = sband->bitrates; /* TODO: HT rates */ | 2250 | rate = sband->bitrates; /* TODO: HT rates */ |
2240 | else | 2251 | else |
2241 | rate = &sband->bitrates[status.rate_idx]; | 2252 | rate = &sband->bitrates[status->rate_idx]; |
2242 | __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index], | 2253 | __ieee80211_rx_handle_packet(hw, skb, rate); |
2243 | &status, rate); | ||
2244 | tid_agg_rx->stored_mpdu_num--; | 2254 | tid_agg_rx->stored_mpdu_num--; |
2245 | tid_agg_rx->reorder_buf[index] = NULL; | 2255 | tid_agg_rx->reorder_buf[index] = NULL; |
2246 | 2256 | ||
@@ -2265,7 +2275,6 @@ no_frame: | |||
2265 | static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | 2275 | static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, |
2266 | struct tid_ampdu_rx *tid_agg_rx, | 2276 | struct tid_ampdu_rx *tid_agg_rx, |
2267 | struct sk_buff *skb, | 2277 | struct sk_buff *skb, |
2268 | struct ieee80211_rx_status *rxstatus, | ||
2269 | u16 mpdu_seq_num, | 2278 | u16 mpdu_seq_num, |
2270 | int bar_req) | 2279 | int bar_req) |
2271 | { | 2280 | { |
@@ -2324,8 +2333,6 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |||
2324 | /* put the frame in the reordering buffer */ | 2333 | /* put the frame in the reordering buffer */ |
2325 | tid_agg_rx->reorder_buf[index] = skb; | 2334 | tid_agg_rx->reorder_buf[index] = skb; |
2326 | tid_agg_rx->reorder_time[index] = jiffies; | 2335 | tid_agg_rx->reorder_time[index] = jiffies; |
2327 | memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus, | ||
2328 | sizeof(*rxstatus)); | ||
2329 | tid_agg_rx->stored_mpdu_num++; | 2336 | tid_agg_rx->stored_mpdu_num++; |
2330 | /* release the buffer until next missing frame */ | 2337 | /* release the buffer until next missing frame */ |
2331 | index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) | 2338 | index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) |
@@ -2374,8 +2381,7 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |||
2374 | } | 2381 | } |
2375 | 2382 | ||
2376 | static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, | 2383 | static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, |
2377 | struct sk_buff *skb, | 2384 | struct sk_buff *skb) |
2378 | struct ieee80211_rx_status *status) | ||
2379 | { | 2385 | { |
2380 | struct ieee80211_hw *hw = &local->hw; | 2386 | struct ieee80211_hw *hw = &local->hw; |
2381 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 2387 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
@@ -2424,7 +2430,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, | |||
2424 | 2430 | ||
2425 | /* according to mpdu sequence number deal with reordering buffer */ | 2431 | /* according to mpdu sequence number deal with reordering buffer */ |
2426 | mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; | 2432 | mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; |
2427 | ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status, | 2433 | ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, |
2428 | mpdu_seq_num, 0); | 2434 | mpdu_seq_num, 0); |
2429 | end_reorder: | 2435 | end_reorder: |
2430 | return ret; | 2436 | return ret; |
@@ -2434,24 +2440,20 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, | |||
2434 | * This is the receive path handler. It is called by a low level driver when an | 2440 | * This is the receive path handler. It is called by a low level driver when an |
2435 | * 802.11 MPDU is received from the hardware. | 2441 | * 802.11 MPDU is received from the hardware. |
2436 | */ | 2442 | */ |
2437 | void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | 2443 | void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) |
2438 | struct ieee80211_rx_status *status) | ||
2439 | { | 2444 | { |
2440 | struct ieee80211_local *local = hw_to_local(hw); | 2445 | struct ieee80211_local *local = hw_to_local(hw); |
2441 | struct ieee80211_rate *rate = NULL; | 2446 | struct ieee80211_rate *rate = NULL; |
2442 | struct ieee80211_supported_band *sband; | 2447 | struct ieee80211_supported_band *sband; |
2448 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | ||
2443 | 2449 | ||
2444 | if (status->band < 0 || | 2450 | if (WARN_ON(status->band < 0 || |
2445 | status->band >= IEEE80211_NUM_BANDS) { | 2451 | status->band >= IEEE80211_NUM_BANDS)) |
2446 | WARN_ON(1); | 2452 | goto drop; |
2447 | return; | ||
2448 | } | ||
2449 | 2453 | ||
2450 | sband = local->hw.wiphy->bands[status->band]; | 2454 | sband = local->hw.wiphy->bands[status->band]; |
2451 | if (!sband) { | 2455 | if (WARN_ON(!sband)) |
2452 | WARN_ON(1); | 2456 | goto drop; |
2453 | return; | ||
2454 | } | ||
2455 | 2457 | ||
2456 | /* | 2458 | /* |
2457 | * If we're suspending, it is possible although not too likely | 2459 | * If we're suspending, it is possible although not too likely |
@@ -2460,16 +2462,21 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
2460 | * that might, for example, cause stations to be added or other | 2462 | * that might, for example, cause stations to be added or other |
2461 | * driver callbacks be invoked. | 2463 | * driver callbacks be invoked. |
2462 | */ | 2464 | */ |
2463 | if (unlikely(local->quiescing || local->suspended)) { | 2465 | if (unlikely(local->quiescing || local->suspended)) |
2464 | kfree_skb(skb); | 2466 | goto drop; |
2465 | return; | 2467 | |
2466 | } | 2468 | /* |
2469 | * The same happens when we're not even started, | ||
2470 | * but that's worth a warning. | ||
2471 | */ | ||
2472 | if (WARN_ON(!local->started)) | ||
2473 | goto drop; | ||
2467 | 2474 | ||
2468 | if (status->flag & RX_FLAG_HT) { | 2475 | if (status->flag & RX_FLAG_HT) { |
2469 | /* rate_idx is MCS index */ | 2476 | /* rate_idx is MCS index */ |
2470 | if (WARN_ON(status->rate_idx < 0 || | 2477 | if (WARN_ON(status->rate_idx < 0 || |
2471 | status->rate_idx >= 76)) | 2478 | status->rate_idx >= 76)) |
2472 | return; | 2479 | goto drop; |
2473 | /* HT rates are not in the table - use the highest legacy rate | 2480 | /* HT rates are not in the table - use the highest legacy rate |
2474 | * for now since other parts of mac80211 may not yet be fully | 2481 | * for now since other parts of mac80211 may not yet be fully |
2475 | * MCS aware. */ | 2482 | * MCS aware. */ |
@@ -2477,7 +2484,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
2477 | } else { | 2484 | } else { |
2478 | if (WARN_ON(status->rate_idx < 0 || | 2485 | if (WARN_ON(status->rate_idx < 0 || |
2479 | status->rate_idx >= sband->n_bitrates)) | 2486 | status->rate_idx >= sband->n_bitrates)) |
2480 | return; | 2487 | goto drop; |
2481 | rate = &sband->bitrates[status->rate_idx]; | 2488 | rate = &sband->bitrates[status->rate_idx]; |
2482 | } | 2489 | } |
2483 | 2490 | ||
@@ -2494,7 +2501,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
2494 | * if it was previously present. | 2501 | * if it was previously present. |
2495 | * Also, frames with less than 16 bytes are dropped. | 2502 | * Also, frames with less than 16 bytes are dropped. |
2496 | */ | 2503 | */ |
2497 | skb = ieee80211_rx_monitor(local, skb, status, rate); | 2504 | skb = ieee80211_rx_monitor(local, skb, rate); |
2498 | if (!skb) { | 2505 | if (!skb) { |
2499 | rcu_read_unlock(); | 2506 | rcu_read_unlock(); |
2500 | return; | 2507 | return; |
@@ -2512,25 +2519,25 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
2512 | * frames from other than operational channel), but that should not | 2519 | * frames from other than operational channel), but that should not |
2513 | * happen in normal networks. | 2520 | * happen in normal networks. |
2514 | */ | 2521 | */ |
2515 | if (!ieee80211_rx_reorder_ampdu(local, skb, status)) | 2522 | if (!ieee80211_rx_reorder_ampdu(local, skb)) |
2516 | __ieee80211_rx_handle_packet(hw, skb, status, rate); | 2523 | __ieee80211_rx_handle_packet(hw, skb, rate); |
2517 | 2524 | ||
2518 | rcu_read_unlock(); | 2525 | rcu_read_unlock(); |
2526 | |||
2527 | return; | ||
2528 | drop: | ||
2529 | kfree_skb(skb); | ||
2519 | } | 2530 | } |
2520 | EXPORT_SYMBOL(__ieee80211_rx); | 2531 | EXPORT_SYMBOL(ieee80211_rx); |
2521 | 2532 | ||
2522 | /* This is a version of the rx handler that can be called from hard irq | 2533 | /* This is a version of the rx handler that can be called from hard irq |
2523 | * context. Post the skb on the queue and schedule the tasklet */ | 2534 | * context. Post the skb on the queue and schedule the tasklet */ |
2524 | void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb, | 2535 | void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) |
2525 | struct ieee80211_rx_status *status) | ||
2526 | { | 2536 | { |
2527 | struct ieee80211_local *local = hw_to_local(hw); | 2537 | struct ieee80211_local *local = hw_to_local(hw); |
2528 | 2538 | ||
2529 | BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); | 2539 | BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); |
2530 | 2540 | ||
2531 | skb->dev = local->mdev; | ||
2532 | /* copy status into skb->cb for use by tasklet */ | ||
2533 | memcpy(skb->cb, status, sizeof(*status)); | ||
2534 | skb->pkt_type = IEEE80211_RX_MSG; | 2541 | skb->pkt_type = IEEE80211_RX_MSG; |
2535 | skb_queue_tail(&local->skb_queue, skb); | 2542 | skb_queue_tail(&local->skb_queue, skb); |
2536 | tasklet_schedule(&local->tasklet); | 2543 | tasklet_schedule(&local->tasklet); |
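The rx.c hunks above drop the separate rx-status argument throughout the receive path: the per-frame status now lives in skb->cb, fetched with IEEE80211_SKB_RXCB(), and __ieee80211_rx() becomes the exported ieee80211_rx(). The sketch below shows how a driver would hand frames up under that convention; it is a minimal illustration only, and the mydrv_* names and descriptor fields are hypothetical, not part of this patch.

        #include <net/mac80211.h>

        struct mydrv_rx_desc {                  /* hypothetical hardware RX descriptor */
                enum ieee80211_band band;
                int freq;                       /* center frequency in MHz */
                int rssi;
                int rate_idx;                   /* legacy rate index or MCS */
                bool ht;
        };

        static void mydrv_handle_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
                                    const struct mydrv_rx_desc *desc)
        {
                /* status is written straight into skb->cb instead of being
                 * passed as an extra argument to ieee80211_rx_irqsafe() */
                struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

                memset(status, 0, sizeof(*status));
                status->band = desc->band;
                status->freq = desc->freq;
                status->signal = desc->rssi;
                status->rate_idx = desc->rate_idx;
                if (desc->ht)
                        status->flag |= RX_FLAG_HT;     /* rate_idx is then an MCS index */

                ieee80211_rx_irqsafe(hw, skb);          /* new two-argument form */
        }

The irqsafe variant still only queues the skb and schedules the tasklet, as before; the BUILD_BUG_ON in the hunk above guarantees the status structure fits into skb->cb.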
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 2a8d09ad17ff..039901109fa1 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/if_arp.h> | 18 | #include <linux/if_arp.h> |
19 | #include <linux/rtnetlink.h> | 19 | #include <linux/rtnetlink.h> |
20 | #include <net/mac80211.h> | 20 | #include <net/mac80211.h> |
21 | #include <net/iw_handler.h> | ||
22 | 21 | ||
23 | #include "ieee80211_i.h" | 22 | #include "ieee80211_i.h" |
24 | #include "driver-ops.h" | 23 | #include "driver-ops.h" |
@@ -26,7 +25,7 @@ | |||
26 | 25 | ||
27 | #define IEEE80211_PROBE_DELAY (HZ / 33) | 26 | #define IEEE80211_PROBE_DELAY (HZ / 33) |
28 | #define IEEE80211_CHANNEL_TIME (HZ / 33) | 27 | #define IEEE80211_CHANNEL_TIME (HZ / 33) |
29 | #define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5) | 28 | #define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8) |
30 | 29 | ||
31 | struct ieee80211_bss * | 30 | struct ieee80211_bss * |
32 | ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, | 31 | ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, |
@@ -121,23 +120,10 @@ ieee80211_bss_info_update(struct ieee80211_local *local, | |||
121 | return bss; | 120 | return bss; |
122 | } | 121 | } |
123 | 122 | ||
124 | void ieee80211_rx_bss_remove(struct ieee80211_sub_if_data *sdata, u8 *bssid, | ||
125 | int freq, u8 *ssid, u8 ssid_len) | ||
126 | { | ||
127 | struct ieee80211_bss *bss; | ||
128 | struct ieee80211_local *local = sdata->local; | ||
129 | |||
130 | bss = ieee80211_rx_bss_get(local, bssid, freq, ssid, ssid_len); | ||
131 | if (bss) { | ||
132 | cfg80211_unlink_bss(local->hw.wiphy, (void *)bss); | ||
133 | ieee80211_rx_bss_put(local, bss); | ||
134 | } | ||
135 | } | ||
136 | |||
137 | ieee80211_rx_result | 123 | ieee80211_rx_result |
138 | ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | 124 | ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) |
139 | struct ieee80211_rx_status *rx_status) | ||
140 | { | 125 | { |
126 | struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); | ||
141 | struct ieee80211_mgmt *mgmt; | 127 | struct ieee80211_mgmt *mgmt; |
142 | struct ieee80211_bss *bss; | 128 | struct ieee80211_bss *bss; |
143 | u8 *elements; | 129 | u8 *elements; |
@@ -278,7 +264,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) | |||
278 | 264 | ||
279 | mutex_lock(&local->scan_mtx); | 265 | mutex_lock(&local->scan_mtx); |
280 | 266 | ||
281 | if (WARN_ON(!local->hw_scanning && !local->sw_scanning)) { | 267 | if (WARN_ON(!local->scanning)) { |
282 | mutex_unlock(&local->scan_mtx); | 268 | mutex_unlock(&local->scan_mtx); |
283 | return; | 269 | return; |
284 | } | 270 | } |
@@ -288,16 +274,16 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) | |||
288 | return; | 274 | return; |
289 | } | 275 | } |
290 | 276 | ||
291 | if (local->hw_scanning) | 277 | if (test_bit(SCAN_HW_SCANNING, &local->scanning)) |
292 | ieee80211_restore_scan_ies(local); | 278 | ieee80211_restore_scan_ies(local); |
293 | 279 | ||
294 | if (local->scan_req != &local->int_scan_req) | 280 | if (local->scan_req != local->int_scan_req) |
295 | cfg80211_scan_done(local->scan_req, aborted); | 281 | cfg80211_scan_done(local->scan_req, aborted); |
296 | local->scan_req = NULL; | 282 | local->scan_req = NULL; |
283 | local->scan_sdata = NULL; | ||
297 | 284 | ||
298 | was_hw_scan = local->hw_scanning; | 285 | was_hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning); |
299 | local->hw_scanning = false; | 286 | local->scanning = 0; |
300 | local->sw_scanning = false; | ||
301 | local->scan_channel = NULL; | 287 | local->scan_channel = NULL; |
302 | 288 | ||
303 | /* we only have to protect scan_req and hw/sw scan */ | 289 | /* we only have to protect scan_req and hw/sw scan */ |
@@ -307,16 +293,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) | |||
307 | if (was_hw_scan) | 293 | if (was_hw_scan) |
308 | goto done; | 294 | goto done; |
309 | 295 | ||
310 | netif_tx_lock_bh(local->mdev); | 296 | ieee80211_configure_filter(local); |
311 | netif_addr_lock(local->mdev); | ||
312 | local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC; | ||
313 | drv_configure_filter(local, FIF_BCN_PRBRESP_PROMISC, | ||
314 | &local->filter_flags, | ||
315 | local->mdev->mc_count, | ||
316 | local->mdev->mc_list); | ||
317 | |||
318 | netif_addr_unlock(local->mdev); | ||
319 | netif_tx_unlock_bh(local->mdev); | ||
320 | 297 | ||
321 | drv_sw_scan_complete(local); | 298 | drv_sw_scan_complete(local); |
322 | 299 | ||
@@ -327,7 +304,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) | |||
327 | 304 | ||
328 | /* Tell AP we're back */ | 305 | /* Tell AP we're back */ |
329 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 306 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
330 | if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) { | 307 | if (sdata->u.mgd.associated) { |
331 | ieee80211_scan_ps_disable(sdata); | 308 | ieee80211_scan_ps_disable(sdata); |
332 | netif_tx_wake_all_queues(sdata->dev); | 309 | netif_tx_wake_all_queues(sdata->dev); |
333 | } | 310 | } |
@@ -382,30 +359,24 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) | |||
382 | ieee80211_bss_info_change_notify( | 359 | ieee80211_bss_info_change_notify( |
383 | sdata, BSS_CHANGED_BEACON_ENABLED); | 360 | sdata, BSS_CHANGED_BEACON_ENABLED); |
384 | 361 | ||
385 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 362 | /* |
386 | if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) { | 363 | * only handle non-STA interfaces here, STA interfaces |
387 | netif_tx_stop_all_queues(sdata->dev); | 364 | * are handled in the scan state machine |
388 | ieee80211_scan_ps_enable(sdata); | 365 | */ |
389 | } | 366 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
390 | } else | ||
391 | netif_tx_stop_all_queues(sdata->dev); | 367 | netif_tx_stop_all_queues(sdata->dev); |
392 | } | 368 | } |
393 | mutex_unlock(&local->iflist_mtx); | 369 | mutex_unlock(&local->iflist_mtx); |
394 | 370 | ||
395 | local->scan_state = SCAN_SET_CHANNEL; | 371 | local->next_scan_state = SCAN_DECISION; |
396 | local->scan_channel_idx = 0; | 372 | local->scan_channel_idx = 0; |
397 | 373 | ||
398 | netif_addr_lock_bh(local->mdev); | 374 | ieee80211_configure_filter(local); |
399 | local->filter_flags |= FIF_BCN_PRBRESP_PROMISC; | ||
400 | drv_configure_filter(local, FIF_BCN_PRBRESP_PROMISC, | ||
401 | &local->filter_flags, | ||
402 | local->mdev->mc_count, | ||
403 | local->mdev->mc_list); | ||
404 | netif_addr_unlock_bh(local->mdev); | ||
405 | 375 | ||
406 | /* TODO: start scan as soon as all nullfunc frames are ACKed */ | 376 | /* TODO: start scan as soon as all nullfunc frames are ACKed */ |
407 | queue_delayed_work(local->hw.workqueue, &local->scan_work, | 377 | ieee80211_queue_delayed_work(&local->hw, |
408 | IEEE80211_CHANNEL_TIME); | 378 | &local->scan_work, |
379 | IEEE80211_CHANNEL_TIME); | ||
409 | 380 | ||
410 | return 0; | 381 | return 0; |
411 | } | 382 | } |
@@ -441,20 +412,18 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, | |||
441 | local->scan_req = req; | 412 | local->scan_req = req; |
442 | local->scan_sdata = sdata; | 413 | local->scan_sdata = sdata; |
443 | 414 | ||
444 | if (req != &local->int_scan_req && | 415 | if (req != local->int_scan_req && |
445 | sdata->vif.type == NL80211_IFTYPE_STATION && | 416 | sdata->vif.type == NL80211_IFTYPE_STATION && |
446 | (ifmgd->state == IEEE80211_STA_MLME_DIRECT_PROBE || | 417 | !list_empty(&ifmgd->work_list)) { |
447 | ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE || | 418 | /* actually wait for the work it's doing to finish/time out */ |
448 | ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE)) { | ||
449 | /* actually wait for the assoc to finish/time out */ | ||
450 | set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request); | 419 | set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request); |
451 | return 0; | 420 | return 0; |
452 | } | 421 | } |
453 | 422 | ||
454 | if (local->ops->hw_scan) | 423 | if (local->ops->hw_scan) |
455 | local->hw_scanning = true; | 424 | __set_bit(SCAN_HW_SCANNING, &local->scanning); |
456 | else | 425 | else |
457 | local->sw_scanning = true; | 426 | __set_bit(SCAN_SW_SCANNING, &local->scanning); |
458 | /* | 427 | /* |
459 | * Kicking off the scan need not be protected, | 428 | * Kicking off the scan need not be protected, |
460 | * only the scan variable stuff, since now | 429 | * only the scan variable stuff, since now |
@@ -477,11 +446,9 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, | |||
477 | mutex_lock(&local->scan_mtx); | 446 | mutex_lock(&local->scan_mtx); |
478 | 447 | ||
479 | if (rc) { | 448 | if (rc) { |
480 | if (local->ops->hw_scan) { | 449 | if (local->ops->hw_scan) |
481 | local->hw_scanning = false; | ||
482 | ieee80211_restore_scan_ies(local); | 450 | ieee80211_restore_scan_ies(local); |
483 | } else | 451 | local->scanning = 0; |
484 | local->sw_scanning = false; | ||
485 | 452 | ||
486 | ieee80211_recalc_idle(local); | 453 | ieee80211_recalc_idle(local); |
487 | 454 | ||
@@ -492,13 +459,195 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, | |||
492 | return rc; | 459 | return rc; |
493 | } | 460 | } |
494 | 461 | ||
462 | static int ieee80211_scan_state_decision(struct ieee80211_local *local, | ||
463 | unsigned long *next_delay) | ||
464 | { | ||
465 | bool associated = false; | ||
466 | struct ieee80211_sub_if_data *sdata; | ||
467 | |||
468 | /* if no more bands/channels left, complete scan and advance to the idle state */ | ||
469 | if (local->scan_channel_idx >= local->scan_req->n_channels) { | ||
470 | ieee80211_scan_completed(&local->hw, false); | ||
471 | return 1; | ||
472 | } | ||
473 | |||
474 | /* check if at least one STA interface is associated */ | ||
475 | mutex_lock(&local->iflist_mtx); | ||
476 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
477 | if (!netif_running(sdata->dev)) | ||
478 | continue; | ||
479 | |||
480 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
481 | if (sdata->u.mgd.associated) { | ||
482 | associated = true; | ||
483 | break; | ||
484 | } | ||
485 | } | ||
486 | } | ||
487 | mutex_unlock(&local->iflist_mtx); | ||
488 | |||
489 | if (local->scan_channel) { | ||
490 | /* | ||
491 | * we're currently scanning a different channel, let's | ||
492 | * switch back to the operating channel now if at least | ||
493 | * one interface is associated. Otherwise just scan the | ||
494 | * next channel | ||
495 | */ | ||
496 | if (associated) | ||
497 | local->next_scan_state = SCAN_ENTER_OPER_CHANNEL; | ||
498 | else | ||
499 | local->next_scan_state = SCAN_SET_CHANNEL; | ||
500 | } else { | ||
501 | /* | ||
502 | * we're on the operating channel currently, let's | ||
503 | * leave that channel now to scan another one | ||
504 | */ | ||
505 | local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL; | ||
506 | } | ||
507 | |||
508 | *next_delay = 0; | ||
509 | return 0; | ||
510 | } | ||
511 | |||
512 | static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local, | ||
513 | unsigned long *next_delay) | ||
514 | { | ||
515 | struct ieee80211_sub_if_data *sdata; | ||
516 | |||
517 | /* | ||
518 | * notify the AP about us leaving the channel and stop all STA interfaces | ||
519 | */ | ||
520 | mutex_lock(&local->iflist_mtx); | ||
521 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
522 | if (!netif_running(sdata->dev)) | ||
523 | continue; | ||
524 | |||
525 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
526 | netif_tx_stop_all_queues(sdata->dev); | ||
527 | if (sdata->u.mgd.associated) | ||
528 | ieee80211_scan_ps_enable(sdata); | ||
529 | } | ||
530 | } | ||
531 | mutex_unlock(&local->iflist_mtx); | ||
532 | |||
533 | __set_bit(SCAN_OFF_CHANNEL, &local->scanning); | ||
534 | |||
535 | /* advance to the next channel to be scanned */ | ||
536 | *next_delay = HZ / 10; | ||
537 | local->next_scan_state = SCAN_SET_CHANNEL; | ||
538 | } | ||
539 | |||
540 | static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local, | ||
541 | unsigned long *next_delay) | ||
542 | { | ||
543 | struct ieee80211_sub_if_data *sdata = local->scan_sdata; | ||
544 | |||
545 | /* switch back to the operating channel */ | ||
546 | local->scan_channel = NULL; | ||
547 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); | ||
548 | |||
549 | /* | ||
550 | * notify the AP about us being back and restart all STA interfaces | ||
551 | */ | ||
552 | mutex_lock(&local->iflist_mtx); | ||
553 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
554 | if (!netif_running(sdata->dev)) | ||
555 | continue; | ||
556 | |||
557 | /* Tell AP we're back */ | ||
558 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
559 | if (sdata->u.mgd.associated) | ||
560 | ieee80211_scan_ps_disable(sdata); | ||
561 | netif_tx_wake_all_queues(sdata->dev); | ||
562 | } | ||
563 | } | ||
564 | mutex_unlock(&local->iflist_mtx); | ||
565 | |||
566 | __clear_bit(SCAN_OFF_CHANNEL, &local->scanning); | ||
567 | |||
568 | *next_delay = HZ / 5; | ||
569 | local->next_scan_state = SCAN_DECISION; | ||
570 | } | ||
571 | |||
572 | static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, | ||
573 | unsigned long *next_delay) | ||
574 | { | ||
575 | int skip; | ||
576 | struct ieee80211_channel *chan; | ||
577 | struct ieee80211_sub_if_data *sdata = local->scan_sdata; | ||
578 | |||
579 | skip = 0; | ||
580 | chan = local->scan_req->channels[local->scan_channel_idx]; | ||
581 | |||
582 | if (chan->flags & IEEE80211_CHAN_DISABLED || | ||
583 | (sdata->vif.type == NL80211_IFTYPE_ADHOC && | ||
584 | chan->flags & IEEE80211_CHAN_NO_IBSS)) | ||
585 | skip = 1; | ||
586 | |||
587 | if (!skip) { | ||
588 | local->scan_channel = chan; | ||
589 | if (ieee80211_hw_config(local, | ||
590 | IEEE80211_CONF_CHANGE_CHANNEL)) | ||
591 | skip = 1; | ||
592 | } | ||
593 | |||
594 | /* advance state machine to next channel/band */ | ||
595 | local->scan_channel_idx++; | ||
596 | |||
597 | if (skip) { | ||
598 | /* if we skip this channel return to the decision state */ | ||
599 | local->next_scan_state = SCAN_DECISION; | ||
600 | return; | ||
601 | } | ||
602 | |||
603 | /* | ||
604 | * Probe delay is used to update the NAV, cf. 11.1.3.2.2 | ||
605 | * (which unfortunately doesn't say _why_ step a) is done, | ||
606 | * but it waits for the probe delay or until a frame is | ||
607 | * received - and the received frame would update the NAV). | ||
608 | * For now, we do not support waiting until a frame is | ||
609 | * received. | ||
610 | * | ||
611 | * In any case, it is not necessary for a passive scan. | ||
612 | */ | ||
613 | if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN || | ||
614 | !local->scan_req->n_ssids) { | ||
615 | *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; | ||
616 | local->next_scan_state = SCAN_DECISION; | ||
617 | return; | ||
618 | } | ||
619 | |||
620 | /* active scan, send probes */ | ||
621 | *next_delay = IEEE80211_PROBE_DELAY; | ||
622 | local->next_scan_state = SCAN_SEND_PROBE; | ||
623 | } | ||
624 | |||
625 | static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, | ||
626 | unsigned long *next_delay) | ||
627 | { | ||
628 | int i; | ||
629 | struct ieee80211_sub_if_data *sdata = local->scan_sdata; | ||
630 | |||
631 | for (i = 0; i < local->scan_req->n_ssids; i++) | ||
632 | ieee80211_send_probe_req( | ||
633 | sdata, NULL, | ||
634 | local->scan_req->ssids[i].ssid, | ||
635 | local->scan_req->ssids[i].ssid_len, | ||
636 | local->scan_req->ie, local->scan_req->ie_len); | ||
637 | |||
638 | /* | ||
639 | * After sending probe requests, wait for probe responses | ||
640 | * on the channel. | ||
641 | */ | ||
642 | *next_delay = IEEE80211_CHANNEL_TIME; | ||
643 | local->next_scan_state = SCAN_DECISION; | ||
644 | } | ||
645 | |||
495 | void ieee80211_scan_work(struct work_struct *work) | 646 | void ieee80211_scan_work(struct work_struct *work) |
496 | { | 647 | { |
497 | struct ieee80211_local *local = | 648 | struct ieee80211_local *local = |
498 | container_of(work, struct ieee80211_local, scan_work.work); | 649 | container_of(work, struct ieee80211_local, scan_work.work); |
499 | struct ieee80211_sub_if_data *sdata = local->scan_sdata; | 650 | struct ieee80211_sub_if_data *sdata = local->scan_sdata; |
500 | struct ieee80211_channel *chan; | ||
501 | int skip, i; | ||
502 | unsigned long next_delay = 0; | 651 | unsigned long next_delay = 0; |
503 | 652 | ||
504 | mutex_lock(&local->scan_mtx); | 653 | mutex_lock(&local->scan_mtx); |
@@ -507,11 +656,12 @@ void ieee80211_scan_work(struct work_struct *work) | |||
507 | return; | 656 | return; |
508 | } | 657 | } |
509 | 658 | ||
510 | if (local->scan_req && !(local->sw_scanning || local->hw_scanning)) { | 659 | if (local->scan_req && !local->scanning) { |
511 | struct cfg80211_scan_request *req = local->scan_req; | 660 | struct cfg80211_scan_request *req = local->scan_req; |
512 | int rc; | 661 | int rc; |
513 | 662 | ||
514 | local->scan_req = NULL; | 663 | local->scan_req = NULL; |
664 | local->scan_sdata = NULL; | ||
515 | 665 | ||
516 | rc = __ieee80211_start_scan(sdata, req); | 666 | rc = __ieee80211_start_scan(sdata, req); |
517 | mutex_unlock(&local->scan_mtx); | 667 | mutex_unlock(&local->scan_mtx); |
@@ -531,72 +681,32 @@ void ieee80211_scan_work(struct work_struct *work) | |||
531 | return; | 681 | return; |
532 | } | 682 | } |
533 | 683 | ||
534 | switch (local->scan_state) { | 684 | /* |
535 | case SCAN_SET_CHANNEL: | 685 | * as long as no delay is required advance immediately |
536 | /* if no more bands/channels left, complete scan */ | 686 | * without scheduling a new work |
537 | if (local->scan_channel_idx >= local->scan_req->n_channels) { | 687 | */ |
538 | ieee80211_scan_completed(&local->hw, false); | 688 | do { |
539 | return; | 689 | switch (local->next_scan_state) { |
540 | } | 690 | case SCAN_DECISION: |
541 | skip = 0; | 691 | if (ieee80211_scan_state_decision(local, &next_delay)) |
542 | chan = local->scan_req->channels[local->scan_channel_idx]; | 692 | return; |
543 | |||
544 | if (chan->flags & IEEE80211_CHAN_DISABLED || | ||
545 | (sdata->vif.type == NL80211_IFTYPE_ADHOC && | ||
546 | chan->flags & IEEE80211_CHAN_NO_IBSS)) | ||
547 | skip = 1; | ||
548 | |||
549 | if (!skip) { | ||
550 | local->scan_channel = chan; | ||
551 | if (ieee80211_hw_config(local, | ||
552 | IEEE80211_CONF_CHANGE_CHANNEL)) | ||
553 | skip = 1; | ||
554 | } | ||
555 | |||
556 | /* advance state machine to next channel/band */ | ||
557 | local->scan_channel_idx++; | ||
558 | |||
559 | if (skip) | ||
560 | break; | 693 | break; |
561 | 694 | case SCAN_SET_CHANNEL: | |
562 | /* | 695 | ieee80211_scan_state_set_channel(local, &next_delay); |
563 | * Probe delay is used to update the NAV, cf. 11.1.3.2.2 | 696 | break; |
564 | * (which unfortunately doesn't say _why_ step a) is done, | 697 | case SCAN_SEND_PROBE: |
565 | * but it waits for the probe delay or until a frame is | 698 | ieee80211_scan_state_send_probe(local, &next_delay); |
566 | * received - and the received frame would update the NAV). | 699 | break; |
567 | * For now, we do not support waiting until a frame is | 700 | case SCAN_LEAVE_OPER_CHANNEL: |
568 | * received. | 701 | ieee80211_scan_state_leave_oper_channel(local, &next_delay); |
569 | * | 702 | break; |
570 | * In any case, it is not necessary for a passive scan. | 703 | case SCAN_ENTER_OPER_CHANNEL: |
571 | */ | 704 | ieee80211_scan_state_enter_oper_channel(local, &next_delay); |
572 | if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN || | ||
573 | !local->scan_req->n_ssids) { | ||
574 | next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; | ||
575 | break; | 705 | break; |
576 | } | 706 | } |
707 | } while (next_delay == 0); | ||
577 | 708 | ||
578 | next_delay = IEEE80211_PROBE_DELAY; | 709 | ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay); |
579 | local->scan_state = SCAN_SEND_PROBE; | ||
580 | break; | ||
581 | case SCAN_SEND_PROBE: | ||
582 | for (i = 0; i < local->scan_req->n_ssids; i++) | ||
583 | ieee80211_send_probe_req( | ||
584 | sdata, NULL, | ||
585 | local->scan_req->ssids[i].ssid, | ||
586 | local->scan_req->ssids[i].ssid_len, | ||
587 | local->scan_req->ie, local->scan_req->ie_len); | ||
588 | |||
589 | /* | ||
590 | * After sending probe requests, wait for probe responses | ||
591 | * on the channel. | ||
592 | */ | ||
593 | next_delay = IEEE80211_CHANNEL_TIME; | ||
594 | local->scan_state = SCAN_SET_CHANNEL; | ||
595 | break; | ||
596 | } | ||
597 | |||
598 | queue_delayed_work(local->hw.workqueue, &local->scan_work, | ||
599 | next_delay); | ||
600 | } | 710 | } |
601 | 711 | ||
602 | int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, | 712 | int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, |
@@ -623,10 +733,10 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, | |||
623 | if (local->scan_req) | 733 | if (local->scan_req) |
624 | goto unlock; | 734 | goto unlock; |
625 | 735 | ||
626 | memcpy(local->int_scan_req.ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN); | 736 | memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN); |
627 | local->int_scan_req.ssids[0].ssid_len = ssid_len; | 737 | local->int_scan_req->ssids[0].ssid_len = ssid_len; |
628 | 738 | ||
629 | ret = __ieee80211_start_scan(sdata, &sdata->local->int_scan_req); | 739 | ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req); |
630 | unlock: | 740 | unlock: |
631 | mutex_unlock(&local->scan_mtx); | 741 | mutex_unlock(&local->scan_mtx); |
632 | return ret; | 742 | return ret; |
@@ -634,7 +744,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata, | |||
634 | 744 | ||
635 | void ieee80211_scan_cancel(struct ieee80211_local *local) | 745 | void ieee80211_scan_cancel(struct ieee80211_local *local) |
636 | { | 746 | { |
637 | bool swscan; | 747 | bool abortscan; |
638 | 748 | ||
639 | cancel_delayed_work_sync(&local->scan_work); | 749 | cancel_delayed_work_sync(&local->scan_work); |
640 | 750 | ||
@@ -643,9 +753,10 @@ void ieee80211_scan_cancel(struct ieee80211_local *local) | |||
643 | * queued -- mostly at suspend under RTNL. | 753 | * queued -- mostly at suspend under RTNL. |
644 | */ | 754 | */ |
645 | mutex_lock(&local->scan_mtx); | 755 | mutex_lock(&local->scan_mtx); |
646 | swscan = local->sw_scanning; | 756 | abortscan = test_bit(SCAN_SW_SCANNING, &local->scanning) || |
757 | (!local->scanning && local->scan_req); | ||
647 | mutex_unlock(&local->scan_mtx); | 758 | mutex_unlock(&local->scan_mtx); |
648 | 759 | ||
649 | if (swscan) | 760 | if (abortscan) |
650 | ieee80211_scan_completed(&local->hw, true); | 761 | ieee80211_scan_completed(&local->hw, true); |
651 | } | 762 | } |
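The scan.c changes replace the two scanning booleans with a single scanning bitmask (SCAN_SW_SCANNING, SCAN_HW_SCANNING, SCAN_OFF_CHANNEL) and split the old SET_CHANNEL/SEND_PROBE loop into a five-state machine whose central decision is whether to hop back to the operating channel between scanned channels. The fragment below is a condensed model of that decision, using placeholder types rather than the mac80211 structures, to show the intent of SCAN_DECISION:

        #include <stdbool.h>

        enum scan_state {
                SCAN_DECISION,
                SCAN_SET_CHANNEL,
                SCAN_SEND_PROBE,
                SCAN_LEAVE_OPER_CHANNEL,
                SCAN_ENTER_OPER_CHANNEL,
        };

        struct scan_model {                     /* stand-in for the mac80211 state */
                enum scan_state next_state;
                bool off_channel;               /* currently away from the operating channel */
                bool associated;                /* at least one running STA interface is associated */
                int chan_idx, n_channels;
        };

        /* returns true when all channels are done and the scan should complete */
        static bool scan_decision(struct scan_model *s)
        {
                if (s->chan_idx >= s->n_channels)
                        return true;

                if (s->off_channel)
                        /* between channels, go home if anyone is associated
                         * so traffic keeps flowing during a background scan */
                        s->next_state = s->associated ? SCAN_ENTER_OPER_CHANNEL
                                                      : SCAN_SET_CHANNEL;
                else
                        s->next_state = SCAN_LEAVE_OPER_CHANNEL;
                return false;
        }

Each state handler also reports a delay; ieee80211_scan_work() chains zero-delay states immediately and only requeues itself (via ieee80211_queue_delayed_work) when a handler asks for a wait, for example IEEE80211_CHANNEL_TIME after sending probes.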
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a360bceeba59..eec001491e66 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -349,6 +349,7 @@ int sta_info_insert(struct sta_info *sta) | |||
349 | goto out_free; | 349 | goto out_free; |
350 | } | 350 | } |
351 | list_add(&sta->list, &local->sta_list); | 351 | list_add(&sta->list, &local->sta_list); |
352 | local->sta_generation++; | ||
352 | local->num_sta++; | 353 | local->num_sta++; |
353 | sta_info_hash_add(local, sta); | 354 | sta_info_hash_add(local, sta); |
354 | 355 | ||
@@ -485,6 +486,7 @@ static void __sta_info_unlink(struct sta_info **sta) | |||
485 | } | 486 | } |
486 | 487 | ||
487 | local->num_sta--; | 488 | local->num_sta--; |
489 | local->sta_generation++; | ||
488 | 490 | ||
489 | if (local->ops->sta_notify) { | 491 | if (local->ops->sta_notify) { |
490 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | 492 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
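The sta_info.c hunks add a sta_generation counter that is bumped whenever a station is inserted or unlinked. This is the usual generation-counter idiom for lock-dropping iterators such as netlink dumps: snapshot the counter when the dump starts and compare it later to detect that the list changed underneath. A generic sketch of the idiom, not the cfg80211 consumer itself:

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct gen_list {
                spinlock_t lock;
                struct list_head items;
                int generation;         /* bumped on every add/remove */
        };

        static void gen_list_add(struct gen_list *gl, struct list_head *item)
        {
                spin_lock_bh(&gl->lock);
                list_add(item, &gl->items);
                gl->generation++;       /* readers compare against their snapshot */
                spin_unlock_bh(&gl->lock);
        }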
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 49a1a1f76511..ccc3adf962c7 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -30,7 +30,6 @@ | |||
30 | * @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP. | 30 | * @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP. |
31 | * @WLAN_STA_WME: Station is a QoS-STA. | 31 | * @WLAN_STA_WME: Station is a QoS-STA. |
32 | * @WLAN_STA_WDS: Station is one of our WDS peers. | 32 | * @WLAN_STA_WDS: Station is one of our WDS peers. |
33 | * @WLAN_STA_PSPOLL: Station has just PS-polled us. | ||
34 | * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the | 33 | * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the |
35 | * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next | 34 | * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next |
36 | * frame to this station is transmitted. | 35 | * frame to this station is transmitted. |
@@ -47,7 +46,6 @@ enum ieee80211_sta_info_flags { | |||
47 | WLAN_STA_ASSOC_AP = 1<<5, | 46 | WLAN_STA_ASSOC_AP = 1<<5, |
48 | WLAN_STA_WME = 1<<6, | 47 | WLAN_STA_WME = 1<<6, |
49 | WLAN_STA_WDS = 1<<7, | 48 | WLAN_STA_WDS = 1<<7, |
50 | WLAN_STA_PSPOLL = 1<<8, | ||
51 | WLAN_STA_CLEAR_PS_FILT = 1<<9, | 49 | WLAN_STA_CLEAR_PS_FILT = 1<<9, |
52 | WLAN_STA_MFP = 1<<10, | 50 | WLAN_STA_MFP = 1<<10, |
53 | WLAN_STA_SUSPEND = 1<<11 | 51 | WLAN_STA_SUSPEND = 1<<11 |
@@ -308,6 +306,23 @@ struct sta_info { | |||
308 | struct dentry *inactive_ms; | 306 | struct dentry *inactive_ms; |
309 | struct dentry *last_seq_ctrl; | 307 | struct dentry *last_seq_ctrl; |
310 | struct dentry *agg_status; | 308 | struct dentry *agg_status; |
309 | struct dentry *aid; | ||
310 | struct dentry *dev; | ||
311 | struct dentry *rx_packets; | ||
312 | struct dentry *tx_packets; | ||
313 | struct dentry *rx_bytes; | ||
314 | struct dentry *tx_bytes; | ||
315 | struct dentry *rx_duplicates; | ||
316 | struct dentry *rx_fragments; | ||
317 | struct dentry *rx_dropped; | ||
318 | struct dentry *tx_fragments; | ||
319 | struct dentry *tx_filtered; | ||
320 | struct dentry *tx_retry_failed; | ||
321 | struct dentry *tx_retry_count; | ||
322 | struct dentry *last_signal; | ||
323 | struct dentry *last_qual; | ||
324 | struct dentry *last_noise; | ||
325 | struct dentry *wep_weak_iv_count; | ||
311 | bool add_has_run; | 326 | bool add_has_run; |
312 | } debugfs; | 327 | } debugfs; |
313 | #endif | 328 | #endif |
@@ -342,17 +357,6 @@ static inline void clear_sta_flags(struct sta_info *sta, const u32 flags) | |||
342 | spin_unlock_irqrestore(&sta->flaglock, irqfl); | 357 | spin_unlock_irqrestore(&sta->flaglock, irqfl); |
343 | } | 358 | } |
344 | 359 | ||
345 | static inline void set_and_clear_sta_flags(struct sta_info *sta, | ||
346 | const u32 set, const u32 clear) | ||
347 | { | ||
348 | unsigned long irqfl; | ||
349 | |||
350 | spin_lock_irqsave(&sta->flaglock, irqfl); | ||
351 | sta->flags |= set; | ||
352 | sta->flags &= ~clear; | ||
353 | spin_unlock_irqrestore(&sta->flaglock, irqfl); | ||
354 | } | ||
355 | |||
356 | static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags) | 360 | static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags) |
357 | { | 361 | { |
358 | u32 ret; | 362 | u32 ret; |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 3a8922cd1038..5143d203256b 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -192,7 +192,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) | |||
192 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) | 192 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) |
193 | return TX_CONTINUE; | 193 | return TX_CONTINUE; |
194 | 194 | ||
195 | if (unlikely(tx->local->sw_scanning) && | 195 | if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) && |
196 | !ieee80211_is_probe_req(hdr->frame_control) && | 196 | !ieee80211_is_probe_req(hdr->frame_control) && |
197 | !ieee80211_is_nullfunc(hdr->frame_control)) | 197 | !ieee80211_is_nullfunc(hdr->frame_control)) |
198 | /* | 198 | /* |
@@ -317,30 +317,30 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
317 | if (!atomic_read(&tx->sdata->bss->num_sta_ps)) | 317 | if (!atomic_read(&tx->sdata->bss->num_sta_ps)) |
318 | return TX_CONTINUE; | 318 | return TX_CONTINUE; |
319 | 319 | ||
320 | /* buffered in hardware */ | ||
321 | if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) { | ||
322 | info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; | ||
323 | |||
324 | return TX_CONTINUE; | ||
325 | } | ||
326 | |||
320 | /* buffered in mac80211 */ | 327 | /* buffered in mac80211 */ |
321 | if (tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) { | 328 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) |
322 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) | 329 | purge_old_ps_buffers(tx->local); |
323 | purge_old_ps_buffers(tx->local); | 330 | |
324 | if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= | 331 | if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) { |
325 | AP_MAX_BC_BUFFER) { | ||
326 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 332 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
327 | if (net_ratelimit()) { | 333 | if (net_ratelimit()) |
328 | printk(KERN_DEBUG "%s: BC TX buffer full - " | 334 | printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n", |
329 | "dropping the oldest frame\n", | 335 | tx->dev->name); |
330 | tx->dev->name); | ||
331 | } | ||
332 | #endif | 336 | #endif |
333 | dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); | 337 | dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); |
334 | } else | 338 | } else |
335 | tx->local->total_ps_buffered++; | 339 | tx->local->total_ps_buffered++; |
336 | skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb); | ||
337 | return TX_QUEUED; | ||
338 | } | ||
339 | 340 | ||
340 | /* buffered in hardware */ | 341 | skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb); |
341 | info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; | ||
342 | 342 | ||
343 | return TX_CONTINUE; | 343 | return TX_QUEUED; |
344 | } | 344 | } |
345 | 345 | ||
346 | static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta, | 346 | static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta, |
@@ -373,7 +373,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
373 | staflags = get_sta_flags(sta); | 373 | staflags = get_sta_flags(sta); |
374 | 374 | ||
375 | if (unlikely((staflags & WLAN_STA_PS) && | 375 | if (unlikely((staflags & WLAN_STA_PS) && |
376 | !(staflags & WLAN_STA_PSPOLL))) { | 376 | !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) { |
377 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG | 377 | #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG |
378 | printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries " | 378 | printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries " |
379 | "before %d)\n", | 379 | "before %d)\n", |
@@ -400,6 +400,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
400 | sta_info_set_tim_bit(sta); | 400 | sta_info_set_tim_bit(sta); |
401 | 401 | ||
402 | info->control.jiffies = jiffies; | 402 | info->control.jiffies = jiffies; |
403 | info->control.vif = &tx->sdata->vif; | ||
403 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | 404 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; |
404 | skb_queue_tail(&sta->ps_tx_buf, tx->skb); | 405 | skb_queue_tail(&sta->ps_tx_buf, tx->skb); |
405 | return TX_QUEUED; | 406 | return TX_QUEUED; |
@@ -411,24 +412,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
411 | sta->sta.addr); | 412 | sta->sta.addr); |
412 | } | 413 | } |
413 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ | 414 | #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ |
414 | if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) { | ||
415 | /* | ||
416 | * The sleeping station with pending data is now snoozing. | ||
417 | * It queried us for its buffered frames and will go back | ||
418 | * to deep sleep once it got everything. | ||
419 | * | ||
420 | * inform the driver, in case the hardware does powersave | ||
421 | * frame filtering and keeps a station blacklist on its own | ||
422 | * (e.g: p54), so that frames can be delivered unimpeded. | ||
423 | * | ||
424 | * Note: It should be safe to disable the filter now. | ||
425 | * As, it is really unlikely that we still have any pending | ||
426 | * frame for this station in the hw's buffers/fifos left, | ||
427 | * that is not rejected with a unsuccessful tx_status yet. | ||
428 | */ | ||
429 | 415 | ||
430 | info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; | ||
431 | } | ||
432 | return TX_CONTINUE; | 416 | return TX_CONTINUE; |
433 | } | 417 | } |
434 | 418 | ||
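The hunk above, together with the sta_info.h removal of WLAN_STA_PSPOLL, moves the "this frame answers a PS-Poll" state from a station flag to the per-frame IEEE80211_TX_CTL_PSPOLL_RESPONSE bit, and the rx.c PS-Poll handler now queues the response through ieee80211_add_pending_skb() rather than dev_queue_xmit(). Below is a schematic of releasing one buffered frame with the new flag; it illustrates the flag's intent and is not the exact rx.c call site:

        /* assumes mac80211-internal headers (ieee80211_i.h, sta_info.h) */
        static void release_one_ps_frame(struct ieee80211_local *local,
                                         struct sta_info *sta)
        {
                struct sk_buff *skb = skb_dequeue(&sta->ps_tx_buf);
                struct ieee80211_tx_info *info;

                if (!skb)
                        return;

                info = IEEE80211_SKB_CB(skb);
                /* keeps ieee80211_tx_h_unicast_ps_buf() from re-buffering the
                 * frame even though the station is still marked as asleep */
                info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE;
                ieee80211_add_pending_skb(local, skb);
        }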
@@ -451,7 +435,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) | |||
451 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 435 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
452 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | 436 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
453 | 437 | ||
454 | if (unlikely(tx->skb->do_not_encrypt)) | 438 | if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) |
455 | tx->key = NULL; | 439 | tx->key = NULL; |
456 | else if (tx->sta && (key = rcu_dereference(tx->sta->key))) | 440 | else if (tx->sta && (key = rcu_dereference(tx->sta->key))) |
457 | tx->key = key; | 441 | tx->key = key; |
@@ -497,7 +481,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) | |||
497 | } | 481 | } |
498 | 482 | ||
499 | if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) | 483 | if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) |
500 | tx->skb->do_not_encrypt = 1; | 484 | info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; |
501 | 485 | ||
502 | return TX_CONTINUE; | 486 | return TX_CONTINUE; |
503 | } | 487 | } |
@@ -512,6 +496,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) | |||
512 | int i, len; | 496 | int i, len; |
513 | bool inval = false, rts = false, short_preamble = false; | 497 | bool inval = false, rts = false, short_preamble = false; |
514 | struct ieee80211_tx_rate_control txrc; | 498 | struct ieee80211_tx_rate_control txrc; |
499 | u32 sta_flags; | ||
515 | 500 | ||
516 | memset(&txrc, 0, sizeof(txrc)); | 501 | memset(&txrc, 0, sizeof(txrc)); |
517 | 502 | ||
@@ -544,7 +529,26 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) | |||
544 | (tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE)))) | 529 | (tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE)))) |
545 | txrc.short_preamble = short_preamble = true; | 530 | txrc.short_preamble = short_preamble = true; |
546 | 531 | ||
532 | sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0; | ||
533 | |||
534 | /* | ||
535 | * Let's not bother rate control if we're associated but cannot | ||
536 | * talk to the sta. This should not happen. | ||
537 | */ | ||
538 | if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && | ||
539 | (sta_flags & WLAN_STA_ASSOC) && | ||
540 | !rate_usable_index_exists(sband, &tx->sta->sta), | ||
541 | "%s: Dropped data frame as no usable bitrate found while " | ||
542 | "scanning and associated. Target station: " | ||
543 | "%pM on %d GHz band\n", | ||
544 | tx->dev->name, hdr->addr1, | ||
545 | tx->channel->band ? 5 : 2)) | ||
546 | return TX_DROP; | ||
547 | 547 | ||
548 | /* | ||
549 | * If we're associated with the sta at this point we know we can at | ||
550 | * least send the frame at the lowest bit rate. | ||
551 | */ | ||
548 | rate_control_get_rate(tx->sdata, tx->sta, &txrc); | 552 | rate_control_get_rate(tx->sdata, tx->sta, &txrc); |
549 | 553 | ||
550 | if (unlikely(info->control.rates[0].idx < 0)) | 554 | if (unlikely(info->control.rates[0].idx < 0)) |
@@ -676,7 +680,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) | |||
676 | * number, if we have no matching interface then we | 680 | * number, if we have no matching interface then we |
677 | * neither assign one ourselves nor ask the driver to. | 681 | * neither assign one ourselves nor ask the driver to. |
678 | */ | 682 | */ |
679 | if (unlikely(!info->control.vif)) | 683 | if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR)) |
680 | return TX_CONTINUE; | 684 | return TX_CONTINUE; |
681 | 685 | ||
682 | if (unlikely(ieee80211_is_ctl(hdr->frame_control))) | 686 | if (unlikely(ieee80211_is_ctl(hdr->frame_control))) |
@@ -696,7 +700,6 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) | |||
696 | /* for pure STA mode without beacons, we can do it */ | 700 | /* for pure STA mode without beacons, we can do it */ |
697 | hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number); | 701 | hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number); |
698 | tx->sdata->sequence_number += 0x10; | 702 | tx->sdata->sequence_number += 0x10; |
699 | tx->sdata->sequence_number &= IEEE80211_SCTL_SEQ; | ||
700 | return TX_CONTINUE; | 703 | return TX_CONTINUE; |
701 | } | 704 | } |
702 | 705 | ||
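For reference, the "+= 0x10" above relies on the layout of the 802.11 sequence-control field: the fragment number occupies the low four bits and the sequence number the upper twelve (the IEEE80211_SCTL_SEQ and IEEE80211_SCTL_FRAG masks). A minimal sketch of how such a value is composed, not taken from this patch:

#include <linux/ieee80211.h>

/* seq is the logical 0..4095 sequence number, frag the 0..15 fragment
 * number; keeping the per-sdata counter pre-shifted is what lets the
 * code above advance it with a plain "+= 0x10" */
static inline __le16 sketch_seq_ctrl(u16 seq, u8 frag)
{
	return cpu_to_le16(((seq << 4) & IEEE80211_SCTL_SEQ) |
			   (frag & IEEE80211_SCTL_FRAG));
}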
@@ -754,9 +757,7 @@ static int ieee80211_fragment(struct ieee80211_local *local, | |||
754 | memcpy(tmp->cb, skb->cb, sizeof(tmp->cb)); | 757 | memcpy(tmp->cb, skb->cb, sizeof(tmp->cb)); |
755 | skb_copy_queue_mapping(tmp, skb); | 758 | skb_copy_queue_mapping(tmp, skb); |
756 | tmp->priority = skb->priority; | 759 | tmp->priority = skb->priority; |
757 | tmp->do_not_encrypt = skb->do_not_encrypt; | ||
758 | tmp->dev = skb->dev; | 760 | tmp->dev = skb->dev; |
759 | tmp->iif = skb->iif; | ||
760 | 761 | ||
761 | /* copy header and data */ | 762 | /* copy header and data */ |
762 | memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen); | 763 | memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen); |
@@ -784,7 +785,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) | |||
784 | 785 | ||
785 | /* | 786 | /* |
786 | * Warn when submitting a fragmented A-MPDU frame and drop it. | 787 | * Warn when submitting a fragmented A-MPDU frame and drop it. |
787 | * This scenario is handled in __ieee80211_tx_prepare but extra | 788 | * This scenario is handled in ieee80211_tx_prepare but extra |
788 | * caution taken here as fragmented ampdu may cause Tx stop. | 789 | * caution taken here as fragmented ampdu may cause Tx stop. |
789 | */ | 790 | */ |
790 | if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) | 791 | if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) |
@@ -842,6 +843,23 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) | |||
842 | } | 843 | } |
843 | 844 | ||
844 | static ieee80211_tx_result debug_noinline | 845 | static ieee80211_tx_result debug_noinline |
846 | ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) | ||
847 | { | ||
848 | struct sk_buff *skb = tx->skb; | ||
849 | |||
850 | if (!tx->sta) | ||
851 | return TX_CONTINUE; | ||
852 | |||
853 | tx->sta->tx_packets++; | ||
854 | do { | ||
855 | tx->sta->tx_fragments++; | ||
856 | tx->sta->tx_bytes += skb->len; | ||
857 | } while ((skb = skb->next)); | ||
858 | |||
859 | return TX_CONTINUE; | ||
860 | } | ||
861 | |||
862 | static ieee80211_tx_result debug_noinline | ||
845 | ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) | 863 | ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) |
846 | { | 864 | { |
847 | if (!tx->key) | 865 | if (!tx->key) |
@@ -885,23 +903,6 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) | |||
885 | return TX_CONTINUE; | 903 | return TX_CONTINUE; |
886 | } | 904 | } |
887 | 905 | ||
888 | static ieee80211_tx_result debug_noinline | ||
889 | ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) | ||
890 | { | ||
891 | struct sk_buff *skb = tx->skb; | ||
892 | |||
893 | if (!tx->sta) | ||
894 | return TX_CONTINUE; | ||
895 | |||
896 | tx->sta->tx_packets++; | ||
897 | do { | ||
898 | tx->sta->tx_fragments++; | ||
899 | tx->sta->tx_bytes += skb->len; | ||
900 | } while ((skb = skb->next)); | ||
901 | |||
902 | return TX_CONTINUE; | ||
903 | } | ||
904 | |||
905 | /* actual transmit path */ | 906 | /* actual transmit path */ |
906 | 907 | ||
907 | /* | 908 | /* |
@@ -923,11 +924,12 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
923 | struct ieee80211_radiotap_header *rthdr = | 924 | struct ieee80211_radiotap_header *rthdr = |
924 | (struct ieee80211_radiotap_header *) skb->data; | 925 | (struct ieee80211_radiotap_header *) skb->data; |
925 | struct ieee80211_supported_band *sband; | 926 | struct ieee80211_supported_band *sband; |
927 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
926 | int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); | 928 | int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); |
927 | 929 | ||
928 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; | 930 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; |
929 | 931 | ||
930 | skb->do_not_encrypt = 1; | 932 | info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; |
931 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; | 933 | tx->flags &= ~IEEE80211_TX_FRAGMENTED; |
932 | 934 | ||
933 | /* | 935 | /* |
@@ -965,7 +967,7 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
965 | skb_trim(skb, skb->len - FCS_LEN); | 967 | skb_trim(skb, skb->len - FCS_LEN); |
966 | } | 968 | } |
967 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) | 969 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) |
968 | tx->skb->do_not_encrypt = 0; | 970 | info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT; |
969 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) | 971 | if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) |
970 | tx->flags |= IEEE80211_TX_FRAGMENTED; | 972 | tx->flags |= IEEE80211_TX_FRAGMENTED; |
971 | break; | 973 | break; |
@@ -998,13 +1000,12 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
998 | * initialises @tx | 1000 | * initialises @tx |
999 | */ | 1001 | */ |
1000 | static ieee80211_tx_result | 1002 | static ieee80211_tx_result |
1001 | __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | 1003 | ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, |
1002 | struct sk_buff *skb, | 1004 | struct ieee80211_tx_data *tx, |
1003 | struct net_device *dev) | 1005 | struct sk_buff *skb) |
1004 | { | 1006 | { |
1005 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1007 | struct ieee80211_local *local = sdata->local; |
1006 | struct ieee80211_hdr *hdr; | 1008 | struct ieee80211_hdr *hdr; |
1007 | struct ieee80211_sub_if_data *sdata; | ||
1008 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1009 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1009 | int hdrlen, tid; | 1010 | int hdrlen, tid; |
1010 | u8 *qc, *state; | 1011 | u8 *qc, *state; |
@@ -1012,9 +1013,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1012 | 1013 | ||
1013 | memset(tx, 0, sizeof(*tx)); | 1014 | memset(tx, 0, sizeof(*tx)); |
1014 | tx->skb = skb; | 1015 | tx->skb = skb; |
1015 | tx->dev = dev; /* use original interface */ | 1016 | tx->dev = sdata->dev; /* use original interface */ |
1016 | tx->local = local; | 1017 | tx->local = local; |
1017 | tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1018 | tx->sdata = sdata; |
1018 | tx->channel = local->hw.conf.channel; | 1019 | tx->channel = local->hw.conf.channel; |
1019 | /* | 1020 | /* |
1020 | * Set this flag (used below to indicate "automatic fragmentation"), | 1021 | * Set this flag (used below to indicate "automatic fragmentation"), |
@@ -1023,7 +1024,6 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1023 | tx->flags |= IEEE80211_TX_FRAGMENTED; | 1024 | tx->flags |= IEEE80211_TX_FRAGMENTED; |
1024 | 1025 | ||
1025 | /* process and remove the injection radiotap header */ | 1026 | /* process and remove the injection radiotap header */ |
1026 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1027 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) { | 1027 | if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) { |
1028 | if (!__ieee80211_parse_tx_radiotap(tx, skb)) | 1028 | if (!__ieee80211_parse_tx_radiotap(tx, skb)) |
1029 | return TX_DROP; | 1029 | return TX_DROP; |
@@ -1075,6 +1075,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1075 | } else if (*state != HT_AGG_STATE_IDLE) { | 1075 | } else if (*state != HT_AGG_STATE_IDLE) { |
1076 | /* in progress */ | 1076 | /* in progress */ |
1077 | queued = true; | 1077 | queued = true; |
1078 | info->control.vif = &sdata->vif; | ||
1078 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | 1079 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; |
1079 | __skb_queue_tail(&tid_tx->pending, skb); | 1080 | __skb_queue_tail(&tid_tx->pending, skb); |
1080 | } | 1081 | } |
@@ -1119,50 +1120,29 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx, | |||
1119 | return TX_CONTINUE; | 1120 | return TX_CONTINUE; |
1120 | } | 1121 | } |
1121 | 1122 | ||
1122 | /* | ||
1123 | * NB: @tx is uninitialised when passed in here | ||
1124 | */ | ||
1125 | static int ieee80211_tx_prepare(struct ieee80211_local *local, | ||
1126 | struct ieee80211_tx_data *tx, | ||
1127 | struct sk_buff *skb) | ||
1128 | { | ||
1129 | struct net_device *dev; | ||
1130 | |||
1131 | dev = dev_get_by_index(&init_net, skb->iif); | ||
1132 | if (unlikely(dev && !is_ieee80211_device(local, dev))) { | ||
1133 | dev_put(dev); | ||
1134 | dev = NULL; | ||
1135 | } | ||
1136 | if (unlikely(!dev)) | ||
1137 | return -ENODEV; | ||
1138 | /* | ||
1139 | * initialises tx with control | ||
1140 | * | ||
1141 | * return value is safe to ignore here because this function | ||
1142 | * can only be invoked for multicast frames | ||
1143 | * | ||
1144 | * XXX: clean up | ||
1145 | */ | ||
1146 | __ieee80211_tx_prepare(tx, skb, dev); | ||
1147 | dev_put(dev); | ||
1148 | return 0; | ||
1149 | } | ||
1150 | |||
1151 | static int __ieee80211_tx(struct ieee80211_local *local, | 1123 | static int __ieee80211_tx(struct ieee80211_local *local, |
1152 | struct sk_buff **skbp, | 1124 | struct sk_buff **skbp, |
1153 | struct sta_info *sta) | 1125 | struct sta_info *sta, |
1126 | bool txpending) | ||
1154 | { | 1127 | { |
1155 | struct sk_buff *skb = *skbp, *next; | 1128 | struct sk_buff *skb = *skbp, *next; |
1156 | struct ieee80211_tx_info *info; | 1129 | struct ieee80211_tx_info *info; |
1130 | struct ieee80211_sub_if_data *sdata; | ||
1131 | unsigned long flags; | ||
1157 | int ret, len; | 1132 | int ret, len; |
1158 | bool fragm = false; | 1133 | bool fragm = false; |
1159 | 1134 | ||
1160 | local->mdev->trans_start = jiffies; | ||
1161 | |||
1162 | while (skb) { | 1135 | while (skb) { |
1163 | if (ieee80211_queue_stopped(&local->hw, | 1136 | int q = skb_get_queue_mapping(skb); |
1164 | skb_get_queue_mapping(skb))) | 1137 | |
1165 | return IEEE80211_TX_PENDING; | 1138 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); |
1139 | ret = IEEE80211_TX_OK; | ||
1140 | if (local->queue_stop_reasons[q] || | ||
1141 | (!txpending && !skb_queue_empty(&local->pending[q]))) | ||
1142 | ret = IEEE80211_TX_PENDING; | ||
1143 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | ||
1144 | if (ret != IEEE80211_TX_OK) | ||
1145 | return ret; | ||
1166 | 1146 | ||
1167 | info = IEEE80211_SKB_CB(skb); | 1147 | info = IEEE80211_SKB_CB(skb); |
1168 | 1148 | ||
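Condensed, the gate that replaces the old ieee80211_queue_stopped() test reads as below (a hedged restatement of the hunk, not extra code in the tree): a frame resubmitted by the pending tasklet only has to respect explicit stop reasons, while a fresh frame must also wait behind anything already sitting on local->pending[q] so queue order is preserved.

/* caller holds queue_stop_reason_lock; true means __ieee80211_tx()
 * should return IEEE80211_TX_PENDING instead of calling drv_tx() */
static bool sketch_tx_must_wait(struct ieee80211_local *local, int q,
				bool txpending)
{
	return local->queue_stop_reasons[q] ||
	       (!txpending && !skb_queue_empty(&local->pending[q]));
}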
@@ -1172,13 +1152,35 @@ static int __ieee80211_tx(struct ieee80211_local *local, | |||
1172 | 1152 | ||
1173 | next = skb->next; | 1153 | next = skb->next; |
1174 | len = skb->len; | 1154 | len = skb->len; |
1155 | |||
1156 | if (next) | ||
1157 | info->flags |= IEEE80211_TX_CTL_MORE_FRAMES; | ||
1158 | |||
1159 | sdata = vif_to_sdata(info->control.vif); | ||
1160 | |||
1161 | switch (sdata->vif.type) { | ||
1162 | case NL80211_IFTYPE_MONITOR: | ||
1163 | info->control.vif = NULL; | ||
1164 | break; | ||
1165 | case NL80211_IFTYPE_AP_VLAN: | ||
1166 | info->control.vif = &container_of(sdata->bss, | ||
1167 | struct ieee80211_sub_if_data, u.ap)->vif; | ||
1168 | break; | ||
1169 | default: | ||
1170 | /* keep */ | ||
1171 | break; | ||
1172 | } | ||
1173 | |||
1175 | ret = drv_tx(local, skb); | 1174 | ret = drv_tx(local, skb); |
1176 | if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) { | 1175 | if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) { |
1177 | dev_kfree_skb(skb); | 1176 | dev_kfree_skb(skb); |
1178 | ret = NETDEV_TX_OK; | 1177 | ret = NETDEV_TX_OK; |
1179 | } | 1178 | } |
1180 | if (ret != NETDEV_TX_OK) | 1179 | if (ret != NETDEV_TX_OK) { |
1180 | info->control.vif = &sdata->vif; | ||
1181 | return IEEE80211_TX_AGAIN; | 1181 | return IEEE80211_TX_AGAIN; |
1182 | } | ||
1183 | |||
1182 | *skbp = skb = next; | 1184 | *skbp = skb = next; |
1183 | ieee80211_led_tx(local, 1); | 1185 | ieee80211_led_tx(local, 1); |
1184 | fragm = true; | 1186 | fragm = true; |
@@ -1210,9 +1212,9 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) | |||
1210 | CALL_TXH(ieee80211_tx_h_sequence) | 1212 | CALL_TXH(ieee80211_tx_h_sequence) |
1211 | CALL_TXH(ieee80211_tx_h_fragment) | 1213 | CALL_TXH(ieee80211_tx_h_fragment) |
1212 | /* handlers after fragment must be aware of tx info fragmentation! */ | 1214 | /* handlers after fragment must be aware of tx info fragmentation! */ |
1215 | CALL_TXH(ieee80211_tx_h_stats) | ||
1213 | CALL_TXH(ieee80211_tx_h_encrypt) | 1216 | CALL_TXH(ieee80211_tx_h_encrypt) |
1214 | CALL_TXH(ieee80211_tx_h_calculate_duration) | 1217 | CALL_TXH(ieee80211_tx_h_calculate_duration) |
1215 | CALL_TXH(ieee80211_tx_h_stats) | ||
1216 | #undef CALL_TXH | 1218 | #undef CALL_TXH |
1217 | 1219 | ||
1218 | txh_done: | 1220 | txh_done: |
@@ -1234,10 +1236,10 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) | |||
1234 | return 0; | 1236 | return 0; |
1235 | } | 1237 | } |
1236 | 1238 | ||
1237 | static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | 1239 | static void ieee80211_tx(struct ieee80211_sub_if_data *sdata, |
1238 | bool txpending) | 1240 | struct sk_buff *skb, bool txpending) |
1239 | { | 1241 | { |
1240 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1242 | struct ieee80211_local *local = sdata->local; |
1241 | struct ieee80211_tx_data tx; | 1243 | struct ieee80211_tx_data tx; |
1242 | ieee80211_tx_result res_prepare; | 1244 | ieee80211_tx_result res_prepare; |
1243 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1245 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
@@ -1248,8 +1250,6 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | |||
1248 | 1250 | ||
1249 | queue = skb_get_queue_mapping(skb); | 1251 | queue = skb_get_queue_mapping(skb); |
1250 | 1252 | ||
1251 | WARN_ON(!txpending && !skb_queue_empty(&local->pending[queue])); | ||
1252 | |||
1253 | if (unlikely(skb->len < 10)) { | 1253 | if (unlikely(skb->len < 10)) { |
1254 | dev_kfree_skb(skb); | 1254 | dev_kfree_skb(skb); |
1255 | return; | 1255 | return; |
@@ -1258,7 +1258,7 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | |||
1258 | rcu_read_lock(); | 1258 | rcu_read_lock(); |
1259 | 1259 | ||
1260 | /* initialises tx */ | 1260 | /* initialises tx */ |
1261 | res_prepare = __ieee80211_tx_prepare(&tx, skb, dev); | 1261 | res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); |
1262 | 1262 | ||
1263 | if (unlikely(res_prepare == TX_DROP)) { | 1263 | if (unlikely(res_prepare == TX_DROP)) { |
1264 | dev_kfree_skb(skb); | 1264 | dev_kfree_skb(skb); |
@@ -1277,7 +1277,7 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | |||
1277 | 1277 | ||
1278 | retries = 0; | 1278 | retries = 0; |
1279 | retry: | 1279 | retry: |
1280 | ret = __ieee80211_tx(local, &tx.skb, tx.sta); | 1280 | ret = __ieee80211_tx(local, &tx.skb, tx.sta, txpending); |
1281 | switch (ret) { | 1281 | switch (ret) { |
1282 | case IEEE80211_TX_OK: | 1282 | case IEEE80211_TX_OK: |
1283 | break; | 1283 | break; |
@@ -1295,34 +1295,35 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb, | |||
1295 | 1295 | ||
1296 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | 1296 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); |
1297 | 1297 | ||
1298 | if (__netif_subqueue_stopped(local->mdev, queue)) { | 1298 | if (local->queue_stop_reasons[queue] || |
1299 | !skb_queue_empty(&local->pending[queue])) { | ||
1300 | /* | ||
1301 | * if queue is stopped, queue up frames for later | ||
1302 | * transmission from the tasklet | ||
1303 | */ | ||
1299 | do { | 1304 | do { |
1300 | next = skb->next; | 1305 | next = skb->next; |
1301 | skb->next = NULL; | 1306 | skb->next = NULL; |
1302 | if (unlikely(txpending)) | 1307 | if (unlikely(txpending)) |
1303 | skb_queue_head(&local->pending[queue], | 1308 | __skb_queue_head(&local->pending[queue], |
1304 | skb); | 1309 | skb); |
1305 | else | 1310 | else |
1306 | skb_queue_tail(&local->pending[queue], | 1311 | __skb_queue_tail(&local->pending[queue], |
1307 | skb); | 1312 | skb); |
1308 | } while ((skb = next)); | 1313 | } while ((skb = next)); |
1309 | 1314 | ||
1310 | /* | ||
1311 | * Make sure nobody will enable the queue on us | ||
1312 | * (without going through the tasklet) nor disable the | ||
1313 | * netdev queue underneath the pending handling code. | ||
1314 | */ | ||
1315 | __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING, | ||
1316 | &local->queue_stop_reasons[queue]); | ||
1317 | |||
1318 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, | 1315 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, |
1319 | flags); | 1316 | flags); |
1320 | } else { | 1317 | } else { |
1318 | /* | ||
1319 | * otherwise retry, but this is a race condition or | ||
1320 | * a driver bug (which we warn about if it persists) | ||
1321 | */ | ||
1321 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, | 1322 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, |
1322 | flags); | 1323 | flags); |
1323 | 1324 | ||
1324 | retries++; | 1325 | retries++; |
1325 | if (WARN(retries > 10, "tx refused but queue active")) | 1326 | if (WARN(retries > 10, "tx refused but queue active\n")) |
1326 | goto drop; | 1327 | goto drop; |
1327 | goto retry; | 1328 | goto retry; |
1328 | } | 1329 | } |
@@ -1383,44 +1384,25 @@ static int ieee80211_skb_resize(struct ieee80211_local *local, | |||
1383 | return 0; | 1384 | return 0; |
1384 | } | 1385 | } |
1385 | 1386 | ||
1386 | int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1387 | static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, |
1388 | struct sk_buff *skb) | ||
1387 | { | 1389 | { |
1388 | struct ieee80211_master_priv *mpriv = netdev_priv(dev); | 1390 | struct ieee80211_local *local = sdata->local; |
1389 | struct ieee80211_local *local = mpriv->local; | ||
1390 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1391 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1391 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 1392 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
1392 | struct net_device *odev = NULL; | 1393 | struct ieee80211_sub_if_data *tmp_sdata; |
1393 | struct ieee80211_sub_if_data *osdata; | ||
1394 | int headroom; | 1394 | int headroom; |
1395 | bool may_encrypt; | 1395 | bool may_encrypt; |
1396 | enum { | 1396 | |
1397 | NOT_MONITOR, | 1397 | dev_hold(sdata->dev); |
1398 | FOUND_SDATA, | ||
1399 | UNKNOWN_ADDRESS, | ||
1400 | } monitor_iface = NOT_MONITOR; | ||
1401 | |||
1402 | if (skb->iif) | ||
1403 | odev = dev_get_by_index(&init_net, skb->iif); | ||
1404 | if (unlikely(odev && !is_ieee80211_device(local, odev))) { | ||
1405 | dev_put(odev); | ||
1406 | odev = NULL; | ||
1407 | } | ||
1408 | if (unlikely(!odev)) { | ||
1409 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | ||
1410 | printk(KERN_DEBUG "%s: Discarded packet with nonexistent " | ||
1411 | "originating device\n", dev->name); | ||
1412 | #endif | ||
1413 | dev_kfree_skb(skb); | ||
1414 | return NETDEV_TX_OK; | ||
1415 | } | ||
1416 | 1398 | ||
1417 | if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && | 1399 | if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && |
1418 | local->hw.conf.dynamic_ps_timeout > 0 && | 1400 | local->hw.conf.dynamic_ps_timeout > 0 && |
1419 | !local->sw_scanning && !local->hw_scanning && local->ps_sdata) { | 1401 | !(local->scanning) && local->ps_sdata) { |
1420 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { | 1402 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { |
1421 | ieee80211_stop_queues_by_reason(&local->hw, | 1403 | ieee80211_stop_queues_by_reason(&local->hw, |
1422 | IEEE80211_QUEUE_STOP_REASON_PS); | 1404 | IEEE80211_QUEUE_STOP_REASON_PS); |
1423 | queue_work(local->hw.workqueue, | 1405 | ieee80211_queue_work(&local->hw, |
1424 | &local->dynamic_ps_disable_work); | 1406 | &local->dynamic_ps_disable_work); |
1425 | } | 1407 | } |
1426 | 1408 | ||
@@ -1428,31 +1410,13 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1428 | msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); | 1410 | msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); |
1429 | } | 1411 | } |
1430 | 1412 | ||
1431 | memset(info, 0, sizeof(*info)); | ||
1432 | |||
1433 | info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; | 1413 | info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; |
1434 | 1414 | ||
1435 | osdata = IEEE80211_DEV_TO_SUB_IF(odev); | 1415 | if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) { |
1436 | |||
1437 | if (ieee80211_vif_is_mesh(&osdata->vif) && | ||
1438 | ieee80211_is_data(hdr->frame_control)) { | ||
1439 | if (is_multicast_ether_addr(hdr->addr3)) | ||
1440 | memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); | ||
1441 | else | ||
1442 | if (mesh_nexthop_lookup(skb, osdata)) { | ||
1443 | dev_put(odev); | ||
1444 | return NETDEV_TX_OK; | ||
1445 | } | ||
1446 | if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) | ||
1447 | IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh, | ||
1448 | fwded_frames); | ||
1449 | } else if (unlikely(osdata->vif.type == NL80211_IFTYPE_MONITOR)) { | ||
1450 | struct ieee80211_sub_if_data *sdata; | ||
1451 | int hdrlen; | 1416 | int hdrlen; |
1452 | u16 len_rthdr; | 1417 | u16 len_rthdr; |
1453 | 1418 | ||
1454 | info->flags |= IEEE80211_TX_CTL_INJECTED; | 1419 | info->flags |= IEEE80211_TX_CTL_INJECTED; |
1455 | monitor_iface = UNKNOWN_ADDRESS; | ||
1456 | 1420 | ||
1457 | len_rthdr = ieee80211_get_radiotap_len(skb->data); | 1421 | len_rthdr = ieee80211_get_radiotap_len(skb->data); |
1458 | hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); | 1422 | hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); |
@@ -1471,20 +1435,17 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1471 | */ | 1435 | */ |
1472 | 1436 | ||
1473 | rcu_read_lock(); | 1437 | rcu_read_lock(); |
1474 | list_for_each_entry_rcu(sdata, &local->interfaces, | 1438 | list_for_each_entry_rcu(tmp_sdata, &local->interfaces, |
1475 | list) { | 1439 | list) { |
1476 | if (!netif_running(sdata->dev)) | 1440 | if (!netif_running(tmp_sdata->dev)) |
1477 | continue; | 1441 | continue; |
1478 | if (sdata->vif.type != NL80211_IFTYPE_AP) | 1442 | if (tmp_sdata->vif.type != NL80211_IFTYPE_AP) |
1479 | continue; | 1443 | continue; |
1480 | if (compare_ether_addr(sdata->dev->dev_addr, | 1444 | if (compare_ether_addr(tmp_sdata->dev->dev_addr, |
1481 | hdr->addr2)) { | 1445 | hdr->addr2)) { |
1482 | dev_hold(sdata->dev); | 1446 | dev_hold(tmp_sdata->dev); |
1483 | dev_put(odev); | 1447 | dev_put(sdata->dev); |
1484 | osdata = sdata; | 1448 | sdata = tmp_sdata; |
1485 | odev = osdata->dev; | ||
1486 | skb->iif = sdata->dev->ifindex; | ||
1487 | monitor_iface = FOUND_SDATA; | ||
1488 | break; | 1449 | break; |
1489 | } | 1450 | } |
1490 | } | 1451 | } |
@@ -1492,40 +1453,44 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1492 | } | 1453 | } |
1493 | } | 1454 | } |
1494 | 1455 | ||
1495 | may_encrypt = !skb->do_not_encrypt; | 1456 | may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); |
1496 | 1457 | ||
1497 | headroom = osdata->local->tx_headroom; | 1458 | headroom = local->tx_headroom; |
1498 | if (may_encrypt) | 1459 | if (may_encrypt) |
1499 | headroom += IEEE80211_ENCRYPT_HEADROOM; | 1460 | headroom += IEEE80211_ENCRYPT_HEADROOM; |
1500 | headroom -= skb_headroom(skb); | 1461 | headroom -= skb_headroom(skb); |
1501 | headroom = max_t(int, 0, headroom); | 1462 | headroom = max_t(int, 0, headroom); |
1502 | 1463 | ||
1503 | if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) { | 1464 | if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) { |
1504 | dev_kfree_skb(skb); | 1465 | dev_kfree_skb(skb); |
1505 | dev_put(odev); | 1466 | dev_put(sdata->dev); |
1506 | return NETDEV_TX_OK; | 1467 | return; |
1507 | } | 1468 | } |
1508 | 1469 | ||
1509 | if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN) | 1470 | info->control.vif = &sdata->vif; |
1510 | osdata = container_of(osdata->bss, | ||
1511 | struct ieee80211_sub_if_data, | ||
1512 | u.ap); | ||
1513 | if (likely(monitor_iface != UNKNOWN_ADDRESS)) | ||
1514 | info->control.vif = &osdata->vif; | ||
1515 | 1471 | ||
1516 | ieee80211_tx(odev, skb, false); | 1472 | if (ieee80211_vif_is_mesh(&sdata->vif) && |
1517 | dev_put(odev); | 1473 | ieee80211_is_data(hdr->frame_control) && |
1474 | !is_multicast_ether_addr(hdr->addr1)) | ||
1475 | if (mesh_nexthop_lookup(skb, sdata)) { | ||
1476 | /* skb queued: don't free */ | ||
1477 | dev_put(sdata->dev); | ||
1478 | return; | ||
1479 | } | ||
1518 | 1480 | ||
1519 | return NETDEV_TX_OK; | 1481 | ieee80211_select_queue(local, skb); |
1482 | ieee80211_tx(sdata, skb, false); | ||
1483 | dev_put(sdata->dev); | ||
1520 | } | 1484 | } |
1521 | 1485 | ||
1522 | int ieee80211_monitor_start_xmit(struct sk_buff *skb, | 1486 | netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, |
1523 | struct net_device *dev) | 1487 | struct net_device *dev) |
1524 | { | 1488 | { |
1525 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1489 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1526 | struct ieee80211_channel *chan = local->hw.conf.channel; | 1490 | struct ieee80211_channel *chan = local->hw.conf.channel; |
1527 | struct ieee80211_radiotap_header *prthdr = | 1491 | struct ieee80211_radiotap_header *prthdr = |
1528 | (struct ieee80211_radiotap_header *)skb->data; | 1492 | (struct ieee80211_radiotap_header *)skb->data; |
1493 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1529 | u16 len_rthdr; | 1494 | u16 len_rthdr; |
1530 | 1495 | ||
1531 | /* | 1496 | /* |
@@ -1563,15 +1528,6 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb, | |||
1563 | if (unlikely(skb->len < len_rthdr)) | 1528 | if (unlikely(skb->len < len_rthdr)) |
1564 | goto fail; /* skb too short for claimed rt header extent */ | 1529 | goto fail; /* skb too short for claimed rt header extent */ |
1565 | 1530 | ||
1566 | skb->dev = local->mdev; | ||
1567 | |||
1568 | /* needed because we set skb device to master */ | ||
1569 | skb->iif = dev->ifindex; | ||
1570 | |||
1571 | /* sometimes we do encrypt injected frames, will be fixed | ||
1572 | * up in radiotap parser if not wanted */ | ||
1573 | skb->do_not_encrypt = 0; | ||
1574 | |||
1575 | /* | 1531 | /* |
1576 | * fix up the pointers accounting for the radiotap | 1532 | * fix up the pointers accounting for the radiotap |
1577 | * header still being in there. We are being given | 1533 | * header still being in there. We are being given |
@@ -1586,8 +1542,10 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb, | |||
1586 | skb_set_network_header(skb, len_rthdr); | 1542 | skb_set_network_header(skb, len_rthdr); |
1587 | skb_set_transport_header(skb, len_rthdr); | 1543 | skb_set_transport_header(skb, len_rthdr); |
1588 | 1544 | ||
1589 | /* pass the radiotap header up to the next stage intact */ | 1545 | memset(info, 0, sizeof(*info)); |
1590 | dev_queue_xmit(skb); | 1546 | |
1547 | /* pass the radiotap header up to xmit */ | ||
1548 | ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb); | ||
1591 | return NETDEV_TX_OK; | 1549 | return NETDEV_TX_OK; |
1592 | 1550 | ||
1593 | fail: | 1551 | fail: |
@@ -1610,11 +1568,12 @@ fail: | |||
1610 | * encapsulated packet will then be passed to master interface, wlan#.11, for | 1568 | * encapsulated packet will then be passed to master interface, wlan#.11, for |
1611 | * transmission (through low-level driver). | 1569 | * transmission (through low-level driver). |
1612 | */ | 1570 | */ |
1613 | int ieee80211_subif_start_xmit(struct sk_buff *skb, | 1571 | netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, |
1614 | struct net_device *dev) | 1572 | struct net_device *dev) |
1615 | { | 1573 | { |
1616 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1574 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1617 | struct ieee80211_local *local = sdata->local; | 1575 | struct ieee80211_local *local = sdata->local; |
1576 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1618 | int ret = NETDEV_TX_BUSY, head_need; | 1577 | int ret = NETDEV_TX_BUSY, head_need; |
1619 | u16 ethertype, hdrlen, meshhdrlen = 0; | 1578 | u16 ethertype, hdrlen, meshhdrlen = 0; |
1620 | __le16 fc; | 1579 | __le16 fc; |
@@ -1627,7 +1586,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1627 | u32 sta_flags = 0; | 1586 | u32 sta_flags = 0; |
1628 | 1587 | ||
1629 | if (unlikely(skb->len < ETH_HLEN)) { | 1588 | if (unlikely(skb->len < ETH_HLEN)) { |
1630 | ret = 0; | 1589 | ret = NETDEV_TX_OK; |
1631 | goto fail; | 1590 | goto fail; |
1632 | } | 1591 | } |
1633 | 1592 | ||
@@ -1660,52 +1619,58 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1660 | break; | 1619 | break; |
1661 | #ifdef CONFIG_MAC80211_MESH | 1620 | #ifdef CONFIG_MAC80211_MESH |
1662 | case NL80211_IFTYPE_MESH_POINT: | 1621 | case NL80211_IFTYPE_MESH_POINT: |
1663 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); | ||
1664 | if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { | 1622 | if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { |
1665 | /* Do not send frames with mesh_ttl == 0 */ | 1623 | /* Do not send frames with mesh_ttl == 0 */ |
1666 | sdata->u.mesh.mshstats.dropped_frames_ttl++; | 1624 | sdata->u.mesh.mshstats.dropped_frames_ttl++; |
1667 | ret = 0; | 1625 | ret = NETDEV_TX_OK; |
1668 | goto fail; | 1626 | goto fail; |
1669 | } | 1627 | } |
1670 | memset(&mesh_hdr, 0, sizeof(mesh_hdr)); | ||
1671 | 1628 | ||
1672 | if (compare_ether_addr(dev->dev_addr, | 1629 | if (compare_ether_addr(dev->dev_addr, |
1673 | skb->data + ETH_ALEN) == 0) { | 1630 | skb->data + ETH_ALEN) == 0) { |
1674 | /* RA TA DA SA */ | 1631 | hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, |
1675 | memset(hdr.addr1, 0, ETH_ALEN); | 1632 | skb->data, skb->data + ETH_ALEN); |
1676 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); | 1633 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, |
1677 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | 1634 | sdata, NULL, NULL, NULL); |
1678 | memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); | ||
1679 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata); | ||
1680 | } else { | 1635 | } else { |
1681 | /* packet from other interface */ | 1636 | /* packet from other interface */ |
1682 | struct mesh_path *mppath; | 1637 | struct mesh_path *mppath; |
1638 | int is_mesh_mcast = 1; | ||
1639 | char *mesh_da; | ||
1683 | 1640 | ||
1684 | memset(hdr.addr1, 0, ETH_ALEN); | 1641 | rcu_read_lock(); |
1685 | memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); | ||
1686 | memcpy(hdr.addr4, dev->dev_addr, ETH_ALEN); | ||
1687 | |||
1688 | if (is_multicast_ether_addr(skb->data)) | 1642 | if (is_multicast_ether_addr(skb->data)) |
1689 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | 1643 | /* DA TA mSA AE:SA */ |
1644 | mesh_da = skb->data; | ||
1690 | else { | 1645 | else { |
1691 | rcu_read_lock(); | ||
1692 | mppath = mpp_path_lookup(skb->data, sdata); | 1646 | mppath = mpp_path_lookup(skb->data, sdata); |
1693 | if (mppath) | 1647 | if (mppath) { |
1694 | memcpy(hdr.addr3, mppath->mpp, ETH_ALEN); | 1648 | /* RA TA mDA mSA AE:DA SA */ |
1695 | else | 1649 | mesh_da = mppath->mpp; |
1696 | memset(hdr.addr3, 0xff, ETH_ALEN); | 1650 | is_mesh_mcast = 0; |
1697 | rcu_read_unlock(); | 1651 | } else |
1652 | /* DA TA mSA AE:SA */ | ||
1653 | mesh_da = dev->broadcast; | ||
1698 | } | 1654 | } |
1655 | hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, | ||
1656 | mesh_da, dev->dev_addr); | ||
1657 | rcu_read_unlock(); | ||
1658 | if (is_mesh_mcast) | ||
1659 | meshhdrlen = | ||
1660 | ieee80211_new_mesh_header(&mesh_hdr, | ||
1661 | sdata, | ||
1662 | skb->data + ETH_ALEN, | ||
1663 | NULL, | ||
1664 | NULL); | ||
1665 | else | ||
1666 | meshhdrlen = | ||
1667 | ieee80211_new_mesh_header(&mesh_hdr, | ||
1668 | sdata, | ||
1669 | NULL, | ||
1670 | skb->data, | ||
1671 | skb->data + ETH_ALEN); | ||
1699 | 1672 | ||
1700 | mesh_hdr.flags |= MESH_FLAGS_AE_A5_A6; | ||
1701 | mesh_hdr.ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; | ||
1702 | put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &mesh_hdr.seqnum); | ||
1703 | memcpy(mesh_hdr.eaddr1, skb->data, ETH_ALEN); | ||
1704 | memcpy(mesh_hdr.eaddr2, skb->data + ETH_ALEN, ETH_ALEN); | ||
1705 | sdata->u.mesh.mesh_seqnum++; | ||
1706 | meshhdrlen = 18; | ||
1707 | } | 1673 | } |
1708 | hdrlen = 30; | ||
1709 | break; | 1674 | break; |
1710 | #endif | 1675 | #endif |
1711 | case NL80211_IFTYPE_STATION: | 1676 | case NL80211_IFTYPE_STATION: |
@@ -1724,7 +1689,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1724 | hdrlen = 24; | 1689 | hdrlen = 24; |
1725 | break; | 1690 | break; |
1726 | default: | 1691 | default: |
1727 | ret = 0; | 1692 | ret = NETDEV_TX_OK; |
1728 | goto fail; | 1693 | goto fail; |
1729 | } | 1694 | } |
1730 | 1695 | ||
@@ -1766,7 +1731,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1766 | 1731 | ||
1767 | I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); | 1732 | I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); |
1768 | 1733 | ||
1769 | ret = 0; | 1734 | ret = NETDEV_TX_OK; |
1770 | goto fail; | 1735 | goto fail; |
1771 | } | 1736 | } |
1772 | 1737 | ||
@@ -1842,9 +1807,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1842 | nh_pos += hdrlen; | 1807 | nh_pos += hdrlen; |
1843 | h_pos += hdrlen; | 1808 | h_pos += hdrlen; |
1844 | 1809 | ||
1845 | skb->iif = dev->ifindex; | ||
1846 | |||
1847 | skb->dev = local->mdev; | ||
1848 | dev->stats.tx_packets++; | 1810 | dev->stats.tx_packets++; |
1849 | dev->stats.tx_bytes += skb->len; | 1811 | dev->stats.tx_bytes += skb->len; |
1850 | 1812 | ||
@@ -1855,13 +1817,15 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1855 | skb_set_network_header(skb, nh_pos); | 1817 | skb_set_network_header(skb, nh_pos); |
1856 | skb_set_transport_header(skb, h_pos); | 1818 | skb_set_transport_header(skb, h_pos); |
1857 | 1819 | ||
1820 | memset(info, 0, sizeof(*info)); | ||
1821 | |||
1858 | dev->trans_start = jiffies; | 1822 | dev->trans_start = jiffies; |
1859 | dev_queue_xmit(skb); | 1823 | ieee80211_xmit(sdata, skb); |
1860 | 1824 | ||
1861 | return 0; | 1825 | return NETDEV_TX_OK; |
1862 | 1826 | ||
1863 | fail: | 1827 | fail: |
1864 | if (!ret) | 1828 | if (ret == NETDEV_TX_OK) |
1865 | dev_kfree_skb(skb); | 1829 | dev_kfree_skb(skb); |
1866 | 1830 | ||
1867 | return ret; | 1831 | return ret; |
@@ -1887,101 +1851,74 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, | |||
1887 | struct ieee80211_sub_if_data *sdata; | 1851 | struct ieee80211_sub_if_data *sdata; |
1888 | struct sta_info *sta; | 1852 | struct sta_info *sta; |
1889 | struct ieee80211_hdr *hdr; | 1853 | struct ieee80211_hdr *hdr; |
1890 | struct net_device *dev; | ||
1891 | int ret; | 1854 | int ret; |
1892 | bool result = true; | 1855 | bool result = true; |
1893 | 1856 | ||
1894 | /* does interface still exist? */ | 1857 | sdata = vif_to_sdata(info->control.vif); |
1895 | dev = dev_get_by_index(&init_net, skb->iif); | ||
1896 | if (!dev) { | ||
1897 | dev_kfree_skb(skb); | ||
1898 | return true; | ||
1899 | } | ||
1900 | |||
1901 | /* validate info->control.vif against skb->iif */ | ||
1902 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1903 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | ||
1904 | sdata = container_of(sdata->bss, | ||
1905 | struct ieee80211_sub_if_data, | ||
1906 | u.ap); | ||
1907 | |||
1908 | if (unlikely(info->control.vif && info->control.vif != &sdata->vif)) { | ||
1909 | dev_kfree_skb(skb); | ||
1910 | result = true; | ||
1911 | goto out; | ||
1912 | } | ||
1913 | 1858 | ||
1914 | if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { | 1859 | if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { |
1915 | ieee80211_tx(dev, skb, true); | 1860 | ieee80211_tx(sdata, skb, true); |
1916 | } else { | 1861 | } else { |
1917 | hdr = (struct ieee80211_hdr *)skb->data; | 1862 | hdr = (struct ieee80211_hdr *)skb->data; |
1918 | sta = sta_info_get(local, hdr->addr1); | 1863 | sta = sta_info_get(local, hdr->addr1); |
1919 | 1864 | ||
1920 | ret = __ieee80211_tx(local, &skb, sta); | 1865 | ret = __ieee80211_tx(local, &skb, sta, true); |
1921 | if (ret != IEEE80211_TX_OK) | 1866 | if (ret != IEEE80211_TX_OK) |
1922 | result = false; | 1867 | result = false; |
1923 | } | 1868 | } |
1924 | 1869 | ||
1925 | out: | ||
1926 | dev_put(dev); | ||
1927 | |||
1928 | return result; | 1870 | return result; |
1929 | } | 1871 | } |
1930 | 1872 | ||
1931 | /* | 1873 | /* |
1932 | * Transmit all pending packets. Called from tasklet, locks master device | 1874 | * Transmit all pending packets. Called from tasklet. |
1933 | * TX lock so that no new packets can come in. | ||
1934 | */ | 1875 | */ |
1935 | void ieee80211_tx_pending(unsigned long data) | 1876 | void ieee80211_tx_pending(unsigned long data) |
1936 | { | 1877 | { |
1937 | struct ieee80211_local *local = (struct ieee80211_local *)data; | 1878 | struct ieee80211_local *local = (struct ieee80211_local *)data; |
1938 | struct net_device *dev = local->mdev; | ||
1939 | unsigned long flags; | 1879 | unsigned long flags; |
1940 | int i; | 1880 | int i; |
1941 | bool next; | 1881 | bool txok; |
1942 | 1882 | ||
1943 | rcu_read_lock(); | 1883 | rcu_read_lock(); |
1944 | netif_tx_lock_bh(dev); | ||
1945 | 1884 | ||
1885 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | ||
1946 | for (i = 0; i < local->hw.queues; i++) { | 1886 | for (i = 0; i < local->hw.queues; i++) { |
1947 | /* | 1887 | /* |
1948 | * If queue is stopped by something other than due to pending | 1888 | * If queue is stopped by something other than due to pending |
1949 | * frames, or we have no pending frames, proceed to next queue. | 1889 | * frames, or we have no pending frames, proceed to next queue. |
1950 | */ | 1890 | */ |
1951 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | 1891 | if (local->queue_stop_reasons[i] || |
1952 | next = false; | ||
1953 | if (local->queue_stop_reasons[i] != | ||
1954 | BIT(IEEE80211_QUEUE_STOP_REASON_PENDING) || | ||
1955 | skb_queue_empty(&local->pending[i])) | 1892 | skb_queue_empty(&local->pending[i])) |
1956 | next = true; | ||
1957 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | ||
1958 | |||
1959 | if (next) | ||
1960 | continue; | 1893 | continue; |
1961 | 1894 | ||
1962 | /* | ||
1963 | * start the queue now to allow processing our packets, | ||
1964 | * we're under the tx lock here anyway so nothing will | ||
1965 | * happen as a result of this | ||
1966 | */ | ||
1967 | netif_start_subqueue(local->mdev, i); | ||
1968 | |||
1969 | while (!skb_queue_empty(&local->pending[i])) { | 1895 | while (!skb_queue_empty(&local->pending[i])) { |
1970 | struct sk_buff *skb = skb_dequeue(&local->pending[i]); | 1896 | struct sk_buff *skb = __skb_dequeue(&local->pending[i]); |
1897 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1898 | struct ieee80211_sub_if_data *sdata; | ||
1971 | 1899 | ||
1972 | if (!ieee80211_tx_pending_skb(local, skb)) { | 1900 | if (WARN_ON(!info->control.vif)) { |
1973 | skb_queue_head(&local->pending[i], skb); | 1901 | kfree_skb(skb); |
1974 | break; | 1902 | continue; |
1975 | } | 1903 | } |
1976 | } | ||
1977 | 1904 | ||
1978 | /* Start regular packet processing again. */ | 1905 | sdata = vif_to_sdata(info->control.vif); |
1979 | if (skb_queue_empty(&local->pending[i])) | 1906 | dev_hold(sdata->dev); |
1980 | ieee80211_wake_queue_by_reason(&local->hw, i, | 1907 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, |
1981 | IEEE80211_QUEUE_STOP_REASON_PENDING); | 1908 | flags); |
1909 | |||
1910 | txok = ieee80211_tx_pending_skb(local, skb); | ||
1911 | dev_put(sdata->dev); | ||
1912 | if (!txok) | ||
1913 | __skb_queue_head(&local->pending[i], skb); | ||
1914 | spin_lock_irqsave(&local->queue_stop_reason_lock, | ||
1915 | flags); | ||
1916 | if (!txok) | ||
1917 | break; | ||
1918 | } | ||
1982 | } | 1919 | } |
1920 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | ||
1983 | 1921 | ||
1984 | netif_tx_unlock_bh(dev); | ||
1985 | rcu_read_unlock(); | 1922 | rcu_read_unlock(); |
1986 | } | 1923 | } |
1987 | 1924 | ||
@@ -2156,8 +2093,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, | |||
2156 | 2093 | ||
2157 | info = IEEE80211_SKB_CB(skb); | 2094 | info = IEEE80211_SKB_CB(skb); |
2158 | 2095 | ||
2159 | skb->do_not_encrypt = 1; | 2096 | info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; |
2160 | |||
2161 | info->band = band; | 2097 | info->band = band; |
2162 | /* | 2098 | /* |
2163 | * XXX: For now, always use the lowest rate | 2099 | * XXX: For now, always use the lowest rate |
@@ -2228,9 +2164,6 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
2228 | sdata = vif_to_sdata(vif); | 2164 | sdata = vif_to_sdata(vif); |
2229 | bss = &sdata->u.ap; | 2165 | bss = &sdata->u.ap; |
2230 | 2166 | ||
2231 | if (!bss) | ||
2232 | return NULL; | ||
2233 | |||
2234 | rcu_read_lock(); | 2167 | rcu_read_lock(); |
2235 | beacon = rcu_dereference(bss->beacon); | 2168 | beacon = rcu_dereference(bss->beacon); |
2236 | 2169 | ||
@@ -2256,7 +2189,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
2256 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); | 2189 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); |
2257 | } | 2190 | } |
2258 | 2191 | ||
2259 | if (!ieee80211_tx_prepare(local, &tx, skb)) | 2192 | if (!ieee80211_tx_prepare(sdata, &tx, skb)) |
2260 | break; | 2193 | break; |
2261 | dev_kfree_skb_any(skb); | 2194 | dev_kfree_skb_any(skb); |
2262 | } | 2195 | } |
@@ -2276,3 +2209,24 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
2276 | return skb; | 2209 | return skb; |
2277 | } | 2210 | } |
2278 | EXPORT_SYMBOL(ieee80211_get_buffered_bc); | 2211 | EXPORT_SYMBOL(ieee80211_get_buffered_bc); |
2212 | |||
2213 | void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | ||
2214 | int encrypt) | ||
2215 | { | ||
2216 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
2217 | skb_set_mac_header(skb, 0); | ||
2218 | skb_set_network_header(skb, 0); | ||
2219 | skb_set_transport_header(skb, 0); | ||
2220 | |||
2221 | if (!encrypt) | ||
2222 | info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; | ||
2223 | |||
2224 | /* | ||
2225 | * The other path calling ieee80211_xmit is from the tasklet, | ||
2226 | * and while we can handle concurrent transmissions, the locking | ||
2227 | * requirement is that we do not come into tx with bhs on. | ||
2228 | */ | ||
2229 | local_bh_disable(); | ||
2230 | ieee80211_xmit(sdata, skb); | ||
2231 | local_bh_enable(); | ||
2232 | } | ||
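With dev_queue_xmit() gone, internal callers hand fully built 802.11 frames straight to ieee80211_tx_skb(). A hedged usage sketch follows (the nullfunc frame is only an example; the field fills mirror the usual mlme pattern rather than anything in this hunk), the one visible requirement being that the skb reserves the driver's extra_tx_headroom:

static void sketch_send_nullfunc(struct ieee80211_sub_if_data *sdata,
				 const u8 *bssid)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hdr_3addr *nullfunc;
	struct sk_buff *skb;

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
	if (!skb)
		return;
	skb_reserve(skb, local->hw.extra_tx_headroom);

	nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*nullfunc));
	memset(nullfunc, 0, sizeof(*nullfunc));
	nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
					      IEEE80211_STYPE_NULLFUNC |
					      IEEE80211_FCTL_TODS);
	memcpy(nullfunc->addr1, bssid, ETH_ALEN);
	memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
	memcpy(nullfunc->addr3, bssid, ETH_ALEN);

	/* encrypt == 0: the frame is marked IEEE80211_TX_INTFL_DONT_ENCRYPT */
	ieee80211_tx_skb(sdata, skb, 0);
}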
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 915e77769312..dd6564321369 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "mesh.h" | 31 | #include "mesh.h" |
32 | #include "wme.h" | 32 | #include "wme.h" |
33 | #include "led.h" | 33 | #include "led.h" |
34 | #include "wep.h" | ||
34 | 35 | ||
35 | /* privid for wiphys to determine whether they belong to us or not */ | 36 | /* privid for wiphys to determine whether they belong to us or not */ |
36 | void *mac80211_wiphy_privid = &mac80211_wiphy_privid; | 37 | void *mac80211_wiphy_privid = &mac80211_wiphy_privid; |
@@ -274,16 +275,12 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, | |||
274 | 275 | ||
275 | __clear_bit(reason, &local->queue_stop_reasons[queue]); | 276 | __clear_bit(reason, &local->queue_stop_reasons[queue]); |
276 | 277 | ||
277 | if (!skb_queue_empty(&local->pending[queue]) && | ||
278 | local->queue_stop_reasons[queue] == | ||
279 | BIT(IEEE80211_QUEUE_STOP_REASON_PENDING)) | ||
280 | tasklet_schedule(&local->tx_pending_tasklet); | ||
281 | |||
282 | if (local->queue_stop_reasons[queue] != 0) | 278 | if (local->queue_stop_reasons[queue] != 0) |
283 | /* someone still has this queue stopped */ | 279 | /* someone still has this queue stopped */ |
284 | return; | 280 | return; |
285 | 281 | ||
286 | netif_wake_subqueue(local->mdev, queue); | 282 | if (!skb_queue_empty(&local->pending[queue])) |
283 | tasklet_schedule(&local->tx_pending_tasklet); | ||
287 | } | 284 | } |
288 | 285 | ||
289 | void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, | 286 | void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, |
@@ -312,14 +309,6 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, | |||
312 | if (WARN_ON(queue >= hw->queues)) | 309 | if (WARN_ON(queue >= hw->queues)) |
313 | return; | 310 | return; |
314 | 311 | ||
315 | /* | ||
316 | * Only stop if it was previously running, this is necessary | ||
317 | * for correct pending packets handling because there we may | ||
318 | * start (but not wake) the queue and rely on that. | ||
319 | */ | ||
320 | if (!local->queue_stop_reasons[queue]) | ||
321 | netif_stop_subqueue(local->mdev, queue); | ||
322 | |||
323 | __set_bit(reason, &local->queue_stop_reasons[queue]); | 312 | __set_bit(reason, &local->queue_stop_reasons[queue]); |
324 | } | 313 | } |
325 | 314 | ||
@@ -347,11 +336,16 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local, | |||
347 | struct ieee80211_hw *hw = &local->hw; | 336 | struct ieee80211_hw *hw = &local->hw; |
348 | unsigned long flags; | 337 | unsigned long flags; |
349 | int queue = skb_get_queue_mapping(skb); | 338 | int queue = skb_get_queue_mapping(skb); |
339 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
340 | |||
341 | if (WARN_ON(!info->control.vif)) { | ||
342 | kfree_skb(skb); | ||
343 | return; | ||
344 | } | ||
350 | 345 | ||
351 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | 346 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); |
352 | __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); | 347 | __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); |
353 | __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_PENDING); | 348 | __skb_queue_tail(&local->pending[queue], skb); |
354 | skb_queue_tail(&local->pending[queue], skb); | ||
355 | __ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); | 349 | __ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); |
356 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | 350 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); |
357 | } | 351 | } |
@@ -370,18 +364,21 @@ int ieee80211_add_pending_skbs(struct ieee80211_local *local, | |||
370 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD); | 364 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD); |
371 | 365 | ||
372 | while ((skb = skb_dequeue(skbs))) { | 366 | while ((skb = skb_dequeue(skbs))) { |
367 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
368 | |||
369 | if (WARN_ON(!info->control.vif)) { | ||
370 | kfree_skb(skb); | ||
371 | continue; | ||
372 | } | ||
373 | |||
373 | ret++; | 374 | ret++; |
374 | queue = skb_get_queue_mapping(skb); | 375 | queue = skb_get_queue_mapping(skb); |
375 | skb_queue_tail(&local->pending[queue], skb); | 376 | __skb_queue_tail(&local->pending[queue], skb); |
376 | } | 377 | } |
377 | 378 | ||
378 | for (i = 0; i < hw->queues; i++) { | 379 | for (i = 0; i < hw->queues; i++) |
379 | if (ret) | ||
380 | __ieee80211_stop_queue(hw, i, | ||
381 | IEEE80211_QUEUE_STOP_REASON_PENDING); | ||
382 | __ieee80211_wake_queue(hw, i, | 380 | __ieee80211_wake_queue(hw, i, |
383 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD); | 381 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD); |
384 | } | ||
385 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | 382 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); |
386 | 383 | ||
387 | return ret; | 384 | return ret; |
@@ -412,11 +409,16 @@ EXPORT_SYMBOL(ieee80211_stop_queues); | |||
412 | int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) | 409 | int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) |
413 | { | 410 | { |
414 | struct ieee80211_local *local = hw_to_local(hw); | 411 | struct ieee80211_local *local = hw_to_local(hw); |
412 | unsigned long flags; | ||
413 | int ret; | ||
415 | 414 | ||
416 | if (WARN_ON(queue >= hw->queues)) | 415 | if (WARN_ON(queue >= hw->queues)) |
417 | return true; | 416 | return true; |
418 | 417 | ||
419 | return __netif_subqueue_stopped(local->mdev, queue); | 418 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); |
419 | ret = !!local->queue_stop_reasons[queue]; | ||
420 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | ||
421 | return ret; | ||
420 | } | 422 | } |
421 | EXPORT_SYMBOL(ieee80211_queue_stopped); | 423 | EXPORT_SYMBOL(ieee80211_queue_stopped); |
422 | 424 | ||
@@ -509,6 +511,46 @@ void ieee80211_iterate_active_interfaces_atomic( | |||
509 | } | 511 | } |
510 | EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); | 512 | EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); |
511 | 513 | ||
514 | /* | ||
515 | * Nothing should have been stuffed into the workqueue during | ||
516 | * the suspend->resume cycle. If this WARN is seen then there | ||
517 | * is a bug either in the driver's suspend path or in mac80211 | ||
518 | * queueing work into the workqueue which we haven't yet | ||
519 | * cleared during mac80211's suspend cycle. | ||
520 | */ | ||
521 | static bool ieee80211_can_queue_work(struct ieee80211_local *local) | ||
522 | { | ||
523 | if (WARN(local->suspended, "queueing ieee80211 work while " | ||
524 | "going to suspend\n")) | ||
525 | return false; | ||
526 | |||
527 | return true; | ||
528 | } | ||
529 | |||
530 | void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work) | ||
531 | { | ||
532 | struct ieee80211_local *local = hw_to_local(hw); | ||
533 | |||
534 | if (!ieee80211_can_queue_work(local)) | ||
535 | return; | ||
536 | |||
537 | queue_work(local->workqueue, work); | ||
538 | } | ||
539 | EXPORT_SYMBOL(ieee80211_queue_work); | ||
540 | |||
541 | void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, | ||
542 | struct delayed_work *dwork, | ||
543 | unsigned long delay) | ||
544 | { | ||
545 | struct ieee80211_local *local = hw_to_local(hw); | ||
546 | |||
547 | if (!ieee80211_can_queue_work(local)) | ||
548 | return; | ||
549 | |||
550 | queue_delayed_work(local->workqueue, dwork, delay); | ||
551 | } | ||
552 | EXPORT_SYMBOL(ieee80211_queue_delayed_work); | ||
553 | |||
512 | void ieee802_11_parse_elems(u8 *start, size_t len, | 554 | void ieee802_11_parse_elems(u8 *start, size_t len, |
513 | struct ieee802_11_elems *elems) | 555 | struct ieee802_11_elems *elems) |
514 | { | 556 | { |
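The two wrappers exist so that every deferred-work submission funnels through the suspend check above; call sites that used to do queue_work(local->hw.workqueue, ...) switch to the pattern below (a hedged sketch; the priv structure and work names are invented):

struct sketch_priv {
	struct work_struct restart_work;
	struct delayed_work poll_work;
};

/* e.g. from a driver interrupt handler: defer the heavy lifting, but
 * let mac80211 refuse the submission while the device is suspended */
static void sketch_defer_work(struct ieee80211_hw *hw, struct sketch_priv *priv)
{
	ieee80211_queue_work(hw, &priv->restart_work);

	/* same idea with a delay, e.g. poll the device again in 100 ms */
	ieee80211_queue_delayed_work(hw, &priv->poll_work,
				     msecs_to_jiffies(100));
}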
@@ -760,20 +802,6 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, | |||
760 | ieee80211_set_wmm_default(sdata); | 802 | ieee80211_set_wmm_default(sdata); |
761 | } | 803 | } |
762 | 804 | ||
763 | void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | ||
764 | int encrypt) | ||
765 | { | ||
766 | skb->dev = sdata->local->mdev; | ||
767 | skb_set_mac_header(skb, 0); | ||
768 | skb_set_network_header(skb, 0); | ||
769 | skb_set_transport_header(skb, 0); | ||
770 | |||
771 | skb->iif = sdata->dev->ifindex; | ||
772 | skb->do_not_encrypt = !encrypt; | ||
773 | |||
774 | dev_queue_xmit(skb); | ||
775 | } | ||
776 | |||
777 | u32 ieee80211_mandatory_rates(struct ieee80211_local *local, | 805 | u32 ieee80211_mandatory_rates(struct ieee80211_local *local, |
778 | enum ieee80211_band band) | 806 | enum ieee80211_band band) |
779 | { | 807 | { |
@@ -804,12 +832,13 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local, | |||
804 | 832 | ||
805 | void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, | 833 | void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, |
806 | u16 transaction, u16 auth_alg, | 834 | u16 transaction, u16 auth_alg, |
807 | u8 *extra, size_t extra_len, | 835 | u8 *extra, size_t extra_len, const u8 *bssid, |
808 | const u8 *bssid, int encrypt) | 836 | const u8 *key, u8 key_len, u8 key_idx) |
809 | { | 837 | { |
810 | struct ieee80211_local *local = sdata->local; | 838 | struct ieee80211_local *local = sdata->local; |
811 | struct sk_buff *skb; | 839 | struct sk_buff *skb; |
812 | struct ieee80211_mgmt *mgmt; | 840 | struct ieee80211_mgmt *mgmt; |
841 | int err; | ||
813 | 842 | ||
814 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + | 843 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + |
815 | sizeof(*mgmt) + 6 + extra_len); | 844 | sizeof(*mgmt) + 6 + extra_len); |
@@ -824,8 +853,6 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, | |||
824 | memset(mgmt, 0, 24 + 6); | 853 | memset(mgmt, 0, 24 + 6); |
825 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | | 854 | mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | |
826 | IEEE80211_STYPE_AUTH); | 855 | IEEE80211_STYPE_AUTH); |
827 | if (encrypt) | ||
828 | mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); | ||
829 | memcpy(mgmt->da, bssid, ETH_ALEN); | 856 | memcpy(mgmt->da, bssid, ETH_ALEN); |
830 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); | 857 | memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); |
831 | memcpy(mgmt->bssid, bssid, ETH_ALEN); | 858 | memcpy(mgmt->bssid, bssid, ETH_ALEN); |
@@ -835,7 +862,13 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, | |||
835 | if (extra) | 862 | if (extra) |
836 | memcpy(skb_put(skb, extra_len), extra, extra_len); | 863 | memcpy(skb_put(skb, extra_len), extra, extra_len); |
837 | 864 | ||
838 | ieee80211_tx_skb(sdata, skb, encrypt); | 865 | if (auth_alg == WLAN_AUTH_SHARED_KEY && transaction == 3) { |
866 | mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); | ||
867 | err = ieee80211_wep_encrypt(local, skb, key, key_len, key_idx); | ||
868 | WARN_ON(err); | ||
869 | } | ||
870 | |||
871 | ieee80211_tx_skb(sdata, skb, 0); | ||
839 | } | 872 | } |
840 | 873 | ||
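The WEP material now travels with the call instead of a pre-set PROTECTED bit, since only frame 3 of a shared-key handshake is encrypted. A hedged sketch of the caller side (challenge, bssid and the wep_* variables are placeholders for whatever the MLME code tracks; the signature is the one introduced above):

/* shared key authentication, transaction 3: echo the challenge back,
 * which ieee80211_send_auth() will now WEP-encrypt itself */
ieee80211_send_auth(sdata, 3, WLAN_AUTH_SHARED_KEY,
		    challenge, challenge_len, bssid,
		    wep_key, wep_key_len, wep_key_idx);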
841 | int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, | 874 | int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, |
@@ -974,6 +1007,16 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local, | |||
974 | return supp_rates; | 1007 | return supp_rates; |
975 | } | 1008 | } |
976 | 1009 | ||
1010 | void ieee80211_stop_device(struct ieee80211_local *local) | ||
1011 | { | ||
1012 | ieee80211_led_radio(local, false); | ||
1013 | |||
1014 | cancel_work_sync(&local->reconfig_filter); | ||
1015 | drv_stop(local); | ||
1016 | |||
1017 | flush_workqueue(local->workqueue); | ||
1018 | } | ||
1019 | |||
977 | int ieee80211_reconfig(struct ieee80211_local *local) | 1020 | int ieee80211_reconfig(struct ieee80211_local *local) |
978 | { | 1021 | { |
979 | struct ieee80211_hw *hw = &local->hw; | 1022 | struct ieee80211_hw *hw = &local->hw; |
@@ -1043,9 +1086,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1043 | /* reconfigure hardware */ | 1086 | /* reconfigure hardware */ |
1044 | ieee80211_hw_config(local, ~0); | 1087 | ieee80211_hw_config(local, ~0); |
1045 | 1088 | ||
1046 | netif_addr_lock_bh(local->mdev); | ||
1047 | ieee80211_configure_filter(local); | 1089 | ieee80211_configure_filter(local); |
1048 | netif_addr_unlock_bh(local->mdev); | ||
1049 | 1090 | ||
1050 | /* Finally also reconfigure all the BSS information */ | 1091 | /* Finally also reconfigure all the BSS information */ |
1051 | list_for_each_entry(sdata, &local->interfaces, list) { | 1092 | list_for_each_entry(sdata, &local->interfaces, list) { |
@@ -1121,3 +1162,4 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1121 | #endif | 1162 | #endif |
1122 | return 0; | 1163 | return 0; |
1123 | } | 1164 | } |
1165 | |||
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index ef73105b3061..8a980f136941 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c | |||
@@ -67,10 +67,10 @@ static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) | |||
67 | 67 | ||
68 | 68 | ||
69 | static void ieee80211_wep_get_iv(struct ieee80211_local *local, | 69 | static void ieee80211_wep_get_iv(struct ieee80211_local *local, |
70 | struct ieee80211_key *key, u8 *iv) | 70 | int keylen, int keyidx, u8 *iv) |
71 | { | 71 | { |
72 | local->wep_iv++; | 72 | local->wep_iv++; |
73 | if (ieee80211_wep_weak_iv(local->wep_iv, key->conf.keylen)) | 73 | if (ieee80211_wep_weak_iv(local->wep_iv, keylen)) |
74 | local->wep_iv += 0x0100; | 74 | local->wep_iv += 0x0100; |
75 | 75 | ||
76 | if (!iv) | 76 | if (!iv) |
@@ -79,13 +79,13 @@ static void ieee80211_wep_get_iv(struct ieee80211_local *local, | |||
79 | *iv++ = (local->wep_iv >> 16) & 0xff; | 79 | *iv++ = (local->wep_iv >> 16) & 0xff; |
80 | *iv++ = (local->wep_iv >> 8) & 0xff; | 80 | *iv++ = (local->wep_iv >> 8) & 0xff; |
81 | *iv++ = local->wep_iv & 0xff; | 81 | *iv++ = local->wep_iv & 0xff; |
82 | *iv++ = key->conf.keyidx << 6; | 82 | *iv++ = keyidx << 6; |
83 | } | 83 | } |
84 | 84 | ||
85 | 85 | ||
86 | static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, | 86 | static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, |
87 | struct sk_buff *skb, | 87 | struct sk_buff *skb, |
88 | struct ieee80211_key *key) | 88 | int keylen, int keyidx) |
89 | { | 89 | { |
90 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 90 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
91 | unsigned int hdrlen; | 91 | unsigned int hdrlen; |
@@ -100,7 +100,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, | |||
100 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 100 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
101 | newhdr = skb_push(skb, WEP_IV_LEN); | 101 | newhdr = skb_push(skb, WEP_IV_LEN); |
102 | memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen); | 102 | memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen); |
103 | ieee80211_wep_get_iv(local, key, newhdr + hdrlen); | 103 | ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen); |
104 | return newhdr + hdrlen; | 104 | return newhdr + hdrlen; |
105 | } | 105 | } |
106 | 106 | ||
@@ -144,26 +144,17 @@ void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, | |||
144 | * | 144 | * |
145 | * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data)) | 145 | * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data)) |
146 | */ | 146 | */ |
147 | int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, | 147 | int ieee80211_wep_encrypt(struct ieee80211_local *local, |
148 | struct ieee80211_key *key) | 148 | struct sk_buff *skb, |
149 | const u8 *key, int keylen, int keyidx) | ||
149 | { | 150 | { |
150 | u32 klen; | 151 | u8 *iv; |
151 | u8 *rc4key, *iv; | ||
152 | size_t len; | 152 | size_t len; |
153 | u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; | ||
153 | 154 | ||
154 | if (!key || key->conf.alg != ALG_WEP) | 155 | iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx); |
155 | return -1; | 156 | if (!iv) |
156 | |||
157 | klen = 3 + key->conf.keylen; | ||
158 | rc4key = kmalloc(klen, GFP_ATOMIC); | ||
159 | if (!rc4key) | ||
160 | return -1; | ||
161 | |||
162 | iv = ieee80211_wep_add_iv(local, skb, key); | ||
163 | if (!iv) { | ||
164 | kfree(rc4key); | ||
165 | return -1; | 157 | return -1; |
166 | } | ||
167 | 158 | ||
168 | len = skb->len - (iv + WEP_IV_LEN - skb->data); | 159 | len = skb->len - (iv + WEP_IV_LEN - skb->data); |
169 | 160 | ||
@@ -171,16 +162,14 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, | |||
171 | memcpy(rc4key, iv, 3); | 162 | memcpy(rc4key, iv, 3); |
172 | 163 | ||
173 | /* Copy rest of the WEP key (the secret part) */ | 164 | /* Copy rest of the WEP key (the secret part) */ |
174 | memcpy(rc4key + 3, key->conf.key, key->conf.keylen); | 165 | memcpy(rc4key + 3, key, keylen); |
175 | 166 | ||
176 | /* Add room for ICV */ | 167 | /* Add room for ICV */ |
177 | skb_put(skb, WEP_ICV_LEN); | 168 | skb_put(skb, WEP_ICV_LEN); |
178 | 169 | ||
179 | ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, klen, | 170 | ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3, |
180 | iv + WEP_IV_LEN, len); | 171 | iv + WEP_IV_LEN, len); |
181 | 172 | ||
182 | kfree(rc4key); | ||
183 | |||
184 | return 0; | 173 | return 0; |
185 | } | 174 | } |
186 | 175 | ||
@@ -216,8 +205,9 @@ int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, | |||
216 | * failure. If frame is OK, IV and ICV will be removed, i.e., decrypted payload | 205 | * failure. If frame is OK, IV and ICV will be removed, i.e., decrypted payload |
217 | * is moved to the beginning of the skb and skb length will be reduced. | 206 | * is moved to the beginning of the skb and skb length will be reduced. |
218 | */ | 207 | */ |
219 | int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, | 208 | static int ieee80211_wep_decrypt(struct ieee80211_local *local, |
220 | struct ieee80211_key *key) | 209 | struct sk_buff *skb, |
210 | struct ieee80211_key *key) | ||
221 | { | 211 | { |
222 | u32 klen; | 212 | u32 klen; |
223 | u8 *rc4key; | 213 | u8 *rc4key; |
@@ -314,12 +304,16 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) | |||
314 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 304 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
315 | 305 | ||
316 | if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { | 306 | if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { |
317 | if (ieee80211_wep_encrypt(tx->local, skb, tx->key)) | 307 | if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, |
308 | tx->key->conf.keylen, | ||
309 | tx->key->conf.keyidx)) | ||
318 | return -1; | 310 | return -1; |
319 | } else { | 311 | } else { |
320 | info->control.hw_key = &tx->key->conf; | 312 | info->control.hw_key = &tx->key->conf; |
321 | if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { | 313 | if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { |
322 | if (!ieee80211_wep_add_iv(tx->local, skb, tx->key)) | 314 | if (!ieee80211_wep_add_iv(tx->local, skb, |
315 | tx->key->conf.keylen, | ||
316 | tx->key->conf.keyidx)) | ||
323 | return -1; | 317 | return -1; |
324 | } | 318 | } |
325 | } | 319 | } |
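
The rework above also drops the per-frame kmalloc(): the RC4 seed is just the three transmitted IV bytes followed by the secret key, so the worst case (WEP-104) fits in a 3 + 13 byte stack buffer. A standalone sketch of that seed layout, with made-up key material and no kernel headers:

/*
 * Userspace sketch of the seed assembled by the reworked
 * ieee80211_wep_encrypt(): three public IV bytes, then the secret key.
 * Key bytes and the IV counter value are invented for the demo.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WLAN_KEY_LEN_WEP104 13

static void wep_build_iv(uint32_t wep_iv, int keyidx, uint8_t iv[4])
{
    iv[0] = (wep_iv >> 16) & 0xff;
    iv[1] = (wep_iv >> 8) & 0xff;
    iv[2] = wep_iv & 0xff;
    iv[3] = keyidx << 6;    /* key index lives in the top two bits */
}

int main(void)
{
    const uint8_t key[WLAN_KEY_LEN_WEP104] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };
    int keylen = 5;         /* WEP-40 example */
    uint8_t iv[4];
    uint8_t rc4key[3 + WLAN_KEY_LEN_WEP104];   /* worst case fits on the stack */

    wep_build_iv(0x123456, 2, iv);
    memcpy(rc4key, iv, 3);              /* public part of the seed */
    memcpy(rc4key + 3, key, keylen);    /* secret part of the seed */

    printf("seed length: %d bytes\n", 3 + keylen);
    return 0;
}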
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h index d3f0db48314e..fe29d7e5759f 100644 --- a/net/mac80211/wep.h +++ b/net/mac80211/wep.h | |||
@@ -20,12 +20,11 @@ int ieee80211_wep_init(struct ieee80211_local *local); | |||
20 | void ieee80211_wep_free(struct ieee80211_local *local); | 20 | void ieee80211_wep_free(struct ieee80211_local *local); |
21 | void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, | 21 | void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, |
22 | size_t klen, u8 *data, size_t data_len); | 22 | size_t klen, u8 *data, size_t data_len); |
23 | int ieee80211_wep_encrypt(struct ieee80211_local *local, | ||
24 | struct sk_buff *skb, | ||
25 | const u8 *key, int keylen, int keyidx); | ||
23 | int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, | 26 | int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, |
24 | size_t klen, u8 *data, size_t data_len); | 27 | size_t klen, u8 *data, size_t data_len); |
25 | int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, | ||
26 | struct ieee80211_key *key); | ||
27 | int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, | ||
28 | struct ieee80211_key *key); | ||
29 | bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); | 28 | bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); |
30 | 29 | ||
31 | ieee80211_rx_result | 30 | ieee80211_rx_result |
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c deleted file mode 100644 index 1da81f456744..000000000000 --- a/net/mac80211/wext.c +++ /dev/null | |||
@@ -1,633 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2002-2005, Instant802 Networks, Inc. | ||
3 | * Copyright 2005-2006, Devicescape Software, Inc. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/netdevice.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/skbuff.h> | ||
16 | #include <linux/etherdevice.h> | ||
17 | #include <linux/if_arp.h> | ||
18 | #include <linux/wireless.h> | ||
19 | #include <net/iw_handler.h> | ||
20 | #include <asm/uaccess.h> | ||
21 | |||
22 | #include <net/mac80211.h> | ||
23 | #include "ieee80211_i.h" | ||
24 | #include "led.h" | ||
25 | #include "rate.h" | ||
26 | #include "wpa.h" | ||
27 | #include "aes_ccm.h" | ||
28 | |||
29 | |||
30 | static int ieee80211_ioctl_siwgenie(struct net_device *dev, | ||
31 | struct iw_request_info *info, | ||
32 | struct iw_point *data, char *extra) | ||
33 | { | ||
34 | struct ieee80211_sub_if_data *sdata; | ||
35 | |||
36 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
37 | |||
38 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
39 | int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length); | ||
40 | if (ret && ret != -EALREADY) | ||
41 | return ret; | ||
42 | sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; | ||
43 | sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; | ||
44 | sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT; | ||
45 | if (ret != -EALREADY) | ||
46 | ieee80211_sta_req_auth(sdata); | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | return -EOPNOTSUPP; | ||
51 | } | ||
52 | |||
53 | static int ieee80211_ioctl_siwfreq(struct net_device *dev, | ||
54 | struct iw_request_info *info, | ||
55 | struct iw_freq *freq, char *extra) | ||
56 | { | ||
57 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
58 | struct ieee80211_local *local = sdata->local; | ||
59 | struct ieee80211_channel *chan; | ||
60 | |||
61 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) | ||
62 | return cfg80211_ibss_wext_siwfreq(dev, info, freq, extra); | ||
63 | else if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
64 | sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL; | ||
65 | |||
66 | /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */ | ||
67 | if (freq->e == 0) { | ||
68 | if (freq->m < 0) { | ||
69 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
70 | sdata->u.mgd.flags |= | ||
71 | IEEE80211_STA_AUTO_CHANNEL_SEL; | ||
72 | return 0; | ||
73 | } else | ||
74 | chan = ieee80211_get_channel(local->hw.wiphy, | ||
75 | ieee80211_channel_to_frequency(freq->m)); | ||
76 | } else { | ||
77 | int i, div = 1000000; | ||
78 | for (i = 0; i < freq->e; i++) | ||
79 | div /= 10; | ||
80 | if (div <= 0) | ||
81 | return -EINVAL; | ||
82 | chan = ieee80211_get_channel(local->hw.wiphy, freq->m / div); | ||
83 | } | ||
84 | |||
85 | if (!chan) | ||
86 | return -EINVAL; | ||
87 | |||
88 | if (chan->flags & IEEE80211_CHAN_DISABLED) | ||
89 | return -EINVAL; | ||
90 | |||
91 | /* | ||
92 | * no change except maybe auto -> fixed, ignore the HT | ||
93 | * setting so you can fix a channel you're on already | ||
94 | */ | ||
95 | if (local->oper_channel == chan) | ||
96 | return 0; | ||
97 | |||
98 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
99 | ieee80211_sta_req_auth(sdata); | ||
100 | |||
101 | local->oper_channel = chan; | ||
102 | local->oper_channel_type = NL80211_CHAN_NO_HT; | ||
103 | ieee80211_hw_config(local, 0); | ||
104 | |||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | |||
109 | static int ieee80211_ioctl_giwfreq(struct net_device *dev, | ||
110 | struct iw_request_info *info, | ||
111 | struct iw_freq *freq, char *extra) | ||
112 | { | ||
113 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
114 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
115 | |||
116 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) | ||
117 | return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra); | ||
118 | |||
119 | freq->m = local->oper_channel->center_freq; | ||
120 | freq->e = 6; | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | |||
126 | static int ieee80211_ioctl_siwessid(struct net_device *dev, | ||
127 | struct iw_request_info *info, | ||
128 | struct iw_point *data, char *ssid) | ||
129 | { | ||
130 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
131 | size_t len = data->length; | ||
132 | int ret; | ||
133 | |||
134 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) | ||
135 | return cfg80211_ibss_wext_siwessid(dev, info, data, ssid); | ||
136 | |||
137 | /* iwconfig uses nul termination in SSID.. */ | ||
138 | if (len > 0 && ssid[len - 1] == '\0') | ||
139 | len--; | ||
140 | |||
141 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
142 | if (data->flags) | ||
143 | sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; | ||
144 | else | ||
145 | sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL; | ||
146 | |||
147 | ret = ieee80211_sta_set_ssid(sdata, ssid, len); | ||
148 | if (ret) | ||
149 | return ret; | ||
150 | |||
151 | sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; | ||
152 | sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT; | ||
153 | ieee80211_sta_req_auth(sdata); | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | return -EOPNOTSUPP; | ||
158 | } | ||
159 | |||
160 | |||
161 | static int ieee80211_ioctl_giwessid(struct net_device *dev, | ||
162 | struct iw_request_info *info, | ||
163 | struct iw_point *data, char *ssid) | ||
164 | { | ||
165 | size_t len; | ||
166 | struct ieee80211_sub_if_data *sdata; | ||
167 | |||
168 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
169 | |||
170 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) | ||
171 | return cfg80211_ibss_wext_giwessid(dev, info, data, ssid); | ||
172 | |||
173 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
174 | int res = ieee80211_sta_get_ssid(sdata, ssid, &len); | ||
175 | if (res == 0) { | ||
176 | data->length = len; | ||
177 | data->flags = 1; | ||
178 | } else | ||
179 | data->flags = 0; | ||
180 | return res; | ||
181 | } | ||
182 | |||
183 | return -EOPNOTSUPP; | ||
184 | } | ||
185 | |||
186 | |||
187 | static int ieee80211_ioctl_siwap(struct net_device *dev, | ||
188 | struct iw_request_info *info, | ||
189 | struct sockaddr *ap_addr, char *extra) | ||
190 | { | ||
191 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
192 | |||
193 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) | ||
194 | return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra); | ||
195 | |||
196 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
197 | int ret; | ||
198 | |||
199 | if (is_zero_ether_addr((u8 *) &ap_addr->sa_data)) | ||
200 | sdata->u.mgd.flags |= IEEE80211_STA_AUTO_BSSID_SEL | | ||
201 | IEEE80211_STA_AUTO_CHANNEL_SEL; | ||
202 | else if (is_broadcast_ether_addr((u8 *) &ap_addr->sa_data)) | ||
203 | sdata->u.mgd.flags |= IEEE80211_STA_AUTO_BSSID_SEL; | ||
204 | else | ||
205 | sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; | ||
206 | ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data); | ||
207 | if (ret) | ||
208 | return ret; | ||
209 | sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME; | ||
210 | sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT; | ||
211 | ieee80211_sta_req_auth(sdata); | ||
212 | return 0; | ||
213 | } else if (sdata->vif.type == NL80211_IFTYPE_WDS) { | ||
214 | /* | ||
215 | * If it is necessary to update the WDS peer address | ||
216 | * while the interface is running, then we need to do | ||
217 | * more work here, namely if it is running we need to | ||
218 | * add a new and remove the old STA entry, this is | ||
219 | * normally handled by _open() and _stop(). | ||
220 | */ | ||
221 | if (netif_running(dev)) | ||
222 | return -EBUSY; | ||
223 | |||
224 | memcpy(&sdata->u.wds.remote_addr, (u8 *) &ap_addr->sa_data, | ||
225 | ETH_ALEN); | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | return -EOPNOTSUPP; | ||
231 | } | ||
232 | |||
233 | |||
234 | static int ieee80211_ioctl_giwap(struct net_device *dev, | ||
235 | struct iw_request_info *info, | ||
236 | struct sockaddr *ap_addr, char *extra) | ||
237 | { | ||
238 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
239 | |||
240 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) | ||
241 | return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra); | ||
242 | |||
243 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
244 | if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATED) { | ||
245 | ap_addr->sa_family = ARPHRD_ETHER; | ||
246 | memcpy(&ap_addr->sa_data, sdata->u.mgd.bssid, ETH_ALEN); | ||
247 | } else | ||
248 | memset(&ap_addr->sa_data, 0, ETH_ALEN); | ||
249 | return 0; | ||
250 | } else if (sdata->vif.type == NL80211_IFTYPE_WDS) { | ||
251 | ap_addr->sa_family = ARPHRD_ETHER; | ||
252 | memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN); | ||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | return -EOPNOTSUPP; | ||
257 | } | ||
258 | |||
259 | |||
260 | static int ieee80211_ioctl_siwrate(struct net_device *dev, | ||
261 | struct iw_request_info *info, | ||
262 | struct iw_param *rate, char *extra) | ||
263 | { | ||
264 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
265 | int i, err = -EINVAL; | ||
266 | u32 target_rate = rate->value / 100000; | ||
267 | struct ieee80211_sub_if_data *sdata; | ||
268 | struct ieee80211_supported_band *sband; | ||
269 | |||
270 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
271 | |||
272 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
273 | |||
274 | /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates | ||
275 | * target_rate = X, rate->fixed = 1 means only rate X | ||
276 | * target_rate = X, rate->fixed = 0 means all rates <= X */ | ||
277 | sdata->max_ratectrl_rateidx = -1; | ||
278 | sdata->force_unicast_rateidx = -1; | ||
279 | if (rate->value < 0) | ||
280 | return 0; | ||
281 | |||
282 | for (i=0; i< sband->n_bitrates; i++) { | ||
283 | struct ieee80211_rate *brate = &sband->bitrates[i]; | ||
284 | int this_rate = brate->bitrate; | ||
285 | |||
286 | if (target_rate == this_rate) { | ||
287 | sdata->max_ratectrl_rateidx = i; | ||
288 | if (rate->fixed) | ||
289 | sdata->force_unicast_rateidx = i; | ||
290 | err = 0; | ||
291 | break; | ||
292 | } | ||
293 | } | ||
294 | return err; | ||
295 | } | ||
296 | |||
297 | static int ieee80211_ioctl_giwrate(struct net_device *dev, | ||
298 | struct iw_request_info *info, | ||
299 | struct iw_param *rate, char *extra) | ||
300 | { | ||
301 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
302 | struct sta_info *sta; | ||
303 | struct ieee80211_sub_if_data *sdata; | ||
304 | struct ieee80211_supported_band *sband; | ||
305 | |||
306 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
307 | |||
308 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | ||
309 | return -EOPNOTSUPP; | ||
310 | |||
311 | sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; | ||
312 | |||
313 | rcu_read_lock(); | ||
314 | |||
315 | sta = sta_info_get(local, sdata->u.mgd.bssid); | ||
316 | |||
317 | if (sta && !(sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)) | ||
318 | rate->value = sband->bitrates[sta->last_tx_rate.idx].bitrate; | ||
319 | else | ||
320 | rate->value = 0; | ||
321 | |||
322 | rcu_read_unlock(); | ||
323 | |||
324 | if (!sta) | ||
325 | return -ENODEV; | ||
326 | |||
327 | rate->value *= 100000; | ||
328 | |||
329 | return 0; | ||
330 | } | ||
331 | |||
332 | static int ieee80211_ioctl_siwpower(struct net_device *dev, | ||
333 | struct iw_request_info *info, | ||
334 | struct iw_param *wrq, | ||
335 | char *extra) | ||
336 | { | ||
337 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
338 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
339 | struct ieee80211_conf *conf = &local->hw.conf; | ||
340 | int timeout = 0; | ||
341 | bool ps; | ||
342 | |||
343 | if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) | ||
344 | return -EOPNOTSUPP; | ||
345 | |||
346 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | ||
347 | return -EINVAL; | ||
348 | |||
349 | if (wrq->disabled) { | ||
350 | ps = false; | ||
351 | timeout = 0; | ||
352 | goto set; | ||
353 | } | ||
354 | |||
355 | switch (wrq->flags & IW_POWER_MODE) { | ||
356 | case IW_POWER_ON: /* If not specified */ | ||
357 | case IW_POWER_MODE: /* If set all mask */ | ||
358 | case IW_POWER_ALL_R: /* If explicitly state all */ | ||
359 | ps = true; | ||
360 | break; | ||
361 | default: /* Otherwise we ignore */ | ||
362 | return -EINVAL; | ||
363 | } | ||
364 | |||
365 | if (wrq->flags & ~(IW_POWER_MODE | IW_POWER_TIMEOUT)) | ||
366 | return -EINVAL; | ||
367 | |||
368 | if (wrq->flags & IW_POWER_TIMEOUT) | ||
369 | timeout = wrq->value / 1000; | ||
370 | |||
371 | set: | ||
372 | if (ps == sdata->u.mgd.powersave && timeout == conf->dynamic_ps_timeout) | ||
373 | return 0; | ||
374 | |||
375 | sdata->u.mgd.powersave = ps; | ||
376 | conf->dynamic_ps_timeout = timeout; | ||
377 | |||
378 | if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) | ||
379 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); | ||
380 | |||
381 | ieee80211_recalc_ps(local, -1); | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | static int ieee80211_ioctl_giwpower(struct net_device *dev, | ||
387 | struct iw_request_info *info, | ||
388 | union iwreq_data *wrqu, | ||
389 | char *extra) | ||
390 | { | ||
391 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
392 | |||
393 | wrqu->power.disabled = !sdata->u.mgd.powersave; | ||
394 | |||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | static int ieee80211_ioctl_siwauth(struct net_device *dev, | ||
399 | struct iw_request_info *info, | ||
400 | struct iw_param *data, char *extra) | ||
401 | { | ||
402 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
403 | int ret = 0; | ||
404 | |||
405 | switch (data->flags & IW_AUTH_INDEX) { | ||
406 | case IW_AUTH_WPA_VERSION: | ||
407 | case IW_AUTH_CIPHER_GROUP: | ||
408 | case IW_AUTH_WPA_ENABLED: | ||
409 | case IW_AUTH_RX_UNENCRYPTED_EAPOL: | ||
410 | case IW_AUTH_KEY_MGMT: | ||
411 | case IW_AUTH_CIPHER_GROUP_MGMT: | ||
412 | break; | ||
413 | case IW_AUTH_CIPHER_PAIRWISE: | ||
414 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
415 | if (data->value & (IW_AUTH_CIPHER_WEP40 | | ||
416 | IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_TKIP)) | ||
417 | sdata->u.mgd.flags |= | ||
418 | IEEE80211_STA_TKIP_WEP_USED; | ||
419 | else | ||
420 | sdata->u.mgd.flags &= | ||
421 | ~IEEE80211_STA_TKIP_WEP_USED; | ||
422 | } | ||
423 | break; | ||
424 | case IW_AUTH_DROP_UNENCRYPTED: | ||
425 | sdata->drop_unencrypted = !!data->value; | ||
426 | break; | ||
427 | case IW_AUTH_PRIVACY_INVOKED: | ||
428 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | ||
429 | ret = -EINVAL; | ||
430 | else { | ||
431 | sdata->u.mgd.flags &= ~IEEE80211_STA_PRIVACY_INVOKED; | ||
432 | /* | ||
433 | * Privacy invoked by wpa_supplicant, store the | ||
434 | * value and allow associating to a protected | ||
435 | * network without having a key up front. | ||
436 | */ | ||
437 | if (data->value) | ||
438 | sdata->u.mgd.flags |= | ||
439 | IEEE80211_STA_PRIVACY_INVOKED; | ||
440 | } | ||
441 | break; | ||
442 | case IW_AUTH_80211_AUTH_ALG: | ||
443 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
444 | sdata->u.mgd.auth_algs = data->value; | ||
445 | else | ||
446 | ret = -EOPNOTSUPP; | ||
447 | break; | ||
448 | case IW_AUTH_MFP: | ||
449 | if (!(sdata->local->hw.flags & IEEE80211_HW_MFP_CAPABLE)) { | ||
450 | ret = -EOPNOTSUPP; | ||
451 | break; | ||
452 | } | ||
453 | if (sdata->vif.type == NL80211_IFTYPE_STATION) { | ||
454 | switch (data->value) { | ||
455 | case IW_AUTH_MFP_DISABLED: | ||
456 | sdata->u.mgd.mfp = IEEE80211_MFP_DISABLED; | ||
457 | break; | ||
458 | case IW_AUTH_MFP_OPTIONAL: | ||
459 | sdata->u.mgd.mfp = IEEE80211_MFP_OPTIONAL; | ||
460 | break; | ||
461 | case IW_AUTH_MFP_REQUIRED: | ||
462 | sdata->u.mgd.mfp = IEEE80211_MFP_REQUIRED; | ||
463 | break; | ||
464 | default: | ||
465 | ret = -EINVAL; | ||
466 | } | ||
467 | } else | ||
468 | ret = -EOPNOTSUPP; | ||
469 | break; | ||
470 | default: | ||
471 | ret = -EOPNOTSUPP; | ||
472 | break; | ||
473 | } | ||
474 | return ret; | ||
475 | } | ||
476 | |||
477 | /* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */ | ||
478 | static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev) | ||
479 | { | ||
480 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
481 | struct iw_statistics *wstats = &local->wstats; | ||
482 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
483 | struct sta_info *sta = NULL; | ||
484 | |||
485 | rcu_read_lock(); | ||
486 | |||
487 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
488 | sta = sta_info_get(local, sdata->u.mgd.bssid); | ||
489 | |||
490 | if (!sta) { | ||
491 | wstats->discard.fragment = 0; | ||
492 | wstats->discard.misc = 0; | ||
493 | wstats->qual.qual = 0; | ||
494 | wstats->qual.level = 0; | ||
495 | wstats->qual.noise = 0; | ||
496 | wstats->qual.updated = IW_QUAL_ALL_INVALID; | ||
497 | } else { | ||
498 | wstats->qual.updated = 0; | ||
499 | /* | ||
500 | * mirror what cfg80211 does for iwrange/scan results, | ||
501 | * otherwise userspace gets confused. | ||
502 | */ | ||
503 | if (local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC | | ||
504 | IEEE80211_HW_SIGNAL_DBM)) { | ||
505 | wstats->qual.updated |= IW_QUAL_LEVEL_UPDATED; | ||
506 | wstats->qual.updated |= IW_QUAL_QUAL_UPDATED; | ||
507 | } else { | ||
508 | wstats->qual.updated |= IW_QUAL_LEVEL_INVALID; | ||
509 | wstats->qual.updated |= IW_QUAL_QUAL_INVALID; | ||
510 | } | ||
511 | |||
512 | if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) { | ||
513 | wstats->qual.level = sta->last_signal; | ||
514 | wstats->qual.qual = sta->last_signal; | ||
515 | } else if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) { | ||
516 | int sig = sta->last_signal; | ||
517 | |||
518 | wstats->qual.updated |= IW_QUAL_DBM; | ||
519 | wstats->qual.level = sig; | ||
520 | if (sig < -110) | ||
521 | sig = -110; | ||
522 | else if (sig > -40) | ||
523 | sig = -40; | ||
524 | wstats->qual.qual = sig + 110; | ||
525 | } | ||
526 | |||
527 | if (local->hw.flags & IEEE80211_HW_NOISE_DBM) { | ||
528 | /* | ||
529 | * This assumes that if driver reports noise, it also | ||
530 | * reports signal in dBm. | ||
531 | */ | ||
532 | wstats->qual.noise = sta->last_noise; | ||
533 | wstats->qual.updated |= IW_QUAL_NOISE_UPDATED; | ||
534 | } else { | ||
535 | wstats->qual.updated |= IW_QUAL_NOISE_INVALID; | ||
536 | } | ||
537 | } | ||
538 | |||
539 | rcu_read_unlock(); | ||
540 | |||
541 | return wstats; | ||
542 | } | ||
543 | |||
544 | static int ieee80211_ioctl_giwauth(struct net_device *dev, | ||
545 | struct iw_request_info *info, | ||
546 | struct iw_param *data, char *extra) | ||
547 | { | ||
548 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
549 | int ret = 0; | ||
550 | |||
551 | switch (data->flags & IW_AUTH_INDEX) { | ||
552 | case IW_AUTH_80211_AUTH_ALG: | ||
553 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
554 | data->value = sdata->u.mgd.auth_algs; | ||
555 | else | ||
556 | ret = -EOPNOTSUPP; | ||
557 | break; | ||
558 | default: | ||
559 | ret = -EOPNOTSUPP; | ||
560 | break; | ||
561 | } | ||
562 | return ret; | ||
563 | } | ||
564 | |||
565 | |||
566 | /* Structures to export the Wireless Handlers */ | ||
567 | |||
568 | static const iw_handler ieee80211_handler[] = | ||
569 | { | ||
570 | (iw_handler) NULL, /* SIOCSIWCOMMIT */ | ||
571 | (iw_handler) cfg80211_wext_giwname, /* SIOCGIWNAME */ | ||
572 | (iw_handler) NULL, /* SIOCSIWNWID */ | ||
573 | (iw_handler) NULL, /* SIOCGIWNWID */ | ||
574 | (iw_handler) ieee80211_ioctl_siwfreq, /* SIOCSIWFREQ */ | ||
575 | (iw_handler) ieee80211_ioctl_giwfreq, /* SIOCGIWFREQ */ | ||
576 | (iw_handler) cfg80211_wext_siwmode, /* SIOCSIWMODE */ | ||
577 | (iw_handler) cfg80211_wext_giwmode, /* SIOCGIWMODE */ | ||
578 | (iw_handler) NULL, /* SIOCSIWSENS */ | ||
579 | (iw_handler) NULL, /* SIOCGIWSENS */ | ||
580 | (iw_handler) NULL /* not used */, /* SIOCSIWRANGE */ | ||
581 | (iw_handler) cfg80211_wext_giwrange, /* SIOCGIWRANGE */ | ||
582 | (iw_handler) NULL /* not used */, /* SIOCSIWPRIV */ | ||
583 | (iw_handler) NULL /* kernel code */, /* SIOCGIWPRIV */ | ||
584 | (iw_handler) NULL /* not used */, /* SIOCSIWSTATS */ | ||
585 | (iw_handler) NULL /* kernel code */, /* SIOCGIWSTATS */ | ||
586 | (iw_handler) NULL, /* SIOCSIWSPY */ | ||
587 | (iw_handler) NULL, /* SIOCGIWSPY */ | ||
588 | (iw_handler) NULL, /* SIOCSIWTHRSPY */ | ||
589 | (iw_handler) NULL, /* SIOCGIWTHRSPY */ | ||
590 | (iw_handler) ieee80211_ioctl_siwap, /* SIOCSIWAP */ | ||
591 | (iw_handler) ieee80211_ioctl_giwap, /* SIOCGIWAP */ | ||
592 | (iw_handler) cfg80211_wext_siwmlme, /* SIOCSIWMLME */ | ||
593 | (iw_handler) NULL, /* SIOCGIWAPLIST */ | ||
594 | (iw_handler) cfg80211_wext_siwscan, /* SIOCSIWSCAN */ | ||
595 | (iw_handler) cfg80211_wext_giwscan, /* SIOCGIWSCAN */ | ||
596 | (iw_handler) ieee80211_ioctl_siwessid, /* SIOCSIWESSID */ | ||
597 | (iw_handler) ieee80211_ioctl_giwessid, /* SIOCGIWESSID */ | ||
598 | (iw_handler) NULL, /* SIOCSIWNICKN */ | ||
599 | (iw_handler) NULL, /* SIOCGIWNICKN */ | ||
600 | (iw_handler) NULL, /* -- hole -- */ | ||
601 | (iw_handler) NULL, /* -- hole -- */ | ||
602 | (iw_handler) ieee80211_ioctl_siwrate, /* SIOCSIWRATE */ | ||
603 | (iw_handler) ieee80211_ioctl_giwrate, /* SIOCGIWRATE */ | ||
604 | (iw_handler) cfg80211_wext_siwrts, /* SIOCSIWRTS */ | ||
605 | (iw_handler) cfg80211_wext_giwrts, /* SIOCGIWRTS */ | ||
606 | (iw_handler) cfg80211_wext_siwfrag, /* SIOCSIWFRAG */ | ||
607 | (iw_handler) cfg80211_wext_giwfrag, /* SIOCGIWFRAG */ | ||
608 | (iw_handler) cfg80211_wext_siwtxpower, /* SIOCSIWTXPOW */ | ||
609 | (iw_handler) cfg80211_wext_giwtxpower, /* SIOCGIWTXPOW */ | ||
610 | (iw_handler) cfg80211_wext_siwretry, /* SIOCSIWRETRY */ | ||
611 | (iw_handler) cfg80211_wext_giwretry, /* SIOCGIWRETRY */ | ||
612 | (iw_handler) cfg80211_wext_siwencode, /* SIOCSIWENCODE */ | ||
613 | (iw_handler) cfg80211_wext_giwencode, /* SIOCGIWENCODE */ | ||
614 | (iw_handler) ieee80211_ioctl_siwpower, /* SIOCSIWPOWER */ | ||
615 | (iw_handler) ieee80211_ioctl_giwpower, /* SIOCGIWPOWER */ | ||
616 | (iw_handler) NULL, /* -- hole -- */ | ||
617 | (iw_handler) NULL, /* -- hole -- */ | ||
618 | (iw_handler) ieee80211_ioctl_siwgenie, /* SIOCSIWGENIE */ | ||
619 | (iw_handler) NULL, /* SIOCGIWGENIE */ | ||
620 | (iw_handler) ieee80211_ioctl_siwauth, /* SIOCSIWAUTH */ | ||
621 | (iw_handler) ieee80211_ioctl_giwauth, /* SIOCGIWAUTH */ | ||
622 | (iw_handler) cfg80211_wext_siwencodeext, /* SIOCSIWENCODEEXT */ | ||
623 | (iw_handler) NULL, /* SIOCGIWENCODEEXT */ | ||
624 | (iw_handler) NULL, /* SIOCSIWPMKSA */ | ||
625 | (iw_handler) NULL, /* -- hole -- */ | ||
626 | }; | ||
627 | |||
628 | const struct iw_handler_def ieee80211_iw_handler_def = | ||
629 | { | ||
630 | .num_standard = ARRAY_SIZE(ieee80211_handler), | ||
631 | .standard = (iw_handler *) ieee80211_handler, | ||
632 | .get_wireless_stats = ieee80211_get_wireless_stats, | ||
633 | }; | ||
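
The wext handlers go away because mac80211 now relies on cfg80211's implementations, but the deleted siwfreq handler documents the struct iw_freq encoding: e == 0 means m is a channel number, otherwise the frequency is m * 10^e Hz, reduced to MHz before the channel lookup. A self-contained sketch of that conversion (field names mirrored from iw_freq, nothing else assumed):

/*
 * Standalone reconstruction of the iw_freq decoding done by the removed
 * ieee80211_ioctl_siwfreq(): channel number if e == 0, else m * 10^e Hz
 * scaled down to MHz exactly as the removed loop did.
 */
#include <stdio.h>

struct iw_freq_demo {   /* mirrors the m/e fields of struct iw_freq */
    int m;
    short e;
};

/* Returns MHz, 0 for "channel number in m", -1 for an unusable exponent. */
static int iw_freq_to_mhz(const struct iw_freq_demo *freq)
{
    int i, div = 1000000;

    if (freq->e == 0)
        return 0;
    for (i = 0; i < freq->e; i++)
        div /= 10;
    if (div <= 0)
        return -1;
    return freq->m / div;
}

int main(void)
{
    struct iw_freq_demo by_channel = { .m = 11, .e = 0 };
    struct iw_freq_demo by_freq    = { .m = 2462, .e = 6 };

    printf("channel form: chan %d\n", by_channel.m);
    printf("frequency form: %d MHz\n", iw_freq_to_mhz(&by_freq));
    return 0;
}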
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 116a923b14d6..b19b7696f3a2 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c | |||
@@ -85,10 +85,8 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb) | |||
85 | return ieee802_1d_to_ac[skb->priority]; | 85 | return ieee802_1d_to_ac[skb->priority]; |
86 | } | 86 | } |
87 | 87 | ||
88 | u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb) | 88 | void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb) |
89 | { | 89 | { |
90 | struct ieee80211_master_priv *mpriv = netdev_priv(dev); | ||
91 | struct ieee80211_local *local = mpriv->local; | ||
92 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 90 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
93 | u16 queue; | 91 | u16 queue; |
94 | u8 tid; | 92 | u8 tid; |
@@ -113,5 +111,5 @@ u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
113 | *p = 0; | 111 | *p = 0; |
114 | } | 112 | } |
115 | 113 | ||
116 | return queue; | 114 | skb_set_queue_mapping(skb, queue); |
117 | } | 115 | } |
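
With this change ieee80211_select_queue() stores its result in the skb via skb_set_queue_mapping() instead of returning it; the classification itself is still the 802.1d-priority-to-AC lookup. A userspace sketch of that mapping, with the table values taken from mac80211's wme.c (queue 0 being the highest-priority voice queue there) and a stand-in structure for the sk_buff fields involved:

/*
 * Userspace sketch of the queue selection above: map the 802.1d priority
 * already stored in skb->priority to a hardware queue and record it the
 * way skb_set_queue_mapping() would.  demo_skb is a stand-in, not sk_buff.
 */
#include <stdio.h>

static const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

struct demo_skb {
    unsigned int priority;          /* 802.1d user priority, 0..7 */
    unsigned short queue_mapping;   /* what skb_set_queue_mapping() sets */
};

static void demo_select_queue(struct demo_skb *skb)
{
    skb->queue_mapping = ieee802_1d_to_ac[skb->priority & 7];
}

int main(void)
{
    struct demo_skb skb = { .priority = 6 };    /* voice traffic */

    demo_select_queue(&skb);
    printf("priority %u -> queue %u\n", skb.priority, skb.queue_mapping);
    return 0;
}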
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h index 7520d2e014dc..d4fd87ca5118 100644 --- a/net/mac80211/wme.h +++ b/net/mac80211/wme.h | |||
@@ -20,6 +20,7 @@ | |||
20 | 20 | ||
21 | extern const int ieee802_1d_to_ac[8]; | 21 | extern const int ieee802_1d_to_ac[8]; |
22 | 22 | ||
23 | u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb); | 23 | void ieee80211_select_queue(struct ieee80211_local *local, |
24 | struct sk_buff *skb); | ||
24 | 25 | ||
25 | #endif /* _WME_H */ | 26 | #endif /* _WME_H */ |
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index dcfae8884b86..70778694877b 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
@@ -122,7 +122,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) | |||
122 | return RX_DROP_UNUSABLE; | 122 | return RX_DROP_UNUSABLE; |
123 | 123 | ||
124 | mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx, | 124 | mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx, |
125 | (void *) skb->data, NULL); | 125 | (void *) skb->data, NULL, |
126 | GFP_ATOMIC); | ||
126 | return RX_DROP_UNUSABLE; | 127 | return RX_DROP_UNUSABLE; |
127 | } | 128 | } |
128 | 129 | ||
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c index 201b8ea3020d..3c7e42735b60 100644 --- a/net/netfilter/ipvs/ip_vs_app.c +++ b/net/netfilter/ipvs/ip_vs_app.c | |||
@@ -18,6 +18,9 @@ | |||
18 | * | 18 | * |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #define KMSG_COMPONENT "IPVS" | ||
22 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
23 | |||
21 | #include <linux/module.h> | 24 | #include <linux/module.h> |
22 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
23 | #include <linux/skbuff.h> | 26 | #include <linux/skbuff.h> |
@@ -262,12 +265,12 @@ static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th) | |||
262 | if (vseq->delta || vseq->previous_delta) { | 265 | if (vseq->delta || vseq->previous_delta) { |
263 | if(after(seq, vseq->init_seq)) { | 266 | if(after(seq, vseq->init_seq)) { |
264 | th->seq = htonl(seq + vseq->delta); | 267 | th->seq = htonl(seq + vseq->delta); |
265 | IP_VS_DBG(9, "vs_fix_seq(): added delta (%d) to seq\n", | 268 | IP_VS_DBG(9, "%s(): added delta (%d) to seq\n", |
266 | vseq->delta); | 269 | __func__, vseq->delta); |
267 | } else { | 270 | } else { |
268 | th->seq = htonl(seq + vseq->previous_delta); | 271 | th->seq = htonl(seq + vseq->previous_delta); |
269 | IP_VS_DBG(9, "vs_fix_seq(): added previous_delta " | 272 | IP_VS_DBG(9, "%s(): added previous_delta (%d) to seq\n", |
270 | "(%d) to seq\n", vseq->previous_delta); | 273 | __func__, vseq->previous_delta); |
271 | } | 274 | } |
272 | } | 275 | } |
273 | } | 276 | } |
@@ -291,14 +294,14 @@ vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th) | |||
291 | to receive next, so compare it with init_seq+delta */ | 294 | to receive next, so compare it with init_seq+delta */ |
292 | if(after(ack_seq, vseq->init_seq+vseq->delta)) { | 295 | if(after(ack_seq, vseq->init_seq+vseq->delta)) { |
293 | th->ack_seq = htonl(ack_seq - vseq->delta); | 296 | th->ack_seq = htonl(ack_seq - vseq->delta); |
294 | IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted delta " | 297 | IP_VS_DBG(9, "%s(): subtracted delta " |
295 | "(%d) from ack_seq\n", vseq->delta); | 298 | "(%d) from ack_seq\n", __func__, vseq->delta); |
296 | 299 | ||
297 | } else { | 300 | } else { |
298 | th->ack_seq = htonl(ack_seq - vseq->previous_delta); | 301 | th->ack_seq = htonl(ack_seq - vseq->previous_delta); |
299 | IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted " | 302 | IP_VS_DBG(9, "%s(): subtracted " |
300 | "previous_delta (%d) from ack_seq\n", | 303 | "previous_delta (%d) from ack_seq\n", |
301 | vseq->previous_delta); | 304 | __func__, vseq->previous_delta); |
302 | } | 305 | } |
303 | } | 306 | } |
304 | } | 307 | } |
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 77bfdfeb966e..27c30cf933da 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
@@ -22,6 +22,9 @@ | |||
22 | * | 22 | * |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #define KMSG_COMPONENT "IPVS" | ||
26 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
27 | |||
25 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
26 | #include <linux/in.h> | 29 | #include <linux/in.h> |
27 | #include <linux/net.h> | 30 | #include <linux/net.h> |
@@ -150,8 +153,8 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) | |||
150 | atomic_inc(&cp->refcnt); | 153 | atomic_inc(&cp->refcnt); |
151 | ret = 1; | 154 | ret = 1; |
152 | } else { | 155 | } else { |
153 | IP_VS_ERR("ip_vs_conn_hash(): request for already hashed, " | 156 | pr_err("%s(): request for already hashed, called from %pF\n", |
154 | "called from %p\n", __builtin_return_address(0)); | 157 | __func__, __builtin_return_address(0)); |
155 | ret = 0; | 158 | ret = 0; |
156 | } | 159 | } |
157 | 160 | ||
@@ -689,7 +692,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport, | |||
689 | 692 | ||
690 | cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC); | 693 | cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC); |
691 | if (cp == NULL) { | 694 | if (cp == NULL) { |
692 | IP_VS_ERR_RL("ip_vs_conn_new: no memory available.\n"); | 695 | IP_VS_ERR_RL("%s(): no memory\n", __func__); |
693 | return NULL; | 696 | return NULL; |
694 | } | 697 | } |
695 | 698 | ||
@@ -1073,10 +1076,10 @@ int __init ip_vs_conn_init(void) | |||
1073 | return -ENOMEM; | 1076 | return -ENOMEM; |
1074 | } | 1077 | } |
1075 | 1078 | ||
1076 | IP_VS_INFO("Connection hash table configured " | 1079 | pr_info("Connection hash table configured " |
1077 | "(size=%d, memory=%ldKbytes)\n", | 1080 | "(size=%d, memory=%ldKbytes)\n", |
1078 | IP_VS_CONN_TAB_SIZE, | 1081 | IP_VS_CONN_TAB_SIZE, |
1079 | (long)(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head))/1024); | 1082 | (long)(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head))/1024); |
1080 | IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n", | 1083 | IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n", |
1081 | sizeof(struct ip_vs_conn)); | 1084 | sizeof(struct ip_vs_conn)); |
1082 | 1085 | ||
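
The hashing error paths above now print the caller with %pF, the printk extension that resolves a code pointer such as __builtin_return_address(0) to a symbol name, instead of the bare %p used before. Plain printf has no %pF, so this sketch only shows the raw-address half of the pattern:

/*
 * Userspace sketch: capture the caller's return address the same way the
 * ip_vs_conn_hash() error path does.  In the kernel the pointer would be
 * printed with %pF so the log shows a symbol name instead of a raw address.
 */
#include <stdio.h>

static void __attribute__((noinline)) report_caller(void)
{
    /* kernel equivalent: pr_err("...: called from %pF\n", __builtin_return_address(0)); */
    printf("called from %p\n", __builtin_return_address(0));
}

int main(void)
{
    report_caller();
    return 0;
}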
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 8dddb17a947a..b95699f00545 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -24,6 +24,9 @@ | |||
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #define KMSG_COMPONENT "IPVS" | ||
28 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
29 | |||
27 | #include <linux/module.h> | 30 | #include <linux/module.h> |
28 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
29 | #include <linux/ip.h> | 32 | #include <linux/ip.h> |
@@ -388,9 +391,9 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
388 | */ | 391 | */ |
389 | if (!svc->fwmark && pptr[1] != svc->port) { | 392 | if (!svc->fwmark && pptr[1] != svc->port) { |
390 | if (!svc->port) | 393 | if (!svc->port) |
391 | IP_VS_ERR("Schedule: port zero only supported " | 394 | pr_err("Schedule: port zero only supported " |
392 | "in persistent services, " | 395 | "in persistent services, " |
393 | "check your ipvs configuration\n"); | 396 | "check your ipvs configuration\n"); |
394 | return NULL; | 397 | return NULL; |
395 | } | 398 | } |
396 | 399 | ||
@@ -462,7 +465,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | |||
462 | ip_vs_service_put(svc); | 465 | ip_vs_service_put(svc); |
463 | 466 | ||
464 | /* create a new connection entry */ | 467 | /* create a new connection entry */ |
465 | IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n"); | 468 | IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__); |
466 | cp = ip_vs_conn_new(svc->af, iph.protocol, | 469 | cp = ip_vs_conn_new(svc->af, iph.protocol, |
467 | &iph.saddr, pptr[0], | 470 | &iph.saddr, pptr[0], |
468 | &iph.daddr, pptr[1], | 471 | &iph.daddr, pptr[1], |
@@ -664,8 +667,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb, | |||
664 | unsigned int verdict = NF_DROP; | 667 | unsigned int verdict = NF_DROP; |
665 | 668 | ||
666 | if (IP_VS_FWD_METHOD(cp) != 0) { | 669 | if (IP_VS_FWD_METHOD(cp) != 0) { |
667 | IP_VS_ERR("shouldn't reach here, because the box is on the " | 670 | pr_err("shouldn't reach here, because the box is on the " |
668 | "half connection in the tun/dr module.\n"); | 671 | "half connection in the tun/dr module.\n"); |
669 | } | 672 | } |
670 | 673 | ||
671 | /* Ensure the checksum is correct */ | 674 | /* Ensure the checksum is correct */ |
@@ -1256,7 +1259,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, | |||
1256 | struct ip_vs_iphdr iph; | 1259 | struct ip_vs_iphdr iph; |
1257 | struct ip_vs_protocol *pp; | 1260 | struct ip_vs_protocol *pp; |
1258 | struct ip_vs_conn *cp; | 1261 | struct ip_vs_conn *cp; |
1259 | int ret, restart, af; | 1262 | int ret, restart, af, pkts; |
1260 | 1263 | ||
1261 | af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6; | 1264 | af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6; |
1262 | 1265 | ||
@@ -1274,13 +1277,24 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, | |||
1274 | return NF_ACCEPT; | 1277 | return NF_ACCEPT; |
1275 | } | 1278 | } |
1276 | 1279 | ||
1277 | if (unlikely(iph.protocol == IPPROTO_ICMP)) { | 1280 | #ifdef CONFIG_IP_VS_IPV6 |
1278 | int related, verdict = ip_vs_in_icmp(skb, &related, hooknum); | 1281 | if (af == AF_INET6) { |
1282 | if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { | ||
1283 | int related, verdict = ip_vs_in_icmp_v6(skb, &related, hooknum); | ||
1279 | 1284 | ||
1280 | if (related) | 1285 | if (related) |
1281 | return verdict; | 1286 | return verdict; |
1282 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 1287 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); |
1283 | } | 1288 | } |
1289 | } else | ||
1290 | #endif | ||
1291 | if (unlikely(iph.protocol == IPPROTO_ICMP)) { | ||
1292 | int related, verdict = ip_vs_in_icmp(skb, &related, hooknum); | ||
1293 | |||
1294 | if (related) | ||
1295 | return verdict; | ||
1296 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
1297 | } | ||
1284 | 1298 | ||
1285 | /* Protocol supported? */ | 1299 | /* Protocol supported? */ |
1286 | pp = ip_vs_proto_get(iph.protocol); | 1300 | pp = ip_vs_proto_get(iph.protocol); |
@@ -1343,12 +1357,12 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, | |||
1343 | * Sync connection if it is about to close to | 1357 | * Sync connection if it is about to close to |
1344 | * encourage the standby servers to update the connections timeout | 1358 | * encourage the standby servers to update the connections timeout |
1345 | */ | 1359 | */ |
1346 | atomic_inc(&cp->in_pkts); | 1360 | pkts = atomic_add_return(1, &cp->in_pkts); |
1347 | if (af == AF_INET && | 1361 | if (af == AF_INET && |
1348 | (ip_vs_sync_state & IP_VS_STATE_MASTER) && | 1362 | (ip_vs_sync_state & IP_VS_STATE_MASTER) && |
1349 | (((cp->protocol != IPPROTO_TCP || | 1363 | (((cp->protocol != IPPROTO_TCP || |
1350 | cp->state == IP_VS_TCP_S_ESTABLISHED) && | 1364 | cp->state == IP_VS_TCP_S_ESTABLISHED) && |
1351 | (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1] | 1365 | (pkts % sysctl_ip_vs_sync_threshold[1] |
1352 | == sysctl_ip_vs_sync_threshold[0])) || | 1366 | == sysctl_ip_vs_sync_threshold[0])) || |
1353 | ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && | 1367 | ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && |
1354 | ((cp->state == IP_VS_TCP_S_FIN_WAIT) || | 1368 | ((cp->state == IP_VS_TCP_S_FIN_WAIT) || |
@@ -1487,7 +1501,7 @@ static int __init ip_vs_init(void) | |||
1487 | 1501 | ||
1488 | ret = ip_vs_control_init(); | 1502 | ret = ip_vs_control_init(); |
1489 | if (ret < 0) { | 1503 | if (ret < 0) { |
1490 | IP_VS_ERR("can't setup control.\n"); | 1504 | pr_err("can't setup control.\n"); |
1491 | goto cleanup_estimator; | 1505 | goto cleanup_estimator; |
1492 | } | 1506 | } |
1493 | 1507 | ||
@@ -1495,23 +1509,23 @@ static int __init ip_vs_init(void) | |||
1495 | 1509 | ||
1496 | ret = ip_vs_app_init(); | 1510 | ret = ip_vs_app_init(); |
1497 | if (ret < 0) { | 1511 | if (ret < 0) { |
1498 | IP_VS_ERR("can't setup application helper.\n"); | 1512 | pr_err("can't setup application helper.\n"); |
1499 | goto cleanup_protocol; | 1513 | goto cleanup_protocol; |
1500 | } | 1514 | } |
1501 | 1515 | ||
1502 | ret = ip_vs_conn_init(); | 1516 | ret = ip_vs_conn_init(); |
1503 | if (ret < 0) { | 1517 | if (ret < 0) { |
1504 | IP_VS_ERR("can't setup connection table.\n"); | 1518 | pr_err("can't setup connection table.\n"); |
1505 | goto cleanup_app; | 1519 | goto cleanup_app; |
1506 | } | 1520 | } |
1507 | 1521 | ||
1508 | ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); | 1522 | ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); |
1509 | if (ret < 0) { | 1523 | if (ret < 0) { |
1510 | IP_VS_ERR("can't register hooks.\n"); | 1524 | pr_err("can't register hooks.\n"); |
1511 | goto cleanup_conn; | 1525 | goto cleanup_conn; |
1512 | } | 1526 | } |
1513 | 1527 | ||
1514 | IP_VS_INFO("ipvs loaded.\n"); | 1528 | pr_info("ipvs loaded.\n"); |
1515 | return ret; | 1529 | return ret; |
1516 | 1530 | ||
1517 | cleanup_conn: | 1531 | cleanup_conn: |
@@ -1534,7 +1548,7 @@ static void __exit ip_vs_cleanup(void) | |||
1534 | ip_vs_protocol_cleanup(); | 1548 | ip_vs_protocol_cleanup(); |
1535 | ip_vs_control_cleanup(); | 1549 | ip_vs_control_cleanup(); |
1536 | ip_vs_estimator_cleanup(); | 1550 | ip_vs_estimator_cleanup(); |
1537 | IP_VS_INFO("ipvs unloaded.\n"); | 1551 | pr_info("ipvs unloaded.\n"); |
1538 | } | 1552 | } |
1539 | 1553 | ||
1540 | module_init(ip_vs_init); | 1554 | module_init(ip_vs_init); |
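
The in_pkts accounting change above is subtle: atomic_add_return() makes the value tested against the sync threshold come from the same atomic operation that increments the counter, so concurrent CPUs can no longer both observe a count that skips or repeats a threshold hit. A C11-atomics sketch of the same idea, with made-up threshold numbers:

/*
 * Sketch of the counting change in ip_vs_in(), using C11 atomics in place
 * of the kernel's atomic_t.  atomic_fetch_add() + 1 plays the role of
 * atomic_add_return(1, ...): increment and read in one step.
 * sync_period/sync_offset are illustrative, not the IPVS defaults.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int in_pkts;

static int count_packet(void)
{
    return atomic_fetch_add(&in_pkts, 1) + 1;
}

int main(void)
{
    const int sync_period = 50, sync_offset = 10;
    int i;

    for (i = 0; i < 200; i++) {
        int pkts = count_packet();

        if (pkts % sync_period == sync_offset)
            printf("would sync connection state at packet %d\n", pkts);
    }
    return 0;
}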
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 7c1333c67ff3..fba2892b99e1 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -18,6 +18,9 @@ | |||
18 | * | 18 | * |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #define KMSG_COMPONENT "IPVS" | ||
22 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
23 | |||
21 | #include <linux/module.h> | 24 | #include <linux/module.h> |
22 | #include <linux/init.h> | 25 | #include <linux/init.h> |
23 | #include <linux/types.h> | 26 | #include <linux/types.h> |
@@ -340,8 +343,8 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc) | |||
340 | unsigned hash; | 343 | unsigned hash; |
341 | 344 | ||
342 | if (svc->flags & IP_VS_SVC_F_HASHED) { | 345 | if (svc->flags & IP_VS_SVC_F_HASHED) { |
343 | IP_VS_ERR("ip_vs_svc_hash(): request for already hashed, " | 346 | pr_err("%s(): request for already hashed, called from %pF\n", |
344 | "called from %p\n", __builtin_return_address(0)); | 347 | __func__, __builtin_return_address(0)); |
345 | return 0; | 348 | return 0; |
346 | } | 349 | } |
347 | 350 | ||
@@ -374,8 +377,8 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc) | |||
374 | static int ip_vs_svc_unhash(struct ip_vs_service *svc) | 377 | static int ip_vs_svc_unhash(struct ip_vs_service *svc) |
375 | { | 378 | { |
376 | if (!(svc->flags & IP_VS_SVC_F_HASHED)) { | 379 | if (!(svc->flags & IP_VS_SVC_F_HASHED)) { |
377 | IP_VS_ERR("ip_vs_svc_unhash(): request for unhash flagged, " | 380 | pr_err("%s(): request for unhash flagged, called from %pF\n", |
378 | "called from %p\n", __builtin_return_address(0)); | 381 | __func__, __builtin_return_address(0)); |
379 | return 0; | 382 | return 0; |
380 | } | 383 | } |
381 | 384 | ||
@@ -841,7 +844,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, | |||
841 | 844 | ||
842 | dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); | 845 | dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); |
843 | if (dest == NULL) { | 846 | if (dest == NULL) { |
844 | IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n"); | 847 | pr_err("%s(): no memory.\n", __func__); |
845 | return -ENOMEM; | 848 | return -ENOMEM; |
846 | } | 849 | } |
847 | 850 | ||
@@ -885,13 +888,13 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) | |||
885 | EnterFunction(2); | 888 | EnterFunction(2); |
886 | 889 | ||
887 | if (udest->weight < 0) { | 890 | if (udest->weight < 0) { |
888 | IP_VS_ERR("ip_vs_add_dest(): server weight less than zero\n"); | 891 | pr_err("%s(): server weight less than zero\n", __func__); |
889 | return -ERANGE; | 892 | return -ERANGE; |
890 | } | 893 | } |
891 | 894 | ||
892 | if (udest->l_threshold > udest->u_threshold) { | 895 | if (udest->l_threshold > udest->u_threshold) { |
893 | IP_VS_ERR("ip_vs_add_dest(): lower threshold is higher than " | 896 | pr_err("%s(): lower threshold is higher than upper threshold\n", |
894 | "upper threshold\n"); | 897 | __func__); |
895 | return -ERANGE; | 898 | return -ERANGE; |
896 | } | 899 | } |
897 | 900 | ||
@@ -903,7 +906,7 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) | |||
903 | dest = ip_vs_lookup_dest(svc, &daddr, dport); | 906 | dest = ip_vs_lookup_dest(svc, &daddr, dport); |
904 | 907 | ||
905 | if (dest != NULL) { | 908 | if (dest != NULL) { |
906 | IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n"); | 909 | IP_VS_DBG(1, "%s(): dest already exists\n", __func__); |
907 | return -EEXIST; | 910 | return -EEXIST; |
908 | } | 911 | } |
909 | 912 | ||
@@ -997,13 +1000,13 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) | |||
997 | EnterFunction(2); | 1000 | EnterFunction(2); |
998 | 1001 | ||
999 | if (udest->weight < 0) { | 1002 | if (udest->weight < 0) { |
1000 | IP_VS_ERR("ip_vs_edit_dest(): server weight less than zero\n"); | 1003 | pr_err("%s(): server weight less than zero\n", __func__); |
1001 | return -ERANGE; | 1004 | return -ERANGE; |
1002 | } | 1005 | } |
1003 | 1006 | ||
1004 | if (udest->l_threshold > udest->u_threshold) { | 1007 | if (udest->l_threshold > udest->u_threshold) { |
1005 | IP_VS_ERR("ip_vs_edit_dest(): lower threshold is higher than " | 1008 | pr_err("%s(): lower threshold is higher than upper threshold\n", |
1006 | "upper threshold\n"); | 1009 | __func__); |
1007 | return -ERANGE; | 1010 | return -ERANGE; |
1008 | } | 1011 | } |
1009 | 1012 | ||
@@ -1015,7 +1018,7 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) | |||
1015 | dest = ip_vs_lookup_dest(svc, &daddr, dport); | 1018 | dest = ip_vs_lookup_dest(svc, &daddr, dport); |
1016 | 1019 | ||
1017 | if (dest == NULL) { | 1020 | if (dest == NULL) { |
1018 | IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n"); | 1021 | IP_VS_DBG(1, "%s(): dest doesn't exist\n", __func__); |
1019 | return -ENOENT; | 1022 | return -ENOENT; |
1020 | } | 1023 | } |
1021 | 1024 | ||
@@ -1115,7 +1118,7 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) | |||
1115 | dest = ip_vs_lookup_dest(svc, &udest->addr, dport); | 1118 | dest = ip_vs_lookup_dest(svc, &udest->addr, dport); |
1116 | 1119 | ||
1117 | if (dest == NULL) { | 1120 | if (dest == NULL) { |
1118 | IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n"); | 1121 | IP_VS_DBG(1, "%s(): destination not found!\n", __func__); |
1119 | return -ENOENT; | 1122 | return -ENOENT; |
1120 | } | 1123 | } |
1121 | 1124 | ||
@@ -1161,8 +1164,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u, | |||
1161 | /* Lookup the scheduler by 'u->sched_name' */ | 1164 | /* Lookup the scheduler by 'u->sched_name' */ |
1162 | sched = ip_vs_scheduler_get(u->sched_name); | 1165 | sched = ip_vs_scheduler_get(u->sched_name); |
1163 | if (sched == NULL) { | 1166 | if (sched == NULL) { |
1164 | IP_VS_INFO("Scheduler module ip_vs_%s not found\n", | 1167 | pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); |
1165 | u->sched_name); | ||
1166 | ret = -ENOENT; | 1168 | ret = -ENOENT; |
1167 | goto out_mod_dec; | 1169 | goto out_mod_dec; |
1168 | } | 1170 | } |
@@ -1176,7 +1178,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u, | |||
1176 | 1178 | ||
1177 | svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); | 1179 | svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); |
1178 | if (svc == NULL) { | 1180 | if (svc == NULL) { |
1179 | IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); | 1181 | IP_VS_DBG(1, "%s(): no memory\n", __func__); |
1180 | ret = -ENOMEM; | 1182 | ret = -ENOMEM; |
1181 | goto out_err; | 1183 | goto out_err; |
1182 | } | 1184 | } |
@@ -1259,8 +1261,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) | |||
1259 | */ | 1261 | */ |
1260 | sched = ip_vs_scheduler_get(u->sched_name); | 1262 | sched = ip_vs_scheduler_get(u->sched_name); |
1261 | if (sched == NULL) { | 1263 | if (sched == NULL) { |
1262 | IP_VS_INFO("Scheduler module ip_vs_%s not found\n", | 1264 | pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); |
1263 | u->sched_name); | ||
1264 | return -ENOENT; | 1265 | return -ENOENT; |
1265 | } | 1266 | } |
1266 | old_sched = sched; | 1267 | old_sched = sched; |
@@ -2077,8 +2078,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
2077 | return -EPERM; | 2078 | return -EPERM; |
2078 | 2079 | ||
2079 | if (len != set_arglen[SET_CMDID(cmd)]) { | 2080 | if (len != set_arglen[SET_CMDID(cmd)]) { |
2080 | IP_VS_ERR("set_ctl: len %u != %u\n", | 2081 | pr_err("set_ctl: len %u != %u\n", |
2081 | len, set_arglen[SET_CMDID(cmd)]); | 2082 | len, set_arglen[SET_CMDID(cmd)]); |
2082 | return -EINVAL; | 2083 | return -EINVAL; |
2083 | } | 2084 | } |
2084 | 2085 | ||
@@ -2129,9 +2130,9 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
2129 | 2130 | ||
2130 | /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */ | 2131 | /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */ |
2131 | if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) { | 2132 | if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) { |
2132 | IP_VS_ERR("set_ctl: invalid protocol: %d %pI4:%d %s\n", | 2133 | pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n", |
2133 | usvc.protocol, &usvc.addr.ip, | 2134 | usvc.protocol, &usvc.addr.ip, |
2134 | ntohs(usvc.port), usvc.sched_name); | 2135 | ntohs(usvc.port), usvc.sched_name); |
2135 | ret = -EFAULT; | 2136 | ret = -EFAULT; |
2136 | goto out_unlock; | 2137 | goto out_unlock; |
2137 | } | 2138 | } |
@@ -2356,8 +2357,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2356 | return -EPERM; | 2357 | return -EPERM; |
2357 | 2358 | ||
2358 | if (*len < get_arglen[GET_CMDID(cmd)]) { | 2359 | if (*len < get_arglen[GET_CMDID(cmd)]) { |
2359 | IP_VS_ERR("get_ctl: len %u < %u\n", | 2360 | pr_err("get_ctl: len %u < %u\n", |
2360 | *len, get_arglen[GET_CMDID(cmd)]); | 2361 | *len, get_arglen[GET_CMDID(cmd)]); |
2361 | return -EINVAL; | 2362 | return -EINVAL; |
2362 | } | 2363 | } |
2363 | 2364 | ||
@@ -2402,7 +2403,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2402 | size = sizeof(*get) + | 2403 | size = sizeof(*get) + |
2403 | sizeof(struct ip_vs_service_entry) * get->num_services; | 2404 | sizeof(struct ip_vs_service_entry) * get->num_services; |
2404 | if (*len != size) { | 2405 | if (*len != size) { |
2405 | IP_VS_ERR("length: %u != %u\n", *len, size); | 2406 | pr_err("length: %u != %u\n", *len, size); |
2406 | ret = -EINVAL; | 2407 | ret = -EINVAL; |
2407 | goto out; | 2408 | goto out; |
2408 | } | 2409 | } |
@@ -2442,7 +2443,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2442 | size = sizeof(*get) + | 2443 | size = sizeof(*get) + |
2443 | sizeof(struct ip_vs_dest_entry) * get->num_dests; | 2444 | sizeof(struct ip_vs_dest_entry) * get->num_dests; |
2444 | if (*len != size) { | 2445 | if (*len != size) { |
2445 | IP_VS_ERR("length: %u != %u\n", *len, size); | 2446 | pr_err("length: %u != %u\n", *len, size); |
2446 | ret = -EINVAL; | 2447 | ret = -EINVAL; |
2447 | goto out; | 2448 | goto out; |
2448 | } | 2449 | } |
@@ -3170,7 +3171,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) | |||
3170 | else if (cmd == IPVS_CMD_GET_CONFIG) | 3171 | else if (cmd == IPVS_CMD_GET_CONFIG) |
3171 | reply_cmd = IPVS_CMD_SET_CONFIG; | 3172 | reply_cmd = IPVS_CMD_SET_CONFIG; |
3172 | else { | 3173 | else { |
3173 | IP_VS_ERR("unknown Generic Netlink command\n"); | 3174 | pr_err("unknown Generic Netlink command\n"); |
3174 | return -EINVAL; | 3175 | return -EINVAL; |
3175 | } | 3176 | } |
3176 | 3177 | ||
@@ -3231,11 +3232,11 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info) | |||
3231 | } | 3232 | } |
3232 | 3233 | ||
3233 | genlmsg_end(msg, reply); | 3234 | genlmsg_end(msg, reply); |
3234 | ret = genlmsg_unicast(msg, info->snd_pid); | 3235 | ret = genlmsg_reply(msg, info); |
3235 | goto out; | 3236 | goto out; |
3236 | 3237 | ||
3237 | nla_put_failure: | 3238 | nla_put_failure: |
3238 | IP_VS_ERR("not enough space in Netlink message\n"); | 3239 | pr_err("not enough space in Netlink message\n"); |
3239 | ret = -EMSGSIZE; | 3240 | ret = -EMSGSIZE; |
3240 | 3241 | ||
3241 | out_err: | 3242 | out_err: |
@@ -3366,13 +3367,13 @@ int __init ip_vs_control_init(void) | |||
3366 | 3367 | ||
3367 | ret = nf_register_sockopt(&ip_vs_sockopts); | 3368 | ret = nf_register_sockopt(&ip_vs_sockopts); |
3368 | if (ret) { | 3369 | if (ret) { |
3369 | IP_VS_ERR("cannot register sockopt.\n"); | 3370 | pr_err("cannot register sockopt.\n"); |
3370 | return ret; | 3371 | return ret; |
3371 | } | 3372 | } |
3372 | 3373 | ||
3373 | ret = ip_vs_genl_register(); | 3374 | ret = ip_vs_genl_register(); |
3374 | if (ret) { | 3375 | if (ret) { |
3375 | IP_VS_ERR("cannot register Generic Netlink interface.\n"); | 3376 | pr_err("cannot register Generic Netlink interface.\n"); |
3376 | nf_unregister_sockopt(&ip_vs_sockopts); | 3377 | nf_unregister_sockopt(&ip_vs_sockopts); |
3377 | return ret; | 3378 | return ret; |
3378 | } | 3379 | } |
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c index a9dac74bb13f..fe3e18834b91 100644 --- a/net/netfilter/ipvs/ip_vs_dh.c +++ b/net/netfilter/ipvs/ip_vs_dh.c | |||
@@ -35,6 +35,9 @@ | |||
35 | * | 35 | * |
36 | */ | 36 | */ |
37 | 37 | ||
38 | #define KMSG_COMPONENT "IPVS" | ||
39 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
40 | |||
38 | #include <linux/ip.h> | 41 | #include <linux/ip.h> |
39 | #include <linux/module.h> | 42 | #include <linux/module.h> |
40 | #include <linux/kernel.h> | 43 | #include <linux/kernel.h> |
@@ -147,7 +150,7 @@ static int ip_vs_dh_init_svc(struct ip_vs_service *svc) | |||
147 | tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE, | 150 | tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE, |
148 | GFP_ATOMIC); | 151 | GFP_ATOMIC); |
149 | if (tbl == NULL) { | 152 | if (tbl == NULL) { |
150 | IP_VS_ERR("ip_vs_dh_init_svc(): no memory\n"); | 153 | pr_err("%s(): no memory\n", __func__); |
151 | return -ENOMEM; | 154 | return -ENOMEM; |
152 | } | 155 | } |
153 | svc->sched_data = tbl; | 156 | svc->sched_data = tbl; |
@@ -214,7 +217,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
214 | 217 | ||
215 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | 218 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); |
216 | 219 | ||
217 | IP_VS_DBG(6, "ip_vs_dh_schedule(): Scheduling...\n"); | 220 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
218 | 221 | ||
219 | tbl = (struct ip_vs_dh_bucket *)svc->sched_data; | 222 | tbl = (struct ip_vs_dh_bucket *)svc->sched_data; |
220 | dest = ip_vs_dh_get(svc->af, tbl, &iph.daddr); | 223 | dest = ip_vs_dh_get(svc->af, tbl, &iph.daddr); |
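
The two defines added at the top of each IPVS file are what keep the old "IPVS: " prefix after the IP_VS_ERR/IP_VS_INFO macros are dropped: the pr_* helpers expand pr_fmt() around the format string before it reaches printk(), so every message in the file is prefixed automatically. A userspace analog of the same trick (illustrative only, assuming GCC-style ##__VA_ARGS__, not kernel code):

    #include <stdio.h>

    #define KMSG_COMPONENT "IPVS"
    #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
    #define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
            /* prints "IPVS: main(): no memory" -- the prefix comes from
             * pr_fmt(), the function name from __func__ */
            pr_err("%s(): no memory\n", __func__);
            return 0;
    }
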
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c index 2eb2860dabb5..702b53ca937c 100644 --- a/net/netfilter/ipvs/ip_vs_est.c +++ b/net/netfilter/ipvs/ip_vs_est.c | |||
@@ -11,6 +11,10 @@ | |||
11 | * Changes: | 11 | * Changes: |
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | |||
15 | #define KMSG_COMPONENT "IPVS" | ||
16 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
17 | |||
14 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
15 | #include <linux/jiffies.h> | 19 | #include <linux/jiffies.h> |
16 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 428edbf481cc..33e2c799cba7 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c | |||
@@ -22,6 +22,9 @@ | |||
22 | * | 22 | * |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #define KMSG_COMPONENT "IPVS" | ||
26 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
27 | |||
25 | #include <linux/module.h> | 28 | #include <linux/module.h> |
26 | #include <linux/moduleparam.h> | 29 | #include <linux/moduleparam.h> |
27 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
@@ -382,8 +385,8 @@ static int __init ip_vs_ftp_init(void) | |||
382 | ret = register_ip_vs_app_inc(app, app->protocol, ports[i]); | 385 | ret = register_ip_vs_app_inc(app, app->protocol, ports[i]); |
383 | if (ret) | 386 | if (ret) |
384 | break; | 387 | break; |
385 | IP_VS_INFO("%s: loaded support on port[%d] = %d\n", | 388 | pr_info("%s: loaded support on port[%d] = %d\n", |
386 | app->name, i, ports[i]); | 389 | app->name, i, ports[i]); |
387 | } | 390 | } |
388 | 391 | ||
389 | if (ret) | 392 | if (ret) |
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 3eb5e2660c49..c1757f3620cd 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c | |||
@@ -39,6 +39,9 @@ | |||
39 | * me to write this module. | 39 | * me to write this module. |
40 | */ | 40 | */ |
41 | 41 | ||
42 | #define KMSG_COMPONENT "IPVS" | ||
43 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
44 | |||
42 | #include <linux/ip.h> | 45 | #include <linux/ip.h> |
43 | #include <linux/module.h> | 46 | #include <linux/module.h> |
44 | #include <linux/kernel.h> | 47 | #include <linux/kernel.h> |
@@ -199,7 +202,7 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr, | |||
199 | if (!en) { | 202 | if (!en) { |
200 | en = kmalloc(sizeof(*en), GFP_ATOMIC); | 203 | en = kmalloc(sizeof(*en), GFP_ATOMIC); |
201 | if (!en) { | 204 | if (!en) { |
202 | IP_VS_ERR("ip_vs_lblc_new(): no memory\n"); | 205 | pr_err("%s(): no memory\n", __func__); |
203 | return NULL; | 206 | return NULL; |
204 | } | 207 | } |
205 | 208 | ||
@@ -332,7 +335,7 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc) | |||
332 | */ | 335 | */ |
333 | tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); | 336 | tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); |
334 | if (tbl == NULL) { | 337 | if (tbl == NULL) { |
335 | IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n"); | 338 | pr_err("%s(): no memory\n", __func__); |
336 | return -ENOMEM; | 339 | return -ENOMEM; |
337 | } | 340 | } |
338 | svc->sched_data = tbl; | 341 | svc->sched_data = tbl; |
@@ -477,7 +480,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
477 | 480 | ||
478 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | 481 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); |
479 | 482 | ||
480 | IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n"); | 483 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
481 | 484 | ||
482 | /* First look in our cache */ | 485 | /* First look in our cache */ |
483 | read_lock(&svc->sched_lock); | 486 | read_lock(&svc->sched_lock); |
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index c04ce56c7f0f..715b57f9540d 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c | |||
@@ -37,6 +37,9 @@ | |||
37 | * | 37 | * |
38 | */ | 38 | */ |
39 | 39 | ||
40 | #define KMSG_COMPONENT "IPVS" | ||
41 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
42 | |||
40 | #include <linux/ip.h> | 43 | #include <linux/ip.h> |
41 | #include <linux/module.h> | 44 | #include <linux/module.h> |
42 | #include <linux/kernel.h> | 45 | #include <linux/kernel.h> |
@@ -108,7 +111,7 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) | |||
108 | 111 | ||
109 | e = kmalloc(sizeof(*e), GFP_ATOMIC); | 112 | e = kmalloc(sizeof(*e), GFP_ATOMIC); |
110 | if (e == NULL) { | 113 | if (e == NULL) { |
111 | IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n"); | 114 | pr_err("%s(): no memory\n", __func__); |
112 | return NULL; | 115 | return NULL; |
113 | } | 116 | } |
114 | 117 | ||
@@ -202,8 +205,9 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) | |||
202 | } | 205 | } |
203 | } | 206 | } |
204 | 207 | ||
205 | IP_VS_DBG_BUF(6, "ip_vs_dest_set_min: server %s:%d " | 208 | IP_VS_DBG_BUF(6, "%s(): server %s:%d " |
206 | "activeconns %d refcnt %d weight %d overhead %d\n", | 209 | "activeconns %d refcnt %d weight %d overhead %d\n", |
210 | __func__, | ||
207 | IP_VS_DBG_ADDR(least->af, &least->addr), | 211 | IP_VS_DBG_ADDR(least->af, &least->addr), |
208 | ntohs(least->port), | 212 | ntohs(least->port), |
209 | atomic_read(&least->activeconns), | 213 | atomic_read(&least->activeconns), |
@@ -249,8 +253,9 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) | |||
249 | } | 253 | } |
250 | } | 254 | } |
251 | 255 | ||
252 | IP_VS_DBG_BUF(6, "ip_vs_dest_set_max: server %s:%d " | 256 | IP_VS_DBG_BUF(6, "%s(): server %s:%d " |
253 | "activeconns %d refcnt %d weight %d overhead %d\n", | 257 | "activeconns %d refcnt %d weight %d overhead %d\n", |
258 | __func__, | ||
254 | IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port), | 259 | IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port), |
255 | atomic_read(&most->activeconns), | 260 | atomic_read(&most->activeconns), |
256 | atomic_read(&most->refcnt), | 261 | atomic_read(&most->refcnt), |
@@ -374,7 +379,7 @@ ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr, | |||
374 | if (!en) { | 379 | if (!en) { |
375 | en = kmalloc(sizeof(*en), GFP_ATOMIC); | 380 | en = kmalloc(sizeof(*en), GFP_ATOMIC); |
376 | if (!en) { | 381 | if (!en) { |
377 | IP_VS_ERR("ip_vs_lblcr_new(): no memory\n"); | 382 | pr_err("%s(): no memory\n", __func__); |
378 | return NULL; | 383 | return NULL; |
379 | } | 384 | } |
380 | 385 | ||
@@ -508,7 +513,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc) | |||
508 | */ | 513 | */ |
509 | tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); | 514 | tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC); |
510 | if (tbl == NULL) { | 515 | if (tbl == NULL) { |
511 | IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n"); | 516 | pr_err("%s(): no memory\n", __func__); |
512 | return -ENOMEM; | 517 | return -ENOMEM; |
513 | } | 518 | } |
514 | svc->sched_data = tbl; | 519 | svc->sched_data = tbl; |
@@ -654,7 +659,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
654 | 659 | ||
655 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | 660 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); |
656 | 661 | ||
657 | IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n"); | 662 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
658 | 663 | ||
659 | /* First look in our cache */ | 664 | /* First look in our cache */ |
660 | read_lock(&svc->sched_lock); | 665 | read_lock(&svc->sched_lock); |
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c index d0dadc8a65fd..4f69db1fac56 100644 --- a/net/netfilter/ipvs/ip_vs_lc.c +++ b/net/netfilter/ipvs/ip_vs_lc.c | |||
@@ -14,6 +14,9 @@ | |||
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define KMSG_COMPONENT "IPVS" | ||
18 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
19 | |||
17 | #include <linux/module.h> | 20 | #include <linux/module.h> |
18 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
19 | 22 | ||
@@ -44,7 +47,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
44 | struct ip_vs_dest *dest, *least = NULL; | 47 | struct ip_vs_dest *dest, *least = NULL; |
45 | unsigned int loh = 0, doh; | 48 | unsigned int loh = 0, doh; |
46 | 49 | ||
47 | IP_VS_DBG(6, "ip_vs_lc_schedule(): Scheduling...\n"); | 50 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
48 | 51 | ||
49 | /* | 52 | /* |
50 | * Simply select the server with the least number of | 53 | * Simply select the server with the least number of |
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c index 694952db5026..c413e1830823 100644 --- a/net/netfilter/ipvs/ip_vs_nq.c +++ b/net/netfilter/ipvs/ip_vs_nq.c | |||
@@ -31,6 +31,9 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #define KMSG_COMPONENT "IPVS" | ||
35 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
36 | |||
34 | #include <linux/module.h> | 37 | #include <linux/module.h> |
35 | #include <linux/kernel.h> | 38 | #include <linux/kernel.h> |
36 | 39 | ||
@@ -57,7 +60,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
57 | struct ip_vs_dest *dest, *least = NULL; | 60 | struct ip_vs_dest *dest, *least = NULL; |
58 | unsigned int loh = 0, doh; | 61 | unsigned int loh = 0, doh; |
59 | 62 | ||
60 | IP_VS_DBG(6, "ip_vs_nq_schedule(): Scheduling...\n"); | 63 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
61 | 64 | ||
62 | /* | 65 | /* |
63 | * We calculate the load of each dest server as follows: | 66 | * We calculate the load of each dest server as follows: |
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index a01520e3d6b8..3e7671674549 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c | |||
@@ -13,6 +13,9 @@ | |||
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #define KMSG_COMPONENT "IPVS" | ||
17 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
18 | |||
16 | #include <linux/module.h> | 19 | #include <linux/module.h> |
17 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
18 | #include <linux/skbuff.h> | 21 | #include <linux/skbuff.h> |
@@ -124,7 +127,8 @@ ip_vs_create_timeout_table(int *table, int size) | |||
124 | * Set timeout value for state specified by name | 127 | * Set timeout value for state specified by name |
125 | */ | 128 | */ |
126 | int | 129 | int |
127 | ip_vs_set_state_timeout(int *table, int num, char **names, char *name, int to) | 130 | ip_vs_set_state_timeout(int *table, int num, const char *const *names, |
131 | const char *name, int to) | ||
128 | { | 132 | { |
129 | int i; | 133 | int i; |
130 | 134 | ||
@@ -181,7 +185,7 @@ ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp, | |||
181 | &ih->daddr, ntohs(pptr[1])); | 185 | &ih->daddr, ntohs(pptr[1])); |
182 | } | 186 | } |
183 | 187 | ||
184 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | 188 | pr_debug("%s: %s\n", msg, buf); |
185 | } | 189 | } |
186 | 190 | ||
187 | #ifdef CONFIG_IP_VS_IPV6 | 191 | #ifdef CONFIG_IP_VS_IPV6 |
@@ -215,7 +219,7 @@ ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp, | |||
215 | &ih->daddr, ntohs(pptr[1])); | 219 | &ih->daddr, ntohs(pptr[1])); |
216 | } | 220 | } |
217 | 221 | ||
218 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | 222 | pr_debug("%s: %s\n", msg, buf); |
219 | } | 223 | } |
220 | #endif | 224 | #endif |
221 | 225 | ||
@@ -259,7 +263,7 @@ int __init ip_vs_protocol_init(void) | |||
259 | #ifdef CONFIG_IP_VS_PROTO_ESP | 263 | #ifdef CONFIG_IP_VS_PROTO_ESP |
260 | REGISTER_PROTOCOL(&ip_vs_protocol_esp); | 264 | REGISTER_PROTOCOL(&ip_vs_protocol_esp); |
261 | #endif | 265 | #endif |
262 | IP_VS_INFO("Registered protocols (%s)\n", &protocols[2]); | 266 | pr_info("Registered protocols (%s)\n", &protocols[2]); |
263 | 267 | ||
264 | return 0; | 268 | return 0; |
265 | } | 269 | } |
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c index 79f56c1e7c19..c30b43c36cd7 100644 --- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c +++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c | |||
@@ -10,6 +10,9 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define KMSG_COMPONENT "IPVS" | ||
14 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
15 | |||
13 | #include <linux/in.h> | 16 | #include <linux/in.h> |
14 | #include <linux/ip.h> | 17 | #include <linux/ip.h> |
15 | #include <linux/module.h> | 18 | #include <linux/module.h> |
@@ -138,7 +141,7 @@ ah_esp_debug_packet_v4(struct ip_vs_protocol *pp, const struct sk_buff *skb, | |||
138 | sprintf(buf, "%s %pI4->%pI4", | 141 | sprintf(buf, "%s %pI4->%pI4", |
139 | pp->name, &ih->saddr, &ih->daddr); | 142 | pp->name, &ih->saddr, &ih->daddr); |
140 | 143 | ||
141 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | 144 | pr_debug("%s: %s\n", msg, buf); |
142 | } | 145 | } |
143 | 146 | ||
144 | #ifdef CONFIG_IP_VS_IPV6 | 147 | #ifdef CONFIG_IP_VS_IPV6 |
@@ -156,7 +159,7 @@ ah_esp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb, | |||
156 | sprintf(buf, "%s %pI6->%pI6", | 159 | sprintf(buf, "%s %pI6->%pI6", |
157 | pp->name, &ih->saddr, &ih->daddr); | 160 | pp->name, &ih->saddr, &ih->daddr); |
158 | 161 | ||
159 | printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); | 162 | pr_debug("%s: %s\n", msg, buf); |
160 | } | 163 | } |
161 | #endif | 164 | #endif |
162 | 165 | ||
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index 8cba41802850..91d28e073742 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c | |||
@@ -13,6 +13,9 @@ | |||
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #define KMSG_COMPONENT "IPVS" | ||
17 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
18 | |||
16 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
17 | #include <linux/ip.h> | 20 | #include <linux/ip.h> |
18 | #include <linux/tcp.h> /* for tcphdr */ | 21 | #include <linux/tcp.h> /* for tcphdr */ |
@@ -374,7 +377,7 @@ static int tcp_timeouts[IP_VS_TCP_S_LAST+1] = { | |||
374 | [IP_VS_TCP_S_LAST] = 2*HZ, | 377 | [IP_VS_TCP_S_LAST] = 2*HZ, |
375 | }; | 378 | }; |
376 | 379 | ||
377 | static char * tcp_state_name_table[IP_VS_TCP_S_LAST+1] = { | 380 | static const char *const tcp_state_name_table[IP_VS_TCP_S_LAST+1] = { |
378 | [IP_VS_TCP_S_NONE] = "NONE", | 381 | [IP_VS_TCP_S_NONE] = "NONE", |
379 | [IP_VS_TCP_S_ESTABLISHED] = "ESTABLISHED", | 382 | [IP_VS_TCP_S_ESTABLISHED] = "ESTABLISHED", |
380 | [IP_VS_TCP_S_SYN_SENT] = "SYN_SENT", | 383 | [IP_VS_TCP_S_SYN_SENT] = "SYN_SENT", |
@@ -661,7 +664,7 @@ tcp_app_conn_bind(struct ip_vs_conn *cp) | |||
661 | break; | 664 | break; |
662 | spin_unlock(&tcp_app_lock); | 665 | spin_unlock(&tcp_app_lock); |
663 | 666 | ||
664 | IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->" | 667 | IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->" |
665 | "%s:%u to app %s on port %u\n", | 668 | "%s:%u to app %s on port %u\n", |
666 | __func__, | 669 | __func__, |
667 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), | 670 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), |
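
Converting tcp_state_name_table to "const char *const" makes both the pointer array and (together with the string literals) the text it references read-only, so the whole table can live in rodata. A small self-contained sketch of such a table plus a bounds-checked lookup; the state names below are placeholders, not the patch's:

    enum { S_NONE, S_ESTABLISHED, S_LAST };

    static const char *const state_name_table[S_LAST + 1] = {
            [S_NONE]        = "NONE",
            [S_ESTABLISHED] = "ESTABLISHED",
            [S_LAST]        = "BUG!",
    };

    static const char *state_name(int state)
    {
            if (state < 0 || state > S_LAST)
                    return "ERR!";
            return state_name_table[state] ? state_name_table[state] : "?";
    }
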
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c index d2930a71084b..e7a6885e0167 100644 --- a/net/netfilter/ipvs/ip_vs_proto_udp.c +++ b/net/netfilter/ipvs/ip_vs_proto_udp.c | |||
@@ -13,6 +13,9 @@ | |||
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #define KMSG_COMPONENT "IPVS" | ||
17 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
18 | |||
16 | #include <linux/in.h> | 19 | #include <linux/in.h> |
17 | #include <linux/ip.h> | 20 | #include <linux/ip.h> |
18 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
@@ -442,7 +445,7 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp) | |||
442 | break; | 445 | break; |
443 | spin_unlock(&udp_app_lock); | 446 | spin_unlock(&udp_app_lock); |
444 | 447 | ||
445 | IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->" | 448 | IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->" |
446 | "%s:%u to app %s on port %u\n", | 449 | "%s:%u to app %s on port %u\n", |
447 | __func__, | 450 | __func__, |
448 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), | 451 | IP_VS_DBG_ADDR(cp->af, &cp->caddr), |
@@ -469,7 +472,7 @@ static int udp_timeouts[IP_VS_UDP_S_LAST+1] = { | |||
469 | [IP_VS_UDP_S_LAST] = 2*HZ, | 472 | [IP_VS_UDP_S_LAST] = 2*HZ, |
470 | }; | 473 | }; |
471 | 474 | ||
472 | static char * udp_state_name_table[IP_VS_UDP_S_LAST+1] = { | 475 | static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = { |
473 | [IP_VS_UDP_S_NORMAL] = "UDP", | 476 | [IP_VS_UDP_S_NORMAL] = "UDP", |
474 | [IP_VS_UDP_S_LAST] = "BUG!", | 477 | [IP_VS_UDP_S_LAST] = "BUG!", |
475 | }; | 478 | }; |
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c index 2d16ab7f8c1e..e210f37d8ea2 100644 --- a/net/netfilter/ipvs/ip_vs_rr.c +++ b/net/netfilter/ipvs/ip_vs_rr.c | |||
@@ -19,6 +19,9 @@ | |||
19 | * | 19 | * |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #define KMSG_COMPONENT "IPVS" | ||
23 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
24 | |||
22 | #include <linux/module.h> | 25 | #include <linux/module.h> |
23 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
24 | 27 | ||
@@ -48,7 +51,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
48 | struct list_head *p, *q; | 51 | struct list_head *p, *q; |
49 | struct ip_vs_dest *dest; | 52 | struct ip_vs_dest *dest; |
50 | 53 | ||
51 | IP_VS_DBG(6, "ip_vs_rr_schedule(): Scheduling...\n"); | 54 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
52 | 55 | ||
53 | write_lock(&svc->sched_lock); | 56 | write_lock(&svc->sched_lock); |
54 | p = (struct list_head *)svc->sched_data; | 57 | p = (struct list_head *)svc->sched_data; |
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c index a46ad9e35016..bbc1ac795952 100644 --- a/net/netfilter/ipvs/ip_vs_sched.c +++ b/net/netfilter/ipvs/ip_vs_sched.c | |||
@@ -17,6 +17,9 @@ | |||
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #define KMSG_COMPONENT "IPVS" | ||
21 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
22 | |||
20 | #include <linux/module.h> | 23 | #include <linux/module.h> |
21 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
22 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
@@ -44,11 +47,11 @@ int ip_vs_bind_scheduler(struct ip_vs_service *svc, | |||
44 | int ret; | 47 | int ret; |
45 | 48 | ||
46 | if (svc == NULL) { | 49 | if (svc == NULL) { |
47 | IP_VS_ERR("ip_vs_bind_scheduler(): svc arg NULL\n"); | 50 | pr_err("%s(): svc arg NULL\n", __func__); |
48 | return -EINVAL; | 51 | return -EINVAL; |
49 | } | 52 | } |
50 | if (scheduler == NULL) { | 53 | if (scheduler == NULL) { |
51 | IP_VS_ERR("ip_vs_bind_scheduler(): scheduler arg NULL\n"); | 54 | pr_err("%s(): scheduler arg NULL\n", __func__); |
52 | return -EINVAL; | 55 | return -EINVAL; |
53 | } | 56 | } |
54 | 57 | ||
@@ -57,7 +60,7 @@ int ip_vs_bind_scheduler(struct ip_vs_service *svc, | |||
57 | if (scheduler->init_service) { | 60 | if (scheduler->init_service) { |
58 | ret = scheduler->init_service(svc); | 61 | ret = scheduler->init_service(svc); |
59 | if (ret) { | 62 | if (ret) { |
60 | IP_VS_ERR("ip_vs_bind_scheduler(): init error\n"); | 63 | pr_err("%s(): init error\n", __func__); |
61 | return ret; | 64 | return ret; |
62 | } | 65 | } |
63 | } | 66 | } |
@@ -74,19 +77,19 @@ int ip_vs_unbind_scheduler(struct ip_vs_service *svc) | |||
74 | struct ip_vs_scheduler *sched; | 77 | struct ip_vs_scheduler *sched; |
75 | 78 | ||
76 | if (svc == NULL) { | 79 | if (svc == NULL) { |
77 | IP_VS_ERR("ip_vs_unbind_scheduler(): svc arg NULL\n"); | 80 | pr_err("%s(): svc arg NULL\n", __func__); |
78 | return -EINVAL; | 81 | return -EINVAL; |
79 | } | 82 | } |
80 | 83 | ||
81 | sched = svc->scheduler; | 84 | sched = svc->scheduler; |
82 | if (sched == NULL) { | 85 | if (sched == NULL) { |
83 | IP_VS_ERR("ip_vs_unbind_scheduler(): svc isn't bound\n"); | 86 | pr_err("%s(): svc isn't bound\n", __func__); |
84 | return -EINVAL; | 87 | return -EINVAL; |
85 | } | 88 | } |
86 | 89 | ||
87 | if (sched->done_service) { | 90 | if (sched->done_service) { |
88 | if (sched->done_service(svc) != 0) { | 91 | if (sched->done_service(svc) != 0) { |
89 | IP_VS_ERR("ip_vs_unbind_scheduler(): done error\n"); | 92 | pr_err("%s(): done error\n", __func__); |
90 | return -EINVAL; | 93 | return -EINVAL; |
91 | } | 94 | } |
92 | } | 95 | } |
@@ -103,8 +106,7 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name) | |||
103 | { | 106 | { |
104 | struct ip_vs_scheduler *sched; | 107 | struct ip_vs_scheduler *sched; |
105 | 108 | ||
106 | IP_VS_DBG(2, "ip_vs_sched_getbyname(): sched_name \"%s\"\n", | 109 | IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name); |
107 | sched_name); | ||
108 | 110 | ||
109 | read_lock_bh(&__ip_vs_sched_lock); | 111 | read_lock_bh(&__ip_vs_sched_lock); |
110 | 112 | ||
@@ -170,12 +172,12 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) | |||
170 | struct ip_vs_scheduler *sched; | 172 | struct ip_vs_scheduler *sched; |
171 | 173 | ||
172 | if (!scheduler) { | 174 | if (!scheduler) { |
173 | IP_VS_ERR("register_ip_vs_scheduler(): NULL arg\n"); | 175 | pr_err("%s(): NULL arg\n", __func__); |
174 | return -EINVAL; | 176 | return -EINVAL; |
175 | } | 177 | } |
176 | 178 | ||
177 | if (!scheduler->name) { | 179 | if (!scheduler->name) { |
178 | IP_VS_ERR("register_ip_vs_scheduler(): NULL scheduler_name\n"); | 180 | pr_err("%s(): NULL scheduler_name\n", __func__); |
179 | return -EINVAL; | 181 | return -EINVAL; |
180 | } | 182 | } |
181 | 183 | ||
@@ -187,8 +189,8 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) | |||
187 | if (!list_empty(&scheduler->n_list)) { | 189 | if (!list_empty(&scheduler->n_list)) { |
188 | write_unlock_bh(&__ip_vs_sched_lock); | 190 | write_unlock_bh(&__ip_vs_sched_lock); |
189 | ip_vs_use_count_dec(); | 191 | ip_vs_use_count_dec(); |
190 | IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler " | 192 | pr_err("%s(): [%s] scheduler already linked\n", |
191 | "already linked\n", scheduler->name); | 193 | __func__, scheduler->name); |
192 | return -EINVAL; | 194 | return -EINVAL; |
193 | } | 195 | } |
194 | 196 | ||
@@ -200,9 +202,8 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) | |||
200 | if (strcmp(scheduler->name, sched->name) == 0) { | 202 | if (strcmp(scheduler->name, sched->name) == 0) { |
201 | write_unlock_bh(&__ip_vs_sched_lock); | 203 | write_unlock_bh(&__ip_vs_sched_lock); |
202 | ip_vs_use_count_dec(); | 204 | ip_vs_use_count_dec(); |
203 | IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler " | 205 | pr_err("%s(): [%s] scheduler already existed " |
204 | "already existed in the system\n", | 206 | "in the system\n", __func__, scheduler->name); |
205 | scheduler->name); | ||
206 | return -EINVAL; | 207 | return -EINVAL; |
207 | } | 208 | } |
208 | } | 209 | } |
@@ -212,7 +213,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) | |||
212 | list_add(&scheduler->n_list, &ip_vs_schedulers); | 213 | list_add(&scheduler->n_list, &ip_vs_schedulers); |
213 | write_unlock_bh(&__ip_vs_sched_lock); | 214 | write_unlock_bh(&__ip_vs_sched_lock); |
214 | 215 | ||
215 | IP_VS_INFO("[%s] scheduler registered.\n", scheduler->name); | 216 | pr_info("[%s] scheduler registered.\n", scheduler->name); |
216 | 217 | ||
217 | return 0; | 218 | return 0; |
218 | } | 219 | } |
@@ -224,15 +225,15 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) | |||
224 | int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) | 225 | int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) |
225 | { | 226 | { |
226 | if (!scheduler) { | 227 | if (!scheduler) { |
227 | IP_VS_ERR( "unregister_ip_vs_scheduler(): NULL arg\n"); | 228 | pr_err("%s(): NULL arg\n", __func__); |
228 | return -EINVAL; | 229 | return -EINVAL; |
229 | } | 230 | } |
230 | 231 | ||
231 | write_lock_bh(&__ip_vs_sched_lock); | 232 | write_lock_bh(&__ip_vs_sched_lock); |
232 | if (list_empty(&scheduler->n_list)) { | 233 | if (list_empty(&scheduler->n_list)) { |
233 | write_unlock_bh(&__ip_vs_sched_lock); | 234 | write_unlock_bh(&__ip_vs_sched_lock); |
234 | IP_VS_ERR("unregister_ip_vs_scheduler(): [%s] scheduler " | 235 | pr_err("%s(): [%s] scheduler is not in the list. failed\n", |
235 | "is not in the list. failed\n", scheduler->name); | 236 | __func__, scheduler->name); |
236 | return -EINVAL; | 237 | return -EINVAL; |
237 | } | 238 | } |
238 | 239 | ||
@@ -245,7 +246,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) | |||
245 | /* decrease the module use count */ | 246 | /* decrease the module use count */ |
246 | ip_vs_use_count_dec(); | 247 | ip_vs_use_count_dec(); |
247 | 248 | ||
248 | IP_VS_INFO("[%s] scheduler unregistered.\n", scheduler->name); | 249 | pr_info("[%s] scheduler unregistered.\n", scheduler->name); |
249 | 250 | ||
250 | return 0; | 251 | return 0; |
251 | } | 252 | } |
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c index 20e4657d2f3b..1ab75a9dc400 100644 --- a/net/netfilter/ipvs/ip_vs_sed.c +++ b/net/netfilter/ipvs/ip_vs_sed.c | |||
@@ -35,6 +35,9 @@ | |||
35 | * | 35 | * |
36 | */ | 36 | */ |
37 | 37 | ||
38 | #define KMSG_COMPONENT "IPVS" | ||
39 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
40 | |||
38 | #include <linux/module.h> | 41 | #include <linux/module.h> |
39 | #include <linux/kernel.h> | 42 | #include <linux/kernel.h> |
40 | 43 | ||
@@ -61,7 +64,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
61 | struct ip_vs_dest *dest, *least; | 64 | struct ip_vs_dest *dest, *least; |
62 | unsigned int loh, doh; | 65 | unsigned int loh, doh; |
63 | 66 | ||
64 | IP_VS_DBG(6, "ip_vs_sed_schedule(): Scheduling...\n"); | 67 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
65 | 68 | ||
66 | /* | 69 | /* |
67 | * We calculate the load of each dest server as follows: | 70 | * We calculate the load of each dest server as follows: |
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index 75709ebeb630..8e6cfd36e6f0 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c | |||
@@ -32,6 +32,9 @@ | |||
32 | * | 32 | * |
33 | */ | 33 | */ |
34 | 34 | ||
35 | #define KMSG_COMPONENT "IPVS" | ||
36 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
37 | |||
35 | #include <linux/ip.h> | 38 | #include <linux/ip.h> |
36 | #include <linux/module.h> | 39 | #include <linux/module.h> |
37 | #include <linux/kernel.h> | 40 | #include <linux/kernel.h> |
@@ -144,7 +147,7 @@ static int ip_vs_sh_init_svc(struct ip_vs_service *svc) | |||
144 | tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE, | 147 | tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE, |
145 | GFP_ATOMIC); | 148 | GFP_ATOMIC); |
146 | if (tbl == NULL) { | 149 | if (tbl == NULL) { |
147 | IP_VS_ERR("ip_vs_sh_init_svc(): no memory\n"); | 150 | pr_err("%s(): no memory\n", __func__); |
148 | return -ENOMEM; | 151 | return -ENOMEM; |
149 | } | 152 | } |
150 | svc->sched_data = tbl; | 153 | svc->sched_data = tbl; |
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 5c48378a852f..e177f0dc2084 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -17,6 +17,9 @@ | |||
17 | * Justin Ossevoort : Fix endian problem on sync message size. | 17 | * Justin Ossevoort : Fix endian problem on sync message size. |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #define KMSG_COMPONENT "IPVS" | ||
21 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
22 | |||
20 | #include <linux/module.h> | 23 | #include <linux/module.h> |
21 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
22 | #include <linux/inetdevice.h> | 25 | #include <linux/inetdevice.h> |
@@ -243,7 +246,7 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp) | |||
243 | if (!curr_sb) { | 246 | if (!curr_sb) { |
244 | if (!(curr_sb=ip_vs_sync_buff_create())) { | 247 | if (!(curr_sb=ip_vs_sync_buff_create())) { |
245 | spin_unlock(&curr_sb_lock); | 248 | spin_unlock(&curr_sb_lock); |
246 | IP_VS_ERR("ip_vs_sync_buff_create failed.\n"); | 249 | pr_err("ip_vs_sync_buff_create failed.\n"); |
247 | return; | 250 | return; |
248 | } | 251 | } |
249 | } | 252 | } |
@@ -409,7 +412,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen) | |||
409 | if (dest) | 412 | if (dest) |
410 | atomic_dec(&dest->refcnt); | 413 | atomic_dec(&dest->refcnt); |
411 | if (!cp) { | 414 | if (!cp) { |
412 | IP_VS_ERR("ip_vs_conn_new failed\n"); | 415 | pr_err("ip_vs_conn_new failed\n"); |
413 | return; | 416 | return; |
414 | } | 417 | } |
415 | } else if (!cp->dest) { | 418 | } else if (!cp->dest) { |
@@ -577,8 +580,8 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname) | |||
577 | 580 | ||
578 | addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); | 581 | addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); |
579 | if (!addr) | 582 | if (!addr) |
580 | IP_VS_ERR("You probably need to specify IP address on " | 583 | pr_err("You probably need to specify IP address on " |
581 | "multicast interface.\n"); | 584 | "multicast interface.\n"); |
582 | 585 | ||
583 | IP_VS_DBG(7, "binding socket with (%s) %pI4\n", | 586 | IP_VS_DBG(7, "binding socket with (%s) %pI4\n", |
584 | ifname, &addr); | 587 | ifname, &addr); |
@@ -602,13 +605,13 @@ static struct socket * make_send_sock(void) | |||
602 | /* First create a socket */ | 605 | /* First create a socket */ |
603 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); | 606 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); |
604 | if (result < 0) { | 607 | if (result < 0) { |
605 | IP_VS_ERR("Error during creation of socket; terminating\n"); | 608 | pr_err("Error during creation of socket; terminating\n"); |
606 | return ERR_PTR(result); | 609 | return ERR_PTR(result); |
607 | } | 610 | } |
608 | 611 | ||
609 | result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn); | 612 | result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn); |
610 | if (result < 0) { | 613 | if (result < 0) { |
611 | IP_VS_ERR("Error setting outbound mcast interface\n"); | 614 | pr_err("Error setting outbound mcast interface\n"); |
612 | goto error; | 615 | goto error; |
613 | } | 616 | } |
614 | 617 | ||
@@ -617,14 +620,14 @@ static struct socket * make_send_sock(void) | |||
617 | 620 | ||
618 | result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn); | 621 | result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn); |
619 | if (result < 0) { | 622 | if (result < 0) { |
620 | IP_VS_ERR("Error binding address of the mcast interface\n"); | 623 | pr_err("Error binding address of the mcast interface\n"); |
621 | goto error; | 624 | goto error; |
622 | } | 625 | } |
623 | 626 | ||
624 | result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr, | 627 | result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr, |
625 | sizeof(struct sockaddr), 0); | 628 | sizeof(struct sockaddr), 0); |
626 | if (result < 0) { | 629 | if (result < 0) { |
627 | IP_VS_ERR("Error connecting to the multicast addr\n"); | 630 | pr_err("Error connecting to the multicast addr\n"); |
628 | goto error; | 631 | goto error; |
629 | } | 632 | } |
630 | 633 | ||
@@ -647,7 +650,7 @@ static struct socket * make_receive_sock(void) | |||
647 | /* First create a socket */ | 650 | /* First create a socket */ |
648 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); | 651 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); |
649 | if (result < 0) { | 652 | if (result < 0) { |
650 | IP_VS_ERR("Error during creation of socket; terminating\n"); | 653 | pr_err("Error during creation of socket; terminating\n"); |
651 | return ERR_PTR(result); | 654 | return ERR_PTR(result); |
652 | } | 655 | } |
653 | 656 | ||
@@ -657,7 +660,7 @@ static struct socket * make_receive_sock(void) | |||
657 | result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr, | 660 | result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr, |
658 | sizeof(struct sockaddr)); | 661 | sizeof(struct sockaddr)); |
659 | if (result < 0) { | 662 | if (result < 0) { |
660 | IP_VS_ERR("Error binding to the multicast addr\n"); | 663 | pr_err("Error binding to the multicast addr\n"); |
661 | goto error; | 664 | goto error; |
662 | } | 665 | } |
663 | 666 | ||
@@ -666,7 +669,7 @@ static struct socket * make_receive_sock(void) | |||
666 | (struct in_addr *) &mcast_addr.sin_addr, | 669 | (struct in_addr *) &mcast_addr.sin_addr, |
667 | ip_vs_backup_mcast_ifn); | 670 | ip_vs_backup_mcast_ifn); |
668 | if (result < 0) { | 671 | if (result < 0) { |
669 | IP_VS_ERR("Error joining to the multicast group\n"); | 672 | pr_err("Error joining to the multicast group\n"); |
670 | goto error; | 673 | goto error; |
671 | } | 674 | } |
672 | 675 | ||
@@ -706,7 +709,7 @@ ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg) | |||
706 | msg->size = htons(msg->size); | 709 | msg->size = htons(msg->size); |
707 | 710 | ||
708 | if (ip_vs_send_async(sock, (char *)msg, msize) != msize) | 711 | if (ip_vs_send_async(sock, (char *)msg, msize) != msize) |
709 | IP_VS_ERR("ip_vs_send_async error\n"); | 712 | pr_err("ip_vs_send_async error\n"); |
710 | } | 713 | } |
711 | 714 | ||
712 | static int | 715 | static int |
@@ -737,9 +740,9 @@ static int sync_thread_master(void *data) | |||
737 | struct ip_vs_sync_thread_data *tinfo = data; | 740 | struct ip_vs_sync_thread_data *tinfo = data; |
738 | struct ip_vs_sync_buff *sb; | 741 | struct ip_vs_sync_buff *sb; |
739 | 742 | ||
740 | IP_VS_INFO("sync thread started: state = MASTER, mcast_ifn = %s, " | 743 | pr_info("sync thread started: state = MASTER, mcast_ifn = %s, " |
741 | "syncid = %d\n", | 744 | "syncid = %d\n", |
742 | ip_vs_master_mcast_ifn, ip_vs_master_syncid); | 745 | ip_vs_master_mcast_ifn, ip_vs_master_syncid); |
743 | 746 | ||
744 | while (!kthread_should_stop()) { | 747 | while (!kthread_should_stop()) { |
745 | while ((sb = sb_dequeue())) { | 748 | while ((sb = sb_dequeue())) { |
@@ -780,9 +783,9 @@ static int sync_thread_backup(void *data) | |||
780 | struct ip_vs_sync_thread_data *tinfo = data; | 783 | struct ip_vs_sync_thread_data *tinfo = data; |
781 | int len; | 784 | int len; |
782 | 785 | ||
783 | IP_VS_INFO("sync thread started: state = BACKUP, mcast_ifn = %s, " | 786 | pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " |
784 | "syncid = %d\n", | 787 | "syncid = %d\n", |
785 | ip_vs_backup_mcast_ifn, ip_vs_backup_syncid); | 788 | ip_vs_backup_mcast_ifn, ip_vs_backup_syncid); |
786 | 789 | ||
787 | while (!kthread_should_stop()) { | 790 | while (!kthread_should_stop()) { |
788 | wait_event_interruptible(*tinfo->sock->sk->sk_sleep, | 791 | wait_event_interruptible(*tinfo->sock->sk->sk_sleep, |
@@ -794,7 +797,7 @@ static int sync_thread_backup(void *data) | |||
794 | len = ip_vs_receive(tinfo->sock, tinfo->buf, | 797 | len = ip_vs_receive(tinfo->sock, tinfo->buf, |
795 | sync_recv_mesg_maxlen); | 798 | sync_recv_mesg_maxlen); |
796 | if (len <= 0) { | 799 | if (len <= 0) { |
797 | IP_VS_ERR("receiving message error\n"); | 800 | pr_err("receiving message error\n"); |
798 | break; | 801 | break; |
799 | } | 802 | } |
800 | 803 | ||
@@ -824,7 +827,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid) | |||
824 | int (*threadfn)(void *data); | 827 | int (*threadfn)(void *data); |
825 | int result = -ENOMEM; | 828 | int result = -ENOMEM; |
826 | 829 | ||
827 | IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current)); | 830 | IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); |
828 | IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", | 831 | IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", |
829 | sizeof(struct ip_vs_sync_conn)); | 832 | sizeof(struct ip_vs_sync_conn)); |
830 | 833 | ||
@@ -901,14 +904,14 @@ out: | |||
901 | 904 | ||
902 | int stop_sync_thread(int state) | 905 | int stop_sync_thread(int state) |
903 | { | 906 | { |
904 | IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current)); | 907 | IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); |
905 | 908 | ||
906 | if (state == IP_VS_STATE_MASTER) { | 909 | if (state == IP_VS_STATE_MASTER) { |
907 | if (!sync_master_thread) | 910 | if (!sync_master_thread) |
908 | return -ESRCH; | 911 | return -ESRCH; |
909 | 912 | ||
910 | IP_VS_INFO("stopping master sync thread %d ...\n", | 913 | pr_info("stopping master sync thread %d ...\n", |
911 | task_pid_nr(sync_master_thread)); | 914 | task_pid_nr(sync_master_thread)); |
912 | 915 | ||
913 | /* | 916 | /* |
914 | * The lock synchronizes with sb_queue_tail(), so that we don't | 917 | * The lock synchronizes with sb_queue_tail(), so that we don't |
@@ -925,8 +928,8 @@ int stop_sync_thread(int state) | |||
925 | if (!sync_backup_thread) | 928 | if (!sync_backup_thread) |
926 | return -ESRCH; | 929 | return -ESRCH; |
927 | 930 | ||
928 | IP_VS_INFO("stopping backup sync thread %d ...\n", | 931 | pr_info("stopping backup sync thread %d ...\n", |
929 | task_pid_nr(sync_backup_thread)); | 932 | task_pid_nr(sync_backup_thread)); |
930 | 933 | ||
931 | ip_vs_sync_state &= ~IP_VS_STATE_BACKUP; | 934 | ip_vs_sync_state &= ~IP_VS_STATE_BACKUP; |
932 | kthread_stop(sync_backup_thread); | 935 | kthread_stop(sync_backup_thread); |
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c index 8e942565b47d..bbddfdb10db2 100644 --- a/net/netfilter/ipvs/ip_vs_wlc.c +++ b/net/netfilter/ipvs/ip_vs_wlc.c | |||
@@ -19,6 +19,9 @@ | |||
19 | * | 19 | * |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #define KMSG_COMPONENT "IPVS" | ||
23 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
24 | |||
22 | #include <linux/module.h> | 25 | #include <linux/module.h> |
23 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
24 | 27 | ||
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c index f7d74ef1ecf9..6182e8ea0be7 100644 --- a/net/netfilter/ipvs/ip_vs_wrr.c +++ b/net/netfilter/ipvs/ip_vs_wrr.c | |||
@@ -18,6 +18,9 @@ | |||
18 | * | 18 | * |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #define KMSG_COMPONENT "IPVS" | ||
22 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
23 | |||
21 | #include <linux/module.h> | 24 | #include <linux/module.h> |
22 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
23 | #include <linux/net.h> | 26 | #include <linux/net.h> |
@@ -74,11 +77,12 @@ static int ip_vs_wrr_gcd_weight(struct ip_vs_service *svc) | |||
74 | static int ip_vs_wrr_max_weight(struct ip_vs_service *svc) | 77 | static int ip_vs_wrr_max_weight(struct ip_vs_service *svc) |
75 | { | 78 | { |
76 | struct ip_vs_dest *dest; | 79 | struct ip_vs_dest *dest; |
77 | int weight = 0; | 80 | int new_weight, weight = 0; |
78 | 81 | ||
79 | list_for_each_entry(dest, &svc->destinations, n_list) { | 82 | list_for_each_entry(dest, &svc->destinations, n_list) { |
80 | if (atomic_read(&dest->weight) > weight) | 83 | new_weight = atomic_read(&dest->weight); |
81 | weight = atomic_read(&dest->weight); | 84 | if (new_weight > weight) |
85 | weight = new_weight; | ||
82 | } | 86 | } |
83 | 87 | ||
84 | return weight; | 88 | return weight; |
@@ -94,7 +98,7 @@ static int ip_vs_wrr_init_svc(struct ip_vs_service *svc) | |||
94 | */ | 98 | */ |
95 | mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_ATOMIC); | 99 | mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_ATOMIC); |
96 | if (mark == NULL) { | 100 | if (mark == NULL) { |
97 | IP_VS_ERR("ip_vs_wrr_init_svc(): no memory\n"); | 101 | pr_err("%s(): no memory\n", __func__); |
98 | return -ENOMEM; | 102 | return -ENOMEM; |
99 | } | 103 | } |
100 | mark->cl = &svc->destinations; | 104 | mark->cl = &svc->destinations; |
@@ -141,7 +145,7 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
141 | struct ip_vs_wrr_mark *mark = svc->sched_data; | 145 | struct ip_vs_wrr_mark *mark = svc->sched_data; |
142 | struct list_head *p; | 146 | struct list_head *p; |
143 | 147 | ||
144 | IP_VS_DBG(6, "ip_vs_wrr_schedule(): Scheduling...\n"); | 148 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
145 | 149 | ||
146 | /* | 150 | /* |
147 | * This loop will always terminate, because mark->cw in (0, max_weight] | 151 | * This loop will always terminate, because mark->cw in (0, max_weight] |
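
The ip_vs_wrr_max_weight() rework above reads each destination's atomic weight once into a local instead of calling atomic_read() twice, so the value that is compared is guaranteed to be the value that is stored. A userspace analog using C11 atomics (the types are illustrative stand-ins, not kernel code):

    #include <stdatomic.h>
    #include <stddef.h>

    struct dest { atomic_int weight; struct dest *next; };

    static int max_weight(struct dest *head)
    {
            int w, max = 0;

            for (struct dest *d = head; d; d = d->next) {
                    w = atomic_load(&d->weight);   /* single load per entry */
                    if (w > max)
                            max = w;
            }
            return max;
    }
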
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 5874657af7f2..30b3189bd29c 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -13,6 +13,9 @@ | |||
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #define KMSG_COMPONENT "IPVS" | ||
17 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
18 | |||
16 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
17 | #include <linux/tcp.h> /* for tcphdr */ | 20 | #include <linux/tcp.h> /* for tcphdr */ |
18 | #include <net/ip.h> | 21 | #include <net/ip.h> |
@@ -235,8 +238,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
235 | EnterFunction(10); | 238 | EnterFunction(10); |
236 | 239 | ||
237 | if (ip_route_output_key(&init_net, &rt, &fl)) { | 240 | if (ip_route_output_key(&init_net, &rt, &fl)) { |
238 | IP_VS_DBG_RL("ip_vs_bypass_xmit(): ip_route_output error, dest: %pI4\n", | 241 | IP_VS_DBG_RL("%s(): ip_route_output error, dest: %pI4\n", |
239 | &iph->daddr); | 242 | __func__, &iph->daddr); |
240 | goto tx_error_icmp; | 243 | goto tx_error_icmp; |
241 | } | 244 | } |
242 | 245 | ||
@@ -245,7 +248,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
245 | if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) { | 248 | if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) { |
246 | ip_rt_put(rt); | 249 | ip_rt_put(rt); |
247 | icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); | 250 | icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); |
248 | IP_VS_DBG_RL("ip_vs_bypass_xmit(): frag needed\n"); | 251 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
249 | goto tx_error; | 252 | goto tx_error; |
250 | } | 253 | } |
251 | 254 | ||
@@ -299,8 +302,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
299 | 302 | ||
300 | rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); | 303 | rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); |
301 | if (!rt) { | 304 | if (!rt) { |
302 | IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): ip6_route_output error, dest: %pI6\n", | 305 | IP_VS_DBG_RL("%s(): ip6_route_output error, dest: %pI6\n", |
303 | &iph->daddr); | 306 | __func__, &iph->daddr); |
304 | goto tx_error_icmp; | 307 | goto tx_error_icmp; |
305 | } | 308 | } |
306 | 309 | ||
@@ -309,7 +312,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
309 | if (skb->len > mtu) { | 312 | if (skb->len > mtu) { |
310 | dst_release(&rt->u.dst); | 313 | dst_release(&rt->u.dst); |
311 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | 314 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); |
312 | IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): frag needed\n"); | 315 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
313 | goto tx_error; | 316 | goto tx_error; |
314 | } | 317 | } |
315 | 318 | ||
@@ -536,9 +539,9 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
536 | EnterFunction(10); | 539 | EnterFunction(10); |
537 | 540 | ||
538 | if (skb->protocol != htons(ETH_P_IP)) { | 541 | if (skb->protocol != htons(ETH_P_IP)) { |
539 | IP_VS_DBG_RL("ip_vs_tunnel_xmit(): protocol error, " | 542 | IP_VS_DBG_RL("%s(): protocol error, " |
540 | "ETH_P_IP: %d, skb protocol: %d\n", | 543 | "ETH_P_IP: %d, skb protocol: %d\n", |
541 | htons(ETH_P_IP), skb->protocol); | 544 | __func__, htons(ETH_P_IP), skb->protocol); |
542 | goto tx_error; | 545 | goto tx_error; |
543 | } | 546 | } |
544 | 547 | ||
@@ -550,7 +553,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
550 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); | 553 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); |
551 | if (mtu < 68) { | 554 | if (mtu < 68) { |
552 | ip_rt_put(rt); | 555 | ip_rt_put(rt); |
553 | IP_VS_DBG_RL("ip_vs_tunnel_xmit(): mtu less than 68\n"); | 556 | IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__); |
554 | goto tx_error; | 557 | goto tx_error; |
555 | } | 558 | } |
556 | if (skb_dst(skb)) | 559 | if (skb_dst(skb)) |
@@ -562,7 +565,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
562 | && mtu < ntohs(old_iph->tot_len)) { | 565 | && mtu < ntohs(old_iph->tot_len)) { |
563 | icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); | 566 | icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); |
564 | ip_rt_put(rt); | 567 | ip_rt_put(rt); |
565 | IP_VS_DBG_RL("ip_vs_tunnel_xmit(): frag needed\n"); | 568 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
566 | goto tx_error; | 569 | goto tx_error; |
567 | } | 570 | } |
568 | 571 | ||
@@ -578,7 +581,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
578 | if (!new_skb) { | 581 | if (!new_skb) { |
579 | ip_rt_put(rt); | 582 | ip_rt_put(rt); |
580 | kfree_skb(skb); | 583 | kfree_skb(skb); |
581 | IP_VS_ERR_RL("ip_vs_tunnel_xmit(): no memory\n"); | 584 | IP_VS_ERR_RL("%s(): no memory\n", __func__); |
582 | return NF_STOLEN; | 585 | return NF_STOLEN; |
583 | } | 586 | } |
584 | kfree_skb(skb); | 587 | kfree_skb(skb); |
@@ -646,9 +649,9 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
646 | EnterFunction(10); | 649 | EnterFunction(10); |
647 | 650 | ||
648 | if (skb->protocol != htons(ETH_P_IPV6)) { | 651 | if (skb->protocol != htons(ETH_P_IPV6)) { |
649 | IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): protocol error, " | 652 | IP_VS_DBG_RL("%s(): protocol error, " |
650 | "ETH_P_IPV6: %d, skb protocol: %d\n", | 653 | "ETH_P_IPV6: %d, skb protocol: %d\n", |
651 | htons(ETH_P_IPV6), skb->protocol); | 654 | __func__, htons(ETH_P_IPV6), skb->protocol); |
652 | goto tx_error; | 655 | goto tx_error; |
653 | } | 656 | } |
654 | 657 | ||
@@ -662,7 +665,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
662 | /* TODO IPv6: do we need this check in IPv6? */ | 665 | /* TODO IPv6: do we need this check in IPv6? */ |
663 | if (mtu < 1280) { | 666 | if (mtu < 1280) { |
664 | dst_release(&rt->u.dst); | 667 | dst_release(&rt->u.dst); |
665 | IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): mtu less than 1280\n"); | 668 | IP_VS_DBG_RL("%s(): mtu less than 1280\n", __func__); |
666 | goto tx_error; | 669 | goto tx_error; |
667 | } | 670 | } |
668 | if (skb_dst(skb)) | 671 | if (skb_dst(skb)) |
@@ -671,7 +674,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
671 | if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { | 674 | if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { |
672 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | 675 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); |
673 | dst_release(&rt->u.dst); | 676 | dst_release(&rt->u.dst); |
674 | IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): frag needed\n"); | 677 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
675 | goto tx_error; | 678 | goto tx_error; |
676 | } | 679 | } |
677 | 680 | ||
@@ -687,7 +690,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
687 | if (!new_skb) { | 690 | if (!new_skb) { |
688 | dst_release(&rt->u.dst); | 691 | dst_release(&rt->u.dst); |
689 | kfree_skb(skb); | 692 | kfree_skb(skb); |
690 | IP_VS_ERR_RL("ip_vs_tunnel_xmit_v6(): no memory\n"); | 693 | IP_VS_ERR_RL("%s(): no memory\n", __func__); |
691 | return NF_STOLEN; | 694 | return NF_STOLEN; |
692 | } | 695 | } |
693 | kfree_skb(skb); | 696 | kfree_skb(skb); |
@@ -760,7 +763,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
760 | if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) { | 763 | if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) { |
761 | icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); | 764 | icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); |
762 | ip_rt_put(rt); | 765 | ip_rt_put(rt); |
763 | IP_VS_DBG_RL("ip_vs_dr_xmit(): frag needed\n"); | 766 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
764 | goto tx_error; | 767 | goto tx_error; |
765 | } | 768 | } |
766 | 769 | ||
@@ -813,7 +816,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
813 | if (skb->len > mtu) { | 816 | if (skb->len > mtu) { |
814 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | 817 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); |
815 | dst_release(&rt->u.dst); | 818 | dst_release(&rt->u.dst); |
816 | IP_VS_DBG_RL("ip_vs_dr_xmit_v6(): frag needed\n"); | 819 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
817 | goto tx_error; | 820 | goto tx_error; |
818 | } | 821 | } |
819 | 822 | ||
@@ -888,7 +891,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
888 | if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) { | 891 | if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) { |
889 | ip_rt_put(rt); | 892 | ip_rt_put(rt); |
890 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); | 893 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); |
891 | IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n"); | 894 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
892 | goto tx_error; | 895 | goto tx_error; |
893 | } | 896 | } |
894 | 897 | ||
@@ -963,7 +966,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
963 | if (skb->len > mtu) { | 966 | if (skb->len > mtu) { |
964 | dst_release(&rt->u.dst); | 967 | dst_release(&rt->u.dst); |
965 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | 968 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); |
966 | IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n"); | 969 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
967 | goto tx_error; | 970 | goto tx_error; |
968 | } | 971 | } |
969 | 972 | ||
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index b5869b9574b0..b37109817a98 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -47,7 +47,7 @@ | |||
47 | 47 | ||
48 | int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct, | 48 | int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct, |
49 | enum nf_nat_manip_type manip, | 49 | enum nf_nat_manip_type manip, |
50 | struct nlattr *attr) __read_mostly; | 50 | const struct nlattr *attr) __read_mostly; |
51 | EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook); | 51 | EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook); |
52 | 52 | ||
53 | DEFINE_SPINLOCK(nf_conntrack_lock); | 53 | DEFINE_SPINLOCK(nf_conntrack_lock); |
@@ -1089,14 +1089,14 @@ void nf_conntrack_flush_report(struct net *net, u32 pid, int report) | |||
1089 | } | 1089 | } |
1090 | EXPORT_SYMBOL_GPL(nf_conntrack_flush_report); | 1090 | EXPORT_SYMBOL_GPL(nf_conntrack_flush_report); |
1091 | 1091 | ||
1092 | static void nf_ct_release_dying_list(void) | 1092 | static void nf_ct_release_dying_list(struct net *net) |
1093 | { | 1093 | { |
1094 | struct nf_conntrack_tuple_hash *h; | 1094 | struct nf_conntrack_tuple_hash *h; |
1095 | struct nf_conn *ct; | 1095 | struct nf_conn *ct; |
1096 | struct hlist_nulls_node *n; | 1096 | struct hlist_nulls_node *n; |
1097 | 1097 | ||
1098 | spin_lock_bh(&nf_conntrack_lock); | 1098 | spin_lock_bh(&nf_conntrack_lock); |
1099 | hlist_nulls_for_each_entry(h, n, &init_net.ct.dying, hnnode) { | 1099 | hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) { |
1100 | ct = nf_ct_tuplehash_to_ctrack(h); | 1100 | ct = nf_ct_tuplehash_to_ctrack(h); |
1101 | /* never fails to remove them, no listeners at this point */ | 1101 | /* never fails to remove them, no listeners at this point */ |
1102 | nf_ct_kill(ct); | 1102 | nf_ct_kill(ct); |
@@ -1115,7 +1115,7 @@ static void nf_conntrack_cleanup_net(struct net *net) | |||
1115 | { | 1115 | { |
1116 | i_see_dead_people: | 1116 | i_see_dead_people: |
1117 | nf_ct_iterate_cleanup(net, kill_all, NULL); | 1117 | nf_ct_iterate_cleanup(net, kill_all, NULL); |
1118 | nf_ct_release_dying_list(); | 1118 | nf_ct_release_dying_list(net); |
1119 | if (atomic_read(&net->ct.count) != 0) { | 1119 | if (atomic_read(&net->ct.count) != 0) { |
1120 | schedule(); | 1120 | schedule(); |
1121 | goto i_see_dead_people; | 1121 | goto i_see_dead_people; |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 49479d194570..59d8064eb522 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -704,7 +704,8 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr, | |||
704 | } | 704 | } |
705 | 705 | ||
706 | static int | 706 | static int |
707 | ctnetlink_parse_tuple(struct nlattr *cda[], struct nf_conntrack_tuple *tuple, | 707 | ctnetlink_parse_tuple(const struct nlattr * const cda[], |
708 | struct nf_conntrack_tuple *tuple, | ||
708 | enum ctattr_tuple type, u_int8_t l3num) | 709 | enum ctattr_tuple type, u_int8_t l3num) |
709 | { | 710 | { |
710 | struct nlattr *tb[CTA_TUPLE_MAX+1]; | 711 | struct nlattr *tb[CTA_TUPLE_MAX+1]; |
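
The new ctnetlink prototypes take "const struct nlattr * const cda[]": inside the handler neither an array slot can be repointed nor can the attribute it points at be modified. A toy illustration of what that double const enforces (the struct below is a stand-in for struct nlattr, not its real layout):

    struct attr { int type; int len; };     /* stand-in for struct nlattr */

    static int read_type(const struct attr * const cda[])
    {
            /* cda[0] = NULL;      would not compile: the pointers are const */
            /* cda[0]->len = 0;    would not compile: the pointees are const */
            return cda[0] ? cda[0]->type : 0;
    }
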
@@ -740,7 +741,7 @@ ctnetlink_parse_tuple(struct nlattr *cda[], struct nf_conntrack_tuple *tuple, | |||
740 | } | 741 | } |
741 | 742 | ||
742 | static inline int | 743 | static inline int |
743 | ctnetlink_parse_help(struct nlattr *attr, char **helper_name) | 744 | ctnetlink_parse_help(const struct nlattr *attr, char **helper_name) |
744 | { | 745 | { |
745 | struct nlattr *tb[CTA_HELP_MAX+1]; | 746 | struct nlattr *tb[CTA_HELP_MAX+1]; |
746 | 747 | ||
@@ -764,7 +765,8 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = { | |||
764 | 765 | ||
765 | static int | 766 | static int |
766 | ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, | 767 | ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, |
767 | struct nlmsghdr *nlh, struct nlattr *cda[]) | 768 | const struct nlmsghdr *nlh, |
769 | const struct nlattr * const cda[]) | ||
768 | { | 770 | { |
769 | struct nf_conntrack_tuple_hash *h; | 771 | struct nf_conntrack_tuple_hash *h; |
770 | struct nf_conntrack_tuple tuple; | 772 | struct nf_conntrack_tuple tuple; |
@@ -823,7 +825,8 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
823 | 825 | ||
824 | static int | 826 | static int |
825 | ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | 827 | ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, |
826 | struct nlmsghdr *nlh, struct nlattr *cda[]) | 828 | const struct nlmsghdr *nlh, |
829 | const struct nlattr * const cda[]) | ||
827 | { | 830 | { |
828 | struct nf_conntrack_tuple_hash *h; | 831 | struct nf_conntrack_tuple_hash *h; |
829 | struct nf_conntrack_tuple tuple; | 832 | struct nf_conntrack_tuple tuple; |
@@ -884,7 +887,7 @@ out: | |||
884 | static int | 887 | static int |
885 | ctnetlink_parse_nat_setup(struct nf_conn *ct, | 888 | ctnetlink_parse_nat_setup(struct nf_conn *ct, |
886 | enum nf_nat_manip_type manip, | 889 | enum nf_nat_manip_type manip, |
887 | struct nlattr *attr) | 890 | const struct nlattr *attr) |
888 | { | 891 | { |
889 | typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup; | 892 | typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup; |
890 | 893 | ||
@@ -914,7 +917,7 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct, | |||
914 | #endif | 917 | #endif |
915 | 918 | ||
916 | static int | 919 | static int |
917 | ctnetlink_change_status(struct nf_conn *ct, struct nlattr *cda[]) | 920 | ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[]) |
918 | { | 921 | { |
919 | unsigned long d; | 922 | unsigned long d; |
920 | unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS])); | 923 | unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS])); |
@@ -940,7 +943,7 @@ ctnetlink_change_status(struct nf_conn *ct, struct nlattr *cda[]) | |||
940 | } | 943 | } |
941 | 944 | ||
942 | static int | 945 | static int |
943 | ctnetlink_change_nat(struct nf_conn *ct, struct nlattr *cda[]) | 946 | ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[]) |
944 | { | 947 | { |
945 | #ifdef CONFIG_NF_NAT_NEEDED | 948 | #ifdef CONFIG_NF_NAT_NEEDED |
946 | int ret; | 949 | int ret; |
@@ -966,7 +969,7 @@ ctnetlink_change_nat(struct nf_conn *ct, struct nlattr *cda[]) | |||
966 | } | 969 | } |
967 | 970 | ||
968 | static inline int | 971 | static inline int |
969 | ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[]) | 972 | ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[]) |
970 | { | 973 | { |
971 | struct nf_conntrack_helper *helper; | 974 | struct nf_conntrack_helper *helper; |
972 | struct nf_conn_help *help = nfct_help(ct); | 975 | struct nf_conn_help *help = nfct_help(ct); |
@@ -1028,7 +1031,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[]) | |||
1028 | } | 1031 | } |
1029 | 1032 | ||
1030 | static inline int | 1033 | static inline int |
1031 | ctnetlink_change_timeout(struct nf_conn *ct, struct nlattr *cda[]) | 1034 | ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[]) |
1032 | { | 1035 | { |
1033 | u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); | 1036 | u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); |
1034 | 1037 | ||
@@ -1042,9 +1045,10 @@ ctnetlink_change_timeout(struct nf_conn *ct, struct nlattr *cda[]) | |||
1042 | } | 1045 | } |
1043 | 1046 | ||
1044 | static inline int | 1047 | static inline int |
1045 | ctnetlink_change_protoinfo(struct nf_conn *ct, struct nlattr *cda[]) | 1048 | ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]) |
1046 | { | 1049 | { |
1047 | struct nlattr *tb[CTA_PROTOINFO_MAX+1], *attr = cda[CTA_PROTOINFO]; | 1050 | const struct nlattr *attr = cda[CTA_PROTOINFO]; |
1051 | struct nlattr *tb[CTA_PROTOINFO_MAX+1]; | ||
1048 | struct nf_conntrack_l4proto *l4proto; | 1052 | struct nf_conntrack_l4proto *l4proto; |
1049 | int err = 0; | 1053 | int err = 0; |
1050 | 1054 | ||
@@ -1061,7 +1065,7 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, struct nlattr *cda[]) | |||
1061 | 1065 | ||
1062 | #ifdef CONFIG_NF_NAT_NEEDED | 1066 | #ifdef CONFIG_NF_NAT_NEEDED |
1063 | static inline int | 1067 | static inline int |
1064 | change_nat_seq_adj(struct nf_nat_seq *natseq, struct nlattr *attr) | 1068 | change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr) |
1065 | { | 1069 | { |
1066 | struct nlattr *cda[CTA_NAT_SEQ_MAX+1]; | 1070 | struct nlattr *cda[CTA_NAT_SEQ_MAX+1]; |
1067 | 1071 | ||
@@ -1089,7 +1093,8 @@ change_nat_seq_adj(struct nf_nat_seq *natseq, struct nlattr *attr) | |||
1089 | } | 1093 | } |
1090 | 1094 | ||
1091 | static int | 1095 | static int |
1092 | ctnetlink_change_nat_seq_adj(struct nf_conn *ct, struct nlattr *cda[]) | 1096 | ctnetlink_change_nat_seq_adj(struct nf_conn *ct, |
1097 | const struct nlattr * const cda[]) | ||
1093 | { | 1098 | { |
1094 | int ret = 0; | 1099 | int ret = 0; |
1095 | struct nf_conn_nat *nat = nfct_nat(ct); | 1100 | struct nf_conn_nat *nat = nfct_nat(ct); |
@@ -1120,7 +1125,8 @@ ctnetlink_change_nat_seq_adj(struct nf_conn *ct, struct nlattr *cda[]) | |||
1120 | #endif | 1125 | #endif |
1121 | 1126 | ||
1122 | static int | 1127 | static int |
1123 | ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[]) | 1128 | ctnetlink_change_conntrack(struct nf_conn *ct, |
1129 | const struct nlattr * const cda[]) | ||
1124 | { | 1130 | { |
1125 | int err; | 1131 | int err; |
1126 | 1132 | ||
@@ -1169,7 +1175,7 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[]) | |||
1169 | } | 1175 | } |
1170 | 1176 | ||
1171 | static struct nf_conn * | 1177 | static struct nf_conn * |
1172 | ctnetlink_create_conntrack(struct nlattr *cda[], | 1178 | ctnetlink_create_conntrack(const struct nlattr * const cda[], |
1173 | struct nf_conntrack_tuple *otuple, | 1179 | struct nf_conntrack_tuple *otuple, |
1174 | struct nf_conntrack_tuple *rtuple, | 1180 | struct nf_conntrack_tuple *rtuple, |
1175 | u8 u3) | 1181 | u8 u3) |
@@ -1304,7 +1310,8 @@ err1: | |||
1304 | 1310 | ||
1305 | static int | 1311 | static int |
1306 | ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, | 1312 | ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, |
1307 | struct nlmsghdr *nlh, struct nlattr *cda[]) | 1313 | const struct nlmsghdr *nlh, |
1314 | const struct nlattr * const cda[]) | ||
1308 | { | 1315 | { |
1309 | struct nf_conntrack_tuple otuple, rtuple; | 1316 | struct nf_conntrack_tuple otuple, rtuple; |
1310 | struct nf_conntrack_tuple_hash *h = NULL; | 1317 | struct nf_conntrack_tuple_hash *h = NULL; |
@@ -1629,7 +1636,8 @@ static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { | |||
1629 | 1636 | ||
1630 | static int | 1637 | static int |
1631 | ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, | 1638 | ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, |
1632 | struct nlmsghdr *nlh, struct nlattr *cda[]) | 1639 | const struct nlmsghdr *nlh, |
1640 | const struct nlattr * const cda[]) | ||
1633 | { | 1641 | { |
1634 | struct nf_conntrack_tuple tuple; | 1642 | struct nf_conntrack_tuple tuple; |
1635 | struct nf_conntrack_expect *exp; | 1643 | struct nf_conntrack_expect *exp; |
@@ -1689,7 +1697,8 @@ out: | |||
1689 | 1697 | ||
1690 | static int | 1698 | static int |
1691 | ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | 1699 | ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, |
1692 | struct nlmsghdr *nlh, struct nlattr *cda[]) | 1700 | const struct nlmsghdr *nlh, |
1701 | const struct nlattr * const cda[]) | ||
1693 | { | 1702 | { |
1694 | struct nf_conntrack_expect *exp; | 1703 | struct nf_conntrack_expect *exp; |
1695 | struct nf_conntrack_tuple tuple; | 1704 | struct nf_conntrack_tuple tuple; |
@@ -1767,13 +1776,15 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1767 | return 0; | 1776 | return 0; |
1768 | } | 1777 | } |
1769 | static int | 1778 | static int |
1770 | ctnetlink_change_expect(struct nf_conntrack_expect *x, struct nlattr *cda[]) | 1779 | ctnetlink_change_expect(struct nf_conntrack_expect *x, |
1780 | const struct nlattr * const cda[]) | ||
1771 | { | 1781 | { |
1772 | return -EOPNOTSUPP; | 1782 | return -EOPNOTSUPP; |
1773 | } | 1783 | } |
1774 | 1784 | ||
1775 | static int | 1785 | static int |
1776 | ctnetlink_create_expect(struct nlattr *cda[], u_int8_t u3, u32 pid, int report) | 1786 | ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3, |
1787 | u32 pid, int report) | ||
1777 | { | 1788 | { |
1778 | struct nf_conntrack_tuple tuple, mask, master_tuple; | 1789 | struct nf_conntrack_tuple tuple, mask, master_tuple; |
1779 | struct nf_conntrack_tuple_hash *h = NULL; | 1790 | struct nf_conntrack_tuple_hash *h = NULL; |
@@ -1831,7 +1842,8 @@ out: | |||
1831 | 1842 | ||
1832 | static int | 1843 | static int |
1833 | ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb, | 1844 | ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb, |
1834 | struct nlmsghdr *nlh, struct nlattr *cda[]) | 1845 | const struct nlmsghdr *nlh, |
1846 | const struct nlattr * const cda[]) | ||
1835 | { | 1847 | { |
1836 | struct nf_conntrack_tuple tuple; | 1848 | struct nf_conntrack_tuple tuple; |
1837 | struct nf_conntrack_expect *exp; | 1849 | struct nf_conntrack_expect *exp; |
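The ctnetlink changes above are pure constification: each handler now receives the parsed attribute table as const struct nlattr * const cda[], so neither the pointer slots nor the attribute payloads can be modified from a message handler. A small stand-alone illustration of what the two const qualifiers enforce, using a stand-in struct rather than the kernel's nlattr:

    #include <stddef.h>

    struct attr { unsigned short nla_len, nla_type; };

    /* Mirrors "const struct nlattr * const cda[]": an array whose slots are
     * read-only pointers to read-only attributes. */
    static size_t first_attr_len(const struct attr *const cda[], size_t n)
    {
            /* cda[0] = NULL;        would not compile: the slot is const    */
            /* cda[0]->nla_len = 0;  would not compile: the payload is const */
            return (n > 0 && cda[0]) ? cda[0]->nla_len : 0;
    }

    int main(void)
    {
            struct attr a = { 8, 1 };
            const struct attr *tb[1] = { &a };

            return first_attr_len(tb, 1) == 8 ? 0 : 1;
    }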
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 92761a988375..eedc0c1ac7a4 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
@@ -170,7 +170,7 @@ replay: | |||
170 | if (err < 0) | 170 | if (err < 0) |
171 | return err; | 171 | return err; |
172 | 172 | ||
173 | err = nc->call(nfnl, skb, nlh, cda); | 173 | err = nc->call(nfnl, skb, nlh, (const struct nlattr **)cda); |
174 | if (err == -EAGAIN) | 174 | if (err == -EAGAIN) |
175 | goto replay; | 175 | goto replay; |
176 | return err; | 176 | return err; |
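The explicit cast at nfnetlink's dispatch site exists because C only allows qualifiers to be added at the first level of indirection: a plain struct nlattr ** will not implicitly convert to a parameter declared const struct nlattr * const [], even though the call is perfectly safe. A two-call demonstration of the rule:

    #include <stdio.h>

    static void print_first(const int *const v[], int n)
    {
            if (n > 0 && v[0])
                    printf("%d\n", *v[0]);
    }

    int main(void)
    {
            int x = 42;
            int *arr[1] = { &x };

            /* print_first(arr, 1);  rejected: int ** does not implicitly gain
             *                       const below the first indirection in C   */
            print_first((const int *const *)arr, 1); /* explicit cast, as at the nc->call() site */
            return 0;
    }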
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 66a6dd5c519a..f900dc3194af 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -694,7 +694,8 @@ static struct notifier_block nfulnl_rtnl_notifier = { | |||
694 | 694 | ||
695 | static int | 695 | static int |
696 | nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, | 696 | nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, |
697 | struct nlmsghdr *nlh, struct nlattr *nfqa[]) | 697 | const struct nlmsghdr *nlh, |
698 | const struct nlattr * const nfqa[]) | ||
698 | { | 699 | { |
699 | return -ENOTSUPP; | 700 | return -ENOTSUPP; |
700 | } | 701 | } |
@@ -716,7 +717,8 @@ static const struct nla_policy nfula_cfg_policy[NFULA_CFG_MAX+1] = { | |||
716 | 717 | ||
717 | static int | 718 | static int |
718 | nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | 719 | nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, |
719 | struct nlmsghdr *nlh, struct nlattr *nfula[]) | 720 | const struct nlmsghdr *nlh, |
721 | const struct nlattr * const nfula[]) | ||
720 | { | 722 | { |
721 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); | 723 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); |
722 | u_int16_t group_num = ntohs(nfmsg->res_id); | 724 | u_int16_t group_num = ntohs(nfmsg->res_id); |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 71daa0934b6c..7a9dec9fb822 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -608,7 +608,8 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = { | |||
608 | 608 | ||
609 | static int | 609 | static int |
610 | nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, | 610 | nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, |
611 | struct nlmsghdr *nlh, struct nlattr *nfqa[]) | 611 | const struct nlmsghdr *nlh, |
612 | const struct nlattr * const nfqa[]) | ||
612 | { | 613 | { |
613 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); | 614 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); |
614 | u_int16_t queue_num = ntohs(nfmsg->res_id); | 615 | u_int16_t queue_num = ntohs(nfmsg->res_id); |
@@ -670,7 +671,8 @@ err_out_unlock: | |||
670 | 671 | ||
671 | static int | 672 | static int |
672 | nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, | 673 | nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, |
673 | struct nlmsghdr *nlh, struct nlattr *nfqa[]) | 674 | const struct nlmsghdr *nlh, |
675 | const struct nlattr * const nfqa[]) | ||
674 | { | 676 | { |
675 | return -ENOTSUPP; | 677 | return -ENOTSUPP; |
676 | } | 678 | } |
@@ -687,7 +689,8 @@ static const struct nf_queue_handler nfqh = { | |||
687 | 689 | ||
688 | static int | 690 | static int |
689 | nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | 691 | nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, |
690 | struct nlmsghdr *nlh, struct nlattr *nfqa[]) | 692 | const struct nlmsghdr *nlh, |
693 | const struct nlattr * const nfqa[]) | ||
691 | { | 694 | { |
692 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); | 695 | struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); |
693 | u_int16_t queue_num = ntohs(nfmsg->res_id); | 696 | u_int16_t queue_num = ntohs(nfmsg->res_id); |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 025d1a0af78b..a6ac83a93348 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -736,16 +736,17 @@ xt_replace_table(struct xt_table *table, | |||
736 | } | 736 | } |
737 | EXPORT_SYMBOL_GPL(xt_replace_table); | 737 | EXPORT_SYMBOL_GPL(xt_replace_table); |
738 | 738 | ||
739 | struct xt_table *xt_register_table(struct net *net, struct xt_table *table, | 739 | struct xt_table *xt_register_table(struct net *net, |
740 | const struct xt_table *input_table, | ||
740 | struct xt_table_info *bootstrap, | 741 | struct xt_table_info *bootstrap, |
741 | struct xt_table_info *newinfo) | 742 | struct xt_table_info *newinfo) |
742 | { | 743 | { |
743 | int ret; | 744 | int ret; |
744 | struct xt_table_info *private; | 745 | struct xt_table_info *private; |
745 | struct xt_table *t; | 746 | struct xt_table *t, *table; |
746 | 747 | ||
747 | /* Don't add one object to multiple lists. */ | 748 | /* Don't add one object to multiple lists. */ |
748 | table = kmemdup(table, sizeof(struct xt_table), GFP_KERNEL); | 749 | table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL); |
749 | if (!table) { | 750 | if (!table) { |
750 | ret = -ENOMEM; | 751 | ret = -ENOMEM; |
751 | goto out; | 752 | goto out; |
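xt_register_table() now takes the caller's table as a read-only template (input_table) and keeps working on its own kmemdup() copy, so the initial table definitions can live in const data while each registration (for example, one per network namespace) gets its own private, linkable object. A generic sketch of that register-by-copy pattern; registry_add() and the struct below are placeholders, not kernel APIs:

    #include <stdlib.h>
    #include <string.h>

    struct table_tmpl {
            char name[32];
            int  af;
    };

    /* Duplicate the caller's read-only template; the registry owns the copy,
     * and the template stays reusable for further registrations. */
    static struct table_tmpl *register_table_copy(const struct table_tmpl *tmpl)
    {
            struct table_tmpl *copy = malloc(sizeof(*copy));

            if (!copy)
                    return NULL;
            memcpy(copy, tmpl, sizeof(*copy));
            /* registry_add(copy);  hypothetical: only the private copy is linked */
            return copy;
    }

    int main(void)
    {
            static const struct table_tmpl filter = { "filter", 2 };
            struct table_tmpl *a = register_table_copy(&filter);
            struct table_tmpl *b = register_table_copy(&filter); /* e.g. a second netns */

            free(a);
            free(b);
            return 0;
    }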
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c index d6e5ab463277..593457068ae1 100644 --- a/net/netfilter/xt_CONNMARK.c +++ b/net/netfilter/xt_CONNMARK.c | |||
@@ -36,45 +36,6 @@ MODULE_ALIAS("ip6t_CONNMARK"); | |||
36 | #include <net/netfilter/nf_conntrack_ecache.h> | 36 | #include <net/netfilter/nf_conntrack_ecache.h> |
37 | 37 | ||
38 | static unsigned int | 38 | static unsigned int |
39 | connmark_tg_v0(struct sk_buff *skb, const struct xt_target_param *par) | ||
40 | { | ||
41 | const struct xt_connmark_target_info *markinfo = par->targinfo; | ||
42 | struct nf_conn *ct; | ||
43 | enum ip_conntrack_info ctinfo; | ||
44 | u_int32_t diff; | ||
45 | u_int32_t mark; | ||
46 | u_int32_t newmark; | ||
47 | |||
48 | ct = nf_ct_get(skb, &ctinfo); | ||
49 | if (ct) { | ||
50 | switch(markinfo->mode) { | ||
51 | case XT_CONNMARK_SET: | ||
52 | newmark = (ct->mark & ~markinfo->mask) | markinfo->mark; | ||
53 | if (newmark != ct->mark) { | ||
54 | ct->mark = newmark; | ||
55 | nf_conntrack_event_cache(IPCT_MARK, ct); | ||
56 | } | ||
57 | break; | ||
58 | case XT_CONNMARK_SAVE: | ||
59 | newmark = (ct->mark & ~markinfo->mask) | | ||
60 | (skb->mark & markinfo->mask); | ||
61 | if (ct->mark != newmark) { | ||
62 | ct->mark = newmark; | ||
63 | nf_conntrack_event_cache(IPCT_MARK, ct); | ||
64 | } | ||
65 | break; | ||
66 | case XT_CONNMARK_RESTORE: | ||
67 | mark = skb->mark; | ||
68 | diff = (ct->mark ^ mark) & markinfo->mask; | ||
69 | skb->mark = mark ^ diff; | ||
70 | break; | ||
71 | } | ||
72 | } | ||
73 | |||
74 | return XT_CONTINUE; | ||
75 | } | ||
76 | |||
77 | static unsigned int | ||
78 | connmark_tg(struct sk_buff *skb, const struct xt_target_param *par) | 39 | connmark_tg(struct sk_buff *skb, const struct xt_target_param *par) |
79 | { | 40 | { |
80 | const struct xt_connmark_tginfo1 *info = par->targinfo; | 41 | const struct xt_connmark_tginfo1 *info = par->targinfo; |
@@ -112,30 +73,6 @@ connmark_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
112 | return XT_CONTINUE; | 73 | return XT_CONTINUE; |
113 | } | 74 | } |
114 | 75 | ||
115 | static bool connmark_tg_check_v0(const struct xt_tgchk_param *par) | ||
116 | { | ||
117 | const struct xt_connmark_target_info *matchinfo = par->targinfo; | ||
118 | |||
119 | if (matchinfo->mode == XT_CONNMARK_RESTORE) { | ||
120 | if (strcmp(par->table, "mangle") != 0) { | ||
121 | printk(KERN_WARNING "CONNMARK: restore can only be " | ||
122 | "called from \"mangle\" table, not \"%s\"\n", | ||
123 | par->table); | ||
124 | return false; | ||
125 | } | ||
126 | } | ||
127 | if (matchinfo->mark > 0xffffffff || matchinfo->mask > 0xffffffff) { | ||
128 | printk(KERN_WARNING "CONNMARK: Only supports 32bit mark\n"); | ||
129 | return false; | ||
130 | } | ||
131 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { | ||
132 | printk(KERN_WARNING "can't load conntrack support for " | ||
133 | "proto=%u\n", par->family); | ||
134 | return false; | ||
135 | } | ||
136 | return true; | ||
137 | } | ||
138 | |||
139 | static bool connmark_tg_check(const struct xt_tgchk_param *par) | 76 | static bool connmark_tg_check(const struct xt_tgchk_param *par) |
140 | { | 77 | { |
141 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { | 78 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { |
@@ -151,74 +88,25 @@ static void connmark_tg_destroy(const struct xt_tgdtor_param *par) | |||
151 | nf_ct_l3proto_module_put(par->family); | 88 | nf_ct_l3proto_module_put(par->family); |
152 | } | 89 | } |
153 | 90 | ||
154 | #ifdef CONFIG_COMPAT | 91 | static struct xt_target connmark_tg_reg __read_mostly = { |
155 | struct compat_xt_connmark_target_info { | 92 | .name = "CONNMARK", |
156 | compat_ulong_t mark, mask; | 93 | .revision = 1, |
157 | u_int8_t mode; | 94 | .family = NFPROTO_UNSPEC, |
158 | u_int8_t __pad1; | 95 | .checkentry = connmark_tg_check, |
159 | u_int16_t __pad2; | 96 | .target = connmark_tg, |
160 | }; | 97 | .targetsize = sizeof(struct xt_connmark_tginfo1), |
161 | 98 | .destroy = connmark_tg_destroy, | |
162 | static void connmark_tg_compat_from_user_v0(void *dst, void *src) | 99 | .me = THIS_MODULE, |
163 | { | ||
164 | const struct compat_xt_connmark_target_info *cm = src; | ||
165 | struct xt_connmark_target_info m = { | ||
166 | .mark = cm->mark, | ||
167 | .mask = cm->mask, | ||
168 | .mode = cm->mode, | ||
169 | }; | ||
170 | memcpy(dst, &m, sizeof(m)); | ||
171 | } | ||
172 | |||
173 | static int connmark_tg_compat_to_user_v0(void __user *dst, void *src) | ||
174 | { | ||
175 | const struct xt_connmark_target_info *m = src; | ||
176 | struct compat_xt_connmark_target_info cm = { | ||
177 | .mark = m->mark, | ||
178 | .mask = m->mask, | ||
179 | .mode = m->mode, | ||
180 | }; | ||
181 | return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0; | ||
182 | } | ||
183 | #endif /* CONFIG_COMPAT */ | ||
184 | |||
185 | static struct xt_target connmark_tg_reg[] __read_mostly = { | ||
186 | { | ||
187 | .name = "CONNMARK", | ||
188 | .revision = 0, | ||
189 | .family = NFPROTO_UNSPEC, | ||
190 | .checkentry = connmark_tg_check_v0, | ||
191 | .destroy = connmark_tg_destroy, | ||
192 | .target = connmark_tg_v0, | ||
193 | .targetsize = sizeof(struct xt_connmark_target_info), | ||
194 | #ifdef CONFIG_COMPAT | ||
195 | .compatsize = sizeof(struct compat_xt_connmark_target_info), | ||
196 | .compat_from_user = connmark_tg_compat_from_user_v0, | ||
197 | .compat_to_user = connmark_tg_compat_to_user_v0, | ||
198 | #endif | ||
199 | .me = THIS_MODULE | ||
200 | }, | ||
201 | { | ||
202 | .name = "CONNMARK", | ||
203 | .revision = 1, | ||
204 | .family = NFPROTO_UNSPEC, | ||
205 | .checkentry = connmark_tg_check, | ||
206 | .target = connmark_tg, | ||
207 | .targetsize = sizeof(struct xt_connmark_tginfo1), | ||
208 | .destroy = connmark_tg_destroy, | ||
209 | .me = THIS_MODULE, | ||
210 | }, | ||
211 | }; | 100 | }; |
212 | 101 | ||
213 | static int __init connmark_tg_init(void) | 102 | static int __init connmark_tg_init(void) |
214 | { | 103 | { |
215 | return xt_register_targets(connmark_tg_reg, | 104 | return xt_register_target(&connmark_tg_reg); |
216 | ARRAY_SIZE(connmark_tg_reg)); | ||
217 | } | 105 | } |
218 | 106 | ||
219 | static void __exit connmark_tg_exit(void) | 107 | static void __exit connmark_tg_exit(void) |
220 | { | 108 | { |
221 | xt_unregister_targets(connmark_tg_reg, ARRAY_SIZE(connmark_tg_reg)); | 109 | xt_unregister_target(&connmark_tg_reg); |
222 | } | 110 | } |
223 | 111 | ||
224 | module_init(connmark_tg_init); | 112 | module_init(connmark_tg_init); |
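Only revision 1 of the CONNMARK target remains; the deleted revision 0 carried the old xt_connmark_target_info ABI plus its compat conversion helpers for older user space. The restore logic visible in the removed code, skb->mark = mark ^ ((ct->mark ^ mark) & mask), is a branch-free way of copying the masked bits of the conntrack mark into the packet mark; a quick check of that identity:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t skb_mark = 0x12345678, ct_mark = 0xabcdef01, mask = 0x0000ffff;

            /* Branch-free form used by the (removed) revision 0 restore path. */
            uint32_t diff     = (ct_mark ^ skb_mark) & mask;
            uint32_t restored = skb_mark ^ diff;

            /* Equivalent "select bits by mask" form. */
            uint32_t expected = (skb_mark & ~mask) | (ct_mark & mask);

            assert(restored == expected);   /* 0x1234ef01 in this example */
            return 0;
    }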
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c index 6a347e768f86..74ce89260056 100644 --- a/net/netfilter/xt_DSCP.c +++ b/net/netfilter/xt_DSCP.c | |||
@@ -18,7 +18,6 @@ | |||
18 | 18 | ||
19 | #include <linux/netfilter/x_tables.h> | 19 | #include <linux/netfilter/x_tables.h> |
20 | #include <linux/netfilter/xt_DSCP.h> | 20 | #include <linux/netfilter/xt_DSCP.h> |
21 | #include <linux/netfilter_ipv4/ipt_TOS.h> | ||
22 | 21 | ||
23 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | 22 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); |
24 | MODULE_DESCRIPTION("Xtables: DSCP/TOS field modification"); | 23 | MODULE_DESCRIPTION("Xtables: DSCP/TOS field modification"); |
@@ -73,41 +72,6 @@ static bool dscp_tg_check(const struct xt_tgchk_param *par) | |||
73 | } | 72 | } |
74 | 73 | ||
75 | static unsigned int | 74 | static unsigned int |
76 | tos_tg_v0(struct sk_buff *skb, const struct xt_target_param *par) | ||
77 | { | ||
78 | const struct ipt_tos_target_info *info = par->targinfo; | ||
79 | struct iphdr *iph = ip_hdr(skb); | ||
80 | u_int8_t oldtos; | ||
81 | |||
82 | if ((iph->tos & IPTOS_TOS_MASK) != info->tos) { | ||
83 | if (!skb_make_writable(skb, sizeof(struct iphdr))) | ||
84 | return NF_DROP; | ||
85 | |||
86 | iph = ip_hdr(skb); | ||
87 | oldtos = iph->tos; | ||
88 | iph->tos = (iph->tos & IPTOS_PREC_MASK) | info->tos; | ||
89 | csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); | ||
90 | } | ||
91 | |||
92 | return XT_CONTINUE; | ||
93 | } | ||
94 | |||
95 | static bool tos_tg_check_v0(const struct xt_tgchk_param *par) | ||
96 | { | ||
97 | const struct ipt_tos_target_info *info = par->targinfo; | ||
98 | const uint8_t tos = info->tos; | ||
99 | |||
100 | if (tos != IPTOS_LOWDELAY && tos != IPTOS_THROUGHPUT && | ||
101 | tos != IPTOS_RELIABILITY && tos != IPTOS_MINCOST && | ||
102 | tos != IPTOS_NORMALSVC) { | ||
103 | printk(KERN_WARNING "TOS: bad tos value %#x\n", tos); | ||
104 | return false; | ||
105 | } | ||
106 | |||
107 | return true; | ||
108 | } | ||
109 | |||
110 | static unsigned int | ||
111 | tos_tg(struct sk_buff *skb, const struct xt_target_param *par) | 75 | tos_tg(struct sk_buff *skb, const struct xt_target_param *par) |
112 | { | 76 | { |
113 | const struct xt_tos_target_info *info = par->targinfo; | 77 | const struct xt_tos_target_info *info = par->targinfo; |
@@ -168,16 +132,6 @@ static struct xt_target dscp_tg_reg[] __read_mostly = { | |||
168 | }, | 132 | }, |
169 | { | 133 | { |
170 | .name = "TOS", | 134 | .name = "TOS", |
171 | .revision = 0, | ||
172 | .family = NFPROTO_IPV4, | ||
173 | .table = "mangle", | ||
174 | .target = tos_tg_v0, | ||
175 | .targetsize = sizeof(struct ipt_tos_target_info), | ||
176 | .checkentry = tos_tg_check_v0, | ||
177 | .me = THIS_MODULE, | ||
178 | }, | ||
179 | { | ||
180 | .name = "TOS", | ||
181 | .revision = 1, | 135 | .revision = 1, |
182 | .family = NFPROTO_IPV4, | 136 | .family = NFPROTO_IPV4, |
183 | .table = "mangle", | 137 | .table = "mangle", |
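The removed TOS revision 0 target rewrote the IPv4 TOS byte and then patched the header checksum incrementally with csum_replace2() instead of recomputing it. The incremental update is the standard RFC 1624 one's-complement adjustment; a self-contained sketch of that arithmetic, where csum_fixup16() is an illustrative name rather than the kernel helper:

    #include <assert.h>
    #include <stdint.h>

    /* RFC 1624, eqn. 3: fold the removal of old16 and insertion of new16
     * into an existing one's-complement checksum. */
    static uint16_t csum_fixup16(uint16_t check, uint16_t old16, uint16_t new16)
    {
            uint32_t sum = (uint16_t)~check + (uint16_t)~old16 + new16;

            sum = (sum & 0xffff) + (sum >> 16);   /* fold carries */
            sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            /* Toy "header" of two 16-bit words plus its checksum. */
            uint16_t w0 = 0x4510, w1 = 0x003c;
            uint32_t s  = w0 + w1;
            uint16_t check = (uint16_t)~((s & 0xffff) + (s >> 16));

            /* Change the low-order byte of w0 (in the on-wire first word of an
             * IPv4 header that is where the TOS sits) and fix up the checksum. */
            uint16_t w0_new = (w0 & 0xff00) | 0x08;
            uint32_t s2;

            check = csum_fixup16(check, w0, w0_new);

            /* A freshly computed checksum over the new words must match. */
            s2 = w0_new + w1;
            assert(check == (uint16_t)~((s2 & 0xffff) + (s2 >> 16)));
            return 0;
    }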
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c index 67574bcfb8ac..225f8d11e173 100644 --- a/net/netfilter/xt_MARK.c +++ b/net/netfilter/xt_MARK.c | |||
@@ -25,39 +25,6 @@ MODULE_ALIAS("ipt_MARK"); | |||
25 | MODULE_ALIAS("ip6t_MARK"); | 25 | MODULE_ALIAS("ip6t_MARK"); |
26 | 26 | ||
27 | static unsigned int | 27 | static unsigned int |
28 | mark_tg_v0(struct sk_buff *skb, const struct xt_target_param *par) | ||
29 | { | ||
30 | const struct xt_mark_target_info *markinfo = par->targinfo; | ||
31 | |||
32 | skb->mark = markinfo->mark; | ||
33 | return XT_CONTINUE; | ||
34 | } | ||
35 | |||
36 | static unsigned int | ||
37 | mark_tg_v1(struct sk_buff *skb, const struct xt_target_param *par) | ||
38 | { | ||
39 | const struct xt_mark_target_info_v1 *markinfo = par->targinfo; | ||
40 | int mark = 0; | ||
41 | |||
42 | switch (markinfo->mode) { | ||
43 | case XT_MARK_SET: | ||
44 | mark = markinfo->mark; | ||
45 | break; | ||
46 | |||
47 | case XT_MARK_AND: | ||
48 | mark = skb->mark & markinfo->mark; | ||
49 | break; | ||
50 | |||
51 | case XT_MARK_OR: | ||
52 | mark = skb->mark | markinfo->mark; | ||
53 | break; | ||
54 | } | ||
55 | |||
56 | skb->mark = mark; | ||
57 | return XT_CONTINUE; | ||
58 | } | ||
59 | |||
60 | static unsigned int | ||
61 | mark_tg(struct sk_buff *skb, const struct xt_target_param *par) | 28 | mark_tg(struct sk_buff *skb, const struct xt_target_param *par) |
62 | { | 29 | { |
63 | const struct xt_mark_tginfo2 *info = par->targinfo; | 30 | const struct xt_mark_tginfo2 *info = par->targinfo; |
@@ -66,135 +33,23 @@ mark_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
66 | return XT_CONTINUE; | 33 | return XT_CONTINUE; |
67 | } | 34 | } |
68 | 35 | ||
69 | static bool mark_tg_check_v0(const struct xt_tgchk_param *par) | 36 | static struct xt_target mark_tg_reg __read_mostly = { |
70 | { | 37 | .name = "MARK", |
71 | const struct xt_mark_target_info *markinfo = par->targinfo; | 38 | .revision = 2, |
72 | 39 | .family = NFPROTO_UNSPEC, | |
73 | if (markinfo->mark > 0xffffffff) { | 40 | .target = mark_tg, |
74 | printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n"); | 41 | .targetsize = sizeof(struct xt_mark_tginfo2), |
75 | return false; | 42 | .me = THIS_MODULE, |
76 | } | ||
77 | return true; | ||
78 | } | ||
79 | |||
80 | static bool mark_tg_check_v1(const struct xt_tgchk_param *par) | ||
81 | { | ||
82 | const struct xt_mark_target_info_v1 *markinfo = par->targinfo; | ||
83 | |||
84 | if (markinfo->mode != XT_MARK_SET | ||
85 | && markinfo->mode != XT_MARK_AND | ||
86 | && markinfo->mode != XT_MARK_OR) { | ||
87 | printk(KERN_WARNING "MARK: unknown mode %u\n", | ||
88 | markinfo->mode); | ||
89 | return false; | ||
90 | } | ||
91 | if (markinfo->mark > 0xffffffff) { | ||
92 | printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n"); | ||
93 | return false; | ||
94 | } | ||
95 | return true; | ||
96 | } | ||
97 | |||
98 | #ifdef CONFIG_COMPAT | ||
99 | struct compat_xt_mark_target_info { | ||
100 | compat_ulong_t mark; | ||
101 | }; | ||
102 | |||
103 | static void mark_tg_compat_from_user_v0(void *dst, void *src) | ||
104 | { | ||
105 | const struct compat_xt_mark_target_info *cm = src; | ||
106 | struct xt_mark_target_info m = { | ||
107 | .mark = cm->mark, | ||
108 | }; | ||
109 | memcpy(dst, &m, sizeof(m)); | ||
110 | } | ||
111 | |||
112 | static int mark_tg_compat_to_user_v0(void __user *dst, void *src) | ||
113 | { | ||
114 | const struct xt_mark_target_info *m = src; | ||
115 | struct compat_xt_mark_target_info cm = { | ||
116 | .mark = m->mark, | ||
117 | }; | ||
118 | return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0; | ||
119 | } | ||
120 | |||
121 | struct compat_xt_mark_target_info_v1 { | ||
122 | compat_ulong_t mark; | ||
123 | u_int8_t mode; | ||
124 | u_int8_t __pad1; | ||
125 | u_int16_t __pad2; | ||
126 | }; | ||
127 | |||
128 | static void mark_tg_compat_from_user_v1(void *dst, void *src) | ||
129 | { | ||
130 | const struct compat_xt_mark_target_info_v1 *cm = src; | ||
131 | struct xt_mark_target_info_v1 m = { | ||
132 | .mark = cm->mark, | ||
133 | .mode = cm->mode, | ||
134 | }; | ||
135 | memcpy(dst, &m, sizeof(m)); | ||
136 | } | ||
137 | |||
138 | static int mark_tg_compat_to_user_v1(void __user *dst, void *src) | ||
139 | { | ||
140 | const struct xt_mark_target_info_v1 *m = src; | ||
141 | struct compat_xt_mark_target_info_v1 cm = { | ||
142 | .mark = m->mark, | ||
143 | .mode = m->mode, | ||
144 | }; | ||
145 | return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0; | ||
146 | } | ||
147 | #endif /* CONFIG_COMPAT */ | ||
148 | |||
149 | static struct xt_target mark_tg_reg[] __read_mostly = { | ||
150 | { | ||
151 | .name = "MARK", | ||
152 | .family = NFPROTO_UNSPEC, | ||
153 | .revision = 0, | ||
154 | .checkentry = mark_tg_check_v0, | ||
155 | .target = mark_tg_v0, | ||
156 | .targetsize = sizeof(struct xt_mark_target_info), | ||
157 | #ifdef CONFIG_COMPAT | ||
158 | .compatsize = sizeof(struct compat_xt_mark_target_info), | ||
159 | .compat_from_user = mark_tg_compat_from_user_v0, | ||
160 | .compat_to_user = mark_tg_compat_to_user_v0, | ||
161 | #endif | ||
162 | .table = "mangle", | ||
163 | .me = THIS_MODULE, | ||
164 | }, | ||
165 | { | ||
166 | .name = "MARK", | ||
167 | .family = NFPROTO_UNSPEC, | ||
168 | .revision = 1, | ||
169 | .checkentry = mark_tg_check_v1, | ||
170 | .target = mark_tg_v1, | ||
171 | .targetsize = sizeof(struct xt_mark_target_info_v1), | ||
172 | #ifdef CONFIG_COMPAT | ||
173 | .compatsize = sizeof(struct compat_xt_mark_target_info_v1), | ||
174 | .compat_from_user = mark_tg_compat_from_user_v1, | ||
175 | .compat_to_user = mark_tg_compat_to_user_v1, | ||
176 | #endif | ||
177 | .table = "mangle", | ||
178 | .me = THIS_MODULE, | ||
179 | }, | ||
180 | { | ||
181 | .name = "MARK", | ||
182 | .revision = 2, | ||
183 | .family = NFPROTO_UNSPEC, | ||
184 | .target = mark_tg, | ||
185 | .targetsize = sizeof(struct xt_mark_tginfo2), | ||
186 | .me = THIS_MODULE, | ||
187 | }, | ||
188 | }; | 43 | }; |
189 | 44 | ||
190 | static int __init mark_tg_init(void) | 45 | static int __init mark_tg_init(void) |
191 | { | 46 | { |
192 | return xt_register_targets(mark_tg_reg, ARRAY_SIZE(mark_tg_reg)); | 47 | return xt_register_target(&mark_tg_reg); |
193 | } | 48 | } |
194 | 49 | ||
195 | static void __exit mark_tg_exit(void) | 50 | static void __exit mark_tg_exit(void) |
196 | { | 51 | { |
197 | xt_unregister_targets(mark_tg_reg, ARRAY_SIZE(mark_tg_reg)); | 52 | xt_unregister_target(&mark_tg_reg); |
198 | } | 53 | } |
199 | 54 | ||
200 | module_init(mark_tg_init); | 55 | module_init(mark_tg_init); |
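MARK keeps only revision 2; the deleted revisions 0 and 1 (plain set, and the set/and/or modes) are superseded by the mask-based xt_mark_tginfo2 interface. Its single update rule, which for this kernel version should be skb->mark = (skb->mark & ~mask) ^ mark (the assignment itself is outside the hunk above, so treat the exact form as recalled rather than quoted), can express all three old modes. A worked check of how mask/value pairs map onto them:

    #include <assert.h>
    #include <stdint.h>

    /* Revision 2 style update (assumed semantics, see note above). */
    static uint32_t mark_apply(uint32_t skb_mark, uint32_t value, uint32_t mask)
    {
            return (skb_mark & ~mask) ^ value;
    }

    int main(void)
    {
            uint32_t m = 0x00f0ff00, v = 0x0000ffff;

            assert(mark_apply(m, v, 0xffffffff) == v);          /* old XT_MARK_SET */
            assert(mark_apply(m, v, v)          == (m | v));    /* old XT_MARK_OR  */
            assert(mark_apply(m, 0, ~v)         == (m & v));    /* old XT_MARK_AND */
            return 0;
    }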
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index 86cacab7a4a3..122aa8b0147b 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c | |||
@@ -47,36 +47,6 @@ connmark_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
47 | return ((ct->mark & info->mask) == info->mark) ^ info->invert; | 47 | return ((ct->mark & info->mask) == info->mark) ^ info->invert; |
48 | } | 48 | } |
49 | 49 | ||
50 | static bool | ||
51 | connmark_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) | ||
52 | { | ||
53 | const struct xt_connmark_info *info = par->matchinfo; | ||
54 | const struct nf_conn *ct; | ||
55 | enum ip_conntrack_info ctinfo; | ||
56 | |||
57 | ct = nf_ct_get(skb, &ctinfo); | ||
58 | if (!ct) | ||
59 | return false; | ||
60 | |||
61 | return ((ct->mark & info->mask) == info->mark) ^ info->invert; | ||
62 | } | ||
63 | |||
64 | static bool connmark_mt_check_v0(const struct xt_mtchk_param *par) | ||
65 | { | ||
66 | const struct xt_connmark_info *cm = par->matchinfo; | ||
67 | |||
68 | if (cm->mark > 0xffffffff || cm->mask > 0xffffffff) { | ||
69 | printk(KERN_WARNING "connmark: only support 32bit mark\n"); | ||
70 | return false; | ||
71 | } | ||
72 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { | ||
73 | printk(KERN_WARNING "can't load conntrack support for " | ||
74 | "proto=%u\n", par->family); | ||
75 | return false; | ||
76 | } | ||
77 | return true; | ||
78 | } | ||
79 | |||
80 | static bool connmark_mt_check(const struct xt_mtchk_param *par) | 50 | static bool connmark_mt_check(const struct xt_mtchk_param *par) |
81 | { | 51 | { |
82 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { | 52 | if (nf_ct_l3proto_try_module_get(par->family) < 0) { |
@@ -92,74 +62,25 @@ static void connmark_mt_destroy(const struct xt_mtdtor_param *par) | |||
92 | nf_ct_l3proto_module_put(par->family); | 62 | nf_ct_l3proto_module_put(par->family); |
93 | } | 63 | } |
94 | 64 | ||
95 | #ifdef CONFIG_COMPAT | 65 | static struct xt_match connmark_mt_reg __read_mostly = { |
96 | struct compat_xt_connmark_info { | 66 | .name = "connmark", |
97 | compat_ulong_t mark, mask; | 67 | .revision = 1, |
98 | u_int8_t invert; | 68 | .family = NFPROTO_UNSPEC, |
99 | u_int8_t __pad1; | 69 | .checkentry = connmark_mt_check, |
100 | u_int16_t __pad2; | 70 | .match = connmark_mt, |
101 | }; | 71 | .matchsize = sizeof(struct xt_connmark_mtinfo1), |
102 | 72 | .destroy = connmark_mt_destroy, | |
103 | static void connmark_mt_compat_from_user_v0(void *dst, void *src) | 73 | .me = THIS_MODULE, |
104 | { | ||
105 | const struct compat_xt_connmark_info *cm = src; | ||
106 | struct xt_connmark_info m = { | ||
107 | .mark = cm->mark, | ||
108 | .mask = cm->mask, | ||
109 | .invert = cm->invert, | ||
110 | }; | ||
111 | memcpy(dst, &m, sizeof(m)); | ||
112 | } | ||
113 | |||
114 | static int connmark_mt_compat_to_user_v0(void __user *dst, void *src) | ||
115 | { | ||
116 | const struct xt_connmark_info *m = src; | ||
117 | struct compat_xt_connmark_info cm = { | ||
118 | .mark = m->mark, | ||
119 | .mask = m->mask, | ||
120 | .invert = m->invert, | ||
121 | }; | ||
122 | return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0; | ||
123 | } | ||
124 | #endif /* CONFIG_COMPAT */ | ||
125 | |||
126 | static struct xt_match connmark_mt_reg[] __read_mostly = { | ||
127 | { | ||
128 | .name = "connmark", | ||
129 | .revision = 0, | ||
130 | .family = NFPROTO_UNSPEC, | ||
131 | .checkentry = connmark_mt_check_v0, | ||
132 | .match = connmark_mt_v0, | ||
133 | .destroy = connmark_mt_destroy, | ||
134 | .matchsize = sizeof(struct xt_connmark_info), | ||
135 | #ifdef CONFIG_COMPAT | ||
136 | .compatsize = sizeof(struct compat_xt_connmark_info), | ||
137 | .compat_from_user = connmark_mt_compat_from_user_v0, | ||
138 | .compat_to_user = connmark_mt_compat_to_user_v0, | ||
139 | #endif | ||
140 | .me = THIS_MODULE | ||
141 | }, | ||
142 | { | ||
143 | .name = "connmark", | ||
144 | .revision = 1, | ||
145 | .family = NFPROTO_UNSPEC, | ||
146 | .checkentry = connmark_mt_check, | ||
147 | .match = connmark_mt, | ||
148 | .matchsize = sizeof(struct xt_connmark_mtinfo1), | ||
149 | .destroy = connmark_mt_destroy, | ||
150 | .me = THIS_MODULE, | ||
151 | }, | ||
152 | }; | 74 | }; |
153 | 75 | ||
154 | static int __init connmark_mt_init(void) | 76 | static int __init connmark_mt_init(void) |
155 | { | 77 | { |
156 | return xt_register_matches(connmark_mt_reg, | 78 | return xt_register_match(&connmark_mt_reg); |
157 | ARRAY_SIZE(connmark_mt_reg)); | ||
158 | } | 79 | } |
159 | 80 | ||
160 | static void __exit connmark_mt_exit(void) | 81 | static void __exit connmark_mt_exit(void) |
161 | { | 82 | { |
162 | xt_unregister_matches(connmark_mt_reg, ARRAY_SIZE(connmark_mt_reg)); | 83 | xt_unregister_match(&connmark_mt_reg); |
163 | } | 84 | } |
164 | 85 | ||
165 | module_init(connmark_mt_init); | 86 | module_init(connmark_mt_init); |
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index fc581800698e..6dc4652f2fe8 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c | |||
@@ -19,101 +19,12 @@ | |||
19 | 19 | ||
20 | MODULE_LICENSE("GPL"); | 20 | MODULE_LICENSE("GPL"); |
21 | MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); | 21 | MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>"); |
22 | MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>"); | 22 | MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); |
23 | MODULE_DESCRIPTION("Xtables: connection tracking state match"); | 23 | MODULE_DESCRIPTION("Xtables: connection tracking state match"); |
24 | MODULE_ALIAS("ipt_conntrack"); | 24 | MODULE_ALIAS("ipt_conntrack"); |
25 | MODULE_ALIAS("ip6t_conntrack"); | 25 | MODULE_ALIAS("ip6t_conntrack"); |
26 | 26 | ||
27 | static bool | 27 | static bool |
28 | conntrack_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) | ||
29 | { | ||
30 | const struct xt_conntrack_info *sinfo = par->matchinfo; | ||
31 | const struct nf_conn *ct; | ||
32 | enum ip_conntrack_info ctinfo; | ||
33 | unsigned int statebit; | ||
34 | |||
35 | ct = nf_ct_get(skb, &ctinfo); | ||
36 | |||
37 | #define FWINV(bool, invflg) ((bool) ^ !!(sinfo->invflags & (invflg))) | ||
38 | |||
39 | if (ct == &nf_conntrack_untracked) | ||
40 | statebit = XT_CONNTRACK_STATE_UNTRACKED; | ||
41 | else if (ct) | ||
42 | statebit = XT_CONNTRACK_STATE_BIT(ctinfo); | ||
43 | else | ||
44 | statebit = XT_CONNTRACK_STATE_INVALID; | ||
45 | |||
46 | if (sinfo->flags & XT_CONNTRACK_STATE) { | ||
47 | if (ct) { | ||
48 | if (test_bit(IPS_SRC_NAT_BIT, &ct->status)) | ||
49 | statebit |= XT_CONNTRACK_STATE_SNAT; | ||
50 | if (test_bit(IPS_DST_NAT_BIT, &ct->status)) | ||
51 | statebit |= XT_CONNTRACK_STATE_DNAT; | ||
52 | } | ||
53 | if (FWINV((statebit & sinfo->statemask) == 0, | ||
54 | XT_CONNTRACK_STATE)) | ||
55 | return false; | ||
56 | } | ||
57 | |||
58 | if (ct == NULL) { | ||
59 | if (sinfo->flags & ~XT_CONNTRACK_STATE) | ||
60 | return false; | ||
61 | return true; | ||
62 | } | ||
63 | |||
64 | if (sinfo->flags & XT_CONNTRACK_PROTO && | ||
65 | FWINV(nf_ct_protonum(ct) != | ||
66 | sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, | ||
67 | XT_CONNTRACK_PROTO)) | ||
68 | return false; | ||
69 | |||
70 | if (sinfo->flags & XT_CONNTRACK_ORIGSRC && | ||
71 | FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip & | ||
72 | sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != | ||
73 | sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, | ||
74 | XT_CONNTRACK_ORIGSRC)) | ||
75 | return false; | ||
76 | |||
77 | if (sinfo->flags & XT_CONNTRACK_ORIGDST && | ||
78 | FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip & | ||
79 | sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != | ||
80 | sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, | ||
81 | XT_CONNTRACK_ORIGDST)) | ||
82 | return false; | ||
83 | |||
84 | if (sinfo->flags & XT_CONNTRACK_REPLSRC && | ||
85 | FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip & | ||
86 | sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != | ||
87 | sinfo->tuple[IP_CT_DIR_REPLY].src.ip, | ||
88 | XT_CONNTRACK_REPLSRC)) | ||
89 | return false; | ||
90 | |||
91 | if (sinfo->flags & XT_CONNTRACK_REPLDST && | ||
92 | FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip & | ||
93 | sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != | ||
94 | sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, | ||
95 | XT_CONNTRACK_REPLDST)) | ||
96 | return false; | ||
97 | |||
98 | if (sinfo->flags & XT_CONNTRACK_STATUS && | ||
99 | FWINV((ct->status & sinfo->statusmask) == 0, | ||
100 | XT_CONNTRACK_STATUS)) | ||
101 | return false; | ||
102 | |||
103 | if(sinfo->flags & XT_CONNTRACK_EXPIRES) { | ||
104 | unsigned long expires = timer_pending(&ct->timeout) ? | ||
105 | (ct->timeout.expires - jiffies)/HZ : 0; | ||
106 | |||
107 | if (FWINV(!(expires >= sinfo->expires_min && | ||
108 | expires <= sinfo->expires_max), | ||
109 | XT_CONNTRACK_EXPIRES)) | ||
110 | return false; | ||
111 | } | ||
112 | return true; | ||
113 | #undef FWINV | ||
114 | } | ||
115 | |||
116 | static bool | ||
117 | conntrack_addrcmp(const union nf_inet_addr *kaddr, | 28 | conntrack_addrcmp(const union nf_inet_addr *kaddr, |
118 | const union nf_inet_addr *uaddr, | 29 | const union nf_inet_addr *uaddr, |
119 | const union nf_inet_addr *umask, unsigned int l3proto) | 30 | const union nf_inet_addr *umask, unsigned int l3proto) |
@@ -337,73 +248,9 @@ static void conntrack_mt_destroy_v1(const struct xt_mtdtor_param *par) | |||
337 | conntrack_mt_destroy(par); | 248 | conntrack_mt_destroy(par); |
338 | } | 249 | } |
339 | 250 | ||
340 | #ifdef CONFIG_COMPAT | ||
341 | struct compat_xt_conntrack_info | ||
342 | { | ||
343 | compat_uint_t statemask; | ||
344 | compat_uint_t statusmask; | ||
345 | struct ip_conntrack_old_tuple tuple[IP_CT_DIR_MAX]; | ||
346 | struct in_addr sipmsk[IP_CT_DIR_MAX]; | ||
347 | struct in_addr dipmsk[IP_CT_DIR_MAX]; | ||
348 | compat_ulong_t expires_min; | ||
349 | compat_ulong_t expires_max; | ||
350 | u_int8_t flags; | ||
351 | u_int8_t invflags; | ||
352 | }; | ||
353 | |||
354 | static void conntrack_mt_compat_from_user_v0(void *dst, void *src) | ||
355 | { | ||
356 | const struct compat_xt_conntrack_info *cm = src; | ||
357 | struct xt_conntrack_info m = { | ||
358 | .statemask = cm->statemask, | ||
359 | .statusmask = cm->statusmask, | ||
360 | .expires_min = cm->expires_min, | ||
361 | .expires_max = cm->expires_max, | ||
362 | .flags = cm->flags, | ||
363 | .invflags = cm->invflags, | ||
364 | }; | ||
365 | memcpy(m.tuple, cm->tuple, sizeof(m.tuple)); | ||
366 | memcpy(m.sipmsk, cm->sipmsk, sizeof(m.sipmsk)); | ||
367 | memcpy(m.dipmsk, cm->dipmsk, sizeof(m.dipmsk)); | ||
368 | memcpy(dst, &m, sizeof(m)); | ||
369 | } | ||
370 | |||
371 | static int conntrack_mt_compat_to_user_v0(void __user *dst, void *src) | ||
372 | { | ||
373 | const struct xt_conntrack_info *m = src; | ||
374 | struct compat_xt_conntrack_info cm = { | ||
375 | .statemask = m->statemask, | ||
376 | .statusmask = m->statusmask, | ||
377 | .expires_min = m->expires_min, | ||
378 | .expires_max = m->expires_max, | ||
379 | .flags = m->flags, | ||
380 | .invflags = m->invflags, | ||
381 | }; | ||
382 | memcpy(cm.tuple, m->tuple, sizeof(cm.tuple)); | ||
383 | memcpy(cm.sipmsk, m->sipmsk, sizeof(cm.sipmsk)); | ||
384 | memcpy(cm.dipmsk, m->dipmsk, sizeof(cm.dipmsk)); | ||
385 | return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0; | ||
386 | } | ||
387 | #endif | ||
388 | |||
389 | static struct xt_match conntrack_mt_reg[] __read_mostly = { | 251 | static struct xt_match conntrack_mt_reg[] __read_mostly = { |
390 | { | 252 | { |
391 | .name = "conntrack", | 253 | .name = "conntrack", |
392 | .revision = 0, | ||
393 | .family = NFPROTO_IPV4, | ||
394 | .match = conntrack_mt_v0, | ||
395 | .checkentry = conntrack_mt_check, | ||
396 | .destroy = conntrack_mt_destroy, | ||
397 | .matchsize = sizeof(struct xt_conntrack_info), | ||
398 | .me = THIS_MODULE, | ||
399 | #ifdef CONFIG_COMPAT | ||
400 | .compatsize = sizeof(struct compat_xt_conntrack_info), | ||
401 | .compat_from_user = conntrack_mt_compat_from_user_v0, | ||
402 | .compat_to_user = conntrack_mt_compat_to_user_v0, | ||
403 | #endif | ||
404 | }, | ||
405 | { | ||
406 | .name = "conntrack", | ||
407 | .revision = 1, | 254 | .revision = 1, |
408 | .family = NFPROTO_UNSPEC, | 255 | .family = NFPROTO_UNSPEC, |
409 | .matchsize = sizeof(struct xt_conntrack_mtinfo1), | 256 | .matchsize = sizeof(struct xt_conntrack_mtinfo1), |
diff --git a/net/netfilter/xt_dscp.c b/net/netfilter/xt_dscp.c index c3f8085460d7..0280d3a8c161 100644 --- a/net/netfilter/xt_dscp.c +++ b/net/netfilter/xt_dscp.c | |||
@@ -15,7 +15,6 @@ | |||
15 | 15 | ||
16 | #include <linux/netfilter/x_tables.h> | 16 | #include <linux/netfilter/x_tables.h> |
17 | #include <linux/netfilter/xt_dscp.h> | 17 | #include <linux/netfilter/xt_dscp.h> |
18 | #include <linux/netfilter_ipv4/ipt_tos.h> | ||
19 | 18 | ||
20 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | 19 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); |
21 | MODULE_DESCRIPTION("Xtables: DSCP/TOS field match"); | 20 | MODULE_DESCRIPTION("Xtables: DSCP/TOS field match"); |
@@ -55,14 +54,6 @@ static bool dscp_mt_check(const struct xt_mtchk_param *par) | |||
55 | return true; | 54 | return true; |
56 | } | 55 | } |
57 | 56 | ||
58 | static bool | ||
59 | tos_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) | ||
60 | { | ||
61 | const struct ipt_tos_info *info = par->matchinfo; | ||
62 | |||
63 | return (ip_hdr(skb)->tos == info->tos) ^ info->invert; | ||
64 | } | ||
65 | |||
66 | static bool tos_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 57 | static bool tos_mt(const struct sk_buff *skb, const struct xt_match_param *par) |
67 | { | 58 | { |
68 | const struct xt_tos_match_info *info = par->matchinfo; | 59 | const struct xt_tos_match_info *info = par->matchinfo; |
@@ -94,14 +85,6 @@ static struct xt_match dscp_mt_reg[] __read_mostly = { | |||
94 | }, | 85 | }, |
95 | { | 86 | { |
96 | .name = "tos", | 87 | .name = "tos", |
97 | .revision = 0, | ||
98 | .family = NFPROTO_IPV4, | ||
99 | .match = tos_mt_v0, | ||
100 | .matchsize = sizeof(struct ipt_tos_info), | ||
101 | .me = THIS_MODULE, | ||
102 | }, | ||
103 | { | ||
104 | .name = "tos", | ||
105 | .revision = 1, | 88 | .revision = 1, |
106 | .family = NFPROTO_IPV4, | 89 | .family = NFPROTO_IPV4, |
107 | .match = tos_mt, | 90 | .match = tos_mt, |
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c index 501f9b623188..ffc96387d556 100644 --- a/net/netfilter/xt_iprange.c +++ b/net/netfilter/xt_iprange.c | |||
@@ -14,40 +14,6 @@ | |||
14 | #include <linux/ipv6.h> | 14 | #include <linux/ipv6.h> |
15 | #include <linux/netfilter/x_tables.h> | 15 | #include <linux/netfilter/x_tables.h> |
16 | #include <linux/netfilter/xt_iprange.h> | 16 | #include <linux/netfilter/xt_iprange.h> |
17 | #include <linux/netfilter_ipv4/ipt_iprange.h> | ||
18 | |||
19 | static bool | ||
20 | iprange_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) | ||
21 | { | ||
22 | const struct ipt_iprange_info *info = par->matchinfo; | ||
23 | const struct iphdr *iph = ip_hdr(skb); | ||
24 | |||
25 | if (info->flags & IPRANGE_SRC) { | ||
26 | if ((ntohl(iph->saddr) < ntohl(info->src.min_ip) | ||
27 | || ntohl(iph->saddr) > ntohl(info->src.max_ip)) | ||
28 | ^ !!(info->flags & IPRANGE_SRC_INV)) { | ||
29 | pr_debug("src IP %pI4 NOT in range %s%pI4-%pI4\n", | ||
30 | &iph->saddr, | ||
31 | info->flags & IPRANGE_SRC_INV ? "(INV) " : "", | ||
32 | &info->src.min_ip, | ||
33 | &info->src.max_ip); | ||
34 | return false; | ||
35 | } | ||
36 | } | ||
37 | if (info->flags & IPRANGE_DST) { | ||
38 | if ((ntohl(iph->daddr) < ntohl(info->dst.min_ip) | ||
39 | || ntohl(iph->daddr) > ntohl(info->dst.max_ip)) | ||
40 | ^ !!(info->flags & IPRANGE_DST_INV)) { | ||
41 | pr_debug("dst IP %pI4 NOT in range %s%pI4-%pI4\n", | ||
42 | &iph->daddr, | ||
43 | info->flags & IPRANGE_DST_INV ? "(INV) " : "", | ||
44 | &info->dst.min_ip, | ||
45 | &info->dst.max_ip); | ||
46 | return false; | ||
47 | } | ||
48 | } | ||
49 | return true; | ||
50 | } | ||
51 | 17 | ||
52 | static bool | 18 | static bool |
53 | iprange_mt4(const struct sk_buff *skb, const struct xt_match_param *par) | 19 | iprange_mt4(const struct sk_buff *skb, const struct xt_match_param *par) |
@@ -127,14 +93,6 @@ iprange_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
127 | static struct xt_match iprange_mt_reg[] __read_mostly = { | 93 | static struct xt_match iprange_mt_reg[] __read_mostly = { |
128 | { | 94 | { |
129 | .name = "iprange", | 95 | .name = "iprange", |
130 | .revision = 0, | ||
131 | .family = NFPROTO_IPV4, | ||
132 | .match = iprange_mt_v0, | ||
133 | .matchsize = sizeof(struct ipt_iprange_info), | ||
134 | .me = THIS_MODULE, | ||
135 | }, | ||
136 | { | ||
137 | .name = "iprange", | ||
138 | .revision = 1, | 96 | .revision = 1, |
139 | .family = NFPROTO_IPV4, | 97 | .family = NFPROTO_IPV4, |
140 | .match = iprange_mt4, | 98 | .match = iprange_mt4, |
@@ -164,7 +122,8 @@ static void __exit iprange_mt_exit(void) | |||
164 | module_init(iprange_mt_init); | 122 | module_init(iprange_mt_init); |
165 | module_exit(iprange_mt_exit); | 123 | module_exit(iprange_mt_exit); |
166 | MODULE_LICENSE("GPL"); | 124 | MODULE_LICENSE("GPL"); |
167 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>, Jan Engelhardt <jengelh@computergmbh.de>"); | 125 | MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); |
126 | MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); | ||
168 | MODULE_DESCRIPTION("Xtables: arbitrary IPv4 range matching"); | 127 | MODULE_DESCRIPTION("Xtables: arbitrary IPv4 range matching"); |
169 | MODULE_ALIAS("ipt_iprange"); | 128 | MODULE_ALIAS("ipt_iprange"); |
170 | MODULE_ALIAS("ip6t_iprange"); | 129 | MODULE_ALIAS("ip6t_iprange"); |
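The iprange match likewise drops its IPv4-only revision 0 (the old ipt_iprange_info ABI); revision 1 covers both IPv4 and IPv6. The core test stays the same in spirit: convert addresses to host byte order, do an inclusive range comparison, and optionally invert the result. A stand-alone version of that check:

    #include <arpa/inet.h>
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* True when addr (network byte order) lies in [min, max], inclusive;
     * 'invert' flips the result, as IPRANGE_SRC_INV/IPRANGE_DST_INV do. */
    static bool in_range4(uint32_t addr, uint32_t min, uint32_t max, bool invert)
    {
            bool hit = ntohl(addr) >= ntohl(min) && ntohl(addr) <= ntohl(max);

            return hit ^ invert;
    }

    int main(void)
    {
            uint32_t lo, hi, a;

            inet_pton(AF_INET, "192.168.1.10",  &lo);
            inet_pton(AF_INET, "192.168.1.200", &hi);
            inet_pton(AF_INET, "192.168.1.42",  &a);

            assert(in_range4(a, lo, hi, false));
            assert(!in_range4(a, lo, hi, true));
            return 0;
    }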
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c index 10b9e34bbc5b..1db07d8125f8 100644 --- a/net/netfilter/xt_mark.c +++ b/net/netfilter/xt_mark.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * (C) 1999-2001 Marc Boucher <marc@mbsi.ca> | 4 | * (C) 1999-2001 Marc Boucher <marc@mbsi.ca> |
5 | * Copyright © CC Computer Consultants GmbH, 2007 - 2008 | 5 | * Copyright © CC Computer Consultants GmbH, 2007 - 2008 |
6 | * Jan Engelhardt <jengelh@computergmbh.de> | 6 | * Jan Engelhardt <jengelh@medozas.de> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
@@ -23,14 +23,6 @@ MODULE_ALIAS("ipt_mark"); | |||
23 | MODULE_ALIAS("ip6t_mark"); | 23 | MODULE_ALIAS("ip6t_mark"); |
24 | 24 | ||
25 | static bool | 25 | static bool |
26 | mark_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) | ||
27 | { | ||
28 | const struct xt_mark_info *info = par->matchinfo; | ||
29 | |||
30 | return ((skb->mark & info->mask) == info->mark) ^ info->invert; | ||
31 | } | ||
32 | |||
33 | static bool | ||
34 | mark_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 26 | mark_mt(const struct sk_buff *skb, const struct xt_match_param *par) |
35 | { | 27 | { |
36 | const struct xt_mark_mtinfo1 *info = par->matchinfo; | 28 | const struct xt_mark_mtinfo1 *info = par->matchinfo; |
@@ -38,81 +30,23 @@ mark_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
38 | return ((skb->mark & info->mask) == info->mark) ^ info->invert; | 30 | return ((skb->mark & info->mask) == info->mark) ^ info->invert; |
39 | } | 31 | } |
40 | 32 | ||
41 | static bool mark_mt_check_v0(const struct xt_mtchk_param *par) | 33 | static struct xt_match mark_mt_reg __read_mostly = { |
42 | { | 34 | .name = "mark", |
43 | const struct xt_mark_info *minfo = par->matchinfo; | 35 | .revision = 1, |
44 | 36 | .family = NFPROTO_UNSPEC, | |
45 | if (minfo->mark > 0xffffffff || minfo->mask > 0xffffffff) { | 37 | .match = mark_mt, |
46 | printk(KERN_WARNING "mark: only supports 32bit mark\n"); | 38 | .matchsize = sizeof(struct xt_mark_mtinfo1), |
47 | return false; | 39 | .me = THIS_MODULE, |
48 | } | ||
49 | return true; | ||
50 | } | ||
51 | |||
52 | #ifdef CONFIG_COMPAT | ||
53 | struct compat_xt_mark_info { | ||
54 | compat_ulong_t mark, mask; | ||
55 | u_int8_t invert; | ||
56 | u_int8_t __pad1; | ||
57 | u_int16_t __pad2; | ||
58 | }; | ||
59 | |||
60 | static void mark_mt_compat_from_user_v0(void *dst, void *src) | ||
61 | { | ||
62 | const struct compat_xt_mark_info *cm = src; | ||
63 | struct xt_mark_info m = { | ||
64 | .mark = cm->mark, | ||
65 | .mask = cm->mask, | ||
66 | .invert = cm->invert, | ||
67 | }; | ||
68 | memcpy(dst, &m, sizeof(m)); | ||
69 | } | ||
70 | |||
71 | static int mark_mt_compat_to_user_v0(void __user *dst, void *src) | ||
72 | { | ||
73 | const struct xt_mark_info *m = src; | ||
74 | struct compat_xt_mark_info cm = { | ||
75 | .mark = m->mark, | ||
76 | .mask = m->mask, | ||
77 | .invert = m->invert, | ||
78 | }; | ||
79 | return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0; | ||
80 | } | ||
81 | #endif /* CONFIG_COMPAT */ | ||
82 | |||
83 | static struct xt_match mark_mt_reg[] __read_mostly = { | ||
84 | { | ||
85 | .name = "mark", | ||
86 | .revision = 0, | ||
87 | .family = NFPROTO_UNSPEC, | ||
88 | .checkentry = mark_mt_check_v0, | ||
89 | .match = mark_mt_v0, | ||
90 | .matchsize = sizeof(struct xt_mark_info), | ||
91 | #ifdef CONFIG_COMPAT | ||
92 | .compatsize = sizeof(struct compat_xt_mark_info), | ||
93 | .compat_from_user = mark_mt_compat_from_user_v0, | ||
94 | .compat_to_user = mark_mt_compat_to_user_v0, | ||
95 | #endif | ||
96 | .me = THIS_MODULE, | ||
97 | }, | ||
98 | { | ||
99 | .name = "mark", | ||
100 | .revision = 1, | ||
101 | .family = NFPROTO_UNSPEC, | ||
102 | .match = mark_mt, | ||
103 | .matchsize = sizeof(struct xt_mark_mtinfo1), | ||
104 | .me = THIS_MODULE, | ||
105 | }, | ||
106 | }; | 40 | }; |
107 | 41 | ||
108 | static int __init mark_mt_init(void) | 42 | static int __init mark_mt_init(void) |
109 | { | 43 | { |
110 | return xt_register_matches(mark_mt_reg, ARRAY_SIZE(mark_mt_reg)); | 44 | return xt_register_match(&mark_mt_reg); |
111 | } | 45 | } |
112 | 46 | ||
113 | static void __exit mark_mt_exit(void) | 47 | static void __exit mark_mt_exit(void) |
114 | { | 48 | { |
115 | xt_unregister_matches(mark_mt_reg, ARRAY_SIZE(mark_mt_reg)); | 49 | xt_unregister_match(&mark_mt_reg); |
116 | } | 50 | } |
117 | 51 | ||
118 | module_init(mark_mt_init); | 52 | module_init(mark_mt_init); |
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c index 0f482e2440b4..63e190504656 100644 --- a/net/netfilter/xt_osf.c +++ b/net/netfilter/xt_osf.c | |||
@@ -70,7 +70,8 @@ static void xt_osf_finger_free_rcu(struct rcu_head *rcu_head) | |||
70 | } | 70 | } |
71 | 71 | ||
72 | static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb, | 72 | static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb, |
73 | struct nlmsghdr *nlh, struct nlattr *osf_attrs[]) | 73 | const struct nlmsghdr *nlh, |
74 | const struct nlattr * const osf_attrs[]) | ||
74 | { | 75 | { |
75 | struct xt_osf_user_finger *f; | 76 | struct xt_osf_user_finger *f; |
76 | struct xt_osf_finger *kf = NULL, *sf; | 77 | struct xt_osf_finger *kf = NULL, *sf; |
@@ -112,7 +113,8 @@ static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb, | |||
112 | } | 113 | } |
113 | 114 | ||
114 | static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb, | 115 | static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb, |
115 | struct nlmsghdr *nlh, struct nlattr *osf_attrs[]) | 116 | const struct nlmsghdr *nlh, |
117 | const struct nlattr * const osf_attrs[]) | ||
116 | { | 118 | { |
117 | struct xt_osf_user_finger *f; | 119 | struct xt_osf_user_finger *f; |
118 | struct xt_osf_finger *sf; | 120 | struct xt_osf_finger *sf; |
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c index 22b2a5e881ea..d24c76dffee2 100644 --- a/net/netfilter/xt_owner.c +++ b/net/netfilter/xt_owner.c | |||
@@ -5,7 +5,6 @@ | |||
5 | * (C) 2000 Marc Boucher <marc@mbsi.ca> | 5 | * (C) 2000 Marc Boucher <marc@mbsi.ca> |
6 | * | 6 | * |
7 | * Copyright © CC Computer Consultants GmbH, 2007 - 2008 | 7 | * Copyright © CC Computer Consultants GmbH, 2007 - 2008 |
8 | * <jengelh@computergmbh.de> | ||
9 | * | 8 | * |
10 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
@@ -17,60 +16,6 @@ | |||
17 | #include <net/sock.h> | 16 | #include <net/sock.h> |
18 | #include <linux/netfilter/x_tables.h> | 17 | #include <linux/netfilter/x_tables.h> |
19 | #include <linux/netfilter/xt_owner.h> | 18 | #include <linux/netfilter/xt_owner.h> |
20 | #include <linux/netfilter_ipv4/ipt_owner.h> | ||
21 | #include <linux/netfilter_ipv6/ip6t_owner.h> | ||
22 | |||
23 | static bool | ||
24 | owner_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) | ||
25 | { | ||
26 | const struct ipt_owner_info *info = par->matchinfo; | ||
27 | const struct file *filp; | ||
28 | |||
29 | if (skb->sk == NULL || skb->sk->sk_socket == NULL) | ||
30 | return false; | ||
31 | |||
32 | filp = skb->sk->sk_socket->file; | ||
33 | if (filp == NULL) | ||
34 | return false; | ||
35 | |||
36 | if (info->match & IPT_OWNER_UID) | ||
37 | if ((filp->f_cred->fsuid != info->uid) ^ | ||
38 | !!(info->invert & IPT_OWNER_UID)) | ||
39 | return false; | ||
40 | |||
41 | if (info->match & IPT_OWNER_GID) | ||
42 | if ((filp->f_cred->fsgid != info->gid) ^ | ||
43 | !!(info->invert & IPT_OWNER_GID)) | ||
44 | return false; | ||
45 | |||
46 | return true; | ||
47 | } | ||
48 | |||
49 | static bool | ||
50 | owner_mt6_v0(const struct sk_buff *skb, const struct xt_match_param *par) | ||
51 | { | ||
52 | const struct ip6t_owner_info *info = par->matchinfo; | ||
53 | const struct file *filp; | ||
54 | |||
55 | if (skb->sk == NULL || skb->sk->sk_socket == NULL) | ||
56 | return false; | ||
57 | |||
58 | filp = skb->sk->sk_socket->file; | ||
59 | if (filp == NULL) | ||
60 | return false; | ||
61 | |||
62 | if (info->match & IP6T_OWNER_UID) | ||
63 | if ((filp->f_cred->fsuid != info->uid) ^ | ||
64 | !!(info->invert & IP6T_OWNER_UID)) | ||
65 | return false; | ||
66 | |||
67 | if (info->match & IP6T_OWNER_GID) | ||
68 | if ((filp->f_cred->fsgid != info->gid) ^ | ||
69 | !!(info->invert & IP6T_OWNER_GID)) | ||
70 | return false; | ||
71 | |||
72 | return true; | ||
73 | } | ||
74 | 19 | ||
75 | static bool | 20 | static bool |
76 | owner_mt(const struct sk_buff *skb, const struct xt_match_param *par) | 21 | owner_mt(const struct sk_buff *skb, const struct xt_match_param *par) |
@@ -107,81 +52,30 @@ owner_mt(const struct sk_buff *skb, const struct xt_match_param *par) | |||
107 | return true; | 52 | return true; |
108 | } | 53 | } |
109 | 54 | ||
110 | static bool owner_mt_check_v0(const struct xt_mtchk_param *par) | 55 | static struct xt_match owner_mt_reg __read_mostly = { |
111 | { | 56 | .name = "owner", |
112 | const struct ipt_owner_info *info = par->matchinfo; | 57 | .revision = 1, |
113 | 58 | .family = NFPROTO_UNSPEC, | |
114 | if (info->match & (IPT_OWNER_PID | IPT_OWNER_SID | IPT_OWNER_COMM)) { | 59 | .match = owner_mt, |
115 | printk(KERN_WARNING KBUILD_MODNAME | 60 | .matchsize = sizeof(struct xt_owner_match_info), |
116 | ": PID, SID and command matching is not " | 61 | .hooks = (1 << NF_INET_LOCAL_OUT) | |
117 | "supported anymore\n"); | 62 | (1 << NF_INET_POST_ROUTING), |
118 | return false; | 63 | .me = THIS_MODULE, |
119 | } | ||
120 | |||
121 | return true; | ||
122 | } | ||
123 | |||
124 | static bool owner_mt6_check_v0(const struct xt_mtchk_param *par) | ||
125 | { | ||
126 | const struct ip6t_owner_info *info = par->matchinfo; | ||
127 | |||
128 | if (info->match & (IP6T_OWNER_PID | IP6T_OWNER_SID)) { | ||
129 | printk(KERN_WARNING KBUILD_MODNAME | ||
130 | ": PID and SID matching is not supported anymore\n"); | ||
131 | return false; | ||
132 | } | ||
133 | |||
134 | return true; | ||
135 | } | ||
136 | |||
137 | static struct xt_match owner_mt_reg[] __read_mostly = { | ||
138 | { | ||
139 | .name = "owner", | ||
140 | .revision = 0, | ||
141 | .family = NFPROTO_IPV4, | ||
142 | .match = owner_mt_v0, | ||
143 | .matchsize = sizeof(struct ipt_owner_info), | ||
144 | .checkentry = owner_mt_check_v0, | ||
145 | .hooks = (1 << NF_INET_LOCAL_OUT) | | ||
146 | (1 << NF_INET_POST_ROUTING), | ||
147 | .me = THIS_MODULE, | ||
148 | }, | ||
149 | { | ||
150 | .name = "owner", | ||
151 | .revision = 0, | ||
152 | .family = NFPROTO_IPV6, | ||
153 | .match = owner_mt6_v0, | ||
154 | .matchsize = sizeof(struct ip6t_owner_info), | ||
155 | .checkentry = owner_mt6_check_v0, | ||
156 | .hooks = (1 << NF_INET_LOCAL_OUT) | | ||
157 | (1 << NF_INET_POST_ROUTING), | ||
158 | .me = THIS_MODULE, | ||
159 | }, | ||
160 | { | ||
161 | .name = "owner", | ||
162 | .revision = 1, | ||
163 | .family = NFPROTO_UNSPEC, | ||
164 | .match = owner_mt, | ||
165 | .matchsize = sizeof(struct xt_owner_match_info), | ||
166 | .hooks = (1 << NF_INET_LOCAL_OUT) | | ||
167 | (1 << NF_INET_POST_ROUTING), | ||
168 | .me = THIS_MODULE, | ||
169 | }, | ||
170 | }; | 64 | }; |
171 | 65 | ||
172 | static int __init owner_mt_init(void) | 66 | static int __init owner_mt_init(void) |
173 | { | 67 | { |
174 | return xt_register_matches(owner_mt_reg, ARRAY_SIZE(owner_mt_reg)); | 68 | return xt_register_match(&owner_mt_reg); |
175 | } | 69 | } |
176 | 70 | ||
177 | static void __exit owner_mt_exit(void) | 71 | static void __exit owner_mt_exit(void) |
178 | { | 72 | { |
179 | xt_unregister_matches(owner_mt_reg, ARRAY_SIZE(owner_mt_reg)); | 73 | xt_unregister_match(&owner_mt_reg); |
180 | } | 74 | } |
181 | 75 | ||
182 | module_init(owner_mt_init); | 76 | module_init(owner_mt_init); |
183 | module_exit(owner_mt_exit); | 77 | module_exit(owner_mt_exit); |
184 | MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>"); | 78 | MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); |
185 | MODULE_DESCRIPTION("Xtables: socket owner matching"); | 79 | MODULE_DESCRIPTION("Xtables: socket owner matching"); |
186 | MODULE_LICENSE("GPL"); | 80 | MODULE_LICENSE("GPL"); |
187 | MODULE_ALIAS("ipt_owner"); | 81 | MODULE_ALIAS("ipt_owner"); |
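Note: the revision-0 IPv4/IPv6 owner matches (and their PID/SID/command stubs) are dropped, leaving only the revision-1 NFPROTO_UNSPEC match registered through xt_register_match(). A sketch of the credential test the match is built around, following the removed v0 code above -- the names here are hypothetical and the surviving owner_mt() body is not part of this diff:

	#include <linux/skbuff.h>
	#include <linux/net.h>
	#include <linux/fs.h>
	#include <linux/cred.h>
	#include <net/sock.h>

	static bool example_skb_uid_match(const struct sk_buff *skb, uid_t uid)
	{
		const struct file *filp;

		if (skb->sk == NULL || skb->sk->sk_socket == NULL)
			return false;		/* no local socket behind this skb */

		filp = skb->sk->sk_socket->file;
		if (filp == NULL)
			return false;		/* kernel socket, no owning file */

		return filp->f_cred->fsuid == uid;
	}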
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 16e6c4378ff1..6ce00205f342 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c | |||
@@ -185,8 +185,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain, | |||
185 | return 0; | 185 | return 0; |
186 | 186 | ||
187 | cfg_unlbl_map_add_failure: | 187 | cfg_unlbl_map_add_failure: |
188 | if (entry != NULL) | 188 | kfree(entry->domain); |
189 | kfree(entry->domain); | ||
190 | kfree(entry); | 189 | kfree(entry); |
191 | kfree(addrmap); | 190 | kfree(addrmap); |
192 | kfree(map4); | 191 | kfree(map4); |
@@ -385,8 +384,7 @@ int netlbl_cfg_cipsov4_map_add(u32 doi, | |||
385 | 384 | ||
386 | cfg_cipsov4_map_add_failure: | 385 | cfg_cipsov4_map_add_failure: |
387 | cipso_v4_doi_putdef(doi_def); | 386 | cipso_v4_doi_putdef(doi_def); |
388 | if (entry != NULL) | 387 | kfree(entry->domain); |
389 | kfree(entry->domain); | ||
390 | kfree(entry); | 388 | kfree(entry); |
391 | kfree(addrmap); | 389 | kfree(addrmap); |
392 | kfree(addrinfo); | 390 | kfree(addrinfo); |
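Note: the netlabel error paths drop the NULL check in front of kfree(entry->domain); presumably the gotos reaching these labels only run once entry has been allocated, and kfree(NULL) is already a no-op, so only the pointer dereference ever needed a guard. A small hypothetical sketch of the same cleanup idiom:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct example_entry {
		char *domain;
	};

	/* Hypothetical configuration helper showing the error-path idiom. */
	static int example_cfg_add(struct example_entry **out)
	{
		struct example_entry *entry;
		char *addrmap = NULL;
		int ret = -ENOMEM;

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (entry == NULL)
			return -ENOMEM;		/* nothing to unwind yet */

		entry->domain = kstrdup("example", GFP_KERNEL);
		if (entry->domain == NULL)
			goto cfg_add_failure;

		addrmap = kzalloc(64, GFP_KERNEL);
		if (addrmap == NULL)
			goto cfg_add_failure;

		kfree(addrmap);			/* a real user would publish this too */
		*out = entry;			/* ownership moves to the caller */
		return 0;

	cfg_add_failure:
		/* entry is known non-NULL here, so the dereference is safe ... */
		kfree(entry->domain);
		kfree(entry);
		/* ... and kfree() of a possibly-NULL pointer is a no-op. */
		kfree(addrmap);
		return ret;
	}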
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2936fa3b6dc8..d0ff382c40ca 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -83,6 +83,11 @@ struct netlink_sock { | |||
83 | struct module *module; | 83 | struct module *module; |
84 | }; | 84 | }; |
85 | 85 | ||
86 | struct listeners_rcu_head { | ||
87 | struct rcu_head rcu_head; | ||
88 | void *ptr; | ||
89 | }; | ||
90 | |||
86 | #define NETLINK_KERNEL_SOCKET 0x1 | 91 | #define NETLINK_KERNEL_SOCKET 0x1 |
87 | #define NETLINK_RECV_PKTINFO 0x2 | 92 | #define NETLINK_RECV_PKTINFO 0x2 |
88 | #define NETLINK_BROADCAST_SEND_ERROR 0x4 | 93 | #define NETLINK_BROADCAST_SEND_ERROR 0x4 |
@@ -1356,7 +1361,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1356 | struct netlink_sock *nlk = nlk_sk(sk); | 1361 | struct netlink_sock *nlk = nlk_sk(sk); |
1357 | int noblock = flags&MSG_DONTWAIT; | 1362 | int noblock = flags&MSG_DONTWAIT; |
1358 | size_t copied; | 1363 | size_t copied; |
1359 | struct sk_buff *skb; | 1364 | struct sk_buff *skb, *frag __maybe_unused = NULL; |
1360 | int err; | 1365 | int err; |
1361 | 1366 | ||
1362 | if (flags&MSG_OOB) | 1367 | if (flags&MSG_OOB) |
@@ -1368,6 +1373,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1368 | if (skb == NULL) | 1373 | if (skb == NULL) |
1369 | goto out; | 1374 | goto out; |
1370 | 1375 | ||
1376 | #ifdef CONFIG_COMPAT_NETLINK_MESSAGES | ||
1377 | if (unlikely(skb_shinfo(skb)->frag_list)) { | ||
1378 | bool need_compat = !!(flags & MSG_CMSG_COMPAT); | ||
1379 | |||
1380 | /* | ||
1381 | * If this skb has a frag_list, then here that means that | ||
1382 | * we will have to use the frag_list skb for compat tasks | ||
1383 | * and the regular skb for non-compat tasks. | ||
1384 | * | ||
1385 | * The skb might (and likely will) be cloned, so we can't | ||
1386 | * just reset frag_list and go on with things -- we need to | ||
1387 | * keep that. For the compat case that's easy -- simply get | ||
1388 | * a reference to the compat skb and free the regular one | ||
1389 | * including the frag. For the non-compat case, we need to | ||
1390 | * avoid sending the frag to the user -- so assign NULL but | ||
1391 | * restore it below before freeing the skb. | ||
1392 | */ | ||
1393 | if (need_compat) { | ||
1394 | struct sk_buff *compskb = skb_shinfo(skb)->frag_list; | ||
1395 | skb_get(compskb); | ||
1396 | kfree_skb(skb); | ||
1397 | skb = compskb; | ||
1398 | } else { | ||
1399 | frag = skb_shinfo(skb)->frag_list; | ||
1400 | skb_shinfo(skb)->frag_list = NULL; | ||
1401 | } | ||
1402 | } | ||
1403 | #endif | ||
1404 | |||
1371 | msg->msg_namelen = 0; | 1405 | msg->msg_namelen = 0; |
1372 | 1406 | ||
1373 | copied = skb->len; | 1407 | copied = skb->len; |
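Note: the comment in the hunk above carries the reasoning -- the skb may be cloned, so its frag_list cannot simply be discarded. A compat receiver takes its own reference on the frag skb and frees the original; a native receiver temporarily hides the frag and restores it before the skb is freed (see the later hunk). A minimal sketch of just the reference swap, assuming the caller holds one reference on skb:

	#include <linux/skbuff.h>

	static struct sk_buff *example_take_compat_skb(struct sk_buff *skb)
	{
		struct sk_buff *compskb = skb_shinfo(skb)->frag_list;

		skb_get(compskb);	/* hold the compat copy ... */
		kfree_skb(skb);		/* ... so it survives freeing its parent */
		return compskb;
	}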
@@ -1398,6 +1432,11 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1398 | siocb->scm->creds = *NETLINK_CREDS(skb); | 1432 | siocb->scm->creds = *NETLINK_CREDS(skb); |
1399 | if (flags & MSG_TRUNC) | 1433 | if (flags & MSG_TRUNC) |
1400 | copied = skb->len; | 1434 | copied = skb->len; |
1435 | |||
1436 | #ifdef CONFIG_COMPAT_NETLINK_MESSAGES | ||
1437 | skb_shinfo(skb)->frag_list = frag; | ||
1438 | #endif | ||
1439 | |||
1401 | skb_free_datagram(sk, skb); | 1440 | skb_free_datagram(sk, skb); |
1402 | 1441 | ||
1403 | if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) | 1442 | if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) |
@@ -1453,7 +1492,8 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups, | |||
1453 | if (groups < 32) | 1492 | if (groups < 32) |
1454 | groups = 32; | 1493 | groups = 32; |
1455 | 1494 | ||
1456 | listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL); | 1495 | listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head), |
1496 | GFP_KERNEL); | ||
1457 | if (!listeners) | 1497 | if (!listeners) |
1458 | goto out_sock_release; | 1498 | goto out_sock_release; |
1459 | 1499 | ||
@@ -1501,6 +1541,14 @@ netlink_kernel_release(struct sock *sk) | |||
1501 | EXPORT_SYMBOL(netlink_kernel_release); | 1541 | EXPORT_SYMBOL(netlink_kernel_release); |
1502 | 1542 | ||
1503 | 1543 | ||
1544 | static void netlink_free_old_listeners(struct rcu_head *rcu_head) | ||
1545 | { | ||
1546 | struct listeners_rcu_head *lrh; | ||
1547 | |||
1548 | lrh = container_of(rcu_head, struct listeners_rcu_head, rcu_head); | ||
1549 | kfree(lrh->ptr); | ||
1550 | } | ||
1551 | |||
1504 | /** | 1552 | /** |
1505 | * netlink_change_ngroups - change number of multicast groups | 1553 | * netlink_change_ngroups - change number of multicast groups |
1506 | * | 1554 | * |
@@ -1516,6 +1564,7 @@ EXPORT_SYMBOL(netlink_kernel_release); | |||
1516 | int netlink_change_ngroups(struct sock *sk, unsigned int groups) | 1564 | int netlink_change_ngroups(struct sock *sk, unsigned int groups) |
1517 | { | 1565 | { |
1518 | unsigned long *listeners, *old = NULL; | 1566 | unsigned long *listeners, *old = NULL; |
1567 | struct listeners_rcu_head *old_rcu_head; | ||
1519 | struct netlink_table *tbl = &nl_table[sk->sk_protocol]; | 1568 | struct netlink_table *tbl = &nl_table[sk->sk_protocol]; |
1520 | int err = 0; | 1569 | int err = 0; |
1521 | 1570 | ||
@@ -1524,7 +1573,9 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups) | |||
1524 | 1573 | ||
1525 | netlink_table_grab(); | 1574 | netlink_table_grab(); |
1526 | if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) { | 1575 | if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) { |
1527 | listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC); | 1576 | listeners = kzalloc(NLGRPSZ(groups) + |
1577 | sizeof(struct listeners_rcu_head), | ||
1578 | GFP_ATOMIC); | ||
1528 | if (!listeners) { | 1579 | if (!listeners) { |
1529 | err = -ENOMEM; | 1580 | err = -ENOMEM; |
1530 | goto out_ungrab; | 1581 | goto out_ungrab; |
@@ -1532,16 +1583,24 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups) | |||
1532 | old = tbl->listeners; | 1583 | old = tbl->listeners; |
1533 | memcpy(listeners, old, NLGRPSZ(tbl->groups)); | 1584 | memcpy(listeners, old, NLGRPSZ(tbl->groups)); |
1534 | rcu_assign_pointer(tbl->listeners, listeners); | 1585 | rcu_assign_pointer(tbl->listeners, listeners); |
1586 | /* | ||
1587 | * Free the old memory after an RCU grace period so we | ||
1588 | * don't leak it. We use call_rcu() here in order to be | ||
1589 | * able to call this function from atomic contexts. The | ||
1590 | * allocation of this memory will have reserved enough | ||
1591 | * space for struct listeners_rcu_head at the end. | ||
1592 | */ | ||
1593 | old_rcu_head = (void *)(tbl->listeners + | ||
1594 | NLGRPLONGS(tbl->groups)); | ||
1595 | old_rcu_head->ptr = old; | ||
1596 | call_rcu(&old_rcu_head->rcu_head, netlink_free_old_listeners); | ||
1535 | } | 1597 | } |
1536 | tbl->groups = groups; | 1598 | tbl->groups = groups; |
1537 | 1599 | ||
1538 | out_ungrab: | 1600 | out_ungrab: |
1539 | netlink_table_ungrab(); | 1601 | netlink_table_ungrab(); |
1540 | synchronize_rcu(); | ||
1541 | kfree(old); | ||
1542 | return err; | 1602 | return err; |
1543 | } | 1603 | } |
1544 | EXPORT_SYMBOL(netlink_change_ngroups); | ||
1545 | 1604 | ||
1546 | /** | 1605 | /** |
1547 | * netlink_clear_multicast_users - kick off multicast listeners | 1606 | * netlink_clear_multicast_users - kick off multicast listeners |
@@ -1564,7 +1623,6 @@ void netlink_clear_multicast_users(struct sock *ksk, unsigned int group) | |||
1564 | 1623 | ||
1565 | netlink_table_ungrab(); | 1624 | netlink_table_ungrab(); |
1566 | } | 1625 | } |
1567 | EXPORT_SYMBOL(netlink_clear_multicast_users); | ||
1568 | 1626 | ||
1569 | void netlink_set_nonroot(int protocol, unsigned int flags) | 1627 | void netlink_set_nonroot(int protocol, unsigned int flags) |
1570 | { | 1628 | { |
@@ -1647,7 +1705,7 @@ errout: | |||
1647 | } | 1705 | } |
1648 | 1706 | ||
1649 | int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | 1707 | int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, |
1650 | struct nlmsghdr *nlh, | 1708 | const struct nlmsghdr *nlh, |
1651 | int (*dump)(struct sk_buff *skb, | 1709 | int (*dump)(struct sk_buff *skb, |
1652 | struct netlink_callback *), | 1710 | struct netlink_callback *), |
1653 | int (*done)(struct netlink_callback *)) | 1711 | int (*done)(struct netlink_callback *)) |
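Note: netlink_change_ngroups() previously did synchronize_rcu() followed by kfree(), which sleeps; the new code reserves a struct listeners_rcu_head next to the listener bitmap and frees the old array from an RCU callback, so (per the comment above) the function can also be called from atomic context. A generic sketch of the pattern with hypothetical names; it assumes the head lives in memory that stays valid until the callback runs, which is what the reserved tail space above provides:

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct example_rcu_head {
		struct rcu_head rcu_head;
		void *ptr;		/* memory to free after the grace period */
	};

	static void example_free_old(struct rcu_head *rcu_head)
	{
		struct example_rcu_head *erh;

		erh = container_of(rcu_head, struct example_rcu_head, rcu_head);
		kfree(erh->ptr);
	}

	/* Queue "old" for freeing once all current RCU readers are done. */
	static void example_defer_kfree(struct example_rcu_head *erh, void *old)
	{
		erh->ptr = old;
		call_rcu(&erh->rcu_head, example_free_old);
	}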
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index eed4c6a8afc0..66f6ba0bab11 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -18,8 +18,6 @@ | |||
18 | #include <net/sock.h> | 18 | #include <net/sock.h> |
19 | #include <net/genetlink.h> | 19 | #include <net/genetlink.h> |
20 | 20 | ||
21 | struct sock *genl_sock = NULL; | ||
22 | |||
23 | static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ | 21 | static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ |
24 | 22 | ||
25 | static inline void genl_lock(void) | 23 | static inline void genl_lock(void) |
@@ -138,7 +136,7 @@ int genl_register_mc_group(struct genl_family *family, | |||
138 | { | 136 | { |
139 | int id; | 137 | int id; |
140 | unsigned long *new_groups; | 138 | unsigned long *new_groups; |
141 | int err; | 139 | int err = 0; |
142 | 140 | ||
143 | BUG_ON(grp->name[0] == '\0'); | 141 | BUG_ON(grp->name[0] == '\0'); |
144 | 142 | ||
@@ -175,10 +173,31 @@ int genl_register_mc_group(struct genl_family *family, | |||
175 | mc_groups_longs++; | 173 | mc_groups_longs++; |
176 | } | 174 | } |
177 | 175 | ||
178 | err = netlink_change_ngroups(genl_sock, | 176 | if (family->netnsok) { |
179 | mc_groups_longs * BITS_PER_LONG); | 177 | struct net *net; |
180 | if (err) | 178 | |
181 | goto out; | 179 | rcu_read_lock(); |
180 | for_each_net_rcu(net) { | ||
181 | err = netlink_change_ngroups(net->genl_sock, | ||
182 | mc_groups_longs * BITS_PER_LONG); | ||
183 | if (err) { | ||
184 | /* | ||
185 | * No need to roll back, can only fail if | ||
186 | * memory allocation fails and then the | ||
187 | * number of _possible_ groups has been | ||
188 | * increased on some sockets which is ok. | ||
189 | */ | ||
190 | rcu_read_unlock(); | ||
191 | goto out; | ||
192 | } | ||
193 | } | ||
194 | rcu_read_unlock(); | ||
195 | } else { | ||
196 | err = netlink_change_ngroups(init_net.genl_sock, | ||
197 | mc_groups_longs * BITS_PER_LONG); | ||
198 | if (err) | ||
199 | goto out; | ||
200 | } | ||
182 | 201 | ||
183 | grp->id = id; | 202 | grp->id = id; |
184 | set_bit(id, mc_groups); | 203 | set_bit(id, mc_groups); |
@@ -195,8 +214,14 @@ EXPORT_SYMBOL(genl_register_mc_group); | |||
195 | static void __genl_unregister_mc_group(struct genl_family *family, | 214 | static void __genl_unregister_mc_group(struct genl_family *family, |
196 | struct genl_multicast_group *grp) | 215 | struct genl_multicast_group *grp) |
197 | { | 216 | { |
217 | struct net *net; | ||
198 | BUG_ON(grp->family != family); | 218 | BUG_ON(grp->family != family); |
199 | netlink_clear_multicast_users(genl_sock, grp->id); | 219 | |
220 | rcu_read_lock(); | ||
221 | for_each_net_rcu(net) | ||
222 | netlink_clear_multicast_users(net->genl_sock, grp->id); | ||
223 | rcu_read_unlock(); | ||
224 | |||
200 | clear_bit(grp->id, mc_groups); | 225 | clear_bit(grp->id, mc_groups); |
201 | list_del(&grp->list); | 226 | list_del(&grp->list); |
202 | genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp); | 227 | genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp); |
@@ -467,6 +492,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
467 | { | 492 | { |
468 | struct genl_ops *ops; | 493 | struct genl_ops *ops; |
469 | struct genl_family *family; | 494 | struct genl_family *family; |
495 | struct net *net = sock_net(skb->sk); | ||
470 | struct genl_info info; | 496 | struct genl_info info; |
471 | struct genlmsghdr *hdr = nlmsg_data(nlh); | 497 | struct genlmsghdr *hdr = nlmsg_data(nlh); |
472 | int hdrlen, err; | 498 | int hdrlen, err; |
@@ -475,6 +501,10 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
475 | if (family == NULL) | 501 | if (family == NULL) |
476 | return -ENOENT; | 502 | return -ENOENT; |
477 | 503 | ||
504 | /* this family doesn't exist in this netns */ | ||
505 | if (!family->netnsok && !net_eq(net, &init_net)) | ||
506 | return -ENOENT; | ||
507 | |||
478 | hdrlen = GENL_HDRLEN + family->hdrsize; | 508 | hdrlen = GENL_HDRLEN + family->hdrsize; |
479 | if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) | 509 | if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) |
480 | return -EINVAL; | 510 | return -EINVAL; |
@@ -492,7 +522,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
492 | return -EOPNOTSUPP; | 522 | return -EOPNOTSUPP; |
493 | 523 | ||
494 | genl_unlock(); | 524 | genl_unlock(); |
495 | err = netlink_dump_start(genl_sock, skb, nlh, | 525 | err = netlink_dump_start(net->genl_sock, skb, nlh, |
496 | ops->dumpit, ops->done); | 526 | ops->dumpit, ops->done); |
497 | genl_lock(); | 527 | genl_lock(); |
498 | return err; | 528 | return err; |
@@ -514,6 +544,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
514 | info.genlhdr = nlmsg_data(nlh); | 544 | info.genlhdr = nlmsg_data(nlh); |
515 | info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN; | 545 | info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN; |
516 | info.attrs = family->attrbuf; | 546 | info.attrs = family->attrbuf; |
547 | genl_info_net_set(&info, net); | ||
517 | 548 | ||
518 | return ops->doit(skb, &info); | 549 | return ops->doit(skb, &info); |
519 | } | 550 | } |
@@ -534,6 +565,7 @@ static struct genl_family genl_ctrl = { | |||
534 | .name = "nlctrl", | 565 | .name = "nlctrl", |
535 | .version = 0x2, | 566 | .version = 0x2, |
536 | .maxattr = CTRL_ATTR_MAX, | 567 | .maxattr = CTRL_ATTR_MAX, |
568 | .netnsok = true, | ||
537 | }; | 569 | }; |
538 | 570 | ||
539 | static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, | 571 | static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, |
@@ -650,6 +682,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb) | |||
650 | 682 | ||
651 | int i, n = 0; | 683 | int i, n = 0; |
652 | struct genl_family *rt; | 684 | struct genl_family *rt; |
685 | struct net *net = sock_net(skb->sk); | ||
653 | int chains_to_skip = cb->args[0]; | 686 | int chains_to_skip = cb->args[0]; |
654 | int fams_to_skip = cb->args[1]; | 687 | int fams_to_skip = cb->args[1]; |
655 | 688 | ||
@@ -658,6 +691,8 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb) | |||
658 | continue; | 691 | continue; |
659 | n = 0; | 692 | n = 0; |
660 | list_for_each_entry(rt, genl_family_chain(i), family_list) { | 693 | list_for_each_entry(rt, genl_family_chain(i), family_list) { |
694 | if (!rt->netnsok && !net_eq(net, &init_net)) | ||
695 | continue; | ||
661 | if (++n < fams_to_skip) | 696 | if (++n < fams_to_skip) |
662 | continue; | 697 | continue; |
663 | if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).pid, | 698 | if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).pid, |
@@ -729,6 +764,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info) | |||
729 | if (info->attrs[CTRL_ATTR_FAMILY_ID]) { | 764 | if (info->attrs[CTRL_ATTR_FAMILY_ID]) { |
730 | u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]); | 765 | u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]); |
731 | res = genl_family_find_byid(id); | 766 | res = genl_family_find_byid(id); |
767 | err = -ENOENT; | ||
732 | } | 768 | } |
733 | 769 | ||
734 | if (info->attrs[CTRL_ATTR_FAMILY_NAME]) { | 770 | if (info->attrs[CTRL_ATTR_FAMILY_NAME]) { |
@@ -736,49 +772,61 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info) | |||
736 | 772 | ||
737 | name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]); | 773 | name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]); |
738 | res = genl_family_find_byname(name); | 774 | res = genl_family_find_byname(name); |
775 | err = -ENOENT; | ||
739 | } | 776 | } |
740 | 777 | ||
741 | if (res == NULL) { | 778 | if (res == NULL) |
742 | err = -ENOENT; | 779 | return err; |
743 | goto errout; | 780 | |
781 | if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) { | ||
782 | /* family doesn't exist here */ | ||
783 | return -ENOENT; | ||
744 | } | 784 | } |
745 | 785 | ||
746 | msg = ctrl_build_family_msg(res, info->snd_pid, info->snd_seq, | 786 | msg = ctrl_build_family_msg(res, info->snd_pid, info->snd_seq, |
747 | CTRL_CMD_NEWFAMILY); | 787 | CTRL_CMD_NEWFAMILY); |
748 | if (IS_ERR(msg)) { | 788 | if (IS_ERR(msg)) |
749 | err = PTR_ERR(msg); | 789 | return PTR_ERR(msg); |
750 | goto errout; | ||
751 | } | ||
752 | 790 | ||
753 | err = genlmsg_reply(msg, info); | 791 | return genlmsg_reply(msg, info); |
754 | errout: | ||
755 | return err; | ||
756 | } | 792 | } |
757 | 793 | ||
758 | static int genl_ctrl_event(int event, void *data) | 794 | static int genl_ctrl_event(int event, void *data) |
759 | { | 795 | { |
760 | struct sk_buff *msg; | 796 | struct sk_buff *msg; |
797 | struct genl_family *family; | ||
798 | struct genl_multicast_group *grp; | ||
761 | 799 | ||
762 | if (genl_sock == NULL) | 800 | /* genl is still initialising */ |
801 | if (!init_net.genl_sock) | ||
763 | return 0; | 802 | return 0; |
764 | 803 | ||
765 | switch (event) { | 804 | switch (event) { |
766 | case CTRL_CMD_NEWFAMILY: | 805 | case CTRL_CMD_NEWFAMILY: |
767 | case CTRL_CMD_DELFAMILY: | 806 | case CTRL_CMD_DELFAMILY: |
768 | msg = ctrl_build_family_msg(data, 0, 0, event); | 807 | family = data; |
769 | if (IS_ERR(msg)) | 808 | msg = ctrl_build_family_msg(family, 0, 0, event); |
770 | return PTR_ERR(msg); | ||
771 | |||
772 | genlmsg_multicast(msg, 0, GENL_ID_CTRL, GFP_KERNEL); | ||
773 | break; | 809 | break; |
774 | case CTRL_CMD_NEWMCAST_GRP: | 810 | case CTRL_CMD_NEWMCAST_GRP: |
775 | case CTRL_CMD_DELMCAST_GRP: | 811 | case CTRL_CMD_DELMCAST_GRP: |
812 | grp = data; | ||
813 | family = grp->family; | ||
776 | msg = ctrl_build_mcgrp_msg(data, 0, 0, event); | 814 | msg = ctrl_build_mcgrp_msg(data, 0, 0, event); |
777 | if (IS_ERR(msg)) | ||
778 | return PTR_ERR(msg); | ||
779 | |||
780 | genlmsg_multicast(msg, 0, GENL_ID_CTRL, GFP_KERNEL); | ||
781 | break; | 815 | break; |
816 | default: | ||
817 | return -EINVAL; | ||
818 | } | ||
819 | |||
820 | if (IS_ERR(msg)) | ||
821 | return PTR_ERR(msg); | ||
822 | |||
823 | if (!family->netnsok) { | ||
824 | genlmsg_multicast_netns(&init_net, msg, 0, | ||
825 | GENL_ID_CTRL, GFP_KERNEL); | ||
826 | } else { | ||
827 | rcu_read_lock(); | ||
828 | genlmsg_multicast_allns(msg, 0, GENL_ID_CTRL, GFP_ATOMIC); | ||
829 | rcu_read_unlock(); | ||
782 | } | 830 | } |
783 | 831 | ||
784 | return 0; | 832 | return 0; |
@@ -795,6 +843,33 @@ static struct genl_multicast_group notify_grp = { | |||
795 | .name = "notify", | 843 | .name = "notify", |
796 | }; | 844 | }; |
797 | 845 | ||
846 | static int __net_init genl_pernet_init(struct net *net) | ||
847 | { | ||
848 | /* we'll bump the group number right afterwards */ | ||
849 | net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, 0, | ||
850 | genl_rcv, &genl_mutex, | ||
851 | THIS_MODULE); | ||
852 | |||
853 | if (!net->genl_sock && net_eq(net, &init_net)) | ||
854 | panic("GENL: Cannot initialize generic netlink\n"); | ||
855 | |||
856 | if (!net->genl_sock) | ||
857 | return -ENOMEM; | ||
858 | |||
859 | return 0; | ||
860 | } | ||
861 | |||
862 | static void __net_exit genl_pernet_exit(struct net *net) | ||
863 | { | ||
864 | netlink_kernel_release(net->genl_sock); | ||
865 | net->genl_sock = NULL; | ||
866 | } | ||
867 | |||
868 | static struct pernet_operations genl_pernet_ops = { | ||
869 | .init = genl_pernet_init, | ||
870 | .exit = genl_pernet_exit, | ||
871 | }; | ||
872 | |||
798 | static int __init genl_init(void) | 873 | static int __init genl_init(void) |
799 | { | 874 | { |
800 | int i, err; | 875 | int i, err; |
@@ -804,36 +879,67 @@ static int __init genl_init(void) | |||
804 | 879 | ||
805 | err = genl_register_family(&genl_ctrl); | 880 | err = genl_register_family(&genl_ctrl); |
806 | if (err < 0) | 881 | if (err < 0) |
807 | goto errout; | 882 | goto problem; |
808 | 883 | ||
809 | err = genl_register_ops(&genl_ctrl, &genl_ctrl_ops); | 884 | err = genl_register_ops(&genl_ctrl, &genl_ctrl_ops); |
810 | if (err < 0) | 885 | if (err < 0) |
811 | goto errout_register; | 886 | goto problem; |
812 | 887 | ||
813 | netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV); | 888 | netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV); |
814 | 889 | ||
815 | /* we'll bump the group number right afterwards */ | 890 | err = register_pernet_subsys(&genl_pernet_ops); |
816 | genl_sock = netlink_kernel_create(&init_net, NETLINK_GENERIC, 0, | 891 | if (err) |
817 | genl_rcv, &genl_mutex, THIS_MODULE); | 892 | goto problem; |
818 | if (genl_sock == NULL) | ||
819 | panic("GENL: Cannot initialize generic netlink\n"); | ||
820 | 893 | ||
821 | err = genl_register_mc_group(&genl_ctrl, ¬ify_grp); | 894 | err = genl_register_mc_group(&genl_ctrl, ¬ify_grp); |
822 | if (err < 0) | 895 | if (err < 0) |
823 | goto errout_register; | 896 | goto problem; |
824 | 897 | ||
825 | return 0; | 898 | return 0; |
826 | 899 | ||
827 | errout_register: | 900 | problem: |
828 | genl_unregister_family(&genl_ctrl); | ||
829 | errout: | ||
830 | panic("GENL: Cannot register controller: %d\n", err); | 901 | panic("GENL: Cannot register controller: %d\n", err); |
831 | } | 902 | } |
832 | 903 | ||
833 | subsys_initcall(genl_init); | 904 | subsys_initcall(genl_init); |
834 | 905 | ||
835 | EXPORT_SYMBOL(genl_sock); | ||
836 | EXPORT_SYMBOL(genl_register_ops); | 906 | EXPORT_SYMBOL(genl_register_ops); |
837 | EXPORT_SYMBOL(genl_unregister_ops); | 907 | EXPORT_SYMBOL(genl_unregister_ops); |
838 | EXPORT_SYMBOL(genl_register_family); | 908 | EXPORT_SYMBOL(genl_register_family); |
839 | EXPORT_SYMBOL(genl_unregister_family); | 909 | EXPORT_SYMBOL(genl_unregister_family); |
910 | |||
911 | static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group, | ||
912 | gfp_t flags) | ||
913 | { | ||
914 | struct sk_buff *tmp; | ||
915 | struct net *net, *prev = NULL; | ||
916 | int err; | ||
917 | |||
918 | for_each_net_rcu(net) { | ||
919 | if (prev) { | ||
920 | tmp = skb_clone(skb, flags); | ||
921 | if (!tmp) { | ||
922 | err = -ENOMEM; | ||
923 | goto error; | ||
924 | } | ||
925 | err = nlmsg_multicast(prev->genl_sock, tmp, | ||
926 | pid, group, flags); | ||
927 | if (err) | ||
928 | goto error; | ||
929 | } | ||
930 | |||
931 | prev = net; | ||
932 | } | ||
933 | |||
934 | return nlmsg_multicast(prev->genl_sock, skb, pid, group, flags); | ||
935 | error: | ||
936 | kfree_skb(skb); | ||
937 | return err; | ||
938 | } | ||
939 | |||
940 | int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, unsigned int group, | ||
941 | gfp_t flags) | ||
942 | { | ||
943 | return genlmsg_mcast(skb, pid, group, flags); | ||
944 | } | ||
945 | EXPORT_SYMBOL(genlmsg_multicast_allns); | ||
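Note: the generic netlink core moves from one global socket to one per network namespace: genl_pernet_init()/genl_pernet_exit() run for every namespace via register_pernet_subsys(), families opt in with .netnsok, and control notifications reach all namespaces through the new genlmsg_multicast_allns(). A minimal sketch of the pernet_operations shape, with hypothetical names:

	#include <linux/module.h>
	#include <net/net_namespace.h>

	static int __net_init example_pernet_init(struct net *net)
	{
		/* allocate or register whatever this subsystem needs in @net */
		return 0;
	}

	static void __net_exit example_pernet_exit(struct net *net)
	{
		/* tear down the per-namespace state */
	}

	static struct pernet_operations example_pernet_ops = {
		.init = example_pernet_init,
		.exit = example_pernet_exit,
	};

	static int __init example_init(void)
	{
		/* runs for the initial namespace and every one created later */
		return register_pernet_subsys(&example_pernet_ops);
	}

	static void __exit example_exit(void)
	{
		unregister_pernet_subsys(&example_pernet_ops);
	}

	module_init(example_init);
	module_exit(example_exit);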
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 351372463fed..7aa11b01b2e2 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c | |||
@@ -169,7 +169,7 @@ static int nr_close(struct net_device *dev) | |||
169 | return 0; | 169 | return 0; |
170 | } | 170 | } |
171 | 171 | ||
172 | static int nr_xmit(struct sk_buff *skb, struct net_device *dev) | 172 | static netdev_tx_t nr_xmit(struct sk_buff *skb, struct net_device *dev) |
173 | { | 173 | { |
174 | struct net_device_stats *stats = &dev->stats; | 174 | struct net_device_stats *stats = &dev->stats; |
175 | unsigned int len = skb->len; | 175 | unsigned int len = skb->len; |
@@ -177,13 +177,13 @@ static int nr_xmit(struct sk_buff *skb, struct net_device *dev) | |||
177 | if (!nr_route_frame(skb, NULL)) { | 177 | if (!nr_route_frame(skb, NULL)) { |
178 | kfree_skb(skb); | 178 | kfree_skb(skb); |
179 | stats->tx_errors++; | 179 | stats->tx_errors++; |
180 | return 0; | 180 | return NETDEV_TX_OK; |
181 | } | 181 | } |
182 | 182 | ||
183 | stats->tx_packets++; | 183 | stats->tx_packets++; |
184 | stats->tx_bytes += len; | 184 | stats->tx_bytes += len; |
185 | 185 | ||
186 | return 0; | 186 | return NETDEV_TX_OK; |
187 | } | 187 | } |
188 | 188 | ||
189 | static const struct header_ops nr_header_ops = { | 189 | static const struct header_ops nr_header_ops = { |
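Note: nr_xmit() adopts the netdev_tx_t return type and NETDEV_TX_OK in place of bare 0, matching the ndo_start_xmit prototype. A minimal hedged sketch of a transmit handler in that shape (names hypothetical, the frame is simply consumed here instead of being handed to hardware):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct net_device_stats *stats = &dev->stats;

		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		dev_kfree_skb(skb);	/* the skb is always consumed */

		return NETDEV_TX_OK;	/* NETDEV_TX_BUSY would request a requeue */
	}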
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ebe5718baa31..d3d52c66cdc2 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -137,8 +137,7 @@ dev->hard_header == NULL (ll header is added by device, we cannot control it) | |||
137 | 137 | ||
138 | /* Private packet socket structures. */ | 138 | /* Private packet socket structures. */ |
139 | 139 | ||
140 | struct packet_mclist | 140 | struct packet_mclist { |
141 | { | ||
142 | struct packet_mclist *next; | 141 | struct packet_mclist *next; |
143 | int ifindex; | 142 | int ifindex; |
144 | int count; | 143 | int count; |
@@ -149,8 +148,7 @@ struct packet_mclist | |||
149 | /* identical to struct packet_mreq except it has | 148 | /* identical to struct packet_mreq except it has |
150 | * a longer address field. | 149 | * a longer address field. |
151 | */ | 150 | */ |
152 | struct packet_mreq_max | 151 | struct packet_mreq_max { |
153 | { | ||
154 | int mr_ifindex; | 152 | int mr_ifindex; |
155 | unsigned short mr_type; | 153 | unsigned short mr_type; |
156 | unsigned short mr_alen; | 154 | unsigned short mr_alen; |
@@ -162,7 +160,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, | |||
162 | int closing, int tx_ring); | 160 | int closing, int tx_ring); |
163 | 161 | ||
164 | struct packet_ring_buffer { | 162 | struct packet_ring_buffer { |
165 | char * *pg_vec; | 163 | char **pg_vec; |
166 | unsigned int head; | 164 | unsigned int head; |
167 | unsigned int frames_per_block; | 165 | unsigned int frames_per_block; |
168 | unsigned int frame_size; | 166 | unsigned int frame_size; |
@@ -239,7 +237,7 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status) | |||
239 | flush_dcache_page(virt_to_page(&h.h2->tp_status)); | 237 | flush_dcache_page(virt_to_page(&h.h2->tp_status)); |
240 | break; | 238 | break; |
241 | default: | 239 | default: |
242 | printk(KERN_ERR "TPACKET version not supported\n"); | 240 | pr_err("TPACKET version not supported\n"); |
243 | BUG(); | 241 | BUG(); |
244 | } | 242 | } |
245 | 243 | ||
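Note: most of the af_packet.c hunks are style cleanups; the printk(KERN_ERR ...) calls become pr_err(), which folds the log level into the helper and keeps the format string on one line. A tiny sketch -- the pr_fmt() prefix shown is optional and hypothetical here:

	/* optionally prefix every message from this file */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>

	static void example_report(int version)
	{
		/* expands to printk(KERN_ERR pr_fmt("TPACKET version ...")) */
		pr_err("TPACKET version %d not supported\n", version);
	}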
@@ -265,7 +263,7 @@ static int __packet_get_status(struct packet_sock *po, void *frame) | |||
265 | flush_dcache_page(virt_to_page(&h.h2->tp_status)); | 263 | flush_dcache_page(virt_to_page(&h.h2->tp_status)); |
266 | return h.h2->tp_status; | 264 | return h.h2->tp_status; |
267 | default: | 265 | default: |
268 | printk(KERN_ERR "TPACKET version not supported\n"); | 266 | pr_err("TPACKET version not supported\n"); |
269 | BUG(); | 267 | BUG(); |
270 | return 0; | 268 | return 0; |
271 | } | 269 | } |
@@ -327,7 +325,7 @@ static void packet_sock_destruct(struct sock *sk) | |||
327 | WARN_ON(atomic_read(&sk->sk_wmem_alloc)); | 325 | WARN_ON(atomic_read(&sk->sk_wmem_alloc)); |
328 | 326 | ||
329 | if (!sock_flag(sk, SOCK_DEAD)) { | 327 | if (!sock_flag(sk, SOCK_DEAD)) { |
330 | printk("Attempt to release alive packet socket: %p\n", sk); | 328 | pr_err("Attempt to release alive packet socket: %p\n", sk); |
331 | return; | 329 | return; |
332 | } | 330 | } |
333 | 331 | ||
@@ -339,7 +337,8 @@ static const struct proto_ops packet_ops; | |||
339 | 337 | ||
340 | static const struct proto_ops packet_ops_spkt; | 338 | static const struct proto_ops packet_ops_spkt; |
341 | 339 | ||
342 | static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) | 340 | static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, |
341 | struct packet_type *pt, struct net_device *orig_dev) | ||
343 | { | 342 | { |
344 | struct sock *sk; | 343 | struct sock *sk; |
345 | struct sockaddr_pkt *spkt; | 344 | struct sockaddr_pkt *spkt; |
@@ -368,7 +367,8 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct | |||
368 | if (dev_net(dev) != sock_net(sk)) | 367 | if (dev_net(dev) != sock_net(sk)) |
369 | goto out; | 368 | goto out; |
370 | 369 | ||
371 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) | 370 | skb = skb_share_check(skb, GFP_ATOMIC); |
371 | if (skb == NULL) | ||
372 | goto oom; | 372 | goto oom; |
373 | 373 | ||
374 | /* drop any routing info */ | 374 | /* drop any routing info */ |
@@ -394,7 +394,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct | |||
394 | * to prevent sockets using all the memory up. | 394 | * to prevent sockets using all the memory up. |
395 | */ | 395 | */ |
396 | 396 | ||
397 | if (sock_queue_rcv_skb(sk,skb) == 0) | 397 | if (sock_queue_rcv_skb(sk, skb) == 0) |
398 | return 0; | 398 | return 0; |
399 | 399 | ||
400 | out: | 400 | out: |
@@ -413,25 +413,23 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
413 | struct msghdr *msg, size_t len) | 413 | struct msghdr *msg, size_t len) |
414 | { | 414 | { |
415 | struct sock *sk = sock->sk; | 415 | struct sock *sk = sock->sk; |
416 | struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name; | 416 | struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name; |
417 | struct sk_buff *skb; | 417 | struct sk_buff *skb; |
418 | struct net_device *dev; | 418 | struct net_device *dev; |
419 | __be16 proto=0; | 419 | __be16 proto = 0; |
420 | int err; | 420 | int err; |
421 | 421 | ||
422 | /* | 422 | /* |
423 | * Get and verify the address. | 423 | * Get and verify the address. |
424 | */ | 424 | */ |
425 | 425 | ||
426 | if (saddr) | 426 | if (saddr) { |
427 | { | ||
428 | if (msg->msg_namelen < sizeof(struct sockaddr)) | 427 | if (msg->msg_namelen < sizeof(struct sockaddr)) |
429 | return(-EINVAL); | 428 | return -EINVAL; |
430 | if (msg->msg_namelen==sizeof(struct sockaddr_pkt)) | 429 | if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) |
431 | proto=saddr->spkt_protocol; | 430 | proto = saddr->spkt_protocol; |
432 | } | 431 | } else |
433 | else | 432 | return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */ |
434 | return(-ENOTCONN); /* SOCK_PACKET must be sent giving an address */ | ||
435 | 433 | ||
436 | /* | 434 | /* |
437 | * Find the device first to size check it | 435 | * Find the device first to size check it |
@@ -448,8 +446,8 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
448 | goto out_unlock; | 446 | goto out_unlock; |
449 | 447 | ||
450 | /* | 448 | /* |
451 | * You may not queue a frame bigger than the mtu. This is the lowest level | 449 | * You may not queue a frame bigger than the mtu. This is the lowest level |
452 | * raw protocol and you must do your own fragmentation at this level. | 450 | * raw protocol and you must do your own fragmentation at this level. |
453 | */ | 451 | */ |
454 | 452 | ||
455 | err = -EMSGSIZE; | 453 | err = -EMSGSIZE; |
@@ -460,9 +458,9 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
460 | skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL); | 458 | skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL); |
461 | 459 | ||
462 | /* | 460 | /* |
463 | * If the write buffer is full, then tough. At this level the user gets to | 461 | * If the write buffer is full, then tough. At this level the user |
464 | * deal with the problem - do your own algorithmic backoffs. That's far | 462 | * gets to deal with the problem - do your own algorithmic backoffs. |
465 | * more flexible. | 463 | * That's far more flexible. |
466 | */ | 464 | */ |
467 | 465 | ||
468 | if (skb == NULL) | 466 | if (skb == NULL) |
@@ -488,7 +486,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
488 | } | 486 | } |
489 | 487 | ||
490 | /* Returns -EFAULT on error */ | 488 | /* Returns -EFAULT on error */ |
491 | err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); | 489 | err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); |
492 | skb->protocol = proto; | 490 | skb->protocol = proto; |
493 | skb->dev = dev; | 491 | skb->dev = dev; |
494 | skb->priority = sk->sk_priority; | 492 | skb->priority = sk->sk_priority; |
@@ -501,7 +499,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
501 | 499 | ||
502 | dev_queue_xmit(skb); | 500 | dev_queue_xmit(skb); |
503 | dev_put(dev); | 501 | dev_put(dev); |
504 | return(len); | 502 | return len; |
505 | 503 | ||
506 | out_free: | 504 | out_free: |
507 | kfree_skb(skb); | 505 | kfree_skb(skb); |
@@ -537,12 +535,13 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk, | |||
537 | we will not harm anyone. | 535 | we will not harm anyone. |
538 | */ | 536 | */ |
539 | 537 | ||
540 | static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) | 538 | static int packet_rcv(struct sk_buff *skb, struct net_device *dev, |
539 | struct packet_type *pt, struct net_device *orig_dev) | ||
541 | { | 540 | { |
542 | struct sock *sk; | 541 | struct sock *sk; |
543 | struct sockaddr_ll *sll; | 542 | struct sockaddr_ll *sll; |
544 | struct packet_sock *po; | 543 | struct packet_sock *po; |
545 | u8 * skb_head = skb->data; | 544 | u8 *skb_head = skb->data; |
546 | int skb_len = skb->len; | 545 | int skb_len = skb->len; |
547 | unsigned int snaplen, res; | 546 | unsigned int snaplen, res; |
548 | 547 | ||
@@ -648,7 +647,8 @@ drop: | |||
648 | } | 647 | } |
649 | 648 | ||
650 | #ifdef CONFIG_PACKET_MMAP | 649 | #ifdef CONFIG_PACKET_MMAP |
651 | static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) | 650 | static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, |
651 | struct packet_type *pt, struct net_device *orig_dev) | ||
652 | { | 652 | { |
653 | struct sock *sk; | 653 | struct sock *sk; |
654 | struct packet_sock *po; | 654 | struct packet_sock *po; |
@@ -658,7 +658,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe | |||
658 | struct tpacket2_hdr *h2; | 658 | struct tpacket2_hdr *h2; |
659 | void *raw; | 659 | void *raw; |
660 | } h; | 660 | } h; |
661 | u8 * skb_head = skb->data; | 661 | u8 *skb_head = skb->data; |
662 | int skb_len = skb->len; | 662 | int skb_len = skb->len; |
663 | unsigned int snaplen, res; | 663 | unsigned int snaplen, res; |
664 | unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER; | 664 | unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER; |
@@ -821,7 +821,7 @@ ring_is_full: | |||
821 | static void tpacket_destruct_skb(struct sk_buff *skb) | 821 | static void tpacket_destruct_skb(struct sk_buff *skb) |
822 | { | 822 | { |
823 | struct packet_sock *po = pkt_sk(skb->sk); | 823 | struct packet_sock *po = pkt_sk(skb->sk); |
824 | void * ph; | 824 | void *ph; |
825 | 825 | ||
826 | BUG_ON(skb == NULL); | 826 | BUG_ON(skb == NULL); |
827 | 827 | ||
@@ -836,9 +836,9 @@ static void tpacket_destruct_skb(struct sk_buff *skb) | |||
836 | sock_wfree(skb); | 836 | sock_wfree(skb); |
837 | } | 837 | } |
838 | 838 | ||
839 | static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb, | 839 | static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, |
840 | void * frame, struct net_device *dev, int size_max, | 840 | void *frame, struct net_device *dev, int size_max, |
841 | __be16 proto, unsigned char * addr) | 841 | __be16 proto, unsigned char *addr) |
842 | { | 842 | { |
843 | union { | 843 | union { |
844 | struct tpacket_hdr *h1; | 844 | struct tpacket_hdr *h1; |
@@ -867,8 +867,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb, | |||
867 | break; | 867 | break; |
868 | } | 868 | } |
869 | if (unlikely(tp_len > size_max)) { | 869 | if (unlikely(tp_len > size_max)) { |
870 | printk(KERN_ERR "packet size is too long (%d > %d)\n", | 870 | pr_err("packet size is too long (%d > %d)\n", tp_len, size_max); |
871 | tp_len, size_max); | ||
872 | return -EMSGSIZE; | 871 | return -EMSGSIZE; |
873 | } | 872 | } |
874 | 873 | ||
@@ -883,12 +882,11 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb, | |||
883 | NULL, tp_len); | 882 | NULL, tp_len); |
884 | if (unlikely(err < 0)) | 883 | if (unlikely(err < 0)) |
885 | return -EINVAL; | 884 | return -EINVAL; |
886 | } else if (dev->hard_header_len ) { | 885 | } else if (dev->hard_header_len) { |
887 | /* net device doesn't like empty head */ | 886 | /* net device doesn't like empty head */ |
888 | if (unlikely(tp_len <= dev->hard_header_len)) { | 887 | if (unlikely(tp_len <= dev->hard_header_len)) { |
889 | printk(KERN_ERR "packet size is too short " | 888 | pr_err("packet size is too short (%d < %d)\n", |
890 | "(%d < %d)\n", tp_len, | 889 | tp_len, dev->hard_header_len); |
891 | dev->hard_header_len); | ||
892 | return -EINVAL; | 890 | return -EINVAL; |
893 | } | 891 | } |
894 | 892 | ||
@@ -917,9 +915,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb, | |||
917 | nr_frags = skb_shinfo(skb)->nr_frags; | 915 | nr_frags = skb_shinfo(skb)->nr_frags; |
918 | 916 | ||
919 | if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { | 917 | if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { |
920 | printk(KERN_ERR "Packet exceed the number " | 918 | pr_err("Packet exceed the number of skb frags(%lu)\n", |
921 | "of skb frags(%lu)\n", | 919 | MAX_SKB_FRAGS); |
922 | MAX_SKB_FRAGS); | ||
923 | return -EFAULT; | 920 | return -EFAULT; |
924 | } | 921 | } |
925 | 922 | ||
@@ -944,8 +941,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |||
944 | struct net_device *dev; | 941 | struct net_device *dev; |
945 | __be16 proto; | 942 | __be16 proto; |
946 | int ifindex, err, reserve = 0; | 943 | int ifindex, err, reserve = 0; |
947 | void * ph; | 944 | void *ph; |
948 | struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name; | 945 | struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; |
949 | int tp_len, size_max; | 946 | int tp_len, size_max; |
950 | unsigned char *addr; | 947 | unsigned char *addr; |
951 | int len_sum = 0; | 948 | int len_sum = 0; |
@@ -1038,8 +1035,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |||
1038 | goto out_xmit; | 1035 | goto out_xmit; |
1039 | packet_increment_head(&po->tx_ring); | 1036 | packet_increment_head(&po->tx_ring); |
1040 | len_sum += tp_len; | 1037 | len_sum += tp_len; |
1041 | } | 1038 | } while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT)) |
1042 | while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT)) | ||
1043 | && (atomic_read(&po->tx_ring.pending)))) | 1039 | && (atomic_read(&po->tx_ring.pending)))) |
1044 | ); | 1040 | ); |
1045 | 1041 | ||
@@ -1064,7 +1060,7 @@ static int packet_snd(struct socket *sock, | |||
1064 | struct msghdr *msg, size_t len) | 1060 | struct msghdr *msg, size_t len) |
1065 | { | 1061 | { |
1066 | struct sock *sk = sock->sk; | 1062 | struct sock *sk = sock->sk; |
1067 | struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name; | 1063 | struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; |
1068 | struct sk_buff *skb; | 1064 | struct sk_buff *skb; |
1069 | struct net_device *dev; | 1065 | struct net_device *dev; |
1070 | __be16 proto; | 1066 | __be16 proto; |
@@ -1110,7 +1106,7 @@ static int packet_snd(struct socket *sock, | |||
1110 | 1106 | ||
1111 | skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev), | 1107 | skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev), |
1112 | msg->msg_flags & MSG_DONTWAIT, &err); | 1108 | msg->msg_flags & MSG_DONTWAIT, &err); |
1113 | if (skb==NULL) | 1109 | if (skb == NULL) |
1114 | goto out_unlock; | 1110 | goto out_unlock; |
1115 | 1111 | ||
1116 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); | 1112 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); |
@@ -1122,7 +1118,7 @@ static int packet_snd(struct socket *sock, | |||
1122 | goto out_free; | 1118 | goto out_free; |
1123 | 1119 | ||
1124 | /* Returns -EFAULT on error */ | 1120 | /* Returns -EFAULT on error */ |
1125 | err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); | 1121 | err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); |
1126 | if (err) | 1122 | if (err) |
1127 | goto out_free; | 1123 | goto out_free; |
1128 | 1124 | ||
@@ -1140,7 +1136,7 @@ static int packet_snd(struct socket *sock, | |||
1140 | 1136 | ||
1141 | dev_put(dev); | 1137 | dev_put(dev); |
1142 | 1138 | ||
1143 | return(len); | 1139 | return len; |
1144 | 1140 | ||
1145 | out_free: | 1141 | out_free: |
1146 | kfree_skb(skb); | 1142 | kfree_skb(skb); |
@@ -1283,9 +1279,10 @@ out_unlock: | |||
1283 | * Bind a packet socket to a device | 1279 | * Bind a packet socket to a device |
1284 | */ | 1280 | */ |
1285 | 1281 | ||
1286 | static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len) | 1282 | static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, |
1283 | int addr_len) | ||
1287 | { | 1284 | { |
1288 | struct sock *sk=sock->sk; | 1285 | struct sock *sk = sock->sk; |
1289 | char name[15]; | 1286 | char name[15]; |
1290 | struct net_device *dev; | 1287 | struct net_device *dev; |
1291 | int err = -ENODEV; | 1288 | int err = -ENODEV; |
@@ -1296,7 +1293,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add | |||
1296 | 1293 | ||
1297 | if (addr_len != sizeof(struct sockaddr)) | 1294 | if (addr_len != sizeof(struct sockaddr)) |
1298 | return -EINVAL; | 1295 | return -EINVAL; |
1299 | strlcpy(name,uaddr->sa_data,sizeof(name)); | 1296 | strlcpy(name, uaddr->sa_data, sizeof(name)); |
1300 | 1297 | ||
1301 | dev = dev_get_by_name(sock_net(sk), name); | 1298 | dev = dev_get_by_name(sock_net(sk), name); |
1302 | if (dev) { | 1299 | if (dev) { |
@@ -1308,8 +1305,8 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add | |||
1308 | 1305 | ||
1309 | static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | 1306 | static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
1310 | { | 1307 | { |
1311 | struct sockaddr_ll *sll = (struct sockaddr_ll*)uaddr; | 1308 | struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; |
1312 | struct sock *sk=sock->sk; | 1309 | struct sock *sk = sock->sk; |
1313 | struct net_device *dev = NULL; | 1310 | struct net_device *dev = NULL; |
1314 | int err; | 1311 | int err; |
1315 | 1312 | ||
@@ -1404,7 +1401,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol) | |||
1404 | sk_add_node(sk, &net->packet.sklist); | 1401 | sk_add_node(sk, &net->packet.sklist); |
1405 | sock_prot_inuse_add(net, &packet_proto, 1); | 1402 | sock_prot_inuse_add(net, &packet_proto, 1); |
1406 | write_unlock_bh(&net->packet.sklist_lock); | 1403 | write_unlock_bh(&net->packet.sklist_lock); |
1407 | return(0); | 1404 | return 0; |
1408 | out: | 1405 | out: |
1409 | return err; | 1406 | return err; |
1410 | } | 1407 | } |
@@ -1441,7 +1438,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1441 | * but then it will block. | 1438 | * but then it will block. |
1442 | */ | 1439 | */ |
1443 | 1440 | ||
1444 | skb=skb_recv_datagram(sk,flags,flags&MSG_DONTWAIT,&err); | 1441 | skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); |
1445 | 1442 | ||
1446 | /* | 1443 | /* |
1447 | * An error occurred so return it. Because skb_recv_datagram() | 1444 | * An error occurred so return it. Because skb_recv_datagram() |
@@ -1469,10 +1466,9 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1469 | */ | 1466 | */ |
1470 | 1467 | ||
1471 | copied = skb->len; | 1468 | copied = skb->len; |
1472 | if (copied > len) | 1469 | if (copied > len) { |
1473 | { | 1470 | copied = len; |
1474 | copied=len; | 1471 | msg->msg_flags |= MSG_TRUNC; |
1475 | msg->msg_flags|=MSG_TRUNC; | ||
1476 | } | 1472 | } |
1477 | 1473 | ||
1478 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | 1474 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
@@ -1539,7 +1535,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1539 | struct net_device *dev; | 1535 | struct net_device *dev; |
1540 | struct sock *sk = sock->sk; | 1536 | struct sock *sk = sock->sk; |
1541 | struct packet_sock *po = pkt_sk(sk); | 1537 | struct packet_sock *po = pkt_sk(sk); |
1542 | struct sockaddr_ll *sll = (struct sockaddr_ll*)uaddr; | 1538 | struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; |
1543 | 1539 | ||
1544 | if (peer) | 1540 | if (peer) |
1545 | return -EOPNOTSUPP; | 1541 | return -EOPNOTSUPP; |
@@ -1584,14 +1580,15 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
1584 | else | 1580 | else |
1585 | return dev_unicast_delete(dev, i->addr); | 1581 | return dev_unicast_delete(dev, i->addr); |
1586 | break; | 1582 | break; |
1587 | default:; | 1583 | default: |
1584 | break; | ||
1588 | } | 1585 | } |
1589 | return 0; | 1586 | return 0; |
1590 | } | 1587 | } |
1591 | 1588 | ||
1592 | static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) | 1589 | static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) |
1593 | { | 1590 | { |
1594 | for ( ; i; i=i->next) { | 1591 | for ( ; i; i = i->next) { |
1595 | if (i->ifindex == dev->ifindex) | 1592 | if (i->ifindex == dev->ifindex) |
1596 | packet_dev_mc(dev, i, what); | 1593 | packet_dev_mc(dev, i, what); |
1597 | } | 1594 | } |
@@ -1693,7 +1690,8 @@ static void packet_flush_mclist(struct sock *sk) | |||
1693 | struct net_device *dev; | 1690 | struct net_device *dev; |
1694 | 1691 | ||
1695 | po->mclist = ml->next; | 1692 | po->mclist = ml->next; |
1696 | if ((dev = dev_get_by_index(sock_net(sk), ml->ifindex)) != NULL) { | 1693 | dev = dev_get_by_index(sock_net(sk), ml->ifindex); |
1694 | if (dev != NULL) { | ||
1697 | packet_dev_mc(dev, ml, -1); | 1695 | packet_dev_mc(dev, ml, -1); |
1698 | dev_put(dev); | 1696 | dev_put(dev); |
1699 | } | 1697 | } |
@@ -1723,7 +1721,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
1723 | return -EINVAL; | 1721 | return -EINVAL; |
1724 | if (len > sizeof(mreq)) | 1722 | if (len > sizeof(mreq)) |
1725 | len = sizeof(mreq); | 1723 | len = sizeof(mreq); |
1726 | if (copy_from_user(&mreq,optval,len)) | 1724 | if (copy_from_user(&mreq, optval, len)) |
1727 | return -EFAULT; | 1725 | return -EFAULT; |
1728 | if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) | 1726 | if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) |
1729 | return -EINVAL; | 1727 | return -EINVAL; |
@@ -1740,9 +1738,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
1740 | { | 1738 | { |
1741 | struct tpacket_req req; | 1739 | struct tpacket_req req; |
1742 | 1740 | ||
1743 | if (optlen<sizeof(req)) | 1741 | if (optlen < sizeof(req)) |
1744 | return -EINVAL; | 1742 | return -EINVAL; |
1745 | if (copy_from_user(&req,optval,sizeof(req))) | 1743 | if (copy_from_user(&req, optval, sizeof(req))) |
1746 | return -EFAULT; | 1744 | return -EFAULT; |
1747 | return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING); | 1745 | return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING); |
1748 | } | 1746 | } |
@@ -1750,9 +1748,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
1750 | { | 1748 | { |
1751 | int val; | 1749 | int val; |
1752 | 1750 | ||
1753 | if (optlen!=sizeof(val)) | 1751 | if (optlen != sizeof(val)) |
1754 | return -EINVAL; | 1752 | return -EINVAL; |
1755 | if (copy_from_user(&val,optval,sizeof(val))) | 1753 | if (copy_from_user(&val, optval, sizeof(val))) |
1756 | return -EFAULT; | 1754 | return -EFAULT; |
1757 | 1755 | ||
1758 | pkt_sk(sk)->copy_thresh = val; | 1756 | pkt_sk(sk)->copy_thresh = val; |
@@ -1985,51 +1983,51 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, | |||
1985 | struct sock *sk = sock->sk; | 1983 | struct sock *sk = sock->sk; |
1986 | 1984 | ||
1987 | switch (cmd) { | 1985 | switch (cmd) { |
1988 | case SIOCOUTQ: | 1986 | case SIOCOUTQ: |
1989 | { | 1987 | { |
1990 | int amount = sk_wmem_alloc_get(sk); | 1988 | int amount = sk_wmem_alloc_get(sk); |
1991 | 1989 | ||
1992 | return put_user(amount, (int __user *)arg); | 1990 | return put_user(amount, (int __user *)arg); |
1993 | } | 1991 | } |
1994 | case SIOCINQ: | 1992 | case SIOCINQ: |
1995 | { | 1993 | { |
1996 | struct sk_buff *skb; | 1994 | struct sk_buff *skb; |
1997 | int amount = 0; | 1995 | int amount = 0; |
1998 | 1996 | ||
1999 | spin_lock_bh(&sk->sk_receive_queue.lock); | 1997 | spin_lock_bh(&sk->sk_receive_queue.lock); |
2000 | skb = skb_peek(&sk->sk_receive_queue); | 1998 | skb = skb_peek(&sk->sk_receive_queue); |
2001 | if (skb) | 1999 | if (skb) |
2002 | amount = skb->len; | 2000 | amount = skb->len; |
2003 | spin_unlock_bh(&sk->sk_receive_queue.lock); | 2001 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
2004 | return put_user(amount, (int __user *)arg); | 2002 | return put_user(amount, (int __user *)arg); |
2005 | } | 2003 | } |
2006 | case SIOCGSTAMP: | 2004 | case SIOCGSTAMP: |
2007 | return sock_get_timestamp(sk, (struct timeval __user *)arg); | 2005 | return sock_get_timestamp(sk, (struct timeval __user *)arg); |
2008 | case SIOCGSTAMPNS: | 2006 | case SIOCGSTAMPNS: |
2009 | return sock_get_timestampns(sk, (struct timespec __user *)arg); | 2007 | return sock_get_timestampns(sk, (struct timespec __user *)arg); |
2010 | 2008 | ||
2011 | #ifdef CONFIG_INET | 2009 | #ifdef CONFIG_INET |
2012 | case SIOCADDRT: | 2010 | case SIOCADDRT: |
2013 | case SIOCDELRT: | 2011 | case SIOCDELRT: |
2014 | case SIOCDARP: | 2012 | case SIOCDARP: |
2015 | case SIOCGARP: | 2013 | case SIOCGARP: |
2016 | case SIOCSARP: | 2014 | case SIOCSARP: |
2017 | case SIOCGIFADDR: | 2015 | case SIOCGIFADDR: |
2018 | case SIOCSIFADDR: | 2016 | case SIOCSIFADDR: |
2019 | case SIOCGIFBRDADDR: | 2017 | case SIOCGIFBRDADDR: |
2020 | case SIOCSIFBRDADDR: | 2018 | case SIOCSIFBRDADDR: |
2021 | case SIOCGIFNETMASK: | 2019 | case SIOCGIFNETMASK: |
2022 | case SIOCSIFNETMASK: | 2020 | case SIOCSIFNETMASK: |
2023 | case SIOCGIFDSTADDR: | 2021 | case SIOCGIFDSTADDR: |
2024 | case SIOCSIFDSTADDR: | 2022 | case SIOCSIFDSTADDR: |
2025 | case SIOCSIFFLAGS: | 2023 | case SIOCSIFFLAGS: |
2026 | if (!net_eq(sock_net(sk), &init_net)) | 2024 | if (!net_eq(sock_net(sk), &init_net)) |
2027 | return -ENOIOCTLCMD; | 2025 | return -ENOIOCTLCMD; |
2028 | return inet_dgram_ops.ioctl(sock, cmd, arg); | 2026 | return inet_dgram_ops.ioctl(sock, cmd, arg); |
2029 | #endif | 2027 | #endif |
2030 | 2028 | ||
2031 | default: | 2029 | default: |
2032 | return -ENOIOCTLCMD; | 2030 | return -ENOIOCTLCMD; |
2033 | } | 2031 | } |
2034 | return 0; | 2032 | return 0; |
2035 | } | 2033 | } |
@@ -2039,7 +2037,7 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, | |||
2039 | #define packet_poll datagram_poll | 2037 | #define packet_poll datagram_poll |
2040 | #else | 2038 | #else |
2041 | 2039 | ||
2042 | static unsigned int packet_poll(struct file * file, struct socket *sock, | 2040 | static unsigned int packet_poll(struct file *file, struct socket *sock, |
2043 | poll_table *wait) | 2041 | poll_table *wait) |
2044 | { | 2042 | { |
2045 | struct sock *sk = sock->sk; | 2043 | struct sock *sk = sock->sk; |
@@ -2069,7 +2067,7 @@ static unsigned int packet_poll(struct file * file, struct socket *sock, | |||
2069 | static void packet_mm_open(struct vm_area_struct *vma) | 2067 | static void packet_mm_open(struct vm_area_struct *vma) |
2070 | { | 2068 | { |
2071 | struct file *file = vma->vm_file; | 2069 | struct file *file = vma->vm_file; |
2072 | struct socket * sock = file->private_data; | 2070 | struct socket *sock = file->private_data; |
2073 | struct sock *sk = sock->sk; | 2071 | struct sock *sk = sock->sk; |
2074 | 2072 | ||
2075 | if (sk) | 2073 | if (sk) |
@@ -2079,7 +2077,7 @@ static void packet_mm_open(struct vm_area_struct *vma) | |||
2079 | static void packet_mm_close(struct vm_area_struct *vma) | 2077 | static void packet_mm_close(struct vm_area_struct *vma) |
2080 | { | 2078 | { |
2081 | struct file *file = vma->vm_file; | 2079 | struct file *file = vma->vm_file; |
2082 | struct socket * sock = file->private_data; | 2080 | struct socket *sock = file->private_data; |
2083 | struct sock *sk = sock->sk; | 2081 | struct sock *sk = sock->sk; |
2084 | 2082 | ||
2085 | if (sk) | 2083 | if (sk) |
@@ -2087,8 +2085,8 @@ static void packet_mm_close(struct vm_area_struct *vma) | |||
2087 | } | 2085 | } |
2088 | 2086 | ||
2089 | static struct vm_operations_struct packet_mmap_ops = { | 2087 | static struct vm_operations_struct packet_mmap_ops = { |
2090 | .open = packet_mm_open, | 2088 | .open = packet_mm_open, |
2091 | .close =packet_mm_close, | 2089 | .close = packet_mm_close, |
2092 | }; | 2090 | }; |
2093 | 2091 | ||
2094 | static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len) | 2092 | static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len) |
@@ -2239,8 +2237,8 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, | |||
2239 | skb_queue_purge(rb_queue); | 2237 | skb_queue_purge(rb_queue); |
2240 | #undef XC | 2238 | #undef XC |
2241 | if (atomic_read(&po->mapped)) | 2239 | if (atomic_read(&po->mapped)) |
2242 | printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", | 2240 | pr_err("packet_mmap: vma is busy: %d\n", |
2243 | atomic_read(&po->mapped)); | 2241 | atomic_read(&po->mapped)); |
2244 | } | 2242 | } |
2245 | mutex_unlock(&po->pg_vec_lock); | 2243 | mutex_unlock(&po->pg_vec_lock); |
2246 | 2244 | ||
@@ -2303,7 +2301,7 @@ static int packet_mmap(struct file *file, struct socket *sock, | |||
2303 | int pg_num; | 2301 | int pg_num; |
2304 | 2302 | ||
2305 | for (pg_num = 0; pg_num < rb->pg_vec_pages; | 2303 | for (pg_num = 0; pg_num < rb->pg_vec_pages; |
2306 | pg_num++,page++) { | 2304 | pg_num++, page++) { |
2307 | err = vm_insert_page(vma, start, page); | 2305 | err = vm_insert_page(vma, start, page); |
2308 | if (unlikely(err)) | 2306 | if (unlikely(err)) |
2309 | goto out; | 2307 | goto out; |
@@ -2372,7 +2370,7 @@ static struct net_proto_family packet_family_ops = { | |||
2372 | }; | 2370 | }; |
2373 | 2371 | ||
2374 | static struct notifier_block packet_netdev_notifier = { | 2372 | static struct notifier_block packet_netdev_notifier = { |
2375 | .notifier_call =packet_notifier, | 2373 | .notifier_call = packet_notifier, |
2376 | }; | 2374 | }; |
2377 | 2375 | ||
2378 | #ifdef CONFIG_PROC_FS | 2376 | #ifdef CONFIG_PROC_FS |
@@ -2402,7 +2400,7 @@ static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2402 | ++*pos; | 2400 | ++*pos; |
2403 | return (v == SEQ_START_TOKEN) | 2401 | return (v == SEQ_START_TOKEN) |
2404 | ? sk_head(&net->packet.sklist) | 2402 | ? sk_head(&net->packet.sklist) |
2405 | : sk_next((struct sock*)v) ; | 2403 | : sk_next((struct sock *)v) ; |
2406 | } | 2404 | } |
2407 | 2405 | ||
2408 | static void packet_seq_stop(struct seq_file *seq, void *v) | 2406 | static void packet_seq_stop(struct seq_file *seq, void *v) |
@@ -2430,7 +2428,7 @@ static int packet_seq_show(struct seq_file *seq, void *v) | |||
2430 | po->running, | 2428 | po->running, |
2431 | atomic_read(&s->sk_rmem_alloc), | 2429 | atomic_read(&s->sk_rmem_alloc), |
2432 | sock_i_uid(s), | 2430 | sock_i_uid(s), |
2433 | sock_i_ino(s) ); | 2431 | sock_i_ino(s)); |
2434 | } | 2432 | } |
2435 | 2433 | ||
2436 | return 0; | 2434 | return 0; |
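The af_packet.c hunks above are checkpatch-style cleanups (spacing, printk to pr_err), but the packet_ioctl() hunk also documents the two queue ioctls an AF_PACKET socket answers: SIOCOUTQ reports the bytes still charged to the send queue, SIOCINQ the length of the skb at the head of the receive queue. A minimal user-space probe for those ioctls (illustrative only, not part of the patch; opening an AF_PACKET socket needs CAP_NET_RAW):

/* Illustrative user-space probe for the ioctls handled above. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int inq = 0, outq = 0;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	ioctl(fd, SIOCINQ, &inq);	/* length of skb at head of receive queue */
	ioctl(fd, SIOCOUTQ, &outq);	/* bytes still charged to the send queue */
	printf("inq=%d outq=%d\n", inq, outq);
	return 0;
}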
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c index e087862ed7e4..ef5c75c372e4 100644 --- a/net/phonet/datagram.c +++ b/net/phonet/datagram.c | |||
@@ -159,8 +159,11 @@ out_nofree: | |||
159 | static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb) | 159 | static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb) |
160 | { | 160 | { |
161 | int err = sock_queue_rcv_skb(sk, skb); | 161 | int err = sock_queue_rcv_skb(sk, skb); |
162 | if (err < 0) | 162 | if (err < 0) { |
163 | kfree_skb(skb); | 163 | kfree_skb(skb); |
164 | if (err == -ENOMEM) | ||
165 | atomic_inc(&sk->sk_drops); | ||
166 | } | ||
164 | return err ? NET_RX_DROP : NET_RX_SUCCESS; | 167 | return err ? NET_RX_DROP : NET_RX_SUCCESS; |
165 | } | 168 | } |
166 | 169 | ||
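The Phonet hunks in datagram.c and pep.c apply one rule consistently: when a packet is dropped because a queue is full (sock_queue_rcv_skb() returning -ENOMEM, or the credit and control-queue checks in pep.c), sk->sk_drops is bumped so the new /proc/net/phonet file added further down can report it. The pattern in isolation looks like this (generic sketch that mirrors pn_backlog_rcv() above rather than adding anything new):

/* Generic sketch of the drop-accounting pattern used by these hunks. */
#include <net/sock.h>
#include <linux/netdevice.h>	/* NET_RX_DROP, NET_RX_SUCCESS */

static int demo_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int err = sock_queue_rcv_skb(sk, skb);

	if (err < 0) {
		kfree_skb(skb);
		if (err == -ENOMEM)		/* receive buffer was full */
			atomic_inc(&sk->sk_drops);
	}
	return err ? NET_RX_DROP : NET_RX_SUCCESS;
}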
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c index 480839dfc560..d183509d3fa6 100644 --- a/net/phonet/pep-gprs.c +++ b/net/phonet/pep-gprs.c | |||
@@ -183,7 +183,7 @@ static int gprs_close(struct net_device *dev) | |||
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | 185 | ||
186 | static int gprs_xmit(struct sk_buff *skb, struct net_device *dev) | 186 | static netdev_tx_t gprs_xmit(struct sk_buff *skb, struct net_device *dev) |
187 | { | 187 | { |
188 | struct gprs_dev *gp = netdev_priv(dev); | 188 | struct gprs_dev *gp = netdev_priv(dev); |
189 | struct sock *sk = gp->sk; | 189 | struct sock *sk = gp->sk; |
@@ -195,7 +195,7 @@ static int gprs_xmit(struct sk_buff *skb, struct net_device *dev) | |||
195 | break; | 195 | break; |
196 | default: | 196 | default: |
197 | dev_kfree_skb(skb); | 197 | dev_kfree_skb(skb); |
198 | return 0; | 198 | return NETDEV_TX_OK; |
199 | } | 199 | } |
200 | 200 | ||
201 | skb_orphan(skb); | 201 | skb_orphan(skb); |
@@ -215,7 +215,7 @@ static int gprs_xmit(struct sk_buff *skb, struct net_device *dev) | |||
215 | netif_stop_queue(dev); | 215 | netif_stop_queue(dev); |
216 | if (pep_writeable(sk)) | 216 | if (pep_writeable(sk)) |
217 | netif_wake_queue(dev); | 217 | netif_wake_queue(dev); |
218 | return 0; | 218 | return NETDEV_TX_OK; |
219 | } | 219 | } |
220 | 220 | ||
221 | static int gprs_set_mtu(struct net_device *dev, int new_mtu) | 221 | static int gprs_set_mtu(struct net_device *dev, int new_mtu) |
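gprs_xmit() is switched from int to netdev_tx_t as part of the tree-wide conversion of ndo_start_xmit() return types: a driver returns NETDEV_TX_OK once it has taken ownership of the skb (even if it then drops it), and NETDEV_TX_BUSY only when the skb is left untouched for the core to requeue. A stripped-down sketch of that contract, with invented foo_* helpers standing in for real hardware access:

/* Sketch of the ndo_start_xmit() contract; foo_* helpers are placeholders. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool foo_hw_has_room(struct net_device *dev) { return true; }	/* stub */

static void foo_hw_queue(struct net_device *dev, struct sk_buff *skb)
{
	dev_kfree_skb(skb);	/* stub: a real driver would hand skb to hardware */
}

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (!foo_hw_has_room(dev)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* skb untouched: the core will requeue it */
	}
	foo_hw_queue(dev, skb);		/* skb is consumed from here on */
	return NETDEV_TX_OK;		/* OK even if the packet ends up dropped */
}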
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index eef833ea6d7b..b8252d289cd7 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
@@ -346,8 +346,10 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
346 | break; | 346 | break; |
347 | 347 | ||
348 | case PNS_PEP_CTRL_REQ: | 348 | case PNS_PEP_CTRL_REQ: |
349 | if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) | 349 | if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) { |
350 | atomic_inc(&sk->sk_drops); | ||
350 | break; | 351 | break; |
352 | } | ||
351 | __skb_pull(skb, 4); | 353 | __skb_pull(skb, 4); |
352 | queue = &pn->ctrlreq_queue; | 354 | queue = &pn->ctrlreq_queue; |
353 | goto queue; | 355 | goto queue; |
@@ -358,10 +360,13 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
358 | err = sock_queue_rcv_skb(sk, skb); | 360 | err = sock_queue_rcv_skb(sk, skb); |
359 | if (!err) | 361 | if (!err) |
360 | return 0; | 362 | return 0; |
363 | if (err == -ENOMEM) | ||
364 | atomic_inc(&sk->sk_drops); | ||
361 | break; | 365 | break; |
362 | } | 366 | } |
363 | 367 | ||
364 | if (pn->rx_credits == 0) { | 368 | if (pn->rx_credits == 0) { |
369 | atomic_inc(&sk->sk_drops); | ||
365 | err = -ENOBUFS; | 370 | err = -ENOBUFS; |
366 | break; | 371 | break; |
367 | } | 372 | } |
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index c2b77a698695..2f65dcaed2fb 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #include <linux/net.h> | 27 | #include <linux/net.h> |
28 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
29 | #include <linux/phonet.h> | 29 | #include <linux/phonet.h> |
30 | #include <linux/proc_fs.h> | ||
31 | #include <linux/if_arp.h> | ||
30 | #include <net/sock.h> | 32 | #include <net/sock.h> |
31 | #include <net/netns/generic.h> | 33 | #include <net/netns/generic.h> |
32 | #include <net/phonet/pn_dev.h> | 34 | #include <net/phonet/pn_dev.h> |
@@ -194,14 +196,37 @@ found: | |||
194 | return err; | 196 | return err; |
195 | } | 197 | } |
196 | 198 | ||
199 | /* automatically configure a Phonet device, if supported */ | ||
200 | static int phonet_device_autoconf(struct net_device *dev) | ||
201 | { | ||
202 | struct if_phonet_req req; | ||
203 | int ret; | ||
204 | |||
205 | if (!dev->netdev_ops->ndo_do_ioctl) | ||
206 | return -EOPNOTSUPP; | ||
207 | |||
208 | ret = dev->netdev_ops->ndo_do_ioctl(dev, (struct ifreq *)&req, | ||
209 | SIOCPNGAUTOCONF); | ||
210 | if (ret < 0) | ||
211 | return ret; | ||
212 | return phonet_address_add(dev, req.ifr_phonet_autoconf.device); | ||
213 | } | ||
214 | |||
197 | /* notify Phonet of device events */ | 215 | /* notify Phonet of device events */ |
198 | static int phonet_device_notify(struct notifier_block *me, unsigned long what, | 216 | static int phonet_device_notify(struct notifier_block *me, unsigned long what, |
199 | void *arg) | 217 | void *arg) |
200 | { | 218 | { |
201 | struct net_device *dev = arg; | 219 | struct net_device *dev = arg; |
202 | 220 | ||
203 | if (what == NETDEV_UNREGISTER) | 221 | switch (what) { |
222 | case NETDEV_REGISTER: | ||
223 | if (dev->type == ARPHRD_PHONET) | ||
224 | phonet_device_autoconf(dev); | ||
225 | break; | ||
226 | case NETDEV_UNREGISTER: | ||
204 | phonet_device_destroy(dev); | 227 | phonet_device_destroy(dev); |
228 | break; | ||
229 | } | ||
205 | return 0; | 230 | return 0; |
206 | 231 | ||
207 | } | 232 | } |
@@ -218,6 +243,11 @@ static int phonet_init_net(struct net *net) | |||
218 | if (!pnn) | 243 | if (!pnn) |
219 | return -ENOMEM; | 244 | return -ENOMEM; |
220 | 245 | ||
246 | if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops)) { | ||
247 | kfree(pnn); | ||
248 | return -ENOMEM; | ||
249 | } | ||
250 | |||
221 | INIT_LIST_HEAD(&pnn->pndevs.list); | 251 | INIT_LIST_HEAD(&pnn->pndevs.list); |
222 | spin_lock_init(&pnn->pndevs.lock); | 252 | spin_lock_init(&pnn->pndevs.lock); |
223 | net_assign_generic(net, phonet_net_id, pnn); | 253 | net_assign_generic(net, phonet_net_id, pnn); |
@@ -233,6 +263,8 @@ static void phonet_exit_net(struct net *net) | |||
233 | for_each_netdev(net, dev) | 263 | for_each_netdev(net, dev) |
234 | phonet_device_destroy(dev); | 264 | phonet_device_destroy(dev); |
235 | rtnl_unlock(); | 265 | rtnl_unlock(); |
266 | |||
267 | proc_net_remove(net, "phonet"); | ||
236 | kfree(pnn); | 268 | kfree(pnn); |
237 | } | 269 | } |
238 | 270 | ||
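phonet_device_notify() now reacts to NETDEV_REGISTER (running SIOCPNGAUTOCONF autoconfiguration on ARPHRD_PHONET devices) as well as NETDEV_UNREGISTER. This is the standard netdevice-notifier mechanism: register a struct notifier_block with register_netdevice_notifier() and switch on the event code; on this kernel the callback's void pointer is the struct net_device itself. A self-contained sketch of the mechanism, not the Phonet code:

/* Self-contained sketch of a netdevice notifier; demo_* names are invented. */
#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = ptr;	/* on this kernel, ptr is the device */

	switch (event) {
	case NETDEV_REGISTER:
		pr_info("demo: %s registered\n", dev->name);
		break;
	case NETDEV_UNREGISTER:
		pr_info("demo: %s unregistered\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_netdev_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_notifier);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");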
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index f8b4cee434c2..d21fd3576610 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c | |||
@@ -147,7 +147,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | |||
147 | 147 | ||
148 | if (fill_addr(skb, pnd->netdev, addr << 2, | 148 | if (fill_addr(skb, pnd->netdev, addr << 2, |
149 | NETLINK_CB(cb->skb).pid, | 149 | NETLINK_CB(cb->skb).pid, |
150 | cb->nlh->nlmsg_seq, RTM_NEWADDR)) | 150 | cb->nlh->nlmsg_seq, RTM_NEWADDR) < 0) |
151 | goto out; | 151 | goto out; |
152 | } | 152 | } |
153 | } | 153 | } |
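The pn_netlink.c one-liner is about the netlink dump convention: fill_addr() returns a negative errno when the message buffer is full and a non-negative value on success, so the old truth test aborted the dump after the first successful entry. Only a result below zero means "stop here, the dump resumes on the next callback". A generic sketch of that dump loop, with demo_fill_addr() standing in for the real fill helper:

/* Generic dump loop; demo_fill_addr() is an assumed helper returning a
 * negative errno (typically -EMSGSIZE) once the current skb is full. */
#include <linux/skbuff.h>
#include <linux/netlink.h>

static int demo_fill_addr(struct sk_buff *skb, unsigned int addr);	/* assumed */

static int demo_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned int addr;

	for (addr = cb->args[0]; addr < 64; addr++) {	/* hypothetical 64-entry table */
		if (demo_fill_addr(skb, addr) < 0)
			break;			/* skb full: stop, resume next call */
	}
	cb->args[0] = addr;			/* remember where to resume */
	return skb->len;			/* 0 once nothing more was added */
}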
diff --git a/net/phonet/socket.c b/net/phonet/socket.c index ada2a35bf7a2..7a4ee397d2f7 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c | |||
@@ -412,3 +412,102 @@ found: | |||
412 | return 0; | 412 | return 0; |
413 | } | 413 | } |
414 | EXPORT_SYMBOL(pn_sock_get_port); | 414 | EXPORT_SYMBOL(pn_sock_get_port); |
415 | |||
416 | #ifdef CONFIG_PROC_FS | ||
417 | static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos) | ||
418 | { | ||
419 | struct net *net = seq_file_net(seq); | ||
420 | struct hlist_node *node; | ||
421 | struct sock *sknode; | ||
422 | |||
423 | sk_for_each(sknode, node, &pnsocks.hlist) { | ||
424 | if (!net_eq(net, sock_net(sknode))) | ||
425 | continue; | ||
426 | if (!pos) | ||
427 | return sknode; | ||
428 | pos--; | ||
429 | } | ||
430 | return NULL; | ||
431 | } | ||
432 | |||
433 | static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk) | ||
434 | { | ||
435 | struct net *net = seq_file_net(seq); | ||
436 | |||
437 | do | ||
438 | sk = sk_next(sk); | ||
439 | while (sk && !net_eq(net, sock_net(sk))); | ||
440 | |||
441 | return sk; | ||
442 | } | ||
443 | |||
444 | static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos) | ||
445 | __acquires(pnsocks.lock) | ||
446 | { | ||
447 | spin_lock_bh(&pnsocks.lock); | ||
448 | return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; | ||
449 | } | ||
450 | |||
451 | static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
452 | { | ||
453 | struct sock *sk; | ||
454 | |||
455 | if (v == SEQ_START_TOKEN) | ||
456 | sk = pn_sock_get_idx(seq, 0); | ||
457 | else | ||
458 | sk = pn_sock_get_next(seq, v); | ||
459 | (*pos)++; | ||
460 | return sk; | ||
461 | } | ||
462 | |||
463 | static void pn_sock_seq_stop(struct seq_file *seq, void *v) | ||
464 | __releases(pnsocks.lock) | ||
465 | { | ||
466 | spin_unlock_bh(&pnsocks.lock); | ||
467 | } | ||
468 | |||
469 | static int pn_sock_seq_show(struct seq_file *seq, void *v) | ||
470 | { | ||
471 | int len; | ||
472 | |||
473 | if (v == SEQ_START_TOKEN) | ||
474 | seq_printf(seq, "%s%n", "pt loc rem rs st tx_queue rx_queue " | ||
475 | " uid inode ref pointer drops", &len); | ||
476 | else { | ||
477 | struct sock *sk = v; | ||
478 | struct pn_sock *pn = pn_sk(sk); | ||
479 | |||
480 | seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu " | ||
481 | "%d %p %d%n", | ||
482 | sk->sk_protocol, pn->sobject, 0, pn->resource, | ||
483 | sk->sk_state, | ||
484 | sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), | ||
485 | sock_i_uid(sk), sock_i_ino(sk), | ||
486 | atomic_read(&sk->sk_refcnt), sk, | ||
487 | atomic_read(&sk->sk_drops), &len); | ||
488 | } | ||
489 | seq_printf(seq, "%*s\n", 127 - len, ""); | ||
490 | return 0; | ||
491 | } | ||
492 | |||
493 | static const struct seq_operations pn_sock_seq_ops = { | ||
494 | .start = pn_sock_seq_start, | ||
495 | .next = pn_sock_seq_next, | ||
496 | .stop = pn_sock_seq_stop, | ||
497 | .show = pn_sock_seq_show, | ||
498 | }; | ||
499 | |||
500 | static int pn_sock_open(struct inode *inode, struct file *file) | ||
501 | { | ||
502 | return seq_open_net(inode, file, &pn_sock_seq_ops, | ||
503 | sizeof(struct seq_net_private)); | ||
504 | } | ||
505 | |||
506 | const struct file_operations pn_sock_seq_fops = { | ||
507 | .owner = THIS_MODULE, | ||
508 | .open = pn_sock_open, | ||
509 | .read = seq_read, | ||
510 | .llseek = seq_lseek, | ||
511 | .release = seq_release_net, | ||
512 | }; | ||
513 | #endif | ||
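The block added to socket.c is a seq_file iterator over all Phonet sockets; pn_dev.c above wires it up per network namespace as /proc/net/phonet, and each line reports protocol, local/remote objects, state, queue sizes, uid, inode, refcount and the sk_drops counter fed by the earlier hunks. From user space it is an ordinary text file:

/* User-space reader for the new file; nothing Phonet-specific is required. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/phonet", "r");

	if (!f) {
		perror("/proc/net/phonet");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}

Each output line is padded to a fixed 127-character width by the trailing seq_printf() above: one header line first, then one line per socket.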
diff --git a/net/rds/Kconfig b/net/rds/Kconfig index 796773b5df9b..ec753b3ae72a 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig | |||
@@ -1,14 +1,28 @@ | |||
1 | 1 | ||
2 | config RDS | 2 | config RDS |
3 | tristate "Reliable Datagram Sockets (RDS) (EXPERIMENTAL)" | 3 | tristate "The RDS Protocol (EXPERIMENTAL)" |
4 | depends on INET && INFINIBAND_IPOIB && EXPERIMENTAL | 4 | depends on INET && EXPERIMENTAL |
5 | depends on INFINIBAND && INFINIBAND_ADDR_TRANS | ||
6 | ---help--- | 5 | ---help--- |
7 | RDS provides reliable, sequenced delivery of datagrams | 6 | The RDS (Reliable Datagram Sockets) protocol provides reliable, |
8 | over Infiniband. | 7 | sequenced delivery of datagrams over Infiniband, iWARP, |
8 | or TCP. | ||
9 | |||
10 | config RDS_RDMA | ||
11 | tristate "RDS over Infiniband and iWARP" | ||
12 | depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS | ||
13 | ---help--- | ||
14 | Allow RDS to use Infiniband and iWARP as a transport. | ||
15 | This transport supports RDMA operations. | ||
16 | |||
17 | config RDS_TCP | ||
18 | tristate "RDS over TCP" | ||
19 | depends on RDS | ||
20 | ---help--- | ||
21 | Allow RDS to use TCP as a transport. | ||
22 | This transport does not support RDMA operations. | ||
9 | 23 | ||
10 | config RDS_DEBUG | 24 | config RDS_DEBUG |
11 | bool "Debugging messages" | 25 | bool "RDS debugging messages" |
12 | depends on RDS | 26 | depends on RDS |
13 | default n | 27 | default n |
14 | 28 | ||
diff --git a/net/rds/Makefile b/net/rds/Makefile index 51f27585fa08..b46eca109688 100644 --- a/net/rds/Makefile +++ b/net/rds/Makefile | |||
@@ -1,13 +1,20 @@ | |||
1 | obj-$(CONFIG_RDS) += rds.o | 1 | obj-$(CONFIG_RDS) += rds.o |
2 | rds-y := af_rds.o bind.o cong.o connection.o info.o message.o \ | 2 | rds-y := af_rds.o bind.o cong.o connection.o info.o message.o \ |
3 | recv.o send.o stats.o sysctl.o threads.o transport.o \ | 3 | recv.o send.o stats.o sysctl.o threads.o transport.o \ |
4 | loop.o page.o rdma.o \ | 4 | loop.o page.o rdma.o |
5 | rdma_transport.o \ | 5 | |
6 | obj-$(CONFIG_RDS_RDMA) += rds_rdma.o | ||
7 | rds_rdma-objs := rdma_transport.o \ | ||
6 | ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \ | 8 | ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \ |
7 | ib_sysctl.o ib_rdma.o \ | 9 | ib_sysctl.o ib_rdma.o \ |
8 | iw.o iw_cm.o iw_recv.o iw_ring.o iw_send.o iw_stats.o \ | 10 | iw.o iw_cm.o iw_recv.o iw_ring.o iw_send.o iw_stats.o \ |
9 | iw_sysctl.o iw_rdma.o | 11 | iw_sysctl.o iw_rdma.o |
10 | 12 | ||
13 | |||
14 | obj-$(CONFIG_RDS_TCP) += rds_tcp.o | ||
15 | rds_tcp-objs := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \ | ||
16 | tcp_send.o tcp_stats.o | ||
17 | |||
11 | ifeq ($(CONFIG_RDS_DEBUG), y) | 18 | ifeq ($(CONFIG_RDS_DEBUG), y) |
12 | EXTRA_CFLAGS += -DDEBUG | 19 | EXTRA_CFLAGS += -DDEBUG |
13 | endif | 20 | endif |
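The Kconfig and Makefile changes split RDS into a core module (rds.ko) plus optional rds_rdma.ko and rds_tcp.ko transports; that is why af_rds.c below stops calling rds_rdma_init() directly and why so many core functions gain EXPORT_SYMBOL_GPL further down. A transport module now plugs itself in at load time. Roughly, and with the core registration helpers (rds_trans_register()/rds_trans_unregister() from transport.c) taken on trust rather than shown in this diff, such a module boils down to:

/* Rough sketch of a separately built RDS transport module; the real
 * implementations live in rdma_transport.c and tcp.c. */
#include <linux/module.h>
#include "rds.h"

static struct rds_transport demo_transport = {
	.t_owner = THIS_MODULE,
	.t_name  = "demo",
	/* .conn_alloc, .xmit, .recv, ... would be filled in here */
};

static int __init demo_transport_init(void)
{
	rds_trans_register(&demo_transport);
	return 0;
}

static void __exit demo_transport_exit(void)
{
	rds_trans_unregister(&demo_transport);
}

module_init(demo_transport_init);
module_exit(demo_transport_exit);
MODULE_LICENSE("GPL");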
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index b11e7e527864..108ed2e671c5 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c | |||
@@ -39,7 +39,6 @@ | |||
39 | 39 | ||
40 | #include "rds.h" | 40 | #include "rds.h" |
41 | #include "rdma.h" | 41 | #include "rdma.h" |
42 | #include "rdma_transport.h" | ||
43 | 42 | ||
44 | /* this is just used for stats gathering :/ */ | 43 | /* this is just used for stats gathering :/ */ |
45 | static DEFINE_SPINLOCK(rds_sock_lock); | 44 | static DEFINE_SPINLOCK(rds_sock_lock); |
@@ -509,7 +508,6 @@ out: | |||
509 | 508 | ||
510 | static void __exit rds_exit(void) | 509 | static void __exit rds_exit(void) |
511 | { | 510 | { |
512 | rds_rdma_exit(); | ||
513 | sock_unregister(rds_family_ops.family); | 511 | sock_unregister(rds_family_ops.family); |
514 | proto_unregister(&rds_proto); | 512 | proto_unregister(&rds_proto); |
515 | rds_conn_exit(); | 513 | rds_conn_exit(); |
@@ -549,14 +547,8 @@ static int __init rds_init(void) | |||
549 | rds_info_register_func(RDS_INFO_SOCKETS, rds_sock_info); | 547 | rds_info_register_func(RDS_INFO_SOCKETS, rds_sock_info); |
550 | rds_info_register_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info); | 548 | rds_info_register_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info); |
551 | 549 | ||
552 | /* ib/iwarp transports currently compiled-in */ | ||
553 | ret = rds_rdma_init(); | ||
554 | if (ret) | ||
555 | goto out_sock; | ||
556 | goto out; | 550 | goto out; |
557 | 551 | ||
558 | out_sock: | ||
559 | sock_unregister(rds_family_ops.family); | ||
560 | out_proto: | 552 | out_proto: |
561 | proto_unregister(&rds_proto); | 553 | proto_unregister(&rds_proto); |
562 | out_stats: | 554 | out_stats: |
diff --git a/net/rds/bind.c b/net/rds/bind.c index c17cc39160ce..5d95fc007f1a 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c | |||
@@ -187,6 +187,9 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
187 | if (trans == NULL) { | 187 | if (trans == NULL) { |
188 | ret = -EADDRNOTAVAIL; | 188 | ret = -EADDRNOTAVAIL; |
189 | rds_remove_bound(rs); | 189 | rds_remove_bound(rs); |
190 | if (printk_ratelimit()) | ||
191 | printk(KERN_INFO "RDS: rds_bind() could not find a transport, " | ||
192 | "load rds_tcp or rds_rdma?\n"); | ||
190 | goto out; | 193 | goto out; |
191 | } | 194 | } |
192 | 195 | ||
diff --git a/net/rds/cong.c b/net/rds/cong.c index 710e4599d76c..dd2711df640b 100644 --- a/net/rds/cong.c +++ b/net/rds/cong.c | |||
@@ -254,6 +254,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask) | |||
254 | read_unlock_irqrestore(&rds_cong_monitor_lock, flags); | 254 | read_unlock_irqrestore(&rds_cong_monitor_lock, flags); |
255 | } | 255 | } |
256 | } | 256 | } |
257 | EXPORT_SYMBOL_GPL(rds_cong_map_updated); | ||
257 | 258 | ||
258 | int rds_cong_updated_since(unsigned long *recent) | 259 | int rds_cong_updated_since(unsigned long *recent) |
259 | { | 260 | { |
diff --git a/net/rds/connection.c b/net/rds/connection.c index d14445c48304..cc8b568c0c84 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c | |||
@@ -126,7 +126,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr, | |||
126 | struct rds_transport *trans, gfp_t gfp, | 126 | struct rds_transport *trans, gfp_t gfp, |
127 | int is_outgoing) | 127 | int is_outgoing) |
128 | { | 128 | { |
129 | struct rds_connection *conn, *tmp, *parent = NULL; | 129 | struct rds_connection *conn, *parent = NULL; |
130 | struct hlist_head *head = rds_conn_bucket(laddr, faddr); | 130 | struct hlist_head *head = rds_conn_bucket(laddr, faddr); |
131 | unsigned long flags; | 131 | unsigned long flags; |
132 | int ret; | 132 | int ret; |
@@ -155,7 +155,6 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr, | |||
155 | } | 155 | } |
156 | 156 | ||
157 | INIT_HLIST_NODE(&conn->c_hash_node); | 157 | INIT_HLIST_NODE(&conn->c_hash_node); |
158 | conn->c_version = RDS_PROTOCOL_3_0; | ||
159 | conn->c_laddr = laddr; | 158 | conn->c_laddr = laddr; |
160 | conn->c_faddr = faddr; | 159 | conn->c_faddr = faddr; |
161 | spin_lock_init(&conn->c_lock); | 160 | spin_lock_init(&conn->c_lock); |
@@ -211,26 +210,40 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr, | |||
211 | trans->t_name ? trans->t_name : "[unknown]", | 210 | trans->t_name ? trans->t_name : "[unknown]", |
212 | is_outgoing ? "(outgoing)" : ""); | 211 | is_outgoing ? "(outgoing)" : ""); |
213 | 212 | ||
213 | /* | ||
214 | * Since we ran without holding the conn lock, someone could | ||
215 | * have created the same conn (either normal or passive) in the | ||
216 | * interim. We check while holding the lock. If we won, we complete | ||
217 | * init and return our conn. If we lost, we rollback and return the | ||
218 | * other one. | ||
219 | */ | ||
214 | spin_lock_irqsave(&rds_conn_lock, flags); | 220 | spin_lock_irqsave(&rds_conn_lock, flags); |
215 | if (parent == NULL) { | 221 | if (parent) { |
216 | tmp = rds_conn_lookup(head, laddr, faddr, trans); | 222 | /* Creating passive conn */ |
217 | if (tmp == NULL) | 223 | if (parent->c_passive) { |
218 | hlist_add_head(&conn->c_hash_node, head); | 224 | trans->conn_free(conn->c_transport_data); |
219 | } else { | 225 | kmem_cache_free(rds_conn_slab, conn); |
220 | tmp = parent->c_passive; | 226 | conn = parent->c_passive; |
221 | if (!tmp) | 227 | } else { |
222 | parent->c_passive = conn; | 228 | parent->c_passive = conn; |
223 | } | 229 | rds_cong_add_conn(conn); |
224 | 230 | rds_conn_count++; | |
225 | if (tmp) { | 231 | } |
226 | trans->conn_free(conn->c_transport_data); | ||
227 | kmem_cache_free(rds_conn_slab, conn); | ||
228 | conn = tmp; | ||
229 | } else { | 232 | } else { |
230 | rds_cong_add_conn(conn); | 233 | /* Creating normal conn */ |
231 | rds_conn_count++; | 234 | struct rds_connection *found; |
235 | |||
236 | found = rds_conn_lookup(head, laddr, faddr, trans); | ||
237 | if (found) { | ||
238 | trans->conn_free(conn->c_transport_data); | ||
239 | kmem_cache_free(rds_conn_slab, conn); | ||
240 | conn = found; | ||
241 | } else { | ||
242 | hlist_add_head(&conn->c_hash_node, head); | ||
243 | rds_cong_add_conn(conn); | ||
244 | rds_conn_count++; | ||
245 | } | ||
232 | } | 246 | } |
233 | |||
234 | spin_unlock_irqrestore(&rds_conn_lock, flags); | 247 | spin_unlock_irqrestore(&rds_conn_lock, flags); |
235 | 248 | ||
236 | out: | 249 | out: |
@@ -242,12 +255,14 @@ struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr, | |||
242 | { | 255 | { |
243 | return __rds_conn_create(laddr, faddr, trans, gfp, 0); | 256 | return __rds_conn_create(laddr, faddr, trans, gfp, 0); |
244 | } | 257 | } |
258 | EXPORT_SYMBOL_GPL(rds_conn_create); | ||
245 | 259 | ||
246 | struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr, | 260 | struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr, |
247 | struct rds_transport *trans, gfp_t gfp) | 261 | struct rds_transport *trans, gfp_t gfp) |
248 | { | 262 | { |
249 | return __rds_conn_create(laddr, faddr, trans, gfp, 1); | 263 | return __rds_conn_create(laddr, faddr, trans, gfp, 1); |
250 | } | 264 | } |
265 | EXPORT_SYMBOL_GPL(rds_conn_create_outgoing); | ||
251 | 266 | ||
252 | void rds_conn_destroy(struct rds_connection *conn) | 267 | void rds_conn_destroy(struct rds_connection *conn) |
253 | { | 268 | { |
@@ -290,6 +305,7 @@ void rds_conn_destroy(struct rds_connection *conn) | |||
290 | 305 | ||
291 | rds_conn_count--; | 306 | rds_conn_count--; |
292 | } | 307 | } |
308 | EXPORT_SYMBOL_GPL(rds_conn_destroy); | ||
293 | 309 | ||
294 | static void rds_conn_message_info(struct socket *sock, unsigned int len, | 310 | static void rds_conn_message_info(struct socket *sock, unsigned int len, |
295 | struct rds_info_iterator *iter, | 311 | struct rds_info_iterator *iter, |
@@ -393,6 +409,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len, | |||
393 | 409 | ||
394 | spin_unlock_irqrestore(&rds_conn_lock, flags); | 410 | spin_unlock_irqrestore(&rds_conn_lock, flags); |
395 | } | 411 | } |
412 | EXPORT_SYMBOL_GPL(rds_for_each_conn_info); | ||
396 | 413 | ||
397 | static int rds_conn_info_visitor(struct rds_connection *conn, | 414 | static int rds_conn_info_visitor(struct rds_connection *conn, |
398 | void *buffer) | 415 | void *buffer) |
@@ -468,6 +485,7 @@ void rds_conn_drop(struct rds_connection *conn) | |||
468 | atomic_set(&conn->c_state, RDS_CONN_ERROR); | 485 | atomic_set(&conn->c_state, RDS_CONN_ERROR); |
469 | queue_work(rds_wq, &conn->c_down_w); | 486 | queue_work(rds_wq, &conn->c_down_w); |
470 | } | 487 | } |
488 | EXPORT_SYMBOL_GPL(rds_conn_drop); | ||
471 | 489 | ||
472 | /* | 490 | /* |
473 | * An error occurred on the connection | 491 | * An error occurred on the connection |
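The __rds_conn_create() rework above spells out the race it always had to handle: the connection is allocated and transport-initialized without rds_conn_lock held, then the hash chain (or parent->c_passive) is rechecked under the lock, and whichever path lost frees its copy and returns the winner. The same "allocate optimistically, recheck under the lock" shape in isolation, with invented demo_* names:

/* Generic version of the pattern made explicit in __rds_conn_create(). */
#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_obj { int key; /* ... */ };

static DEFINE_SPINLOCK(demo_lock);

static struct demo_obj *demo_lookup(int key);		/* assumed helper */
static void demo_insert(struct demo_obj *obj);		/* assumed helper */

static struct demo_obj *demo_get_or_create(int key)
{
	struct demo_obj *obj, *found;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);	/* may sleep: no lock held */
	if (!obj)
		return NULL;
	obj->key = key;

	spin_lock(&demo_lock);
	found = demo_lookup(key);	/* someone may have raced with us */
	if (found) {
		spin_unlock(&demo_lock);
		kfree(obj);		/* we lost: roll back, adopt the winner */
		return found;
	}
	demo_insert(obj);		/* we won: publish while holding the lock */
	spin_unlock(&demo_lock);
	return obj;
}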
diff --git a/net/rds/ib.c b/net/rds/ib.c index b9bcd32431e1..536ebe5d3f6b 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
@@ -43,11 +43,14 @@ | |||
43 | 43 | ||
44 | unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE; | 44 | unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE; |
45 | unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */ | 45 | unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */ |
46 | unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT; | ||
46 | 47 | ||
47 | module_param(fmr_pool_size, int, 0444); | 48 | module_param(fmr_pool_size, int, 0444); |
48 | MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA"); | 49 | MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA"); |
49 | module_param(fmr_message_size, int, 0444); | 50 | module_param(fmr_message_size, int, 0444); |
50 | MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer"); | 51 | MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer"); |
52 | module_param(rds_ib_retry_count, int, 0444); | ||
53 | MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error"); | ||
51 | 54 | ||
52 | struct list_head rds_ib_devices; | 55 | struct list_head rds_ib_devices; |
53 | 56 | ||
@@ -82,9 +85,6 @@ void rds_ib_add_one(struct ib_device *device) | |||
82 | rds_ibdev->max_wrs = dev_attr->max_qp_wr; | 85 | rds_ibdev->max_wrs = dev_attr->max_qp_wr; |
83 | rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE); | 86 | rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE); |
84 | 87 | ||
85 | rds_ibdev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1); | ||
86 | rds_ibdev->fmr_page_size = 1 << rds_ibdev->fmr_page_shift; | ||
87 | rds_ibdev->fmr_page_mask = ~((u64) rds_ibdev->fmr_page_size - 1); | ||
88 | rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32; | 88 | rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32; |
89 | rds_ibdev->max_fmrs = dev_attr->max_fmr ? | 89 | rds_ibdev->max_fmrs = dev_attr->max_fmr ? |
90 | min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) : | 90 | min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) : |
@@ -282,6 +282,7 @@ struct rds_transport rds_ib_transport = { | |||
282 | .flush_mrs = rds_ib_flush_mrs, | 282 | .flush_mrs = rds_ib_flush_mrs, |
283 | .t_owner = THIS_MODULE, | 283 | .t_owner = THIS_MODULE, |
284 | .t_name = "infiniband", | 284 | .t_name = "infiniband", |
285 | .t_type = RDS_TRANS_IB | ||
285 | }; | 286 | }; |
286 | 287 | ||
287 | int __init rds_ib_init(void) | 288 | int __init rds_ib_init(void) |
diff --git a/net/rds/ib.h b/net/rds/ib.h index 455ae73047fe..1378b854cac0 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h | |||
@@ -15,6 +15,8 @@ | |||
15 | #define RDS_IB_DEFAULT_RECV_WR 1024 | 15 | #define RDS_IB_DEFAULT_RECV_WR 1024 |
16 | #define RDS_IB_DEFAULT_SEND_WR 256 | 16 | #define RDS_IB_DEFAULT_SEND_WR 256 |
17 | 17 | ||
18 | #define RDS_IB_DEFAULT_RETRY_COUNT 2 | ||
19 | |||
18 | #define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */ | 20 | #define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */ |
19 | 21 | ||
20 | extern struct list_head rds_ib_devices; | 22 | extern struct list_head rds_ib_devices; |
@@ -157,9 +159,6 @@ struct rds_ib_device { | |||
157 | struct ib_pd *pd; | 159 | struct ib_pd *pd; |
158 | struct ib_mr *mr; | 160 | struct ib_mr *mr; |
159 | struct rds_ib_mr_pool *mr_pool; | 161 | struct rds_ib_mr_pool *mr_pool; |
160 | int fmr_page_shift; | ||
161 | int fmr_page_size; | ||
162 | u64 fmr_page_mask; | ||
163 | unsigned int fmr_max_remaps; | 162 | unsigned int fmr_max_remaps; |
164 | unsigned int max_fmrs; | 163 | unsigned int max_fmrs; |
165 | int max_sge; | 164 | int max_sge; |
@@ -247,6 +246,7 @@ extern struct ib_client rds_ib_client; | |||
247 | 246 | ||
248 | extern unsigned int fmr_pool_size; | 247 | extern unsigned int fmr_pool_size; |
249 | extern unsigned int fmr_message_size; | 248 | extern unsigned int fmr_message_size; |
249 | extern unsigned int rds_ib_retry_count; | ||
250 | 250 | ||
251 | extern spinlock_t ib_nodev_conns_lock; | 251 | extern spinlock_t ib_nodev_conns_lock; |
252 | extern struct list_head ib_nodev_conns; | 252 | extern struct list_head ib_nodev_conns; |
@@ -355,17 +355,25 @@ extern ctl_table rds_ib_sysctl_table[]; | |||
355 | /* | 355 | /* |
356 | * Helper functions for getting/setting the header and data SGEs in | 356 | * Helper functions for getting/setting the header and data SGEs in |
357 | * RDS packets (not RDMA) | 357 | * RDS packets (not RDMA) |
358 | * | ||
359 | * From version 3.1 onwards, header is in front of data in the sge. | ||
358 | */ | 360 | */ |
359 | static inline struct ib_sge * | 361 | static inline struct ib_sge * |
360 | rds_ib_header_sge(struct rds_ib_connection *ic, struct ib_sge *sge) | 362 | rds_ib_header_sge(struct rds_ib_connection *ic, struct ib_sge *sge) |
361 | { | 363 | { |
362 | return &sge[0]; | 364 | if (ic->conn->c_version > RDS_PROTOCOL_3_0) |
365 | return &sge[0]; | ||
366 | else | ||
367 | return &sge[1]; | ||
363 | } | 368 | } |
364 | 369 | ||
365 | static inline struct ib_sge * | 370 | static inline struct ib_sge * |
366 | rds_ib_data_sge(struct rds_ib_connection *ic, struct ib_sge *sge) | 371 | rds_ib_data_sge(struct rds_ib_connection *ic, struct ib_sge *sge) |
367 | { | 372 | { |
368 | return &sge[1]; | 373 | if (ic->conn->c_version > RDS_PROTOCOL_3_0) |
374 | return &sge[1]; | ||
375 | else | ||
376 | return &sge[0]; | ||
369 | } | 377 | } |
370 | 378 | ||
371 | #endif | 379 | #endif |
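The sge helpers in ib.h now encode the wire-format change behind the RDS 3.1 protocol bump: 3.1 places the header in sge[0] and the payload in sge[1], while a 3.0 peer expects the reverse, and the negotiated conn->c_version selects the layout at run time (which is also why ib_cm.c below delays ring initialization until negotiation has finished). A hedged sketch of how a send path could fill a two-entry sge array through these helpers; the real logic lives in ib_send.c and is more involved:

/* Illustrative use of the helpers above; lengths and lkey handling are
 * simplified compared to the real send path. */
#include "ib.h"

static void demo_fill_sges(struct rds_ib_connection *ic,
			   struct ib_sge *sge,		/* two-entry array */
			   u64 hdr_dma, u64 data_dma,
			   u32 data_len, u32 lkey)
{
	struct ib_sge *hdr  = rds_ib_header_sge(ic, sge);
	struct ib_sge *data = rds_ib_data_sge(ic, sge);

	hdr->addr   = hdr_dma;
	hdr->length = sizeof(struct rds_header);
	hdr->lkey   = lkey;

	data->addr   = data_dma;
	data->length = data_len;
	data->lkey   = lkey;
}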
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index f8e40e1a6038..c2d372f13dbb 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -98,21 +98,34 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even | |||
98 | struct ib_qp_attr qp_attr; | 98 | struct ib_qp_attr qp_attr; |
99 | int err; | 99 | int err; |
100 | 100 | ||
101 | if (event->param.conn.private_data_len) { | 101 | if (event->param.conn.private_data_len >= sizeof(*dp)) { |
102 | dp = event->param.conn.private_data; | 102 | dp = event->param.conn.private_data; |
103 | 103 | ||
104 | rds_ib_set_protocol(conn, | 104 | /* make sure it isn't empty data */ |
105 | if (dp->dp_protocol_major) { | ||
106 | rds_ib_set_protocol(conn, | ||
105 | RDS_PROTOCOL(dp->dp_protocol_major, | 107 | RDS_PROTOCOL(dp->dp_protocol_major, |
106 | dp->dp_protocol_minor)); | 108 | dp->dp_protocol_minor)); |
107 | rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit)); | 109 | rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit)); |
110 | } | ||
108 | } | 111 | } |
109 | 112 | ||
110 | printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n", | 113 | printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n", |
111 | &conn->c_laddr, | 114 | &conn->c_faddr, |
112 | RDS_PROTOCOL_MAJOR(conn->c_version), | 115 | RDS_PROTOCOL_MAJOR(conn->c_version), |
113 | RDS_PROTOCOL_MINOR(conn->c_version), | 116 | RDS_PROTOCOL_MINOR(conn->c_version), |
114 | ic->i_flowctl ? ", flow control" : ""); | 117 | ic->i_flowctl ? ", flow control" : ""); |
115 | 118 | ||
119 | /* | ||
120 | * Init rings and fill recv. this needs to wait until protocol negotiation | ||
121 | * is complete, since ring layout is different from 3.0 to 3.1. | ||
122 | */ | ||
123 | rds_ib_send_init_ring(ic); | ||
124 | rds_ib_recv_init_ring(ic); | ||
125 | /* Post receive buffers - as a side effect, this will update | ||
126 | * the posted credit count. */ | ||
127 | rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1); | ||
128 | |||
116 | /* Tune RNR behavior */ | 129 | /* Tune RNR behavior */ |
117 | rds_ib_tune_rnr(ic, &qp_attr); | 130 | rds_ib_tune_rnr(ic, &qp_attr); |
118 | 131 | ||
@@ -145,7 +158,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn, | |||
145 | /* XXX tune these? */ | 158 | /* XXX tune these? */ |
146 | conn_param->responder_resources = 1; | 159 | conn_param->responder_resources = 1; |
147 | conn_param->initiator_depth = 1; | 160 | conn_param->initiator_depth = 1; |
148 | conn_param->retry_count = 7; | 161 | conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7); |
149 | conn_param->rnr_retry_count = 7; | 162 | conn_param->rnr_retry_count = 7; |
150 | 163 | ||
151 | if (dp) { | 164 | if (dp) { |
@@ -190,9 +203,9 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data) | |||
190 | rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); | 203 | rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); |
191 | break; | 204 | break; |
192 | default: | 205 | default: |
193 | printk(KERN_WARNING "RDS/ib: unhandled QP event %u " | 206 | rds_ib_conn_error(conn, "RDS/IB: Fatal QP Event %u " |
194 | "on connection to %pI4\n", event->event, | 207 | "- connection %pI4->%pI4, reconnecting\n", |
195 | &conn->c_faddr); | 208 | event->event, &conn->c_laddr, &conn->c_faddr); |
196 | break; | 209 | break; |
197 | } | 210 | } |
198 | } | 211 | } |
@@ -321,7 +334,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) | |||
321 | rdsdebug("send allocation failed\n"); | 334 | rdsdebug("send allocation failed\n"); |
322 | goto out; | 335 | goto out; |
323 | } | 336 | } |
324 | rds_ib_send_init_ring(ic); | 337 | memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work)); |
325 | 338 | ||
326 | ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work)); | 339 | ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work)); |
327 | if (ic->i_recvs == NULL) { | 340 | if (ic->i_recvs == NULL) { |
@@ -329,14 +342,10 @@ static int rds_ib_setup_qp(struct rds_connection *conn) | |||
329 | rdsdebug("recv allocation failed\n"); | 342 | rdsdebug("recv allocation failed\n"); |
330 | goto out; | 343 | goto out; |
331 | } | 344 | } |
345 | memset(ic->i_recvs, 0, ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work)); | ||
332 | 346 | ||
333 | rds_ib_recv_init_ring(ic); | ||
334 | rds_ib_recv_init_ack(ic); | 347 | rds_ib_recv_init_ack(ic); |
335 | 348 | ||
336 | /* Post receive buffers - as a side effect, this will update | ||
337 | * the posted credit count. */ | ||
338 | rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1); | ||
339 | |||
340 | rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr, | 349 | rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr, |
341 | ic->i_send_cq, ic->i_recv_cq); | 350 | ic->i_send_cq, ic->i_recv_cq); |
342 | 351 | ||
@@ -344,19 +353,32 @@ out: | |||
344 | return ret; | 353 | return ret; |
345 | } | 354 | } |
346 | 355 | ||
347 | static u32 rds_ib_protocol_compatible(const struct rds_ib_connect_private *dp) | 356 | static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event) |
348 | { | 357 | { |
358 | const struct rds_ib_connect_private *dp = event->param.conn.private_data; | ||
349 | u16 common; | 359 | u16 common; |
350 | u32 version = 0; | 360 | u32 version = 0; |
351 | 361 | ||
352 | /* rdma_cm private data is odd - when there is any private data in the | 362 | /* |
363 | * rdma_cm private data is odd - when there is any private data in the | ||
353 | * request, we will be given a pretty large buffer without telling us the | 364 | * request, we will be given a pretty large buffer without telling us the |
354 | * original size. The only way to tell the difference is by looking at | 365 | * original size. The only way to tell the difference is by looking at |
355 | * the contents, which are initialized to zero. | 366 | * the contents, which are initialized to zero. |
356 | * If the protocol version fields aren't set, this is a connection attempt | 367 | * If the protocol version fields aren't set, this is a connection attempt |
357 | * from an older version. This could be 3.0 or 2.0 - we can't tell. | 368 | * from an older version. This could be 3.0 or 2.0 - we can't tell. |
358 | * We really should have changed this for OFED 1.3 :-( */ | 369 | * We really should have changed this for OFED 1.3 :-( |
359 | if (dp->dp_protocol_major == 0) | 370 | */ |
371 | |||
372 | /* Be paranoid. RDS always has privdata */ | ||
373 | if (!event->param.conn.private_data_len) { | ||
374 | printk(KERN_NOTICE "RDS incoming connection has no private data, " | ||
375 | "rejecting\n"); | ||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | /* Even if len is crap *now* I still want to check it. -ASG */ | ||
380 | if (event->param.conn.private_data_len < sizeof (*dp) | ||
381 | || dp->dp_protocol_major == 0) | ||
360 | return RDS_PROTOCOL_3_0; | 382 | return RDS_PROTOCOL_3_0; |
361 | 383 | ||
362 | common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS; | 384 | common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS; |
@@ -388,7 +410,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, | |||
388 | int err, destroy = 1; | 410 | int err, destroy = 1; |
389 | 411 | ||
390 | /* Check whether the remote protocol version matches ours. */ | 412 | /* Check whether the remote protocol version matches ours. */ |
391 | version = rds_ib_protocol_compatible(dp); | 413 | version = rds_ib_protocol_compatible(event); |
392 | if (!version) | 414 | if (!version) |
393 | goto out; | 415 | goto out; |
394 | 416 | ||
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index 81033af93020..ef3ab5b7283e 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c | |||
@@ -211,7 +211,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev) | |||
211 | 211 | ||
212 | pool->fmr_attr.max_pages = fmr_message_size; | 212 | pool->fmr_attr.max_pages = fmr_message_size; |
213 | pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps; | 213 | pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps; |
214 | pool->fmr_attr.page_shift = rds_ibdev->fmr_page_shift; | 214 | pool->fmr_attr.page_shift = PAGE_SHIFT; |
215 | pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4; | 215 | pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4; |
216 | 216 | ||
217 | /* We never allow more than max_items MRs to be allocated. | 217 | /* We never allow more than max_items MRs to be allocated. |
@@ -349,13 +349,13 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm | |||
349 | unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]); | 349 | unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]); |
350 | u64 dma_addr = ib_sg_dma_address(dev, &scat[i]); | 350 | u64 dma_addr = ib_sg_dma_address(dev, &scat[i]); |
351 | 351 | ||
352 | if (dma_addr & ~rds_ibdev->fmr_page_mask) { | 352 | if (dma_addr & ~PAGE_MASK) { |
353 | if (i > 0) | 353 | if (i > 0) |
354 | return -EINVAL; | 354 | return -EINVAL; |
355 | else | 355 | else |
356 | ++page_cnt; | 356 | ++page_cnt; |
357 | } | 357 | } |
358 | if ((dma_addr + dma_len) & ~rds_ibdev->fmr_page_mask) { | 358 | if ((dma_addr + dma_len) & ~PAGE_MASK) { |
359 | if (i < sg_dma_len - 1) | 359 | if (i < sg_dma_len - 1) |
360 | return -EINVAL; | 360 | return -EINVAL; |
361 | else | 361 | else |
@@ -365,7 +365,7 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm | |||
365 | len += dma_len; | 365 | len += dma_len; |
366 | } | 366 | } |
367 | 367 | ||
368 | page_cnt += len >> rds_ibdev->fmr_page_shift; | 368 | page_cnt += len >> PAGE_SHIFT; |
369 | if (page_cnt > fmr_message_size) | 369 | if (page_cnt > fmr_message_size) |
370 | return -EINVAL; | 370 | return -EINVAL; |
371 | 371 | ||
@@ -378,9 +378,9 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm | |||
378 | unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]); | 378 | unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]); |
379 | u64 dma_addr = ib_sg_dma_address(dev, &scat[i]); | 379 | u64 dma_addr = ib_sg_dma_address(dev, &scat[i]); |
380 | 380 | ||
381 | for (j = 0; j < dma_len; j += rds_ibdev->fmr_page_size) | 381 | for (j = 0; j < dma_len; j += PAGE_SIZE) |
382 | dma_pages[page_cnt++] = | 382 | dma_pages[page_cnt++] = |
383 | (dma_addr & rds_ibdev->fmr_page_mask) + j; | 383 | (dma_addr & PAGE_MASK) + j; |
384 | } | 384 | } |
385 | 385 | ||
386 | ret = ib_map_phys_fmr(ibmr->fmr, | 386 | ret = ib_map_phys_fmr(ibmr->fmr, |
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 5709bad28329..cd7a6cfcab03 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c | |||
@@ -555,6 +555,47 @@ u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic) | |||
555 | return rds_ib_get_ack(ic); | 555 | return rds_ib_get_ack(ic); |
556 | } | 556 | } |
557 | 557 | ||
558 | static struct rds_header *rds_ib_get_header(struct rds_connection *conn, | ||
559 | struct rds_ib_recv_work *recv, | ||
560 | u32 data_len) | ||
561 | { | ||
562 | struct rds_ib_connection *ic = conn->c_transport_data; | ||
563 | void *hdr_buff = &ic->i_recv_hdrs[recv - ic->i_recvs]; | ||
564 | void *addr; | ||
565 | u32 misplaced_hdr_bytes; | ||
566 | |||
567 | /* | ||
568 | * Support header at the front (RDS 3.1+) as well as header-at-end. | ||
569 | * | ||
570 | * Cases: | ||
571 | * 1) header all in header buff (great!) | ||
572 | * 2) header all in data page (copy all to header buff) | ||
573 | * 3) header split across hdr buf + data page | ||
574 | * (move bit in hdr buff to end before copying other bit from data page) | ||
575 | */ | ||
576 | if (conn->c_version > RDS_PROTOCOL_3_0 || data_len == RDS_FRAG_SIZE) | ||
577 | return hdr_buff; | ||
578 | |||
579 | if (data_len <= (RDS_FRAG_SIZE - sizeof(struct rds_header))) { | ||
580 | addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0); | ||
581 | memcpy(hdr_buff, | ||
582 | addr + recv->r_frag->f_offset + data_len, | ||
583 | sizeof(struct rds_header)); | ||
584 | kunmap_atomic(addr, KM_SOFTIRQ0); | ||
585 | return hdr_buff; | ||
586 | } | ||
587 | |||
588 | misplaced_hdr_bytes = (sizeof(struct rds_header) - (RDS_FRAG_SIZE - data_len)); | ||
589 | |||
590 | memmove(hdr_buff + misplaced_hdr_bytes, hdr_buff, misplaced_hdr_bytes); | ||
591 | |||
592 | addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0); | ||
593 | memcpy(hdr_buff, addr + recv->r_frag->f_offset + data_len, | ||
594 | sizeof(struct rds_header) - misplaced_hdr_bytes); | ||
595 | kunmap_atomic(addr, KM_SOFTIRQ0); | ||
596 | return hdr_buff; | ||
597 | } | ||
598 | |||
558 | /* | 599 | /* |
559 | * It's kind of lame that we're copying from the posted receive pages into | 600 | * It's kind of lame that we're copying from the posted receive pages into |
560 | * long-lived bitmaps. We could have posted the bitmaps and rdma written into | 601 | * long-lived bitmaps. We could have posted the bitmaps and rdma written into |
@@ -645,7 +686,7 @@ struct rds_ib_ack_state { | |||
645 | }; | 686 | }; |
646 | 687 | ||
647 | static void rds_ib_process_recv(struct rds_connection *conn, | 688 | static void rds_ib_process_recv(struct rds_connection *conn, |
648 | struct rds_ib_recv_work *recv, u32 byte_len, | 689 | struct rds_ib_recv_work *recv, u32 data_len, |
649 | struct rds_ib_ack_state *state) | 690 | struct rds_ib_ack_state *state) |
650 | { | 691 | { |
651 | struct rds_ib_connection *ic = conn->c_transport_data; | 692 | struct rds_ib_connection *ic = conn->c_transport_data; |
@@ -655,9 +696,9 @@ static void rds_ib_process_recv(struct rds_connection *conn, | |||
655 | /* XXX shut down the connection if port 0,0 are seen? */ | 696 | /* XXX shut down the connection if port 0,0 are seen? */ |
656 | 697 | ||
657 | rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv, | 698 | rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv, |
658 | byte_len); | 699 | data_len); |
659 | 700 | ||
660 | if (byte_len < sizeof(struct rds_header)) { | 701 | if (data_len < sizeof(struct rds_header)) { |
661 | rds_ib_conn_error(conn, "incoming message " | 702 | rds_ib_conn_error(conn, "incoming message " |
662 | "from %pI4 didn't inclue a " | 703 | "from %pI4 didn't inclue a " |
663 | "header, disconnecting and " | 704 | "header, disconnecting and " |
@@ -665,9 +706,9 @@ static void rds_ib_process_recv(struct rds_connection *conn, | |||
665 | &conn->c_faddr); | 706 | &conn->c_faddr); |
666 | return; | 707 | return; |
667 | } | 708 | } |
668 | byte_len -= sizeof(struct rds_header); | 709 | data_len -= sizeof(struct rds_header); |
669 | 710 | ||
670 | ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs]; | 711 | ihdr = rds_ib_get_header(conn, recv, data_len); |
671 | 712 | ||
672 | /* Validate the checksum. */ | 713 | /* Validate the checksum. */ |
673 | if (!rds_message_verify_checksum(ihdr)) { | 714 | if (!rds_message_verify_checksum(ihdr)) { |
@@ -687,7 +728,7 @@ static void rds_ib_process_recv(struct rds_connection *conn, | |||
687 | if (ihdr->h_credit) | 728 | if (ihdr->h_credit) |
688 | rds_ib_send_add_credits(conn, ihdr->h_credit); | 729 | rds_ib_send_add_credits(conn, ihdr->h_credit); |
689 | 730 | ||
690 | if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && byte_len == 0) { | 731 | if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) { |
691 | /* This is an ACK-only packet. The fact that it gets | 732 | /* This is an ACK-only packet. The fact that it gets |
692 | * special treatment here is that historically, ACKs | 733 | * special treatment here is that historically, ACKs |
693 | * were rather special beasts. | 734 | * were rather special beasts. |
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c index 02e3e3d50d4a..8d8488306fe4 100644 --- a/net/rds/ib_stats.c +++ b/net/rds/ib_stats.c | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | DEFINE_PER_CPU(struct rds_ib_statistics, rds_ib_stats) ____cacheline_aligned; | 40 | DEFINE_PER_CPU(struct rds_ib_statistics, rds_ib_stats) ____cacheline_aligned; |
41 | 41 | ||
42 | static char *rds_ib_stat_names[] = { | 42 | static const char *const rds_ib_stat_names[] = { |
43 | "ib_connect_raced", | 43 | "ib_connect_raced", |
44 | "ib_listen_closed_stale", | 44 | "ib_listen_closed_stale", |
45 | "ib_tx_cq_call", | 45 | "ib_tx_cq_call", |
diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c index d87830db93a0..84b5ffcb280f 100644 --- a/net/rds/ib_sysctl.c +++ b/net/rds/ib_sysctl.c | |||
@@ -53,7 +53,17 @@ unsigned long rds_ib_sysctl_max_unsig_bytes = (16 << 20); | |||
53 | static unsigned long rds_ib_sysctl_max_unsig_bytes_min = 1; | 53 | static unsigned long rds_ib_sysctl_max_unsig_bytes_min = 1; |
54 | static unsigned long rds_ib_sysctl_max_unsig_bytes_max = ~0UL; | 54 | static unsigned long rds_ib_sysctl_max_unsig_bytes_max = ~0UL; |
55 | 55 | ||
56 | unsigned int rds_ib_sysctl_flow_control = 1; | 56 | /* |
57 | * This sysctl does nothing. | ||
58 | * | ||
59 | * Backwards compatibility with RDS 3.0 wire protocol | ||
60 | * disables initial FC credit exchange. | ||
61 | * If it's ever possible to drop 3.0 support, | ||
62 | * setting this to 1 and moving init/refill of send/recv | ||
63 | * rings from ib_cm_connect_complete() back into ib_setup_qp() | ||
64 | * will cause credits to be added before protocol negotiation. | ||
65 | */ | ||
66 | unsigned int rds_ib_sysctl_flow_control = 0; | ||
57 | 67 | ||
58 | ctl_table rds_ib_sysctl_table[] = { | 68 | ctl_table rds_ib_sysctl_table[] = { |
59 | { | 69 | { |
diff --git a/net/rds/info.c b/net/rds/info.c index 62aeef37aefe..814a91a6f4a7 100644 --- a/net/rds/info.c +++ b/net/rds/info.c | |||
@@ -79,6 +79,7 @@ void rds_info_register_func(int optname, rds_info_func func) | |||
79 | rds_info_funcs[offset] = func; | 79 | rds_info_funcs[offset] = func; |
80 | spin_unlock(&rds_info_lock); | 80 | spin_unlock(&rds_info_lock); |
81 | } | 81 | } |
82 | EXPORT_SYMBOL_GPL(rds_info_register_func); | ||
82 | 83 | ||
83 | void rds_info_deregister_func(int optname, rds_info_func func) | 84 | void rds_info_deregister_func(int optname, rds_info_func func) |
84 | { | 85 | { |
@@ -91,6 +92,7 @@ void rds_info_deregister_func(int optname, rds_info_func func) | |||
91 | rds_info_funcs[offset] = NULL; | 92 | rds_info_funcs[offset] = NULL; |
92 | spin_unlock(&rds_info_lock); | 93 | spin_unlock(&rds_info_lock); |
93 | } | 94 | } |
95 | EXPORT_SYMBOL_GPL(rds_info_deregister_func); | ||
94 | 96 | ||
95 | /* | 97 | /* |
96 | * Typically we hold an atomic kmap across multiple rds_info_copy() calls | 98 | * Typically we hold an atomic kmap across multiple rds_info_copy() calls |
@@ -137,6 +139,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data, | |||
137 | } | 139 | } |
138 | } | 140 | } |
139 | } | 141 | } |
142 | EXPORT_SYMBOL_GPL(rds_info_copy); | ||
140 | 143 | ||
141 | /* | 144 | /* |
142 | * @optval points to the userspace buffer that the information snapshot | 145 | * @optval points to the userspace buffer that the information snapshot |
diff --git a/net/rds/iw.c b/net/rds/iw.c index d16e1cbc8e83..db224f7c2937 100644 --- a/net/rds/iw.c +++ b/net/rds/iw.c | |||
@@ -83,23 +83,16 @@ void rds_iw_add_one(struct ib_device *device) | |||
83 | rds_iwdev->max_wrs = dev_attr->max_qp_wr; | 83 | rds_iwdev->max_wrs = dev_attr->max_qp_wr; |
84 | rds_iwdev->max_sge = min(dev_attr->max_sge, RDS_IW_MAX_SGE); | 84 | rds_iwdev->max_sge = min(dev_attr->max_sge, RDS_IW_MAX_SGE); |
85 | 85 | ||
86 | rds_iwdev->page_shift = max(PAGE_SHIFT, ffs(dev_attr->page_size_cap) - 1); | ||
87 | |||
88 | rds_iwdev->dev = device; | 86 | rds_iwdev->dev = device; |
89 | rds_iwdev->pd = ib_alloc_pd(device); | 87 | rds_iwdev->pd = ib_alloc_pd(device); |
90 | if (IS_ERR(rds_iwdev->pd)) | 88 | if (IS_ERR(rds_iwdev->pd)) |
91 | goto free_dev; | 89 | goto free_dev; |
92 | 90 | ||
93 | if (!rds_iwdev->dma_local_lkey) { | 91 | if (!rds_iwdev->dma_local_lkey) { |
94 | if (device->node_type != RDMA_NODE_RNIC) { | 92 | rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd, |
95 | rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd, | 93 | IB_ACCESS_REMOTE_READ | |
96 | IB_ACCESS_LOCAL_WRITE); | 94 | IB_ACCESS_REMOTE_WRITE | |
97 | } else { | 95 | IB_ACCESS_LOCAL_WRITE); |
98 | rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd, | ||
99 | IB_ACCESS_REMOTE_READ | | ||
100 | IB_ACCESS_REMOTE_WRITE | | ||
101 | IB_ACCESS_LOCAL_WRITE); | ||
102 | } | ||
103 | if (IS_ERR(rds_iwdev->mr)) | 96 | if (IS_ERR(rds_iwdev->mr)) |
104 | goto err_pd; | 97 | goto err_pd; |
105 | } else | 98 | } else |
@@ -291,6 +284,7 @@ struct rds_transport rds_iw_transport = { | |||
291 | .flush_mrs = rds_iw_flush_mrs, | 284 | .flush_mrs = rds_iw_flush_mrs, |
292 | .t_owner = THIS_MODULE, | 285 | .t_owner = THIS_MODULE, |
293 | .t_name = "iwarp", | 286 | .t_name = "iwarp", |
287 | .t_type = RDS_TRANS_IWARP, | ||
294 | .t_prefer_loopback = 1, | 288 | .t_prefer_loopback = 1, |
295 | }; | 289 | }; |
296 | 290 | ||
diff --git a/net/rds/iw.h b/net/rds/iw.h index 0715dde323e7..dd72b62bd506 100644 --- a/net/rds/iw.h +++ b/net/rds/iw.h | |||
@@ -181,7 +181,6 @@ struct rds_iw_device { | |||
181 | struct ib_pd *pd; | 181 | struct ib_pd *pd; |
182 | struct ib_mr *mr; | 182 | struct ib_mr *mr; |
183 | struct rds_iw_mr_pool *mr_pool; | 183 | struct rds_iw_mr_pool *mr_pool; |
184 | int page_shift; | ||
185 | int max_sge; | 184 | int max_sge; |
186 | unsigned int max_wrs; | 185 | unsigned int max_wrs; |
187 | unsigned int dma_local_lkey:1; | 186 | unsigned int dma_local_lkey:1; |
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index dcdb37da80f2..de4a1b16bf7b 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c | |||
@@ -263,18 +263,12 @@ static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg, | |||
263 | } | 263 | } |
264 | 264 | ||
265 | static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev, | 265 | static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev, |
266 | struct rds_iw_scatterlist *sg, | 266 | struct rds_iw_scatterlist *sg) |
267 | unsigned int dma_page_shift) | ||
268 | { | 267 | { |
269 | struct ib_device *dev = rds_iwdev->dev; | 268 | struct ib_device *dev = rds_iwdev->dev; |
270 | u64 *dma_pages = NULL; | 269 | u64 *dma_pages = NULL; |
271 | u64 dma_mask; | ||
272 | unsigned int dma_page_size; | ||
273 | int i, j, ret; | 270 | int i, j, ret; |
274 | 271 | ||
275 | dma_page_size = 1 << dma_page_shift; | ||
276 | dma_mask = dma_page_size - 1; | ||
277 | |||
278 | WARN_ON(sg->dma_len); | 272 | WARN_ON(sg->dma_len); |
279 | 273 | ||
280 | sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL); | 274 | sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL); |
@@ -295,18 +289,18 @@ static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev, | |||
295 | sg->bytes += dma_len; | 289 | sg->bytes += dma_len; |
296 | 290 | ||
297 | end_addr = dma_addr + dma_len; | 291 | end_addr = dma_addr + dma_len; |
298 | if (dma_addr & dma_mask) { | 292 | if (dma_addr & PAGE_MASK) { |
299 | if (i > 0) | 293 | if (i > 0) |
300 | goto out_unmap; | 294 | goto out_unmap; |
301 | dma_addr &= ~dma_mask; | 295 | dma_addr &= ~PAGE_MASK; |
302 | } | 296 | } |
303 | if (end_addr & dma_mask) { | 297 | if (end_addr & PAGE_MASK) { |
304 | if (i < sg->dma_len - 1) | 298 | if (i < sg->dma_len - 1) |
305 | goto out_unmap; | 299 | goto out_unmap; |
306 | end_addr = (end_addr + dma_mask) & ~dma_mask; | 300 | end_addr = (end_addr + PAGE_MASK) & ~PAGE_MASK; |
307 | } | 301 | } |
308 | 302 | ||
309 | sg->dma_npages += (end_addr - dma_addr) >> dma_page_shift; | 303 | sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT; |
310 | } | 304 | } |
311 | 305 | ||
312 | /* Now gather the dma addrs into one list */ | 306 | /* Now gather the dma addrs into one list */ |
@@ -325,8 +319,8 @@ static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev, | |||
325 | u64 end_addr; | 319 | u64 end_addr; |
326 | 320 | ||
327 | end_addr = dma_addr + dma_len; | 321 | end_addr = dma_addr + dma_len; |
328 | dma_addr &= ~dma_mask; | 322 | dma_addr &= ~PAGE_MASK; |
329 | for (; dma_addr < end_addr; dma_addr += dma_page_size) | 323 | for (; dma_addr < end_addr; dma_addr += PAGE_SIZE) |
330 | dma_pages[j++] = dma_addr; | 324 | dma_pages[j++] = dma_addr; |
331 | BUG_ON(j > sg->dma_npages); | 325 | BUG_ON(j > sg->dma_npages); |
332 | } | 326 | } |
@@ -727,7 +721,7 @@ static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping) | |||
727 | f_wr.wr.fast_reg.rkey = mapping->m_rkey; | 721 | f_wr.wr.fast_reg.rkey = mapping->m_rkey; |
728 | f_wr.wr.fast_reg.page_list = ibmr->page_list; | 722 | f_wr.wr.fast_reg.page_list = ibmr->page_list; |
729 | f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len; | 723 | f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len; |
730 | f_wr.wr.fast_reg.page_shift = ibmr->device->page_shift; | 724 | f_wr.wr.fast_reg.page_shift = PAGE_SHIFT; |
731 | f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE | | 725 | f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE | |
732 | IB_ACCESS_REMOTE_READ | | 726 | IB_ACCESS_REMOTE_READ | |
733 | IB_ACCESS_REMOTE_WRITE; | 727 | IB_ACCESS_REMOTE_WRITE; |
@@ -780,9 +774,7 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool, | |||
780 | 774 | ||
781 | rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len); | 775 | rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len); |
782 | 776 | ||
783 | dma_pages = rds_iw_map_scatterlist(rds_iwdev, | 777 | dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg); |
784 | &mapping->m_sg, | ||
785 | rds_iwdev->page_shift); | ||
786 | if (IS_ERR(dma_pages)) { | 778 | if (IS_ERR(dma_pages)) { |
787 | ret = PTR_ERR(dma_pages); | 779 | ret = PTR_ERR(dma_pages); |
788 | dma_pages = NULL; | 780 | dma_pages = NULL; |
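The iw.h/iw_rdma.c hunks above drop the per-device dma_page_shift and size the fast-reg page list off the host page instead (PAGE_SHIFT, PAGE_SIZE, PAGE_MASK). As a reference for the page arithmetic involved, here is a minimal hypothetical helper (not part of the patch) that counts how many host pages a DMA segment spans once its start is rounded down and its end rounded up to page boundaries:

    /* Hypothetical helper, for illustration only: number of PAGE_SIZE pages
     * covered by [addr, addr + len) after aligning the start down and the
     * end up to page boundaries. */
    static inline u64 dma_seg_npages(u64 addr, u64 len)
    {
            u64 start = addr & ~((u64)PAGE_SIZE - 1);                        /* round down */
            u64 end = (addr + len + PAGE_SIZE - 1) & ~((u64)PAGE_SIZE - 1);  /* round up */

            return (end - start) >> PAGE_SHIFT;
    }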
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c index 44a6a0551f28..1f5abe3cf2b4 100644 --- a/net/rds/iw_send.c +++ b/net/rds/iw_send.c | |||
@@ -779,7 +779,7 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd | |||
779 | send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey; | 779 | send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey; |
780 | send->s_wr.wr.fast_reg.page_list = send->s_page_list; | 780 | send->s_wr.wr.fast_reg.page_list = send->s_page_list; |
781 | send->s_wr.wr.fast_reg.page_list_len = nent; | 781 | send->s_wr.wr.fast_reg.page_list_len = nent; |
782 | send->s_wr.wr.fast_reg.page_shift = rds_iwdev->page_shift; | 782 | send->s_wr.wr.fast_reg.page_shift = PAGE_SHIFT; |
783 | send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE; | 783 | send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE; |
784 | send->s_wr.wr.fast_reg.iova_start = sg_addr; | 784 | send->s_wr.wr.fast_reg.iova_start = sg_addr; |
785 | 785 | ||
diff --git a/net/rds/iw_stats.c b/net/rds/iw_stats.c index ccc7e8f0bf0e..d33ea790484e 100644 --- a/net/rds/iw_stats.c +++ b/net/rds/iw_stats.c | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | DEFINE_PER_CPU(struct rds_iw_statistics, rds_iw_stats) ____cacheline_aligned; | 40 | DEFINE_PER_CPU(struct rds_iw_statistics, rds_iw_stats) ____cacheline_aligned; |
41 | 41 | ||
42 | static char *rds_iw_stat_names[] = { | 42 | static const char *const rds_iw_stat_names[] = { |
43 | "iw_connect_raced", | 43 | "iw_connect_raced", |
44 | "iw_listen_closed_stale", | 44 | "iw_listen_closed_stale", |
45 | "iw_tx_cq_call", | 45 | "iw_tx_cq_call", |
diff --git a/net/rds/message.c b/net/rds/message.c index 5a15dc8d0cd7..ca50a8ec9742 100644 --- a/net/rds/message.c +++ b/net/rds/message.c | |||
@@ -50,6 +50,7 @@ void rds_message_addref(struct rds_message *rm) | |||
50 | rdsdebug("addref rm %p ref %d\n", rm, atomic_read(&rm->m_refcount)); | 50 | rdsdebug("addref rm %p ref %d\n", rm, atomic_read(&rm->m_refcount)); |
51 | atomic_inc(&rm->m_refcount); | 51 | atomic_inc(&rm->m_refcount); |
52 | } | 52 | } |
53 | EXPORT_SYMBOL_GPL(rds_message_addref); | ||
53 | 54 | ||
54 | /* | 55 | /* |
55 | * This relies on dma_map_sg() not touching sg[].page during merging. | 56 | * This relies on dma_map_sg() not touching sg[].page during merging. |
@@ -92,6 +93,7 @@ void rds_message_put(struct rds_message *rm) | |||
92 | kfree(rm); | 93 | kfree(rm); |
93 | } | 94 | } |
94 | } | 95 | } |
96 | EXPORT_SYMBOL_GPL(rds_message_put); | ||
95 | 97 | ||
96 | void rds_message_inc_free(struct rds_incoming *inc) | 98 | void rds_message_inc_free(struct rds_incoming *inc) |
97 | { | 99 | { |
@@ -108,6 +110,7 @@ void rds_message_populate_header(struct rds_header *hdr, __be16 sport, | |||
108 | hdr->h_sequence = cpu_to_be64(seq); | 110 | hdr->h_sequence = cpu_to_be64(seq); |
109 | hdr->h_exthdr[0] = RDS_EXTHDR_NONE; | 111 | hdr->h_exthdr[0] = RDS_EXTHDR_NONE; |
110 | } | 112 | } |
113 | EXPORT_SYMBOL_GPL(rds_message_populate_header); | ||
111 | 114 | ||
112 | int rds_message_add_extension(struct rds_header *hdr, | 115 | int rds_message_add_extension(struct rds_header *hdr, |
113 | unsigned int type, const void *data, unsigned int len) | 116 | unsigned int type, const void *data, unsigned int len) |
@@ -133,6 +136,7 @@ int rds_message_add_extension(struct rds_header *hdr, | |||
133 | dst[len] = RDS_EXTHDR_NONE; | 136 | dst[len] = RDS_EXTHDR_NONE; |
134 | return 1; | 137 | return 1; |
135 | } | 138 | } |
139 | EXPORT_SYMBOL_GPL(rds_message_add_extension); | ||
136 | 140 | ||
137 | /* | 141 | /* |
138 | * If a message has extension headers, retrieve them here. | 142 | * If a message has extension headers, retrieve them here. |
@@ -208,6 +212,7 @@ int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 o | |||
208 | ext_hdr.h_rdma_offset = cpu_to_be32(offset); | 212 | ext_hdr.h_rdma_offset = cpu_to_be32(offset); |
209 | return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr)); | 213 | return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr)); |
210 | } | 214 | } |
215 | EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension); | ||
211 | 216 | ||
212 | struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp) | 217 | struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp) |
213 | { | 218 | { |
@@ -399,4 +404,5 @@ void rds_message_unmapped(struct rds_message *rm) | |||
399 | if (waitqueue_active(&rds_message_flush_waitq)) | 404 | if (waitqueue_active(&rds_message_flush_waitq)) |
400 | wake_up(&rds_message_flush_waitq); | 405 | wake_up(&rds_message_flush_waitq); |
401 | } | 406 | } |
407 | EXPORT_SYMBOL_GPL(rds_message_unmapped); | ||
402 | 408 | ||
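The run of EXPORT_SYMBOL_GPL() additions in message.c (and in the core files that follow) is what lets the transports, which this series builds as separate modules (rds_rdma, rds_tcp), call back into the rds core. The pattern, shown with a hypothetical helper rather than a real RDS symbol:

    #include <linux/module.h>

    /* Hypothetical core helper: defined in the core module and exported
     * GPL-only, so other GPL modules can call it once they declare the
     * prototype (normally via a shared header such as rds.h). */
    int rds_example_helper(int x)
    {
            return x + 1;
    }
    EXPORT_SYMBOL_GPL(rds_example_helper);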
diff --git a/net/rds/page.c b/net/rds/page.c index c460743a89ad..55c21efdb62e 100644 --- a/net/rds/page.c +++ b/net/rds/page.c | |||
@@ -81,6 +81,7 @@ int rds_page_copy_user(struct page *page, unsigned long offset, | |||
81 | 81 | ||
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | EXPORT_SYMBOL_GPL(rds_page_copy_user); | ||
84 | 85 | ||
85 | /* | 86 | /* |
86 | * Message allocation uses this to build up regions of a message. | 87 | * Message allocation uses this to build up regions of a message. |
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index 7d0f901c93d5..9ece910ea394 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c | |||
@@ -101,7 +101,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, | |||
101 | break; | 101 | break; |
102 | 102 | ||
103 | case RDMA_CM_EVENT_DISCONNECTED: | 103 | case RDMA_CM_EVENT_DISCONNECTED: |
104 | printk(KERN_WARNING "RDS/IW: DISCONNECT event - dropping connection " | 104 | printk(KERN_WARNING "RDS/RDMA: DISCONNECT event - dropping connection " |
105 | "%pI4->%pI4\n", &conn->c_laddr, | 105 | "%pI4->%pI4\n", &conn->c_laddr, |
106 | &conn->c_faddr); | 106 | &conn->c_faddr); |
107 | rds_conn_drop(conn); | 107 | rds_conn_drop(conn); |
@@ -132,12 +132,12 @@ static int __init rds_rdma_listen_init(void) | |||
132 | cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP); | 132 | cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP); |
133 | if (IS_ERR(cm_id)) { | 133 | if (IS_ERR(cm_id)) { |
134 | ret = PTR_ERR(cm_id); | 134 | ret = PTR_ERR(cm_id); |
135 | printk(KERN_ERR "RDS/IW: failed to setup listener, " | 135 | printk(KERN_ERR "RDS/RDMA: failed to setup listener, " |
136 | "rdma_create_id() returned %d\n", ret); | 136 | "rdma_create_id() returned %d\n", ret); |
137 | goto out; | 137 | goto out; |
138 | } | 138 | } |
139 | 139 | ||
140 | sin.sin_family = PF_INET, | 140 | sin.sin_family = AF_INET, |
141 | sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY); | 141 | sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY); |
142 | sin.sin_port = (__force u16)htons(RDS_PORT); | 142 | sin.sin_port = (__force u16)htons(RDS_PORT); |
143 | 143 | ||
@@ -147,14 +147,14 @@ static int __init rds_rdma_listen_init(void) | |||
147 | */ | 147 | */ |
148 | ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); | 148 | ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); |
149 | if (ret) { | 149 | if (ret) { |
150 | printk(KERN_ERR "RDS/IW: failed to setup listener, " | 150 | printk(KERN_ERR "RDS/RDMA: failed to setup listener, " |
151 | "rdma_bind_addr() returned %d\n", ret); | 151 | "rdma_bind_addr() returned %d\n", ret); |
152 | goto out; | 152 | goto out; |
153 | } | 153 | } |
154 | 154 | ||
155 | ret = rdma_listen(cm_id, 128); | 155 | ret = rdma_listen(cm_id, 128); |
156 | if (ret) { | 156 | if (ret) { |
157 | printk(KERN_ERR "RDS/IW: failed to setup listener, " | 157 | printk(KERN_ERR "RDS/RDMA: failed to setup listener, " |
158 | "rdma_listen() returned %d\n", ret); | 158 | "rdma_listen() returned %d\n", ret); |
159 | goto out; | 159 | goto out; |
160 | } | 160 | } |
@@ -203,6 +203,7 @@ err_iw_init: | |||
203 | out: | 203 | out: |
204 | return ret; | 204 | return ret; |
205 | } | 205 | } |
206 | module_init(rds_rdma_init); | ||
206 | 207 | ||
207 | void rds_rdma_exit(void) | 208 | void rds_rdma_exit(void) |
208 | { | 209 | { |
@@ -211,4 +212,9 @@ void rds_rdma_exit(void) | |||
211 | rds_ib_exit(); | 212 | rds_ib_exit(); |
212 | rds_iw_exit(); | 213 | rds_iw_exit(); |
213 | } | 214 | } |
215 | module_exit(rds_rdma_exit); | ||
216 | |||
217 | MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>"); | ||
218 | MODULE_DESCRIPTION("RDS: IB/iWARP transport"); | ||
219 | MODULE_LICENSE("Dual BSD/GPL"); | ||
214 | 220 | ||
diff --git a/net/rds/rds.h b/net/rds/rds.h index dbe111236783..85d6f897ecc7 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h | |||
@@ -311,11 +311,17 @@ struct rds_notifier { | |||
311 | * flag and header. | 311 | * flag and header. |
312 | */ | 312 | */ |
313 | 313 | ||
314 | #define RDS_TRANS_IB 0 | ||
315 | #define RDS_TRANS_IWARP 1 | ||
316 | #define RDS_TRANS_TCP 2 | ||
317 | #define RDS_TRANS_COUNT 3 | ||
318 | |||
314 | struct rds_transport { | 319 | struct rds_transport { |
315 | char t_name[TRANSNAMSIZ]; | 320 | char t_name[TRANSNAMSIZ]; |
316 | struct list_head t_item; | 321 | struct list_head t_item; |
317 | struct module *t_owner; | 322 | struct module *t_owner; |
318 | unsigned int t_prefer_loopback:1; | 323 | unsigned int t_prefer_loopback:1; |
324 | unsigned int t_type; | ||
319 | 325 | ||
320 | int (*laddr_check)(__be32 addr); | 326 | int (*laddr_check)(__be32 addr); |
321 | int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp); | 327 | int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp); |
@@ -652,7 +658,8 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats); | |||
652 | int __init rds_stats_init(void); | 658 | int __init rds_stats_init(void); |
653 | void rds_stats_exit(void); | 659 | void rds_stats_exit(void); |
654 | void rds_stats_info_copy(struct rds_info_iterator *iter, | 660 | void rds_stats_info_copy(struct rds_info_iterator *iter, |
655 | uint64_t *values, char **names, size_t nr); | 661 | uint64_t *values, const char *const *names, |
662 | size_t nr); | ||
656 | 663 | ||
657 | /* sysctl.c */ | 664 | /* sysctl.c */ |
658 | int __init rds_sysctl_init(void); | 665 | int __init rds_sysctl_init(void); |
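Besides the new RDS_TRANS_* identifiers and the t_type field (each transport tags itself, as rds_tcp_transport does further down), the rds_stats_info_copy() prototype becomes const-correct. Below is a sketch of a caller that compiles against the new signature; the table contents and names are illustrative, not taken from the tree:

    /* Illustrative stat-name table and caller matching the const-qualified
     * rds_stats_info_copy() prototype introduced above. */
    static const char *const example_stat_names[] = {
            "conn_reset",
            "recv_drop_bad_checksum",
    };

    static void example_stats_copy(struct rds_info_iterator *iter,
                                   uint64_t *values)
    {
            rds_stats_info_copy(iter, values, example_stat_names,
                                ARRAY_SIZE(example_stat_names));
    }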
diff --git a/net/rds/recv.c b/net/rds/recv.c index f2118c51cfa3..fdff33c7b432 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c | |||
@@ -46,12 +46,14 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, | |||
46 | inc->i_saddr = saddr; | 46 | inc->i_saddr = saddr; |
47 | inc->i_rdma_cookie = 0; | 47 | inc->i_rdma_cookie = 0; |
48 | } | 48 | } |
49 | EXPORT_SYMBOL_GPL(rds_inc_init); | ||
49 | 50 | ||
50 | void rds_inc_addref(struct rds_incoming *inc) | 51 | void rds_inc_addref(struct rds_incoming *inc) |
51 | { | 52 | { |
52 | rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount)); | 53 | rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount)); |
53 | atomic_inc(&inc->i_refcount); | 54 | atomic_inc(&inc->i_refcount); |
54 | } | 55 | } |
56 | EXPORT_SYMBOL_GPL(rds_inc_addref); | ||
55 | 57 | ||
56 | void rds_inc_put(struct rds_incoming *inc) | 58 | void rds_inc_put(struct rds_incoming *inc) |
57 | { | 59 | { |
@@ -62,6 +64,7 @@ void rds_inc_put(struct rds_incoming *inc) | |||
62 | inc->i_conn->c_trans->inc_free(inc); | 64 | inc->i_conn->c_trans->inc_free(inc); |
63 | } | 65 | } |
64 | } | 66 | } |
67 | EXPORT_SYMBOL_GPL(rds_inc_put); | ||
65 | 68 | ||
66 | static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk, | 69 | static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk, |
67 | struct rds_cong_map *map, | 70 | struct rds_cong_map *map, |
@@ -237,6 +240,7 @@ out: | |||
237 | if (rs) | 240 | if (rs) |
238 | rds_sock_put(rs); | 241 | rds_sock_put(rs); |
239 | } | 242 | } |
243 | EXPORT_SYMBOL_GPL(rds_recv_incoming); | ||
240 | 244 | ||
241 | /* | 245 | /* |
242 | * be very careful here. This is being called as the condition in | 246 | * be very careful here. This is being called as the condition in |
@@ -409,18 +413,18 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
409 | if (msg_flags & MSG_OOB) | 413 | if (msg_flags & MSG_OOB) |
410 | goto out; | 414 | goto out; |
411 | 415 | ||
412 | /* If there are pending notifications, do those - and nothing else */ | 416 | while (1) { |
413 | if (!list_empty(&rs->rs_notify_queue)) { | 417 | /* If there are pending notifications, do those - and nothing else */ |
414 | ret = rds_notify_queue_get(rs, msg); | 418 | if (!list_empty(&rs->rs_notify_queue)) { |
415 | goto out; | 419 | ret = rds_notify_queue_get(rs, msg); |
416 | } | 420 | break; |
421 | } | ||
417 | 422 | ||
418 | if (rs->rs_cong_notify) { | 423 | if (rs->rs_cong_notify) { |
419 | ret = rds_notify_cong(rs, msg); | 424 | ret = rds_notify_cong(rs, msg); |
420 | goto out; | 425 | break; |
421 | } | 426 | } |
422 | 427 | ||
423 | while (1) { | ||
424 | if (!rds_next_incoming(rs, &inc)) { | 428 | if (!rds_next_incoming(rs, &inc)) { |
425 | if (nonblock) { | 429 | if (nonblock) { |
426 | ret = -EAGAIN; | 430 | ret = -EAGAIN; |
@@ -428,7 +432,9 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
428 | } | 432 | } |
429 | 433 | ||
430 | timeo = wait_event_interruptible_timeout(*sk->sk_sleep, | 434 | timeo = wait_event_interruptible_timeout(*sk->sk_sleep, |
431 | rds_next_incoming(rs, &inc), | 435 | (!list_empty(&rs->rs_notify_queue) |
436 | || rs->rs_cong_notify | ||
437 | || rds_next_incoming(rs, &inc)), | ||
432 | timeo); | 438 | timeo); |
433 | rdsdebug("recvmsg woke inc %p timeo %ld\n", inc, | 439 | rdsdebug("recvmsg woke inc %p timeo %ld\n", inc, |
434 | timeo); | 440 | timeo); |
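The rds_recvmsg() change folds the notification checks into the receive loop and widens the wait condition, so a blocked reader is woken (and the checks re-run) for pending RDMA notifications and congestion updates as well as for newly arrived data. A condensed paraphrase of the new control flow, not a drop-in copy of the function:

    /* Condensed sketch of the reworked receive loop; error/timeout handling
     * and the copy-to-user path are elided. */
    while (1) {
            if (!list_empty(&rs->rs_notify_queue)) {        /* 1) queued notifications */
                    ret = rds_notify_queue_get(rs, msg);
                    break;
            }
            if (rs->rs_cong_notify) {                       /* 2) congestion updates */
                    ret = rds_notify_cong(rs, msg);
                    break;
            }
            if (!rds_next_incoming(rs, &inc)) {             /* 3) incoming data */
                    if (nonblock) {
                            ret = -EAGAIN;
                            break;
                    }
                    /* sleep until any of the three conditions holds */
                    timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
                                    !list_empty(&rs->rs_notify_queue) ||
                                    rs->rs_cong_notify ||
                                    rds_next_incoming(rs, &inc),
                                    timeo);
                    continue;
            }
            /* ... copy the message to userspace and deliver cmsgs ... */
    }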
diff --git a/net/rds/send.c b/net/rds/send.c index a4a7f428cd76..28c88ff3d038 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -439,6 +439,7 @@ void rds_rdma_send_complete(struct rds_message *rm, int status) | |||
439 | sock_put(rds_rs_to_sk(rs)); | 439 | sock_put(rds_rs_to_sk(rs)); |
440 | } | 440 | } |
441 | } | 441 | } |
442 | EXPORT_SYMBOL_GPL(rds_rdma_send_complete); | ||
442 | 443 | ||
443 | /* | 444 | /* |
444 | * This is the same as rds_rdma_send_complete except we | 445 | * This is the same as rds_rdma_send_complete except we |
@@ -494,6 +495,7 @@ out: | |||
494 | 495 | ||
495 | return found; | 496 | return found; |
496 | } | 497 | } |
498 | EXPORT_SYMBOL_GPL(rds_send_get_message); | ||
497 | 499 | ||
498 | /* | 500 | /* |
499 | * This removes messages from the socket's list if they're on it. The list | 501 | * This removes messages from the socket's list if they're on it. The list |
@@ -610,6 +612,7 @@ void rds_send_drop_acked(struct rds_connection *conn, u64 ack, | |||
610 | /* now remove the messages from the sock list as needed */ | 612 | /* now remove the messages from the sock list as needed */ |
611 | rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS); | 613 | rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS); |
612 | } | 614 | } |
615 | EXPORT_SYMBOL_GPL(rds_send_drop_acked); | ||
613 | 616 | ||
614 | void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | 617 | void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) |
615 | { | 618 | { |
diff --git a/net/rds/stats.c b/net/rds/stats.c index 637146893cf3..7598eb07cfb1 100644 --- a/net/rds/stats.c +++ b/net/rds/stats.c | |||
@@ -37,10 +37,11 @@ | |||
37 | #include "rds.h" | 37 | #include "rds.h" |
38 | 38 | ||
39 | DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats); | 39 | DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats); |
40 | EXPORT_PER_CPU_SYMBOL_GPL(rds_stats); | ||
40 | 41 | ||
41 | /* :.,$s/unsigned long\>.*\<s_\(.*\);/"\1",/g */ | 42 | /* :.,$s/unsigned long\>.*\<s_\(.*\);/"\1",/g */ |
42 | 43 | ||
43 | static char *rds_stat_names[] = { | 44 | static const char *const rds_stat_names[] = { |
44 | "conn_reset", | 45 | "conn_reset", |
45 | "recv_drop_bad_checksum", | 46 | "recv_drop_bad_checksum", |
46 | "recv_drop_old_seq", | 47 | "recv_drop_old_seq", |
@@ -77,7 +78,7 @@ static char *rds_stat_names[] = { | |||
77 | }; | 78 | }; |
78 | 79 | ||
79 | void rds_stats_info_copy(struct rds_info_iterator *iter, | 80 | void rds_stats_info_copy(struct rds_info_iterator *iter, |
80 | uint64_t *values, char **names, size_t nr) | 81 | uint64_t *values, const char *const *names, size_t nr) |
81 | { | 82 | { |
82 | struct rds_info_counter ctr; | 83 | struct rds_info_counter ctr; |
83 | size_t i; | 84 | size_t i; |
@@ -90,6 +91,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter, | |||
90 | rds_info_copy(iter, &ctr, sizeof(ctr)); | 91 | rds_info_copy(iter, &ctr, sizeof(ctr)); |
91 | } | 92 | } |
92 | } | 93 | } |
94 | EXPORT_SYMBOL_GPL(rds_stats_info_copy); | ||
93 | 95 | ||
94 | /* | 96 | /* |
95 | * This gives global counters across all the transports. The strings | 97 | * This gives global counters across all the transports. The strings |
diff --git a/net/rds/tcp.c b/net/rds/tcp.c new file mode 100644 index 000000000000..b5198aee45d3 --- /dev/null +++ b/net/rds/tcp.c | |||
@@ -0,0 +1,320 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 Oracle. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/in.h> | ||
35 | #include <net/tcp.h> | ||
36 | |||
37 | #include "rds.h" | ||
38 | #include "tcp.h" | ||
39 | |||
40 | /* only for info exporting */ | ||
41 | static DEFINE_SPINLOCK(rds_tcp_tc_list_lock); | ||
42 | static LIST_HEAD(rds_tcp_tc_list); | ||
43 | unsigned int rds_tcp_tc_count; | ||
44 | |||
45 | /* Track rds_tcp_connection structs so they can be cleaned up */ | ||
46 | static DEFINE_SPINLOCK(rds_tcp_conn_lock); | ||
47 | static LIST_HEAD(rds_tcp_conn_list); | ||
48 | |||
49 | static struct kmem_cache *rds_tcp_conn_slab; | ||
50 | |||
51 | #define RDS_TCP_DEFAULT_BUFSIZE (128 * 1024) | ||
52 | |||
53 | /* doing it this way avoids calling tcp_sk() */ | ||
54 | void rds_tcp_nonagle(struct socket *sock) | ||
55 | { | ||
56 | mm_segment_t oldfs = get_fs(); | ||
57 | int val = 1; | ||
58 | |||
59 | set_fs(KERNEL_DS); | ||
60 | sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val, | ||
61 | sizeof(val)); | ||
62 | set_fs(oldfs); | ||
63 | } | ||
64 | |||
65 | void rds_tcp_tune(struct socket *sock) | ||
66 | { | ||
67 | struct sock *sk = sock->sk; | ||
68 | |||
69 | rds_tcp_nonagle(sock); | ||
70 | |||
71 | /* | ||
72 | * We're trying to saturate gigabit with the default, | ||
73 | * see svc_sock_setbufsize(). | ||
74 | */ | ||
75 | lock_sock(sk); | ||
76 | sk->sk_sndbuf = RDS_TCP_DEFAULT_BUFSIZE; | ||
77 | sk->sk_rcvbuf = RDS_TCP_DEFAULT_BUFSIZE; | ||
78 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK; | ||
79 | release_sock(sk); | ||
80 | } | ||
81 | |||
82 | u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc) | ||
83 | { | ||
84 | return tcp_sk(tc->t_sock->sk)->snd_nxt; | ||
85 | } | ||
86 | |||
87 | u32 rds_tcp_snd_una(struct rds_tcp_connection *tc) | ||
88 | { | ||
89 | return tcp_sk(tc->t_sock->sk)->snd_una; | ||
90 | } | ||
91 | |||
92 | void rds_tcp_restore_callbacks(struct socket *sock, | ||
93 | struct rds_tcp_connection *tc) | ||
94 | { | ||
95 | rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc); | ||
96 | write_lock_bh(&sock->sk->sk_callback_lock); | ||
97 | |||
98 | /* done under the callback_lock to serialize with write_space */ | ||
99 | spin_lock(&rds_tcp_tc_list_lock); | ||
100 | list_del_init(&tc->t_list_item); | ||
101 | rds_tcp_tc_count--; | ||
102 | spin_unlock(&rds_tcp_tc_list_lock); | ||
103 | |||
104 | tc->t_sock = NULL; | ||
105 | |||
106 | sock->sk->sk_write_space = tc->t_orig_write_space; | ||
107 | sock->sk->sk_data_ready = tc->t_orig_data_ready; | ||
108 | sock->sk->sk_state_change = tc->t_orig_state_change; | ||
109 | sock->sk->sk_user_data = NULL; | ||
110 | |||
111 | write_unlock_bh(&sock->sk->sk_callback_lock); | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * This is the only path that sets tc->t_sock. Send and receive trust that | ||
116 | * it is set. The RDS_CONN_CONNECTED bit protects those paths from being | ||
117 | * called while it isn't set. | ||
118 | */ | ||
119 | void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn) | ||
120 | { | ||
121 | struct rds_tcp_connection *tc = conn->c_transport_data; | ||
122 | |||
123 | rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc); | ||
124 | write_lock_bh(&sock->sk->sk_callback_lock); | ||
125 | |||
126 | /* done under the callback_lock to serialize with write_space */ | ||
127 | spin_lock(&rds_tcp_tc_list_lock); | ||
128 | list_add_tail(&tc->t_list_item, &rds_tcp_tc_list); | ||
129 | rds_tcp_tc_count++; | ||
130 | spin_unlock(&rds_tcp_tc_list_lock); | ||
131 | |||
132 | /* accepted sockets need our listen data ready undone */ | ||
133 | if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready) | ||
134 | sock->sk->sk_data_ready = sock->sk->sk_user_data; | ||
135 | |||
136 | tc->t_sock = sock; | ||
137 | tc->conn = conn; | ||
138 | tc->t_orig_data_ready = sock->sk->sk_data_ready; | ||
139 | tc->t_orig_write_space = sock->sk->sk_write_space; | ||
140 | tc->t_orig_state_change = sock->sk->sk_state_change; | ||
141 | |||
142 | sock->sk->sk_user_data = conn; | ||
143 | sock->sk->sk_data_ready = rds_tcp_data_ready; | ||
144 | sock->sk->sk_write_space = rds_tcp_write_space; | ||
145 | sock->sk->sk_state_change = rds_tcp_state_change; | ||
146 | |||
147 | write_unlock_bh(&sock->sk->sk_callback_lock); | ||
148 | } | ||
149 | |||
150 | static void rds_tcp_tc_info(struct socket *sock, unsigned int len, | ||
151 | struct rds_info_iterator *iter, | ||
152 | struct rds_info_lengths *lens) | ||
153 | { | ||
154 | struct rds_info_tcp_socket tsinfo; | ||
155 | struct rds_tcp_connection *tc; | ||
156 | unsigned long flags; | ||
157 | struct sockaddr_in sin; | ||
158 | int sinlen; | ||
159 | |||
160 | spin_lock_irqsave(&rds_tcp_tc_list_lock, flags); | ||
161 | |||
162 | if (len / sizeof(tsinfo) < rds_tcp_tc_count) | ||
163 | goto out; | ||
164 | |||
165 | list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) { | ||
166 | |||
167 | sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 0); | ||
168 | tsinfo.local_addr = sin.sin_addr.s_addr; | ||
169 | tsinfo.local_port = sin.sin_port; | ||
170 | sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 1); | ||
171 | tsinfo.peer_addr = sin.sin_addr.s_addr; | ||
172 | tsinfo.peer_port = sin.sin_port; | ||
173 | |||
174 | tsinfo.hdr_rem = tc->t_tinc_hdr_rem; | ||
175 | tsinfo.data_rem = tc->t_tinc_data_rem; | ||
176 | tsinfo.last_sent_nxt = tc->t_last_sent_nxt; | ||
177 | tsinfo.last_expected_una = tc->t_last_expected_una; | ||
178 | tsinfo.last_seen_una = tc->t_last_seen_una; | ||
179 | |||
180 | rds_info_copy(iter, &tsinfo, sizeof(tsinfo)); | ||
181 | } | ||
182 | |||
183 | out: | ||
184 | lens->nr = rds_tcp_tc_count; | ||
185 | lens->each = sizeof(tsinfo); | ||
186 | |||
187 | spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); | ||
188 | } | ||
189 | |||
190 | static int rds_tcp_laddr_check(__be32 addr) | ||
191 | { | ||
192 | if (inet_addr_type(&init_net, addr) == RTN_LOCAL) | ||
193 | return 0; | ||
194 | return -EADDRNOTAVAIL; | ||
195 | } | ||
196 | |||
197 | static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) | ||
198 | { | ||
199 | struct rds_tcp_connection *tc; | ||
200 | |||
201 | tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp); | ||
202 | if (tc == NULL) | ||
203 | return -ENOMEM; | ||
204 | |||
205 | tc->t_sock = NULL; | ||
206 | tc->t_tinc = NULL; | ||
207 | tc->t_tinc_hdr_rem = sizeof(struct rds_header); | ||
208 | tc->t_tinc_data_rem = 0; | ||
209 | |||
210 | conn->c_transport_data = tc; | ||
211 | |||
212 | spin_lock_irq(&rds_tcp_conn_lock); | ||
213 | list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list); | ||
214 | spin_unlock_irq(&rds_tcp_conn_lock); | ||
215 | |||
216 | rdsdebug("alloced tc %p\n", conn->c_transport_data); | ||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | static void rds_tcp_conn_free(void *arg) | ||
221 | { | ||
222 | struct rds_tcp_connection *tc = arg; | ||
223 | rdsdebug("freeing tc %p\n", tc); | ||
224 | kmem_cache_free(rds_tcp_conn_slab, tc); | ||
225 | } | ||
226 | |||
227 | static void rds_tcp_destroy_conns(void) | ||
228 | { | ||
229 | struct rds_tcp_connection *tc, *_tc; | ||
230 | LIST_HEAD(tmp_list); | ||
231 | |||
232 | /* avoid calling conn_destroy with irqs off */ | ||
233 | spin_lock_irq(&rds_tcp_conn_lock); | ||
234 | list_splice(&rds_tcp_conn_list, &tmp_list); | ||
235 | INIT_LIST_HEAD(&rds_tcp_conn_list); | ||
236 | spin_unlock_irq(&rds_tcp_conn_lock); | ||
237 | |||
238 | list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) { | ||
239 | if (tc->conn->c_passive) | ||
240 | rds_conn_destroy(tc->conn->c_passive); | ||
241 | rds_conn_destroy(tc->conn); | ||
242 | } | ||
243 | } | ||
244 | |||
245 | void rds_tcp_exit(void) | ||
246 | { | ||
247 | rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); | ||
248 | rds_tcp_listen_stop(); | ||
249 | rds_tcp_destroy_conns(); | ||
250 | rds_trans_unregister(&rds_tcp_transport); | ||
251 | rds_tcp_recv_exit(); | ||
252 | kmem_cache_destroy(rds_tcp_conn_slab); | ||
253 | } | ||
254 | module_exit(rds_tcp_exit); | ||
255 | |||
256 | struct rds_transport rds_tcp_transport = { | ||
257 | .laddr_check = rds_tcp_laddr_check, | ||
258 | .xmit_prepare = rds_tcp_xmit_prepare, | ||
259 | .xmit_complete = rds_tcp_xmit_complete, | ||
260 | .xmit_cong_map = rds_tcp_xmit_cong_map, | ||
261 | .xmit = rds_tcp_xmit, | ||
262 | .recv = rds_tcp_recv, | ||
263 | .conn_alloc = rds_tcp_conn_alloc, | ||
264 | .conn_free = rds_tcp_conn_free, | ||
265 | .conn_connect = rds_tcp_conn_connect, | ||
266 | .conn_shutdown = rds_tcp_conn_shutdown, | ||
267 | .inc_copy_to_user = rds_tcp_inc_copy_to_user, | ||
268 | .inc_purge = rds_tcp_inc_purge, | ||
269 | .inc_free = rds_tcp_inc_free, | ||
270 | .stats_info_copy = rds_tcp_stats_info_copy, | ||
271 | .exit = rds_tcp_exit, | ||
272 | .t_owner = THIS_MODULE, | ||
273 | .t_name = "tcp", | ||
274 | .t_type = RDS_TRANS_TCP, | ||
275 | .t_prefer_loopback = 1, | ||
276 | }; | ||
277 | |||
278 | int __init rds_tcp_init(void) | ||
279 | { | ||
280 | int ret; | ||
281 | |||
282 | rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection", | ||
283 | sizeof(struct rds_tcp_connection), | ||
284 | 0, 0, NULL); | ||
285 | if (rds_tcp_conn_slab == NULL) { | ||
286 | ret = -ENOMEM; | ||
287 | goto out; | ||
288 | } | ||
289 | |||
290 | ret = rds_tcp_recv_init(); | ||
291 | if (ret) | ||
292 | goto out_slab; | ||
293 | |||
294 | ret = rds_trans_register(&rds_tcp_transport); | ||
295 | if (ret) | ||
296 | goto out_recv; | ||
297 | |||
298 | ret = rds_tcp_listen_init(); | ||
299 | if (ret) | ||
300 | goto out_register; | ||
301 | |||
302 | rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); | ||
303 | |||
304 | goto out; | ||
305 | |||
306 | out_register: | ||
307 | rds_trans_unregister(&rds_tcp_transport); | ||
308 | out_recv: | ||
309 | rds_tcp_recv_exit(); | ||
310 | out_slab: | ||
311 | kmem_cache_destroy(rds_tcp_conn_slab); | ||
312 | out: | ||
313 | return ret; | ||
314 | } | ||
315 | module_init(rds_tcp_init); | ||
316 | |||
317 | MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>"); | ||
318 | MODULE_DESCRIPTION("RDS: TCP transport"); | ||
319 | MODULE_LICENSE("Dual BSD/GPL"); | ||
320 | |||
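net/rds/tcp.c follows the transport pattern laid out in rds.h: fill in a struct rds_transport, register it, and unwind in the opposite order on failure. A compile-level skeleton of a hypothetical transport using the same entry points (the mandatory callbacks are left out for brevity, so this is a shape reference, not a loadable module):

    /* Hypothetical transport skeleton, mirroring rds_tcp_transport above. */
    static struct rds_transport rds_example_transport = {
            .t_owner           = THIS_MODULE,
            .t_name            = "example",
            .t_prefer_loopback = 1,
            /* a real transport would also set .t_type to its own RDS_TRANS_*
             * id and fill in conn_alloc, conn_free, xmit, recv, etc. */
    };

    static int __init rds_example_init(void)
    {
            return rds_trans_register(&rds_example_transport);
    }
    module_init(rds_example_init);

    static void __exit rds_example_exit(void)
    {
            rds_trans_unregister(&rds_example_transport);
    }
    module_exit(rds_example_exit);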
diff --git a/net/rds/tcp.h b/net/rds/tcp.h new file mode 100644 index 000000000000..844fa6b9cf5a --- /dev/null +++ b/net/rds/tcp.h | |||
@@ -0,0 +1,93 @@ | |||
1 | #ifndef _RDS_TCP_H | ||
2 | #define _RDS_TCP_H | ||
3 | |||
4 | #define RDS_TCP_PORT 16385 | ||
5 | |||
6 | struct rds_tcp_incoming { | ||
7 | struct rds_incoming ti_inc; | ||
8 | struct sk_buff_head ti_skb_list; | ||
9 | }; | ||
10 | |||
11 | struct rds_tcp_connection { | ||
12 | |||
13 | struct list_head t_tcp_node; | ||
14 | struct rds_connection *conn; | ||
15 | struct socket *t_sock; | ||
16 | void *t_orig_write_space; | ||
17 | void *t_orig_data_ready; | ||
18 | void *t_orig_state_change; | ||
19 | |||
20 | struct rds_tcp_incoming *t_tinc; | ||
21 | size_t t_tinc_hdr_rem; | ||
22 | size_t t_tinc_data_rem; | ||
23 | |||
24 | /* XXX error report? */ | ||
25 | struct work_struct t_conn_w; | ||
26 | struct work_struct t_send_w; | ||
27 | struct work_struct t_down_w; | ||
28 | struct work_struct t_recv_w; | ||
29 | |||
30 | /* for info exporting only */ | ||
31 | struct list_head t_list_item; | ||
32 | u32 t_last_sent_nxt; | ||
33 | u32 t_last_expected_una; | ||
34 | u32 t_last_seen_una; | ||
35 | }; | ||
36 | |||
37 | struct rds_tcp_statistics { | ||
38 | uint64_t s_tcp_data_ready_calls; | ||
39 | uint64_t s_tcp_write_space_calls; | ||
40 | uint64_t s_tcp_sndbuf_full; | ||
41 | uint64_t s_tcp_connect_raced; | ||
42 | uint64_t s_tcp_listen_closed_stale; | ||
43 | }; | ||
44 | |||
45 | /* tcp.c */ | ||
46 | int __init rds_tcp_init(void); | ||
47 | void rds_tcp_exit(void); | ||
48 | void rds_tcp_tune(struct socket *sock); | ||
49 | void rds_tcp_nonagle(struct socket *sock); | ||
50 | void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn); | ||
51 | void rds_tcp_restore_callbacks(struct socket *sock, | ||
52 | struct rds_tcp_connection *tc); | ||
53 | u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc); | ||
54 | u32 rds_tcp_snd_una(struct rds_tcp_connection *tc); | ||
55 | u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq); | ||
56 | extern struct rds_transport rds_tcp_transport; | ||
57 | |||
58 | /* tcp_connect.c */ | ||
59 | int rds_tcp_conn_connect(struct rds_connection *conn); | ||
60 | void rds_tcp_conn_shutdown(struct rds_connection *conn); | ||
61 | void rds_tcp_state_change(struct sock *sk); | ||
62 | |||
63 | /* tcp_listen.c */ | ||
64 | int __init rds_tcp_listen_init(void); | ||
65 | void rds_tcp_listen_stop(void); | ||
66 | void rds_tcp_listen_data_ready(struct sock *sk, int bytes); | ||
67 | |||
68 | /* tcp_recv.c */ | ||
69 | int __init rds_tcp_recv_init(void); | ||
70 | void rds_tcp_recv_exit(void); | ||
71 | void rds_tcp_data_ready(struct sock *sk, int bytes); | ||
72 | int rds_tcp_recv(struct rds_connection *conn); | ||
73 | void rds_tcp_inc_purge(struct rds_incoming *inc); | ||
74 | void rds_tcp_inc_free(struct rds_incoming *inc); | ||
75 | int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, | ||
76 | size_t size); | ||
77 | |||
78 | /* tcp_send.c */ | ||
79 | void rds_tcp_xmit_prepare(struct rds_connection *conn); | ||
80 | void rds_tcp_xmit_complete(struct rds_connection *conn); | ||
81 | int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, | ||
82 | unsigned int hdr_off, unsigned int sg, unsigned int off); | ||
83 | void rds_tcp_write_space(struct sock *sk); | ||
84 | int rds_tcp_xmit_cong_map(struct rds_connection *conn, | ||
85 | struct rds_cong_map *map, unsigned long offset); | ||
86 | |||
87 | /* tcp_stats.c */ | ||
88 | DECLARE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats); | ||
89 | #define rds_tcp_stats_inc(member) rds_stats_inc_which(rds_tcp_stats, member) | ||
90 | unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter, | ||
91 | unsigned int avail); | ||
92 | |||
93 | #endif | ||
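The header also wires per-CPU TCP statistics into the shared rds_stats_inc_which() machinery, so bumping a counter from any of the tcp_*.c files is a one-liner. A usage sketch (the wrapper function is hypothetical; the macro invocation matches how tcp_recv.c and tcp_listen.c use it):

    /* Hypothetical wrapper showing the counter macro in use; it presumably
     * increments the per-CPU rds_tcp_stats member of the same name. */
    static void example_note_data_ready(void)
    {
            rds_tcp_stats_inc(s_tcp_data_ready_calls);
    }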
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c new file mode 100644 index 000000000000..211522f9a9a2 --- /dev/null +++ b/net/rds/tcp_connect.c | |||
@@ -0,0 +1,153 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 Oracle. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/in.h> | ||
35 | #include <net/tcp.h> | ||
36 | |||
37 | #include "rds.h" | ||
38 | #include "tcp.h" | ||
39 | |||
40 | void rds_tcp_state_change(struct sock *sk) | ||
41 | { | ||
42 | void (*state_change)(struct sock *sk); | ||
43 | struct rds_connection *conn; | ||
44 | struct rds_tcp_connection *tc; | ||
45 | |||
46 | read_lock(&sk->sk_callback_lock); | ||
47 | conn = sk->sk_user_data; | ||
48 | if (conn == NULL) { | ||
49 | state_change = sk->sk_state_change; | ||
50 | goto out; | ||
51 | } | ||
52 | tc = conn->c_transport_data; | ||
53 | state_change = tc->t_orig_state_change; | ||
54 | |||
55 | rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state); | ||
56 | |||
57 | switch(sk->sk_state) { | ||
58 | /* ignore connecting sockets as they make progress */ | ||
59 | case TCP_SYN_SENT: | ||
60 | case TCP_SYN_RECV: | ||
61 | break; | ||
62 | case TCP_ESTABLISHED: | ||
63 | rds_connect_complete(conn); | ||
64 | break; | ||
65 | case TCP_CLOSE: | ||
66 | rds_conn_drop(conn); | ||
67 | default: | ||
68 | break; | ||
69 | } | ||
70 | out: | ||
71 | read_unlock(&sk->sk_callback_lock); | ||
72 | state_change(sk); | ||
73 | } | ||
74 | |||
75 | int rds_tcp_conn_connect(struct rds_connection *conn) | ||
76 | { | ||
77 | struct socket *sock = NULL; | ||
78 | struct sockaddr_in src, dest; | ||
79 | int ret; | ||
80 | |||
81 | ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); | ||
82 | if (ret < 0) | ||
83 | goto out; | ||
84 | |||
85 | rds_tcp_tune(sock); | ||
86 | |||
87 | src.sin_family = AF_INET; | ||
88 | src.sin_addr.s_addr = (__force u32)conn->c_laddr; | ||
89 | src.sin_port = (__force u16)htons(0); | ||
90 | |||
91 | ret = sock->ops->bind(sock, (struct sockaddr *)&src, sizeof(src)); | ||
92 | if (ret) { | ||
93 | rdsdebug("bind failed with %d at address %u.%u.%u.%u\n", | ||
94 | ret, NIPQUAD(conn->c_laddr)); | ||
95 | goto out; | ||
96 | } | ||
97 | |||
98 | dest.sin_family = AF_INET; | ||
99 | dest.sin_addr.s_addr = (__force u32)conn->c_faddr; | ||
100 | dest.sin_port = (__force u16)htons(RDS_TCP_PORT); | ||
101 | |||
102 | /* | ||
103 | * once we call connect() we can start getting callbacks and they | ||
104 | * own the socket | ||
105 | */ | ||
106 | rds_tcp_set_callbacks(sock, conn); | ||
107 | ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest), | ||
108 | O_NONBLOCK); | ||
109 | sock = NULL; | ||
110 | |||
111 | rdsdebug("connect to address %u.%u.%u.%u returned %d\n", | ||
112 | NIPQUAD(conn->c_faddr), ret); | ||
113 | if (ret == -EINPROGRESS) | ||
114 | ret = 0; | ||
115 | |||
116 | out: | ||
117 | if (sock) | ||
118 | sock_release(sock); | ||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * Before killing the tcp socket this needs to serialize with callbacks. The | ||
124 | * caller has already grabbed the sending sem so we're serialized with other | ||
125 | * senders. | ||
126 | * | ||
127 | * TCP calls the callbacks with the sock lock so we hold it while we reset the | ||
128 | * callbacks to those set by TCP. Our callbacks won't execute again once we | ||
129 | * hold the sock lock. | ||
130 | */ | ||
131 | void rds_tcp_conn_shutdown(struct rds_connection *conn) | ||
132 | { | ||
133 | struct rds_tcp_connection *tc = conn->c_transport_data; | ||
134 | struct socket *sock = tc->t_sock; | ||
135 | |||
136 | rdsdebug("shutting down conn %p tc %p sock %p\n", conn, tc, sock); | ||
137 | |||
138 | if (sock) { | ||
139 | sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN); | ||
140 | lock_sock(sock->sk); | ||
141 | rds_tcp_restore_callbacks(sock, tc); /* tc->tc_sock = NULL */ | ||
142 | |||
143 | release_sock(sock->sk); | ||
144 | sock_release(sock); | ||
145 | }; | ||
146 | |||
147 | if (tc->t_tinc) { | ||
148 | rds_inc_put(&tc->t_tinc->ti_inc); | ||
149 | tc->t_tinc = NULL; | ||
150 | } | ||
151 | tc->t_tinc_hdr_rem = sizeof(struct rds_header); | ||
152 | tc->t_tinc_data_rem = 0; | ||
153 | } | ||
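rds_tcp_conn_connect() issues a kernel-side nonblocking connect: the callbacks are installed first (so the state_change handler owns the socket from that point on), and -EINPROGRESS from sock->ops->connect() is treated as success because completion is reported later via rds_tcp_state_change(). A minimal, hypothetical illustration of that return-code convention in isolation:

    /* Hypothetical wrapper: a nonblocking in-kernel connect() where
     * -EINPROGRESS means "started fine, completion will arrive via the
     * socket's state_change callback". */
    static int example_start_connect(struct socket *sock,
                                     struct sockaddr_in *dest)
    {
            int ret;

            ret = sock->ops->connect(sock, (struct sockaddr *)dest,
                                     sizeof(*dest), O_NONBLOCK);
            if (ret == -EINPROGRESS)
                    ret = 0;        /* not an error for a nonblocking connect */
            return ret;
    }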
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c new file mode 100644 index 000000000000..24b743eb0b1b --- /dev/null +++ b/net/rds/tcp_listen.c | |||
@@ -0,0 +1,199 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 Oracle. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/in.h> | ||
35 | #include <net/tcp.h> | ||
36 | |||
37 | #include "rds.h" | ||
38 | #include "tcp.h" | ||
39 | |||
40 | /* | ||
41 | * cheesy, but simple.. | ||
42 | */ | ||
43 | static void rds_tcp_accept_worker(struct work_struct *work); | ||
44 | static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker); | ||
45 | static struct socket *rds_tcp_listen_sock; | ||
46 | |||
47 | static int rds_tcp_accept_one(struct socket *sock) | ||
48 | { | ||
49 | struct socket *new_sock = NULL; | ||
50 | struct rds_connection *conn; | ||
51 | int ret; | ||
52 | struct inet_sock *inet; | ||
53 | |||
54 | ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type, | ||
55 | sock->sk->sk_protocol, &new_sock); | ||
56 | if (ret) | ||
57 | goto out; | ||
58 | |||
59 | new_sock->type = sock->type; | ||
60 | new_sock->ops = sock->ops; | ||
61 | ret = sock->ops->accept(sock, new_sock, O_NONBLOCK); | ||
62 | if (ret < 0) | ||
63 | goto out; | ||
64 | |||
65 | rds_tcp_tune(new_sock); | ||
66 | |||
67 | inet = inet_sk(new_sock->sk); | ||
68 | |||
69 | rdsdebug("accepted tcp %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n", | ||
70 | NIPQUAD(inet->saddr), ntohs(inet->sport), | ||
71 | NIPQUAD(inet->daddr), ntohs(inet->dport)); | ||
72 | |||
73 | conn = rds_conn_create(inet->saddr, inet->daddr, &rds_tcp_transport, | ||
74 | GFP_KERNEL); | ||
75 | if (IS_ERR(conn)) { | ||
76 | ret = PTR_ERR(conn); | ||
77 | goto out; | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * see the comment above rds_queue_delayed_reconnect() | ||
82 | */ | ||
83 | if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) { | ||
84 | if (rds_conn_state(conn) == RDS_CONN_UP) | ||
85 | rds_tcp_stats_inc(s_tcp_listen_closed_stale); | ||
86 | else | ||
87 | rds_tcp_stats_inc(s_tcp_connect_raced); | ||
88 | rds_conn_drop(conn); | ||
89 | ret = 0; | ||
90 | goto out; | ||
91 | } | ||
92 | |||
93 | rds_tcp_set_callbacks(new_sock, conn); | ||
94 | rds_connect_complete(conn); | ||
95 | new_sock = NULL; | ||
96 | ret = 0; | ||
97 | |||
98 | out: | ||
99 | if (new_sock) | ||
100 | sock_release(new_sock); | ||
101 | return ret; | ||
102 | } | ||
103 | |||
104 | static void rds_tcp_accept_worker(struct work_struct *work) | ||
105 | { | ||
106 | while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0) | ||
107 | cond_resched(); | ||
108 | } | ||
109 | |||
110 | void rds_tcp_listen_data_ready(struct sock *sk, int bytes) | ||
111 | { | ||
112 | void (*ready)(struct sock *sk, int bytes); | ||
113 | |||
114 | rdsdebug("listen data ready sk %p\n", sk); | ||
115 | |||
116 | read_lock(&sk->sk_callback_lock); | ||
117 | ready = sk->sk_user_data; | ||
118 | if (ready == NULL) { /* check for teardown race */ | ||
119 | ready = sk->sk_data_ready; | ||
120 | goto out; | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * ->sk_data_ready is also called for a newly established child socket | ||
125 | * before it has been accepted and the accepter has set up their | ||
126 | * data_ready.. we only want to queue listen work for our listening | ||
127 | * socket | ||
128 | */ | ||
129 | if (sk->sk_state == TCP_LISTEN) | ||
130 | queue_work(rds_wq, &rds_tcp_listen_work); | ||
131 | |||
132 | out: | ||
133 | read_unlock(&sk->sk_callback_lock); | ||
134 | ready(sk, bytes); | ||
135 | } | ||
136 | |||
137 | int __init rds_tcp_listen_init(void) | ||
138 | { | ||
139 | struct sockaddr_in sin; | ||
140 | struct socket *sock = NULL; | ||
141 | int ret; | ||
142 | |||
143 | ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); | ||
144 | if (ret < 0) | ||
145 | goto out; | ||
146 | |||
147 | sock->sk->sk_reuse = 1; | ||
148 | rds_tcp_nonagle(sock); | ||
149 | |||
150 | write_lock_bh(&sock->sk->sk_callback_lock); | ||
151 | sock->sk->sk_user_data = sock->sk->sk_data_ready; | ||
152 | sock->sk->sk_data_ready = rds_tcp_listen_data_ready; | ||
153 | write_unlock_bh(&sock->sk->sk_callback_lock); | ||
154 | |||
155 | sin.sin_family = PF_INET, | ||
156 | sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY); | ||
157 | sin.sin_port = (__force u16)htons(RDS_TCP_PORT); | ||
158 | |||
159 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); | ||
160 | if (ret < 0) | ||
161 | goto out; | ||
162 | |||
163 | ret = sock->ops->listen(sock, 64); | ||
164 | if (ret < 0) | ||
165 | goto out; | ||
166 | |||
167 | rds_tcp_listen_sock = sock; | ||
168 | sock = NULL; | ||
169 | out: | ||
170 | if (sock) | ||
171 | sock_release(sock); | ||
172 | return ret; | ||
173 | } | ||
174 | |||
175 | void rds_tcp_listen_stop(void) | ||
176 | { | ||
177 | struct socket *sock = rds_tcp_listen_sock; | ||
178 | struct sock *sk; | ||
179 | |||
180 | if (sock == NULL) | ||
181 | return; | ||
182 | |||
183 | sk = sock->sk; | ||
184 | |||
185 | /* serialize with and prevent further callbacks */ | ||
186 | lock_sock(sk); | ||
187 | write_lock_bh(&sk->sk_callback_lock); | ||
188 | if (sk->sk_user_data) { | ||
189 | sk->sk_data_ready = sk->sk_user_data; | ||
190 | sk->sk_user_data = NULL; | ||
191 | } | ||
192 | write_unlock_bh(&sk->sk_callback_lock); | ||
193 | release_sock(sk); | ||
194 | |||
195 | /* wait for accepts to stop and close the socket */ | ||
196 | flush_workqueue(rds_wq); | ||
197 | sock_release(sock); | ||
198 | rds_tcp_listen_sock = NULL; | ||
199 | } | ||
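A small trick worth noting in tcp_listen.c: for the listen socket, the original sk_data_ready pointer is stashed in sk_user_data (which RDS does not otherwise need on that socket) so it can be chained to from rds_tcp_listen_data_ready() and restored on teardown. The save/restore pattern, reduced to its essentials with hypothetical function names:

    /* Hypothetical condensation of the listen-socket callback handling:
     * park the old data_ready in sk_user_data, install our own, and put
     * things back under the callback lock when shutting down. */
    static void example_hook_listen_sock(struct sock *sk,
                                         void (*new_ready)(struct sock *, int))
    {
            write_lock_bh(&sk->sk_callback_lock);
            sk->sk_user_data = sk->sk_data_ready;   /* remember the original */
            sk->sk_data_ready = new_ready;
            write_unlock_bh(&sk->sk_callback_lock);
    }

    static void example_unhook_listen_sock(struct sock *sk)
    {
            write_lock_bh(&sk->sk_callback_lock);
            if (sk->sk_user_data) {
                    sk->sk_data_ready = sk->sk_user_data;
                    sk->sk_user_data = NULL;
            }
            write_unlock_bh(&sk->sk_callback_lock);
    }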
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c new file mode 100644 index 000000000000..c00dafffbb5a --- /dev/null +++ b/net/rds/tcp_recv.c | |||
@@ -0,0 +1,356 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 Oracle. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | #include <linux/kernel.h> | ||
34 | #include <net/tcp.h> | ||
35 | |||
36 | #include "rds.h" | ||
37 | #include "tcp.h" | ||
38 | |||
39 | static struct kmem_cache *rds_tcp_incoming_slab; | ||
40 | |||
41 | void rds_tcp_inc_purge(struct rds_incoming *inc) | ||
42 | { | ||
43 | struct rds_tcp_incoming *tinc; | ||
44 | tinc = container_of(inc, struct rds_tcp_incoming, ti_inc); | ||
45 | rdsdebug("purging tinc %p inc %p\n", tinc, inc); | ||
46 | skb_queue_purge(&tinc->ti_skb_list); | ||
47 | } | ||
48 | |||
49 | void rds_tcp_inc_free(struct rds_incoming *inc) | ||
50 | { | ||
51 | struct rds_tcp_incoming *tinc; | ||
52 | tinc = container_of(inc, struct rds_tcp_incoming, ti_inc); | ||
53 | rds_tcp_inc_purge(inc); | ||
54 | rdsdebug("freeing tinc %p inc %p\n", tinc, inc); | ||
55 | kmem_cache_free(rds_tcp_incoming_slab, tinc); | ||
56 | } | ||
57 | |||
58 | /* | ||
59 | * this is pretty lame, but, whatever. | ||
60 | */ | ||
61 | int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, | ||
62 | size_t size) | ||
63 | { | ||
64 | struct rds_tcp_incoming *tinc; | ||
65 | struct iovec *iov, tmp; | ||
66 | struct sk_buff *skb; | ||
67 | unsigned long to_copy, skb_off; | ||
68 | int ret = 0; | ||
69 | |||
70 | if (size == 0) | ||
71 | goto out; | ||
72 | |||
73 | tinc = container_of(inc, struct rds_tcp_incoming, ti_inc); | ||
74 | iov = first_iov; | ||
75 | tmp = *iov; | ||
76 | |||
77 | skb_queue_walk(&tinc->ti_skb_list, skb) { | ||
78 | skb_off = 0; | ||
79 | while (skb_off < skb->len) { | ||
80 | while (tmp.iov_len == 0) { | ||
81 | iov++; | ||
82 | tmp = *iov; | ||
83 | } | ||
84 | |||
85 | to_copy = min(tmp.iov_len, size); | ||
86 | to_copy = min(to_copy, skb->len - skb_off); | ||
87 | |||
88 | rdsdebug("ret %d size %zu skb %p skb_off %lu " | ||
89 | "skblen %d iov_base %p iov_len %zu cpy %lu\n", | ||
90 | ret, size, skb, skb_off, skb->len, | ||
91 | tmp.iov_base, tmp.iov_len, to_copy); | ||
92 | |||
93 | /* modifies tmp as it copies */ | ||
94 | if (skb_copy_datagram_iovec(skb, skb_off, &tmp, | ||
95 | to_copy)) { | ||
96 | ret = -EFAULT; | ||
97 | goto out; | ||
98 | } | ||
99 | |||
100 | size -= to_copy; | ||
101 | ret += to_copy; | ||
102 | skb_off += to_copy; | ||
103 | if (size == 0) | ||
104 | goto out; | ||
105 | } | ||
106 | } | ||
107 | out: | ||
108 | return ret; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * We have a series of skbs that have fragmented pieces of the congestion | ||
113 | * bitmap. They must add up to the exact size of the congestion bitmap. We | ||
114 | * use the skb helpers to copy those into the pages that make up the in-memory | ||
115 | * congestion bitmap for the remote address of this connection. We then tell | ||
116 | * the congestion core that the bitmap has been changed so that it can wake up | ||
117 | * sleepers. | ||
118 | * | ||
119 | * This is racing with sending paths which are using test_bit to see if the | ||
120 | * bitmap indicates that their recipient is congested. | ||
121 | */ | ||
122 | |||
123 | static void rds_tcp_cong_recv(struct rds_connection *conn, | ||
124 | struct rds_tcp_incoming *tinc) | ||
125 | { | ||
126 | struct sk_buff *skb; | ||
127 | unsigned int to_copy, skb_off; | ||
128 | unsigned int map_off; | ||
129 | unsigned int map_page; | ||
130 | struct rds_cong_map *map; | ||
131 | int ret; | ||
132 | |||
133 | /* catch completely corrupt packets */ | ||
134 | if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES) | ||
135 | return; | ||
136 | |||
137 | map_page = 0; | ||
138 | map_off = 0; | ||
139 | map = conn->c_fcong; | ||
140 | |||
141 | skb_queue_walk(&tinc->ti_skb_list, skb) { | ||
142 | skb_off = 0; | ||
143 | while (skb_off < skb->len) { | ||
144 | to_copy = min_t(unsigned int, PAGE_SIZE - map_off, | ||
145 | skb->len - skb_off); | ||
146 | |||
147 | BUG_ON(map_page >= RDS_CONG_MAP_PAGES); | ||
148 | |||
149 | /* only returns 0 or -error */ | ||
150 | ret = skb_copy_bits(skb, skb_off, | ||
151 | (void *)map->m_page_addrs[map_page] + map_off, | ||
152 | to_copy); | ||
153 | BUG_ON(ret != 0); | ||
154 | |||
155 | skb_off += to_copy; | ||
156 | map_off += to_copy; | ||
157 | if (map_off == PAGE_SIZE) { | ||
158 | map_off = 0; | ||
159 | map_page++; | ||
160 | } | ||
161 | } | ||
162 | } | ||
163 | |||
164 | rds_cong_map_updated(map, ~(u64) 0); | ||
165 | } | ||
166 | |||
167 | struct rds_tcp_desc_arg { | ||
168 | struct rds_connection *conn; | ||
169 | gfp_t gfp; | ||
170 | enum km_type km; | ||
171 | }; | ||
172 | |||
173 | static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb, | ||
174 | unsigned int offset, size_t len) | ||
175 | { | ||
176 | struct rds_tcp_desc_arg *arg = desc->arg.data; | ||
177 | struct rds_connection *conn = arg->conn; | ||
178 | struct rds_tcp_connection *tc = conn->c_transport_data; | ||
179 | struct rds_tcp_incoming *tinc = tc->t_tinc; | ||
180 | struct sk_buff *clone; | ||
181 | size_t left = len, to_copy; | ||
182 | |||
183 | rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset, | ||
184 | len); | ||
185 | |||
186 | /* | ||
187 | * tcp_read_sock() interprets partial progress as an indication to stop | ||
188 | * processing. | ||
189 | */ | ||
190 | while (left) { | ||
191 | if (tinc == NULL) { | ||
192 | tinc = kmem_cache_alloc(rds_tcp_incoming_slab, | ||
193 | arg->gfp); | ||
194 | if (tinc == NULL) { | ||
195 | desc->error = -ENOMEM; | ||
196 | goto out; | ||
197 | } | ||
198 | tc->t_tinc = tinc; | ||
199 | rdsdebug("alloced tinc %p\n", tinc); | ||
200 | rds_inc_init(&tinc->ti_inc, conn, conn->c_faddr); | ||
201 | /* | ||
202 | * XXX * we might be able to use the __ variants when | ||
203 | * we've already serialized at a higher level. | ||
204 | */ | ||
205 | skb_queue_head_init(&tinc->ti_skb_list); | ||
206 | } | ||
207 | |||
208 | if (left && tc->t_tinc_hdr_rem) { | ||
209 | to_copy = min(tc->t_tinc_hdr_rem, left); | ||
210 | rdsdebug("copying %zu header from skb %p\n", to_copy, | ||
211 | skb); | ||
212 | skb_copy_bits(skb, offset, | ||
213 | (char *)&tinc->ti_inc.i_hdr + | ||
214 | sizeof(struct rds_header) - | ||
215 | tc->t_tinc_hdr_rem, | ||
216 | to_copy); | ||
217 | tc->t_tinc_hdr_rem -= to_copy; | ||
218 | left -= to_copy; | ||
219 | offset += to_copy; | ||
220 | |||
221 | if (tc->t_tinc_hdr_rem == 0) { | ||
222 | /* could be 0 for a 0 len message */ | ||
223 | tc->t_tinc_data_rem = | ||
224 | be32_to_cpu(tinc->ti_inc.i_hdr.h_len); | ||
225 | } | ||
226 | } | ||
227 | |||
228 | if (left && tc->t_tinc_data_rem) { | ||
229 | clone = skb_clone(skb, arg->gfp); | ||
230 | if (clone == NULL) { | ||
231 | desc->error = -ENOMEM; | ||
232 | goto out; | ||
233 | } | ||
234 | |||
235 | to_copy = min(tc->t_tinc_data_rem, left); | ||
236 | pskb_pull(clone, offset); | ||
237 | pskb_trim(clone, to_copy); | ||
238 | skb_queue_tail(&tinc->ti_skb_list, clone); | ||
239 | |||
240 | rdsdebug("skb %p data %p len %d off %u to_copy %zu -> " | ||
241 | "clone %p data %p len %d\n", | ||
242 | skb, skb->data, skb->len, offset, to_copy, | ||
243 | clone, clone->data, clone->len); | ||
244 | |||
245 | tc->t_tinc_data_rem -= to_copy; | ||
246 | left -= to_copy; | ||
247 | offset += to_copy; | ||
248 | } | ||
249 | |||
250 | if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) { | ||
251 | if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) | ||
252 | rds_tcp_cong_recv(conn, tinc); | ||
253 | else | ||
254 | rds_recv_incoming(conn, conn->c_faddr, | ||
255 | conn->c_laddr, &tinc->ti_inc, | ||
256 | arg->gfp, arg->km); | ||
257 | |||
258 | tc->t_tinc_hdr_rem = sizeof(struct rds_header); | ||
259 | tc->t_tinc_data_rem = 0; | ||
260 | tc->t_tinc = NULL; | ||
261 | rds_inc_put(&tinc->ti_inc); | ||
262 | tinc = NULL; | ||
263 | } | ||
264 | } | ||
265 | out: | ||
266 | rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n", | ||
267 | len, left, skb->len, | ||
268 | skb_queue_len(&tc->t_sock->sk->sk_receive_queue)); | ||
269 | return len - left; | ||
270 | } | ||
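For orientation, a minimal userspace sketch of the same idea follows: a parser that keeps "header bytes remaining" and "payload bytes remaining" counters so it can be fed arbitrarily sized chunks, just as rds_tcp_data_recv() above is fed whatever tcp_read_sock() hands it. The 8-byte header with a 4-byte big-endian length field is an assumption of this sketch, not the layout of the real struct rds_header, and the skb-clone queueing is reduced to a counter.

/* Hypothetical illustration only: a stream reassembler that survives any
 * chunk boundaries, mirroring the t_tinc_hdr_rem/t_tinc_data_rem counters. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_LEN 8	/* assumed: 4 bytes of magic + 4 bytes of BE payload length */

struct parser {
	uint8_t hdr[HDR_LEN];
	size_t hdr_rem;		/* header bytes still expected */
	size_t data_rem;	/* payload bytes still expected */
};

static void parser_feed(struct parser *p, const uint8_t *buf, size_t len)
{
	size_t left = len, to_copy;

	while (left) {
		if (p->hdr_rem) {
			to_copy = left < p->hdr_rem ? left : p->hdr_rem;
			memcpy(p->hdr + (HDR_LEN - p->hdr_rem), buf, to_copy);
			p->hdr_rem -= to_copy;
			buf += to_copy;
			left -= to_copy;
			if (p->hdr_rem == 0) {
				uint32_t be_len;

				memcpy(&be_len, p->hdr + 4, 4);
				p->data_rem = ntohl(be_len);
				printf("header complete, want %zu payload bytes\n",
				       p->data_rem);
			}
		}
		if (left && p->data_rem) {
			/* the kernel code queues a trimmed skb clone here */
			to_copy = left < p->data_rem ? left : p->data_rem;
			p->data_rem -= to_copy;
			buf += to_copy;
			left -= to_copy;
		}
		if (p->hdr_rem == 0 && p->data_rem == 0) {
			printf("message complete\n");
			p->hdr_rem = HDR_LEN;	/* start over on the next message */
		}
	}
}

int main(void)
{
	/* one message with a 5-byte payload, delivered in 3-byte chunks */
	uint8_t msg[HDR_LEN + 5] = { 'R', 'D', 'S', '!', 0, 0, 0, 5, 1, 2, 3, 4, 5 };
	struct parser p = { .hdr_rem = HDR_LEN, .data_rem = 0 };
	size_t off, chunk;

	for (off = 0; off < sizeof(msg); off += chunk) {
		chunk = sizeof(msg) - off < 3 ? sizeof(msg) - off : 3;
		parser_feed(&p, msg + off, chunk);
	}
	return 0;
}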
271 | |||
272 | /* the caller has to hold the sock lock */ | ||
273 | int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp, enum km_type km) | ||
274 | { | ||
275 | struct rds_tcp_connection *tc = conn->c_transport_data; | ||
276 | struct socket *sock = tc->t_sock; | ||
277 | read_descriptor_t desc; | ||
278 | struct rds_tcp_desc_arg arg; | ||
279 | |||
280 | /* It's like glib in the kernel! */ | ||
281 | arg.conn = conn; | ||
282 | arg.gfp = gfp; | ||
283 | arg.km = km; | ||
284 | desc.arg.data = &arg; | ||
285 | desc.error = 0; | ||
286 | desc.count = 1; /* give more than one skb per call */ | ||
287 | |||
288 | tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv); | ||
289 | rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp, | ||
290 | desc.error); | ||
291 | |||
292 | return desc.error; | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from | ||
297 | * data_ready. | ||
298 | * | ||
299 | * If we fail to allocate we're in trouble; blindly wait some time before | ||
300 | * trying again to see if the VM can free up something for us. | ||
301 | */ | ||
302 | int rds_tcp_recv(struct rds_connection *conn) | ||
303 | { | ||
304 | struct rds_tcp_connection *tc = conn->c_transport_data; | ||
305 | struct socket *sock = tc->t_sock; | ||
306 | int ret = 0; | ||
307 | |||
308 | rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock); | ||
309 | |||
310 | lock_sock(sock->sk); | ||
311 | ret = rds_tcp_read_sock(conn, GFP_KERNEL, KM_USER0); | ||
312 | release_sock(sock->sk); | ||
313 | |||
314 | return ret; | ||
315 | } | ||
316 | |||
317 | void rds_tcp_data_ready(struct sock *sk, int bytes) | ||
318 | { | ||
319 | void (*ready)(struct sock *sk, int bytes); | ||
320 | struct rds_connection *conn; | ||
321 | struct rds_tcp_connection *tc; | ||
322 | |||
323 | rdsdebug("data ready sk %p bytes %d\n", sk, bytes); | ||
324 | |||
325 | read_lock(&sk->sk_callback_lock); | ||
326 | conn = sk->sk_user_data; | ||
327 | if (conn == NULL) { /* check for teardown race */ | ||
328 | ready = sk->sk_data_ready; | ||
329 | goto out; | ||
330 | } | ||
331 | |||
332 | tc = conn->c_transport_data; | ||
333 | ready = tc->t_orig_data_ready; | ||
334 | rds_tcp_stats_inc(s_tcp_data_ready_calls); | ||
335 | |||
336 | if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM) | ||
337 | queue_delayed_work(rds_wq, &conn->c_recv_w, 0); | ||
338 | out: | ||
339 | read_unlock(&sk->sk_callback_lock); | ||
340 | ready(sk, bytes); | ||
341 | } | ||
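rds_tcp_data_ready() above assumes that whoever set the socket up stashed the connection in sk->sk_user_data and saved the previous sk_data_ready so it can be chained to at the end. That installation step lives elsewhere in the RDS TCP code; purely as an illustrative sketch (names invented here), it follows the usual pattern of swapping the callback under sk_callback_lock:

/* Illustrative sketch only, not the actual RDS setup code. */
#include <net/sock.h>

struct hooked_conn {
	void (*orig_data_ready)(struct sock *sk, int bytes);
};

static void hook_data_ready(struct sock *sk, struct hooked_conn *conn,
			    void (*hook)(struct sock *sk, int bytes))
{
	write_lock_bh(&sk->sk_callback_lock);
	conn->orig_data_ready = sk->sk_data_ready;	/* chained to by the hook */
	sk->sk_user_data = conn;			/* looked up by the hook */
	sk->sk_data_ready = hook;
	write_unlock_bh(&sk->sk_callback_lock);
}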
342 | |||
343 | int __init rds_tcp_recv_init(void) | ||
344 | { | ||
345 | rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming", | ||
346 | sizeof(struct rds_tcp_incoming), | ||
347 | 0, 0, NULL); | ||
348 | if (rds_tcp_incoming_slab == NULL) | ||
349 | return -ENOMEM; | ||
350 | return 0; | ||
351 | } | ||
352 | |||
353 | void rds_tcp_recv_exit(void) | ||
354 | { | ||
355 | kmem_cache_destroy(rds_tcp_incoming_slab); | ||
356 | } | ||
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c new file mode 100644 index 000000000000..ab545e0cd5d6 --- /dev/null +++ b/net/rds/tcp_send.c | |||
@@ -0,0 +1,263 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 Oracle. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/in.h> | ||
35 | #include <net/tcp.h> | ||
36 | |||
37 | #include "rds.h" | ||
38 | #include "tcp.h" | ||
39 | |||
40 | static void rds_tcp_cork(struct socket *sock, int val) | ||
41 | { | ||
42 | mm_segment_t oldfs; | ||
43 | |||
44 | oldfs = get_fs(); | ||
45 | set_fs(KERNEL_DS); | ||
46 | sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val, | ||
47 | sizeof(val)); | ||
48 | set_fs(oldfs); | ||
49 | } | ||
50 | |||
51 | void rds_tcp_xmit_prepare(struct rds_connection *conn) | ||
52 | { | ||
53 | struct rds_tcp_connection *tc = conn->c_transport_data; | ||
54 | |||
55 | rds_tcp_cork(tc->t_sock, 1); | ||
56 | } | ||
57 | |||
58 | void rds_tcp_xmit_complete(struct rds_connection *conn) | ||
59 | { | ||
60 | struct rds_tcp_connection *tc = conn->c_transport_data; | ||
61 | |||
62 | rds_tcp_cork(tc->t_sock, 0); | ||
63 | } | ||
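rds_tcp_xmit_prepare()/rds_tcp_xmit_complete() bracket every transmit pass with TCP_CORK so the small header writes and the sendpage() calls below leave the stack as well-filled segments. The same bracketing from userspace, shown only as a hedged illustration with error handling omitted:

/* Userspace analogue of the cork/uncork bracketing (illustration only). */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stddef.h>
#include <sys/socket.h>

void send_corked(int fd, const void *hdr, size_t hdr_len,
		 const void *payload, size_t payload_len)
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	send(fd, hdr, hdr_len, MSG_NOSIGNAL);		/* small header ...     */
	send(fd, payload, payload_len, MSG_NOSIGNAL);	/* ... then the payload */
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off)); /* push it out */
}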
64 | |||
65 | /* the core send_sem serializes this with other xmit and shutdown */ | ||
66 | int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len) | ||
67 | { | ||
68 | struct kvec vec = { | ||
69 | .iov_base = data, | ||
70 | .iov_len = len, | ||
71 | }; | ||
72 | struct msghdr msg = { | ||
73 | .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL, | ||
74 | }; | ||
75 | |||
76 | return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len); | ||
77 | } | ||
78 | |||
79 | /* the core send_sem serializes this with other xmit and shutdown */ | ||
80 | int rds_tcp_xmit_cong_map(struct rds_connection *conn, | ||
81 | struct rds_cong_map *map, unsigned long offset) | ||
82 | { | ||
83 | static struct rds_header rds_tcp_map_header = { | ||
84 | .h_flags = RDS_FLAG_CONG_BITMAP, | ||
85 | }; | ||
86 | struct rds_tcp_connection *tc = conn->c_transport_data; | ||
87 | unsigned long i; | ||
88 | int ret; | ||
89 | int copied = 0; | ||
90 | |||
91 | /* Some toolchains claim cpu_to_be32(constant) isn't a constant, so set h_len at run time. */ | ||
92 | rds_tcp_map_header.h_len = cpu_to_be32(RDS_CONG_MAP_BYTES); | ||
93 | |||
94 | if (offset < sizeof(struct rds_header)) { | ||
95 | ret = rds_tcp_sendmsg(tc->t_sock, | ||
96 | (void *)&rds_tcp_map_header + offset, | ||
97 | sizeof(struct rds_header) - offset); | ||
98 | if (ret <= 0) | ||
99 | return ret; | ||
100 | offset += ret; | ||
101 | copied = ret; | ||
102 | if (offset < sizeof(struct rds_header)) | ||
103 | return ret; | ||
104 | } | ||
105 | |||
106 | offset -= sizeof(struct rds_header); | ||
107 | i = offset / PAGE_SIZE; | ||
108 | offset = offset % PAGE_SIZE; | ||
109 | BUG_ON(i >= RDS_CONG_MAP_PAGES); | ||
110 | |||
111 | do { | ||
112 | ret = tc->t_sock->ops->sendpage(tc->t_sock, | ||
113 | virt_to_page(map->m_page_addrs[i]), | ||
114 | offset, PAGE_SIZE - offset, | ||
115 | MSG_DONTWAIT); | ||
116 | if (ret <= 0) | ||
117 | break; | ||
118 | copied += ret; | ||
119 | offset += ret; | ||
120 | if (offset == PAGE_SIZE) { | ||
121 | offset = 0; | ||
122 | i++; | ||
123 | } | ||
124 | } while (i < RDS_CONG_MAP_PAGES); | ||
125 | |||
126 | return copied ? copied : ret; | ||
127 | } | ||
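Because the sends above can be partial, the caller re-enters rds_tcp_xmit_cong_map() with the accumulated byte offset into the "header + bitmap pages" stream, and the page index plus in-page offset are recovered by simple division. A standalone check of that arithmetic (the 4096-byte page and 32-byte header sizes are assumptions of this example):

#include <assert.h>

#define EX_PAGE_SIZE	4096UL
#define EX_HDR_SIZE	32UL	/* stand-in for sizeof(struct rds_header) */

/* Map a resume offset past the header onto (page index, offset within page). */
static void resume_point(unsigned long off, unsigned long *page,
			 unsigned long *in_page)
{
	off -= EX_HDR_SIZE;		/* the header bytes were already sent */
	*page = off / EX_PAGE_SIZE;
	*in_page = off % EX_PAGE_SIZE;
}

int main(void)
{
	unsigned long page, in_page;

	/* header fully sent plus 5000 bitmap bytes already on the wire */
	resume_point(EX_HDR_SIZE + 5000, &page, &in_page);
	assert(page == 1 && in_page == 5000 - EX_PAGE_SIZE);
	return 0;
}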
128 | |||
129 | /* the core send_sem serializes this with other xmit and shutdown */ | ||
130 | int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, | ||
131 | unsigned int hdr_off, unsigned int sg, unsigned int off) | ||
132 | { | ||
133 | struct rds_tcp_connection *tc = conn->c_transport_data; | ||
134 | int done = 0; | ||
135 | int ret = 0; | ||
136 | |||
137 | if (hdr_off == 0) { | ||
138 | /* | ||
139 | * m_ack_seq is set to the sequence number of the last byte of | ||
140 | * header and data. see rds_tcp_is_acked(). | ||
141 | */ | ||
142 | tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc); | ||
143 | rm->m_ack_seq = tc->t_last_sent_nxt + | ||
144 | sizeof(struct rds_header) + | ||
145 | be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1; | ||
146 | smp_mb__before_clear_bit(); | ||
147 | set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags); | ||
148 | tc->t_last_expected_una = rm->m_ack_seq + 1; | ||
149 | |||
150 | rdsdebug("rm %p tcp nxt %u ack_seq %llu\n", | ||
151 | rm, rds_tcp_snd_nxt(tc), | ||
152 | (unsigned long long)rm->m_ack_seq); | ||
153 | } | ||
154 | |||
155 | if (hdr_off < sizeof(struct rds_header)) { | ||
156 | /* see rds_tcp_write_space() */ | ||
157 | set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags); | ||
158 | |||
159 | ret = rds_tcp_sendmsg(tc->t_sock, | ||
160 | (void *)&rm->m_inc.i_hdr + hdr_off, | ||
161 | sizeof(rm->m_inc.i_hdr) - hdr_off); | ||
162 | if (ret < 0) | ||
163 | goto out; | ||
164 | done += ret; | ||
165 | if (hdr_off + done != sizeof(struct rds_header)) | ||
166 | goto out; | ||
167 | } | ||
168 | |||
169 | while (sg < rm->m_nents) { | ||
170 | ret = tc->t_sock->ops->sendpage(tc->t_sock, | ||
171 | sg_page(&rm->m_sg[sg]), | ||
172 | rm->m_sg[sg].offset + off, | ||
173 | rm->m_sg[sg].length - off, | ||
174 | MSG_DONTWAIT|MSG_NOSIGNAL); | ||
175 | rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->m_sg[sg]), | ||
176 | rm->m_sg[sg].offset + off, rm->m_sg[sg].length - off, | ||
177 | ret); | ||
178 | if (ret <= 0) | ||
179 | break; | ||
180 | |||
181 | off += ret; | ||
182 | done += ret; | ||
183 | if (off == rm->m_sg[sg].length) { | ||
184 | off = 0; | ||
185 | sg++; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | out: | ||
190 | if (ret <= 0) { | ||
191 | /* write_space will hit after EAGAIN, all else fatal */ | ||
192 | if (ret == -EAGAIN) { | ||
193 | rds_tcp_stats_inc(s_tcp_sndbuf_full); | ||
194 | ret = 0; | ||
195 | } else { | ||
196 | printk(KERN_WARNING "RDS/tcp: send to %u.%u.%u.%u " | ||
197 | "returned %d, disconnecting and reconnecting\n", | ||
198 | NIPQUAD(conn->c_faddr), ret); | ||
199 | rds_conn_drop(conn); | ||
200 | } | ||
201 | } | ||
202 | if (done == 0) | ||
203 | done = ret; | ||
204 | return done; | ||
205 | } | ||
206 | |||
207 | /* | ||
208 | * rm->m_ack_seq is set to the tcp sequence number that corresponds to the | ||
209 | * last byte of the message, including the header. This means that the | ||
210 | * entire message has been received if rm->m_ack_seq is "before" the next | ||
211 | * unacked byte of the TCP sequence space. We have to do very careful | ||
212 | * wrapping 32-bit comparisons here. | ||
213 | */ | ||
214 | static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack) | ||
215 | { | ||
216 | if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags)) | ||
217 | return 0; | ||
218 | return (__s32)((u32)rm->m_ack_seq - (u32)ack) < 0; | ||
219 | } | ||
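The cast in rds_tcp_is_acked() is the usual serial-number trick: subtract in 32-bit unsigned arithmetic and read the result as signed, so "m_ack_seq is before snd_una" keeps working across a 2^32 wrap. A standalone check of the comparison:

#include <assert.h>
#include <stdint.h>

/* "seq is before ack" in modulo-2^32 arithmetic, as used above. */
static int seq_before(uint32_t seq, uint32_t ack)
{
	return (int32_t)(seq - ack) < 0;
}

int main(void)
{
	assert(seq_before(100, 200));			/* plain case            */
	assert(!seq_before(200, 100));
	assert(seq_before(0xfffffff0u, 0x10u));		/* ack wrapped past zero */
	assert(!seq_before(0x10u, 0xfffffff0u));
	return 0;
}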
220 | |||
221 | void rds_tcp_write_space(struct sock *sk) | ||
222 | { | ||
223 | void (*write_space)(struct sock *sk); | ||
224 | struct rds_connection *conn; | ||
225 | struct rds_tcp_connection *tc; | ||
226 | |||
227 | read_lock(&sk->sk_callback_lock); | ||
228 | conn = sk->sk_user_data; | ||
229 | if (conn == NULL) { | ||
230 | write_space = sk->sk_write_space; | ||
231 | goto out; | ||
232 | } | ||
233 | |||
234 | tc = conn->c_transport_data; | ||
235 | rdsdebug("write_space for tc %p\n", tc); | ||
236 | write_space = tc->t_orig_write_space; | ||
237 | rds_tcp_stats_inc(s_tcp_write_space_calls); | ||
238 | |||
239 | rdsdebug("tcp una %u\n", rds_tcp_snd_una(tc)); | ||
240 | tc->t_last_seen_una = rds_tcp_snd_una(tc); | ||
241 | rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked); | ||
242 | |||
243 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | ||
244 | out: | ||
245 | read_unlock(&sk->sk_callback_lock); | ||
246 | |||
247 | /* | ||
248 | * write_space is only called when data leaves tcp's send queue if | ||
249 | * SOCK_NOSPACE is set. We set SOCK_NOSPACE every time we put | ||
250 | * data in tcp's send queue because we use write_space to parse the | ||
251 | * sequence numbers and notice that rds messages have been fully | ||
252 | * received. | ||
253 | * | ||
254 | * tcp's write_space clears SOCK_NOSPACE if the send queue has more | ||
255 | * than a certain amount of space. So we need to set it again *after* | ||
256 | * we call tcp's write_space or else we might only get called on the | ||
257 | * first of a series of incoming tcp acks. | ||
258 | */ | ||
259 | write_space(sk); | ||
260 | |||
261 | if (sk->sk_socket) | ||
262 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | ||
263 | } | ||
diff --git a/net/rds/tcp_stats.c b/net/rds/tcp_stats.c new file mode 100644 index 000000000000..d5898d03cd68 --- /dev/null +++ b/net/rds/tcp_stats.c | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 Oracle. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | #include <linux/percpu.h> | ||
34 | #include <linux/seq_file.h> | ||
35 | #include <linux/proc_fs.h> | ||
36 | |||
37 | #include "rds.h" | ||
38 | #include "tcp.h" | ||
39 | |||
40 | DEFINE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats) | ||
41 | ____cacheline_aligned; | ||
42 | |||
43 | static const char *const rds_tcp_stat_names[] = { | ||
44 | "tcp_data_ready_calls", | ||
45 | "tcp_write_space_calls", | ||
46 | "tcp_sndbuf_full", | ||
47 | "tcp_connect_raced", | ||
48 | "tcp_listen_closed_stale", | ||
49 | }; | ||
50 | |||
51 | unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter, | ||
52 | unsigned int avail) | ||
53 | { | ||
54 | struct rds_tcp_statistics stats = {0, }; | ||
55 | uint64_t *src; | ||
56 | uint64_t *sum; | ||
57 | size_t i; | ||
58 | int cpu; | ||
59 | |||
60 | if (avail < ARRAY_SIZE(rds_tcp_stat_names)) | ||
61 | goto out; | ||
62 | |||
63 | for_each_online_cpu(cpu) { | ||
64 | src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu)); | ||
65 | sum = (uint64_t *)&stats; | ||
66 | for (i = 0; i < sizeof(stats) / sizeof(uint64_t); i++) | ||
67 | *(sum++) += *(src++); | ||
68 | } | ||
69 | |||
70 | rds_stats_info_copy(iter, (uint64_t *)&stats, rds_tcp_stat_names, | ||
71 | ARRAY_SIZE(rds_tcp_stat_names)); | ||
72 | out: | ||
73 | return ARRAY_SIZE(rds_tcp_stat_names); | ||
74 | } | ||
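rds_tcp_stats_info_copy() folds the per-CPU copies together by treating the all-u64 statistics struct as a flat array of counters, which is why the struct may contain nothing but u64 fields. A userspace-sized sketch of that folding (the struct layout and CPU count here are invented for the example):

#include <inttypes.h>
#include <stdio.h>

#define NCPUS 4

struct ex_stats {			/* must hold nothing but uint64_t counters */
	uint64_t data_ready_calls;
	uint64_t write_space_calls;
	uint64_t sndbuf_full;
};

int main(void)
{
	struct ex_stats percpu[NCPUS] = {
		{ 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 }, { 10, 11, 12 }
	};
	struct ex_stats sum = { 0, };
	size_t i;
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		const uint64_t *src = (const uint64_t *)&percpu[cpu];
		uint64_t *dst = (uint64_t *)&sum;

		for (i = 0; i < sizeof(sum) / sizeof(uint64_t); i++)
			dst[i] += src[i];
	}
	printf("data_ready %" PRIu64 " write_space %" PRIu64 " sndbuf_full %" PRIu64 "\n",
	       sum.data_ready_calls, sum.write_space_calls, sum.sndbuf_full);
	return 0;
}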
diff --git a/net/rds/threads.c b/net/rds/threads.c index 828a1bf9ea92..dd7e0cad1e7c 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c | |||
@@ -68,6 +68,7 @@ | |||
68 | * (TCP, IB/RDMA) to provide the necessary synchronisation. | 68 | * (TCP, IB/RDMA) to provide the necessary synchronisation. |
69 | */ | 69 | */ |
70 | struct workqueue_struct *rds_wq; | 70 | struct workqueue_struct *rds_wq; |
71 | EXPORT_SYMBOL_GPL(rds_wq); | ||
71 | 72 | ||
72 | void rds_connect_complete(struct rds_connection *conn) | 73 | void rds_connect_complete(struct rds_connection *conn) |
73 | { | 74 | { |
@@ -89,6 +90,7 @@ void rds_connect_complete(struct rds_connection *conn) | |||
89 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 90 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
90 | queue_delayed_work(rds_wq, &conn->c_recv_w, 0); | 91 | queue_delayed_work(rds_wq, &conn->c_recv_w, 0); |
91 | } | 92 | } |
93 | EXPORT_SYMBOL_GPL(rds_connect_complete); | ||
92 | 94 | ||
93 | /* | 95 | /* |
94 | * This random exponential backoff is relied on to eventually resolve racing | 96 | * This random exponential backoff is relied on to eventually resolve racing |
diff --git a/net/rds/transport.c b/net/rds/transport.c index 767da61ad2f3..7e1067901353 100644 --- a/net/rds/transport.c +++ b/net/rds/transport.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include "rds.h" | 37 | #include "rds.h" |
38 | #include "loop.h" | 38 | #include "loop.h" |
39 | 39 | ||
40 | static LIST_HEAD(rds_transports); | 40 | static struct rds_transport *transports[RDS_TRANS_COUNT]; |
41 | static DECLARE_RWSEM(rds_trans_sem); | 41 | static DECLARE_RWSEM(rds_trans_sem); |
42 | 42 | ||
43 | int rds_trans_register(struct rds_transport *trans) | 43 | int rds_trans_register(struct rds_transport *trans) |
@@ -46,36 +46,44 @@ int rds_trans_register(struct rds_transport *trans) | |||
46 | 46 | ||
47 | down_write(&rds_trans_sem); | 47 | down_write(&rds_trans_sem); |
48 | 48 | ||
49 | list_add_tail(&trans->t_item, &rds_transports); | 49 | if (transports[trans->t_type]) |
50 | printk(KERN_INFO "Registered RDS/%s transport\n", trans->t_name); | 50 | printk(KERN_ERR "RDS Transport type %d already registered\n", |
51 | trans->t_type); | ||
52 | else { | ||
53 | transports[trans->t_type] = trans; | ||
54 | printk(KERN_INFO "Registered RDS/%s transport\n", trans->t_name); | ||
55 | } | ||
51 | 56 | ||
52 | up_write(&rds_trans_sem); | 57 | up_write(&rds_trans_sem); |
53 | 58 | ||
54 | return 0; | 59 | return 0; |
55 | } | 60 | } |
61 | EXPORT_SYMBOL_GPL(rds_trans_register); | ||
56 | 62 | ||
57 | void rds_trans_unregister(struct rds_transport *trans) | 63 | void rds_trans_unregister(struct rds_transport *trans) |
58 | { | 64 | { |
59 | down_write(&rds_trans_sem); | 65 | down_write(&rds_trans_sem); |
60 | 66 | ||
61 | list_del_init(&trans->t_item); | 67 | transports[trans->t_type] = NULL; |
62 | printk(KERN_INFO "Unregistered RDS/%s transport\n", trans->t_name); | 68 | printk(KERN_INFO "Unregistered RDS/%s transport\n", trans->t_name); |
63 | 69 | ||
64 | up_write(&rds_trans_sem); | 70 | up_write(&rds_trans_sem); |
65 | } | 71 | } |
72 | EXPORT_SYMBOL_GPL(rds_trans_unregister); | ||
66 | 73 | ||
67 | struct rds_transport *rds_trans_get_preferred(__be32 addr) | 74 | struct rds_transport *rds_trans_get_preferred(__be32 addr) |
68 | { | 75 | { |
69 | struct rds_transport *trans; | ||
70 | struct rds_transport *ret = NULL; | 76 | struct rds_transport *ret = NULL; |
77 | int i; | ||
71 | 78 | ||
72 | if (IN_LOOPBACK(ntohl(addr))) | 79 | if (IN_LOOPBACK(ntohl(addr))) |
73 | return &rds_loop_transport; | 80 | return &rds_loop_transport; |
74 | 81 | ||
75 | down_read(&rds_trans_sem); | 82 | down_read(&rds_trans_sem); |
76 | list_for_each_entry(trans, &rds_transports, t_item) { | 83 | for (i = 0; i < RDS_TRANS_COUNT; i++) |
77 | if (trans->laddr_check(addr) == 0) { | 84 | { |
78 | ret = trans; | 85 | if (transports[i] && (transports[i]->laddr_check(addr) == 0)) { |
86 | ret = transports[i]; | ||
79 | break; | 87 | break; |
80 | } | 88 | } |
81 | } | 89 | } |
@@ -97,12 +105,15 @@ unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter, | |||
97 | struct rds_transport *trans; | 105 | struct rds_transport *trans; |
98 | unsigned int total = 0; | 106 | unsigned int total = 0; |
99 | unsigned int part; | 107 | unsigned int part; |
108 | int i; | ||
100 | 109 | ||
101 | rds_info_iter_unmap(iter); | 110 | rds_info_iter_unmap(iter); |
102 | down_read(&rds_trans_sem); | 111 | down_read(&rds_trans_sem); |
103 | 112 | ||
104 | list_for_each_entry(trans, &rds_transports, t_item) { | 113 | for (i = 0; i < RDS_TRANS_COUNT; i++) |
105 | if (trans->stats_info_copy == NULL) | 114 | { |
115 | trans = transports[i]; | ||
116 | if (!trans || !trans->stats_info_copy) | ||
106 | continue; | 117 | continue; |
107 | 118 | ||
108 | part = trans->stats_info_copy(iter, avail); | 119 | part = trans->stats_info_copy(iter, avail); |
diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 2fc4a1724eb8..dbeaf2983822 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c | |||
@@ -589,11 +589,13 @@ static const char *rfkill_get_type_str(enum rfkill_type type) | |||
589 | return "wimax"; | 589 | return "wimax"; |
590 | case RFKILL_TYPE_WWAN: | 590 | case RFKILL_TYPE_WWAN: |
591 | return "wwan"; | 591 | return "wwan"; |
592 | case RFKILL_TYPE_GPS: | ||
593 | return "gps"; | ||
592 | default: | 594 | default: |
593 | BUG(); | 595 | BUG(); |
594 | } | 596 | } |
595 | 597 | ||
596 | BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1); | 598 | BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_GPS + 1); |
597 | } | 599 | } |
598 | 600 | ||
599 | static ssize_t rfkill_type_show(struct device *dev, | 601 | static ssize_t rfkill_type_show(struct device *dev, |
@@ -1091,10 +1093,16 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf, | |||
1091 | struct rfkill_event ev; | 1093 | struct rfkill_event ev; |
1092 | 1094 | ||
1093 | /* we don't need the 'hard' variable but accept it */ | 1095 | /* we don't need the 'hard' variable but accept it */ |
1094 | if (count < sizeof(ev) - 1) | 1096 | if (count < RFKILL_EVENT_SIZE_V1 - 1) |
1095 | return -EINVAL; | 1097 | return -EINVAL; |
1096 | 1098 | ||
1097 | if (copy_from_user(&ev, buf, sizeof(ev) - 1)) | 1099 | /* |
1100 | * Copy as much data as we can accept into our 'ev' buffer, | ||
1101 | * but tell userspace how much we've copied so it can determine | ||
1102 | * our API version even in a write() call, if it cares. | ||
1103 | */ | ||
1104 | count = min(count, sizeof(ev)); | ||
1105 | if (copy_from_user(&ev, buf, count)) | ||
1098 | return -EFAULT; | 1106 | return -EFAULT; |
1099 | 1107 | ||
1100 | if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL) | 1108 | if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL) |
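The rfkill change accepts both the original event layout and any later, larger one: the write must supply at least (almost) the v1 size, at most sizeof(ev) is copied, and the consumed length is reported back so userspace can infer the kernel's ABI version. The same size-negotiation pattern in miniature, with struct layout and sizes invented for the illustration (the real code additionally tolerates the trailing 'hard' byte of v1 being absent):

/* Illustration of accepting old and extended event layouts by size. */
#include <stdint.h>
#include <string.h>

#define EV_SIZE_V1 8			/* size of the assumed original ABI */

struct ex_event {			/* current, extended layout */
	uint32_t idx;
	uint8_t  type;
	uint8_t  op;
	uint8_t  soft;
	uint8_t  hard;
	uint32_t added_later;		/* field that did not exist in v1 */
};

/* Returns bytes consumed, or -1 if the buffer is too short even for v1. */
long parse_event(struct ex_event *ev, const void *buf, size_t count)
{
	if (count < EV_SIZE_V1)
		return -1;
	memset(ev, 0, sizeof(*ev));	/* fields the writer didn't send stay 0 */
	if (count > sizeof(*ev))
		count = sizeof(*ev);	/* ignore trailing bytes we don't know */
	memcpy(ev, buf, count);
	return (long)count;
}

int main(void)
{
	unsigned char v1_buf[EV_SIZE_V1] = { 1, 0, 0, 0, 2, 3, 1, 0 };
	struct ex_event ev;

	return parse_event(&ev, v1_buf, sizeof(v1_buf)) == EV_SIZE_V1 ? 0 : 1;
}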
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 389d6e0d7740..424b893d1450 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c | |||
@@ -131,7 +131,7 @@ static int rose_close(struct net_device *dev) | |||
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
133 | 133 | ||
134 | static int rose_xmit(struct sk_buff *skb, struct net_device *dev) | 134 | static netdev_tx_t rose_xmit(struct sk_buff *skb, struct net_device *dev) |
135 | { | 135 | { |
136 | struct net_device_stats *stats = &dev->stats; | 136 | struct net_device_stats *stats = &dev->stats; |
137 | 137 | ||
@@ -141,7 +141,7 @@ static int rose_xmit(struct sk_buff *skb, struct net_device *dev) | |||
141 | } | 141 | } |
142 | dev_kfree_skb(skb); | 142 | dev_kfree_skb(skb); |
143 | stats->tx_errors++; | 143 | stats->tx_errors++; |
144 | return 0; | 144 | return NETDEV_TX_OK; |
145 | } | 145 | } |
146 | 146 | ||
147 | static const struct header_ops rose_header_ops = { | 147 | static const struct header_ops rose_header_ops = { |
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index 3ac1672e1070..c9f1f0a3a2ff 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c | |||
@@ -20,7 +20,7 @@ | |||
20 | 20 | ||
21 | static unsigned rxrpc_ack_defer = 1; | 21 | static unsigned rxrpc_ack_defer = 1; |
22 | 22 | ||
23 | static const char *rxrpc_acks[] = { | 23 | static const char *const rxrpc_acks[] = { |
24 | "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", | 24 | "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", |
25 | "-?-" | 25 | "-?-" |
26 | }; | 26 | }; |
diff --git a/net/sched/Makefile b/net/sched/Makefile index 54d950cd4b8d..f14e71bfa58f 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the Linux Traffic Control Unit. | 2 | # Makefile for the Linux Traffic Control Unit. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := sch_generic.o | 5 | obj-y := sch_generic.o sch_mq.o |
6 | 6 | ||
7 | obj-$(CONFIG_NET_SCHED) += sch_api.o sch_blackhole.o | 7 | obj-$(CONFIG_NET_SCHED) += sch_api.o sch_blackhole.o |
8 | obj-$(CONFIG_NET_CLS) += cls_api.o | 8 | obj-$(CONFIG_NET_CLS) += cls_api.o |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 9d03cc33b6cc..2dfb3e7a040d 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -1011,7 +1011,7 @@ replay: | |||
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | static struct nlattr * | 1013 | static struct nlattr * |
1014 | find_dump_kind(struct nlmsghdr *n) | 1014 | find_dump_kind(const struct nlmsghdr *n) |
1015 | { | 1015 | { |
1016 | struct nlattr *tb1, *tb2[TCA_ACT_MAX+1]; | 1016 | struct nlattr *tb1, *tb2[TCA_ACT_MAX+1]; |
1017 | struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; | 1017 | struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 09cdcdfe7e91..6a536949cdc0 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -168,8 +168,7 @@ replay: | |||
168 | 168 | ||
169 | /* Find qdisc */ | 169 | /* Find qdisc */ |
170 | if (!parent) { | 170 | if (!parent) { |
171 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); | 171 | q = dev->qdisc; |
172 | q = dev_queue->qdisc_sleeping; | ||
173 | parent = q->handle; | 172 | parent = q->handle; |
174 | } else { | 173 | } else { |
175 | q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent)); | 174 | q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent)); |
@@ -181,6 +180,9 @@ replay: | |||
181 | if ((cops = q->ops->cl_ops) == NULL) | 180 | if ((cops = q->ops->cl_ops) == NULL) |
182 | return -EINVAL; | 181 | return -EINVAL; |
183 | 182 | ||
183 | if (cops->tcf_chain == NULL) | ||
184 | return -EOPNOTSUPP; | ||
185 | |||
184 | /* Do we search for filter, attached to class? */ | 186 | /* Do we search for filter, attached to class? */ |
185 | if (TC_H_MIN(parent)) { | 187 | if (TC_H_MIN(parent)) { |
186 | cl = cops->get(q, parent); | 188 | cl = cops->get(q, parent); |
@@ -405,7 +407,6 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n, | |||
405 | static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | 407 | static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) |
406 | { | 408 | { |
407 | struct net *net = sock_net(skb->sk); | 409 | struct net *net = sock_net(skb->sk); |
408 | struct netdev_queue *dev_queue; | ||
409 | int t; | 410 | int t; |
410 | int s_t; | 411 | int s_t; |
411 | struct net_device *dev; | 412 | struct net_device *dev; |
@@ -424,15 +425,16 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
424 | if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 425 | if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) |
425 | return skb->len; | 426 | return skb->len; |
426 | 427 | ||
427 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
428 | if (!tcm->tcm_parent) | 428 | if (!tcm->tcm_parent) |
429 | q = dev_queue->qdisc_sleeping; | 429 | q = dev->qdisc; |
430 | else | 430 | else |
431 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); | 431 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); |
432 | if (!q) | 432 | if (!q) |
433 | goto out; | 433 | goto out; |
434 | if ((cops = q->ops->cl_ops) == NULL) | 434 | if ((cops = q->ops->cl_ops) == NULL) |
435 | goto errout; | 435 | goto errout; |
436 | if (cops->tcf_chain == NULL) | ||
437 | goto errout; | ||
436 | if (TC_H_MIN(tcm->tcm_parent)) { | 438 | if (TC_H_MIN(tcm->tcm_parent)) { |
437 | cl = cops->get(q, tcm->tcm_parent); | 439 | cl = cops->get(q, tcm->tcm_parent); |
438 | if (cl == 0) | 440 | if (cl == 0) |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index fdb694e9f759..692d9a41cd23 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -207,7 +207,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) | |||
207 | static void qdisc_list_add(struct Qdisc *q) | 207 | static void qdisc_list_add(struct Qdisc *q) |
208 | { | 208 | { |
209 | if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) | 209 | if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) |
210 | list_add_tail(&q->list, &qdisc_root_sleeping(q)->list); | 210 | list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list); |
211 | } | 211 | } |
212 | 212 | ||
213 | void qdisc_list_del(struct Qdisc *q) | 213 | void qdisc_list_del(struct Qdisc *q) |
@@ -219,17 +219,11 @@ EXPORT_SYMBOL(qdisc_list_del); | |||
219 | 219 | ||
220 | struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) | 220 | struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) |
221 | { | 221 | { |
222 | unsigned int i; | ||
223 | struct Qdisc *q; | 222 | struct Qdisc *q; |
224 | 223 | ||
225 | for (i = 0; i < dev->num_tx_queues; i++) { | 224 | q = qdisc_match_from_root(dev->qdisc, handle); |
226 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 225 | if (q) |
227 | struct Qdisc *txq_root = txq->qdisc_sleeping; | 226 | goto out; |
228 | |||
229 | q = qdisc_match_from_root(txq_root, handle); | ||
230 | if (q) | ||
231 | goto out; | ||
232 | } | ||
233 | 227 | ||
234 | q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); | 228 | q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); |
235 | out: | 229 | out: |
@@ -616,32 +610,6 @@ static u32 qdisc_alloc_handle(struct net_device *dev) | |||
616 | return i>0 ? autohandle : 0; | 610 | return i>0 ? autohandle : 0; |
617 | } | 611 | } |
618 | 612 | ||
619 | /* Attach toplevel qdisc to device queue. */ | ||
620 | |||
621 | static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, | ||
622 | struct Qdisc *qdisc) | ||
623 | { | ||
624 | struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; | ||
625 | spinlock_t *root_lock; | ||
626 | |||
627 | root_lock = qdisc_lock(oqdisc); | ||
628 | spin_lock_bh(root_lock); | ||
629 | |||
630 | /* Prune old scheduler */ | ||
631 | if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) | ||
632 | qdisc_reset(oqdisc); | ||
633 | |||
634 | /* ... and graft new one */ | ||
635 | if (qdisc == NULL) | ||
636 | qdisc = &noop_qdisc; | ||
637 | dev_queue->qdisc_sleeping = qdisc; | ||
638 | rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); | ||
639 | |||
640 | spin_unlock_bh(root_lock); | ||
641 | |||
642 | return oqdisc; | ||
643 | } | ||
644 | |||
645 | void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | 613 | void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) |
646 | { | 614 | { |
647 | const struct Qdisc_class_ops *cops; | 615 | const struct Qdisc_class_ops *cops; |
@@ -710,6 +678,11 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
710 | if (dev->flags & IFF_UP) | 678 | if (dev->flags & IFF_UP) |
711 | dev_deactivate(dev); | 679 | dev_deactivate(dev); |
712 | 680 | ||
681 | if (new && new->ops->attach) { | ||
682 | new->ops->attach(new); | ||
683 | num_q = 0; | ||
684 | } | ||
685 | |||
713 | for (i = 0; i < num_q; i++) { | 686 | for (i = 0; i < num_q; i++) { |
714 | struct netdev_queue *dev_queue = &dev->rx_queue; | 687 | struct netdev_queue *dev_queue = &dev->rx_queue; |
715 | 688 | ||
@@ -720,22 +693,27 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
720 | if (new && i > 0) | 693 | if (new && i > 0) |
721 | atomic_inc(&new->refcnt); | 694 | atomic_inc(&new->refcnt); |
722 | 695 | ||
723 | notify_and_destroy(skb, n, classid, old, new); | 696 | qdisc_destroy(old); |
724 | } | 697 | } |
725 | 698 | ||
699 | notify_and_destroy(skb, n, classid, dev->qdisc, new); | ||
700 | if (new && !new->ops->attach) | ||
701 | atomic_inc(&new->refcnt); | ||
702 | dev->qdisc = new ? : &noop_qdisc; | ||
703 | |||
726 | if (dev->flags & IFF_UP) | 704 | if (dev->flags & IFF_UP) |
727 | dev_activate(dev); | 705 | dev_activate(dev); |
728 | } else { | 706 | } else { |
729 | const struct Qdisc_class_ops *cops = parent->ops->cl_ops; | 707 | const struct Qdisc_class_ops *cops = parent->ops->cl_ops; |
730 | 708 | ||
731 | err = -EINVAL; | 709 | err = -EOPNOTSUPP; |
732 | 710 | if (cops && cops->graft) { | |
733 | if (cops) { | ||
734 | unsigned long cl = cops->get(parent, classid); | 711 | unsigned long cl = cops->get(parent, classid); |
735 | if (cl) { | 712 | if (cl) { |
736 | err = cops->graft(parent, cl, new, &old); | 713 | err = cops->graft(parent, cl, new, &old); |
737 | cops->put(parent, cl); | 714 | cops->put(parent, cl); |
738 | } | 715 | } else |
716 | err = -ENOENT; | ||
739 | } | 717 | } |
740 | if (!err) | 718 | if (!err) |
741 | notify_and_destroy(skb, n, classid, old, new); | 719 | notify_and_destroy(skb, n, classid, old, new); |
@@ -755,7 +733,8 @@ static struct lock_class_key qdisc_rx_lock; | |||
755 | 733 | ||
756 | static struct Qdisc * | 734 | static struct Qdisc * |
757 | qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, | 735 | qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, |
758 | u32 parent, u32 handle, struct nlattr **tca, int *errp) | 736 | struct Qdisc *p, u32 parent, u32 handle, |
737 | struct nlattr **tca, int *errp) | ||
759 | { | 738 | { |
760 | int err; | 739 | int err; |
761 | struct nlattr *kind = tca[TCA_KIND]; | 740 | struct nlattr *kind = tca[TCA_KIND]; |
@@ -832,24 +811,21 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, | |||
832 | if (tca[TCA_RATE]) { | 811 | if (tca[TCA_RATE]) { |
833 | spinlock_t *root_lock; | 812 | spinlock_t *root_lock; |
834 | 813 | ||
814 | err = -EOPNOTSUPP; | ||
815 | if (sch->flags & TCQ_F_MQROOT) | ||
816 | goto err_out4; | ||
817 | |||
835 | if ((sch->parent != TC_H_ROOT) && | 818 | if ((sch->parent != TC_H_ROOT) && |
836 | !(sch->flags & TCQ_F_INGRESS)) | 819 | !(sch->flags & TCQ_F_INGRESS) && |
820 | (!p || !(p->flags & TCQ_F_MQROOT))) | ||
837 | root_lock = qdisc_root_sleeping_lock(sch); | 821 | root_lock = qdisc_root_sleeping_lock(sch); |
838 | else | 822 | else |
839 | root_lock = qdisc_lock(sch); | 823 | root_lock = qdisc_lock(sch); |
840 | 824 | ||
841 | err = gen_new_estimator(&sch->bstats, &sch->rate_est, | 825 | err = gen_new_estimator(&sch->bstats, &sch->rate_est, |
842 | root_lock, tca[TCA_RATE]); | 826 | root_lock, tca[TCA_RATE]); |
843 | if (err) { | 827 | if (err) |
844 | /* | 828 | goto err_out4; |
845 | * Any broken qdiscs that would require | ||
846 | * a ops->reset() here? The qdisc was never | ||
847 | * in action so it shouldn't be necessary. | ||
848 | */ | ||
849 | if (ops->destroy) | ||
850 | ops->destroy(sch); | ||
851 | goto err_out3; | ||
852 | } | ||
853 | } | 829 | } |
854 | 830 | ||
855 | qdisc_list_add(sch); | 831 | qdisc_list_add(sch); |
@@ -865,6 +841,15 @@ err_out2: | |||
865 | err_out: | 841 | err_out: |
866 | *errp = err; | 842 | *errp = err; |
867 | return NULL; | 843 | return NULL; |
844 | |||
845 | err_out4: | ||
846 | /* | ||
847 | * Any broken qdiscs that would require a ops->reset() here? | ||
848 | * The qdisc was never in action so it shouldn't be necessary. | ||
849 | */ | ||
850 | if (ops->destroy) | ||
851 | ops->destroy(sch); | ||
852 | goto err_out3; | ||
868 | } | 853 | } |
869 | 854 | ||
870 | static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) | 855 | static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) |
@@ -889,13 +874,16 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) | |||
889 | qdisc_put_stab(sch->stab); | 874 | qdisc_put_stab(sch->stab); |
890 | sch->stab = stab; | 875 | sch->stab = stab; |
891 | 876 | ||
892 | if (tca[TCA_RATE]) | 877 | if (tca[TCA_RATE]) { |
893 | /* NB: ignores errors from replace_estimator | 878 | /* NB: ignores errors from replace_estimator |
894 | because change can't be undone. */ | 879 | because change can't be undone. */ |
880 | if (sch->flags & TCQ_F_MQROOT) | ||
881 | goto out; | ||
895 | gen_replace_estimator(&sch->bstats, &sch->rate_est, | 882 | gen_replace_estimator(&sch->bstats, &sch->rate_est, |
896 | qdisc_root_sleeping_lock(sch), | 883 | qdisc_root_sleeping_lock(sch), |
897 | tca[TCA_RATE]); | 884 | tca[TCA_RATE]); |
898 | 885 | } | |
886 | out: | ||
899 | return 0; | 887 | return 0; |
900 | } | 888 | } |
901 | 889 | ||
@@ -974,9 +962,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
974 | q = dev->rx_queue.qdisc_sleeping; | 962 | q = dev->rx_queue.qdisc_sleeping; |
975 | } | 963 | } |
976 | } else { | 964 | } else { |
977 | struct netdev_queue *dev_queue; | 965 | q = dev->qdisc; |
978 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
979 | q = dev_queue->qdisc_sleeping; | ||
980 | } | 966 | } |
981 | if (!q) | 967 | if (!q) |
982 | return -ENOENT; | 968 | return -ENOENT; |
@@ -1044,9 +1030,7 @@ replay: | |||
1044 | q = dev->rx_queue.qdisc_sleeping; | 1030 | q = dev->rx_queue.qdisc_sleeping; |
1045 | } | 1031 | } |
1046 | } else { | 1032 | } else { |
1047 | struct netdev_queue *dev_queue; | 1033 | q = dev->qdisc; |
1048 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
1049 | q = dev_queue->qdisc_sleeping; | ||
1050 | } | 1034 | } |
1051 | 1035 | ||
1052 | /* It may be default qdisc, ignore it */ | 1036 | /* It may be default qdisc, ignore it */ |
@@ -1123,13 +1107,19 @@ create_n_graft: | |||
1123 | if (!(n->nlmsg_flags&NLM_F_CREATE)) | 1107 | if (!(n->nlmsg_flags&NLM_F_CREATE)) |
1124 | return -ENOENT; | 1108 | return -ENOENT; |
1125 | if (clid == TC_H_INGRESS) | 1109 | if (clid == TC_H_INGRESS) |
1126 | q = qdisc_create(dev, &dev->rx_queue, | 1110 | q = qdisc_create(dev, &dev->rx_queue, p, |
1127 | tcm->tcm_parent, tcm->tcm_parent, | 1111 | tcm->tcm_parent, tcm->tcm_parent, |
1128 | tca, &err); | 1112 | tca, &err); |
1129 | else | 1113 | else { |
1130 | q = qdisc_create(dev, netdev_get_tx_queue(dev, 0), | 1114 | unsigned int ntx = 0; |
1115 | |||
1116 | if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue) | ||
1117 | ntx = p->ops->cl_ops->select_queue(p, tcm); | ||
1118 | |||
1119 | q = qdisc_create(dev, netdev_get_tx_queue(dev, ntx), p, | ||
1131 | tcm->tcm_parent, tcm->tcm_handle, | 1120 | tcm->tcm_parent, tcm->tcm_handle, |
1132 | tca, &err); | 1121 | tca, &err); |
1122 | } | ||
1133 | if (q == NULL) { | 1123 | if (q == NULL) { |
1134 | if (err == -EAGAIN) | 1124 | if (err == -EAGAIN) |
1135 | goto replay; | 1125 | goto replay; |
@@ -1291,8 +1281,7 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) | |||
1291 | s_q_idx = 0; | 1281 | s_q_idx = 0; |
1292 | q_idx = 0; | 1282 | q_idx = 0; |
1293 | 1283 | ||
1294 | dev_queue = netdev_get_tx_queue(dev, 0); | 1284 | if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0) |
1295 | if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0) | ||
1296 | goto done; | 1285 | goto done; |
1297 | 1286 | ||
1298 | dev_queue = &dev->rx_queue; | 1287 | dev_queue = &dev->rx_queue; |
@@ -1323,7 +1312,6 @@ done: | |||
1323 | static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | 1312 | static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) |
1324 | { | 1313 | { |
1325 | struct net *net = sock_net(skb->sk); | 1314 | struct net *net = sock_net(skb->sk); |
1326 | struct netdev_queue *dev_queue; | ||
1327 | struct tcmsg *tcm = NLMSG_DATA(n); | 1315 | struct tcmsg *tcm = NLMSG_DATA(n); |
1328 | struct nlattr *tca[TCA_MAX + 1]; | 1316 | struct nlattr *tca[TCA_MAX + 1]; |
1329 | struct net_device *dev; | 1317 | struct net_device *dev; |
@@ -1361,7 +1349,6 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1361 | 1349 | ||
1362 | /* Step 1. Determine qdisc handle X:0 */ | 1350 | /* Step 1. Determine qdisc handle X:0 */ |
1363 | 1351 | ||
1364 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
1365 | if (pid != TC_H_ROOT) { | 1352 | if (pid != TC_H_ROOT) { |
1366 | u32 qid1 = TC_H_MAJ(pid); | 1353 | u32 qid1 = TC_H_MAJ(pid); |
1367 | 1354 | ||
@@ -1372,7 +1359,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1372 | } else if (qid1) { | 1359 | } else if (qid1) { |
1373 | qid = qid1; | 1360 | qid = qid1; |
1374 | } else if (qid == 0) | 1361 | } else if (qid == 0) |
1375 | qid = dev_queue->qdisc_sleeping->handle; | 1362 | qid = dev->qdisc->handle; |
1376 | 1363 | ||
1377 | /* Now qid is genuine qdisc handle consistent | 1364 | /* Now qid is genuine qdisc handle consistent |
1378 | both with parent and child. | 1365 | both with parent and child. |
@@ -1383,7 +1370,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1383 | pid = TC_H_MAKE(qid, pid); | 1370 | pid = TC_H_MAKE(qid, pid); |
1384 | } else { | 1371 | } else { |
1385 | if (qid == 0) | 1372 | if (qid == 0) |
1386 | qid = dev_queue->qdisc_sleeping->handle; | 1373 | qid = dev->qdisc->handle; |
1387 | } | 1374 | } |
1388 | 1375 | ||
1389 | /* OK. Locate qdisc */ | 1376 | /* OK. Locate qdisc */ |
@@ -1417,7 +1404,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1417 | goto out; | 1404 | goto out; |
1418 | break; | 1405 | break; |
1419 | case RTM_DELTCLASS: | 1406 | case RTM_DELTCLASS: |
1420 | err = cops->delete(q, cl); | 1407 | err = -EOPNOTSUPP; |
1408 | if (cops->delete) | ||
1409 | err = cops->delete(q, cl); | ||
1421 | if (err == 0) | 1410 | if (err == 0) |
1422 | tclass_notify(skb, n, q, cl, RTM_DELTCLASS); | 1411 | tclass_notify(skb, n, q, cl, RTM_DELTCLASS); |
1423 | goto out; | 1412 | goto out; |
@@ -1431,7 +1420,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1431 | } | 1420 | } |
1432 | 1421 | ||
1433 | new_cl = cl; | 1422 | new_cl = cl; |
1434 | err = cops->change(q, clid, pid, tca, &new_cl); | 1423 | err = -EOPNOTSUPP; |
1424 | if (cops->change) | ||
1425 | err = cops->change(q, clid, pid, tca, &new_cl); | ||
1435 | if (err == 0) | 1426 | if (err == 0) |
1436 | tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS); | 1427 | tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS); |
1437 | 1428 | ||
@@ -1586,8 +1577,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) | |||
1586 | s_t = cb->args[0]; | 1577 | s_t = cb->args[0]; |
1587 | t = 0; | 1578 | t = 0; |
1588 | 1579 | ||
1589 | dev_queue = netdev_get_tx_queue(dev, 0); | 1580 | if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0) |
1590 | if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0) | ||
1591 | goto done; | 1581 | goto done; |
1592 | 1582 | ||
1593 | dev_queue = &dev->rx_queue; | 1583 | dev_queue = &dev->rx_queue; |
@@ -1707,6 +1697,7 @@ static int __init pktsched_init(void) | |||
1707 | { | 1697 | { |
1708 | register_qdisc(&pfifo_qdisc_ops); | 1698 | register_qdisc(&pfifo_qdisc_ops); |
1709 | register_qdisc(&bfifo_qdisc_ops); | 1699 | register_qdisc(&bfifo_qdisc_ops); |
1700 | register_qdisc(&mq_qdisc_ops); | ||
1710 | proc_net_fops_create(&init_net, "psched", 0, &psched_fops); | 1701 | proc_net_fops_create(&init_net, "psched", 0, &psched_fops); |
1711 | 1702 | ||
1712 | rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); | 1703 | rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index d5798e17a832..5b132c473264 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -1621,29 +1621,25 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
1621 | { | 1621 | { |
1622 | struct cbq_class *cl = (struct cbq_class*)arg; | 1622 | struct cbq_class *cl = (struct cbq_class*)arg; |
1623 | 1623 | ||
1624 | if (cl) { | 1624 | if (new == NULL) { |
1625 | if (new == NULL) { | 1625 | new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1626 | new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, | 1626 | &pfifo_qdisc_ops, cl->common.classid); |
1627 | &pfifo_qdisc_ops, | 1627 | if (new == NULL) |
1628 | cl->common.classid); | 1628 | return -ENOBUFS; |
1629 | if (new == NULL) | 1629 | } else { |
1630 | return -ENOBUFS; | ||
1631 | } else { | ||
1632 | #ifdef CONFIG_NET_CLS_ACT | 1630 | #ifdef CONFIG_NET_CLS_ACT |
1633 | if (cl->police == TC_POLICE_RECLASSIFY) | 1631 | if (cl->police == TC_POLICE_RECLASSIFY) |
1634 | new->reshape_fail = cbq_reshape_fail; | 1632 | new->reshape_fail = cbq_reshape_fail; |
1635 | #endif | 1633 | #endif |
1636 | } | ||
1637 | sch_tree_lock(sch); | ||
1638 | *old = cl->q; | ||
1639 | cl->q = new; | ||
1640 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | ||
1641 | qdisc_reset(*old); | ||
1642 | sch_tree_unlock(sch); | ||
1643 | |||
1644 | return 0; | ||
1645 | } | 1634 | } |
1646 | return -ENOENT; | 1635 | sch_tree_lock(sch); |
1636 | *old = cl->q; | ||
1637 | cl->q = new; | ||
1638 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | ||
1639 | qdisc_reset(*old); | ||
1640 | sch_tree_unlock(sch); | ||
1641 | |||
1642 | return 0; | ||
1647 | } | 1643 | } |
1648 | 1644 | ||
1649 | static struct Qdisc * | 1645 | static struct Qdisc * |
@@ -1651,7 +1647,7 @@ cbq_leaf(struct Qdisc *sch, unsigned long arg) | |||
1651 | { | 1647 | { |
1652 | struct cbq_class *cl = (struct cbq_class*)arg; | 1648 | struct cbq_class *cl = (struct cbq_class*)arg; |
1653 | 1649 | ||
1654 | return cl ? cl->q : NULL; | 1650 | return cl->q; |
1655 | } | 1651 | } |
1656 | 1652 | ||
1657 | static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg) | 1653 | static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg) |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 27d03816ec3e..4ae6aa562f2b 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -37,15 +37,11 @@ | |||
37 | * - updates to tree and tree walking are only done under the rtnl mutex. | 37 | * - updates to tree and tree walking are only done under the rtnl mutex. |
38 | */ | 38 | */ |
39 | 39 | ||
40 | static inline int qdisc_qlen(struct Qdisc *q) | ||
41 | { | ||
42 | return q->q.qlen; | ||
43 | } | ||
44 | |||
45 | static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) | 40 | static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) |
46 | { | 41 | { |
47 | q->gso_skb = skb; | 42 | q->gso_skb = skb; |
48 | q->qstats.requeues++; | 43 | q->qstats.requeues++; |
44 | q->q.qlen++; /* it's still part of the queue */ | ||
49 | __netif_schedule(q); | 45 | __netif_schedule(q); |
50 | 46 | ||
51 | return 0; | 47 | return 0; |
@@ -61,9 +57,11 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q) | |||
61 | 57 | ||
62 | /* check the reason of requeuing without tx lock first */ | 58 | /* check the reason of requeuing without tx lock first */ |
63 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | 59 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); |
64 | if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq)) | 60 | if (!netif_tx_queue_stopped(txq) && |
61 | !netif_tx_queue_frozen(txq)) { | ||
65 | q->gso_skb = NULL; | 62 | q->gso_skb = NULL; |
66 | else | 63 | q->q.qlen--; |
64 | } else | ||
67 | skb = NULL; | 65 | skb = NULL; |
68 | } else { | 66 | } else { |
69 | skb = q->dequeue(q); | 67 | skb = q->dequeue(q); |
@@ -103,44 +101,23 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, | |||
103 | } | 101 | } |
104 | 102 | ||
105 | /* | 103 | /* |
106 | * NOTE: Called under qdisc_lock(q) with locally disabled BH. | 104 | * Transmit one skb, and handle the return status as required. Holding the |
107 | * | 105 | * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this |
108 | * __QDISC_STATE_RUNNING guarantees only one CPU can process | 106 | * function. |
109 | * this qdisc at a time. qdisc_lock(q) serializes queue accesses for | ||
110 | * this queue. | ||
111 | * | ||
112 | * netif_tx_lock serializes accesses to device driver. | ||
113 | * | ||
114 | * qdisc_lock(q) and netif_tx_lock are mutually exclusive, | ||
115 | * if one is grabbed, another must be free. | ||
116 | * | ||
117 | * Note, that this procedure can be called by a watchdog timer | ||
118 | * | 107 | * |
119 | * Returns to the caller: | 108 | * Returns to the caller: |
120 | * 0 - queue is empty or throttled. | 109 | * 0 - queue is empty or throttled. |
121 | * >0 - queue is not empty. | 110 | * >0 - queue is not empty. |
122 | * | ||
123 | */ | 111 | */ |
124 | static inline int qdisc_restart(struct Qdisc *q) | 112 | int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, |
113 | struct net_device *dev, struct netdev_queue *txq, | ||
114 | spinlock_t *root_lock) | ||
125 | { | 115 | { |
126 | struct netdev_queue *txq; | ||
127 | int ret = NETDEV_TX_BUSY; | 116 | int ret = NETDEV_TX_BUSY; |
128 | struct net_device *dev; | ||
129 | spinlock_t *root_lock; | ||
130 | struct sk_buff *skb; | ||
131 | |||
132 | /* Dequeue packet */ | ||
133 | if (unlikely((skb = dequeue_skb(q)) == NULL)) | ||
134 | return 0; | ||
135 | |||
136 | root_lock = qdisc_lock(q); | ||
137 | 117 | ||
138 | /* And release qdisc */ | 118 | /* And release qdisc */ |
139 | spin_unlock(root_lock); | 119 | spin_unlock(root_lock); |
140 | 120 | ||
141 | dev = qdisc_dev(q); | ||
142 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | ||
143 | |||
144 | HARD_TX_LOCK(dev, txq, smp_processor_id()); | 121 | HARD_TX_LOCK(dev, txq, smp_processor_id()); |
145 | if (!netif_tx_queue_stopped(txq) && | 122 | if (!netif_tx_queue_stopped(txq) && |
146 | !netif_tx_queue_frozen(txq)) | 123 | !netif_tx_queue_frozen(txq)) |
@@ -177,6 +154,44 @@ static inline int qdisc_restart(struct Qdisc *q) | |||
177 | return ret; | 154 | return ret; |
178 | } | 155 | } |
179 | 156 | ||
157 | /* | ||
158 | * NOTE: Called under qdisc_lock(q) with locally disabled BH. | ||
159 | * | ||
160 | * __QDISC_STATE_RUNNING guarantees only one CPU can process | ||
161 | * this qdisc at a time. qdisc_lock(q) serializes queue accesses for | ||
162 | * this queue. | ||
163 | * | ||
164 | * netif_tx_lock serializes accesses to device driver. | ||
165 | * | ||
166 | * qdisc_lock(q) and netif_tx_lock are mutually exclusive, | ||
167 | * if one is grabbed, another must be free. | ||
168 | * | ||
169 | * Note, that this procedure can be called by a watchdog timer | ||
170 | * | ||
171 | * Returns to the caller: | ||
172 | * 0 - queue is empty or throttled. | ||
173 | * >0 - queue is not empty. | ||
174 | * | ||
175 | */ | ||
176 | static inline int qdisc_restart(struct Qdisc *q) | ||
177 | { | ||
178 | struct netdev_queue *txq; | ||
179 | struct net_device *dev; | ||
180 | spinlock_t *root_lock; | ||
181 | struct sk_buff *skb; | ||
182 | |||
183 | /* Dequeue packet */ | ||
184 | skb = dequeue_skb(q); | ||
185 | if (unlikely(!skb)) | ||
186 | return 0; | ||
187 | |||
188 | root_lock = qdisc_lock(q); | ||
189 | dev = qdisc_dev(q); | ||
190 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | ||
191 | |||
192 | return sch_direct_xmit(skb, q, dev, txq, root_lock); | ||
193 | } | ||
194 | |||
180 | void __qdisc_run(struct Qdisc *q) | 195 | void __qdisc_run(struct Qdisc *q) |
181 | { | 196 | { |
182 | unsigned long start_time = jiffies; | 197 | unsigned long start_time = jiffies; |
@@ -391,18 +406,38 @@ static const u8 prio2band[TC_PRIO_MAX+1] = | |||
391 | 406 | ||
392 | #define PFIFO_FAST_BANDS 3 | 407 | #define PFIFO_FAST_BANDS 3 |
393 | 408 | ||
394 | static inline struct sk_buff_head *prio2list(struct sk_buff *skb, | 409 | /* |
395 | struct Qdisc *qdisc) | 410 | * Private data for a pfifo_fast scheduler containing: |
411 | * - queues for the three band | ||
412 | * - bitmap indicating which of the bands contain skbs | ||
413 | */ | ||
414 | struct pfifo_fast_priv { | ||
415 | u32 bitmap; | ||
416 | struct sk_buff_head q[PFIFO_FAST_BANDS]; | ||
417 | }; | ||
418 | |||
419 | /* | ||
420 | * Convert a bitmap to the first band number where an skb is queued, where: | ||
421 | * bitmap=0 means there are no skbs on any band. | ||
422 | * bitmap=1 means there is an skb on band 0. | ||
423 | * bitmap=7 means there are skbs on all 3 bands, etc. | ||
424 | */ | ||
425 | static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0}; | ||
426 | |||
427 | static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv, | ||
428 | int band) | ||
396 | { | 429 | { |
397 | struct sk_buff_head *list = qdisc_priv(qdisc); | 430 | return priv->q + band; |
398 | return list + prio2band[skb->priority & TC_PRIO_MAX]; | ||
399 | } | 431 | } |
400 | 432 | ||
401 | static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) | 433 | static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) |
402 | { | 434 | { |
403 | struct sk_buff_head *list = prio2list(skb, qdisc); | 435 | if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) { |
436 | int band = prio2band[skb->priority & TC_PRIO_MAX]; | ||
437 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); | ||
438 | struct sk_buff_head *list = band2list(priv, band); | ||
404 | 439 | ||
405 | if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) { | 440 | priv->bitmap |= (1 << band); |
406 | qdisc->q.qlen++; | 441 | qdisc->q.qlen++; |
407 | return __qdisc_enqueue_tail(skb, qdisc, list); | 442 | return __qdisc_enqueue_tail(skb, qdisc, list); |
408 | } | 443 | } |
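The bitmap2band[] table introduced above is a branch-free "index of the lowest set band" for the three-band bitmap: -1 means all bands are empty, otherwise the lowest-numbered non-empty band wins, which preserves the strict band-0-before-band-1-before-band-2 priority. A quick standalone check that the table agrees with a loop-based lowest-set-bit computation:

#include <assert.h>

static const int bitmap2band[] = { -1, 0, 1, 0, 2, 0, 1, 0 };

/* lowest set bit index for a 3-bit bitmap, or -1 when it is empty */
static int lowest_band(unsigned int bitmap)
{
	int band;

	for (band = 0; band < 3; band++)
		if (bitmap & (1u << band))
			return band;
	return -1;
}

int main(void)
{
	unsigned int bitmap;

	for (bitmap = 0; bitmap < 8; bitmap++)
		assert(bitmap2band[bitmap] == lowest_band(bitmap));
	return 0;
}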
@@ -412,14 +447,18 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) | |||
412 | 447 | ||
413 | static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) | 448 | static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) |
414 | { | 449 | { |
415 | int prio; | 450 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
416 | struct sk_buff_head *list = qdisc_priv(qdisc); | 451 | int band = bitmap2band[priv->bitmap]; |
417 | 452 | ||
418 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { | 453 | if (likely(band >= 0)) { |
419 | if (!skb_queue_empty(list + prio)) { | 454 | struct sk_buff_head *list = band2list(priv, band); |
420 | qdisc->q.qlen--; | 455 | struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list); |
421 | return __qdisc_dequeue_head(qdisc, list + prio); | 456 | |
422 | } | 457 | qdisc->q.qlen--; |
458 | if (skb_queue_empty(list)) | ||
459 | priv->bitmap &= ~(1 << band); | ||
460 | |||
461 | return skb; | ||
423 | } | 462 | } |
424 | 463 | ||
425 | return NULL; | 464 | return NULL; |
@@ -427,12 +466,13 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) | |||
427 | 466 | ||
428 | static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) | 467 | static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) |
429 | { | 468 | { |
430 | int prio; | 469 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
431 | struct sk_buff_head *list = qdisc_priv(qdisc); | 470 | int band = bitmap2band[priv->bitmap]; |
432 | 471 | ||
433 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { | 472 | if (band >= 0) { |
434 | if (!skb_queue_empty(list + prio)) | 473 | struct sk_buff_head *list = band2list(priv, band); |
435 | return skb_peek(list + prio); | 474 | |
475 | return skb_peek(list); | ||
436 | } | 476 | } |
437 | 477 | ||
438 | return NULL; | 478 | return NULL; |
@@ -441,11 +481,12 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) | |||
441 | static void pfifo_fast_reset(struct Qdisc* qdisc) | 481 | static void pfifo_fast_reset(struct Qdisc* qdisc) |
442 | { | 482 | { |
443 | int prio; | 483 | int prio; |
444 | struct sk_buff_head *list = qdisc_priv(qdisc); | 484 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
445 | 485 | ||
446 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) | 486 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) |
447 | __qdisc_reset_queue(qdisc, list + prio); | 487 | __qdisc_reset_queue(qdisc, band2list(priv, prio)); |
448 | 488 | ||
489 | priv->bitmap = 0; | ||
449 | qdisc->qstats.backlog = 0; | 490 | qdisc->qstats.backlog = 0; |
450 | qdisc->q.qlen = 0; | 491 | qdisc->q.qlen = 0; |
451 | } | 492 | } |
@@ -465,17 +506,17 @@ nla_put_failure: | |||
465 | static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) | 506 | static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) |
466 | { | 507 | { |
467 | int prio; | 508 | int prio; |
468 | struct sk_buff_head *list = qdisc_priv(qdisc); | 509 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
469 | 510 | ||
470 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) | 511 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) |
471 | skb_queue_head_init(list + prio); | 512 | skb_queue_head_init(band2list(priv, prio)); |
472 | 513 | ||
473 | return 0; | 514 | return 0; |
474 | } | 515 | } |
475 | 516 | ||
476 | static struct Qdisc_ops pfifo_fast_ops __read_mostly = { | 517 | struct Qdisc_ops pfifo_fast_ops __read_mostly = { |
477 | .id = "pfifo_fast", | 518 | .id = "pfifo_fast", |
478 | .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head), | 519 | .priv_size = sizeof(struct pfifo_fast_priv), |
479 | .enqueue = pfifo_fast_enqueue, | 520 | .enqueue = pfifo_fast_enqueue, |
480 | .dequeue = pfifo_fast_dequeue, | 521 | .dequeue = pfifo_fast_dequeue, |
481 | .peek = pfifo_fast_peek, | 522 | .peek = pfifo_fast_peek, |
@@ -547,8 +588,11 @@ void qdisc_reset(struct Qdisc *qdisc) | |||
547 | if (ops->reset) | 588 | if (ops->reset) |
548 | ops->reset(qdisc); | 589 | ops->reset(qdisc); |
549 | 590 | ||
550 | kfree_skb(qdisc->gso_skb); | 591 | if (qdisc->gso_skb) { |
551 | qdisc->gso_skb = NULL; | 592 | kfree_skb(qdisc->gso_skb); |
593 | qdisc->gso_skb = NULL; | ||
594 | qdisc->q.qlen = 0; | ||
595 | } | ||
552 | } | 596 | } |
553 | EXPORT_SYMBOL(qdisc_reset); | 597 | EXPORT_SYMBOL(qdisc_reset); |
554 | 598 | ||
@@ -579,17 +623,29 @@ void qdisc_destroy(struct Qdisc *qdisc) | |||
579 | } | 623 | } |
580 | EXPORT_SYMBOL(qdisc_destroy); | 624 | EXPORT_SYMBOL(qdisc_destroy); |
581 | 625 | ||
582 | static bool dev_all_qdisc_sleeping_noop(struct net_device *dev) | 626 | /* Attach toplevel qdisc to device queue. */ |
627 | struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, | ||
628 | struct Qdisc *qdisc) | ||
583 | { | 629 | { |
584 | unsigned int i; | 630 | struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; |
631 | spinlock_t *root_lock; | ||
585 | 632 | ||
586 | for (i = 0; i < dev->num_tx_queues; i++) { | 633 | root_lock = qdisc_lock(oqdisc); |
587 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 634 | spin_lock_bh(root_lock); |
588 | 635 | ||
589 | if (txq->qdisc_sleeping != &noop_qdisc) | 636 | /* Prune old scheduler */ |
590 | return false; | 637 | if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) |
591 | } | 638 | qdisc_reset(oqdisc); |
592 | return true; | 639 | |
640 | /* ... and graft new one */ | ||
641 | if (qdisc == NULL) | ||
642 | qdisc = &noop_qdisc; | ||
643 | dev_queue->qdisc_sleeping = qdisc; | ||
644 | rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); | ||
645 | |||
646 | spin_unlock_bh(root_lock); | ||
647 | |||
648 | return oqdisc; | ||
593 | } | 649 | } |
594 | 650 | ||
595 | static void attach_one_default_qdisc(struct net_device *dev, | 651 | static void attach_one_default_qdisc(struct net_device *dev, |
@@ -605,12 +661,35 @@ static void attach_one_default_qdisc(struct net_device *dev, | |||
605 | printk(KERN_INFO "%s: activation failed\n", dev->name); | 661 | printk(KERN_INFO "%s: activation failed\n", dev->name); |
606 | return; | 662 | return; |
607 | } | 663 | } |
664 | |||
665 | /* Can by-pass the queue discipline for default qdisc */ | ||
666 | qdisc->flags |= TCQ_F_CAN_BYPASS; | ||
608 | } else { | 667 | } else { |
609 | qdisc = &noqueue_qdisc; | 668 | qdisc = &noqueue_qdisc; |
610 | } | 669 | } |
611 | dev_queue->qdisc_sleeping = qdisc; | 670 | dev_queue->qdisc_sleeping = qdisc; |
612 | } | 671 | } |
613 | 672 | ||
673 | static void attach_default_qdiscs(struct net_device *dev) | ||
674 | { | ||
675 | struct netdev_queue *txq; | ||
676 | struct Qdisc *qdisc; | ||
677 | |||
678 | txq = netdev_get_tx_queue(dev, 0); | ||
679 | |||
680 | if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) { | ||
681 | netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); | ||
682 | dev->qdisc = txq->qdisc_sleeping; | ||
683 | atomic_inc(&dev->qdisc->refcnt); | ||
684 | } else { | ||
685 | qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT); | ||
686 | if (qdisc) { | ||
687 | qdisc->ops->attach(qdisc); | ||
688 | dev->qdisc = qdisc; | ||
689 | } | ||
690 | } | ||
691 | } | ||
692 | |||
614 | static void transition_one_qdisc(struct net_device *dev, | 693 | static void transition_one_qdisc(struct net_device *dev, |
615 | struct netdev_queue *dev_queue, | 694 | struct netdev_queue *dev_queue, |
616 | void *_need_watchdog) | 695 | void *_need_watchdog) |
@@ -638,8 +717,8 @@ void dev_activate(struct net_device *dev) | |||
638 | virtual interfaces | 717 | virtual interfaces |
639 | */ | 718 | */ |
640 | 719 | ||
641 | if (dev_all_qdisc_sleeping_noop(dev)) | 720 | if (dev->qdisc == &noop_qdisc) |
642 | netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); | 721 | attach_default_qdiscs(dev); |
643 | 722 | ||
644 | if (!netif_carrier_ok(dev)) | 723 | if (!netif_carrier_ok(dev)) |
645 | /* Delay activation until next carrier-on event */ | 724 | /* Delay activation until next carrier-on event */ |
@@ -730,6 +809,7 @@ static void dev_init_scheduler_queue(struct net_device *dev, | |||
730 | 809 | ||
731 | void dev_init_scheduler(struct net_device *dev) | 810 | void dev_init_scheduler(struct net_device *dev) |
732 | { | 811 | { |
812 | dev->qdisc = &noop_qdisc; | ||
733 | netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); | 813 | netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); |
734 | dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc); | 814 | dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc); |
735 | 815 | ||
@@ -755,5 +835,8 @@ void dev_shutdown(struct net_device *dev) | |||
755 | { | 835 | { |
756 | netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); | 836 | netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); |
757 | shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc); | 837 | shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc); |
838 | qdisc_destroy(dev->qdisc); | ||
839 | dev->qdisc = &noop_qdisc; | ||
840 | |||
758 | WARN_ON(timer_pending(&dev->watchdog_timer)); | 841 | WARN_ON(timer_pending(&dev->watchdog_timer)); |
759 | } | 842 | } |
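
attach_default_qdiscs() above picks between the old per-queue defaults and the new mq root. A rough sketch of that decision, with simplified parameters standing in for the net_device fields:

#include <stdio.h>

/* Sketch of the choice attach_default_qdiscs() makes: only a real
 * multiqueue device with a non-zero tx_queue_len gets the mq root
 * that fans out one child qdisc per tx queue; everything else keeps
 * the previous behaviour of one default qdisc per queue.
 */
enum default_root { PER_QUEUE_DEFAULT, MQ_ROOT };

static enum default_root pick_default_root(int is_multiqueue,
					    unsigned int tx_queue_len)
{
	if (!is_multiqueue || tx_queue_len == 0)
		return PER_QUEUE_DEFAULT;	/* pfifo_fast or noqueue */
	return MQ_ROOT;				/* mq with per-queue children */
}

int main(void)
{
	printf("%d %d\n",
	       pick_default_root(0, 1000),	/* single queue -> 0 */
	       pick_default_root(1, 1000));	/* multiqueue   -> 1 */
	return 0;
}
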
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index dad0144423da..375d64cb1a3d 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -1203,8 +1203,6 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
1203 | { | 1203 | { |
1204 | struct hfsc_class *cl = (struct hfsc_class *)arg; | 1204 | struct hfsc_class *cl = (struct hfsc_class *)arg; |
1205 | 1205 | ||
1206 | if (cl == NULL) | ||
1207 | return -ENOENT; | ||
1208 | if (cl->level > 0) | 1206 | if (cl->level > 0) |
1209 | return -EINVAL; | 1207 | return -EINVAL; |
1210 | if (new == NULL) { | 1208 | if (new == NULL) { |
@@ -1228,7 +1226,7 @@ hfsc_class_leaf(struct Qdisc *sch, unsigned long arg) | |||
1228 | { | 1226 | { |
1229 | struct hfsc_class *cl = (struct hfsc_class *)arg; | 1227 | struct hfsc_class *cl = (struct hfsc_class *)arg; |
1230 | 1228 | ||
1231 | if (cl != NULL && cl->level == 0) | 1229 | if (cl->level == 0) |
1232 | return cl->qdisc; | 1230 | return cl->qdisc; |
1233 | 1231 | ||
1234 | return NULL; | 1232 | return NULL; |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index ec4d46399d59..85acab9dc6fd 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -1117,30 +1117,29 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
1117 | { | 1117 | { |
1118 | struct htb_class *cl = (struct htb_class *)arg; | 1118 | struct htb_class *cl = (struct htb_class *)arg; |
1119 | 1119 | ||
1120 | if (cl && !cl->level) { | 1120 | if (cl->level) |
1121 | if (new == NULL && | 1121 | return -EINVAL; |
1122 | (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, | 1122 | if (new == NULL && |
1123 | &pfifo_qdisc_ops, | 1123 | (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1124 | cl->common.classid)) | 1124 | &pfifo_qdisc_ops, |
1125 | == NULL) | 1125 | cl->common.classid)) == NULL) |
1126 | return -ENOBUFS; | 1126 | return -ENOBUFS; |
1127 | sch_tree_lock(sch); | 1127 | |
1128 | *old = cl->un.leaf.q; | 1128 | sch_tree_lock(sch); |
1129 | cl->un.leaf.q = new; | 1129 | *old = cl->un.leaf.q; |
1130 | if (*old != NULL) { | 1130 | cl->un.leaf.q = new; |
1131 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | 1131 | if (*old != NULL) { |
1132 | qdisc_reset(*old); | 1132 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); |
1133 | } | 1133 | qdisc_reset(*old); |
1134 | sch_tree_unlock(sch); | ||
1135 | return 0; | ||
1136 | } | 1134 | } |
1137 | return -ENOENT; | 1135 | sch_tree_unlock(sch); |
1136 | return 0; | ||
1138 | } | 1137 | } |
1139 | 1138 | ||
1140 | static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg) | 1139 | static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg) |
1141 | { | 1140 | { |
1142 | struct htb_class *cl = (struct htb_class *)arg; | 1141 | struct htb_class *cl = (struct htb_class *)arg; |
1143 | return (cl && !cl->level) ? cl->un.leaf.q : NULL; | 1142 | return !cl->level ? cl->un.leaf.q : NULL; |
1144 | } | 1143 | } |
1145 | 1144 | ||
1146 | static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) | 1145 | static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) |
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 4a2b77374358..a9e646bdb605 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c | |||
@@ -22,12 +22,6 @@ struct ingress_qdisc_data { | |||
22 | 22 | ||
23 | /* ------------------------- Class/flow operations ------------------------- */ | 23 | /* ------------------------- Class/flow operations ------------------------- */ |
24 | 24 | ||
25 | static int ingress_graft(struct Qdisc *sch, unsigned long arg, | ||
26 | struct Qdisc *new, struct Qdisc **old) | ||
27 | { | ||
28 | return -EOPNOTSUPP; | ||
29 | } | ||
30 | |||
31 | static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg) | 25 | static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg) |
32 | { | 26 | { |
33 | return NULL; | 27 | return NULL; |
@@ -48,12 +42,6 @@ static void ingress_put(struct Qdisc *sch, unsigned long cl) | |||
48 | { | 42 | { |
49 | } | 43 | } |
50 | 44 | ||
51 | static int ingress_change(struct Qdisc *sch, u32 classid, u32 parent, | ||
52 | struct nlattr **tca, unsigned long *arg) | ||
53 | { | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) | 45 | static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) |
58 | { | 46 | { |
59 | return; | 47 | return; |
@@ -123,11 +111,9 @@ nla_put_failure: | |||
123 | } | 111 | } |
124 | 112 | ||
125 | static const struct Qdisc_class_ops ingress_class_ops = { | 113 | static const struct Qdisc_class_ops ingress_class_ops = { |
126 | .graft = ingress_graft, | ||
127 | .leaf = ingress_leaf, | 114 | .leaf = ingress_leaf, |
128 | .get = ingress_get, | 115 | .get = ingress_get, |
129 | .put = ingress_put, | 116 | .put = ingress_put, |
130 | .change = ingress_change, | ||
131 | .walk = ingress_walk, | 117 | .walk = ingress_walk, |
132 | .tcf_chain = ingress_find_tcf, | 118 | .tcf_chain = ingress_find_tcf, |
133 | .bind_tcf = ingress_bind_filter, | 119 | .bind_tcf = ingress_bind_filter, |
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c new file mode 100644 index 000000000000..dd5ee022f1f7 --- /dev/null +++ b/net/sched/sch_mq.c | |||
@@ -0,0 +1,235 @@ | |||
1 | /* | ||
2 | * net/sched/sch_mq.c Classful multiqueue dummy scheduler | ||
3 | * | ||
4 | * Copyright (c) 2009 Patrick McHardy <kaber@trash.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * version 2 as published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/skbuff.h> | ||
16 | #include <net/netlink.h> | ||
17 | #include <net/pkt_sched.h> | ||
18 | |||
19 | struct mq_sched { | ||
20 | struct Qdisc **qdiscs; | ||
21 | }; | ||
22 | |||
23 | static void mq_destroy(struct Qdisc *sch) | ||
24 | { | ||
25 | struct net_device *dev = qdisc_dev(sch); | ||
26 | struct mq_sched *priv = qdisc_priv(sch); | ||
27 | unsigned int ntx; | ||
28 | |||
29 | if (!priv->qdiscs) | ||
30 | return; | ||
31 | for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++) | ||
32 | qdisc_destroy(priv->qdiscs[ntx]); | ||
33 | kfree(priv->qdiscs); | ||
34 | } | ||
35 | |||
36 | static int mq_init(struct Qdisc *sch, struct nlattr *opt) | ||
37 | { | ||
38 | struct net_device *dev = qdisc_dev(sch); | ||
39 | struct mq_sched *priv = qdisc_priv(sch); | ||
40 | struct netdev_queue *dev_queue; | ||
41 | struct Qdisc *qdisc; | ||
42 | unsigned int ntx; | ||
43 | |||
44 | if (sch->parent != TC_H_ROOT) | ||
45 | return -EOPNOTSUPP; | ||
46 | |||
47 | if (!netif_is_multiqueue(dev)) | ||
48 | return -EOPNOTSUPP; | ||
49 | |||
50 | /* pre-allocate qdiscs, attachment can't fail */ | ||
51 | priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), | ||
52 | GFP_KERNEL); | ||
53 | if (priv->qdiscs == NULL) | ||
54 | return -ENOMEM; | ||
55 | |||
56 | for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { | ||
57 | dev_queue = netdev_get_tx_queue(dev, ntx); | ||
58 | qdisc = qdisc_create_dflt(dev, dev_queue, &pfifo_fast_ops, | ||
59 | TC_H_MAKE(TC_H_MAJ(sch->handle), | ||
60 | TC_H_MIN(ntx + 1))); | ||
61 | if (qdisc == NULL) | ||
62 | goto err; | ||
63 | qdisc->flags |= TCQ_F_CAN_BYPASS; | ||
64 | priv->qdiscs[ntx] = qdisc; | ||
65 | } | ||
66 | |||
67 | sch->flags |= TCQ_F_MQROOT; | ||
68 | return 0; | ||
69 | |||
70 | err: | ||
71 | mq_destroy(sch); | ||
72 | return -ENOMEM; | ||
73 | } | ||
74 | |||
75 | static void mq_attach(struct Qdisc *sch) | ||
76 | { | ||
77 | struct net_device *dev = qdisc_dev(sch); | ||
78 | struct mq_sched *priv = qdisc_priv(sch); | ||
79 | struct Qdisc *qdisc; | ||
80 | unsigned int ntx; | ||
81 | |||
82 | for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { | ||
83 | qdisc = priv->qdiscs[ntx]; | ||
84 | qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc); | ||
85 | if (qdisc) | ||
86 | qdisc_destroy(qdisc); | ||
87 | } | ||
88 | kfree(priv->qdiscs); | ||
89 | priv->qdiscs = NULL; | ||
90 | } | ||
91 | |||
92 | static int mq_dump(struct Qdisc *sch, struct sk_buff *skb) | ||
93 | { | ||
94 | struct net_device *dev = qdisc_dev(sch); | ||
95 | struct Qdisc *qdisc; | ||
96 | unsigned int ntx; | ||
97 | |||
98 | sch->q.qlen = 0; | ||
99 | memset(&sch->bstats, 0, sizeof(sch->bstats)); | ||
100 | memset(&sch->qstats, 0, sizeof(sch->qstats)); | ||
101 | |||
102 | for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { | ||
103 | qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; | ||
104 | spin_lock_bh(qdisc_lock(qdisc)); | ||
105 | sch->q.qlen += qdisc->q.qlen; | ||
106 | sch->bstats.bytes += qdisc->bstats.bytes; | ||
107 | sch->bstats.packets += qdisc->bstats.packets; | ||
108 | sch->qstats.qlen += qdisc->qstats.qlen; | ||
109 | sch->qstats.backlog += qdisc->qstats.backlog; | ||
110 | sch->qstats.drops += qdisc->qstats.drops; | ||
111 | sch->qstats.requeues += qdisc->qstats.requeues; | ||
112 | sch->qstats.overlimits += qdisc->qstats.overlimits; | ||
113 | spin_unlock_bh(qdisc_lock(qdisc)); | ||
114 | } | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl) | ||
119 | { | ||
120 | struct net_device *dev = qdisc_dev(sch); | ||
121 | unsigned long ntx = cl - 1; | ||
122 | |||
123 | if (ntx >= dev->num_tx_queues) | ||
124 | return NULL; | ||
125 | return netdev_get_tx_queue(dev, ntx); | ||
126 | } | ||
127 | |||
128 | static unsigned int mq_select_queue(struct Qdisc *sch, struct tcmsg *tcm) | ||
129 | { | ||
130 | unsigned int ntx = TC_H_MIN(tcm->tcm_parent); | ||
131 | |||
132 | if (!mq_queue_get(sch, ntx)) | ||
133 | return 0; | ||
134 | return ntx - 1; | ||
135 | } | ||
136 | |||
137 | static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, | ||
138 | struct Qdisc **old) | ||
139 | { | ||
140 | struct netdev_queue *dev_queue = mq_queue_get(sch, cl); | ||
141 | struct net_device *dev = qdisc_dev(sch); | ||
142 | |||
143 | if (dev->flags & IFF_UP) | ||
144 | dev_deactivate(dev); | ||
145 | |||
146 | *old = dev_graft_qdisc(dev_queue, new); | ||
147 | |||
148 | if (dev->flags & IFF_UP) | ||
149 | dev_activate(dev); | ||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl) | ||
154 | { | ||
155 | struct netdev_queue *dev_queue = mq_queue_get(sch, cl); | ||
156 | |||
157 | return dev_queue->qdisc_sleeping; | ||
158 | } | ||
159 | |||
160 | static unsigned long mq_get(struct Qdisc *sch, u32 classid) | ||
161 | { | ||
162 | unsigned int ntx = TC_H_MIN(classid); | ||
163 | |||
164 | if (!mq_queue_get(sch, ntx)) | ||
165 | return 0; | ||
166 | return ntx; | ||
167 | } | ||
168 | |||
169 | static void mq_put(struct Qdisc *sch, unsigned long cl) | ||
170 | { | ||
171 | return; | ||
172 | } | ||
173 | |||
174 | static int mq_dump_class(struct Qdisc *sch, unsigned long cl, | ||
175 | struct sk_buff *skb, struct tcmsg *tcm) | ||
176 | { | ||
177 | struct netdev_queue *dev_queue = mq_queue_get(sch, cl); | ||
178 | |||
179 | tcm->tcm_parent = TC_H_ROOT; | ||
180 | tcm->tcm_handle |= TC_H_MIN(cl); | ||
181 | tcm->tcm_info = dev_queue->qdisc_sleeping->handle; | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, | ||
186 | struct gnet_dump *d) | ||
187 | { | ||
188 | struct netdev_queue *dev_queue = mq_queue_get(sch, cl); | ||
189 | |||
190 | sch = dev_queue->qdisc_sleeping; | ||
191 | if (gnet_stats_copy_basic(d, &sch->bstats) < 0 || | ||
192 | gnet_stats_copy_queue(d, &sch->qstats) < 0) | ||
193 | return -1; | ||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | ||
198 | { | ||
199 | struct net_device *dev = qdisc_dev(sch); | ||
200 | unsigned int ntx; | ||
201 | |||
202 | if (arg->stop) | ||
203 | return; | ||
204 | |||
205 | arg->count = arg->skip; | ||
206 | for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) { | ||
207 | if (arg->fn(sch, ntx + 1, arg) < 0) { | ||
208 | arg->stop = 1; | ||
209 | break; | ||
210 | } | ||
211 | arg->count++; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | static const struct Qdisc_class_ops mq_class_ops = { | ||
216 | .select_queue = mq_select_queue, | ||
217 | .graft = mq_graft, | ||
218 | .leaf = mq_leaf, | ||
219 | .get = mq_get, | ||
220 | .put = mq_put, | ||
221 | .walk = mq_walk, | ||
222 | .dump = mq_dump_class, | ||
223 | .dump_stats = mq_dump_class_stats, | ||
224 | }; | ||
225 | |||
226 | struct Qdisc_ops mq_qdisc_ops __read_mostly = { | ||
227 | .cl_ops = &mq_class_ops, | ||
228 | .id = "mq", | ||
229 | .priv_size = sizeof(struct mq_sched), | ||
230 | .init = mq_init, | ||
231 | .destroy = mq_destroy, | ||
232 | .attach = mq_attach, | ||
233 | .dump = mq_dump, | ||
234 | .owner = THIS_MODULE, | ||
235 | }; | ||
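
sch_mq.c addresses its per-queue children through TC class ids: the minor number is the tx queue index plus one, so minor 0 stays free to mean "no class". A small sketch of that mapping (TC_H_MIN is re-declared here for illustration; in the kernel it comes from the pkt_sched headers):

#include <stdio.h>

#define TC_H_MIN_MASK 0x0000FFFFU
#define TC_H_MIN(h)   ((h) & TC_H_MIN_MASK)

/* Sketch of mq_queue_get()/mq_get(): class minor i addresses
 * tx queue i - 1, and anything out of range means "no such class".
 */
static long classid_to_txq(unsigned int classid, unsigned int num_tx_queues)
{
	unsigned long ntx = TC_H_MIN(classid);

	if (ntx == 0 || ntx > num_tx_queues)
		return -1;			/* unknown class */
	return (long)(ntx - 1);
}

int main(void)
{
	/* e.g. class 1:3 on a four-queue device selects tx queue 2 */
	printf("%ld\n", classid_to_txq(0x00010003U, 4));
	return 0;
}
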
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 912731203047..069f81c97277 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c | |||
@@ -298,9 +298,6 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
298 | struct multiq_sched_data *q = qdisc_priv(sch); | 298 | struct multiq_sched_data *q = qdisc_priv(sch); |
299 | unsigned long band = arg - 1; | 299 | unsigned long band = arg - 1; |
300 | 300 | ||
301 | if (band >= q->bands) | ||
302 | return -EINVAL; | ||
303 | |||
304 | if (new == NULL) | 301 | if (new == NULL) |
305 | new = &noop_qdisc; | 302 | new = &noop_qdisc; |
306 | 303 | ||
@@ -320,9 +317,6 @@ multiq_leaf(struct Qdisc *sch, unsigned long arg) | |||
320 | struct multiq_sched_data *q = qdisc_priv(sch); | 317 | struct multiq_sched_data *q = qdisc_priv(sch); |
321 | unsigned long band = arg - 1; | 318 | unsigned long band = arg - 1; |
322 | 319 | ||
323 | if (band >= q->bands) | ||
324 | return NULL; | ||
325 | |||
326 | return q->queues[band]; | 320 | return q->queues[band]; |
327 | } | 321 | } |
328 | 322 | ||
@@ -348,36 +342,13 @@ static void multiq_put(struct Qdisc *q, unsigned long cl) | |||
348 | return; | 342 | return; |
349 | } | 343 | } |
350 | 344 | ||
351 | static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent, | ||
352 | struct nlattr **tca, unsigned long *arg) | ||
353 | { | ||
354 | unsigned long cl = *arg; | ||
355 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
356 | |||
357 | if (cl - 1 > q->bands) | ||
358 | return -ENOENT; | ||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | static int multiq_delete(struct Qdisc *sch, unsigned long cl) | ||
363 | { | ||
364 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
365 | if (cl - 1 > q->bands) | ||
366 | return -ENOENT; | ||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | |||
371 | static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, | 345 | static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, |
372 | struct sk_buff *skb, struct tcmsg *tcm) | 346 | struct sk_buff *skb, struct tcmsg *tcm) |
373 | { | 347 | { |
374 | struct multiq_sched_data *q = qdisc_priv(sch); | 348 | struct multiq_sched_data *q = qdisc_priv(sch); |
375 | 349 | ||
376 | if (cl - 1 > q->bands) | ||
377 | return -ENOENT; | ||
378 | tcm->tcm_handle |= TC_H_MIN(cl); | 350 | tcm->tcm_handle |= TC_H_MIN(cl); |
379 | if (q->queues[cl-1]) | 351 | tcm->tcm_info = q->queues[cl-1]->handle; |
380 | tcm->tcm_info = q->queues[cl-1]->handle; | ||
381 | return 0; | 352 | return 0; |
382 | } | 353 | } |
383 | 354 | ||
@@ -430,8 +401,6 @@ static const struct Qdisc_class_ops multiq_class_ops = { | |||
430 | .leaf = multiq_leaf, | 401 | .leaf = multiq_leaf, |
431 | .get = multiq_get, | 402 | .get = multiq_get, |
432 | .put = multiq_put, | 403 | .put = multiq_put, |
433 | .change = multiq_change, | ||
434 | .delete = multiq_delete, | ||
435 | .walk = multiq_walk, | 404 | .walk = multiq_walk, |
436 | .tcf_chain = multiq_find_tcf, | 405 | .tcf_chain = multiq_find_tcf, |
437 | .bind_tcf = multiq_bind, | 406 | .bind_tcf = multiq_bind, |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 94cecef70145..0f73c412d04b 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -262,9 +262,6 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
262 | struct prio_sched_data *q = qdisc_priv(sch); | 262 | struct prio_sched_data *q = qdisc_priv(sch); |
263 | unsigned long band = arg - 1; | 263 | unsigned long band = arg - 1; |
264 | 264 | ||
265 | if (band >= q->bands) | ||
266 | return -EINVAL; | ||
267 | |||
268 | if (new == NULL) | 265 | if (new == NULL) |
269 | new = &noop_qdisc; | 266 | new = &noop_qdisc; |
270 | 267 | ||
@@ -284,9 +281,6 @@ prio_leaf(struct Qdisc *sch, unsigned long arg) | |||
284 | struct prio_sched_data *q = qdisc_priv(sch); | 281 | struct prio_sched_data *q = qdisc_priv(sch); |
285 | unsigned long band = arg - 1; | 282 | unsigned long band = arg - 1; |
286 | 283 | ||
287 | if (band >= q->bands) | ||
288 | return NULL; | ||
289 | |||
290 | return q->queues[band]; | 284 | return q->queues[band]; |
291 | } | 285 | } |
292 | 286 | ||
@@ -311,35 +305,13 @@ static void prio_put(struct Qdisc *q, unsigned long cl) | |||
311 | return; | 305 | return; |
312 | } | 306 | } |
313 | 307 | ||
314 | static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct nlattr **tca, unsigned long *arg) | ||
315 | { | ||
316 | unsigned long cl = *arg; | ||
317 | struct prio_sched_data *q = qdisc_priv(sch); | ||
318 | |||
319 | if (cl - 1 > q->bands) | ||
320 | return -ENOENT; | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static int prio_delete(struct Qdisc *sch, unsigned long cl) | ||
325 | { | ||
326 | struct prio_sched_data *q = qdisc_priv(sch); | ||
327 | if (cl - 1 > q->bands) | ||
328 | return -ENOENT; | ||
329 | return 0; | ||
330 | } | ||
331 | |||
332 | |||
333 | static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, | 308 | static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, |
334 | struct tcmsg *tcm) | 309 | struct tcmsg *tcm) |
335 | { | 310 | { |
336 | struct prio_sched_data *q = qdisc_priv(sch); | 311 | struct prio_sched_data *q = qdisc_priv(sch); |
337 | 312 | ||
338 | if (cl - 1 > q->bands) | ||
339 | return -ENOENT; | ||
340 | tcm->tcm_handle |= TC_H_MIN(cl); | 313 | tcm->tcm_handle |= TC_H_MIN(cl); |
341 | if (q->queues[cl-1]) | 314 | tcm->tcm_info = q->queues[cl-1]->handle; |
342 | tcm->tcm_info = q->queues[cl-1]->handle; | ||
343 | return 0; | 315 | return 0; |
344 | } | 316 | } |
345 | 317 | ||
@@ -392,8 +364,6 @@ static const struct Qdisc_class_ops prio_class_ops = { | |||
392 | .leaf = prio_leaf, | 364 | .leaf = prio_leaf, |
393 | .get = prio_get, | 365 | .get = prio_get, |
394 | .put = prio_put, | 366 | .put = prio_put, |
395 | .change = prio_change, | ||
396 | .delete = prio_delete, | ||
397 | .walk = prio_walk, | 367 | .walk = prio_walk, |
398 | .tcf_chain = prio_find_tcf, | 368 | .tcf_chain = prio_find_tcf, |
399 | .bind_tcf = prio_bind, | 369 | .bind_tcf = prio_bind, |
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 2bdf241f6315..072cdf442f8e 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
@@ -268,8 +268,6 @@ static int red_dump_class(struct Qdisc *sch, unsigned long cl, | |||
268 | { | 268 | { |
269 | struct red_sched_data *q = qdisc_priv(sch); | 269 | struct red_sched_data *q = qdisc_priv(sch); |
270 | 270 | ||
271 | if (cl != 1) | ||
272 | return -ENOENT; | ||
273 | tcm->tcm_handle |= TC_H_MIN(1); | 271 | tcm->tcm_handle |= TC_H_MIN(1); |
274 | tcm->tcm_info = q->qdisc->handle; | 272 | tcm->tcm_info = q->qdisc->handle; |
275 | return 0; | 273 | return 0; |
@@ -308,17 +306,6 @@ static void red_put(struct Qdisc *sch, unsigned long arg) | |||
308 | return; | 306 | return; |
309 | } | 307 | } |
310 | 308 | ||
311 | static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | ||
312 | struct nlattr **tca, unsigned long *arg) | ||
313 | { | ||
314 | return -ENOSYS; | ||
315 | } | ||
316 | |||
317 | static int red_delete(struct Qdisc *sch, unsigned long cl) | ||
318 | { | ||
319 | return -ENOSYS; | ||
320 | } | ||
321 | |||
322 | static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) | 309 | static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) |
323 | { | 310 | { |
324 | if (!walker->stop) { | 311 | if (!walker->stop) { |
@@ -331,20 +318,12 @@ static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) | |||
331 | } | 318 | } |
332 | } | 319 | } |
333 | 320 | ||
334 | static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl) | ||
335 | { | ||
336 | return NULL; | ||
337 | } | ||
338 | |||
339 | static const struct Qdisc_class_ops red_class_ops = { | 321 | static const struct Qdisc_class_ops red_class_ops = { |
340 | .graft = red_graft, | 322 | .graft = red_graft, |
341 | .leaf = red_leaf, | 323 | .leaf = red_leaf, |
342 | .get = red_get, | 324 | .get = red_get, |
343 | .put = red_put, | 325 | .put = red_put, |
344 | .change = red_change_class, | ||
345 | .delete = red_delete, | ||
346 | .walk = red_walk, | 326 | .walk = red_walk, |
347 | .tcf_chain = red_find_tcf, | ||
348 | .dump = red_dump_class, | 327 | .dump = red_dump_class, |
349 | }; | 328 | }; |
350 | 329 | ||
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 8706920a6d45..cb21380c0605 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -496,12 +496,6 @@ nla_put_failure: | |||
496 | return -1; | 496 | return -1; |
497 | } | 497 | } |
498 | 498 | ||
499 | static int sfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | ||
500 | struct nlattr **tca, unsigned long *arg) | ||
501 | { | ||
502 | return -EOPNOTSUPP; | ||
503 | } | ||
504 | |||
505 | static unsigned long sfq_get(struct Qdisc *sch, u32 classid) | 499 | static unsigned long sfq_get(struct Qdisc *sch, u32 classid) |
506 | { | 500 | { |
507 | return 0; | 501 | return 0; |
@@ -560,7 +554,6 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
560 | 554 | ||
561 | static const struct Qdisc_class_ops sfq_class_ops = { | 555 | static const struct Qdisc_class_ops sfq_class_ops = { |
562 | .get = sfq_get, | 556 | .get = sfq_get, |
563 | .change = sfq_change_class, | ||
564 | .tcf_chain = sfq_find_tcf, | 557 | .tcf_chain = sfq_find_tcf, |
565 | .dump = sfq_dump_class, | 558 | .dump = sfq_dump_class, |
566 | .dump_stats = sfq_dump_class_stats, | 559 | .dump_stats = sfq_dump_class_stats, |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index e22dfe85e43e..8fb8107ab188 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -368,9 +368,6 @@ static int tbf_dump_class(struct Qdisc *sch, unsigned long cl, | |||
368 | { | 368 | { |
369 | struct tbf_sched_data *q = qdisc_priv(sch); | 369 | struct tbf_sched_data *q = qdisc_priv(sch); |
370 | 370 | ||
371 | if (cl != 1) /* only one class */ | ||
372 | return -ENOENT; | ||
373 | |||
374 | tcm->tcm_handle |= TC_H_MIN(1); | 371 | tcm->tcm_handle |= TC_H_MIN(1); |
375 | tcm->tcm_info = q->qdisc->handle; | 372 | tcm->tcm_info = q->qdisc->handle; |
376 | 373 | ||
@@ -410,17 +407,6 @@ static void tbf_put(struct Qdisc *sch, unsigned long arg) | |||
410 | { | 407 | { |
411 | } | 408 | } |
412 | 409 | ||
413 | static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | ||
414 | struct nlattr **tca, unsigned long *arg) | ||
415 | { | ||
416 | return -ENOSYS; | ||
417 | } | ||
418 | |||
419 | static int tbf_delete(struct Qdisc *sch, unsigned long arg) | ||
420 | { | ||
421 | return -ENOSYS; | ||
422 | } | ||
423 | |||
424 | static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) | 410 | static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) |
425 | { | 411 | { |
426 | if (!walker->stop) { | 412 | if (!walker->stop) { |
@@ -433,21 +419,13 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) | |||
433 | } | 419 | } |
434 | } | 420 | } |
435 | 421 | ||
436 | static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl) | ||
437 | { | ||
438 | return NULL; | ||
439 | } | ||
440 | |||
441 | static const struct Qdisc_class_ops tbf_class_ops = | 422 | static const struct Qdisc_class_ops tbf_class_ops = |
442 | { | 423 | { |
443 | .graft = tbf_graft, | 424 | .graft = tbf_graft, |
444 | .leaf = tbf_leaf, | 425 | .leaf = tbf_leaf, |
445 | .get = tbf_get, | 426 | .get = tbf_get, |
446 | .put = tbf_put, | 427 | .put = tbf_put, |
447 | .change = tbf_change_class, | ||
448 | .delete = tbf_delete, | ||
449 | .walk = tbf_walk, | 428 | .walk = tbf_walk, |
450 | .tcf_chain = tbf_find_tcf, | ||
451 | .dump = tbf_dump_class, | 429 | .dump = tbf_dump_class, |
452 | }; | 430 | }; |
453 | 431 | ||
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 9c002b6e0533..5a002c247231 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -268,7 +268,7 @@ static inline int teql_resolve(struct sk_buff *skb, | |||
268 | return __teql_resolve(skb, skb_res, dev); | 268 | return __teql_resolve(skb, skb_res, dev); |
269 | } | 269 | } |
270 | 270 | ||
271 | static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) | 271 | static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) |
272 | { | 272 | { |
273 | struct teql_master *master = netdev_priv(dev); | 273 | struct teql_master *master = netdev_priv(dev); |
274 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | 274 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); |
@@ -307,14 +307,14 @@ restart: | |||
307 | 307 | ||
308 | if (!netif_tx_queue_stopped(slave_txq) && | 308 | if (!netif_tx_queue_stopped(slave_txq) && |
309 | !netif_tx_queue_frozen(slave_txq) && | 309 | !netif_tx_queue_frozen(slave_txq) && |
310 | slave_ops->ndo_start_xmit(skb, slave) == 0) { | 310 | slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) { |
311 | txq_trans_update(slave_txq); | 311 | txq_trans_update(slave_txq); |
312 | __netif_tx_unlock(slave_txq); | 312 | __netif_tx_unlock(slave_txq); |
313 | master->slaves = NEXT_SLAVE(q); | 313 | master->slaves = NEXT_SLAVE(q); |
314 | netif_wake_queue(dev); | 314 | netif_wake_queue(dev); |
315 | txq->tx_packets++; | 315 | txq->tx_packets++; |
316 | txq->tx_bytes += length; | 316 | txq->tx_bytes += length; |
317 | return 0; | 317 | return NETDEV_TX_OK; |
318 | } | 318 | } |
319 | __netif_tx_unlock(slave_txq); | 319 | __netif_tx_unlock(slave_txq); |
320 | } | 320 | } |
@@ -323,7 +323,7 @@ restart: | |||
323 | break; | 323 | break; |
324 | case 1: | 324 | case 1: |
325 | master->slaves = NEXT_SLAVE(q); | 325 | master->slaves = NEXT_SLAVE(q); |
326 | return 0; | 326 | return NETDEV_TX_OK; |
327 | default: | 327 | default: |
328 | nores = 1; | 328 | nores = 1; |
329 | break; | 329 | break; |
@@ -345,7 +345,7 @@ restart: | |||
345 | drop: | 345 | drop: |
346 | txq->tx_dropped++; | 346 | txq->tx_dropped++; |
347 | dev_kfree_skb(skb); | 347 | dev_kfree_skb(skb); |
348 | return 0; | 348 | return NETDEV_TX_OK; |
349 | } | 349 | } |
350 | 350 | ||
351 | static int teql_master_open(struct net_device *dev) | 351 | static int teql_master_open(struct net_device *dev) |
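
The teql change above is part of the tree-wide switch of ndo_start_xmit() to the netdev_tx_t return type. A toy user-space sketch of the convention (the enum is re-declared here purely for illustration; in the kernel it lives in the netdevice headers):

#include <stdio.h>

/* NETDEV_TX_OK means the driver consumed the skb, even if it ended
 * up dropping it; NETDEV_TX_BUSY means the skb was not consumed and
 * the core should requeue it. Values here mirror the kernel's but
 * are only illustrative.
 */
typedef enum { NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 0x10 } netdev_tx_t;

static netdev_tx_t toy_xmit(int queue_stopped, int *tx_dropped)
{
	if (queue_stopped)
		return NETDEV_TX_BUSY;	/* not consumed: requeue later */

	/* pretend no slave could take the frame, as in teql's drop path */
	(*tx_dropped)++;
	return NETDEV_TX_OK;		/* consumed, even though dropped */
}

int main(void)
{
	int dropped = 0;

	printf("%d %d\n", toy_xmit(0, &dropped), dropped);
	return 0;
}
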
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 525864bf4f07..8450960df24f 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -112,6 +112,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
112 | asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000) | 112 | asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000) |
113 | * 1000; | 113 | * 1000; |
114 | asoc->frag_point = 0; | 114 | asoc->frag_point = 0; |
115 | asoc->user_frag = sp->user_frag; | ||
115 | 116 | ||
116 | /* Set the association max_retrans and RTO values from the | 117 | /* Set the association max_retrans and RTO values from the |
117 | * socket values. | 118 | * socket values. |
@@ -202,6 +203,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
202 | asoc->a_rwnd = asoc->rwnd; | 203 | asoc->a_rwnd = asoc->rwnd; |
203 | 204 | ||
204 | asoc->rwnd_over = 0; | 205 | asoc->rwnd_over = 0; |
206 | asoc->rwnd_press = 0; | ||
205 | 207 | ||
206 | /* Use my own max window until I learn something better. */ | 208 | /* Use my own max window until I learn something better. */ |
207 | asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW; | 209 | asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW; |
@@ -582,6 +584,33 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, | |||
582 | asoc->addip_last_asconf->transport == peer) | 584 | asoc->addip_last_asconf->transport == peer) |
583 | asoc->addip_last_asconf->transport = NULL; | 585 | asoc->addip_last_asconf->transport = NULL; |
584 | 586 | ||
587 | /* If we have something on the transmitted list, we have to | ||
588 | * save it off. The best place is the active path. | ||
589 | */ | ||
590 | if (!list_empty(&peer->transmitted)) { | ||
591 | struct sctp_transport *active = asoc->peer.active_path; | ||
592 | struct sctp_chunk *ch; | ||
593 | |||
594 | /* Reset the transport of each chunk on this list */ | ||
595 | list_for_each_entry(ch, &peer->transmitted, | ||
596 | transmitted_list) { | ||
597 | ch->transport = NULL; | ||
598 | ch->rtt_in_progress = 0; | ||
599 | } | ||
600 | |||
601 | list_splice_tail_init(&peer->transmitted, | ||
602 | &active->transmitted); | ||
603 | |||
604 | /* Start a T3 timer here in case it wasn't running so | ||
605 | * that these migrated packets have a chance to get | ||
606 | * retransmitted. | ||
607 | */ | ||
608 | if (!timer_pending(&active->T3_rtx_timer)) | ||
609 | if (!mod_timer(&active->T3_rtx_timer, | ||
610 | jiffies + active->rto)) | ||
611 | sctp_transport_hold(active); | ||
612 | } | ||
613 | |||
585 | asoc->peer.transport_count--; | 614 | asoc->peer.transport_count--; |
586 | 615 | ||
587 | sctp_transport_free(peer); | 616 | sctp_transport_free(peer); |
@@ -651,13 +680,15 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, | |||
651 | */ | 680 | */ |
652 | peer->param_flags = asoc->param_flags; | 681 | peer->param_flags = asoc->param_flags; |
653 | 682 | ||
683 | sctp_transport_route(peer, NULL, sp); | ||
684 | |||
654 | /* Initialize the pmtu of the transport. */ | 685 | /* Initialize the pmtu of the transport. */ |
655 | if (peer->param_flags & SPP_PMTUD_ENABLE) | 686 | if (peer->param_flags & SPP_PMTUD_DISABLE) { |
656 | sctp_transport_pmtu(peer); | 687 | if (asoc->pathmtu) |
657 | else if (asoc->pathmtu) | 688 | peer->pathmtu = asoc->pathmtu; |
658 | peer->pathmtu = asoc->pathmtu; | 689 | else |
659 | else | 690 | peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT; |
660 | peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT; | 691 | } |
661 | 692 | ||
662 | /* If this is the first transport addr on this association, | 693 | /* If this is the first transport addr on this association, |
663 | * initialize the association PMTU to the peer's PMTU. | 694 | * initialize the association PMTU to the peer's PMTU. |
@@ -673,7 +704,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, | |||
673 | "%d\n", asoc, asoc->pathmtu); | 704 | "%d\n", asoc, asoc->pathmtu); |
674 | peer->pmtu_pending = 0; | 705 | peer->pmtu_pending = 0; |
675 | 706 | ||
676 | asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu); | 707 | asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); |
677 | 708 | ||
678 | /* The asoc->peer.port might not be meaningful yet, but | 709 | /* The asoc->peer.port might not be meaningful yet, but |
679 | * initialize the packet structure anyway. | 710 | * initialize the packet structure anyway. |
@@ -810,11 +841,16 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, | |||
810 | break; | 841 | break; |
811 | 842 | ||
812 | case SCTP_TRANSPORT_DOWN: | 843 | case SCTP_TRANSPORT_DOWN: |
813 | /* if the transort was never confirmed, do not transition it | 844 | /* If the transport was never confirmed, do not transition it |
814 | * to inactive state. | 845 | * to inactive state. Also, release the cached route since |
846 | * there may be a better route next time. | ||
815 | */ | 847 | */ |
816 | if (transport->state != SCTP_UNCONFIRMED) | 848 | if (transport->state != SCTP_UNCONFIRMED) |
817 | transport->state = SCTP_INACTIVE; | 849 | transport->state = SCTP_INACTIVE; |
850 | else { | ||
851 | dst_release(transport->dst); | ||
852 | transport->dst = NULL; | ||
853 | } | ||
818 | 854 | ||
819 | spc_state = SCTP_ADDR_UNREACHABLE; | 855 | spc_state = SCTP_ADDR_UNREACHABLE; |
820 | break; | 856 | break; |
@@ -1324,9 +1360,8 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc) | |||
1324 | } | 1360 | } |
1325 | 1361 | ||
1326 | if (pmtu) { | 1362 | if (pmtu) { |
1327 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); | ||
1328 | asoc->pathmtu = pmtu; | 1363 | asoc->pathmtu = pmtu; |
1329 | asoc->frag_point = sctp_frag_point(sp, pmtu); | 1364 | asoc->frag_point = sctp_frag_point(asoc, pmtu); |
1330 | } | 1365 | } |
1331 | 1366 | ||
1332 | SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n", | 1367 | SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n", |
@@ -1369,6 +1404,17 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len) | |||
1369 | asoc->rwnd += len; | 1404 | asoc->rwnd += len; |
1370 | } | 1405 | } |
1371 | 1406 | ||
1407 | /* If we had window pressure, start recovering it | ||
1408 | * once our rwnd had reached the accumulated pressure | ||
1409 | * threshold. The idea is to recover slowly, but up | ||
1410 | * to the initial advertised window. | ||
1411 | */ | ||
1412 | if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) { | ||
1413 | int change = min(asoc->pathmtu, asoc->rwnd_press); | ||
1414 | asoc->rwnd += change; | ||
1415 | asoc->rwnd_press -= change; | ||
1416 | } | ||
1417 | |||
1372 | SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) " | 1418 | SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) " |
1373 | "- %u\n", __func__, asoc, len, asoc->rwnd, | 1419 | "- %u\n", __func__, asoc, len, asoc->rwnd, |
1374 | asoc->rwnd_over, asoc->a_rwnd); | 1420 | asoc->rwnd_over, asoc->a_rwnd); |
@@ -1401,17 +1447,38 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len) | |||
1401 | /* Decrease asoc's rwnd by len. */ | 1447 | /* Decrease asoc's rwnd by len. */ |
1402 | void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) | 1448 | void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) |
1403 | { | 1449 | { |
1450 | int rx_count; | ||
1451 | int over = 0; | ||
1452 | |||
1404 | SCTP_ASSERT(asoc->rwnd, "rwnd zero", return); | 1453 | SCTP_ASSERT(asoc->rwnd, "rwnd zero", return); |
1405 | SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return); | 1454 | SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return); |
1455 | |||
1456 | if (asoc->ep->rcvbuf_policy) | ||
1457 | rx_count = atomic_read(&asoc->rmem_alloc); | ||
1458 | else | ||
1459 | rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); | ||
1460 | |||
1461 | /* If we've reached or overflowed our receive buffer, announce | ||
1462 | * a 0 rwnd if rwnd would still be positive. Store the | ||
1463 | * potential pressure overflow so that the window can be restored | ||
1464 | * back to original value. | ||
1465 | */ | ||
1466 | if (rx_count >= asoc->base.sk->sk_rcvbuf) | ||
1467 | over = 1; | ||
1468 | |||
1406 | if (asoc->rwnd >= len) { | 1469 | if (asoc->rwnd >= len) { |
1407 | asoc->rwnd -= len; | 1470 | asoc->rwnd -= len; |
1471 | if (over) { | ||
1472 | asoc->rwnd_press = asoc->rwnd; | ||
1473 | asoc->rwnd = 0; | ||
1474 | } | ||
1408 | } else { | 1475 | } else { |
1409 | asoc->rwnd_over = len - asoc->rwnd; | 1476 | asoc->rwnd_over = len - asoc->rwnd; |
1410 | asoc->rwnd = 0; | 1477 | asoc->rwnd = 0; |
1411 | } | 1478 | } |
1412 | SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n", | 1479 | SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n", |
1413 | __func__, asoc, len, asoc->rwnd, | 1480 | __func__, asoc, len, asoc->rwnd, |
1414 | asoc->rwnd_over); | 1481 | asoc->rwnd_over, asoc->rwnd_press); |
1415 | } | 1482 | } |
1416 | 1483 | ||
1417 | /* Build the bind address list for the association based on info from the | 1484 | /* Build the bind address list for the association based on info from the |
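
The rwnd_press handling added above is easier to follow in isolation. A simplified user-space sketch of the decrease/recover arithmetic (field names shortened, rwnd_over handling left out):

#include <stdio.h>

/* Sketch of the receive-window "pressure" bookkeeping: when the
 * socket buffer is full the leftover window is parked in rwnd_press
 * and a zero window is advertised; as the application reads data the
 * pressure is paid back in pathmtu-sized steps, up to the originally
 * advertised window.
 */
struct rwnd_state {
	unsigned int rwnd;
	unsigned int rwnd_press;
	unsigned int pathmtu;
};

static void rwnd_decrease(struct rwnd_state *s, unsigned int len, int buf_full)
{
	if (s->rwnd >= len) {
		s->rwnd -= len;
		if (buf_full) {			/* stash the rest as pressure */
			s->rwnd_press = s->rwnd;
			s->rwnd = 0;
		}
	} else {
		s->rwnd = 0;			/* rwnd_over case, simplified */
	}
}

static void rwnd_increase(struct rwnd_state *s, unsigned int len)
{
	s->rwnd += len;
	if (s->rwnd_press && s->rwnd >= s->rwnd_press) {
		unsigned int change = s->pathmtu < s->rwnd_press ?
				      s->pathmtu : s->rwnd_press;

		s->rwnd += change;		/* recover slowly */
		s->rwnd_press -= change;
	}
}

int main(void)
{
	struct rwnd_state s = { .rwnd = 4096, .rwnd_press = 0, .pathmtu = 1500 };

	rwnd_decrease(&s, 3500, 1);	/* buffer full: advertise 0 */
	rwnd_increase(&s, 2000);	/* application drained some data */
	printf("rwnd=%u press=%u\n", s.rwnd, s.rwnd_press);
	return 0;
}
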
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 6d5944a745d4..13a6fba41077 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c | |||
@@ -510,9 +510,28 @@ int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope) | |||
510 | * of requested destination address, sender and receiver | 510 | * of requested destination address, sender and receiver |
511 | * SHOULD include all of its addresses with level greater | 511 | * SHOULD include all of its addresses with level greater |
512 | * than or equal to L. | 512 | * than or equal to L. |
513 | * | ||
514 | * Address scoping can be selectively controlled via sysctl | ||
515 | * option | ||
513 | */ | 516 | */ |
514 | if (addr_scope <= scope) | 517 | switch (sctp_scope_policy) { |
518 | case SCTP_SCOPE_POLICY_DISABLE: | ||
515 | return 1; | 519 | return 1; |
520 | case SCTP_SCOPE_POLICY_ENABLE: | ||
521 | if (addr_scope <= scope) | ||
522 | return 1; | ||
523 | break; | ||
524 | case SCTP_SCOPE_POLICY_PRIVATE: | ||
525 | if (addr_scope <= scope || SCTP_SCOPE_PRIVATE == addr_scope) | ||
526 | return 1; | ||
527 | break; | ||
528 | case SCTP_SCOPE_POLICY_LINK: | ||
529 | if (addr_scope <= scope || SCTP_SCOPE_LINK == addr_scope) | ||
530 | return 1; | ||
531 | break; | ||
532 | default: | ||
533 | break; | ||
534 | } | ||
516 | 535 | ||
517 | return 0; | 536 | return 0; |
518 | } | 537 | } |
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 1748ef90950c..acf7c4d128f7 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c | |||
@@ -59,6 +59,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg) | |||
59 | msg->can_abandon = 0; | 59 | msg->can_abandon = 0; |
60 | msg->expires_at = 0; | 60 | msg->expires_at = 0; |
61 | INIT_LIST_HEAD(&msg->chunks); | 61 | INIT_LIST_HEAD(&msg->chunks); |
62 | msg->msg_size = 0; | ||
62 | } | 63 | } |
63 | 64 | ||
64 | /* Allocate and initialize datamsg. */ | 65 | /* Allocate and initialize datamsg. */ |
@@ -73,6 +74,19 @@ SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp) | |||
73 | return msg; | 74 | return msg; |
74 | } | 75 | } |
75 | 76 | ||
77 | void sctp_datamsg_free(struct sctp_datamsg *msg) | ||
78 | { | ||
79 | struct sctp_chunk *chunk; | ||
80 | |||
81 | /* This doesn't have to be a _safe variant because | ||
82 | * sctp_chunk_free() only drops the refs. | ||
83 | */ | ||
84 | list_for_each_entry(chunk, &msg->chunks, frag_list) | ||
85 | sctp_chunk_free(chunk); | ||
86 | |||
87 | sctp_datamsg_put(msg); | ||
88 | } | ||
89 | |||
76 | /* Final destructruction of datamsg memory. */ | 90 | /* Final destructruction of datamsg memory. */ |
77 | static void sctp_datamsg_destroy(struct sctp_datamsg *msg) | 91 | static void sctp_datamsg_destroy(struct sctp_datamsg *msg) |
78 | { | 92 | { |
@@ -142,6 +156,7 @@ static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chu | |||
142 | { | 156 | { |
143 | sctp_datamsg_hold(msg); | 157 | sctp_datamsg_hold(msg); |
144 | chunk->msg = msg; | 158 | chunk->msg = msg; |
159 | msg->msg_size += chunk->skb->len; | ||
145 | } | 160 | } |
146 | 161 | ||
147 | 162 | ||
@@ -158,6 +173,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, | |||
158 | { | 173 | { |
159 | int max, whole, i, offset, over, err; | 174 | int max, whole, i, offset, over, err; |
160 | int len, first_len; | 175 | int len, first_len; |
176 | int max_data; | ||
161 | struct sctp_chunk *chunk; | 177 | struct sctp_chunk *chunk; |
162 | struct sctp_datamsg *msg; | 178 | struct sctp_datamsg *msg; |
163 | struct list_head *pos, *temp; | 179 | struct list_head *pos, *temp; |
@@ -179,8 +195,14 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, | |||
179 | __func__, msg, msg->expires_at, jiffies); | 195 | __func__, msg, msg->expires_at, jiffies); |
180 | } | 196 | } |
181 | 197 | ||
182 | max = asoc->frag_point; | 198 | /* This is the biggest possible DATA chunk that can fit into |
199 | * the packet | ||
200 | */ | ||
201 | max_data = asoc->pathmtu - | ||
202 | sctp_sk(asoc->base.sk)->pf->af->net_header_len - | ||
203 | sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk); | ||
183 | 204 | ||
205 | max = asoc->frag_point; | ||
184 | /* If the peer requested that we authenticate DATA chunks | 206 | /* If the peer requested that we authenticate DATA chunks |
185 | * we need to account for bundling of the AUTH chunks along with | 207 | * we need to account for bundling of the AUTH chunks along with |
186 | * DATA. | 208 | * DATA. |
@@ -189,23 +211,41 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, | |||
189 | struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc); | 211 | struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc); |
190 | 212 | ||
191 | if (hmac_desc) | 213 | if (hmac_desc) |
192 | max -= WORD_ROUND(sizeof(sctp_auth_chunk_t) + | 214 | max_data -= WORD_ROUND(sizeof(sctp_auth_chunk_t) + |
193 | hmac_desc->hmac_len); | 215 | hmac_desc->hmac_len); |
194 | } | 216 | } |
195 | 217 | ||
218 | /* Now, check if we need to reduce our max */ | ||
219 | if (max > max_data) | ||
220 | max = max_data; | ||
221 | |||
196 | whole = 0; | 222 | whole = 0; |
197 | first_len = max; | 223 | first_len = max; |
198 | 224 | ||
225 | /* Check to see if we have a pending SACK and try to let it be bundled | ||
226 | * with this message. Do this if we don't have any data queued already. | ||
227 | * To check that, look at out_qlen and retransmit list. | ||
228 | * NOTE: we will not reduce to account for SACK, if the message would | ||
229 | * not have been fragmented. | ||
230 | */ | ||
231 | if (timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]) && | ||
232 | asoc->outqueue.out_qlen == 0 && | ||
233 | list_empty(&asoc->outqueue.retransmit) && | ||
234 | msg_len > max) | ||
235 | max_data -= WORD_ROUND(sizeof(sctp_sack_chunk_t)); | ||
236 | |||
199 | /* Encourage Cookie-ECHO bundling. */ | 237 | /* Encourage Cookie-ECHO bundling. */ |
200 | if (asoc->state < SCTP_STATE_COOKIE_ECHOED) { | 238 | if (asoc->state < SCTP_STATE_COOKIE_ECHOED) |
201 | whole = msg_len / (max - SCTP_ARBITRARY_COOKIE_ECHO_LEN); | 239 | max_data -= SCTP_ARBITRARY_COOKIE_ECHO_LEN; |
202 | 240 | ||
203 | /* Account for the DATA to be bundled with the COOKIE-ECHO. */ | 241 | /* Now that we adjusted completely, reset first_len */ |
204 | if (whole) { | 242 | if (first_len > max_data) |
205 | first_len = max - SCTP_ARBITRARY_COOKIE_ECHO_LEN; | 243 | first_len = max_data; |
206 | msg_len -= first_len; | 244 | |
207 | whole = 1; | 245 | /* Account for a different sized first fragment */ |
208 | } | 246 | if (msg_len >= first_len) { |
247 | msg_len -= first_len; | ||
248 | whole = 1; | ||
209 | } | 249 | } |
210 | 250 | ||
211 | /* How many full sized? How many bytes leftover? */ | 251 | /* How many full sized? How many bytes leftover? */ |
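
The max_data computation introduced above caps DATA fragments at what actually fits in one packet. A sketch with example header sizes (the constants are illustrative; the kernel derives them from the address family and the negotiated features):

#include <stdio.h>

/* Sketch of the DATA sizing: start from the path MTU, strip the
 * network, SCTP common and DATA chunk headers, then shave off room
 * for a bundled AUTH chunk and, before the association reaches
 * ESTABLISHED, for a COOKIE-ECHO. Header sizes below are examples,
 * not values pulled in from kernel headers.
 */
static int max_data_len(int pathmtu, int net_hdr, int sctp_hdr,
			int data_hdr, int auth_len, int cookie_len)
{
	int max_data = pathmtu - net_hdr - sctp_hdr - data_hdr;

	max_data -= auth_len;	/* 0 when DATA is not authenticated */
	max_data -= cookie_len;	/* 0 once the association is established */
	return max_data;
}

int main(void)
{
	/* e.g. IPv4 (20), SCTP common header (12), DATA header (16) */
	printf("%d\n", max_data_len(1500, 20, 12, 16, 0, 0));
	return 0;
}
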
diff --git a/net/sctp/debug.c b/net/sctp/debug.c index 7ff548a30cfb..bf24fa697de2 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c | |||
@@ -52,7 +52,7 @@ int sctp_debug_flag = 1; /* Initially enable DEBUG */ | |||
52 | #endif /* SCTP_DEBUG */ | 52 | #endif /* SCTP_DEBUG */ |
53 | 53 | ||
54 | /* These are printable forms of Chunk ID's from section 3.1. */ | 54 | /* These are printable forms of Chunk ID's from section 3.1. */ |
55 | static const char *sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = { | 55 | static const char *const sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = { |
56 | "DATA", | 56 | "DATA", |
57 | "INIT", | 57 | "INIT", |
58 | "INIT_ACK", | 58 | "INIT_ACK", |
@@ -97,7 +97,7 @@ const char *sctp_cname(const sctp_subtype_t cid) | |||
97 | } | 97 | } |
98 | 98 | ||
99 | /* These are printable forms of the states. */ | 99 | /* These are printable forms of the states. */ |
100 | const char *sctp_state_tbl[SCTP_STATE_NUM_STATES] = { | 100 | const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = { |
101 | "STATE_EMPTY", | 101 | "STATE_EMPTY", |
102 | "STATE_CLOSED", | 102 | "STATE_CLOSED", |
103 | "STATE_COOKIE_WAIT", | 103 | "STATE_COOKIE_WAIT", |
@@ -110,7 +110,7 @@ const char *sctp_state_tbl[SCTP_STATE_NUM_STATES] = { | |||
110 | }; | 110 | }; |
111 | 111 | ||
112 | /* Events that could change the state of an association. */ | 112 | /* Events that could change the state of an association. */ |
113 | const char *sctp_evttype_tbl[] = { | 113 | const char *const sctp_evttype_tbl[] = { |
114 | "EVENT_T_unknown", | 114 | "EVENT_T_unknown", |
115 | "EVENT_T_CHUNK", | 115 | "EVENT_T_CHUNK", |
116 | "EVENT_T_TIMEOUT", | 116 | "EVENT_T_TIMEOUT", |
@@ -119,7 +119,7 @@ const char *sctp_evttype_tbl[] = { | |||
119 | }; | 119 | }; |
120 | 120 | ||
121 | /* Return value of a state function */ | 121 | /* Return value of a state function */ |
122 | const char *sctp_status_tbl[] = { | 122 | const char *const sctp_status_tbl[] = { |
123 | "DISPOSITION_DISCARD", | 123 | "DISPOSITION_DISCARD", |
124 | "DISPOSITION_CONSUME", | 124 | "DISPOSITION_CONSUME", |
125 | "DISPOSITION_NOMEM", | 125 | "DISPOSITION_NOMEM", |
@@ -132,7 +132,7 @@ const char *sctp_status_tbl[] = { | |||
132 | }; | 132 | }; |
133 | 133 | ||
134 | /* Printable forms of primitives */ | 134 | /* Printable forms of primitives */ |
135 | static const char *sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = { | 135 | static const char *const sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = { |
136 | "PRIMITIVE_ASSOCIATE", | 136 | "PRIMITIVE_ASSOCIATE", |
137 | "PRIMITIVE_SHUTDOWN", | 137 | "PRIMITIVE_SHUTDOWN", |
138 | "PRIMITIVE_ABORT", | 138 | "PRIMITIVE_ABORT", |
@@ -149,7 +149,7 @@ const char *sctp_pname(const sctp_subtype_t id) | |||
149 | return "unknown_primitive"; | 149 | return "unknown_primitive"; |
150 | } | 150 | } |
151 | 151 | ||
152 | static const char *sctp_other_tbl[] = { | 152 | static const char *const sctp_other_tbl[] = { |
153 | "NO_PENDING_TSN", | 153 | "NO_PENDING_TSN", |
154 | "ICMP_PROTO_UNREACH", | 154 | "ICMP_PROTO_UNREACH", |
155 | }; | 155 | }; |
@@ -162,7 +162,7 @@ const char *sctp_oname(const sctp_subtype_t id) | |||
162 | return "unknown 'other' event"; | 162 | return "unknown 'other' event"; |
163 | } | 163 | } |
164 | 164 | ||
165 | static const char *sctp_timer_tbl[] = { | 165 | static const char *const sctp_timer_tbl[] = { |
166 | "TIMEOUT_NONE", | 166 | "TIMEOUT_NONE", |
167 | "TIMEOUT_T1_COOKIE", | 167 | "TIMEOUT_T1_COOKIE", |
168 | "TIMEOUT_T1_INIT", | 168 | "TIMEOUT_T1_INIT", |
diff --git a/net/sctp/output.c b/net/sctp/output.c index b94c21190566..5cbda8f1ddfd 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -61,8 +61,24 @@ | |||
61 | #include <net/sctp/checksum.h> | 61 | #include <net/sctp/checksum.h> |
62 | 62 | ||
63 | /* Forward declarations for private helpers. */ | 63 | /* Forward declarations for private helpers. */ |
64 | static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | 64 | static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, |
65 | struct sctp_chunk *chunk); | 65 | struct sctp_chunk *chunk); |
66 | static void sctp_packet_append_data(struct sctp_packet *packet, | ||
67 | struct sctp_chunk *chunk); | ||
68 | static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet, | ||
69 | struct sctp_chunk *chunk, | ||
70 | u16 chunk_len); | ||
71 | |||
72 | static void sctp_packet_reset(struct sctp_packet *packet) | ||
73 | { | ||
74 | packet->size = packet->overhead; | ||
75 | packet->has_cookie_echo = 0; | ||
76 | packet->has_sack = 0; | ||
77 | packet->has_data = 0; | ||
78 | packet->has_auth = 0; | ||
79 | packet->ipfragok = 0; | ||
80 | packet->auth = NULL; | ||
81 | } | ||
66 | 82 | ||
67 | /* Config a packet. | 83 | /* Config a packet. |
68 | * This appears to be a followup set of initializations. | 84 | * This appears to be a followup set of initializations. |
@@ -75,13 +91,8 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, | |||
75 | SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__, | 91 | SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__, |
76 | packet, vtag); | 92 | packet, vtag); |
77 | 93 | ||
94 | sctp_packet_reset(packet); | ||
78 | packet->vtag = vtag; | 95 | packet->vtag = vtag; |
79 | packet->has_cookie_echo = 0; | ||
80 | packet->has_sack = 0; | ||
81 | packet->has_auth = 0; | ||
82 | packet->has_data = 0; | ||
83 | packet->ipfragok = 0; | ||
84 | packet->auth = NULL; | ||
85 | 96 | ||
86 | if (ecn_capable && sctp_packet_empty(packet)) { | 97 | if (ecn_capable && sctp_packet_empty(packet)) { |
87 | chunk = sctp_get_ecne_prepend(packet->transport->asoc); | 98 | chunk = sctp_get_ecne_prepend(packet->transport->asoc); |
@@ -119,15 +130,9 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, | |||
119 | } | 130 | } |
120 | overhead += sizeof(struct sctphdr); | 131 | overhead += sizeof(struct sctphdr); |
121 | packet->overhead = overhead; | 132 | packet->overhead = overhead; |
122 | packet->size = overhead; | 133 | sctp_packet_reset(packet); |
123 | packet->vtag = 0; | 134 | packet->vtag = 0; |
124 | packet->has_cookie_echo = 0; | ||
125 | packet->has_sack = 0; | ||
126 | packet->has_auth = 0; | ||
127 | packet->has_data = 0; | ||
128 | packet->ipfragok = 0; | ||
129 | packet->malloced = 0; | 135 | packet->malloced = 0; |
130 | packet->auth = NULL; | ||
131 | return packet; | 136 | return packet; |
132 | } | 137 | } |
133 | 138 | ||
@@ -204,7 +209,7 @@ static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt, | |||
204 | /* See if this is an auth chunk we are bundling or if | 209 | /* See if this is an auth chunk we are bundling or if |
205 | * auth is already bundled. | 210 | * auth is already bundled. |
206 | */ | 211 | */ |
207 | if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->auth) | 212 | if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth) |
208 | return retval; | 213 | return retval; |
209 | 214 | ||
210 | /* if the peer did not request this chunk to be authenticated, | 215 | /* if the peer did not request this chunk to be authenticated, |
@@ -234,18 +239,19 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt, | |||
234 | if (sctp_chunk_is_data(chunk) && !pkt->has_sack && | 239 | if (sctp_chunk_is_data(chunk) && !pkt->has_sack && |
235 | !pkt->has_cookie_echo) { | 240 | !pkt->has_cookie_echo) { |
236 | struct sctp_association *asoc; | 241 | struct sctp_association *asoc; |
242 | struct timer_list *timer; | ||
237 | asoc = pkt->transport->asoc; | 243 | asoc = pkt->transport->asoc; |
244 | timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; | ||
238 | 245 | ||
239 | if (asoc->a_rwnd > asoc->rwnd) { | 246 | /* If the SACK timer is running, we have a pending SACK */ |
247 | if (timer_pending(timer)) { | ||
240 | struct sctp_chunk *sack; | 248 | struct sctp_chunk *sack; |
241 | asoc->a_rwnd = asoc->rwnd; | 249 | asoc->a_rwnd = asoc->rwnd; |
242 | sack = sctp_make_sack(asoc); | 250 | sack = sctp_make_sack(asoc); |
243 | if (sack) { | 251 | if (sack) { |
244 | struct timer_list *timer; | ||
245 | retval = sctp_packet_append_chunk(pkt, sack); | 252 | retval = sctp_packet_append_chunk(pkt, sack); |
246 | asoc->peer.sack_needed = 0; | 253 | asoc->peer.sack_needed = 0; |
247 | timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; | 254 | if (del_timer(timer)) |
248 | if (timer_pending(timer) && del_timer(timer)) | ||
249 | sctp_association_put(asoc); | 255 | sctp_association_put(asoc); |
250 | } | 256 | } |
251 | } | 257 | } |
@@ -261,13 +267,20 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, | |||
261 | { | 267 | { |
262 | sctp_xmit_t retval = SCTP_XMIT_OK; | 268 | sctp_xmit_t retval = SCTP_XMIT_OK; |
263 | __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length)); | 269 | __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length)); |
264 | size_t psize; | ||
265 | size_t pmtu; | ||
266 | int too_big; | ||
267 | 270 | ||
268 | SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet, | 271 | SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet, |
269 | chunk); | 272 | chunk); |
270 | 273 | ||
274 | /* Data chunks are special. Before seeing what else we can | ||
275 | * bundle into this packet, check to see if we are allowed to | ||
276 | * send this DATA. | ||
277 | */ | ||
278 | if (sctp_chunk_is_data(chunk)) { | ||
279 | retval = sctp_packet_can_append_data(packet, chunk); | ||
280 | if (retval != SCTP_XMIT_OK) | ||
281 | goto finish; | ||
282 | } | ||
283 | |||
271 | /* Try to bundle AUTH chunk */ | 284 | /* Try to bundle AUTH chunk */ |
272 | retval = sctp_packet_bundle_auth(packet, chunk); | 285 | retval = sctp_packet_bundle_auth(packet, chunk); |
273 | if (retval != SCTP_XMIT_OK) | 286 | if (retval != SCTP_XMIT_OK) |
@@ -278,51 +291,16 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, | |||
278 | if (retval != SCTP_XMIT_OK) | 291 | if (retval != SCTP_XMIT_OK) |
279 | goto finish; | 292 | goto finish; |
280 | 293 | ||
281 | psize = packet->size; | 294 | /* Check to see if this chunk will fit into the packet */ |
282 | pmtu = ((packet->transport->asoc) ? | 295 | retval = sctp_packet_will_fit(packet, chunk, chunk_len); |
283 | (packet->transport->asoc->pathmtu) : | 296 | if (retval != SCTP_XMIT_OK) |
284 | (packet->transport->pathmtu)); | 297 | goto finish; |
285 | |||
286 | too_big = (psize + chunk_len > pmtu); | ||
287 | |||
288 | /* Decide if we need to fragment or resubmit later. */ | ||
289 | if (too_big) { | ||
290 | /* It's OK to fragmet at IP level if any one of the following | ||
291 | * is true: | ||
292 | * 1. The packet is empty (meaning this chunk is greater | ||
293 | * the MTU) | ||
294 | * 2. The chunk we are adding is a control chunk | ||
295 | * 3. The packet doesn't have any data in it yet and data | ||
296 | * requires authentication. | ||
297 | */ | ||
298 | if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) || | ||
299 | (!packet->has_data && chunk->auth)) { | ||
300 | /* We no longer do re-fragmentation. | ||
301 | * Just fragment at the IP layer, if we | ||
302 | * actually hit this condition | ||
303 | */ | ||
304 | packet->ipfragok = 1; | ||
305 | goto append; | ||
306 | |||
307 | } else { | ||
308 | retval = SCTP_XMIT_PMTU_FULL; | ||
309 | goto finish; | ||
310 | } | ||
311 | } | ||
312 | |||
313 | append: | ||
314 | /* We believe that this chunk is OK to add to the packet (as | ||
315 | * long as we have the cwnd for it). | ||
316 | */ | ||
317 | 298 | ||
318 | /* DATA is a special case since we must examine both rwnd and cwnd | 299 | /* We believe that this chunk is OK to add to the packet */ |
319 | * before we send DATA. | ||
320 | */ | ||
321 | switch (chunk->chunk_hdr->type) { | 300 | switch (chunk->chunk_hdr->type) { |
322 | case SCTP_CID_DATA: | 301 | case SCTP_CID_DATA: |
323 | retval = sctp_packet_append_data(packet, chunk); | 302 | /* Account for the data being in the packet */ |
324 | if (SCTP_XMIT_OK != retval) | 303 | sctp_packet_append_data(packet, chunk); |
325 | goto finish; | ||
326 | /* Disallow SACK bundling after DATA. */ | 304 | /* Disallow SACK bundling after DATA. */ |
327 | packet->has_sack = 1; | 305 | packet->has_sack = 1; |
328 | /* Disallow AUTH bundling after DATA */ | 306 | /* Disallow AUTH bundling after DATA */ |
@@ -598,7 +576,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
598 | (*tp->af_specific->sctp_xmit)(nskb, tp); | 576 | (*tp->af_specific->sctp_xmit)(nskb, tp); |
599 | 577 | ||
600 | out: | 578 | out: |
601 | packet->size = packet->overhead; | 579 | sctp_packet_reset(packet); |
602 | return err; | 580 | return err; |
603 | no_route: | 581 | no_route: |
604 | kfree_skb(nskb); | 582 | kfree_skb(nskb); |
@@ -632,16 +610,15 @@ nomem: | |||
632 | * 2nd Level Abstractions | 610 | * 2nd Level Abstractions |
633 | ********************************************************************/ | 611 | ********************************************************************/ |
634 | 612 | ||
635 | /* This private function handles the specifics of appending DATA chunks. */ | 613 | /* This private function checks to see if a chunk can be added */ |
636 | static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | 614 | static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, |
637 | struct sctp_chunk *chunk) | 615 | struct sctp_chunk *chunk) |
638 | { | 616 | { |
639 | sctp_xmit_t retval = SCTP_XMIT_OK; | 617 | sctp_xmit_t retval = SCTP_XMIT_OK; |
640 | size_t datasize, rwnd, inflight; | 618 | size_t datasize, rwnd, inflight, flight_size; |
641 | struct sctp_transport *transport = packet->transport; | 619 | struct sctp_transport *transport = packet->transport; |
642 | __u32 max_burst_bytes; | 620 | __u32 max_burst_bytes; |
643 | struct sctp_association *asoc = transport->asoc; | 621 | struct sctp_association *asoc = transport->asoc; |
644 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); | ||
645 | struct sctp_outq *q = &asoc->outqueue; | 622 | struct sctp_outq *q = &asoc->outqueue; |
646 | 623 | ||
647 | /* RFC 2960 6.1 Transmission of DATA Chunks | 624 | /* RFC 2960 6.1 Transmission of DATA Chunks |
@@ -658,7 +635,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | |||
658 | */ | 635 | */ |
659 | 636 | ||
660 | rwnd = asoc->peer.rwnd; | 637 | rwnd = asoc->peer.rwnd; |
661 | inflight = asoc->outqueue.outstanding_bytes; | 638 | inflight = q->outstanding_bytes; |
639 | flight_size = transport->flight_size; | ||
662 | 640 | ||
663 | datasize = sctp_data_size(chunk); | 641 | datasize = sctp_data_size(chunk); |
664 | 642 | ||
@@ -681,8 +659,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | |||
681 | * cwnd = flightsize + Max.Burst * MTU | 659 | * cwnd = flightsize + Max.Burst * MTU |
682 | */ | 660 | */ |
683 | max_burst_bytes = asoc->max_burst * asoc->pathmtu; | 661 | max_burst_bytes = asoc->max_burst * asoc->pathmtu; |
684 | if ((transport->flight_size + max_burst_bytes) < transport->cwnd) { | 662 | if ((flight_size + max_burst_bytes) < transport->cwnd) { |
685 | transport->cwnd = transport->flight_size + max_burst_bytes; | 663 | transport->cwnd = flight_size + max_burst_bytes; |
686 | SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: " | 664 | SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: " |
687 | "transport: %p, cwnd: %d, " | 665 | "transport: %p, cwnd: %d, " |
688 | "ssthresh: %d, flight_size: %d, " | 666 | "ssthresh: %d, flight_size: %d, " |
@@ -707,7 +685,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | |||
707 | * ignore the value of cwnd and SHOULD NOT delay retransmission. | 685 | * ignore the value of cwnd and SHOULD NOT delay retransmission. |
708 | */ | 686 | */ |
709 | if (chunk->fast_retransmit != SCTP_NEED_FRTX) | 687 | if (chunk->fast_retransmit != SCTP_NEED_FRTX) |
710 | if (transport->flight_size >= transport->cwnd) { | 688 | if (flight_size >= transport->cwnd) { |
711 | retval = SCTP_XMIT_RWND_FULL; | 689 | retval = SCTP_XMIT_RWND_FULL; |
712 | goto finish; | 690 | goto finish; |
713 | } | 691 | } |
@@ -717,20 +695,36 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | |||
717 | * if any previously transmitted data on the connection remains | 695 | * if any previously transmitted data on the connection remains |
718 | * unacknowledged. | 696 | * unacknowledged. |
719 | */ | 697 | */ |
720 | if (!sp->nodelay && sctp_packet_empty(packet) && | 698 | if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) && |
721 | q->outstanding_bytes && sctp_state(asoc, ESTABLISHED)) { | 699 | inflight && sctp_state(asoc, ESTABLISHED)) { |
722 | unsigned len = datasize + q->out_qlen; | 700 | unsigned max = transport->pathmtu - packet->overhead; |
701 | unsigned len = chunk->skb->len + q->out_qlen; | ||
723 | 702 | ||
724 | /* Check whether this chunk and all the rest of pending | 703 | /* Check whether this chunk and all the rest of pending |
725 | * data will fit or delay in hopes of bundling a full | 704 | * data will fit or delay in hopes of bundling a full |
726 | * sized packet. | 705 | * sized packet. |
706 | * Don't delay large message writes that may have been | ||
707 | * fragmented into small pieces. | ||
727 | */ | 708 | */ |
728 | if (len < asoc->frag_point) { | 709 | if ((len < max) && (chunk->msg->msg_size < max)) { |
729 | retval = SCTP_XMIT_NAGLE_DELAY; | 710 | retval = SCTP_XMIT_NAGLE_DELAY; |
730 | goto finish; | 711 | goto finish; |
731 | } | 712 | } |
732 | } | 713 | } |
733 | 714 | ||
715 | finish: | ||
716 | return retval; | ||
717 | } | ||
718 | |||
719 | /* This private function does the accounting when adding a DATA chunk */ | ||
720 | static void sctp_packet_append_data(struct sctp_packet *packet, | ||
721 | struct sctp_chunk *chunk) | ||
722 | { | ||
723 | struct sctp_transport *transport = packet->transport; | ||
724 | size_t datasize = sctp_data_size(chunk); | ||
725 | struct sctp_association *asoc = transport->asoc; | ||
726 | u32 rwnd = asoc->peer.rwnd; | ||
727 | |||
734 | /* Keep track of how many bytes are in flight over this transport. */ | 728 | /* Keep track of how many bytes are in flight over this transport. */ |
735 | transport->flight_size += datasize; | 729 | transport->flight_size += datasize; |
736 | 730 | ||
@@ -753,7 +747,45 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | |||
753 | /* Has been accepted for transmission. */ | 747 | /* Has been accepted for transmission. */ |
754 | if (!asoc->peer.prsctp_capable) | 748 | if (!asoc->peer.prsctp_capable) |
755 | chunk->msg->can_abandon = 0; | 749 | chunk->msg->can_abandon = 0; |
750 | } | ||
751 | |||
752 | static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet, | ||
753 | struct sctp_chunk *chunk, | ||
754 | u16 chunk_len) | ||
755 | { | ||
756 | size_t psize; | ||
757 | size_t pmtu; | ||
758 | int too_big; | ||
759 | sctp_xmit_t retval = SCTP_XMIT_OK; | ||
760 | |||
761 | psize = packet->size; | ||
762 | pmtu = ((packet->transport->asoc) ? | ||
763 | (packet->transport->asoc->pathmtu) : | ||
764 | (packet->transport->pathmtu)); | ||
765 | |||
766 | too_big = (psize + chunk_len > pmtu); | ||
767 | |||
768 | /* Decide if we need to fragment or resubmit later. */ | ||
769 | if (too_big) { | ||
770 | /* It's OK to fragment at the IP level if any one of the following | ||
771 | * is true: | ||
772 | * 1. The packet is empty (meaning this chunk is greater than | ||
773 | * the MTU) | ||
774 | * 2. The chunk we are adding is a control chunk | ||
775 | * 3. The packet doesn't have any data in it yet and data | ||
776 | * requires authentication. | ||
777 | */ | ||
778 | if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) || | ||
779 | (!packet->has_data && chunk->auth)) { | ||
780 | /* We no longer do re-fragmentation. | ||
781 | * Just fragment at the IP layer, if we | ||
782 | * actually hit this condition | ||
783 | */ | ||
784 | packet->ipfragok = 1; | ||
785 | } else { | ||
786 | retval = SCTP_XMIT_PMTU_FULL; | ||
787 | } | ||
788 | } | ||
756 | 789 | ||
757 | finish: | ||
758 | return retval; | 790 | return retval; |
759 | } | 791 | } |
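The net/sctp/output.c rework splits the old all-in-one append path into a read-only admission check (sctp_packet_can_append_data), a pure accounting step (sctp_packet_append_data) and a size test (sctp_packet_will_fit), with the repeated field initialization collected in sctp_packet_reset(). The subtle piece is the reworked Nagle-style delay, which now compares against the path MTU payload and the size of the whole message rather than frag_point. The standalone C sketch below restates that decision outside the kernel; struct pkt_state, nagle_delay and all field names are illustrative, not kernel symbols, and the ESTABLISHED-state check is omitted for brevity.

    /* Standalone restatement of the Nagle-style test; names are illustrative. */
    #include <stdbool.h>
    #include <stddef.h>

    struct pkt_state {
        size_t pathmtu;    /* path MTU of the transport          */
        size_t overhead;   /* IP + SCTP header overhead          */
        size_t in_packet;  /* bytes already in this packet       */
        size_t inflight;   /* bytes sent but not yet acked       */
        size_t queued;     /* bytes still waiting in the outq    */
        bool   nodelay;    /* SCTP_NODELAY requested by the app  */
    };

    /* Return true when a small DATA chunk should be held back in the hope
     * of bundling a full-sized packet later. */
    static bool nagle_delay(const struct pkt_state *s,
                            size_t chunk_bytes, size_t msg_bytes)
    {
        size_t max_payload = s->pathmtu - s->overhead;

        if (s->nodelay || s->in_packet || !s->inflight)
            return false;   /* nothing to gain by waiting */

        /* Delay only genuinely small writes; a large message that was
         * fragmented into small pieces goes out immediately. */
        return (chunk_bytes + s->queued < max_payload) &&
               (msg_bytes < max_payload);
    }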
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index d765fc53e74d..c9f20e28521b 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -406,8 +406,9 @@ void sctp_retransmit_mark(struct sctp_outq *q, | |||
406 | * not be retransmitted | 406 | * not be retransmitted |
407 | */ | 407 | */ |
408 | if (!chunk->tsn_gap_acked) { | 408 | if (!chunk->tsn_gap_acked) { |
409 | chunk->transport->flight_size -= | 409 | if (chunk->transport) |
410 | sctp_data_size(chunk); | 410 | chunk->transport->flight_size -= |
411 | sctp_data_size(chunk); | ||
411 | q->outstanding_bytes -= sctp_data_size(chunk); | 412 | q->outstanding_bytes -= sctp_data_size(chunk); |
412 | q->asoc->peer.rwnd += (sctp_data_size(chunk) + | 413 | q->asoc->peer.rwnd += (sctp_data_size(chunk) + |
413 | sizeof(struct sk_buff)); | 414 | sizeof(struct sk_buff)); |
@@ -443,7 +444,8 @@ void sctp_retransmit_mark(struct sctp_outq *q, | |||
443 | q->asoc->peer.rwnd += (sctp_data_size(chunk) + | 444 | q->asoc->peer.rwnd += (sctp_data_size(chunk) + |
444 | sizeof(struct sk_buff)); | 445 | sizeof(struct sk_buff)); |
445 | q->outstanding_bytes -= sctp_data_size(chunk); | 446 | q->outstanding_bytes -= sctp_data_size(chunk); |
446 | transport->flight_size -= sctp_data_size(chunk); | 447 | if (chunk->transport) |
448 | transport->flight_size -= sctp_data_size(chunk); | ||
447 | 449 | ||
448 | /* sctpimpguide-05 Section 2.8.2 | 450 | /* sctpimpguide-05 Section 2.8.2 |
449 | * M5) If a T3-rtx timer expires, the | 451 | * M5) If a T3-rtx timer expires, the |
@@ -1310,6 +1312,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1310 | __u32 rtt; | 1312 | __u32 rtt; |
1311 | __u8 restart_timer = 0; | 1313 | __u8 restart_timer = 0; |
1312 | int bytes_acked = 0; | 1314 | int bytes_acked = 0; |
1315 | int migrate_bytes = 0; | ||
1313 | 1316 | ||
1314 | /* These state variables are for coherent debug output. --xguo */ | 1317 | /* These state variables are for coherent debug output. --xguo */ |
1315 | 1318 | ||
@@ -1343,8 +1346,9 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1343 | * considering it as 'outstanding'. | 1346 | * considering it as 'outstanding'. |
1344 | */ | 1347 | */ |
1345 | if (!tchunk->tsn_gap_acked) { | 1348 | if (!tchunk->tsn_gap_acked) { |
1346 | tchunk->transport->flight_size -= | 1349 | if (tchunk->transport) |
1347 | sctp_data_size(tchunk); | 1350 | tchunk->transport->flight_size -= |
1351 | sctp_data_size(tchunk); | ||
1348 | q->outstanding_bytes -= sctp_data_size(tchunk); | 1352 | q->outstanding_bytes -= sctp_data_size(tchunk); |
1349 | } | 1353 | } |
1350 | continue; | 1354 | continue; |
@@ -1378,6 +1382,20 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1378 | rtt); | 1382 | rtt); |
1379 | } | 1383 | } |
1380 | } | 1384 | } |
1385 | |||
1386 | /* If the chunk hasn't been marked as ACKED, | ||
1387 | * mark it and count its size in bytes_acked. | ||
1388 | * If the chunk has no transport (ASCONF deleted | ||
1389 | * its address while the DATA was outstanding), | ||
1390 | * also count it in migrate_bytes. | ||
1391 | */ | ||
1392 | if (!tchunk->tsn_gap_acked) { | ||
1393 | tchunk->tsn_gap_acked = 1; | ||
1394 | bytes_acked += sctp_data_size(tchunk); | ||
1395 | if (!tchunk->transport) | ||
1396 | migrate_bytes += sctp_data_size(tchunk); | ||
1397 | } | ||
1398 | |||
1381 | if (TSN_lte(tsn, sack_ctsn)) { | 1399 | if (TSN_lte(tsn, sack_ctsn)) { |
1382 | /* RFC 2960 6.3.2 Retransmission Timer Rules | 1400 | /* RFC 2960 6.3.2 Retransmission Timer Rules |
1383 | * | 1401 | * |
@@ -1391,8 +1409,6 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1391 | restart_timer = 1; | 1409 | restart_timer = 1; |
1392 | 1410 | ||
1393 | if (!tchunk->tsn_gap_acked) { | 1411 | if (!tchunk->tsn_gap_acked) { |
1394 | tchunk->tsn_gap_acked = 1; | ||
1395 | bytes_acked += sctp_data_size(tchunk); | ||
1396 | /* | 1412 | /* |
1397 | * SFR-CACC algorithm: | 1413 | * SFR-CACC algorithm: |
1398 | * 2) If the SACK contains gap acks | 1414 | * 2) If the SACK contains gap acks |
@@ -1432,10 +1448,6 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1432 | * older than that newly acknowledged DATA | 1448 | * older than that newly acknowledged DATA |
1433 | * chunk, are qualified as 'Stray DATA chunks'. | 1449 | * chunk, are qualified as 'Stray DATA chunks'. |
1434 | */ | 1450 | */ |
1435 | if (!tchunk->tsn_gap_acked) { | ||
1436 | tchunk->tsn_gap_acked = 1; | ||
1437 | bytes_acked += sctp_data_size(tchunk); | ||
1438 | } | ||
1439 | list_add_tail(lchunk, &tlist); | 1451 | list_add_tail(lchunk, &tlist); |
1440 | } | 1452 | } |
1441 | 1453 | ||
@@ -1491,7 +1503,8 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1491 | tsn); | 1503 | tsn); |
1492 | tchunk->tsn_gap_acked = 0; | 1504 | tchunk->tsn_gap_acked = 0; |
1493 | 1505 | ||
1494 | bytes_acked -= sctp_data_size(tchunk); | 1506 | if (tchunk->transport) |
1507 | bytes_acked -= sctp_data_size(tchunk); | ||
1495 | 1508 | ||
1496 | /* RFC 2960 6.3.2 Retransmission Timer Rules | 1509 | /* RFC 2960 6.3.2 Retransmission Timer Rules |
1497 | * | 1510 | * |
@@ -1561,6 +1574,14 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1561 | #endif /* SCTP_DEBUG */ | 1574 | #endif /* SCTP_DEBUG */ |
1562 | if (transport) { | 1575 | if (transport) { |
1563 | if (bytes_acked) { | 1576 | if (bytes_acked) { |
1577 | /* We may have counted DATA that was migrated | ||
1578 | * to this transport due to a DEL-IP operation. | ||
1579 | * Subtract those bytes, since they were never | ||
1580 | * sent on this transport and shouldn't be | ||
1581 | * credited to this transport. | ||
1582 | */ | ||
1583 | bytes_acked -= migrate_bytes; | ||
1584 | |||
1564 | /* 8.2. When an outstanding TSN is acknowledged, | 1585 | /* 8.2. When an outstanding TSN is acknowledged, |
1565 | * the endpoint shall clear the error counter of | 1586 | * the endpoint shall clear the error counter of |
1566 | * the destination transport address to which the | 1587 | * the destination transport address to which the |
@@ -1589,7 +1610,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1589 | transport->flight_size -= bytes_acked; | 1610 | transport->flight_size -= bytes_acked; |
1590 | if (transport->flight_size == 0) | 1611 | if (transport->flight_size == 0) |
1591 | transport->partial_bytes_acked = 0; | 1612 | transport->partial_bytes_acked = 0; |
1592 | q->outstanding_bytes -= bytes_acked; | 1613 | q->outstanding_bytes -= bytes_acked + migrate_bytes; |
1593 | } else { | 1614 | } else { |
1594 | /* RFC 2960 6.1, sctpimpguide-06 2.15.2 | 1615 | /* RFC 2960 6.1, sctpimpguide-06 2.15.2 |
1595 | * When a sender is doing zero window probing, it | 1616 | * When a sender is doing zero window probing, it |
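In net/sctp/outqueue.c the new migrate_bytes counter covers DATA whose transport was removed by an ASCONF DEL-IP while the chunk was still outstanding: those bytes are acknowledged against the association, but must not be credited to whichever transport the SACK happened to arrive on. A toy model of the two-counter bookkeeping, with invented names (ack_totals, count_acked, settle_transport):

    /* Toy model of the accounting; names are invented for illustration. */
    struct ack_totals {
        unsigned int bytes_acked;    /* all newly acked DATA bytes    */
        unsigned int migrate_bytes;  /* acked bytes with no transport */
    };

    static void count_acked(struct ack_totals *t, int has_transport,
                            unsigned int datasize)
    {
        t->bytes_acked += datasize;
        if (!has_transport)
            t->migrate_bytes += datasize;  /* ASCONF removed its address */
    }

    static void settle_transport(unsigned int *flight_size,
                                 unsigned int *outstanding_bytes,
                                 const struct ack_totals *t)
    {
        /* The transport is only credited with bytes it actually carried... */
        *flight_size -= t->bytes_acked - t->migrate_bytes;
        /* ...while the association-wide counter drops by the full total. */
        *outstanding_bytes -= t->bytes_acked;
    }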
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index f268910620be..d093cbfeaac4 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -512,10 +512,8 @@ int __init sctp_remaddr_proc_init(void) | |||
512 | { | 512 | { |
513 | struct proc_dir_entry *p; | 513 | struct proc_dir_entry *p; |
514 | 514 | ||
515 | p = create_proc_entry("remaddr", S_IRUGO, proc_net_sctp); | 515 | p = proc_create("remaddr", S_IRUGO, proc_net_sctp, &sctp_remaddr_seq_fops); |
516 | if (!p) | 516 | if (!p) |
517 | return -ENOMEM; | 517 | return -ENOMEM; |
518 | p->proc_fops = &sctp_remaddr_seq_fops; | ||
519 | |||
520 | return 0; | 518 | return 0; |
521 | } | 519 | } |
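The net/sctp/proc.c hunk is a mechanical conversion from create_proc_entry() plus a manual proc_fops assignment to proc_create(), which registers the entry together with its file operations and so removes the window in which the entry was visible without them. A kernel-style sketch of the general pattern (example_fops, the entry name and the NULL parent are placeholders, not part of this patch):

    #include <linux/init.h>
    #include <linux/proc_fs.h>
    #include <linux/stat.h>

    static const struct file_operations example_fops;  /* seq_file ops elided */

    static int __init example_proc_init(void)
    {
        struct proc_dir_entry *p;

        /* Old pattern:
         *   p = create_proc_entry("example", S_IRUGO, parent);
         *   if (!p)
         *           return -ENOMEM;
         *   p->proc_fops = &example_fops;
         */
        p = proc_create("example", S_IRUGO, NULL, &example_fops);
        return p ? 0 : -ENOMEM;
    }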
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index a76da657244a..60093be8385d 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -431,16 +431,14 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp) | |||
431 | * of requested destination address, sender and receiver | 431 | * of requested destination address, sender and receiver |
432 | * SHOULD include all of its addresses with level greater | 432 | * SHOULD include all of its addresses with level greater |
433 | * than or equal to L. | 433 | * than or equal to L. |
434 | * | ||
435 | * IPv4 scoping can be controlled through the sysctl option | ||
436 | * net.sctp.addr_scope_policy | ||
434 | */ | 437 | */ |
435 | static sctp_scope_t sctp_v4_scope(union sctp_addr *addr) | 438 | static sctp_scope_t sctp_v4_scope(union sctp_addr *addr) |
436 | { | 439 | { |
437 | sctp_scope_t retval; | 440 | sctp_scope_t retval; |
438 | 441 | ||
439 | /* Should IPv4 scoping be a sysctl configurable option | ||
440 | * so users can turn it off (default on) for certain | ||
441 | * unconventional networking environments? | ||
442 | */ | ||
443 | |||
444 | /* Check for unusable SCTP addresses. */ | 442 | /* Check for unusable SCTP addresses. */ |
445 | if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) { | 443 | if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) { |
446 | retval = SCTP_SCOPE_UNUSABLE; | 444 | retval = SCTP_SCOPE_UNUSABLE; |
@@ -1259,6 +1257,9 @@ SCTP_STATIC __init int sctp_init(void) | |||
1259 | /* Disable AUTH by default. */ | 1257 | /* Disable AUTH by default. */ |
1260 | sctp_auth_enable = 0; | 1258 | sctp_auth_enable = 0; |
1261 | 1259 | ||
1260 | /* Set SCOPE policy to enabled */ | ||
1261 | sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE; | ||
1262 | |||
1262 | sctp_sysctl_register(); | 1263 | sctp_sysctl_register(); |
1263 | 1264 | ||
1264 | INIT_LIST_HEAD(&sctp_address_families); | 1265 | INIT_LIST_HEAD(&sctp_address_families); |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 61cc6075b0df..9d881a61ac02 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -2861,6 +2861,11 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, | |||
2861 | addr_param = (union sctp_addr_param *) | 2861 | addr_param = (union sctp_addr_param *) |
2862 | ((void *)asconf_param + sizeof(sctp_addip_param_t)); | 2862 | ((void *)asconf_param + sizeof(sctp_addip_param_t)); |
2863 | 2863 | ||
2864 | if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP && | ||
2865 | asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP && | ||
2866 | asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY) | ||
2867 | return SCTP_ERROR_UNKNOWN_PARAM; | ||
2868 | |||
2864 | switch (addr_param->v4.param_hdr.type) { | 2869 | switch (addr_param->v4.param_hdr.type) { |
2865 | case SCTP_PARAM_IPV6_ADDRESS: | 2870 | case SCTP_PARAM_IPV6_ADDRESS: |
2866 | if (!asoc->peer.ipv6_address) | 2871 | if (!asoc->peer.ipv6_address) |
@@ -2958,9 +2963,6 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, | |||
2958 | 2963 | ||
2959 | sctp_assoc_set_primary(asoc, peer); | 2964 | sctp_assoc_set_primary(asoc, peer); |
2960 | break; | 2965 | break; |
2961 | default: | ||
2962 | return SCTP_ERROR_UNKNOWN_PARAM; | ||
2963 | break; | ||
2964 | } | 2966 | } |
2965 | 2967 | ||
2966 | return SCTP_ERROR_NO_ERROR; | 2968 | return SCTP_ERROR_NO_ERROR; |
@@ -3104,7 +3106,7 @@ done: | |||
3104 | } | 3106 | } |
3105 | 3107 | ||
3106 | /* Process a asconf parameter that is successfully acked. */ | 3108 | /* Process a asconf parameter that is successfully acked. */ |
3107 | static int sctp_asconf_param_success(struct sctp_association *asoc, | 3109 | static void sctp_asconf_param_success(struct sctp_association *asoc, |
3108 | sctp_addip_param_t *asconf_param) | 3110 | sctp_addip_param_t *asconf_param) |
3109 | { | 3111 | { |
3110 | struct sctp_af *af; | 3112 | struct sctp_af *af; |
@@ -3113,7 +3115,6 @@ static int sctp_asconf_param_success(struct sctp_association *asoc, | |||
3113 | union sctp_addr_param *addr_param; | 3115 | union sctp_addr_param *addr_param; |
3114 | struct sctp_transport *transport; | 3116 | struct sctp_transport *transport; |
3115 | struct sctp_sockaddr_entry *saddr; | 3117 | struct sctp_sockaddr_entry *saddr; |
3116 | int retval = 0; | ||
3117 | 3118 | ||
3118 | addr_param = (union sctp_addr_param *) | 3119 | addr_param = (union sctp_addr_param *) |
3119 | ((void *)asconf_param + sizeof(sctp_addip_param_t)); | 3120 | ((void *)asconf_param + sizeof(sctp_addip_param_t)); |
@@ -3133,10 +3134,18 @@ static int sctp_asconf_param_success(struct sctp_association *asoc, | |||
3133 | saddr->state = SCTP_ADDR_SRC; | 3134 | saddr->state = SCTP_ADDR_SRC; |
3134 | } | 3135 | } |
3135 | local_bh_enable(); | 3136 | local_bh_enable(); |
3137 | list_for_each_entry(transport, &asoc->peer.transport_addr_list, | ||
3138 | transports) { | ||
3139 | if (transport->state == SCTP_ACTIVE) | ||
3140 | continue; | ||
3141 | dst_release(transport->dst); | ||
3142 | sctp_transport_route(transport, NULL, | ||
3143 | sctp_sk(asoc->base.sk)); | ||
3144 | } | ||
3136 | break; | 3145 | break; |
3137 | case SCTP_PARAM_DEL_IP: | 3146 | case SCTP_PARAM_DEL_IP: |
3138 | local_bh_disable(); | 3147 | local_bh_disable(); |
3139 | retval = sctp_del_bind_addr(bp, &addr); | 3148 | sctp_del_bind_addr(bp, &addr); |
3140 | local_bh_enable(); | 3149 | local_bh_enable(); |
3141 | list_for_each_entry(transport, &asoc->peer.transport_addr_list, | 3150 | list_for_each_entry(transport, &asoc->peer.transport_addr_list, |
3142 | transports) { | 3151 | transports) { |
@@ -3148,8 +3157,6 @@ static int sctp_asconf_param_success(struct sctp_association *asoc, | |||
3148 | default: | 3157 | default: |
3149 | break; | 3158 | break; |
3150 | } | 3159 | } |
3151 | |||
3152 | return retval; | ||
3153 | } | 3160 | } |
3154 | 3161 | ||
3155 | /* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk | 3162 | /* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk |
@@ -3266,7 +3273,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, | |||
3266 | 3273 | ||
3267 | switch (err_code) { | 3274 | switch (err_code) { |
3268 | case SCTP_ERROR_NO_ERROR: | 3275 | case SCTP_ERROR_NO_ERROR: |
3269 | retval = sctp_asconf_param_success(asoc, asconf_param); | 3276 | sctp_asconf_param_success(asoc, asconf_param); |
3270 | break; | 3277 | break; |
3271 | 3278 | ||
3272 | case SCTP_ERROR_RSRC_LOW: | 3279 | case SCTP_ERROR_RSRC_LOW: |
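In net/sctp/sm_make_chunk.c the unknown-parameter rejection in sctp_process_asconf_param() moves ahead of the address checks, so a bogus ASCONF parameter type can no longer reach the address handling, and sctp_asconf_param_success() becomes void while also re-routing non-active transports after an ADD-IP. The early-rejection shape, restated as a tiny standalone check with stand-in constants for the SCTP_PARAM_* values:

    /* Stand-in constants; the kernel uses the SCTP_PARAM_* values. */
    enum asconf_param_type {
        PARAM_ADD_IP = 1,
        PARAM_DEL_IP,
        PARAM_SET_PRIMARY,
    };

    /* Reject anything that is not one of the three known ASCONF operations
     * before any further parsing happens. */
    static int asconf_param_type_ok(enum asconf_param_type type)
    {
        switch (type) {
        case PARAM_ADD_IP:
        case PARAM_DEL_IP:
        case PARAM_SET_PRIMARY:
            return 1;
        default:
            return 0;   /* previously only caught by a late default: case */
        }
    }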
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 86426aac1600..8674d4919556 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -440,14 +440,26 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, | |||
440 | /* The check for association's overall error counter exceeding the | 440 | /* The check for association's overall error counter exceeding the |
441 | * threshold is done in the state function. | 441 | * threshold is done in the state function. |
442 | */ | 442 | */ |
443 | /* When probing UNCONFIRMED addresses, the association overall | 443 | /* We are here due to a timer expiration. If the timer was |
444 | * error count is NOT incremented | 444 | * not a HEARTBEAT, then normal error tracking is done. |
445 | * If the timer was a heartbeat, we only increment error counts | ||
446 | * when we already have an outstanding HEARTBEAT that has not | ||
447 | * been acknowledged. | ||
448 | * Additionally, some transport states inhibit error increments. | ||
445 | */ | 449 | */ |
446 | if (transport->state != SCTP_UNCONFIRMED) | 450 | if (!is_hb) { |
447 | asoc->overall_error_count++; | 451 | asoc->overall_error_count++; |
452 | if (transport->state != SCTP_INACTIVE) | ||
453 | transport->error_count++; | ||
454 | } else if (transport->hb_sent) { | ||
455 | if (transport->state != SCTP_UNCONFIRMED) | ||
456 | asoc->overall_error_count++; | ||
457 | if (transport->state != SCTP_INACTIVE) | ||
458 | transport->error_count++; | ||
459 | } | ||
448 | 460 | ||
449 | if (transport->state != SCTP_INACTIVE && | 461 | if (transport->state != SCTP_INACTIVE && |
450 | (transport->error_count++ >= transport->pathmaxrxt)) { | 462 | (transport->error_count > transport->pathmaxrxt)) { |
451 | SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p", | 463 | SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p", |
452 | " transport IP: port:%d failed.\n", | 464 | " transport IP: port:%d failed.\n", |
453 | asoc, | 465 | asoc, |
@@ -931,6 +943,27 @@ static void sctp_cmd_t1_timer_update(struct sctp_association *asoc, | |||
931 | 943 | ||
932 | } | 944 | } |
933 | 945 | ||
946 | /* Send the whole message, chunk by chunk, to the outqueue. | ||
947 | * This way the whole message is queued up and bundling is | ||
948 | * encouraged for small fragments. | ||
949 | */ | ||
950 | static int sctp_cmd_send_msg(struct sctp_association *asoc, | ||
951 | struct sctp_datamsg *msg) | ||
952 | { | ||
953 | struct sctp_chunk *chunk; | ||
954 | int error = 0; | ||
955 | |||
956 | list_for_each_entry(chunk, &msg->chunks, frag_list) { | ||
957 | error = sctp_outq_tail(&asoc->outqueue, chunk); | ||
958 | if (error) | ||
959 | break; | ||
960 | } | ||
961 | |||
962 | return error; | ||
963 | } | ||
964 | |||
965 | |||
966 | |||
934 | /* These three macros allow us to pull the debugging code out of the | 967 | /* These three macros allow us to pull the debugging code out of the |
935 | * main flow of sctp_do_sm() to keep attention focused on the real | 968 | * main flow of sctp_do_sm() to keep attention focused on the real |
936 | * functionality there. | 969 | * functionality there. |
@@ -1500,7 +1533,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1500 | case SCTP_CMD_PROCESS_CTSN: | 1533 | case SCTP_CMD_PROCESS_CTSN: |
1501 | /* Dummy up a SACK for processing. */ | 1534 | /* Dummy up a SACK for processing. */ |
1502 | sackh.cum_tsn_ack = cmd->obj.be32; | 1535 | sackh.cum_tsn_ack = cmd->obj.be32; |
1503 | sackh.a_rwnd = 0; | 1536 | sackh.a_rwnd = asoc->peer.rwnd + |
1537 | asoc->outqueue.outstanding_bytes; | ||
1504 | sackh.num_gap_ack_blocks = 0; | 1538 | sackh.num_gap_ack_blocks = 0; |
1505 | sackh.num_dup_tsns = 0; | 1539 | sackh.num_dup_tsns = 0; |
1506 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, | 1540 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, |
@@ -1575,7 +1609,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1575 | case SCTP_CMD_UPDATE_INITTAG: | 1609 | case SCTP_CMD_UPDATE_INITTAG: |
1576 | asoc->peer.i.init_tag = cmd->obj.u32; | 1610 | asoc->peer.i.init_tag = cmd->obj.u32; |
1577 | break; | 1611 | break; |
1578 | 1612 | case SCTP_CMD_SEND_MSG: | |
1613 | if (!asoc->outqueue.cork) { | ||
1614 | sctp_outq_cork(&asoc->outqueue); | ||
1615 | local_cork = 1; | ||
1616 | } | ||
1617 | error = sctp_cmd_send_msg(asoc, cmd->obj.msg); | ||
1618 | break; | ||
1579 | default: | 1619 | default: |
1580 | printk(KERN_WARNING "Impossible command: %u, %p\n", | 1620 | printk(KERN_WARNING "Impossible command: %u, %p\n", |
1581 | cmd->verb, cmd->obj.ptr); | 1621 | cmd->verb, cmd->obj.ptr); |
@@ -1593,9 +1633,9 @@ out: | |||
1593 | */ | 1633 | */ |
1594 | if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) { | 1634 | if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) { |
1595 | if (chunk->end_of_packet || chunk->singleton) | 1635 | if (chunk->end_of_packet || chunk->singleton) |
1596 | sctp_outq_uncork(&asoc->outqueue); | 1636 | error = sctp_outq_uncork(&asoc->outqueue); |
1597 | } else if (local_cork) | 1637 | } else if (local_cork) |
1598 | sctp_outq_uncork(&asoc->outqueue); | 1638 | error = sctp_outq_uncork(&asoc->outqueue); |
1599 | return error; | 1639 | return error; |
1600 | nomem: | 1640 | nomem: |
1601 | error = -ENOMEM; | 1641 | error = -ENOMEM; |
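Two behavioral changes land in net/sctp/sm_sideeffect.c: sctp_do_8_2_transport_strike() now distinguishes HEARTBEAT timeouts (which only count as errors when a HEARTBEAT is already outstanding) from other timeouts, and the threshold test becomes '>' because the counter is incremented before the comparison; separately, SCTP_CMD_SEND_MSG queues an entire datamsg chunk by chunk under a cork. The compact userspace restatement below mirrors the new strike policy; the struct and function are illustrative, though the field names follow the transport fields.

    #include <stdbool.h>

    enum t_state { T_ACTIVE, T_UNCONFIRMED, T_INACTIVE };

    struct strike_ctx {
        enum t_state state;
        bool hb_sent;                      /* HEARTBEAT outstanding?     */
        unsigned int error_count;          /* per-transport strikes      */
        unsigned int overall_error_count;  /* association-wide strikes   */
        unsigned int pathmaxrxt;           /* failure threshold          */
    };

    /* Returns true when this strike marks the transport as failed. */
    static bool transport_strike(struct strike_ctx *t, bool is_hb)
    {
        if (!is_hb || t->hb_sent) {
            if (!is_hb || t->state != T_UNCONFIRMED)
                t->overall_error_count++;
            if (t->state != T_INACTIVE)
                t->error_count++;
        }

        /* '>' rather than '>=': the counter was already bumped above. */
        return t->state != T_INACTIVE && t->error_count > t->pathmaxrxt;
    }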
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 7288192f7df5..c8fae1983dd1 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -334,6 +334,15 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep, | |||
334 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t))) | 334 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t))) |
335 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 335 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
336 | 336 | ||
337 | /* If the INIT is coming toward a closing socket, we'll send back | ||
338 | * an ABORT. Essentially, this catches the race of an INIT being | ||
339 | * backlogged to the socket at the same time as the user issues close(). | ||
340 | * Since the socket and all its associations are going away, we | ||
341 | * can treat this as OOTB. | ||
342 | */ | ||
343 | if (sctp_sstate(ep->base.sk, CLOSING)) | ||
344 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | ||
345 | |||
337 | /* Verify the INIT chunk before processing it. */ | 346 | /* Verify the INIT chunk before processing it. */ |
338 | err_chunk = NULL; | 347 | err_chunk = NULL; |
339 | if (!sctp_verify_init(asoc, chunk->chunk_hdr->type, | 348 | if (!sctp_verify_init(asoc, chunk->chunk_hdr->type, |
@@ -962,7 +971,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep, | |||
962 | { | 971 | { |
963 | struct sctp_transport *transport = (struct sctp_transport *) arg; | 972 | struct sctp_transport *transport = (struct sctp_transport *) arg; |
964 | 973 | ||
965 | if (asoc->overall_error_count > asoc->max_retrans) { | 974 | if (asoc->overall_error_count >= asoc->max_retrans) { |
966 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 975 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
967 | SCTP_ERROR(ETIMEDOUT)); | 976 | SCTP_ERROR(ETIMEDOUT)); |
968 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | 977 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ |
@@ -1106,7 +1115,8 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, | |||
1106 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 1115 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
1107 | 1116 | ||
1108 | /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */ | 1117 | /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */ |
1109 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t))) | 1118 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) + |
1119 | sizeof(sctp_sender_hb_info_t))) | ||
1110 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | 1120 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, |
1111 | commands); | 1121 | commands); |
1112 | 1122 | ||
@@ -2561,6 +2571,12 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep, | |||
2561 | chunk->subh.shutdown_hdr = sdh; | 2571 | chunk->subh.shutdown_hdr = sdh; |
2562 | ctsn = ntohl(sdh->cum_tsn_ack); | 2572 | ctsn = ntohl(sdh->cum_tsn_ack); |
2563 | 2573 | ||
2574 | if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { | ||
2575 | SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn); | ||
2576 | SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point); | ||
2577 | return SCTP_DISPOSITION_DISCARD; | ||
2578 | } | ||
2579 | |||
2564 | /* If Cumulative TSN Ack beyond the max tsn currently | 2580 | /* If Cumulative TSN Ack beyond the max tsn currently |
2565 | * send, terminating the association and respond to the | 2581 | * send, terminating the association and respond to the |
2566 | * sender with an ABORT. | 2582 | * sender with an ABORT. |
@@ -2624,6 +2640,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep, | |||
2624 | { | 2640 | { |
2625 | struct sctp_chunk *chunk = arg; | 2641 | struct sctp_chunk *chunk = arg; |
2626 | sctp_shutdownhdr_t *sdh; | 2642 | sctp_shutdownhdr_t *sdh; |
2643 | __u32 ctsn; | ||
2627 | 2644 | ||
2628 | if (!sctp_vtag_verify(chunk, asoc)) | 2645 | if (!sctp_vtag_verify(chunk, asoc)) |
2629 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 2646 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
@@ -2635,12 +2652,19 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep, | |||
2635 | commands); | 2652 | commands); |
2636 | 2653 | ||
2637 | sdh = (sctp_shutdownhdr_t *)chunk->skb->data; | 2654 | sdh = (sctp_shutdownhdr_t *)chunk->skb->data; |
2655 | ctsn = ntohl(sdh->cum_tsn_ack); | ||
2656 | |||
2657 | if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { | ||
2658 | SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn); | ||
2659 | SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point); | ||
2660 | return SCTP_DISPOSITION_DISCARD; | ||
2661 | } | ||
2638 | 2662 | ||
2639 | /* If Cumulative TSN Ack beyond the max tsn currently | 2663 | /* If Cumulative TSN Ack beyond the max tsn currently |
2640 | * send, terminating the association and respond to the | 2664 | * send, terminating the association and respond to the |
2641 | * sender with an ABORT. | 2665 | * sender with an ABORT. |
2642 | */ | 2666 | */ |
2643 | if (!TSN_lt(ntohl(sdh->cum_tsn_ack), asoc->next_tsn)) | 2667 | if (!TSN_lt(ctsn, asoc->next_tsn)) |
2644 | return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands); | 2668 | return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands); |
2645 | 2669 | ||
2646 | /* verify, by checking the Cumulative TSN Ack field of the | 2670 | /* verify, by checking the Cumulative TSN Ack field of the |
@@ -2867,6 +2891,9 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep, | |||
2867 | goto discard_force; | 2891 | goto discard_force; |
2868 | case SCTP_IERROR_NO_DATA: | 2892 | case SCTP_IERROR_NO_DATA: |
2869 | goto consume; | 2893 | goto consume; |
2894 | case SCTP_IERROR_PROTO_VIOLATION: | ||
2895 | return sctp_sf_abort_violation(ep, asoc, chunk, commands, | ||
2896 | (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t)); | ||
2870 | default: | 2897 | default: |
2871 | BUG(); | 2898 | BUG(); |
2872 | } | 2899 | } |
@@ -2977,6 +3004,9 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep, | |||
2977 | break; | 3004 | break; |
2978 | case SCTP_IERROR_NO_DATA: | 3005 | case SCTP_IERROR_NO_DATA: |
2979 | goto consume; | 3006 | goto consume; |
3007 | case SCTP_IERROR_PROTO_VIOLATION: | ||
3008 | return sctp_sf_abort_violation(ep, asoc, chunk, commands, | ||
3009 | (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t)); | ||
2980 | default: | 3010 | default: |
2981 | BUG(); | 3011 | BUG(); |
2982 | } | 3012 | } |
@@ -3519,6 +3549,12 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, | |||
3519 | asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial); | 3549 | asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial); |
3520 | if (!asconf_ack) | 3550 | if (!asconf_ack) |
3521 | return SCTP_DISPOSITION_DISCARD; | 3551 | return SCTP_DISPOSITION_DISCARD; |
3552 | |||
3553 | /* Reset the transport so that we select the correct one | ||
3554 | * this time around. This is to make sure that we don't | ||
3555 | * accidentally use a stale transport that's been removed. | ||
3556 | */ | ||
3557 | asconf_ack->transport = NULL; | ||
3522 | } else { | 3558 | } else { |
3523 | /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since | 3559 | /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since |
3524 | * it must be either a stale packet or from an attacker. | 3560 | * it must be either a stale packet or from an attacker. |
@@ -4546,9 +4582,9 @@ sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep, | |||
4546 | void *arg, | 4582 | void *arg, |
4547 | sctp_cmd_seq_t *commands) | 4583 | sctp_cmd_seq_t *commands) |
4548 | { | 4584 | { |
4549 | struct sctp_chunk *chunk = arg; | 4585 | struct sctp_datamsg *msg = arg; |
4550 | 4586 | ||
4551 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk)); | 4587 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg)); |
4552 | return SCTP_DISPOSITION_CONSUME; | 4588 | return SCTP_DISPOSITION_CONSUME; |
4553 | } | 4589 | } |
4554 | 4590 | ||
@@ -5847,6 +5883,9 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5847 | __u32 tsn; | 5883 | __u32 tsn; |
5848 | struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; | 5884 | struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; |
5849 | struct sock *sk = asoc->base.sk; | 5885 | struct sock *sk = asoc->base.sk; |
5886 | u16 ssn; | ||
5887 | u16 sid; | ||
5888 | u8 ordered = 0; | ||
5850 | 5889 | ||
5851 | data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; | 5890 | data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; |
5852 | skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); | 5891 | skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); |
@@ -5986,8 +6025,10 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5986 | */ | 6025 | */ |
5987 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) | 6026 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) |
5988 | SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS); | 6027 | SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS); |
5989 | else | 6028 | else { |
5990 | SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS); | 6029 | SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS); |
6030 | ordered = 1; | ||
6031 | } | ||
5991 | 6032 | ||
5992 | /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number | 6033 | /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number |
5993 | * | 6034 | * |
@@ -5997,7 +6038,8 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5997 | * with cause set to "Invalid Stream Identifier" (See Section 3.3.10) | 6038 | * with cause set to "Invalid Stream Identifier" (See Section 3.3.10) |
5998 | * and discard the DATA chunk. | 6039 | * and discard the DATA chunk. |
5999 | */ | 6040 | */ |
6000 | if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) { | 6041 | sid = ntohs(data_hdr->stream); |
6042 | if (sid >= asoc->c.sinit_max_instreams) { | ||
6001 | /* Mark tsn as received even though we drop it */ | 6043 | /* Mark tsn as received even though we drop it */ |
6002 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); | 6044 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); |
6003 | 6045 | ||
@@ -6010,6 +6052,18 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
6010 | return SCTP_IERROR_BAD_STREAM; | 6052 | return SCTP_IERROR_BAD_STREAM; |
6011 | } | 6053 | } |
6012 | 6054 | ||
6055 | /* Check to see if the SSN is possible for this TSN. | ||
6056 | * The biggest gap we can record is 4K wide. Since SSNs wrap | ||
6057 | * at an unsigned short, there is no way that an SSN can | ||
6058 | * wrap within the window of a valid TSN. We can simply check if | ||
6059 | * the current SSN is smaller than the next expected one. If it is, | ||
6060 | * it wrapped and is invalid. | ||
6061 | */ | ||
6062 | ssn = ntohs(data_hdr->ssn); | ||
6063 | if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) { | ||
6064 | return SCTP_IERROR_PROTO_VIOLATION; | ||
6065 | } | ||
6066 | |||
6013 | /* Send the data up to the user. Note: Schedule the | 6067 | /* Send the data up to the user. Note: Schedule the |
6014 | * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK | 6068 | * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK |
6015 | * chunk needs the updated rwnd. | 6069 | * chunk needs the updated rwnd. |
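Among the net/sctp/sm_statefuns.c changes, sctp_eat_data() now rejects an ordered DATA chunk whose SSN is serially less than the next SSN expected on its stream, since with 16-bit stream sequence numbers that can only mean the value wrapped. A minimal standalone version of the comparison is sketched below; ssn_lt() is the common two's-complement serial test and is assumed, not verified, to match the kernel's SSN_lt() for this purpose.

    #include <stdbool.h>
    #include <stdint.h>

    /* Serial-number "less than" for 16-bit sequence numbers. */
    static bool ssn_lt(uint16_t a, uint16_t b)
    {
        return (int16_t)(a - b) < 0;
    }

    /* An ordered chunk is acceptable only if its SSN has not fallen behind
     * the next SSN expected on that stream; unordered chunks always pass. */
    static bool ordered_ssn_ok(bool ordered, uint16_t ssn, uint16_t expected)
    {
        return !ordered || !ssn_lt(ssn, expected);
    }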
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 971890dbfea0..89af37a6c871 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -1361,6 +1361,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout) | |||
1361 | 1361 | ||
1362 | sctp_lock_sock(sk); | 1362 | sctp_lock_sock(sk); |
1363 | sk->sk_shutdown = SHUTDOWN_MASK; | 1363 | sk->sk_shutdown = SHUTDOWN_MASK; |
1364 | sk->sk_state = SCTP_SS_CLOSING; | ||
1364 | 1365 | ||
1365 | ep = sctp_sk(sk)->ep; | 1366 | ep = sctp_sk(sk)->ep; |
1366 | 1367 | ||
@@ -1813,20 +1814,22 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
1813 | sctp_set_owner_w(chunk); | 1814 | sctp_set_owner_w(chunk); |
1814 | 1815 | ||
1815 | chunk->transport = chunk_tp; | 1816 | chunk->transport = chunk_tp; |
1816 | |||
1817 | /* Send it to the lower layers. Note: all chunks | ||
1818 | * must either fail or succeed. The lower layer | ||
1819 | * works that way today. Keep it that way or this | ||
1820 | * breaks. | ||
1821 | */ | ||
1822 | err = sctp_primitive_SEND(asoc, chunk); | ||
1823 | /* Did the lower layer accept the chunk? */ | ||
1824 | if (err) | ||
1825 | sctp_chunk_free(chunk); | ||
1826 | SCTP_DEBUG_PRINTK("We sent primitively.\n"); | ||
1827 | } | 1817 | } |
1828 | 1818 | ||
1829 | sctp_datamsg_put(datamsg); | 1819 | /* Send it to the lower layers. Note: all chunks |
1820 | * must either fail or succeed. The lower layer | ||
1821 | * works that way today. Keep it that way or this | ||
1822 | * breaks. | ||
1823 | */ | ||
1824 | err = sctp_primitive_SEND(asoc, datamsg); | ||
1825 | /* Did the lower layer accept the chunk? */ | ||
1826 | if (err) | ||
1827 | sctp_datamsg_free(datamsg); | ||
1828 | else | ||
1829 | sctp_datamsg_put(datamsg); | ||
1830 | |||
1831 | SCTP_DEBUG_PRINTK("We sent primitively.\n"); | ||
1832 | |||
1830 | if (err) | 1833 | if (err) |
1831 | goto out_free; | 1834 | goto out_free; |
1832 | else | 1835 | else |
@@ -2240,7 +2243,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, | |||
2240 | sctp_assoc_sync_pmtu(asoc); | 2243 | sctp_assoc_sync_pmtu(asoc); |
2241 | } else if (asoc) { | 2244 | } else if (asoc) { |
2242 | asoc->pathmtu = params->spp_pathmtu; | 2245 | asoc->pathmtu = params->spp_pathmtu; |
2243 | sctp_frag_point(sp, params->spp_pathmtu); | 2246 | sctp_frag_point(asoc, params->spp_pathmtu); |
2244 | } else { | 2247 | } else { |
2245 | sp->pathmtu = params->spp_pathmtu; | 2248 | sp->pathmtu = params->spp_pathmtu; |
2246 | } | 2249 | } |
@@ -2877,15 +2880,10 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optl | |||
2877 | val -= sizeof(struct sctphdr) + | 2880 | val -= sizeof(struct sctphdr) + |
2878 | sizeof(struct sctp_data_chunk); | 2881 | sizeof(struct sctp_data_chunk); |
2879 | } | 2882 | } |
2880 | 2883 | asoc->user_frag = val; | |
2881 | asoc->frag_point = val; | 2884 | asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); |
2882 | } else { | 2885 | } else { |
2883 | sp->user_frag = val; | 2886 | sp->user_frag = val; |
2884 | |||
2885 | /* Update the frag_point of the existing associations. */ | ||
2886 | list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { | ||
2887 | asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu); | ||
2888 | } | ||
2889 | } | 2887 | } |
2890 | 2888 | ||
2891 | return 0; | 2889 | return 0; |
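The net/sctp/socket.c hunks mark the socket CLOSING as soon as close() starts, submit the whole datamsg through sctp_primitive_SEND() in one call (freeing it on failure, dropping the caller's reference on success), and derive frag_point from the new per-association user_frag whenever SCTP_MAXSEG changes. For orientation, a hedged userspace example of the option that last hunk reworks; it assumes the plain-integer form of SCTP_MAXSEG and lksctp's <netinet/sctp.h>:

    #include <netinet/in.h>
    #include <netinet/sctp.h>
    #include <sys/socket.h>

    /* Cap the user data carried per DATA chunk; 0 restores the default of
     * fragmenting at the path MTU. */
    static int set_max_seg(int fd, int bytes)
    {
        return setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG,
                          &bytes, sizeof(bytes));
    }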
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 63eabbc71298..ab7151da120f 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c | |||
@@ -51,6 +51,7 @@ static int timer_max = 86400000; /* ms in one day */ | |||
51 | static int int_max = INT_MAX; | 51 | static int int_max = INT_MAX; |
52 | static int sack_timer_min = 1; | 52 | static int sack_timer_min = 1; |
53 | static int sack_timer_max = 500; | 53 | static int sack_timer_max = 500; |
54 | static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ | ||
54 | 55 | ||
55 | extern int sysctl_sctp_mem[3]; | 56 | extern int sysctl_sctp_mem[3]; |
56 | extern int sysctl_sctp_rmem[3]; | 57 | extern int sysctl_sctp_rmem[3]; |
@@ -272,6 +273,17 @@ static ctl_table sctp_table[] = { | |||
272 | .proc_handler = proc_dointvec, | 273 | .proc_handler = proc_dointvec, |
273 | .strategy = sysctl_intvec | 274 | .strategy = sysctl_intvec |
274 | }, | 275 | }, |
276 | { | ||
277 | .ctl_name = CTL_UNNUMBERED, | ||
278 | .procname = "addr_scope_policy", | ||
279 | .data = &sctp_scope_policy, | ||
280 | .maxlen = sizeof(int), | ||
281 | .mode = 0644, | ||
282 | .proc_handler = &proc_dointvec_minmax, | ||
283 | .strategy = &sysctl_intvec, | ||
284 | .extra1 = &zero, | ||
285 | .extra2 = &addr_scope_max, | ||
286 | }, | ||
275 | { .ctl_name = 0 } | 287 | { .ctl_name = 0 } |
276 | }; | 288 | }; |
277 | 289 | ||
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index e5dde45c79d3..c256e4839316 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -503,6 +503,9 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport, | |||
503 | transport->ssthresh = max(transport->cwnd/2, | 503 | transport->ssthresh = max(transport->cwnd/2, |
504 | 4*transport->asoc->pathmtu); | 504 | 4*transport->asoc->pathmtu); |
505 | transport->cwnd = transport->asoc->pathmtu; | 505 | transport->cwnd = transport->asoc->pathmtu; |
506 | |||
507 | /* T3-rtx also clears fast recovery on the transport */ | ||
508 | transport->fast_recovery = 0; | ||
506 | break; | 509 | break; |
507 | 510 | ||
508 | case SCTP_LOWER_CWND_FAST_RTX: | 511 | case SCTP_LOWER_CWND_FAST_RTX: |
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index a7a36779b9b3..327011fcc407 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -119,7 +119,7 @@ int tipc_register_media(u32 media_type, | |||
119 | warn("Media <%s> rejected, no broadcast address\n", name); | 119 | warn("Media <%s> rejected, no broadcast address\n", name); |
120 | goto exit; | 120 | goto exit; |
121 | } | 121 | } |
122 | if ((bearer_priority < TIPC_MIN_LINK_PRI) && | 122 | if ((bearer_priority < TIPC_MIN_LINK_PRI) || |
123 | (bearer_priority > TIPC_MAX_LINK_PRI)) { | 123 | (bearer_priority > TIPC_MAX_LINK_PRI)) { |
124 | warn("Media <%s> rejected, illegal priority (%u)\n", name, | 124 | warn("Media <%s> rejected, illegal priority (%u)\n", name, |
125 | bearer_priority); | 125 | bearer_priority); |
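The net/tipc/bearer.c change is a classic range-check fix: with '&&' no priority can be both below the minimum and above the maximum, so the rejection branch was unreachable and every value passed. A standalone illustration, with MIN_PRI/MAX_PRI standing in for TIPC_MIN_LINK_PRI/TIPC_MAX_LINK_PRI:

    #include <stdbool.h>

    #define MIN_PRI 1   /* stand-in for TIPC_MIN_LINK_PRI */
    #define MAX_PRI 3   /* stand-in for TIPC_MAX_LINK_PRI */

    static bool priority_out_of_range(unsigned int p)
    {
        /* Before the fix this used '&&', which is never true, so every
         * priority was accepted. */
        return p < MIN_PRI || p > MAX_PRI;
    }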
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index 3c57005e44d1..7bda8e3d1398 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c | |||
@@ -62,7 +62,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info) | |||
62 | rep_nlh = nlmsg_hdr(rep_buf); | 62 | rep_nlh = nlmsg_hdr(rep_buf); |
63 | memcpy(rep_nlh, req_nlh, hdr_space); | 63 | memcpy(rep_nlh, req_nlh, hdr_space); |
64 | rep_nlh->nlmsg_len = rep_buf->len; | 64 | rep_nlh->nlmsg_len = rep_buf->len; |
65 | genlmsg_unicast(rep_buf, NETLINK_CB(skb).pid); | 65 | genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).pid); |
66 | } | 66 | } |
67 | 67 | ||
68 | return 0; | 68 | return 0; |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 1848693ebb82..e8254e809b79 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1748,6 +1748,12 @@ static int getsockopt(struct socket *sock, | |||
1748 | value = jiffies_to_msecs(sk->sk_rcvtimeo); | 1748 | value = jiffies_to_msecs(sk->sk_rcvtimeo); |
1749 | /* no need to set "res", since already 0 at this point */ | 1749 | /* no need to set "res", since already 0 at this point */ |
1750 | break; | 1750 | break; |
1751 | case TIPC_NODE_RECVQ_DEPTH: | ||
1752 | value = (u32)atomic_read(&tipc_queue_size); | ||
1753 | break; | ||
1754 | case TIPC_SOCK_RECVQ_DEPTH: | ||
1755 | value = skb_queue_len(&sk->sk_receive_queue); | ||
1756 | break; | ||
1751 | default: | 1757 | default: |
1752 | res = -EINVAL; | 1758 | res = -EINVAL; |
1753 | } | 1759 | } |
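net/tipc/socket.c adds two read-only depth queries to getsockopt(). A possible userspace call is sketched below; it assumes that <linux/tipc.h> exposes TIPC_SOCK_RECVQ_DEPTH and that the query is made at the SOL_TIPC level like the existing TIPC options.

    #include <linux/tipc.h>
    #include <sys/socket.h>

    #ifndef SOL_TIPC
    #define SOL_TIPC 271    /* assumed value from <linux/socket.h> */
    #endif

    /* Ask how many messages are currently queued on this socket. */
    static int sock_recvq_depth(int fd, unsigned int *depth)
    {
        socklen_t len = sizeof(*depth);

        return getsockopt(fd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, depth, &len);
    }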
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index fc3ebb906911..51ab497115eb 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -1501,6 +1501,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, | |||
1501 | struct sk_buff *skb; | 1501 | struct sk_buff *skb; |
1502 | int sent = 0; | 1502 | int sent = 0; |
1503 | struct scm_cookie tmp_scm; | 1503 | struct scm_cookie tmp_scm; |
1504 | bool fds_sent = false; | ||
1504 | 1505 | ||
1505 | if (NULL == siocb->scm) | 1506 | if (NULL == siocb->scm) |
1506 | siocb->scm = &tmp_scm; | 1507 | siocb->scm = &tmp_scm; |
@@ -1562,12 +1563,14 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, | |||
1562 | size = min_t(int, size, skb_tailroom(skb)); | 1563 | size = min_t(int, size, skb_tailroom(skb)); |
1563 | 1564 | ||
1564 | memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); | 1565 | memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); |
1565 | if (siocb->scm->fp) { | 1566 | /* Only send the fds in the first buffer */ |
1567 | if (siocb->scm->fp && !fds_sent) { | ||
1566 | err = unix_attach_fds(siocb->scm, skb); | 1568 | err = unix_attach_fds(siocb->scm, skb); |
1567 | if (err) { | 1569 | if (err) { |
1568 | kfree_skb(skb); | 1570 | kfree_skb(skb); |
1569 | goto out_err; | 1571 | goto out_err; |
1570 | } | 1572 | } |
1573 | fds_sent = true; | ||
1571 | } | 1574 | } |
1572 | 1575 | ||
1573 | err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); | 1576 | err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); |
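unix_stream_sendmsg() may split one write into several skbs; attaching the SCM_RIGHTS descriptors to every fragment delivered them to the receiver more than once, so the new fds_sent flag restricts them to the first skb. The shape of the fix, lifted into a neutral sketch with illustrative callbacks (attach_fds, send_payload):

    #include <stdbool.h>
    #include <stddef.h>

    /* One logical send split into fragments: ancillary file descriptors are
     * attached to the first fragment only. */
    static int send_in_fragments(size_t total, size_t max_frag,
                                 int (*attach_fds)(size_t frag_off),
                                 int (*send_payload)(size_t off, size_t len))
    {
        bool fds_sent = false;
        size_t off = 0;

        while (off < total) {
            size_t len = total - off < max_frag ? total - off : max_frag;

            if (attach_fds && !fds_sent) {
                int err = attach_fds(off);
                if (err)
                    return err;
                fds_sent = true;    /* never attach them again */
            }

            if (send_payload(off, len))
                return -1;
            off += len;
        }
        return 0;
    }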
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 4428dd5e911d..abf7ca3f9ff9 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig | |||
@@ -1,6 +1,47 @@ | |||
1 | config CFG80211 | 1 | config CFG80211 |
2 | tristate "Improved wireless configuration API" | 2 | tristate "cfg80211 - wireless configuration API" |
3 | depends on RFKILL || !RFKILL | 3 | depends on RFKILL || !RFKILL |
4 | ---help--- | ||
5 | cfg80211 is the Linux wireless LAN (802.11) configuration API. | ||
6 | Enable this if you have a wireless device. | ||
7 | |||
8 | For more information refer to documentation on the wireless wiki: | ||
9 | |||
10 | http://wireless.kernel.org/en/developers/Documentation/cfg80211 | ||
11 | |||
12 | When built as a module it will be called cfg80211. | ||
13 | |||
14 | config NL80211_TESTMODE | ||
15 | bool "nl80211 testmode command" | ||
16 | depends on CFG80211 | ||
17 | help | ||
18 | The nl80211 testmode command helps implement things like | ||
19 | factory calibration or validation tools for wireless chips. | ||
20 | |||
21 | Select this option ONLY for kernels that are specifically | ||
22 | built for such purposes. | ||
23 | |||
24 | Debugging tools that are supposed to end up in the hands of | ||
25 | users should better be implemented with debugfs. | ||
26 | |||
27 | Say N. | ||
28 | |||
29 | config CFG80211_DEVELOPER_WARNINGS | ||
30 | bool "enable developer warnings" | ||
31 | depends on CFG80211 | ||
32 | default n | ||
33 | help | ||
34 | This option enables some additional warnings that help | ||
35 | cfg80211 developers and driver developers, but that can | ||
36 | trigger due to races with userspace. | ||
37 | |||
38 | For example, when a driver reports that it was disconnected | ||
39 | from the AP, but the user disconnects manually at the same | ||
40 | time, the warning might trigger spuriously due to races. | ||
41 | |||
42 | Say Y only if you are developing cfg80211 or a driver based | ||
43 | on it (or mac80211). | ||
44 | |||
4 | 45 | ||
5 | config CFG80211_REG_DEBUG | 46 | config CFG80211_REG_DEBUG |
6 | bool "cfg80211 regulatory debugging" | 47 | bool "cfg80211 regulatory debugging" |
@@ -8,9 +49,29 @@ config CFG80211_REG_DEBUG | |||
8 | default n | 49 | default n |
9 | ---help--- | 50 | ---help--- |
10 | You can enable this if you want to debug regulatory changes. | 51 | You can enable this if you want to debug regulatory changes. |
52 | For more information on cfg80211 regulatory refer to the wireless | ||
53 | wiki: | ||
54 | |||
55 | http://wireless.kernel.org/en/developers/Regulatory | ||
11 | 56 | ||
12 | If unsure, say N. | 57 | If unsure, say N. |
13 | 58 | ||
59 | config CFG80211_DEFAULT_PS | ||
60 | bool "enable powersave by default" | ||
61 | depends on CFG80211 | ||
62 | default y | ||
63 | help | ||
64 | This option enables powersave mode by default. | ||
65 | |||
66 | If this causes your applications to misbehave you should fix your | ||
67 | applications instead -- they need to register their network | ||
68 | latency requirement, see Documentation/power/pm_qos_interface.txt. | ||
69 | |||
70 | config CFG80211_DEFAULT_PS_VALUE | ||
71 | int | ||
72 | default 1 if CFG80211_DEFAULT_PS | ||
73 | default 0 | ||
74 | |||
14 | config CFG80211_DEBUGFS | 75 | config CFG80211_DEBUGFS |
15 | bool "cfg80211 DebugFS entries" | 76 | bool "cfg80211 DebugFS entries" |
16 | depends on CFG80211 && DEBUG_FS | 77 | depends on CFG80211 && DEBUG_FS |
@@ -35,19 +96,13 @@ config WIRELESS_OLD_REGULATORY | |||
35 | 96 | ||
36 | config WIRELESS_EXT | 97 | config WIRELESS_EXT |
37 | bool "Wireless extensions" | 98 | bool "Wireless extensions" |
38 | default n | 99 | default y |
39 | ---help--- | 100 | ---help--- |
40 | This option enables the legacy wireless extensions | 101 | This option enables the legacy wireless extensions |
41 | (wireless network interface configuration via ioctls.) | 102 | (wireless network interface configuration via ioctls.) |
42 | 103 | ||
43 | Wireless extensions will be replaced by cfg80211 and | 104 | Say Y unless you've upgraded all your userspace to use |
44 | will be required only by legacy drivers that implement | 105 | nl80211 instead of wireless extensions. |
45 | wireless extension handlers. This option does not | ||
46 | affect the wireless-extension backward compatibility | ||
47 | code in cfg80211. | ||
48 | |||
49 | Say N (if you can) unless you know you need wireless | ||
50 | extensions for external modules. | ||
51 | 106 | ||
52 | config WIRELESS_EXT_SYSFS | 107 | config WIRELESS_EXT_SYSFS |
53 | bool "Wireless extensions sysfs files" | 108 | bool "Wireless extensions sysfs files" |
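The CFG80211_DEFAULT_PS_VALUE symbol exists only so the boolean choice above can be consumed from C code; the core.c changes further down copy it into each new wiphy, and each wdev inherits it at registration time. A minimal sketch of that flow, using the field names introduced by this series:

	/* expands to 1 if CFG80211_DEFAULT_PS=y, 0 otherwise */
	rdev->wiphy.ps_default = CONFIG_CFG80211_DEFAULT_PS_VALUE;

	/* later, in the NETDEV_REGISTER notifier (wext path) */
	wdev->wext.ps = wdev->wiphy->ps_default;
	wdev->wext.ps_timeout = 100;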
diff --git a/net/wireless/Makefile b/net/wireless/Makefile index f78c4832a9ca..3ecaa9179977 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile | |||
@@ -5,8 +5,9 @@ obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o | |||
5 | obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o | 5 | obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o |
6 | obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o | 6 | obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o |
7 | 7 | ||
8 | cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o mlme.o ibss.o | 8 | cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o |
9 | cfg80211-y += mlme.o ibss.o sme.o chan.o | ||
9 | cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o | 10 | cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o |
10 | cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o | 11 | cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o wext-sme.o |
11 | 12 | ||
12 | ccflags-y += -D__CHECK_ENDIAN__ | 13 | ccflags-y += -D__CHECK_ENDIAN__ |
diff --git a/net/wireless/chan.c b/net/wireless/chan.c new file mode 100644 index 000000000000..a46ac6c9b365 --- /dev/null +++ b/net/wireless/chan.c | |||
@@ -0,0 +1,89 @@ | |||
1 | /* | ||
2 | * This file contains helper code to handle channel | ||
3 | * settings and keeping track of what is possible at | ||
4 | * any point in time. | ||
5 | * | ||
6 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
7 | */ | ||
8 | |||
9 | #include <net/cfg80211.h> | ||
10 | #include "core.h" | ||
11 | |||
12 | struct ieee80211_channel * | ||
13 | rdev_fixed_channel(struct cfg80211_registered_device *rdev, | ||
14 | struct wireless_dev *for_wdev) | ||
15 | { | ||
16 | struct wireless_dev *wdev; | ||
17 | struct ieee80211_channel *result = NULL; | ||
18 | |||
19 | WARN_ON(!mutex_is_locked(&rdev->devlist_mtx)); | ||
20 | |||
21 | list_for_each_entry(wdev, &rdev->netdev_list, list) { | ||
22 | if (wdev == for_wdev) | ||
23 | continue; | ||
24 | |||
25 | /* | ||
26 | * Lock manually to tell lockdep about allowed | ||
27 | * nesting here if for_wdev->mtx is held already. | ||
28 | * This is ok as it's all under the rdev devlist | ||
29 | * mutex and as such can only be done once at any | ||
30 | * given time. | ||
31 | */ | ||
32 | mutex_lock_nested(&wdev->mtx, SINGLE_DEPTH_NESTING); | ||
33 | if (wdev->current_bss) | ||
34 | result = wdev->current_bss->pub.channel; | ||
35 | wdev_unlock(wdev); | ||
36 | |||
37 | if (result) | ||
38 | break; | ||
39 | } | ||
40 | |||
41 | return result; | ||
42 | } | ||
43 | |||
44 | int rdev_set_freq(struct cfg80211_registered_device *rdev, | ||
45 | struct wireless_dev *for_wdev, | ||
46 | int freq, enum nl80211_channel_type channel_type) | ||
47 | { | ||
48 | struct ieee80211_channel *chan; | ||
49 | struct ieee80211_sta_ht_cap *ht_cap; | ||
50 | int result; | ||
51 | |||
52 | if (rdev_fixed_channel(rdev, for_wdev)) | ||
53 | return -EBUSY; | ||
54 | |||
55 | if (!rdev->ops->set_channel) | ||
56 | return -EOPNOTSUPP; | ||
57 | |||
58 | chan = ieee80211_get_channel(&rdev->wiphy, freq); | ||
59 | |||
60 | /* Primary channel not allowed */ | ||
61 | if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) | ||
62 | return -EINVAL; | ||
63 | |||
64 | if (channel_type == NL80211_CHAN_HT40MINUS && | ||
65 | chan->flags & IEEE80211_CHAN_NO_HT40MINUS) | ||
66 | return -EINVAL; | ||
67 | else if (channel_type == NL80211_CHAN_HT40PLUS && | ||
68 | chan->flags & IEEE80211_CHAN_NO_HT40PLUS) | ||
69 | return -EINVAL; | ||
70 | |||
71 | ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap; | ||
72 | |||
73 | if (channel_type != NL80211_CHAN_NO_HT) { | ||
74 | if (!ht_cap->ht_supported) | ||
75 | return -EINVAL; | ||
76 | |||
77 | if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || | ||
78 | ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT) | ||
79 | return -EINVAL; | ||
80 | } | ||
81 | |||
82 | result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type); | ||
83 | if (result) | ||
84 | return result; | ||
85 | |||
86 | rdev->channel = chan; | ||
87 | |||
88 | return 0; | ||
89 | } | ||
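A hedged usage sketch for the new chan.c helpers: rdev_fixed_channel() asserts that rdev->devlist_mtx is held, and rdev_set_freq() calls it internally, so a hypothetical caller (the real callers are the nl80211 and wext paths elsewhere in this series) would look roughly like:

	int err;

	mutex_lock(&rdev->devlist_mtx);
	/* -EBUSY if another interface on this wiphy already pins the channel */
	err = rdev_set_freq(rdev, wdev, 2412 /* MHz */, NL80211_CHAN_NO_HT);
	mutex_unlock(&rdev->devlist_mtx);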
diff --git a/net/wireless/core.c b/net/wireless/core.c index d5850292b3df..45b2be3274db 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/debugfs.h> | 12 | #include <linux/debugfs.h> |
13 | #include <linux/notifier.h> | 13 | #include <linux/notifier.h> |
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | #include <linux/etherdevice.h> | ||
15 | #include <linux/rtnetlink.h> | 16 | #include <linux/rtnetlink.h> |
16 | #include <net/genetlink.h> | 17 | #include <net/genetlink.h> |
17 | #include <net/cfg80211.h> | 18 | #include <net/cfg80211.h> |
@@ -19,6 +20,7 @@ | |||
19 | #include "core.h" | 20 | #include "core.h" |
20 | #include "sysfs.h" | 21 | #include "sysfs.h" |
21 | #include "debugfs.h" | 22 | #include "debugfs.h" |
23 | #include "wext-compat.h" | ||
22 | 24 | ||
23 | /* name for sysfs, %d is appended */ | 25 | /* name for sysfs, %d is appended */ |
24 | #define PHY_NAME "phy" | 26 | #define PHY_NAME "phy" |
@@ -30,12 +32,11 @@ MODULE_DESCRIPTION("wireless configuration support"); | |||
30 | /* RCU might be appropriate here since we usually | 32 | /* RCU might be appropriate here since we usually |
31 | * only read the list, and that can happen quite | 33 | * only read the list, and that can happen quite |
32 | * often because we need to do it for each command */ | 34 | * often because we need to do it for each command */ |
33 | LIST_HEAD(cfg80211_drv_list); | 35 | LIST_HEAD(cfg80211_rdev_list); |
36 | int cfg80211_rdev_list_generation; | ||
34 | 37 | ||
35 | /* | 38 | /* |
36 | * This is used to protect the cfg80211_drv_list, cfg80211_regdomain, | 39 | * This is used to protect the cfg80211_rdev_list |
37 | * country_ie_regdomain, the reg_beacon_list and the last regulatory | ||
38 | * request receipt (last_request). | ||
39 | */ | 40 | */ |
40 | DEFINE_MUTEX(cfg80211_mutex); | 41 | DEFINE_MUTEX(cfg80211_mutex); |
41 | 42 | ||
@@ -43,18 +44,18 @@ DEFINE_MUTEX(cfg80211_mutex); | |||
43 | static struct dentry *ieee80211_debugfs_dir; | 44 | static struct dentry *ieee80211_debugfs_dir; |
44 | 45 | ||
45 | /* requires cfg80211_mutex to be held! */ | 46 | /* requires cfg80211_mutex to be held! */ |
46 | struct cfg80211_registered_device *cfg80211_drv_by_wiphy_idx(int wiphy_idx) | 47 | struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx) |
47 | { | 48 | { |
48 | struct cfg80211_registered_device *result = NULL, *drv; | 49 | struct cfg80211_registered_device *result = NULL, *rdev; |
49 | 50 | ||
50 | if (!wiphy_idx_valid(wiphy_idx)) | 51 | if (!wiphy_idx_valid(wiphy_idx)) |
51 | return NULL; | 52 | return NULL; |
52 | 53 | ||
53 | assert_cfg80211_lock(); | 54 | assert_cfg80211_lock(); |
54 | 55 | ||
55 | list_for_each_entry(drv, &cfg80211_drv_list, list) { | 56 | list_for_each_entry(rdev, &cfg80211_rdev_list, list) { |
56 | if (drv->wiphy_idx == wiphy_idx) { | 57 | if (rdev->wiphy_idx == wiphy_idx) { |
57 | result = drv; | 58 | result = rdev; |
58 | break; | 59 | break; |
59 | } | 60 | } |
60 | } | 61 | } |
@@ -64,32 +65,32 @@ struct cfg80211_registered_device *cfg80211_drv_by_wiphy_idx(int wiphy_idx) | |||
64 | 65 | ||
65 | int get_wiphy_idx(struct wiphy *wiphy) | 66 | int get_wiphy_idx(struct wiphy *wiphy) |
66 | { | 67 | { |
67 | struct cfg80211_registered_device *drv; | 68 | struct cfg80211_registered_device *rdev; |
68 | if (!wiphy) | 69 | if (!wiphy) |
69 | return WIPHY_IDX_STALE; | 70 | return WIPHY_IDX_STALE; |
70 | drv = wiphy_to_dev(wiphy); | 71 | rdev = wiphy_to_dev(wiphy); |
71 | return drv->wiphy_idx; | 72 | return rdev->wiphy_idx; |
72 | } | 73 | } |
73 | 74 | ||
74 | /* requires cfg80211_drv_mutex to be held! */ | 75 | /* requires cfg80211_rdev_mutex to be held! */ |
75 | struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx) | 76 | struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx) |
76 | { | 77 | { |
77 | struct cfg80211_registered_device *drv; | 78 | struct cfg80211_registered_device *rdev; |
78 | 79 | ||
79 | if (!wiphy_idx_valid(wiphy_idx)) | 80 | if (!wiphy_idx_valid(wiphy_idx)) |
80 | return NULL; | 81 | return NULL; |
81 | 82 | ||
82 | assert_cfg80211_lock(); | 83 | assert_cfg80211_lock(); |
83 | 84 | ||
84 | drv = cfg80211_drv_by_wiphy_idx(wiphy_idx); | 85 | rdev = cfg80211_rdev_by_wiphy_idx(wiphy_idx); |
85 | if (!drv) | 86 | if (!rdev) |
86 | return NULL; | 87 | return NULL; |
87 | return &drv->wiphy; | 88 | return &rdev->wiphy; |
88 | } | 89 | } |
89 | 90 | ||
90 | /* requires cfg80211_mutex to be held! */ | 91 | /* requires cfg80211_mutex to be held! */ |
91 | struct cfg80211_registered_device * | 92 | struct cfg80211_registered_device * |
92 | __cfg80211_drv_from_info(struct genl_info *info) | 93 | __cfg80211_rdev_from_info(struct genl_info *info) |
93 | { | 94 | { |
94 | int ifindex; | 95 | int ifindex; |
95 | struct cfg80211_registered_device *bywiphyidx = NULL, *byifidx = NULL; | 96 | struct cfg80211_registered_device *bywiphyidx = NULL, *byifidx = NULL; |
@@ -99,14 +100,14 @@ __cfg80211_drv_from_info(struct genl_info *info) | |||
99 | assert_cfg80211_lock(); | 100 | assert_cfg80211_lock(); |
100 | 101 | ||
101 | if (info->attrs[NL80211_ATTR_WIPHY]) { | 102 | if (info->attrs[NL80211_ATTR_WIPHY]) { |
102 | bywiphyidx = cfg80211_drv_by_wiphy_idx( | 103 | bywiphyidx = cfg80211_rdev_by_wiphy_idx( |
103 | nla_get_u32(info->attrs[NL80211_ATTR_WIPHY])); | 104 | nla_get_u32(info->attrs[NL80211_ATTR_WIPHY])); |
104 | err = -ENODEV; | 105 | err = -ENODEV; |
105 | } | 106 | } |
106 | 107 | ||
107 | if (info->attrs[NL80211_ATTR_IFINDEX]) { | 108 | if (info->attrs[NL80211_ATTR_IFINDEX]) { |
108 | ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]); | 109 | ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]); |
109 | dev = dev_get_by_index(&init_net, ifindex); | 110 | dev = dev_get_by_index(genl_info_net(info), ifindex); |
110 | if (dev) { | 111 | if (dev) { |
111 | if (dev->ieee80211_ptr) | 112 | if (dev->ieee80211_ptr) |
112 | byifidx = | 113 | byifidx = |
@@ -134,54 +135,48 @@ __cfg80211_drv_from_info(struct genl_info *info) | |||
134 | struct cfg80211_registered_device * | 135 | struct cfg80211_registered_device * |
135 | cfg80211_get_dev_from_info(struct genl_info *info) | 136 | cfg80211_get_dev_from_info(struct genl_info *info) |
136 | { | 137 | { |
137 | struct cfg80211_registered_device *drv; | 138 | struct cfg80211_registered_device *rdev; |
138 | 139 | ||
139 | mutex_lock(&cfg80211_mutex); | 140 | mutex_lock(&cfg80211_mutex); |
140 | drv = __cfg80211_drv_from_info(info); | 141 | rdev = __cfg80211_rdev_from_info(info); |
141 | 142 | ||
142 | /* if it is not an error we grab the lock on | 143 | /* if it is not an error we grab the lock on |
143 | * it to assure it won't be going away while | 144 | * it to assure it won't be going away while |
144 | * we operate on it */ | 145 | * we operate on it */ |
145 | if (!IS_ERR(drv)) | 146 | if (!IS_ERR(rdev)) |
146 | mutex_lock(&drv->mtx); | 147 | mutex_lock(&rdev->mtx); |
147 | 148 | ||
148 | mutex_unlock(&cfg80211_mutex); | 149 | mutex_unlock(&cfg80211_mutex); |
149 | 150 | ||
150 | return drv; | 151 | return rdev; |
151 | } | 152 | } |
152 | 153 | ||
153 | struct cfg80211_registered_device * | 154 | struct cfg80211_registered_device * |
154 | cfg80211_get_dev_from_ifindex(int ifindex) | 155 | cfg80211_get_dev_from_ifindex(struct net *net, int ifindex) |
155 | { | 156 | { |
156 | struct cfg80211_registered_device *drv = ERR_PTR(-ENODEV); | 157 | struct cfg80211_registered_device *rdev = ERR_PTR(-ENODEV); |
157 | struct net_device *dev; | 158 | struct net_device *dev; |
158 | 159 | ||
159 | mutex_lock(&cfg80211_mutex); | 160 | mutex_lock(&cfg80211_mutex); |
160 | dev = dev_get_by_index(&init_net, ifindex); | 161 | dev = dev_get_by_index(net, ifindex); |
161 | if (!dev) | 162 | if (!dev) |
162 | goto out; | 163 | goto out; |
163 | if (dev->ieee80211_ptr) { | 164 | if (dev->ieee80211_ptr) { |
164 | drv = wiphy_to_dev(dev->ieee80211_ptr->wiphy); | 165 | rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); |
165 | mutex_lock(&drv->mtx); | 166 | mutex_lock(&rdev->mtx); |
166 | } else | 167 | } else |
167 | drv = ERR_PTR(-ENODEV); | 168 | rdev = ERR_PTR(-ENODEV); |
168 | dev_put(dev); | 169 | dev_put(dev); |
169 | out: | 170 | out: |
170 | mutex_unlock(&cfg80211_mutex); | 171 | mutex_unlock(&cfg80211_mutex); |
171 | return drv; | 172 | return rdev; |
172 | } | ||
173 | |||
174 | void cfg80211_put_dev(struct cfg80211_registered_device *drv) | ||
175 | { | ||
176 | BUG_ON(IS_ERR(drv)); | ||
177 | mutex_unlock(&drv->mtx); | ||
178 | } | 173 | } |
179 | 174 | ||
180 | /* requires cfg80211_mutex to be held */ | 175 | /* requires cfg80211_mutex to be held */ |
181 | int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, | 176 | int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, |
182 | char *newname) | 177 | char *newname) |
183 | { | 178 | { |
184 | struct cfg80211_registered_device *drv; | 179 | struct cfg80211_registered_device *rdev2; |
185 | int wiphy_idx, taken = -1, result, digits; | 180 | int wiphy_idx, taken = -1, result, digits; |
186 | 181 | ||
187 | assert_cfg80211_lock(); | 182 | assert_cfg80211_lock(); |
@@ -207,8 +202,8 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, | |||
207 | return 0; | 202 | return 0; |
208 | 203 | ||
209 | /* Ensure another device does not already have this name. */ | 204 | /* Ensure another device does not already have this name. */ |
210 | list_for_each_entry(drv, &cfg80211_drv_list, list) | 205 | list_for_each_entry(rdev2, &cfg80211_rdev_list, list) |
211 | if (strcmp(newname, dev_name(&drv->wiphy.dev)) == 0) | 206 | if (strcmp(newname, dev_name(&rdev2->wiphy.dev)) == 0) |
212 | return -EINVAL; | 207 | return -EINVAL; |
213 | 208 | ||
214 | result = device_rename(&rdev->wiphy.dev, newname); | 209 | result = device_rename(&rdev->wiphy.dev, newname); |
@@ -228,28 +223,64 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, | |||
228 | return 0; | 223 | return 0; |
229 | } | 224 | } |
230 | 225 | ||
226 | int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, | ||
227 | struct net *net) | ||
228 | { | ||
229 | struct wireless_dev *wdev; | ||
230 | int err = 0; | ||
231 | |||
232 | if (!rdev->wiphy.netnsok) | ||
233 | return -EOPNOTSUPP; | ||
234 | |||
235 | list_for_each_entry(wdev, &rdev->netdev_list, list) { | ||
236 | wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL; | ||
237 | err = dev_change_net_namespace(wdev->netdev, net, "wlan%d"); | ||
238 | if (err) | ||
239 | break; | ||
240 | wdev->netdev->features |= NETIF_F_NETNS_LOCAL; | ||
241 | } | ||
242 | |||
243 | if (err) { | ||
244 | /* failed -- clean up to old netns */ | ||
245 | net = wiphy_net(&rdev->wiphy); | ||
246 | |||
247 | list_for_each_entry_continue_reverse(wdev, &rdev->netdev_list, | ||
248 | list) { | ||
249 | wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL; | ||
250 | err = dev_change_net_namespace(wdev->netdev, net, | ||
251 | "wlan%d"); | ||
252 | WARN_ON(err); | ||
253 | wdev->netdev->features |= NETIF_F_NETNS_LOCAL; | ||
254 | } | ||
255 | } | ||
256 | |||
257 | wiphy_net_set(&rdev->wiphy, net); | ||
258 | |||
259 | return err; | ||
260 | } | ||
261 | |||
231 | static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) | 262 | static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) |
232 | { | 263 | { |
233 | struct cfg80211_registered_device *drv = data; | 264 | struct cfg80211_registered_device *rdev = data; |
234 | 265 | ||
235 | drv->ops->rfkill_poll(&drv->wiphy); | 266 | rdev->ops->rfkill_poll(&rdev->wiphy); |
236 | } | 267 | } |
237 | 268 | ||
238 | static int cfg80211_rfkill_set_block(void *data, bool blocked) | 269 | static int cfg80211_rfkill_set_block(void *data, bool blocked) |
239 | { | 270 | { |
240 | struct cfg80211_registered_device *drv = data; | 271 | struct cfg80211_registered_device *rdev = data; |
241 | struct wireless_dev *wdev; | 272 | struct wireless_dev *wdev; |
242 | 273 | ||
243 | if (!blocked) | 274 | if (!blocked) |
244 | return 0; | 275 | return 0; |
245 | 276 | ||
246 | rtnl_lock(); | 277 | rtnl_lock(); |
247 | mutex_lock(&drv->devlist_mtx); | 278 | mutex_lock(&rdev->devlist_mtx); |
248 | 279 | ||
249 | list_for_each_entry(wdev, &drv->netdev_list, list) | 280 | list_for_each_entry(wdev, &rdev->netdev_list, list) |
250 | dev_close(wdev->netdev); | 281 | dev_close(wdev->netdev); |
251 | 282 | ||
252 | mutex_unlock(&drv->devlist_mtx); | 283 | mutex_unlock(&rdev->devlist_mtx); |
253 | rtnl_unlock(); | 284 | rtnl_unlock(); |
254 | 285 | ||
255 | return 0; | 286 | return 0; |
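cfg80211_switch_netns() above refuses to move interfaces unless the driver opted in via wiphy.netnsok, and it rolls every netdev back to the old namespace if any single move fails. A hypothetical driver that wants to allow namespace changes would simply set the flag before registering:

	/* sketch: opt in to netns moves; the default is to stay in init_net */
	wiphy->netnsok = true;
	err = wiphy_register(wiphy);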
@@ -257,10 +288,25 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) | |||
257 | 288 | ||
258 | static void cfg80211_rfkill_sync_work(struct work_struct *work) | 289 | static void cfg80211_rfkill_sync_work(struct work_struct *work) |
259 | { | 290 | { |
260 | struct cfg80211_registered_device *drv; | 291 | struct cfg80211_registered_device *rdev; |
261 | 292 | ||
262 | drv = container_of(work, struct cfg80211_registered_device, rfkill_sync); | 293 | rdev = container_of(work, struct cfg80211_registered_device, rfkill_sync); |
263 | cfg80211_rfkill_set_block(drv, rfkill_blocked(drv->rfkill)); | 294 | cfg80211_rfkill_set_block(rdev, rfkill_blocked(rdev->rfkill)); |
295 | } | ||
296 | |||
297 | static void cfg80211_event_work(struct work_struct *work) | ||
298 | { | ||
299 | struct cfg80211_registered_device *rdev; | ||
300 | |||
301 | rdev = container_of(work, struct cfg80211_registered_device, | ||
302 | event_work); | ||
303 | |||
304 | rtnl_lock(); | ||
305 | cfg80211_lock_rdev(rdev); | ||
306 | |||
307 | cfg80211_process_rdev_events(rdev); | ||
308 | cfg80211_unlock_rdev(rdev); | ||
309 | rtnl_unlock(); | ||
264 | } | 310 | } |
265 | 311 | ||
266 | /* exported functions */ | 312 | /* exported functions */ |
@@ -269,76 +315,90 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) | |||
269 | { | 315 | { |
270 | static int wiphy_counter; | 316 | static int wiphy_counter; |
271 | 317 | ||
272 | struct cfg80211_registered_device *drv; | 318 | struct cfg80211_registered_device *rdev; |
273 | int alloc_size; | 319 | int alloc_size; |
274 | 320 | ||
275 | WARN_ON(!ops->add_key && ops->del_key); | 321 | WARN_ON(ops->add_key && (!ops->del_key || !ops->set_default_key)); |
276 | WARN_ON(ops->add_key && !ops->del_key); | 322 | WARN_ON(ops->auth && (!ops->assoc || !ops->deauth || !ops->disassoc)); |
323 | WARN_ON(ops->connect && !ops->disconnect); | ||
324 | WARN_ON(ops->join_ibss && !ops->leave_ibss); | ||
325 | WARN_ON(ops->add_virtual_intf && !ops->del_virtual_intf); | ||
326 | WARN_ON(ops->add_station && !ops->del_station); | ||
327 | WARN_ON(ops->add_mpath && !ops->del_mpath); | ||
277 | 328 | ||
278 | alloc_size = sizeof(*drv) + sizeof_priv; | 329 | alloc_size = sizeof(*rdev) + sizeof_priv; |
279 | 330 | ||
280 | drv = kzalloc(alloc_size, GFP_KERNEL); | 331 | rdev = kzalloc(alloc_size, GFP_KERNEL); |
281 | if (!drv) | 332 | if (!rdev) |
282 | return NULL; | 333 | return NULL; |
283 | 334 | ||
284 | drv->ops = ops; | 335 | rdev->ops = ops; |
285 | 336 | ||
286 | mutex_lock(&cfg80211_mutex); | 337 | mutex_lock(&cfg80211_mutex); |
287 | 338 | ||
288 | drv->wiphy_idx = wiphy_counter++; | 339 | rdev->wiphy_idx = wiphy_counter++; |
289 | 340 | ||
290 | if (unlikely(!wiphy_idx_valid(drv->wiphy_idx))) { | 341 | if (unlikely(!wiphy_idx_valid(rdev->wiphy_idx))) { |
291 | wiphy_counter--; | 342 | wiphy_counter--; |
292 | mutex_unlock(&cfg80211_mutex); | 343 | mutex_unlock(&cfg80211_mutex); |
293 | /* ugh, wrapped! */ | 344 | /* ugh, wrapped! */ |
294 | kfree(drv); | 345 | kfree(rdev); |
295 | return NULL; | 346 | return NULL; |
296 | } | 347 | } |
297 | 348 | ||
298 | mutex_unlock(&cfg80211_mutex); | 349 | mutex_unlock(&cfg80211_mutex); |
299 | 350 | ||
300 | /* give it a proper name */ | 351 | /* give it a proper name */ |
301 | dev_set_name(&drv->wiphy.dev, PHY_NAME "%d", drv->wiphy_idx); | 352 | dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); |
302 | 353 | ||
303 | mutex_init(&drv->mtx); | 354 | mutex_init(&rdev->mtx); |
304 | mutex_init(&drv->devlist_mtx); | 355 | mutex_init(&rdev->devlist_mtx); |
305 | INIT_LIST_HEAD(&drv->netdev_list); | 356 | INIT_LIST_HEAD(&rdev->netdev_list); |
306 | spin_lock_init(&drv->bss_lock); | 357 | spin_lock_init(&rdev->bss_lock); |
307 | INIT_LIST_HEAD(&drv->bss_list); | 358 | INIT_LIST_HEAD(&rdev->bss_list); |
359 | INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); | ||
308 | 360 | ||
309 | device_initialize(&drv->wiphy.dev); | 361 | device_initialize(&rdev->wiphy.dev); |
310 | drv->wiphy.dev.class = &ieee80211_class; | 362 | rdev->wiphy.dev.class = &ieee80211_class; |
311 | drv->wiphy.dev.platform_data = drv; | 363 | rdev->wiphy.dev.platform_data = rdev; |
312 | 364 | ||
313 | drv->rfkill_ops.set_block = cfg80211_rfkill_set_block; | 365 | rdev->wiphy.ps_default = CONFIG_CFG80211_DEFAULT_PS_VALUE; |
314 | drv->rfkill = rfkill_alloc(dev_name(&drv->wiphy.dev), | ||
315 | &drv->wiphy.dev, RFKILL_TYPE_WLAN, | ||
316 | &drv->rfkill_ops, drv); | ||
317 | 366 | ||
318 | if (!drv->rfkill) { | 367 | wiphy_net_set(&rdev->wiphy, &init_net); |
319 | kfree(drv); | 368 | |
369 | rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block; | ||
370 | rdev->rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev), | ||
371 | &rdev->wiphy.dev, RFKILL_TYPE_WLAN, | ||
372 | &rdev->rfkill_ops, rdev); | ||
373 | |||
374 | if (!rdev->rfkill) { | ||
375 | kfree(rdev); | ||
320 | return NULL; | 376 | return NULL; |
321 | } | 377 | } |
322 | 378 | ||
323 | INIT_WORK(&drv->rfkill_sync, cfg80211_rfkill_sync_work); | 379 | INIT_WORK(&rdev->rfkill_sync, cfg80211_rfkill_sync_work); |
380 | INIT_WORK(&rdev->conn_work, cfg80211_conn_work); | ||
381 | INIT_WORK(&rdev->event_work, cfg80211_event_work); | ||
382 | |||
383 | init_waitqueue_head(&rdev->dev_wait); | ||
324 | 384 | ||
325 | /* | 385 | /* |
326 | * Initialize wiphy parameters to IEEE 802.11 MIB default values. | 386 | * Initialize wiphy parameters to IEEE 802.11 MIB default values. |
327 | * Fragmentation and RTS threshold are disabled by default with the | 387 | * Fragmentation and RTS threshold are disabled by default with the |
328 | * special -1 value. | 388 | * special -1 value. |
329 | */ | 389 | */ |
330 | drv->wiphy.retry_short = 7; | 390 | rdev->wiphy.retry_short = 7; |
331 | drv->wiphy.retry_long = 4; | 391 | rdev->wiphy.retry_long = 4; |
332 | drv->wiphy.frag_threshold = (u32) -1; | 392 | rdev->wiphy.frag_threshold = (u32) -1; |
333 | drv->wiphy.rts_threshold = (u32) -1; | 393 | rdev->wiphy.rts_threshold = (u32) -1; |
334 | 394 | ||
335 | return &drv->wiphy; | 395 | return &rdev->wiphy; |
336 | } | 396 | } |
337 | EXPORT_SYMBOL(wiphy_new); | 397 | EXPORT_SYMBOL(wiphy_new); |
338 | 398 | ||
339 | int wiphy_register(struct wiphy *wiphy) | 399 | int wiphy_register(struct wiphy *wiphy) |
340 | { | 400 | { |
341 | struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); | 401 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
342 | int res; | 402 | int res; |
343 | enum ieee80211_band band; | 403 | enum ieee80211_band band; |
344 | struct ieee80211_supported_band *sband; | 404 | struct ieee80211_supported_band *sband; |
@@ -346,9 +406,6 @@ int wiphy_register(struct wiphy *wiphy) | |||
346 | int i; | 406 | int i; |
347 | u16 ifmodes = wiphy->interface_modes; | 407 | u16 ifmodes = wiphy->interface_modes; |
348 | 408 | ||
349 | if (WARN_ON(wiphy->max_scan_ssids < 1)) | ||
350 | return -EINVAL; | ||
351 | |||
352 | /* sanity check ifmodes */ | 409 | /* sanity check ifmodes */ |
353 | WARN_ON(!ifmodes); | 410 | WARN_ON(!ifmodes); |
354 | ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1; | 411 | ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1; |
@@ -395,11 +452,11 @@ int wiphy_register(struct wiphy *wiphy) | |||
395 | /* check and set up bitrates */ | 452 | /* check and set up bitrates */ |
396 | ieee80211_set_bitrate_flags(wiphy); | 453 | ieee80211_set_bitrate_flags(wiphy); |
397 | 454 | ||
398 | res = device_add(&drv->wiphy.dev); | 455 | res = device_add(&rdev->wiphy.dev); |
399 | if (res) | 456 | if (res) |
400 | return res; | 457 | return res; |
401 | 458 | ||
402 | res = rfkill_register(drv->rfkill); | 459 | res = rfkill_register(rdev->rfkill); |
403 | if (res) | 460 | if (res) |
404 | goto out_rm_dev; | 461 | goto out_rm_dev; |
405 | 462 | ||
@@ -408,16 +465,17 @@ int wiphy_register(struct wiphy *wiphy) | |||
408 | /* set up regulatory info */ | 465 | /* set up regulatory info */ |
409 | wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); | 466 | wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); |
410 | 467 | ||
411 | list_add(&drv->list, &cfg80211_drv_list); | 468 | list_add(&rdev->list, &cfg80211_rdev_list); |
469 | cfg80211_rdev_list_generation++; | ||
412 | 470 | ||
413 | mutex_unlock(&cfg80211_mutex); | 471 | mutex_unlock(&cfg80211_mutex); |
414 | 472 | ||
415 | /* add to debugfs */ | 473 | /* add to debugfs */ |
416 | drv->wiphy.debugfsdir = | 474 | rdev->wiphy.debugfsdir = |
417 | debugfs_create_dir(wiphy_name(&drv->wiphy), | 475 | debugfs_create_dir(wiphy_name(&rdev->wiphy), |
418 | ieee80211_debugfs_dir); | 476 | ieee80211_debugfs_dir); |
419 | if (IS_ERR(drv->wiphy.debugfsdir)) | 477 | if (IS_ERR(rdev->wiphy.debugfsdir)) |
420 | drv->wiphy.debugfsdir = NULL; | 478 | rdev->wiphy.debugfsdir = NULL; |
421 | 479 | ||
422 | if (wiphy->custom_regulatory) { | 480 | if (wiphy->custom_regulatory) { |
423 | struct regulatory_request request; | 481 | struct regulatory_request request; |
@@ -430,83 +488,101 @@ int wiphy_register(struct wiphy *wiphy) | |||
430 | nl80211_send_reg_change_event(&request); | 488 | nl80211_send_reg_change_event(&request); |
431 | } | 489 | } |
432 | 490 | ||
433 | cfg80211_debugfs_drv_add(drv); | 491 | cfg80211_debugfs_rdev_add(rdev); |
434 | 492 | ||
435 | return 0; | 493 | return 0; |
436 | 494 | ||
437 | out_rm_dev: | 495 | out_rm_dev: |
438 | device_del(&drv->wiphy.dev); | 496 | device_del(&rdev->wiphy.dev); |
439 | return res; | 497 | return res; |
440 | } | 498 | } |
441 | EXPORT_SYMBOL(wiphy_register); | 499 | EXPORT_SYMBOL(wiphy_register); |
442 | 500 | ||
443 | void wiphy_rfkill_start_polling(struct wiphy *wiphy) | 501 | void wiphy_rfkill_start_polling(struct wiphy *wiphy) |
444 | { | 502 | { |
445 | struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); | 503 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
446 | 504 | ||
447 | if (!drv->ops->rfkill_poll) | 505 | if (!rdev->ops->rfkill_poll) |
448 | return; | 506 | return; |
449 | drv->rfkill_ops.poll = cfg80211_rfkill_poll; | 507 | rdev->rfkill_ops.poll = cfg80211_rfkill_poll; |
450 | rfkill_resume_polling(drv->rfkill); | 508 | rfkill_resume_polling(rdev->rfkill); |
451 | } | 509 | } |
452 | EXPORT_SYMBOL(wiphy_rfkill_start_polling); | 510 | EXPORT_SYMBOL(wiphy_rfkill_start_polling); |
453 | 511 | ||
454 | void wiphy_rfkill_stop_polling(struct wiphy *wiphy) | 512 | void wiphy_rfkill_stop_polling(struct wiphy *wiphy) |
455 | { | 513 | { |
456 | struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); | 514 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
457 | 515 | ||
458 | rfkill_pause_polling(drv->rfkill); | 516 | rfkill_pause_polling(rdev->rfkill); |
459 | } | 517 | } |
460 | EXPORT_SYMBOL(wiphy_rfkill_stop_polling); | 518 | EXPORT_SYMBOL(wiphy_rfkill_stop_polling); |
461 | 519 | ||
462 | void wiphy_unregister(struct wiphy *wiphy) | 520 | void wiphy_unregister(struct wiphy *wiphy) |
463 | { | 521 | { |
464 | struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); | 522 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
465 | 523 | ||
466 | rfkill_unregister(drv->rfkill); | 524 | rfkill_unregister(rdev->rfkill); |
467 | 525 | ||
468 | /* protect the device list */ | 526 | /* protect the device list */ |
469 | mutex_lock(&cfg80211_mutex); | 527 | mutex_lock(&cfg80211_mutex); |
470 | 528 | ||
471 | BUG_ON(!list_empty(&drv->netdev_list)); | 529 | wait_event(rdev->dev_wait, ({ |
530 | int __count; | ||
531 | mutex_lock(&rdev->devlist_mtx); | ||
532 | __count = rdev->opencount; | ||
533 | mutex_unlock(&rdev->devlist_mtx); | ||
534 | __count == 0;})); | ||
535 | |||
536 | mutex_lock(&rdev->devlist_mtx); | ||
537 | BUG_ON(!list_empty(&rdev->netdev_list)); | ||
538 | mutex_unlock(&rdev->devlist_mtx); | ||
539 | |||
540 | /* | ||
541 | * First remove the hardware from everywhere; this makes | ||
542 | * it impossible to find from userspace. | ||
543 | */ | ||
544 | cfg80211_debugfs_rdev_del(rdev); | ||
545 | list_del(&rdev->list); | ||
472 | 546 | ||
473 | /* | 547 | /* |
474 | * Try to grab drv->mtx. If a command is still in progress, | 548 | * Try to grab rdev->mtx. If a command is still in progress, |
475 | * hopefully the driver will refuse it since it's tearing | 549 | * hopefully the driver will refuse it since it's tearing |
476 | * down the device already. We wait for this command to complete | 550 | * down the device already. We wait for this command to complete |
477 | * before unlinking the item from the list. | 551 | * before unlinking the item from the list. |
478 | * Note: as codified by the BUG_ON above we cannot get here if | 552 | * Note: as codified by the BUG_ON above we cannot get here if |
479 | * a virtual interface is still associated. Hence, we can only | 553 | * a virtual interface is still present. Hence, we can only get |
480 | * get to lock contention here if userspace issues a command | 554 | * to lock contention here if userspace issues a command that |
481 | * that identified the hardware by wiphy index. | 555 | * identified the hardware by wiphy index. |
482 | */ | 556 | */ |
483 | mutex_lock(&drv->mtx); | 557 | cfg80211_lock_rdev(rdev); |
484 | /* unlock again before freeing */ | 558 | /* nothing */ |
485 | mutex_unlock(&drv->mtx); | 559 | cfg80211_unlock_rdev(rdev); |
486 | |||
487 | cfg80211_debugfs_drv_del(drv); | ||
488 | 560 | ||
489 | /* If this device got a regulatory hint tell core it's | 561 | /* If this device got a regulatory hint tell core it's |
490 | * free to listen now to a new shiny device regulatory hint */ | 562 | * free to listen now to a new shiny device regulatory hint */ |
491 | reg_device_remove(wiphy); | 563 | reg_device_remove(wiphy); |
492 | 564 | ||
493 | list_del(&drv->list); | 565 | cfg80211_rdev_list_generation++; |
494 | device_del(&drv->wiphy.dev); | 566 | device_del(&rdev->wiphy.dev); |
495 | debugfs_remove(drv->wiphy.debugfsdir); | 567 | debugfs_remove(rdev->wiphy.debugfsdir); |
496 | 568 | ||
497 | mutex_unlock(&cfg80211_mutex); | 569 | mutex_unlock(&cfg80211_mutex); |
570 | |||
571 | flush_work(&rdev->scan_done_wk); | ||
572 | cancel_work_sync(&rdev->conn_work); | ||
573 | flush_work(&rdev->event_work); | ||
498 | } | 574 | } |
499 | EXPORT_SYMBOL(wiphy_unregister); | 575 | EXPORT_SYMBOL(wiphy_unregister); |
500 | 576 | ||
501 | void cfg80211_dev_free(struct cfg80211_registered_device *drv) | 577 | void cfg80211_dev_free(struct cfg80211_registered_device *rdev) |
502 | { | 578 | { |
503 | struct cfg80211_internal_bss *scan, *tmp; | 579 | struct cfg80211_internal_bss *scan, *tmp; |
504 | rfkill_destroy(drv->rfkill); | 580 | rfkill_destroy(rdev->rfkill); |
505 | mutex_destroy(&drv->mtx); | 581 | mutex_destroy(&rdev->mtx); |
506 | mutex_destroy(&drv->devlist_mtx); | 582 | mutex_destroy(&rdev->devlist_mtx); |
507 | list_for_each_entry_safe(scan, tmp, &drv->bss_list, list) | 583 | list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) |
508 | cfg80211_put_bss(&scan->pub); | 584 | cfg80211_put_bss(&scan->pub); |
509 | kfree(drv); | 585 | kfree(rdev); |
510 | } | 586 | } |
511 | 587 | ||
512 | void wiphy_free(struct wiphy *wiphy) | 588 | void wiphy_free(struct wiphy *wiphy) |
@@ -517,68 +593,181 @@ EXPORT_SYMBOL(wiphy_free); | |||
517 | 593 | ||
518 | void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) | 594 | void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) |
519 | { | 595 | { |
520 | struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy); | 596 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
521 | 597 | ||
522 | if (rfkill_set_hw_state(drv->rfkill, blocked)) | 598 | if (rfkill_set_hw_state(rdev->rfkill, blocked)) |
523 | schedule_work(&drv->rfkill_sync); | 599 | schedule_work(&rdev->rfkill_sync); |
524 | } | 600 | } |
525 | EXPORT_SYMBOL(wiphy_rfkill_set_hw_state); | 601 | EXPORT_SYMBOL(wiphy_rfkill_set_hw_state); |
526 | 602 | ||
603 | static void wdev_cleanup_work(struct work_struct *work) | ||
604 | { | ||
605 | struct wireless_dev *wdev; | ||
606 | struct cfg80211_registered_device *rdev; | ||
607 | |||
608 | wdev = container_of(work, struct wireless_dev, cleanup_work); | ||
609 | rdev = wiphy_to_dev(wdev->wiphy); | ||
610 | |||
611 | cfg80211_lock_rdev(rdev); | ||
612 | |||
613 | if (WARN_ON(rdev->scan_req && rdev->scan_req->dev == wdev->netdev)) { | ||
614 | rdev->scan_req->aborted = true; | ||
615 | ___cfg80211_scan_done(rdev, true); | ||
616 | } | ||
617 | |||
618 | cfg80211_unlock_rdev(rdev); | ||
619 | |||
620 | mutex_lock(&rdev->devlist_mtx); | ||
621 | rdev->opencount--; | ||
622 | mutex_unlock(&rdev->devlist_mtx); | ||
623 | wake_up(&rdev->dev_wait); | ||
624 | |||
625 | dev_put(wdev->netdev); | ||
626 | } | ||
627 | |||
527 | static int cfg80211_netdev_notifier_call(struct notifier_block * nb, | 628 | static int cfg80211_netdev_notifier_call(struct notifier_block * nb, |
528 | unsigned long state, | 629 | unsigned long state, |
529 | void *ndev) | 630 | void *ndev) |
530 | { | 631 | { |
531 | struct net_device *dev = ndev; | 632 | struct net_device *dev = ndev; |
633 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
532 | struct cfg80211_registered_device *rdev; | 634 | struct cfg80211_registered_device *rdev; |
533 | 635 | ||
534 | if (!dev->ieee80211_ptr) | 636 | if (!wdev) |
535 | return NOTIFY_DONE; | 637 | return NOTIFY_DONE; |
536 | 638 | ||
537 | rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); | 639 | rdev = wiphy_to_dev(wdev->wiphy); |
538 | 640 | ||
539 | WARN_ON(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_UNSPECIFIED); | 641 | WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED); |
540 | 642 | ||
541 | switch (state) { | 643 | switch (state) { |
542 | case NETDEV_REGISTER: | 644 | case NETDEV_REGISTER: |
645 | /* | ||
646 | * NB: cannot take rdev->mtx here because this may be | ||
647 | * called within code protected by it when interfaces | ||
648 | * are added with nl80211. | ||
649 | */ | ||
650 | mutex_init(&wdev->mtx); | ||
651 | INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work); | ||
652 | INIT_LIST_HEAD(&wdev->event_list); | ||
653 | spin_lock_init(&wdev->event_lock); | ||
543 | mutex_lock(&rdev->devlist_mtx); | 654 | mutex_lock(&rdev->devlist_mtx); |
544 | list_add(&dev->ieee80211_ptr->list, &rdev->netdev_list); | 655 | list_add(&wdev->list, &rdev->netdev_list); |
656 | rdev->devlist_generation++; | ||
657 | /* can only change netns with wiphy */ | ||
658 | dev->features |= NETIF_F_NETNS_LOCAL; | ||
659 | |||
545 | if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj, | 660 | if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj, |
546 | "phy80211")) { | 661 | "phy80211")) { |
547 | printk(KERN_ERR "wireless: failed to add phy80211 " | 662 | printk(KERN_ERR "wireless: failed to add phy80211 " |
548 | "symlink to netdev!\n"); | 663 | "symlink to netdev!\n"); |
549 | } | 664 | } |
550 | dev->ieee80211_ptr->netdev = dev; | 665 | wdev->netdev = dev; |
666 | wdev->sme_state = CFG80211_SME_IDLE; | ||
667 | mutex_unlock(&rdev->devlist_mtx); | ||
551 | #ifdef CONFIG_WIRELESS_EXT | 668 | #ifdef CONFIG_WIRELESS_EXT |
552 | dev->ieee80211_ptr->wext.default_key = -1; | 669 | if (!dev->wireless_handlers) |
553 | dev->ieee80211_ptr->wext.default_mgmt_key = -1; | 670 | dev->wireless_handlers = &cfg80211_wext_handler; |
671 | wdev->wext.default_key = -1; | ||
672 | wdev->wext.default_mgmt_key = -1; | ||
673 | wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; | ||
674 | wdev->wext.ps = wdev->wiphy->ps_default; | ||
675 | wdev->wext.ps_timeout = 100; | ||
676 | if (rdev->ops->set_power_mgmt) | ||
677 | if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, | ||
678 | wdev->wext.ps, | ||
679 | wdev->wext.ps_timeout)) { | ||
680 | /* assume this means it's off */ | ||
681 | wdev->wext.ps = false; | ||
682 | } | ||
554 | #endif | 683 | #endif |
555 | mutex_unlock(&rdev->devlist_mtx); | ||
556 | break; | 684 | break; |
557 | case NETDEV_GOING_DOWN: | 685 | case NETDEV_GOING_DOWN: |
558 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) | 686 | switch (wdev->iftype) { |
687 | case NL80211_IFTYPE_ADHOC: | ||
688 | cfg80211_leave_ibss(rdev, dev, true); | ||
559 | break; | 689 | break; |
560 | if (!dev->ieee80211_ptr->ssid_len) | 690 | case NL80211_IFTYPE_STATION: |
691 | wdev_lock(wdev); | ||
692 | #ifdef CONFIG_WIRELESS_EXT | ||
693 | kfree(wdev->wext.ie); | ||
694 | wdev->wext.ie = NULL; | ||
695 | wdev->wext.ie_len = 0; | ||
696 | wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; | ||
697 | #endif | ||
698 | __cfg80211_disconnect(rdev, dev, | ||
699 | WLAN_REASON_DEAUTH_LEAVING, true); | ||
700 | cfg80211_mlme_down(rdev, dev); | ||
701 | wdev_unlock(wdev); | ||
702 | break; | ||
703 | default: | ||
561 | break; | 704 | break; |
562 | cfg80211_leave_ibss(rdev, dev, true); | 705 | } |
706 | break; | ||
707 | case NETDEV_DOWN: | ||
708 | dev_hold(dev); | ||
709 | schedule_work(&wdev->cleanup_work); | ||
563 | break; | 710 | break; |
564 | case NETDEV_UP: | 711 | case NETDEV_UP: |
712 | /* | ||
713 | * If we have a really quick DOWN/UP succession we may | ||
714 | * have this work still pending ... cancel it and see | ||
715 | * if it was pending, in which case we need to account | ||
716 | * for some of the work it would have done. | ||
717 | */ | ||
718 | if (cancel_work_sync(&wdev->cleanup_work)) { | ||
719 | mutex_lock(&rdev->devlist_mtx); | ||
720 | rdev->opencount--; | ||
721 | mutex_unlock(&rdev->devlist_mtx); | ||
722 | dev_put(dev); | ||
723 | } | ||
565 | #ifdef CONFIG_WIRELESS_EXT | 724 | #ifdef CONFIG_WIRELESS_EXT |
566 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) | 725 | cfg80211_lock_rdev(rdev); |
726 | mutex_lock(&rdev->devlist_mtx); | ||
727 | wdev_lock(wdev); | ||
728 | switch (wdev->iftype) { | ||
729 | case NL80211_IFTYPE_ADHOC: | ||
730 | cfg80211_ibss_wext_join(rdev, wdev); | ||
567 | break; | 731 | break; |
568 | if (!dev->ieee80211_ptr->wext.ibss.ssid_len) | 732 | case NL80211_IFTYPE_STATION: |
733 | cfg80211_mgd_wext_connect(rdev, wdev); | ||
569 | break; | 734 | break; |
570 | cfg80211_join_ibss(rdev, dev, &dev->ieee80211_ptr->wext.ibss); | 735 | default: |
571 | break; | 736 | break; |
737 | } | ||
738 | wdev_unlock(wdev); | ||
739 | rdev->opencount++; | ||
740 | mutex_unlock(&rdev->devlist_mtx); | ||
741 | cfg80211_unlock_rdev(rdev); | ||
572 | #endif | 742 | #endif |
743 | break; | ||
573 | case NETDEV_UNREGISTER: | 744 | case NETDEV_UNREGISTER: |
745 | /* | ||
746 | * NB: cannot take rdev->mtx here because this may be | ||
747 | * called within code protected by it when interfaces | ||
748 | * are removed with nl80211. | ||
749 | */ | ||
574 | mutex_lock(&rdev->devlist_mtx); | 750 | mutex_lock(&rdev->devlist_mtx); |
575 | if (!list_empty(&dev->ieee80211_ptr->list)) { | 751 | /* |
752 | * It is possible to get NETDEV_UNREGISTER | ||
753 | * multiple times. To detect that, check | ||
754 | * that the interface is still on the list | ||
755 | * of registered interfaces, and only then | ||
756 | * remove and clean it up. | ||
757 | */ | ||
758 | if (!list_empty(&wdev->list)) { | ||
576 | sysfs_remove_link(&dev->dev.kobj, "phy80211"); | 759 | sysfs_remove_link(&dev->dev.kobj, "phy80211"); |
577 | list_del_init(&dev->ieee80211_ptr->list); | 760 | list_del_init(&wdev->list); |
761 | rdev->devlist_generation++; | ||
762 | #ifdef CONFIG_WIRELESS_EXT | ||
763 | kfree(wdev->wext.keys); | ||
764 | #endif | ||
578 | } | 765 | } |
579 | mutex_unlock(&rdev->devlist_mtx); | 766 | mutex_unlock(&rdev->devlist_mtx); |
580 | break; | 767 | break; |
581 | case NETDEV_PRE_UP: | 768 | case NETDEV_PRE_UP: |
769 | if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) | ||
770 | return notifier_from_errno(-EOPNOTSUPP); | ||
582 | if (rfkill_blocked(rdev->rfkill)) | 771 | if (rfkill_blocked(rdev->rfkill)) |
583 | return notifier_from_errno(-ERFKILL); | 772 | return notifier_from_errno(-ERFKILL); |
584 | break; | 773 | break; |
@@ -591,10 +780,32 @@ static struct notifier_block cfg80211_netdev_notifier = { | |||
591 | .notifier_call = cfg80211_netdev_notifier_call, | 780 | .notifier_call = cfg80211_netdev_notifier_call, |
592 | }; | 781 | }; |
593 | 782 | ||
594 | static int cfg80211_init(void) | 783 | static void __net_exit cfg80211_pernet_exit(struct net *net) |
784 | { | ||
785 | struct cfg80211_registered_device *rdev; | ||
786 | |||
787 | rtnl_lock(); | ||
788 | mutex_lock(&cfg80211_mutex); | ||
789 | list_for_each_entry(rdev, &cfg80211_rdev_list, list) { | ||
790 | if (net_eq(wiphy_net(&rdev->wiphy), net)) | ||
791 | WARN_ON(cfg80211_switch_netns(rdev, &init_net)); | ||
792 | } | ||
793 | mutex_unlock(&cfg80211_mutex); | ||
794 | rtnl_unlock(); | ||
795 | } | ||
796 | |||
797 | static struct pernet_operations cfg80211_pernet_ops = { | ||
798 | .exit = cfg80211_pernet_exit, | ||
799 | }; | ||
800 | |||
801 | static int __init cfg80211_init(void) | ||
595 | { | 802 | { |
596 | int err; | 803 | int err; |
597 | 804 | ||
805 | err = register_pernet_device(&cfg80211_pernet_ops); | ||
806 | if (err) | ||
807 | goto out_fail_pernet; | ||
808 | |||
598 | err = wiphy_sysfs_init(); | 809 | err = wiphy_sysfs_init(); |
599 | if (err) | 810 | if (err) |
600 | goto out_fail_sysfs; | 811 | goto out_fail_sysfs; |
@@ -622,9 +833,10 @@ out_fail_nl80211: | |||
622 | out_fail_notifier: | 833 | out_fail_notifier: |
623 | wiphy_sysfs_exit(); | 834 | wiphy_sysfs_exit(); |
624 | out_fail_sysfs: | 835 | out_fail_sysfs: |
836 | unregister_pernet_device(&cfg80211_pernet_ops); | ||
837 | out_fail_pernet: | ||
625 | return err; | 838 | return err; |
626 | } | 839 | } |
627 | |||
628 | subsys_initcall(cfg80211_init); | 840 | subsys_initcall(cfg80211_init); |
629 | 841 | ||
630 | static void cfg80211_exit(void) | 842 | static void cfg80211_exit(void) |
@@ -634,5 +846,6 @@ static void cfg80211_exit(void) | |||
634 | unregister_netdevice_notifier(&cfg80211_netdev_notifier); | 846 | unregister_netdevice_notifier(&cfg80211_netdev_notifier); |
635 | wiphy_sysfs_exit(); | 847 | wiphy_sysfs_exit(); |
636 | regulatory_exit(); | 848 | regulatory_exit(); |
849 | unregister_pernet_device(&cfg80211_pernet_ops); | ||
637 | } | 850 | } |
638 | module_exit(cfg80211_exit); | 851 | module_exit(cfg80211_exit); |
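wiphy_unregister() now blocks (via the wait_event on rdev->opencount) until every netdev on the wiphy has been closed and its cleanup work has run, so a driver's remove path keeps the usual ordering: take the interfaces down first, then drop the wiphy. A rough sketch, where priv is a hypothetical driver structure:

	unregister_netdev(priv->netdev);  /* NETDEV_DOWN/UNREGISTER drop opencount */
	wiphy_unregister(priv->wiphy);    /* waits for opencount == 0 */
	wiphy_free(priv->wiphy);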
diff --git a/net/wireless/core.h b/net/wireless/core.h index bfa340c7abb5..2a33d8bc886b 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -49,6 +49,9 @@ struct cfg80211_registered_device { | |||
49 | /* associate netdev list */ | 49 | /* associate netdev list */ |
50 | struct mutex devlist_mtx; | 50 | struct mutex devlist_mtx; |
51 | struct list_head netdev_list; | 51 | struct list_head netdev_list; |
52 | int devlist_generation; | ||
53 | int opencount; /* also protected by devlist_mtx */ | ||
54 | wait_queue_head_t dev_wait; | ||
52 | 55 | ||
53 | /* BSSes/scanning */ | 56 | /* BSSes/scanning */ |
54 | spinlock_t bss_lock; | 57 | spinlock_t bss_lock; |
@@ -57,6 +60,17 @@ struct cfg80211_registered_device { | |||
57 | u32 bss_generation; | 60 | u32 bss_generation; |
58 | struct cfg80211_scan_request *scan_req; /* protected by RTNL */ | 61 | struct cfg80211_scan_request *scan_req; /* protected by RTNL */ |
59 | unsigned long suspend_at; | 62 | unsigned long suspend_at; |
63 | struct work_struct scan_done_wk; | ||
64 | |||
65 | #ifdef CONFIG_NL80211_TESTMODE | ||
66 | struct genl_info *testmode_info; | ||
67 | #endif | ||
68 | |||
69 | struct work_struct conn_work; | ||
70 | struct work_struct event_work; | ||
71 | |||
72 | /* current channel */ | ||
73 | struct ieee80211_channel *channel; | ||
60 | 74 | ||
61 | #ifdef CONFIG_CFG80211_DEBUGFS | 75 | #ifdef CONFIG_CFG80211_DEBUGFS |
62 | /* Debugfs entries */ | 76 | /* Debugfs entries */ |
@@ -89,13 +103,14 @@ bool wiphy_idx_valid(int wiphy_idx) | |||
89 | } | 103 | } |
90 | 104 | ||
91 | extern struct mutex cfg80211_mutex; | 105 | extern struct mutex cfg80211_mutex; |
92 | extern struct list_head cfg80211_drv_list; | 106 | extern struct list_head cfg80211_rdev_list; |
107 | extern int cfg80211_rdev_list_generation; | ||
93 | 108 | ||
94 | #define assert_cfg80211_lock() WARN_ON(!mutex_is_locked(&cfg80211_mutex)) | 109 | #define assert_cfg80211_lock() WARN_ON(!mutex_is_locked(&cfg80211_mutex)) |
95 | 110 | ||
96 | /* | 111 | /* |
97 | * You can use this to mark a wiphy_idx as not having an associated wiphy. | 112 | * You can use this to mark a wiphy_idx as not having an associated wiphy. |
98 | * It guarantees cfg80211_drv_by_wiphy_idx(wiphy_idx) will return NULL | 113 | * It guarantees cfg80211_rdev_by_wiphy_idx(wiphy_idx) will return NULL |
99 | */ | 114 | */ |
100 | #define WIPHY_IDX_STALE -1 | 115 | #define WIPHY_IDX_STALE -1 |
101 | 116 | ||
@@ -104,17 +119,40 @@ struct cfg80211_internal_bss { | |||
104 | struct rb_node rbn; | 119 | struct rb_node rbn; |
105 | unsigned long ts; | 120 | unsigned long ts; |
106 | struct kref ref; | 121 | struct kref ref; |
107 | bool hold, ies_allocated; | 122 | atomic_t hold; |
123 | bool ies_allocated; | ||
108 | 124 | ||
109 | /* must be last because of priv member */ | 125 | /* must be last because of priv member */ |
110 | struct cfg80211_bss pub; | 126 | struct cfg80211_bss pub; |
111 | }; | 127 | }; |
112 | 128 | ||
113 | struct cfg80211_registered_device *cfg80211_drv_by_wiphy_idx(int wiphy_idx); | 129 | static inline struct cfg80211_internal_bss *bss_from_pub(struct cfg80211_bss *pub) |
130 | { | ||
131 | return container_of(pub, struct cfg80211_internal_bss, pub); | ||
132 | } | ||
133 | |||
134 | static inline void cfg80211_ref_bss(struct cfg80211_internal_bss *bss) | ||
135 | { | ||
136 | kref_get(&bss->ref); | ||
137 | } | ||
138 | |||
139 | static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss) | ||
140 | { | ||
141 | atomic_inc(&bss->hold); | ||
142 | } | ||
143 | |||
144 | static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss) | ||
145 | { | ||
146 | int r = atomic_dec_return(&bss->hold); | ||
147 | WARN_ON(r < 0); | ||
148 | } | ||
149 | |||
150 | |||
151 | struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx); | ||
114 | int get_wiphy_idx(struct wiphy *wiphy); | 152 | int get_wiphy_idx(struct wiphy *wiphy); |
115 | 153 | ||
116 | struct cfg80211_registered_device * | 154 | struct cfg80211_registered_device * |
117 | __cfg80211_drv_from_info(struct genl_info *info); | 155 | __cfg80211_rdev_from_info(struct genl_info *info); |
118 | 156 | ||
119 | /* | 157 | /* |
120 | * This function returns a pointer to the driver | 158 | * This function returns a pointer to the driver |
@@ -122,12 +160,12 @@ __cfg80211_drv_from_info(struct genl_info *info); | |||
122 | * If successful, it returns non-NULL and also locks | 160 | * If successful, it returns non-NULL and also locks |
123 | * the driver's mutex! | 161 | * the driver's mutex! |
124 | * | 162 | * |
125 | * This means that you need to call cfg80211_put_dev() | 163 | * This means that you need to call cfg80211_unlock_rdev() |
126 | * before being allowed to acquire &cfg80211_mutex! | 164 | * before being allowed to acquire &cfg80211_mutex! |
127 | * | 165 | * |
128 | * This is necessary because we need to lock the global | 166 | * This is necessary because we need to lock the global |
129 | * mutex to get an item off the list safely, and then | 167 | * mutex to get an item off the list safely, and then |
130 | * we lock the drv mutex so it doesn't go away under us. | 168 | * we lock the rdev mutex so it doesn't go away under us. |
131 | * | 169 | * |
132 | * We don't want to keep cfg80211_mutex locked | 170 | * We don't want to keep cfg80211_mutex locked |
133 | * for all the time in order to allow requests on | 171 | * for all the time in order to allow requests on |
@@ -139,19 +177,93 @@ __cfg80211_drv_from_info(struct genl_info *info); | |||
139 | extern struct cfg80211_registered_device * | 177 | extern struct cfg80211_registered_device * |
140 | cfg80211_get_dev_from_info(struct genl_info *info); | 178 | cfg80211_get_dev_from_info(struct genl_info *info); |
141 | 179 | ||
142 | /* requires cfg80211_drv_mutex to be held! */ | 180 | /* requires cfg80211_rdev_mutex to be held! */ |
143 | struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx); | 181 | struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx); |
144 | 182 | ||
145 | /* identical to cfg80211_get_dev_from_info but only operate on ifindex */ | 183 | /* identical to cfg80211_get_dev_from_info but only operate on ifindex */ |
146 | extern struct cfg80211_registered_device * | 184 | extern struct cfg80211_registered_device * |
147 | cfg80211_get_dev_from_ifindex(int ifindex); | 185 | cfg80211_get_dev_from_ifindex(struct net *net, int ifindex); |
186 | |||
187 | int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, | ||
188 | struct net *net); | ||
189 | |||
190 | static inline void cfg80211_lock_rdev(struct cfg80211_registered_device *rdev) | ||
191 | { | ||
192 | mutex_lock(&rdev->mtx); | ||
193 | } | ||
194 | |||
195 | static inline void cfg80211_unlock_rdev(struct cfg80211_registered_device *rdev) | ||
196 | { | ||
197 | BUG_ON(IS_ERR(rdev) || !rdev); | ||
198 | mutex_unlock(&rdev->mtx); | ||
199 | } | ||
200 | |||
201 | static inline void wdev_lock(struct wireless_dev *wdev) | ||
202 | __acquires(wdev) | ||
203 | { | ||
204 | mutex_lock(&wdev->mtx); | ||
205 | __acquire(wdev->mtx); | ||
206 | } | ||
207 | |||
208 | static inline void wdev_unlock(struct wireless_dev *wdev) | ||
209 | __releases(wdev) | ||
210 | { | ||
211 | __release(wdev->mtx); | ||
212 | mutex_unlock(&wdev->mtx); | ||
213 | } | ||
214 | |||
215 | #define ASSERT_RDEV_LOCK(rdev) WARN_ON(!mutex_is_locked(&(rdev)->mtx)); | ||
216 | #define ASSERT_WDEV_LOCK(wdev) WARN_ON(!mutex_is_locked(&(wdev)->mtx)); | ||
217 | |||
218 | enum cfg80211_event_type { | ||
219 | EVENT_CONNECT_RESULT, | ||
220 | EVENT_ROAMED, | ||
221 | EVENT_DISCONNECTED, | ||
222 | EVENT_IBSS_JOINED, | ||
223 | }; | ||
224 | |||
225 | struct cfg80211_event { | ||
226 | struct list_head list; | ||
227 | enum cfg80211_event_type type; | ||
228 | |||
229 | union { | ||
230 | struct { | ||
231 | u8 bssid[ETH_ALEN]; | ||
232 | const u8 *req_ie; | ||
233 | const u8 *resp_ie; | ||
234 | size_t req_ie_len; | ||
235 | size_t resp_ie_len; | ||
236 | u16 status; | ||
237 | } cr; | ||
238 | struct { | ||
239 | u8 bssid[ETH_ALEN]; | ||
240 | const u8 *req_ie; | ||
241 | const u8 *resp_ie; | ||
242 | size_t req_ie_len; | ||
243 | size_t resp_ie_len; | ||
244 | } rm; | ||
245 | struct { | ||
246 | const u8 *ie; | ||
247 | size_t ie_len; | ||
248 | u16 reason; | ||
249 | } dc; | ||
250 | struct { | ||
251 | u8 bssid[ETH_ALEN]; | ||
252 | } ij; | ||
253 | }; | ||
254 | }; | ||
255 | |||
256 | struct cfg80211_cached_keys { | ||
257 | struct key_params params[6]; | ||
258 | u8 data[6][WLAN_MAX_KEY_LEN]; | ||
259 | int def, defmgmt; | ||
260 | }; | ||
148 | 261 | ||
149 | extern void cfg80211_put_dev(struct cfg80211_registered_device *drv); | ||
150 | 262 | ||
151 | /* free object */ | 263 | /* free object */ |
152 | extern void cfg80211_dev_free(struct cfg80211_registered_device *drv); | 264 | extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev); |
153 | 265 | ||
154 | extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv, | 266 | extern int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, |
155 | char *newname); | 267 | char *newname); |
156 | 268 | ||
157 | void ieee80211_set_bitrate_flags(struct wiphy *wiphy); | 269 | void ieee80211_set_bitrate_flags(struct wiphy *wiphy); |
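The helpers above encode the lock nesting that the rest of this series relies on; the NETDEV_UP handler in core.c shows the full order. As a sketch:

	cfg80211_lock_rdev(rdev);         /* rdev->mtx               */
	mutex_lock(&rdev->devlist_mtx);   /* interface list          */
	wdev_lock(wdev);                  /* per-interface wdev->mtx */
	/* ... touch per-interface SME/wext state here ... */
	wdev_unlock(wdev);
	mutex_unlock(&rdev->devlist_mtx);
	cfg80211_unlock_rdev(rdev);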
@@ -163,15 +275,124 @@ void cfg80211_bss_age(struct cfg80211_registered_device *dev, | |||
163 | unsigned long age_secs); | 275 | unsigned long age_secs); |
164 | 276 | ||
165 | /* IBSS */ | 277 | /* IBSS */ |
278 | int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | ||
279 | struct net_device *dev, | ||
280 | struct cfg80211_ibss_params *params, | ||
281 | struct cfg80211_cached_keys *connkeys); | ||
166 | int cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | 282 | int cfg80211_join_ibss(struct cfg80211_registered_device *rdev, |
167 | struct net_device *dev, | 283 | struct net_device *dev, |
168 | struct cfg80211_ibss_params *params); | 284 | struct cfg80211_ibss_params *params, |
285 | struct cfg80211_cached_keys *connkeys); | ||
169 | void cfg80211_clear_ibss(struct net_device *dev, bool nowext); | 286 | void cfg80211_clear_ibss(struct net_device *dev, bool nowext); |
170 | int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, | 287 | int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, |
171 | struct net_device *dev, bool nowext); | 288 | struct net_device *dev, bool nowext); |
289 | void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid); | ||
290 | int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, | ||
291 | struct wireless_dev *wdev); | ||
292 | |||
293 | /* MLME */ | ||
294 | int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | ||
295 | struct net_device *dev, | ||
296 | struct ieee80211_channel *chan, | ||
297 | enum nl80211_auth_type auth_type, | ||
298 | const u8 *bssid, | ||
299 | const u8 *ssid, int ssid_len, | ||
300 | const u8 *ie, int ie_len, | ||
301 | const u8 *key, int key_len, int key_idx); | ||
302 | int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | ||
303 | struct net_device *dev, struct ieee80211_channel *chan, | ||
304 | enum nl80211_auth_type auth_type, const u8 *bssid, | ||
305 | const u8 *ssid, int ssid_len, | ||
306 | const u8 *ie, int ie_len, | ||
307 | const u8 *key, int key_len, int key_idx); | ||
308 | int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, | ||
309 | struct net_device *dev, | ||
310 | struct ieee80211_channel *chan, | ||
311 | const u8 *bssid, const u8 *prev_bssid, | ||
312 | const u8 *ssid, int ssid_len, | ||
313 | const u8 *ie, int ie_len, bool use_mfp, | ||
314 | struct cfg80211_crypto_settings *crypt); | ||
315 | int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, | ||
316 | struct net_device *dev, struct ieee80211_channel *chan, | ||
317 | const u8 *bssid, const u8 *prev_bssid, | ||
318 | const u8 *ssid, int ssid_len, | ||
319 | const u8 *ie, int ie_len, bool use_mfp, | ||
320 | struct cfg80211_crypto_settings *crypt); | ||
321 | int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | ||
322 | struct net_device *dev, const u8 *bssid, | ||
323 | const u8 *ie, int ie_len, u16 reason); | ||
324 | int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | ||
325 | struct net_device *dev, const u8 *bssid, | ||
326 | const u8 *ie, int ie_len, u16 reason); | ||
327 | int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | ||
328 | struct net_device *dev, const u8 *bssid, | ||
329 | const u8 *ie, int ie_len, u16 reason); | ||
330 | void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, | ||
331 | struct net_device *dev); | ||
332 | void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | ||
333 | const u8 *req_ie, size_t req_ie_len, | ||
334 | const u8 *resp_ie, size_t resp_ie_len, | ||
335 | u16 status, bool wextev, | ||
336 | struct cfg80211_bss *bss); | ||
337 | |||
338 | /* SME */ | ||
339 | int __cfg80211_connect(struct cfg80211_registered_device *rdev, | ||
340 | struct net_device *dev, | ||
341 | struct cfg80211_connect_params *connect, | ||
342 | struct cfg80211_cached_keys *connkeys, | ||
343 | const u8 *prev_bssid); | ||
344 | int cfg80211_connect(struct cfg80211_registered_device *rdev, | ||
345 | struct net_device *dev, | ||
346 | struct cfg80211_connect_params *connect, | ||
347 | struct cfg80211_cached_keys *connkeys); | ||
348 | int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, | ||
349 | struct net_device *dev, u16 reason, | ||
350 | bool wextev); | ||
351 | int cfg80211_disconnect(struct cfg80211_registered_device *rdev, | ||
352 | struct net_device *dev, u16 reason, | ||
353 | bool wextev); | ||
354 | void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, | ||
355 | const u8 *req_ie, size_t req_ie_len, | ||
356 | const u8 *resp_ie, size_t resp_ie_len); | ||
357 | int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, | ||
358 | struct wireless_dev *wdev); | ||
359 | |||
360 | void cfg80211_conn_work(struct work_struct *work); | ||
361 | bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev); | ||
172 | 362 | ||
173 | /* internal helpers */ | 363 | /* internal helpers */ |
174 | int cfg80211_validate_key_settings(struct key_params *params, int key_idx, | 364 | int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, |
365 | struct key_params *params, int key_idx, | ||
175 | const u8 *mac_addr); | 366 | const u8 *mac_addr); |
367 | void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | ||
368 | size_t ie_len, u16 reason, bool from_ap); | ||
369 | void cfg80211_sme_scan_done(struct net_device *dev); | ||
370 | void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len); | ||
371 | void cfg80211_sme_disassoc(struct net_device *dev, int idx); | ||
372 | void __cfg80211_scan_done(struct work_struct *wk); | ||
373 | void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak); | ||
374 | void cfg80211_upload_connect_keys(struct wireless_dev *wdev); | ||
375 | int cfg80211_change_iface(struct cfg80211_registered_device *rdev, | ||
376 | struct net_device *dev, enum nl80211_iftype ntype, | ||
377 | u32 *flags, struct vif_params *params); | ||
378 | void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); | ||
379 | |||
380 | struct ieee80211_channel * | ||
381 | rdev_fixed_channel(struct cfg80211_registered_device *rdev, | ||
382 | struct wireless_dev *for_wdev); | ||
383 | int rdev_set_freq(struct cfg80211_registered_device *rdev, | ||
384 | struct wireless_dev *for_wdev, | ||
385 | int freq, enum nl80211_channel_type channel_type); | ||
386 | |||
387 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS | ||
388 | #define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) | ||
389 | #else | ||
390 | /* | ||
391 | * Trick to enable using it as a condition, | ||
392 | * and also not give a warning when it's | ||
393 | * not used that way. | ||
394 | */ | ||
395 | #define CFG80211_DEV_WARN_ON(cond) ({bool __r = (cond); __r; }) | ||
396 | #endif | ||
176 | 397 | ||
177 | #endif /* __NET_WIRELESS_CORE_H */ | 398 | #endif /* __NET_WIRELESS_CORE_H */ |
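The CFG80211_DEV_WARN_ON() fallback added to core.h above relies on a GCC statement expression so that, with CONFIG_CFG80211_DEVELOPER_WARNINGS disabled, the macro still evaluates its argument and yields it as a value, letting callers use it either as a bare statement or as an if () condition without compiler noise. A minimal standalone sketch of the same trick; warn_on() is a hypothetical stub here, not the kernel's WARN_ON():

	#include <stdbool.h>
	#include <stdio.h>

	static bool warn_on(bool cond)
	{
		if (cond)
			fprintf(stderr, "developer warning hit\n");
		return cond;
	}

	/* "Warnings enabled" flavour: forwards to the stub. */
	#define DEV_WARN_ON_ENABLED(cond)	warn_on(cond)

	/*
	 * "Warnings disabled" flavour: the statement expression evaluates
	 * cond and yields it as the macro's value, so both uses below
	 * compile cleanly.
	 */
	#define DEV_WARN_ON_DISABLED(cond)	({ bool __r = (cond); __r; })

	int main(void)
	{
		int held_refs = 0;

		/* Usable as a bare statement, no "statement with no effect". */
		DEV_WARN_ON_DISABLED(held_refs < 0);

		/* And usable as a condition, exactly like the enabled variant. */
		if (DEV_WARN_ON_ENABLED(held_refs != 0))
			return 1;

		return 0;
	}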
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index 679ddfcec1ee..13d93d84f902 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c | |||
@@ -104,15 +104,15 @@ static const struct file_operations ht40allow_map_ops = { | |||
104 | }; | 104 | }; |
105 | 105 | ||
106 | #define DEBUGFS_ADD(name) \ | 106 | #define DEBUGFS_ADD(name) \ |
107 | drv->debugfs.name = debugfs_create_file(#name, S_IRUGO, phyd, \ | 107 | rdev->debugfs.name = debugfs_create_file(#name, S_IRUGO, phyd, \ |
108 | &drv->wiphy, &name## _ops); | 108 | &rdev->wiphy, &name## _ops); |
109 | #define DEBUGFS_DEL(name) \ | 109 | #define DEBUGFS_DEL(name) \ |
110 | debugfs_remove(drv->debugfs.name); \ | 110 | debugfs_remove(rdev->debugfs.name); \ |
111 | drv->debugfs.name = NULL; | 111 | rdev->debugfs.name = NULL; |
112 | 112 | ||
113 | void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv) | 113 | void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) |
114 | { | 114 | { |
115 | struct dentry *phyd = drv->wiphy.debugfsdir; | 115 | struct dentry *phyd = rdev->wiphy.debugfsdir; |
116 | 116 | ||
117 | DEBUGFS_ADD(rts_threshold); | 117 | DEBUGFS_ADD(rts_threshold); |
118 | DEBUGFS_ADD(fragmentation_threshold); | 118 | DEBUGFS_ADD(fragmentation_threshold); |
@@ -121,7 +121,7 @@ void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv) | |||
121 | DEBUGFS_ADD(ht40allow_map); | 121 | DEBUGFS_ADD(ht40allow_map); |
122 | } | 122 | } |
123 | 123 | ||
124 | void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv) | 124 | void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev) |
125 | { | 125 | { |
126 | DEBUGFS_DEL(rts_threshold); | 126 | DEBUGFS_DEL(rts_threshold); |
127 | DEBUGFS_DEL(fragmentation_threshold); | 127 | DEBUGFS_DEL(fragmentation_threshold); |
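The DEBUGFS_ADD()/DEBUGFS_DEL() macros in debugfs.c above lean on preprocessor stringification (#name) and token pasting (name## _ops) so each per-wiphy file only has to be named once. A small standalone sketch of that pattern; register_entry() and the *_ops structures are invented for illustration and are not cfg80211 or debugfs APIs:

	#include <stdio.h>

	struct entry_ops {
		int (*show)(void);
	};

	static int rts_threshold_show(void) { return 2347; }
	static int fragmentation_threshold_show(void) { return 2346; }

	static const struct entry_ops rts_threshold_ops = {
		.show = rts_threshold_show,
	};
	static const struct entry_ops fragmentation_threshold_ops = {
		.show = fragmentation_threshold_show,
	};

	/* Hypothetical registration helper standing in for debugfs_create_file(). */
	static void register_entry(const char *name, const struct entry_ops *ops)
	{
		printf("%s = %d\n", name, ops->show());
	}

	/*
	 * #name turns the identifier into a string; name##_ops pastes it onto
	 * the matching ops structure, mirroring DEBUGFS_ADD() above.
	 */
	#define ADD_ENTRY(name)	register_entry(#name, &name##_ops)

	int main(void)
	{
		ADD_ENTRY(rts_threshold);
		ADD_ENTRY(fragmentation_threshold);
		return 0;
	}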
diff --git a/net/wireless/debugfs.h b/net/wireless/debugfs.h index c226983ae66b..6419b6d6ce3e 100644 --- a/net/wireless/debugfs.h +++ b/net/wireless/debugfs.h | |||
@@ -2,13 +2,13 @@ | |||
2 | #define __CFG80211_DEBUGFS_H | 2 | #define __CFG80211_DEBUGFS_H |
3 | 3 | ||
4 | #ifdef CONFIG_CFG80211_DEBUGFS | 4 | #ifdef CONFIG_CFG80211_DEBUGFS |
5 | void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv); | 5 | void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev); |
6 | void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv); | 6 | void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev); |
7 | #else | 7 | #else |
8 | static inline | 8 | static inline |
9 | void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv) {} | 9 | void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) {} |
10 | static inline | 10 | static inline |
11 | void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv) {} | 11 | void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev) {} |
12 | #endif | 12 | #endif |
13 | 13 | ||
14 | #endif /* __CFG80211_DEBUGFS_H */ | 14 | #endif /* __CFG80211_DEBUGFS_H */ |
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index a4a1c3498ff2..c88338911979 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c | |||
@@ -7,10 +7,11 @@ | |||
7 | #include <linux/etherdevice.h> | 7 | #include <linux/etherdevice.h> |
8 | #include <linux/if_arp.h> | 8 | #include <linux/if_arp.h> |
9 | #include <net/cfg80211.h> | 9 | #include <net/cfg80211.h> |
10 | #include "wext-compat.h" | ||
10 | #include "nl80211.h" | 11 | #include "nl80211.h" |
11 | 12 | ||
12 | 13 | ||
13 | void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) | 14 | void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) |
14 | { | 15 | { |
15 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 16 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
16 | struct cfg80211_bss *bss; | 17 | struct cfg80211_bss *bss; |
@@ -21,10 +22,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) | |||
21 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) | 22 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) |
22 | return; | 23 | return; |
23 | 24 | ||
24 | if (WARN_ON(!wdev->ssid_len)) | 25 | if (!wdev->ssid_len) |
25 | return; | ||
26 | |||
27 | if (memcmp(bssid, wdev->bssid, ETH_ALEN) == 0) | ||
28 | return; | 26 | return; |
29 | 27 | ||
30 | bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, | 28 | bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, |
@@ -36,39 +34,76 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) | |||
36 | 34 | ||
37 | if (wdev->current_bss) { | 35 | if (wdev->current_bss) { |
38 | cfg80211_unhold_bss(wdev->current_bss); | 36 | cfg80211_unhold_bss(wdev->current_bss); |
39 | cfg80211_put_bss(wdev->current_bss); | 37 | cfg80211_put_bss(&wdev->current_bss->pub); |
40 | } | 38 | } |
41 | 39 | ||
42 | cfg80211_hold_bss(bss); | 40 | cfg80211_hold_bss(bss_from_pub(bss)); |
43 | wdev->current_bss = bss; | 41 | wdev->current_bss = bss_from_pub(bss); |
44 | memcpy(wdev->bssid, bssid, ETH_ALEN); | 42 | |
43 | cfg80211_upload_connect_keys(wdev); | ||
45 | 44 | ||
46 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, gfp); | 45 | nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, |
46 | GFP_KERNEL); | ||
47 | #ifdef CONFIG_WIRELESS_EXT | 47 | #ifdef CONFIG_WIRELESS_EXT |
48 | memset(&wrqu, 0, sizeof(wrqu)); | 48 | memset(&wrqu, 0, sizeof(wrqu)); |
49 | memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); | 49 | memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); |
50 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | 50 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); |
51 | #endif | 51 | #endif |
52 | } | 52 | } |
53 | |||
54 | void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) | ||
55 | { | ||
56 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
57 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
58 | struct cfg80211_event *ev; | ||
59 | unsigned long flags; | ||
60 | |||
61 | CFG80211_DEV_WARN_ON(!wdev->ssid_len); | ||
62 | |||
63 | ev = kzalloc(sizeof(*ev), gfp); | ||
64 | if (!ev) | ||
65 | return; | ||
66 | |||
67 | ev->type = EVENT_IBSS_JOINED; | ||
68 | memcpy(ev->cr.bssid, bssid, ETH_ALEN); | ||
69 | |||
70 | spin_lock_irqsave(&wdev->event_lock, flags); | ||
71 | list_add_tail(&ev->list, &wdev->event_list); | ||
72 | spin_unlock_irqrestore(&wdev->event_lock, flags); | ||
73 | schedule_work(&rdev->event_work); | ||
74 | } | ||
53 | EXPORT_SYMBOL(cfg80211_ibss_joined); | 75 | EXPORT_SYMBOL(cfg80211_ibss_joined); |
54 | 76 | ||
55 | int cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | 77 | int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, |
56 | struct net_device *dev, | 78 | struct net_device *dev, |
57 | struct cfg80211_ibss_params *params) | 79 | struct cfg80211_ibss_params *params, |
80 | struct cfg80211_cached_keys *connkeys) | ||
58 | { | 81 | { |
59 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 82 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
83 | struct ieee80211_channel *chan; | ||
60 | int err; | 84 | int err; |
61 | 85 | ||
86 | ASSERT_WDEV_LOCK(wdev); | ||
87 | |||
88 | chan = rdev_fixed_channel(rdev, wdev); | ||
89 | if (chan && chan != params->channel) | ||
90 | return -EBUSY; | ||
91 | |||
62 | if (wdev->ssid_len) | 92 | if (wdev->ssid_len) |
63 | return -EALREADY; | 93 | return -EALREADY; |
64 | 94 | ||
95 | if (WARN_ON(wdev->connect_keys)) | ||
96 | kfree(wdev->connect_keys); | ||
97 | wdev->connect_keys = connkeys; | ||
98 | |||
65 | #ifdef CONFIG_WIRELESS_EXT | 99 | #ifdef CONFIG_WIRELESS_EXT |
66 | wdev->wext.ibss.channel = params->channel; | 100 | wdev->wext.ibss.channel = params->channel; |
67 | #endif | 101 | #endif |
68 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); | 102 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); |
69 | 103 | if (err) { | |
70 | if (err) | 104 | wdev->connect_keys = NULL; |
71 | return err; | 105 | return err; |
106 | } | ||
72 | 107 | ||
73 | memcpy(wdev->ssid, params->ssid, params->ssid_len); | 108 | memcpy(wdev->ssid, params->ssid, params->ssid_len); |
74 | wdev->ssid_len = params->ssid_len; | 109 | wdev->ssid_len = params->ssid_len; |
@@ -76,45 +111,107 @@ int cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | |||
76 | return 0; | 111 | return 0; |
77 | } | 112 | } |
78 | 113 | ||
79 | void cfg80211_clear_ibss(struct net_device *dev, bool nowext) | 114 | int cfg80211_join_ibss(struct cfg80211_registered_device *rdev, |
115 | struct net_device *dev, | ||
116 | struct cfg80211_ibss_params *params, | ||
117 | struct cfg80211_cached_keys *connkeys) | ||
118 | { | ||
119 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
120 | int err; | ||
121 | |||
122 | mutex_lock(&rdev->devlist_mtx); | ||
123 | wdev_lock(wdev); | ||
124 | err = __cfg80211_join_ibss(rdev, dev, params, connkeys); | ||
125 | wdev_unlock(wdev); | ||
126 | mutex_unlock(&rdev->devlist_mtx); | ||
127 | |||
128 | return err; | ||
129 | } | ||
130 | |||
131 | static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) | ||
80 | { | 132 | { |
81 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 133 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
134 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
135 | int i; | ||
136 | |||
137 | ASSERT_WDEV_LOCK(wdev); | ||
138 | |||
139 | kfree(wdev->connect_keys); | ||
140 | wdev->connect_keys = NULL; | ||
141 | |||
142 | /* | ||
143 | * Delete all the keys ... pairwise keys can't really | ||
144 | * exist any more anyway, but default keys might. | ||
145 | */ | ||
146 | if (rdev->ops->del_key) | ||
147 | for (i = 0; i < 6; i++) | ||
148 | rdev->ops->del_key(wdev->wiphy, dev, i, NULL); | ||
82 | 149 | ||
83 | if (wdev->current_bss) { | 150 | if (wdev->current_bss) { |
84 | cfg80211_unhold_bss(wdev->current_bss); | 151 | cfg80211_unhold_bss(wdev->current_bss); |
85 | cfg80211_put_bss(wdev->current_bss); | 152 | cfg80211_put_bss(&wdev->current_bss->pub); |
86 | } | 153 | } |
87 | 154 | ||
88 | wdev->current_bss = NULL; | 155 | wdev->current_bss = NULL; |
89 | wdev->ssid_len = 0; | 156 | wdev->ssid_len = 0; |
90 | memset(wdev->bssid, 0, ETH_ALEN); | ||
91 | #ifdef CONFIG_WIRELESS_EXT | 157 | #ifdef CONFIG_WIRELESS_EXT |
92 | if (!nowext) | 158 | if (!nowext) |
93 | wdev->wext.ibss.ssid_len = 0; | 159 | wdev->wext.ibss.ssid_len = 0; |
94 | #endif | 160 | #endif |
95 | } | 161 | } |
96 | 162 | ||
97 | int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, | 163 | void cfg80211_clear_ibss(struct net_device *dev, bool nowext) |
98 | struct net_device *dev, bool nowext) | 164 | { |
165 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
166 | |||
167 | wdev_lock(wdev); | ||
168 | __cfg80211_clear_ibss(dev, nowext); | ||
169 | wdev_unlock(wdev); | ||
170 | } | ||
171 | |||
172 | static int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, | ||
173 | struct net_device *dev, bool nowext) | ||
99 | { | 174 | { |
175 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
100 | int err; | 176 | int err; |
101 | 177 | ||
178 | ASSERT_WDEV_LOCK(wdev); | ||
179 | |||
180 | if (!wdev->ssid_len) | ||
181 | return -ENOLINK; | ||
182 | |||
102 | err = rdev->ops->leave_ibss(&rdev->wiphy, dev); | 183 | err = rdev->ops->leave_ibss(&rdev->wiphy, dev); |
103 | 184 | ||
104 | if (err) | 185 | if (err) |
105 | return err; | 186 | return err; |
106 | 187 | ||
107 | cfg80211_clear_ibss(dev, nowext); | 188 | __cfg80211_clear_ibss(dev, nowext); |
108 | 189 | ||
109 | return 0; | 190 | return 0; |
110 | } | 191 | } |
111 | 192 | ||
193 | int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, | ||
194 | struct net_device *dev, bool nowext) | ||
195 | { | ||
196 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
197 | int err; | ||
198 | |||
199 | wdev_lock(wdev); | ||
200 | err = __cfg80211_leave_ibss(rdev, dev, nowext); | ||
201 | wdev_unlock(wdev); | ||
202 | |||
203 | return err; | ||
204 | } | ||
205 | |||
112 | #ifdef CONFIG_WIRELESS_EXT | 206 | #ifdef CONFIG_WIRELESS_EXT |
113 | static int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, | 207 | int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, |
114 | struct wireless_dev *wdev) | 208 | struct wireless_dev *wdev) |
115 | { | 209 | { |
210 | struct cfg80211_cached_keys *ck = NULL; | ||
116 | enum ieee80211_band band; | 211 | enum ieee80211_band band; |
117 | int i; | 212 | int i, err; |
213 | |||
214 | ASSERT_WDEV_LOCK(wdev); | ||
118 | 215 | ||
119 | if (!wdev->wext.ibss.beacon_interval) | 216 | if (!wdev->wext.ibss.beacon_interval) |
120 | wdev->wext.ibss.beacon_interval = 100; | 217 | wdev->wext.ibss.beacon_interval = 100; |
@@ -154,43 +251,66 @@ static int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, | |||
154 | if (!netif_running(wdev->netdev)) | 251 | if (!netif_running(wdev->netdev)) |
155 | return 0; | 252 | return 0; |
156 | 253 | ||
157 | return cfg80211_join_ibss(wiphy_to_dev(wdev->wiphy), | 254 | if (wdev->wext.keys) |
158 | wdev->netdev, &wdev->wext.ibss); | 255 | wdev->wext.keys->def = wdev->wext.default_key; |
256 | |||
257 | wdev->wext.ibss.privacy = wdev->wext.default_key != -1; | ||
258 | |||
259 | if (wdev->wext.keys) { | ||
260 | ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL); | ||
261 | if (!ck) | ||
262 | return -ENOMEM; | ||
263 | for (i = 0; i < 6; i++) | ||
264 | ck->params[i].key = ck->data[i]; | ||
265 | } | ||
266 | err = __cfg80211_join_ibss(rdev, wdev->netdev, | ||
267 | &wdev->wext.ibss, ck); | ||
268 | if (err) | ||
269 | kfree(ck); | ||
270 | |||
271 | return err; | ||
159 | } | 272 | } |
160 | 273 | ||
161 | int cfg80211_ibss_wext_siwfreq(struct net_device *dev, | 274 | int cfg80211_ibss_wext_siwfreq(struct net_device *dev, |
162 | struct iw_request_info *info, | 275 | struct iw_request_info *info, |
163 | struct iw_freq *freq, char *extra) | 276 | struct iw_freq *wextfreq, char *extra) |
164 | { | 277 | { |
165 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 278 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
166 | struct ieee80211_channel *chan; | 279 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
167 | int err; | 280 | struct ieee80211_channel *chan = NULL; |
281 | int err, freq; | ||
168 | 282 | ||
169 | /* call only for ibss! */ | 283 | /* call only for ibss! */ |
170 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) | 284 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) |
171 | return -EINVAL; | 285 | return -EINVAL; |
172 | 286 | ||
173 | if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss) | 287 | if (!rdev->ops->join_ibss) |
174 | return -EOPNOTSUPP; | 288 | return -EOPNOTSUPP; |
175 | 289 | ||
176 | chan = cfg80211_wext_freq(wdev->wiphy, freq); | 290 | freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); |
177 | if (chan && IS_ERR(chan)) | 291 | if (freq < 0) |
178 | return PTR_ERR(chan); | 292 | return freq; |
179 | 293 | ||
180 | if (chan && | 294 | if (freq) { |
181 | (chan->flags & IEEE80211_CHAN_NO_IBSS || | 295 | chan = ieee80211_get_channel(wdev->wiphy, freq); |
182 | chan->flags & IEEE80211_CHAN_DISABLED)) | 296 | if (!chan) |
183 | return -EINVAL; | 297 | return -EINVAL; |
298 | if (chan->flags & IEEE80211_CHAN_NO_IBSS || | ||
299 | chan->flags & IEEE80211_CHAN_DISABLED) | ||
300 | return -EINVAL; | ||
301 | } | ||
184 | 302 | ||
185 | if (wdev->wext.ibss.channel == chan) | 303 | if (wdev->wext.ibss.channel == chan) |
186 | return 0; | 304 | return 0; |
187 | 305 | ||
188 | if (wdev->ssid_len) { | 306 | wdev_lock(wdev); |
189 | err = cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy), | 307 | err = 0; |
190 | dev, true); | 308 | if (wdev->ssid_len) |
191 | if (err) | 309 | err = __cfg80211_leave_ibss(rdev, dev, true); |
192 | return err; | 310 | wdev_unlock(wdev); |
193 | } | 311 | |
312 | if (err) | ||
313 | return err; | ||
194 | 314 | ||
195 | if (chan) { | 315 | if (chan) { |
196 | wdev->wext.ibss.channel = chan; | 316 | wdev->wext.ibss.channel = chan; |
@@ -200,10 +320,14 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev, | |||
200 | wdev->wext.ibss.channel_fixed = false; | 320 | wdev->wext.ibss.channel_fixed = false; |
201 | } | 321 | } |
202 | 322 | ||
203 | return cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev); | 323 | mutex_lock(&rdev->devlist_mtx); |
324 | wdev_lock(wdev); | ||
325 | err = cfg80211_ibss_wext_join(rdev, wdev); | ||
326 | wdev_unlock(wdev); | ||
327 | mutex_unlock(&rdev->devlist_mtx); | ||
328 | |||
329 | return err; | ||
204 | } | 330 | } |
205 | /* temporary symbol - mark GPL - in the future the handler won't be */ | ||
206 | EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_siwfreq); | ||
207 | 331 | ||
208 | int cfg80211_ibss_wext_giwfreq(struct net_device *dev, | 332 | int cfg80211_ibss_wext_giwfreq(struct net_device *dev, |
209 | struct iw_request_info *info, | 333 | struct iw_request_info *info, |
@@ -216,10 +340,12 @@ int cfg80211_ibss_wext_giwfreq(struct net_device *dev, | |||
216 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) | 340 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) |
217 | return -EINVAL; | 341 | return -EINVAL; |
218 | 342 | ||
343 | wdev_lock(wdev); | ||
219 | if (wdev->current_bss) | 344 | if (wdev->current_bss) |
220 | chan = wdev->current_bss->channel; | 345 | chan = wdev->current_bss->pub.channel; |
221 | else if (wdev->wext.ibss.channel) | 346 | else if (wdev->wext.ibss.channel) |
222 | chan = wdev->wext.ibss.channel; | 347 | chan = wdev->wext.ibss.channel; |
348 | wdev_unlock(wdev); | ||
223 | 349 | ||
224 | if (chan) { | 350 | if (chan) { |
225 | freq->m = chan->center_freq; | 351 | freq->m = chan->center_freq; |
@@ -230,14 +356,13 @@ int cfg80211_ibss_wext_giwfreq(struct net_device *dev, | |||
230 | /* no channel if not joining */ | 356 | /* no channel if not joining */ |
231 | return -EINVAL; | 357 | return -EINVAL; |
232 | } | 358 | } |
233 | /* temporary symbol - mark GPL - in the future the handler won't be */ | ||
234 | EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_giwfreq); | ||
235 | 359 | ||
236 | int cfg80211_ibss_wext_siwessid(struct net_device *dev, | 360 | int cfg80211_ibss_wext_siwessid(struct net_device *dev, |
237 | struct iw_request_info *info, | 361 | struct iw_request_info *info, |
238 | struct iw_point *data, char *ssid) | 362 | struct iw_point *data, char *ssid) |
239 | { | 363 | { |
240 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 364 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
365 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
241 | size_t len = data->length; | 366 | size_t len = data->length; |
242 | int err; | 367 | int err; |
243 | 368 | ||
@@ -245,15 +370,17 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev, | |||
245 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) | 370 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) |
246 | return -EINVAL; | 371 | return -EINVAL; |
247 | 372 | ||
248 | if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss) | 373 | if (!rdev->ops->join_ibss) |
249 | return -EOPNOTSUPP; | 374 | return -EOPNOTSUPP; |
250 | 375 | ||
251 | if (wdev->ssid_len) { | 376 | wdev_lock(wdev); |
252 | err = cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy), | 377 | err = 0; |
253 | dev, true); | 378 | if (wdev->ssid_len) |
254 | if (err) | 379 | err = __cfg80211_leave_ibss(rdev, dev, true); |
255 | return err; | 380 | wdev_unlock(wdev); |
256 | } | 381 | |
382 | if (err) | ||
383 | return err; | ||
257 | 384 | ||
258 | /* iwconfig uses nul termination in SSID.. */ | 385 | /* iwconfig uses nul termination in SSID.. */ |
259 | if (len > 0 && ssid[len - 1] == '\0') | 386 | if (len > 0 && ssid[len - 1] == '\0') |
@@ -263,10 +390,14 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev, | |||
263 | memcpy(wdev->wext.ibss.ssid, ssid, len); | 390 | memcpy(wdev->wext.ibss.ssid, ssid, len); |
264 | wdev->wext.ibss.ssid_len = len; | 391 | wdev->wext.ibss.ssid_len = len; |
265 | 392 | ||
266 | return cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev); | 393 | mutex_lock(&rdev->devlist_mtx); |
394 | wdev_lock(wdev); | ||
395 | err = cfg80211_ibss_wext_join(rdev, wdev); | ||
396 | wdev_unlock(wdev); | ||
397 | mutex_unlock(&rdev->devlist_mtx); | ||
398 | |||
399 | return err; | ||
267 | } | 400 | } |
268 | /* temporary symbol - mark GPL - in the future the handler won't be */ | ||
269 | EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_siwessid); | ||
270 | 401 | ||
271 | int cfg80211_ibss_wext_giwessid(struct net_device *dev, | 402 | int cfg80211_ibss_wext_giwessid(struct net_device *dev, |
272 | struct iw_request_info *info, | 403 | struct iw_request_info *info, |
@@ -280,6 +411,7 @@ int cfg80211_ibss_wext_giwessid(struct net_device *dev, | |||
280 | 411 | ||
281 | data->flags = 0; | 412 | data->flags = 0; |
282 | 413 | ||
414 | wdev_lock(wdev); | ||
283 | if (wdev->ssid_len) { | 415 | if (wdev->ssid_len) { |
284 | data->flags = 1; | 416 | data->flags = 1; |
285 | data->length = wdev->ssid_len; | 417 | data->length = wdev->ssid_len; |
@@ -289,17 +421,17 @@ int cfg80211_ibss_wext_giwessid(struct net_device *dev, | |||
289 | data->length = wdev->wext.ibss.ssid_len; | 421 | data->length = wdev->wext.ibss.ssid_len; |
290 | memcpy(ssid, wdev->wext.ibss.ssid, data->length); | 422 | memcpy(ssid, wdev->wext.ibss.ssid, data->length); |
291 | } | 423 | } |
424 | wdev_unlock(wdev); | ||
292 | 425 | ||
293 | return 0; | 426 | return 0; |
294 | } | 427 | } |
295 | /* temporary symbol - mark GPL - in the future the handler won't be */ | ||
296 | EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_giwessid); | ||
297 | 428 | ||
298 | int cfg80211_ibss_wext_siwap(struct net_device *dev, | 429 | int cfg80211_ibss_wext_siwap(struct net_device *dev, |
299 | struct iw_request_info *info, | 430 | struct iw_request_info *info, |
300 | struct sockaddr *ap_addr, char *extra) | 431 | struct sockaddr *ap_addr, char *extra) |
301 | { | 432 | { |
302 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 433 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
434 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
303 | u8 *bssid = ap_addr->sa_data; | 435 | u8 *bssid = ap_addr->sa_data; |
304 | int err; | 436 | int err; |
305 | 437 | ||
@@ -307,7 +439,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev, | |||
307 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) | 439 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC)) |
308 | return -EINVAL; | 440 | return -EINVAL; |
309 | 441 | ||
310 | if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss) | 442 | if (!rdev->ops->join_ibss) |
311 | return -EOPNOTSUPP; | 443 | return -EOPNOTSUPP; |
312 | 444 | ||
313 | if (ap_addr->sa_family != ARPHRD_ETHER) | 445 | if (ap_addr->sa_family != ARPHRD_ETHER) |
@@ -326,12 +458,14 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev, | |||
326 | compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0) | 458 | compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0) |
327 | return 0; | 459 | return 0; |
328 | 460 | ||
329 | if (wdev->ssid_len) { | 461 | wdev_lock(wdev); |
330 | err = cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy), | 462 | err = 0; |
331 | dev, true); | 463 | if (wdev->ssid_len) |
332 | if (err) | 464 | err = __cfg80211_leave_ibss(rdev, dev, true); |
333 | return err; | 465 | wdev_unlock(wdev); |
334 | } | 466 | |
467 | if (err) | ||
468 | return err; | ||
335 | 469 | ||
336 | if (bssid) { | 470 | if (bssid) { |
337 | memcpy(wdev->wext.bssid, bssid, ETH_ALEN); | 471 | memcpy(wdev->wext.bssid, bssid, ETH_ALEN); |
@@ -339,10 +473,14 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev, | |||
339 | } else | 473 | } else |
340 | wdev->wext.ibss.bssid = NULL; | 474 | wdev->wext.ibss.bssid = NULL; |
341 | 475 | ||
342 | return cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev); | 476 | mutex_lock(&rdev->devlist_mtx); |
477 | wdev_lock(wdev); | ||
478 | err = cfg80211_ibss_wext_join(rdev, wdev); | ||
479 | wdev_unlock(wdev); | ||
480 | mutex_unlock(&rdev->devlist_mtx); | ||
481 | |||
482 | return err; | ||
343 | } | 483 | } |
344 | /* temporary symbol - mark GPL - in the future the handler won't be */ | ||
345 | EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_siwap); | ||
346 | 484 | ||
347 | int cfg80211_ibss_wext_giwap(struct net_device *dev, | 485 | int cfg80211_ibss_wext_giwap(struct net_device *dev, |
348 | struct iw_request_info *info, | 486 | struct iw_request_info *info, |
@@ -356,14 +494,16 @@ int cfg80211_ibss_wext_giwap(struct net_device *dev, | |||
356 | 494 | ||
357 | ap_addr->sa_family = ARPHRD_ETHER; | 495 | ap_addr->sa_family = ARPHRD_ETHER; |
358 | 496 | ||
359 | if (wdev->wext.ibss.bssid) { | 497 | wdev_lock(wdev); |
498 | if (wdev->current_bss) | ||
499 | memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN); | ||
500 | else if (wdev->wext.ibss.bssid) | ||
360 | memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN); | 501 | memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN); |
361 | return 0; | 502 | else |
362 | } | 503 | memset(ap_addr->sa_data, 0, ETH_ALEN); |
504 | |||
505 | wdev_unlock(wdev); | ||
363 | 506 | ||
364 | memcpy(ap_addr->sa_data, wdev->bssid, ETH_ALEN); | ||
365 | return 0; | 507 | return 0; |
366 | } | 508 | } |
367 | /* temporary symbol - mark GPL - in the future the handler won't be */ | ||
368 | EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_giwap); | ||
369 | #endif | 509 | #endif |
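The reworked ibss.c consistently splits each operation into a locked __cfg80211_* helper that asserts the wdev lock and a thin public wrapper that takes wdev_lock()/wdev_unlock() (plus rdev->devlist_mtx for joins) around it, so callers that already hold the lock, such as the wext handlers, use the double-underscore variant directly. A compact standalone sketch of that pairing, with a pthread mutex standing in for wdev_lock(); struct toy_wdev and its fields are invented for the example:

	#include <pthread.h>
	#include <stdio.h>

	/* Invented stand-in for a wireless_dev; only what the sketch needs. */
	struct toy_wdev {
		pthread_mutex_t lock;
		int ssid_len;
	};

	/* Double-underscore variant: caller must already hold wdev->lock. */
	static int __toy_join_ibss(struct toy_wdev *wdev, int ssid_len)
	{
		if (wdev->ssid_len)
			return -1;		/* already joined, cf. -EALREADY */
		wdev->ssid_len = ssid_len;
		return 0;
	}

	/* Public wrapper: take the lock, call the locked helper, drop the lock. */
	static int toy_join_ibss(struct toy_wdev *wdev, int ssid_len)
	{
		int err;

		pthread_mutex_lock(&wdev->lock);
		err = __toy_join_ibss(wdev, ssid_len);
		pthread_mutex_unlock(&wdev->lock);

		return err;
	}

	int main(void)
	{
		struct toy_wdev wdev = { .lock = PTHREAD_MUTEX_INITIALIZER };

		printf("first join:  %d\n", toy_join_ibss(&wdev, 4));
		printf("second join: %d\n", toy_join_ibss(&wdev, 4));	/* rejected */
		return 0;
	}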
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 42184361a109..79d2eec54cec 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
@@ -8,75 +8,652 @@ | |||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/netdevice.h> | 9 | #include <linux/netdevice.h> |
10 | #include <linux/nl80211.h> | 10 | #include <linux/nl80211.h> |
11 | #include <linux/wireless.h> | ||
11 | #include <net/cfg80211.h> | 12 | #include <net/cfg80211.h> |
13 | #include <net/iw_handler.h> | ||
12 | #include "core.h" | 14 | #include "core.h" |
13 | #include "nl80211.h" | 15 | #include "nl80211.h" |
14 | 16 | ||
15 | void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) | 17 | void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) |
16 | { | 18 | { |
17 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; | 19 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
20 | struct wiphy *wiphy = wdev->wiphy; | ||
18 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 21 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
19 | nl80211_send_rx_auth(rdev, dev, buf, len); | 22 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; |
23 | u8 *bssid = mgmt->bssid; | ||
24 | int i; | ||
25 | u16 status = le16_to_cpu(mgmt->u.auth.status_code); | ||
26 | bool done = false; | ||
27 | |||
28 | wdev_lock(wdev); | ||
29 | |||
30 | for (i = 0; i < MAX_AUTH_BSSES; i++) { | ||
31 | if (wdev->authtry_bsses[i] && | ||
32 | memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid, | ||
33 | ETH_ALEN) == 0) { | ||
34 | if (status == WLAN_STATUS_SUCCESS) { | ||
35 | wdev->auth_bsses[i] = wdev->authtry_bsses[i]; | ||
36 | } else { | ||
37 | cfg80211_unhold_bss(wdev->authtry_bsses[i]); | ||
38 | cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); | ||
39 | } | ||
40 | wdev->authtry_bsses[i] = NULL; | ||
41 | done = true; | ||
42 | break; | ||
43 | } | ||
44 | } | ||
45 | |||
46 | WARN_ON(!done); | ||
47 | |||
48 | nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); | ||
49 | cfg80211_sme_rx_auth(dev, buf, len); | ||
50 | |||
51 | wdev_unlock(wdev); | ||
20 | } | 52 | } |
21 | EXPORT_SYMBOL(cfg80211_send_rx_auth); | 53 | EXPORT_SYMBOL(cfg80211_send_rx_auth); |
22 | 54 | ||
23 | void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) | 55 | void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) |
24 | { | 56 | { |
25 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; | 57 | u16 status_code; |
58 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
59 | struct wiphy *wiphy = wdev->wiphy; | ||
26 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 60 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
27 | nl80211_send_rx_assoc(rdev, dev, buf, len); | 61 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; |
62 | u8 *ie = mgmt->u.assoc_resp.variable; | ||
63 | int i, ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); | ||
64 | struct cfg80211_internal_bss *bss = NULL; | ||
65 | |||
66 | wdev_lock(wdev); | ||
67 | |||
68 | status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); | ||
69 | |||
70 | /* | ||
71 | * This is a bit of a hack, we don't notify userspace of | ||
72 | * a (re-)association reply if we tried to send a reassoc | ||
73 | * and got a reject -- we only try again with an assoc | ||
74 | * frame instead of reassoc. | ||
75 | */ | ||
76 | if (status_code != WLAN_STATUS_SUCCESS && wdev->conn && | ||
77 | cfg80211_sme_failed_reassoc(wdev)) | ||
78 | goto out; | ||
79 | |||
80 | nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL); | ||
81 | |||
82 | if (status_code == WLAN_STATUS_SUCCESS) { | ||
83 | for (i = 0; i < MAX_AUTH_BSSES; i++) { | ||
84 | if (!wdev->auth_bsses[i]) | ||
85 | continue; | ||
86 | if (memcmp(wdev->auth_bsses[i]->pub.bssid, mgmt->bssid, | ||
87 | ETH_ALEN) == 0) { | ||
88 | bss = wdev->auth_bsses[i]; | ||
89 | wdev->auth_bsses[i] = NULL; | ||
90 | /* additional reference to drop hold */ | ||
91 | cfg80211_ref_bss(bss); | ||
92 | break; | ||
93 | } | ||
94 | } | ||
95 | |||
96 | WARN_ON(!bss); | ||
97 | } | ||
98 | |||
99 | if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) { | ||
100 | /* | ||
101 | * This is for the userspace SME, the CONNECTING | ||
102 | * state will be changed to CONNECTED by | ||
103 | * __cfg80211_connect_result() below. | ||
104 | */ | ||
105 | wdev->sme_state = CFG80211_SME_CONNECTING; | ||
106 | } | ||
107 | |||
108 | /* this consumes one bss reference (unless bss is NULL) */ | ||
109 | __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, | ||
110 | status_code, | ||
111 | status_code == WLAN_STATUS_SUCCESS, | ||
112 | bss ? &bss->pub : NULL); | ||
113 | /* drop hold now, and also reference acquired above */ | ||
114 | if (bss) { | ||
115 | cfg80211_unhold_bss(bss); | ||
116 | cfg80211_put_bss(&bss->pub); | ||
117 | } | ||
118 | |||
119 | out: | ||
120 | wdev_unlock(wdev); | ||
28 | } | 121 | } |
29 | EXPORT_SYMBOL(cfg80211_send_rx_assoc); | 122 | EXPORT_SYMBOL(cfg80211_send_rx_assoc); |
30 | 123 | ||
31 | void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len) | 124 | static void __cfg80211_send_deauth(struct net_device *dev, |
125 | const u8 *buf, size_t len) | ||
32 | { | 126 | { |
33 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; | 127 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
128 | struct wiphy *wiphy = wdev->wiphy; | ||
34 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 129 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
35 | nl80211_send_deauth(rdev, dev, buf, len); | 130 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; |
131 | const u8 *bssid = mgmt->bssid; | ||
132 | int i; | ||
133 | bool done = false; | ||
134 | |||
135 | ASSERT_WDEV_LOCK(wdev); | ||
136 | |||
137 | nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL); | ||
138 | |||
139 | if (wdev->current_bss && | ||
140 | memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { | ||
141 | done = true; | ||
142 | cfg80211_unhold_bss(wdev->current_bss); | ||
143 | cfg80211_put_bss(&wdev->current_bss->pub); | ||
144 | wdev->current_bss = NULL; | ||
145 | } else for (i = 0; i < MAX_AUTH_BSSES; i++) { | ||
146 | if (wdev->auth_bsses[i] && | ||
147 | memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) { | ||
148 | cfg80211_unhold_bss(wdev->auth_bsses[i]); | ||
149 | cfg80211_put_bss(&wdev->auth_bsses[i]->pub); | ||
150 | wdev->auth_bsses[i] = NULL; | ||
151 | done = true; | ||
152 | break; | ||
153 | } | ||
154 | if (wdev->authtry_bsses[i] && | ||
155 | memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) { | ||
156 | cfg80211_unhold_bss(wdev->authtry_bsses[i]); | ||
157 | cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); | ||
158 | wdev->authtry_bsses[i] = NULL; | ||
159 | done = true; | ||
160 | break; | ||
161 | } | ||
162 | } | ||
163 | |||
164 | WARN_ON(!done); | ||
165 | |||
166 | if (wdev->sme_state == CFG80211_SME_CONNECTED) { | ||
167 | u16 reason_code; | ||
168 | bool from_ap; | ||
169 | |||
170 | reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); | ||
171 | |||
172 | from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; | ||
173 | __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); | ||
174 | } else if (wdev->sme_state == CFG80211_SME_CONNECTING) { | ||
175 | __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, | ||
176 | WLAN_STATUS_UNSPECIFIED_FAILURE, | ||
177 | false, NULL); | ||
178 | } | ||
179 | } | ||
180 | |||
181 | |||
182 | void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len, | ||
183 | void *cookie) | ||
184 | { | ||
185 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
186 | |||
187 | BUG_ON(cookie && wdev != cookie); | ||
188 | |||
189 | if (cookie) { | ||
190 | /* called within callback */ | ||
191 | __cfg80211_send_deauth(dev, buf, len); | ||
192 | } else { | ||
193 | wdev_lock(wdev); | ||
194 | __cfg80211_send_deauth(dev, buf, len); | ||
195 | wdev_unlock(wdev); | ||
196 | } | ||
36 | } | 197 | } |
37 | EXPORT_SYMBOL(cfg80211_send_deauth); | 198 | EXPORT_SYMBOL(cfg80211_send_deauth); |
38 | 199 | ||
39 | void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len) | 200 | static void __cfg80211_send_disassoc(struct net_device *dev, |
201 | const u8 *buf, size_t len) | ||
40 | { | 202 | { |
41 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; | 203 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
204 | struct wiphy *wiphy = wdev->wiphy; | ||
42 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 205 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
43 | nl80211_send_disassoc(rdev, dev, buf, len); | 206 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; |
207 | const u8 *bssid = mgmt->bssid; | ||
208 | int i; | ||
209 | u16 reason_code; | ||
210 | bool from_ap; | ||
211 | bool done = false; | ||
212 | |||
213 | ASSERT_WDEV_LOCK(wdev); | ||
214 | |||
215 | nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL); | ||
216 | |||
217 | if (wdev->sme_state != CFG80211_SME_CONNECTED) | ||
218 | return; | ||
219 | |||
220 | if (wdev->current_bss && | ||
221 | memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { | ||
222 | for (i = 0; i < MAX_AUTH_BSSES; i++) { | ||
223 | if (wdev->authtry_bsses[i] || wdev->auth_bsses[i]) | ||
224 | continue; | ||
225 | wdev->auth_bsses[i] = wdev->current_bss; | ||
226 | wdev->current_bss = NULL; | ||
227 | done = true; | ||
228 | cfg80211_sme_disassoc(dev, i); | ||
229 | break; | ||
230 | } | ||
231 | WARN_ON(!done); | ||
232 | } else | ||
233 | WARN_ON(1); | ||
234 | |||
235 | |||
236 | reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); | ||
237 | |||
238 | from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; | ||
239 | __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); | ||
44 | } | 240 | } |
45 | EXPORT_SYMBOL(cfg80211_send_disassoc); | ||
46 | 241 | ||
47 | static void cfg80211_wext_disconnected(struct net_device *dev) | 242 | void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len, |
243 | void *cookie) | ||
48 | { | 244 | { |
49 | #ifdef CONFIG_WIRELESS_EXT | 245 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
50 | union iwreq_data wrqu; | 246 | |
51 | memset(&wrqu, 0, sizeof(wrqu)); | 247 | BUG_ON(cookie && wdev != cookie); |
52 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | 248 | |
53 | #endif | 249 | if (cookie) { |
250 | /* called within callback */ | ||
251 | __cfg80211_send_disassoc(dev, buf, len); | ||
252 | } else { | ||
253 | wdev_lock(wdev); | ||
254 | __cfg80211_send_disassoc(dev, buf, len); | ||
255 | wdev_unlock(wdev); | ||
256 | } | ||
54 | } | 257 | } |
258 | EXPORT_SYMBOL(cfg80211_send_disassoc); | ||
55 | 259 | ||
56 | void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) | 260 | void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) |
57 | { | 261 | { |
58 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; | 262 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
263 | struct wiphy *wiphy = wdev->wiphy; | ||
59 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 264 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
60 | nl80211_send_auth_timeout(rdev, dev, addr); | 265 | int i; |
61 | cfg80211_wext_disconnected(dev); | 266 | bool done = false; |
267 | |||
268 | wdev_lock(wdev); | ||
269 | |||
270 | nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL); | ||
271 | if (wdev->sme_state == CFG80211_SME_CONNECTING) | ||
272 | __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0, | ||
273 | WLAN_STATUS_UNSPECIFIED_FAILURE, | ||
274 | false, NULL); | ||
275 | |||
276 | for (i = 0; addr && i < MAX_AUTH_BSSES; i++) { | ||
277 | if (wdev->authtry_bsses[i] && | ||
278 | memcmp(wdev->authtry_bsses[i]->pub.bssid, | ||
279 | addr, ETH_ALEN) == 0) { | ||
280 | cfg80211_unhold_bss(wdev->authtry_bsses[i]); | ||
281 | cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); | ||
282 | wdev->authtry_bsses[i] = NULL; | ||
283 | done = true; | ||
284 | break; | ||
285 | } | ||
286 | } | ||
287 | |||
288 | WARN_ON(!done); | ||
289 | |||
290 | wdev_unlock(wdev); | ||
62 | } | 291 | } |
63 | EXPORT_SYMBOL(cfg80211_send_auth_timeout); | 292 | EXPORT_SYMBOL(cfg80211_send_auth_timeout); |
64 | 293 | ||
65 | void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr) | 294 | void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr) |
66 | { | 295 | { |
67 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; | 296 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
297 | struct wiphy *wiphy = wdev->wiphy; | ||
68 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 298 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
69 | nl80211_send_assoc_timeout(rdev, dev, addr); | 299 | int i; |
70 | cfg80211_wext_disconnected(dev); | 300 | bool done = false; |
301 | |||
302 | wdev_lock(wdev); | ||
303 | |||
304 | nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL); | ||
305 | if (wdev->sme_state == CFG80211_SME_CONNECTING) | ||
306 | __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0, | ||
307 | WLAN_STATUS_UNSPECIFIED_FAILURE, | ||
308 | false, NULL); | ||
309 | |||
310 | for (i = 0; addr && i < MAX_AUTH_BSSES; i++) { | ||
311 | if (wdev->auth_bsses[i] && | ||
312 | memcmp(wdev->auth_bsses[i]->pub.bssid, | ||
313 | addr, ETH_ALEN) == 0) { | ||
314 | cfg80211_unhold_bss(wdev->auth_bsses[i]); | ||
315 | cfg80211_put_bss(&wdev->auth_bsses[i]->pub); | ||
316 | wdev->auth_bsses[i] = NULL; | ||
317 | done = true; | ||
318 | break; | ||
319 | } | ||
320 | } | ||
321 | |||
322 | WARN_ON(!done); | ||
323 | |||
324 | wdev_unlock(wdev); | ||
71 | } | 325 | } |
72 | EXPORT_SYMBOL(cfg80211_send_assoc_timeout); | 326 | EXPORT_SYMBOL(cfg80211_send_assoc_timeout); |
73 | 327 | ||
74 | void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, | 328 | void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, |
75 | enum nl80211_key_type key_type, int key_id, | 329 | enum nl80211_key_type key_type, int key_id, |
76 | const u8 *tsc) | 330 | const u8 *tsc, gfp_t gfp) |
77 | { | 331 | { |
78 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; | 332 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; |
79 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 333 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
80 | nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc); | 334 | #ifdef CONFIG_WIRELESS_EXT |
335 | union iwreq_data wrqu; | ||
336 | char *buf = kmalloc(128, gfp); | ||
337 | |||
338 | if (buf) { | ||
339 | sprintf(buf, "MLME-MICHAELMICFAILURE.indication(" | ||
340 | "keyid=%d %scast addr=%pM)", key_id, | ||
341 | key_type == NL80211_KEYTYPE_GROUP ? "broad" : "uni", | ||
342 | addr); | ||
343 | memset(&wrqu, 0, sizeof(wrqu)); | ||
344 | wrqu.data.length = strlen(buf); | ||
345 | wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); | ||
346 | kfree(buf); | ||
347 | } | ||
348 | #endif | ||
349 | |||
350 | nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc, gfp); | ||
81 | } | 351 | } |
82 | EXPORT_SYMBOL(cfg80211_michael_mic_failure); | 352 | EXPORT_SYMBOL(cfg80211_michael_mic_failure); |
353 | |||
354 | /* some MLME handling for userspace SME */ | ||
355 | int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | ||
356 | struct net_device *dev, | ||
357 | struct ieee80211_channel *chan, | ||
358 | enum nl80211_auth_type auth_type, | ||
359 | const u8 *bssid, | ||
360 | const u8 *ssid, int ssid_len, | ||
361 | const u8 *ie, int ie_len, | ||
362 | const u8 *key, int key_len, int key_idx) | ||
363 | { | ||
364 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
365 | struct cfg80211_auth_request req; | ||
366 | struct cfg80211_internal_bss *bss; | ||
367 | int i, err, slot = -1, nfree = 0; | ||
368 | |||
369 | ASSERT_WDEV_LOCK(wdev); | ||
370 | |||
371 | if (auth_type == NL80211_AUTHTYPE_SHARED_KEY) | ||
372 | if (!key || !key_len || key_idx < 0 || key_idx > 4) | ||
373 | return -EINVAL; | ||
374 | |||
375 | if (wdev->current_bss && | ||
376 | memcmp(bssid, wdev->current_bss->pub.bssid, ETH_ALEN) == 0) | ||
377 | return -EALREADY; | ||
378 | |||
379 | for (i = 0; i < MAX_AUTH_BSSES; i++) { | ||
380 | if (wdev->authtry_bsses[i] && | ||
381 | memcmp(bssid, wdev->authtry_bsses[i]->pub.bssid, | ||
382 | ETH_ALEN) == 0) | ||
383 | return -EALREADY; | ||
384 | if (wdev->auth_bsses[i] && | ||
385 | memcmp(bssid, wdev->auth_bsses[i]->pub.bssid, | ||
386 | ETH_ALEN) == 0) | ||
387 | return -EALREADY; | ||
388 | } | ||
389 | |||
390 | memset(&req, 0, sizeof(req)); | ||
391 | |||
392 | req.ie = ie; | ||
393 | req.ie_len = ie_len; | ||
394 | req.auth_type = auth_type; | ||
395 | req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, | ||
396 | WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); | ||
397 | req.key = key; | ||
398 | req.key_len = key_len; | ||
399 | req.key_idx = key_idx; | ||
400 | if (!req.bss) | ||
401 | return -ENOENT; | ||
402 | |||
403 | bss = bss_from_pub(req.bss); | ||
404 | |||
405 | for (i = 0; i < MAX_AUTH_BSSES; i++) { | ||
406 | if (!wdev->auth_bsses[i] && !wdev->authtry_bsses[i]) { | ||
407 | slot = i; | ||
408 | nfree++; | ||
409 | } | ||
410 | } | ||
411 | |||
412 | /* we need one free slot for disassoc and one for this auth */ | ||
413 | if (nfree < 2) { | ||
414 | err = -ENOSPC; | ||
415 | goto out; | ||
416 | } | ||
417 | |||
418 | wdev->authtry_bsses[slot] = bss; | ||
419 | cfg80211_hold_bss(bss); | ||
420 | |||
421 | err = rdev->ops->auth(&rdev->wiphy, dev, &req); | ||
422 | if (err) { | ||
423 | wdev->authtry_bsses[slot] = NULL; | ||
424 | cfg80211_unhold_bss(bss); | ||
425 | } | ||
426 | |||
427 | out: | ||
428 | if (err) | ||
429 | cfg80211_put_bss(req.bss); | ||
430 | return err; | ||
431 | } | ||
432 | |||
433 | int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | ||
434 | struct net_device *dev, struct ieee80211_channel *chan, | ||
435 | enum nl80211_auth_type auth_type, const u8 *bssid, | ||
436 | const u8 *ssid, int ssid_len, | ||
437 | const u8 *ie, int ie_len, | ||
438 | const u8 *key, int key_len, int key_idx) | ||
439 | { | ||
440 | int err; | ||
441 | |||
442 | wdev_lock(dev->ieee80211_ptr); | ||
443 | err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, | ||
444 | ssid, ssid_len, ie, ie_len, | ||
445 | key, key_len, key_idx); | ||
446 | wdev_unlock(dev->ieee80211_ptr); | ||
447 | |||
448 | return err; | ||
449 | } | ||
450 | |||
451 | int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, | ||
452 | struct net_device *dev, | ||
453 | struct ieee80211_channel *chan, | ||
454 | const u8 *bssid, const u8 *prev_bssid, | ||
455 | const u8 *ssid, int ssid_len, | ||
456 | const u8 *ie, int ie_len, bool use_mfp, | ||
457 | struct cfg80211_crypto_settings *crypt) | ||
458 | { | ||
459 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
460 | struct cfg80211_assoc_request req; | ||
461 | struct cfg80211_internal_bss *bss; | ||
462 | int i, err, slot = -1; | ||
463 | |||
464 | ASSERT_WDEV_LOCK(wdev); | ||
465 | |||
466 | memset(&req, 0, sizeof(req)); | ||
467 | |||
468 | if (wdev->current_bss) | ||
469 | return -EALREADY; | ||
470 | |||
471 | req.ie = ie; | ||
472 | req.ie_len = ie_len; | ||
473 | memcpy(&req.crypto, crypt, sizeof(req.crypto)); | ||
474 | req.use_mfp = use_mfp; | ||
475 | req.prev_bssid = prev_bssid; | ||
476 | req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, | ||
477 | WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); | ||
478 | if (!req.bss) | ||
479 | return -ENOENT; | ||
480 | |||
481 | bss = bss_from_pub(req.bss); | ||
482 | |||
483 | for (i = 0; i < MAX_AUTH_BSSES; i++) { | ||
484 | if (bss == wdev->auth_bsses[i]) { | ||
485 | slot = i; | ||
486 | break; | ||
487 | } | ||
488 | } | ||
489 | |||
490 | if (slot < 0) { | ||
491 | err = -ENOTCONN; | ||
492 | goto out; | ||
493 | } | ||
494 | |||
495 | err = rdev->ops->assoc(&rdev->wiphy, dev, &req); | ||
496 | out: | ||
497 | /* still a reference in wdev->auth_bsses[slot] */ | ||
498 | cfg80211_put_bss(req.bss); | ||
499 | return err; | ||
500 | } | ||
501 | |||
502 | int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, | ||
503 | struct net_device *dev, | ||
504 | struct ieee80211_channel *chan, | ||
505 | const u8 *bssid, const u8 *prev_bssid, | ||
506 | const u8 *ssid, int ssid_len, | ||
507 | const u8 *ie, int ie_len, bool use_mfp, | ||
508 | struct cfg80211_crypto_settings *crypt) | ||
509 | { | ||
510 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
511 | int err; | ||
512 | |||
513 | wdev_lock(wdev); | ||
514 | err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, | ||
515 | ssid, ssid_len, ie, ie_len, use_mfp, crypt); | ||
516 | wdev_unlock(wdev); | ||
517 | |||
518 | return err; | ||
519 | } | ||
520 | |||
521 | int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | ||
522 | struct net_device *dev, const u8 *bssid, | ||
523 | const u8 *ie, int ie_len, u16 reason) | ||
524 | { | ||
525 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
526 | struct cfg80211_deauth_request req; | ||
527 | int i; | ||
528 | |||
529 | ASSERT_WDEV_LOCK(wdev); | ||
530 | |||
531 | memset(&req, 0, sizeof(req)); | ||
532 | req.reason_code = reason; | ||
533 | req.ie = ie; | ||
534 | req.ie_len = ie_len; | ||
535 | if (wdev->current_bss && | ||
536 | memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { | ||
537 | req.bss = &wdev->current_bss->pub; | ||
538 | } else for (i = 0; i < MAX_AUTH_BSSES; i++) { | ||
539 | if (wdev->auth_bsses[i] && | ||
540 | memcmp(bssid, wdev->auth_bsses[i]->pub.bssid, ETH_ALEN) == 0) { | ||
541 | req.bss = &wdev->auth_bsses[i]->pub; | ||
542 | break; | ||
543 | } | ||
544 | if (wdev->authtry_bsses[i] && | ||
545 | memcmp(bssid, wdev->authtry_bsses[i]->pub.bssid, ETH_ALEN) == 0) { | ||
546 | req.bss = &wdev->authtry_bsses[i]->pub; | ||
547 | break; | ||
548 | } | ||
549 | } | ||
550 | |||
551 | if (!req.bss) | ||
552 | return -ENOTCONN; | ||
553 | |||
554 | return rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev); | ||
555 | } | ||
556 | |||
557 | int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | ||
558 | struct net_device *dev, const u8 *bssid, | ||
559 | const u8 *ie, int ie_len, u16 reason) | ||
560 | { | ||
561 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
562 | int err; | ||
563 | |||
564 | wdev_lock(wdev); | ||
565 | err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason); | ||
566 | wdev_unlock(wdev); | ||
567 | |||
568 | return err; | ||
569 | } | ||
570 | |||
571 | static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | ||
572 | struct net_device *dev, const u8 *bssid, | ||
573 | const u8 *ie, int ie_len, u16 reason) | ||
574 | { | ||
575 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
576 | struct cfg80211_disassoc_request req; | ||
577 | |||
578 | ASSERT_WDEV_LOCK(wdev); | ||
579 | |||
580 | if (wdev->sme_state != CFG80211_SME_CONNECTED) | ||
581 | return -ENOTCONN; | ||
582 | |||
583 | if (WARN_ON(!wdev->current_bss)) | ||
584 | return -ENOTCONN; | ||
585 | |||
586 | memset(&req, 0, sizeof(req)); | ||
587 | req.reason_code = reason; | ||
588 | req.ie = ie; | ||
589 | req.ie_len = ie_len; | ||
590 | if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) | ||
591 | req.bss = &wdev->current_bss->pub; | ||
592 | else | ||
593 | return -ENOTCONN; | ||
594 | |||
595 | return rdev->ops->disassoc(&rdev->wiphy, dev, &req, wdev); | ||
596 | } | ||
597 | |||
598 | int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | ||
599 | struct net_device *dev, const u8 *bssid, | ||
600 | const u8 *ie, int ie_len, u16 reason) | ||
601 | { | ||
602 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
603 | int err; | ||
604 | |||
605 | wdev_lock(wdev); | ||
606 | err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason); | ||
607 | wdev_unlock(wdev); | ||
608 | |||
609 | return err; | ||
610 | } | ||
611 | |||
612 | void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, | ||
613 | struct net_device *dev) | ||
614 | { | ||
615 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
616 | struct cfg80211_deauth_request req; | ||
617 | int i; | ||
618 | |||
619 | ASSERT_WDEV_LOCK(wdev); | ||
620 | |||
621 | if (!rdev->ops->deauth) | ||
622 | return; | ||
623 | |||
624 | memset(&req, 0, sizeof(req)); | ||
625 | req.reason_code = WLAN_REASON_DEAUTH_LEAVING; | ||
626 | req.ie = NULL; | ||
627 | req.ie_len = 0; | ||
628 | |||
629 | if (wdev->current_bss) { | ||
630 | req.bss = &wdev->current_bss->pub; | ||
631 | rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev); | ||
632 | if (wdev->current_bss) { | ||
633 | cfg80211_unhold_bss(wdev->current_bss); | ||
634 | cfg80211_put_bss(&wdev->current_bss->pub); | ||
635 | wdev->current_bss = NULL; | ||
636 | } | ||
637 | } | ||
638 | |||
639 | for (i = 0; i < MAX_AUTH_BSSES; i++) { | ||
640 | if (wdev->auth_bsses[i]) { | ||
641 | req.bss = &wdev->auth_bsses[i]->pub; | ||
642 | rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev); | ||
643 | if (wdev->auth_bsses[i]) { | ||
644 | cfg80211_unhold_bss(wdev->auth_bsses[i]); | ||
645 | cfg80211_put_bss(&wdev->auth_bsses[i]->pub); | ||
646 | wdev->auth_bsses[i] = NULL; | ||
647 | } | ||
648 | } | ||
649 | if (wdev->authtry_bsses[i]) { | ||
650 | req.bss = &wdev->authtry_bsses[i]->pub; | ||
651 | rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev); | ||
652 | if (wdev->authtry_bsses[i]) { | ||
653 | cfg80211_unhold_bss(wdev->authtry_bsses[i]); | ||
654 | cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); | ||
655 | wdev->authtry_bsses[i] = NULL; | ||
656 | } | ||
657 | } | ||
658 | } | ||
659 | } | ||
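cfg80211_send_deauth() and cfg80211_send_disassoc() above gain a cookie argument: a non-NULL cookie signals that the call is coming from inside a driver callback that already runs under the wdev lock (the diff passes wdev into rdev->ops->deauth() for that purpose), so only the NULL-cookie path takes the lock before calling the __cfg80211_send_* worker. A standalone sketch of that re-entrancy guard; all names are invented and a pthread mutex stands in for wdev_lock():

	#include <pthread.h>
	#include <stdio.h>

	struct toy_dev {
		pthread_mutex_t lock;
		int connected;
	};

	/* Locked worker: assumes toy_dev->lock is already held. */
	static void __toy_send_deauth(struct toy_dev *dev)
	{
		dev->connected = 0;
		printf("deauth processed, connected=%d\n", dev->connected);
	}

	/*
	 * Cookie convention as in mlme.c above: a non-NULL cookie means the
	 * caller already holds the lock, so do not take it again.
	 */
	static void toy_send_deauth(struct toy_dev *dev, void *cookie)
	{
		if (cookie) {
			__toy_send_deauth(dev);
		} else {
			pthread_mutex_lock(&dev->lock);
			__toy_send_deauth(dev);
			pthread_mutex_unlock(&dev->lock);
		}
	}

	/* A callback invoked with the lock held passes the device as cookie. */
	static void toy_driver_deauth(struct toy_dev *dev)
	{
		pthread_mutex_lock(&dev->lock);
		toy_send_deauth(dev, dev);	/* re-entrant path, no deadlock */
		pthread_mutex_unlock(&dev->lock);
	}

	int main(void)
	{
		struct toy_dev dev = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.connected = 1,
		};

		toy_driver_deauth(&dev);	/* from inside a locked callback */
		dev.connected = 1;
		toy_send_deauth(&dev, NULL);	/* normal path takes the lock */
		return 0;
	}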
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 634496b3ed77..eddab097435c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -14,8 +14,10 @@ | |||
14 | #include <linux/rtnetlink.h> | 14 | #include <linux/rtnetlink.h> |
15 | #include <linux/netlink.h> | 15 | #include <linux/netlink.h> |
16 | #include <linux/etherdevice.h> | 16 | #include <linux/etherdevice.h> |
17 | #include <net/net_namespace.h> | ||
17 | #include <net/genetlink.h> | 18 | #include <net/genetlink.h> |
18 | #include <net/cfg80211.h> | 19 | #include <net/cfg80211.h> |
20 | #include <net/sock.h> | ||
19 | #include "core.h" | 21 | #include "core.h" |
20 | #include "nl80211.h" | 22 | #include "nl80211.h" |
21 | #include "reg.h" | 23 | #include "reg.h" |
@@ -27,27 +29,29 @@ static struct genl_family nl80211_fam = { | |||
27 | .hdrsize = 0, /* no private header */ | 29 | .hdrsize = 0, /* no private header */ |
28 | .version = 1, /* no particular meaning now */ | 30 | .version = 1, /* no particular meaning now */ |
29 | .maxattr = NL80211_ATTR_MAX, | 31 | .maxattr = NL80211_ATTR_MAX, |
32 | .netnsok = true, | ||
30 | }; | 33 | }; |
31 | 34 | ||
32 | /* internal helper: get drv and dev */ | 35 | /* internal helper: get rdev and dev */ |
33 | static int get_drv_dev_by_info_ifindex(struct nlattr **attrs, | 36 | static int get_rdev_dev_by_info_ifindex(struct genl_info *info, |
34 | struct cfg80211_registered_device **drv, | 37 | struct cfg80211_registered_device **rdev, |
35 | struct net_device **dev) | 38 | struct net_device **dev) |
36 | { | 39 | { |
40 | struct nlattr **attrs = info->attrs; | ||
37 | int ifindex; | 41 | int ifindex; |
38 | 42 | ||
39 | if (!attrs[NL80211_ATTR_IFINDEX]) | 43 | if (!attrs[NL80211_ATTR_IFINDEX]) |
40 | return -EINVAL; | 44 | return -EINVAL; |
41 | 45 | ||
42 | ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); | 46 | ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); |
43 | *dev = dev_get_by_index(&init_net, ifindex); | 47 | *dev = dev_get_by_index(genl_info_net(info), ifindex); |
44 | if (!*dev) | 48 | if (!*dev) |
45 | return -ENODEV; | 49 | return -ENODEV; |
46 | 50 | ||
47 | *drv = cfg80211_get_dev_from_ifindex(ifindex); | 51 | *rdev = cfg80211_get_dev_from_ifindex(genl_info_net(info), ifindex); |
48 | if (IS_ERR(*drv)) { | 52 | if (IS_ERR(*rdev)) { |
49 | dev_put(*dev); | 53 | dev_put(*dev); |
50 | return PTR_ERR(*drv); | 54 | return PTR_ERR(*rdev); |
51 | } | 55 | } |
52 | 56 | ||
53 | return 0; | 57 | return 0; |
@@ -71,7 +75,9 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { | |||
71 | [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, | 75 | [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, |
72 | 76 | ||
73 | [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN }, | 77 | [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN }, |
78 | [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN }, | ||
74 | 79 | ||
80 | [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, | ||
75 | [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, | 81 | [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, |
76 | .len = WLAN_MAX_KEY_LEN }, | 82 | .len = WLAN_MAX_KEY_LEN }, |
77 | [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 }, | 83 | [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 }, |
@@ -128,6 +134,21 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { | |||
128 | .len = sizeof(struct nl80211_sta_flag_update), | 134 | .len = sizeof(struct nl80211_sta_flag_update), |
129 | }, | 135 | }, |
130 | [NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG }, | 136 | [NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG }, |
137 | [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG }, | ||
138 | [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 }, | ||
139 | [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, | ||
140 | [NL80211_ATTR_PID] = { .type = NLA_U32 }, | ||
141 | }; | ||
142 | |||
143 | /* policy for the attributes */ | ||
144 | static struct nla_policy | ||
145 | nl80211_key_policy[NL80211_KEY_MAX + 1] __read_mostly = { | ||
146 | [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, | ||
147 | [NL80211_KEY_IDX] = { .type = NLA_U8 }, | ||
148 | [NL80211_KEY_CIPHER] = { .type = NLA_U32 }, | ||
149 | [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 }, | ||
150 | [NL80211_KEY_DEFAULT] = { .type = NLA_FLAG }, | ||
151 | [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG }, | ||
131 | }; | 152 | }; |
132 | 153 | ||
133 | /* IE validation */ | 154 | /* IE validation */ |
@@ -194,6 +215,177 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, | |||
194 | 215 | ||
195 | /* netlink command implementations */ | 216 | /* netlink command implementations */ |
196 | 217 | ||
218 | struct key_parse { | ||
219 | struct key_params p; | ||
220 | int idx; | ||
221 | bool def, defmgmt; | ||
222 | }; | ||
223 | |||
224 | static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k) | ||
225 | { | ||
226 | struct nlattr *tb[NL80211_KEY_MAX + 1]; | ||
227 | int err = nla_parse_nested(tb, NL80211_KEY_MAX, key, | ||
228 | nl80211_key_policy); | ||
229 | if (err) | ||
230 | return err; | ||
231 | |||
232 | k->def = !!tb[NL80211_KEY_DEFAULT]; | ||
233 | k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT]; | ||
234 | |||
235 | if (tb[NL80211_KEY_IDX]) | ||
236 | k->idx = nla_get_u8(tb[NL80211_KEY_IDX]); | ||
237 | |||
238 | if (tb[NL80211_KEY_DATA]) { | ||
239 | k->p.key = nla_data(tb[NL80211_KEY_DATA]); | ||
240 | k->p.key_len = nla_len(tb[NL80211_KEY_DATA]); | ||
241 | } | ||
242 | |||
243 | if (tb[NL80211_KEY_SEQ]) { | ||
244 | k->p.seq = nla_data(tb[NL80211_KEY_SEQ]); | ||
245 | k->p.seq_len = nla_len(tb[NL80211_KEY_SEQ]); | ||
246 | } | ||
247 | |||
248 | if (tb[NL80211_KEY_CIPHER]) | ||
249 | k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]); | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k) | ||
255 | { | ||
256 | if (info->attrs[NL80211_ATTR_KEY_DATA]) { | ||
257 | k->p.key = nla_data(info->attrs[NL80211_ATTR_KEY_DATA]); | ||
258 | k->p.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]); | ||
259 | } | ||
260 | |||
261 | if (info->attrs[NL80211_ATTR_KEY_SEQ]) { | ||
262 | k->p.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]); | ||
263 | k->p.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]); | ||
264 | } | ||
265 | |||
266 | if (info->attrs[NL80211_ATTR_KEY_IDX]) | ||
267 | k->idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); | ||
268 | |||
269 | if (info->attrs[NL80211_ATTR_KEY_CIPHER]) | ||
270 | k->p.cipher = nla_get_u32(info->attrs[NL80211_ATTR_KEY_CIPHER]); | ||
271 | |||
272 | k->def = !!info->attrs[NL80211_ATTR_KEY_DEFAULT]; | ||
273 | k->defmgmt = !!info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT]; | ||
274 | |||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | static int nl80211_parse_key(struct genl_info *info, struct key_parse *k) | ||
279 | { | ||
280 | int err; | ||
281 | |||
282 | memset(k, 0, sizeof(*k)); | ||
283 | k->idx = -1; | ||
284 | |||
285 | if (info->attrs[NL80211_ATTR_KEY]) | ||
286 | err = nl80211_parse_key_new(info->attrs[NL80211_ATTR_KEY], k); | ||
287 | else | ||
288 | err = nl80211_parse_key_old(info, k); | ||
289 | |||
290 | if (err) | ||
291 | return err; | ||
292 | |||
293 | if (k->def && k->defmgmt) | ||
294 | return -EINVAL; | ||
295 | |||
296 | if (k->idx != -1) { | ||
297 | if (k->defmgmt) { | ||
298 | if (k->idx < 4 || k->idx > 5) | ||
299 | return -EINVAL; | ||
300 | } else if (k->def) { | ||
301 | if (k->idx < 0 || k->idx > 3) | ||
302 | return -EINVAL; | ||
303 | } else { | ||
304 | if (k->idx < 0 || k->idx > 5) | ||
305 | return -EINVAL; | ||
306 | } | ||
307 | } | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | static struct cfg80211_cached_keys * | ||
313 | nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, | ||
314 | struct nlattr *keys) | ||
315 | { | ||
316 | struct key_parse parse; | ||
317 | struct nlattr *key; | ||
318 | struct cfg80211_cached_keys *result; | ||
319 | int rem, err, def = 0; | ||
320 | |||
321 | result = kzalloc(sizeof(*result), GFP_KERNEL); | ||
322 | if (!result) | ||
323 | return ERR_PTR(-ENOMEM); | ||
324 | |||
325 | result->def = -1; | ||
326 | result->defmgmt = -1; | ||
327 | |||
328 | nla_for_each_nested(key, keys, rem) { | ||
329 | memset(&parse, 0, sizeof(parse)); | ||
330 | parse.idx = -1; | ||
331 | |||
332 | err = nl80211_parse_key_new(key, &parse); | ||
333 | if (err) | ||
334 | goto error; | ||
335 | err = -EINVAL; | ||
336 | if (!parse.p.key) | ||
337 | goto error; | ||
338 | if (parse.idx < 0 || parse.idx > 4) | ||
339 | goto error; | ||
340 | if (parse.def) { | ||
341 | if (def) | ||
342 | goto error; | ||
343 | def = 1; | ||
344 | result->def = parse.idx; | ||
345 | } else if (parse.defmgmt) | ||
346 | goto error; | ||
347 | err = cfg80211_validate_key_settings(rdev, &parse.p, | ||
348 | parse.idx, NULL); | ||
349 | if (err) | ||
350 | goto error; | ||
351 | result->params[parse.idx].cipher = parse.p.cipher; | ||
352 | result->params[parse.idx].key_len = parse.p.key_len; | ||
353 | result->params[parse.idx].key = result->data[parse.idx]; | ||
354 | memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len); | ||
355 | } | ||
356 | |||
357 | return result; | ||
358 | error: | ||
359 | kfree(result); | ||
360 | return ERR_PTR(err); | ||
361 | } | ||
362 | |||
363 | static int nl80211_key_allowed(struct wireless_dev *wdev) | ||
364 | { | ||
365 | ASSERT_WDEV_LOCK(wdev); | ||
366 | |||
367 | if (!netif_running(wdev->netdev)) | ||
368 | return -ENETDOWN; | ||
369 | |||
370 | switch (wdev->iftype) { | ||
371 | case NL80211_IFTYPE_AP: | ||
372 | case NL80211_IFTYPE_AP_VLAN: | ||
373 | break; | ||
374 | case NL80211_IFTYPE_ADHOC: | ||
375 | if (!wdev->current_bss) | ||
376 | return -ENOLINK; | ||
377 | break; | ||
378 | case NL80211_IFTYPE_STATION: | ||
379 | if (wdev->sme_state != CFG80211_SME_CONNECTED) | ||
380 | return -ENOLINK; | ||
381 | break; | ||
382 | default: | ||
383 | return -EINVAL; | ||
384 | } | ||
385 | |||
386 | return 0; | ||
387 | } | ||
388 | |||
197 | static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, | 389 | static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, |
198 | struct cfg80211_registered_device *dev) | 390 | struct cfg80211_registered_device *dev) |
199 | { | 391 | { |
@@ -216,6 +408,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
216 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx); | 408 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx); |
217 | NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); | 409 | NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); |
218 | 410 | ||
411 | NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, | ||
412 | cfg80211_rdev_list_generation); | ||
413 | |||
219 | NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, | 414 | NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, |
220 | dev->wiphy.retry_short); | 415 | dev->wiphy.retry_short); |
221 | NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, | 416 | NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, |
@@ -345,8 +540,23 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
345 | CMD(deauth, DEAUTHENTICATE); | 540 | CMD(deauth, DEAUTHENTICATE); |
346 | CMD(disassoc, DISASSOCIATE); | 541 | CMD(disassoc, DISASSOCIATE); |
347 | CMD(join_ibss, JOIN_IBSS); | 542 | CMD(join_ibss, JOIN_IBSS); |
543 | if (dev->wiphy.netnsok) { | ||
544 | i++; | ||
545 | NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); | ||
546 | } | ||
348 | 547 | ||
349 | #undef CMD | 548 | #undef CMD |
549 | |||
550 | if (dev->ops->connect || dev->ops->auth) { | ||
551 | i++; | ||
552 | NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT); | ||
553 | } | ||
554 | |||
555 | if (dev->ops->disconnect || dev->ops->deauth) { | ||
556 | i++; | ||
557 | NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT); | ||
558 | } | ||
559 | |||
350 | nla_nest_end(msg, nl_cmds); | 560 | nla_nest_end(msg, nl_cmds); |
351 | 561 | ||
352 | return genlmsg_end(msg, hdr); | 562 | return genlmsg_end(msg, hdr); |
@@ -363,7 +573,9 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) | |||
363 | struct cfg80211_registered_device *dev; | 573 | struct cfg80211_registered_device *dev; |
364 | 574 | ||
365 | mutex_lock(&cfg80211_mutex); | 575 | mutex_lock(&cfg80211_mutex); |
366 | list_for_each_entry(dev, &cfg80211_drv_list, list) { | 576 | list_for_each_entry(dev, &cfg80211_rdev_list, list) { |
577 | if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk))) | ||
578 | continue; | ||
367 | if (++idx <= start) | 579 | if (++idx <= start) |
368 | continue; | 580 | continue; |
369 | if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid, | 581 | if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid, |
@@ -396,14 +608,14 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
396 | if (nl80211_send_wiphy(msg, info->snd_pid, info->snd_seq, 0, dev) < 0) | 608 | if (nl80211_send_wiphy(msg, info->snd_pid, info->snd_seq, 0, dev) < 0) |
397 | goto out_free; | 609 | goto out_free; |
398 | 610 | ||
399 | cfg80211_put_dev(dev); | 611 | cfg80211_unlock_rdev(dev); |
400 | 612 | ||
401 | return genlmsg_unicast(msg, info->snd_pid); | 613 | return genlmsg_reply(msg, info); |
402 | 614 | ||
403 | out_free: | 615 | out_free: |
404 | nlmsg_free(msg); | 616 | nlmsg_free(msg); |
405 | out_err: | 617 | out_err: |
406 | cfg80211_put_dev(dev); | 618 | cfg80211_unlock_rdev(dev); |
407 | return -ENOBUFS; | 619 | return -ENOBUFS; |
408 | } | 620 | } |
409 | 621 | ||
@@ -445,7 +657,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
445 | 657 | ||
446 | mutex_lock(&cfg80211_mutex); | 658 | mutex_lock(&cfg80211_mutex); |
447 | 659 | ||
448 | rdev = __cfg80211_drv_from_info(info); | 660 | rdev = __cfg80211_rdev_from_info(info); |
449 | if (IS_ERR(rdev)) { | 661 | if (IS_ERR(rdev)) { |
450 | mutex_unlock(&cfg80211_mutex); | 662 | mutex_unlock(&cfg80211_mutex); |
451 | result = PTR_ERR(rdev); | 663 | result = PTR_ERR(rdev); |
@@ -492,15 +704,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
492 | 704 | ||
493 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { | 705 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { |
494 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; | 706 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; |
495 | struct ieee80211_channel *chan; | ||
496 | struct ieee80211_sta_ht_cap *ht_cap; | ||
497 | u32 freq; | 707 | u32 freq; |
498 | 708 | ||
499 | if (!rdev->ops->set_channel) { | ||
500 | result = -EOPNOTSUPP; | ||
501 | goto bad_res; | ||
502 | } | ||
503 | |||
504 | result = -EINVAL; | 709 | result = -EINVAL; |
505 | 710 | ||
506 | if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { | 711 | if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { |
@@ -514,38 +719,10 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
514 | } | 719 | } |
515 | 720 | ||
516 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); | 721 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); |
517 | chan = ieee80211_get_channel(&rdev->wiphy, freq); | ||
518 | |||
519 | /* Primary channel not allowed */ | ||
520 | if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) | ||
521 | goto bad_res; | ||
522 | |||
523 | if (channel_type == NL80211_CHAN_HT40MINUS && | ||
524 | (chan->flags & IEEE80211_CHAN_NO_HT40MINUS)) | ||
525 | goto bad_res; | ||
526 | else if (channel_type == NL80211_CHAN_HT40PLUS && | ||
527 | (chan->flags & IEEE80211_CHAN_NO_HT40PLUS)) | ||
528 | goto bad_res; | ||
529 | |||
530 | /* | ||
531 | * At this point we know if that if HT40 was requested | ||
532 | * we are allowed to use it and the extension channel | ||
533 | * exists. | ||
534 | */ | ||
535 | |||
536 | ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap; | ||
537 | 722 | ||
538 | /* no HT capabilities or intolerant */ | 723 | mutex_lock(&rdev->devlist_mtx); |
539 | if (channel_type != NL80211_CHAN_NO_HT) { | 724 | result = rdev_set_freq(rdev, NULL, freq, channel_type); |
540 | if (!ht_cap->ht_supported) | 725 | mutex_unlock(&rdev->devlist_mtx); |
541 | goto bad_res; | ||
542 | if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || | ||
543 | (ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)) | ||
544 | goto bad_res; | ||
545 | } | ||
546 | |||
547 | result = rdev->ops->set_channel(&rdev->wiphy, chan, | ||
548 | channel_type); | ||
549 | if (result) | 726 | if (result) |
550 | goto bad_res; | 727 | goto bad_res; |
551 | } | 728 | } |
@@ -651,6 +828,11 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
651 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); | 828 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); |
652 | NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); | 829 | NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); |
653 | NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); | 830 | NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); |
831 | |||
832 | NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, | ||
833 | rdev->devlist_generation ^ | ||
834 | (cfg80211_rdev_list_generation << 2)); | ||
835 | |||
654 | return genlmsg_end(msg, hdr); | 836 | return genlmsg_end(msg, hdr); |
655 | 837 | ||
656 | nla_put_failure: | 838 | nla_put_failure: |
@@ -664,32 +846,34 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * | |||
664 | int if_idx = 0; | 846 | int if_idx = 0; |
665 | int wp_start = cb->args[0]; | 847 | int wp_start = cb->args[0]; |
666 | int if_start = cb->args[1]; | 848 | int if_start = cb->args[1]; |
667 | struct cfg80211_registered_device *dev; | 849 | struct cfg80211_registered_device *rdev; |
668 | struct wireless_dev *wdev; | 850 | struct wireless_dev *wdev; |
669 | 851 | ||
670 | mutex_lock(&cfg80211_mutex); | 852 | mutex_lock(&cfg80211_mutex); |
671 | list_for_each_entry(dev, &cfg80211_drv_list, list) { | 853 | list_for_each_entry(rdev, &cfg80211_rdev_list, list) { |
854 | if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk))) | ||
855 | continue; | ||
672 | if (wp_idx < wp_start) { | 856 | if (wp_idx < wp_start) { |
673 | wp_idx++; | 857 | wp_idx++; |
674 | continue; | 858 | continue; |
675 | } | 859 | } |
676 | if_idx = 0; | 860 | if_idx = 0; |
677 | 861 | ||
678 | mutex_lock(&dev->devlist_mtx); | 862 | mutex_lock(&rdev->devlist_mtx); |
679 | list_for_each_entry(wdev, &dev->netdev_list, list) { | 863 | list_for_each_entry(wdev, &rdev->netdev_list, list) { |
680 | if (if_idx < if_start) { | 864 | if (if_idx < if_start) { |
681 | if_idx++; | 865 | if_idx++; |
682 | continue; | 866 | continue; |
683 | } | 867 | } |
684 | if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid, | 868 | if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid, |
685 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 869 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
686 | dev, wdev->netdev) < 0) { | 870 | rdev, wdev->netdev) < 0) { |
687 | mutex_unlock(&dev->devlist_mtx); | 871 | mutex_unlock(&rdev->devlist_mtx); |
688 | goto out; | 872 | goto out; |
689 | } | 873 | } |
690 | if_idx++; | 874 | if_idx++; |
691 | } | 875 | } |
692 | mutex_unlock(&dev->devlist_mtx); | 876 | mutex_unlock(&rdev->devlist_mtx); |
693 | 877 | ||
694 | wp_idx++; | 878 | wp_idx++; |
695 | } | 879 | } |
@@ -709,7 +893,7 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) | |||
709 | struct net_device *netdev; | 893 | struct net_device *netdev; |
710 | int err; | 894 | int err; |
711 | 895 | ||
712 | err = get_drv_dev_by_info_ifindex(info->attrs, &dev, &netdev); | 896 | err = get_rdev_dev_by_info_ifindex(info, &dev, &netdev); |
713 | if (err) | 897 | if (err) |
714 | return err; | 898 | return err; |
715 | 899 | ||
@@ -722,15 +906,15 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) | |||
722 | goto out_free; | 906 | goto out_free; |
723 | 907 | ||
724 | dev_put(netdev); | 908 | dev_put(netdev); |
725 | cfg80211_put_dev(dev); | 909 | cfg80211_unlock_rdev(dev); |
726 | 910 | ||
727 | return genlmsg_unicast(msg, info->snd_pid); | 911 | return genlmsg_reply(msg, info); |
728 | 912 | ||
729 | out_free: | 913 | out_free: |
730 | nlmsg_free(msg); | 914 | nlmsg_free(msg); |
731 | out_err: | 915 | out_err: |
732 | dev_put(netdev); | 916 | dev_put(netdev); |
733 | cfg80211_put_dev(dev); | 917 | cfg80211_unlock_rdev(dev); |
734 | return -ENOBUFS; | 918 | return -ENOBUFS; |
735 | } | 919 | } |
736 | 920 | ||
@@ -765,9 +949,9 @@ static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags) | |||
765 | 949 | ||
766 | static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) | 950 | static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) |
767 | { | 951 | { |
768 | struct cfg80211_registered_device *drv; | 952 | struct cfg80211_registered_device *rdev; |
769 | struct vif_params params; | 953 | struct vif_params params; |
770 | int err, ifindex; | 954 | int err; |
771 | enum nl80211_iftype otype, ntype; | 955 | enum nl80211_iftype otype, ntype; |
772 | struct net_device *dev; | 956 | struct net_device *dev; |
773 | u32 _flags, *flags = NULL; | 957 | u32 _flags, *flags = NULL; |
@@ -777,13 +961,11 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) | |||
777 | 961 | ||
778 | rtnl_lock(); | 962 | rtnl_lock(); |
779 | 963 | ||
780 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 964 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
781 | if (err) | 965 | if (err) |
782 | goto unlock_rtnl; | 966 | goto unlock_rtnl; |
783 | 967 | ||
784 | ifindex = dev->ifindex; | ||
785 | otype = ntype = dev->ieee80211_ptr->iftype; | 968 | otype = ntype = dev->ieee80211_ptr->iftype; |
786 | dev_put(dev); | ||
787 | 969 | ||
788 | if (info->attrs[NL80211_ATTR_IFTYPE]) { | 970 | if (info->attrs[NL80211_ATTR_IFTYPE]) { |
789 | ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); | 971 | ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); |
@@ -795,12 +977,6 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) | |||
795 | } | 977 | } |
796 | } | 978 | } |
797 | 979 | ||
798 | if (!drv->ops->change_virtual_intf || | ||
799 | !(drv->wiphy.interface_modes & (1 << ntype))) { | ||
800 | err = -EOPNOTSUPP; | ||
801 | goto unlock; | ||
802 | } | ||
803 | |||
804 | if (info->attrs[NL80211_ATTR_MESH_ID]) { | 980 | if (info->attrs[NL80211_ATTR_MESH_ID]) { |
805 | if (ntype != NL80211_IFTYPE_MESH_POINT) { | 981 | if (ntype != NL80211_IFTYPE_MESH_POINT) { |
806 | err = -EINVAL; | 982 | err = -EINVAL; |
@@ -826,21 +1002,13 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) | |||
826 | } | 1002 | } |
827 | 1003 | ||
828 | if (change) | 1004 | if (change) |
829 | err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, | 1005 | err = cfg80211_change_iface(rdev, dev, ntype, flags, ¶ms); |
830 | ntype, flags, ¶ms); | ||
831 | else | 1006 | else |
832 | err = 0; | 1007 | err = 0; |
833 | 1008 | ||
834 | dev = __dev_get_by_index(&init_net, ifindex); | ||
835 | WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != ntype)); | ||
836 | |||
837 | if (dev && !err && (ntype != otype)) { | ||
838 | if (otype == NL80211_IFTYPE_ADHOC) | ||
839 | cfg80211_clear_ibss(dev, false); | ||
840 | } | ||
841 | |||
842 | unlock: | 1009 | unlock: |
843 | cfg80211_put_dev(drv); | 1010 | dev_put(dev); |
1011 | cfg80211_unlock_rdev(rdev); | ||
844 | unlock_rtnl: | 1012 | unlock_rtnl: |
845 | rtnl_unlock(); | 1013 | rtnl_unlock(); |
846 | return err; | 1014 | return err; |
@@ -848,7 +1016,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) | |||
848 | 1016 | ||
849 | static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | 1017 | static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) |
850 | { | 1018 | { |
851 | struct cfg80211_registered_device *drv; | 1019 | struct cfg80211_registered_device *rdev; |
852 | struct vif_params params; | 1020 | struct vif_params params; |
853 | int err; | 1021 | int err; |
854 | enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; | 1022 | enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; |
@@ -867,14 +1035,14 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | |||
867 | 1035 | ||
868 | rtnl_lock(); | 1036 | rtnl_lock(); |
869 | 1037 | ||
870 | drv = cfg80211_get_dev_from_info(info); | 1038 | rdev = cfg80211_get_dev_from_info(info); |
871 | if (IS_ERR(drv)) { | 1039 | if (IS_ERR(rdev)) { |
872 | err = PTR_ERR(drv); | 1040 | err = PTR_ERR(rdev); |
873 | goto unlock_rtnl; | 1041 | goto unlock_rtnl; |
874 | } | 1042 | } |
875 | 1043 | ||
876 | if (!drv->ops->add_virtual_intf || | 1044 | if (!rdev->ops->add_virtual_intf || |
877 | !(drv->wiphy.interface_modes & (1 << type))) { | 1045 | !(rdev->wiphy.interface_modes & (1 << type))) { |
878 | err = -EOPNOTSUPP; | 1046 | err = -EOPNOTSUPP; |
879 | goto unlock; | 1047 | goto unlock; |
880 | } | 1048 | } |
@@ -888,12 +1056,12 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | |||
888 | err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? | 1056 | err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? |
889 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, | 1057 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, |
890 | &flags); | 1058 | &flags); |
891 | err = drv->ops->add_virtual_intf(&drv->wiphy, | 1059 | err = rdev->ops->add_virtual_intf(&rdev->wiphy, |
892 | nla_data(info->attrs[NL80211_ATTR_IFNAME]), | 1060 | nla_data(info->attrs[NL80211_ATTR_IFNAME]), |
893 | type, err ? NULL : &flags, ¶ms); | 1061 | type, err ? NULL : &flags, ¶ms); |
894 | 1062 | ||
895 | unlock: | 1063 | unlock: |
896 | cfg80211_put_dev(drv); | 1064 | cfg80211_unlock_rdev(rdev); |
897 | unlock_rtnl: | 1065 | unlock_rtnl: |
898 | rtnl_unlock(); | 1066 | rtnl_unlock(); |
899 | return err; | 1067 | return err; |
@@ -901,27 +1069,26 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | |||
901 | 1069 | ||
902 | static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) | 1070 | static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) |
903 | { | 1071 | { |
904 | struct cfg80211_registered_device *drv; | 1072 | struct cfg80211_registered_device *rdev; |
905 | int ifindex, err; | 1073 | int err; |
906 | struct net_device *dev; | 1074 | struct net_device *dev; |
907 | 1075 | ||
908 | rtnl_lock(); | 1076 | rtnl_lock(); |
909 | 1077 | ||
910 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 1078 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
911 | if (err) | 1079 | if (err) |
912 | goto unlock_rtnl; | 1080 | goto unlock_rtnl; |
913 | ifindex = dev->ifindex; | ||
914 | dev_put(dev); | ||
915 | 1081 | ||
916 | if (!drv->ops->del_virtual_intf) { | 1082 | if (!rdev->ops->del_virtual_intf) { |
917 | err = -EOPNOTSUPP; | 1083 | err = -EOPNOTSUPP; |
918 | goto out; | 1084 | goto out; |
919 | } | 1085 | } |
920 | 1086 | ||
921 | err = drv->ops->del_virtual_intf(&drv->wiphy, ifindex); | 1087 | err = rdev->ops->del_virtual_intf(&rdev->wiphy, dev); |
922 | 1088 | ||
923 | out: | 1089 | out: |
924 | cfg80211_put_dev(drv); | 1090 | cfg80211_unlock_rdev(rdev); |
1091 | dev_put(dev); | ||
925 | unlock_rtnl: | 1092 | unlock_rtnl: |
926 | rtnl_unlock(); | 1093 | rtnl_unlock(); |
927 | return err; | 1094 | return err; |
@@ -930,10 +1097,12 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) | |||
930 | struct get_key_cookie { | 1097 | struct get_key_cookie { |
931 | struct sk_buff *msg; | 1098 | struct sk_buff *msg; |
932 | int error; | 1099 | int error; |
1100 | int idx; | ||
933 | }; | 1101 | }; |
934 | 1102 | ||
935 | static void get_key_callback(void *c, struct key_params *params) | 1103 | static void get_key_callback(void *c, struct key_params *params) |
936 | { | 1104 | { |
1105 | struct nlattr *key; | ||
937 | struct get_key_cookie *cookie = c; | 1106 | struct get_key_cookie *cookie = c; |
938 | 1107 | ||
939 | if (params->key) | 1108 | if (params->key) |
@@ -948,6 +1117,26 @@ static void get_key_callback(void *c, struct key_params *params) | |||
948 | NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER, | 1117 | NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER, |
949 | params->cipher); | 1118 | params->cipher); |
950 | 1119 | ||
1120 | key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY); | ||
1121 | if (!key) | ||
1122 | goto nla_put_failure; | ||
1123 | |||
1124 | if (params->key) | ||
1125 | NLA_PUT(cookie->msg, NL80211_KEY_DATA, | ||
1126 | params->key_len, params->key); | ||
1127 | |||
1128 | if (params->seq) | ||
1129 | NLA_PUT(cookie->msg, NL80211_KEY_SEQ, | ||
1130 | params->seq_len, params->seq); | ||
1131 | |||
1132 | if (params->cipher) | ||
1133 | NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER, | ||
1134 | params->cipher); | ||
1135 | |||
1136 | NLA_PUT_U8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx); | ||
1137 | |||
1138 | nla_nest_end(cookie->msg, key); | ||
1139 | |||
951 | return; | 1140 | return; |
952 | nla_put_failure: | 1141 | nla_put_failure: |
953 | cookie->error = 1; | 1142 | cookie->error = 1; |
@@ -955,7 +1144,7 @@ static void get_key_callback(void *c, struct key_params *params) | |||
955 | 1144 | ||
956 | static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | 1145 | static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) |
957 | { | 1146 | { |
958 | struct cfg80211_registered_device *drv; | 1147 | struct cfg80211_registered_device *rdev; |
959 | int err; | 1148 | int err; |
960 | struct net_device *dev; | 1149 | struct net_device *dev; |
961 | u8 key_idx = 0; | 1150 | u8 key_idx = 0; |
@@ -977,11 +1166,11 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
977 | 1166 | ||
978 | rtnl_lock(); | 1167 | rtnl_lock(); |
979 | 1168 | ||
980 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 1169 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
981 | if (err) | 1170 | if (err) |
982 | goto unlock_rtnl; | 1171 | goto unlock_rtnl; |
983 | 1172 | ||
984 | if (!drv->ops->get_key) { | 1173 | if (!rdev->ops->get_key) { |
985 | err = -EOPNOTSUPP; | 1174 | err = -EOPNOTSUPP; |
986 | goto out; | 1175 | goto out; |
987 | } | 1176 | } |
@@ -1001,13 +1190,14 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
1001 | } | 1190 | } |
1002 | 1191 | ||
1003 | cookie.msg = msg; | 1192 | cookie.msg = msg; |
1193 | cookie.idx = key_idx; | ||
1004 | 1194 | ||
1005 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); | 1195 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); |
1006 | NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx); | 1196 | NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx); |
1007 | if (mac_addr) | 1197 | if (mac_addr) |
1008 | NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); | 1198 | NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); |
1009 | 1199 | ||
1010 | err = drv->ops->get_key(&drv->wiphy, dev, key_idx, mac_addr, | 1200 | err = rdev->ops->get_key(&rdev->wiphy, dev, key_idx, mac_addr, |
1011 | &cookie, get_key_callback); | 1201 | &cookie, get_key_callback); |
1012 | 1202 | ||
1013 | if (err) | 1203 | if (err) |
@@ -1017,7 +1207,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
1017 | goto nla_put_failure; | 1207 | goto nla_put_failure; |
1018 | 1208 | ||
1019 | genlmsg_end(msg, hdr); | 1209 | genlmsg_end(msg, hdr); |
1020 | err = genlmsg_unicast(msg, info->snd_pid); | 1210 | err = genlmsg_reply(msg, info); |
1021 | goto out; | 1211 | goto out; |
1022 | 1212 | ||
1023 | nla_put_failure: | 1213 | nla_put_failure: |
@@ -1025,7 +1215,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
1025 | free_msg: | 1215 | free_msg: |
1026 | nlmsg_free(msg); | 1216 | nlmsg_free(msg); |
1027 | out: | 1217 | out: |
1028 | cfg80211_put_dev(drv); | 1218 | cfg80211_unlock_rdev(rdev); |
1029 | dev_put(dev); | 1219 | dev_put(dev); |
1030 | unlock_rtnl: | 1220 | unlock_rtnl: |
1031 | rtnl_unlock(); | 1221 | rtnl_unlock(); |
@@ -1035,57 +1225,57 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
1035 | 1225 | ||
1036 | static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) | 1226 | static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) |
1037 | { | 1227 | { |
1038 | struct cfg80211_registered_device *drv; | 1228 | struct cfg80211_registered_device *rdev; |
1229 | struct key_parse key; | ||
1039 | int err; | 1230 | int err; |
1040 | struct net_device *dev; | 1231 | struct net_device *dev; |
1041 | u8 key_idx; | ||
1042 | int (*func)(struct wiphy *wiphy, struct net_device *netdev, | 1232 | int (*func)(struct wiphy *wiphy, struct net_device *netdev, |
1043 | u8 key_index); | 1233 | u8 key_index); |
1044 | 1234 | ||
1045 | if (!info->attrs[NL80211_ATTR_KEY_IDX]) | 1235 | err = nl80211_parse_key(info, &key);
1046 | return -EINVAL; | 1236 | if (err)
1047 | 1237 | return err; | |
1048 | key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); | ||
1049 | 1238 | ||
1050 | if (info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT]) { | 1239 | if (key.idx < 0) |
1051 | if (key_idx < 4 || key_idx > 5) | ||
1052 | return -EINVAL; | ||
1053 | } else if (key_idx > 3) | ||
1054 | return -EINVAL; | 1240 | return -EINVAL; |
1055 | 1241 | ||
1056 | /* currently only support setting default key */ | 1242 | /* only support setting default key */ |
1057 | if (!info->attrs[NL80211_ATTR_KEY_DEFAULT] && | 1243 | if (!key.def && !key.defmgmt) |
1058 | !info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT]) | ||
1059 | return -EINVAL; | 1244 | return -EINVAL; |
1060 | 1245 | ||
1061 | rtnl_lock(); | 1246 | rtnl_lock(); |
1062 | 1247 | ||
1063 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 1248 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
1064 | if (err) | 1249 | if (err) |
1065 | goto unlock_rtnl; | 1250 | goto unlock_rtnl; |
1066 | 1251 | ||
1067 | if (info->attrs[NL80211_ATTR_KEY_DEFAULT]) | 1252 | if (key.def) |
1068 | func = drv->ops->set_default_key; | 1253 | func = rdev->ops->set_default_key; |
1069 | else | 1254 | else |
1070 | func = drv->ops->set_default_mgmt_key; | 1255 | func = rdev->ops->set_default_mgmt_key; |
1071 | 1256 | ||
1072 | if (!func) { | 1257 | if (!func) { |
1073 | err = -EOPNOTSUPP; | 1258 | err = -EOPNOTSUPP; |
1074 | goto out; | 1259 | goto out; |
1075 | } | 1260 | } |
1076 | 1261 | ||
1077 | err = func(&drv->wiphy, dev, key_idx); | 1262 | wdev_lock(dev->ieee80211_ptr); |
1263 | err = nl80211_key_allowed(dev->ieee80211_ptr); | ||
1264 | if (!err) | ||
1265 | err = func(&rdev->wiphy, dev, key.idx); | ||
1266 | |||
1078 | #ifdef CONFIG_WIRELESS_EXT | 1267 | #ifdef CONFIG_WIRELESS_EXT |
1079 | if (!err) { | 1268 | if (!err) { |
1080 | if (func == drv->ops->set_default_key) | 1269 | if (func == rdev->ops->set_default_key) |
1081 | dev->ieee80211_ptr->wext.default_key = key_idx; | 1270 | dev->ieee80211_ptr->wext.default_key = key.idx; |
1082 | else | 1271 | else |
1083 | dev->ieee80211_ptr->wext.default_mgmt_key = key_idx; | 1272 | dev->ieee80211_ptr->wext.default_mgmt_key = key.idx; |
1084 | } | 1273 | } |
1085 | #endif | 1274 | #endif |
1275 | wdev_unlock(dev->ieee80211_ptr); | ||
1086 | 1276 | ||
1087 | out: | 1277 | out: |
1088 | cfg80211_put_dev(drv); | 1278 | cfg80211_unlock_rdev(rdev); |
1089 | dev_put(dev); | 1279 | dev_put(dev); |
1090 | 1280 | ||
1091 | unlock_rtnl: | 1281 | unlock_rtnl: |
@@ -1096,62 +1286,47 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) | |||
1096 | 1286 | ||
1097 | static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) | 1287 | static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) |
1098 | { | 1288 | { |
1099 | struct cfg80211_registered_device *drv; | 1289 | struct cfg80211_registered_device *rdev; |
1100 | int err, i; | 1290 | int err; |
1101 | struct net_device *dev; | 1291 | struct net_device *dev; |
1102 | struct key_params params; | 1292 | struct key_parse key; |
1103 | u8 key_idx = 0; | ||
1104 | u8 *mac_addr = NULL; | 1293 | u8 *mac_addr = NULL; |
1105 | 1294 | ||
1106 | memset(&params, 0, sizeof(params)); | 1295 | err = nl80211_parse_key(info, &key);
1296 | if (err) | ||
1297 | return err; | ||
1107 | 1298 | ||
1108 | if (!info->attrs[NL80211_ATTR_KEY_CIPHER]) | 1299 | if (!key.p.key) |
1109 | return -EINVAL; | 1300 | return -EINVAL; |
1110 | 1301 | ||
1111 | if (info->attrs[NL80211_ATTR_KEY_DATA]) { | ||
1112 | params.key = nla_data(info->attrs[NL80211_ATTR_KEY_DATA]); | ||
1113 | params.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]); | ||
1114 | } | ||
1115 | |||
1116 | if (info->attrs[NL80211_ATTR_KEY_SEQ]) { | ||
1117 | params.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]); | ||
1118 | params.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]); | ||
1119 | } | ||
1120 | |||
1121 | if (info->attrs[NL80211_ATTR_KEY_IDX]) | ||
1122 | key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); | ||
1123 | |||
1124 | params.cipher = nla_get_u32(info->attrs[NL80211_ATTR_KEY_CIPHER]); | ||
1125 | |||
1126 | if (info->attrs[NL80211_ATTR_MAC]) | 1302 | if (info->attrs[NL80211_ATTR_MAC]) |
1127 | mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); | 1303 | mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); |
1128 | 1304 | ||
1129 | if (cfg80211_validate_key_settings(¶ms, key_idx, mac_addr)) | ||
1130 | return -EINVAL; | ||
1131 | |||
1132 | rtnl_lock(); | 1305 | rtnl_lock(); |
1133 | 1306 | ||
1134 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 1307 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
1135 | if (err) | 1308 | if (err) |
1136 | goto unlock_rtnl; | 1309 | goto unlock_rtnl; |
1137 | 1310 | ||
1138 | for (i = 0; i < drv->wiphy.n_cipher_suites; i++) | 1311 | if (!rdev->ops->add_key) { |
1139 | if (params.cipher == drv->wiphy.cipher_suites[i]) | 1312 | err = -EOPNOTSUPP; |
1140 | break; | ||
1141 | if (i == drv->wiphy.n_cipher_suites) { | ||
1142 | err = -EINVAL; | ||
1143 | goto out; | 1313 | goto out; |
1144 | } | 1314 | } |
1145 | 1315 | ||
1146 | if (!drv->ops->add_key) { | 1316 | if (cfg80211_validate_key_settings(rdev, &key.p, key.idx, mac_addr)) { |
1147 | err = -EOPNOTSUPP; | 1317 | err = -EINVAL; |
1148 | goto out; | 1318 | goto out; |
1149 | } | 1319 | } |
1150 | 1320 | ||
1151 | err = drv->ops->add_key(&drv->wiphy, dev, key_idx, mac_addr, ¶ms); | 1321 | wdev_lock(dev->ieee80211_ptr); |
1322 | err = nl80211_key_allowed(dev->ieee80211_ptr); | ||
1323 | if (!err) | ||
1324 | err = rdev->ops->add_key(&rdev->wiphy, dev, key.idx, | ||
1325 | mac_addr, &key.p); | ||
1326 | wdev_unlock(dev->ieee80211_ptr); | ||
1152 | 1327 | ||
1153 | out: | 1328 | out: |
1154 | cfg80211_put_dev(drv); | 1329 | cfg80211_unlock_rdev(rdev); |
1155 | dev_put(dev); | 1330 | dev_put(dev); |
1156 | unlock_rtnl: | 1331 | unlock_rtnl: |
1157 | rtnl_unlock(); | 1332 | rtnl_unlock(); |
@@ -1161,45 +1336,47 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) | |||
1161 | 1336 | ||
1162 | static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) | 1337 | static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) |
1163 | { | 1338 | { |
1164 | struct cfg80211_registered_device *drv; | 1339 | struct cfg80211_registered_device *rdev; |
1165 | int err; | 1340 | int err; |
1166 | struct net_device *dev; | 1341 | struct net_device *dev; |
1167 | u8 key_idx = 0; | ||
1168 | u8 *mac_addr = NULL; | 1342 | u8 *mac_addr = NULL; |
1343 | struct key_parse key; | ||
1169 | 1344 | ||
1170 | if (info->attrs[NL80211_ATTR_KEY_IDX]) | 1345 | err = nl80211_parse_key(info, &key); |
1171 | key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); | 1346 | if (err)
1172 | 1347 | return err; | |
1173 | if (key_idx > 5) | ||
1174 | return -EINVAL; | ||
1175 | 1348 | ||
1176 | if (info->attrs[NL80211_ATTR_MAC]) | 1349 | if (info->attrs[NL80211_ATTR_MAC]) |
1177 | mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); | 1350 | mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); |
1178 | 1351 | ||
1179 | rtnl_lock(); | 1352 | rtnl_lock(); |
1180 | 1353 | ||
1181 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 1354 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
1182 | if (err) | 1355 | if (err) |
1183 | goto unlock_rtnl; | 1356 | goto unlock_rtnl; |
1184 | 1357 | ||
1185 | if (!drv->ops->del_key) { | 1358 | if (!rdev->ops->del_key) { |
1186 | err = -EOPNOTSUPP; | 1359 | err = -EOPNOTSUPP; |
1187 | goto out; | 1360 | goto out; |
1188 | } | 1361 | } |
1189 | 1362 | ||
1190 | err = drv->ops->del_key(&drv->wiphy, dev, key_idx, mac_addr); | 1363 | wdev_lock(dev->ieee80211_ptr); |
1364 | err = nl80211_key_allowed(dev->ieee80211_ptr); | ||
1365 | if (!err) | ||
1366 | err = rdev->ops->del_key(&rdev->wiphy, dev, key.idx, mac_addr); | ||
1191 | 1367 | ||
1192 | #ifdef CONFIG_WIRELESS_EXT | 1368 | #ifdef CONFIG_WIRELESS_EXT |
1193 | if (!err) { | 1369 | if (!err) { |
1194 | if (key_idx == dev->ieee80211_ptr->wext.default_key) | 1370 | if (key.idx == dev->ieee80211_ptr->wext.default_key) |
1195 | dev->ieee80211_ptr->wext.default_key = -1; | 1371 | dev->ieee80211_ptr->wext.default_key = -1; |
1196 | else if (key_idx == dev->ieee80211_ptr->wext.default_mgmt_key) | 1372 | else if (key.idx == dev->ieee80211_ptr->wext.default_mgmt_key) |
1197 | dev->ieee80211_ptr->wext.default_mgmt_key = -1; | 1373 | dev->ieee80211_ptr->wext.default_mgmt_key = -1; |
1198 | } | 1374 | } |
1199 | #endif | 1375 | #endif |
1376 | wdev_unlock(dev->ieee80211_ptr); | ||
1200 | 1377 | ||
1201 | out: | 1378 | out: |
1202 | cfg80211_put_dev(drv); | 1379 | cfg80211_unlock_rdev(rdev); |
1203 | dev_put(dev); | 1380 | dev_put(dev); |
1204 | 1381 | ||
1205 | unlock_rtnl: | 1382 | unlock_rtnl: |
@@ -1212,7 +1389,7 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) | |||
1212 | { | 1389 | { |
1213 | int (*call)(struct wiphy *wiphy, struct net_device *dev, | 1390 | int (*call)(struct wiphy *wiphy, struct net_device *dev, |
1214 | struct beacon_parameters *info); | 1391 | struct beacon_parameters *info); |
1215 | struct cfg80211_registered_device *drv; | 1392 | struct cfg80211_registered_device *rdev; |
1216 | int err; | 1393 | int err; |
1217 | struct net_device *dev; | 1394 | struct net_device *dev; |
1218 | struct beacon_parameters params; | 1395 | struct beacon_parameters params; |
@@ -1223,7 +1400,7 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) | |||
1223 | 1400 | ||
1224 | rtnl_lock(); | 1401 | rtnl_lock(); |
1225 | 1402 | ||
1226 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 1403 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
1227 | if (err) | 1404 | if (err) |
1228 | goto unlock_rtnl; | 1405 | goto unlock_rtnl; |
1229 | 1406 | ||
@@ -1242,10 +1419,10 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) | |||
1242 | goto out; | 1419 | goto out; |
1243 | } | 1420 | } |
1244 | 1421 | ||
1245 | call = drv->ops->add_beacon; | 1422 | call = rdev->ops->add_beacon; |
1246 | break; | 1423 | break; |
1247 | case NL80211_CMD_SET_BEACON: | 1424 | case NL80211_CMD_SET_BEACON: |
1248 | call = drv->ops->set_beacon; | 1425 | call = rdev->ops->set_beacon; |
1249 | break; | 1426 | break; |
1250 | default: | 1427 | default: |
1251 | WARN_ON(1); | 1428 | WARN_ON(1); |
@@ -1291,10 +1468,10 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) | |||
1291 | goto out; | 1468 | goto out; |
1292 | } | 1469 | } |
1293 | 1470 | ||
1294 | err = call(&drv->wiphy, dev, ¶ms); | 1471 | err = call(&rdev->wiphy, dev, ¶ms); |
1295 | 1472 | ||
1296 | out: | 1473 | out: |
1297 | cfg80211_put_dev(drv); | 1474 | cfg80211_unlock_rdev(rdev); |
1298 | dev_put(dev); | 1475 | dev_put(dev); |
1299 | unlock_rtnl: | 1476 | unlock_rtnl: |
1300 | rtnl_unlock(); | 1477 | rtnl_unlock(); |
@@ -1304,17 +1481,17 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) | |||
1304 | 1481 | ||
1305 | static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info) | 1482 | static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info) |
1306 | { | 1483 | { |
1307 | struct cfg80211_registered_device *drv; | 1484 | struct cfg80211_registered_device *rdev; |
1308 | int err; | 1485 | int err; |
1309 | struct net_device *dev; | 1486 | struct net_device *dev; |
1310 | 1487 | ||
1311 | rtnl_lock(); | 1488 | rtnl_lock(); |
1312 | 1489 | ||
1313 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 1490 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
1314 | if (err) | 1491 | if (err) |
1315 | goto unlock_rtnl; | 1492 | goto unlock_rtnl; |
1316 | 1493 | ||
1317 | if (!drv->ops->del_beacon) { | 1494 | if (!rdev->ops->del_beacon) { |
1318 | err = -EOPNOTSUPP; | 1495 | err = -EOPNOTSUPP; |
1319 | goto out; | 1496 | goto out; |
1320 | } | 1497 | } |
@@ -1323,10 +1500,10 @@ static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info) | |||
1323 | err = -EOPNOTSUPP; | 1500 | err = -EOPNOTSUPP; |
1324 | goto out; | 1501 | goto out; |
1325 | } | 1502 | } |
1326 | err = drv->ops->del_beacon(&drv->wiphy, dev); | 1503 | err = rdev->ops->del_beacon(&rdev->wiphy, dev); |
1327 | 1504 | ||
1328 | out: | 1505 | out: |
1329 | cfg80211_put_dev(drv); | 1506 | cfg80211_unlock_rdev(rdev); |
1330 | dev_put(dev); | 1507 | dev_put(dev); |
1331 | unlock_rtnl: | 1508 | unlock_rtnl: |
1332 | rtnl_unlock(); | 1509 | rtnl_unlock(); |
@@ -1433,6 +1610,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, | |||
1433 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); | 1610 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); |
1434 | NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); | 1611 | NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); |
1435 | 1612 | ||
1613 | NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation); | ||
1614 | |||
1436 | sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); | 1615 | sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); |
1437 | if (!sinfoattr) | 1616 | if (!sinfoattr) |
1438 | goto nla_put_failure; | 1617 | goto nla_put_failure; |
@@ -1520,13 +1699,13 @@ static int nl80211_dump_station(struct sk_buff *skb, | |||
1520 | 1699 | ||
1521 | rtnl_lock(); | 1700 | rtnl_lock(); |
1522 | 1701 | ||
1523 | netdev = __dev_get_by_index(&init_net, ifidx); | 1702 | netdev = __dev_get_by_index(sock_net(skb->sk), ifidx); |
1524 | if (!netdev) { | 1703 | if (!netdev) { |
1525 | err = -ENODEV; | 1704 | err = -ENODEV; |
1526 | goto out_rtnl; | 1705 | goto out_rtnl; |
1527 | } | 1706 | } |
1528 | 1707 | ||
1529 | dev = cfg80211_get_dev_from_ifindex(ifidx); | 1708 | dev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); |
1530 | if (IS_ERR(dev)) { | 1709 | if (IS_ERR(dev)) { |
1531 | err = PTR_ERR(dev); | 1710 | err = PTR_ERR(dev); |
1532 | goto out_rtnl; | 1711 | goto out_rtnl; |
@@ -1560,7 +1739,7 @@ static int nl80211_dump_station(struct sk_buff *skb, | |||
1560 | cb->args[1] = sta_idx; | 1739 | cb->args[1] = sta_idx; |
1561 | err = skb->len; | 1740 | err = skb->len; |
1562 | out_err: | 1741 | out_err: |
1563 | cfg80211_put_dev(dev); | 1742 | cfg80211_unlock_rdev(dev); |
1564 | out_rtnl: | 1743 | out_rtnl: |
1565 | rtnl_unlock(); | 1744 | rtnl_unlock(); |
1566 | 1745 | ||
@@ -1569,7 +1748,7 @@ static int nl80211_dump_station(struct sk_buff *skb, | |||
1569 | 1748 | ||
1570 | static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) | 1749 | static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) |
1571 | { | 1750 | { |
1572 | struct cfg80211_registered_device *drv; | 1751 | struct cfg80211_registered_device *rdev; |
1573 | int err; | 1752 | int err; |
1574 | struct net_device *dev; | 1753 | struct net_device *dev; |
1575 | struct station_info sinfo; | 1754 | struct station_info sinfo; |
@@ -1585,16 +1764,16 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) | |||
1585 | 1764 | ||
1586 | rtnl_lock(); | 1765 | rtnl_lock(); |
1587 | 1766 | ||
1588 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 1767 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
1589 | if (err) | 1768 | if (err) |
1590 | goto out_rtnl; | 1769 | goto out_rtnl; |
1591 | 1770 | ||
1592 | if (!drv->ops->get_station) { | 1771 | if (!rdev->ops->get_station) { |
1593 | err = -EOPNOTSUPP; | 1772 | err = -EOPNOTSUPP; |
1594 | goto out; | 1773 | goto out; |
1595 | } | 1774 | } |
1596 | 1775 | ||
1597 | err = drv->ops->get_station(&drv->wiphy, dev, mac_addr, &sinfo); | 1776 | err = rdev->ops->get_station(&rdev->wiphy, dev, mac_addr, &sinfo); |
1598 | if (err) | 1777 | if (err) |
1599 | goto out; | 1778 | goto out; |
1600 | 1779 | ||
@@ -1606,13 +1785,13 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) | |||
1606 | dev, mac_addr, &sinfo) < 0) | 1785 | dev, mac_addr, &sinfo) < 0) |
1607 | goto out_free; | 1786 | goto out_free; |
1608 | 1787 | ||
1609 | err = genlmsg_unicast(msg, info->snd_pid); | 1788 | err = genlmsg_reply(msg, info); |
1610 | goto out; | 1789 | goto out; |
1611 | 1790 | ||
1612 | out_free: | 1791 | out_free: |
1613 | nlmsg_free(msg); | 1792 | nlmsg_free(msg); |
1614 | out: | 1793 | out: |
1615 | cfg80211_put_dev(drv); | 1794 | cfg80211_unlock_rdev(rdev); |
1616 | dev_put(dev); | 1795 | dev_put(dev); |
1617 | out_rtnl: | 1796 | out_rtnl: |
1618 | rtnl_unlock(); | 1797 | rtnl_unlock(); |
@@ -1623,14 +1802,16 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) | |||
1623 | /* | 1802 | /* |
1624 | * Get vlan interface making sure it is on the right wiphy. | 1803 | * Get vlan interface making sure it is on the right wiphy. |
1625 | */ | 1804 | */ |
1626 | static int get_vlan(struct nlattr *vlanattr, | 1805 | static int get_vlan(struct genl_info *info, |
1627 | struct cfg80211_registered_device *rdev, | 1806 | struct cfg80211_registered_device *rdev, |
1628 | struct net_device **vlan) | 1807 | struct net_device **vlan) |
1629 | { | 1808 | { |
1809 | struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN]; | ||
1630 | *vlan = NULL; | 1810 | *vlan = NULL; |
1631 | 1811 | ||
1632 | if (vlanattr) { | 1812 | if (vlanattr) { |
1633 | *vlan = dev_get_by_index(&init_net, nla_get_u32(vlanattr)); | 1813 | *vlan = dev_get_by_index(genl_info_net(info), |
1814 | nla_get_u32(vlanattr)); | ||
1634 | if (!*vlan) | 1815 | if (!*vlan) |
1635 | return -ENODEV; | 1816 | return -ENODEV; |
1636 | if (!(*vlan)->ieee80211_ptr) | 1817 | if (!(*vlan)->ieee80211_ptr) |
@@ -1643,7 +1824,7 @@ static int get_vlan(struct nlattr *vlanattr, | |||
1643 | 1824 | ||
1644 | static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) | 1825 | static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) |
1645 | { | 1826 | { |
1646 | struct cfg80211_registered_device *drv; | 1827 | struct cfg80211_registered_device *rdev; |
1647 | int err; | 1828 | int err; |
1648 | struct net_device *dev; | 1829 | struct net_device *dev; |
1649 | struct station_parameters params; | 1830 | struct station_parameters params; |
@@ -1685,11 +1866,11 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) | |||
1685 | 1866 | ||
1686 | rtnl_lock(); | 1867 | rtnl_lock(); |
1687 | 1868 | ||
1688 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 1869 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
1689 | if (err) | 1870 | if (err) |
1690 | goto out_rtnl; | 1871 | goto out_rtnl; |
1691 | 1872 | ||
1692 | err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, ¶ms.vlan); | 1873 | err = get_vlan(info, rdev, ¶ms.vlan); |
1693 | if (err) | 1874 | if (err) |
1694 | goto out; | 1875 | goto out; |
1695 | 1876 | ||
@@ -1738,17 +1919,17 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) | |||
1738 | if (err) | 1919 | if (err) |
1739 | goto out; | 1920 | goto out; |
1740 | 1921 | ||
1741 | if (!drv->ops->change_station) { | 1922 | if (!rdev->ops->change_station) { |
1742 | err = -EOPNOTSUPP; | 1923 | err = -EOPNOTSUPP; |
1743 | goto out; | 1924 | goto out; |
1744 | } | 1925 | } |
1745 | 1926 | ||
1746 | err = drv->ops->change_station(&drv->wiphy, dev, mac_addr, ¶ms); | 1927 | err = rdev->ops->change_station(&rdev->wiphy, dev, mac_addr, ¶ms); |
1747 | 1928 | ||
1748 | out: | 1929 | out: |
1749 | if (params.vlan) | 1930 | if (params.vlan) |
1750 | dev_put(params.vlan); | 1931 | dev_put(params.vlan); |
1751 | cfg80211_put_dev(drv); | 1932 | cfg80211_unlock_rdev(rdev); |
1752 | dev_put(dev); | 1933 | dev_put(dev); |
1753 | out_rtnl: | 1934 | out_rtnl: |
1754 | rtnl_unlock(); | 1935 | rtnl_unlock(); |
@@ -1758,7 +1939,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) | |||
1758 | 1939 | ||
1759 | static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | 1940 | static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) |
1760 | { | 1941 | { |
1761 | struct cfg80211_registered_device *drv; | 1942 | struct cfg80211_registered_device *rdev; |
1762 | int err; | 1943 | int err; |
1763 | struct net_device *dev; | 1944 | struct net_device *dev; |
1764 | struct station_parameters params; | 1945 | struct station_parameters params; |
@@ -1798,11 +1979,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
1798 | 1979 | ||
1799 | rtnl_lock(); | 1980 | rtnl_lock(); |
1800 | 1981 | ||
1801 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 1982 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
1802 | if (err) | 1983 | if (err) |
1803 | goto out_rtnl; | 1984 | goto out_rtnl; |
1804 | 1985 | ||
1805 | err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, ¶ms.vlan); | 1986 | err = get_vlan(info, rdev, ¶ms.vlan); |
1806 | if (err) | 1987 | if (err) |
1807 | goto out; | 1988 | goto out; |
1808 | 1989 | ||
@@ -1838,7 +2019,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
1838 | if (err) | 2019 | if (err) |
1839 | goto out; | 2020 | goto out; |
1840 | 2021 | ||
1841 | if (!drv->ops->add_station) { | 2022 | if (!rdev->ops->add_station) { |
1842 | err = -EOPNOTSUPP; | 2023 | err = -EOPNOTSUPP; |
1843 | goto out; | 2024 | goto out; |
1844 | } | 2025 | } |
@@ -1848,12 +2029,12 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
1848 | goto out; | 2029 | goto out; |
1849 | } | 2030 | } |
1850 | 2031 | ||
1851 | err = drv->ops->add_station(&drv->wiphy, dev, mac_addr, ¶ms); | 2032 | err = rdev->ops->add_station(&rdev->wiphy, dev, mac_addr, ¶ms); |
1852 | 2033 | ||
1853 | out: | 2034 | out: |
1854 | if (params.vlan) | 2035 | if (params.vlan) |
1855 | dev_put(params.vlan); | 2036 | dev_put(params.vlan); |
1856 | cfg80211_put_dev(drv); | 2037 | cfg80211_unlock_rdev(rdev); |
1857 | dev_put(dev); | 2038 | dev_put(dev); |
1858 | out_rtnl: | 2039 | out_rtnl: |
1859 | rtnl_unlock(); | 2040 | rtnl_unlock(); |
@@ -1863,7 +2044,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
1863 | 2044 | ||
1864 | static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) | 2045 | static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) |
1865 | { | 2046 | { |
1866 | struct cfg80211_registered_device *drv; | 2047 | struct cfg80211_registered_device *rdev; |
1867 | int err; | 2048 | int err; |
1868 | struct net_device *dev; | 2049 | struct net_device *dev; |
1869 | u8 *mac_addr = NULL; | 2050 | u8 *mac_addr = NULL; |
@@ -1873,7 +2054,7 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) | |||
1873 | 2054 | ||
1874 | rtnl_lock(); | 2055 | rtnl_lock(); |
1875 | 2056 | ||
1876 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 2057 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
1877 | if (err) | 2058 | if (err) |
1878 | goto out_rtnl; | 2059 | goto out_rtnl; |
1879 | 2060 | ||
@@ -1884,15 +2065,15 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) | |||
1884 | goto out; | 2065 | goto out; |
1885 | } | 2066 | } |
1886 | 2067 | ||
1887 | if (!drv->ops->del_station) { | 2068 | if (!rdev->ops->del_station) { |
1888 | err = -EOPNOTSUPP; | 2069 | err = -EOPNOTSUPP; |
1889 | goto out; | 2070 | goto out; |
1890 | } | 2071 | } |
1891 | 2072 | ||
1892 | err = drv->ops->del_station(&drv->wiphy, dev, mac_addr); | 2073 | err = rdev->ops->del_station(&rdev->wiphy, dev, mac_addr); |
1893 | 2074 | ||
1894 | out: | 2075 | out: |
1895 | cfg80211_put_dev(drv); | 2076 | cfg80211_unlock_rdev(rdev); |
1896 | dev_put(dev); | 2077 | dev_put(dev); |
1897 | out_rtnl: | 2078 | out_rtnl: |
1898 | rtnl_unlock(); | 2079 | rtnl_unlock(); |
@@ -1916,6 +2097,8 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq, | |||
1916 | NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst); | 2097 | NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst); |
1917 | NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop); | 2098 | NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop); |
1918 | 2099 | ||
2100 | NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation); | ||
2101 | |||
1919 | pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO); | 2102 | pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO); |
1920 | if (!pinfoattr) | 2103 | if (!pinfoattr) |
1921 | goto nla_put_failure; | 2104 | goto nla_put_failure; |
@@ -1979,13 +2162,13 @@ static int nl80211_dump_mpath(struct sk_buff *skb, | |||
1979 | 2162 | ||
1980 | rtnl_lock(); | 2163 | rtnl_lock(); |
1981 | 2164 | ||
1982 | netdev = __dev_get_by_index(&init_net, ifidx); | 2165 | netdev = __dev_get_by_index(sock_net(skb->sk), ifidx); |
1983 | if (!netdev) { | 2166 | if (!netdev) { |
1984 | err = -ENODEV; | 2167 | err = -ENODEV; |
1985 | goto out_rtnl; | 2168 | goto out_rtnl; |
1986 | } | 2169 | } |
1987 | 2170 | ||
1988 | dev = cfg80211_get_dev_from_ifindex(ifidx); | 2171 | dev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); |
1989 | if (IS_ERR(dev)) { | 2172 | if (IS_ERR(dev)) { |
1990 | err = PTR_ERR(dev); | 2173 | err = PTR_ERR(dev); |
1991 | goto out_rtnl; | 2174 | goto out_rtnl; |
@@ -1998,7 +2181,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb, | |||
1998 | 2181 | ||
1999 | if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { | 2182 | if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { |
2000 | err = -EOPNOTSUPP; | 2183 | err = -EOPNOTSUPP; |
2001 | goto out; | 2184 | goto out_err; |
2002 | } | 2185 | } |
2003 | 2186 | ||
2004 | while (1) { | 2187 | while (1) { |
@@ -2023,7 +2206,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb, | |||
2023 | cb->args[1] = path_idx; | 2206 | cb->args[1] = path_idx; |
2024 | err = skb->len; | 2207 | err = skb->len; |
2025 | out_err: | 2208 | out_err: |
2026 | cfg80211_put_dev(dev); | 2209 | cfg80211_unlock_rdev(dev); |
2027 | out_rtnl: | 2210 | out_rtnl: |
2028 | rtnl_unlock(); | 2211 | rtnl_unlock(); |
2029 | 2212 | ||
@@ -2032,7 +2215,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb, | |||
2032 | 2215 | ||
2033 | static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) | 2216 | static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) |
2034 | { | 2217 | { |
2035 | struct cfg80211_registered_device *drv; | 2218 | struct cfg80211_registered_device *rdev; |
2036 | int err; | 2219 | int err; |
2037 | struct net_device *dev; | 2220 | struct net_device *dev; |
2038 | struct mpath_info pinfo; | 2221 | struct mpath_info pinfo; |
@@ -2049,11 +2232,11 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) | |||
2049 | 2232 | ||
2050 | rtnl_lock(); | 2233 | rtnl_lock(); |
2051 | 2234 | ||
2052 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 2235 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
2053 | if (err) | 2236 | if (err) |
2054 | goto out_rtnl; | 2237 | goto out_rtnl; |
2055 | 2238 | ||
2056 | if (!drv->ops->get_mpath) { | 2239 | if (!rdev->ops->get_mpath) { |
2057 | err = -EOPNOTSUPP; | 2240 | err = -EOPNOTSUPP; |
2058 | goto out; | 2241 | goto out; |
2059 | } | 2242 | } |
@@ -2063,7 +2246,7 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) | |||
2063 | goto out; | 2246 | goto out; |
2064 | } | 2247 | } |
2065 | 2248 | ||
2066 | err = drv->ops->get_mpath(&drv->wiphy, dev, dst, next_hop, &pinfo); | 2249 | err = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, &pinfo); |
2067 | if (err) | 2250 | if (err) |
2068 | goto out; | 2251 | goto out; |
2069 | 2252 | ||
@@ -2075,13 +2258,13 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) | |||
2075 | dev, dst, next_hop, &pinfo) < 0) | 2258 | dev, dst, next_hop, &pinfo) < 0) |
2076 | goto out_free; | 2259 | goto out_free; |
2077 | 2260 | ||
2078 | err = genlmsg_unicast(msg, info->snd_pid); | 2261 | err = genlmsg_reply(msg, info); |
2079 | goto out; | 2262 | goto out; |
2080 | 2263 | ||
2081 | out_free: | 2264 | out_free: |
2082 | nlmsg_free(msg); | 2265 | nlmsg_free(msg); |
2083 | out: | 2266 | out: |
2084 | cfg80211_put_dev(drv); | 2267 | cfg80211_unlock_rdev(rdev); |
2085 | dev_put(dev); | 2268 | dev_put(dev); |
2086 | out_rtnl: | 2269 | out_rtnl: |
2087 | rtnl_unlock(); | 2270 | rtnl_unlock(); |
@@ -2091,7 +2274,7 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) | |||
2091 | 2274 | ||
2092 | static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) | 2275 | static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) |
2093 | { | 2276 | { |
2094 | struct cfg80211_registered_device *drv; | 2277 | struct cfg80211_registered_device *rdev; |
2095 | int err; | 2278 | int err; |
2096 | struct net_device *dev; | 2279 | struct net_device *dev; |
2097 | u8 *dst = NULL; | 2280 | u8 *dst = NULL; |
@@ -2108,11 +2291,11 @@ static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) | |||
2108 | 2291 | ||
2109 | rtnl_lock(); | 2292 | rtnl_lock(); |
2110 | 2293 | ||
2111 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 2294 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
2112 | if (err) | 2295 | if (err) |
2113 | goto out_rtnl; | 2296 | goto out_rtnl; |
2114 | 2297 | ||
2115 | if (!drv->ops->change_mpath) { | 2298 | if (!rdev->ops->change_mpath) { |
2116 | err = -EOPNOTSUPP; | 2299 | err = -EOPNOTSUPP; |
2117 | goto out; | 2300 | goto out; |
2118 | } | 2301 | } |
@@ -2127,10 +2310,10 @@ static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) | |||
2127 | goto out; | 2310 | goto out; |
2128 | } | 2311 | } |
2129 | 2312 | ||
2130 | err = drv->ops->change_mpath(&drv->wiphy, dev, dst, next_hop); | 2313 | err = rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop); |
2131 | 2314 | ||
2132 | out: | 2315 | out: |
2133 | cfg80211_put_dev(drv); | 2316 | cfg80211_unlock_rdev(rdev); |
2134 | dev_put(dev); | 2317 | dev_put(dev); |
2135 | out_rtnl: | 2318 | out_rtnl: |
2136 | rtnl_unlock(); | 2319 | rtnl_unlock(); |
@@ -2139,7 +2322,7 @@ static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) | |||
2139 | } | 2322 | } |
2140 | static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) | 2323 | static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) |
2141 | { | 2324 | { |
2142 | struct cfg80211_registered_device *drv; | 2325 | struct cfg80211_registered_device *rdev; |
2143 | int err; | 2326 | int err; |
2144 | struct net_device *dev; | 2327 | struct net_device *dev; |
2145 | u8 *dst = NULL; | 2328 | u8 *dst = NULL; |
@@ -2156,11 +2339,11 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) | |||
2156 | 2339 | ||
2157 | rtnl_lock(); | 2340 | rtnl_lock(); |
2158 | 2341 | ||
2159 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 2342 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
2160 | if (err) | 2343 | if (err) |
2161 | goto out_rtnl; | 2344 | goto out_rtnl; |
2162 | 2345 | ||
2163 | if (!drv->ops->add_mpath) { | 2346 | if (!rdev->ops->add_mpath) { |
2164 | err = -EOPNOTSUPP; | 2347 | err = -EOPNOTSUPP; |
2165 | goto out; | 2348 | goto out; |
2166 | } | 2349 | } |
@@ -2175,10 +2358,10 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) | |||
2175 | goto out; | 2358 | goto out; |
2176 | } | 2359 | } |
2177 | 2360 | ||
2178 | err = drv->ops->add_mpath(&drv->wiphy, dev, dst, next_hop); | 2361 | err = rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop); |
2179 | 2362 | ||
2180 | out: | 2363 | out: |
2181 | cfg80211_put_dev(drv); | 2364 | cfg80211_unlock_rdev(rdev); |
2182 | dev_put(dev); | 2365 | dev_put(dev); |
2183 | out_rtnl: | 2366 | out_rtnl: |
2184 | rtnl_unlock(); | 2367 | rtnl_unlock(); |
@@ -2188,7 +2371,7 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) | |||
2188 | 2371 | ||
2189 | static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) | 2372 | static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) |
2190 | { | 2373 | { |
2191 | struct cfg80211_registered_device *drv; | 2374 | struct cfg80211_registered_device *rdev; |
2192 | int err; | 2375 | int err; |
2193 | struct net_device *dev; | 2376 | struct net_device *dev; |
2194 | u8 *dst = NULL; | 2377 | u8 *dst = NULL; |
@@ -2198,19 +2381,19 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) | |||
2198 | 2381 | ||
2199 | rtnl_lock(); | 2382 | rtnl_lock(); |
2200 | 2383 | ||
2201 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 2384 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
2202 | if (err) | 2385 | if (err) |
2203 | goto out_rtnl; | 2386 | goto out_rtnl; |
2204 | 2387 | ||
2205 | if (!drv->ops->del_mpath) { | 2388 | if (!rdev->ops->del_mpath) { |
2206 | err = -EOPNOTSUPP; | 2389 | err = -EOPNOTSUPP; |
2207 | goto out; | 2390 | goto out; |
2208 | } | 2391 | } |
2209 | 2392 | ||
2210 | err = drv->ops->del_mpath(&drv->wiphy, dev, dst); | 2393 | err = rdev->ops->del_mpath(&rdev->wiphy, dev, dst); |
2211 | 2394 | ||
2212 | out: | 2395 | out: |
2213 | cfg80211_put_dev(drv); | 2396 | cfg80211_unlock_rdev(rdev); |
2214 | dev_put(dev); | 2397 | dev_put(dev); |
2215 | out_rtnl: | 2398 | out_rtnl: |
2216 | rtnl_unlock(); | 2399 | rtnl_unlock(); |
@@ -2220,7 +2403,7 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) | |||
2220 | 2403 | ||
2221 | static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) | 2404 | static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) |
2222 | { | 2405 | { |
2223 | struct cfg80211_registered_device *drv; | 2406 | struct cfg80211_registered_device *rdev; |
2224 | int err; | 2407 | int err; |
2225 | struct net_device *dev; | 2408 | struct net_device *dev; |
2226 | struct bss_parameters params; | 2409 | struct bss_parameters params; |
@@ -2249,11 +2432,11 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) | |||
2249 | 2432 | ||
2250 | rtnl_lock(); | 2433 | rtnl_lock(); |
2251 | 2434 | ||
2252 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 2435 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
2253 | if (err) | 2436 | if (err) |
2254 | goto out_rtnl; | 2437 | goto out_rtnl; |
2255 | 2438 | ||
2256 | if (!drv->ops->change_bss) { | 2439 | if (!rdev->ops->change_bss) { |
2257 | err = -EOPNOTSUPP; | 2440 | err = -EOPNOTSUPP; |
2258 | goto out; | 2441 | goto out; |
2259 | } | 2442 | } |
@@ -2263,10 +2446,10 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) | |||
2263 | goto out; | 2446 | goto out; |
2264 | } | 2447 | } |
2265 | 2448 | ||
2266 | err = drv->ops->change_bss(&drv->wiphy, dev, &params); | 2449 | err = rdev->ops->change_bss(&rdev->wiphy, dev, &params); |
2267 | 2450 | ||
2268 | out: | 2451 | out: |
2269 | cfg80211_put_dev(drv); | 2452 | cfg80211_unlock_rdev(rdev); |
2270 | dev_put(dev); | 2453 | dev_put(dev); |
2271 | out_rtnl: | 2454 | out_rtnl: |
2272 | rtnl_unlock(); | 2455 | rtnl_unlock(); |
@@ -2357,7 +2540,7 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info) | |||
2357 | static int nl80211_get_mesh_params(struct sk_buff *skb, | 2540 | static int nl80211_get_mesh_params(struct sk_buff *skb, |
2358 | struct genl_info *info) | 2541 | struct genl_info *info) |
2359 | { | 2542 | { |
2360 | struct cfg80211_registered_device *drv; | 2543 | struct cfg80211_registered_device *rdev; |
2361 | struct mesh_config cur_params; | 2544 | struct mesh_config cur_params; |
2362 | int err; | 2545 | int err; |
2363 | struct net_device *dev; | 2546 | struct net_device *dev; |
@@ -2368,17 +2551,17 @@ static int nl80211_get_mesh_params(struct sk_buff *skb, | |||
2368 | rtnl_lock(); | 2551 | rtnl_lock(); |
2369 | 2552 | ||
2370 | /* Look up our device */ | 2553 | /* Look up our device */ |
2371 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 2554 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
2372 | if (err) | 2555 | if (err) |
2373 | goto out_rtnl; | 2556 | goto out_rtnl; |
2374 | 2557 | ||
2375 | if (!drv->ops->get_mesh_params) { | 2558 | if (!rdev->ops->get_mesh_params) { |
2376 | err = -EOPNOTSUPP; | 2559 | err = -EOPNOTSUPP; |
2377 | goto out; | 2560 | goto out; |
2378 | } | 2561 | } |
2379 | 2562 | ||
2380 | /* Get the mesh params */ | 2563 | /* Get the mesh params */ |
2381 | err = drv->ops->get_mesh_params(&drv->wiphy, dev, &cur_params); | 2564 | err = rdev->ops->get_mesh_params(&rdev->wiphy, dev, &cur_params); |
2382 | if (err) | 2565 | if (err) |
2383 | goto out; | 2566 | goto out; |
2384 | 2567 | ||
@@ -2424,7 +2607,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb, | |||
2424 | cur_params.dot11MeshHWMPnetDiameterTraversalTime); | 2607 | cur_params.dot11MeshHWMPnetDiameterTraversalTime); |
2425 | nla_nest_end(msg, pinfoattr); | 2608 | nla_nest_end(msg, pinfoattr); |
2426 | genlmsg_end(msg, hdr); | 2609 | genlmsg_end(msg, hdr); |
2427 | err = genlmsg_unicast(msg, info->snd_pid); | 2610 | err = genlmsg_reply(msg, info); |
2428 | goto out; | 2611 | goto out; |
2429 | 2612 | ||
2430 | nla_put_failure: | 2613 | nla_put_failure: |
@@ -2432,7 +2615,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb, | |||
2432 | err = -EMSGSIZE; | 2615 | err = -EMSGSIZE; |
2433 | out: | 2616 | out: |
2434 | /* Cleanup */ | 2617 | /* Cleanup */ |
2435 | cfg80211_put_dev(drv); | 2618 | cfg80211_unlock_rdev(rdev); |
2436 | dev_put(dev); | 2619 | dev_put(dev); |
2437 | out_rtnl: | 2620 | out_rtnl: |
2438 | rtnl_unlock(); | 2621 | rtnl_unlock(); |
@@ -2470,7 +2653,7 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info) | |||
2470 | { | 2653 | { |
2471 | int err; | 2654 | int err; |
2472 | u32 mask; | 2655 | u32 mask; |
2473 | struct cfg80211_registered_device *drv; | 2656 | struct cfg80211_registered_device *rdev; |
2474 | struct net_device *dev; | 2657 | struct net_device *dev; |
2475 | struct mesh_config cfg; | 2658 | struct mesh_config cfg; |
2476 | struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; | 2659 | struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; |
@@ -2485,11 +2668,11 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info) | |||
2485 | 2668 | ||
2486 | rtnl_lock(); | 2669 | rtnl_lock(); |
2487 | 2670 | ||
2488 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 2671 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
2489 | if (err) | 2672 | if (err) |
2490 | goto out_rtnl; | 2673 | goto out_rtnl; |
2491 | 2674 | ||
2492 | if (!drv->ops->set_mesh_params) { | 2675 | if (!rdev->ops->set_mesh_params) { |
2493 | err = -EOPNOTSUPP; | 2676 | err = -EOPNOTSUPP; |
2494 | goto out; | 2677 | goto out; |
2495 | } | 2678 | } |
@@ -2534,11 +2717,11 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info) | |||
2534 | nla_get_u16); | 2717 | nla_get_u16); |
2535 | 2718 | ||
2536 | /* Apply changes */ | 2719 | /* Apply changes */ |
2537 | err = drv->ops->set_mesh_params(&drv->wiphy, dev, &cfg, mask); | 2720 | err = rdev->ops->set_mesh_params(&rdev->wiphy, dev, &cfg, mask); |
2538 | 2721 | ||
2539 | out: | 2722 | out: |
2540 | /* cleanup */ | 2723 | /* cleanup */ |
2541 | cfg80211_put_dev(drv); | 2724 | cfg80211_unlock_rdev(rdev); |
2542 | dev_put(dev); | 2725 | dev_put(dev); |
2543 | out_rtnl: | 2726 | out_rtnl: |
2544 | rtnl_unlock(); | 2727 | rtnl_unlock(); |
@@ -2612,7 +2795,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) | |||
2612 | nla_nest_end(msg, nl_reg_rules); | 2795 | nla_nest_end(msg, nl_reg_rules); |
2613 | 2796 | ||
2614 | genlmsg_end(msg, hdr); | 2797 | genlmsg_end(msg, hdr); |
2615 | err = genlmsg_unicast(msg, info->snd_pid); | 2798 | err = genlmsg_reply(msg, info); |
2616 | goto out; | 2799 | goto out; |
2617 | 2800 | ||
2618 | nla_put_failure: | 2801 | nla_put_failure: |
@@ -2698,16 +2881,41 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) | |||
2698 | return r; | 2881 | return r; |
2699 | } | 2882 | } |
2700 | 2883 | ||
2884 | static int validate_scan_freqs(struct nlattr *freqs) | ||
2885 | { | ||
2886 | struct nlattr *attr1, *attr2; | ||
2887 | int n_channels = 0, tmp1, tmp2; | ||
2888 | |||
2889 | nla_for_each_nested(attr1, freqs, tmp1) { | ||
2890 | n_channels++; | ||
2891 | /* | ||
2892 | * Some hardware has a limited channel list for | ||
2893 | * scanning, and it is pretty much nonsensical | ||
2894 | * to scan for a channel twice, so disallow that | ||
2895 | * and don't require drivers to check that the | ||
2896 | * channel list they get isn't longer than what | ||
2897 | * they can scan, as long as they can scan all | ||
2898 | * the channels they registered at once. | ||
2899 | */ | ||
2900 | nla_for_each_nested(attr2, freqs, tmp2) | ||
2901 | if (attr1 != attr2 && | ||
2902 | nla_get_u32(attr1) == nla_get_u32(attr2)) | ||
2903 | return 0; | ||
2904 | } | ||
2905 | |||
2906 | return n_channels; | ||
2907 | } | ||
2908 | |||
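The new validate_scan_freqs() above counts the nested frequency attributes and rejects the request outright if any frequency is listed twice, using a simple O(n^2) pairwise comparison. The same logic over a plain array, written as a hypothetical userspace-side sanity check (illustrative only, not part of the patch):

	/* Returns the number of channels, or 0 if any frequency is duplicated,
	 * mirroring validate_scan_freqs() above. */
	static int count_unique_freqs(const unsigned int *freq, int n)
	{
		int i, j;

		for (i = 0; i < n; i++)
			for (j = 0; j < n; j++)
				if (i != j && freq[i] == freq[j])
					return 0;
		return n;
	}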
2701 | static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | 2909 | static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) |
2702 | { | 2910 | { |
2703 | struct cfg80211_registered_device *drv; | 2911 | struct cfg80211_registered_device *rdev; |
2704 | struct net_device *dev; | 2912 | struct net_device *dev; |
2705 | struct cfg80211_scan_request *request; | 2913 | struct cfg80211_scan_request *request; |
2706 | struct cfg80211_ssid *ssid; | 2914 | struct cfg80211_ssid *ssid; |
2707 | struct ieee80211_channel *channel; | 2915 | struct ieee80211_channel *channel; |
2708 | struct nlattr *attr; | 2916 | struct nlattr *attr; |
2709 | struct wiphy *wiphy; | 2917 | struct wiphy *wiphy; |
2710 | int err, tmp, n_ssids = 0, n_channels = 0, i; | 2918 | int err, tmp, n_ssids = 0, n_channels, i; |
2711 | enum ieee80211_band band; | 2919 | enum ieee80211_band band; |
2712 | size_t ie_len; | 2920 | size_t ie_len; |
2713 | 2921 | ||
@@ -2716,13 +2924,13 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
2716 | 2924 | ||
2717 | rtnl_lock(); | 2925 | rtnl_lock(); |
2718 | 2926 | ||
2719 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 2927 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
2720 | if (err) | 2928 | if (err) |
2721 | goto out_rtnl; | 2929 | goto out_rtnl; |
2722 | 2930 | ||
2723 | wiphy = &drv->wiphy; | 2931 | wiphy = &rdev->wiphy; |
2724 | 2932 | ||
2725 | if (!drv->ops->scan) { | 2933 | if (!rdev->ops->scan) { |
2726 | err = -EOPNOTSUPP; | 2934 | err = -EOPNOTSUPP; |
2727 | goto out; | 2935 | goto out; |
2728 | } | 2936 | } |
@@ -2732,19 +2940,21 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
2732 | goto out; | 2940 | goto out; |
2733 | } | 2941 | } |
2734 | 2942 | ||
2735 | if (drv->scan_req) { | 2943 | if (rdev->scan_req) { |
2736 | err = -EBUSY; | 2944 | err = -EBUSY; |
2737 | goto out; | 2945 | goto out; |
2738 | } | 2946 | } |
2739 | 2947 | ||
2740 | if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { | 2948 | if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { |
2741 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) | 2949 | n_channels = validate_scan_freqs( |
2742 | n_channels++; | 2950 | info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); |
2743 | if (!n_channels) { | 2951 | if (!n_channels) { |
2744 | err = -EINVAL; | 2952 | err = -EINVAL; |
2745 | goto out; | 2953 | goto out; |
2746 | } | 2954 | } |
2747 | } else { | 2955 | } else { |
2956 | n_channels = 0; | ||
2957 | |||
2748 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) | 2958 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) |
2749 | if (wiphy->bands[band]) | 2959 | if (wiphy->bands[band]) |
2750 | n_channels += wiphy->bands[band]->n_channels; | 2960 | n_channels += wiphy->bands[band]->n_channels; |
@@ -2778,10 +2988,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
2778 | goto out; | 2988 | goto out; |
2779 | } | 2989 | } |
2780 | 2990 | ||
2781 | request->channels = (void *)((char *)request + sizeof(*request)); | ||
2782 | request->n_channels = n_channels; | 2991 | request->n_channels = n_channels; |
2783 | if (n_ssids) | 2992 | if (n_ssids) |
2784 | request->ssids = (void *)(request->channels + n_channels); | 2993 | request->ssids = (void *)&request->channels[n_channels]; |
2785 | request->n_ssids = n_ssids; | 2994 | request->n_ssids = n_ssids; |
2786 | if (ie_len) { | 2995 | if (ie_len) { |
2787 | if (request->ssids) | 2996 | if (request->ssids) |
@@ -2836,19 +3045,24 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
2836 | request->ie_len); | 3045 | request->ie_len); |
2837 | } | 3046 | } |
2838 | 3047 | ||
2839 | request->ifidx = dev->ifindex; | 3048 | request->dev = dev; |
2840 | request->wiphy = &drv->wiphy; | 3049 | request->wiphy = &rdev->wiphy; |
3050 | |||
3051 | rdev->scan_req = request; | ||
3052 | err = rdev->ops->scan(&rdev->wiphy, dev, request); | ||
2841 | 3053 | ||
2842 | drv->scan_req = request; | 3054 | if (!err) { |
2843 | err = drv->ops->scan(&drv->wiphy, dev, request); | 3055 | nl80211_send_scan_start(rdev, dev); |
3056 | dev_hold(dev); | ||
3057 | } | ||
2844 | 3058 | ||
2845 | out_free: | 3059 | out_free: |
2846 | if (err) { | 3060 | if (err) { |
2847 | drv->scan_req = NULL; | 3061 | rdev->scan_req = NULL; |
2848 | kfree(request); | 3062 | kfree(request); |
2849 | } | 3063 | } |
2850 | out: | 3064 | out: |
2851 | cfg80211_put_dev(drv); | 3065 | cfg80211_unlock_rdev(rdev); |
2852 | dev_put(dev); | 3066 | dev_put(dev); |
2853 | out_rtnl: | 3067 | out_rtnl: |
2854 | rtnl_unlock(); | 3068 | rtnl_unlock(); |
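The request-layout change in the hunks above goes together with struct cfg80211_scan_request gaining a trailing channels[] array member: the SSID block now starts at &request->channels[n_channels] instead of being computed from sizeof(*request), the request records the originating net_device rather than an ifindex, and nl80211_send_scan_start() is emitted once the driver accepts the scan. A rough sketch of the single-allocation layout implied by the pointer setup shown (the exact allocation expression is outside this hunk and only assumed here):

	/*
	 * One allocation, variable-length tails packed after the fixed struct:
	 *
	 *   base                       struct cfg80211_scan_request
	 *   base + sizeof(*request)    channels[0 .. n_channels-1]  (trailing array member)
	 *   &channels[n_channels]      ssids[0 .. n_ssids-1]        (if any)
	 *   after the ssids            ie[0 .. ie_len-1]            (if any)
	 *
	 * which is why the tail pointers can be derived directly:
	 */
	request->ssids = (void *)&request->channels[n_channels];
	request->ie    = (void *)(request->ssids + n_ssids);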
@@ -2858,20 +3072,23 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
2858 | 3072 | ||
2859 | static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags, | 3073 | static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags, |
2860 | struct cfg80211_registered_device *rdev, | 3074 | struct cfg80211_registered_device *rdev, |
2861 | struct net_device *dev, | 3075 | struct wireless_dev *wdev, |
2862 | struct cfg80211_bss *res) | 3076 | struct cfg80211_internal_bss *intbss) |
2863 | { | 3077 | { |
3078 | struct cfg80211_bss *res = &intbss->pub; | ||
2864 | void *hdr; | 3079 | void *hdr; |
2865 | struct nlattr *bss; | 3080 | struct nlattr *bss; |
3081 | int i; | ||
3082 | |||
3083 | ASSERT_WDEV_LOCK(wdev); | ||
2866 | 3084 | ||
2867 | hdr = nl80211hdr_put(msg, pid, seq, flags, | 3085 | hdr = nl80211hdr_put(msg, pid, seq, flags, |
2868 | NL80211_CMD_NEW_SCAN_RESULTS); | 3086 | NL80211_CMD_NEW_SCAN_RESULTS); |
2869 | if (!hdr) | 3087 | if (!hdr) |
2870 | return -1; | 3088 | return -1; |
2871 | 3089 | ||
2872 | NLA_PUT_U32(msg, NL80211_ATTR_SCAN_GENERATION, | 3090 | NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation); |
2873 | rdev->bss_generation); | 3091 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex); |
2874 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); | ||
2875 | 3092 | ||
2876 | bss = nla_nest_start(msg, NL80211_ATTR_BSS); | 3093 | bss = nla_nest_start(msg, NL80211_ATTR_BSS); |
2877 | if (!bss) | 3094 | if (!bss) |
@@ -2900,6 +3117,28 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
2900 | break; | 3117 | break; |
2901 | } | 3118 | } |
2902 | 3119 | ||
3120 | switch (wdev->iftype) { | ||
3121 | case NL80211_IFTYPE_STATION: | ||
3122 | if (intbss == wdev->current_bss) | ||
3123 | NLA_PUT_U32(msg, NL80211_BSS_STATUS, | ||
3124 | NL80211_BSS_STATUS_ASSOCIATED); | ||
3125 | else for (i = 0; i < MAX_AUTH_BSSES; i++) { | ||
3126 | if (intbss != wdev->auth_bsses[i]) | ||
3127 | continue; | ||
3128 | NLA_PUT_U32(msg, NL80211_BSS_STATUS, | ||
3129 | NL80211_BSS_STATUS_AUTHENTICATED); | ||
3130 | break; | ||
3131 | } | ||
3132 | break; | ||
3133 | case NL80211_IFTYPE_ADHOC: | ||
3134 | if (intbss == wdev->current_bss) | ||
3135 | NLA_PUT_U32(msg, NL80211_BSS_STATUS, | ||
3136 | NL80211_BSS_STATUS_IBSS_JOINED); | ||
3137 | break; | ||
3138 | default: | ||
3139 | break; | ||
3140 | } | ||
3141 | |||
2903 | nla_nest_end(msg, bss); | 3142 | nla_nest_end(msg, bss); |
2904 | 3143 | ||
2905 | return genlmsg_end(msg, hdr); | 3144 | return genlmsg_end(msg, hdr); |
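The switch added above attaches an NL80211_BSS_STATUS attribute to a dumped BSS only when it is the interface's current BSS (associated, or IBSS-joined in ad-hoc mode) or one of its authenticated BSSes; for every other entry the attribute is simply absent. A hypothetical userspace scan-dump consumer would decode it along these lines (illustrative only):

	/* Map the optional NL80211_BSS_STATUS value to text. */
	static const char *bss_status_str(uint32_t status)
	{
		switch (status) {
		case NL80211_BSS_STATUS_AUTHENTICATED:
			return "authenticated";
		case NL80211_BSS_STATUS_ASSOCIATED:
			return "associated";
		case NL80211_BSS_STATUS_IBSS_JOINED:
			return "joined (IBSS)";
		default:
			return "unknown";
		}
	}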
@@ -2912,9 +3151,10 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags, | |||
2912 | static int nl80211_dump_scan(struct sk_buff *skb, | 3151 | static int nl80211_dump_scan(struct sk_buff *skb, |
2913 | struct netlink_callback *cb) | 3152 | struct netlink_callback *cb) |
2914 | { | 3153 | { |
2915 | struct cfg80211_registered_device *dev; | 3154 | struct cfg80211_registered_device *rdev; |
2916 | struct net_device *netdev; | 3155 | struct net_device *dev; |
2917 | struct cfg80211_internal_bss *scan; | 3156 | struct cfg80211_internal_bss *scan; |
3157 | struct wireless_dev *wdev; | ||
2918 | int ifidx = cb->args[0]; | 3158 | int ifidx = cb->args[0]; |
2919 | int start = cb->args[1], idx = 0; | 3159 | int start = cb->args[1], idx = 0; |
2920 | int err; | 3160 | int err; |
@@ -2935,58 +3175,83 @@ static int nl80211_dump_scan(struct sk_buff *skb, | |||
2935 | cb->args[0] = ifidx; | 3175 | cb->args[0] = ifidx; |
2936 | } | 3176 | } |
2937 | 3177 | ||
2938 | netdev = dev_get_by_index(&init_net, ifidx); | 3178 | dev = dev_get_by_index(sock_net(skb->sk), ifidx); |
2939 | if (!netdev) | 3179 | if (!dev) |
2940 | return -ENODEV; | 3180 | return -ENODEV; |
2941 | 3181 | ||
2942 | dev = cfg80211_get_dev_from_ifindex(ifidx); | 3182 | rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); |
2943 | if (IS_ERR(dev)) { | 3183 | if (IS_ERR(rdev)) { |
2944 | err = PTR_ERR(dev); | 3184 | err = PTR_ERR(rdev); |
2945 | goto out_put_netdev; | 3185 | goto out_put_netdev; |
2946 | } | 3186 | } |
2947 | 3187 | ||
2948 | spin_lock_bh(&dev->bss_lock); | 3188 | wdev = dev->ieee80211_ptr; |
2949 | cfg80211_bss_expire(dev); | 3189 | |
3190 | wdev_lock(wdev); | ||
3191 | spin_lock_bh(&rdev->bss_lock); | ||
3192 | cfg80211_bss_expire(rdev); | ||
2950 | 3193 | ||
2951 | list_for_each_entry(scan, &dev->bss_list, list) { | 3194 | list_for_each_entry(scan, &rdev->bss_list, list) { |
2952 | if (++idx <= start) | 3195 | if (++idx <= start) |
2953 | continue; | 3196 | continue; |
2954 | if (nl80211_send_bss(skb, | 3197 | if (nl80211_send_bss(skb, |
2955 | NETLINK_CB(cb->skb).pid, | 3198 | NETLINK_CB(cb->skb).pid, |
2956 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 3199 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
2957 | dev, netdev, &scan->pub) < 0) { | 3200 | rdev, wdev, scan) < 0) { |
2958 | idx--; | 3201 | idx--; |
2959 | goto out; | 3202 | goto out; |
2960 | } | 3203 | } |
2961 | } | 3204 | } |
2962 | 3205 | ||
2963 | out: | 3206 | out: |
2964 | spin_unlock_bh(&dev->bss_lock); | 3207 | spin_unlock_bh(&rdev->bss_lock); |
3208 | wdev_unlock(wdev); | ||
2965 | 3209 | ||
2966 | cb->args[1] = idx; | 3210 | cb->args[1] = idx; |
2967 | err = skb->len; | 3211 | err = skb->len; |
2968 | cfg80211_put_dev(dev); | 3212 | cfg80211_unlock_rdev(rdev); |
2969 | out_put_netdev: | 3213 | out_put_netdev: |
2970 | dev_put(netdev); | 3214 | dev_put(dev); |
2971 | 3215 | ||
2972 | return err; | 3216 | return err; |
2973 | } | 3217 | } |
2974 | 3218 | ||
2975 | static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type) | 3219 | static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type) |
2976 | { | 3220 | { |
2977 | return auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM || | 3221 | return auth_type <= NL80211_AUTHTYPE_MAX; |
2978 | auth_type == NL80211_AUTHTYPE_SHARED_KEY || | ||
2979 | auth_type == NL80211_AUTHTYPE_FT || | ||
2980 | auth_type == NL80211_AUTHTYPE_NETWORK_EAP; | ||
2981 | } | 3222 | } |
2982 | 3223 | ||
3224 | static bool nl80211_valid_wpa_versions(u32 wpa_versions) | ||
3225 | { | ||
3226 | return !(wpa_versions & ~(NL80211_WPA_VERSION_1 | | ||
3227 | NL80211_WPA_VERSION_2)); | ||
3228 | } | ||
3229 | |||
3230 | static bool nl80211_valid_akm_suite(u32 akm) | ||
3231 | { | ||
3232 | return akm == WLAN_AKM_SUITE_8021X || | ||
3233 | akm == WLAN_AKM_SUITE_PSK; | ||
3234 | } | ||
3235 | |||
3236 | static bool nl80211_valid_cipher_suite(u32 cipher) | ||
3237 | { | ||
3238 | return cipher == WLAN_CIPHER_SUITE_WEP40 || | ||
3239 | cipher == WLAN_CIPHER_SUITE_WEP104 || | ||
3240 | cipher == WLAN_CIPHER_SUITE_TKIP || | ||
3241 | cipher == WLAN_CIPHER_SUITE_CCMP || | ||
3242 | cipher == WLAN_CIPHER_SUITE_AES_CMAC; | ||
3243 | } | ||
3244 | |||
3245 | |||
2983 | static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) | 3246 | static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) |
2984 | { | 3247 | { |
2985 | struct cfg80211_registered_device *drv; | 3248 | struct cfg80211_registered_device *rdev; |
2986 | struct net_device *dev; | 3249 | struct net_device *dev; |
2987 | struct cfg80211_auth_request req; | 3250 | struct ieee80211_channel *chan; |
2988 | struct wiphy *wiphy; | 3251 | const u8 *bssid, *ssid, *ie = NULL; |
2989 | int err; | 3252 | int err, ssid_len, ie_len = 0; |
3253 | enum nl80211_auth_type auth_type; | ||
3254 | struct key_parse key; | ||
2990 | 3255 | ||
2991 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 3256 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
2992 | return -EINVAL; | 3257 | return -EINVAL; |
@@ -2997,13 +3262,38 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) | |||
2997 | if (!info->attrs[NL80211_ATTR_AUTH_TYPE]) | 3262 | if (!info->attrs[NL80211_ATTR_AUTH_TYPE]) |
2998 | return -EINVAL; | 3263 | return -EINVAL; |
2999 | 3264 | ||
3265 | if (!info->attrs[NL80211_ATTR_SSID]) | ||
3266 | return -EINVAL; | ||
3267 | |||
3268 | if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) | ||
3269 | return -EINVAL; | ||
3270 | |||
3271 | err = nl80211_parse_key(info, &key); | ||
3272 | if (err) | ||
3273 | return err; | ||
3274 | |||
3275 | if (key.idx >= 0) { | ||
3276 | if (!key.p.key || !key.p.key_len) | ||
3277 | return -EINVAL; | ||
3278 | if ((key.p.cipher != WLAN_CIPHER_SUITE_WEP40 || | ||
3279 | key.p.key_len != WLAN_KEY_LEN_WEP40) && | ||
3280 | (key.p.cipher != WLAN_CIPHER_SUITE_WEP104 || | ||
3281 | key.p.key_len != WLAN_KEY_LEN_WEP104)) | ||
3282 | return -EINVAL; | ||
3283 | if (key.idx > 4) | ||
3284 | return -EINVAL; | ||
3285 | } else { | ||
3286 | key.p.key_len = 0; | ||
3287 | key.p.key = NULL; | ||
3288 | } | ||
3289 | |||
3000 | rtnl_lock(); | 3290 | rtnl_lock(); |
3001 | 3291 | ||
3002 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 3292 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
3003 | if (err) | 3293 | if (err) |
3004 | goto unlock_rtnl; | 3294 | goto unlock_rtnl; |
3005 | 3295 | ||
3006 | if (!drv->ops->auth) { | 3296 | if (!rdev->ops->auth) { |
3007 | err = -EOPNOTSUPP; | 3297 | err = -EOPNOTSUPP; |
3008 | goto out; | 3298 | goto out; |
3009 | } | 3299 | } |
@@ -3018,69 +3308,130 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) | |||
3018 | goto out; | 3308 | goto out; |
3019 | } | 3309 | } |
3020 | 3310 | ||
3021 | wiphy = &drv->wiphy; | 3311 | bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); |
3022 | memset(&req, 0, sizeof(req)); | 3312 | chan = ieee80211_get_channel(&rdev->wiphy, |
3023 | 3313 | nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); | |
3024 | req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); | 3314 | if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED)) { |
3025 | 3315 | err = -EINVAL; | |
3026 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { | 3316 | goto out; |
3027 | req.chan = ieee80211_get_channel( | ||
3028 | wiphy, | ||
3029 | nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); | ||
3030 | if (!req.chan) { | ||
3031 | err = -EINVAL; | ||
3032 | goto out; | ||
3033 | } | ||
3034 | } | 3317 | } |
3035 | 3318 | ||
3036 | if (info->attrs[NL80211_ATTR_SSID]) { | 3319 | ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); |
3037 | req.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); | 3320 | ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); |
3038 | req.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); | ||
3039 | } | ||
3040 | 3321 | ||
3041 | if (info->attrs[NL80211_ATTR_IE]) { | 3322 | if (info->attrs[NL80211_ATTR_IE]) { |
3042 | req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); | 3323 | ie = nla_data(info->attrs[NL80211_ATTR_IE]); |
3043 | req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); | 3324 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); |
3044 | } | 3325 | } |
3045 | 3326 | ||
3046 | req.auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); | 3327 | auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); |
3047 | if (!nl80211_valid_auth_type(req.auth_type)) { | 3328 | if (!nl80211_valid_auth_type(auth_type)) { |
3048 | err = -EINVAL; | 3329 | err = -EINVAL; |
3049 | goto out; | 3330 | goto out; |
3050 | } | 3331 | } |
3051 | 3332 | ||
3052 | err = drv->ops->auth(&drv->wiphy, dev, &req); | 3333 | err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, |
3334 | ssid, ssid_len, ie, ie_len, | ||
3335 | key.p.key, key.p.key_len, key.idx); | ||
3053 | 3336 | ||
3054 | out: | 3337 | out: |
3055 | cfg80211_put_dev(drv); | 3338 | cfg80211_unlock_rdev(rdev); |
3056 | dev_put(dev); | 3339 | dev_put(dev); |
3057 | unlock_rtnl: | 3340 | unlock_rtnl: |
3058 | rtnl_unlock(); | 3341 | rtnl_unlock(); |
3059 | return err; | 3342 | return err; |
3060 | } | 3343 | } |
3061 | 3344 | ||
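nl80211_authenticate() above now parses an optional WEP key (for shared-key authentication) with nl80211_parse_key() and hands the unpacked parameters to cfg80211_mlme_auth(), instead of filling a cfg80211_auth_request and calling the driver op directly. The double-negative cipher/length check in that hunk reads more easily turned around into a positive predicate (an equivalent restatement, not code from the patch):

	/* Equivalent to the key sanity check in nl80211_authenticate() above:
	 * the WEP cipher and key length must match as a pair. */
	static bool wep_key_ok(u32 cipher, int key_len)
	{
		return (cipher == WLAN_CIPHER_SUITE_WEP40 &&
			key_len == WLAN_KEY_LEN_WEP40) ||
		       (cipher == WLAN_CIPHER_SUITE_WEP104 &&
			key_len == WLAN_KEY_LEN_WEP104);
	}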
3345 | static int nl80211_crypto_settings(struct genl_info *info, | ||
3346 | struct cfg80211_crypto_settings *settings, | ||
3347 | int cipher_limit) | ||
3348 | { | ||
3349 | memset(settings, 0, sizeof(*settings)); | ||
3350 | |||
3351 | settings->control_port = info->attrs[NL80211_ATTR_CONTROL_PORT]; | ||
3352 | |||
3353 | if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) { | ||
3354 | void *data; | ||
3355 | int len, i; | ||
3356 | |||
3357 | data = nla_data(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]); | ||
3358 | len = nla_len(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]); | ||
3359 | settings->n_ciphers_pairwise = len / sizeof(u32); | ||
3360 | |||
3361 | if (len % sizeof(u32)) | ||
3362 | return -EINVAL; | ||
3363 | |||
3364 | if (settings->n_ciphers_pairwise > cipher_limit) | ||
3365 | return -EINVAL; | ||
3366 | |||
3367 | memcpy(settings->ciphers_pairwise, data, len); | ||
3368 | |||
3369 | for (i = 0; i < settings->n_ciphers_pairwise; i++) | ||
3370 | if (!nl80211_valid_cipher_suite( | ||
3371 | settings->ciphers_pairwise[i])) | ||
3372 | return -EINVAL; | ||
3373 | } | ||
3374 | |||
3375 | if (info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]) { | ||
3376 | settings->cipher_group = | ||
3377 | nla_get_u32(info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]); | ||
3378 | if (!nl80211_valid_cipher_suite(settings->cipher_group)) | ||
3379 | return -EINVAL; | ||
3380 | } | ||
3381 | |||
3382 | if (info->attrs[NL80211_ATTR_WPA_VERSIONS]) { | ||
3383 | settings->wpa_versions = | ||
3384 | nla_get_u32(info->attrs[NL80211_ATTR_WPA_VERSIONS]); | ||
3385 | if (!nl80211_valid_wpa_versions(settings->wpa_versions)) | ||
3386 | return -EINVAL; | ||
3387 | } | ||
3388 | |||
3389 | if (info->attrs[NL80211_ATTR_AKM_SUITES]) { | ||
3390 | void *data; | ||
3391 | int len, i; | ||
3392 | |||
3393 | data = nla_data(info->attrs[NL80211_ATTR_AKM_SUITES]); | ||
3394 | len = nla_len(info->attrs[NL80211_ATTR_AKM_SUITES]); | ||
3395 | settings->n_akm_suites = len / sizeof(u32); | ||
3396 | |||
3397 | if (len % sizeof(u32)) | ||
3398 | return -EINVAL; | ||
3399 | |||
3400 | memcpy(settings->akm_suites, data, len); | ||
3401 | |||
3402 | for (i = 0; i < settings->n_ciphers_pairwise; i++) | ||
3403 | if (!nl80211_valid_akm_suite(settings->akm_suites[i])) | ||
3404 | return -EINVAL; | ||
3405 | } | ||
3406 | |||
3407 | return 0; | ||
3408 | } | ||
3409 | |||
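nl80211_crypto_settings() above factors the crypto attributes (control port, pairwise and group ciphers, WPA versions, AKM suites) out of the association path so nl80211_connect() below can reuse them. The pairwise-cipher array is checked to be a whole number of u32s and bounded by the caller's cipher_limit before being copied and validated; the AKM array gets the same divisibility check. From the lines shown, note that the AKM validation loop runs to n_ciphers_pairwise rather than n_akm_suites, which looks like a copy-and-paste slip, although the memcpy itself is sized correctly. A generic form of the array handling, as a hypothetical helper (not in the patch):

	/* Copy a netlink attribute carrying an array of u32 values.
	 * Rejects payloads that are not a multiple of sizeof(u32) or that
	 * exceed 'max' elements. */
	static int nla_get_u32_array(const struct nlattr *attr, u32 *out,
				     int max, int *count)
	{
		int len = nla_len(attr);

		if (len % sizeof(u32))
			return -EINVAL;
		*count = len / sizeof(u32);
		if (*count > max)
			return -EINVAL;
		memcpy(out, nla_data(attr), len);
		return 0;
	}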
3062 | static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) | 3410 | static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) |
3063 | { | 3411 | { |
3064 | struct cfg80211_registered_device *drv; | 3412 | struct cfg80211_registered_device *rdev; |
3065 | struct net_device *dev; | 3413 | struct net_device *dev; |
3066 | struct cfg80211_assoc_request req; | 3414 | struct cfg80211_crypto_settings crypto; |
3067 | struct wiphy *wiphy; | 3415 | struct ieee80211_channel *chan, *fixedchan; |
3068 | int err; | 3416 | const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; |
3417 | int err, ssid_len, ie_len = 0; | ||
3418 | bool use_mfp = false; | ||
3069 | 3419 | ||
3070 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 3420 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
3071 | return -EINVAL; | 3421 | return -EINVAL; |
3072 | 3422 | ||
3073 | if (!info->attrs[NL80211_ATTR_MAC] || | 3423 | if (!info->attrs[NL80211_ATTR_MAC] || |
3074 | !info->attrs[NL80211_ATTR_SSID]) | 3424 | !info->attrs[NL80211_ATTR_SSID] || |
3425 | !info->attrs[NL80211_ATTR_WIPHY_FREQ]) | ||
3075 | return -EINVAL; | 3426 | return -EINVAL; |
3076 | 3427 | ||
3077 | rtnl_lock(); | 3428 | rtnl_lock(); |
3078 | 3429 | ||
3079 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 3430 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
3080 | if (err) | 3431 | if (err) |
3081 | goto unlock_rtnl; | 3432 | goto unlock_rtnl; |
3082 | 3433 | ||
3083 | if (!drv->ops->assoc) { | 3434 | if (!rdev->ops->assoc) { |
3084 | err = -EOPNOTSUPP; | 3435 | err = -EOPNOTSUPP; |
3085 | goto out; | 3436 | goto out; |
3086 | } | 3437 | } |
@@ -3095,46 +3446,54 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) | |||
3095 | goto out; | 3446 | goto out; |
3096 | } | 3447 | } |
3097 | 3448 | ||
3098 | wiphy = &drv->wiphy; | 3449 | bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); |
3099 | memset(&req, 0, sizeof(req)); | ||
3100 | 3450 | ||
3101 | req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); | 3451 | chan = ieee80211_get_channel(&rdev->wiphy, |
3452 | nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); | ||
3453 | if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED)) { | ||
3454 | err = -EINVAL; | ||
3455 | goto out; | ||
3456 | } | ||
3102 | 3457 | ||
3103 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { | 3458 | mutex_lock(&rdev->devlist_mtx); |
3104 | req.chan = ieee80211_get_channel( | 3459 | fixedchan = rdev_fixed_channel(rdev, NULL); |
3105 | wiphy, | 3460 | if (fixedchan && chan != fixedchan) { |
3106 | nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); | 3461 | err = -EBUSY; |
3107 | if (!req.chan) { | 3462 | mutex_unlock(&rdev->devlist_mtx); |
3108 | err = -EINVAL; | 3463 | goto out; |
3109 | goto out; | ||
3110 | } | ||
3111 | } | 3464 | } |
3465 | mutex_unlock(&rdev->devlist_mtx); | ||
3112 | 3466 | ||
3113 | req.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); | 3467 | ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); |
3114 | req.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); | 3468 | ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); |
3115 | 3469 | ||
3116 | if (info->attrs[NL80211_ATTR_IE]) { | 3470 | if (info->attrs[NL80211_ATTR_IE]) { |
3117 | req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); | 3471 | ie = nla_data(info->attrs[NL80211_ATTR_IE]); |
3118 | req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); | 3472 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); |
3119 | } | 3473 | } |
3120 | 3474 | ||
3121 | if (info->attrs[NL80211_ATTR_USE_MFP]) { | 3475 | if (info->attrs[NL80211_ATTR_USE_MFP]) { |
3122 | enum nl80211_mfp use_mfp = | 3476 | enum nl80211_mfp mfp = |
3123 | nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]); | 3477 | nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]); |
3124 | if (use_mfp == NL80211_MFP_REQUIRED) | 3478 | if (mfp == NL80211_MFP_REQUIRED) |
3125 | req.use_mfp = true; | 3479 | use_mfp = true; |
3126 | else if (use_mfp != NL80211_MFP_NO) { | 3480 | else if (mfp != NL80211_MFP_NO) { |
3127 | err = -EINVAL; | 3481 | err = -EINVAL; |
3128 | goto out; | 3482 | goto out; |
3129 | } | 3483 | } |
3130 | } | 3484 | } |
3131 | 3485 | ||
3132 | req.control_port = info->attrs[NL80211_ATTR_CONTROL_PORT]; | 3486 | if (info->attrs[NL80211_ATTR_PREV_BSSID]) |
3487 | prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); | ||
3133 | 3488 | ||
3134 | err = drv->ops->assoc(&drv->wiphy, dev, &req); | 3489 | err = nl80211_crypto_settings(info, &crypto, 1); |
3490 | if (!err) | ||
3491 | err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, | ||
3492 | ssid, ssid_len, ie, ie_len, use_mfp, | ||
3493 | &crypto); | ||
3135 | 3494 | ||
3136 | out: | 3495 | out: |
3137 | cfg80211_put_dev(drv); | 3496 | cfg80211_unlock_rdev(rdev); |
3138 | dev_put(dev); | 3497 | dev_put(dev); |
3139 | unlock_rtnl: | 3498 | unlock_rtnl: |
3140 | rtnl_unlock(); | 3499 | rtnl_unlock(); |
@@ -3143,11 +3502,11 @@ unlock_rtnl: | |||
3143 | 3502 | ||
3144 | static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) | 3503 | static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) |
3145 | { | 3504 | { |
3146 | struct cfg80211_registered_device *drv; | 3505 | struct cfg80211_registered_device *rdev; |
3147 | struct net_device *dev; | 3506 | struct net_device *dev; |
3148 | struct cfg80211_deauth_request req; | 3507 | const u8 *ie = NULL, *bssid; |
3149 | struct wiphy *wiphy; | 3508 | int err, ie_len = 0; |
3150 | int err; | 3509 | u16 reason_code; |
3151 | 3510 | ||
3152 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 3511 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
3153 | return -EINVAL; | 3512 | return -EINVAL; |
@@ -3160,11 +3519,11 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) | |||
3160 | 3519 | ||
3161 | rtnl_lock(); | 3520 | rtnl_lock(); |
3162 | 3521 | ||
3163 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 3522 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
3164 | if (err) | 3523 | if (err) |
3165 | goto unlock_rtnl; | 3524 | goto unlock_rtnl; |
3166 | 3525 | ||
3167 | if (!drv->ops->deauth) { | 3526 | if (!rdev->ops->deauth) { |
3168 | err = -EOPNOTSUPP; | 3527 | err = -EOPNOTSUPP; |
3169 | goto out; | 3528 | goto out; |
3170 | } | 3529 | } |
@@ -3179,27 +3538,24 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) | |||
3179 | goto out; | 3538 | goto out; |
3180 | } | 3539 | } |
3181 | 3540 | ||
3182 | wiphy = &drv->wiphy; | 3541 | bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); |
3183 | memset(&req, 0, sizeof(req)); | ||
3184 | 3542 | ||
3185 | req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); | 3543 | reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); |
3186 | 3544 | if (reason_code == 0) { | |
3187 | req.reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); | ||
3188 | if (req.reason_code == 0) { | ||
3189 | /* Reason Code 0 is reserved */ | 3545 | /* Reason Code 0 is reserved */ |
3190 | err = -EINVAL; | 3546 | err = -EINVAL; |
3191 | goto out; | 3547 | goto out; |
3192 | } | 3548 | } |
3193 | 3549 | ||
3194 | if (info->attrs[NL80211_ATTR_IE]) { | 3550 | if (info->attrs[NL80211_ATTR_IE]) { |
3195 | req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); | 3551 | ie = nla_data(info->attrs[NL80211_ATTR_IE]); |
3196 | req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); | 3552 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); |
3197 | } | 3553 | } |
3198 | 3554 | ||
3199 | err = drv->ops->deauth(&drv->wiphy, dev, &req); | 3555 | err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code); |
3200 | 3556 | ||
3201 | out: | 3557 | out: |
3202 | cfg80211_put_dev(drv); | 3558 | cfg80211_unlock_rdev(rdev); |
3203 | dev_put(dev); | 3559 | dev_put(dev); |
3204 | unlock_rtnl: | 3560 | unlock_rtnl: |
3205 | rtnl_unlock(); | 3561 | rtnl_unlock(); |
@@ -3208,11 +3564,11 @@ unlock_rtnl: | |||
3208 | 3564 | ||
3209 | static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) | 3565 | static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) |
3210 | { | 3566 | { |
3211 | struct cfg80211_registered_device *drv; | 3567 | struct cfg80211_registered_device *rdev; |
3212 | struct net_device *dev; | 3568 | struct net_device *dev; |
3213 | struct cfg80211_disassoc_request req; | 3569 | const u8 *ie = NULL, *bssid; |
3214 | struct wiphy *wiphy; | 3570 | int err, ie_len = 0; |
3215 | int err; | 3571 | u16 reason_code; |
3216 | 3572 | ||
3217 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 3573 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
3218 | return -EINVAL; | 3574 | return -EINVAL; |
@@ -3225,11 +3581,11 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) | |||
3225 | 3581 | ||
3226 | rtnl_lock(); | 3582 | rtnl_lock(); |
3227 | 3583 | ||
3228 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 3584 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
3229 | if (err) | 3585 | if (err) |
3230 | goto unlock_rtnl; | 3586 | goto unlock_rtnl; |
3231 | 3587 | ||
3232 | if (!drv->ops->disassoc) { | 3588 | if (!rdev->ops->disassoc) { |
3233 | err = -EOPNOTSUPP; | 3589 | err = -EOPNOTSUPP; |
3234 | goto out; | 3590 | goto out; |
3235 | } | 3591 | } |
@@ -3244,27 +3600,24 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) | |||
3244 | goto out; | 3600 | goto out; |
3245 | } | 3601 | } |
3246 | 3602 | ||
3247 | wiphy = &drv->wiphy; | 3603 | bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); |
3248 | memset(&req, 0, sizeof(req)); | ||
3249 | 3604 | ||
3250 | req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); | 3605 | reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); |
3251 | 3606 | if (reason_code == 0) { | |
3252 | req.reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); | ||
3253 | if (req.reason_code == 0) { | ||
3254 | /* Reason Code 0 is reserved */ | 3607 | /* Reason Code 0 is reserved */ |
3255 | err = -EINVAL; | 3608 | err = -EINVAL; |
3256 | goto out; | 3609 | goto out; |
3257 | } | 3610 | } |
3258 | 3611 | ||
3259 | if (info->attrs[NL80211_ATTR_IE]) { | 3612 | if (info->attrs[NL80211_ATTR_IE]) { |
3260 | req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); | 3613 | ie = nla_data(info->attrs[NL80211_ATTR_IE]); |
3261 | req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); | 3614 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); |
3262 | } | 3615 | } |
3263 | 3616 | ||
3264 | err = drv->ops->disassoc(&drv->wiphy, dev, &req); | 3617 | err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code); |
3265 | 3618 | ||
3266 | out: | 3619 | out: |
3267 | cfg80211_put_dev(drv); | 3620 | cfg80211_unlock_rdev(rdev); |
3268 | dev_put(dev); | 3621 | dev_put(dev); |
3269 | unlock_rtnl: | 3622 | unlock_rtnl: |
3270 | rtnl_unlock(); | 3623 | rtnl_unlock(); |
@@ -3273,10 +3626,11 @@ unlock_rtnl: | |||
3273 | 3626 | ||
3274 | static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) | 3627 | static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) |
3275 | { | 3628 | { |
3276 | struct cfg80211_registered_device *drv; | 3629 | struct cfg80211_registered_device *rdev; |
3277 | struct net_device *dev; | 3630 | struct net_device *dev; |
3278 | struct cfg80211_ibss_params ibss; | 3631 | struct cfg80211_ibss_params ibss; |
3279 | struct wiphy *wiphy; | 3632 | struct wiphy *wiphy; |
3633 | struct cfg80211_cached_keys *connkeys = NULL; | ||
3280 | int err; | 3634 | int err; |
3281 | 3635 | ||
3282 | memset(&ibss, 0, sizeof(ibss)); | 3636 | memset(&ibss, 0, sizeof(ibss)); |
@@ -3300,11 +3654,11 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) | |||
3300 | 3654 | ||
3301 | rtnl_lock(); | 3655 | rtnl_lock(); |
3302 | 3656 | ||
3303 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 3657 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
3304 | if (err) | 3658 | if (err) |
3305 | goto unlock_rtnl; | 3659 | goto unlock_rtnl; |
3306 | 3660 | ||
3307 | if (!drv->ops->join_ibss) { | 3661 | if (!rdev->ops->join_ibss) { |
3308 | err = -EOPNOTSUPP; | 3662 | err = -EOPNOTSUPP; |
3309 | goto out; | 3663 | goto out; |
3310 | } | 3664 | } |
@@ -3319,7 +3673,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) | |||
3319 | goto out; | 3673 | goto out; |
3320 | } | 3674 | } |
3321 | 3675 | ||
3322 | wiphy = &drv->wiphy; | 3676 | wiphy = &rdev->wiphy; |
3323 | 3677 | ||
3324 | if (info->attrs[NL80211_ATTR_MAC]) | 3678 | if (info->attrs[NL80211_ATTR_MAC]) |
3325 | ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); | 3679 | ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); |
@@ -3341,30 +3695,43 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) | |||
3341 | } | 3695 | } |
3342 | 3696 | ||
3343 | ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED]; | 3697 | ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED]; |
3698 | ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; | ||
3699 | |||
3700 | if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) { | ||
3701 | connkeys = nl80211_parse_connkeys(rdev, | ||
3702 | info->attrs[NL80211_ATTR_KEYS]); | ||
3703 | if (IS_ERR(connkeys)) { | ||
3704 | err = PTR_ERR(connkeys); | ||
3705 | connkeys = NULL; | ||
3706 | goto out; | ||
3707 | } | ||
3708 | } | ||
3344 | 3709 | ||
3345 | err = cfg80211_join_ibss(drv, dev, &ibss); | 3710 | err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys); |
3346 | 3711 | ||
3347 | out: | 3712 | out: |
3348 | cfg80211_put_dev(drv); | 3713 | cfg80211_unlock_rdev(rdev); |
3349 | dev_put(dev); | 3714 | dev_put(dev); |
3350 | unlock_rtnl: | 3715 | unlock_rtnl: |
3716 | if (err) | ||
3717 | kfree(connkeys); | ||
3351 | rtnl_unlock(); | 3718 | rtnl_unlock(); |
3352 | return err; | 3719 | return err; |
3353 | } | 3720 | } |
3354 | 3721 | ||
3355 | static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info) | 3722 | static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info) |
3356 | { | 3723 | { |
3357 | struct cfg80211_registered_device *drv; | 3724 | struct cfg80211_registered_device *rdev; |
3358 | struct net_device *dev; | 3725 | struct net_device *dev; |
3359 | int err; | 3726 | int err; |
3360 | 3727 | ||
3361 | rtnl_lock(); | 3728 | rtnl_lock(); |
3362 | 3729 | ||
3363 | err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); | 3730 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); |
3364 | if (err) | 3731 | if (err) |
3365 | goto unlock_rtnl; | 3732 | goto unlock_rtnl; |
3366 | 3733 | ||
3367 | if (!drv->ops->leave_ibss) { | 3734 | if (!rdev->ops->leave_ibss) { |
3368 | err = -EOPNOTSUPP; | 3735 | err = -EOPNOTSUPP; |
3369 | goto out; | 3736 | goto out; |
3370 | } | 3737 | } |
@@ -3379,12 +3746,309 @@ static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info) | |||
3379 | goto out; | 3746 | goto out; |
3380 | } | 3747 | } |
3381 | 3748 | ||
3382 | err = cfg80211_leave_ibss(drv, dev, false); | 3749 | err = cfg80211_leave_ibss(rdev, dev, false); |
3750 | |||
3751 | out: | ||
3752 | cfg80211_unlock_rdev(rdev); | ||
3753 | dev_put(dev); | ||
3754 | unlock_rtnl: | ||
3755 | rtnl_unlock(); | ||
3756 | return err; | ||
3757 | } | ||
3758 | |||
3759 | #ifdef CONFIG_NL80211_TESTMODE | ||
3760 | static struct genl_multicast_group nl80211_testmode_mcgrp = { | ||
3761 | .name = "testmode", | ||
3762 | }; | ||
3763 | |||
3764 | static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info) | ||
3765 | { | ||
3766 | struct cfg80211_registered_device *rdev; | ||
3767 | int err; | ||
3768 | |||
3769 | if (!info->attrs[NL80211_ATTR_TESTDATA]) | ||
3770 | return -EINVAL; | ||
3771 | |||
3772 | rtnl_lock(); | ||
3773 | |||
3774 | rdev = cfg80211_get_dev_from_info(info); | ||
3775 | if (IS_ERR(rdev)) { | ||
3776 | err = PTR_ERR(rdev); | ||
3777 | goto unlock_rtnl; | ||
3778 | } | ||
3779 | |||
3780 | err = -EOPNOTSUPP; | ||
3781 | if (rdev->ops->testmode_cmd) { | ||
3782 | rdev->testmode_info = info; | ||
3783 | err = rdev->ops->testmode_cmd(&rdev->wiphy, | ||
3784 | nla_data(info->attrs[NL80211_ATTR_TESTDATA]), | ||
3785 | nla_len(info->attrs[NL80211_ATTR_TESTDATA])); | ||
3786 | rdev->testmode_info = NULL; | ||
3787 | } | ||
3788 | |||
3789 | cfg80211_unlock_rdev(rdev); | ||
3790 | |||
3791 | unlock_rtnl: | ||
3792 | rtnl_unlock(); | ||
3793 | return err; | ||
3794 | } | ||
3795 | |||
3796 | static struct sk_buff * | ||
3797 | __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev, | ||
3798 | int approxlen, u32 pid, u32 seq, gfp_t gfp) | ||
3799 | { | ||
3800 | struct sk_buff *skb; | ||
3801 | void *hdr; | ||
3802 | struct nlattr *data; | ||
3803 | |||
3804 | skb = nlmsg_new(approxlen + 100, gfp); | ||
3805 | if (!skb) | ||
3806 | return NULL; | ||
3807 | |||
3808 | hdr = nl80211hdr_put(skb, pid, seq, 0, NL80211_CMD_TESTMODE); | ||
3809 | if (!hdr) { | ||
3810 | kfree_skb(skb); | ||
3811 | return NULL; | ||
3812 | } | ||
3813 | |||
3814 | NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx); | ||
3815 | data = nla_nest_start(skb, NL80211_ATTR_TESTDATA); | ||
3816 | |||
3817 | ((void **)skb->cb)[0] = rdev; | ||
3818 | ((void **)skb->cb)[1] = hdr; | ||
3819 | ((void **)skb->cb)[2] = data; | ||
3820 | |||
3821 | return skb; | ||
3822 | |||
3823 | nla_put_failure: | ||
3824 | kfree_skb(skb); | ||
3825 | return NULL; | ||
3826 | } | ||
3827 | |||
3828 | struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy, | ||
3829 | int approxlen) | ||
3830 | { | ||
3831 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | ||
3832 | |||
3833 | if (WARN_ON(!rdev->testmode_info)) | ||
3834 | return NULL; | ||
3835 | |||
3836 | return __cfg80211_testmode_alloc_skb(rdev, approxlen, | ||
3837 | rdev->testmode_info->snd_pid, | ||
3838 | rdev->testmode_info->snd_seq, | ||
3839 | GFP_KERNEL); | ||
3840 | } | ||
3841 | EXPORT_SYMBOL(cfg80211_testmode_alloc_reply_skb); | ||
3842 | |||
3843 | int cfg80211_testmode_reply(struct sk_buff *skb) | ||
3844 | { | ||
3845 | struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0]; | ||
3846 | void *hdr = ((void **)skb->cb)[1]; | ||
3847 | struct nlattr *data = ((void **)skb->cb)[2]; | ||
3848 | |||
3849 | if (WARN_ON(!rdev->testmode_info)) { | ||
3850 | kfree_skb(skb); | ||
3851 | return -EINVAL; | ||
3852 | } | ||
3853 | |||
3854 | nla_nest_end(skb, data); | ||
3855 | genlmsg_end(skb, hdr); | ||
3856 | return genlmsg_reply(skb, rdev->testmode_info); | ||
3857 | } | ||
3858 | EXPORT_SYMBOL(cfg80211_testmode_reply); | ||
3859 | |||
3860 | struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, | ||
3861 | int approxlen, gfp_t gfp) | ||
3862 | { | ||
3863 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | ||
3864 | |||
3865 | return __cfg80211_testmode_alloc_skb(rdev, approxlen, 0, 0, gfp); | ||
3866 | } | ||
3867 | EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb); | ||
3868 | |||
3869 | void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) | ||
3870 | { | ||
3871 | void *hdr = ((void **)skb->cb)[1]; | ||
3872 | struct nlattr *data = ((void **)skb->cb)[2]; | ||
3873 | |||
3874 | nla_nest_end(skb, data); | ||
3875 | genlmsg_end(skb, hdr); | ||
3876 | genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp); | ||
3877 | } | ||
3878 | EXPORT_SYMBOL(cfg80211_testmode_event); | ||
3879 | #endif | ||
3880 | |||
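The testmode plumbing above stashes the rdev, the generic netlink header and the open NL80211_ATTR_TESTDATA nest in skb->cb, so a driver can append its own attributes and finish the message with cfg80211_testmode_reply() or cfg80211_testmode_event(). A minimal sketch of a driver-side handler using only the helpers exported above (the driver name and attribute number are made up for illustration):

	/* Hypothetical driver: echo a status word back to the testmode caller. */
	static int mydrv_testmode_cmd(struct wiphy *wiphy, void *data, int len)
	{
		struct sk_buff *reply;

		reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(u32));
		if (!reply)
			return -ENOMEM;

		/* The TESTDATA nest is already open; add driver attributes here. */
		if (nla_put_u32(reply, 1 /* made-up driver attribute id */, 0x1234)) {
			kfree_skb(reply);
			return -ENOBUFS;
		}

		return cfg80211_testmode_reply(reply);
	}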
3881 | static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) | ||
3882 | { | ||
3883 | struct cfg80211_registered_device *rdev; | ||
3884 | struct net_device *dev; | ||
3885 | struct cfg80211_connect_params connect; | ||
3886 | struct wiphy *wiphy; | ||
3887 | struct cfg80211_cached_keys *connkeys = NULL; | ||
3888 | int err; | ||
3889 | |||
3890 | memset(&connect, 0, sizeof(connect)); | ||
3891 | |||
3892 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | ||
3893 | return -EINVAL; | ||
3894 | |||
3895 | if (!info->attrs[NL80211_ATTR_SSID] || | ||
3896 | !nla_len(info->attrs[NL80211_ATTR_SSID])) | ||
3897 | return -EINVAL; | ||
3898 | |||
3899 | if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { | ||
3900 | connect.auth_type = | ||
3901 | nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); | ||
3902 | if (!nl80211_valid_auth_type(connect.auth_type)) | ||
3903 | return -EINVAL; | ||
3904 | } else | ||
3905 | connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; | ||
3906 | |||
3907 | connect.privacy = info->attrs[NL80211_ATTR_PRIVACY]; | ||
3908 | |||
3909 | err = nl80211_crypto_settings(info, &connect.crypto, | ||
3910 | NL80211_MAX_NR_CIPHER_SUITES); | ||
3911 | if (err) | ||
3912 | return err; | ||
3913 | rtnl_lock(); | ||
3914 | |||
3915 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); | ||
3916 | if (err) | ||
3917 | goto unlock_rtnl; | ||
3918 | |||
3919 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { | ||
3920 | err = -EOPNOTSUPP; | ||
3921 | goto out; | ||
3922 | } | ||
3923 | |||
3924 | if (!netif_running(dev)) { | ||
3925 | err = -ENETDOWN; | ||
3926 | goto out; | ||
3927 | } | ||
3928 | |||
3929 | wiphy = &rdev->wiphy; | ||
3930 | |||
3931 | if (info->attrs[NL80211_ATTR_MAC]) | ||
3932 | connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); | ||
3933 | connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); | ||
3934 | connect.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); | ||
3935 | |||
3936 | if (info->attrs[NL80211_ATTR_IE]) { | ||
3937 | connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]); | ||
3938 | connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); | ||
3939 | } | ||
3940 | |||
3941 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { | ||
3942 | connect.channel = | ||
3943 | ieee80211_get_channel(wiphy, | ||
3944 | nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); | ||
3945 | if (!connect.channel || | ||
3946 | connect.channel->flags & IEEE80211_CHAN_DISABLED) { | ||
3947 | err = -EINVAL; | ||
3948 | goto out; | ||
3949 | } | ||
3950 | } | ||
3951 | |||
3952 | if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) { | ||
3953 | connkeys = nl80211_parse_connkeys(rdev, | ||
3954 | info->attrs[NL80211_ATTR_KEYS]); | ||
3955 | if (IS_ERR(connkeys)) { | ||
3956 | err = PTR_ERR(connkeys); | ||
3957 | connkeys = NULL; | ||
3958 | goto out; | ||
3959 | } | ||
3960 | } | ||
3961 | |||
3962 | err = cfg80211_connect(rdev, dev, &connect, connkeys); | ||
3383 | 3963 | ||
3384 | out: | 3964 | out: |
3385 | cfg80211_put_dev(drv); | 3965 | cfg80211_unlock_rdev(rdev); |
3386 | dev_put(dev); | 3966 | dev_put(dev); |
3387 | unlock_rtnl: | 3967 | unlock_rtnl: |
3968 | if (err) | ||
3969 | kfree(connkeys); | ||
3970 | rtnl_unlock(); | ||
3971 | return err; | ||
3972 | } | ||
3973 | |||
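The handler above needs only a resolvable NL80211_ATTR_IFINDEX and a non-empty NL80211_ATTR_SSID; auth type, BSSID, extra IEs, frequency and keys are all optional, and the actual association is left to cfg80211_connect(). A minimal userspace sketch of such a request, assuming the libnl-3 genl API (nl_socket_alloc(), genl_ctrl_resolve() and friends are not part of this patch; error handling trimmed):

    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/nl80211.h>
    #include <net/if.h>
    #include <string.h>

    /* Hypothetical helper: ask nl80211 to connect 'ifname' to 'ssid'. */
    static int connect_to_ssid(const char *ifname, const char *ssid)
    {
            struct nl_sock *sk = nl_socket_alloc();
            struct nl_msg *msg = nlmsg_alloc();
            int family, err;

            genl_connect(sk);
            family = genl_ctrl_resolve(sk, "nl80211");

            genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                        NL80211_CMD_CONNECT, 0);
            /* mandatory: interface index and a non-empty SSID */
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));
            nla_put(msg, NL80211_ATTR_SSID, strlen(ssid), ssid);
            /* omitting NL80211_ATTR_AUTH_TYPE selects NL80211_AUTHTYPE_AUTOMATIC */

            err = nl_send_auto(sk, msg);
            nlmsg_free(msg);
            nl_socket_free(sk);
            return err < 0 ? err : 0;
    }

Because the op is registered with GENL_ADMIN_PERM further down, the kernel only honours such a request from a caller with CAP_NET_ADMIN.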
3974 | static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info) | ||
3975 | { | ||
3976 | struct cfg80211_registered_device *rdev; | ||
3977 | struct net_device *dev; | ||
3978 | int err; | ||
3979 | u16 reason; | ||
3980 | |||
3981 | if (!info->attrs[NL80211_ATTR_REASON_CODE]) | ||
3982 | reason = WLAN_REASON_DEAUTH_LEAVING; | ||
3983 | else | ||
3984 | reason = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); | ||
3985 | |||
3986 | if (reason == 0) | ||
3987 | return -EINVAL; | ||
3988 | |||
3989 | rtnl_lock(); | ||
3990 | |||
3991 | err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); | ||
3992 | if (err) | ||
3993 | goto unlock_rtnl; | ||
3994 | |||
3995 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { | ||
3996 | err = -EOPNOTSUPP; | ||
3997 | goto out; | ||
3998 | } | ||
3999 | |||
4000 | if (!netif_running(dev)) { | ||
4001 | err = -ENETDOWN; | ||
4002 | goto out; | ||
4003 | } | ||
4004 | |||
4005 | err = cfg80211_disconnect(rdev, dev, reason, true); | ||
4006 | |||
4007 | out: | ||
4008 | cfg80211_unlock_rdev(rdev); | ||
4009 | dev_put(dev); | ||
4010 | unlock_rtnl: | ||
4011 | rtnl_unlock(); | ||
4012 | return err; | ||
4013 | } | ||
4014 | |||
4015 | static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info) | ||
4016 | { | ||
4017 | struct cfg80211_registered_device *rdev; | ||
4018 | struct net *net; | ||
4019 | int err; | ||
4020 | u32 pid; | ||
4021 | |||
4022 | if (!info->attrs[NL80211_ATTR_PID]) | ||
4023 | return -EINVAL; | ||
4024 | |||
4025 | pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]); | ||
4026 | |||
4027 | rtnl_lock(); | ||
4028 | |||
4029 | rdev = cfg80211_get_dev_from_info(info); | ||
4030 | if (IS_ERR(rdev)) { | ||
4031 | err = PTR_ERR(rdev); | ||
4032 | goto out; | ||
4033 | } | ||
4034 | |||
4035 | net = get_net_ns_by_pid(pid); | ||
4036 | if (IS_ERR(net)) { | ||
4037 | err = PTR_ERR(net); | ||
4038 | goto out; | ||
4039 | } | ||
4040 | |||
4041 | err = 0; | ||
4042 | |||
4043 | /* check if anything to do */ | ||
4044 | if (net_eq(wiphy_net(&rdev->wiphy), net)) | ||
4045 | goto out_put_net; | ||
4046 | |||
4047 | err = cfg80211_switch_netns(rdev, net); | ||
4048 | out_put_net: | ||
4049 | put_net(net); | ||
4050 | out: | ||
4051 | cfg80211_unlock_rdev(rdev); | ||
3388 | rtnl_unlock(); | 4052 | rtnl_unlock(); |
3389 | return err; | 4053 | return err; |
3390 | } | 4054 | } |
@@ -3602,6 +4266,32 @@ static struct genl_ops nl80211_ops[] = { | |||
3602 | .policy = nl80211_policy, | 4266 | .policy = nl80211_policy, |
3603 | .flags = GENL_ADMIN_PERM, | 4267 | .flags = GENL_ADMIN_PERM, |
3604 | }, | 4268 | }, |
4269 | #ifdef CONFIG_NL80211_TESTMODE | ||
4270 | { | ||
4271 | .cmd = NL80211_CMD_TESTMODE, | ||
4272 | .doit = nl80211_testmode_do, | ||
4273 | .policy = nl80211_policy, | ||
4274 | .flags = GENL_ADMIN_PERM, | ||
4275 | }, | ||
4276 | #endif | ||
4277 | { | ||
4278 | .cmd = NL80211_CMD_CONNECT, | ||
4279 | .doit = nl80211_connect, | ||
4280 | .policy = nl80211_policy, | ||
4281 | .flags = GENL_ADMIN_PERM, | ||
4282 | }, | ||
4283 | { | ||
4284 | .cmd = NL80211_CMD_DISCONNECT, | ||
4285 | .doit = nl80211_disconnect, | ||
4286 | .policy = nl80211_policy, | ||
4287 | .flags = GENL_ADMIN_PERM, | ||
4288 | }, | ||
4289 | { | ||
4290 | .cmd = NL80211_CMD_SET_WIPHY_NETNS, | ||
4291 | .doit = nl80211_wiphy_netns, | ||
4292 | .policy = nl80211_policy, | ||
4293 | .flags = GENL_ADMIN_PERM, | ||
4294 | }, | ||
3605 | }; | 4295 | }; |
3606 | static struct genl_multicast_group nl80211_mlme_mcgrp = { | 4296 | static struct genl_multicast_group nl80211_mlme_mcgrp = { |
3607 | .name = "mlme", | 4297 | .name = "mlme", |
@@ -3633,7 +4323,8 @@ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev) | |||
3633 | return; | 4323 | return; |
3634 | } | 4324 | } |
3635 | 4325 | ||
3636 | genlmsg_multicast(msg, 0, nl80211_config_mcgrp.id, GFP_KERNEL); | 4326 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
4327 | nl80211_config_mcgrp.id, GFP_KERNEL); | ||
3637 | } | 4328 | } |
3638 | 4329 | ||
3639 | static int nl80211_add_scan_req(struct sk_buff *msg, | 4330 | static int nl80211_add_scan_req(struct sk_buff *msg, |
@@ -3643,6 +4334,8 @@ static int nl80211_add_scan_req(struct sk_buff *msg, | |||
3643 | struct nlattr *nest; | 4334 | struct nlattr *nest; |
3644 | int i; | 4335 | int i; |
3645 | 4336 | ||
4337 | ASSERT_RDEV_LOCK(rdev); | ||
4338 | |||
3646 | if (WARN_ON(!req)) | 4339 | if (WARN_ON(!req)) |
3647 | return 0; | 4340 | return 0; |
3648 | 4341 | ||
@@ -3668,11 +4361,11 @@ static int nl80211_add_scan_req(struct sk_buff *msg, | |||
3668 | return -ENOBUFS; | 4361 | return -ENOBUFS; |
3669 | } | 4362 | } |
3670 | 4363 | ||
3671 | static int nl80211_send_scan_donemsg(struct sk_buff *msg, | 4364 | static int nl80211_send_scan_msg(struct sk_buff *msg, |
3672 | struct cfg80211_registered_device *rdev, | 4365 | struct cfg80211_registered_device *rdev, |
3673 | struct net_device *netdev, | 4366 | struct net_device *netdev, |
3674 | u32 pid, u32 seq, int flags, | 4367 | u32 pid, u32 seq, int flags, |
3675 | u32 cmd) | 4368 | u32 cmd) |
3676 | { | 4369 | { |
3677 | void *hdr; | 4370 | void *hdr; |
3678 | 4371 | ||
@@ -3693,6 +4386,25 @@ static int nl80211_send_scan_donemsg(struct sk_buff *msg, | |||
3693 | return -EMSGSIZE; | 4386 | return -EMSGSIZE; |
3694 | } | 4387 | } |
3695 | 4388 | ||
4389 | void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, | ||
4390 | struct net_device *netdev) | ||
4391 | { | ||
4392 | struct sk_buff *msg; | ||
4393 | |||
4394 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
4395 | if (!msg) | ||
4396 | return; | ||
4397 | |||
4398 | if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0, | ||
4399 | NL80211_CMD_TRIGGER_SCAN) < 0) { | ||
4400 | nlmsg_free(msg); | ||
4401 | return; | ||
4402 | } | ||
4403 | |||
4404 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, | ||
4405 | nl80211_scan_mcgrp.id, GFP_KERNEL); | ||
4406 | } | ||
4407 | |||
3696 | void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, | 4408 | void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, |
3697 | struct net_device *netdev) | 4409 | struct net_device *netdev) |
3698 | { | 4410 | { |
@@ -3702,13 +4414,14 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, | |||
3702 | if (!msg) | 4414 | if (!msg) |
3703 | return; | 4415 | return; |
3704 | 4416 | ||
3705 | if (nl80211_send_scan_donemsg(msg, rdev, netdev, 0, 0, 0, | 4417 | if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0, |
3706 | NL80211_CMD_NEW_SCAN_RESULTS) < 0) { | 4418 | NL80211_CMD_NEW_SCAN_RESULTS) < 0) { |
3707 | nlmsg_free(msg); | 4419 | nlmsg_free(msg); |
3708 | return; | 4420 | return; |
3709 | } | 4421 | } |
3710 | 4422 | ||
3711 | genlmsg_multicast(msg, 0, nl80211_scan_mcgrp.id, GFP_KERNEL); | 4423 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
4424 | nl80211_scan_mcgrp.id, GFP_KERNEL); | ||
3712 | } | 4425 | } |
3713 | 4426 | ||
3714 | void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, | 4427 | void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, |
@@ -3720,13 +4433,14 @@ void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, | |||
3720 | if (!msg) | 4433 | if (!msg) |
3721 | return; | 4434 | return; |
3722 | 4435 | ||
3723 | if (nl80211_send_scan_donemsg(msg, rdev, netdev, 0, 0, 0, | 4436 | if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0, |
3724 | NL80211_CMD_SCAN_ABORTED) < 0) { | 4437 | NL80211_CMD_SCAN_ABORTED) < 0) { |
3725 | nlmsg_free(msg); | 4438 | nlmsg_free(msg); |
3726 | return; | 4439 | return; |
3727 | } | 4440 | } |
3728 | 4441 | ||
3729 | genlmsg_multicast(msg, 0, nl80211_scan_mcgrp.id, GFP_KERNEL); | 4442 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
4443 | nl80211_scan_mcgrp.id, GFP_KERNEL); | ||
3730 | } | 4444 | } |
3731 | 4445 | ||
3732 | /* | 4446 | /* |
@@ -3775,7 +4489,10 @@ void nl80211_send_reg_change_event(struct regulatory_request *request) | |||
3775 | return; | 4489 | return; |
3776 | } | 4490 | } |
3777 | 4491 | ||
3778 | genlmsg_multicast(msg, 0, nl80211_regulatory_mcgrp.id, GFP_KERNEL); | 4492 | rcu_read_lock(); |
4493 | genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, | ||
4494 | GFP_ATOMIC); | ||
4495 | rcu_read_unlock(); | ||
3779 | 4496 | ||
3780 | return; | 4497 | return; |
3781 | 4498 | ||
@@ -3787,12 +4504,12 @@ nla_put_failure: | |||
3787 | static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, | 4504 | static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, |
3788 | struct net_device *netdev, | 4505 | struct net_device *netdev, |
3789 | const u8 *buf, size_t len, | 4506 | const u8 *buf, size_t len, |
3790 | enum nl80211_commands cmd) | 4507 | enum nl80211_commands cmd, gfp_t gfp) |
3791 | { | 4508 | { |
3792 | struct sk_buff *msg; | 4509 | struct sk_buff *msg; |
3793 | void *hdr; | 4510 | void *hdr; |
3794 | 4511 | ||
3795 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | 4512 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); |
3796 | if (!msg) | 4513 | if (!msg) |
3797 | return; | 4514 | return; |
3798 | 4515 | ||
@@ -3811,7 +4528,8 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, | |||
3811 | return; | 4528 | return; |
3812 | } | 4529 | } |
3813 | 4530 | ||
3814 | genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC); | 4531 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
4532 | nl80211_mlme_mcgrp.id, gfp); | ||
3815 | return; | 4533 | return; |
3816 | 4534 | ||
3817 | nla_put_failure: | 4535 | nla_put_failure: |
@@ -3820,42 +4538,45 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, | |||
3820 | } | 4538 | } |
3821 | 4539 | ||
3822 | void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, | 4540 | void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, |
3823 | struct net_device *netdev, const u8 *buf, size_t len) | 4541 | struct net_device *netdev, const u8 *buf, |
4542 | size_t len, gfp_t gfp) | ||
3824 | { | 4543 | { |
3825 | nl80211_send_mlme_event(rdev, netdev, buf, len, | 4544 | nl80211_send_mlme_event(rdev, netdev, buf, len, |
3826 | NL80211_CMD_AUTHENTICATE); | 4545 | NL80211_CMD_AUTHENTICATE, gfp); |
3827 | } | 4546 | } |
3828 | 4547 | ||
3829 | void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, | 4548 | void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, |
3830 | struct net_device *netdev, const u8 *buf, | 4549 | struct net_device *netdev, const u8 *buf, |
3831 | size_t len) | 4550 | size_t len, gfp_t gfp) |
3832 | { | 4551 | { |
3833 | nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_ASSOCIATE); | 4552 | nl80211_send_mlme_event(rdev, netdev, buf, len, |
4553 | NL80211_CMD_ASSOCIATE, gfp); | ||
3834 | } | 4554 | } |
3835 | 4555 | ||
3836 | void nl80211_send_deauth(struct cfg80211_registered_device *rdev, | 4556 | void nl80211_send_deauth(struct cfg80211_registered_device *rdev, |
3837 | struct net_device *netdev, const u8 *buf, size_t len) | 4557 | struct net_device *netdev, const u8 *buf, |
4558 | size_t len, gfp_t gfp) | ||
3838 | { | 4559 | { |
3839 | nl80211_send_mlme_event(rdev, netdev, buf, len, | 4560 | nl80211_send_mlme_event(rdev, netdev, buf, len, |
3840 | NL80211_CMD_DEAUTHENTICATE); | 4561 | NL80211_CMD_DEAUTHENTICATE, gfp); |
3841 | } | 4562 | } |
3842 | 4563 | ||
3843 | void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, | 4564 | void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, |
3844 | struct net_device *netdev, const u8 *buf, | 4565 | struct net_device *netdev, const u8 *buf, |
3845 | size_t len) | 4566 | size_t len, gfp_t gfp) |
3846 | { | 4567 | { |
3847 | nl80211_send_mlme_event(rdev, netdev, buf, len, | 4568 | nl80211_send_mlme_event(rdev, netdev, buf, len, |
3848 | NL80211_CMD_DISASSOCIATE); | 4569 | NL80211_CMD_DISASSOCIATE, gfp); |
3849 | } | 4570 | } |
3850 | 4571 | ||
3851 | static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, | 4572 | static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, |
3852 | struct net_device *netdev, int cmd, | 4573 | struct net_device *netdev, int cmd, |
3853 | const u8 *addr) | 4574 | const u8 *addr, gfp_t gfp) |
3854 | { | 4575 | { |
3855 | struct sk_buff *msg; | 4576 | struct sk_buff *msg; |
3856 | void *hdr; | 4577 | void *hdr; |
3857 | 4578 | ||
3858 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | 4579 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); |
3859 | if (!msg) | 4580 | if (!msg) |
3860 | return; | 4581 | return; |
3861 | 4582 | ||
@@ -3875,7 +4596,8 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, | |||
3875 | return; | 4596 | return; |
3876 | } | 4597 | } |
3877 | 4598 | ||
3878 | genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC); | 4599 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
4600 | nl80211_mlme_mcgrp.id, gfp); | ||
3879 | return; | 4601 | return; |
3880 | 4602 | ||
3881 | nla_put_failure: | 4603 | nla_put_failure: |
@@ -3884,16 +4606,145 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, | |||
3884 | } | 4606 | } |
3885 | 4607 | ||
3886 | void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, | 4608 | void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, |
3887 | struct net_device *netdev, const u8 *addr) | 4609 | struct net_device *netdev, const u8 *addr, |
4610 | gfp_t gfp) | ||
3888 | { | 4611 | { |
3889 | nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_AUTHENTICATE, | 4612 | nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_AUTHENTICATE, |
3890 | addr); | 4613 | addr, gfp); |
3891 | } | 4614 | } |
3892 | 4615 | ||
3893 | void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, | 4616 | void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, |
3894 | struct net_device *netdev, const u8 *addr) | 4617 | struct net_device *netdev, const u8 *addr, |
4618 | gfp_t gfp) | ||
3895 | { | 4619 | { |
3896 | nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_ASSOCIATE, addr); | 4620 | nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_ASSOCIATE, |
4621 | addr, gfp); | ||
4622 | } | ||
4623 | |||
4624 | void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, | ||
4625 | struct net_device *netdev, const u8 *bssid, | ||
4626 | const u8 *req_ie, size_t req_ie_len, | ||
4627 | const u8 *resp_ie, size_t resp_ie_len, | ||
4628 | u16 status, gfp_t gfp) | ||
4629 | { | ||
4630 | struct sk_buff *msg; | ||
4631 | void *hdr; | ||
4632 | |||
4633 | msg = nlmsg_new(NLMSG_GOODSIZE, gfp); | ||
4634 | if (!msg) | ||
4635 | return; | ||
4636 | |||
4637 | hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONNECT); | ||
4638 | if (!hdr) { | ||
4639 | nlmsg_free(msg); | ||
4640 | return; | ||
4641 | } | ||
4642 | |||
4643 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); | ||
4644 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); | ||
4645 | if (bssid) | ||
4646 | NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); | ||
4647 | NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status); | ||
4648 | if (req_ie) | ||
4649 | NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); | ||
4650 | if (resp_ie) | ||
4651 | NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); | ||
4652 | |||
4653 | if (genlmsg_end(msg, hdr) < 0) { | ||
4654 | nlmsg_free(msg); | ||
4655 | return; | ||
4656 | } | ||
4657 | |||
4658 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, | ||
4659 | nl80211_mlme_mcgrp.id, gfp); | ||
4660 | return; | ||
4661 | |||
4662 | nla_put_failure: | ||
4663 | genlmsg_cancel(msg, hdr); | ||
4664 | nlmsg_free(msg); | ||
4665 | |||
4666 | } | ||
4667 | |||
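The NLA_PUT*() calls in nl80211_send_connect_result() and the event builders that follow are goto-on-failure macros, which is why each function ends with an nla_put_failure: label that cancels the header and frees the message. Roughly how those helpers expanded in kernels of this era (paraphrased from include/net/netlink.h, not part of this diff):

    #define NLA_PUT(skb, attrtype, attrlen, data) \
            do { \
                    if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
                            goto nla_put_failure; \
            } while (0)

    #define NLA_PUT_TYPE(skb, type, attrtype, value) \
            do { \
                    type __tmp = value; \
                    NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \
            } while (0)

    #define NLA_PUT_U16(skb, attrtype, value) \
            NLA_PUT_TYPE(skb, u16, attrtype, value)

    #define NLA_PUT_U32(skb, attrtype, value) \
            NLA_PUT_TYPE(skb, u32, attrtype, value)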
4668 | void nl80211_send_roamed(struct cfg80211_registered_device *rdev, | ||
4669 | struct net_device *netdev, const u8 *bssid, | ||
4670 | const u8 *req_ie, size_t req_ie_len, | ||
4671 | const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) | ||
4672 | { | ||
4673 | struct sk_buff *msg; | ||
4674 | void *hdr; | ||
4675 | |||
4676 | msg = nlmsg_new(NLMSG_GOODSIZE, gfp); | ||
4677 | if (!msg) | ||
4678 | return; | ||
4679 | |||
4680 | hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ROAM); | ||
4681 | if (!hdr) { | ||
4682 | nlmsg_free(msg); | ||
4683 | return; | ||
4684 | } | ||
4685 | |||
4686 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); | ||
4687 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); | ||
4688 | NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); | ||
4689 | if (req_ie) | ||
4690 | NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); | ||
4691 | if (resp_ie) | ||
4692 | NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); | ||
4693 | |||
4694 | if (genlmsg_end(msg, hdr) < 0) { | ||
4695 | nlmsg_free(msg); | ||
4696 | return; | ||
4697 | } | ||
4698 | |||
4699 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, | ||
4700 | nl80211_mlme_mcgrp.id, gfp); | ||
4701 | return; | ||
4702 | |||
4703 | nla_put_failure: | ||
4704 | genlmsg_cancel(msg, hdr); | ||
4705 | nlmsg_free(msg); | ||
4706 | |||
4707 | } | ||
4708 | |||
4709 | void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, | ||
4710 | struct net_device *netdev, u16 reason, | ||
4711 | const u8 *ie, size_t ie_len, bool from_ap) | ||
4712 | { | ||
4713 | struct sk_buff *msg; | ||
4714 | void *hdr; | ||
4715 | |||
4716 | msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | ||
4717 | if (!msg) | ||
4718 | return; | ||
4719 | |||
4720 | hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DISCONNECT); | ||
4721 | if (!hdr) { | ||
4722 | nlmsg_free(msg); | ||
4723 | return; | ||
4724 | } | ||
4725 | |||
4726 | NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); | ||
4727 | NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); | ||
4728 | if (from_ap && reason) | ||
4729 | NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason); | ||
4730 | if (from_ap) | ||
4731 | NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP); | ||
4732 | if (ie) | ||
4733 | NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie); | ||
4734 | |||
4735 | if (genlmsg_end(msg, hdr) < 0) { | ||
4736 | nlmsg_free(msg); | ||
4737 | return; | ||
4738 | } | ||
4739 | |||
4740 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, | ||
4741 | nl80211_mlme_mcgrp.id, GFP_KERNEL); | ||
4742 | return; | ||
4743 | |||
4744 | nla_put_failure: | ||
4745 | genlmsg_cancel(msg, hdr); | ||
4746 | nlmsg_free(msg); | ||
4747 | |||
3897 | } | 4748 | } |
3898 | 4749 | ||
3899 | void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, | 4750 | void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, |
@@ -3922,7 +4773,8 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, | |||
3922 | return; | 4773 | return; |
3923 | } | 4774 | } |
3924 | 4775 | ||
3925 | genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp); | 4776 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
4777 | nl80211_mlme_mcgrp.id, gfp); | ||
3926 | return; | 4778 | return; |
3927 | 4779 | ||
3928 | nla_put_failure: | 4780 | nla_put_failure: |
@@ -3933,12 +4785,12 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, | |||
3933 | void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, | 4785 | void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, |
3934 | struct net_device *netdev, const u8 *addr, | 4786 | struct net_device *netdev, const u8 *addr, |
3935 | enum nl80211_key_type key_type, int key_id, | 4787 | enum nl80211_key_type key_type, int key_id, |
3936 | const u8 *tsc) | 4788 | const u8 *tsc, gfp_t gfp) |
3937 | { | 4789 | { |
3938 | struct sk_buff *msg; | 4790 | struct sk_buff *msg; |
3939 | void *hdr; | 4791 | void *hdr; |
3940 | 4792 | ||
3941 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | 4793 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); |
3942 | if (!msg) | 4794 | if (!msg) |
3943 | return; | 4795 | return; |
3944 | 4796 | ||
@@ -3962,7 +4814,8 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, | |||
3962 | return; | 4814 | return; |
3963 | } | 4815 | } |
3964 | 4816 | ||
3965 | genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC); | 4817 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
4818 | nl80211_mlme_mcgrp.id, gfp); | ||
3966 | return; | 4819 | return; |
3967 | 4820 | ||
3968 | nla_put_failure: | 4821 | nla_put_failure: |
@@ -4015,7 +4868,10 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy, | |||
4015 | return; | 4868 | return; |
4016 | } | 4869 | } |
4017 | 4870 | ||
4018 | genlmsg_multicast(msg, 0, nl80211_regulatory_mcgrp.id, GFP_ATOMIC); | 4871 | rcu_read_lock(); |
4872 | genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, | ||
4873 | GFP_ATOMIC); | ||
4874 | rcu_read_unlock(); | ||
4019 | 4875 | ||
4020 | return; | 4876 | return; |
4021 | 4877 | ||
@@ -4051,6 +4907,12 @@ int nl80211_init(void) | |||
4051 | if (err) | 4907 | if (err) |
4052 | goto err_out; | 4908 | goto err_out; |
4053 | 4909 | ||
4910 | #ifdef CONFIG_NL80211_TESTMODE | ||
4911 | err = genl_register_mc_group(&nl80211_fam, &nl80211_testmode_mcgrp); | ||
4912 | if (err) | ||
4913 | goto err_out; | ||
4914 | #endif | ||
4915 | |||
4054 | return 0; | 4916 | return 0; |
4055 | err_out: | 4917 | err_out: |
4056 | genl_unregister_family(&nl80211_fam); | 4918 | genl_unregister_family(&nl80211_fam); |
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index 5c12ad13499b..44cc2a76a1b0 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h | |||
@@ -3,39 +3,54 @@ | |||
3 | 3 | ||
4 | #include "core.h" | 4 | #include "core.h" |
5 | 5 | ||
6 | extern int nl80211_init(void); | 6 | int nl80211_init(void); |
7 | extern void nl80211_exit(void); | 7 | void nl80211_exit(void); |
8 | extern void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); | 8 | void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); |
9 | extern void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, | 9 | void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, |
10 | struct net_device *netdev); | 10 | struct net_device *netdev); |
11 | extern void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, | 11 | void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, |
12 | struct net_device *netdev); | 12 | struct net_device *netdev); |
13 | extern void nl80211_send_reg_change_event(struct regulatory_request *request); | 13 | void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, |
14 | extern void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, | 14 | struct net_device *netdev); |
15 | struct net_device *netdev, | 15 | void nl80211_send_reg_change_event(struct regulatory_request *request); |
16 | const u8 *buf, size_t len); | 16 | void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, |
17 | extern void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, | 17 | struct net_device *netdev, |
18 | struct net_device *netdev, | 18 | const u8 *buf, size_t len, gfp_t gfp); |
19 | const u8 *buf, size_t len); | 19 | void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, |
20 | extern void nl80211_send_deauth(struct cfg80211_registered_device *rdev, | 20 | struct net_device *netdev, |
21 | const u8 *buf, size_t len, gfp_t gfp); | ||
22 | void nl80211_send_deauth(struct cfg80211_registered_device *rdev, | ||
23 | struct net_device *netdev, | ||
24 | const u8 *buf, size_t len, gfp_t gfp); | ||
25 | void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, | ||
26 | struct net_device *netdev, | ||
27 | const u8 *buf, size_t len, gfp_t gfp); | ||
28 | void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, | ||
29 | struct net_device *netdev, | ||
30 | const u8 *addr, gfp_t gfp); | ||
31 | void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, | ||
21 | struct net_device *netdev, | 32 | struct net_device *netdev, |
22 | const u8 *buf, size_t len); | 33 | const u8 *addr, gfp_t gfp); |
23 | extern void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, | 34 | void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, |
24 | struct net_device *netdev, | 35 | struct net_device *netdev, const u8 *bssid, |
25 | const u8 *buf, size_t len); | 36 | const u8 *req_ie, size_t req_ie_len, |
26 | extern void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, | 37 | const u8 *resp_ie, size_t resp_ie_len, |
27 | struct net_device *netdev, | 38 | u16 status, gfp_t gfp); |
28 | const u8 *addr); | 39 | void nl80211_send_roamed(struct cfg80211_registered_device *rdev, |
29 | extern void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, | 40 | struct net_device *netdev, const u8 *bssid, |
30 | struct net_device *netdev, | 41 | const u8 *req_ie, size_t req_ie_len, |
31 | const u8 *addr); | 42 | const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); |
32 | extern void | 43 | void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, |
44 | struct net_device *netdev, u16 reason, | ||
45 | const u8 *ie, size_t ie_len, bool from_ap); | ||
46 | |||
47 | void | ||
33 | nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, | 48 | nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, |
34 | struct net_device *netdev, const u8 *addr, | 49 | struct net_device *netdev, const u8 *addr, |
35 | enum nl80211_key_type key_type, | 50 | enum nl80211_key_type key_type, |
36 | int key_id, const u8 *tsc); | 51 | int key_id, const u8 *tsc, gfp_t gfp); |
37 | 52 | ||
38 | extern void | 53 | void |
39 | nl80211_send_beacon_hint_event(struct wiphy *wiphy, | 54 | nl80211_send_beacon_hint_event(struct wiphy *wiphy, |
40 | struct ieee80211_channel *channel_before, | 55 | struct ieee80211_channel *channel_before, |
41 | struct ieee80211_channel *channel_after); | 56 | struct ieee80211_channel *channel_after); |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 75a406d33619..f256dfffbf46 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -62,6 +62,16 @@ const struct ieee80211_regdomain *cfg80211_regdomain; | |||
62 | */ | 62 | */ |
63 | static const struct ieee80211_regdomain *country_ie_regdomain; | 63 | static const struct ieee80211_regdomain *country_ie_regdomain; |
64 | 64 | ||
65 | /* | ||
66 | * Protects static reg.c components: | ||
67 | * - cfg80211_world_regdom | ||
68 | * - cfg80211_regdom | ||
69 | * - country_ie_regdomain | ||
70 | * - last_request | ||
71 | */ | ||
72 | DEFINE_MUTEX(reg_mutex); | ||
73 | #define assert_reg_lock() WARN_ON(!mutex_is_locked(&reg_mutex)) | ||
74 | |||
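Where both locks are taken (reg_process_hint(), set_regdom() and regulatory_exit() below), reg_mutex nests inside cfg80211_mutex; regulatory_hint_11d() takes reg_mutex on its own. A sketch of the intended ordering (illustration only; example_touch_reg_state() is a made-up name, not part of the patch):

    static void example_touch_reg_state(void)
    {
            mutex_lock(&cfg80211_mutex);    /* outer: global cfg80211 state */
            mutex_lock(&reg_mutex);         /* inner: the statics listed above */

            /* safe to read or modify last_request, cfg80211_regdomain, ... */

            mutex_unlock(&reg_mutex);
            mutex_unlock(&cfg80211_mutex);
    }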
65 | /* Used to queue up regulatory hints */ | 75 | /* Used to queue up regulatory hints */ |
66 | static LIST_HEAD(reg_requests_list); | 76 | static LIST_HEAD(reg_requests_list); |
67 | static spinlock_t reg_requests_lock; | 77 | static spinlock_t reg_requests_lock; |
@@ -113,11 +123,7 @@ static const struct ieee80211_regdomain world_regdom = { | |||
113 | static const struct ieee80211_regdomain *cfg80211_world_regdom = | 123 | static const struct ieee80211_regdomain *cfg80211_world_regdom = |
114 | &world_regdom; | 124 | &world_regdom; |
115 | 125 | ||
116 | #ifdef CONFIG_WIRELESS_OLD_REGULATORY | ||
117 | static char *ieee80211_regdom = "US"; | ||
118 | #else | ||
119 | static char *ieee80211_regdom = "00"; | 126 | static char *ieee80211_regdom = "00"; |
120 | #endif | ||
121 | 127 | ||
122 | module_param(ieee80211_regdom, charp, 0444); | 128 | module_param(ieee80211_regdom, charp, 0444); |
123 | MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); | 129 | MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); |
@@ -1012,7 +1018,6 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band, | |||
1012 | map_regdom_flags(reg_rule->flags) | bw_flags; | 1018 | map_regdom_flags(reg_rule->flags) | bw_flags; |
1013 | chan->max_antenna_gain = chan->orig_mag = | 1019 | chan->max_antenna_gain = chan->orig_mag = |
1014 | (int) MBI_TO_DBI(power_rule->max_antenna_gain); | 1020 | (int) MBI_TO_DBI(power_rule->max_antenna_gain); |
1015 | chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz); | ||
1016 | chan->max_power = chan->orig_mpwr = | 1021 | chan->max_power = chan->orig_mpwr = |
1017 | (int) MBM_TO_DBM(power_rule->max_eirp); | 1022 | (int) MBM_TO_DBM(power_rule->max_eirp); |
1018 | return; | 1023 | return; |
@@ -1021,7 +1026,6 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band, | |||
1021 | chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); | 1026 | chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); |
1022 | chan->max_antenna_gain = min(chan->orig_mag, | 1027 | chan->max_antenna_gain = min(chan->orig_mag, |
1023 | (int) MBI_TO_DBI(power_rule->max_antenna_gain)); | 1028 | (int) MBI_TO_DBI(power_rule->max_antenna_gain)); |
1024 | chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz); | ||
1025 | if (chan->orig_mpwr) | 1029 | if (chan->orig_mpwr) |
1026 | chan->max_power = min(chan->orig_mpwr, | 1030 | chan->max_power = min(chan->orig_mpwr, |
1027 | (int) MBM_TO_DBM(power_rule->max_eirp)); | 1031 | (int) MBM_TO_DBM(power_rule->max_eirp)); |
@@ -1061,10 +1065,10 @@ static bool ignore_reg_update(struct wiphy *wiphy, | |||
1061 | 1065 | ||
1062 | static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) | 1066 | static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) |
1063 | { | 1067 | { |
1064 | struct cfg80211_registered_device *drv; | 1068 | struct cfg80211_registered_device *rdev; |
1065 | 1069 | ||
1066 | list_for_each_entry(drv, &cfg80211_drv_list, list) | 1070 | list_for_each_entry(rdev, &cfg80211_rdev_list, list) |
1067 | wiphy_update_regulatory(&drv->wiphy, initiator); | 1071 | wiphy_update_regulatory(&rdev->wiphy, initiator); |
1068 | } | 1072 | } |
1069 | 1073 | ||
1070 | static void handle_reg_beacon(struct wiphy *wiphy, | 1074 | static void handle_reg_beacon(struct wiphy *wiphy, |
@@ -1298,7 +1302,7 @@ static void handle_channel_custom(struct wiphy *wiphy, | |||
1298 | struct ieee80211_supported_band *sband; | 1302 | struct ieee80211_supported_band *sband; |
1299 | struct ieee80211_channel *chan; | 1303 | struct ieee80211_channel *chan; |
1300 | 1304 | ||
1301 | assert_cfg80211_lock(); | 1305 | assert_reg_lock(); |
1302 | 1306 | ||
1303 | sband = wiphy->bands[band]; | 1307 | sband = wiphy->bands[band]; |
1304 | BUG_ON(chan_idx >= sband->n_channels); | 1308 | BUG_ON(chan_idx >= sband->n_channels); |
@@ -1323,7 +1327,6 @@ static void handle_channel_custom(struct wiphy *wiphy, | |||
1323 | 1327 | ||
1324 | chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; | 1328 | chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; |
1325 | chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); | 1329 | chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); |
1326 | chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz); | ||
1327 | chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); | 1330 | chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); |
1328 | } | 1331 | } |
1329 | 1332 | ||
@@ -1347,14 +1350,14 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy, | |||
1347 | enum ieee80211_band band; | 1350 | enum ieee80211_band band; |
1348 | unsigned int bands_set = 0; | 1351 | unsigned int bands_set = 0; |
1349 | 1352 | ||
1350 | mutex_lock(&cfg80211_mutex); | 1353 | mutex_lock(&reg_mutex); |
1351 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 1354 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
1352 | if (!wiphy->bands[band]) | 1355 | if (!wiphy->bands[band]) |
1353 | continue; | 1356 | continue; |
1354 | handle_band_custom(wiphy, band, regd); | 1357 | handle_band_custom(wiphy, band, regd); |
1355 | bands_set++; | 1358 | bands_set++; |
1356 | } | 1359 | } |
1357 | mutex_unlock(&cfg80211_mutex); | 1360 | mutex_unlock(&reg_mutex); |
1358 | 1361 | ||
1359 | /* | 1362 | /* |
1360 | * no point in calling this if it won't have any effect | 1363 | * no point in calling this if it won't have any effect |
@@ -1421,7 +1424,7 @@ static int ignore_request(struct wiphy *wiphy, | |||
1421 | if (last_wiphy != wiphy) { | 1424 | if (last_wiphy != wiphy) { |
1422 | /* | 1425 | /* |
1423 | * Two cards with two APs claiming different | 1426 | * Two cards with two APs claiming different |
1424 | * different Country IE alpha2s. We could | 1427 | * Country IE alpha2s. We could |
1425 | * intersect them, but that seems unlikely | 1428 | * intersect them, but that seems unlikely |
1426 | * to be correct. Reject second one for now. | 1429 | * to be correct. Reject second one for now. |
1427 | */ | 1430 | */ |
@@ -1500,7 +1503,7 @@ static int ignore_request(struct wiphy *wiphy, | |||
1500 | * Returns zero if all went fine, %-EALREADY if a regulatory domain had | 1503 | * Returns zero if all went fine, %-EALREADY if a regulatory domain had |
1501 | * already been set or other standard error codes. | 1504 | * already been set or other standard error codes. |
1502 | * | 1505 | * |
1503 | * Caller must hold &cfg80211_mutex | 1506 | * Caller must hold &cfg80211_mutex and &reg_mutex |
1504 | */ | 1507 | */ |
1505 | static int __regulatory_hint(struct wiphy *wiphy, | 1508 | static int __regulatory_hint(struct wiphy *wiphy, |
1506 | struct regulatory_request *pending_request) | 1509 | struct regulatory_request *pending_request) |
@@ -1575,6 +1578,7 @@ static void reg_process_hint(struct regulatory_request *reg_request) | |||
1575 | BUG_ON(!reg_request->alpha2); | 1578 | BUG_ON(!reg_request->alpha2); |
1576 | 1579 | ||
1577 | mutex_lock(&cfg80211_mutex); | 1580 | mutex_lock(&cfg80211_mutex); |
1581 | mutex_lock(&reg_mutex); | ||
1578 | 1582 | ||
1579 | if (wiphy_idx_valid(reg_request->wiphy_idx)) | 1583 | if (wiphy_idx_valid(reg_request->wiphy_idx)) |
1580 | wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx); | 1584 | wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx); |
@@ -1590,6 +1594,7 @@ static void reg_process_hint(struct regulatory_request *reg_request) | |||
1590 | if (r == -EALREADY && wiphy && wiphy->strict_regulatory) | 1594 | if (r == -EALREADY && wiphy && wiphy->strict_regulatory) |
1591 | wiphy_update_regulatory(wiphy, reg_request->initiator); | 1595 | wiphy_update_regulatory(wiphy, reg_request->initiator); |
1592 | out: | 1596 | out: |
1597 | mutex_unlock(&reg_mutex); | ||
1593 | mutex_unlock(&cfg80211_mutex); | 1598 | mutex_unlock(&cfg80211_mutex); |
1594 | } | 1599 | } |
1595 | 1600 | ||
@@ -1615,9 +1620,13 @@ static void reg_process_pending_hints(void) | |||
1615 | /* Processes beacon hints -- this has nothing to do with country IEs */ | 1620 | /* Processes beacon hints -- this has nothing to do with country IEs */ |
1616 | static void reg_process_pending_beacon_hints(void) | 1621 | static void reg_process_pending_beacon_hints(void) |
1617 | { | 1622 | { |
1618 | struct cfg80211_registered_device *drv; | 1623 | struct cfg80211_registered_device *rdev; |
1619 | struct reg_beacon *pending_beacon, *tmp; | 1624 | struct reg_beacon *pending_beacon, *tmp; |
1620 | 1625 | ||
1626 | /* | ||
1627 | * No need to hold the reg_mutex here as we just touch wiphys | ||
1628 | * and do not read or access regulatory variables. | ||
1629 | */ | ||
1621 | mutex_lock(&cfg80211_mutex); | 1630 | mutex_lock(&cfg80211_mutex); |
1622 | 1631 | ||
1623 | /* This goes through the _pending_ beacon list */ | 1632 | /* This goes through the _pending_ beacon list */ |
@@ -1634,8 +1643,8 @@ static void reg_process_pending_beacon_hints(void) | |||
1634 | list_del_init(&pending_beacon->list); | 1643 | list_del_init(&pending_beacon->list); |
1635 | 1644 | ||
1636 | /* Applies the beacon hint to current wiphys */ | 1645 | /* Applies the beacon hint to current wiphys */ |
1637 | list_for_each_entry(drv, &cfg80211_drv_list, list) | 1646 | list_for_each_entry(rdev, &cfg80211_rdev_list, list) |
1638 | wiphy_update_new_beacon(&drv->wiphy, pending_beacon); | 1647 | wiphy_update_new_beacon(&rdev->wiphy, pending_beacon); |
1639 | 1648 | ||
1640 | /* Remembers the beacon hint for new wiphys or reg changes */ | 1649 | /* Remembers the beacon hint for new wiphys or reg changes */ |
1641 | list_add_tail(&pending_beacon->list, &reg_beacon_list); | 1650 | list_add_tail(&pending_beacon->list, &reg_beacon_list); |
@@ -1739,12 +1748,13 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2) | |||
1739 | } | 1748 | } |
1740 | EXPORT_SYMBOL(regulatory_hint); | 1749 | EXPORT_SYMBOL(regulatory_hint); |
1741 | 1750 | ||
1751 | /* Caller must hold reg_mutex */ | ||
1742 | static bool reg_same_country_ie_hint(struct wiphy *wiphy, | 1752 | static bool reg_same_country_ie_hint(struct wiphy *wiphy, |
1743 | u32 country_ie_checksum) | 1753 | u32 country_ie_checksum) |
1744 | { | 1754 | { |
1745 | struct wiphy *request_wiphy; | 1755 | struct wiphy *request_wiphy; |
1746 | 1756 | ||
1747 | assert_cfg80211_lock(); | 1757 | assert_reg_lock(); |
1748 | 1758 | ||
1749 | if (unlikely(last_request->initiator != | 1759 | if (unlikely(last_request->initiator != |
1750 | NL80211_REGDOM_SET_BY_COUNTRY_IE)) | 1760 | NL80211_REGDOM_SET_BY_COUNTRY_IE)) |
@@ -1767,6 +1777,10 @@ static bool reg_same_country_ie_hint(struct wiphy *wiphy, | |||
1767 | return false; | 1777 | return false; |
1768 | } | 1778 | } |
1769 | 1779 | ||
1780 | /* | ||
1781 | * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and | ||
1782 | * therefore cannot iterate over the rdev list here. | ||
1783 | */ | ||
1770 | void regulatory_hint_11d(struct wiphy *wiphy, | 1784 | void regulatory_hint_11d(struct wiphy *wiphy, |
1771 | u8 *country_ie, | 1785 | u8 *country_ie, |
1772 | u8 country_ie_len) | 1786 | u8 country_ie_len) |
@@ -1777,12 +1791,10 @@ void regulatory_hint_11d(struct wiphy *wiphy, | |||
1777 | enum environment_cap env = ENVIRON_ANY; | 1791 | enum environment_cap env = ENVIRON_ANY; |
1778 | struct regulatory_request *request; | 1792 | struct regulatory_request *request; |
1779 | 1793 | ||
1780 | mutex_lock(&cfg80211_mutex); | 1794 | mutex_lock(&reg_mutex); |
1781 | 1795 | ||
1782 | if (unlikely(!last_request)) { | 1796 | if (unlikely(!last_request)) |
1783 | mutex_unlock(&cfg80211_mutex); | 1797 | goto out; |
1784 | return; | ||
1785 | } | ||
1786 | 1798 | ||
1787 | /* IE len must be evenly divisible by 2 */ | 1799 | /* IE len must be evenly divisible by 2 */ |
1788 | if (country_ie_len & 0x01) | 1800 | if (country_ie_len & 0x01) |
@@ -1808,54 +1820,14 @@ void regulatory_hint_11d(struct wiphy *wiphy, | |||
1808 | env = ENVIRON_OUTDOOR; | 1820 | env = ENVIRON_OUTDOOR; |
1809 | 1821 | ||
1810 | /* | 1822 | /* |
1811 | * We will run this for *every* beacon processed for the BSSID, so | 1823 | * We will run this only upon a successful connection on cfg80211. |
1812 | * we optimize an early check to exit out early if we don't have to | 1824 | * We leave conflict resolution to the workqueue, where we can hold |
1813 | * do anything | 1825 | * cfg80211_mutex. |
1814 | */ | 1826 | */ |
1815 | if (likely(last_request->initiator == | 1827 | if (likely(last_request->initiator == |
1816 | NL80211_REGDOM_SET_BY_COUNTRY_IE && | 1828 | NL80211_REGDOM_SET_BY_COUNTRY_IE && |
1817 | wiphy_idx_valid(last_request->wiphy_idx))) { | 1829 | wiphy_idx_valid(last_request->wiphy_idx))) |
1818 | struct cfg80211_registered_device *drv_last_ie; | 1830 | goto out; |
1819 | |||
1820 | drv_last_ie = | ||
1821 | cfg80211_drv_by_wiphy_idx(last_request->wiphy_idx); | ||
1822 | |||
1823 | /* | ||
1824 | * Lets keep this simple -- we trust the first AP | ||
1825 | * after we intersect with CRDA | ||
1826 | */ | ||
1827 | if (likely(&drv_last_ie->wiphy == wiphy)) { | ||
1828 | /* | ||
1829 | * Ignore IEs coming in on this wiphy with | ||
1830 | * the same alpha2 and environment cap | ||
1831 | */ | ||
1832 | if (likely(alpha2_equal(drv_last_ie->country_ie_alpha2, | ||
1833 | alpha2) && | ||
1834 | env == drv_last_ie->env)) { | ||
1835 | goto out; | ||
1836 | } | ||
1837 | /* | ||
1838 | * the wiphy moved on to another BSSID or the AP | ||
1839 | * was reconfigured. XXX: We need to deal with the | ||
1840 | * case where the user suspends and goes | ||
1841 | * to another country, and then gets IEs from an | ||
1842 | * AP with different settings | ||
1843 | */ | ||
1844 | goto out; | ||
1845 | } else { | ||
1846 | /* | ||
1847 | * Ignore IEs coming in on two separate wiphys with | ||
1848 | * the same alpha2 and environment cap | ||
1849 | */ | ||
1850 | if (likely(alpha2_equal(drv_last_ie->country_ie_alpha2, | ||
1851 | alpha2) && | ||
1852 | env == drv_last_ie->env)) { | ||
1853 | goto out; | ||
1854 | } | ||
1855 | /* We could potentially intersect though */ | ||
1856 | goto out; | ||
1857 | } | ||
1858 | } | ||
1859 | 1831 | ||
1860 | rd = country_ie_2_rd(country_ie, country_ie_len, &checksum); | 1832 | rd = country_ie_2_rd(country_ie, country_ie_len, &checksum); |
1861 | if (!rd) | 1833 | if (!rd) |
@@ -1890,7 +1862,7 @@ void regulatory_hint_11d(struct wiphy *wiphy, | |||
1890 | request->country_ie_checksum = checksum; | 1862 | request->country_ie_checksum = checksum; |
1891 | request->country_ie_env = env; | 1863 | request->country_ie_env = env; |
1892 | 1864 | ||
1893 | mutex_unlock(&cfg80211_mutex); | 1865 | mutex_unlock(&reg_mutex); |
1894 | 1866 | ||
1895 | queue_regulatory_request(request); | 1867 | queue_regulatory_request(request); |
1896 | 1868 | ||
@@ -1899,9 +1871,8 @@ void regulatory_hint_11d(struct wiphy *wiphy, | |||
1899 | free_rd_out: | 1871 | free_rd_out: |
1900 | kfree(rd); | 1872 | kfree(rd); |
1901 | out: | 1873 | out: |
1902 | mutex_unlock(&cfg80211_mutex); | 1874 | mutex_unlock(&reg_mutex); |
1903 | } | 1875 | } |
1904 | EXPORT_SYMBOL(regulatory_hint_11d); | ||
1905 | 1876 | ||
1906 | static bool freq_is_chan_12_13_14(u16 freq) | 1877 | static bool freq_is_chan_12_13_14(u16 freq) |
1907 | { | 1878 | { |
@@ -1996,14 +1967,14 @@ static void print_regdomain(const struct ieee80211_regdomain *rd) | |||
1996 | 1967 | ||
1997 | if (last_request->initiator == | 1968 | if (last_request->initiator == |
1998 | NL80211_REGDOM_SET_BY_COUNTRY_IE) { | 1969 | NL80211_REGDOM_SET_BY_COUNTRY_IE) { |
1999 | struct cfg80211_registered_device *drv; | 1970 | struct cfg80211_registered_device *rdev; |
2000 | drv = cfg80211_drv_by_wiphy_idx( | 1971 | rdev = cfg80211_rdev_by_wiphy_idx( |
2001 | last_request->wiphy_idx); | 1972 | last_request->wiphy_idx); |
2002 | if (drv) { | 1973 | if (rdev) { |
2003 | printk(KERN_INFO "cfg80211: Current regulatory " | 1974 | printk(KERN_INFO "cfg80211: Current regulatory " |
2004 | "domain updated by AP to: %c%c\n", | 1975 | "domain updated by AP to: %c%c\n", |
2005 | drv->country_ie_alpha2[0], | 1976 | rdev->country_ie_alpha2[0], |
2006 | drv->country_ie_alpha2[1]); | 1977 | rdev->country_ie_alpha2[1]); |
2007 | } else | 1978 | } else |
2008 | printk(KERN_INFO "cfg80211: Current regulatory " | 1979 | printk(KERN_INFO "cfg80211: Current regulatory " |
2009 | "domain intersected: \n"); | 1980 | "domain intersected: \n"); |
@@ -2064,7 +2035,7 @@ static inline void reg_country_ie_process_debug( | |||
2064 | static int __set_regdom(const struct ieee80211_regdomain *rd) | 2035 | static int __set_regdom(const struct ieee80211_regdomain *rd) |
2065 | { | 2036 | { |
2066 | const struct ieee80211_regdomain *intersected_rd = NULL; | 2037 | const struct ieee80211_regdomain *intersected_rd = NULL; |
2067 | struct cfg80211_registered_device *drv = NULL; | 2038 | struct cfg80211_registered_device *rdev = NULL; |
2068 | struct wiphy *request_wiphy; | 2039 | struct wiphy *request_wiphy; |
2069 | /* Some basic sanity checks first */ | 2040 | /* Some basic sanity checks first */ |
2070 | 2041 | ||
@@ -2203,11 +2174,11 @@ static int __set_regdom(const struct ieee80211_regdomain *rd) | |||
2203 | if (!intersected_rd) | 2174 | if (!intersected_rd) |
2204 | return -EINVAL; | 2175 | return -EINVAL; |
2205 | 2176 | ||
2206 | drv = wiphy_to_dev(request_wiphy); | 2177 | rdev = wiphy_to_dev(request_wiphy); |
2207 | 2178 | ||
2208 | drv->country_ie_alpha2[0] = rd->alpha2[0]; | 2179 | rdev->country_ie_alpha2[0] = rd->alpha2[0]; |
2209 | drv->country_ie_alpha2[1] = rd->alpha2[1]; | 2180 | rdev->country_ie_alpha2[1] = rd->alpha2[1]; |
2210 | drv->env = last_request->country_ie_env; | 2181 | rdev->env = last_request->country_ie_env; |
2211 | 2182 | ||
2212 | BUG_ON(intersected_rd == rd); | 2183 | BUG_ON(intersected_rd == rd); |
2213 | 2184 | ||
@@ -2232,10 +2203,13 @@ int set_regdom(const struct ieee80211_regdomain *rd) | |||
2232 | 2203 | ||
2233 | assert_cfg80211_lock(); | 2204 | assert_cfg80211_lock(); |
2234 | 2205 | ||
2206 | mutex_lock(&reg_mutex); | ||
2207 | |||
2235 | /* Note that this doesn't update the wiphys, this is done below */ | 2208 | /* Note that this doesn't update the wiphys, this is done below */ |
2236 | r = __set_regdom(rd); | 2209 | r = __set_regdom(rd); |
2237 | if (r) { | 2210 | if (r) { |
2238 | kfree(rd); | 2211 | kfree(rd); |
2212 | mutex_unlock(&reg_mutex); | ||
2239 | return r; | 2213 | return r; |
2240 | } | 2214 | } |
2241 | 2215 | ||
@@ -2250,6 +2224,8 @@ int set_regdom(const struct ieee80211_regdomain *rd) | |||
2250 | 2224 | ||
2251 | nl80211_send_reg_change_event(last_request); | 2225 | nl80211_send_reg_change_event(last_request); |
2252 | 2226 | ||
2227 | mutex_unlock(&reg_mutex); | ||
2228 | |||
2253 | return r; | 2229 | return r; |
2254 | } | 2230 | } |
2255 | 2231 | ||
@@ -2260,16 +2236,20 @@ void reg_device_remove(struct wiphy *wiphy) | |||
2260 | 2236 | ||
2261 | assert_cfg80211_lock(); | 2237 | assert_cfg80211_lock(); |
2262 | 2238 | ||
2239 | mutex_lock(&reg_mutex); | ||
2240 | |||
2263 | kfree(wiphy->regd); | 2241 | kfree(wiphy->regd); |
2264 | 2242 | ||
2265 | if (last_request) | 2243 | if (last_request) |
2266 | request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); | 2244 | request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); |
2267 | 2245 | ||
2268 | if (!request_wiphy || request_wiphy != wiphy) | 2246 | if (!request_wiphy || request_wiphy != wiphy) |
2269 | return; | 2247 | goto out; |
2270 | 2248 | ||
2271 | last_request->wiphy_idx = WIPHY_IDX_STALE; | 2249 | last_request->wiphy_idx = WIPHY_IDX_STALE; |
2272 | last_request->country_ie_env = ENVIRON_ANY; | 2250 | last_request->country_ie_env = ENVIRON_ANY; |
2251 | out: | ||
2252 | mutex_unlock(&reg_mutex); | ||
2273 | } | 2253 | } |
2274 | 2254 | ||
2275 | int regulatory_init(void) | 2255 | int regulatory_init(void) |
@@ -2288,22 +2268,12 @@ int regulatory_init(void) | |||
2288 | 2268 | ||
2289 | printk(KERN_INFO "cfg80211: Using static regulatory domain info\n"); | 2269 | printk(KERN_INFO "cfg80211: Using static regulatory domain info\n"); |
2290 | print_regdomain_info(cfg80211_regdomain); | 2270 | print_regdomain_info(cfg80211_regdomain); |
2291 | /* | ||
2292 | * The old code still requests for a new regdomain and if | ||
2293 | * you have CRDA you get it updated, otherwise you get | ||
2294 | * stuck with the static values. Since "EU" is not a valid | ||
2295 | * ISO / IEC 3166 alpha2 code we can't expect userpace to | ||
2296 | * give us a regulatory domain for it. We need last_request | ||
2297 | * iniitalized though so lets just send a request which we | ||
2298 | * know will be ignored... this crap will be removed once | ||
2299 | * OLD_REG dies. | ||
2300 | */ | ||
2301 | err = regulatory_hint_core(ieee80211_regdom); | ||
2302 | #else | 2271 | #else |
2303 | cfg80211_regdomain = cfg80211_world_regdom; | 2272 | cfg80211_regdomain = cfg80211_world_regdom; |
2304 | 2273 | ||
2305 | err = regulatory_hint_core(ieee80211_regdom); | ||
2306 | #endif | 2274 | #endif |
2275 | /* We always try to get an update for the static regdomain */ | ||
2276 | err = regulatory_hint_core(cfg80211_regdomain->alpha2); | ||
2307 | if (err) { | 2277 | if (err) { |
2308 | if (err == -ENOMEM) | 2278 | if (err == -ENOMEM) |
2309 | return err; | 2279 | return err; |
@@ -2322,6 +2292,13 @@ int regulatory_init(void) | |||
2322 | #endif | 2292 | #endif |
2323 | } | 2293 | } |
2324 | 2294 | ||
2295 | /* | ||
2296 | * Finally, if the user set the module parameter treat it | ||
2297 | * as a user hint. | ||
2298 | */ | ||
2299 | if (!is_world_regdom(ieee80211_regdom)) | ||
2300 | regulatory_hint_user(ieee80211_regdom); | ||
2301 | |||
2325 | return 0; | 2302 | return 0; |
2326 | } | 2303 | } |
2327 | 2304 | ||
@@ -2333,6 +2310,7 @@ void regulatory_exit(void) | |||
2333 | cancel_work_sync(&reg_work); | 2310 | cancel_work_sync(&reg_work); |
2334 | 2311 | ||
2335 | mutex_lock(&cfg80211_mutex); | 2312 | mutex_lock(&cfg80211_mutex); |
2313 | mutex_lock(&reg_mutex); | ||
2336 | 2314 | ||
2337 | reset_regdomains(); | 2315 | reset_regdomains(); |
2338 | 2316 | ||
@@ -2371,5 +2349,6 @@ void regulatory_exit(void) | |||
2371 | } | 2349 | } |
2372 | spin_unlock(&reg_requests_lock); | 2350 | spin_unlock(&reg_requests_lock); |
2373 | 2351 | ||
2352 | mutex_unlock(&reg_mutex); | ||
2374 | mutex_unlock(&cfg80211_mutex); | 2353 | mutex_unlock(&cfg80211_mutex); |
2375 | } | 2354 | } |
diff --git a/net/wireless/reg.h b/net/wireless/reg.h index 4e167a8e11be..3362c7c069b2 100644 --- a/net/wireless/reg.h +++ b/net/wireless/reg.h | |||
@@ -37,4 +37,19 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy, | |||
37 | struct ieee80211_channel *beacon_chan, | 37 | struct ieee80211_channel *beacon_chan, |
38 | gfp_t gfp); | 38 | gfp_t gfp); |
39 | 39 | ||
40 | /** | ||
41 | * regulatory_hint_11d - hints a country IE as a regulatory domain | ||
42 | * @wiphy: the wireless device giving the hint (used only for reporting | ||
43 | * conflicts) | ||
44 | * @country_ie: pointer to the country IE | ||
45 | * @country_ie_len: length of the country IE | ||
46 | * | ||
47 | * We will intersect the rd with what CRDA tells us should apply | ||
48 | * for the alpha2 this country IE belongs to; this prevents APs from | ||
49 | * sending us incorrect or outdated information against a country. | ||
50 | */ | ||
51 | void regulatory_hint_11d(struct wiphy *wiphy, | ||
52 | u8 *country_ie, | ||
53 | u8 country_ie_len); | ||
54 | |||
40 | #endif /* __NET_WIRELESS_REG_H */ | 55 | #endif /* __NET_WIRELESS_REG_H */ |
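With the EXPORT_SYMBOL dropped in reg.c above, regulatory_hint_11d() becomes cfg80211-internal and, per the new comment, is expected to run once per successful connection rather than per beacon. A hypothetical caller sketch (not part of this diff; ieee80211_bss_get_ie() and WLAN_EID_COUNTRY are assumed to be available to the caller):

    static void hint_country_from_bss(struct wiphy *wiphy,
                                      struct cfg80211_bss *bss)
    {
            const u8 *country_ie = ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);

            if (!country_ie)
                    return;

            /* skip the 2-byte element header: [EID][len][payload ...] */
            regulatory_hint_11d(wiphy, (u8 *)country_ie + 2, country_ie[1]);
    }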
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 7e595ce24eeb..4c210c2debc6 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -14,29 +14,41 @@ | |||
14 | #include <net/iw_handler.h> | 14 | #include <net/iw_handler.h> |
15 | #include "core.h" | 15 | #include "core.h" |
16 | #include "nl80211.h" | 16 | #include "nl80211.h" |
17 | #include "wext-compat.h" | ||
17 | 18 | ||
18 | #define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ) | 19 | #define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ) |
19 | 20 | ||
20 | void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) | 21 | void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) |
21 | { | 22 | { |
23 | struct cfg80211_scan_request *request; | ||
22 | struct net_device *dev; | 24 | struct net_device *dev; |
23 | #ifdef CONFIG_WIRELESS_EXT | 25 | #ifdef CONFIG_WIRELESS_EXT |
24 | union iwreq_data wrqu; | 26 | union iwreq_data wrqu; |
25 | #endif | 27 | #endif |
26 | 28 | ||
27 | dev = dev_get_by_index(&init_net, request->ifidx); | 29 | ASSERT_RDEV_LOCK(rdev); |
28 | if (!dev) | ||
29 | goto out; | ||
30 | 30 | ||
31 | WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); | 31 | request = rdev->scan_req; |
32 | |||
33 | if (!request) | ||
34 | return; | ||
35 | |||
36 | dev = request->dev; | ||
37 | |||
38 | /* | ||
39 | * This must be before sending the other events! | ||
40 | * Otherwise, wpa_supplicant gets completely confused with | ||
41 | * wext events. | ||
42 | */ | ||
43 | cfg80211_sme_scan_done(dev); | ||
32 | 44 | ||
33 | if (aborted) | 45 | if (request->aborted) |
34 | nl80211_send_scan_aborted(wiphy_to_dev(request->wiphy), dev); | 46 | nl80211_send_scan_aborted(rdev, dev); |
35 | else | 47 | else |
36 | nl80211_send_scan_done(wiphy_to_dev(request->wiphy), dev); | 48 | nl80211_send_scan_done(rdev, dev); |
37 | 49 | ||
38 | #ifdef CONFIG_WIRELESS_EXT | 50 | #ifdef CONFIG_WIRELESS_EXT |
39 | if (!aborted) { | 51 | if (!request->aborted) { |
40 | memset(&wrqu, 0, sizeof(wrqu)); | 52 | memset(&wrqu, 0, sizeof(wrqu)); |
41 | 53 | ||
42 | wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL); | 54 | wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL); |
@@ -45,9 +57,38 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) | |||
45 | 57 | ||
46 | dev_put(dev); | 58 | dev_put(dev); |
47 | 59 | ||
48 | out: | 60 | rdev->scan_req = NULL; |
49 | wiphy_to_dev(request->wiphy)->scan_req = NULL; | 61 | |
50 | kfree(request); | 62 | /* |
63 | * OK. If this is invoked with "leak" then we can't | ||
64 | * free this ... but we've cleaned it up anyway. The | ||
65 | * driver failed to call the scan_done callback, so | ||
66 | * all bets are off, it might still be trying to use | ||
67 | * the scan request or not ... if it accesses the dev | ||
68 | * in there (it shouldn't anyway) then it may crash. | ||
69 | */ | ||
70 | if (!leak) | ||
71 | kfree(request); | ||
72 | } | ||
73 | |||
74 | void __cfg80211_scan_done(struct work_struct *wk) | ||
75 | { | ||
76 | struct cfg80211_registered_device *rdev; | ||
77 | |||
78 | rdev = container_of(wk, struct cfg80211_registered_device, | ||
79 | scan_done_wk); | ||
80 | |||
81 | cfg80211_lock_rdev(rdev); | ||
82 | ___cfg80211_scan_done(rdev, false); | ||
83 | cfg80211_unlock_rdev(rdev); | ||
84 | } | ||
85 | |||
86 | void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) | ||
87 | { | ||
88 | WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); | ||
89 | |||
90 | request->aborted = aborted; | ||
91 | schedule_work(&wiphy_to_dev(request->wiphy)->scan_done_wk); | ||
51 | } | 92 | } |
52 | EXPORT_SYMBOL(cfg80211_scan_done); | 93 | EXPORT_SYMBOL(cfg80211_scan_done); |
53 | 94 | ||
@@ -62,6 +103,8 @@ static void bss_release(struct kref *ref) | |||
62 | if (bss->ies_allocated) | 103 | if (bss->ies_allocated) |
63 | kfree(bss->pub.information_elements); | 104 | kfree(bss->pub.information_elements); |
64 | 105 | ||
106 | BUG_ON(atomic_read(&bss->hold)); | ||
107 | |||
65 | kfree(bss); | 108 | kfree(bss); |
66 | } | 109 | } |
67 | 110 | ||
@@ -84,8 +127,9 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev) | |||
84 | bool expired = false; | 127 | bool expired = false; |
85 | 128 | ||
86 | list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) { | 129 | list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) { |
87 | if (bss->hold || | 130 | if (atomic_read(&bss->hold)) |
88 | !time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE)) | 131 | continue; |
132 | if (!time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE)) | ||
89 | continue; | 133 | continue; |
90 | list_del(&bss->list); | 134 | list_del(&bss->list); |
91 | rb_erase(&bss->rbn, &dev->bss_tree); | 135 | rb_erase(&bss->rbn, &dev->bss_tree); |
@@ -97,7 +141,7 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev) | |||
97 | dev->bss_generation++; | 141 | dev->bss_generation++; |
98 | } | 142 | } |
99 | 143 | ||
100 | static u8 *find_ie(u8 num, u8 *ies, size_t len) | 144 | static u8 *find_ie(u8 num, u8 *ies, int len) |
101 | { | 145 | { |
102 | while (len > 2 && ies[0] != num) { | 146 | while (len > 2 && ies[0] != num) { |
103 | len -= ies[1] + 2; | 147 | len -= ies[1] + 2; |
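The size_t to int change in find_ie() matters for the subtraction shown above: with an unsigned length, a malformed element can make len wrap to a huge value so the walk runs past the buffer, whereas a signed len simply goes negative and the len > 2 test stops the loop. A worked illustration (not part of the patch):

    /* 5 bytes left, but the element claims ies[1] = 200:
     *   len -= ies[1] + 2;           // len -= 202
     *   size_t: len == (size_t)-197  // still "> 2", keeps reading past the buffer
     *   int:    len == -197          // "len > 2" fails, loop exits cleanly
     */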
@@ -539,6 +583,7 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) | |||
539 | spin_lock_bh(&dev->bss_lock); | 583 | spin_lock_bh(&dev->bss_lock); |
540 | 584 | ||
541 | list_del(&bss->list); | 585 | list_del(&bss->list); |
586 | dev->bss_generation++; | ||
542 | rb_erase(&bss->rbn, &dev->bss_tree); | 587 | rb_erase(&bss->rbn, &dev->bss_tree); |
543 | 588 | ||
544 | spin_unlock_bh(&dev->bss_lock); | 589 | spin_unlock_bh(&dev->bss_lock); |
@@ -547,30 +592,6 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) | |||
547 | } | 592 | } |
548 | EXPORT_SYMBOL(cfg80211_unlink_bss); | 593 | EXPORT_SYMBOL(cfg80211_unlink_bss); |
549 | 594 | ||
550 | void cfg80211_hold_bss(struct cfg80211_bss *pub) | ||
551 | { | ||
552 | struct cfg80211_internal_bss *bss; | ||
553 | |||
554 | if (!pub) | ||
555 | return; | ||
556 | |||
557 | bss = container_of(pub, struct cfg80211_internal_bss, pub); | ||
558 | bss->hold = true; | ||
559 | } | ||
560 | EXPORT_SYMBOL(cfg80211_hold_bss); | ||
561 | |||
562 | void cfg80211_unhold_bss(struct cfg80211_bss *pub) | ||
563 | { | ||
564 | struct cfg80211_internal_bss *bss; | ||
565 | |||
566 | if (!pub) | ||
567 | return; | ||
568 | |||
569 | bss = container_of(pub, struct cfg80211_internal_bss, pub); | ||
570 | bss->hold = false; | ||
571 | } | ||
572 | EXPORT_SYMBOL(cfg80211_unhold_bss); | ||
573 | |||
574 | #ifdef CONFIG_WIRELESS_EXT | 595 | #ifdef CONFIG_WIRELESS_EXT |
575 | int cfg80211_wext_siwscan(struct net_device *dev, | 596 | int cfg80211_wext_siwscan(struct net_device *dev, |
576 | struct iw_request_info *info, | 597 | struct iw_request_info *info, |
@@ -586,7 +607,10 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
586 | if (!netif_running(dev)) | 607 | if (!netif_running(dev)) |
587 | return -ENETDOWN; | 608 | return -ENETDOWN; |
588 | 609 | ||
589 | rdev = cfg80211_get_dev_from_ifindex(dev->ifindex); | 610 | if (wrqu->data.length == sizeof(struct iw_scan_req)) |
611 | wreq = (struct iw_scan_req *)extra; | ||
612 | |||
613 | rdev = cfg80211_get_dev_from_ifindex(dev_net(dev), dev->ifindex); | ||
590 | 614 | ||
591 | if (IS_ERR(rdev)) | 615 | if (IS_ERR(rdev)) |
592 | return PTR_ERR(rdev); | 616 | return PTR_ERR(rdev); |
@@ -598,9 +622,14 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
598 | 622 | ||
599 | wiphy = &rdev->wiphy; | 623 | wiphy = &rdev->wiphy; |
600 | 624 | ||
601 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) | 625 | /* Determine number of channels, needed to allocate creq */ |
602 | if (wiphy->bands[band]) | 626 | if (wreq && wreq->num_channels) |
603 | n_channels += wiphy->bands[band]->n_channels; | 627 | n_channels = wreq->num_channels; |
628 | else { | ||
629 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) | ||
630 | if (wiphy->bands[band]) | ||
631 | n_channels += wiphy->bands[band]->n_channels; | ||
632 | } | ||
604 | 633 | ||
605 | creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + | 634 | creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + |
606 | n_channels * sizeof(void *), | 635 | n_channels * sizeof(void *), |
@@ -611,28 +640,47 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
611 | } | 640 | } |
612 | 641 | ||
613 | creq->wiphy = wiphy; | 642 | creq->wiphy = wiphy; |
614 | creq->ifidx = dev->ifindex; | 643 | creq->dev = dev; |
615 | creq->ssids = (void *)(creq + 1); | 644 | /* SSIDs come after channels */ |
616 | creq->channels = (void *)(creq->ssids + 1); | 645 | creq->ssids = (void *)&creq->channels[n_channels]; |
617 | creq->n_channels = n_channels; | 646 | creq->n_channels = n_channels; |
618 | creq->n_ssids = 1; | 647 | creq->n_ssids = 1; |
619 | 648 | ||
620 | /* all channels */ | 649 | /* translate "Scan on frequencies" request */ |
621 | i = 0; | 650 | i = 0; |
622 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 651 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
623 | int j; | 652 | int j; |
624 | if (!wiphy->bands[band]) | 653 | if (!wiphy->bands[band]) |
625 | continue; | 654 | continue; |
626 | for (j = 0; j < wiphy->bands[band]->n_channels; j++) { | 655 | for (j = 0; j < wiphy->bands[band]->n_channels; j++) { |
656 | |||
657 | /* If we have a wireless request structure and the | ||
658 | * wireless request specifies frequencies, then search | ||
659 | * for the matching hardware channel. | ||
660 | */ | ||
661 | if (wreq && wreq->num_channels) { | ||
662 | int k; | ||
663 | int wiphy_freq = wiphy->bands[band]->channels[j].center_freq; | ||
664 | for (k = 0; k < wreq->num_channels; k++) { | ||
665 | int wext_freq = wreq->channel_list[k].m / 100000; | ||
666 | if (wext_freq == wiphy_freq) | ||
667 | goto wext_freq_found; | ||
668 | } | ||
669 | goto wext_freq_not_found; | ||
670 | } | ||
671 | |||
672 | wext_freq_found: | ||
627 | creq->channels[i] = &wiphy->bands[band]->channels[j]; | 673 | creq->channels[i] = &wiphy->bands[band]->channels[j]; |
628 | i++; | 674 | i++; |
675 | wext_freq_not_found: ; | ||
629 | } | 676 | } |
630 | } | 677 | } |
631 | 678 | ||
632 | /* translate scan request */ | 679 | /* Set real number of channels specified in creq->channels[] */ |
633 | if (wrqu->data.length == sizeof(struct iw_scan_req)) { | 680 | creq->n_channels = i; |
634 | wreq = (struct iw_scan_req *)extra; | ||
635 | 681 | ||
682 | /* translate "Scan for SSID" request */ | ||
683 | if (wreq) { | ||
636 | if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { | 684 | if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { |
637 | if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) | 685 | if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) |
638 | return -EINVAL; | 686 | return -EINVAL; |
@@ -648,9 +696,12 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
648 | if (err) { | 696 | if (err) { |
649 | rdev->scan_req = NULL; | 697 | rdev->scan_req = NULL; |
650 | kfree(creq); | 698 | kfree(creq); |
699 | } else { | ||
700 | nl80211_send_scan_start(rdev, dev); | ||
701 | dev_hold(dev); | ||
651 | } | 702 | } |
652 | out: | 703 | out: |
653 | cfg80211_put_dev(rdev); | 704 | cfg80211_unlock_rdev(rdev); |
654 | return err; | 705 | return err; |
655 | } | 706 | } |
656 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan); | 707 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan); |
@@ -941,7 +992,7 @@ int cfg80211_wext_giwscan(struct net_device *dev, | |||
941 | if (!netif_running(dev)) | 992 | if (!netif_running(dev)) |
942 | return -ENETDOWN; | 993 | return -ENETDOWN; |
943 | 994 | ||
944 | rdev = cfg80211_get_dev_from_ifindex(dev->ifindex); | 995 | rdev = cfg80211_get_dev_from_ifindex(dev_net(dev), dev->ifindex); |
945 | 996 | ||
946 | if (IS_ERR(rdev)) | 997 | if (IS_ERR(rdev)) |
947 | return PTR_ERR(rdev); | 998 | return PTR_ERR(rdev); |
@@ -959,7 +1010,7 @@ int cfg80211_wext_giwscan(struct net_device *dev, | |||
959 | } | 1010 | } |
960 | 1011 | ||
961 | out: | 1012 | out: |
962 | cfg80211_put_dev(rdev); | 1013 | cfg80211_unlock_rdev(rdev); |
963 | return res; | 1014 | return res; |
964 | } | 1015 | } |
965 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwscan); | 1016 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwscan); |
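The siwscan hunk above translates a wireless-extensions scan request into a cfg80211 one: when userspace supplies an iw_scan_req with a channel_list, each requested frequency is reduced to MHz and compared against the wiphy's hardware channels, unmatched entries fall through the wext_freq_not_found label, and creq->n_channels is then trimmed to the number actually found. Below is a minimal standalone sketch of that matching step; the struct and function names are illustrative stand-ins, and the assumption that a wext frequency value is m * 10^e Hz (which would make the hunk's division by 100000 correspond to m carrying tens of Hz) is the usual wireless-extensions encoding, not something defined in this diff.

```c
/*
 * Illustrative sketch only (not kernel code): reduce a wext (m, e)
 * frequency pair to MHz and look it up in a channel table, mirroring
 * the "wext_freq == wiphy_freq" comparison in the hunk above.
 * Assumes the common wext convention that the value is m * 10^e Hz.
 */
#include <stddef.h>

struct sketch_iw_freq {
	long m;		/* mantissa */
	short e;	/* base-10 exponent */
};

struct sketch_channel {
	int center_freq;	/* MHz, like ieee80211_channel.center_freq */
};

static int sketch_iw_freq_to_mhz(const struct sketch_iw_freq *f)
{
	long long hz = f->m;
	int i;

	for (i = 0; i < f->e; i++)
		hz *= 10;
	return (int)(hz / 1000000);	/* Hz -> MHz */
}

/* Return the matching channel, or NULL (the "wext_freq_not_found" case). */
static const struct sketch_channel *
sketch_match_channel(const struct sketch_iw_freq *req,
		     const struct sketch_channel *chans, size_t n_chans)
{
	int mhz = sketch_iw_freq_to_mhz(req);
	size_t i;

	for (i = 0; i < n_chans; i++)
		if (chans[i].center_freq == mhz)
			return &chans[i];
	return NULL;
}

int main(void)
{
	const struct sketch_channel table[] = { { 2412 }, { 2437 }, { 2462 } };
	const struct sketch_iw_freq req = { .m = 243700000, .e = 1 }; /* 2.437 GHz */

	return sketch_match_channel(&req, table, 3) ? 0 : 1;
}
```

Skipping unmatched frequencies instead of failing mirrors the hunk's behaviour: the request proceeds with whatever subset of channels the hardware actually supports.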
diff --git a/net/wireless/sme.c b/net/wireless/sme.c new file mode 100644 index 000000000000..68307883ec87 --- /dev/null +++ b/net/wireless/sme.c | |||
@@ -0,0 +1,933 @@ | |||
1 | /* | ||
2 | * SME code for cfg80211's connect emulation. | ||
3 | * | ||
4 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * Copyright (C) 2009 Intel Corporation. All rights reserved. | ||
6 | */ | ||
7 | |||
8 | #include <linux/etherdevice.h> | ||
9 | #include <linux/if_arp.h> | ||
10 | #include <linux/workqueue.h> | ||
11 | #include <linux/wireless.h> | ||
12 | #include <net/iw_handler.h> | ||
13 | #include <net/cfg80211.h> | ||
14 | #include <net/rtnetlink.h> | ||
15 | #include "nl80211.h" | ||
16 | #include "reg.h" | ||
17 | |||
18 | struct cfg80211_conn { | ||
19 | struct cfg80211_connect_params params; | ||
20 | /* these are sub-states of the _CONNECTING sme_state */ | ||
21 | enum { | ||
22 | CFG80211_CONN_IDLE, | ||
23 | CFG80211_CONN_SCANNING, | ||
24 | CFG80211_CONN_SCAN_AGAIN, | ||
25 | CFG80211_CONN_AUTHENTICATE_NEXT, | ||
26 | CFG80211_CONN_AUTHENTICATING, | ||
27 | CFG80211_CONN_ASSOCIATE_NEXT, | ||
28 | CFG80211_CONN_ASSOCIATING, | ||
29 | } state; | ||
30 | u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; | ||
31 | u8 *ie; | ||
32 | size_t ie_len; | ||
33 | bool auto_auth, prev_bssid_valid; | ||
34 | }; | ||
35 | |||
36 | |||
37 | static int cfg80211_conn_scan(struct wireless_dev *wdev) | ||
38 | { | ||
39 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
40 | struct cfg80211_scan_request *request; | ||
41 | int n_channels, err; | ||
42 | |||
43 | ASSERT_RTNL(); | ||
44 | ASSERT_RDEV_LOCK(rdev); | ||
45 | ASSERT_WDEV_LOCK(wdev); | ||
46 | |||
47 | if (rdev->scan_req) | ||
48 | return -EBUSY; | ||
49 | |||
50 | if (wdev->conn->params.channel) { | ||
51 | n_channels = 1; | ||
52 | } else { | ||
53 | enum ieee80211_band band; | ||
54 | n_channels = 0; | ||
55 | |||
56 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | ||
57 | if (!wdev->wiphy->bands[band]) | ||
58 | continue; | ||
59 | n_channels += wdev->wiphy->bands[band]->n_channels; | ||
60 | } | ||
61 | } | ||
62 | request = kzalloc(sizeof(*request) + sizeof(request->ssids[0]) + | ||
63 | sizeof(request->channels[0]) * n_channels, | ||
64 | GFP_KERNEL); | ||
65 | if (!request) | ||
66 | return -ENOMEM; | ||
67 | |||
68 | if (wdev->conn->params.channel) | ||
69 | request->channels[0] = wdev->conn->params.channel; | ||
70 | else { | ||
71 | int i = 0, j; | ||
72 | enum ieee80211_band band; | ||
73 | |||
74 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | ||
75 | if (!wdev->wiphy->bands[band]) | ||
76 | continue; | ||
77 | for (j = 0; j < wdev->wiphy->bands[band]->n_channels; | ||
78 | i++, j++) | ||
79 | request->channels[i] = | ||
80 | &wdev->wiphy->bands[band]->channels[j]; | ||
81 | } | ||
82 | } | ||
83 | request->n_channels = n_channels; | ||
84 | request->ssids = (void *)&request->channels[n_channels]; | ||
85 | request->n_ssids = 1; | ||
86 | |||
87 | memcpy(request->ssids[0].ssid, wdev->conn->params.ssid, | ||
88 | wdev->conn->params.ssid_len); | ||
89 | request->ssids[0].ssid_len = wdev->conn->params.ssid_len; | ||
90 | |||
91 | request->dev = wdev->netdev; | ||
92 | request->wiphy = &rdev->wiphy; | ||
93 | |||
94 | rdev->scan_req = request; | ||
95 | |||
96 | err = rdev->ops->scan(wdev->wiphy, wdev->netdev, request); | ||
97 | if (!err) { | ||
98 | wdev->conn->state = CFG80211_CONN_SCANNING; | ||
99 | nl80211_send_scan_start(rdev, wdev->netdev); | ||
100 | dev_hold(wdev->netdev); | ||
101 | } else { | ||
102 | rdev->scan_req = NULL; | ||
103 | kfree(request); | ||
104 | } | ||
105 | return err; | ||
106 | } | ||
107 | |||
108 | static int cfg80211_conn_do_work(struct wireless_dev *wdev) | ||
109 | { | ||
110 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
111 | struct cfg80211_connect_params *params; | ||
112 | const u8 *prev_bssid = NULL; | ||
113 | int err; | ||
114 | |||
115 | ASSERT_WDEV_LOCK(wdev); | ||
116 | |||
117 | if (!wdev->conn) | ||
118 | return 0; | ||
119 | |||
120 | params = &wdev->conn->params; | ||
121 | |||
122 | switch (wdev->conn->state) { | ||
123 | case CFG80211_CONN_SCAN_AGAIN: | ||
124 | return cfg80211_conn_scan(wdev); | ||
125 | case CFG80211_CONN_AUTHENTICATE_NEXT: | ||
126 | BUG_ON(!rdev->ops->auth); | ||
127 | wdev->conn->state = CFG80211_CONN_AUTHENTICATING; | ||
128 | return __cfg80211_mlme_auth(rdev, wdev->netdev, | ||
129 | params->channel, params->auth_type, | ||
130 | params->bssid, | ||
131 | params->ssid, params->ssid_len, | ||
132 | NULL, 0, | ||
133 | params->key, params->key_len, | ||
134 | params->key_idx); | ||
135 | case CFG80211_CONN_ASSOCIATE_NEXT: | ||
136 | BUG_ON(!rdev->ops->assoc); | ||
137 | wdev->conn->state = CFG80211_CONN_ASSOCIATING; | ||
138 | if (wdev->conn->prev_bssid_valid) | ||
139 | prev_bssid = wdev->conn->prev_bssid; | ||
140 | err = __cfg80211_mlme_assoc(rdev, wdev->netdev, | ||
141 | params->channel, params->bssid, | ||
142 | prev_bssid, | ||
143 | params->ssid, params->ssid_len, | ||
144 | params->ie, params->ie_len, | ||
145 | false, ¶ms->crypto); | ||
146 | if (err) | ||
147 | __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, | ||
148 | NULL, 0, | ||
149 | WLAN_REASON_DEAUTH_LEAVING); | ||
150 | return err; | ||
151 | default: | ||
152 | return 0; | ||
153 | } | ||
154 | } | ||
155 | |||
156 | void cfg80211_conn_work(struct work_struct *work) | ||
157 | { | ||
158 | struct cfg80211_registered_device *rdev = | ||
159 | container_of(work, struct cfg80211_registered_device, conn_work); | ||
160 | struct wireless_dev *wdev; | ||
161 | |||
162 | rtnl_lock(); | ||
163 | cfg80211_lock_rdev(rdev); | ||
164 | mutex_lock(&rdev->devlist_mtx); | ||
165 | |||
166 | list_for_each_entry(wdev, &rdev->netdev_list, list) { | ||
167 | wdev_lock(wdev); | ||
168 | if (!netif_running(wdev->netdev)) { | ||
169 | wdev_unlock(wdev); | ||
170 | continue; | ||
171 | } | ||
172 | if (wdev->sme_state != CFG80211_SME_CONNECTING) { | ||
173 | wdev_unlock(wdev); | ||
174 | continue; | ||
175 | } | ||
176 | if (cfg80211_conn_do_work(wdev)) | ||
177 | __cfg80211_connect_result( | ||
178 | wdev->netdev, | ||
179 | wdev->conn->params.bssid, | ||
180 | NULL, 0, NULL, 0, | ||
181 | WLAN_STATUS_UNSPECIFIED_FAILURE, | ||
182 | false, NULL); | ||
183 | wdev_unlock(wdev); | ||
184 | } | ||
185 | |||
186 | mutex_unlock(&rdev->devlist_mtx); | ||
187 | cfg80211_unlock_rdev(rdev); | ||
188 | rtnl_unlock(); | ||
189 | } | ||
190 | |||
191 | static bool cfg80211_get_conn_bss(struct wireless_dev *wdev) | ||
192 | { | ||
193 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
194 | struct cfg80211_bss *bss; | ||
195 | u16 capa = WLAN_CAPABILITY_ESS; | ||
196 | |||
197 | ASSERT_WDEV_LOCK(wdev); | ||
198 | |||
199 | if (wdev->conn->params.privacy) | ||
200 | capa |= WLAN_CAPABILITY_PRIVACY; | ||
201 | |||
202 | bss = cfg80211_get_bss(wdev->wiphy, NULL, wdev->conn->params.bssid, | ||
203 | wdev->conn->params.ssid, | ||
204 | wdev->conn->params.ssid_len, | ||
205 | WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY, | ||
206 | capa); | ||
207 | if (!bss) | ||
208 | return false; | ||
209 | |||
210 | memcpy(wdev->conn->bssid, bss->bssid, ETH_ALEN); | ||
211 | wdev->conn->params.bssid = wdev->conn->bssid; | ||
212 | wdev->conn->params.channel = bss->channel; | ||
213 | wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; | ||
214 | schedule_work(&rdev->conn_work); | ||
215 | |||
216 | cfg80211_put_bss(bss); | ||
217 | return true; | ||
218 | } | ||
219 | |||
220 | static void __cfg80211_sme_scan_done(struct net_device *dev) | ||
221 | { | ||
222 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
223 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
224 | |||
225 | ASSERT_WDEV_LOCK(wdev); | ||
226 | |||
227 | if (wdev->sme_state != CFG80211_SME_CONNECTING) | ||
228 | return; | ||
229 | |||
230 | if (!wdev->conn) | ||
231 | return; | ||
232 | |||
233 | if (wdev->conn->state != CFG80211_CONN_SCANNING && | ||
234 | wdev->conn->state != CFG80211_CONN_SCAN_AGAIN) | ||
235 | return; | ||
236 | |||
237 | if (!cfg80211_get_conn_bss(wdev)) { | ||
238 | /* not found */ | ||
239 | if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) | ||
240 | schedule_work(&rdev->conn_work); | ||
241 | else | ||
242 | __cfg80211_connect_result( | ||
243 | wdev->netdev, | ||
244 | wdev->conn->params.bssid, | ||
245 | NULL, 0, NULL, 0, | ||
246 | WLAN_STATUS_UNSPECIFIED_FAILURE, | ||
247 | false, NULL); | ||
248 | } | ||
249 | } | ||
250 | |||
251 | void cfg80211_sme_scan_done(struct net_device *dev) | ||
252 | { | ||
253 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
254 | |||
255 | mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); | ||
256 | wdev_lock(wdev); | ||
257 | __cfg80211_sme_scan_done(dev); | ||
258 | wdev_unlock(wdev); | ||
259 | mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); | ||
260 | } | ||
261 | |||
262 | void cfg80211_sme_rx_auth(struct net_device *dev, | ||
263 | const u8 *buf, size_t len) | ||
264 | { | ||
265 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
266 | struct wiphy *wiphy = wdev->wiphy; | ||
267 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | ||
268 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; | ||
269 | u16 status_code = le16_to_cpu(mgmt->u.auth.status_code); | ||
270 | |||
271 | ASSERT_WDEV_LOCK(wdev); | ||
272 | |||
273 | /* should only RX auth frames when connecting */ | ||
274 | if (wdev->sme_state != CFG80211_SME_CONNECTING) | ||
275 | return; | ||
276 | |||
277 | if (WARN_ON(!wdev->conn)) | ||
278 | return; | ||
279 | |||
280 | if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && | ||
281 | wdev->conn->auto_auth && | ||
282 | wdev->conn->params.auth_type != NL80211_AUTHTYPE_NETWORK_EAP) { | ||
283 | /* select automatically between only open, shared, leap */ | ||
284 | switch (wdev->conn->params.auth_type) { | ||
285 | case NL80211_AUTHTYPE_OPEN_SYSTEM: | ||
286 | if (wdev->connect_keys) | ||
287 | wdev->conn->params.auth_type = | ||
288 | NL80211_AUTHTYPE_SHARED_KEY; | ||
289 | else | ||
290 | wdev->conn->params.auth_type = | ||
291 | NL80211_AUTHTYPE_NETWORK_EAP; | ||
292 | break; | ||
293 | case NL80211_AUTHTYPE_SHARED_KEY: | ||
294 | wdev->conn->params.auth_type = | ||
295 | NL80211_AUTHTYPE_NETWORK_EAP; | ||
296 | break; | ||
297 | default: | ||
298 | /* huh? */ | ||
299 | wdev->conn->params.auth_type = | ||
300 | NL80211_AUTHTYPE_OPEN_SYSTEM; | ||
301 | break; | ||
302 | } | ||
303 | wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; | ||
304 | schedule_work(&rdev->conn_work); | ||
305 | } else if (status_code != WLAN_STATUS_SUCCESS) { | ||
306 | __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, | ||
307 | status_code, false, NULL); | ||
308 | } else if (wdev->sme_state == CFG80211_SME_CONNECTING && | ||
309 | wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { | ||
310 | wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; | ||
311 | schedule_work(&rdev->conn_work); | ||
312 | } | ||
313 | } | ||
314 | |||
315 | bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev) | ||
316 | { | ||
317 | struct wiphy *wiphy = wdev->wiphy; | ||
318 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | ||
319 | |||
320 | if (WARN_ON(!wdev->conn)) | ||
321 | return false; | ||
322 | |||
323 | if (!wdev->conn->prev_bssid_valid) | ||
324 | return false; | ||
325 | |||
326 | /* | ||
327 | * Some stupid APs don't accept reassoc, so we | ||
328 | * need to fall back to trying regular assoc. | ||
329 | */ | ||
330 | wdev->conn->prev_bssid_valid = false; | ||
331 | wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; | ||
332 | schedule_work(&rdev->conn_work); | ||
333 | |||
334 | return true; | ||
335 | } | ||
336 | |||
337 | void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | ||
338 | const u8 *req_ie, size_t req_ie_len, | ||
339 | const u8 *resp_ie, size_t resp_ie_len, | ||
340 | u16 status, bool wextev, | ||
341 | struct cfg80211_bss *bss) | ||
342 | { | ||
343 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
344 | u8 *country_ie; | ||
345 | #ifdef CONFIG_WIRELESS_EXT | ||
346 | union iwreq_data wrqu; | ||
347 | #endif | ||
348 | |||
349 | ASSERT_WDEV_LOCK(wdev); | ||
350 | |||
351 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) | ||
352 | return; | ||
353 | |||
354 | if (wdev->sme_state != CFG80211_SME_CONNECTING) | ||
355 | return; | ||
356 | |||
357 | nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev, | ||
358 | bssid, req_ie, req_ie_len, | ||
359 | resp_ie, resp_ie_len, | ||
360 | status, GFP_KERNEL); | ||
361 | |||
362 | #ifdef CONFIG_WIRELESS_EXT | ||
363 | if (wextev) { | ||
364 | if (req_ie && status == WLAN_STATUS_SUCCESS) { | ||
365 | memset(&wrqu, 0, sizeof(wrqu)); | ||
366 | wrqu.data.length = req_ie_len; | ||
367 | wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, req_ie); | ||
368 | } | ||
369 | |||
370 | if (resp_ie && status == WLAN_STATUS_SUCCESS) { | ||
371 | memset(&wrqu, 0, sizeof(wrqu)); | ||
372 | wrqu.data.length = resp_ie_len; | ||
373 | wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, resp_ie); | ||
374 | } | ||
375 | |||
376 | memset(&wrqu, 0, sizeof(wrqu)); | ||
377 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | ||
378 | if (bssid && status == WLAN_STATUS_SUCCESS) { | ||
379 | memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); | ||
380 | memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN); | ||
381 | wdev->wext.prev_bssid_valid = true; | ||
382 | } | ||
383 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | ||
384 | } | ||
385 | #endif | ||
386 | |||
387 | if (wdev->current_bss) { | ||
388 | cfg80211_unhold_bss(wdev->current_bss); | ||
389 | cfg80211_put_bss(&wdev->current_bss->pub); | ||
390 | wdev->current_bss = NULL; | ||
391 | } | ||
392 | |||
393 | if (wdev->conn) | ||
394 | wdev->conn->state = CFG80211_CONN_IDLE; | ||
395 | |||
396 | if (status != WLAN_STATUS_SUCCESS) { | ||
397 | wdev->sme_state = CFG80211_SME_IDLE; | ||
398 | if (wdev->conn) | ||
399 | kfree(wdev->conn->ie); | ||
400 | kfree(wdev->conn); | ||
401 | wdev->conn = NULL; | ||
402 | kfree(wdev->connect_keys); | ||
403 | wdev->connect_keys = NULL; | ||
404 | wdev->ssid_len = 0; | ||
405 | return; | ||
406 | } | ||
407 | |||
408 | if (!bss) | ||
409 | bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, | ||
410 | wdev->ssid, wdev->ssid_len, | ||
411 | WLAN_CAPABILITY_ESS, | ||
412 | WLAN_CAPABILITY_ESS); | ||
413 | |||
414 | if (WARN_ON(!bss)) | ||
415 | return; | ||
416 | |||
417 | cfg80211_hold_bss(bss_from_pub(bss)); | ||
418 | wdev->current_bss = bss_from_pub(bss); | ||
419 | |||
420 | wdev->sme_state = CFG80211_SME_CONNECTED; | ||
421 | cfg80211_upload_connect_keys(wdev); | ||
422 | |||
423 | country_ie = (u8 *) ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY); | ||
424 | |||
425 | if (!country_ie) | ||
426 | return; | ||
427 | |||
428 | /* | ||
429 | * ieee80211_bss_get_ie() ensures we can access: | ||
430 | * - country_ie + 2, the start of the country ie data, and | ||
431 | * - country_ie[1] which is the IE length | ||
432 | */ | ||
433 | regulatory_hint_11d(wdev->wiphy, | ||
434 | country_ie + 2, | ||
435 | country_ie[1]); | ||
436 | } | ||
437 | |||
438 | void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | ||
439 | const u8 *req_ie, size_t req_ie_len, | ||
440 | const u8 *resp_ie, size_t resp_ie_len, | ||
441 | u16 status, gfp_t gfp) | ||
442 | { | ||
443 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
444 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
445 | struct cfg80211_event *ev; | ||
446 | unsigned long flags; | ||
447 | |||
448 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); | ||
449 | |||
450 | ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); | ||
451 | if (!ev) | ||
452 | return; | ||
453 | |||
454 | ev->type = EVENT_CONNECT_RESULT; | ||
455 | if (bssid) | ||
456 | memcpy(ev->cr.bssid, bssid, ETH_ALEN); | ||
457 | ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev); | ||
458 | ev->cr.req_ie_len = req_ie_len; | ||
459 | memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len); | ||
460 | ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; | ||
461 | ev->cr.resp_ie_len = resp_ie_len; | ||
462 | memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len); | ||
463 | ev->cr.status = status; | ||
464 | |||
465 | spin_lock_irqsave(&wdev->event_lock, flags); | ||
466 | list_add_tail(&ev->list, &wdev->event_list); | ||
467 | spin_unlock_irqrestore(&wdev->event_lock, flags); | ||
468 | schedule_work(&rdev->event_work); | ||
469 | } | ||
470 | EXPORT_SYMBOL(cfg80211_connect_result); | ||
471 | |||
472 | void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, | ||
473 | const u8 *req_ie, size_t req_ie_len, | ||
474 | const u8 *resp_ie, size_t resp_ie_len) | ||
475 | { | ||
476 | struct cfg80211_bss *bss; | ||
477 | #ifdef CONFIG_WIRELESS_EXT | ||
478 | union iwreq_data wrqu; | ||
479 | #endif | ||
480 | |||
481 | ASSERT_WDEV_LOCK(wdev); | ||
482 | |||
483 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) | ||
484 | return; | ||
485 | |||
486 | if (wdev->sme_state != CFG80211_SME_CONNECTED) | ||
487 | return; | ||
488 | |||
489 | /* internal error -- how did we get to CONNECTED w/o BSS? */ | ||
490 | if (WARN_ON(!wdev->current_bss)) { | ||
491 | return; | ||
492 | } | ||
493 | |||
494 | cfg80211_unhold_bss(wdev->current_bss); | ||
495 | cfg80211_put_bss(&wdev->current_bss->pub); | ||
496 | wdev->current_bss = NULL; | ||
497 | |||
498 | bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, | ||
499 | wdev->ssid, wdev->ssid_len, | ||
500 | WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); | ||
501 | |||
502 | if (WARN_ON(!bss)) | ||
503 | return; | ||
504 | |||
505 | cfg80211_hold_bss(bss_from_pub(bss)); | ||
506 | wdev->current_bss = bss_from_pub(bss); | ||
507 | |||
508 | nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bssid, | ||
509 | req_ie, req_ie_len, resp_ie, resp_ie_len, | ||
510 | GFP_KERNEL); | ||
511 | |||
512 | #ifdef CONFIG_WIRELESS_EXT | ||
513 | if (req_ie) { | ||
514 | memset(&wrqu, 0, sizeof(wrqu)); | ||
515 | wrqu.data.length = req_ie_len; | ||
516 | wireless_send_event(wdev->netdev, IWEVASSOCREQIE, | ||
517 | &wrqu, req_ie); | ||
518 | } | ||
519 | |||
520 | if (resp_ie) { | ||
521 | memset(&wrqu, 0, sizeof(wrqu)); | ||
522 | wrqu.data.length = resp_ie_len; | ||
523 | wireless_send_event(wdev->netdev, IWEVASSOCRESPIE, | ||
524 | &wrqu, resp_ie); | ||
525 | } | ||
526 | |||
527 | memset(&wrqu, 0, sizeof(wrqu)); | ||
528 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | ||
529 | memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); | ||
530 | memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN); | ||
531 | wdev->wext.prev_bssid_valid = true; | ||
532 | wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL); | ||
533 | #endif | ||
534 | } | ||
535 | |||
536 | void cfg80211_roamed(struct net_device *dev, const u8 *bssid, | ||
537 | const u8 *req_ie, size_t req_ie_len, | ||
538 | const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) | ||
539 | { | ||
540 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
541 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
542 | struct cfg80211_event *ev; | ||
543 | unsigned long flags; | ||
544 | |||
545 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); | ||
546 | |||
547 | ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); | ||
548 | if (!ev) | ||
549 | return; | ||
550 | |||
551 | ev->type = EVENT_ROAMED; | ||
552 | memcpy(ev->rm.bssid, bssid, ETH_ALEN); | ||
553 | ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev); | ||
554 | ev->rm.req_ie_len = req_ie_len; | ||
555 | memcpy((void *)ev->rm.req_ie, req_ie, req_ie_len); | ||
556 | ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; | ||
557 | ev->rm.resp_ie_len = resp_ie_len; | ||
558 | memcpy((void *)ev->rm.resp_ie, resp_ie, resp_ie_len); | ||
559 | |||
560 | spin_lock_irqsave(&wdev->event_lock, flags); | ||
561 | list_add_tail(&ev->list, &wdev->event_list); | ||
562 | spin_unlock_irqrestore(&wdev->event_lock, flags); | ||
563 | schedule_work(&rdev->event_work); | ||
564 | } | ||
565 | EXPORT_SYMBOL(cfg80211_roamed); | ||
566 | |||
567 | void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | ||
568 | size_t ie_len, u16 reason, bool from_ap) | ||
569 | { | ||
570 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
571 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
572 | int i; | ||
573 | #ifdef CONFIG_WIRELESS_EXT | ||
574 | union iwreq_data wrqu; | ||
575 | #endif | ||
576 | |||
577 | ASSERT_WDEV_LOCK(wdev); | ||
578 | |||
579 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) | ||
580 | return; | ||
581 | |||
582 | if (wdev->sme_state != CFG80211_SME_CONNECTED) | ||
583 | return; | ||
584 | |||
585 | if (wdev->current_bss) { | ||
586 | cfg80211_unhold_bss(wdev->current_bss); | ||
587 | cfg80211_put_bss(&wdev->current_bss->pub); | ||
588 | } | ||
589 | |||
590 | wdev->current_bss = NULL; | ||
591 | wdev->sme_state = CFG80211_SME_IDLE; | ||
592 | wdev->ssid_len = 0; | ||
593 | |||
594 | if (wdev->conn) { | ||
595 | const u8 *bssid; | ||
596 | int ret; | ||
597 | |||
598 | kfree(wdev->conn->ie); | ||
599 | wdev->conn->ie = NULL; | ||
600 | kfree(wdev->conn); | ||
601 | wdev->conn = NULL; | ||
602 | |||
603 | /* | ||
604 | * If this disconnect was due to a disassoc, we | ||
605 | * we might still have an auth BSS around. For | ||
606 | * the userspace SME that's currently expected, | ||
607 | * but for the kernel SME (nl80211 CONNECT or | ||
608 | * wireless extensions) we want to clear up all | ||
609 | * state. | ||
610 | */ | ||
611 | for (i = 0; i < MAX_AUTH_BSSES; i++) { | ||
612 | if (!wdev->auth_bsses[i]) | ||
613 | continue; | ||
614 | bssid = wdev->auth_bsses[i]->pub.bssid; | ||
615 | ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, | ||
616 | WLAN_REASON_DEAUTH_LEAVING); | ||
617 | WARN(ret, "deauth failed: %d\n", ret); | ||
618 | } | ||
619 | } | ||
620 | |||
621 | nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); | ||
622 | |||
623 | /* | ||
624 | * Delete all the keys ... pairwise keys can't really | ||
625 | * exist any more anyway, but default keys might. | ||
626 | */ | ||
627 | if (rdev->ops->del_key) | ||
628 | for (i = 0; i < 6; i++) | ||
629 | rdev->ops->del_key(wdev->wiphy, dev, i, NULL); | ||
630 | |||
631 | #ifdef CONFIG_WIRELESS_EXT | ||
632 | memset(&wrqu, 0, sizeof(wrqu)); | ||
633 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | ||
634 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | ||
635 | #endif | ||
636 | } | ||
637 | |||
638 | void cfg80211_disconnected(struct net_device *dev, u16 reason, | ||
639 | u8 *ie, size_t ie_len, gfp_t gfp) | ||
640 | { | ||
641 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
642 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
643 | struct cfg80211_event *ev; | ||
644 | unsigned long flags; | ||
645 | |||
646 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); | ||
647 | |||
648 | ev = kzalloc(sizeof(*ev) + ie_len, gfp); | ||
649 | if (!ev) | ||
650 | return; | ||
651 | |||
652 | ev->type = EVENT_DISCONNECTED; | ||
653 | ev->dc.ie = ((u8 *)ev) + sizeof(*ev); | ||
654 | ev->dc.ie_len = ie_len; | ||
655 | memcpy((void *)ev->dc.ie, ie, ie_len); | ||
656 | ev->dc.reason = reason; | ||
657 | |||
658 | spin_lock_irqsave(&wdev->event_lock, flags); | ||
659 | list_add_tail(&ev->list, &wdev->event_list); | ||
660 | spin_unlock_irqrestore(&wdev->event_lock, flags); | ||
661 | schedule_work(&rdev->event_work); | ||
662 | } | ||
663 | EXPORT_SYMBOL(cfg80211_disconnected); | ||
664 | |||
665 | int __cfg80211_connect(struct cfg80211_registered_device *rdev, | ||
666 | struct net_device *dev, | ||
667 | struct cfg80211_connect_params *connect, | ||
668 | struct cfg80211_cached_keys *connkeys, | ||
669 | const u8 *prev_bssid) | ||
670 | { | ||
671 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
672 | struct ieee80211_channel *chan; | ||
673 | int err; | ||
674 | |||
675 | ASSERT_WDEV_LOCK(wdev); | ||
676 | |||
677 | if (wdev->sme_state != CFG80211_SME_IDLE) | ||
678 | return -EALREADY; | ||
679 | |||
680 | chan = rdev_fixed_channel(rdev, wdev); | ||
681 | if (chan && chan != connect->channel) | ||
682 | return -EBUSY; | ||
683 | |||
684 | if (WARN_ON(wdev->connect_keys)) { | ||
685 | kfree(wdev->connect_keys); | ||
686 | wdev->connect_keys = NULL; | ||
687 | } | ||
688 | |||
689 | if (connkeys && connkeys->def >= 0) { | ||
690 | int idx; | ||
691 | u32 cipher; | ||
692 | |||
693 | idx = connkeys->def; | ||
694 | cipher = connkeys->params[idx].cipher; | ||
695 | /* If given a WEP key we may need it for shared key auth */ | ||
696 | if (cipher == WLAN_CIPHER_SUITE_WEP40 || | ||
697 | cipher == WLAN_CIPHER_SUITE_WEP104) { | ||
698 | connect->key_idx = idx; | ||
699 | connect->key = connkeys->params[idx].key; | ||
700 | connect->key_len = connkeys->params[idx].key_len; | ||
701 | |||
702 | /* | ||
703 | * If ciphers are not set (e.g. when going through | ||
704 | * iwconfig), we have to set them appropriately here. | ||
705 | */ | ||
706 | if (connect->crypto.cipher_group == 0) | ||
707 | connect->crypto.cipher_group = cipher; | ||
708 | |||
709 | if (connect->crypto.n_ciphers_pairwise == 0) { | ||
710 | connect->crypto.n_ciphers_pairwise = 1; | ||
711 | connect->crypto.ciphers_pairwise[0] = cipher; | ||
712 | } | ||
713 | } | ||
714 | } | ||
715 | |||
716 | if (!rdev->ops->connect) { | ||
717 | if (!rdev->ops->auth || !rdev->ops->assoc) | ||
718 | return -EOPNOTSUPP; | ||
719 | |||
720 | if (WARN_ON(wdev->conn)) | ||
721 | return -EINPROGRESS; | ||
722 | |||
723 | wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); | ||
724 | if (!wdev->conn) | ||
725 | return -ENOMEM; | ||
726 | |||
727 | /* | ||
728 | * Copy all parameters, handling the IEs, BSSID and SSID explicitly. | ||
729 | */ | ||
730 | memcpy(&wdev->conn->params, connect, sizeof(*connect)); | ||
731 | if (connect->bssid) { | ||
732 | wdev->conn->params.bssid = wdev->conn->bssid; | ||
733 | memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN); | ||
734 | } | ||
735 | |||
736 | if (connect->ie) { | ||
737 | wdev->conn->ie = kmemdup(connect->ie, connect->ie_len, | ||
738 | GFP_KERNEL); | ||
739 | wdev->conn->params.ie = wdev->conn->ie; | ||
740 | if (!wdev->conn->ie) { | ||
741 | kfree(wdev->conn); | ||
742 | wdev->conn = NULL; | ||
743 | return -ENOMEM; | ||
744 | } | ||
745 | } | ||
746 | |||
747 | if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { | ||
748 | wdev->conn->auto_auth = true; | ||
749 | /* start with open system ... should mostly work */ | ||
750 | wdev->conn->params.auth_type = | ||
751 | NL80211_AUTHTYPE_OPEN_SYSTEM; | ||
752 | } else { | ||
753 | wdev->conn->auto_auth = false; | ||
754 | } | ||
755 | |||
756 | memcpy(wdev->ssid, connect->ssid, connect->ssid_len); | ||
757 | wdev->ssid_len = connect->ssid_len; | ||
758 | wdev->conn->params.ssid = wdev->ssid; | ||
759 | wdev->conn->params.ssid_len = connect->ssid_len; | ||
760 | |||
761 | /* don't care about result -- but fill bssid & channel */ | ||
762 | if (!wdev->conn->params.bssid || !wdev->conn->params.channel) | ||
763 | cfg80211_get_conn_bss(wdev); | ||
764 | |||
765 | wdev->sme_state = CFG80211_SME_CONNECTING; | ||
766 | wdev->connect_keys = connkeys; | ||
767 | |||
768 | if (prev_bssid) { | ||
769 | memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); | ||
770 | wdev->conn->prev_bssid_valid = true; | ||
771 | } | ||
772 | |||
773 | /* we're good if we have both BSSID and channel */ | ||
774 | if (wdev->conn->params.bssid && wdev->conn->params.channel) { | ||
775 | wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; | ||
776 | err = cfg80211_conn_do_work(wdev); | ||
777 | } else { | ||
778 | /* otherwise we'll need to scan for the AP first */ | ||
779 | err = cfg80211_conn_scan(wdev); | ||
780 | /* | ||
781 | * If we can't scan right now, then we need to scan again | ||
782 | * after the current scan finishes, since the parameters | ||
783 | * changed (unless we find a good AP anyway). | ||
784 | */ | ||
785 | if (err == -EBUSY) { | ||
786 | err = 0; | ||
787 | wdev->conn->state = CFG80211_CONN_SCAN_AGAIN; | ||
788 | } | ||
789 | } | ||
790 | if (err) { | ||
791 | kfree(wdev->conn->ie); | ||
792 | kfree(wdev->conn); | ||
793 | wdev->conn = NULL; | ||
794 | wdev->sme_state = CFG80211_SME_IDLE; | ||
795 | wdev->connect_keys = NULL; | ||
796 | wdev->ssid_len = 0; | ||
797 | } | ||
798 | |||
799 | return err; | ||
800 | } else { | ||
801 | wdev->sme_state = CFG80211_SME_CONNECTING; | ||
802 | wdev->connect_keys = connkeys; | ||
803 | err = rdev->ops->connect(&rdev->wiphy, dev, connect); | ||
804 | if (err) { | ||
805 | wdev->connect_keys = NULL; | ||
806 | wdev->sme_state = CFG80211_SME_IDLE; | ||
807 | return err; | ||
808 | } | ||
809 | |||
810 | memcpy(wdev->ssid, connect->ssid, connect->ssid_len); | ||
811 | wdev->ssid_len = connect->ssid_len; | ||
812 | |||
813 | return 0; | ||
814 | } | ||
815 | } | ||
816 | |||
817 | int cfg80211_connect(struct cfg80211_registered_device *rdev, | ||
818 | struct net_device *dev, | ||
819 | struct cfg80211_connect_params *connect, | ||
820 | struct cfg80211_cached_keys *connkeys) | ||
821 | { | ||
822 | int err; | ||
823 | |||
824 | mutex_lock(&rdev->devlist_mtx); | ||
825 | wdev_lock(dev->ieee80211_ptr); | ||
826 | err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); | ||
827 | wdev_unlock(dev->ieee80211_ptr); | ||
828 | mutex_unlock(&rdev->devlist_mtx); | ||
829 | |||
830 | return err; | ||
831 | } | ||
832 | |||
833 | int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, | ||
834 | struct net_device *dev, u16 reason, bool wextev) | ||
835 | { | ||
836 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
837 | int err; | ||
838 | |||
839 | ASSERT_WDEV_LOCK(wdev); | ||
840 | |||
841 | if (wdev->sme_state == CFG80211_SME_IDLE) | ||
842 | return -EINVAL; | ||
843 | |||
844 | kfree(wdev->connect_keys); | ||
845 | wdev->connect_keys = NULL; | ||
846 | |||
847 | if (!rdev->ops->disconnect) { | ||
848 | if (!rdev->ops->deauth) | ||
849 | return -EOPNOTSUPP; | ||
850 | |||
851 | /* was it connected by userspace SME? */ | ||
852 | if (!wdev->conn) { | ||
853 | cfg80211_mlme_down(rdev, dev); | ||
854 | return 0; | ||
855 | } | ||
856 | |||
857 | if (wdev->sme_state == CFG80211_SME_CONNECTING && | ||
858 | (wdev->conn->state == CFG80211_CONN_SCANNING || | ||
859 | wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)) { | ||
860 | wdev->sme_state = CFG80211_SME_IDLE; | ||
861 | kfree(wdev->conn->ie); | ||
862 | kfree(wdev->conn); | ||
863 | wdev->conn = NULL; | ||
864 | wdev->ssid_len = 0; | ||
865 | return 0; | ||
866 | } | ||
867 | |||
868 | /* wdev->conn->params.bssid must be set if > SCANNING */ | ||
869 | err = __cfg80211_mlme_deauth(rdev, dev, | ||
870 | wdev->conn->params.bssid, | ||
871 | NULL, 0, reason); | ||
872 | if (err) | ||
873 | return err; | ||
874 | } else { | ||
875 | err = rdev->ops->disconnect(&rdev->wiphy, dev, reason); | ||
876 | if (err) | ||
877 | return err; | ||
878 | } | ||
879 | |||
880 | if (wdev->sme_state == CFG80211_SME_CONNECTED) | ||
881 | __cfg80211_disconnected(dev, NULL, 0, 0, false); | ||
882 | else if (wdev->sme_state == CFG80211_SME_CONNECTING) | ||
883 | __cfg80211_connect_result(dev, NULL, NULL, 0, NULL, 0, | ||
884 | WLAN_STATUS_UNSPECIFIED_FAILURE, | ||
885 | wextev, NULL); | ||
886 | |||
887 | return 0; | ||
888 | } | ||
889 | |||
890 | int cfg80211_disconnect(struct cfg80211_registered_device *rdev, | ||
891 | struct net_device *dev, | ||
892 | u16 reason, bool wextev) | ||
893 | { | ||
894 | int err; | ||
895 | |||
896 | wdev_lock(dev->ieee80211_ptr); | ||
897 | err = __cfg80211_disconnect(rdev, dev, reason, wextev); | ||
898 | wdev_unlock(dev->ieee80211_ptr); | ||
899 | |||
900 | return err; | ||
901 | } | ||
902 | |||
903 | void cfg80211_sme_disassoc(struct net_device *dev, int idx) | ||
904 | { | ||
905 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
906 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
907 | u8 bssid[ETH_ALEN]; | ||
908 | |||
909 | ASSERT_WDEV_LOCK(wdev); | ||
910 | |||
911 | if (!wdev->conn) | ||
912 | return; | ||
913 | |||
914 | if (wdev->conn->state == CFG80211_CONN_IDLE) | ||
915 | return; | ||
916 | |||
917 | /* | ||
918 | * Ok, so the association was made by this SME -- we don't | ||
919 | * want it any more so deauthenticate too. | ||
920 | */ | ||
921 | |||
922 | if (!wdev->auth_bsses[idx]) | ||
923 | return; | ||
924 | |||
925 | memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN); | ||
926 | if (__cfg80211_mlme_deauth(rdev, dev, bssid, | ||
927 | NULL, 0, WLAN_REASON_DEAUTH_LEAVING)) { | ||
928 | /* whatever -- assume gone anyway */ | ||
929 | cfg80211_unhold_bss(wdev->auth_bsses[idx]); | ||
930 | cfg80211_put_bss(&wdev->auth_bsses[idx]->pub); | ||
931 | wdev->auth_bsses[idx] = NULL; | ||
932 | } | ||
933 | } | ||
diff --git a/net/wireless/util.c b/net/wireless/util.c index 25550692dda6..3fc2df86278f 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -141,9 +141,12 @@ void ieee80211_set_bitrate_flags(struct wiphy *wiphy) | |||
141 | set_mandatory_flags_band(wiphy->bands[band], band); | 141 | set_mandatory_flags_band(wiphy->bands[band], band); |
142 | } | 142 | } |
143 | 143 | ||
144 | int cfg80211_validate_key_settings(struct key_params *params, int key_idx, | 144 | int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, |
145 | struct key_params *params, int key_idx, | ||
145 | const u8 *mac_addr) | 146 | const u8 *mac_addr) |
146 | { | 147 | { |
148 | int i; | ||
149 | |||
147 | if (key_idx > 5) | 150 | if (key_idx > 5) |
148 | return -EINVAL; | 151 | return -EINVAL; |
149 | 152 | ||
@@ -197,6 +200,12 @@ int cfg80211_validate_key_settings(struct key_params *params, int key_idx, | |||
197 | } | 200 | } |
198 | } | 201 | } |
199 | 202 | ||
203 | for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) | ||
204 | if (params->cipher == rdev->wiphy.cipher_suites[i]) | ||
205 | break; | ||
206 | if (i == rdev->wiphy.n_cipher_suites) | ||
207 | return -EINVAL; | ||
208 | |||
200 | return 0; | 209 | return 0; |
201 | } | 210 | } |
202 | 211 | ||
@@ -265,11 +274,11 @@ static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) | |||
265 | switch (ae) { | 274 | switch (ae) { |
266 | case 0: | 275 | case 0: |
267 | return 6; | 276 | return 6; |
268 | case 1: | 277 | case MESH_FLAGS_AE_A4: |
269 | return 12; | 278 | return 12; |
270 | case 2: | 279 | case MESH_FLAGS_AE_A5_A6: |
271 | return 18; | 280 | return 18; |
272 | case 3: | 281 | case (MESH_FLAGS_AE_A4 | MESH_FLAGS_AE_A5_A6): |
273 | return 24; | 282 | return 24; |
274 | default: | 283 | default: |
275 | return 6; | 284 | return 6; |
@@ -324,10 +333,18 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr, | |||
324 | } | 333 | } |
325 | break; | 334 | break; |
326 | case cpu_to_le16(IEEE80211_FCTL_FROMDS): | 335 | case cpu_to_le16(IEEE80211_FCTL_FROMDS): |
327 | if (iftype != NL80211_IFTYPE_STATION || | 336 | if ((iftype != NL80211_IFTYPE_STATION && |
337 | iftype != NL80211_IFTYPE_MESH_POINT) || | ||
328 | (is_multicast_ether_addr(dst) && | 338 | (is_multicast_ether_addr(dst) && |
329 | !compare_ether_addr(src, addr))) | 339 | !compare_ether_addr(src, addr))) |
330 | return -1; | 340 | return -1; |
341 | if (iftype == NL80211_IFTYPE_MESH_POINT) { | ||
342 | struct ieee80211s_hdr *meshdr = | ||
343 | (struct ieee80211s_hdr *) (skb->data + hdrlen); | ||
344 | hdrlen += ieee80211_get_mesh_hdrlen(meshdr); | ||
345 | if (meshdr->flags & MESH_FLAGS_AE_A4) | ||
346 | memcpy(src, meshdr->eaddr1, ETH_ALEN); | ||
347 | } | ||
331 | break; | 348 | break; |
332 | case cpu_to_le16(0): | 349 | case cpu_to_le16(0): |
333 | if (iftype != NL80211_IFTYPE_ADHOC) | 350 | if (iftype != NL80211_IFTYPE_ADHOC) |
@@ -502,3 +519,166 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb) | |||
502 | return dscp >> 5; | 519 | return dscp >> 5; |
503 | } | 520 | } |
504 | EXPORT_SYMBOL(cfg80211_classify8021d); | 521 | EXPORT_SYMBOL(cfg80211_classify8021d); |
522 | |||
523 | const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie) | ||
524 | { | ||
525 | u8 *end, *pos; | ||
526 | |||
527 | pos = bss->information_elements; | ||
528 | if (pos == NULL) | ||
529 | return NULL; | ||
530 | end = pos + bss->len_information_elements; | ||
531 | |||
532 | while (pos + 1 < end) { | ||
533 | if (pos + 2 + pos[1] > end) | ||
534 | break; | ||
535 | if (pos[0] == ie) | ||
536 | return pos; | ||
537 | pos += 2 + pos[1]; | ||
538 | } | ||
539 | |||
540 | return NULL; | ||
541 | } | ||
542 | EXPORT_SYMBOL(ieee80211_bss_get_ie); | ||
543 | |||
544 | void cfg80211_upload_connect_keys(struct wireless_dev *wdev) | ||
545 | { | ||
546 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
547 | struct net_device *dev = wdev->netdev; | ||
548 | int i; | ||
549 | |||
550 | if (!wdev->connect_keys) | ||
551 | return; | ||
552 | |||
553 | for (i = 0; i < 6; i++) { | ||
554 | if (!wdev->connect_keys->params[i].cipher) | ||
555 | continue; | ||
556 | if (rdev->ops->add_key(wdev->wiphy, dev, i, NULL, | ||
557 | &wdev->connect_keys->params[i])) { | ||
558 | printk(KERN_ERR "%s: failed to set key %d\n", | ||
559 | dev->name, i); | ||
560 | continue; | ||
561 | } | ||
562 | if (wdev->connect_keys->def == i) | ||
563 | if (rdev->ops->set_default_key(wdev->wiphy, dev, i)) { | ||
564 | printk(KERN_ERR "%s: failed to set defkey %d\n", | ||
565 | dev->name, i); | ||
566 | continue; | ||
567 | } | ||
568 | if (wdev->connect_keys->defmgmt == i) | ||
569 | if (rdev->ops->set_default_mgmt_key(wdev->wiphy, dev, i)) | ||
570 | printk(KERN_ERR "%s: failed to set mgtdef %d\n", | ||
571 | dev->name, i); | ||
572 | } | ||
573 | |||
574 | kfree(wdev->connect_keys); | ||
575 | wdev->connect_keys = NULL; | ||
576 | } | ||
577 | |||
578 | static void cfg80211_process_wdev_events(struct wireless_dev *wdev) | ||
579 | { | ||
580 | struct cfg80211_event *ev; | ||
581 | unsigned long flags; | ||
582 | const u8 *bssid = NULL; | ||
583 | |||
584 | spin_lock_irqsave(&wdev->event_lock, flags); | ||
585 | while (!list_empty(&wdev->event_list)) { | ||
586 | ev = list_first_entry(&wdev->event_list, | ||
587 | struct cfg80211_event, list); | ||
588 | list_del(&ev->list); | ||
589 | spin_unlock_irqrestore(&wdev->event_lock, flags); | ||
590 | |||
591 | wdev_lock(wdev); | ||
592 | switch (ev->type) { | ||
593 | case EVENT_CONNECT_RESULT: | ||
594 | if (!is_zero_ether_addr(ev->cr.bssid)) | ||
595 | bssid = ev->cr.bssid; | ||
596 | __cfg80211_connect_result( | ||
597 | wdev->netdev, bssid, | ||
598 | ev->cr.req_ie, ev->cr.req_ie_len, | ||
599 | ev->cr.resp_ie, ev->cr.resp_ie_len, | ||
600 | ev->cr.status, | ||
601 | ev->cr.status == WLAN_STATUS_SUCCESS, | ||
602 | NULL); | ||
603 | break; | ||
604 | case EVENT_ROAMED: | ||
605 | __cfg80211_roamed(wdev, ev->rm.bssid, | ||
606 | ev->rm.req_ie, ev->rm.req_ie_len, | ||
607 | ev->rm.resp_ie, ev->rm.resp_ie_len); | ||
608 | break; | ||
609 | case EVENT_DISCONNECTED: | ||
610 | __cfg80211_disconnected(wdev->netdev, | ||
611 | ev->dc.ie, ev->dc.ie_len, | ||
612 | ev->dc.reason, true); | ||
613 | break; | ||
614 | case EVENT_IBSS_JOINED: | ||
615 | __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid); | ||
616 | break; | ||
617 | } | ||
618 | wdev_unlock(wdev); | ||
619 | |||
620 | kfree(ev); | ||
621 | |||
622 | spin_lock_irqsave(&wdev->event_lock, flags); | ||
623 | } | ||
624 | spin_unlock_irqrestore(&wdev->event_lock, flags); | ||
625 | } | ||
626 | |||
627 | void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev) | ||
628 | { | ||
629 | struct wireless_dev *wdev; | ||
630 | |||
631 | ASSERT_RTNL(); | ||
632 | ASSERT_RDEV_LOCK(rdev); | ||
633 | |||
634 | mutex_lock(&rdev->devlist_mtx); | ||
635 | |||
636 | list_for_each_entry(wdev, &rdev->netdev_list, list) | ||
637 | cfg80211_process_wdev_events(wdev); | ||
638 | |||
639 | mutex_unlock(&rdev->devlist_mtx); | ||
640 | } | ||
641 | |||
642 | int cfg80211_change_iface(struct cfg80211_registered_device *rdev, | ||
643 | struct net_device *dev, enum nl80211_iftype ntype, | ||
644 | u32 *flags, struct vif_params *params) | ||
645 | { | ||
646 | int err; | ||
647 | enum nl80211_iftype otype = dev->ieee80211_ptr->iftype; | ||
648 | |||
649 | ASSERT_RDEV_LOCK(rdev); | ||
650 | |||
651 | /* don't support changing VLANs, you just re-create them */ | ||
652 | if (otype == NL80211_IFTYPE_AP_VLAN) | ||
653 | return -EOPNOTSUPP; | ||
654 | |||
655 | if (!rdev->ops->change_virtual_intf || | ||
656 | !(rdev->wiphy.interface_modes & (1 << ntype))) | ||
657 | return -EOPNOTSUPP; | ||
658 | |||
659 | if (ntype != otype) { | ||
660 | switch (otype) { | ||
661 | case NL80211_IFTYPE_ADHOC: | ||
662 | cfg80211_leave_ibss(rdev, dev, false); | ||
663 | break; | ||
664 | case NL80211_IFTYPE_STATION: | ||
665 | cfg80211_disconnect(rdev, dev, | ||
666 | WLAN_REASON_DEAUTH_LEAVING, true); | ||
667 | break; | ||
668 | case NL80211_IFTYPE_MESH_POINT: | ||
669 | /* mesh should be handled? */ | ||
670 | break; | ||
671 | default: | ||
672 | break; | ||
673 | } | ||
674 | |||
675 | cfg80211_process_rdev_events(rdev); | ||
676 | } | ||
677 | |||
678 | err = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, | ||
679 | ntype, flags, params); | ||
680 | |||
681 | WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype); | ||
682 | |||
683 | return err; | ||
684 | } | ||
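Several helpers added to util.c above (and find_ie() in scan.c) walk 802.11 information elements, which are packed back to back as { id, length, payload } triples; ieee80211_bss_get_ie() stops as soon as an element would run past the buffer, which is what later lets the SME read the country IE out of the associated BSS. A self-contained sketch of that walk, using hypothetical names, is:

```c
/*
 * Standalone sketch of the IE (information element) walk used by
 * ieee80211_bss_get_ie()/find_ie() above: elements are packed as
 * { id, length, payload[length] } and scanned linearly with a bounds
 * check before each element is inspected.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static const uint8_t *find_ie_sketch(uint8_t id, const uint8_t *ies, size_t len)
{
	const uint8_t *pos = ies;
	const uint8_t *end = ies + len;

	while (pos + 2 <= end) {
		if (pos + 2 + pos[1] > end)	/* truncated element: stop */
			break;
		if (pos[0] == id)
			return pos;		/* points at { id, len, data... } */
		pos += 2 + pos[1];
	}
	return NULL;
}

int main(void)
{
	/* SSID IE (id 0, "net") followed by a country IE (id 7, "US", env ' ') */
	const uint8_t ies[] = { 0, 3, 'n', 'e', 't',  7, 3, 'U', 'S', ' ' };
	const uint8_t *country = find_ie_sketch(7, ies, sizeof(ies));

	if (country)
		printf("country IE, %u data bytes\n", (unsigned)country[1]);
	return 0;
}
```

The bounds check before the id comparison is what makes it safe for a caller to dereference country_ie + 2 and country_ie[1] afterwards, as the comment in __cfg80211_connect_result() notes.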
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index d030c5315672..429dd06a4ecc 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/etherdevice.h> | 14 | #include <linux/etherdevice.h> |
15 | #include <net/iw_handler.h> | 15 | #include <net/iw_handler.h> |
16 | #include <net/cfg80211.h> | 16 | #include <net/cfg80211.h> |
17 | #include "wext-compat.h" | ||
17 | #include "core.h" | 18 | #include "core.h" |
18 | 19 | ||
19 | int cfg80211_wext_giwname(struct net_device *dev, | 20 | int cfg80211_wext_giwname(struct net_device *dev, |
@@ -69,18 +70,8 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, | |||
69 | enum nl80211_iftype type; | 70 | enum nl80211_iftype type; |
70 | int ret; | 71 | int ret; |
71 | 72 | ||
72 | if (!wdev) | ||
73 | return -EOPNOTSUPP; | ||
74 | |||
75 | rdev = wiphy_to_dev(wdev->wiphy); | 73 | rdev = wiphy_to_dev(wdev->wiphy); |
76 | 74 | ||
77 | if (!rdev->ops->change_virtual_intf) | ||
78 | return -EOPNOTSUPP; | ||
79 | |||
80 | /* don't support changing VLANs, you just re-create them */ | ||
81 | if (wdev->iftype == NL80211_IFTYPE_AP_VLAN) | ||
82 | return -EOPNOTSUPP; | ||
83 | |||
84 | switch (*mode) { | 75 | switch (*mode) { |
85 | case IW_MODE_INFRA: | 76 | case IW_MODE_INFRA: |
86 | type = NL80211_IFTYPE_STATION; | 77 | type = NL80211_IFTYPE_STATION; |
@@ -103,9 +94,9 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, | |||
103 | 94 | ||
104 | memset(&vifparams, 0, sizeof(vifparams)); | 95 | memset(&vifparams, 0, sizeof(vifparams)); |
105 | 96 | ||
106 | ret = rdev->ops->change_virtual_intf(wdev->wiphy, dev->ifindex, type, | 97 | cfg80211_lock_rdev(rdev); |
107 | NULL, &vifparams); | 98 | ret = cfg80211_change_iface(rdev, dev, type, NULL, &vifparams); |
108 | WARN_ON(!ret && wdev->iftype != type); | 99 | cfg80211_unlock_rdev(rdev); |
109 | 100 | ||
110 | return ret; | 101 | return ret; |
111 | } | 102 | } |
@@ -154,7 +145,7 @@ int cfg80211_wext_giwrange(struct net_device *dev, | |||
154 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 145 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
155 | struct iw_range *range = (struct iw_range *) extra; | 146 | struct iw_range *range = (struct iw_range *) extra; |
156 | enum ieee80211_band band; | 147 | enum ieee80211_band band; |
157 | int c = 0; | 148 | int i, c = 0; |
158 | 149 | ||
159 | if (!wdev) | 150 | if (!wdev) |
160 | return -EOPNOTSUPP; | 151 | return -EOPNOTSUPP; |
@@ -173,9 +164,6 @@ int cfg80211_wext_giwrange(struct net_device *dev, | |||
173 | range->min_frag = 256; | 164 | range->min_frag = 256; |
174 | range->max_frag = 2346; | 165 | range->max_frag = 2346; |
175 | 166 | ||
176 | range->encoding_size[0] = 5; | ||
177 | range->encoding_size[1] = 13; | ||
178 | range->num_encoding_sizes = 2; | ||
179 | range->max_encoding_tokens = 4; | 167 | range->max_encoding_tokens = 4; |
180 | 168 | ||
181 | range->max_qual.updated = IW_QUAL_NOISE_INVALID; | 169 | range->max_qual.updated = IW_QUAL_NOISE_INVALID; |
@@ -204,11 +192,31 @@ int cfg80211_wext_giwrange(struct net_device *dev, | |||
204 | range->avg_qual.noise = range->max_qual.noise / 2; | 192 | range->avg_qual.noise = range->max_qual.noise / 2; |
205 | range->avg_qual.updated = range->max_qual.updated; | 193 | range->avg_qual.updated = range->max_qual.updated; |
206 | 194 | ||
207 | range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | | 195 | for (i = 0; i < wdev->wiphy->n_cipher_suites; i++) { |
208 | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; | 196 | switch (wdev->wiphy->cipher_suites[i]) { |
197 | case WLAN_CIPHER_SUITE_TKIP: | ||
198 | range->enc_capa |= (IW_ENC_CAPA_CIPHER_TKIP | | ||
199 | IW_ENC_CAPA_WPA); | ||
200 | break; | ||
201 | |||
202 | case WLAN_CIPHER_SUITE_CCMP: | ||
203 | range->enc_capa |= (IW_ENC_CAPA_CIPHER_CCMP | | ||
204 | IW_ENC_CAPA_WPA2); | ||
205 | break; | ||
206 | |||
207 | case WLAN_CIPHER_SUITE_WEP40: | ||
208 | range->encoding_size[range->num_encoding_sizes++] = | ||
209 | WLAN_KEY_LEN_WEP40; | ||
210 | break; | ||
211 | |||
212 | case WLAN_CIPHER_SUITE_WEP104: | ||
213 | range->encoding_size[range->num_encoding_sizes++] = | ||
214 | WLAN_KEY_LEN_WEP104; | ||
215 | break; | ||
216 | } | ||
217 | } | ||
209 | 218 | ||
210 | for (band = 0; band < IEEE80211_NUM_BANDS; band ++) { | 219 | for (band = 0; band < IEEE80211_NUM_BANDS; band ++) { |
211 | int i; | ||
212 | struct ieee80211_supported_band *sband; | 220 | struct ieee80211_supported_band *sband; |
213 | 221 | ||
214 | sband = wdev->wiphy->bands[band]; | 222 | sband = wdev->wiphy->bands[band]; |
@@ -236,97 +244,40 @@ int cfg80211_wext_giwrange(struct net_device *dev, | |||
236 | IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); | 244 | IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); |
237 | IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); | 245 | IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); |
238 | 246 | ||
239 | range->scan_capa |= IW_SCAN_CAPA_ESSID; | 247 | if (wdev->wiphy->max_scan_ssids > 0) |
248 | range->scan_capa |= IW_SCAN_CAPA_ESSID; | ||
240 | 249 | ||
241 | return 0; | 250 | return 0; |
242 | } | 251 | } |
243 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange); | 252 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange); |
244 | 253 | ||
245 | int cfg80211_wext_siwmlme(struct net_device *dev, | ||
246 | struct iw_request_info *info, | ||
247 | struct iw_point *data, char *extra) | ||
248 | { | ||
249 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
250 | struct iw_mlme *mlme = (struct iw_mlme *)extra; | ||
251 | struct cfg80211_registered_device *rdev; | ||
252 | union { | ||
253 | struct cfg80211_disassoc_request disassoc; | ||
254 | struct cfg80211_deauth_request deauth; | ||
255 | } cmd; | ||
256 | |||
257 | if (!wdev) | ||
258 | return -EOPNOTSUPP; | ||
259 | |||
260 | rdev = wiphy_to_dev(wdev->wiphy); | ||
261 | |||
262 | if (wdev->iftype != NL80211_IFTYPE_STATION) | ||
263 | return -EINVAL; | ||
264 | |||
265 | if (mlme->addr.sa_family != ARPHRD_ETHER) | ||
266 | return -EINVAL; | ||
267 | |||
268 | memset(&cmd, 0, sizeof(cmd)); | ||
269 | |||
270 | switch (mlme->cmd) { | ||
271 | case IW_MLME_DEAUTH: | ||
272 | if (!rdev->ops->deauth) | ||
273 | return -EOPNOTSUPP; | ||
274 | cmd.deauth.peer_addr = mlme->addr.sa_data; | ||
275 | cmd.deauth.reason_code = mlme->reason_code; | ||
276 | return rdev->ops->deauth(wdev->wiphy, dev, &cmd.deauth); | ||
277 | case IW_MLME_DISASSOC: | ||
278 | if (!rdev->ops->disassoc) | ||
279 | return -EOPNOTSUPP; | ||
280 | cmd.disassoc.peer_addr = mlme->addr.sa_data; | ||
281 | cmd.disassoc.reason_code = mlme->reason_code; | ||
282 | return rdev->ops->disassoc(wdev->wiphy, dev, &cmd.disassoc); | ||
283 | default: | ||
284 | return -EOPNOTSUPP; | ||
285 | } | ||
286 | } | ||
287 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwmlme); | ||
288 | |||
289 | 254 | ||
290 | /** | 255 | /** |
291 | * cfg80211_wext_freq - get wext frequency for non-"auto" | 256 | * cfg80211_wext_freq - get wext frequency for non-"auto" |
292 | * @wiphy: the wiphy | 257 | * @wiphy: the wiphy |
293 | * @freq: the wext freq encoding | 258 | * @freq: the wext freq encoding |
294 | * | 259 | * |
295 | * Returns a channel, %NULL for auto, or an ERR_PTR for errors! | 260 | * Returns a frequency, or a negative error code, or 0 for auto. |
296 | */ | 261 | */ |
297 | struct ieee80211_channel *cfg80211_wext_freq(struct wiphy *wiphy, | 262 | int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq) |
298 | struct iw_freq *freq) | ||
299 | { | 263 | { |
300 | struct ieee80211_channel *chan; | ||
301 | int f; | ||
302 | |||
303 | /* | 264 | /* |
304 | * Parse frequency - return NULL for auto and | 265 | * Parse frequency - return 0 for auto and |
305 | * -EINVAL for impossible things. | 266 | * -EINVAL for impossible things. |
306 | */ | 267 | */ |
307 | if (freq->e == 0) { | 268 | if (freq->e == 0) { |
308 | if (freq->m < 0) | 269 | if (freq->m < 0) |
309 | return NULL; | 270 | return 0; |
310 | f = ieee80211_channel_to_frequency(freq->m); | 271 | return ieee80211_channel_to_frequency(freq->m); |
311 | } else { | 272 | } else { |
312 | int i, div = 1000000; | 273 | int i, div = 1000000; |
313 | for (i = 0; i < freq->e; i++) | 274 | for (i = 0; i < freq->e; i++) |
314 | div /= 10; | 275 | div /= 10; |
315 | if (div <= 0) | 276 | if (div <= 0) |
316 | return ERR_PTR(-EINVAL); | 277 | return -EINVAL; |
317 | f = freq->m / div; | 278 | return freq->m / div; |
318 | } | 279 | } |
319 | |||
320 | /* | ||
321 | * Look up channel struct and return -EINVAL when | ||
322 | * it cannot be found. | ||
323 | */ | ||
324 | chan = ieee80211_get_channel(wiphy, f); | ||
325 | if (!chan) | ||
326 | return ERR_PTR(-EINVAL); | ||
327 | return chan; | ||
328 | } | 280 | } |
329 | EXPORT_SYMBOL_GPL(cfg80211_wext_freq); | ||
330 | 281 | ||
331 | int cfg80211_wext_siwrts(struct net_device *dev, | 282 | int cfg80211_wext_siwrts(struct net_device *dev, |
332 | struct iw_request_info *info, | 283 | struct iw_request_info *info, |
@@ -479,15 +430,32 @@ int cfg80211_wext_giwretry(struct net_device *dev, | |||
479 | } | 430 | } |
480 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwretry); | 431 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwretry); |
481 | 432 | ||
482 | static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev, | 433 | static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, |
483 | struct net_device *dev, const u8 *addr, | 434 | struct net_device *dev, const u8 *addr, |
484 | bool remove, bool tx_key, int idx, | 435 | bool remove, bool tx_key, int idx, |
485 | struct key_params *params) | 436 | struct key_params *params) |
486 | { | 437 | { |
487 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 438 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
488 | int err; | 439 | int err, i; |
440 | |||
441 | if (!wdev->wext.keys) { | ||
442 | wdev->wext.keys = kzalloc(sizeof(*wdev->wext.keys), | ||
443 | GFP_KERNEL); | ||
444 | if (!wdev->wext.keys) | ||
445 | return -ENOMEM; | ||
446 | for (i = 0; i < 6; i++) | ||
447 | wdev->wext.keys->params[i].key = | ||
448 | wdev->wext.keys->data[i]; | ||
449 | } | ||
450 | |||
451 | if (wdev->iftype != NL80211_IFTYPE_ADHOC && | ||
452 | wdev->iftype != NL80211_IFTYPE_STATION) | ||
453 | return -EOPNOTSUPP; | ||
489 | 454 | ||
490 | if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { | 455 | if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { |
456 | if (!wdev->current_bss) | ||
457 | return -ENOLINK; | ||
458 | |||
491 | if (!rdev->ops->set_default_mgmt_key) | 459 | if (!rdev->ops->set_default_mgmt_key) |
492 | return -EOPNOTSUPP; | 460 | return -EOPNOTSUPP; |
493 | 461 | ||
@@ -497,8 +465,14 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev, | |||
497 | return -EINVAL; | 465 | return -EINVAL; |
498 | 466 | ||
499 | if (remove) { | 467 | if (remove) { |
500 | err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr); | 468 | err = 0; |
469 | if (wdev->current_bss) | ||
470 | err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr); | ||
501 | if (!err) { | 471 | if (!err) { |
472 | if (!addr) { | ||
473 | wdev->wext.keys->params[idx].key_len = 0; | ||
474 | wdev->wext.keys->params[idx].cipher = 0; | ||
475 | } | ||
502 | if (idx == wdev->wext.default_key) | 476 | if (idx == wdev->wext.default_key) |
503 | wdev->wext.default_key = -1; | 477 | wdev->wext.default_key = -1; |
504 | else if (idx == wdev->wext.default_mgmt_key) | 478 | else if (idx == wdev->wext.default_mgmt_key) |
@@ -512,36 +486,65 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev, | |||
512 | return 0; | 486 | return 0; |
513 | 487 | ||
514 | return err; | 488 | return err; |
515 | } else { | 489 | } |
516 | if (addr) | ||
517 | tx_key = false; | ||
518 | 490 | ||
519 | if (cfg80211_validate_key_settings(params, idx, addr)) | 491 | if (addr) |
520 | return -EINVAL; | 492 | tx_key = false; |
521 | 493 | ||
494 | if (cfg80211_validate_key_settings(rdev, params, idx, addr)) | ||
495 | return -EINVAL; | ||
496 | |||
497 | err = 0; | ||
498 | if (wdev->current_bss) | ||
522 | err = rdev->ops->add_key(&rdev->wiphy, dev, idx, addr, params); | 499 | err = rdev->ops->add_key(&rdev->wiphy, dev, idx, addr, params); |
523 | if (err) | 500 | if (err) |
524 | return err; | 501 | return err; |
502 | |||
503 | if (!addr) { | ||
504 | wdev->wext.keys->params[idx] = *params; | ||
505 | memcpy(wdev->wext.keys->data[idx], | ||
506 | params->key, params->key_len); | ||
507 | wdev->wext.keys->params[idx].key = | ||
508 | wdev->wext.keys->data[idx]; | ||
509 | } | ||
525 | 510 | ||
526 | if (tx_key || (!addr && wdev->wext.default_key == -1)) { | 511 | if ((params->cipher == WLAN_CIPHER_SUITE_WEP40 || |
512 | params->cipher == WLAN_CIPHER_SUITE_WEP104) && | ||
513 | (tx_key || (!addr && wdev->wext.default_key == -1))) { | ||
514 | if (wdev->current_bss) | ||
527 | err = rdev->ops->set_default_key(&rdev->wiphy, | 515 | err = rdev->ops->set_default_key(&rdev->wiphy, |
528 | dev, idx); | 516 | dev, idx); |
529 | if (!err) | 517 | if (!err) |
530 | wdev->wext.default_key = idx; | 518 | wdev->wext.default_key = idx; |
531 | return err; | 519 | return err; |
532 | } | 520 | } |
533 | 521 | ||
534 | if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC && | 522 | if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC && |
535 | (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) { | 523 | (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) { |
524 | if (wdev->current_bss) | ||
536 | err = rdev->ops->set_default_mgmt_key(&rdev->wiphy, | 525 | err = rdev->ops->set_default_mgmt_key(&rdev->wiphy, |
537 | dev, idx); | 526 | dev, idx); |
538 | if (!err) | 527 | if (!err) |
539 | wdev->wext.default_mgmt_key = idx; | 528 | wdev->wext.default_mgmt_key = idx; |
540 | return err; | 529 | return err; |
541 | } | ||
542 | |||
543 | return 0; | ||
544 | } | 530 | } |
531 | |||
532 | return 0; | ||
533 | } | ||
534 | |||
535 | static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev, | ||
536 | struct net_device *dev, const u8 *addr, | ||
537 | bool remove, bool tx_key, int idx, | ||
538 | struct key_params *params) | ||
539 | { | ||
540 | int err; | ||
541 | |||
542 | wdev_lock(dev->ieee80211_ptr); | ||
543 | err = __cfg80211_set_encryption(rdev, dev, addr, remove, | ||
544 | tx_key, idx, params); | ||
545 | wdev_unlock(dev->ieee80211_ptr); | ||
546 | |||
547 | return err; | ||
545 | } | 548 | } |
546 | 549 | ||
547 | int cfg80211_wext_siwencode(struct net_device *dev, | 550 | int cfg80211_wext_siwencode(struct net_device *dev, |
@@ -554,6 +557,10 @@ int cfg80211_wext_siwencode(struct net_device *dev, | |||
554 | bool remove = false; | 557 | bool remove = false; |
555 | struct key_params params; | 558 | struct key_params params; |
556 | 559 | ||
560 | if (wdev->iftype != NL80211_IFTYPE_STATION && | ||
561 | wdev->iftype != NL80211_IFTYPE_ADHOC) | ||
562 | return -EOPNOTSUPP; | ||
563 | |||
557 | /* no use -- only MFP (set_default_mgmt_key) is optional */ | 564 | /* no use -- only MFP (set_default_mgmt_key) is optional */ |
558 | if (!rdev->ops->del_key || | 565 | if (!rdev->ops->del_key || |
559 | !rdev->ops->add_key || | 566 | !rdev->ops->add_key || |
@@ -574,9 +581,14 @@ int cfg80211_wext_siwencode(struct net_device *dev, | |||
574 | remove = true; | 581 | remove = true; |
575 | else if (erq->length == 0) { | 582 | else if (erq->length == 0) { |
576 | /* No key data - just set the default TX key index */ | 583 | /* No key data - just set the default TX key index */ |
577 | err = rdev->ops->set_default_key(&rdev->wiphy, dev, idx); | 584 | err = 0; |
585 | wdev_lock(wdev); | ||
586 | if (wdev->current_bss) | ||
587 | err = rdev->ops->set_default_key(&rdev->wiphy, | ||
588 | dev, idx); | ||
578 | if (!err) | 589 | if (!err) |
579 | wdev->wext.default_key = idx; | 590 | wdev->wext.default_key = idx; |
591 | wdev_unlock(wdev); | ||
580 | return err; | 592 | return err; |
581 | } | 593 | } |
582 | 594 | ||
@@ -609,6 +621,10 @@ int cfg80211_wext_siwencodeext(struct net_device *dev, | |||
609 | struct key_params params; | 621 | struct key_params params; |
610 | u32 cipher; | 622 | u32 cipher; |
611 | 623 | ||
624 | if (wdev->iftype != NL80211_IFTYPE_STATION && | ||
625 | wdev->iftype != NL80211_IFTYPE_ADHOC) | ||
626 | return -EOPNOTSUPP; | ||
627 | |||
612 | /* no use -- only MFP (set_default_mgmt_key) is optional */ | 628 | /* no use -- only MFP (set_default_mgmt_key) is optional */ |
613 | if (!rdev->ops->del_key || | 629 | if (!rdev->ops->del_key || |
614 | !rdev->ops->add_key || | 630 | !rdev->ops->add_key || |
@@ -682,37 +698,15 @@ int cfg80211_wext_siwencodeext(struct net_device *dev, | |||
682 | } | 698 | } |
683 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwencodeext); | 699 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwencodeext); |
684 | 700 | ||
685 | struct giwencode_cookie { | ||
686 | size_t buflen; | ||
687 | char *keybuf; | ||
688 | }; | ||
689 | |||
690 | static void giwencode_get_key_cb(void *cookie, struct key_params *params) | ||
691 | { | ||
692 | struct giwencode_cookie *data = cookie; | ||
693 | |||
694 | if (!params->key) { | ||
695 | data->buflen = 0; | ||
696 | return; | ||
697 | } | ||
698 | |||
699 | data->buflen = min_t(size_t, data->buflen, params->key_len); | ||
700 | memcpy(data->keybuf, params->key, data->buflen); | ||
701 | } | ||
702 | |||
703 | int cfg80211_wext_giwencode(struct net_device *dev, | 701 | int cfg80211_wext_giwencode(struct net_device *dev, |
704 | struct iw_request_info *info, | 702 | struct iw_request_info *info, |
705 | struct iw_point *erq, char *keybuf) | 703 | struct iw_point *erq, char *keybuf) |
706 | { | 704 | { |
707 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 705 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
708 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 706 | int idx; |
709 | int idx, err; | ||
710 | struct giwencode_cookie data = { | ||
711 | .keybuf = keybuf, | ||
712 | .buflen = erq->length, | ||
713 | }; | ||
714 | 707 | ||
715 | if (!rdev->ops->get_key) | 708 | if (wdev->iftype != NL80211_IFTYPE_STATION && |
709 | wdev->iftype != NL80211_IFTYPE_ADHOC) | ||
716 | return -EOPNOTSUPP; | 710 | return -EOPNOTSUPP; |
717 | 711 | ||
718 | idx = erq->flags & IW_ENCODE_INDEX; | 712 | idx = erq->flags & IW_ENCODE_INDEX; |
@@ -727,24 +721,70 @@ int cfg80211_wext_giwencode(struct net_device *dev, | |||
727 | 721 | ||
728 | erq->flags = idx + 1; | 722 | erq->flags = idx + 1; |
729 | 723 | ||
730 | err = rdev->ops->get_key(&rdev->wiphy, dev, idx, NULL, &data, | 724 | if (!wdev->wext.keys || !wdev->wext.keys->params[idx].cipher) { |
731 | giwencode_get_key_cb); | ||
732 | if (!err) { | ||
733 | erq->length = data.buflen; | ||
734 | erq->flags |= IW_ENCODE_ENABLED; | ||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | if (err == -ENOENT) { | ||
739 | erq->flags |= IW_ENCODE_DISABLED; | 725 | erq->flags |= IW_ENCODE_DISABLED; |
740 | erq->length = 0; | 726 | erq->length = 0; |
741 | return 0; | 727 | return 0; |
742 | } | 728 | } |
743 | 729 | ||
744 | return err; | 730 | erq->length = min_t(size_t, erq->length, |
731 | wdev->wext.keys->params[idx].key_len); | ||
732 | memcpy(keybuf, wdev->wext.keys->params[idx].key, erq->length); | ||
733 | erq->flags |= IW_ENCODE_ENABLED; | ||
734 | |||
735 | return 0; | ||
745 | } | 736 | } |
746 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode); | 737 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode); |
747 | 738 | ||
739 | int cfg80211_wext_siwfreq(struct net_device *dev, | ||
740 | struct iw_request_info *info, | ||
741 | struct iw_freq *wextfreq, char *extra) | ||
742 | { | ||
743 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
744 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
745 | int freq, err; | ||
746 | |||
747 | switch (wdev->iftype) { | ||
748 | case NL80211_IFTYPE_STATION: | ||
749 | return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra); | ||
750 | case NL80211_IFTYPE_ADHOC: | ||
751 | return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra); | ||
752 | default: | ||
753 | freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); | ||
754 | if (freq < 0) | ||
755 | return freq; | ||
756 | if (freq == 0) | ||
757 | return -EINVAL; | ||
758 | mutex_lock(&rdev->devlist_mtx); | ||
759 | err = rdev_set_freq(rdev, NULL, freq, NL80211_CHAN_NO_HT); | ||
760 | mutex_unlock(&rdev->devlist_mtx); | ||
761 | return err; | ||
762 | } | ||
763 | } | ||
764 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq); | ||
765 | |||
766 | int cfg80211_wext_giwfreq(struct net_device *dev, | ||
767 | struct iw_request_info *info, | ||
768 | struct iw_freq *freq, char *extra) | ||
769 | { | ||
770 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
771 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
772 | |||
773 | switch (wdev->iftype) { | ||
774 | case NL80211_IFTYPE_STATION: | ||
775 | return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra); | ||
776 | case NL80211_IFTYPE_ADHOC: | ||
777 | return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra); | ||
778 | default: | ||
779 | if (!rdev->channel) | ||
780 | return -EINVAL; | ||
781 | freq->m = rdev->channel->center_freq; | ||
782 | freq->e = 6; | ||
783 | return 0; | ||
784 | } | ||
785 | } | ||
786 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwfreq); | ||
787 | |||
748 | int cfg80211_wext_siwtxpower(struct net_device *dev, | 788 | int cfg80211_wext_siwtxpower(struct net_device *dev, |
749 | struct iw_request_info *info, | 789 | struct iw_request_info *info, |
750 | union iwreq_data *data, char *extra) | 790 | union iwreq_data *data, char *extra) |
@@ -827,3 +867,547 @@ int cfg80211_wext_giwtxpower(struct net_device *dev, | |||
827 | return 0; | 867 | return 0; |
828 | } | 868 | } |
829 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwtxpower); | 869 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwtxpower); |
870 | |||
871 | static int cfg80211_set_auth_alg(struct wireless_dev *wdev, | ||
872 | s32 auth_alg) | ||
873 | { | ||
874 | int nr_alg = 0; | ||
875 | |||
876 | if (!auth_alg) | ||
877 | return -EINVAL; | ||
878 | |||
879 | if (auth_alg & ~(IW_AUTH_ALG_OPEN_SYSTEM | | ||
880 | IW_AUTH_ALG_SHARED_KEY | | ||
881 | IW_AUTH_ALG_LEAP)) | ||
882 | return -EINVAL; | ||
883 | |||
884 | if (auth_alg & IW_AUTH_ALG_OPEN_SYSTEM) { | ||
885 | nr_alg++; | ||
886 | wdev->wext.connect.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; | ||
887 | } | ||
888 | |||
889 | if (auth_alg & IW_AUTH_ALG_SHARED_KEY) { | ||
890 | nr_alg++; | ||
891 | wdev->wext.connect.auth_type = NL80211_AUTHTYPE_SHARED_KEY; | ||
892 | } | ||
893 | |||
894 | if (auth_alg & IW_AUTH_ALG_LEAP) { | ||
895 | nr_alg++; | ||
896 | wdev->wext.connect.auth_type = NL80211_AUTHTYPE_NETWORK_EAP; | ||
897 | } | ||
898 | |||
899 | if (nr_alg > 1) | ||
900 | wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; | ||
901 | |||
902 | return 0; | ||
903 | } | ||
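cfg80211_set_auth_alg() reduces the wext auth-alg bitmask to a single NL80211 auth type and falls back to AUTOMATIC when user space allows more than one algorithm. A hedged userspace illustration of that decision table (the IW_AUTH_ALG_* values below mirror linux/wireless.h, and the strings stand in for the NL80211_AUTHTYPE_* enum):

#include <stdio.h>

#define IW_AUTH_ALG_OPEN_SYSTEM 0x00000001
#define IW_AUTH_ALG_SHARED_KEY  0x00000002
#define IW_AUTH_ALG_LEAP        0x00000004

static const char *pick_auth_type(unsigned int auth_alg)
{
	int nr_alg = 0;
	const char *type = NULL;

	if (!auth_alg ||
	    (auth_alg & ~(IW_AUTH_ALG_OPEN_SYSTEM |
			  IW_AUTH_ALG_SHARED_KEY |
			  IW_AUTH_ALG_LEAP)))
		return "-EINVAL";

	if (auth_alg & IW_AUTH_ALG_OPEN_SYSTEM) {
		nr_alg++;
		type = "OPEN_SYSTEM";
	}
	if (auth_alg & IW_AUTH_ALG_SHARED_KEY) {
		nr_alg++;
		type = "SHARED_KEY";
	}
	if (auth_alg & IW_AUTH_ALG_LEAP) {
		nr_alg++;
		type = "NETWORK_EAP";
	}

	/* more than one algorithm requested: let the SME pick */
	return nr_alg > 1 ? "AUTOMATIC" : type;
}

int main(void)
{
	printf("%s\n", pick_auth_type(IW_AUTH_ALG_OPEN_SYSTEM));  /* OPEN_SYSTEM */
	printf("%s\n", pick_auth_type(IW_AUTH_ALG_OPEN_SYSTEM |
				      IW_AUTH_ALG_SHARED_KEY));    /* AUTOMATIC */
	return 0;
}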
904 | |||
905 | static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions) | ||
906 | { | ||
907 | wdev->wext.connect.crypto.wpa_versions = 0; | ||
908 | |||
909 | if (wpa_versions & ~(IW_AUTH_WPA_VERSION_WPA | | ||
910 | IW_AUTH_WPA_VERSION_WPA2| | ||
911 | IW_AUTH_WPA_VERSION_DISABLED)) | ||
912 | return -EINVAL; | ||
913 | |||
914 | if ((wpa_versions & IW_AUTH_WPA_VERSION_DISABLED) && | ||
915 | (wpa_versions & (IW_AUTH_WPA_VERSION_WPA| | ||
916 | IW_AUTH_WPA_VERSION_WPA2))) | ||
917 | return -EINVAL; | ||
918 | |||
919 | if (wpa_versions & IW_AUTH_WPA_VERSION_DISABLED) | ||
920 | wdev->wext.connect.crypto.wpa_versions &= | ||
921 | ~(NL80211_WPA_VERSION_1|NL80211_WPA_VERSION_2); | ||
922 | |||
923 | if (wpa_versions & IW_AUTH_WPA_VERSION_WPA) | ||
924 | wdev->wext.connect.crypto.wpa_versions |= | ||
925 | NL80211_WPA_VERSION_1; | ||
926 | |||
927 | if (wpa_versions & IW_AUTH_WPA_VERSION_WPA2) | ||
928 | wdev->wext.connect.crypto.wpa_versions |= | ||
929 | NL80211_WPA_VERSION_2; | ||
930 | |||
931 | return 0; | ||
932 | } | ||
933 | |||
934 | static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher) | ||
935 | { | ||
936 | wdev->wext.connect.crypto.cipher_group = 0; | ||
937 | |||
938 | if (cipher & IW_AUTH_CIPHER_WEP40) | ||
939 | wdev->wext.connect.crypto.cipher_group = | ||
940 | WLAN_CIPHER_SUITE_WEP40; | ||
941 | else if (cipher & IW_AUTH_CIPHER_WEP104) | ||
942 | wdev->wext.connect.crypto.cipher_group = | ||
943 | WLAN_CIPHER_SUITE_WEP104; | ||
944 | else if (cipher & IW_AUTH_CIPHER_TKIP) | ||
945 | wdev->wext.connect.crypto.cipher_group = | ||
946 | WLAN_CIPHER_SUITE_TKIP; | ||
947 | else if (cipher & IW_AUTH_CIPHER_CCMP) | ||
948 | wdev->wext.connect.crypto.cipher_group = | ||
949 | WLAN_CIPHER_SUITE_CCMP; | ||
950 | else if (cipher & IW_AUTH_CIPHER_AES_CMAC) | ||
951 | wdev->wext.connect.crypto.cipher_group = | ||
952 | WLAN_CIPHER_SUITE_AES_CMAC; | ||
953 | else | ||
954 | return -EINVAL; | ||
955 | |||
956 | return 0; | ||
957 | } | ||
958 | |||
959 | static int cfg80211_set_cipher_pairwise(struct wireless_dev *wdev, u32 cipher) | ||
960 | { | ||
961 | int nr_ciphers = 0; | ||
962 | u32 *ciphers_pairwise = wdev->wext.connect.crypto.ciphers_pairwise; | ||
963 | |||
964 | if (cipher & IW_AUTH_CIPHER_WEP40) { | ||
965 | ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_WEP40; | ||
966 | nr_ciphers++; | ||
967 | } | ||
968 | |||
969 | if (cipher & IW_AUTH_CIPHER_WEP104) { | ||
970 | ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_WEP104; | ||
971 | nr_ciphers++; | ||
972 | } | ||
973 | |||
974 | if (cipher & IW_AUTH_CIPHER_TKIP) { | ||
975 | ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_TKIP; | ||
976 | nr_ciphers++; | ||
977 | } | ||
978 | |||
979 | if (cipher & IW_AUTH_CIPHER_CCMP) { | ||
980 | ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_CCMP; | ||
981 | nr_ciphers++; | ||
982 | } | ||
983 | |||
984 | if (cipher & IW_AUTH_CIPHER_AES_CMAC) { | ||
985 | ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_AES_CMAC; | ||
986 | nr_ciphers++; | ||
987 | } | ||
988 | |||
989 | BUILD_BUG_ON(NL80211_MAX_NR_CIPHER_SUITES < 5); | ||
990 | |||
991 | wdev->wext.connect.crypto.n_ciphers_pairwise = nr_ciphers; | ||
992 | |||
993 | return 0; | ||
994 | } | ||
995 | |||
996 | |||
997 | static int cfg80211_set_key_mgt(struct wireless_dev *wdev, u32 key_mgt) | ||
998 | { | ||
999 | int nr_akm_suites = 0; | ||
1000 | |||
1001 | if (key_mgt & ~(IW_AUTH_KEY_MGMT_802_1X | | ||
1002 | IW_AUTH_KEY_MGMT_PSK)) | ||
1003 | return -EINVAL; | ||
1004 | |||
1005 | if (key_mgt & IW_AUTH_KEY_MGMT_802_1X) { | ||
1006 | wdev->wext.connect.crypto.akm_suites[nr_akm_suites] = | ||
1007 | WLAN_AKM_SUITE_8021X; | ||
1008 | nr_akm_suites++; | ||
1009 | } | ||
1010 | |||
1011 | if (key_mgt & IW_AUTH_KEY_MGMT_PSK) { | ||
1012 | wdev->wext.connect.crypto.akm_suites[nr_akm_suites] = | ||
1013 | WLAN_AKM_SUITE_PSK; | ||
1014 | nr_akm_suites++; | ||
1015 | } | ||
1016 | |||
1017 | wdev->wext.connect.crypto.n_akm_suites = nr_akm_suites; | ||
1018 | |||
1019 | return 0; | ||
1020 | } | ||
1021 | |||
1022 | int cfg80211_wext_siwauth(struct net_device *dev, | ||
1023 | struct iw_request_info *info, | ||
1024 | struct iw_param *data, char *extra) | ||
1025 | { | ||
1026 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
1027 | |||
1028 | if (wdev->iftype != NL80211_IFTYPE_STATION) | ||
1029 | return -EOPNOTSUPP; | ||
1030 | |||
1031 | switch (data->flags & IW_AUTH_INDEX) { | ||
1032 | case IW_AUTH_PRIVACY_INVOKED: | ||
1033 | wdev->wext.connect.privacy = data->value; | ||
1034 | return 0; | ||
1035 | case IW_AUTH_WPA_VERSION: | ||
1036 | return cfg80211_set_wpa_version(wdev, data->value); | ||
1037 | case IW_AUTH_CIPHER_GROUP: | ||
1038 | return cfg80211_set_cipher_group(wdev, data->value); | ||
1039 | case IW_AUTH_KEY_MGMT: | ||
1040 | return cfg80211_set_key_mgt(wdev, data->value); | ||
1041 | case IW_AUTH_CIPHER_PAIRWISE: | ||
1042 | return cfg80211_set_cipher_pairwise(wdev, data->value); | ||
1043 | case IW_AUTH_80211_AUTH_ALG: | ||
1044 | return cfg80211_set_auth_alg(wdev, data->value); | ||
1045 | case IW_AUTH_WPA_ENABLED: | ||
1046 | case IW_AUTH_RX_UNENCRYPTED_EAPOL: | ||
1047 | case IW_AUTH_DROP_UNENCRYPTED: | ||
1048 | case IW_AUTH_MFP: | ||
1049 | return 0; | ||
1050 | default: | ||
1051 | return -EOPNOTSUPP; | ||
1052 | } | ||
1053 | } | ||
1054 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwauth); | ||
1055 | |||
1056 | int cfg80211_wext_giwauth(struct net_device *dev, | ||
1057 | struct iw_request_info *info, | ||
1058 | struct iw_param *data, char *extra) | ||
1059 | { | ||
1060 | /* XXX: what do we need? */ | ||
1061 | |||
1062 | return -EOPNOTSUPP; | ||
1063 | } | ||
1064 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwauth); | ||
1065 | |||
1066 | int cfg80211_wext_siwpower(struct net_device *dev, | ||
1067 | struct iw_request_info *info, | ||
1068 | struct iw_param *wrq, char *extra) | ||
1069 | { | ||
1070 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
1071 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
1072 | bool ps = wdev->wext.ps; | ||
1073 | int timeout = wdev->wext.ps_timeout; | ||
1074 | int err; | ||
1075 | |||
1076 | if (wdev->iftype != NL80211_IFTYPE_STATION) | ||
1077 | return -EINVAL; | ||
1078 | |||
1079 | if (!rdev->ops->set_power_mgmt) | ||
1080 | return -EOPNOTSUPP; | ||
1081 | |||
1082 | if (wrq->disabled) { | ||
1083 | ps = false; | ||
1084 | } else { | ||
1085 | switch (wrq->flags & IW_POWER_MODE) { | ||
1086 | case IW_POWER_ON: /* If not specified */ | ||
1087 | case IW_POWER_MODE: /* If set all mask */ | ||
1088 | case IW_POWER_ALL_R: /* If explicitly stated all */ | ||
1089 | ps = true; | ||
1090 | break; | ||
1091 | default: /* Otherwise we ignore */ | ||
1092 | return -EINVAL; | ||
1093 | } | ||
1094 | |||
1095 | if (wrq->flags & ~(IW_POWER_MODE | IW_POWER_TIMEOUT)) | ||
1096 | return -EINVAL; | ||
1097 | |||
1098 | if (wrq->flags & IW_POWER_TIMEOUT) | ||
1099 | timeout = wrq->value / 1000; | ||
1100 | } | ||
1101 | |||
1102 | err = rdev->ops->set_power_mgmt(wdev->wiphy, dev, ps, timeout); | ||
1103 | if (err) | ||
1104 | return err; | ||
1105 | |||
1106 | wdev->wext.ps = ps; | ||
1107 | wdev->wext.ps_timeout = timeout; | ||
1108 | |||
1109 | return 0; | ||
1110 | |||
1111 | } | ||
1112 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwpower); | ||
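In the power-save path the wext timeout value appears to arrive in microseconds (the usual wireless-extensions unit), while wdev->wext.ps_timeout is kept in milliseconds, which is what the wrq->value / 1000 above converts. A trivial worked example of that conversion; the 300 ms figure is chosen purely for illustration:

#include <stdio.h>

int main(void)
{
	int wrq_value_us  = 300000;               /* timeout requested by user space, in us */
	int ps_timeout_ms = wrq_value_us / 1000;  /* what the kernel stores */

	printf("%d us -> %d ms\n", wrq_value_us, ps_timeout_ms);  /* 300000 us -> 300 ms */
	return 0;
}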
1113 | |||
1114 | int cfg80211_wext_giwpower(struct net_device *dev, | ||
1115 | struct iw_request_info *info, | ||
1116 | struct iw_param *wrq, char *extra) | ||
1117 | { | ||
1118 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
1119 | |||
1120 | wrq->disabled = !wdev->wext.ps; | ||
1121 | |||
1122 | return 0; | ||
1123 | } | ||
1124 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwpower); | ||
1125 | |||
1126 | static int cfg80211_wds_wext_siwap(struct net_device *dev, | ||
1127 | struct iw_request_info *info, | ||
1128 | struct sockaddr *addr, char *extra) | ||
1129 | { | ||
1130 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
1131 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
1132 | int err; | ||
1133 | |||
1134 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS)) | ||
1135 | return -EINVAL; | ||
1136 | |||
1137 | if (addr->sa_family != ARPHRD_ETHER) | ||
1138 | return -EINVAL; | ||
1139 | |||
1140 | if (netif_running(dev)) | ||
1141 | return -EBUSY; | ||
1142 | |||
1143 | if (!rdev->ops->set_wds_peer) | ||
1144 | return -EOPNOTSUPP; | ||
1145 | |||
1146 | err = rdev->ops->set_wds_peer(wdev->wiphy, dev, (u8 *) &addr->sa_data); | ||
1147 | if (err) | ||
1148 | return err; | ||
1149 | |||
1150 | memcpy(&wdev->wext.bssid, (u8 *) &addr->sa_data, ETH_ALEN); | ||
1151 | |||
1152 | return 0; | ||
1153 | } | ||
1154 | |||
1155 | static int cfg80211_wds_wext_giwap(struct net_device *dev, | ||
1156 | struct iw_request_info *info, | ||
1157 | struct sockaddr *addr, char *extra) | ||
1158 | { | ||
1159 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
1160 | |||
1161 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS)) | ||
1162 | return -EINVAL; | ||
1163 | |||
1164 | addr->sa_family = ARPHRD_ETHER; | ||
1165 | memcpy(&addr->sa_data, wdev->wext.bssid, ETH_ALEN); | ||
1166 | |||
1167 | return 0; | ||
1168 | } | ||
1169 | |||
1170 | int cfg80211_wext_siwrate(struct net_device *dev, | ||
1171 | struct iw_request_info *info, | ||
1172 | struct iw_param *rate, char *extra) | ||
1173 | { | ||
1174 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
1175 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
1176 | struct cfg80211_bitrate_mask mask; | ||
1177 | |||
1178 | if (!rdev->ops->set_bitrate_mask) | ||
1179 | return -EOPNOTSUPP; | ||
1180 | |||
1181 | mask.fixed = 0; | ||
1182 | mask.maxrate = 0; | ||
1183 | |||
1184 | if (rate->value < 0) { | ||
1185 | /* nothing */ | ||
1186 | } else if (rate->fixed) { | ||
1187 | mask.fixed = rate->value / 1000; /* kbps */ | ||
1188 | } else { | ||
1189 | mask.maxrate = rate->value / 1000; /* kbps */ | ||
1190 | } | ||
1191 | |||
1192 | return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask); | ||
1193 | } | ||
1194 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate); | ||
1195 | |||
1196 | int cfg80211_wext_giwrate(struct net_device *dev, | ||
1197 | struct iw_request_info *info, | ||
1198 | struct iw_param *rate, char *extra) | ||
1199 | { | ||
1200 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
1201 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
1202 | /* we are under RTNL - globally locked - so can use a static struct */ | ||
1203 | static struct station_info sinfo; | ||
1204 | u8 addr[ETH_ALEN]; | ||
1205 | int err; | ||
1206 | |||
1207 | if (wdev->iftype != NL80211_IFTYPE_STATION) | ||
1208 | return -EOPNOTSUPP; | ||
1209 | |||
1210 | if (!rdev->ops->get_station) | ||
1211 | return -EOPNOTSUPP; | ||
1212 | |||
1213 | err = 0; | ||
1214 | wdev_lock(wdev); | ||
1215 | if (wdev->current_bss) | ||
1216 | memcpy(addr, wdev->current_bss->pub.bssid, ETH_ALEN); | ||
1217 | else | ||
1218 | err = -EOPNOTSUPP; | ||
1219 | wdev_unlock(wdev); | ||
1220 | if (err) | ||
1221 | return err; | ||
1222 | |||
1223 | err = rdev->ops->get_station(&rdev->wiphy, dev, addr, &sinfo); | ||
1224 | if (err) | ||
1225 | return err; | ||
1226 | |||
1227 | if (!(sinfo.filled & STATION_INFO_TX_BITRATE)) | ||
1228 | return -EOPNOTSUPP; | ||
1229 | |||
1230 | rate->value = 0; | ||
1231 | |||
1232 | if (!(sinfo.txrate.flags & RATE_INFO_FLAGS_MCS)) | ||
1233 | rate->value = 100000 * sinfo.txrate.legacy; | ||
1234 | |||
1235 | return 0; | ||
1236 | } | ||
1237 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwrate); | ||
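The two rate handlers juggle three different units: wext rate values are in bit/s, the bitrate mask in siwrate is kept in kbit/s (hence the / 1000), and station_info.txrate.legacy as reported by the driver is in 100 kbit/s steps (hence the * 100000 in giwrate). A standalone check of that bookkeeping:

#include <stdio.h>

int main(void)
{
	int wext_bps  = 54000000;            /* user space asks for 54 Mbit/s */
	int mask_kbps = wext_bps / 1000;     /* 54000, as in siwrate */

	int legacy     = 540;                /* driver reports 540 * 100 kbit/s */
	int report_bps = 100000 * legacy;    /* 54000000 bit/s, as in giwrate */

	printf("mask: %d kbit/s, reported: %d bit/s\n", mask_kbps, report_bps);
	return 0;
}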
1238 | |||
1239 | /* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */ | ||
1240 | struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) | ||
1241 | { | ||
1242 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
1243 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
1244 | /* we are under RTNL - globally locked - so can use static structs */ | ||
1245 | static struct iw_statistics wstats; | ||
1246 | static struct station_info sinfo; | ||
1247 | u8 bssid[ETH_ALEN]; | ||
1248 | |||
1249 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) | ||
1250 | return NULL; | ||
1251 | |||
1252 | if (!rdev->ops->get_station) | ||
1253 | return NULL; | ||
1254 | |||
1255 | /* Grab BSSID of current BSS, if any */ | ||
1256 | wdev_lock(wdev); | ||
1257 | if (!wdev->current_bss) { | ||
1258 | wdev_unlock(wdev); | ||
1259 | return NULL; | ||
1260 | } | ||
1261 | memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); | ||
1262 | wdev_unlock(wdev); | ||
1263 | |||
1264 | if (rdev->ops->get_station(&rdev->wiphy, dev, bssid, &sinfo)) | ||
1265 | return NULL; | ||
1266 | |||
1267 | memset(&wstats, 0, sizeof(wstats)); | ||
1268 | |||
1269 | switch (rdev->wiphy.signal_type) { | ||
1270 | case CFG80211_SIGNAL_TYPE_MBM: | ||
1271 | if (sinfo.filled & STATION_INFO_SIGNAL) { | ||
1272 | int sig = sinfo.signal; | ||
1273 | wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; | ||
1274 | wstats.qual.updated |= IW_QUAL_QUAL_UPDATED; | ||
1275 | wstats.qual.updated |= IW_QUAL_DBM; | ||
1276 | wstats.qual.level = sig; | ||
1277 | if (sig < -110) | ||
1278 | sig = -110; | ||
1279 | else if (sig > -40) | ||
1280 | sig = -40; | ||
1281 | wstats.qual.qual = sig + 110; | ||
1282 | break; | ||
1283 | } | ||
1284 | case CFG80211_SIGNAL_TYPE_UNSPEC: | ||
1285 | if (sinfo.filled & STATION_INFO_SIGNAL) { | ||
1286 | wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; | ||
1287 | wstats.qual.updated |= IW_QUAL_QUAL_UPDATED; | ||
1288 | wstats.qual.level = sinfo.signal; | ||
1289 | wstats.qual.qual = sinfo.signal; | ||
1290 | break; | ||
1291 | } | ||
1292 | default: | ||
1293 | wstats.qual.updated |= IW_QUAL_LEVEL_INVALID; | ||
1294 | wstats.qual.updated |= IW_QUAL_QUAL_INVALID; | ||
1295 | } | ||
1296 | |||
1297 | wstats.qual.updated |= IW_QUAL_NOISE_INVALID; | ||
1298 | |||
1299 | return &wstats; | ||
1300 | } | ||
1301 | EXPORT_SYMBOL_GPL(cfg80211_wireless_stats); | ||
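For CFG80211_SIGNAL_TYPE_MBM the raw dBm value is reported unchanged in qual.level, while the derived qual.qual is clamped to [-110 dBm, -40 dBm] and shifted into a 0..70 range. A standalone illustration of that mapping:

#include <stdio.h>

static int dbm_to_qual(int sig)
{
	if (sig < -110)
		sig = -110;
	else if (sig > -40)
		sig = -40;
	return sig + 110;       /* 0 (worst) .. 70 (best) */
}

int main(void)
{
	printf("-95 dBm -> qual %d\n", dbm_to_qual(-95));  /* 15 */
	printf("-65 dBm -> qual %d\n", dbm_to_qual(-65));  /* 45 */
	printf("-30 dBm -> qual %d\n", dbm_to_qual(-30));  /* 70 (clamped) */
	return 0;
}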
1302 | |||
1303 | int cfg80211_wext_siwap(struct net_device *dev, | ||
1304 | struct iw_request_info *info, | ||
1305 | struct sockaddr *ap_addr, char *extra) | ||
1306 | { | ||
1307 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
1308 | |||
1309 | switch (wdev->iftype) { | ||
1310 | case NL80211_IFTYPE_ADHOC: | ||
1311 | return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra); | ||
1312 | case NL80211_IFTYPE_STATION: | ||
1313 | return cfg80211_mgd_wext_siwap(dev, info, ap_addr, extra); | ||
1314 | case NL80211_IFTYPE_WDS: | ||
1315 | return cfg80211_wds_wext_siwap(dev, info, ap_addr, extra); | ||
1316 | default: | ||
1317 | return -EOPNOTSUPP; | ||
1318 | } | ||
1319 | } | ||
1320 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwap); | ||
1321 | |||
1322 | int cfg80211_wext_giwap(struct net_device *dev, | ||
1323 | struct iw_request_info *info, | ||
1324 | struct sockaddr *ap_addr, char *extra) | ||
1325 | { | ||
1326 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
1327 | |||
1328 | switch (wdev->iftype) { | ||
1329 | case NL80211_IFTYPE_ADHOC: | ||
1330 | return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra); | ||
1331 | case NL80211_IFTYPE_STATION: | ||
1332 | return cfg80211_mgd_wext_giwap(dev, info, ap_addr, extra); | ||
1333 | case NL80211_IFTYPE_WDS: | ||
1334 | return cfg80211_wds_wext_giwap(dev, info, ap_addr, extra); | ||
1335 | default: | ||
1336 | return -EOPNOTSUPP; | ||
1337 | } | ||
1338 | } | ||
1339 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwap); | ||
1340 | |||
1341 | int cfg80211_wext_siwessid(struct net_device *dev, | ||
1342 | struct iw_request_info *info, | ||
1343 | struct iw_point *data, char *ssid) | ||
1344 | { | ||
1345 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
1346 | |||
1347 | switch (wdev->iftype) { | ||
1348 | case NL80211_IFTYPE_ADHOC: | ||
1349 | return cfg80211_ibss_wext_siwessid(dev, info, data, ssid); | ||
1350 | case NL80211_IFTYPE_STATION: | ||
1351 | return cfg80211_mgd_wext_siwessid(dev, info, data, ssid); | ||
1352 | default: | ||
1353 | return -EOPNOTSUPP; | ||
1354 | } | ||
1355 | } | ||
1356 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwessid); | ||
1357 | |||
1358 | int cfg80211_wext_giwessid(struct net_device *dev, | ||
1359 | struct iw_request_info *info, | ||
1360 | struct iw_point *data, char *ssid) | ||
1361 | { | ||
1362 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
1363 | |||
1364 | switch (wdev->iftype) { | ||
1365 | case NL80211_IFTYPE_ADHOC: | ||
1366 | return cfg80211_ibss_wext_giwessid(dev, info, data, ssid); | ||
1367 | case NL80211_IFTYPE_STATION: | ||
1368 | return cfg80211_mgd_wext_giwessid(dev, info, data, ssid); | ||
1369 | default: | ||
1370 | return -EOPNOTSUPP; | ||
1371 | } | ||
1372 | } | ||
1373 | EXPORT_SYMBOL_GPL(cfg80211_wext_giwessid); | ||
1374 | |||
1375 | static const iw_handler cfg80211_handlers[] = { | ||
1376 | [IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname, | ||
1377 | [IW_IOCTL_IDX(SIOCSIWFREQ)] = (iw_handler) cfg80211_wext_siwfreq, | ||
1378 | [IW_IOCTL_IDX(SIOCGIWFREQ)] = (iw_handler) cfg80211_wext_giwfreq, | ||
1379 | [IW_IOCTL_IDX(SIOCSIWMODE)] = (iw_handler) cfg80211_wext_siwmode, | ||
1380 | [IW_IOCTL_IDX(SIOCGIWMODE)] = (iw_handler) cfg80211_wext_giwmode, | ||
1381 | [IW_IOCTL_IDX(SIOCGIWRANGE)] = (iw_handler) cfg80211_wext_giwrange, | ||
1382 | [IW_IOCTL_IDX(SIOCSIWAP)] = (iw_handler) cfg80211_wext_siwap, | ||
1383 | [IW_IOCTL_IDX(SIOCGIWAP)] = (iw_handler) cfg80211_wext_giwap, | ||
1384 | [IW_IOCTL_IDX(SIOCSIWMLME)] = (iw_handler) cfg80211_wext_siwmlme, | ||
1385 | [IW_IOCTL_IDX(SIOCSIWSCAN)] = (iw_handler) cfg80211_wext_siwscan, | ||
1386 | [IW_IOCTL_IDX(SIOCGIWSCAN)] = (iw_handler) cfg80211_wext_giwscan, | ||
1387 | [IW_IOCTL_IDX(SIOCSIWESSID)] = (iw_handler) cfg80211_wext_siwessid, | ||
1388 | [IW_IOCTL_IDX(SIOCGIWESSID)] = (iw_handler) cfg80211_wext_giwessid, | ||
1389 | [IW_IOCTL_IDX(SIOCSIWRATE)] = (iw_handler) cfg80211_wext_siwrate, | ||
1390 | [IW_IOCTL_IDX(SIOCGIWRATE)] = (iw_handler) cfg80211_wext_giwrate, | ||
1391 | [IW_IOCTL_IDX(SIOCSIWRTS)] = (iw_handler) cfg80211_wext_siwrts, | ||
1392 | [IW_IOCTL_IDX(SIOCGIWRTS)] = (iw_handler) cfg80211_wext_giwrts, | ||
1393 | [IW_IOCTL_IDX(SIOCSIWFRAG)] = (iw_handler) cfg80211_wext_siwfrag, | ||
1394 | [IW_IOCTL_IDX(SIOCGIWFRAG)] = (iw_handler) cfg80211_wext_giwfrag, | ||
1395 | [IW_IOCTL_IDX(SIOCSIWTXPOW)] = (iw_handler) cfg80211_wext_siwtxpower, | ||
1396 | [IW_IOCTL_IDX(SIOCGIWTXPOW)] = (iw_handler) cfg80211_wext_giwtxpower, | ||
1397 | [IW_IOCTL_IDX(SIOCSIWRETRY)] = (iw_handler) cfg80211_wext_siwretry, | ||
1398 | [IW_IOCTL_IDX(SIOCGIWRETRY)] = (iw_handler) cfg80211_wext_giwretry, | ||
1399 | [IW_IOCTL_IDX(SIOCSIWENCODE)] = (iw_handler) cfg80211_wext_siwencode, | ||
1400 | [IW_IOCTL_IDX(SIOCGIWENCODE)] = (iw_handler) cfg80211_wext_giwencode, | ||
1401 | [IW_IOCTL_IDX(SIOCSIWPOWER)] = (iw_handler) cfg80211_wext_siwpower, | ||
1402 | [IW_IOCTL_IDX(SIOCGIWPOWER)] = (iw_handler) cfg80211_wext_giwpower, | ||
1403 | [IW_IOCTL_IDX(SIOCSIWGENIE)] = (iw_handler) cfg80211_wext_siwgenie, | ||
1404 | [IW_IOCTL_IDX(SIOCSIWAUTH)] = (iw_handler) cfg80211_wext_siwauth, | ||
1405 | [IW_IOCTL_IDX(SIOCGIWAUTH)] = (iw_handler) cfg80211_wext_giwauth, | ||
1406 | [IW_IOCTL_IDX(SIOCSIWENCODEEXT)]= (iw_handler) cfg80211_wext_siwencodeext, | ||
1407 | }; | ||
1408 | |||
1409 | const struct iw_handler_def cfg80211_wext_handler = { | ||
1410 | .num_standard = ARRAY_SIZE(cfg80211_handlers), | ||
1411 | .standard = cfg80211_handlers, | ||
1412 | .get_wireless_stats = cfg80211_wireless_stats, | ||
1413 | }; | ||
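How the table is consumed: a net_device that wants wext compatibility points its wireless_handlers at the exported definition, so every SIOC[GS]IW* ioctl lands in cfg80211_handlers[] and /proc/net/wireless uses cfg80211_wireless_stats() via .get_wireless_stats. A minimal sketch, assuming the cfg80211_wext_handler declaration is visible (in this patch it lives in the private net/wireless/wext-compat.h, so the include below is only valid from within net/wireless/); the function name is illustrative, not taken from a specific driver:

#include <linux/netdevice.h>
#include "wext-compat.h"

static void example_wext_hookup(struct net_device *dev)
{
#ifdef CONFIG_WIRELESS_EXT
	/* route all wireless-extensions ioctls through the compat layer */
	dev->wireless_handlers = &cfg80211_wext_handler;
#endif
}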
diff --git a/net/wireless/wext-compat.h b/net/wireless/wext-compat.h new file mode 100644 index 000000000000..20b3daef6964 --- /dev/null +++ b/net/wireless/wext-compat.h | |||
@@ -0,0 +1,49 @@ | |||
1 | #ifndef __WEXT_COMPAT | ||
2 | #define __WEXT_COMPAT | ||
3 | |||
4 | #include <net/iw_handler.h> | ||
5 | #include <linux/wireless.h> | ||
6 | |||
7 | int cfg80211_ibss_wext_siwfreq(struct net_device *dev, | ||
8 | struct iw_request_info *info, | ||
9 | struct iw_freq *freq, char *extra); | ||
10 | int cfg80211_ibss_wext_giwfreq(struct net_device *dev, | ||
11 | struct iw_request_info *info, | ||
12 | struct iw_freq *freq, char *extra); | ||
13 | int cfg80211_ibss_wext_siwap(struct net_device *dev, | ||
14 | struct iw_request_info *info, | ||
15 | struct sockaddr *ap_addr, char *extra); | ||
16 | int cfg80211_ibss_wext_giwap(struct net_device *dev, | ||
17 | struct iw_request_info *info, | ||
18 | struct sockaddr *ap_addr, char *extra); | ||
19 | int cfg80211_ibss_wext_siwessid(struct net_device *dev, | ||
20 | struct iw_request_info *info, | ||
21 | struct iw_point *data, char *ssid); | ||
22 | int cfg80211_ibss_wext_giwessid(struct net_device *dev, | ||
23 | struct iw_request_info *info, | ||
24 | struct iw_point *data, char *ssid); | ||
25 | |||
26 | int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | ||
27 | struct iw_request_info *info, | ||
28 | struct iw_freq *freq, char *extra); | ||
29 | int cfg80211_mgd_wext_giwfreq(struct net_device *dev, | ||
30 | struct iw_request_info *info, | ||
31 | struct iw_freq *freq, char *extra); | ||
32 | int cfg80211_mgd_wext_siwap(struct net_device *dev, | ||
33 | struct iw_request_info *info, | ||
34 | struct sockaddr *ap_addr, char *extra); | ||
35 | int cfg80211_mgd_wext_giwap(struct net_device *dev, | ||
36 | struct iw_request_info *info, | ||
37 | struct sockaddr *ap_addr, char *extra); | ||
38 | int cfg80211_mgd_wext_siwessid(struct net_device *dev, | ||
39 | struct iw_request_info *info, | ||
40 | struct iw_point *data, char *ssid); | ||
41 | int cfg80211_mgd_wext_giwessid(struct net_device *dev, | ||
42 | struct iw_request_info *info, | ||
43 | struct iw_point *data, char *ssid); | ||
44 | |||
45 | int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq); | ||
46 | |||
47 | |||
48 | extern const struct iw_handler_def cfg80211_wext_handler; | ||
49 | #endif /* __WEXT_COMPAT */ | ||
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c new file mode 100644 index 000000000000..d16cd9ea4d00 --- /dev/null +++ b/net/wireless/wext-sme.c | |||
@@ -0,0 +1,404 @@ | |||
1 | /* | ||
2 | * cfg80211 wext compat for managed mode. | ||
3 | * | ||
4 | * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * Copyright (C) 2009 Intel Corporation. All rights reserved. | ||
6 | */ | ||
7 | |||
8 | #include <linux/etherdevice.h> | ||
9 | #include <linux/if_arp.h> | ||
10 | #include <net/cfg80211.h> | ||
11 | #include "wext-compat.h" | ||
12 | #include "nl80211.h" | ||
13 | |||
14 | int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, | ||
15 | struct wireless_dev *wdev) | ||
16 | { | ||
17 | struct cfg80211_cached_keys *ck = NULL; | ||
18 | const u8 *prev_bssid = NULL; | ||
19 | int err, i; | ||
20 | |||
21 | ASSERT_RDEV_LOCK(rdev); | ||
22 | ASSERT_WDEV_LOCK(wdev); | ||
23 | |||
24 | if (!netif_running(wdev->netdev)) | ||
25 | return 0; | ||
26 | |||
27 | wdev->wext.connect.ie = wdev->wext.ie; | ||
28 | wdev->wext.connect.ie_len = wdev->wext.ie_len; | ||
29 | wdev->wext.connect.privacy = wdev->wext.default_key != -1; | ||
30 | |||
31 | if (wdev->wext.keys) { | ||
32 | wdev->wext.keys->def = wdev->wext.default_key; | ||
33 | wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key; | ||
34 | } | ||
35 | |||
36 | if (!wdev->wext.connect.ssid_len) | ||
37 | return 0; | ||
38 | |||
39 | if (wdev->wext.keys) { | ||
40 | ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL); | ||
41 | if (!ck) | ||
42 | return -ENOMEM; | ||
43 | for (i = 0; i < 6; i++) | ||
44 | ck->params[i].key = ck->data[i]; | ||
45 | } | ||
46 | |||
47 | if (wdev->wext.prev_bssid_valid) | ||
48 | prev_bssid = wdev->wext.prev_bssid; | ||
49 | |||
50 | err = __cfg80211_connect(rdev, wdev->netdev, | ||
51 | &wdev->wext.connect, ck, prev_bssid); | ||
52 | if (err) | ||
53 | kfree(ck); | ||
54 | |||
55 | return err; | ||
56 | } | ||
57 | |||
58 | int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | ||
59 | struct iw_request_info *info, | ||
60 | struct iw_freq *wextfreq, char *extra) | ||
61 | { | ||
62 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
63 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
64 | struct ieee80211_channel *chan = NULL; | ||
65 | int err, freq; | ||
66 | |||
67 | /* call only for station! */ | ||
68 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) | ||
69 | return -EINVAL; | ||
70 | |||
71 | freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); | ||
72 | if (freq < 0) | ||
73 | return freq; | ||
74 | |||
75 | if (freq) { | ||
76 | chan = ieee80211_get_channel(wdev->wiphy, freq); | ||
77 | if (!chan) | ||
78 | return -EINVAL; | ||
79 | if (chan->flags & IEEE80211_CHAN_DISABLED) | ||
80 | return -EINVAL; | ||
81 | } | ||
82 | |||
83 | cfg80211_lock_rdev(rdev); | ||
84 | mutex_lock(&rdev->devlist_mtx); | ||
85 | wdev_lock(wdev); | ||
86 | |||
87 | if (wdev->sme_state != CFG80211_SME_IDLE) { | ||
88 | bool event = true; | ||
89 | |||
90 | if (wdev->wext.connect.channel == chan) { | ||
91 | err = 0; | ||
92 | goto out; | ||
93 | } | ||
94 | |||
95 | /* if SSID set, we'll try right again, avoid event */ | ||
96 | if (wdev->wext.connect.ssid_len) | ||
97 | event = false; | ||
98 | err = __cfg80211_disconnect(rdev, dev, | ||
99 | WLAN_REASON_DEAUTH_LEAVING, event); | ||
100 | if (err) | ||
101 | goto out; | ||
102 | } | ||
103 | |||
104 | |||
105 | wdev->wext.connect.channel = chan; | ||
106 | |||
107 | /* SSID is not set, we just want to switch channel */ | ||
108 | if (chan && !wdev->wext.connect.ssid_len) { | ||
109 | err = rdev_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); | ||
110 | goto out; | ||
111 | } | ||
112 | |||
113 | err = cfg80211_mgd_wext_connect(rdev, wdev); | ||
114 | out: | ||
115 | wdev_unlock(wdev); | ||
116 | mutex_unlock(&rdev->devlist_mtx); | ||
117 | cfg80211_unlock_rdev(rdev); | ||
118 | return err; | ||
119 | } | ||
120 | |||
121 | int cfg80211_mgd_wext_giwfreq(struct net_device *dev, | ||
122 | struct iw_request_info *info, | ||
123 | struct iw_freq *freq, char *extra) | ||
124 | { | ||
125 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
126 | struct ieee80211_channel *chan = NULL; | ||
127 | |||
128 | /* call only for station! */ | ||
129 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) | ||
130 | return -EINVAL; | ||
131 | |||
132 | wdev_lock(wdev); | ||
133 | if (wdev->current_bss) | ||
134 | chan = wdev->current_bss->pub.channel; | ||
135 | else if (wdev->wext.connect.channel) | ||
136 | chan = wdev->wext.connect.channel; | ||
137 | wdev_unlock(wdev); | ||
138 | |||
139 | if (chan) { | ||
140 | freq->m = chan->center_freq; | ||
141 | freq->e = 6; | ||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | /* no channel if not joining */ | ||
146 | return -EINVAL; | ||
147 | } | ||
148 | |||
149 | int cfg80211_mgd_wext_siwessid(struct net_device *dev, | ||
150 | struct iw_request_info *info, | ||
151 | struct iw_point *data, char *ssid) | ||
152 | { | ||
153 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
154 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
155 | size_t len = data->length; | ||
156 | int err; | ||
157 | |||
158 | /* call only for station! */ | ||
159 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) | ||
160 | return -EINVAL; | ||
161 | |||
162 | if (!data->flags) | ||
163 | len = 0; | ||
164 | |||
165 | /* iwconfig uses nul termination in SSID.. */ | ||
166 | if (len > 0 && ssid[len - 1] == '\0') | ||
167 | len--; | ||
168 | |||
169 | cfg80211_lock_rdev(rdev); | ||
170 | mutex_lock(&rdev->devlist_mtx); | ||
171 | wdev_lock(wdev); | ||
172 | |||
173 | err = 0; | ||
174 | |||
175 | if (wdev->sme_state != CFG80211_SME_IDLE) { | ||
176 | bool event = true; | ||
177 | |||
178 | if (wdev->wext.connect.ssid && len && | ||
179 | len == wdev->wext.connect.ssid_len && | ||
180 | memcmp(wdev->wext.connect.ssid, ssid, len) == 0) | ||
181 | goto out; | ||
182 | |||
183 | /* if SSID set now, we'll try to connect, avoid event */ | ||
184 | if (len) | ||
185 | event = false; | ||
186 | err = __cfg80211_disconnect(rdev, dev, | ||
187 | WLAN_REASON_DEAUTH_LEAVING, event); | ||
188 | if (err) | ||
189 | goto out; | ||
190 | } | ||
191 | |||
192 | wdev->wext.prev_bssid_valid = false; | ||
193 | wdev->wext.connect.ssid = wdev->wext.ssid; | ||
194 | memcpy(wdev->wext.ssid, ssid, len); | ||
195 | wdev->wext.connect.ssid_len = len; | ||
196 | |||
197 | wdev->wext.connect.crypto.control_port = false; | ||
198 | |||
199 | err = cfg80211_mgd_wext_connect(rdev, wdev); | ||
200 | out: | ||
201 | wdev_unlock(wdev); | ||
202 | mutex_unlock(&rdev->devlist_mtx); | ||
203 | cfg80211_unlock_rdev(rdev); | ||
204 | return err; | ||
205 | } | ||
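The length fix-up near the top of cfg80211_mgd_wext_siwessid() caters for iwconfig passing the SSID nul-terminated with that nul counted in data->length, as the comment notes. A userspace sketch of the trimming:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char ssid[]  = "MyNetwork\0";      /* SSID with an embedded trailing nul */
	size_t len   = sizeof(ssid) - 1;   /* 10: the nul is counted in the length */

	if (len > 0 && ssid[len - 1] == '\0')
		len--;

	printf("ssid_len = %zu\n", len);   /* 9 */
	return 0;
}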
206 | |||
207 | int cfg80211_mgd_wext_giwessid(struct net_device *dev, | ||
208 | struct iw_request_info *info, | ||
209 | struct iw_point *data, char *ssid) | ||
210 | { | ||
211 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
212 | |||
213 | /* call only for station! */ | ||
214 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) | ||
215 | return -EINVAL; | ||
216 | |||
217 | data->flags = 0; | ||
218 | |||
219 | wdev_lock(wdev); | ||
220 | if (wdev->current_bss) { | ||
221 | const u8 *ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, | ||
222 | WLAN_EID_SSID); | ||
223 | if (ie) { | ||
224 | data->flags = 1; | ||
225 | data->length = ie[1]; | ||
226 | memcpy(ssid, ie + 2, data->length); | ||
227 | } | ||
228 | } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) { | ||
229 | data->flags = 1; | ||
230 | data->length = wdev->wext.connect.ssid_len; | ||
231 | memcpy(ssid, wdev->wext.connect.ssid, data->length); | ||
232 | } else | ||
233 | data->flags = 0; | ||
234 | wdev_unlock(wdev); | ||
235 | |||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | int cfg80211_mgd_wext_siwap(struct net_device *dev, | ||
240 | struct iw_request_info *info, | ||
241 | struct sockaddr *ap_addr, char *extra) | ||
242 | { | ||
243 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
244 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
245 | u8 *bssid = ap_addr->sa_data; | ||
246 | int err; | ||
247 | |||
248 | /* call only for station! */ | ||
249 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) | ||
250 | return -EINVAL; | ||
251 | |||
252 | if (ap_addr->sa_family != ARPHRD_ETHER) | ||
253 | return -EINVAL; | ||
254 | |||
255 | /* automatic mode */ | ||
256 | if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) | ||
257 | bssid = NULL; | ||
258 | |||
259 | cfg80211_lock_rdev(rdev); | ||
260 | mutex_lock(&rdev->devlist_mtx); | ||
261 | wdev_lock(wdev); | ||
262 | |||
263 | if (wdev->sme_state != CFG80211_SME_IDLE) { | ||
264 | err = 0; | ||
265 | /* both automatic */ | ||
266 | if (!bssid && !wdev->wext.connect.bssid) | ||
267 | goto out; | ||
268 | |||
269 | /* fixed already - and no change */ | ||
270 | if (wdev->wext.connect.bssid && bssid && | ||
271 | compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0) | ||
272 | goto out; | ||
273 | |||
274 | err = __cfg80211_disconnect(rdev, dev, | ||
275 | WLAN_REASON_DEAUTH_LEAVING, false); | ||
276 | if (err) | ||
277 | goto out; | ||
278 | } | ||
279 | |||
280 | if (bssid) { | ||
281 | memcpy(wdev->wext.bssid, bssid, ETH_ALEN); | ||
282 | wdev->wext.connect.bssid = wdev->wext.bssid; | ||
283 | } else | ||
284 | wdev->wext.connect.bssid = NULL; | ||
285 | |||
286 | err = cfg80211_mgd_wext_connect(rdev, wdev); | ||
287 | out: | ||
288 | wdev_unlock(wdev); | ||
289 | mutex_unlock(&rdev->devlist_mtx); | ||
290 | cfg80211_unlock_rdev(rdev); | ||
291 | return err; | ||
292 | } | ||
293 | |||
294 | int cfg80211_mgd_wext_giwap(struct net_device *dev, | ||
295 | struct iw_request_info *info, | ||
296 | struct sockaddr *ap_addr, char *extra) | ||
297 | { | ||
298 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
299 | |||
300 | /* call only for station! */ | ||
301 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) | ||
302 | return -EINVAL; | ||
303 | |||
304 | ap_addr->sa_family = ARPHRD_ETHER; | ||
305 | |||
306 | wdev_lock(wdev); | ||
307 | if (wdev->current_bss) | ||
308 | memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN); | ||
309 | else if (wdev->wext.connect.bssid) | ||
310 | memcpy(ap_addr->sa_data, wdev->wext.connect.bssid, ETH_ALEN); | ||
311 | else | ||
312 | memset(ap_addr->sa_data, 0, ETH_ALEN); | ||
313 | wdev_unlock(wdev); | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | int cfg80211_wext_siwgenie(struct net_device *dev, | ||
319 | struct iw_request_info *info, | ||
320 | struct iw_point *data, char *extra) | ||
321 | { | ||
322 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
323 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
324 | u8 *ie = extra; | ||
325 | int ie_len = data->length, err; | ||
326 | |||
327 | if (wdev->iftype != NL80211_IFTYPE_STATION) | ||
328 | return -EOPNOTSUPP; | ||
329 | |||
330 | if (!ie_len) | ||
331 | ie = NULL; | ||
332 | |||
333 | wdev_lock(wdev); | ||
334 | |||
335 | /* no change */ | ||
336 | err = 0; | ||
337 | if (wdev->wext.ie_len == ie_len && | ||
338 | memcmp(wdev->wext.ie, ie, ie_len) == 0) | ||
339 | goto out; | ||
340 | |||
341 | if (ie_len) { | ||
342 | ie = kmemdup(extra, ie_len, GFP_KERNEL); | ||
343 | if (!ie) { | ||
344 | err = -ENOMEM; | ||
345 | goto out; | ||
346 | } | ||
347 | } else | ||
348 | ie = NULL; | ||
349 | |||
350 | kfree(wdev->wext.ie); | ||
351 | wdev->wext.ie = ie; | ||
352 | wdev->wext.ie_len = ie_len; | ||
353 | |||
354 | if (wdev->sme_state != CFG80211_SME_IDLE) { | ||
355 | err = __cfg80211_disconnect(rdev, dev, | ||
356 | WLAN_REASON_DEAUTH_LEAVING, false); | ||
357 | if (err) | ||
358 | goto out; | ||
359 | } | ||
360 | |||
361 | /* userspace better not think we'll reconnect */ | ||
362 | err = 0; | ||
363 | out: | ||
364 | wdev_unlock(wdev); | ||
365 | return err; | ||
366 | } | ||
367 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwgenie); | ||
368 | |||
369 | int cfg80211_wext_siwmlme(struct net_device *dev, | ||
370 | struct iw_request_info *info, | ||
371 | struct iw_point *data, char *extra) | ||
372 | { | ||
373 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
374 | struct iw_mlme *mlme = (struct iw_mlme *)extra; | ||
375 | struct cfg80211_registered_device *rdev; | ||
376 | int err; | ||
377 | |||
378 | if (!wdev) | ||
379 | return -EOPNOTSUPP; | ||
380 | |||
381 | rdev = wiphy_to_dev(wdev->wiphy); | ||
382 | |||
383 | if (wdev->iftype != NL80211_IFTYPE_STATION) | ||
384 | return -EINVAL; | ||
385 | |||
386 | if (mlme->addr.sa_family != ARPHRD_ETHER) | ||
387 | return -EINVAL; | ||
388 | |||
389 | wdev_lock(wdev); | ||
390 | switch (mlme->cmd) { | ||
391 | case IW_MLME_DEAUTH: | ||
392 | case IW_MLME_DISASSOC: | ||
393 | err = __cfg80211_disconnect(rdev, dev, mlme->reason_code, | ||
394 | true); | ||
395 | break; | ||
396 | default: | ||
397 | err = -EOPNOTSUPP; | ||
398 | break; | ||
399 | } | ||
400 | wdev_unlock(wdev); | ||
401 | |||
402 | return err; | ||
403 | } | ||
404 | EXPORT_SYMBOL_GPL(cfg80211_wext_siwmlme); | ||
diff --git a/net/wireless/wext.c b/net/wireless/wext.c index 252c2010c2e2..5b4a0cee4418 100644 --- a/net/wireless/wext.c +++ b/net/wireless/wext.c | |||
@@ -417,6 +417,21 @@ static const int event_type_size[] = { | |||
417 | IW_EV_QUAL_LEN, /* IW_HEADER_TYPE_QUAL */ | 417 | IW_EV_QUAL_LEN, /* IW_HEADER_TYPE_QUAL */ |
418 | }; | 418 | }; |
419 | 419 | ||
420 | #ifdef CONFIG_COMPAT | ||
421 | static const int compat_event_type_size[] = { | ||
422 | IW_EV_COMPAT_LCP_LEN, /* IW_HEADER_TYPE_NULL */ | ||
423 | 0, | ||
424 | IW_EV_COMPAT_CHAR_LEN, /* IW_HEADER_TYPE_CHAR */ | ||
425 | 0, | ||
426 | IW_EV_COMPAT_UINT_LEN, /* IW_HEADER_TYPE_UINT */ | ||
427 | IW_EV_COMPAT_FREQ_LEN, /* IW_HEADER_TYPE_FREQ */ | ||
428 | IW_EV_COMPAT_ADDR_LEN, /* IW_HEADER_TYPE_ADDR */ | ||
429 | 0, | ||
430 | IW_EV_COMPAT_POINT_LEN, /* Without variable payload */ | ||
431 | IW_EV_COMPAT_PARAM_LEN, /* IW_HEADER_TYPE_PARAM */ | ||
432 | IW_EV_COMPAT_QUAL_LEN, /* IW_HEADER_TYPE_QUAL */ | ||
433 | }; | ||
434 | #endif | ||
420 | 435 | ||
421 | /************************ COMMON SUBROUTINES ************************/ | 436 | /************************ COMMON SUBROUTINES ************************/ |
422 | /* | 437 | /* |
@@ -610,6 +625,11 @@ static void wireless_seq_printf_stats(struct seq_file *seq, | |||
610 | { | 625 | { |
611 | /* Get stats from the driver */ | 626 | /* Get stats from the driver */ |
612 | struct iw_statistics *stats = get_wireless_stats(dev); | 627 | struct iw_statistics *stats = get_wireless_stats(dev); |
628 | static struct iw_statistics nullstats = {}; | ||
629 | |||
630 | /* show device if it's wireless regardless of current stats */ | ||
631 | if (!stats && dev->wireless_handlers) | ||
632 | stats = &nullstats; | ||
613 | 633 | ||
614 | if (stats) { | 634 | if (stats) { |
615 | seq_printf(seq, "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d " | 635 | seq_printf(seq, "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d " |
@@ -628,7 +648,9 @@ static void wireless_seq_printf_stats(struct seq_file *seq, | |||
628 | stats->discard.nwid, stats->discard.code, | 648 | stats->discard.nwid, stats->discard.code, |
629 | stats->discard.fragment, stats->discard.retries, | 649 | stats->discard.fragment, stats->discard.retries, |
630 | stats->discard.misc, stats->miss.beacon); | 650 | stats->discard.misc, stats->miss.beacon); |
631 | stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; | 651 | |
652 | if (stats != &nullstats) | ||
653 | stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; | ||
632 | } | 654 | } |
633 | } | 655 | } |
634 | 656 | ||
@@ -1250,65 +1272,57 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, | |||
1250 | } | 1272 | } |
1251 | #endif | 1273 | #endif |
1252 | 1274 | ||
1253 | /************************* EVENT PROCESSING *************************/ | 1275 | static int __net_init wext_pernet_init(struct net *net) |
1254 | /* | 1276 | { |
1255 | * Process events generated by the wireless layer or the driver. | 1277 | skb_queue_head_init(&net->wext_nlevents); |
1256 | * Most often, the event will be propagated through rtnetlink | 1278 | return 0; |
1257 | */ | 1279 | } |
1258 | 1280 | ||
1259 | /* ---------------------------------------------------------------- */ | 1281 | static void __net_exit wext_pernet_exit(struct net *net) |
1260 | /* | 1282 | { |
1261 | * Locking... | 1283 | skb_queue_purge(&net->wext_nlevents); |
1262 | * ---------- | 1284 | } |
1263 | * | ||
1264 | * Thanks to Herbert Xu <herbert@gondor.apana.org.au> for fixing | ||
1265 | * the locking issue in here and implementing this code ! | ||
1266 | * | ||
1267 | * The issue : wireless_send_event() is often called in interrupt context, | ||
1268 | * while the Netlink layer can never be called in interrupt context. | ||
1269 | * The fully formed RtNetlink events are queued, and then a tasklet is run | ||
1270 | * to feed those to Netlink. | ||
1271 | * The skb_queue is interrupt safe, and its lock is not held while calling | ||
1272 | * Netlink, so there is no possibility of deadlock. | ||
1273 | * Jean II | ||
1274 | */ | ||
1275 | 1285 | ||
1276 | static struct sk_buff_head wireless_nlevent_queue; | 1286 | static struct pernet_operations wext_pernet_ops = { |
1287 | .init = wext_pernet_init, | ||
1288 | .exit = wext_pernet_exit, | ||
1289 | }; | ||
1277 | 1290 | ||
1278 | static int __init wireless_nlevent_init(void) | 1291 | static int __init wireless_nlevent_init(void) |
1279 | { | 1292 | { |
1280 | skb_queue_head_init(&wireless_nlevent_queue); | 1293 | return register_pernet_subsys(&wext_pernet_ops); |
1281 | return 0; | ||
1282 | } | 1294 | } |
1283 | 1295 | ||
1284 | subsys_initcall(wireless_nlevent_init); | 1296 | subsys_initcall(wireless_nlevent_init); |
1285 | 1297 | ||
1286 | static void wireless_nlevent_process(unsigned long data) | 1298 | /* Process events generated by the wireless layer or the driver. */ |
1299 | static void wireless_nlevent_process(struct work_struct *work) | ||
1287 | { | 1300 | { |
1288 | struct sk_buff *skb; | 1301 | struct sk_buff *skb; |
1302 | struct net *net; | ||
1289 | 1303 | ||
1290 | while ((skb = skb_dequeue(&wireless_nlevent_queue))) | 1304 | rtnl_lock(); |
1291 | rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); | 1305 | |
1306 | for_each_net(net) { | ||
1307 | while ((skb = skb_dequeue(&net->wext_nlevents))) | ||
1308 | rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, | ||
1309 | GFP_KERNEL); | ||
1310 | } | ||
1311 | |||
1312 | rtnl_unlock(); | ||
1292 | } | 1313 | } |
1293 | 1314 | ||
1294 | static DECLARE_TASKLET(wireless_nlevent_tasklet, wireless_nlevent_process, 0); | 1315 | static DECLARE_WORK(wireless_nlevent_work, wireless_nlevent_process); |
1295 | 1316 | ||
1296 | /* ---------------------------------------------------------------- */ | 1317 | static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev, |
1297 | /* | 1318 | struct sk_buff *skb) |
1298 | * Fill a rtnetlink message with our event data. | ||
1299 | * Note that we propagate only the specified event and don't dump the | ||
1300 | * current wireless config. Dumping the wireless config is far too | ||
1301 | * expensive (for each parameter, the driver need to query the hardware). | ||
1302 | */ | ||
1303 | static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev, | ||
1304 | int type, char *event, int event_len) | ||
1305 | { | 1319 | { |
1306 | struct ifinfomsg *r; | 1320 | struct ifinfomsg *r; |
1307 | struct nlmsghdr *nlh; | 1321 | struct nlmsghdr *nlh; |
1308 | 1322 | ||
1309 | nlh = nlmsg_put(skb, 0, 0, type, sizeof(*r), 0); | 1323 | nlh = nlmsg_put(skb, 0, 0, RTM_NEWLINK, sizeof(*r), 0); |
1310 | if (nlh == NULL) | 1324 | if (!nlh) |
1311 | return -EMSGSIZE; | 1325 | return NULL; |
1312 | 1326 | ||
1313 | r = nlmsg_data(nlh); | 1327 | r = nlmsg_data(nlh); |
1314 | r->ifi_family = AF_UNSPEC; | 1328 | r->ifi_family = AF_UNSPEC; |
@@ -1319,48 +1333,14 @@ static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev, | |||
1319 | r->ifi_change = 0; /* Wireless changes don't affect those flags */ | 1333 | r->ifi_change = 0; /* Wireless changes don't affect those flags */ |
1320 | 1334 | ||
1321 | NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); | 1335 | NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); |
1322 | /* Add the wireless events in the netlink packet */ | ||
1323 | NLA_PUT(skb, IFLA_WIRELESS, event_len, event); | ||
1324 | 1336 | ||
1325 | return nlmsg_end(skb, nlh); | 1337 | return nlh; |
1326 | 1338 | nla_put_failure: | |
1327 | nla_put_failure: | ||
1328 | nlmsg_cancel(skb, nlh); | 1339 | nlmsg_cancel(skb, nlh); |
1329 | return -EMSGSIZE; | 1340 | return NULL; |
1330 | } | 1341 | } |
1331 | 1342 | ||
1332 | /* ---------------------------------------------------------------- */ | ||
1333 | /* | ||
1334 | * Create and broadcast and send it on the standard rtnetlink socket | ||
1335 | * This is a pure clone rtmsg_ifinfo() in net/core/rtnetlink.c | ||
1336 | * Andrzej Krzysztofowicz mandated that I used a IFLA_XXX field | ||
1337 | * within a RTM_NEWLINK event. | ||
1338 | */ | ||
1339 | static void rtmsg_iwinfo(struct net_device *dev, char *event, int event_len) | ||
1340 | { | ||
1341 | struct sk_buff *skb; | ||
1342 | int err; | ||
1343 | |||
1344 | if (!net_eq(dev_net(dev), &init_net)) | ||
1345 | return; | ||
1346 | |||
1347 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | ||
1348 | if (!skb) | ||
1349 | return; | ||
1350 | 1343 | ||
1351 | err = rtnetlink_fill_iwinfo(skb, dev, RTM_NEWLINK, event, event_len); | ||
1352 | if (err < 0) { | ||
1353 | WARN_ON(err == -EMSGSIZE); | ||
1354 | kfree_skb(skb); | ||
1355 | return; | ||
1356 | } | ||
1357 | |||
1358 | NETLINK_CB(skb).dst_group = RTNLGRP_LINK; | ||
1359 | skb_queue_tail(&wireless_nlevent_queue, skb); | ||
1360 | tasklet_schedule(&wireless_nlevent_tasklet); | ||
1361 | } | ||
1362 | |||
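The removed rtmsg_iwinfo() above silently dropped events for anything other than init_net; with the new per-namespace wext_nlevents queue, every struct net can deliver its own events. A hypothetical setup sketch for such a per-namespace queue (the example_* names and the exact placement are assumptions, not part of this hunk):

#include <linux/init.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>

/* Initialise the queue once for every network namespace that comes up. */
static int __net_init example_wext_net_init(struct net *net)
{
	skb_queue_head_init(&net->wext_nlevents);
	return 0;
}

static struct pernet_operations example_wext_net_ops = {
	.init = example_wext_net_init,
};

static int __init example_wext_setup(void)
{
	return register_pernet_subsys(&example_wext_net_ops);
}
subsys_initcall(example_wext_setup);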
1363 | /* ---------------------------------------------------------------- */ | ||
1364 | /* | 1344 | /* |
1365 | * Main event dispatcher. Called from other parts and drivers. | 1345 | * Main event dispatcher. Called from other parts and drivers. |
1366 | * Send the event on the appropriate channels. | 1346 | * Send the event on the appropriate channels. |
@@ -1369,7 +1349,7 @@ static void rtmsg_iwinfo(struct net_device *dev, char *event, int event_len) | |||
1369 | void wireless_send_event(struct net_device * dev, | 1349 | void wireless_send_event(struct net_device * dev, |
1370 | unsigned int cmd, | 1350 | unsigned int cmd, |
1371 | union iwreq_data * wrqu, | 1351 | union iwreq_data * wrqu, |
1372 | char * extra) | 1352 | const char * extra) |
1373 | { | 1353 | { |
1374 | const struct iw_ioctl_description * descr = NULL; | 1354 | const struct iw_ioctl_description * descr = NULL; |
1375 | int extra_len = 0; | 1355 | int extra_len = 0; |
@@ -1379,6 +1359,25 @@ void wireless_send_event(struct net_device * dev, | |||
1379 | int wrqu_off = 0; /* Offset in wrqu */ | 1359 | int wrqu_off = 0; /* Offset in wrqu */ |
1380 | /* Don't "optimise" the following variable, it will crash */ | 1360 | /* Don't "optimise" the following variable, it will crash */ |
1381 | unsigned cmd_index; /* *MUST* be unsigned */ | 1361 | unsigned cmd_index; /* *MUST* be unsigned */ |
1362 | struct sk_buff *skb; | ||
1363 | struct nlmsghdr *nlh; | ||
1364 | struct nlattr *nla; | ||
1365 | #ifdef CONFIG_COMPAT | ||
1366 | struct __compat_iw_event *compat_event; | ||
1367 | struct compat_iw_point compat_wrqu; | ||
1368 | struct sk_buff *compskb; | ||
1369 | #endif | ||
1370 | |||
1371 | /* | ||
1372 | * Nothing in the kernel sends scan events with data, be safe. | ||
1373 | * This is necessary because we cannot fix up scan event data | ||
1374 | * for compat, due to being contained in 'extra', but normally | ||
1375 | * applications are required to retrieve the scan data anyway | ||
1376 | * and no data is included in the event; this codifies that | ||
1377 | * practice. | ||
1378 | */ | ||
1379 | if (WARN_ON(cmd == SIOCGIWSCAN && extra)) | ||
1380 | extra = NULL; | ||
1382 | 1381 | ||
1383 | /* Get the description of the Event */ | 1382 | /* Get the description of the Event */ |
1384 | if (cmd <= SIOCIWLAST) { | 1383 | if (cmd <= SIOCIWLAST) { |
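The WARN_ON(cmd == SIOCGIWSCAN && extra) guard above codifies that scan events never carry a payload, since data passed via 'extra' could not be fixed up for compat anyway. Driver-side, a scan-complete notification therefore looks roughly like this (illustrative example, not taken from this patch):

#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>

static void example_report_scan_complete(struct net_device *dev)
{
	union iwreq_data wrqu;

	/* No event payload: userspace fetches the results via SIOCGIWSCAN. */
	memset(&wrqu, 0, sizeof(wrqu));
	wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
}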
@@ -1426,25 +1425,107 @@ void wireless_send_event(struct net_device * dev, | |||
1426 | hdr_len = event_type_size[descr->header_type]; | 1425 | hdr_len = event_type_size[descr->header_type]; |
1427 | event_len = hdr_len + extra_len; | 1426 | event_len = hdr_len + extra_len; |
1428 | 1427 | ||
1429 | /* Create temporary buffer to hold the event */ | 1428 | /* |
1430 | event = kmalloc(event_len, GFP_ATOMIC); | 1429 | * The problem for 64/32 bit. |
1431 | if (event == NULL) | 1430 | * |
1431 | * On 64-bit, a regular event is laid out as follows: | ||
1432 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
1433 | * | event.len | event.cmd | p a d d i n g | | ||
1434 | * | wrqu data ... (with the correct size) | | ||
1435 | * | ||
1436 | * This padding exists because we manipulate event->u, | ||
1437 | * and 'event' is not packed. | ||
1438 | * | ||
1439 | * An iw_point event is laid out like this instead: | ||
1440 | * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | | ||
1441 | * | event.len | event.cmd | p a d d i n g | | ||
1442 | * | iwpnt.len | iwpnt.flg | p a d d i n g | | ||
1443 | * | extra data ... | ||
1444 | * | ||
1445 | * The second padding exists because struct iw_point is extended, | ||
1446 | * but this depends on the platform... | ||
1447 | * | ||
1448 | * On 32-bit, none of this padding is present. | ||
1449 | */ | ||
1450 | |||
1451 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | ||
1452 | if (!skb) | ||
1453 | return; | ||
1454 | |||
1455 | /* Send via the RtNetlink event channel */ | ||
1456 | nlh = rtnetlink_ifinfo_prep(dev, skb); | ||
1457 | if (WARN_ON(!nlh)) { | ||
1458 | kfree_skb(skb); | ||
1459 | return; | ||
1460 | } | ||
1461 | |||
1462 | /* Add the wireless events in the netlink packet */ | ||
1463 | nla = nla_reserve(skb, IFLA_WIRELESS, event_len); | ||
1464 | if (!nla) { | ||
1465 | kfree_skb(skb); | ||
1432 | return; | 1466 | return; |
1467 | } | ||
1468 | event = nla_data(nla); | ||
1433 | 1469 | ||
1434 | /* Fill event */ | 1470 | /* Fill event - first clear to avoid data leaking */ |
1471 | memset(event, 0, hdr_len); | ||
1435 | event->len = event_len; | 1472 | event->len = event_len; |
1436 | event->cmd = cmd; | 1473 | event->cmd = cmd; |
1437 | memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN); | 1474 | memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN); |
1438 | if (extra) | 1475 | if (extra_len) |
1439 | memcpy(((char *) event) + hdr_len, extra, extra_len); | 1476 | memcpy(((char *) event) + hdr_len, extra, extra_len); |
1440 | 1477 | ||
1478 | nlmsg_end(skb, nlh); | ||
1479 | #ifdef CONFIG_COMPAT | ||
1480 | hdr_len = compat_event_type_size[descr->header_type]; | ||
1481 | event_len = hdr_len + extra_len; | ||
1482 | |||
1483 | compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | ||
1484 | if (!compskb) { | ||
1485 | kfree_skb(skb); | ||
1486 | return; | ||
1487 | } | ||
1488 | |||
1441 | /* Send via the RtNetlink event channel */ | 1489 | /* Send via the RtNetlink event channel */ |
1442 | rtmsg_iwinfo(dev, (char *) event, event_len); | 1490 | nlh = rtnetlink_ifinfo_prep(dev, compskb); |
1491 | if (WARN_ON(!nlh)) { | ||
1492 | kfree_skb(skb); | ||
1493 | kfree_skb(compskb); | ||
1494 | return; | ||
1495 | } | ||
1443 | 1496 | ||
1444 | /* Cleanup */ | 1497 | /* Add the wireless events in the netlink packet */ |
1445 | kfree(event); | 1498 | nla = nla_reserve(compskb, IFLA_WIRELESS, event_len); |
1499 | if (!nla) { | ||
1500 | kfree_skb(skb); | ||
1501 | kfree_skb(compskb); | ||
1502 | return; | ||
1503 | } | ||
1504 | compat_event = nla_data(nla); | ||
1446 | 1505 | ||
1447 | return; /* Always success, I guess ;-) */ | 1506 | compat_event->len = event_len; |
1507 | compat_event->cmd = cmd; | ||
1508 | if (descr->header_type == IW_HEADER_TYPE_POINT) { | ||
1509 | compat_wrqu.length = wrqu->data.length; | ||
1510 | compat_wrqu.flags = wrqu->data.flags; | ||
1511 | memcpy(&compat_event->pointer, | ||
1512 | ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF, | ||
1513 | hdr_len - IW_EV_COMPAT_LCP_LEN); | ||
1514 | if (extra_len) | ||
1515 | memcpy(((char *) compat_event) + hdr_len, | ||
1516 | extra, extra_len); | ||
1517 | } else { | ||
1518 | /* extra_len must be zero, so no if (extra) needed */ | ||
1519 | memcpy(&compat_event->pointer, wrqu, | ||
1520 | hdr_len - IW_EV_COMPAT_LCP_LEN); | ||
1521 | } | ||
1522 | |||
1523 | nlmsg_end(compskb, nlh); | ||
1524 | |||
1525 | skb_shinfo(skb)->frag_list = compskb; | ||
1526 | #endif | ||
1527 | skb_queue_tail(&dev_net(dev)->wext_nlevents, skb); | ||
1528 | schedule_work(&wireless_nlevent_work); | ||
1448 | } | 1529 | } |
1449 | EXPORT_SYMBOL(wireless_send_event); | 1530 | EXPORT_SYMBOL(wireless_send_event); |
1450 | 1531 | ||
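The long layout comment in wireless_send_event() above can be checked from userspace: on an LP64 build, union iwreq_data contains a pointer, so struct iw_event is 8-byte aligned and padding appears after event.cmd, while a 32-bit build packs the header into 4 bytes. An illustrative snippet (assumes the sanitized <linux/wireless.h> header is available; output values depend on the ABI being compiled for):

#include <stdio.h>
#include <stddef.h>
#include <sys/socket.h>
#include <linux/wireless.h>

int main(void)
{
	printf("sizeof(struct iw_event)      = %zu\n", sizeof(struct iw_event));
	printf("offsetof(struct iw_event, u) = %zu\n", offsetof(struct iw_event, u));
	printf("IW_EV_LCP_LEN                = %zu\n", (size_t)IW_EV_LCP_LEN);
	return 0;
}

Comparing offsetof(struct iw_event, u) between a 64-bit and a 32-bit build shows exactly the gap the compat path has to paper over with compat_event_type_size[].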
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c index a2adb51849a9..fef8db553e8d 100644 --- a/net/xfrm/xfrm_proc.c +++ b/net/xfrm/xfrm_proc.c | |||
@@ -60,7 +60,7 @@ static int xfrm_statistics_seq_open(struct inode *inode, struct file *file) | |||
60 | return single_open_net(inode, file, xfrm_statistics_seq_show); | 60 | return single_open_net(inode, file, xfrm_statistics_seq_show); |
61 | } | 61 | } |
62 | 62 | ||
63 | static struct file_operations xfrm_statistics_seq_fops = { | 63 | static const struct file_operations xfrm_statistics_seq_fops = { |
64 | .owner = THIS_MODULE, | 64 | .owner = THIS_MODULE, |
65 | .open = xfrm_statistics_seq_open, | 65 | .open = xfrm_statistics_seq_open, |
66 | .read = seq_read, | 66 | .read = seq_read, |