Diffstat (limited to 'net')
302 files changed, 18429 insertions, 5585 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index ee070722a3a3..afba51e60310 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -294,7 +294,7 @@ static void vlan_transfer_features(struct net_device *dev,
 	else
 		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
 
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
 	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
 #endif
 
@@ -529,7 +529,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 	switch (args.cmd) {
 	case SET_VLAN_INGRESS_PRIORITY_CMD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		vlan_dev_set_ingress_priority(dev,
 					      args.u.skb_priority,
@@ -539,7 +539,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 
 	case SET_VLAN_EGRESS_PRIORITY_CMD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		err = vlan_dev_set_egress_priority(dev,
 						   args.u.skb_priority,
@@ -548,7 +548,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 
 	case SET_VLAN_FLAG_CMD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		err = vlan_dev_change_flags(dev,
 					    args.vlan_qos ? args.u.flag : 0,
@@ -557,7 +557,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 
 	case SET_VLAN_NAME_TYPE_CMD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		if ((args.u.name_type >= 0) &&
 		    (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
@@ -573,14 +573,14 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
 
 	case ADD_VLAN_CMD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
 		err = register_vlan_device(dev, args.u.VID);
 		break;
 
 	case DEL_VLAN_CMD:
 		err = -EPERM;
-		if (!capable(CAP_NET_ADMIN))
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
 		unregister_vlan_dev(dev, NULL);
 		err = 0;
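
A minimal sketch (not part of the patch) of the permission pattern the vlan.c hunks above switch to: instead of capable(CAP_NET_ADMIN), which only passes for privilege in the initial user namespace, the check is made against the user namespace that owns the struct net the ioctl operates on. The function name example_vlan_ioctl and its body are hypothetical; ns_capable() and net->user_ns are the kernel interfaces used by the hunks.

#include <linux/capability.h>
#include <net/net_namespace.h>

static int example_vlan_ioctl(struct net *net)
{
	/* allow root inside a user namespace that owns this network
	 * namespace, not just global root as capable() would
	 */
	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ... perform the privileged operation here ... */
	return 0;
}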
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 402442402af7..4a6d31a082b9 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -409,7 +409,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
 	return err;
 }
 
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
 static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
 				   struct scatterlist *sgl, unsigned int sgc)
 {
@@ -531,6 +531,10 @@ static const struct header_ops vlan_header_ops = {
 	.parse	 = eth_header_parse,
 };
 
+static struct device_type vlan_type = {
+	.name	= "vlan",
+};
+
 static const struct net_device_ops vlan_netdev_ops;
 
 static int vlan_dev_init(struct net_device *dev)
@@ -564,7 +568,7 @@ static int vlan_dev_init(struct net_device *dev)
 	if (is_zero_ether_addr(dev->broadcast))
 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
 
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
 	dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
 #endif
 
@@ -579,6 +583,8 @@ static int vlan_dev_init(struct net_device *dev)
 
 	dev->netdev_ops = &vlan_netdev_ops;
 
+	SET_NETDEV_DEVTYPE(dev, &vlan_type);
+
 	if (is_vlan_dev(real_dev))
 		subclass = 1;
 
@@ -741,7 +747,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
 	.ndo_get_stats64	= vlan_dev_get_stats64,
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
 	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
 	.ndo_fcoe_enable	= vlan_dev_fcoe_enable,
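
A short sketch of the pattern added to vlan_dev_init() above: tagging a net_device with a struct device_type so the interface reports a DEVTYPE in uevents and sysfs. The "foo" names are hypothetical placeholders; struct device_type and SET_NETDEV_DEVTYPE() are the real APIs the hunk uses.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static struct device_type foo_type = {
	.name = "foo",
};

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* expose the device class alongside the usual Ethernet attributes */
	SET_NETDEV_DEVTYPE(dev, &foo_type);
}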
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 4819d31533e0..8eb6fbe8d8dd 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -74,6 +74,7 @@ struct br2684_vcc {
 	struct br2684_filter filter;
 #endif /* CONFIG_ATM_BR2684_IPFILTER */
 	unsigned int copies_needed, copies_failed;
+	atomic_t qspace;
 };
 
 struct br2684_dev {
@@ -181,18 +182,15 @@ static struct notifier_block atm_dev_notifier = {
 static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
 {
 	struct br2684_vcc *brvcc = BR2684_VCC(vcc);
-	struct net_device *net_dev = skb->dev;
 
-	pr_debug("(vcc %p ; net_dev %p )\n", vcc, net_dev);
+	pr_debug("(vcc %p ; net_dev %p )\n", vcc, brvcc->device);
 	brvcc->old_pop(vcc, skb);
 
-	if (!net_dev)
-		return;
-
-	if (atm_may_send(vcc, 0))
-		netif_wake_queue(net_dev);
-
+	/* If the queue space just went up from zero, wake */
+	if (atomic_inc_return(&brvcc->qspace) == 1)
+		netif_wake_queue(brvcc->device);
 }
+
 /*
  * Send a packet out a particular vcc.  Not to useful right now, but paves
  * the way for multiple vcc's per itf.  Returns true if we can send,
@@ -256,16 +254,19 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
 	ATM_SKB(skb)->atm_options = atmvcc->atm_options;
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
-	atmvcc->send(atmvcc, skb);
 
-	if (!atm_may_send(atmvcc, 0)) {
+	if (atomic_dec_return(&brvcc->qspace) < 1) {
+		/* No more please! */
 		netif_stop_queue(brvcc->device);
-		/*check for race with br2684_pop*/
-		if (atm_may_send(atmvcc, 0))
-			netif_start_queue(brvcc->device);
+		/* We might have raced with br2684_pop() */
+		if (unlikely(atomic_read(&brvcc->qspace) > 0))
+			netif_wake_queue(brvcc->device);
 	}
 
-	return 1;
+	/* If this fails immediately, the skb will be freed and br2684_pop()
+	   will wake the queue if appropriate. Just return an error so that
+	   the stats are updated correctly */
+	return !atmvcc->send(atmvcc, skb);
 }
 
 static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
@@ -504,6 +505,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 	brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
 	if (!brvcc)
 		return -ENOMEM;
+	/*
+	 * Allow two packets in the ATM queue. One actually being sent, and one
+	 * for the ATM 'TX done' handler to send. It shouldn't take long to get
+	 * the next one from the netdev queue, when we need it. More than that
+	 * would be bufferbloat.
+	 */
+	atomic_set(&brvcc->qspace, 2);
 	write_lock_irq(&devs_lock);
 	net_dev = br2684_find_dev(&be.ifspec);
 	if (net_dev == NULL) {
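
A generic sketch of the flow-control scheme br2684 adopts above: a driver-private atomic counter of free TX slots replaces polling atm_may_send(). The names struct foo_priv, foo_xmit and foo_tx_done are hypothetical; the atomic and netif calls are the real kernel primitives used by the patch.

#include <linux/atomic.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *dev;
	atomic_t qspace;		/* free TX slots, e.g. initialised to 2 */
};

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* claim a slot; stop the queue when the last one is taken */
	if (atomic_dec_return(&priv->qspace) < 1) {
		netif_stop_queue(dev);
		/* re-check in case the completion path raced with us */
		if (atomic_read(&priv->qspace) > 0)
			netif_wake_queue(dev);
	}

	/* hand the skb to the hardware here ... */
	dev_kfree_skb(skb);		/* placeholder for the real TX path */
	return NETDEV_TX_OK;
}

static void foo_tx_done(struct foo_priv *priv)
{
	/* a slot was freed; wake the queue on the 0 -> 1 transition */
	if (atomic_inc_return(&priv->qspace) == 1)
		netif_wake_queue(priv->dev);
}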
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 53f5244e28f8..8d8afb134b3a 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -6,6 +6,7 @@ config BATMAN_ADV
 	tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
 	depends on NET
 	select CRC16
+	select LIBCRC32C
 	default n
 	help
 	  B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
@@ -25,6 +26,16 @@ config BATMAN_ADV_BLA
 	  more than one mesh node in the same LAN, you can safely remove
 	  this feature and save some space.
 
+config BATMAN_ADV_DAT
+	bool "Distributed ARP Table"
+	depends on BATMAN_ADV && INET
+	default n
+	help
+	  This option enables DAT (Distributed ARP Table), a DHT based
+	  mechanism that increases ARP reliability on sparse wireless
+	  mesh networks. If you think that your network does not need
+	  this option you can safely remove it and save some space.
+
 config BATMAN_ADV_DEBUG
 	bool "B.A.T.M.A.N. debugging"
 	depends on BATMAN_ADV
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 8676d2b1d574..e45e3b4e32e3 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -23,6 +23,7 @@ batman-adv-y += bat_iv_ogm.o
 batman-adv-y += bitarray.o
 batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
 batman-adv-y += debugfs.o
+batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
 batman-adv-y += gateway_client.o
 batman-adv-y += gateway_common.o
 batman-adv-y += hard-interface.o
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index b02b75dae3a8..9f3925a85aab 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -57,20 +57,22 @@ out:
 static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_ogm_packet *batadv_ogm_packet;
+	unsigned char *ogm_buff;
 	uint32_t random_seqno;
 	int res = -ENOMEM;
 
 	/* randomize initial seqno to avoid collision */
 	get_random_bytes(&random_seqno, sizeof(random_seqno));
-	atomic_set(&hard_iface->seqno, random_seqno);
+	atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
 
-	hard_iface->packet_len = BATADV_OGM_HLEN;
-	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
-
-	if (!hard_iface->packet_buff)
+	hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
+	ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
+	if (!ogm_buff)
 		goto out;
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+	hard_iface->bat_iv.ogm_buff = ogm_buff;
+
+	batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
 	batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
 	batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
 	batadv_ogm_packet->header.ttl = 2;
@@ -87,15 +89,16 @@ out:
 
 static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
 {
-	kfree(hard_iface->packet_buff);
-	hard_iface->packet_buff = NULL;
+	kfree(hard_iface->bat_iv.ogm_buff);
+	hard_iface->bat_iv.ogm_buff = NULL;
 }
 
 static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_ogm_packet *batadv_ogm_packet;
+	unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+	batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
 	memcpy(batadv_ogm_packet->orig,
 	       hard_iface->net_dev->dev_addr, ETH_ALEN);
 	memcpy(batadv_ogm_packet->prev_sender,
@@ -106,8 +109,9 @@ static void
 batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_ogm_packet *batadv_ogm_packet;
+	unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+	batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
 	batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
 	batadv_ogm_packet->header.ttl = BATADV_TTL;
 }
@@ -407,9 +411,11 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 
 	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
 	    (packet_len < BATADV_MAX_AGGREGATION_BYTES))
-		skb_size = BATADV_MAX_AGGREGATION_BYTES + ETH_HLEN;
+		skb_size = BATADV_MAX_AGGREGATION_BYTES;
 	else
-		skb_size = packet_len + ETH_HLEN;
+		skb_size = packet_len;
+
+	skb_size += ETH_HLEN + NET_IP_ALIGN;
 
 	forw_packet_aggr->skb = dev_alloc_skb(skb_size);
 	if (!forw_packet_aggr->skb) {
@@ -418,7 +424,7 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
 		kfree(forw_packet_aggr);
 		goto out;
 	}
-	skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
+	skb_reserve(forw_packet_aggr->skb, ETH_HLEN + NET_IP_ALIGN);
 
 	INIT_HLIST_NODE(&forw_packet_aggr->list);
 
@@ -590,8 +596,10 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
 static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+	unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct batadv_hard_iface *primary_if;
+	int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
 	int vis_server, tt_num_changes = 0;
 	uint32_t seqno;
 	uint8_t bandwidth;
@@ -600,17 +608,16 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 
 	if (hard_iface == primary_if)
-		tt_num_changes = batadv_tt_append_diff(bat_priv,
-						       &hard_iface->packet_buff,
-						       &hard_iface->packet_len,
+		tt_num_changes = batadv_tt_append_diff(bat_priv, ogm_buff,
+						       ogm_buff_len,
 						       BATADV_OGM_HLEN);
 
-	batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+	batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
 
 	/* change sequence number to network order */
-	seqno = (uint32_t)atomic_read(&hard_iface->seqno);
+	seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
 	batadv_ogm_packet->seqno = htonl(seqno);
-	atomic_inc(&hard_iface->seqno);
+	atomic_inc(&hard_iface->bat_iv.ogm_seqno);
 
 	batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
 	batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
@@ -631,8 +638,8 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
 	}
 
 	batadv_slide_own_bcast_window(hard_iface);
-	batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
-				hard_iface->packet_len, hard_iface, 1,
+	batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
+				hard_iface->bat_iv.ogm_buff_len, hard_iface, 1,
 				batadv_iv_ogm_emit_send_time(bat_priv));
 
 	if (primary_if)
@@ -1015,7 +1022,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
 		return;
 
 	/* could be changed by schedule_own_packet() */
-	if_incoming_seqno = atomic_read(&if_incoming->seqno);
+	if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
 
 	if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
 		has_directlink_flag = 1;
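
A minimal sketch of the allocation pattern the aggregation hunks above switch to: size the skb for ETH_HLEN + NET_IP_ALIGN of headroom and reserve it up front so later headers end up naturally aligned. This is a generic illustration rather than code from the patch; my_alloc_frame and payload_len are hypothetical names.

#include <linux/skbuff.h>
#include <linux/if_ether.h>

static struct sk_buff *my_alloc_frame(unsigned int payload_len)
{
	struct sk_buff *skb;

	/* account for the headroom we are about to reserve */
	skb = dev_alloc_skb(payload_len + ETH_HLEN + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	/* push the data start past the Ethernet header plus the alignment
	 * pad, keeping the payload aligned for the receive path
	 */
	skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);

	return skb;
}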
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index aea174cdbfbd..5453b17d8df2 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -79,20 +79,17 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
 	 * or the old packet got delayed somewhere in the network. The
 	 * packet should be dropped without calling this function if the
 	 * seqno window is protected.
+	 *
+	 * seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE
+	 * or
+	 * seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE
 	 */
-	if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
-	    seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
+	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+		   "Other host probably restarted!\n");
 
-		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-			   "Other host probably restarted!\n");
-
-		bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
-		if (set_mark)
-			batadv_set_bit(seq_bits, 0);
-
-		return 1;
-	}
+	bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+	if (set_mark)
+		batadv_set_bit(seq_bits, 0);
 
-	/* never reached */
-	return 0;
+	return 1;
 }
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index fd8d5afec0dd..5aebe9327d68 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -40,15 +40,11 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
 /* return the index of the claim */
 static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
 {
-	const unsigned char *key = data;
+	struct batadv_claim *claim = (struct batadv_claim *)data;
 	uint32_t hash = 0;
-	size_t i;
 
-	for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
-		hash += key[i];
-		hash += (hash << 10);
-		hash ^= (hash >> 6);
-	}
+	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
+	hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
 
 	hash += (hash << 3);
 	hash ^= (hash >> 11);
@@ -61,15 +57,11 @@ static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
 static inline uint32_t batadv_choose_backbone_gw(const void *data,
 						 uint32_t size)
 {
-	const unsigned char *key = data;
+	struct batadv_claim *claim = (struct batadv_claim *)data;
 	uint32_t hash = 0;
-	size_t i;
 
-	for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
-		hash += key[i];
-		hash += (hash << 10);
-		hash ^= (hash >> 6);
-	}
+	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
+	hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
 
 	hash += (hash << 3);
 	hash ^= (hash >> 11);
@@ -85,8 +77,15 @@ static int batadv_compare_backbone_gw(const struct hlist_node *node,
 {
 	const void *data1 = container_of(node, struct batadv_backbone_gw,
 					 hash_entry);
+	const struct batadv_backbone_gw *gw1 = data1, *gw2 = data2;
+
+	if (!batadv_compare_eth(gw1->orig, gw2->orig))
+		return 0;
+
+	if (gw1->vid != gw2->vid)
+		return 0;
 
-	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
+	return 1;
 }
 
 /* compares address and vid of two claims */
@@ -95,8 +94,15 @@ static int batadv_compare_claim(const struct hlist_node *node,
 {
 	const void *data1 = container_of(node, struct batadv_claim,
 					 hash_entry);
+	const struct batadv_claim *cl1 = data1, *cl2 = data2;
+
+	if (!batadv_compare_eth(cl1->addr, cl2->addr))
+		return 0;
+
+	if (cl1->vid != cl2->vid)
+		return 0;
 
-	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
+	return 1;
 }
 
 /* free a backbone gw */
@@ -362,7 +368,7 @@ out:
  */
 static struct batadv_backbone_gw *
 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
-			   short vid)
+			   short vid, bool own_backbone)
 {
 	struct batadv_backbone_gw *entry;
 	struct batadv_orig_node *orig_node;
@@ -386,6 +392,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
 	entry->crc = BATADV_BLA_CRC_INIT;
 	entry->bat_priv = bat_priv;
 	atomic_set(&entry->request_sent, 0);
+	atomic_set(&entry->wait_periods, 0);
 	memcpy(entry->orig, orig, ETH_ALEN);
 
 	/* one for the hash, one for returning */
@@ -409,6 +416,16 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
 				 "became a backbone gateway");
 		batadv_orig_node_free_ref(orig_node);
 	}
+
+	if (own_backbone) {
+		batadv_bla_send_announce(bat_priv, entry);
+
+		/* this will be decreased in the worker thread */
+		atomic_inc(&entry->request_sent);
+		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
+		atomic_inc(&bat_priv->bla.num_requests);
+	}
+
 	return entry;
 }
 
@@ -424,7 +441,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
 
 	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
 						 primary_if->net_dev->dev_addr,
-						 vid);
+						 vid, true);
 	if (unlikely(!backbone_gw))
 		return;
 
@@ -632,7 +649,8 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
 	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
 		return 0;
 
-	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
+						 false);
 
 	if (unlikely(!backbone_gw))
 		return 1;
@@ -730,7 +748,8 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
 
 	/* register the gateway if not yet available, and add the claim. */
 
-	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
+						 false);
 
 	if (unlikely(!backbone_gw))
 		return 1;
@@ -1140,6 +1159,24 @@ static void batadv_bla_periodic_work(struct work_struct *work)
 			backbone_gw->lasttime = jiffies;
 
 			batadv_bla_send_announce(bat_priv, backbone_gw);
+
+			/* request_sent is only set after creation to avoid
+			 * problems when we are not yet known as backbone gw
+			 * in the backbone.
+			 *
+			 * We can reset this now after we waited some periods
+			 * to give bridge forward delays and bla group forming
+			 * some grace time.
+			 */
+
+			if (atomic_read(&backbone_gw->request_sent) == 0)
+				continue;
+
+			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
+				continue;
+
+			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
+			atomic_set(&backbone_gw->request_sent, 0);
 		}
 		rcu_read_unlock();
 	}
@@ -1212,8 +1249,7 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 /**
  * batadv_bla_check_bcast_duplist
  * @bat_priv: the bat priv with all the soft interface information
- * @bcast_packet: encapsulated broadcast frame plus batman header
- * @bcast_packet_len: length of encapsulated broadcast frame plus batman header
+ * @skb: contains the bcast_packet to be checked
  *
  * check if it is on our broadcast list. Another gateway might
 * have sent the same packet because it is connected to the same backbone,
@@ -1225,20 +1261,17 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 * the same host however as this might be intended.
 */
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
-				   struct batadv_bcast_packet *bcast_packet,
-				   int bcast_packet_len)
+				   struct sk_buff *skb)
 {
-	int i, length, curr, ret = 0;
-	uint8_t *content;
-	uint16_t crc;
+	int i, curr, ret = 0;
+	__be32 crc;
+	struct batadv_bcast_packet *bcast_packet;
 	struct batadv_bcast_duplist_entry *entry;
 
-	length = bcast_packet_len - sizeof(*bcast_packet);
-	content = (uint8_t *)bcast_packet;
-	content += sizeof(*bcast_packet);
+	bcast_packet = (struct batadv_bcast_packet *)skb->data;
 
 	/* calculate the crc ... */
-	crc = crc16(0, content, length);
+	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
 
 	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
 
@@ -1585,23 +1618,11 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 	struct hlist_head *head;
 	uint32_t i;
 	bool is_own;
-	int ret = 0;
 	uint8_t *primary_addr;
 
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
-				 net_dev->name);
-		goto out;
-	}
-
-	if (primary_if->if_status != BATADV_IF_ACTIVE) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - primary interface not active\n",
-				 net_dev->name);
+	primary_if = batadv_seq_print_text_primary_if_get(seq);
+	if (!primary_if)
 		goto out;
-	}
 
 	primary_addr = primary_if->net_dev->dev_addr;
 	seq_printf(seq,
@@ -1628,7 +1649,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
-	return ret;
+	return 0;
 }
 
 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
@@ -1643,23 +1664,11 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 	int secs, msecs;
 	uint32_t i;
 	bool is_own;
-	int ret = 0;
 	uint8_t *primary_addr;
 
-	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
-				 net_dev->name);
-		goto out;
-	}
-
-	if (primary_if->if_status != BATADV_IF_ACTIVE) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - primary interface not active\n",
-				 net_dev->name);
+	primary_if = batadv_seq_print_text_primary_if_get(seq);
+	if (!primary_if)
 		goto out;
-	}
 
 	primary_addr = primary_if->net_dev->dev_addr;
 	seq_printf(seq,
@@ -1693,5 +1702,5 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 out:
 	if (primary_if)
 		batadv_hardif_free_ref(primary_if);
-	return ret;
+	return 0;
 }
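
An illustrative sketch of why the claim and backbone_gw hash/compare callbacks above move from raw memcmp() and byte loops over "ETH_ALEN + sizeof(short)" to per-field helpers: hashing and comparing the fields individually keeps compiler padding inside the struct out of the result. The helper below is a plain Jenkins one-at-a-time step and is only assumed to mirror what batadv_hash_bytes() does; struct my_claim is a stand-in for struct batadv_claim.

#include <linux/types.h>
#include <linux/if_ether.h>

struct my_claim {
	uint8_t addr[ETH_ALEN];
	short vid;
};

static uint32_t my_hash_bytes(uint32_t hash, const void *data, uint32_t size)
{
	const unsigned char *key = data;
	uint32_t i;

	for (i = 0; i < size; i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}
	return hash;
}

static uint32_t my_choose_claim(const struct my_claim *claim, uint32_t size)
{
	uint32_t hash = 0;

	/* hash the fields, not the raw struct, so padding is ignored */
	hash = my_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
	hash = my_hash_bytes(hash, &claim->vid, sizeof(claim->vid));

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}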
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 789cb73bde67..196d9a0254bc 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -31,8 +31,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
 					     void *offset);
 int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
-				   struct batadv_bcast_packet *bcast_packet,
-				   int hdr_size);
+				   struct sk_buff *skb);
 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 				    struct batadv_hard_iface *primary_if,
 				    struct batadv_hard_iface *oldif);
@@ -81,8 +80,7 @@ static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
 
 static inline int
 batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
-			       struct batadv_bcast_packet *bcast_packet,
-			       int hdr_size)
+			       struct sk_buff *skb)
 {
 	return 0;
 }
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 391d4fb2026f..6f58ddd53bff 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -31,6 +31,7 @@
 #include "vis.h"
 #include "icmp_socket.h"
 #include "bridge_loop_avoidance.h"
+#include "distributed-arp-table.h"
 
 static struct dentry *batadv_debugfs;
 
@@ -99,15 +100,17 @@ int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 
 static int batadv_log_open(struct inode *inode, struct file *file)
 {
+	if (!try_module_get(THIS_MODULE))
+		return -EBUSY;
+
 	nonseekable_open(inode, file);
 	file->private_data = inode->i_private;
-	batadv_inc_module_count();
 	return 0;
 }
 
 static int batadv_log_release(struct inode *inode, struct file *file)
 {
-	batadv_dec_module_count();
+	module_put(THIS_MODULE);
 	return 0;
 }
 
@@ -278,6 +281,19 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
 
 #endif
 
+#ifdef CONFIG_BATMAN_ADV_DAT
+/**
+ * batadv_dat_cache_open - Prepare file handler for reads from dat_chache
+ * @inode: inode which was opened
+ * @file: file handle to be initialized
+ */
+static int batadv_dat_cache_open(struct inode *inode, struct file *file)
+{
+	struct net_device *net_dev = (struct net_device *)inode->i_private;
+	return single_open(file, batadv_dat_cache_seq_print_text, net_dev);
+}
+#endif
+
 static int batadv_transtable_local_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
@@ -307,7 +323,17 @@ struct batadv_debuginfo batadv_debuginfo_##_name = {	\
 	}						\
 };
 
+/* the following attributes are general and therefore they will be directly
+ * placed in the BATADV_DEBUGFS_SUBDIR subdirectory of debugfs
+ */
 static BATADV_DEBUGINFO(routing_algos, S_IRUGO, batadv_algorithms_open);
+
+static struct batadv_debuginfo *batadv_general_debuginfos[] = {
+	&batadv_debuginfo_routing_algos,
+	NULL,
+};
+
+/* The following attributes are per soft interface */
 static BATADV_DEBUGINFO(originators, S_IRUGO, batadv_originators_open);
 static BATADV_DEBUGINFO(gateways, S_IRUGO, batadv_gateways_open);
 static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
@@ -317,6 +343,9 @@ static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
 static BATADV_DEBUGINFO(bla_backbone_table, S_IRUGO,
 			batadv_bla_backbone_table_open);
 #endif
+#ifdef CONFIG_BATMAN_ADV_DAT
+static BATADV_DEBUGINFO(dat_cache, S_IRUGO, batadv_dat_cache_open);
+#endif
 static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
 			batadv_transtable_local_open);
 static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open);
@@ -329,6 +358,9 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
 	&batadv_debuginfo_bla_claim_table,
 	&batadv_debuginfo_bla_backbone_table,
 #endif
+#ifdef CONFIG_BATMAN_ADV_DAT
+	&batadv_debuginfo_dat_cache,
+#endif
 	&batadv_debuginfo_transtable_local,
 	&batadv_debuginfo_vis_data,
 	NULL,
@@ -336,7 +368,7 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
 
 void batadv_debugfs_init(void)
 {
-	struct batadv_debuginfo *bat_debug;
+	struct batadv_debuginfo **bat_debug;
 	struct dentry *file;
 
 	batadv_debugfs = debugfs_create_dir(BATADV_DEBUGFS_SUBDIR, NULL);
@@ -344,17 +376,23 @@ void batadv_debugfs_init(void)
 		batadv_debugfs = NULL;
 
 	if (!batadv_debugfs)
-		goto out;
+		goto err;
 
-	bat_debug = &batadv_debuginfo_routing_algos;
-	file = debugfs_create_file(bat_debug->attr.name,
-				   S_IFREG | bat_debug->attr.mode,
-				   batadv_debugfs, NULL, &bat_debug->fops);
-	if (!file)
-		pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
+	for (bat_debug = batadv_general_debuginfos; *bat_debug; ++bat_debug) {
+		file = debugfs_create_file(((*bat_debug)->attr).name,
+					   S_IFREG | ((*bat_debug)->attr).mode,
+					   batadv_debugfs, NULL,
+					   &(*bat_debug)->fops);
+		if (!file) {
+			pr_err("Can't add general debugfs file: %s\n",
+			       ((*bat_debug)->attr).name);
+			goto err;
+		}
+	}
 
-out:
 	return;
+err:
+	debugfs_remove_recursive(batadv_debugfs);
 }
 
 void batadv_debugfs_destroy(void)
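
A stand-alone sketch of the registration pattern batadv_debugfs_init() adopts above: keep the attributes in a NULL-terminated array, loop over it, and tear everything down with debugfs_remove_recursive() if any single file fails. The my_* names and the "example" directory are hypothetical; debugfs_create_dir/_file, single_open and friends are the real kernel APIs.

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *my_debugfs;

static int my_show(struct seq_file *seq, void *offset)
{
	seq_puts(seq, "hello\n");
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_show, inode->i_private);
}

static const struct file_operations my_fops = {
	.owner	 = THIS_MODULE,
	.open	 = my_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

struct my_debuginfo {
	const char *name;
	const struct file_operations *fops;
};

static struct my_debuginfo my_debuginfos[] = {
	{ "status", &my_fops },
	{ NULL, NULL },		/* terminator, like batadv_general_debuginfos */
};

static int my_debugfs_init(void)
{
	struct my_debuginfo *d;

	my_debugfs = debugfs_create_dir("example", NULL);
	if (!my_debugfs)
		return -ENOMEM;

	for (d = my_debuginfos; d->name; d++) {
		if (!debugfs_create_file(d->name, 0444, my_debugfs,
					 NULL, d->fops))
			goto err;
	}
	return 0;

err:
	debugfs_remove_recursive(my_debugfs);
	return -ENOMEM;
}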
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
new file mode 100644
index 000000000000..8e1d89d2b1c1
--- /dev/null
+++ b/net/batman-adv/distributed-arp-table.c
@@ -0,0 +1,1066 @@
+/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+ *
+ * Antonio Quartulli
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
+
+#include "main.h"
+#include "hash.h"
+#include "distributed-arp-table.h"
+#include "hard-interface.h"
+#include "originator.h"
+#include "send.h"
+#include "types.h"
+#include "translation-table.h"
+#include "unicast.h"
+
+static void batadv_dat_purge(struct work_struct *work);
+
+/**
+ * batadv_dat_start_timer - initialise the DAT periodic worker
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
+{
+	INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
+	queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
+			   msecs_to_jiffies(10000));
+}
+
+/**
+ * batadv_dat_entry_free_ref - decrements the dat_entry refcounter and possibly
+ * free it
+ * @dat_entry: the oentry to free
+ */
+static void batadv_dat_entry_free_ref(struct batadv_dat_entry *dat_entry)
+{
+	if (atomic_dec_and_test(&dat_entry->refcount))
+		kfree_rcu(dat_entry, rcu);
+}
+
+/**
+ * batadv_dat_to_purge - checks whether a dat_entry has to be purged or not
+ * @dat_entry: the entry to check
+ *
+ * Returns true if the entry has to be purged now, false otherwise
+ */
+static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
+{
+	return batadv_has_timed_out(dat_entry->last_update,
+				    BATADV_DAT_ENTRY_TIMEOUT);
+}
+
+/**
+ * __batadv_dat_purge - delete entries from the DAT local storage
+ * @bat_priv: the bat priv with all the soft interface information
+ * @to_purge: function in charge to decide whether an entry has to be purged or
+ *	      not. This function takes the dat_entry as argument and has to
+ *	      returns a boolean value: true is the entry has to be deleted,
+ *	      false otherwise
+ *
+ * Loops over each entry in the DAT local storage and delete it if and only if
+ * the to_purge function passed as argument returns true
+ */
+static void __batadv_dat_purge(struct batadv_priv *bat_priv,
+			       bool (*to_purge)(struct batadv_dat_entry *))
+{
+	spinlock_t *list_lock; /* protects write access to the hash lists */
+	struct batadv_dat_entry *dat_entry;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	uint32_t i;
+
+	if (!bat_priv->dat.hash)
+		return;
+
+	for (i = 0; i < bat_priv->dat.hash->size; i++) {
+		head = &bat_priv->dat.hash->table[i];
+		list_lock = &bat_priv->dat.hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(dat_entry, node, node_tmp, head,
+					  hash_entry) {
+			/* if an helper function has been passed as parameter,
+			 * ask it if the entry has to be purged or not
+			 */
+			if (to_purge && !to_purge(dat_entry))
+				continue;
+
+			hlist_del_rcu(node);
+			batadv_dat_entry_free_ref(dat_entry);
+		}
+		spin_unlock_bh(list_lock);
+	}
+}
+
+/**
+ * batadv_dat_purge - periodic task that deletes old entries from the local DAT
+ * hash table
+ * @work: kernel work struct
+ */
+static void batadv_dat_purge(struct work_struct *work)
+{
+	struct delayed_work *delayed_work;
+	struct batadv_priv_dat *priv_dat;
+	struct batadv_priv *bat_priv;
+
+	delayed_work = container_of(work, struct delayed_work, work);
+	priv_dat = container_of(delayed_work, struct batadv_priv_dat, work);
+	bat_priv = container_of(priv_dat, struct batadv_priv, dat);
+
+	__batadv_dat_purge(bat_priv, batadv_dat_to_purge);
+	batadv_dat_start_timer(bat_priv);
+}
+
+/**
+ * batadv_compare_dat - comparing function used in the local DAT hash table
+ * @node: node in the local table
+ * @data2: second object to compare the node to
+ *
+ * Returns 1 if the two entry are the same, 0 otherwise
+ */
+static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
+{
+	const void *data1 = container_of(node, struct batadv_dat_entry,
+					 hash_entry);
+
+	return (memcmp(data1, data2, sizeof(__be32)) == 0 ? 1 : 0);
+}
+
+/**
+ * batadv_arp_hw_src - extract the hw_src field from an ARP packet
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ *
+ * Returns the value of the hw_src field in the ARP packet
+ */
+static uint8_t *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
+{
+	uint8_t *addr;
+
+	addr = (uint8_t *)(skb->data + hdr_size);
+	addr += ETH_HLEN + sizeof(struct arphdr);
+
+	return addr;
+}
+
+/**
+ * batadv_arp_ip_src - extract the ip_src field from an ARP packet
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ *
+ * Returns the value of the ip_src field in the ARP packet
+ */
+static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
+{
+	return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN);
+}
+
+/**
+ * batadv_arp_hw_dst - extract the hw_dst field from an ARP packet
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ *
+ * Returns the value of the hw_dst field in the ARP packet
+ */
+static uint8_t *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
+{
+	return batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN + 4;
+}
+
+/**
+ * batadv_arp_ip_dst - extract the ip_dst field from an ARP packet
+ * @skb: ARP packet
+ * @hdr_size: size of the possible header before the ARP packet
+ *
+ * Returns the value of the ip_dst field in the ARP packet
+ */
+static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
+{
+	return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN * 2 + 4);
+}
+
+/**
+ * batadv_hash_dat - compute the hash value for an IP address
+ * @data: data to hash
+ * @size: size of the hash table
+ *
+ * Returns the selected index in the hash table for the given data
+ */
+static uint32_t batadv_hash_dat(const void *data, uint32_t size)
+{
+	const unsigned char *key = data;
+	uint32_t hash = 0;
+	size_t i;
+
+	for (i = 0; i < 4; i++) {
+		hash += key[i];
+		hash += (hash << 10);
+		hash ^= (hash >> 6);
+	}
+
+	hash += (hash << 3);
+	hash ^= (hash >> 11);
+	hash += (hash << 15);
+
+	return hash % size;
+}
+
+/**
+ * batadv_dat_entry_hash_find - looks for a given dat_entry in the local hash
+ * table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ip: search key
+ *
+ * Returns the dat_entry if found, NULL otherwise
+ */
+static struct batadv_dat_entry *
+batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL;
+	struct batadv_hashtable *hash = bat_priv->dat.hash;
+	uint32_t index;
+
+	if (!hash)
+		return NULL;
+
+	index = batadv_hash_dat(&ip, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) {
+		if (dat_entry->ip != ip)
+			continue;
+
+		if (!atomic_inc_not_zero(&dat_entry->refcount))
+			continue;
+
+		dat_entry_tmp = dat_entry;
+		break;
+	}
+	rcu_read_unlock();
+
+	return dat_entry_tmp;
+}
+
+/**
+ * batadv_dat_entry_add - add a new dat entry or update it if already exists
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ip: ipv4 to add/edit
+ * @mac_addr: mac address to assign to the given ipv4
+ */
+static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
+				 uint8_t *mac_addr)
+{
+	struct batadv_dat_entry *dat_entry;
+	int hash_added;
+
+	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip);
+	/* if this entry is already known, just update it */
+	if (dat_entry) {
+		if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
+			memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
+		dat_entry->last_update = jiffies;
+		batadv_dbg(BATADV_DBG_DAT, bat_priv,
+			   "Entry updated: %pI4 %pM\n", &dat_entry->ip,
+			   dat_entry->mac_addr);
+		goto out;
+	}
+
+	dat_entry = kmalloc(sizeof(*dat_entry), GFP_ATOMIC);
+	if (!dat_entry)
+		goto out;
+
+	dat_entry->ip = ip;
+	memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
+	dat_entry->last_update = jiffies;
+	atomic_set(&dat_entry->refcount, 2);
+
+	hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat,
+				     batadv_hash_dat, &dat_entry->ip,
+				     &dat_entry->hash_entry);
+
+	if (unlikely(hash_added != 0)) {
+		/* remove the reference for the hash */
+		batadv_dat_entry_free_ref(dat_entry);
+		goto out;
+	}
+
+	batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM\n",
+		   &dat_entry->ip, dat_entry->mac_addr);
+
+out:
+	if (dat_entry)
+		batadv_dat_entry_free_ref(dat_entry);
+}
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+
+/**
+ * batadv_dbg_arp - print a debug message containing all the ARP packet details
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: ARP packet
+ * @type: ARP type
+ * @hdr_size: size of the possible header before the ARP packet
+ * @msg: message to print together with the debugging information
+ */
+static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
+			   uint16_t type, int hdr_size, char *msg)
+{
+	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
+	struct batadv_bcast_packet *bcast_pkt;
+	uint8_t *orig_addr;
+	__be32 ip_src, ip_dst;
+
+	if (msg)
+		batadv_dbg(BATADV_DBG_DAT, bat_priv, "%s\n", msg);
+
+	ip_src = batadv_arp_ip_src(skb, hdr_size);
+	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
+	batadv_dbg(BATADV_DBG_DAT, bat_priv,
+		   "ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]\n",
+		   batadv_arp_hw_src(skb, hdr_size), &ip_src,
+		   batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
+
+	if (hdr_size == 0)
+		return;
+
+	/* if the ARP packet is encapsulated in a batman packet, let's print
+	 * some debug messages
+	 */
+	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
+
+	switch (unicast_4addr_packet->u.header.packet_type) {
+	case BATADV_UNICAST:
+		batadv_dbg(BATADV_DBG_DAT, bat_priv,
+			   "* encapsulated within a UNICAST packet\n");
+		break;
+	case BATADV_UNICAST_4ADDR:
+		batadv_dbg(BATADV_DBG_DAT, bat_priv,
+			   "* encapsulated within a UNICAST_4ADDR packet (src: %pM)\n",
+			   unicast_4addr_packet->src);
+		switch (unicast_4addr_packet->subtype) {
+		case BATADV_P_DAT_DHT_PUT:
+			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_PUT\n");
+			break;
+		case BATADV_P_DAT_DHT_GET:
+			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_GET\n");
+			break;
+		case BATADV_P_DAT_CACHE_REPLY:
+			batadv_dbg(BATADV_DBG_DAT, bat_priv,
+				   "* type: DAT_CACHE_REPLY\n");
+			break;
+		case BATADV_P_DATA:
+			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DATA\n");
+			break;
+		default:
+			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
+				   unicast_4addr_packet->u.header.packet_type);
+		}
+		break;
+	case BATADV_BCAST:
+		bcast_pkt = (struct batadv_bcast_packet *)unicast_4addr_packet;
+		orig_addr = bcast_pkt->orig;
+		batadv_dbg(BATADV_DBG_DAT, bat_priv,
+			   "* encapsulated within a BCAST packet (src: %pM)\n",
385 | orig_addr); | ||
386 | break; | ||
387 | default: | ||
388 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | ||
389 | "* encapsulated within an unknown packet type (0x%x)\n", | ||
390 | unicast_4addr_packet->u.header.packet_type); | ||
391 | } | ||
392 | } | ||
393 | |||
394 | #else | ||
395 | |||
396 | static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, | ||
397 | uint16_t type, int hdr_size, char *msg) | ||
398 | { | ||
399 | } | ||
400 | |||
401 | #endif /* CONFIG_BATMAN_ADV_DEBUG */ | ||
402 | |||
403 | /** | ||
404 | * batadv_is_orig_node_eligible - check whether a node can be a DHT candidate | ||
405 | * @res: the array with the already selected candidates | ||
406 | * @select: number of already selected candidates | ||
407 | * @tmp_max: address of the currently evaluated node | ||
408 | * @max: current round max address | ||
409 | * @last_max: address of the last selected candidate | ||
410 | * @candidate: orig_node under evaluation | ||
411 | * @max_orig_node: last selected candidate | ||
412 | * | ||
413 | * Returns true if the node has been elected as next candidate, false otherwise | ||
414 | */ | ||
415 | static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res, | ||
416 | int select, batadv_dat_addr_t tmp_max, | ||
417 | batadv_dat_addr_t max, | ||
418 | batadv_dat_addr_t last_max, | ||
419 | struct batadv_orig_node *candidate, | ||
420 | struct batadv_orig_node *max_orig_node) | ||
421 | { | ||
422 | bool ret = false; | ||
423 | int j; | ||
424 | |||
425 | /* Check if this node has already been selected... */ | ||
426 | for (j = 0; j < select; j++) | ||
427 | if (res[j].orig_node == candidate) | ||
428 | break; | ||
429 | /* ..and possibly skip it */ | ||
430 | if (j < select) | ||
431 | goto out; | ||
432 | /* sanity check: has it already been selected? This should not happen */ | ||
433 | if (tmp_max > last_max) | ||
434 | goto out; | ||
435 | /* check if during this iteration an originator with a closer dht | ||
436 | * address has already been found | ||
437 | */ | ||
438 | if (tmp_max < max) | ||
439 | goto out; | ||
440 | /* this is a hash collision with the temporarily selected node. Choose | ||
441 | * the one with the lowest address | ||
442 | */ | ||
443 | if ((tmp_max == max) && | ||
444 | (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0)) | ||
445 | goto out; | ||
446 | |||
447 | ret = true; | ||
448 | out: | ||
449 | return ret; | ||
450 | } | ||
451 | |||
452 | /** | ||
453 | * batadv_choose_next_candidate - select the next DHT candidate | ||
454 | * @bat_priv: the bat priv with all the soft interface information | ||
455 | * @cands: candidates array | ||
456 | * @select: number of candidates already present in the array | ||
457 | * @ip_key: key to look up in the DHT | ||
458 | * @last_max: pointer where the address of the selected candidate will be saved | ||
459 | */ | ||
460 | static void batadv_choose_next_candidate(struct batadv_priv *bat_priv, | ||
461 | struct batadv_dat_candidate *cands, | ||
462 | int select, batadv_dat_addr_t ip_key, | ||
463 | batadv_dat_addr_t *last_max) | ||
464 | { | ||
465 | batadv_dat_addr_t max = 0, tmp_max = 0; | ||
466 | struct batadv_orig_node *orig_node, *max_orig_node = NULL; | ||
467 | struct batadv_hashtable *hash = bat_priv->orig_hash; | ||
468 | struct hlist_node *node; | ||
469 | struct hlist_head *head; | ||
470 | int i; | ||
471 | |||
472 | /* if no node is eligible as candidate, leave the candidate type as | ||
473 | * NOT_FOUND | ||
474 | */ | ||
475 | cands[select].type = BATADV_DAT_CANDIDATE_NOT_FOUND; | ||
476 | |||
477 | /* iterate over the originator list and find the node with the closest | ||
478 | * dat_address which has not been selected yet | ||
479 | */ | ||
480 | for (i = 0; i < hash->size; i++) { | ||
481 | head = &hash->table[i]; | ||
482 | |||
483 | rcu_read_lock(); | ||
484 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | ||
485 | /* the dht space is a ring and addresses are unsigned */ | ||
486 | tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr + | ||
487 | ip_key; | ||
488 | |||
489 | if (!batadv_is_orig_node_eligible(cands, select, | ||
490 | tmp_max, max, | ||
491 | *last_max, orig_node, | ||
492 | max_orig_node)) | ||
493 | continue; | ||
494 | |||
495 | if (!atomic_inc_not_zero(&orig_node->refcount)) | ||
496 | continue; | ||
497 | |||
498 | max = tmp_max; | ||
499 | if (max_orig_node) | ||
500 | batadv_orig_node_free_ref(max_orig_node); | ||
501 | max_orig_node = orig_node; | ||
502 | } | ||
503 | rcu_read_unlock(); | ||
504 | } | ||
505 | if (max_orig_node) { | ||
506 | cands[select].type = BATADV_DAT_CANDIDATE_ORIG; | ||
507 | cands[select].orig_node = max_orig_node; | ||
508 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | ||
509 | "dat_select_candidates() %d: selected %pM addr=%u dist=%u\n", | ||
510 | select, max_orig_node->orig, max_orig_node->dat_addr, | ||
511 | max); | ||
512 | } | ||
513 | *last_max = max; | ||
514 | } | ||
515 | |||
516 | /** | ||
517 | * batadv_dat_select_candidates - selects the nodes which the DHT message has to | ||
518 | * be sent to | ||
519 | * @bat_priv: the bat priv with all the soft interface information | ||
520 | * @ip_dst: ipv4 to look up in the DHT | ||
521 | * | ||
522 | * An originator O is selected if and only if its DHT_ID value is one of the | ||
523 | * three values closest (from the LEFT, with wrap around if needed) to the | ||
524 | * hash value of the key. ip_dst is the key. | ||
525 | * | ||
526 | * Returns the candidate array of size BATADV_DAT_CANDIDATES_NUM | ||
527 | */ | ||
528 | static struct batadv_dat_candidate * | ||
529 | batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) | ||
530 | { | ||
531 | int select; | ||
532 | batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; | ||
533 | struct batadv_dat_candidate *res; | ||
534 | |||
535 | if (!bat_priv->orig_hash) | ||
536 | return NULL; | ||
537 | |||
538 | res = kmalloc(BATADV_DAT_CANDIDATES_NUM * sizeof(*res), GFP_ATOMIC); | ||
539 | if (!res) | ||
540 | return NULL; | ||
541 | |||
542 | ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst, | ||
543 | BATADV_DAT_ADDR_MAX); | ||
544 | |||
545 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | ||
546 | "dat_select_candidates(): IP=%pI4 hash(IP)=%u\n", &ip_dst, | ||
547 | ip_key); | ||
548 | |||
549 | for (select = 0; select < BATADV_DAT_CANDIDATES_NUM; select++) | ||
550 | batadv_choose_next_candidate(bat_priv, res, select, ip_key, | ||
551 | &last_max); | ||
552 | |||
553 | return res; | ||
554 | } | ||
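
The distance used in batadv_choose_next_candidate() is an unsigned ring metric: BATADV_DAT_ADDR_MAX - dat_addr + ip_key wraps around in the width of batadv_dat_addr_t, is largest for the originator whose dat_addr equals ip_key, and shrinks as dat_addr moves further past the key, so successive rounds pick the best remaining metric not exceeding the previous round's last_max. A small userspace sketch of the metric; the 16-bit width of batadv_dat_addr_t is an assumption here, though the ordering holds for any unsigned width:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t dat_addr_t;			/* assumed width of batadv_dat_addr_t */
#define DAT_ADDR_MAX ((dat_addr_t)~(dat_addr_t)0)

/* same expression as tmp_max in batadv_choose_next_candidate() */
static dat_addr_t ring_metric(dat_addr_t dat_addr, dat_addr_t ip_key)
{
	return DAT_ADDR_MAX - dat_addr + ip_key;
}

int main(void)
{
	dat_addr_t ip_key = 0x1000;

	printf("0x1000 -> 0x%04x\n", ring_metric(0x1000, ip_key)); /* 0xffff: exact match wins */
	printf("0x1001 -> 0x%04x\n", ring_metric(0x1001, ip_key)); /* 0xfffe: next best candidate */
	printf("0x0fff -> 0x%04x\n", ring_metric(0x0fff, ip_key)); /* 0x0000: farthest on the ring */
	return 0;
}
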
555 | |||
556 | /** | ||
557 | * batadv_dat_send_data - send a payload to the selected candidates | ||
558 | * @bat_priv: the bat priv with all the soft interface information | ||
559 | * @skb: payload to send | ||
560 | * @ip: the DHT key | ||
561 | * @packet_subtype: unicast4addr packet subtype to use | ||
562 | * | ||
563 | * In this function the skb is copied by means of pskb_copy() and is sent as | ||
564 | * a unicast packet to each of the selected candidates | ||
565 | * | ||
566 | * Returns true if the packet is sent to at least one candidate, false otherwise | ||
567 | */ | ||
568 | static bool batadv_dat_send_data(struct batadv_priv *bat_priv, | ||
569 | struct sk_buff *skb, __be32 ip, | ||
570 | int packet_subtype) | ||
571 | { | ||
572 | int i; | ||
573 | bool ret = false; | ||
574 | int send_status; | ||
575 | struct batadv_neigh_node *neigh_node = NULL; | ||
576 | struct sk_buff *tmp_skb; | ||
577 | struct batadv_dat_candidate *cand; | ||
578 | |||
579 | cand = batadv_dat_select_candidates(bat_priv, ip); | ||
580 | if (!cand) | ||
581 | goto out; | ||
582 | |||
583 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip); | ||
584 | |||
585 | for (i = 0; i < BATADV_DAT_CANDIDATES_NUM; i++) { | ||
586 | if (cand[i].type == BATADV_DAT_CANDIDATE_NOT_FOUND) | ||
587 | continue; | ||
588 | |||
589 | neigh_node = batadv_orig_node_get_router(cand[i].orig_node); | ||
590 | if (!neigh_node) | ||
591 | goto free_orig; | ||
592 | |||
593 | tmp_skb = pskb_copy(skb, GFP_ATOMIC); | ||
594 | if (!batadv_unicast_4addr_prepare_skb(bat_priv, tmp_skb, | ||
595 | cand[i].orig_node, | ||
596 | packet_subtype)) { | ||
597 | kfree_skb(tmp_skb); | ||
598 | goto free_neigh; | ||
599 | } | ||
600 | |||
601 | send_status = batadv_send_skb_packet(tmp_skb, | ||
602 | neigh_node->if_incoming, | ||
603 | neigh_node->addr); | ||
604 | if (send_status == NET_XMIT_SUCCESS) { | ||
605 | /* count the sent packet */ | ||
606 | switch (packet_subtype) { | ||
607 | case BATADV_P_DAT_DHT_GET: | ||
608 | batadv_inc_counter(bat_priv, | ||
609 | BATADV_CNT_DAT_GET_TX); | ||
610 | break; | ||
611 | case BATADV_P_DAT_DHT_PUT: | ||
612 | batadv_inc_counter(bat_priv, | ||
613 | BATADV_CNT_DAT_PUT_TX); | ||
614 | break; | ||
615 | } | ||
616 | |||
617 | /* packet sent to a candidate: return true */ | ||
618 | ret = true; | ||
619 | } | ||
620 | free_neigh: | ||
621 | batadv_neigh_node_free_ref(neigh_node); | ||
622 | free_orig: | ||
623 | batadv_orig_node_free_ref(cand[i].orig_node); | ||
624 | } | ||
625 | |||
626 | out: | ||
627 | kfree(cand); | ||
628 | return ret; | ||
629 | } | ||
630 | |||
631 | /** | ||
632 | * batadv_dat_hash_free - free the local DAT hash table | ||
633 | * @bat_priv: the bat priv with all the soft interface information | ||
634 | */ | ||
635 | static void batadv_dat_hash_free(struct batadv_priv *bat_priv) | ||
636 | { | ||
637 | if (!bat_priv->dat.hash) | ||
638 | return; | ||
639 | |||
640 | __batadv_dat_purge(bat_priv, NULL); | ||
641 | |||
642 | batadv_hash_destroy(bat_priv->dat.hash); | ||
643 | |||
644 | bat_priv->dat.hash = NULL; | ||
645 | } | ||
646 | |||
647 | /** | ||
648 | * batadv_dat_init - initialise the DAT internals | ||
649 | * @bat_priv: the bat priv with all the soft interface information | ||
650 | */ | ||
651 | int batadv_dat_init(struct batadv_priv *bat_priv) | ||
652 | { | ||
653 | if (bat_priv->dat.hash) | ||
654 | return 0; | ||
655 | |||
656 | bat_priv->dat.hash = batadv_hash_new(1024); | ||
657 | |||
658 | if (!bat_priv->dat.hash) | ||
659 | return -ENOMEM; | ||
660 | |||
661 | batadv_dat_start_timer(bat_priv); | ||
662 | |||
663 | return 0; | ||
664 | } | ||
665 | |||
666 | /** | ||
667 | * batadv_dat_free - free the DAT internals | ||
668 | * @bat_priv: the bat priv with all the soft interface information | ||
669 | */ | ||
670 | void batadv_dat_free(struct batadv_priv *bat_priv) | ||
671 | { | ||
672 | cancel_delayed_work_sync(&bat_priv->dat.work); | ||
673 | |||
674 | batadv_dat_hash_free(bat_priv); | ||
675 | } | ||
676 | |||
677 | /** | ||
678 | * batadv_dat_cache_seq_print_text - print the local DAT hash table | ||
679 | * @seq: seq file to print on | ||
680 | * @offset: not used | ||
681 | */ | ||
682 | int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset) | ||
683 | { | ||
684 | struct net_device *net_dev = (struct net_device *)seq->private; | ||
685 | struct batadv_priv *bat_priv = netdev_priv(net_dev); | ||
686 | struct batadv_hashtable *hash = bat_priv->dat.hash; | ||
687 | struct batadv_dat_entry *dat_entry; | ||
688 | struct batadv_hard_iface *primary_if; | ||
689 | struct hlist_node *node; | ||
690 | struct hlist_head *head; | ||
691 | unsigned long last_seen_jiffies; | ||
692 | int last_seen_msecs, last_seen_secs, last_seen_mins; | ||
693 | uint32_t i; | ||
694 | |||
695 | primary_if = batadv_seq_print_text_primary_if_get(seq); | ||
696 | if (!primary_if) | ||
697 | goto out; | ||
698 | |||
699 | seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name); | ||
700 | seq_printf(seq, " %-7s %-13s %5s\n", "IPv4", "MAC", | ||
701 | "last-seen"); | ||
702 | |||
703 | for (i = 0; i < hash->size; i++) { | ||
704 | head = &hash->table[i]; | ||
705 | |||
706 | rcu_read_lock(); | ||
707 | hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) { | ||
708 | last_seen_jiffies = jiffies - dat_entry->last_update; | ||
709 | last_seen_msecs = jiffies_to_msecs(last_seen_jiffies); | ||
710 | last_seen_mins = last_seen_msecs / 60000; | ||
711 | last_seen_msecs = last_seen_msecs % 60000; | ||
712 | last_seen_secs = last_seen_msecs / 1000; | ||
713 | |||
714 | seq_printf(seq, " * %15pI4 %14pM %6i:%02i\n", | ||
715 | &dat_entry->ip, dat_entry->mac_addr, | ||
716 | last_seen_mins, last_seen_secs); | ||
717 | } | ||
718 | rcu_read_unlock(); | ||
719 | } | ||
720 | |||
721 | out: | ||
722 | if (primary_if) | ||
723 | batadv_hardif_free_ref(primary_if); | ||
724 | return 0; | ||
725 | } | ||
726 | |||
727 | /** | ||
728 | * batadv_arp_get_type - parse an ARP packet and get the type | ||
729 | * @bat_priv: the bat priv with all the soft interface information | ||
730 | * @skb: packet to analyse | ||
731 | * @hdr_size: size of the possible header before the ARP packet in the skb | ||
732 | * | ||
733 | * Returns the ARP type if the skb contains a valid ARP packet, 0 otherwise | ||
734 | */ | ||
735 | static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv, | ||
736 | struct sk_buff *skb, int hdr_size) | ||
737 | { | ||
738 | struct arphdr *arphdr; | ||
739 | struct ethhdr *ethhdr; | ||
740 | __be32 ip_src, ip_dst; | ||
741 | uint16_t type = 0; | ||
742 | |||
743 | /* pull the ethernet header */ | ||
744 | if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN))) | ||
745 | goto out; | ||
746 | |||
747 | ethhdr = (struct ethhdr *)(skb->data + hdr_size); | ||
748 | |||
749 | if (ethhdr->h_proto != htons(ETH_P_ARP)) | ||
750 | goto out; | ||
751 | |||
752 | /* pull the ARP payload */ | ||
753 | if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN + | ||
754 | arp_hdr_len(skb->dev)))) | ||
755 | goto out; | ||
756 | |||
757 | arphdr = (struct arphdr *)(skb->data + hdr_size + ETH_HLEN); | ||
758 | |||
759 | /* Check whether the ARP packet carries valid | ||
760 | * IP information | ||
761 | */ | ||
762 | if (arphdr->ar_hrd != htons(ARPHRD_ETHER)) | ||
763 | goto out; | ||
764 | |||
765 | if (arphdr->ar_pro != htons(ETH_P_IP)) | ||
766 | goto out; | ||
767 | |||
768 | if (arphdr->ar_hln != ETH_ALEN) | ||
769 | goto out; | ||
770 | |||
771 | if (arphdr->ar_pln != 4) | ||
772 | goto out; | ||
773 | |||
774 | /* Check for bad reply/request. If the ARP message is not sane, DAT | ||
775 | * will simply ignore it | ||
776 | */ | ||
777 | ip_src = batadv_arp_ip_src(skb, hdr_size); | ||
778 | ip_dst = batadv_arp_ip_dst(skb, hdr_size); | ||
779 | if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) || | ||
780 | ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst)) | ||
781 | goto out; | ||
782 | |||
783 | type = ntohs(arphdr->ar_op); | ||
784 | out: | ||
785 | return type; | ||
786 | } | ||
787 | |||
788 | /** | ||
789 | * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to | ||
790 | * answer using DAT | ||
791 | * @bat_priv: the bat priv with all the soft interface information | ||
792 | * @skb: packet to check | ||
793 | * | ||
794 | * Returns true if the message has been sent to the DHT candidates, false | ||
795 | * otherwise. If true, the message still has to be enqueued to permit the | ||
796 | * fallback | ||
797 | */ | ||
798 | bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, | ||
799 | struct sk_buff *skb) | ||
800 | { | ||
801 | uint16_t type = 0; | ||
802 | __be32 ip_dst, ip_src; | ||
803 | uint8_t *hw_src; | ||
804 | bool ret = false; | ||
805 | struct batadv_dat_entry *dat_entry = NULL; | ||
806 | struct sk_buff *skb_new; | ||
807 | struct batadv_hard_iface *primary_if = NULL; | ||
808 | |||
809 | if (!atomic_read(&bat_priv->distributed_arp_table)) | ||
810 | goto out; | ||
811 | |||
812 | type = batadv_arp_get_type(bat_priv, skb, 0); | ||
813 | /* If the node gets an ARP_REQUEST it has to send a DHT_GET unicast | ||
814 | * message to the selected DHT candidates | ||
815 | */ | ||
816 | if (type != ARPOP_REQUEST) | ||
817 | goto out; | ||
818 | |||
819 | batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REQUEST"); | ||
820 | |||
821 | ip_src = batadv_arp_ip_src(skb, 0); | ||
822 | hw_src = batadv_arp_hw_src(skb, 0); | ||
823 | ip_dst = batadv_arp_ip_dst(skb, 0); | ||
824 | |||
825 | batadv_dat_entry_add(bat_priv, ip_src, hw_src); | ||
826 | |||
827 | dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst); | ||
828 | if (dat_entry) { | ||
829 | primary_if = batadv_primary_if_get_selected(bat_priv); | ||
830 | if (!primary_if) | ||
831 | goto out; | ||
832 | |||
833 | skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src, | ||
834 | primary_if->soft_iface, ip_dst, hw_src, | ||
835 | dat_entry->mac_addr, hw_src); | ||
836 | if (!skb_new) | ||
837 | goto out; | ||
838 | |||
839 | skb_reset_mac_header(skb_new); | ||
840 | skb_new->protocol = eth_type_trans(skb_new, | ||
841 | primary_if->soft_iface); | ||
842 | bat_priv->stats.rx_packets++; | ||
843 | bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; | ||
844 | primary_if->soft_iface->last_rx = jiffies; | ||
845 | |||
846 | netif_rx(skb_new); | ||
847 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n"); | ||
848 | ret = true; | ||
849 | } else { | ||
850 | /* Send the request on the DHT */ | ||
851 | ret = batadv_dat_send_data(bat_priv, skb, ip_dst, | ||
852 | BATADV_P_DAT_DHT_GET); | ||
853 | } | ||
854 | out: | ||
855 | if (dat_entry) | ||
856 | batadv_dat_entry_free_ref(dat_entry); | ||
857 | if (primary_if) | ||
858 | batadv_hardif_free_ref(primary_if); | ||
859 | return ret; | ||
860 | } | ||
861 | |||
862 | /** | ||
863 | * batadv_dat_snoop_incoming_arp_request - snoop the ARP request and try to | ||
864 | * answer using the local DAT storage | ||
865 | * @bat_priv: the bat priv with all the soft interface information | ||
866 | * @skb: packet to check | ||
867 | * @hdr_size: size of the encapsulation header | ||
868 | * | ||
869 | * Returns true if the request has been answered, false otherwise | ||
870 | */ | ||
871 | bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv, | ||
872 | struct sk_buff *skb, int hdr_size) | ||
873 | { | ||
874 | uint16_t type; | ||
875 | __be32 ip_src, ip_dst; | ||
876 | uint8_t *hw_src; | ||
877 | struct sk_buff *skb_new; | ||
878 | struct batadv_hard_iface *primary_if = NULL; | ||
879 | struct batadv_dat_entry *dat_entry = NULL; | ||
880 | bool ret = false; | ||
881 | int err; | ||
882 | |||
883 | if (!atomic_read(&bat_priv->distributed_arp_table)) | ||
884 | goto out; | ||
885 | |||
886 | type = batadv_arp_get_type(bat_priv, skb, hdr_size); | ||
887 | if (type != ARPOP_REQUEST) | ||
888 | goto out; | ||
889 | |||
890 | hw_src = batadv_arp_hw_src(skb, hdr_size); | ||
891 | ip_src = batadv_arp_ip_src(skb, hdr_size); | ||
892 | ip_dst = batadv_arp_ip_dst(skb, hdr_size); | ||
893 | |||
894 | batadv_dbg_arp(bat_priv, skb, type, hdr_size, | ||
895 | "Parsing incoming ARP REQUEST"); | ||
896 | |||
897 | batadv_dat_entry_add(bat_priv, ip_src, hw_src); | ||
898 | |||
899 | dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst); | ||
900 | if (!dat_entry) | ||
901 | goto out; | ||
902 | |||
903 | primary_if = batadv_primary_if_get_selected(bat_priv); | ||
904 | if (!primary_if) | ||
905 | goto out; | ||
906 | |||
907 | skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src, | ||
908 | primary_if->soft_iface, ip_dst, hw_src, | ||
909 | dat_entry->mac_addr, hw_src); | ||
910 | |||
911 | if (!skb_new) | ||
912 | goto out; | ||
913 | |||
914 | /* to preserve backwards compatibility, here the node has to answer | ||
915 | * using the same packet type it received for the request. This is | ||
916 | * because a node that does not use the 4addr packet format may not | ||
917 | * support it. | ||
918 | */ | ||
919 | if (hdr_size == sizeof(struct batadv_unicast_4addr_packet)) | ||
920 | err = batadv_unicast_4addr_send_skb(bat_priv, skb_new, | ||
921 | BATADV_P_DAT_CACHE_REPLY); | ||
922 | else | ||
923 | err = batadv_unicast_send_skb(bat_priv, skb_new); | ||
924 | |||
925 | if (!err) { | ||
926 | batadv_inc_counter(bat_priv, BATADV_CNT_DAT_CACHED_REPLY_TX); | ||
927 | ret = true; | ||
928 | } | ||
929 | out: | ||
930 | if (dat_entry) | ||
931 | batadv_dat_entry_free_ref(dat_entry); | ||
932 | if (primary_if) | ||
933 | batadv_hardif_free_ref(primary_if); | ||
934 | if (ret) | ||
935 | kfree_skb(skb); | ||
936 | return ret; | ||
937 | } | ||
938 | |||
939 | /** | ||
940 | * batadv_dat_snoop_outgoing_arp_reply - snoop the ARP reply and fill the DHT | ||
941 | * @bat_priv: the bat priv with all the soft interface information | ||
942 | * @skb: packet to check | ||
943 | */ | ||
944 | void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv, | ||
945 | struct sk_buff *skb) | ||
946 | { | ||
947 | uint16_t type; | ||
948 | __be32 ip_src, ip_dst; | ||
949 | uint8_t *hw_src, *hw_dst; | ||
950 | |||
951 | if (!atomic_read(&bat_priv->distributed_arp_table)) | ||
952 | return; | ||
953 | |||
954 | type = batadv_arp_get_type(bat_priv, skb, 0); | ||
955 | if (type != ARPOP_REPLY) | ||
956 | return; | ||
957 | |||
958 | batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REPLY"); | ||
959 | |||
960 | hw_src = batadv_arp_hw_src(skb, 0); | ||
961 | ip_src = batadv_arp_ip_src(skb, 0); | ||
962 | hw_dst = batadv_arp_hw_dst(skb, 0); | ||
963 | ip_dst = batadv_arp_ip_dst(skb, 0); | ||
964 | |||
965 | batadv_dat_entry_add(bat_priv, ip_src, hw_src); | ||
966 | batadv_dat_entry_add(bat_priv, ip_dst, hw_dst); | ||
967 | |||
968 | /* Send the ARP reply to the candidates for both the IP addresses that | ||
969 | * the node got within the ARP reply | ||
970 | */ | ||
971 | batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT); | ||
972 | batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT); | ||
973 | } | ||
974 | /** | ||
975 | * batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local | ||
976 | * DAT storage only | ||
977 | * @bat_priv: the bat priv with all the soft interface information | ||
978 | * @skb: packet to check | ||
979 | * @hdr_size: size of the encapsulation header | ||
980 | */ | ||
981 | bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, | ||
982 | struct sk_buff *skb, int hdr_size) | ||
983 | { | ||
984 | uint16_t type; | ||
985 | __be32 ip_src, ip_dst; | ||
986 | uint8_t *hw_src, *hw_dst; | ||
987 | bool ret = false; | ||
988 | |||
989 | if (!atomic_read(&bat_priv->distributed_arp_table)) | ||
990 | goto out; | ||
991 | |||
992 | type = batadv_arp_get_type(bat_priv, skb, hdr_size); | ||
993 | if (type != ARPOP_REPLY) | ||
994 | goto out; | ||
995 | |||
996 | batadv_dbg_arp(bat_priv, skb, type, hdr_size, | ||
997 | "Parsing incoming ARP REPLY"); | ||
998 | |||
999 | hw_src = batadv_arp_hw_src(skb, hdr_size); | ||
1000 | ip_src = batadv_arp_ip_src(skb, hdr_size); | ||
1001 | hw_dst = batadv_arp_hw_dst(skb, hdr_size); | ||
1002 | ip_dst = batadv_arp_ip_dst(skb, hdr_size); | ||
1003 | |||
1004 | /* Update our internal cache with both the IP addresses the node got | ||
1005 | * within the ARP reply | ||
1006 | */ | ||
1007 | batadv_dat_entry_add(bat_priv, ip_src, hw_src); | ||
1008 | batadv_dat_entry_add(bat_priv, ip_dst, hw_dst); | ||
1009 | |||
1010 | /* if this REPLY is directed to a client of mine, let's deliver the | ||
1011 | * packet to the interface | ||
1012 | */ | ||
1013 | ret = !batadv_is_my_client(bat_priv, hw_dst); | ||
1014 | out: | ||
1015 | /* if ret == false -> packet has to be delivered to the interface */ | ||
1016 | return ret; | ||
1017 | } | ||
1018 | |||
1019 | /** | ||
1020 | * batadv_dat_drop_broadcast_packet - check if an ARP request has to be dropped | ||
1021 | * (because the node has already got the reply via DAT) or not | ||
1022 | * @bat_priv: the bat priv with all the soft interface information | ||
1023 | * @forw_packet: the broadcast packet | ||
1024 | * | ||
1025 | * Returns true if the node can drop the packet, false otherwise | ||
1026 | */ | ||
1027 | bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv, | ||
1028 | struct batadv_forw_packet *forw_packet) | ||
1029 | { | ||
1030 | uint16_t type; | ||
1031 | __be32 ip_dst; | ||
1032 | struct batadv_dat_entry *dat_entry = NULL; | ||
1033 | bool ret = false; | ||
1034 | const size_t bcast_len = sizeof(struct batadv_bcast_packet); | ||
1035 | |||
1036 | if (!atomic_read(&bat_priv->distributed_arp_table)) | ||
1037 | goto out; | ||
1038 | |||
1039 | /* If this packet is an ARP_REQUEST and the node already has the | ||
1040 | * information it is asking for, then the packet can be dropped | ||
1041 | */ | ||
1042 | if (forw_packet->num_packets) | ||
1043 | goto out; | ||
1044 | |||
1045 | type = batadv_arp_get_type(bat_priv, forw_packet->skb, bcast_len); | ||
1046 | if (type != ARPOP_REQUEST) | ||
1047 | goto out; | ||
1048 | |||
1049 | ip_dst = batadv_arp_ip_dst(forw_packet->skb, bcast_len); | ||
1050 | dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst); | ||
1051 | /* check if the node already got this entry */ | ||
1052 | if (!dat_entry) { | ||
1053 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | ||
1054 | "ARP Request for %pI4: fallback\n", &ip_dst); | ||
1055 | goto out; | ||
1056 | } | ||
1057 | |||
1058 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | ||
1059 | "ARP Request for %pI4: fallback prevented\n", &ip_dst); | ||
1060 | ret = true; | ||
1061 | |||
1062 | out: | ||
1063 | if (dat_entry) | ||
1064 | batadv_dat_entry_free_ref(dat_entry); | ||
1065 | return ret; | ||
1066 | } | ||
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h new file mode 100644 index 000000000000..d060c033e7de --- /dev/null +++ b/net/batman-adv/distributed-arp-table.h | |||
@@ -0,0 +1,167 @@ | |||
1 | /* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors: | ||
2 | * | ||
3 | * Antonio Quartulli | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of version 2 of the GNU General Public | ||
7 | * License as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
12 | * General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
17 | * 02110-1301, USA | ||
18 | */ | ||
19 | |||
20 | #ifndef _NET_BATMAN_ADV_ARP_H_ | ||
21 | #define _NET_BATMAN_ADV_ARP_H_ | ||
22 | |||
23 | #ifdef CONFIG_BATMAN_ADV_DAT | ||
24 | |||
25 | #include "types.h" | ||
26 | #include "originator.h" | ||
27 | |||
28 | #include <linux/if_arp.h> | ||
29 | |||
30 | #define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0) | ||
31 | |||
32 | bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, | ||
33 | struct sk_buff *skb); | ||
34 | bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv, | ||
35 | struct sk_buff *skb, int hdr_size); | ||
36 | void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv, | ||
37 | struct sk_buff *skb); | ||
38 | bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, | ||
39 | struct sk_buff *skb, int hdr_size); | ||
40 | bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv, | ||
41 | struct batadv_forw_packet *forw_packet); | ||
42 | |||
43 | /** | ||
44 | * batadv_dat_init_orig_node_addr - assign a DAT address to the orig_node | ||
45 | * @orig_node: the node to assign the DAT address to | ||
46 | */ | ||
47 | static inline void | ||
48 | batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node) | ||
49 | { | ||
50 | uint32_t addr; | ||
51 | |||
52 | addr = batadv_choose_orig(orig_node->orig, BATADV_DAT_ADDR_MAX); | ||
53 | orig_node->dat_addr = (batadv_dat_addr_t)addr; | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * batadv_dat_init_own_addr - assign a DAT address to the node itself | ||
58 | * @bat_priv: the bat priv with all the soft interface information | ||
59 | * @primary_if: a pointer to the primary interface | ||
60 | */ | ||
61 | static inline void | ||
62 | batadv_dat_init_own_addr(struct batadv_priv *bat_priv, | ||
63 | struct batadv_hard_iface *primary_if) | ||
64 | { | ||
65 | uint32_t addr; | ||
66 | |||
67 | addr = batadv_choose_orig(primary_if->net_dev->dev_addr, | ||
68 | BATADV_DAT_ADDR_MAX); | ||
69 | |||
70 | bat_priv->dat.addr = (batadv_dat_addr_t)addr; | ||
71 | } | ||
72 | |||
73 | int batadv_dat_init(struct batadv_priv *bat_priv); | ||
74 | void batadv_dat_free(struct batadv_priv *bat_priv); | ||
75 | int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset); | ||
76 | |||
77 | /** | ||
78 | * batadv_dat_inc_counter - increment the correct DAT packet counter | ||
79 | * @bat_priv: the bat priv with all the soft interface information | ||
80 | * @subtype: the 4addr subtype of the packet to be counted | ||
81 | * | ||
82 | * Updates the ethtool statistics for the received packet if it is a DAT subtype | ||
83 | */ | ||
84 | static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv, | ||
85 | uint8_t subtype) | ||
86 | { | ||
87 | switch (subtype) { | ||
88 | case BATADV_P_DAT_DHT_GET: | ||
89 | batadv_inc_counter(bat_priv, | ||
90 | BATADV_CNT_DAT_GET_RX); | ||
91 | break; | ||
92 | case BATADV_P_DAT_DHT_PUT: | ||
93 | batadv_inc_counter(bat_priv, | ||
94 | BATADV_CNT_DAT_PUT_RX); | ||
95 | break; | ||
96 | } | ||
97 | } | ||
98 | |||
99 | #else | ||
100 | |||
101 | static inline bool | ||
102 | batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, | ||
103 | struct sk_buff *skb) | ||
104 | { | ||
105 | return false; | ||
106 | } | ||
107 | |||
108 | static inline bool | ||
109 | batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv, | ||
110 | struct sk_buff *skb, int hdr_size) | ||
111 | { | ||
112 | return false; | ||
113 | } | ||
114 | |||
115 | static inline bool | ||
116 | batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv, | ||
117 | struct sk_buff *skb) | ||
118 | { | ||
119 | return false; | ||
120 | } | ||
121 | |||
122 | static inline bool | ||
123 | batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, | ||
124 | struct sk_buff *skb, int hdr_size) | ||
125 | { | ||
126 | return false; | ||
127 | } | ||
128 | |||
129 | static inline bool | ||
130 | batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv, | ||
131 | struct batadv_forw_packet *forw_packet) | ||
132 | { | ||
133 | return false; | ||
134 | } | ||
135 | |||
136 | static inline void | ||
137 | batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node) | ||
138 | { | ||
139 | } | ||
140 | |||
141 | static inline void batadv_dat_init_own_addr(struct batadv_priv *bat_priv, | ||
142 | struct batadv_hard_iface *iface) | ||
143 | { | ||
144 | } | ||
145 | |||
146 | static inline void batadv_arp_change_timeout(struct net_device *soft_iface, | ||
147 | const char *name) | ||
148 | { | ||
149 | } | ||
150 | |||
151 | static inline int batadv_dat_init(struct batadv_priv *bat_priv) | ||
152 | { | ||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static inline void batadv_dat_free(struct batadv_priv *bat_priv) | ||
157 | { | ||
158 | } | ||
159 | |||
160 | static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv, | ||
161 | uint8_t subtype) | ||
162 | { | ||
163 | } | ||
164 | |||
165 | #endif /* CONFIG_BATMAN_ADV_DAT */ | ||
166 | |||
167 | #endif /* _NET_BATMAN_ADV_ARP_H_ */ | ||
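
The hooks exported above are meant to bracket ARP handling in the soft-interface TX/RX paths; the kernel-doc in distributed-arp-table.c spells out the contract (a true return from the outgoing-request snoop still requires enqueueing the broadcast so it can serve as fallback, and batadv_dat_drop_broadcast_packet() later decides whether that delayed broadcast may be dropped). A hypothetical caller, sketched only from those comments — the real wiring into soft-interface.c/send.c is not part of this hunk, and send_delayed_bcast() is a made-up placeholder:

/* Hypothetical TX-path sketch, not the actual batman-adv code. */
static void tx_arp_request_sketch(struct batadv_priv *bat_priv,
				  struct sk_buff *skb)
{
	/* a true return means the request was answered locally or forwarded
	 * to the DHT candidates; even then the broadcast has to be enqueued
	 * (delayed by ARP_REQ_DELAY) so it can act as a fallback
	 */
	batadv_dat_snoop_outgoing_arp_request(bat_priv, skb);

	send_delayed_bcast(bat_priv, skb, ARP_REQ_DELAY);	/* placeholder */
}

static void bcast_dequeue_sketch(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet)
{
	/* by now a DHT reply may have filled the local cache, in which case
	 * the fallback ARP broadcast can be dropped
	 */
	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		return;

	/* ...otherwise transmit the broadcast as usual... */
}
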
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 15d67abc10a4..dd07c7e3654f 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c | |||
@@ -477,22 +477,11 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) | |||
477 | struct batadv_hard_iface *primary_if; | 477 | struct batadv_hard_iface *primary_if; |
478 | struct batadv_gw_node *gw_node; | 478 | struct batadv_gw_node *gw_node; |
479 | struct hlist_node *node; | 479 | struct hlist_node *node; |
480 | int gw_count = 0, ret = 0; | 480 | int gw_count = 0; |
481 | 481 | ||
482 | primary_if = batadv_primary_if_get_selected(bat_priv); | 482 | primary_if = batadv_seq_print_text_primary_if_get(seq); |
483 | if (!primary_if) { | 483 | if (!primary_if) |
484 | ret = seq_printf(seq, | ||
485 | "BATMAN mesh %s disabled - please specify interfaces to enable it\n", | ||
486 | net_dev->name); | ||
487 | goto out; | 484 | goto out; |
488 | } | ||
489 | |||
490 | if (primary_if->if_status != BATADV_IF_ACTIVE) { | ||
491 | ret = seq_printf(seq, | ||
492 | "BATMAN mesh %s disabled - primary interface not active\n", | ||
493 | net_dev->name); | ||
494 | goto out; | ||
495 | } | ||
496 | 485 | ||
497 | seq_printf(seq, | 486 | seq_printf(seq, |
498 | " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", | 487 | " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", |
@@ -519,7 +508,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) | |||
519 | out: | 508 | out: |
520 | if (primary_if) | 509 | if (primary_if) |
521 | batadv_hardif_free_ref(primary_if); | 510 | batadv_hardif_free_ref(primary_if); |
522 | return ret; | 511 | return 0; |
523 | } | 512 | } |
524 | 513 | ||
525 | static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) | 514 | static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) |
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index d112fd6750b0..365ed74f3946 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include "main.h" | 20 | #include "main.h" |
21 | #include "distributed-arp-table.h" | ||
21 | #include "hard-interface.h" | 22 | #include "hard-interface.h" |
22 | #include "soft-interface.h" | 23 | #include "soft-interface.h" |
23 | #include "send.h" | 24 | #include "send.h" |
@@ -58,6 +59,45 @@ out: | |||
58 | return hard_iface; | 59 | return hard_iface; |
59 | } | 60 | } |
60 | 61 | ||
62 | /** | ||
63 | * batadv_is_on_batman_iface - check if a device is a batman iface descendant | ||
64 | * @net_dev: the device to check | ||
65 | * | ||
66 | * If the user creates any virtual device on top of a batman-adv interface, it | ||
67 | * is important to prevent this new interface from being used to create a new | ||
68 | * mesh network (this behaviour would lead to a batman-over-batman | ||
69 | * configuration). This function recursively checks all the parents of the | ||
70 | * device passed as argument, looking for a batman-adv soft interface. | ||
71 | * | ||
72 | * Returns true if the device is a descendant of a batman-adv mesh interface (or | ||
73 | * if it is a batman-adv interface itself), false otherwise | ||
74 | */ | ||
75 | static bool batadv_is_on_batman_iface(const struct net_device *net_dev) | ||
76 | { | ||
77 | struct net_device *parent_dev; | ||
78 | bool ret; | ||
79 | |||
80 | /* check if this is a batman-adv mesh interface */ | ||
81 | if (batadv_softif_is_valid(net_dev)) | ||
82 | return true; | ||
83 | |||
84 | /* no more parents..stop recursion */ | ||
85 | if (net_dev->iflink == net_dev->ifindex) | ||
86 | return false; | ||
87 | |||
88 | /* recurse over the parent device */ | ||
89 | parent_dev = dev_get_by_index(&init_net, net_dev->iflink); | ||
90 | /* if we got a NULL parent_dev there is something broken.. */ | ||
91 | if (WARN(!parent_dev, "Cannot find parent device")) | ||
92 | return false; | ||
93 | |||
94 | ret = batadv_is_on_batman_iface(parent_dev); | ||
95 | |||
96 | if (parent_dev) | ||
97 | dev_put(parent_dev); | ||
98 | return ret; | ||
99 | } | ||
100 | |||
61 | static int batadv_is_valid_iface(const struct net_device *net_dev) | 101 | static int batadv_is_valid_iface(const struct net_device *net_dev) |
62 | { | 102 | { |
63 | if (net_dev->flags & IFF_LOOPBACK) | 103 | if (net_dev->flags & IFF_LOOPBACK) |
@@ -70,7 +110,7 @@ static int batadv_is_valid_iface(const struct net_device *net_dev) | |||
70 | return 0; | 110 | return 0; |
71 | 111 | ||
72 | /* no batman over batman */ | 112 | /* no batman over batman */ |
73 | if (batadv_softif_is_valid(net_dev)) | 113 | if (batadv_is_on_batman_iface(net_dev)) |
74 | return 0; | 114 | return 0; |
75 | 115 | ||
76 | return 1; | 116 | return 1; |
@@ -109,6 +149,8 @@ static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv, | |||
109 | if (!primary_if) | 149 | if (!primary_if) |
110 | goto out; | 150 | goto out; |
111 | 151 | ||
152 | batadv_dat_init_own_addr(bat_priv, primary_if); | ||
153 | |||
112 | skb = bat_priv->vis.my_info->skb_packet; | 154 | skb = bat_priv->vis.my_info->skb_packet; |
113 | vis_packet = (struct batadv_vis_packet *)skb->data; | 155 | vis_packet = (struct batadv_vis_packet *)skb->data; |
114 | memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN); | 156 | memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN); |
@@ -450,8 +492,8 @@ batadv_hardif_add_interface(struct net_device *net_dev) | |||
450 | /* This can't be called via a bat_priv callback because | 492 | /* This can't be called via a bat_priv callback because |
451 | * we have no bat_priv yet. | 493 | * we have no bat_priv yet. |
452 | */ | 494 | */ |
453 | atomic_set(&hard_iface->seqno, 1); | 495 | atomic_set(&hard_iface->bat_iv.ogm_seqno, 1); |
454 | hard_iface->packet_buff = NULL; | 496 | hard_iface->bat_iv.ogm_buff = NULL; |
455 | 497 | ||
456 | return hard_iface; | 498 | return hard_iface; |
457 | 499 | ||
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h index 977de9c75fc2..e05333905afd 100644 --- a/net/batman-adv/hash.h +++ b/net/batman-adv/hash.h | |||
@@ -82,6 +82,28 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash, | |||
82 | } | 82 | } |
83 | 83 | ||
84 | /** | 84 | /** |
85 | * batadv_hash_bytes - hash some bytes and add them to the previous hash | ||
86 | * @hash: previous hash value | ||
87 | * @data: data to be hashed | ||
88 | * @size: number of bytes to be hashed | ||
89 | * | ||
90 | * Returns the new hash value. | ||
91 | */ | ||
92 | static inline uint32_t batadv_hash_bytes(uint32_t hash, void *data, | ||
93 | uint32_t size) | ||
94 | { | ||
95 | const unsigned char *key = data; | ||
96 | int i; | ||
97 | |||
98 | for (i = 0; i < size; i++) { | ||
99 | hash += key[i]; | ||
100 | hash += (hash << 10); | ||
101 | hash ^= (hash >> 6); | ||
102 | } | ||
103 | return hash; | ||
104 | } | ||
105 | |||
106 | /** | ||
85 | * batadv_hash_add - adds data to the hashtable | 107 | * batadv_hash_add - adds data to the hashtable |
86 | * @hash: storage hash table | 108 | * @hash: storage hash table |
87 | * @compare: callback to determine if 2 hash elements are identical | 109 | * @compare: callback to determine if 2 hash elements are identical |
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index bde3cf747507..87ca8095b011 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c | |||
@@ -42,12 +42,16 @@ static int batadv_socket_open(struct inode *inode, struct file *file) | |||
42 | unsigned int i; | 42 | unsigned int i; |
43 | struct batadv_socket_client *socket_client; | 43 | struct batadv_socket_client *socket_client; |
44 | 44 | ||
45 | if (!try_module_get(THIS_MODULE)) | ||
46 | return -EBUSY; | ||
47 | |||
45 | nonseekable_open(inode, file); | 48 | nonseekable_open(inode, file); |
46 | 49 | ||
47 | socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL); | 50 | socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL); |
48 | 51 | if (!socket_client) { | |
49 | if (!socket_client) | 52 | module_put(THIS_MODULE); |
50 | return -ENOMEM; | 53 | return -ENOMEM; |
54 | } | ||
51 | 55 | ||
52 | for (i = 0; i < ARRAY_SIZE(batadv_socket_client_hash); i++) { | 56 | for (i = 0; i < ARRAY_SIZE(batadv_socket_client_hash); i++) { |
53 | if (!batadv_socket_client_hash[i]) { | 57 | if (!batadv_socket_client_hash[i]) { |
@@ -59,6 +63,7 @@ static int batadv_socket_open(struct inode *inode, struct file *file) | |||
59 | if (i == ARRAY_SIZE(batadv_socket_client_hash)) { | 63 | if (i == ARRAY_SIZE(batadv_socket_client_hash)) { |
60 | pr_err("Error - can't add another packet client: maximum number of clients reached\n"); | 64 | pr_err("Error - can't add another packet client: maximum number of clients reached\n"); |
61 | kfree(socket_client); | 65 | kfree(socket_client); |
66 | module_put(THIS_MODULE); | ||
62 | return -EXFULL; | 67 | return -EXFULL; |
63 | } | 68 | } |
64 | 69 | ||
@@ -71,7 +76,6 @@ static int batadv_socket_open(struct inode *inode, struct file *file) | |||
71 | 76 | ||
72 | file->private_data = socket_client; | 77 | file->private_data = socket_client; |
73 | 78 | ||
74 | batadv_inc_module_count(); | ||
75 | return 0; | 79 | return 0; |
76 | } | 80 | } |
77 | 81 | ||
@@ -96,7 +100,7 @@ static int batadv_socket_release(struct inode *inode, struct file *file) | |||
96 | spin_unlock_bh(&socket_client->lock); | 100 | spin_unlock_bh(&socket_client->lock); |
97 | 101 | ||
98 | kfree(socket_client); | 102 | kfree(socket_client); |
99 | batadv_dec_module_count(); | 103 | module_put(THIS_MODULE); |
100 | 104 | ||
101 | return 0; | 105 | return 0; |
102 | } | 106 | } |
@@ -173,13 +177,13 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff, | |||
173 | if (len >= sizeof(struct batadv_icmp_packet_rr)) | 177 | if (len >= sizeof(struct batadv_icmp_packet_rr)) |
174 | packet_len = sizeof(struct batadv_icmp_packet_rr); | 178 | packet_len = sizeof(struct batadv_icmp_packet_rr); |
175 | 179 | ||
176 | skb = dev_alloc_skb(packet_len + ETH_HLEN); | 180 | skb = dev_alloc_skb(packet_len + ETH_HLEN + NET_IP_ALIGN); |
177 | if (!skb) { | 181 | if (!skb) { |
178 | len = -ENOMEM; | 182 | len = -ENOMEM; |
179 | goto out; | 183 | goto out; |
180 | } | 184 | } |
181 | 185 | ||
182 | skb_reserve(skb, ETH_HLEN); | 186 | skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN); |
183 | icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len); | 187 | icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len); |
184 | 188 | ||
185 | if (copy_from_user(icmp_packet, buff, packet_len)) { | 189 | if (copy_from_user(icmp_packet, buff, packet_len)) { |
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index b4aa470bc4a6..f65a222b7b83 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c | |||
@@ -17,6 +17,8 @@ | |||
17 | * 02110-1301, USA | 17 | * 02110-1301, USA |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/crc32c.h> | ||
21 | #include <linux/highmem.h> | ||
20 | #include "main.h" | 22 | #include "main.h" |
21 | #include "sysfs.h" | 23 | #include "sysfs.h" |
22 | #include "debugfs.h" | 24 | #include "debugfs.h" |
@@ -29,6 +31,7 @@ | |||
29 | #include "hard-interface.h" | 31 | #include "hard-interface.h" |
30 | #include "gateway_client.h" | 32 | #include "gateway_client.h" |
31 | #include "bridge_loop_avoidance.h" | 33 | #include "bridge_loop_avoidance.h" |
34 | #include "distributed-arp-table.h" | ||
32 | #include "vis.h" | 35 | #include "vis.h" |
33 | #include "hash.h" | 36 | #include "hash.h" |
34 | #include "bat_algo.h" | 37 | #include "bat_algo.h" |
@@ -128,6 +131,10 @@ int batadv_mesh_init(struct net_device *soft_iface) | |||
128 | if (ret < 0) | 131 | if (ret < 0) |
129 | goto err; | 132 | goto err; |
130 | 133 | ||
134 | ret = batadv_dat_init(bat_priv); | ||
135 | if (ret < 0) | ||
136 | goto err; | ||
137 | |||
131 | atomic_set(&bat_priv->gw.reselect, 0); | 138 | atomic_set(&bat_priv->gw.reselect, 0); |
132 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE); | 139 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE); |
133 | 140 | ||
@@ -155,21 +162,13 @@ void batadv_mesh_free(struct net_device *soft_iface) | |||
155 | 162 | ||
156 | batadv_bla_free(bat_priv); | 163 | batadv_bla_free(bat_priv); |
157 | 164 | ||
165 | batadv_dat_free(bat_priv); | ||
166 | |||
158 | free_percpu(bat_priv->bat_counters); | 167 | free_percpu(bat_priv->bat_counters); |
159 | 168 | ||
160 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); | 169 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); |
161 | } | 170 | } |
162 | 171 | ||
163 | void batadv_inc_module_count(void) | ||
164 | { | ||
165 | try_module_get(THIS_MODULE); | ||
166 | } | ||
167 | |||
168 | void batadv_dec_module_count(void) | ||
169 | { | ||
170 | module_put(THIS_MODULE); | ||
171 | } | ||
172 | |||
173 | int batadv_is_my_mac(const uint8_t *addr) | 172 | int batadv_is_my_mac(const uint8_t *addr) |
174 | { | 173 | { |
175 | const struct batadv_hard_iface *hard_iface; | 174 | const struct batadv_hard_iface *hard_iface; |
@@ -188,6 +187,42 @@ int batadv_is_my_mac(const uint8_t *addr) | |||
188 | return 0; | 187 | return 0; |
189 | } | 188 | } |
190 | 189 | ||
190 | /** | ||
191 | * batadv_seq_print_text_primary_if_get - called from debugfs table printing | ||
192 | * function that requires the primary interface | ||
193 | * @seq: debugfs table seq_file struct | ||
194 | * | ||
195 | * Returns primary interface if found or NULL otherwise. | ||
196 | */ | ||
197 | struct batadv_hard_iface * | ||
198 | batadv_seq_print_text_primary_if_get(struct seq_file *seq) | ||
199 | { | ||
200 | struct net_device *net_dev = (struct net_device *)seq->private; | ||
201 | struct batadv_priv *bat_priv = netdev_priv(net_dev); | ||
202 | struct batadv_hard_iface *primary_if; | ||
203 | |||
204 | primary_if = batadv_primary_if_get_selected(bat_priv); | ||
205 | |||
206 | if (!primary_if) { | ||
207 | seq_printf(seq, | ||
208 | "BATMAN mesh %s disabled - please specify interfaces to enable it\n", | ||
209 | net_dev->name); | ||
210 | goto out; | ||
211 | } | ||
212 | |||
213 | if (primary_if->if_status == BATADV_IF_ACTIVE) | ||
214 | goto out; | ||
215 | |||
216 | seq_printf(seq, | ||
217 | "BATMAN mesh %s disabled - primary interface not active\n", | ||
218 | net_dev->name); | ||
219 | batadv_hardif_free_ref(primary_if); | ||
220 | primary_if = NULL; | ||
221 | |||
222 | out: | ||
223 | return primary_if; | ||
224 | } | ||
225 | |||
191 | static int batadv_recv_unhandled_packet(struct sk_buff *skb, | 226 | static int batadv_recv_unhandled_packet(struct sk_buff *skb, |
192 | struct batadv_hard_iface *recv_if) | 227 | struct batadv_hard_iface *recv_if) |
193 | { | 228 | { |
@@ -274,6 +309,8 @@ static void batadv_recv_handler_init(void) | |||
274 | 309 | ||
275 | /* batman icmp packet */ | 310 | /* batman icmp packet */ |
276 | batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet; | 311 | batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet; |
312 | /* unicast with 4 addresses packet */ | ||
313 | batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet; | ||
277 | /* unicast packet */ | 314 | /* unicast packet */ |
278 | batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet; | 315 | batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet; |
279 | /* fragmented unicast packet */ | 316 | /* fragmented unicast packet */ |
@@ -385,6 +422,38 @@ int batadv_algo_seq_print_text(struct seq_file *seq, void *offset) | |||
385 | return 0; | 422 | return 0; |
386 | } | 423 | } |
387 | 424 | ||
425 | /** | ||
426 | * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in | ||
427 | * the header | ||
428 | * @skb: skb pointing to fragmented socket buffers | ||
429 | * @payload_ptr: Pointer to position inside the head buffer of the skb | ||
430 | * marking the start of the data to be CRC'ed | ||
431 | * | ||
432 | * payload_ptr must always point to an address in the skb head buffer and not to | ||
433 | * a fragment. | ||
434 | */ | ||
435 | __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr) | ||
436 | { | ||
437 | u32 crc = 0; | ||
438 | unsigned int from; | ||
439 | unsigned int to = skb->len; | ||
440 | struct skb_seq_state st; | ||
441 | const u8 *data; | ||
442 | unsigned int len; | ||
443 | unsigned int consumed = 0; | ||
444 | |||
445 | from = (unsigned int)(payload_ptr - skb->data); | ||
446 | |||
447 | skb_prepare_seq_read(skb, from, to, &st); | ||
448 | while ((len = skb_seq_read(consumed, &data, &st)) != 0) { | ||
449 | crc = crc32c(crc, data, len); | ||
450 | consumed += len; | ||
451 | } | ||
452 | skb_abort_seq_read(&st); | ||
453 | |||
454 | return htonl(crc); | ||
455 | } | ||
456 | |||
388 | static int batadv_param_set_ra(const char *val, const struct kernel_param *kp) | 457 | static int batadv_param_set_ra(const char *val, const struct kernel_param *kp) |
389 | { | 458 | { |
390 | struct batadv_algo_ops *bat_algo_ops; | 459 | struct batadv_algo_ops *bat_algo_ops; |
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index d57b746219de..2f85577086a7 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h | |||
@@ -26,7 +26,7 @@ | |||
26 | #define BATADV_DRIVER_DEVICE "batman-adv" | 26 | #define BATADV_DRIVER_DEVICE "batman-adv" |
27 | 27 | ||
28 | #ifndef BATADV_SOURCE_VERSION | 28 | #ifndef BATADV_SOURCE_VERSION |
29 | #define BATADV_SOURCE_VERSION "2012.4.0" | 29 | #define BATADV_SOURCE_VERSION "2012.5.0" |
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | /* B.A.T.M.A.N. parameters */ | 32 | /* B.A.T.M.A.N. parameters */ |
@@ -44,6 +44,7 @@ | |||
44 | #define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */ | 44 | #define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */ |
45 | #define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */ | 45 | #define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */ |
46 | #define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */ | 46 | #define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */ |
47 | #define BATADV_DAT_ENTRY_TIMEOUT (5*60000) /* 5 mins in milliseconds */ | ||
47 | /* sliding packet range of received originator messages in sequence numbers | 48 | /* sliding packet range of received originator messages in sequence numbers |
48 | * (should be a multiple of our word size) | 49 | * (should be a multiple of our word size) |
49 | */ | 50 | */ |
@@ -73,6 +74,11 @@ | |||
73 | 74 | ||
74 | #define BATADV_LOG_BUF_LEN 8192 /* has to be a power of 2 */ | 75 | #define BATADV_LOG_BUF_LEN 8192 /* has to be a power of 2 */ |
75 | 76 | ||
77 | /* msecs after which an ARP_REQUEST is broadcast as a fallback */ | ||
78 | #define ARP_REQ_DELAY 250 | ||
79 | /* number of originators to contact for any PUT/GET DHT operation */ | ||
80 | #define BATADV_DAT_CANDIDATES_NUM 3 | ||
81 | |||
76 | #define BATADV_VIS_INTERVAL 5000 /* 5 seconds */ | 82 | #define BATADV_VIS_INTERVAL 5000 /* 5 seconds */ |
77 | 83 | ||
78 | /* how much worse secondary interfaces may be to be considered as bonding | 84 | /* how much worse secondary interfaces may be to be considered as bonding |
@@ -89,6 +95,7 @@ | |||
89 | #define BATADV_BLA_PERIOD_LENGTH 10000 /* 10 seconds */ | 95 | #define BATADV_BLA_PERIOD_LENGTH 10000 /* 10 seconds */ |
90 | #define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 3) | 96 | #define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 3) |
91 | #define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10) | 97 | #define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10) |
98 | #define BATADV_BLA_WAIT_PERIODS 3 | ||
92 | 99 | ||
93 | #define BATADV_DUPLIST_SIZE 16 | 100 | #define BATADV_DUPLIST_SIZE 16 |
94 | #define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */ | 101 | #define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */ |
@@ -117,6 +124,9 @@ enum batadv_uev_type { | |||
117 | 124 | ||
118 | #define BATADV_GW_THRESHOLD 50 | 125 | #define BATADV_GW_THRESHOLD 50 |
119 | 126 | ||
127 | #define BATADV_DAT_CANDIDATE_NOT_FOUND 0 | ||
128 | #define BATADV_DAT_CANDIDATE_ORIG 1 | ||
129 | |||
120 | /* Debug Messages */ | 130 | /* Debug Messages */ |
121 | #ifdef pr_fmt | 131 | #ifdef pr_fmt |
122 | #undef pr_fmt | 132 | #undef pr_fmt |
@@ -150,9 +160,9 @@ extern struct workqueue_struct *batadv_event_workqueue; | |||
150 | 160 | ||
151 | int batadv_mesh_init(struct net_device *soft_iface); | 161 | int batadv_mesh_init(struct net_device *soft_iface); |
152 | void batadv_mesh_free(struct net_device *soft_iface); | 162 | void batadv_mesh_free(struct net_device *soft_iface); |
153 | void batadv_inc_module_count(void); | ||
154 | void batadv_dec_module_count(void); | ||
155 | int batadv_is_my_mac(const uint8_t *addr); | 163 | int batadv_is_my_mac(const uint8_t *addr); |
164 | struct batadv_hard_iface * | ||
165 | batadv_seq_print_text_primary_if_get(struct seq_file *seq); | ||
156 | int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | 166 | int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, |
157 | struct packet_type *ptype, | 167 | struct packet_type *ptype, |
158 | struct net_device *orig_dev); | 168 | struct net_device *orig_dev); |
@@ -164,14 +174,24 @@ void batadv_recv_handler_unregister(uint8_t packet_type); | |||
164 | int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops); | 174 | int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops); |
165 | int batadv_algo_select(struct batadv_priv *bat_priv, char *name); | 175 | int batadv_algo_select(struct batadv_priv *bat_priv, char *name); |
166 | int batadv_algo_seq_print_text(struct seq_file *seq, void *offset); | 176 | int batadv_algo_seq_print_text(struct seq_file *seq, void *offset); |
177 | __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr); | ||
167 | 178 | ||
168 | /* all messages related to routing / flooding / broadcasting / etc */ | 179 | /** |
180 | * enum batadv_dbg_level - available log levels | ||
181 | * @BATADV_DBG_BATMAN: OGM and TQ computations related messages | ||
182 | * @BATADV_DBG_ROUTES: route added / changed / deleted | ||
183 | * @BATADV_DBG_TT: translation table messages | ||
184 | * @BATADV_DBG_BLA: bridge loop avoidance messages | ||
185 | * @BATADV_DBG_DAT: ARP snooping and DAT related messages | ||
186 | * @BATADV_DBG_ALL: the union of all the above log levels | ||
187 | */ | ||
169 | enum batadv_dbg_level { | 188 | enum batadv_dbg_level { |
170 | BATADV_DBG_BATMAN = BIT(0), | 189 | BATADV_DBG_BATMAN = BIT(0), |
171 | BATADV_DBG_ROUTES = BIT(1), /* route added / changed / deleted */ | 190 | BATADV_DBG_ROUTES = BIT(1), |
172 | BATADV_DBG_TT = BIT(2), /* translation table operations */ | 191 | BATADV_DBG_TT = BIT(2), |
173 | BATADV_DBG_BLA = BIT(3), /* bridge loop avoidance */ | 192 | BATADV_DBG_BLA = BIT(3), |
174 | BATADV_DBG_ALL = 15, | 193 | BATADV_DBG_DAT = BIT(4), |
194 | BATADV_DBG_ALL = 31, | ||
175 | }; | 195 | }; |
176 | 196 | ||
177 | #ifdef CONFIG_BATMAN_ADV_DEBUG | 197 | #ifdef CONFIG_BATMAN_ADV_DEBUG |
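The jump of BATADV_DBG_ALL from 15 to 31 in the hunk above is simply the new BATADV_DBG_DAT bit being OR-ed into the union of all log levels. A minimal stand-alone sketch, assuming only the usual BIT() expansion, that double-checks the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    #define BIT(n) (1U << (n))      /* same expansion as the kernel helper */

    /* mirrors enum batadv_dbg_level from the hunk above */
    enum dbg_level {
            DBG_BATMAN = BIT(0),
            DBG_ROUTES = BIT(1),
            DBG_TT     = BIT(2),
            DBG_BLA    = BIT(3),
            DBG_DAT    = BIT(4),
            DBG_ALL    = 31,
    };

    int main(void)
    {
            /* the union of every individual level must equal DBG_ALL */
            uint32_t all = DBG_BATMAN | DBG_ROUTES | DBG_TT | DBG_BLA | DBG_DAT;

            assert(all == DBG_ALL); /* 1 + 2 + 4 + 8 + 16 == 31 */
            return 0;
    }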
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index ac9bdf8f80a6..8c32cf1c2dec 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include "main.h" | 20 | #include "main.h" |
21 | #include "distributed-arp-table.h" | ||
21 | #include "originator.h" | 22 | #include "originator.h" |
22 | #include "hash.h" | 23 | #include "hash.h" |
23 | #include "translation-table.h" | 24 | #include "translation-table.h" |
@@ -220,9 +221,9 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv, | |||
220 | atomic_set(&orig_node->refcount, 2); | 221 | atomic_set(&orig_node->refcount, 2); |
221 | 222 | ||
222 | orig_node->tt_initialised = false; | 223 | orig_node->tt_initialised = false; |
223 | orig_node->tt_poss_change = false; | ||
224 | orig_node->bat_priv = bat_priv; | 224 | orig_node->bat_priv = bat_priv; |
225 | memcpy(orig_node->orig, addr, ETH_ALEN); | 225 | memcpy(orig_node->orig, addr, ETH_ALEN); |
226 | batadv_dat_init_orig_node_addr(orig_node); | ||
226 | orig_node->router = NULL; | 227 | orig_node->router = NULL; |
227 | orig_node->tt_crc = 0; | 228 | orig_node->tt_crc = 0; |
228 | atomic_set(&orig_node->last_ttvn, 0); | 229 | atomic_set(&orig_node->last_ttvn, 0); |
@@ -415,23 +416,10 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) | |||
415 | int last_seen_msecs; | 416 | int last_seen_msecs; |
416 | unsigned long last_seen_jiffies; | 417 | unsigned long last_seen_jiffies; |
417 | uint32_t i; | 418 | uint32_t i; |
418 | int ret = 0; | ||
419 | 419 | ||
420 | primary_if = batadv_primary_if_get_selected(bat_priv); | 420 | primary_if = batadv_seq_print_text_primary_if_get(seq); |
421 | 421 | if (!primary_if) | |
422 | if (!primary_if) { | ||
423 | ret = seq_printf(seq, | ||
424 | "BATMAN mesh %s disabled - please specify interfaces to enable it\n", | ||
425 | net_dev->name); | ||
426 | goto out; | ||
427 | } | ||
428 | |||
429 | if (primary_if->if_status != BATADV_IF_ACTIVE) { | ||
430 | ret = seq_printf(seq, | ||
431 | "BATMAN mesh %s disabled - primary interface not active\n", | ||
432 | net_dev->name); | ||
433 | goto out; | 422 | goto out; |
434 | } | ||
435 | 423 | ||
436 | seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", | 424 | seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", |
437 | BATADV_SOURCE_VERSION, primary_if->net_dev->name, | 425 | BATADV_SOURCE_VERSION, primary_if->net_dev->name, |
@@ -485,7 +473,7 @@ next: | |||
485 | out: | 473 | out: |
486 | if (primary_if) | 474 | if (primary_if) |
487 | batadv_hardif_free_ref(primary_if); | 475 | batadv_hardif_free_ref(primary_if); |
488 | return ret; | 476 | return 0; |
489 | } | 477 | } |
490 | 478 | ||
491 | static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node, | 479 | static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node, |
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 2d23a14c220e..1c5454d33f67 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h | |||
@@ -23,14 +23,29 @@ | |||
23 | #define BATADV_ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ | 23 | #define BATADV_ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ |
24 | 24 | ||
25 | enum batadv_packettype { | 25 | enum batadv_packettype { |
26 | BATADV_IV_OGM = 0x01, | 26 | BATADV_IV_OGM = 0x01, |
27 | BATADV_ICMP = 0x02, | 27 | BATADV_ICMP = 0x02, |
28 | BATADV_UNICAST = 0x03, | 28 | BATADV_UNICAST = 0x03, |
29 | BATADV_BCAST = 0x04, | 29 | BATADV_BCAST = 0x04, |
30 | BATADV_VIS = 0x05, | 30 | BATADV_VIS = 0x05, |
31 | BATADV_UNICAST_FRAG = 0x06, | 31 | BATADV_UNICAST_FRAG = 0x06, |
32 | BATADV_TT_QUERY = 0x07, | 32 | BATADV_TT_QUERY = 0x07, |
33 | BATADV_ROAM_ADV = 0x08, | 33 | BATADV_ROAM_ADV = 0x08, |
34 | BATADV_UNICAST_4ADDR = 0x09, | ||
35 | }; | ||
36 | |||
37 | /** | ||
38 | * enum batadv_subtype - packet subtype for unicast4addr | ||
39 | * @BATADV_P_DATA: user payload | ||
40 | * @BATADV_P_DAT_DHT_GET: DHT request message | ||
41 | * @BATADV_P_DAT_DHT_PUT: DHT store message | ||
42 | * @BATADV_P_DAT_CACHE_REPLY: ARP reply generated by DAT | ||
43 | */ | ||
44 | enum batadv_subtype { | ||
45 | BATADV_P_DATA = 0x01, | ||
46 | BATADV_P_DAT_DHT_GET = 0x02, | ||
47 | BATADV_P_DAT_DHT_PUT = 0x03, | ||
48 | BATADV_P_DAT_CACHE_REPLY = 0x04, | ||
34 | }; | 49 | }; |
35 | 50 | ||
36 | /* this file is included by batctl which needs these defines */ | 51 | /* this file is included by batctl which needs these defines */ |
@@ -106,13 +121,16 @@ struct batadv_bla_claim_dst { | |||
106 | uint8_t magic[3]; /* FF:43:05 */ | 121 | uint8_t magic[3]; /* FF:43:05 */ |
107 | uint8_t type; /* bla_claimframe */ | 122 | uint8_t type; /* bla_claimframe */ |
108 | __be16 group; /* group id */ | 123 | __be16 group; /* group id */ |
109 | } __packed; | 124 | }; |
110 | 125 | ||
111 | struct batadv_header { | 126 | struct batadv_header { |
112 | uint8_t packet_type; | 127 | uint8_t packet_type; |
113 | uint8_t version; /* batman version field */ | 128 | uint8_t version; /* batman version field */ |
114 | uint8_t ttl; | 129 | uint8_t ttl; |
115 | } __packed; | 130 | /* the parent struct has to add a byte after the header to make |
131 | * everything 4 bytes aligned again | ||
132 | */ | ||
133 | }; | ||
116 | 134 | ||
117 | struct batadv_ogm_packet { | 135 | struct batadv_ogm_packet { |
118 | struct batadv_header header; | 136 | struct batadv_header header; |
@@ -137,7 +155,7 @@ struct batadv_icmp_packet { | |||
137 | __be16 seqno; | 155 | __be16 seqno; |
138 | uint8_t uid; | 156 | uint8_t uid; |
139 | uint8_t reserved; | 157 | uint8_t reserved; |
140 | } __packed; | 158 | }; |
141 | 159 | ||
142 | #define BATADV_RR_LEN 16 | 160 | #define BATADV_RR_LEN 16 |
143 | 161 | ||
@@ -153,13 +171,44 @@ struct batadv_icmp_packet_rr { | |||
153 | uint8_t uid; | 171 | uint8_t uid; |
154 | uint8_t rr_cur; | 172 | uint8_t rr_cur; |
155 | uint8_t rr[BATADV_RR_LEN][ETH_ALEN]; | 173 | uint8_t rr[BATADV_RR_LEN][ETH_ALEN]; |
156 | } __packed; | 174 | }; |
175 | |||
176 | /* All packet headers in front of an ethernet header have to be completely | ||
177 | * divisible by 2 but not by 4 to make the payload after the ethernet | ||
178 | * header again 4 bytes boundary aligned. | ||
179 | * | ||
180 | * A packing of 2 is necessary to avoid extra padding at the end of the struct | ||
181 | * caused by a structure member which is larger than two bytes. Otherwise | ||
182 | * the structure would not fulfill the previously mentioned rule to avoid the | ||
183 | * misalignment of the payload after the ethernet header. It may also lead to | ||
184 | * leakage of information when the padding is not initialized before sending. | ||
185 | */ | ||
186 | #pragma pack(2) | ||
157 | 187 | ||
158 | struct batadv_unicast_packet { | 188 | struct batadv_unicast_packet { |
159 | struct batadv_header header; | 189 | struct batadv_header header; |
160 | uint8_t ttvn; /* destination translation table version number */ | 190 | uint8_t ttvn; /* destination translation table version number */ |
161 | uint8_t dest[ETH_ALEN]; | 191 | uint8_t dest[ETH_ALEN]; |
162 | } __packed; | 192 | /* "4 bytes boundary + 2 bytes" long to make the payload after the |
193 | * following ethernet header again 4 bytes boundary aligned | ||
194 | */ | ||
195 | }; | ||
196 | |||
197 | /** | ||
198 | * struct batadv_unicast_4addr_packet - extended unicast packet | ||
199 | * @u: common unicast packet header | ||
200 | * @src: address of the source | ||
201 | * @subtype: packet subtype | ||
202 | */ | ||
203 | struct batadv_unicast_4addr_packet { | ||
204 | struct batadv_unicast_packet u; | ||
205 | uint8_t src[ETH_ALEN]; | ||
206 | uint8_t subtype; | ||
207 | uint8_t reserved; | ||
208 | /* "4 bytes boundary + 2 bytes" long to make the payload after the | ||
209 | * following ethernet header again 4 bytes boundary aligned | ||
210 | */ | ||
211 | }; | ||
163 | 212 | ||
164 | struct batadv_unicast_frag_packet { | 213 | struct batadv_unicast_frag_packet { |
165 | struct batadv_header header; | 214 | struct batadv_header header; |
@@ -176,7 +225,12 @@ struct batadv_bcast_packet { | |||
176 | uint8_t reserved; | 225 | uint8_t reserved; |
177 | __be32 seqno; | 226 | __be32 seqno; |
178 | uint8_t orig[ETH_ALEN]; | 227 | uint8_t orig[ETH_ALEN]; |
179 | } __packed; | 228 | /* "4 bytes boundary + 2 bytes" long to make the payload after the |
229 | * following ethernet header again 4 bytes boundary aligned | ||
230 | */ | ||
231 | }; | ||
232 | |||
233 | #pragma pack() | ||
180 | 234 | ||
181 | struct batadv_vis_packet { | 235 | struct batadv_vis_packet { |
182 | struct batadv_header header; | 236 | struct batadv_header header; |
@@ -187,7 +241,7 @@ struct batadv_vis_packet { | |||
187 | uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */ | 241 | uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */ |
188 | uint8_t target_orig[ETH_ALEN]; /* who should receive this packet */ | 242 | uint8_t target_orig[ETH_ALEN]; /* who should receive this packet */ |
189 | uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */ | 243 | uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */ |
190 | } __packed; | 244 | }; |
191 | 245 | ||
192 | struct batadv_tt_query_packet { | 246 | struct batadv_tt_query_packet { |
193 | struct batadv_header header; | 247 | struct batadv_header header; |
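The "4 bytes boundary + 2 bytes" rule described in the packing comment above can be checked mechanically: every batman header placed in front of the inner ethernet header is 2 modulo 4 bytes long, and since the ethernet header itself is 14 bytes (also 2 modulo 4), the payload behind it ends up 4-byte aligned again. A stand-alone sketch under those assumptions, mirroring the field layout from the hunk (ETH_ALEN is 6) and using C11 _Static_assert instead of the kernel's BUILD_BUG_ON:

    #include <stdint.h>

    #define ETH_ALEN 6

    #pragma pack(2)
    struct hdr {                    /* mirrors struct batadv_header */
            uint8_t packet_type;
            uint8_t version;
            uint8_t ttl;
    };

    struct unicast {                /* mirrors struct batadv_unicast_packet */
            struct hdr header;
            uint8_t ttvn;
            uint8_t dest[ETH_ALEN];
    };

    struct unicast_4addr {          /* mirrors struct batadv_unicast_4addr_packet */
            struct unicast u;
            uint8_t src[ETH_ALEN];
            uint8_t subtype;
            uint8_t reserved;
    };

    struct bcast {                  /* mirrors struct batadv_bcast_packet */
            struct hdr header;
            uint8_t reserved;
            uint32_t seqno;         /* __be32 in the kernel */
            uint8_t orig[ETH_ALEN];
    };
    #pragma pack()

    /* each header must be "4 bytes boundary + 2 bytes" long so that the
     * 14 byte ethernet header following it leaves the payload 4-byte aligned
     */
    _Static_assert(sizeof(struct unicast) % 4 == 2, "unicast: 10 bytes");
    _Static_assert(sizeof(struct unicast_4addr) % 4 == 2, "4addr: 18 bytes");
    _Static_assert(sizeof(struct bcast) % 4 == 2, "bcast: 14 bytes");

    int main(void) { return 0; }

The #pragma pack(2) region in the patch covers the unicast, 4addr, fragment and broadcast headers; the sketch reproduces three of them, since the fragment header's full layout is not visible in this hunk.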
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 376b4cc6ca82..1aa1722d0187 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "vis.h" | 28 | #include "vis.h" |
29 | #include "unicast.h" | 29 | #include "unicast.h" |
30 | #include "bridge_loop_avoidance.h" | 30 | #include "bridge_loop_avoidance.h" |
31 | #include "distributed-arp-table.h" | ||
31 | 32 | ||
32 | static int batadv_route_unicast_packet(struct sk_buff *skb, | 33 | static int batadv_route_unicast_packet(struct sk_buff *skb, |
33 | struct batadv_hard_iface *recv_if); | 34 | struct batadv_hard_iface *recv_if); |
@@ -284,7 +285,6 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv, | |||
284 | { | 285 | { |
285 | struct batadv_hard_iface *primary_if = NULL; | 286 | struct batadv_hard_iface *primary_if = NULL; |
286 | struct batadv_orig_node *orig_node = NULL; | 287 | struct batadv_orig_node *orig_node = NULL; |
287 | struct batadv_neigh_node *router = NULL; | ||
288 | struct batadv_icmp_packet_rr *icmp_packet; | 288 | struct batadv_icmp_packet_rr *icmp_packet; |
289 | int ret = NET_RX_DROP; | 289 | int ret = NET_RX_DROP; |
290 | 290 | ||
@@ -306,10 +306,6 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv, | |||
306 | if (!orig_node) | 306 | if (!orig_node) |
307 | goto out; | 307 | goto out; |
308 | 308 | ||
309 | router = batadv_orig_node_get_router(orig_node); | ||
310 | if (!router) | ||
311 | goto out; | ||
312 | |||
313 | /* create a copy of the skb, if needed, to modify it. */ | 309 | /* create a copy of the skb, if needed, to modify it. */ |
314 | if (skb_cow(skb, ETH_HLEN) < 0) | 310 | if (skb_cow(skb, ETH_HLEN) < 0) |
315 | goto out; | 311 | goto out; |
@@ -321,14 +317,12 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv, | |||
321 | icmp_packet->msg_type = BATADV_ECHO_REPLY; | 317 | icmp_packet->msg_type = BATADV_ECHO_REPLY; |
322 | icmp_packet->header.ttl = BATADV_TTL; | 318 | icmp_packet->header.ttl = BATADV_TTL; |
323 | 319 | ||
324 | batadv_send_skb_packet(skb, router->if_incoming, router->addr); | 320 | if (batadv_send_skb_to_orig(skb, orig_node, NULL)) |
325 | ret = NET_RX_SUCCESS; | 321 | ret = NET_RX_SUCCESS; |
326 | 322 | ||
327 | out: | 323 | out: |
328 | if (primary_if) | 324 | if (primary_if) |
329 | batadv_hardif_free_ref(primary_if); | 325 | batadv_hardif_free_ref(primary_if); |
330 | if (router) | ||
331 | batadv_neigh_node_free_ref(router); | ||
332 | if (orig_node) | 326 | if (orig_node) |
333 | batadv_orig_node_free_ref(orig_node); | 327 | batadv_orig_node_free_ref(orig_node); |
334 | return ret; | 328 | return ret; |
@@ -339,7 +333,6 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv, | |||
339 | { | 333 | { |
340 | struct batadv_hard_iface *primary_if = NULL; | 334 | struct batadv_hard_iface *primary_if = NULL; |
341 | struct batadv_orig_node *orig_node = NULL; | 335 | struct batadv_orig_node *orig_node = NULL; |
342 | struct batadv_neigh_node *router = NULL; | ||
343 | struct batadv_icmp_packet *icmp_packet; | 336 | struct batadv_icmp_packet *icmp_packet; |
344 | int ret = NET_RX_DROP; | 337 | int ret = NET_RX_DROP; |
345 | 338 | ||
@@ -361,10 +354,6 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv, | |||
361 | if (!orig_node) | 354 | if (!orig_node) |
362 | goto out; | 355 | goto out; |
363 | 356 | ||
364 | router = batadv_orig_node_get_router(orig_node); | ||
365 | if (!router) | ||
366 | goto out; | ||
367 | |||
368 | /* create a copy of the skb, if needed, to modify it. */ | 357 | /* create a copy of the skb, if needed, to modify it. */ |
369 | if (skb_cow(skb, ETH_HLEN) < 0) | 358 | if (skb_cow(skb, ETH_HLEN) < 0) |
370 | goto out; | 359 | goto out; |
@@ -376,14 +365,12 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv, | |||
376 | icmp_packet->msg_type = BATADV_TTL_EXCEEDED; | 365 | icmp_packet->msg_type = BATADV_TTL_EXCEEDED; |
377 | icmp_packet->header.ttl = BATADV_TTL; | 366 | icmp_packet->header.ttl = BATADV_TTL; |
378 | 367 | ||
379 | batadv_send_skb_packet(skb, router->if_incoming, router->addr); | 368 | if (batadv_send_skb_to_orig(skb, orig_node, NULL)) |
380 | ret = NET_RX_SUCCESS; | 369 | ret = NET_RX_SUCCESS; |
381 | 370 | ||
382 | out: | 371 | out: |
383 | if (primary_if) | 372 | if (primary_if) |
384 | batadv_hardif_free_ref(primary_if); | 373 | batadv_hardif_free_ref(primary_if); |
385 | if (router) | ||
386 | batadv_neigh_node_free_ref(router); | ||
387 | if (orig_node) | 374 | if (orig_node) |
388 | batadv_orig_node_free_ref(orig_node); | 375 | batadv_orig_node_free_ref(orig_node); |
389 | return ret; | 376 | return ret; |
@@ -397,7 +384,6 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, | |||
397 | struct batadv_icmp_packet_rr *icmp_packet; | 384 | struct batadv_icmp_packet_rr *icmp_packet; |
398 | struct ethhdr *ethhdr; | 385 | struct ethhdr *ethhdr; |
399 | struct batadv_orig_node *orig_node = NULL; | 386 | struct batadv_orig_node *orig_node = NULL; |
400 | struct batadv_neigh_node *router = NULL; | ||
401 | int hdr_size = sizeof(struct batadv_icmp_packet); | 387 | int hdr_size = sizeof(struct batadv_icmp_packet); |
402 | int ret = NET_RX_DROP; | 388 | int ret = NET_RX_DROP; |
403 | 389 | ||
@@ -446,10 +432,6 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, | |||
446 | if (!orig_node) | 432 | if (!orig_node) |
447 | goto out; | 433 | goto out; |
448 | 434 | ||
449 | router = batadv_orig_node_get_router(orig_node); | ||
450 | if (!router) | ||
451 | goto out; | ||
452 | |||
453 | /* create a copy of the skb, if needed, to modify it. */ | 435 | /* create a copy of the skb, if needed, to modify it. */ |
454 | if (skb_cow(skb, ETH_HLEN) < 0) | 436 | if (skb_cow(skb, ETH_HLEN) < 0) |
455 | goto out; | 437 | goto out; |
@@ -460,12 +442,10 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, | |||
460 | icmp_packet->header.ttl--; | 442 | icmp_packet->header.ttl--; |
461 | 443 | ||
462 | /* route it */ | 444 | /* route it */ |
463 | batadv_send_skb_packet(skb, router->if_incoming, router->addr); | 445 | if (batadv_send_skb_to_orig(skb, orig_node, recv_if)) |
464 | ret = NET_RX_SUCCESS; | 446 | ret = NET_RX_SUCCESS; |
465 | 447 | ||
466 | out: | 448 | out: |
467 | if (router) | ||
468 | batadv_neigh_node_free_ref(router); | ||
469 | if (orig_node) | 449 | if (orig_node) |
470 | batadv_orig_node_free_ref(orig_node); | 450 | batadv_orig_node_free_ref(orig_node); |
471 | return ret; | 451 | return ret; |
@@ -549,25 +529,18 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig, | |||
549 | if (tmp_neigh_node->if_incoming == recv_if) | 529 | if (tmp_neigh_node->if_incoming == recv_if) |
550 | continue; | 530 | continue; |
551 | 531 | ||
552 | if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) | 532 | if (router && tmp_neigh_node->tq_avg <= router->tq_avg) |
553 | continue; | 533 | continue; |
554 | 534 | ||
555 | /* if we don't have a router yet | 535 | if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) |
556 | * or this one is better, choose it. | 536 | continue; |
557 | */ | ||
558 | if ((!router) || | ||
559 | (tmp_neigh_node->tq_avg > router->tq_avg)) { | ||
560 | /* decrement refcount of | ||
561 | * previously selected router | ||
562 | */ | ||
563 | if (router) | ||
564 | batadv_neigh_node_free_ref(router); | ||
565 | 537 | ||
566 | router = tmp_neigh_node; | 538 | /* decrement refcount of previously selected router */ |
567 | atomic_inc_not_zero(&router->refcount); | 539 | if (router) |
568 | } | 540 | batadv_neigh_node_free_ref(router); |
569 | 541 | ||
570 | batadv_neigh_node_free_ref(tmp_neigh_node); | 542 | /* we found a better router (or at least one valid router) */ |
543 | router = tmp_neigh_node; | ||
571 | } | 544 | } |
572 | 545 | ||
573 | /* use the first candidate if nothing was found. */ | 546 | /* use the first candidate if nothing was found. */ |
@@ -687,21 +660,8 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if) | |||
687 | struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 660 | struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
688 | struct batadv_roam_adv_packet *roam_adv_packet; | 661 | struct batadv_roam_adv_packet *roam_adv_packet; |
689 | struct batadv_orig_node *orig_node; | 662 | struct batadv_orig_node *orig_node; |
690 | struct ethhdr *ethhdr; | ||
691 | |||
692 | /* drop packet if it has not necessary minimum size */ | ||
693 | if (unlikely(!pskb_may_pull(skb, | ||
694 | sizeof(struct batadv_roam_adv_packet)))) | ||
695 | goto out; | ||
696 | |||
697 | ethhdr = (struct ethhdr *)skb_mac_header(skb); | ||
698 | |||
699 | /* packet with unicast indication but broadcast recipient */ | ||
700 | if (is_broadcast_ether_addr(ethhdr->h_dest)) | ||
701 | goto out; | ||
702 | 663 | ||
703 | /* packet with broadcast sender address */ | 664 | if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0) |
704 | if (is_broadcast_ether_addr(ethhdr->h_source)) | ||
705 | goto out; | 665 | goto out; |
706 | 666 | ||
707 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX); | 667 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX); |
@@ -730,12 +690,6 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if) | |||
730 | BATADV_TT_CLIENT_ROAM, | 690 | BATADV_TT_CLIENT_ROAM, |
731 | atomic_read(&orig_node->last_ttvn) + 1); | 691 | atomic_read(&orig_node->last_ttvn) + 1); |
732 | 692 | ||
733 | /* Roaming phase starts: I have new information but the ttvn has not | ||
734 | * been incremented yet. This flag will make me check all the incoming | ||
735 | * packets for the correct destination. | ||
736 | */ | ||
737 | bat_priv->tt.poss_change = true; | ||
738 | |||
739 | batadv_orig_node_free_ref(orig_node); | 693 | batadv_orig_node_free_ref(orig_node); |
740 | out: | 694 | out: |
741 | /* returning NET_RX_DROP will make the caller function kfree the skb */ | 695 | /* returning NET_RX_DROP will make the caller function kfree the skb */ |
@@ -907,8 +861,8 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, | |||
907 | skb->len + ETH_HLEN); | 861 | skb->len + ETH_HLEN); |
908 | 862 | ||
909 | /* route it */ | 863 | /* route it */ |
910 | batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | 864 | if (batadv_send_skb_to_orig(skb, orig_node, recv_if)) |
911 | ret = NET_RX_SUCCESS; | 865 | ret = NET_RX_SUCCESS; |
912 | 866 | ||
913 | out: | 867 | out: |
914 | if (neigh_node) | 868 | if (neigh_node) |
@@ -918,80 +872,161 @@ out: | |||
918 | return ret; | 872 | return ret; |
919 | } | 873 | } |
920 | 874 | ||
875 | /** | ||
876 | * batadv_reroute_unicast_packet - update the unicast header for re-routing | ||
877 | * @bat_priv: the bat priv with all the soft interface information | ||
878 | * @unicast_packet: the unicast header to be updated | ||
879 | * @dst_addr: the payload destination | ||
880 | * | ||
881 | * Search the translation table for dst_addr and update the unicast header with | ||
882 | * the new corresponding information (originator address where the destination | ||
883 | * client currently is and its known TTVN) | ||
884 | * | ||
885 | * Returns true if the packet header has been updated, false otherwise | ||
886 | */ | ||
887 | static bool | ||
888 | batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, | ||
889 | struct batadv_unicast_packet *unicast_packet, | ||
890 | uint8_t *dst_addr) | ||
891 | { | ||
892 | struct batadv_orig_node *orig_node = NULL; | ||
893 | struct batadv_hard_iface *primary_if = NULL; | ||
894 | bool ret = false; | ||
895 | uint8_t *orig_addr, orig_ttvn; | ||
896 | |||
897 | if (batadv_is_my_client(bat_priv, dst_addr)) { | ||
898 | primary_if = batadv_primary_if_get_selected(bat_priv); | ||
899 | if (!primary_if) | ||
900 | goto out; | ||
901 | orig_addr = primary_if->net_dev->dev_addr; | ||
902 | orig_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); | ||
903 | } else { | ||
904 | orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr); | ||
905 | if (!orig_node) | ||
906 | goto out; | ||
907 | |||
908 | if (batadv_compare_eth(orig_node->orig, unicast_packet->dest)) | ||
909 | goto out; | ||
910 | |||
911 | orig_addr = orig_node->orig; | ||
912 | orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); | ||
913 | } | ||
914 | |||
915 | /* update the packet header */ | ||
916 | memcpy(unicast_packet->dest, orig_addr, ETH_ALEN); | ||
917 | unicast_packet->ttvn = orig_ttvn; | ||
918 | |||
919 | ret = true; | ||
920 | out: | ||
921 | if (primary_if) | ||
922 | batadv_hardif_free_ref(primary_if); | ||
923 | if (orig_node) | ||
924 | batadv_orig_node_free_ref(orig_node); | ||
925 | |||
926 | return ret; | ||
927 | } | ||
928 | |||
921 | static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, | 929 | static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, |
922 | struct sk_buff *skb) { | 930 | struct sk_buff *skb) { |
923 | uint8_t curr_ttvn; | 931 | uint8_t curr_ttvn, old_ttvn; |
924 | struct batadv_orig_node *orig_node; | 932 | struct batadv_orig_node *orig_node; |
925 | struct ethhdr *ethhdr; | 933 | struct ethhdr *ethhdr; |
926 | struct batadv_hard_iface *primary_if; | 934 | struct batadv_hard_iface *primary_if; |
927 | struct batadv_unicast_packet *unicast_packet; | 935 | struct batadv_unicast_packet *unicast_packet; |
928 | bool tt_poss_change; | ||
929 | int is_old_ttvn; | 936 | int is_old_ttvn; |
930 | 937 | ||
931 | /* I could need to modify it */ | 938 | /* check if there is enough data before accessing it */ |
932 | if (skb_cow(skb, sizeof(struct batadv_unicast_packet)) < 0) | 939 | if (pskb_may_pull(skb, sizeof(*unicast_packet) + ETH_HLEN) < 0) |
940 | return 0; | ||
941 | |||
942 | /* create a copy of the skb (in case of re-routing) to modify it. */ | ||
943 | if (skb_cow(skb, sizeof(*unicast_packet)) < 0) | ||
933 | return 0; | 944 | return 0; |
934 | 945 | ||
935 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 946 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
947 | ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet)); | ||
936 | 948 | ||
937 | if (batadv_is_my_mac(unicast_packet->dest)) { | 949 | /* check if the destination client was served by this node and it is now |
938 | tt_poss_change = bat_priv->tt.poss_change; | 950 | * roaming. In this case, it means that the node has got a ROAM_ADV |
939 | curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); | 951 | * message and that it knows the new destination in the mesh to re-route |
940 | } else { | 952 | * the packet to |
953 | */ | ||
954 | if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest)) { | ||
955 | if (batadv_reroute_unicast_packet(bat_priv, unicast_packet, | ||
956 | ethhdr->h_dest)) | ||
957 | net_ratelimited_function(batadv_dbg, BATADV_DBG_TT, | ||
958 | bat_priv, | ||
959 | "Rerouting unicast packet to %pM (dst=%pM): Local Roaming\n", | ||
960 | unicast_packet->dest, | ||
961 | ethhdr->h_dest); | ||
962 | /* at this point the mesh destination should have been | ||
963 | * substituted with the originator address found in the global | ||
964 | * table. If not, let the packet go untouched anyway because | ||
965 | * there is nothing the node can do | ||
966 | */ | ||
967 | return 1; | ||
968 | } | ||
969 | |||
970 | /* retrieve the TTVN known by this node for the packet destination. This | ||
971 | * value is used later to check if the node which sent (or re-routed | ||
972 | * last time) the packet had updated information or not | ||
973 | */ | ||
974 | curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); | ||
975 | if (!batadv_is_my_mac(unicast_packet->dest)) { | ||
941 | orig_node = batadv_orig_hash_find(bat_priv, | 976 | orig_node = batadv_orig_hash_find(bat_priv, |
942 | unicast_packet->dest); | 977 | unicast_packet->dest); |
943 | 978 | /* if it is not possible to find the orig_node representing the | |
979 | * destination, the packet can immediately be dropped as it will | ||
980 | * not be possible to deliver it | ||
981 | */ | ||
944 | if (!orig_node) | 982 | if (!orig_node) |
945 | return 0; | 983 | return 0; |
946 | 984 | ||
947 | curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); | 985 | curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); |
948 | tt_poss_change = orig_node->tt_poss_change; | ||
949 | batadv_orig_node_free_ref(orig_node); | 986 | batadv_orig_node_free_ref(orig_node); |
950 | } | 987 | } |
951 | 988 | ||
952 | /* Check whether I have to reroute the packet */ | 989 | /* check if the TTVN contained in the packet is fresher than what the |
990 | * node knows | ||
991 | */ | ||
953 | is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn); | 992 | is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn); |
954 | if (is_old_ttvn || tt_poss_change) { | 993 | if (!is_old_ttvn) |
955 | /* check if there is enough data before accessing it */ | 994 | return 1; |
956 | if (pskb_may_pull(skb, sizeof(struct batadv_unicast_packet) + | ||
957 | ETH_HLEN) < 0) | ||
958 | return 0; | ||
959 | 995 | ||
960 | ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet)); | 996 | old_ttvn = unicast_packet->ttvn; |
997 | /* the packet was forged based on outdated network information. Its | ||
998 | * destination can possibly be updated and forwarded towards the new | ||
999 | * target host | ||
1000 | */ | ||
1001 | if (batadv_reroute_unicast_packet(bat_priv, unicast_packet, | ||
1002 | ethhdr->h_dest)) { | ||
1003 | net_ratelimited_function(batadv_dbg, BATADV_DBG_TT, bat_priv, | ||
1004 | "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n", | ||
1005 | unicast_packet->dest, ethhdr->h_dest, | ||
1006 | old_ttvn, curr_ttvn); | ||
1007 | return 1; | ||
1008 | } | ||
961 | 1009 | ||
962 | /* we don't have an updated route for this client, so we should | 1010 | /* the packet has not been re-routed: either the destination is |
963 | * not try to reroute the packet!! | 1011 | * currently served by this node or there is no destination at all and |
964 | */ | 1012 | * it is possible to drop the packet |
965 | if (batadv_tt_global_client_is_roaming(bat_priv, | 1013 | */ |
966 | ethhdr->h_dest)) | 1014 | if (!batadv_is_my_client(bat_priv, ethhdr->h_dest)) |
967 | return 1; | 1015 | return 0; |
968 | 1016 | ||
969 | orig_node = batadv_transtable_search(bat_priv, NULL, | 1017 | /* update the header in order to let the packet be delivered to this |
970 | ethhdr->h_dest); | 1018 | * node's soft interface |
971 | 1019 | */ | |
972 | if (!orig_node) { | 1020 | primary_if = batadv_primary_if_get_selected(bat_priv); |
973 | if (!batadv_is_my_client(bat_priv, ethhdr->h_dest)) | 1021 | if (!primary_if) |
974 | return 0; | 1022 | return 0; |
975 | primary_if = batadv_primary_if_get_selected(bat_priv); | ||
976 | if (!primary_if) | ||
977 | return 0; | ||
978 | memcpy(unicast_packet->dest, | ||
979 | primary_if->net_dev->dev_addr, ETH_ALEN); | ||
980 | batadv_hardif_free_ref(primary_if); | ||
981 | } else { | ||
982 | memcpy(unicast_packet->dest, orig_node->orig, | ||
983 | ETH_ALEN); | ||
984 | curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); | ||
985 | batadv_orig_node_free_ref(orig_node); | ||
986 | } | ||
987 | 1023 | ||
988 | batadv_dbg(BATADV_DBG_ROUTES, bat_priv, | 1024 | memcpy(unicast_packet->dest, primary_if->net_dev->dev_addr, ETH_ALEN); |
989 | "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n", | 1025 | |
990 | unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest, | 1026 | batadv_hardif_free_ref(primary_if); |
991 | unicast_packet->dest); | 1027 | |
1028 | unicast_packet->ttvn = curr_ttvn; | ||
992 | 1029 | ||
993 | unicast_packet->ttvn = curr_ttvn; | ||
994 | } | ||
995 | return 1; | 1030 | return 1; |
996 | } | 1031 | } |
997 | 1032 | ||
@@ -1000,7 +1035,19 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
1000 | { | 1035 | { |
1001 | struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 1036 | struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
1002 | struct batadv_unicast_packet *unicast_packet; | 1037 | struct batadv_unicast_packet *unicast_packet; |
1038 | struct batadv_unicast_4addr_packet *unicast_4addr_packet; | ||
1039 | uint8_t *orig_addr; | ||
1040 | struct batadv_orig_node *orig_node = NULL; | ||
1003 | int hdr_size = sizeof(*unicast_packet); | 1041 | int hdr_size = sizeof(*unicast_packet); |
1042 | bool is4addr; | ||
1043 | |||
1044 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | ||
1045 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; | ||
1046 | |||
1047 | is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR; | ||
1048 | /* the caller function should have already pulled 2 bytes */ | ||
1049 | if (is4addr) | ||
1050 | hdr_size = sizeof(*unicast_4addr_packet); | ||
1004 | 1051 | ||
1005 | if (batadv_check_unicast_packet(skb, hdr_size) < 0) | 1052 | if (batadv_check_unicast_packet(skb, hdr_size) < 0) |
1006 | return NET_RX_DROP; | 1053 | return NET_RX_DROP; |
@@ -1008,12 +1055,28 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
1008 | if (!batadv_check_unicast_ttvn(bat_priv, skb)) | 1055 | if (!batadv_check_unicast_ttvn(bat_priv, skb)) |
1009 | return NET_RX_DROP; | 1056 | return NET_RX_DROP; |
1010 | 1057 | ||
1011 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | ||
1012 | |||
1013 | /* packet for me */ | 1058 | /* packet for me */ |
1014 | if (batadv_is_my_mac(unicast_packet->dest)) { | 1059 | if (batadv_is_my_mac(unicast_packet->dest)) { |
1060 | if (is4addr) { | ||
1061 | batadv_dat_inc_counter(bat_priv, | ||
1062 | unicast_4addr_packet->subtype); | ||
1063 | orig_addr = unicast_4addr_packet->src; | ||
1064 | orig_node = batadv_orig_hash_find(bat_priv, orig_addr); | ||
1065 | } | ||
1066 | |||
1067 | if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, | ||
1068 | hdr_size)) | ||
1069 | goto rx_success; | ||
1070 | if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb, | ||
1071 | hdr_size)) | ||
1072 | goto rx_success; | ||
1073 | |||
1015 | batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size, | 1074 | batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size, |
1016 | NULL); | 1075 | orig_node); |
1076 | |||
1077 | rx_success: | ||
1078 | if (orig_node) | ||
1079 | batadv_orig_node_free_ref(orig_node); | ||
1017 | 1080 | ||
1018 | return NET_RX_SUCCESS; | 1081 | return NET_RX_SUCCESS; |
1019 | } | 1082 | } |
@@ -1050,8 +1113,17 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb, | |||
1050 | if (!new_skb) | 1113 | if (!new_skb) |
1051 | return NET_RX_SUCCESS; | 1114 | return NET_RX_SUCCESS; |
1052 | 1115 | ||
1116 | if (batadv_dat_snoop_incoming_arp_request(bat_priv, new_skb, | ||
1117 | hdr_size)) | ||
1118 | goto rx_success; | ||
1119 | if (batadv_dat_snoop_incoming_arp_reply(bat_priv, new_skb, | ||
1120 | hdr_size)) | ||
1121 | goto rx_success; | ||
1122 | |||
1053 | batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if, | 1123 | batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if, |
1054 | sizeof(struct batadv_unicast_packet), NULL); | 1124 | sizeof(struct batadv_unicast_packet), NULL); |
1125 | |||
1126 | rx_success: | ||
1055 | return NET_RX_SUCCESS; | 1127 | return NET_RX_SUCCESS; |
1056 | } | 1128 | } |
1057 | 1129 | ||
@@ -1124,14 +1196,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, | |||
1124 | 1196 | ||
1125 | spin_unlock_bh(&orig_node->bcast_seqno_lock); | 1197 | spin_unlock_bh(&orig_node->bcast_seqno_lock); |
1126 | 1198 | ||
1127 | /* keep skb linear for crc calculation */ | ||
1128 | if (skb_linearize(skb) < 0) | ||
1129 | goto out; | ||
1130 | |||
1131 | bcast_packet = (struct batadv_bcast_packet *)skb->data; | ||
1132 | |||
1133 | /* check whether this has been sent by another originator before */ | 1199 | /* check whether this has been sent by another originator before */ |
1134 | if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, skb->len)) | 1200 | if (batadv_bla_check_bcast_duplist(bat_priv, skb)) |
1135 | goto out; | 1201 | goto out; |
1136 | 1202 | ||
1137 | /* rebroadcast packet */ | 1203 | /* rebroadcast packet */ |
@@ -1143,9 +1209,16 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, | |||
1143 | if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size)) | 1209 | if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size)) |
1144 | goto out; | 1210 | goto out; |
1145 | 1211 | ||
1212 | if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, hdr_size)) | ||
1213 | goto rx_success; | ||
1214 | if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb, hdr_size)) | ||
1215 | goto rx_success; | ||
1216 | |||
1146 | /* broadcast for me */ | 1217 | /* broadcast for me */ |
1147 | batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size, | 1218 | batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size, |
1148 | orig_node); | 1219 | orig_node); |
1220 | |||
1221 | rx_success: | ||
1149 | ret = NET_RX_SUCCESS; | 1222 | ret = NET_RX_SUCCESS; |
1150 | goto out; | 1223 | goto out; |
1151 | 1224 | ||
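The rewritten batadv_check_unicast_ttvn() above decides whether to re-route purely on batadv_seq_before(), i.e. on whether the 8-bit TTVN carried in the packet lags behind the TTVN this node attributes to the destination, with wraparound taken into account. The following is an illustrative stand-alone sketch of that kind of comparison (the usual signed-difference idiom), not the kernel macro itself:

    #include <assert.h>
    #include <stdint.h>

    /* "a is before b" for 8-bit version numbers: the unsigned difference,
     * reinterpreted as signed, is negative when a lags behind b, even
     * across the 255 -> 0 wraparound
     */
    static int ttvn_seq_before(uint8_t a, uint8_t b)
    {
            return (int8_t)(uint8_t)(a - b) < 0;
    }

    int main(void)
    {
            assert(ttvn_seq_before(10, 11));        /* plain case */
            assert(ttvn_seq_before(250, 3));        /* 250 is older than 3 after wrap */
            assert(!ttvn_seq_before(3, 250));       /* ...but not the other way round */
            return 0;
    }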
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 570a8bce0364..c7f702376535 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include "main.h" | 20 | #include "main.h" |
21 | #include "distributed-arp-table.h" | ||
21 | #include "send.h" | 22 | #include "send.h" |
22 | #include "routing.h" | 23 | #include "routing.h" |
23 | #include "translation-table.h" | 24 | #include "translation-table.h" |
@@ -77,6 +78,39 @@ send_skb_err: | |||
77 | return NET_XMIT_DROP; | 78 | return NET_XMIT_DROP; |
78 | } | 79 | } |
79 | 80 | ||
81 | /** | ||
82 | * batadv_send_skb_to_orig - Lookup next-hop and transmit skb. | ||
83 | * @skb: Packet to be transmitted. | ||
84 | * @orig_node: Final destination of the packet. | ||
85 | * @recv_if: Interface used when receiving the packet (can be NULL). | ||
86 | * | ||
87 | * Looks up the best next-hop towards the passed originator and passes the | ||
88 | * skb on for preparation of the MAC header. If the packet originated from this | ||
89 | * host, NULL can be passed as recv_if and no interface alternating is | ||
90 | * attempted. | ||
91 | * | ||
92 | * Returns TRUE on success; FALSE otherwise. | ||
93 | */ | ||
94 | bool batadv_send_skb_to_orig(struct sk_buff *skb, | ||
95 | struct batadv_orig_node *orig_node, | ||
96 | struct batadv_hard_iface *recv_if) | ||
97 | { | ||
98 | struct batadv_priv *bat_priv = orig_node->bat_priv; | ||
99 | struct batadv_neigh_node *neigh_node; | ||
100 | |||
101 | /* batadv_find_router() increases neigh_nodes refcount if found. */ | ||
102 | neigh_node = batadv_find_router(bat_priv, orig_node, recv_if); | ||
103 | if (!neigh_node) | ||
104 | return false; | ||
105 | |||
106 | /* route it */ | ||
107 | batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | ||
108 | |||
109 | batadv_neigh_node_free_ref(neigh_node); | ||
110 | |||
111 | return true; | ||
112 | } | ||
113 | |||
80 | void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface) | 114 | void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface) |
81 | { | 115 | { |
82 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 116 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
@@ -209,6 +243,9 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work) | |||
209 | if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) | 243 | if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) |
210 | goto out; | 244 | goto out; |
211 | 245 | ||
246 | if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) | ||
247 | goto out; | ||
248 | |||
212 | /* rebroadcast packet */ | 249 | /* rebroadcast packet */ |
213 | rcu_read_lock(); | 250 | rcu_read_lock(); |
214 | list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { | 251 | list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { |
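The new batadv_send_skb_to_orig() replaces the open-coded find-router/send/free pattern in the routing paths shown earlier. A hedged usage sketch, where forward_sketch() is a made-up caller and only the helper itself comes from this patch:

    /* forward_sketch() is hypothetical; it only demonstrates the calling
     * convention of batadv_send_skb_to_orig()
     */
    static int forward_sketch(struct sk_buff *skb,
                              struct batadv_orig_node *orig_node,
                              struct batadv_hard_iface *recv_if)
    {
            /* pass the receiving interface when relaying someone else's packet
             * so router selection may alternate away from it; pass NULL for
             * packets generated by this node (e.g. the ICMP echo replies above)
             */
            if (!batadv_send_skb_to_orig(skb, orig_node, recv_if))
                    return NET_RX_DROP;

            return NET_RX_SUCCESS;
    }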
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h index 643329b787ed..0078dece1abc 100644 --- a/net/batman-adv/send.h +++ b/net/batman-adv/send.h | |||
@@ -23,6 +23,9 @@ | |||
23 | int batadv_send_skb_packet(struct sk_buff *skb, | 23 | int batadv_send_skb_packet(struct sk_buff *skb, |
24 | struct batadv_hard_iface *hard_iface, | 24 | struct batadv_hard_iface *hard_iface, |
25 | const uint8_t *dst_addr); | 25 | const uint8_t *dst_addr); |
26 | bool batadv_send_skb_to_orig(struct sk_buff *skb, | ||
27 | struct batadv_orig_node *orig_node, | ||
28 | struct batadv_hard_iface *recv_if); | ||
26 | void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface); | 29 | void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface); |
27 | int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, | 30 | int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, |
28 | const struct sk_buff *skb, | 31 | const struct sk_buff *skb, |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index ce0684a1fc83..54800c783f96 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include "main.h" | 20 | #include "main.h" |
21 | #include "soft-interface.h" | 21 | #include "soft-interface.h" |
22 | #include "hard-interface.h" | 22 | #include "hard-interface.h" |
23 | #include "distributed-arp-table.h" | ||
23 | #include "routing.h" | 24 | #include "routing.h" |
24 | #include "send.h" | 25 | #include "send.h" |
25 | #include "debugfs.h" | 26 | #include "debugfs.h" |
@@ -146,13 +147,16 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
146 | struct batadv_bcast_packet *bcast_packet; | 147 | struct batadv_bcast_packet *bcast_packet; |
147 | struct vlan_ethhdr *vhdr; | 148 | struct vlan_ethhdr *vhdr; |
148 | __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN); | 149 | __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN); |
149 | static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, | 150 | static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, |
150 | 0x00}; | 151 | 0x00, 0x00}; |
152 | static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00, | ||
153 | 0x00, 0x00}; | ||
151 | unsigned int header_len = 0; | 154 | unsigned int header_len = 0; |
152 | int data_len = skb->len, ret; | 155 | int data_len = skb->len, ret; |
153 | short vid __maybe_unused = -1; | 156 | short vid __maybe_unused = -1; |
154 | bool do_bcast = false; | 157 | bool do_bcast = false; |
155 | uint32_t seqno; | 158 | uint32_t seqno; |
159 | unsigned long brd_delay = 1; | ||
156 | 160 | ||
157 | if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) | 161 | if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) |
158 | goto dropped; | 162 | goto dropped; |
@@ -180,10 +184,16 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
180 | 184 | ||
181 | /* don't accept stp packets. STP does not help in meshes. | 185 | /* don't accept stp packets. STP does not help in meshes. |
182 | * better use the bridge loop avoidance ... | 186 | * better use the bridge loop avoidance ... |
187 | * | ||
188 | * The same goes for ECTP, sent at least by some Cisco switches; | ||
189 | * it might confuse the mesh when used with bridge loop avoidance. | ||
183 | */ | 190 | */ |
184 | if (batadv_compare_eth(ethhdr->h_dest, stp_addr)) | 191 | if (batadv_compare_eth(ethhdr->h_dest, stp_addr)) |
185 | goto dropped; | 192 | goto dropped; |
186 | 193 | ||
194 | if (batadv_compare_eth(ethhdr->h_dest, ectp_addr)) | ||
195 | goto dropped; | ||
196 | |||
187 | if (is_multicast_ether_addr(ethhdr->h_dest)) { | 197 | if (is_multicast_ether_addr(ethhdr->h_dest)) { |
188 | do_bcast = true; | 198 | do_bcast = true; |
189 | 199 | ||
@@ -216,6 +226,13 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
216 | if (!primary_if) | 226 | if (!primary_if) |
217 | goto dropped; | 227 | goto dropped; |
218 | 228 | ||
229 | /* in case of ARP request, we do not immediately broadcast the | ||
230 | * packet; instead we first wait for DAT to try to retrieve the | ||
231 | * correct ARP entry | ||
232 | */ | ||
233 | if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb)) | ||
234 | brd_delay = msecs_to_jiffies(ARP_REQ_DELAY); | ||
235 | |||
219 | if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0) | 236 | if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0) |
220 | goto dropped; | 237 | goto dropped; |
221 | 238 | ||
@@ -237,7 +254,7 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
237 | seqno = atomic_inc_return(&bat_priv->bcast_seqno); | 254 | seqno = atomic_inc_return(&bat_priv->bcast_seqno); |
238 | bcast_packet->seqno = htonl(seqno); | 255 | bcast_packet->seqno = htonl(seqno); |
239 | 256 | ||
240 | batadv_add_bcast_packet_to_list(bat_priv, skb, 1); | 257 | batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay); |
241 | 258 | ||
242 | /* a copy is stored in the bcast list, therefore removing | 259 | /* a copy is stored in the bcast list, therefore removing |
243 | * the original skb. | 260 | * the original skb. |
@@ -252,7 +269,12 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
252 | goto dropped; | 269 | goto dropped; |
253 | } | 270 | } |
254 | 271 | ||
255 | ret = batadv_unicast_send_skb(skb, bat_priv); | 272 | if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb)) |
273 | goto dropped; | ||
274 | |||
275 | batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb); | ||
276 | |||
277 | ret = batadv_unicast_send_skb(bat_priv, skb); | ||
256 | if (ret != 0) | 278 | if (ret != 0) |
257 | goto dropped_freed; | 279 | goto dropped_freed; |
258 | } | 280 | } |
@@ -347,7 +369,51 @@ out: | |||
347 | return; | 369 | return; |
348 | } | 370 | } |
349 | 371 | ||
372 | /* batman-adv network devices have devices nesting below them and are a special | ||
373 | * "super class" of normal network devices; split their locks off into a | ||
374 | * separate class since they always nest. | ||
375 | */ | ||
376 | static struct lock_class_key batadv_netdev_xmit_lock_key; | ||
377 | static struct lock_class_key batadv_netdev_addr_lock_key; | ||
378 | |||
379 | /** | ||
380 | * batadv_set_lockdep_class_one - Set lockdep class for a single tx queue | ||
381 | * @dev: device which owns the tx queue | ||
382 | * @txq: tx queue to modify | ||
383 | * @_unused: always NULL | ||
384 | */ | ||
385 | static void batadv_set_lockdep_class_one(struct net_device *dev, | ||
386 | struct netdev_queue *txq, | ||
387 | void *_unused) | ||
388 | { | ||
389 | lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key); | ||
390 | } | ||
391 | |||
392 | /** | ||
393 | * batadv_set_lockdep_class - Set txq and addr_list lockdep class | ||
394 | * @dev: network device to modify | ||
395 | */ | ||
396 | static void batadv_set_lockdep_class(struct net_device *dev) | ||
397 | { | ||
398 | lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key); | ||
399 | netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL); | ||
400 | } | ||
401 | |||
402 | /** | ||
403 | * batadv_softif_init - Late stage initialization of soft interface | ||
404 | * @dev: registered network device to modify | ||
405 | * | ||
406 | * Returns error code on failures | ||
407 | */ | ||
408 | static int batadv_softif_init(struct net_device *dev) | ||
409 | { | ||
410 | batadv_set_lockdep_class(dev); | ||
411 | |||
412 | return 0; | ||
413 | } | ||
414 | |||
350 | static const struct net_device_ops batadv_netdev_ops = { | 415 | static const struct net_device_ops batadv_netdev_ops = { |
416 | .ndo_init = batadv_softif_init, | ||
351 | .ndo_open = batadv_interface_open, | 417 | .ndo_open = batadv_interface_open, |
352 | .ndo_stop = batadv_interface_release, | 418 | .ndo_stop = batadv_interface_release, |
353 | .ndo_get_stats = batadv_interface_stats, | 419 | .ndo_get_stats = batadv_interface_stats, |
@@ -414,6 +480,9 @@ struct net_device *batadv_softif_create(const char *name) | |||
414 | atomic_set(&bat_priv->aggregated_ogms, 1); | 480 | atomic_set(&bat_priv->aggregated_ogms, 1); |
415 | atomic_set(&bat_priv->bonding, 0); | 481 | atomic_set(&bat_priv->bonding, 0); |
416 | atomic_set(&bat_priv->bridge_loop_avoidance, 0); | 482 | atomic_set(&bat_priv->bridge_loop_avoidance, 0); |
483 | #ifdef CONFIG_BATMAN_ADV_DAT | ||
484 | atomic_set(&bat_priv->distributed_arp_table, 1); | ||
485 | #endif | ||
417 | atomic_set(&bat_priv->ap_isolation, 0); | 486 | atomic_set(&bat_priv->ap_isolation, 0); |
418 | atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE); | 487 | atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE); |
419 | atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF); | 488 | atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF); |
@@ -436,7 +505,6 @@ struct net_device *batadv_softif_create(const char *name) | |||
436 | #endif | 505 | #endif |
437 | bat_priv->tt.last_changeset = NULL; | 506 | bat_priv->tt.last_changeset = NULL; |
438 | bat_priv->tt.last_changeset_len = 0; | 507 | bat_priv->tt.last_changeset_len = 0; |
439 | bat_priv->tt.poss_change = false; | ||
440 | 508 | ||
441 | bat_priv->primary_if = NULL; | 509 | bat_priv->primary_if = NULL; |
442 | bat_priv->num_ifaces = 0; | 510 | bat_priv->num_ifaces = 0; |
@@ -556,6 +624,13 @@ static const struct { | |||
556 | { "tt_response_rx" }, | 624 | { "tt_response_rx" }, |
557 | { "tt_roam_adv_tx" }, | 625 | { "tt_roam_adv_tx" }, |
558 | { "tt_roam_adv_rx" }, | 626 | { "tt_roam_adv_rx" }, |
627 | #ifdef CONFIG_BATMAN_ADV_DAT | ||
628 | { "dat_get_tx" }, | ||
629 | { "dat_get_rx" }, | ||
630 | { "dat_put_tx" }, | ||
631 | { "dat_put_rx" }, | ||
632 | { "dat_cached_reply_tx" }, | ||
633 | #endif | ||
559 | }; | 634 | }; |
560 | 635 | ||
561 | static void batadv_get_strings(struct net_device *dev, uint32_t stringset, | 636 | static void batadv_get_strings(struct net_device *dev, uint32_t stringset, |
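Taken together, the two batadv_interface_tx() hunks above reduce the DAT handling on transmit to a small decision: outgoing broadcast ARP requests are delayed by ARP_REQ_DELAY so DAT gets a chance to answer first, unicast ARP requests that DAT took over are dropped, and outgoing ARP replies are snooped. A condensed sketch of that control flow only; tx_arp_sketch() and the drop flag are made up, while the batadv_dat_snoop_* helpers and ARP_REQ_DELAY come from this patch:

    /* tx_arp_sketch() is hypothetical and only summarizes the control flow
     * of the hunks above
     */
    static unsigned long tx_arp_sketch(struct batadv_priv *bat_priv,
                                       struct sk_buff *skb, bool do_bcast,
                                       bool *drop)
    {
            unsigned long brd_delay = 1;    /* default: broadcast right away */

            *drop = false;

            if (do_bcast) {
                    /* give DAT time to retrieve the ARP entry before flooding */
                    if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
                            brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);
            } else {
                    /* DAT took over the request, the unicast copy is not needed */
                    if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb)) {
                            *drop = true;
                            return brd_delay;
                    }
                    /* learn from ARP replies leaving this node */
                    batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
            }

            return brd_delay;
    }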
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index 66518c75c217..84a55cb19b0b 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include "main.h" | 20 | #include "main.h" |
21 | #include "sysfs.h" | 21 | #include "sysfs.h" |
22 | #include "translation-table.h" | 22 | #include "translation-table.h" |
23 | #include "distributed-arp-table.h" | ||
23 | #include "originator.h" | 24 | #include "originator.h" |
24 | #include "hard-interface.h" | 25 | #include "hard-interface.h" |
25 | #include "gateway_common.h" | 26 | #include "gateway_common.h" |
@@ -122,55 +123,6 @@ ssize_t batadv_show_##_name(struct kobject *kobj, \ | |||
122 | batadv_store_##_name) | 123 | batadv_store_##_name) |
123 | 124 | ||
124 | 125 | ||
125 | #define BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \ | ||
126 | ssize_t batadv_store_##_name(struct kobject *kobj, \ | ||
127 | struct attribute *attr, char *buff, \ | ||
128 | size_t count) \ | ||
129 | { \ | ||
130 | struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \ | ||
131 | struct batadv_hard_iface *hard_iface; \ | ||
132 | ssize_t length; \ | ||
133 | \ | ||
134 | hard_iface = batadv_hardif_get_by_netdev(net_dev); \ | ||
135 | if (!hard_iface) \ | ||
136 | return 0; \ | ||
137 | \ | ||
138 | length = __batadv_store_uint_attr(buff, count, _min, _max, \ | ||
139 | _post_func, attr, \ | ||
140 | &hard_iface->_name, net_dev); \ | ||
141 | \ | ||
142 | batadv_hardif_free_ref(hard_iface); \ | ||
143 | return length; \ | ||
144 | } | ||
145 | |||
146 | #define BATADV_ATTR_HIF_SHOW_UINT(_name) \ | ||
147 | ssize_t batadv_show_##_name(struct kobject *kobj, \ | ||
148 | struct attribute *attr, char *buff) \ | ||
149 | { \ | ||
150 | struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \ | ||
151 | struct batadv_hard_iface *hard_iface; \ | ||
152 | ssize_t length; \ | ||
153 | \ | ||
154 | hard_iface = batadv_hardif_get_by_netdev(net_dev); \ | ||
155 | if (!hard_iface) \ | ||
156 | return 0; \ | ||
157 | \ | ||
158 | length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\ | ||
159 | \ | ||
160 | batadv_hardif_free_ref(hard_iface); \ | ||
161 | return length; \ | ||
162 | } | ||
163 | |||
164 | /* Use this, if you are going to set [name] in hard_iface to an | ||
165 | * unsigned integer value | ||
166 | */ | ||
167 | #define BATADV_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \ | ||
168 | static BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func)\ | ||
169 | static BATADV_ATTR_HIF_SHOW_UINT(_name) \ | ||
170 | static BATADV_ATTR(_name, _mode, batadv_show_##_name, \ | ||
171 | batadv_store_##_name) | ||
172 | |||
173 | |||
174 | static int batadv_store_bool_attr(char *buff, size_t count, | 126 | static int batadv_store_bool_attr(char *buff, size_t count, |
175 | struct net_device *net_dev, | 127 | struct net_device *net_dev, |
176 | const char *attr_name, atomic_t *attr) | 128 | const char *attr_name, atomic_t *attr) |
@@ -469,6 +421,9 @@ BATADV_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); | |||
469 | #ifdef CONFIG_BATMAN_ADV_BLA | 421 | #ifdef CONFIG_BATMAN_ADV_BLA |
470 | BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL); | 422 | BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL); |
471 | #endif | 423 | #endif |
424 | #ifdef CONFIG_BATMAN_ADV_DAT | ||
425 | BATADV_ATTR_SIF_BOOL(distributed_arp_table, S_IRUGO | S_IWUSR, NULL); | ||
426 | #endif | ||
472 | BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu); | 427 | BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu); |
473 | BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); | 428 | BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); |
474 | static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode, | 429 | static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode, |
@@ -494,6 +449,9 @@ static struct batadv_attribute *batadv_mesh_attrs[] = { | |||
494 | #ifdef CONFIG_BATMAN_ADV_BLA | 449 | #ifdef CONFIG_BATMAN_ADV_BLA |
495 | &batadv_attr_bridge_loop_avoidance, | 450 | &batadv_attr_bridge_loop_avoidance, |
496 | #endif | 451 | #endif |
452 | #ifdef CONFIG_BATMAN_ADV_DAT | ||
453 | &batadv_attr_distributed_arp_table, | ||
454 | #endif | ||
497 | &batadv_attr_fragmentation, | 455 | &batadv_attr_fragmentation, |
498 | &batadv_attr_ap_isolation, | 456 | &batadv_attr_ap_isolation, |
499 | &batadv_attr_vis_mode, | 457 | &batadv_attr_vis_mode, |
@@ -730,7 +688,7 @@ int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type, | |||
730 | enum batadv_uev_action action, const char *data) | 688 | enum batadv_uev_action action, const char *data) |
731 | { | 689 | { |
732 | int ret = -ENOMEM; | 690 | int ret = -ENOMEM; |
733 | struct batadv_hard_iface *primary_if = NULL; | 691 | struct batadv_hard_iface *primary_if; |
734 | struct kobject *bat_kobj; | 692 | struct kobject *bat_kobj; |
735 | char *uevent_env[4] = { NULL, NULL, NULL, NULL }; | 693 | char *uevent_env[4] = { NULL, NULL, NULL, NULL }; |
736 | 694 | ||
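The new distributed_arp_table attribute is a plain boolean sysfs switch, registered next to bridge_loop_avoidance. A small user-space sketch for toggling it; the /sys/class/net/bat0/mesh/ path and the bat0 name are assumptions based on batman-adv's usual sysfs layout, only the attribute name comes from this hunk:

    #include <stdio.h>

    int main(void)
    {
            /* assumed path: soft interface "bat0", mesh attribute directory */
            const char *path = "/sys/class/net/bat0/mesh/distributed_arp_table";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return 1;
            }

            fputs("1\n", f);        /* write "0" to disable DAT again */
            fclose(f);
            return 0;
    }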
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index baae71585804..22457a7952ba 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -238,92 +238,134 @@ static int batadv_tt_local_init(struct batadv_priv *bat_priv) | |||
238 | return 0; | 238 | return 0; |
239 | } | 239 | } |
240 | 240 | ||
241 | static void batadv_tt_global_free(struct batadv_priv *bat_priv, | ||
242 | struct batadv_tt_global_entry *tt_global, | ||
243 | const char *message) | ||
244 | { | ||
245 | batadv_dbg(BATADV_DBG_TT, bat_priv, | ||
246 | "Deleting global tt entry %pM: %s\n", | ||
247 | tt_global->common.addr, message); | ||
248 | |||
249 | batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt, | ||
250 | batadv_choose_orig, tt_global->common.addr); | ||
251 | batadv_tt_global_entry_free_ref(tt_global); | ||
252 | |||
253 | } | ||
254 | |||
241 | void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, | 255 | void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, |
242 | int ifindex) | 256 | int ifindex) |
243 | { | 257 | { |
244 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); | 258 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); |
245 | struct batadv_tt_local_entry *tt_local_entry = NULL; | 259 | struct batadv_tt_local_entry *tt_local; |
246 | struct batadv_tt_global_entry *tt_global_entry = NULL; | 260 | struct batadv_tt_global_entry *tt_global; |
247 | struct hlist_head *head; | 261 | struct hlist_head *head; |
248 | struct hlist_node *node; | 262 | struct hlist_node *node; |
249 | struct batadv_tt_orig_list_entry *orig_entry; | 263 | struct batadv_tt_orig_list_entry *orig_entry; |
250 | int hash_added; | 264 | int hash_added; |
265 | bool roamed_back = false; | ||
251 | 266 | ||
252 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr); | 267 | tt_local = batadv_tt_local_hash_find(bat_priv, addr); |
268 | tt_global = batadv_tt_global_hash_find(bat_priv, addr); | ||
253 | 269 | ||
254 | if (tt_local_entry) { | 270 | if (tt_local) { |
255 | tt_local_entry->last_seen = jiffies; | 271 | tt_local->last_seen = jiffies; |
256 | /* possibly unset the BATADV_TT_CLIENT_PENDING flag */ | 272 | if (tt_local->common.flags & BATADV_TT_CLIENT_PENDING) { |
257 | tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING; | 273 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
258 | goto out; | 274 | "Re-adding pending client %pM\n", addr); |
275 | /* whatever the reason why the PENDING flag was set, | ||
276 | * this is a client which was enqueued to be removed in | ||
277 | * this orig_interval. Since it popped up again, the | ||
278 | * flag can be reset like it was never enqueued | ||
279 | */ | ||
280 | tt_local->common.flags &= ~BATADV_TT_CLIENT_PENDING; | ||
281 | goto add_event; | ||
282 | } | ||
283 | |||
284 | if (tt_local->common.flags & BATADV_TT_CLIENT_ROAM) { | ||
285 | batadv_dbg(BATADV_DBG_TT, bat_priv, | ||
286 | "Roaming client %pM came back to its original location\n", | ||
287 | addr); | ||
288 | /* the ROAM flag is set because this client roamed away | ||
289 | * and the node got a roaming_advertisement message. Now | ||
290 | * that the client popped up again at its original | ||
291 | * location, the flag can be unset | ||
292 | */ | ||
293 | tt_local->common.flags &= ~BATADV_TT_CLIENT_ROAM; | ||
294 | roamed_back = true; | ||
295 | } | ||
296 | goto check_roaming; | ||
259 | } | 297 | } |
260 | 298 | ||
261 | tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC); | 299 | tt_local = kmalloc(sizeof(*tt_local), GFP_ATOMIC); |
262 | if (!tt_local_entry) | 300 | if (!tt_local) |
263 | goto out; | 301 | goto out; |
264 | 302 | ||
265 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 303 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
266 | "Creating new local tt entry: %pM (ttvn: %d)\n", addr, | 304 | "Creating new local tt entry: %pM (ttvn: %d)\n", addr, |
267 | (uint8_t)atomic_read(&bat_priv->tt.vn)); | 305 | (uint8_t)atomic_read(&bat_priv->tt.vn)); |
268 | 306 | ||
269 | memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); | 307 | memcpy(tt_local->common.addr, addr, ETH_ALEN); |
270 | tt_local_entry->common.flags = BATADV_NO_FLAGS; | 308 | tt_local->common.flags = BATADV_NO_FLAGS; |
271 | if (batadv_is_wifi_iface(ifindex)) | 309 | if (batadv_is_wifi_iface(ifindex)) |
272 | tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI; | 310 | tt_local->common.flags |= BATADV_TT_CLIENT_WIFI; |
273 | atomic_set(&tt_local_entry->common.refcount, 2); | 311 | atomic_set(&tt_local->common.refcount, 2); |
274 | tt_local_entry->last_seen = jiffies; | 312 | tt_local->last_seen = jiffies; |
275 | tt_local_entry->common.added_at = tt_local_entry->last_seen; | 313 | tt_local->common.added_at = tt_local->last_seen; |
276 | 314 | ||
277 | /* the batman interface mac address should never be purged */ | 315 | /* the batman interface mac address should never be purged */ |
278 | if (batadv_compare_eth(addr, soft_iface->dev_addr)) | 316 | if (batadv_compare_eth(addr, soft_iface->dev_addr)) |
279 | tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE; | 317 | tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE; |
280 | 318 | ||
281 | /* The local entry has to be marked as NEW to avoid sending it in | 319 | /* The local entry has to be marked as NEW to avoid sending it in |
282 | * a full table response going out before the next ttvn increment | 320 | * a full table response going out before the next ttvn increment |
283 | * (consistency check) | 321 | * (consistency check) |
284 | */ | 322 | */ |
285 | tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW; | 323 | tt_local->common.flags |= BATADV_TT_CLIENT_NEW; |
286 | 324 | ||
287 | hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt, | 325 | hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt, |
288 | batadv_choose_orig, | 326 | batadv_choose_orig, &tt_local->common, |
289 | &tt_local_entry->common, | 327 | &tt_local->common.hash_entry); |
290 | &tt_local_entry->common.hash_entry); | ||
291 | 328 | ||
292 | if (unlikely(hash_added != 0)) { | 329 | if (unlikely(hash_added != 0)) { |
293 | /* remove the reference for the hash */ | 330 | /* remove the reference for the hash */ |
294 | batadv_tt_local_entry_free_ref(tt_local_entry); | 331 | batadv_tt_local_entry_free_ref(tt_local); |
295 | goto out; | 332 | goto out; |
296 | } | 333 | } |
297 | 334 | ||
298 | batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags); | 335 | add_event: |
336 | batadv_tt_local_event(bat_priv, addr, tt_local->common.flags); | ||
299 | 337 | ||
300 | /* remove address from global hash if present */ | 338 | check_roaming: |
301 | tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr); | 339 | /* Check whether it is a roaming event, but don't do anything if the roaming |
302 | 340 | * process has already been handled | |
303 | /* Check whether it is a roaming! */ | 341 | */ |
304 | if (tt_global_entry) { | 342 | if (tt_global && !(tt_global->common.flags & BATADV_TT_CLIENT_ROAM)) { |
305 | /* These nodes are probably going to update their tt table */ | 343 | /* These nodes are probably going to update their tt table */ |
306 | head = &tt_global_entry->orig_list; | 344 | head = &tt_global->orig_list; |
307 | rcu_read_lock(); | 345 | rcu_read_lock(); |
308 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { | 346 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { |
309 | orig_entry->orig_node->tt_poss_change = true; | 347 | batadv_send_roam_adv(bat_priv, tt_global->common.addr, |
310 | |||
311 | batadv_send_roam_adv(bat_priv, | ||
312 | tt_global_entry->common.addr, | ||
313 | orig_entry->orig_node); | 348 | orig_entry->orig_node); |
314 | } | 349 | } |
315 | rcu_read_unlock(); | 350 | rcu_read_unlock(); |
316 | /* The global entry has to be marked as ROAMING and | 351 | if (roamed_back) { |
317 | * has to be kept for consistency purpose | 352 | batadv_tt_global_free(bat_priv, tt_global, |
318 | */ | 353 | "Roaming canceled"); |
319 | tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM; | 354 | tt_global = NULL; |
320 | tt_global_entry->roam_at = jiffies; | 355 | } else { |
356 | /* The global entry has to be marked as ROAMING and | ||
357 | * has to be kept for consistency purposes | ||
358 | */ | ||
359 | tt_global->common.flags |= BATADV_TT_CLIENT_ROAM; | ||
360 | tt_global->roam_at = jiffies; | ||
361 | } | ||
321 | } | 362 | } |
363 | |||
322 | out: | 364 | out: |
323 | if (tt_local_entry) | 365 | if (tt_local) |
324 | batadv_tt_local_entry_free_ref(tt_local_entry); | 366 | batadv_tt_local_entry_free_ref(tt_local); |
325 | if (tt_global_entry) | 367 | if (tt_global) |
326 | batadv_tt_global_entry_free_ref(tt_global_entry); | 368 | batadv_tt_global_entry_free_ref(tt_global); |
327 | } | 369 | } |
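
The re-add branch above comes down to two flag checks: a client still flagged PENDING is un-queued and re-announced, while a client flagged ROAM is taken back and only triggers the roaming bookkeeping. A minimal userspace sketch of that decision follows; the flag values are placeholders standing in for the BATADV_TT_CLIENT_* constants, and it models the control flow only, not the kernel data structures.

#include <stdbool.h>
#include <stdio.h>

/* placeholder values standing in for the kernel's BATADV_TT_CLIENT_* flags */
#define TT_CLIENT_PENDING (1 << 0)
#define TT_CLIENT_ROAM    (1 << 1)

struct tt_local {
    unsigned int flags;
};

/* mirrors the add_event/check_roaming split in batadv_tt_local_add():
 * returns true if a local TT event has to be re-announced; the roaming
 * check is run by the caller in both cases
 */
static bool local_readd(struct tt_local *tt, bool *roamed_back)
{
    *roamed_back = false;

    if (tt->flags & TT_CLIENT_PENDING) {
        /* queued for deletion but seen again: un-queue and re-announce */
        tt->flags &= ~TT_CLIENT_PENDING;
        return true;
    }

    if (tt->flags & TT_CLIENT_ROAM) {
        /* the client came back to its original location */
        tt->flags &= ~TT_CLIENT_ROAM;
        *roamed_back = true;
    }
    return false;
}

int main(void)
{
    struct tt_local tt = { .flags = TT_CLIENT_ROAM };
    bool roamed_back;
    bool announce = local_readd(&tt, &roamed_back);

    printf("announce=%d roamed_back=%d flags=0x%x\n",
           announce, roamed_back, tt.flags);
    return 0;
}
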
328 | 370 | ||
329 | static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff, | 371 | static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff, |
@@ -434,22 +476,10 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
434 | struct hlist_node *node; | 476 | struct hlist_node *node; |
435 | struct hlist_head *head; | 477 | struct hlist_head *head; |
436 | uint32_t i; | 478 | uint32_t i; |
437 | int ret = 0; | ||
438 | 479 | ||
439 | primary_if = batadv_primary_if_get_selected(bat_priv); | 480 | primary_if = batadv_seq_print_text_primary_if_get(seq); |
440 | if (!primary_if) { | 481 | if (!primary_if) |
441 | ret = seq_printf(seq, | ||
442 | "BATMAN mesh %s disabled - please specify interfaces to enable it\n", | ||
443 | net_dev->name); | ||
444 | goto out; | ||
445 | } | ||
446 | |||
447 | if (primary_if->if_status != BATADV_IF_ACTIVE) { | ||
448 | ret = seq_printf(seq, | ||
449 | "BATMAN mesh %s disabled - primary interface not active\n", | ||
450 | net_dev->name); | ||
451 | goto out; | 482 | goto out; |
452 | } | ||
453 | 483 | ||
454 | seq_printf(seq, | 484 | seq_printf(seq, |
455 | "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n", | 485 | "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n", |
@@ -479,7 +509,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
479 | out: | 509 | out: |
480 | if (primary_if) | 510 | if (primary_if) |
481 | batadv_hardif_free_ref(primary_if); | 511 | batadv_hardif_free_ref(primary_if); |
482 | return ret; | 512 | return 0; |
483 | } | 513 | } |
484 | 514 | ||
485 | static void | 515 | static void |
@@ -501,24 +531,57 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv, | |||
501 | tt_local_entry->common.addr, message); | 531 | tt_local_entry->common.addr, message); |
502 | } | 532 | } |
503 | 533 | ||
504 | void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr, | 534 | /** |
505 | const char *message, bool roaming) | 535 | * batadv_tt_local_remove - logically remove an entry from the local table |
536 | * @bat_priv: the bat priv with all the soft interface information | ||
537 | * @addr: the MAC address of the client to remove | ||
538 | * @message: message to append to the log on deletion | ||
539 | * @roaming: true if the deletion is due to a roaming event | ||
540 | * | ||
541 | * Returns the flags assigned to the local entry before being deleted | ||
542 | */ | ||
543 | uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv, | ||
544 | const uint8_t *addr, const char *message, | ||
545 | bool roaming) | ||
506 | { | 546 | { |
507 | struct batadv_tt_local_entry *tt_local_entry = NULL; | 547 | struct batadv_tt_local_entry *tt_local_entry; |
508 | uint16_t flags; | 548 | uint16_t flags, curr_flags = BATADV_NO_FLAGS; |
509 | 549 | ||
510 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr); | 550 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr); |
511 | if (!tt_local_entry) | 551 | if (!tt_local_entry) |
512 | goto out; | 552 | goto out; |
513 | 553 | ||
554 | curr_flags = tt_local_entry->common.flags; | ||
555 | |||
514 | flags = BATADV_TT_CLIENT_DEL; | 556 | flags = BATADV_TT_CLIENT_DEL; |
515 | if (roaming) | 557 | /* if this global entry addition is due to a roaming event, the node has to |
558 | * mark the local entry as "roamed" in order to correctly reroute | ||
559 | * packets later | ||
560 | */ | ||
561 | if (roaming) { | ||
516 | flags |= BATADV_TT_CLIENT_ROAM; | 562 | flags |= BATADV_TT_CLIENT_ROAM; |
563 | /* mark the local client as ROAMed */ | ||
564 | tt_local_entry->common.flags |= BATADV_TT_CLIENT_ROAM; | ||
565 | } | ||
566 | |||
567 | if (!(tt_local_entry->common.flags & BATADV_TT_CLIENT_NEW)) { | ||
568 | batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, | ||
569 | message); | ||
570 | goto out; | ||
571 | } | ||
572 | /* if this client has only just been added, it is possible to | ||
573 | * immediately purge it | ||
574 | */ | ||
575 | batadv_tt_local_event(bat_priv, tt_local_entry->common.addr, | ||
576 | curr_flags | BATADV_TT_CLIENT_DEL); | ||
577 | hlist_del_rcu(&tt_local_entry->common.hash_entry); | ||
578 | batadv_tt_local_entry_free_ref(tt_local_entry); | ||
517 | 579 | ||
518 | batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message); | ||
519 | out: | 580 | out: |
520 | if (tt_local_entry) | 581 | if (tt_local_entry) |
521 | batadv_tt_local_entry_free_ref(tt_local_entry); | 582 | batadv_tt_local_entry_free_ref(tt_local_entry); |
583 | |||
584 | return curr_flags; | ||
522 | } | 585 | } |
523 | 586 | ||
524 | static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv, | 587 | static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv, |
@@ -721,12 +784,23 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
721 | const unsigned char *tt_addr, uint8_t flags, | 784 | const unsigned char *tt_addr, uint8_t flags, |
722 | uint8_t ttvn) | 785 | uint8_t ttvn) |
723 | { | 786 | { |
724 | struct batadv_tt_global_entry *tt_global_entry = NULL; | 787 | struct batadv_tt_global_entry *tt_global_entry; |
788 | struct batadv_tt_local_entry *tt_local_entry; | ||
725 | int ret = 0; | 789 | int ret = 0; |
726 | int hash_added; | 790 | int hash_added; |
727 | struct batadv_tt_common_entry *common; | 791 | struct batadv_tt_common_entry *common; |
792 | uint16_t local_flags; | ||
728 | 793 | ||
729 | tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr); | 794 | tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr); |
795 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr); | ||
796 | |||
797 | /* if the node already has a local client for this entry, it has to wait | ||
798 | * for a roaming advertisement instead of manually messing up the global | ||
799 | * table | ||
800 | */ | ||
801 | if ((flags & BATADV_TT_CLIENT_TEMP) && tt_local_entry && | ||
802 | !(tt_local_entry->common.flags & BATADV_TT_CLIENT_NEW)) | ||
803 | goto out; | ||
730 | 804 | ||
731 | if (!tt_global_entry) { | 805 | if (!tt_global_entry) { |
732 | tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC); | 806 | tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC); |
@@ -738,6 +812,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
738 | 812 | ||
739 | common->flags = flags; | 813 | common->flags = flags; |
740 | tt_global_entry->roam_at = 0; | 814 | tt_global_entry->roam_at = 0; |
815 | /* the node must store the current time in case of roaming. This is | ||
816 | * needed to purge this entry out on timeout (if nobody claims | ||
817 | * it) | ||
818 | */ | ||
819 | if (flags & BATADV_TT_CLIENT_ROAM) | ||
820 | tt_global_entry->roam_at = jiffies; | ||
741 | atomic_set(&common->refcount, 2); | 821 | atomic_set(&common->refcount, 2); |
742 | common->added_at = jiffies; | 822 | common->added_at = jiffies; |
743 | 823 | ||
@@ -755,19 +835,31 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
755 | goto out_remove; | 835 | goto out_remove; |
756 | } | 836 | } |
757 | } else { | 837 | } else { |
838 | common = &tt_global_entry->common; | ||
758 | /* If there is already a global entry, we can use this one for | 839 | /* If there is already a global entry, we can use this one for |
759 | * our processing. | 840 | * our processing. |
760 | * But if we are trying to add a temporary client we can exit | 841 | * But if we are trying to add a temporary client, there are |
761 | * directly because the temporary information should never | 842 | * two options at this point: |
762 | * override any already known client state (whatever it is) | 843 | * 1) the global client is not a temporary client: the global |
844 | * client has to be left as it is, temporary information | ||
845 | * should never override any already known client state | ||
846 | * 2) the global client is a temporary client: purge the | ||
847 | * originator list and add the new orig_entry | ||
763 | */ | 848 | */ |
764 | if (flags & BATADV_TT_CLIENT_TEMP) | 849 | if (flags & BATADV_TT_CLIENT_TEMP) { |
765 | goto out; | 850 | if (!(common->flags & BATADV_TT_CLIENT_TEMP)) |
851 | goto out; | ||
852 | if (batadv_tt_global_entry_has_orig(tt_global_entry, | ||
853 | orig_node)) | ||
854 | goto out_remove; | ||
855 | batadv_tt_global_del_orig_list(tt_global_entry); | ||
856 | goto add_orig_entry; | ||
857 | } | ||
766 | 858 | ||
767 | /* if the client was temporarily added before receiving the first | 859 | /* if the client was temporarily added before receiving the first |
768 | * OGM announcing it, we have to clear the TEMP flag | 860 | * OGM announcing it, we have to clear the TEMP flag |
769 | */ | 861 | */ |
770 | tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP; | 862 | common->flags &= ~BATADV_TT_CLIENT_TEMP; |
771 | 863 | ||
772 | /* the change can carry possible "attribute" flags like the | 864 | /* the change can carry possible "attribute" flags like the |
773 | * TT_CLIENT_WIFI, therefore they have to be copied in the | 865 | * TT_CLIENT_WIFI, therefore they have to be copied in the |
@@ -782,33 +874,81 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
782 | * We should first delete the old originator before adding the | 874 | * We should first delete the old originator before adding the |
783 | * new one. | 875 | * new one. |
784 | */ | 876 | */ |
785 | if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) { | 877 | if (common->flags & BATADV_TT_CLIENT_ROAM) { |
786 | batadv_tt_global_del_orig_list(tt_global_entry); | 878 | batadv_tt_global_del_orig_list(tt_global_entry); |
787 | tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM; | 879 | common->flags &= ~BATADV_TT_CLIENT_ROAM; |
788 | tt_global_entry->roam_at = 0; | 880 | tt_global_entry->roam_at = 0; |
789 | } | 881 | } |
790 | } | 882 | } |
883 | add_orig_entry: | ||
791 | /* add the new orig_entry (if needed) or update it */ | 884 | /* add the new orig_entry (if needed) or update it */ |
792 | batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn); | 885 | batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn); |
793 | 886 | ||
794 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 887 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
795 | "Creating new global tt entry: %pM (via %pM)\n", | 888 | "Creating new global tt entry: %pM (via %pM)\n", |
796 | tt_global_entry->common.addr, orig_node->orig); | 889 | common->addr, orig_node->orig); |
890 | ret = 1; | ||
797 | 891 | ||
798 | out_remove: | 892 | out_remove: |
893 | |||
799 | /* remove address from local hash if present */ | 894 | /* remove address from local hash if present */ |
800 | batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr, | 895 | local_flags = batadv_tt_local_remove(bat_priv, tt_addr, |
801 | "global tt received", | 896 | "global tt received", |
802 | flags & BATADV_TT_CLIENT_ROAM); | 897 | !!(flags & BATADV_TT_CLIENT_ROAM)); |
803 | ret = 1; | 898 | tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI; |
899 | |||
900 | if (!(flags & BATADV_TT_CLIENT_ROAM)) | ||
901 | /* this is a normal global add. Therefore the client is not in a | ||
902 | * roaming state anymore. | ||
903 | */ | ||
904 | tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM; | ||
905 | |||
804 | out: | 906 | out: |
805 | if (tt_global_entry) | 907 | if (tt_global_entry) |
806 | batadv_tt_global_entry_free_ref(tt_global_entry); | 908 | batadv_tt_global_entry_free_ref(tt_global_entry); |
909 | if (tt_local_entry) | ||
910 | batadv_tt_local_entry_free_ref(tt_local_entry); | ||
807 | return ret; | 911 | return ret; |
808 | } | 912 | } |
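
The new uint16_t return value of batadv_tt_local_remove() is consumed a few lines above: only the WIFI attribute of the old local flags is allowed to survive into the global entry, while purely local state (PENDING, ROAM, NEW) must not leak across. A self-contained sketch of that masking step, with placeholder flag values rather than the kernel constants:

#include <stdint.h>
#include <stdio.h>

/* placeholder flag values; only the masking pattern matters here */
#define TT_CLIENT_WIFI (1 << 4)
#define TT_CLIENT_ROAM (1 << 1)
#define TT_NO_FLAGS    0

/* stand-in for batadv_tt_local_remove(): returns the flags the local entry
 * carried before it was (logically) deleted
 */
static uint16_t local_remove(uint16_t old_local_flags)
{
    return old_local_flags;
}

int main(void)
{
    uint16_t global_flags = TT_NO_FLAGS;
    uint16_t local_flags = local_remove(TT_CLIENT_WIFI | TT_CLIENT_ROAM);

    /* keep only the WIFI attribute: local-table state such as ROAM or
     * PENDING must not leak into the global entry
     */
    global_flags |= local_flags & TT_CLIENT_WIFI;

    printf("global flags: 0x%04x\n", (unsigned int)global_flags);
    return 0;
}
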
809 | 913 | ||
810 | /* print all orig nodes who announce the address for this global entry. | 914 | /* batadv_transtable_best_orig - Get best originator list entry from tt entry |
811 | * it is assumed that the caller holds rcu_read_lock(); | 915 | * @tt_global_entry: global translation table entry to be analyzed |
916 | * | ||
917 | * This function assumes the caller holds rcu_read_lock(). | ||
918 | * Returns best originator list entry or NULL on errors. | ||
919 | */ | ||
920 | static struct batadv_tt_orig_list_entry * | ||
921 | batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry) | ||
922 | { | ||
923 | struct batadv_neigh_node *router = NULL; | ||
924 | struct hlist_head *head; | ||
925 | struct hlist_node *node; | ||
926 | struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL; | ||
927 | int best_tq = 0; | ||
928 | |||
929 | head = &tt_global_entry->orig_list; | ||
930 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { | ||
931 | router = batadv_orig_node_get_router(orig_entry->orig_node); | ||
932 | if (!router) | ||
933 | continue; | ||
934 | |||
935 | if (router->tq_avg > best_tq) { | ||
936 | best_entry = orig_entry; | ||
937 | best_tq = router->tq_avg; | ||
938 | } | ||
939 | |||
940 | batadv_neigh_node_free_ref(router); | ||
941 | } | ||
942 | |||
943 | return best_entry; | ||
944 | } | ||
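
batadv_transtable_best_orig() above walks the RCU-protected originator list and keeps the entry whose router reports the highest TQ average. The same selection, modelled in plain C over an array; the types below are simplified stand-ins, and the RCU locking and reference counting of the kernel version are deliberately left out.

#include <stddef.h>
#include <stdio.h>

/* simplified stand-ins for batadv_tt_orig_list_entry and its router's TQ */
struct orig_entry {
    const char *name;
    int tq_avg;    /* 0 stands for "no router known" (NULL router) */
};

static const struct orig_entry *best_orig(const struct orig_entry *list, size_t n)
{
    const struct orig_entry *best = NULL;
    int best_tq = 0;
    size_t i;

    for (i = 0; i < n; i++) {
        if (list[i].tq_avg <= 0)
            continue;    /* mirrors the !router check */
        if (list[i].tq_avg > best_tq) {
            best = &list[i];
            best_tq = list[i].tq_avg;
        }
    }
    return best;
}

int main(void)
{
    const struct orig_entry list[] = {
        { "orig-A", 120 }, { "orig-B", 0 }, { "orig-C", 200 },
    };
    const struct orig_entry *best;

    best = best_orig(list, sizeof(list) / sizeof(list[0]));
    printf("best originator: %s\n", best ? best->name : "(none)");
    return 0;
}
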
945 | |||
946 | /* batadv_tt_global_print_entry - print all orig nodes who announce the address | ||
947 | * for this global entry | ||
948 | * @tt_global_entry: global translation table entry to be printed | ||
949 | * @seq: debugfs table seq_file struct | ||
950 | * | ||
951 | * This function assumes the caller holds rcu_read_lock(). | ||
812 | */ | 952 | */ |
813 | static void | 953 | static void |
814 | batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry, | 954 | batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry, |
@@ -816,21 +956,37 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry, | |||
816 | { | 956 | { |
817 | struct hlist_head *head; | 957 | struct hlist_head *head; |
818 | struct hlist_node *node; | 958 | struct hlist_node *node; |
819 | struct batadv_tt_orig_list_entry *orig_entry; | 959 | struct batadv_tt_orig_list_entry *orig_entry, *best_entry; |
820 | struct batadv_tt_common_entry *tt_common_entry; | 960 | struct batadv_tt_common_entry *tt_common_entry; |
821 | uint16_t flags; | 961 | uint16_t flags; |
822 | uint8_t last_ttvn; | 962 | uint8_t last_ttvn; |
823 | 963 | ||
824 | tt_common_entry = &tt_global_entry->common; | 964 | tt_common_entry = &tt_global_entry->common; |
965 | flags = tt_common_entry->flags; | ||
966 | |||
967 | best_entry = batadv_transtable_best_orig(tt_global_entry); | ||
968 | if (best_entry) { | ||
969 | last_ttvn = atomic_read(&best_entry->orig_node->last_ttvn); | ||
970 | seq_printf(seq, " %c %pM (%3u) via %pM (%3u) [%c%c%c]\n", | ||
971 | '*', tt_global_entry->common.addr, | ||
972 | best_entry->ttvn, best_entry->orig_node->orig, | ||
973 | last_ttvn, | ||
974 | (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'), | ||
975 | (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'), | ||
976 | (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.')); | ||
977 | } | ||
825 | 978 | ||
826 | head = &tt_global_entry->orig_list; | 979 | head = &tt_global_entry->orig_list; |
827 | 980 | ||
828 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { | 981 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { |
829 | flags = tt_common_entry->flags; | 982 | if (best_entry == orig_entry) |
983 | continue; | ||
984 | |||
830 | last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn); | 985 | last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn); |
831 | seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c%c]\n", | 986 | seq_printf(seq, " %c %pM (%3u) via %pM (%3u) [%c%c%c]\n", |
832 | tt_global_entry->common.addr, orig_entry->ttvn, | 987 | '+', tt_global_entry->common.addr, |
833 | orig_entry->orig_node->orig, last_ttvn, | 988 | orig_entry->ttvn, orig_entry->orig_node->orig, |
989 | last_ttvn, | ||
834 | (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'), | 990 | (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'), |
835 | (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'), | 991 | (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'), |
836 | (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.')); | 992 | (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.')); |
@@ -848,22 +1004,10 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) | |||
848 | struct hlist_node *node; | 1004 | struct hlist_node *node; |
849 | struct hlist_head *head; | 1005 | struct hlist_head *head; |
850 | uint32_t i; | 1006 | uint32_t i; |
851 | int ret = 0; | ||
852 | |||
853 | primary_if = batadv_primary_if_get_selected(bat_priv); | ||
854 | if (!primary_if) { | ||
855 | ret = seq_printf(seq, | ||
856 | "BATMAN mesh %s disabled - please specify interfaces to enable it\n", | ||
857 | net_dev->name); | ||
858 | goto out; | ||
859 | } | ||
860 | 1007 | ||
861 | if (primary_if->if_status != BATADV_IF_ACTIVE) { | 1008 | primary_if = batadv_seq_print_text_primary_if_get(seq); |
862 | ret = seq_printf(seq, | 1009 | if (!primary_if) |
863 | "BATMAN mesh %s disabled - primary interface not active\n", | ||
864 | net_dev->name); | ||
865 | goto out; | 1010 | goto out; |
866 | } | ||
867 | 1011 | ||
868 | seq_printf(seq, | 1012 | seq_printf(seq, |
869 | "Globally announced TT entries received via the mesh %s\n", | 1013 | "Globally announced TT entries received via the mesh %s\n", |
@@ -887,7 +1031,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) | |||
887 | out: | 1031 | out: |
888 | if (primary_if) | 1032 | if (primary_if) |
889 | batadv_hardif_free_ref(primary_if); | 1033 | batadv_hardif_free_ref(primary_if); |
890 | return ret; | 1034 | return 0; |
891 | } | 1035 | } |
892 | 1036 | ||
893 | /* deletes the orig list of a tt_global_entry */ | 1037 | /* deletes the orig list of a tt_global_entry */ |
@@ -933,21 +1077,6 @@ batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv, | |||
933 | spin_unlock_bh(&tt_global_entry->list_lock); | 1077 | spin_unlock_bh(&tt_global_entry->list_lock); |
934 | } | 1078 | } |
935 | 1079 | ||
936 | static void | ||
937 | batadv_tt_global_del_struct(struct batadv_priv *bat_priv, | ||
938 | struct batadv_tt_global_entry *tt_global_entry, | ||
939 | const char *message) | ||
940 | { | ||
941 | batadv_dbg(BATADV_DBG_TT, bat_priv, | ||
942 | "Deleting global tt entry %pM: %s\n", | ||
943 | tt_global_entry->common.addr, message); | ||
944 | |||
945 | batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt, | ||
946 | batadv_choose_orig, tt_global_entry->common.addr); | ||
947 | batadv_tt_global_entry_free_ref(tt_global_entry); | ||
948 | |||
949 | } | ||
950 | |||
951 | /* If the client is to be deleted, we check if it is the last originator entry | 1080 | /* If the client is to be deleted, we check if it is the last originator entry |
952 | * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the | 1081 | * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the |
953 | * timer, otherwise we simply remove the originator scheduled for deletion. | 1082 | * timer, otherwise we simply remove the originator scheduled for deletion. |
@@ -996,7 +1125,7 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv, | |||
996 | const unsigned char *addr, | 1125 | const unsigned char *addr, |
997 | const char *message, bool roaming) | 1126 | const char *message, bool roaming) |
998 | { | 1127 | { |
999 | struct batadv_tt_global_entry *tt_global_entry = NULL; | 1128 | struct batadv_tt_global_entry *tt_global_entry; |
1000 | struct batadv_tt_local_entry *local_entry = NULL; | 1129 | struct batadv_tt_local_entry *local_entry = NULL; |
1001 | 1130 | ||
1002 | tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr); | 1131 | tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr); |
@@ -1008,8 +1137,8 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv, | |||
1008 | orig_node, message); | 1137 | orig_node, message); |
1009 | 1138 | ||
1010 | if (hlist_empty(&tt_global_entry->orig_list)) | 1139 | if (hlist_empty(&tt_global_entry->orig_list)) |
1011 | batadv_tt_global_del_struct(bat_priv, tt_global_entry, | 1140 | batadv_tt_global_free(bat_priv, tt_global_entry, |
1012 | message); | 1141 | message); |
1013 | 1142 | ||
1014 | goto out; | 1143 | goto out; |
1015 | } | 1144 | } |
@@ -1032,7 +1161,7 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv, | |||
1032 | if (local_entry) { | 1161 | if (local_entry) { |
1033 | /* local entry exists, case 2: client roamed to us. */ | 1162 | /* local entry exists, case 2: client roamed to us. */ |
1034 | batadv_tt_global_del_orig_list(tt_global_entry); | 1163 | batadv_tt_global_del_orig_list(tt_global_entry); |
1035 | batadv_tt_global_del_struct(bat_priv, tt_global_entry, message); | 1164 | batadv_tt_global_free(bat_priv, tt_global_entry, message); |
1036 | } else | 1165 | } else |
1037 | /* no local entry exists, case 1: check for roaming */ | 1166 | /* no local entry exists, case 1: check for roaming */ |
1038 | batadv_tt_global_del_roaming(bat_priv, tt_global_entry, | 1167 | batadv_tt_global_del_roaming(bat_priv, tt_global_entry, |
@@ -1203,15 +1332,12 @@ struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv, | |||
1203 | struct batadv_tt_local_entry *tt_local_entry = NULL; | 1332 | struct batadv_tt_local_entry *tt_local_entry = NULL; |
1204 | struct batadv_tt_global_entry *tt_global_entry = NULL; | 1333 | struct batadv_tt_global_entry *tt_global_entry = NULL; |
1205 | struct batadv_orig_node *orig_node = NULL; | 1334 | struct batadv_orig_node *orig_node = NULL; |
1206 | struct batadv_neigh_node *router = NULL; | 1335 | struct batadv_tt_orig_list_entry *best_entry; |
1207 | struct hlist_head *head; | ||
1208 | struct hlist_node *node; | ||
1209 | struct batadv_tt_orig_list_entry *orig_entry; | ||
1210 | int best_tq; | ||
1211 | 1336 | ||
1212 | if (src && atomic_read(&bat_priv->ap_isolation)) { | 1337 | if (src && atomic_read(&bat_priv->ap_isolation)) { |
1213 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, src); | 1338 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, src); |
1214 | if (!tt_local_entry) | 1339 | if (!tt_local_entry || |
1340 | (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)) | ||
1215 | goto out; | 1341 | goto out; |
1216 | } | 1342 | } |
1217 | 1343 | ||
@@ -1226,25 +1352,15 @@ struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv, | |||
1226 | _batadv_is_ap_isolated(tt_local_entry, tt_global_entry)) | 1352 | _batadv_is_ap_isolated(tt_local_entry, tt_global_entry)) |
1227 | goto out; | 1353 | goto out; |
1228 | 1354 | ||
1229 | best_tq = 0; | ||
1230 | |||
1231 | rcu_read_lock(); | 1355 | rcu_read_lock(); |
1232 | head = &tt_global_entry->orig_list; | 1356 | best_entry = batadv_transtable_best_orig(tt_global_entry); |
1233 | hlist_for_each_entry_rcu(orig_entry, node, head, list) { | ||
1234 | router = batadv_orig_node_get_router(orig_entry->orig_node); | ||
1235 | if (!router) | ||
1236 | continue; | ||
1237 | |||
1238 | if (router->tq_avg > best_tq) { | ||
1239 | orig_node = orig_entry->orig_node; | ||
1240 | best_tq = router->tq_avg; | ||
1241 | } | ||
1242 | batadv_neigh_node_free_ref(router); | ||
1243 | } | ||
1244 | /* found anything? */ | 1357 | /* found anything? */ |
1358 | if (best_entry) | ||
1359 | orig_node = best_entry->orig_node; | ||
1245 | if (orig_node && !atomic_inc_not_zero(&orig_node->refcount)) | 1360 | if (orig_node && !atomic_inc_not_zero(&orig_node->refcount)) |
1246 | orig_node = NULL; | 1361 | orig_node = NULL; |
1247 | rcu_read_unlock(); | 1362 | rcu_read_unlock(); |
1363 | |||
1248 | out: | 1364 | out: |
1249 | if (tt_global_entry) | 1365 | if (tt_global_entry) |
1250 | batadv_tt_global_entry_free_ref(tt_global_entry); | 1366 | batadv_tt_global_entry_free_ref(tt_global_entry); |
@@ -1477,11 +1593,11 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, | |||
1477 | tt_tot = tt_len / sizeof(struct batadv_tt_change); | 1593 | tt_tot = tt_len / sizeof(struct batadv_tt_change); |
1478 | 1594 | ||
1479 | len = tt_query_size + tt_len; | 1595 | len = tt_query_size + tt_len; |
1480 | skb = dev_alloc_skb(len + ETH_HLEN); | 1596 | skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN); |
1481 | if (!skb) | 1597 | if (!skb) |
1482 | goto out; | 1598 | goto out; |
1483 | 1599 | ||
1484 | skb_reserve(skb, ETH_HLEN); | 1600 | skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN); |
1485 | tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len); | 1601 | tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len); |
1486 | tt_response->ttvn = ttvn; | 1602 | tt_response->ttvn = ttvn; |
1487 | 1603 | ||
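
Every allocation touched by this patch grows its headroom from ETH_HLEN to ETH_HLEN + NET_IP_ALIGN. With the common NET_IP_ALIGN of 2 (some architectures define it as 0), the 14-byte Ethernet header later pushed in front of the data ends at offset 16, so the batman payload that follows it starts on a 4-byte boundary. A tiny userspace model of the reserve/put arithmetic; it only mimics skb_reserve()/skb_put() and is not the kernel API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN     14
#define NET_IP_ALIGN 2   /* the common value; some architectures use 0 */

struct buf {
    uint8_t storage[256];
    uint8_t *data;       /* start of the area written so far */
};

static void buf_reserve(struct buf *b, size_t headroom)
{
    b->data = b->storage + headroom;    /* mimics skb_reserve() */
}

static uint8_t *buf_put(struct buf *b, size_t len)
{
    uint8_t *start = b->data;           /* mimics skb_put() */
    b->data += len;
    return start;
}

int main(void)
{
    struct buf b;
    uint8_t *payload;

    buf_reserve(&b, ETH_HLEN + NET_IP_ALIGN);
    payload = buf_put(&b, 32);

    /* the Ethernet header pushed in front later occupies bytes 2..15,
     * so the payload written here starts at offset 16: 4-byte aligned
     */
    printf("payload offset: %td\n", payload - b.storage);
    return 0;
}
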
@@ -1526,7 +1642,6 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv, | |||
1526 | { | 1642 | { |
1527 | struct sk_buff *skb = NULL; | 1643 | struct sk_buff *skb = NULL; |
1528 | struct batadv_tt_query_packet *tt_request; | 1644 | struct batadv_tt_query_packet *tt_request; |
1529 | struct batadv_neigh_node *neigh_node = NULL; | ||
1530 | struct batadv_hard_iface *primary_if; | 1645 | struct batadv_hard_iface *primary_if; |
1531 | struct batadv_tt_req_node *tt_req_node = NULL; | 1646 | struct batadv_tt_req_node *tt_req_node = NULL; |
1532 | int ret = 1; | 1647 | int ret = 1; |
@@ -1543,11 +1658,11 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv, | |||
1543 | if (!tt_req_node) | 1658 | if (!tt_req_node) |
1544 | goto out; | 1659 | goto out; |
1545 | 1660 | ||
1546 | skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN); | 1661 | skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN + NET_IP_ALIGN); |
1547 | if (!skb) | 1662 | if (!skb) |
1548 | goto out; | 1663 | goto out; |
1549 | 1664 | ||
1550 | skb_reserve(skb, ETH_HLEN); | 1665 | skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN); |
1551 | 1666 | ||
1552 | tt_req_len = sizeof(*tt_request); | 1667 | tt_req_len = sizeof(*tt_request); |
1553 | tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len); | 1668 | tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len); |
@@ -1564,23 +1679,15 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv, | |||
1564 | if (full_table) | 1679 | if (full_table) |
1565 | tt_request->flags |= BATADV_TT_FULL_TABLE; | 1680 | tt_request->flags |= BATADV_TT_FULL_TABLE; |
1566 | 1681 | ||
1567 | neigh_node = batadv_orig_node_get_router(dst_orig_node); | 1682 | batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending TT_REQUEST to %pM [%c]\n", |
1568 | if (!neigh_node) | 1683 | dst_orig_node->orig, (full_table ? 'F' : '.')); |
1569 | goto out; | ||
1570 | |||
1571 | batadv_dbg(BATADV_DBG_TT, bat_priv, | ||
1572 | "Sending TT_REQUEST to %pM via %pM [%c]\n", | ||
1573 | dst_orig_node->orig, neigh_node->addr, | ||
1574 | (full_table ? 'F' : '.')); | ||
1575 | 1684 | ||
1576 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX); | 1685 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX); |
1577 | 1686 | ||
1578 | batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | 1687 | if (batadv_send_skb_to_orig(skb, dst_orig_node, NULL)) |
1579 | ret = 0; | 1688 | ret = 0; |
1580 | 1689 | ||
1581 | out: | 1690 | out: |
1582 | if (neigh_node) | ||
1583 | batadv_neigh_node_free_ref(neigh_node); | ||
1584 | if (primary_if) | 1691 | if (primary_if) |
1585 | batadv_hardif_free_ref(primary_if); | 1692 | batadv_hardif_free_ref(primary_if); |
1586 | if (ret) | 1693 | if (ret) |
@@ -1598,9 +1705,8 @@ static bool | |||
1598 | batadv_send_other_tt_response(struct batadv_priv *bat_priv, | 1705 | batadv_send_other_tt_response(struct batadv_priv *bat_priv, |
1599 | struct batadv_tt_query_packet *tt_request) | 1706 | struct batadv_tt_query_packet *tt_request) |
1600 | { | 1707 | { |
1601 | struct batadv_orig_node *req_dst_orig_node = NULL; | 1708 | struct batadv_orig_node *req_dst_orig_node; |
1602 | struct batadv_orig_node *res_dst_orig_node = NULL; | 1709 | struct batadv_orig_node *res_dst_orig_node = NULL; |
1603 | struct batadv_neigh_node *neigh_node = NULL; | ||
1604 | struct batadv_hard_iface *primary_if = NULL; | 1710 | struct batadv_hard_iface *primary_if = NULL; |
1605 | uint8_t orig_ttvn, req_ttvn, ttvn; | 1711 | uint8_t orig_ttvn, req_ttvn, ttvn; |
1606 | int ret = false; | 1712 | int ret = false; |
@@ -1626,10 +1732,6 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv, | |||
1626 | if (!res_dst_orig_node) | 1732 | if (!res_dst_orig_node) |
1627 | goto out; | 1733 | goto out; |
1628 | 1734 | ||
1629 | neigh_node = batadv_orig_node_get_router(res_dst_orig_node); | ||
1630 | if (!neigh_node) | ||
1631 | goto out; | ||
1632 | |||
1633 | primary_if = batadv_primary_if_get_selected(bat_priv); | 1735 | primary_if = batadv_primary_if_get_selected(bat_priv); |
1634 | if (!primary_if) | 1736 | if (!primary_if) |
1635 | goto out; | 1737 | goto out; |
@@ -1658,11 +1760,11 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv, | |||
1658 | tt_tot = tt_len / sizeof(struct batadv_tt_change); | 1760 | tt_tot = tt_len / sizeof(struct batadv_tt_change); |
1659 | 1761 | ||
1660 | len = sizeof(*tt_response) + tt_len; | 1762 | len = sizeof(*tt_response) + tt_len; |
1661 | skb = dev_alloc_skb(len + ETH_HLEN); | 1763 | skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN); |
1662 | if (!skb) | 1764 | if (!skb) |
1663 | goto unlock; | 1765 | goto unlock; |
1664 | 1766 | ||
1665 | skb_reserve(skb, ETH_HLEN); | 1767 | skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN); |
1666 | packet_pos = skb_put(skb, len); | 1768 | packet_pos = skb_put(skb, len); |
1667 | tt_response = (struct batadv_tt_query_packet *)packet_pos; | 1769 | tt_response = (struct batadv_tt_query_packet *)packet_pos; |
1668 | tt_response->ttvn = req_ttvn; | 1770 | tt_response->ttvn = req_ttvn; |
@@ -1701,14 +1803,13 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv, | |||
1701 | tt_response->flags |= BATADV_TT_FULL_TABLE; | 1803 | tt_response->flags |= BATADV_TT_FULL_TABLE; |
1702 | 1804 | ||
1703 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 1805 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
1704 | "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n", | 1806 | "Sending TT_RESPONSE %pM for %pM (ttvn: %u)\n", |
1705 | res_dst_orig_node->orig, neigh_node->addr, | 1807 | res_dst_orig_node->orig, req_dst_orig_node->orig, req_ttvn); |
1706 | req_dst_orig_node->orig, req_ttvn); | ||
1707 | 1808 | ||
1708 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX); | 1809 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX); |
1709 | 1810 | ||
1710 | batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | 1811 | if (batadv_send_skb_to_orig(skb, res_dst_orig_node, NULL)) |
1711 | ret = true; | 1812 | ret = true; |
1712 | goto out; | 1813 | goto out; |
1713 | 1814 | ||
1714 | unlock: | 1815 | unlock: |
@@ -1719,8 +1820,6 @@ out: | |||
1719 | batadv_orig_node_free_ref(res_dst_orig_node); | 1820 | batadv_orig_node_free_ref(res_dst_orig_node); |
1720 | if (req_dst_orig_node) | 1821 | if (req_dst_orig_node) |
1721 | batadv_orig_node_free_ref(req_dst_orig_node); | 1822 | batadv_orig_node_free_ref(req_dst_orig_node); |
1722 | if (neigh_node) | ||
1723 | batadv_neigh_node_free_ref(neigh_node); | ||
1724 | if (primary_if) | 1823 | if (primary_if) |
1725 | batadv_hardif_free_ref(primary_if); | 1824 | batadv_hardif_free_ref(primary_if); |
1726 | if (!ret) | 1825 | if (!ret) |
@@ -1733,8 +1832,7 @@ static bool | |||
1733 | batadv_send_my_tt_response(struct batadv_priv *bat_priv, | 1832 | batadv_send_my_tt_response(struct batadv_priv *bat_priv, |
1734 | struct batadv_tt_query_packet *tt_request) | 1833 | struct batadv_tt_query_packet *tt_request) |
1735 | { | 1834 | { |
1736 | struct batadv_orig_node *orig_node = NULL; | 1835 | struct batadv_orig_node *orig_node; |
1737 | struct batadv_neigh_node *neigh_node = NULL; | ||
1738 | struct batadv_hard_iface *primary_if = NULL; | 1836 | struct batadv_hard_iface *primary_if = NULL; |
1739 | uint8_t my_ttvn, req_ttvn, ttvn; | 1837 | uint8_t my_ttvn, req_ttvn, ttvn; |
1740 | int ret = false; | 1838 | int ret = false; |
@@ -1759,10 +1857,6 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv, | |||
1759 | if (!orig_node) | 1857 | if (!orig_node) |
1760 | goto out; | 1858 | goto out; |
1761 | 1859 | ||
1762 | neigh_node = batadv_orig_node_get_router(orig_node); | ||
1763 | if (!neigh_node) | ||
1764 | goto out; | ||
1765 | |||
1766 | primary_if = batadv_primary_if_get_selected(bat_priv); | 1860 | primary_if = batadv_primary_if_get_selected(bat_priv); |
1767 | if (!primary_if) | 1861 | if (!primary_if) |
1768 | goto out; | 1862 | goto out; |
@@ -1785,11 +1879,11 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv, | |||
1785 | tt_tot = tt_len / sizeof(struct batadv_tt_change); | 1879 | tt_tot = tt_len / sizeof(struct batadv_tt_change); |
1786 | 1880 | ||
1787 | len = sizeof(*tt_response) + tt_len; | 1881 | len = sizeof(*tt_response) + tt_len; |
1788 | skb = dev_alloc_skb(len + ETH_HLEN); | 1882 | skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN); |
1789 | if (!skb) | 1883 | if (!skb) |
1790 | goto unlock; | 1884 | goto unlock; |
1791 | 1885 | ||
1792 | skb_reserve(skb, ETH_HLEN); | 1886 | skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN); |
1793 | packet_pos = skb_put(skb, len); | 1887 | packet_pos = skb_put(skb, len); |
1794 | tt_response = (struct batadv_tt_query_packet *)packet_pos; | 1888 | tt_response = (struct batadv_tt_query_packet *)packet_pos; |
1795 | tt_response->ttvn = req_ttvn; | 1889 | tt_response->ttvn = req_ttvn; |
@@ -1826,14 +1920,14 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv, | |||
1826 | tt_response->flags |= BATADV_TT_FULL_TABLE; | 1920 | tt_response->flags |= BATADV_TT_FULL_TABLE; |
1827 | 1921 | ||
1828 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 1922 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
1829 | "Sending TT_RESPONSE to %pM via %pM [%c]\n", | 1923 | "Sending TT_RESPONSE to %pM [%c]\n", |
1830 | orig_node->orig, neigh_node->addr, | 1924 | orig_node->orig, |
1831 | (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.')); | 1925 | (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.')); |
1832 | 1926 | ||
1833 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX); | 1927 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX); |
1834 | 1928 | ||
1835 | batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | 1929 | if (batadv_send_skb_to_orig(skb, orig_node, NULL)) |
1836 | ret = true; | 1930 | ret = true; |
1837 | goto out; | 1931 | goto out; |
1838 | 1932 | ||
1839 | unlock: | 1933 | unlock: |
@@ -1841,8 +1935,6 @@ unlock: | |||
1841 | out: | 1935 | out: |
1842 | if (orig_node) | 1936 | if (orig_node) |
1843 | batadv_orig_node_free_ref(orig_node); | 1937 | batadv_orig_node_free_ref(orig_node); |
1844 | if (neigh_node) | ||
1845 | batadv_neigh_node_free_ref(neigh_node); | ||
1846 | if (primary_if) | 1938 | if (primary_if) |
1847 | batadv_hardif_free_ref(primary_if); | 1939 | batadv_hardif_free_ref(primary_if); |
1848 | if (!ret) | 1940 | if (!ret) |
@@ -1899,7 +1991,7 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv, | |||
1899 | static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv, | 1991 | static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv, |
1900 | struct batadv_tt_query_packet *tt_response) | 1992 | struct batadv_tt_query_packet *tt_response) |
1901 | { | 1993 | { |
1902 | struct batadv_orig_node *orig_node = NULL; | 1994 | struct batadv_orig_node *orig_node; |
1903 | 1995 | ||
1904 | orig_node = batadv_orig_hash_find(bat_priv, tt_response->src); | 1996 | orig_node = batadv_orig_hash_find(bat_priv, tt_response->src); |
1905 | if (!orig_node) | 1997 | if (!orig_node) |
@@ -1941,7 +2033,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv, | |||
1941 | 2033 | ||
1942 | bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr) | 2034 | bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr) |
1943 | { | 2035 | { |
1944 | struct batadv_tt_local_entry *tt_local_entry = NULL; | 2036 | struct batadv_tt_local_entry *tt_local_entry; |
1945 | bool ret = false; | 2037 | bool ret = false; |
1946 | 2038 | ||
1947 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr); | 2039 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr); |
@@ -1950,7 +2042,8 @@ bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr) | |||
1950 | /* Check if the client has been logically deleted (but is kept for | 2042 | /* Check if the client has been logically deleted (but is kept for |
1951 | * consistency purposes) | 2043 | * consistency purposes) |
1952 | */ | 2044 | */ |
1953 | if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING) | 2045 | if ((tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING) || |
2046 | (tt_local_entry->common.flags & BATADV_TT_CLIENT_ROAM)) | ||
1954 | goto out; | 2047 | goto out; |
1955 | ret = true; | 2048 | ret = true; |
1956 | out: | 2049 | out: |
@@ -2001,10 +2094,6 @@ void batadv_handle_tt_response(struct batadv_priv *bat_priv, | |||
2001 | 2094 | ||
2002 | /* Recalculate the CRC for this orig_node and store it */ | 2095 | /* Recalculate the CRC for this orig_node and store it */ |
2003 | orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node); | 2096 | orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node); |
2004 | /* Roaming phase is over: tables are in sync again. I can | ||
2005 | * unset the flag | ||
2006 | */ | ||
2007 | orig_node->tt_poss_change = false; | ||
2008 | out: | 2097 | out: |
2009 | if (orig_node) | 2098 | if (orig_node) |
2010 | batadv_orig_node_free_ref(orig_node); | 2099 | batadv_orig_node_free_ref(orig_node); |
@@ -2110,7 +2199,6 @@ unlock: | |||
2110 | static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client, | 2199 | static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client, |
2111 | struct batadv_orig_node *orig_node) | 2200 | struct batadv_orig_node *orig_node) |
2112 | { | 2201 | { |
2113 | struct batadv_neigh_node *neigh_node = NULL; | ||
2114 | struct sk_buff *skb = NULL; | 2202 | struct sk_buff *skb = NULL; |
2115 | struct batadv_roam_adv_packet *roam_adv_packet; | 2203 | struct batadv_roam_adv_packet *roam_adv_packet; |
2116 | int ret = 1; | 2204 | int ret = 1; |
@@ -2123,11 +2211,11 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client, | |||
2123 | if (!batadv_tt_check_roam_count(bat_priv, client)) | 2211 | if (!batadv_tt_check_roam_count(bat_priv, client)) |
2124 | goto out; | 2212 | goto out; |
2125 | 2213 | ||
2126 | skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN); | 2214 | skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN + NET_IP_ALIGN); |
2127 | if (!skb) | 2215 | if (!skb) |
2128 | goto out; | 2216 | goto out; |
2129 | 2217 | ||
2130 | skb_reserve(skb, ETH_HLEN); | 2218 | skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN); |
2131 | 2219 | ||
2132 | roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len); | 2220 | roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len); |
2133 | 2221 | ||
@@ -2143,23 +2231,17 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client, | |||
2143 | memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN); | 2231 | memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN); |
2144 | memcpy(roam_adv_packet->client, client, ETH_ALEN); | 2232 | memcpy(roam_adv_packet->client, client, ETH_ALEN); |
2145 | 2233 | ||
2146 | neigh_node = batadv_orig_node_get_router(orig_node); | ||
2147 | if (!neigh_node) | ||
2148 | goto out; | ||
2149 | |||
2150 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 2234 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
2151 | "Sending ROAMING_ADV to %pM (client %pM) via %pM\n", | 2235 | "Sending ROAMING_ADV to %pM (client %pM)\n", |
2152 | orig_node->orig, client, neigh_node->addr); | 2236 | orig_node->orig, client); |
2153 | 2237 | ||
2154 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX); | 2238 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX); |
2155 | 2239 | ||
2156 | batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | 2240 | if (batadv_send_skb_to_orig(skb, orig_node, NULL)) |
2157 | ret = 0; | 2241 | ret = 0; |
2158 | 2242 | ||
2159 | out: | 2243 | out: |
2160 | if (neigh_node) | 2244 | if (ret && skb) |
2161 | batadv_neigh_node_free_ref(neigh_node); | ||
2162 | if (ret) | ||
2163 | kfree_skb(skb); | 2245 | kfree_skb(skb); |
2164 | return; | 2246 | return; |
2165 | } | 2247 | } |
@@ -2295,7 +2377,6 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv, | |||
2295 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 2377 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
2296 | "Local changes committed, updating to ttvn %u\n", | 2378 | "Local changes committed, updating to ttvn %u\n", |
2297 | (uint8_t)atomic_read(&bat_priv->tt.vn)); | 2379 | (uint8_t)atomic_read(&bat_priv->tt.vn)); |
2298 | bat_priv->tt.poss_change = false; | ||
2299 | 2380 | ||
2300 | /* reset the sending counter */ | 2381 | /* reset the sending counter */ |
2301 | atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX); | 2382 | atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX); |
@@ -2407,11 +2488,6 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv, | |||
2407 | */ | 2488 | */ |
2408 | if (orig_node->tt_crc != tt_crc) | 2489 | if (orig_node->tt_crc != tt_crc) |
2409 | goto request_table; | 2490 | goto request_table; |
2410 | |||
2411 | /* Roaming phase is over: tables are in sync again. I can | ||
2412 | * unset the flag | ||
2413 | */ | ||
2414 | orig_node->tt_poss_change = false; | ||
2415 | } else { | 2491 | } else { |
2416 | /* if we missed more than one change or our tables are not | 2492 | /* if we missed more than one change or our tables are not |
2417 | * in sync anymore -> request fresh tt data | 2493 | * in sync anymore -> request fresh tt data |
@@ -2444,12 +2520,38 @@ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv, | |||
2444 | if (!tt_global_entry) | 2520 | if (!tt_global_entry) |
2445 | goto out; | 2521 | goto out; |
2446 | 2522 | ||
2447 | ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM; | 2523 | ret = !!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM); |
2448 | batadv_tt_global_entry_free_ref(tt_global_entry); | 2524 | batadv_tt_global_entry_free_ref(tt_global_entry); |
2449 | out: | 2525 | out: |
2450 | return ret; | 2526 | return ret; |
2451 | } | 2527 | } |
2452 | 2528 | ||
2529 | /** | ||
2530 | * batadv_tt_local_client_is_roaming - tells whether the client is roaming | ||
2531 | * @bat_priv: the bat priv with all the soft interface information | ||
2532 | * @addr: the MAC address of the local client to query | ||
2533 | * | ||
2534 | * Returns true if the local client is known to be roaming (it is not served | ||
2535 | * by this node anymore), false otherwise. A roaming client is still kept in | ||
2536 | * the table to keep the latter consistent with the node TTVN | ||
2537 | */ | ||
2538 | bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv, | ||
2539 | uint8_t *addr) | ||
2540 | { | ||
2541 | struct batadv_tt_local_entry *tt_local_entry; | ||
2542 | bool ret = false; | ||
2543 | |||
2544 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr); | ||
2545 | if (!tt_local_entry) | ||
2546 | goto out; | ||
2547 | |||
2548 | ret = tt_local_entry->common.flags & BATADV_TT_CLIENT_ROAM; | ||
2549 | batadv_tt_local_entry_free_ref(tt_local_entry); | ||
2550 | out: | ||
2551 | return ret; | ||
2552 | |||
2553 | } | ||
2554 | |||
2453 | bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, | 2555 | bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, |
2454 | struct batadv_orig_node *orig_node, | 2556 | struct batadv_orig_node *orig_node, |
2455 | const unsigned char *addr) | 2557 | const unsigned char *addr) |
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index 811fffd4760c..46d4451a59ee 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h | |||
@@ -24,9 +24,9 @@ int batadv_tt_len(int changes_num); | |||
24 | int batadv_tt_init(struct batadv_priv *bat_priv); | 24 | int batadv_tt_init(struct batadv_priv *bat_priv); |
25 | void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, | 25 | void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, |
26 | int ifindex); | 26 | int ifindex); |
27 | void batadv_tt_local_remove(struct batadv_priv *bat_priv, | 27 | uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv, |
28 | const uint8_t *addr, const char *message, | 28 | const uint8_t *addr, const char *message, |
29 | bool roaming); | 29 | bool roaming); |
30 | int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset); | 30 | int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset); |
31 | void batadv_tt_global_add_orig(struct batadv_priv *bat_priv, | 31 | void batadv_tt_global_add_orig(struct batadv_priv *bat_priv, |
32 | struct batadv_orig_node *orig_node, | 32 | struct batadv_orig_node *orig_node, |
@@ -59,6 +59,8 @@ int batadv_tt_append_diff(struct batadv_priv *bat_priv, | |||
59 | int packet_min_len); | 59 | int packet_min_len); |
60 | bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv, | 60 | bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv, |
61 | uint8_t *addr); | 61 | uint8_t *addr); |
62 | bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv, | ||
63 | uint8_t *addr); | ||
62 | bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, | 64 | bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, |
63 | struct batadv_orig_node *orig_node, | 65 | struct batadv_orig_node *orig_node, |
64 | const unsigned char *addr); | 66 | const unsigned char *addr); |
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index ac1e07a80454..ae9ac9aca8c5 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
@@ -28,20 +28,41 @@ | |||
28 | (ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \ | 28 | (ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \ |
29 | sizeof(struct batadv_bcast_packet))) | 29 | sizeof(struct batadv_bcast_packet))) |
30 | 30 | ||
31 | #ifdef CONFIG_BATMAN_ADV_DAT | ||
32 | |||
33 | /* batadv_dat_addr_t is the type used for all DHT addresses. If it is changed, | ||
34 | * BATADV_DAT_ADDR_MAX has to be changed as well. | ||
35 | * | ||
36 | * *Please be careful: batadv_dat_addr_t must be UNSIGNED* | ||
37 | */ | ||
38 | #define batadv_dat_addr_t uint16_t | ||
39 | |||
40 | #endif /* CONFIG_BATMAN_ADV_DAT */ | ||
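
The comment above ties batadv_dat_addr_t to a BATADV_DAT_ADDR_MAX constant and insists on an unsigned type. One way to keep such a pair in sync is to derive the maximum from the type itself; the standalone C11 sketch below does that (BATADV_DAT_ADDR_MAX here is a hypothetical companion definition written for illustration, not quoted from the kernel source).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define batadv_dat_addr_t uint16_t

/* hypothetical companion define: derive the maximum from the type itself,
 * so changing the #define above automatically updates the maximum too
 */
#define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)

/* the ~0 trick only yields the maximum for UNSIGNED types */
static_assert((batadv_dat_addr_t)-1 > 0, "batadv_dat_addr_t must be unsigned");

int main(void)
{
    printf("DAT address space: 0..%u\n", (unsigned int)BATADV_DAT_ADDR_MAX);
    return 0;
}
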
41 | |||
42 | /** | ||
43 | * struct batadv_hard_iface_bat_iv - per hard interface B.A.T.M.A.N. IV data | ||
44 | * @ogm_buff: buffer holding the OGM packet | ||
45 | * @ogm_buff_len: length of the OGM packet buffer | ||
46 | * @ogm_seqno: OGM sequence number - used to identify each OGM | ||
47 | */ | ||
48 | struct batadv_hard_iface_bat_iv { | ||
49 | unsigned char *ogm_buff; | ||
50 | int ogm_buff_len; | ||
51 | atomic_t ogm_seqno; | ||
52 | }; | ||
53 | |||
31 | struct batadv_hard_iface { | 54 | struct batadv_hard_iface { |
32 | struct list_head list; | 55 | struct list_head list; |
33 | int16_t if_num; | 56 | int16_t if_num; |
34 | char if_status; | 57 | char if_status; |
35 | struct net_device *net_dev; | 58 | struct net_device *net_dev; |
36 | atomic_t seqno; | ||
37 | atomic_t frag_seqno; | 59 | atomic_t frag_seqno; |
38 | unsigned char *packet_buff; | ||
39 | int packet_len; | ||
40 | struct kobject *hardif_obj; | 60 | struct kobject *hardif_obj; |
41 | atomic_t refcount; | 61 | atomic_t refcount; |
42 | struct packet_type batman_adv_ptype; | 62 | struct packet_type batman_adv_ptype; |
43 | struct net_device *soft_iface; | 63 | struct net_device *soft_iface; |
44 | struct rcu_head rcu; | 64 | struct rcu_head rcu; |
65 | struct batadv_hard_iface_bat_iv bat_iv; | ||
45 | }; | 66 | }; |
46 | 67 | ||
47 | /** | 68 | /** |
@@ -63,6 +84,9 @@ struct batadv_orig_node { | |||
63 | uint8_t orig[ETH_ALEN]; | 84 | uint8_t orig[ETH_ALEN]; |
64 | uint8_t primary_addr[ETH_ALEN]; | 85 | uint8_t primary_addr[ETH_ALEN]; |
65 | struct batadv_neigh_node __rcu *router; /* rcu protected pointer */ | 86 | struct batadv_neigh_node __rcu *router; /* rcu protected pointer */ |
87 | #ifdef CONFIG_BATMAN_ADV_DAT | ||
88 | batadv_dat_addr_t dat_addr; | ||
89 | #endif | ||
66 | unsigned long *bcast_own; | 90 | unsigned long *bcast_own; |
67 | uint8_t *bcast_own_sum; | 91 | uint8_t *bcast_own_sum; |
68 | unsigned long last_seen; | 92 | unsigned long last_seen; |
@@ -77,13 +101,6 @@ struct batadv_orig_node { | |||
77 | spinlock_t tt_buff_lock; /* protects tt_buff */ | 101 | spinlock_t tt_buff_lock; /* protects tt_buff */ |
78 | atomic_t tt_size; | 102 | atomic_t tt_size; |
79 | bool tt_initialised; | 103 | bool tt_initialised; |
80 | /* The tt_poss_change flag is used to detect an ongoing roaming phase. | ||
81 | * If true, then I sent a Roaming_adv to this orig_node and I have to | ||
82 | * inspect every packet directed to it to check whether it is still | ||
83 | * the true destination or not. This flag will be reset to false as | ||
84 | * soon as I receive a new TTVN from this orig_node | ||
85 | */ | ||
86 | bool tt_poss_change; | ||
87 | uint32_t last_real_seqno; | 104 | uint32_t last_real_seqno; |
88 | uint8_t last_ttl; | 105 | uint8_t last_ttl; |
89 | DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE); | 106 | DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE); |
@@ -139,7 +156,7 @@ struct batadv_neigh_node { | |||
139 | #ifdef CONFIG_BATMAN_ADV_BLA | 156 | #ifdef CONFIG_BATMAN_ADV_BLA |
140 | struct batadv_bcast_duplist_entry { | 157 | struct batadv_bcast_duplist_entry { |
141 | uint8_t orig[ETH_ALEN]; | 158 | uint8_t orig[ETH_ALEN]; |
142 | uint16_t crc; | 159 | __be32 crc; |
143 | unsigned long entrytime; | 160 | unsigned long entrytime; |
144 | }; | 161 | }; |
145 | #endif | 162 | #endif |
@@ -162,6 +179,13 @@ enum batadv_counters { | |||
162 | BATADV_CNT_TT_RESPONSE_RX, | 179 | BATADV_CNT_TT_RESPONSE_RX, |
163 | BATADV_CNT_TT_ROAM_ADV_TX, | 180 | BATADV_CNT_TT_ROAM_ADV_TX, |
164 | BATADV_CNT_TT_ROAM_ADV_RX, | 181 | BATADV_CNT_TT_ROAM_ADV_RX, |
182 | #ifdef CONFIG_BATMAN_ADV_DAT | ||
183 | BATADV_CNT_DAT_GET_TX, | ||
184 | BATADV_CNT_DAT_GET_RX, | ||
185 | BATADV_CNT_DAT_PUT_TX, | ||
186 | BATADV_CNT_DAT_PUT_RX, | ||
187 | BATADV_CNT_DAT_CACHED_REPLY_TX, | ||
188 | #endif | ||
165 | BATADV_CNT_NUM, | 189 | BATADV_CNT_NUM, |
166 | }; | 190 | }; |
167 | 191 | ||
@@ -181,7 +205,6 @@ struct batadv_priv_tt { | |||
181 | atomic_t vn; | 205 | atomic_t vn; |
182 | atomic_t ogm_append_cnt; | 206 | atomic_t ogm_append_cnt; |
183 | atomic_t local_changes; | 207 | atomic_t local_changes; |
184 | bool poss_change; | ||
185 | struct list_head changes_list; | 208 | struct list_head changes_list; |
186 | struct batadv_hashtable *local_hash; | 209 | struct batadv_hashtable *local_hash; |
187 | struct batadv_hashtable *global_hash; | 210 | struct batadv_hashtable *global_hash; |
@@ -228,6 +251,20 @@ struct batadv_priv_vis { | |||
228 | struct batadv_vis_info *my_info; | 251 | struct batadv_vis_info *my_info; |
229 | }; | 252 | }; |
230 | 253 | ||
254 | /** | ||
255 | * struct batadv_priv_dat - per mesh interface DAT private data | ||
256 | * @addr: node DAT address | ||
257 | * @hash: hashtable representing the local ARP cache | ||
258 | * @work: work queue callback item for cache purging | ||
259 | */ | ||
260 | #ifdef CONFIG_BATMAN_ADV_DAT | ||
261 | struct batadv_priv_dat { | ||
262 | batadv_dat_addr_t addr; | ||
263 | struct batadv_hashtable *hash; | ||
264 | struct delayed_work work; | ||
265 | }; | ||
266 | #endif | ||
267 | |||
231 | struct batadv_priv { | 268 | struct batadv_priv { |
232 | atomic_t mesh_state; | 269 | atomic_t mesh_state; |
233 | struct net_device_stats stats; | 270 | struct net_device_stats stats; |
@@ -237,6 +274,9 @@ struct batadv_priv { | |||
237 | atomic_t fragmentation; /* boolean */ | 274 | atomic_t fragmentation; /* boolean */ |
238 | atomic_t ap_isolation; /* boolean */ | 275 | atomic_t ap_isolation; /* boolean */ |
239 | atomic_t bridge_loop_avoidance; /* boolean */ | 276 | atomic_t bridge_loop_avoidance; /* boolean */ |
277 | #ifdef CONFIG_BATMAN_ADV_DAT | ||
278 | atomic_t distributed_arp_table; /* boolean */ | ||
279 | #endif | ||
240 | atomic_t vis_mode; /* VIS_TYPE_* */ | 280 | atomic_t vis_mode; /* VIS_TYPE_* */ |
241 | atomic_t gw_mode; /* GW_MODE_* */ | 281 | atomic_t gw_mode; /* GW_MODE_* */ |
242 | atomic_t gw_sel_class; /* uint */ | 282 | atomic_t gw_sel_class; /* uint */ |
@@ -255,7 +295,7 @@ struct batadv_priv { | |||
255 | struct hlist_head forw_bcast_list; | 295 | struct hlist_head forw_bcast_list; |
256 | struct batadv_hashtable *orig_hash; | 296 | struct batadv_hashtable *orig_hash; |
257 | spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ | 297 | spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ |
258 | spinlock_t forw_bcast_list_lock; /* protects */ | 298 | spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */ |
259 | struct delayed_work orig_work; | 299 | struct delayed_work orig_work; |
260 | struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ | 300 | struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ |
261 | struct batadv_algo_ops *bat_algo_ops; | 301 | struct batadv_algo_ops *bat_algo_ops; |
@@ -265,6 +305,9 @@ struct batadv_priv { | |||
265 | struct batadv_priv_gw gw; | 305 | struct batadv_priv_gw gw; |
266 | struct batadv_priv_tt tt; | 306 | struct batadv_priv_tt tt; |
267 | struct batadv_priv_vis vis; | 307 | struct batadv_priv_vis vis; |
308 | #ifdef CONFIG_BATMAN_ADV_DAT | ||
309 | struct batadv_priv_dat dat; | ||
310 | #endif | ||
268 | }; | 311 | }; |
269 | 312 | ||
270 | struct batadv_socket_client { | 313 | struct batadv_socket_client { |
@@ -318,6 +361,7 @@ struct batadv_backbone_gw { | |||
318 | struct hlist_node hash_entry; | 361 | struct hlist_node hash_entry; |
319 | struct batadv_priv *bat_priv; | 362 | struct batadv_priv *bat_priv; |
320 | unsigned long lasttime; /* last time we heard of this backbone gw */ | 363 | unsigned long lasttime; /* last time we heard of this backbone gw */ |
364 | atomic_t wait_periods; | ||
321 | atomic_t request_sent; | 365 | atomic_t request_sent; |
322 | atomic_t refcount; | 366 | atomic_t refcount; |
323 | struct rcu_head rcu; | 367 | struct rcu_head rcu; |
@@ -437,4 +481,36 @@ struct batadv_algo_ops { | |||
437 | void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet); | 481 | void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet); |
438 | }; | 482 | }; |
439 | 483 | ||
484 | /** | ||
485 | * struct batadv_dat_entry - a single entry of the batman-adv ARP backend, | ||
486 | * used to store ARP entries needed for the global DAT cache | ||
487 | * @ip: the IPv4 corresponding to this DAT/ARP entry | ||
488 | * @mac_addr: the MAC address associated to the stored IPv4 | ||
489 | * @last_update: time in jiffies when this entry was refreshed last time | ||
490 | * @hash_entry: hlist node for batadv_priv_dat::hash | ||
491 | * @refcount: number of contexts the object is used in | ||
492 | * @rcu: struct used for freeing in an RCU-safe manner | ||
493 | */ | ||
494 | struct batadv_dat_entry { | ||
495 | __be32 ip; | ||
496 | uint8_t mac_addr[ETH_ALEN]; | ||
497 | unsigned long last_update; | ||
498 | struct hlist_node hash_entry; | ||
499 | atomic_t refcount; | ||
500 | struct rcu_head rcu; | ||
501 | }; | ||
502 | |||
503 | /** | ||
504 | * struct batadv_dat_candidate - candidate destination for DAT operations | ||
505 | * @type: the type of the selected candidate. It can be one of the following: | ||
506 | * - BATADV_DAT_CANDIDATE_NOT_FOUND | ||
507 | * - BATADV_DAT_CANDIDATE_ORIG | ||
508 | * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to the | ||
509 | * corresponding originator node structure | ||
510 | */ | ||
511 | struct batadv_dat_candidate { | ||
512 | int type; | ||
513 | struct batadv_orig_node *orig_node; | ||
514 | }; | ||
515 | |||
440 | #endif /* _NET_BATMAN_ADV_TYPES_H_ */ | 516 | #endif /* _NET_BATMAN_ADV_TYPES_H_ */ |
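The batadv_dat_entry structure added above pairs an atomic refcount with an embedded rcu head. Below is a minimal sketch of how such an entry would typically be released in batman-adv style; it is not taken from this patch, and the helper names batadv_dat_entry_free_rcu and batadv_dat_entry_free_ref are illustrative assumptions.

	static void batadv_dat_entry_free_rcu(struct rcu_head *rcu)
	{
		struct batadv_dat_entry *dat_entry;

		/* recover the entry from its embedded rcu head and free it */
		dat_entry = container_of(rcu, struct batadv_dat_entry, rcu);
		kfree(dat_entry);
	}

	static void batadv_dat_entry_free_ref(struct batadv_dat_entry *dat_entry)
	{
		/* drop one reference; defer the free until a grace period has
		 * elapsed so RCU readers walking the DAT hash stay safe
		 */
		if (atomic_dec_and_test(&dat_entry->refcount))
			call_rcu(&dat_entry->rcu, batadv_dat_entry_free_rcu);
	}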
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index f39723281ca1..10aff49fcf25 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c | |||
@@ -291,14 +291,118 @@ out: | |||
291 | return ret; | 291 | return ret; |
292 | } | 292 | } |
293 | 293 | ||
294 | int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv) | 294 | /** |
295 | * batadv_unicast_push_and_fill_skb - extends the buffer and initializes the | ||
296 | * common fields for unicast packets | ||
297 | * @skb: packet | ||
298 | * @hdr_size: amount of bytes to push at the beginning of the skb | ||
299 | * @orig_node: the destination node | ||
300 | * | ||
301 | * Returns false if the buffer extension was not possible or true otherwise | ||
302 | */ | ||
303 | static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size, | ||
304 | struct batadv_orig_node *orig_node) | ||
305 | { | ||
306 | struct batadv_unicast_packet *unicast_packet; | ||
307 | uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); | ||
308 | |||
309 | if (batadv_skb_head_push(skb, hdr_size) < 0) | ||
310 | return false; | ||
311 | |||
312 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | ||
313 | unicast_packet->header.version = BATADV_COMPAT_VERSION; | ||
314 | /* batman packet type: unicast */ | ||
315 | unicast_packet->header.packet_type = BATADV_UNICAST; | ||
316 | /* set unicast ttl */ | ||
317 | unicast_packet->header.ttl = BATADV_TTL; | ||
318 | /* copy the destination for faster routing */ | ||
319 | memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); | ||
320 | /* set the destination tt version number */ | ||
321 | unicast_packet->ttvn = ttvn; | ||
322 | |||
323 | return true; | ||
324 | } | ||
325 | |||
326 | /** | ||
327 | * batadv_unicast_prepare_skb - encapsulate an skb with a unicast header | ||
328 | * @skb: the skb containing the payload to encapsulate | ||
329 | * @orig_node: the destination node | ||
330 | * | ||
331 | * Returns false if the payload could not be encapsulated or true otherwise | ||
332 | */ | ||
333 | static bool batadv_unicast_prepare_skb(struct sk_buff *skb, | ||
334 | struct batadv_orig_node *orig_node) | ||
335 | { | ||
336 | size_t uni_size = sizeof(struct batadv_unicast_packet); | ||
337 | return batadv_unicast_push_and_fill_skb(skb, uni_size, orig_node); | ||
338 | } | ||
339 | |||
340 | /** | ||
341 | * batadv_unicast_4addr_prepare_skb - encapsulate an skb with a unicast4addr | ||
342 | * header | ||
343 | * @bat_priv: the bat priv with all the soft interface information | ||
344 | * @skb: the skb containing the payload to encapsulate | ||
345 | * @orig_node: the destination node | ||
346 | * @packet_subtype: the batman 4addr packet subtype to use | ||
347 | * | ||
348 | * Returns false if the payload could not be encapsulated or true otherwise | ||
349 | */ | ||
350 | bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv, | ||
351 | struct sk_buff *skb, | ||
352 | struct batadv_orig_node *orig, | ||
353 | int packet_subtype) | ||
354 | { | ||
355 | struct batadv_hard_iface *primary_if; | ||
356 | struct batadv_unicast_4addr_packet *unicast_4addr_packet; | ||
357 | bool ret = false; | ||
358 | |||
359 | primary_if = batadv_primary_if_get_selected(bat_priv); | ||
360 | if (!primary_if) | ||
361 | goto out; | ||
362 | |||
363 | /* pull the header space and fill the unicast_packet substructure. | ||
364 | * We can do that because the first member of the unicast_4addr_packet | ||
365 | * is of type struct unicast_packet | ||
366 | */ | ||
367 | if (!batadv_unicast_push_and_fill_skb(skb, | ||
368 | sizeof(*unicast_4addr_packet), | ||
369 | orig)) | ||
370 | goto out; | ||
371 | |||
372 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; | ||
373 | unicast_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR; | ||
374 | memcpy(unicast_4addr_packet->src, primary_if->net_dev->dev_addr, | ||
375 | ETH_ALEN); | ||
376 | unicast_4addr_packet->subtype = packet_subtype; | ||
377 | unicast_4addr_packet->reserved = 0; | ||
378 | |||
379 | ret = true; | ||
380 | out: | ||
381 | if (primary_if) | ||
382 | batadv_hardif_free_ref(primary_if); | ||
383 | return ret; | ||
384 | } | ||
385 | |||
386 | /** | ||
387 | * batadv_unicast_generic_send_skb - send an skb as unicast | ||
388 | * @bat_priv: the bat priv with all the soft interface information | ||
389 | * @skb: payload to send | ||
390 | * @packet_type: the batman unicast packet type to use | ||
391 | * @packet_subtype: the batman packet subtype. It is ignored if packet_type is | ||
392 | * not BATADV_UNICAST_4ADDR | ||
393 | * | ||
394 | * Returns 1 in case of error or 0 otherwise | ||
395 | */ | ||
396 | int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv, | ||
397 | struct sk_buff *skb, int packet_type, | ||
398 | int packet_subtype) | ||
295 | { | 399 | { |
296 | struct ethhdr *ethhdr = (struct ethhdr *)skb->data; | 400 | struct ethhdr *ethhdr = (struct ethhdr *)skb->data; |
297 | struct batadv_unicast_packet *unicast_packet; | 401 | struct batadv_unicast_packet *unicast_packet; |
298 | struct batadv_orig_node *orig_node; | 402 | struct batadv_orig_node *orig_node; |
299 | struct batadv_neigh_node *neigh_node; | 403 | struct batadv_neigh_node *neigh_node; |
300 | int data_len = skb->len; | 404 | int data_len = skb->len; |
301 | int ret = 1; | 405 | int ret = NET_RX_DROP; |
302 | unsigned int dev_mtu; | 406 | unsigned int dev_mtu; |
303 | 407 | ||
304 | /* get routing information */ | 408 | /* get routing information */ |
@@ -324,21 +428,23 @@ find_router: | |||
324 | if (!neigh_node) | 428 | if (!neigh_node) |
325 | goto out; | 429 | goto out; |
326 | 430 | ||
327 | if (batadv_skb_head_push(skb, sizeof(*unicast_packet)) < 0) | 431 | switch (packet_type) { |
432 | case BATADV_UNICAST: | ||
433 | batadv_unicast_prepare_skb(skb, orig_node); | ||
434 | break; | ||
435 | case BATADV_UNICAST_4ADDR: | ||
436 | batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node, | ||
437 | packet_subtype); | ||
438 | break; | ||
439 | default: | ||
440 | /* this function supports UNICAST and UNICAST_4ADDR only. It | ||
441 | * should never be invoked with any other packet type | ||
442 | */ | ||
328 | goto out; | 443 | goto out; |
444 | } | ||
329 | 445 | ||
330 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 446 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
331 | 447 | ||
332 | unicast_packet->header.version = BATADV_COMPAT_VERSION; | ||
333 | /* batman packet type: unicast */ | ||
334 | unicast_packet->header.packet_type = BATADV_UNICAST; | ||
335 | /* set unicast ttl */ | ||
336 | unicast_packet->header.ttl = BATADV_TTL; | ||
337 | /* copy the destination for faster routing */ | ||
338 | memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); | ||
339 | /* set the destination tt version number */ | ||
340 | unicast_packet->ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); | ||
341 | |||
342 | /* inform the destination node that we are still missing a correct route | 448 | /* inform the destination node that we are still missing a correct route |
343 | * for this client. The destination will receive this packet and will | 449 | * for this client. The destination will receive this packet and will |
344 | * try to reroute it because the ttvn contained in the header is less | 450 | * try to reroute it because the ttvn contained in the header is less |
@@ -348,7 +454,9 @@ find_router: | |||
348 | unicast_packet->ttvn = unicast_packet->ttvn - 1; | 454 | unicast_packet->ttvn = unicast_packet->ttvn - 1; |
349 | 455 | ||
350 | dev_mtu = neigh_node->if_incoming->net_dev->mtu; | 456 | dev_mtu = neigh_node->if_incoming->net_dev->mtu; |
351 | if (atomic_read(&bat_priv->fragmentation) && | 457 | /* fragmentation mechanism only works for UNICAST (now) */ |
458 | if (packet_type == BATADV_UNICAST && | ||
459 | atomic_read(&bat_priv->fragmentation) && | ||
352 | data_len + sizeof(*unicast_packet) > dev_mtu) { | 460 | data_len + sizeof(*unicast_packet) > dev_mtu) { |
353 | /* send frag skb decreases ttl */ | 461 | /* send frag skb decreases ttl */ |
354 | unicast_packet->header.ttl++; | 462 | unicast_packet->header.ttl++; |
@@ -358,16 +466,15 @@ find_router: | |||
358 | goto out; | 466 | goto out; |
359 | } | 467 | } |
360 | 468 | ||
361 | batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | 469 | if (batadv_send_skb_to_orig(skb, orig_node, NULL)) |
362 | ret = 0; | 470 | ret = 0; |
363 | goto out; | ||
364 | 471 | ||
365 | out: | 472 | out: |
366 | if (neigh_node) | 473 | if (neigh_node) |
367 | batadv_neigh_node_free_ref(neigh_node); | 474 | batadv_neigh_node_free_ref(neigh_node); |
368 | if (orig_node) | 475 | if (orig_node) |
369 | batadv_orig_node_free_ref(orig_node); | 476 | batadv_orig_node_free_ref(orig_node); |
370 | if (ret == 1) | 477 | if (ret == NET_RX_DROP) |
371 | kfree_skb(skb); | 478 | kfree_skb(skb); |
372 | return ret; | 479 | return ret; |
373 | } | 480 | } |
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h index 1c46e2eb1ef9..61abba58bd8f 100644 --- a/net/batman-adv/unicast.h +++ b/net/batman-adv/unicast.h | |||
@@ -29,10 +29,44 @@ int batadv_frag_reassemble_skb(struct sk_buff *skb, | |||
29 | struct batadv_priv *bat_priv, | 29 | struct batadv_priv *bat_priv, |
30 | struct sk_buff **new_skb); | 30 | struct sk_buff **new_skb); |
31 | void batadv_frag_list_free(struct list_head *head); | 31 | void batadv_frag_list_free(struct list_head *head); |
32 | int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv); | ||
33 | int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv, | 32 | int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv, |
34 | struct batadv_hard_iface *hard_iface, | 33 | struct batadv_hard_iface *hard_iface, |
35 | const uint8_t dstaddr[]); | 34 | const uint8_t dstaddr[]); |
35 | bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv, | ||
36 | struct sk_buff *skb, | ||
37 | struct batadv_orig_node *orig_node, | ||
38 | int packet_subtype); | ||
39 | int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv, | ||
40 | struct sk_buff *skb, int packet_type, | ||
41 | int packet_subtype); | ||
42 | |||
43 | |||
44 | /** | ||
45 | * batadv_unicast_send_skb - send the skb encapsulated in a unicast packet | ||
46 | * @bat_priv: the bat priv with all the soft interface information | ||
47 | * @skb: the payload to send | ||
48 | */ | ||
49 | static inline int batadv_unicast_send_skb(struct batadv_priv *bat_priv, | ||
50 | struct sk_buff *skb) | ||
51 | { | ||
52 | return batadv_unicast_generic_send_skb(bat_priv, skb, BATADV_UNICAST, | ||
53 | 0); | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * batadv_unicast_4addr_send_skb - send the skb encapsulated in a unicast4addr packet | ||
58 | * @bat_priv: the bat priv with all the soft interface information | ||
59 | * @skb: the payload to send | ||
60 | * @packet_subtype: the batman 4addr packet subtype to use | ||
61 | */ | ||
62 | static inline int batadv_unicast_4addr_send_skb(struct batadv_priv *bat_priv, | ||
63 | struct sk_buff *skb, | ||
64 | int packet_subtype) | ||
65 | { | ||
66 | return batadv_unicast_generic_send_skb(bat_priv, skb, | ||
67 | BATADV_UNICAST_4ADDR, | ||
68 | packet_subtype); | ||
69 | } | ||
36 | 70 | ||
37 | static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu) | 71 | static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu) |
38 | { | 72 | { |
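The two static inline wrappers added to unicast.h above both funnel into batadv_unicast_generic_send_skb(). As a usage sketch only (example_unicast_tx() and the BATADV_P_DAT_DHT_GET subtype are assumptions made for illustration, not part of this patch):

	static int example_unicast_tx(struct batadv_priv *bat_priv,
				      struct sk_buff *skb, bool use_4addr)
	{
		if (use_4addr)
			/* expands to batadv_unicast_generic_send_skb() with
			 * BATADV_UNICAST_4ADDR and the given subtype
			 */
			return batadv_unicast_4addr_send_skb(bat_priv, skb,
							     BATADV_P_DAT_DHT_GET);

		/* plain unicast: the packet_subtype argument is ignored */
		return batadv_unicast_send_skb(bat_priv, skb);
	}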
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index 5abd1454fb07..0f65a9de5f74 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c | |||
@@ -396,12 +396,12 @@ batadv_add_packet(struct batadv_priv *bat_priv, | |||
396 | return NULL; | 396 | return NULL; |
397 | 397 | ||
398 | len = sizeof(*packet) + vis_info_len; | 398 | len = sizeof(*packet) + vis_info_len; |
399 | info->skb_packet = dev_alloc_skb(len + ETH_HLEN); | 399 | info->skb_packet = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN); |
400 | if (!info->skb_packet) { | 400 | if (!info->skb_packet) { |
401 | kfree(info); | 401 | kfree(info); |
402 | return NULL; | 402 | return NULL; |
403 | } | 403 | } |
404 | skb_reserve(info->skb_packet, ETH_HLEN); | 404 | skb_reserve(info->skb_packet, ETH_HLEN + NET_IP_ALIGN); |
405 | packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len); | 405 | packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len); |
406 | 406 | ||
407 | kref_init(&info->refcount); | 407 | kref_init(&info->refcount); |
@@ -698,15 +698,12 @@ static void batadv_purge_vis_packets(struct batadv_priv *bat_priv) | |||
698 | static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv, | 698 | static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv, |
699 | struct batadv_vis_info *info) | 699 | struct batadv_vis_info *info) |
700 | { | 700 | { |
701 | struct batadv_neigh_node *router; | ||
702 | struct batadv_hashtable *hash = bat_priv->orig_hash; | 701 | struct batadv_hashtable *hash = bat_priv->orig_hash; |
703 | struct hlist_node *node; | 702 | struct hlist_node *node; |
704 | struct hlist_head *head; | 703 | struct hlist_head *head; |
705 | struct batadv_orig_node *orig_node; | 704 | struct batadv_orig_node *orig_node; |
706 | struct batadv_vis_packet *packet; | 705 | struct batadv_vis_packet *packet; |
707 | struct sk_buff *skb; | 706 | struct sk_buff *skb; |
708 | struct batadv_hard_iface *hard_iface; | ||
709 | uint8_t dstaddr[ETH_ALEN]; | ||
710 | uint32_t i; | 707 | uint32_t i; |
711 | 708 | ||
712 | 709 | ||
@@ -722,30 +719,20 @@ static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv, | |||
722 | if (!(orig_node->flags & BATADV_VIS_SERVER)) | 719 | if (!(orig_node->flags & BATADV_VIS_SERVER)) |
723 | continue; | 720 | continue; |
724 | 721 | ||
725 | router = batadv_orig_node_get_router(orig_node); | ||
726 | if (!router) | ||
727 | continue; | ||
728 | |||
729 | /* don't send it if we already received the packet from | 722 | /* don't send it if we already received the packet from |
730 | * this node. | 723 | * this node. |
731 | */ | 724 | */ |
732 | if (batadv_recv_list_is_in(bat_priv, &info->recv_list, | 725 | if (batadv_recv_list_is_in(bat_priv, &info->recv_list, |
733 | orig_node->orig)) { | 726 | orig_node->orig)) |
734 | batadv_neigh_node_free_ref(router); | ||
735 | continue; | 727 | continue; |
736 | } | ||
737 | 728 | ||
738 | memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); | 729 | memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); |
739 | hard_iface = router->if_incoming; | ||
740 | memcpy(dstaddr, router->addr, ETH_ALEN); | ||
741 | |||
742 | batadv_neigh_node_free_ref(router); | ||
743 | |||
744 | skb = skb_clone(info->skb_packet, GFP_ATOMIC); | 730 | skb = skb_clone(info->skb_packet, GFP_ATOMIC); |
745 | if (skb) | 731 | if (!skb) |
746 | batadv_send_skb_packet(skb, hard_iface, | 732 | continue; |
747 | dstaddr); | ||
748 | 733 | ||
734 | if (!batadv_send_skb_to_orig(skb, orig_node, NULL)) | ||
735 | kfree_skb(skb); | ||
749 | } | 736 | } |
750 | rcu_read_unlock(); | 737 | rcu_read_unlock(); |
751 | } | 738 | } |
@@ -755,7 +742,6 @@ static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv, | |||
755 | struct batadv_vis_info *info) | 742 | struct batadv_vis_info *info) |
756 | { | 743 | { |
757 | struct batadv_orig_node *orig_node; | 744 | struct batadv_orig_node *orig_node; |
758 | struct batadv_neigh_node *router = NULL; | ||
759 | struct sk_buff *skb; | 745 | struct sk_buff *skb; |
760 | struct batadv_vis_packet *packet; | 746 | struct batadv_vis_packet *packet; |
761 | 747 | ||
@@ -765,17 +751,14 @@ static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv, | |||
765 | if (!orig_node) | 751 | if (!orig_node) |
766 | goto out; | 752 | goto out; |
767 | 753 | ||
768 | router = batadv_orig_node_get_router(orig_node); | 754 | skb = skb_clone(info->skb_packet, GFP_ATOMIC); |
769 | if (!router) | 755 | if (!skb) |
770 | goto out; | 756 | goto out; |
771 | 757 | ||
772 | skb = skb_clone(info->skb_packet, GFP_ATOMIC); | 758 | if (!batadv_send_skb_to_orig(skb, orig_node, NULL)) |
773 | if (skb) | 759 | kfree_skb(skb); |
774 | batadv_send_skb_packet(skb, router->if_incoming, router->addr); | ||
775 | 760 | ||
776 | out: | 761 | out: |
777 | if (router) | ||
778 | batadv_neigh_node_free_ref(router); | ||
779 | if (orig_node) | 762 | if (orig_node) |
780 | batadv_orig_node_free_ref(orig_node); | 763 | batadv_orig_node_free_ref(orig_node); |
781 | } | 764 | } |
@@ -873,12 +856,13 @@ int batadv_vis_init(struct batadv_priv *bat_priv) | |||
873 | if (!bat_priv->vis.my_info) | 856 | if (!bat_priv->vis.my_info) |
874 | goto err; | 857 | goto err; |
875 | 858 | ||
876 | len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN; | 859 | len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE; |
860 | len += ETH_HLEN + NET_IP_ALIGN; | ||
877 | bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len); | 861 | bat_priv->vis.my_info->skb_packet = dev_alloc_skb(len); |
878 | if (!bat_priv->vis.my_info->skb_packet) | 862 | if (!bat_priv->vis.my_info->skb_packet) |
879 | goto free_info; | 863 | goto free_info; |
880 | 864 | ||
881 | skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN); | 865 | skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN + NET_IP_ALIGN); |
882 | tmp_skb = bat_priv->vis.my_info->skb_packet; | 866 | tmp_skb = bat_priv->vis.my_info->skb_packet; |
883 | packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet)); | 867 | packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet)); |
884 | 868 | ||
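The vis.c allocation changes above apply one pattern throughout: reserve ETH_HLEN plus NET_IP_ALIGN of headroom so the vis packet that follows ends up properly aligned. A condensed sketch of that pattern, with len, skb and packet used as placeholder names:

	skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
	if (!skb)
		return NULL;
	/* leave room for the Ethernet header plus the alignment padding */
	skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
	packet = (struct batadv_vis_packet *)skb_put(skb, len);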
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig index 3537d385035e..d3f3f7b1d32c 100644 --- a/net/bluetooth/Kconfig +++ b/net/bluetooth/Kconfig | |||
@@ -11,6 +11,7 @@ menuconfig BT | |||
11 | select CRYPTO_BLKCIPHER | 11 | select CRYPTO_BLKCIPHER |
12 | select CRYPTO_AES | 12 | select CRYPTO_AES |
13 | select CRYPTO_ECB | 13 | select CRYPTO_ECB |
14 | select CRYPTO_SHA256 | ||
14 | help | 15 | help |
15 | Bluetooth is low-cost, low-power, short-range wireless technology. | 16 | Bluetooth is low-cost, low-power, short-range wireless technology. |
16 | It was designed as a replacement for cables and other short-range | 17 | It was designed as a replacement for cables and other short-range |
@@ -47,4 +48,3 @@ source "net/bluetooth/cmtp/Kconfig" | |||
47 | source "net/bluetooth/hidp/Kconfig" | 48 | source "net/bluetooth/hidp/Kconfig" |
48 | 49 | ||
49 | source "drivers/bluetooth/Kconfig" | 50 | source "drivers/bluetooth/Kconfig" |
50 | |||
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index fa6d94a4602a..dea6a287daca 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile | |||
@@ -10,4 +10,4 @@ obj-$(CONFIG_BT_HIDP) += hidp/ | |||
10 | 10 | ||
11 | bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ | 11 | bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ |
12 | hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ | 12 | hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ |
13 | a2mp.o | 13 | a2mp.o amp.o |
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c index 0760d1fed6f0..2f67d5ecc907 100644 --- a/net/bluetooth/a2mp.c +++ b/net/bluetooth/a2mp.c | |||
@@ -16,6 +16,11 @@ | |||
16 | #include <net/bluetooth/hci_core.h> | 16 | #include <net/bluetooth/hci_core.h> |
17 | #include <net/bluetooth/l2cap.h> | 17 | #include <net/bluetooth/l2cap.h> |
18 | #include <net/bluetooth/a2mp.h> | 18 | #include <net/bluetooth/a2mp.h> |
19 | #include <net/bluetooth/amp.h> | ||
20 | |||
21 | /* Global AMP Manager list */ | ||
22 | LIST_HEAD(amp_mgr_list); | ||
23 | DEFINE_MUTEX(amp_mgr_list_lock); | ||
19 | 24 | ||
20 | /* A2MP build & send command helper functions */ | 25 | /* A2MP build & send command helper functions */ |
21 | static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data) | 26 | static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data) |
@@ -37,8 +42,7 @@ static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data) | |||
37 | return cmd; | 42 | return cmd; |
38 | } | 43 | } |
39 | 44 | ||
40 | static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, | 45 | void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data) |
41 | void *data) | ||
42 | { | 46 | { |
43 | struct l2cap_chan *chan = mgr->a2mp_chan; | 47 | struct l2cap_chan *chan = mgr->a2mp_chan; |
44 | struct a2mp_cmd *cmd; | 48 | struct a2mp_cmd *cmd; |
@@ -63,6 +67,14 @@ static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, | |||
63 | kfree(cmd); | 67 | kfree(cmd); |
64 | } | 68 | } |
65 | 69 | ||
70 | u8 __next_ident(struct amp_mgr *mgr) | ||
71 | { | ||
72 | if (++mgr->ident == 0) | ||
73 | mgr->ident = 1; | ||
74 | |||
75 | return mgr->ident; | ||
76 | } | ||
77 | |||
66 | static inline void __a2mp_cl_bredr(struct a2mp_cl *cl) | 78 | static inline void __a2mp_cl_bredr(struct a2mp_cl *cl) |
67 | { | 79 | { |
68 | cl->id = 0; | 80 | cl->id = 0; |
@@ -161,6 +173,83 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb, | |||
161 | return 0; | 173 | return 0; |
162 | } | 174 | } |
163 | 175 | ||
176 | static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb, | ||
177 | struct a2mp_cmd *hdr) | ||
178 | { | ||
179 | struct a2mp_discov_rsp *rsp = (void *) skb->data; | ||
180 | u16 len = le16_to_cpu(hdr->len); | ||
181 | struct a2mp_cl *cl; | ||
182 | u16 ext_feat; | ||
183 | bool found = false; | ||
184 | |||
185 | if (len < sizeof(*rsp)) | ||
186 | return -EINVAL; | ||
187 | |||
188 | len -= sizeof(*rsp); | ||
189 | skb_pull(skb, sizeof(*rsp)); | ||
190 | |||
191 | ext_feat = le16_to_cpu(rsp->ext_feat); | ||
192 | |||
193 | BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat); | ||
194 | |||
195 | /* check that packet is not broken for now */ | ||
196 | while (ext_feat & A2MP_FEAT_EXT) { | ||
197 | if (len < sizeof(ext_feat)) | ||
198 | return -EINVAL; | ||
199 | |||
200 | ext_feat = get_unaligned_le16(skb->data); | ||
201 | BT_DBG("efm 0x%4.4x", ext_feat); | ||
202 | len -= sizeof(ext_feat); | ||
203 | skb_pull(skb, sizeof(ext_feat)); | ||
204 | } | ||
205 | |||
206 | cl = (void *) skb->data; | ||
207 | while (len >= sizeof(*cl)) { | ||
208 | BT_DBG("Remote AMP id %d type %d status %d", cl->id, cl->type, | ||
209 | cl->status); | ||
210 | |||
211 | if (cl->id != HCI_BREDR_ID && cl->type == HCI_AMP) { | ||
212 | struct a2mp_info_req req; | ||
213 | |||
214 | found = true; | ||
215 | req.id = cl->id; | ||
216 | a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr), | ||
217 | sizeof(req), &req); | ||
218 | } | ||
219 | |||
220 | len -= sizeof(*cl); | ||
221 | cl = (void *) skb_pull(skb, sizeof(*cl)); | ||
222 | } | ||
223 | |||
224 | /* Fall back to L2CAP init sequence */ | ||
225 | if (!found) { | ||
226 | struct l2cap_conn *conn = mgr->l2cap_conn; | ||
227 | struct l2cap_chan *chan; | ||
228 | |||
229 | mutex_lock(&conn->chan_lock); | ||
230 | |||
231 | list_for_each_entry(chan, &conn->chan_l, list) { | ||
232 | |||
233 | BT_DBG("chan %p state %s", chan, | ||
234 | state_to_string(chan->state)); | ||
235 | |||
236 | if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) | ||
237 | continue; | ||
238 | |||
239 | l2cap_chan_lock(chan); | ||
240 | |||
241 | if (chan->state == BT_CONNECT) | ||
242 | l2cap_send_conn_req(chan); | ||
243 | |||
244 | l2cap_chan_unlock(chan); | ||
245 | } | ||
246 | |||
247 | mutex_unlock(&conn->chan_lock); | ||
248 | } | ||
249 | |||
250 | return 0; | ||
251 | } | ||
252 | |||
164 | static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb, | 253 | static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb, |
165 | struct a2mp_cmd *hdr) | 254 | struct a2mp_cmd *hdr) |
166 | { | 255 | { |
@@ -181,7 +270,6 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb, | |||
181 | struct a2mp_cmd *hdr) | 270 | struct a2mp_cmd *hdr) |
182 | { | 271 | { |
183 | struct a2mp_info_req *req = (void *) skb->data; | 272 | struct a2mp_info_req *req = (void *) skb->data; |
184 | struct a2mp_info_rsp rsp; | ||
185 | struct hci_dev *hdev; | 273 | struct hci_dev *hdev; |
186 | 274 | ||
187 | if (le16_to_cpu(hdr->len) < sizeof(*req)) | 275 | if (le16_to_cpu(hdr->len) < sizeof(*req)) |
@@ -189,53 +277,93 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb, | |||
189 | 277 | ||
190 | BT_DBG("id %d", req->id); | 278 | BT_DBG("id %d", req->id); |
191 | 279 | ||
192 | rsp.id = req->id; | ||
193 | rsp.status = A2MP_STATUS_INVALID_CTRL_ID; | ||
194 | |||
195 | hdev = hci_dev_get(req->id); | 280 | hdev = hci_dev_get(req->id); |
196 | if (hdev && hdev->amp_type != HCI_BREDR) { | 281 | if (!hdev || hdev->dev_type != HCI_AMP) { |
197 | rsp.status = 0; | 282 | struct a2mp_info_rsp rsp; |
198 | rsp.total_bw = cpu_to_le32(hdev->amp_total_bw); | 283 | |
199 | rsp.max_bw = cpu_to_le32(hdev->amp_max_bw); | 284 | rsp.id = req->id; |
200 | rsp.min_latency = cpu_to_le32(hdev->amp_min_latency); | 285 | rsp.status = A2MP_STATUS_INVALID_CTRL_ID; |
201 | rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap); | 286 | |
202 | rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size); | 287 | a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), |
288 | &rsp); | ||
289 | |||
290 | goto done; | ||
203 | } | 291 | } |
204 | 292 | ||
293 | mgr->state = READ_LOC_AMP_INFO; | ||
294 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); | ||
295 | |||
296 | done: | ||
205 | if (hdev) | 297 | if (hdev) |
206 | hci_dev_put(hdev); | 298 | hci_dev_put(hdev); |
207 | 299 | ||
208 | a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp); | ||
209 | |||
210 | skb_pull(skb, sizeof(*req)); | 300 | skb_pull(skb, sizeof(*req)); |
211 | return 0; | 301 | return 0; |
212 | } | 302 | } |
213 | 303 | ||
304 | static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb, | ||
305 | struct a2mp_cmd *hdr) | ||
306 | { | ||
307 | struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data; | ||
308 | struct a2mp_amp_assoc_req req; | ||
309 | struct amp_ctrl *ctrl; | ||
310 | |||
311 | if (le16_to_cpu(hdr->len) < sizeof(*rsp)) | ||
312 | return -EINVAL; | ||
313 | |||
314 | BT_DBG("id %d status 0x%2.2x", rsp->id, rsp->status); | ||
315 | |||
316 | if (rsp->status) | ||
317 | return -EINVAL; | ||
318 | |||
319 | ctrl = amp_ctrl_add(mgr, rsp->id); | ||
320 | if (!ctrl) | ||
321 | return -ENOMEM; | ||
322 | |||
323 | req.id = rsp->id; | ||
324 | a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req), | ||
325 | &req); | ||
326 | |||
327 | skb_pull(skb, sizeof(*rsp)); | ||
328 | return 0; | ||
329 | } | ||
330 | |||
214 | static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, | 331 | static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, |
215 | struct a2mp_cmd *hdr) | 332 | struct a2mp_cmd *hdr) |
216 | { | 333 | { |
217 | struct a2mp_amp_assoc_req *req = (void *) skb->data; | 334 | struct a2mp_amp_assoc_req *req = (void *) skb->data; |
218 | struct hci_dev *hdev; | 335 | struct hci_dev *hdev; |
336 | struct amp_mgr *tmp; | ||
219 | 337 | ||
220 | if (le16_to_cpu(hdr->len) < sizeof(*req)) | 338 | if (le16_to_cpu(hdr->len) < sizeof(*req)) |
221 | return -EINVAL; | 339 | return -EINVAL; |
222 | 340 | ||
223 | BT_DBG("id %d", req->id); | 341 | BT_DBG("id %d", req->id); |
224 | 342 | ||
343 | /* Make sure that no other request is being processed */ | ||
344 | tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC); | ||
345 | |||
225 | hdev = hci_dev_get(req->id); | 346 | hdev = hci_dev_get(req->id); |
226 | if (!hdev || hdev->amp_type == HCI_BREDR) { | 347 | if (!hdev || hdev->amp_type == HCI_BREDR || tmp) { |
227 | struct a2mp_amp_assoc_rsp rsp; | 348 | struct a2mp_amp_assoc_rsp rsp; |
228 | rsp.id = req->id; | 349 | rsp.id = req->id; |
229 | rsp.status = A2MP_STATUS_INVALID_CTRL_ID; | 350 | |
351 | if (tmp) { | ||
352 | rsp.status = A2MP_STATUS_COLLISION_OCCURED; | ||
353 | amp_mgr_put(tmp); | ||
354 | } else { | ||
355 | rsp.status = A2MP_STATUS_INVALID_CTRL_ID; | ||
356 | } | ||
230 | 357 | ||
231 | a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp), | 358 | a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp), |
232 | &rsp); | 359 | &rsp); |
233 | goto clean; | 360 | |
361 | goto done; | ||
234 | } | 362 | } |
235 | 363 | ||
236 | /* Placeholder for HCI Read AMP Assoc */ | 364 | amp_read_loc_assoc(hdev, mgr); |
237 | 365 | ||
238 | clean: | 366 | done: |
239 | if (hdev) | 367 | if (hdev) |
240 | hci_dev_put(hdev); | 368 | hci_dev_put(hdev); |
241 | 369 | ||
@@ -243,6 +371,68 @@ clean: | |||
243 | return 0; | 371 | return 0; |
244 | } | 372 | } |
245 | 373 | ||
374 | static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb, | ||
375 | struct a2mp_cmd *hdr) | ||
376 | { | ||
377 | struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data; | ||
378 | u16 len = le16_to_cpu(hdr->len); | ||
379 | struct hci_dev *hdev; | ||
380 | struct amp_ctrl *ctrl; | ||
381 | struct hci_conn *hcon; | ||
382 | size_t assoc_len; | ||
383 | |||
384 | if (len < sizeof(*rsp)) | ||
385 | return -EINVAL; | ||
386 | |||
387 | assoc_len = len - sizeof(*rsp); | ||
388 | |||
389 | BT_DBG("id %d status 0x%2.2x assoc len %zu", rsp->id, rsp->status, | ||
390 | assoc_len); | ||
391 | |||
392 | if (rsp->status) | ||
393 | return -EINVAL; | ||
394 | |||
395 | /* Save remote ASSOC data */ | ||
396 | ctrl = amp_ctrl_lookup(mgr, rsp->id); | ||
397 | if (ctrl) { | ||
398 | u8 *assoc; | ||
399 | |||
400 | assoc = kzalloc(assoc_len, GFP_KERNEL); | ||
401 | if (!assoc) { | ||
402 | amp_ctrl_put(ctrl); | ||
403 | return -ENOMEM; | ||
404 | } | ||
405 | |||
406 | memcpy(assoc, rsp->amp_assoc, assoc_len); | ||
407 | ctrl->assoc = assoc; | ||
408 | ctrl->assoc_len = assoc_len; | ||
409 | ctrl->assoc_rem_len = assoc_len; | ||
410 | ctrl->assoc_len_so_far = 0; | ||
411 | |||
412 | amp_ctrl_put(ctrl); | ||
413 | } | ||
414 | |||
415 | /* Create Phys Link */ | ||
416 | hdev = hci_dev_get(rsp->id); | ||
417 | if (!hdev) | ||
418 | return -EINVAL; | ||
419 | |||
420 | hcon = phylink_add(hdev, mgr, rsp->id, true); | ||
421 | if (!hcon) | ||
422 | goto done; | ||
423 | |||
424 | BT_DBG("Created hcon %p: loc:%d -> rem:%d", hcon, hdev->id, rsp->id); | ||
425 | |||
426 | mgr->bredr_chan->remote_amp_id = rsp->id; | ||
427 | |||
428 | amp_create_phylink(hdev, mgr, hcon); | ||
429 | |||
430 | done: | ||
431 | hci_dev_put(hdev); | ||
432 | skb_pull(skb, len); | ||
433 | return 0; | ||
434 | } | ||
435 | |||
246 | static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, | 436 | static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, |
247 | struct a2mp_cmd *hdr) | 437 | struct a2mp_cmd *hdr) |
248 | { | 438 | { |
@@ -250,6 +440,8 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, | |||
250 | 440 | ||
251 | struct a2mp_physlink_rsp rsp; | 441 | struct a2mp_physlink_rsp rsp; |
252 | struct hci_dev *hdev; | 442 | struct hci_dev *hdev; |
443 | struct hci_conn *hcon; | ||
444 | struct amp_ctrl *ctrl; | ||
253 | 445 | ||
254 | if (le16_to_cpu(hdr->len) < sizeof(*req)) | 446 | if (le16_to_cpu(hdr->len) < sizeof(*req)) |
255 | return -EINVAL; | 447 | return -EINVAL; |
@@ -265,9 +457,43 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, | |||
265 | goto send_rsp; | 457 | goto send_rsp; |
266 | } | 458 | } |
267 | 459 | ||
268 | /* TODO process physlink create */ | 460 | ctrl = amp_ctrl_lookup(mgr, rsp.remote_id); |
461 | if (!ctrl) { | ||
462 | ctrl = amp_ctrl_add(mgr, rsp.remote_id); | ||
463 | if (ctrl) { | ||
464 | amp_ctrl_get(ctrl); | ||
465 | } else { | ||
466 | rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; | ||
467 | goto send_rsp; | ||
468 | } | ||
469 | } | ||
269 | 470 | ||
270 | rsp.status = A2MP_STATUS_SUCCESS; | 471 | if (ctrl) { |
472 | size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req); | ||
473 | u8 *assoc; | ||
474 | |||
475 | assoc = kzalloc(assoc_len, GFP_KERNEL); | ||
476 | if (!assoc) { | ||
477 | amp_ctrl_put(ctrl); | ||
478 | return -ENOMEM; | ||
479 | } | ||
480 | |||
481 | memcpy(assoc, req->amp_assoc, assoc_len); | ||
482 | ctrl->assoc = assoc; | ||
483 | ctrl->assoc_len = assoc_len; | ||
484 | ctrl->assoc_rem_len = assoc_len; | ||
485 | ctrl->assoc_len_so_far = 0; | ||
486 | |||
487 | amp_ctrl_put(ctrl); | ||
488 | } | ||
489 | |||
490 | hcon = phylink_add(hdev, mgr, req->local_id, false); | ||
491 | if (hcon) { | ||
492 | amp_accept_phylink(hdev, mgr, hcon); | ||
493 | rsp.status = A2MP_STATUS_SUCCESS; | ||
494 | } else { | ||
495 | rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; | ||
496 | } | ||
271 | 497 | ||
272 | send_rsp: | 498 | send_rsp: |
273 | if (hdev) | 499 | if (hdev) |
@@ -286,6 +512,7 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, | |||
286 | struct a2mp_physlink_req *req = (void *) skb->data; | 512 | struct a2mp_physlink_req *req = (void *) skb->data; |
287 | struct a2mp_physlink_rsp rsp; | 513 | struct a2mp_physlink_rsp rsp; |
288 | struct hci_dev *hdev; | 514 | struct hci_dev *hdev; |
515 | struct hci_conn *hcon; | ||
289 | 516 | ||
290 | if (le16_to_cpu(hdr->len) < sizeof(*req)) | 517 | if (le16_to_cpu(hdr->len) < sizeof(*req)) |
291 | return -EINVAL; | 518 | return -EINVAL; |
@@ -296,14 +523,22 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, | |||
296 | rsp.remote_id = req->local_id; | 523 | rsp.remote_id = req->local_id; |
297 | rsp.status = A2MP_STATUS_SUCCESS; | 524 | rsp.status = A2MP_STATUS_SUCCESS; |
298 | 525 | ||
299 | hdev = hci_dev_get(req->local_id); | 526 | hdev = hci_dev_get(req->remote_id); |
300 | if (!hdev) { | 527 | if (!hdev) { |
301 | rsp.status = A2MP_STATUS_INVALID_CTRL_ID; | 528 | rsp.status = A2MP_STATUS_INVALID_CTRL_ID; |
302 | goto send_rsp; | 529 | goto send_rsp; |
303 | } | 530 | } |
304 | 531 | ||
532 | hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, mgr->l2cap_conn->dst); | ||
533 | if (!hcon) { | ||
534 | BT_ERR("No phys link exists"); | ||
535 | rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS; | ||
536 | goto clean; | ||
537 | } | ||
538 | |||
305 | /* TODO Disconnect Phys Link here */ | 539 | /* TODO Disconnect Phys Link here */ |
306 | 540 | ||
541 | clean: | ||
307 | hci_dev_put(hdev); | 542 | hci_dev_put(hdev); |
308 | 543 | ||
309 | send_rsp: | 544 | send_rsp: |
@@ -377,10 +612,19 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) | |||
377 | err = a2mp_discphyslink_req(mgr, skb, hdr); | 612 | err = a2mp_discphyslink_req(mgr, skb, hdr); |
378 | break; | 613 | break; |
379 | 614 | ||
380 | case A2MP_CHANGE_RSP: | ||
381 | case A2MP_DISCOVER_RSP: | 615 | case A2MP_DISCOVER_RSP: |
616 | err = a2mp_discover_rsp(mgr, skb, hdr); | ||
617 | break; | ||
618 | |||
382 | case A2MP_GETINFO_RSP: | 619 | case A2MP_GETINFO_RSP: |
620 | err = a2mp_getinfo_rsp(mgr, skb, hdr); | ||
621 | break; | ||
622 | |||
383 | case A2MP_GETAMPASSOC_RSP: | 623 | case A2MP_GETAMPASSOC_RSP: |
624 | err = a2mp_getampassoc_rsp(mgr, skb, hdr); | ||
625 | break; | ||
626 | |||
627 | case A2MP_CHANGE_RSP: | ||
384 | case A2MP_CREATEPHYSLINK_RSP: | 628 | case A2MP_CREATEPHYSLINK_RSP: |
385 | case A2MP_DISCONNPHYSLINK_RSP: | 629 | case A2MP_DISCONNPHYSLINK_RSP: |
386 | err = a2mp_cmd_rsp(mgr, skb, hdr); | 630 | err = a2mp_cmd_rsp(mgr, skb, hdr); |
@@ -455,9 +699,10 @@ static struct l2cap_ops a2mp_chan_ops = { | |||
455 | .new_connection = l2cap_chan_no_new_connection, | 699 | .new_connection = l2cap_chan_no_new_connection, |
456 | .teardown = l2cap_chan_no_teardown, | 700 | .teardown = l2cap_chan_no_teardown, |
457 | .ready = l2cap_chan_no_ready, | 701 | .ready = l2cap_chan_no_ready, |
702 | .defer = l2cap_chan_no_defer, | ||
458 | }; | 703 | }; |
459 | 704 | ||
460 | static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn) | 705 | static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked) |
461 | { | 706 | { |
462 | struct l2cap_chan *chan; | 707 | struct l2cap_chan *chan; |
463 | int err; | 708 | int err; |
@@ -492,7 +737,10 @@ static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn) | |||
492 | 737 | ||
493 | chan->conf_state = 0; | 738 | chan->conf_state = 0; |
494 | 739 | ||
495 | l2cap_chan_add(conn, chan); | 740 | if (locked) |
741 | __l2cap_chan_add(conn, chan); | ||
742 | else | ||
743 | l2cap_chan_add(conn, chan); | ||
496 | 744 | ||
497 | chan->remote_mps = chan->omtu; | 745 | chan->remote_mps = chan->omtu; |
498 | chan->mps = chan->omtu; | 746 | chan->mps = chan->omtu; |
@@ -503,11 +751,13 @@ static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn) | |||
503 | } | 751 | } |
504 | 752 | ||
505 | /* AMP Manager functions */ | 753 | /* AMP Manager functions */ |
506 | void amp_mgr_get(struct amp_mgr *mgr) | 754 | struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr) |
507 | { | 755 | { |
508 | BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount)); | 756 | BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount)); |
509 | 757 | ||
510 | kref_get(&mgr->kref); | 758 | kref_get(&mgr->kref); |
759 | |||
760 | return mgr; | ||
511 | } | 761 | } |
512 | 762 | ||
513 | static void amp_mgr_destroy(struct kref *kref) | 763 | static void amp_mgr_destroy(struct kref *kref) |
@@ -516,6 +766,11 @@ static void amp_mgr_destroy(struct kref *kref) | |||
516 | 766 | ||
517 | BT_DBG("mgr %p", mgr); | 767 | BT_DBG("mgr %p", mgr); |
518 | 768 | ||
769 | mutex_lock(&amp_mgr_list_lock); | ||
770 | list_del(&mgr->list); | ||
771 | mutex_unlock(&amp_mgr_list_lock); | ||
772 | |||
773 | amp_ctrl_list_flush(mgr); | ||
519 | kfree(mgr); | 774 | kfree(mgr); |
520 | } | 775 | } |
521 | 776 | ||
@@ -526,7 +781,7 @@ int amp_mgr_put(struct amp_mgr *mgr) | |||
526 | return kref_put(&mgr->kref, &amp_mgr_destroy); | 781 | return kref_put(&mgr->kref, &amp_mgr_destroy); |
527 | } | 782 | } |
528 | 783 | ||
529 | static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn) | 784 | static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked) |
530 | { | 785 | { |
531 | struct amp_mgr *mgr; | 786 | struct amp_mgr *mgr; |
532 | struct l2cap_chan *chan; | 787 | struct l2cap_chan *chan; |
@@ -539,7 +794,7 @@ static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn) | |||
539 | 794 | ||
540 | mgr->l2cap_conn = conn; | 795 | mgr->l2cap_conn = conn; |
541 | 796 | ||
542 | chan = a2mp_chan_open(conn); | 797 | chan = a2mp_chan_open(conn, locked); |
543 | if (!chan) { | 798 | if (!chan) { |
544 | kfree(mgr); | 799 | kfree(mgr); |
545 | return NULL; | 800 | return NULL; |
@@ -552,6 +807,14 @@ static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn) | |||
552 | 807 | ||
553 | kref_init(&mgr->kref); | 808 | kref_init(&mgr->kref); |
554 | 809 | ||
810 | /* Remote AMP ctrl list initialization */ | ||
811 | INIT_LIST_HEAD(&mgr->amp_ctrls); | ||
812 | mutex_init(&mgr->amp_ctrls_lock); | ||
813 | |||
814 | mutex_lock(&amp_mgr_list_lock); | ||
815 | list_add(&mgr->list, &amp_mgr_list); | ||
816 | mutex_unlock(&amp_mgr_list_lock); | ||
817 | |||
555 | return mgr; | 818 | return mgr; |
556 | } | 819 | } |
557 | 820 | ||
@@ -560,7 +823,7 @@ struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, | |||
560 | { | 823 | { |
561 | struct amp_mgr *mgr; | 824 | struct amp_mgr *mgr; |
562 | 825 | ||
563 | mgr = amp_mgr_create(conn); | 826 | mgr = amp_mgr_create(conn, false); |
564 | if (!mgr) { | 827 | if (!mgr) { |
565 | BT_ERR("Could not create AMP manager"); | 828 | BT_ERR("Could not create AMP manager"); |
566 | return NULL; | 829 | return NULL; |
@@ -570,3 +833,139 @@ struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, | |||
570 | 833 | ||
571 | return mgr->a2mp_chan; | 834 | return mgr->a2mp_chan; |
572 | } | 835 | } |
836 | |||
837 | struct amp_mgr *amp_mgr_lookup_by_state(u8 state) | ||
838 | { | ||
839 | struct amp_mgr *mgr; | ||
840 | |||
841 | mutex_lock(&amp_mgr_list_lock); | ||
842 | list_for_each_entry(mgr, &amp_mgr_list, list) { | ||
843 | if (mgr->state == state) { | ||
844 | amp_mgr_get(mgr); | ||
845 | mutex_unlock(&amp_mgr_list_lock); | ||
846 | return mgr; | ||
847 | } | ||
848 | } | ||
849 | mutex_unlock(&amp_mgr_list_lock); | ||
850 | |||
851 | return NULL; | ||
852 | } | ||
853 | |||
854 | void a2mp_send_getinfo_rsp(struct hci_dev *hdev) | ||
855 | { | ||
856 | struct amp_mgr *mgr; | ||
857 | struct a2mp_info_rsp rsp; | ||
858 | |||
859 | mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_INFO); | ||
860 | if (!mgr) | ||
861 | return; | ||
862 | |||
863 | BT_DBG("%s mgr %p", hdev->name, mgr); | ||
864 | |||
865 | rsp.id = hdev->id; | ||
866 | rsp.status = A2MP_STATUS_INVALID_CTRL_ID; | ||
867 | |||
868 | if (hdev->amp_type != HCI_BREDR) { | ||
869 | rsp.status = 0; | ||
870 | rsp.total_bw = cpu_to_le32(hdev->amp_total_bw); | ||
871 | rsp.max_bw = cpu_to_le32(hdev->amp_max_bw); | ||
872 | rsp.min_latency = cpu_to_le32(hdev->amp_min_latency); | ||
873 | rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap); | ||
874 | rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size); | ||
875 | } | ||
876 | |||
877 | a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp); | ||
878 | amp_mgr_put(mgr); | ||
879 | } | ||
880 | |||
881 | void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status) | ||
882 | { | ||
883 | struct amp_mgr *mgr; | ||
884 | struct amp_assoc *loc_assoc = &hdev->loc_assoc; | ||
885 | struct a2mp_amp_assoc_rsp *rsp; | ||
886 | size_t len; | ||
887 | |||
888 | mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC); | ||
889 | if (!mgr) | ||
890 | return; | ||
891 | |||
892 | BT_DBG("%s mgr %p", hdev->name, mgr); | ||
893 | |||
894 | len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len; | ||
895 | rsp = kzalloc(len, GFP_KERNEL); | ||
896 | if (!rsp) { | ||
897 | amp_mgr_put(mgr); | ||
898 | return; | ||
899 | } | ||
900 | |||
901 | rsp->id = hdev->id; | ||
902 | |||
903 | if (status) { | ||
904 | rsp->status = A2MP_STATUS_INVALID_CTRL_ID; | ||
905 | } else { | ||
906 | rsp->status = A2MP_STATUS_SUCCESS; | ||
907 | memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len); | ||
908 | } | ||
909 | |||
910 | a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp); | ||
911 | amp_mgr_put(mgr); | ||
912 | kfree(rsp); | ||
913 | } | ||
914 | |||
915 | void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status) | ||
916 | { | ||
917 | struct amp_mgr *mgr; | ||
918 | struct amp_assoc *loc_assoc = &hdev->loc_assoc; | ||
919 | struct a2mp_physlink_req *req; | ||
920 | struct l2cap_chan *bredr_chan; | ||
921 | size_t len; | ||
922 | |||
923 | mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL); | ||
924 | if (!mgr) | ||
925 | return; | ||
926 | |||
927 | len = sizeof(*req) + loc_assoc->len; | ||
928 | |||
929 | BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len); | ||
930 | |||
931 | req = kzalloc(len, GFP_KERNEL); | ||
932 | if (!req) { | ||
933 | amp_mgr_put(mgr); | ||
934 | return; | ||
935 | } | ||
936 | |||
937 | bredr_chan = mgr->bredr_chan; | ||
938 | if (!bredr_chan) | ||
939 | goto clean; | ||
940 | |||
941 | req->local_id = hdev->id; | ||
942 | req->remote_id = bredr_chan->remote_amp_id; | ||
943 | memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len); | ||
944 | |||
945 | a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req); | ||
946 | |||
947 | clean: | ||
948 | amp_mgr_put(mgr); | ||
949 | kfree(req); | ||
950 | } | ||
951 | |||
952 | void a2mp_discover_amp(struct l2cap_chan *chan) | ||
953 | { | ||
954 | struct l2cap_conn *conn = chan->conn; | ||
955 | struct amp_mgr *mgr = conn->hcon->amp_mgr; | ||
956 | struct a2mp_discov_req req; | ||
957 | |||
958 | BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr); | ||
959 | |||
960 | if (!mgr) { | ||
961 | mgr = amp_mgr_create(conn, true); | ||
962 | if (!mgr) | ||
963 | return; | ||
964 | } | ||
965 | |||
966 | mgr->bredr_chan = chan; | ||
967 | |||
968 | req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); | ||
969 | req.ext_feat = 0; | ||
970 | a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req); | ||
971 | } | ||
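As a usage sketch of the a2mp.c helpers exported above (a2mp_send() and __next_ident()), a Get Info Request for a remote controller id could be issued as follows; example_send_getinfo() is a hypothetical caller, not part of the patch:

	static void example_send_getinfo(struct amp_mgr *mgr, u8 ctrl_id)
	{
		struct a2mp_info_req req;

		req.id = ctrl_id;
		/* __next_ident() wraps from 255 back to 1, so 0 is never
		 * handed out as an A2MP identifier
		 */
		a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr), sizeof(req),
			  &req);
	}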
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index ba033f09196e..5355df63d39b 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
@@ -569,7 +569,6 @@ static int bt_seq_show(struct seq_file *seq, void *v) | |||
569 | { | 569 | { |
570 | struct bt_seq_state *s = seq->private; | 570 | struct bt_seq_state *s = seq->private; |
571 | struct bt_sock_list *l = s->l; | 571 | struct bt_sock_list *l = s->l; |
572 | bdaddr_t src_baswapped, dst_baswapped; | ||
573 | 572 | ||
574 | if (v == SEQ_START_TOKEN) { | 573 | if (v == SEQ_START_TOKEN) { |
575 | seq_puts(seq ,"sk RefCnt Rmem Wmem User Inode Src Dst Parent"); | 574 | seq_puts(seq ,"sk RefCnt Rmem Wmem User Inode Src Dst Parent"); |
@@ -583,18 +582,17 @@ static int bt_seq_show(struct seq_file *seq, void *v) | |||
583 | } else { | 582 | } else { |
584 | struct sock *sk = sk_entry(v); | 583 | struct sock *sk = sk_entry(v); |
585 | struct bt_sock *bt = bt_sk(sk); | 584 | struct bt_sock *bt = bt_sk(sk); |
586 | baswap(&src_baswapped, &bt->src); | ||
587 | baswap(&dst_baswapped, &bt->dst); | ||
588 | 585 | ||
589 | seq_printf(seq, "%pK %-6d %-6u %-6u %-6u %-6lu %pM %pM %-6lu", | 586 | seq_printf(seq, |
587 | "%pK %-6d %-6u %-6u %-6u %-6lu %pMR %pMR %-6lu", | ||
590 | sk, | 588 | sk, |
591 | atomic_read(&sk->sk_refcnt), | 589 | atomic_read(&sk->sk_refcnt), |
592 | sk_rmem_alloc_get(sk), | 590 | sk_rmem_alloc_get(sk), |
593 | sk_wmem_alloc_get(sk), | 591 | sk_wmem_alloc_get(sk), |
594 | from_kuid(seq_user_ns(seq), sock_i_uid(sk)), | 592 | from_kuid(seq_user_ns(seq), sock_i_uid(sk)), |
595 | sock_i_ino(sk), | 593 | sock_i_ino(sk), |
596 | &src_baswapped, | 594 | &bt->src, |
597 | &dst_baswapped, | 595 | &bt->dst, |
598 | bt->parent? sock_i_ino(bt->parent): 0LU); | 596 | bt->parent? sock_i_ino(bt->parent): 0LU); |
599 | 597 | ||
600 | if (l->custom_seq_show) { | 598 | if (l->custom_seq_show) { |
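The af_bluetooth.c hunk above can drop the baswap() copies because the %pMR printk extension already emits the six address bytes in reversed order, matching how a bdaddr_t is stored. A one-line illustration, with bdaddr as a placeholder bdaddr_t:

	BT_DBG("remote address %pMR", &bdaddr);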
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c new file mode 100644 index 000000000000..1b0d92c0643a --- /dev/null +++ b/net/bluetooth/amp.c | |||
@@ -0,0 +1,471 @@ | |||
1 | /* | ||
2 | Copyright (c) 2011,2012 Intel Corp. | ||
3 | |||
4 | This program is free software; you can redistribute it and/or modify | ||
5 | it under the terms of the GNU General Public License version 2 and | ||
6 | only version 2 as published by the Free Software Foundation. | ||
7 | |||
8 | This program is distributed in the hope that it will be useful, | ||
9 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <net/bluetooth/bluetooth.h> | ||
15 | #include <net/bluetooth/hci.h> | ||
16 | #include <net/bluetooth/hci_core.h> | ||
17 | #include <net/bluetooth/a2mp.h> | ||
18 | #include <net/bluetooth/amp.h> | ||
19 | #include <crypto/hash.h> | ||
20 | |||
21 | /* Remote AMP Controllers interface */ | ||
22 | void amp_ctrl_get(struct amp_ctrl *ctrl) | ||
23 | { | ||
24 | BT_DBG("ctrl %p orig refcnt %d", ctrl, | ||
25 | atomic_read(&ctrl->kref.refcount)); | ||
26 | |||
27 | kref_get(&ctrl->kref); | ||
28 | } | ||
29 | |||
30 | static void amp_ctrl_destroy(struct kref *kref) | ||
31 | { | ||
32 | struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref); | ||
33 | |||
34 | BT_DBG("ctrl %p", ctrl); | ||
35 | |||
36 | kfree(ctrl->assoc); | ||
37 | kfree(ctrl); | ||
38 | } | ||
39 | |||
40 | int amp_ctrl_put(struct amp_ctrl *ctrl) | ||
41 | { | ||
42 | BT_DBG("ctrl %p orig refcnt %d", ctrl, | ||
43 | atomic_read(&ctrl->kref.refcount)); | ||
44 | |||
45 | return kref_put(&ctrl->kref, &amp_ctrl_destroy); | ||
46 | } | ||
47 | |||
48 | struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id) | ||
49 | { | ||
50 | struct amp_ctrl *ctrl; | ||
51 | |||
52 | ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); | ||
53 | if (!ctrl) | ||
54 | return NULL; | ||
55 | |||
56 | kref_init(&ctrl->kref); | ||
57 | ctrl->id = id; | ||
58 | |||
59 | mutex_lock(&mgr->amp_ctrls_lock); | ||
60 | list_add(&ctrl->list, &mgr->amp_ctrls); | ||
61 | mutex_unlock(&mgr->amp_ctrls_lock); | ||
62 | |||
63 | BT_DBG("mgr %p ctrl %p", mgr, ctrl); | ||
64 | |||
65 | return ctrl; | ||
66 | } | ||
67 | |||
68 | void amp_ctrl_list_flush(struct amp_mgr *mgr) | ||
69 | { | ||
70 | struct amp_ctrl *ctrl, *n; | ||
71 | |||
72 | BT_DBG("mgr %p", mgr); | ||
73 | |||
74 | mutex_lock(&mgr->amp_ctrls_lock); | ||
75 | list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) { | ||
76 | list_del(&ctrl->list); | ||
77 | amp_ctrl_put(ctrl); | ||
78 | } | ||
79 | mutex_unlock(&mgr->amp_ctrls_lock); | ||
80 | } | ||
81 | |||
82 | struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id) | ||
83 | { | ||
84 | struct amp_ctrl *ctrl; | ||
85 | |||
86 | BT_DBG("mgr %p id %d", mgr, id); | ||
87 | |||
88 | mutex_lock(&mgr->amp_ctrls_lock); | ||
89 | list_for_each_entry(ctrl, &mgr->amp_ctrls, list) { | ||
90 | if (ctrl->id == id) { | ||
91 | amp_ctrl_get(ctrl); | ||
92 | mutex_unlock(&mgr->amp_ctrls_lock); | ||
93 | return ctrl; | ||
94 | } | ||
95 | } | ||
96 | mutex_unlock(&mgr->amp_ctrls_lock); | ||
97 | |||
98 | return NULL; | ||
99 | } | ||
100 | |||
101 | /* Physical Link interface */ | ||
102 | static u8 __next_handle(struct amp_mgr *mgr) | ||
103 | { | ||
104 | if (++mgr->handle == 0) | ||
105 | mgr->handle = 1; | ||
106 | |||
107 | return mgr->handle; | ||
108 | } | ||
109 | |||
110 | struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr, | ||
111 | u8 remote_id, bool out) | ||
112 | { | ||
113 | bdaddr_t *dst = mgr->l2cap_conn->dst; | ||
114 | struct hci_conn *hcon; | ||
115 | |||
116 | hcon = hci_conn_add(hdev, AMP_LINK, dst); | ||
117 | if (!hcon) | ||
118 | return NULL; | ||
119 | |||
120 | BT_DBG("hcon %p dst %pMR", hcon, dst); | ||
121 | |||
122 | hcon->state = BT_CONNECT; | ||
123 | hcon->attempt++; | ||
124 | hcon->handle = __next_handle(mgr); | ||
125 | hcon->remote_id = remote_id; | ||
126 | hcon->amp_mgr = amp_mgr_get(mgr); | ||
127 | hcon->out = out; | ||
128 | |||
129 | return hcon; | ||
130 | } | ||
131 | |||
132 | /* AMP crypto key generation interface */ | ||
133 | static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output) | ||
134 | { | ||
135 | int ret = 0; | ||
136 | struct crypto_shash *tfm; | ||
137 | |||
138 | if (!ksize) | ||
139 | return -EINVAL; | ||
140 | |||
141 | tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); | ||
142 | if (IS_ERR(tfm)) { | ||
143 | BT_DBG("crypto_alloc_ahash failed: err %ld", PTR_ERR(tfm)); | ||
144 | return PTR_ERR(tfm); | ||
145 | } | ||
146 | |||
147 | ret = crypto_shash_setkey(tfm, key, ksize); | ||
148 | if (ret) { | ||
149 | BT_DBG("crypto_ahash_setkey failed: err %d", ret); | ||
150 | } else { | ||
151 | struct { | ||
152 | struct shash_desc shash; | ||
153 | char ctx[crypto_shash_descsize(tfm)]; | ||
154 | } desc; | ||
155 | |||
156 | desc.shash.tfm = tfm; | ||
157 | desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
158 | |||
159 | ret = crypto_shash_digest(&desc.shash, plaintext, psize, | ||
160 | output); | ||
161 | } | ||
162 | |||
163 | crypto_free_shash(tfm); | ||
164 | return ret; | ||
165 | } | ||
166 | |||
167 | int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type) | ||
168 | { | ||
169 | struct hci_dev *hdev = conn->hdev; | ||
170 | struct link_key *key; | ||
171 | u8 keybuf[HCI_AMP_LINK_KEY_SIZE]; | ||
172 | u8 gamp_key[HCI_AMP_LINK_KEY_SIZE]; | ||
173 | int err; | ||
174 | |||
175 | if (!hci_conn_check_link_mode(conn)) | ||
176 | return -EACCES; | ||
177 | |||
178 | BT_DBG("conn %p key_type %d", conn, conn->key_type); | ||
179 | |||
180 | /* Legacy key */ | ||
181 | if (conn->key_type < 3) { | ||
182 | BT_ERR("Legacy key type %d", conn->key_type); | ||
183 | return -EACCES; | ||
184 | } | ||
185 | |||
186 | *type = conn->key_type; | ||
187 | *len = HCI_AMP_LINK_KEY_SIZE; | ||
188 | |||
189 | key = hci_find_link_key(hdev, &conn->dst); | ||
190 | if (!key) { | ||
191 | BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst); | ||
192 | return -EACCES; | ||
193 | } | ||
194 | |||
195 | /* BR/EDR Link Key concatenated with itself */ | ||
196 | memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE); | ||
197 | memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE); | ||
198 | |||
199 | /* Derive Generic AMP Link Key (gamp) */ | ||
200 | err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key); | ||
201 | if (err) { | ||
202 | BT_ERR("Could not derive Generic AMP Key: err %d", err); | ||
203 | return err; | ||
204 | } | ||
205 | |||
206 | if (conn->key_type == HCI_LK_DEBUG_COMBINATION) { | ||
207 | BT_DBG("Use Generic AMP Key (gamp)"); | ||
208 | memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE); | ||
209 | return err; | ||
210 | } | ||
211 | |||
212 | /* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */ | ||
213 | return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data); | ||
214 | } | ||
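For illustration, a userspace sketch of the two-step derivation performed by hmac_sha256() and phylink_gen_key() above, using OpenSSL's HMAC() in place of the kernel crypto API; the OpenSSL usage, buffer sizes and dummy key are assumptions for the sketch, not part of the patch:

/*
 * The 16-byte BR/EDR link key concatenated with itself keys an
 * HMAC-SHA256 over "gamp" (Generic AMP Link Key); that result keys a
 * second HMAC-SHA256 over "802b" (Dedicated AMP Link Key, 802.11 PAL).
 * Build with -lcrypto.
 */
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <stdio.h>
#include <string.h>

#define LINK_KEY_SIZE		16
#define AMP_LINK_KEY_SIZE	32

static void derive_amp_key(const unsigned char *link_key, unsigned char *out)
{
	unsigned char keybuf[AMP_LINK_KEY_SIZE];
	unsigned char gamp[AMP_LINK_KEY_SIZE];
	unsigned int len;

	/* BR/EDR Link Key concatenated with itself */
	memcpy(keybuf, link_key, LINK_KEY_SIZE);
	memcpy(keybuf + LINK_KEY_SIZE, link_key, LINK_KEY_SIZE);

	/* Generic AMP Link Key (gamp) */
	HMAC(EVP_sha256(), keybuf, AMP_LINK_KEY_SIZE,
	     (const unsigned char *)"gamp", 4, gamp, &len);

	/* Dedicated AMP Link Key, "802b" being the 802.11 PAL key ID */
	HMAC(EVP_sha256(), gamp, AMP_LINK_KEY_SIZE,
	     (const unsigned char *)"802b", 4, out, &len);
}

int main(void)
{
	unsigned char link_key[LINK_KEY_SIZE] = { 0 };	/* dummy key */
	unsigned char amp_key[AMP_LINK_KEY_SIZE];
	int i;

	derive_amp_key(link_key, amp_key);
	for (i = 0; i < AMP_LINK_KEY_SIZE; i++)
		printf("%02x", amp_key[i]);
	printf("\n");
	return 0;
}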
215 | |||
216 | void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle) | ||
217 | { | ||
218 | struct hci_cp_read_local_amp_assoc cp; | ||
219 | struct amp_assoc *loc_assoc = &hdev->loc_assoc; | ||
220 | |||
221 | BT_DBG("%s handle %d", hdev->name, phy_handle); | ||
222 | |||
223 | cp.phy_handle = phy_handle; | ||
224 | cp.max_len = cpu_to_le16(hdev->amp_assoc_size); | ||
225 | cp.len_so_far = cpu_to_le16(loc_assoc->offset); | ||
226 | |||
227 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); | ||
228 | } | ||
229 | |||
230 | void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr) | ||
231 | { | ||
232 | struct hci_cp_read_local_amp_assoc cp; | ||
233 | |||
234 | memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc)); | ||
235 | memset(&cp, 0, sizeof(cp)); | ||
236 | |||
237 | cp.max_len = cpu_to_le16(hdev->amp_assoc_size); | ||
238 | |||
239 | mgr->state = READ_LOC_AMP_ASSOC; | ||
240 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); | ||
241 | } | ||
242 | |||
243 | void amp_read_loc_assoc_final_data(struct hci_dev *hdev, | ||
244 | struct hci_conn *hcon) | ||
245 | { | ||
246 | struct hci_cp_read_local_amp_assoc cp; | ||
247 | struct amp_mgr *mgr = hcon->amp_mgr; | ||
248 | |||
249 | cp.phy_handle = hcon->handle; | ||
250 | cp.len_so_far = cpu_to_le16(0); | ||
251 | cp.max_len = cpu_to_le16(hdev->amp_assoc_size); | ||
252 | |||
253 | mgr->state = READ_LOC_AMP_ASSOC_FINAL; | ||
254 | |||
255 | /* Read Local AMP Assoc final link information data */ | ||
256 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); | ||
257 | } | ||
258 | |||
259 | /* Write AMP Assoc data fragments; returns true once the last fragment is written */ | ||
260 | static bool amp_write_rem_assoc_frag(struct hci_dev *hdev, | ||
261 | struct hci_conn *hcon) | ||
262 | { | ||
263 | struct hci_cp_write_remote_amp_assoc *cp; | ||
264 | struct amp_mgr *mgr = hcon->amp_mgr; | ||
265 | struct amp_ctrl *ctrl; | ||
266 | u16 frag_len, len; | ||
267 | |||
268 | ctrl = amp_ctrl_lookup(mgr, hcon->remote_id); | ||
269 | if (!ctrl) | ||
270 | return false; | ||
271 | |||
272 | if (!ctrl->assoc_rem_len) { | ||
273 | BT_DBG("all fragments are written"); | ||
274 | ctrl->assoc_rem_len = ctrl->assoc_len; | ||
275 | ctrl->assoc_len_so_far = 0; | ||
276 | |||
277 | amp_ctrl_put(ctrl); | ||
278 | return true; | ||
279 | } | ||
280 | |||
281 | frag_len = min_t(u16, 248, ctrl->assoc_rem_len); | ||
282 | len = frag_len + sizeof(*cp); | ||
283 | |||
284 | cp = kzalloc(len, GFP_KERNEL); | ||
285 | if (!cp) { | ||
286 | amp_ctrl_put(ctrl); | ||
287 | return false; | ||
288 | } | ||
289 | |||
290 | BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u", | ||
291 | hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len); | ||
292 | |||
293 | cp->phy_handle = hcon->handle; | ||
294 | cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far); | ||
295 | cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len); | ||
296 | memcpy(cp->frag, ctrl->assoc, frag_len); | ||
297 | |||
298 | ctrl->assoc_len_so_far += frag_len; | ||
299 | ctrl->assoc_rem_len -= frag_len; | ||
300 | |||
301 | amp_ctrl_put(ctrl); | ||
302 | |||
303 | hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp); | ||
304 | |||
305 | kfree(cp); | ||
306 | |||
307 | return false; | ||
308 | } | ||
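A standalone sketch of the bookkeeping this function drives, with stdio standing in for the HCI command (a hypothetical helper, not the kernel API): the assoc blob goes out in fragments of at most 248 bytes, with len_so_far and rem_len advancing until nothing remains.

#include <stdint.h>
#include <stdio.h>

#define AMP_ASSOC_FRAG_MAX 248

static void write_assoc(const uint8_t *assoc, uint16_t assoc_len)
{
	uint16_t len_so_far = 0, rem_len = assoc_len;

	while (rem_len) {
		uint16_t frag = rem_len < AMP_ASSOC_FRAG_MAX ?
				rem_len : AMP_ASSOC_FRAG_MAX;

		/* The kernel builds hci_cp_write_remote_amp_assoc here and
		 * sends it; the next fragment goes out when the command
		 * complete event calls amp_write_rem_assoc_continue(). */
		printf("frag: offset %u len %u rem %u\n",
		       len_so_far, frag, rem_len);

		len_so_far += frag;
		rem_len -= frag;
	}

	(void)assoc;
}

int main(void)
{
	uint8_t assoc[600] = { 0 };	/* dummy assoc blob */

	write_assoc(assoc, sizeof(assoc));
	return 0;
}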
309 | |||
310 | void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle) | ||
311 | { | ||
312 | struct hci_conn *hcon; | ||
313 | |||
314 | BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle); | ||
315 | |||
316 | hcon = hci_conn_hash_lookup_handle(hdev, handle); | ||
317 | if (!hcon) | ||
318 | return; | ||
319 | |||
320 | amp_write_rem_assoc_frag(hdev, hcon); | ||
321 | } | ||
322 | |||
323 | void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle) | ||
324 | { | ||
325 | struct hci_conn *hcon; | ||
326 | |||
327 | BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle); | ||
328 | |||
329 | hcon = hci_conn_hash_lookup_handle(hdev, handle); | ||
330 | if (!hcon) | ||
331 | return; | ||
332 | |||
333 | BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon); | ||
334 | |||
335 | amp_write_rem_assoc_frag(hdev, hcon); | ||
336 | } | ||
337 | |||
338 | void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, | ||
339 | struct hci_conn *hcon) | ||
340 | { | ||
341 | struct hci_cp_create_phy_link cp; | ||
342 | |||
343 | cp.phy_handle = hcon->handle; | ||
344 | |||
345 | BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon, | ||
346 | hcon->handle); | ||
347 | |||
348 | if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len, | ||
349 | &cp.key_type)) { | ||
350 | BT_DBG("Cannot create link key"); | ||
351 | return; | ||
352 | } | ||
353 | |||
354 | hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp); | ||
355 | } | ||
356 | |||
357 | void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, | ||
358 | struct hci_conn *hcon) | ||
359 | { | ||
360 | struct hci_cp_accept_phy_link cp; | ||
361 | |||
362 | cp.phy_handle = hcon->handle; | ||
363 | |||
364 | BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon, | ||
365 | hcon->handle); | ||
366 | |||
367 | if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len, | ||
368 | &cp.key_type)) { | ||
369 | BT_DBG("Cannot create link key"); | ||
370 | return; | ||
371 | } | ||
372 | |||
373 | hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp); | ||
374 | } | ||
375 | |||
376 | void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon) | ||
377 | { | ||
378 | struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev); | ||
379 | struct amp_mgr *mgr = hs_hcon->amp_mgr; | ||
380 | struct l2cap_chan *bredr_chan; | ||
381 | |||
382 | BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr); | ||
383 | |||
384 | if (!bredr_hdev || !mgr || !mgr->bredr_chan) | ||
385 | return; | ||
386 | |||
387 | bredr_chan = mgr->bredr_chan; | ||
388 | |||
389 | l2cap_chan_lock(bredr_chan); | ||
390 | |||
391 | set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags); | ||
392 | bredr_chan->remote_amp_id = hs_hcon->remote_id; | ||
393 | bredr_chan->local_amp_id = hs_hcon->hdev->id; | ||
394 | bredr_chan->hs_hcon = hs_hcon; | ||
395 | bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu; | ||
396 | |||
397 | __l2cap_physical_cfm(bredr_chan, 0); | ||
398 | |||
399 | l2cap_chan_unlock(bredr_chan); | ||
400 | |||
401 | hci_dev_put(bredr_hdev); | ||
402 | } | ||
403 | |||
404 | void amp_create_logical_link(struct l2cap_chan *chan) | ||
405 | { | ||
406 | struct hci_cp_create_accept_logical_link cp; | ||
407 | struct hci_conn *hcon; | ||
408 | struct hci_dev *hdev; | ||
409 | |||
410 | BT_DBG("chan %p", chan); | ||
411 | |||
412 | if (!chan->hs_hcon) | ||
413 | return; | ||
414 | |||
415 | hdev = hci_dev_hold(chan->hs_hcon->hdev); | ||
416 | if (!hdev) | ||
417 | return; | ||
418 | |||
419 | BT_DBG("chan %p dst %pMR", chan, chan->conn->dst); | ||
420 | |||
421 | hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, chan->conn->dst); | ||
422 | if (!hcon) | ||
423 | goto done; | ||
424 | |||
425 | cp.phy_handle = hcon->handle; | ||
426 | |||
427 | cp.tx_flow_spec.id = chan->local_id; | ||
428 | cp.tx_flow_spec.stype = chan->local_stype; | ||
429 | cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu); | ||
430 | cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime); | ||
431 | cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat); | ||
432 | cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to); | ||
433 | |||
434 | cp.rx_flow_spec.id = chan->remote_id; | ||
435 | cp.rx_flow_spec.stype = chan->remote_stype; | ||
436 | cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu); | ||
437 | cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime); | ||
438 | cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat); | ||
439 | cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to); | ||
440 | |||
441 | if (hcon->out) | ||
442 | hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp), | ||
443 | &cp); | ||
444 | else | ||
445 | hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp), | ||
446 | &cp); | ||
447 | |||
448 | done: | ||
449 | hci_dev_put(hdev); | ||
450 | } | ||
451 | |||
452 | void amp_disconnect_logical_link(struct hci_chan *hchan) | ||
453 | { | ||
454 | struct hci_conn *hcon = hchan->conn; | ||
455 | struct hci_cp_disconn_logical_link cp; | ||
456 | |||
457 | if (hcon->state != BT_CONNECTED) { | ||
458 | BT_DBG("hchan %p not connected", hchan); | ||
459 | return; | ||
460 | } | ||
461 | |||
462 | cp.log_handle = cpu_to_le16(hchan->handle); | ||
463 | hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp); | ||
464 | } | ||
465 | |||
466 | void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason) | ||
467 | { | ||
468 | BT_DBG("hchan %p", hchan); | ||
469 | |||
470 | hci_chan_del(hchan); | ||
471 | } | ||
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index 4a6620bc1570..a5b639702637 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c | |||
@@ -182,8 +182,7 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len) | |||
182 | a2 = data; | 182 | a2 = data; |
183 | data += ETH_ALEN; | 183 | data += ETH_ALEN; |
184 | 184 | ||
185 | BT_DBG("mc filter %s -> %s", | 185 | BT_DBG("mc filter %pMR -> %pMR", a1, a2); |
186 | batostr((void *) a1), batostr((void *) a2)); | ||
187 | 186 | ||
188 | /* Iterate from a1 to a2 */ | 187 | /* Iterate from a1 to a2 */ |
189 | set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); | 188 | set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); |
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c index 98f86f91d47c..e58c8b32589c 100644 --- a/net/bluetooth/bnep/netdev.c +++ b/net/bluetooth/bnep/netdev.c | |||
@@ -25,7 +25,6 @@ | |||
25 | SOFTWARE IS DISCLAIMED. | 25 | SOFTWARE IS DISCLAIMED. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/export.h> | ||
29 | #include <linux/etherdevice.h> | 28 | #include <linux/etherdevice.h> |
30 | 29 | ||
31 | #include <net/bluetooth/bluetooth.h> | 30 | #include <net/bluetooth/bluetooth.h> |
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c index 50f0d135eb8f..a4a9d4b6816c 100644 --- a/net/bluetooth/cmtp/capi.c +++ b/net/bluetooth/cmtp/capi.c | |||
@@ -20,7 +20,7 @@ | |||
20 | SOFTWARE IS DISCLAIMED. | 20 | SOFTWARE IS DISCLAIMED. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/module.h> | 23 | #include <linux/export.h> |
24 | #include <linux/proc_fs.h> | 24 | #include <linux/proc_fs.h> |
25 | #include <linux/seq_file.h> | 25 | #include <linux/seq_file.h> |
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 6c9c1fd601ca..e0a6ebf2baa6 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c | |||
@@ -353,7 +353,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) | |||
353 | 353 | ||
354 | BT_DBG("mtu %d", session->mtu); | 354 | BT_DBG("mtu %d", session->mtu); |
355 | 355 | ||
356 | sprintf(session->name, "%s", batostr(&bt_sk(sock->sk)->dst)); | 356 | sprintf(session->name, "%pMR", &bt_sk(sock->sk)->dst); |
357 | 357 | ||
358 | session->sock = sock; | 358 | session->sock = sock; |
359 | session->state = BT_CONFIG; | 359 | session->state = BT_CONFIG; |
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c index aacb802d1ee4..1c57482112b6 100644 --- a/net/bluetooth/cmtp/sock.c +++ b/net/bluetooth/cmtp/sock.c | |||
@@ -20,7 +20,7 @@ | |||
20 | SOFTWARE IS DISCLAIMED. | 20 | SOFTWARE IS DISCLAIMED. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/module.h> | 23 | #include <linux/export.h> |
24 | 24 | ||
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/capability.h> | 26 | #include <linux/capability.h> |
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index b9196a44f759..25bfce0666eb 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c | |||
@@ -130,6 +130,20 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason) | |||
130 | hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp); | 130 | hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp); |
131 | } | 131 | } |
132 | 132 | ||
133 | static void hci_amp_disconn(struct hci_conn *conn, __u8 reason) | ||
134 | { | ||
135 | struct hci_cp_disconn_phy_link cp; | ||
136 | |||
137 | BT_DBG("hcon %p", conn); | ||
138 | |||
139 | conn->state = BT_DISCONN; | ||
140 | |||
141 | cp.phy_handle = HCI_PHY_HANDLE(conn->handle); | ||
142 | cp.reason = reason; | ||
143 | hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK, | ||
144 | sizeof(cp), &cp); | ||
145 | } | ||
146 | |||
133 | static void hci_add_sco(struct hci_conn *conn, __u16 handle) | 147 | static void hci_add_sco(struct hci_conn *conn, __u16 handle) |
134 | { | 148 | { |
135 | struct hci_dev *hdev = conn->hdev; | 149 | struct hci_dev *hdev = conn->hdev; |
@@ -230,11 +244,24 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status) | |||
230 | } | 244 | } |
231 | } | 245 | } |
232 | 246 | ||
247 | static void hci_conn_disconnect(struct hci_conn *conn) | ||
248 | { | ||
249 | __u8 reason = hci_proto_disconn_ind(conn); | ||
250 | |||
251 | switch (conn->type) { | ||
252 | case ACL_LINK: | ||
253 | hci_acl_disconn(conn, reason); | ||
254 | break; | ||
255 | case AMP_LINK: | ||
256 | hci_amp_disconn(conn, reason); | ||
257 | break; | ||
258 | } | ||
259 | } | ||
260 | |||
233 | static void hci_conn_timeout(struct work_struct *work) | 261 | static void hci_conn_timeout(struct work_struct *work) |
234 | { | 262 | { |
235 | struct hci_conn *conn = container_of(work, struct hci_conn, | 263 | struct hci_conn *conn = container_of(work, struct hci_conn, |
236 | disc_work.work); | 264 | disc_work.work); |
237 | __u8 reason; | ||
238 | 265 | ||
239 | BT_DBG("hcon %p state %s", conn, state_to_string(conn->state)); | 266 | BT_DBG("hcon %p state %s", conn, state_to_string(conn->state)); |
240 | 267 | ||
@@ -253,8 +280,7 @@ static void hci_conn_timeout(struct work_struct *work) | |||
253 | break; | 280 | break; |
254 | case BT_CONFIG: | 281 | case BT_CONFIG: |
255 | case BT_CONNECTED: | 282 | case BT_CONNECTED: |
256 | reason = hci_proto_disconn_ind(conn); | 283 | hci_conn_disconnect(conn); |
257 | hci_acl_disconn(conn, reason); | ||
258 | break; | 284 | break; |
259 | default: | 285 | default: |
260 | conn->state = BT_CLOSED; | 286 | conn->state = BT_CLOSED; |
@@ -320,7 +346,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) | |||
320 | { | 346 | { |
321 | struct hci_conn *conn; | 347 | struct hci_conn *conn; |
322 | 348 | ||
323 | BT_DBG("%s dst %s", hdev->name, batostr(dst)); | 349 | BT_DBG("%s dst %pMR", hdev->name, dst); |
324 | 350 | ||
325 | conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL); | 351 | conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL); |
326 | if (!conn) | 352 | if (!conn) |
@@ -437,7 +463,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) | |||
437 | int use_src = bacmp(src, BDADDR_ANY); | 463 | int use_src = bacmp(src, BDADDR_ANY); |
438 | struct hci_dev *hdev = NULL, *d; | 464 | struct hci_dev *hdev = NULL, *d; |
439 | 465 | ||
440 | BT_DBG("%s -> %s", batostr(src), batostr(dst)); | 466 | BT_DBG("%pMR -> %pMR", src, dst); |
441 | 467 | ||
442 | read_lock(&hci_dev_list_lock); | 468 | read_lock(&hci_dev_list_lock); |
443 | 469 | ||
@@ -476,6 +502,9 @@ static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, | |||
476 | { | 502 | { |
477 | struct hci_conn *le; | 503 | struct hci_conn *le; |
478 | 504 | ||
505 | if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) | ||
506 | return ERR_PTR(-ENOTSUPP); | ||
507 | |||
479 | le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); | 508 | le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); |
480 | if (!le) { | 509 | if (!le) { |
481 | le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); | 510 | le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); |
@@ -567,7 +596,7 @@ static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, | |||
567 | struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, | 596 | struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, |
568 | __u8 dst_type, __u8 sec_level, __u8 auth_type) | 597 | __u8 dst_type, __u8 sec_level, __u8 auth_type) |
569 | { | 598 | { |
570 | BT_DBG("%s dst %s type 0x%x", hdev->name, batostr(dst), type); | 599 | BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type); |
571 | 600 | ||
572 | switch (type) { | 601 | switch (type) { |
573 | case LE_LINK: | 602 | case LE_LINK: |
@@ -933,6 +962,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn) | |||
933 | 962 | ||
934 | chan->conn = conn; | 963 | chan->conn = conn; |
935 | skb_queue_head_init(&chan->data_q); | 964 | skb_queue_head_init(&chan->data_q); |
965 | chan->state = BT_CONNECTED; | ||
936 | 966 | ||
937 | list_add_rcu(&chan->list, &conn->chan_list); | 967 | list_add_rcu(&chan->list, &conn->chan_list); |
938 | 968 | ||
@@ -950,6 +980,8 @@ void hci_chan_del(struct hci_chan *chan) | |||
950 | 980 | ||
951 | synchronize_rcu(); | 981 | synchronize_rcu(); |
952 | 982 | ||
983 | hci_conn_put(conn); | ||
984 | |||
953 | skb_queue_purge(&chan->data_q); | 985 | skb_queue_purge(&chan->data_q); |
954 | kfree(chan); | 986 | kfree(chan); |
955 | } | 987 | } |
@@ -963,3 +995,35 @@ void hci_chan_list_flush(struct hci_conn *conn) | |||
963 | list_for_each_entry_safe(chan, n, &conn->chan_list, list) | 995 | list_for_each_entry_safe(chan, n, &conn->chan_list, list) |
964 | hci_chan_del(chan); | 996 | hci_chan_del(chan); |
965 | } | 997 | } |
998 | |||
999 | static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon, | ||
1000 | __u16 handle) | ||
1001 | { | ||
1002 | struct hci_chan *hchan; | ||
1003 | |||
1004 | list_for_each_entry(hchan, &hcon->chan_list, list) { | ||
1005 | if (hchan->handle == handle) | ||
1006 | return hchan; | ||
1007 | } | ||
1008 | |||
1009 | return NULL; | ||
1010 | } | ||
1011 | |||
1012 | struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle) | ||
1013 | { | ||
1014 | struct hci_conn_hash *h = &hdev->conn_hash; | ||
1015 | struct hci_conn *hcon; | ||
1016 | struct hci_chan *hchan = NULL; | ||
1017 | |||
1018 | rcu_read_lock(); | ||
1019 | |||
1020 | list_for_each_entry_rcu(hcon, &h->list, list) { | ||
1021 | hchan = __hci_chan_lookup_handle(hcon, handle); | ||
1022 | if (hchan) | ||
1023 | break; | ||
1024 | } | ||
1025 | |||
1026 | rcu_read_unlock(); | ||
1027 | |||
1028 | return hchan; | ||
1029 | } | ||
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index a0a2f97b9c62..7140f83328a2 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c | |||
@@ -178,48 +178,13 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt) | |||
178 | 178 | ||
179 | static void bredr_init(struct hci_dev *hdev) | 179 | static void bredr_init(struct hci_dev *hdev) |
180 | { | 180 | { |
181 | struct hci_cp_delete_stored_link_key cp; | ||
182 | __le16 param; | ||
183 | __u8 flt_type; | ||
184 | |||
185 | hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; | 181 | hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; |
186 | 182 | ||
187 | /* Mandatory initialization */ | ||
188 | |||
189 | /* Read Local Supported Features */ | 183 | /* Read Local Supported Features */ |
190 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); | 184 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); |
191 | 185 | ||
192 | /* Read Local Version */ | 186 | /* Read Local Version */ |
193 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); | 187 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); |
194 | |||
195 | /* Read Buffer Size (ACL mtu, max pkt, etc.) */ | ||
196 | hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL); | ||
197 | |||
198 | /* Read BD Address */ | ||
199 | hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL); | ||
200 | |||
201 | /* Read Class of Device */ | ||
202 | hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); | ||
203 | |||
204 | /* Read Local Name */ | ||
205 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL); | ||
206 | |||
207 | /* Read Voice Setting */ | ||
208 | hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL); | ||
209 | |||
210 | /* Optional initialization */ | ||
211 | |||
212 | /* Clear Event Filters */ | ||
213 | flt_type = HCI_FLT_CLEAR_ALL; | ||
214 | hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); | ||
215 | |||
216 | /* Connection accept timeout ~20 secs */ | ||
217 | param = __constant_cpu_to_le16(0x7d00); | ||
218 | hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); | ||
219 | |||
220 | bacpy(&cp.bdaddr, BDADDR_ANY); | ||
221 | cp.delete_all = 1; | ||
222 | hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp); | ||
223 | } | 188 | } |
224 | 189 | ||
225 | static void amp_init(struct hci_dev *hdev) | 190 | static void amp_init(struct hci_dev *hdev) |
@@ -273,14 +238,6 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt) | |||
273 | } | 238 | } |
274 | } | 239 | } |
275 | 240 | ||
276 | static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt) | ||
277 | { | ||
278 | BT_DBG("%s", hdev->name); | ||
279 | |||
280 | /* Read LE buffer size */ | ||
281 | hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); | ||
282 | } | ||
283 | |||
284 | static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) | 241 | static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) |
285 | { | 242 | { |
286 | __u8 scan = opt; | 243 | __u8 scan = opt; |
@@ -405,7 +362,7 @@ struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, | |||
405 | struct discovery_state *cache = &hdev->discovery; | 362 | struct discovery_state *cache = &hdev->discovery; |
406 | struct inquiry_entry *e; | 363 | struct inquiry_entry *e; |
407 | 364 | ||
408 | BT_DBG("cache %p, %s", cache, batostr(bdaddr)); | 365 | BT_DBG("cache %p, %pMR", cache, bdaddr); |
409 | 366 | ||
410 | list_for_each_entry(e, &cache->all, all) { | 367 | list_for_each_entry(e, &cache->all, all) { |
411 | if (!bacmp(&e->data.bdaddr, bdaddr)) | 368 | if (!bacmp(&e->data.bdaddr, bdaddr)) |
@@ -421,7 +378,7 @@ struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, | |||
421 | struct discovery_state *cache = &hdev->discovery; | 378 | struct discovery_state *cache = &hdev->discovery; |
422 | struct inquiry_entry *e; | 379 | struct inquiry_entry *e; |
423 | 380 | ||
424 | BT_DBG("cache %p, %s", cache, batostr(bdaddr)); | 381 | BT_DBG("cache %p, %pMR", cache, bdaddr); |
425 | 382 | ||
426 | list_for_each_entry(e, &cache->unknown, list) { | 383 | list_for_each_entry(e, &cache->unknown, list) { |
427 | if (!bacmp(&e->data.bdaddr, bdaddr)) | 384 | if (!bacmp(&e->data.bdaddr, bdaddr)) |
@@ -438,7 +395,7 @@ struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, | |||
438 | struct discovery_state *cache = &hdev->discovery; | 395 | struct discovery_state *cache = &hdev->discovery; |
439 | struct inquiry_entry *e; | 396 | struct inquiry_entry *e; |
440 | 397 | ||
441 | BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state); | 398 | BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state); |
442 | 399 | ||
443 | list_for_each_entry(e, &cache->resolve, list) { | 400 | list_for_each_entry(e, &cache->resolve, list) { |
444 | if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) | 401 | if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) |
@@ -475,7 +432,9 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, | |||
475 | struct discovery_state *cache = &hdev->discovery; | 432 | struct discovery_state *cache = &hdev->discovery; |
476 | struct inquiry_entry *ie; | 433 | struct inquiry_entry *ie; |
477 | 434 | ||
478 | BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr)); | 435 | BT_DBG("cache %p, %pMR", cache, &data->bdaddr); |
436 | |||
437 | hci_remove_remote_oob_data(hdev, &data->bdaddr); | ||
479 | 438 | ||
480 | if (ssp) | 439 | if (ssp) |
481 | *ssp = data->ssp_mode; | 440 | *ssp = data->ssp_mode; |
@@ -637,6 +596,99 @@ done: | |||
637 | return err; | 596 | return err; |
638 | } | 597 | } |
639 | 598 | ||
599 | static u8 create_ad(struct hci_dev *hdev, u8 *ptr) | ||
600 | { | ||
601 | u8 ad_len = 0, flags = 0; | ||
602 | size_t name_len; | ||
603 | |||
604 | if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) | ||
605 | flags |= LE_AD_GENERAL; | ||
606 | |||
607 | if (!lmp_bredr_capable(hdev)) | ||
608 | flags |= LE_AD_NO_BREDR; | ||
609 | |||
610 | if (lmp_le_br_capable(hdev)) | ||
611 | flags |= LE_AD_SIM_LE_BREDR_CTRL; | ||
612 | |||
613 | if (lmp_host_le_br_capable(hdev)) | ||
614 | flags |= LE_AD_SIM_LE_BREDR_HOST; | ||
615 | |||
616 | if (flags) { | ||
617 | BT_DBG("adv flags 0x%02x", flags); | ||
618 | |||
619 | ptr[0] = 2; | ||
620 | ptr[1] = EIR_FLAGS; | ||
621 | ptr[2] = flags; | ||
622 | |||
623 | ad_len += 3; | ||
624 | ptr += 3; | ||
625 | } | ||
626 | |||
627 | if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) { | ||
628 | ptr[0] = 2; | ||
629 | ptr[1] = EIR_TX_POWER; | ||
630 | ptr[2] = (u8) hdev->adv_tx_power; | ||
631 | |||
632 | ad_len += 3; | ||
633 | ptr += 3; | ||
634 | } | ||
635 | |||
636 | name_len = strlen(hdev->dev_name); | ||
637 | if (name_len > 0) { | ||
638 | size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2; | ||
639 | |||
640 | if (name_len > max_len) { | ||
641 | name_len = max_len; | ||
642 | ptr[1] = EIR_NAME_SHORT; | ||
643 | } else | ||
644 | ptr[1] = EIR_NAME_COMPLETE; | ||
645 | |||
646 | ptr[0] = name_len + 1; | ||
647 | |||
648 | memcpy(ptr + 2, hdev->dev_name, name_len); | ||
649 | |||
650 | ad_len += (name_len + 2); | ||
651 | ptr += (name_len + 2); | ||
652 | } | ||
653 | |||
654 | return ad_len; | ||
655 | } | ||
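The advertising data create_ad() emits is a sequence of TLV elements, each <length><type><payload> with the length byte counting the type byte plus the payload. A sketch of the same layout outside the kernel, assuming only the standard AD/EIR type codes (0x01 Flags, 0x08/0x09 Shortened/Complete Local Name, 0x0a TX Power) and the 31-byte advertising data limit:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AD_FLAGS		0x01
#define AD_NAME_SHORT		0x08
#define AD_NAME_COMPLETE	0x09
#define AD_TX_POWER		0x0a

static size_t build_ad(uint8_t *buf, size_t buf_len, uint8_t flags,
		       int8_t tx_power, const char *name)
{
	size_t off = 0, name_len = strlen(name);

	if (flags && off + 3 <= buf_len) {
		buf[off++] = 2;		/* type byte + one data byte */
		buf[off++] = AD_FLAGS;
		buf[off++] = flags;
	}

	if (off + 3 <= buf_len) {
		buf[off++] = 2;
		buf[off++] = AD_TX_POWER;
		buf[off++] = (uint8_t)tx_power;
	}

	if (name_len && off + 2 < buf_len) {
		size_t max = buf_len - off - 2;
		uint8_t type = AD_NAME_COMPLETE;

		if (name_len > max) {
			name_len = max;	/* truncated: shortened name */
			type = AD_NAME_SHORT;
		}
		buf[off++] = (uint8_t)(name_len + 1);
		buf[off++] = type;
		memcpy(buf + off, name, name_len);
		off += name_len;
	}

	return off;
}

int main(void)
{
	uint8_t ad[31];		/* advertising data limit */
	size_t i, len;

	/* 0x06 = LE General Discoverable | BR/EDR Not Supported */
	len = build_ad(ad, sizeof(ad), 0x06, -8, "example-device");
	for (i = 0; i < len; i++)
		printf("%02x ", ad[i]);
	printf("\n");
	return 0;
}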
656 | |||
657 | int hci_update_ad(struct hci_dev *hdev) | ||
658 | { | ||
659 | struct hci_cp_le_set_adv_data cp; | ||
660 | u8 len; | ||
661 | int err; | ||
662 | |||
663 | hci_dev_lock(hdev); | ||
664 | |||
665 | if (!lmp_le_capable(hdev)) { | ||
666 | err = -EINVAL; | ||
667 | goto unlock; | ||
668 | } | ||
669 | |||
670 | memset(&cp, 0, sizeof(cp)); | ||
671 | |||
672 | len = create_ad(hdev, cp.data); | ||
673 | |||
674 | if (hdev->adv_data_len == len && | ||
675 | memcmp(cp.data, hdev->adv_data, len) == 0) { | ||
676 | err = 0; | ||
677 | goto unlock; | ||
678 | } | ||
679 | |||
680 | memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); | ||
681 | hdev->adv_data_len = len; | ||
682 | |||
683 | cp.length = len; | ||
684 | err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); | ||
685 | |||
686 | unlock: | ||
687 | hci_dev_unlock(hdev); | ||
688 | |||
689 | return err; | ||
690 | } | ||
691 | |||
640 | /* ---- HCI ioctl helpers ---- */ | 692 | /* ---- HCI ioctl helpers ---- */ |
641 | 693 | ||
642 | int hci_dev_open(__u16 dev) | 694 | int hci_dev_open(__u16 dev) |
@@ -687,10 +739,6 @@ int hci_dev_open(__u16 dev) | |||
687 | 739 | ||
688 | ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT); | 740 | ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT); |
689 | 741 | ||
690 | if (lmp_host_le_capable(hdev)) | ||
691 | ret = __hci_request(hdev, hci_le_init_req, 0, | ||
692 | HCI_INIT_TIMEOUT); | ||
693 | |||
694 | clear_bit(HCI_INIT, &hdev->flags); | 742 | clear_bit(HCI_INIT, &hdev->flags); |
695 | } | 743 | } |
696 | 744 | ||
@@ -698,6 +746,7 @@ int hci_dev_open(__u16 dev) | |||
698 | hci_dev_hold(hdev); | 746 | hci_dev_hold(hdev); |
699 | set_bit(HCI_UP, &hdev->flags); | 747 | set_bit(HCI_UP, &hdev->flags); |
700 | hci_notify(hdev, HCI_DEV_UP); | 748 | hci_notify(hdev, HCI_DEV_UP); |
749 | hci_update_ad(hdev); | ||
701 | if (!test_bit(HCI_SETUP, &hdev->dev_flags) && | 750 | if (!test_bit(HCI_SETUP, &hdev->dev_flags) && |
702 | mgmt_valid_hdev(hdev)) { | 751 | mgmt_valid_hdev(hdev)) { |
703 | hci_dev_lock(hdev); | 752 | hci_dev_lock(hdev); |
@@ -1039,10 +1088,17 @@ int hci_get_dev_info(void __user *arg) | |||
1039 | di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4); | 1088 | di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4); |
1040 | di.flags = hdev->flags; | 1089 | di.flags = hdev->flags; |
1041 | di.pkt_type = hdev->pkt_type; | 1090 | di.pkt_type = hdev->pkt_type; |
1042 | di.acl_mtu = hdev->acl_mtu; | 1091 | if (lmp_bredr_capable(hdev)) { |
1043 | di.acl_pkts = hdev->acl_pkts; | 1092 | di.acl_mtu = hdev->acl_mtu; |
1044 | di.sco_mtu = hdev->sco_mtu; | 1093 | di.acl_pkts = hdev->acl_pkts; |
1045 | di.sco_pkts = hdev->sco_pkts; | 1094 | di.sco_mtu = hdev->sco_mtu; |
1095 | di.sco_pkts = hdev->sco_pkts; | ||
1096 | } else { | ||
1097 | di.acl_mtu = hdev->le_mtu; | ||
1098 | di.acl_pkts = hdev->le_pkts; | ||
1099 | di.sco_mtu = 0; | ||
1100 | di.sco_pkts = 0; | ||
1101 | } | ||
1046 | di.link_policy = hdev->link_policy; | 1102 | di.link_policy = hdev->link_policy; |
1047 | di.link_mode = hdev->link_mode; | 1103 | di.link_mode = hdev->link_mode; |
1048 | 1104 | ||
@@ -1259,7 +1315,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, | |||
1259 | list_add(&key->list, &hdev->link_keys); | 1315 | list_add(&key->list, &hdev->link_keys); |
1260 | } | 1316 | } |
1261 | 1317 | ||
1262 | BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type); | 1318 | BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); |
1263 | 1319 | ||
1264 | /* Some buggy controller combinations generate a changed | 1320 | /* Some buggy controller combinations generate a changed |
1265 | * combination key for legacy pairing even when there's no | 1321 | * combination key for legacy pairing even when there's no |
@@ -1338,7 +1394,7 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) | |||
1338 | if (!key) | 1394 | if (!key) |
1339 | return -ENOENT; | 1395 | return -ENOENT; |
1340 | 1396 | ||
1341 | BT_DBG("%s removing %s", hdev->name, batostr(bdaddr)); | 1397 | BT_DBG("%s removing %pMR", hdev->name, bdaddr); |
1342 | 1398 | ||
1343 | list_del(&key->list); | 1399 | list_del(&key->list); |
1344 | kfree(key); | 1400 | kfree(key); |
@@ -1354,7 +1410,7 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr) | |||
1354 | if (bacmp(bdaddr, &k->bdaddr)) | 1410 | if (bacmp(bdaddr, &k->bdaddr)) |
1355 | continue; | 1411 | continue; |
1356 | 1412 | ||
1357 | BT_DBG("%s removing %s", hdev->name, batostr(bdaddr)); | 1413 | BT_DBG("%s removing %pMR", hdev->name, bdaddr); |
1358 | 1414 | ||
1359 | list_del(&k->list); | 1415 | list_del(&k->list); |
1360 | kfree(k); | 1416 | kfree(k); |
@@ -1401,7 +1457,7 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr) | |||
1401 | if (!data) | 1457 | if (!data) |
1402 | return -ENOENT; | 1458 | return -ENOENT; |
1403 | 1459 | ||
1404 | BT_DBG("%s removing %s", hdev->name, batostr(bdaddr)); | 1460 | BT_DBG("%s removing %pMR", hdev->name, bdaddr); |
1405 | 1461 | ||
1406 | list_del(&data->list); | 1462 | list_del(&data->list); |
1407 | kfree(data); | 1463 | kfree(data); |
@@ -1440,7 +1496,7 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash, | |||
1440 | memcpy(data->hash, hash, sizeof(data->hash)); | 1496 | memcpy(data->hash, hash, sizeof(data->hash)); |
1441 | memcpy(data->randomizer, randomizer, sizeof(data->randomizer)); | 1497 | memcpy(data->randomizer, randomizer, sizeof(data->randomizer)); |
1442 | 1498 | ||
1443 | BT_DBG("%s for %s", hdev->name, batostr(bdaddr)); | 1499 | BT_DBG("%s for %pMR", hdev->name, bdaddr); |
1444 | 1500 | ||
1445 | return 0; | 1501 | return 0; |
1446 | } | 1502 | } |
@@ -1617,6 +1673,9 @@ int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window, | |||
1617 | 1673 | ||
1618 | BT_DBG("%s", hdev->name); | 1674 | BT_DBG("%s", hdev->name); |
1619 | 1675 | ||
1676 | if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) | ||
1677 | return -ENOTSUPP; | ||
1678 | |||
1620 | if (work_busy(&hdev->le_scan)) | 1679 | if (work_busy(&hdev->le_scan)) |
1621 | return -EINPROGRESS; | 1680 | return -EINPROGRESS; |
1622 | 1681 | ||
@@ -1643,6 +1702,8 @@ struct hci_dev *hci_alloc_dev(void) | |||
1643 | hdev->esco_type = (ESCO_HV1); | 1702 | hdev->esco_type = (ESCO_HV1); |
1644 | hdev->link_mode = (HCI_LM_ACCEPT); | 1703 | hdev->link_mode = (HCI_LM_ACCEPT); |
1645 | hdev->io_capability = 0x03; /* No Input No Output */ | 1704 | hdev->io_capability = 0x03; /* No Input No Output */ |
1705 | hdev->inq_tx_power = HCI_TX_POWER_INVALID; | ||
1706 | hdev->adv_tx_power = HCI_TX_POWER_INVALID; | ||
1646 | 1707 | ||
1647 | hdev->sniff_max_interval = 800; | 1708 | hdev->sniff_max_interval = 800; |
1648 | hdev->sniff_min_interval = 80; | 1709 | hdev->sniff_min_interval = 80; |
@@ -2153,9 +2214,10 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) | |||
2153 | hdr->dlen = cpu_to_le16(len); | 2214 | hdr->dlen = cpu_to_le16(len); |
2154 | } | 2215 | } |
2155 | 2216 | ||
2156 | static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, | 2217 | static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, |
2157 | struct sk_buff *skb, __u16 flags) | 2218 | struct sk_buff *skb, __u16 flags) |
2158 | { | 2219 | { |
2220 | struct hci_conn *conn = chan->conn; | ||
2159 | struct hci_dev *hdev = conn->hdev; | 2221 | struct hci_dev *hdev = conn->hdev; |
2160 | struct sk_buff *list; | 2222 | struct sk_buff *list; |
2161 | 2223 | ||
@@ -2163,7 +2225,18 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, | |||
2163 | skb->data_len = 0; | 2225 | skb->data_len = 0; |
2164 | 2226 | ||
2165 | bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; | 2227 | bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; |
2166 | hci_add_acl_hdr(skb, conn->handle, flags); | 2228 | |
2229 | switch (hdev->dev_type) { | ||
2230 | case HCI_BREDR: | ||
2231 | hci_add_acl_hdr(skb, conn->handle, flags); | ||
2232 | break; | ||
2233 | case HCI_AMP: | ||
2234 | hci_add_acl_hdr(skb, chan->handle, flags); | ||
2235 | break; | ||
2236 | default: | ||
2237 | BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type); | ||
2238 | return; | ||
2239 | } | ||
2167 | 2240 | ||
2168 | list = skb_shinfo(skb)->frag_list; | 2241 | list = skb_shinfo(skb)->frag_list; |
2169 | if (!list) { | 2242 | if (!list) { |
@@ -2202,14 +2275,13 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, | |||
2202 | 2275 | ||
2203 | void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) | 2276 | void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) |
2204 | { | 2277 | { |
2205 | struct hci_conn *conn = chan->conn; | 2278 | struct hci_dev *hdev = chan->conn->hdev; |
2206 | struct hci_dev *hdev = conn->hdev; | ||
2207 | 2279 | ||
2208 | BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); | 2280 | BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); |
2209 | 2281 | ||
2210 | skb->dev = (void *) hdev; | 2282 | skb->dev = (void *) hdev; |
2211 | 2283 | ||
2212 | hci_queue_acl(conn, &chan->data_q, skb, flags); | 2284 | hci_queue_acl(chan, &chan->data_q, skb, flags); |
2213 | 2285 | ||
2214 | queue_work(hdev->workqueue, &hdev->tx_work); | 2286 | queue_work(hdev->workqueue, &hdev->tx_work); |
2215 | } | 2287 | } |
@@ -2311,8 +2383,8 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) | |||
2311 | /* Kill stalled connections */ | 2383 | /* Kill stalled connections */ |
2312 | list_for_each_entry_rcu(c, &h->list, list) { | 2384 | list_for_each_entry_rcu(c, &h->list, list) { |
2313 | if (c->type == type && c->sent) { | 2385 | if (c->type == type && c->sent) { |
2314 | BT_ERR("%s killing stalled connection %s", | 2386 | BT_ERR("%s killing stalled connection %pMR", |
2315 | hdev->name, batostr(&c->dst)); | 2387 | hdev->name, &c->dst); |
2316 | hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM); | 2388 | hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM); |
2317 | } | 2389 | } |
2318 | } | 2390 | } |
@@ -2381,6 +2453,9 @@ static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, | |||
2381 | case ACL_LINK: | 2453 | case ACL_LINK: |
2382 | cnt = hdev->acl_cnt; | 2454 | cnt = hdev->acl_cnt; |
2383 | break; | 2455 | break; |
2456 | case AMP_LINK: | ||
2457 | cnt = hdev->block_cnt; | ||
2458 | break; | ||
2384 | case SCO_LINK: | 2459 | case SCO_LINK: |
2385 | case ESCO_LINK: | 2460 | case ESCO_LINK: |
2386 | cnt = hdev->sco_cnt; | 2461 | cnt = hdev->sco_cnt; |
@@ -2510,11 +2585,19 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) | |||
2510 | struct hci_chan *chan; | 2585 | struct hci_chan *chan; |
2511 | struct sk_buff *skb; | 2586 | struct sk_buff *skb; |
2512 | int quote; | 2587 | int quote; |
2588 | u8 type; | ||
2513 | 2589 | ||
2514 | __check_timeout(hdev, cnt); | 2590 | __check_timeout(hdev, cnt); |
2515 | 2591 | ||
2592 | BT_DBG("%s", hdev->name); | ||
2593 | |||
2594 | if (hdev->dev_type == HCI_AMP) | ||
2595 | type = AMP_LINK; | ||
2596 | else | ||
2597 | type = ACL_LINK; | ||
2598 | |||
2516 | while (hdev->block_cnt > 0 && | 2599 | while (hdev->block_cnt > 0 && |
2517 | (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { | 2600 | (chan = hci_chan_sent(hdev, type, &quote))) {
2518 | u32 priority = (skb_peek(&chan->data_q))->priority; | 2601 | u32 priority = (skb_peek(&chan->data_q))->priority; |
2519 | while (quote > 0 && (skb = skb_peek(&chan->data_q))) { | 2602 | while (quote > 0 && (skb = skb_peek(&chan->data_q))) { |
2520 | int blocks; | 2603 | int blocks; |
@@ -2547,14 +2630,19 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) | |||
2547 | } | 2630 | } |
2548 | 2631 | ||
2549 | if (cnt != hdev->block_cnt) | 2632 | if (cnt != hdev->block_cnt) |
2550 | hci_prio_recalculate(hdev, ACL_LINK); | 2633 | hci_prio_recalculate(hdev, type); |
2551 | } | 2634 | } |
2552 | 2635 | ||
2553 | static void hci_sched_acl(struct hci_dev *hdev) | 2636 | static void hci_sched_acl(struct hci_dev *hdev) |
2554 | { | 2637 | { |
2555 | BT_DBG("%s", hdev->name); | 2638 | BT_DBG("%s", hdev->name); |
2556 | 2639 | ||
2557 | if (!hci_conn_num(hdev, ACL_LINK)) | 2640 | /* No ACL link over BR/EDR controller */ |
2641 | if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR) | ||
2642 | return; | ||
2643 | |||
2644 | /* No AMP link over AMP controller */ | ||
2645 | if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP) | ||
2558 | return; | 2646 | return; |
2559 | 2647 | ||
2560 | switch (hdev->flow_ctl_mode) { | 2648 | switch (hdev->flow_ctl_mode) { |
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 2022b43c7353..9f5c5f244502 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c | |||
@@ -24,12 +24,13 @@ | |||
24 | 24 | ||
25 | /* Bluetooth HCI event handling. */ | 25 | /* Bluetooth HCI event handling. */ |
26 | 26 | ||
27 | #include <linux/export.h> | ||
28 | #include <asm/unaligned.h> | 27 | #include <asm/unaligned.h> |
29 | 28 | ||
30 | #include <net/bluetooth/bluetooth.h> | 29 | #include <net/bluetooth/bluetooth.h> |
31 | #include <net/bluetooth/hci_core.h> | 30 | #include <net/bluetooth/hci_core.h> |
32 | #include <net/bluetooth/mgmt.h> | 31 | #include <net/bluetooth/mgmt.h> |
32 | #include <net/bluetooth/a2mp.h> | ||
33 | #include <net/bluetooth/amp.h> | ||
33 | 34 | ||
34 | /* Handle HCI Event packets */ | 35 | /* Handle HCI Event packets */ |
35 | 36 | ||
@@ -201,6 +202,11 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) | |||
201 | BIT(HCI_PERIODIC_INQ)); | 202 | BIT(HCI_PERIODIC_INQ)); |
202 | 203 | ||
203 | hdev->discovery.state = DISCOVERY_STOPPED; | 204 | hdev->discovery.state = DISCOVERY_STOPPED; |
205 | hdev->inq_tx_power = HCI_TX_POWER_INVALID; | ||
206 | hdev->adv_tx_power = HCI_TX_POWER_INVALID; | ||
207 | |||
208 | memset(hdev->adv_data, 0, sizeof(hdev->adv_data)); | ||
209 | hdev->adv_data_len = 0; | ||
204 | } | 210 | } |
205 | 211 | ||
206 | static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) | 212 | static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) |
@@ -223,6 +229,9 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) | |||
223 | 229 | ||
224 | hci_dev_unlock(hdev); | 230 | hci_dev_unlock(hdev); |
225 | 231 | ||
232 | if (!status && !test_bit(HCI_INIT, &hdev->flags)) | ||
233 | hci_update_ad(hdev); | ||
234 | |||
226 | hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status); | 235 | hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status); |
227 | } | 236 | } |
228 | 237 | ||
@@ -438,7 +447,7 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) | |||
438 | static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) | 447 | static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) |
439 | { | 448 | { |
440 | __u8 status = *((__u8 *) skb->data); | 449 | __u8 status = *((__u8 *) skb->data); |
441 | void *sent; | 450 | struct hci_cp_write_ssp_mode *sent; |
442 | 451 | ||
443 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | 452 | BT_DBG("%s status 0x%2.2x", hdev->name, status); |
444 | 453 | ||
@@ -446,10 +455,17 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) | |||
446 | if (!sent) | 455 | if (!sent) |
447 | return; | 456 | return; |
448 | 457 | ||
458 | if (!status) { | ||
459 | if (sent->mode) | ||
460 | hdev->host_features[0] |= LMP_HOST_SSP; | ||
461 | else | ||
462 | hdev->host_features[0] &= ~LMP_HOST_SSP; | ||
463 | } | ||
464 | |||
449 | if (test_bit(HCI_MGMT, &hdev->dev_flags)) | 465 | if (test_bit(HCI_MGMT, &hdev->dev_flags)) |
450 | mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status); | 466 | mgmt_ssp_enable_complete(hdev, sent->mode, status); |
451 | else if (!status) { | 467 | else if (!status) { |
452 | if (*((u8 *) sent)) | 468 | if (sent->mode) |
453 | set_bit(HCI_SSP_ENABLED, &hdev->dev_flags); | 469 | set_bit(HCI_SSP_ENABLED, &hdev->dev_flags); |
454 | else | 470 | else |
455 | clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags); | 471 | clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags); |
@@ -458,10 +474,10 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) | |||
458 | 474 | ||
459 | static u8 hci_get_inquiry_mode(struct hci_dev *hdev) | 475 | static u8 hci_get_inquiry_mode(struct hci_dev *hdev) |
460 | { | 476 | { |
461 | if (hdev->features[6] & LMP_EXT_INQ) | 477 | if (lmp_ext_inq_capable(hdev)) |
462 | return 2; | 478 | return 2; |
463 | 479 | ||
464 | if (hdev->features[3] & LMP_RSSI_INQ) | 480 | if (lmp_inq_rssi_capable(hdev)) |
465 | return 1; | 481 | return 1; |
466 | 482 | ||
467 | if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && | 483 | if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && |
@@ -505,28 +521,30 @@ static void hci_setup_event_mask(struct hci_dev *hdev) | |||
505 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) | 521 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) |
506 | return; | 522 | return; |
507 | 523 | ||
508 | events[4] |= 0x01; /* Flow Specification Complete */ | 524 | if (lmp_bredr_capable(hdev)) { |
509 | events[4] |= 0x02; /* Inquiry Result with RSSI */ | 525 | events[4] |= 0x01; /* Flow Specification Complete */ |
510 | events[4] |= 0x04; /* Read Remote Extended Features Complete */ | 526 | events[4] |= 0x02; /* Inquiry Result with RSSI */ |
511 | events[5] |= 0x08; /* Synchronous Connection Complete */ | 527 | events[4] |= 0x04; /* Read Remote Extended Features Complete */ |
512 | events[5] |= 0x10; /* Synchronous Connection Changed */ | 528 | events[5] |= 0x08; /* Synchronous Connection Complete */ |
529 | events[5] |= 0x10; /* Synchronous Connection Changed */ | ||
530 | } | ||
513 | 531 | ||
514 | if (hdev->features[3] & LMP_RSSI_INQ) | 532 | if (lmp_inq_rssi_capable(hdev)) |
515 | events[4] |= 0x02; /* Inquiry Result with RSSI */ | 533 | events[4] |= 0x02; /* Inquiry Result with RSSI */ |
516 | 534 | ||
517 | if (lmp_sniffsubr_capable(hdev)) | 535 | if (lmp_sniffsubr_capable(hdev)) |
518 | events[5] |= 0x20; /* Sniff Subrating */ | 536 | events[5] |= 0x20; /* Sniff Subrating */ |
519 | 537 | ||
520 | if (hdev->features[5] & LMP_PAUSE_ENC) | 538 | if (lmp_pause_enc_capable(hdev)) |
521 | events[5] |= 0x80; /* Encryption Key Refresh Complete */ | 539 | events[5] |= 0x80; /* Encryption Key Refresh Complete */ |
522 | 540 | ||
523 | if (hdev->features[6] & LMP_EXT_INQ) | 541 | if (lmp_ext_inq_capable(hdev)) |
524 | events[5] |= 0x40; /* Extended Inquiry Result */ | 542 | events[5] |= 0x40; /* Extended Inquiry Result */ |
525 | 543 | ||
526 | if (lmp_no_flush_capable(hdev)) | 544 | if (lmp_no_flush_capable(hdev)) |
527 | events[7] |= 0x01; /* Enhanced Flush Complete */ | 545 | events[7] |= 0x01; /* Enhanced Flush Complete */ |
528 | 546 | ||
529 | if (hdev->features[7] & LMP_LSTO) | 547 | if (lmp_lsto_capable(hdev)) |
530 | events[6] |= 0x80; /* Link Supervision Timeout Changed */ | 548 | events[6] |= 0x80; /* Link Supervision Timeout Changed */ |
531 | 549 | ||
532 | if (lmp_ssp_capable(hdev)) { | 550 | if (lmp_ssp_capable(hdev)) { |
@@ -546,6 +564,53 @@ static void hci_setup_event_mask(struct hci_dev *hdev) | |||
546 | events[7] |= 0x20; /* LE Meta-Event */ | 564 | events[7] |= 0x20; /* LE Meta-Event */ |
547 | 565 | ||
548 | hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events); | 566 | hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events); |
567 | |||
568 | if (lmp_le_capable(hdev)) { | ||
569 | memset(events, 0, sizeof(events)); | ||
570 | events[0] = 0x1f; | ||
571 | hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK, | ||
572 | sizeof(events), events); | ||
573 | } | ||
574 | } | ||
575 | |||
576 | static void bredr_setup(struct hci_dev *hdev) | ||
577 | { | ||
578 | struct hci_cp_delete_stored_link_key cp; | ||
579 | __le16 param; | ||
580 | __u8 flt_type; | ||
581 | |||
582 | /* Read Buffer Size (ACL mtu, max pkt, etc.) */ | ||
583 | hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL); | ||
584 | |||
585 | /* Read Class of Device */ | ||
586 | hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); | ||
587 | |||
588 | /* Read Local Name */ | ||
589 | hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL); | ||
590 | |||
591 | /* Read Voice Setting */ | ||
592 | hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL); | ||
593 | |||
594 | /* Clear Event Filters */ | ||
595 | flt_type = HCI_FLT_CLEAR_ALL; | ||
596 | hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); | ||
597 | |||
598 | /* Connection accept timeout ~20 secs */ | ||
599 | param = __constant_cpu_to_le16(0x7d00); | ||
600 | hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); | ||
601 | |||
602 | bacpy(&cp.bdaddr, BDADDR_ANY); | ||
603 | cp.delete_all = 1; | ||
604 | hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp); | ||
605 | } | ||
606 | |||
607 | static void le_setup(struct hci_dev *hdev) | ||
608 | { | ||
609 | /* Read LE Buffer Size */ | ||
610 | hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); | ||
611 | |||
612 | /* Read LE Advertising Channel TX Power */ | ||
613 | hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); | ||
549 | } | 614 | } |
550 | 615 | ||
551 | static void hci_setup(struct hci_dev *hdev) | 616 | static void hci_setup(struct hci_dev *hdev) |
@@ -553,6 +618,15 @@ static void hci_setup(struct hci_dev *hdev) | |||
553 | if (hdev->dev_type != HCI_BREDR) | 618 | if (hdev->dev_type != HCI_BREDR) |
554 | return; | 619 | return; |
555 | 620 | ||
621 | /* Read BD Address */ | ||
622 | hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL); | ||
623 | |||
624 | if (lmp_bredr_capable(hdev)) | ||
625 | bredr_setup(hdev); | ||
626 | |||
627 | if (lmp_le_capable(hdev)) | ||
628 | le_setup(hdev); | ||
629 | |||
556 | hci_setup_event_mask(hdev); | 630 | hci_setup_event_mask(hdev); |
557 | 631 | ||
558 | if (hdev->hci_ver > BLUETOOTH_VER_1_1) | 632 | if (hdev->hci_ver > BLUETOOTH_VER_1_1) |
@@ -573,13 +647,13 @@ static void hci_setup(struct hci_dev *hdev) | |||
573 | } | 647 | } |
574 | } | 648 | } |
575 | 649 | ||
576 | if (hdev->features[3] & LMP_RSSI_INQ) | 650 | if (lmp_inq_rssi_capable(hdev)) |
577 | hci_setup_inquiry_mode(hdev); | 651 | hci_setup_inquiry_mode(hdev); |
578 | 652 | ||
579 | if (hdev->features[7] & LMP_INQ_TX_PWR) | 653 | if (lmp_inq_tx_pwr_capable(hdev)) |
580 | hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); | 654 | hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); |
581 | 655 | ||
582 | if (hdev->features[7] & LMP_EXTFEATURES) { | 656 | if (lmp_ext_feat_capable(hdev)) { |
583 | struct hci_cp_read_local_ext_features cp; | 657 | struct hci_cp_read_local_ext_features cp; |
584 | 658 | ||
585 | cp.page = 0x01; | 659 | cp.page = 0x01; |
@@ -626,11 +700,11 @@ static void hci_setup_link_policy(struct hci_dev *hdev) | |||
626 | 700 | ||
627 | if (lmp_rswitch_capable(hdev)) | 701 | if (lmp_rswitch_capable(hdev)) |
628 | link_policy |= HCI_LP_RSWITCH; | 702 | link_policy |= HCI_LP_RSWITCH; |
629 | if (hdev->features[0] & LMP_HOLD) | 703 | if (lmp_hold_capable(hdev)) |
630 | link_policy |= HCI_LP_HOLD; | 704 | link_policy |= HCI_LP_HOLD; |
631 | if (lmp_sniff_capable(hdev)) | 705 | if (lmp_sniff_capable(hdev)) |
632 | link_policy |= HCI_LP_SNIFF; | 706 | link_policy |= HCI_LP_SNIFF; |
633 | if (hdev->features[1] & LMP_PARK) | 707 | if (lmp_park_capable(hdev)) |
634 | link_policy |= HCI_LP_PARK; | 708 | link_policy |= HCI_LP_PARK; |
635 | 709 | ||
636 | cp.policy = cpu_to_le16(link_policy); | 710 | cp.policy = cpu_to_le16(link_policy); |
@@ -720,10 +794,10 @@ static void hci_set_le_support(struct hci_dev *hdev) | |||
720 | 794 | ||
721 | if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { | 795 | if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { |
722 | cp.le = 1; | 796 | cp.le = 1; |
723 | cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); | 797 | cp.simul = !!lmp_le_br_capable(hdev); |
724 | } | 798 | } |
725 | 799 | ||
726 | if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE)) | 800 | if (cp.le != !!lmp_host_le_capable(hdev)) |
727 | hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), | 801 | hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), |
728 | &cp); | 802 | &cp); |
729 | } | 803 | } |
@@ -846,7 +920,7 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev, | |||
846 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | 920 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); |
847 | 921 | ||
848 | if (rp->status) | 922 | if (rp->status) |
849 | return; | 923 | goto a2mp_rsp; |
850 | 924 | ||
851 | hdev->amp_status = rp->amp_status; | 925 | hdev->amp_status = rp->amp_status; |
852 | hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); | 926 | hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); |
@@ -860,6 +934,46 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev, | |||
860 | hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); | 934 | hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); |
861 | 935 | ||
862 | hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status); | 936 | hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status); |
937 | |||
938 | a2mp_rsp: | ||
939 | a2mp_send_getinfo_rsp(hdev); | ||
940 | } | ||
941 | |||
942 | static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev, | ||
943 | struct sk_buff *skb) | ||
944 | { | ||
945 | struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data; | ||
946 | struct amp_assoc *assoc = &hdev->loc_assoc; | ||
947 | size_t rem_len, frag_len; | ||
948 | |||
949 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | ||
950 | |||
951 | if (rp->status) | ||
952 | goto a2mp_rsp; | ||
953 | |||
954 | frag_len = skb->len - sizeof(*rp); | ||
955 | rem_len = __le16_to_cpu(rp->rem_len); | ||
956 | |||
957 | if (rem_len > frag_len) { | ||
958 | BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len); | ||
959 | |||
960 | memcpy(assoc->data + assoc->offset, rp->frag, frag_len); | ||
961 | assoc->offset += frag_len; | ||
962 | |||
963 | /* Read other fragments */ | ||
964 | amp_read_loc_assoc_frag(hdev, rp->phy_handle); | ||
965 | |||
966 | return; | ||
967 | } | ||
968 | |||
969 | memcpy(assoc->data + assoc->offset, rp->frag, rem_len); | ||
970 | assoc->len = assoc->offset + rem_len; | ||
971 | assoc->offset = 0; | ||
972 | |||
973 | a2mp_rsp: | ||
974 | /* Send A2MP Rsp when all fragments are received */ | ||
975 | a2mp_send_getampassoc_rsp(hdev, rp->status); | ||
976 | a2mp_send_create_phy_link_req(hdev, rp->status); | ||
863 | } | 977 | } |
864 | 978 | ||
865 | static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, | 979 | static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, |
@@ -976,6 +1090,31 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev, | |||
976 | hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status); | 1090 | hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status); |
977 | } | 1091 | } |
978 | 1092 | ||
1093 | static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, | ||
1094 | struct sk_buff *skb) | ||
1095 | { | ||
1096 | struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data; | ||
1097 | |||
1098 | BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); | ||
1099 | |||
1100 | if (!rp->status) { | ||
1101 | hdev->adv_tx_power = rp->tx_power; | ||
1102 | if (!test_bit(HCI_INIT, &hdev->flags)) | ||
1103 | hci_update_ad(hdev); | ||
1104 | } | ||
1105 | |||
1106 | hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status); | ||
1107 | } | ||
1108 | |||
1109 | static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb) | ||
1110 | { | ||
1111 | __u8 status = *((__u8 *) skb->data); | ||
1112 | |||
1113 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
1114 | |||
1115 | hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status); | ||
1116 | } | ||
1117 | |||
979 | static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) | 1118 | static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) |
980 | { | 1119 | { |
981 | struct hci_rp_user_confirm_reply *rp = (void *) skb->data; | 1120 | struct hci_rp_user_confirm_reply *rp = (void *) skb->data; |
@@ -1051,6 +1190,33 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, | |||
1051 | hci_dev_unlock(hdev); | 1190 | hci_dev_unlock(hdev); |
1052 | } | 1191 | } |
1053 | 1192 | ||
1193 | static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) | ||
1194 | { | ||
1195 | __u8 *sent, status = *((__u8 *) skb->data); | ||
1196 | |||
1197 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
1198 | |||
1199 | sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE); | ||
1200 | if (!sent) | ||
1201 | return; | ||
1202 | |||
1203 | hci_dev_lock(hdev); | ||
1204 | |||
1205 | if (!status) { | ||
1206 | if (*sent) | ||
1207 | set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags); | ||
1208 | else | ||
1209 | clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags); | ||
1210 | } | ||
1211 | |||
1212 | hci_dev_unlock(hdev); | ||
1213 | |||
1214 | if (!test_bit(HCI_INIT, &hdev->flags)) | ||
1215 | hci_update_ad(hdev); | ||
1216 | |||
1217 | hci_req_complete(hdev, HCI_OP_LE_SET_ADV_ENABLE, status); | ||
1218 | } | ||
1219 | |||
1054 | static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) | 1220 | static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) |
1055 | { | 1221 | { |
1056 | __u8 status = *((__u8 *) skb->data); | 1222 | __u8 status = *((__u8 *) skb->data); |
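hci_cc_le_set_adv_enable() above cannot tell from the Command Complete event alone whether advertising was being switched on or off, because the event only carries a status byte; it therefore reads back the parameter of the command it completes via hci_sent_cmd_data() and flips HCI_LE_PERIPHERAL accordingly. A user-space sketch of that "cache the last sent command, consult it on completion" pattern, with hypothetical types and a single-slot cache in place of hdev->sent_cmd:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sent_cmd {
        uint16_t opcode;
        uint8_t  param[32];
        size_t   plen;
};

static struct sent_cmd last_sent;
static int adv_enabled;         /* stands in for the HCI_LE_PERIPHERAL flag */

static void remember_cmd(uint16_t opcode, const void *param, size_t plen)
{
        last_sent.opcode = opcode;
        last_sent.plen = plen < sizeof(last_sent.param) ?
                                plen : sizeof(last_sent.param);
        memcpy(last_sent.param, param, last_sent.plen);
}

static const uint8_t *sent_cmd_data(uint16_t opcode)
{
        return last_sent.opcode == opcode ? last_sent.param : NULL;
}

/* Completion handler: the requested enable/disable value is recovered
 * from the cached command, not from the event itself. */
static void cc_set_adv_enable(uint16_t opcode, uint8_t status)
{
        const uint8_t *sent = sent_cmd_data(opcode);

        if (!sent)
                return;

        if (!status)
                adv_enabled = *sent;

        printf("opcode 0x%4.4x status 0x%2.2x adv %u\n",
               opcode, status, (unsigned int)*sent);
}

Usage would be remember_cmd(opcode, &enable, 1) just before the command goes out, then cc_set_adv_enable(opcode, status) when its completion arrives.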
@@ -1165,6 +1331,11 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev, | |||
1165 | hdev->host_features[0] |= LMP_HOST_LE; | 1331 | hdev->host_features[0] |= LMP_HOST_LE; |
1166 | else | 1332 | else |
1167 | hdev->host_features[0] &= ~LMP_HOST_LE; | 1333 | hdev->host_features[0] &= ~LMP_HOST_LE; |
1334 | |||
1335 | if (sent->simul) | ||
1336 | hdev->host_features[0] |= LMP_HOST_LE_BREDR; | ||
1337 | else | ||
1338 | hdev->host_features[0] &= ~LMP_HOST_LE_BREDR; | ||
1168 | } | 1339 | } |
1169 | 1340 | ||
1170 | if (test_bit(HCI_MGMT, &hdev->dev_flags) && | 1341 | if (test_bit(HCI_MGMT, &hdev->dev_flags) && |
@@ -1174,6 +1345,20 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev, | |||
1174 | hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status); | 1345 | hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status); |
1175 | } | 1346 | } |
1176 | 1347 | ||
1348 | static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev, | ||
1349 | struct sk_buff *skb) | ||
1350 | { | ||
1351 | struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data; | ||
1352 | |||
1353 | BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x", | ||
1354 | hdev->name, rp->status, rp->phy_handle); | ||
1355 | |||
1356 | if (rp->status) | ||
1357 | return; | ||
1358 | |||
1359 | amp_write_rem_assoc_continue(hdev, rp->phy_handle); | ||
1360 | } | ||
1361 | |||
1177 | static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) | 1362 | static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) |
1178 | { | 1363 | { |
1179 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | 1364 | BT_DBG("%s status 0x%2.2x", hdev->name, status); |
@@ -1210,7 +1395,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) | |||
1210 | 1395 | ||
1211 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); | 1396 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); |
1212 | 1397 | ||
1213 | BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn); | 1398 | BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn); |
1214 | 1399 | ||
1215 | if (status) { | 1400 | if (status) { |
1216 | if (conn && conn->state == BT_CONNECT) { | 1401 | if (conn && conn->state == BT_CONNECT) { |
@@ -1639,8 +1824,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status) | |||
1639 | return; | 1824 | return; |
1640 | } | 1825 | } |
1641 | 1826 | ||
1642 | BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&conn->dst), | 1827 | BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn); |
1643 | conn); | ||
1644 | 1828 | ||
1645 | conn->state = BT_CLOSED; | 1829 | conn->state = BT_CLOSED; |
1646 | mgmt_connect_failed(hdev, &conn->dst, conn->type, | 1830 | mgmt_connect_failed(hdev, &conn->dst, conn->type, |
@@ -1657,6 +1841,52 @@ static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) | |||
1657 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | 1841 | BT_DBG("%s status 0x%2.2x", hdev->name, status); |
1658 | } | 1842 | } |
1659 | 1843 | ||
1844 | static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status) | ||
1845 | { | ||
1846 | struct hci_cp_create_phy_link *cp; | ||
1847 | |||
1848 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
1849 | |||
1850 | cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK); | ||
1851 | if (!cp) | ||
1852 | return; | ||
1853 | |||
1854 | hci_dev_lock(hdev); | ||
1855 | |||
1856 | if (status) { | ||
1857 | struct hci_conn *hcon; | ||
1858 | |||
1859 | hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle); | ||
1860 | if (hcon) | ||
1861 | hci_conn_del(hcon); | ||
1862 | } else { | ||
1863 | amp_write_remote_assoc(hdev, cp->phy_handle); | ||
1864 | } | ||
1865 | |||
1866 | hci_dev_unlock(hdev); | ||
1867 | } | ||
1868 | |||
1869 | static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status) | ||
1870 | { | ||
1871 | struct hci_cp_accept_phy_link *cp; | ||
1872 | |||
1873 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
1874 | |||
1875 | if (status) | ||
1876 | return; | ||
1877 | |||
1878 | cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK); | ||
1879 | if (!cp) | ||
1880 | return; | ||
1881 | |||
1882 | amp_write_remote_assoc(hdev, cp->phy_handle); | ||
1883 | } | ||
1884 | |||
1885 | static void hci_cs_create_logical_link(struct hci_dev *hdev, u8 status) | ||
1886 | { | ||
1887 | BT_DBG("%s status 0x%2.2x", hdev->name, status); | ||
1888 | } | ||
1889 | |||
1660 | static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 1890 | static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
1661 | { | 1891 | { |
1662 | __u8 status = *((__u8 *) skb->data); | 1892 | __u8 status = *((__u8 *) skb->data); |
@@ -1822,7 +2052,7 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
1822 | struct hci_ev_conn_request *ev = (void *) skb->data; | 2052 | struct hci_ev_conn_request *ev = (void *) skb->data; |
1823 | int mask = hdev->link_mode; | 2053 | int mask = hdev->link_mode; |
1824 | 2054 | ||
1825 | BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr), | 2055 | BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr, |
1826 | ev->link_type); | 2056 | ev->link_type); |
1827 | 2057 | ||
1828 | mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); | 2058 | mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); |
@@ -2314,6 +2544,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2314 | hci_cc_read_local_amp_info(hdev, skb); | 2544 | hci_cc_read_local_amp_info(hdev, skb); |
2315 | break; | 2545 | break; |
2316 | 2546 | ||
2547 | case HCI_OP_READ_LOCAL_AMP_ASSOC: | ||
2548 | hci_cc_read_local_amp_assoc(hdev, skb); | ||
2549 | break; | ||
2550 | |||
2317 | case HCI_OP_DELETE_STORED_LINK_KEY: | 2551 | case HCI_OP_DELETE_STORED_LINK_KEY: |
2318 | hci_cc_delete_stored_link_key(hdev, skb); | 2552 | hci_cc_delete_stored_link_key(hdev, skb); |
2319 | break; | 2553 | break; |
@@ -2350,6 +2584,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2350 | hci_cc_le_read_buffer_size(hdev, skb); | 2584 | hci_cc_le_read_buffer_size(hdev, skb); |
2351 | break; | 2585 | break; |
2352 | 2586 | ||
2587 | case HCI_OP_LE_READ_ADV_TX_POWER: | ||
2588 | hci_cc_le_read_adv_tx_power(hdev, skb); | ||
2589 | break; | ||
2590 | |||
2591 | case HCI_OP_LE_SET_EVENT_MASK: | ||
2592 | hci_cc_le_set_event_mask(hdev, skb); | ||
2593 | break; | ||
2594 | |||
2353 | case HCI_OP_USER_CONFIRM_REPLY: | 2595 | case HCI_OP_USER_CONFIRM_REPLY: |
2354 | hci_cc_user_confirm_reply(hdev, skb); | 2596 | hci_cc_user_confirm_reply(hdev, skb); |
2355 | break; | 2597 | break; |
@@ -2370,6 +2612,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2370 | hci_cc_le_set_scan_param(hdev, skb); | 2612 | hci_cc_le_set_scan_param(hdev, skb); |
2371 | break; | 2613 | break; |
2372 | 2614 | ||
2615 | case HCI_OP_LE_SET_ADV_ENABLE: | ||
2616 | hci_cc_le_set_adv_enable(hdev, skb); | ||
2617 | break; | ||
2618 | |||
2373 | case HCI_OP_LE_SET_SCAN_ENABLE: | 2619 | case HCI_OP_LE_SET_SCAN_ENABLE: |
2374 | hci_cc_le_set_scan_enable(hdev, skb); | 2620 | hci_cc_le_set_scan_enable(hdev, skb); |
2375 | break; | 2621 | break; |
@@ -2386,6 +2632,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2386 | hci_cc_write_le_host_supported(hdev, skb); | 2632 | hci_cc_write_le_host_supported(hdev, skb); |
2387 | break; | 2633 | break; |
2388 | 2634 | ||
2635 | case HCI_OP_WRITE_REMOTE_AMP_ASSOC: | ||
2636 | hci_cc_write_remote_amp_assoc(hdev, skb); | ||
2637 | break; | ||
2638 | |||
2389 | default: | 2639 | default: |
2390 | BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); | 2640 | BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); |
2391 | break; | 2641 | break; |
@@ -2467,6 +2717,18 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2467 | hci_cs_le_start_enc(hdev, ev->status); | 2717 | hci_cs_le_start_enc(hdev, ev->status); |
2468 | break; | 2718 | break; |
2469 | 2719 | ||
2720 | case HCI_OP_CREATE_PHY_LINK: | ||
2721 | hci_cs_create_phylink(hdev, ev->status); | ||
2722 | break; | ||
2723 | |||
2724 | case HCI_OP_ACCEPT_PHY_LINK: | ||
2725 | hci_cs_accept_phylink(hdev, ev->status); | ||
2726 | break; | ||
2727 | |||
2728 | case HCI_OP_CREATE_LOGICAL_LINK: | ||
2729 | hci_cs_create_logical_link(hdev, ev->status); | ||
2730 | break; | ||
2731 | |||
2470 | default: | 2732 | default: |
2471 | BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); | 2733 | BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); |
2472 | break; | 2734 | break; |
@@ -2574,6 +2836,27 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2574 | queue_work(hdev->workqueue, &hdev->tx_work); | 2836 | queue_work(hdev->workqueue, &hdev->tx_work); |
2575 | } | 2837 | } |
2576 | 2838 | ||
2839 | static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, | ||
2840 | __u16 handle) | ||
2841 | { | ||
2842 | struct hci_chan *chan; | ||
2843 | |||
2844 | switch (hdev->dev_type) { | ||
2845 | case HCI_BREDR: | ||
2846 | return hci_conn_hash_lookup_handle(hdev, handle); | ||
2847 | case HCI_AMP: | ||
2848 | chan = hci_chan_lookup_handle(hdev, handle); | ||
2849 | if (chan) | ||
2850 | return chan->conn; | ||
2851 | break; | ||
2852 | default: | ||
2853 | BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type); | ||
2854 | break; | ||
2855 | } | ||
2856 | |||
2857 | return NULL; | ||
2858 | } | ||
2859 | |||
2577 | static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) | 2860 | static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) |
2578 | { | 2861 | { |
2579 | struct hci_ev_num_comp_blocks *ev = (void *) skb->data; | 2862 | struct hci_ev_num_comp_blocks *ev = (void *) skb->data; |
@@ -2595,13 +2878,13 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2595 | 2878 | ||
2596 | for (i = 0; i < ev->num_hndl; i++) { | 2879 | for (i = 0; i < ev->num_hndl; i++) { |
2597 | struct hci_comp_blocks_info *info = &ev->handles[i]; | 2880 | struct hci_comp_blocks_info *info = &ev->handles[i]; |
2598 | struct hci_conn *conn; | 2881 | struct hci_conn *conn = NULL; |
2599 | __u16 handle, block_count; | 2882 | __u16 handle, block_count; |
2600 | 2883 | ||
2601 | handle = __le16_to_cpu(info->handle); | 2884 | handle = __le16_to_cpu(info->handle); |
2602 | block_count = __le16_to_cpu(info->blocks); | 2885 | block_count = __le16_to_cpu(info->blocks); |
2603 | 2886 | ||
2604 | conn = hci_conn_hash_lookup_handle(hdev, handle); | 2887 | conn = __hci_conn_lookup_handle(hdev, handle); |
2605 | if (!conn) | 2888 | if (!conn) |
2606 | continue; | 2889 | continue; |
2607 | 2890 | ||
@@ -2609,6 +2892,7 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2609 | 2892 | ||
2610 | switch (conn->type) { | 2893 | switch (conn->type) { |
2611 | case ACL_LINK: | 2894 | case ACL_LINK: |
2895 | case AMP_LINK: | ||
2612 | hdev->block_cnt += block_count; | 2896 | hdev->block_cnt += block_count; |
2613 | if (hdev->block_cnt > hdev->num_blocks) | 2897 | if (hdev->block_cnt > hdev->num_blocks) |
2614 | hdev->block_cnt = hdev->num_blocks; | 2898 | hdev->block_cnt = hdev->num_blocks; |
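The Number Of Completed Data Blocks path above now returns transmit credits either to a BR/EDR connection looked up by connection handle or, on an AMP controller, to the connection behind a logical-link handle, and it clamps the device-wide block count at the controller's maximum. A compact sketch of that credit return with hypothetical counter structures:

#include <stdint.h>

struct dev_credits {
        unsigned int block_cnt;         /* blocks currently available */
        unsigned int num_blocks;        /* maximum the controller advertised */
};

struct conn_credits {
        unsigned int sent;              /* blocks outstanding on this link */
};

/* Return completed blocks to the per-connection and per-device counters,
 * never letting the device total exceed the advertised maximum. */
static void return_blocks(struct dev_credits *dev, struct conn_credits *conn,
                          unsigned int completed)
{
        if (conn->sent >= completed)
                conn->sent -= completed;
        else
                conn->sent = 0;         /* sketch-only underflow guard */

        dev->block_cnt += completed;
        if (dev->block_cnt > dev->num_blocks)
                dev->block_cnt = dev->num_blocks;
}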
@@ -2705,13 +2989,13 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
2705 | 2989 | ||
2706 | key = hci_find_link_key(hdev, &ev->bdaddr); | 2990 | key = hci_find_link_key(hdev, &ev->bdaddr); |
2707 | if (!key) { | 2991 | if (!key) { |
2708 | BT_DBG("%s link key not found for %s", hdev->name, | 2992 | BT_DBG("%s link key not found for %pMR", hdev->name, |
2709 | batostr(&ev->bdaddr)); | 2993 | &ev->bdaddr); |
2710 | goto not_found; | 2994 | goto not_found; |
2711 | } | 2995 | } |
2712 | 2996 | ||
2713 | BT_DBG("%s found key type %u for %s", hdev->name, key->type, | 2997 | BT_DBG("%s found key type %u for %pMR", hdev->name, key->type, |
2714 | batostr(&ev->bdaddr)); | 2998 | &ev->bdaddr); |
2715 | 2999 | ||
2716 | if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && | 3000 | if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && |
2717 | key->type == HCI_LK_DEBUG_COMBINATION) { | 3001 | key->type == HCI_LK_DEBUG_COMBINATION) { |
@@ -3419,6 +3703,130 @@ unlock: | |||
3419 | hci_dev_unlock(hdev); | 3703 | hci_dev_unlock(hdev); |
3420 | } | 3704 | } |
3421 | 3705 | ||
3706 | static void hci_phy_link_complete_evt(struct hci_dev *hdev, | ||
3707 | struct sk_buff *skb) | ||
3708 | { | ||
3709 | struct hci_ev_phy_link_complete *ev = (void *) skb->data; | ||
3710 | struct hci_conn *hcon, *bredr_hcon; | ||
3711 | |||
3712 | BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle, | ||
3713 | ev->status); | ||
3714 | |||
3715 | hci_dev_lock(hdev); | ||
3716 | |||
3717 | hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); | ||
3718 | if (!hcon) { | ||
3719 | hci_dev_unlock(hdev); | ||
3720 | return; | ||
3721 | } | ||
3722 | |||
3723 | if (ev->status) { | ||
3724 | hci_conn_del(hcon); | ||
3725 | hci_dev_unlock(hdev); | ||
3726 | return; | ||
3727 | } | ||
3728 | |||
3729 | bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; | ||
3730 | |||
3731 | hcon->state = BT_CONNECTED; | ||
3732 | bacpy(&hcon->dst, &bredr_hcon->dst); | ||
3733 | |||
3734 | hci_conn_hold(hcon); | ||
3735 | hcon->disc_timeout = HCI_DISCONN_TIMEOUT; | ||
3736 | hci_conn_put(hcon); | ||
3737 | |||
3738 | hci_conn_hold_device(hcon); | ||
3739 | hci_conn_add_sysfs(hcon); | ||
3740 | |||
3741 | amp_physical_cfm(bredr_hcon, hcon); | ||
3742 | |||
3743 | hci_dev_unlock(hdev); | ||
3744 | } | ||
3745 | |||
3746 | static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
3747 | { | ||
3748 | struct hci_ev_logical_link_complete *ev = (void *) skb->data; | ||
3749 | struct hci_conn *hcon; | ||
3750 | struct hci_chan *hchan; | ||
3751 | struct amp_mgr *mgr; | ||
3752 | |||
3753 | BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", | ||
3754 | hdev->name, le16_to_cpu(ev->handle), ev->phy_handle, | ||
3755 | ev->status); | ||
3756 | |||
3757 | hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); | ||
3758 | if (!hcon) | ||
3759 | return; | ||
3760 | |||
3761 | /* Create AMP hchan */ | ||
3762 | hchan = hci_chan_create(hcon); | ||
3763 | if (!hchan) | ||
3764 | return; | ||
3765 | |||
3766 | hchan->handle = le16_to_cpu(ev->handle); | ||
3767 | |||
3768 | BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); | ||
3769 | |||
3770 | mgr = hcon->amp_mgr; | ||
3771 | if (mgr && mgr->bredr_chan) { | ||
3772 | struct l2cap_chan *bredr_chan = mgr->bredr_chan; | ||
3773 | |||
3774 | l2cap_chan_lock(bredr_chan); | ||
3775 | |||
3776 | bredr_chan->conn->mtu = hdev->block_mtu; | ||
3777 | l2cap_logical_cfm(bredr_chan, hchan, 0); | ||
3778 | hci_conn_hold(hcon); | ||
3779 | |||
3780 | l2cap_chan_unlock(bredr_chan); | ||
3781 | } | ||
3782 | } | ||
3783 | |||
3784 | static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, | ||
3785 | struct sk_buff *skb) | ||
3786 | { | ||
3787 | struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data; | ||
3788 | struct hci_chan *hchan; | ||
3789 | |||
3790 | BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name, | ||
3791 | le16_to_cpu(ev->handle), ev->status); | ||
3792 | |||
3793 | if (ev->status) | ||
3794 | return; | ||
3795 | |||
3796 | hci_dev_lock(hdev); | ||
3797 | |||
3798 | hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); | ||
3799 | if (!hchan) | ||
3800 | goto unlock; | ||
3801 | |||
3802 | amp_destroy_logical_link(hchan, ev->reason); | ||
3803 | |||
3804 | unlock: | ||
3805 | hci_dev_unlock(hdev); | ||
3806 | } | ||
3807 | |||
3808 | static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, | ||
3809 | struct sk_buff *skb) | ||
3810 | { | ||
3811 | struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data; | ||
3812 | struct hci_conn *hcon; | ||
3813 | |||
3814 | BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); | ||
3815 | |||
3816 | if (ev->status) | ||
3817 | return; | ||
3818 | |||
3819 | hci_dev_lock(hdev); | ||
3820 | |||
3821 | hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); | ||
3822 | if (hcon) { | ||
3823 | hcon->state = BT_CLOSED; | ||
3824 | hci_conn_del(hcon); | ||
3825 | } | ||
3826 | |||
3827 | hci_dev_unlock(hdev); | ||
3828 | } | ||
3829 | |||
3422 | static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | 3830 | static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) |
3423 | { | 3831 | { |
3424 | struct hci_ev_le_conn_complete *ev = (void *) skb->data; | 3832 | struct hci_ev_le_conn_complete *ev = (void *) skb->data; |
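Taken together, the four new handlers above cover the AMP link lifecycle: Physical Link Complete either deletes the connection on error or marks it connected and hands it to amp_physical_cfm(), Logical Link Complete attaches an hci_chan and confirms it to L2CAP, and the two disconnect events unwind the logical and physical links again. A reduced sketch of that ordering as a state machine, with hypothetical state and event names and a deliberate simplification that any failed event resets to idle:

enum amp_link_state {
        AMP_IDLE,
        AMP_PHY_CONNECTED,      /* after Physical Link Complete, status 0 */
        AMP_LOG_CONNECTED,      /* after Logical Link Complete, status 0 */
};

enum amp_link_event {
        EV_PHY_COMPLETE,
        EV_LOG_COMPLETE,
        EV_LOG_DISCONN,
        EV_PHY_DISCONN,
};

static enum amp_link_state amp_next_state(enum amp_link_state s,
                                          enum amp_link_event ev, int status)
{
        if (status)                     /* sketch: any failure resets to idle */
                return AMP_IDLE;

        switch (ev) {
        case EV_PHY_COMPLETE:
                return s == AMP_IDLE ? AMP_PHY_CONNECTED : s;
        case EV_LOG_COMPLETE:
                return s == AMP_PHY_CONNECTED ? AMP_LOG_CONNECTED : s;
        case EV_LOG_DISCONN:
                return s == AMP_LOG_CONNECTED ? AMP_PHY_CONNECTED : s;
        case EV_PHY_DISCONN:
                return AMP_IDLE;
        }
        return s;
}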
@@ -3558,6 +3966,22 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
3558 | } | 3966 | } |
3559 | } | 3967 | } |
3560 | 3968 | ||
3969 | static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb) | ||
3970 | { | ||
3971 | struct hci_ev_channel_selected *ev = (void *) skb->data; | ||
3972 | struct hci_conn *hcon; | ||
3973 | |||
3974 | BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle); | ||
3975 | |||
3976 | skb_pull(skb, sizeof(*ev)); | ||
3977 | |||
3978 | hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); | ||
3979 | if (!hcon) | ||
3980 | return; | ||
3981 | |||
3982 | amp_read_loc_assoc_final_data(hdev, hcon); | ||
3983 | } | ||
3984 | |||
3561 | void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | 3985 | void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) |
3562 | { | 3986 | { |
3563 | struct hci_event_hdr *hdr = (void *) skb->data; | 3987 | struct hci_event_hdr *hdr = (void *) skb->data; |
@@ -3722,10 +4146,30 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) | |||
3722 | hci_le_meta_evt(hdev, skb); | 4146 | hci_le_meta_evt(hdev, skb); |
3723 | break; | 4147 | break; |
3724 | 4148 | ||
4149 | case HCI_EV_CHANNEL_SELECTED: | ||
4150 | hci_chan_selected_evt(hdev, skb); | ||
4151 | break; | ||
4152 | |||
3725 | case HCI_EV_REMOTE_OOB_DATA_REQUEST: | 4153 | case HCI_EV_REMOTE_OOB_DATA_REQUEST: |
3726 | hci_remote_oob_data_request_evt(hdev, skb); | 4154 | hci_remote_oob_data_request_evt(hdev, skb); |
3727 | break; | 4155 | break; |
3728 | 4156 | ||
4157 | case HCI_EV_PHY_LINK_COMPLETE: | ||
4158 | hci_phy_link_complete_evt(hdev, skb); | ||
4159 | break; | ||
4160 | |||
4161 | case HCI_EV_LOGICAL_LINK_COMPLETE: | ||
4162 | hci_loglink_complete_evt(hdev, skb); | ||
4163 | break; | ||
4164 | |||
4165 | case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE: | ||
4166 | hci_disconn_loglink_complete_evt(hdev, skb); | ||
4167 | break; | ||
4168 | |||
4169 | case HCI_EV_DISCONN_PHY_LINK_COMPLETE: | ||
4170 | hci_disconn_phylink_complete_evt(hdev, skb); | ||
4171 | break; | ||
4172 | |||
3729 | case HCI_EV_NUM_COMP_BLOCKS: | 4173 | case HCI_EV_NUM_COMP_BLOCKS: |
3730 | hci_num_comp_blocks_evt(hdev, skb); | 4174 | hci_num_comp_blocks_evt(hdev, skb); |
3731 | break; | 4175 | break; |
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index a20e61c3653d..55cceee02a84 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
@@ -38,7 +38,7 @@ static ssize_t show_link_address(struct device *dev, | |||
38 | struct device_attribute *attr, char *buf) | 38 | struct device_attribute *attr, char *buf) |
39 | { | 39 | { |
40 | struct hci_conn *conn = to_hci_conn(dev); | 40 | struct hci_conn *conn = to_hci_conn(dev); |
41 | return sprintf(buf, "%s\n", batostr(&conn->dst)); | 41 | return sprintf(buf, "%pMR\n", &conn->dst); |
42 | } | 42 | } |
43 | 43 | ||
44 | static ssize_t show_link_features(struct device *dev, | 44 | static ssize_t show_link_features(struct device *dev, |
@@ -224,7 +224,7 @@ static ssize_t show_address(struct device *dev, | |||
224 | struct device_attribute *attr, char *buf) | 224 | struct device_attribute *attr, char *buf) |
225 | { | 225 | { |
226 | struct hci_dev *hdev = to_hci_dev(dev); | 226 | struct hci_dev *hdev = to_hci_dev(dev); |
227 | return sprintf(buf, "%s\n", batostr(&hdev->bdaddr)); | 227 | return sprintf(buf, "%pMR\n", &hdev->bdaddr); |
228 | } | 228 | } |
229 | 229 | ||
230 | static ssize_t show_features(struct device *dev, | 230 | static ssize_t show_features(struct device *dev, |
@@ -406,8 +406,8 @@ static int inquiry_cache_show(struct seq_file *f, void *p) | |||
406 | 406 | ||
407 | list_for_each_entry(e, &cache->all, all) { | 407 | list_for_each_entry(e, &cache->all, all) { |
408 | struct inquiry_data *data = &e->data; | 408 | struct inquiry_data *data = &e->data; |
409 | seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", | 409 | seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", |
410 | batostr(&data->bdaddr), | 410 | &data->bdaddr, |
411 | data->pscan_rep_mode, data->pscan_period_mode, | 411 | data->pscan_rep_mode, data->pscan_period_mode, |
412 | data->pscan_mode, data->dev_class[2], | 412 | data->pscan_mode, data->dev_class[2], |
413 | data->dev_class[1], data->dev_class[0], | 413 | data->dev_class[1], data->dev_class[0], |
@@ -440,7 +440,7 @@ static int blacklist_show(struct seq_file *f, void *p) | |||
440 | hci_dev_lock(hdev); | 440 | hci_dev_lock(hdev); |
441 | 441 | ||
442 | list_for_each_entry(b, &hdev->blacklist, list) | 442 | list_for_each_entry(b, &hdev->blacklist, list) |
443 | seq_printf(f, "%s\n", batostr(&b->bdaddr)); | 443 | seq_printf(f, "%pMR\n", &b->bdaddr); |
444 | 444 | ||
445 | hci_dev_unlock(hdev); | 445 | hci_dev_unlock(hdev); |
446 | 446 | ||
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index ccd985da6518..0c0028463fa3 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c | |||
@@ -932,8 +932,12 @@ static int hidp_setup_hid(struct hidp_session *session, | |||
932 | hid->country = req->country; | 932 | hid->country = req->country; |
933 | 933 | ||
934 | strncpy(hid->name, req->name, 128); | 934 | strncpy(hid->name, req->name, 128); |
935 | strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64); | 935 | |
936 | strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64); | 936 | snprintf(hid->phys, sizeof(hid->phys), "%pMR", |
937 | &bt_sk(session->ctrl_sock->sk)->src); | ||
938 | |||
939 | snprintf(hid->uniq, sizeof(hid->uniq), "%pMR", | ||
940 | &bt_sk(session->ctrl_sock->sk)->dst); | ||
937 | 941 | ||
938 | hid->dev.parent = &session->conn->dev; | 942 | hid->dev.parent = &session->conn->dev; |
939 | hid->ll_driver = &hidp_hid_driver; | 943 | hid->ll_driver = &hidp_hid_driver; |
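Several hunks in this series replace batostr() with the %pMR printk extension, which renders a bdaddr_t with its six little-endian bytes reversed into the familiar colon-separated order. %pMR only exists inside the kernel's vsnprintf; a user-space approximation of what these call sites now print might look like the following, where the helper name and sample address are illustrative only:

#include <stdint.h>
#include <stdio.h>

typedef struct {
        uint8_t b[6];           /* bdaddr_t stores the address little-endian */
} bdaddr_t;

/* Print a Bluetooth device address roughly the way %pMR renders it:
 * byte 5 first, lowercase hex, colon separated. */
static int bdaddr_to_str(const bdaddr_t *ba, char *buf, size_t size)
{
        return snprintf(buf, size, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
                        ba->b[5], ba->b[4], ba->b[3],
                        ba->b[2], ba->b[1], ba->b[0]);
}

int main(void)
{
        bdaddr_t ba = { .b = { 0xcb, 0x01, 0x23, 0x8c, 0x5d, 0x00 } };
        char str[18];

        bdaddr_to_str(&ba, str, sizeof(str));
        printf("%s\n", str);    /* prints 00:5d:8c:23:01:cb */
        return 0;
}

Moving to %pMR also lets callers like hidp_setup_hid() switch from strncpy of a static buffer to snprintf, as the hunk above shows.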
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index a91239dcda41..b52f66d22437 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <net/bluetooth/l2cap.h> | 38 | #include <net/bluetooth/l2cap.h> |
39 | #include <net/bluetooth/smp.h> | 39 | #include <net/bluetooth/smp.h> |
40 | #include <net/bluetooth/a2mp.h> | 40 | #include <net/bluetooth/a2mp.h> |
41 | #include <net/bluetooth/amp.h> | ||
41 | 42 | ||
42 | bool disable_ertm; | 43 | bool disable_ertm; |
43 | 44 | ||
@@ -48,19 +49,20 @@ static LIST_HEAD(chan_list); | |||
48 | static DEFINE_RWLOCK(chan_list_lock); | 49 | static DEFINE_RWLOCK(chan_list_lock); |
49 | 50 | ||
50 | static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, | 51 | static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, |
51 | u8 code, u8 ident, u16 dlen, void *data); | 52 | u8 code, u8 ident, u16 dlen, void *data); |
52 | static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, | 53 | static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, |
53 | void *data); | 54 | void *data); |
54 | static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); | 55 | static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); |
55 | static void l2cap_send_disconn_req(struct l2cap_conn *conn, | 56 | static void l2cap_send_disconn_req(struct l2cap_conn *conn, |
56 | struct l2cap_chan *chan, int err); | 57 | struct l2cap_chan *chan, int err); |
57 | 58 | ||
58 | static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, | 59 | static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, |
59 | struct sk_buff_head *skbs, u8 event); | 60 | struct sk_buff_head *skbs, u8 event); |
60 | 61 | ||
61 | /* ---- L2CAP channels ---- */ | 62 | /* ---- L2CAP channels ---- */ |
62 | 63 | ||
63 | static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) | 64 | static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, |
65 | u16 cid) | ||
64 | { | 66 | { |
65 | struct l2cap_chan *c; | 67 | struct l2cap_chan *c; |
66 | 68 | ||
@@ -71,7 +73,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 | |||
71 | return NULL; | 73 | return NULL; |
72 | } | 74 | } |
73 | 75 | ||
74 | static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) | 76 | static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, |
77 | u16 cid) | ||
75 | { | 78 | { |
76 | struct l2cap_chan *c; | 79 | struct l2cap_chan *c; |
77 | 80 | ||
@@ -84,7 +87,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 | |||
84 | 87 | ||
85 | /* Find channel with given SCID. | 88 | /* Find channel with given SCID. |
86 | * Returns locked channel. */ | 89 | * Returns locked channel. */ |
87 | static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) | 90 | static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, |
91 | u16 cid) | ||
88 | { | 92 | { |
89 | struct l2cap_chan *c; | 93 | struct l2cap_chan *c; |
90 | 94 | ||
@@ -97,7 +101,25 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 ci | |||
97 | return c; | 101 | return c; |
98 | } | 102 | } |
99 | 103 | ||
100 | static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) | 104 | /* Find channel with given DCID. |
105 | * Returns locked channel. | ||
106 | */ | ||
107 | static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn, | ||
108 | u16 cid) | ||
109 | { | ||
110 | struct l2cap_chan *c; | ||
111 | |||
112 | mutex_lock(&conn->chan_lock); | ||
113 | c = __l2cap_get_chan_by_dcid(conn, cid); | ||
114 | if (c) | ||
115 | l2cap_chan_lock(c); | ||
116 | mutex_unlock(&conn->chan_lock); | ||
117 | |||
118 | return c; | ||
119 | } | ||
120 | |||
121 | static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, | ||
122 | u8 ident) | ||
101 | { | 123 | { |
102 | struct l2cap_chan *c; | 124 | struct l2cap_chan *c; |
103 | 125 | ||
@@ -108,6 +130,20 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 | |||
108 | return NULL; | 130 | return NULL; |
109 | } | 131 | } |
110 | 132 | ||
133 | static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, | ||
134 | u8 ident) | ||
135 | { | ||
136 | struct l2cap_chan *c; | ||
137 | |||
138 | mutex_lock(&conn->chan_lock); | ||
139 | c = __l2cap_get_chan_by_ident(conn, ident); | ||
140 | if (c) | ||
141 | l2cap_chan_lock(c); | ||
142 | mutex_unlock(&conn->chan_lock); | ||
143 | |||
144 | return c; | ||
145 | } | ||
146 | |||
111 | static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src) | 147 | static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src) |
112 | { | 148 | { |
113 | struct l2cap_chan *c; | 149 | struct l2cap_chan *c; |
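The new l2cap_get_chan_by_dcid() and l2cap_get_chan_by_ident() helpers follow the same discipline as the existing SCID lookup: take the connection's channel-list lock, find the entry, lock the channel itself, then drop the list lock so the caller ends up holding only the channel. A generic sketch of that hand-over-hand locking with pthreads and hypothetical types:

#include <pthread.h>
#include <stddef.h>

struct chan {
        unsigned int id;
        pthread_mutex_t lock;   /* per-channel lock, held by the caller on return */
        struct chan *next;
};

struct conn {
        pthread_mutex_t chan_lock;      /* protects the list itself */
        struct chan *chans;
};

/* Look up a channel by id and return it locked; the list lock is only
 * held for the duration of the walk. */
static struct chan *get_chan_by_id(struct conn *conn, unsigned int id)
{
        struct chan *c;

        pthread_mutex_lock(&conn->chan_lock);
        for (c = conn->chans; c; c = c->next) {
                if (c->id == id) {
                        pthread_mutex_lock(&c->lock);
                        break;
                }
        }
        pthread_mutex_unlock(&conn->chan_lock);

        return c;
}

Locking the channel before releasing the list lock is what keeps the returned pointer from being freed out from under the caller.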
@@ -178,7 +214,7 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn) | |||
178 | static void __l2cap_state_change(struct l2cap_chan *chan, int state) | 214 | static void __l2cap_state_change(struct l2cap_chan *chan, int state) |
179 | { | 215 | { |
180 | BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state), | 216 | BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state), |
181 | state_to_string(state)); | 217 | state_to_string(state)); |
182 | 218 | ||
183 | chan->state = state; | 219 | chan->state = state; |
184 | chan->ops->state_change(chan, state); | 220 | chan->ops->state_change(chan, state); |
@@ -361,7 +397,7 @@ static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq) | |||
361 | static void l2cap_chan_timeout(struct work_struct *work) | 397 | static void l2cap_chan_timeout(struct work_struct *work) |
362 | { | 398 | { |
363 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, | 399 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, |
364 | chan_timer.work); | 400 | chan_timer.work); |
365 | struct l2cap_conn *conn = chan->conn; | 401 | struct l2cap_conn *conn = chan->conn; |
366 | int reason; | 402 | int reason; |
367 | 403 | ||
@@ -373,7 +409,7 @@ static void l2cap_chan_timeout(struct work_struct *work) | |||
373 | if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) | 409 | if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) |
374 | reason = ECONNREFUSED; | 410 | reason = ECONNREFUSED; |
375 | else if (chan->state == BT_CONNECT && | 411 | else if (chan->state == BT_CONNECT && |
376 | chan->sec_level != BT_SECURITY_SDP) | 412 | chan->sec_level != BT_SECURITY_SDP) |
377 | reason = ECONNREFUSED; | 413 | reason = ECONNREFUSED; |
378 | else | 414 | else |
379 | reason = ETIMEDOUT; | 415 | reason = ETIMEDOUT; |
@@ -455,7 +491,7 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan) | |||
455 | set_bit(FLAG_FORCE_ACTIVE, &chan->flags); | 491 | set_bit(FLAG_FORCE_ACTIVE, &chan->flags); |
456 | } | 492 | } |
457 | 493 | ||
458 | static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) | 494 | void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) |
459 | { | 495 | { |
460 | BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, | 496 | BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, |
461 | __le16_to_cpu(chan->psm), chan->dcid); | 497 | __le16_to_cpu(chan->psm), chan->dcid); |
@@ -504,7 +540,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) | |||
504 | chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE; | 540 | chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE; |
505 | chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME; | 541 | chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME; |
506 | chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT; | 542 | chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT; |
507 | chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO; | 543 | chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO; |
508 | 544 | ||
509 | l2cap_chan_hold(chan); | 545 | l2cap_chan_hold(chan); |
510 | 546 | ||
@@ -527,6 +563,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) | |||
527 | BT_DBG("chan %p, conn %p, err %d", chan, conn, err); | 563 | BT_DBG("chan %p, conn %p, err %d", chan, conn, err); |
528 | 564 | ||
529 | if (conn) { | 565 | if (conn) { |
566 | struct amp_mgr *mgr = conn->hcon->amp_mgr; | ||
530 | /* Delete from channel list */ | 567 | /* Delete from channel list */ |
531 | list_del(&chan->list); | 568 | list_del(&chan->list); |
532 | 569 | ||
@@ -536,10 +573,19 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) | |||
536 | 573 | ||
537 | if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP) | 574 | if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP) |
538 | hci_conn_put(conn->hcon); | 575 | hci_conn_put(conn->hcon); |
576 | |||
577 | if (mgr && mgr->bredr_chan == chan) | ||
578 | mgr->bredr_chan = NULL; | ||
579 | } | ||
580 | |||
581 | if (chan->hs_hchan) { | ||
582 | struct hci_chan *hs_hchan = chan->hs_hchan; | ||
583 | |||
584 | BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan); | ||
585 | amp_disconnect_logical_link(hs_hchan); | ||
539 | } | 586 | } |
540 | 587 | ||
541 | if (chan->ops->teardown) | 588 | chan->ops->teardown(chan, err); |
542 | chan->ops->teardown(chan, err); | ||
543 | 589 | ||
544 | if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) | 590 | if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) |
545 | return; | 591 | return; |
@@ -573,19 +619,18 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) | |||
573 | struct l2cap_conn *conn = chan->conn; | 619 | struct l2cap_conn *conn = chan->conn; |
574 | struct sock *sk = chan->sk; | 620 | struct sock *sk = chan->sk; |
575 | 621 | ||
576 | BT_DBG("chan %p state %s sk %p", chan, | 622 | BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state), |
577 | state_to_string(chan->state), sk); | 623 | sk); |
578 | 624 | ||
579 | switch (chan->state) { | 625 | switch (chan->state) { |
580 | case BT_LISTEN: | 626 | case BT_LISTEN: |
581 | if (chan->ops->teardown) | 627 | chan->ops->teardown(chan, 0); |
582 | chan->ops->teardown(chan, 0); | ||
583 | break; | 628 | break; |
584 | 629 | ||
585 | case BT_CONNECTED: | 630 | case BT_CONNECTED: |
586 | case BT_CONFIG: | 631 | case BT_CONFIG: |
587 | if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && | 632 | if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && |
588 | conn->hcon->type == ACL_LINK) { | 633 | conn->hcon->type == ACL_LINK) { |
589 | __set_chan_timer(chan, sk->sk_sndtimeo); | 634 | __set_chan_timer(chan, sk->sk_sndtimeo); |
590 | l2cap_send_disconn_req(conn, chan, reason); | 635 | l2cap_send_disconn_req(conn, chan, reason); |
591 | } else | 636 | } else |
@@ -594,7 +639,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) | |||
594 | 639 | ||
595 | case BT_CONNECT2: | 640 | case BT_CONNECT2: |
596 | if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && | 641 | if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && |
597 | conn->hcon->type == ACL_LINK) { | 642 | conn->hcon->type == ACL_LINK) { |
598 | struct l2cap_conn_rsp rsp; | 643 | struct l2cap_conn_rsp rsp; |
599 | __u16 result; | 644 | __u16 result; |
600 | 645 | ||
@@ -609,7 +654,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) | |||
609 | rsp.result = cpu_to_le16(result); | 654 | rsp.result = cpu_to_le16(result); |
610 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); | 655 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); |
611 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, | 656 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, |
612 | sizeof(rsp), &rsp); | 657 | sizeof(rsp), &rsp); |
613 | } | 658 | } |
614 | 659 | ||
615 | l2cap_chan_del(chan, reason); | 660 | l2cap_chan_del(chan, reason); |
@@ -621,8 +666,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) | |||
621 | break; | 666 | break; |
622 | 667 | ||
623 | default: | 668 | default: |
624 | if (chan->ops->teardown) | 669 | chan->ops->teardown(chan, 0); |
625 | chan->ops->teardown(chan, 0); | ||
626 | break; | 670 | break; |
627 | } | 671 | } |
628 | } | 672 | } |
@@ -691,7 +735,8 @@ static u8 l2cap_get_ident(struct l2cap_conn *conn) | |||
691 | return id; | 735 | return id; |
692 | } | 736 | } |
693 | 737 | ||
694 | static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) | 738 | static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, |
739 | void *data) | ||
695 | { | 740 | { |
696 | struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); | 741 | struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); |
697 | u8 flags; | 742 | u8 flags; |
@@ -712,16 +757,31 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, | |||
712 | hci_send_acl(conn->hchan, skb, flags); | 757 | hci_send_acl(conn->hchan, skb, flags); |
713 | } | 758 | } |
714 | 759 | ||
760 | static bool __chan_is_moving(struct l2cap_chan *chan) | ||
761 | { | ||
762 | return chan->move_state != L2CAP_MOVE_STABLE && | ||
763 | chan->move_state != L2CAP_MOVE_WAIT_PREPARE; | ||
764 | } | ||
765 | |||
715 | static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) | 766 | static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) |
716 | { | 767 | { |
717 | struct hci_conn *hcon = chan->conn->hcon; | 768 | struct hci_conn *hcon = chan->conn->hcon; |
718 | u16 flags; | 769 | u16 flags; |
719 | 770 | ||
720 | BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, | 771 | BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, |
721 | skb->priority); | 772 | skb->priority); |
773 | |||
774 | if (chan->hs_hcon && !__chan_is_moving(chan)) { | ||
775 | if (chan->hs_hchan) | ||
776 | hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE); | ||
777 | else | ||
778 | kfree_skb(skb); | ||
779 | |||
780 | return; | ||
781 | } | ||
722 | 782 | ||
723 | if (!test_bit(FLAG_FLUSHABLE, &chan->flags) && | 783 | if (!test_bit(FLAG_FLUSHABLE, &chan->flags) && |
724 | lmp_no_flush_capable(hcon->hdev)) | 784 | lmp_no_flush_capable(hcon->hdev)) |
725 | flags = ACL_START_NO_FLUSH; | 785 | flags = ACL_START_NO_FLUSH; |
726 | else | 786 | else |
727 | flags = ACL_START; | 787 | flags = ACL_START; |
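l2cap_do_send() now prefers the high-speed hci_chan when the channel has one and is not in the middle of a move; if the AMP logical link is expected but currently missing, the frame is dropped rather than sent down the BR/EDR path. A condensed sketch of that routing decision, with hypothetical types standing in for the kernel structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct frame { size_t len; };

struct channel {
        bool moving;            /* channel move in progress */
        bool has_hs_link;       /* an AMP logical link is expected */
        bool hs_link_up;        /* ...and currently usable */
};

static void send_bredr(struct frame *f) { printf("BR/EDR %zu bytes\n", f->len); }
static void send_amp(struct frame *f)   { printf("AMP %zu bytes\n", f->len); }
static void drop(struct frame *f)       { printf("dropped %zu bytes\n", f->len); }

/* Route one outgoing frame: stable channels with a usable high-speed link
 * use it, channels that expect one but lost it drop the frame, everything
 * else stays on BR/EDR. */
static void do_send(struct channel *ch, struct frame *f)
{
        if (ch->has_hs_link && !ch->moving) {
                if (ch->hs_link_up)
                        send_amp(f);
                else
                        drop(f);
                return;
        }

        send_bredr(f);
}

The same "not moving" check is sprinkled through the S-frame, streaming and ERTM send paths further down, so no data leaks onto either link while a move is in flight.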
@@ -895,6 +955,9 @@ static void l2cap_send_sframe(struct l2cap_chan *chan, | |||
895 | if (!control->sframe) | 955 | if (!control->sframe) |
896 | return; | 956 | return; |
897 | 957 | ||
958 | if (__chan_is_moving(chan)) | ||
959 | return; | ||
960 | |||
898 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) && | 961 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) && |
899 | !control->poll) | 962 | !control->poll) |
900 | control->final = 1; | 963 | control->final = 1; |
@@ -946,7 +1009,25 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) | |||
946 | return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); | 1009 | return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); |
947 | } | 1010 | } |
948 | 1011 | ||
949 | static void l2cap_send_conn_req(struct l2cap_chan *chan) | 1012 | static bool __amp_capable(struct l2cap_chan *chan) |
1013 | { | ||
1014 | struct l2cap_conn *conn = chan->conn; | ||
1015 | |||
1016 | if (enable_hs && | ||
1017 | chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED && | ||
1018 | conn->fixed_chan_mask & L2CAP_FC_A2MP) | ||
1019 | return true; | ||
1020 | else | ||
1021 | return false; | ||
1022 | } | ||
1023 | |||
1024 | static bool l2cap_check_efs(struct l2cap_chan *chan) | ||
1025 | { | ||
1026 | /* Check EFS parameters */ | ||
1027 | return true; | ||
1028 | } | ||
1029 | |||
1030 | void l2cap_send_conn_req(struct l2cap_chan *chan) | ||
950 | { | 1031 | { |
951 | struct l2cap_conn *conn = chan->conn; | 1032 | struct l2cap_conn *conn = chan->conn; |
952 | struct l2cap_conn_req req; | 1033 | struct l2cap_conn_req req; |
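The connection-setup decision introduced in a later hunk, l2cap_start_connection(), keys off __amp_capable() above: only when high-speed support is enabled, the socket asked for BT_CHANNEL_POLICY_AMP_PREFERRED and the peer advertises the A2MP fixed channel does the stack start AMP discovery instead of sending a plain Connection Request. The predicate reduces to a three-way AND; a trivial sketch with hypothetical constant values:

#include <stdbool.h>

#define FC_A2MP                 0x08    /* hypothetical fixed-channel bit */
#define POLICY_AMP_PREFERRED    1       /* hypothetical policy value */

/* All three conditions must hold before AMP discovery is worth starting. */
static bool amp_capable(bool enable_hs, int chan_policy,
                        unsigned int fixed_chan_mask)
{
        return enable_hs &&
               chan_policy == POLICY_AMP_PREFERRED &&
               (fixed_chan_mask & FC_A2MP);
}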
@@ -961,6 +1042,76 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan) | |||
961 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req); | 1042 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req); |
962 | } | 1043 | } |
963 | 1044 | ||
1045 | static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id) | ||
1046 | { | ||
1047 | struct l2cap_create_chan_req req; | ||
1048 | req.scid = cpu_to_le16(chan->scid); | ||
1049 | req.psm = chan->psm; | ||
1050 | req.amp_id = amp_id; | ||
1051 | |||
1052 | chan->ident = l2cap_get_ident(chan->conn); | ||
1053 | |||
1054 | l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ, | ||
1055 | sizeof(req), &req); | ||
1056 | } | ||
1057 | |||
1058 | static void l2cap_move_setup(struct l2cap_chan *chan) | ||
1059 | { | ||
1060 | struct sk_buff *skb; | ||
1061 | |||
1062 | BT_DBG("chan %p", chan); | ||
1063 | |||
1064 | if (chan->mode != L2CAP_MODE_ERTM) | ||
1065 | return; | ||
1066 | |||
1067 | __clear_retrans_timer(chan); | ||
1068 | __clear_monitor_timer(chan); | ||
1069 | __clear_ack_timer(chan); | ||
1070 | |||
1071 | chan->retry_count = 0; | ||
1072 | skb_queue_walk(&chan->tx_q, skb) { | ||
1073 | if (bt_cb(skb)->control.retries) | ||
1074 | bt_cb(skb)->control.retries = 1; | ||
1075 | else | ||
1076 | break; | ||
1077 | } | ||
1078 | |||
1079 | chan->expected_tx_seq = chan->buffer_seq; | ||
1080 | |||
1081 | clear_bit(CONN_REJ_ACT, &chan->conn_state); | ||
1082 | clear_bit(CONN_SREJ_ACT, &chan->conn_state); | ||
1083 | l2cap_seq_list_clear(&chan->retrans_list); | ||
1084 | l2cap_seq_list_clear(&chan->srej_list); | ||
1085 | skb_queue_purge(&chan->srej_q); | ||
1086 | |||
1087 | chan->tx_state = L2CAP_TX_STATE_XMIT; | ||
1088 | chan->rx_state = L2CAP_RX_STATE_MOVE; | ||
1089 | |||
1090 | set_bit(CONN_REMOTE_BUSY, &chan->conn_state); | ||
1091 | } | ||
1092 | |||
1093 | static void l2cap_move_done(struct l2cap_chan *chan) | ||
1094 | { | ||
1095 | u8 move_role = chan->move_role; | ||
1096 | BT_DBG("chan %p", chan); | ||
1097 | |||
1098 | chan->move_state = L2CAP_MOVE_STABLE; | ||
1099 | chan->move_role = L2CAP_MOVE_ROLE_NONE; | ||
1100 | |||
1101 | if (chan->mode != L2CAP_MODE_ERTM) | ||
1102 | return; | ||
1103 | |||
1104 | switch (move_role) { | ||
1105 | case L2CAP_MOVE_ROLE_INITIATOR: | ||
1106 | l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL); | ||
1107 | chan->rx_state = L2CAP_RX_STATE_WAIT_F; | ||
1108 | break; | ||
1109 | case L2CAP_MOVE_ROLE_RESPONDER: | ||
1110 | chan->rx_state = L2CAP_RX_STATE_WAIT_P; | ||
1111 | break; | ||
1112 | } | ||
1113 | } | ||
1114 | |||
964 | static void l2cap_chan_ready(struct l2cap_chan *chan) | 1115 | static void l2cap_chan_ready(struct l2cap_chan *chan) |
965 | { | 1116 | { |
966 | /* This clears all conf flags, including CONF_NOT_COMPLETE */ | 1117 | /* This clears all conf flags, including CONF_NOT_COMPLETE */ |
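l2cap_move_setup() and l2cap_move_done() above bracket a channel move for ERTM: setup stops the retransmission machinery, rewinds the expected sequence number to the receive-buffer position and parks the channel in the MOVE rx state; done clears the move role and, depending on which side initiated the move, waits for either the F-bit or the P-bit before traffic resumes. A reduced sketch of those two transitions over hypothetical state fields (timer and queue handling omitted):

#include <stdint.h>

enum rx_state { RX_RECV, RX_MOVE, RX_WAIT_F, RX_WAIT_P };
enum move_role { ROLE_NONE, ROLE_INITIATOR, ROLE_RESPONDER };

struct ertm_chan {
        enum rx_state  rx_state;
        enum move_role move_role;
        uint16_t expected_tx_seq;
        uint16_t buffer_seq;
        unsigned int retry_count;
};

/* Freeze ERTM for the duration of the move: no retries outstanding and
 * reception rewound to the last buffered sequence number. */
static void move_setup(struct ertm_chan *c)
{
        c->retry_count = 0;
        c->expected_tx_seq = c->buffer_seq;
        c->rx_state = RX_MOVE;
}

/* Resume after the move: the initiator polls and waits for the final bit,
 * the responder waits for the peer's poll; anything else just receives. */
static void move_done(struct ertm_chan *c)
{
        enum move_role role = c->move_role;

        c->move_role = ROLE_NONE;

        switch (role) {
        case ROLE_INITIATOR:
                c->rx_state = RX_WAIT_F;
                break;
        case ROLE_RESPONDER:
                c->rx_state = RX_WAIT_P;
                break;
        default:
                c->rx_state = RX_RECV;
                break;
        }
}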
@@ -972,6 +1123,16 @@ static void l2cap_chan_ready(struct l2cap_chan *chan) | |||
972 | chan->ops->ready(chan); | 1123 | chan->ops->ready(chan); |
973 | } | 1124 | } |
974 | 1125 | ||
1126 | static void l2cap_start_connection(struct l2cap_chan *chan) | ||
1127 | { | ||
1128 | if (__amp_capable(chan)) { | ||
1129 | BT_DBG("chan %p AMP capable: discover AMPs", chan); | ||
1130 | a2mp_discover_amp(chan); | ||
1131 | } else { | ||
1132 | l2cap_send_conn_req(chan); | ||
1133 | } | ||
1134 | } | ||
1135 | |||
975 | static void l2cap_do_start(struct l2cap_chan *chan) | 1136 | static void l2cap_do_start(struct l2cap_chan *chan) |
976 | { | 1137 | { |
977 | struct l2cap_conn *conn = chan->conn; | 1138 | struct l2cap_conn *conn = chan->conn; |
@@ -986,8 +1147,9 @@ static void l2cap_do_start(struct l2cap_chan *chan) | |||
986 | return; | 1147 | return; |
987 | 1148 | ||
988 | if (l2cap_chan_check_security(chan) && | 1149 | if (l2cap_chan_check_security(chan) && |
989 | __l2cap_no_conn_pending(chan)) | 1150 | __l2cap_no_conn_pending(chan)) { |
990 | l2cap_send_conn_req(chan); | 1151 | l2cap_start_connection(chan); |
1152 | } | ||
991 | } else { | 1153 | } else { |
992 | struct l2cap_info_req req; | 1154 | struct l2cap_info_req req; |
993 | req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); | 1155 | req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); |
@@ -997,8 +1159,8 @@ static void l2cap_do_start(struct l2cap_chan *chan) | |||
997 | 1159 | ||
998 | schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); | 1160 | schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); |
999 | 1161 | ||
1000 | l2cap_send_cmd(conn, conn->info_ident, | 1162 | l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, |
1001 | L2CAP_INFO_REQ, sizeof(req), &req); | 1163 | sizeof(req), &req); |
1002 | } | 1164 | } |
1003 | } | 1165 | } |
1004 | 1166 | ||
@@ -1018,7 +1180,8 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask) | |||
1018 | } | 1180 | } |
1019 | } | 1181 | } |
1020 | 1182 | ||
1021 | static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err) | 1183 | static void l2cap_send_disconn_req(struct l2cap_conn *conn, |
1184 | struct l2cap_chan *chan, int err) | ||
1022 | { | 1185 | { |
1023 | struct sock *sk = chan->sk; | 1186 | struct sock *sk = chan->sk; |
1024 | struct l2cap_disconn_req req; | 1187 | struct l2cap_disconn_req req; |
@@ -1033,14 +1196,14 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c | |||
1033 | } | 1196 | } |
1034 | 1197 | ||
1035 | if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { | 1198 | if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { |
1036 | __l2cap_state_change(chan, BT_DISCONN); | 1199 | l2cap_state_change(chan, BT_DISCONN); |
1037 | return; | 1200 | return; |
1038 | } | 1201 | } |
1039 | 1202 | ||
1040 | req.dcid = cpu_to_le16(chan->dcid); | 1203 | req.dcid = cpu_to_le16(chan->dcid); |
1041 | req.scid = cpu_to_le16(chan->scid); | 1204 | req.scid = cpu_to_le16(chan->scid); |
1042 | l2cap_send_cmd(conn, l2cap_get_ident(conn), | 1205 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ, |
1043 | L2CAP_DISCONN_REQ, sizeof(req), &req); | 1206 | sizeof(req), &req); |
1044 | 1207 | ||
1045 | lock_sock(sk); | 1208 | lock_sock(sk); |
1046 | __l2cap_state_change(chan, BT_DISCONN); | 1209 | __l2cap_state_change(chan, BT_DISCONN); |
@@ -1069,20 +1232,20 @@ static void l2cap_conn_start(struct l2cap_conn *conn) | |||
1069 | 1232 | ||
1070 | if (chan->state == BT_CONNECT) { | 1233 | if (chan->state == BT_CONNECT) { |
1071 | if (!l2cap_chan_check_security(chan) || | 1234 | if (!l2cap_chan_check_security(chan) || |
1072 | !__l2cap_no_conn_pending(chan)) { | 1235 | !__l2cap_no_conn_pending(chan)) { |
1073 | l2cap_chan_unlock(chan); | 1236 | l2cap_chan_unlock(chan); |
1074 | continue; | 1237 | continue; |
1075 | } | 1238 | } |
1076 | 1239 | ||
1077 | if (!l2cap_mode_supported(chan->mode, conn->feat_mask) | 1240 | if (!l2cap_mode_supported(chan->mode, conn->feat_mask) |
1078 | && test_bit(CONF_STATE2_DEVICE, | 1241 | && test_bit(CONF_STATE2_DEVICE, |
1079 | &chan->conf_state)) { | 1242 | &chan->conf_state)) { |
1080 | l2cap_chan_close(chan, ECONNRESET); | 1243 | l2cap_chan_close(chan, ECONNRESET); |
1081 | l2cap_chan_unlock(chan); | 1244 | l2cap_chan_unlock(chan); |
1082 | continue; | 1245 | continue; |
1083 | } | 1246 | } |
1084 | 1247 | ||
1085 | l2cap_send_conn_req(chan); | 1248 | l2cap_start_connection(chan); |
1086 | 1249 | ||
1087 | } else if (chan->state == BT_CONNECT2) { | 1250 | } else if (chan->state == BT_CONNECT2) { |
1088 | struct l2cap_conn_rsp rsp; | 1251 | struct l2cap_conn_rsp rsp; |
@@ -1094,11 +1257,9 @@ static void l2cap_conn_start(struct l2cap_conn *conn) | |||
1094 | lock_sock(sk); | 1257 | lock_sock(sk); |
1095 | if (test_bit(BT_SK_DEFER_SETUP, | 1258 | if (test_bit(BT_SK_DEFER_SETUP, |
1096 | &bt_sk(sk)->flags)) { | 1259 | &bt_sk(sk)->flags)) { |
1097 | struct sock *parent = bt_sk(sk)->parent; | ||
1098 | rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); | 1260 | rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); |
1099 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND); | 1261 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND); |
1100 | if (parent) | 1262 | chan->ops->defer(chan); |
1101 | parent->sk_data_ready(parent, 0); | ||
1102 | 1263 | ||
1103 | } else { | 1264 | } else { |
1104 | __l2cap_state_change(chan, BT_CONFIG); | 1265 | __l2cap_state_change(chan, BT_CONFIG); |
@@ -1112,17 +1273,17 @@ static void l2cap_conn_start(struct l2cap_conn *conn) | |||
1112 | } | 1273 | } |
1113 | 1274 | ||
1114 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, | 1275 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, |
1115 | sizeof(rsp), &rsp); | 1276 | sizeof(rsp), &rsp); |
1116 | 1277 | ||
1117 | if (test_bit(CONF_REQ_SENT, &chan->conf_state) || | 1278 | if (test_bit(CONF_REQ_SENT, &chan->conf_state) || |
1118 | rsp.result != L2CAP_CR_SUCCESS) { | 1279 | rsp.result != L2CAP_CR_SUCCESS) { |
1119 | l2cap_chan_unlock(chan); | 1280 | l2cap_chan_unlock(chan); |
1120 | continue; | 1281 | continue; |
1121 | } | 1282 | } |
1122 | 1283 | ||
1123 | set_bit(CONF_REQ_SENT, &chan->conf_state); | 1284 | set_bit(CONF_REQ_SENT, &chan->conf_state); |
1124 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, | 1285 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, |
1125 | l2cap_build_conf_req(chan, buf), buf); | 1286 | l2cap_build_conf_req(chan, buf), buf); |
1126 | chan->num_conf_req++; | 1287 | chan->num_conf_req++; |
1127 | } | 1288 | } |
1128 | 1289 | ||
@@ -1204,8 +1365,6 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) | |||
1204 | bacpy(&bt_sk(sk)->src, conn->src); | 1365 | bacpy(&bt_sk(sk)->src, conn->src); |
1205 | bacpy(&bt_sk(sk)->dst, conn->dst); | 1366 | bacpy(&bt_sk(sk)->dst, conn->dst); |
1206 | 1367 | ||
1207 | bt_accept_enqueue(parent, sk); | ||
1208 | |||
1209 | l2cap_chan_add(conn, chan); | 1368 | l2cap_chan_add(conn, chan); |
1210 | 1369 | ||
1211 | l2cap_chan_ready(chan); | 1370 | l2cap_chan_ready(chan); |
@@ -1270,7 +1429,7 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) | |||
1270 | 1429 | ||
1271 | list_for_each_entry(chan, &conn->chan_l, list) { | 1430 | list_for_each_entry(chan, &conn->chan_l, list) { |
1272 | if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) | 1431 | if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) |
1273 | __l2cap_chan_set_err(chan, err); | 1432 | l2cap_chan_set_err(chan, err); |
1274 | } | 1433 | } |
1275 | 1434 | ||
1276 | mutex_unlock(&conn->chan_lock); | 1435 | mutex_unlock(&conn->chan_lock); |
@@ -1279,7 +1438,7 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) | |||
1279 | static void l2cap_info_timeout(struct work_struct *work) | 1438 | static void l2cap_info_timeout(struct work_struct *work) |
1280 | { | 1439 | { |
1281 | struct l2cap_conn *conn = container_of(work, struct l2cap_conn, | 1440 | struct l2cap_conn *conn = container_of(work, struct l2cap_conn, |
1282 | info_timer.work); | 1441 | info_timer.work); |
1283 | 1442 | ||
1284 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; | 1443 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; |
1285 | conn->info_ident = 0; | 1444 | conn->info_ident = 0; |
@@ -1333,7 +1492,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) | |||
1333 | static void security_timeout(struct work_struct *work) | 1492 | static void security_timeout(struct work_struct *work) |
1334 | { | 1493 | { |
1335 | struct l2cap_conn *conn = container_of(work, struct l2cap_conn, | 1494 | struct l2cap_conn *conn = container_of(work, struct l2cap_conn, |
1336 | security_timer.work); | 1495 | security_timer.work); |
1337 | 1496 | ||
1338 | BT_DBG("conn %p", conn); | 1497 | BT_DBG("conn %p", conn); |
1339 | 1498 | ||
@@ -1355,7 +1514,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) | |||
1355 | if (!hchan) | 1514 | if (!hchan) |
1356 | return NULL; | 1515 | return NULL; |
1357 | 1516 | ||
1358 | conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC); | 1517 | conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL); |
1359 | if (!conn) { | 1518 | if (!conn) { |
1360 | hci_chan_del(hchan); | 1519 | hci_chan_del(hchan); |
1361 | return NULL; | 1520 | return NULL; |
@@ -1367,10 +1526,22 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) | |||
1367 | 1526 | ||
1368 | BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); | 1527 | BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); |
1369 | 1528 | ||
1370 | if (hcon->hdev->le_mtu && hcon->type == LE_LINK) | 1529 | switch (hcon->type) { |
1371 | conn->mtu = hcon->hdev->le_mtu; | 1530 | case AMP_LINK: |
1372 | else | 1531 | conn->mtu = hcon->hdev->block_mtu; |
1532 | break; | ||
1533 | |||
1534 | case LE_LINK: | ||
1535 | if (hcon->hdev->le_mtu) { | ||
1536 | conn->mtu = hcon->hdev->le_mtu; | ||
1537 | break; | ||
1538 | } | ||
1539 | /* fall through */ | ||
1540 | |||
1541 | default: | ||
1373 | conn->mtu = hcon->hdev->acl_mtu; | 1542 | conn->mtu = hcon->hdev->acl_mtu; |
1543 | break; | ||
1544 | } | ||
1374 | 1545 | ||
1375 | conn->src = &hcon->hdev->bdaddr; | 1546 | conn->src = &hcon->hdev->bdaddr; |
1376 | conn->dst = &hcon->dst; | 1547 | conn->dst = &hcon->dst; |
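The MTU selection above switches on the link type: AMP connections use the controller's data-block MTU, LE links use the LE buffer size when the controller reports one, and everything else, including LE controllers that share their ACL buffers, falls back to the ACL MTU. The deliberate fall-through carries the LE case into the default; a one-function sketch:

enum link_type { LINK_ACL, LINK_AMP, LINK_LE };

/* Pick a connection MTU; a zero le_mtu means the controller shares its
 * ACL buffers with LE traffic. */
static unsigned int pick_mtu(enum link_type type, unsigned int block_mtu,
                             unsigned int le_mtu, unsigned int acl_mtu)
{
        switch (type) {
        case LINK_AMP:
                return block_mtu;
        case LINK_LE:
                if (le_mtu)
                        return le_mtu;
                /* fall through */
        default:
                return acl_mtu;
        }
}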
@@ -1448,7 +1619,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, | |||
1448 | __u8 auth_type; | 1619 | __u8 auth_type; |
1449 | int err; | 1620 | int err; |
1450 | 1621 | ||
1451 | BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst), | 1622 | BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst, |
1452 | dst_type, __le16_to_cpu(psm)); | 1623 | dst_type, __le16_to_cpu(psm)); |
1453 | 1624 | ||
1454 | hdev = hci_get_route(dst, src); | 1625 | hdev = hci_get_route(dst, src); |
@@ -1461,7 +1632,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, | |||
1461 | 1632 | ||
1462 | /* PSM must be odd and lsb of upper byte must be 0 */ | 1633 | /* PSM must be odd and lsb of upper byte must be 0 */ |
1463 | if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid && | 1634 | if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid && |
1464 | chan->chan_type != L2CAP_CHAN_RAW) { | 1635 | chan->chan_type != L2CAP_CHAN_RAW) { |
1465 | err = -EINVAL; | 1636 | err = -EINVAL; |
1466 | goto done; | 1637 | goto done; |
1467 | } | 1638 | } |
@@ -1657,6 +1828,9 @@ static void l2cap_streaming_send(struct l2cap_chan *chan, | |||
1657 | 1828 | ||
1658 | BT_DBG("chan %p, skbs %p", chan, skbs); | 1829 | BT_DBG("chan %p, skbs %p", chan, skbs); |
1659 | 1830 | ||
1831 | if (__chan_is_moving(chan)) | ||
1832 | return; | ||
1833 | |||
1660 | skb_queue_splice_tail_init(skbs, &chan->tx_q); | 1834 | skb_queue_splice_tail_init(skbs, &chan->tx_q); |
1661 | 1835 | ||
1662 | while (!skb_queue_empty(&chan->tx_q)) { | 1836 | while (!skb_queue_empty(&chan->tx_q)) { |
@@ -1699,6 +1873,9 @@ static int l2cap_ertm_send(struct l2cap_chan *chan) | |||
1699 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) | 1873 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) |
1700 | return 0; | 1874 | return 0; |
1701 | 1875 | ||
1876 | if (__chan_is_moving(chan)) | ||
1877 | return 0; | ||
1878 | |||
1702 | while (chan->tx_send_head && | 1879 | while (chan->tx_send_head && |
1703 | chan->unacked_frames < chan->remote_tx_win && | 1880 | chan->unacked_frames < chan->remote_tx_win && |
1704 | chan->tx_state == L2CAP_TX_STATE_XMIT) { | 1881 | chan->tx_state == L2CAP_TX_STATE_XMIT) { |
@@ -1764,13 +1941,16 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan) | |||
1764 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) | 1941 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) |
1765 | return; | 1942 | return; |
1766 | 1943 | ||
1944 | if (__chan_is_moving(chan)) | ||
1945 | return; | ||
1946 | |||
1767 | while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { | 1947 | while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { |
1768 | seq = l2cap_seq_list_pop(&chan->retrans_list); | 1948 | seq = l2cap_seq_list_pop(&chan->retrans_list); |
1769 | 1949 | ||
1770 | skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq); | 1950 | skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq); |
1771 | if (!skb) { | 1951 | if (!skb) { |
1772 | BT_DBG("Error: Can't retransmit seq %d, frame missing", | 1952 | BT_DBG("Error: Can't retransmit seq %d, frame missing", |
1773 | seq); | 1953 | seq); |
1774 | continue; | 1954 | continue; |
1775 | } | 1955 | } |
1776 | 1956 | ||
@@ -1795,9 +1975,9 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan) | |||
1795 | /* Cloned sk_buffs are read-only, so we need a | 1975 | /* Cloned sk_buffs are read-only, so we need a |
1796 | * writeable copy | 1976 | * writeable copy |
1797 | */ | 1977 | */ |
1798 | tx_skb = skb_copy(skb, GFP_ATOMIC); | 1978 | tx_skb = skb_copy(skb, GFP_KERNEL); |
1799 | } else { | 1979 | } else { |
1800 | tx_skb = skb_clone(skb, GFP_ATOMIC); | 1980 | tx_skb = skb_clone(skb, GFP_KERNEL); |
1801 | } | 1981 | } |
1802 | 1982 | ||
1803 | if (!tx_skb) { | 1983 | if (!tx_skb) { |
@@ -1855,7 +2035,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan, | |||
1855 | if (chan->unacked_frames) { | 2035 | if (chan->unacked_frames) { |
1856 | skb_queue_walk(&chan->tx_q, skb) { | 2036 | skb_queue_walk(&chan->tx_q, skb) { |
1857 | if (bt_cb(skb)->control.txseq == control->reqseq || | 2037 | if (bt_cb(skb)->control.txseq == control->reqseq || |
1858 | skb == chan->tx_send_head) | 2038 | skb == chan->tx_send_head) |
1859 | break; | 2039 | break; |
1860 | } | 2040 | } |
1861 | 2041 | ||
@@ -2106,7 +2286,9 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, | |||
2106 | /* PDU size is derived from the HCI MTU */ | 2286 | /* PDU size is derived from the HCI MTU */ |
2107 | pdu_len = chan->conn->mtu; | 2287 | pdu_len = chan->conn->mtu; |
2108 | 2288 | ||
2109 | pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); | 2289 | /* Constrain PDU size for BR/EDR connections */ |
2290 | if (!chan->hs_hcon) | ||
2291 | pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); | ||
2110 | 2292 | ||
2111 | /* Adjust for largest possible L2CAP overhead. */ | 2293 | /* Adjust for largest possible L2CAP overhead. */ |
2112 | if (chan->fcs) | 2294 | if (chan->fcs) |
@@ -2156,7 +2338,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, | |||
2156 | } | 2338 | } |
2157 | 2339 | ||
2158 | int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, | 2340 | int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, |
2159 | u32 priority) | 2341 | u32 priority) |
2160 | { | 2342 | { |
2161 | struct sk_buff *skb; | 2343 | struct sk_buff *skb; |
2162 | int err; | 2344 | int err; |
@@ -2543,7 +2725,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) | |||
2543 | /* Don't send frame to the socket it came from */ | 2725 | /* Don't send frame to the socket it came from */ |
2544 | if (skb->sk == sk) | 2726 | if (skb->sk == sk) |
2545 | continue; | 2727 | continue; |
2546 | nskb = skb_clone(skb, GFP_ATOMIC); | 2728 | nskb = skb_clone(skb, GFP_KERNEL); |
2547 | if (!nskb) | 2729 | if (!nskb) |
2548 | continue; | 2730 | continue; |
2549 | 2731 | ||
@@ -2569,7 +2751,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, | |||
2569 | len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; | 2751 | len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; |
2570 | count = min_t(unsigned int, conn->mtu, len); | 2752 | count = min_t(unsigned int, conn->mtu, len); |
2571 | 2753 | ||
2572 | skb = bt_skb_alloc(count, GFP_ATOMIC); | 2754 | skb = bt_skb_alloc(count, GFP_KERNEL); |
2573 | if (!skb) | 2755 | if (!skb) |
2574 | return NULL; | 2756 | return NULL; |
2575 | 2757 | ||
@@ -2599,7 +2781,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, | |||
2599 | while (len) { | 2781 | while (len) { |
2600 | count = min_t(unsigned int, conn->mtu, len); | 2782 | count = min_t(unsigned int, conn->mtu, len); |
2601 | 2783 | ||
2602 | *frag = bt_skb_alloc(count, GFP_ATOMIC); | 2784 | *frag = bt_skb_alloc(count, GFP_KERNEL); |
2603 | if (!*frag) | 2785 | if (!*frag) |
2604 | goto fail; | 2786 | goto fail; |
2605 | 2787 | ||
@@ -2618,7 +2800,8 @@ fail: | |||
2618 | return NULL; | 2800 | return NULL; |
2619 | } | 2801 | } |
2620 | 2802 | ||
2621 | static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val) | 2803 | static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, |
2804 | unsigned long *val) | ||
2622 | { | 2805 | { |
2623 | struct l2cap_conf_opt *opt = *ptr; | 2806 | struct l2cap_conf_opt *opt = *ptr; |
2624 | int len; | 2807 | int len; |
@@ -2692,7 +2875,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan) | |||
2692 | efs.msdu = cpu_to_le16(chan->local_msdu); | 2875 | efs.msdu = cpu_to_le16(chan->local_msdu); |
2693 | efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); | 2876 | efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); |
2694 | efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); | 2877 | efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); |
2695 | efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); | 2878 | efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO); |
2696 | break; | 2879 | break; |
2697 | 2880 | ||
2698 | case L2CAP_MODE_STREAMING: | 2881 | case L2CAP_MODE_STREAMING: |
@@ -2709,7 +2892,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan) | |||
2709 | } | 2892 | } |
2710 | 2893 | ||
2711 | l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs), | 2894 | l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs), |
2712 | (unsigned long) &efs); | 2895 | (unsigned long) &efs); |
2713 | } | 2896 | } |
2714 | 2897 | ||
2715 | static void l2cap_ack_timeout(struct work_struct *work) | 2898 | static void l2cap_ack_timeout(struct work_struct *work) |
@@ -2749,6 +2932,11 @@ int l2cap_ertm_init(struct l2cap_chan *chan) | |||
2749 | 2932 | ||
2750 | skb_queue_head_init(&chan->tx_q); | 2933 | skb_queue_head_init(&chan->tx_q); |
2751 | 2934 | ||
2935 | chan->local_amp_id = 0; | ||
2936 | chan->move_id = 0; | ||
2937 | chan->move_state = L2CAP_MOVE_STABLE; | ||
2938 | chan->move_role = L2CAP_MOVE_ROLE_NONE; | ||
2939 | |||
2752 | if (chan->mode != L2CAP_MODE_ERTM) | 2940 | if (chan->mode != L2CAP_MODE_ERTM) |
2753 | return 0; | 2941 | return 0; |
2754 | 2942 | ||
@@ -2795,16 +2983,54 @@ static inline bool __l2cap_efs_supported(struct l2cap_chan *chan) | |||
2795 | return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW; | 2983 | return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW; |
2796 | } | 2984 | } |
2797 | 2985 | ||
2986 | static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan, | ||
2987 | struct l2cap_conf_rfc *rfc) | ||
2988 | { | ||
2989 | if (chan->local_amp_id && chan->hs_hcon) { | ||
2990 | u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to; | ||
2991 | |||
2992 | /* Class 1 devices must have ERTM timeouts | ||
2993 | * exceeding the Link Supervision Timeout. The | ||
2994 | * default Link Supervision Timeout for AMP | ||
2995 | * controllers is 10 seconds. | ||
2996 | * | ||
2997 | * Class 1 devices use 0xffffffff for their | ||
2998 | * best-effort flush timeout, so the clamping logic | ||
2999 | * will result in a timeout that meets the above | ||
3000 | * requirement. ERTM timeouts are 16-bit values, so | ||
3001 | * the maximum timeout is 65.535 seconds. | ||
3002 | */ | ||
3003 | |||
3004 | /* Convert timeout to milliseconds and round */ | ||
3005 | ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000); | ||
3006 | |||
3007 | /* This is the recommended formula for class 2 devices | ||
3008 | * that start ERTM timers when packets are sent to the | ||
3009 | * controller. | ||
3010 | */ | ||
3011 | ertm_to = 3 * ertm_to + 500; | ||
3012 | |||
3013 | if (ertm_to > 0xffff) | ||
3014 | ertm_to = 0xffff; | ||
3015 | |||
3016 | rfc->retrans_timeout = cpu_to_le16((u16) ertm_to); | ||
3017 | rfc->monitor_timeout = rfc->retrans_timeout; | ||
3018 | } else { | ||
3019 | rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); | ||
3020 | rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); | ||
3021 | } | ||
3022 | } | ||
3023 | |||
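The comment block in __l2cap_set_ertm_timeouts() above walks through a small derivation: the AMP controller's best-effort flush timeout (in microseconds) is rounded up to milliseconds, run through the class 2 formula (3 * flush_to + 500 ms), and clamped to the 16-bit RFC timeout field. A minimal user-space sketch of that arithmetic, assuming the microsecond input unit and the DIV_ROUND_UP_ULL-style rounding shown in the patch:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the timeout derivation in __l2cap_set_ertm_timeouts();
 * flush_to_us stands in for hdev->amp_be_flush_to (assumed microseconds). */
static uint16_t ertm_timeout_ms(uint64_t flush_to_us)
{
	uint64_t to = (flush_to_us + 999) / 1000;	/* round up to milliseconds */

	to = 3 * to + 500;				/* class 2 formula from the comment */
	if (to > 0xffff)				/* ERTM timeouts are 16-bit values */
		to = 0xffff;

	return (uint16_t) to;
}

int main(void)
{
	/* Class 1 controllers report 0xffffffff, which clamps to the
	 * 65.535 second maximum mentioned in the comment. */
	printf("%u ms\n", (unsigned) ertm_timeout_ms(0xffffffffULL));	/* 65535 */
	printf("%u ms\n", (unsigned) ertm_timeout_ms(10000000ULL));	/* 10 s flush -> 30500 */
	return 0;
}

Both retrans_timeout and monitor_timeout then carry this value; the else branch keeps the plain L2CAP defaults for channels that are not on an AMP controller.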
2798 | static inline void l2cap_txwin_setup(struct l2cap_chan *chan) | 3024 | static inline void l2cap_txwin_setup(struct l2cap_chan *chan) |
2799 | { | 3025 | { |
2800 | if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && | 3026 | if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && |
2801 | __l2cap_ews_supported(chan)) { | 3027 | __l2cap_ews_supported(chan)) { |
2802 | /* use extended control field */ | 3028 | /* use extended control field */ |
2803 | set_bit(FLAG_EXT_CTRL, &chan->flags); | 3029 | set_bit(FLAG_EXT_CTRL, &chan->flags); |
2804 | chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; | 3030 | chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; |
2805 | } else { | 3031 | } else { |
2806 | chan->tx_win = min_t(u16, chan->tx_win, | 3032 | chan->tx_win = min_t(u16, chan->tx_win, |
2807 | L2CAP_DEFAULT_TX_WINDOW); | 3033 | L2CAP_DEFAULT_TX_WINDOW); |
2808 | chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; | 3034 | chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; |
2809 | } | 3035 | } |
2810 | chan->ack_win = chan->tx_win; | 3036 | chan->ack_win = chan->tx_win; |
@@ -2844,7 +3070,7 @@ done: | |||
2844 | switch (chan->mode) { | 3070 | switch (chan->mode) { |
2845 | case L2CAP_MODE_BASIC: | 3071 | case L2CAP_MODE_BASIC: |
2846 | if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && | 3072 | if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && |
2847 | !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) | 3073 | !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) |
2848 | break; | 3074 | break; |
2849 | 3075 | ||
2850 | rfc.mode = L2CAP_MODE_BASIC; | 3076 | rfc.mode = L2CAP_MODE_BASIC; |
@@ -2855,28 +3081,27 @@ done: | |||
2855 | rfc.max_pdu_size = 0; | 3081 | rfc.max_pdu_size = 0; |
2856 | 3082 | ||
2857 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), | 3083 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), |
2858 | (unsigned long) &rfc); | 3084 | (unsigned long) &rfc); |
2859 | break; | 3085 | break; |
2860 | 3086 | ||
2861 | case L2CAP_MODE_ERTM: | 3087 | case L2CAP_MODE_ERTM: |
2862 | rfc.mode = L2CAP_MODE_ERTM; | 3088 | rfc.mode = L2CAP_MODE_ERTM; |
2863 | rfc.max_transmit = chan->max_tx; | 3089 | rfc.max_transmit = chan->max_tx; |
2864 | rfc.retrans_timeout = 0; | 3090 | |
2865 | rfc.monitor_timeout = 0; | 3091 | __l2cap_set_ertm_timeouts(chan, &rfc); |
2866 | 3092 | ||
2867 | size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - | 3093 | size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - |
2868 | L2CAP_EXT_HDR_SIZE - | 3094 | L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - |
2869 | L2CAP_SDULEN_SIZE - | 3095 | L2CAP_FCS_SIZE); |
2870 | L2CAP_FCS_SIZE); | ||
2871 | rfc.max_pdu_size = cpu_to_le16(size); | 3096 | rfc.max_pdu_size = cpu_to_le16(size); |
2872 | 3097 | ||
2873 | l2cap_txwin_setup(chan); | 3098 | l2cap_txwin_setup(chan); |
2874 | 3099 | ||
2875 | rfc.txwin_size = min_t(u16, chan->tx_win, | 3100 | rfc.txwin_size = min_t(u16, chan->tx_win, |
2876 | L2CAP_DEFAULT_TX_WINDOW); | 3101 | L2CAP_DEFAULT_TX_WINDOW); |
2877 | 3102 | ||
2878 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), | 3103 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), |
2879 | (unsigned long) &rfc); | 3104 | (unsigned long) &rfc); |
2880 | 3105 | ||
2881 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) | 3106 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) |
2882 | l2cap_add_opt_efs(&ptr, chan); | 3107 | l2cap_add_opt_efs(&ptr, chan); |
@@ -2885,14 +3110,14 @@ done: | |||
2885 | break; | 3110 | break; |
2886 | 3111 | ||
2887 | if (chan->fcs == L2CAP_FCS_NONE || | 3112 | if (chan->fcs == L2CAP_FCS_NONE || |
2888 | test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) { | 3113 | test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) { |
2889 | chan->fcs = L2CAP_FCS_NONE; | 3114 | chan->fcs = L2CAP_FCS_NONE; |
2890 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); | 3115 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); |
2891 | } | 3116 | } |
2892 | 3117 | ||
2893 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) | 3118 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) |
2894 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, | 3119 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, |
2895 | chan->tx_win); | 3120 | chan->tx_win); |
2896 | break; | 3121 | break; |
2897 | 3122 | ||
2898 | case L2CAP_MODE_STREAMING: | 3123 | case L2CAP_MODE_STREAMING: |
@@ -2904,13 +3129,12 @@ done: | |||
2904 | rfc.monitor_timeout = 0; | 3129 | rfc.monitor_timeout = 0; |
2905 | 3130 | ||
2906 | size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - | 3131 | size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - |
2907 | L2CAP_EXT_HDR_SIZE - | 3132 | L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - |
2908 | L2CAP_SDULEN_SIZE - | 3133 | L2CAP_FCS_SIZE); |
2909 | L2CAP_FCS_SIZE); | ||
2910 | rfc.max_pdu_size = cpu_to_le16(size); | 3134 | rfc.max_pdu_size = cpu_to_le16(size); |
2911 | 3135 | ||
2912 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), | 3136 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), |
2913 | (unsigned long) &rfc); | 3137 | (unsigned long) &rfc); |
2914 | 3138 | ||
2915 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) | 3139 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) |
2916 | l2cap_add_opt_efs(&ptr, chan); | 3140 | l2cap_add_opt_efs(&ptr, chan); |
@@ -2919,7 +3143,7 @@ done: | |||
2919 | break; | 3143 | break; |
2920 | 3144 | ||
2921 | if (chan->fcs == L2CAP_FCS_NONE || | 3145 | if (chan->fcs == L2CAP_FCS_NONE || |
2922 | test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) { | 3146 | test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) { |
2923 | chan->fcs = L2CAP_FCS_NONE; | 3147 | chan->fcs = L2CAP_FCS_NONE; |
2924 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); | 3148 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); |
2925 | } | 3149 | } |
@@ -3011,7 +3235,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) | |||
3011 | case L2CAP_MODE_ERTM: | 3235 | case L2CAP_MODE_ERTM: |
3012 | if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { | 3236 | if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { |
3013 | chan->mode = l2cap_select_mode(rfc.mode, | 3237 | chan->mode = l2cap_select_mode(rfc.mode, |
3014 | chan->conn->feat_mask); | 3238 | chan->conn->feat_mask); |
3015 | break; | 3239 | break; |
3016 | } | 3240 | } |
3017 | 3241 | ||
@@ -3036,8 +3260,8 @@ done: | |||
3036 | if (chan->num_conf_rsp == 1) | 3260 | if (chan->num_conf_rsp == 1) |
3037 | return -ECONNREFUSED; | 3261 | return -ECONNREFUSED; |
3038 | 3262 | ||
3039 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | 3263 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), |
3040 | sizeof(rfc), (unsigned long) &rfc); | 3264 | (unsigned long) &rfc); |
3041 | } | 3265 | } |
3042 | 3266 | ||
3043 | if (result == L2CAP_CONF_SUCCESS) { | 3267 | if (result == L2CAP_CONF_SUCCESS) { |
@@ -3054,8 +3278,8 @@ done: | |||
3054 | 3278 | ||
3055 | if (remote_efs) { | 3279 | if (remote_efs) { |
3056 | if (chan->local_stype != L2CAP_SERV_NOTRAFIC && | 3280 | if (chan->local_stype != L2CAP_SERV_NOTRAFIC && |
3057 | efs.stype != L2CAP_SERV_NOTRAFIC && | 3281 | efs.stype != L2CAP_SERV_NOTRAFIC && |
3058 | efs.stype != chan->local_stype) { | 3282 | efs.stype != chan->local_stype) { |
3059 | 3283 | ||
3060 | result = L2CAP_CONF_UNACCEPT; | 3284 | result = L2CAP_CONF_UNACCEPT; |
3061 | 3285 | ||
@@ -3063,8 +3287,8 @@ done: | |||
3063 | return -ECONNREFUSED; | 3287 | return -ECONNREFUSED; |
3064 | 3288 | ||
3065 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, | 3289 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, |
3066 | sizeof(efs), | 3290 | sizeof(efs), |
3067 | (unsigned long) &efs); | 3291 | (unsigned long) &efs); |
3068 | } else { | 3292 | } else { |
3069 | /* Send PENDING Conf Rsp */ | 3293 | /* Send PENDING Conf Rsp */ |
3070 | result = L2CAP_CONF_PENDING; | 3294 | result = L2CAP_CONF_PENDING; |
@@ -3087,51 +3311,45 @@ done: | |||
3087 | chan->remote_max_tx = rfc.max_transmit; | 3311 | chan->remote_max_tx = rfc.max_transmit; |
3088 | 3312 | ||
3089 | size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), | 3313 | size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), |
3090 | chan->conn->mtu - | 3314 | chan->conn->mtu - L2CAP_EXT_HDR_SIZE - |
3091 | L2CAP_EXT_HDR_SIZE - | 3315 | L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); |
3092 | L2CAP_SDULEN_SIZE - | ||
3093 | L2CAP_FCS_SIZE); | ||
3094 | rfc.max_pdu_size = cpu_to_le16(size); | 3316 | rfc.max_pdu_size = cpu_to_le16(size); |
3095 | chan->remote_mps = size; | 3317 | chan->remote_mps = size; |
3096 | 3318 | ||
3097 | rfc.retrans_timeout = | 3319 | __l2cap_set_ertm_timeouts(chan, &rfc); |
3098 | __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); | ||
3099 | rfc.monitor_timeout = | ||
3100 | __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); | ||
3101 | 3320 | ||
3102 | set_bit(CONF_MODE_DONE, &chan->conf_state); | 3321 | set_bit(CONF_MODE_DONE, &chan->conf_state); |
3103 | 3322 | ||
3104 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | 3323 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, |
3105 | sizeof(rfc), (unsigned long) &rfc); | 3324 | sizeof(rfc), (unsigned long) &rfc); |
3106 | 3325 | ||
3107 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { | 3326 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { |
3108 | chan->remote_id = efs.id; | 3327 | chan->remote_id = efs.id; |
3109 | chan->remote_stype = efs.stype; | 3328 | chan->remote_stype = efs.stype; |
3110 | chan->remote_msdu = le16_to_cpu(efs.msdu); | 3329 | chan->remote_msdu = le16_to_cpu(efs.msdu); |
3111 | chan->remote_flush_to = | 3330 | chan->remote_flush_to = |
3112 | le32_to_cpu(efs.flush_to); | 3331 | le32_to_cpu(efs.flush_to); |
3113 | chan->remote_acc_lat = | 3332 | chan->remote_acc_lat = |
3114 | le32_to_cpu(efs.acc_lat); | 3333 | le32_to_cpu(efs.acc_lat); |
3115 | chan->remote_sdu_itime = | 3334 | chan->remote_sdu_itime = |
3116 | le32_to_cpu(efs.sdu_itime); | 3335 | le32_to_cpu(efs.sdu_itime); |
3117 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, | 3336 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, |
3118 | sizeof(efs), (unsigned long) &efs); | 3337 | sizeof(efs), |
3338 | (unsigned long) &efs); | ||
3119 | } | 3339 | } |
3120 | break; | 3340 | break; |
3121 | 3341 | ||
3122 | case L2CAP_MODE_STREAMING: | 3342 | case L2CAP_MODE_STREAMING: |
3123 | size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), | 3343 | size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), |
3124 | chan->conn->mtu - | 3344 | chan->conn->mtu - L2CAP_EXT_HDR_SIZE - |
3125 | L2CAP_EXT_HDR_SIZE - | 3345 | L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); |
3126 | L2CAP_SDULEN_SIZE - | ||
3127 | L2CAP_FCS_SIZE); | ||
3128 | rfc.max_pdu_size = cpu_to_le16(size); | 3346 | rfc.max_pdu_size = cpu_to_le16(size); |
3129 | chan->remote_mps = size; | 3347 | chan->remote_mps = size; |
3130 | 3348 | ||
3131 | set_bit(CONF_MODE_DONE, &chan->conf_state); | 3349 | set_bit(CONF_MODE_DONE, &chan->conf_state); |
3132 | 3350 | ||
3133 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | 3351 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), |
3134 | sizeof(rfc), (unsigned long) &rfc); | 3352 | (unsigned long) &rfc); |
3135 | 3353 | ||
3136 | break; | 3354 | break; |
3137 | 3355 | ||
@@ -3152,7 +3370,8 @@ done: | |||
3152 | return ptr - data; | 3370 | return ptr - data; |
3153 | } | 3371 | } |
3154 | 3372 | ||
3155 | static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result) | 3373 | static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, |
3374 | void *data, u16 *result) | ||
3156 | { | 3375 | { |
3157 | struct l2cap_conf_req *req = data; | 3376 | struct l2cap_conf_req *req = data; |
3158 | void *ptr = req->data; | 3377 | void *ptr = req->data; |
@@ -3179,7 +3398,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi | |||
3179 | case L2CAP_CONF_FLUSH_TO: | 3398 | case L2CAP_CONF_FLUSH_TO: |
3180 | chan->flush_to = val; | 3399 | chan->flush_to = val; |
3181 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, | 3400 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, |
3182 | 2, chan->flush_to); | 3401 | 2, chan->flush_to); |
3183 | break; | 3402 | break; |
3184 | 3403 | ||
3185 | case L2CAP_CONF_RFC: | 3404 | case L2CAP_CONF_RFC: |
@@ -3187,13 +3406,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi | |||
3187 | memcpy(&rfc, (void *)val, olen); | 3406 | memcpy(&rfc, (void *)val, olen); |
3188 | 3407 | ||
3189 | if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && | 3408 | if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && |
3190 | rfc.mode != chan->mode) | 3409 | rfc.mode != chan->mode) |
3191 | return -ECONNREFUSED; | 3410 | return -ECONNREFUSED; |
3192 | 3411 | ||
3193 | chan->fcs = 0; | 3412 | chan->fcs = 0; |
3194 | 3413 | ||
3195 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, | 3414 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, |
3196 | sizeof(rfc), (unsigned long) &rfc); | 3415 | sizeof(rfc), (unsigned long) &rfc); |
3197 | break; | 3416 | break; |
3198 | 3417 | ||
3199 | case L2CAP_CONF_EWS: | 3418 | case L2CAP_CONF_EWS: |
@@ -3207,12 +3426,12 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi | |||
3207 | memcpy(&efs, (void *)val, olen); | 3426 | memcpy(&efs, (void *)val, olen); |
3208 | 3427 | ||
3209 | if (chan->local_stype != L2CAP_SERV_NOTRAFIC && | 3428 | if (chan->local_stype != L2CAP_SERV_NOTRAFIC && |
3210 | efs.stype != L2CAP_SERV_NOTRAFIC && | 3429 | efs.stype != L2CAP_SERV_NOTRAFIC && |
3211 | efs.stype != chan->local_stype) | 3430 | efs.stype != chan->local_stype) |
3212 | return -ECONNREFUSED; | 3431 | return -ECONNREFUSED; |
3213 | 3432 | ||
3214 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, | 3433 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), |
3215 | sizeof(efs), (unsigned long) &efs); | 3434 | (unsigned long) &efs); |
3216 | break; | 3435 | break; |
3217 | } | 3436 | } |
3218 | } | 3437 | } |
@@ -3235,10 +3454,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi | |||
3235 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { | 3454 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { |
3236 | chan->local_msdu = le16_to_cpu(efs.msdu); | 3455 | chan->local_msdu = le16_to_cpu(efs.msdu); |
3237 | chan->local_sdu_itime = | 3456 | chan->local_sdu_itime = |
3238 | le32_to_cpu(efs.sdu_itime); | 3457 | le32_to_cpu(efs.sdu_itime); |
3239 | chan->local_acc_lat = le32_to_cpu(efs.acc_lat); | 3458 | chan->local_acc_lat = le32_to_cpu(efs.acc_lat); |
3240 | chan->local_flush_to = | 3459 | chan->local_flush_to = |
3241 | le32_to_cpu(efs.flush_to); | 3460 | le32_to_cpu(efs.flush_to); |
3242 | } | 3461 | } |
3243 | break; | 3462 | break; |
3244 | 3463 | ||
@@ -3253,7 +3472,8 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi | |||
3253 | return ptr - data; | 3472 | return ptr - data; |
3254 | } | 3473 | } |
3255 | 3474 | ||
3256 | static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags) | 3475 | static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, |
3476 | u16 result, u16 flags) | ||
3257 | { | 3477 | { |
3258 | struct l2cap_conf_rsp *rsp = data; | 3478 | struct l2cap_conf_rsp *rsp = data; |
3259 | void *ptr = rsp->data; | 3479 | void *ptr = rsp->data; |
@@ -3272,19 +3492,27 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) | |||
3272 | struct l2cap_conn_rsp rsp; | 3492 | struct l2cap_conn_rsp rsp; |
3273 | struct l2cap_conn *conn = chan->conn; | 3493 | struct l2cap_conn *conn = chan->conn; |
3274 | u8 buf[128]; | 3494 | u8 buf[128]; |
3495 | u8 rsp_code; | ||
3275 | 3496 | ||
3276 | rsp.scid = cpu_to_le16(chan->dcid); | 3497 | rsp.scid = cpu_to_le16(chan->dcid); |
3277 | rsp.dcid = cpu_to_le16(chan->scid); | 3498 | rsp.dcid = cpu_to_le16(chan->scid); |
3278 | rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); | 3499 | rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); |
3279 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); | 3500 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); |
3280 | l2cap_send_cmd(conn, chan->ident, | 3501 | |
3281 | L2CAP_CONN_RSP, sizeof(rsp), &rsp); | 3502 | if (chan->hs_hcon) |
3503 | rsp_code = L2CAP_CREATE_CHAN_RSP; | ||
3504 | else | ||
3505 | rsp_code = L2CAP_CONN_RSP; | ||
3506 | |||
3507 | BT_DBG("chan %p rsp_code %u", chan, rsp_code); | ||
3508 | |||
3509 | l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp); | ||
3282 | 3510 | ||
3283 | if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) | 3511 | if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) |
3284 | return; | 3512 | return; |
3285 | 3513 | ||
3286 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, | 3514 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, |
3287 | l2cap_build_conf_req(chan, buf), buf); | 3515 | l2cap_build_conf_req(chan, buf), buf); |
3288 | chan->num_conf_req++; | 3516 | chan->num_conf_req++; |
3289 | } | 3517 | } |
3290 | 3518 | ||
@@ -3339,7 +3567,8 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) | |||
3339 | } | 3567 | } |
3340 | } | 3568 | } |
3341 | 3569 | ||
3342 | static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) | 3570 | static inline int l2cap_command_rej(struct l2cap_conn *conn, |
3571 | struct l2cap_cmd_hdr *cmd, u8 *data) | ||
3343 | { | 3572 | { |
3344 | struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; | 3573 | struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; |
3345 | 3574 | ||
@@ -3347,7 +3576,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
3347 | return 0; | 3576 | return 0; |
3348 | 3577 | ||
3349 | if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && | 3578 | if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && |
3350 | cmd->ident == conn->info_ident) { | 3579 | cmd->ident == conn->info_ident) { |
3351 | cancel_delayed_work(&conn->info_timer); | 3580 | cancel_delayed_work(&conn->info_timer); |
3352 | 3581 | ||
3353 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; | 3582 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; |
@@ -3359,7 +3588,9 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
3359 | return 0; | 3588 | return 0; |
3360 | } | 3589 | } |
3361 | 3590 | ||
3362 | static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) | 3591 | static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, |
3592 | struct l2cap_cmd_hdr *cmd, | ||
3593 | u8 *data, u8 rsp_code, u8 amp_id) | ||
3363 | { | 3594 | { |
3364 | struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; | 3595 | struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; |
3365 | struct l2cap_conn_rsp rsp; | 3596 | struct l2cap_conn_rsp rsp; |
@@ -3386,7 +3617,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
3386 | 3617 | ||
3387 | /* Check if the ACL is secure enough (if not SDP) */ | 3618 | /* Check if the ACL is secure enough (if not SDP) */ |
3388 | if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) && | 3619 | if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) && |
3389 | !hci_conn_check_link_mode(conn->hcon)) { | 3620 | !hci_conn_check_link_mode(conn->hcon)) { |
3390 | conn->disc_reason = HCI_ERROR_AUTH_FAILURE; | 3621 | conn->disc_reason = HCI_ERROR_AUTH_FAILURE; |
3391 | result = L2CAP_CR_SEC_BLOCK; | 3622 | result = L2CAP_CR_SEC_BLOCK; |
3392 | goto response; | 3623 | goto response; |
@@ -3410,8 +3641,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
3410 | bacpy(&bt_sk(sk)->dst, conn->dst); | 3641 | bacpy(&bt_sk(sk)->dst, conn->dst); |
3411 | chan->psm = psm; | 3642 | chan->psm = psm; |
3412 | chan->dcid = scid; | 3643 | chan->dcid = scid; |
3413 | 3644 | chan->local_amp_id = amp_id; | |
3414 | bt_accept_enqueue(parent, sk); | ||
3415 | 3645 | ||
3416 | __l2cap_chan_add(conn, chan); | 3646 | __l2cap_chan_add(conn, chan); |
3417 | 3647 | ||
@@ -3427,10 +3657,19 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
3427 | __l2cap_state_change(chan, BT_CONNECT2); | 3657 | __l2cap_state_change(chan, BT_CONNECT2); |
3428 | result = L2CAP_CR_PEND; | 3658 | result = L2CAP_CR_PEND; |
3429 | status = L2CAP_CS_AUTHOR_PEND; | 3659 | status = L2CAP_CS_AUTHOR_PEND; |
3430 | parent->sk_data_ready(parent, 0); | 3660 | chan->ops->defer(chan); |
3431 | } else { | 3661 | } else { |
3432 | __l2cap_state_change(chan, BT_CONFIG); | 3662 | /* Force pending result for AMP controllers. |
3433 | result = L2CAP_CR_SUCCESS; | 3663 | * The connection will succeed after the |
3664 | * physical link is up. | ||
3665 | */ | ||
3666 | if (amp_id) { | ||
3667 | __l2cap_state_change(chan, BT_CONNECT2); | ||
3668 | result = L2CAP_CR_PEND; | ||
3669 | } else { | ||
3670 | __l2cap_state_change(chan, BT_CONFIG); | ||
3671 | result = L2CAP_CR_SUCCESS; | ||
3672 | } | ||
3434 | status = L2CAP_CS_NO_INFO; | 3673 | status = L2CAP_CS_NO_INFO; |
3435 | } | 3674 | } |
3436 | } else { | 3675 | } else { |
@@ -3453,7 +3692,7 @@ sendresp: | |||
3453 | rsp.dcid = cpu_to_le16(dcid); | 3692 | rsp.dcid = cpu_to_le16(dcid); |
3454 | rsp.result = cpu_to_le16(result); | 3693 | rsp.result = cpu_to_le16(result); |
3455 | rsp.status = cpu_to_le16(status); | 3694 | rsp.status = cpu_to_le16(status); |
3456 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); | 3695 | l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp); |
3457 | 3696 | ||
3458 | if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { | 3697 | if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { |
3459 | struct l2cap_info_req info; | 3698 | struct l2cap_info_req info; |
@@ -3464,23 +3703,31 @@ sendresp: | |||
3464 | 3703 | ||
3465 | schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); | 3704 | schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); |
3466 | 3705 | ||
3467 | l2cap_send_cmd(conn, conn->info_ident, | 3706 | l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, |
3468 | L2CAP_INFO_REQ, sizeof(info), &info); | 3707 | sizeof(info), &info); |
3469 | } | 3708 | } |
3470 | 3709 | ||
3471 | if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) && | 3710 | if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) && |
3472 | result == L2CAP_CR_SUCCESS) { | 3711 | result == L2CAP_CR_SUCCESS) { |
3473 | u8 buf[128]; | 3712 | u8 buf[128]; |
3474 | set_bit(CONF_REQ_SENT, &chan->conf_state); | 3713 | set_bit(CONF_REQ_SENT, &chan->conf_state); |
3475 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, | 3714 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, |
3476 | l2cap_build_conf_req(chan, buf), buf); | 3715 | l2cap_build_conf_req(chan, buf), buf); |
3477 | chan->num_conf_req++; | 3716 | chan->num_conf_req++; |
3478 | } | 3717 | } |
3479 | 3718 | ||
3719 | return chan; | ||
3720 | } | ||
3721 | |||
3722 | static int l2cap_connect_req(struct l2cap_conn *conn, | ||
3723 | struct l2cap_cmd_hdr *cmd, u8 *data) | ||
3724 | { | ||
3725 | l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0); | ||
3480 | return 0; | 3726 | return 0; |
3481 | } | 3727 | } |
3482 | 3728 | ||
3483 | static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) | 3729 | static int l2cap_connect_create_rsp(struct l2cap_conn *conn, |
3730 | struct l2cap_cmd_hdr *cmd, u8 *data) | ||
3484 | { | 3731 | { |
3485 | struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; | 3732 | struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; |
3486 | u16 scid, dcid, result, status; | 3733 | u16 scid, dcid, result, status; |
@@ -3494,7 +3741,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
3494 | status = __le16_to_cpu(rsp->status); | 3741 | status = __le16_to_cpu(rsp->status); |
3495 | 3742 | ||
3496 | BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", | 3743 | BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", |
3497 | dcid, scid, result, status); | 3744 | dcid, scid, result, status); |
3498 | 3745 | ||
3499 | mutex_lock(&conn->chan_lock); | 3746 | mutex_lock(&conn->chan_lock); |
3500 | 3747 | ||
@@ -3527,7 +3774,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd | |||
3527 | break; | 3774 | break; |
3528 | 3775 | ||
3529 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, | 3776 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, |
3530 | l2cap_build_conf_req(chan, req), req); | 3777 | l2cap_build_conf_req(chan, req), req); |
3531 | chan->num_conf_req++; | 3778 | chan->num_conf_req++; |
3532 | break; | 3779 | break; |
3533 | 3780 | ||
@@ -3559,7 +3806,25 @@ static inline void set_default_fcs(struct l2cap_chan *chan) | |||
3559 | chan->fcs = L2CAP_FCS_CRC16; | 3806 | chan->fcs = L2CAP_FCS_CRC16; |
3560 | } | 3807 | } |
3561 | 3808 | ||
3562 | static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) | 3809 | static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data, |
3810 | u8 ident, u16 flags) | ||
3811 | { | ||
3812 | struct l2cap_conn *conn = chan->conn; | ||
3813 | |||
3814 | BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident, | ||
3815 | flags); | ||
3816 | |||
3817 | clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); | ||
3818 | set_bit(CONF_OUTPUT_DONE, &chan->conf_state); | ||
3819 | |||
3820 | l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, | ||
3821 | l2cap_build_conf_rsp(chan, data, | ||
3822 | L2CAP_CONF_SUCCESS, flags), data); | ||
3823 | } | ||
3824 | |||
3825 | static inline int l2cap_config_req(struct l2cap_conn *conn, | ||
3826 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, | ||
3827 | u8 *data) | ||
3563 | { | 3828 | { |
3564 | struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; | 3829 | struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; |
3565 | u16 dcid, flags; | 3830 | u16 dcid, flags; |
@@ -3584,7 +3849,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3584 | rej.dcid = cpu_to_le16(chan->dcid); | 3849 | rej.dcid = cpu_to_le16(chan->dcid); |
3585 | 3850 | ||
3586 | l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, | 3851 | l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, |
3587 | sizeof(rej), &rej); | 3852 | sizeof(rej), &rej); |
3588 | goto unlock; | 3853 | goto unlock; |
3589 | } | 3854 | } |
3590 | 3855 | ||
@@ -3592,8 +3857,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3592 | len = cmd_len - sizeof(*req); | 3857 | len = cmd_len - sizeof(*req); |
3593 | if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) { | 3858 | if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) { |
3594 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, | 3859 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, |
3595 | l2cap_build_conf_rsp(chan, rsp, | 3860 | l2cap_build_conf_rsp(chan, rsp, |
3596 | L2CAP_CONF_REJECT, flags), rsp); | 3861 | L2CAP_CONF_REJECT, flags), rsp); |
3597 | goto unlock; | 3862 | goto unlock; |
3598 | } | 3863 | } |
3599 | 3864 | ||
@@ -3604,8 +3869,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3604 | if (flags & L2CAP_CONF_FLAG_CONTINUATION) { | 3869 | if (flags & L2CAP_CONF_FLAG_CONTINUATION) { |
3605 | /* Incomplete config. Send empty response. */ | 3870 | /* Incomplete config. Send empty response. */ |
3606 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, | 3871 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, |
3607 | l2cap_build_conf_rsp(chan, rsp, | 3872 | l2cap_build_conf_rsp(chan, rsp, |
3608 | L2CAP_CONF_SUCCESS, flags), rsp); | 3873 | L2CAP_CONF_SUCCESS, flags), rsp); |
3609 | goto unlock; | 3874 | goto unlock; |
3610 | } | 3875 | } |
3611 | 3876 | ||
@@ -3616,6 +3881,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3616 | goto unlock; | 3881 | goto unlock; |
3617 | } | 3882 | } |
3618 | 3883 | ||
3884 | chan->ident = cmd->ident; | ||
3619 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); | 3885 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); |
3620 | chan->num_conf_rsp++; | 3886 | chan->num_conf_rsp++; |
3621 | 3887 | ||
@@ -3643,23 +3909,22 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3643 | if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) { | 3909 | if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) { |
3644 | u8 buf[64]; | 3910 | u8 buf[64]; |
3645 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, | 3911 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, |
3646 | l2cap_build_conf_req(chan, buf), buf); | 3912 | l2cap_build_conf_req(chan, buf), buf); |
3647 | chan->num_conf_req++; | 3913 | chan->num_conf_req++; |
3648 | } | 3914 | } |
3649 | 3915 | ||
3650 | /* Got Conf Rsp PENDING from remote side and assume we sent | 3916 | /* Got Conf Rsp PENDING from remote side and assume we sent |
3651 | Conf Rsp PENDING in the code above */ | 3917 | Conf Rsp PENDING in the code above */ |
3652 | if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && | 3918 | if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && |
3653 | test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { | 3919 | test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { |
3654 | 3920 | ||
3655 | /* check compatibility */ | 3921 | /* check compatibility */ |
3656 | 3922 | ||
3657 | clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); | 3923 | /* Send rsp for BR/EDR channel */ |
3658 | set_bit(CONF_OUTPUT_DONE, &chan->conf_state); | 3924 | if (!chan->hs_hcon) |
3659 | 3925 | l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags); | |
3660 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, | 3926 | else |
3661 | l2cap_build_conf_rsp(chan, rsp, | 3927 | chan->ident = cmd->ident; |
3662 | L2CAP_CONF_SUCCESS, flags), rsp); | ||
3663 | } | 3928 | } |
3664 | 3929 | ||
3665 | unlock: | 3930 | unlock: |
@@ -3667,7 +3932,8 @@ unlock: | |||
3667 | return err; | 3932 | return err; |
3668 | } | 3933 | } |
3669 | 3934 | ||
3670 | static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) | 3935 | static inline int l2cap_config_rsp(struct l2cap_conn *conn, |
3936 | struct l2cap_cmd_hdr *cmd, u8 *data) | ||
3671 | { | 3937 | { |
3672 | struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; | 3938 | struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; |
3673 | u16 scid, flags, result; | 3939 | u16 scid, flags, result; |
@@ -3699,20 +3965,21 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3699 | char buf[64]; | 3965 | char buf[64]; |
3700 | 3966 | ||
3701 | len = l2cap_parse_conf_rsp(chan, rsp->data, len, | 3967 | len = l2cap_parse_conf_rsp(chan, rsp->data, len, |
3702 | buf, &result); | 3968 | buf, &result); |
3703 | if (len < 0) { | 3969 | if (len < 0) { |
3704 | l2cap_send_disconn_req(conn, chan, ECONNRESET); | 3970 | l2cap_send_disconn_req(conn, chan, ECONNRESET); |
3705 | goto done; | 3971 | goto done; |
3706 | } | 3972 | } |
3707 | 3973 | ||
3708 | /* check compatibility */ | 3974 | if (!chan->hs_hcon) { |
3709 | 3975 | l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, | |
3710 | clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); | 3976 | 0); |
3711 | set_bit(CONF_OUTPUT_DONE, &chan->conf_state); | 3977 | } else { |
3712 | 3978 | if (l2cap_check_efs(chan)) { | |
3713 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, | 3979 | amp_create_logical_link(chan); |
3714 | l2cap_build_conf_rsp(chan, buf, | 3980 | chan->ident = cmd->ident; |
3715 | L2CAP_CONF_SUCCESS, 0x0000), buf); | 3981 | } |
3982 | } | ||
3716 | } | 3983 | } |
3717 | goto done; | 3984 | goto done; |
3718 | 3985 | ||
@@ -3728,14 +3995,14 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr | |||
3728 | /* throw out any old stored conf requests */ | 3995 | /* throw out any old stored conf requests */ |
3729 | result = L2CAP_CONF_SUCCESS; | 3996 | result = L2CAP_CONF_SUCCESS; |
3730 | len = l2cap_parse_conf_rsp(chan, rsp->data, len, | 3997 | len = l2cap_parse_conf_rsp(chan, rsp->data, len, |
3731 | req, &result); | 3998 | req, &result); |
3732 | if (len < 0) { | 3999 | if (len < 0) { |
3733 | l2cap_send_disconn_req(conn, chan, ECONNRESET); | 4000 | l2cap_send_disconn_req(conn, chan, ECONNRESET); |
3734 | goto done; | 4001 | goto done; |
3735 | } | 4002 | } |
3736 | 4003 | ||
3737 | l2cap_send_cmd(conn, l2cap_get_ident(conn), | 4004 | l2cap_send_cmd(conn, l2cap_get_ident(conn), |
3738 | L2CAP_CONF_REQ, len, req); | 4005 | L2CAP_CONF_REQ, len, req); |
3739 | chan->num_conf_req++; | 4006 | chan->num_conf_req++; |
3740 | if (result != L2CAP_CONF_SUCCESS) | 4007 | if (result != L2CAP_CONF_SUCCESS) |
3741 | goto done; | 4008 | goto done; |
@@ -3773,7 +4040,8 @@ done: | |||
3773 | return err; | 4040 | return err; |
3774 | } | 4041 | } |
3775 | 4042 | ||
3776 | static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) | 4043 | static inline int l2cap_disconnect_req(struct l2cap_conn *conn, |
4044 | struct l2cap_cmd_hdr *cmd, u8 *data) | ||
3777 | { | 4045 | { |
3778 | struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; | 4046 | struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; |
3779 | struct l2cap_disconn_rsp rsp; | 4047 | struct l2cap_disconn_rsp rsp; |
@@ -3819,7 +4087,8 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd | |||
3819 | return 0; | 4087 | return 0; |
3820 | } | 4088 | } |
3821 | 4089 | ||
3822 | static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) | 4090 | static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, |
4091 | struct l2cap_cmd_hdr *cmd, u8 *data) | ||
3823 | { | 4092 | { |
3824 | struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; | 4093 | struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; |
3825 | u16 dcid, scid; | 4094 | u16 dcid, scid; |
@@ -3853,7 +4122,8 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd | |||
3853 | return 0; | 4122 | return 0; |
3854 | } | 4123 | } |
3855 | 4124 | ||
3856 | static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) | 4125 | static inline int l2cap_information_req(struct l2cap_conn *conn, |
4126 | struct l2cap_cmd_hdr *cmd, u8 *data) | ||
3857 | { | 4127 | { |
3858 | struct l2cap_info_req *req = (struct l2cap_info_req *) data; | 4128 | struct l2cap_info_req *req = (struct l2cap_info_req *) data; |
3859 | u16 type; | 4129 | u16 type; |
@@ -3870,14 +4140,14 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm | |||
3870 | rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); | 4140 | rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); |
3871 | if (!disable_ertm) | 4141 | if (!disable_ertm) |
3872 | feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | 4142 | feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING |
3873 | | L2CAP_FEAT_FCS; | 4143 | | L2CAP_FEAT_FCS; |
3874 | if (enable_hs) | 4144 | if (enable_hs) |
3875 | feat_mask |= L2CAP_FEAT_EXT_FLOW | 4145 | feat_mask |= L2CAP_FEAT_EXT_FLOW |
3876 | | L2CAP_FEAT_EXT_WINDOW; | 4146 | | L2CAP_FEAT_EXT_WINDOW; |
3877 | 4147 | ||
3878 | put_unaligned_le32(feat_mask, rsp->data); | 4148 | put_unaligned_le32(feat_mask, rsp->data); |
3879 | l2cap_send_cmd(conn, cmd->ident, | 4149 | l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), |
3880 | L2CAP_INFO_RSP, sizeof(buf), buf); | 4150 | buf); |
3881 | } else if (type == L2CAP_IT_FIXED_CHAN) { | 4151 | } else if (type == L2CAP_IT_FIXED_CHAN) { |
3882 | u8 buf[12]; | 4152 | u8 buf[12]; |
3883 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; | 4153 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; |
@@ -3890,20 +4160,21 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm | |||
3890 | rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN); | 4160 | rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN); |
3891 | rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); | 4161 | rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); |
3892 | memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); | 4162 | memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); |
3893 | l2cap_send_cmd(conn, cmd->ident, | 4163 | l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), |
3894 | L2CAP_INFO_RSP, sizeof(buf), buf); | 4164 | buf); |
3895 | } else { | 4165 | } else { |
3896 | struct l2cap_info_rsp rsp; | 4166 | struct l2cap_info_rsp rsp; |
3897 | rsp.type = cpu_to_le16(type); | 4167 | rsp.type = cpu_to_le16(type); |
3898 | rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP); | 4168 | rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP); |
3899 | l2cap_send_cmd(conn, cmd->ident, | 4169 | l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), |
3900 | L2CAP_INFO_RSP, sizeof(rsp), &rsp); | 4170 | &rsp); |
3901 | } | 4171 | } |
3902 | 4172 | ||
3903 | return 0; | 4173 | return 0; |
3904 | } | 4174 | } |
3905 | 4175 | ||
3906 | static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) | 4176 | static inline int l2cap_information_rsp(struct l2cap_conn *conn, |
4177 | struct l2cap_cmd_hdr *cmd, u8 *data) | ||
3907 | { | 4178 | { |
3908 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; | 4179 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; |
3909 | u16 type, result; | 4180 | u16 type, result; |
@@ -3915,7 +4186,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm | |||
3915 | 4186 | ||
3916 | /* L2CAP Info req/rsp are unbound to channels, add extra checks */ | 4187 | /* L2CAP Info req/rsp are unbound to channels, add extra checks */ |
3917 | if (cmd->ident != conn->info_ident || | 4188 | if (cmd->ident != conn->info_ident || |
3918 | conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) | 4189 | conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) |
3919 | return 0; | 4190 | return 0; |
3920 | 4191 | ||
3921 | cancel_delayed_work(&conn->info_timer); | 4192 | cancel_delayed_work(&conn->info_timer); |
@@ -3940,7 +4211,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm | |||
3940 | conn->info_ident = l2cap_get_ident(conn); | 4211 | conn->info_ident = l2cap_get_ident(conn); |
3941 | 4212 | ||
3942 | l2cap_send_cmd(conn, conn->info_ident, | 4213 | l2cap_send_cmd(conn, conn->info_ident, |
3943 | L2CAP_INFO_REQ, sizeof(req), &req); | 4214 | L2CAP_INFO_REQ, sizeof(req), &req); |
3944 | } else { | 4215 | } else { |
3945 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; | 4216 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; |
3946 | conn->info_ident = 0; | 4217 | conn->info_ident = 0; |
@@ -3961,12 +4232,14 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm | |||
3961 | return 0; | 4232 | return 0; |
3962 | } | 4233 | } |
3963 | 4234 | ||
3964 | static inline int l2cap_create_channel_req(struct l2cap_conn *conn, | 4235 | static int l2cap_create_channel_req(struct l2cap_conn *conn, |
3965 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, | 4236 | struct l2cap_cmd_hdr *cmd, |
3966 | void *data) | 4237 | u16 cmd_len, void *data) |
3967 | { | 4238 | { |
3968 | struct l2cap_create_chan_req *req = data; | 4239 | struct l2cap_create_chan_req *req = data; |
3969 | struct l2cap_create_chan_rsp rsp; | 4240 | struct l2cap_create_chan_rsp rsp; |
4241 | struct l2cap_chan *chan; | ||
4242 | struct hci_dev *hdev; | ||
3970 | u16 psm, scid; | 4243 | u16 psm, scid; |
3971 | 4244 | ||
3972 | if (cmd_len != sizeof(*req)) | 4245 | if (cmd_len != sizeof(*req)) |
@@ -3980,56 +4253,119 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn, | |||
3980 | 4253 | ||
3981 | BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id); | 4254 | BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id); |
3982 | 4255 | ||
3983 | /* Placeholder: Always reject */ | 4256 | /* For controller id 0 make BR/EDR connection */ |
4257 | if (req->amp_id == HCI_BREDR_ID) { | ||
4258 | l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP, | ||
4259 | req->amp_id); | ||
4260 | return 0; | ||
4261 | } | ||
4262 | |||
4263 | /* Validate AMP controller id */ | ||
4264 | hdev = hci_dev_get(req->amp_id); | ||
4265 | if (!hdev) | ||
4266 | goto error; | ||
4267 | |||
4268 | if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) { | ||
4269 | hci_dev_put(hdev); | ||
4270 | goto error; | ||
4271 | } | ||
4272 | |||
4273 | chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP, | ||
4274 | req->amp_id); | ||
4275 | if (chan) { | ||
4276 | struct amp_mgr *mgr = conn->hcon->amp_mgr; | ||
4277 | struct hci_conn *hs_hcon; | ||
4278 | |||
4279 | hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst); | ||
4280 | if (!hs_hcon) { | ||
4281 | hci_dev_put(hdev); | ||
4282 | return -EFAULT; | ||
4283 | } | ||
4284 | |||
4285 | BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon); | ||
4286 | |||
4287 | mgr->bredr_chan = chan; | ||
4288 | chan->hs_hcon = hs_hcon; | ||
4289 | chan->fcs = L2CAP_FCS_NONE; | ||
4290 | conn->mtu = hdev->block_mtu; | ||
4291 | } | ||
4292 | |||
4293 | hci_dev_put(hdev); | ||
4294 | |||
4295 | return 0; | ||
4296 | |||
4297 | error: | ||
3984 | rsp.dcid = 0; | 4298 | rsp.dcid = 0; |
3985 | rsp.scid = cpu_to_le16(scid); | 4299 | rsp.scid = cpu_to_le16(scid); |
3986 | rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM); | 4300 | rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP); |
3987 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); | 4301 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); |
3988 | 4302 | ||
3989 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP, | 4303 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP, |
3990 | sizeof(rsp), &rsp); | 4304 | sizeof(rsp), &rsp); |
3991 | 4305 | ||
3992 | return 0; | 4306 | return -EFAULT; |
3993 | } | 4307 | } |
3994 | 4308 | ||
3995 | static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn, | 4309 | static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id) |
3996 | struct l2cap_cmd_hdr *cmd, void *data) | ||
3997 | { | 4310 | { |
3998 | BT_DBG("conn %p", conn); | 4311 | struct l2cap_move_chan_req req; |
4312 | u8 ident; | ||
4313 | |||
4314 | BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id); | ||
3999 | 4315 | ||
4000 | return l2cap_connect_rsp(conn, cmd, data); | 4316 | ident = l2cap_get_ident(chan->conn); |
4317 | chan->ident = ident; | ||
4318 | |||
4319 | req.icid = cpu_to_le16(chan->scid); | ||
4320 | req.dest_amp_id = dest_amp_id; | ||
4321 | |||
4322 | l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), | ||
4323 | &req); | ||
4324 | |||
4325 | __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT); | ||
4001 | } | 4326 | } |
4002 | 4327 | ||
4003 | static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident, | 4328 | static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result) |
4004 | u16 icid, u16 result) | ||
4005 | { | 4329 | { |
4006 | struct l2cap_move_chan_rsp rsp; | 4330 | struct l2cap_move_chan_rsp rsp; |
4007 | 4331 | ||
4008 | BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); | 4332 | BT_DBG("chan %p, result 0x%4.4x", chan, result); |
4009 | 4333 | ||
4010 | rsp.icid = cpu_to_le16(icid); | 4334 | rsp.icid = cpu_to_le16(chan->dcid); |
4011 | rsp.result = cpu_to_le16(result); | 4335 | rsp.result = cpu_to_le16(result); |
4012 | 4336 | ||
4013 | l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp); | 4337 | l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP, |
4338 | sizeof(rsp), &rsp); | ||
4014 | } | 4339 | } |
4015 | 4340 | ||
4016 | static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn, | 4341 | static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result) |
4017 | struct l2cap_chan *chan, | ||
4018 | u16 icid, u16 result) | ||
4019 | { | 4342 | { |
4020 | struct l2cap_move_chan_cfm cfm; | 4343 | struct l2cap_move_chan_cfm cfm; |
4021 | u8 ident; | ||
4022 | 4344 | ||
4023 | BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); | 4345 | BT_DBG("chan %p, result 0x%4.4x", chan, result); |
4024 | 4346 | ||
4025 | ident = l2cap_get_ident(conn); | 4347 | chan->ident = l2cap_get_ident(chan->conn); |
4026 | if (chan) | ||
4027 | chan->ident = ident; | ||
4028 | 4348 | ||
4029 | cfm.icid = cpu_to_le16(icid); | 4349 | cfm.icid = cpu_to_le16(chan->scid); |
4030 | cfm.result = cpu_to_le16(result); | 4350 | cfm.result = cpu_to_le16(result); |
4031 | 4351 | ||
4032 | l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm); | 4352 | l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM, |
4353 | sizeof(cfm), &cfm); | ||
4354 | |||
4355 | __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT); | ||
4356 | } | ||
4357 | |||
4358 | static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid) | ||
4359 | { | ||
4360 | struct l2cap_move_chan_cfm cfm; | ||
4361 | |||
4362 | BT_DBG("conn %p, icid 0x%4.4x", conn, icid); | ||
4363 | |||
4364 | cfm.icid = cpu_to_le16(icid); | ||
4365 | cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED); | ||
4366 | |||
4367 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM, | ||
4368 | sizeof(cfm), &cfm); | ||
4033 | } | 4369 | } |
4034 | 4370 | ||
4035 | static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, | 4371 | static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, |
@@ -4043,11 +4379,289 @@ static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, | |||
4043 | l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp); | 4379 | l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp); |
4044 | } | 4380 | } |
4045 | 4381 | ||
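The four helpers above build the AMP channel-move signalling PDUs: MOVE_CHAN_REQ carries the channel's CID and the destination controller id, MOVE_CHAN_RSP returns a result for that CID, MOVE_CHAN_CFM confirms (or leaves unconfirmed) the move, and MOVE_CHAN_CFM_RSP acknowledges it. A simplified sketch of the initiator-side handshake, using the move states named in this patch; the real state machine in l2cap_logical_cfm() and the handlers below has more branches (local busy, collisions, timeouts):

#include <stdio.h>

/* Toy model of the initiator's move handshake; state names mirror the
 * L2CAP_MOVE_WAIT_* constants used in this patch, nothing here is kernel code. */
enum move_state {
	MOVE_STABLE,
	MOVE_WAIT_RSP_SUCCESS,	/* MOVE_CHAN_REQ sent, waiting for a success rsp */
	MOVE_WAIT_CONFIRM_RSP,	/* MOVE_CHAN_CFM sent, waiting for the cfm rsp */
};

int main(void)
{
	enum move_state st = MOVE_STABLE;

	st = MOVE_WAIT_RSP_SUCCESS;		/* l2cap_send_move_chan_req() */
	puts("-> MOVE_CHAN_REQ (icid, dest_amp_id)");

	puts("<- MOVE_CHAN_RSP (success)");	/* peer accepts the move */

	st = MOVE_WAIT_CONFIRM_RSP;		/* l2cap_send_move_chan_cfm() */
	puts("-> MOVE_CHAN_CFM (confirmed)");

	puts("<- MOVE_CHAN_CFM_RSP");		/* peer acks, move is complete */
	st = MOVE_STABLE;

	return st == MOVE_STABLE ? 0 : 1;
}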
4382 | static void __release_logical_link(struct l2cap_chan *chan) | ||
4383 | { | ||
4384 | chan->hs_hchan = NULL; | ||
4385 | chan->hs_hcon = NULL; | ||
4386 | |||
4387 | /* Placeholder - release the logical link */ | ||
4388 | } | ||
4389 | |||
4390 | static void l2cap_logical_fail(struct l2cap_chan *chan) | ||
4391 | { | ||
4392 | /* Logical link setup failed */ | ||
4393 | if (chan->state != BT_CONNECTED) { | ||
4394 | /* Create channel failure, disconnect */ | ||
4395 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | ||
4396 | return; | ||
4397 | } | ||
4398 | |||
4399 | switch (chan->move_role) { | ||
4400 | case L2CAP_MOVE_ROLE_RESPONDER: | ||
4401 | l2cap_move_done(chan); | ||
4402 | l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP); | ||
4403 | break; | ||
4404 | case L2CAP_MOVE_ROLE_INITIATOR: | ||
4405 | if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP || | ||
4406 | chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) { | ||
4407 | /* Remote has only sent pending or | ||
4408 | * success responses, clean up | ||
4409 | */ | ||
4410 | l2cap_move_done(chan); | ||
4411 | } | ||
4412 | |||
4413 | /* Other amp move states imply that the move | ||
4414 | * has already aborted | ||
4415 | */ | ||
4416 | l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); | ||
4417 | break; | ||
4418 | } | ||
4419 | } | ||
4420 | |||
4421 | static void l2cap_logical_finish_create(struct l2cap_chan *chan, | ||
4422 | struct hci_chan *hchan) | ||
4423 | { | ||
4424 | struct l2cap_conf_rsp rsp; | ||
4425 | |||
4426 | chan->hs_hchan = hchan; | ||
4427 | chan->hs_hcon->l2cap_data = chan->conn; | ||
4428 | |||
4429 | l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0); | ||
4430 | |||
4431 | if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { | ||
4432 | int err; | ||
4433 | |||
4434 | set_default_fcs(chan); | ||
4435 | |||
4436 | err = l2cap_ertm_init(chan); | ||
4437 | if (err < 0) | ||
4438 | l2cap_send_disconn_req(chan->conn, chan, -err); | ||
4439 | else | ||
4440 | l2cap_chan_ready(chan); | ||
4441 | } | ||
4442 | } | ||
4443 | |||
4444 | static void l2cap_logical_finish_move(struct l2cap_chan *chan, | ||
4445 | struct hci_chan *hchan) | ||
4446 | { | ||
4447 | chan->hs_hcon = hchan->conn; | ||
4448 | chan->hs_hcon->l2cap_data = chan->conn; | ||
4449 | |||
4450 | BT_DBG("move_state %d", chan->move_state); | ||
4451 | |||
4452 | switch (chan->move_state) { | ||
4453 | case L2CAP_MOVE_WAIT_LOGICAL_COMP: | ||
4454 | /* Move confirm will be sent after a success | ||
4455 | * response is received | ||
4456 | */ | ||
4457 | chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; | ||
4458 | break; | ||
4459 | case L2CAP_MOVE_WAIT_LOGICAL_CFM: | ||
4460 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { | ||
4461 | chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; | ||
4462 | } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) { | ||
4463 | chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP; | ||
4464 | l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); | ||
4465 | } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) { | ||
4466 | chan->move_state = L2CAP_MOVE_WAIT_CONFIRM; | ||
4467 | l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS); | ||
4468 | } | ||
4469 | break; | ||
4470 | default: | ||
4471 | /* Move was not in expected state, free the channel */ | ||
4472 | __release_logical_link(chan); | ||
4473 | |||
4474 | chan->move_state = L2CAP_MOVE_STABLE; | ||
4475 | } | ||
4476 | } | ||
4477 | |||
4478 | /* Call with chan locked */ | ||
4479 | void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan, | ||
4480 | u8 status) | ||
4481 | { | ||
4482 | BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status); | ||
4483 | |||
4484 | if (status) { | ||
4485 | l2cap_logical_fail(chan); | ||
4486 | __release_logical_link(chan); | ||
4487 | return; | ||
4488 | } | ||
4489 | |||
4490 | if (chan->state != BT_CONNECTED) { | ||
4491 | /* Ignore logical link if channel is on BR/EDR */ | ||
4492 | if (chan->local_amp_id) | ||
4493 | l2cap_logical_finish_create(chan, hchan); | ||
4494 | } else { | ||
4495 | l2cap_logical_finish_move(chan, hchan); | ||
4496 | } | ||
4497 | } | ||
4498 | |||
4499 | void l2cap_move_start(struct l2cap_chan *chan) | ||
4500 | { | ||
4501 | BT_DBG("chan %p", chan); | ||
4502 | |||
4503 | if (chan->local_amp_id == HCI_BREDR_ID) { | ||
4504 | if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED) | ||
4505 | return; | ||
4506 | chan->move_role = L2CAP_MOVE_ROLE_INITIATOR; | ||
4507 | chan->move_state = L2CAP_MOVE_WAIT_PREPARE; | ||
4508 | /* Placeholder - start physical link setup */ | ||
4509 | } else { | ||
4510 | chan->move_role = L2CAP_MOVE_ROLE_INITIATOR; | ||
4511 | chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; | ||
4512 | chan->move_id = 0; | ||
4513 | l2cap_move_setup(chan); | ||
4514 | l2cap_send_move_chan_req(chan, 0); | ||
4515 | } | ||
4516 | } | ||
4517 | |||
4518 | static void l2cap_do_create(struct l2cap_chan *chan, int result, | ||
4519 | u8 local_amp_id, u8 remote_amp_id) | ||
4520 | { | ||
4521 | BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state), | ||
4522 | local_amp_id, remote_amp_id); | ||
4523 | |||
4524 | chan->fcs = L2CAP_FCS_NONE; | ||
4525 | |||
4526 | /* Outgoing channel on AMP */ | ||
4527 | if (chan->state == BT_CONNECT) { | ||
4528 | if (result == L2CAP_CR_SUCCESS) { | ||
4529 | chan->local_amp_id = local_amp_id; | ||
4530 | l2cap_send_create_chan_req(chan, remote_amp_id); | ||
4531 | } else { | ||
4532 | /* Revert to BR/EDR connect */ | ||
4533 | l2cap_send_conn_req(chan); | ||
4534 | } | ||
4535 | |||
4536 | return; | ||
4537 | } | ||
4538 | |||
4539 | /* Incoming channel on AMP */ | ||
4540 | if (__l2cap_no_conn_pending(chan)) { | ||
4541 | struct l2cap_conn_rsp rsp; | ||
4542 | char buf[128]; | ||
4543 | rsp.scid = cpu_to_le16(chan->dcid); | ||
4544 | rsp.dcid = cpu_to_le16(chan->scid); | ||
4545 | |||
4546 | if (result == L2CAP_CR_SUCCESS) { | ||
4547 | /* Send successful response */ | ||
4548 | rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); | ||
4549 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); | ||
4550 | } else { | ||
4551 | /* Send negative response */ | ||
4552 | rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM); | ||
4553 | rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); | ||
4554 | } | ||
4555 | |||
4556 | l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP, | ||
4557 | sizeof(rsp), &rsp); | ||
4558 | |||
4559 | if (result == L2CAP_CR_SUCCESS) { | ||
4560 | __l2cap_state_change(chan, BT_CONFIG); | ||
4561 | set_bit(CONF_REQ_SENT, &chan->conf_state); | ||
4562 | l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn), | ||
4563 | L2CAP_CONF_REQ, | ||
4564 | l2cap_build_conf_req(chan, buf), buf); | ||
4565 | chan->num_conf_req++; | ||
4566 | } | ||
4567 | } | ||
4568 | } | ||
4569 | |||
4570 | static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id, | ||
4571 | u8 remote_amp_id) | ||
4572 | { | ||
4573 | l2cap_move_setup(chan); | ||
4574 | chan->move_id = local_amp_id; | ||
4575 | chan->move_state = L2CAP_MOVE_WAIT_RSP; | ||
4576 | |||
4577 | l2cap_send_move_chan_req(chan, remote_amp_id); | ||
4578 | } | ||
4579 | |||
4580 | static void l2cap_do_move_respond(struct l2cap_chan *chan, int result) | ||
4581 | { | ||
4582 | struct hci_chan *hchan = NULL; | ||
4583 | |||
4584 | /* Placeholder - get hci_chan for logical link */ | ||
4585 | |||
4586 | if (hchan) { | ||
4587 | if (hchan->state == BT_CONNECTED) { | ||
4588 | /* Logical link is ready to go */ | ||
4589 | chan->hs_hcon = hchan->conn; | ||
4590 | chan->hs_hcon->l2cap_data = chan->conn; | ||
4591 | chan->move_state = L2CAP_MOVE_WAIT_CONFIRM; | ||
4592 | l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS); | ||
4593 | |||
4594 | l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS); | ||
4595 | } else { | ||
4596 | /* Wait for logical link to be ready */ | ||
4597 | chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; | ||
4598 | } | ||
4599 | } else { | ||
4600 | /* Logical link not available */ | ||
4601 | l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED); | ||
4602 | } | ||
4603 | } | ||
4604 | |||
4605 | static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result) | ||
4606 | { | ||
4607 | if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) { | ||
4608 | u8 rsp_result; | ||
4609 | if (result == -EINVAL) | ||
4610 | rsp_result = L2CAP_MR_BAD_ID; | ||
4611 | else | ||
4612 | rsp_result = L2CAP_MR_NOT_ALLOWED; | ||
4613 | |||
4614 | l2cap_send_move_chan_rsp(chan, rsp_result); | ||
4615 | } | ||
4616 | |||
4617 | chan->move_role = L2CAP_MOVE_ROLE_NONE; | ||
4618 | chan->move_state = L2CAP_MOVE_STABLE; | ||
4619 | |||
4620 | /* Restart data transmission */ | ||
4621 | l2cap_ertm_send(chan); | ||
4622 | } | ||
4623 | |||
4624 | /* Invoke with locked chan */ | ||
4625 | void __l2cap_physical_cfm(struct l2cap_chan *chan, int result) | ||
4626 | { | ||
4627 | u8 local_amp_id = chan->local_amp_id; | ||
4628 | u8 remote_amp_id = chan->remote_amp_id; | ||
4629 | |||
4630 | BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d", | ||
4631 | chan, result, local_amp_id, remote_amp_id); | ||
4632 | |||
4633 | if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) { | ||
4634 | l2cap_chan_unlock(chan); | ||
4635 | return; | ||
4636 | } | ||
4637 | |||
4638 | if (chan->state != BT_CONNECTED) { | ||
4639 | l2cap_do_create(chan, result, local_amp_id, remote_amp_id); | ||
4640 | } else if (result != L2CAP_MR_SUCCESS) { | ||
4641 | l2cap_do_move_cancel(chan, result); | ||
4642 | } else { | ||
4643 | switch (chan->move_role) { | ||
4644 | case L2CAP_MOVE_ROLE_INITIATOR: | ||
4645 | l2cap_do_move_initiate(chan, local_amp_id, | ||
4646 | remote_amp_id); | ||
4647 | break; | ||
4648 | case L2CAP_MOVE_ROLE_RESPONDER: | ||
4649 | l2cap_do_move_respond(chan, result); | ||
4650 | break; | ||
4651 | default: | ||
4652 | l2cap_do_move_cancel(chan, result); | ||
4653 | break; | ||
4654 | } | ||
4655 | } | ||
4656 | } | ||
4657 | |||
4046 | static inline int l2cap_move_channel_req(struct l2cap_conn *conn, | 4658 | static inline int l2cap_move_channel_req(struct l2cap_conn *conn, |
4047 | struct l2cap_cmd_hdr *cmd, | 4659 | struct l2cap_cmd_hdr *cmd, |
4048 | u16 cmd_len, void *data) | 4660 | u16 cmd_len, void *data) |
4049 | { | 4661 | { |
4050 | struct l2cap_move_chan_req *req = data; | 4662 | struct l2cap_move_chan_req *req = data; |
4663 | struct l2cap_move_chan_rsp rsp; | ||
4664 | struct l2cap_chan *chan; | ||
4051 | u16 icid = 0; | 4665 | u16 icid = 0; |
4052 | u16 result = L2CAP_MR_NOT_ALLOWED; | 4666 | u16 result = L2CAP_MR_NOT_ALLOWED; |
4053 | 4667 | ||
@@ -4061,15 +4675,206 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn, | |||
4061 | if (!enable_hs) | 4675 | if (!enable_hs) |
4062 | return -EINVAL; | 4676 | return -EINVAL; |
4063 | 4677 | ||
4064 | /* Placeholder: Always refuse */ | 4678 | chan = l2cap_get_chan_by_dcid(conn, icid); |
4065 | l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result); | 4679 | if (!chan) { |
4680 | rsp.icid = cpu_to_le16(icid); | ||
4681 | rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED); | ||
4682 | l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP, | ||
4683 | sizeof(rsp), &rsp); | ||
4684 | return 0; | ||
4685 | } | ||
4686 | |||
4687 | chan->ident = cmd->ident; | ||
4688 | |||
4689 | if (chan->scid < L2CAP_CID_DYN_START || | ||
4690 | chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY || | ||
4691 | (chan->mode != L2CAP_MODE_ERTM && | ||
4692 | chan->mode != L2CAP_MODE_STREAMING)) { | ||
4693 | result = L2CAP_MR_NOT_ALLOWED; | ||
4694 | goto send_move_response; | ||
4695 | } | ||
4696 | |||
4697 | if (chan->local_amp_id == req->dest_amp_id) { | ||
4698 | result = L2CAP_MR_SAME_ID; | ||
4699 | goto send_move_response; | ||
4700 | } | ||
4701 | |||
4702 | if (req->dest_amp_id) { | ||
4703 | struct hci_dev *hdev; | ||
4704 | hdev = hci_dev_get(req->dest_amp_id); | ||
4705 | if (!hdev || hdev->dev_type != HCI_AMP || | ||
4706 | !test_bit(HCI_UP, &hdev->flags)) { | ||
4707 | if (hdev) | ||
4708 | hci_dev_put(hdev); | ||
4709 | |||
4710 | result = L2CAP_MR_BAD_ID; | ||
4711 | goto send_move_response; | ||
4712 | } | ||
4713 | hci_dev_put(hdev); | ||
4714 | } | ||
4715 | |||
4716 | /* Detect a move collision. Only send a collision response | ||
4717 | * if this side has "lost", otherwise proceed with the move. | ||
4718 | * The winner has the larger bd_addr. | ||
4719 | */ | ||
4720 | if ((__chan_is_moving(chan) || | ||
4721 | chan->move_role != L2CAP_MOVE_ROLE_NONE) && | ||
4722 | bacmp(conn->src, conn->dst) > 0) { | ||
4723 | result = L2CAP_MR_COLLISION; | ||
4724 | goto send_move_response; | ||
4725 | } | ||
4726 | |||
4727 | chan->move_role = L2CAP_MOVE_ROLE_RESPONDER; | ||
4728 | l2cap_move_setup(chan); | ||
4729 | chan->move_id = req->dest_amp_id; | ||
4730 | icid = chan->dcid; | ||
4731 | |||
4732 | if (!req->dest_amp_id) { | ||
4733 | /* Moving to BR/EDR */ | ||
4734 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { | ||
4735 | chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; | ||
4736 | result = L2CAP_MR_PEND; | ||
4737 | } else { | ||
4738 | chan->move_state = L2CAP_MOVE_WAIT_CONFIRM; | ||
4739 | result = L2CAP_MR_SUCCESS; | ||
4740 | } | ||
4741 | } else { | ||
4742 | chan->move_state = L2CAP_MOVE_WAIT_PREPARE; | ||
4743 | /* Placeholder - uncomment when amp functions are available */ | ||
4744 | /*amp_accept_physical(chan, req->dest_amp_id);*/ | ||
4745 | result = L2CAP_MR_PEND; | ||
4746 | } | ||
4747 | |||
4748 | send_move_response: | ||
4749 | l2cap_send_move_chan_rsp(chan, result); | ||
4750 | |||
4751 | l2cap_chan_unlock(chan); | ||
4066 | 4752 | ||
4067 | return 0; | 4753 | return 0; |
4068 | } | 4754 | } |
4069 | 4755 | ||
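Note on the collision handling added above: when both ends try to move the same channel at once, l2cap_move_channel_req breaks the tie by comparing the two controllers' Bluetooth addresses (the bacmp() check), so both peers reach the same verdict and exactly one of them answers L2CAP_MR_COLLISION. A minimal standalone sketch of that comparison, plain userspace C rather than kernel code, with an illustrative bdaddr type and made-up addresses:

/* Sketch only: models the bacmp()-style tie-break with a plain memcmp
 * over the 6 address bytes.  Both peers evaluate the same comparison,
 * so they always agree on which side sends L2CAP_MR_COLLISION.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct bdaddr { uint8_t b[6]; };               /* illustrative type */

static int addr_cmp(const struct bdaddr *a, const struct bdaddr *b)
{
        return memcmp(a->b, b->b, sizeof(a->b));
}

int main(void)
{
        struct bdaddr local  = { { 0xab, 0x89, 0x67, 0x45, 0x23, 0x01 } };
        struct bdaddr remote = { { 0xaa, 0x89, 0x67, 0x45, 0x23, 0x01 } };
        int move_already_pending = 1;          /* we are mid-move ourselves */

        if (move_already_pending && addr_cmp(&local, &remote) > 0)
                puts("answer the request with L2CAP_MR_COLLISION");
        else
                puts("handle the request as move responder");
        return 0;
}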
4070 | static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn, | 4756 | static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result) |
4071 | struct l2cap_cmd_hdr *cmd, | 4757 | { |
4072 | u16 cmd_len, void *data) | 4758 | struct l2cap_chan *chan; |
4759 | struct hci_chan *hchan = NULL; | ||
4760 | |||
4761 | chan = l2cap_get_chan_by_scid(conn, icid); | ||
4762 | if (!chan) { | ||
4763 | l2cap_send_move_chan_cfm_icid(conn, icid); | ||
4764 | return; | ||
4765 | } | ||
4766 | |||
4767 | __clear_chan_timer(chan); | ||
4768 | if (result == L2CAP_MR_PEND) | ||
4769 | __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT); | ||
4770 | |||
4771 | switch (chan->move_state) { | ||
4772 | case L2CAP_MOVE_WAIT_LOGICAL_COMP: | ||
4773 | /* Move confirm will be sent when logical link | ||
4774 | * is complete. | ||
4775 | */ | ||
4776 | chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; | ||
4777 | break; | ||
4778 | case L2CAP_MOVE_WAIT_RSP_SUCCESS: | ||
4779 | if (result == L2CAP_MR_PEND) { | ||
4780 | break; | ||
4781 | } else if (test_bit(CONN_LOCAL_BUSY, | ||
4782 | &chan->conn_state)) { | ||
4783 | chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; | ||
4784 | } else { | ||
4785 | /* Logical link is up or moving to BR/EDR, | ||
4786 | * proceed with move | ||
4787 | */ | ||
4788 | chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP; | ||
4789 | l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); | ||
4790 | } | ||
4791 | break; | ||
4792 | case L2CAP_MOVE_WAIT_RSP: | ||
4793 | /* Moving to AMP */ | ||
4794 | if (result == L2CAP_MR_SUCCESS) { | ||
4795 | /* Remote is ready, send confirm immediately | ||
4796 | * after logical link is ready | ||
4797 | */ | ||
4798 | chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; | ||
4799 | } else { | ||
4800 | /* Both logical link and move success | ||
4801 | * are required to confirm | ||
4802 | */ | ||
4803 | chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP; | ||
4804 | } | ||
4805 | |||
4806 | /* Placeholder - get hci_chan for logical link */ | ||
4807 | if (!hchan) { | ||
4808 | /* Logical link not available */ | ||
4809 | l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); | ||
4810 | break; | ||
4811 | } | ||
4812 | |||
4813 | /* If the logical link is not yet connected, do not | ||
4814 | * send confirmation. | ||
4815 | */ | ||
4816 | if (hchan->state != BT_CONNECTED) | ||
4817 | break; | ||
4818 | |||
4819 | /* Logical link is already ready to go */ | ||
4820 | |||
4821 | chan->hs_hcon = hchan->conn; | ||
4822 | chan->hs_hcon->l2cap_data = chan->conn; | ||
4823 | |||
4824 | if (result == L2CAP_MR_SUCCESS) { | ||
4825 | /* Can confirm now */ | ||
4826 | l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); | ||
4827 | } else { | ||
4828 | /* Now only need move success | ||
4829 | * to confirm | ||
4830 | */ | ||
4831 | chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; | ||
4832 | } | ||
4833 | |||
4834 | l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS); | ||
4835 | break; | ||
4836 | default: | ||
4837 | /* Any other amp move state means the move failed. */ | ||
4838 | chan->move_id = chan->local_amp_id; | ||
4839 | l2cap_move_done(chan); | ||
4840 | l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); | ||
4841 | } | ||
4842 | |||
4843 | l2cap_chan_unlock(chan); | ||
4844 | } | ||
4845 | |||
4846 | static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid, | ||
4847 | u16 result) | ||
4848 | { | ||
4849 | struct l2cap_chan *chan; | ||
4850 | |||
4851 | chan = l2cap_get_chan_by_ident(conn, ident); | ||
4852 | if (!chan) { | ||
4853 | /* Could not locate channel, icid is best guess */ | ||
4854 | l2cap_send_move_chan_cfm_icid(conn, icid); | ||
4855 | return; | ||
4856 | } | ||
4857 | |||
4858 | __clear_chan_timer(chan); | ||
4859 | |||
4860 | if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) { | ||
4861 | if (result == L2CAP_MR_COLLISION) { | ||
4862 | chan->move_role = L2CAP_MOVE_ROLE_RESPONDER; | ||
4863 | } else { | ||
4864 | /* Cleanup - cancel move */ | ||
4865 | chan->move_id = chan->local_amp_id; | ||
4866 | l2cap_move_done(chan); | ||
4867 | } | ||
4868 | } | ||
4869 | |||
4870 | l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); | ||
4871 | |||
4872 | l2cap_chan_unlock(chan); | ||
4873 | } | ||
4874 | |||
4875 | static int l2cap_move_channel_rsp(struct l2cap_conn *conn, | ||
4876 | struct l2cap_cmd_hdr *cmd, | ||
4877 | u16 cmd_len, void *data) | ||
4073 | { | 4878 | { |
4074 | struct l2cap_move_chan_rsp *rsp = data; | 4879 | struct l2cap_move_chan_rsp *rsp = data; |
4075 | u16 icid, result; | 4880 | u16 icid, result; |
@@ -4082,17 +4887,20 @@ static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn, | |||
4082 | 4887 | ||
4083 | BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); | 4888 | BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); |
4084 | 4889 | ||
4085 | /* Placeholder: Always unconfirmed */ | 4890 | if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND) |
4086 | l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED); | 4891 | l2cap_move_continue(conn, icid, result); |
4892 | else | ||
4893 | l2cap_move_fail(conn, cmd->ident, icid, result); | ||
4087 | 4894 | ||
4088 | return 0; | 4895 | return 0; |
4089 | } | 4896 | } |
4090 | 4897 | ||
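Taken together, the request/response handlers above and the confirm handlers below implement the four-PDU Move Channel exchange (Move Channel Request, Response, Confirmation, Confirmation Response) that the L2CAP_MOVE_CHAN_* opcodes in this file correspond to. A rough standalone model of the happy-path ordering as seen from the initiator; the enum values and strings are illustrative, not the kernel's:

/* Sketch: happy-path ordering of the AMP channel-move signalling from
 * the initiator's point of view.  Purely illustrative names.
 */
#include <stdio.h>

enum move_step {
        SEND_MOVE_REQ,          /* initiator -> responder */
        WAIT_MOVE_RSP,          /* responder answers success/pend/refuse */
        SEND_MOVE_CFM,          /* initiator confirms once logical link is up */
        WAIT_MOVE_CFM_RSP,      /* responder acknowledges, move is done */
        MOVE_DONE,
};

int main(void)
{
        static const char *desc[] = {
                [SEND_MOVE_REQ]     = "send Move Channel Request (dest AMP id)",
                [WAIT_MOVE_RSP]     = "wait for Move Channel Response",
                [SEND_MOVE_CFM]     = "send Move Channel Confirmation",
                [WAIT_MOVE_CFM_RSP] = "wait for Move Channel Confirmation Response",
                [MOVE_DONE]         = "switch traffic to the new controller",
        };

        for (enum move_step s = SEND_MOVE_REQ; s <= MOVE_DONE; s++)
                printf("%d: %s\n", s, desc[s]);
        return 0;
}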
4091 | static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn, | 4898 | static int l2cap_move_channel_confirm(struct l2cap_conn *conn, |
4092 | struct l2cap_cmd_hdr *cmd, | 4899 | struct l2cap_cmd_hdr *cmd, |
4093 | u16 cmd_len, void *data) | 4900 | u16 cmd_len, void *data) |
4094 | { | 4901 | { |
4095 | struct l2cap_move_chan_cfm *cfm = data; | 4902 | struct l2cap_move_chan_cfm *cfm = data; |
4903 | struct l2cap_chan *chan; | ||
4096 | u16 icid, result; | 4904 | u16 icid, result; |
4097 | 4905 | ||
4098 | if (cmd_len != sizeof(*cfm)) | 4906 | if (cmd_len != sizeof(*cfm)) |
@@ -4103,8 +4911,29 @@ static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn, | |||
4103 | 4911 | ||
4104 | BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); | 4912 | BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); |
4105 | 4913 | ||
4914 | chan = l2cap_get_chan_by_dcid(conn, icid); | ||
4915 | if (!chan) { | ||
4916 | /* Spec requires a response even if the icid was not found */ | ||
4917 | l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); | ||
4918 | return 0; | ||
4919 | } | ||
4920 | |||
4921 | if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) { | ||
4922 | if (result == L2CAP_MC_CONFIRMED) { | ||
4923 | chan->local_amp_id = chan->move_id; | ||
4924 | if (!chan->local_amp_id) | ||
4925 | __release_logical_link(chan); | ||
4926 | } else { | ||
4927 | chan->move_id = chan->local_amp_id; | ||
4928 | } | ||
4929 | |||
4930 | l2cap_move_done(chan); | ||
4931 | } | ||
4932 | |||
4106 | l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); | 4933 | l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); |
4107 | 4934 | ||
4935 | l2cap_chan_unlock(chan); | ||
4936 | |||
4108 | return 0; | 4937 | return 0; |
4109 | } | 4938 | } |
4110 | 4939 | ||
@@ -4113,6 +4942,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, | |||
4113 | u16 cmd_len, void *data) | 4942 | u16 cmd_len, void *data) |
4114 | { | 4943 | { |
4115 | struct l2cap_move_chan_cfm_rsp *rsp = data; | 4944 | struct l2cap_move_chan_cfm_rsp *rsp = data; |
4945 | struct l2cap_chan *chan; | ||
4116 | u16 icid; | 4946 | u16 icid; |
4117 | 4947 | ||
4118 | if (cmd_len != sizeof(*rsp)) | 4948 | if (cmd_len != sizeof(*rsp)) |
@@ -4122,11 +4952,28 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, | |||
4122 | 4952 | ||
4123 | BT_DBG("icid 0x%4.4x", icid); | 4953 | BT_DBG("icid 0x%4.4x", icid); |
4124 | 4954 | ||
4955 | chan = l2cap_get_chan_by_scid(conn, icid); | ||
4956 | if (!chan) | ||
4957 | return 0; | ||
4958 | |||
4959 | __clear_chan_timer(chan); | ||
4960 | |||
4961 | if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) { | ||
4962 | chan->local_amp_id = chan->move_id; | ||
4963 | |||
4964 | if (!chan->local_amp_id && chan->hs_hchan) | ||
4965 | __release_logical_link(chan); | ||
4966 | |||
4967 | l2cap_move_done(chan); | ||
4968 | } | ||
4969 | |||
4970 | l2cap_chan_unlock(chan); | ||
4971 | |||
4125 | return 0; | 4972 | return 0; |
4126 | } | 4973 | } |
4127 | 4974 | ||
4128 | static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency, | 4975 | static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency, |
4129 | u16 to_multiplier) | 4976 | u16 to_multiplier) |
4130 | { | 4977 | { |
4131 | u16 max_latency; | 4978 | u16 max_latency; |
4132 | 4979 | ||
@@ -4147,7 +4994,8 @@ static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency, | |||
4147 | } | 4994 | } |
4148 | 4995 | ||
4149 | static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, | 4996 | static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, |
4150 | struct l2cap_cmd_hdr *cmd, u8 *data) | 4997 | struct l2cap_cmd_hdr *cmd, |
4998 | u8 *data) | ||
4151 | { | 4999 | { |
4152 | struct hci_conn *hcon = conn->hcon; | 5000 | struct hci_conn *hcon = conn->hcon; |
4153 | struct l2cap_conn_param_update_req *req; | 5001 | struct l2cap_conn_param_update_req *req; |
@@ -4169,7 +5017,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, | |||
4169 | to_multiplier = __le16_to_cpu(req->to_multiplier); | 5017 | to_multiplier = __le16_to_cpu(req->to_multiplier); |
4170 | 5018 | ||
4171 | BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x", | 5019 | BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x", |
4172 | min, max, latency, to_multiplier); | 5020 | min, max, latency, to_multiplier); |
4173 | 5021 | ||
4174 | memset(&rsp, 0, sizeof(rsp)); | 5022 | memset(&rsp, 0, sizeof(rsp)); |
4175 | 5023 | ||
@@ -4180,7 +5028,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, | |||
4180 | rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); | 5028 | rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); |
4181 | 5029 | ||
4182 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, | 5030 | l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, |
4183 | sizeof(rsp), &rsp); | 5031 | sizeof(rsp), &rsp); |
4184 | 5032 | ||
4185 | if (!err) | 5033 | if (!err) |
4186 | hci_le_conn_update(hcon, min, max, latency, to_multiplier); | 5034 | hci_le_conn_update(hcon, min, max, latency, to_multiplier); |
@@ -4189,7 +5037,8 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, | |||
4189 | } | 5037 | } |
4190 | 5038 | ||
4191 | static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, | 5039 | static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, |
4192 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) | 5040 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
5041 | u8 *data) | ||
4193 | { | 5042 | { |
4194 | int err = 0; | 5043 | int err = 0; |
4195 | 5044 | ||
@@ -4203,7 +5052,8 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, | |||
4203 | break; | 5052 | break; |
4204 | 5053 | ||
4205 | case L2CAP_CONN_RSP: | 5054 | case L2CAP_CONN_RSP: |
4206 | err = l2cap_connect_rsp(conn, cmd, data); | 5055 | case L2CAP_CREATE_CHAN_RSP: |
5056 | err = l2cap_connect_create_rsp(conn, cmd, data); | ||
4207 | break; | 5057 | break; |
4208 | 5058 | ||
4209 | case L2CAP_CONF_REQ: | 5059 | case L2CAP_CONF_REQ: |
@@ -4241,10 +5091,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, | |||
4241 | err = l2cap_create_channel_req(conn, cmd, cmd_len, data); | 5091 | err = l2cap_create_channel_req(conn, cmd, cmd_len, data); |
4242 | break; | 5092 | break; |
4243 | 5093 | ||
4244 | case L2CAP_CREATE_CHAN_RSP: | ||
4245 | err = l2cap_create_channel_rsp(conn, cmd, data); | ||
4246 | break; | ||
4247 | |||
4248 | case L2CAP_MOVE_CHAN_REQ: | 5094 | case L2CAP_MOVE_CHAN_REQ: |
4249 | err = l2cap_move_channel_req(conn, cmd, cmd_len, data); | 5095 | err = l2cap_move_channel_req(conn, cmd, cmd_len, data); |
4250 | break; | 5096 | break; |
@@ -4271,7 +5117,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, | |||
4271 | } | 5117 | } |
4272 | 5118 | ||
4273 | static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn, | 5119 | static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn, |
4274 | struct l2cap_cmd_hdr *cmd, u8 *data) | 5120 | struct l2cap_cmd_hdr *cmd, u8 *data) |
4275 | { | 5121 | { |
4276 | switch (cmd->code) { | 5122 | switch (cmd->code) { |
4277 | case L2CAP_COMMAND_REJ: | 5123 | case L2CAP_COMMAND_REJ: |
@@ -4290,7 +5136,7 @@ static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn, | |||
4290 | } | 5136 | } |
4291 | 5137 | ||
4292 | static inline void l2cap_sig_channel(struct l2cap_conn *conn, | 5138 | static inline void l2cap_sig_channel(struct l2cap_conn *conn, |
4293 | struct sk_buff *skb) | 5139 | struct sk_buff *skb) |
4294 | { | 5140 | { |
4295 | u8 *data = skb->data; | 5141 | u8 *data = skb->data; |
4296 | int len = skb->len; | 5142 | int len = skb->len; |
@@ -4307,7 +5153,8 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, | |||
4307 | 5153 | ||
4308 | cmd_len = le16_to_cpu(cmd.len); | 5154 | cmd_len = le16_to_cpu(cmd.len); |
4309 | 5155 | ||
4310 | BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident); | 5156 | BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, |
5157 | cmd.ident); | ||
4311 | 5158 | ||
4312 | if (cmd_len > len || !cmd.ident) { | 5159 | if (cmd_len > len || !cmd.ident) { |
4313 | BT_DBG("corrupted command"); | 5160 | BT_DBG("corrupted command"); |
@@ -4326,7 +5173,8 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, | |||
4326 | 5173 | ||
4327 | /* FIXME: Map err to a valid reason */ | 5174 | /* FIXME: Map err to a valid reason */ |
4328 | rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); | 5175 | rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); |
4329 | l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); | 5176 | l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, |
5177 | sizeof(rej), &rej); | ||
4330 | } | 5178 | } |
4331 | 5179 | ||
4332 | data += cmd_len; | 5180 | data += cmd_len; |
@@ -4391,8 +5239,8 @@ static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) | |||
4391 | } | 5239 | } |
4392 | } | 5240 | } |
4393 | 5241 | ||
4394 | static void append_skb_frag(struct sk_buff *skb, | 5242 | static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag, |
4395 | struct sk_buff *new_frag, struct sk_buff **last_frag) | 5243 | struct sk_buff **last_frag) |
4396 | { | 5244 | { |
4397 | /* skb->len reflects data in skb as well as all fragments | 5245 | /* skb->len reflects data in skb as well as all fragments |
4398 | * skb->data_len reflects only data in fragments | 5246 | * skb->data_len reflects only data in fragments |
@@ -4492,6 +5340,12 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, | |||
4492 | return err; | 5340 | return err; |
4493 | } | 5341 | } |
4494 | 5342 | ||
5343 | static int l2cap_resegment(struct l2cap_chan *chan) | ||
5344 | { | ||
5345 | /* Placeholder */ | ||
5346 | return 0; | ||
5347 | } | ||
5348 | |||
4495 | void l2cap_chan_busy(struct l2cap_chan *chan, int busy) | 5349 | void l2cap_chan_busy(struct l2cap_chan *chan, int busy) |
4496 | { | 5350 | { |
4497 | u8 event; | 5351 | u8 event; |
@@ -4641,7 +5495,7 @@ static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq) | |||
4641 | 5495 | ||
4642 | if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { | 5496 | if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { |
4643 | if (__seq_offset(chan, txseq, chan->last_acked_seq) >= | 5497 | if (__seq_offset(chan, txseq, chan->last_acked_seq) >= |
4644 | chan->tx_win) { | 5498 | chan->tx_win) { |
4645 | /* See notes below regarding "double poll" and | 5499 | /* See notes below regarding "double poll" and |
4646 | * invalid packets. | 5500 | * invalid packets. |
4647 | */ | 5501 | */ |
@@ -4682,8 +5536,7 @@ static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq) | |||
4682 | } | 5536 | } |
4683 | 5537 | ||
4684 | if (__seq_offset(chan, txseq, chan->last_acked_seq) < | 5538 | if (__seq_offset(chan, txseq, chan->last_acked_seq) < |
4685 | __seq_offset(chan, chan->expected_tx_seq, | 5539 | __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) { |
4686 | chan->last_acked_seq)){ | ||
4687 | BT_DBG("Duplicate - expected_tx_seq later than txseq"); | 5540 | BT_DBG("Duplicate - expected_tx_seq later than txseq"); |
4688 | return L2CAP_TXSEQ_DUPLICATE; | 5541 | return L2CAP_TXSEQ_DUPLICATE; |
4689 | } | 5542 | } |
@@ -4808,8 +5661,8 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan, | |||
4808 | if (control->final) { | 5661 | if (control->final) { |
4809 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); | 5662 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); |
4810 | 5663 | ||
4811 | if (!test_and_clear_bit(CONN_REJ_ACT, | 5664 | if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) && |
4812 | &chan->conn_state)) { | 5665 | !__chan_is_moving(chan)) { |
4813 | control->final = 0; | 5666 | control->final = 0; |
4814 | l2cap_retransmit_all(chan, control); | 5667 | l2cap_retransmit_all(chan, control); |
4815 | } | 5668 | } |
@@ -4998,6 +5851,96 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan, | |||
4998 | return err; | 5851 | return err; |
4999 | } | 5852 | } |
5000 | 5853 | ||
5854 | static int l2cap_finish_move(struct l2cap_chan *chan) | ||
5855 | { | ||
5856 | BT_DBG("chan %p", chan); | ||
5857 | |||
5858 | chan->rx_state = L2CAP_RX_STATE_RECV; | ||
5859 | |||
5860 | if (chan->hs_hcon) | ||
5861 | chan->conn->mtu = chan->hs_hcon->hdev->block_mtu; | ||
5862 | else | ||
5863 | chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; | ||
5864 | |||
5865 | return l2cap_resegment(chan); | ||
5866 | } | ||
5867 | |||
5868 | static int l2cap_rx_state_wait_p(struct l2cap_chan *chan, | ||
5869 | struct l2cap_ctrl *control, | ||
5870 | struct sk_buff *skb, u8 event) | ||
5871 | { | ||
5872 | int err; | ||
5873 | |||
5874 | BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, | ||
5875 | event); | ||
5876 | |||
5877 | if (!control->poll) | ||
5878 | return -EPROTO; | ||
5879 | |||
5880 | l2cap_process_reqseq(chan, control->reqseq); | ||
5881 | |||
5882 | if (!skb_queue_empty(&chan->tx_q)) | ||
5883 | chan->tx_send_head = skb_peek(&chan->tx_q); | ||
5884 | else | ||
5885 | chan->tx_send_head = NULL; | ||
5886 | |||
5887 | /* Rewind next_tx_seq to the point expected | ||
5888 | * by the receiver. | ||
5889 | */ | ||
5890 | chan->next_tx_seq = control->reqseq; | ||
5891 | chan->unacked_frames = 0; | ||
5892 | |||
5893 | err = l2cap_finish_move(chan); | ||
5894 | if (err) | ||
5895 | return err; | ||
5896 | |||
5897 | set_bit(CONN_SEND_FBIT, &chan->conn_state); | ||
5898 | l2cap_send_i_or_rr_or_rnr(chan); | ||
5899 | |||
5900 | if (event == L2CAP_EV_RECV_IFRAME) | ||
5901 | return -EPROTO; | ||
5902 | |||
5903 | return l2cap_rx_state_recv(chan, control, NULL, event); | ||
5904 | } | ||
5905 | |||
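The rewind in l2cap_rx_state_wait_p above (and repeated in l2cap_rx_state_wait_f below) resets next_tx_seq to the peer's ReqSeq and clears unacked_frames, because ERTM sequence numbers are modular and ReqSeq names the first frame the peer has not acknowledged. A minimal sketch of that modular bookkeeping, assuming the standard 6-bit control-field sequence space; the kernel derives the modulus from the negotiated options, and the names below are illustrative:

/* Sketch: modular ERTM sequence arithmetic behind the "rewind" above.
 * Assumes a 6-bit TxSeq space (0..63); extended control fields use 14
 * bits, and the kernel picks the modulus per channel.
 */
#include <stdio.h>
#include <stdint.h>

#define SEQ_MASK 0x3f

static uint16_t seq_offset(uint16_t a, uint16_t b)
{
        return (a - b) & SEQ_MASK;              /* distance from b to a */
}

int main(void)
{
        uint16_t next_tx_seq = 5;     /* wrapped past 63 back to small values */
        uint16_t last_acked  = 60;
        uint16_t reqseq      = 62;    /* first frame the peer has not acked */

        /* Offsets are taken modulo the sequence space, so wrap-around
         * (60 -> 63 -> 0 -> ... -> 5) still yields the right distance.
         */
        printf("offset(next_tx_seq, last_acked) = %u\n",
               seq_offset(next_tx_seq, last_acked));   /* 9 */

        /* The rewind: restart transmission at the peer's ReqSeq and
         * treat nothing as unacknowledged.
         */
        next_tx_seq = reqseq;
        unsigned unacked_frames = 0;
        printf("restart at TxSeq %u, unacked %u\n", next_tx_seq, unacked_frames);
        return 0;
}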
5906 | static int l2cap_rx_state_wait_f(struct l2cap_chan *chan, | ||
5907 | struct l2cap_ctrl *control, | ||
5908 | struct sk_buff *skb, u8 event) | ||
5909 | { | ||
5910 | int err; | ||
5911 | |||
5912 | if (!control->final) | ||
5913 | return -EPROTO; | ||
5914 | |||
5915 | clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); | ||
5916 | |||
5917 | chan->rx_state = L2CAP_RX_STATE_RECV; | ||
5918 | l2cap_process_reqseq(chan, control->reqseq); | ||
5919 | |||
5920 | if (!skb_queue_empty(&chan->tx_q)) | ||
5921 | chan->tx_send_head = skb_peek(&chan->tx_q); | ||
5922 | else | ||
5923 | chan->tx_send_head = NULL; | ||
5924 | |||
5925 | /* Rewind next_tx_seq to the point expected | ||
5926 | * by the receiver. | ||
5927 | */ | ||
5928 | chan->next_tx_seq = control->reqseq; | ||
5929 | chan->unacked_frames = 0; | ||
5930 | |||
5931 | if (chan->hs_hcon) | ||
5932 | chan->conn->mtu = chan->hs_hcon->hdev->block_mtu; | ||
5933 | else | ||
5934 | chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; | ||
5935 | |||
5936 | err = l2cap_resegment(chan); | ||
5937 | |||
5938 | if (!err) | ||
5939 | err = l2cap_rx_state_recv(chan, control, skb, event); | ||
5940 | |||
5941 | return err; | ||
5942 | } | ||
5943 | |||
5001 | static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) | 5944 | static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) |
5002 | { | 5945 | { |
5003 | /* Make sure reqseq is for a packet that has been sent but not acked */ | 5946 | /* Make sure reqseq is for a packet that has been sent but not acked */ |
@@ -5024,6 +5967,12 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, | |||
5024 | err = l2cap_rx_state_srej_sent(chan, control, skb, | 5967 | err = l2cap_rx_state_srej_sent(chan, control, skb, |
5025 | event); | 5968 | event); |
5026 | break; | 5969 | break; |
5970 | case L2CAP_RX_STATE_WAIT_P: | ||
5971 | err = l2cap_rx_state_wait_p(chan, control, skb, event); | ||
5972 | break; | ||
5973 | case L2CAP_RX_STATE_WAIT_F: | ||
5974 | err = l2cap_rx_state_wait_f(chan, control, skb, event); | ||
5975 | break; | ||
5027 | default: | 5976 | default: |
5028 | /* shut it down */ | 5977 | /* shut it down */ |
5029 | break; | 5978 | break; |
@@ -5143,7 +6092,7 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) | |||
5143 | control->super); | 6092 | control->super); |
5144 | 6093 | ||
5145 | if (len != 0) { | 6094 | if (len != 0) { |
5146 | BT_ERR("%d", len); | 6095 | BT_ERR("Trailing bytes: %d in sframe", len); |
5147 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); | 6096 | l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); |
5148 | goto drop; | 6097 | goto drop; |
5149 | } | 6098 | } |
@@ -5323,7 +6272,7 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) | |||
5323 | int exact = 0, lm1 = 0, lm2 = 0; | 6272 | int exact = 0, lm1 = 0, lm2 = 0; |
5324 | struct l2cap_chan *c; | 6273 | struct l2cap_chan *c; |
5325 | 6274 | ||
5326 | BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); | 6275 | BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); |
5327 | 6276 | ||
5328 | /* Find listening sockets and check their link_mode */ | 6277 | /* Find listening sockets and check their link_mode */ |
5329 | read_lock(&chan_list_lock); | 6278 | read_lock(&chan_list_lock); |
@@ -5353,15 +6302,15 @@ void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) | |||
5353 | { | 6302 | { |
5354 | struct l2cap_conn *conn; | 6303 | struct l2cap_conn *conn; |
5355 | 6304 | ||
5356 | BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); | 6305 | BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); |
5357 | 6306 | ||
5358 | if (!status) { | 6307 | if (!status) { |
5359 | conn = l2cap_conn_add(hcon, status); | 6308 | conn = l2cap_conn_add(hcon, status); |
5360 | if (conn) | 6309 | if (conn) |
5361 | l2cap_conn_ready(conn); | 6310 | l2cap_conn_ready(conn); |
5362 | } else | 6311 | } else { |
5363 | l2cap_conn_del(hcon, bt_to_errno(status)); | 6312 | l2cap_conn_del(hcon, bt_to_errno(status)); |
5364 | 6313 | } | |
5365 | } | 6314 | } |
5366 | 6315 | ||
5367 | int l2cap_disconn_ind(struct hci_conn *hcon) | 6316 | int l2cap_disconn_ind(struct hci_conn *hcon) |
@@ -5437,13 +6386,13 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) | |||
5437 | continue; | 6386 | continue; |
5438 | } | 6387 | } |
5439 | 6388 | ||
5440 | if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) { | 6389 | if (!__l2cap_no_conn_pending(chan)) { |
5441 | l2cap_chan_unlock(chan); | 6390 | l2cap_chan_unlock(chan); |
5442 | continue; | 6391 | continue; |
5443 | } | 6392 | } |
5444 | 6393 | ||
5445 | if (!status && (chan->state == BT_CONNECTED || | 6394 | if (!status && (chan->state == BT_CONNECTED || |
5446 | chan->state == BT_CONFIG)) { | 6395 | chan->state == BT_CONFIG)) { |
5447 | struct sock *sk = chan->sk; | 6396 | struct sock *sk = chan->sk; |
5448 | 6397 | ||
5449 | clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); | 6398 | clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); |
@@ -5456,7 +6405,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) | |||
5456 | 6405 | ||
5457 | if (chan->state == BT_CONNECT) { | 6406 | if (chan->state == BT_CONNECT) { |
5458 | if (!status) { | 6407 | if (!status) { |
5459 | l2cap_send_conn_req(chan); | 6408 | l2cap_start_connection(chan); |
5460 | } else { | 6409 | } else { |
5461 | __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); | 6410 | __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); |
5462 | } | 6411 | } |
@@ -5470,11 +6419,9 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) | |||
5470 | if (!status) { | 6419 | if (!status) { |
5471 | if (test_bit(BT_SK_DEFER_SETUP, | 6420 | if (test_bit(BT_SK_DEFER_SETUP, |
5472 | &bt_sk(sk)->flags)) { | 6421 | &bt_sk(sk)->flags)) { |
5473 | struct sock *parent = bt_sk(sk)->parent; | ||
5474 | res = L2CAP_CR_PEND; | 6422 | res = L2CAP_CR_PEND; |
5475 | stat = L2CAP_CS_AUTHOR_PEND; | 6423 | stat = L2CAP_CS_AUTHOR_PEND; |
5476 | if (parent) | 6424 | chan->ops->defer(chan); |
5477 | parent->sk_data_ready(parent, 0); | ||
5478 | } else { | 6425 | } else { |
5479 | __l2cap_state_change(chan, BT_CONFIG); | 6426 | __l2cap_state_change(chan, BT_CONFIG); |
5480 | res = L2CAP_CR_SUCCESS; | 6427 | res = L2CAP_CR_SUCCESS; |
@@ -5494,7 +6441,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) | |||
5494 | rsp.result = cpu_to_le16(res); | 6441 | rsp.result = cpu_to_le16(res); |
5495 | rsp.status = cpu_to_le16(stat); | 6442 | rsp.status = cpu_to_le16(stat); |
5496 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, | 6443 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, |
5497 | sizeof(rsp), &rsp); | 6444 | sizeof(rsp), &rsp); |
5498 | 6445 | ||
5499 | if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && | 6446 | if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && |
5500 | res == L2CAP_CR_SUCCESS) { | 6447 | res == L2CAP_CR_SUCCESS) { |
@@ -5519,6 +6466,12 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) | |||
5519 | int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) | 6466 | int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) |
5520 | { | 6467 | { |
5521 | struct l2cap_conn *conn = hcon->l2cap_data; | 6468 | struct l2cap_conn *conn = hcon->l2cap_data; |
6469 | struct l2cap_hdr *hdr; | ||
6470 | int len; | ||
6471 | |||
6472 | /* For AMP controller do not create l2cap conn */ | ||
6473 | if (!conn && hcon->hdev->dev_type != HCI_BREDR) | ||
6474 | goto drop; | ||
5522 | 6475 | ||
5523 | if (!conn) | 6476 | if (!conn) |
5524 | conn = l2cap_conn_add(hcon, 0); | 6477 | conn = l2cap_conn_add(hcon, 0); |
@@ -5528,10 +6481,10 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) | |||
5528 | 6481 | ||
5529 | BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags); | 6482 | BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags); |
5530 | 6483 | ||
5531 | if (!(flags & ACL_CONT)) { | 6484 | switch (flags) { |
5532 | struct l2cap_hdr *hdr; | 6485 | case ACL_START: |
5533 | int len; | 6486 | case ACL_START_NO_FLUSH: |
5534 | 6487 | case ACL_COMPLETE: | |
5535 | if (conn->rx_len) { | 6488 | if (conn->rx_len) { |
5536 | BT_ERR("Unexpected start frame (len %d)", skb->len); | 6489 | BT_ERR("Unexpected start frame (len %d)", skb->len); |
5537 | kfree_skb(conn->rx_skb); | 6490 | kfree_skb(conn->rx_skb); |
@@ -5560,20 +6513,22 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) | |||
5560 | 6513 | ||
5561 | if (skb->len > len) { | 6514 | if (skb->len > len) { |
5562 | BT_ERR("Frame is too long (len %d, expected len %d)", | 6515 | BT_ERR("Frame is too long (len %d, expected len %d)", |
5563 | skb->len, len); | 6516 | skb->len, len); |
5564 | l2cap_conn_unreliable(conn, ECOMM); | 6517 | l2cap_conn_unreliable(conn, ECOMM); |
5565 | goto drop; | 6518 | goto drop; |
5566 | } | 6519 | } |
5567 | 6520 | ||
5568 | /* Allocate skb for the complete frame (with header) */ | 6521 | /* Allocate skb for the complete frame (with header) */ |
5569 | conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC); | 6522 | conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); |
5570 | if (!conn->rx_skb) | 6523 | if (!conn->rx_skb) |
5571 | goto drop; | 6524 | goto drop; |
5572 | 6525 | ||
5573 | skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), | 6526 | skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), |
5574 | skb->len); | 6527 | skb->len); |
5575 | conn->rx_len = len - skb->len; | 6528 | conn->rx_len = len - skb->len; |
5576 | } else { | 6529 | break; |
6530 | |||
6531 | case ACL_CONT: | ||
5577 | BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); | 6532 | BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); |
5578 | 6533 | ||
5579 | if (!conn->rx_len) { | 6534 | if (!conn->rx_len) { |
@@ -5584,7 +6539,7 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) | |||
5584 | 6539 | ||
5585 | if (skb->len > conn->rx_len) { | 6540 | if (skb->len > conn->rx_len) { |
5586 | BT_ERR("Fragment is too long (len %d, expected %d)", | 6541 | BT_ERR("Fragment is too long (len %d, expected %d)", |
5587 | skb->len, conn->rx_len); | 6542 | skb->len, conn->rx_len); |
5588 | kfree_skb(conn->rx_skb); | 6543 | kfree_skb(conn->rx_skb); |
5589 | conn->rx_skb = NULL; | 6544 | conn->rx_skb = NULL; |
5590 | conn->rx_len = 0; | 6545 | conn->rx_len = 0; |
@@ -5593,7 +6548,7 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) | |||
5593 | } | 6548 | } |
5594 | 6549 | ||
5595 | skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), | 6550 | skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), |
5596 | skb->len); | 6551 | skb->len); |
5597 | conn->rx_len -= skb->len; | 6552 | conn->rx_len -= skb->len; |
5598 | 6553 | ||
5599 | if (!conn->rx_len) { | 6554 | if (!conn->rx_len) { |
@@ -5601,6 +6556,7 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) | |||
5601 | l2cap_recv_frame(conn, conn->rx_skb); | 6556 | l2cap_recv_frame(conn, conn->rx_skb); |
5602 | conn->rx_skb = NULL; | 6557 | conn->rx_skb = NULL; |
5603 | } | 6558 | } |
6559 | break; | ||
5604 | } | 6560 | } |
5605 | 6561 | ||
5606 | drop: | 6562 | drop: |
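The reworked receive path above reassembles one L2CAP frame from ACL_START/ACL_CONT fragments: the start fragment carries the basic L2CAP header, whose length field says how much payload to expect, and continuation fragments are appended until rx_len drops to zero. A standalone sketch of that accounting, plain C without sk_buffs; the 4-byte basic header (little-endian length, then CID) is as defined by the L2CAP spec:

/* Sketch of the reassembly accounting, without sk_buffs.
 * Basic L2CAP header: 2-byte payload length, 2-byte CID, little endian.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint8_t frame[65539];   /* worst case: 4-byte header + 65535 payload */
static size_t  frame_used;     /* bytes collected so far                    */
static size_t  rx_len;         /* bytes still expected                      */

static void acl_start(const uint8_t *data, size_t len)
{
        uint16_t plen = data[0] | (data[1] << 8);   /* header: length, CID */

        frame_used = 0;
        memcpy(frame + frame_used, data, len);
        frame_used += len;
        rx_len = 4 + plen - len;                    /* header + payload left */
        if (rx_len == 0)
                printf("complete frame, %zu bytes\n", frame_used);
}

static void acl_cont(const uint8_t *data, size_t len)
{
        memcpy(frame + frame_used, data, len);
        frame_used += len;
        rx_len -= len;
        if (rx_len == 0)
                printf("complete frame, %zu bytes\n", frame_used);
}

int main(void)
{
        /* 6-byte payload on CID 0x0040, split across two ACL packets */
        uint8_t start[] = { 0x06, 0x00, 0x40, 0x00, 'h', 'e', 'l' };
        uint8_t cont[]  = { 'l', 'o', '!' };

        acl_start(start, sizeof(start));
        acl_cont(cont, sizeof(cont));
        return 0;
}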
@@ -5617,12 +6573,11 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p) | |||
5617 | list_for_each_entry(c, &chan_list, global_l) { | 6573 | list_for_each_entry(c, &chan_list, global_l) { |
5618 | struct sock *sk = c->sk; | 6574 | struct sock *sk = c->sk; |
5619 | 6575 | ||
5620 | seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", | 6576 | seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", |
5621 | batostr(&bt_sk(sk)->src), | 6577 | &bt_sk(sk)->src, &bt_sk(sk)->dst, |
5622 | batostr(&bt_sk(sk)->dst), | 6578 | c->state, __le16_to_cpu(c->psm), |
5623 | c->state, __le16_to_cpu(c->psm), | 6579 | c->scid, c->dcid, c->imtu, c->omtu, |
5624 | c->scid, c->dcid, c->imtu, c->omtu, | 6580 | c->sec_level, c->mode); |
5625 | c->sec_level, c->mode); | ||
5626 | } | 6581 | } |
5627 | 6582 | ||
5628 | read_unlock(&chan_list_lock); | 6583 | read_unlock(&chan_list_lock); |
@@ -5653,8 +6608,8 @@ int __init l2cap_init(void) | |||
5653 | return err; | 6608 | return err; |
5654 | 6609 | ||
5655 | if (bt_debugfs) { | 6610 | if (bt_debugfs) { |
5656 | l2cap_debugfs = debugfs_create_file("l2cap", 0444, | 6611 | l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, |
5657 | bt_debugfs, NULL, &l2cap_debugfs_fops); | 6612 | NULL, &l2cap_debugfs_fops); |
5658 | if (!l2cap_debugfs) | 6613 | if (!l2cap_debugfs) |
5659 | BT_ERR("Failed to create L2CAP debug file"); | 6614 | BT_ERR("Failed to create L2CAP debug file"); |
5660 | } | 6615 | } |
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 083f2bf065d4..1bcfb8422fdc 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c | |||
@@ -40,7 +40,8 @@ static struct bt_sock_list l2cap_sk_list = { | |||
40 | 40 | ||
41 | static const struct proto_ops l2cap_sock_ops; | 41 | static const struct proto_ops l2cap_sock_ops; |
42 | static void l2cap_sock_init(struct sock *sk, struct sock *parent); | 42 | static void l2cap_sock_init(struct sock *sk, struct sock *parent); |
43 | static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio); | 43 | static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, |
44 | int proto, gfp_t prio); | ||
44 | 45 | ||
45 | static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) | 46 | static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) |
46 | { | 47 | { |
@@ -106,7 +107,8 @@ done: | |||
106 | return err; | 107 | return err; |
107 | } | 108 | } |
108 | 109 | ||
109 | static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) | 110 | static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, |
111 | int alen, int flags) | ||
110 | { | 112 | { |
111 | struct sock *sk = sock->sk; | 113 | struct sock *sk = sock->sk; |
112 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; | 114 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; |
@@ -134,7 +136,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al | |||
134 | lock_sock(sk); | 136 | lock_sock(sk); |
135 | 137 | ||
136 | err = bt_sock_wait_state(sk, BT_CONNECTED, | 138 | err = bt_sock_wait_state(sk, BT_CONNECTED, |
137 | sock_sndtimeo(sk, flags & O_NONBLOCK)); | 139 | sock_sndtimeo(sk, flags & O_NONBLOCK)); |
138 | 140 | ||
139 | release_sock(sk); | 141 | release_sock(sk); |
140 | 142 | ||
@@ -185,7 +187,8 @@ done: | |||
185 | return err; | 187 | return err; |
186 | } | 188 | } |
187 | 189 | ||
188 | static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags) | 190 | static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, |
191 | int flags) | ||
189 | { | 192 | { |
190 | DECLARE_WAITQUEUE(wait, current); | 193 | DECLARE_WAITQUEUE(wait, current); |
191 | struct sock *sk = sock->sk, *nsk; | 194 | struct sock *sk = sock->sk, *nsk; |
@@ -241,7 +244,8 @@ done: | |||
241 | return err; | 244 | return err; |
242 | } | 245 | } |
243 | 246 | ||
244 | static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) | 247 | static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, |
248 | int *len, int peer) | ||
245 | { | 249 | { |
246 | struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; | 250 | struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; |
247 | struct sock *sk = sock->sk; | 251 | struct sock *sk = sock->sk; |
@@ -266,7 +270,8 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l | |||
266 | return 0; | 270 | return 0; |
267 | } | 271 | } |
268 | 272 | ||
269 | static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) | 273 | static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, |
274 | char __user *optval, int __user *optlen) | ||
270 | { | 275 | { |
271 | struct sock *sk = sock->sk; | 276 | struct sock *sk = sock->sk; |
272 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; | 277 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; |
@@ -309,7 +314,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us | |||
309 | break; | 314 | break; |
310 | case BT_SECURITY_HIGH: | 315 | case BT_SECURITY_HIGH: |
311 | opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT | | 316 | opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT | |
312 | L2CAP_LM_SECURE; | 317 | L2CAP_LM_SECURE; |
313 | break; | 318 | break; |
314 | default: | 319 | default: |
315 | opt = 0; | 320 | opt = 0; |
@@ -353,7 +358,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us | |||
353 | return err; | 358 | return err; |
354 | } | 359 | } |
355 | 360 | ||
356 | static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) | 361 | static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, |
362 | char __user *optval, int __user *optlen) | ||
357 | { | 363 | { |
358 | struct sock *sk = sock->sk; | 364 | struct sock *sk = sock->sk; |
359 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; | 365 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; |
@@ -377,19 +383,20 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch | |||
377 | switch (optname) { | 383 | switch (optname) { |
378 | case BT_SECURITY: | 384 | case BT_SECURITY: |
379 | if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && | 385 | if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && |
380 | chan->chan_type != L2CAP_CHAN_RAW) { | 386 | chan->chan_type != L2CAP_CHAN_RAW) { |
381 | err = -EINVAL; | 387 | err = -EINVAL; |
382 | break; | 388 | break; |
383 | } | 389 | } |
384 | 390 | ||
385 | memset(&sec, 0, sizeof(sec)); | 391 | memset(&sec, 0, sizeof(sec)); |
386 | if (chan->conn) | 392 | if (chan->conn) { |
387 | sec.level = chan->conn->hcon->sec_level; | 393 | sec.level = chan->conn->hcon->sec_level; |
388 | else | ||
389 | sec.level = chan->sec_level; | ||
390 | 394 | ||
391 | if (sk->sk_state == BT_CONNECTED) | 395 | if (sk->sk_state == BT_CONNECTED) |
392 | sec.key_size = chan->conn->hcon->enc_key_size; | 396 | sec.key_size = chan->conn->hcon->enc_key_size; |
397 | } else { | ||
398 | sec.level = chan->sec_level; | ||
399 | } | ||
393 | 400 | ||
394 | len = min_t(unsigned int, len, sizeof(sec)); | 401 | len = min_t(unsigned int, len, sizeof(sec)); |
395 | if (copy_to_user(optval, (char *) &sec, len)) | 402 | if (copy_to_user(optval, (char *) &sec, len)) |
@@ -411,14 +418,14 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch | |||
411 | 418 | ||
412 | case BT_FLUSHABLE: | 419 | case BT_FLUSHABLE: |
413 | if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags), | 420 | if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags), |
414 | (u32 __user *) optval)) | 421 | (u32 __user *) optval)) |
415 | err = -EFAULT; | 422 | err = -EFAULT; |
416 | 423 | ||
417 | break; | 424 | break; |
418 | 425 | ||
419 | case BT_POWER: | 426 | case BT_POWER: |
420 | if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM | 427 | if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM |
421 | && sk->sk_type != SOCK_RAW) { | 428 | && sk->sk_type != SOCK_RAW) { |
422 | err = -EINVAL; | 429 | err = -EINVAL; |
423 | break; | 430 | break; |
424 | } | 431 | } |
@@ -466,7 +473,8 @@ static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu) | |||
466 | return true; | 473 | return true; |
467 | } | 474 | } |
468 | 475 | ||
469 | static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) | 476 | static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, |
477 | char __user *optval, unsigned int optlen) | ||
470 | { | 478 | { |
471 | struct sock *sk = sock->sk; | 479 | struct sock *sk = sock->sk; |
472 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; | 480 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; |
@@ -529,6 +537,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us | |||
529 | chan->fcs = opts.fcs; | 537 | chan->fcs = opts.fcs; |
530 | chan->max_tx = opts.max_tx; | 538 | chan->max_tx = opts.max_tx; |
531 | chan->tx_win = opts.txwin_size; | 539 | chan->tx_win = opts.txwin_size; |
540 | chan->flush_to = opts.flush_to; | ||
532 | break; | 541 | break; |
533 | 542 | ||
534 | case L2CAP_LM: | 543 | case L2CAP_LM: |
@@ -564,7 +573,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us | |||
564 | return err; | 573 | return err; |
565 | } | 574 | } |
566 | 575 | ||
567 | static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) | 576 | static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, |
577 | char __user *optval, unsigned int optlen) | ||
568 | { | 578 | { |
569 | struct sock *sk = sock->sk; | 579 | struct sock *sk = sock->sk; |
570 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; | 580 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; |
@@ -587,7 +597,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch | |||
587 | switch (optname) { | 597 | switch (optname) { |
588 | case BT_SECURITY: | 598 | case BT_SECURITY: |
589 | if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && | 599 | if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && |
590 | chan->chan_type != L2CAP_CHAN_RAW) { | 600 | chan->chan_type != L2CAP_CHAN_RAW) { |
591 | err = -EINVAL; | 601 | err = -EINVAL; |
592 | break; | 602 | break; |
593 | } | 603 | } |
@@ -601,7 +611,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch | |||
601 | } | 611 | } |
602 | 612 | ||
603 | if (sec.level < BT_SECURITY_LOW || | 613 | if (sec.level < BT_SECURITY_LOW || |
604 | sec.level > BT_SECURITY_HIGH) { | 614 | sec.level > BT_SECURITY_HIGH) { |
605 | err = -EINVAL; | 615 | err = -EINVAL; |
606 | break; | 616 | break; |
607 | } | 617 | } |
@@ -627,7 +637,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch | |||
627 | 637 | ||
628 | /* or for ACL link */ | 638 | /* or for ACL link */ |
629 | } else if ((sk->sk_state == BT_CONNECT2 && | 639 | } else if ((sk->sk_state == BT_CONNECT2 && |
630 | test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) || | 640 | test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) || |
631 | sk->sk_state == BT_CONNECTED) { | 641 | sk->sk_state == BT_CONNECTED) { |
632 | if (!l2cap_chan_check_security(chan)) | 642 | if (!l2cap_chan_check_security(chan)) |
633 | set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); | 643 | set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); |
@@ -684,7 +694,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch | |||
684 | 694 | ||
685 | case BT_POWER: | 695 | case BT_POWER: |
686 | if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && | 696 | if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && |
687 | chan->chan_type != L2CAP_CHAN_RAW) { | 697 | chan->chan_type != L2CAP_CHAN_RAW) { |
688 | err = -EINVAL; | 698 | err = -EINVAL; |
689 | break; | 699 | break; |
690 | } | 700 | } |
@@ -720,12 +730,17 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch | |||
720 | } | 730 | } |
721 | 731 | ||
722 | if (chan->mode != L2CAP_MODE_ERTM && | 732 | if (chan->mode != L2CAP_MODE_ERTM && |
723 | chan->mode != L2CAP_MODE_STREAMING) { | 733 | chan->mode != L2CAP_MODE_STREAMING) { |
724 | err = -EOPNOTSUPP; | 734 | err = -EOPNOTSUPP; |
725 | break; | 735 | break; |
726 | } | 736 | } |
727 | 737 | ||
728 | chan->chan_policy = (u8) opt; | 738 | chan->chan_policy = (u8) opt; |
739 | |||
740 | if (sk->sk_state == BT_CONNECTED && | ||
741 | chan->move_role == L2CAP_MOVE_ROLE_NONE) | ||
742 | l2cap_move_start(chan); | ||
743 | |||
729 | break; | 744 | break; |
730 | 745 | ||
731 | default: | 746 | default: |
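With the added call to l2cap_move_start() above, flipping the channel policy on an already-connected socket now kicks off an AMP move immediately instead of only affecting future connections. A hedged userspace illustration, assuming the BT_CHANNEL_POLICY option and the BT_CHANNEL_POLICY_* constants as exposed by the BlueZ <bluetooth/bluetooth.h> header; error handling trimmed:

/* Illustrative only: ask the kernel to prefer an AMP controller for an
 * existing ERTM/streaming L2CAP connection.  Assumes BlueZ userspace
 * headers providing BT_CHANNEL_POLICY and the policy constants.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

static int prefer_amp(int sk)
{
        uint32_t policy = BT_CHANNEL_POLICY_AMP_PREFERRED;

        /* On a connected channel this now triggers l2cap_move_start(). */
        if (setsockopt(sk, SOL_BLUETOOTH, BT_CHANNEL_POLICY,
                       &policy, sizeof(policy)) < 0) {
                perror("setsockopt(BT_CHANNEL_POLICY)");
                return -1;
        }
        return 0;
}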
@@ -737,7 +752,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch | |||
737 | return err; | 752 | return err; |
738 | } | 753 | } |
739 | 754 | ||
740 | static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) | 755 | static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, |
756 | struct msghdr *msg, size_t len) | ||
741 | { | 757 | { |
742 | struct sock *sk = sock->sk; | 758 | struct sock *sk = sock->sk; |
743 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; | 759 | struct l2cap_chan *chan = l2cap_pi(sk)->chan; |
@@ -762,7 +778,8 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms | |||
762 | return err; | 778 | return err; |
763 | } | 779 | } |
764 | 780 | ||
765 | static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) | 781 | static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, |
782 | struct msghdr *msg, size_t len, int flags) | ||
766 | { | 783 | { |
767 | struct sock *sk = sock->sk; | 784 | struct sock *sk = sock->sk; |
768 | struct l2cap_pinfo *pi = l2cap_pi(sk); | 785 | struct l2cap_pinfo *pi = l2cap_pi(sk); |
@@ -866,7 +883,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how) | |||
866 | 883 | ||
867 | if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) | 884 | if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) |
868 | err = bt_sock_wait_state(sk, BT_CLOSED, | 885 | err = bt_sock_wait_state(sk, BT_CLOSED, |
869 | sk->sk_lingertime); | 886 | sk->sk_lingertime); |
870 | } | 887 | } |
871 | 888 | ||
872 | if (!err && sk->sk_err) | 889 | if (!err && sk->sk_err) |
@@ -930,7 +947,7 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan) | |||
930 | } | 947 | } |
931 | 948 | ||
932 | sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, | 949 | sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, |
933 | GFP_ATOMIC); | 950 | GFP_ATOMIC); |
934 | if (!sk) | 951 | if (!sk) |
935 | return NULL; | 952 | return NULL; |
936 | 953 | ||
@@ -938,6 +955,8 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan) | |||
938 | 955 | ||
939 | l2cap_sock_init(sk, parent); | 956 | l2cap_sock_init(sk, parent); |
940 | 957 | ||
958 | bt_accept_enqueue(parent, sk); | ||
959 | |||
941 | return l2cap_pi(sk)->chan; | 960 | return l2cap_pi(sk)->chan; |
942 | } | 961 | } |
943 | 962 | ||
@@ -1068,6 +1087,15 @@ static void l2cap_sock_ready_cb(struct l2cap_chan *chan) | |||
1068 | release_sock(sk); | 1087 | release_sock(sk); |
1069 | } | 1088 | } |
1070 | 1089 | ||
1090 | static void l2cap_sock_defer_cb(struct l2cap_chan *chan) | ||
1091 | { | ||
1092 | struct sock *sk = chan->data; | ||
1093 | struct sock *parent = bt_sk(sk)->parent; | ||
1094 | |||
1095 | if (parent) | ||
1096 | parent->sk_data_ready(parent, 0); | ||
1097 | } | ||
1098 | |||
1071 | static struct l2cap_ops l2cap_chan_ops = { | 1099 | static struct l2cap_ops l2cap_chan_ops = { |
1072 | .name = "L2CAP Socket Interface", | 1100 | .name = "L2CAP Socket Interface", |
1073 | .new_connection = l2cap_sock_new_connection_cb, | 1101 | .new_connection = l2cap_sock_new_connection_cb, |
@@ -1076,6 +1104,7 @@ static struct l2cap_ops l2cap_chan_ops = { | |||
1076 | .teardown = l2cap_sock_teardown_cb, | 1104 | .teardown = l2cap_sock_teardown_cb, |
1077 | .state_change = l2cap_sock_state_change_cb, | 1105 | .state_change = l2cap_sock_state_change_cb, |
1078 | .ready = l2cap_sock_ready_cb, | 1106 | .ready = l2cap_sock_ready_cb, |
1107 | .defer = l2cap_sock_defer_cb, | ||
1079 | .alloc_skb = l2cap_sock_alloc_skb_cb, | 1108 | .alloc_skb = l2cap_sock_alloc_skb_cb, |
1080 | }; | 1109 | }; |
1081 | 1110 | ||
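The new .defer callback lets the core report "connection pending authorization" without reaching into socket internals: the security_cfm hunk in l2cap_core.c above now calls chan->ops->defer(chan) where it used to wake bt_sk(sk)->parent directly. A tiny sketch of the pattern with hypothetical names, showing why the indirection keeps the core independent of the socket layer:

/* Sketch of the ops-callback pattern, hypothetical names only.  The
 * core layer sees just the ops table; the socket front end decides
 * what "defer" means (here: wake whoever is waiting in accept()).
 */
#include <stdio.h>

struct chan;

struct chan_ops {
        void (*defer)(struct chan *c);
};

struct chan {
        const struct chan_ops *ops;
        void *frontend_data;            /* e.g. the owning socket */
};

/* core side: no knowledge of sockets */
static void core_authorization_pending(struct chan *c)
{
        if (c->ops->defer)
                c->ops->defer(c);
}

/* socket front end */
static void sock_defer_cb(struct chan *c)
{
        printf("wake listener for %p\n", c->frontend_data);
}

int main(void)
{
        static const struct chan_ops sock_ops = { .defer = sock_defer_cb };
        struct chan c = { .ops = &sock_ops, .frontend_data = (void *)0x1 };

        core_authorization_pending(&c);
        return 0;
}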
@@ -1083,7 +1112,8 @@ static void l2cap_sock_destruct(struct sock *sk) | |||
1083 | { | 1112 | { |
1084 | BT_DBG("sk %p", sk); | 1113 | BT_DBG("sk %p", sk); |
1085 | 1114 | ||
1086 | l2cap_chan_put(l2cap_pi(sk)->chan); | 1115 | if (l2cap_pi(sk)->chan) |
1116 | l2cap_chan_put(l2cap_pi(sk)->chan); | ||
1087 | if (l2cap_pi(sk)->rx_busy_skb) { | 1117 | if (l2cap_pi(sk)->rx_busy_skb) { |
1088 | kfree_skb(l2cap_pi(sk)->rx_busy_skb); | 1118 | kfree_skb(l2cap_pi(sk)->rx_busy_skb); |
1089 | l2cap_pi(sk)->rx_busy_skb = NULL; | 1119 | l2cap_pi(sk)->rx_busy_skb = NULL; |
@@ -1159,7 +1189,8 @@ static struct proto l2cap_proto = { | |||
1159 | .obj_size = sizeof(struct l2cap_pinfo) | 1189 | .obj_size = sizeof(struct l2cap_pinfo) |
1160 | }; | 1190 | }; |
1161 | 1191 | ||
1162 | static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) | 1192 | static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, |
1193 | int proto, gfp_t prio) | ||
1163 | { | 1194 | { |
1164 | struct sock *sk; | 1195 | struct sock *sk; |
1165 | struct l2cap_chan *chan; | 1196 | struct l2cap_chan *chan; |
@@ -1204,7 +1235,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol, | |||
1204 | sock->state = SS_UNCONNECTED; | 1235 | sock->state = SS_UNCONNECTED; |
1205 | 1236 | ||
1206 | if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM && | 1237 | if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM && |
1207 | sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) | 1238 | sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) |
1208 | return -ESOCKTNOSUPPORT; | 1239 | return -ESOCKTNOSUPPORT; |
1209 | 1240 | ||
1210 | if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) | 1241 | if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) |
@@ -1261,7 +1292,8 @@ int __init l2cap_init_sockets(void) | |||
1261 | goto error; | 1292 | goto error; |
1262 | } | 1293 | } |
1263 | 1294 | ||
1264 | err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list, NULL); | 1295 | err = bt_procfs_init(THIS_MODULE, &init_net, "l2cap", &l2cap_sk_list, |
1296 | NULL); | ||
1265 | if (err < 0) { | 1297 | if (err < 0) { |
1266 | BT_ERR("Failed to create L2CAP proc file"); | 1298 | BT_ERR("Failed to create L2CAP proc file"); |
1267 | bt_sock_unregister(BTPROTO_L2CAP); | 1299 | bt_sock_unregister(BTPROTO_L2CAP); |
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c index e1c97527e16c..b3fbc73516c4 100644 --- a/net/bluetooth/lib.c +++ b/net/bluetooth/lib.c | |||
@@ -41,20 +41,6 @@ void baswap(bdaddr_t *dst, bdaddr_t *src) | |||
41 | } | 41 | } |
42 | EXPORT_SYMBOL(baswap); | 42 | EXPORT_SYMBOL(baswap); |
43 | 43 | ||
44 | char *batostr(bdaddr_t *ba) | ||
45 | { | ||
46 | static char str[2][18]; | ||
47 | static int i = 1; | ||
48 | |||
49 | i ^= 1; | ||
50 | sprintf(str[i], "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X", | ||
51 | ba->b[5], ba->b[4], ba->b[3], | ||
52 | ba->b[2], ba->b[1], ba->b[0]); | ||
53 | |||
54 | return str[i]; | ||
55 | } | ||
56 | EXPORT_SYMBOL(batostr); | ||
57 | |||
58 | /* Bluetooth error codes to Unix errno mapping */ | 44 | /* Bluetooth error codes to Unix errno mapping */ |
59 | int bt_to_errno(__u16 code) | 45 | int bt_to_errno(__u16 code) |
60 | { | 46 | { |
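The batostr() helper removed above formatted a bdaddr_t into one of two small static buffers, which is neither reentrant nor safe for concurrent callers; the rest of this series converts its users to the %pMR printk extension, which prints the six address bytes most-significant first with ':' separators. A minimal sketch of the conversion, assuming a caller that already holds a bdaddr_t pointer (the function name here is illustrative, not from the patch):

	#include <net/bluetooth/bluetooth.h>

	static void print_peer(const bdaddr_t *dst)
	{
		/* old: BT_DBG("dst %s", batostr(dst));  -- used a static buffer */
		/* new: %pMR renders the address in display byte order */
		BT_DBG("dst %pMR", dst);
	}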
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 91de4239da66..142764aec2af 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c | |||
@@ -222,7 +222,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) | |||
222 | 222 | ||
223 | hdr = (void *) skb_put(skb, sizeof(*hdr)); | 223 | hdr = (void *) skb_put(skb, sizeof(*hdr)); |
224 | 224 | ||
225 | hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS); | 225 | hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS); |
226 | hdr->index = cpu_to_le16(index); | 226 | hdr->index = cpu_to_le16(index); |
227 | hdr->len = cpu_to_le16(sizeof(*ev)); | 227 | hdr->len = cpu_to_le16(sizeof(*ev)); |
228 | 228 | ||
@@ -253,7 +253,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, | |||
253 | 253 | ||
254 | hdr = (void *) skb_put(skb, sizeof(*hdr)); | 254 | hdr = (void *) skb_put(skb, sizeof(*hdr)); |
255 | 255 | ||
256 | hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); | 256 | hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE); |
257 | hdr->index = cpu_to_le16(index); | 257 | hdr->index = cpu_to_le16(index); |
258 | hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); | 258 | hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); |
259 | 259 | ||
@@ -377,15 +377,15 @@ static u32 get_supported_settings(struct hci_dev *hdev) | |||
377 | u32 settings = 0; | 377 | u32 settings = 0; |
378 | 378 | ||
379 | settings |= MGMT_SETTING_POWERED; | 379 | settings |= MGMT_SETTING_POWERED; |
380 | settings |= MGMT_SETTING_CONNECTABLE; | ||
381 | settings |= MGMT_SETTING_FAST_CONNECTABLE; | ||
382 | settings |= MGMT_SETTING_DISCOVERABLE; | ||
383 | settings |= MGMT_SETTING_PAIRABLE; | 380 | settings |= MGMT_SETTING_PAIRABLE; |
384 | 381 | ||
385 | if (lmp_ssp_capable(hdev)) | 382 | if (lmp_ssp_capable(hdev)) |
386 | settings |= MGMT_SETTING_SSP; | 383 | settings |= MGMT_SETTING_SSP; |
387 | 384 | ||
388 | if (lmp_bredr_capable(hdev)) { | 385 | if (lmp_bredr_capable(hdev)) { |
386 | settings |= MGMT_SETTING_CONNECTABLE; | ||
387 | settings |= MGMT_SETTING_FAST_CONNECTABLE; | ||
388 | settings |= MGMT_SETTING_DISCOVERABLE; | ||
389 | settings |= MGMT_SETTING_BREDR; | 389 | settings |= MGMT_SETTING_BREDR; |
390 | settings |= MGMT_SETTING_LINK_SECURITY; | 390 | settings |= MGMT_SETTING_LINK_SECURITY; |
391 | } | 391 | } |
@@ -485,7 +485,7 @@ static void create_eir(struct hci_dev *hdev, u8 *data) | |||
485 | ptr += (name_len + 2); | 485 | ptr += (name_len + 2); |
486 | } | 486 | } |
487 | 487 | ||
488 | if (hdev->inq_tx_power) { | 488 | if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) { |
489 | ptr[0] = 2; | 489 | ptr[0] = 2; |
490 | ptr[1] = EIR_TX_POWER; | 490 | ptr[1] = EIR_TX_POWER; |
491 | ptr[2] = (u8) hdev->inq_tx_power; | 491 | ptr[2] = (u8) hdev->inq_tx_power; |
@@ -566,7 +566,7 @@ static int update_eir(struct hci_dev *hdev) | |||
566 | if (!hdev_is_powered(hdev)) | 566 | if (!hdev_is_powered(hdev)) |
567 | return 0; | 567 | return 0; |
568 | 568 | ||
569 | if (!(hdev->features[6] & LMP_EXT_INQ)) | 569 | if (!lmp_ext_inq_capable(hdev)) |
570 | return 0; | 570 | return 0; |
571 | 571 | ||
572 | if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) | 572 | if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) |
@@ -833,7 +833,7 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len, | |||
833 | if (hdev) | 833 | if (hdev) |
834 | hdr->index = cpu_to_le16(hdev->id); | 834 | hdr->index = cpu_to_le16(hdev->id); |
835 | else | 835 | else |
836 | hdr->index = cpu_to_le16(MGMT_INDEX_NONE); | 836 | hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE); |
837 | hdr->len = cpu_to_le16(data_len); | 837 | hdr->len = cpu_to_le16(data_len); |
838 | 838 | ||
839 | if (data) | 839 | if (data) |
@@ -868,6 +868,10 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, | |||
868 | 868 | ||
869 | BT_DBG("request for %s", hdev->name); | 869 | BT_DBG("request for %s", hdev->name); |
870 | 870 | ||
871 | if (!lmp_bredr_capable(hdev)) | ||
872 | return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, | ||
873 | MGMT_STATUS_NOT_SUPPORTED); | ||
874 | |||
871 | timeout = __le16_to_cpu(cp->timeout); | 875 | timeout = __le16_to_cpu(cp->timeout); |
872 | if (!cp->val && timeout > 0) | 876 | if (!cp->val && timeout > 0) |
873 | return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, | 877 | return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, |
@@ -963,6 +967,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, | |||
963 | 967 | ||
964 | BT_DBG("request for %s", hdev->name); | 968 | BT_DBG("request for %s", hdev->name); |
965 | 969 | ||
970 | if (!lmp_bredr_capable(hdev)) | ||
971 | return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, | ||
972 | MGMT_STATUS_NOT_SUPPORTED); | ||
973 | |||
966 | hci_dev_lock(hdev); | 974 | hci_dev_lock(hdev); |
967 | 975 | ||
968 | if (!hdev_is_powered(hdev)) { | 976 | if (!hdev_is_powered(hdev)) { |
@@ -1061,6 +1069,10 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data, | |||
1061 | 1069 | ||
1062 | BT_DBG("request for %s", hdev->name); | 1070 | BT_DBG("request for %s", hdev->name); |
1063 | 1071 | ||
1072 | if (!lmp_bredr_capable(hdev)) | ||
1073 | return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, | ||
1074 | MGMT_STATUS_NOT_SUPPORTED); | ||
1075 | |||
1064 | hci_dev_lock(hdev); | 1076 | hci_dev_lock(hdev); |
1065 | 1077 | ||
1066 | if (!hdev_is_powered(hdev)) { | 1078 | if (!hdev_is_powered(hdev)) { |
@@ -1214,7 +1226,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) | |||
1214 | } | 1226 | } |
1215 | 1227 | ||
1216 | val = !!cp->val; | 1228 | val = !!cp->val; |
1217 | enabled = !!(hdev->host_features[0] & LMP_HOST_LE); | 1229 | enabled = !!lmp_host_le_capable(hdev); |
1218 | 1230 | ||
1219 | if (!hdev_is_powered(hdev) || val == enabled) { | 1231 | if (!hdev_is_powered(hdev) || val == enabled) { |
1220 | bool changed = false; | 1232 | bool changed = false; |
@@ -1250,7 +1262,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) | |||
1250 | 1262 | ||
1251 | if (val) { | 1263 | if (val) { |
1252 | hci_cp.le = val; | 1264 | hci_cp.le = val; |
1253 | hci_cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); | 1265 | hci_cp.simul = !!lmp_le_br_capable(hdev); |
1254 | } | 1266 | } |
1255 | 1267 | ||
1256 | err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), | 1268 | err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), |
@@ -2596,6 +2608,10 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, | |||
2596 | 2608 | ||
2597 | BT_DBG("%s", hdev->name); | 2609 | BT_DBG("%s", hdev->name); |
2598 | 2610 | ||
2611 | if (!lmp_bredr_capable(hdev)) | ||
2612 | return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, | ||
2613 | MGMT_STATUS_NOT_SUPPORTED); | ||
2614 | |||
2599 | if (!hdev_is_powered(hdev)) | 2615 | if (!hdev_is_powered(hdev)) |
2600 | return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, | 2616 | return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, |
2601 | MGMT_STATUS_NOT_POWERED); | 2617 | MGMT_STATUS_NOT_POWERED); |
@@ -2873,6 +2889,21 @@ static void settings_rsp(struct pending_cmd *cmd, void *data) | |||
2873 | mgmt_pending_free(cmd); | 2889 | mgmt_pending_free(cmd); |
2874 | } | 2890 | } |
2875 | 2891 | ||
2892 | static int set_bredr_scan(struct hci_dev *hdev) | ||
2893 | { | ||
2894 | u8 scan = 0; | ||
2895 | |||
2896 | if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) | ||
2897 | scan |= SCAN_PAGE; | ||
2898 | if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) | ||
2899 | scan |= SCAN_INQUIRY; | ||
2900 | |||
2901 | if (!scan) | ||
2902 | return 0; | ||
2903 | |||
2904 | return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); | ||
2905 | } | ||
2906 | |||
2876 | int mgmt_powered(struct hci_dev *hdev, u8 powered) | 2907 | int mgmt_powered(struct hci_dev *hdev, u8 powered) |
2877 | { | 2908 | { |
2878 | struct cmd_lookup match = { NULL, hdev }; | 2909 | struct cmd_lookup match = { NULL, hdev }; |
@@ -2884,17 +2915,8 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered) | |||
2884 | mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); | 2915 | mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); |
2885 | 2916 | ||
2886 | if (powered) { | 2917 | if (powered) { |
2887 | u8 scan = 0; | 2918 | if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && |
2888 | 2919 | !lmp_host_ssp_capable(hdev)) { | |
2889 | if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) | ||
2890 | scan |= SCAN_PAGE; | ||
2891 | if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) | ||
2892 | scan |= SCAN_INQUIRY; | ||
2893 | |||
2894 | if (scan) | ||
2895 | hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); | ||
2896 | |||
2897 | if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { | ||
2898 | u8 ssp = 1; | 2920 | u8 ssp = 1; |
2899 | 2921 | ||
2900 | hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp); | 2922 | hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp); |
@@ -2904,15 +2926,24 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered) | |||
2904 | struct hci_cp_write_le_host_supported cp; | 2926 | struct hci_cp_write_le_host_supported cp; |
2905 | 2927 | ||
2906 | cp.le = 1; | 2928 | cp.le = 1; |
2907 | cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); | 2929 | cp.simul = !!lmp_le_br_capable(hdev); |
2908 | 2930 | ||
2909 | hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, | 2931 | /* Check first if we already have the right |
2910 | sizeof(cp), &cp); | 2932 | * host state (host features set) |
2933 | */ | ||
2934 | if (cp.le != !!lmp_host_le_capable(hdev) || | ||
2935 | cp.simul != !!lmp_host_le_br_capable(hdev)) | ||
2936 | hci_send_cmd(hdev, | ||
2937 | HCI_OP_WRITE_LE_HOST_SUPPORTED, | ||
2938 | sizeof(cp), &cp); | ||
2911 | } | 2939 | } |
2912 | 2940 | ||
2913 | update_class(hdev); | 2941 | if (lmp_bredr_capable(hdev)) { |
2914 | update_name(hdev, hdev->dev_name); | 2942 | set_bredr_scan(hdev); |
2915 | update_eir(hdev); | 2943 | update_class(hdev); |
2944 | update_name(hdev, hdev->dev_name); | ||
2945 | update_eir(hdev); | ||
2946 | } | ||
2916 | } else { | 2947 | } else { |
2917 | u8 status = MGMT_STATUS_NOT_POWERED; | 2948 | u8 status = MGMT_STATUS_NOT_POWERED; |
2918 | mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); | 2949 | mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); |
@@ -3127,6 +3158,9 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, | |||
3127 | struct pending_cmd *cmd; | 3158 | struct pending_cmd *cmd; |
3128 | int err; | 3159 | int err; |
3129 | 3160 | ||
3161 | mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, | ||
3162 | hdev); | ||
3163 | |||
3130 | cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev); | 3164 | cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev); |
3131 | if (!cmd) | 3165 | if (!cmd) |
3132 | return -ENOENT; | 3166 | return -ENOENT; |
@@ -3139,8 +3173,6 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, | |||
3139 | 3173 | ||
3140 | mgmt_pending_remove(cmd); | 3174 | mgmt_pending_remove(cmd); |
3141 | 3175 | ||
3142 | mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, | ||
3143 | hdev); | ||
3144 | return err; | 3176 | return err; |
3145 | } | 3177 | } |
3146 | 3178 | ||
@@ -3360,7 +3392,7 @@ static int clear_eir(struct hci_dev *hdev) | |||
3360 | { | 3392 | { |
3361 | struct hci_cp_write_eir cp; | 3393 | struct hci_cp_write_eir cp; |
3362 | 3394 | ||
3363 | if (!(hdev->features[6] & LMP_EXT_INQ)) | 3395 | if (!lmp_ext_inq_capable(hdev)) |
3364 | return 0; | 3396 | return 0; |
3365 | 3397 | ||
3366 | memset(hdev->eir, 0, sizeof(hdev->eir)); | 3398 | memset(hdev->eir, 0, sizeof(hdev->eir)); |
@@ -3492,7 +3524,12 @@ send_event: | |||
3492 | err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, | 3524 | err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, |
3493 | sizeof(ev), cmd ? cmd->sk : NULL); | 3525 | sizeof(ev), cmd ? cmd->sk : NULL); |
3494 | 3526 | ||
3495 | update_eir(hdev); | 3527 | /* EIR is taken care of separately when powering on the |
3528 | * adapter so only update them here if this is a name change | ||
3529 | * unrelated to power on. | ||
3530 | */ | ||
3531 | if (!test_bit(HCI_INIT, &hdev->flags)) | ||
3532 | update_eir(hdev); | ||
3496 | 3533 | ||
3497 | failed: | 3534 | failed: |
3498 | if (cmd) | 3535 | if (cmd) |
@@ -3587,9 +3624,9 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, | |||
3587 | ev->addr.type = link_to_bdaddr(link_type, addr_type); | 3624 | ev->addr.type = link_to_bdaddr(link_type, addr_type); |
3588 | ev->rssi = rssi; | 3625 | ev->rssi = rssi; |
3589 | if (cfm_name) | 3626 | if (cfm_name) |
3590 | ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME); | 3627 | ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME); |
3591 | if (!ssp) | 3628 | if (!ssp) |
3592 | ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING); | 3629 | ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING); |
3593 | 3630 | ||
3594 | if (eir_len > 0) | 3631 | if (eir_len > 0) |
3595 | memcpy(ev->eir, eir, eir_len); | 3632 | memcpy(ev->eir, eir, eir_len); |
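Several constant opcodes and flag values in this file (and in rfcomm/core.c and smp.c further down) move from cpu_to_le16()/cpu_to_le32() to their __constant_* forms. The point is that a value known at compile time can be byte-swapped (on big-endian hosts) entirely at build time, while values computed at run time keep the plain helpers. A hedged sketch of the pattern; the struct and the opcode value are illustrative only and do not come from this patch:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct example_hdr {		/* hypothetical, mirrors struct mgmt_hdr */
		__le16 opcode;
		__le16 index;
		__le16 len;
	};

	static void fill_hdr(struct example_hdr *hdr, u16 index, u16 len)
	{
		hdr->opcode = __constant_cpu_to_le16(0x0001);	/* compile-time constant */
		hdr->index  = cpu_to_le16(index);		/* run-time value */
		hdr->len    = cpu_to_le16(len);			/* run-time value */
	}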
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index c75107ef8920..201fdf737209 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -377,8 +377,8 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, | |||
377 | int err = 0; | 377 | int err = 0; |
378 | u8 dlci; | 378 | u8 dlci; |
379 | 379 | ||
380 | BT_DBG("dlc %p state %ld %s %s channel %d", | 380 | BT_DBG("dlc %p state %ld %pMR -> %pMR channel %d", |
381 | d, d->state, batostr(src), batostr(dst), channel); | 381 | d, d->state, src, dst, channel); |
382 | 382 | ||
383 | if (channel < 1 || channel > 30) | 383 | if (channel < 1 || channel > 30) |
384 | return -EINVAL; | 384 | return -EINVAL; |
@@ -676,7 +676,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, | |||
676 | struct socket *sock; | 676 | struct socket *sock; |
677 | struct sock *sk; | 677 | struct sock *sk; |
678 | 678 | ||
679 | BT_DBG("%s %s", batostr(src), batostr(dst)); | 679 | BT_DBG("%pMR -> %pMR", src, dst); |
680 | 680 | ||
681 | *err = rfcomm_l2sock_create(&sock); | 681 | *err = rfcomm_l2sock_create(&sock); |
682 | if (*err < 0) | 682 | if (*err < 0) |
@@ -709,7 +709,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, | |||
709 | 709 | ||
710 | bacpy(&addr.l2_bdaddr, dst); | 710 | bacpy(&addr.l2_bdaddr, dst); |
711 | addr.l2_family = AF_BLUETOOTH; | 711 | addr.l2_family = AF_BLUETOOTH; |
712 | addr.l2_psm = cpu_to_le16(RFCOMM_PSM); | 712 | addr.l2_psm = __constant_cpu_to_le16(RFCOMM_PSM); |
713 | addr.l2_cid = 0; | 713 | addr.l2_cid = 0; |
714 | *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK); | 714 | *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK); |
715 | if (*err == 0 || *err == -EINPROGRESS) | 715 | if (*err == 0 || *err == -EINPROGRESS) |
@@ -1987,7 +1987,7 @@ static int rfcomm_add_listener(bdaddr_t *ba) | |||
1987 | /* Bind socket */ | 1987 | /* Bind socket */ |
1988 | bacpy(&addr.l2_bdaddr, ba); | 1988 | bacpy(&addr.l2_bdaddr, ba); |
1989 | addr.l2_family = AF_BLUETOOTH; | 1989 | addr.l2_family = AF_BLUETOOTH; |
1990 | addr.l2_psm = cpu_to_le16(RFCOMM_PSM); | 1990 | addr.l2_psm = __constant_cpu_to_le16(RFCOMM_PSM); |
1991 | addr.l2_cid = 0; | 1991 | addr.l2_cid = 0; |
1992 | err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); | 1992 | err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); |
1993 | if (err < 0) { | 1993 | if (err < 0) { |
@@ -2125,11 +2125,10 @@ static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x) | |||
2125 | list_for_each_entry(d, &s->dlcs, list) { | 2125 | list_for_each_entry(d, &s->dlcs, list) { |
2126 | struct sock *sk = s->sock->sk; | 2126 | struct sock *sk = s->sock->sk; |
2127 | 2127 | ||
2128 | seq_printf(f, "%s %s %ld %d %d %d %d\n", | 2128 | seq_printf(f, "%pMR %pMR %ld %d %d %d %d\n", |
2129 | batostr(&bt_sk(sk)->src), | 2129 | &bt_sk(sk)->src, &bt_sk(sk)->dst, |
2130 | batostr(&bt_sk(sk)->dst), | 2130 | d->state, d->dlci, d->mtu, |
2131 | d->state, d->dlci, d->mtu, | 2131 | d->rx_credits, d->tx_credits); |
2132 | d->rx_credits, d->tx_credits); | ||
2133 | } | 2132 | } |
2134 | } | 2133 | } |
2135 | 2134 | ||
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index b3226f3658cf..4ddef57d03a7 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -334,7 +334,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr | |||
334 | struct sock *sk = sock->sk; | 334 | struct sock *sk = sock->sk; |
335 | int err = 0; | 335 | int err = 0; |
336 | 336 | ||
337 | BT_DBG("sk %p %s", sk, batostr(&sa->rc_bdaddr)); | 337 | BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr); |
338 | 338 | ||
339 | if (!addr || addr->sa_family != AF_BLUETOOTH) | 339 | if (!addr || addr->sa_family != AF_BLUETOOTH) |
340 | return -EINVAL; | 340 | return -EINVAL; |
@@ -975,10 +975,9 @@ static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) | |||
975 | read_lock(&rfcomm_sk_list.lock); | 975 | read_lock(&rfcomm_sk_list.lock); |
976 | 976 | ||
977 | sk_for_each(sk, node, &rfcomm_sk_list.head) { | 977 | sk_for_each(sk, node, &rfcomm_sk_list.head) { |
978 | seq_printf(f, "%s %s %d %d\n", | 978 | seq_printf(f, "%pMR %pMR %d %d\n", |
979 | batostr(&bt_sk(sk)->src), | 979 | &bt_sk(sk)->src, &bt_sk(sk)->dst, |
980 | batostr(&bt_sk(sk)->dst), | 980 | sk->sk_state, rfcomm_pi(sk)->channel); |
981 | sk->sk_state, rfcomm_pi(sk)->channel); | ||
982 | } | 981 | } |
983 | 982 | ||
984 | read_unlock(&rfcomm_sk_list.lock); | 983 | read_unlock(&rfcomm_sk_list.lock); |
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index ccc248791d50..bd6fd0f43d2b 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c | |||
@@ -166,7 +166,7 @@ static struct device *rfcomm_get_device(struct rfcomm_dev *dev) | |||
166 | static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf) | 166 | static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf) |
167 | { | 167 | { |
168 | struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); | 168 | struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); |
169 | return sprintf(buf, "%s\n", batostr(&dev->dst)); | 169 | return sprintf(buf, "%pMR\n", &dev->dst); |
170 | } | 170 | } |
171 | 171 | ||
172 | static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf) | 172 | static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf) |
@@ -663,8 +663,8 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
663 | if (!dev) | 663 | if (!dev) |
664 | return -ENODEV; | 664 | return -ENODEV; |
665 | 665 | ||
666 | BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst), | 666 | BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst, |
667 | dev->channel, dev->port.count); | 667 | dev->channel, dev->port.count); |
668 | 668 | ||
669 | spin_lock_irqsave(&dev->port.lock, flags); | 669 | spin_lock_irqsave(&dev->port.lock, flags); |
670 | if (++dev->port.count > 1) { | 670 | if (++dev->port.count > 1) { |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index dc42b917aaaf..450cdcd88e5c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -172,7 +172,7 @@ static int sco_connect(struct sock *sk) | |||
172 | struct hci_dev *hdev; | 172 | struct hci_dev *hdev; |
173 | int err, type; | 173 | int err, type; |
174 | 174 | ||
175 | BT_DBG("%s -> %s", batostr(src), batostr(dst)); | 175 | BT_DBG("%pMR -> %pMR", src, dst); |
176 | 176 | ||
177 | hdev = hci_get_route(dst, src); | 177 | hdev = hci_get_route(dst, src); |
178 | if (!hdev) | 178 | if (!hdev) |
@@ -460,7 +460,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le | |||
460 | struct sock *sk = sock->sk; | 460 | struct sock *sk = sock->sk; |
461 | int err = 0; | 461 | int err = 0; |
462 | 462 | ||
463 | BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr)); | 463 | BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr); |
464 | 464 | ||
465 | if (!addr || addr->sa_family != AF_BLUETOOTH) | 465 | if (!addr || addr->sa_family != AF_BLUETOOTH) |
466 | return -EINVAL; | 466 | return -EINVAL; |
@@ -893,7 +893,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) | |||
893 | struct hlist_node *node; | 893 | struct hlist_node *node; |
894 | int lm = 0; | 894 | int lm = 0; |
895 | 895 | ||
896 | BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); | 896 | BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); |
897 | 897 | ||
898 | /* Find listening sockets */ | 898 | /* Find listening sockets */ |
899 | read_lock(&sco_sk_list.lock); | 899 | read_lock(&sco_sk_list.lock); |
@@ -914,7 +914,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) | |||
914 | 914 | ||
915 | void sco_connect_cfm(struct hci_conn *hcon, __u8 status) | 915 | void sco_connect_cfm(struct hci_conn *hcon, __u8 status) |
916 | { | 916 | { |
917 | BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); | 917 | BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); |
918 | if (!status) { | 918 | if (!status) { |
919 | struct sco_conn *conn; | 919 | struct sco_conn *conn; |
920 | 920 | ||
@@ -959,8 +959,8 @@ static int sco_debugfs_show(struct seq_file *f, void *p) | |||
959 | read_lock(&sco_sk_list.lock); | 959 | read_lock(&sco_sk_list.lock); |
960 | 960 | ||
961 | sk_for_each(sk, node, &sco_sk_list.head) { | 961 | sk_for_each(sk, node, &sco_sk_list.head) { |
962 | seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src), | 962 | seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src, |
963 | batostr(&bt_sk(sk)->dst), sk->sk_state); | 963 | &bt_sk(sk)->dst, sk->sk_state); |
964 | } | 964 | } |
965 | 965 | ||
966 | read_unlock(&sco_sk_list.lock); | 966 | read_unlock(&sco_sk_list.lock); |
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index a5923378bdf0..68a9587c9694 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -167,7 +167,7 @@ static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code, | |||
167 | 167 | ||
168 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); | 168 | lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); |
169 | lh->len = cpu_to_le16(sizeof(code) + dlen); | 169 | lh->len = cpu_to_le16(sizeof(code) + dlen); |
170 | lh->cid = cpu_to_le16(L2CAP_CID_SMP); | 170 | lh->cid = __constant_cpu_to_le16(L2CAP_CID_SMP); |
171 | 171 | ||
172 | memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code)); | 172 | memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code)); |
173 | 173 | ||
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 070e8a68cfc6..7c78e2640190 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -313,6 +313,8 @@ static const struct net_device_ops br_netdev_ops = { | |||
313 | .ndo_fdb_add = br_fdb_add, | 313 | .ndo_fdb_add = br_fdb_add, |
314 | .ndo_fdb_del = br_fdb_delete, | 314 | .ndo_fdb_del = br_fdb_delete, |
315 | .ndo_fdb_dump = br_fdb_dump, | 315 | .ndo_fdb_dump = br_fdb_dump, |
316 | .ndo_bridge_getlink = br_getlink, | ||
317 | .ndo_bridge_setlink = br_setlink, | ||
316 | }; | 318 | }; |
317 | 319 | ||
318 | static void br_dev_free(struct net_device *dev) | 320 | static void br_dev_free(struct net_device *dev) |
@@ -356,7 +358,7 @@ void br_dev_setup(struct net_device *dev) | |||
356 | br->bridge_id.prio[0] = 0x80; | 358 | br->bridge_id.prio[0] = 0x80; |
357 | br->bridge_id.prio[1] = 0x00; | 359 | br->bridge_id.prio[1] = 0x00; |
358 | 360 | ||
359 | memcpy(br->group_addr, br_group_address, ETH_ALEN); | 361 | memcpy(br->group_addr, eth_reserved_addr_base, ETH_ALEN); |
360 | 362 | ||
361 | br->stp_enabled = BR_NO_STP; | 363 | br->stp_enabled = BR_NO_STP; |
362 | br->group_fwd_mask = BR_GROUPFWD_DEFAULT; | 364 | br->group_fwd_mask = BR_GROUPFWD_DEFAULT; |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 76f15fda0212..4b34207419b1 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -19,9 +19,6 @@ | |||
19 | #include <linux/export.h> | 19 | #include <linux/export.h> |
20 | #include "br_private.h" | 20 | #include "br_private.h" |
21 | 21 | ||
22 | /* Bridge group multicast address 802.1d (pg 51). */ | ||
23 | const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; | ||
24 | |||
25 | /* Hook for brouter */ | 22 | /* Hook for brouter */ |
26 | br_should_route_hook_t __rcu *br_should_route_hook __read_mostly; | 23 | br_should_route_hook_t __rcu *br_should_route_hook __read_mostly; |
27 | EXPORT_SYMBOL(br_should_route_hook); | 24 | EXPORT_SYMBOL(br_should_route_hook); |
@@ -127,18 +124,6 @@ static int br_handle_local_finish(struct sk_buff *skb) | |||
127 | return 0; /* process further */ | 124 | return 0; /* process further */ |
128 | } | 125 | } |
129 | 126 | ||
130 | /* Does address match the link local multicast address. | ||
131 | * 01:80:c2:00:00:0X | ||
132 | */ | ||
133 | static inline int is_link_local(const unsigned char *dest) | ||
134 | { | ||
135 | __be16 *a = (__be16 *)dest; | ||
136 | static const __be16 *b = (const __be16 *)br_group_address; | ||
137 | static const __be16 m = cpu_to_be16(0xfff0); | ||
138 | |||
139 | return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; | ||
140 | } | ||
141 | |||
142 | /* | 127 | /* |
143 | * Return NULL if skb is handled | 128 | * Return NULL if skb is handled |
144 | * note: already called with rcu_read_lock | 129 | * note: already called with rcu_read_lock |
@@ -162,7 +147,7 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb) | |||
162 | 147 | ||
163 | p = br_port_get_rcu(skb->dev); | 148 | p = br_port_get_rcu(skb->dev); |
164 | 149 | ||
165 | if (unlikely(is_link_local(dest))) { | 150 | if (unlikely(is_link_local_ether_addr(dest))) { |
166 | /* | 151 | /* |
167 | * See IEEE 802.1D Table 7-10 Reserved addresses | 152 | * See IEEE 802.1D Table 7-10 Reserved addresses |
168 | * | 153 | * |
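The bridge-private is_link_local() removed above is superseded by the generic is_link_local_ether_addr() from <linux/etherdevice.h>, which performs the same masked comparison against the 802.1D reserved base 01:80:c2:00:00:00, where only the low nibble of the last byte may vary. A sketch of the equivalent check written out with memcmp() for clarity; the function name is illustrative and the real helper uses a different (word-wise) implementation:

	#include <linux/types.h>
	#include <linux/string.h>

	static inline bool link_local_check_sketch(const u8 *addr)
	{
		static const u8 base[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

		/* first five bytes must match exactly, last byte only 0x00-0x0f */
		return memcmp(addr, base, 5) == 0 && (addr[5] & 0xf0) == 0;
	}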
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 7222fe1d5460..cd8c3a44ab7d 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c | |||
@@ -85,13 +85,14 @@ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf, | |||
85 | /* called with RTNL */ | 85 | /* called with RTNL */ |
86 | static int add_del_if(struct net_bridge *br, int ifindex, int isadd) | 86 | static int add_del_if(struct net_bridge *br, int ifindex, int isadd) |
87 | { | 87 | { |
88 | struct net *net = dev_net(br->dev); | ||
88 | struct net_device *dev; | 89 | struct net_device *dev; |
89 | int ret; | 90 | int ret; |
90 | 91 | ||
91 | if (!capable(CAP_NET_ADMIN)) | 92 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
92 | return -EPERM; | 93 | return -EPERM; |
93 | 94 | ||
94 | dev = __dev_get_by_index(dev_net(br->dev), ifindex); | 95 | dev = __dev_get_by_index(net, ifindex); |
95 | if (dev == NULL) | 96 | if (dev == NULL) |
96 | return -EINVAL; | 97 | return -EINVAL; |
97 | 98 | ||
@@ -178,25 +179,25 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
178 | } | 179 | } |
179 | 180 | ||
180 | case BRCTL_SET_BRIDGE_FORWARD_DELAY: | 181 | case BRCTL_SET_BRIDGE_FORWARD_DELAY: |
181 | if (!capable(CAP_NET_ADMIN)) | 182 | if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) |
182 | return -EPERM; | 183 | return -EPERM; |
183 | 184 | ||
184 | return br_set_forward_delay(br, args[1]); | 185 | return br_set_forward_delay(br, args[1]); |
185 | 186 | ||
186 | case BRCTL_SET_BRIDGE_HELLO_TIME: | 187 | case BRCTL_SET_BRIDGE_HELLO_TIME: |
187 | if (!capable(CAP_NET_ADMIN)) | 188 | if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) |
188 | return -EPERM; | 189 | return -EPERM; |
189 | 190 | ||
190 | return br_set_hello_time(br, args[1]); | 191 | return br_set_hello_time(br, args[1]); |
191 | 192 | ||
192 | case BRCTL_SET_BRIDGE_MAX_AGE: | 193 | case BRCTL_SET_BRIDGE_MAX_AGE: |
193 | if (!capable(CAP_NET_ADMIN)) | 194 | if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) |
194 | return -EPERM; | 195 | return -EPERM; |
195 | 196 | ||
196 | return br_set_max_age(br, args[1]); | 197 | return br_set_max_age(br, args[1]); |
197 | 198 | ||
198 | case BRCTL_SET_AGEING_TIME: | 199 | case BRCTL_SET_AGEING_TIME: |
199 | if (!capable(CAP_NET_ADMIN)) | 200 | if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) |
200 | return -EPERM; | 201 | return -EPERM; |
201 | 202 | ||
202 | br->ageing_time = clock_t_to_jiffies(args[1]); | 203 | br->ageing_time = clock_t_to_jiffies(args[1]); |
@@ -236,14 +237,14 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
236 | } | 237 | } |
237 | 238 | ||
238 | case BRCTL_SET_BRIDGE_STP_STATE: | 239 | case BRCTL_SET_BRIDGE_STP_STATE: |
239 | if (!capable(CAP_NET_ADMIN)) | 240 | if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) |
240 | return -EPERM; | 241 | return -EPERM; |
241 | 242 | ||
242 | br_stp_set_enabled(br, args[1]); | 243 | br_stp_set_enabled(br, args[1]); |
243 | return 0; | 244 | return 0; |
244 | 245 | ||
245 | case BRCTL_SET_BRIDGE_PRIORITY: | 246 | case BRCTL_SET_BRIDGE_PRIORITY: |
246 | if (!capable(CAP_NET_ADMIN)) | 247 | if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) |
247 | return -EPERM; | 248 | return -EPERM; |
248 | 249 | ||
249 | spin_lock_bh(&br->lock); | 250 | spin_lock_bh(&br->lock); |
@@ -256,7 +257,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
256 | struct net_bridge_port *p; | 257 | struct net_bridge_port *p; |
257 | int ret; | 258 | int ret; |
258 | 259 | ||
259 | if (!capable(CAP_NET_ADMIN)) | 260 | if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) |
260 | return -EPERM; | 261 | return -EPERM; |
261 | 262 | ||
262 | spin_lock_bh(&br->lock); | 263 | spin_lock_bh(&br->lock); |
@@ -273,7 +274,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
273 | struct net_bridge_port *p; | 274 | struct net_bridge_port *p; |
274 | int ret; | 275 | int ret; |
275 | 276 | ||
276 | if (!capable(CAP_NET_ADMIN)) | 277 | if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) |
277 | return -EPERM; | 278 | return -EPERM; |
278 | 279 | ||
279 | spin_lock_bh(&br->lock); | 280 | spin_lock_bh(&br->lock); |
@@ -330,7 +331,7 @@ static int old_deviceless(struct net *net, void __user *uarg) | |||
330 | { | 331 | { |
331 | char buf[IFNAMSIZ]; | 332 | char buf[IFNAMSIZ]; |
332 | 333 | ||
333 | if (!capable(CAP_NET_ADMIN)) | 334 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
334 | return -EPERM; | 335 | return -EPERM; |
335 | 336 | ||
336 | if (copy_from_user(buf, (void __user *)args[1], IFNAMSIZ)) | 337 | if (copy_from_user(buf, (void __user *)args[1], IFNAMSIZ)) |
@@ -360,7 +361,7 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uar | |||
360 | { | 361 | { |
361 | char buf[IFNAMSIZ]; | 362 | char buf[IFNAMSIZ]; |
362 | 363 | ||
363 | if (!capable(CAP_NET_ADMIN)) | 364 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
364 | return -EPERM; | 365 | return -EPERM; |
365 | 366 | ||
366 | if (copy_from_user(buf, uarg, IFNAMSIZ)) | 367 | if (copy_from_user(buf, uarg, IFNAMSIZ)) |
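The capable(CAP_NET_ADMIN) tests throughout the bridge ioctl path become ns_capable(net->user_ns, CAP_NET_ADMIN): privilege is now checked against the user namespace that owns the bridge's network namespace, so the root of a container that owns its own netns can administer the bridges inside it without global CAP_NET_ADMIN. A minimal sketch of the recurring conversion, assuming the object reaches its struct net through a net_device:

	#include <linux/capability.h>
	#include <linux/netdevice.h>
	#include <net/net_namespace.h>

	static int example_admin_check(struct net_device *dev)
	{
		struct net *net = dev_net(dev);

		/* old: if (!capable(CAP_NET_ADMIN)) return -EPERM; */
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;

		return 0;
	}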
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 093f527276a3..65429b99a2a3 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -20,16 +20,43 @@ | |||
20 | #include "br_private.h" | 20 | #include "br_private.h" |
21 | #include "br_private_stp.h" | 21 | #include "br_private_stp.h" |
22 | 22 | ||
23 | static inline size_t br_port_info_size(void) | ||
24 | { | ||
25 | return nla_total_size(1) /* IFLA_BRPORT_STATE */ | ||
26 | + nla_total_size(2) /* IFLA_BRPORT_PRIORITY */ | ||
27 | + nla_total_size(4) /* IFLA_BRPORT_COST */ | ||
28 | + nla_total_size(1) /* IFLA_BRPORT_MODE */ | ||
29 | + nla_total_size(1) /* IFLA_BRPORT_GUARD */ | ||
30 | + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ | ||
31 | + 0; | ||
32 | } | ||
33 | |||
23 | static inline size_t br_nlmsg_size(void) | 34 | static inline size_t br_nlmsg_size(void) |
24 | { | 35 | { |
25 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) | 36 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) |
26 | + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ | 37 | + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ |
27 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ | 38 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ |
28 | + nla_total_size(4) /* IFLA_MASTER */ | 39 | + nla_total_size(4) /* IFLA_MASTER */ |
29 | + nla_total_size(4) /* IFLA_MTU */ | 40 | + nla_total_size(4) /* IFLA_MTU */ |
30 | + nla_total_size(4) /* IFLA_LINK */ | 41 | + nla_total_size(4) /* IFLA_LINK */ |
31 | + nla_total_size(1) /* IFLA_OPERSTATE */ | 42 | + nla_total_size(1) /* IFLA_OPERSTATE */ |
32 | + nla_total_size(1); /* IFLA_PROTINFO */ | 43 | + nla_total_size(br_port_info_size()); /* IFLA_PROTINFO */ |
44 | } | ||
45 | |||
46 | static int br_port_fill_attrs(struct sk_buff *skb, | ||
47 | const struct net_bridge_port *p) | ||
48 | { | ||
49 | u8 mode = !!(p->flags & BR_HAIRPIN_MODE); | ||
50 | |||
51 | if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) || | ||
52 | nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) || | ||
53 | nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) || | ||
54 | nla_put_u8(skb, IFLA_BRPORT_MODE, mode) || | ||
55 | nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) || | ||
56 | nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK))) | ||
57 | return -EMSGSIZE; | ||
58 | |||
59 | return 0; | ||
33 | } | 60 | } |
34 | 61 | ||
35 | /* | 62 | /* |
@@ -67,10 +94,18 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por | |||
67 | (dev->addr_len && | 94 | (dev->addr_len && |
68 | nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || | 95 | nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || |
69 | (dev->ifindex != dev->iflink && | 96 | (dev->ifindex != dev->iflink && |
70 | nla_put_u32(skb, IFLA_LINK, dev->iflink)) || | 97 | nla_put_u32(skb, IFLA_LINK, dev->iflink))) |
71 | (event == RTM_NEWLINK && | ||
72 | nla_put_u8(skb, IFLA_PROTINFO, port->state))) | ||
73 | goto nla_put_failure; | 98 | goto nla_put_failure; |
99 | |||
100 | if (event == RTM_NEWLINK) { | ||
101 | struct nlattr *nest | ||
102 | = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); | ||
103 | |||
104 | if (nest == NULL || br_port_fill_attrs(skb, port) < 0) | ||
105 | goto nla_put_failure; | ||
106 | nla_nest_end(skb, nest); | ||
107 | } | ||
108 | |||
74 | return nlmsg_end(skb, nlh); | 109 | return nlmsg_end(skb, nlh); |
75 | 110 | ||
76 | nla_put_failure: | 111 | nla_put_failure: |
@@ -111,89 +146,133 @@ errout: | |||
111 | /* | 146 | /* |
112 | * Dump information about all ports, in response to GETLINK | 147 | * Dump information about all ports, in response to GETLINK |
113 | */ | 148 | */ |
114 | static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | 149 | int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
150 | struct net_device *dev) | ||
115 | { | 151 | { |
116 | struct net *net = sock_net(skb->sk); | 152 | int err = 0; |
117 | struct net_device *dev; | 153 | struct net_bridge_port *port = br_port_get_rcu(dev); |
118 | int idx; | 154 | |
119 | 155 | /* not a bridge port */ | |
120 | idx = 0; | 156 | if (!port) |
121 | rcu_read_lock(); | 157 | goto out; |
122 | for_each_netdev_rcu(net, dev) { | 158 | |
123 | struct net_bridge_port *port = br_port_get_rcu(dev); | 159 | err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI); |
124 | 160 | out: | |
125 | /* not a bridge port */ | 161 | return err; |
126 | if (!port || idx < cb->args[0]) | 162 | } |
127 | goto skip; | 163 | |
128 | 164 | static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = { | |
129 | if (br_fill_ifinfo(skb, port, | 165 | [IFLA_BRPORT_STATE] = { .type = NLA_U8 }, |
130 | NETLINK_CB(cb->skb).portid, | 166 | [IFLA_BRPORT_COST] = { .type = NLA_U32 }, |
131 | cb->nlh->nlmsg_seq, RTM_NEWLINK, | 167 | [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 }, |
132 | NLM_F_MULTI) < 0) | 168 | [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, |
133 | break; | 169 | [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, |
134 | skip: | 170 | [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, |
135 | ++idx; | 171 | }; |
172 | |||
173 | /* Change the state of the port and notify spanning tree */ | ||
174 | static int br_set_port_state(struct net_bridge_port *p, u8 state) | ||
175 | { | ||
176 | if (state > BR_STATE_BLOCKING) | ||
177 | return -EINVAL; | ||
178 | |||
179 | /* if kernel STP is running, don't allow changes */ | ||
180 | if (p->br->stp_enabled == BR_KERNEL_STP) | ||
181 | return -EBUSY; | ||
182 | |||
183 | if (!netif_running(p->dev) || | ||
184 | (!netif_carrier_ok(p->dev) && state != BR_STATE_DISABLED)) | ||
185 | return -ENETDOWN; | ||
186 | |||
187 | p->state = state; | ||
188 | br_log_state(p); | ||
189 | br_port_state_selection(p->br); | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | /* Set/clear or port flags based on attribute */ | ||
194 | static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[], | ||
195 | int attrtype, unsigned long mask) | ||
196 | { | ||
197 | if (tb[attrtype]) { | ||
198 | u8 flag = nla_get_u8(tb[attrtype]); | ||
199 | if (flag) | ||
200 | p->flags |= mask; | ||
201 | else | ||
202 | p->flags &= ~mask; | ||
136 | } | 203 | } |
137 | rcu_read_unlock(); | 204 | } |
138 | cb->args[0] = idx; | 205 | |
206 | /* Process bridge protocol info on port */ | ||
207 | static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) | ||
208 | { | ||
209 | int err; | ||
139 | 210 | ||
140 | return skb->len; | 211 | br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); |
212 | br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); | ||
213 | |||
214 | if (tb[IFLA_BRPORT_COST]) { | ||
215 | err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); | ||
216 | if (err) | ||
217 | return err; | ||
218 | } | ||
219 | |||
220 | if (tb[IFLA_BRPORT_PRIORITY]) { | ||
221 | err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY])); | ||
222 | if (err) | ||
223 | return err; | ||
224 | } | ||
225 | |||
226 | if (tb[IFLA_BRPORT_STATE]) { | ||
227 | err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE])); | ||
228 | if (err) | ||
229 | return err; | ||
230 | } | ||
231 | return 0; | ||
141 | } | 232 | } |
142 | 233 | ||
143 | /* | 234 | /* Change state and parameters on port. */ |
144 | * Change state of port (ie from forwarding to blocking etc) | 235 | int br_setlink(struct net_device *dev, struct nlmsghdr *nlh) |
145 | * Used by spanning tree in user space. | ||
146 | */ | ||
147 | static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | ||
148 | { | 236 | { |
149 | struct net *net = sock_net(skb->sk); | ||
150 | struct ifinfomsg *ifm; | 237 | struct ifinfomsg *ifm; |
151 | struct nlattr *protinfo; | 238 | struct nlattr *protinfo; |
152 | struct net_device *dev; | ||
153 | struct net_bridge_port *p; | 239 | struct net_bridge_port *p; |
154 | u8 new_state; | 240 | struct nlattr *tb[IFLA_BRPORT_MAX]; |
155 | 241 | int err; | |
156 | if (nlmsg_len(nlh) < sizeof(*ifm)) | ||
157 | return -EINVAL; | ||
158 | 242 | ||
159 | ifm = nlmsg_data(nlh); | 243 | ifm = nlmsg_data(nlh); |
160 | if (ifm->ifi_family != AF_BRIDGE) | ||
161 | return -EPFNOSUPPORT; | ||
162 | 244 | ||
163 | protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO); | 245 | protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO); |
164 | if (!protinfo || nla_len(protinfo) < sizeof(u8)) | 246 | if (!protinfo) |
165 | return -EINVAL; | 247 | return 0; |
166 | |||
167 | new_state = nla_get_u8(protinfo); | ||
168 | if (new_state > BR_STATE_BLOCKING) | ||
169 | return -EINVAL; | ||
170 | |||
171 | dev = __dev_get_by_index(net, ifm->ifi_index); | ||
172 | if (!dev) | ||
173 | return -ENODEV; | ||
174 | 248 | ||
175 | p = br_port_get_rtnl(dev); | 249 | p = br_port_get_rtnl(dev); |
176 | if (!p) | 250 | if (!p) |
177 | return -EINVAL; | 251 | return -EINVAL; |
178 | 252 | ||
179 | /* if kernel STP is running, don't allow changes */ | 253 | if (protinfo->nla_type & NLA_F_NESTED) { |
180 | if (p->br->stp_enabled == BR_KERNEL_STP) | 254 | err = nla_parse_nested(tb, IFLA_BRPORT_MAX, |
181 | return -EBUSY; | 255 | protinfo, ifla_brport_policy); |
182 | 256 | if (err) | |
183 | if (!netif_running(dev) || | 257 | return err; |
184 | (!netif_carrier_ok(dev) && new_state != BR_STATE_DISABLED)) | 258 | |
185 | return -ENETDOWN; | 259 | spin_lock_bh(&p->br->lock); |
186 | 260 | err = br_setport(p, tb); | |
187 | p->state = new_state; | 261 | spin_unlock_bh(&p->br->lock); |
188 | br_log_state(p); | 262 | } else { |
263 | /* Binary compatability with old RSTP */ | ||
263 | /* Binary compatibility with old RSTP */ | ||
264 | if (nla_len(protinfo) < sizeof(u8)) | ||
265 | return -EINVAL; | ||
189 | 266 | ||
190 | spin_lock_bh(&p->br->lock); | 267 | spin_lock_bh(&p->br->lock); |
191 | br_port_state_selection(p->br); | 268 | err = br_set_port_state(p, nla_get_u8(protinfo)); |
192 | spin_unlock_bh(&p->br->lock); | 269 | spin_unlock_bh(&p->br->lock); |
270 | } | ||
193 | 271 | ||
194 | br_ifinfo_notify(RTM_NEWLINK, p); | 272 | if (err == 0) |
273 | br_ifinfo_notify(RTM_NEWLINK, p); | ||
195 | 274 | ||
196 | return 0; | 275 | return err; |
197 | } | 276 | } |
198 | 277 | ||
199 | static int br_validate(struct nlattr *tb[], struct nlattr *data[]) | 278 | static int br_validate(struct nlattr *tb[], struct nlattr *data[]) |
@@ -218,29 +297,7 @@ struct rtnl_link_ops br_link_ops __read_mostly = { | |||
218 | 297 | ||
219 | int __init br_netlink_init(void) | 298 | int __init br_netlink_init(void) |
220 | { | 299 | { |
221 | int err; | 300 | return rtnl_link_register(&br_link_ops); |
222 | |||
223 | err = rtnl_link_register(&br_link_ops); | ||
224 | if (err < 0) | ||
225 | goto err1; | ||
226 | |||
227 | err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, | ||
228 | br_dump_ifinfo, NULL); | ||
229 | if (err) | ||
230 | goto err2; | ||
231 | err = __rtnl_register(PF_BRIDGE, RTM_SETLINK, | ||
232 | br_rtm_setlink, NULL, NULL); | ||
233 | if (err) | ||
234 | goto err3; | ||
235 | |||
236 | return 0; | ||
237 | |||
238 | err3: | ||
239 | rtnl_unregister_all(PF_BRIDGE); | ||
240 | err2: | ||
241 | rtnl_link_unregister(&br_link_ops); | ||
242 | err1: | ||
243 | return err; | ||
244 | } | 301 | } |
245 | 302 | ||
246 | void __exit br_netlink_fini(void) | 303 | void __exit br_netlink_fini(void) |
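Instead of a single IFLA_PROTINFO byte carrying only the STP port state, the bridge now emits IFLA_PROTINFO as a nested block of IFLA_BRPORT_* attributes and accepts the same nesting in br_setlink(), keeping the old one-byte form only for binary compatibility. A short sketch of the netlink nesting pattern in isolation; the single attribute and the function name are illustrative, not the full br_port_fill_attrs():

	#include <net/netlink.h>
	#include <linux/if_link.h>

	static int put_port_state_nested(struct sk_buff *skb, u8 state)
	{
		struct nlattr *nest;

		nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
		if (!nest)
			return -EMSGSIZE;

		if (nla_put_u8(skb, IFLA_BRPORT_STATE, state)) {
			nla_nest_cancel(skb, nest);	/* roll back the partial nest */
			return -EMSGSIZE;
		}

		nla_nest_end(skb, nest);		/* patch in the final nest length */
		return 0;
	}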
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 9b278c4ebee1..eb9cd42146a5 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -135,6 +135,8 @@ struct net_bridge_port | |||
135 | 135 | ||
136 | unsigned long flags; | 136 | unsigned long flags; |
137 | #define BR_HAIRPIN_MODE 0x00000001 | 137 | #define BR_HAIRPIN_MODE 0x00000001 |
138 | #define BR_BPDU_GUARD 0x00000002 | ||
139 | #define BR_ROOT_BLOCK 0x00000004 | ||
138 | 140 | ||
139 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | 141 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING |
140 | u32 multicast_startup_queries_sent; | 142 | u32 multicast_startup_queries_sent; |
@@ -158,7 +160,9 @@ struct net_bridge_port | |||
158 | 160 | ||
159 | static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev) | 161 | static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev) |
160 | { | 162 | { |
161 | struct net_bridge_port *port = rcu_dereference(dev->rx_handler_data); | 163 | struct net_bridge_port *port = |
164 | rcu_dereference_rtnl(dev->rx_handler_data); | ||
165 | |||
162 | return br_port_exists(dev) ? port : NULL; | 166 | return br_port_exists(dev) ? port : NULL; |
163 | } | 167 | } |
164 | 168 | ||
@@ -288,7 +292,6 @@ struct br_input_skb_cb { | |||
288 | pr_debug("%s: " format, (br)->dev->name, ##args) | 292 | pr_debug("%s: " format, (br)->dev->name, ##args) |
289 | 293 | ||
290 | extern struct notifier_block br_device_notifier; | 294 | extern struct notifier_block br_device_notifier; |
291 | extern const u8 br_group_address[ETH_ALEN]; | ||
292 | 295 | ||
293 | /* called under bridge lock */ | 296 | /* called under bridge lock */ |
294 | static inline int br_is_root_bridge(const struct net_bridge *br) | 297 | static inline int br_is_root_bridge(const struct net_bridge *br) |
@@ -553,6 +556,9 @@ extern struct rtnl_link_ops br_link_ops; | |||
553 | extern int br_netlink_init(void); | 556 | extern int br_netlink_init(void); |
554 | extern void br_netlink_fini(void); | 557 | extern void br_netlink_fini(void); |
555 | extern void br_ifinfo_notify(int event, struct net_bridge_port *port); | 558 | extern void br_ifinfo_notify(int event, struct net_bridge_port *port); |
559 | extern int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg); | ||
560 | extern int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, | ||
561 | struct net_device *dev); | ||
556 | 562 | ||
557 | #ifdef CONFIG_SYSFS | 563 | #ifdef CONFIG_SYSFS |
558 | /* br_sysfs_if.c */ | 564 | /* br_sysfs_if.c */ |
@@ -566,10 +572,10 @@ extern void br_sysfs_delbr(struct net_device *dev); | |||
566 | 572 | ||
567 | #else | 573 | #else |
568 | 574 | ||
569 | #define br_sysfs_addif(p) (0) | 575 | static inline int br_sysfs_addif(struct net_bridge_port *p) { return 0; } |
570 | #define br_sysfs_renameif(p) (0) | 576 | static inline int br_sysfs_renameif(struct net_bridge_port *p) { return 0; } |
571 | #define br_sysfs_addbr(dev) (0) | 577 | static inline int br_sysfs_addbr(struct net_device *dev) { return 0; } |
572 | #define br_sysfs_delbr(dev) do { } while(0) | 578 | static inline void br_sysfs_delbr(struct net_device *dev) { return; } |
573 | #endif /* CONFIG_SYSFS */ | 579 | #endif /* CONFIG_SYSFS */ |
574 | 580 | ||
575 | #endif | 581 | #endif |
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index af9a12099ba4..b01849a74310 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c | |||
@@ -100,6 +100,21 @@ static int br_should_become_root_port(const struct net_bridge_port *p, | |||
100 | return 0; | 100 | return 0; |
101 | } | 101 | } |
102 | 102 | ||
103 | static void br_root_port_block(const struct net_bridge *br, | ||
104 | struct net_bridge_port *p) | ||
105 | { | ||
106 | |||
107 | br_notice(br, "port %u(%s) tried to become root port (blocked)", | ||
108 | (unsigned int) p->port_no, p->dev->name); | ||
109 | |||
110 | p->state = BR_STATE_LISTENING; | ||
111 | br_log_state(p); | ||
112 | br_ifinfo_notify(RTM_NEWLINK, p); | ||
113 | |||
114 | if (br->forward_delay > 0) | ||
115 | mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay); | ||
116 | } | ||
117 | |||
103 | /* called under bridge lock */ | 118 | /* called under bridge lock */ |
104 | static void br_root_selection(struct net_bridge *br) | 119 | static void br_root_selection(struct net_bridge *br) |
105 | { | 120 | { |
@@ -107,7 +122,12 @@ static void br_root_selection(struct net_bridge *br) | |||
107 | u16 root_port = 0; | 122 | u16 root_port = 0; |
108 | 123 | ||
109 | list_for_each_entry(p, &br->port_list, list) { | 124 | list_for_each_entry(p, &br->port_list, list) { |
110 | if (br_should_become_root_port(p, root_port)) | 125 | if (!br_should_become_root_port(p, root_port)) |
126 | continue; | ||
127 | |||
128 | if (p->flags & BR_ROOT_BLOCK) | ||
129 | br_root_port_block(br, p); | ||
130 | else | ||
111 | root_port = p->port_no; | 131 | root_port = p->port_no; |
112 | } | 132 | } |
113 | 133 | ||
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index fd30a6022dea..7f884e3fb955 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c | |||
@@ -170,6 +170,13 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, | |||
170 | if (!ether_addr_equal(dest, br->group_addr)) | 170 | if (!ether_addr_equal(dest, br->group_addr)) |
171 | goto out; | 171 | goto out; |
172 | 172 | ||
173 | if (p->flags & BR_BPDU_GUARD) { | ||
174 | br_notice(br, "BPDU received on blocked port %u(%s)\n", | ||
175 | (unsigned int) p->port_no, p->dev->name); | ||
176 | br_stp_disable_port(p); | ||
177 | goto out; | ||
178 | } | ||
179 | |||
173 | buf = skb_pull(skb, 3); | 180 | buf = skb_pull(skb, 3); |
174 | 181 | ||
175 | if (buf[0] == BPDU_TYPE_CONFIG) { | 182 | if (buf[0] == BPDU_TYPE_CONFIG) { |
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index c5c059333eab..5913a3a0047b 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/capability.h> | 14 | #include <linux/capability.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
17 | #include <linux/etherdevice.h> | ||
17 | #include <linux/if_bridge.h> | 18 | #include <linux/if_bridge.h> |
18 | #include <linux/rtnetlink.h> | 19 | #include <linux/rtnetlink.h> |
19 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
@@ -36,7 +37,7 @@ static ssize_t store_bridge_parm(struct device *d, | |||
36 | unsigned long val; | 37 | unsigned long val; |
37 | int err; | 38 | int err; |
38 | 39 | ||
39 | if (!capable(CAP_NET_ADMIN)) | 40 | if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN)) |
40 | return -EPERM; | 41 | return -EPERM; |
41 | 42 | ||
42 | val = simple_strtoul(buf, &endp, 0); | 43 | val = simple_strtoul(buf, &endp, 0); |
@@ -132,7 +133,7 @@ static ssize_t store_stp_state(struct device *d, | |||
132 | char *endp; | 133 | char *endp; |
133 | unsigned long val; | 134 | unsigned long val; |
134 | 135 | ||
135 | if (!capable(CAP_NET_ADMIN)) | 136 | if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN)) |
136 | return -EPERM; | 137 | return -EPERM; |
137 | 138 | ||
138 | val = simple_strtoul(buf, &endp, 0); | 139 | val = simple_strtoul(buf, &endp, 0); |
@@ -165,7 +166,7 @@ static ssize_t store_group_fwd_mask(struct device *d, | |||
165 | char *endp; | 166 | char *endp; |
166 | unsigned long val; | 167 | unsigned long val; |
167 | 168 | ||
168 | if (!capable(CAP_NET_ADMIN)) | 169 | if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN)) |
169 | return -EPERM; | 170 | return -EPERM; |
170 | 171 | ||
171 | val = simple_strtoul(buf, &endp, 0); | 172 | val = simple_strtoul(buf, &endp, 0); |
@@ -297,23 +298,18 @@ static ssize_t store_group_addr(struct device *d, | |||
297 | const char *buf, size_t len) | 298 | const char *buf, size_t len) |
298 | { | 299 | { |
299 | struct net_bridge *br = to_bridge(d); | 300 | struct net_bridge *br = to_bridge(d); |
300 | unsigned int new_addr[6]; | 301 | u8 new_addr[6]; |
301 | int i; | 302 | int i; |
302 | 303 | ||
303 | if (!capable(CAP_NET_ADMIN)) | 304 | if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN)) |
304 | return -EPERM; | 305 | return -EPERM; |
305 | 306 | ||
306 | if (sscanf(buf, "%x:%x:%x:%x:%x:%x", | 307 | if (sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", |
307 | &new_addr[0], &new_addr[1], &new_addr[2], | 308 | &new_addr[0], &new_addr[1], &new_addr[2], |
308 | &new_addr[3], &new_addr[4], &new_addr[5]) != 6) | 309 | &new_addr[3], &new_addr[4], &new_addr[5]) != 6) |
309 | return -EINVAL; | 310 | return -EINVAL; |
310 | 311 | ||
311 | /* Must be 01:80:c2:00:00:0X */ | 312 | if (!is_link_local_ether_addr(new_addr)) |
312 | for (i = 0; i < 5; i++) | ||
313 | if (new_addr[i] != br_group_address[i]) | ||
314 | return -EINVAL; | ||
315 | |||
316 | if (new_addr[5] & ~0xf) | ||
317 | return -EINVAL; | 313 | return -EINVAL; |
318 | 314 | ||
319 | if (new_addr[5] == 1 || /* 802.3x Pause address */ | 315 | if (new_addr[5] == 1 || /* 802.3x Pause address */ |
@@ -337,7 +333,7 @@ static ssize_t store_flush(struct device *d, | |||
337 | { | 333 | { |
338 | struct net_bridge *br = to_bridge(d); | 334 | struct net_bridge *br = to_bridge(d); |
339 | 335 | ||
340 | if (!capable(CAP_NET_ADMIN)) | 336 | if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN)) |
341 | return -EPERM; | 337 | return -EPERM; |
342 | 338 | ||
343 | br_fdb_flush(br); | 339 | br_fdb_flush(br); |
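store_group_addr() now scans straight into a u8 array, which requires the %hhx conversion so each matched value is stored as a single byte rather than into an unsigned int that is narrowed afterwards. A sketch of just the parsing idiom, with an illustrative function name:

	#include <linux/kernel.h>	/* sscanf */
	#include <linux/types.h>

	static int parse_mac_sketch(const char *buf, u8 addr[6])
	{
		if (sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
			   &addr[0], &addr[1], &addr[2],
			   &addr[3], &addr[4], &addr[5]) != 6)
			return -EINVAL;

		return 0;
	}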
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index 13b36bdc76a7..7ff95ba21982 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c | |||
@@ -34,6 +34,28 @@ const struct brport_attribute brport_attr_##_name = { \ | |||
34 | .store = _store, \ | 34 | .store = _store, \ |
35 | }; | 35 | }; |
36 | 36 | ||
37 | #define BRPORT_ATTR_FLAG(_name, _mask) \ | ||
38 | static ssize_t show_##_name(struct net_bridge_port *p, char *buf) \ | ||
39 | { \ | ||
40 | return sprintf(buf, "%d\n", !!(p->flags & _mask)); \ | ||
41 | } \ | ||
42 | static int store_##_name(struct net_bridge_port *p, unsigned long v) \ | ||
43 | { \ | ||
44 | unsigned long flags = p->flags; \ | ||
45 | if (v) \ | ||
46 | flags |= _mask; \ | ||
47 | else \ | ||
48 | flags &= ~_mask; \ | ||
49 | if (flags != p->flags) { \ | ||
50 | p->flags = flags; \ | ||
51 | br_ifinfo_notify(RTM_NEWLINK, p); \ | ||
52 | } \ | ||
53 | return 0; \ | ||
54 | } \ | ||
55 | static BRPORT_ATTR(_name, S_IRUGO | S_IWUSR, \ | ||
56 | show_##_name, store_##_name) | ||
57 | |||
58 | |||
37 | static ssize_t show_path_cost(struct net_bridge_port *p, char *buf) | 59 | static ssize_t show_path_cost(struct net_bridge_port *p, char *buf) |
38 | { | 60 | { |
39 | return sprintf(buf, "%d\n", p->path_cost); | 61 | return sprintf(buf, "%d\n", p->path_cost); |
@@ -133,21 +155,9 @@ static int store_flush(struct net_bridge_port *p, unsigned long v) | |||
133 | } | 155 | } |
134 | static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush); | 156 | static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush); |
135 | 157 | ||
136 | static ssize_t show_hairpin_mode(struct net_bridge_port *p, char *buf) | 158 | BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE); |
137 | { | 159 | BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD); |
138 | int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0; | 160 | BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK); |
139 | return sprintf(buf, "%d\n", hairpin_mode); | ||
140 | } | ||
141 | static int store_hairpin_mode(struct net_bridge_port *p, unsigned long v) | ||
142 | { | ||
143 | if (v) | ||
144 | p->flags |= BR_HAIRPIN_MODE; | ||
145 | else | ||
146 | p->flags &= ~BR_HAIRPIN_MODE; | ||
147 | return 0; | ||
148 | } | ||
149 | static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR, | ||
150 | show_hairpin_mode, store_hairpin_mode); | ||
151 | 161 | ||
152 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | 162 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING |
153 | static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) | 163 | static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) |
@@ -181,6 +191,8 @@ static const struct brport_attribute *brport_attrs[] = { | |||
181 | &brport_attr_hold_timer, | 191 | &brport_attr_hold_timer, |
182 | &brport_attr_flush, | 192 | &brport_attr_flush, |
183 | &brport_attr_hairpin_mode, | 193 | &brport_attr_hairpin_mode, |
194 | &brport_attr_bpdu_guard, | ||
195 | &brport_attr_root_block, | ||
184 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | 196 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING |
185 | &brport_attr_multicast_router, | 197 | &brport_attr_multicast_router, |
186 | #endif | 198 | #endif |
@@ -209,7 +221,7 @@ static ssize_t brport_store(struct kobject * kobj, | |||
209 | char *endp; | 221 | char *endp; |
210 | unsigned long val; | 222 | unsigned long val; |
211 | 223 | ||
212 | if (!capable(CAP_NET_ADMIN)) | 224 | if (!ns_capable(dev_net(p->dev)->user_ns, CAP_NET_ADMIN)) |
213 | return -EPERM; | 225 | return -EPERM; |
214 | 226 | ||
215 | val = simple_strtoul(buf, &endp, 0); | 227 | val = simple_strtoul(buf, &endp, 0); |
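BRPORT_ATTR_FLAG above folds the per-flag show/store boilerplate into one macro; hairpin_mode is converted to it and bpdu_guard/root_block reuse it. The token-pasting pattern in isolation, as a standalone sketch (plain C, without the sysfs plumbing or the br_ifinfo_notify(RTM_NEWLINK, p) call the kernel macro adds on change):

#include <stdio.h>

struct port {
	unsigned long flags;
};

#define FLAG_ATTR(_name, _mask)					\
static int show_##_name(const struct port *p)			\
{								\
	return !!(p->flags & _mask);				\
}								\
static void store_##_name(struct port *p, int v)		\
{								\
	if (v)							\
		p->flags |= _mask;				\
	else							\
		p->flags &= ~_mask;				\
}

#define PORT_HAIRPIN	0x1
#define PORT_BPDU_GUARD	0x2

FLAG_ATTR(hairpin_mode, PORT_HAIRPIN)
FLAG_ATTR(bpdu_guard, PORT_BPDU_GUARD)

int main(void)
{
	struct port p = { 0 };

	store_bpdu_guard(&p, 1);
	printf("hairpin=%d bpdu_guard=%d\n",
	       show_hairpin_mode(&p), show_bpdu_guard(&p));
	return 0;
}

Each FLAG_ATTR() expansion generates a matched show_<name>()/store_<name>() pair, which is exactly how the three brport attributes are produced from one definition.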
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index 44f270fc2d06..a376ec1ac0a7 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c | |||
@@ -515,8 +515,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) | |||
515 | client_layer : NULL); | 515 | client_layer : NULL); |
516 | } | 516 | } |
517 | 517 | ||
518 | if (req != NULL) | 518 | kfree(req); |
519 | kfree(req); | ||
520 | 519 | ||
521 | spin_unlock_bh(&cfctrl->info_list_lock); | 520 | spin_unlock_bh(&cfctrl->info_list_lock); |
522 | } | 521 | } |
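The cfctrl change simply drops a redundant NULL test: kfree(NULL) is defined as a no-op, just as free(NULL) is in userspace C, so the guard adds nothing. A trivial illustration:

#include <stdlib.h>

int main(void)
{
	char *req = NULL;

	free(req);	/* well-defined no-op; no NULL check needed */
	return 0;
}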
diff --git a/net/can/gw.c b/net/can/gw.c index 1f5c9785a262..574dda78eb0f 100644 --- a/net/can/gw.c +++ b/net/can/gw.c | |||
@@ -751,6 +751,9 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
751 | struct cgw_job *gwj; | 751 | struct cgw_job *gwj; |
752 | int err = 0; | 752 | int err = 0; |
753 | 753 | ||
754 | if (!capable(CAP_NET_ADMIN)) | ||
755 | return -EPERM; | ||
756 | |||
754 | if (nlmsg_len(nlh) < sizeof(*r)) | 757 | if (nlmsg_len(nlh) < sizeof(*r)) |
755 | return -EINVAL; | 758 | return -EINVAL; |
756 | 759 | ||
@@ -839,6 +842,9 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
839 | struct can_can_gw ccgw; | 842 | struct can_can_gw ccgw; |
840 | int err = 0; | 843 | int err = 0; |
841 | 844 | ||
845 | if (!capable(CAP_NET_ADMIN)) | ||
846 | return -EPERM; | ||
847 | |||
842 | if (nlmsg_len(nlh) < sizeof(*r)) | 848 | if (nlmsg_len(nlh) < sizeof(*r)) |
843 | return -EINVAL; | 849 | return -EINVAL; |
844 | 850 | ||
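cgw_create_job() and cgw_remove_job() now reject callers that lack CAP_NET_ADMIN. A userspace tool can check its own effective capability set before issuing the gateway netlink request; a sketch using libcap (link with -lcap; the check is advisory, the kernel still enforces the real -EPERM):

#include <stdio.h>
#include <sys/capability.h>

/* Returns 1 if the current process has CAP_NET_ADMIN in its effective set. */
static int have_net_admin(void)
{
	cap_t caps = cap_get_proc();
	cap_flag_value_t v = CAP_CLEAR;

	if (!caps)
		return 0;
	cap_get_flag(caps, CAP_NET_ADMIN, CAP_EFFECTIVE, &v);
	cap_free(caps);
	return v == CAP_SET;
}

int main(void)
{
	if (!have_net_admin())
		fprintf(stderr, "expect -EPERM from can-gw create/remove requests\n");
	return 0;
}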
diff --git a/net/core/dev.c b/net/core/dev.c index c0946cb2b354..2a5f55866429 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -176,8 +176,10 @@ | |||
176 | #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) | 176 | #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) |
177 | 177 | ||
178 | static DEFINE_SPINLOCK(ptype_lock); | 178 | static DEFINE_SPINLOCK(ptype_lock); |
179 | static DEFINE_SPINLOCK(offload_lock); | ||
179 | static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; | 180 | static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; |
180 | static struct list_head ptype_all __read_mostly; /* Taps */ | 181 | static struct list_head ptype_all __read_mostly; /* Taps */ |
182 | static struct list_head offload_base __read_mostly; | ||
181 | 183 | ||
182 | /* | 184 | /* |
183 | * The @dev_base_head list is protected by @dev_base_lock and the rtnl | 185 | * The @dev_base_head list is protected by @dev_base_lock and the rtnl |
@@ -201,6 +203,8 @@ static struct list_head ptype_all __read_mostly; /* Taps */ | |||
201 | DEFINE_RWLOCK(dev_base_lock); | 203 | DEFINE_RWLOCK(dev_base_lock); |
202 | EXPORT_SYMBOL(dev_base_lock); | 204 | EXPORT_SYMBOL(dev_base_lock); |
203 | 205 | ||
206 | DEFINE_SEQLOCK(devnet_rename_seq); | ||
207 | |||
204 | static inline void dev_base_seq_inc(struct net *net) | 208 | static inline void dev_base_seq_inc(struct net *net) |
205 | { | 209 | { |
206 | while (++net->dev_base_seq == 0); | 210 | while (++net->dev_base_seq == 0); |
@@ -470,6 +474,82 @@ void dev_remove_pack(struct packet_type *pt) | |||
470 | } | 474 | } |
471 | EXPORT_SYMBOL(dev_remove_pack); | 475 | EXPORT_SYMBOL(dev_remove_pack); |
472 | 476 | ||
477 | |||
478 | /** | ||
479 | * dev_add_offload - register offload handlers | ||
480 | * @po: protocol offload declaration | ||
481 | * | ||
482 | * Add protocol offload handlers to the networking stack. The passed | ||
483 | * &proto_offload is linked into kernel lists and may not be freed until | ||
484 | * it has been removed from the kernel lists. | ||
485 | * | ||
486 | * This call does not sleep therefore it can not | ||
487 | * guarantee all CPU's that are in middle of receiving packets | ||
488 | * will see the new offload handlers (until the next received packet). | ||
489 | */ | ||
490 | void dev_add_offload(struct packet_offload *po) | ||
491 | { | ||
492 | struct list_head *head = &offload_base; | ||
493 | |||
494 | spin_lock(&offload_lock); | ||
495 | list_add_rcu(&po->list, head); | ||
496 | spin_unlock(&offload_lock); | ||
497 | } | ||
498 | EXPORT_SYMBOL(dev_add_offload); | ||
499 | |||
500 | /** | ||
501 | * __dev_remove_offload - remove offload handler | ||
502 | * @po: packet offload declaration | ||
503 | * | ||
504 | * Remove a protocol offload handler that was previously added to the | ||
505 | * kernel offload handlers by dev_add_offload(). The passed &offload_type | ||
506 | * is removed from the kernel lists and can be freed or reused once this | ||
507 | * function returns. | ||
508 | * | ||
509 | * The packet type might still be in use by receivers | ||
510 | * and must not be freed until after all the CPU's have gone | ||
511 | * through a quiescent state. | ||
512 | */ | ||
513 | void __dev_remove_offload(struct packet_offload *po) | ||
514 | { | ||
515 | struct list_head *head = &offload_base; | ||
516 | struct packet_offload *po1; | ||
517 | |||
518 | spin_lock(&offload_lock); | ||
519 | |||
520 | list_for_each_entry(po1, head, list) { | ||
521 | if (po == po1) { | ||
522 | list_del_rcu(&po->list); | ||
523 | goto out; | ||
524 | } | ||
525 | } | ||
526 | |||
527 | pr_warn("dev_remove_offload: %p not found\n", po); | ||
528 | out: | ||
529 | spin_unlock(&offload_lock); | ||
530 | } | ||
531 | EXPORT_SYMBOL(__dev_remove_offload); | ||
532 | |||
533 | /** | ||
534 | * dev_remove_offload - remove packet offload handler | ||
535 | * @po: packet offload declaration | ||
536 | * | ||
537 | * Remove a packet offload handler that was previously added to the kernel | ||
538 | * offload handlers by dev_add_offload(). The passed &offload_type is | ||
539 | * removed from the kernel lists and can be freed or reused once this | ||
540 | * function returns. | ||
541 | * | ||
542 | * This call sleeps to guarantee that no CPU is looking at the packet | ||
543 | * type after return. | ||
544 | */ | ||
545 | void dev_remove_offload(struct packet_offload *po) | ||
546 | { | ||
547 | __dev_remove_offload(po); | ||
548 | |||
549 | synchronize_net(); | ||
550 | } | ||
551 | EXPORT_SYMBOL(dev_remove_offload); | ||
552 | |||
473 | /****************************************************************************** | 553 | /****************************************************************************** |
474 | 554 | ||
475 | Device Boot-time Settings Routines | 555 | Device Boot-time Settings Routines |
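dev_add_offload()/dev_remove_offload() above move the GSO/GRO callbacks off the packet_type hash and onto a dedicated offload_base list, written under offload_lock and walked under RCU by the fast path. A deliberately simplified userspace analogue of the registry (single-threaded, plain linked list instead of RCU) to show the lookup-by-ethertype pattern; none of these names are the kernel API:

#include <stdint.h>
#include <stdio.h>

struct offload {
	uint16_t type;			/* ethertype, e.g. 0x0800 for IPv4 */
	const char *(*gso_segment)(void);
	struct offload *next;
};

static struct offload *offload_base;

static void add_offload(struct offload *po)
{
	po->next = offload_base;	/* kernel: list_add_rcu() under offload_lock */
	offload_base = po;
}

static const char *segment(uint16_t type)
{
	for (struct offload *po = offload_base; po; po = po->next)
		if (po->type == type && po->gso_segment)
			return po->gso_segment();
	return "no offload handler";
}

static const char *ipv4_gso(void) { return "ipv4 gso_segment"; }

int main(void)
{
	static struct offload ipv4 = { .type = 0x0800, .gso_segment = ipv4_gso };

	add_offload(&ipv4);
	printf("%s\n", segment(0x0800));	/* handler found */
	printf("%s\n", segment(0x86dd));	/* no IPv6 handler registered */
	return 0;
}

The later skb_gso_segment()/napi_gro_* hunks in this file are the read side of exactly this lookup, just with ptype->callbacks.* instead of a single function pointer.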
@@ -1013,22 +1093,31 @@ int dev_change_name(struct net_device *dev, const char *newname) | |||
1013 | if (dev->flags & IFF_UP) | 1093 | if (dev->flags & IFF_UP) |
1014 | return -EBUSY; | 1094 | return -EBUSY; |
1015 | 1095 | ||
1016 | if (strncmp(newname, dev->name, IFNAMSIZ) == 0) | 1096 | write_seqlock(&devnet_rename_seq); |
1097 | |||
1098 | if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { | ||
1099 | write_sequnlock(&devnet_rename_seq); | ||
1017 | return 0; | 1100 | return 0; |
1101 | } | ||
1018 | 1102 | ||
1019 | memcpy(oldname, dev->name, IFNAMSIZ); | 1103 | memcpy(oldname, dev->name, IFNAMSIZ); |
1020 | 1104 | ||
1021 | err = dev_get_valid_name(net, dev, newname); | 1105 | err = dev_get_valid_name(net, dev, newname); |
1022 | if (err < 0) | 1106 | if (err < 0) { |
1107 | write_sequnlock(&devnet_rename_seq); | ||
1023 | return err; | 1108 | return err; |
1109 | } | ||
1024 | 1110 | ||
1025 | rollback: | 1111 | rollback: |
1026 | ret = device_rename(&dev->dev, dev->name); | 1112 | ret = device_rename(&dev->dev, dev->name); |
1027 | if (ret) { | 1113 | if (ret) { |
1028 | memcpy(dev->name, oldname, IFNAMSIZ); | 1114 | memcpy(dev->name, oldname, IFNAMSIZ); |
1115 | write_sequnlock(&devnet_rename_seq); | ||
1029 | return ret; | 1116 | return ret; |
1030 | } | 1117 | } |
1031 | 1118 | ||
1119 | write_sequnlock(&devnet_rename_seq); | ||
1120 | |||
1032 | write_lock_bh(&dev_base_lock); | 1121 | write_lock_bh(&dev_base_lock); |
1033 | hlist_del_rcu(&dev->name_hlist); | 1122 | hlist_del_rcu(&dev->name_hlist); |
1034 | write_unlock_bh(&dev_base_lock); | 1123 | write_unlock_bh(&dev_base_lock); |
@@ -1046,6 +1135,7 @@ rollback: | |||
1046 | /* err >= 0 after dev_alloc_name() or stores the first errno */ | 1135 | /* err >= 0 after dev_alloc_name() or stores the first errno */ |
1047 | if (err >= 0) { | 1136 | if (err >= 0) { |
1048 | err = ret; | 1137 | err = ret; |
1138 | write_seqlock(&devnet_rename_seq); | ||
1049 | memcpy(dev->name, oldname, IFNAMSIZ); | 1139 | memcpy(dev->name, oldname, IFNAMSIZ); |
1050 | goto rollback; | 1140 | goto rollback; |
1051 | } else { | 1141 | } else { |
@@ -1075,10 +1165,8 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) | |||
1075 | return -EINVAL; | 1165 | return -EINVAL; |
1076 | 1166 | ||
1077 | if (!len) { | 1167 | if (!len) { |
1078 | if (dev->ifalias) { | 1168 | kfree(dev->ifalias); |
1079 | kfree(dev->ifalias); | 1169 | dev->ifalias = NULL; |
1080 | dev->ifalias = NULL; | ||
1081 | } | ||
1082 | return 0; | 1170 | return 0; |
1083 | } | 1171 | } |
1084 | 1172 | ||
@@ -1994,7 +2082,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, | |||
1994 | netdev_features_t features) | 2082 | netdev_features_t features) |
1995 | { | 2083 | { |
1996 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); | 2084 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); |
1997 | struct packet_type *ptype; | 2085 | struct packet_offload *ptype; |
1998 | __be16 type = skb->protocol; | 2086 | __be16 type = skb->protocol; |
1999 | int vlan_depth = ETH_HLEN; | 2087 | int vlan_depth = ETH_HLEN; |
2000 | int err; | 2088 | int err; |
@@ -2023,18 +2111,17 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, | |||
2023 | } | 2111 | } |
2024 | 2112 | ||
2025 | rcu_read_lock(); | 2113 | rcu_read_lock(); |
2026 | list_for_each_entry_rcu(ptype, | 2114 | list_for_each_entry_rcu(ptype, &offload_base, list) { |
2027 | &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { | 2115 | if (ptype->type == type && ptype->callbacks.gso_segment) { |
2028 | if (ptype->type == type && !ptype->dev && ptype->gso_segment) { | ||
2029 | if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { | 2116 | if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { |
2030 | err = ptype->gso_send_check(skb); | 2117 | err = ptype->callbacks.gso_send_check(skb); |
2031 | segs = ERR_PTR(err); | 2118 | segs = ERR_PTR(err); |
2032 | if (err || skb_gso_ok(skb, features)) | 2119 | if (err || skb_gso_ok(skb, features)) |
2033 | break; | 2120 | break; |
2034 | __skb_push(skb, (skb->data - | 2121 | __skb_push(skb, (skb->data - |
2035 | skb_network_header(skb))); | 2122 | skb_network_header(skb))); |
2036 | } | 2123 | } |
2037 | segs = ptype->gso_segment(skb, features); | 2124 | segs = ptype->callbacks.gso_segment(skb, features); |
2038 | break; | 2125 | break; |
2039 | } | 2126 | } |
2040 | } | 2127 | } |
@@ -3446,9 +3533,9 @@ static void flush_backlog(void *arg) | |||
3446 | 3533 | ||
3447 | static int napi_gro_complete(struct sk_buff *skb) | 3534 | static int napi_gro_complete(struct sk_buff *skb) |
3448 | { | 3535 | { |
3449 | struct packet_type *ptype; | 3536 | struct packet_offload *ptype; |
3450 | __be16 type = skb->protocol; | 3537 | __be16 type = skb->protocol; |
3451 | struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; | 3538 | struct list_head *head = &offload_base; |
3452 | int err = -ENOENT; | 3539 | int err = -ENOENT; |
3453 | 3540 | ||
3454 | if (NAPI_GRO_CB(skb)->count == 1) { | 3541 | if (NAPI_GRO_CB(skb)->count == 1) { |
@@ -3458,10 +3545,10 @@ static int napi_gro_complete(struct sk_buff *skb) | |||
3458 | 3545 | ||
3459 | rcu_read_lock(); | 3546 | rcu_read_lock(); |
3460 | list_for_each_entry_rcu(ptype, head, list) { | 3547 | list_for_each_entry_rcu(ptype, head, list) { |
3461 | if (ptype->type != type || ptype->dev || !ptype->gro_complete) | 3548 | if (ptype->type != type || !ptype->callbacks.gro_complete) |
3462 | continue; | 3549 | continue; |
3463 | 3550 | ||
3464 | err = ptype->gro_complete(skb); | 3551 | err = ptype->callbacks.gro_complete(skb); |
3465 | break; | 3552 | break; |
3466 | } | 3553 | } |
3467 | rcu_read_unlock(); | 3554 | rcu_read_unlock(); |
@@ -3508,9 +3595,9 @@ EXPORT_SYMBOL(napi_gro_flush); | |||
3508 | enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | 3595 | enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
3509 | { | 3596 | { |
3510 | struct sk_buff **pp = NULL; | 3597 | struct sk_buff **pp = NULL; |
3511 | struct packet_type *ptype; | 3598 | struct packet_offload *ptype; |
3512 | __be16 type = skb->protocol; | 3599 | __be16 type = skb->protocol; |
3513 | struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; | 3600 | struct list_head *head = &offload_base; |
3514 | int same_flow; | 3601 | int same_flow; |
3515 | int mac_len; | 3602 | int mac_len; |
3516 | enum gro_result ret; | 3603 | enum gro_result ret; |
@@ -3523,7 +3610,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
3523 | 3610 | ||
3524 | rcu_read_lock(); | 3611 | rcu_read_lock(); |
3525 | list_for_each_entry_rcu(ptype, head, list) { | 3612 | list_for_each_entry_rcu(ptype, head, list) { |
3526 | if (ptype->type != type || ptype->dev || !ptype->gro_receive) | 3613 | if (ptype->type != type || !ptype->callbacks.gro_receive) |
3527 | continue; | 3614 | continue; |
3528 | 3615 | ||
3529 | skb_set_network_header(skb, skb_gro_offset(skb)); | 3616 | skb_set_network_header(skb, skb_gro_offset(skb)); |
@@ -3533,7 +3620,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) | |||
3533 | NAPI_GRO_CB(skb)->flush = 0; | 3620 | NAPI_GRO_CB(skb)->flush = 0; |
3534 | NAPI_GRO_CB(skb)->free = 0; | 3621 | NAPI_GRO_CB(skb)->free = 0; |
3535 | 3622 | ||
3536 | pp = ptype->gro_receive(&napi->gro_list, skb); | 3623 | pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); |
3537 | break; | 3624 | break; |
3538 | } | 3625 | } |
3539 | rcu_read_unlock(); | 3626 | rcu_read_unlock(); |
@@ -4073,6 +4160,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) | |||
4073 | { | 4160 | { |
4074 | struct net_device *dev; | 4161 | struct net_device *dev; |
4075 | struct ifreq ifr; | 4162 | struct ifreq ifr; |
4163 | unsigned seq; | ||
4076 | 4164 | ||
4077 | /* | 4165 | /* |
4078 | * Fetch the caller's info block. | 4166 | * Fetch the caller's info block. |
@@ -4081,6 +4169,8 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) | |||
4081 | if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) | 4169 | if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) |
4082 | return -EFAULT; | 4170 | return -EFAULT; |
4083 | 4171 | ||
4172 | retry: | ||
4173 | seq = read_seqbegin(&devnet_rename_seq); | ||
4084 | rcu_read_lock(); | 4174 | rcu_read_lock(); |
4085 | dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); | 4175 | dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); |
4086 | if (!dev) { | 4176 | if (!dev) { |
@@ -4090,6 +4180,8 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) | |||
4090 | 4180 | ||
4091 | strcpy(ifr.ifr_name, dev->name); | 4181 | strcpy(ifr.ifr_name, dev->name); |
4092 | rcu_read_unlock(); | 4182 | rcu_read_unlock(); |
4183 | if (read_seqretry(&devnet_rename_seq, seq)) | ||
4184 | goto retry; | ||
4093 | 4185 | ||
4094 | if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) | 4186 | if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) |
4095 | return -EFAULT; | 4187 | return -EFAULT; |
@@ -5202,7 +5294,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
5202 | case SIOCGMIIPHY: | 5294 | case SIOCGMIIPHY: |
5203 | case SIOCGMIIREG: | 5295 | case SIOCGMIIREG: |
5204 | case SIOCSIFNAME: | 5296 | case SIOCSIFNAME: |
5205 | if (!capable(CAP_NET_ADMIN)) | 5297 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
5206 | return -EPERM; | 5298 | return -EPERM; |
5207 | dev_load(net, ifr.ifr_name); | 5299 | dev_load(net, ifr.ifr_name); |
5208 | rtnl_lock(); | 5300 | rtnl_lock(); |
@@ -5223,16 +5315,25 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
5223 | * - require strict serialization. | 5315 | * - require strict serialization. |
5224 | * - do not return a value | 5316 | * - do not return a value |
5225 | */ | 5317 | */ |
5318 | case SIOCSIFMAP: | ||
5319 | case SIOCSIFTXQLEN: | ||
5320 | if (!capable(CAP_NET_ADMIN)) | ||
5321 | return -EPERM; | ||
5322 | /* fall through */ | ||
5323 | /* | ||
5324 | * These ioctl calls: | ||
5325 | * - require local superuser power. | ||
5326 | * - require strict serialization. | ||
5327 | * - do not return a value | ||
5328 | */ | ||
5226 | case SIOCSIFFLAGS: | 5329 | case SIOCSIFFLAGS: |
5227 | case SIOCSIFMETRIC: | 5330 | case SIOCSIFMETRIC: |
5228 | case SIOCSIFMTU: | 5331 | case SIOCSIFMTU: |
5229 | case SIOCSIFMAP: | ||
5230 | case SIOCSIFHWADDR: | 5332 | case SIOCSIFHWADDR: |
5231 | case SIOCSIFSLAVE: | 5333 | case SIOCSIFSLAVE: |
5232 | case SIOCADDMULTI: | 5334 | case SIOCADDMULTI: |
5233 | case SIOCDELMULTI: | 5335 | case SIOCDELMULTI: |
5234 | case SIOCSIFHWBROADCAST: | 5336 | case SIOCSIFHWBROADCAST: |
5235 | case SIOCSIFTXQLEN: | ||
5236 | case SIOCSMIIREG: | 5337 | case SIOCSMIIREG: |
5237 | case SIOCBONDENSLAVE: | 5338 | case SIOCBONDENSLAVE: |
5238 | case SIOCBONDRELEASE: | 5339 | case SIOCBONDRELEASE: |
@@ -5241,7 +5342,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
5241 | case SIOCBRADDIF: | 5342 | case SIOCBRADDIF: |
5242 | case SIOCBRDELIF: | 5343 | case SIOCBRDELIF: |
5243 | case SIOCSHWTSTAMP: | 5344 | case SIOCSHWTSTAMP: |
5244 | if (!capable(CAP_NET_ADMIN)) | 5345 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
5245 | return -EPERM; | 5346 | return -EPERM; |
5246 | /* fall through */ | 5347 | /* fall through */ |
5247 | case SIOCBONDSLAVEINFOQUERY: | 5348 | case SIOCBONDSLAVEINFOQUERY: |
@@ -6266,7 +6367,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
6266 | goto out; | 6367 | goto out; |
6267 | 6368 | ||
6268 | /* Ensure the device has been registrered */ | 6369 | /* Ensure the device has been registrered */ |
6269 | err = -EINVAL; | ||
6270 | if (dev->reg_state != NETREG_REGISTERED) | 6370 | if (dev->reg_state != NETREG_REGISTERED) |
6271 | goto out; | 6371 | goto out; |
6272 | 6372 | ||
@@ -6664,6 +6764,8 @@ static int __init net_dev_init(void) | |||
6664 | for (i = 0; i < PTYPE_HASH_SIZE; i++) | 6764 | for (i = 0; i < PTYPE_HASH_SIZE; i++) |
6665 | INIT_LIST_HEAD(&ptype_base[i]); | 6765 | INIT_LIST_HEAD(&ptype_base[i]); |
6666 | 6766 | ||
6767 | INIT_LIST_HEAD(&offload_base); | ||
6768 | |||
6667 | if (register_pernet_subsys(&netdev_net_ops)) | 6769 | if (register_pernet_subsys(&netdev_net_ops)) |
6668 | goto out; | 6770 | goto out; |
6669 | 6771 | ||
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 4d64cc2e3fa9..a8705432e4b1 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -1460,7 +1460,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
1460 | case ETHTOOL_GEEE: | 1460 | case ETHTOOL_GEEE: |
1461 | break; | 1461 | break; |
1462 | default: | 1462 | default: |
1463 | if (!capable(CAP_NET_ADMIN)) | 1463 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1464 | return -EPERM; | 1464 | return -EPERM; |
1465 | } | 1465 | } |
1466 | 1466 | ||
diff --git a/net/core/filter.c b/net/core/filter.c index 3d92ebb7fbcf..c23543cba132 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/reciprocal_div.h> | 39 | #include <linux/reciprocal_div.h> |
40 | #include <linux/ratelimit.h> | 40 | #include <linux/ratelimit.h> |
41 | #include <linux/seccomp.h> | 41 | #include <linux/seccomp.h> |
42 | #include <linux/if_vlan.h> | ||
42 | 43 | ||
43 | /* No hurry in this branch | 44 | /* No hurry in this branch |
44 | * | 45 | * |
@@ -341,6 +342,12 @@ load_b: | |||
341 | case BPF_S_ANC_CPU: | 342 | case BPF_S_ANC_CPU: |
342 | A = raw_smp_processor_id(); | 343 | A = raw_smp_processor_id(); |
343 | continue; | 344 | continue; |
345 | case BPF_S_ANC_VLAN_TAG: | ||
346 | A = vlan_tx_tag_get(skb); | ||
347 | continue; | ||
348 | case BPF_S_ANC_VLAN_TAG_PRESENT: | ||
349 | A = !!vlan_tx_tag_present(skb); | ||
350 | continue; | ||
344 | case BPF_S_ANC_NLATTR: { | 351 | case BPF_S_ANC_NLATTR: { |
345 | struct nlattr *nla; | 352 | struct nlattr *nla; |
346 | 353 | ||
@@ -600,6 +607,8 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen) | |||
600 | ANCILLARY(RXHASH); | 607 | ANCILLARY(RXHASH); |
601 | ANCILLARY(CPU); | 608 | ANCILLARY(CPU); |
602 | ANCILLARY(ALU_XOR_X); | 609 | ANCILLARY(ALU_XOR_X); |
610 | ANCILLARY(VLAN_TAG); | ||
611 | ANCILLARY(VLAN_TAG_PRESENT); | ||
603 | } | 612 | } |
604 | } | 613 | } |
605 | ftest->code = code; | 614 | ftest->code = code; |
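The two new ancillary loads let a classic BPF program read the hardware-accelerated VLAN tag, which never appears in the packet bytes. From userspace they are reached through the SKF_AD_* offsets; a sketch that accepts only VLAN-tagged frames on a packet socket (assumes <linux/filter.h> from a kernel carrying this patch defines SKF_AD_VLAN_TAG/SKF_AD_VLAN_TAG_PRESENT, and AF_PACKET requires CAP_NET_RAW):

#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/filter.h>

int main(void)
{
	/* A = vlan_tag_present; drop the frame if 0, else accept it whole */
	struct sock_filter insns[] = {
		{ BPF_LD  | BPF_W   | BPF_ABS, 0, 0,
		  SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT },
		{ BPF_JMP | BPF_JEQ | BPF_K,   1, 0, 0 },
		{ BPF_RET | BPF_K,             0, 0, 0xffffffff },
		{ BPF_RET | BPF_K,             0, 0, 0 },
	};
	struct sock_fprog prog = {
		.len	= sizeof(insns) / sizeof(insns[0]),
		.filter	= insns,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 ||
	    setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)))
		perror("attach");
	return 0;
}

The interpreter hunk above is what services the first instruction: a BPF_LD|BPF_W|BPF_ABS with k in the ancillary range is rewritten to BPF_S_ANC_VLAN_TAG_PRESENT and returns !!vlan_tx_tag_present(skb).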
@@ -751,3 +760,133 @@ int sk_detach_filter(struct sock *sk) | |||
751 | return ret; | 760 | return ret; |
752 | } | 761 | } |
753 | EXPORT_SYMBOL_GPL(sk_detach_filter); | 762 | EXPORT_SYMBOL_GPL(sk_detach_filter); |
763 | |||
764 | static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to) | ||
765 | { | ||
766 | static const u16 decodes[] = { | ||
767 | [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K, | ||
768 | [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X, | ||
769 | [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K, | ||
770 | [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X, | ||
771 | [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K, | ||
772 | [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X, | ||
773 | [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X, | ||
774 | [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K, | ||
775 | [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X, | ||
776 | [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K, | ||
777 | [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X, | ||
778 | [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K, | ||
779 | [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X, | ||
780 | [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K, | ||
781 | [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X, | ||
782 | [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K, | ||
783 | [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X, | ||
784 | [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K, | ||
785 | [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X, | ||
786 | [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG, | ||
787 | [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS, | ||
788 | [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS, | ||
789 | [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS, | ||
790 | [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS, | ||
791 | [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS, | ||
792 | [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS, | ||
793 | [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS, | ||
794 | [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS, | ||
795 | [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS, | ||
796 | [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS, | ||
797 | [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS, | ||
798 | [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS, | ||
799 | [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS, | ||
800 | [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS, | ||
801 | [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS, | ||
802 | [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS, | ||
803 | [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS, | ||
804 | [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN, | ||
805 | [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND, | ||
806 | [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND, | ||
807 | [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND, | ||
808 | [BPF_S_LD_IMM] = BPF_LD|BPF_IMM, | ||
809 | [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN, | ||
810 | [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH, | ||
811 | [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM, | ||
812 | [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX, | ||
813 | [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA, | ||
814 | [BPF_S_RET_K] = BPF_RET|BPF_K, | ||
815 | [BPF_S_RET_A] = BPF_RET|BPF_A, | ||
816 | [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K, | ||
817 | [BPF_S_LD_MEM] = BPF_LD|BPF_MEM, | ||
818 | [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM, | ||
819 | [BPF_S_ST] = BPF_ST, | ||
820 | [BPF_S_STX] = BPF_STX, | ||
821 | [BPF_S_JMP_JA] = BPF_JMP|BPF_JA, | ||
822 | [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K, | ||
823 | [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X, | ||
824 | [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K, | ||
825 | [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X, | ||
826 | [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K, | ||
827 | [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X, | ||
828 | [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K, | ||
829 | [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X, | ||
830 | }; | ||
831 | u16 code; | ||
832 | |||
833 | code = filt->code; | ||
834 | |||
835 | to->code = decodes[code]; | ||
836 | to->jt = filt->jt; | ||
837 | to->jf = filt->jf; | ||
838 | |||
839 | if (code == BPF_S_ALU_DIV_K) { | ||
840 | /* | ||
841 | * When loaded this rule user gave us X, which was | ||
842 | * translated into R = r(X). Now we calculate the | ||
843 | * RR = r(R) and report it back. If next time this | ||
844 | * value is loaded and RRR = r(RR) is calculated | ||
845 | * then the R == RRR will be true. | ||
846 | * | ||
847 | * One exception. X == 1 translates into R == 0 and | ||
848 | * we can't calculate RR out of it with r(). | ||
849 | */ | ||
850 | |||
851 | if (filt->k == 0) | ||
852 | to->k = 1; | ||
853 | else | ||
854 | to->k = reciprocal_value(filt->k); | ||
855 | |||
856 | BUG_ON(reciprocal_value(to->k) != filt->k); | ||
857 | } else | ||
858 | to->k = filt->k; | ||
859 | } | ||
860 | |||
861 | int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len) | ||
862 | { | ||
863 | struct sk_filter *filter; | ||
864 | int i, ret; | ||
865 | |||
866 | lock_sock(sk); | ||
867 | filter = rcu_dereference_protected(sk->sk_filter, | ||
868 | sock_owned_by_user(sk)); | ||
869 | ret = 0; | ||
870 | if (!filter) | ||
871 | goto out; | ||
872 | ret = filter->len; | ||
873 | if (!len) | ||
874 | goto out; | ||
875 | ret = -EINVAL; | ||
876 | if (len < filter->len) | ||
877 | goto out; | ||
878 | |||
879 | ret = -EFAULT; | ||
880 | for (i = 0; i < filter->len; i++) { | ||
881 | struct sock_filter fb; | ||
882 | |||
883 | sk_decode_filter(&filter->insns[i], &fb); | ||
884 | if (copy_to_user(&ubuf[i], &fb, sizeof(fb))) | ||
885 | goto out; | ||
886 | } | ||
887 | |||
888 | ret = filter->len; | ||
889 | out: | ||
890 | release_sock(sk); | ||
891 | return ret; | ||
892 | } | ||
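sk_get_filter() lets userspace read back an attached classic BPF filter, and sk_decode_filter() undoes the kernel's internal opcode rewrite first. The only lossy case is BPF_S_ALU_DIV_K, where the divisor was replaced by its reciprocal at attach time; the R/RR/RRR comment is the claim that applying reciprocal_value() twice more lands back on the stored value. A quick userspace check of that property, with reciprocal_value() reimplemented here from its then-current kernel definition (assumed to be ((1<<32)+d-1)/d):

#include <stdio.h>
#include <stdint.h>

static uint32_t reciprocal_value(uint32_t d)
{
	return (uint32_t)(((1ULL << 32) + d - 1) / d);
}

int main(void)
{
	uint32_t k = 1000;			/* user's divisor X */
	uint32_t r = reciprocal_value(k);	/* stored in the filter: R = r(X) */
	uint32_t rr = reciprocal_value(r);	/* what sk_get_filter reports: RR = r(R) */
	uint32_t rrr = reciprocal_value(rr);	/* re-attaching the reported value */

	printf("X=%u R=%u RR=%u RRR=%u (R == RRR: %s)\n",
	       k, r, rr, rrr, r == rrr ? "yes" : "no");

	/* The reciprocal divide the filter actually runs: (x * R) >> 32 ~ x / k */
	uint32_t x = 123456789;
	printf("%u/%u = %u vs %llu\n", x, k, x / k,
	       (unsigned long long)(((uint64_t)x * r) >> 32));
	return 0;
}

This is also why the X == 1 case is special-cased in sk_decode_filter(): r(1) is 0, which cannot be fed back through r().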
diff --git a/net/core/flow.c b/net/core/flow.c index e318c7e98042..b0901ee5a002 100644 --- a/net/core/flow.c +++ b/net/core/flow.c | |||
@@ -327,11 +327,9 @@ static void flow_cache_flush_tasklet(unsigned long data) | |||
327 | static void flow_cache_flush_per_cpu(void *data) | 327 | static void flow_cache_flush_per_cpu(void *data) |
328 | { | 328 | { |
329 | struct flow_flush_info *info = data; | 329 | struct flow_flush_info *info = data; |
330 | int cpu; | ||
331 | struct tasklet_struct *tasklet; | 330 | struct tasklet_struct *tasklet; |
332 | 331 | ||
333 | cpu = smp_processor_id(); | 332 | tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet); |
334 | tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet; | ||
335 | tasklet->data = (unsigned long)info; | 333 | tasklet->data = (unsigned long)info; |
336 | tasklet_schedule(tasklet); | 334 | tasklet_schedule(tasklet); |
337 | } | 335 | } |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 22571488730a..f1c0c2e9cad5 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -2987,6 +2987,10 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, | |||
2987 | t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev; | 2987 | t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev; |
2988 | } | 2988 | } |
2989 | 2989 | ||
2990 | /* Don't export sysctls to unprivileged users */ | ||
2991 | if (neigh_parms_net(p)->user_ns != &init_user_ns) | ||
2992 | t->neigh_vars[0].procname = NULL; | ||
2993 | |||
2990 | snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s", | 2994 | snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s", |
2991 | p_name, dev_name_source); | 2995 | p_name, dev_name_source); |
2992 | t->sysctl_header = | 2996 | t->sysctl_header = |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 017a8bacfb27..334efd5d67a9 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -18,11 +18,9 @@ | |||
18 | #include <net/sock.h> | 18 | #include <net/sock.h> |
19 | #include <net/net_namespace.h> | 19 | #include <net/net_namespace.h> |
20 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
21 | #include <linux/wireless.h> | ||
22 | #include <linux/vmalloc.h> | 21 | #include <linux/vmalloc.h> |
23 | #include <linux/export.h> | 22 | #include <linux/export.h> |
24 | #include <linux/jiffies.h> | 23 | #include <linux/jiffies.h> |
25 | #include <net/wext.h> | ||
26 | 24 | ||
27 | #include "net-sysfs.h" | 25 | #include "net-sysfs.h" |
28 | 26 | ||
@@ -73,11 +71,12 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, | |||
73 | const char *buf, size_t len, | 71 | const char *buf, size_t len, |
74 | int (*set)(struct net_device *, unsigned long)) | 72 | int (*set)(struct net_device *, unsigned long)) |
75 | { | 73 | { |
76 | struct net_device *net = to_net_dev(dev); | 74 | struct net_device *netdev = to_net_dev(dev); |
75 | struct net *net = dev_net(netdev); | ||
77 | unsigned long new; | 76 | unsigned long new; |
78 | int ret = -EINVAL; | 77 | int ret = -EINVAL; |
79 | 78 | ||
80 | if (!capable(CAP_NET_ADMIN)) | 79 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
81 | return -EPERM; | 80 | return -EPERM; |
82 | 81 | ||
83 | ret = kstrtoul(buf, 0, &new); | 82 | ret = kstrtoul(buf, 0, &new); |
@@ -87,8 +86,8 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, | |||
87 | if (!rtnl_trylock()) | 86 | if (!rtnl_trylock()) |
88 | return restart_syscall(); | 87 | return restart_syscall(); |
89 | 88 | ||
90 | if (dev_isalive(net)) { | 89 | if (dev_isalive(netdev)) { |
91 | if ((ret = (*set)(net, new)) == 0) | 90 | if ((ret = (*set)(netdev, new)) == 0) |
92 | ret = len; | 91 | ret = len; |
93 | } | 92 | } |
94 | rtnl_unlock(); | 93 | rtnl_unlock(); |
@@ -264,6 +263,9 @@ static ssize_t store_tx_queue_len(struct device *dev, | |||
264 | struct device_attribute *attr, | 263 | struct device_attribute *attr, |
265 | const char *buf, size_t len) | 264 | const char *buf, size_t len) |
266 | { | 265 | { |
266 | if (!capable(CAP_NET_ADMIN)) | ||
267 | return -EPERM; | ||
268 | |||
267 | return netdev_store(dev, attr, buf, len, change_tx_queue_len); | 269 | return netdev_store(dev, attr, buf, len, change_tx_queue_len); |
268 | } | 270 | } |
269 | 271 | ||
@@ -271,10 +273,11 @@ static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr, | |||
271 | const char *buf, size_t len) | 273 | const char *buf, size_t len) |
272 | { | 274 | { |
273 | struct net_device *netdev = to_net_dev(dev); | 275 | struct net_device *netdev = to_net_dev(dev); |
276 | struct net *net = dev_net(netdev); | ||
274 | size_t count = len; | 277 | size_t count = len; |
275 | ssize_t ret; | 278 | ssize_t ret; |
276 | 279 | ||
277 | if (!capable(CAP_NET_ADMIN)) | 280 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
278 | return -EPERM; | 281 | return -EPERM; |
279 | 282 | ||
280 | /* ignore trailing newline */ | 283 | /* ignore trailing newline */ |
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 42f1e1c7514f..6456439cbbd9 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/proc_fs.h> | 13 | #include <linux/proc_fs.h> |
14 | #include <linux/file.h> | 14 | #include <linux/file.h> |
15 | #include <linux/export.h> | 15 | #include <linux/export.h> |
16 | #include <linux/user_namespace.h> | ||
16 | #include <net/net_namespace.h> | 17 | #include <net/net_namespace.h> |
17 | #include <net/netns/generic.h> | 18 | #include <net/netns/generic.h> |
18 | 19 | ||
@@ -145,7 +146,7 @@ static void ops_free_list(const struct pernet_operations *ops, | |||
145 | /* | 146 | /* |
146 | * setup_net runs the initializers for the network namespace object. | 147 | * setup_net runs the initializers for the network namespace object. |
147 | */ | 148 | */ |
148 | static __net_init int setup_net(struct net *net) | 149 | static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) |
149 | { | 150 | { |
150 | /* Must be called with net_mutex held */ | 151 | /* Must be called with net_mutex held */ |
151 | const struct pernet_operations *ops, *saved_ops; | 152 | const struct pernet_operations *ops, *saved_ops; |
@@ -155,6 +156,7 @@ static __net_init int setup_net(struct net *net) | |||
155 | atomic_set(&net->count, 1); | 156 | atomic_set(&net->count, 1); |
156 | atomic_set(&net->passive, 1); | 157 | atomic_set(&net->passive, 1); |
157 | net->dev_base_seq = 1; | 158 | net->dev_base_seq = 1; |
159 | net->user_ns = user_ns; | ||
158 | 160 | ||
159 | #ifdef NETNS_REFCNT_DEBUG | 161 | #ifdef NETNS_REFCNT_DEBUG |
160 | atomic_set(&net->use_count, 0); | 162 | atomic_set(&net->use_count, 0); |
@@ -232,7 +234,8 @@ void net_drop_ns(void *p) | |||
232 | net_free(ns); | 234 | net_free(ns); |
233 | } | 235 | } |
234 | 236 | ||
235 | struct net *copy_net_ns(unsigned long flags, struct net *old_net) | 237 | struct net *copy_net_ns(unsigned long flags, |
238 | struct user_namespace *user_ns, struct net *old_net) | ||
236 | { | 239 | { |
237 | struct net *net; | 240 | struct net *net; |
238 | int rv; | 241 | int rv; |
@@ -243,8 +246,11 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net) | |||
243 | net = net_alloc(); | 246 | net = net_alloc(); |
244 | if (!net) | 247 | if (!net) |
245 | return ERR_PTR(-ENOMEM); | 248 | return ERR_PTR(-ENOMEM); |
249 | |||
250 | get_user_ns(user_ns); | ||
251 | |||
246 | mutex_lock(&net_mutex); | 252 | mutex_lock(&net_mutex); |
247 | rv = setup_net(net); | 253 | rv = setup_net(net, user_ns); |
248 | if (rv == 0) { | 254 | if (rv == 0) { |
249 | rtnl_lock(); | 255 | rtnl_lock(); |
250 | list_add_tail_rcu(&net->list, &net_namespace_list); | 256 | list_add_tail_rcu(&net->list, &net_namespace_list); |
@@ -252,6 +258,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net) | |||
252 | } | 258 | } |
253 | mutex_unlock(&net_mutex); | 259 | mutex_unlock(&net_mutex); |
254 | if (rv < 0) { | 260 | if (rv < 0) { |
261 | put_user_ns(user_ns); | ||
255 | net_drop_ns(net); | 262 | net_drop_ns(net); |
256 | return ERR_PTR(rv); | 263 | return ERR_PTR(rv); |
257 | } | 264 | } |
@@ -308,6 +315,7 @@ static void cleanup_net(struct work_struct *work) | |||
308 | /* Finally it is safe to free my network namespace structure */ | 315 | /* Finally it is safe to free my network namespace structure */ |
309 | list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { | 316 | list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { |
310 | list_del_init(&net->exit_list); | 317 | list_del_init(&net->exit_list); |
318 | put_user_ns(net->user_ns); | ||
311 | net_drop_ns(net); | 319 | net_drop_ns(net); |
312 | } | 320 | } |
313 | } | 321 | } |
@@ -347,13 +355,6 @@ struct net *get_net_ns_by_fd(int fd) | |||
347 | } | 355 | } |
348 | 356 | ||
349 | #else | 357 | #else |
350 | struct net *copy_net_ns(unsigned long flags, struct net *old_net) | ||
351 | { | ||
352 | if (flags & CLONE_NEWNET) | ||
353 | return ERR_PTR(-EINVAL); | ||
354 | return old_net; | ||
355 | } | ||
356 | |||
357 | struct net *get_net_ns_by_fd(int fd) | 358 | struct net *get_net_ns_by_fd(int fd) |
358 | { | 359 | { |
359 | return ERR_PTR(-EINVAL); | 360 | return ERR_PTR(-EINVAL); |
@@ -402,7 +403,7 @@ static int __init net_ns_init(void) | |||
402 | rcu_assign_pointer(init_net.gen, ng); | 403 | rcu_assign_pointer(init_net.gen, ng); |
403 | 404 | ||
404 | mutex_lock(&net_mutex); | 405 | mutex_lock(&net_mutex); |
405 | if (setup_net(&init_net)) | 406 | if (setup_net(&init_net, &init_user_ns)) |
406 | panic("Could not setup the initial network namespace"); | 407 | panic("Could not setup the initial network namespace"); |
407 | 408 | ||
408 | rtnl_lock(); | 409 | rtnl_lock(); |
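setup_net()/copy_net_ns() now record the user namespace that owns each struct net, which is what every ns_capable(net->user_ns, CAP_NET_ADMIN) check in this series tests against. The practical effect: a process that creates its own user-plus-network namespace holds CAP_NET_ADMIN over that netns and may configure it without global privilege. A hedged sketch (Linux-specific, error handling trimmed; on kernels without this series, or with unprivileged user namespaces disabled, the unshare() or the ioctl() fails with EPERM):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	/* New user ns first; the new net ns is owned by it. */
	if (unshare(CLONE_NEWUSER | CLONE_NEWNET) == -1) {
		perror("unshare");
		return 1;
	}

	/* We hold CAP_NET_ADMIN over the new netns's user_ns, so SIOCSIFFLAGS
	 * passes the ns_capable() check and loopback can be brought up here. */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "lo", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0) {
		ifr.ifr_flags |= IFF_UP;
		if (ioctl(fd, SIOCSIFFLAGS, &ifr) == 0)
			puts("lo is up inside the new netns");
	}
	return 0;
}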
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 79285a36035f..847c02b197b0 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c | |||
@@ -248,7 +248,7 @@ static int update_netprio(const void *v, struct file *file, unsigned n) | |||
248 | return 0; | 248 | return 0; |
249 | } | 249 | } |
250 | 250 | ||
251 | void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) | 251 | static void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) |
252 | { | 252 | { |
253 | struct task_struct *p; | 253 | struct task_struct *p; |
254 | void *v; | 254 | void *v; |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index d1dc14c2aac4..b29dacf900f9 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -419,20 +419,6 @@ struct pktgen_thread { | |||
419 | #define REMOVE 1 | 419 | #define REMOVE 1 |
420 | #define FIND 0 | 420 | #define FIND 0 |
421 | 421 | ||
422 | static inline ktime_t ktime_now(void) | ||
423 | { | ||
424 | struct timespec ts; | ||
425 | ktime_get_ts(&ts); | ||
426 | |||
427 | return timespec_to_ktime(ts); | ||
428 | } | ||
429 | |||
430 | /* This works even if 32 bit because of careful byte order choice */ | ||
431 | static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2) | ||
432 | { | ||
433 | return cmp1.tv64 < cmp2.tv64; | ||
434 | } | ||
435 | |||
436 | static const char version[] = | 422 | static const char version[] = |
437 | "Packet Generator for packet performance testing. " | 423 | "Packet Generator for packet performance testing. " |
438 | "Version: " VERSION "\n"; | 424 | "Version: " VERSION "\n"; |
@@ -675,7 +661,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v) | |||
675 | seq_puts(seq, "\n"); | 661 | seq_puts(seq, "\n"); |
676 | 662 | ||
677 | /* not really stopped, more like last-running-at */ | 663 | /* not really stopped, more like last-running-at */ |
678 | stopped = pkt_dev->running ? ktime_now() : pkt_dev->stopped_at; | 664 | stopped = pkt_dev->running ? ktime_get() : pkt_dev->stopped_at; |
679 | idle = pkt_dev->idle_acc; | 665 | idle = pkt_dev->idle_acc; |
680 | do_div(idle, NSEC_PER_USEC); | 666 | do_div(idle, NSEC_PER_USEC); |
681 | 667 | ||
@@ -2141,12 +2127,12 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) | |||
2141 | return; | 2127 | return; |
2142 | } | 2128 | } |
2143 | 2129 | ||
2144 | start_time = ktime_now(); | 2130 | start_time = ktime_get(); |
2145 | if (remaining < 100000) { | 2131 | if (remaining < 100000) { |
2146 | /* for small delays (<100us), just loop until limit is reached */ | 2132 | /* for small delays (<100us), just loop until limit is reached */ |
2147 | do { | 2133 | do { |
2148 | end_time = ktime_now(); | 2134 | end_time = ktime_get(); |
2149 | } while (ktime_lt(end_time, spin_until)); | 2135 | } while (ktime_compare(end_time, spin_until) < 0); |
2150 | } else { | 2136 | } else { |
2151 | /* see do_nanosleep */ | 2137 | /* see do_nanosleep */ |
2152 | hrtimer_init_sleeper(&t, current); | 2138 | hrtimer_init_sleeper(&t, current); |
@@ -2162,7 +2148,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) | |||
2162 | hrtimer_cancel(&t.timer); | 2148 | hrtimer_cancel(&t.timer); |
2163 | } while (t.task && pkt_dev->running && !signal_pending(current)); | 2149 | } while (t.task && pkt_dev->running && !signal_pending(current)); |
2164 | __set_current_state(TASK_RUNNING); | 2150 | __set_current_state(TASK_RUNNING); |
2165 | end_time = ktime_now(); | 2151 | end_time = ktime_get(); |
2166 | } | 2152 | } |
2167 | 2153 | ||
2168 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); | 2154 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); |
@@ -2427,11 +2413,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) | |||
2427 | } | 2413 | } |
2428 | } else { /* IPV6 * */ | 2414 | } else { /* IPV6 * */ |
2429 | 2415 | ||
2430 | if (pkt_dev->min_in6_daddr.s6_addr32[0] == 0 && | 2416 | if (!ipv6_addr_any(&pkt_dev->min_in6_daddr)) { |
2431 | pkt_dev->min_in6_daddr.s6_addr32[1] == 0 && | ||
2432 | pkt_dev->min_in6_daddr.s6_addr32[2] == 0 && | ||
2433 | pkt_dev->min_in6_daddr.s6_addr32[3] == 0) ; | ||
2434 | else { | ||
2435 | int i; | 2417 | int i; |
2436 | 2418 | ||
2437 | /* Only random destinations yet */ | 2419 | /* Only random destinations yet */ |
@@ -2916,8 +2898,7 @@ static void pktgen_run(struct pktgen_thread *t) | |||
2916 | pktgen_clear_counters(pkt_dev); | 2898 | pktgen_clear_counters(pkt_dev); |
2917 | pkt_dev->running = 1; /* Cranke yeself! */ | 2899 | pkt_dev->running = 1; /* Cranke yeself! */ |
2918 | pkt_dev->skb = NULL; | 2900 | pkt_dev->skb = NULL; |
2919 | pkt_dev->started_at = | 2901 | pkt_dev->started_at = pkt_dev->next_tx = ktime_get(); |
2920 | pkt_dev->next_tx = ktime_now(); | ||
2921 | 2902 | ||
2922 | set_pkt_overhead(pkt_dev); | 2903 | set_pkt_overhead(pkt_dev); |
2923 | 2904 | ||
@@ -3076,7 +3057,7 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev) | |||
3076 | 3057 | ||
3077 | kfree_skb(pkt_dev->skb); | 3058 | kfree_skb(pkt_dev->skb); |
3078 | pkt_dev->skb = NULL; | 3059 | pkt_dev->skb = NULL; |
3079 | pkt_dev->stopped_at = ktime_now(); | 3060 | pkt_dev->stopped_at = ktime_get(); |
3080 | pkt_dev->running = 0; | 3061 | pkt_dev->running = 0; |
3081 | 3062 | ||
3082 | show_results(pkt_dev, nr_frags); | 3063 | show_results(pkt_dev, nr_frags); |
@@ -3095,7 +3076,7 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t) | |||
3095 | continue; | 3076 | continue; |
3096 | if (best == NULL) | 3077 | if (best == NULL) |
3097 | best = pkt_dev; | 3078 | best = pkt_dev; |
3098 | else if (ktime_lt(pkt_dev->next_tx, best->next_tx)) | 3079 | else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0) |
3099 | best = pkt_dev; | 3080 | best = pkt_dev; |
3100 | } | 3081 | } |
3101 | if_unlock(t); | 3082 | if_unlock(t); |
@@ -3180,14 +3161,14 @@ static void pktgen_rem_thread(struct pktgen_thread *t) | |||
3180 | 3161 | ||
3181 | static void pktgen_resched(struct pktgen_dev *pkt_dev) | 3162 | static void pktgen_resched(struct pktgen_dev *pkt_dev) |
3182 | { | 3163 | { |
3183 | ktime_t idle_start = ktime_now(); | 3164 | ktime_t idle_start = ktime_get(); |
3184 | schedule(); | 3165 | schedule(); |
3185 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start)); | 3166 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start)); |
3186 | } | 3167 | } |
3187 | 3168 | ||
3188 | static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev) | 3169 | static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev) |
3189 | { | 3170 | { |
3190 | ktime_t idle_start = ktime_now(); | 3171 | ktime_t idle_start = ktime_get(); |
3191 | 3172 | ||
3192 | while (atomic_read(&(pkt_dev->skb->users)) != 1) { | 3173 | while (atomic_read(&(pkt_dev->skb->users)) != 1) { |
3193 | if (signal_pending(current)) | 3174 | if (signal_pending(current)) |
@@ -3198,7 +3179,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev) | |||
3198 | else | 3179 | else |
3199 | cpu_relax(); | 3180 | cpu_relax(); |
3200 | } | 3181 | } |
3201 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start)); | 3182 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start)); |
3202 | } | 3183 | } |
3203 | 3184 | ||
3204 | static void pktgen_xmit(struct pktgen_dev *pkt_dev) | 3185 | static void pktgen_xmit(struct pktgen_dev *pkt_dev) |
@@ -3220,7 +3201,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
3220 | * "never transmit" | 3201 | * "never transmit" |
3221 | */ | 3202 | */ |
3222 | if (unlikely(pkt_dev->delay == ULLONG_MAX)) { | 3203 | if (unlikely(pkt_dev->delay == ULLONG_MAX)) { |
3223 | pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX); | 3204 | pkt_dev->next_tx = ktime_add_ns(ktime_get(), ULONG_MAX); |
3224 | return; | 3205 | return; |
3225 | } | 3206 | } |
3226 | 3207 | ||
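pktgen drops its private ktime_now()/ktime_lt() helpers in favour of the generic ktime_get() and ktime_compare(), so the short-delay path in spin() is just "poll the monotonic clock until the deadline". The same shape in userspace, with clock_gettime(CLOCK_MONOTONIC) standing in for ktime_get():

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static int64_t now_ns(void)	/* userspace stand-in for ktime_get() */
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	int64_t start = now_ns();
	int64_t spin_until = start + 50000;	/* 50 us, the "<100us" busy path */

	while (now_ns() - spin_until < 0)	/* cf. ktime_compare(end, until) < 0 */
		;
	printf("spun for %lld ns\n", (long long)(now_ns() - start));
	return 0;
}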
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index fad649ae4dec..575a6ee89944 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -128,7 +128,7 @@ static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex) | |||
128 | if (tab == NULL || tab[msgindex].doit == NULL) | 128 | if (tab == NULL || tab[msgindex].doit == NULL) |
129 | tab = rtnl_msg_handlers[PF_UNSPEC]; | 129 | tab = rtnl_msg_handlers[PF_UNSPEC]; |
130 | 130 | ||
131 | return tab ? tab[msgindex].doit : NULL; | 131 | return tab[msgindex].doit; |
132 | } | 132 | } |
133 | 133 | ||
134 | static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) | 134 | static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) |
@@ -143,7 +143,7 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) | |||
143 | if (tab == NULL || tab[msgindex].dumpit == NULL) | 143 | if (tab == NULL || tab[msgindex].dumpit == NULL) |
144 | tab = rtnl_msg_handlers[PF_UNSPEC]; | 144 | tab = rtnl_msg_handlers[PF_UNSPEC]; |
145 | 145 | ||
146 | return tab ? tab[msgindex].dumpit : NULL; | 146 | return tab[msgindex].dumpit; |
147 | } | 147 | } |
148 | 148 | ||
149 | static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex) | 149 | static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex) |
@@ -158,7 +158,7 @@ static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex) | |||
158 | if (tab == NULL || tab[msgindex].calcit == NULL) | 158 | if (tab == NULL || tab[msgindex].calcit == NULL) |
159 | tab = rtnl_msg_handlers[PF_UNSPEC]; | 159 | tab = rtnl_msg_handlers[PF_UNSPEC]; |
160 | 160 | ||
161 | return tab ? tab[msgindex].calcit : NULL; | 161 | return tab[msgindex].calcit; |
162 | } | 162 | } |
163 | 163 | ||
164 | /** | 164 | /** |
@@ -1316,6 +1316,10 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | |||
1316 | err = PTR_ERR(net); | 1316 | err = PTR_ERR(net); |
1317 | goto errout; | 1317 | goto errout; |
1318 | } | 1318 | } |
1319 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) { | ||
1320 | err = -EPERM; | ||
1321 | goto errout; | ||
1322 | } | ||
1319 | err = dev_change_net_namespace(dev, net, ifname); | 1323 | err = dev_change_net_namespace(dev, net, ifname); |
1320 | put_net(net); | 1324 | put_net(net); |
1321 | if (err) | 1325 | if (err) |
@@ -2057,6 +2061,9 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
2057 | u8 *addr; | 2061 | u8 *addr; |
2058 | int err; | 2062 | int err; |
2059 | 2063 | ||
2064 | if (!capable(CAP_NET_ADMIN)) | ||
2065 | return -EPERM; | ||
2066 | |||
2060 | err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); | 2067 | err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); |
2061 | if (err < 0) | 2068 | if (err < 0) |
2062 | return err; | 2069 | return err; |
@@ -2123,6 +2130,9 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
2123 | int err = -EINVAL; | 2130 | int err = -EINVAL; |
2124 | __u8 *addr; | 2131 | __u8 *addr; |
2125 | 2132 | ||
2133 | if (!capable(CAP_NET_ADMIN)) | ||
2134 | return -EPERM; | ||
2135 | |||
2126 | if (nlmsg_len(nlh) < sizeof(*ndm)) | 2136 | if (nlmsg_len(nlh) < sizeof(*ndm)) |
2127 | return -EINVAL; | 2137 | return -EINVAL; |
2128 | 2138 | ||
@@ -2253,6 +2263,211 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2253 | return skb->len; | 2263 | return skb->len; |
2254 | } | 2264 | } |
2255 | 2265 | ||
2266 | int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | ||
2267 | struct net_device *dev, u16 mode) | ||
2268 | { | ||
2269 | struct nlmsghdr *nlh; | ||
2270 | struct ifinfomsg *ifm; | ||
2271 | struct nlattr *br_afspec; | ||
2272 | u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; | ||
2273 | |||
2274 | nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI); | ||
2275 | if (nlh == NULL) | ||
2276 | return -EMSGSIZE; | ||
2277 | |||
2278 | ifm = nlmsg_data(nlh); | ||
2279 | ifm->ifi_family = AF_BRIDGE; | ||
2280 | ifm->__ifi_pad = 0; | ||
2281 | ifm->ifi_type = dev->type; | ||
2282 | ifm->ifi_index = dev->ifindex; | ||
2283 | ifm->ifi_flags = dev_get_flags(dev); | ||
2284 | ifm->ifi_change = 0; | ||
2285 | |||
2286 | |||
2287 | if (nla_put_string(skb, IFLA_IFNAME, dev->name) || | ||
2288 | nla_put_u32(skb, IFLA_MTU, dev->mtu) || | ||
2289 | nla_put_u8(skb, IFLA_OPERSTATE, operstate) || | ||
2290 | (dev->master && | ||
2291 | nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) || | ||
2292 | (dev->addr_len && | ||
2293 | nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || | ||
2294 | (dev->ifindex != dev->iflink && | ||
2295 | nla_put_u32(skb, IFLA_LINK, dev->iflink))) | ||
2296 | goto nla_put_failure; | ||
2297 | |||
2298 | br_afspec = nla_nest_start(skb, IFLA_AF_SPEC); | ||
2299 | if (!br_afspec) | ||
2300 | goto nla_put_failure; | ||
2301 | |||
2302 | if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF) || | ||
2303 | nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) { | ||
2304 | nla_nest_cancel(skb, br_afspec); | ||
2305 | goto nla_put_failure; | ||
2306 | } | ||
2307 | nla_nest_end(skb, br_afspec); | ||
2308 | |||
2309 | return nlmsg_end(skb, nlh); | ||
2310 | nla_put_failure: | ||
2311 | nlmsg_cancel(skb, nlh); | ||
2312 | return -EMSGSIZE; | ||
2313 | } | ||
2314 | EXPORT_SYMBOL(ndo_dflt_bridge_getlink); | ||
2315 | |||
2316 | static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) | ||
2317 | { | ||
2318 | struct net *net = sock_net(skb->sk); | ||
2319 | struct net_device *dev; | ||
2320 | int idx = 0; | ||
2321 | u32 portid = NETLINK_CB(cb->skb).portid; | ||
2322 | u32 seq = cb->nlh->nlmsg_seq; | ||
2323 | |||
2324 | rcu_read_lock(); | ||
2325 | for_each_netdev_rcu(net, dev) { | ||
2326 | const struct net_device_ops *ops = dev->netdev_ops; | ||
2327 | struct net_device *master = dev->master; | ||
2328 | |||
2329 | if (master && master->netdev_ops->ndo_bridge_getlink) { | ||
2330 | if (idx >= cb->args[0] && | ||
2331 | master->netdev_ops->ndo_bridge_getlink( | ||
2332 | skb, portid, seq, dev) < 0) | ||
2333 | break; | ||
2334 | idx++; | ||
2335 | } | ||
2336 | |||
2337 | if (ops->ndo_bridge_getlink) { | ||
2338 | if (idx >= cb->args[0] && | ||
2339 | ops->ndo_bridge_getlink(skb, portid, seq, dev) < 0) | ||
2340 | break; | ||
2341 | idx++; | ||
2342 | } | ||
2343 | } | ||
2344 | rcu_read_unlock(); | ||
2345 | cb->args[0] = idx; | ||
2346 | |||
2347 | return skb->len; | ||
2348 | } | ||
2349 | |||
2350 | static inline size_t bridge_nlmsg_size(void) | ||
2351 | { | ||
2352 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) | ||
2353 | + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ | ||
2354 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ | ||
2355 | + nla_total_size(sizeof(u32)) /* IFLA_MASTER */ | ||
2356 | + nla_total_size(sizeof(u32)) /* IFLA_MTU */ | ||
2357 | + nla_total_size(sizeof(u32)) /* IFLA_LINK */ | ||
2358 | + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */ | ||
2359 | + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */ | ||
2360 | + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */ | ||
2361 | + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */ | ||
2362 | + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */ | ||
2363 | } | ||
2364 | |||
2365 | static int rtnl_bridge_notify(struct net_device *dev, u16 flags) | ||
2366 | { | ||
2367 | struct net *net = dev_net(dev); | ||
2368 | struct net_device *master = dev->master; | ||
2369 | struct sk_buff *skb; | ||
2370 | int err = -EOPNOTSUPP; | ||
2371 | |||
2372 | skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC); | ||
2373 | if (!skb) { | ||
2374 | err = -ENOMEM; | ||
2375 | goto errout; | ||
2376 | } | ||
2377 | |||
2378 | if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) && | ||
2379 | master && master->netdev_ops->ndo_bridge_getlink) { | ||
2380 | err = master->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev); | ||
2381 | if (err < 0) | ||
2382 | goto errout; | ||
2383 | } | ||
2384 | |||
2385 | if ((flags & BRIDGE_FLAGS_SELF) && | ||
2386 | dev->netdev_ops->ndo_bridge_getlink) { | ||
2387 | err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev); | ||
2388 | if (err < 0) | ||
2389 | goto errout; | ||
2390 | } | ||
2391 | |||
2392 | rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); | ||
2393 | return 0; | ||
2394 | errout: | ||
2395 | WARN_ON(err == -EMSGSIZE); | ||
2396 | kfree_skb(skb); | ||
2397 | rtnl_set_sk_err(net, RTNLGRP_LINK, err); | ||
2398 | return err; | ||
2399 | } | ||
2400 | |||
2401 | static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, | ||
2402 | void *arg) | ||
2403 | { | ||
2404 | struct net *net = sock_net(skb->sk); | ||
2405 | struct ifinfomsg *ifm; | ||
2406 | struct net_device *dev; | ||
2407 | struct nlattr *br_spec, *attr = NULL; | ||
2408 | int rem, err = -EOPNOTSUPP; | ||
2409 | u16 oflags, flags = 0; | ||
2410 | bool have_flags = false; | ||
2411 | |||
2412 | if (nlmsg_len(nlh) < sizeof(*ifm)) | ||
2413 | return -EINVAL; | ||
2414 | |||
2415 | ifm = nlmsg_data(nlh); | ||
2416 | if (ifm->ifi_family != AF_BRIDGE) | ||
2417 | return -EPFNOSUPPORT; | ||
2418 | |||
2419 | dev = __dev_get_by_index(net, ifm->ifi_index); | ||
2420 | if (!dev) { | ||
2421 | pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n"); | ||
2422 | return -ENODEV; | ||
2423 | } | ||
2424 | |||
2425 | br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); | ||
2426 | if (br_spec) { | ||
2427 | nla_for_each_nested(attr, br_spec, rem) { | ||
2428 | if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { | ||
2429 | have_flags = true; | ||
2430 | flags = nla_get_u16(attr); | ||
2431 | break; | ||
2432 | } | ||
2433 | } | ||
2434 | } | ||
2435 | |||
2436 | oflags = flags; | ||
2437 | |||
2438 | if (!flags || (flags & BRIDGE_FLAGS_MASTER)) { | ||
2439 | if (!dev->master || | ||
2440 | !dev->master->netdev_ops->ndo_bridge_setlink) { | ||
2441 | err = -EOPNOTSUPP; | ||
2442 | goto out; | ||
2443 | } | ||
2444 | |||
2445 | err = dev->master->netdev_ops->ndo_bridge_setlink(dev, nlh); | ||
2446 | if (err) | ||
2447 | goto out; | ||
2448 | |||
2449 | flags &= ~BRIDGE_FLAGS_MASTER; | ||
2450 | } | ||
2451 | |||
2452 | if ((flags & BRIDGE_FLAGS_SELF)) { | ||
2453 | if (!dev->netdev_ops->ndo_bridge_setlink) | ||
2454 | err = -EOPNOTSUPP; | ||
2455 | else | ||
2456 | err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh); | ||
2457 | |||
2458 | if (!err) | ||
2459 | flags &= ~BRIDGE_FLAGS_SELF; | ||
2460 | } | ||
2461 | |||
2462 | if (have_flags) | ||
2463 | memcpy(nla_data(attr), &flags, sizeof(flags)); | ||
2464 | /* Generate event to notify upper layer of bridge change */ | ||
2465 | if (!err) | ||
2466 | err = rtnl_bridge_notify(dev, oflags); | ||
2467 | out: | ||
2468 | return err; | ||
2469 | } | ||
2470 | |||
2256 | /* Protected by RTNL sempahore. */ | 2471 | /* Protected by RTNL sempahore. */ |
2257 | static struct rtattr **rta_buf; | 2472 | static struct rtattr **rta_buf; |
2258 | static int rtattr_max; | 2473 | static int rtattr_max; |
@@ -2283,7 +2498,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
2283 | sz_idx = type>>2; | 2498 | sz_idx = type>>2; |
2284 | kind = type&3; | 2499 | kind = type&3; |
2285 | 2500 | ||
2286 | if (kind != 2 && !capable(CAP_NET_ADMIN)) | 2501 | if (kind != 2 && !ns_capable(net->user_ns, CAP_NET_ADMIN)) |
2287 | return -EPERM; | 2502 | return -EPERM; |
2288 | 2503 | ||
2289 | if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { | 2504 | if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { |
@@ -2434,5 +2649,8 @@ void __init rtnetlink_init(void) | |||
2434 | rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL); | 2649 | rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL); |
2435 | rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL); | 2650 | rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL); |
2436 | rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL); | 2651 | rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL); |
2652 | |||
2653 | rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL); | ||
2654 | rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL); | ||
2437 | } | 2655 | } |
2438 | 2656 | ||
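
The new PF_BRIDGE RTM_SETLINK handler above dispatches first to the master bridge's ndo_bridge_setlink and then, when BRIDGE_FLAGS_SELF is set, to the port device's own hook, clearing each flag it managed to service. The following is a hedged sketch of how an embedded-switch driver might implement the self-managed side; the foo_* names and the "log and accept" hardware hook are hypothetical, only the ndo signature and the IFLA_AF_SPEC / IFLA_BRIDGE_* walk mirror the code above.

#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>

/* Hypothetical hardware hook; a real driver would program its switch. */
static int foo_hw_set_bridge_mode(struct net_device *dev, u16 mode)
{
	netdev_info(dev, "bridge mode set to %u\n", mode);
	return 0;
}

static int foo_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
	struct nlattr *br_spec, *attr;
	int rem;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) == IFLA_BRIDGE_MODE)
			return foo_hw_set_bridge_mode(dev, nla_get_u16(attr));
	}
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_bridge_setlink	= foo_ndo_bridge_setlink,
	/* .ndo_bridge_getlink would build the matching IFLA_AF_SPEC reply */
};
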
diff --git a/net/core/scm.c b/net/core/scm.c index ab570841a532..57fb1ee6649f 100644 --- a/net/core/scm.c +++ b/net/core/scm.c | |||
@@ -51,11 +51,11 @@ static __inline__ int scm_check_creds(struct ucred *creds) | |||
51 | if (!uid_valid(uid) || !gid_valid(gid)) | 51 | if (!uid_valid(uid) || !gid_valid(gid)) |
52 | return -EINVAL; | 52 | return -EINVAL; |
53 | 53 | ||
54 | if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) && | 54 | if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) && |
55 | ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || | 55 | ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || |
56 | uid_eq(uid, cred->suid)) || capable(CAP_SETUID)) && | 56 | uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && |
57 | ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || | 57 | ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || |
58 | gid_eq(gid, cred->sgid)) || capable(CAP_SETGID))) { | 58 | gid_eq(gid, cred->sgid)) || nsown_capable(CAP_SETGID))) { |
59 | return 0; | 59 | return 0; |
60 | } | 60 | } |
61 | return -EPERM; | 61 | return -EPERM; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4007c1437fda..880722e22cc5 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -519,7 +519,7 @@ static void skb_release_data(struct sk_buff *skb) | |||
519 | 519 | ||
520 | uarg = skb_shinfo(skb)->destructor_arg; | 520 | uarg = skb_shinfo(skb)->destructor_arg; |
521 | if (uarg->callback) | 521 | if (uarg->callback) |
522 | uarg->callback(uarg); | 522 | uarg->callback(uarg, true); |
523 | } | 523 | } |
524 | 524 | ||
525 | if (skb_has_frag_list(skb)) | 525 | if (skb_has_frag_list(skb)) |
@@ -635,6 +635,26 @@ void kfree_skb(struct sk_buff *skb) | |||
635 | EXPORT_SYMBOL(kfree_skb); | 635 | EXPORT_SYMBOL(kfree_skb); |
636 | 636 | ||
637 | /** | 637 | /** |
638 | * skb_tx_error - report an sk_buff xmit error | ||
639 | * @skb: buffer that triggered an error | ||
640 | * | ||
641 | * Report xmit error if a device callback is tracking this skb. | ||
642 | * skb must be freed afterwards. | ||
643 | */ | ||
644 | void skb_tx_error(struct sk_buff *skb) | ||
645 | { | ||
646 | if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { | ||
647 | struct ubuf_info *uarg; | ||
648 | |||
649 | uarg = skb_shinfo(skb)->destructor_arg; | ||
650 | if (uarg->callback) | ||
651 | uarg->callback(uarg, false); | ||
652 | skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; | ||
653 | } | ||
654 | } | ||
655 | EXPORT_SYMBOL(skb_tx_error); | ||
656 | |||
657 | /** | ||
638 | * consume_skb - free an skbuff | 658 | * consume_skb - free an skbuff |
639 | * @skb: buffer to free | 659 | * @skb: buffer to free |
640 | * | 660 | * |
@@ -797,7 +817,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) | |||
797 | for (i = 0; i < num_frags; i++) | 817 | for (i = 0; i < num_frags; i++) |
798 | skb_frag_unref(skb, i); | 818 | skb_frag_unref(skb, i); |
799 | 819 | ||
800 | uarg->callback(uarg); | 820 | uarg->callback(uarg, false); |
801 | 821 | ||
802 | /* skb frags point to kernel buffers */ | 822 | /* skb frags point to kernel buffers */ |
803 | for (i = num_frags - 1; i >= 0; i--) { | 823 | for (i = num_frags - 1; i >= 0; i--) { |
diff --git a/net/core/sock.c b/net/core/sock.c index 8a146cfcc366..a692ef49c9bb 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -505,7 +505,8 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) | |||
505 | } | 505 | } |
506 | EXPORT_SYMBOL(sk_dst_check); | 506 | EXPORT_SYMBOL(sk_dst_check); |
507 | 507 | ||
508 | static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen) | 508 | static int sock_setbindtodevice(struct sock *sk, char __user *optval, |
509 | int optlen) | ||
509 | { | 510 | { |
510 | int ret = -ENOPROTOOPT; | 511 | int ret = -ENOPROTOOPT; |
511 | #ifdef CONFIG_NETDEVICES | 512 | #ifdef CONFIG_NETDEVICES |
@@ -515,7 +516,7 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen) | |||
515 | 516 | ||
516 | /* Sorry... */ | 517 | /* Sorry... */ |
517 | ret = -EPERM; | 518 | ret = -EPERM; |
518 | if (!capable(CAP_NET_RAW)) | 519 | if (!ns_capable(net->user_ns, CAP_NET_RAW)) |
519 | goto out; | 520 | goto out; |
520 | 521 | ||
521 | ret = -EINVAL; | 522 | ret = -EINVAL; |
@@ -562,6 +563,59 @@ out: | |||
562 | return ret; | 563 | return ret; |
563 | } | 564 | } |
564 | 565 | ||
566 | static int sock_getbindtodevice(struct sock *sk, char __user *optval, | ||
567 | int __user *optlen, int len) | ||
568 | { | ||
569 | int ret = -ENOPROTOOPT; | ||
570 | #ifdef CONFIG_NETDEVICES | ||
571 | struct net *net = sock_net(sk); | ||
572 | struct net_device *dev; | ||
573 | char devname[IFNAMSIZ]; | ||
574 | unsigned seq; | ||
575 | |||
576 | if (sk->sk_bound_dev_if == 0) { | ||
577 | len = 0; | ||
578 | goto zero; | ||
579 | } | ||
580 | |||
581 | ret = -EINVAL; | ||
582 | if (len < IFNAMSIZ) | ||
583 | goto out; | ||
584 | |||
585 | retry: | ||
586 | seq = read_seqbegin(&devnet_rename_seq); | ||
587 | rcu_read_lock(); | ||
588 | dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); | ||
589 | ret = -ENODEV; | ||
590 | if (!dev) { | ||
591 | rcu_read_unlock(); | ||
592 | goto out; | ||
593 | } | ||
594 | |||
595 | strcpy(devname, dev->name); | ||
596 | rcu_read_unlock(); | ||
597 | if (read_seqretry(&devnet_rename_seq, seq)) | ||
598 | goto retry; | ||
599 | |||
600 | len = strlen(devname) + 1; | ||
601 | |||
602 | ret = -EFAULT; | ||
603 | if (copy_to_user(optval, devname, len)) | ||
604 | goto out; | ||
605 | |||
606 | zero: | ||
607 | ret = -EFAULT; | ||
608 | if (put_user(len, optlen)) | ||
609 | goto out; | ||
610 | |||
611 | ret = 0; | ||
612 | |||
613 | out: | ||
614 | #endif | ||
615 | |||
616 | return ret; | ||
617 | } | ||
618 | |||
565 | static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) | 619 | static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) |
566 | { | 620 | { |
567 | if (valbool) | 621 | if (valbool) |
@@ -589,7 +643,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, | |||
589 | */ | 643 | */ |
590 | 644 | ||
591 | if (optname == SO_BINDTODEVICE) | 645 | if (optname == SO_BINDTODEVICE) |
592 | return sock_bindtodevice(sk, optval, optlen); | 646 | return sock_setbindtodevice(sk, optval, optlen); |
593 | 647 | ||
594 | if (optlen < sizeof(int)) | 648 | if (optlen < sizeof(int)) |
595 | return -EINVAL; | 649 | return -EINVAL; |
@@ -696,7 +750,8 @@ set_rcvbuf: | |||
696 | break; | 750 | break; |
697 | 751 | ||
698 | case SO_PRIORITY: | 752 | case SO_PRIORITY: |
699 | if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) | 753 | if ((val >= 0 && val <= 6) || |
754 | ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) | ||
700 | sk->sk_priority = val; | 755 | sk->sk_priority = val; |
701 | else | 756 | else |
702 | ret = -EPERM; | 757 | ret = -EPERM; |
@@ -813,7 +868,7 @@ set_rcvbuf: | |||
813 | clear_bit(SOCK_PASSSEC, &sock->flags); | 868 | clear_bit(SOCK_PASSSEC, &sock->flags); |
814 | break; | 869 | break; |
815 | case SO_MARK: | 870 | case SO_MARK: |
816 | if (!capable(CAP_NET_ADMIN)) | 871 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
817 | ret = -EPERM; | 872 | ret = -EPERM; |
818 | else | 873 | else |
819 | sk->sk_mark = val; | 874 | sk->sk_mark = val; |
@@ -1074,6 +1129,17 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
1074 | case SO_NOFCS: | 1129 | case SO_NOFCS: |
1075 | v.val = sock_flag(sk, SOCK_NOFCS); | 1130 | v.val = sock_flag(sk, SOCK_NOFCS); |
1076 | break; | 1131 | break; |
1132 | |||
1133 | case SO_BINDTODEVICE: | ||
1134 | return sock_getbindtodevice(sk, optval, optlen, len); | ||
1135 | |||
1136 | case SO_GET_FILTER: | ||
1137 | len = sk_get_filter(sk, (struct sock_filter __user *)optval, len); | ||
1138 | if (len < 0) | ||
1139 | return len; | ||
1140 | |||
1141 | goto lenout; | ||
1142 | |||
1077 | default: | 1143 | default: |
1078 | return -ENOPROTOOPT; | 1144 | return -ENOPROTOOPT; |
1079 | } | 1145 | } |
@@ -1214,13 +1280,11 @@ static void sk_prot_free(struct proto *prot, struct sock *sk) | |||
1214 | 1280 | ||
1215 | #ifdef CONFIG_CGROUPS | 1281 | #ifdef CONFIG_CGROUPS |
1216 | #if IS_ENABLED(CONFIG_NET_CLS_CGROUP) | 1282 | #if IS_ENABLED(CONFIG_NET_CLS_CGROUP) |
1217 | void sock_update_classid(struct sock *sk) | 1283 | void sock_update_classid(struct sock *sk, struct task_struct *task) |
1218 | { | 1284 | { |
1219 | u32 classid; | 1285 | u32 classid; |
1220 | 1286 | ||
1221 | rcu_read_lock(); /* doing current task, which cannot vanish. */ | 1287 | classid = task_cls_classid(task); |
1222 | classid = task_cls_classid(current); | ||
1223 | rcu_read_unlock(); | ||
1224 | if (classid != sk->sk_classid) | 1288 | if (classid != sk->sk_classid) |
1225 | sk->sk_classid = classid; | 1289 | sk->sk_classid = classid; |
1226 | } | 1290 | } |
@@ -1263,7 +1327,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, | |||
1263 | sock_net_set(sk, get_net(net)); | 1327 | sock_net_set(sk, get_net(net)); |
1264 | atomic_set(&sk->sk_wmem_alloc, 1); | 1328 | atomic_set(&sk->sk_wmem_alloc, 1); |
1265 | 1329 | ||
1266 | sock_update_classid(sk); | 1330 | sock_update_classid(sk, current); |
1267 | sock_update_netprioidx(sk, current); | 1331 | sock_update_netprioidx(sk, current); |
1268 | } | 1332 | } |
1269 | 1333 | ||
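
With sock_getbindtodevice() in place, SO_BINDTODEVICE becomes readable as well as writable, and SO_GET_FILTER exposes the attached socket filter. A small userspace sketch of the new read path, assuming a buffer of at least IFNAMSIZ bytes and keeping error handling minimal; setting the option still requires CAP_NET_RAW, now checked against the socket's network namespace.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>

int main(void)
{
	char ifname[IFNAMSIZ] = "";
	socklen_t len = sizeof(ifname);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	/* Needs CAP_NET_RAW in the socket's netns; failure is ignored here. */
	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "lo", strlen("lo") + 1);

	/* New in this series: read the binding back.  len comes back as 0
	 * when the socket is not bound to any device. */
	if (getsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, &len) == 0)
		printf("bound to '%s' (len=%u)\n", ifname, (unsigned int)len);

	close(fd);
	return 0;
}
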
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index a7c36845b123..d1b08045a9df 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -216,6 +216,11 @@ static __net_init int sysctl_core_net_init(struct net *net) | |||
216 | goto err_dup; | 216 | goto err_dup; |
217 | 217 | ||
218 | tbl[0].data = &net->core.sysctl_somaxconn; | 218 | tbl[0].data = &net->core.sysctl_somaxconn; |
219 | |||
220 | /* Don't export any sysctls to unprivileged users */ | ||
221 | if (net->user_ns != &init_user_ns) { | ||
222 | tbl[0].procname = NULL; | ||
223 | } | ||
219 | } | 224 | } |
220 | 225 | ||
221 | net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl); | 226 | net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl); |
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 70989e672304..b07c75d37e91 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c | |||
@@ -1662,6 +1662,9 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
1662 | struct nlmsghdr *reply_nlh = NULL; | 1662 | struct nlmsghdr *reply_nlh = NULL; |
1663 | const struct reply_func *fn; | 1663 | const struct reply_func *fn; |
1664 | 1664 | ||
1665 | if ((nlh->nlmsg_type == RTM_SETDCB) && !capable(CAP_NET_ADMIN)) | ||
1666 | return -EPERM; | ||
1667 | |||
1665 | if (!net_eq(net, &init_net)) | 1668 | if (!net_eq(net, &init_net)) |
1666 | return -EINVAL; | 1669 | return -EINVAL; |
1667 | 1670 | ||
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index ea850ce35d4a..662071b249cc 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c | |||
@@ -174,8 +174,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, | |||
174 | * To protect against Request floods, increment retrans | 174 | * To protect against Request floods, increment retrans |
175 | * counter (backoff, monitored by dccp_response_timer). | 175 | * counter (backoff, monitored by dccp_response_timer). |
176 | */ | 176 | */ |
177 | req->retrans++; | 177 | inet_rtx_syn_ack(sk, req); |
178 | req->rsk_ops->rtx_syn_ack(sk, req, NULL); | ||
179 | } | 178 | } |
180 | /* Network Duplicate, discard packet */ | 179 | /* Network Duplicate, discard packet */ |
181 | return NULL; | 180 | return NULL; |
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 7b7e561412d3..e47ba9fc4a0e 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c | |||
@@ -573,6 +573,9 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
573 | struct dn_ifaddr __rcu **ifap; | 573 | struct dn_ifaddr __rcu **ifap; |
574 | int err = -EINVAL; | 574 | int err = -EINVAL; |
575 | 575 | ||
576 | if (!capable(CAP_NET_ADMIN)) | ||
577 | return -EPERM; | ||
578 | |||
576 | if (!net_eq(net, &init_net)) | 579 | if (!net_eq(net, &init_net)) |
577 | goto errout; | 580 | goto errout; |
578 | 581 | ||
@@ -614,6 +617,9 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
614 | struct dn_ifaddr *ifa; | 617 | struct dn_ifaddr *ifa; |
615 | int err; | 618 | int err; |
616 | 619 | ||
620 | if (!capable(CAP_NET_ADMIN)) | ||
621 | return -EPERM; | ||
622 | |||
617 | if (!net_eq(net, &init_net)) | 623 | if (!net_eq(net, &init_net)) |
618 | return -EINVAL; | 624 | return -EINVAL; |
619 | 625 | ||
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index 102d6106a942..e36614eccc04 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c | |||
@@ -520,6 +520,9 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void * | |||
520 | struct rtattr **rta = arg; | 520 | struct rtattr **rta = arg; |
521 | struct rtmsg *r = NLMSG_DATA(nlh); | 521 | struct rtmsg *r = NLMSG_DATA(nlh); |
522 | 522 | ||
523 | if (!capable(CAP_NET_ADMIN)) | ||
524 | return -EPERM; | ||
525 | |||
523 | if (!net_eq(net, &init_net)) | 526 | if (!net_eq(net, &init_net)) |
524 | return -EINVAL; | 527 | return -EINVAL; |
525 | 528 | ||
@@ -540,6 +543,9 @@ static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void * | |||
540 | struct rtattr **rta = arg; | 543 | struct rtattr **rta = arg; |
541 | struct rtmsg *r = NLMSG_DATA(nlh); | 544 | struct rtmsg *r = NLMSG_DATA(nlh); |
542 | 545 | ||
546 | if (!capable(CAP_NET_ADMIN)) | ||
547 | return -EPERM; | ||
548 | |||
543 | if (!net_eq(net, &init_net)) | 549 | if (!net_eq(net, &init_net)) |
544 | return -EINVAL; | 550 | return -EINVAL; |
545 | 551 | ||
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 274791cd7a35..f5eede1d6cb8 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig | |||
@@ -1,26 +1,24 @@ | |||
1 | config HAVE_NET_DSA | ||
2 | def_bool y | ||
3 | depends on NETDEVICES && !S390 | ||
4 | |||
5 | # Drivers must select NET_DSA and the appropriate tagging format | ||
6 | |||
1 | config NET_DSA | 7 | config NET_DSA |
2 | tristate "Distributed Switch Architecture support" | 8 | tristate |
3 | default n | 9 | depends on HAVE_NET_DSA |
4 | depends on EXPERIMENTAL && NETDEVICES && !S390 | ||
5 | select PHYLIB | 10 | select PHYLIB |
6 | ---help--- | ||
7 | This allows you to use hardware switch chips that use | ||
8 | the Distributed Switch Architecture. | ||
9 | |||
10 | 11 | ||
11 | if NET_DSA | 12 | if NET_DSA |
12 | 13 | ||
13 | # tagging formats | 14 | # tagging formats |
14 | config NET_DSA_TAG_DSA | 15 | config NET_DSA_TAG_DSA |
15 | bool | 16 | bool |
16 | default n | ||
17 | 17 | ||
18 | config NET_DSA_TAG_EDSA | 18 | config NET_DSA_TAG_EDSA |
19 | bool | 19 | bool |
20 | default n | ||
21 | 20 | ||
22 | config NET_DSA_TAG_TRAILER | 21 | config NET_DSA_TAG_TRAILER |
23 | bool | 22 | bool |
24 | default n | ||
25 | 23 | ||
26 | endif | 24 | endif |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 766c59658563..24b384b7903e 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -346,7 +346,8 @@ lookup_protocol: | |||
346 | } | 346 | } |
347 | 347 | ||
348 | err = -EPERM; | 348 | err = -EPERM; |
349 | if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) | 349 | if (sock->type == SOCK_RAW && !kern && |
350 | !ns_capable(net->user_ns, CAP_NET_RAW)) | ||
350 | goto out_rcu_unlock; | 351 | goto out_rcu_unlock; |
351 | 352 | ||
352 | err = -EAFNOSUPPORT; | 353 | err = -EAFNOSUPPORT; |
@@ -473,6 +474,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
473 | struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; | 474 | struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; |
474 | struct sock *sk = sock->sk; | 475 | struct sock *sk = sock->sk; |
475 | struct inet_sock *inet = inet_sk(sk); | 476 | struct inet_sock *inet = inet_sk(sk); |
477 | struct net *net = sock_net(sk); | ||
476 | unsigned short snum; | 478 | unsigned short snum; |
477 | int chk_addr_ret; | 479 | int chk_addr_ret; |
478 | int err; | 480 | int err; |
@@ -496,7 +498,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
496 | goto out; | 498 | goto out; |
497 | } | 499 | } |
498 | 500 | ||
499 | chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); | 501 | chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); |
500 | 502 | ||
501 | /* Not specified by any standard per-se, however it breaks too | 503 | /* Not specified by any standard per-se, however it breaks too |
502 | * many applications when removed. It is unfortunate since | 504 | * many applications when removed. It is unfortunate since |
@@ -516,7 +518,8 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
516 | 518 | ||
517 | snum = ntohs(addr->sin_port); | 519 | snum = ntohs(addr->sin_port); |
518 | err = -EACCES; | 520 | err = -EACCES; |
519 | if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) | 521 | if (snum && snum < PROT_SOCK && |
522 | !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) | ||
520 | goto out; | 523 | goto out; |
521 | 524 | ||
522 | /* We keep a pair of addresses. rcv_saddr is the one | 525 | /* We keep a pair of addresses. rcv_saddr is the one |
@@ -1251,7 +1254,7 @@ EXPORT_SYMBOL(inet_sk_rebuild_header); | |||
1251 | 1254 | ||
1252 | static int inet_gso_send_check(struct sk_buff *skb) | 1255 | static int inet_gso_send_check(struct sk_buff *skb) |
1253 | { | 1256 | { |
1254 | const struct net_protocol *ops; | 1257 | const struct net_offload *ops; |
1255 | const struct iphdr *iph; | 1258 | const struct iphdr *iph; |
1256 | int proto; | 1259 | int proto; |
1257 | int ihl; | 1260 | int ihl; |
@@ -1275,9 +1278,9 @@ static int inet_gso_send_check(struct sk_buff *skb) | |||
1275 | err = -EPROTONOSUPPORT; | 1278 | err = -EPROTONOSUPPORT; |
1276 | 1279 | ||
1277 | rcu_read_lock(); | 1280 | rcu_read_lock(); |
1278 | ops = rcu_dereference(inet_protos[proto]); | 1281 | ops = rcu_dereference(inet_offloads[proto]); |
1279 | if (likely(ops && ops->gso_send_check)) | 1282 | if (likely(ops && ops->callbacks.gso_send_check)) |
1280 | err = ops->gso_send_check(skb); | 1283 | err = ops->callbacks.gso_send_check(skb); |
1281 | rcu_read_unlock(); | 1284 | rcu_read_unlock(); |
1282 | 1285 | ||
1283 | out: | 1286 | out: |
@@ -1288,7 +1291,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, | |||
1288 | netdev_features_t features) | 1291 | netdev_features_t features) |
1289 | { | 1292 | { |
1290 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 1293 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
1291 | const struct net_protocol *ops; | 1294 | const struct net_offload *ops; |
1292 | struct iphdr *iph; | 1295 | struct iphdr *iph; |
1293 | int proto; | 1296 | int proto; |
1294 | int ihl; | 1297 | int ihl; |
@@ -1325,9 +1328,9 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, | |||
1325 | segs = ERR_PTR(-EPROTONOSUPPORT); | 1328 | segs = ERR_PTR(-EPROTONOSUPPORT); |
1326 | 1329 | ||
1327 | rcu_read_lock(); | 1330 | rcu_read_lock(); |
1328 | ops = rcu_dereference(inet_protos[proto]); | 1331 | ops = rcu_dereference(inet_offloads[proto]); |
1329 | if (likely(ops && ops->gso_segment)) | 1332 | if (likely(ops && ops->callbacks.gso_segment)) |
1330 | segs = ops->gso_segment(skb, features); | 1333 | segs = ops->callbacks.gso_segment(skb, features); |
1331 | rcu_read_unlock(); | 1334 | rcu_read_unlock(); |
1332 | 1335 | ||
1333 | if (!segs || IS_ERR(segs)) | 1336 | if (!segs || IS_ERR(segs)) |
@@ -1356,7 +1359,7 @@ out: | |||
1356 | static struct sk_buff **inet_gro_receive(struct sk_buff **head, | 1359 | static struct sk_buff **inet_gro_receive(struct sk_buff **head, |
1357 | struct sk_buff *skb) | 1360 | struct sk_buff *skb) |
1358 | { | 1361 | { |
1359 | const struct net_protocol *ops; | 1362 | const struct net_offload *ops; |
1360 | struct sk_buff **pp = NULL; | 1363 | struct sk_buff **pp = NULL; |
1361 | struct sk_buff *p; | 1364 | struct sk_buff *p; |
1362 | const struct iphdr *iph; | 1365 | const struct iphdr *iph; |
@@ -1378,8 +1381,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, | |||
1378 | proto = iph->protocol; | 1381 | proto = iph->protocol; |
1379 | 1382 | ||
1380 | rcu_read_lock(); | 1383 | rcu_read_lock(); |
1381 | ops = rcu_dereference(inet_protos[proto]); | 1384 | ops = rcu_dereference(inet_offloads[proto]); |
1382 | if (!ops || !ops->gro_receive) | 1385 | if (!ops || !ops->callbacks.gro_receive) |
1383 | goto out_unlock; | 1386 | goto out_unlock; |
1384 | 1387 | ||
1385 | if (*(u8 *)iph != 0x45) | 1388 | if (*(u8 *)iph != 0x45) |
@@ -1420,7 +1423,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, | |||
1420 | skb_gro_pull(skb, sizeof(*iph)); | 1423 | skb_gro_pull(skb, sizeof(*iph)); |
1421 | skb_set_transport_header(skb, skb_gro_offset(skb)); | 1424 | skb_set_transport_header(skb, skb_gro_offset(skb)); |
1422 | 1425 | ||
1423 | pp = ops->gro_receive(head, skb); | 1426 | pp = ops->callbacks.gro_receive(head, skb); |
1424 | 1427 | ||
1425 | out_unlock: | 1428 | out_unlock: |
1426 | rcu_read_unlock(); | 1429 | rcu_read_unlock(); |
@@ -1435,7 +1438,7 @@ static int inet_gro_complete(struct sk_buff *skb) | |||
1435 | { | 1438 | { |
1436 | __be16 newlen = htons(skb->len - skb_network_offset(skb)); | 1439 | __be16 newlen = htons(skb->len - skb_network_offset(skb)); |
1437 | struct iphdr *iph = ip_hdr(skb); | 1440 | struct iphdr *iph = ip_hdr(skb); |
1438 | const struct net_protocol *ops; | 1441 | const struct net_offload *ops; |
1439 | int proto = iph->protocol; | 1442 | int proto = iph->protocol; |
1440 | int err = -ENOSYS; | 1443 | int err = -ENOSYS; |
1441 | 1444 | ||
@@ -1443,11 +1446,11 @@ static int inet_gro_complete(struct sk_buff *skb) | |||
1443 | iph->tot_len = newlen; | 1446 | iph->tot_len = newlen; |
1444 | 1447 | ||
1445 | rcu_read_lock(); | 1448 | rcu_read_lock(); |
1446 | ops = rcu_dereference(inet_protos[proto]); | 1449 | ops = rcu_dereference(inet_offloads[proto]); |
1447 | if (WARN_ON(!ops || !ops->gro_complete)) | 1450 | if (WARN_ON(!ops || !ops->callbacks.gro_complete)) |
1448 | goto out_unlock; | 1451 | goto out_unlock; |
1449 | 1452 | ||
1450 | err = ops->gro_complete(skb); | 1453 | err = ops->callbacks.gro_complete(skb); |
1451 | 1454 | ||
1452 | out_unlock: | 1455 | out_unlock: |
1453 | rcu_read_unlock(); | 1456 | rcu_read_unlock(); |
@@ -1558,23 +1561,33 @@ static const struct net_protocol tcp_protocol = { | |||
1558 | .early_demux = tcp_v4_early_demux, | 1561 | .early_demux = tcp_v4_early_demux, |
1559 | .handler = tcp_v4_rcv, | 1562 | .handler = tcp_v4_rcv, |
1560 | .err_handler = tcp_v4_err, | 1563 | .err_handler = tcp_v4_err, |
1561 | .gso_send_check = tcp_v4_gso_send_check, | ||
1562 | .gso_segment = tcp_tso_segment, | ||
1563 | .gro_receive = tcp4_gro_receive, | ||
1564 | .gro_complete = tcp4_gro_complete, | ||
1565 | .no_policy = 1, | 1564 | .no_policy = 1, |
1566 | .netns_ok = 1, | 1565 | .netns_ok = 1, |
1567 | }; | 1566 | }; |
1568 | 1567 | ||
1568 | static const struct net_offload tcp_offload = { | ||
1569 | .callbacks = { | ||
1570 | .gso_send_check = tcp_v4_gso_send_check, | ||
1571 | .gso_segment = tcp_tso_segment, | ||
1572 | .gro_receive = tcp4_gro_receive, | ||
1573 | .gro_complete = tcp4_gro_complete, | ||
1574 | }, | ||
1575 | }; | ||
1576 | |||
1569 | static const struct net_protocol udp_protocol = { | 1577 | static const struct net_protocol udp_protocol = { |
1570 | .handler = udp_rcv, | 1578 | .handler = udp_rcv, |
1571 | .err_handler = udp_err, | 1579 | .err_handler = udp_err, |
1572 | .gso_send_check = udp4_ufo_send_check, | ||
1573 | .gso_segment = udp4_ufo_fragment, | ||
1574 | .no_policy = 1, | 1580 | .no_policy = 1, |
1575 | .netns_ok = 1, | 1581 | .netns_ok = 1, |
1576 | }; | 1582 | }; |
1577 | 1583 | ||
1584 | static const struct net_offload udp_offload = { | ||
1585 | .callbacks = { | ||
1586 | .gso_send_check = udp4_ufo_send_check, | ||
1587 | .gso_segment = udp4_ufo_fragment, | ||
1588 | }, | ||
1589 | }; | ||
1590 | |||
1578 | static const struct net_protocol icmp_protocol = { | 1591 | static const struct net_protocol icmp_protocol = { |
1579 | .handler = icmp_rcv, | 1592 | .handler = icmp_rcv, |
1580 | .err_handler = ping_err, | 1593 | .err_handler = ping_err, |
@@ -1659,13 +1672,35 @@ static int ipv4_proc_init(void); | |||
1659 | * IP protocol layer initialiser | 1672 | * IP protocol layer initialiser |
1660 | */ | 1673 | */ |
1661 | 1674 | ||
1675 | static struct packet_offload ip_packet_offload __read_mostly = { | ||
1676 | .type = cpu_to_be16(ETH_P_IP), | ||
1677 | .callbacks = { | ||
1678 | .gso_send_check = inet_gso_send_check, | ||
1679 | .gso_segment = inet_gso_segment, | ||
1680 | .gro_receive = inet_gro_receive, | ||
1681 | .gro_complete = inet_gro_complete, | ||
1682 | }, | ||
1683 | }; | ||
1684 | |||
1685 | static int __init ipv4_offload_init(void) | ||
1686 | { | ||
1687 | /* | ||
1688 | * Add offloads | ||
1689 | */ | ||
1690 | if (inet_add_offload(&udp_offload, IPPROTO_UDP) < 0) | ||
1691 | pr_crit("%s: Cannot add UDP protocol offload\n", __func__); | ||
1692 | if (inet_add_offload(&tcp_offload, IPPROTO_TCP) < 0) | ||
1693 | pr_crit("%s: Cannot add TCP protocol offlaod\n", __func__); | ||
1694 | |||
1695 | dev_add_offload(&ip_packet_offload); | ||
1696 | return 0; | ||
1697 | } | ||
1698 | |||
1699 | fs_initcall(ipv4_offload_init); | ||
1700 | |||
1662 | static struct packet_type ip_packet_type __read_mostly = { | 1701 | static struct packet_type ip_packet_type __read_mostly = { |
1663 | .type = cpu_to_be16(ETH_P_IP), | 1702 | .type = cpu_to_be16(ETH_P_IP), |
1664 | .func = ip_rcv, | 1703 | .func = ip_rcv, |
1665 | .gso_send_check = inet_gso_send_check, | ||
1666 | .gso_segment = inet_gso_segment, | ||
1667 | .gro_receive = inet_gro_receive, | ||
1668 | .gro_complete = inet_gro_complete, | ||
1669 | }; | 1704 | }; |
1670 | 1705 | ||
1671 | static int __init inet_init(void) | 1706 | static int __init inet_init(void) |
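
The GSO/GRO callbacks leave struct net_protocol and struct packet_type for dedicated struct net_offload / struct packet_offload tables, registered with inet_add_offload() and dev_add_offload(); the receive and error handlers stay in struct net_protocol and are still registered through inet_add_protocol(). A hedged sketch of what registering segmentation callbacks for another L4 protocol would now look like; the foo_* callbacks are stubs and IPPROTO_GRE is only a placeholder protocol number for the example.

#include <linux/err.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/protocol.h>

static int foo_gso_send_check(struct sk_buff *skb)
{
	return 0;				/* nothing to validate in this sketch */
}

static struct sk_buff *foo_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	return ERR_PTR(-EPROTONOSUPPORT);	/* real code would segment here */
}

static const struct net_offload foo_offload = {
	.callbacks = {
		.gso_send_check	= foo_gso_send_check,
		.gso_segment	= foo_gso_segment,
	},
};

static int __init foo_offload_init(void)
{
	/* placeholder protocol number, mirroring the UDP/TCP calls above */
	return inet_add_offload(&foo_offload, IPPROTO_GRE);
}
fs_initcall(foo_offload_init);
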
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 47800459e4cb..ce6fbdfd40b8 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -1161,7 +1161,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
1161 | switch (cmd) { | 1161 | switch (cmd) { |
1162 | case SIOCDARP: | 1162 | case SIOCDARP: |
1163 | case SIOCSARP: | 1163 | case SIOCSARP: |
1164 | if (!capable(CAP_NET_ADMIN)) | 1164 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1165 | return -EPERM; | 1165 | return -EPERM; |
1166 | case SIOCGARP: | 1166 | case SIOCGARP: |
1167 | err = copy_from_user(&r, arg, sizeof(struct arpreq)); | 1167 | err = copy_from_user(&r, arg, sizeof(struct arpreq)); |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 2a6abc163ed2..e13183abd7f6 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -55,6 +55,7 @@ | |||
55 | #include <linux/sysctl.h> | 55 | #include <linux/sysctl.h> |
56 | #endif | 56 | #endif |
57 | #include <linux/kmod.h> | 57 | #include <linux/kmod.h> |
58 | #include <linux/netconf.h> | ||
58 | 59 | ||
59 | #include <net/arp.h> | 60 | #include <net/arp.h> |
60 | #include <net/ip.h> | 61 | #include <net/ip.h> |
@@ -723,7 +724,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
723 | 724 | ||
724 | case SIOCSIFFLAGS: | 725 | case SIOCSIFFLAGS: |
725 | ret = -EPERM; | 726 | ret = -EPERM; |
726 | if (!capable(CAP_NET_ADMIN)) | 727 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
727 | goto out; | 728 | goto out; |
728 | break; | 729 | break; |
729 | case SIOCSIFADDR: /* Set interface address (and family) */ | 730 | case SIOCSIFADDR: /* Set interface address (and family) */ |
@@ -731,7 +732,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
731 | case SIOCSIFDSTADDR: /* Set the destination address */ | 732 | case SIOCSIFDSTADDR: /* Set the destination address */ |
732 | case SIOCSIFNETMASK: /* Set the netmask for the interface */ | 733 | case SIOCSIFNETMASK: /* Set the netmask for the interface */ |
733 | ret = -EPERM; | 734 | ret = -EPERM; |
734 | if (!capable(CAP_NET_ADMIN)) | 735 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
735 | goto out; | 736 | goto out; |
736 | ret = -EINVAL; | 737 | ret = -EINVAL; |
737 | if (sin->sin_family != AF_INET) | 738 | if (sin->sin_family != AF_INET) |
@@ -1442,6 +1443,149 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla) | |||
1442 | return 0; | 1443 | return 0; |
1443 | } | 1444 | } |
1444 | 1445 | ||
1446 | static int inet_netconf_msgsize_devconf(int type) | ||
1447 | { | ||
1448 | int size = NLMSG_ALIGN(sizeof(struct netconfmsg)) | ||
1449 | + nla_total_size(4); /* NETCONFA_IFINDEX */ | ||
1450 | |||
1451 | /* type -1 is used for ALL */ | ||
1452 | if (type == -1 || type == NETCONFA_FORWARDING) | ||
1453 | size += nla_total_size(4); | ||
1454 | if (type == -1 || type == NETCONFA_RP_FILTER) | ||
1455 | size += nla_total_size(4); | ||
1456 | |||
1457 | return size; | ||
1458 | } | ||
1459 | |||
1460 | static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex, | ||
1461 | struct ipv4_devconf *devconf, u32 portid, | ||
1462 | u32 seq, int event, unsigned int flags, | ||
1463 | int type) | ||
1464 | { | ||
1465 | struct nlmsghdr *nlh; | ||
1466 | struct netconfmsg *ncm; | ||
1467 | |||
1468 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), | ||
1469 | flags); | ||
1470 | if (nlh == NULL) | ||
1471 | return -EMSGSIZE; | ||
1472 | |||
1473 | ncm = nlmsg_data(nlh); | ||
1474 | ncm->ncm_family = AF_INET; | ||
1475 | |||
1476 | if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0) | ||
1477 | goto nla_put_failure; | ||
1478 | |||
1479 | /* type -1 is used for ALL */ | ||
1480 | if ((type == -1 || type == NETCONFA_FORWARDING) && | ||
1481 | nla_put_s32(skb, NETCONFA_FORWARDING, | ||
1482 | IPV4_DEVCONF(*devconf, FORWARDING)) < 0) | ||
1483 | goto nla_put_failure; | ||
1484 | if ((type == -1 || type == NETCONFA_RP_FILTER) && | ||
1485 | nla_put_s32(skb, NETCONFA_RP_FILTER, | ||
1486 | IPV4_DEVCONF(*devconf, RP_FILTER)) < 0) | ||
1487 | goto nla_put_failure; | ||
1488 | |||
1489 | return nlmsg_end(skb, nlh); | ||
1490 | |||
1491 | nla_put_failure: | ||
1492 | nlmsg_cancel(skb, nlh); | ||
1493 | return -EMSGSIZE; | ||
1494 | } | ||
1495 | |||
1496 | static void inet_netconf_notify_devconf(struct net *net, int type, int ifindex, | ||
1497 | struct ipv4_devconf *devconf) | ||
1498 | { | ||
1499 | struct sk_buff *skb; | ||
1500 | int err = -ENOBUFS; | ||
1501 | |||
1502 | skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC); | ||
1503 | if (skb == NULL) | ||
1504 | goto errout; | ||
1505 | |||
1506 | err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0, | ||
1507 | RTM_NEWNETCONF, 0, type); | ||
1508 | if (err < 0) { | ||
1509 | /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */ | ||
1510 | WARN_ON(err == -EMSGSIZE); | ||
1511 | kfree_skb(skb); | ||
1512 | goto errout; | ||
1513 | } | ||
1514 | rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_ATOMIC); | ||
1515 | return; | ||
1516 | errout: | ||
1517 | if (err < 0) | ||
1518 | rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err); | ||
1519 | } | ||
1520 | |||
1521 | static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = { | ||
1522 | [NETCONFA_IFINDEX] = { .len = sizeof(int) }, | ||
1523 | [NETCONFA_FORWARDING] = { .len = sizeof(int) }, | ||
1524 | [NETCONFA_RP_FILTER] = { .len = sizeof(int) }, | ||
1525 | }; | ||
1526 | |||
1527 | static int inet_netconf_get_devconf(struct sk_buff *in_skb, | ||
1528 | struct nlmsghdr *nlh, | ||
1529 | void *arg) | ||
1530 | { | ||
1531 | struct net *net = sock_net(in_skb->sk); | ||
1532 | struct nlattr *tb[NETCONFA_MAX+1]; | ||
1533 | struct netconfmsg *ncm; | ||
1534 | struct sk_buff *skb; | ||
1535 | struct ipv4_devconf *devconf; | ||
1536 | struct in_device *in_dev; | ||
1537 | struct net_device *dev; | ||
1538 | int ifindex; | ||
1539 | int err; | ||
1540 | |||
1541 | err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, | ||
1542 | devconf_ipv4_policy); | ||
1543 | if (err < 0) | ||
1544 | goto errout; | ||
1545 | |||
1546 | err = -EINVAL; | ||
1547 | if (!tb[NETCONFA_IFINDEX]) | ||
1548 | goto errout; | ||
1549 | |||
1550 | ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]); | ||
1551 | switch (ifindex) { | ||
1552 | case NETCONFA_IFINDEX_ALL: | ||
1553 | devconf = net->ipv4.devconf_all; | ||
1554 | break; | ||
1555 | case NETCONFA_IFINDEX_DEFAULT: | ||
1556 | devconf = net->ipv4.devconf_dflt; | ||
1557 | break; | ||
1558 | default: | ||
1559 | dev = __dev_get_by_index(net, ifindex); | ||
1560 | if (dev == NULL) | ||
1561 | goto errout; | ||
1562 | in_dev = __in_dev_get_rtnl(dev); | ||
1563 | if (in_dev == NULL) | ||
1564 | goto errout; | ||
1565 | devconf = &in_dev->cnf; | ||
1566 | break; | ||
1567 | } | ||
1568 | |||
1569 | err = -ENOBUFS; | ||
1570 | skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC); | ||
1571 | if (skb == NULL) | ||
1572 | goto errout; | ||
1573 | |||
1574 | err = inet_netconf_fill_devconf(skb, ifindex, devconf, | ||
1575 | NETLINK_CB(in_skb).portid, | ||
1576 | nlh->nlmsg_seq, RTM_NEWNETCONF, 0, | ||
1577 | -1); | ||
1578 | if (err < 0) { | ||
1579 | /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */ | ||
1580 | WARN_ON(err == -EMSGSIZE); | ||
1581 | kfree_skb(skb); | ||
1582 | goto errout; | ||
1583 | } | ||
1584 | err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); | ||
1585 | errout: | ||
1586 | return err; | ||
1587 | } | ||
1588 | |||
1445 | #ifdef CONFIG_SYSCTL | 1589 | #ifdef CONFIG_SYSCTL |
1446 | 1590 | ||
1447 | static void devinet_copy_dflt_conf(struct net *net, int i) | 1591 | static void devinet_copy_dflt_conf(struct net *net, int i) |
@@ -1467,6 +1611,12 @@ static void inet_forward_change(struct net *net) | |||
1467 | 1611 | ||
1468 | IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on; | 1612 | IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on; |
1469 | IPV4_DEVCONF_DFLT(net, FORWARDING) = on; | 1613 | IPV4_DEVCONF_DFLT(net, FORWARDING) = on; |
1614 | inet_netconf_notify_devconf(net, NETCONFA_FORWARDING, | ||
1615 | NETCONFA_IFINDEX_ALL, | ||
1616 | net->ipv4.devconf_all); | ||
1617 | inet_netconf_notify_devconf(net, NETCONFA_FORWARDING, | ||
1618 | NETCONFA_IFINDEX_DEFAULT, | ||
1619 | net->ipv4.devconf_dflt); | ||
1470 | 1620 | ||
1471 | for_each_netdev(net, dev) { | 1621 | for_each_netdev(net, dev) { |
1472 | struct in_device *in_dev; | 1622 | struct in_device *in_dev; |
@@ -1474,8 +1624,11 @@ static void inet_forward_change(struct net *net) | |||
1474 | dev_disable_lro(dev); | 1624 | dev_disable_lro(dev); |
1475 | rcu_read_lock(); | 1625 | rcu_read_lock(); |
1476 | in_dev = __in_dev_get_rcu(dev); | 1626 | in_dev = __in_dev_get_rcu(dev); |
1477 | if (in_dev) | 1627 | if (in_dev) { |
1478 | IN_DEV_CONF_SET(in_dev, FORWARDING, on); | 1628 | IN_DEV_CONF_SET(in_dev, FORWARDING, on); |
1629 | inet_netconf_notify_devconf(net, NETCONFA_FORWARDING, | ||
1630 | dev->ifindex, &in_dev->cnf); | ||
1631 | } | ||
1479 | rcu_read_unlock(); | 1632 | rcu_read_unlock(); |
1480 | } | 1633 | } |
1481 | } | 1634 | } |
@@ -1501,6 +1654,23 @@ static int devinet_conf_proc(ctl_table *ctl, int write, | |||
1501 | i == IPV4_DEVCONF_ROUTE_LOCALNET - 1) | 1654 | i == IPV4_DEVCONF_ROUTE_LOCALNET - 1) |
1502 | if ((new_value == 0) && (old_value != 0)) | 1655 | if ((new_value == 0) && (old_value != 0)) |
1503 | rt_cache_flush(net); | 1656 | rt_cache_flush(net); |
1657 | if (i == IPV4_DEVCONF_RP_FILTER - 1 && | ||
1658 | new_value != old_value) { | ||
1659 | int ifindex; | ||
1660 | |||
1661 | if (cnf == net->ipv4.devconf_dflt) | ||
1662 | ifindex = NETCONFA_IFINDEX_DEFAULT; | ||
1663 | else if (cnf == net->ipv4.devconf_all) | ||
1664 | ifindex = NETCONFA_IFINDEX_ALL; | ||
1665 | else { | ||
1666 | struct in_device *idev = | ||
1667 | container_of(cnf, struct in_device, | ||
1668 | cnf); | ||
1669 | ifindex = idev->dev->ifindex; | ||
1670 | } | ||
1671 | inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER, | ||
1672 | ifindex, cnf); | ||
1673 | } | ||
1504 | } | 1674 | } |
1505 | 1675 | ||
1506 | return ret; | 1676 | return ret; |
@@ -1527,15 +1697,23 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write, | |||
1527 | } | 1697 | } |
1528 | if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) { | 1698 | if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) { |
1529 | inet_forward_change(net); | 1699 | inet_forward_change(net); |
1530 | } else if (*valp) { | 1700 | } else { |
1531 | struct ipv4_devconf *cnf = ctl->extra1; | 1701 | struct ipv4_devconf *cnf = ctl->extra1; |
1532 | struct in_device *idev = | 1702 | struct in_device *idev = |
1533 | container_of(cnf, struct in_device, cnf); | 1703 | container_of(cnf, struct in_device, cnf); |
1534 | dev_disable_lro(idev->dev); | 1704 | if (*valp) |
1705 | dev_disable_lro(idev->dev); | ||
1706 | inet_netconf_notify_devconf(net, | ||
1707 | NETCONFA_FORWARDING, | ||
1708 | idev->dev->ifindex, | ||
1709 | cnf); | ||
1535 | } | 1710 | } |
1536 | rtnl_unlock(); | 1711 | rtnl_unlock(); |
1537 | rt_cache_flush(net); | 1712 | rt_cache_flush(net); |
1538 | } | 1713 | } else |
1714 | inet_netconf_notify_devconf(net, NETCONFA_FORWARDING, | ||
1715 | NETCONFA_IFINDEX_DEFAULT, | ||
1716 | net->ipv4.devconf_dflt); | ||
1539 | } | 1717 | } |
1540 | 1718 | ||
1541 | return ret; | 1719 | return ret; |
@@ -1809,5 +1987,7 @@ void __init devinet_init(void) | |||
1809 | rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL); | 1987 | rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL); |
1810 | rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL); | 1988 | rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL); |
1811 | rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL); | 1989 | rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL); |
1990 | rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf, | ||
1991 | NULL, NULL); | ||
1812 | } | 1992 | } |
1813 | 1993 | ||
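
RTM_GETNETCONF / RTM_NEWNETCONF give userspace a netlink view of per-device IPv4 configuration (currently FORWARDING and RP_FILTER), with RTNLGRP_IPV4_NETCONF notifications replacing the need to poll /proc/sys. Below is a hedged userspace sketch that queries the "all" pseudo-device; reply parsing and error handling are intentionally omitted.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/netconf.h>

int main(void)
{
	struct {
		struct nlmsghdr   nlh;
		struct netconfmsg ncm;
		char              buf[RTA_SPACE(sizeof(int))];
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct rtattr *rta;
	int ifindex = NETCONFA_IFINDEX_ALL;	/* the "all" pseudo-device */
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_type  = RTM_GETNETCONF;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.ncm));
	req.ncm.ncm_family  = AF_INET;

	/* NETCONFA_IFINDEX picks the device (or the ALL/DEFAULT pseudo ones). */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = NETCONFA_IFINDEX;
	rta->rta_len  = RTA_LENGTH(sizeof(ifindex));
	memcpy(RTA_DATA(rta), &ifindex, sizeof(ifindex));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_LENGTH(sizeof(ifindex));

	sendto(fd, &req, req.nlh.nlmsg_len, 0,
	       (struct sockaddr *)&kernel, sizeof(kernel));
	/* The reply is an RTM_NEWNETCONF message carrying NETCONFA_FORWARDING
	 * and NETCONFA_RP_FILTER attributes; parsing is left out here. */
	close(fd);
	return 0;
}
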
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 825c608826de..5cd75e2dab2c 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -488,7 +488,7 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
488 | switch (cmd) { | 488 | switch (cmd) { |
489 | case SIOCADDRT: /* Add a route */ | 489 | case SIOCADDRT: /* Add a route */ |
490 | case SIOCDELRT: /* Delete a route */ | 490 | case SIOCDELRT: /* Delete a route */ |
491 | if (!capable(CAP_NET_ADMIN)) | 491 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
492 | return -EPERM; | 492 | return -EPERM; |
493 | 493 | ||
494 | if (copy_from_user(&rt, arg, sizeof(rt))) | 494 | if (copy_from_user(&rt, arg, sizeof(rt))) |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 71b125cd5db1..4797a800faf8 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -803,7 +803,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) | |||
803 | unsigned int bytes; | 803 | unsigned int bytes; |
804 | 804 | ||
805 | if (!new_size) | 805 | if (!new_size) |
806 | new_size = 1; | 806 | new_size = 16; |
807 | bytes = new_size * sizeof(struct hlist_head *); | 807 | bytes = new_size * sizeof(struct hlist_head *); |
808 | new_info_hash = fib_info_hash_alloc(bytes); | 808 | new_info_hash = fib_info_hash_alloc(bytes); |
809 | new_laddrhash = fib_info_hash_alloc(bytes); | 809 | new_laddrhash = fib_info_hash_alloc(bytes); |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index d34ce2972c8f..2026542d6836 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -521,21 +521,31 @@ static inline void syn_ack_recalc(struct request_sock *req, const int thresh, | |||
521 | int *expire, int *resend) | 521 | int *expire, int *resend) |
522 | { | 522 | { |
523 | if (!rskq_defer_accept) { | 523 | if (!rskq_defer_accept) { |
524 | *expire = req->retrans >= thresh; | 524 | *expire = req->num_timeout >= thresh; |
525 | *resend = 1; | 525 | *resend = 1; |
526 | return; | 526 | return; |
527 | } | 527 | } |
528 | *expire = req->retrans >= thresh && | 528 | *expire = req->num_timeout >= thresh && |
529 | (!inet_rsk(req)->acked || req->retrans >= max_retries); | 529 | (!inet_rsk(req)->acked || req->num_timeout >= max_retries); |
530 | /* | 530 | /* |
531 | * Do not resend while waiting for data after ACK, | 531 | * Do not resend while waiting for data after ACK, |
532 | * start to resend on end of deferring period to give | 532 | * start to resend on end of deferring period to give |
533 | * last chance for data or ACK to create established socket. | 533 | * last chance for data or ACK to create established socket. |
534 | */ | 534 | */ |
535 | *resend = !inet_rsk(req)->acked || | 535 | *resend = !inet_rsk(req)->acked || |
536 | req->retrans >= rskq_defer_accept - 1; | 536 | req->num_timeout >= rskq_defer_accept - 1; |
537 | } | 537 | } |
538 | 538 | ||
539 | int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req) | ||
540 | { | ||
541 | int err = req->rsk_ops->rtx_syn_ack(parent, req, NULL); | ||
542 | |||
543 | if (!err) | ||
544 | req->num_retrans++; | ||
545 | return err; | ||
546 | } | ||
547 | EXPORT_SYMBOL(inet_rtx_syn_ack); | ||
548 | |||
539 | void inet_csk_reqsk_queue_prune(struct sock *parent, | 549 | void inet_csk_reqsk_queue_prune(struct sock *parent, |
540 | const unsigned long interval, | 550 | const unsigned long interval, |
541 | const unsigned long timeout, | 551 | const unsigned long timeout, |
@@ -599,13 +609,14 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, | |||
599 | req->rsk_ops->syn_ack_timeout(parent, req); | 609 | req->rsk_ops->syn_ack_timeout(parent, req); |
600 | if (!expire && | 610 | if (!expire && |
601 | (!resend || | 611 | (!resend || |
602 | !req->rsk_ops->rtx_syn_ack(parent, req, NULL) || | 612 | !inet_rtx_syn_ack(parent, req) || |
603 | inet_rsk(req)->acked)) { | 613 | inet_rsk(req)->acked)) { |
604 | unsigned long timeo; | 614 | unsigned long timeo; |
605 | 615 | ||
606 | if (req->retrans++ == 0) | 616 | if (req->num_timeout++ == 0) |
607 | lopt->qlen_young--; | 617 | lopt->qlen_young--; |
608 | timeo = min((timeout << req->retrans), max_rto); | 618 | timeo = min(timeout << req->num_timeout, |
619 | max_rto); | ||
609 | req->expires = now + timeo; | 620 | req->expires = now + timeo; |
610 | reqp = &req->dl_next; | 621 | reqp = &req->dl_next; |
611 | continue; | 622 | continue; |
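
req->retrans is split into num_retrans (SYN-ACKs actually retransmitted, now reported by inet_diag) and num_timeout (timer firings driving the backoff and defer-accept logic), with inet_rtx_syn_ack() as the single place that retransmits and bumps num_retrans on success. A hedged sketch of a retransmit site after the change; the wrapper function is hypothetical.

#include <net/inet_connection_sock.h>
#include <net/request_sock.h>

static void foo_retransmit_synack(struct sock *sk, struct request_sock *req)
{
	/* Re-sends via req->rsk_ops->rtx_syn_ack() and increments
	 * req->num_retrans only if the retransmission went out. */
	inet_rtx_syn_ack(sk, req);

	/* req->num_timeout is advanced only by the SYN-ACK timer (see
	 * inet_csk_reqsk_queue_prune() above), so pure timer expiries no
	 * longer inflate the retransmission count. */
}
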
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 0c34bfabc11f..cb98cbed1973 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -105,6 +105,9 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | |||
105 | r->id.idiag_src[0] = inet->inet_rcv_saddr; | 105 | r->id.idiag_src[0] = inet->inet_rcv_saddr; |
106 | r->id.idiag_dst[0] = inet->inet_daddr; | 106 | r->id.idiag_dst[0] = inet->inet_daddr; |
107 | 107 | ||
108 | if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown)) | ||
109 | goto errout; | ||
110 | |||
108 | /* IPv6 dual-stack sockets use inet->tos for IPv4 connections, | 111 | /* IPv6 dual-stack sockets use inet->tos for IPv4 connections, |
109 | * hence this needs to be included regardless of socket family. | 112 | * hence this needs to be included regardless of socket family. |
110 | */ | 113 | */ |
@@ -617,7 +620,7 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, | |||
617 | r->idiag_family = sk->sk_family; | 620 | r->idiag_family = sk->sk_family; |
618 | r->idiag_state = TCP_SYN_RECV; | 621 | r->idiag_state = TCP_SYN_RECV; |
619 | r->idiag_timer = 1; | 622 | r->idiag_timer = 1; |
620 | r->idiag_retrans = req->retrans; | 623 | r->idiag_retrans = req->num_retrans; |
621 | 624 | ||
622 | r->id.idiag_if = sk->sk_bound_dev_if; | 625 | r->id.idiag_if = sk->sk_bound_dev_if; |
623 | sock_diag_save_cookie(req, r->id.idiag_cookie); | 626 | sock_diag_save_cookie(req, r->id.idiag_cookie); |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 448e68546827..1cf6a768cd53 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -802,6 +802,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net) | |||
802 | table[0].data = &net->ipv4.frags.high_thresh; | 802 | table[0].data = &net->ipv4.frags.high_thresh; |
803 | table[1].data = &net->ipv4.frags.low_thresh; | 803 | table[1].data = &net->ipv4.frags.low_thresh; |
804 | table[2].data = &net->ipv4.frags.timeout; | 804 | table[2].data = &net->ipv4.frags.timeout; |
805 | |||
806 | /* Don't export sysctls to unprivileged users */ | ||
807 | if (net->user_ns != &init_user_ns) | ||
808 | table[0].procname = NULL; | ||
805 | } | 809 | } |
806 | 810 | ||
807 | hdr = register_net_sysctl(net, "net/ipv4", table); | 811 | hdr = register_net_sysctl(net, "net/ipv4", table); |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 7240f8e2dd45..a85ae2f7a21c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -164,21 +164,6 @@ struct ipgre_net { | |||
164 | #define tunnels_r tunnels[2] | 164 | #define tunnels_r tunnels[2] |
165 | #define tunnels_l tunnels[1] | 165 | #define tunnels_l tunnels[1] |
166 | #define tunnels_wc tunnels[0] | 166 | #define tunnels_wc tunnels[0] |
167 | /* | ||
168 | * Locking : hash tables are protected by RCU and RTNL | ||
169 | */ | ||
170 | |||
171 | #define for_each_ip_tunnel_rcu(start) \ | ||
172 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
173 | |||
174 | /* often modified stats are per cpu, other are shared (netdev->stats) */ | ||
175 | struct pcpu_tstats { | ||
176 | u64 rx_packets; | ||
177 | u64 rx_bytes; | ||
178 | u64 tx_packets; | ||
179 | u64 tx_bytes; | ||
180 | struct u64_stats_sync syncp; | ||
181 | }; | ||
182 | 167 | ||
183 | static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev, | 168 | static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev, |
184 | struct rtnl_link_stats64 *tot) | 169 | struct rtnl_link_stats64 *tot) |
@@ -250,7 +235,7 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev, | |||
250 | ARPHRD_ETHER : ARPHRD_IPGRE; | 235 | ARPHRD_ETHER : ARPHRD_IPGRE; |
251 | int score, cand_score = 4; | 236 | int score, cand_score = 4; |
252 | 237 | ||
253 | for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) { | 238 | for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) { |
254 | if (local != t->parms.iph.saddr || | 239 | if (local != t->parms.iph.saddr || |
255 | remote != t->parms.iph.daddr || | 240 | remote != t->parms.iph.daddr || |
256 | !(t->dev->flags & IFF_UP)) | 241 | !(t->dev->flags & IFF_UP)) |
@@ -277,7 +262,7 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev, | |||
277 | } | 262 | } |
278 | } | 263 | } |
279 | 264 | ||
280 | for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) { | 265 | for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) { |
281 | if (remote != t->parms.iph.daddr || | 266 | if (remote != t->parms.iph.daddr || |
282 | !(t->dev->flags & IFF_UP)) | 267 | !(t->dev->flags & IFF_UP)) |
283 | continue; | 268 | continue; |
@@ -303,7 +288,7 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev, | |||
303 | } | 288 | } |
304 | } | 289 | } |
305 | 290 | ||
306 | for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) { | 291 | for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) { |
307 | if ((local != t->parms.iph.saddr && | 292 | if ((local != t->parms.iph.saddr && |
308 | (local != t->parms.iph.daddr || | 293 | (local != t->parms.iph.daddr || |
309 | !ipv4_is_multicast(local))) || | 294 | !ipv4_is_multicast(local))) || |
@@ -331,7 +316,7 @@ static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev, | |||
331 | } | 316 | } |
332 | } | 317 | } |
333 | 318 | ||
334 | for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) { | 319 | for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) { |
335 | if (t->parms.i_key != key || | 320 | if (t->parms.i_key != key || |
336 | !(t->dev->flags & IFF_UP)) | 321 | !(t->dev->flags & IFF_UP)) |
337 | continue; | 322 | continue; |
@@ -753,7 +738,6 @@ drop: | |||
753 | static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 738 | static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
754 | { | 739 | { |
755 | struct ip_tunnel *tunnel = netdev_priv(dev); | 740 | struct ip_tunnel *tunnel = netdev_priv(dev); |
756 | struct pcpu_tstats *tstats; | ||
757 | const struct iphdr *old_iph = ip_hdr(skb); | 741 | const struct iphdr *old_iph = ip_hdr(skb); |
758 | const struct iphdr *tiph; | 742 | const struct iphdr *tiph; |
759 | struct flowi4 fl4; | 743 | struct flowi4 fl4; |
@@ -977,9 +961,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
977 | } | 961 | } |
978 | } | 962 | } |
979 | 963 | ||
980 | nf_reset(skb); | 964 | iptunnel_xmit(skb, dev); |
981 | tstats = this_cpu_ptr(dev->tstats); | ||
982 | __IPTUNNEL_XMIT(tstats, &dev->stats); | ||
983 | return NETDEV_TX_OK; | 965 | return NETDEV_TX_OK; |
984 | 966 | ||
985 | #if IS_ENABLED(CONFIG_IPV6) | 967 | #if IS_ENABLED(CONFIG_IPV6) |
@@ -1082,7 +1064,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1082 | case SIOCADDTUNNEL: | 1064 | case SIOCADDTUNNEL: |
1083 | case SIOCCHGTUNNEL: | 1065 | case SIOCCHGTUNNEL: |
1084 | err = -EPERM; | 1066 | err = -EPERM; |
1085 | if (!capable(CAP_NET_ADMIN)) | 1067 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1086 | goto done; | 1068 | goto done; |
1087 | 1069 | ||
1088 | err = -EFAULT; | 1070 | err = -EFAULT; |
@@ -1157,7 +1139,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1157 | 1139 | ||
1158 | case SIOCDELTUNNEL: | 1140 | case SIOCDELTUNNEL: |
1159 | err = -EPERM; | 1141 | err = -EPERM; |
1160 | if (!capable(CAP_NET_ADMIN)) | 1142 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1161 | goto done; | 1143 | goto done; |
1162 | 1144 | ||
1163 | if (dev == ign->fb_tunnel_dev) { | 1145 | if (dev == ign->fb_tunnel_dev) { |
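
ip_gre.c here, and ip_vti.c and ipip.c further below, drop their private copies of the for_each_ip_tunnel_rcu() iterator and struct pcpu_tstats, and the open-coded stats/__IPTUNNEL_XMIT sequence becomes a single iptunnel_xmit() call. The shared definitions are presumably consolidated in a common header not shown in these hunks; reconstructed from the deleted lines and the new call sites (which now pass the iterator explicitly), they look roughly like this:

/* Locking : hash tables are protected by RCU and RTNL */
#define for_each_ip_tunnel_rcu(pos, start) \
	for (pos = rcu_dereference(start); pos; pos = rcu_dereference(pos->next))

/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_tstats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};
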
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 1dc01f9793d5..f6289bf6f332 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
@@ -409,7 +409,7 @@ int ip_options_compile(struct net *net, | |||
409 | optptr[2] += 8; | 409 | optptr[2] += 8; |
410 | break; | 410 | break; |
411 | default: | 411 | default: |
412 | if (!skb && !capable(CAP_NET_RAW)) { | 412 | if (!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) { |
413 | pp_ptr = optptr + 3; | 413 | pp_ptr = optptr + 3; |
414 | goto error; | 414 | goto error; |
415 | } | 415 | } |
@@ -445,7 +445,7 @@ int ip_options_compile(struct net *net, | |||
445 | opt->router_alert = optptr - iph; | 445 | opt->router_alert = optptr - iph; |
446 | break; | 446 | break; |
447 | case IPOPT_CIPSO: | 447 | case IPOPT_CIPSO: |
448 | if ((!skb && !capable(CAP_NET_RAW)) || opt->cipso) { | 448 | if ((!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) || opt->cipso) { |
449 | pp_ptr = optptr; | 449 | pp_ptr = optptr; |
450 | goto error; | 450 | goto error; |
451 | } | 451 | } |
@@ -458,7 +458,7 @@ int ip_options_compile(struct net *net, | |||
458 | case IPOPT_SEC: | 458 | case IPOPT_SEC: |
459 | case IPOPT_SID: | 459 | case IPOPT_SID: |
460 | default: | 460 | default: |
461 | if (!skb && !capable(CAP_NET_RAW)) { | 461 | if (!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) { |
462 | pp_ptr = optptr; | 462 | pp_ptr = optptr; |
463 | goto error; | 463 | goto error; |
464 | } | 464 | } |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 14bbfcf717ac..3c9d20880283 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -989,13 +989,14 @@ mc_msf_out: | |||
989 | case IP_IPSEC_POLICY: | 989 | case IP_IPSEC_POLICY: |
990 | case IP_XFRM_POLICY: | 990 | case IP_XFRM_POLICY: |
991 | err = -EPERM; | 991 | err = -EPERM; |
992 | if (!capable(CAP_NET_ADMIN)) | 992 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
993 | break; | 993 | break; |
994 | err = xfrm_user_policy(sk, optname, optval, optlen); | 994 | err = xfrm_user_policy(sk, optname, optval, optlen); |
995 | break; | 995 | break; |
996 | 996 | ||
997 | case IP_TRANSPARENT: | 997 | case IP_TRANSPARENT: |
998 | if (!!val && !capable(CAP_NET_RAW) && !capable(CAP_NET_ADMIN)) { | 998 | if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && |
999 | !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { | ||
999 | err = -EPERM; | 1000 | err = -EPERM; |
1000 | break; | 1001 | break; |
1001 | } | 1002 | } |
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 858fddf6482a..c3a4233c0ac2 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c | |||
@@ -66,20 +66,6 @@ static void vti_tunnel_setup(struct net_device *dev); | |||
66 | static void vti_dev_free(struct net_device *dev); | 66 | static void vti_dev_free(struct net_device *dev); |
67 | static int vti_tunnel_bind_dev(struct net_device *dev); | 67 | static int vti_tunnel_bind_dev(struct net_device *dev); |
68 | 68 | ||
69 | /* Locking : hash tables are protected by RCU and RTNL */ | ||
70 | |||
71 | #define for_each_ip_tunnel_rcu(start) \ | ||
72 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
73 | |||
74 | /* often modified stats are per cpu, other are shared (netdev->stats) */ | ||
75 | struct pcpu_tstats { | ||
76 | u64 rx_packets; | ||
77 | u64 rx_bytes; | ||
78 | u64 tx_packets; | ||
79 | u64 tx_bytes; | ||
80 | struct u64_stats_sync syncp; | ||
81 | }; | ||
82 | |||
83 | #define VTI_XMIT(stats1, stats2) do { \ | 69 | #define VTI_XMIT(stats1, stats2) do { \ |
84 | int err; \ | 70 | int err; \ |
85 | int pkt_len = skb->len; \ | 71 | int pkt_len = skb->len; \ |
@@ -142,19 +128,19 @@ static struct ip_tunnel *vti_tunnel_lookup(struct net *net, | |||
142 | struct ip_tunnel *t; | 128 | struct ip_tunnel *t; |
143 | struct vti_net *ipn = net_generic(net, vti_net_id); | 129 | struct vti_net *ipn = net_generic(net, vti_net_id); |
144 | 130 | ||
145 | for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1]) | 131 | for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1]) |
146 | if (local == t->parms.iph.saddr && | 132 | if (local == t->parms.iph.saddr && |
147 | remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) | 133 | remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) |
148 | return t; | 134 | return t; |
149 | for_each_ip_tunnel_rcu(ipn->tunnels_r[h0]) | 135 | for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0]) |
150 | if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) | 136 | if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) |
151 | return t; | 137 | return t; |
152 | 138 | ||
153 | for_each_ip_tunnel_rcu(ipn->tunnels_l[h1]) | 139 | for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1]) |
154 | if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) | 140 | if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) |
155 | return t; | 141 | return t; |
156 | 142 | ||
157 | for_each_ip_tunnel_rcu(ipn->tunnels_wc[0]) | 143 | for_each_ip_tunnel_rcu(t, ipn->tunnels_wc[0]) |
158 | if (t && (t->dev->flags&IFF_UP)) | 144 | if (t && (t->dev->flags&IFF_UP)) |
159 | return t; | 145 | return t; |
160 | return NULL; | 146 | return NULL; |
@@ -502,7 +488,7 @@ vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
502 | case SIOCADDTUNNEL: | 488 | case SIOCADDTUNNEL: |
503 | case SIOCCHGTUNNEL: | 489 | case SIOCCHGTUNNEL: |
504 | err = -EPERM; | 490 | err = -EPERM; |
505 | if (!capable(CAP_NET_ADMIN)) | 491 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
506 | goto done; | 492 | goto done; |
507 | 493 | ||
508 | err = -EFAULT; | 494 | err = -EFAULT; |
@@ -567,7 +553,7 @@ vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
567 | 553 | ||
568 | case SIOCDELTUNNEL: | 554 | case SIOCDELTUNNEL: |
569 | err = -EPERM; | 555 | err = -EPERM; |
570 | if (!capable(CAP_NET_ADMIN)) | 556 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
571 | goto done; | 557 | goto done; |
572 | 558 | ||
573 | if (dev == ipn->fb_tunnel_dev) { | 559 | if (dev == ipn->fb_tunnel_dev) { |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 798358b10717..d763701cff1b 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -1500,8 +1500,10 @@ static int __init ip_auto_config(void) | |||
1500 | * Clue in the operator. | 1500 | * Clue in the operator. |
1501 | */ | 1501 | */ |
1502 | pr_info("IP-Config: Complete:\n"); | 1502 | pr_info("IP-Config: Complete:\n"); |
1503 | pr_info(" device=%s, addr=%pI4, mask=%pI4, gw=%pI4\n", | 1503 | |
1504 | ic_dev->name, &ic_myaddr, &ic_netmask, &ic_gateway); | 1504 | pr_info(" device=%s, hwaddr=%*phC, ipaddr=%pI4, mask=%pI4, gw=%pI4\n", |
1505 | ic_dev->name, ic_dev->addr_len, ic_dev->dev_addr, | ||
1506 | &ic_myaddr, &ic_netmask, &ic_gateway); | ||
1505 | pr_info(" host=%s, domain=%s, nis-domain=%s\n", | 1507 | pr_info(" host=%s, domain=%s, nis-domain=%s\n", |
1506 | utsname()->nodename, ic_domain, utsname()->domainname); | 1508 | utsname()->nodename, ic_domain, utsname()->domainname); |
1507 | pr_info(" bootserver=%pI4, rootserver=%pI4, rootpath=%s", | 1509 | pr_info(" bootserver=%pI4, rootserver=%pI4, rootpath=%s", |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index e15b45297c09..191fc24a745a 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -138,22 +138,7 @@ struct ipip_net { | |||
138 | static int ipip_tunnel_init(struct net_device *dev); | 138 | static int ipip_tunnel_init(struct net_device *dev); |
139 | static void ipip_tunnel_setup(struct net_device *dev); | 139 | static void ipip_tunnel_setup(struct net_device *dev); |
140 | static void ipip_dev_free(struct net_device *dev); | 140 | static void ipip_dev_free(struct net_device *dev); |
141 | 141 | static struct rtnl_link_ops ipip_link_ops __read_mostly; | |
142 | /* | ||
143 | * Locking : hash tables are protected by RCU and RTNL | ||
144 | */ | ||
145 | |||
146 | #define for_each_ip_tunnel_rcu(start) \ | ||
147 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
148 | |||
149 | /* often modified stats are per cpu, other are shared (netdev->stats) */ | ||
150 | struct pcpu_tstats { | ||
151 | u64 rx_packets; | ||
152 | u64 rx_bytes; | ||
153 | u64 tx_packets; | ||
154 | u64 tx_bytes; | ||
155 | struct u64_stats_sync syncp; | ||
156 | }; | ||
157 | 142 | ||
158 | static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev, | 143 | static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev, |
159 | struct rtnl_link_stats64 *tot) | 144 | struct rtnl_link_stats64 *tot) |
@@ -197,16 +182,16 @@ static struct ip_tunnel *ipip_tunnel_lookup(struct net *net, | |||
197 | struct ip_tunnel *t; | 182 | struct ip_tunnel *t; |
198 | struct ipip_net *ipn = net_generic(net, ipip_net_id); | 183 | struct ipip_net *ipn = net_generic(net, ipip_net_id); |
199 | 184 | ||
200 | for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1]) | 185 | for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1]) |
201 | if (local == t->parms.iph.saddr && | 186 | if (local == t->parms.iph.saddr && |
202 | remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) | 187 | remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) |
203 | return t; | 188 | return t; |
204 | 189 | ||
205 | for_each_ip_tunnel_rcu(ipn->tunnels_r[h0]) | 190 | for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0]) |
206 | if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) | 191 | if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) |
207 | return t; | 192 | return t; |
208 | 193 | ||
209 | for_each_ip_tunnel_rcu(ipn->tunnels_l[h1]) | 194 | for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1]) |
210 | if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) | 195 | if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) |
211 | return t; | 196 | return t; |
212 | 197 | ||
@@ -264,6 +249,32 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t) | |||
264 | rcu_assign_pointer(*tp, t); | 249 | rcu_assign_pointer(*tp, t); |
265 | } | 250 | } |
266 | 251 | ||
252 | static int ipip_tunnel_create(struct net_device *dev) | ||
253 | { | ||
254 | struct ip_tunnel *t = netdev_priv(dev); | ||
255 | struct net *net = dev_net(dev); | ||
256 | struct ipip_net *ipn = net_generic(net, ipip_net_id); | ||
257 | int err; | ||
258 | |||
259 | err = ipip_tunnel_init(dev); | ||
260 | if (err < 0) | ||
261 | goto out; | ||
262 | |||
263 | err = register_netdevice(dev); | ||
264 | if (err < 0) | ||
265 | goto out; | ||
266 | |||
267 | strcpy(t->parms.name, dev->name); | ||
268 | dev->rtnl_link_ops = &ipip_link_ops; | ||
269 | |||
270 | dev_hold(dev); | ||
271 | ipip_tunnel_link(ipn, t); | ||
272 | return 0; | ||
273 | |||
274 | out: | ||
275 | return err; | ||
276 | } | ||
277 | |||
267 | static struct ip_tunnel *ipip_tunnel_locate(struct net *net, | 278 | static struct ip_tunnel *ipip_tunnel_locate(struct net *net, |
268 | struct ip_tunnel_parm *parms, int create) | 279 | struct ip_tunnel_parm *parms, int create) |
269 | { | 280 | { |
@@ -298,16 +309,9 @@ static struct ip_tunnel *ipip_tunnel_locate(struct net *net, | |||
298 | nt = netdev_priv(dev); | 309 | nt = netdev_priv(dev); |
299 | nt->parms = *parms; | 310 | nt->parms = *parms; |
300 | 311 | ||
301 | if (ipip_tunnel_init(dev) < 0) | 312 | if (ipip_tunnel_create(dev) < 0) |
302 | goto failed_free; | 313 | goto failed_free; |
303 | 314 | ||
304 | if (register_netdevice(dev) < 0) | ||
305 | goto failed_free; | ||
306 | |||
307 | strcpy(nt->parms.name, dev->name); | ||
308 | |||
309 | dev_hold(dev); | ||
310 | ipip_tunnel_link(ipn, nt); | ||
311 | return nt; | 315 | return nt; |
312 | 316 | ||
313 | failed_free: | 317 | failed_free: |
@@ -463,7 +467,6 @@ drop: | |||
463 | static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | 467 | static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
464 | { | 468 | { |
465 | struct ip_tunnel *tunnel = netdev_priv(dev); | 469 | struct ip_tunnel *tunnel = netdev_priv(dev); |
466 | struct pcpu_tstats *tstats; | ||
467 | const struct iphdr *tiph = &tunnel->parms.iph; | 470 | const struct iphdr *tiph = &tunnel->parms.iph; |
468 | u8 tos = tunnel->parms.iph.tos; | 471 | u8 tos = tunnel->parms.iph.tos; |
469 | __be16 df = tiph->frag_off; | 472 | __be16 df = tiph->frag_off; |
@@ -479,6 +482,10 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
479 | if (skb->protocol != htons(ETH_P_IP)) | 482 | if (skb->protocol != htons(ETH_P_IP)) |
480 | goto tx_error; | 483 | goto tx_error; |
481 | 484 | ||
485 | if (skb->ip_summed == CHECKSUM_PARTIAL && | ||
486 | skb_checksum_help(skb)) | ||
487 | goto tx_error; | ||
488 | |||
482 | if (tos & 1) | 489 | if (tos & 1) |
483 | tos = old_iph->tos; | 490 | tos = old_iph->tos; |
484 | 491 | ||
@@ -586,9 +593,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
586 | if ((iph->ttl = tiph->ttl) == 0) | 593 | if ((iph->ttl = tiph->ttl) == 0) |
587 | iph->ttl = old_iph->ttl; | 594 | iph->ttl = old_iph->ttl; |
588 | 595 | ||
589 | nf_reset(skb); | 596 | iptunnel_xmit(skb, dev); |
590 | tstats = this_cpu_ptr(dev->tstats); | ||
591 | __IPTUNNEL_XMIT(tstats, &dev->stats); | ||
592 | return NETDEV_TX_OK; | 597 | return NETDEV_TX_OK; |
593 | 598 | ||
594 | tx_error_icmp: | 599 | tx_error_icmp: |
@@ -635,6 +640,28 @@ static void ipip_tunnel_bind_dev(struct net_device *dev) | |||
635 | dev->iflink = tunnel->parms.link; | 640 | dev->iflink = tunnel->parms.link; |
636 | } | 641 | } |
637 | 642 | ||
643 | static void ipip_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p) | ||
644 | { | ||
645 | struct net *net = dev_net(t->dev); | ||
646 | struct ipip_net *ipn = net_generic(net, ipip_net_id); | ||
647 | |||
648 | ipip_tunnel_unlink(ipn, t); | ||
649 | synchronize_net(); | ||
650 | t->parms.iph.saddr = p->iph.saddr; | ||
651 | t->parms.iph.daddr = p->iph.daddr; | ||
652 | memcpy(t->dev->dev_addr, &p->iph.saddr, 4); | ||
653 | memcpy(t->dev->broadcast, &p->iph.daddr, 4); | ||
654 | ipip_tunnel_link(ipn, t); | ||
655 | t->parms.iph.ttl = p->iph.ttl; | ||
656 | t->parms.iph.tos = p->iph.tos; | ||
657 | t->parms.iph.frag_off = p->iph.frag_off; | ||
658 | if (t->parms.link != p->link) { | ||
659 | t->parms.link = p->link; | ||
660 | ipip_tunnel_bind_dev(t->dev); | ||
661 | } | ||
662 | netdev_state_change(t->dev); | ||
663 | } | ||
664 | |||
638 | static int | 665 | static int |
639 | ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | 666 | ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) |
640 | { | 667 | { |
@@ -664,7 +691,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
664 | case SIOCADDTUNNEL: | 691 | case SIOCADDTUNNEL: |
665 | case SIOCCHGTUNNEL: | 692 | case SIOCCHGTUNNEL: |
666 | err = -EPERM; | 693 | err = -EPERM; |
667 | if (!capable(CAP_NET_ADMIN)) | 694 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
668 | goto done; | 695 | goto done; |
669 | 696 | ||
670 | err = -EFAULT; | 697 | err = -EFAULT; |
@@ -693,29 +720,13 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
693 | break; | 720 | break; |
694 | } | 721 | } |
695 | t = netdev_priv(dev); | 722 | t = netdev_priv(dev); |
696 | ipip_tunnel_unlink(ipn, t); | ||
697 | synchronize_net(); | ||
698 | t->parms.iph.saddr = p.iph.saddr; | ||
699 | t->parms.iph.daddr = p.iph.daddr; | ||
700 | memcpy(dev->dev_addr, &p.iph.saddr, 4); | ||
701 | memcpy(dev->broadcast, &p.iph.daddr, 4); | ||
702 | ipip_tunnel_link(ipn, t); | ||
703 | netdev_state_change(dev); | ||
704 | } | 723 | } |
724 | |||
725 | ipip_tunnel_update(t, &p); | ||
705 | } | 726 | } |
706 | 727 | ||
707 | if (t) { | 728 | if (t) { |
708 | err = 0; | 729 | err = 0; |
709 | if (cmd == SIOCCHGTUNNEL) { | ||
710 | t->parms.iph.ttl = p.iph.ttl; | ||
711 | t->parms.iph.tos = p.iph.tos; | ||
712 | t->parms.iph.frag_off = p.iph.frag_off; | ||
713 | if (t->parms.link != p.link) { | ||
714 | t->parms.link = p.link; | ||
715 | ipip_tunnel_bind_dev(dev); | ||
716 | netdev_state_change(dev); | ||
717 | } | ||
718 | } | ||
719 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p))) | 730 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p))) |
720 | err = -EFAULT; | 731 | err = -EFAULT; |
721 | } else | 732 | } else |
@@ -724,7 +735,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
724 | 735 | ||
725 | case SIOCDELTUNNEL: | 736 | case SIOCDELTUNNEL: |
726 | err = -EPERM; | 737 | err = -EPERM; |
727 | if (!capable(CAP_NET_ADMIN)) | 738 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
728 | goto done; | 739 | goto done; |
729 | 740 | ||
730 | if (dev == ipn->fb_tunnel_dev) { | 741 | if (dev == ipn->fb_tunnel_dev) { |
@@ -773,6 +784,11 @@ static void ipip_dev_free(struct net_device *dev) | |||
773 | free_netdev(dev); | 784 | free_netdev(dev); |
774 | } | 785 | } |
775 | 786 | ||
787 | #define IPIP_FEATURES (NETIF_F_SG | \ | ||
788 | NETIF_F_FRAGLIST | \ | ||
789 | NETIF_F_HIGHDMA | \ | ||
790 | NETIF_F_HW_CSUM) | ||
791 | |||
776 | static void ipip_tunnel_setup(struct net_device *dev) | 792 | static void ipip_tunnel_setup(struct net_device *dev) |
777 | { | 793 | { |
778 | dev->netdev_ops = &ipip_netdev_ops; | 794 | dev->netdev_ops = &ipip_netdev_ops; |
@@ -787,6 +803,9 @@ static void ipip_tunnel_setup(struct net_device *dev) | |||
787 | dev->features |= NETIF_F_NETNS_LOCAL; | 803 | dev->features |= NETIF_F_NETNS_LOCAL; |
788 | dev->features |= NETIF_F_LLTX; | 804 | dev->features |= NETIF_F_LLTX; |
789 | dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | 805 | dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; |
806 | |||
807 | dev->features |= IPIP_FEATURES; | ||
808 | dev->hw_features |= IPIP_FEATURES; | ||
790 | } | 809 | } |
791 | 810 | ||
792 | static int ipip_tunnel_init(struct net_device *dev) | 811 | static int ipip_tunnel_init(struct net_device *dev) |
@@ -829,6 +848,142 @@ static int __net_init ipip_fb_tunnel_init(struct net_device *dev) | |||
829 | return 0; | 848 | return 0; |
830 | } | 849 | } |
831 | 850 | ||
851 | static void ipip_netlink_parms(struct nlattr *data[], | ||
852 | struct ip_tunnel_parm *parms) | ||
853 | { | ||
854 | memset(parms, 0, sizeof(*parms)); | ||
855 | |||
856 | parms->iph.version = 4; | ||
857 | parms->iph.protocol = IPPROTO_IPIP; | ||
858 | parms->iph.ihl = 5; | ||
859 | |||
860 | if (!data) | ||
861 | return; | ||
862 | |||
863 | if (data[IFLA_IPTUN_LINK]) | ||
864 | parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]); | ||
865 | |||
866 | if (data[IFLA_IPTUN_LOCAL]) | ||
867 | parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]); | ||
868 | |||
869 | if (data[IFLA_IPTUN_REMOTE]) | ||
870 | parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]); | ||
871 | |||
872 | if (data[IFLA_IPTUN_TTL]) { | ||
873 | parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]); | ||
874 | if (parms->iph.ttl) | ||
875 | parms->iph.frag_off = htons(IP_DF); | ||
876 | } | ||
877 | |||
878 | if (data[IFLA_IPTUN_TOS]) | ||
879 | parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]); | ||
880 | |||
881 | if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC])) | ||
882 | parms->iph.frag_off = htons(IP_DF); | ||
883 | } | ||
884 | |||
885 | static int ipip_newlink(struct net *src_net, struct net_device *dev, | ||
886 | struct nlattr *tb[], struct nlattr *data[]) | ||
887 | { | ||
888 | struct net *net = dev_net(dev); | ||
889 | struct ip_tunnel *nt; | ||
890 | |||
891 | nt = netdev_priv(dev); | ||
892 | ipip_netlink_parms(data, &nt->parms); | ||
893 | |||
894 | if (ipip_tunnel_locate(net, &nt->parms, 0)) | ||
895 | return -EEXIST; | ||
896 | |||
897 | return ipip_tunnel_create(dev); | ||
898 | } | ||
899 | |||
900 | static int ipip_changelink(struct net_device *dev, struct nlattr *tb[], | ||
901 | struct nlattr *data[]) | ||
902 | { | ||
903 | struct ip_tunnel *t; | ||
904 | struct ip_tunnel_parm p; | ||
905 | struct net *net = dev_net(dev); | ||
906 | struct ipip_net *ipn = net_generic(net, ipip_net_id); | ||
907 | |||
908 | if (dev == ipn->fb_tunnel_dev) | ||
909 | return -EINVAL; | ||
910 | |||
911 | ipip_netlink_parms(data, &p); | ||
912 | |||
913 | if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) || | ||
914 | (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr)) | ||
915 | return -EINVAL; | ||
916 | |||
917 | t = ipip_tunnel_locate(net, &p, 0); | ||
918 | |||
919 | if (t) { | ||
920 | if (t->dev != dev) | ||
921 | return -EEXIST; | ||
922 | } else | ||
923 | t = netdev_priv(dev); | ||
924 | |||
925 | ipip_tunnel_update(t, &p); | ||
926 | return 0; | ||
927 | } | ||
928 | |||
929 | static size_t ipip_get_size(const struct net_device *dev) | ||
930 | { | ||
931 | return | ||
932 | /* IFLA_IPTUN_LINK */ | ||
933 | nla_total_size(4) + | ||
934 | /* IFLA_IPTUN_LOCAL */ | ||
935 | nla_total_size(4) + | ||
936 | /* IFLA_IPTUN_REMOTE */ | ||
937 | nla_total_size(4) + | ||
938 | /* IFLA_IPTUN_TTL */ | ||
939 | nla_total_size(1) + | ||
940 | /* IFLA_IPTUN_TOS */ | ||
941 | nla_total_size(1) + | ||
942 | /* IFLA_IPTUN_PMTUDISC */ | ||
943 | nla_total_size(1) + | ||
944 | 0; | ||
945 | } | ||
946 | |||
947 | static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev) | ||
948 | { | ||
949 | struct ip_tunnel *tunnel = netdev_priv(dev); | ||
950 | struct ip_tunnel_parm *parm = &tunnel->parms; | ||
951 | |||
952 | if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || | ||
953 | nla_put_be32(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) || | ||
954 | nla_put_be32(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) || | ||
955 | nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) || | ||
956 | nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) || | ||
957 | nla_put_u8(skb, IFLA_IPTUN_PMTUDISC, | ||
958 | !!(parm->iph.frag_off & htons(IP_DF)))) | ||
959 | goto nla_put_failure; | ||
960 | return 0; | ||
961 | |||
962 | nla_put_failure: | ||
963 | return -EMSGSIZE; | ||
964 | } | ||
965 | |||
966 | static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = { | ||
967 | [IFLA_IPTUN_LINK] = { .type = NLA_U32 }, | ||
968 | [IFLA_IPTUN_LOCAL] = { .type = NLA_U32 }, | ||
969 | [IFLA_IPTUN_REMOTE] = { .type = NLA_U32 }, | ||
970 | [IFLA_IPTUN_TTL] = { .type = NLA_U8 }, | ||
971 | [IFLA_IPTUN_TOS] = { .type = NLA_U8 }, | ||
972 | [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 }, | ||
973 | }; | ||
974 | |||
975 | static struct rtnl_link_ops ipip_link_ops __read_mostly = { | ||
976 | .kind = "ipip", | ||
977 | .maxtype = IFLA_IPTUN_MAX, | ||
978 | .policy = ipip_policy, | ||
979 | .priv_size = sizeof(struct ip_tunnel), | ||
980 | .setup = ipip_tunnel_setup, | ||
981 | .newlink = ipip_newlink, | ||
982 | .changelink = ipip_changelink, | ||
983 | .get_size = ipip_get_size, | ||
984 | .fill_info = ipip_fill_info, | ||
985 | }; | ||
986 | |||
832 | static struct xfrm_tunnel ipip_handler __read_mostly = { | 987 | static struct xfrm_tunnel ipip_handler __read_mostly = { |
833 | .handler = ipip_rcv, | 988 | .handler = ipip_rcv, |
834 | .err_handler = ipip_err, | 989 | .err_handler = ipip_err, |
@@ -925,14 +1080,26 @@ static int __init ipip_init(void) | |||
925 | return err; | 1080 | return err; |
926 | err = xfrm4_tunnel_register(&ipip_handler, AF_INET); | 1081 | err = xfrm4_tunnel_register(&ipip_handler, AF_INET); |
927 | if (err < 0) { | 1082 | if (err < 0) { |
928 | unregister_pernet_device(&ipip_net_ops); | ||
929 | pr_info("%s: can't register tunnel\n", __func__); | 1083 | pr_info("%s: can't register tunnel\n", __func__); |
1084 | goto xfrm_tunnel_failed; | ||
930 | } | 1085 | } |
1086 | err = rtnl_link_register(&ipip_link_ops); | ||
1087 | if (err < 0) | ||
1088 | goto rtnl_link_failed; | ||
1089 | |||
1090 | out: | ||
931 | return err; | 1091 | return err; |
1092 | |||
1093 | rtnl_link_failed: | ||
1094 | xfrm4_tunnel_deregister(&ipip_handler, AF_INET); | ||
1095 | xfrm_tunnel_failed: | ||
1096 | unregister_pernet_device(&ipip_net_ops); | ||
1097 | goto out; | ||
932 | } | 1098 | } |
933 | 1099 | ||
934 | static void __exit ipip_fini(void) | 1100 | static void __exit ipip_fini(void) |
935 | { | 1101 | { |
1102 | rtnl_link_unregister(&ipip_link_ops); | ||
936 | if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET)) | 1103 | if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET)) |
937 | pr_info("%s: can't deregister tunnel\n", __func__); | 1104 | pr_info("%s: can't deregister tunnel\n", __func__); |
938 | 1105 | ||
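
The ipip.c changes are the largest of this group: tunnel creation and parameter updates are factored into ipip_tunnel_create()/ipip_tunnel_update(), checksum offload features are advertised, iptunnel_xmit() replaces the open-coded stats macro, and a full set of rtnl_link_ops (ipip_newlink, ipip_changelink, ipip_fill_info and friends) lets the tunnel be managed over netlink. Module init also gains proper unwinding: each registration that can fail jumps to a label that undoes the earlier ones. The standalone sketch below shows only that goto-ladder shape; the register/unregister pairs are placeholders, not kernel APIs.

    #include <stdio.h>

    static int register_a(void) { return 0; }
    static int register_b(void) { return 0; }
    static int register_c(void) { return -1; }   /* simulate a failure */
    static void unregister_a(void) { puts("undo a"); }
    static void unregister_b(void) { puts("undo b"); }

    static int module_init_sketch(void)
    {
        int err;

        err = register_a();
        if (err < 0)
            return err;
        err = register_b();
        if (err < 0)
            goto b_failed;
        err = register_c();
        if (err < 0)
            goto c_failed;
    out:
        return err;

    c_failed:
        unregister_b();
    b_failed:
        unregister_a();
        goto out;
    }

    int main(void)
    {
        printf("init: %d\n", module_init_sketch());
        return 0;
    }
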
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 3eab2b2ffd34..58e4160fdcee 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -83,8 +83,8 @@ struct mr_table { | |||
83 | struct vif_device vif_table[MAXVIFS]; | 83 | struct vif_device vif_table[MAXVIFS]; |
84 | int maxvif; | 84 | int maxvif; |
85 | atomic_t cache_resolve_queue_len; | 85 | atomic_t cache_resolve_queue_len; |
86 | int mroute_do_assert; | 86 | bool mroute_do_assert; |
87 | int mroute_do_pim; | 87 | bool mroute_do_pim; |
88 | #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) | 88 | #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) |
89 | int mroute_reg_vif_num; | 89 | int mroute_reg_vif_num; |
90 | #endif | 90 | #endif |
@@ -1207,23 +1207,24 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1207 | struct net *net = sock_net(sk); | 1207 | struct net *net = sock_net(sk); |
1208 | struct mr_table *mrt; | 1208 | struct mr_table *mrt; |
1209 | 1209 | ||
1210 | if (sk->sk_type != SOCK_RAW || | ||
1211 | inet_sk(sk)->inet_num != IPPROTO_IGMP) | ||
1212 | return -EOPNOTSUPP; | ||
1213 | |||
1210 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | 1214 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); |
1211 | if (mrt == NULL) | 1215 | if (mrt == NULL) |
1212 | return -ENOENT; | 1216 | return -ENOENT; |
1213 | 1217 | ||
1214 | if (optname != MRT_INIT) { | 1218 | if (optname != MRT_INIT) { |
1215 | if (sk != rcu_access_pointer(mrt->mroute_sk) && | 1219 | if (sk != rcu_access_pointer(mrt->mroute_sk) && |
1216 | !capable(CAP_NET_ADMIN)) | 1220 | !ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1217 | return -EACCES; | 1221 | return -EACCES; |
1218 | } | 1222 | } |
1219 | 1223 | ||
1220 | switch (optname) { | 1224 | switch (optname) { |
1221 | case MRT_INIT: | 1225 | case MRT_INIT: |
1222 | if (sk->sk_type != SOCK_RAW || | ||
1223 | inet_sk(sk)->inet_num != IPPROTO_IGMP) | ||
1224 | return -EOPNOTSUPP; | ||
1225 | if (optlen != sizeof(int)) | 1226 | if (optlen != sizeof(int)) |
1226 | return -ENOPROTOOPT; | 1227 | return -EINVAL; |
1227 | 1228 | ||
1228 | rtnl_lock(); | 1229 | rtnl_lock(); |
1229 | if (rtnl_dereference(mrt->mroute_sk)) { | 1230 | if (rtnl_dereference(mrt->mroute_sk)) { |
@@ -1284,9 +1285,11 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1284 | case MRT_ASSERT: | 1285 | case MRT_ASSERT: |
1285 | { | 1286 | { |
1286 | int v; | 1287 | int v; |
1288 | if (optlen != sizeof(v)) | ||
1289 | return -EINVAL; | ||
1287 | if (get_user(v, (int __user *)optval)) | 1290 | if (get_user(v, (int __user *)optval)) |
1288 | return -EFAULT; | 1291 | return -EFAULT; |
1289 | mrt->mroute_do_assert = (v) ? 1 : 0; | 1292 | mrt->mroute_do_assert = v; |
1290 | return 0; | 1293 | return 0; |
1291 | } | 1294 | } |
1292 | #ifdef CONFIG_IP_PIMSM | 1295 | #ifdef CONFIG_IP_PIMSM |
@@ -1294,9 +1297,11 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1294 | { | 1297 | { |
1295 | int v; | 1298 | int v; |
1296 | 1299 | ||
1300 | if (optlen != sizeof(v)) | ||
1301 | return -EINVAL; | ||
1297 | if (get_user(v, (int __user *)optval)) | 1302 | if (get_user(v, (int __user *)optval)) |
1298 | return -EFAULT; | 1303 | return -EFAULT; |
1299 | v = (v) ? 1 : 0; | 1304 | v = !!v; |
1300 | 1305 | ||
1301 | rtnl_lock(); | 1306 | rtnl_lock(); |
1302 | ret = 0; | 1307 | ret = 0; |
@@ -1329,7 +1334,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1329 | } else { | 1334 | } else { |
1330 | if (!ipmr_new_table(net, v)) | 1335 | if (!ipmr_new_table(net, v)) |
1331 | ret = -ENOMEM; | 1336 | ret = -ENOMEM; |
1332 | raw_sk(sk)->ipmr_table = v; | 1337 | else |
1338 | raw_sk(sk)->ipmr_table = v; | ||
1333 | } | 1339 | } |
1334 | rtnl_unlock(); | 1340 | rtnl_unlock(); |
1335 | return ret; | 1341 | return ret; |
@@ -1355,6 +1361,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int | |||
1355 | struct net *net = sock_net(sk); | 1361 | struct net *net = sock_net(sk); |
1356 | struct mr_table *mrt; | 1362 | struct mr_table *mrt; |
1357 | 1363 | ||
1364 | if (sk->sk_type != SOCK_RAW || | ||
1365 | inet_sk(sk)->inet_num != IPPROTO_IGMP) | ||
1366 | return -EOPNOTSUPP; | ||
1367 | |||
1358 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | 1368 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); |
1359 | if (mrt == NULL) | 1369 | if (mrt == NULL) |
1360 | return -ENOENT; | 1370 | return -ENOENT; |
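
The ipmr.c hunks harden the multicast-routing socket options: the SOCK_RAW/IPPROTO_IGMP check moves ahead of the table lookup in both setsockopt and getsockopt, MRT_ASSERT and MRT_PIM verify optlen before reading the value, a mis-sized MRT_INIT now returns -EINVAL rather than -ENOPROTOOPT, and MRT_TABLE only records the table id if creating it succeeded. A minimal user-space sketch of the validate-then-copy shape is below; the option code and config struct are invented.

    #include <string.h>
    #include <stdio.h>
    #include <errno.h>

    struct mroute_cfg { int do_assert; };

    /* Reject mis-sized option payloads before touching the value. */
    static int set_option(struct mroute_cfg *cfg, int optname,
                          const void *optval, size_t optlen)
    {
        int v;

        if (optname != 1 /* hypothetical MRT_ASSERT */)
            return -ENOPROTOOPT;
        if (optlen != sizeof(v))
            return -EINVAL;
        memcpy(&v, optval, sizeof(v));
        cfg->do_assert = !!v;
        return 0;
    }

    int main(void)
    {
        struct mroute_cfg cfg = { 0 };
        int v = 42;
        char too_short = 1;

        printf("ok:    %d\n", set_option(&cfg, 1, &v, sizeof(v)));
        printf("short: %d\n", set_option(&cfg, 1, &too_short, sizeof(too_short)));
        printf("assert=%d\n", cfg.do_assert);
        return 0;
    }
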
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 97e61eadf580..3ea4127404d6 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -1533,7 +1533,7 @@ static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, | |||
1533 | { | 1533 | { |
1534 | int ret; | 1534 | int ret; |
1535 | 1535 | ||
1536 | if (!capable(CAP_NET_ADMIN)) | 1536 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
1537 | return -EPERM; | 1537 | return -EPERM; |
1538 | 1538 | ||
1539 | switch (cmd) { | 1539 | switch (cmd) { |
@@ -1677,7 +1677,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, | |||
1677 | { | 1677 | { |
1678 | int ret; | 1678 | int ret; |
1679 | 1679 | ||
1680 | if (!capable(CAP_NET_ADMIN)) | 1680 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
1681 | return -EPERM; | 1681 | return -EPERM; |
1682 | 1682 | ||
1683 | switch (cmd) { | 1683 | switch (cmd) { |
@@ -1698,7 +1698,7 @@ static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned | |||
1698 | { | 1698 | { |
1699 | int ret; | 1699 | int ret; |
1700 | 1700 | ||
1701 | if (!capable(CAP_NET_ADMIN)) | 1701 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
1702 | return -EPERM; | 1702 | return -EPERM; |
1703 | 1703 | ||
1704 | switch (cmd) { | 1704 | switch (cmd) { |
@@ -1722,7 +1722,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len | |||
1722 | { | 1722 | { |
1723 | int ret; | 1723 | int ret; |
1724 | 1724 | ||
1725 | if (!capable(CAP_NET_ADMIN)) | 1725 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
1726 | return -EPERM; | 1726 | return -EPERM; |
1727 | 1727 | ||
1728 | switch (cmd) { | 1728 | switch (cmd) { |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 170b1fdd6b72..17c5e06da662 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -1846,7 +1846,7 @@ compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, | |||
1846 | { | 1846 | { |
1847 | int ret; | 1847 | int ret; |
1848 | 1848 | ||
1849 | if (!capable(CAP_NET_ADMIN)) | 1849 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
1850 | return -EPERM; | 1850 | return -EPERM; |
1851 | 1851 | ||
1852 | switch (cmd) { | 1852 | switch (cmd) { |
@@ -1961,7 +1961,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
1961 | { | 1961 | { |
1962 | int ret; | 1962 | int ret; |
1963 | 1963 | ||
1964 | if (!capable(CAP_NET_ADMIN)) | 1964 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
1965 | return -EPERM; | 1965 | return -EPERM; |
1966 | 1966 | ||
1967 | switch (cmd) { | 1967 | switch (cmd) { |
@@ -1983,7 +1983,7 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
1983 | { | 1983 | { |
1984 | int ret; | 1984 | int ret; |
1985 | 1985 | ||
1986 | if (!capable(CAP_NET_ADMIN)) | 1986 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
1987 | return -EPERM; | 1987 | return -EPERM; |
1988 | 1988 | ||
1989 | switch (cmd) { | 1989 | switch (cmd) { |
@@ -2008,7 +2008,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2008 | { | 2008 | { |
2009 | int ret; | 2009 | int ret; |
2010 | 2010 | ||
2011 | if (!capable(CAP_NET_ADMIN)) | 2011 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
2012 | return -EPERM; | 2012 | return -EPERM; |
2013 | 2013 | ||
2014 | switch (cmd) { | 2014 | switch (cmd) { |
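
The arp_tables.c and ip_tables.c hunks repeat one mechanical change across the set/get sockopt handlers: the global capable(CAP_NET_ADMIN) test becomes ns_capable() against the user namespace that owns the socket's network namespace, so a container's root can manage its own tables without privilege in the initial namespace. The toy model below illustrates the idea of a namespace-relative capability check; it deliberately ignores namespace nesting and none of the types correspond to kernel structures.

    #include <stdio.h>
    #include <stdbool.h>

    struct user_ns { const char *name; };
    struct net     { struct user_ns *user_ns; };
    struct cred    { struct user_ns *user_ns; bool has_net_admin; };

    static struct user_ns container_ns = { "container" };

    /* Toy model: the capability only counts in the namespace it was granted in. */
    static bool ns_capable_sketch(const struct cred *c, struct user_ns *target)
    {
        return c->has_net_admin && c->user_ns == target;
    }

    int main(void)
    {
        struct net container_net   = { &container_ns };
        struct cred container_root = { &container_ns, true };
        struct cred unprivileged   = { &container_ns, false };

        /* Old check: CAP_NET_ADMIN in the initial namespace was required.
         * New check: CAP_NET_ADMIN in the netns owner's user namespace suffices. */
        printf("container root: %d\n",
               ns_capable_sketch(&container_root, container_net.user_ns));
        printf("unprivileged:   %d\n",
               ns_capable_sketch(&unprivileged, container_net.user_ns));
        return 0;
    }
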
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c index a82047282dbb..ac635a7b4416 100644 --- a/net/ipv4/netfilter/iptable_nat.c +++ b/net/ipv4/netfilter/iptable_nat.c | |||
@@ -276,9 +276,7 @@ static int __net_init iptable_nat_net_init(struct net *net) | |||
276 | return -ENOMEM; | 276 | return -ENOMEM; |
277 | net->ipv4.nat_table = ipt_register_table(net, &nf_nat_ipv4_table, repl); | 277 | net->ipv4.nat_table = ipt_register_table(net, &nf_nat_ipv4_table, repl); |
278 | kfree(repl); | 278 | kfree(repl); |
279 | if (IS_ERR(net->ipv4.nat_table)) | 279 | return PTR_RET(net->ipv4.nat_table); |
280 | return PTR_ERR(net->ipv4.nat_table); | ||
281 | return 0; | ||
282 | } | 280 | } |
283 | 281 | ||
284 | static void __net_exit iptable_nat_net_exit(struct net *net) | 282 | static void __net_exit iptable_nat_net_exit(struct net *net) |
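
iptable_nat.c shortens the init tail: the explicit "if (IS_ERR(p)) return PTR_ERR(p); return 0;" sequence becomes PTR_RET() (renamed PTR_ERR_OR_ZERO() in later kernels). The sketch below reimplements the error-pointer idiom in user space to show what that helper evaluates to; the MAX_ERRNO bound mirrors the kernel convention but everything here is written from scratch for illustration.

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    static inline int is_err(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    static inline long ptr_err(const void *ptr)
    {
        return (long)(intptr_t)ptr;
    }

    /* Equivalent of PTR_RET()/PTR_ERR_OR_ZERO(): 0 for a valid pointer,
     * the encoded -errno for an error pointer. */
    static inline long ptr_ret(const void *ptr)
    {
        return is_err(ptr) ? ptr_err(ptr) : 0;
    }

    int main(void)
    {
        int real_object = 42;
        void *ok  = &real_object;
        void *bad = (void *)(intptr_t)-ENOMEM;

        printf("ok:  %ld\n", ptr_ret(ok));   /* 0   */
        printf("bad: %ld\n", ptr_ret(bad));  /* -12 */
        return 0;
    }
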
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c index 8918eff1426d..0f9d09f54bd9 100644 --- a/net/ipv4/protocol.c +++ b/net/ipv4/protocol.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <net/protocol.h> | 29 | #include <net/protocol.h> |
30 | 30 | ||
31 | const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; | 31 | const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; |
32 | const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly; | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * Add a protocol handler to the hash tables | 35 | * Add a protocol handler to the hash tables |
@@ -41,6 +42,13 @@ int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) | |||
41 | } | 42 | } |
42 | EXPORT_SYMBOL(inet_add_protocol); | 43 | EXPORT_SYMBOL(inet_add_protocol); |
43 | 44 | ||
45 | int inet_add_offload(const struct net_offload *prot, unsigned char protocol) | ||
46 | { | ||
47 | return !cmpxchg((const struct net_offload **)&inet_offloads[protocol], | ||
48 | NULL, prot) ? 0 : -1; | ||
49 | } | ||
50 | EXPORT_SYMBOL(inet_add_offload); | ||
51 | |||
44 | /* | 52 | /* |
45 | * Remove a protocol from the hash tables. | 53 | * Remove a protocol from the hash tables. |
46 | */ | 54 | */ |
@@ -57,3 +65,16 @@ int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) | |||
57 | return ret; | 65 | return ret; |
58 | } | 66 | } |
59 | EXPORT_SYMBOL(inet_del_protocol); | 67 | EXPORT_SYMBOL(inet_del_protocol); |
68 | |||
69 | int inet_del_offload(const struct net_offload *prot, unsigned char protocol) | ||
70 | { | ||
71 | int ret; | ||
72 | |||
73 | ret = (cmpxchg((const struct net_offload **)&inet_offloads[protocol], | ||
74 | prot, NULL) == prot) ? 0 : -1; | ||
75 | |||
76 | synchronize_net(); | ||
77 | |||
78 | return ret; | ||
79 | } | ||
80 | EXPORT_SYMBOL(inet_del_offload); | ||
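
protocol.c introduces a second registration array, inet_offloads[], managed with the same lock-free pattern as inet_protos[]: cmpxchg() claims a slot only if it is currently NULL, and removal succeeds only if the slot still holds the handler being removed, followed by synchronize_net() so RCU readers drain before the caller frees anything. The user-space sketch below uses C11 atomics for the same compare-and-swap bookkeeping; the handler struct is a placeholder and there is no RCU analogue here.

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_PROTOS 256

    struct offload { const char *name; };

    static _Atomic(struct offload *) offloads[MAX_PROTOS];

    static int add_offload(struct offload *p, unsigned char proto)
    {
        struct offload *expected = NULL;

        /* Succeeds only if the slot was empty, like cmpxchg(..., NULL, prot). */
        return atomic_compare_exchange_strong(&offloads[proto], &expected, p)
               ? 0 : -1;
    }

    static int del_offload(struct offload *p, unsigned char proto)
    {
        struct offload *expected = p;

        /* Succeeds only if the slot still holds 'p'. */
        return atomic_compare_exchange_strong(&offloads[proto], &expected, NULL)
               ? 0 : -1;
    }

    int main(void)
    {
        struct offload tcp = { "tcp" }, rogue = { "rogue" };

        printf("add tcp:   %d\n", add_offload(&tcp, 6));
        printf("add again: %d\n", add_offload(&rogue, 6)); /* slot taken: -1 */
        printf("del rogue: %d\n", del_offload(&rogue, 6)); /* not owner:  -1 */
        printf("del tcp:   %d\n", del_offload(&tcp, 6));
        return 0;
    }
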
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index df251424d816..baa9b289d7ab 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -2496,6 +2496,10 @@ static __net_init int sysctl_route_net_init(struct net *net) | |||
2496 | tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); | 2496 | tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); |
2497 | if (tbl == NULL) | 2497 | if (tbl == NULL) |
2498 | goto err_dup; | 2498 | goto err_dup; |
2499 | |||
2500 | /* Don't export sysctls to unprivileged users */ | ||
2501 | if (net->user_ns != &init_user_ns) | ||
2502 | tbl[0].procname = NULL; | ||
2499 | } | 2503 | } |
2500 | tbl[0].extra1 = net; | 2504 | tbl[0].extra1 = net; |
2501 | 2505 | ||
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index ba48e799b031..b236ef04914f 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -340,7 +340,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
340 | } | 340 | } |
341 | 341 | ||
342 | req->expires = 0UL; | 342 | req->expires = 0UL; |
343 | req->retrans = 0; | 343 | req->num_retrans = 0; |
344 | 344 | ||
345 | /* | 345 | /* |
346 | * We need to lookup the route here to get at the correct | 346 | * We need to lookup the route here to get at the correct |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 63d4eccc674d..d84400b65049 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -883,6 +883,9 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) | |||
883 | table[6].data = | 883 | table[6].data = |
884 | &net->ipv4.sysctl_ping_group_range; | 884 | &net->ipv4.sysctl_ping_group_range; |
885 | 885 | ||
886 | /* Don't export sysctls to unprivileged users */ | ||
887 | if (net->user_ns != &init_user_ns) | ||
888 | table[0].procname = NULL; | ||
886 | } | 889 | } |
887 | 890 | ||
888 | /* | 891 | /* |
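
route.c and sysctl_net_ipv4.c both stop exporting their per-netns sysctl tables to network namespaces not owned by init_user_ns by writing NULL into the first entry's procname. Because a ctl_table is terminated by an entry without a name, this truncates the visible table to nothing for that namespace. The sketch below models the termination rule with a plain array walk; the table contents are illustrative.

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct ctl_entry { const char *procname; int value; };

    /* A sysctl-style table: the walk stops at the first NULL procname. */
    static void register_table(struct ctl_entry *tbl)
    {
        for (size_t i = 0; tbl[i].procname != NULL; i++)
            printf("registered %s = %d\n", tbl[i].procname, tbl[i].value);
    }

    int main(void)
    {
        bool unprivileged_ns = true;
        struct ctl_entry tbl[] = {
            { "flush",    0 },
            { "max_size", 4096 },
            { NULL, 0 },            /* terminator */
        };

        /* Don't export sysctls to unprivileged users. */
        if (unprivileged_ns)
            tbl[0].procname = NULL;

        register_table(tbl);        /* prints nothing when hidden */
        return 0;
    }
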
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 083092e3aed6..e6eace1c2bdb 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -536,13 +536,14 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
536 | { | 536 | { |
537 | struct tcp_sock *tp = tcp_sk(sk); | 537 | struct tcp_sock *tp = tcp_sk(sk); |
538 | int answ; | 538 | int answ; |
539 | bool slow; | ||
539 | 540 | ||
540 | switch (cmd) { | 541 | switch (cmd) { |
541 | case SIOCINQ: | 542 | case SIOCINQ: |
542 | if (sk->sk_state == TCP_LISTEN) | 543 | if (sk->sk_state == TCP_LISTEN) |
543 | return -EINVAL; | 544 | return -EINVAL; |
544 | 545 | ||
545 | lock_sock(sk); | 546 | slow = lock_sock_fast(sk); |
546 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) | 547 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) |
547 | answ = 0; | 548 | answ = 0; |
548 | else if (sock_flag(sk, SOCK_URGINLINE) || | 549 | else if (sock_flag(sk, SOCK_URGINLINE) || |
@@ -557,7 +558,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
557 | answ--; | 558 | answ--; |
558 | } else | 559 | } else |
559 | answ = tp->urg_seq - tp->copied_seq; | 560 | answ = tp->urg_seq - tp->copied_seq; |
560 | release_sock(sk); | 561 | unlock_sock_fast(sk, slow); |
561 | break; | 562 | break; |
562 | case SIOCATMARK: | 563 | case SIOCATMARK: |
563 | answ = tp->urg_data && tp->urg_seq == tp->copied_seq; | 564 | answ = tp->urg_data && tp->urg_seq == tp->copied_seq; |
@@ -2303,7 +2304,7 @@ void tcp_sock_destruct(struct sock *sk) | |||
2303 | 2304 | ||
2304 | static inline bool tcp_can_repair_sock(const struct sock *sk) | 2305 | static inline bool tcp_can_repair_sock(const struct sock *sk) |
2305 | { | 2306 | { |
2306 | return capable(CAP_NET_ADMIN) && | 2307 | return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && |
2307 | ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); | 2308 | ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); |
2308 | } | 2309 | } |
2309 | 2310 | ||
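
In tcp.c the SIOCINQ path drops lock_sock()/release_sock() in favour of lock_sock_fast()/unlock_sock_fast(), which keeps only the spinlock when no process owns the socket and falls back to the full lock otherwise, and tcp_can_repair_sock() adopts the same namespace-aware capability test used elsewhere in the series. The fragment below shows only the locking pattern, in kernel style; it is not a standalone program and compute_inq() is a hypothetical stand-in for the SIOCINQ arithmetic.

    bool slow;

    slow = lock_sock_fast(sk);   /* spinlock only, if the socket is uncontended */
    answ = compute_inq(sk);      /* hypothetical helper for the SIOCINQ math */
    unlock_sock_fast(sk, slow);  /* undoes whichever lock was actually taken */
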
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 1432cdb0644c..baf28611b334 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c | |||
@@ -259,7 +259,8 @@ int tcp_set_congestion_control(struct sock *sk, const char *name) | |||
259 | if (!ca) | 259 | if (!ca) |
260 | err = -ENOENT; | 260 | err = -ENOENT; |
261 | 261 | ||
262 | else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN))) | 262 | else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || |
263 | ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) | ||
263 | err = -EPERM; | 264 | err = -EPERM; |
264 | 265 | ||
265 | else if (!try_module_get(ca->owner)) | 266 | else if (!try_module_get(ca->owner)) |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 609ff98aeb47..fc67831656e5 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -3552,6 +3552,24 @@ static bool tcp_process_frto(struct sock *sk, int flag) | |||
3552 | return false; | 3552 | return false; |
3553 | } | 3553 | } |
3554 | 3554 | ||
3555 | /* RFC 5961 7 [ACK Throttling] */ | ||
3556 | static void tcp_send_challenge_ack(struct sock *sk) | ||
3557 | { | ||
3558 | /* unprotected vars, we dont care of overwrites */ | ||
3559 | static u32 challenge_timestamp; | ||
3560 | static unsigned int challenge_count; | ||
3561 | u32 now = jiffies / HZ; | ||
3562 | |||
3563 | if (now != challenge_timestamp) { | ||
3564 | challenge_timestamp = now; | ||
3565 | challenge_count = 0; | ||
3566 | } | ||
3567 | if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { | ||
3568 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); | ||
3569 | tcp_send_ack(sk); | ||
3570 | } | ||
3571 | } | ||
3572 | |||
3555 | /* This routine deals with incoming acks, but not outgoing ones. */ | 3573 | /* This routine deals with incoming acks, but not outgoing ones. */ |
3556 | static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | 3574 | static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) |
3557 | { | 3575 | { |
@@ -3571,8 +3589,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3571 | /* If the ack is older than previous acks | 3589 | /* If the ack is older than previous acks |
3572 | * then we can probably ignore it. | 3590 | * then we can probably ignore it. |
3573 | */ | 3591 | */ |
3574 | if (before(ack, prior_snd_una)) | 3592 | if (before(ack, prior_snd_una)) { |
3593 | /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ | ||
3594 | if (before(ack, prior_snd_una - tp->max_window)) { | ||
3595 | tcp_send_challenge_ack(sk); | ||
3596 | return -1; | ||
3597 | } | ||
3575 | goto old_ack; | 3598 | goto old_ack; |
3599 | } | ||
3576 | 3600 | ||
3577 | /* If the ack includes data we haven't sent yet, discard | 3601 | /* If the ack includes data we haven't sent yet, discard |
3578 | * this segment (RFC793 Section 3.9). | 3602 | * this segment (RFC793 Section 3.9). |
@@ -5244,23 +5268,6 @@ out: | |||
5244 | } | 5268 | } |
5245 | #endif /* CONFIG_NET_DMA */ | 5269 | #endif /* CONFIG_NET_DMA */ |
5246 | 5270 | ||
5247 | static void tcp_send_challenge_ack(struct sock *sk) | ||
5248 | { | ||
5249 | /* unprotected vars, we dont care of overwrites */ | ||
5250 | static u32 challenge_timestamp; | ||
5251 | static unsigned int challenge_count; | ||
5252 | u32 now = jiffies / HZ; | ||
5253 | |||
5254 | if (now != challenge_timestamp) { | ||
5255 | challenge_timestamp = now; | ||
5256 | challenge_count = 0; | ||
5257 | } | ||
5258 | if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { | ||
5259 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); | ||
5260 | tcp_send_ack(sk); | ||
5261 | } | ||
5262 | } | ||
5263 | |||
5264 | /* Does PAWS and seqno based validation of an incoming segment, flags will | 5271 | /* Does PAWS and seqno based validation of an incoming segment, flags will |
5265 | * play significant role here. | 5272 | * play significant role here. |
5266 | */ | 5273 | */ |
@@ -5988,7 +5995,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5988 | */ | 5995 | */ |
5989 | if (req) { | 5996 | if (req) { |
5990 | tcp_synack_rtt_meas(sk, req); | 5997 | tcp_synack_rtt_meas(sk, req); |
5991 | tp->total_retrans = req->retrans; | 5998 | tp->total_retrans = req->num_retrans; |
5992 | 5999 | ||
5993 | reqsk_fastopen_remove(sk, req, false); | 6000 | reqsk_fastopen_remove(sk, req, false); |
5994 | } else { | 6001 | } else { |
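
tcp_input.c moves tcp_send_challenge_ack() above tcp_ack() so the ACK path can apply the RFC 5961 section 5.2 mitigation: an ACK older than prior_snd_una minus tp->max_window is treated as a possible blind-injection probe and answered with a rate-limited challenge ACK instead of falling through to the old-ack path. The limiter itself just resets a counter every second and sends while under the sysctl limit; the standalone sketch below reproduces that shape with time() in place of jiffies and a fixed constant standing in for sysctl_tcp_challenge_ack_limit.

    #include <stdio.h>
    #include <time.h>

    #define CHALLENGE_ACK_LIMIT 100   /* stand-in for the sysctl */

    /* Returns 1 if a challenge ACK may be sent now, 0 if rate-limited. */
    static int challenge_ack_allowed(void)
    {
        static time_t window_start;
        static unsigned int count;
        time_t now = time(NULL);

        if (now != window_start) {   /* new one-second window */
            window_start = now;
            count = 0;
        }
        return ++count <= CHALLENGE_ACK_LIMIT;
    }

    int main(void)
    {
        int sent = 0, limited = 0;

        for (int i = 0; i < 1000; i++)
            challenge_ack_allowed() ? sent++ : limited++;
        printf("sent=%d rate-limited=%d\n", sent, limited);
        return 0;
    }
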
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0c4a64355603..1ed230716d51 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -138,14 +138,6 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) | |||
138 | } | 138 | } |
139 | EXPORT_SYMBOL_GPL(tcp_twsk_unique); | 139 | EXPORT_SYMBOL_GPL(tcp_twsk_unique); |
140 | 140 | ||
141 | static int tcp_repair_connect(struct sock *sk) | ||
142 | { | ||
143 | tcp_connect_init(sk); | ||
144 | tcp_finish_connect(sk, NULL); | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | /* This will initiate an outgoing connection. */ | 141 | /* This will initiate an outgoing connection. */ |
150 | int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | 142 | int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
151 | { | 143 | { |
@@ -250,10 +242,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
250 | 242 | ||
251 | inet->inet_id = tp->write_seq ^ jiffies; | 243 | inet->inet_id = tp->write_seq ^ jiffies; |
252 | 244 | ||
253 | if (likely(!tp->repair)) | 245 | err = tcp_connect(sk); |
254 | err = tcp_connect(sk); | ||
255 | else | ||
256 | err = tcp_repair_connect(sk); | ||
257 | 246 | ||
258 | rt = NULL; | 247 | rt = NULL; |
259 | if (err) | 248 | if (err) |
@@ -877,10 +866,13 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, | |||
877 | } | 866 | } |
878 | 867 | ||
879 | static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req, | 868 | static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req, |
880 | struct request_values *rvp) | 869 | struct request_values *rvp) |
881 | { | 870 | { |
882 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); | 871 | int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false); |
883 | return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false); | 872 | |
873 | if (!res) | ||
874 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); | ||
875 | return res; | ||
884 | } | 876 | } |
885 | 877 | ||
886 | /* | 878 | /* |
@@ -1070,7 +1062,7 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family) | |||
1070 | } | 1062 | } |
1071 | EXPORT_SYMBOL(tcp_md5_do_del); | 1063 | EXPORT_SYMBOL(tcp_md5_do_del); |
1072 | 1064 | ||
1073 | void tcp_clear_md5_list(struct sock *sk) | 1065 | static void tcp_clear_md5_list(struct sock *sk) |
1074 | { | 1066 | { |
1075 | struct tcp_sock *tp = tcp_sk(sk); | 1067 | struct tcp_sock *tp = tcp_sk(sk); |
1076 | struct tcp_md5sig_key *key; | 1068 | struct tcp_md5sig_key *key; |
@@ -1386,7 +1378,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk, | |||
1386 | struct sock *child; | 1378 | struct sock *child; |
1387 | int err; | 1379 | int err; |
1388 | 1380 | ||
1389 | req->retrans = 0; | 1381 | req->num_retrans = 0; |
1382 | req->num_timeout = 0; | ||
1390 | req->sk = NULL; | 1383 | req->sk = NULL; |
1391 | 1384 | ||
1392 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); | 1385 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); |
@@ -1741,7 +1734,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1741 | 1734 | ||
1742 | tcp_initialize_rcv_mss(newsk); | 1735 | tcp_initialize_rcv_mss(newsk); |
1743 | tcp_synack_rtt_meas(newsk, req); | 1736 | tcp_synack_rtt_meas(newsk, req); |
1744 | newtp->total_retrans = req->retrans; | 1737 | newtp->total_retrans = req->num_retrans; |
1745 | 1738 | ||
1746 | #ifdef CONFIG_TCP_MD5SIG | 1739 | #ifdef CONFIG_TCP_MD5SIG |
1747 | /* Copy over the MD5 key from the original socket */ | 1740 | /* Copy over the MD5 key from the original socket */ |
@@ -1919,7 +1912,6 @@ EXPORT_SYMBOL(tcp_v4_do_rcv); | |||
1919 | 1912 | ||
1920 | void tcp_v4_early_demux(struct sk_buff *skb) | 1913 | void tcp_v4_early_demux(struct sk_buff *skb) |
1921 | { | 1914 | { |
1922 | struct net *net = dev_net(skb->dev); | ||
1923 | const struct iphdr *iph; | 1915 | const struct iphdr *iph; |
1924 | const struct tcphdr *th; | 1916 | const struct tcphdr *th; |
1925 | struct sock *sk; | 1917 | struct sock *sk; |
@@ -1927,16 +1919,16 @@ void tcp_v4_early_demux(struct sk_buff *skb) | |||
1927 | if (skb->pkt_type != PACKET_HOST) | 1919 | if (skb->pkt_type != PACKET_HOST) |
1928 | return; | 1920 | return; |
1929 | 1921 | ||
1930 | if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr))) | 1922 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) |
1931 | return; | 1923 | return; |
1932 | 1924 | ||
1933 | iph = ip_hdr(skb); | 1925 | iph = ip_hdr(skb); |
1934 | th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb)); | 1926 | th = tcp_hdr(skb); |
1935 | 1927 | ||
1936 | if (th->doff < sizeof(struct tcphdr) / 4) | 1928 | if (th->doff < sizeof(struct tcphdr) / 4) |
1937 | return; | 1929 | return; |
1938 | 1930 | ||
1939 | sk = __inet_lookup_established(net, &tcp_hashinfo, | 1931 | sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, |
1940 | iph->saddr, th->source, | 1932 | iph->saddr, th->source, |
1941 | iph->daddr, ntohs(th->dest), | 1933 | iph->daddr, ntohs(th->dest), |
1942 | skb->skb_iif); | 1934 | skb->skb_iif); |
@@ -2640,7 +2632,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req, | |||
2640 | 0, 0, /* could print option size, but that is af dependent. */ | 2632 | 0, 0, /* could print option size, but that is af dependent. */ |
2641 | 1, /* timers active (only the expire timer) */ | 2633 | 1, /* timers active (only the expire timer) */ |
2642 | jiffies_delta_to_clock_t(delta), | 2634 | jiffies_delta_to_clock_t(delta), |
2643 | req->retrans, | 2635 | req->num_timeout, |
2644 | from_kuid_munged(seq_user_ns(f), uid), | 2636 | from_kuid_munged(seq_user_ns(f), uid), |
2645 | 0, /* non standard timer */ | 2637 | 0, /* non standard timer */ |
2646 | 0, /* open_requests have no inode */ | 2638 | 0, /* open_requests have no inode */ |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index a7302d974f32..f35f2dfb6401 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -553,7 +553,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
553 | * it can be estimated (approximately) | 553 | * it can be estimated (approximately) |
554 | * from another data. | 554 | * from another data. |
555 | */ | 555 | */ |
556 | tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans); | 556 | tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout); |
557 | paws_reject = tcp_paws_reject(&tmp_opt, th->rst); | 557 | paws_reject = tcp_paws_reject(&tmp_opt, th->rst); |
558 | } | 558 | } |
559 | } | 559 | } |
@@ -582,7 +582,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
582 | * Note that even if there is new data in the SYN packet | 582 | * Note that even if there is new data in the SYN packet |
583 | * they will be thrown away too. | 583 | * they will be thrown away too. |
584 | */ | 584 | */ |
585 | req->rsk_ops->rtx_syn_ack(sk, req, NULL); | 585 | inet_rtx_syn_ack(sk, req); |
586 | return NULL; | 586 | return NULL; |
587 | } | 587 | } |
588 | 588 | ||
@@ -696,7 +696,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
696 | /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */ | 696 | /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */ |
697 | if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr) | 697 | if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr) |
698 | tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr; | 698 | tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr; |
699 | else if (req->retrans) /* don't take RTT sample if retrans && ~TS */ | 699 | else if (req->num_retrans) /* don't take RTT sample if retrans && ~TS */ |
700 | tcp_rsk(req)->snt_synack = 0; | 700 | tcp_rsk(req)->snt_synack = 0; |
701 | 701 | ||
702 | /* For Fast Open no more processing is needed (sk is the | 702 | /* For Fast Open no more processing is needed (sk is the |
@@ -706,7 +706,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
706 | return sk; | 706 | return sk; |
707 | 707 | ||
708 | /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */ | 708 | /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */ |
709 | if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && | 709 | if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && |
710 | TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { | 710 | TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { |
711 | inet_rsk(req)->acked = 1; | 711 | inet_rsk(req)->acked = 1; |
712 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP); | 712 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2798706cb063..8ac085573217 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2987,6 +2987,11 @@ int tcp_connect(struct sock *sk) | |||
2987 | 2987 | ||
2988 | tcp_connect_init(sk); | 2988 | tcp_connect_init(sk); |
2989 | 2989 | ||
2990 | if (unlikely(tp->repair)) { | ||
2991 | tcp_finish_connect(sk, NULL); | ||
2992 | return 0; | ||
2993 | } | ||
2994 | |||
2990 | buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); | 2995 | buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); |
2991 | if (unlikely(buff == NULL)) | 2996 | if (unlikely(buff == NULL)) |
2992 | return -ENOBUFS; | 2997 | return -ENOBUFS; |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index d47c1b4421a3..b78aac30c498 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -318,7 +318,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk) | |||
318 | req = tcp_sk(sk)->fastopen_rsk; | 318 | req = tcp_sk(sk)->fastopen_rsk; |
319 | req->rsk_ops->syn_ack_timeout(sk, req); | 319 | req->rsk_ops->syn_ack_timeout(sk, req); |
320 | 320 | ||
321 | if (req->retrans >= max_retries) { | 321 | if (req->num_timeout >= max_retries) { |
322 | tcp_write_err(sk); | 322 | tcp_write_err(sk); |
323 | return; | 323 | return; |
324 | } | 324 | } |
@@ -327,10 +327,10 @@ static void tcp_fastopen_synack_timer(struct sock *sk) | |||
327 | * regular retransmit because if the child socket has been accepted | 327 | * regular retransmit because if the child socket has been accepted |
328 | * it's not good to give up too easily. | 328 | * it's not good to give up too easily. |
329 | */ | 329 | */ |
330 | req->rsk_ops->rtx_syn_ack(sk, req, NULL); | 330 | inet_rtx_syn_ack(sk, req); |
331 | req->retrans++; | 331 | req->num_timeout++; |
332 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, | 332 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
333 | TCP_TIMEOUT_INIT << req->retrans, TCP_RTO_MAX); | 333 | TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX); |
334 | } | 334 | } |
335 | 335 | ||
336 | /* | 336 | /* |
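
tcp_ipv4.c, tcp_minisocks.c and tcp_timer.c split the old req->retrans counter in two: num_retrans counts SYN-ACKs actually retransmitted (bumped by inet_rtx_syn_ack() only when the transmit succeeds, which is also why tcp_v4_rtx_synack() now increments TCP_MIB_RETRANSSEGS only on success), while num_timeout counts timer expirations and keeps driving defer-accept and the backoff shift. The standalone sketch below keeps the two counters side by side; the failing-send pattern and limits are invented.

    #include <stdio.h>
    #include <stdbool.h>

    struct request {
        unsigned int num_retrans;   /* SYN-ACKs actually put on the wire */
        unsigned int num_timeout;   /* timer expirations, drives backoff  */
    };

    /* Pretend every third transmit attempt fails (e.g. no route, no memory). */
    static bool send_synack(int attempt)
    {
        return attempt % 3 != 0;
    }

    static void synack_timer_fired(struct request *req, int attempt)
    {
        if (send_synack(attempt))
            req->num_retrans++;      /* only successful sends are counted */
        req->num_timeout++;          /* every expiry still backs off the timer */
    }

    int main(void)
    {
        struct request req = { 0, 0 };

        for (int attempt = 0; attempt < 6; attempt++)
            synack_timer_fired(&req, attempt);
        printf("num_retrans=%u num_timeout=%u\n",
               req.num_retrans, req.num_timeout);   /* 4 and 6 */
        return 0;
    }
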
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index b6d3f79151e2..2068ac4fbdad 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile | |||
@@ -7,9 +7,11 @@ obj-$(CONFIG_IPV6) += ipv6.o | |||
7 | ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ | 7 | ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ |
8 | addrlabel.o \ | 8 | addrlabel.o \ |
9 | route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \ | 9 | route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \ |
10 | raw.o protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \ | 10 | raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o \ |
11 | exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o | 11 | exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o |
12 | 12 | ||
13 | ipv6-offload := ip6_offload.o tcpv6_offload.o udp_offload.o exthdrs_offload.o | ||
14 | |||
13 | ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o | 15 | ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o |
14 | ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o | 16 | ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o |
15 | 17 | ||
@@ -39,5 +41,6 @@ obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o | |||
39 | obj-$(CONFIG_IPV6_GRE) += ip6_gre.o | 41 | obj-$(CONFIG_IPV6_GRE) += ip6_gre.o |
40 | 42 | ||
41 | obj-y += addrconf_core.o exthdrs_core.o | 43 | obj-y += addrconf_core.o exthdrs_core.o |
44 | obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6_offload) | ||
42 | 45 | ||
43 | obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o | 46 | obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 0424e4e27414..fc0e13ad6337 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -81,6 +81,7 @@ | |||
81 | #include <net/pkt_sched.h> | 81 | #include <net/pkt_sched.h> |
82 | #include <linux/if_tunnel.h> | 82 | #include <linux/if_tunnel.h> |
83 | #include <linux/rtnetlink.h> | 83 | #include <linux/rtnetlink.h> |
84 | #include <linux/netconf.h> | ||
84 | 85 | ||
85 | #ifdef CONFIG_IPV6_PRIVACY | 86 | #ifdef CONFIG_IPV6_PRIVACY |
86 | #include <linux/random.h> | 87 | #include <linux/random.h> |
@@ -401,7 +402,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) | |||
401 | if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) | 402 | if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) |
402 | ndev->cnf.accept_dad = -1; | 403 | ndev->cnf.accept_dad = -1; |
403 | 404 | ||
404 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) | 405 | #if IS_ENABLED(CONFIG_IPV6_SIT) |
405 | if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { | 406 | if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) { |
406 | pr_info("%s: Disabled Multicast RS\n", dev->name); | 407 | pr_info("%s: Disabled Multicast RS\n", dev->name); |
407 | ndev->cnf.rtr_solicits = 0; | 408 | ndev->cnf.rtr_solicits = 0; |
@@ -460,6 +461,141 @@ static struct inet6_dev *ipv6_find_idev(struct net_device *dev) | |||
460 | return idev; | 461 | return idev; |
461 | } | 462 | } |
462 | 463 | ||
464 | static int inet6_netconf_msgsize_devconf(int type) | ||
465 | { | ||
466 | int size = NLMSG_ALIGN(sizeof(struct netconfmsg)) | ||
467 | + nla_total_size(4); /* NETCONFA_IFINDEX */ | ||
468 | |||
469 | /* type -1 is used for ALL */ | ||
470 | if (type == -1 || type == NETCONFA_FORWARDING) | ||
471 | size += nla_total_size(4); | ||
472 | |||
473 | return size; | ||
474 | } | ||
475 | |||
476 | static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex, | ||
477 | struct ipv6_devconf *devconf, u32 portid, | ||
478 | u32 seq, int event, unsigned int flags, | ||
479 | int type) | ||
480 | { | ||
481 | struct nlmsghdr *nlh; | ||
482 | struct netconfmsg *ncm; | ||
483 | |||
484 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), | ||
485 | flags); | ||
486 | if (nlh == NULL) | ||
487 | return -EMSGSIZE; | ||
488 | |||
489 | ncm = nlmsg_data(nlh); | ||
490 | ncm->ncm_family = AF_INET6; | ||
491 | |||
492 | if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0) | ||
493 | goto nla_put_failure; | ||
494 | |||
495 | /* type -1 is used for ALL */ | ||
496 | if ((type == -1 || type == NETCONFA_FORWARDING) && | ||
497 | nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0) | ||
498 | goto nla_put_failure; | ||
499 | |||
500 | return nlmsg_end(skb, nlh); | ||
501 | |||
502 | nla_put_failure: | ||
503 | nlmsg_cancel(skb, nlh); | ||
504 | return -EMSGSIZE; | ||
505 | } | ||
506 | |||
507 | static void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex, | ||
508 | struct ipv6_devconf *devconf) | ||
509 | { | ||
510 | struct sk_buff *skb; | ||
511 | int err = -ENOBUFS; | ||
512 | |||
513 | skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_ATOMIC); | ||
514 | if (skb == NULL) | ||
515 | goto errout; | ||
516 | |||
517 | err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0, | ||
518 | RTM_NEWNETCONF, 0, type); | ||
519 | if (err < 0) { | ||
520 | /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */ | ||
521 | WARN_ON(err == -EMSGSIZE); | ||
522 | kfree_skb(skb); | ||
523 | goto errout; | ||
524 | } | ||
525 | rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_ATOMIC); | ||
526 | return; | ||
527 | errout: | ||
528 | if (err < 0) | ||
529 | rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err); | ||
530 | } | ||
531 | |||
532 | static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = { | ||
533 | [NETCONFA_IFINDEX] = { .len = sizeof(int) }, | ||
534 | [NETCONFA_FORWARDING] = { .len = sizeof(int) }, | ||
535 | }; | ||
536 | |||
537 | static int inet6_netconf_get_devconf(struct sk_buff *in_skb, | ||
538 | struct nlmsghdr *nlh, | ||
539 | void *arg) | ||
540 | { | ||
541 | struct net *net = sock_net(in_skb->sk); | ||
542 | struct nlattr *tb[NETCONFA_MAX+1]; | ||
543 | struct netconfmsg *ncm; | ||
544 | struct sk_buff *skb; | ||
545 | struct ipv6_devconf *devconf; | ||
546 | struct inet6_dev *in6_dev; | ||
547 | struct net_device *dev; | ||
548 | int ifindex; | ||
549 | int err; | ||
550 | |||
551 | err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, | ||
552 | devconf_ipv6_policy); | ||
553 | if (err < 0) | ||
554 | goto errout; | ||
555 | |||
556 | err = EINVAL; | ||
557 | if (!tb[NETCONFA_IFINDEX]) | ||
558 | goto errout; | ||
559 | |||
560 | ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]); | ||
561 | switch (ifindex) { | ||
562 | case NETCONFA_IFINDEX_ALL: | ||
563 | devconf = net->ipv6.devconf_all; | ||
564 | break; | ||
565 | case NETCONFA_IFINDEX_DEFAULT: | ||
566 | devconf = net->ipv6.devconf_dflt; | ||
567 | break; | ||
568 | default: | ||
569 | dev = __dev_get_by_index(net, ifindex); | ||
570 | if (dev == NULL) | ||
571 | goto errout; | ||
572 | in6_dev = __in6_dev_get(dev); | ||
573 | if (in6_dev == NULL) | ||
574 | goto errout; | ||
575 | devconf = &in6_dev->cnf; | ||
576 | break; | ||
577 | } | ||
578 | |||
579 | err = -ENOBUFS; | ||
580 | skb = nlmsg_new(inet6_netconf_msgsize_devconf(-1), GFP_ATOMIC); | ||
581 | if (skb == NULL) | ||
582 | goto errout; | ||
583 | |||
584 | err = inet6_netconf_fill_devconf(skb, ifindex, devconf, | ||
585 | NETLINK_CB(in_skb).portid, | ||
586 | nlh->nlmsg_seq, RTM_NEWNETCONF, 0, | ||
587 | -1); | ||
588 | if (err < 0) { | ||
589 | /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */ | ||
590 | WARN_ON(err == -EMSGSIZE); | ||
591 | kfree_skb(skb); | ||
592 | goto errout; | ||
593 | } | ||
594 | err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); | ||
595 | errout: | ||
596 | return err; | ||
597 | } | ||
598 | |||
463 | #ifdef CONFIG_SYSCTL | 599 | #ifdef CONFIG_SYSCTL |
464 | static void dev_forward_change(struct inet6_dev *idev) | 600 | static void dev_forward_change(struct inet6_dev *idev) |
465 | { | 601 | { |
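
The addrconf.c addition wires IPv6 per-device configuration into netlink: inet6_netconf_msgsize_devconf() sizes an RTM_NEWNETCONF message, inet6_netconf_fill_devconf() emits NETCONFA_IFINDEX plus, for now, only NETCONFA_FORWARDING, inet6_netconf_get_devconf() answers unicast queries, and forwarding changes broadcast on RTNLGRP_IPV6_NETCONF. One detail worth flagging: inet6_netconf_get_devconf() assigns err = EINVAL without the minus sign, which looks like an oversight next to the surrounding -ENOBUFS and -EMSGSIZE returns. The sketch below redoes the size accounting in user space with locally defined align helpers standing in for NLMSG_ALIGN() and nla_total_size(); it illustrates the arithmetic only.

    #include <stdio.h>

    #define ALIGNTO      4U
    #define ALIGN4(len)  (((len) + ALIGNTO - 1) & ~(ALIGNTO - 1))

    /* Rough equivalents of the kernel helpers, for illustration only. */
    #define NLA_HDR      ALIGN4(4)                    /* struct nlattr header */
    #define nla_total(p) (ALIGN4(NLA_HDR + (p)))

    struct netconfmsg_sketch { unsigned char ncm_family; };

    /* Mirrors inet6_netconf_msgsize_devconf(): header plus the IFINDEX attr,
     * plus FORWARDING when that type (or -1 meaning "all") is requested. */
    static int netconf_msgsize(int type)
    {
        int size = ALIGN4(sizeof(struct netconfmsg_sketch))
                 + nla_total(4);                      /* NETCONFA_IFINDEX */

        if (type == -1 || type == 1 /* NETCONFA_FORWARDING */)
            size += nla_total(4);
        return size;
    }

    int main(void)
    {
        printf("forwarding only: %d bytes\n", netconf_msgsize(1));
        printf("all attributes:  %d bytes\n", netconf_msgsize(-1));
        return 0;
    }
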
@@ -471,7 +607,7 @@ static void dev_forward_change(struct inet6_dev *idev) | |||
471 | dev = idev->dev; | 607 | dev = idev->dev; |
472 | if (idev->cnf.forwarding) | 608 | if (idev->cnf.forwarding) |
473 | dev_disable_lro(dev); | 609 | dev_disable_lro(dev); |
474 | if (dev && (dev->flags & IFF_MULTICAST)) { | 610 | if (dev->flags & IFF_MULTICAST) { |
475 | if (idev->cnf.forwarding) | 611 | if (idev->cnf.forwarding) |
476 | ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); | 612 | ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); |
477 | else | 613 | else |
@@ -486,6 +622,8 @@ static void dev_forward_change(struct inet6_dev *idev) | |||
486 | else | 622 | else |
487 | addrconf_leave_anycast(ifa); | 623 | addrconf_leave_anycast(ifa); |
488 | } | 624 | } |
625 | inet6_netconf_notify_devconf(dev_net(dev), NETCONFA_FORWARDING, | ||
626 | dev->ifindex, &idev->cnf); | ||
489 | } | 627 | } |
490 | 628 | ||
491 | 629 | ||
@@ -518,6 +656,10 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) | |||
518 | *p = newf; | 656 | *p = newf; |
519 | 657 | ||
520 | if (p == &net->ipv6.devconf_dflt->forwarding) { | 658 | if (p == &net->ipv6.devconf_dflt->forwarding) { |
659 | if ((!newf) ^ (!old)) | ||
660 | inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING, | ||
661 | NETCONFA_IFINDEX_DEFAULT, | ||
662 | net->ipv6.devconf_dflt); | ||
521 | rtnl_unlock(); | 663 | rtnl_unlock(); |
522 | return 0; | 664 | return 0; |
523 | } | 665 | } |
@@ -525,6 +667,10 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) | |||
525 | if (p == &net->ipv6.devconf_all->forwarding) { | 667 | if (p == &net->ipv6.devconf_all->forwarding) { |
526 | net->ipv6.devconf_dflt->forwarding = newf; | 668 | net->ipv6.devconf_dflt->forwarding = newf; |
527 | addrconf_forward_change(net, newf); | 669 | addrconf_forward_change(net, newf); |
670 | if ((!newf) ^ (!old)) | ||
671 | inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING, | ||
672 | NETCONFA_IFINDEX_ALL, | ||
673 | net->ipv6.devconf_all); | ||
528 | } else if ((!newf) ^ (!old)) | 674 | } else if ((!newf) ^ (!old)) |
529 | dev_forward_change((struct inet6_dev *)table->extra1); | 675 | dev_forward_change((struct inet6_dev *)table->extra1); |
530 | rtnl_unlock(); | 676 | rtnl_unlock(); |
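Editor's note: with the hunks above, forwarding changes on the per-device, "all" and "default" devconf now raise RTM_NEWNETCONF notifications on the RTNLGRP_IPV6_NETCONF group, and only when the value really toggles (the (!newf) ^ (!old) test). A minimal listener sketch, assuming RTNLGRP_IPV6_NETCONF from <linux/rtnetlink.h> and omitting error handling:

        /* Hedged sketch: watch for IPv6 netconf (e.g. forwarding) change events. */
        #include <unistd.h>
        #include <sys/socket.h>
        #include <linux/netlink.h>
        #include <linux/rtnetlink.h>

        #ifndef SOL_NETLINK
        #define SOL_NETLINK 270         /* value from <linux/socket.h> */
        #endif

        int main(void)
        {
                int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
                int group = RTNLGRP_IPV6_NETCONF;
                char buf[4096];

                setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                           &group, sizeof(group));
                for (;;) {
                        /* Each datagram is an RTM_NEWNETCONF message: a struct
                         * netconfmsg followed by NETCONFA_* attributes. */
                        if (recv(fd, buf, sizeof(buf), 0) <= 0)
                                break;
                        /* parse with NLMSG_OK()/NLMSG_NEXT() here */
                }
                close(fd);
                return 0;
        }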
@@ -553,7 +699,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | |||
553 | pr_warn("Freeing alive inet6 address %p\n", ifp); | 699 | pr_warn("Freeing alive inet6 address %p\n", ifp); |
554 | return; | 700 | return; |
555 | } | 701 | } |
556 | dst_release(&ifp->rt->dst); | 702 | ip6_rt_put(ifp->rt); |
557 | 703 | ||
558 | kfree_rcu(ifp, rcu); | 704 | kfree_rcu(ifp, rcu); |
559 | } | 705 | } |
@@ -805,7 +951,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
805 | rt6_set_expires(rt, expires); | 951 | rt6_set_expires(rt, expires); |
806 | } | 952 | } |
807 | } | 953 | } |
808 | dst_release(&rt->dst); | 954 | ip6_rt_put(rt); |
809 | } | 955 | } |
810 | 956 | ||
811 | /* clean up prefsrc entries */ | 957 | /* clean up prefsrc entries */ |
@@ -1692,7 +1838,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev, | |||
1692 | This thing is done here expecting that the whole | 1838 | This thing is done here expecting that the whole |
1693 | class of non-broadcast devices need not cloning. | 1839 | class of non-broadcast devices need not cloning. |
1694 | */ | 1840 | */ |
1695 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) | 1841 | #if IS_ENABLED(CONFIG_IPV6_SIT) |
1696 | if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT)) | 1842 | if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT)) |
1697 | cfg.fc_flags |= RTF_NONEXTHOP; | 1843 | cfg.fc_flags |= RTF_NONEXTHOP; |
1698 | #endif | 1844 | #endif |
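Editor's note: throughout these files the two-clause defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) tests are collapsed into IS_ENABLED(CONFIG_FOO), which is true for both built-in (=y) and modular (=m) configurations. A trivial sketch of the equivalence:

        #include <linux/kconfig.h>      /* IS_ENABLED(), IS_BUILTIN(), IS_MODULE() */

        #if IS_ENABLED(CONFIG_IPV6_SIT)
        /* Compiled when CONFIG_IPV6_SIT is y or m -- exactly the set of builds the
         * old "defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)" matched. */
        #endif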
@@ -1752,7 +1898,7 @@ static void addrconf_add_mroute(struct net_device *dev) | |||
1752 | ip6_route_add(&cfg); | 1898 | ip6_route_add(&cfg); |
1753 | } | 1899 | } |
1754 | 1900 | ||
1755 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) | 1901 | #if IS_ENABLED(CONFIG_IPV6_SIT) |
1756 | static void sit_route_add(struct net_device *dev) | 1902 | static void sit_route_add(struct net_device *dev) |
1757 | { | 1903 | { |
1758 | struct fib6_config cfg = { | 1904 | struct fib6_config cfg = { |
@@ -1881,8 +2027,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) | |||
1881 | addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len, | 2027 | addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len, |
1882 | dev, expires, flags); | 2028 | dev, expires, flags); |
1883 | } | 2029 | } |
1884 | if (rt) | 2030 | ip6_rt_put(rt); |
1885 | dst_release(&rt->dst); | ||
1886 | } | 2031 | } |
1887 | 2032 | ||
1888 | /* Try to figure out our local address for this prefix */ | 2033 | /* Try to figure out our local address for this prefix */ |
@@ -2104,7 +2249,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg) | |||
2104 | if (dev == NULL) | 2249 | if (dev == NULL) |
2105 | goto err_exit; | 2250 | goto err_exit; |
2106 | 2251 | ||
2107 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) | 2252 | #if IS_ENABLED(CONFIG_IPV6_SIT) |
2108 | if (dev->type == ARPHRD_SIT) { | 2253 | if (dev->type == ARPHRD_SIT) { |
2109 | const struct net_device_ops *ops = dev->netdev_ops; | 2254 | const struct net_device_ops *ops = dev->netdev_ops; |
2110 | struct ifreq ifr; | 2255 | struct ifreq ifr; |
@@ -2268,7 +2413,7 @@ int addrconf_add_ifaddr(struct net *net, void __user *arg) | |||
2268 | struct in6_ifreq ireq; | 2413 | struct in6_ifreq ireq; |
2269 | int err; | 2414 | int err; |
2270 | 2415 | ||
2271 | if (!capable(CAP_NET_ADMIN)) | 2416 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
2272 | return -EPERM; | 2417 | return -EPERM; |
2273 | 2418 | ||
2274 | if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) | 2419 | if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) |
@@ -2287,7 +2432,7 @@ int addrconf_del_ifaddr(struct net *net, void __user *arg) | |||
2287 | struct in6_ifreq ireq; | 2432 | struct in6_ifreq ireq; |
2288 | int err; | 2433 | int err; |
2289 | 2434 | ||
2290 | if (!capable(CAP_NET_ADMIN)) | 2435 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
2291 | return -EPERM; | 2436 | return -EPERM; |
2292 | 2437 | ||
2293 | if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) | 2438 | if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) |
@@ -2315,7 +2460,7 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, | |||
2315 | } | 2460 | } |
2316 | } | 2461 | } |
2317 | 2462 | ||
2318 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) | 2463 | #if IS_ENABLED(CONFIG_IPV6_SIT) |
2319 | static void sit_add_v4_addrs(struct inet6_dev *idev) | 2464 | static void sit_add_v4_addrs(struct inet6_dev *idev) |
2320 | { | 2465 | { |
2321 | struct in6_addr addr; | 2466 | struct in6_addr addr; |
@@ -2434,7 +2579,7 @@ static void addrconf_dev_config(struct net_device *dev) | |||
2434 | addrconf_add_linklocal(idev, &addr); | 2579 | addrconf_add_linklocal(idev, &addr); |
2435 | } | 2580 | } |
2436 | 2581 | ||
2437 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) | 2582 | #if IS_ENABLED(CONFIG_IPV6_SIT) |
2438 | static void addrconf_sit_config(struct net_device *dev) | 2583 | static void addrconf_sit_config(struct net_device *dev) |
2439 | { | 2584 | { |
2440 | struct inet6_dev *idev; | 2585 | struct inet6_dev *idev; |
@@ -2471,7 +2616,7 @@ static void addrconf_sit_config(struct net_device *dev) | |||
2471 | } | 2616 | } |
2472 | #endif | 2617 | #endif |
2473 | 2618 | ||
2474 | #if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE) | 2619 | #if IS_ENABLED(CONFIG_NET_IPGRE) |
2475 | static void addrconf_gre_config(struct net_device *dev) | 2620 | static void addrconf_gre_config(struct net_device *dev) |
2476 | { | 2621 | { |
2477 | struct inet6_dev *idev; | 2622 | struct inet6_dev *idev; |
@@ -2601,12 +2746,12 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2601 | } | 2746 | } |
2602 | 2747 | ||
2603 | switch (dev->type) { | 2748 | switch (dev->type) { |
2604 | #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) | 2749 | #if IS_ENABLED(CONFIG_IPV6_SIT) |
2605 | case ARPHRD_SIT: | 2750 | case ARPHRD_SIT: |
2606 | addrconf_sit_config(dev); | 2751 | addrconf_sit_config(dev); |
2607 | break; | 2752 | break; |
2608 | #endif | 2753 | #endif |
2609 | #if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE) | 2754 | #if IS_ENABLED(CONFIG_NET_IPGRE) |
2610 | case ARPHRD_IPGRE: | 2755 | case ARPHRD_IPGRE: |
2611 | addrconf_gre_config(dev); | 2756 | addrconf_gre_config(dev); |
2612 | break; | 2757 | break; |
@@ -3194,7 +3339,7 @@ void if6_proc_exit(void) | |||
3194 | } | 3339 | } |
3195 | #endif /* CONFIG_PROC_FS */ | 3340 | #endif /* CONFIG_PROC_FS */ |
3196 | 3341 | ||
3197 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 3342 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
3198 | /* Check if address is a home address configured on any interface. */ | 3343 | /* Check if address is a home address configured on any interface. */ |
3199 | int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr) | 3344 | int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr) |
3200 | { | 3345 | { |
@@ -3892,6 +4037,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, | |||
3892 | array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; | 4037 | array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; |
3893 | array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; | 4038 | array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; |
3894 | array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao; | 4039 | array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao; |
4040 | array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify; | ||
3895 | } | 4041 | } |
3896 | 4042 | ||
3897 | static inline size_t inet6_ifla6_size(void) | 4043 | static inline size_t inet6_ifla6_size(void) |
@@ -4560,6 +4706,13 @@ static struct addrconf_sysctl_table | |||
4560 | .proc_handler = proc_dointvec | 4706 | .proc_handler = proc_dointvec |
4561 | }, | 4707 | }, |
4562 | { | 4708 | { |
4709 | .procname = "ndisc_notify", | ||
4710 | .data = &ipv6_devconf.ndisc_notify, | ||
4711 | .maxlen = sizeof(int), | ||
4712 | .mode = 0644, | ||
4713 | .proc_handler = proc_dointvec | ||
4714 | }, | ||
4715 | { | ||
4563 | /* sentinel */ | 4716 | /* sentinel */ |
4564 | } | 4717 | } |
4565 | }, | 4718 | }, |
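Editor's note: the new ndisc_notify knob is exposed both as a per-device sysctl and through DEVCONF_NDISC_NOTIFY in the netlink devconf dump; when set, the device (in code elsewhere in this series) announces itself with an unsolicited neighbour advertisement when it comes up or its link-layer address changes. A minimal sketch of switching it on from userspace, assuming an interface named eth0:

        /* Hedged sketch: enable ndisc_notify for one interface via procfs. */
        #include <stdio.h>

        int main(void)
        {
                /* Path assumes an interface named eth0; 0 (default) = off, 1 = on. */
                FILE *f = fopen("/proc/sys/net/ipv6/conf/eth0/ndisc_notify", "w");

                if (!f)
                        return 1;
                fputs("1\n", f);
                fclose(f);
                return 0;
        }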
@@ -4784,6 +4937,8 @@ int __init addrconf_init(void) | |||
4784 | inet6_dump_ifmcaddr, NULL); | 4937 | inet6_dump_ifmcaddr, NULL); |
4785 | __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, | 4938 | __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, |
4786 | inet6_dump_ifacaddr, NULL); | 4939 | inet6_dump_ifacaddr, NULL); |
4940 | __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf, | ||
4941 | NULL, NULL); | ||
4787 | 4942 | ||
4788 | ipv6_addr_label_rtnl_register(); | 4943 | ipv6_addr_label_rtnl_register(); |
4789 | 4944 | ||
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index a974247a9ae4..b043c60429bd 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -160,7 +160,8 @@ lookup_protocol: | |||
160 | } | 160 | } |
161 | 161 | ||
162 | err = -EPERM; | 162 | err = -EPERM; |
163 | if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) | 163 | if (sock->type == SOCK_RAW && !kern && |
164 | !ns_capable(net->user_ns, CAP_NET_RAW)) | ||
164 | goto out_rcu_unlock; | 165 | goto out_rcu_unlock; |
165 | 166 | ||
166 | sock->ops = answer->ops; | 167 | sock->ops = answer->ops; |
@@ -282,7 +283,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
282 | return -EINVAL; | 283 | return -EINVAL; |
283 | 284 | ||
284 | snum = ntohs(addr->sin6_port); | 285 | snum = ntohs(addr->sin6_port); |
285 | if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) | 286 | if (snum && snum < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) |
286 | return -EACCES; | 287 | return -EACCES; |
287 | 288 | ||
288 | lock_sock(sk); | 289 | lock_sock(sk); |
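Editor's note: the capability checks here and in the surrounding ioctl/sockopt paths switch from capable(), which only honours privilege in the initial user namespace, to ns_capable(net->user_ns, ...), so a user who is privileged over the user namespace owning this network namespace may bind low ports, add addresses, configure tunnels, and so on. A standalone kernel-side sketch of the pattern (the function name is illustrative):

        /* Hedged sketch of the namespace-aware check used above (kernel context). */
        #include <linux/capability.h>
        #include <net/net_namespace.h>

        static int example_privileged_op(struct net *net)
        {
                /* Privileged over the userns that owns this netns -- not
                 * necessarily over the initial namespace. */
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
                /* ... act on 'net' ... */
                return 0;
        }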
@@ -699,249 +700,9 @@ bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb) | |||
699 | } | 700 | } |
700 | EXPORT_SYMBOL_GPL(ipv6_opt_accepted); | 701 | EXPORT_SYMBOL_GPL(ipv6_opt_accepted); |
701 | 702 | ||
702 | static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) | ||
703 | { | ||
704 | const struct inet6_protocol *ops = NULL; | ||
705 | |||
706 | for (;;) { | ||
707 | struct ipv6_opt_hdr *opth; | ||
708 | int len; | ||
709 | |||
710 | if (proto != NEXTHDR_HOP) { | ||
711 | ops = rcu_dereference(inet6_protos[proto]); | ||
712 | |||
713 | if (unlikely(!ops)) | ||
714 | break; | ||
715 | |||
716 | if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) | ||
717 | break; | ||
718 | } | ||
719 | |||
720 | if (unlikely(!pskb_may_pull(skb, 8))) | ||
721 | break; | ||
722 | |||
723 | opth = (void *)skb->data; | ||
724 | len = ipv6_optlen(opth); | ||
725 | |||
726 | if (unlikely(!pskb_may_pull(skb, len))) | ||
727 | break; | ||
728 | |||
729 | proto = opth->nexthdr; | ||
730 | __skb_pull(skb, len); | ||
731 | } | ||
732 | |||
733 | return proto; | ||
734 | } | ||
735 | |||
736 | static int ipv6_gso_send_check(struct sk_buff *skb) | ||
737 | { | ||
738 | const struct ipv6hdr *ipv6h; | ||
739 | const struct inet6_protocol *ops; | ||
740 | int err = -EINVAL; | ||
741 | |||
742 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
743 | goto out; | ||
744 | |||
745 | ipv6h = ipv6_hdr(skb); | ||
746 | __skb_pull(skb, sizeof(*ipv6h)); | ||
747 | err = -EPROTONOSUPPORT; | ||
748 | |||
749 | rcu_read_lock(); | ||
750 | ops = rcu_dereference(inet6_protos[ | ||
751 | ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); | ||
752 | |||
753 | if (likely(ops && ops->gso_send_check)) { | ||
754 | skb_reset_transport_header(skb); | ||
755 | err = ops->gso_send_check(skb); | ||
756 | } | ||
757 | rcu_read_unlock(); | ||
758 | |||
759 | out: | ||
760 | return err; | ||
761 | } | ||
762 | |||
763 | static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | ||
764 | netdev_features_t features) | ||
765 | { | ||
766 | struct sk_buff *segs = ERR_PTR(-EINVAL); | ||
767 | struct ipv6hdr *ipv6h; | ||
768 | const struct inet6_protocol *ops; | ||
769 | int proto; | ||
770 | struct frag_hdr *fptr; | ||
771 | unsigned int unfrag_ip6hlen; | ||
772 | u8 *prevhdr; | ||
773 | int offset = 0; | ||
774 | |||
775 | if (!(features & NETIF_F_V6_CSUM)) | ||
776 | features &= ~NETIF_F_SG; | ||
777 | |||
778 | if (unlikely(skb_shinfo(skb)->gso_type & | ||
779 | ~(SKB_GSO_UDP | | ||
780 | SKB_GSO_DODGY | | ||
781 | SKB_GSO_TCP_ECN | | ||
782 | SKB_GSO_TCPV6 | | ||
783 | 0))) | ||
784 | goto out; | ||
785 | |||
786 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
787 | goto out; | ||
788 | |||
789 | ipv6h = ipv6_hdr(skb); | ||
790 | __skb_pull(skb, sizeof(*ipv6h)); | ||
791 | segs = ERR_PTR(-EPROTONOSUPPORT); | ||
792 | |||
793 | proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | ||
794 | rcu_read_lock(); | ||
795 | ops = rcu_dereference(inet6_protos[proto]); | ||
796 | if (likely(ops && ops->gso_segment)) { | ||
797 | skb_reset_transport_header(skb); | ||
798 | segs = ops->gso_segment(skb, features); | ||
799 | } | ||
800 | rcu_read_unlock(); | ||
801 | |||
802 | if (IS_ERR(segs)) | ||
803 | goto out; | ||
804 | |||
805 | for (skb = segs; skb; skb = skb->next) { | ||
806 | ipv6h = ipv6_hdr(skb); | ||
807 | ipv6h->payload_len = htons(skb->len - skb->mac_len - | ||
808 | sizeof(*ipv6h)); | ||
809 | if (proto == IPPROTO_UDP) { | ||
810 | unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); | ||
811 | fptr = (struct frag_hdr *)(skb_network_header(skb) + | ||
812 | unfrag_ip6hlen); | ||
813 | fptr->frag_off = htons(offset); | ||
814 | if (skb->next != NULL) | ||
815 | fptr->frag_off |= htons(IP6_MF); | ||
816 | offset += (ntohs(ipv6h->payload_len) - | ||
817 | sizeof(struct frag_hdr)); | ||
818 | } | ||
819 | } | ||
820 | |||
821 | out: | ||
822 | return segs; | ||
823 | } | ||
824 | |||
825 | static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, | ||
826 | struct sk_buff *skb) | ||
827 | { | ||
828 | const struct inet6_protocol *ops; | ||
829 | struct sk_buff **pp = NULL; | ||
830 | struct sk_buff *p; | ||
831 | struct ipv6hdr *iph; | ||
832 | unsigned int nlen; | ||
833 | unsigned int hlen; | ||
834 | unsigned int off; | ||
835 | int flush = 1; | ||
836 | int proto; | ||
837 | __wsum csum; | ||
838 | |||
839 | off = skb_gro_offset(skb); | ||
840 | hlen = off + sizeof(*iph); | ||
841 | iph = skb_gro_header_fast(skb, off); | ||
842 | if (skb_gro_header_hard(skb, hlen)) { | ||
843 | iph = skb_gro_header_slow(skb, hlen, off); | ||
844 | if (unlikely(!iph)) | ||
845 | goto out; | ||
846 | } | ||
847 | |||
848 | skb_gro_pull(skb, sizeof(*iph)); | ||
849 | skb_set_transport_header(skb, skb_gro_offset(skb)); | ||
850 | |||
851 | flush += ntohs(iph->payload_len) != skb_gro_len(skb); | ||
852 | |||
853 | rcu_read_lock(); | ||
854 | proto = iph->nexthdr; | ||
855 | ops = rcu_dereference(inet6_protos[proto]); | ||
856 | if (!ops || !ops->gro_receive) { | ||
857 | __pskb_pull(skb, skb_gro_offset(skb)); | ||
858 | proto = ipv6_gso_pull_exthdrs(skb, proto); | ||
859 | skb_gro_pull(skb, -skb_transport_offset(skb)); | ||
860 | skb_reset_transport_header(skb); | ||
861 | __skb_push(skb, skb_gro_offset(skb)); | ||
862 | |||
863 | ops = rcu_dereference(inet6_protos[proto]); | ||
864 | if (!ops || !ops->gro_receive) | ||
865 | goto out_unlock; | ||
866 | |||
867 | iph = ipv6_hdr(skb); | ||
868 | } | ||
869 | |||
870 | NAPI_GRO_CB(skb)->proto = proto; | ||
871 | |||
872 | flush--; | ||
873 | nlen = skb_network_header_len(skb); | ||
874 | |||
875 | for (p = *head; p; p = p->next) { | ||
876 | const struct ipv6hdr *iph2; | ||
877 | __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */ | ||
878 | |||
879 | if (!NAPI_GRO_CB(p)->same_flow) | ||
880 | continue; | ||
881 | |||
882 | iph2 = ipv6_hdr(p); | ||
883 | first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ; | ||
884 | |||
885 | /* All fields must match except length and Traffic Class. */ | ||
886 | if (nlen != skb_network_header_len(p) || | ||
887 | (first_word & htonl(0xF00FFFFF)) || | ||
888 | memcmp(&iph->nexthdr, &iph2->nexthdr, | ||
889 | nlen - offsetof(struct ipv6hdr, nexthdr))) { | ||
890 | NAPI_GRO_CB(p)->same_flow = 0; | ||
891 | continue; | ||
892 | } | ||
893 | /* flush if Traffic Class fields are different */ | ||
894 | NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); | ||
895 | NAPI_GRO_CB(p)->flush |= flush; | ||
896 | } | ||
897 | |||
898 | NAPI_GRO_CB(skb)->flush |= flush; | ||
899 | |||
900 | csum = skb->csum; | ||
901 | skb_postpull_rcsum(skb, iph, skb_network_header_len(skb)); | ||
902 | |||
903 | pp = ops->gro_receive(head, skb); | ||
904 | |||
905 | skb->csum = csum; | ||
906 | |||
907 | out_unlock: | ||
908 | rcu_read_unlock(); | ||
909 | |||
910 | out: | ||
911 | NAPI_GRO_CB(skb)->flush |= flush; | ||
912 | |||
913 | return pp; | ||
914 | } | ||
915 | |||
916 | static int ipv6_gro_complete(struct sk_buff *skb) | ||
917 | { | ||
918 | const struct inet6_protocol *ops; | ||
919 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
920 | int err = -ENOSYS; | ||
921 | |||
922 | iph->payload_len = htons(skb->len - skb_network_offset(skb) - | ||
923 | sizeof(*iph)); | ||
924 | |||
925 | rcu_read_lock(); | ||
926 | ops = rcu_dereference(inet6_protos[NAPI_GRO_CB(skb)->proto]); | ||
927 | if (WARN_ON(!ops || !ops->gro_complete)) | ||
928 | goto out_unlock; | ||
929 | |||
930 | err = ops->gro_complete(skb); | ||
931 | |||
932 | out_unlock: | ||
933 | rcu_read_unlock(); | ||
934 | |||
935 | return err; | ||
936 | } | ||
937 | |||
938 | static struct packet_type ipv6_packet_type __read_mostly = { | 703 | static struct packet_type ipv6_packet_type __read_mostly = { |
939 | .type = cpu_to_be16(ETH_P_IPV6), | 704 | .type = cpu_to_be16(ETH_P_IPV6), |
940 | .func = ipv6_rcv, | 705 | .func = ipv6_rcv, |
941 | .gso_send_check = ipv6_gso_send_check, | ||
942 | .gso_segment = ipv6_gso_segment, | ||
943 | .gro_receive = ipv6_gro_receive, | ||
944 | .gro_complete = ipv6_gro_complete, | ||
945 | }; | 706 | }; |
946 | 707 | ||
947 | static int __init ipv6_packet_init(void) | 708 | static int __init ipv6_packet_init(void) |
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 7e6139508ee7..ecc35b93314b 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #define IPV6HDR_BASELEN 8 | 44 | #define IPV6HDR_BASELEN 8 |
45 | 45 | ||
46 | struct tmp_ext { | 46 | struct tmp_ext { |
47 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 47 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
48 | struct in6_addr saddr; | 48 | struct in6_addr saddr; |
49 | #endif | 49 | #endif |
50 | struct in6_addr daddr; | 50 | struct in6_addr daddr; |
@@ -152,7 +152,7 @@ bad: | |||
152 | return false; | 152 | return false; |
153 | } | 153 | } |
154 | 154 | ||
155 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 155 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
156 | /** | 156 | /** |
157 | * ipv6_rearrange_destopt - rearrange IPv6 destination options header | 157 | * ipv6_rearrange_destopt - rearrange IPv6 destination options header |
158 | * @iph: IPv6 header | 158 | * @iph: IPv6 header |
@@ -320,7 +320,7 @@ static void ah6_output_done(struct crypto_async_request *base, int err) | |||
320 | memcpy(top_iph, iph_base, IPV6HDR_BASELEN); | 320 | memcpy(top_iph, iph_base, IPV6HDR_BASELEN); |
321 | 321 | ||
322 | if (extlen) { | 322 | if (extlen) { |
323 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 323 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
324 | memcpy(&top_iph->saddr, iph_ext, extlen); | 324 | memcpy(&top_iph->saddr, iph_ext, extlen); |
325 | #else | 325 | #else |
326 | memcpy(&top_iph->daddr, iph_ext, extlen); | 326 | memcpy(&top_iph->daddr, iph_ext, extlen); |
@@ -385,7 +385,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
385 | memcpy(iph_base, top_iph, IPV6HDR_BASELEN); | 385 | memcpy(iph_base, top_iph, IPV6HDR_BASELEN); |
386 | 386 | ||
387 | if (extlen) { | 387 | if (extlen) { |
388 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 388 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
389 | memcpy(iph_ext, &top_iph->saddr, extlen); | 389 | memcpy(iph_ext, &top_iph->saddr, extlen); |
390 | #else | 390 | #else |
391 | memcpy(iph_ext, &top_iph->daddr, extlen); | 391 | memcpy(iph_ext, &top_iph->daddr, extlen); |
@@ -434,7 +434,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
434 | memcpy(top_iph, iph_base, IPV6HDR_BASELEN); | 434 | memcpy(top_iph, iph_base, IPV6HDR_BASELEN); |
435 | 435 | ||
436 | if (extlen) { | 436 | if (extlen) { |
437 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 437 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
438 | memcpy(&top_iph->saddr, iph_ext, extlen); | 438 | memcpy(&top_iph->saddr, iph_ext, extlen); |
439 | #else | 439 | #else |
440 | memcpy(&top_iph->daddr, iph_ext, extlen); | 440 | memcpy(&top_iph->daddr, iph_ext, extlen); |
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index cdf02be5f191..2f4f584d796d 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c | |||
@@ -64,7 +64,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
64 | int ishost = !net->ipv6.devconf_all->forwarding; | 64 | int ishost = !net->ipv6.devconf_all->forwarding; |
65 | int err = 0; | 65 | int err = 0; |
66 | 66 | ||
67 | if (!capable(CAP_NET_ADMIN)) | 67 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
68 | return -EPERM; | 68 | return -EPERM; |
69 | if (ipv6_addr_is_multicast(addr)) | 69 | if (ipv6_addr_is_multicast(addr)) |
70 | return -EINVAL; | 70 | return -EINVAL; |
@@ -84,7 +84,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
84 | rt = rt6_lookup(net, addr, NULL, 0, 0); | 84 | rt = rt6_lookup(net, addr, NULL, 0, 0); |
85 | if (rt) { | 85 | if (rt) { |
86 | dev = rt->dst.dev; | 86 | dev = rt->dst.dev; |
87 | dst_release(&rt->dst); | 87 | ip6_rt_put(rt); |
88 | } else if (ishost) { | 88 | } else if (ishost) { |
89 | err = -EADDRNOTAVAIL; | 89 | err = -EADDRNOTAVAIL; |
90 | goto error; | 90 | goto error; |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index be2b67d631e5..8edf2601065a 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -701,7 +701,7 @@ int datagram_send_ctl(struct net *net, struct sock *sk, | |||
701 | err = -EINVAL; | 701 | err = -EINVAL; |
702 | goto exit_f; | 702 | goto exit_f; |
703 | } | 703 | } |
704 | if (!capable(CAP_NET_RAW)) { | 704 | if (!ns_capable(net->user_ns, CAP_NET_RAW)) { |
705 | err = -EPERM; | 705 | err = -EPERM; |
706 | goto exit_f; | 706 | goto exit_f; |
707 | } | 707 | } |
@@ -721,7 +721,7 @@ int datagram_send_ctl(struct net *net, struct sock *sk, | |||
721 | err = -EINVAL; | 721 | err = -EINVAL; |
722 | goto exit_f; | 722 | goto exit_f; |
723 | } | 723 | } |
724 | if (!capable(CAP_NET_RAW)) { | 724 | if (!ns_capable(net->user_ns, CAP_NET_RAW)) { |
725 | err = -EPERM; | 725 | err = -EPERM; |
726 | goto exit_f; | 726 | goto exit_f; |
727 | } | 727 | } |
@@ -746,7 +746,7 @@ int datagram_send_ctl(struct net *net, struct sock *sk, | |||
746 | err = -EINVAL; | 746 | err = -EINVAL; |
747 | goto exit_f; | 747 | goto exit_f; |
748 | } | 748 | } |
749 | if (!capable(CAP_NET_RAW)) { | 749 | if (!ns_capable(net->user_ns, CAP_NET_RAW)) { |
750 | err = -EPERM; | 750 | err = -EPERM; |
751 | goto exit_f; | 751 | goto exit_f; |
752 | } | 752 | } |
@@ -769,7 +769,7 @@ int datagram_send_ctl(struct net *net, struct sock *sk, | |||
769 | rthdr = (struct ipv6_rt_hdr *)CMSG_DATA(cmsg); | 769 | rthdr = (struct ipv6_rt_hdr *)CMSG_DATA(cmsg); |
770 | 770 | ||
771 | switch (rthdr->type) { | 771 | switch (rthdr->type) { |
772 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 772 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
773 | case IPV6_SRCRT_TYPE_2: | 773 | case IPV6_SRCRT_TYPE_2: |
774 | if (rthdr->hdrlen != 2 || | 774 | if (rthdr->hdrlen != 2 || |
775 | rthdr->segments_left != 1) { | 775 | rthdr->segments_left != 1) { |
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index fa3d9c328092..473f628f9f20 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -43,56 +43,12 @@ | |||
43 | #include <net/ndisc.h> | 43 | #include <net/ndisc.h> |
44 | #include <net/ip6_route.h> | 44 | #include <net/ip6_route.h> |
45 | #include <net/addrconf.h> | 45 | #include <net/addrconf.h> |
46 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 46 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
47 | #include <net/xfrm.h> | 47 | #include <net/xfrm.h> |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #include <asm/uaccess.h> | 50 | #include <asm/uaccess.h> |
51 | 51 | ||
52 | int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) | ||
53 | { | ||
54 | const unsigned char *nh = skb_network_header(skb); | ||
55 | int packet_len = skb->tail - skb->network_header; | ||
56 | struct ipv6_opt_hdr *hdr; | ||
57 | int len; | ||
58 | |||
59 | if (offset + 2 > packet_len) | ||
60 | goto bad; | ||
61 | hdr = (struct ipv6_opt_hdr *)(nh + offset); | ||
62 | len = ((hdr->hdrlen + 1) << 3); | ||
63 | |||
64 | if (offset + len > packet_len) | ||
65 | goto bad; | ||
66 | |||
67 | offset += 2; | ||
68 | len -= 2; | ||
69 | |||
70 | while (len > 0) { | ||
71 | int opttype = nh[offset]; | ||
72 | int optlen; | ||
73 | |||
74 | if (opttype == type) | ||
75 | return offset; | ||
76 | |||
77 | switch (opttype) { | ||
78 | case IPV6_TLV_PAD1: | ||
79 | optlen = 1; | ||
80 | break; | ||
81 | default: | ||
82 | optlen = nh[offset + 1] + 2; | ||
83 | if (optlen > len) | ||
84 | goto bad; | ||
85 | break; | ||
86 | } | ||
87 | offset += optlen; | ||
88 | len -= optlen; | ||
89 | } | ||
90 | /* not_found */ | ||
91 | bad: | ||
92 | return -1; | ||
93 | } | ||
94 | EXPORT_SYMBOL_GPL(ipv6_find_tlv); | ||
95 | |||
96 | /* | 52 | /* |
97 | * Parsing tlv encoded headers. | 53 | * Parsing tlv encoded headers. |
98 | * | 54 | * |
@@ -224,7 +180,7 @@ bad: | |||
224 | Destination options header. | 180 | Destination options header. |
225 | *****************************/ | 181 | *****************************/ |
226 | 182 | ||
227 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 183 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
228 | static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) | 184 | static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) |
229 | { | 185 | { |
230 | struct ipv6_destopt_hao *hao; | 186 | struct ipv6_destopt_hao *hao; |
@@ -288,7 +244,7 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) | |||
288 | #endif | 244 | #endif |
289 | 245 | ||
290 | static const struct tlvtype_proc tlvprocdestopt_lst[] = { | 246 | static const struct tlvtype_proc tlvprocdestopt_lst[] = { |
291 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 247 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
292 | { | 248 | { |
293 | .type = IPV6_TLV_HAO, | 249 | .type = IPV6_TLV_HAO, |
294 | .func = ipv6_dest_hao, | 250 | .func = ipv6_dest_hao, |
@@ -300,7 +256,7 @@ static const struct tlvtype_proc tlvprocdestopt_lst[] = { | |||
300 | static int ipv6_destopt_rcv(struct sk_buff *skb) | 256 | static int ipv6_destopt_rcv(struct sk_buff *skb) |
301 | { | 257 | { |
302 | struct inet6_skb_parm *opt = IP6CB(skb); | 258 | struct inet6_skb_parm *opt = IP6CB(skb); |
303 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 259 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
304 | __u16 dstbuf; | 260 | __u16 dstbuf; |
305 | #endif | 261 | #endif |
306 | struct dst_entry *dst = skb_dst(skb); | 262 | struct dst_entry *dst = skb_dst(skb); |
@@ -315,14 +271,14 @@ static int ipv6_destopt_rcv(struct sk_buff *skb) | |||
315 | } | 271 | } |
316 | 272 | ||
317 | opt->lastopt = opt->dst1 = skb_network_header_len(skb); | 273 | opt->lastopt = opt->dst1 = skb_network_header_len(skb); |
318 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 274 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
319 | dstbuf = opt->dst1; | 275 | dstbuf = opt->dst1; |
320 | #endif | 276 | #endif |
321 | 277 | ||
322 | if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) { | 278 | if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) { |
323 | skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; | 279 | skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; |
324 | opt = IP6CB(skb); | 280 | opt = IP6CB(skb); |
325 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 281 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
326 | opt->nhoff = dstbuf; | 282 | opt->nhoff = dstbuf; |
327 | #else | 283 | #else |
328 | opt->nhoff = opt->dst1; | 284 | opt->nhoff = opt->dst1; |
@@ -378,7 +334,7 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb) | |||
378 | looped_back: | 334 | looped_back: |
379 | if (hdr->segments_left == 0) { | 335 | if (hdr->segments_left == 0) { |
380 | switch (hdr->type) { | 336 | switch (hdr->type) { |
381 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 337 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
382 | case IPV6_SRCRT_TYPE_2: | 338 | case IPV6_SRCRT_TYPE_2: |
383 | /* Silently discard type 2 header unless it was | 339 | /* Silently discard type 2 header unless it was |
384 | * processed by own | 340 | * processed by own |
@@ -404,7 +360,7 @@ looped_back: | |||
404 | } | 360 | } |
405 | 361 | ||
406 | switch (hdr->type) { | 362 | switch (hdr->type) { |
407 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 363 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
408 | case IPV6_SRCRT_TYPE_2: | 364 | case IPV6_SRCRT_TYPE_2: |
409 | if (accept_source_route < 0) | 365 | if (accept_source_route < 0) |
410 | goto unknown_rh; | 366 | goto unknown_rh; |
@@ -461,7 +417,7 @@ looped_back: | |||
461 | addr += i - 1; | 417 | addr += i - 1; |
462 | 418 | ||
463 | switch (hdr->type) { | 419 | switch (hdr->type) { |
464 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 420 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
465 | case IPV6_SRCRT_TYPE_2: | 421 | case IPV6_SRCRT_TYPE_2: |
466 | if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, | 422 | if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, |
467 | (xfrm_address_t *)&ipv6_hdr(skb)->saddr, | 423 | (xfrm_address_t *)&ipv6_hdr(skb)->saddr, |
@@ -528,12 +484,12 @@ unknown_rh: | |||
528 | 484 | ||
529 | static const struct inet6_protocol rthdr_protocol = { | 485 | static const struct inet6_protocol rthdr_protocol = { |
530 | .handler = ipv6_rthdr_rcv, | 486 | .handler = ipv6_rthdr_rcv, |
531 | .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR, | 487 | .flags = INET6_PROTO_NOPOLICY, |
532 | }; | 488 | }; |
533 | 489 | ||
534 | static const struct inet6_protocol destopt_protocol = { | 490 | static const struct inet6_protocol destopt_protocol = { |
535 | .handler = ipv6_destopt_rcv, | 491 | .handler = ipv6_destopt_rcv, |
536 | .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR, | 492 | .flags = INET6_PROTO_NOPOLICY, |
537 | }; | 493 | }; |
538 | 494 | ||
539 | static const struct inet6_protocol nodata_protocol = { | 495 | static const struct inet6_protocol nodata_protocol = { |
@@ -559,10 +515,10 @@ int __init ipv6_exthdrs_init(void) | |||
559 | 515 | ||
560 | out: | 516 | out: |
561 | return ret; | 517 | return ret; |
562 | out_rthdr: | ||
563 | inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING); | ||
564 | out_destopt: | 518 | out_destopt: |
565 | inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS); | 519 | inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS); |
520 | out_rthdr: | ||
521 | inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING); | ||
566 | goto out; | 522 | goto out; |
567 | }; | 523 | }; |
568 | 524 | ||
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c index f73d59a14131..e7d756e19d1d 100644 --- a/net/ipv6/exthdrs_core.c +++ b/net/ipv6/exthdrs_core.c | |||
@@ -111,3 +111,47 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp, | |||
111 | return start; | 111 | return start; |
112 | } | 112 | } |
113 | EXPORT_SYMBOL(ipv6_skip_exthdr); | 113 | EXPORT_SYMBOL(ipv6_skip_exthdr); |
114 | |||
115 | int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) | ||
116 | { | ||
117 | const unsigned char *nh = skb_network_header(skb); | ||
118 | int packet_len = skb->tail - skb->network_header; | ||
119 | struct ipv6_opt_hdr *hdr; | ||
120 | int len; | ||
121 | |||
122 | if (offset + 2 > packet_len) | ||
123 | goto bad; | ||
124 | hdr = (struct ipv6_opt_hdr *)(nh + offset); | ||
125 | len = ((hdr->hdrlen + 1) << 3); | ||
126 | |||
127 | if (offset + len > packet_len) | ||
128 | goto bad; | ||
129 | |||
130 | offset += 2; | ||
131 | len -= 2; | ||
132 | |||
133 | while (len > 0) { | ||
134 | int opttype = nh[offset]; | ||
135 | int optlen; | ||
136 | |||
137 | if (opttype == type) | ||
138 | return offset; | ||
139 | |||
140 | switch (opttype) { | ||
141 | case IPV6_TLV_PAD1: | ||
142 | optlen = 1; | ||
143 | break; | ||
144 | default: | ||
145 | optlen = nh[offset + 1] + 2; | ||
146 | if (optlen > len) | ||
147 | goto bad; | ||
148 | break; | ||
149 | } | ||
150 | offset += optlen; | ||
151 | len -= optlen; | ||
152 | } | ||
153 | /* not_found */ | ||
154 | bad: | ||
155 | return -1; | ||
156 | } | ||
157 | EXPORT_SYMBOL_GPL(ipv6_find_tlv); | ||
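Editor's note: ipv6_find_tlv() is moved verbatim from exthdrs.c into exthdrs_core.c, presumably so it remains available from the always-built IPv6 core even when CONFIG_IPV6=m. Its behaviour is unchanged: given the offset of an extension header within the packet, it returns the offset of the first TLV option of the requested type, or -1. A small kernel-side usage sketch, mirroring the existing Home Address Option lookup (the helper name is illustrative):

        /* Hedged sketch: check whether a Destination Options header at
         * 'dstopt_offset' (relative to skb_network_header()) carries a HAO. */
        #include <linux/skbuff.h>
        #include <linux/ipv6.h>
        #include <net/ipv6.h>

        static bool skb_has_hao(struct sk_buff *skb, int dstopt_offset)
        {
                /* ipv6_find_tlv() returns the offset of the option type byte, or -1. */
                return ipv6_find_tlv(skb, dstopt_offset, IPV6_TLV_HAO) >= 0;
        }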
diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c new file mode 100644 index 000000000000..cf77f3abfd06 --- /dev/null +++ b/net/ipv6/exthdrs_offload.c | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * IPV6 GSO/GRO offload support | ||
3 | * Linux INET6 implementation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * IPV6 Extension Header GSO/GRO support | ||
11 | */ | ||
12 | #include <net/protocol.h> | ||
13 | #include "ip6_offload.h" | ||
14 | |||
15 | static const struct net_offload rthdr_offload = { | ||
16 | .flags = INET6_PROTO_GSO_EXTHDR, | ||
17 | }; | ||
18 | |||
19 | static const struct net_offload dstopt_offload = { | ||
20 | .flags = INET6_PROTO_GSO_EXTHDR, | ||
21 | }; | ||
22 | |||
23 | int __init ipv6_exthdrs_offload_init(void) | ||
24 | { | ||
25 | int ret; | ||
26 | |||
27 | ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING); | ||
28 | if (!ret) | ||
29 | goto out; | ||
30 | |||
31 | ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS); | ||
32 | if (!ret) | ||
33 | goto out_rt; | ||
34 | |||
35 | out: | ||
36 | return ret; | ||
37 | |||
38 | out_rt: | ||
39 | inet_del_offload(&rthdr_offload, IPPROTO_ROUTING); | ||
40 | goto out; | ||
41 | } | ||
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index d9fb9110f607..2e1a432867c0 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
@@ -100,7 +100,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
100 | goto out; | 100 | goto out; |
101 | } | 101 | } |
102 | again: | 102 | again: |
103 | dst_release(&rt->dst); | 103 | ip6_rt_put(rt); |
104 | rt = NULL; | 104 | rt = NULL; |
105 | goto out; | 105 | goto out; |
106 | 106 | ||
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 24d69dbca4d6..b4a9fd51dae7 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -280,7 +280,7 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st | |||
280 | return 0; | 280 | return 0; |
281 | } | 281 | } |
282 | 282 | ||
283 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 283 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
284 | static void mip6_addr_swap(struct sk_buff *skb) | 284 | static void mip6_addr_swap(struct sk_buff *skb) |
285 | { | 285 | { |
286 | struct ipv6hdr *iph = ipv6_hdr(skb); | 286 | struct ipv6hdr *iph = ipv6_hdr(skb); |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 24995a93ef8c..710cafd2e1a9 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -672,6 +672,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, | |||
672 | iter->rt6i_idev == rt->rt6i_idev && | 672 | iter->rt6i_idev == rt->rt6i_idev && |
673 | ipv6_addr_equal(&iter->rt6i_gateway, | 673 | ipv6_addr_equal(&iter->rt6i_gateway, |
674 | &rt->rt6i_gateway)) { | 674 | &rt->rt6i_gateway)) { |
675 | if (rt->rt6i_nsiblings) | ||
676 | rt->rt6i_nsiblings = 0; | ||
675 | if (!(iter->rt6i_flags & RTF_EXPIRES)) | 677 | if (!(iter->rt6i_flags & RTF_EXPIRES)) |
676 | return -EEXIST; | 678 | return -EEXIST; |
677 | if (!(rt->rt6i_flags & RTF_EXPIRES)) | 679 | if (!(rt->rt6i_flags & RTF_EXPIRES)) |
@@ -680,6 +682,21 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, | |||
680 | rt6_set_expires(iter, rt->dst.expires); | 682 | rt6_set_expires(iter, rt->dst.expires); |
681 | return -EEXIST; | 683 | return -EEXIST; |
682 | } | 684 | } |
685 | /* If we have the same destination and the same metric, | ||
686 | * but not the same gateway, then the route we try to | ||
687 | * add is a sibling of this route; increment our counter | ||
688 | * of siblings, and later we will add our route to the | ||
689 | * list. | ||
690 | * Only static routes (which don't have flag | ||
691 | * RTF_EXPIRES) are used for ECMPv6. | ||
692 | * | ||
693 | * To avoid a long list, we only add siblings if the | ||
694 | * route has a gateway. | ||
695 | */ | ||
696 | if (rt->rt6i_flags & RTF_GATEWAY && | ||
697 | !(rt->rt6i_flags & RTF_EXPIRES) && | ||
698 | !(iter->rt6i_flags & RTF_EXPIRES)) | ||
699 | rt->rt6i_nsiblings++; | ||
683 | } | 700 | } |
684 | 701 | ||
685 | if (iter->rt6i_metric > rt->rt6i_metric) | 702 | if (iter->rt6i_metric > rt->rt6i_metric) |
@@ -692,6 +709,35 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, | |||
692 | if (ins == &fn->leaf) | 709 | if (ins == &fn->leaf) |
693 | fn->rr_ptr = NULL; | 710 | fn->rr_ptr = NULL; |
694 | 711 | ||
712 | /* Link this route to the other routes with the same destination and metric. */ | ||
713 | if (rt->rt6i_nsiblings) { | ||
714 | unsigned int rt6i_nsiblings; | ||
715 | struct rt6_info *sibling, *temp_sibling; | ||
716 | |||
717 | /* Find the first route that has the same metric */ | ||
718 | sibling = fn->leaf; | ||
719 | while (sibling) { | ||
720 | if (sibling->rt6i_metric == rt->rt6i_metric) { | ||
721 | list_add_tail(&rt->rt6i_siblings, | ||
722 | &sibling->rt6i_siblings); | ||
723 | break; | ||
724 | } | ||
725 | sibling = sibling->dst.rt6_next; | ||
726 | } | ||
727 | /* For each sibling in the list, increment the counter of | ||
728 | * siblings. BUG() if the counters do not match; the list of | ||
729 | * siblings is broken! | ||
730 | */ | ||
731 | rt6i_nsiblings = 0; | ||
732 | list_for_each_entry_safe(sibling, temp_sibling, | ||
733 | &rt->rt6i_siblings, rt6i_siblings) { | ||
734 | sibling->rt6i_nsiblings++; | ||
735 | BUG_ON(sibling->rt6i_nsiblings != rt->rt6i_nsiblings); | ||
736 | rt6i_nsiblings++; | ||
737 | } | ||
738 | BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings); | ||
739 | } | ||
740 | |||
695 | /* | 741 | /* |
696 | * insert node | 742 | * insert node |
697 | */ | 743 | */ |
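Editor's note: the ECMP bookkeeping above links routes that share a destination and metric but differ in gateway onto the rt6i_siblings list, with rt6i_nsiblings on every member counting the other members of the group. Later route-selection code can then walk the group; a hedged sketch of such a walk (the helper name is illustrative):

        /* Hedged sketch: size of an ECMP group as seen from any of its members. */
        #include <linux/list.h>
        #include <net/ip6_fib.h>

        static unsigned int rt6_ecmp_group_size(struct rt6_info *rt)
        {
                struct rt6_info *sibling;
                unsigned int n = 1;             /* 'rt' itself */

                list_for_each_entry(sibling, &rt->rt6i_siblings, rt6i_siblings)
                        n++;
                return n;                       /* == rt->rt6i_nsiblings + 1 */
        }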
@@ -1193,6 +1239,17 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, | |||
1193 | if (fn->rr_ptr == rt) | 1239 | if (fn->rr_ptr == rt) |
1194 | fn->rr_ptr = NULL; | 1240 | fn->rr_ptr = NULL; |
1195 | 1241 | ||
1242 | /* Remove this entry from other siblings */ | ||
1243 | if (rt->rt6i_nsiblings) { | ||
1244 | struct rt6_info *sibling, *next_sibling; | ||
1245 | |||
1246 | list_for_each_entry_safe(sibling, next_sibling, | ||
1247 | &rt->rt6i_siblings, rt6i_siblings) | ||
1248 | sibling->rt6i_nsiblings--; | ||
1249 | rt->rt6i_nsiblings = 0; | ||
1250 | list_del_init(&rt->rt6i_siblings); | ||
1251 | } | ||
1252 | |||
1196 | /* Adjust walkers */ | 1253 | /* Adjust walkers */ |
1197 | read_lock(&fib6_walker_lock); | 1254 | read_lock(&fib6_walker_lock); |
1198 | FOR_WALKERS(w) { | 1255 | FOR_WALKERS(w) { |
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 90bbefb57943..29124b7a04c8 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c | |||
@@ -519,7 +519,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) | |||
519 | } | 519 | } |
520 | read_unlock_bh(&ip6_sk_fl_lock); | 520 | read_unlock_bh(&ip6_sk_fl_lock); |
521 | 521 | ||
522 | if (freq.flr_share == IPV6_FL_S_NONE && capable(CAP_NET_ADMIN)) { | 522 | if (freq.flr_share == IPV6_FL_S_NONE && |
523 | ns_capable(net->user_ns, CAP_NET_ADMIN)) { | ||
523 | fl = fl_lookup(net, freq.flr_label); | 524 | fl = fl_lookup(net, freq.flr_label); |
524 | if (fl) { | 525 | if (fl) { |
525 | err = fl6_renew(fl, freq.flr_linger, freq.flr_expires); | 526 | err = fl6_renew(fl, freq.flr_linger, freq.flr_expires); |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index d5cb3c4e66f8..867466c96aac 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -109,21 +109,6 @@ static u32 HASH_ADDR(const struct in6_addr *addr) | |||
109 | #define tunnels_r tunnels[2] | 109 | #define tunnels_r tunnels[2] |
110 | #define tunnels_l tunnels[1] | 110 | #define tunnels_l tunnels[1] |
111 | #define tunnels_wc tunnels[0] | 111 | #define tunnels_wc tunnels[0] |
112 | /* | ||
113 | * Locking : hash tables are protected by RCU and RTNL | ||
114 | */ | ||
115 | |||
116 | #define for_each_ip_tunnel_rcu(start) \ | ||
117 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
118 | |||
119 | /* often modified stats are per cpu, other are shared (netdev->stats) */ | ||
120 | struct pcpu_tstats { | ||
121 | u64 rx_packets; | ||
122 | u64 rx_bytes; | ||
123 | u64 tx_packets; | ||
124 | u64 tx_bytes; | ||
125 | struct u64_stats_sync syncp; | ||
126 | }; | ||
127 | 112 | ||
128 | static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev, | 113 | static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev, |
129 | struct rtnl_link_stats64 *tot) | 114 | struct rtnl_link_stats64 *tot) |
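Editor's note: the private pcpu_tstats definition and the zero-argument for_each_ip_tunnel_rcu() disappear from ip6_gre.c; the lookup loops below now pass the cursor explicitly, matching a shared definition that presumably lives in a common tunnel header introduced by this series and looks essentially like the sketch below.

        /* Hedged sketch of the shared iterator the converted loops rely on:
         * walk an RCU-protected, singly-linked tunnel chain. */
        #include <linux/rcupdate.h>

        #define for_each_ip_tunnel_rcu(pos, start) \
                for (pos = rcu_dereference(start); pos; pos = rcu_dereference(pos->next))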
@@ -181,7 +166,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, | |||
181 | ARPHRD_ETHER : ARPHRD_IP6GRE; | 166 | ARPHRD_ETHER : ARPHRD_IP6GRE; |
182 | int score, cand_score = 4; | 167 | int score, cand_score = 4; |
183 | 168 | ||
184 | for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) { | 169 | for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) { |
185 | if (!ipv6_addr_equal(local, &t->parms.laddr) || | 170 | if (!ipv6_addr_equal(local, &t->parms.laddr) || |
186 | !ipv6_addr_equal(remote, &t->parms.raddr) || | 171 | !ipv6_addr_equal(remote, &t->parms.raddr) || |
187 | key != t->parms.i_key || | 172 | key != t->parms.i_key || |
@@ -206,7 +191,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, | |||
206 | } | 191 | } |
207 | } | 192 | } |
208 | 193 | ||
209 | for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) { | 194 | for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) { |
210 | if (!ipv6_addr_equal(remote, &t->parms.raddr) || | 195 | if (!ipv6_addr_equal(remote, &t->parms.raddr) || |
211 | key != t->parms.i_key || | 196 | key != t->parms.i_key || |
212 | !(t->dev->flags & IFF_UP)) | 197 | !(t->dev->flags & IFF_UP)) |
@@ -230,7 +215,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, | |||
230 | } | 215 | } |
231 | } | 216 | } |
232 | 217 | ||
233 | for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) { | 218 | for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) { |
234 | if ((!ipv6_addr_equal(local, &t->parms.laddr) && | 219 | if ((!ipv6_addr_equal(local, &t->parms.laddr) && |
235 | (!ipv6_addr_equal(local, &t->parms.raddr) || | 220 | (!ipv6_addr_equal(local, &t->parms.raddr) || |
236 | !ipv6_addr_is_multicast(local))) || | 221 | !ipv6_addr_is_multicast(local))) || |
@@ -256,7 +241,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, | |||
256 | } | 241 | } |
257 | } | 242 | } |
258 | 243 | ||
259 | for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) { | 244 | for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) { |
260 | if (t->parms.i_key != key || | 245 | if (t->parms.i_key != key || |
261 | !(t->dev->flags & IFF_UP)) | 246 | !(t->dev->flags & IFF_UP)) |
262 | continue; | 247 | continue; |
@@ -1069,7 +1054,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) | |||
1069 | dev->mtu = IPV6_MIN_MTU; | 1054 | dev->mtu = IPV6_MIN_MTU; |
1070 | } | 1055 | } |
1071 | } | 1056 | } |
1072 | dst_release(&rt->dst); | 1057 | ip6_rt_put(rt); |
1073 | } | 1058 | } |
1074 | 1059 | ||
1075 | t->hlen = addend; | 1060 | t->hlen = addend; |
@@ -1161,7 +1146,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev, | |||
1161 | case SIOCADDTUNNEL: | 1146 | case SIOCADDTUNNEL: |
1162 | case SIOCCHGTUNNEL: | 1147 | case SIOCCHGTUNNEL: |
1163 | err = -EPERM; | 1148 | err = -EPERM; |
1164 | if (!capable(CAP_NET_ADMIN)) | 1149 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1165 | goto done; | 1150 | goto done; |
1166 | 1151 | ||
1167 | err = -EFAULT; | 1152 | err = -EFAULT; |
@@ -1209,7 +1194,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev, | |||
1209 | 1194 | ||
1210 | case SIOCDELTUNNEL: | 1195 | case SIOCDELTUNNEL: |
1211 | err = -EPERM; | 1196 | err = -EPERM; |
1212 | if (!capable(CAP_NET_ADMIN)) | 1197 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1213 | goto done; | 1198 | goto done; |
1214 | 1199 | ||
1215 | if (dev == ign->fb_tunnel_dev) { | 1200 | if (dev == ign->fb_tunnel_dev) { |
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c new file mode 100644 index 000000000000..f26f0da7f095 --- /dev/null +++ b/net/ipv6/ip6_offload.c | |||
@@ -0,0 +1,282 @@ | |||
1 | /* | ||
2 | * IPV6 GSO/GRO offload support | ||
3 | * Linux INET6 implementation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/socket.h> | ||
13 | #include <linux/netdevice.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/printk.h> | ||
16 | |||
17 | #include <net/protocol.h> | ||
18 | #include <net/ipv6.h> | ||
19 | |||
20 | #include "ip6_offload.h" | ||
21 | |||
22 | static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) | ||
23 | { | ||
24 | const struct net_offload *ops = NULL; | ||
25 | |||
26 | for (;;) { | ||
27 | struct ipv6_opt_hdr *opth; | ||
28 | int len; | ||
29 | |||
30 | if (proto != NEXTHDR_HOP) { | ||
31 | ops = rcu_dereference(inet6_offloads[proto]); | ||
32 | |||
33 | if (unlikely(!ops)) | ||
34 | break; | ||
35 | |||
36 | if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) | ||
37 | break; | ||
38 | } | ||
39 | |||
40 | if (unlikely(!pskb_may_pull(skb, 8))) | ||
41 | break; | ||
42 | |||
43 | opth = (void *)skb->data; | ||
44 | len = ipv6_optlen(opth); | ||
45 | |||
46 | if (unlikely(!pskb_may_pull(skb, len))) | ||
47 | break; | ||
48 | |||
49 | proto = opth->nexthdr; | ||
50 | __skb_pull(skb, len); | ||
51 | } | ||
52 | |||
53 | return proto; | ||
54 | } | ||
55 | |||
56 | static int ipv6_gso_send_check(struct sk_buff *skb) | ||
57 | { | ||
58 | const struct ipv6hdr *ipv6h; | ||
59 | const struct net_offload *ops; | ||
60 | int err = -EINVAL; | ||
61 | |||
62 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
63 | goto out; | ||
64 | |||
65 | ipv6h = ipv6_hdr(skb); | ||
66 | __skb_pull(skb, sizeof(*ipv6h)); | ||
67 | err = -EPROTONOSUPPORT; | ||
68 | |||
69 | rcu_read_lock(); | ||
70 | ops = rcu_dereference(inet6_offloads[ | ||
71 | ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); | ||
72 | |||
73 | if (likely(ops && ops->callbacks.gso_send_check)) { | ||
74 | skb_reset_transport_header(skb); | ||
75 | err = ops->callbacks.gso_send_check(skb); | ||
76 | } | ||
77 | rcu_read_unlock(); | ||
78 | |||
79 | out: | ||
80 | return err; | ||
81 | } | ||
82 | |||
83 | static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | ||
84 | netdev_features_t features) | ||
85 | { | ||
86 | struct sk_buff *segs = ERR_PTR(-EINVAL); | ||
87 | struct ipv6hdr *ipv6h; | ||
88 | const struct net_offload *ops; | ||
89 | int proto; | ||
90 | struct frag_hdr *fptr; | ||
91 | unsigned int unfrag_ip6hlen; | ||
92 | u8 *prevhdr; | ||
93 | int offset = 0; | ||
94 | |||
95 | if (!(features & NETIF_F_V6_CSUM)) | ||
96 | features &= ~NETIF_F_SG; | ||
97 | |||
98 | if (unlikely(skb_shinfo(skb)->gso_type & | ||
99 | ~(SKB_GSO_UDP | | ||
100 | SKB_GSO_DODGY | | ||
101 | SKB_GSO_TCP_ECN | | ||
102 | SKB_GSO_TCPV6 | | ||
103 | 0))) | ||
104 | goto out; | ||
105 | |||
106 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
107 | goto out; | ||
108 | |||
109 | ipv6h = ipv6_hdr(skb); | ||
110 | __skb_pull(skb, sizeof(*ipv6h)); | ||
111 | segs = ERR_PTR(-EPROTONOSUPPORT); | ||
112 | |||
113 | proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | ||
114 | rcu_read_lock(); | ||
115 | ops = rcu_dereference(inet6_offloads[proto]); | ||
116 | if (likely(ops && ops->callbacks.gso_segment)) { | ||
117 | skb_reset_transport_header(skb); | ||
118 | segs = ops->callbacks.gso_segment(skb, features); | ||
119 | } | ||
120 | rcu_read_unlock(); | ||
121 | |||
122 | if (IS_ERR(segs)) | ||
123 | goto out; | ||
124 | |||
125 | for (skb = segs; skb; skb = skb->next) { | ||
126 | ipv6h = ipv6_hdr(skb); | ||
127 | ipv6h->payload_len = htons(skb->len - skb->mac_len - | ||
128 | sizeof(*ipv6h)); | ||
129 | if (proto == IPPROTO_UDP) { | ||
130 | unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); | ||
131 | fptr = (struct frag_hdr *)(skb_network_header(skb) + | ||
132 | unfrag_ip6hlen); | ||
133 | fptr->frag_off = htons(offset); | ||
134 | if (skb->next != NULL) | ||
135 | fptr->frag_off |= htons(IP6_MF); | ||
136 | offset += (ntohs(ipv6h->payload_len) - | ||
137 | sizeof(struct frag_hdr)); | ||
138 | } | ||
139 | } | ||
140 | |||
141 | out: | ||
142 | return segs; | ||
143 | } | ||
144 | |||
145 | static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, | ||
146 | struct sk_buff *skb) | ||
147 | { | ||
148 | const struct net_offload *ops; | ||
149 | struct sk_buff **pp = NULL; | ||
150 | struct sk_buff *p; | ||
151 | struct ipv6hdr *iph; | ||
152 | unsigned int nlen; | ||
153 | unsigned int hlen; | ||
154 | unsigned int off; | ||
155 | int flush = 1; | ||
156 | int proto; | ||
157 | __wsum csum; | ||
158 | |||
159 | off = skb_gro_offset(skb); | ||
160 | hlen = off + sizeof(*iph); | ||
161 | iph = skb_gro_header_fast(skb, off); | ||
162 | if (skb_gro_header_hard(skb, hlen)) { | ||
163 | iph = skb_gro_header_slow(skb, hlen, off); | ||
164 | if (unlikely(!iph)) | ||
165 | goto out; | ||
166 | } | ||
167 | |||
168 | skb_gro_pull(skb, sizeof(*iph)); | ||
169 | skb_set_transport_header(skb, skb_gro_offset(skb)); | ||
170 | |||
171 | flush += ntohs(iph->payload_len) != skb_gro_len(skb); | ||
172 | |||
173 | rcu_read_lock(); | ||
174 | proto = iph->nexthdr; | ||
175 | ops = rcu_dereference(inet6_offloads[proto]); | ||
176 | if (!ops || !ops->callbacks.gro_receive) { | ||
177 | __pskb_pull(skb, skb_gro_offset(skb)); | ||
178 | proto = ipv6_gso_pull_exthdrs(skb, proto); | ||
179 | skb_gro_pull(skb, -skb_transport_offset(skb)); | ||
180 | skb_reset_transport_header(skb); | ||
181 | __skb_push(skb, skb_gro_offset(skb)); | ||
182 | |||
183 | ops = rcu_dereference(inet6_offloads[proto]); | ||
184 | if (!ops || !ops->callbacks.gro_receive) | ||
185 | goto out_unlock; | ||
186 | |||
187 | iph = ipv6_hdr(skb); | ||
188 | } | ||
189 | |||
190 | NAPI_GRO_CB(skb)->proto = proto; | ||
191 | |||
192 | flush--; | ||
193 | nlen = skb_network_header_len(skb); | ||
194 | |||
195 | for (p = *head; p; p = p->next) { | ||
196 | const struct ipv6hdr *iph2; | ||
197 | __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */ | ||
198 | |||
199 | if (!NAPI_GRO_CB(p)->same_flow) | ||
200 | continue; | ||
201 | |||
202 | iph2 = ipv6_hdr(p); | ||
203 | first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ; | ||
204 | |||
205 | /* All fields must match except length and Traffic Class. */ | ||
206 | if (nlen != skb_network_header_len(p) || | ||
207 | (first_word & htonl(0xF00FFFFF)) || | ||
208 | memcmp(&iph->nexthdr, &iph2->nexthdr, | ||
209 | nlen - offsetof(struct ipv6hdr, nexthdr))) { | ||
210 | NAPI_GRO_CB(p)->same_flow = 0; | ||
211 | continue; | ||
212 | } | ||
213 | /* flush if Traffic Class fields are different */ | ||
214 | NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); | ||
215 | NAPI_GRO_CB(p)->flush |= flush; | ||
216 | } | ||
217 | |||
218 | NAPI_GRO_CB(skb)->flush |= flush; | ||
219 | |||
220 | csum = skb->csum; | ||
221 | skb_postpull_rcsum(skb, iph, skb_network_header_len(skb)); | ||
222 | |||
223 | pp = ops->callbacks.gro_receive(head, skb); | ||
224 | |||
225 | skb->csum = csum; | ||
226 | |||
227 | out_unlock: | ||
228 | rcu_read_unlock(); | ||
229 | |||
230 | out: | ||
231 | NAPI_GRO_CB(skb)->flush |= flush; | ||
232 | |||
233 | return pp; | ||
234 | } | ||
235 | |||
236 | static int ipv6_gro_complete(struct sk_buff *skb) | ||
237 | { | ||
238 | const struct net_offload *ops; | ||
239 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
240 | int err = -ENOSYS; | ||
241 | |||
242 | iph->payload_len = htons(skb->len - skb_network_offset(skb) - | ||
243 | sizeof(*iph)); | ||
244 | |||
245 | rcu_read_lock(); | ||
246 | ops = rcu_dereference(inet6_offloads[NAPI_GRO_CB(skb)->proto]); | ||
247 | if (WARN_ON(!ops || !ops->callbacks.gro_complete)) | ||
248 | goto out_unlock; | ||
249 | |||
250 | err = ops->callbacks.gro_complete(skb); | ||
251 | |||
252 | out_unlock: | ||
253 | rcu_read_unlock(); | ||
254 | |||
255 | return err; | ||
256 | } | ||
257 | |||
258 | static struct packet_offload ipv6_packet_offload __read_mostly = { | ||
259 | .type = cpu_to_be16(ETH_P_IPV6), | ||
260 | .callbacks = { | ||
261 | .gso_send_check = ipv6_gso_send_check, | ||
262 | .gso_segment = ipv6_gso_segment, | ||
263 | .gro_receive = ipv6_gro_receive, | ||
264 | .gro_complete = ipv6_gro_complete, | ||
265 | }, | ||
266 | }; | ||
267 | |||
268 | static int __init ipv6_offload_init(void) | ||
269 | { | ||
270 | |||
271 | if (tcpv6_offload_init() < 0) | ||
272 | pr_crit("%s: Cannot add TCP protocol offload\n", __func__); | ||
273 | if (udp_offload_init() < 0) | ||
274 | pr_crit("%s: Cannot add UDP protocol offload\n", __func__); | ||
275 | if (ipv6_exthdrs_offload_init() < 0) | ||
276 | pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__); | ||
277 | |||
278 | dev_add_offload(&ipv6_packet_offload); | ||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | fs_initcall(ipv6_offload_init); | ||
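Editor's note: ip6_offload.c re-homes the GSO/GRO code removed from af_inet6.c above, but dispatches through the new inet6_offloads[] / struct net_offload table instead of inet6_protos[]. A transport protocol plugs in by registering callbacks against its next-header value; the sketch below is a hypothetical registration (the example_* names are placeholders, not real kernel symbols).

        /* Hedged sketch: registering offload callbacks in inet6_offloads[]. */
        #include <linux/err.h>
        #include <linux/in.h>
        #include <linux/init.h>
        #include <linux/netdevice.h>
        #include <linux/skbuff.h>
        #include <net/protocol.h>

        /* Placeholder callbacks -- a real offload does protocol-specific work. */
        static int example_gso_send_check(struct sk_buff *skb)
        {
                return 0;
        }

        static struct sk_buff *example_gso_segment(struct sk_buff *skb,
                                                   netdev_features_t features)
        {
                return ERR_PTR(-EPROTONOSUPPORT);
        }

        static const struct net_offload example_offload = {
                .callbacks = {
                        .gso_send_check = example_gso_send_check,
                        .gso_segment    = example_gso_segment,
                        /* .gro_receive / .gro_complete would go here as well */
                },
        };

        static int __init example_offload_init(void)
        {
                /* The table is indexed by the IPv6 next-header value; IPPROTO_TCP is
                 * only an example -- the real TCP/UDP offloads register their own. */
                return inet6_add_offload(&example_offload, IPPROTO_TCP);
        }
        fs_initcall(example_offload_init);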
diff --git a/net/ipv6/ip6_offload.h b/net/ipv6/ip6_offload.h new file mode 100644 index 000000000000..2e155c651b35 --- /dev/null +++ b/net/ipv6/ip6_offload.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * IPV6 GSO/GRO offload support | ||
3 | * Linux INET6 implementation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #ifndef __ip6_offload_h | ||
12 | #define __ip6_offload_h | ||
13 | |||
14 | int ipv6_exthdrs_offload_init(void); | ||
15 | int udp_offload_init(void); | ||
16 | int tcpv6_offload_init(void); | ||
17 | |||
18 | #endif | ||
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index aece3e792f84..5552d13ae92f 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -538,78 +538,12 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
538 | to->tc_index = from->tc_index; | 538 | to->tc_index = from->tc_index; |
539 | #endif | 539 | #endif |
540 | nf_copy(to, from); | 540 | nf_copy(to, from); |
541 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 541 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) |
542 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | ||
543 | to->nf_trace = from->nf_trace; | 542 | to->nf_trace = from->nf_trace; |
544 | #endif | 543 | #endif |
545 | skb_copy_secmark(to, from); | 544 | skb_copy_secmark(to, from); |
546 | } | 545 | } |
547 | 546 | ||
548 | int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) | ||
549 | { | ||
550 | u16 offset = sizeof(struct ipv6hdr); | ||
551 | struct ipv6_opt_hdr *exthdr = | ||
552 | (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); | ||
553 | unsigned int packet_len = skb->tail - skb->network_header; | ||
554 | int found_rhdr = 0; | ||
555 | *nexthdr = &ipv6_hdr(skb)->nexthdr; | ||
556 | |||
557 | while (offset + 1 <= packet_len) { | ||
558 | |||
559 | switch (**nexthdr) { | ||
560 | |||
561 | case NEXTHDR_HOP: | ||
562 | break; | ||
563 | case NEXTHDR_ROUTING: | ||
564 | found_rhdr = 1; | ||
565 | break; | ||
566 | case NEXTHDR_DEST: | ||
567 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | ||
568 | if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) | ||
569 | break; | ||
570 | #endif | ||
571 | if (found_rhdr) | ||
572 | return offset; | ||
573 | break; | ||
574 | default : | ||
575 | return offset; | ||
576 | } | ||
577 | |||
578 | offset += ipv6_optlen(exthdr); | ||
579 | *nexthdr = &exthdr->nexthdr; | ||
580 | exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + | ||
581 | offset); | ||
582 | } | ||
583 | |||
584 | return offset; | ||
585 | } | ||
586 | |||
587 | void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) | ||
588 | { | ||
589 | static atomic_t ipv6_fragmentation_id; | ||
590 | int old, new; | ||
591 | |||
592 | if (rt && !(rt->dst.flags & DST_NOPEER)) { | ||
593 | struct inet_peer *peer; | ||
594 | struct net *net; | ||
595 | |||
596 | net = dev_net(rt->dst.dev); | ||
597 | peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1); | ||
598 | if (peer) { | ||
599 | fhdr->identification = htonl(inet_getid(peer, 0)); | ||
600 | inet_putpeer(peer); | ||
601 | return; | ||
602 | } | ||
603 | } | ||
604 | do { | ||
605 | old = atomic_read(&ipv6_fragmentation_id); | ||
606 | new = old + 1; | ||
607 | if (!new) | ||
608 | new = 1; | ||
609 | } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old); | ||
610 | fhdr->identification = htonl(new); | ||
611 | } | ||
612 | |||
613 | int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | 547 | int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) |
614 | { | 548 | { |
615 | struct sk_buff *frag; | 549 | struct sk_buff *frag; |
@@ -756,7 +690,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
756 | if (err == 0) { | 690 | if (err == 0) { |
757 | IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), | 691 | IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), |
758 | IPSTATS_MIB_FRAGOKS); | 692 | IPSTATS_MIB_FRAGOKS); |
759 | dst_release(&rt->dst); | 693 | ip6_rt_put(rt); |
760 | return 0; | 694 | return 0; |
761 | } | 695 | } |
762 | 696 | ||
@@ -768,7 +702,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
768 | 702 | ||
769 | IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), | 703 | IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), |
770 | IPSTATS_MIB_FRAGFAILS); | 704 | IPSTATS_MIB_FRAGFAILS); |
771 | dst_release(&rt->dst); | 705 | ip6_rt_put(rt); |
772 | return err; | 706 | return err; |
773 | 707 | ||
774 | slow_path_clean: | 708 | slow_path_clean: |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index cb7e2ded6f08..a14f28b280f5 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -74,6 +74,10 @@ MODULE_ALIAS_NETDEV("ip6tnl0"); | |||
74 | #define HASH_SIZE_SHIFT 5 | 74 | #define HASH_SIZE_SHIFT 5 |
75 | #define HASH_SIZE (1 << HASH_SIZE_SHIFT) | 75 | #define HASH_SIZE (1 << HASH_SIZE_SHIFT) |
76 | 76 | ||
77 | static bool log_ecn_error = true; | ||
78 | module_param(log_ecn_error, bool, 0644); | ||
79 | MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); | ||
80 | |||
77 | static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2) | 81 | static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2) |
78 | { | 82 | { |
79 | u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2); | 83 | u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2); |
@@ -83,6 +87,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2) | |||
83 | 87 | ||
84 | static int ip6_tnl_dev_init(struct net_device *dev); | 88 | static int ip6_tnl_dev_init(struct net_device *dev); |
85 | static void ip6_tnl_dev_setup(struct net_device *dev); | 89 | static void ip6_tnl_dev_setup(struct net_device *dev); |
90 | static struct rtnl_link_ops ip6_link_ops __read_mostly; | ||
86 | 91 | ||
87 | static int ip6_tnl_net_id __read_mostly; | 92 | static int ip6_tnl_net_id __read_mostly; |
88 | struct ip6_tnl_net { | 93 | struct ip6_tnl_net { |
@@ -94,14 +99,6 @@ struct ip6_tnl_net { | |||
94 | struct ip6_tnl __rcu **tnls[2]; | 99 | struct ip6_tnl __rcu **tnls[2]; |
95 | }; | 100 | }; |
96 | 101 | ||
97 | /* often modified stats are per cpu, other are shared (netdev->stats) */ | ||
98 | struct pcpu_tstats { | ||
99 | unsigned long rx_packets; | ||
100 | unsigned long rx_bytes; | ||
101 | unsigned long tx_packets; | ||
102 | unsigned long tx_bytes; | ||
103 | } __attribute__((aligned(4*sizeof(unsigned long)))); | ||
104 | |||
105 | static struct net_device_stats *ip6_get_stats(struct net_device *dev) | 102 | static struct net_device_stats *ip6_get_stats(struct net_device *dev) |
106 | { | 103 | { |
107 | struct pcpu_tstats sum = { 0 }; | 104 | struct pcpu_tstats sum = { 0 }; |
@@ -258,6 +255,33 @@ static void ip6_dev_free(struct net_device *dev) | |||
258 | free_netdev(dev); | 255 | free_netdev(dev); |
259 | } | 256 | } |
260 | 257 | ||
258 | static int ip6_tnl_create2(struct net_device *dev) | ||
259 | { | ||
260 | struct ip6_tnl *t = netdev_priv(dev); | ||
261 | struct net *net = dev_net(dev); | ||
262 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | ||
263 | int err; | ||
264 | |||
265 | t = netdev_priv(dev); | ||
266 | err = ip6_tnl_dev_init(dev); | ||
267 | if (err < 0) | ||
268 | goto out; | ||
269 | |||
270 | err = register_netdevice(dev); | ||
271 | if (err < 0) | ||
272 | goto out; | ||
273 | |||
274 | strcpy(t->parms.name, dev->name); | ||
275 | dev->rtnl_link_ops = &ip6_link_ops; | ||
276 | |||
277 | dev_hold(dev); | ||
278 | ip6_tnl_link(ip6n, t); | ||
279 | return 0; | ||
280 | |||
281 | out: | ||
282 | return err; | ||
283 | } | ||
284 | |||
261 | /** | 285 | /** |
262 | * ip6_tnl_create - create a new tunnel | 286 | * ip6_tnl_create - create a new tunnel |
263 | * @p: tunnel parameters | 287 | * @p: tunnel parameters |
@@ -276,7 +300,6 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) | |||
276 | struct ip6_tnl *t; | 300 | struct ip6_tnl *t; |
277 | char name[IFNAMSIZ]; | 301 | char name[IFNAMSIZ]; |
278 | int err; | 302 | int err; |
279 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | ||
280 | 303 | ||
281 | if (p->name[0]) | 304 | if (p->name[0]) |
282 | strlcpy(name, p->name, IFNAMSIZ); | 305 | strlcpy(name, p->name, IFNAMSIZ); |
@@ -291,17 +314,10 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) | |||
291 | 314 | ||
292 | t = netdev_priv(dev); | 315 | t = netdev_priv(dev); |
293 | t->parms = *p; | 316 | t->parms = *p; |
294 | err = ip6_tnl_dev_init(dev); | 317 | err = ip6_tnl_create2(dev); |
295 | if (err < 0) | 318 | if (err < 0) |
296 | goto failed_free; | 319 | goto failed_free; |
297 | 320 | ||
298 | if ((err = register_netdevice(dev)) < 0) | ||
299 | goto failed_free; | ||
300 | |||
301 | strcpy(t->parms.name, dev->name); | ||
302 | |||
303 | dev_hold(dev); | ||
304 | ip6_tnl_link(ip6n, t); | ||
305 | return t; | 321 | return t; |
306 | 322 | ||
307 | failed_free: | 323 | failed_free: |
@@ -663,8 +679,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
663 | 679 | ||
664 | icmpv6_send(skb2, rel_type, rel_code, rel_info); | 680 | icmpv6_send(skb2, rel_type, rel_code, rel_info); |
665 | 681 | ||
666 | if (rt) | 682 | ip6_rt_put(rt); |
667 | dst_release(&rt->dst); | ||
668 | 683 | ||
669 | kfree_skb(skb2); | 684 | kfree_skb(skb2); |
670 | } | 685 | } |
@@ -672,28 +687,26 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
672 | return 0; | 687 | return 0; |
673 | } | 688 | } |
674 | 689 | ||
675 | static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t, | 690 | static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t, |
676 | const struct ipv6hdr *ipv6h, | 691 | const struct ipv6hdr *ipv6h, |
677 | struct sk_buff *skb) | 692 | struct sk_buff *skb) |
678 | { | 693 | { |
679 | __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK; | 694 | __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK; |
680 | 695 | ||
681 | if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) | 696 | if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) |
682 | ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield); | 697 | ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield); |
683 | 698 | ||
684 | if (INET_ECN_is_ce(dsfield)) | 699 | return IP6_ECN_decapsulate(ipv6h, skb); |
685 | IP_ECN_set_ce(ip_hdr(skb)); | ||
686 | } | 700 | } |
687 | 701 | ||
688 | static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t, | 702 | static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t, |
689 | const struct ipv6hdr *ipv6h, | 703 | const struct ipv6hdr *ipv6h, |
690 | struct sk_buff *skb) | 704 | struct sk_buff *skb) |
691 | { | 705 | { |
692 | if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) | 706 | if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) |
693 | ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb)); | 707 | ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb)); |
694 | 708 | ||
695 | if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h))) | 709 | return IP6_ECN_decapsulate(ipv6h, skb); |
696 | IP6_ECN_set_ce(ipv6_hdr(skb)); | ||
697 | } | 710 | } |
698 | 711 | ||
699 | __u32 ip6_tnl_get_cap(struct ip6_tnl *t, | 712 | __u32 ip6_tnl_get_cap(struct ip6_tnl *t, |
@@ -757,12 +770,13 @@ EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl); | |||
757 | 770 | ||
758 | static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | 771 | static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, |
759 | __u8 ipproto, | 772 | __u8 ipproto, |
760 | void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, | 773 | int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, |
761 | const struct ipv6hdr *ipv6h, | 774 | const struct ipv6hdr *ipv6h, |
762 | struct sk_buff *skb)) | 775 | struct sk_buff *skb)) |
763 | { | 776 | { |
764 | struct ip6_tnl *t; | 777 | struct ip6_tnl *t; |
765 | const struct ipv6hdr *ipv6h = ipv6_hdr(skb); | 778 | const struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
779 | int err; | ||
766 | 780 | ||
767 | rcu_read_lock(); | 781 | rcu_read_lock(); |
768 | 782 | ||
@@ -792,14 +806,26 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
792 | skb->pkt_type = PACKET_HOST; | 806 | skb->pkt_type = PACKET_HOST; |
793 | memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); | 807 | memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); |
794 | 808 | ||
809 | __skb_tunnel_rx(skb, t->dev); | ||
810 | |||
811 | err = dscp_ecn_decapsulate(t, ipv6h, skb); | ||
812 | if (unlikely(err)) { | ||
813 | if (log_ecn_error) | ||
814 | net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n", | ||
815 | &ipv6h->saddr, | ||
816 | ipv6_get_dsfield(ipv6h)); | ||
817 | if (err > 1) { | ||
818 | ++t->dev->stats.rx_frame_errors; | ||
819 | ++t->dev->stats.rx_errors; | ||
820 | rcu_read_unlock(); | ||
821 | goto discard; | ||
822 | } | ||
823 | } | ||
824 | |||
795 | tstats = this_cpu_ptr(t->dev->tstats); | 825 | tstats = this_cpu_ptr(t->dev->tstats); |
796 | tstats->rx_packets++; | 826 | tstats->rx_packets++; |
797 | tstats->rx_bytes += skb->len; | 827 | tstats->rx_bytes += skb->len; |
798 | 828 | ||
799 | __skb_tunnel_rx(skb, t->dev); | ||
800 | |||
801 | dscp_ecn_decapsulate(t, ipv6h, skb); | ||
802 | |||
803 | netif_rx(skb); | 829 | netif_rx(skb); |
804 | 830 | ||
805 | rcu_read_unlock(); | 831 | rcu_read_unlock(); |
@@ -1208,7 +1234,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) | |||
1208 | if (dev->mtu < IPV6_MIN_MTU) | 1234 | if (dev->mtu < IPV6_MIN_MTU) |
1209 | dev->mtu = IPV6_MIN_MTU; | 1235 | dev->mtu = IPV6_MIN_MTU; |
1210 | } | 1236 | } |
1211 | dst_release(&rt->dst); | 1237 | ip6_rt_put(rt); |
1212 | } | 1238 | } |
1213 | } | 1239 | } |
1214 | 1240 | ||
@@ -1237,6 +1263,20 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p) | |||
1237 | return 0; | 1263 | return 0; |
1238 | } | 1264 | } |
1239 | 1265 | ||
1266 | static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) | ||
1267 | { | ||
1268 | struct net *net = dev_net(t->dev); | ||
1269 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | ||
1270 | int err; | ||
1271 | |||
1272 | ip6_tnl_unlink(ip6n, t); | ||
1273 | synchronize_net(); | ||
1274 | err = ip6_tnl_change(t, p); | ||
1275 | ip6_tnl_link(ip6n, t); | ||
1276 | netdev_state_change(t->dev); | ||
1277 | return err; | ||
1278 | } | ||
1279 | |||
1240 | static void | 1280 | static void |
1241 | ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u) | 1281 | ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u) |
1242 | { | 1282 | { |
@@ -1325,7 +1365,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1325 | case SIOCADDTUNNEL: | 1365 | case SIOCADDTUNNEL: |
1326 | case SIOCCHGTUNNEL: | 1366 | case SIOCCHGTUNNEL: |
1327 | err = -EPERM; | 1367 | err = -EPERM; |
1328 | if (!capable(CAP_NET_ADMIN)) | 1368 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1329 | break; | 1369 | break; |
1330 | err = -EFAULT; | 1370 | err = -EFAULT; |
1331 | if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) | 1371 | if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) |
@@ -1345,11 +1385,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1345 | } else | 1385 | } else |
1346 | t = netdev_priv(dev); | 1386 | t = netdev_priv(dev); |
1347 | 1387 | ||
1348 | ip6_tnl_unlink(ip6n, t); | 1388 | err = ip6_tnl_update(t, &p1); |
1349 | synchronize_net(); | ||
1350 | err = ip6_tnl_change(t, &p1); | ||
1351 | ip6_tnl_link(ip6n, t); | ||
1352 | netdev_state_change(dev); | ||
1353 | } | 1389 | } |
1354 | if (t) { | 1390 | if (t) { |
1355 | err = 0; | 1391 | err = 0; |
@@ -1362,7 +1398,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1362 | break; | 1398 | break; |
1363 | case SIOCDELTUNNEL: | 1399 | case SIOCDELTUNNEL: |
1364 | err = -EPERM; | 1400 | err = -EPERM; |
1365 | if (!capable(CAP_NET_ADMIN)) | 1401 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1366 | break; | 1402 | break; |
1367 | 1403 | ||
1368 | if (dev == ip6n->fb_tnl_dev) { | 1404 | if (dev == ip6n->fb_tnl_dev) { |
@@ -1505,6 +1541,164 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) | |||
1505 | return 0; | 1541 | return 0; |
1506 | } | 1542 | } |
1507 | 1543 | ||
1544 | static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[]) | ||
1545 | { | ||
1546 | u8 proto; | ||
1547 | |||
1548 | 	if (!data || !data[IFLA_IPTUN_PROTO]) 	| ||
1549 | return 0; | ||
1550 | |||
1551 | proto = nla_get_u8(data[IFLA_IPTUN_PROTO]); | ||
1552 | if (proto != IPPROTO_IPV6 && | ||
1553 | proto != IPPROTO_IPIP && | ||
1554 | proto != 0) | ||
1555 | return -EINVAL; | ||
1556 | |||
1557 | return 0; | ||
1558 | } | ||
1559 | |||
1560 | static void ip6_tnl_netlink_parms(struct nlattr *data[], | ||
1561 | struct __ip6_tnl_parm *parms) | ||
1562 | { | ||
1563 | memset(parms, 0, sizeof(*parms)); | ||
1564 | |||
1565 | if (!data) | ||
1566 | return; | ||
1567 | |||
1568 | if (data[IFLA_IPTUN_LINK]) | ||
1569 | parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]); | ||
1570 | |||
1571 | if (data[IFLA_IPTUN_LOCAL]) | ||
1572 | nla_memcpy(&parms->laddr, data[IFLA_IPTUN_LOCAL], | ||
1573 | sizeof(struct in6_addr)); | ||
1574 | |||
1575 | if (data[IFLA_IPTUN_REMOTE]) | ||
1576 | nla_memcpy(&parms->raddr, data[IFLA_IPTUN_REMOTE], | ||
1577 | sizeof(struct in6_addr)); | ||
1578 | |||
1579 | if (data[IFLA_IPTUN_TTL]) | ||
1580 | parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]); | ||
1581 | |||
1582 | if (data[IFLA_IPTUN_ENCAP_LIMIT]) | ||
1583 | parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]); | ||
1584 | |||
1585 | if (data[IFLA_IPTUN_FLOWINFO]) | ||
1586 | parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]); | ||
1587 | |||
1588 | if (data[IFLA_IPTUN_FLAGS]) | ||
1589 | parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]); | ||
1590 | |||
1591 | if (data[IFLA_IPTUN_PROTO]) | ||
1592 | parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]); | ||
1593 | } | ||
1594 | |||
1595 | static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, | ||
1596 | struct nlattr *tb[], struct nlattr *data[]) | ||
1597 | { | ||
1598 | struct net *net = dev_net(dev); | ||
1599 | struct ip6_tnl *nt; | ||
1600 | |||
1601 | nt = netdev_priv(dev); | ||
1602 | ip6_tnl_netlink_parms(data, &nt->parms); | ||
1603 | |||
1604 | if (ip6_tnl_locate(net, &nt->parms, 0)) | ||
1605 | return -EEXIST; | ||
1606 | |||
1607 | return ip6_tnl_create2(dev); | ||
1608 | } | ||
1609 | |||
1610 | static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], | ||
1611 | struct nlattr *data[]) | ||
1612 | { | ||
1613 | struct ip6_tnl *t; | ||
1614 | struct __ip6_tnl_parm p; | ||
1615 | struct net *net = dev_net(dev); | ||
1616 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | ||
1617 | |||
1618 | if (dev == ip6n->fb_tnl_dev) | ||
1619 | return -EINVAL; | ||
1620 | |||
1621 | ip6_tnl_netlink_parms(data, &p); | ||
1622 | |||
1623 | t = ip6_tnl_locate(net, &p, 0); | ||
1624 | |||
1625 | if (t) { | ||
1626 | if (t->dev != dev) | ||
1627 | return -EEXIST; | ||
1628 | } else | ||
1629 | t = netdev_priv(dev); | ||
1630 | |||
1631 | return ip6_tnl_update(t, &p); | ||
1632 | } | ||
1633 | |||
1634 | static size_t ip6_tnl_get_size(const struct net_device *dev) | ||
1635 | { | ||
1636 | return | ||
1637 | /* IFLA_IPTUN_LINK */ | ||
1638 | nla_total_size(4) + | ||
1639 | /* IFLA_IPTUN_LOCAL */ | ||
1640 | nla_total_size(sizeof(struct in6_addr)) + | ||
1641 | /* IFLA_IPTUN_REMOTE */ | ||
1642 | nla_total_size(sizeof(struct in6_addr)) + | ||
1643 | /* IFLA_IPTUN_TTL */ | ||
1644 | nla_total_size(1) + | ||
1645 | /* IFLA_IPTUN_ENCAP_LIMIT */ | ||
1646 | nla_total_size(1) + | ||
1647 | /* IFLA_IPTUN_FLOWINFO */ | ||
1648 | nla_total_size(4) + | ||
1649 | /* IFLA_IPTUN_FLAGS */ | ||
1650 | nla_total_size(4) + | ||
1651 | /* IFLA_IPTUN_PROTO */ | ||
1652 | nla_total_size(1) + | ||
1653 | 0; | ||
1654 | } | ||
1655 | |||
1656 | static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev) | ||
1657 | { | ||
1658 | struct ip6_tnl *tunnel = netdev_priv(dev); | ||
1659 | struct __ip6_tnl_parm *parm = &tunnel->parms; | ||
1660 | |||
1661 | if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || | ||
1662 | 	    nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr), 	| ||
1663 | 		    &parm->laddr) || 	| ||
1664 | 	    nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr), 	| ||
1665 | 		    &parm->raddr) || 	| ||
1666 | nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) || | ||
1667 | nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) || | ||
1668 | nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) || | ||
1669 | nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) || | ||
1670 | nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto)) | ||
1671 | goto nla_put_failure; | ||
1672 | return 0; | ||
1673 | |||
1674 | nla_put_failure: | ||
1675 | return -EMSGSIZE; | ||
1676 | } | ||
1677 | |||
1678 | static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = { | ||
1679 | [IFLA_IPTUN_LINK] = { .type = NLA_U32 }, | ||
1680 | [IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) }, | ||
1681 | [IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) }, | ||
1682 | [IFLA_IPTUN_TTL] = { .type = NLA_U8 }, | ||
1683 | [IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 }, | ||
1684 | [IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 }, | ||
1685 | [IFLA_IPTUN_FLAGS] = { .type = NLA_U32 }, | ||
1686 | [IFLA_IPTUN_PROTO] = { .type = NLA_U8 }, | ||
1687 | }; | ||
1688 | |||
1689 | static struct rtnl_link_ops ip6_link_ops __read_mostly = { | ||
1690 | .kind = "ip6tnl", | ||
1691 | .maxtype = IFLA_IPTUN_MAX, | ||
1692 | .policy = ip6_tnl_policy, | ||
1693 | .priv_size = sizeof(struct ip6_tnl), | ||
1694 | .setup = ip6_tnl_dev_setup, | ||
1695 | .validate = ip6_tnl_validate, | ||
1696 | .newlink = ip6_tnl_newlink, | ||
1697 | .changelink = ip6_tnl_changelink, | ||
1698 | .get_size = ip6_tnl_get_size, | ||
1699 | .fill_info = ip6_tnl_fill_info, | ||
1700 | }; | ||
1701 | |||
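
With ip6_link_ops registered (see ip6_tunnel_init below), ip6tnl interfaces can be created and reconfigured over rtnetlink instead of only through the legacy SIOCADDTUNNEL/SIOCCHGTUNNEL ioctls; with a matching iproute2 this corresponds roughly to "ip link add name tun1 type ip6tnl local <addr> remote <addr>" (the exact option names depend on the iproute2 version, shown here only as an illustration of the new netlink path).
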
1508 | static struct xfrm6_tunnel ip4ip6_handler __read_mostly = { | 1702 | static struct xfrm6_tunnel ip4ip6_handler __read_mostly = { |
1509 | .handler = ip4ip6_rcv, | 1703 | .handler = ip4ip6_rcv, |
1510 | .err_handler = ip4ip6_err, | 1704 | .err_handler = ip4ip6_err, |
@@ -1613,9 +1807,14 @@ static int __init ip6_tunnel_init(void) | |||
1613 | pr_err("%s: can't register ip6ip6\n", __func__); | 1807 | pr_err("%s: can't register ip6ip6\n", __func__); |
1614 | goto out_ip6ip6; | 1808 | goto out_ip6ip6; |
1615 | } | 1809 | } |
1810 | err = rtnl_link_register(&ip6_link_ops); | ||
1811 | if (err < 0) | ||
1812 | goto rtnl_link_failed; | ||
1616 | 1813 | ||
1617 | return 0; | 1814 | return 0; |
1618 | 1815 | ||
1816 | rtnl_link_failed: | ||
1817 | xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6); | ||
1619 | out_ip6ip6: | 1818 | out_ip6ip6: |
1620 | xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET); | 1819 | xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET); |
1621 | out_ip4ip6: | 1820 | out_ip4ip6: |
@@ -1630,6 +1829,7 @@ out_pernet: | |||
1630 | 1829 | ||
1631 | static void __exit ip6_tunnel_cleanup(void) | 1830 | static void __exit ip6_tunnel_cleanup(void) |
1632 | { | 1831 | { |
1832 | rtnl_link_unregister(&ip6_link_ops); | ||
1633 | if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET)) | 1833 | if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET)) |
1634 | pr_info("%s: can't deregister ip4ip6\n", __func__); | 1834 | pr_info("%s: can't deregister ip4ip6\n", __func__); |
1635 | 1835 | ||
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index f7c7c6319720..926ea544f499 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -66,8 +66,8 @@ struct mr6_table { | |||
66 | struct mif_device vif6_table[MAXMIFS]; | 66 | struct mif_device vif6_table[MAXMIFS]; |
67 | int maxvif; | 67 | int maxvif; |
68 | atomic_t cache_resolve_queue_len; | 68 | atomic_t cache_resolve_queue_len; |
69 | int mroute_do_assert; | 69 | bool mroute_do_assert; |
70 | int mroute_do_pim; | 70 | bool mroute_do_pim; |
71 | #ifdef CONFIG_IPV6_PIMSM_V2 | 71 | #ifdef CONFIG_IPV6_PIMSM_V2 |
72 | int mroute_reg_vif_num; | 72 | int mroute_reg_vif_num; |
73 | #endif | 73 | #endif |
@@ -1583,7 +1583,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1583 | return -ENOENT; | 1583 | return -ENOENT; |
1584 | 1584 | ||
1585 | if (optname != MRT6_INIT) { | 1585 | if (optname != MRT6_INIT) { |
1586 | if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN)) | 1586 | if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1587 | return -EACCES; | 1587 | return -EACCES; |
1588 | } | 1588 | } |
1589 | 1589 | ||
@@ -1646,9 +1646,12 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1646 | case MRT6_ASSERT: | 1646 | case MRT6_ASSERT: |
1647 | { | 1647 | { |
1648 | int v; | 1648 | int v; |
1649 | |||
1650 | if (optlen != sizeof(v)) | ||
1651 | return -EINVAL; | ||
1649 | if (get_user(v, (int __user *)optval)) | 1652 | if (get_user(v, (int __user *)optval)) |
1650 | return -EFAULT; | 1653 | return -EFAULT; |
1651 | mrt->mroute_do_assert = !!v; | 1654 | mrt->mroute_do_assert = v; |
1652 | return 0; | 1655 | return 0; |
1653 | } | 1656 | } |
1654 | 1657 | ||
@@ -1656,6 +1659,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns | |||
1656 | case MRT6_PIM: | 1659 | case MRT6_PIM: |
1657 | { | 1660 | { |
1658 | int v; | 1661 | int v; |
1662 | |||
1663 | if (optlen != sizeof(v)) | ||
1664 | return -EINVAL; | ||
1659 | if (get_user(v, (int __user *)optval)) | 1665 | if (get_user(v, (int __user *)optval)) |
1660 | return -EFAULT; | 1666 | return -EFAULT; |
1661 | v = !!v; | 1667 | v = !!v; |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index e02faed6d17e..ee94d31c9d4d 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -343,7 +343,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
343 | break; | 343 | break; |
344 | 344 | ||
345 | case IPV6_TRANSPARENT: | 345 | case IPV6_TRANSPARENT: |
346 | if (valbool && !capable(CAP_NET_ADMIN) && !capable(CAP_NET_RAW)) { | 346 | if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) && |
347 | !ns_capable(net->user_ns, CAP_NET_RAW)) { | ||
347 | retv = -EPERM; | 348 | retv = -EPERM; |
348 | break; | 349 | break; |
349 | } | 350 | } |
@@ -381,7 +382,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
381 | 382 | ||
382 | /* hop-by-hop / destination options are privileged option */ | 383 | /* hop-by-hop / destination options are privileged option */ |
383 | retv = -EPERM; | 384 | retv = -EPERM; |
384 | if (optname != IPV6_RTHDR && !capable(CAP_NET_RAW)) | 385 | if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) |
385 | break; | 386 | break; |
386 | 387 | ||
387 | opt = ipv6_renew_options(sk, np->opt, optname, | 388 | opt = ipv6_renew_options(sk, np->opt, optname, |
@@ -397,7 +398,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
397 | if (optname == IPV6_RTHDR && opt && opt->srcrt) { | 398 | if (optname == IPV6_RTHDR && opt && opt->srcrt) { |
398 | struct ipv6_rt_hdr *rthdr = opt->srcrt; | 399 | struct ipv6_rt_hdr *rthdr = opt->srcrt; |
399 | switch (rthdr->type) { | 400 | switch (rthdr->type) { |
400 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 401 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
401 | case IPV6_SRCRT_TYPE_2: | 402 | case IPV6_SRCRT_TYPE_2: |
402 | if (rthdr->hdrlen != 2 || | 403 | if (rthdr->hdrlen != 2 || |
403 | rthdr->segments_left != 1) | 404 | rthdr->segments_left != 1) |
@@ -754,7 +755,7 @@ done: | |||
754 | case IPV6_IPSEC_POLICY: | 755 | case IPV6_IPSEC_POLICY: |
755 | case IPV6_XFRM_POLICY: | 756 | case IPV6_XFRM_POLICY: |
756 | retv = -EPERM; | 757 | retv = -EPERM; |
757 | if (!capable(CAP_NET_ADMIN)) | 758 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
758 | break; | 759 | break; |
759 | retv = xfrm_user_policy(sk, optname, optval, optlen); | 760 | retv = xfrm_user_policy(sk, optname, optval, optlen); |
760 | break; | 761 | break; |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 92f8e48e4ba4..b19ed51a45bb 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -163,7 +163,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
163 | rt = rt6_lookup(net, addr, NULL, 0, 0); | 163 | rt = rt6_lookup(net, addr, NULL, 0, 0); |
164 | if (rt) { | 164 | if (rt) { |
165 | dev = rt->dst.dev; | 165 | dev = rt->dst.dev; |
166 | dst_release(&rt->dst); | 166 | ip6_rt_put(rt); |
167 | } | 167 | } |
168 | } else | 168 | } else |
169 | dev = dev_get_by_index_rcu(net, ifindex); | 169 | dev = dev_get_by_index_rcu(net, ifindex); |
@@ -260,7 +260,7 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net, | |||
260 | 260 | ||
261 | if (rt) { | 261 | if (rt) { |
262 | dev = rt->dst.dev; | 262 | dev = rt->dst.dev; |
263 | dst_release(&rt->dst); | 263 | ip6_rt_put(rt); |
264 | } | 264 | } |
265 | } else | 265 | } else |
266 | dev = dev_get_by_index_rcu(net, ifindex); | 266 | dev = dev_get_by_index_rcu(net, ifindex); |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 2edce30ef733..f41853bca428 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -535,7 +535,6 @@ static void ndisc_send_unsol_na(struct net_device *dev) | |||
535 | { | 535 | { |
536 | struct inet6_dev *idev; | 536 | struct inet6_dev *idev; |
537 | struct inet6_ifaddr *ifa; | 537 | struct inet6_ifaddr *ifa; |
538 | struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT; | ||
539 | 538 | ||
540 | idev = in6_dev_get(dev); | 539 | idev = in6_dev_get(dev); |
541 | if (!idev) | 540 | if (!idev) |
@@ -543,7 +542,7 @@ static void ndisc_send_unsol_na(struct net_device *dev) | |||
543 | 542 | ||
544 | read_lock_bh(&idev->lock); | 543 | read_lock_bh(&idev->lock); |
545 | list_for_each_entry(ifa, &idev->addr_list, if_list) { | 544 | list_for_each_entry(ifa, &idev->addr_list, if_list) { |
546 | ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr, | 545 | ndisc_send_na(dev, NULL, &in6addr_linklocal_allnodes, &ifa->addr, |
547 | /*router=*/ !!idev->cnf.forwarding, | 546 | /*router=*/ !!idev->cnf.forwarding, |
548 | /*solicited=*/ false, /*override=*/ true, | 547 | /*solicited=*/ false, /*override=*/ true, |
549 | /*inc_opt=*/ true); | 548 | /*inc_opt=*/ true); |
@@ -905,7 +904,7 @@ static void ndisc_recv_na(struct sk_buff *skb) | |||
905 | if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) && | 904 | if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) && |
906 | net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp && | 905 | net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp && |
907 | pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) { | 906 | pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) { |
908 | /* XXX: idev->cnf.prixy_ndp */ | 907 | /* XXX: idev->cnf.proxy_ndp */ |
909 | goto out; | 908 | goto out; |
910 | } | 909 | } |
911 | 910 | ||
@@ -1144,7 +1143,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) | |||
1144 | ND_PRINTK(0, err, | 1143 | ND_PRINTK(0, err, |
1145 | "RA: %s got default router without neighbour\n", | 1144 | "RA: %s got default router without neighbour\n", |
1146 | __func__); | 1145 | __func__); |
1147 | dst_release(&rt->dst); | 1146 | ip6_rt_put(rt); |
1148 | return; | 1147 | return; |
1149 | } | 1148 | } |
1150 | } | 1149 | } |
@@ -1169,7 +1168,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) | |||
1169 | ND_PRINTK(0, err, | 1168 | ND_PRINTK(0, err, |
1170 | "RA: %s got default router without neighbour\n", | 1169 | "RA: %s got default router without neighbour\n", |
1171 | __func__); | 1170 | __func__); |
1172 | dst_release(&rt->dst); | 1171 | ip6_rt_put(rt); |
1173 | return; | 1172 | return; |
1174 | } | 1173 | } |
1175 | neigh->flags |= NTF_ROUTER; | 1174 | neigh->flags |= NTF_ROUTER; |
@@ -1325,8 +1324,7 @@ skip_routeinfo: | |||
1325 | ND_PRINTK(2, warn, "RA: invalid RA options\n"); | 1324 | ND_PRINTK(2, warn, "RA: invalid RA options\n"); |
1326 | } | 1325 | } |
1327 | out: | 1326 | out: |
1328 | if (rt) | 1327 | ip6_rt_put(rt); |
1329 | dst_release(&rt->dst); | ||
1330 | if (neigh) | 1328 | if (neigh) |
1331 | neigh_release(neigh); | 1329 | neigh_release(neigh); |
1332 | } | 1330 | } |
@@ -1574,11 +1572,18 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, | |||
1574 | { | 1572 | { |
1575 | struct net_device *dev = ptr; | 1573 | struct net_device *dev = ptr; |
1576 | struct net *net = dev_net(dev); | 1574 | struct net *net = dev_net(dev); |
1575 | struct inet6_dev *idev; | ||
1577 | 1576 | ||
1578 | switch (event) { | 1577 | switch (event) { |
1579 | case NETDEV_CHANGEADDR: | 1578 | case NETDEV_CHANGEADDR: |
1580 | neigh_changeaddr(&nd_tbl, dev); | 1579 | neigh_changeaddr(&nd_tbl, dev); |
1581 | fib6_run_gc(~0UL, net); | 1580 | fib6_run_gc(~0UL, net); |
1581 | idev = in6_dev_get(dev); | ||
1582 | if (!idev) | ||
1583 | break; | ||
1584 | if (idev->cnf.ndisc_notify) | ||
1585 | ndisc_send_unsol_na(dev); | ||
1586 | in6_dev_put(idev); | ||
1582 | break; | 1587 | break; |
1583 | case NETDEV_DOWN: | 1588 | case NETDEV_DOWN: |
1584 | neigh_ifdown(&nd_tbl, dev); | 1589 | neigh_ifdown(&nd_tbl, dev); |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index d7cb04506c3d..74cadd0719a5 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -207,8 +207,7 @@ ip6t_get_target_c(const struct ip6t_entry *e) | |||
207 | return ip6t_get_target((struct ip6t_entry *)e); | 207 | return ip6t_get_target((struct ip6t_entry *)e); |
208 | } | 208 | } |
209 | 209 | ||
210 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 210 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) |
211 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | ||
212 | /* This cries for unification! */ | 211 | /* This cries for unification! */ |
213 | static const char *const hooknames[] = { | 212 | static const char *const hooknames[] = { |
214 | [NF_INET_PRE_ROUTING] = "PREROUTING", | 213 | [NF_INET_PRE_ROUTING] = "PREROUTING", |
@@ -381,8 +380,7 @@ ip6t_do_table(struct sk_buff *skb, | |||
381 | t = ip6t_get_target_c(e); | 380 | t = ip6t_get_target_c(e); |
382 | IP_NF_ASSERT(t->u.kernel.target); | 381 | IP_NF_ASSERT(t->u.kernel.target); |
383 | 382 | ||
384 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 383 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) |
385 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | ||
386 | /* The packet is traced: log it */ | 384 | /* The packet is traced: log it */ |
387 | if (unlikely(skb->nf_trace)) | 385 | if (unlikely(skb->nf_trace)) |
388 | trace_packet(skb, hook, in, out, | 386 | trace_packet(skb, hook, in, out, |
@@ -1856,7 +1854,7 @@ compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, | |||
1856 | { | 1854 | { |
1857 | int ret; | 1855 | int ret; |
1858 | 1856 | ||
1859 | if (!capable(CAP_NET_ADMIN)) | 1857 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
1860 | return -EPERM; | 1858 | return -EPERM; |
1861 | 1859 | ||
1862 | switch (cmd) { | 1860 | switch (cmd) { |
@@ -1971,7 +1969,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
1971 | { | 1969 | { |
1972 | int ret; | 1970 | int ret; |
1973 | 1971 | ||
1974 | if (!capable(CAP_NET_ADMIN)) | 1972 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
1975 | return -EPERM; | 1973 | return -EPERM; |
1976 | 1974 | ||
1977 | switch (cmd) { | 1975 | switch (cmd) { |
@@ -1993,7 +1991,7 @@ do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
1993 | { | 1991 | { |
1994 | int ret; | 1992 | int ret; |
1995 | 1993 | ||
1996 | if (!capable(CAP_NET_ADMIN)) | 1994 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
1997 | return -EPERM; | 1995 | return -EPERM; |
1998 | 1996 | ||
1999 | switch (cmd) { | 1997 | switch (cmd) { |
@@ -2018,7 +2016,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2018 | { | 2016 | { |
2019 | int ret; | 2017 | int ret; |
2020 | 2018 | ||
2021 | if (!capable(CAP_NET_ADMIN)) | 2019 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
2022 | return -EPERM; | 2020 | return -EPERM; |
2023 | 2021 | ||
2024 | switch (cmd) { | 2022 | switch (cmd) { |
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index 5d1d8b04d694..5060d54199ab 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c | |||
@@ -67,7 +67,7 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb, | |||
67 | if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE)) | 67 | if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE)) |
68 | ret = true; | 68 | ret = true; |
69 | out: | 69 | out: |
70 | dst_release(&rt->dst); | 70 | ip6_rt_put(rt); |
71 | return ret; | 71 | return ret; |
72 | } | 72 | } |
73 | 73 | ||
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c index d57dab17a182..fa84cf8ec6bc 100644 --- a/net/ipv6/netfilter/ip6table_nat.c +++ b/net/ipv6/netfilter/ip6table_nat.c | |||
@@ -277,9 +277,7 @@ static int __net_init ip6table_nat_net_init(struct net *net) | |||
277 | return -ENOMEM; | 277 | return -ENOMEM; |
278 | net->ipv6.ip6table_nat = ip6t_register_table(net, &nf_nat_ipv6_table, repl); | 278 | net->ipv6.ip6table_nat = ip6t_register_table(net, &nf_nat_ipv6_table, repl); |
279 | kfree(repl); | 279 | kfree(repl); |
280 | if (IS_ERR(net->ipv6.ip6table_nat)) | 280 | return PTR_RET(net->ipv6.ip6table_nat); |
281 | return PTR_ERR(net->ipv6.ip6table_nat); | ||
282 | return 0; | ||
283 | } | 281 | } |
284 | 282 | ||
285 | static void __net_exit ip6table_nat_net_exit(struct net *net) | 283 | static void __net_exit ip6table_nat_net_exit(struct net *net) |
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 8860d23e61cf..00ee17c3e893 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include <linux/netfilter_bridge.h> | 22 | #include <linux/netfilter_bridge.h> |
23 | #include <linux/netfilter_ipv6.h> | 23 | #include <linux/netfilter_ipv6.h> |
24 | #include <linux/netfilter_ipv6/ip6_tables.h> | ||
24 | #include <net/netfilter/nf_conntrack.h> | 25 | #include <net/netfilter/nf_conntrack.h> |
25 | #include <net/netfilter/nf_conntrack_helper.h> | 26 | #include <net/netfilter/nf_conntrack_helper.h> |
26 | #include <net/netfilter/nf_conntrack_l4proto.h> | 27 | #include <net/netfilter/nf_conntrack_l4proto.h> |
@@ -295,7 +296,56 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = { | |||
295 | }, | 296 | }, |
296 | }; | 297 | }; |
297 | 298 | ||
298 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 299 | static int |
300 | ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len) | ||
301 | { | ||
302 | const struct inet_sock *inet = inet_sk(sk); | ||
303 | const struct ipv6_pinfo *inet6 = inet6_sk(sk); | ||
304 | const struct nf_conntrack_tuple_hash *h; | ||
305 | struct sockaddr_in6 sin6; | ||
306 | struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 }; | ||
307 | struct nf_conn *ct; | ||
308 | |||
309 | tuple.src.u3.in6 = inet6->rcv_saddr; | ||
310 | tuple.src.u.tcp.port = inet->inet_sport; | ||
311 | tuple.dst.u3.in6 = inet6->daddr; | ||
312 | tuple.dst.u.tcp.port = inet->inet_dport; | ||
313 | tuple.dst.protonum = sk->sk_protocol; | ||
314 | |||
315 | if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) | ||
316 | return -ENOPROTOOPT; | ||
317 | |||
318 | if (*len < 0 || (unsigned int) *len < sizeof(sin6)) | ||
319 | return -EINVAL; | ||
320 | |||
321 | h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple); | ||
322 | if (!h) { | ||
323 | pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n", | ||
324 | &tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port), | ||
325 | &tuple.dst.u3.ip6, ntohs(tuple.dst.u.tcp.port)); | ||
326 | return -ENOENT; | ||
327 | } | ||
328 | |||
329 | ct = nf_ct_tuplehash_to_ctrack(h); | ||
330 | |||
331 | sin6.sin6_family = AF_INET6; | ||
332 | sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port; | ||
333 | sin6.sin6_flowinfo = inet6->flow_label & IPV6_FLOWINFO_MASK; | ||
334 | memcpy(&sin6.sin6_addr, | ||
335 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6, | ||
336 | sizeof(sin6.sin6_addr)); | ||
337 | |||
338 | nf_ct_put(ct); | ||
339 | |||
340 | if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL) | ||
341 | sin6.sin6_scope_id = sk->sk_bound_dev_if; | ||
342 | else | ||
343 | sin6.sin6_scope_id = 0; | ||
344 | |||
345 | return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0; | ||
346 | } | ||
347 | |||
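
For context (not part of the patch): this mirrors the IPv4 SO_ORIGINAL_DST getsockopt that transparent proxies use to recover the pre-DNAT destination of a redirected connection. A rough userspace sketch of how it would be queried on an accepted IPv6 TCP socket; the SOL_IPV6 level and the IP6T_SO_ORIGINAL_DST constant are assumptions taken from the sockopt registration above and from linux/netfilter_ipv6/ip6_tables.h:

    #include <string.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/netfilter_ipv6/ip6_tables.h>   /* IP6T_SO_ORIGINAL_DST */

    /* Returns 0 and fills *dst with the original (pre-DNAT) destination. */
    static int original_dst6(int fd, struct sockaddr_in6 *dst)
    {
            socklen_t len = sizeof(*dst);

            memset(dst, 0, sizeof(*dst));
            return getsockopt(fd, SOL_IPV6, IP6T_SO_ORIGINAL_DST, dst, &len);
    }
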
348 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) | ||
299 | 349 | ||
300 | #include <linux/netfilter/nfnetlink.h> | 350 | #include <linux/netfilter/nfnetlink.h> |
301 | #include <linux/netfilter/nfnetlink_conntrack.h> | 351 | #include <linux/netfilter/nfnetlink_conntrack.h> |
@@ -346,7 +396,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = { | |||
346 | .invert_tuple = ipv6_invert_tuple, | 396 | .invert_tuple = ipv6_invert_tuple, |
347 | .print_tuple = ipv6_print_tuple, | 397 | .print_tuple = ipv6_print_tuple, |
348 | .get_l4proto = ipv6_get_l4proto, | 398 | .get_l4proto = ipv6_get_l4proto, |
349 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 399 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) |
350 | .tuple_to_nlattr = ipv6_tuple_to_nlattr, | 400 | .tuple_to_nlattr = ipv6_tuple_to_nlattr, |
351 | .nlattr_tuple_size = ipv6_nlattr_tuple_size, | 401 | .nlattr_tuple_size = ipv6_nlattr_tuple_size, |
352 | .nlattr_to_tuple = ipv6_nlattr_to_tuple, | 402 | .nlattr_to_tuple = ipv6_nlattr_to_tuple, |
@@ -359,6 +409,14 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6)); | |||
359 | MODULE_LICENSE("GPL"); | 409 | MODULE_LICENSE("GPL"); |
360 | MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>"); | 410 | MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>"); |
361 | 411 | ||
412 | static struct nf_sockopt_ops so_getorigdst6 = { | ||
413 | .pf = NFPROTO_IPV6, | ||
414 | .get_optmin = IP6T_SO_ORIGINAL_DST, | ||
415 | .get_optmax = IP6T_SO_ORIGINAL_DST + 1, | ||
416 | .get = ipv6_getorigdst, | ||
417 | .owner = THIS_MODULE, | ||
418 | }; | ||
419 | |||
362 | static int ipv6_net_init(struct net *net) | 420 | static int ipv6_net_init(struct net *net) |
363 | { | 421 | { |
364 | int ret = 0; | 422 | int ret = 0; |
@@ -425,6 +483,12 @@ static int __init nf_conntrack_l3proto_ipv6_init(void) | |||
425 | need_conntrack(); | 483 | need_conntrack(); |
426 | nf_defrag_ipv6_enable(); | 484 | nf_defrag_ipv6_enable(); |
427 | 485 | ||
486 | ret = nf_register_sockopt(&so_getorigdst6); | ||
487 | if (ret < 0) { | ||
488 | pr_err("Unable to register netfilter socket option\n"); | ||
489 | return ret; | ||
490 | } | ||
491 | |||
428 | ret = register_pernet_subsys(&ipv6_net_ops); | 492 | ret = register_pernet_subsys(&ipv6_net_ops); |
429 | if (ret < 0) | 493 | if (ret < 0) |
430 | goto cleanup_pernet; | 494 | goto cleanup_pernet; |
@@ -440,6 +504,7 @@ static int __init nf_conntrack_l3proto_ipv6_init(void) | |||
440 | cleanup_ipv6: | 504 | cleanup_ipv6: |
441 | unregister_pernet_subsys(&ipv6_net_ops); | 505 | unregister_pernet_subsys(&ipv6_net_ops); |
442 | cleanup_pernet: | 506 | cleanup_pernet: |
507 | nf_unregister_sockopt(&so_getorigdst6); | ||
443 | return ret; | 508 | return ret; |
444 | } | 509 | } |
445 | 510 | ||
@@ -448,6 +513,7 @@ static void __exit nf_conntrack_l3proto_ipv6_fini(void) | |||
448 | synchronize_net(); | 513 | synchronize_net(); |
449 | nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops)); | 514 | nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops)); |
450 | unregister_pernet_subsys(&ipv6_net_ops); | 515 | unregister_pernet_subsys(&ipv6_net_ops); |
516 | nf_unregister_sockopt(&so_getorigdst6); | ||
451 | } | 517 | } |
452 | 518 | ||
453 | module_init(nf_conntrack_l3proto_ipv6_init); | 519 | module_init(nf_conntrack_l3proto_ipv6_init); |
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 2d54b2061d68..24df3dde0076 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | |||
@@ -232,7 +232,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl, | |||
232 | return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum); | 232 | return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum); |
233 | } | 233 | } |
234 | 234 | ||
235 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 235 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) |
236 | 236 | ||
237 | #include <linux/netfilter/nfnetlink.h> | 237 | #include <linux/netfilter/nfnetlink.h> |
238 | #include <linux/netfilter/nfnetlink_conntrack.h> | 238 | #include <linux/netfilter/nfnetlink_conntrack.h> |
@@ -375,7 +375,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly = | |||
375 | .get_timeouts = icmpv6_get_timeouts, | 375 | .get_timeouts = icmpv6_get_timeouts, |
376 | .new = icmpv6_new, | 376 | .new = icmpv6_new, |
377 | .error = icmpv6_error, | 377 | .error = icmpv6_error, |
378 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 378 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) |
379 | .tuple_to_nlattr = icmpv6_tuple_to_nlattr, | 379 | .tuple_to_nlattr = icmpv6_tuple_to_nlattr, |
380 | .nlattr_tuple_size = icmpv6_nlattr_tuple_size, | 380 | .nlattr_tuple_size = icmpv6_nlattr_tuple_size, |
381 | .nlattr_to_tuple = icmpv6_nlattr_to_tuple, | 381 | .nlattr_to_tuple = icmpv6_nlattr_to_tuple, |
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c index cdd6d045e42e..aacd121fe8c5 100644 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | |||
@@ -19,7 +19,7 @@ | |||
19 | 19 | ||
20 | #include <linux/netfilter_ipv6.h> | 20 | #include <linux/netfilter_ipv6.h> |
21 | #include <linux/netfilter_bridge.h> | 21 | #include <linux/netfilter_bridge.h> |
22 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 22 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) |
23 | #include <net/netfilter/nf_conntrack.h> | 23 | #include <net/netfilter/nf_conntrack.h> |
24 | #include <net/netfilter/nf_conntrack_helper.h> | 24 | #include <net/netfilter/nf_conntrack_helper.h> |
25 | #include <net/netfilter/nf_conntrack_l4proto.h> | 25 | #include <net/netfilter/nf_conntrack_l4proto.h> |
@@ -35,7 +35,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, | |||
35 | { | 35 | { |
36 | u16 zone = NF_CT_DEFAULT_ZONE; | 36 | u16 zone = NF_CT_DEFAULT_ZONE; |
37 | 37 | ||
38 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 38 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) |
39 | if (skb->nfct) | 39 | if (skb->nfct) |
40 | zone = nf_ct_zone((struct nf_conn *)skb->nfct); | 40 | zone = nf_ct_zone((struct nf_conn *)skb->nfct); |
41 | #endif | 41 | #endif |
@@ -60,7 +60,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum, | |||
60 | { | 60 | { |
61 | struct sk_buff *reasm; | 61 | struct sk_buff *reasm; |
62 | 62 | ||
63 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 63 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) |
64 | /* Previously seen (loopback)? */ | 64 | /* Previously seen (loopback)? */ |
65 | if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) | 65 | if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) |
66 | return NF_ACCEPT; | 66 | return NF_ACCEPT; |
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c index 5d6da784305b..61aaf70f376e 100644 --- a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c | |||
@@ -84,7 +84,7 @@ const struct nf_nat_l4proto nf_nat_l4proto_icmpv6 = { | |||
84 | .manip_pkt = icmpv6_manip_pkt, | 84 | .manip_pkt = icmpv6_manip_pkt, |
85 | .in_range = icmpv6_in_range, | 85 | .in_range = icmpv6_in_range, |
86 | .unique_tuple = icmpv6_unique_tuple, | 86 | .unique_tuple = icmpv6_unique_tuple, |
87 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 87 | #if IS_ENABLED(CONFIG_NF_CT_NETLINK) |
88 | .nlattr_to_range = nf_nat_l4proto_nlattr_to_range, | 88 | .nlattr_to_range = nf_nat_l4proto_nlattr_to_range, |
89 | #endif | 89 | #endif |
90 | }; | 90 | }; |
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c new file mode 100644 index 000000000000..c2e73e647e44 --- /dev/null +++ b/net/ipv6/output_core.c | |||
@@ -0,0 +1,76 @@ | |||
1 | /* | ||
2 | * IPv6 library code, needed by static components when full IPv6 support is | ||
3 | * not configured, or is only built as a module. These functions are needed by the GSO/GRO implementation. | ||
4 | */ | ||
5 | #include <linux/export.h> | ||
6 | #include <net/ipv6.h> | ||
7 | #include <net/ip6_fib.h> | ||
8 | |||
9 | void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) | ||
10 | { | ||
11 | static atomic_t ipv6_fragmentation_id; | ||
12 | int old, new; | ||
13 | |||
14 | #if IS_ENABLED(CONFIG_IPV6) | ||
15 | if (rt && !(rt->dst.flags & DST_NOPEER)) { | ||
16 | struct inet_peer *peer; | ||
17 | struct net *net; | ||
18 | |||
19 | net = dev_net(rt->dst.dev); | ||
20 | peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1); | ||
21 | if (peer) { | ||
22 | fhdr->identification = htonl(inet_getid(peer, 0)); | ||
23 | inet_putpeer(peer); | ||
24 | return; | ||
25 | } | ||
26 | } | ||
27 | #endif | ||
28 | do { | ||
29 | old = atomic_read(&ipv6_fragmentation_id); | ||
30 | new = old + 1; | ||
31 | if (!new) | ||
32 | new = 1; | ||
33 | } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old); | ||
34 | fhdr->identification = htonl(new); | ||
35 | } | ||
36 | EXPORT_SYMBOL(ipv6_select_ident); | ||
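
The identification fallback above is a lock-free counter that skips zero on wraparound, so the fragment ID is never 0 when no inet_peer is available. A minimal userspace sketch of the same compare-and-swap loop, using C11 atomics instead of the kernel's atomic_t (illustrative only):

    #include <stdint.h>
    #include <stdatomic.h>

    static _Atomic uint32_t frag_id;

    static uint32_t next_frag_id(void)
    {
            uint32_t old, new;

            do {
                    old = atomic_load(&frag_id);
                    new = old + 1;
                    if (new == 0)           /* skip 0 on wraparound */
                            new = 1;
            } while (!atomic_compare_exchange_weak(&frag_id, &old, new));

            return new;
    }
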
37 | |||
38 | int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) | ||
39 | { | ||
40 | u16 offset = sizeof(struct ipv6hdr); | ||
41 | struct ipv6_opt_hdr *exthdr = | ||
42 | (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); | ||
43 | unsigned int packet_len = skb->tail - skb->network_header; | ||
44 | int found_rhdr = 0; | ||
45 | *nexthdr = &ipv6_hdr(skb)->nexthdr; | ||
46 | |||
47 | while (offset + 1 <= packet_len) { | ||
48 | |||
49 | switch (**nexthdr) { | ||
50 | |||
51 | case NEXTHDR_HOP: | ||
52 | break; | ||
53 | case NEXTHDR_ROUTING: | ||
54 | found_rhdr = 1; | ||
55 | break; | ||
56 | case NEXTHDR_DEST: | ||
57 | #if IS_ENABLED(CONFIG_IPV6_MIP6) | ||
58 | if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) | ||
59 | break; | ||
60 | #endif | ||
61 | if (found_rhdr) | ||
62 | return offset; | ||
63 | break; | ||
64 | default : | ||
65 | return offset; | ||
66 | } | ||
67 | |||
68 | offset += ipv6_optlen(exthdr); | ||
69 | *nexthdr = &exthdr->nexthdr; | ||
70 | exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + | ||
71 | offset); | ||
72 | } | ||
73 | |||
74 | return offset; | ||
75 | } | ||
76 | EXPORT_SYMBOL(ip6_find_1stfragopt); | ||
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c index 053082dfc93e..22d1bd4670da 100644 --- a/net/ipv6/protocol.c +++ b/net/ipv6/protocol.c | |||
@@ -25,7 +25,9 @@ | |||
25 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
26 | #include <net/protocol.h> | 26 | #include <net/protocol.h> |
27 | 27 | ||
28 | #if IS_ENABLED(CONFIG_IPV6) | ||
28 | const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly; | 29 | const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly; |
30 | EXPORT_SYMBOL(inet6_protos); | ||
29 | 31 | ||
30 | int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) | 32 | int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) |
31 | { | 33 | { |
@@ -50,3 +52,26 @@ int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol | |||
50 | return ret; | 52 | return ret; |
51 | } | 53 | } |
52 | EXPORT_SYMBOL(inet6_del_protocol); | 54 | EXPORT_SYMBOL(inet6_del_protocol); |
55 | #endif | ||
56 | |||
57 | const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS] __read_mostly; | ||
58 | |||
59 | int inet6_add_offload(const struct net_offload *prot, unsigned char protocol) | ||
60 | { | ||
61 | return !cmpxchg((const struct net_offload **)&inet6_offloads[protocol], | ||
62 | NULL, prot) ? 0 : -1; | ||
63 | } | ||
64 | EXPORT_SYMBOL(inet6_add_offload); | ||
65 | |||
66 | int inet6_del_offload(const struct net_offload *prot, unsigned char protocol) | ||
67 | { | ||
68 | int ret; | ||
69 | |||
70 | ret = (cmpxchg((const struct net_offload **)&inet6_offloads[protocol], | ||
71 | prot, NULL) == prot) ? 0 : -1; | ||
72 | |||
73 | synchronize_net(); | ||
74 | |||
75 | return ret; | ||
76 | } | ||
77 | EXPORT_SYMBOL(inet6_del_offload); | ||
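
inet6_add_offload() claims the per-protocol slot with a single cmpxchg, so registration succeeds only if the slot was still NULL, and inet6_del_offload() clears it only if it still holds the same pointer, followed by synchronize_net() so RCU readers that already dereferenced the old entry can finish. A small sketch of the same claim/release pattern outside the kernel, using GCC/Clang's __sync_val_compare_and_swap as a stand-in for cmpxchg (illustrative only):

    #include <stddef.h>

    static void *slot;   /* one registration slot, like inet6_offloads[proto] */

    /* 0 on success, -1 if another handler already owns the slot */
    static int slot_add(void *handler)
    {
            return __sync_val_compare_and_swap(&slot, NULL, handler) == NULL ? 0 : -1;
    }

    /* 0 on success, -1 if the slot holds a different handler */
    static int slot_del(void *handler)
    {
            return __sync_val_compare_and_swap(&slot, handler, NULL) == handler ? 0 : -1;
    }
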
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index d8e95c77db99..6cd29b1e8b92 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -50,7 +50,7 @@ | |||
50 | #include <net/udp.h> | 50 | #include <net/udp.h> |
51 | #include <net/inet_common.h> | 51 | #include <net/inet_common.h> |
52 | #include <net/tcp_states.h> | 52 | #include <net/tcp_states.h> |
53 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 53 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
54 | #include <net/mip6.h> | 54 | #include <net/mip6.h> |
55 | #endif | 55 | #endif |
56 | #include <linux/mroute6.h> | 56 | #include <linux/mroute6.h> |
@@ -123,7 +123,7 @@ static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb) | |||
123 | return 1; | 123 | return 1; |
124 | } | 124 | } |
125 | 125 | ||
126 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 126 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
127 | typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb); | 127 | typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb); |
128 | 128 | ||
129 | static mh_filter_t __rcu *mh_filter __read_mostly; | 129 | static mh_filter_t __rcu *mh_filter __read_mostly; |
@@ -184,7 +184,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) | |||
184 | filtered = icmpv6_filter(sk, skb); | 184 | filtered = icmpv6_filter(sk, skb); |
185 | break; | 185 | break; |
186 | 186 | ||
187 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 187 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
188 | case IPPROTO_MH: | 188 | case IPPROTO_MH: |
189 | { | 189 | { |
190 | /* XXX: To validate MH only once for each packet, | 190 | /* XXX: To validate MH only once for each packet, |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index da8a4e301b1b..e5253ec9e0fc 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -616,6 +616,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net) | |||
616 | table[0].data = &net->ipv6.frags.high_thresh; | 616 | table[0].data = &net->ipv6.frags.high_thresh; |
617 | table[1].data = &net->ipv6.frags.low_thresh; | 617 | table[1].data = &net->ipv6.frags.low_thresh; |
618 | table[2].data = &net->ipv6.frags.timeout; | 618 | table[2].data = &net->ipv6.frags.timeout; |
619 | |||
620 | /* Don't export sysctls to unprivileged users */ | ||
621 | if (net->user_ns != &init_user_ns) | ||
622 | table[0].procname = NULL; | ||
619 | } | 623 | } |
620 | 624 | ||
621 | hdr = register_net_sysctl(net, "net/ipv6", table); | 625 | hdr = register_net_sysctl(net, "net/ipv6", table); |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b1e6cf0b95fd..8f124f575116 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <net/xfrm.h> | 57 | #include <net/xfrm.h> |
58 | #include <net/netevent.h> | 58 | #include <net/netevent.h> |
59 | #include <net/netlink.h> | 59 | #include <net/netlink.h> |
60 | #include <net/nexthop.h> | ||
60 | 61 | ||
61 | #include <asm/uaccess.h> | 62 | #include <asm/uaccess.h> |
62 | 63 | ||
@@ -289,6 +290,8 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net, | |||
289 | memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); | 290 | memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); |
290 | rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); | 291 | rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); |
291 | rt->rt6i_genid = rt_genid(net); | 292 | rt->rt6i_genid = rt_genid(net); |
293 | INIT_LIST_HEAD(&rt->rt6i_siblings); | ||
294 | rt->rt6i_nsiblings = 0; | ||
292 | } | 295 | } |
293 | return rt; | 296 | return rt; |
294 | } | 297 | } |
@@ -318,13 +321,6 @@ static void ip6_dst_destroy(struct dst_entry *dst) | |||
318 | } | 321 | } |
319 | } | 322 | } |
320 | 323 | ||
321 | static atomic_t __rt6_peer_genid = ATOMIC_INIT(0); | ||
322 | |||
323 | static u32 rt6_peer_genid(void) | ||
324 | { | ||
325 | return atomic_read(&__rt6_peer_genid); | ||
326 | } | ||
327 | |||
328 | void rt6_bind_peer(struct rt6_info *rt, int create) | 324 | void rt6_bind_peer(struct rt6_info *rt, int create) |
329 | { | 325 | { |
330 | struct inet_peer_base *base; | 326 | struct inet_peer_base *base; |
@@ -338,8 +334,6 @@ void rt6_bind_peer(struct rt6_info *rt, int create) | |||
338 | if (peer) { | 334 | if (peer) { |
339 | if (!rt6_set_peer(rt, peer)) | 335 | if (!rt6_set_peer(rt, peer)) |
340 | inet_putpeer(peer); | 336 | inet_putpeer(peer); |
341 | else | ||
342 | rt->rt6i_peer_genid = rt6_peer_genid(); | ||
343 | } | 337 | } |
344 | } | 338 | } |
345 | 339 | ||
@@ -385,6 +379,69 @@ static bool rt6_need_strict(const struct in6_addr *daddr) | |||
385 | (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); | 379 | (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); |
386 | } | 380 | } |
387 | 381 | ||
382 | /* Multipath route selection: | ||
383 | * Hash-based function using packet header and flowlabel. | ||
384 | * Adapted from fib_info_hashfn() | ||
385 | */ | ||
386 | static int rt6_info_hash_nhsfn(unsigned int candidate_count, | ||
387 | const struct flowi6 *fl6) | ||
388 | { | ||
389 | unsigned int val = fl6->flowi6_proto; | ||
390 | |||
391 | val ^= (__force u32)fl6->daddr.s6_addr32[0]; | ||
392 | val ^= (__force u32)fl6->daddr.s6_addr32[1]; | ||
393 | val ^= (__force u32)fl6->daddr.s6_addr32[2]; | ||
394 | val ^= (__force u32)fl6->daddr.s6_addr32[3]; | ||
395 | |||
396 | val ^= (__force u32)fl6->saddr.s6_addr32[0]; | ||
397 | val ^= (__force u32)fl6->saddr.s6_addr32[1]; | ||
398 | val ^= (__force u32)fl6->saddr.s6_addr32[2]; | ||
399 | val ^= (__force u32)fl6->saddr.s6_addr32[3]; | ||
400 | |||
401 | /* Works only if this is not encapsulated */ | ||
402 | switch (fl6->flowi6_proto) { | ||
403 | case IPPROTO_UDP: | ||
404 | case IPPROTO_TCP: | ||
405 | case IPPROTO_SCTP: | ||
406 | val ^= (__force u16)fl6->fl6_sport; | ||
407 | val ^= (__force u16)fl6->fl6_dport; | ||
408 | break; | ||
409 | |||
410 | case IPPROTO_ICMPV6: | ||
411 | val ^= (__force u16)fl6->fl6_icmp_type; | ||
412 | val ^= (__force u16)fl6->fl6_icmp_code; | ||
413 | break; | ||
414 | } | ||
415 | /* RFC 6438 recommends using the flow label */ | ||
416 | val ^= (__force u32)fl6->flowlabel; | ||
417 | |||
418 | /* Perhaps we need to tune this function? */ | ||
419 | val = val ^ (val >> 7) ^ (val >> 12); | ||
420 | return val % candidate_count; | ||
421 | } | ||
422 | |||
423 | static struct rt6_info *rt6_multipath_select(struct rt6_info *match, | ||
424 | struct flowi6 *fl6) | ||
425 | { | ||
426 | struct rt6_info *sibling, *next_sibling; | ||
427 | int route_choosen; | ||
428 | |||
429 | route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6); | ||
430 | /* Don't change the route if route_choosen == 0 | ||
431 | * (the sibling list does not include ourselves) | ||
432 | */ | ||
433 | if (route_choosen) | ||
434 | list_for_each_entry_safe(sibling, next_sibling, | ||
435 | &match->rt6i_siblings, rt6i_siblings) { | ||
436 | route_choosen--; | ||
437 | if (route_choosen == 0) { | ||
438 | match = sibling; | ||
439 | break; | ||
440 | } | ||
441 | } | ||
442 | return match; | ||
443 | } | ||
444 | |||
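
The selection above is stateless: a given flow always hashes to the same sibling, so packets of one connection stay on one nexthop. The standalone userspace sketch below (not kernel code; the field values are made up for illustration) mirrors rt6_info_hash_nhsfn() to show how the addresses, ports and flow label map to a nexthop index:

        #include <stdint.h>
        #include <stdio.h>

        /* XOR-fold the addresses, ports (or ICMPv6 type/code) and the
         * RFC 6438 flow label, then reduce modulo the number of
         * candidates (the matched route plus its siblings).
         */
        static unsigned int hash_nexthop(unsigned int candidates, uint8_t proto,
                                         const uint32_t saddr[4],
                                         const uint32_t daddr[4],
                                         uint16_t sport, uint16_t dport,
                                         uint32_t flowlabel)
        {
                unsigned int val = proto;
                int i;

                for (i = 0; i < 4; i++)
                        val ^= saddr[i] ^ daddr[i];
                val ^= sport ^ dport;           /* TCP/UDP/SCTP only */
                val ^= flowlabel;
                val = val ^ (val >> 7) ^ (val >> 12);
                return val % candidates;
        }

        int main(void)
        {
                uint32_t src[4] = { 0x20010db8, 0x0, 0x0, 0x1 };
                uint32_t dst[4] = { 0x20010db8, 0x0, 0x0, 0x2 };

                /* Two equal-cost routes installed: index 0 keeps the matched
                 * route, index 1 picks its first sibling.
                 */
                printf("nexthop index: %u\n",
                       hash_nexthop(2, 6 /* TCP */, src, dst, 40000, 443, 0));
                return 0;
        }
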
388 | /* | 445 | /* |
389 | * Route lookup. Any table->tb6_lock is implied. | 446 | * Route lookup. Any table->tb6_lock is implied. |
390 | */ | 447 | */ |
@@ -666,7 +723,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, | |||
666 | else | 723 | else |
667 | rt6_set_expires(rt, jiffies + HZ * lifetime); | 724 | rt6_set_expires(rt, jiffies + HZ * lifetime); |
668 | 725 | ||
669 | dst_release(&rt->dst); | 726 | ip6_rt_put(rt); |
670 | } | 727 | } |
671 | return 0; | 728 | return 0; |
672 | } | 729 | } |
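
The dst_release(&rt->dst) to ip6_rt_put(rt) conversions throughout this file are mechanical. From memory (worth checking against include/net/ip6_route.h), ip6_rt_put() is just an inline wrapper that takes the rt6_info directly:

        static inline void ip6_rt_put(struct rt6_info *rt)
        {
                /* dst_release() accepts NULL; the &rt->dst form stays valid
                 * because dst is assumed to be the first member of rt6_info.
                 */
                BUILD_BUG_ON(offsetof(struct rt6_info, dst) != 0);
                dst_release(&rt->dst);
        }
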
@@ -702,6 +759,8 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net, | |||
702 | restart: | 759 | restart: |
703 | rt = fn->leaf; | 760 | rt = fn->leaf; |
704 | rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags); | 761 | rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags); |
762 | if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0) | ||
763 | rt = rt6_multipath_select(rt, fl6); | ||
705 | BACKTRACK(net, &fl6->saddr); | 764 | BACKTRACK(net, &fl6->saddr); |
706 | out: | 765 | out: |
707 | dst_use(&rt->dst, jiffies); | 766 | dst_use(&rt->dst, jiffies); |
@@ -863,7 +922,8 @@ restart_2: | |||
863 | 922 | ||
864 | restart: | 923 | restart: |
865 | rt = rt6_select(fn, oif, strict | reachable); | 924 | rt = rt6_select(fn, oif, strict | reachable); |
866 | 925 | if (rt->rt6i_nsiblings && oif == 0) | |
926 | rt = rt6_multipath_select(rt, fl6); | ||
867 | BACKTRACK(net, &fl6->saddr); | 927 | BACKTRACK(net, &fl6->saddr); |
868 | if (rt == net->ipv6.ip6_null_entry || | 928 | if (rt == net->ipv6.ip6_null_entry || |
869 | rt->rt6i_flags & RTF_CACHE) | 929 | rt->rt6i_flags & RTF_CACHE) |
@@ -879,7 +939,7 @@ restart: | |||
879 | else | 939 | else |
880 | goto out2; | 940 | goto out2; |
881 | 941 | ||
882 | dst_release(&rt->dst); | 942 | ip6_rt_put(rt); |
883 | rt = nrt ? : net->ipv6.ip6_null_entry; | 943 | rt = nrt ? : net->ipv6.ip6_null_entry; |
884 | 944 | ||
885 | dst_hold(&rt->dst); | 945 | dst_hold(&rt->dst); |
@@ -896,7 +956,7 @@ restart: | |||
896 | * Race condition! In the gap, when table->tb6_lock was | 956 | * Race condition! In the gap, when table->tb6_lock was |
897 | * released someone could insert this route. Relookup. | 957 | * released someone could insert this route. Relookup. |
898 | */ | 958 | */ |
899 | dst_release(&rt->dst); | 959 | ip6_rt_put(rt); |
900 | goto relookup; | 960 | goto relookup; |
901 | 961 | ||
902 | out: | 962 | out: |
@@ -1030,14 +1090,9 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) | |||
1030 | if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev))) | 1090 | if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev))) |
1031 | return NULL; | 1091 | return NULL; |
1032 | 1092 | ||
1033 | if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) { | 1093 | if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) |
1034 | if (rt->rt6i_peer_genid != rt6_peer_genid()) { | ||
1035 | if (!rt6_has_peer(rt)) | ||
1036 | rt6_bind_peer(rt, 0); | ||
1037 | rt->rt6i_peer_genid = rt6_peer_genid(); | ||
1038 | } | ||
1039 | return dst; | 1094 | return dst; |
1040 | } | 1095 | |
1041 | return NULL; | 1096 | return NULL; |
1042 | } | 1097 | } |
1043 | 1098 | ||
@@ -1316,12 +1371,6 @@ out: | |||
1316 | return entries > rt_max_size; | 1371 | return entries > rt_max_size; |
1317 | } | 1372 | } |
1318 | 1373 | ||
1319 | /* Clean host part of a prefix. Not necessary in radix tree, | ||
1320 | but results in cleaner routing tables. | ||
1321 | |||
1322 | Remove it only when all the things will work! | ||
1323 | */ | ||
1324 | |||
1325 | int ip6_dst_hoplimit(struct dst_entry *dst) | 1374 | int ip6_dst_hoplimit(struct dst_entry *dst) |
1326 | { | 1375 | { |
1327 | int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); | 1376 | int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); |
@@ -1507,7 +1556,7 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1507 | goto out; | 1556 | goto out; |
1508 | if (dev) { | 1557 | if (dev) { |
1509 | if (dev != grt->dst.dev) { | 1558 | if (dev != grt->dst.dev) { |
1510 | dst_release(&grt->dst); | 1559 | ip6_rt_put(grt); |
1511 | goto out; | 1560 | goto out; |
1512 | } | 1561 | } |
1513 | } else { | 1562 | } else { |
@@ -1518,7 +1567,7 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1518 | } | 1567 | } |
1519 | if (!(grt->rt6i_flags & RTF_GATEWAY)) | 1568 | if (!(grt->rt6i_flags & RTF_GATEWAY)) |
1520 | err = 0; | 1569 | err = 0; |
1521 | dst_release(&grt->dst); | 1570 | ip6_rt_put(grt); |
1522 | 1571 | ||
1523 | if (err) | 1572 | if (err) |
1524 | goto out; | 1573 | goto out; |
@@ -1604,7 +1653,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) | |||
1604 | write_unlock_bh(&table->tb6_lock); | 1653 | write_unlock_bh(&table->tb6_lock); |
1605 | 1654 | ||
1606 | out: | 1655 | out: |
1607 | dst_release(&rt->dst); | 1656 | ip6_rt_put(rt); |
1608 | return err; | 1657 | return err; |
1609 | } | 1658 | } |
1610 | 1659 | ||
@@ -1987,7 +2036,7 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
1987 | switch(cmd) { | 2036 | switch(cmd) { |
1988 | case SIOCADDRT: /* Add a route */ | 2037 | case SIOCADDRT: /* Add a route */ |
1989 | case SIOCDELRT: /* Delete a route */ | 2038 | case SIOCDELRT: /* Delete a route */ |
1990 | if (!capable(CAP_NET_ADMIN)) | 2039 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1991 | return -EPERM; | 2040 | return -EPERM; |
1992 | err = copy_from_user(&rtmsg, arg, | 2041 | err = copy_from_user(&rtmsg, arg, |
1993 | sizeof(struct in6_rtmsg)); | 2042 | sizeof(struct in6_rtmsg)); |
@@ -2249,6 +2298,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { | |||
2249 | [RTA_IIF] = { .type = NLA_U32 }, | 2298 | [RTA_IIF] = { .type = NLA_U32 }, |
2250 | [RTA_PRIORITY] = { .type = NLA_U32 }, | 2299 | [RTA_PRIORITY] = { .type = NLA_U32 }, |
2251 | [RTA_METRICS] = { .type = NLA_NESTED }, | 2300 | [RTA_METRICS] = { .type = NLA_NESTED }, |
2301 | [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, | ||
2252 | }; | 2302 | }; |
2253 | 2303 | ||
2254 | static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, | 2304 | static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, |
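
RTA_MULTIPATH, accepted by the policy above, carries a sequence of struct rtnexthop blocks, each optionally followed by per-nexthop attributes such as RTA_GATEWAY; rtm_to_fib6_config() below merely records the raw payload as fc_mp/fc_mp_len. A userspace sketch of packing one such nexthop (illustrative only; the surrounding RTM_NEWROUTE message and buffer sizing are omitted):

        #include <string.h>
        #include <linux/rtnetlink.h>
        #include <netinet/in.h>

        /* Pack one nexthop (ifindex + IPv6 gateway) into an RTA_MULTIPATH
         * payload buffer; returns the number of bytes consumed.
         */
        static int pack_nexthop(void *buf, int ifindex, const struct in6_addr *gw)
        {
                struct rtnexthop *rtnh = buf;
                struct rtattr *rta;

                rtnh->rtnh_flags = 0;
                rtnh->rtnh_hops = 0;
                rtnh->rtnh_ifindex = ifindex;

                /* RTA_GATEWAY follows the rtnexthop header */
                rta = (struct rtattr *)((char *)rtnh + RTNH_LENGTH(0));
                rta->rta_type = RTA_GATEWAY;
                rta->rta_len = RTA_LENGTH(sizeof(*gw));
                memcpy(RTA_DATA(rta), gw, sizeof(*gw));

                rtnh->rtnh_len = RTNH_LENGTH(0) + RTA_ALIGN(rta->rta_len);
                return RTNH_ALIGN(rtnh->rtnh_len);
        }
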
@@ -2326,11 +2376,71 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
2326 | if (tb[RTA_TABLE]) | 2376 | if (tb[RTA_TABLE]) |
2327 | cfg->fc_table = nla_get_u32(tb[RTA_TABLE]); | 2377 | cfg->fc_table = nla_get_u32(tb[RTA_TABLE]); |
2328 | 2378 | ||
2379 | if (tb[RTA_MULTIPATH]) { | ||
2380 | cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]); | ||
2381 | cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); | ||
2382 | } | ||
2383 | |||
2329 | err = 0; | 2384 | err = 0; |
2330 | errout: | 2385 | errout: |
2331 | return err; | 2386 | return err; |
2332 | } | 2387 | } |
2333 | 2388 | ||
2389 | static int ip6_route_multipath(struct fib6_config *cfg, int add) | ||
2390 | { | ||
2391 | struct fib6_config r_cfg; | ||
2392 | struct rtnexthop *rtnh; | ||
2393 | int remaining; | ||
2394 | int attrlen; | ||
2395 | int err = 0, last_err = 0; | ||
2396 | |||
2397 | beginning: | ||
2398 | rtnh = (struct rtnexthop *)cfg->fc_mp; | ||
2399 | remaining = cfg->fc_mp_len; | ||
2400 | |||
2401 | /* Parse a Multipath Entry */ | ||
2402 | while (rtnh_ok(rtnh, remaining)) { | ||
2403 | memcpy(&r_cfg, cfg, sizeof(*cfg)); | ||
2404 | if (rtnh->rtnh_ifindex) | ||
2405 | r_cfg.fc_ifindex = rtnh->rtnh_ifindex; | ||
2406 | |||
2407 | attrlen = rtnh_attrlen(rtnh); | ||
2408 | if (attrlen > 0) { | ||
2409 | struct nlattr *nla, *attrs = rtnh_attrs(rtnh); | ||
2410 | |||
2411 | nla = nla_find(attrs, attrlen, RTA_GATEWAY); | ||
2412 | if (nla) { | ||
2413 | nla_memcpy(&r_cfg.fc_gateway, nla, 16); | ||
2414 | r_cfg.fc_flags |= RTF_GATEWAY; | ||
2415 | } | ||
2416 | } | ||
2417 | err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg); | ||
2418 | if (err) { | ||
2419 | last_err = err; | ||
2420 | /* If we are trying to remove a route, do not stop the | ||
2421 | * loop when ip6_route_del() fails (because next hop is | ||
2422 | * already gone); we should try to remove all next hops. | ||
2423 | */ | ||
2424 | if (add) { | ||
2425 | /* If add fails, we should try to delete all | ||
2426 | * next hops that have been already added. | ||
2427 | */ | ||
2428 | add = 0; | ||
2429 | goto beginning; | ||
2430 | } | ||
2431 | } | ||
2432 | /* Because each route is added as a single route, we remove | ||
2433 | * this flag after the first nexthop (if there is a collision, | ||
2434 | * we have already failed to add the first nexthop: | ||
2435 | * fib6_add_rt2node() has rejected it). | ||
2436 | */ | ||
2437 | cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL; | ||
2438 | rtnh = rtnh_next(rtnh, &remaining); | ||
2439 | } | ||
2440 | |||
2441 | return last_err; | ||
2442 | } | ||
2443 | |||
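
ip6_route_multipath() walks that payload with the in-kernel rtnh_ok()/rtnh_next() helpers and, if any add fails, restarts the loop in delete mode so already-installed nexthops are rolled back. Userspace can walk the same payload with the uapi macros; a small sketch (data/len assumed to come from a received RTA_MULTIPATH attribute):

        #include <stdio.h>
        #include <linux/rtnetlink.h>

        static void walk_multipath(void *data, int len)
        {
                struct rtnexthop *rtnh = data;

                while (RTNH_OK(rtnh, len)) {
                        printf("nexthop: ifindex %d, attr bytes %d\n",
                               rtnh->rtnh_ifindex,
                               (int)(rtnh->rtnh_len - RTNH_LENGTH(0)));
                        /* step to the next aligned rtnexthop block */
                        len -= RTNH_ALIGN(rtnh->rtnh_len);
                        rtnh = RTNH_NEXT(rtnh);
                }
        }
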
2334 | static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | 2444 | static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) |
2335 | { | 2445 | { |
2336 | struct fib6_config cfg; | 2446 | struct fib6_config cfg; |
@@ -2340,7 +2450,10 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *a | |||
2340 | if (err < 0) | 2450 | if (err < 0) |
2341 | return err; | 2451 | return err; |
2342 | 2452 | ||
2343 | return ip6_route_del(&cfg); | 2453 | if (cfg.fc_mp) |
2454 | return ip6_route_multipath(&cfg, 0); | ||
2455 | else | ||
2456 | return ip6_route_del(&cfg); | ||
2344 | } | 2457 | } |
2345 | 2458 | ||
2346 | static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | 2459 | static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) |
@@ -2352,7 +2465,10 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *a | |||
2352 | if (err < 0) | 2465 | if (err < 0) |
2353 | return err; | 2466 | return err; |
2354 | 2467 | ||
2355 | return ip6_route_add(&cfg); | 2468 | if (cfg.fc_mp) |
2469 | return ip6_route_multipath(&cfg, 1); | ||
2470 | else | ||
2471 | return ip6_route_add(&cfg); | ||
2356 | } | 2472 | } |
2357 | 2473 | ||
2358 | static inline size_t rt6_nlmsg_size(void) | 2474 | static inline size_t rt6_nlmsg_size(void) |
@@ -2596,7 +2712,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2596 | 2712 | ||
2597 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); | 2713 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
2598 | if (!skb) { | 2714 | if (!skb) { |
2599 | dst_release(&rt->dst); | 2715 | ip6_rt_put(rt); |
2600 | err = -ENOBUFS; | 2716 | err = -ENOBUFS; |
2601 | goto errout; | 2717 | goto errout; |
2602 | } | 2718 | } |
@@ -2873,6 +2989,10 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net) | |||
2873 | table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires; | 2989 | table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires; |
2874 | table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; | 2990 | table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; |
2875 | table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; | 2991 | table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; |
2992 | |||
2993 | /* Don't export sysctls to unprivileged users */ | ||
2994 | if (net->user_ns != &init_user_ns) | ||
2995 | table[0].procname = NULL; | ||
2876 | } | 2996 | } |
2877 | 2997 | ||
2878 | return table; | 2998 | return table; |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 3ed54ffd8d50..cfba99b2c2a4 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -65,9 +65,14 @@ | |||
65 | #define HASH_SIZE 16 | 65 | #define HASH_SIZE 16 |
66 | #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) | 66 | #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) |
67 | 67 | ||
68 | static bool log_ecn_error = true; | ||
69 | module_param(log_ecn_error, bool, 0644); | ||
70 | MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); | ||
71 | |||
68 | static int ipip6_tunnel_init(struct net_device *dev); | 72 | static int ipip6_tunnel_init(struct net_device *dev); |
69 | static void ipip6_tunnel_setup(struct net_device *dev); | 73 | static void ipip6_tunnel_setup(struct net_device *dev); |
70 | static void ipip6_dev_free(struct net_device *dev); | 74 | static void ipip6_dev_free(struct net_device *dev); |
75 | static struct rtnl_link_ops sit_link_ops __read_mostly; | ||
71 | 76 | ||
72 | static int sit_net_id __read_mostly; | 77 | static int sit_net_id __read_mostly; |
73 | struct sit_net { | 78 | struct sit_net { |
@@ -80,22 +85,6 @@ struct sit_net { | |||
80 | struct net_device *fb_tunnel_dev; | 85 | struct net_device *fb_tunnel_dev; |
81 | }; | 86 | }; |
82 | 87 | ||
83 | /* | ||
84 | * Locking : hash tables are protected by RCU and RTNL | ||
85 | */ | ||
86 | |||
87 | #define for_each_ip_tunnel_rcu(start) \ | ||
88 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | ||
89 | |||
90 | /* often modified stats are per cpu, other are shared (netdev->stats) */ | ||
91 | struct pcpu_tstats { | ||
92 | u64 rx_packets; | ||
93 | u64 rx_bytes; | ||
94 | u64 tx_packets; | ||
95 | u64 tx_bytes; | ||
96 | struct u64_stats_sync syncp; | ||
97 | }; | ||
98 | |||
99 | static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev, | 88 | static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev, |
100 | struct rtnl_link_stats64 *tot) | 89 | struct rtnl_link_stats64 *tot) |
101 | { | 90 | { |
@@ -121,6 +110,7 @@ static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev, | |||
121 | } | 110 | } |
122 | 111 | ||
123 | tot->rx_errors = dev->stats.rx_errors; | 112 | tot->rx_errors = dev->stats.rx_errors; |
113 | tot->rx_frame_errors = dev->stats.rx_frame_errors; | ||
124 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; | 114 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; |
125 | tot->tx_carrier_errors = dev->stats.tx_carrier_errors; | 115 | tot->tx_carrier_errors = dev->stats.tx_carrier_errors; |
126 | tot->tx_dropped = dev->stats.tx_dropped; | 116 | tot->tx_dropped = dev->stats.tx_dropped; |
@@ -141,20 +131,20 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net, | |||
141 | struct ip_tunnel *t; | 131 | struct ip_tunnel *t; |
142 | struct sit_net *sitn = net_generic(net, sit_net_id); | 132 | struct sit_net *sitn = net_generic(net, sit_net_id); |
143 | 133 | ||
144 | for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) { | 134 | for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) { |
145 | if (local == t->parms.iph.saddr && | 135 | if (local == t->parms.iph.saddr && |
146 | remote == t->parms.iph.daddr && | 136 | remote == t->parms.iph.daddr && |
147 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && | 137 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
148 | (t->dev->flags & IFF_UP)) | 138 | (t->dev->flags & IFF_UP)) |
149 | return t; | 139 | return t; |
150 | } | 140 | } |
151 | for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) { | 141 | for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) { |
152 | if (remote == t->parms.iph.daddr && | 142 | if (remote == t->parms.iph.daddr && |
153 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && | 143 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
154 | (t->dev->flags & IFF_UP)) | 144 | (t->dev->flags & IFF_UP)) |
155 | return t; | 145 | return t; |
156 | } | 146 | } |
157 | for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) { | 147 | for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) { |
158 | if (local == t->parms.iph.saddr && | 148 | if (local == t->parms.iph.saddr && |
159 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && | 149 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
160 | (t->dev->flags & IFF_UP)) | 150 | (t->dev->flags & IFF_UP)) |
@@ -231,6 +221,37 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn) | |||
231 | #endif | 221 | #endif |
232 | } | 222 | } |
233 | 223 | ||
224 | static int ipip6_tunnel_create(struct net_device *dev) | ||
225 | { | ||
226 | struct ip_tunnel *t = netdev_priv(dev); | ||
227 | struct net *net = dev_net(dev); | ||
228 | struct sit_net *sitn = net_generic(net, sit_net_id); | ||
229 | int err; | ||
230 | |||
231 | err = ipip6_tunnel_init(dev); | ||
232 | if (err < 0) | ||
233 | goto out; | ||
234 | ipip6_tunnel_clone_6rd(dev, sitn); | ||
235 | |||
236 | if ((__force u16)t->parms.i_flags & SIT_ISATAP) | ||
237 | dev->priv_flags |= IFF_ISATAP; | ||
238 | |||
239 | err = register_netdevice(dev); | ||
240 | if (err < 0) | ||
241 | goto out; | ||
242 | |||
243 | strcpy(t->parms.name, dev->name); | ||
244 | dev->rtnl_link_ops = &sit_link_ops; | ||
245 | |||
246 | dev_hold(dev); | ||
247 | |||
248 | ipip6_tunnel_link(sitn, t); | ||
249 | return 0; | ||
250 | |||
251 | out: | ||
252 | return err; | ||
253 | } | ||
254 | |||
234 | static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, | 255 | static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, |
235 | struct ip_tunnel_parm *parms, int create) | 256 | struct ip_tunnel_parm *parms, int create) |
236 | { | 257 | { |
@@ -271,21 +292,9 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, | |||
271 | nt = netdev_priv(dev); | 292 | nt = netdev_priv(dev); |
272 | 293 | ||
273 | nt->parms = *parms; | 294 | nt->parms = *parms; |
274 | if (ipip6_tunnel_init(dev) < 0) | 295 | if (ipip6_tunnel_create(dev) < 0) |
275 | goto failed_free; | 296 | goto failed_free; |
276 | ipip6_tunnel_clone_6rd(dev, sitn); | ||
277 | 297 | ||
278 | if (parms->i_flags & SIT_ISATAP) | ||
279 | dev->priv_flags |= IFF_ISATAP; | ||
280 | |||
281 | if (register_netdevice(dev) < 0) | ||
282 | goto failed_free; | ||
283 | |||
284 | strcpy(nt->parms.name, dev->name); | ||
285 | |||
286 | dev_hold(dev); | ||
287 | |||
288 | ipip6_tunnel_link(sitn, nt); | ||
289 | return nt; | 298 | return nt; |
290 | 299 | ||
291 | failed_free: | 300 | failed_free: |
@@ -581,16 +590,11 @@ out: | |||
581 | return err; | 590 | return err; |
582 | } | 591 | } |
583 | 592 | ||
584 | static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb) | ||
585 | { | ||
586 | if (INET_ECN_is_ce(iph->tos)) | ||
587 | IP6_ECN_set_ce(ipv6_hdr(skb)); | ||
588 | } | ||
589 | |||
590 | static int ipip6_rcv(struct sk_buff *skb) | 593 | static int ipip6_rcv(struct sk_buff *skb) |
591 | { | 594 | { |
592 | const struct iphdr *iph; | 595 | const struct iphdr *iph; |
593 | struct ip_tunnel *tunnel; | 596 | struct ip_tunnel *tunnel; |
597 | int err; | ||
594 | 598 | ||
595 | if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) | 599 | if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) |
596 | goto out; | 600 | goto out; |
@@ -612,18 +616,27 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
612 | if ((tunnel->dev->priv_flags & IFF_ISATAP) && | 616 | if ((tunnel->dev->priv_flags & IFF_ISATAP) && |
613 | !isatap_chksrc(skb, iph, tunnel)) { | 617 | !isatap_chksrc(skb, iph, tunnel)) { |
614 | tunnel->dev->stats.rx_errors++; | 618 | tunnel->dev->stats.rx_errors++; |
615 | kfree_skb(skb); | 619 | goto out; |
616 | return 0; | 620 | } |
621 | |||
622 | __skb_tunnel_rx(skb, tunnel->dev); | ||
623 | |||
624 | err = IP_ECN_decapsulate(iph, skb); | ||
625 | if (unlikely(err)) { | ||
626 | if (log_ecn_error) | ||
627 | net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", | ||
628 | &iph->saddr, iph->tos); | ||
629 | if (err > 1) { | ||
630 | ++tunnel->dev->stats.rx_frame_errors; | ||
631 | ++tunnel->dev->stats.rx_errors; | ||
632 | goto out; | ||
633 | } | ||
617 | } | 634 | } |
618 | 635 | ||
619 | tstats = this_cpu_ptr(tunnel->dev->tstats); | 636 | tstats = this_cpu_ptr(tunnel->dev->tstats); |
620 | tstats->rx_packets++; | 637 | tstats->rx_packets++; |
621 | tstats->rx_bytes += skb->len; | 638 | tstats->rx_bytes += skb->len; |
622 | 639 | ||
623 | __skb_tunnel_rx(skb, tunnel->dev); | ||
624 | |||
625 | ipip6_ecn_decapsulate(iph, skb); | ||
626 | |||
627 | netif_rx(skb); | 640 | netif_rx(skb); |
628 | 641 | ||
629 | return 0; | 642 | return 0; |
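
The open-coded ipip6_ecn_decapsulate() is replaced by the shared IP_ECN_decapsulate() helper plus the log_ecn_error knob added at the top of the file. As used here, a return of 0 means nothing to report, a positive return means the outer header carried ECN bits although the inner packet was not-ECT (logged), and a value greater than 1 means the outer header was CE, so the packet is counted as an rx_frame_error and dropped. A standalone approximation of that policy (paraphrased from memory, not the exact helper in include/net/inet_ecn.h):

        #include <stdio.h>

        enum { NOT_ECT = 0, ECT_1 = 1, ECT_0 = 2, CE = 3 };

        /* 0: accept, 1: accept but log, 2: drop -- matching the err and
         * err > 1 checks in ipip6_rcv() above; *set_ce tells the caller
         * to mark the inner header CE.
         */
        static int ecn_decapsulate(int outer, int inner, int *set_ce)
        {
                *set_ce = 0;
                if (inner == NOT_ECT) {
                        if (outer == NOT_ECT)
                                return 0;
                        return outer == CE ? 2 : 1;
                }
                if (outer == CE)
                        *set_ce = 1;
                return 0;
        }

        int main(void)
        {
                int set_ce;

                printf("outer CE, inner not-ECT -> %d\n",
                       ecn_decapsulate(CE, NOT_ECT, &set_ce));
                return 0;
        }
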
@@ -683,7 +696,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
683 | struct net_device *dev) | 696 | struct net_device *dev) |
684 | { | 697 | { |
685 | struct ip_tunnel *tunnel = netdev_priv(dev); | 698 | struct ip_tunnel *tunnel = netdev_priv(dev); |
686 | struct pcpu_tstats *tstats; | ||
687 | const struct iphdr *tiph = &tunnel->parms.iph; | 699 | const struct iphdr *tiph = &tunnel->parms.iph; |
688 | const struct ipv6hdr *iph6 = ipv6_hdr(skb); | 700 | const struct ipv6hdr *iph6 = ipv6_hdr(skb); |
689 | u8 tos = tunnel->parms.iph.tos; | 701 | u8 tos = tunnel->parms.iph.tos; |
@@ -864,9 +876,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
864 | if ((iph->ttl = tiph->ttl) == 0) | 876 | if ((iph->ttl = tiph->ttl) == 0) |
865 | iph->ttl = iph6->hop_limit; | 877 | iph->ttl = iph6->hop_limit; |
866 | 878 | ||
867 | nf_reset(skb); | 879 | iptunnel_xmit(skb, dev); |
868 | tstats = this_cpu_ptr(dev->tstats); | ||
869 | __IPTUNNEL_XMIT(tstats, &dev->stats); | ||
870 | return NETDEV_TX_OK; | 880 | return NETDEV_TX_OK; |
871 | 881 | ||
872 | tx_error_icmp: | 882 | tx_error_icmp: |
@@ -914,6 +924,59 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) | |||
914 | dev->iflink = tunnel->parms.link; | 924 | dev->iflink = tunnel->parms.link; |
915 | } | 925 | } |
916 | 926 | ||
927 | static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p) | ||
928 | { | ||
929 | struct net *net = dev_net(t->dev); | ||
930 | struct sit_net *sitn = net_generic(net, sit_net_id); | ||
931 | |||
932 | ipip6_tunnel_unlink(sitn, t); | ||
933 | synchronize_net(); | ||
934 | t->parms.iph.saddr = p->iph.saddr; | ||
935 | t->parms.iph.daddr = p->iph.daddr; | ||
936 | memcpy(t->dev->dev_addr, &p->iph.saddr, 4); | ||
937 | memcpy(t->dev->broadcast, &p->iph.daddr, 4); | ||
938 | ipip6_tunnel_link(sitn, t); | ||
939 | t->parms.iph.ttl = p->iph.ttl; | ||
940 | t->parms.iph.tos = p->iph.tos; | ||
941 | if (t->parms.link != p->link) { | ||
942 | t->parms.link = p->link; | ||
943 | ipip6_tunnel_bind_dev(t->dev); | ||
944 | } | ||
945 | netdev_state_change(t->dev); | ||
946 | } | ||
947 | |||
948 | #ifdef CONFIG_IPV6_SIT_6RD | ||
949 | static int ipip6_tunnel_update_6rd(struct ip_tunnel *t, | ||
950 | struct ip_tunnel_6rd *ip6rd) | ||
951 | { | ||
952 | struct in6_addr prefix; | ||
953 | __be32 relay_prefix; | ||
954 | |||
955 | if (ip6rd->relay_prefixlen > 32 || | ||
956 | ip6rd->prefixlen + (32 - ip6rd->relay_prefixlen) > 64) | ||
957 | return -EINVAL; | ||
958 | |||
959 | ipv6_addr_prefix(&prefix, &ip6rd->prefix, ip6rd->prefixlen); | ||
960 | if (!ipv6_addr_equal(&prefix, &ip6rd->prefix)) | ||
961 | return -EINVAL; | ||
962 | if (ip6rd->relay_prefixlen) | ||
963 | relay_prefix = ip6rd->relay_prefix & | ||
964 | htonl(0xffffffffUL << | ||
965 | (32 - ip6rd->relay_prefixlen)); | ||
966 | else | ||
967 | relay_prefix = 0; | ||
968 | if (relay_prefix != ip6rd->relay_prefix) | ||
969 | return -EINVAL; | ||
970 | |||
971 | t->ip6rd.prefix = prefix; | ||
972 | t->ip6rd.relay_prefix = relay_prefix; | ||
973 | t->ip6rd.prefixlen = ip6rd->prefixlen; | ||
974 | t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen; | ||
975 | netdev_state_change(t->dev); | ||
976 | return 0; | ||
977 | } | ||
978 | #endif | ||
979 | |||
917 | static int | 980 | static int |
918 | ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | 981 | ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) |
919 | { | 982 | { |
@@ -966,7 +1029,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
966 | case SIOCADDTUNNEL: | 1029 | case SIOCADDTUNNEL: |
967 | case SIOCCHGTUNNEL: | 1030 | case SIOCCHGTUNNEL: |
968 | err = -EPERM; | 1031 | err = -EPERM; |
969 | if (!capable(CAP_NET_ADMIN)) | 1032 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
970 | goto done; | 1033 | goto done; |
971 | 1034 | ||
972 | err = -EFAULT; | 1035 | err = -EFAULT; |
@@ -995,28 +1058,13 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
995 | break; | 1058 | break; |
996 | } | 1059 | } |
997 | t = netdev_priv(dev); | 1060 | t = netdev_priv(dev); |
998 | ipip6_tunnel_unlink(sitn, t); | ||
999 | synchronize_net(); | ||
1000 | t->parms.iph.saddr = p.iph.saddr; | ||
1001 | t->parms.iph.daddr = p.iph.daddr; | ||
1002 | memcpy(dev->dev_addr, &p.iph.saddr, 4); | ||
1003 | memcpy(dev->broadcast, &p.iph.daddr, 4); | ||
1004 | ipip6_tunnel_link(sitn, t); | ||
1005 | netdev_state_change(dev); | ||
1006 | } | 1061 | } |
1062 | |||
1063 | ipip6_tunnel_update(t, &p); | ||
1007 | } | 1064 | } |
1008 | 1065 | ||
1009 | if (t) { | 1066 | if (t) { |
1010 | err = 0; | 1067 | err = 0; |
1011 | if (cmd == SIOCCHGTUNNEL) { | ||
1012 | t->parms.iph.ttl = p.iph.ttl; | ||
1013 | t->parms.iph.tos = p.iph.tos; | ||
1014 | if (t->parms.link != p.link) { | ||
1015 | t->parms.link = p.link; | ||
1016 | ipip6_tunnel_bind_dev(dev); | ||
1017 | netdev_state_change(dev); | ||
1018 | } | ||
1019 | } | ||
1020 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p))) | 1068 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p))) |
1021 | err = -EFAULT; | 1069 | err = -EFAULT; |
1022 | } else | 1070 | } else |
@@ -1025,7 +1073,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1025 | 1073 | ||
1026 | case SIOCDELTUNNEL: | 1074 | case SIOCDELTUNNEL: |
1027 | err = -EPERM; | 1075 | err = -EPERM; |
1028 | if (!capable(CAP_NET_ADMIN)) | 1076 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1029 | goto done; | 1077 | goto done; |
1030 | 1078 | ||
1031 | if (dev == sitn->fb_tunnel_dev) { | 1079 | if (dev == sitn->fb_tunnel_dev) { |
@@ -1058,7 +1106,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1058 | case SIOCDELPRL: | 1106 | case SIOCDELPRL: |
1059 | case SIOCCHGPRL: | 1107 | case SIOCCHGPRL: |
1060 | err = -EPERM; | 1108 | err = -EPERM; |
1061 | if (!capable(CAP_NET_ADMIN)) | 1109 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1062 | goto done; | 1110 | goto done; |
1063 | err = -EINVAL; | 1111 | err = -EINVAL; |
1064 | if (dev == sitn->fb_tunnel_dev) | 1112 | if (dev == sitn->fb_tunnel_dev) |
@@ -1087,7 +1135,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1087 | case SIOCCHG6RD: | 1135 | case SIOCCHG6RD: |
1088 | case SIOCDEL6RD: | 1136 | case SIOCDEL6RD: |
1089 | err = -EPERM; | 1137 | err = -EPERM; |
1090 | if (!capable(CAP_NET_ADMIN)) | 1138 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
1091 | goto done; | 1139 | goto done; |
1092 | 1140 | ||
1093 | err = -EFAULT; | 1141 | err = -EFAULT; |
@@ -1098,31 +1146,9 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1098 | t = netdev_priv(dev); | 1146 | t = netdev_priv(dev); |
1099 | 1147 | ||
1100 | if (cmd != SIOCDEL6RD) { | 1148 | if (cmd != SIOCDEL6RD) { |
1101 | struct in6_addr prefix; | 1149 | err = ipip6_tunnel_update_6rd(t, &ip6rd); |
1102 | __be32 relay_prefix; | 1150 | if (err < 0) |
1103 | |||
1104 | err = -EINVAL; | ||
1105 | if (ip6rd.relay_prefixlen > 32 || | ||
1106 | ip6rd.prefixlen + (32 - ip6rd.relay_prefixlen) > 64) | ||
1107 | goto done; | ||
1108 | |||
1109 | ipv6_addr_prefix(&prefix, &ip6rd.prefix, | ||
1110 | ip6rd.prefixlen); | ||
1111 | if (!ipv6_addr_equal(&prefix, &ip6rd.prefix)) | ||
1112 | goto done; | ||
1113 | if (ip6rd.relay_prefixlen) | ||
1114 | relay_prefix = ip6rd.relay_prefix & | ||
1115 | htonl(0xffffffffUL << | ||
1116 | (32 - ip6rd.relay_prefixlen)); | ||
1117 | else | ||
1118 | relay_prefix = 0; | ||
1119 | if (relay_prefix != ip6rd.relay_prefix) | ||
1120 | goto done; | 1151 | goto done; |
1121 | |||
1122 | t->ip6rd.prefix = prefix; | ||
1123 | t->ip6rd.relay_prefix = relay_prefix; | ||
1124 | t->ip6rd.prefixlen = ip6rd.prefixlen; | ||
1125 | t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen; | ||
1126 | } else | 1152 | } else |
1127 | ipip6_tunnel_clone_6rd(dev, sitn); | 1153 | ipip6_tunnel_clone_6rd(dev, sitn); |
1128 | 1154 | ||
@@ -1216,6 +1242,239 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) | |||
1216 | return 0; | 1242 | return 0; |
1217 | } | 1243 | } |
1218 | 1244 | ||
1245 | static void ipip6_netlink_parms(struct nlattr *data[], | ||
1246 | struct ip_tunnel_parm *parms) | ||
1247 | { | ||
1248 | memset(parms, 0, sizeof(*parms)); | ||
1249 | |||
1250 | parms->iph.version = 4; | ||
1251 | parms->iph.protocol = IPPROTO_IPV6; | ||
1252 | parms->iph.ihl = 5; | ||
1253 | parms->iph.ttl = 64; | ||
1254 | |||
1255 | if (!data) | ||
1256 | return; | ||
1257 | |||
1258 | if (data[IFLA_IPTUN_LINK]) | ||
1259 | parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]); | ||
1260 | |||
1261 | if (data[IFLA_IPTUN_LOCAL]) | ||
1262 | parms->iph.saddr = nla_get_be32(data[IFLA_IPTUN_LOCAL]); | ||
1263 | |||
1264 | if (data[IFLA_IPTUN_REMOTE]) | ||
1265 | parms->iph.daddr = nla_get_be32(data[IFLA_IPTUN_REMOTE]); | ||
1266 | |||
1267 | if (data[IFLA_IPTUN_TTL]) { | ||
1268 | parms->iph.ttl = nla_get_u8(data[IFLA_IPTUN_TTL]); | ||
1269 | if (parms->iph.ttl) | ||
1270 | parms->iph.frag_off = htons(IP_DF); | ||
1271 | } | ||
1272 | |||
1273 | if (data[IFLA_IPTUN_TOS]) | ||
1274 | parms->iph.tos = nla_get_u8(data[IFLA_IPTUN_TOS]); | ||
1275 | |||
1276 | if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC])) | ||
1277 | parms->iph.frag_off = htons(IP_DF); | ||
1278 | |||
1279 | if (data[IFLA_IPTUN_FLAGS]) | ||
1280 | parms->i_flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]); | ||
1281 | } | ||
1282 | |||
1283 | #ifdef CONFIG_IPV6_SIT_6RD | ||
1284 | /* This function returns true when 6RD attributes are present in the nl msg */ | ||
1285 | static bool ipip6_netlink_6rd_parms(struct nlattr *data[], | ||
1286 | struct ip_tunnel_6rd *ip6rd) | ||
1287 | { | ||
1288 | bool ret = false; | ||
1289 | memset(ip6rd, 0, sizeof(*ip6rd)); | ||
1290 | |||
1291 | if (!data) | ||
1292 | return ret; | ||
1293 | |||
1294 | if (data[IFLA_IPTUN_6RD_PREFIX]) { | ||
1295 | ret = true; | ||
1296 | nla_memcpy(&ip6rd->prefix, data[IFLA_IPTUN_6RD_PREFIX], | ||
1297 | sizeof(struct in6_addr)); | ||
1298 | } | ||
1299 | |||
1300 | if (data[IFLA_IPTUN_6RD_RELAY_PREFIX]) { | ||
1301 | ret = true; | ||
1302 | ip6rd->relay_prefix = | ||
1303 | nla_get_be32(data[IFLA_IPTUN_6RD_RELAY_PREFIX]); | ||
1304 | } | ||
1305 | |||
1306 | if (data[IFLA_IPTUN_6RD_PREFIXLEN]) { | ||
1307 | ret = true; | ||
1308 | ip6rd->prefixlen = nla_get_u16(data[IFLA_IPTUN_6RD_PREFIXLEN]); | ||
1309 | } | ||
1310 | |||
1311 | if (data[IFLA_IPTUN_6RD_RELAY_PREFIXLEN]) { | ||
1312 | ret = true; | ||
1313 | ip6rd->relay_prefixlen = | ||
1314 | nla_get_u16(data[IFLA_IPTUN_6RD_RELAY_PREFIXLEN]); | ||
1315 | } | ||
1316 | |||
1317 | return ret; | ||
1318 | } | ||
1319 | #endif | ||
1320 | |||
1321 | static int ipip6_newlink(struct net *src_net, struct net_device *dev, | ||
1322 | struct nlattr *tb[], struct nlattr *data[]) | ||
1323 | { | ||
1324 | struct net *net = dev_net(dev); | ||
1325 | struct ip_tunnel *nt; | ||
1326 | #ifdef CONFIG_IPV6_SIT_6RD | ||
1327 | struct ip_tunnel_6rd ip6rd; | ||
1328 | #endif | ||
1329 | int err; | ||
1330 | |||
1331 | nt = netdev_priv(dev); | ||
1332 | ipip6_netlink_parms(data, &nt->parms); | ||
1333 | |||
1334 | if (ipip6_tunnel_locate(net, &nt->parms, 0)) | ||
1335 | return -EEXIST; | ||
1336 | |||
1337 | err = ipip6_tunnel_create(dev); | ||
1338 | if (err < 0) | ||
1339 | return err; | ||
1340 | |||
1341 | #ifdef CONFIG_IPV6_SIT_6RD | ||
1342 | if (ipip6_netlink_6rd_parms(data, &ip6rd)) | ||
1343 | err = ipip6_tunnel_update_6rd(nt, &ip6rd); | ||
1344 | #endif | ||
1345 | |||
1346 | return err; | ||
1347 | } | ||
1348 | |||
1349 | static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[], | ||
1350 | struct nlattr *data[]) | ||
1351 | { | ||
1352 | struct ip_tunnel *t; | ||
1353 | struct ip_tunnel_parm p; | ||
1354 | struct net *net = dev_net(dev); | ||
1355 | struct sit_net *sitn = net_generic(net, sit_net_id); | ||
1356 | #ifdef CONFIG_IPV6_SIT_6RD | ||
1357 | struct ip_tunnel_6rd ip6rd; | ||
1358 | #endif | ||
1359 | |||
1360 | if (dev == sitn->fb_tunnel_dev) | ||
1361 | return -EINVAL; | ||
1362 | |||
1363 | ipip6_netlink_parms(data, &p); | ||
1364 | |||
1365 | if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) || | ||
1366 | (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr)) | ||
1367 | return -EINVAL; | ||
1368 | |||
1369 | t = ipip6_tunnel_locate(net, &p, 0); | ||
1370 | |||
1371 | if (t) { | ||
1372 | if (t->dev != dev) | ||
1373 | return -EEXIST; | ||
1374 | } else | ||
1375 | t = netdev_priv(dev); | ||
1376 | |||
1377 | ipip6_tunnel_update(t, &p); | ||
1378 | |||
1379 | #ifdef CONFIG_IPV6_SIT_6RD | ||
1380 | if (ipip6_netlink_6rd_parms(data, &ip6rd)) | ||
1381 | return ipip6_tunnel_update_6rd(t, &ip6rd); | ||
1382 | #endif | ||
1383 | |||
1384 | return 0; | ||
1385 | } | ||
1386 | |||
1387 | static size_t ipip6_get_size(const struct net_device *dev) | ||
1388 | { | ||
1389 | return | ||
1390 | /* IFLA_IPTUN_LINK */ | ||
1391 | nla_total_size(4) + | ||
1392 | /* IFLA_IPTUN_LOCAL */ | ||
1393 | nla_total_size(4) + | ||
1394 | /* IFLA_IPTUN_REMOTE */ | ||
1395 | nla_total_size(4) + | ||
1396 | /* IFLA_IPTUN_TTL */ | ||
1397 | nla_total_size(1) + | ||
1398 | /* IFLA_IPTUN_TOS */ | ||
1399 | nla_total_size(1) + | ||
1400 | /* IFLA_IPTUN_PMTUDISC */ | ||
1401 | nla_total_size(1) + | ||
1402 | /* IFLA_IPTUN_FLAGS */ | ||
1403 | nla_total_size(2) + | ||
1404 | #ifdef CONFIG_IPV6_SIT_6RD | ||
1405 | /* IFLA_IPTUN_6RD_PREFIX */ | ||
1406 | nla_total_size(sizeof(struct in6_addr)) + | ||
1407 | /* IFLA_IPTUN_6RD_RELAY_PREFIX */ | ||
1408 | nla_total_size(4) + | ||
1409 | /* IFLA_IPTUN_6RD_PREFIXLEN */ | ||
1410 | nla_total_size(2) + | ||
1411 | /* IFLA_IPTUN_6RD_RELAY_PREFIXLEN */ | ||
1412 | nla_total_size(2) + | ||
1413 | #endif | ||
1414 | 0; | ||
1415 | } | ||
1416 | |||
1417 | static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev) | ||
1418 | { | ||
1419 | struct ip_tunnel *tunnel = netdev_priv(dev); | ||
1420 | struct ip_tunnel_parm *parm = &tunnel->parms; | ||
1421 | |||
1422 | if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || | ||
1423 | nla_put_be32(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) || | ||
1424 | nla_put_be32(skb, IFLA_IPTUN_REMOTE, parm->iph.daddr) || | ||
1425 | nla_put_u8(skb, IFLA_IPTUN_TTL, parm->iph.ttl) || | ||
1426 | nla_put_u8(skb, IFLA_IPTUN_TOS, parm->iph.tos) || | ||
1427 | nla_put_u8(skb, IFLA_IPTUN_PMTUDISC, | ||
1428 | !!(parm->iph.frag_off & htons(IP_DF))) || | ||
1429 | nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags)) | ||
1430 | goto nla_put_failure; | ||
1431 | |||
1432 | #ifdef CONFIG_IPV6_SIT_6RD | ||
1433 | if (nla_put(skb, IFLA_IPTUN_6RD_PREFIX, sizeof(struct in6_addr), | ||
1434 | &tunnel->ip6rd.prefix) || | ||
1435 | nla_put_be32(skb, IFLA_IPTUN_6RD_RELAY_PREFIX, | ||
1436 | tunnel->ip6rd.relay_prefix) || | ||
1437 | nla_put_u16(skb, IFLA_IPTUN_6RD_PREFIXLEN, | ||
1438 | tunnel->ip6rd.prefixlen) || | ||
1439 | nla_put_u16(skb, IFLA_IPTUN_6RD_RELAY_PREFIXLEN, | ||
1440 | tunnel->ip6rd.relay_prefixlen)) | ||
1441 | goto nla_put_failure; | ||
1442 | #endif | ||
1443 | |||
1444 | return 0; | ||
1445 | |||
1446 | nla_put_failure: | ||
1447 | return -EMSGSIZE; | ||
1448 | } | ||
1449 | |||
1450 | static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = { | ||
1451 | [IFLA_IPTUN_LINK] = { .type = NLA_U32 }, | ||
1452 | [IFLA_IPTUN_LOCAL] = { .type = NLA_U32 }, | ||
1453 | [IFLA_IPTUN_REMOTE] = { .type = NLA_U32 }, | ||
1454 | [IFLA_IPTUN_TTL] = { .type = NLA_U8 }, | ||
1455 | [IFLA_IPTUN_TOS] = { .type = NLA_U8 }, | ||
1456 | [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 }, | ||
1457 | [IFLA_IPTUN_FLAGS] = { .type = NLA_U16 }, | ||
1458 | #ifdef CONFIG_IPV6_SIT_6RD | ||
1459 | [IFLA_IPTUN_6RD_PREFIX] = { .len = sizeof(struct in6_addr) }, | ||
1460 | [IFLA_IPTUN_6RD_RELAY_PREFIX] = { .type = NLA_U32 }, | ||
1461 | [IFLA_IPTUN_6RD_PREFIXLEN] = { .type = NLA_U16 }, | ||
1462 | [IFLA_IPTUN_6RD_RELAY_PREFIXLEN] = { .type = NLA_U16 }, | ||
1463 | #endif | ||
1464 | }; | ||
1465 | |||
1466 | static struct rtnl_link_ops sit_link_ops __read_mostly = { | ||
1467 | .kind = "sit", | ||
1468 | .maxtype = IFLA_IPTUN_MAX, | ||
1469 | .policy = ipip6_policy, | ||
1470 | .priv_size = sizeof(struct ip_tunnel), | ||
1471 | .setup = ipip6_tunnel_setup, | ||
1472 | .newlink = ipip6_newlink, | ||
1473 | .changelink = ipip6_changelink, | ||
1474 | .get_size = ipip6_get_size, | ||
1475 | .fill_info = ipip6_fill_info, | ||
1476 | }; | ||
1477 | |||
1219 | static struct xfrm_tunnel sit_handler __read_mostly = { | 1478 | static struct xfrm_tunnel sit_handler __read_mostly = { |
1220 | .handler = ipip6_rcv, | 1479 | .handler = ipip6_rcv, |
1221 | .err_handler = ipip6_err, | 1480 | .err_handler = ipip6_err, |
@@ -1302,6 +1561,7 @@ static struct pernet_operations sit_net_ops = { | |||
1302 | 1561 | ||
1303 | static void __exit sit_cleanup(void) | 1562 | static void __exit sit_cleanup(void) |
1304 | { | 1563 | { |
1564 | rtnl_link_unregister(&sit_link_ops); | ||
1305 | xfrm4_tunnel_deregister(&sit_handler, AF_INET6); | 1565 | xfrm4_tunnel_deregister(&sit_handler, AF_INET6); |
1306 | 1566 | ||
1307 | unregister_pernet_device(&sit_net_ops); | 1567 | unregister_pernet_device(&sit_net_ops); |
@@ -1319,10 +1579,21 @@ static int __init sit_init(void) | |||
1319 | return err; | 1579 | return err; |
1320 | err = xfrm4_tunnel_register(&sit_handler, AF_INET6); | 1580 | err = xfrm4_tunnel_register(&sit_handler, AF_INET6); |
1321 | if (err < 0) { | 1581 | if (err < 0) { |
1322 | unregister_pernet_device(&sit_net_ops); | ||
1323 | pr_info("%s: can't add protocol\n", __func__); | 1582 | pr_info("%s: can't add protocol\n", __func__); |
1583 | goto xfrm_tunnel_failed; | ||
1324 | } | 1584 | } |
1585 | err = rtnl_link_register(&sit_link_ops); | ||
1586 | if (err < 0) | ||
1587 | goto rtnl_link_failed; | ||
1588 | |||
1589 | out: | ||
1325 | return err; | 1590 | return err; |
1591 | |||
1592 | rtnl_link_failed: | ||
1593 | xfrm4_tunnel_deregister(&sit_handler, AF_INET6); | ||
1594 | xfrm_tunnel_failed: | ||
1595 | unregister_pernet_device(&sit_net_ops); | ||
1596 | goto out; | ||
1326 | } | 1597 | } |
1327 | 1598 | ||
1328 | module_init(sit_init); | 1599 | module_init(sit_init); |
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 182ab9a85d6c..40161977f7cf 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -214,7 +214,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
214 | ireq6->iif = inet6_iif(skb); | 214 | ireq6->iif = inet6_iif(skb); |
215 | 215 | ||
216 | req->expires = 0UL; | 216 | req->expires = 0UL; |
217 | req->retrans = 0; | 217 | req->num_retrans = 0; |
218 | ireq->ecn_ok = ecn_ok; | 218 | ireq->ecn_ok = ecn_ok; |
219 | ireq->snd_wscale = tcp_opt.snd_wscale; | 219 | ireq->snd_wscale = tcp_opt.snd_wscale; |
220 | ireq->sack_ok = tcp_opt.sack_ok; | 220 | ireq->sack_ok = tcp_opt.sack_ok; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 26175bffbaa0..6565cf55eb1e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -77,9 +77,6 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, | |||
77 | struct request_sock *req); | 77 | struct request_sock *req); |
78 | 78 | ||
79 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); | 79 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); |
80 | static void __tcp_v6_send_check(struct sk_buff *skb, | ||
81 | const struct in6_addr *saddr, | ||
82 | const struct in6_addr *daddr); | ||
83 | 80 | ||
84 | static const struct inet_connection_sock_af_ops ipv6_mapped; | 81 | static const struct inet_connection_sock_af_ops ipv6_mapped; |
85 | static const struct inet_connection_sock_af_ops ipv6_specific; | 82 | static const struct inet_connection_sock_af_ops ipv6_specific; |
@@ -119,14 +116,6 @@ static void tcp_v6_hash(struct sock *sk) | |||
119 | } | 116 | } |
120 | } | 117 | } |
121 | 118 | ||
122 | static __inline__ __sum16 tcp_v6_check(int len, | ||
123 | const struct in6_addr *saddr, | ||
124 | const struct in6_addr *daddr, | ||
125 | __wsum base) | ||
126 | { | ||
127 | return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); | ||
128 | } | ||
129 | |||
130 | static __u32 tcp_v6_init_sequence(const struct sk_buff *skb) | 119 | static __u32 tcp_v6_init_sequence(const struct sk_buff *skb) |
131 | { | 120 | { |
132 | return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, | 121 | return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, |
@@ -306,7 +295,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
306 | if (err) | 295 | if (err) |
307 | goto late_failure; | 296 | goto late_failure; |
308 | 297 | ||
309 | if (!tp->write_seq) | 298 | if (!tp->write_seq && likely(!tp->repair)) |
310 | tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, | 299 | tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, |
311 | np->daddr.s6_addr32, | 300 | np->daddr.s6_addr32, |
312 | inet->inet_sport, | 301 | inet->inet_sport, |
@@ -495,9 +484,12 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req, | |||
495 | struct request_values *rvp) | 484 | struct request_values *rvp) |
496 | { | 485 | { |
497 | struct flowi6 fl6; | 486 | struct flowi6 fl6; |
487 | int res; | ||
498 | 488 | ||
499 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); | 489 | res = tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0); |
500 | return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0); | 490 | if (!res) |
491 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); | ||
492 | return res; | ||
501 | } | 493 | } |
502 | 494 | ||
503 | static void tcp_v6_reqsk_destructor(struct request_sock *req) | 495 | static void tcp_v6_reqsk_destructor(struct request_sock *req) |
@@ -719,94 +711,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { | |||
719 | }; | 711 | }; |
720 | #endif | 712 | #endif |
721 | 713 | ||
722 | static void __tcp_v6_send_check(struct sk_buff *skb, | ||
723 | const struct in6_addr *saddr, const struct in6_addr *daddr) | ||
724 | { | ||
725 | struct tcphdr *th = tcp_hdr(skb); | ||
726 | |||
727 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
728 | th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0); | ||
729 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
730 | skb->csum_offset = offsetof(struct tcphdr, check); | ||
731 | } else { | ||
732 | th->check = tcp_v6_check(skb->len, saddr, daddr, | ||
733 | csum_partial(th, th->doff << 2, | ||
734 | skb->csum)); | ||
735 | } | ||
736 | } | ||
737 | |||
738 | static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) | ||
739 | { | ||
740 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
741 | |||
742 | __tcp_v6_send_check(skb, &np->saddr, &np->daddr); | ||
743 | } | ||
744 | |||
745 | static int tcp_v6_gso_send_check(struct sk_buff *skb) | ||
746 | { | ||
747 | const struct ipv6hdr *ipv6h; | ||
748 | struct tcphdr *th; | ||
749 | |||
750 | if (!pskb_may_pull(skb, sizeof(*th))) | ||
751 | return -EINVAL; | ||
752 | |||
753 | ipv6h = ipv6_hdr(skb); | ||
754 | th = tcp_hdr(skb); | ||
755 | |||
756 | th->check = 0; | ||
757 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
758 | __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr); | ||
759 | return 0; | ||
760 | } | ||
761 | |||
762 | static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, | ||
763 | struct sk_buff *skb) | ||
764 | { | ||
765 | const struct ipv6hdr *iph = skb_gro_network_header(skb); | ||
766 | __wsum wsum; | ||
767 | __sum16 sum; | ||
768 | |||
769 | switch (skb->ip_summed) { | ||
770 | case CHECKSUM_COMPLETE: | ||
771 | if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr, | ||
772 | skb->csum)) { | ||
773 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
774 | break; | ||
775 | } | ||
776 | flush: | ||
777 | NAPI_GRO_CB(skb)->flush = 1; | ||
778 | return NULL; | ||
779 | |||
780 | case CHECKSUM_NONE: | ||
781 | wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr, | ||
782 | skb_gro_len(skb), | ||
783 | IPPROTO_TCP, 0)); | ||
784 | sum = csum_fold(skb_checksum(skb, | ||
785 | skb_gro_offset(skb), | ||
786 | skb_gro_len(skb), | ||
787 | wsum)); | ||
788 | if (sum) | ||
789 | goto flush; | ||
790 | |||
791 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
792 | break; | ||
793 | } | ||
794 | |||
795 | return tcp_gro_receive(head, skb); | ||
796 | } | ||
797 | |||
798 | static int tcp6_gro_complete(struct sk_buff *skb) | ||
799 | { | ||
800 | const struct ipv6hdr *iph = ipv6_hdr(skb); | ||
801 | struct tcphdr *th = tcp_hdr(skb); | ||
802 | |||
803 | th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), | ||
804 | &iph->saddr, &iph->daddr, 0); | ||
805 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | ||
806 | |||
807 | return tcp_gro_complete(skb); | ||
808 | } | ||
809 | |||
810 | static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | 714 | static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, |
811 | u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass) | 715 | u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass) |
812 | { | 716 | { |
@@ -1364,7 +1268,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1364 | 1268 | ||
1365 | tcp_initialize_rcv_mss(newsk); | 1269 | tcp_initialize_rcv_mss(newsk); |
1366 | tcp_synack_rtt_meas(newsk, req); | 1270 | tcp_synack_rtt_meas(newsk, req); |
1367 | newtp->total_retrans = req->retrans; | 1271 | newtp->total_retrans = req->num_retrans; |
1368 | 1272 | ||
1369 | newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; | 1273 | newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; |
1370 | newinet->inet_rcv_saddr = LOOPBACK4_IPV6; | 1274 | newinet->inet_rcv_saddr = LOOPBACK4_IPV6; |
@@ -1741,11 +1645,11 @@ static void tcp_v6_early_demux(struct sk_buff *skb) | |||
1741 | skb->destructor = sock_edemux; | 1645 | skb->destructor = sock_edemux; |
1742 | if (sk->sk_state != TCP_TIME_WAIT) { | 1646 | if (sk->sk_state != TCP_TIME_WAIT) { |
1743 | struct dst_entry *dst = sk->sk_rx_dst; | 1647 | struct dst_entry *dst = sk->sk_rx_dst; |
1744 | struct inet_sock *icsk = inet_sk(sk); | 1648 | |
1745 | if (dst) | 1649 | if (dst) |
1746 | dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); | 1650 | dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); |
1747 | if (dst && | 1651 | if (dst && |
1748 | icsk->rx_dst_ifindex == skb->skb_iif) | 1652 | inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) |
1749 | skb_dst_set_noref(skb, dst); | 1653 | skb_dst_set_noref(skb, dst); |
1750 | } | 1654 | } |
1751 | } | 1655 | } |
@@ -1866,7 +1770,7 @@ static void get_openreq6(struct seq_file *seq, | |||
1866 | 0,0, /* could print option size, but that is af dependent. */ | 1770 | 0,0, /* could print option size, but that is af dependent. */ |
1867 | 1, /* timers active (only the expire timer) */ | 1771 | 1, /* timers active (only the expire timer) */ |
1868 | jiffies_to_clock_t(ttd), | 1772 | jiffies_to_clock_t(ttd), |
1869 | req->retrans, | 1773 | req->num_timeout, |
1870 | from_kuid_munged(seq_user_ns(seq), uid), | 1774 | from_kuid_munged(seq_user_ns(seq), uid), |
1871 | 0, /* non standard timer */ | 1775 | 0, /* non standard timer */ |
1872 | 0, /* open_requests have no inode */ | 1776 | 0, /* open_requests have no inode */ |
@@ -2063,10 +1967,6 @@ static const struct inet6_protocol tcpv6_protocol = { | |||
2063 | .early_demux = tcp_v6_early_demux, | 1967 | .early_demux = tcp_v6_early_demux, |
2064 | .handler = tcp_v6_rcv, | 1968 | .handler = tcp_v6_rcv, |
2065 | .err_handler = tcp_v6_err, | 1969 | .err_handler = tcp_v6_err, |
2066 | .gso_send_check = tcp_v6_gso_send_check, | ||
2067 | .gso_segment = tcp_tso_segment, | ||
2068 | .gro_receive = tcp6_gro_receive, | ||
2069 | .gro_complete = tcp6_gro_complete, | ||
2070 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, | 1970 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, |
2071 | }; | 1971 | }; |
2072 | 1972 | ||
@@ -2121,10 +2021,10 @@ int __init tcpv6_init(void) | |||
2121 | out: | 2021 | out: |
2122 | return ret; | 2022 | return ret; |
2123 | 2023 | ||
2124 | out_tcpv6_protocol: | ||
2125 | inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); | ||
2126 | out_tcpv6_protosw: | 2024 | out_tcpv6_protosw: |
2127 | inet6_unregister_protosw(&tcpv6_protosw); | 2025 | inet6_unregister_protosw(&tcpv6_protosw); |
2026 | out_tcpv6_protocol: | ||
2027 | inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); | ||
2128 | goto out; | 2028 | goto out; |
2129 | } | 2029 | } |
2130 | 2030 | ||
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c new file mode 100644 index 000000000000..2ec6bf6a0aa0 --- /dev/null +++ b/net/ipv6/tcpv6_offload.c | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * IPV6 GSO/GRO offload support | ||
3 | * Linux INET6 implementation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * TCPv6 GSO/GRO support | ||
11 | */ | ||
12 | #include <linux/skbuff.h> | ||
13 | #include <net/protocol.h> | ||
14 | #include <net/tcp.h> | ||
15 | #include <net/ip6_checksum.h> | ||
16 | #include "ip6_offload.h" | ||
17 | |||
18 | static int tcp_v6_gso_send_check(struct sk_buff *skb) | ||
19 | { | ||
20 | const struct ipv6hdr *ipv6h; | ||
21 | struct tcphdr *th; | ||
22 | |||
23 | if (!pskb_may_pull(skb, sizeof(*th))) | ||
24 | return -EINVAL; | ||
25 | |||
26 | ipv6h = ipv6_hdr(skb); | ||
27 | th = tcp_hdr(skb); | ||
28 | |||
29 | th->check = 0; | ||
30 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
31 | __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr); | ||
32 | return 0; | ||
33 | } | ||
34 | |||
35 | static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, | ||
36 | struct sk_buff *skb) | ||
37 | { | ||
38 | const struct ipv6hdr *iph = skb_gro_network_header(skb); | ||
39 | __wsum wsum; | ||
40 | __sum16 sum; | ||
41 | |||
42 | switch (skb->ip_summed) { | ||
43 | case CHECKSUM_COMPLETE: | ||
44 | if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr, | ||
45 | skb->csum)) { | ||
46 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
47 | break; | ||
48 | } | ||
49 | flush: | ||
50 | NAPI_GRO_CB(skb)->flush = 1; | ||
51 | return NULL; | ||
52 | |||
53 | case CHECKSUM_NONE: | ||
54 | wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr, | ||
55 | skb_gro_len(skb), | ||
56 | IPPROTO_TCP, 0)); | ||
57 | sum = csum_fold(skb_checksum(skb, | ||
58 | skb_gro_offset(skb), | ||
59 | skb_gro_len(skb), | ||
60 | wsum)); | ||
61 | if (sum) | ||
62 | goto flush; | ||
63 | |||
64 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
65 | break; | ||
66 | } | ||
67 | |||
68 | return tcp_gro_receive(head, skb); | ||
69 | } | ||
70 | |||
71 | static int tcp6_gro_complete(struct sk_buff *skb) | ||
72 | { | ||
73 | const struct ipv6hdr *iph = ipv6_hdr(skb); | ||
74 | struct tcphdr *th = tcp_hdr(skb); | ||
75 | |||
76 | th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), | ||
77 | &iph->saddr, &iph->daddr, 0); | ||
78 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | ||
79 | |||
80 | return tcp_gro_complete(skb); | ||
81 | } | ||
82 | |||
83 | static const struct net_offload tcpv6_offload = { | ||
84 | .callbacks = { | ||
85 | .gso_send_check = tcp_v6_gso_send_check, | ||
86 | .gso_segment = tcp_tso_segment, | ||
87 | .gro_receive = tcp6_gro_receive, | ||
88 | .gro_complete = tcp6_gro_complete, | ||
89 | }, | ||
90 | }; | ||
91 | |||
92 | int __init tcpv6_offload_init(void) | ||
93 | { | ||
94 | return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP); | ||
95 | } | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index fc9997260a6b..dfaa29b8b293 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -1343,103 +1343,9 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, | |||
1343 | } | 1343 | } |
1344 | #endif | 1344 | #endif |
1345 | 1345 | ||
1346 | static int udp6_ufo_send_check(struct sk_buff *skb) | ||
1347 | { | ||
1348 | const struct ipv6hdr *ipv6h; | ||
1349 | struct udphdr *uh; | ||
1350 | |||
1351 | if (!pskb_may_pull(skb, sizeof(*uh))) | ||
1352 | return -EINVAL; | ||
1353 | |||
1354 | ipv6h = ipv6_hdr(skb); | ||
1355 | uh = udp_hdr(skb); | ||
1356 | |||
1357 | uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, | ||
1358 | IPPROTO_UDP, 0); | ||
1359 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
1360 | skb->csum_offset = offsetof(struct udphdr, check); | ||
1361 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
1362 | return 0; | ||
1363 | } | ||
1364 | |||
1365 | static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, | ||
1366 | netdev_features_t features) | ||
1367 | { | ||
1368 | struct sk_buff *segs = ERR_PTR(-EINVAL); | ||
1369 | unsigned int mss; | ||
1370 | unsigned int unfrag_ip6hlen, unfrag_len; | ||
1371 | struct frag_hdr *fptr; | ||
1372 | u8 *mac_start, *prevhdr; | ||
1373 | u8 nexthdr; | ||
1374 | u8 frag_hdr_sz = sizeof(struct frag_hdr); | ||
1375 | int offset; | ||
1376 | __wsum csum; | ||
1377 | |||
1378 | mss = skb_shinfo(skb)->gso_size; | ||
1379 | if (unlikely(skb->len <= mss)) | ||
1380 | goto out; | ||
1381 | |||
1382 | if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { | ||
1383 | /* Packet is from an untrusted source, reset gso_segs. */ | ||
1384 | int type = skb_shinfo(skb)->gso_type; | ||
1385 | |||
1386 | if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) || | ||
1387 | !(type & (SKB_GSO_UDP)))) | ||
1388 | goto out; | ||
1389 | |||
1390 | skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); | ||
1391 | |||
1392 | segs = NULL; | ||
1393 | goto out; | ||
1394 | } | ||
1395 | |||
1396 | /* Do software UFO. Complete and fill in the UDP checksum as HW cannot | ||
1397 | * do checksum of UDP packets sent as multiple IP fragments. | ||
1398 | */ | ||
1399 | offset = skb_checksum_start_offset(skb); | ||
1400 | csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
1401 | offset += skb->csum_offset; | ||
1402 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | ||
1403 | skb->ip_summed = CHECKSUM_NONE; | ||
1404 | |||
1405 | /* Check if there is enough headroom to insert fragment header. */ | ||
1406 | if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && | ||
1407 | pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) | ||
1408 | goto out; | ||
1409 | |||
1410 | /* Find the unfragmentable header and shift it left by frag_hdr_sz | ||
1411 | * bytes to insert fragment header. | ||
1412 | */ | ||
1413 | unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); | ||
1414 | nexthdr = *prevhdr; | ||
1415 | *prevhdr = NEXTHDR_FRAGMENT; | ||
1416 | unfrag_len = skb_network_header(skb) - skb_mac_header(skb) + | ||
1417 | unfrag_ip6hlen; | ||
1418 | mac_start = skb_mac_header(skb); | ||
1419 | memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len); | ||
1420 | |||
1421 | skb->mac_header -= frag_hdr_sz; | ||
1422 | skb->network_header -= frag_hdr_sz; | ||
1423 | |||
1424 | fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); | ||
1425 | fptr->nexthdr = nexthdr; | ||
1426 | fptr->reserved = 0; | ||
1427 | ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb)); | ||
1428 | |||
1429 | /* Fragment the skb. ipv6 header and the remaining fields of the | ||
1430 | * fragment header are updated in ipv6_gso_segment() | ||
1431 | */ | ||
1432 | segs = skb_segment(skb, features); | ||
1433 | |||
1434 | out: | ||
1435 | return segs; | ||
1436 | } | ||
1437 | |||
1438 | static const struct inet6_protocol udpv6_protocol = { | 1346 | static const struct inet6_protocol udpv6_protocol = { |
1439 | .handler = udpv6_rcv, | 1347 | .handler = udpv6_rcv, |
1440 | .err_handler = udpv6_err, | 1348 | .err_handler = udpv6_err, |
1441 | .gso_send_check = udp6_ufo_send_check, | ||
1442 | .gso_segment = udp6_ufo_fragment, | ||
1443 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, | 1349 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, |
1444 | }; | 1350 | }; |
1445 | 1351 | ||
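Nothing is lost in this removal: both helpers reappear verbatim in the new net/ipv6/udp_offload.c later in this diff, and the GSO hooks leave struct inet6_protocol entirely, moving into a separately registered struct net_offload. In outline (all identifiers below are taken from elsewhere in this patch; only the juxtaposition is illustrative):

        /* Outline of the split made by this patch: receive handling stays in
         * inet6_protocol, segmentation moves to a net_offload.
         */
        static const struct inet6_protocol udpv6_protocol = {
                .handler     = udpv6_rcv,              /* receive path stays here */
                .err_handler = udpv6_err,
                .flags       = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
        };

        static const struct net_offload udpv6_offload = {
                .callbacks = {                          /* GSO moves here ... */
                        .gso_send_check = udp6_ufo_send_check,
                        .gso_segment    = udp6_ufo_fragment,
                },
        };      /* ... and is registered via inet6_add_offload(..., IPPROTO_UDP) */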
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c new file mode 100644 index 000000000000..0c8934a317c2 --- /dev/null +++ b/net/ipv6/udp_offload.c | |||
@@ -0,0 +1,120 @@ | |||
1 | /* | ||
2 | * IPV6 GSO/GRO offload support | ||
3 | * Linux INET6 implementation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * UDPv6 GSO support | ||
11 | */ | ||
12 | #include <linux/skbuff.h> | ||
13 | #include <net/protocol.h> | ||
14 | #include <net/ipv6.h> | ||
15 | #include <net/udp.h> | ||
16 | #include <net/ip6_checksum.h> | ||
17 | #include "ip6_offload.h" | ||
18 | |||
19 | static int udp6_ufo_send_check(struct sk_buff *skb) | ||
20 | { | ||
21 | const struct ipv6hdr *ipv6h; | ||
22 | struct udphdr *uh; | ||
23 | |||
24 | if (!pskb_may_pull(skb, sizeof(*uh))) | ||
25 | return -EINVAL; | ||
26 | |||
27 | ipv6h = ipv6_hdr(skb); | ||
28 | uh = udp_hdr(skb); | ||
29 | |||
30 | uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, | ||
31 | IPPROTO_UDP, 0); | ||
32 | skb->csum_start = skb_transport_header(skb) - skb->head; | ||
33 | skb->csum_offset = offsetof(struct udphdr, check); | ||
34 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
35 | return 0; | ||
36 | } | ||
37 | |||
38 | static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, | ||
39 | netdev_features_t features) | ||
40 | { | ||
41 | struct sk_buff *segs = ERR_PTR(-EINVAL); | ||
42 | unsigned int mss; | ||
43 | unsigned int unfrag_ip6hlen, unfrag_len; | ||
44 | struct frag_hdr *fptr; | ||
45 | u8 *mac_start, *prevhdr; | ||
46 | u8 nexthdr; | ||
47 | u8 frag_hdr_sz = sizeof(struct frag_hdr); | ||
48 | int offset; | ||
49 | __wsum csum; | ||
50 | |||
51 | mss = skb_shinfo(skb)->gso_size; | ||
52 | if (unlikely(skb->len <= mss)) | ||
53 | goto out; | ||
54 | |||
55 | if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { | ||
56 | /* Packet is from an untrusted source, reset gso_segs. */ | ||
57 | int type = skb_shinfo(skb)->gso_type; | ||
58 | |||
59 | if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) || | ||
60 | !(type & (SKB_GSO_UDP)))) | ||
61 | goto out; | ||
62 | |||
63 | skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); | ||
64 | |||
65 | segs = NULL; | ||
66 | goto out; | ||
67 | } | ||
68 | |||
69 | /* Do software UFO. Complete and fill in the UDP checksum as HW cannot | ||
70 | * do checksum of UDP packets sent as multiple IP fragments. | ||
71 | */ | ||
72 | offset = skb_checksum_start_offset(skb); | ||
73 | csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
74 | offset += skb->csum_offset; | ||
75 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | ||
76 | skb->ip_summed = CHECKSUM_NONE; | ||
77 | |||
78 | /* Check if there is enough headroom to insert fragment header. */ | ||
79 | if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && | ||
80 | pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) | ||
81 | goto out; | ||
82 | |||
83 | /* Find the unfragmentable header and shift it left by frag_hdr_sz | ||
84 | * bytes to insert fragment header. | ||
85 | */ | ||
86 | unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); | ||
87 | nexthdr = *prevhdr; | ||
88 | *prevhdr = NEXTHDR_FRAGMENT; | ||
89 | unfrag_len = skb_network_header(skb) - skb_mac_header(skb) + | ||
90 | unfrag_ip6hlen; | ||
91 | mac_start = skb_mac_header(skb); | ||
92 | memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len); | ||
93 | |||
94 | skb->mac_header -= frag_hdr_sz; | ||
95 | skb->network_header -= frag_hdr_sz; | ||
96 | |||
97 | fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); | ||
98 | fptr->nexthdr = nexthdr; | ||
99 | fptr->reserved = 0; | ||
100 | ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb)); | ||
101 | |||
102 | /* Fragment the skb. ipv6 header and the remaining fields of the | ||
103 | * fragment header are updated in ipv6_gso_segment() | ||
104 | */ | ||
105 | segs = skb_segment(skb, features); | ||
106 | |||
107 | out: | ||
108 | return segs; | ||
109 | } | ||
110 | static const struct net_offload udpv6_offload = { | ||
111 | .callbacks = { | ||
112 | .gso_send_check = udp6_ufo_send_check, | ||
113 | .gso_segment = udp6_ufo_fragment, | ||
114 | }, | ||
115 | }; | ||
116 | |||
117 | int __init udp_offload_init(void) | ||
118 | { | ||
119 | return inet6_add_offload(&udpv6_offload, IPPROTO_UDP); | ||
120 | } | ||
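For reference, the extension header that udp6_ufo_fragment() makes room for and partially fills is the IPv6 Fragment header (RFC 2460, section 4.5); the kernel's definition lives in <net/ipv6.h> as struct frag_hdr. The layout below is a sketch whose field names are believed to match that definition:

        /* IPv6 Fragment extension header as written by udp6_ufo_fragment();
         * sketch of the kernel's struct frag_hdr, shown here for reference.
         */
        #include <linux/types.h>

        struct frag_hdr_sketch {
                __u8    nexthdr;        /* protocol of the fragmentable part */
                __u8    reserved;       /* set to 0 above */
                __be16  frag_off;       /* 13-bit fragment offset + M flag */
                __be32  identification; /* filled by ipv6_select_ident() above */
        };

The offset and M flag are left for ipv6_gso_segment() to fill in per segment, as the comment before skb_segment() above notes.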
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index f8c4c08ffb60..c9844135c9ca 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <net/ip.h> | 20 | #include <net/ip.h> |
21 | #include <net/ipv6.h> | 21 | #include <net/ipv6.h> |
22 | #include <net/ip6_route.h> | 22 | #include <net/ip6_route.h> |
23 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 23 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
24 | #include <net/mip6.h> | 24 | #include <net/mip6.h> |
25 | #endif | 25 | #endif |
26 | 26 | ||
@@ -182,7 +182,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
182 | fl6->flowi6_proto = nexthdr; | 182 | fl6->flowi6_proto = nexthdr; |
183 | return; | 183 | return; |
184 | 184 | ||
185 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 185 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
186 | case IPPROTO_MH: | 186 | case IPPROTO_MH: |
187 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { | 187 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { |
188 | struct ip6_mh *mh; | 188 | struct ip6_mh *mh; |
@@ -327,21 +327,7 @@ static struct ctl_table_header *sysctl_hdr; | |||
327 | int __init xfrm6_init(void) | 327 | int __init xfrm6_init(void) |
328 | { | 328 | { |
329 | int ret; | 329 | int ret; |
330 | unsigned int gc_thresh; | 330 | |
331 | |||
332 | /* | ||
333 | * We need a good default value for the xfrm6 gc threshold. | ||
334 | * In ipv4 we set it to the route hash table size * 8, which | ||
335 | * is half the size of the maximaum route cache for ipv4. It | ||
336 | * would be good to do the same thing for v6, except the table is | ||
337 | * constructed differently here. Here each table for a net namespace | ||
338 | * can have FIB_TABLE_HASHSZ entries, so lets go with the same | ||
339 | * computation that we used for ipv4 here. Also, lets keep the initial | ||
340 | * gc_thresh to a minimum of 1024, since, the ipv6 route cache defaults | ||
341 | * to that as a minimum as well | ||
342 | */ | ||
343 | gc_thresh = FIB6_TABLE_HASHSZ * 8; | ||
344 | xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh; | ||
345 | dst_entries_init(&xfrm6_dst_ops); | 331 | dst_entries_init(&xfrm6_dst_ops); |
346 | 332 | ||
347 | ret = xfrm6_policy_init(); | 333 | ret = xfrm6_policy_init(); |
@@ -370,7 +356,6 @@ void xfrm6_fini(void) | |||
370 | if (sysctl_hdr) | 356 | if (sysctl_hdr) |
371 | unregister_net_sysctl_table(sysctl_hdr); | 357 | unregister_net_sysctl_table(sysctl_hdr); |
372 | #endif | 358 | #endif |
373 | //xfrm6_input_fini(); | ||
374 | xfrm6_policy_fini(); | 359 | xfrm6_policy_fini(); |
375 | xfrm6_state_fini(); | 360 | xfrm6_state_fini(); |
376 | dst_entries_destroy(&xfrm6_dst_ops); | 361 | dst_entries_destroy(&xfrm6_dst_ops); |
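Several hunks in this patch, starting with the one above, replace the open-coded `defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)` test with IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h>, which evaluates true when the option is built in (=y) or built as a module (=m). A sketch of the equivalence being relied on (the real macro is implemented with preprocessor token pasting, not shown here):

        /* Equivalence used throughout this patch (sketch, not the real
         * <linux/kconfig.h> implementation):
         *
         *   IS_ENABLED(CONFIG_FOO)
         *     acts like: defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
         *
         * i.e. true for CONFIG_FOO=y (built in) or CONFIG_FOO=m (module).
         */
        #if IS_ENABLED(CONFIG_IPV6_MIP6)
        #include <net/mip6.h>
        #endif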
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c index 3f2f7c4ab721..d8c70b8efc24 100644 --- a/net/ipv6/xfrm6_state.c +++ b/net/ipv6/xfrm6_state.c | |||
@@ -101,7 +101,7 @@ static int __xfrm6_state_sort_cmp(void *p) | |||
101 | return 1; | 101 | return 1; |
102 | else | 102 | else |
103 | return 3; | 103 | return 3; |
104 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 104 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
105 | case XFRM_MODE_ROUTEOPTIMIZATION: | 105 | case XFRM_MODE_ROUTEOPTIMIZATION: |
106 | case XFRM_MODE_IN_TRIGGER: | 106 | case XFRM_MODE_IN_TRIGGER: |
107 | return 2; | 107 | return 2; |
@@ -134,7 +134,7 @@ static int __xfrm6_tmpl_sort_cmp(void *p) | |||
134 | switch (v->mode) { | 134 | switch (v->mode) { |
135 | case XFRM_MODE_TRANSPORT: | 135 | case XFRM_MODE_TRANSPORT: |
136 | return 1; | 136 | return 1; |
137 | #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) | 137 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
138 | case XFRM_MODE_ROUTEOPTIMIZATION: | 138 | case XFRM_MODE_ROUTEOPTIMIZATION: |
139 | case XFRM_MODE_IN_TRIGGER: | 139 | case XFRM_MODE_IN_TRIGGER: |
140 | return 2; | 140 | return 2; |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 08897a3c7ec7..5b426a646544 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -141,7 +141,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol, | |||
141 | struct sock *sk; | 141 | struct sock *sk; |
142 | int err; | 142 | int err; |
143 | 143 | ||
144 | if (!capable(CAP_NET_ADMIN)) | 144 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
145 | return -EPERM; | 145 | return -EPERM; |
146 | if (sock->type != SOCK_RAW) | 146 | if (sock->type != SOCK_RAW) |
147 | return -ESOCKTNOSUPPORT; | 147 | return -ESOCKTNOSUPPORT; |
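This is part of the same conversion applied to the LLC socket code below: privilege checks on network-facing operations move from capable(), which tests against the initial user namespace, to ns_capable(net->user_ns, ...), which tests against the user namespace that owns the relevant network namespace, so a container's root can administer its own netns without global privileges. A sketch of the gating pattern (example_create() is an illustrative name, not a real handler):

        /* Sketch of the privilege-gating pattern used above: check against
         * the user namespace that owns this network namespace.
         */
        static int example_create(struct net *net, struct socket *sock, int protocol)
        {
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        return -EPERM;          /* was: capable(CAP_NET_ADMIN) */

                /* ... normal socket setup continues here ... */
                return 0;
        }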
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 6c4cc12c7414..bbba3a19e944 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c | |||
@@ -632,7 +632,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl | |||
632 | nla_put_u16(skb, L2TP_ATTR_MRU, session->mru))) | 632 | nla_put_u16(skb, L2TP_ATTR_MRU, session->mru))) |
633 | goto nla_put_failure; | 633 | goto nla_put_failure; |
634 | 634 | ||
635 | if ((session->ifname && session->ifname[0] && | 635 | if ((session->ifname[0] && |
636 | nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) || | 636 | nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) || |
637 | (session->cookie_len && | 637 | (session->cookie_len && |
638 | nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, | 638 | nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, |
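The dropped `session->ifname &&` test was dead code: ifname is a fixed-size character array embedded in the session structure, so its address can never be NULL and only the contents matter. A tiny illustration (struct and size are illustrative, not the l2tp definitions):

        /* Why "s->ifname &&" adds nothing: an embedded array member always
         * has a non-NULL address, so only its first byte says anything.
         */
        struct example_session {
                char ifname[16];        /* size is illustrative */
        };

        static int example_has_ifname(const struct example_session *s)
        {
                /* s->ifname != NULL is always true; test the contents instead */
                return s->ifname[0] != '\0';
        }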
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index c2190005a114..88709882c464 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -160,7 +160,7 @@ static int llc_ui_create(struct net *net, struct socket *sock, int protocol, | |||
160 | struct sock *sk; | 160 | struct sock *sk; |
161 | int rc = -ESOCKTNOSUPPORT; | 161 | int rc = -ESOCKTNOSUPPORT; |
162 | 162 | ||
163 | if (!capable(CAP_NET_RAW)) | 163 | if (!ns_capable(net->user_ns, CAP_NET_RAW)) |
164 | return -EPERM; | 164 | return -EPERM; |
165 | 165 | ||
166 | if (!net_eq(net, &init_net)) | 166 | if (!net_eq(net, &init_net)) |
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 63af25458fda..b4ecf267a34b 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -248,7 +248,7 @@ config MAC80211_MHWMP_DEBUG | |||
248 | Do not select this option. | 248 | Do not select this option. |
249 | 249 | ||
250 | config MAC80211_MESH_SYNC_DEBUG | 250 | config MAC80211_MESH_SYNC_DEBUG |
251 | bool "Verbose mesh mesh synchronization debugging" | 251 | bool "Verbose mesh synchronization debugging" |
252 | depends on MAC80211_DEBUG_MENU | 252 | depends on MAC80211_DEBUG_MENU |
253 | depends on MAC80211_MESH | 253 | depends on MAC80211_MESH |
254 | ---help--- | 254 | ---help--- |
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index a7dd110faafa..4911202334d9 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile | |||
@@ -8,6 +8,7 @@ mac80211-y := \ | |||
8 | wpa.o \ | 8 | wpa.o \ |
9 | scan.o offchannel.o \ | 9 | scan.o offchannel.o \ |
10 | ht.o agg-tx.o agg-rx.o \ | 10 | ht.o agg-tx.o agg-rx.o \ |
11 | vht.o \ | ||
11 | ibss.o \ | 12 | ibss.o \ |
12 | iface.o \ | 13 | iface.o \ |
13 | rate.o \ | 14 | rate.o \ |
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c index a04752e91023..537488cbf941 100644 --- a/net/mac80211/aes_cmac.c +++ b/net/mac80211/aes_cmac.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/crypto.h> | 12 | #include <linux/crypto.h> |
13 | #include <linux/export.h> | ||
13 | #include <linux/err.h> | 14 | #include <linux/err.h> |
14 | #include <crypto/aes.h> | 15 | #include <crypto/aes.h> |
15 | 16 | ||
@@ -126,3 +127,20 @@ void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm) | |||
126 | { | 127 | { |
127 | crypto_free_cipher(tfm); | 128 | crypto_free_cipher(tfm); |
128 | } | 129 | } |
130 | |||
131 | void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf, | ||
132 | u8 *k1, u8 *k2) | ||
133 | { | ||
134 | u8 l[AES_BLOCK_SIZE] = {}; | ||
135 | struct ieee80211_key *key = | ||
136 | container_of(keyconf, struct ieee80211_key, conf); | ||
137 | |||
138 | crypto_cipher_encrypt_one(key->u.aes_cmac.tfm, l, l); | ||
139 | |||
140 | memcpy(k1, l, AES_BLOCK_SIZE); | ||
141 | gf_mulx(k1); | ||
142 | |||
143 | memcpy(k2, k1, AES_BLOCK_SIZE); | ||
144 | gf_mulx(k2); | ||
145 | } | ||
146 | EXPORT_SYMBOL(ieee80211_aes_cmac_calculate_k1_k2); | ||
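The new export derives the two AES-CMAC subkeys K1 and K2 (RFC 4493 / NIST SP 800-38B) so that drivers offloading BIP can use the same values mac80211 would: encrypt an all-zero block with the AES-CMAC key (the IGTK in the BIP case), then double it once in GF(2^128) for K1 and again for K2. The doubling helper gf_mulx() is not part of this hunk; a self-contained sketch of it, believed to match the mac80211 version, is:

        /* Sketch of GF(2^128) doubling ("gf_mulx") for CMAC subkey derivation:
         * shift the 128-bit block left by one bit and, if the top bit was set,
         * XOR the last byte with the field constant 0x87.
         */
        typedef unsigned char u8;       /* stand-in for the kernel's u8 */
        #define AES_BLOCK_SIZE_SKETCH 16

        static void gf_mulx_sketch(u8 *block)
        {
                int i;
                int carry = block[0] & 0x80;

                for (i = 0; i < AES_BLOCK_SIZE_SKETCH - 1; i++)
                        block[i] = (block[i] << 1) | (block[i + 1] >> 7);
                block[AES_BLOCK_SIZE_SKETCH - 1] <<= 1;
                if (carry)
                        block[AES_BLOCK_SIZE_SKETCH - 1] ^= 0x87;
        }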
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 186d9919b043..808338a1bce5 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c | |||
@@ -118,7 +118,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap, | |||
118 | return; | 118 | return; |
119 | } | 119 | } |
120 | 120 | ||
121 | for (i = 0; i < STA_TID_NUM; i++) | 121 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) |
122 | if (ba_rx_bitmap & BIT(i)) | 122 | if (ba_rx_bitmap & BIT(i)) |
123 | set_bit(i, sta->ampdu_mlme.tid_rx_stop_requested); | 123 | set_bit(i, sta->ampdu_mlme.tid_rx_stop_requested); |
124 | 124 | ||
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 3195a6307f50..4152ed1034b8 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -448,7 +448,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, | |||
448 | if (WARN_ON(!local->ops->ampdu_action)) | 448 | if (WARN_ON(!local->ops->ampdu_action)) |
449 | return -EINVAL; | 449 | return -EINVAL; |
450 | 450 | ||
451 | if ((tid >= STA_TID_NUM) || | 451 | if ((tid >= IEEE80211_NUM_TIDS) || |
452 | !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) || | 452 | !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) || |
453 | (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) | 453 | (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) |
454 | return -EINVAL; | 454 | return -EINVAL; |
@@ -605,9 +605,9 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) | |||
605 | 605 | ||
606 | trace_api_start_tx_ba_cb(sdata, ra, tid); | 606 | trace_api_start_tx_ba_cb(sdata, ra, tid); |
607 | 607 | ||
608 | if (tid >= STA_TID_NUM) { | 608 | if (tid >= IEEE80211_NUM_TIDS) { |
609 | ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", | 609 | ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", |
610 | tid, STA_TID_NUM); | 610 | tid, IEEE80211_NUM_TIDS); |
611 | return; | 611 | return; |
612 | } | 612 | } |
613 | 613 | ||
@@ -687,7 +687,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
687 | if (!local->ops->ampdu_action) | 687 | if (!local->ops->ampdu_action) |
688 | return -EINVAL; | 688 | return -EINVAL; |
689 | 689 | ||
690 | if (tid >= STA_TID_NUM) | 690 | if (tid >= IEEE80211_NUM_TIDS) |
691 | return -EINVAL; | 691 | return -EINVAL; |
692 | 692 | ||
693 | spin_lock_bh(&sta->lock); | 693 | spin_lock_bh(&sta->lock); |
@@ -722,9 +722,9 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) | |||
722 | 722 | ||
723 | trace_api_stop_tx_ba_cb(sdata, ra, tid); | 723 | trace_api_stop_tx_ba_cb(sdata, ra, tid); |
724 | 724 | ||
725 | if (tid >= STA_TID_NUM) { | 725 | if (tid >= IEEE80211_NUM_TIDS) { |
726 | ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", | 726 | ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", |
727 | tid, STA_TID_NUM); | 727 | tid, IEEE80211_NUM_TIDS); |
728 | return; | 728 | return; |
729 | } | 729 | } |
730 | 730 | ||
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 7371f676cf41..4965aa6424ec 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -370,29 +370,32 @@ static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy, | |||
370 | return 0; | 370 | return 0; |
371 | } | 371 | } |
372 | 372 | ||
373 | static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, int idx) | ||
374 | { | ||
375 | if (!(rate->flags & RATE_INFO_FLAGS_MCS)) { | ||
376 | struct ieee80211_supported_band *sband; | ||
377 | sband = sta->local->hw.wiphy->bands[ | ||
378 | sta->local->oper_channel->band]; | ||
379 | rate->legacy = sband->bitrates[idx].bitrate; | ||
380 | } else | ||
381 | rate->mcs = idx; | ||
382 | } | ||
383 | |||
384 | void sta_set_rate_info_tx(struct sta_info *sta, | 373 | void sta_set_rate_info_tx(struct sta_info *sta, |
385 | const struct ieee80211_tx_rate *rate, | 374 | const struct ieee80211_tx_rate *rate, |
386 | struct rate_info *rinfo) | 375 | struct rate_info *rinfo) |
387 | { | 376 | { |
388 | rinfo->flags = 0; | 377 | rinfo->flags = 0; |
389 | if (rate->flags & IEEE80211_TX_RC_MCS) | 378 | if (rate->flags & IEEE80211_TX_RC_MCS) { |
390 | rinfo->flags |= RATE_INFO_FLAGS_MCS; | 379 | rinfo->flags |= RATE_INFO_FLAGS_MCS; |
380 | rinfo->mcs = rate->idx; | ||
381 | } else if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { | ||
382 | rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; | ||
383 | rinfo->mcs = ieee80211_rate_get_vht_mcs(rate); | ||
384 | rinfo->nss = ieee80211_rate_get_vht_nss(rate); | ||
385 | } else { | ||
386 | struct ieee80211_supported_band *sband; | ||
387 | sband = sta->local->hw.wiphy->bands[ | ||
388 | ieee80211_get_sdata_band(sta->sdata)]; | ||
389 | rinfo->legacy = sband->bitrates[rate->idx].bitrate; | ||
390 | } | ||
391 | if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) | 391 | if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) |
392 | rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; | 392 | rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; |
393 | if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) | ||
394 | rinfo->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH; | ||
395 | if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) | ||
396 | rinfo->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH; | ||
393 | if (rate->flags & IEEE80211_TX_RC_SHORT_GI) | 397 | if (rate->flags & IEEE80211_TX_RC_SHORT_GI) |
394 | rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; | 398 | rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; |
395 | rate_idx_to_bitrate(rinfo, sta, rate->idx); | ||
396 | } | 399 | } |
397 | 400 | ||
398 | static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) | 401 | static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) |
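sta_set_rate_info_tx() now reports VHT rates as an MCS/NSS pair instead of mapping everything that is not HT onto a legacy bitrate. The ieee80211_rate_get_vht_mcs()/_nss() helpers unpack both values from the single rate index; the packing assumed below (MCS in bits 0-3, NSS minus one above them) is my reading of <net/mac80211.h>, so treat it as an assumption, and the sketch function names are illustrative:

        /* Assumed index packing behind ieee80211_rate_get_vht_mcs()/_nss();
         * check <net/mac80211.h> for the real helpers.
         */
        static inline unsigned int vht_mcs_sketch(unsigned int idx)
        {
                return idx & 0xf;               /* VHT MCS 0..9 */
        }

        static inline unsigned int vht_nss_sketch(unsigned int idx)
        {
                return (idx >> 4) + 1;          /* spatial streams, >= 1 */
        }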
@@ -443,13 +446,32 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) | |||
443 | sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate); | 446 | sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate); |
444 | 447 | ||
445 | sinfo->rxrate.flags = 0; | 448 | sinfo->rxrate.flags = 0; |
446 | if (sta->last_rx_rate_flag & RX_FLAG_HT) | 449 | if (sta->last_rx_rate_flag & RX_FLAG_HT) { |
447 | sinfo->rxrate.flags |= RATE_INFO_FLAGS_MCS; | 450 | sinfo->rxrate.flags |= RATE_INFO_FLAGS_MCS; |
451 | sinfo->rxrate.mcs = sta->last_rx_rate_idx; | ||
452 | } else if (sta->last_rx_rate_flag & RX_FLAG_VHT) { | ||
453 | sinfo->rxrate.flags |= RATE_INFO_FLAGS_VHT_MCS; | ||
454 | sinfo->rxrate.nss = sta->last_rx_rate_vht_nss; | ||
455 | sinfo->rxrate.mcs = sta->last_rx_rate_idx; | ||
456 | } else { | ||
457 | struct ieee80211_supported_band *sband; | ||
458 | |||
459 | sband = sta->local->hw.wiphy->bands[ | ||
460 | ieee80211_get_sdata_band(sta->sdata)]; | ||
461 | sinfo->rxrate.legacy = | ||
462 | sband->bitrates[sta->last_rx_rate_idx].bitrate; | ||
463 | } | ||
464 | |||
448 | if (sta->last_rx_rate_flag & RX_FLAG_40MHZ) | 465 | if (sta->last_rx_rate_flag & RX_FLAG_40MHZ) |
449 | sinfo->rxrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; | 466 | sinfo->rxrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; |
450 | if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI) | 467 | if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI) |
451 | sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI; | 468 | sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI; |
452 | rate_idx_to_bitrate(&sinfo->rxrate, sta, sta->last_rx_rate_idx); | 469 | if (sta->last_rx_rate_flag & RX_FLAG_80MHZ) |
470 | sinfo->rxrate.flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH; | ||
471 | if (sta->last_rx_rate_flag & RX_FLAG_80P80MHZ) | ||
472 | sinfo->rxrate.flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH; | ||
473 | if (sta->last_rx_rate_flag & RX_FLAG_160MHZ) | ||
474 | sinfo->rxrate.flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH; | ||
453 | 475 | ||
454 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | 476 | if (ieee80211_vif_is_mesh(&sdata->vif)) { |
455 | #ifdef CONFIG_MAC80211_MESH | 477 | #ifdef CONFIG_MAC80211_MESH |
@@ -532,6 +554,8 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy, | |||
532 | u64 *data) | 554 | u64 *data) |
533 | { | 555 | { |
534 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 556 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
557 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
558 | struct ieee80211_channel *channel; | ||
535 | struct sta_info *sta; | 559 | struct sta_info *sta; |
536 | struct ieee80211_local *local = sdata->local; | 560 | struct ieee80211_local *local = sdata->local; |
537 | struct station_info sinfo; | 561 | struct station_info sinfo; |
@@ -607,19 +631,26 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy, | |||
607 | do_survey: | 631 | do_survey: |
608 | i = STA_STATS_LEN - STA_STATS_SURVEY_LEN; | 632 | i = STA_STATS_LEN - STA_STATS_SURVEY_LEN; |
609 | /* Get survey stats for current channel */ | 633 | /* Get survey stats for current channel */ |
610 | q = 0; | 634 | survey.filled = 0; |
611 | while (true) { | ||
612 | survey.filled = 0; | ||
613 | if (drv_get_survey(local, q, &survey) != 0) { | ||
614 | survey.filled = 0; | ||
615 | break; | ||
616 | } | ||
617 | 635 | ||
618 | if (survey.channel && | 636 | rcu_read_lock(); |
619 | (local->oper_channel->center_freq == | 637 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); |
620 | survey.channel->center_freq)) | 638 | if (chanctx_conf) |
621 | break; | 639 | channel = chanctx_conf->def.chan; |
622 | q++; | 640 | else |
641 | channel = NULL; | ||
642 | rcu_read_unlock(); | ||
643 | |||
644 | if (channel) { | ||
645 | q = 0; | ||
646 | do { | ||
647 | survey.filled = 0; | ||
648 | if (drv_get_survey(local, q, &survey) != 0) { | ||
649 | survey.filled = 0; | ||
650 | break; | ||
651 | } | ||
652 | q++; | ||
653 | } while (channel != survey.channel); | ||
623 | } | 654 | } |
624 | 655 | ||
625 | if (survey.filled) | 656 | if (survey.filled) |
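The ethtool statistics path no longer reads local->oper_channel; the operating channel now hangs off the vif's RCU-managed channel context, so readers dereference it under rcu_read_lock() and copy out what they need. A sketch of that read-side lookup (example_get_channel() is an illustrative name; the assumption that the channel object itself is long-lived wiphy data is what makes using it after the unlock safe):

        /* Sketch of the RCU read-side lookup now used instead of reading
         * local->oper_channel directly.
         */
        static struct ieee80211_channel *
        example_get_channel(struct ieee80211_sub_if_data *sdata)
        {
                struct ieee80211_chanctx_conf *conf;
                struct ieee80211_channel *chan = NULL;

                rcu_read_lock();
                conf = rcu_dereference(sdata->vif.chanctx_conf);
                if (conf)
                        chan = conf->def.chan;
                rcu_read_unlock();

                return chan;
        }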
@@ -724,47 +755,37 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, | |||
724 | return ret; | 755 | return ret; |
725 | } | 756 | } |
726 | 757 | ||
727 | static int ieee80211_set_channel(struct wiphy *wiphy, | 758 | static int ieee80211_set_monitor_channel(struct wiphy *wiphy, |
728 | struct net_device *netdev, | 759 | struct cfg80211_chan_def *chandef) |
729 | struct ieee80211_channel *chan, | ||
730 | enum nl80211_channel_type channel_type) | ||
731 | { | 760 | { |
732 | struct ieee80211_local *local = wiphy_priv(wiphy); | 761 | struct ieee80211_local *local = wiphy_priv(wiphy); |
733 | struct ieee80211_sub_if_data *sdata = NULL; | 762 | struct ieee80211_sub_if_data *sdata; |
734 | 763 | int ret = 0; | |
735 | if (netdev) | ||
736 | sdata = IEEE80211_DEV_TO_SUB_IF(netdev); | ||
737 | |||
738 | switch (ieee80211_get_channel_mode(local, NULL)) { | ||
739 | case CHAN_MODE_HOPPING: | ||
740 | return -EBUSY; | ||
741 | case CHAN_MODE_FIXED: | ||
742 | if (local->oper_channel != chan || | ||
743 | (!sdata && local->_oper_channel_type != channel_type)) | ||
744 | return -EBUSY; | ||
745 | if (!sdata && local->_oper_channel_type == channel_type) | ||
746 | return 0; | ||
747 | break; | ||
748 | case CHAN_MODE_UNDEFINED: | ||
749 | break; | ||
750 | } | ||
751 | |||
752 | if (!ieee80211_set_channel_type(local, sdata, channel_type)) | ||
753 | return -EBUSY; | ||
754 | 764 | ||
755 | local->oper_channel = chan; | 765 | if (cfg80211_chandef_identical(&local->monitor_chandef, chandef)) |
766 | return 0; | ||
756 | 767 | ||
757 | /* auto-detects changes */ | 768 | mutex_lock(&local->iflist_mtx); |
758 | ieee80211_hw_config(local, 0); | 769 | if (local->use_chanctx) { |
770 | sdata = rcu_dereference_protected( | ||
771 | local->monitor_sdata, | ||
772 | lockdep_is_held(&local->iflist_mtx)); | ||
773 | if (sdata) { | ||
774 | ieee80211_vif_release_channel(sdata); | ||
775 | ret = ieee80211_vif_use_channel(sdata, chandef, | ||
776 | IEEE80211_CHANCTX_EXCLUSIVE); | ||
777 | } | ||
778 | } else if (local->open_count == local->monitors) { | ||
779 | local->_oper_channel = chandef->chan; | ||
780 | local->_oper_channel_type = cfg80211_get_chandef_type(chandef); | ||
781 | ieee80211_hw_config(local, 0); | ||
782 | } | ||
759 | 783 | ||
760 | return 0; | 784 | if (ret == 0) |
761 | } | 785 | local->monitor_chandef = *chandef; |
786 | mutex_unlock(&local->iflist_mtx); | ||
762 | 787 | ||
763 | static int ieee80211_set_monitor_channel(struct wiphy *wiphy, | 788 | return ret; |
764 | struct ieee80211_channel *chan, | ||
765 | enum nl80211_channel_type channel_type) | ||
766 | { | ||
767 | return ieee80211_set_channel(wiphy, NULL, chan, channel_type); | ||
768 | } | 789 | } |
769 | 790 | ||
770 | static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, | 791 | static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, |
@@ -879,8 +900,12 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
879 | if (old) | 900 | if (old) |
880 | return -EALREADY; | 901 | return -EALREADY; |
881 | 902 | ||
882 | err = ieee80211_set_channel(wiphy, dev, params->channel, | 903 | /* TODO: make hostapd tell us what it wants */ |
883 | params->channel_type); | 904 | sdata->smps_mode = IEEE80211_SMPS_OFF; |
905 | sdata->needed_rx_chains = sdata->local->rx_chains; | ||
906 | |||
907 | err = ieee80211_vif_use_channel(sdata, ¶ms->chandef, | ||
908 | IEEE80211_CHANCTX_SHARED); | ||
884 | if (err) | 909 | if (err) |
885 | return err; | 910 | return err; |
886 | 911 | ||
@@ -912,6 +937,15 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
912 | return err; | 937 | return err; |
913 | changed |= err; | 938 | changed |= err; |
914 | 939 | ||
940 | err = drv_start_ap(sdata->local, sdata); | ||
941 | if (err) { | ||
942 | old = rtnl_dereference(sdata->u.ap.beacon); | ||
943 | if (old) | ||
944 | kfree_rcu(old, rcu_head); | ||
945 | RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); | ||
946 | return err; | ||
947 | } | ||
948 | |||
915 | ieee80211_bss_info_change_notify(sdata, changed); | 949 | ieee80211_bss_info_change_notify(sdata, changed); |
916 | 950 | ||
917 | netif_carrier_on(dev); | 951 | netif_carrier_on(dev); |
@@ -943,26 +977,40 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, | |||
943 | 977 | ||
944 | static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) | 978 | static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) |
945 | { | 979 | { |
946 | struct ieee80211_sub_if_data *sdata, *vlan; | 980 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
947 | struct beacon_data *old; | 981 | struct ieee80211_sub_if_data *vlan; |
948 | 982 | struct ieee80211_local *local = sdata->local; | |
949 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 983 | struct beacon_data *old_beacon; |
984 | struct probe_resp *old_probe_resp; | ||
950 | 985 | ||
951 | old = rtnl_dereference(sdata->u.ap.beacon); | 986 | old_beacon = rtnl_dereference(sdata->u.ap.beacon); |
952 | if (!old) | 987 | if (!old_beacon) |
953 | return -ENOENT; | 988 | return -ENOENT; |
989 | old_probe_resp = rtnl_dereference(sdata->u.ap.probe_resp); | ||
954 | 990 | ||
991 | /* turn off carrier for this interface and dependent VLANs */ | ||
955 | list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) | 992 | list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) |
956 | netif_carrier_off(vlan->dev); | 993 | netif_carrier_off(vlan->dev); |
957 | netif_carrier_off(dev); | 994 | netif_carrier_off(dev); |
958 | 995 | ||
996 | /* remove beacon and probe response */ | ||
959 | RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); | 997 | RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); |
998 | RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL); | ||
999 | kfree_rcu(old_beacon, rcu_head); | ||
1000 | if (old_probe_resp) | ||
1001 | kfree_rcu(old_probe_resp, rcu_head); | ||
960 | 1002 | ||
961 | kfree_rcu(old, rcu_head); | 1003 | sta_info_flush(local, sdata); |
962 | |||
963 | sta_info_flush(sdata->local, sdata); | ||
964 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); | 1004 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); |
965 | 1005 | ||
1006 | drv_stop_ap(sdata->local, sdata); | ||
1007 | |||
1008 | /* free all potentially still buffered bcast frames */ | ||
1009 | local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); | ||
1010 | skb_queue_purge(&sdata->u.ap.ps.bc_buf); | ||
1011 | |||
1012 | ieee80211_vif_release_channel(sdata); | ||
1013 | |||
966 | return 0; | 1014 | return 0; |
967 | } | 1015 | } |
968 | 1016 | ||
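The teardown order in the new ieee80211_stop_ap() follows the usual RCU publish/retire discipline: clear the RCU-protected pointers first, then free the old objects with kfree_rcu() so that readers which already dereferenced them can finish safely. A generic sketch of that pattern (names are illustrative; rtnl_dereference() assumes the caller holds the RTNL, as the cfg80211 ops here do):

        /* Publish/retire pattern used in the stop_ap() path: clear the RCU
         * pointer, then defer the free until pre-existing readers are done.
         */
        struct example_beacon {
                struct rcu_head rcu_head;
                /* ... payload ... */
        };

        static void example_retire(struct example_beacon __rcu **slot)
        {
                struct example_beacon *old = rtnl_dereference(*slot);

                RCU_INIT_POINTER(*slot, NULL);
                if (old)
                        kfree_rcu(old, rcu_head);
        }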
@@ -1019,9 +1067,10 @@ static int sta_apply_parameters(struct ieee80211_local *local, | |||
1019 | int i, j; | 1067 | int i, j; |
1020 | struct ieee80211_supported_band *sband; | 1068 | struct ieee80211_supported_band *sband; |
1021 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 1069 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
1070 | enum ieee80211_band band = ieee80211_get_sdata_band(sdata); | ||
1022 | u32 mask, set; | 1071 | u32 mask, set; |
1023 | 1072 | ||
1024 | sband = local->hw.wiphy->bands[local->oper_channel->band]; | 1073 | sband = local->hw.wiphy->bands[band]; |
1025 | 1074 | ||
1026 | mask = params->sta_flags_mask; | 1075 | mask = params->sta_flags_mask; |
1027 | set = params->sta_flags_set; | 1076 | set = params->sta_flags_set; |
@@ -1136,7 +1185,7 @@ static int sta_apply_parameters(struct ieee80211_local *local, | |||
1136 | rates |= BIT(j); | 1185 | rates |= BIT(j); |
1137 | } | 1186 | } |
1138 | } | 1187 | } |
1139 | sta->sta.supp_rates[local->oper_channel->band] = rates; | 1188 | sta->sta.supp_rates[band] = rates; |
1140 | } | 1189 | } |
1141 | 1190 | ||
1142 | if (params->ht_capa) | 1191 | if (params->ht_capa) |
@@ -1144,6 +1193,11 @@ static int sta_apply_parameters(struct ieee80211_local *local, | |||
1144 | params->ht_capa, | 1193 | params->ht_capa, |
1145 | &sta->sta.ht_cap); | 1194 | &sta->sta.ht_cap); |
1146 | 1195 | ||
1196 | if (params->vht_capa) | ||
1197 | ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, | ||
1198 | params->vht_capa, | ||
1199 | &sta->sta.vht_cap); | ||
1200 | |||
1147 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | 1201 | if (ieee80211_vif_is_mesh(&sdata->vif)) { |
1148 | #ifdef CONFIG_MAC80211_MESH | 1202 | #ifdef CONFIG_MAC80211_MESH |
1149 | if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED) | 1203 | if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED) |
@@ -1664,8 +1718,12 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev, | |||
1664 | if (err) | 1718 | if (err) |
1665 | return err; | 1719 | return err; |
1666 | 1720 | ||
1667 | err = ieee80211_set_channel(wiphy, dev, setup->channel, | 1721 | /* can mesh use other SMPS modes? */ |
1668 | setup->channel_type); | 1722 | sdata->smps_mode = IEEE80211_SMPS_OFF; |
1723 | sdata->needed_rx_chains = sdata->local->rx_chains; | ||
1724 | |||
1725 | err = ieee80211_vif_use_channel(sdata, &setup->chandef, | ||
1726 | IEEE80211_CHANCTX_SHARED); | ||
1669 | if (err) | 1727 | if (err) |
1670 | return err; | 1728 | return err; |
1671 | 1729 | ||
@@ -1679,6 +1737,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) | |||
1679 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1737 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1680 | 1738 | ||
1681 | ieee80211_stop_mesh(sdata); | 1739 | ieee80211_stop_mesh(sdata); |
1740 | ieee80211_vif_release_channel(sdata); | ||
1682 | 1741 | ||
1683 | return 0; | 1742 | return 0; |
1684 | } | 1743 | } |
@@ -1688,10 +1747,14 @@ static int ieee80211_change_bss(struct wiphy *wiphy, | |||
1688 | struct net_device *dev, | 1747 | struct net_device *dev, |
1689 | struct bss_parameters *params) | 1748 | struct bss_parameters *params) |
1690 | { | 1749 | { |
1691 | struct ieee80211_sub_if_data *sdata; | 1750 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1751 | enum ieee80211_band band; | ||
1692 | u32 changed = 0; | 1752 | u32 changed = 0; |
1693 | 1753 | ||
1694 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1754 | if (!rtnl_dereference(sdata->u.ap.beacon)) |
1755 | return -ENOENT; | ||
1756 | |||
1757 | band = ieee80211_get_sdata_band(sdata); | ||
1695 | 1758 | ||
1696 | if (params->use_cts_prot >= 0) { | 1759 | if (params->use_cts_prot >= 0) { |
1697 | sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot; | 1760 | sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot; |
@@ -1704,7 +1767,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy, | |||
1704 | } | 1767 | } |
1705 | 1768 | ||
1706 | if (!sdata->vif.bss_conf.use_short_slot && | 1769 | if (!sdata->vif.bss_conf.use_short_slot && |
1707 | sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ) { | 1770 | band == IEEE80211_BAND_5GHZ) { |
1708 | sdata->vif.bss_conf.use_short_slot = true; | 1771 | sdata->vif.bss_conf.use_short_slot = true; |
1709 | changed |= BSS_CHANGED_ERP_SLOT; | 1772 | changed |= BSS_CHANGED_ERP_SLOT; |
1710 | } | 1773 | } |
@@ -1718,9 +1781,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy, | |||
1718 | if (params->basic_rates) { | 1781 | if (params->basic_rates) { |
1719 | int i, j; | 1782 | int i, j; |
1720 | u32 rates = 0; | 1783 | u32 rates = 0; |
1721 | struct ieee80211_local *local = wiphy_priv(wiphy); | 1784 | struct ieee80211_supported_band *sband = wiphy->bands[band]; |
1722 | struct ieee80211_supported_band *sband = | ||
1723 | wiphy->bands[local->oper_channel->band]; | ||
1724 | 1785 | ||
1725 | for (i = 0; i < params->basic_rates_len; i++) { | 1786 | for (i = 0; i < params->basic_rates_len; i++) { |
1726 | int rate = (params->basic_rates[i] & 0x7f) * 5; | 1787 | int rate = (params->basic_rates[i] & 0x7f) * 5; |
@@ -1829,7 +1890,16 @@ static int ieee80211_scan(struct wiphy *wiphy, | |||
1829 | * beaconing hasn't been configured yet | 1890 | * beaconing hasn't been configured yet |
1830 | */ | 1891 | */ |
1831 | case NL80211_IFTYPE_AP: | 1892 | case NL80211_IFTYPE_AP: |
1832 | if (sdata->u.ap.beacon) | 1893 | /* |
1894 | * If the scan has been forced (and the driver supports | ||
1895 | * forcing), don't care about being beaconing already. | ||
1896 | * This will create problems to the attached stations (e.g. all | ||
1897 | * the frames sent while scanning on other channel will be | ||
1898 | * lost) | ||
1899 | */ | ||
1900 | if (sdata->u.ap.beacon && | ||
1901 | (!(wiphy->features & NL80211_FEATURE_AP_SCAN) || | ||
1902 | !(req->flags & NL80211_SCAN_FLAG_AP))) | ||
1833 | return -EOPNOTSUPP; | 1903 | return -EOPNOTSUPP; |
1834 | break; | 1904 | break; |
1835 | default: | 1905 | default: |
@@ -1872,20 +1942,6 @@ static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, | |||
1872 | static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, | 1942 | static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, |
1873 | struct cfg80211_assoc_request *req) | 1943 | struct cfg80211_assoc_request *req) |
1874 | { | 1944 | { |
1875 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
1876 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1877 | |||
1878 | switch (ieee80211_get_channel_mode(local, sdata)) { | ||
1879 | case CHAN_MODE_HOPPING: | ||
1880 | return -EBUSY; | ||
1881 | case CHAN_MODE_FIXED: | ||
1882 | if (local->oper_channel == req->bss->channel) | ||
1883 | break; | ||
1884 | return -EBUSY; | ||
1885 | case CHAN_MODE_UNDEFINED: | ||
1886 | break; | ||
1887 | } | ||
1888 | |||
1889 | return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req); | 1945 | return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req); |
1890 | } | 1946 | } |
1891 | 1947 | ||
@@ -1904,30 +1960,22 @@ static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, | |||
1904 | static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, | 1960 | static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, |
1905 | struct cfg80211_ibss_params *params) | 1961 | struct cfg80211_ibss_params *params) |
1906 | { | 1962 | { |
1907 | struct ieee80211_local *local = wiphy_priv(wiphy); | 1963 | return ieee80211_ibss_join(IEEE80211_DEV_TO_SUB_IF(dev), params); |
1908 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
1909 | |||
1910 | switch (ieee80211_get_channel_mode(local, sdata)) { | ||
1911 | case CHAN_MODE_HOPPING: | ||
1912 | return -EBUSY; | ||
1913 | case CHAN_MODE_FIXED: | ||
1914 | if (!params->channel_fixed) | ||
1915 | return -EBUSY; | ||
1916 | if (local->oper_channel == params->channel) | ||
1917 | break; | ||
1918 | return -EBUSY; | ||
1919 | case CHAN_MODE_UNDEFINED: | ||
1920 | break; | ||
1921 | } | ||
1922 | |||
1923 | return ieee80211_ibss_join(sdata, params); | ||
1924 | } | 1964 | } |
1925 | 1965 | ||
1926 | static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) | 1966 | static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) |
1927 | { | 1967 | { |
1968 | return ieee80211_ibss_leave(IEEE80211_DEV_TO_SUB_IF(dev)); | ||
1969 | } | ||
1970 | |||
1971 | static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev, | ||
1972 | int rate[IEEE80211_NUM_BANDS]) | ||
1973 | { | ||
1928 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1974 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1929 | 1975 | ||
1930 | return ieee80211_ibss_leave(sdata); | 1976 | memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(rate)); |
1977 | |||
1978 | return 0; | ||
1931 | } | 1979 | } |
1932 | 1980 | ||
1933 | static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) | 1981 | static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) |
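One C subtlety in the new ieee80211_set_mcast_rate() above: a parameter declared as `int rate[IEEE80211_NUM_BANDS]` decays to `int *`, so `sizeof(rate)` inside the callee is the size of a pointer rather than of the whole table; copying the full array needs an explicit element count. A runnable illustration of the decay (NUM_BANDS and the function names are illustrative):

        /* Array-parameter decay: sizeof() no longer reports the array size
         * inside the callee.
         */
        #include <stdio.h>

        #define NUM_BANDS 3     /* illustrative stand-in for IEEE80211_NUM_BANDS */

        static void show(int rate[NUM_BANDS])
        {
                /* prints sizeof(int *), e.g. 8 on 64-bit, not 3 * sizeof(int) */
                printf("in callee: %zu\n", sizeof(rate));
        }

        int main(void)
        {
                int rate[NUM_BANDS] = { 0 };

                printf("in caller: %zu\n", sizeof(rate));   /* 3 * sizeof(int) */
                show(rate);
                return 0;
        }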
@@ -1956,10 +2004,16 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) | |||
1956 | return err; | 2004 | return err; |
1957 | } | 2005 | } |
1958 | 2006 | ||
1959 | if (changed & WIPHY_PARAM_RETRY_SHORT) | 2007 | if (changed & WIPHY_PARAM_RETRY_SHORT) { |
2008 | if (wiphy->retry_short > IEEE80211_MAX_TX_RETRY) | ||
2009 | return -EINVAL; | ||
1960 | local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; | 2010 | local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; |
1961 | if (changed & WIPHY_PARAM_RETRY_LONG) | 2011 | } |
2012 | if (changed & WIPHY_PARAM_RETRY_LONG) { | ||
2013 | if (wiphy->retry_long > IEEE80211_MAX_TX_RETRY) | ||
2014 | return -EINVAL; | ||
1962 | local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; | 2015 | local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; |
2016 | } | ||
1963 | if (changed & | 2017 | if (changed & |
1964 | (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG)) | 2018 | (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG)) |
1965 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS); | 2019 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS); |
@@ -1968,41 +2022,65 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) | |||
1968 | } | 2022 | } |
1969 | 2023 | ||
1970 | static int ieee80211_set_tx_power(struct wiphy *wiphy, | 2024 | static int ieee80211_set_tx_power(struct wiphy *wiphy, |
2025 | struct wireless_dev *wdev, | ||
1971 | enum nl80211_tx_power_setting type, int mbm) | 2026 | enum nl80211_tx_power_setting type, int mbm) |
1972 | { | 2027 | { |
1973 | struct ieee80211_local *local = wiphy_priv(wiphy); | 2028 | struct ieee80211_local *local = wiphy_priv(wiphy); |
1974 | struct ieee80211_channel *chan = local->oper_channel; | 2029 | struct ieee80211_sub_if_data *sdata; |
1975 | u32 changes = 0; | 2030 | |
2031 | if (wdev) { | ||
2032 | sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); | ||
2033 | |||
2034 | switch (type) { | ||
2035 | case NL80211_TX_POWER_AUTOMATIC: | ||
2036 | sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL; | ||
2037 | break; | ||
2038 | case NL80211_TX_POWER_LIMITED: | ||
2039 | case NL80211_TX_POWER_FIXED: | ||
2040 | if (mbm < 0 || (mbm % 100)) | ||
2041 | return -EOPNOTSUPP; | ||
2042 | sdata->user_power_level = MBM_TO_DBM(mbm); | ||
2043 | break; | ||
2044 | } | ||
2045 | |||
2046 | ieee80211_recalc_txpower(sdata); | ||
2047 | |||
2048 | return 0; | ||
2049 | } | ||
1976 | 2050 | ||
1977 | switch (type) { | 2051 | switch (type) { |
1978 | case NL80211_TX_POWER_AUTOMATIC: | 2052 | case NL80211_TX_POWER_AUTOMATIC: |
1979 | local->user_power_level = -1; | 2053 | local->user_power_level = IEEE80211_UNSET_POWER_LEVEL; |
1980 | break; | 2054 | break; |
1981 | case NL80211_TX_POWER_LIMITED: | 2055 | case NL80211_TX_POWER_LIMITED: |
1982 | if (mbm < 0 || (mbm % 100)) | ||
1983 | return -EOPNOTSUPP; | ||
1984 | local->user_power_level = MBM_TO_DBM(mbm); | ||
1985 | break; | ||
1986 | case NL80211_TX_POWER_FIXED: | 2056 | case NL80211_TX_POWER_FIXED: |
1987 | if (mbm < 0 || (mbm % 100)) | 2057 | if (mbm < 0 || (mbm % 100)) |
1988 | return -EOPNOTSUPP; | 2058 | return -EOPNOTSUPP; |
1989 | /* TODO: move to cfg80211 when it knows the channel */ | ||
1990 | if (MBM_TO_DBM(mbm) > chan->max_power) | ||
1991 | return -EINVAL; | ||
1992 | local->user_power_level = MBM_TO_DBM(mbm); | 2059 | local->user_power_level = MBM_TO_DBM(mbm); |
1993 | break; | 2060 | break; |
1994 | } | 2061 | } |
1995 | 2062 | ||
1996 | ieee80211_hw_config(local, changes); | 2063 | mutex_lock(&local->iflist_mtx); |
2064 | list_for_each_entry(sdata, &local->interfaces, list) | ||
2065 | sdata->user_power_level = local->user_power_level; | ||
2066 | list_for_each_entry(sdata, &local->interfaces, list) | ||
2067 | ieee80211_recalc_txpower(sdata); | ||
2068 | mutex_unlock(&local->iflist_mtx); | ||
1997 | 2069 | ||
1998 | return 0; | 2070 | return 0; |
1999 | } | 2071 | } |
2000 | 2072 | ||
2001 | static int ieee80211_get_tx_power(struct wiphy *wiphy, int *dbm) | 2073 | static int ieee80211_get_tx_power(struct wiphy *wiphy, |
2074 | struct wireless_dev *wdev, | ||
2075 | int *dbm) | ||
2002 | { | 2076 | { |
2003 | struct ieee80211_local *local = wiphy_priv(wiphy); | 2077 | struct ieee80211_local *local = wiphy_priv(wiphy); |
2078 | struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); | ||
2004 | 2079 | ||
2005 | *dbm = local->hw.conf.power_level; | 2080 | if (!local->use_chanctx) |
2081 | *dbm = local->hw.conf.power_level; | ||
2082 | else | ||
2083 | *dbm = sdata->vif.bss_conf.txpower; | ||
2006 | 2084 | ||
2007 | return 0; | 2085 | return 0; |
2008 | } | 2086 | } |
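Both the per-wdev and the global branches of ieee80211_set_tx_power() keep cfg80211's unit convention: power arrives in mBm (hundredths of a dBm), MBM_TO_DBM() divides by 100, and the `mbm % 100` test rejects fractional dBm values that mac80211 cannot store. A small sketch of that validation (example_check_power() and the _SKETCH macro are illustrative; MBM_TO_DBM() is the real cfg80211 macro being mirrored):

        /* mBm -> dBm handling as used above: whole-dBm values only. */
        #include <errno.h>

        #define MBM_TO_DBM_SKETCH(mbm)  ((mbm) / 100)

        static int example_check_power(int mbm, int *dbm)
        {
                if (mbm < 0 || (mbm % 100))
                        return -EOPNOTSUPP;     /* negative or fractional dBm */
                *dbm = MBM_TO_DBM_SKETCH(mbm);  /* e.g. 2000 mBm -> 20 dBm */
                return 0;
        }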
@@ -2067,13 +2145,12 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, | |||
2067 | 2145 | ||
2068 | /* | 2146 | /* |
2069 | * If not associated, or current association is not an HT | 2147 | * If not associated, or current association is not an HT |
2070 | * association, there's no need to send an action frame. | 2148 | * association, there's no need to do anything, just store |
2149 | * the new value until we associate. | ||
2071 | */ | 2150 | */ |
2072 | if (!sdata->u.mgd.associated || | 2151 | if (!sdata->u.mgd.associated || |
2073 | sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) { | 2152 | sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) |
2074 | ieee80211_recalc_smps(sdata->local); | ||
2075 | return 0; | 2153 | return 0; |
2076 | } | ||
2077 | 2154 | ||
2078 | ap = sdata->u.mgd.associated->bssid; | 2155 | ap = sdata->u.mgd.associated->bssid; |
2079 | 2156 | ||
@@ -2179,7 +2256,6 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, | |||
2179 | static int ieee80211_start_roc_work(struct ieee80211_local *local, | 2256 | static int ieee80211_start_roc_work(struct ieee80211_local *local, |
2180 | struct ieee80211_sub_if_data *sdata, | 2257 | struct ieee80211_sub_if_data *sdata, |
2181 | struct ieee80211_channel *channel, | 2258 | struct ieee80211_channel *channel, |
2182 | enum nl80211_channel_type channel_type, | ||
2183 | unsigned int duration, u64 *cookie, | 2259 | unsigned int duration, u64 *cookie, |
2184 | struct sk_buff *txskb) | 2260 | struct sk_buff *txskb) |
2185 | { | 2261 | { |
@@ -2189,12 +2265,14 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, | |||
2189 | 2265 | ||
2190 | lockdep_assert_held(&local->mtx); | 2266 | lockdep_assert_held(&local->mtx); |
2191 | 2267 | ||
2268 | if (local->use_chanctx && !local->ops->remain_on_channel) | ||
2269 | return -EOPNOTSUPP; | ||
2270 | |||
2192 | roc = kzalloc(sizeof(*roc), GFP_KERNEL); | 2271 | roc = kzalloc(sizeof(*roc), GFP_KERNEL); |
2193 | if (!roc) | 2272 | if (!roc) |
2194 | return -ENOMEM; | 2273 | return -ENOMEM; |
2195 | 2274 | ||
2196 | roc->chan = channel; | 2275 | roc->chan = channel; |
2197 | roc->chan_type = channel_type; | ||
2198 | roc->duration = duration; | 2276 | roc->duration = duration; |
2199 | roc->req_duration = duration; | 2277 | roc->req_duration = duration; |
2200 | roc->frame = txskb; | 2278 | roc->frame = txskb; |
@@ -2227,7 +2305,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, | |||
2227 | if (!duration) | 2305 | if (!duration) |
2228 | duration = 10; | 2306 | duration = 10; |
2229 | 2307 | ||
2230 | ret = drv_remain_on_channel(local, channel, channel_type, duration); | 2308 | ret = drv_remain_on_channel(local, sdata, channel, duration); |
2231 | if (ret) { | 2309 | if (ret) { |
2232 | kfree(roc); | 2310 | kfree(roc); |
2233 | return ret; | 2311 | return ret; |
@@ -2238,7 +2316,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, | |||
2238 | 2316 | ||
2239 | out_check_combine: | 2317 | out_check_combine: |
2240 | list_for_each_entry(tmp, &local->roc_list, list) { | 2318 | list_for_each_entry(tmp, &local->roc_list, list) { |
2241 | if (tmp->chan != channel || tmp->chan_type != channel_type) | 2319 | if (tmp->chan != channel || tmp->sdata != sdata) |
2242 | continue; | 2320 | continue; |
2243 | 2321 | ||
2244 | /* | 2322 | /* |
@@ -2332,13 +2410,22 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, | |||
2332 | list_add_tail(&roc->list, &local->roc_list); | 2410 | list_add_tail(&roc->list, &local->roc_list); |
2333 | 2411 | ||
2334 | /* | 2412 | /* |
2335 | * cookie is either the roc (for normal roc) | 2413 | * cookie is either the roc cookie (for normal roc) |
2336 | * or the SKB (for mgmt TX) | 2414 | * or the SKB (for mgmt TX) |
2337 | */ | 2415 | */ |
2338 | if (txskb) | 2416 | if (!txskb) { |
2417 | /* local->mtx protects this */ | ||
2418 | local->roc_cookie_counter++; | ||
2419 | roc->cookie = local->roc_cookie_counter; | ||
2420 | /* wow, you wrapped 64 bits ... more likely a bug */ | ||
2421 | if (WARN_ON(roc->cookie == 0)) { | ||
2422 | roc->cookie = 1; | ||
2423 | local->roc_cookie_counter++; | ||
2424 | } | ||
2425 | *cookie = roc->cookie; | ||
2426 | } else { | ||
2339 | *cookie = (unsigned long)txskb; | 2427 | *cookie = (unsigned long)txskb; |
2340 | else | 2428 | } |
2341 | *cookie = (unsigned long)roc; | ||
2342 | 2429 | ||
2343 | return 0; | 2430 | return 0; |
2344 | } | 2431 | } |
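With this hunk, remain-on-channel and management-TX cookies stop being kernel pointer values cast to u64; a counter serialized by local->mtx hands out opaque, monotonically increasing cookies, and the cancel paths below compare roc->cookie instead of casting pointers back. A sketch of such an allocator (the typedef and function name are illustrative, not mac80211 API):

        /* Opaque-cookie allocation: a counter under an existing lock,
         * skipping 0 so "no cookie" stays distinguishable.
         */
        typedef unsigned long long u64_sketch;  /* stand-in for kernel u64 */

        static u64_sketch example_next_cookie(u64_sketch *counter)
        {
                /* caller must hold the lock that serializes allocation */
                if (++(*counter) == 0)          /* 64-bit wrap: practically never */
                        *counter = 1;
                return *counter;
        }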
@@ -2346,7 +2433,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, | |||
2346 | static int ieee80211_remain_on_channel(struct wiphy *wiphy, | 2433 | static int ieee80211_remain_on_channel(struct wiphy *wiphy, |
2347 | struct wireless_dev *wdev, | 2434 | struct wireless_dev *wdev, |
2348 | struct ieee80211_channel *chan, | 2435 | struct ieee80211_channel *chan, |
2349 | enum nl80211_channel_type channel_type, | ||
2350 | unsigned int duration, | 2436 | unsigned int duration, |
2351 | u64 *cookie) | 2437 | u64 *cookie) |
2352 | { | 2438 | { |
@@ -2355,7 +2441,7 @@ static int ieee80211_remain_on_channel(struct wiphy *wiphy, | |||
2355 | int ret; | 2441 | int ret; |
2356 | 2442 | ||
2357 | mutex_lock(&local->mtx); | 2443 | mutex_lock(&local->mtx); |
2358 | ret = ieee80211_start_roc_work(local, sdata, chan, channel_type, | 2444 | ret = ieee80211_start_roc_work(local, sdata, chan, |
2359 | duration, cookie, NULL); | 2445 | duration, cookie, NULL); |
2360 | mutex_unlock(&local->mtx); | 2446 | mutex_unlock(&local->mtx); |
2361 | 2447 | ||
@@ -2373,7 +2459,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
2373 | struct ieee80211_roc_work *dep, *tmp2; | 2459 | struct ieee80211_roc_work *dep, *tmp2; |
2374 | 2460 | ||
2375 | list_for_each_entry_safe(dep, tmp2, &roc->dependents, list) { | 2461 | list_for_each_entry_safe(dep, tmp2, &roc->dependents, list) { |
2376 | if (!mgmt_tx && (unsigned long)dep != cookie) | 2462 | if (!mgmt_tx && dep->cookie != cookie) |
2377 | continue; | 2463 | continue; |
2378 | else if (mgmt_tx && dep->mgmt_tx_cookie != cookie) | 2464 | else if (mgmt_tx && dep->mgmt_tx_cookie != cookie) |
2379 | continue; | 2465 | continue; |
@@ -2385,7 +2471,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
2385 | return 0; | 2471 | return 0; |
2386 | } | 2472 | } |
2387 | 2473 | ||
2388 | if (!mgmt_tx && (unsigned long)roc != cookie) | 2474 | if (!mgmt_tx && roc->cookie != cookie) |
2389 | continue; | 2475 | continue; |
2390 | else if (mgmt_tx && roc->mgmt_tx_cookie != cookie) | 2476 | else if (mgmt_tx && roc->mgmt_tx_cookie != cookie) |
2391 | continue; | 2477 | continue; |
@@ -2448,10 +2534,8 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy, | |||
2448 | 2534 | ||
2449 | static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | 2535 | static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, |
2450 | struct ieee80211_channel *chan, bool offchan, | 2536 | struct ieee80211_channel *chan, bool offchan, |
2451 | enum nl80211_channel_type channel_type, | 2537 | unsigned int wait, const u8 *buf, size_t len, |
2452 | bool channel_type_valid, unsigned int wait, | 2538 | bool no_cck, bool dont_wait_for_ack, u64 *cookie) |
2453 | const u8 *buf, size_t len, bool no_cck, | ||
2454 | bool dont_wait_for_ack, u64 *cookie) | ||
2455 | { | 2539 | { |
2456 | struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); | 2540 | struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); |
2457 | struct ieee80211_local *local = sdata->local; | 2541 | struct ieee80211_local *local = sdata->local; |
@@ -2515,10 +2599,16 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
2515 | 2599 | ||
2516 | /* Check if the operating channel is the requested channel */ | 2600 | /* Check if the operating channel is the requested channel */ |
2517 | if (!need_offchan) { | 2601 | if (!need_offchan) { |
2518 | need_offchan = chan != local->oper_channel; | 2602 | struct ieee80211_chanctx_conf *chanctx_conf; |
2519 | if (channel_type_valid && | 2603 | |
2520 | channel_type != local->_oper_channel_type) | 2604 | rcu_read_lock(); |
2605 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
2606 | |||
2607 | if (chanctx_conf) | ||
2608 | need_offchan = chan != chanctx_conf->def.chan; | ||
2609 | else | ||
2521 | need_offchan = true; | 2610 | need_offchan = true; |
2611 | rcu_read_unlock(); | ||
2522 | } | 2612 | } |
2523 | 2613 | ||
2524 | if (need_offchan && !offchan) { | 2614 | if (need_offchan && !offchan) { |
@@ -2552,7 +2642,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
2552 | local->hw.offchannel_tx_hw_queue; | 2642 | local->hw.offchannel_tx_hw_queue; |
2553 | 2643 | ||
2554 | /* This will handle all kinds of coalescing and immediate TX */ | 2644 | /* This will handle all kinds of coalescing and immediate TX */ |
2555 | ret = ieee80211_start_roc_work(local, sdata, chan, channel_type, | 2645 | ret = ieee80211_start_roc_work(local, sdata, chan, |
2556 | wait, cookie, skb); | 2646 | wait, cookie, skb); |
2557 | if (ret) | 2647 | if (ret) |
2558 | kfree_skb(skb); | 2648 | kfree_skb(skb); |
@@ -2670,7 +2760,7 @@ static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata) | |||
2670 | u16 capab; | 2760 | u16 capab; |
2671 | 2761 | ||
2672 | capab = 0; | 2762 | capab = 0; |
2673 | if (local->oper_channel->band != IEEE80211_BAND_2GHZ) | 2763 | if (ieee80211_get_sdata_band(sdata) != IEEE80211_BAND_2GHZ) |
2674 | return capab; | 2764 | return capab; |
2675 | 2765 | ||
2676 | if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)) | 2766 | if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)) |
@@ -2702,7 +2792,7 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, | |||
2702 | u16 status_code, struct sk_buff *skb) | 2792 | u16 status_code, struct sk_buff *skb) |
2703 | { | 2793 | { |
2704 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 2794 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
2705 | struct ieee80211_local *local = sdata->local; | 2795 | enum ieee80211_band band = ieee80211_get_sdata_band(sdata); |
2706 | struct ieee80211_tdls_data *tf; | 2796 | struct ieee80211_tdls_data *tf; |
2707 | 2797 | ||
2708 | tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u)); | 2798 | tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u)); |
@@ -2722,10 +2812,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, | |||
2722 | tf->u.setup_req.capability = | 2812 | tf->u.setup_req.capability = |
2723 | cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); | 2813 | cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); |
2724 | 2814 | ||
2725 | ieee80211_add_srates_ie(sdata, skb, false, | 2815 | ieee80211_add_srates_ie(sdata, skb, false, band); |
2726 | local->oper_channel->band); | 2816 | ieee80211_add_ext_srates_ie(sdata, skb, false, band); |
2727 | ieee80211_add_ext_srates_ie(sdata, skb, false, | ||
2728 | local->oper_channel->band); | ||
2729 | ieee80211_tdls_add_ext_capab(skb); | 2817 | ieee80211_tdls_add_ext_capab(skb); |
2730 | break; | 2818 | break; |
2731 | case WLAN_TDLS_SETUP_RESPONSE: | 2819 | case WLAN_TDLS_SETUP_RESPONSE: |
@@ -2738,10 +2826,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, | |||
2738 | tf->u.setup_resp.capability = | 2826 | tf->u.setup_resp.capability = |
2739 | cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); | 2827 | cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); |
2740 | 2828 | ||
2741 | ieee80211_add_srates_ie(sdata, skb, false, | 2829 | ieee80211_add_srates_ie(sdata, skb, false, band); |
2742 | local->oper_channel->band); | 2830 | ieee80211_add_ext_srates_ie(sdata, skb, false, band); |
2743 | ieee80211_add_ext_srates_ie(sdata, skb, false, | ||
2744 | local->oper_channel->band); | ||
2745 | ieee80211_tdls_add_ext_capab(skb); | 2831 | ieee80211_tdls_add_ext_capab(skb); |
2746 | break; | 2832 | break; |
2747 | case WLAN_TDLS_SETUP_CONFIRM: | 2833 | case WLAN_TDLS_SETUP_CONFIRM: |
@@ -2779,7 +2865,7 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev, | |||
2779 | u16 status_code, struct sk_buff *skb) | 2865 | u16 status_code, struct sk_buff *skb) |
2780 | { | 2866 | { |
2781 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 2867 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
2782 | struct ieee80211_local *local = sdata->local; | 2868 | enum ieee80211_band band = ieee80211_get_sdata_band(sdata); |
2783 | struct ieee80211_mgmt *mgmt; | 2869 | struct ieee80211_mgmt *mgmt; |
2784 | 2870 | ||
2785 | mgmt = (void *)skb_put(skb, 24); | 2871 | mgmt = (void *)skb_put(skb, 24); |
@@ -2802,10 +2888,8 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev, | |||
2802 | mgmt->u.action.u.tdls_discover_resp.capability = | 2888 | mgmt->u.action.u.tdls_discover_resp.capability = |
2803 | cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); | 2889 | cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); |
2804 | 2890 | ||
2805 | ieee80211_add_srates_ie(sdata, skb, false, | 2891 | ieee80211_add_srates_ie(sdata, skb, false, band); |
2806 | local->oper_channel->band); | 2892 | ieee80211_add_ext_srates_ie(sdata, skb, false, band); |
2807 | ieee80211_add_ext_srates_ie(sdata, skb, false, | ||
2808 | local->oper_channel->band); | ||
2809 | ieee80211_tdls_add_ext_capab(skb); | 2893 | ieee80211_tdls_add_ext_capab(skb); |
2810 | break; | 2894 | break; |
2811 | default: | 2895 | default: |
@@ -2822,7 +2906,6 @@ static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
2822 | { | 2906 | { |
2823 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 2907 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
2824 | struct ieee80211_local *local = sdata->local; | 2908 | struct ieee80211_local *local = sdata->local; |
2825 | struct ieee80211_tx_info *info; | ||
2826 | struct sk_buff *skb = NULL; | 2909 | struct sk_buff *skb = NULL; |
2827 | bool send_direct; | 2910 | bool send_direct; |
2828 | int ret; | 2911 | int ret; |
@@ -2848,7 +2931,6 @@ static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
2848 | if (!skb) | 2931 | if (!skb) |
2849 | return -ENOMEM; | 2932 | return -ENOMEM; |
2850 | 2933 | ||
2851 | info = IEEE80211_SKB_CB(skb); | ||
2852 | skb_reserve(skb, local->hw.extra_tx_headroom); | 2934 | skb_reserve(skb, local->hw.extra_tx_headroom); |
2853 | 2935 | ||
2854 | switch (action_code) { | 2936 | switch (action_code) { |
@@ -2985,12 +3067,19 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, | |||
2985 | bool qos; | 3067 | bool qos; |
2986 | struct ieee80211_tx_info *info; | 3068 | struct ieee80211_tx_info *info; |
2987 | struct sta_info *sta; | 3069 | struct sta_info *sta; |
3070 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
3071 | enum ieee80211_band band; | ||
2988 | 3072 | ||
2989 | rcu_read_lock(); | 3073 | rcu_read_lock(); |
3074 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
3075 | if (WARN_ON(!chanctx_conf)) { | ||
3076 | rcu_read_unlock(); | ||
3077 | return -EINVAL; | ||
3078 | } | ||
3079 | band = chanctx_conf->def.chan->band; | ||
2990 | sta = sta_info_get(sdata, peer); | 3080 | sta = sta_info_get(sdata, peer); |
2991 | if (sta) { | 3081 | if (sta) { |
2992 | qos = test_sta_flag(sta, WLAN_STA_WME); | 3082 | qos = test_sta_flag(sta, WLAN_STA_WME); |
2993 | rcu_read_unlock(); | ||
2994 | } else { | 3083 | } else { |
2995 | rcu_read_unlock(); | 3084 | rcu_read_unlock(); |
2996 | return -ENOLINK; | 3085 | return -ENOLINK; |
@@ -3008,8 +3097,10 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, | |||
3008 | } | 3097 | } |
3009 | 3098 | ||
3010 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); | 3099 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); |
3011 | if (!skb) | 3100 | if (!skb) { |
3101 | rcu_read_unlock(); | ||
3012 | return -ENOMEM; | 3102 | return -ENOMEM; |
3103 | } | ||
3013 | 3104 | ||
3014 | skb->dev = dev; | 3105 | skb->dev = dev; |
3015 | 3106 | ||
@@ -3034,21 +3125,31 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, | |||
3034 | nullfunc->qos_ctrl = cpu_to_le16(7); | 3125 | nullfunc->qos_ctrl = cpu_to_le16(7); |
3035 | 3126 | ||
3036 | local_bh_disable(); | 3127 | local_bh_disable(); |
3037 | ieee80211_xmit(sdata, skb); | 3128 | ieee80211_xmit(sdata, skb, band); |
3038 | local_bh_enable(); | 3129 | local_bh_enable(); |
3130 | rcu_read_unlock(); | ||
3039 | 3131 | ||
3040 | *cookie = (unsigned long) skb; | 3132 | *cookie = (unsigned long) skb; |
3041 | return 0; | 3133 | return 0; |
3042 | } | 3134 | } |
3043 | 3135 | ||
3044 | static struct ieee80211_channel * | 3136 | static int ieee80211_cfg_get_channel(struct wiphy *wiphy, |
3045 | ieee80211_cfg_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, | 3137 | struct wireless_dev *wdev, |
3046 | enum nl80211_channel_type *type) | 3138 | struct cfg80211_chan_def *chandef) |
3047 | { | 3139 | { |
3048 | struct ieee80211_local *local = wiphy_priv(wiphy); | 3140 | struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); |
3141 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
3142 | int ret = -ENODATA; | ||
3049 | 3143 | ||
3050 | *type = local->_oper_channel_type; | 3144 | rcu_read_lock(); |
3051 | return local->oper_channel; | 3145 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); |
3146 | if (chanctx_conf) { | ||
3147 | *chandef = chanctx_conf->def; | ||
3148 | ret = 0; | ||
3149 | } | ||
3150 | rcu_read_unlock(); | ||
3151 | |||
3152 | return ret; | ||
3052 | } | 3153 | } |
3053 | 3154 | ||
3054 | #ifdef CONFIG_PM | 3155 | #ifdef CONFIG_PM |
@@ -3103,6 +3204,7 @@ struct cfg80211_ops mac80211_config_ops = { | |||
3103 | .disassoc = ieee80211_disassoc, | 3204 | .disassoc = ieee80211_disassoc, |
3104 | .join_ibss = ieee80211_join_ibss, | 3205 | .join_ibss = ieee80211_join_ibss, |
3105 | .leave_ibss = ieee80211_leave_ibss, | 3206 | .leave_ibss = ieee80211_leave_ibss, |
3207 | .set_mcast_rate = ieee80211_set_mcast_rate, | ||
3106 | .set_wiphy_params = ieee80211_set_wiphy_params, | 3208 | .set_wiphy_params = ieee80211_set_wiphy_params, |
3107 | .set_tx_power = ieee80211_set_tx_power, | 3209 | .set_tx_power = ieee80211_set_tx_power, |
3108 | .get_tx_power = ieee80211_get_tx_power, | 3210 | .get_tx_power = ieee80211_get_tx_power, |
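The common theme of the cfg.c hunks above is the removal of the global local->oper_channel / local->_oper_channel_type state: management TX, the TDLS helpers and ieee80211_probe_client() now derive the operating channel (or band) from the interface's own channel context, dereferenced under RCU, and ieee80211_cfg_get_channel() reports a whole cfg80211_chan_def rather than a channel plus channel type. Below is a minimal sketch of that access pattern, not code from the patch: the helper name is invented and mac80211-internal definitions (ieee80211_i.h) are assumed; ieee80211_get_sdata_band(), used by the TDLS hunks, presumably wraps the same lookup.

    /* Hedged sketch of the RCU chanctx read used throughout cfg.c above;
     * the helper name is hypothetical.
     */
    static enum ieee80211_band
    example_sdata_band(struct ieee80211_sub_if_data *sdata)
    {
            struct ieee80211_chanctx_conf *conf;
            /* arbitrary fallback for the sketch; the real callers above
             * WARN and bail out when there is no channel context
             */
            enum ieee80211_band band = IEEE80211_BAND_2GHZ;

            rcu_read_lock();
            conf = rcu_dereference(sdata->vif.chanctx_conf);
            if (conf)
                    band = conf->def.chan->band;
            rcu_read_unlock();

            return band;
    }

Where the result is used for more than a snapshot, the read side stays held across the whole use: note how ieee80211_probe_client() above now keeps rcu_read_lock() held until after ieee80211_xmit().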
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 0bfc914ddd15..53f03120db55 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c | |||
@@ -3,168 +3,347 @@ | |||
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/nl80211.h> | 5 | #include <linux/nl80211.h> |
6 | #include <linux/export.h> | ||
6 | #include <net/cfg80211.h> | 7 | #include <net/cfg80211.h> |
7 | #include "ieee80211_i.h" | 8 | #include "ieee80211_i.h" |
9 | #include "driver-ops.h" | ||
8 | 10 | ||
9 | static enum ieee80211_chan_mode | 11 | static void ieee80211_change_chandef(struct ieee80211_local *local, |
10 | __ieee80211_get_channel_mode(struct ieee80211_local *local, | 12 | struct ieee80211_chanctx *ctx, |
11 | struct ieee80211_sub_if_data *ignore) | 13 | const struct cfg80211_chan_def *chandef) |
12 | { | 14 | { |
15 | if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) | ||
16 | return; | ||
17 | |||
18 | WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef)); | ||
19 | |||
20 | ctx->conf.def = *chandef; | ||
21 | drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_WIDTH); | ||
22 | |||
23 | if (!local->use_chanctx) { | ||
24 | local->_oper_channel_type = cfg80211_get_chandef_type(chandef); | ||
25 | ieee80211_hw_config(local, 0); | ||
26 | } | ||
27 | } | ||
28 | |||
29 | static struct ieee80211_chanctx * | ||
30 | ieee80211_find_chanctx(struct ieee80211_local *local, | ||
31 | const struct cfg80211_chan_def *chandef, | ||
32 | enum ieee80211_chanctx_mode mode) | ||
33 | { | ||
34 | struct ieee80211_chanctx *ctx; | ||
35 | |||
36 | lockdep_assert_held(&local->chanctx_mtx); | ||
37 | |||
38 | if (mode == IEEE80211_CHANCTX_EXCLUSIVE) | ||
39 | return NULL; | ||
40 | |||
41 | list_for_each_entry(ctx, &local->chanctx_list, list) { | ||
42 | const struct cfg80211_chan_def *compat; | ||
43 | |||
44 | if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) | ||
45 | continue; | ||
46 | |||
47 | compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef); | ||
48 | if (!compat) | ||
49 | continue; | ||
50 | |||
51 | ieee80211_change_chandef(local, ctx, compat); | ||
52 | |||
53 | return ctx; | ||
54 | } | ||
55 | |||
56 | return NULL; | ||
57 | } | ||
58 | |||
59 | static struct ieee80211_chanctx * | ||
60 | ieee80211_new_chanctx(struct ieee80211_local *local, | ||
61 | const struct cfg80211_chan_def *chandef, | ||
62 | enum ieee80211_chanctx_mode mode) | ||
63 | { | ||
64 | struct ieee80211_chanctx *ctx; | ||
65 | int err; | ||
66 | |||
67 | lockdep_assert_held(&local->chanctx_mtx); | ||
68 | |||
69 | ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL); | ||
70 | if (!ctx) | ||
71 | return ERR_PTR(-ENOMEM); | ||
72 | |||
73 | ctx->conf.def = *chandef; | ||
74 | ctx->conf.rx_chains_static = 1; | ||
75 | ctx->conf.rx_chains_dynamic = 1; | ||
76 | ctx->mode = mode; | ||
77 | |||
78 | if (!local->use_chanctx) { | ||
79 | local->_oper_channel_type = | ||
80 | cfg80211_get_chandef_type(chandef); | ||
81 | local->_oper_channel = chandef->chan; | ||
82 | ieee80211_hw_config(local, 0); | ||
83 | } else { | ||
84 | err = drv_add_chanctx(local, ctx); | ||
85 | if (err) { | ||
86 | kfree(ctx); | ||
87 | return ERR_PTR(err); | ||
88 | } | ||
89 | } | ||
90 | |||
91 | list_add_rcu(&ctx->list, &local->chanctx_list); | ||
92 | |||
93 | return ctx; | ||
94 | } | ||
95 | |||
96 | static void ieee80211_free_chanctx(struct ieee80211_local *local, | ||
97 | struct ieee80211_chanctx *ctx) | ||
98 | { | ||
99 | lockdep_assert_held(&local->chanctx_mtx); | ||
100 | |||
101 | WARN_ON_ONCE(ctx->refcount != 0); | ||
102 | |||
103 | if (!local->use_chanctx) { | ||
104 | local->_oper_channel_type = NL80211_CHAN_NO_HT; | ||
105 | ieee80211_hw_config(local, 0); | ||
106 | } else { | ||
107 | drv_remove_chanctx(local, ctx); | ||
108 | } | ||
109 | |||
110 | list_del_rcu(&ctx->list); | ||
111 | kfree_rcu(ctx, rcu_head); | ||
112 | } | ||
113 | |||
114 | static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata, | ||
115 | struct ieee80211_chanctx *ctx) | ||
116 | { | ||
117 | struct ieee80211_local *local = sdata->local; | ||
118 | int ret; | ||
119 | |||
120 | lockdep_assert_held(&local->chanctx_mtx); | ||
121 | |||
122 | ret = drv_assign_vif_chanctx(local, sdata, ctx); | ||
123 | if (ret) | ||
124 | return ret; | ||
125 | |||
126 | rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf); | ||
127 | ctx->refcount++; | ||
128 | |||
129 | ieee80211_recalc_txpower(sdata); | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, | ||
135 | struct ieee80211_chanctx *ctx) | ||
136 | { | ||
137 | struct ieee80211_chanctx_conf *conf = &ctx->conf; | ||
13 | struct ieee80211_sub_if_data *sdata; | 138 | struct ieee80211_sub_if_data *sdata; |
139 | const struct cfg80211_chan_def *compat = NULL; | ||
14 | 140 | ||
15 | lockdep_assert_held(&local->iflist_mtx); | 141 | lockdep_assert_held(&local->chanctx_mtx); |
16 | 142 | ||
17 | list_for_each_entry(sdata, &local->interfaces, list) { | 143 | rcu_read_lock(); |
18 | if (sdata == ignore) | 144 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
145 | |||
146 | if (!ieee80211_sdata_running(sdata)) | ||
147 | continue; | ||
148 | if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf) | ||
19 | continue; | 149 | continue; |
20 | 150 | ||
151 | if (!compat) | ||
152 | compat = &sdata->vif.bss_conf.chandef; | ||
153 | |||
154 | compat = cfg80211_chandef_compatible( | ||
155 | &sdata->vif.bss_conf.chandef, compat); | ||
156 | if (!compat) | ||
157 | break; | ||
158 | } | ||
159 | rcu_read_unlock(); | ||
160 | |||
161 | if (WARN_ON_ONCE(!compat)) | ||
162 | return; | ||
163 | |||
164 | ieee80211_change_chandef(local, ctx, compat); | ||
165 | } | ||
166 | |||
167 | static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata, | ||
168 | struct ieee80211_chanctx *ctx) | ||
169 | { | ||
170 | struct ieee80211_local *local = sdata->local; | ||
171 | |||
172 | lockdep_assert_held(&local->chanctx_mtx); | ||
173 | |||
174 | ctx->refcount--; | ||
175 | rcu_assign_pointer(sdata->vif.chanctx_conf, NULL); | ||
176 | |||
177 | drv_unassign_vif_chanctx(local, sdata, ctx); | ||
178 | |||
179 | if (ctx->refcount > 0) { | ||
180 | ieee80211_recalc_chanctx_chantype(sdata->local, ctx); | ||
181 | ieee80211_recalc_smps_chanctx(local, ctx); | ||
182 | } | ||
183 | } | ||
184 | |||
185 | static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata) | ||
186 | { | ||
187 | struct ieee80211_local *local = sdata->local; | ||
188 | struct ieee80211_chanctx_conf *conf; | ||
189 | struct ieee80211_chanctx *ctx; | ||
190 | |||
191 | lockdep_assert_held(&local->chanctx_mtx); | ||
192 | |||
193 | conf = rcu_dereference_protected(sdata->vif.chanctx_conf, | ||
194 | lockdep_is_held(&local->chanctx_mtx)); | ||
195 | if (!conf) | ||
196 | return; | ||
197 | |||
198 | ctx = container_of(conf, struct ieee80211_chanctx, conf); | ||
199 | |||
200 | ieee80211_unassign_vif_chanctx(sdata, ctx); | ||
201 | if (ctx->refcount == 0) | ||
202 | ieee80211_free_chanctx(local, ctx); | ||
203 | } | ||
204 | |||
205 | void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, | ||
206 | struct ieee80211_chanctx *chanctx) | ||
207 | { | ||
208 | struct ieee80211_sub_if_data *sdata; | ||
209 | u8 rx_chains_static, rx_chains_dynamic; | ||
210 | |||
211 | lockdep_assert_held(&local->chanctx_mtx); | ||
212 | |||
213 | rx_chains_static = 1; | ||
214 | rx_chains_dynamic = 1; | ||
215 | |||
216 | rcu_read_lock(); | ||
217 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | ||
218 | u8 needed_static, needed_dynamic; | ||
219 | |||
21 | if (!ieee80211_sdata_running(sdata)) | 220 | if (!ieee80211_sdata_running(sdata)) |
22 | continue; | 221 | continue; |
23 | 222 | ||
223 | if (rcu_access_pointer(sdata->vif.chanctx_conf) != | ||
224 | &chanctx->conf) | ||
225 | continue; | ||
226 | |||
24 | switch (sdata->vif.type) { | 227 | switch (sdata->vif.type) { |
25 | case NL80211_IFTYPE_MONITOR: | 228 | case NL80211_IFTYPE_P2P_DEVICE: |
26 | continue; | 229 | continue; |
27 | case NL80211_IFTYPE_STATION: | 230 | case NL80211_IFTYPE_STATION: |
28 | if (!sdata->u.mgd.associated) | 231 | if (!sdata->u.mgd.associated) |
29 | continue; | 232 | continue; |
30 | break; | 233 | break; |
31 | case NL80211_IFTYPE_ADHOC: | ||
32 | if (!sdata->u.ibss.ssid_len) | ||
33 | continue; | ||
34 | if (!sdata->u.ibss.fixed_channel) | ||
35 | return CHAN_MODE_HOPPING; | ||
36 | break; | ||
37 | case NL80211_IFTYPE_AP_VLAN: | 234 | case NL80211_IFTYPE_AP_VLAN: |
38 | /* will also have _AP interface */ | ||
39 | continue; | 235 | continue; |
40 | case NL80211_IFTYPE_AP: | 236 | case NL80211_IFTYPE_AP: |
41 | if (!sdata->u.ap.beacon) | 237 | case NL80211_IFTYPE_ADHOC: |
42 | continue; | 238 | case NL80211_IFTYPE_WDS: |
43 | break; | ||
44 | case NL80211_IFTYPE_MESH_POINT: | 239 | case NL80211_IFTYPE_MESH_POINT: |
45 | if (!sdata->wdev.mesh_id_len) | ||
46 | continue; | ||
47 | break; | 240 | break; |
48 | default: | 241 | default: |
242 | WARN_ON_ONCE(1); | ||
243 | } | ||
244 | |||
245 | switch (sdata->smps_mode) { | ||
246 | default: | ||
247 | WARN_ONCE(1, "Invalid SMPS mode %d\n", | ||
248 | sdata->smps_mode); | ||
249 | /* fall through */ | ||
250 | case IEEE80211_SMPS_OFF: | ||
251 | needed_static = sdata->needed_rx_chains; | ||
252 | needed_dynamic = sdata->needed_rx_chains; | ||
253 | break; | ||
254 | case IEEE80211_SMPS_DYNAMIC: | ||
255 | needed_static = 1; | ||
256 | needed_dynamic = sdata->needed_rx_chains; | ||
257 | break; | ||
258 | case IEEE80211_SMPS_STATIC: | ||
259 | needed_static = 1; | ||
260 | needed_dynamic = 1; | ||
49 | break; | 261 | break; |
50 | } | 262 | } |
51 | 263 | ||
52 | return CHAN_MODE_FIXED; | 264 | rx_chains_static = max(rx_chains_static, needed_static); |
265 | rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic); | ||
53 | } | 266 | } |
267 | rcu_read_unlock(); | ||
54 | 268 | ||
55 | return CHAN_MODE_UNDEFINED; | 269 | if (!local->use_chanctx) { |
56 | } | 270 | if (rx_chains_static > 1) |
57 | 271 | local->smps_mode = IEEE80211_SMPS_OFF; | |
58 | enum ieee80211_chan_mode | 272 | else if (rx_chains_dynamic > 1) |
59 | ieee80211_get_channel_mode(struct ieee80211_local *local, | 273 | local->smps_mode = IEEE80211_SMPS_DYNAMIC; |
60 | struct ieee80211_sub_if_data *ignore) | 274 | else |
61 | { | 275 | local->smps_mode = IEEE80211_SMPS_STATIC; |
62 | enum ieee80211_chan_mode mode; | 276 | ieee80211_hw_config(local, 0); |
277 | } | ||
63 | 278 | ||
64 | mutex_lock(&local->iflist_mtx); | 279 | if (rx_chains_static == chanctx->conf.rx_chains_static && |
65 | mode = __ieee80211_get_channel_mode(local, ignore); | 280 | rx_chains_dynamic == chanctx->conf.rx_chains_dynamic) |
66 | mutex_unlock(&local->iflist_mtx); | 281 | return; |
67 | 282 | ||
68 | return mode; | 283 | chanctx->conf.rx_chains_static = rx_chains_static; |
284 | chanctx->conf.rx_chains_dynamic = rx_chains_dynamic; | ||
285 | drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RX_CHAINS); | ||
69 | } | 286 | } |
70 | 287 | ||
71 | static enum nl80211_channel_type | 288 | int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, |
72 | ieee80211_get_superchan(struct ieee80211_local *local, | 289 | const struct cfg80211_chan_def *chandef, |
73 | struct ieee80211_sub_if_data *sdata) | 290 | enum ieee80211_chanctx_mode mode) |
74 | { | 291 | { |
75 | enum nl80211_channel_type superchan = NL80211_CHAN_NO_HT; | 292 | struct ieee80211_local *local = sdata->local; |
76 | struct ieee80211_sub_if_data *tmp; | 293 | struct ieee80211_chanctx *ctx; |
294 | int ret; | ||
77 | 295 | ||
78 | mutex_lock(&local->iflist_mtx); | 296 | WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev)); |
79 | list_for_each_entry(tmp, &local->interfaces, list) { | ||
80 | if (tmp == sdata) | ||
81 | continue; | ||
82 | |||
83 | if (!ieee80211_sdata_running(tmp)) | ||
84 | continue; | ||
85 | 297 | ||
86 | switch (tmp->vif.bss_conf.channel_type) { | 298 | mutex_lock(&local->chanctx_mtx); |
87 | case NL80211_CHAN_NO_HT: | 299 | __ieee80211_vif_release_channel(sdata); |
88 | case NL80211_CHAN_HT20: | ||
89 | if (superchan > tmp->vif.bss_conf.channel_type) | ||
90 | break; | ||
91 | 300 | ||
92 | superchan = tmp->vif.bss_conf.channel_type; | 301 | ctx = ieee80211_find_chanctx(local, chandef, mode); |
93 | break; | 302 | if (!ctx) |
94 | case NL80211_CHAN_HT40PLUS: | 303 | ctx = ieee80211_new_chanctx(local, chandef, mode); |
95 | WARN_ON(superchan == NL80211_CHAN_HT40MINUS); | 304 | if (IS_ERR(ctx)) { |
96 | superchan = NL80211_CHAN_HT40PLUS; | 305 | ret = PTR_ERR(ctx); |
97 | break; | 306 | goto out; |
98 | case NL80211_CHAN_HT40MINUS: | ||
99 | WARN_ON(superchan == NL80211_CHAN_HT40PLUS); | ||
100 | superchan = NL80211_CHAN_HT40MINUS; | ||
101 | break; | ||
102 | } | ||
103 | } | 307 | } |
104 | mutex_unlock(&local->iflist_mtx); | ||
105 | 308 | ||
106 | return superchan; | 309 | sdata->vif.bss_conf.chandef = *chandef; |
107 | } | ||
108 | 310 | ||
109 | static bool | 311 | ret = ieee80211_assign_vif_chanctx(sdata, ctx); |
110 | ieee80211_channel_types_are_compatible(enum nl80211_channel_type chantype1, | 312 | if (ret) { |
111 | enum nl80211_channel_type chantype2, | 313 | /* if assign fails refcount stays the same */ |
112 | enum nl80211_channel_type *compat) | 314 | if (ctx->refcount == 0) |
113 | { | 315 | ieee80211_free_chanctx(local, ctx); |
114 | /* | 316 | goto out; |
115 | * start out with chantype1 being the result, | ||
116 | * overwriting later if needed | ||
117 | */ | ||
118 | if (compat) | ||
119 | *compat = chantype1; | ||
120 | |||
121 | switch (chantype1) { | ||
122 | case NL80211_CHAN_NO_HT: | ||
123 | if (compat) | ||
124 | *compat = chantype2; | ||
125 | break; | ||
126 | case NL80211_CHAN_HT20: | ||
127 | /* | ||
128 | * allow any change that doesn't go to no-HT | ||
129 | * (if it already is no-HT no change is needed) | ||
130 | */ | ||
131 | if (chantype2 == NL80211_CHAN_NO_HT) | ||
132 | break; | ||
133 | if (compat) | ||
134 | *compat = chantype2; | ||
135 | break; | ||
136 | case NL80211_CHAN_HT40PLUS: | ||
137 | case NL80211_CHAN_HT40MINUS: | ||
138 | /* allow smaller bandwidth and same */ | ||
139 | if (chantype2 == NL80211_CHAN_NO_HT) | ||
140 | break; | ||
141 | if (chantype2 == NL80211_CHAN_HT20) | ||
142 | break; | ||
143 | if (chantype2 == chantype1) | ||
144 | break; | ||
145 | return false; | ||
146 | } | 317 | } |
147 | 318 | ||
148 | return true; | 319 | ieee80211_recalc_smps_chanctx(local, ctx); |
320 | out: | ||
321 | mutex_unlock(&local->chanctx_mtx); | ||
322 | return ret; | ||
149 | } | 323 | } |
150 | 324 | ||
151 | bool ieee80211_set_channel_type(struct ieee80211_local *local, | 325 | void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata) |
152 | struct ieee80211_sub_if_data *sdata, | ||
153 | enum nl80211_channel_type chantype) | ||
154 | { | 326 | { |
155 | enum nl80211_channel_type superchan; | 327 | WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev)); |
156 | enum nl80211_channel_type compatchan; | ||
157 | |||
158 | superchan = ieee80211_get_superchan(local, sdata); | ||
159 | if (!ieee80211_channel_types_are_compatible(superchan, chantype, | ||
160 | &compatchan)) | ||
161 | return false; | ||
162 | 328 | ||
163 | local->_oper_channel_type = compatchan; | 329 | mutex_lock(&sdata->local->chanctx_mtx); |
164 | 330 | __ieee80211_vif_release_channel(sdata); | |
165 | if (sdata) | 331 | mutex_unlock(&sdata->local->chanctx_mtx); |
166 | sdata->vif.bss_conf.channel_type = chantype; | 332 | } |
167 | 333 | ||
168 | return true; | 334 | void ieee80211_iter_chan_contexts_atomic( |
335 | struct ieee80211_hw *hw, | ||
336 | void (*iter)(struct ieee80211_hw *hw, | ||
337 | struct ieee80211_chanctx_conf *chanctx_conf, | ||
338 | void *data), | ||
339 | void *iter_data) | ||
340 | { | ||
341 | struct ieee80211_local *local = hw_to_local(hw); | ||
342 | struct ieee80211_chanctx *ctx; | ||
169 | 343 | ||
344 | rcu_read_lock(); | ||
345 | list_for_each_entry_rcu(ctx, &local->chanctx_list, list) | ||
346 | iter(hw, &ctx->conf, iter_data); | ||
347 | rcu_read_unlock(); | ||
170 | } | 348 | } |
349 | EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_atomic); | ||
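The rewritten chan.c above replaces the old channel-type juggling (ieee80211_get_superchan() and friends) with refcounted channel contexts: ieee80211_vif_use_channel() binds an interface to a compatible existing context or creates a new one, the driver is notified through drv_add_chanctx()/drv_assign_vif_chanctx(), and ieee80211_vif_release_channel() drops the binding and frees the context once its refcount reaches zero. A minimal caller sketch follows, modelled on the ibss.c hunks later in this patch; the function and the chosen chandef are illustrative only.

    /* Sketch only: taking and releasing a channel through the new API. */
    static int example_use_channel(struct ieee80211_sub_if_data *sdata,
                                   struct ieee80211_channel *chan)
    {
            struct cfg80211_chan_def chandef;
            int err;

            cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);

            err = ieee80211_vif_use_channel(sdata, &chandef,
                                            IEEE80211_CHANCTX_SHARED);
            if (err)
                    return err;     /* no compatible context and none could be added */

            /* ... operate; sdata->vif.chanctx_conf is now valid under RCU ... */

            ieee80211_vif_release_channel(sdata);
            return 0;
    }

Since ieee80211_vif_use_channel() itself starts by calling __ieee80211_vif_release_channel(), a vif that switches channels does not need an explicit release of its old context first.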
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h index 9be4e6d71d00..214ed4ecd739 100644 --- a/net/mac80211/debugfs.h +++ b/net/mac80211/debugfs.h | |||
@@ -2,9 +2,9 @@ | |||
2 | #define __MAC80211_DEBUGFS_H | 2 | #define __MAC80211_DEBUGFS_H |
3 | 3 | ||
4 | #ifdef CONFIG_MAC80211_DEBUGFS | 4 | #ifdef CONFIG_MAC80211_DEBUGFS |
5 | extern void debugfs_hw_add(struct ieee80211_local *local); | 5 | void debugfs_hw_add(struct ieee80211_local *local); |
6 | extern int mac80211_format_buffer(char __user *userbuf, size_t count, | 6 | int __printf(4, 5) mac80211_format_buffer(char __user *userbuf, size_t count, |
7 | loff_t *ppos, char *fmt, ...); | 7 | loff_t *ppos, char *fmt, ...); |
8 | #else | 8 | #else |
9 | static inline void debugfs_hw_add(struct ieee80211_local *local) | 9 | static inline void debugfs_hw_add(struct ieee80211_local *local) |
10 | { | 10 | { |
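The debugfs.h hunk above drops the redundant extern and marks mac80211_format_buffer() with __printf(4, 5), the kernel wrapper around __attribute__((format(printf, ...))): argument 4 is the format string and the variadic arguments start at 5, so the compiler can type-check every caller's format string. A one-line illustration with an invented function:

    /* Illustration only: argument 2 is the format string, variadic
     * arguments start at 3, so the compiler warns on my_log(1, "%s", 5).
     */
    __printf(2, 3) int my_log(int level, const char *fmt, ...);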
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 090d08ff22c4..2d4235497f1b 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c | |||
@@ -116,7 +116,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, | |||
116 | size_t count, loff_t *ppos) | 116 | size_t count, loff_t *ppos) |
117 | { | 117 | { |
118 | struct ieee80211_key *key = file->private_data; | 118 | struct ieee80211_key *key = file->private_data; |
119 | char buf[14*NUM_RX_DATA_QUEUES+1], *p = buf; | 119 | char buf[14*IEEE80211_NUM_TIDS+1], *p = buf; |
120 | int i, len; | 120 | int i, len; |
121 | const u8 *rpn; | 121 | const u8 *rpn; |
122 | 122 | ||
@@ -126,7 +126,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, | |||
126 | len = scnprintf(buf, sizeof(buf), "\n"); | 126 | len = scnprintf(buf, sizeof(buf), "\n"); |
127 | break; | 127 | break; |
128 | case WLAN_CIPHER_SUITE_TKIP: | 128 | case WLAN_CIPHER_SUITE_TKIP: |
129 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) | 129 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) |
130 | p += scnprintf(p, sizeof(buf)+buf-p, | 130 | p += scnprintf(p, sizeof(buf)+buf-p, |
131 | "%08x %04x\n", | 131 | "%08x %04x\n", |
132 | key->u.tkip.rx[i].iv32, | 132 | key->u.tkip.rx[i].iv32, |
@@ -134,7 +134,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, | |||
134 | len = p - buf; | 134 | len = p - buf; |
135 | break; | 135 | break; |
136 | case WLAN_CIPHER_SUITE_CCMP: | 136 | case WLAN_CIPHER_SUITE_CCMP: |
137 | for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) { | 137 | for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) { |
138 | rpn = key->u.ccmp.rx_pn[i]; | 138 | rpn = key->u.ccmp.rx_pn[i]; |
139 | p += scnprintf(p, sizeof(buf)+buf-p, | 139 | p += scnprintf(p, sizeof(buf)+buf-p, |
140 | "%02x%02x%02x%02x%02x%02x\n", | 140 | "%02x%02x%02x%02x%02x%02x\n", |
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 6d5aec9418ee..cbde5cc49a40 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/device.h> | 11 | #include <linux/device.h> |
12 | #include <linux/if.h> | 12 | #include <linux/if.h> |
13 | #include <linux/if_ether.h> | ||
13 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
14 | #include <linux/netdevice.h> | 15 | #include <linux/netdevice.h> |
15 | #include <linux/rtnetlink.h> | 16 | #include <linux/rtnetlink.h> |
@@ -167,7 +168,29 @@ IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz, | |||
167 | 168 | ||
168 | IEEE80211_IF_FILE(flags, flags, HEX); | 169 | IEEE80211_IF_FILE(flags, flags, HEX); |
169 | IEEE80211_IF_FILE(state, state, LHEX); | 170 | IEEE80211_IF_FILE(state, state, LHEX); |
170 | IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC); | 171 | IEEE80211_IF_FILE(txpower, vif.bss_conf.txpower, DEC); |
172 | IEEE80211_IF_FILE(ap_power_level, ap_power_level, DEC); | ||
173 | IEEE80211_IF_FILE(user_power_level, user_power_level, DEC); | ||
174 | |||
175 | static ssize_t | ||
176 | ieee80211_if_fmt_hw_queues(const struct ieee80211_sub_if_data *sdata, | ||
177 | char *buf, int buflen) | ||
178 | { | ||
179 | int len; | ||
180 | |||
181 | len = scnprintf(buf, buflen, "AC queues: VO:%d VI:%d BE:%d BK:%d\n", | ||
182 | sdata->vif.hw_queue[IEEE80211_AC_VO], | ||
183 | sdata->vif.hw_queue[IEEE80211_AC_VI], | ||
184 | sdata->vif.hw_queue[IEEE80211_AC_BE], | ||
185 | sdata->vif.hw_queue[IEEE80211_AC_BK]); | ||
186 | |||
187 | if (sdata->vif.type == NL80211_IFTYPE_AP) | ||
188 | len += scnprintf(buf + len, buflen - len, "cab queue: %d\n", | ||
189 | sdata->vif.cab_queue); | ||
190 | |||
191 | return len; | ||
192 | } | ||
193 | __IEEE80211_IF_FILE(hw_queues, NULL); | ||
171 | 194 | ||
172 | /* STA attributes */ | 195 | /* STA attributes */ |
173 | IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); | 196 | IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); |
@@ -217,7 +240,7 @@ static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata, | |||
217 | 240 | ||
218 | return snprintf(buf, buflen, "request: %s\nused: %s\n", | 241 | return snprintf(buf, buflen, "request: %s\nused: %s\n", |
219 | smps_modes[sdata->u.mgd.req_smps], | 242 | smps_modes[sdata->u.mgd.req_smps], |
220 | smps_modes[sdata->u.mgd.ap_smps]); | 243 | smps_modes[sdata->smps_mode]); |
221 | } | 244 | } |
222 | 245 | ||
223 | static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata, | 246 | static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata, |
@@ -245,27 +268,6 @@ static ssize_t ieee80211_if_fmt_tkip_mic_test( | |||
245 | return -EOPNOTSUPP; | 268 | return -EOPNOTSUPP; |
246 | } | 269 | } |
247 | 270 | ||
248 | static int hwaddr_aton(const char *txt, u8 *addr) | ||
249 | { | ||
250 | int i; | ||
251 | |||
252 | for (i = 0; i < ETH_ALEN; i++) { | ||
253 | int a, b; | ||
254 | |||
255 | a = hex_to_bin(*txt++); | ||
256 | if (a < 0) | ||
257 | return -1; | ||
258 | b = hex_to_bin(*txt++); | ||
259 | if (b < 0) | ||
260 | return -1; | ||
261 | *addr++ = (a << 4) | b; | ||
262 | if (i < 5 && *txt++ != ':') | ||
263 | return -1; | ||
264 | } | ||
265 | |||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static ssize_t ieee80211_if_parse_tkip_mic_test( | 271 | static ssize_t ieee80211_if_parse_tkip_mic_test( |
270 | struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) | 272 | struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) |
271 | { | 273 | { |
@@ -275,13 +277,7 @@ static ssize_t ieee80211_if_parse_tkip_mic_test( | |||
275 | struct ieee80211_hdr *hdr; | 277 | struct ieee80211_hdr *hdr; |
276 | __le16 fc; | 278 | __le16 fc; |
277 | 279 | ||
278 | /* | 280 | if (!mac_pton(buf, addr)) |
279 | * Assume colon-delimited MAC address with possible white space | ||
280 | * following. | ||
281 | */ | ||
282 | if (buflen < 3 * ETH_ALEN - 1) | ||
283 | return -EINVAL; | ||
284 | if (hwaddr_aton(buf, addr) < 0) | ||
285 | return -EINVAL; | 281 | return -EINVAL; |
286 | 282 | ||
287 | if (!ieee80211_sdata_running(sdata)) | 283 | if (!ieee80211_sdata_running(sdata)) |
@@ -307,13 +303,16 @@ static ssize_t ieee80211_if_parse_tkip_mic_test( | |||
307 | case NL80211_IFTYPE_STATION: | 303 | case NL80211_IFTYPE_STATION: |
308 | fc |= cpu_to_le16(IEEE80211_FCTL_TODS); | 304 | fc |= cpu_to_le16(IEEE80211_FCTL_TODS); |
309 | /* BSSID SA DA */ | 305 | /* BSSID SA DA */ |
310 | if (sdata->vif.bss_conf.bssid == NULL) { | 306 | mutex_lock(&sdata->u.mgd.mtx); |
307 | if (!sdata->u.mgd.associated) { | ||
308 | mutex_unlock(&sdata->u.mgd.mtx); | ||
311 | dev_kfree_skb(skb); | 309 | dev_kfree_skb(skb); |
312 | return -ENOTCONN; | 310 | return -ENOTCONN; |
313 | } | 311 | } |
314 | memcpy(hdr->addr1, sdata->vif.bss_conf.bssid, ETH_ALEN); | 312 | memcpy(hdr->addr1, sdata->u.mgd.associated->bssid, ETH_ALEN); |
315 | memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); | 313 | memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); |
316 | memcpy(hdr->addr3, addr, ETH_ALEN); | 314 | memcpy(hdr->addr3, addr, ETH_ALEN); |
315 | mutex_unlock(&sdata->u.mgd.mtx); | ||
317 | break; | 316 | break; |
318 | default: | 317 | default: |
319 | dev_kfree_skb(skb); | 318 | dev_kfree_skb(skb); |
@@ -395,14 +394,14 @@ __IEEE80211_IF_FILE_W(uapsd_max_sp_len); | |||
395 | 394 | ||
396 | /* AP attributes */ | 395 | /* AP attributes */ |
397 | IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC); | 396 | IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC); |
398 | IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); | 397 | IEEE80211_IF_FILE(num_sta_ps, u.ap.ps.num_sta_ps, ATOMIC); |
399 | IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); | 398 | IEEE80211_IF_FILE(dtim_count, u.ap.ps.dtim_count, DEC); |
400 | 399 | ||
401 | static ssize_t ieee80211_if_fmt_num_buffered_multicast( | 400 | static ssize_t ieee80211_if_fmt_num_buffered_multicast( |
402 | const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) | 401 | const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) |
403 | { | 402 | { |
404 | return scnprintf(buf, buflen, "%u\n", | 403 | return scnprintf(buf, buflen, "%u\n", |
405 | skb_queue_len(&sdata->u.ap.ps_bc_buf)); | 404 | skb_queue_len(&sdata->u.ap.ps.bc_buf)); |
406 | } | 405 | } |
407 | __IEEE80211_IF_FILE(num_buffered_multicast, NULL); | 406 | __IEEE80211_IF_FILE(num_buffered_multicast, NULL); |
408 | 407 | ||
@@ -443,7 +442,7 @@ static ssize_t ieee80211_if_parse_tsf( | |||
443 | } | 442 | } |
444 | ret = kstrtoull(buf, 10, &tsf); | 443 | ret = kstrtoull(buf, 10, &tsf); |
445 | if (ret < 0) | 444 | if (ret < 0) |
446 | return -EINVAL; | 445 | return ret; |
447 | if (tsf_is_delta) | 446 | if (tsf_is_delta) |
448 | tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf; | 447 | tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf; |
449 | if (local->ops->set_tsf) { | 448 | if (local->ops->set_tsf) { |
@@ -471,7 +470,7 @@ IEEE80211_IF_FILE(dropped_frames_congestion, | |||
471 | u.mesh.mshstats.dropped_frames_congestion, DEC); | 470 | u.mesh.mshstats.dropped_frames_congestion, DEC); |
472 | IEEE80211_IF_FILE(dropped_frames_no_route, | 471 | IEEE80211_IF_FILE(dropped_frames_no_route, |
473 | u.mesh.mshstats.dropped_frames_no_route, DEC); | 472 | u.mesh.mshstats.dropped_frames_no_route, DEC); |
474 | IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC); | 473 | IEEE80211_IF_FILE(estab_plinks, u.mesh.estab_plinks, ATOMIC); |
475 | 474 | ||
476 | /* Mesh parameters */ | 475 | /* Mesh parameters */ |
477 | IEEE80211_IF_FILE(dot11MeshMaxRetries, | 476 | IEEE80211_IF_FILE(dot11MeshMaxRetries, |
@@ -531,6 +530,7 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata) | |||
531 | DEBUGFS_ADD(rc_rateidx_mask_5ghz); | 530 | DEBUGFS_ADD(rc_rateidx_mask_5ghz); |
532 | DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); | 531 | DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); |
533 | DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); | 532 | DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); |
533 | DEBUGFS_ADD(hw_queues); | ||
534 | } | 534 | } |
535 | 535 | ||
536 | static void add_sta_files(struct ieee80211_sub_if_data *sdata) | 536 | static void add_sta_files(struct ieee80211_sub_if_data *sdata) |
@@ -631,7 +631,9 @@ static void add_files(struct ieee80211_sub_if_data *sdata) | |||
631 | 631 | ||
632 | DEBUGFS_ADD(flags); | 632 | DEBUGFS_ADD(flags); |
633 | DEBUGFS_ADD(state); | 633 | DEBUGFS_ADD(state); |
634 | DEBUGFS_ADD(channel_type); | 634 | DEBUGFS_ADD(txpower); |
635 | DEBUGFS_ADD(user_power_level); | ||
636 | DEBUGFS_ADD(ap_power_level); | ||
635 | 637 | ||
636 | if (sdata->vif.type != NL80211_IFTYPE_MONITOR) | 638 | if (sdata->vif.type != NL80211_IFTYPE_MONITOR) |
637 | add_common_files(sdata); | 639 | add_common_files(sdata); |
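The tkip_mic_test write handler above now parses the target MAC address with the kernel's generic mac_pton() helper instead of the private hwaddr_aton(), which also makes the explicit length check unnecessary since mac_pton() rejects malformed or too-short input itself. A hedged usage sketch; the wrapper function is invented and only the mac_pton() call reflects the hunk.

    /* Illustrative only: parse "aa:bb:cc:dd:ee:ff" the way
     * ieee80211_if_parse_tkip_mic_test() now does.
     */
    static int example_parse_peer(const char *buf, u8 addr[ETH_ALEN])
    {
            if (!mac_pton(buf, addr))
                    return -EINVAL;         /* not a well-formed MAC address */
            return 0;
    }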
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 5ccec2c1e9f6..89281d24b094 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include "debugfs.h" | 14 | #include "debugfs.h" |
15 | #include "debugfs_sta.h" | 15 | #include "debugfs_sta.h" |
16 | #include "sta_info.h" | 16 | #include "sta_info.h" |
17 | #include "driver-ops.h" | ||
17 | 18 | ||
18 | /* sta attributtes */ | 19 | /* sta attributtes */ |
19 | 20 | ||
@@ -131,10 +132,10 @@ STA_OPS(connected_time); | |||
131 | static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf, | 132 | static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf, |
132 | size_t count, loff_t *ppos) | 133 | size_t count, loff_t *ppos) |
133 | { | 134 | { |
134 | char buf[15*NUM_RX_DATA_QUEUES], *p = buf; | 135 | char buf[15*IEEE80211_NUM_TIDS], *p = buf; |
135 | int i; | 136 | int i; |
136 | struct sta_info *sta = file->private_data; | 137 | struct sta_info *sta = file->private_data; |
137 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) | 138 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) |
138 | p += scnprintf(p, sizeof(buf)+buf-p, "%x ", | 139 | p += scnprintf(p, sizeof(buf)+buf-p, "%x ", |
139 | le16_to_cpu(sta->last_seq_ctrl[i])); | 140 | le16_to_cpu(sta->last_seq_ctrl[i])); |
140 | p += scnprintf(p, sizeof(buf)+buf-p, "\n"); | 141 | p += scnprintf(p, sizeof(buf)+buf-p, "\n"); |
@@ -145,7 +146,7 @@ STA_OPS(last_seq_ctrl); | |||
145 | static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, | 146 | static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, |
146 | size_t count, loff_t *ppos) | 147 | size_t count, loff_t *ppos) |
147 | { | 148 | { |
148 | char buf[71 + STA_TID_NUM * 40], *p = buf; | 149 | char buf[71 + IEEE80211_NUM_TIDS * 40], *p = buf; |
149 | int i; | 150 | int i; |
150 | struct sta_info *sta = file->private_data; | 151 | struct sta_info *sta = file->private_data; |
151 | struct tid_ampdu_rx *tid_rx; | 152 | struct tid_ampdu_rx *tid_rx; |
@@ -158,7 +159,7 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, | |||
158 | p += scnprintf(p, sizeof(buf) + buf - p, | 159 | p += scnprintf(p, sizeof(buf) + buf - p, |
159 | "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); | 160 | "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); |
160 | 161 | ||
161 | for (i = 0; i < STA_TID_NUM; i++) { | 162 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) { |
162 | tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]); | 163 | tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]); |
163 | tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]); | 164 | tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]); |
164 | 165 | ||
@@ -220,7 +221,7 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu | |||
220 | 221 | ||
221 | tid = simple_strtoul(buf, NULL, 0); | 222 | tid = simple_strtoul(buf, NULL, 0); |
222 | 223 | ||
223 | if (tid >= STA_TID_NUM) | 224 | if (tid >= IEEE80211_NUM_TIDS) |
224 | return -EINVAL; | 225 | return -EINVAL; |
225 | 226 | ||
226 | if (tx) { | 227 | if (tx) { |
@@ -334,6 +335,8 @@ STA_OPS(ht_capa); | |||
334 | 335 | ||
335 | void ieee80211_sta_debugfs_add(struct sta_info *sta) | 336 | void ieee80211_sta_debugfs_add(struct sta_info *sta) |
336 | { | 337 | { |
338 | struct ieee80211_local *local = sta->local; | ||
339 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
337 | struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations; | 340 | struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations; |
338 | u8 mac[3*ETH_ALEN]; | 341 | u8 mac[3*ETH_ALEN]; |
339 | 342 | ||
@@ -379,10 +382,16 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) | |||
379 | DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed); | 382 | DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed); |
380 | DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count); | 383 | DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count); |
381 | DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count); | 384 | DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count); |
385 | |||
386 | drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir); | ||
382 | } | 387 | } |
383 | 388 | ||
384 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) | 389 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) |
385 | { | 390 | { |
391 | struct ieee80211_local *local = sta->local; | ||
392 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
393 | |||
394 | drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs.dir); | ||
386 | debugfs_remove_recursive(sta->debugfs.dir); | 395 | debugfs_remove_recursive(sta->debugfs.dir); |
387 | sta->debugfs.dir = NULL; | 396 | sta->debugfs.dir = NULL; |
388 | } | 397 | } |
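ieee80211_sta_debugfs_add()/_remove() above now let the driver attach its own entries to the per-station debugfs directory, through the drv_sta_add_debugfs()/drv_sta_remove_debugfs() wrappers added to driver-ops.h below. A hypothetical driver-side callback is sketched here: the prototype is read off those wrappers, while the my_* names and the private-data layout are invented.

    /* Hypothetical driver callback; my_sta_priv is an invented layout
     * that the driver would have placed in sta->drv_priv.
     */
    struct my_sta_priv {
            u32 tx_drops;
    };

    static void my_sta_add_debugfs(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_sta *sta,
                                   struct dentry *dir)
    {
            struct my_sta_priv *priv = (void *)sta->drv_priv;

            debugfs_create_u32("tx_drops", 0400, dir, &priv->tx_drops);
    }

Judging by the hunk above, a matching .sta_remove_debugfs callback is only needed for entries that must disappear early; mac80211 removes the whole station directory recursively right after calling it.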
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index da9003b20004..c6560cc7a9d6 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -490,6 +490,38 @@ static inline void drv_sta_remove(struct ieee80211_local *local, | |||
490 | trace_drv_return_void(local); | 490 | trace_drv_return_void(local); |
491 | } | 491 | } |
492 | 492 | ||
493 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
494 | static inline void drv_sta_add_debugfs(struct ieee80211_local *local, | ||
495 | struct ieee80211_sub_if_data *sdata, | ||
496 | struct ieee80211_sta *sta, | ||
497 | struct dentry *dir) | ||
498 | { | ||
499 | might_sleep(); | ||
500 | |||
501 | sdata = get_bss_sdata(sdata); | ||
502 | check_sdata_in_driver(sdata); | ||
503 | |||
504 | if (local->ops->sta_add_debugfs) | ||
505 | local->ops->sta_add_debugfs(&local->hw, &sdata->vif, | ||
506 | sta, dir); | ||
507 | } | ||
508 | |||
509 | static inline void drv_sta_remove_debugfs(struct ieee80211_local *local, | ||
510 | struct ieee80211_sub_if_data *sdata, | ||
511 | struct ieee80211_sta *sta, | ||
512 | struct dentry *dir) | ||
513 | { | ||
514 | might_sleep(); | ||
515 | |||
516 | sdata = get_bss_sdata(sdata); | ||
517 | check_sdata_in_driver(sdata); | ||
518 | |||
519 | if (local->ops->sta_remove_debugfs) | ||
520 | local->ops->sta_remove_debugfs(&local->hw, &sdata->vif, | ||
521 | sta, dir); | ||
522 | } | ||
523 | #endif | ||
524 | |||
493 | static inline __must_check | 525 | static inline __must_check |
494 | int drv_sta_state(struct ieee80211_local *local, | 526 | int drv_sta_state(struct ieee80211_local *local, |
495 | struct ieee80211_sub_if_data *sdata, | 527 | struct ieee80211_sub_if_data *sdata, |
@@ -704,17 +736,17 @@ static inline int drv_get_antenna(struct ieee80211_local *local, | |||
704 | } | 736 | } |
705 | 737 | ||
706 | static inline int drv_remain_on_channel(struct ieee80211_local *local, | 738 | static inline int drv_remain_on_channel(struct ieee80211_local *local, |
739 | struct ieee80211_sub_if_data *sdata, | ||
707 | struct ieee80211_channel *chan, | 740 | struct ieee80211_channel *chan, |
708 | enum nl80211_channel_type chantype, | ||
709 | unsigned int duration) | 741 | unsigned int duration) |
710 | { | 742 | { |
711 | int ret; | 743 | int ret; |
712 | 744 | ||
713 | might_sleep(); | 745 | might_sleep(); |
714 | 746 | ||
715 | trace_drv_remain_on_channel(local, chan, chantype, duration); | 747 | trace_drv_remain_on_channel(local, sdata, chan, duration); |
716 | ret = local->ops->remain_on_channel(&local->hw, chan, chantype, | 748 | ret = local->ops->remain_on_channel(&local->hw, &sdata->vif, |
717 | duration); | 749 | chan, duration); |
718 | trace_drv_return_int(local, ret); | 750 | trace_drv_return_int(local, ret); |
719 | 751 | ||
720 | return ret; | 752 | return ret; |
@@ -871,4 +903,104 @@ static inline void drv_mgd_prepare_tx(struct ieee80211_local *local, | |||
871 | local->ops->mgd_prepare_tx(&local->hw, &sdata->vif); | 903 | local->ops->mgd_prepare_tx(&local->hw, &sdata->vif); |
872 | trace_drv_return_void(local); | 904 | trace_drv_return_void(local); |
873 | } | 905 | } |
906 | |||
907 | static inline int drv_add_chanctx(struct ieee80211_local *local, | ||
908 | struct ieee80211_chanctx *ctx) | ||
909 | { | ||
910 | int ret = -EOPNOTSUPP; | ||
911 | |||
912 | trace_drv_add_chanctx(local, ctx); | ||
913 | if (local->ops->add_chanctx) | ||
914 | ret = local->ops->add_chanctx(&local->hw, &ctx->conf); | ||
915 | trace_drv_return_int(local, ret); | ||
916 | |||
917 | return ret; | ||
918 | } | ||
919 | |||
920 | static inline void drv_remove_chanctx(struct ieee80211_local *local, | ||
921 | struct ieee80211_chanctx *ctx) | ||
922 | { | ||
923 | trace_drv_remove_chanctx(local, ctx); | ||
924 | if (local->ops->remove_chanctx) | ||
925 | local->ops->remove_chanctx(&local->hw, &ctx->conf); | ||
926 | trace_drv_return_void(local); | ||
927 | } | ||
928 | |||
929 | static inline void drv_change_chanctx(struct ieee80211_local *local, | ||
930 | struct ieee80211_chanctx *ctx, | ||
931 | u32 changed) | ||
932 | { | ||
933 | trace_drv_change_chanctx(local, ctx, changed); | ||
934 | if (local->ops->change_chanctx) | ||
935 | local->ops->change_chanctx(&local->hw, &ctx->conf, changed); | ||
936 | trace_drv_return_void(local); | ||
937 | } | ||
938 | |||
939 | static inline int drv_assign_vif_chanctx(struct ieee80211_local *local, | ||
940 | struct ieee80211_sub_if_data *sdata, | ||
941 | struct ieee80211_chanctx *ctx) | ||
942 | { | ||
943 | int ret = 0; | ||
944 | |||
945 | check_sdata_in_driver(sdata); | ||
946 | |||
947 | trace_drv_assign_vif_chanctx(local, sdata, ctx); | ||
948 | if (local->ops->assign_vif_chanctx) | ||
949 | ret = local->ops->assign_vif_chanctx(&local->hw, | ||
950 | &sdata->vif, | ||
951 | &ctx->conf); | ||
952 | trace_drv_return_int(local, ret); | ||
953 | |||
954 | return ret; | ||
955 | } | ||
956 | |||
957 | static inline void drv_unassign_vif_chanctx(struct ieee80211_local *local, | ||
958 | struct ieee80211_sub_if_data *sdata, | ||
959 | struct ieee80211_chanctx *ctx) | ||
960 | { | ||
961 | check_sdata_in_driver(sdata); | ||
962 | |||
963 | trace_drv_unassign_vif_chanctx(local, sdata, ctx); | ||
964 | if (local->ops->unassign_vif_chanctx) | ||
965 | local->ops->unassign_vif_chanctx(&local->hw, | ||
966 | &sdata->vif, | ||
967 | &ctx->conf); | ||
968 | trace_drv_return_void(local); | ||
969 | } | ||
970 | |||
971 | static inline int drv_start_ap(struct ieee80211_local *local, | ||
972 | struct ieee80211_sub_if_data *sdata) | ||
973 | { | ||
974 | int ret = 0; | ||
975 | |||
976 | check_sdata_in_driver(sdata); | ||
977 | |||
978 | trace_drv_start_ap(local, sdata, &sdata->vif.bss_conf); | ||
979 | if (local->ops->start_ap) | ||
980 | ret = local->ops->start_ap(&local->hw, &sdata->vif); | ||
981 | trace_drv_return_int(local, ret); | ||
982 | return ret; | ||
983 | } | ||
984 | |||
985 | static inline void drv_stop_ap(struct ieee80211_local *local, | ||
986 | struct ieee80211_sub_if_data *sdata) | ||
987 | { | ||
988 | check_sdata_in_driver(sdata); | ||
989 | |||
990 | trace_drv_stop_ap(local, sdata); | ||
991 | if (local->ops->stop_ap) | ||
992 | local->ops->stop_ap(&local->hw, &sdata->vif); | ||
993 | trace_drv_return_void(local); | ||
994 | } | ||
995 | |||
996 | static inline void drv_restart_complete(struct ieee80211_local *local) | ||
997 | { | ||
998 | might_sleep(); | ||
999 | |||
1000 | trace_drv_restart_complete(local); | ||
1001 | if (local->ops->restart_complete) | ||
1002 | local->ops->restart_complete(&local->hw); | ||
1003 | trace_drv_return_void(local); | ||
1004 | } | ||
1005 | |||
874 | #endif /* __MAC80211_DRIVER_OPS */ | 1006 | #endif /* __MAC80211_DRIVER_OPS */ |
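The drv_*() wrappers above form the driver-facing half of channel contexts (plus start_ap/stop_ap and restart_complete). The callback prototypes can be read off the wrapper bodies: add_chanctx/remove_chanctx take the hw and a struct ieee80211_chanctx_conf, change_chanctx additionally gets a changed bitmap (IEEE80211_CHANCTX_CHANGE_WIDTH, _RX_CHAINS), and assign/unassign_vif_chanctx also receive the vif. Below is a hedged sketch of a driver wiring these up; every my_* identifier is invented, the ops initializer is trimmed to the new members, and how mac80211 decides to set local->use_chanctx is not visible in these hunks.

    /* Hypothetical driver-side chanctx callbacks (sketch only). */
    static int my_add_chanctx(struct ieee80211_hw *hw,
                              struct ieee80211_chanctx_conf *conf)
    {
            /* program the radio for conf->def.chan / conf->def.width */
            return 0;
    }

    static void my_remove_chanctx(struct ieee80211_hw *hw,
                                  struct ieee80211_chanctx_conf *conf)
    {
            /* undo whatever my_add_chanctx() set up */
    }

    static void my_change_chanctx(struct ieee80211_hw *hw,
                                  struct ieee80211_chanctx_conf *conf,
                                  u32 changed)
    {
            /* changed carries IEEE80211_CHANCTX_CHANGE_WIDTH / _RX_CHAINS */
    }

    static int my_assign_vif_chanctx(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif,
                                     struct ieee80211_chanctx_conf *conf)
    {
            /* bind this vif's queues/filters to the context */
            return 0;
    }

    static void my_unassign_vif_chanctx(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif,
                                        struct ieee80211_chanctx_conf *conf)
    {
            /* unbind the vif */
    }

    static const struct ieee80211_ops my_ops = {
            /* ... the usual mandatory callbacks elided ... */
            .add_chanctx            = my_add_chanctx,
            .remove_chanctx         = my_remove_chanctx,
            .change_chanctx         = my_change_chanctx,
            .assign_vif_chanctx     = my_assign_vif_chanctx,
            .unassign_vif_chanctx   = my_unassign_vif_chanctx,
    };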
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 4b4538d63925..a71d891794a4 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c | |||
@@ -185,7 +185,7 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, bool tx) | |||
185 | 185 | ||
186 | cancel_work_sync(&sta->ampdu_mlme.work); | 186 | cancel_work_sync(&sta->ampdu_mlme.work); |
187 | 187 | ||
188 | for (i = 0; i < STA_TID_NUM; i++) { | 188 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) { |
189 | __ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR, tx); | 189 | __ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR, tx); |
190 | __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, | 190 | __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, |
191 | WLAN_REASON_QSTA_LEAVE_QBSS, tx); | 191 | WLAN_REASON_QSTA_LEAVE_QBSS, tx); |
@@ -209,7 +209,7 @@ void ieee80211_ba_session_work(struct work_struct *work) | |||
209 | return; | 209 | return; |
210 | 210 | ||
211 | mutex_lock(&sta->ampdu_mlme.mtx); | 211 | mutex_lock(&sta->ampdu_mlme.mtx); |
212 | for (tid = 0; tid < STA_TID_NUM; tid++) { | 212 | for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) { |
213 | if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired)) | 213 | if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired)) |
214 | ___ieee80211_stop_rx_ba_session( | 214 | ___ieee80211_stop_rx_ba_session( |
215 | sta, tid, WLAN_BACK_RECIPIENT, | 215 | sta, tid, WLAN_BACK_RECIPIENT, |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index c21e33d1abd0..fa862b24a7e0 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include "rate.h" | 26 | #include "rate.h" |
27 | 27 | ||
28 | #define IEEE80211_SCAN_INTERVAL (2 * HZ) | 28 | #define IEEE80211_SCAN_INTERVAL (2 * HZ) |
29 | #define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) | ||
30 | #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) | 29 | #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) |
31 | 30 | ||
32 | #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) | 31 | #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) |
@@ -39,7 +38,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
39 | const u8 *bssid, const int beacon_int, | 38 | const u8 *bssid, const int beacon_int, |
40 | struct ieee80211_channel *chan, | 39 | struct ieee80211_channel *chan, |
41 | const u32 basic_rates, | 40 | const u32 basic_rates, |
42 | const u16 capability, u64 tsf) | 41 | const u16 capability, u64 tsf, |
42 | bool creator) | ||
43 | { | 43 | { |
44 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; | 44 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; |
45 | struct ieee80211_local *local = sdata->local; | 45 | struct ieee80211_local *local = sdata->local; |
@@ -51,7 +51,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
51 | struct cfg80211_bss *bss; | 51 | struct cfg80211_bss *bss; |
52 | u32 bss_change; | 52 | u32 bss_change; |
53 | u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; | 53 | u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; |
54 | enum nl80211_channel_type channel_type; | 54 | struct cfg80211_chan_def chandef; |
55 | 55 | ||
56 | lockdep_assert_held(&ifibss->mtx); | 56 | lockdep_assert_held(&ifibss->mtx); |
57 | 57 | ||
@@ -72,25 +72,29 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
72 | /* if merging, indicate to driver that we leave the old IBSS */ | 72 | /* if merging, indicate to driver that we leave the old IBSS */ |
73 | if (sdata->vif.bss_conf.ibss_joined) { | 73 | if (sdata->vif.bss_conf.ibss_joined) { |
74 | sdata->vif.bss_conf.ibss_joined = false; | 74 | sdata->vif.bss_conf.ibss_joined = false; |
75 | sdata->vif.bss_conf.ibss_creator = false; | ||
75 | netif_carrier_off(sdata->dev); | 76 | netif_carrier_off(sdata->dev); |
76 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS); | 77 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS); |
77 | } | 78 | } |
78 | 79 | ||
79 | memcpy(ifibss->bssid, bssid, ETH_ALEN); | ||
80 | |||
81 | sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; | 80 | sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; |
82 | 81 | ||
83 | local->oper_channel = chan; | 82 | cfg80211_chandef_create(&chandef, chan, ifibss->channel_type); |
84 | channel_type = ifibss->channel_type; | 83 | if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { |
85 | if (!cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type)) | 84 | chandef.width = NL80211_CHAN_WIDTH_20; |
86 | channel_type = NL80211_CHAN_HT20; | 85 | chandef.center_freq1 = chan->center_freq; |
87 | if (!ieee80211_set_channel_type(local, sdata, channel_type)) { | 86 | } |
88 | /* can only fail due to HT40+/- mismatch */ | 87 | |
89 | channel_type = NL80211_CHAN_HT20; | 88 | ieee80211_vif_release_channel(sdata); |
90 | WARN_ON(!ieee80211_set_channel_type(local, sdata, | 89 | if (ieee80211_vif_use_channel(sdata, &chandef, |
91 | NL80211_CHAN_HT20)); | 90 | ifibss->fixed_channel ? |
91 | IEEE80211_CHANCTX_SHARED : | ||
92 | IEEE80211_CHANCTX_EXCLUSIVE)) { | ||
93 | sdata_info(sdata, "Failed to join IBSS, no channel context\n"); | ||
94 | return; | ||
92 | } | 95 | } |
93 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); | 96 | |
97 | memcpy(ifibss->bssid, bssid, ETH_ALEN); | ||
94 | 98 | ||
95 | sband = local->hw.wiphy->bands[chan->band]; | 99 | sband = local->hw.wiphy->bands[chan->band]; |
96 | 100 | ||
@@ -156,7 +160,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
156 | ifibss->ie, ifibss->ie_len); | 160 | ifibss->ie, ifibss->ie_len); |
157 | 161 | ||
158 | /* add HT capability and information IEs */ | 162 | /* add HT capability and information IEs */ |
159 | if (channel_type && sband->ht_cap.ht_supported) { | 163 | if (chandef.width != NL80211_CHAN_WIDTH_20_NOHT && |
164 | sband->ht_cap.ht_supported) { | ||
160 | pos = skb_put(skb, 4 + | 165 | pos = skb_put(skb, 4 + |
161 | sizeof(struct ieee80211_ht_cap) + | 166 | sizeof(struct ieee80211_ht_cap) + |
162 | sizeof(struct ieee80211_ht_operation)); | 167 | sizeof(struct ieee80211_ht_operation)); |
@@ -168,7 +173,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
168 | * keep them at 0 | 173 | * keep them at 0 |
169 | */ | 174 | */ |
170 | pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap, | 175 | pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap, |
171 | chan, channel_type, 0); | 176 | &chandef, 0); |
172 | } | 177 | } |
173 | 178 | ||
174 | if (local->hw.queues >= IEEE80211_NUM_ACS) { | 179 | if (local->hw.queues >= IEEE80211_NUM_ACS) { |
@@ -197,6 +202,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
197 | bss_change |= BSS_CHANGED_HT; | 202 | bss_change |= BSS_CHANGED_HT; |
198 | bss_change |= BSS_CHANGED_IBSS; | 203 | bss_change |= BSS_CHANGED_IBSS; |
199 | sdata->vif.bss_conf.ibss_joined = true; | 204 | sdata->vif.bss_conf.ibss_joined = true; |
205 | sdata->vif.bss_conf.ibss_creator = creator; | ||
200 | ieee80211_bss_info_change_notify(sdata, bss_change); | 206 | ieee80211_bss_info_change_notify(sdata, bss_change); |
201 | 207 | ||
202 | ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates); | 208 | ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates); |
@@ -249,7 +255,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
249 | cbss->channel, | 255 | cbss->channel, |
250 | basic_rates, | 256 | basic_rates, |
251 | cbss->capability, | 257 | cbss->capability, |
252 | cbss->tsf); | 258 | cbss->tsf, |
259 | false); | ||
253 | } | 260 | } |
254 | 261 | ||
255 | static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta, | 262 | static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta, |
@@ -279,7 +286,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta, | |||
279 | ibss_dbg(sdata, | 286 | ibss_dbg(sdata, |
280 | "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n", | 287 | "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n", |
281 | sdata->vif.addr, addr, sdata->u.ibss.bssid); | 288 | sdata->vif.addr, addr, sdata->u.ibss.bssid); |
282 | ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0, | 289 | ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, 0, NULL, 0, |
283 | addr, sdata->u.ibss.bssid, NULL, 0, 0); | 290 | addr, sdata->u.ibss.bssid, NULL, 0, 0); |
284 | } | 291 | } |
285 | return sta; | 292 | return sta; |
@@ -294,7 +301,8 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, | |||
294 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; | 301 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; |
295 | struct ieee80211_local *local = sdata->local; | 302 | struct ieee80211_local *local = sdata->local; |
296 | struct sta_info *sta; | 303 | struct sta_info *sta; |
297 | int band = local->oper_channel->band; | 304 | struct ieee80211_chanctx_conf *chanctx_conf; |
305 | int band; | ||
298 | 306 | ||
299 | /* | 307 | /* |
300 | * XXX: Consider removing the least recently used entry and | 308 | * XXX: Consider removing the least recently used entry and |
@@ -317,6 +325,13 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, | |||
317 | return NULL; | 325 | return NULL; |
318 | } | 326 | } |
319 | 327 | ||
328 | rcu_read_lock(); | ||
329 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
330 | if (WARN_ON_ONCE(!chanctx_conf)) | ||
331 | return NULL; | ||
332 | band = chanctx_conf->def.chan->band; | ||
333 | rcu_read_unlock(); | ||
334 | |||
320 | sta = sta_info_alloc(sdata, addr, GFP_KERNEL); | 335 | sta = sta_info_alloc(sdata, addr, GFP_KERNEL); |
321 | if (!sta) { | 336 | if (!sta) { |
322 | rcu_read_lock(); | 337 | rcu_read_lock(); |
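The hunk above replaces the old local->oper_channel band lookup with an RCU-protected dereference of the interface's channel context. Note that the WARN_ON_ONCE early return appears to leave the RCU read-side lock held; the analogous lookup in ieee80211_ibss_rx_no_sta() further down in this file drops the lock before returning. A balanced version of the same lookup, shown for illustration only and not part of this patch, would be:

	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (WARN_ON_ONCE(!chanctx_conf)) {
		rcu_read_unlock();
		return NULL;
	}
	band = chanctx_conf->def.chan->band;
	rcu_read_unlock();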
@@ -362,11 +377,13 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, | |||
362 | auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); | 377 | auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); |
363 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); | 378 | auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); |
364 | 379 | ||
365 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) | ||
366 | return; | ||
367 | ibss_dbg(sdata, | 380 | ibss_dbg(sdata, |
368 | "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n", | 381 | "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n", |
369 | mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction); | 382 | mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction); |
383 | |||
384 | if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) | ||
385 | return; | ||
386 | |||
370 | sta_info_destroy_addr(sdata, mgmt->sa); | 387 | sta_info_destroy_addr(sdata, mgmt->sa); |
371 | sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false); | 388 | sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false); |
372 | rcu_read_unlock(); | 389 | rcu_read_unlock(); |
@@ -389,7 +406,7 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, | |||
389 | * However, try to reply to authentication attempts if someone | 406 | * However, try to reply to authentication attempts if someone |
390 | * has actually implemented this. | 407 | * has actually implemented this. |
391 | */ | 408 | */ |
392 | ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0, | 409 | ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, 0, NULL, 0, |
393 | mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0); | 410 | mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0); |
394 | } | 411 | } |
395 | 412 | ||
@@ -461,9 +478,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
461 | sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) { | 478 | sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) { |
462 | /* we both use HT */ | 479 | /* we both use HT */ |
463 | struct ieee80211_sta_ht_cap sta_ht_cap_new; | 480 | struct ieee80211_sta_ht_cap sta_ht_cap_new; |
464 | enum nl80211_channel_type channel_type = | 481 | struct cfg80211_chan_def chandef; |
465 | ieee80211_ht_oper_to_channel_type( | 482 | |
466 | elems->ht_operation); | 483 | ieee80211_ht_oper_to_chandef(channel, |
484 | elems->ht_operation, | ||
485 | &chandef); | ||
467 | 486 | ||
468 | ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, | 487 | ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, |
469 | elems->ht_cap_elem, | 488 | elems->ht_cap_elem, |
@@ -473,9 +492,9 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
473 | * fall back to HT20 if we don't use or use | 492 | * fall back to HT20 if we don't use or use |
474 | * the other extension channel | 493 | * the other extension channel |
475 | */ | 494 | */ |
476 | if (!(channel_type == NL80211_CHAN_HT40MINUS || | 495 | if (chandef.width != NL80211_CHAN_WIDTH_40 || |
477 | channel_type == NL80211_CHAN_HT40PLUS) || | 496 | cfg80211_get_chandef_type(&chandef) != |
478 | channel_type != sdata->u.ibss.channel_type) | 497 | sdata->u.ibss.channel_type) |
479 | sta_ht_cap_new.cap &= | 498 | sta_ht_cap_new.cap &= |
480 | ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; | 499 | ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; |
481 | 500 | ||
@@ -517,7 +536,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
517 | goto put_bss; | 536 | goto put_bss; |
518 | 537 | ||
519 | /* different channel */ | 538 | /* different channel */ |
520 | if (cbss->channel != local->oper_channel) | 539 | if (sdata->u.ibss.fixed_channel && |
540 | sdata->u.ibss.channel != cbss->channel) | ||
521 | goto put_bss; | 541 | goto put_bss; |
522 | 542 | ||
523 | /* different SSID */ | 543 | /* different SSID */ |
@@ -530,30 +550,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, | |||
530 | if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid)) | 550 | if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid)) |
531 | goto put_bss; | 551 | goto put_bss; |
532 | 552 | ||
533 | if (rx_status->flag & RX_FLAG_MACTIME_MPDU) { | 553 | if (ieee80211_have_rx_timestamp(rx_status)) { |
534 | /* | 554 | /* time when timestamp field was received */ |
535 | * For correct IBSS merging we need mactime; since mactime is | 555 | rx_timestamp = |
536 | * defined as the time the first data symbol of the frame hits | 556 | ieee80211_calculate_rx_timestamp(local, rx_status, |
537 | * the PHY, and the timestamp of the beacon is defined as "the | 557 | len + FCS_LEN, 24); |
538 | * time that the data symbol containing the first bit of the | ||
539 | * timestamp is transmitted to the PHY plus the transmitting | ||
540 | * STA's delays through its local PHY from the MAC-PHY | ||
541 | * interface to its interface with the WM" (802.11 11.1.2) | ||
542 | * - equals the time this bit arrives at the receiver - we have | ||
543 | * to take into account the offset between the two. | ||
544 | * | ||
545 | * E.g. at 1 MBit that means mactime is 192 usec earlier | ||
546 | * (=24 bytes * 8 usecs/byte) than the beacon timestamp. | ||
547 | */ | ||
548 | int rate; | ||
549 | |||
550 | if (rx_status->flag & RX_FLAG_HT) | ||
551 | rate = 65; /* TODO: HT rates */ | ||
552 | else | ||
553 | rate = local->hw.wiphy->bands[band]-> | ||
554 | bitrates[rx_status->rate_idx].bitrate; | ||
555 | |||
556 | rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate); | ||
557 | } else { | 558 | } else { |
558 | /* | 559 | /* |
559 | * second best option: get current TSF | 560 | * second best option: get current TSF |
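The comment removed above spelled out why a correction is needed for IBSS merging: mactime marks the first data symbol of the frame, while the beacon timestamp field sits after the 24-byte management header, so at 1 Mb/s the mactime is 24 bytes * 8 µs/byte = 192 µs earlier than the timestamp. That arithmetic now lives behind ieee80211_calculate_rx_timestamp() (declared in ieee80211_i.h later in this patch); the call above passes the MPDU length including the FCS plus 24 as the byte offset of the timestamp field, which for the legacy-rate case presumably reduces to the removed expression:

	rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);	/* bitrate in 100 kb/s units */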
@@ -592,7 +593,8 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, | |||
592 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; | 593 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; |
593 | struct ieee80211_local *local = sdata->local; | 594 | struct ieee80211_local *local = sdata->local; |
594 | struct sta_info *sta; | 595 | struct sta_info *sta; |
595 | int band = local->oper_channel->band; | 596 | struct ieee80211_chanctx_conf *chanctx_conf; |
597 | int band; | ||
596 | 598 | ||
597 | /* | 599 | /* |
598 | * XXX: Consider removing the least recently used entry and | 600 | * XXX: Consider removing the least recently used entry and |
@@ -610,6 +612,15 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, | |||
610 | if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) | 612 | if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) |
611 | return; | 613 | return; |
612 | 614 | ||
615 | rcu_read_lock(); | ||
616 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
617 | if (WARN_ON_ONCE(!chanctx_conf)) { | ||
618 | rcu_read_unlock(); | ||
619 | return; | ||
620 | } | ||
621 | band = chanctx_conf->def.chan->band; | ||
622 | rcu_read_unlock(); | ||
623 | |||
613 | sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); | 624 | sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); |
614 | if (!sta) | 625 | if (!sta) |
615 | return; | 626 | return; |
@@ -715,7 +726,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) | |||
715 | 726 | ||
716 | __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, | 727 | __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, |
717 | ifibss->channel, ifibss->basic_rates, | 728 | ifibss->channel, ifibss->basic_rates, |
718 | capability, 0); | 729 | capability, 0, true); |
719 | } | 730 | } |
720 | 731 | ||
721 | /* | 732 | /* |
@@ -784,18 +795,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) | |||
784 | int interval = IEEE80211_SCAN_INTERVAL; | 795 | int interval = IEEE80211_SCAN_INTERVAL; |
785 | 796 | ||
786 | if (time_after(jiffies, ifibss->ibss_join_req + | 797 | if (time_after(jiffies, ifibss->ibss_join_req + |
787 | IEEE80211_IBSS_JOIN_TIMEOUT)) { | 798 | IEEE80211_IBSS_JOIN_TIMEOUT)) |
788 | if (!(local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)) { | 799 | ieee80211_sta_create_ibss(sdata); |
789 | ieee80211_sta_create_ibss(sdata); | ||
790 | return; | ||
791 | } | ||
792 | sdata_info(sdata, "IBSS not allowed on %d MHz\n", | ||
793 | local->oper_channel->center_freq); | ||
794 | |||
795 | /* No IBSS found - decrease scan interval and continue | ||
796 | * scanning. */ | ||
797 | interval = IEEE80211_SCAN_INTERVAL_SLOW; | ||
798 | } | ||
799 | 800 | ||
800 | mod_timer(&ifibss->timer, | 801 | mod_timer(&ifibss->timer, |
801 | round_jiffies(jiffies + interval)); | 802 | round_jiffies(jiffies + interval)); |
@@ -1082,21 +1083,11 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
1082 | 1083 | ||
1083 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; | 1084 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; |
1084 | 1085 | ||
1085 | sdata->u.ibss.channel = params->channel; | 1086 | sdata->u.ibss.channel = params->chandef.chan; |
1086 | sdata->u.ibss.channel_type = params->channel_type; | 1087 | sdata->u.ibss.channel_type = |
1088 | cfg80211_get_chandef_type(¶ms->chandef); | ||
1087 | sdata->u.ibss.fixed_channel = params->channel_fixed; | 1089 | sdata->u.ibss.fixed_channel = params->channel_fixed; |
1088 | 1090 | ||
1089 | /* fix ourselves to that channel now already */ | ||
1090 | if (params->channel_fixed) { | ||
1091 | sdata->local->oper_channel = params->channel; | ||
1092 | if (!ieee80211_set_channel_type(sdata->local, sdata, | ||
1093 | params->channel_type)) { | ||
1094 | mutex_unlock(&sdata->u.ibss.mtx); | ||
1095 | kfree_skb(skb); | ||
1096 | return -EINVAL; | ||
1097 | } | ||
1098 | } | ||
1099 | |||
1100 | if (params->ie) { | 1091 | if (params->ie) { |
1101 | sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len, | 1092 | sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len, |
1102 | GFP_KERNEL); | 1093 | GFP_KERNEL); |
@@ -1134,6 +1125,9 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
1134 | changed |= BSS_CHANGED_HT; | 1125 | changed |= BSS_CHANGED_HT; |
1135 | ieee80211_bss_info_change_notify(sdata, changed); | 1126 | ieee80211_bss_info_change_notify(sdata, changed); |
1136 | 1127 | ||
1128 | sdata->smps_mode = IEEE80211_SMPS_OFF; | ||
1129 | sdata->needed_rx_chains = sdata->local->rx_chains; | ||
1130 | |||
1137 | ieee80211_queue_work(&sdata->local->hw, &sdata->work); | 1131 | ieee80211_queue_work(&sdata->local->hw, &sdata->work); |
1138 | 1132 | ||
1139 | return 0; | 1133 | return 0; |
@@ -1197,6 +1191,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) | |||
1197 | lockdep_is_held(&sdata->u.ibss.mtx)); | 1191 | lockdep_is_held(&sdata->u.ibss.mtx)); |
1198 | RCU_INIT_POINTER(sdata->u.ibss.presp, NULL); | 1192 | RCU_INIT_POINTER(sdata->u.ibss.presp, NULL); |
1199 | sdata->vif.bss_conf.ibss_joined = false; | 1193 | sdata->vif.bss_conf.ibss_joined = false; |
1194 | sdata->vif.bss_conf.ibss_creator = false; | ||
1200 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | | 1195 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | |
1201 | BSS_CHANGED_IBSS); | 1196 | BSS_CHANGED_IBSS); |
1202 | synchronize_rcu(); | 1197 | synchronize_rcu(); |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 156e5835e37f..5c0d5a6946c1 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -56,6 +56,9 @@ struct ieee80211_local; | |||
56 | #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) | 56 | #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) |
57 | #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) | 57 | #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) |
58 | 58 | ||
59 | /* power level hasn't been configured (or set to automatic) */ | ||
60 | #define IEEE80211_UNSET_POWER_LEVEL INT_MIN | ||
61 | |||
59 | /* | 62 | /* |
60 | * Some APs experience problems when working with U-APSD. Decrease the | 63 | * Some APs experience problems when working with U-APSD. Decrease the |
61 | * probability of that happening by using legacy mode for all ACs but VO. | 64 | * probability of that happening by using legacy mode for all ACs but VO. |
@@ -280,23 +283,27 @@ struct probe_resp { | |||
280 | u8 data[0]; | 283 | u8 data[0]; |
281 | }; | 284 | }; |
282 | 285 | ||
283 | struct ieee80211_if_ap { | 286 | struct ps_data { |
284 | struct beacon_data __rcu *beacon; | ||
285 | struct probe_resp __rcu *probe_resp; | ||
286 | |||
287 | struct list_head vlans; | ||
288 | |||
289 | /* yes, this looks ugly, but guarantees that we can later use | 287 | /* yes, this looks ugly, but guarantees that we can later use |
290 | * bitmap_empty :) | 288 | * bitmap_empty :) |
291 | * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */ | 289 | * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */ |
292 | u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; | 290 | u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; |
293 | struct sk_buff_head ps_bc_buf; | 291 | struct sk_buff_head bc_buf; |
294 | atomic_t num_sta_ps; /* number of stations in PS mode */ | 292 | atomic_t num_sta_ps; /* number of stations in PS mode */ |
295 | atomic_t num_mcast_sta; /* number of stations receiving multicast */ | ||
296 | int dtim_count; | 293 | int dtim_count; |
297 | bool dtim_bc_mc; | 294 | bool dtim_bc_mc; |
298 | }; | 295 | }; |
299 | 296 | ||
297 | struct ieee80211_if_ap { | ||
298 | struct beacon_data __rcu *beacon; | ||
299 | struct probe_resp __rcu *probe_resp; | ||
300 | |||
301 | struct list_head vlans; | ||
302 | |||
303 | struct ps_data ps; | ||
304 | atomic_t num_mcast_sta; /* number of stations receiving multicast */ | ||
305 | }; | ||
306 | |||
300 | struct ieee80211_if_wds { | 307 | struct ieee80211_if_wds { |
301 | struct sta_info *sta; | 308 | struct sta_info *sta; |
302 | u8 remote_addr[ETH_ALEN]; | 309 | u8 remote_addr[ETH_ALEN]; |
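The hunk above moves the AP power-save state (TIM bitmap, buffered broadcast queue, station counters, DTIM bookkeeping) out of struct ieee80211_if_ap into the new struct ps_data, reachable as sdata->u.ap.ps; the iface.c change below initializes u.ap.ps.bc_buf accordingly. A rough sketch of how AP-side callers are expected to address the moved fields after this split (illustrative only):

	struct ps_data *ps = &sdata->u.ap.ps;

	atomic_inc(&ps->num_sta_ps);		/* was u.ap.num_sta_ps */
	skb_queue_tail(&ps->bc_buf, skb);	/* was u.ap.ps_bc_buf  */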
@@ -316,7 +323,6 @@ struct mesh_stats { | |||
316 | __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/ | 323 | __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/ |
317 | __u32 dropped_frames_no_route; /* Not transmitted, no route found */ | 324 | __u32 dropped_frames_no_route; /* Not transmitted, no route found */ |
318 | __u32 dropped_frames_congestion;/* Not forwarded due to congestion */ | 325 | __u32 dropped_frames_congestion;/* Not forwarded due to congestion */ |
319 | atomic_t estab_plinks; | ||
320 | }; | 326 | }; |
321 | 327 | ||
322 | #define PREQ_Q_F_START 0x1 | 328 | #define PREQ_Q_F_START 0x1 |
@@ -342,7 +348,6 @@ struct ieee80211_roc_work { | |||
342 | struct ieee80211_sub_if_data *sdata; | 348 | struct ieee80211_sub_if_data *sdata; |
343 | 349 | ||
344 | struct ieee80211_channel *chan; | 350 | struct ieee80211_channel *chan; |
345 | enum nl80211_channel_type chan_type; | ||
346 | 351 | ||
347 | bool started, abort, hw_begun, notified; | 352 | bool started, abort, hw_begun, notified; |
348 | 353 | ||
@@ -350,7 +355,7 @@ struct ieee80211_roc_work { | |||
350 | 355 | ||
351 | u32 duration, req_duration; | 356 | u32 duration, req_duration; |
352 | struct sk_buff *frame; | 357 | struct sk_buff *frame; |
353 | u64 mgmt_tx_cookie; | 358 | u64 cookie, mgmt_tx_cookie; |
354 | }; | 359 | }; |
355 | 360 | ||
356 | /* flags used in struct ieee80211_if_managed.flags */ | 361 | /* flags used in struct ieee80211_if_managed.flags */ |
@@ -358,7 +363,7 @@ enum ieee80211_sta_flags { | |||
358 | IEEE80211_STA_BEACON_POLL = BIT(0), | 363 | IEEE80211_STA_BEACON_POLL = BIT(0), |
359 | IEEE80211_STA_CONNECTION_POLL = BIT(1), | 364 | IEEE80211_STA_CONNECTION_POLL = BIT(1), |
360 | IEEE80211_STA_CONTROL_PORT = BIT(2), | 365 | IEEE80211_STA_CONTROL_PORT = BIT(2), |
361 | IEEE80211_STA_DISABLE_11N = BIT(4), | 366 | IEEE80211_STA_DISABLE_HT = BIT(4), |
362 | IEEE80211_STA_CSA_RECEIVED = BIT(5), | 367 | IEEE80211_STA_CSA_RECEIVED = BIT(5), |
363 | IEEE80211_STA_MFP_ENABLED = BIT(6), | 368 | IEEE80211_STA_MFP_ENABLED = BIT(6), |
364 | IEEE80211_STA_UAPSD_ENABLED = BIT(7), | 369 | IEEE80211_STA_UAPSD_ENABLED = BIT(7), |
@@ -378,8 +383,9 @@ struct ieee80211_mgd_auth_data { | |||
378 | u8 key_len, key_idx; | 383 | u8 key_len, key_idx; |
379 | bool done; | 384 | bool done; |
380 | 385 | ||
381 | size_t ie_len; | 386 | u16 sae_trans, sae_status; |
382 | u8 ie[]; | 387 | size_t data_len; |
388 | u8 data[]; | ||
383 | }; | 389 | }; |
384 | 390 | ||
385 | struct ieee80211_mgd_assoc_data { | 391 | struct ieee80211_mgd_assoc_data { |
@@ -433,7 +439,6 @@ struct ieee80211_if_managed { | |||
433 | bool powersave; /* powersave requested for this iface */ | 439 | bool powersave; /* powersave requested for this iface */ |
434 | bool broken_ap; /* AP is broken -- turn off powersave */ | 440 | bool broken_ap; /* AP is broken -- turn off powersave */ |
435 | enum ieee80211_smps_mode req_smps, /* requested smps mode */ | 441 | enum ieee80211_smps_mode req_smps, /* requested smps mode */ |
436 | ap_smps, /* smps mode AP thinks we're in */ | ||
437 | driver_smps_mode; /* smps mode request */ | 442 | driver_smps_mode; /* smps mode request */ |
438 | 443 | ||
439 | struct work_struct request_smps_work; | 444 | struct work_struct request_smps_work; |
@@ -467,6 +472,8 @@ struct ieee80211_if_managed { | |||
467 | 472 | ||
468 | u8 use_4addr; | 473 | u8 use_4addr; |
469 | 474 | ||
475 | u8 p2p_noa_index; | ||
476 | |||
470 | /* Signal strength from the last Beacon frame in the current BSS. */ | 477 | /* Signal strength from the last Beacon frame in the current BSS. */ |
471 | int last_beacon_signal; | 478 | int last_beacon_signal; |
472 | 479 | ||
@@ -599,6 +606,7 @@ struct ieee80211_if_mesh { | |||
599 | int preq_queue_len; | 606 | int preq_queue_len; |
600 | struct mesh_stats mshstats; | 607 | struct mesh_stats mshstats; |
601 | struct mesh_config mshcfg; | 608 | struct mesh_config mshcfg; |
609 | atomic_t estab_plinks; | ||
602 | u32 mesh_seqnum; | 610 | u32 mesh_seqnum; |
603 | bool accepting_plinks; | 611 | bool accepting_plinks; |
604 | int num_gates; | 612 | int num_gates; |
@@ -610,7 +618,7 @@ struct ieee80211_if_mesh { | |||
610 | IEEE80211_MESH_SEC_SECURED = 0x2, | 618 | IEEE80211_MESH_SEC_SECURED = 0x2, |
611 | } security; | 619 | } security; |
612 | /* Extensible Synchronization Framework */ | 620 | /* Extensible Synchronization Framework */ |
613 | struct ieee80211_mesh_sync_ops *sync_ops; | 621 | const struct ieee80211_mesh_sync_ops *sync_ops; |
614 | s64 sync_offset_clockdrift_max; | 622 | s64 sync_offset_clockdrift_max; |
615 | spinlock_t sync_offset_lock; | 623 | spinlock_t sync_offset_lock; |
616 | bool adjusting_tbtt; | 624 | bool adjusting_tbtt; |
@@ -658,6 +666,30 @@ enum ieee80211_sdata_state_bits { | |||
658 | SDATA_STATE_OFFCHANNEL, | 666 | SDATA_STATE_OFFCHANNEL, |
659 | }; | 667 | }; |
660 | 668 | ||
669 | /** | ||
670 | * enum ieee80211_chanctx_mode - channel context configuration mode | ||
671 | * | ||
672 | * @IEEE80211_CHANCTX_SHARED: channel context may be used by | ||
673 | * multiple interfaces | ||
674 | * @IEEE80211_CHANCTX_EXCLUSIVE: channel context can be used | ||
675 | * only by a single interface. This can be used for example for | ||
676 | * non-fixed channel IBSS. | ||
677 | */ | ||
678 | enum ieee80211_chanctx_mode { | ||
679 | IEEE80211_CHANCTX_SHARED, | ||
680 | IEEE80211_CHANCTX_EXCLUSIVE | ||
681 | }; | ||
682 | |||
683 | struct ieee80211_chanctx { | ||
684 | struct list_head list; | ||
685 | struct rcu_head rcu_head; | ||
686 | |||
687 | enum ieee80211_chanctx_mode mode; | ||
688 | int refcount; | ||
689 | |||
690 | struct ieee80211_chanctx_conf conf; | ||
691 | }; | ||
692 | |||
661 | struct ieee80211_sub_if_data { | 693 | struct ieee80211_sub_if_data { |
662 | struct list_head list; | 694 | struct list_head list; |
663 | 695 | ||
@@ -704,11 +736,20 @@ struct ieee80211_sub_if_data { | |||
704 | 736 | ||
705 | struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS]; | 737 | struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS]; |
706 | 738 | ||
739 | /* used to reconfigure hardware SM PS */ | ||
740 | struct work_struct recalc_smps; | ||
741 | |||
707 | struct work_struct work; | 742 | struct work_struct work; |
708 | struct sk_buff_head skb_queue; | 743 | struct sk_buff_head skb_queue; |
709 | 744 | ||
710 | bool arp_filter_state; | 745 | bool arp_filter_state; |
711 | 746 | ||
747 | u8 needed_rx_chains; | ||
748 | enum ieee80211_smps_mode smps_mode; | ||
749 | |||
750 | int user_power_level; /* in dBm */ | ||
751 | int ap_power_level; /* in dBm */ | ||
752 | |||
712 | /* | 753 | /* |
713 | * AP this belongs to: self in AP mode and | 754 | * AP this belongs to: self in AP mode and |
714 | * corresponding AP in VLAN mode, NULL for | 755 | * corresponding AP in VLAN mode, NULL for |
@@ -749,6 +790,21 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p) | |||
749 | return container_of(p, struct ieee80211_sub_if_data, vif); | 790 | return container_of(p, struct ieee80211_sub_if_data, vif); |
750 | } | 791 | } |
751 | 792 | ||
793 | static inline enum ieee80211_band | ||
794 | ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata) | ||
795 | { | ||
796 | enum ieee80211_band band = IEEE80211_BAND_2GHZ; | ||
797 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
798 | |||
799 | rcu_read_lock(); | ||
800 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
801 | if (!WARN_ON(!chanctx_conf)) | ||
802 | band = chanctx_conf->def.chan->band; | ||
803 | rcu_read_unlock(); | ||
804 | |||
805 | return band; | ||
806 | } | ||
807 | |||
752 | enum sdata_queue_type { | 808 | enum sdata_queue_type { |
753 | IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, | 809 | IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, |
754 | IEEE80211_SDATA_QUEUE_AGG_START = 1, | 810 | IEEE80211_SDATA_QUEUE_AGG_START = 1, |
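ieee80211_get_sdata_band() above gives per-interface code a band lookup that no longer relies on local->oper_channel, falling back to 2 GHz (with a warning) when the interface has no channel context assigned yet. A typical use, sketched on the assumption that the caller only needs the supported-band table:

	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
	struct ieee80211_supported_band *sband =
		sdata->local->hw.wiphy->bands[band];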
@@ -821,6 +877,7 @@ enum { | |||
821 | * @SCAN_SUSPEND: Suspend the scan and go back to operating channel to | 877 | * @SCAN_SUSPEND: Suspend the scan and go back to operating channel to |
822 | * send out data | 878 | * send out data |
823 | * @SCAN_RESUME: Resume the scan and scan the next channel | 879 | * @SCAN_RESUME: Resume the scan and scan the next channel |
880 | * @SCAN_ABORT: Abort the scan and go back to operating channel | ||
824 | */ | 881 | */ |
825 | enum mac80211_scan_state { | 882 | enum mac80211_scan_state { |
826 | SCAN_DECISION, | 883 | SCAN_DECISION, |
@@ -828,6 +885,7 @@ enum mac80211_scan_state { | |||
828 | SCAN_SEND_PROBE, | 885 | SCAN_SEND_PROBE, |
829 | SCAN_SUSPEND, | 886 | SCAN_SUSPEND, |
830 | SCAN_RESUME, | 887 | SCAN_RESUME, |
888 | SCAN_ABORT, | ||
831 | }; | 889 | }; |
832 | 890 | ||
833 | struct ieee80211_local { | 891 | struct ieee80211_local { |
@@ -858,15 +916,14 @@ struct ieee80211_local { | |||
858 | 916 | ||
859 | bool wiphy_ciphers_allocated; | 917 | bool wiphy_ciphers_allocated; |
860 | 918 | ||
919 | bool use_chanctx; | ||
920 | |||
861 | /* protects the aggregated multicast list and filter calls */ | 921 | /* protects the aggregated multicast list and filter calls */ |
862 | spinlock_t filter_lock; | 922 | spinlock_t filter_lock; |
863 | 923 | ||
864 | /* used for uploading changed mc list */ | 924 | /* used for uploading changed mc list */ |
865 | struct work_struct reconfig_filter; | 925 | struct work_struct reconfig_filter; |
866 | 926 | ||
867 | /* used to reconfigure hardware SM PS */ | ||
868 | struct work_struct recalc_smps; | ||
869 | |||
870 | /* aggregated multicast list */ | 927 | /* aggregated multicast list */ |
871 | struct netdev_hw_addr_list mc_list; | 928 | struct netdev_hw_addr_list mc_list; |
872 | 929 | ||
@@ -903,6 +960,9 @@ struct ieee80211_local { | |||
903 | /* wowlan is enabled -- don't reconfig on resume */ | 960 | /* wowlan is enabled -- don't reconfig on resume */ |
904 | bool wowlan; | 961 | bool wowlan; |
905 | 962 | ||
963 | /* number of RX chains the hardware has */ | ||
964 | u8 rx_chains; | ||
965 | |||
906 | int tx_headroom; /* required headroom for hardware/radiotap */ | 966 | int tx_headroom; /* required headroom for hardware/radiotap */ |
907 | 967 | ||
908 | /* Tasklet and skb queue to process calls from IRQ mode. All frames | 968 | /* Tasklet and skb queue to process calls from IRQ mode. All frames |
@@ -980,12 +1040,17 @@ struct ieee80211_local { | |||
980 | enum mac80211_scan_state next_scan_state; | 1040 | enum mac80211_scan_state next_scan_state; |
981 | struct delayed_work scan_work; | 1041 | struct delayed_work scan_work; |
982 | struct ieee80211_sub_if_data __rcu *scan_sdata; | 1042 | struct ieee80211_sub_if_data __rcu *scan_sdata; |
1043 | struct ieee80211_channel *csa_channel; | ||
1044 | /* For backward compatibility only -- do not use */ | ||
1045 | struct ieee80211_channel *_oper_channel; | ||
983 | enum nl80211_channel_type _oper_channel_type; | 1046 | enum nl80211_channel_type _oper_channel_type; |
984 | struct ieee80211_channel *oper_channel, *csa_channel; | ||
985 | 1047 | ||
986 | /* Temporary remain-on-channel for off-channel operations */ | 1048 | /* Temporary remain-on-channel for off-channel operations */ |
987 | struct ieee80211_channel *tmp_channel; | 1049 | struct ieee80211_channel *tmp_channel; |
988 | enum nl80211_channel_type tmp_channel_type; | 1050 | |
1051 | /* channel contexts */ | ||
1052 | struct list_head chanctx_list; | ||
1053 | struct mutex chanctx_mtx; | ||
989 | 1054 | ||
990 | /* SNMP counters */ | 1055 | /* SNMP counters */ |
991 | /* dot11CountersTable */ | 1056 | /* dot11CountersTable */ |
@@ -1058,8 +1123,7 @@ struct ieee80211_local { | |||
1058 | int dynamic_ps_user_timeout; | 1123 | int dynamic_ps_user_timeout; |
1059 | bool disable_dynamic_ps; | 1124 | bool disable_dynamic_ps; |
1060 | 1125 | ||
1061 | int user_power_level; /* in dBm */ | 1126 | int user_power_level; /* in dBm, for all interfaces */ |
1062 | int ap_power_level; /* in dBm */ | ||
1063 | 1127 | ||
1064 | enum ieee80211_smps_mode smps_mode; | 1128 | enum ieee80211_smps_mode smps_mode; |
1065 | 1129 | ||
@@ -1078,6 +1142,7 @@ struct ieee80211_local { | |||
1078 | struct list_head roc_list; | 1142 | struct list_head roc_list; |
1079 | struct work_struct hw_roc_start, hw_roc_done; | 1143 | struct work_struct hw_roc_start, hw_roc_done; |
1080 | unsigned long hw_roc_start_time; | 1144 | unsigned long hw_roc_start_time; |
1145 | u64 roc_cookie_counter; | ||
1081 | 1146 | ||
1082 | struct idr ack_status_frames; | 1147 | struct idr ack_status_frames; |
1083 | spinlock_t ack_status_lock; | 1148 | spinlock_t ack_status_lock; |
@@ -1091,6 +1156,7 @@ struct ieee80211_local { | |||
1091 | 1156 | ||
1092 | /* virtual monitor interface */ | 1157 | /* virtual monitor interface */ |
1093 | struct ieee80211_sub_if_data __rcu *monitor_sdata; | 1158 | struct ieee80211_sub_if_data __rcu *monitor_sdata; |
1159 | struct cfg80211_chan_def monitor_chandef; | ||
1094 | }; | 1160 | }; |
1095 | 1161 | ||
1096 | static inline struct ieee80211_sub_if_data * | 1162 | static inline struct ieee80211_sub_if_data * |
@@ -1133,6 +1199,8 @@ struct ieee802_11_elems { | |||
1133 | u8 *wmm_param; | 1199 | u8 *wmm_param; |
1134 | struct ieee80211_ht_cap *ht_cap_elem; | 1200 | struct ieee80211_ht_cap *ht_cap_elem; |
1135 | struct ieee80211_ht_operation *ht_operation; | 1201 | struct ieee80211_ht_operation *ht_operation; |
1202 | struct ieee80211_vht_cap *vht_cap_elem; | ||
1203 | struct ieee80211_vht_operation *vht_operation; | ||
1136 | struct ieee80211_meshconf_ie *mesh_config; | 1204 | struct ieee80211_meshconf_ie *mesh_config; |
1137 | u8 *mesh_id; | 1205 | u8 *mesh_id; |
1138 | u8 *peering; | 1206 | u8 *peering; |
@@ -1188,7 +1256,18 @@ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) | |||
1188 | is_broadcast_ether_addr(raddr); | 1256 | is_broadcast_ether_addr(raddr); |
1189 | } | 1257 | } |
1190 | 1258 | ||
1259 | static inline bool | ||
1260 | ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status) | ||
1261 | { | ||
1262 | WARN_ON_ONCE(status->flag & RX_FLAG_MACTIME_START && | ||
1263 | status->flag & RX_FLAG_MACTIME_END); | ||
1264 | return status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END); | ||
1265 | } | ||
1191 | 1266 | ||
1267 | u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, | ||
1268 | struct ieee80211_rx_status *status, | ||
1269 | unsigned int mpdu_len, | ||
1270 | unsigned int mpdu_offset); | ||
1192 | int ieee80211_hw_config(struct ieee80211_local *local, u32 changed); | 1271 | int ieee80211_hw_config(struct ieee80211_local *local, u32 changed); |
1193 | void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); | 1272 | void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); |
1194 | void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, | 1273 | void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, |
@@ -1302,6 +1381,9 @@ void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, | |||
1302 | int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up); | 1381 | int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up); |
1303 | void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata); | 1382 | void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata); |
1304 | 1383 | ||
1384 | bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata); | ||
1385 | void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata); | ||
1386 | |||
1305 | static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata) | 1387 | static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata) |
1306 | { | 1388 | { |
1307 | return test_bit(SDATA_STATE_RUNNING, &sdata->state); | 1389 | return test_bit(SDATA_STATE_RUNNING, &sdata->state); |
@@ -1361,6 +1443,13 @@ void ieee80211_ba_session_work(struct work_struct *work); | |||
1361 | void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); | 1443 | void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); |
1362 | void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid); | 1444 | void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid); |
1363 | 1445 | ||
1446 | u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs); | ||
1447 | |||
1448 | /* VHT */ | ||
1449 | void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, | ||
1450 | struct ieee80211_supported_band *sband, | ||
1451 | struct ieee80211_vht_cap *vht_cap_ie, | ||
1452 | struct ieee80211_sta_vht_cap *vht_cap); | ||
1364 | /* Spectrum management */ | 1453 | /* Spectrum management */ |
1365 | void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, | 1454 | void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, |
1366 | struct ieee80211_mgmt *mgmt, | 1455 | struct ieee80211_mgmt *mgmt, |
@@ -1395,11 +1484,42 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke | |||
1395 | gfp_t gfp); | 1484 | gfp_t gfp); |
1396 | void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, | 1485 | void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, |
1397 | bool bss_notify); | 1486 | bool bss_notify); |
1398 | void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); | 1487 | void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, |
1488 | enum ieee80211_band band); | ||
1489 | |||
1490 | void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, | ||
1491 | struct sk_buff *skb, int tid, | ||
1492 | enum ieee80211_band band); | ||
1399 | 1493 | ||
1400 | void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, | 1494 | static inline void |
1401 | struct sk_buff *skb, int tid); | 1495 | ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, |
1402 | static void inline ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, | 1496 | struct sk_buff *skb, int tid, |
1497 | enum ieee80211_band band) | ||
1498 | { | ||
1499 | rcu_read_lock(); | ||
1500 | __ieee80211_tx_skb_tid_band(sdata, skb, tid, band); | ||
1501 | rcu_read_unlock(); | ||
1502 | } | ||
1503 | |||
1504 | static inline void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, | ||
1505 | struct sk_buff *skb, int tid) | ||
1506 | { | ||
1507 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
1508 | |||
1509 | rcu_read_lock(); | ||
1510 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
1511 | if (WARN_ON(!chanctx_conf)) { | ||
1512 | rcu_read_unlock(); | ||
1513 | kfree_skb(skb); | ||
1514 | return; | ||
1515 | } | ||
1516 | |||
1517 | __ieee80211_tx_skb_tid_band(sdata, skb, tid, | ||
1518 | chanctx_conf->def.chan->band); | ||
1519 | rcu_read_unlock(); | ||
1520 | } | ||
1521 | |||
1522 | static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, | ||
1403 | struct sk_buff *skb) | 1523 | struct sk_buff *skb) |
1404 | { | 1524 | { |
1405 | /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */ | 1525 | /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */ |
@@ -1446,7 +1566,7 @@ static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local, | |||
1446 | } | 1566 | } |
1447 | 1567 | ||
1448 | void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, | 1568 | void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, |
1449 | u16 transaction, u16 auth_alg, | 1569 | u16 transaction, u16 auth_alg, u16 status, |
1450 | u8 *extra, size_t extra_len, const u8 *bssid, | 1570 | u8 *extra, size_t extra_len, const u8 *bssid, |
1451 | const u8 *da, const u8 *key, u8 key_len, u8 key_idx); | 1571 | const u8 *da, const u8 *key, u8 key_len, u8 key_idx); |
1452 | void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, | 1572 | void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, |
@@ -1466,7 +1586,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, | |||
1466 | const u8 *ssid, size_t ssid_len, | 1586 | const u8 *ssid, size_t ssid_len, |
1467 | const u8 *ie, size_t ie_len, | 1587 | const u8 *ie, size_t ie_len, |
1468 | u32 ratemask, bool directed, bool no_cck, | 1588 | u32 ratemask, bool directed, bool no_cck, |
1469 | struct ieee80211_channel *channel); | 1589 | struct ieee80211_channel *channel, bool scan); |
1470 | 1590 | ||
1471 | void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, | 1591 | void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, |
1472 | const size_t supp_rates_len, | 1592 | const size_t supp_rates_len, |
@@ -1476,7 +1596,7 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local, | |||
1476 | enum ieee80211_band band, u32 *basic_rates); | 1596 | enum ieee80211_band band, u32 *basic_rates); |
1477 | int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, | 1597 | int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, |
1478 | enum ieee80211_smps_mode smps_mode); | 1598 | enum ieee80211_smps_mode smps_mode); |
1479 | void ieee80211_recalc_smps(struct ieee80211_local *local); | 1599 | void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata); |
1480 | 1600 | ||
1481 | size_t ieee80211_ie_split(const u8 *ies, size_t ielen, | 1601 | size_t ieee80211_ie_split(const u8 *ies, size_t ielen, |
1482 | const u8 *ids, int n_ids, size_t offset); | 1602 | const u8 *ids, int n_ids, size_t offset); |
@@ -1484,8 +1604,7 @@ size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); | |||
1484 | u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, | 1604 | u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, |
1485 | u16 cap); | 1605 | u16 cap); |
1486 | u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, | 1606 | u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, |
1487 | struct ieee80211_channel *channel, | 1607 | const struct cfg80211_chan_def *chandef, |
1488 | enum nl80211_channel_type channel_type, | ||
1489 | u16 prot_mode); | 1608 | u16 prot_mode); |
1490 | u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, | 1609 | u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, |
1491 | u32 cap); | 1610 | u32 cap); |
@@ -1497,20 +1616,18 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, | |||
1497 | enum ieee80211_band band); | 1616 | enum ieee80211_band band); |
1498 | 1617 | ||
1499 | /* channel management */ | 1618 | /* channel management */ |
1500 | enum ieee80211_chan_mode { | 1619 | void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan, |
1501 | CHAN_MODE_UNDEFINED, | 1620 | struct ieee80211_ht_operation *ht_oper, |
1502 | CHAN_MODE_HOPPING, | 1621 | struct cfg80211_chan_def *chandef); |
1503 | CHAN_MODE_FIXED, | 1622 | |
1504 | }; | 1623 | int __must_check |
1505 | 1624 | ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, | |
1506 | enum ieee80211_chan_mode | 1625 | const struct cfg80211_chan_def *chandef, |
1507 | ieee80211_get_channel_mode(struct ieee80211_local *local, | 1626 | enum ieee80211_chanctx_mode mode); |
1508 | struct ieee80211_sub_if_data *ignore); | 1627 | void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata); |
1509 | bool ieee80211_set_channel_type(struct ieee80211_local *local, | 1628 | |
1510 | struct ieee80211_sub_if_data *sdata, | 1629 | void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, |
1511 | enum nl80211_channel_type chantype); | 1630 | struct ieee80211_chanctx *chanctx); |
1512 | enum nl80211_channel_type | ||
1513 | ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper); | ||
1514 | 1631 | ||
1515 | #ifdef CONFIG_MAC80211_NOINLINE | 1632 | #ifdef CONFIG_MAC80211_NOINLINE |
1516 | #define debug_noinline noinline | 1633 | #define debug_noinline noinline |
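The channel-management block above swaps the old global helpers (ieee80211_get_channel_mode, ieee80211_set_channel_type, ieee80211_ht_oper_to_channel_type) for per-vif channel context operations plus an HT-operation-to-chandef converter. The acquire/release pairing shows up in the iface.c hunks below for the virtual monitor interface; condensed, with error handling and chandef setup elided, the pattern looks like:

	ret = ieee80211_vif_use_channel(sdata, &chandef,
					IEEE80211_CHANCTX_EXCLUSIVE);
	if (ret)
		return ret;
	/* ... interface now owns or shares a chanctx for chandef ... */
	ieee80211_vif_release_channel(sdata);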
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 7de7717ad67d..5331662489f7 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -42,6 +42,41 @@ | |||
42 | * by either the RTNL, the iflist_mtx or RCU. | 42 | * by either the RTNL, the iflist_mtx or RCU. |
43 | */ | 43 | */ |
44 | 44 | ||
45 | bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) | ||
46 | { | ||
47 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
48 | int power; | ||
49 | |||
50 | rcu_read_lock(); | ||
51 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
52 | if (!chanctx_conf) { | ||
53 | rcu_read_unlock(); | ||
54 | return false; | ||
55 | } | ||
56 | |||
57 | power = chanctx_conf->def.chan->max_power; | ||
58 | rcu_read_unlock(); | ||
59 | |||
60 | if (sdata->user_power_level != IEEE80211_UNSET_POWER_LEVEL) | ||
61 | power = min(power, sdata->user_power_level); | ||
62 | |||
63 | if (sdata->ap_power_level != IEEE80211_UNSET_POWER_LEVEL) | ||
64 | power = min(power, sdata->ap_power_level); | ||
65 | |||
66 | if (power != sdata->vif.bss_conf.txpower) { | ||
67 | sdata->vif.bss_conf.txpower = power; | ||
68 | ieee80211_hw_config(sdata->local, 0); | ||
69 | return true; | ||
70 | } | ||
71 | |||
72 | return false; | ||
73 | } | ||
74 | |||
75 | void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) | ||
76 | { | ||
77 | if (__ieee80211_recalc_txpower(sdata)) | ||
78 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); | ||
79 | } | ||
45 | 80 | ||
46 | static u32 ieee80211_idle_off(struct ieee80211_local *local, | 81 | static u32 ieee80211_idle_off(struct ieee80211_local *local, |
47 | const char *reason) | 82 | const char *reason) |
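__ieee80211_recalc_txpower() above clamps the per-interface TX power to the channel's max_power and then to the user and AP constraints, with IEEE80211_UNSET_POWER_LEVEL (INT_MIN) meaning no constraint. For example, chan->max_power = 20 dBm, user_power_level = 17 dBm and ap_power_level = 15 dBm yields bss_conf.txpower = 15 dBm; with both constraints unset it stays at 20 dBm. The wrapper only emits BSS_CHANGED_TXPOWER when the value actually changed, so repeated recalculations are cheap.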
@@ -380,6 +415,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | |||
380 | goto out_unlock; | 415 | goto out_unlock; |
381 | } | 416 | } |
382 | 417 | ||
418 | ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, | ||
419 | IEEE80211_CHANCTX_EXCLUSIVE); | ||
420 | if (ret) { | ||
421 | drv_remove_interface(local, sdata); | ||
422 | kfree(sdata); | ||
423 | goto out_unlock; | ||
424 | } | ||
425 | |||
383 | rcu_assign_pointer(local->monitor_sdata, sdata); | 426 | rcu_assign_pointer(local->monitor_sdata, sdata); |
384 | out_unlock: | 427 | out_unlock: |
385 | mutex_unlock(&local->iflist_mtx); | 428 | mutex_unlock(&local->iflist_mtx); |
@@ -403,6 +446,8 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | |||
403 | rcu_assign_pointer(local->monitor_sdata, NULL); | 446 | rcu_assign_pointer(local->monitor_sdata, NULL); |
404 | synchronize_net(); | 447 | synchronize_net(); |
405 | 448 | ||
449 | ieee80211_vif_release_channel(sdata); | ||
450 | |||
406 | drv_remove_interface(local, sdata); | 451 | drv_remove_interface(local, sdata); |
407 | 452 | ||
408 | kfree(sdata); | 453 | kfree(sdata); |
@@ -665,7 +710,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
665 | struct sk_buff *skb, *tmp; | 710 | struct sk_buff *skb, *tmp; |
666 | u32 hw_reconf_flags = 0; | 711 | u32 hw_reconf_flags = 0; |
667 | int i; | 712 | int i; |
668 | enum nl80211_channel_type orig_ct; | ||
669 | 713 | ||
670 | clear_bit(SDATA_STATE_RUNNING, &sdata->state); | 714 | clear_bit(SDATA_STATE_RUNNING, &sdata->state); |
671 | 715 | ||
@@ -729,34 +773,17 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
729 | del_timer_sync(&local->dynamic_ps_timer); | 773 | del_timer_sync(&local->dynamic_ps_timer); |
730 | cancel_work_sync(&local->dynamic_ps_enable_work); | 774 | cancel_work_sync(&local->dynamic_ps_enable_work); |
731 | 775 | ||
776 | cancel_work_sync(&sdata->recalc_smps); | ||
777 | |||
732 | /* APs need special treatment */ | 778 | /* APs need special treatment */ |
733 | if (sdata->vif.type == NL80211_IFTYPE_AP) { | 779 | if (sdata->vif.type == NL80211_IFTYPE_AP) { |
734 | struct ieee80211_sub_if_data *vlan, *tmpsdata; | 780 | struct ieee80211_sub_if_data *vlan, *tmpsdata; |
735 | struct beacon_data *old_beacon = | ||
736 | rtnl_dereference(sdata->u.ap.beacon); | ||
737 | struct probe_resp *old_probe_resp = | ||
738 | rtnl_dereference(sdata->u.ap.probe_resp); | ||
739 | |||
740 | /* sdata_running will return false, so this will disable */ | ||
741 | ieee80211_bss_info_change_notify(sdata, | ||
742 | BSS_CHANGED_BEACON_ENABLED); | ||
743 | |||
744 | /* remove beacon and probe response */ | ||
745 | RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); | ||
746 | RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL); | ||
747 | synchronize_rcu(); | ||
748 | kfree(old_beacon); | ||
749 | kfree(old_probe_resp); | ||
750 | 781 | ||
751 | /* down all dependent devices, that is VLANs */ | 782 | /* down all dependent devices, that is VLANs */ |
752 | list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, | 783 | list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, |
753 | u.vlan.list) | 784 | u.vlan.list) |
754 | dev_close(vlan->dev); | 785 | dev_close(vlan->dev); |
755 | WARN_ON(!list_empty(&sdata->u.ap.vlans)); | 786 | WARN_ON(!list_empty(&sdata->u.ap.vlans)); |
756 | |||
757 | /* free all potentially still buffered bcast frames */ | ||
758 | local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf); | ||
759 | skb_queue_purge(&sdata->u.ap.ps_bc_buf); | ||
760 | } else if (sdata->vif.type == NL80211_IFTYPE_STATION) { | 787 | } else if (sdata->vif.type == NL80211_IFTYPE_STATION) { |
761 | ieee80211_mgd_stop(sdata); | 788 | ieee80211_mgd_stop(sdata); |
762 | } | 789 | } |
@@ -837,14 +864,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
837 | hw_reconf_flags = 0; | 864 | hw_reconf_flags = 0; |
838 | } | 865 | } |
839 | 866 | ||
840 | /* Re-calculate channel-type, in case there are multiple vifs | ||
841 | * on different channel types. | ||
842 | */ | ||
843 | orig_ct = local->_oper_channel_type; | ||
844 | ieee80211_set_channel_type(local, NULL, NL80211_CHAN_NO_HT); | ||
845 | |||
846 | /* do after stop to avoid reconfiguring when we stop anyway */ | 867 | /* do after stop to avoid reconfiguring when we stop anyway */ |
847 | if (hw_reconf_flags || (orig_ct != local->_oper_channel_type)) | 868 | if (hw_reconf_flags) |
848 | ieee80211_hw_config(local, hw_reconf_flags); | 869 | ieee80211_hw_config(local, hw_reconf_flags); |
849 | 870 | ||
850 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | 871 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); |
@@ -1121,6 +1142,13 @@ static void ieee80211_iface_work(struct work_struct *work) | |||
1121 | } | 1142 | } |
1122 | } | 1143 | } |
1123 | 1144 | ||
1145 | static void ieee80211_recalc_smps_work(struct work_struct *work) | ||
1146 | { | ||
1147 | struct ieee80211_sub_if_data *sdata = | ||
1148 | container_of(work, struct ieee80211_sub_if_data, recalc_smps); | ||
1149 | |||
1150 | ieee80211_recalc_smps(sdata); | ||
1151 | } | ||
1124 | 1152 | ||
1125 | /* | 1153 | /* |
1126 | * Helper function to initialise an interface to a specific type. | 1154 | * Helper function to initialise an interface to a specific type. |
@@ -1149,6 +1177,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, | |||
1149 | 1177 | ||
1150 | skb_queue_head_init(&sdata->skb_queue); | 1178 | skb_queue_head_init(&sdata->skb_queue); |
1151 | INIT_WORK(&sdata->work, ieee80211_iface_work); | 1179 | INIT_WORK(&sdata->work, ieee80211_iface_work); |
1180 | INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work); | ||
1152 | 1181 | ||
1153 | switch (type) { | 1182 | switch (type) { |
1154 | case NL80211_IFTYPE_P2P_GO: | 1183 | case NL80211_IFTYPE_P2P_GO: |
@@ -1157,7 +1186,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, | |||
1157 | sdata->vif.p2p = true; | 1186 | sdata->vif.p2p = true; |
1158 | /* fall through */ | 1187 | /* fall through */ |
1159 | case NL80211_IFTYPE_AP: | 1188 | case NL80211_IFTYPE_AP: |
1160 | skb_queue_head_init(&sdata->u.ap.ps_bc_buf); | 1189 | skb_queue_head_init(&sdata->u.ap.ps.bc_buf); |
1161 | INIT_LIST_HEAD(&sdata->u.ap.vlans); | 1190 | INIT_LIST_HEAD(&sdata->u.ap.vlans); |
1162 | break; | 1191 | break; |
1163 | case NL80211_IFTYPE_P2P_CLIENT: | 1192 | case NL80211_IFTYPE_P2P_CLIENT: |
@@ -1282,11 +1311,6 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | |||
1282 | if (type == ieee80211_vif_type_p2p(&sdata->vif)) | 1311 | if (type == ieee80211_vif_type_p2p(&sdata->vif)) |
1283 | return 0; | 1312 | return 0; |
1284 | 1313 | ||
1285 | /* Setting ad-hoc mode on non-IBSS channel is not supported. */ | ||
1286 | if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS && | ||
1287 | type == NL80211_IFTYPE_ADHOC) | ||
1288 | return -EOPNOTSUPP; | ||
1289 | |||
1290 | if (ieee80211_sdata_running(sdata)) { | 1314 | if (ieee80211_sdata_running(sdata)) { |
1291 | ret = ieee80211_runtime_change_iftype(sdata, type); | 1315 | ret = ieee80211_runtime_change_iftype(sdata, type); |
1292 | if (ret) | 1316 | if (ret) |
@@ -1298,9 +1322,6 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | |||
1298 | } | 1322 | } |
1299 | 1323 | ||
1300 | /* reset some values that shouldn't be kept across type changes */ | 1324 | /* reset some values that shouldn't be kept across type changes */ |
1301 | sdata->vif.bss_conf.basic_rates = | ||
1302 | ieee80211_mandatory_rates(sdata->local, | ||
1303 | sdata->local->oper_channel->band); | ||
1304 | sdata->drop_unencrypted = 0; | 1325 | sdata->drop_unencrypted = 0; |
1305 | if (type == NL80211_IFTYPE_STATION) | 1326 | if (type == NL80211_IFTYPE_STATION) |
1306 | sdata->u.mgd.use_4addr = false; | 1327 | sdata->u.mgd.use_4addr = false; |
@@ -1523,6 +1544,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
1523 | 1544 | ||
1524 | ieee80211_set_default_queues(sdata); | 1545 | ieee80211_set_default_queues(sdata); |
1525 | 1546 | ||
1547 | sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL; | ||
1548 | sdata->user_power_level = local->user_power_level; | ||
1549 | |||
1526 | /* setup type-dependent data */ | 1550 | /* setup type-dependent data */ |
1527 | ieee80211_setup_sdata(sdata, type); | 1551 | ieee80211_setup_sdata(sdata, type); |
1528 | 1552 | ||
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index d27e61aaa71b..619c5d697999 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -339,7 +339,7 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, | |||
339 | key->conf.iv_len = TKIP_IV_LEN; | 339 | key->conf.iv_len = TKIP_IV_LEN; |
340 | key->conf.icv_len = TKIP_ICV_LEN; | 340 | key->conf.icv_len = TKIP_ICV_LEN; |
341 | if (seq) { | 341 | if (seq) { |
342 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) { | 342 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) { |
343 | key->u.tkip.rx[i].iv32 = | 343 | key->u.tkip.rx[i].iv32 = |
344 | get_unaligned_le32(&seq[2]); | 344 | get_unaligned_le32(&seq[2]); |
345 | key->u.tkip.rx[i].iv16 = | 345 | key->u.tkip.rx[i].iv16 = |
@@ -352,7 +352,7 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, | |||
352 | key->conf.iv_len = CCMP_HDR_LEN; | 352 | key->conf.iv_len = CCMP_HDR_LEN; |
353 | key->conf.icv_len = CCMP_MIC_LEN; | 353 | key->conf.icv_len = CCMP_MIC_LEN; |
354 | if (seq) { | 354 | if (seq) { |
355 | for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) | 355 | for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) |
356 | for (j = 0; j < CCMP_PN_LEN; j++) | 356 | for (j = 0; j < CCMP_PN_LEN; j++) |
357 | key->u.ccmp.rx_pn[i][j] = | 357 | key->u.ccmp.rx_pn[i][j] = |
358 | seq[CCMP_PN_LEN - j - 1]; | 358 | seq[CCMP_PN_LEN - j - 1]; |
@@ -372,8 +372,9 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, | |||
372 | key->conf.iv_len = 0; | 372 | key->conf.iv_len = 0; |
373 | key->conf.icv_len = sizeof(struct ieee80211_mmie); | 373 | key->conf.icv_len = sizeof(struct ieee80211_mmie); |
374 | if (seq) | 374 | if (seq) |
375 | for (j = 0; j < 6; j++) | 375 | for (j = 0; j < CMAC_PN_LEN; j++) |
376 | key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1]; | 376 | key->u.aes_cmac.rx_pn[j] = |
377 | seq[CMAC_PN_LEN - j - 1]; | ||
377 | /* | 378 | /* |
378 | * Initialize AES key state here as an optimization so that | 379 | * Initialize AES key state here as an optimization so that |
379 | * it does not need to be initialized for every packet. | 380 | * it does not need to be initialized for every packet. |
@@ -654,16 +655,16 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, | |||
654 | 655 | ||
655 | switch (key->conf.cipher) { | 656 | switch (key->conf.cipher) { |
656 | case WLAN_CIPHER_SUITE_TKIP: | 657 | case WLAN_CIPHER_SUITE_TKIP: |
657 | if (WARN_ON(tid < 0 || tid >= NUM_RX_DATA_QUEUES)) | 658 | if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS)) |
658 | return; | 659 | return; |
659 | seq->tkip.iv32 = key->u.tkip.rx[tid].iv32; | 660 | seq->tkip.iv32 = key->u.tkip.rx[tid].iv32; |
660 | seq->tkip.iv16 = key->u.tkip.rx[tid].iv16; | 661 | seq->tkip.iv16 = key->u.tkip.rx[tid].iv16; |
661 | break; | 662 | break; |
662 | case WLAN_CIPHER_SUITE_CCMP: | 663 | case WLAN_CIPHER_SUITE_CCMP: |
663 | if (WARN_ON(tid < -1 || tid >= NUM_RX_DATA_QUEUES)) | 664 | if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) |
664 | return; | 665 | return; |
665 | if (tid < 0) | 666 | if (tid < 0) |
666 | pn = key->u.ccmp.rx_pn[NUM_RX_DATA_QUEUES]; | 667 | pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS]; |
667 | else | 668 | else |
668 | pn = key->u.ccmp.rx_pn[tid]; | 669 | pn = key->u.ccmp.rx_pn[tid]; |
669 | memcpy(seq->ccmp.pn, pn, CCMP_PN_LEN); | 670 | memcpy(seq->ccmp.pn, pn, CCMP_PN_LEN); |
diff --git a/net/mac80211/key.h b/net/mac80211/key.h index 7d4e31f037d7..7cff0d3a519c 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h | |||
@@ -30,8 +30,6 @@ | |||
30 | #define TKIP_ICV_LEN 4 | 30 | #define TKIP_ICV_LEN 4 |
31 | #define CMAC_PN_LEN 6 | 31 | #define CMAC_PN_LEN 6 |
32 | 32 | ||
33 | #define NUM_RX_DATA_QUEUES 16 | ||
34 | |||
35 | struct ieee80211_local; | 33 | struct ieee80211_local; |
36 | struct ieee80211_sub_if_data; | 34 | struct ieee80211_sub_if_data; |
37 | struct sta_info; | 35 | struct sta_info; |
@@ -82,17 +80,17 @@ struct ieee80211_key { | |||
82 | struct tkip_ctx tx; | 80 | struct tkip_ctx tx; |
83 | 81 | ||
84 | /* last received RSC */ | 82 | /* last received RSC */ |
85 | struct tkip_ctx rx[NUM_RX_DATA_QUEUES]; | 83 | struct tkip_ctx rx[IEEE80211_NUM_TIDS]; |
86 | } tkip; | 84 | } tkip; |
87 | struct { | 85 | struct { |
88 | atomic64_t tx_pn; | 86 | atomic64_t tx_pn; |
89 | /* | 87 | /* |
90 | * Last received packet number. The first | 88 | * Last received packet number. The first |
91 | * NUM_RX_DATA_QUEUES counters are used with Data | 89 | * IEEE80211_NUM_TIDS counters are used with Data |
92 | * frames and the last counter is used with Robust | 90 | * frames and the last counter is used with Robust |
93 | * Management frames. | 91 | * Management frames. |
94 | */ | 92 | */ |
95 | u8 rx_pn[NUM_RX_DATA_QUEUES + 1][CCMP_PN_LEN]; | 93 | u8 rx_pn[IEEE80211_NUM_TIDS + 1][CCMP_PN_LEN]; |
96 | struct crypto_cipher *tfm; | 94 | struct crypto_cipher *tfm; |
97 | u32 replays; /* dot11RSNAStatsCCMPReplays */ | 95 | u32 replays; /* dot11RSNAStatsCCMPReplays */ |
98 | } ccmp; | 96 | } ccmp; |
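With the rename above, the CCMP replay counters are sized by TID rather than by the old RX data queues (both 16 entries, so the layout is unchanged): IEEE80211_NUM_TIDS slots for Data frames plus one trailing slot for Robust Management frames, which is why ieee80211_get_key_rx_seq() in key.c above indexes rx_pn[IEEE80211_NUM_TIDS] when called with tid < 0:

	u8 rx_pn[IEEE80211_NUM_TIDS + 1][CCMP_PN_LEN];
	/* rx_pn[0] .. rx_pn[15] : per-TID Data frame PNs
	 * rx_pn[16]             : Robust Management frame PN */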
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index f57f597972f8..f5e4c1f24bf2 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -93,15 +93,15 @@ static void ieee80211_reconfig_filter(struct work_struct *work) | |||
93 | ieee80211_configure_filter(local); | 93 | ieee80211_configure_filter(local); |
94 | } | 94 | } |
95 | 95 | ||
96 | int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) | 96 | static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local) |
97 | { | 97 | { |
98 | struct ieee80211_sub_if_data *sdata; | ||
98 | struct ieee80211_channel *chan; | 99 | struct ieee80211_channel *chan; |
99 | int ret = 0; | 100 | u32 changed = 0; |
100 | int power; | 101 | int power; |
101 | enum nl80211_channel_type channel_type; | 102 | enum nl80211_channel_type channel_type; |
102 | u32 offchannel_flag; | 103 | u32 offchannel_flag; |
103 | 104 | bool scanning = false; | |
104 | might_sleep(); | ||
105 | 105 | ||
106 | offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; | 106 | offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; |
107 | if (local->scan_channel) { | 107 | if (local->scan_channel) { |
@@ -109,19 +109,19 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) | |||
109 | /* If scanning on oper channel, use whatever channel-type | 109 | /* If scanning on oper channel, use whatever channel-type |
110 | * is currently in use. | 110 | * is currently in use. |
111 | */ | 111 | */ |
112 | if (chan == local->oper_channel) | 112 | if (chan == local->_oper_channel) |
113 | channel_type = local->_oper_channel_type; | 113 | channel_type = local->_oper_channel_type; |
114 | else | 114 | else |
115 | channel_type = NL80211_CHAN_NO_HT; | 115 | channel_type = NL80211_CHAN_NO_HT; |
116 | } else if (local->tmp_channel) { | 116 | } else if (local->tmp_channel) { |
117 | chan = local->tmp_channel; | 117 | chan = local->tmp_channel; |
118 | channel_type = local->tmp_channel_type; | 118 | channel_type = NL80211_CHAN_NO_HT; |
119 | } else { | 119 | } else { |
120 | chan = local->oper_channel; | 120 | chan = local->_oper_channel; |
121 | channel_type = local->_oper_channel_type; | 121 | channel_type = local->_oper_channel_type; |
122 | } | 122 | } |
123 | 123 | ||
124 | if (chan != local->oper_channel || | 124 | if (chan != local->_oper_channel || |
125 | channel_type != local->_oper_channel_type) | 125 | channel_type != local->_oper_channel_type) |
126 | local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; | 126 | local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; |
127 | else | 127 | else |
@@ -148,22 +148,39 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) | |||
148 | changed |= IEEE80211_CONF_CHANGE_SMPS; | 148 | changed |= IEEE80211_CONF_CHANGE_SMPS; |
149 | } | 149 | } |
150 | 150 | ||
151 | if (test_bit(SCAN_SW_SCANNING, &local->scanning) || | 151 | scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) || |
152 | test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) || | 152 | test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) || |
153 | test_bit(SCAN_HW_SCANNING, &local->scanning) || | 153 | test_bit(SCAN_HW_SCANNING, &local->scanning); |
154 | !local->ap_power_level) | 154 | power = chan->max_power; |
155 | power = chan->max_power; | ||
156 | else | ||
157 | power = min(chan->max_power, local->ap_power_level); | ||
158 | 155 | ||
159 | if (local->user_power_level >= 0) | 156 | rcu_read_lock(); |
160 | power = min(power, local->user_power_level); | 157 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
158 | if (!rcu_access_pointer(sdata->vif.chanctx_conf)) | ||
159 | continue; | ||
160 | power = min(power, sdata->vif.bss_conf.txpower); | ||
161 | } | ||
162 | rcu_read_unlock(); | ||
161 | 163 | ||
162 | if (local->hw.conf.power_level != power) { | 164 | if (local->hw.conf.power_level != power) { |
163 | changed |= IEEE80211_CONF_CHANGE_POWER; | 165 | changed |= IEEE80211_CONF_CHANGE_POWER; |
164 | local->hw.conf.power_level = power; | 166 | local->hw.conf.power_level = power; |
165 | } | 167 | } |
166 | 168 | ||
169 | return changed; | ||
170 | } | ||
171 | |||
172 | int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) | ||
173 | { | ||
174 | int ret = 0; | ||
175 | |||
176 | might_sleep(); | ||
177 | |||
178 | if (!local->use_chanctx) | ||
179 | changed |= ieee80211_hw_conf_chan(local); | ||
180 | else | ||
181 | changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL | | ||
182 | IEEE80211_CONF_CHANGE_POWER); | ||
183 | |||
167 | if (changed && local->open_count) { | 184 | if (changed && local->open_count) { |
168 | ret = drv_config(local, changed); | 185 | ret = drv_config(local, changed); |
169 | /* | 186 | /* |
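The hunk above splits the channel and power handling out of ieee80211_hw_config() into ieee80211_hw_conf_chan(), and replaces the old ap_power_level/user_power_level pair with a walk over all interfaces, taking the minimum of each vif's bss_conf.txpower. A minimal sketch of that aggregation idiom, using only the names visible in the hunk (rcu_access_pointer() is enough here because the pointer is only tested, never dereferenced):

/*
 * Sketch of the per-interface TX power aggregation done in
 * ieee80211_hw_conf_chan() above: interfaces without an assigned
 * channel context do not constrain the device power level.
 */
static int sketch_min_vif_txpower(struct ieee80211_local *local, int power)
{
	struct ieee80211_sub_if_data *sdata;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		/* only tested for NULL, so rcu_access_pointer() suffices */
		if (!rcu_access_pointer(sdata->vif.chanctx_conf))
			continue;
		power = min(power, sdata->vif.bss_conf.txpower);
	}
	rcu_read_unlock();

	return power;
}

The wrapper that remains as ieee80211_hw_config() only calls the helper when the driver does not use channel contexts; otherwise the channel and power change bits are masked out of 'changed' before drv_config() runs.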
@@ -359,14 +376,6 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw) | |||
359 | } | 376 | } |
360 | EXPORT_SYMBOL(ieee80211_restart_hw); | 377 | EXPORT_SYMBOL(ieee80211_restart_hw); |
361 | 378 | ||
362 | static void ieee80211_recalc_smps_work(struct work_struct *work) | ||
363 | { | ||
364 | struct ieee80211_local *local = | ||
365 | container_of(work, struct ieee80211_local, recalc_smps); | ||
366 | |||
367 | ieee80211_recalc_smps(local); | ||
368 | } | ||
369 | |||
370 | #ifdef CONFIG_INET | 379 | #ifdef CONFIG_INET |
371 | static int ieee80211_ifa_changed(struct notifier_block *nb, | 380 | static int ieee80211_ifa_changed(struct notifier_block *nb, |
372 | unsigned long data, void *arg) | 381 | unsigned long data, void *arg) |
@@ -540,6 +549,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
540 | struct ieee80211_local *local; | 549 | struct ieee80211_local *local; |
541 | int priv_size, i; | 550 | int priv_size, i; |
542 | struct wiphy *wiphy; | 551 | struct wiphy *wiphy; |
552 | bool use_chanctx; | ||
543 | 553 | ||
544 | if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config || | 554 | if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config || |
545 | !ops->add_interface || !ops->remove_interface || | 555 | !ops->add_interface || !ops->remove_interface || |
@@ -549,6 +559,14 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
549 | if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove))) | 559 | if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove))) |
550 | return NULL; | 560 | return NULL; |
551 | 561 | ||
562 | /* check all or no channel context operations exist */ | ||
563 | i = !!ops->add_chanctx + !!ops->remove_chanctx + | ||
564 | !!ops->change_chanctx + !!ops->assign_vif_chanctx + | ||
565 | !!ops->unassign_vif_chanctx; | ||
566 | if (WARN_ON(i != 0 && i != 5)) | ||
567 | return NULL; | ||
568 | use_chanctx = i == 5; | ||
569 | |||
552 | /* Ensure 32-byte alignment of our private data and hw private data. | 570 | /* Ensure 32-byte alignment of our private data and hw private data. |
553 | * We use the wiphy priv data for both our ieee80211_local and for | 571 | * We use the wiphy priv data for both our ieee80211_local and for |
554 | * the driver's private data | 572 | * the driver's private data |
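The counting check above is the all-or-nothing rule for the new channel-context driver API: a driver implements either every one of the five callbacks or none of them, and use_chanctx records the result. The same idiom in isolation, as a sketch:

/*
 * !!ptr collapses each function pointer to 0 or 1, so the sum is the
 * number of chanctx hooks the driver fills in; only 0 or 5 is sane.
 */
static bool sketch_chanctx_ops_consistent(const struct ieee80211_ops *ops)
{
	int n = !!ops->add_chanctx + !!ops->remove_chanctx +
		!!ops->change_chanctx + !!ops->assign_vif_chanctx +
		!!ops->unassign_vif_chanctx;

	return n == 0 || n == 5;
}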
@@ -584,8 +602,15 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
584 | if (ops->remain_on_channel) | 602 | if (ops->remain_on_channel) |
585 | wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; | 603 | wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; |
586 | 604 | ||
587 | wiphy->features = NL80211_FEATURE_SK_TX_STATUS | | 605 | wiphy->features |= NL80211_FEATURE_SK_TX_STATUS | |
588 | NL80211_FEATURE_HT_IBSS; | 606 | NL80211_FEATURE_SAE | |
607 | NL80211_FEATURE_HT_IBSS | | ||
608 | NL80211_FEATURE_VIF_TXPOWER; | ||
609 | |||
610 | if (!ops->hw_scan) | ||
611 | wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | | ||
612 | NL80211_FEATURE_AP_SCAN; | ||
613 | |||
589 | 614 | ||
590 | if (!ops->set_key) | 615 | if (!ops->set_key) |
591 | wiphy->flags |= WIPHY_FLAG_IBSS_RSN; | 616 | wiphy->flags |= WIPHY_FLAG_IBSS_RSN; |
@@ -599,6 +624,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
599 | local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); | 624 | local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); |
600 | 625 | ||
601 | local->ops = ops; | 626 | local->ops = ops; |
627 | local->use_chanctx = use_chanctx; | ||
602 | 628 | ||
603 | /* set up some defaults */ | 629 | /* set up some defaults */ |
604 | local->hw.queues = 1; | 630 | local->hw.queues = 1; |
@@ -612,7 +638,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
612 | local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS | | 638 | local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS | |
613 | IEEE80211_RADIOTAP_MCS_HAVE_GI | | 639 | IEEE80211_RADIOTAP_MCS_HAVE_GI | |
614 | IEEE80211_RADIOTAP_MCS_HAVE_BW; | 640 | IEEE80211_RADIOTAP_MCS_HAVE_BW; |
615 | local->user_power_level = -1; | 641 | local->user_power_level = IEEE80211_UNSET_POWER_LEVEL; |
616 | wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; | 642 | wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; |
617 | 643 | ||
618 | INIT_LIST_HEAD(&local->interfaces); | 644 | INIT_LIST_HEAD(&local->interfaces); |
@@ -626,6 +652,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
626 | spin_lock_init(&local->filter_lock); | 652 | spin_lock_init(&local->filter_lock); |
627 | spin_lock_init(&local->queue_stop_reason_lock); | 653 | spin_lock_init(&local->queue_stop_reason_lock); |
628 | 654 | ||
655 | INIT_LIST_HEAD(&local->chanctx_list); | ||
656 | mutex_init(&local->chanctx_mtx); | ||
657 | |||
629 | /* | 658 | /* |
630 | * The rx_skb_queue is only accessed from tasklets, | 659 | * The rx_skb_queue is only accessed from tasklets, |
631 | * but other SKB queues are used from within IRQ | 660 | * but other SKB queues are used from within IRQ |
@@ -641,7 +670,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
641 | INIT_WORK(&local->restart_work, ieee80211_restart_work); | 670 | INIT_WORK(&local->restart_work, ieee80211_restart_work); |
642 | 671 | ||
643 | INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); | 672 | INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); |
644 | INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work); | ||
645 | local->smps_mode = IEEE80211_SMPS_OFF; | 673 | local->smps_mode = IEEE80211_SMPS_OFF; |
646 | 674 | ||
647 | INIT_WORK(&local->dynamic_ps_enable_work, | 675 | INIT_WORK(&local->dynamic_ps_enable_work, |
@@ -719,6 +747,25 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
719 | if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan) | 747 | if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan) |
720 | return -EINVAL; | 748 | return -EINVAL; |
721 | 749 | ||
750 | if (!local->use_chanctx) { | ||
751 | for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) { | ||
752 | const struct ieee80211_iface_combination *comb; | ||
753 | |||
754 | comb = &local->hw.wiphy->iface_combinations[i]; | ||
755 | |||
756 | if (comb->num_different_channels > 1) | ||
757 | return -EINVAL; | ||
758 | } | ||
759 | } else { | ||
760 | /* | ||
761 | * WDS is currently prohibited when channel contexts are used | ||
762 | * because there's no clear definition of which channel WDS | ||
763 | * type interfaces use | ||
764 | */ | ||
765 | if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS)) | ||
766 | return -EINVAL; | ||
767 | } | ||
768 | |||
722 | /* Only HW csum features are currently compatible with mac80211 */ | 769 | /* Only HW csum features are currently compatible with mac80211 */ |
723 | feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 770 | feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
724 | NETIF_F_HW_CSUM; | 771 | NETIF_F_HW_CSUM; |
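With these checks, a driver that does not implement channel contexts may not advertise interface combinations spanning more than one channel, while a chanctx-capable driver may not advertise WDS at all. For context, interface combinations are declared to cfg80211 roughly as below; this is an illustrative sketch and the limit values are invented, not taken from this patch:

/*
 * Hypothetical single-channel combination: one AP plus up to two
 * stations.  Field names are the cfg80211 ones; the numbers are
 * example values only.
 */
static const struct ieee80211_iface_limit sketch_limits[] = {
	{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
	{ .max = 2, .types = BIT(NL80211_IFTYPE_STATION) },
};

static const struct ieee80211_iface_combination sketch_combs[] = {
	{
		.limits = sketch_limits,
		.n_limits = ARRAY_SIZE(sketch_limits),
		.max_interfaces = 3,
		/* must be 1 for drivers without chanctx support */
		.num_different_channels = 1,
	},
};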
@@ -728,6 +775,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
728 | if (hw->max_report_rates == 0) | 775 | if (hw->max_report_rates == 0) |
729 | hw->max_report_rates = hw->max_rates; | 776 | hw->max_report_rates = hw->max_rates; |
730 | 777 | ||
778 | local->rx_chains = 1; | ||
779 | |||
731 | /* | 780 | /* |
732 | * generic code guarantees at least one band, | 781 | * generic code guarantees at least one band, |
733 | * set this very early because much code assumes | 782 | * set this very early because much code assumes |
@@ -743,18 +792,28 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
743 | sband = local->hw.wiphy->bands[band]; | 792 | sband = local->hw.wiphy->bands[band]; |
744 | if (!sband) | 793 | if (!sband) |
745 | continue; | 794 | continue; |
746 | if (!local->oper_channel) { | 795 | if (!local->use_chanctx && !local->_oper_channel) { |
747 | /* init channel we're on */ | 796 | /* init channel we're on */ |
748 | local->hw.conf.channel = | 797 | local->hw.conf.channel = |
749 | local->oper_channel = &sband->channels[0]; | 798 | local->_oper_channel = &sband->channels[0]; |
750 | local->hw.conf.channel_type = NL80211_CHAN_NO_HT; | 799 | local->hw.conf.channel_type = NL80211_CHAN_NO_HT; |
751 | } | 800 | } |
801 | cfg80211_chandef_create(&local->monitor_chandef, | ||
802 | &sband->channels[0], | ||
803 | NL80211_CHAN_NO_HT); | ||
752 | channels += sband->n_channels; | 804 | channels += sband->n_channels; |
753 | 805 | ||
754 | if (max_bitrates < sband->n_bitrates) | 806 | if (max_bitrates < sband->n_bitrates) |
755 | max_bitrates = sband->n_bitrates; | 807 | max_bitrates = sband->n_bitrates; |
756 | supp_ht = supp_ht || sband->ht_cap.ht_supported; | 808 | supp_ht = supp_ht || sband->ht_cap.ht_supported; |
757 | supp_vht = supp_vht || sband->vht_cap.vht_supported; | 809 | supp_vht = supp_vht || sband->vht_cap.vht_supported; |
810 | |||
811 | if (sband->ht_cap.ht_supported) | ||
812 | local->rx_chains = | ||
813 | max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), | ||
814 | local->rx_chains); | ||
815 | |||
816 | /* TODO: consider VHT for RX chains, hopefully it's the same */ | ||
758 | } | 817 | } |
759 | 818 | ||
760 | local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) + | 819 | local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) + |
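local->rx_chains is now seeded from the HT MCS sets of the supported bands, so later SMPS and chain handling has a sane lower bound before any station information exists. The hunk relies on ieee80211_mcs_to_chains(); one plausible implementation, given that each byte of rx_mask covers the eight MCS indices of one spatial stream, is sketched below (the in-tree helper may differ in detail):

/* Highest non-empty rx_mask byte among the first four bounds the
 * number of RX spatial streams, hence RX chains. */
static u8 sketch_mcs_to_chains(const struct ieee80211_mcs_info *mcs)
{
	if (!mcs)
		return 1;
	if (mcs->rx_mask[3])
		return 4;
	if (mcs->rx_mask[2])
		return 3;
	if (mcs->rx_mask[1])
		return 2;
	return 1;
}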
@@ -778,19 +837,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
778 | hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); | 837 | hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); |
779 | hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); | 838 | hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); |
780 | 839 | ||
781 | /* | 840 | /* mac80211 doesn't support more than one IBSS interface right now */ |
782 | * mac80211 doesn't support more than 1 channel, and also not more | ||
783 | * than one IBSS interface | ||
784 | */ | ||
785 | for (i = 0; i < hw->wiphy->n_iface_combinations; i++) { | 841 | for (i = 0; i < hw->wiphy->n_iface_combinations; i++) { |
786 | const struct ieee80211_iface_combination *c; | 842 | const struct ieee80211_iface_combination *c; |
787 | int j; | 843 | int j; |
788 | 844 | ||
789 | c = &hw->wiphy->iface_combinations[i]; | 845 | c = &hw->wiphy->iface_combinations[i]; |
790 | 846 | ||
791 | if (c->num_different_channels > 1) | ||
792 | return -EINVAL; | ||
793 | |||
794 | for (j = 0; j < c->n_limits; j++) | 847 | for (j = 0; j < c->n_limits; j++) |
795 | if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && | 848 | if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && |
796 | c->limits[j].max > 1) | 849 | c->limits[j].max > 1) |
@@ -830,9 +883,21 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
830 | if (supp_ht) | 883 | if (supp_ht) |
831 | local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); | 884 | local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); |
832 | 885 | ||
833 | if (supp_vht) | 886 | if (supp_vht) { |
834 | local->scan_ies_len += | 887 | local->scan_ies_len += |
835 | 2 + sizeof(struct ieee80211_vht_capabilities); | 888 | 2 + sizeof(struct ieee80211_vht_cap); |
889 | |||
890 | /* | ||
891 | * (for now at least), drivers wanting to use VHT must | ||
892 | * support channel contexts, as they contain all the | ||
893 | * necessary VHT information and the global hw config | ||
894 | * doesn't (yet) | ||
895 | */ | ||
896 | if (WARN_ON(!local->use_chanctx)) { | ||
897 | result = -EINVAL; | ||
898 | goto fail_wiphy_register; | ||
899 | } | ||
900 | } | ||
836 | 901 | ||
837 | if (!local->ops->hw_scan) { | 902 | if (!local->ops->hw_scan) { |
838 | /* For hw_scan, driver needs to set these up. */ | 903 | /* For hw_scan, driver needs to set these up. */ |
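Two things close out the main.c changes: the scan IE length accounting switches to the renamed struct ieee80211_vht_cap, and VHT support is made conditional on channel contexts, since only the chanctx configuration carries the necessary VHT channel information. The length bookkeeping written out (the sizes are the fixed element bodies defined by 802.11, assuming the usual struct layouts):

/*
 * Every capability element costs 2 bytes of header (ID + length) plus
 * its fixed body:
 *   HT  Capabilities: 2 + sizeof(struct ieee80211_ht_cap)  = 2 + 26 = 28
 *   VHT Capabilities: 2 + sizeof(struct ieee80211_vht_cap) = 2 + 12 = 14
 * so a dual HT+VHT device grows scan_ies_len by 42 bytes here.
 */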
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index ff0296c7bab8..1bf03f9ff3ba 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -76,7 +76,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, | |||
76 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 76 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
77 | struct ieee80211_local *local = sdata->local; | 77 | struct ieee80211_local *local = sdata->local; |
78 | u32 basic_rates = 0; | 78 | u32 basic_rates = 0; |
79 | enum nl80211_channel_type sta_channel_type = NL80211_CHAN_NO_HT; | 79 | struct cfg80211_chan_def sta_chan_def; |
80 | 80 | ||
81 | /* | 81 | /* |
82 | * As support for each feature is added, check for matching | 82 | * As support for each feature is added, check for matching |
@@ -97,23 +97,17 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata, | |||
97 | (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) | 97 | (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) |
98 | goto mismatch; | 98 | goto mismatch; |
99 | 99 | ||
100 | ieee80211_sta_get_rates(local, ie, local->oper_channel->band, | 100 | ieee80211_sta_get_rates(local, ie, ieee80211_get_sdata_band(sdata), |
101 | &basic_rates); | 101 | &basic_rates); |
102 | 102 | ||
103 | if (sdata->vif.bss_conf.basic_rates != basic_rates) | 103 | if (sdata->vif.bss_conf.basic_rates != basic_rates) |
104 | goto mismatch; | 104 | goto mismatch; |
105 | 105 | ||
106 | if (ie->ht_operation) | 106 | ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan, |
107 | sta_channel_type = | 107 | ie->ht_operation, &sta_chan_def); |
108 | ieee80211_ht_oper_to_channel_type(ie->ht_operation); | 108 | |
109 | 109 | if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef, | |
110 | /* Disallow HT40+/- mismatch */ | 110 | &sta_chan_def)) |
111 | if (ie->ht_operation && | ||
112 | (sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40MINUS || | ||
113 | sdata->vif.bss_conf.channel_type == NL80211_CHAN_HT40PLUS) && | ||
114 | (sta_channel_type == NL80211_CHAN_HT40MINUS || | ||
115 | sta_channel_type == NL80211_CHAN_HT40PLUS) && | ||
116 | sdata->vif.bss_conf.channel_type != sta_channel_type) | ||
117 | goto mismatch; | 111 | goto mismatch; |
118 | 112 | ||
119 | return true; | 113 | return true; |
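mesh_matches_local() no longer hand-checks HT40+/- agreement; it builds a chandef from the peer's HT Operation element and defers to cfg80211_chandef_compatible(). The core of that conversion, mapping the secondary channel offset bits to a channel type before cfg80211_chandef_create() turns it into a chandef, can be sketched as follows (assuming a valid HT Operation element; the real ieee80211_ht_oper_to_chandef() also handles the element being absent):

static enum nl80211_channel_type
sketch_ht_oper_to_chan_type(const struct ieee80211_ht_operation *ht_oper)
{
	switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
	case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
		return NL80211_CHAN_HT40PLUS;
	case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
		return NL80211_CHAN_HT40MINUS;
	default:
		return NL80211_CHAN_HT20;
	}
}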
@@ -129,7 +123,7 @@ mismatch: | |||
129 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) | 123 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) |
130 | { | 124 | { |
131 | return (ie->mesh_config->meshconf_cap & | 125 | return (ie->mesh_config->meshconf_cap & |
132 | MESHCONF_CAPAB_ACCEPT_PLINKS) != 0; | 126 | IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS) != 0; |
133 | } | 127 | } |
134 | 128 | ||
135 | /** | 129 | /** |
@@ -264,16 +258,16 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata) | |||
264 | /* Authentication Protocol identifier */ | 258 | /* Authentication Protocol identifier */ |
265 | *pos++ = ifmsh->mesh_auth_id; | 259 | *pos++ = ifmsh->mesh_auth_id; |
266 | /* Mesh Formation Info - number of neighbors */ | 260 | /* Mesh Formation Info - number of neighbors */ |
267 | neighbors = atomic_read(&ifmsh->mshstats.estab_plinks); | 261 | neighbors = atomic_read(&ifmsh->estab_plinks); |
268 | /* Number of neighbor mesh STAs or 15 whichever is smaller */ | 262 | /* Number of neighbor mesh STAs or 15 whichever is smaller */ |
269 | neighbors = (neighbors > 15) ? 15 : neighbors; | 263 | neighbors = (neighbors > 15) ? 15 : neighbors; |
270 | *pos++ = neighbors << 1; | 264 | *pos++ = neighbors << 1; |
271 | /* Mesh capability */ | 265 | /* Mesh capability */ |
272 | *pos = MESHCONF_CAPAB_FORWARDING; | 266 | *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING; |
273 | *pos |= ifmsh->accepting_plinks ? | 267 | *pos |= ifmsh->accepting_plinks ? |
274 | MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; | 268 | IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; |
275 | *pos++ |= ifmsh->adjusting_tbtt ? | 269 | *pos++ |= ifmsh->adjusting_tbtt ? |
276 | MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00; | 270 | IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00; |
277 | *pos++ = 0x00; | 271 | *pos++ = 0x00; |
278 | 272 | ||
279 | return 0; | 273 | return 0; |
@@ -355,12 +349,22 @@ int mesh_add_ds_params_ie(struct sk_buff *skb, | |||
355 | { | 349 | { |
356 | struct ieee80211_local *local = sdata->local; | 350 | struct ieee80211_local *local = sdata->local; |
357 | struct ieee80211_supported_band *sband; | 351 | struct ieee80211_supported_band *sband; |
358 | struct ieee80211_channel *chan = local->oper_channel; | 352 | struct ieee80211_chanctx_conf *chanctx_conf; |
353 | struct ieee80211_channel *chan; | ||
359 | u8 *pos; | 354 | u8 *pos; |
360 | 355 | ||
361 | if (skb_tailroom(skb) < 3) | 356 | if (skb_tailroom(skb) < 3) |
362 | return -ENOMEM; | 357 | return -ENOMEM; |
363 | 358 | ||
359 | rcu_read_lock(); | ||
360 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
361 | if (WARN_ON(!chanctx_conf)) { | ||
362 | rcu_read_unlock(); | ||
363 | return -EINVAL; | ||
364 | } | ||
365 | chan = chanctx_conf->def.chan; | ||
366 | rcu_read_unlock(); | ||
367 | |||
364 | sband = local->hw.wiphy->bands[chan->band]; | 368 | sband = local->hw.wiphy->bands[chan->band]; |
365 | if (sband->band == IEEE80211_BAND_2GHZ) { | 369 | if (sband->band == IEEE80211_BAND_2GHZ) { |
366 | pos = skb_put(skb, 2 + 1); | 370 | pos = skb_put(skb, 2 + 1); |
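The pattern introduced here recurs throughout this series wherever local->oper_channel used to be read: the operating channel now lives in the vif's RCU-protected channel-context configuration. As a standalone sketch of the idiom (the channel pointer itself belongs to the wiphy's band data, so it remains valid after the read-side critical section ends):

static struct ieee80211_channel *
sketch_get_vif_channel(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_chanctx_conf *conf;
	struct ieee80211_channel *chan = NULL;

	rcu_read_lock();
	conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (conf)
		chan = conf->def.chan;
	rcu_read_unlock();

	return chan;	/* NULL if no channel context is assigned */
}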
@@ -376,12 +380,13 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb, | |||
376 | struct ieee80211_sub_if_data *sdata) | 380 | struct ieee80211_sub_if_data *sdata) |
377 | { | 381 | { |
378 | struct ieee80211_local *local = sdata->local; | 382 | struct ieee80211_local *local = sdata->local; |
383 | enum ieee80211_band band = ieee80211_get_sdata_band(sdata); | ||
379 | struct ieee80211_supported_band *sband; | 384 | struct ieee80211_supported_band *sband; |
380 | u8 *pos; | 385 | u8 *pos; |
381 | 386 | ||
382 | sband = local->hw.wiphy->bands[local->oper_channel->band]; | 387 | sband = local->hw.wiphy->bands[band]; |
383 | if (!sband->ht_cap.ht_supported || | 388 | if (!sband->ht_cap.ht_supported || |
384 | sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) | 389 | sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) |
385 | return 0; | 390 | return 0; |
386 | 391 | ||
387 | if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap)) | 392 | if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap)) |
@@ -397,14 +402,26 @@ int mesh_add_ht_oper_ie(struct sk_buff *skb, | |||
397 | struct ieee80211_sub_if_data *sdata) | 402 | struct ieee80211_sub_if_data *sdata) |
398 | { | 403 | { |
399 | struct ieee80211_local *local = sdata->local; | 404 | struct ieee80211_local *local = sdata->local; |
400 | struct ieee80211_channel *channel = local->oper_channel; | 405 | struct ieee80211_chanctx_conf *chanctx_conf; |
406 | struct ieee80211_channel *channel; | ||
401 | enum nl80211_channel_type channel_type = | 407 | enum nl80211_channel_type channel_type = |
402 | sdata->vif.bss_conf.channel_type; | 408 | cfg80211_get_chandef_type(&sdata->vif.bss_conf.chandef); |
403 | struct ieee80211_supported_band *sband = | 409 | struct ieee80211_supported_band *sband; |
404 | local->hw.wiphy->bands[channel->band]; | 410 | struct ieee80211_sta_ht_cap *ht_cap; |
405 | struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap; | ||
406 | u8 *pos; | 411 | u8 *pos; |
407 | 412 | ||
413 | rcu_read_lock(); | ||
414 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
415 | if (WARN_ON(!chanctx_conf)) { | ||
416 | rcu_read_unlock(); | ||
417 | return -EINVAL; | ||
418 | } | ||
419 | channel = chanctx_conf->def.chan; | ||
420 | rcu_read_unlock(); | ||
421 | |||
422 | sband = local->hw.wiphy->bands[channel->band]; | ||
423 | ht_cap = &sband->ht_cap; | ||
424 | |||
408 | if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT) | 425 | if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT) |
409 | return 0; | 426 | return 0; |
410 | 427 | ||
@@ -412,7 +429,7 @@ int mesh_add_ht_oper_ie(struct sk_buff *skb, | |||
412 | return -ENOMEM; | 429 | return -ENOMEM; |
413 | 430 | ||
414 | pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); | 431 | pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); |
415 | ieee80211_ie_build_ht_oper(pos, ht_cap, channel, channel_type, | 432 | ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chandef, |
416 | sdata->vif.bss_conf.ht_operation_mode); | 433 | sdata->vif.bss_conf.ht_operation_mode); |
417 | 434 | ||
418 | return 0; | 435 | return 0; |
@@ -610,7 +627,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) | |||
610 | sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; | 627 | sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; |
611 | sdata->vif.bss_conf.basic_rates = | 628 | sdata->vif.bss_conf.basic_rates = |
612 | ieee80211_mandatory_rates(sdata->local, | 629 | ieee80211_mandatory_rates(sdata->local, |
613 | sdata->local->oper_channel->band); | 630 | ieee80211_get_sdata_band(sdata)); |
614 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | | 631 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | |
615 | BSS_CHANGED_BEACON_ENABLED | | 632 | BSS_CHANGED_BEACON_ENABLED | |
616 | BSS_CHANGED_HT | | 633 | BSS_CHANGED_HT | |
@@ -680,8 +697,10 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, | |||
680 | ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, | 697 | ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, |
681 | &elems); | 698 | &elems); |
682 | 699 | ||
683 | /* ignore beacons from secure mesh peers if our security is off */ | 700 | /* ignore non-mesh or secure / unsecure mismatch */ |
684 | if (elems.rsn_len && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) | 701 | if ((!elems.mesh_id || !elems.mesh_config) || |
702 | (elems.rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) || | ||
703 | (!elems.rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)) | ||
685 | return; | 704 | return; |
686 | 705 | ||
687 | if (elems.ds_params && elems.ds_params_len == 1) | 706 | if (elems.ds_params && elems.ds_params_len == 1) |
@@ -694,8 +713,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, | |||
694 | if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) | 713 | if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) |
695 | return; | 714 | return; |
696 | 715 | ||
697 | if (elems.mesh_id && elems.mesh_config && | 716 | if (mesh_matches_local(sdata, &elems)) |
698 | mesh_matches_local(sdata, &elems)) | ||
699 | mesh_neighbour_update(sdata, mgmt->sa, &elems); | 717 | mesh_neighbour_update(sdata, mgmt->sa, &elems); |
700 | 718 | ||
701 | if (ifmsh->sync_ops) | 719 | if (ifmsh->sync_ops) |
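The beacon and probe-response filter is also tightened: instead of only dropping RSN-protected beacons on an open mesh, the code now requires both a Mesh ID and a Mesh Configuration element and insists that the peer's security setting matches the local one in both directions. The accept condition, written as a small predicate:

static bool sketch_mesh_bcn_acceptable(struct ieee80211_sub_if_data *sdata,
				       struct ieee802_11_elems *elems)
{
	bool peer_secured = elems->rsn != NULL;
	bool local_secured = sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE;

	return elems->mesh_id && elems->mesh_config &&
	       peer_secured == local_secured;
}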
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 25d0f17dec71..7c9215fb2ac8 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -19,20 +19,6 @@ | |||
19 | /* Data structures */ | 19 | /* Data structures */ |
20 | 20 | ||
21 | /** | 21 | /** |
22 | * enum mesh_config_capab_flags - mesh config IE capability flags | ||
23 | * | ||
24 | * @MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish | ||
25 | * additional mesh peerings with other mesh STAs | ||
26 | * @MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs | ||
27 | * @MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure is ongoing | ||
28 | */ | ||
29 | enum mesh_config_capab_flags { | ||
30 | MESHCONF_CAPAB_ACCEPT_PLINKS = BIT(0), | ||
31 | MESHCONF_CAPAB_FORWARDING = BIT(3), | ||
32 | MESHCONF_CAPAB_TBTT_ADJUSTING = BIT(5), | ||
33 | }; | ||
34 | |||
35 | /** | ||
36 | * enum mesh_path_flags - mac80211 mesh path flags | 22 | * enum mesh_path_flags - mac80211 mesh path flags |
37 | * | 23 | * |
38 | * | 24 | * |
@@ -256,7 +242,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); | |||
256 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); | 242 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); |
257 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); | 243 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); |
258 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); | 244 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); |
259 | struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method); | 245 | const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method); |
260 | 246 | ||
261 | /* Mesh paths */ | 247 | /* Mesh paths */ |
262 | int mesh_nexthop_lookup(struct sk_buff *skb, | 248 | int mesh_nexthop_lookup(struct sk_buff *skb, |
@@ -324,7 +310,7 @@ extern int mesh_allocated; | |||
324 | static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) | 310 | static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) |
325 | { | 311 | { |
326 | return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks - | 312 | return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks - |
327 | atomic_read(&sdata->u.mesh.mshstats.estab_plinks); | 313 | atomic_read(&sdata->u.mesh.estab_plinks); |
328 | } | 314 | } |
329 | 315 | ||
330 | static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata) | 316 | static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata) |
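The mesh-local capability enum is dropped here in favour of shared IEEE80211_MESHCONF_CAPAB_* definitions (presumably moved into the common 802.11 header, which is outside this diff); the bit positions are unchanged. For instance, a mesh STA that forwards MSDUs and accepts new peerings still advertises

	IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS | IEEE80211_MESHCONF_CAPAB_FORWARDING
		= BIT(0) | BIT(3) = 0x09

in the Mesh Configuration element's capability octet, exactly as mesh_add_meshconf_ie() builds it above.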
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 3ab34d816897..ca52dfdd5375 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c | |||
@@ -19,12 +19,6 @@ | |||
19 | #define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \ | 19 | #define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \ |
20 | jiffies + HZ * t / 1000)) | 20 | jiffies + HZ * t / 1000)) |
21 | 21 | ||
22 | #define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries) | ||
23 | #define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout) | ||
24 | #define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout) | ||
25 | #define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout) | ||
26 | #define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks) | ||
27 | |||
28 | /* We only need a valid sta if user configured a minimum rssi_threshold. */ | 22 | /* We only need a valid sta if user configured a minimum rssi_threshold. */ |
29 | #define rssi_threshold_check(sta, sdata) \ | 23 | #define rssi_threshold_check(sta, sdata) \ |
30 | (sdata->u.mesh.mshcfg.rssi_threshold == 0 ||\ | 24 | (sdata->u.mesh.mshcfg.rssi_threshold == 0 ||\ |
@@ -50,14 +44,14 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, | |||
50 | static inline | 44 | static inline |
51 | u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) | 45 | u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) |
52 | { | 46 | { |
53 | atomic_inc(&sdata->u.mesh.mshstats.estab_plinks); | 47 | atomic_inc(&sdata->u.mesh.estab_plinks); |
54 | return mesh_accept_plinks_update(sdata); | 48 | return mesh_accept_plinks_update(sdata); |
55 | } | 49 | } |
56 | 50 | ||
57 | static inline | 51 | static inline |
58 | u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) | 52 | u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) |
59 | { | 53 | { |
60 | atomic_dec(&sdata->u.mesh.mshstats.estab_plinks); | 54 | atomic_dec(&sdata->u.mesh.estab_plinks); |
61 | return mesh_accept_plinks_update(sdata); | 55 | return mesh_accept_plinks_update(sdata); |
62 | } | 56 | } |
63 | 57 | ||
@@ -117,7 +111,7 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) | |||
117 | u16 ht_opmode; | 111 | u16 ht_opmode; |
118 | bool non_ht_sta = false, ht20_sta = false; | 112 | bool non_ht_sta = false, ht20_sta = false; |
119 | 113 | ||
120 | if (sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) | 114 | if (sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) |
121 | return 0; | 115 | return 0; |
122 | 116 | ||
123 | rcu_read_lock(); | 117 | rcu_read_lock(); |
@@ -126,14 +120,14 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) | |||
126 | sta->plink_state != NL80211_PLINK_ESTAB) | 120 | sta->plink_state != NL80211_PLINK_ESTAB) |
127 | continue; | 121 | continue; |
128 | 122 | ||
129 | switch (sta->ch_type) { | 123 | switch (sta->ch_width) { |
130 | case NL80211_CHAN_NO_HT: | 124 | case NL80211_CHAN_WIDTH_20_NOHT: |
131 | mpl_dbg(sdata, | 125 | mpl_dbg(sdata, |
132 | "mesh_plink %pM: nonHT sta (%pM) is present\n", | 126 | "mesh_plink %pM: nonHT sta (%pM) is present\n", |
133 | sdata->vif.addr, sta->sta.addr); | 127 | sdata->vif.addr, sta->sta.addr); |
134 | non_ht_sta = true; | 128 | non_ht_sta = true; |
135 | goto out; | 129 | goto out; |
136 | case NL80211_CHAN_HT20: | 130 | case NL80211_CHAN_WIDTH_20: |
137 | mpl_dbg(sdata, | 131 | mpl_dbg(sdata, |
138 | "mesh_plink %pM: HT20 sta (%pM) is present\n", | 132 | "mesh_plink %pM: HT20 sta (%pM) is present\n", |
139 | sdata->vif.addr, sta->sta.addr); | 133 | sdata->vif.addr, sta->sta.addr); |
@@ -148,7 +142,7 @@ out: | |||
148 | if (non_ht_sta) | 142 | if (non_ht_sta) |
149 | ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED; | 143 | ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED; |
150 | else if (ht20_sta && | 144 | else if (ht20_sta && |
151 | sdata->vif.bss_conf.channel_type > NL80211_CHAN_HT20) | 145 | sdata->vif.bss_conf.chandef.width > NL80211_CHAN_WIDTH_20) |
152 | ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ; | 146 | ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ; |
153 | else | 147 | else |
154 | ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE; | 148 | ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE; |
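The HT protection decision in mesh_set_ht_prot_mode() keeps its old shape; only the inputs change from nl80211_channel_type to nl80211_chan_width, whose values at this point increase with width, so the '>' comparison still works. Summarised as a sketch:

static u16 sketch_mesh_ht_opmode(bool non_ht_sta, bool ht20_sta,
				 enum nl80211_chan_width width)
{
	if (non_ht_sta)			/* any non-HT peer present */
		return IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
	if (ht20_sta && width > NL80211_CHAN_WIDTH_20)
		return IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
	return IEEE80211_HT_OP_MODE_PROTECTION_NONE;
}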
@@ -252,6 +246,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, | |||
252 | mgmt->u.action.u.self_prot.action_code = action; | 246 | mgmt->u.action.u.self_prot.action_code = action; |
253 | 247 | ||
254 | if (action != WLAN_SP_MESH_PEERING_CLOSE) { | 248 | if (action != WLAN_SP_MESH_PEERING_CLOSE) { |
249 | enum ieee80211_band band = ieee80211_get_sdata_band(sdata); | ||
250 | |||
255 | /* capability info */ | 251 | /* capability info */ |
256 | pos = skb_put(skb, 2); | 252 | pos = skb_put(skb, 2); |
257 | memset(pos, 0, 2); | 253 | memset(pos, 0, 2); |
@@ -260,10 +256,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, | |||
260 | pos = skb_put(skb, 2); | 256 | pos = skb_put(skb, 2); |
261 | memcpy(pos + 2, &plid, 2); | 257 | memcpy(pos + 2, &plid, 2); |
262 | } | 258 | } |
263 | if (ieee80211_add_srates_ie(sdata, skb, true, | 259 | if (ieee80211_add_srates_ie(sdata, skb, true, band) || |
264 | local->oper_channel->band) || | 260 | ieee80211_add_ext_srates_ie(sdata, skb, true, band) || |
265 | ieee80211_add_ext_srates_ie(sdata, skb, true, | ||
266 | local->oper_channel->band) || | ||
267 | mesh_add_rsn_ie(skb, sdata) || | 261 | mesh_add_rsn_ie(skb, sdata) || |
268 | mesh_add_meshid_ie(skb, sdata) || | 262 | mesh_add_meshid_ie(skb, sdata) || |
269 | mesh_add_meshconf_ie(skb, sdata)) | 263 | mesh_add_meshconf_ie(skb, sdata)) |
@@ -343,7 +337,7 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, | |||
343 | struct ieee802_11_elems *elems) | 337 | struct ieee802_11_elems *elems) |
344 | { | 338 | { |
345 | struct ieee80211_local *local = sdata->local; | 339 | struct ieee80211_local *local = sdata->local; |
346 | enum ieee80211_band band = local->oper_channel->band; | 340 | enum ieee80211_band band = ieee80211_get_sdata_band(sdata); |
347 | struct ieee80211_supported_band *sband; | 341 | struct ieee80211_supported_band *sband; |
348 | u32 rates, basic_rates = 0; | 342 | u32 rates, basic_rates = 0; |
349 | struct sta_info *sta; | 343 | struct sta_info *sta; |
@@ -378,7 +372,7 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, | |||
378 | 372 | ||
379 | sta->sta.supp_rates[band] = rates; | 373 | sta->sta.supp_rates[band] = rates; |
380 | if (elems->ht_cap_elem && | 374 | if (elems->ht_cap_elem && |
381 | sdata->vif.bss_conf.channel_type != NL80211_CHAN_NO_HT) | 375 | sdata->vif.bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) |
382 | ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, | 376 | ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, |
383 | elems->ht_cap_elem, | 377 | elems->ht_cap_elem, |
384 | &sta->sta.ht_cap); | 378 | &sta->sta.ht_cap); |
@@ -386,12 +380,15 @@ static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata, | |||
386 | memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap)); | 380 | memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap)); |
387 | 381 | ||
388 | if (elems->ht_operation) { | 382 | if (elems->ht_operation) { |
383 | struct cfg80211_chan_def chandef; | ||
384 | |||
389 | if (!(elems->ht_operation->ht_param & | 385 | if (!(elems->ht_operation->ht_param & |
390 | IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) | 386 | IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) |
391 | sta->sta.ht_cap.cap &= | 387 | sta->sta.ht_cap.cap &= |
392 | ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; | 388 | ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; |
393 | sta->ch_type = | 389 | ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan, |
394 | ieee80211_ht_oper_to_channel_type(elems->ht_operation); | 390 | elems->ht_operation, &chandef); |
391 | sta->ch_width = chandef.width; | ||
395 | } | 392 | } |
396 | 393 | ||
397 | rate_control_rate_init(sta); | 394 | rate_control_rate_init(sta); |
@@ -430,6 +427,7 @@ static void mesh_plink_timer(unsigned long data) | |||
430 | struct sta_info *sta; | 427 | struct sta_info *sta; |
431 | __le16 llid, plid, reason; | 428 | __le16 llid, plid, reason; |
432 | struct ieee80211_sub_if_data *sdata; | 429 | struct ieee80211_sub_if_data *sdata; |
430 | struct mesh_config *mshcfg; | ||
433 | 431 | ||
434 | /* | 432 | /* |
435 | * This STA is valid because sta_info_destroy() will | 433 | * This STA is valid because sta_info_destroy() will |
@@ -456,12 +454,13 @@ static void mesh_plink_timer(unsigned long data) | |||
456 | llid = sta->llid; | 454 | llid = sta->llid; |
457 | plid = sta->plid; | 455 | plid = sta->plid; |
458 | sdata = sta->sdata; | 456 | sdata = sta->sdata; |
457 | mshcfg = &sdata->u.mesh.mshcfg; | ||
459 | 458 | ||
460 | switch (sta->plink_state) { | 459 | switch (sta->plink_state) { |
461 | case NL80211_PLINK_OPN_RCVD: | 460 | case NL80211_PLINK_OPN_RCVD: |
462 | case NL80211_PLINK_OPN_SNT: | 461 | case NL80211_PLINK_OPN_SNT: |
463 | /* retry timer */ | 462 | /* retry timer */ |
464 | if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { | 463 | if (sta->plink_retries < mshcfg->dot11MeshMaxRetries) { |
465 | u32 rand; | 464 | u32 rand; |
466 | mpl_dbg(sta->sdata, | 465 | mpl_dbg(sta->sdata, |
467 | "Mesh plink for %pM (retry, timeout): %d %d\n", | 466 | "Mesh plink for %pM (retry, timeout): %d %d\n", |
@@ -484,7 +483,7 @@ static void mesh_plink_timer(unsigned long data) | |||
484 | if (!reason) | 483 | if (!reason) |
485 | reason = cpu_to_le16(WLAN_REASON_MESH_CONFIRM_TIMEOUT); | 484 | reason = cpu_to_le16(WLAN_REASON_MESH_CONFIRM_TIMEOUT); |
486 | sta->plink_state = NL80211_PLINK_HOLDING; | 485 | sta->plink_state = NL80211_PLINK_HOLDING; |
487 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); | 486 | mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout); |
488 | spin_unlock_bh(&sta->lock); | 487 | spin_unlock_bh(&sta->lock); |
489 | mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, | 488 | mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, |
490 | sta->sta.addr, llid, plid, reason); | 489 | sta->sta.addr, llid, plid, reason); |
@@ -543,7 +542,7 @@ int mesh_plink_open(struct sta_info *sta) | |||
543 | return -EBUSY; | 542 | return -EBUSY; |
544 | } | 543 | } |
545 | sta->plink_state = NL80211_PLINK_OPN_SNT; | 544 | sta->plink_state = NL80211_PLINK_OPN_SNT; |
546 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); | 545 | mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout); |
547 | spin_unlock_bh(&sta->lock); | 546 | spin_unlock_bh(&sta->lock); |
548 | mpl_dbg(sdata, | 547 | mpl_dbg(sdata, |
549 | "Mesh plink: starting establishment with %pM\n", | 548 | "Mesh plink: starting establishment with %pM\n", |
@@ -570,6 +569,7 @@ void mesh_plink_block(struct sta_info *sta) | |||
570 | void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, | 569 | void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, |
571 | size_t len, struct ieee80211_rx_status *rx_status) | 570 | size_t len, struct ieee80211_rx_status *rx_status) |
572 | { | 571 | { |
572 | struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; | ||
573 | struct ieee802_11_elems elems; | 573 | struct ieee802_11_elems elems; |
574 | struct sta_info *sta; | 574 | struct sta_info *sta; |
575 | enum plink_event event; | 575 | enum plink_event event; |
@@ -777,7 +777,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m | |||
777 | sta->plid = plid; | 777 | sta->plid = plid; |
778 | get_random_bytes(&llid, 2); | 778 | get_random_bytes(&llid, 2); |
779 | sta->llid = llid; | 779 | sta->llid = llid; |
780 | mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); | 780 | mesh_plink_timer_set(sta, |
781 | mshcfg->dot11MeshRetryTimeout); | ||
781 | spin_unlock_bh(&sta->lock); | 782 | spin_unlock_bh(&sta->lock); |
782 | mesh_plink_frame_tx(sdata, | 783 | mesh_plink_frame_tx(sdata, |
783 | WLAN_SP_MESH_PEERING_OPEN, | 784 | WLAN_SP_MESH_PEERING_OPEN, |
@@ -803,7 +804,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m | |||
803 | sta->reason = reason; | 804 | sta->reason = reason; |
804 | sta->plink_state = NL80211_PLINK_HOLDING; | 805 | sta->plink_state = NL80211_PLINK_HOLDING; |
805 | if (!mod_plink_timer(sta, | 806 | if (!mod_plink_timer(sta, |
806 | dot11MeshHoldingTimeout(sdata))) | 807 | mshcfg->dot11MeshHoldingTimeout)) |
807 | sta->ignore_plink_timer = true; | 808 | sta->ignore_plink_timer = true; |
808 | 809 | ||
809 | llid = sta->llid; | 810 | llid = sta->llid; |
@@ -825,7 +826,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m | |||
825 | case CNF_ACPT: | 826 | case CNF_ACPT: |
826 | sta->plink_state = NL80211_PLINK_CNF_RCVD; | 827 | sta->plink_state = NL80211_PLINK_CNF_RCVD; |
827 | if (!mod_plink_timer(sta, | 828 | if (!mod_plink_timer(sta, |
828 | dot11MeshConfirmTimeout(sdata))) | 829 | mshcfg->dot11MeshConfirmTimeout)) |
829 | sta->ignore_plink_timer = true; | 830 | sta->ignore_plink_timer = true; |
830 | 831 | ||
831 | spin_unlock_bh(&sta->lock); | 832 | spin_unlock_bh(&sta->lock); |
@@ -847,7 +848,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m | |||
847 | sta->reason = reason; | 848 | sta->reason = reason; |
848 | sta->plink_state = NL80211_PLINK_HOLDING; | 849 | sta->plink_state = NL80211_PLINK_HOLDING; |
849 | if (!mod_plink_timer(sta, | 850 | if (!mod_plink_timer(sta, |
850 | dot11MeshHoldingTimeout(sdata))) | 851 | mshcfg->dot11MeshHoldingTimeout)) |
851 | sta->ignore_plink_timer = true; | 852 | sta->ignore_plink_timer = true; |
852 | 853 | ||
853 | llid = sta->llid; | 854 | llid = sta->llid; |
@@ -888,7 +889,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m | |||
888 | sta->reason = reason; | 889 | sta->reason = reason; |
889 | sta->plink_state = NL80211_PLINK_HOLDING; | 890 | sta->plink_state = NL80211_PLINK_HOLDING; |
890 | if (!mod_plink_timer(sta, | 891 | if (!mod_plink_timer(sta, |
891 | dot11MeshHoldingTimeout(sdata))) | 892 | mshcfg->dot11MeshHoldingTimeout)) |
892 | sta->ignore_plink_timer = true; | 893 | sta->ignore_plink_timer = true; |
893 | 894 | ||
894 | llid = sta->llid; | 895 | llid = sta->llid; |
@@ -923,7 +924,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m | |||
923 | changed |= __mesh_plink_deactivate(sta); | 924 | changed |= __mesh_plink_deactivate(sta); |
924 | sta->plink_state = NL80211_PLINK_HOLDING; | 925 | sta->plink_state = NL80211_PLINK_HOLDING; |
925 | llid = sta->llid; | 926 | llid = sta->llid; |
926 | mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); | 927 | mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout); |
927 | spin_unlock_bh(&sta->lock); | 928 | spin_unlock_bh(&sta->lock); |
928 | changed |= mesh_set_ht_prot_mode(sdata); | 929 | changed |= mesh_set_ht_prot_mode(sdata); |
929 | mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, | 930 | mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, |
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c index a16b7b4b1e02..0f40086cce18 100644 --- a/net/mac80211/mesh_sync.c +++ b/net/mac80211/mesh_sync.c | |||
@@ -43,7 +43,7 @@ struct sync_method { | |||
43 | static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie) | 43 | static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie) |
44 | { | 44 | { |
45 | return (ie->mesh_config->meshconf_cap & | 45 | return (ie->mesh_config->meshconf_cap & |
46 | MESHCONF_CAPAB_TBTT_ADJUSTING) != 0; | 46 | IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING) != 0; |
47 | } | 47 | } |
48 | 48 | ||
49 | void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata) | 49 | void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata) |
@@ -116,43 +116,13 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, | |||
116 | goto no_sync; | 116 | goto no_sync; |
117 | } | 117 | } |
118 | 118 | ||
119 | if (rx_status->flag & RX_FLAG_MACTIME_MPDU && rx_status->mactime) { | 119 | if (ieee80211_have_rx_timestamp(rx_status)) |
120 | /* | 120 | /* time when timestamp field was received */ |
121 | * The mactime is defined as the time the first data symbol | 121 | t_r = ieee80211_calculate_rx_timestamp(local, rx_status, |
122 | * of the frame hits the PHY, and the timestamp of the beacon | 122 | 24 + 12 + |
123 | * is defined as "the time that the data symbol containing the | 123 | elems->total_len + |
124 | * first bit of the timestamp is transmitted to the PHY plus | 124 | FCS_LEN, |
125 | * the transmitting STA's delays through its local PHY from the | 125 | 24); |
126 | * MAC-PHY interface to its interface with the WM" (802.11 | ||
127 | * 11.1.2) | ||
128 | * | ||
129 | * T_r, in 13.13.2.2.2, is just defined as "the frame reception | ||
130 | * time" but we unless we interpret that time to be the same | ||
131 | * time of the beacon timestamp, the offset calculation will be | ||
132 | * off. Below we adjust t_r to be "the time at which the first | ||
133 | * symbol of the timestamp element in the beacon is received". | ||
134 | * This correction depends on the rate. | ||
135 | * | ||
136 | * Based on similar code in ibss.c | ||
137 | */ | ||
138 | int rate; | ||
139 | |||
140 | if (rx_status->flag & RX_FLAG_HT) { | ||
141 | /* TODO: | ||
142 | * In principle there could be HT-beacons (Dual Beacon | ||
143 | * HT Operation options), but for now ignore them and | ||
144 | * just use the primary (i.e. non-HT) beacons for | ||
145 | * synchronization. | ||
146 | * */ | ||
147 | goto no_sync; | ||
148 | } else | ||
149 | rate = local->hw.wiphy->bands[rx_status->band]-> | ||
150 | bitrates[rx_status->rate_idx].bitrate; | ||
151 | |||
152 | /* 24 bytes of header * 8 bits/byte * | ||
153 | * 10*(100 Kbps)/Mbps / rate (100 Kbps)*/ | ||
154 | t_r = rx_status->mactime + (24 * 8 * 10 / rate); | ||
155 | } | ||
156 | 126 | ||
157 | /* Timing offset calculation (see 13.13.2.2.2) */ | 127 | /* Timing offset calculation (see 13.13.2.2.2) */ |
158 | t_t = le64_to_cpu(mgmt->u.beacon.timestamp); | 128 | t_t = le64_to_cpu(mgmt->u.beacon.timestamp); |
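The removed block open-coded the correction from 'mactime' to 'time the timestamp field was received'; that work now goes through ieee80211_calculate_rx_timestamp(), which is passed the full MPDU length (24-byte header, 12 bytes of fixed beacon fields, the elements, and the FCS) and the 24-byte offset of the timestamp field. What the old code computed by hand was just the header airtime at the beacon's legacy bitrate:

/*
 * Header airtime in microseconds; bitrate is in 100 kbps units as in
 * struct ieee80211_rate, matching the removed 24 * 8 * 10 / rate term.
 * Example: 192 us at 1 Mb/s (rate = 10), 32 us at 6 Mb/s (rate = 60).
 */
static u32 sketch_header_airtime_us(u16 bitrate_100kbps)
{
	return 24 * 8 * 10 / bitrate_100kbps;
}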
@@ -234,49 +204,7 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata) | |||
234 | spin_unlock_bh(&ifmsh->sync_offset_lock); | 204 | spin_unlock_bh(&ifmsh->sync_offset_lock); |
235 | } | 205 | } |
236 | 206 | ||
237 | static const u8 *mesh_get_vendor_oui(struct ieee80211_sub_if_data *sdata) | 207 | static const struct sync_method sync_methods[] = { |
238 | { | ||
239 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
240 | u8 offset; | ||
241 | |||
242 | if (!ifmsh->ie || !ifmsh->ie_len) | ||
243 | return NULL; | ||
244 | |||
245 | offset = ieee80211_ie_split_vendor(ifmsh->ie, | ||
246 | ifmsh->ie_len, 0); | ||
247 | |||
248 | if (!offset) | ||
249 | return NULL; | ||
250 | |||
251 | return ifmsh->ie + offset + 2; | ||
252 | } | ||
253 | |||
254 | static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, | ||
255 | u16 stype, | ||
256 | struct ieee80211_mgmt *mgmt, | ||
257 | struct ieee802_11_elems *elems, | ||
258 | struct ieee80211_rx_status *rx_status) | ||
259 | { | ||
260 | const u8 *oui; | ||
261 | |||
262 | WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR); | ||
263 | msync_dbg(sdata, "called mesh_sync_vendor_rx_bcn_presp\n"); | ||
264 | oui = mesh_get_vendor_oui(sdata); | ||
265 | /* here you would implement the vendor offset tracking for this oui */ | ||
266 | } | ||
267 | |||
268 | static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata) | ||
269 | { | ||
270 | const u8 *oui; | ||
271 | |||
272 | WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR); | ||
273 | msync_dbg(sdata, "called mesh_sync_vendor_adjust_tbtt\n"); | ||
274 | oui = mesh_get_vendor_oui(sdata); | ||
275 | /* here you would implement the vendor tsf adjustment for this oui */ | ||
276 | } | ||
277 | |||
278 | /* global variable */ | ||
279 | static struct sync_method sync_methods[] = { | ||
280 | { | 208 | { |
281 | .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, | 209 | .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, |
282 | .ops = { | 210 | .ops = { |
@@ -284,18 +212,11 @@ static struct sync_method sync_methods[] = { | |||
284 | .adjust_tbtt = &mesh_sync_offset_adjust_tbtt, | 212 | .adjust_tbtt = &mesh_sync_offset_adjust_tbtt, |
285 | } | 213 | } |
286 | }, | 214 | }, |
287 | { | ||
288 | .method = IEEE80211_SYNC_METHOD_VENDOR, | ||
289 | .ops = { | ||
290 | .rx_bcn_presp = &mesh_sync_vendor_rx_bcn_presp, | ||
291 | .adjust_tbtt = &mesh_sync_vendor_adjust_tbtt, | ||
292 | } | ||
293 | }, | ||
294 | }; | 215 | }; |
295 | 216 | ||
296 | struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method) | 217 | const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method) |
297 | { | 218 | { |
298 | struct ieee80211_mesh_sync_ops *ops = NULL; | 219 | const struct ieee80211_mesh_sync_ops *ops = NULL; |
299 | u8 i; | 220 | u8 i; |
300 | 221 | ||
301 | for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) { | 222 | for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) { |
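With the vendor synchronisation stubs gone, the table holds a single entry and both the table and the returned ops pointer become const. The lookup itself is unchanged; as a sketch of the const-correct getter matching the hunk above:

static const struct ieee80211_mesh_sync_ops *sketch_sync_ops_get(u8 method)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sync_methods); i++)
		if (sync_methods[i].method == method)
			return &sync_methods[i].ops;

	return NULL;	/* unknown synchronisation method */
}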
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 1b7eed252fe9..d2a4f78b4b0f 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -178,20 +178,32 @@ static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata, | |||
178 | { | 178 | { |
179 | struct ieee80211_local *local = sdata->local; | 179 | struct ieee80211_local *local = sdata->local; |
180 | struct ieee80211_supported_band *sband; | 180 | struct ieee80211_supported_band *sband; |
181 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
182 | struct ieee80211_channel *chan; | ||
181 | struct sta_info *sta; | 183 | struct sta_info *sta; |
182 | u32 changed = 0; | 184 | u32 changed = 0; |
183 | u16 ht_opmode; | 185 | u16 ht_opmode; |
184 | bool disable_40 = false; | 186 | bool disable_40 = false; |
185 | 187 | ||
186 | sband = local->hw.wiphy->bands[local->oper_channel->band]; | 188 | rcu_read_lock(); |
189 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
190 | if (WARN_ON(!chanctx_conf)) { | ||
191 | rcu_read_unlock(); | ||
192 | return 0; | ||
193 | } | ||
194 | chan = chanctx_conf->def.chan; | ||
195 | rcu_read_unlock(); | ||
196 | sband = local->hw.wiphy->bands[chan->band]; | ||
187 | 197 | ||
188 | switch (sdata->vif.bss_conf.channel_type) { | 198 | switch (sdata->vif.bss_conf.chandef.width) { |
189 | case NL80211_CHAN_HT40PLUS: | 199 | case NL80211_CHAN_WIDTH_40: |
190 | if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40PLUS) | 200 | if (sdata->vif.bss_conf.chandef.chan->center_freq > |
201 | sdata->vif.bss_conf.chandef.center_freq1 && | ||
202 | chan->flags & IEEE80211_CHAN_NO_HT40PLUS) | ||
191 | disable_40 = true; | 203 | disable_40 = true; |
192 | break; | 204 | if (sdata->vif.bss_conf.chandef.chan->center_freq < |
193 | case NL80211_CHAN_HT40MINUS: | 205 | sdata->vif.bss_conf.chandef.center_freq1 && |
194 | if (local->oper_channel->flags & IEEE80211_CHAN_NO_HT40MINUS) | 206 | chan->flags & IEEE80211_CHAN_NO_HT40MINUS) |
195 | disable_40 = true; | 207 | disable_40 = true; |
196 | break; | 208 | break; |
197 | default: | 209 | default: |
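With a chandef there is no separate HT40+/HT40- channel type any more; which side the secondary channel sits on follows from comparing the control channel's frequency with center_freq1, and the hunk above keys the IEEE80211_CHAN_NO_HT40PLUS/MINUS regulatory flags off that ordering. A worked example of the geometry (frequencies chosen for illustration only):

/*
 * 40 MHz chandef with channel 36 as the control channel:
 *   control channel       chan->center_freq = 5180 MHz
 *   secondary above (40)  center_freq1      = 5190 MHz (block 5170-5210)
 *   secondary below (32)  center_freq1      = 5170 MHz (block 5150-5190)
 */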
@@ -343,7 +355,7 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, | |||
343 | cap = vht_cap.cap; | 355 | cap = vht_cap.cap; |
344 | 356 | ||
345 | /* reserve and fill IE */ | 357 | /* reserve and fill IE */ |
346 | pos = skb_put(skb, sizeof(struct ieee80211_vht_capabilities) + 2); | 358 | pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); |
347 | ieee80211_ie_build_vht_cap(pos, &vht_cap, cap); | 359 | ieee80211_ie_build_vht_cap(pos, &vht_cap, cap); |
348 | } | 360 | } |
349 | 361 | ||
@@ -359,11 +371,21 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | |||
359 | int i, count, rates_len, supp_rates_len; | 371 | int i, count, rates_len, supp_rates_len; |
360 | u16 capab; | 372 | u16 capab; |
361 | struct ieee80211_supported_band *sband; | 373 | struct ieee80211_supported_band *sband; |
374 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
375 | struct ieee80211_channel *chan; | ||
362 | u32 rates = 0; | 376 | u32 rates = 0; |
363 | 377 | ||
364 | lockdep_assert_held(&ifmgd->mtx); | 378 | lockdep_assert_held(&ifmgd->mtx); |
365 | 379 | ||
366 | sband = local->hw.wiphy->bands[local->oper_channel->band]; | 380 | rcu_read_lock(); |
381 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
382 | if (WARN_ON(!chanctx_conf)) { | ||
383 | rcu_read_unlock(); | ||
384 | return; | ||
385 | } | ||
386 | chan = chanctx_conf->def.chan; | ||
387 | rcu_read_unlock(); | ||
388 | sband = local->hw.wiphy->bands[chan->band]; | ||
367 | 389 | ||
368 | if (assoc_data->supp_rates_len) { | 390 | if (assoc_data->supp_rates_len) { |
369 | /* | 391 | /* |
@@ -392,7 +414,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | |||
392 | 4 + /* power capability */ | 414 | 4 + /* power capability */ |
393 | 2 + 2 * sband->n_channels + /* supported channels */ | 415 | 2 + 2 * sband->n_channels + /* supported channels */ |
394 | 2 + sizeof(struct ieee80211_ht_cap) + /* HT */ | 416 | 2 + sizeof(struct ieee80211_ht_cap) + /* HT */ |
395 | 2 + sizeof(struct ieee80211_vht_capabilities) + /* VHT */ | 417 | 2 + sizeof(struct ieee80211_vht_cap) + /* VHT */ |
396 | assoc_data->ie_len + /* extra IEs */ | 418 | assoc_data->ie_len + /* extra IEs */ |
397 | 9, /* WMM */ | 419 | 9, /* WMM */ |
398 | GFP_KERNEL); | 420 | GFP_KERNEL); |
@@ -485,7 +507,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | |||
485 | *pos++ = WLAN_EID_PWR_CAPABILITY; | 507 | *pos++ = WLAN_EID_PWR_CAPABILITY; |
486 | *pos++ = 2; | 508 | *pos++ = 2; |
487 | *pos++ = 0; /* min tx power */ | 509 | *pos++ = 0; /* min tx power */ |
488 | *pos++ = local->oper_channel->max_power; /* max tx power */ | 510 | *pos++ = chan->max_power; /* max tx power */ |
489 | 511 | ||
490 | /* 2. supported channels */ | 512 | /* 2. supported channels */ |
491 | /* TODO: get this in reg domain format */ | 513 | /* TODO: get this in reg domain format */ |
@@ -521,9 +543,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) | |||
521 | offset = noffset; | 543 | offset = noffset; |
522 | } | 544 | } |
523 | 545 | ||
524 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) | 546 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) |
525 | ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param, | 547 | ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param, |
526 | sband, local->oper_channel, ifmgd->ap_smps); | 548 | sband, chan, sdata->smps_mode); |
527 | 549 | ||
528 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) | 550 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) |
529 | ieee80211_add_vht_ie(sdata, skb, sband); | 551 | ieee80211_add_vht_ie(sdata, skb, sband); |
@@ -657,18 +679,18 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
657 | if (!ifmgd->associated) | 679 | if (!ifmgd->associated) |
658 | goto out; | 680 | goto out; |
659 | 681 | ||
660 | sdata->local->oper_channel = sdata->local->csa_channel; | 682 | sdata->local->_oper_channel = sdata->local->csa_channel; |
661 | if (!sdata->local->ops->channel_switch) { | 683 | if (!sdata->local->ops->channel_switch) { |
662 | /* call "hw_config" only if doing sw channel switch */ | 684 | /* call "hw_config" only if doing sw channel switch */ |
663 | ieee80211_hw_config(sdata->local, | 685 | ieee80211_hw_config(sdata->local, |
664 | IEEE80211_CONF_CHANGE_CHANNEL); | 686 | IEEE80211_CONF_CHANGE_CHANNEL); |
665 | } else { | 687 | } else { |
666 | /* update the device channel directly */ | 688 | /* update the device channel directly */ |
667 | sdata->local->hw.conf.channel = sdata->local->oper_channel; | 689 | sdata->local->hw.conf.channel = sdata->local->_oper_channel; |
668 | } | 690 | } |
669 | 691 | ||
670 | /* XXX: shouldn't really modify cfg80211-owned data! */ | 692 | /* XXX: shouldn't really modify cfg80211-owned data! */ |
671 | ifmgd->associated->channel = sdata->local->oper_channel; | 693 | ifmgd->associated->channel = sdata->local->_oper_channel; |
672 | 694 | ||
673 | /* XXX: wait for a beacon first? */ | 695 | /* XXX: wait for a beacon first? */ |
674 | ieee80211_wake_queues_by_reason(&sdata->local->hw, | 696 | ieee80211_wake_queues_by_reason(&sdata->local->hw, |
@@ -680,11 +702,8 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
680 | 702 | ||
681 | void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) | 703 | void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) |
682 | { | 704 | { |
683 | struct ieee80211_sub_if_data *sdata; | 705 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); |
684 | struct ieee80211_if_managed *ifmgd; | 706 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
685 | |||
686 | sdata = vif_to_sdata(vif); | ||
687 | ifmgd = &sdata->u.mgd; | ||
688 | 707 | ||
689 | trace_api_chswitch_done(sdata, success); | 708 | trace_api_chswitch_done(sdata, success); |
690 | if (!success) { | 709 | if (!success) { |
@@ -723,6 +742,7 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
723 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 742 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
724 | int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num, | 743 | int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num, |
725 | cbss->channel->band); | 744 | cbss->channel->band); |
745 | struct ieee80211_chanctx *chanctx; | ||
726 | 746 | ||
727 | ASSERT_MGD_MTX(ifmgd); | 747 | ASSERT_MGD_MTX(ifmgd); |
728 | 748 | ||
@@ -748,10 +768,34 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
748 | return; | 768 | return; |
749 | } | 769 | } |
750 | 770 | ||
751 | sdata->local->csa_channel = new_ch; | ||
752 | |||
753 | ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; | 771 | ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED; |
754 | 772 | ||
773 | if (sdata->local->use_chanctx) { | ||
774 | sdata_info(sdata, | ||
775 | "not handling channel switch with channel contexts\n"); | ||
776 | ieee80211_queue_work(&sdata->local->hw, | ||
777 | &ifmgd->csa_connection_drop_work); | ||
778 | } | ||
779 | |||
780 | mutex_lock(&sdata->local->chanctx_mtx); | ||
781 | if (WARN_ON(!rcu_access_pointer(sdata->vif.chanctx_conf))) { | ||
782 | mutex_unlock(&sdata->local->chanctx_mtx); | ||
783 | return; | ||
784 | } | ||
785 | chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf), | ||
786 | struct ieee80211_chanctx, conf); | ||
787 | if (chanctx->refcount > 1) { | ||
788 | sdata_info(sdata, | ||
789 | "channel switch with multiple interfaces on the same channel, disconnecting\n"); | ||
790 | ieee80211_queue_work(&sdata->local->hw, | ||
791 | &ifmgd->csa_connection_drop_work); | ||
792 | mutex_unlock(&sdata->local->chanctx_mtx); | ||
793 | return; | ||
794 | } | ||
795 | mutex_unlock(&sdata->local->chanctx_mtx); | ||
796 | |||
797 | sdata->local->csa_channel = new_ch; | ||
798 | |||
755 | if (sw_elem->mode) | 799 | if (sw_elem->mode) |
756 | ieee80211_stop_queues_by_reason(&sdata->local->hw, | 800 | ieee80211_stop_queues_by_reason(&sdata->local->hw, |
757 | IEEE80211_QUEUE_STOP_REASON_CSA); | 801 | IEEE80211_QUEUE_STOP_REASON_CSA); |
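
The hunk above refuses to follow a channel switch announcement when the interface's channel context is shared: it recovers the owning context from the RCU-managed conf pointer and checks its refcount. A minimal userspace sketch of that container_of() pointer arithmetic, using simplified stand-in structures rather than the kernel definitions:

    #include <stddef.h>
    #include <stdio.h>

    /* simplified stand-ins for the kernel structures */
    struct chanctx_conf { int dummy; };
    struct chanctx {
        int refcount;
        struct chanctx_conf conf;
    };

    /* same idea as the kernel's container_of(): walk back from a member
     * pointer to the enclosing structure */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct chanctx ctx = { .refcount = 2 };
        struct chanctx_conf *conf = &ctx.conf;   /* what the vif stores */

        struct chanctx *back = container_of(conf, struct chanctx, conf);

        /* refcount > 1 means the context is shared with another vif,
         * so the CSA handler disconnects instead of switching */
        printf("shared: %s\n", back->refcount > 1 ? "yes" : "no");
        return 0;
    }
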
@@ -778,10 +822,10 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
778 | cbss->beacon_interval)); | 822 | cbss->beacon_interval)); |
779 | } | 823 | } |
780 | 824 | ||
781 | static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, | 825 | static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, |
782 | struct ieee80211_channel *channel, | 826 | struct ieee80211_channel *channel, |
783 | const u8 *country_ie, u8 country_ie_len, | 827 | const u8 *country_ie, u8 country_ie_len, |
784 | const u8 *pwr_constr_elem) | 828 | const u8 *pwr_constr_elem) |
785 | { | 829 | { |
786 | struct ieee80211_country_ie_triplet *triplet; | 830 | struct ieee80211_country_ie_triplet *triplet; |
787 | int chan = ieee80211_frequency_to_channel(channel->center_freq); | 831 | int chan = ieee80211_frequency_to_channel(channel->center_freq); |
@@ -790,7 +834,7 @@ static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, | |||
790 | 834 | ||
791 | /* Invalid IE */ | 835 | /* Invalid IE */ |
792 | if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) | 836 | if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) |
793 | return; | 837 | return 0; |
794 | 838 | ||
795 | triplet = (void *)(country_ie + 3); | 839 | triplet = (void *)(country_ie + 3); |
796 | country_ie_len -= 3; | 840 | country_ie_len -= 3; |
@@ -831,19 +875,21 @@ static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, | |||
831 | } | 875 | } |
832 | 876 | ||
833 | if (!have_chan_pwr) | 877 | if (!have_chan_pwr) |
834 | return; | 878 | return 0; |
835 | 879 | ||
836 | new_ap_level = max_t(int, 0, chan_pwr - *pwr_constr_elem); | 880 | new_ap_level = max_t(int, 0, chan_pwr - *pwr_constr_elem); |
837 | 881 | ||
838 | if (sdata->local->ap_power_level == new_ap_level) | 882 | if (sdata->ap_power_level == new_ap_level) |
839 | return; | 883 | return 0; |
840 | 884 | ||
841 | sdata_info(sdata, | 885 | sdata_info(sdata, |
842 | "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", | 886 | "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", |
843 | new_ap_level, chan_pwr, *pwr_constr_elem, | 887 | new_ap_level, chan_pwr, *pwr_constr_elem, |
844 | sdata->u.mgd.bssid); | 888 | sdata->u.mgd.bssid); |
845 | sdata->local->ap_power_level = new_ap_level; | 889 | sdata->ap_power_level = new_ap_level; |
846 | ieee80211_hw_config(sdata->local, 0); | 890 | if (__ieee80211_recalc_txpower(sdata)) |
891 | return BSS_CHANGED_TXPOWER; | ||
892 | return 0; | ||
847 | } | 893 | } |
848 | 894 | ||
849 | void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif) | 895 | void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif) |
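
ieee80211_handle_pwr_constr() now returns a BSS_CHANGED_TXPOWER hint instead of calling ieee80211_hw_config() itself, but the limit it computes is unchanged: the channel power minus the advertised constraint, clamped at zero. A small standalone illustration of that arithmetic, with made-up dBm values:

    #include <stdio.h>

    /* new AP power level = max(0, channel power - power constraint), in dBm */
    static int ap_power_level(int chan_pwr, int pwr_constr)
    {
        int level = chan_pwr - pwr_constr;
        return level > 0 ? level : 0;
    }

    int main(void)
    {
        /* e.g. a 20 dBm channel limit with a 3 dB constraint IE -> 17 dBm */
        printf("%d dBm\n", ap_power_level(20, 3));
        return 0;
    }
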
@@ -1280,7 +1326,7 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, | |||
1280 | } | 1326 | } |
1281 | 1327 | ||
1282 | use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); | 1328 | use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); |
1283 | if (sdata->local->oper_channel->band == IEEE80211_BAND_5GHZ) | 1329 | if (ieee80211_get_sdata_band(sdata) == IEEE80211_BAND_5GHZ) |
1284 | use_short_slot = true; | 1330 | use_short_slot = true; |
1285 | 1331 | ||
1286 | if (use_protection != bss_conf->use_cts_prot) { | 1332 | if (use_protection != bss_conf->use_cts_prot) { |
@@ -1321,6 +1367,22 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
1321 | 1367 | ||
1322 | sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE; | 1368 | sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE; |
1323 | 1369 | ||
1370 | if (sdata->vif.p2p) { | ||
1371 | u8 noa[2]; | ||
1372 | int ret; | ||
1373 | |||
1374 | ret = cfg80211_get_p2p_attr(cbss->information_elements, | ||
1375 | cbss->len_information_elements, | ||
1376 | IEEE80211_P2P_ATTR_ABSENCE_NOTICE, | ||
1377 | noa, sizeof(noa)); | ||
1378 | if (ret >= 2) { | ||
1379 | bss_conf->p2p_oppps = noa[1] & 0x80; | ||
1380 | bss_conf->p2p_ctwindow = noa[1] & 0x7f; | ||
1381 | bss_info_changed |= BSS_CHANGED_P2P_PS; | ||
1382 | sdata->u.mgd.p2p_noa_index = noa[0]; | ||
1383 | } | ||
1384 | } | ||
1385 | |||
1324 | /* just to be sure */ | 1386 | /* just to be sure */ |
1325 | ieee80211_stop_poll(sdata); | 1387 | ieee80211_stop_poll(sdata); |
1326 | 1388 | ||
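
For P2P client interfaces the association path now extracts the notice-of-absence attribute from the BSS IEs: byte 0 is the NoA index and byte 1 packs opportunistic power save in bit 7 with the CT window in the low seven bits. A self-contained sketch of that two-byte decode (attribute values invented for the example):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct p2p_ps {
        bool oppps;         /* opportunistic power save enabled */
        uint8_t ctwindow;   /* client traffic window, in TUs */
        uint8_t noa_index;  /* changes whenever the NoA schedule changes */
    };

    /* noa[0] = index, noa[1] = CTWindow/OppPS parameters field */
    static struct p2p_ps decode_noa(const uint8_t noa[2])
    {
        struct p2p_ps ps = {
            .oppps     = noa[1] & 0x80,
            .ctwindow  = noa[1] & 0x7f,
            .noa_index = noa[0],
        };
        return ps;
    }

    int main(void)
    {
        const uint8_t noa[2] = { 0x01, 0x8a };  /* index 1, OppPS on, CTWindow 10 */
        struct p2p_ps ps = decode_noa(noa);

        printf("oppps=%d ctwindow=%u index=%u\n",
               ps.oppps, ps.ctwindow, ps.noa_index);
        return 0;
    }
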
@@ -1350,7 +1412,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
1350 | ieee80211_recalc_ps(local, -1); | 1412 | ieee80211_recalc_ps(local, -1); |
1351 | mutex_unlock(&local->iflist_mtx); | 1413 | mutex_unlock(&local->iflist_mtx); |
1352 | 1414 | ||
1353 | ieee80211_recalc_smps(local); | 1415 | ieee80211_recalc_smps(sdata); |
1354 | ieee80211_recalc_ps_vif(sdata); | 1416 | ieee80211_recalc_ps_vif(sdata); |
1355 | 1417 | ||
1356 | netif_tx_start_all_queues(sdata->dev); | 1418 | netif_tx_start_all_queues(sdata->dev); |
@@ -1443,11 +1505,14 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1443 | changed |= BSS_CHANGED_ASSOC; | 1505 | changed |= BSS_CHANGED_ASSOC; |
1444 | sdata->vif.bss_conf.assoc = false; | 1506 | sdata->vif.bss_conf.assoc = false; |
1445 | 1507 | ||
1508 | sdata->vif.bss_conf.p2p_ctwindow = 0; | ||
1509 | sdata->vif.bss_conf.p2p_oppps = false; | ||
1510 | |||
1446 | /* on the next assoc, re-program HT parameters */ | 1511 | /* on the next assoc, re-program HT parameters */ |
1447 | memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); | 1512 | memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); |
1448 | memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); | 1513 | memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); |
1449 | 1514 | ||
1450 | local->ap_power_level = 0; | 1515 | sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL; |
1451 | 1516 | ||
1452 | del_timer_sync(&local->dynamic_ps_timer); | 1517 | del_timer_sync(&local->dynamic_ps_timer); |
1453 | cancel_work_sync(&local->dynamic_ps_enable_work); | 1518 | cancel_work_sync(&local->dynamic_ps_enable_work); |
@@ -1465,10 +1530,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1465 | changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; | 1530 | changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; |
1466 | ieee80211_bss_info_change_notify(sdata, changed); | 1531 | ieee80211_bss_info_change_notify(sdata, changed); |
1467 | 1532 | ||
1468 | /* channel(_type) changes are handled by ieee80211_hw_config */ | ||
1469 | WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT)); | ||
1470 | ieee80211_hw_config(local, 0); | ||
1471 | |||
1472 | /* disassociated - set to defaults now */ | 1533 | /* disassociated - set to defaults now */ |
1473 | ieee80211_set_wmm_default(sdata, false); | 1534 | ieee80211_set_wmm_default(sdata, false); |
1474 | 1535 | ||
@@ -1478,6 +1539,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
1478 | del_timer_sync(&sdata->u.mgd.chswitch_timer); | 1539 | del_timer_sync(&sdata->u.mgd.chswitch_timer); |
1479 | 1540 | ||
1480 | sdata->u.mgd.timers_running = 0; | 1541 | sdata->u.mgd.timers_running = 0; |
1542 | |||
1543 | ifmgd->flags = 0; | ||
1544 | ieee80211_vif_release_channel(sdata); | ||
1481 | } | 1545 | } |
1482 | 1546 | ||
1483 | void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, | 1547 | void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, |
@@ -1589,7 +1653,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) | |||
1589 | 1653 | ||
1590 | ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL, | 1654 | ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL, |
1591 | 0, (u32) -1, true, false, | 1655 | 0, (u32) -1, true, false, |
1592 | ifmgd->associated->channel); | 1656 | ifmgd->associated->channel, false); |
1593 | } | 1657 | } |
1594 | 1658 | ||
1595 | ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); | 1659 | ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); |
@@ -1692,8 +1756,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, | |||
1692 | ssid_len = ssid[1]; | 1756 | ssid_len = ssid[1]; |
1693 | 1757 | ||
1694 | skb = ieee80211_build_probe_req(sdata, cbss->bssid, | 1758 | skb = ieee80211_build_probe_req(sdata, cbss->bssid, |
1695 | (u32) -1, | 1759 | (u32) -1, cbss->channel, |
1696 | sdata->local->oper_channel, | ||
1697 | ssid + 2, ssid_len, | 1760 | ssid + 2, ssid_len, |
1698 | NULL, 0, true); | 1761 | NULL, 0, true); |
1699 | 1762 | ||
@@ -1804,6 +1867,8 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata, | |||
1804 | 1867 | ||
1805 | memset(sdata->u.mgd.bssid, 0, ETH_ALEN); | 1868 | memset(sdata->u.mgd.bssid, 0, ETH_ALEN); |
1806 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); | 1869 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); |
1870 | sdata->u.mgd.flags = 0; | ||
1871 | ieee80211_vif_release_channel(sdata); | ||
1807 | } | 1872 | } |
1808 | 1873 | ||
1809 | cfg80211_put_bss(auth_data->bss); | 1874 | cfg80211_put_bss(auth_data->bss); |
@@ -1824,7 +1889,7 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, | |||
1824 | return; | 1889 | return; |
1825 | auth_data->expected_transaction = 4; | 1890 | auth_data->expected_transaction = 4; |
1826 | drv_mgd_prepare_tx(sdata->local, sdata); | 1891 | drv_mgd_prepare_tx(sdata->local, sdata); |
1827 | ieee80211_send_auth(sdata, 3, auth_data->algorithm, | 1892 | ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0, |
1828 | elems.challenge - 2, elems.challenge_len + 2, | 1893 | elems.challenge - 2, elems.challenge_len + 2, |
1829 | auth_data->bss->bssid, auth_data->bss->bssid, | 1894 | auth_data->bss->bssid, auth_data->bss->bssid, |
1830 | auth_data->key, auth_data->key_len, | 1895 | auth_data->key, auth_data->key_len, |
@@ -1858,8 +1923,13 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, | |||
1858 | status_code = le16_to_cpu(mgmt->u.auth.status_code); | 1923 | status_code = le16_to_cpu(mgmt->u.auth.status_code); |
1859 | 1924 | ||
1860 | if (auth_alg != ifmgd->auth_data->algorithm || | 1925 | if (auth_alg != ifmgd->auth_data->algorithm || |
1861 | auth_transaction != ifmgd->auth_data->expected_transaction) | 1926 | auth_transaction != ifmgd->auth_data->expected_transaction) { |
1927 | sdata_info(sdata, "%pM unexpected authentication state: alg %d (expected %d) transact %d (expected %d)\n", | ||
1928 | mgmt->sa, auth_alg, ifmgd->auth_data->algorithm, | ||
1929 | auth_transaction, | ||
1930 | ifmgd->auth_data->expected_transaction); | ||
1862 | return RX_MGMT_NONE; | 1931 | return RX_MGMT_NONE; |
1932 | } | ||
1863 | 1933 | ||
1864 | if (status_code != WLAN_STATUS_SUCCESS) { | 1934 | if (status_code != WLAN_STATUS_SUCCESS) { |
1865 | sdata_info(sdata, "%pM denied authentication (status %d)\n", | 1935 | sdata_info(sdata, "%pM denied authentication (status %d)\n", |
@@ -1872,6 +1942,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, | |||
1872 | case WLAN_AUTH_OPEN: | 1942 | case WLAN_AUTH_OPEN: |
1873 | case WLAN_AUTH_LEAP: | 1943 | case WLAN_AUTH_LEAP: |
1874 | case WLAN_AUTH_FT: | 1944 | case WLAN_AUTH_FT: |
1945 | case WLAN_AUTH_SAE: | ||
1875 | break; | 1946 | break; |
1876 | case WLAN_AUTH_SHARED_KEY: | 1947 | case WLAN_AUTH_SHARED_KEY: |
1877 | if (ifmgd->auth_data->expected_transaction != 4) { | 1948 | if (ifmgd->auth_data->expected_transaction != 4) { |
@@ -1891,6 +1962,15 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, | |||
1891 | ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; | 1962 | ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; |
1892 | run_again(ifmgd, ifmgd->auth_data->timeout); | 1963 | run_again(ifmgd, ifmgd->auth_data->timeout); |
1893 | 1964 | ||
1965 | if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && | ||
1966 | ifmgd->auth_data->expected_transaction != 2) { | ||
1967 | /* | ||
1968 | * Report auth frame to user space for processing since another | ||
1969 | * round of Authentication frames is still needed. | ||
1970 | */ | ||
1971 | return RX_MGMT_CFG80211_RX_AUTH; | ||
1972 | } | ||
1973 | |||
1894 | /* move station state to auth */ | 1974 | /* move station state to auth */ |
1895 | mutex_lock(&sdata->local->sta_mtx); | 1975 | mutex_lock(&sdata->local->sta_mtx); |
1896 | sta = sta_info_get(sdata, bssid); | 1976 | sta = sta_info_get(sdata, bssid); |
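
SAE authentication needs two commit/confirm rounds, so the receive path above only moves the station into the auth state once the expected transaction number has reached 2; earlier frames are simply handed back to user space. A reduced sketch of that decision, with illustrative constants standing in for the WLAN_AUTH_* values:

    #include <stdbool.h>
    #include <stdio.h>

    /* illustrative values only; the real constants live in the kernel headers */
    #define AUTH_ALG_OPEN 0
    #define AUTH_ALG_SAE  3

    /* true when the received auth frame should just be handed to user space
     * because the SAE exchange still has another round to go */
    static bool sae_needs_more_rounds(int algorithm, int expected_transaction)
    {
        return algorithm == AUTH_ALG_SAE && expected_transaction != 2;
    }

    int main(void)
    {
        /* commit frame (transaction 1) -> more rounds needed */
        printf("%d\n", sae_needs_more_rounds(AUTH_ALG_SAE, 1));
        /* confirm frame (transaction 2) -> proceed to the auth state */
        printf("%d\n", sae_needs_more_rounds(AUTH_ALG_SAE, 2));
        return 0;
    }
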
@@ -2030,6 +2110,8 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata, | |||
2030 | 2110 | ||
2031 | memset(sdata->u.mgd.bssid, 0, ETH_ALEN); | 2111 | memset(sdata->u.mgd.bssid, 0, ETH_ALEN); |
2032 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); | 2112 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); |
2113 | sdata->u.mgd.flags = 0; | ||
2114 | ieee80211_vif_release_channel(sdata); | ||
2033 | } | 2115 | } |
2034 | 2116 | ||
2035 | kfree(assoc_data); | 2117 | kfree(assoc_data); |
@@ -2091,15 +2173,20 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, | |||
2091 | return false; | 2173 | return false; |
2092 | } | 2174 | } |
2093 | 2175 | ||
2094 | sband = local->hw.wiphy->bands[local->oper_channel->band]; | 2176 | sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)]; |
2095 | 2177 | ||
2096 | if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) | 2178 | if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) |
2097 | ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, | 2179 | ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, |
2098 | elems.ht_cap_elem, &sta->sta.ht_cap); | 2180 | elems.ht_cap_elem, &sta->sta.ht_cap); |
2099 | 2181 | ||
2100 | sta->supports_40mhz = | 2182 | sta->supports_40mhz = |
2101 | sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; | 2183 | sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; |
2102 | 2184 | ||
2185 | if (elems.vht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) | ||
2186 | ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, | ||
2187 | elems.vht_cap_elem, | ||
2188 | &sta->sta.vht_cap); | ||
2189 | |||
2103 | rate_control_rate_init(sta); | 2190 | rate_control_rate_init(sta); |
2104 | 2191 | ||
2105 | if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) | 2192 | if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) |
@@ -2140,7 +2227,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, | |||
2140 | changed |= BSS_CHANGED_QOS; | 2227 | changed |= BSS_CHANGED_QOS; |
2141 | 2228 | ||
2142 | if (elems.ht_operation && elems.wmm_param && | 2229 | if (elems.ht_operation && elems.wmm_param && |
2143 | !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) | 2230 | !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) |
2144 | changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, | 2231 | changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, |
2145 | cbss->bssid, false); | 2232 | cbss->bssid, false); |
2146 | 2233 | ||
@@ -2369,8 +2456,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
2369 | size_t baselen; | 2456 | size_t baselen; |
2370 | struct ieee802_11_elems elems; | 2457 | struct ieee802_11_elems elems; |
2371 | struct ieee80211_local *local = sdata->local; | 2458 | struct ieee80211_local *local = sdata->local; |
2459 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
2460 | struct ieee80211_channel *chan; | ||
2372 | u32 changed = 0; | 2461 | u32 changed = 0; |
2373 | bool erp_valid, directed_tim = false; | 2462 | bool erp_valid; |
2374 | u8 erp_value = 0; | 2463 | u8 erp_value = 0; |
2375 | u32 ncrc; | 2464 | u32 ncrc; |
2376 | u8 *bssid; | 2465 | u8 *bssid; |
@@ -2382,8 +2471,19 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
2382 | if (baselen > len) | 2471 | if (baselen > len) |
2383 | return; | 2472 | return; |
2384 | 2473 | ||
2385 | if (rx_status->freq != local->oper_channel->center_freq) | 2474 | rcu_read_lock(); |
2475 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
2476 | if (!chanctx_conf) { | ||
2477 | rcu_read_unlock(); | ||
2386 | return; | 2478 | return; |
2479 | } | ||
2480 | |||
2481 | if (rx_status->freq != chanctx_conf->def.chan->center_freq) { | ||
2482 | rcu_read_unlock(); | ||
2483 | return; | ||
2484 | } | ||
2485 | chan = chanctx_conf->def.chan; | ||
2486 | rcu_read_unlock(); | ||
2387 | 2487 | ||
2388 | if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon && | 2488 | if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon && |
2389 | ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) { | 2489 | ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) { |
@@ -2490,11 +2590,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
2490 | len - baselen, &elems, | 2590 | len - baselen, &elems, |
2491 | care_about_ies, ncrc); | 2591 | care_about_ies, ncrc); |
2492 | 2592 | ||
2493 | if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) | ||
2494 | directed_tim = ieee80211_check_tim(elems.tim, elems.tim_len, | ||
2495 | ifmgd->aid); | ||
2496 | |||
2497 | if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) { | 2593 | if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) { |
2594 | bool directed_tim = ieee80211_check_tim(elems.tim, | ||
2595 | elems.tim_len, | ||
2596 | ifmgd->aid); | ||
2498 | if (directed_tim) { | 2597 | if (directed_tim) { |
2499 | if (local->hw.conf.dynamic_ps_timeout > 0) { | 2598 | if (local->hw.conf.dynamic_ps_timeout > 0) { |
2500 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { | 2599 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { |
@@ -2519,6 +2618,27 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
2519 | } | 2618 | } |
2520 | } | 2619 | } |
2521 | 2620 | ||
2621 | if (sdata->vif.p2p) { | ||
2622 | u8 noa[2]; | ||
2623 | int ret; | ||
2624 | |||
2625 | ret = cfg80211_get_p2p_attr(mgmt->u.beacon.variable, | ||
2626 | len - baselen, | ||
2627 | IEEE80211_P2P_ATTR_ABSENCE_NOTICE, | ||
2628 | noa, sizeof(noa)); | ||
2629 | if (ret >= 2 && sdata->u.mgd.p2p_noa_index != noa[0]) { | ||
2630 | bss_conf->p2p_oppps = noa[1] & 0x80; | ||
2631 | bss_conf->p2p_ctwindow = noa[1] & 0x7f; | ||
2632 | changed |= BSS_CHANGED_P2P_PS; | ||
2633 | sdata->u.mgd.p2p_noa_index = noa[0]; | ||
2634 | /* | ||
2635 | * make sure we update all information, the CRC | ||
2636 | * mechanism doesn't look at P2P attributes. | ||
2637 | */ | ||
2638 | ifmgd->beacon_crc_valid = false; | ||
2639 | } | ||
2640 | } | ||
2641 | |||
2522 | if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) | 2642 | if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) |
2523 | return; | 2643 | return; |
2524 | ifmgd->beacon_crc = ncrc; | 2644 | ifmgd->beacon_crc = ncrc; |
@@ -2543,22 +2663,17 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
2543 | 2663 | ||
2544 | 2664 | ||
2545 | if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param && | 2665 | if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param && |
2546 | !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) { | 2666 | !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) |
2547 | struct ieee80211_supported_band *sband; | ||
2548 | |||
2549 | sband = local->hw.wiphy->bands[local->oper_channel->band]; | ||
2550 | |||
2551 | changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, | 2667 | changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation, |
2552 | bssid, true); | 2668 | bssid, true); |
2553 | } | ||
2554 | 2669 | ||
2555 | if (elems.country_elem && elems.pwr_constr_elem && | 2670 | if (elems.country_elem && elems.pwr_constr_elem && |
2556 | mgmt->u.probe_resp.capab_info & | 2671 | mgmt->u.probe_resp.capab_info & |
2557 | cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT)) | 2672 | cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT)) |
2558 | ieee80211_handle_pwr_constr(sdata, local->oper_channel, | 2673 | changed |= ieee80211_handle_pwr_constr(sdata, chan, |
2559 | elems.country_elem, | 2674 | elems.country_elem, |
2560 | elems.country_elem_len, | 2675 | elems.country_elem_len, |
2561 | elems.pwr_constr_elem); | 2676 | elems.pwr_constr_elem); |
2562 | 2677 | ||
2563 | ieee80211_bss_info_change_notify(sdata, changed); | 2678 | ieee80211_bss_info_change_notify(sdata, changed); |
2564 | } | 2679 | } |
@@ -2703,13 +2818,23 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) | |||
2703 | drv_mgd_prepare_tx(local, sdata); | 2818 | drv_mgd_prepare_tx(local, sdata); |
2704 | 2819 | ||
2705 | if (auth_data->bss->proberesp_ies) { | 2820 | if (auth_data->bss->proberesp_ies) { |
2821 | u16 trans = 1; | ||
2822 | u16 status = 0; | ||
2823 | |||
2706 | sdata_info(sdata, "send auth to %pM (try %d/%d)\n", | 2824 | sdata_info(sdata, "send auth to %pM (try %d/%d)\n", |
2707 | auth_data->bss->bssid, auth_data->tries, | 2825 | auth_data->bss->bssid, auth_data->tries, |
2708 | IEEE80211_AUTH_MAX_TRIES); | 2826 | IEEE80211_AUTH_MAX_TRIES); |
2709 | 2827 | ||
2710 | auth_data->expected_transaction = 2; | 2828 | auth_data->expected_transaction = 2; |
2711 | ieee80211_send_auth(sdata, 1, auth_data->algorithm, | 2829 | |
2712 | auth_data->ie, auth_data->ie_len, | 2830 | if (auth_data->algorithm == WLAN_AUTH_SAE) { |
2831 | trans = auth_data->sae_trans; | ||
2832 | status = auth_data->sae_status; | ||
2833 | auth_data->expected_transaction = trans; | ||
2834 | } | ||
2835 | |||
2836 | ieee80211_send_auth(sdata, trans, auth_data->algorithm, status, | ||
2837 | auth_data->data, auth_data->data_len, | ||
2713 | auth_data->bss->bssid, | 2838 | auth_data->bss->bssid, |
2714 | auth_data->bss->bssid, NULL, 0, 0); | 2839 | auth_data->bss->bssid, NULL, 0, 0); |
2715 | } else { | 2840 | } else { |
@@ -2728,7 +2853,7 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) | |||
2728 | */ | 2853 | */ |
2729 | ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1], | 2854 | ieee80211_send_probe_req(sdata, NULL, ssidie + 2, ssidie[1], |
2730 | NULL, 0, (u32) -1, true, false, | 2855 | NULL, 0, (u32) -1, true, false, |
2731 | auth_data->bss->channel); | 2856 | auth_data->bss->channel, false); |
2732 | } | 2857 | } |
2733 | 2858 | ||
2734 | auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; | 2859 | auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; |
@@ -3068,6 +3193,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, | |||
3068 | const u8 *ht_oper_ie; | 3193 | const u8 *ht_oper_ie; |
3069 | const struct ieee80211_ht_operation *ht_oper = NULL; | 3194 | const struct ieee80211_ht_operation *ht_oper = NULL; |
3070 | struct ieee80211_supported_band *sband; | 3195 | struct ieee80211_supported_band *sband; |
3196 | struct cfg80211_chan_def chandef; | ||
3071 | 3197 | ||
3072 | sband = local->hw.wiphy->bands[cbss->channel->band]; | 3198 | sband = local->hw.wiphy->bands[cbss->channel->band]; |
3073 | 3199 | ||
@@ -3099,49 +3225,64 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, | |||
3099 | ht_cfreq, ht_oper->primary_chan, | 3225 | ht_cfreq, ht_oper->primary_chan, |
3100 | cbss->channel->band); | 3226 | cbss->channel->band); |
3101 | ht_oper = NULL; | 3227 | ht_oper = NULL; |
3102 | } else { | ||
3103 | channel_type = NL80211_CHAN_HT20; | ||
3104 | } | 3228 | } |
3105 | } | 3229 | } |
3106 | 3230 | ||
3107 | if (ht_oper && sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { | 3231 | if (ht_oper) { |
3108 | /* | 3232 | /* |
3109 | * cfg80211 already verified that the channel itself can | 3233 | * cfg80211 already verified that the channel itself can |
3110 | * be used, but it didn't check that we can do the right | 3234 | * be used, but it didn't check that we can do the right |
3111 | * HT type, so do that here as well. If HT40 isn't allowed | 3235 | * HT type, so do that here as well. If HT40 isn't allowed |
3112 | * on this channel, disable 40 MHz operation. | 3236 | * on this channel, disable 40 MHz operation. |
3113 | */ | 3237 | */ |
3238 | const u8 *ht_cap_ie; | ||
3239 | const struct ieee80211_ht_cap *ht_cap; | ||
3240 | u8 chains = 1; | ||
3114 | 3241 | ||
3115 | switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { | 3242 | channel_type = NL80211_CHAN_HT20; |
3116 | case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: | 3243 | |
3117 | if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40PLUS) | 3244 | if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { |
3118 | ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ; | 3245 | switch (ht_oper->ht_param & |
3119 | else | 3246 | IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { |
3120 | channel_type = NL80211_CHAN_HT40PLUS; | 3247 | case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: |
3121 | break; | 3248 | if (cbss->channel->flags & |
3122 | case IEEE80211_HT_PARAM_CHA_SEC_BELOW: | 3249 | IEEE80211_CHAN_NO_HT40PLUS) |
3123 | if (cbss->channel->flags & IEEE80211_CHAN_NO_HT40MINUS) | 3250 | ifmgd->flags |= |
3124 | ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ; | 3251 | IEEE80211_STA_DISABLE_40MHZ; |
3125 | else | 3252 | else |
3126 | channel_type = NL80211_CHAN_HT40MINUS; | 3253 | channel_type = NL80211_CHAN_HT40PLUS; |
3127 | break; | 3254 | break; |
3255 | case IEEE80211_HT_PARAM_CHA_SEC_BELOW: | ||
3256 | if (cbss->channel->flags & | ||
3257 | IEEE80211_CHAN_NO_HT40MINUS) | ||
3258 | ifmgd->flags |= | ||
3259 | IEEE80211_STA_DISABLE_40MHZ; | ||
3260 | else | ||
3261 | channel_type = NL80211_CHAN_HT40MINUS; | ||
3262 | break; | ||
3263 | } | ||
3128 | } | 3264 | } |
3129 | } | ||
3130 | 3265 | ||
3131 | if (!ieee80211_set_channel_type(local, sdata, channel_type)) { | 3266 | ht_cap_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, |
3132 | /* can only fail due to HT40+/- mismatch */ | 3267 | cbss->information_elements, |
3133 | channel_type = NL80211_CHAN_HT20; | 3268 | cbss->len_information_elements); |
3134 | sdata_info(sdata, | 3269 | if (ht_cap_ie && ht_cap_ie[1] >= sizeof(*ht_cap)) { |
3135 | "disabling 40 MHz due to multi-vif mismatch\n"); | 3270 | ht_cap = (void *)(ht_cap_ie + 2); |
3136 | ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ; | 3271 | chains = ieee80211_mcs_to_chains(&ht_cap->mcs); |
3137 | WARN_ON(!ieee80211_set_channel_type(local, sdata, | 3272 | } |
3138 | channel_type)); | 3273 | sdata->needed_rx_chains = min(chains, local->rx_chains); |
3274 | } else { | ||
3275 | sdata->needed_rx_chains = 1; | ||
3276 | sdata->u.mgd.flags |= IEEE80211_STA_DISABLE_HT; | ||
3139 | } | 3277 | } |
3140 | 3278 | ||
3141 | local->oper_channel = cbss->channel; | 3279 | /* will change later if needed */ |
3142 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); | 3280 | sdata->smps_mode = IEEE80211_SMPS_OFF; |
3143 | 3281 | ||
3144 | return 0; | 3282 | ieee80211_vif_release_channel(sdata); |
3283 | cfg80211_chandef_create(&chandef, cbss->channel, channel_type); | ||
3284 | return ieee80211_vif_use_channel(sdata, &chandef, | ||
3285 | IEEE80211_CHANCTX_SHARED); | ||
3145 | } | 3286 | } |
3146 | 3287 | ||
3147 | static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, | 3288 | static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, |
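
The rewritten ieee80211_prep_channel() maps the AP's HT operation secondary-channel offset to an HT20/HT40 channel type, honouring the regulatory no-HT40 flags, and then builds a chandef for ieee80211_vif_use_channel(). A compact sketch of that mapping with hypothetical enum values in place of the nl80211 ones:

    #include <stdio.h>

    /* stand-ins for the nl80211 channel types and HT parameters used above */
    enum chan_type { CHAN_HT20, CHAN_HT40PLUS, CHAN_HT40MINUS };
    enum sec_offset { SEC_NONE, SEC_ABOVE, SEC_BELOW };

    /* ht40_plus_ok / ht40_minus_ok model the IEEE80211_CHAN_NO_HT40* flags */
    static enum chan_type pick_channel_type(enum sec_offset off,
                                            int ht40_plus_ok, int ht40_minus_ok)
    {
        switch (off) {
        case SEC_ABOVE:
            return ht40_plus_ok ? CHAN_HT40PLUS : CHAN_HT20;
        case SEC_BELOW:
            return ht40_minus_ok ? CHAN_HT40MINUS : CHAN_HT20;
        default:
            return CHAN_HT20;
        }
    }

    int main(void)
    {
        /* AP advertises the secondary channel above, HT40+ allowed */
        printf("%d\n", pick_channel_type(SEC_ABOVE, 1, 1)); /* CHAN_HT40PLUS */
        /* HT40- forbidden by the channel flags -> fall back to HT20 */
        printf("%d\n", pick_channel_type(SEC_BELOW, 1, 0)); /* CHAN_HT20 */
        return 0;
    }
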
@@ -3211,7 +3352,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, | |||
3211 | sdata->vif.bss_conf.basic_rates = basic_rates; | 3352 | sdata->vif.bss_conf.basic_rates = basic_rates; |
3212 | 3353 | ||
3213 | /* cf. IEEE 802.11 9.2.12 */ | 3354 | /* cf. IEEE 802.11 9.2.12 */ |
3214 | if (local->oper_channel->band == IEEE80211_BAND_2GHZ && | 3355 | if (cbss->channel->band == IEEE80211_BAND_2GHZ && |
3215 | have_higher_than_11mbit) | 3356 | have_higher_than_11mbit) |
3216 | sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; | 3357 | sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; |
3217 | else | 3358 | else |
@@ -3273,19 +3414,33 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, | |||
3273 | case NL80211_AUTHTYPE_NETWORK_EAP: | 3414 | case NL80211_AUTHTYPE_NETWORK_EAP: |
3274 | auth_alg = WLAN_AUTH_LEAP; | 3415 | auth_alg = WLAN_AUTH_LEAP; |
3275 | break; | 3416 | break; |
3417 | case NL80211_AUTHTYPE_SAE: | ||
3418 | auth_alg = WLAN_AUTH_SAE; | ||
3419 | break; | ||
3276 | default: | 3420 | default: |
3277 | return -EOPNOTSUPP; | 3421 | return -EOPNOTSUPP; |
3278 | } | 3422 | } |
3279 | 3423 | ||
3280 | auth_data = kzalloc(sizeof(*auth_data) + req->ie_len, GFP_KERNEL); | 3424 | auth_data = kzalloc(sizeof(*auth_data) + req->sae_data_len + |
3425 | req->ie_len, GFP_KERNEL); | ||
3281 | if (!auth_data) | 3426 | if (!auth_data) |
3282 | return -ENOMEM; | 3427 | return -ENOMEM; |
3283 | 3428 | ||
3284 | auth_data->bss = req->bss; | 3429 | auth_data->bss = req->bss; |
3285 | 3430 | ||
3431 | if (req->sae_data_len >= 4) { | ||
3432 | __le16 *pos = (__le16 *) req->sae_data; | ||
3433 | auth_data->sae_trans = le16_to_cpu(pos[0]); | ||
3434 | auth_data->sae_status = le16_to_cpu(pos[1]); | ||
3435 | memcpy(auth_data->data, req->sae_data + 4, | ||
3436 | req->sae_data_len - 4); | ||
3437 | auth_data->data_len += req->sae_data_len - 4; | ||
3438 | } | ||
3439 | |||
3286 | if (req->ie && req->ie_len) { | 3440 | if (req->ie && req->ie_len) { |
3287 | memcpy(auth_data->ie, req->ie, req->ie_len); | 3441 | memcpy(&auth_data->data[auth_data->data_len], |
3288 | auth_data->ie_len = req->ie_len; | 3442 | req->ie, req->ie_len); |
3443 | auth_data->data_len += req->ie_len; | ||
3289 | } | 3444 | } |
3290 | 3445 | ||
3291 | if (req->key && req->key_len) { | 3446 | if (req->key && req->key_len) { |
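
The SAE data passed down from user space is laid out as two little-endian 16-bit fields (transaction number, status code) followed by the commit/confirm payload, which is why the hunk above copies everything past the first four bytes into auth_data->data. A standalone sketch of that parsing, using an invented buffer:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint16_t get_le16(const uint8_t *p)
    {
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
    }

    int main(void)
    {
        /* first 4 bytes: transaction number and status code, little endian;
         * everything after that is the SAE commit/confirm payload */
        const uint8_t sae_data[] = { 0x01, 0x00, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
        uint8_t payload[32];

        uint16_t trans  = get_le16(&sae_data[0]);
        uint16_t status = get_le16(&sae_data[2]);
        size_t payload_len = sizeof(sae_data) - 4;

        memcpy(payload, sae_data + 4, payload_len);

        printf("trans=%u status=%u payload_len=%zu\n",
               trans, status, payload_len);
        return 0;
    }
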
@@ -3388,13 +3543,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
3388 | 3543 | ||
3389 | /* prepare assoc data */ | 3544 | /* prepare assoc data */ |
3390 | 3545 | ||
3391 | /* | ||
3392 | * keep only the 40 MHz disable bit set as it might have | ||
3393 | * been set during authentication already, all other bits | ||
3394 | * should be reset for a new connection | ||
3395 | */ | ||
3396 | ifmgd->flags &= IEEE80211_STA_DISABLE_40MHZ; | ||
3397 | |||
3398 | ifmgd->beacon_crc_valid = false; | 3546 | ifmgd->beacon_crc_valid = false; |
3399 | 3547 | ||
3400 | /* | 3548 | /* |
@@ -3408,7 +3556,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
3408 | if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || | 3556 | if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || |
3409 | req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || | 3557 | req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP || |
3410 | req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) { | 3558 | req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) { |
3411 | ifmgd->flags |= IEEE80211_STA_DISABLE_11N; | 3559 | ifmgd->flags |= IEEE80211_STA_DISABLE_HT; |
3412 | ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; | 3560 | ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; |
3413 | netdev_info(sdata->dev, | 3561 | netdev_info(sdata->dev, |
3414 | "disabling HT/VHT due to WEP/TKIP use\n"); | 3562 | "disabling HT/VHT due to WEP/TKIP use\n"); |
@@ -3416,7 +3564,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
3416 | } | 3564 | } |
3417 | 3565 | ||
3418 | if (req->flags & ASSOC_REQ_DISABLE_HT) { | 3566 | if (req->flags & ASSOC_REQ_DISABLE_HT) { |
3419 | ifmgd->flags |= IEEE80211_STA_DISABLE_11N; | 3567 | ifmgd->flags |= IEEE80211_STA_DISABLE_HT; |
3420 | ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; | 3568 | ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; |
3421 | } | 3569 | } |
3422 | 3570 | ||
@@ -3424,7 +3572,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
3424 | sband = local->hw.wiphy->bands[req->bss->channel->band]; | 3572 | sband = local->hw.wiphy->bands[req->bss->channel->band]; |
3425 | if (!sband->ht_cap.ht_supported || | 3573 | if (!sband->ht_cap.ht_supported || |
3426 | local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) { | 3574 | local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) { |
3427 | ifmgd->flags |= IEEE80211_STA_DISABLE_11N; | 3575 | ifmgd->flags |= IEEE80211_STA_DISABLE_HT; |
3428 | if (!bss->wmm_used) | 3576 | if (!bss->wmm_used) |
3429 | netdev_info(sdata->dev, | 3577 | netdev_info(sdata->dev, |
3430 | "disabling HT as WMM/QoS is not supported by the AP\n"); | 3578 | "disabling HT as WMM/QoS is not supported by the AP\n"); |
@@ -3452,11 +3600,11 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
3452 | 3600 | ||
3453 | if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) { | 3601 | if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) { |
3454 | if (ifmgd->powersave) | 3602 | if (ifmgd->powersave) |
3455 | ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC; | 3603 | sdata->smps_mode = IEEE80211_SMPS_DYNAMIC; |
3456 | else | 3604 | else |
3457 | ifmgd->ap_smps = IEEE80211_SMPS_OFF; | 3605 | sdata->smps_mode = IEEE80211_SMPS_OFF; |
3458 | } else | 3606 | } else |
3459 | ifmgd->ap_smps = ifmgd->req_smps; | 3607 | sdata->smps_mode = ifmgd->req_smps; |
3460 | 3608 | ||
3461 | assoc_data->capability = req->bss->capability; | 3609 | assoc_data->capability = req->bss->capability; |
3462 | assoc_data->wmm = bss->wmm_used && | 3610 | assoc_data->wmm = bss->wmm_used && |
@@ -3469,7 +3617,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
3469 | assoc_data->ap_ht_param = | 3617 | assoc_data->ap_ht_param = |
3470 | ((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param; | 3618 | ((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param; |
3471 | else | 3619 | else |
3472 | ifmgd->flags |= IEEE80211_STA_DISABLE_11N; | 3620 | ifmgd->flags |= IEEE80211_STA_DISABLE_HT; |
3473 | 3621 | ||
3474 | if (bss->wmm_used && bss->uapsd_supported && | 3622 | if (bss->wmm_used && bss->uapsd_supported && |
3475 | (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { | 3623 | (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { |
@@ -3560,40 +3708,44 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
3560 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 3708 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
3561 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; | 3709 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; |
3562 | bool tx = !req->local_state_change; | 3710 | bool tx = !req->local_state_change; |
3711 | bool sent_frame = false; | ||
3563 | 3712 | ||
3564 | mutex_lock(&ifmgd->mtx); | 3713 | mutex_lock(&ifmgd->mtx); |
3565 | 3714 | ||
3566 | if (ifmgd->auth_data) { | ||
3567 | ieee80211_destroy_auth_data(sdata, false); | ||
3568 | mutex_unlock(&ifmgd->mtx); | ||
3569 | return 0; | ||
3570 | } | ||
3571 | |||
3572 | sdata_info(sdata, | 3715 | sdata_info(sdata, |
3573 | "deauthenticating from %pM by local choice (reason=%d)\n", | 3716 | "deauthenticating from %pM by local choice (reason=%d)\n", |
3574 | req->bssid, req->reason_code); | 3717 | req->bssid, req->reason_code); |
3575 | 3718 | ||
3576 | if (ifmgd->associated && | 3719 | if (ifmgd->auth_data) { |
3577 | ether_addr_equal(ifmgd->associated->bssid, req->bssid)) { | ||
3578 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, | ||
3579 | req->reason_code, tx, frame_buf); | ||
3580 | } else { | ||
3581 | drv_mgd_prepare_tx(sdata->local, sdata); | 3720 | drv_mgd_prepare_tx(sdata->local, sdata); |
3582 | ieee80211_send_deauth_disassoc(sdata, req->bssid, | 3721 | ieee80211_send_deauth_disassoc(sdata, req->bssid, |
3583 | IEEE80211_STYPE_DEAUTH, | 3722 | IEEE80211_STYPE_DEAUTH, |
3584 | req->reason_code, tx, | 3723 | req->reason_code, tx, |
3585 | frame_buf); | 3724 | frame_buf); |
3725 | ieee80211_destroy_auth_data(sdata, false); | ||
3726 | mutex_unlock(&ifmgd->mtx); | ||
3727 | |||
3728 | sent_frame = tx; | ||
3729 | goto out; | ||
3586 | } | 3730 | } |
3587 | 3731 | ||
3732 | if (ifmgd->associated && | ||
3733 | ether_addr_equal(ifmgd->associated->bssid, req->bssid)) { | ||
3734 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, | ||
3735 | req->reason_code, tx, frame_buf); | ||
3736 | sent_frame = tx; | ||
3737 | } | ||
3588 | mutex_unlock(&ifmgd->mtx); | 3738 | mutex_unlock(&ifmgd->mtx); |
3589 | 3739 | ||
3590 | __cfg80211_send_deauth(sdata->dev, frame_buf, | 3740 | out: |
3591 | IEEE80211_DEAUTH_FRAME_LEN); | ||
3592 | |||
3593 | mutex_lock(&sdata->local->mtx); | 3741 | mutex_lock(&sdata->local->mtx); |
3594 | ieee80211_recalc_idle(sdata->local); | 3742 | ieee80211_recalc_idle(sdata->local); |
3595 | mutex_unlock(&sdata->local->mtx); | 3743 | mutex_unlock(&sdata->local->mtx); |
3596 | 3744 | ||
3745 | if (sent_frame) | ||
3746 | __cfg80211_send_deauth(sdata->dev, frame_buf, | ||
3747 | IEEE80211_DEAUTH_FRAME_LEN); | ||
3748 | |||
3597 | return 0; | 3749 | return 0; |
3598 | } | 3750 | } |
3599 | 3751 | ||
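
The reworked deauth path sends (or locally drops) the frame first and only calls __cfg80211_send_deauth() at the end, keyed off a sent_frame flag, so an aborted authentication and a normal disassociation share one exit path. A simplified control-flow sketch, with booleans standing in for the real state:

    #include <stdbool.h>
    #include <stdio.h>

    /* transmit (or locally drop) first, report to cfg80211 only if a frame
     * actually went out over the air */
    static void deauth(bool have_auth_data, bool associated, bool local_only)
    {
        bool tx = !local_only;
        bool sent_frame = false;

        if (have_auth_data) {
            /* abort an authentication in progress */
            sent_frame = tx;
            goto out;
        }

        if (associated)
            sent_frame = tx;    /* disassociate from the current AP */
    out:
        if (sent_frame)
            printf("reporting deauth frame to cfg80211\n");
        else
            printf("local state change only\n");
    }

    int main(void)
    {
        deauth(false, true, false);   /* associated, frame goes out */
        deauth(true, false, true);    /* local-only abort of an auth attempt */
        return 0;
    }
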
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 83608ac16780..5abddfe3e101 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c | |||
@@ -107,6 +107,9 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, | |||
107 | { | 107 | { |
108 | struct ieee80211_sub_if_data *sdata; | 108 | struct ieee80211_sub_if_data *sdata; |
109 | 109 | ||
110 | if (WARN_ON(local->use_chanctx)) | ||
111 | return; | ||
112 | |||
110 | /* | 113 | /* |
111 | * notify the AP about us leaving the channel and stop all | 114 | * notify the AP about us leaving the channel and stop all |
112 | * STA interfaces. | 115 | * STA interfaces. |
@@ -145,6 +148,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local, | |||
145 | { | 148 | { |
146 | struct ieee80211_sub_if_data *sdata; | 149 | struct ieee80211_sub_if_data *sdata; |
147 | 150 | ||
151 | if (WARN_ON(local->use_chanctx)) | ||
152 | return; | ||
153 | |||
148 | mutex_lock(&local->iflist_mtx); | 154 | mutex_lock(&local->iflist_mtx); |
149 | list_for_each_entry(sdata, &local->interfaces, list) { | 155 | list_for_each_entry(sdata, &local->interfaces, list) { |
150 | if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) | 156 | if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) |
@@ -193,13 +199,14 @@ void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc) | |||
193 | 199 | ||
194 | if (roc->mgmt_tx_cookie) { | 200 | if (roc->mgmt_tx_cookie) { |
195 | if (!WARN_ON(!roc->frame)) { | 201 | if (!WARN_ON(!roc->frame)) { |
196 | ieee80211_tx_skb(roc->sdata, roc->frame); | 202 | ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7, |
203 | roc->chan->band); | ||
197 | roc->frame = NULL; | 204 | roc->frame = NULL; |
198 | } | 205 | } |
199 | } else { | 206 | } else { |
200 | cfg80211_ready_on_channel(&roc->sdata->wdev, (unsigned long)roc, | 207 | cfg80211_ready_on_channel(&roc->sdata->wdev, roc->cookie, |
201 | roc->chan, roc->chan_type, | 208 | roc->chan, roc->req_duration, |
202 | roc->req_duration, GFP_KERNEL); | 209 | GFP_KERNEL); |
203 | } | 210 | } |
204 | 211 | ||
205 | roc->notified = true; | 212 | roc->notified = true; |
@@ -276,8 +283,7 @@ void ieee80211_start_next_roc(struct ieee80211_local *local) | |||
276 | if (!duration) | 283 | if (!duration) |
277 | duration = 10; | 284 | duration = 10; |
278 | 285 | ||
279 | ret = drv_remain_on_channel(local, roc->chan, | 286 | ret = drv_remain_on_channel(local, roc->sdata, roc->chan, |
280 | roc->chan_type, | ||
281 | duration); | 287 | duration); |
282 | 288 | ||
283 | roc->started = true; | 289 | roc->started = true; |
@@ -313,8 +319,7 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) | |||
313 | 319 | ||
314 | if (!roc->mgmt_tx_cookie) | 320 | if (!roc->mgmt_tx_cookie) |
315 | cfg80211_remain_on_channel_expired(&roc->sdata->wdev, | 321 | cfg80211_remain_on_channel_expired(&roc->sdata->wdev, |
316 | (unsigned long)roc, | 322 | roc->cookie, roc->chan, |
317 | roc->chan, roc->chan_type, | ||
318 | GFP_KERNEL); | 323 | GFP_KERNEL); |
319 | 324 | ||
320 | list_for_each_entry_safe(dep, tmp, &roc->dependents, list) | 325 | list_for_each_entry_safe(dep, tmp, &roc->dependents, list) |
@@ -353,7 +358,6 @@ void ieee80211_sw_roc_work(struct work_struct *work) | |||
353 | ieee80211_recalc_idle(local); | 358 | ieee80211_recalc_idle(local); |
354 | 359 | ||
355 | local->tmp_channel = roc->chan; | 360 | local->tmp_channel = roc->chan; |
356 | local->tmp_channel_type = roc->chan_type; | ||
357 | ieee80211_hw_config(local, 0); | 361 | ieee80211_hw_config(local, 0); |
358 | 362 | ||
359 | /* tell userspace or send frame */ | 363 | /* tell userspace or send frame */ |
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 5c572e7a1a71..79a48f37d409 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c | |||
@@ -33,6 +33,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) | |||
33 | struct ieee80211_local *local = hw_to_local(hw); | 33 | struct ieee80211_local *local = hw_to_local(hw); |
34 | struct ieee80211_sub_if_data *sdata; | 34 | struct ieee80211_sub_if_data *sdata; |
35 | struct sta_info *sta; | 35 | struct sta_info *sta; |
36 | struct ieee80211_chanctx *ctx; | ||
36 | 37 | ||
37 | if (!local->open_count) | 38 | if (!local->open_count) |
38 | goto suspend; | 39 | goto suspend; |
@@ -135,12 +136,55 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) | |||
135 | ieee80211_bss_info_change_notify(sdata, | 136 | ieee80211_bss_info_change_notify(sdata, |
136 | BSS_CHANGED_BEACON_ENABLED); | 137 | BSS_CHANGED_BEACON_ENABLED); |
137 | 138 | ||
139 | if (sdata->vif.type == NL80211_IFTYPE_AP && | ||
140 | rcu_access_pointer(sdata->u.ap.beacon)) | ||
141 | drv_stop_ap(local, sdata); | ||
142 | |||
143 | if (local->use_chanctx) { | ||
144 | struct ieee80211_chanctx_conf *conf; | ||
145 | |||
146 | mutex_lock(&local->chanctx_mtx); | ||
147 | conf = rcu_dereference_protected( | ||
148 | sdata->vif.chanctx_conf, | ||
149 | lockdep_is_held(&local->chanctx_mtx)); | ||
150 | if (conf) { | ||
151 | ctx = container_of(conf, | ||
152 | struct ieee80211_chanctx, | ||
153 | conf); | ||
154 | drv_unassign_vif_chanctx(local, sdata, ctx); | ||
155 | } | ||
156 | |||
157 | mutex_unlock(&local->chanctx_mtx); | ||
158 | } | ||
138 | drv_remove_interface(local, sdata); | 159 | drv_remove_interface(local, sdata); |
139 | } | 160 | } |
140 | 161 | ||
141 | sdata = rtnl_dereference(local->monitor_sdata); | 162 | sdata = rtnl_dereference(local->monitor_sdata); |
142 | if (sdata) | 163 | if (sdata) { |
164 | if (local->use_chanctx) { | ||
165 | struct ieee80211_chanctx_conf *conf; | ||
166 | |||
167 | mutex_lock(&local->chanctx_mtx); | ||
168 | conf = rcu_dereference_protected( | ||
169 | sdata->vif.chanctx_conf, | ||
170 | lockdep_is_held(&local->chanctx_mtx)); | ||
171 | if (conf) { | ||
172 | ctx = container_of(conf, | ||
173 | struct ieee80211_chanctx, | ||
174 | conf); | ||
175 | drv_unassign_vif_chanctx(local, sdata, ctx); | ||
176 | } | ||
177 | |||
178 | mutex_unlock(&local->chanctx_mtx); | ||
179 | } | ||
180 | |||
143 | drv_remove_interface(local, sdata); | 181 | drv_remove_interface(local, sdata); |
182 | } | ||
183 | |||
184 | mutex_lock(&local->chanctx_mtx); | ||
185 | list_for_each_entry(ctx, &local->chanctx_list, list) | ||
186 | drv_remove_chanctx(local, ctx); | ||
187 | mutex_unlock(&local->chanctx_mtx); | ||
144 | 188 | ||
145 | /* stop hardware - this must stop RX */ | 189 | /* stop hardware - this must stop RX */ |
146 | if (local->open_count) | 190 | if (local->open_count) |
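
During suspend each interface is detached from its channel context before drv_remove_interface(), and the conf pointer is read with rcu_dereference_protected() under chanctx_mtx rather than inside an RCU read-side section. A loose userspace analogy of that locking pattern, using a pthread mutex in place of the kernel mutex and no real RCU:

    #include <pthread.h>
    #include <stdio.h>

    /* simplified model: the conf pointer is normally read under RCU, but the
     * suspend path reads it while holding the mutex that writers also take */
    static pthread_mutex_t chanctx_mtx = PTHREAD_MUTEX_INITIALIZER;

    struct conf { int chan; };
    static struct conf *vif_conf;   /* would be an __rcu pointer in the kernel */

    static void suspend_unassign(void)
    {
        pthread_mutex_lock(&chanctx_mtx);
        /* holding chanctx_mtx makes this read safe without rcu_read_lock() */
        struct conf *conf = vif_conf;
        if (conf)
            printf("unassigning vif from channel %d\n", conf->chan);
        pthread_mutex_unlock(&chanctx_mtx);
    }

    int main(void)
    {
        static struct conf c = { .chan = 36 };
        vif_conf = &c;
        suspend_unassign();
        return 0;
    }
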
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 3313c117b322..dd88381c53b7 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c | |||
@@ -391,7 +391,7 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate, | |||
391 | return; | 391 | return; |
392 | 392 | ||
393 | /* if HT BSS, and we handle a data frame, also try HT rates */ | 393 | /* if HT BSS, and we handle a data frame, also try HT rates */ |
394 | if (txrc->bss_conf->channel_type == NL80211_CHAN_NO_HT) | 394 | if (txrc->bss_conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT) |
395 | return; | 395 | return; |
396 | 396 | ||
397 | fc = hdr->frame_control; | 397 | fc = hdr->frame_control; |
@@ -408,8 +408,7 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate, | |||
408 | 408 | ||
409 | alt_rate.flags |= IEEE80211_TX_RC_MCS; | 409 | alt_rate.flags |= IEEE80211_TX_RC_MCS; |
410 | 410 | ||
411 | if ((txrc->bss_conf->channel_type == NL80211_CHAN_HT40MINUS) || | 411 | if (txrc->bss_conf->chandef.width == NL80211_CHAN_WIDTH_40) |
412 | (txrc->bss_conf->channel_type == NL80211_CHAN_HT40PLUS)) | ||
413 | alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; | 412 | alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; |
414 | 413 | ||
415 | if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) { | 414 | if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) { |
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 10de668eb9f6..301386dabf88 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h | |||
@@ -52,11 +52,21 @@ static inline void rate_control_rate_init(struct sta_info *sta) | |||
52 | struct ieee80211_sta *ista = &sta->sta; | 52 | struct ieee80211_sta *ista = &sta->sta; |
53 | void *priv_sta = sta->rate_ctrl_priv; | 53 | void *priv_sta = sta->rate_ctrl_priv; |
54 | struct ieee80211_supported_band *sband; | 54 | struct ieee80211_supported_band *sband; |
55 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
55 | 56 | ||
56 | if (!ref) | 57 | if (!ref) |
57 | return; | 58 | return; |
58 | 59 | ||
59 | sband = local->hw.wiphy->bands[local->oper_channel->band]; | 60 | rcu_read_lock(); |
61 | |||
62 | chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf); | ||
63 | if (WARN_ON(!chanctx_conf)) { | ||
64 | rcu_read_unlock(); | ||
65 | return; | ||
66 | } | ||
67 | |||
68 | sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band]; | ||
69 | rcu_read_unlock(); | ||
60 | 70 | ||
61 | ref->ops->rate_init(ref->priv, sband, ista, priv_sta); | 71 | ref->ops->rate_init(ref->priv, sband, ista, priv_sta); |
62 | set_sta_flag(sta, WLAN_STA_RATE_CONTROL); | 72 | set_sta_flag(sta, WLAN_STA_RATE_CONTROL); |
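
rate_control_rate_init() now takes the band from the station's per-vif channel context instead of the removed local->oper_channel, bailing out when no context is assigned yet. A reduced sketch of that lookup with placeholder types:

    #include <stdio.h>

    enum band { BAND_2GHZ, BAND_5GHZ };

    struct channel { enum band band; };
    struct chanctx_conf { struct channel *chan; };

    /* per-vif lookup: returns -1 when the interface has no channel context,
     * mirroring the WARN_ON + early return in the hunk above */
    static int sta_band(const struct chanctx_conf *conf)
    {
        if (!conf)
            return -1;
        return conf->chan->band;
    }

    int main(void)
    {
        struct channel chan = { .band = BAND_5GHZ };
        struct chanctx_conf conf = { .chan = &chan };

        printf("band=%d\n", sta_band(&conf));
        printf("band=%d\n", sta_band(NULL));
        return 0;
    }
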
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 00ade7feb2e3..825f33cf7bbc 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -40,6 +40,8 @@ | |||
40 | static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, | 40 | static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, |
41 | struct sk_buff *skb) | 41 | struct sk_buff *skb) |
42 | { | 42 | { |
43 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | ||
44 | |||
43 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { | 45 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { |
44 | if (likely(skb->len > FCS_LEN)) | 46 | if (likely(skb->len > FCS_LEN)) |
45 | __pskb_trim(skb, skb->len - FCS_LEN); | 47 | __pskb_trim(skb, skb->len - FCS_LEN); |
@@ -51,20 +53,25 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, | |||
51 | } | 53 | } |
52 | } | 54 | } |
53 | 55 | ||
56 | if (status->vendor_radiotap_len) | ||
57 | __pskb_pull(skb, status->vendor_radiotap_len); | ||
58 | |||
54 | return skb; | 59 | return skb; |
55 | } | 60 | } |
56 | 61 | ||
57 | static inline int should_drop_frame(struct sk_buff *skb, | 62 | static inline int should_drop_frame(struct sk_buff *skb, int present_fcs_len) |
58 | int present_fcs_len) | ||
59 | { | 63 | { |
60 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 64 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
61 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 65 | struct ieee80211_hdr *hdr; |
66 | |||
67 | hdr = (void *)(skb->data + status->vendor_radiotap_len); | ||
62 | 68 | ||
63 | if (status->flag & (RX_FLAG_FAILED_FCS_CRC | | 69 | if (status->flag & (RX_FLAG_FAILED_FCS_CRC | |
64 | RX_FLAG_FAILED_PLCP_CRC | | 70 | RX_FLAG_FAILED_PLCP_CRC | |
65 | RX_FLAG_AMPDU_IS_ZEROLEN)) | 71 | RX_FLAG_AMPDU_IS_ZEROLEN)) |
66 | return 1; | 72 | return 1; |
67 | if (unlikely(skb->len < 16 + present_fcs_len)) | 73 | if (unlikely(skb->len < 16 + present_fcs_len + |
74 | status->vendor_radiotap_len)) | ||
68 | return 1; | 75 | return 1; |
69 | if (ieee80211_is_ctl(hdr->frame_control) && | 76 | if (ieee80211_is_ctl(hdr->frame_control) && |
70 | !ieee80211_is_pspoll(hdr->frame_control) && | 77 | !ieee80211_is_pspoll(hdr->frame_control) && |
@@ -74,32 +81,48 @@ static inline int should_drop_frame(struct sk_buff *skb, | |||
74 | } | 81 | } |
75 | 82 | ||
76 | static int | 83 | static int |
77 | ieee80211_rx_radiotap_len(struct ieee80211_local *local, | 84 | ieee80211_rx_radiotap_space(struct ieee80211_local *local, |
78 | struct ieee80211_rx_status *status) | 85 | struct ieee80211_rx_status *status) |
79 | { | 86 | { |
80 | int len; | 87 | int len; |
81 | 88 | ||
82 | /* always present fields */ | 89 | /* always present fields */ |
83 | len = sizeof(struct ieee80211_radiotap_header) + 9; | 90 | len = sizeof(struct ieee80211_radiotap_header) + 9; |
84 | 91 | ||
85 | if (status->flag & RX_FLAG_MACTIME_MPDU) | 92 | /* allocate extra bitmap */ |
93 | if (status->vendor_radiotap_len) | ||
94 | len += 4; | ||
95 | |||
96 | if (ieee80211_have_rx_timestamp(status)) { | ||
97 | len = ALIGN(len, 8); | ||
86 | len += 8; | 98 | len += 8; |
99 | } | ||
87 | if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) | 100 | if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) |
88 | len += 1; | 101 | len += 1; |
89 | 102 | ||
90 | if (len & 1) /* padding for RX_FLAGS if necessary */ | 103 | /* padding for RX_FLAGS if necessary */ |
91 | len++; | 104 | len = ALIGN(len, 2); |
92 | 105 | ||
93 | if (status->flag & RX_FLAG_HT) /* HT info */ | 106 | if (status->flag & RX_FLAG_HT) /* HT info */ |
94 | len += 3; | 107 | len += 3; |
95 | 108 | ||
96 | if (status->flag & RX_FLAG_AMPDU_DETAILS) { | 109 | if (status->flag & RX_FLAG_AMPDU_DETAILS) { |
97 | /* padding */ | 110 | len = ALIGN(len, 4); |
98 | while (len & 3) | ||
99 | len++; | ||
100 | len += 8; | 111 | len += 8; |
101 | } | 112 | } |
102 | 113 | ||
114 | if (status->vendor_radiotap_len) { | ||
115 | if (WARN_ON_ONCE(status->vendor_radiotap_align == 0)) | ||
116 | status->vendor_radiotap_align = 1; | ||
117 | /* align standard part of vendor namespace */ | ||
118 | len = ALIGN(len, 2); | ||
119 | /* allocate standard part of vendor namespace */ | ||
120 | len += 6; | ||
121 | /* align vendor-defined part */ | ||
122 | len = ALIGN(len, status->vendor_radiotap_align); | ||
123 | /* vendor-defined part is already in skb */ | ||
124 | } | ||
125 | |||
103 | return len; | 126 | return len; |
104 | } | 127 | } |
105 | 128 | ||
@@ -118,6 +141,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
118 | struct ieee80211_radiotap_header *rthdr; | 141 | struct ieee80211_radiotap_header *rthdr; |
119 | unsigned char *pos; | 142 | unsigned char *pos; |
120 | u16 rx_flags = 0; | 143 | u16 rx_flags = 0; |
144 | int mpdulen; | ||
145 | |||
146 | mpdulen = skb->len; | ||
147 | if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))) | ||
148 | mpdulen += FCS_LEN; | ||
121 | 149 | ||
122 | rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); | 150 | rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); |
123 | memset(rthdr, 0, rtap_len); | 151 | memset(rthdr, 0, rtap_len); |
@@ -128,17 +156,30 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
128 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | 156 | (1 << IEEE80211_RADIOTAP_CHANNEL) | |
129 | (1 << IEEE80211_RADIOTAP_ANTENNA) | | 157 | (1 << IEEE80211_RADIOTAP_ANTENNA) | |
130 | (1 << IEEE80211_RADIOTAP_RX_FLAGS)); | 158 | (1 << IEEE80211_RADIOTAP_RX_FLAGS)); |
131 | rthdr->it_len = cpu_to_le16(rtap_len); | 159 | rthdr->it_len = cpu_to_le16(rtap_len + status->vendor_radiotap_len); |
160 | |||
161 | pos = (unsigned char *)(rthdr + 1); | ||
132 | 162 | ||
133 | pos = (unsigned char *)(rthdr+1); | 163 | if (status->vendor_radiotap_len) { |
164 | rthdr->it_present |= | ||
165 | cpu_to_le32(BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE)) | | ||
166 | cpu_to_le32(BIT(IEEE80211_RADIOTAP_EXT)); | ||
167 | put_unaligned_le32(status->vendor_radiotap_bitmap, pos); | ||
168 | pos += 4; | ||
169 | } | ||
134 | 170 | ||
135 | /* the order of the following fields is important */ | 171 | /* the order of the following fields is important */ |
136 | 172 | ||
137 | /* IEEE80211_RADIOTAP_TSFT */ | 173 | /* IEEE80211_RADIOTAP_TSFT */ |
138 | if (status->flag & RX_FLAG_MACTIME_MPDU) { | 174 | if (ieee80211_have_rx_timestamp(status)) { |
139 | put_unaligned_le64(status->mactime, pos); | 175 | /* padding */ |
140 | rthdr->it_present |= | 176 | while ((pos - (u8 *)rthdr) & 7) |
141 | cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); | 177 | *pos++ = 0; |
178 | put_unaligned_le64( | ||
179 | ieee80211_calculate_rx_timestamp(local, status, | ||
180 | mpdulen, 0), | ||
181 | pos); | ||
182 | rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); | ||
142 | pos += 8; | 183 | pos += 8; |
143 | } | 184 | } |
144 | 185 | ||
@@ -152,7 +193,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
152 | pos++; | 193 | pos++; |
153 | 194 | ||
154 | /* IEEE80211_RADIOTAP_RATE */ | 195 | /* IEEE80211_RADIOTAP_RATE */ |
155 | if (!rate || status->flag & RX_FLAG_HT) { | 196 | if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) { |
156 | /* | 197 | /* |
157 | * Without rate information don't add it. If we have, | 198 | * Without rate information don't add it. If we have, |
158 | * MCS information is a separate field in radiotap, | 199 | * MCS information is a separate field in radiotap, |
@@ -172,7 +213,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
172 | if (status->band == IEEE80211_BAND_5GHZ) | 213 | if (status->band == IEEE80211_BAND_5GHZ) |
173 | put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, | 214 | put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, |
174 | pos); | 215 | pos); |
175 | else if (status->flag & RX_FLAG_HT) | 216 | else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) |
176 | put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, | 217 | put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, |
177 | pos); | 218 | pos); |
178 | else if (rate && rate->flags & IEEE80211_RATE_ERP_G) | 219 | else if (rate && rate->flags & IEEE80211_RATE_ERP_G) |
@@ -205,7 +246,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
205 | /* IEEE80211_RADIOTAP_RX_FLAGS */ | 246 | /* IEEE80211_RADIOTAP_RX_FLAGS */ |
206 | /* ensure 2 byte alignment for the 2 byte field as required */ | 247 | /* ensure 2 byte alignment for the 2 byte field as required */ |
207 | if ((pos - (u8 *)rthdr) & 1) | 248 | if ((pos - (u8 *)rthdr) & 1) |
208 | pos++; | 249 | *pos++ = 0; |
209 | if (status->flag & RX_FLAG_FAILED_PLCP_CRC) | 250 | if (status->flag & RX_FLAG_FAILED_PLCP_CRC) |
210 | rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; | 251 | rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; |
211 | put_unaligned_le16(rx_flags, pos); | 252 | put_unaligned_le16(rx_flags, pos); |
@@ -255,6 +296,21 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
255 | *pos++ = 0; | 296 | *pos++ = 0; |
256 | *pos++ = 0; | 297 | *pos++ = 0; |
257 | } | 298 | } |
299 | |||
300 | if (status->vendor_radiotap_len) { | ||
301 | /* ensure 2 byte alignment for the vendor field as required */ | ||
302 | if ((pos - (u8 *)rthdr) & 1) | ||
303 | *pos++ = 0; | ||
304 | *pos++ = status->vendor_radiotap_oui[0]; | ||
305 | *pos++ = status->vendor_radiotap_oui[1]; | ||
306 | *pos++ = status->vendor_radiotap_oui[2]; | ||
307 | *pos++ = status->vendor_radiotap_subns; | ||
308 | put_unaligned_le16(status->vendor_radiotap_len, pos); | ||
309 | pos += 2; | ||
310 | /* align the actual payload as requested */ | ||
311 | while ((pos - (u8 *)rthdr) & (status->vendor_radiotap_align - 1)) | ||
312 | *pos++ = 0; | ||
313 | } | ||
258 | } | 314 | } |
259 | 315 | ||
260 | /* | 316 | /* |
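
The vendor namespace support appends a 6-byte standard part (3-byte OUI, sub-namespace, little-endian skip length) aligned to 2 bytes, then pads up to the alignment the driver requested for its payload. A self-contained sketch of that field emission, with a made-up OUI and alignment:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* write the standard part of a radiotap vendor namespace field:
     * 3-byte OUI, 1-byte sub-namespace, 2-byte skip length (little endian);
     * payload_align must be a power of two */
    static uint8_t *put_vendor_ns(uint8_t *pos, const uint8_t *start,
                                  const uint8_t oui[3], uint8_t subns,
                                  uint16_t skip_len, unsigned int payload_align)
    {
        if ((pos - start) & 1)        /* 2-byte alignment, zero the pad byte */
            *pos++ = 0;
        memcpy(pos, oui, 3);
        pos += 3;
        *pos++ = subns;
        pos[0] = skip_len & 0xff;     /* put_unaligned_le16() equivalent */
        pos[1] = skip_len >> 8;
        pos += 2;
        while ((pos - start) & (payload_align - 1))
            *pos++ = 0;               /* pad to the vendor payload alignment */
        return pos;
    }

    int main(void)
    {
        uint8_t buf[32] = { 0 };
        const uint8_t oui[3] = { 0x00, 0x11, 0x22 };   /* made-up OUI */

        uint8_t *end = put_vendor_ns(buf + 9, buf, oui, 0, 12, 4);
        printf("vendor payload starts at offset %td\n", end - buf);
        return 0;
    }
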
@@ -283,13 +339,13 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
283 | */ | 339 | */ |
284 | 340 | ||
285 | /* room for the radiotap header based on driver features */ | 341 | /* room for the radiotap header based on driver features */ |
286 | needed_headroom = ieee80211_rx_radiotap_len(local, status); | 342 | needed_headroom = ieee80211_rx_radiotap_space(local, status); |
287 | 343 | ||
288 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) | 344 | if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) |
289 | present_fcs_len = FCS_LEN; | 345 | present_fcs_len = FCS_LEN; |
290 | 346 | ||
291 | /* make sure hdr->frame_control is on the linear part */ | 347 | /* ensure hdr->frame_control and vendor radiotap data are in skb head */ |
292 | if (!pskb_may_pull(origskb, 2)) { | 348 | if (!pskb_may_pull(origskb, 2 + status->vendor_radiotap_len)) { |
293 | dev_kfree_skb(origskb); | 349 | dev_kfree_skb(origskb); |
294 | return NULL; | 350 | return NULL; |
295 | } | 351 | } |
@@ -374,7 +430,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
374 | return origskb; | 430 | return origskb; |
375 | } | 431 | } |
376 | 432 | ||
377 | |||
378 | static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) | 433 | static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) |
379 | { | 434 | { |
380 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | 435 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
@@ -403,10 +458,10 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) | |||
403 | * | 458 | * |
404 | * We also use that counter for non-QoS STAs. | 459 | * We also use that counter for non-QoS STAs. |
405 | */ | 460 | */ |
406 | seqno_idx = NUM_RX_DATA_QUEUES; | 461 | seqno_idx = IEEE80211_NUM_TIDS; |
407 | security_idx = 0; | 462 | security_idx = 0; |
408 | if (ieee80211_is_mgmt(hdr->frame_control)) | 463 | if (ieee80211_is_mgmt(hdr->frame_control)) |
409 | security_idx = NUM_RX_DATA_QUEUES; | 464 | security_idx = IEEE80211_NUM_TIDS; |
410 | tid = 0; | 465 | tid = 0; |
411 | } | 466 | } |
412 | 467 | ||
@@ -481,8 +536,7 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) | |||
481 | struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; | 536 | struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; |
482 | struct ieee80211_mmie *mmie; | 537 | struct ieee80211_mmie *mmie; |
483 | 538 | ||
484 | if (skb->len < 24 + sizeof(*mmie) || | 539 | if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da)) |
485 | !is_multicast_ether_addr(hdr->da)) | ||
486 | return -1; | 540 | return -1; |
487 | 541 | ||
488 | if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr)) | 542 | if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr)) |
@@ -497,9 +551,7 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) | |||
497 | return le16_to_cpu(mmie->key_id); | 551 | return le16_to_cpu(mmie->key_id); |
498 | } | 552 | } |
499 | 553 | ||
500 | 554 | static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |
501 | static ieee80211_rx_result | ||
502 | ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | ||
503 | { | 555 | { |
504 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | 556 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
505 | char *dev_addr = rx->sdata->vif.addr; | 557 | char *dev_addr = rx->sdata->vif.addr; |
@@ -507,7 +559,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
507 | if (ieee80211_is_data(hdr->frame_control)) { | 559 | if (ieee80211_is_data(hdr->frame_control)) { |
508 | if (is_multicast_ether_addr(hdr->addr1)) { | 560 | if (is_multicast_ether_addr(hdr->addr1)) { |
509 | if (ieee80211_has_tods(hdr->frame_control) || | 561 | if (ieee80211_has_tods(hdr->frame_control) || |
510 | !ieee80211_has_fromds(hdr->frame_control)) | 562 | !ieee80211_has_fromds(hdr->frame_control)) |
511 | return RX_DROP_MONITOR; | 563 | return RX_DROP_MONITOR; |
512 | if (ether_addr_equal(hdr->addr3, dev_addr)) | 564 | if (ether_addr_equal(hdr->addr3, dev_addr)) |
513 | return RX_DROP_MONITOR; | 565 | return RX_DROP_MONITOR; |
@@ -539,7 +591,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
539 | mgmt = (struct ieee80211_mgmt *)hdr; | 591 | mgmt = (struct ieee80211_mgmt *)hdr; |
540 | category = mgmt->u.action.category; | 592 | category = mgmt->u.action.category; |
541 | if (category != WLAN_CATEGORY_MESH_ACTION && | 593 | if (category != WLAN_CATEGORY_MESH_ACTION && |
542 | category != WLAN_CATEGORY_SELF_PROTECTED) | 594 | category != WLAN_CATEGORY_SELF_PROTECTED) |
543 | return RX_DROP_MONITOR; | 595 | return RX_DROP_MONITOR; |
544 | return RX_CONTINUE; | 596 | return RX_CONTINUE; |
545 | } | 597 | } |
@@ -551,7 +603,6 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
551 | return RX_CONTINUE; | 603 | return RX_CONTINUE; |
552 | 604 | ||
553 | return RX_DROP_MONITOR; | 605 | return RX_DROP_MONITOR; |
554 | |||
555 | } | 606 | } |
556 | 607 | ||
557 | return RX_CONTINUE; | 608 | return RX_CONTINUE; |
@@ -575,7 +626,6 @@ static inline u16 seq_sub(u16 sq1, u16 sq2) | |||
575 | return (sq1 - sq2) & SEQ_MASK; | 626 | return (sq1 - sq2) & SEQ_MASK; |
576 | } | 627 | } |
577 | 628 | ||
578 | |||
579 | static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, | 629 | static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, |
580 | struct tid_ampdu_rx *tid_agg_rx, | 630 | struct tid_ampdu_rx *tid_agg_rx, |
581 | int index) | 631 | int index) |
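The seq_sub() helper kept as context above performs modular arithmetic on 802.11 sequence numbers, and the reorder code uses that distance to decide whether a frame falls inside the current reorder window. A hedged sketch, assuming the usual 12-bit sequence space (SEQ_MASK == 0x0fff); the window-check helper is an illustration, not part of this patch.

#include <stdint.h>
#include <stdbool.h>

#define SEQ_MASK 0x0fff	/* 12-bit 802.11 sequence space (assumed) */

static uint16_t seq_sub(uint16_t sq1, uint16_t sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}

/* True when 'seq' lies within the reorder window starting at
 * 'head_seq' and spanning 'buf_size' slots, wrap-around included. */
static bool seq_in_window(uint16_t head_seq, uint16_t seq, uint16_t buf_size)
{
	return seq_sub(seq, head_seq) < buf_size;
}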
@@ -1148,12 +1198,19 @@ ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx) | |||
1148 | return RX_CONTINUE; | 1198 | return RX_CONTINUE; |
1149 | } | 1199 | } |
1150 | 1200 | ||
1151 | static void ap_sta_ps_start(struct sta_info *sta) | 1201 | static void sta_ps_start(struct sta_info *sta) |
1152 | { | 1202 | { |
1153 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 1203 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
1154 | struct ieee80211_local *local = sdata->local; | 1204 | struct ieee80211_local *local = sdata->local; |
1205 | struct ps_data *ps; | ||
1155 | 1206 | ||
1156 | atomic_inc(&sdata->bss->num_sta_ps); | 1207 | if (sta->sdata->vif.type == NL80211_IFTYPE_AP || |
1208 | sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | ||
1209 | ps = &sdata->bss->ps; | ||
1210 | else | ||
1211 | return; | ||
1212 | |||
1213 | atomic_inc(&ps->num_sta_ps); | ||
1157 | set_sta_flag(sta, WLAN_STA_PS_STA); | 1214 | set_sta_flag(sta, WLAN_STA_PS_STA); |
1158 | if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) | 1215 | if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) |
1159 | drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); | 1216 | drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); |
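The pattern introduced in sta_ps_start() above, and repeated in free_sta_work(), clear_sta_ps_flags() and sta_info_recalc_tim() further down, is that power-save bookkeeping now lives in a struct ps_data owned by the BSS, so only AP and AP-VLAN interfaces carry such state and every other interface type bails out early. A simplified stand-alone sketch of that selection; the types below are stand-ins, not mac80211's.

#include <stddef.h>

enum iftype { IFTYPE_STATION, IFTYPE_ADHOC, IFTYPE_AP, IFTYPE_AP_VLAN };

struct ps_data { int num_sta_ps; };		/* stand-in for the new struct */
struct bss     { struct ps_data ps; };
struct iface   { enum iftype type; struct bss *bss; };

/* Only AP and AP-VLAN interfaces own buffering/PS state; everything
 * else has none, so callers return without touching counters or TIM. */
static struct ps_data *iface_ps(struct iface *iface)
{
	if (iface->type == IFTYPE_AP || iface->type == IFTYPE_AP_VLAN)
		return &iface->bss->ps;
	return NULL;
}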
@@ -1161,7 +1218,7 @@ static void ap_sta_ps_start(struct sta_info *sta) | |||
1161 | sta->sta.addr, sta->sta.aid); | 1218 | sta->sta.addr, sta->sta.aid); |
1162 | } | 1219 | } |
1163 | 1220 | ||
1164 | static void ap_sta_ps_end(struct sta_info *sta) | 1221 | static void sta_ps_end(struct sta_info *sta) |
1165 | { | 1222 | { |
1166 | ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n", | 1223 | ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n", |
1167 | sta->sta.addr, sta->sta.aid); | 1224 | sta->sta.addr, sta->sta.aid); |
@@ -1188,9 +1245,9 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start) | |||
1188 | return -EINVAL; | 1245 | return -EINVAL; |
1189 | 1246 | ||
1190 | if (start) | 1247 | if (start) |
1191 | ap_sta_ps_start(sta_inf); | 1248 | sta_ps_start(sta_inf); |
1192 | else | 1249 | else |
1193 | ap_sta_ps_end(sta_inf); | 1250 | sta_ps_end(sta_inf); |
1194 | 1251 | ||
1195 | return 0; | 1252 | return 0; |
1196 | } | 1253 | } |
@@ -1284,17 +1341,22 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
1284 | 1341 | ||
1285 | /* | 1342 | /* |
1286 | * Update last_rx only for IBSS packets which are for the current | 1343 | * Update last_rx only for IBSS packets which are for the current |
1287 | * BSSID to avoid keeping the current IBSS network alive in cases | 1344 | * BSSID and for station already AUTHORIZED to avoid keeping the |
1288 | * where other STAs start using different BSSID. | 1345 | * current IBSS network alive in cases where other STAs start |
1346 | * using different BSSID. This will also give the station another | ||
1347 | * chance to restart the authentication/authorization in case | ||
1348 | * something went wrong the first time. | ||
1289 | */ | 1349 | */ |
1290 | if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { | 1350 | if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { |
1291 | u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, | 1351 | u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, |
1292 | NL80211_IFTYPE_ADHOC); | 1352 | NL80211_IFTYPE_ADHOC); |
1293 | if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) { | 1353 | if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) && |
1354 | test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { | ||
1294 | sta->last_rx = jiffies; | 1355 | sta->last_rx = jiffies; |
1295 | if (ieee80211_is_data(hdr->frame_control)) { | 1356 | if (ieee80211_is_data(hdr->frame_control)) { |
1296 | sta->last_rx_rate_idx = status->rate_idx; | 1357 | sta->last_rx_rate_idx = status->rate_idx; |
1297 | sta->last_rx_rate_flag = status->flag; | 1358 | sta->last_rx_rate_flag = status->flag; |
1359 | sta->last_rx_rate_vht_nss = status->vht_nss; | ||
1298 | } | 1360 | } |
1299 | } | 1361 | } |
1300 | } else if (!is_multicast_ether_addr(hdr->addr1)) { | 1362 | } else if (!is_multicast_ether_addr(hdr->addr1)) { |
@@ -1306,6 +1368,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
1306 | if (ieee80211_is_data(hdr->frame_control)) { | 1368 | if (ieee80211_is_data(hdr->frame_control)) { |
1307 | sta->last_rx_rate_idx = status->rate_idx; | 1369 | sta->last_rx_rate_idx = status->rate_idx; |
1308 | sta->last_rx_rate_flag = status->flag; | 1370 | sta->last_rx_rate_flag = status->flag; |
1371 | sta->last_rx_rate_vht_nss = status->vht_nss; | ||
1309 | } | 1372 | } |
1310 | } | 1373 | } |
1311 | 1374 | ||
@@ -1342,10 +1405,10 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
1342 | */ | 1405 | */ |
1343 | if (ieee80211_is_data(hdr->frame_control) && | 1406 | if (ieee80211_is_data(hdr->frame_control) && |
1344 | !ieee80211_has_pm(hdr->frame_control)) | 1407 | !ieee80211_has_pm(hdr->frame_control)) |
1345 | ap_sta_ps_end(sta); | 1408 | sta_ps_end(sta); |
1346 | } else { | 1409 | } else { |
1347 | if (ieee80211_has_pm(hdr->frame_control)) | 1410 | if (ieee80211_has_pm(hdr->frame_control)) |
1348 | ap_sta_ps_start(sta); | 1411 | sta_ps_start(sta); |
1349 | } | 1412 | } |
1350 | } | 1413 | } |
1351 | 1414 | ||
@@ -1391,9 +1454,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, | |||
1391 | struct sk_buff **skb) | 1454 | struct sk_buff **skb) |
1392 | { | 1455 | { |
1393 | struct ieee80211_fragment_entry *entry; | 1456 | struct ieee80211_fragment_entry *entry; |
1394 | int idx; | ||
1395 | 1457 | ||
1396 | idx = sdata->fragment_next; | ||
1397 | entry = &sdata->fragments[sdata->fragment_next++]; | 1458 | entry = &sdata->fragments[sdata->fragment_next++]; |
1398 | if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) | 1459 | if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) |
1399 | sdata->fragment_next = 0; | 1460 | sdata->fragment_next = 0; |
@@ -1580,18 +1641,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1580 | return RX_CONTINUE; | 1641 | return RX_CONTINUE; |
1581 | } | 1642 | } |
1582 | 1643 | ||
1583 | static int | 1644 | static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) |
1584 | ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) | ||
1585 | { | 1645 | { |
1586 | if (unlikely(!rx->sta || | 1646 | if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) |
1587 | !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) | ||
1588 | return -EACCES; | 1647 | return -EACCES; |
1589 | 1648 | ||
1590 | return 0; | 1649 | return 0; |
1591 | } | 1650 | } |
1592 | 1651 | ||
1593 | static int | 1652 | static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) |
1594 | ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) | ||
1595 | { | 1653 | { |
1596 | struct sk_buff *skb = rx->skb; | 1654 | struct sk_buff *skb = rx->skb; |
1597 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 1655 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
@@ -1613,8 +1671,7 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) | |||
1613 | return 0; | 1671 | return 0; |
1614 | } | 1672 | } |
1615 | 1673 | ||
1616 | static int | 1674 | static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) |
1617 | ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) | ||
1618 | { | 1675 | { |
1619 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | 1676 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
1620 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); | 1677 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); |
@@ -1998,7 +2055,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1998 | } else { | 2055 | } else { |
1999 | /* unable to resolve next hop */ | 2056 | /* unable to resolve next hop */ |
2000 | mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3, | 2057 | mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3, |
2001 | 0, reason, fwd_hdr->addr2, sdata); | 2058 | 0, reason, fwd_hdr->addr2, sdata); |
2002 | IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); | 2059 | IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); |
2003 | kfree_skb(fwd_skb); | 2060 | kfree_skb(fwd_skb); |
2004 | return RX_DROP_MONITOR; | 2061 | return RX_DROP_MONITOR; |
@@ -2207,7 +2264,7 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) | |||
2207 | 2264 | ||
2208 | cfg80211_report_obss_beacon(rx->local->hw.wiphy, | 2265 | cfg80211_report_obss_beacon(rx->local->hw.wiphy, |
2209 | rx->skb->data, rx->skb->len, | 2266 | rx->skb->data, rx->skb->len, |
2210 | status->freq, sig, GFP_ATOMIC); | 2267 | status->freq, sig); |
2211 | rx->flags |= IEEE80211_RX_BEACON_REPORTED; | 2268 | rx->flags |= IEEE80211_RX_BEACON_REPORTED; |
2212 | } | 2269 | } |
2213 | 2270 | ||
@@ -2407,7 +2464,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
2407 | if (!ieee80211_vif_is_mesh(&sdata->vif)) | 2464 | if (!ieee80211_vif_is_mesh(&sdata->vif)) |
2408 | break; | 2465 | break; |
2409 | if (mesh_action_is_path_sel(mgmt) && | 2466 | if (mesh_action_is_path_sel(mgmt) && |
2410 | (!mesh_path_sel_is_hwmp(sdata))) | 2467 | !mesh_path_sel_is_hwmp(sdata)) |
2411 | break; | 2468 | break; |
2412 | goto queue; | 2469 | goto queue; |
2413 | } | 2470 | } |
@@ -2463,7 +2520,6 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) | |||
2463 | return RX_QUEUED; | 2520 | return RX_QUEUED; |
2464 | } | 2521 | } |
2465 | 2522 | ||
2466 | |||
2467 | return RX_CONTINUE; | 2523 | return RX_CONTINUE; |
2468 | } | 2524 | } |
2469 | 2525 | ||
@@ -2593,7 +2649,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, | |||
2593 | goto out_free_skb; | 2649 | goto out_free_skb; |
2594 | 2650 | ||
2595 | /* room for the radiotap header based on driver features */ | 2651 | /* room for the radiotap header based on driver features */ |
2596 | needed_headroom = ieee80211_rx_radiotap_len(local, status); | 2652 | needed_headroom = ieee80211_rx_radiotap_space(local, status); |
2597 | 2653 | ||
2598 | if (skb_headroom(skb) < needed_headroom && | 2654 | if (skb_headroom(skb) < needed_headroom && |
2599 | pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) | 2655 | pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) |
@@ -2656,7 +2712,8 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, | |||
2656 | status = IEEE80211_SKB_RXCB((rx->skb)); | 2712 | status = IEEE80211_SKB_RXCB((rx->skb)); |
2657 | 2713 | ||
2658 | sband = rx->local->hw.wiphy->bands[status->band]; | 2714 | sband = rx->local->hw.wiphy->bands[status->band]; |
2659 | if (!(status->flag & RX_FLAG_HT)) | 2715 | if (!(status->flag & RX_FLAG_HT) && |
2716 | !(status->flag & RX_FLAG_VHT)) | ||
2660 | rate = &sband->bitrates[status->rate_idx]; | 2717 | rate = &sband->bitrates[status->rate_idx]; |
2661 | 2718 | ||
2662 | ieee80211_rx_cooked_monitor(rx, rate); | 2719 | ieee80211_rx_cooked_monitor(rx, rate); |
@@ -2823,8 +2880,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, | |||
2823 | status->rx_flags &= ~IEEE80211_RX_RA_MATCH; | 2880 | status->rx_flags &= ~IEEE80211_RX_RA_MATCH; |
2824 | } else if (!rx->sta) { | 2881 | } else if (!rx->sta) { |
2825 | int rate_idx; | 2882 | int rate_idx; |
2826 | if (status->flag & RX_FLAG_HT) | 2883 | if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) |
2827 | rate_idx = 0; /* TODO: HT rates */ | 2884 | rate_idx = 0; /* TODO: HT/VHT rates */ |
2828 | else | 2885 | else |
2829 | rate_idx = status->rate_idx; | 2886 | rate_idx = status->rate_idx; |
2830 | ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, | 2887 | ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, |
@@ -3048,8 +3105,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
3048 | 3105 | ||
3049 | WARN_ON_ONCE(softirq_count() == 0); | 3106 | WARN_ON_ONCE(softirq_count() == 0); |
3050 | 3107 | ||
3051 | if (WARN_ON(status->band < 0 || | 3108 | if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) |
3052 | status->band >= IEEE80211_NUM_BANDS)) | ||
3053 | goto drop; | 3109 | goto drop; |
3054 | 3110 | ||
3055 | sband = local->hw.wiphy->bands[status->band]; | 3111 | sband = local->hw.wiphy->bands[status->band]; |
@@ -3094,17 +3150,22 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
3094 | * hardware error. The driver should catch hardware | 3150 | * hardware error. The driver should catch hardware |
3095 | * errors. | 3151 | * errors. |
3096 | */ | 3152 | */ |
3097 | if (WARN((status->rate_idx < 0 || | 3153 | if (WARN(status->rate_idx > 76, |
3098 | status->rate_idx > 76), | ||
3099 | "Rate marked as an HT rate but passed " | 3154 | "Rate marked as an HT rate but passed " |
3100 | "status->rate_idx is not " | 3155 | "status->rate_idx is not " |
3101 | "an MCS index [0-76]: %d (0x%02x)\n", | 3156 | "an MCS index [0-76]: %d (0x%02x)\n", |
3102 | status->rate_idx, | 3157 | status->rate_idx, |
3103 | status->rate_idx)) | 3158 | status->rate_idx)) |
3104 | goto drop; | 3159 | goto drop; |
3160 | } else if (status->flag & RX_FLAG_VHT) { | ||
3161 | if (WARN_ONCE(status->rate_idx > 9 || | ||
3162 | !status->vht_nss || | ||
3163 | status->vht_nss > 8, | ||
3164 | "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", | ||
3165 | status->rate_idx, status->vht_nss)) | ||
3166 | goto drop; | ||
3105 | } else { | 3167 | } else { |
3106 | if (WARN_ON(status->rate_idx < 0 || | 3168 | if (WARN_ON(status->rate_idx >= sband->n_bitrates)) |
3107 | status->rate_idx >= sband->n_bitrates)) | ||
3108 | goto drop; | 3169 | goto drop; |
3109 | rate = &sband->bitrates[status->rate_idx]; | 3170 | rate = &sband->bitrates[status->rate_idx]; |
3110 | } | 3171 | } |
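The new RX_FLAG_VHT branch above sanity-checks what the driver reported before the rate index is used any further: a VHT MCS must be 0..9 and the number of spatial streams 1..8, otherwise the frame is dropped. The same bounds can be stated as a small standalone predicate (illustrative only, with the limits written out as plain constants).

#include <stdint.h>
#include <stdbool.h>

/* Mirror of the VHT sanity check: MCS 0..9, NSS 1..8. */
static bool vht_rate_is_valid(uint8_t mcs, uint8_t nss)
{
	return mcs <= 9 && nss >= 1 && nss <= 8;
}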
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 43e60b5a7546..f3340279aba3 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -174,7 +174,6 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) | |||
174 | u8 *elements; | 174 | u8 *elements; |
175 | struct ieee80211_channel *channel; | 175 | struct ieee80211_channel *channel; |
176 | size_t baselen; | 176 | size_t baselen; |
177 | int freq; | ||
178 | bool beacon; | 177 | bool beacon; |
179 | struct ieee802_11_elems elems; | 178 | struct ieee802_11_elems elems; |
180 | 179 | ||
@@ -209,13 +208,7 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) | |||
209 | 208 | ||
210 | ieee802_11_parse_elems(elements, skb->len - baselen, &elems); | 209 | ieee802_11_parse_elems(elements, skb->len - baselen, &elems); |
211 | 210 | ||
212 | if (elems.ds_params && elems.ds_params_len == 1) | 211 | channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq); |
213 | freq = ieee80211_channel_to_frequency(elems.ds_params[0], | ||
214 | rx_status->band); | ||
215 | else | ||
216 | freq = rx_status->freq; | ||
217 | |||
218 | channel = ieee80211_get_channel(local->hw.wiphy, freq); | ||
219 | 212 | ||
220 | if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) | 213 | if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) |
221 | return; | 214 | return; |
@@ -336,6 +329,10 @@ EXPORT_SYMBOL(ieee80211_scan_completed); | |||
336 | 329 | ||
337 | static int ieee80211_start_sw_scan(struct ieee80211_local *local) | 330 | static int ieee80211_start_sw_scan(struct ieee80211_local *local) |
338 | { | 331 | { |
332 | /* Software scan is not supported in multi-channel cases */ | ||
333 | if (local->use_chanctx) | ||
334 | return -EOPNOTSUPP; | ||
335 | |||
339 | /* | 336 | /* |
340 | * Hardware/driver doesn't support hw_scan, so use software | 337 | * Hardware/driver doesn't support hw_scan, so use software |
341 | * scanning instead. First send a nullfunc frame with power save | 338 | * scanning instead. First send a nullfunc frame with power save |
@@ -417,7 +414,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, | |||
417 | local->scan_req->ie, local->scan_req->ie_len, | 414 | local->scan_req->ie, local->scan_req->ie_len, |
418 | local->scan_req->rates[band], false, | 415 | local->scan_req->rates[band], false, |
419 | local->scan_req->no_cck, | 416 | local->scan_req->no_cck, |
420 | local->hw.conf.channel); | 417 | local->hw.conf.channel, true); |
421 | 418 | ||
422 | /* | 419 | /* |
423 | * After sending probe requests, wait for probe responses | 420 | * After sending probe requests, wait for probe responses |
@@ -462,6 +459,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, | |||
462 | sizeof(*local->hw_scan_req) + | 459 | sizeof(*local->hw_scan_req) + |
463 | req->n_channels * sizeof(req->channels[0]); | 460 | req->n_channels * sizeof(req->channels[0]); |
464 | local->hw_scan_req->ie = ies; | 461 | local->hw_scan_req->ie = ies; |
462 | local->hw_scan_req->flags = req->flags; | ||
465 | 463 | ||
466 | local->hw_scan_band = 0; | 464 | local->hw_scan_band = 0; |
467 | 465 | ||
@@ -480,7 +478,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, | |||
480 | if (local->ops->hw_scan) { | 478 | if (local->ops->hw_scan) { |
481 | __set_bit(SCAN_HW_SCANNING, &local->scanning); | 479 | __set_bit(SCAN_HW_SCANNING, &local->scanning); |
482 | } else if ((req->n_channels == 1) && | 480 | } else if ((req->n_channels == 1) && |
483 | (req->channels[0] == local->oper_channel)) { | 481 | (req->channels[0] == local->_oper_channel)) { |
484 | /* | 482 | /* |
485 | * If we are scanning only on the operating channel | 483 | * If we are scanning only on the operating channel |
486 | * then we do not need to stop normal activities | 484 | * then we do not need to stop normal activities |
@@ -562,6 +560,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
562 | unsigned long min_beacon_int = 0; | 560 | unsigned long min_beacon_int = 0; |
563 | struct ieee80211_sub_if_data *sdata; | 561 | struct ieee80211_sub_if_data *sdata; |
564 | struct ieee80211_channel *next_chan; | 562 | struct ieee80211_channel *next_chan; |
563 | enum mac80211_scan_state next_scan_state; | ||
565 | 564 | ||
566 | /* | 565 | /* |
567 | * check if at least one STA interface is associated, | 566 | * check if at least one STA interface is associated, |
@@ -620,10 +619,18 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local, | |||
620 | usecs_to_jiffies(min_beacon_int * 1024) * | 619 | usecs_to_jiffies(min_beacon_int * 1024) * |
621 | local->hw.conf.listen_interval); | 620 | local->hw.conf.listen_interval); |
622 | 621 | ||
623 | if (associated && (!tx_empty || bad_latency || listen_int_exceeded)) | 622 | if (associated && !tx_empty) { |
624 | local->next_scan_state = SCAN_SUSPEND; | 623 | if (local->scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) |
625 | else | 624 | next_scan_state = SCAN_ABORT; |
626 | local->next_scan_state = SCAN_SET_CHANNEL; | 625 | else |
626 | next_scan_state = SCAN_SUSPEND; | ||
627 | } else if (associated && (bad_latency || listen_int_exceeded)) { | ||
628 | next_scan_state = SCAN_SUSPEND; | ||
629 | } else { | ||
630 | next_scan_state = SCAN_SET_CHANNEL; | ||
631 | } | ||
632 | |||
633 | local->next_scan_state = next_scan_state; | ||
627 | 634 | ||
628 | *next_delay = 0; | 635 | *next_delay = 0; |
629 | } | 636 | } |
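The reworked decision above distinguishes three outcomes: on an associated interface with pending TX, a scan requested with NL80211_SCAN_FLAG_LOW_PRIORITY is aborted outright while a normal scan is only suspended; bad latency or an exceeded listen interval likewise only suspends; a quiet system moves on to the next channel. The same priority order, restated as a standalone function (the enum and parameter names are invented for the example):

#include <stdbool.h>

enum scan_next { SCAN_NEXT_SET_CHANNEL, SCAN_NEXT_SUSPEND, SCAN_NEXT_ABORT };

/* Low-priority scans yield to pending traffic entirely; otherwise
 * traffic or latency pressure only pauses the scan; an idle system
 * keeps scanning. */
static enum scan_next scan_decide(bool associated, bool tx_empty,
				  bool bad_latency, bool listen_int_exceeded,
				  bool low_priority)
{
	if (associated && !tx_empty)
		return low_priority ? SCAN_NEXT_ABORT : SCAN_NEXT_SUSPEND;
	if (associated && (bad_latency || listen_int_exceeded))
		return SCAN_NEXT_SUSPEND;
	return SCAN_NEXT_SET_CHANNEL;
}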
@@ -794,6 +801,9 @@ void ieee80211_scan_work(struct work_struct *work) | |||
794 | case SCAN_RESUME: | 801 | case SCAN_RESUME: |
795 | ieee80211_scan_state_resume(local, &next_delay); | 802 | ieee80211_scan_state_resume(local, &next_delay); |
796 | break; | 803 | break; |
804 | case SCAN_ABORT: | ||
805 | aborted = true; | ||
806 | goto out_complete; | ||
797 | } | 807 | } |
798 | } while (next_delay == 0); | 808 | } while (next_delay == 0); |
799 | 809 | ||
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index d2eb64e12353..f3e502502fee 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -98,6 +98,7 @@ static void free_sta_work(struct work_struct *wk) | |||
98 | struct tid_ampdu_tx *tid_tx; | 98 | struct tid_ampdu_tx *tid_tx; |
99 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 99 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
100 | struct ieee80211_local *local = sdata->local; | 100 | struct ieee80211_local *local = sdata->local; |
101 | struct ps_data *ps; | ||
101 | 102 | ||
102 | /* | 103 | /* |
103 | * At this point, when being called as call_rcu callback, | 104 | * At this point, when being called as call_rcu callback, |
@@ -107,11 +108,15 @@ static void free_sta_work(struct work_struct *wk) | |||
107 | */ | 108 | */ |
108 | 109 | ||
109 | if (test_sta_flag(sta, WLAN_STA_PS_STA)) { | 110 | if (test_sta_flag(sta, WLAN_STA_PS_STA)) { |
110 | BUG_ON(!sdata->bss); | 111 | if (sta->sdata->vif.type == NL80211_IFTYPE_AP || |
112 | sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | ||
113 | ps = &sdata->bss->ps; | ||
114 | else | ||
115 | return; | ||
111 | 116 | ||
112 | clear_sta_flag(sta, WLAN_STA_PS_STA); | 117 | clear_sta_flag(sta, WLAN_STA_PS_STA); |
113 | 118 | ||
114 | atomic_dec(&sdata->bss->num_sta_ps); | 119 | atomic_dec(&ps->num_sta_ps); |
115 | sta_info_recalc_tim(sta); | 120 | sta_info_recalc_tim(sta); |
116 | } | 121 | } |
117 | 122 | ||
@@ -137,7 +142,7 @@ static void free_sta_work(struct work_struct *wk) | |||
137 | * drivers have to handle aggregation stop being requested, followed | 142 | * drivers have to handle aggregation stop being requested, followed |
138 | * directly by station destruction. | 143 | * directly by station destruction. |
139 | */ | 144 | */ |
140 | for (i = 0; i < STA_TID_NUM; i++) { | 145 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) { |
141 | tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); | 146 | tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); |
142 | if (!tid_tx) | 147 | if (!tid_tx) |
143 | continue; | 148 | continue; |
@@ -325,7 +330,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
325 | return NULL; | 330 | return NULL; |
326 | } | 331 | } |
327 | 332 | ||
328 | for (i = 0; i < STA_TID_NUM; i++) { | 333 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) { |
329 | /* | 334 | /* |
330 | * timer_to_tid must be initialized with identity mapping | 335 | * timer_to_tid must be initialized with identity mapping |
331 | * to enable session_timer's data differentiation. See | 336 | * to enable session_timer's data differentiation. See |
@@ -338,7 +343,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
338 | skb_queue_head_init(&sta->tx_filtered[i]); | 343 | skb_queue_head_init(&sta->tx_filtered[i]); |
339 | } | 344 | } |
340 | 345 | ||
341 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) | 346 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) |
342 | sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); | 347 | sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); |
343 | 348 | ||
344 | sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); | 349 | sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); |
@@ -502,22 +507,22 @@ int sta_info_insert(struct sta_info *sta) | |||
502 | return err; | 507 | return err; |
503 | } | 508 | } |
504 | 509 | ||
505 | static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid) | 510 | static inline void __bss_tim_set(u8 *tim, u16 id) |
506 | { | 511 | { |
507 | /* | 512 | /* |
508 | * This format has been mandated by the IEEE specifications, | 513 | * This format has been mandated by the IEEE specifications, |
509 | * so this line may not be changed to use the __set_bit() format. | 514 | * so this line may not be changed to use the __set_bit() format. |
510 | */ | 515 | */ |
511 | bss->tim[aid / 8] |= (1 << (aid % 8)); | 516 | tim[id / 8] |= (1 << (id % 8)); |
512 | } | 517 | } |
513 | 518 | ||
514 | static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid) | 519 | static inline void __bss_tim_clear(u8 *tim, u16 id) |
515 | { | 520 | { |
516 | /* | 521 | /* |
517 | * This format has been mandated by the IEEE specifications, | 522 | * This format has been mandated by the IEEE specifications, |
518 | * so this line may not be changed to use the __clear_bit() format. | 523 | * so this line may not be changed to use the __clear_bit() format. |
519 | */ | 524 | */ |
520 | bss->tim[aid / 8] &= ~(1 << (aid % 8)); | 525 | tim[id / 8] &= ~(1 << (id % 8)); |
521 | } | 526 | } |
522 | 527 | ||
523 | static unsigned long ieee80211_tids_for_ac(int ac) | 528 | static unsigned long ieee80211_tids_for_ac(int ac) |
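__bss_tim_set() and __bss_tim_clear() above keep the TIM partial virtual bitmap in the byte and bit layout mandated by the IEEE 802.11 specification (entry N lives in bit N % 8 of octet N / 8), which is why the open-coded shifts may not be replaced with __set_bit()/__clear_bit(). A small self-contained illustration of that layout; the test helper is added for the example and the caller is assumed to size the buffer appropriately.

#include <stdint.h>
#include <stdbool.h>

static void tim_set(uint8_t *tim, uint16_t id)
{
	tim[id / 8] |= (uint8_t)(1 << (id % 8));
}

static void tim_clear(uint8_t *tim, uint16_t id)
{
	tim[id / 8] &= (uint8_t)~(1 << (id % 8));
}

static bool tim_test(const uint8_t *tim, uint16_t id)
{
	return tim[id / 8] & (1 << (id % 8));
}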
@@ -541,14 +546,23 @@ static unsigned long ieee80211_tids_for_ac(int ac) | |||
541 | void sta_info_recalc_tim(struct sta_info *sta) | 546 | void sta_info_recalc_tim(struct sta_info *sta) |
542 | { | 547 | { |
543 | struct ieee80211_local *local = sta->local; | 548 | struct ieee80211_local *local = sta->local; |
544 | struct ieee80211_if_ap *bss = sta->sdata->bss; | 549 | struct ps_data *ps; |
545 | unsigned long flags; | 550 | unsigned long flags; |
546 | bool indicate_tim = false; | 551 | bool indicate_tim = false; |
547 | u8 ignore_for_tim = sta->sta.uapsd_queues; | 552 | u8 ignore_for_tim = sta->sta.uapsd_queues; |
548 | int ac; | 553 | int ac; |
554 | u16 id; | ||
555 | |||
556 | if (sta->sdata->vif.type == NL80211_IFTYPE_AP || | ||
557 | sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { | ||
558 | if (WARN_ON_ONCE(!sta->sdata->bss)) | ||
559 | return; | ||
549 | 560 | ||
550 | if (WARN_ON_ONCE(!sta->sdata->bss)) | 561 | ps = &sta->sdata->bss->ps; |
562 | id = sta->sta.aid; | ||
563 | } else { | ||
551 | return; | 564 | return; |
565 | } | ||
552 | 566 | ||
553 | /* No need to do anything if the driver does all */ | 567 | /* No need to do anything if the driver does all */ |
554 | if (local->hw.flags & IEEE80211_HW_AP_LINK_PS) | 568 | if (local->hw.flags & IEEE80211_HW_AP_LINK_PS) |
@@ -587,9 +601,9 @@ void sta_info_recalc_tim(struct sta_info *sta) | |||
587 | spin_lock_irqsave(&local->tim_lock, flags); | 601 | spin_lock_irqsave(&local->tim_lock, flags); |
588 | 602 | ||
589 | if (indicate_tim) | 603 | if (indicate_tim) |
590 | __bss_tim_set(bss, sta->sta.aid); | 604 | __bss_tim_set(ps->tim, id); |
591 | else | 605 | else |
592 | __bss_tim_clear(bss, sta->sta.aid); | 606 | __bss_tim_clear(ps->tim, id); |
593 | 607 | ||
594 | if (local->ops->set_tim) { | 608 | if (local->ops->set_tim) { |
595 | local->tim_in_locked_section = true; | 609 | local->tim_in_locked_section = true; |
@@ -893,8 +907,8 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, | |||
893 | continue; | 907 | continue; |
894 | 908 | ||
895 | if (time_after(jiffies, sta->last_rx + exp_time)) { | 909 | if (time_after(jiffies, sta->last_rx + exp_time)) { |
896 | ibss_dbg(sdata, "expiring inactive STA %pM\n", | 910 | sta_dbg(sta->sdata, "expiring inactive STA %pM\n", |
897 | sta->sta.addr); | 911 | sta->sta.addr); |
898 | WARN_ON(__sta_info_destroy(sta)); | 912 | WARN_ON(__sta_info_destroy(sta)); |
899 | } | 913 | } |
900 | } | 914 | } |
@@ -948,10 +962,17 @@ static void clear_sta_ps_flags(void *_sta) | |||
948 | { | 962 | { |
949 | struct sta_info *sta = _sta; | 963 | struct sta_info *sta = _sta; |
950 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 964 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
965 | struct ps_data *ps; | ||
966 | |||
967 | if (sdata->vif.type == NL80211_IFTYPE_AP || | ||
968 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | ||
969 | ps = &sdata->bss->ps; | ||
970 | else | ||
971 | return; | ||
951 | 972 | ||
952 | clear_sta_flag(sta, WLAN_STA_PS_DRIVER); | 973 | clear_sta_flag(sta, WLAN_STA_PS_DRIVER); |
953 | if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA)) | 974 | if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA)) |
954 | atomic_dec(&sdata->bss->num_sta_ps); | 975 | atomic_dec(&ps->num_sta_ps); |
955 | } | 976 | } |
956 | 977 | ||
957 | /* powersave support code */ | 978 | /* powersave support code */ |
@@ -965,7 +986,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) | |||
965 | 986 | ||
966 | clear_sta_flag(sta, WLAN_STA_SP); | 987 | clear_sta_flag(sta, WLAN_STA_SP); |
967 | 988 | ||
968 | BUILD_BUG_ON(BITS_TO_LONGS(STA_TID_NUM) > 1); | 989 | BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1); |
969 | sta->driver_buffered_tids = 0; | 990 | sta->driver_buffered_tids = 0; |
970 | 991 | ||
971 | if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) | 992 | if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) |
@@ -1013,6 +1034,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, | |||
1013 | __le16 fc; | 1034 | __le16 fc; |
1014 | bool qos = test_sta_flag(sta, WLAN_STA_WME); | 1035 | bool qos = test_sta_flag(sta, WLAN_STA_WME); |
1015 | struct ieee80211_tx_info *info; | 1036 | struct ieee80211_tx_info *info; |
1037 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
1016 | 1038 | ||
1017 | if (qos) { | 1039 | if (qos) { |
1018 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | | 1040 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | |
@@ -1062,7 +1084,16 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, | |||
1062 | 1084 | ||
1063 | drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false); | 1085 | drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false); |
1064 | 1086 | ||
1065 | ieee80211_xmit(sdata, skb); | 1087 | rcu_read_lock(); |
1088 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
1089 | if (WARN_ON(!chanctx_conf)) { | ||
1090 | rcu_read_unlock(); | ||
1091 | kfree_skb(skb); | ||
1092 | return; | ||
1093 | } | ||
1094 | |||
1095 | ieee80211_xmit(sdata, skb, chanctx_conf->def.chan->band); | ||
1096 | rcu_read_unlock(); | ||
1066 | } | 1097 | } |
1067 | 1098 | ||
1068 | static void | 1099 | static void |
@@ -1343,7 +1374,7 @@ void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta, | |||
1343 | { | 1374 | { |
1344 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); | 1375 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); |
1345 | 1376 | ||
1346 | if (WARN_ON(tid >= STA_TID_NUM)) | 1377 | if (WARN_ON(tid >= IEEE80211_NUM_TIDS)) |
1347 | return; | 1378 | return; |
1348 | 1379 | ||
1349 | if (buffered) | 1380 | if (buffered) |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index c88f161f8118..6835cea4e402 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -80,7 +80,6 @@ enum ieee80211_sta_info_flags { | |||
80 | WLAN_STA_TOFFSET_KNOWN, | 80 | WLAN_STA_TOFFSET_KNOWN, |
81 | }; | 81 | }; |
82 | 82 | ||
83 | #define STA_TID_NUM 16 | ||
84 | #define ADDBA_RESP_INTERVAL HZ | 83 | #define ADDBA_RESP_INTERVAL HZ |
85 | #define HT_AGG_MAX_RETRIES 15 | 84 | #define HT_AGG_MAX_RETRIES 15 |
86 | #define HT_AGG_BURST_RETRIES 3 | 85 | #define HT_AGG_BURST_RETRIES 3 |
@@ -197,15 +196,15 @@ struct tid_ampdu_rx { | |||
197 | struct sta_ampdu_mlme { | 196 | struct sta_ampdu_mlme { |
198 | struct mutex mtx; | 197 | struct mutex mtx; |
199 | /* rx */ | 198 | /* rx */ |
200 | struct tid_ampdu_rx __rcu *tid_rx[STA_TID_NUM]; | 199 | struct tid_ampdu_rx __rcu *tid_rx[IEEE80211_NUM_TIDS]; |
201 | unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)]; | 200 | unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; |
202 | unsigned long tid_rx_stop_requested[BITS_TO_LONGS(STA_TID_NUM)]; | 201 | unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)]; |
203 | /* tx */ | 202 | /* tx */ |
204 | struct work_struct work; | 203 | struct work_struct work; |
205 | struct tid_ampdu_tx __rcu *tid_tx[STA_TID_NUM]; | 204 | struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS]; |
206 | struct tid_ampdu_tx *tid_start_tx[STA_TID_NUM]; | 205 | struct tid_ampdu_tx *tid_start_tx[IEEE80211_NUM_TIDS]; |
207 | unsigned long last_addba_req_time[STA_TID_NUM]; | 206 | unsigned long last_addba_req_time[IEEE80211_NUM_TIDS]; |
208 | u8 addba_req_num[STA_TID_NUM]; | 207 | u8 addba_req_num[IEEE80211_NUM_TIDS]; |
209 | u8 dialog_token_allocator; | 208 | u8 dialog_token_allocator; |
210 | }; | 209 | }; |
211 | 210 | ||
@@ -228,6 +227,7 @@ struct sta_ampdu_mlme { | |||
228 | * "the" transmit rate | 227 | * "the" transmit rate |
229 | * @last_rx_rate_idx: rx status rate index of the last data packet | 228 | * @last_rx_rate_idx: rx status rate index of the last data packet |
230 | * @last_rx_rate_flag: rx status flag of the last data packet | 229 | * @last_rx_rate_flag: rx status flag of the last data packet |
230 | * @last_rx_rate_vht_nss: rx status nss of last data packet | ||
231 | * @lock: used for locking all fields that require locking, see comments | 231 | * @lock: used for locking all fields that require locking, see comments |
232 | * in the header file. | 232 | * in the header file. |
233 | * @drv_unblock_wk: used for driver PS unblocking | 233 | * @drv_unblock_wk: used for driver PS unblocking |
@@ -273,7 +273,7 @@ struct sta_ampdu_mlme { | |||
273 | * @t_offset: timing offset relative to this host | 273 | * @t_offset: timing offset relative to this host |
274 | * @t_offset_setpoint: reference timing offset of this sta to be used when | 274 | * @t_offset_setpoint: reference timing offset of this sta to be used when |
275 | * calculating clockdrift | 275 | * calculating clockdrift |
276 | * @ch_type: peer's channel type | 276 | * @ch_width: peer's channel width |
277 | * @debugfs: debug filesystem info | 277 | * @debugfs: debug filesystem info |
278 | * @dead: set to true when sta is unlinked | 278 | * @dead: set to true when sta is unlinked |
279 | * @uploaded: set to true when sta is uploaded to the driver | 279 | * @uploaded: set to true when sta is uploaded to the driver |
@@ -330,7 +330,7 @@ struct sta_info { | |||
330 | int last_signal; | 330 | int last_signal; |
331 | struct ewma avg_signal; | 331 | struct ewma avg_signal; |
332 | /* Plus 1 for non-QoS frames */ | 332 | /* Plus 1 for non-QoS frames */ |
333 | __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES + 1]; | 333 | __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1]; |
334 | 334 | ||
335 | /* Updated from TX status path only, no locking requirements */ | 335 | /* Updated from TX status path only, no locking requirements */ |
336 | unsigned long tx_filtered_count; | 336 | unsigned long tx_filtered_count; |
@@ -344,14 +344,15 @@ struct sta_info { | |||
344 | unsigned long tx_fragments; | 344 | unsigned long tx_fragments; |
345 | struct ieee80211_tx_rate last_tx_rate; | 345 | struct ieee80211_tx_rate last_tx_rate; |
346 | int last_rx_rate_idx; | 346 | int last_rx_rate_idx; |
347 | int last_rx_rate_flag; | 347 | u32 last_rx_rate_flag; |
348 | u8 last_rx_rate_vht_nss; | ||
348 | u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; | 349 | u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; |
349 | 350 | ||
350 | /* | 351 | /* |
351 | * Aggregation information, locked with lock. | 352 | * Aggregation information, locked with lock. |
352 | */ | 353 | */ |
353 | struct sta_ampdu_mlme ampdu_mlme; | 354 | struct sta_ampdu_mlme ampdu_mlme; |
354 | u8 timer_to_tid[STA_TID_NUM]; | 355 | u8 timer_to_tid[IEEE80211_NUM_TIDS]; |
355 | 356 | ||
356 | #ifdef CONFIG_MAC80211_MESH | 357 | #ifdef CONFIG_MAC80211_MESH |
357 | /* | 358 | /* |
@@ -369,7 +370,7 @@ struct sta_info { | |||
369 | struct timer_list plink_timer; | 370 | struct timer_list plink_timer; |
370 | s64 t_offset; | 371 | s64 t_offset; |
371 | s64 t_offset_setpoint; | 372 | s64 t_offset_setpoint; |
372 | enum nl80211_channel_type ch_type; | 373 | enum nl80211_chan_width ch_width; |
373 | #endif | 374 | #endif |
374 | 375 | ||
375 | #ifdef CONFIG_MAC80211_DEBUGFS | 376 | #ifdef CONFIG_MAC80211_DEBUGFS |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 101eb88a2b78..ab63237107c8 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
@@ -189,30 +189,31 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) | |||
189 | } | 189 | } |
190 | 190 | ||
191 | if (ieee80211_is_action(mgmt->frame_control) && | 191 | if (ieee80211_is_action(mgmt->frame_control) && |
192 | sdata->vif.type == NL80211_IFTYPE_STATION && | ||
193 | mgmt->u.action.category == WLAN_CATEGORY_HT && | 192 | mgmt->u.action.category == WLAN_CATEGORY_HT && |
194 | mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) { | 193 | mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS && |
194 | sdata->vif.type == NL80211_IFTYPE_STATION && | ||
195 | ieee80211_sdata_running(sdata)) { | ||
195 | /* | 196 | /* |
196 | * This update looks racy, but isn't -- if we come | 197 | * This update looks racy, but isn't -- if we come |
197 | * here we've definitely got a station that we're | 198 | * here we've definitely got a station that we're |
198 | * talking to, and on a managed interface that can | 199 | * talking to, and on a managed interface that can |
199 | * only be the AP. And the only other place updating | 200 | * only be the AP. And the only other place updating |
200 | * this variable is before we're associated. | 201 | * this variable in managed mode is before association. |
201 | */ | 202 | */ |
202 | switch (mgmt->u.action.u.ht_smps.smps_control) { | 203 | switch (mgmt->u.action.u.ht_smps.smps_control) { |
203 | case WLAN_HT_SMPS_CONTROL_DYNAMIC: | 204 | case WLAN_HT_SMPS_CONTROL_DYNAMIC: |
204 | sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_DYNAMIC; | 205 | sdata->smps_mode = IEEE80211_SMPS_DYNAMIC; |
205 | break; | 206 | break; |
206 | case WLAN_HT_SMPS_CONTROL_STATIC: | 207 | case WLAN_HT_SMPS_CONTROL_STATIC: |
207 | sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_STATIC; | 208 | sdata->smps_mode = IEEE80211_SMPS_STATIC; |
208 | break; | 209 | break; |
209 | case WLAN_HT_SMPS_CONTROL_DISABLED: | 210 | case WLAN_HT_SMPS_CONTROL_DISABLED: |
210 | default: /* shouldn't happen since we don't send that */ | 211 | default: /* shouldn't happen since we don't send that */ |
211 | sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_OFF; | 212 | sdata->smps_mode = IEEE80211_SMPS_OFF; |
212 | break; | 213 | break; |
213 | } | 214 | } |
214 | 215 | ||
215 | ieee80211_queue_work(&local->hw, &local->recalc_smps); | 216 | ieee80211_queue_work(&local->hw, &sdata->recalc_smps); |
216 | } | 217 | } |
217 | } | 218 | } |
218 | 219 | ||
@@ -324,6 +325,75 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band | |||
324 | 325 | ||
325 | } | 326 | } |
326 | 327 | ||
328 | static void ieee80211_report_used_skb(struct ieee80211_local *local, | ||
329 | struct sk_buff *skb, bool dropped) | ||
330 | { | ||
331 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
332 | struct ieee80211_hdr *hdr = (void *)skb->data; | ||
333 | bool acked = info->flags & IEEE80211_TX_STAT_ACK; | ||
334 | |||
335 | if (dropped) | ||
336 | acked = false; | ||
337 | |||
338 | if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { | ||
339 | struct ieee80211_sub_if_data *sdata = NULL; | ||
340 | struct ieee80211_sub_if_data *iter_sdata; | ||
341 | u64 cookie = (unsigned long)skb; | ||
342 | |||
343 | rcu_read_lock(); | ||
344 | |||
345 | if (skb->dev) { | ||
346 | list_for_each_entry_rcu(iter_sdata, &local->interfaces, | ||
347 | list) { | ||
348 | if (!iter_sdata->dev) | ||
349 | continue; | ||
350 | |||
351 | if (skb->dev == iter_sdata->dev) { | ||
352 | sdata = iter_sdata; | ||
353 | break; | ||
354 | } | ||
355 | } | ||
356 | } else { | ||
357 | sdata = rcu_dereference(local->p2p_sdata); | ||
358 | } | ||
359 | |||
360 | if (!sdata) | ||
361 | skb->dev = NULL; | ||
362 | else if (ieee80211_is_nullfunc(hdr->frame_control) || | ||
363 | ieee80211_is_qos_nullfunc(hdr->frame_control)) { | ||
364 | cfg80211_probe_status(sdata->dev, hdr->addr1, | ||
365 | cookie, acked, GFP_ATOMIC); | ||
366 | } else { | ||
367 | cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data, | ||
368 | skb->len, acked, GFP_ATOMIC); | ||
369 | } | ||
370 | |||
371 | rcu_read_unlock(); | ||
372 | } | ||
373 | |||
374 | if (unlikely(info->ack_frame_id)) { | ||
375 | struct sk_buff *ack_skb; | ||
376 | unsigned long flags; | ||
377 | |||
378 | spin_lock_irqsave(&local->ack_status_lock, flags); | ||
379 | ack_skb = idr_find(&local->ack_status_frames, | ||
380 | info->ack_frame_id); | ||
381 | if (ack_skb) | ||
382 | idr_remove(&local->ack_status_frames, | ||
383 | info->ack_frame_id); | ||
384 | spin_unlock_irqrestore(&local->ack_status_lock, flags); | ||
385 | |||
386 | if (ack_skb) { | ||
387 | if (!dropped) { | ||
388 | /* consumes ack_skb */ | ||
389 | skb_complete_wifi_ack(ack_skb, acked); | ||
390 | } else { | ||
391 | dev_kfree_skb_any(ack_skb); | ||
392 | } | ||
393 | } | ||
394 | } | ||
395 | } | ||
396 | |||
327 | /* | 397 | /* |
328 | * Use a static threshold for now, best value to be determined | 398 | * Use a static threshold for now, best value to be determined |
329 | * by testing ... | 399 | * by testing ... |
@@ -515,62 +585,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
515 | msecs_to_jiffies(10)); | 585 | msecs_to_jiffies(10)); |
516 | } | 586 | } |
517 | 587 | ||
518 | if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { | 588 | ieee80211_report_used_skb(local, skb, false); |
519 | u64 cookie = (unsigned long)skb; | ||
520 | bool found = false; | ||
521 | |||
522 | acked = info->flags & IEEE80211_TX_STAT_ACK; | ||
523 | |||
524 | rcu_read_lock(); | ||
525 | |||
526 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | ||
527 | if (!sdata->dev) | ||
528 | continue; | ||
529 | |||
530 | if (skb->dev != sdata->dev) | ||
531 | continue; | ||
532 | |||
533 | found = true; | ||
534 | break; | ||
535 | } | ||
536 | |||
537 | if (!skb->dev) { | ||
538 | sdata = rcu_dereference(local->p2p_sdata); | ||
539 | if (sdata) | ||
540 | found = true; | ||
541 | } | ||
542 | |||
543 | if (!found) | ||
544 | skb->dev = NULL; | ||
545 | else if (ieee80211_is_nullfunc(hdr->frame_control) || | ||
546 | ieee80211_is_qos_nullfunc(hdr->frame_control)) { | ||
547 | cfg80211_probe_status(sdata->dev, hdr->addr1, | ||
548 | cookie, acked, GFP_ATOMIC); | ||
549 | } else { | ||
550 | cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data, | ||
551 | skb->len, acked, GFP_ATOMIC); | ||
552 | } | ||
553 | |||
554 | rcu_read_unlock(); | ||
555 | } | ||
556 | |||
557 | if (unlikely(info->ack_frame_id)) { | ||
558 | struct sk_buff *ack_skb; | ||
559 | unsigned long flags; | ||
560 | |||
561 | spin_lock_irqsave(&local->ack_status_lock, flags); | ||
562 | ack_skb = idr_find(&local->ack_status_frames, | ||
563 | info->ack_frame_id); | ||
564 | if (ack_skb) | ||
565 | idr_remove(&local->ack_status_frames, | ||
566 | info->ack_frame_id); | ||
567 | spin_unlock_irqrestore(&local->ack_status_lock, flags); | ||
568 | |||
569 | /* consumes ack_skb */ | ||
570 | if (ack_skb) | ||
571 | skb_complete_wifi_ack(ack_skb, | ||
572 | info->flags & IEEE80211_TX_STAT_ACK); | ||
573 | } | ||
574 | 589 | ||
575 | /* this was a transmitted frame, but now we want to reuse it */ | 590 | /* this was a transmitted frame, but now we want to reuse it */ |
576 | skb_orphan(skb); | 591 | skb_orphan(skb); |
@@ -646,25 +661,8 @@ EXPORT_SYMBOL(ieee80211_report_low_ack); | |||
646 | void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) | 661 | void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) |
647 | { | 662 | { |
648 | struct ieee80211_local *local = hw_to_local(hw); | 663 | struct ieee80211_local *local = hw_to_local(hw); |
649 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
650 | |||
651 | if (unlikely(info->ack_frame_id)) { | ||
652 | struct sk_buff *ack_skb; | ||
653 | unsigned long flags; | ||
654 | |||
655 | spin_lock_irqsave(&local->ack_status_lock, flags); | ||
656 | ack_skb = idr_find(&local->ack_status_frames, | ||
657 | info->ack_frame_id); | ||
658 | if (ack_skb) | ||
659 | idr_remove(&local->ack_status_frames, | ||
660 | info->ack_frame_id); | ||
661 | spin_unlock_irqrestore(&local->ack_status_lock, flags); | ||
662 | |||
663 | /* consumes ack_skb */ | ||
664 | if (ack_skb) | ||
665 | dev_kfree_skb_any(ack_skb); | ||
666 | } | ||
667 | 664 | ||
665 | ieee80211_report_used_skb(local, skb, true); | ||
668 | dev_kfree_skb_any(skb); | 666 | dev_kfree_skb_any(skb); |
669 | } | 667 | } |
670 | EXPORT_SYMBOL(ieee80211_free_txskb); | 668 | EXPORT_SYMBOL(ieee80211_free_txskb); |
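The duplicated ack_frame_id handling that used to sit in both ieee80211_tx_status() and ieee80211_free_txskb() is consolidated above into ieee80211_report_used_skb(), whose dropped flag decides whether the tracked frame is completed via skb_complete_wifi_ack() or simply freed. The underlying pattern is: look the entry up and remove it while holding the lock, then act on it outside the lock. A generic userspace sketch of that pattern, with an invented table in place of the ack-status idr:

#include <pthread.h>
#include <stddef.h>

#define MAX_PENDING 64

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pending[MAX_PENDING];	/* stand-in for the ack-status idr */

/* Claim and remove the entry under the lock, return it so the caller
 * can complete or free it outside the lock, mirroring how the patch
 * handles info->ack_frame_id. */
static void *claim_pending(unsigned int id)
{
	void *entry;

	if (id >= MAX_PENDING)
		return NULL;

	pthread_mutex_lock(&pending_lock);
	entry = pending[id];
	pending[id] = NULL;
	pthread_mutex_unlock(&pending_lock);

	return entry;
}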
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 18d9c8a52e9e..a8270b441a6f 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h | |||
@@ -28,6 +28,25 @@ | |||
28 | #define VIF_PR_FMT " vif:%s(%d%s)" | 28 | #define VIF_PR_FMT " vif:%s(%d%s)" |
29 | #define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : "" | 29 | #define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : "" |
30 | 30 | ||
31 | #define CHANCTX_ENTRY __field(u32, control_freq) \ | ||
32 | __field(u32, chan_width) \ | ||
33 | __field(u32, center_freq1) \ | ||
34 | __field(u32, center_freq2) \ | ||
35 | __field(u8, rx_chains_static) \ | ||
36 | __field(u8, rx_chains_dynamic) | ||
37 | #define CHANCTX_ASSIGN __entry->control_freq = ctx->conf.def.chan->center_freq;\ | ||
38 | __entry->chan_width = ctx->conf.def.width; \ | ||
39 | __entry->center_freq1 = ctx->conf.def.center_freq1; \ | ||
40 | __entry->center_freq2 = ctx->conf.def.center_freq2; \ | ||
41 | __entry->rx_chains_static = ctx->conf.rx_chains_static; \ | ||
42 | __entry->rx_chains_dynamic = ctx->conf.rx_chains_dynamic | ||
43 | #define CHANCTX_PR_FMT " control:%d MHz width:%d center: %d/%d MHz chains:%d/%d" | ||
44 | #define CHANCTX_PR_ARG __entry->control_freq, __entry->chan_width, \ | ||
45 | __entry->center_freq1, __entry->center_freq2, \ | ||
46 | __entry->rx_chains_static, __entry->rx_chains_dynamic | ||
47 | |||
48 | |||
49 | |||
31 | /* | 50 | /* |
32 | * Tracing for driver callbacks. | 51 | * Tracing for driver callbacks. |
33 | */ | 52 | */ |
@@ -301,20 +320,37 @@ TRACE_EVENT(drv_bss_info_changed, | |||
301 | TP_STRUCT__entry( | 320 | TP_STRUCT__entry( |
302 | LOCAL_ENTRY | 321 | LOCAL_ENTRY |
303 | VIF_ENTRY | 322 | VIF_ENTRY |
323 | __field(u32, changed) | ||
304 | __field(bool, assoc) | 324 | __field(bool, assoc) |
325 | __field(bool, ibss_joined) | ||
326 | __field(bool, ibss_creator) | ||
305 | __field(u16, aid) | 327 | __field(u16, aid) |
306 | __field(bool, cts) | 328 | __field(bool, cts) |
307 | __field(bool, shortpre) | 329 | __field(bool, shortpre) |
308 | __field(bool, shortslot) | 330 | __field(bool, shortslot) |
331 | __field(bool, enable_beacon) | ||
309 | __field(u8, dtimper) | 332 | __field(u8, dtimper) |
310 | __field(u16, bcnint) | 333 | __field(u16, bcnint) |
311 | __field(u16, assoc_cap) | 334 | __field(u16, assoc_cap) |
312 | __field(u64, sync_tsf) | 335 | __field(u64, sync_tsf) |
313 | __field(u32, sync_device_ts) | 336 | __field(u32, sync_device_ts) |
314 | __field(u32, basic_rates) | 337 | __field(u32, basic_rates) |
315 | __field(u32, changed) | 338 | __array(int, mcast_rate, IEEE80211_NUM_BANDS) |
316 | __field(bool, enable_beacon) | ||
317 | __field(u16, ht_operation_mode) | 339 | __field(u16, ht_operation_mode) |
340 | __field(s32, cqm_rssi_thold); | ||
341 | __field(s32, cqm_rssi_hyst); | ||
342 | __field(u32, channel_width); | ||
343 | __field(u32, channel_cfreq1); | ||
344 | __dynamic_array(u32, arp_addr_list, info->arp_addr_cnt); | ||
345 | __field(bool, arp_filter_enabled); | ||
346 | __field(bool, qos); | ||
347 | __field(bool, idle); | ||
348 | __field(bool, ps); | ||
349 | __dynamic_array(u8, ssid, info->ssid_len); | ||
350 | __field(bool, hidden_ssid); | ||
351 | __field(int, txpower) | ||
352 | __field(u8, p2p_ctwindow) | ||
353 | __field(bool, p2p_oppps) | ||
318 | ), | 354 | ), |
319 | 355 | ||
320 | TP_fast_assign( | 356 | TP_fast_assign( |
@@ -323,17 +359,36 @@ TRACE_EVENT(drv_bss_info_changed, | |||
323 | __entry->changed = changed; | 359 | __entry->changed = changed; |
324 | __entry->aid = info->aid; | 360 | __entry->aid = info->aid; |
325 | __entry->assoc = info->assoc; | 361 | __entry->assoc = info->assoc; |
362 | __entry->ibss_joined = info->ibss_joined; | ||
363 | __entry->ibss_creator = info->ibss_creator; | ||
326 | __entry->shortpre = info->use_short_preamble; | 364 | __entry->shortpre = info->use_short_preamble; |
327 | __entry->cts = info->use_cts_prot; | 365 | __entry->cts = info->use_cts_prot; |
328 | __entry->shortslot = info->use_short_slot; | 366 | __entry->shortslot = info->use_short_slot; |
367 | __entry->enable_beacon = info->enable_beacon; | ||
329 | __entry->dtimper = info->dtim_period; | 368 | __entry->dtimper = info->dtim_period; |
330 | __entry->bcnint = info->beacon_int; | 369 | __entry->bcnint = info->beacon_int; |
331 | __entry->assoc_cap = info->assoc_capability; | 370 | __entry->assoc_cap = info->assoc_capability; |
332 | __entry->sync_tsf = info->sync_tsf; | 371 | __entry->sync_tsf = info->sync_tsf; |
333 | __entry->sync_device_ts = info->sync_device_ts; | 372 | __entry->sync_device_ts = info->sync_device_ts; |
334 | __entry->basic_rates = info->basic_rates; | 373 | __entry->basic_rates = info->basic_rates; |
335 | __entry->enable_beacon = info->enable_beacon; | 374 | memcpy(__entry->mcast_rate, info->mcast_rate, |
375 | sizeof(__entry->mcast_rate)); | ||
336 | __entry->ht_operation_mode = info->ht_operation_mode; | 376 | __entry->ht_operation_mode = info->ht_operation_mode; |
377 | __entry->cqm_rssi_thold = info->cqm_rssi_thold; | ||
378 | __entry->cqm_rssi_hyst = info->cqm_rssi_hyst; | ||
379 | __entry->channel_width = info->chandef.width; | ||
380 | __entry->channel_cfreq1 = info->chandef.center_freq1; | ||
381 | memcpy(__get_dynamic_array(arp_addr_list), info->arp_addr_list, | ||
382 | sizeof(u32) * info->arp_addr_cnt); | ||
383 | __entry->arp_filter_enabled = info->arp_filter_enabled; | ||
384 | __entry->qos = info->qos; | ||
385 | __entry->idle = info->idle; | ||
386 | __entry->ps = info->ps; | ||
387 | memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len); | ||
388 | __entry->hidden_ssid = info->hidden_ssid; | ||
389 | __entry->txpower = info->txpower; | ||
390 | __entry->p2p_ctwindow = info->p2p_ctwindow; | ||
391 | __entry->p2p_oppps = info->p2p_oppps; | ||
337 | ), | 392 | ), |
338 | 393 | ||
339 | TP_printk( | 394 | TP_printk( |
@@ -971,28 +1026,31 @@ TRACE_EVENT(drv_get_antenna, | |||
971 | ); | 1026 | ); |
972 | 1027 | ||
973 | TRACE_EVENT(drv_remain_on_channel, | 1028 | TRACE_EVENT(drv_remain_on_channel, |
974 | TP_PROTO(struct ieee80211_local *local, struct ieee80211_channel *chan, | 1029 | TP_PROTO(struct ieee80211_local *local, |
975 | enum nl80211_channel_type chantype, unsigned int duration), | 1030 | struct ieee80211_sub_if_data *sdata, |
1031 | struct ieee80211_channel *chan, | ||
1032 | unsigned int duration), | ||
976 | 1033 | ||
977 | TP_ARGS(local, chan, chantype, duration), | 1034 | TP_ARGS(local, sdata, chan, duration), |
978 | 1035 | ||
979 | TP_STRUCT__entry( | 1036 | TP_STRUCT__entry( |
980 | LOCAL_ENTRY | 1037 | LOCAL_ENTRY |
1038 | VIF_ENTRY | ||
981 | __field(int, center_freq) | 1039 | __field(int, center_freq) |
982 | __field(int, channel_type) | ||
983 | __field(unsigned int, duration) | 1040 | __field(unsigned int, duration) |
984 | ), | 1041 | ), |
985 | 1042 | ||
986 | TP_fast_assign( | 1043 | TP_fast_assign( |
987 | LOCAL_ASSIGN; | 1044 | LOCAL_ASSIGN; |
1045 | VIF_ASSIGN; | ||
988 | __entry->center_freq = chan->center_freq; | 1046 | __entry->center_freq = chan->center_freq; |
989 | __entry->channel_type = chantype; | ||
990 | __entry->duration = duration; | 1047 | __entry->duration = duration; |
991 | ), | 1048 | ), |
992 | 1049 | ||
993 | TP_printk( | 1050 | TP_printk( |
994 | LOCAL_PR_FMT " freq:%dMHz duration:%dms", | 1051 | LOCAL_PR_FMT VIF_PR_FMT " freq:%dMHz duration:%dms", |
995 | LOCAL_PR_ARG, __entry->center_freq, __entry->duration | 1052 | LOCAL_PR_ARG, VIF_PR_ARG, |
1053 | __entry->center_freq, __entry->duration | ||
996 | ) | 1054 | ) |
997 | ); | 1055 | ); |
998 | 1056 | ||
@@ -1001,34 +1059,6 @@ DEFINE_EVENT(local_only_evt, drv_cancel_remain_on_channel, | |||
1001 | TP_ARGS(local) | 1059 | TP_ARGS(local) |
1002 | ); | 1060 | ); |
1003 | 1061 | ||
1004 | TRACE_EVENT(drv_offchannel_tx, | ||
1005 | TP_PROTO(struct ieee80211_local *local, struct sk_buff *skb, | ||
1006 | struct ieee80211_channel *chan, | ||
1007 | enum nl80211_channel_type channel_type, | ||
1008 | unsigned int wait), | ||
1009 | |||
1010 | TP_ARGS(local, skb, chan, channel_type, wait), | ||
1011 | |||
1012 | TP_STRUCT__entry( | ||
1013 | LOCAL_ENTRY | ||
1014 | __field(int, center_freq) | ||
1015 | __field(int, channel_type) | ||
1016 | __field(unsigned int, wait) | ||
1017 | ), | ||
1018 | |||
1019 | TP_fast_assign( | ||
1020 | LOCAL_ASSIGN; | ||
1021 | __entry->center_freq = chan->center_freq; | ||
1022 | __entry->channel_type = channel_type; | ||
1023 | __entry->wait = wait; | ||
1024 | ), | ||
1025 | |||
1026 | TP_printk( | ||
1027 | LOCAL_PR_FMT " freq:%dMHz, wait:%dms", | ||
1028 | LOCAL_PR_ARG, __entry->center_freq, __entry->wait | ||
1029 | ) | ||
1030 | ); | ||
1031 | |||
1032 | TRACE_EVENT(drv_set_ringparam, | 1062 | TRACE_EVENT(drv_set_ringparam, |
1033 | TP_PROTO(struct ieee80211_local *local, u32 tx, u32 rx), | 1063 | TP_PROTO(struct ieee80211_local *local, u32 tx, u32 rx), |
1034 | 1064 | ||
@@ -1256,6 +1286,146 @@ DEFINE_EVENT(local_sdata_evt, drv_mgd_prepare_tx, | |||
1256 | TP_ARGS(local, sdata) | 1286 | TP_ARGS(local, sdata) |
1257 | ); | 1287 | ); |
1258 | 1288 | ||
1289 | DECLARE_EVENT_CLASS(local_chanctx, | ||
1290 | TP_PROTO(struct ieee80211_local *local, | ||
1291 | struct ieee80211_chanctx *ctx), | ||
1292 | |||
1293 | TP_ARGS(local, ctx), | ||
1294 | |||
1295 | TP_STRUCT__entry( | ||
1296 | LOCAL_ENTRY | ||
1297 | CHANCTX_ENTRY | ||
1298 | ), | ||
1299 | |||
1300 | TP_fast_assign( | ||
1301 | LOCAL_ASSIGN; | ||
1302 | CHANCTX_ASSIGN; | ||
1303 | ), | ||
1304 | |||
1305 | TP_printk( | ||
1306 | LOCAL_PR_FMT CHANCTX_PR_FMT, | ||
1307 | LOCAL_PR_ARG, CHANCTX_PR_ARG | ||
1308 | ) | ||
1309 | ); | ||
1310 | |||
1311 | DEFINE_EVENT(local_chanctx, drv_add_chanctx, | ||
1312 | TP_PROTO(struct ieee80211_local *local, | ||
1313 | struct ieee80211_chanctx *ctx), | ||
1314 | TP_ARGS(local, ctx) | ||
1315 | ); | ||
1316 | |||
1317 | DEFINE_EVENT(local_chanctx, drv_remove_chanctx, | ||
1318 | TP_PROTO(struct ieee80211_local *local, | ||
1319 | struct ieee80211_chanctx *ctx), | ||
1320 | TP_ARGS(local, ctx) | ||
1321 | ); | ||
1322 | |||
1323 | TRACE_EVENT(drv_change_chanctx, | ||
1324 | TP_PROTO(struct ieee80211_local *local, | ||
1325 | struct ieee80211_chanctx *ctx, | ||
1326 | u32 changed), | ||
1327 | |||
1328 | TP_ARGS(local, ctx, changed), | ||
1329 | |||
1330 | TP_STRUCT__entry( | ||
1331 | LOCAL_ENTRY | ||
1332 | CHANCTX_ENTRY | ||
1333 | __field(u32, changed) | ||
1334 | ), | ||
1335 | |||
1336 | TP_fast_assign( | ||
1337 | LOCAL_ASSIGN; | ||
1338 | CHANCTX_ASSIGN; | ||
1339 | __entry->changed = changed; | ||
1340 | ), | ||
1341 | |||
1342 | TP_printk( | ||
1343 | LOCAL_PR_FMT CHANCTX_PR_FMT " changed:%#x", | ||
1344 | LOCAL_PR_ARG, CHANCTX_PR_ARG, __entry->changed | ||
1345 | ) | ||
1346 | ); | ||
1347 | |||
1348 | DECLARE_EVENT_CLASS(local_sdata_chanctx, | ||
1349 | TP_PROTO(struct ieee80211_local *local, | ||
1350 | struct ieee80211_sub_if_data *sdata, | ||
1351 | struct ieee80211_chanctx *ctx), | ||
1352 | |||
1353 | TP_ARGS(local, sdata, ctx), | ||
1354 | |||
1355 | TP_STRUCT__entry( | ||
1356 | LOCAL_ENTRY | ||
1357 | VIF_ENTRY | ||
1358 | CHANCTX_ENTRY | ||
1359 | ), | ||
1360 | |||
1361 | TP_fast_assign( | ||
1362 | LOCAL_ASSIGN; | ||
1363 | VIF_ASSIGN; | ||
1364 | CHANCTX_ASSIGN; | ||
1365 | ), | ||
1366 | |||
1367 | TP_printk( | ||
1368 | LOCAL_PR_FMT VIF_PR_FMT CHANCTX_PR_FMT, | ||
1369 | LOCAL_PR_ARG, VIF_PR_ARG, CHANCTX_PR_ARG | ||
1370 | ) | ||
1371 | ); | ||
1372 | |||
1373 | DEFINE_EVENT(local_sdata_chanctx, drv_assign_vif_chanctx, | ||
1374 | TP_PROTO(struct ieee80211_local *local, | ||
1375 | struct ieee80211_sub_if_data *sdata, | ||
1376 | struct ieee80211_chanctx *ctx), | ||
1377 | TP_ARGS(local, sdata, ctx) | ||
1378 | ); | ||
1379 | |||
1380 | DEFINE_EVENT(local_sdata_chanctx, drv_unassign_vif_chanctx, | ||
1381 | TP_PROTO(struct ieee80211_local *local, | ||
1382 | struct ieee80211_sub_if_data *sdata, | ||
1383 | struct ieee80211_chanctx *ctx), | ||
1384 | TP_ARGS(local, sdata, ctx) | ||
1385 | ); | ||
1386 | |||
1387 | TRACE_EVENT(drv_start_ap, | ||
1388 | TP_PROTO(struct ieee80211_local *local, | ||
1389 | struct ieee80211_sub_if_data *sdata, | ||
1390 | struct ieee80211_bss_conf *info), | ||
1391 | |||
1392 | TP_ARGS(local, sdata, info), | ||
1393 | |||
1394 | TP_STRUCT__entry( | ||
1395 | LOCAL_ENTRY | ||
1396 | VIF_ENTRY | ||
1397 | __field(u8, dtimper) | ||
1398 | __field(u16, bcnint) | ||
1399 | __dynamic_array(u8, ssid, info->ssid_len); | ||
1400 | __field(bool, hidden_ssid); | ||
1401 | ), | ||
1402 | |||
1403 | TP_fast_assign( | ||
1404 | LOCAL_ASSIGN; | ||
1405 | VIF_ASSIGN; | ||
1406 | __entry->dtimper = info->dtim_period; | ||
1407 | __entry->bcnint = info->beacon_int; | ||
1408 | memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len); | ||
1409 | __entry->hidden_ssid = info->hidden_ssid; | ||
1410 | ), | ||
1411 | |||
1412 | TP_printk( | ||
1413 | LOCAL_PR_FMT VIF_PR_FMT, | ||
1414 | LOCAL_PR_ARG, VIF_PR_ARG | ||
1415 | ) | ||
1416 | ); | ||
1417 | |||
1418 | DEFINE_EVENT(local_sdata_evt, drv_stop_ap, | ||
1419 | TP_PROTO(struct ieee80211_local *local, | ||
1420 | struct ieee80211_sub_if_data *sdata), | ||
1421 | TP_ARGS(local, sdata) | ||
1422 | ); | ||
1423 | |||
1424 | DEFINE_EVENT(local_only_evt, drv_restart_complete, | ||
1425 | TP_PROTO(struct ieee80211_local *local), | ||
1426 | TP_ARGS(local) | ||
1427 | ); | ||
1428 | |||
1259 | /* | 1429 | /* |
1260 | * Tracing for API calls that drivers call. | 1430 | * Tracing for API calls that drivers call. |
1261 | */ | 1431 | */ |
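The chanctx and vif-chanctx tracepoints added above use the usual DECLARE_EVENT_CLASS / DEFINE_EVENT split so that events with identical fields share one class and differ only by name. As a hedged sketch (not a copy of mac80211's driver-ops.h), the matching wrapper on the driver-call side would look roughly like this; the ops member, its argument order and trace_drv_return_int() are assumed from mac80211 convention:

    static inline int drv_add_chanctx(struct ieee80211_local *local,
                                      struct ieee80211_chanctx *ctx)
    {
            int ret = -EOPNOTSUPP;

            /* tracepoint generated by DEFINE_EVENT(local_chanctx, drv_add_chanctx, ...) */
            trace_drv_add_chanctx(local, ctx);
            if (local->ops->add_chanctx)
                    ret = local->ops->add_chanctx(&local->hw, &ctx->conf);
            trace_drv_return_int(local, ret);

            return ret;
    }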
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index b858ebe41fda..d287a4f2c01b 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -324,22 +324,20 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) | |||
324 | struct ieee80211_sub_if_data *sdata; | 324 | struct ieee80211_sub_if_data *sdata; |
325 | struct sta_info *sta; | 325 | struct sta_info *sta; |
326 | 326 | ||
327 | /* | ||
328 | * virtual interfaces are protected by RCU | ||
329 | */ | ||
330 | rcu_read_lock(); | ||
331 | |||
332 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | 327 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
333 | struct ieee80211_if_ap *ap; | 328 | struct ps_data *ps; |
334 | if (sdata->vif.type != NL80211_IFTYPE_AP) | 329 | |
330 | if (sdata->vif.type == NL80211_IFTYPE_AP) | ||
331 | ps = &sdata->u.ap.ps; | ||
332 | else | ||
335 | continue; | 333 | continue; |
336 | ap = &sdata->u.ap; | 334 | |
337 | skb = skb_dequeue(&ap->ps_bc_buf); | 335 | skb = skb_dequeue(&ps->bc_buf); |
338 | if (skb) { | 336 | if (skb) { |
339 | purged++; | 337 | purged++; |
340 | dev_kfree_skb(skb); | 338 | dev_kfree_skb(skb); |
341 | } | 339 | } |
342 | total += skb_queue_len(&ap->ps_bc_buf); | 340 | total += skb_queue_len(&ps->bc_buf); |
343 | } | 341 | } |
344 | 342 | ||
345 | /* | 343 | /* |
@@ -360,8 +358,6 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) | |||
360 | } | 358 | } |
361 | } | 359 | } |
362 | 360 | ||
363 | rcu_read_unlock(); | ||
364 | |||
365 | local->total_ps_buffered = total; | 361 | local->total_ps_buffered = total; |
366 | ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged); | 362 | ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged); |
367 | } | 363 | } |
@@ -371,6 +367,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
371 | { | 367 | { |
372 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 368 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
373 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | 369 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
370 | struct ps_data *ps; | ||
374 | 371 | ||
375 | /* | 372 | /* |
376 | * broadcast/multicast frame | 373 | * broadcast/multicast frame |
@@ -380,16 +377,24 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
380 | * This is done either by the hardware or us. | 377 | * This is done either by the hardware or us. |
381 | */ | 378 | */ |
382 | 379 | ||
383 | /* powersaving STAs only in AP/VLAN mode */ | 380 | /* powersaving STAs currently only in AP/VLAN mode */ |
384 | if (!tx->sdata->bss) | 381 | if (tx->sdata->vif.type == NL80211_IFTYPE_AP || |
382 | tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { | ||
383 | if (!tx->sdata->bss) | ||
384 | return TX_CONTINUE; | ||
385 | |||
386 | ps = &tx->sdata->bss->ps; | ||
387 | } else { | ||
385 | return TX_CONTINUE; | 388 | return TX_CONTINUE; |
389 | } | ||
390 | |||
386 | 391 | ||
387 | /* no buffering for ordered frames */ | 392 | /* no buffering for ordered frames */ |
388 | if (ieee80211_has_order(hdr->frame_control)) | 393 | if (ieee80211_has_order(hdr->frame_control)) |
389 | return TX_CONTINUE; | 394 | return TX_CONTINUE; |
390 | 395 | ||
391 | /* no stations in PS mode */ | 396 | /* no stations in PS mode */ |
392 | if (!atomic_read(&tx->sdata->bss->num_sta_ps)) | 397 | if (!atomic_read(&ps->num_sta_ps)) |
393 | return TX_CONTINUE; | 398 | return TX_CONTINUE; |
394 | 399 | ||
395 | info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; | 400 | info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; |
@@ -404,14 +409,14 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
404 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) | 409 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) |
405 | purge_old_ps_buffers(tx->local); | 410 | purge_old_ps_buffers(tx->local); |
406 | 411 | ||
407 | if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) { | 412 | if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) { |
408 | ps_dbg(tx->sdata, | 413 | ps_dbg(tx->sdata, |
409 | "BC TX buffer full - dropping the oldest frame\n"); | 414 | "BC TX buffer full - dropping the oldest frame\n"); |
410 | dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); | 415 | dev_kfree_skb(skb_dequeue(&ps->bc_buf)); |
411 | } else | 416 | } else |
412 | tx->local->total_ps_buffered++; | 417 | tx->local->total_ps_buffered++; |
413 | 418 | ||
414 | skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb); | 419 | skb_queue_tail(&ps->bc_buf, tx->skb); |
415 | 420 | ||
416 | return TX_QUEUED; | 421 | return TX_QUEUED; |
417 | } | 422 | } |
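The two hunks above move the broadcast power-save queue from struct ieee80211_if_ap into the new struct ps_data; the bounded drop-oldest policy itself is unchanged. A minimal sketch of that policy in isolation (the helper name is invented, ps->bc_buf and AP_MAX_BC_BUFFER are taken from the hunk):

    /* invented helper illustrating the bounded drop-oldest queueing above */
    static void ps_buffer_bc_frame(struct ps_data *ps, struct sk_buff *skb)
    {
            if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER)
                    /* queue full: drop the oldest buffered frame to make room */
                    dev_kfree_skb(skb_dequeue(&ps->bc_buf));

            skb_queue_tail(&ps->bc_buf, skb);
    }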
@@ -951,7 +956,6 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) | |||
951 | fragnum = 0; | 956 | fragnum = 0; |
952 | 957 | ||
953 | skb_queue_walk(&tx->skbs, skb) { | 958 | skb_queue_walk(&tx->skbs, skb) { |
954 | int next_len; | ||
955 | const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); | 959 | const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); |
956 | 960 | ||
957 | hdr = (void *)skb->data; | 961 | hdr = (void *)skb->data; |
@@ -970,7 +974,6 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) | |||
970 | info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; | 974 | info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; |
971 | } else { | 975 | } else { |
972 | hdr->frame_control &= ~morefrags; | 976 | hdr->frame_control &= ~morefrags; |
973 | next_len = 0; | ||
974 | } | 977 | } |
975 | hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG); | 978 | hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG); |
976 | fragnum++; | 979 | fragnum++; |
@@ -1372,7 +1375,8 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) | |||
1372 | * Returns false if the frame couldn't be transmitted but was queued instead. | 1375 | * Returns false if the frame couldn't be transmitted but was queued instead. |
1373 | */ | 1376 | */ |
1374 | static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, | 1377 | static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, |
1375 | struct sk_buff *skb, bool txpending) | 1378 | struct sk_buff *skb, bool txpending, |
1379 | enum ieee80211_band band) | ||
1376 | { | 1380 | { |
1377 | struct ieee80211_local *local = sdata->local; | 1381 | struct ieee80211_local *local = sdata->local; |
1378 | struct ieee80211_tx_data tx; | 1382 | struct ieee80211_tx_data tx; |
@@ -1386,20 +1390,18 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, | |||
1386 | return true; | 1390 | return true; |
1387 | } | 1391 | } |
1388 | 1392 | ||
1389 | rcu_read_lock(); | ||
1390 | |||
1391 | /* initialises tx */ | 1393 | /* initialises tx */ |
1392 | led_len = skb->len; | 1394 | led_len = skb->len; |
1393 | res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); | 1395 | res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); |
1394 | 1396 | ||
1395 | if (unlikely(res_prepare == TX_DROP)) { | 1397 | if (unlikely(res_prepare == TX_DROP)) { |
1396 | ieee80211_free_txskb(&local->hw, skb); | 1398 | ieee80211_free_txskb(&local->hw, skb); |
1397 | goto out; | 1399 | return true; |
1398 | } else if (unlikely(res_prepare == TX_QUEUED)) { | 1400 | } else if (unlikely(res_prepare == TX_QUEUED)) { |
1399 | goto out; | 1401 | return true; |
1400 | } | 1402 | } |
1401 | 1403 | ||
1402 | info->band = local->hw.conf.channel->band; | 1404 | info->band = band; |
1403 | 1405 | ||
1404 | /* set up hw_queue value early */ | 1406 | /* set up hw_queue value early */ |
1405 | if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) || | 1407 | if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) || |
@@ -1410,8 +1412,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, | |||
1410 | if (!invoke_tx_handlers(&tx)) | 1412 | if (!invoke_tx_handlers(&tx)) |
1411 | result = __ieee80211_tx(local, &tx.skbs, led_len, | 1413 | result = __ieee80211_tx(local, &tx.skbs, led_len, |
1412 | tx.sta, txpending); | 1414 | tx.sta, txpending); |
1413 | out: | 1415 | |
1414 | rcu_read_unlock(); | ||
1415 | return result; | 1416 | return result; |
1416 | } | 1417 | } |
1417 | 1418 | ||
@@ -1446,7 +1447,8 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, | |||
1446 | return 0; | 1447 | return 0; |
1447 | } | 1448 | } |
1448 | 1449 | ||
1449 | void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | 1450 | void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, |
1451 | enum ieee80211_band band) | ||
1450 | { | 1452 | { |
1451 | struct ieee80211_local *local = sdata->local; | 1453 | struct ieee80211_local *local = sdata->local; |
1452 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1454 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
@@ -1454,8 +1456,6 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | |||
1454 | int headroom; | 1456 | int headroom; |
1455 | bool may_encrypt; | 1457 | bool may_encrypt; |
1456 | 1458 | ||
1457 | rcu_read_lock(); | ||
1458 | |||
1459 | may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); | 1459 | may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); |
1460 | 1460 | ||
1461 | headroom = local->tx_headroom; | 1461 | headroom = local->tx_headroom; |
@@ -1466,7 +1466,6 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | |||
1466 | 1466 | ||
1467 | if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { | 1467 | if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { |
1468 | ieee80211_free_txskb(&local->hw, skb); | 1468 | ieee80211_free_txskb(&local->hw, skb); |
1469 | rcu_read_unlock(); | ||
1470 | return; | 1469 | return; |
1471 | } | 1470 | } |
1472 | 1471 | ||
@@ -1478,13 +1477,11 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | |||
1478 | !is_multicast_ether_addr(hdr->addr1) && | 1477 | !is_multicast_ether_addr(hdr->addr1) && |
1479 | mesh_nexthop_resolve(skb, sdata)) { | 1478 | mesh_nexthop_resolve(skb, sdata)) { |
1480 | /* skb queued: don't free */ | 1479 | /* skb queued: don't free */ |
1481 | rcu_read_unlock(); | ||
1482 | return; | 1480 | return; |
1483 | } | 1481 | } |
1484 | 1482 | ||
1485 | ieee80211_set_qos_hdr(sdata, skb); | 1483 | ieee80211_set_qos_hdr(sdata, skb); |
1486 | ieee80211_tx(sdata, skb, false); | 1484 | ieee80211_tx(sdata, skb, false, band); |
1487 | rcu_read_unlock(); | ||
1488 | } | 1485 | } |
1489 | 1486 | ||
1490 | static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb) | 1487 | static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb) |
@@ -1574,7 +1571,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, | |||
1574 | struct net_device *dev) | 1571 | struct net_device *dev) |
1575 | { | 1572 | { |
1576 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1573 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1577 | struct ieee80211_channel *chan = local->hw.conf.channel; | 1574 | struct ieee80211_chanctx_conf *chanctx_conf; |
1575 | struct ieee80211_channel *chan; | ||
1578 | struct ieee80211_radiotap_header *prthdr = | 1576 | struct ieee80211_radiotap_header *prthdr = |
1579 | (struct ieee80211_radiotap_header *)skb->data; | 1577 | (struct ieee80211_radiotap_header *)skb->data; |
1580 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1578 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
@@ -1583,26 +1581,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, | |||
1583 | u16 len_rthdr; | 1581 | u16 len_rthdr; |
1584 | int hdrlen; | 1582 | int hdrlen; |
1585 | 1583 | ||
1586 | /* | ||
1587 | * Frame injection is not allowed if beaconing is not allowed | ||
1588 | * or if we need radar detection. Beaconing is usually not allowed when | ||
1589 | * the mode or operation (Adhoc, AP, Mesh) does not support DFS. | ||
1590 | * Passive scan is also used in world regulatory domains where | ||
1591 | * your country is not known and as such it should be treated as | ||
1592 | * NO TX unless the channel is explicitly allowed in which case | ||
1593 | * your current regulatory domain would not have the passive scan | ||
1594 | * flag. | ||
1595 | * | ||
1596 | * Since AP mode uses monitor interfaces to inject/TX management | ||
1597 | * frames we can make AP mode the exception to this rule once it | ||
1598 | * supports radar detection as its implementation can deal with | ||
1599 | * radar detection by itself. We can do that later by adding a | ||
1600 | * monitor flag interfaces used for AP support. | ||
1601 | */ | ||
1602 | if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR | | ||
1603 | IEEE80211_CHAN_PASSIVE_SCAN))) | ||
1604 | goto fail; | ||
1605 | |||
1606 | /* check for not even having the fixed radiotap header part */ | 1584 | /* check for not even having the fixed radiotap header part */ |
1607 | if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) | 1585 | if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) |
1608 | goto fail; /* too short to be possibly valid */ | 1586 | goto fail; /* too short to be possibly valid */ |
@@ -1688,11 +1666,45 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, | |||
1688 | } | 1666 | } |
1689 | } | 1667 | } |
1690 | 1668 | ||
1691 | ieee80211_xmit(sdata, skb); | 1669 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); |
1670 | if (!chanctx_conf) { | ||
1671 | tmp_sdata = rcu_dereference(local->monitor_sdata); | ||
1672 | if (tmp_sdata) | ||
1673 | chanctx_conf = | ||
1674 | rcu_dereference(tmp_sdata->vif.chanctx_conf); | ||
1675 | } | ||
1676 | if (!chanctx_conf) | ||
1677 | goto fail_rcu; | ||
1678 | |||
1679 | chan = chanctx_conf->def.chan; | ||
1680 | |||
1681 | /* | ||
1682 | * Frame injection is not allowed if beaconing is not allowed | ||
1683 | * or if we need radar detection. Beaconing is usually not allowed when | ||
1684 | * the mode or operation (Adhoc, AP, Mesh) does not support DFS. | ||
1685 | * Passive scan is also used in world regulatory domains where | ||
1686 | * your country is not known and as such it should be treated as | ||
1687 | * NO TX unless the channel is explicitly allowed in which case | ||
1688 | * your current regulatory domain would not have the passive scan | ||
1689 | * flag. | ||
1690 | * | ||
1691 | * Since AP mode uses monitor interfaces to inject/TX management | ||
1692 | * frames we can make AP mode the exception to this rule once it | ||
1693 | * supports radar detection as its implementation can deal with | ||
1694 | * radar detection by itself. We can do that later by adding a | ||
1695 | * monitor flag interfaces used for AP support. | ||
1696 | */ | ||
1697 | if ((chan->flags & (IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_RADAR | | ||
1698 | IEEE80211_CHAN_PASSIVE_SCAN))) | ||
1699 | goto fail_rcu; | ||
1700 | |||
1701 | ieee80211_xmit(sdata, skb, chan->band); | ||
1692 | rcu_read_unlock(); | 1702 | rcu_read_unlock(); |
1693 | 1703 | ||
1694 | return NETDEV_TX_OK; | 1704 | return NETDEV_TX_OK; |
1695 | 1705 | ||
1706 | fail_rcu: | ||
1707 | rcu_read_unlock(); | ||
1696 | fail: | 1708 | fail: |
1697 | dev_kfree_skb(skb); | 1709 | dev_kfree_skb(skb); |
1698 | return NETDEV_TX_OK; /* meaning, we dealt with the skb */ | 1710 | return NETDEV_TX_OK; /* meaning, we dealt with the skb */ |
@@ -1734,6 +1746,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1734 | bool multicast; | 1746 | bool multicast; |
1735 | u32 info_flags = 0; | 1747 | u32 info_flags = 0; |
1736 | u16 info_id = 0; | 1748 | u16 info_id = 0; |
1749 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
1750 | struct ieee80211_sub_if_data *ap_sdata; | ||
1751 | enum ieee80211_band band; | ||
1737 | 1752 | ||
1738 | if (unlikely(skb->len < ETH_HLEN)) | 1753 | if (unlikely(skb->len < ETH_HLEN)) |
1739 | goto fail; | 1754 | goto fail; |
@@ -1743,9 +1758,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1743 | ethertype = (skb->data[12] << 8) | skb->data[13]; | 1758 | ethertype = (skb->data[12] << 8) | skb->data[13]; |
1744 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); | 1759 | fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); |
1745 | 1760 | ||
1761 | rcu_read_lock(); | ||
1762 | |||
1746 | switch (sdata->vif.type) { | 1763 | switch (sdata->vif.type) { |
1747 | case NL80211_IFTYPE_AP_VLAN: | 1764 | case NL80211_IFTYPE_AP_VLAN: |
1748 | rcu_read_lock(); | ||
1749 | sta = rcu_dereference(sdata->u.vlan.sta); | 1765 | sta = rcu_dereference(sdata->u.vlan.sta); |
1750 | if (sta) { | 1766 | if (sta) { |
1751 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); | 1767 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); |
@@ -1758,7 +1774,12 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1758 | authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); | 1774 | authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); |
1759 | wme_sta = test_sta_flag(sta, WLAN_STA_WME); | 1775 | wme_sta = test_sta_flag(sta, WLAN_STA_WME); |
1760 | } | 1776 | } |
1761 | rcu_read_unlock(); | 1777 | ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, |
1778 | u.ap); | ||
1779 | chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf); | ||
1780 | if (!chanctx_conf) | ||
1781 | goto fail_rcu; | ||
1782 | band = chanctx_conf->def.chan->band; | ||
1762 | if (sta) | 1783 | if (sta) |
1763 | break; | 1784 | break; |
1764 | /* fall through */ | 1785 | /* fall through */ |
@@ -1769,6 +1790,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1769 | memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); | 1790 | memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); |
1770 | memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); | 1791 | memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); |
1771 | hdrlen = 24; | 1792 | hdrlen = 24; |
1793 | if (sdata->vif.type == NL80211_IFTYPE_AP) | ||
1794 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
1795 | if (!chanctx_conf) | ||
1796 | goto fail_rcu; | ||
1797 | band = chanctx_conf->def.chan->band; | ||
1772 | break; | 1798 | break; |
1773 | case NL80211_IFTYPE_WDS: | 1799 | case NL80211_IFTYPE_WDS: |
1774 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); | 1800 | fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); |
@@ -1778,15 +1804,20 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1778 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | 1804 | memcpy(hdr.addr3, skb->data, ETH_ALEN); |
1779 | memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); | 1805 | memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); |
1780 | hdrlen = 30; | 1806 | hdrlen = 30; |
1807 | /* | ||
1808 | * This is the exception! WDS style interfaces are prohibited | ||
1809 | * when channel contexts are in use, so this must be valid | ||
1810 | */ | ||
1811 | band = local->hw.conf.channel->band; | ||
1781 | break; | 1812 | break; |
1782 | #ifdef CONFIG_MAC80211_MESH | 1813 | #ifdef CONFIG_MAC80211_MESH |
1783 | case NL80211_IFTYPE_MESH_POINT: | 1814 | case NL80211_IFTYPE_MESH_POINT: |
1784 | if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { | 1815 | if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { |
1785 | /* Do not send frames with mesh_ttl == 0 */ | 1816 | /* Do not send frames with mesh_ttl == 0 */ |
1786 | sdata->u.mesh.mshstats.dropped_frames_ttl++; | 1817 | sdata->u.mesh.mshstats.dropped_frames_ttl++; |
1787 | goto fail; | 1818 | goto fail_rcu; |
1788 | } | 1819 | } |
1789 | rcu_read_lock(); | 1820 | |
1790 | if (!is_multicast_ether_addr(skb->data)) { | 1821 | if (!is_multicast_ether_addr(skb->data)) { |
1791 | mpath = mesh_path_lookup(skb->data, sdata); | 1822 | mpath = mesh_path_lookup(skb->data, sdata); |
1792 | if (!mpath) | 1823 | if (!mpath) |
@@ -1803,7 +1834,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1803 | !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) { | 1834 | !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) { |
1804 | hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, | 1835 | hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, |
1805 | skb->data, skb->data + ETH_ALEN); | 1836 | skb->data, skb->data + ETH_ALEN); |
1806 | rcu_read_unlock(); | ||
1807 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, | 1837 | meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, |
1808 | sdata, NULL, NULL); | 1838 | sdata, NULL, NULL); |
1809 | } else { | 1839 | } else { |
@@ -1819,7 +1849,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1819 | mesh_da = mppath->mpp; | 1849 | mesh_da = mppath->mpp; |
1820 | else if (mpath) | 1850 | else if (mpath) |
1821 | mesh_da = mpath->dst; | 1851 | mesh_da = mpath->dst; |
1822 | rcu_read_unlock(); | ||
1823 | 1852 | ||
1824 | hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, | 1853 | hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, |
1825 | mesh_da, sdata->vif.addr); | 1854 | mesh_da, sdata->vif.addr); |
@@ -1839,13 +1868,16 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1839 | skb->data + ETH_ALEN); | 1868 | skb->data + ETH_ALEN); |
1840 | 1869 | ||
1841 | } | 1870 | } |
1871 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
1872 | if (!chanctx_conf) | ||
1873 | goto fail_rcu; | ||
1874 | band = chanctx_conf->def.chan->band; | ||
1842 | break; | 1875 | break; |
1843 | #endif | 1876 | #endif |
1844 | case NL80211_IFTYPE_STATION: | 1877 | case NL80211_IFTYPE_STATION: |
1845 | if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) { | 1878 | if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) { |
1846 | bool tdls_peer = false; | 1879 | bool tdls_peer = false; |
1847 | 1880 | ||
1848 | rcu_read_lock(); | ||
1849 | sta = sta_info_get(sdata, skb->data); | 1881 | sta = sta_info_get(sdata, skb->data); |
1850 | if (sta) { | 1882 | if (sta) { |
1851 | authorized = test_sta_flag(sta, | 1883 | authorized = test_sta_flag(sta, |
@@ -1856,7 +1888,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1856 | tdls_auth = test_sta_flag(sta, | 1888 | tdls_auth = test_sta_flag(sta, |
1857 | WLAN_STA_TDLS_PEER_AUTH); | 1889 | WLAN_STA_TDLS_PEER_AUTH); |
1858 | } | 1890 | } |
1859 | rcu_read_unlock(); | ||
1860 | 1891 | ||
1861 | /* | 1892 | /* |
1862 | * If the TDLS link is enabled, send everything | 1893 | * If the TDLS link is enabled, send everything |
@@ -1871,7 +1902,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1871 | if (tdls_direct) { | 1902 | if (tdls_direct) { |
1872 | /* link during setup - throw out frames to peer */ | 1903 | /* link during setup - throw out frames to peer */ |
1873 | if (!tdls_auth) | 1904 | if (!tdls_auth) |
1874 | goto fail; | 1905 | goto fail_rcu; |
1875 | 1906 | ||
1876 | /* DA SA BSSID */ | 1907 | /* DA SA BSSID */ |
1877 | memcpy(hdr.addr1, skb->data, ETH_ALEN); | 1908 | memcpy(hdr.addr1, skb->data, ETH_ALEN); |
@@ -1896,6 +1927,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1896 | memcpy(hdr.addr3, skb->data, ETH_ALEN); | 1927 | memcpy(hdr.addr3, skb->data, ETH_ALEN); |
1897 | hdrlen = 24; | 1928 | hdrlen = 24; |
1898 | } | 1929 | } |
1930 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
1931 | if (!chanctx_conf) | ||
1932 | goto fail_rcu; | ||
1933 | band = chanctx_conf->def.chan->band; | ||
1899 | break; | 1934 | break; |
1900 | case NL80211_IFTYPE_ADHOC: | 1935 | case NL80211_IFTYPE_ADHOC: |
1901 | /* DA SA BSSID */ | 1936 | /* DA SA BSSID */ |
@@ -1903,9 +1938,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1903 | memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); | 1938 | memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); |
1904 | memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN); | 1939 | memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN); |
1905 | hdrlen = 24; | 1940 | hdrlen = 24; |
1941 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
1942 | if (!chanctx_conf) | ||
1943 | goto fail_rcu; | ||
1944 | band = chanctx_conf->def.chan->band; | ||
1906 | break; | 1945 | break; |
1907 | default: | 1946 | default: |
1908 | goto fail; | 1947 | goto fail_rcu; |
1909 | } | 1948 | } |
1910 | 1949 | ||
1911 | /* | 1950 | /* |
@@ -1915,13 +1954,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1915 | */ | 1954 | */ |
1916 | multicast = is_multicast_ether_addr(hdr.addr1); | 1955 | multicast = is_multicast_ether_addr(hdr.addr1); |
1917 | if (!multicast) { | 1956 | if (!multicast) { |
1918 | rcu_read_lock(); | ||
1919 | sta = sta_info_get(sdata, hdr.addr1); | 1957 | sta = sta_info_get(sdata, hdr.addr1); |
1920 | if (sta) { | 1958 | if (sta) { |
1921 | authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); | 1959 | authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); |
1922 | wme_sta = test_sta_flag(sta, WLAN_STA_WME); | 1960 | wme_sta = test_sta_flag(sta, WLAN_STA_WME); |
1923 | } | 1961 | } |
1924 | rcu_read_unlock(); | ||
1925 | } | 1962 | } |
1926 | 1963 | ||
1927 | /* For mesh, the use of the QoS header is mandatory */ | 1964 | /* For mesh, the use of the QoS header is mandatory */ |
@@ -1949,7 +1986,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1949 | 1986 | ||
1950 | I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); | 1987 | I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); |
1951 | 1988 | ||
1952 | goto fail; | 1989 | goto fail_rcu; |
1953 | } | 1990 | } |
1954 | 1991 | ||
1955 | if (unlikely(!multicast && skb->sk && | 1992 | if (unlikely(!multicast && skb->sk && |
@@ -2004,7 +2041,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
2004 | kfree_skb(tmp_skb); | 2041 | kfree_skb(tmp_skb); |
2005 | 2042 | ||
2006 | if (!skb) | 2043 | if (!skb) |
2007 | goto fail; | 2044 | goto fail_rcu; |
2008 | } | 2045 | } |
2009 | 2046 | ||
2010 | hdr.frame_control = fc; | 2047 | hdr.frame_control = fc; |
@@ -2052,7 +2089,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
2052 | head_need = max_t(int, 0, head_need); | 2089 | head_need = max_t(int, 0, head_need); |
2053 | if (ieee80211_skb_resize(sdata, skb, head_need, true)) { | 2090 | if (ieee80211_skb_resize(sdata, skb, head_need, true)) { |
2054 | ieee80211_free_txskb(&local->hw, skb); | 2091 | ieee80211_free_txskb(&local->hw, skb); |
2055 | return NETDEV_TX_OK; | 2092 | skb = NULL; |
2093 | goto fail_rcu; | ||
2056 | } | 2094 | } |
2057 | } | 2095 | } |
2058 | 2096 | ||
@@ -2104,10 +2142,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
2104 | info->flags = info_flags; | 2142 | info->flags = info_flags; |
2105 | info->ack_frame_id = info_id; | 2143 | info->ack_frame_id = info_id; |
2106 | 2144 | ||
2107 | ieee80211_xmit(sdata, skb); | 2145 | ieee80211_xmit(sdata, skb, band); |
2146 | rcu_read_unlock(); | ||
2108 | 2147 | ||
2109 | return NETDEV_TX_OK; | 2148 | return NETDEV_TX_OK; |
2110 | 2149 | ||
2150 | fail_rcu: | ||
2151 | rcu_read_unlock(); | ||
2111 | fail: | 2152 | fail: |
2112 | dev_kfree_skb(skb); | 2153 | dev_kfree_skb(skb); |
2113 | return NETDEV_TX_OK; | 2154 | return NETDEV_TX_OK; |
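Each interface-type case above now resolves the transmit band from the vif's channel context under the single rcu_read_lock() taken at the top of ieee80211_subif_start_xmit(); WDS is the one exception that still reads hw.conf.channel. The repeated lookup could be expressed as a helper along these lines (hypothetical; mac80211 open-codes it per case, and the -ENOLINK return is only for illustration):

    /* hypothetical helper; the caller must hold rcu_read_lock() */
    static int ieee80211_vif_get_band(struct ieee80211_sub_if_data *sdata,
                                      enum ieee80211_band *band)
    {
            struct ieee80211_chanctx_conf *chanctx_conf;

            chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
            if (!chanctx_conf)
                    return -ENOLINK;        /* no channel context assigned */

            *band = chanctx_conf->def.chan->band;
            return 0;
    }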
@@ -2142,11 +2183,18 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, | |||
2142 | struct sta_info *sta; | 2183 | struct sta_info *sta; |
2143 | struct ieee80211_hdr *hdr; | 2184 | struct ieee80211_hdr *hdr; |
2144 | bool result; | 2185 | bool result; |
2186 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
2145 | 2187 | ||
2146 | sdata = vif_to_sdata(info->control.vif); | 2188 | sdata = vif_to_sdata(info->control.vif); |
2147 | 2189 | ||
2148 | if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { | 2190 | if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { |
2149 | result = ieee80211_tx(sdata, skb, true); | 2191 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); |
2192 | if (unlikely(!chanctx_conf)) { | ||
2193 | dev_kfree_skb(skb); | ||
2194 | return true; | ||
2195 | } | ||
2196 | result = ieee80211_tx(sdata, skb, true, | ||
2197 | chanctx_conf->def.chan->band); | ||
2150 | } else { | 2198 | } else { |
2151 | struct sk_buff_head skbs; | 2199 | struct sk_buff_head skbs; |
2152 | 2200 | ||
@@ -2214,9 +2262,8 @@ void ieee80211_tx_pending(unsigned long data) | |||
2214 | /* functions for drivers to get certain frames */ | 2262 | /* functions for drivers to get certain frames */ |
2215 | 2263 | ||
2216 | static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, | 2264 | static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, |
2217 | struct ieee80211_if_ap *bss, | 2265 | struct ps_data *ps, |
2218 | struct sk_buff *skb, | 2266 | struct sk_buff *skb) |
2219 | struct beacon_data *beacon) | ||
2220 | { | 2267 | { |
2221 | u8 *pos, *tim; | 2268 | u8 *pos, *tim; |
2222 | int aid0 = 0; | 2269 | int aid0 = 0; |
@@ -2224,27 +2271,27 @@ static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, | |||
2224 | 2271 | ||
2225 | /* Generate bitmap for TIM only if there are any STAs in power save | 2272 | /* Generate bitmap for TIM only if there are any STAs in power save |
2226 | * mode. */ | 2273 | * mode. */ |
2227 | if (atomic_read(&bss->num_sta_ps) > 0) | 2274 | if (atomic_read(&ps->num_sta_ps) > 0) |
2228 | /* in the hope that this is faster than | 2275 | /* in the hope that this is faster than |
2229 | * checking byte-for-byte */ | 2276 | * checking byte-for-byte */ |
2230 | have_bits = !bitmap_empty((unsigned long*)bss->tim, | 2277 | have_bits = !bitmap_empty((unsigned long*)ps->tim, |
2231 | IEEE80211_MAX_AID+1); | 2278 | IEEE80211_MAX_AID+1); |
2232 | 2279 | ||
2233 | if (bss->dtim_count == 0) | 2280 | if (ps->dtim_count == 0) |
2234 | bss->dtim_count = sdata->vif.bss_conf.dtim_period - 1; | 2281 | ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1; |
2235 | else | 2282 | else |
2236 | bss->dtim_count--; | 2283 | ps->dtim_count--; |
2237 | 2284 | ||
2238 | tim = pos = (u8 *) skb_put(skb, 6); | 2285 | tim = pos = (u8 *) skb_put(skb, 6); |
2239 | *pos++ = WLAN_EID_TIM; | 2286 | *pos++ = WLAN_EID_TIM; |
2240 | *pos++ = 4; | 2287 | *pos++ = 4; |
2241 | *pos++ = bss->dtim_count; | 2288 | *pos++ = ps->dtim_count; |
2242 | *pos++ = sdata->vif.bss_conf.dtim_period; | 2289 | *pos++ = sdata->vif.bss_conf.dtim_period; |
2243 | 2290 | ||
2244 | if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf)) | 2291 | if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf)) |
2245 | aid0 = 1; | 2292 | aid0 = 1; |
2246 | 2293 | ||
2247 | bss->dtim_bc_mc = aid0 == 1; | 2294 | ps->dtim_bc_mc = aid0 == 1; |
2248 | 2295 | ||
2249 | if (have_bits) { | 2296 | if (have_bits) { |
2250 | /* Find largest even number N1 so that bits numbered 1 through | 2297 | /* Find largest even number N1 so that bits numbered 1 through |
@@ -2252,14 +2299,14 @@ static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, | |||
2252 | * (N2 + 1) x 8 through 2007 are 0. */ | 2299 | * (N2 + 1) x 8 through 2007 are 0. */ |
2253 | n1 = 0; | 2300 | n1 = 0; |
2254 | for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) { | 2301 | for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) { |
2255 | if (bss->tim[i]) { | 2302 | if (ps->tim[i]) { |
2256 | n1 = i & 0xfe; | 2303 | n1 = i & 0xfe; |
2257 | break; | 2304 | break; |
2258 | } | 2305 | } |
2259 | } | 2306 | } |
2260 | n2 = n1; | 2307 | n2 = n1; |
2261 | for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) { | 2308 | for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) { |
2262 | if (bss->tim[i]) { | 2309 | if (ps->tim[i]) { |
2263 | n2 = i; | 2310 | n2 = i; |
2264 | break; | 2311 | break; |
2265 | } | 2312 | } |
@@ -2269,7 +2316,7 @@ static void ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, | |||
2269 | *pos++ = n1 | aid0; | 2316 | *pos++ = n1 | aid0; |
2270 | /* Part Virt Bitmap */ | 2317 | /* Part Virt Bitmap */ |
2271 | skb_put(skb, n2 - n1); | 2318 | skb_put(skb, n2 - n1); |
2272 | memcpy(pos, bss->tim + n1, n2 - n1 + 1); | 2319 | memcpy(pos, ps->tim + n1, n2 - n1 + 1); |
2273 | 2320 | ||
2274 | tim[1] = n2 - n1 + 4; | 2321 | tim[1] = n2 - n1 + 4; |
2275 | } else { | 2322 | } else { |
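The TIM hunks above only switch the bitmap source from bss->tim to ps->tim; the partial-virtual-bitmap encoding (largest even N1 at or below the first non-zero octet, last non-zero octet N2, copy octets N1..N2) is untouched. A self-contained model of that encoding, with a shortened bitmap for brevity:

    #include <stdio.h>
    #include <string.h>

    #define TIM_LEN 16      /* stand-in for IEEE80211_MAX_TIM_LEN */

    /* returns the number of octets written to out (N2 - N1 + 1),
     * stores the bitmap offset N1 in *offset */
    static int tim_partial_bitmap(const unsigned char *tim, unsigned char *out,
                                  int *offset)
    {
            int n1 = 0, n2, i;

            for (i = 0; i < TIM_LEN; i++)
                    if (tim[i]) { n1 = i & 0xfe; break; }   /* largest even N1 */
            n2 = n1;
            for (i = TIM_LEN - 1; i >= n1; i--)
                    if (tim[i]) { n2 = i; break; }

            *offset = n1;
            memcpy(out, tim + n1, n2 - n1 + 1);
            return n2 - n1 + 1;
    }

    int main(void)
    {
            unsigned char tim[TIM_LEN] = { 0 }, out[TIM_LEN];
            int off, len;

            tim[3] = 0x10;  /* AID 28 has buffered frames */
            tim[6] = 0x01;  /* AID 48 has buffered frames */
            len = tim_partial_bitmap(tim, out, &off);
            printf("offset %d, %d octet(s)\n", off, len);   /* offset 2, 5 octet(s) */
            return 0;
    }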
@@ -2286,16 +2333,16 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, | |||
2286 | struct sk_buff *skb = NULL; | 2333 | struct sk_buff *skb = NULL; |
2287 | struct ieee80211_tx_info *info; | 2334 | struct ieee80211_tx_info *info; |
2288 | struct ieee80211_sub_if_data *sdata = NULL; | 2335 | struct ieee80211_sub_if_data *sdata = NULL; |
2289 | struct ieee80211_if_ap *ap = NULL; | 2336 | enum ieee80211_band band; |
2290 | struct beacon_data *beacon; | ||
2291 | enum ieee80211_band band = local->oper_channel->band; | ||
2292 | struct ieee80211_tx_rate_control txrc; | 2337 | struct ieee80211_tx_rate_control txrc; |
2338 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
2293 | 2339 | ||
2294 | rcu_read_lock(); | 2340 | rcu_read_lock(); |
2295 | 2341 | ||
2296 | sdata = vif_to_sdata(vif); | 2342 | sdata = vif_to_sdata(vif); |
2343 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
2297 | 2344 | ||
2298 | if (!ieee80211_sdata_running(sdata)) | 2345 | if (!ieee80211_sdata_running(sdata) || !chanctx_conf) |
2299 | goto out; | 2346 | goto out; |
2300 | 2347 | ||
2301 | if (tim_offset) | 2348 | if (tim_offset) |
@@ -2304,8 +2351,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, | |||
2304 | *tim_length = 0; | 2351 | *tim_length = 0; |
2305 | 2352 | ||
2306 | if (sdata->vif.type == NL80211_IFTYPE_AP) { | 2353 | if (sdata->vif.type == NL80211_IFTYPE_AP) { |
2307 | ap = &sdata->u.ap; | 2354 | struct ieee80211_if_ap *ap = &sdata->u.ap; |
2308 | beacon = rcu_dereference(ap->beacon); | 2355 | struct beacon_data *beacon = rcu_dereference(ap->beacon); |
2356 | |||
2309 | if (beacon) { | 2357 | if (beacon) { |
2310 | /* | 2358 | /* |
2311 | * headroom, head length, | 2359 | * headroom, head length, |
@@ -2329,14 +2377,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, | |||
2329 | * of the tim bitmap in mac80211 and the driver. | 2377 | * of the tim bitmap in mac80211 and the driver. |
2330 | */ | 2378 | */ |
2331 | if (local->tim_in_locked_section) { | 2379 | if (local->tim_in_locked_section) { |
2332 | ieee80211_beacon_add_tim(sdata, ap, skb, | 2380 | ieee80211_beacon_add_tim(sdata, &ap->ps, skb); |
2333 | beacon); | ||
2334 | } else { | 2381 | } else { |
2335 | unsigned long flags; | 2382 | unsigned long flags; |
2336 | 2383 | ||
2337 | spin_lock_irqsave(&local->tim_lock, flags); | 2384 | spin_lock_irqsave(&local->tim_lock, flags); |
2338 | ieee80211_beacon_add_tim(sdata, ap, skb, | 2385 | ieee80211_beacon_add_tim(sdata, &ap->ps, skb); |
2339 | beacon); | ||
2340 | spin_unlock_irqrestore(&local->tim_lock, flags); | 2386 | spin_unlock_irqrestore(&local->tim_lock, flags); |
2341 | } | 2387 | } |
2342 | 2388 | ||
@@ -2412,6 +2458,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, | |||
2412 | *pos++ = WLAN_EID_SSID; | 2458 | *pos++ = WLAN_EID_SSID; |
2413 | *pos++ = 0x0; | 2459 | *pos++ = 0x0; |
2414 | 2460 | ||
2461 | band = chanctx_conf->def.chan->band; | ||
2462 | |||
2415 | if (ieee80211_add_srates_ie(sdata, skb, true, band) || | 2463 | if (ieee80211_add_srates_ie(sdata, skb, true, band) || |
2416 | mesh_add_ds_params_ie(skb, sdata) || | 2464 | mesh_add_ds_params_ie(skb, sdata) || |
2417 | ieee80211_add_ext_srates_ie(sdata, skb, true, band) || | 2465 | ieee80211_add_ext_srates_ie(sdata, skb, true, band) || |
@@ -2429,6 +2477,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, | |||
2429 | goto out; | 2477 | goto out; |
2430 | } | 2478 | } |
2431 | 2479 | ||
2480 | band = chanctx_conf->def.chan->band; | ||
2481 | |||
2432 | info = IEEE80211_SKB_CB(skb); | 2482 | info = IEEE80211_SKB_CB(skb); |
2433 | 2483 | ||
2434 | info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; | 2484 | info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; |
@@ -2656,29 +2706,40 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
2656 | struct sk_buff *skb = NULL; | 2706 | struct sk_buff *skb = NULL; |
2657 | struct ieee80211_tx_data tx; | 2707 | struct ieee80211_tx_data tx; |
2658 | struct ieee80211_sub_if_data *sdata; | 2708 | struct ieee80211_sub_if_data *sdata; |
2659 | struct ieee80211_if_ap *bss = NULL; | 2709 | struct ps_data *ps; |
2660 | struct beacon_data *beacon; | ||
2661 | struct ieee80211_tx_info *info; | 2710 | struct ieee80211_tx_info *info; |
2711 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
2662 | 2712 | ||
2663 | sdata = vif_to_sdata(vif); | 2713 | sdata = vif_to_sdata(vif); |
2664 | bss = &sdata->u.ap; | ||
2665 | 2714 | ||
2666 | rcu_read_lock(); | 2715 | rcu_read_lock(); |
2667 | beacon = rcu_dereference(bss->beacon); | 2716 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); |
2668 | 2717 | ||
2669 | if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head) | 2718 | if (!chanctx_conf) |
2670 | goto out; | 2719 | goto out; |
2671 | 2720 | ||
2672 | if (bss->dtim_count != 0 || !bss->dtim_bc_mc) | 2721 | if (sdata->vif.type == NL80211_IFTYPE_AP) { |
2722 | struct beacon_data *beacon = | ||
2723 | rcu_dereference(sdata->u.ap.beacon); | ||
2724 | |||
2725 | if (!beacon || !beacon->head) | ||
2726 | goto out; | ||
2727 | |||
2728 | ps = &sdata->u.ap.ps; | ||
2729 | } else { | ||
2730 | goto out; | ||
2731 | } | ||
2732 | |||
2733 | if (ps->dtim_count != 0 || !ps->dtim_bc_mc) | ||
2673 | goto out; /* send buffered bc/mc only after DTIM beacon */ | 2734 | goto out; /* send buffered bc/mc only after DTIM beacon */ |
2674 | 2735 | ||
2675 | while (1) { | 2736 | while (1) { |
2676 | skb = skb_dequeue(&bss->ps_bc_buf); | 2737 | skb = skb_dequeue(&ps->bc_buf); |
2677 | if (!skb) | 2738 | if (!skb) |
2678 | goto out; | 2739 | goto out; |
2679 | local->total_ps_buffered--; | 2740 | local->total_ps_buffered--; |
2680 | 2741 | ||
2681 | if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { | 2742 | if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) { |
2682 | struct ieee80211_hdr *hdr = | 2743 | struct ieee80211_hdr *hdr = |
2683 | (struct ieee80211_hdr *) skb->data; | 2744 | (struct ieee80211_hdr *) skb->data; |
2684 | /* more buffered multicast/broadcast frames ==> set | 2745 | /* more buffered multicast/broadcast frames ==> set |
@@ -2696,7 +2757,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
2696 | info = IEEE80211_SKB_CB(skb); | 2757 | info = IEEE80211_SKB_CB(skb); |
2697 | 2758 | ||
2698 | tx.flags |= IEEE80211_TX_PS_BUFFERED; | 2759 | tx.flags |= IEEE80211_TX_PS_BUFFERED; |
2699 | info->band = local->oper_channel->band; | 2760 | info->band = chanctx_conf->def.chan->band; |
2700 | 2761 | ||
2701 | if (invoke_tx_handlers(&tx)) | 2762 | if (invoke_tx_handlers(&tx)) |
2702 | skb = NULL; | 2763 | skb = NULL; |
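Release of buffered group-addressed frames in ieee80211_get_buffered_bc() stays gated on the DTIM count having reached zero and on the TIM having advertised buffered bc/mc traffic; only the fields have moved into ps_data. A one-line model of that gate (helper name invented, fields taken from the hunks):

    /* invented helper modelling the DTIM gate checked above */
    static bool ps_may_release_bc(const struct ps_data *ps)
    {
            /* only right after a DTIM beacon whose TIM set the AID-0 bit */
            return ps->dtim_count == 0 && ps->dtim_bc_mc;
    }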
@@ -2707,8 +2768,9 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
2707 | } | 2768 | } |
2708 | EXPORT_SYMBOL(ieee80211_get_buffered_bc); | 2769 | EXPORT_SYMBOL(ieee80211_get_buffered_bc); |
2709 | 2770 | ||
2710 | void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, | 2771 | void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, |
2711 | struct sk_buff *skb, int tid) | 2772 | struct sk_buff *skb, int tid, |
2773 | enum ieee80211_band band) | ||
2712 | { | 2774 | { |
2713 | int ac = ieee802_1d_to_ac[tid & 7]; | 2775 | int ac = ieee802_1d_to_ac[tid & 7]; |
2714 | 2776 | ||
@@ -2725,6 +2787,6 @@ void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, | |||
2725 | * requirements are that we do not come into tx with bhs on. | 2787 | * requirements are that we do not come into tx with bhs on. |
2726 | */ | 2788 | */ |
2727 | local_bh_disable(); | 2789 | local_bh_disable(); |
2728 | ieee80211_xmit(sdata, skb); | 2790 | ieee80211_xmit(sdata, skb, band); |
2729 | local_bh_enable(); | 2791 | local_bh_enable(); |
2730 | } | 2792 | } |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 0151ae33c4cd..08132ff98155 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -512,7 +512,7 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw) | |||
512 | EXPORT_SYMBOL(ieee80211_wake_queues); | 512 | EXPORT_SYMBOL(ieee80211_wake_queues); |
513 | 513 | ||
514 | void ieee80211_iterate_active_interfaces( | 514 | void ieee80211_iterate_active_interfaces( |
515 | struct ieee80211_hw *hw, | 515 | struct ieee80211_hw *hw, u32 iter_flags, |
516 | void (*iterator)(void *data, u8 *mac, | 516 | void (*iterator)(void *data, u8 *mac, |
517 | struct ieee80211_vif *vif), | 517 | struct ieee80211_vif *vif), |
518 | void *data) | 518 | void *data) |
@@ -530,6 +530,9 @@ void ieee80211_iterate_active_interfaces( | |||
530 | default: | 530 | default: |
531 | break; | 531 | break; |
532 | } | 532 | } |
533 | if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) && | ||
534 | !(sdata->flags & IEEE80211_SDATA_IN_DRIVER)) | ||
535 | continue; | ||
533 | if (ieee80211_sdata_running(sdata)) | 536 | if (ieee80211_sdata_running(sdata)) |
534 | iterator(data, sdata->vif.addr, | 537 | iterator(data, sdata->vif.addr, |
535 | &sdata->vif); | 538 | &sdata->vif); |
@@ -537,7 +540,9 @@ void ieee80211_iterate_active_interfaces( | |||
537 | 540 | ||
538 | sdata = rcu_dereference_protected(local->monitor_sdata, | 541 | sdata = rcu_dereference_protected(local->monitor_sdata, |
539 | lockdep_is_held(&local->iflist_mtx)); | 542 | lockdep_is_held(&local->iflist_mtx)); |
540 | if (sdata) | 543 | if (sdata && |
544 | (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || | ||
545 | sdata->flags & IEEE80211_SDATA_IN_DRIVER)) | ||
541 | iterator(data, sdata->vif.addr, &sdata->vif); | 546 | iterator(data, sdata->vif.addr, &sdata->vif); |
542 | 547 | ||
543 | mutex_unlock(&local->iflist_mtx); | 548 | mutex_unlock(&local->iflist_mtx); |
@@ -545,7 +550,7 @@ void ieee80211_iterate_active_interfaces( | |||
545 | EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces); | 550 | EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces); |
546 | 551 | ||
547 | void ieee80211_iterate_active_interfaces_atomic( | 552 | void ieee80211_iterate_active_interfaces_atomic( |
548 | struct ieee80211_hw *hw, | 553 | struct ieee80211_hw *hw, u32 iter_flags, |
549 | void (*iterator)(void *data, u8 *mac, | 554 | void (*iterator)(void *data, u8 *mac, |
550 | struct ieee80211_vif *vif), | 555 | struct ieee80211_vif *vif), |
551 | void *data) | 556 | void *data) |
@@ -563,13 +568,18 @@ void ieee80211_iterate_active_interfaces_atomic( | |||
563 | default: | 568 | default: |
564 | break; | 569 | break; |
565 | } | 570 | } |
571 | if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) && | ||
572 | !(sdata->flags & IEEE80211_SDATA_IN_DRIVER)) | ||
573 | continue; | ||
566 | if (ieee80211_sdata_running(sdata)) | 574 | if (ieee80211_sdata_running(sdata)) |
567 | iterator(data, sdata->vif.addr, | 575 | iterator(data, sdata->vif.addr, |
568 | &sdata->vif); | 576 | &sdata->vif); |
569 | } | 577 | } |
570 | 578 | ||
571 | sdata = rcu_dereference(local->monitor_sdata); | 579 | sdata = rcu_dereference(local->monitor_sdata); |
572 | if (sdata) | 580 | if (sdata && |
581 | (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || | ||
582 | sdata->flags & IEEE80211_SDATA_IN_DRIVER)) | ||
573 | iterator(data, sdata->vif.addr, &sdata->vif); | 583 | iterator(data, sdata->vif.addr, &sdata->vif); |
574 | 584 | ||
575 | rcu_read_unlock(); | 585 | rcu_read_unlock(); |
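Both interface iterators above gain an iter_flags word: unless IEEE80211_IFACE_ITER_RESUME_ALL is passed, interfaces that do not carry IEEE80211_SDATA_IN_DRIVER are now skipped. A sketch of an updated driver-side call (callback and wrapper are invented purely to show the new argument order):

    /* invented callback: counts the vifs the iterator visits */
    static void count_vifs(void *data, u8 *mac, struct ieee80211_vif *vif)
    {
            (*(unsigned int *)data)++;
    }

    static unsigned int count_running_vifs(struct ieee80211_hw *hw)
    {
            unsigned int n = 0;

            /* pass 0 to visit only interfaces already known to the driver,
             * or IEEE80211_IFACE_ITER_RESUME_ALL (e.g. on restart paths)
             * to visit every running interface */
            ieee80211_iterate_active_interfaces_atomic(hw,
                                                       IEEE80211_IFACE_ITER_RESUME_ALL,
                                                       count_vifs, &n);
            return n;
    }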
@@ -769,6 +779,18 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, | |||
769 | else | 779 | else |
770 | elem_parse_failed = true; | 780 | elem_parse_failed = true; |
771 | break; | 781 | break; |
782 | case WLAN_EID_VHT_CAPABILITY: | ||
783 | if (elen >= sizeof(struct ieee80211_vht_cap)) | ||
784 | elems->vht_cap_elem = (void *)pos; | ||
785 | else | ||
786 | elem_parse_failed = true; | ||
787 | break; | ||
788 | case WLAN_EID_VHT_OPERATION: | ||
789 | if (elen >= sizeof(struct ieee80211_vht_operation)) | ||
790 | elems->vht_operation = (void *)pos; | ||
791 | else | ||
792 | elem_parse_failed = true; | ||
793 | break; | ||
772 | case WLAN_EID_MESH_ID: | 794 | case WLAN_EID_MESH_ID: |
773 | elems->mesh_id = pos; | 795 | elems->mesh_id = pos; |
774 | elems->mesh_id_len = elen; | 796 | elems->mesh_id_len = elen; |
@@ -837,7 +859,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, | |||
837 | if (elem_parse_failed) | 859 | if (elem_parse_failed) |
838 | elems->parse_error = true; | 860 | elems->parse_error = true; |
839 | else | 861 | else |
840 | set_bit(id, seen_elems); | 862 | __set_bit(id, seen_elems); |
841 | 863 | ||
842 | left -= elen; | 864 | left -= elen; |
843 | pos += elen; | 865 | pos += elen; |
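The new VHT element cases follow the same defensive rule as the other fixed-size elements parsed above: the pointer is only taken when the element body is at least as long as the structure it will be cast to, otherwise the element is flagged as a parse error. A self-contained sketch of that id/length/value walk (element ID and minimum length are placeholders, not the kernel's definitions):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct parsed { const uint8_t *vht_cap; bool parse_error; };

    #define EID_VHT_CAP     191     /* placeholder element ID */
    #define VHT_CAP_MIN_LEN 12      /* placeholder minimum body length */

    static void parse_elems(const uint8_t *pos, size_t left, struct parsed *out)
    {
            while (left >= 2) {
                    uint8_t id = pos[0], elen = pos[1];

                    pos += 2;
                    left -= 2;
                    if (elen > left) {              /* element overruns buffer */
                            out->parse_error = true;
                            return;
                    }

                    if (id == EID_VHT_CAP) {
                            if (elen >= VHT_CAP_MIN_LEN)
                                    out->vht_cap = pos;      /* long enough: accept */
                            else
                                    out->parse_error = true; /* too short: reject */
                    }

                    pos += elen;
                    left -= elen;
            }
    }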
@@ -860,6 +882,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, | |||
860 | { | 882 | { |
861 | struct ieee80211_local *local = sdata->local; | 883 | struct ieee80211_local *local = sdata->local; |
862 | struct ieee80211_tx_queue_params qparam; | 884 | struct ieee80211_tx_queue_params qparam; |
885 | struct ieee80211_chanctx_conf *chanctx_conf; | ||
863 | int ac; | 886 | int ac; |
864 | bool use_11b, enable_qos; | 887 | bool use_11b, enable_qos; |
865 | int aCWmin, aCWmax; | 888 | int aCWmin, aCWmax; |
@@ -872,8 +895,12 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, | |||
872 | 895 | ||
873 | memset(&qparam, 0, sizeof(qparam)); | 896 | memset(&qparam, 0, sizeof(qparam)); |
874 | 897 | ||
875 | use_11b = (local->oper_channel->band == IEEE80211_BAND_2GHZ) && | 898 | rcu_read_lock(); |
899 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
900 | use_11b = (chanctx_conf && | ||
901 | chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ) && | ||
876 | !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); | 902 | !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); |
903 | rcu_read_unlock(); | ||
877 | 904 | ||
878 | /* | 905 | /* |
879 | * By default disable QoS in STA mode for old access points, which do | 906 | * By default disable QoS in STA mode for old access points, which do |
@@ -952,7 +979,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, | |||
952 | const size_t supp_rates_len, | 979 | const size_t supp_rates_len, |
953 | const u8 *supp_rates) | 980 | const u8 *supp_rates) |
954 | { | 981 | { |
955 | struct ieee80211_local *local = sdata->local; | 982 | struct ieee80211_chanctx_conf *chanctx_conf; |
956 | int i, have_higher_than_11mbit = 0; | 983 | int i, have_higher_than_11mbit = 0; |
957 | 984 | ||
958 | /* cf. IEEE 802.11 9.2.12 */ | 985 | /* cf. IEEE 802.11 9.2.12 */ |
@@ -960,11 +987,16 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, | |||
960 | if ((supp_rates[i] & 0x7f) * 5 > 110) | 987 | if ((supp_rates[i] & 0x7f) * 5 > 110) |
961 | have_higher_than_11mbit = 1; | 988 | have_higher_than_11mbit = 1; |
962 | 989 | ||
963 | if (local->oper_channel->band == IEEE80211_BAND_2GHZ && | 990 | rcu_read_lock(); |
991 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | ||
992 | |||
993 | if (chanctx_conf && | ||
994 | chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ && | ||
964 | have_higher_than_11mbit) | 995 | have_higher_than_11mbit) |
965 | sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; | 996 | sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; |
966 | else | 997 | else |
967 | sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; | 998 | sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; |
999 | rcu_read_unlock(); | ||
968 | 1000 | ||
969 | ieee80211_set_wmm_default(sdata, true); | 1001 | ieee80211_set_wmm_default(sdata, true); |
970 | } | 1002 | } |
@@ -996,7 +1028,7 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local, | |||
996 | } | 1028 | } |
997 | 1029 | ||
998 | void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, | 1030 | void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, |
999 | u16 transaction, u16 auth_alg, | 1031 | u16 transaction, u16 auth_alg, u16 status, |
1000 | u8 *extra, size_t extra_len, const u8 *da, | 1032 | u8 *extra, size_t extra_len, const u8 *da, |
1001 | const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx) | 1033 | const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx) |
1002 | { | 1034 | { |
@@ -1021,7 +1053,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, | |||
1021 | memcpy(mgmt->bssid, bssid, ETH_ALEN); | 1053 | memcpy(mgmt->bssid, bssid, ETH_ALEN); |
1022 | mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg); | 1054 | mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg); |
1023 | mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); | 1055 | mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); |
1024 | mgmt->u.auth.status_code = cpu_to_le16(0); | 1056 | mgmt->u.auth.status_code = cpu_to_le16(status); |
1025 | if (extra) | 1057 | if (extra) |
1026 | memcpy(skb_put(skb, extra_len), extra, extra_len); | 1058 | memcpy(skb_put(skb, extra_len), extra, extra_len); |
1027 | 1059 | ||
@@ -1234,7 +1266,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, | |||
1234 | const u8 *ssid, size_t ssid_len, | 1266 | const u8 *ssid, size_t ssid_len, |
1235 | const u8 *ie, size_t ie_len, | 1267 | const u8 *ie, size_t ie_len, |
1236 | u32 ratemask, bool directed, bool no_cck, | 1268 | u32 ratemask, bool directed, bool no_cck, |
1237 | struct ieee80211_channel *channel) | 1269 | struct ieee80211_channel *channel, bool scan) |
1238 | { | 1270 | { |
1239 | struct sk_buff *skb; | 1271 | struct sk_buff *skb; |
1240 | 1272 | ||
@@ -1245,7 +1277,10 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, | |||
1245 | if (no_cck) | 1277 | if (no_cck) |
1246 | IEEE80211_SKB_CB(skb)->flags |= | 1278 | IEEE80211_SKB_CB(skb)->flags |= |
1247 | IEEE80211_TX_CTL_NO_CCK_RATE; | 1279 | IEEE80211_TX_CTL_NO_CCK_RATE; |
1248 | ieee80211_tx_skb(sdata, skb); | 1280 | if (scan) |
1281 | ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); | ||
1282 | else | ||
1283 | ieee80211_tx_skb(sdata, skb); | ||
1249 | } | 1284 | } |
1250 | } | 1285 | } |
1251 | 1286 | ||
@@ -1308,6 +1343,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1308 | { | 1343 | { |
1309 | struct ieee80211_hw *hw = &local->hw; | 1344 | struct ieee80211_hw *hw = &local->hw; |
1310 | struct ieee80211_sub_if_data *sdata; | 1345 | struct ieee80211_sub_if_data *sdata; |
1346 | struct ieee80211_chanctx *ctx; | ||
1311 | struct sta_info *sta; | 1347 | struct sta_info *sta; |
1312 | int res, i; | 1348 | int res, i; |
1313 | 1349 | ||
@@ -1380,6 +1416,46 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1380 | res = drv_add_interface(local, sdata); | 1416 | res = drv_add_interface(local, sdata); |
1381 | } | 1417 | } |
1382 | 1418 | ||
1419 | /* add channel contexts */ | ||
1420 | if (local->use_chanctx) { | ||
1421 | mutex_lock(&local->chanctx_mtx); | ||
1422 | list_for_each_entry(ctx, &local->chanctx_list, list) | ||
1423 | WARN_ON(drv_add_chanctx(local, ctx)); | ||
1424 | mutex_unlock(&local->chanctx_mtx); | ||
1425 | } | ||
1426 | |||
1427 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
1428 | struct ieee80211_chanctx_conf *ctx_conf; | ||
1429 | |||
1430 | if (!ieee80211_sdata_running(sdata)) | ||
1431 | continue; | ||
1432 | |||
1433 | mutex_lock(&local->chanctx_mtx); | ||
1434 | ctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf, | ||
1435 | lockdep_is_held(&local->chanctx_mtx)); | ||
1436 | if (ctx_conf) { | ||
1437 | ctx = container_of(ctx_conf, struct ieee80211_chanctx, | ||
1438 | conf); | ||
1439 | drv_assign_vif_chanctx(local, sdata, ctx); | ||
1440 | } | ||
1441 | mutex_unlock(&local->chanctx_mtx); | ||
1442 | } | ||
1443 | |||
1444 | sdata = rtnl_dereference(local->monitor_sdata); | ||
1445 | if (sdata && local->use_chanctx && ieee80211_sdata_running(sdata)) { | ||
1446 | struct ieee80211_chanctx_conf *ctx_conf; | ||
1447 | |||
1448 | mutex_lock(&local->chanctx_mtx); | ||
1449 | ctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf, | ||
1450 | lockdep_is_held(&local->chanctx_mtx)); | ||
1451 | if (ctx_conf) { | ||
1452 | ctx = container_of(ctx_conf, struct ieee80211_chanctx, | ||
1453 | conf); | ||
1454 | drv_assign_vif_chanctx(local, sdata, ctx); | ||
1455 | } | ||
1456 | mutex_unlock(&local->chanctx_mtx); | ||
1457 | } | ||
1458 | |||
1383 | /* add STAs back */ | 1459 | /* add STAs back */ |
1384 | mutex_lock(&local->sta_mtx); | 1460 | mutex_lock(&local->sta_mtx); |
1385 | list_for_each_entry(sta, &local->sta_list, list) { | 1461 | list_for_each_entry(sta, &local->sta_list, list) { |
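During a hardware restart the channel contexts are now replayed to the driver before stations are re-added: every context on chanctx_list is re-created with drv_add_chanctx() and each running interface (plus the monitor interface) is re-bound with drv_assign_vif_chanctx(). On the driver side this lands in the matching ieee80211_ops callbacks; the stub below is a hypothetical driver sketch, with the mandatory ops omitted:

        static int mydrv_add_chanctx(struct ieee80211_hw *hw,
                                     struct ieee80211_chanctx_conf *conf)
        {
                /* retune the radio according to the context's channel settings */
                return 0;
        }

        static int mydrv_assign_vif_chanctx(struct ieee80211_hw *hw,
                                            struct ieee80211_vif *vif,
                                            struct ieee80211_chanctx_conf *conf)
        {
                /* bind this vif to the (re-)added context in firmware */
                return 0;
        }

        static const struct ieee80211_ops mydrv_ops = {
                /* .tx, .start, .stop and other required ops omitted here */
                .add_chanctx            = mydrv_add_chanctx,
                .assign_vif_chanctx     = mydrv_assign_vif_chanctx,
        };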
@@ -1435,7 +1511,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1435 | BSS_CHANGED_BSSID | | 1511 | BSS_CHANGED_BSSID | |
1436 | BSS_CHANGED_CQM | | 1512 | BSS_CHANGED_CQM | |
1437 | BSS_CHANGED_QOS | | 1513 | BSS_CHANGED_QOS | |
1438 | BSS_CHANGED_IDLE; | 1514 | BSS_CHANGED_IDLE | |
1515 | BSS_CHANGED_TXPOWER; | ||
1439 | 1516 | ||
1440 | switch (sdata->vif.type) { | 1517 | switch (sdata->vif.type) { |
1441 | case NL80211_IFTYPE_STATION: | 1518 | case NL80211_IFTYPE_STATION: |
@@ -1452,9 +1529,13 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1452 | case NL80211_IFTYPE_AP: | 1529 | case NL80211_IFTYPE_AP: |
1453 | changed |= BSS_CHANGED_SSID; | 1530 | changed |= BSS_CHANGED_SSID; |
1454 | 1531 | ||
1455 | if (sdata->vif.type == NL80211_IFTYPE_AP) | 1532 | if (sdata->vif.type == NL80211_IFTYPE_AP) { |
1456 | changed |= BSS_CHANGED_AP_PROBE_RESP; | 1533 | changed |= BSS_CHANGED_AP_PROBE_RESP; |
1457 | 1534 | ||
1535 | if (rcu_access_pointer(sdata->u.ap.beacon)) | ||
1536 | drv_start_ap(local, sdata); | ||
1537 | } | ||
1538 | |||
1458 | /* fall through */ | 1539 | /* fall through */ |
1459 | case NL80211_IFTYPE_MESH_POINT: | 1540 | case NL80211_IFTYPE_MESH_POINT: |
1460 | changed |= BSS_CHANGED_BEACON | | 1541 | changed |= BSS_CHANGED_BEACON | |
@@ -1553,8 +1634,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1553 | * If this is for hw restart things are still running. | 1634 | * If this is for hw restart things are still running. |
1554 | * We may want to change that later, however. | 1635 | * We may want to change that later, however. |
1555 | */ | 1636 | */ |
1556 | if (!local->suspended) | 1637 | if (!local->suspended) { |
1638 | drv_restart_complete(local); | ||
1557 | return 0; | 1639 | return 0; |
1640 | } | ||
1558 | 1641 | ||
1559 | #ifdef CONFIG_PM | 1642 | #ifdef CONFIG_PM |
1560 | /* first set suspended false, then resuming */ | 1643 | /* first set suspended false, then resuming */ |
@@ -1617,68 +1700,24 @@ void ieee80211_resume_disconnect(struct ieee80211_vif *vif) | |||
1617 | } | 1700 | } |
1618 | EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect); | 1701 | EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect); |
1619 | 1702 | ||
1620 | static int check_mgd_smps(struct ieee80211_if_managed *ifmgd, | 1703 | void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata) |
1621 | enum ieee80211_smps_mode *smps_mode) | ||
1622 | { | ||
1623 | if (ifmgd->associated) { | ||
1624 | *smps_mode = ifmgd->ap_smps; | ||
1625 | |||
1626 | if (*smps_mode == IEEE80211_SMPS_AUTOMATIC) { | ||
1627 | if (ifmgd->powersave) | ||
1628 | *smps_mode = IEEE80211_SMPS_DYNAMIC; | ||
1629 | else | ||
1630 | *smps_mode = IEEE80211_SMPS_OFF; | ||
1631 | } | ||
1632 | |||
1633 | return 1; | ||
1634 | } | ||
1635 | |||
1636 | return 0; | ||
1637 | } | ||
1638 | |||
1639 | void ieee80211_recalc_smps(struct ieee80211_local *local) | ||
1640 | { | 1704 | { |
1641 | struct ieee80211_sub_if_data *sdata; | 1705 | struct ieee80211_local *local = sdata->local; |
1642 | enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF; | 1706 | struct ieee80211_chanctx_conf *chanctx_conf; |
1643 | int count = 0; | 1707 | struct ieee80211_chanctx *chanctx; |
1644 | |||
1645 | mutex_lock(&local->iflist_mtx); | ||
1646 | |||
1647 | /* | ||
1648 | * This function could be improved to handle multiple | ||
1649 | * interfaces better, but right now it makes any | ||
1650 | * non-station interfaces force SM PS to be turned | ||
1651 | * off. If there are multiple station interfaces it | ||
1652 | * could also use the best possible mode, e.g. if | ||
1653 | * one is in static and the other in dynamic then | ||
1654 | * dynamic is ok. | ||
1655 | */ | ||
1656 | |||
1657 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
1658 | if (!ieee80211_sdata_running(sdata)) | ||
1659 | continue; | ||
1660 | if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) | ||
1661 | continue; | ||
1662 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | ||
1663 | goto set; | ||
1664 | 1708 | ||
1665 | count += check_mgd_smps(&sdata->u.mgd, &smps_mode); | 1709 | mutex_lock(&local->chanctx_mtx); |
1666 | 1710 | ||
1667 | if (count > 1) { | 1711 | chanctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf, |
1668 | smps_mode = IEEE80211_SMPS_OFF; | 1712 | lockdep_is_held(&local->chanctx_mtx)); |
1669 | break; | ||
1670 | } | ||
1671 | } | ||
1672 | 1713 | ||
1673 | if (smps_mode == local->smps_mode) | 1714 | if (WARN_ON_ONCE(!chanctx_conf)) |
1674 | goto unlock; | 1715 | goto unlock; |
1675 | 1716 | ||
1676 | set: | 1717 | chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); |
1677 | local->smps_mode = smps_mode; | 1718 | ieee80211_recalc_smps_chanctx(local, chanctx); |
1678 | /* changed flag is auto-detected for this */ | ||
1679 | ieee80211_hw_config(local, 0); | ||
1680 | unlock: | 1719 | unlock: |
1681 | mutex_unlock(&local->iflist_mtx); | 1720 | mutex_unlock(&local->chanctx_mtx); |
1682 | } | 1721 | } |
1683 | 1722 | ||
1684 | static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id) | 1723 | static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id) |
@@ -1818,8 +1857,8 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, | |||
1818 | __le32 tmp; | 1857 | __le32 tmp; |
1819 | 1858 | ||
1820 | *pos++ = WLAN_EID_VHT_CAPABILITY; | 1859 | *pos++ = WLAN_EID_VHT_CAPABILITY; |
1821 | *pos++ = sizeof(struct ieee80211_vht_capabilities); | 1860 | *pos++ = sizeof(struct ieee80211_vht_cap); |
1822 | memset(pos, 0, sizeof(struct ieee80211_vht_capabilities)); | 1861 | memset(pos, 0, sizeof(struct ieee80211_vht_cap)); |
1823 | 1862 | ||
1824 | /* capability flags */ | 1863 | /* capability flags */ |
1825 | tmp = cpu_to_le32(cap); | 1864 | tmp = cpu_to_le32(cap); |
@@ -1834,8 +1873,7 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, | |||
1834 | } | 1873 | } |
1835 | 1874 | ||
1836 | u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, | 1875 | u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, |
1837 | struct ieee80211_channel *channel, | 1876 | const struct cfg80211_chan_def *chandef, |
1838 | enum nl80211_channel_type channel_type, | ||
1839 | u16 prot_mode) | 1877 | u16 prot_mode) |
1840 | { | 1878 | { |
1841 | struct ieee80211_ht_operation *ht_oper; | 1879 | struct ieee80211_ht_operation *ht_oper; |
@@ -1843,23 +1881,25 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, | |||
1843 | *pos++ = WLAN_EID_HT_OPERATION; | 1881 | *pos++ = WLAN_EID_HT_OPERATION; |
1844 | *pos++ = sizeof(struct ieee80211_ht_operation); | 1882 | *pos++ = sizeof(struct ieee80211_ht_operation); |
1845 | ht_oper = (struct ieee80211_ht_operation *)pos; | 1883 | ht_oper = (struct ieee80211_ht_operation *)pos; |
1846 | ht_oper->primary_chan = | 1884 | ht_oper->primary_chan = ieee80211_frequency_to_channel( |
1847 | ieee80211_frequency_to_channel(channel->center_freq); | 1885 | chandef->chan->center_freq); |
1848 | switch (channel_type) { | 1886 | switch (chandef->width) { |
1849 | case NL80211_CHAN_HT40MINUS: | 1887 | case NL80211_CHAN_WIDTH_160: |
1850 | ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; | 1888 | case NL80211_CHAN_WIDTH_80P80: |
1851 | break; | 1889 | case NL80211_CHAN_WIDTH_80: |
1852 | case NL80211_CHAN_HT40PLUS: | 1890 | case NL80211_CHAN_WIDTH_40: |
1853 | ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; | 1891 | if (chandef->center_freq1 > chandef->chan->center_freq) |
1892 | ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; | ||
1893 | else | ||
1894 | ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; | ||
1854 | break; | 1895 | break; |
1855 | case NL80211_CHAN_HT20: | ||
1856 | default: | 1896 | default: |
1857 | ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; | 1897 | ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; |
1858 | break; | 1898 | break; |
1859 | } | 1899 | } |
1860 | if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && | 1900 | if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && |
1861 | channel_type != NL80211_CHAN_NO_HT && | 1901 | chandef->width != NL80211_CHAN_WIDTH_20_NOHT && |
1862 | channel_type != NL80211_CHAN_HT20) | 1902 | chandef->width != NL80211_CHAN_WIDTH_20) |
1863 | ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; | 1903 | ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; |
1864 | 1904 | ||
1865 | ht_oper->operation_mode = cpu_to_le16(prot_mode); | 1905 | ht_oper->operation_mode = cpu_to_le16(prot_mode); |
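With the switch from a channel/channel_type pair to a cfg80211_chan_def, the secondary-channel offset in the HT operation element is derived by comparing center_freq1 with the control channel's frequency. An illustrative 40 MHz definition on channel 36 (not from this patch; chan36 is assumed to be the struct ieee80211_channel for 5180 MHz):

        struct cfg80211_chan_def chandef = {
                .chan           = chan36,
                .width          = NL80211_CHAN_WIDTH_40,
                .center_freq1   = 5190, /* > 5180 => IEEE80211_HT_PARAM_CHA_SEC_ABOVE */
        };

With .center_freq1 = 5170 the element would instead advertise the secondary channel below the control channel.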
@@ -1873,13 +1913,17 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, | |||
1873 | return pos + sizeof(struct ieee80211_ht_operation); | 1913 | return pos + sizeof(struct ieee80211_ht_operation); |
1874 | } | 1914 | } |
1875 | 1915 | ||
1876 | enum nl80211_channel_type | 1916 | void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan, |
1877 | ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper) | 1917 | struct ieee80211_ht_operation *ht_oper, |
1918 | struct cfg80211_chan_def *chandef) | ||
1878 | { | 1919 | { |
1879 | enum nl80211_channel_type channel_type; | 1920 | enum nl80211_channel_type channel_type; |
1880 | 1921 | ||
1881 | if (!ht_oper) | 1922 | if (!ht_oper) { |
1882 | return NL80211_CHAN_NO_HT; | 1923 | cfg80211_chandef_create(chandef, control_chan, |
1924 | NL80211_CHAN_NO_HT); | ||
1925 | return; | ||
1926 | } | ||
1883 | 1927 | ||
1884 | switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { | 1928 | switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { |
1885 | case IEEE80211_HT_PARAM_CHA_SEC_NONE: | 1929 | case IEEE80211_HT_PARAM_CHA_SEC_NONE: |
@@ -1895,7 +1939,7 @@ ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper) | |||
1895 | channel_type = NL80211_CHAN_NO_HT; | 1939 | channel_type = NL80211_CHAN_NO_HT; |
1896 | } | 1940 | } |
1897 | 1941 | ||
1898 | return channel_type; | 1942 | cfg80211_chandef_create(chandef, control_chan, channel_type); |
1899 | } | 1943 | } |
1900 | 1944 | ||
1901 | int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, | 1945 | int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata, |
@@ -1977,3 +2021,84 @@ int ieee80211_ave_rssi(struct ieee80211_vif *vif) | |||
1977 | return ifmgd->ave_beacon_signal; | 2021 | return ifmgd->ave_beacon_signal; |
1978 | } | 2022 | } |
1979 | EXPORT_SYMBOL_GPL(ieee80211_ave_rssi); | 2023 | EXPORT_SYMBOL_GPL(ieee80211_ave_rssi); |
2024 | |||
2025 | u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs) | ||
2026 | { | ||
2027 | if (!mcs) | ||
2028 | return 1; | ||
2029 | |||
2030 | /* TODO: consider rx_highest */ | ||
2031 | |||
2032 | if (mcs->rx_mask[3]) | ||
2033 | return 4; | ||
2034 | if (mcs->rx_mask[2]) | ||
2035 | return 3; | ||
2036 | if (mcs->rx_mask[1]) | ||
2037 | return 2; | ||
2038 | return 1; | ||
2039 | } | ||
2040 | |||
2041 | /** | ||
2042 | * ieee80211_calculate_rx_timestamp - calculate timestamp in frame | ||
2043 | * @local: mac80211 hw info struct | ||
2044 | * @status: RX status | ||
2045 | * @mpdu_len: total MPDU length (including FCS) | ||
2046 | * @mpdu_offset: offset into MPDU to calculate timestamp at | ||
2047 | * | ||
2048 | * This function calculates the RX timestamp at the given MPDU offset, taking | ||
2049 | * into account what the RX timestamp was. An offset of 0 will just normalize | ||
2050 | * the timestamp to TSF at beginning of MPDU reception. | ||
2051 | */ | ||
2052 | u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, | ||
2053 | struct ieee80211_rx_status *status, | ||
2054 | unsigned int mpdu_len, | ||
2055 | unsigned int mpdu_offset) | ||
2056 | { | ||
2057 | u64 ts = status->mactime; | ||
2058 | struct rate_info ri; | ||
2059 | u16 rate; | ||
2060 | |||
2061 | if (WARN_ON(!ieee80211_have_rx_timestamp(status))) | ||
2062 | return 0; | ||
2063 | |||
2064 | memset(&ri, 0, sizeof(ri)); | ||
2065 | |||
2066 | /* Fill cfg80211 rate info */ | ||
2067 | if (status->flag & RX_FLAG_HT) { | ||
2068 | ri.mcs = status->rate_idx; | ||
2069 | ri.flags |= RATE_INFO_FLAGS_MCS; | ||
2070 | if (status->flag & RX_FLAG_40MHZ) | ||
2071 | ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; | ||
2072 | if (status->flag & RX_FLAG_SHORT_GI) | ||
2073 | ri.flags |= RATE_INFO_FLAGS_SHORT_GI; | ||
2074 | } else if (status->flag & RX_FLAG_VHT) { | ||
2075 | ri.flags |= RATE_INFO_FLAGS_VHT_MCS; | ||
2076 | ri.mcs = status->rate_idx; | ||
2077 | ri.nss = status->vht_nss; | ||
2078 | if (status->flag & RX_FLAG_40MHZ) | ||
2079 | ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; | ||
2080 | if (status->flag & RX_FLAG_80MHZ) | ||
2081 | ri.flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH; | ||
2082 | if (status->flag & RX_FLAG_80P80MHZ) | ||
2083 | ri.flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH; | ||
2084 | if (status->flag & RX_FLAG_160MHZ) | ||
2085 | ri.flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH; | ||
2086 | if (status->flag & RX_FLAG_SHORT_GI) | ||
2087 | ri.flags |= RATE_INFO_FLAGS_SHORT_GI; | ||
2088 | } else { | ||
2089 | struct ieee80211_supported_band *sband; | ||
2090 | |||
2091 | sband = local->hw.wiphy->bands[status->band]; | ||
2092 | ri.legacy = sband->bitrates[status->rate_idx].bitrate; | ||
2093 | } | ||
2094 | |||
2095 | rate = cfg80211_calculate_bitrate(&ri); | ||
2096 | |||
2097 | /* rewind from end of MPDU */ | ||
2098 | if (status->flag & RX_FLAG_MACTIME_END) | ||
2099 | ts -= mpdu_len * 8 * 10 / rate; | ||
2100 | |||
2101 | ts += mpdu_offset * 8 * 10 / rate; | ||
2102 | |||
2103 | return ts; | ||
2104 | } | ||
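cfg80211_calculate_bitrate() returns the rate in units of 100 kbit/s, so mpdu_len * 8 * 10 / rate is the MPDU airtime in microseconds. A worked check with illustrative numbers: a 1500-byte MPDU at 54 Mbit/s (rate == 540) gives 1500 * 8 * 10 / 540 = 222 us, which is how far an RX_FLAG_MACTIME_END timestamp is rewound to obtain the TSF at the start of the MPDU, and how far a non-zero mpdu_offset advances it again.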
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c new file mode 100644 index 000000000000..f311388aeedf --- /dev/null +++ b/net/mac80211/vht.c | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * VHT handling | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/ieee80211.h> | ||
10 | #include <linux/export.h> | ||
11 | #include <net/mac80211.h> | ||
12 | #include "ieee80211_i.h" | ||
13 | |||
14 | |||
15 | void ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, | ||
16 | struct ieee80211_supported_band *sband, | ||
17 | struct ieee80211_vht_cap *vht_cap_ie, | ||
18 | struct ieee80211_sta_vht_cap *vht_cap) | ||
19 | { | ||
20 | if (WARN_ON_ONCE(!vht_cap)) | ||
21 | return; | ||
22 | |||
23 | memset(vht_cap, 0, sizeof(*vht_cap)); | ||
24 | |||
25 | if (!vht_cap_ie || !sband->vht_cap.vht_supported) | ||
26 | return; | ||
27 | |||
28 | vht_cap->vht_supported = true; | ||
29 | |||
30 | vht_cap->cap = le32_to_cpu(vht_cap_ie->vht_cap_info); | ||
31 | |||
32 | /* Copy peer MCS info, the driver might need them. */ | ||
33 | memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs, | ||
34 | sizeof(struct ieee80211_vht_mcs_info)); | ||
35 | } | ||
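The new helper fills a driver-facing struct ieee80211_sta_vht_cap from a peer's VHT capabilities element. A hedged usage sketch; the caller, the element pointer and the surrounding variables are hypothetical, only the helper is the one added above:

        struct ieee80211_sta_vht_cap vht_cap;
        struct ieee80211_vht_cap *vht_elem; /* body of a received VHT capabilities element */

        ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, vht_elem, &vht_cap);
        if (vht_cap.vht_supported)
                /* peer advertised VHT; cap flags and MCS map are now filled in */
                pr_debug("peer VHT cap_info 0x%x\n", vht_cap.cap);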
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index cea06e9f26f4..906f00cd6d2f 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c | |||
@@ -160,31 +160,37 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, | |||
160 | return ieee80211_downgrade_queue(sdata, skb); | 160 | return ieee80211_downgrade_queue(sdata, skb); |
161 | } | 161 | } |
162 | 162 | ||
163 | /** | ||
164 | * ieee80211_set_qos_hdr - Fill in the QoS header if there is one. | ||
165 | * | ||
166 | * @sdata: local subif | ||
167 | * @skb: packet to be updated | ||
168 | */ | ||
163 | void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, | 169 | void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, |
164 | struct sk_buff *skb) | 170 | struct sk_buff *skb) |
165 | { | 171 | { |
166 | struct ieee80211_hdr *hdr = (void *)skb->data; | 172 | struct ieee80211_hdr *hdr = (void *)skb->data; |
167 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 173 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
174 | u8 *p; | ||
175 | u8 ack_policy, tid; | ||
168 | 176 | ||
169 | /* Fill in the QoS header if there is one. */ | 177 | if (!ieee80211_is_data_qos(hdr->frame_control)) |
170 | if (ieee80211_is_data_qos(hdr->frame_control)) { | 178 | return; |
171 | u8 *p = ieee80211_get_qos_ctl(hdr); | ||
172 | u8 ack_policy, tid; | ||
173 | |||
174 | tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; | ||
175 | 179 | ||
176 | /* preserve EOSP bit */ | 180 | p = ieee80211_get_qos_ctl(hdr); |
177 | ack_policy = *p & IEEE80211_QOS_CTL_EOSP; | 181 | tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; |
178 | 182 | ||
179 | if (is_multicast_ether_addr(hdr->addr1) || | 183 | /* preserve EOSP bit */ |
180 | sdata->noack_map & BIT(tid)) { | 184 | ack_policy = *p & IEEE80211_QOS_CTL_EOSP; |
181 | ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK; | ||
182 | info->flags |= IEEE80211_TX_CTL_NO_ACK; | ||
183 | } | ||
184 | 185 | ||
185 | /* qos header is 2 bytes */ | 186 | if (is_multicast_ether_addr(hdr->addr1) || |
186 | *p++ = ack_policy | tid; | 187 | sdata->noack_map & BIT(tid)) { |
187 | *p = ieee80211_vif_is_mesh(&sdata->vif) ? | 188 | ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK; |
188 | (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0; | 189 | info->flags |= IEEE80211_TX_CTL_NO_ACK; |
189 | } | 190 | } |
191 | |||
192 | /* qos header is 2 bytes */ | ||
193 | *p++ = ack_policy | tid; | ||
194 | *p = ieee80211_vif_is_mesh(&sdata->vif) ? | ||
195 | (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0; | ||
190 | } | 196 | } |
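The refactored ieee80211_set_qos_hdr() writes the same two QoS control bytes as before: the first byte carries the TID plus the ack policy and the preserved EOSP bit, the second carries only the mesh "control present" flag. A small illustrative reader (not part of this patch) using the matching mask from <linux/ieee80211.h>:

        static u8 qos_tid(struct ieee80211_hdr *hdr)
        {
                u8 *qc = ieee80211_get_qos_ctl(hdr);

                /* low bits of the first QoS control byte hold the TID */
                return qc[0] & IEEE80211_QOS_CTL_TID_MASK;
        }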
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 778465f217fa..fed899f600b2 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
@@ -1643,7 +1643,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) | |||
1643 | void *data; | 1643 | void *data; |
1644 | int copylen = *len, ret = 0; | 1644 | int copylen = *len, ret = 0; |
1645 | 1645 | ||
1646 | if (!capable(CAP_NET_ADMIN)) | 1646 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
1647 | return -EPERM; | 1647 | return -EPERM; |
1648 | if (optval != SO_IP_SET) | 1648 | if (optval != SO_IP_SET) |
1649 | return -EBADF; | 1649 | return -EBADF; |
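The ipset socket-option check no longer requires CAP_NET_ADMIN in the initial user namespace; the capability is checked against the user namespace that owns the socket's network namespace, so root inside a container can query sets in its own netns. Sketch of the two forms side by side (illustrative only):

        /* old: only an init-ns CAP_NET_ADMIN holder passes */
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        /* new: root in the owning user namespace of sock_net(sk) also passes */
        if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
                return -EPERM;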
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig index 8b2cffdfdd99..0c3b1670b0d1 100644 --- a/net/netfilter/ipvs/Kconfig +++ b/net/netfilter/ipvs/Kconfig | |||
@@ -28,12 +28,11 @@ if IP_VS | |||
28 | config IP_VS_IPV6 | 28 | config IP_VS_IPV6 |
29 | bool "IPv6 support for IPVS" | 29 | bool "IPv6 support for IPVS" |
30 | depends on IPV6 = y || IP_VS = IPV6 | 30 | depends on IPV6 = y || IP_VS = IPV6 |
31 | select IP6_NF_IPTABLES | ||
31 | ---help--- | 32 | ---help--- |
32 | Add IPv6 support to IPVS. This is incomplete and might be dangerous. | 33 | Add IPv6 support to IPVS. |
33 | 34 | ||
34 | See http://www.mindbasket.com/ipvs for more information. | 35 | Say Y if unsure. |
35 | |||
36 | Say N if unsure. | ||
37 | 36 | ||
38 | config IP_VS_DEBUG | 37 | config IP_VS_DEBUG |
39 | bool "IP virtual server debugging" | 38 | bool "IP virtual server debugging" |
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 1548df9a7524..30e764ad021f 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
@@ -308,13 +308,12 @@ struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p) | |||
308 | static int | 308 | static int |
309 | ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb, | 309 | ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb, |
310 | const struct ip_vs_iphdr *iph, | 310 | const struct ip_vs_iphdr *iph, |
311 | unsigned int proto_off, int inverse, | 311 | int inverse, struct ip_vs_conn_param *p) |
312 | struct ip_vs_conn_param *p) | ||
313 | { | 312 | { |
314 | __be16 _ports[2], *pptr; | 313 | __be16 _ports[2], *pptr; |
315 | struct net *net = skb_net(skb); | 314 | struct net *net = skb_net(skb); |
316 | 315 | ||
317 | pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); | 316 | pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph); |
318 | if (pptr == NULL) | 317 | if (pptr == NULL) |
319 | return 1; | 318 | return 1; |
320 | 319 | ||
@@ -329,12 +328,11 @@ ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb, | |||
329 | 328 | ||
330 | struct ip_vs_conn * | 329 | struct ip_vs_conn * |
331 | ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, | 330 | ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, |
332 | const struct ip_vs_iphdr *iph, | 331 | const struct ip_vs_iphdr *iph, int inverse) |
333 | unsigned int proto_off, int inverse) | ||
334 | { | 332 | { |
335 | struct ip_vs_conn_param p; | 333 | struct ip_vs_conn_param p; |
336 | 334 | ||
337 | if (ip_vs_conn_fill_param_proto(af, skb, iph, proto_off, inverse, &p)) | 335 | if (ip_vs_conn_fill_param_proto(af, skb, iph, inverse, &p)) |
338 | return NULL; | 336 | return NULL; |
339 | 337 | ||
340 | return ip_vs_conn_in_get(&p); | 338 | return ip_vs_conn_in_get(&p); |
@@ -432,12 +430,11 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p) | |||
432 | 430 | ||
433 | struct ip_vs_conn * | 431 | struct ip_vs_conn * |
434 | ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, | 432 | ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, |
435 | const struct ip_vs_iphdr *iph, | 433 | const struct ip_vs_iphdr *iph, int inverse) |
436 | unsigned int proto_off, int inverse) | ||
437 | { | 434 | { |
438 | struct ip_vs_conn_param p; | 435 | struct ip_vs_conn_param p; |
439 | 436 | ||
440 | if (ip_vs_conn_fill_param_proto(af, skb, iph, proto_off, inverse, &p)) | 437 | if (ip_vs_conn_fill_param_proto(af, skb, iph, inverse, &p)) |
441 | return NULL; | 438 | return NULL; |
442 | 439 | ||
443 | return ip_vs_conn_out_get(&p); | 440 | return ip_vs_conn_out_get(&p); |
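The connection-lookup helpers now take only the parsed ip_vs_iphdr: the transport header offset is iph->len (found once while walking any IPv6 extension headers) and the ports are read with frag_safe_skb_hp(), which, unlike a plain skb_header_pointer(), is expected to refuse to read from a non-first IPv6 fragment. A sketch of the pattern, with the helper's prototype assumed from the call sites in this hunk:

        __be16 _ports[2], *pptr;

        pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
        if (!pptr)              /* e.g. ports not present in this fragment */
                return 1;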
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 58918e20f9d5..fb45640dc1fb 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -222,11 +222,10 @@ ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc, | |||
222 | */ | 222 | */ |
223 | static struct ip_vs_conn * | 223 | static struct ip_vs_conn * |
224 | ip_vs_sched_persist(struct ip_vs_service *svc, | 224 | ip_vs_sched_persist(struct ip_vs_service *svc, |
225 | struct sk_buff *skb, | 225 | struct sk_buff *skb, __be16 src_port, __be16 dst_port, |
226 | __be16 src_port, __be16 dst_port, int *ignored) | 226 | int *ignored, struct ip_vs_iphdr *iph) |
227 | { | 227 | { |
228 | struct ip_vs_conn *cp = NULL; | 228 | struct ip_vs_conn *cp = NULL; |
229 | struct ip_vs_iphdr iph; | ||
230 | struct ip_vs_dest *dest; | 229 | struct ip_vs_dest *dest; |
231 | struct ip_vs_conn *ct; | 230 | struct ip_vs_conn *ct; |
232 | __be16 dport = 0; /* destination port to forward */ | 231 | __be16 dport = 0; /* destination port to forward */ |
@@ -236,20 +235,18 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
236 | union nf_inet_addr snet; /* source network of the client, | 235 | union nf_inet_addr snet; /* source network of the client, |
237 | after masking */ | 236 | after masking */ |
238 | 237 | ||
239 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | ||
240 | |||
241 | /* Mask saddr with the netmask to adjust template granularity */ | 238 | /* Mask saddr with the netmask to adjust template granularity */ |
242 | #ifdef CONFIG_IP_VS_IPV6 | 239 | #ifdef CONFIG_IP_VS_IPV6 |
243 | if (svc->af == AF_INET6) | 240 | if (svc->af == AF_INET6) |
244 | ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask); | 241 | ipv6_addr_prefix(&snet.in6, &iph->saddr.in6, svc->netmask); |
245 | else | 242 | else |
246 | #endif | 243 | #endif |
247 | snet.ip = iph.saddr.ip & svc->netmask; | 244 | snet.ip = iph->saddr.ip & svc->netmask; |
248 | 245 | ||
249 | IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u " | 246 | IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u " |
250 | "mnet %s\n", | 247 | "mnet %s\n", |
251 | IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port), | 248 | IP_VS_DBG_ADDR(svc->af, &iph->saddr), ntohs(src_port), |
252 | IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port), | 249 | IP_VS_DBG_ADDR(svc->af, &iph->daddr), ntohs(dst_port), |
253 | IP_VS_DBG_ADDR(svc->af, &snet)); | 250 | IP_VS_DBG_ADDR(svc->af, &snet)); |
254 | 251 | ||
255 | /* | 252 | /* |
@@ -266,8 +263,8 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
266 | * is created for other persistent services. | 263 | * is created for other persistent services. |
267 | */ | 264 | */ |
268 | { | 265 | { |
269 | int protocol = iph.protocol; | 266 | int protocol = iph->protocol; |
270 | const union nf_inet_addr *vaddr = &iph.daddr; | 267 | const union nf_inet_addr *vaddr = &iph->daddr; |
271 | __be16 vport = 0; | 268 | __be16 vport = 0; |
272 | 269 | ||
273 | if (dst_port == svc->port) { | 270 | if (dst_port == svc->port) { |
@@ -342,14 +339,14 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
342 | dport = dest->port; | 339 | dport = dest->port; |
343 | 340 | ||
344 | flags = (svc->flags & IP_VS_SVC_F_ONEPACKET | 341 | flags = (svc->flags & IP_VS_SVC_F_ONEPACKET |
345 | && iph.protocol == IPPROTO_UDP)? | 342 | && iph->protocol == IPPROTO_UDP) ? |
346 | IP_VS_CONN_F_ONE_PACKET : 0; | 343 | IP_VS_CONN_F_ONE_PACKET : 0; |
347 | 344 | ||
348 | /* | 345 | /* |
349 | * Create a new connection according to the template | 346 | * Create a new connection according to the template |
350 | */ | 347 | */ |
351 | ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr, | 348 | ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr, |
352 | src_port, &iph.daddr, dst_port, ¶m); | 349 | src_port, &iph->daddr, dst_port, ¶m); |
353 | 350 | ||
354 | cp = ip_vs_conn_new(¶m, &dest->addr, dport, flags, dest, skb->mark); | 351 | cp = ip_vs_conn_new(¶m, &dest->addr, dport, flags, dest, skb->mark); |
355 | if (cp == NULL) { | 352 | if (cp == NULL) { |
@@ -392,18 +389,20 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
392 | */ | 389 | */ |
393 | struct ip_vs_conn * | 390 | struct ip_vs_conn * |
394 | ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, | 391 | ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, |
395 | struct ip_vs_proto_data *pd, int *ignored) | 392 | struct ip_vs_proto_data *pd, int *ignored, |
393 | struct ip_vs_iphdr *iph) | ||
396 | { | 394 | { |
397 | struct ip_vs_protocol *pp = pd->pp; | 395 | struct ip_vs_protocol *pp = pd->pp; |
398 | struct ip_vs_conn *cp = NULL; | 396 | struct ip_vs_conn *cp = NULL; |
399 | struct ip_vs_iphdr iph; | ||
400 | struct ip_vs_dest *dest; | 397 | struct ip_vs_dest *dest; |
401 | __be16 _ports[2], *pptr; | 398 | __be16 _ports[2], *pptr; |
402 | unsigned int flags; | 399 | unsigned int flags; |
403 | 400 | ||
404 | *ignored = 1; | 401 | *ignored = 1; |
405 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | 402 | /* |
406 | pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); | 403 | * IPv6 frags, only the first hit here. |
404 | */ | ||
405 | pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph); | ||
407 | if (pptr == NULL) | 406 | if (pptr == NULL) |
408 | return NULL; | 407 | return NULL; |
409 | 408 | ||
@@ -423,7 +422,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, | |||
423 | * Do not schedule replies from local real server. | 422 | * Do not schedule replies from local real server. |
424 | */ | 423 | */ |
425 | if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) && | 424 | if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) && |
426 | (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) { | 425 | (cp = pp->conn_in_get(svc->af, skb, iph, 1))) { |
427 | IP_VS_DBG_PKT(12, svc->af, pp, skb, 0, | 426 | IP_VS_DBG_PKT(12, svc->af, pp, skb, 0, |
428 | "Not scheduling reply for existing connection"); | 427 | "Not scheduling reply for existing connection"); |
429 | __ip_vs_conn_put(cp); | 428 | __ip_vs_conn_put(cp); |
@@ -434,7 +433,8 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, | |||
434 | * Persistent service | 433 | * Persistent service |
435 | */ | 434 | */ |
436 | if (svc->flags & IP_VS_SVC_F_PERSISTENT) | 435 | if (svc->flags & IP_VS_SVC_F_PERSISTENT) |
437 | return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored); | 436 | return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored, |
437 | iph); | ||
438 | 438 | ||
439 | *ignored = 0; | 439 | *ignored = 0; |
440 | 440 | ||
@@ -456,7 +456,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, | |||
456 | } | 456 | } |
457 | 457 | ||
458 | flags = (svc->flags & IP_VS_SVC_F_ONEPACKET | 458 | flags = (svc->flags & IP_VS_SVC_F_ONEPACKET |
459 | && iph.protocol == IPPROTO_UDP)? | 459 | && iph->protocol == IPPROTO_UDP) ? |
460 | IP_VS_CONN_F_ONE_PACKET : 0; | 460 | IP_VS_CONN_F_ONE_PACKET : 0; |
461 | 461 | ||
462 | /* | 462 | /* |
@@ -465,9 +465,9 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, | |||
465 | { | 465 | { |
466 | struct ip_vs_conn_param p; | 466 | struct ip_vs_conn_param p; |
467 | 467 | ||
468 | ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, | 468 | ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, |
469 | &iph.saddr, pptr[0], &iph.daddr, pptr[1], | 469 | &iph->saddr, pptr[0], &iph->daddr, |
470 | &p); | 470 | pptr[1], &p); |
471 | cp = ip_vs_conn_new(&p, &dest->addr, | 471 | cp = ip_vs_conn_new(&p, &dest->addr, |
472 | dest->port ? dest->port : pptr[1], | 472 | dest->port ? dest->port : pptr[1], |
473 | flags, dest, skb->mark); | 473 | flags, dest, skb->mark); |
@@ -496,19 +496,16 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, | |||
496 | * no destination is available for a new connection. | 496 | * no destination is available for a new connection. |
497 | */ | 497 | */ |
498 | int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | 498 | int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, |
499 | struct ip_vs_proto_data *pd) | 499 | struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph) |
500 | { | 500 | { |
501 | __be16 _ports[2], *pptr; | 501 | __be16 _ports[2], *pptr; |
502 | struct ip_vs_iphdr iph; | ||
503 | #ifdef CONFIG_SYSCTL | 502 | #ifdef CONFIG_SYSCTL |
504 | struct net *net; | 503 | struct net *net; |
505 | struct netns_ipvs *ipvs; | 504 | struct netns_ipvs *ipvs; |
506 | int unicast; | 505 | int unicast; |
507 | #endif | 506 | #endif |
508 | 507 | ||
509 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | 508 | pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph); |
510 | |||
511 | pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); | ||
512 | if (pptr == NULL) { | 509 | if (pptr == NULL) { |
513 | ip_vs_service_put(svc); | 510 | ip_vs_service_put(svc); |
514 | return NF_DROP; | 511 | return NF_DROP; |
@@ -519,10 +516,10 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | |||
519 | 516 | ||
520 | #ifdef CONFIG_IP_VS_IPV6 | 517 | #ifdef CONFIG_IP_VS_IPV6 |
521 | if (svc->af == AF_INET6) | 518 | if (svc->af == AF_INET6) |
522 | unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST; | 519 | unicast = ipv6_addr_type(&iph->daddr.in6) & IPV6_ADDR_UNICAST; |
523 | else | 520 | else |
524 | #endif | 521 | #endif |
525 | unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST); | 522 | unicast = (inet_addr_type(net, iph->daddr.ip) == RTN_UNICAST); |
526 | 523 | ||
527 | /* if it is fwmark-based service, the cache_bypass sysctl is up | 524 | /* if it is fwmark-based service, the cache_bypass sysctl is up |
528 | and the destination is a non-local unicast, then create | 525 | and the destination is a non-local unicast, then create |
@@ -532,7 +529,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | |||
532 | int ret; | 529 | int ret; |
533 | struct ip_vs_conn *cp; | 530 | struct ip_vs_conn *cp; |
534 | unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET && | 531 | unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET && |
535 | iph.protocol == IPPROTO_UDP)? | 532 | iph->protocol == IPPROTO_UDP) ? |
536 | IP_VS_CONN_F_ONE_PACKET : 0; | 533 | IP_VS_CONN_F_ONE_PACKET : 0; |
537 | union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } }; | 534 | union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } }; |
538 | 535 | ||
@@ -542,9 +539,9 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | |||
542 | IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__); | 539 | IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__); |
543 | { | 540 | { |
544 | struct ip_vs_conn_param p; | 541 | struct ip_vs_conn_param p; |
545 | ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, | 542 | ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, |
546 | &iph.saddr, pptr[0], | 543 | &iph->saddr, pptr[0], |
547 | &iph.daddr, pptr[1], &p); | 544 | &iph->daddr, pptr[1], &p); |
548 | cp = ip_vs_conn_new(&p, &daddr, 0, | 545 | cp = ip_vs_conn_new(&p, &daddr, 0, |
549 | IP_VS_CONN_F_BYPASS | flags, | 546 | IP_VS_CONN_F_BYPASS | flags, |
550 | NULL, skb->mark); | 547 | NULL, skb->mark); |
@@ -559,7 +556,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | |||
559 | ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); | 556 | ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); |
560 | 557 | ||
561 | /* transmit the first SYN packet */ | 558 | /* transmit the first SYN packet */ |
562 | ret = cp->packet_xmit(skb, cp, pd->pp); | 559 | ret = cp->packet_xmit(skb, cp, pd->pp, iph); |
563 | /* do not touch skb anymore */ | 560 | /* do not touch skb anymore */ |
564 | 561 | ||
565 | atomic_inc(&cp->in_pkts); | 562 | atomic_inc(&cp->in_pkts); |
@@ -654,14 +651,6 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user) | |||
654 | return err; | 651 | return err; |
655 | } | 652 | } |
656 | 653 | ||
657 | #ifdef CONFIG_IP_VS_IPV6 | ||
658 | static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user) | ||
659 | { | ||
660 | /* TODO IPv6: Find out what to do here for IPv6 */ | ||
661 | return 0; | ||
662 | } | ||
663 | #endif | ||
664 | |||
665 | static int ip_vs_route_me_harder(int af, struct sk_buff *skb) | 654 | static int ip_vs_route_me_harder(int af, struct sk_buff *skb) |
666 | { | 655 | { |
667 | #ifdef CONFIG_IP_VS_IPV6 | 656 | #ifdef CONFIG_IP_VS_IPV6 |
@@ -732,10 +721,19 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
732 | struct ip_vs_conn *cp, int inout) | 721 | struct ip_vs_conn *cp, int inout) |
733 | { | 722 | { |
734 | struct ipv6hdr *iph = ipv6_hdr(skb); | 723 | struct ipv6hdr *iph = ipv6_hdr(skb); |
735 | unsigned int icmp_offset = sizeof(struct ipv6hdr); | 724 | unsigned int icmp_offset = 0; |
736 | struct icmp6hdr *icmph = (struct icmp6hdr *)(skb_network_header(skb) + | 725 | unsigned int offs = 0; /* header offset*/ |
737 | icmp_offset); | 726 | int protocol; |
738 | struct ipv6hdr *ciph = (struct ipv6hdr *)(icmph + 1); | 727 | struct icmp6hdr *icmph; |
728 | struct ipv6hdr *ciph; | ||
729 | unsigned short fragoffs; | ||
730 | |||
731 | ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL); | ||
732 | icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset); | ||
733 | offs = icmp_offset + sizeof(struct icmp6hdr); | ||
734 | ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs); | ||
735 | |||
736 | protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL); | ||
739 | 737 | ||
740 | if (inout) { | 738 | if (inout) { |
741 | iph->saddr = cp->vaddr.in6; | 739 | iph->saddr = cp->vaddr.in6; |
@@ -746,10 +744,13 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
746 | } | 744 | } |
747 | 745 | ||
748 | /* the TCP/UDP/SCTP port */ | 746 | /* the TCP/UDP/SCTP port */ |
749 | if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr || | 747 | if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol || |
750 | IPPROTO_SCTP == ciph->nexthdr) { | 748 | IPPROTO_SCTP == protocol)) { |
751 | __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr); | 749 | __be16 *ports = (void *)(skb_network_header(skb) + offs); |
752 | 750 | ||
751 | IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__, | ||
752 | ntohs(inout ? ports[1] : ports[0]), | ||
753 | ntohs(inout ? cp->vport : cp->dport)); | ||
753 | if (inout) | 754 | if (inout) |
754 | ports[1] = cp->vport; | 755 | ports[1] = cp->vport; |
755 | else | 756 | else |
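ip_vs_nat_icmp_v6() now locates the ICMPv6 header and the headers of the embedded packet with ipv6_find_hdr(), which walks the extension-header chain, returns the protocol it stopped at and reports the fragment offset; port rewriting is skipped when the embedded transport header sits in a non-first fragment. A hedged sketch of the lookup step, mirroring the call above (the error handling is illustrative):

        unsigned int icmp_offset = 0;
        unsigned short fragoffs;
        int protocol;

        /* skip any extension headers and land on the ICMPv6 header */
        protocol = ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6,
                                 &fragoffs, NULL);
        if (protocol < 0)
                return;         /* malformed header chain, nothing to mangle */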
@@ -898,51 +899,35 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related, | |||
898 | IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset, | 899 | IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset, |
899 | "Checking outgoing ICMP for"); | 900 | "Checking outgoing ICMP for"); |
900 | 901 | ||
901 | offset += cih->ihl * 4; | 902 | ip_vs_fill_ip4hdr(cih, &ciph); |
902 | 903 | ciph.len += offset; | |
903 | ip_vs_fill_iphdr(AF_INET, cih, &ciph); | ||
904 | /* The embedded headers contain source and dest in reverse order */ | 904 | /* The embedded headers contain source and dest in reverse order */ |
905 | cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1); | 905 | cp = pp->conn_out_get(AF_INET, skb, &ciph, 1); |
906 | if (!cp) | 906 | if (!cp) |
907 | return NF_ACCEPT; | 907 | return NF_ACCEPT; |
908 | 908 | ||
909 | snet.ip = iph->saddr; | 909 | snet.ip = iph->saddr; |
910 | return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, | 910 | return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp, |
911 | pp, offset, ihl); | 911 | pp, ciph.len, ihl); |
912 | } | 912 | } |
913 | 913 | ||
914 | #ifdef CONFIG_IP_VS_IPV6 | 914 | #ifdef CONFIG_IP_VS_IPV6 |
915 | static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related, | 915 | static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related, |
916 | unsigned int hooknum) | 916 | unsigned int hooknum, struct ip_vs_iphdr *ipvsh) |
917 | { | 917 | { |
918 | struct ipv6hdr *iph; | ||
919 | struct icmp6hdr _icmph, *ic; | 918 | struct icmp6hdr _icmph, *ic; |
920 | struct ipv6hdr _ciph, *cih; /* The ip header contained | 919 | struct ipv6hdr _ip6h, *ip6h; /* The ip header contained within ICMP */ |
921 | within the ICMP */ | 920 | struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */ |
922 | struct ip_vs_iphdr ciph; | ||
923 | struct ip_vs_conn *cp; | 921 | struct ip_vs_conn *cp; |
924 | struct ip_vs_protocol *pp; | 922 | struct ip_vs_protocol *pp; |
925 | unsigned int offset; | ||
926 | union nf_inet_addr snet; | 923 | union nf_inet_addr snet; |
924 | unsigned int writable; | ||
927 | 925 | ||
928 | *related = 1; | 926 | *related = 1; |
929 | 927 | ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh); | |
930 | /* reassemble IP fragments */ | ||
931 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) { | ||
932 | if (ip_vs_gather_frags_v6(skb, ip_vs_defrag_user(hooknum))) | ||
933 | return NF_STOLEN; | ||
934 | } | ||
935 | |||
936 | iph = ipv6_hdr(skb); | ||
937 | offset = sizeof(struct ipv6hdr); | ||
938 | ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); | ||
939 | if (ic == NULL) | 928 | if (ic == NULL) |
940 | return NF_DROP; | 929 | return NF_DROP; |
941 | 930 | ||
942 | IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) %pI6->%pI6\n", | ||
943 | ic->icmp6_type, ntohs(icmpv6_id(ic)), | ||
944 | &iph->saddr, &iph->daddr); | ||
945 | |||
946 | /* | 931 | /* |
947 | * Work through seeing if this is for us. | 932 | * Work through seeing if this is for us. |
948 | * These checks are supposed to be in an order that means easy | 933 | * These checks are supposed to be in an order that means easy |
@@ -950,42 +935,45 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related, | |||
950 | * this means that some packets will manage to get a long way | 935 | * this means that some packets will manage to get a long way |
951 | * down this stack and then be rejected, but that's life. | 936 | * down this stack and then be rejected, but that's life. |
952 | */ | 937 | */ |
953 | if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) && | 938 | if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) { |
954 | (ic->icmp6_type != ICMPV6_PKT_TOOBIG) && | ||
955 | (ic->icmp6_type != ICMPV6_TIME_EXCEED)) { | ||
956 | *related = 0; | 939 | *related = 0; |
957 | return NF_ACCEPT; | 940 | return NF_ACCEPT; |
958 | } | 941 | } |
942 | /* Fragment header that is before ICMP header tells us that: | ||
943 | * it's not an error message since they can't be fragmented. | ||
944 | */ | ||
945 | if (ipvsh->flags & IP6T_FH_F_FRAG) | ||
946 | return NF_DROP; | ||
947 | |||
948 | IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n", | ||
949 | ic->icmp6_type, ntohs(icmpv6_id(ic)), | ||
950 | &ipvsh->saddr, &ipvsh->daddr); | ||
959 | 951 | ||
960 | /* Now find the contained IP header */ | 952 | /* Now find the contained IP header */ |
961 | offset += sizeof(_icmph); | 953 | ciph.len = ipvsh->len + sizeof(_icmph); |
962 | cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); | 954 | ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h); |
963 | if (cih == NULL) | 955 | if (ip6h == NULL) |
964 | return NF_ACCEPT; /* The packet looks wrong, ignore */ | 956 | return NF_ACCEPT; /* The packet looks wrong, ignore */ |
965 | 957 | ciph.saddr.in6 = ip6h->saddr; /* conn_out_get() handles reverse order */ | |
966 | pp = ip_vs_proto_get(cih->nexthdr); | 958 | ciph.daddr.in6 = ip6h->daddr; |
959 | /* skip possible IPv6 exthdrs of contained IPv6 packet */ | ||
960 | ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL); | ||
961 | if (ciph.protocol < 0) | ||
962 | return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */ | ||
963 | |||
964 | pp = ip_vs_proto_get(ciph.protocol); | ||
967 | if (!pp) | 965 | if (!pp) |
968 | return NF_ACCEPT; | 966 | return NF_ACCEPT; |
969 | 967 | ||
970 | /* Is the embedded protocol header present? */ | ||
971 | /* TODO: we don't support fragmentation at the moment anyways */ | ||
972 | if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag)) | ||
973 | return NF_ACCEPT; | ||
974 | |||
975 | IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset, | ||
976 | "Checking outgoing ICMPv6 for"); | ||
977 | |||
978 | offset += sizeof(struct ipv6hdr); | ||
979 | |||
980 | ip_vs_fill_iphdr(AF_INET6, cih, &ciph); | ||
981 | /* The embedded headers contain source and dest in reverse order */ | 968 | /* The embedded headers contain source and dest in reverse order */ |
982 | cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1); | 969 | cp = pp->conn_out_get(AF_INET6, skb, &ciph, 1); |
983 | if (!cp) | 970 | if (!cp) |
984 | return NF_ACCEPT; | 971 | return NF_ACCEPT; |
985 | 972 | ||
986 | snet.in6 = iph->saddr; | 973 | snet.in6 = ciph.saddr.in6; |
987 | return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp, | 974 | writable = ciph.len; |
988 | pp, offset, sizeof(struct ipv6hdr)); | 975 | return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp, |
976 | pp, writable, sizeof(struct ipv6hdr)); | ||
989 | } | 977 | } |
990 | #endif | 978 | #endif |
991 | 979 | ||
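The explicit list of DEST_UNREACH / PKT_TOOBIG / TIME_EXCEED types is replaced by a single mask test: ICMPv6 error messages occupy types 0-127, while types with the top bit set (ICMPV6_INFOMSG_MASK, 0x80) are informational and never embed an offending packet, so only the former can be "related" to an existing connection:

        if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
                *related = 0;   /* informational message, no embedded packet */
                return NF_ACCEPT;
        }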
@@ -1018,17 +1006,17 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len) | |||
1018 | */ | 1006 | */ |
1019 | static unsigned int | 1007 | static unsigned int |
1020 | handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, | 1008 | handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, |
1021 | struct ip_vs_conn *cp, int ihl) | 1009 | struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) |
1022 | { | 1010 | { |
1023 | struct ip_vs_protocol *pp = pd->pp; | 1011 | struct ip_vs_protocol *pp = pd->pp; |
1024 | 1012 | ||
1025 | IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet"); | 1013 | IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet"); |
1026 | 1014 | ||
1027 | if (!skb_make_writable(skb, ihl)) | 1015 | if (!skb_make_writable(skb, iph->len)) |
1028 | goto drop; | 1016 | goto drop; |
1029 | 1017 | ||
1030 | /* mangle the packet */ | 1018 | /* mangle the packet */ |
1031 | if (pp->snat_handler && !pp->snat_handler(skb, pp, cp)) | 1019 | if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph)) |
1032 | goto drop; | 1020 | goto drop; |
1033 | 1021 | ||
1034 | #ifdef CONFIG_IP_VS_IPV6 | 1022 | #ifdef CONFIG_IP_VS_IPV6 |
@@ -1115,17 +1103,22 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1115 | if (!net_ipvs(net)->enable) | 1103 | if (!net_ipvs(net)->enable) |
1116 | return NF_ACCEPT; | 1104 | return NF_ACCEPT; |
1117 | 1105 | ||
1118 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 1106 | ip_vs_fill_iph_skb(af, skb, &iph); |
1119 | #ifdef CONFIG_IP_VS_IPV6 | 1107 | #ifdef CONFIG_IP_VS_IPV6 |
1120 | if (af == AF_INET6) { | 1108 | if (af == AF_INET6) { |
1109 | if (!iph.fragoffs && skb_nfct_reasm(skb)) { | ||
1110 | struct sk_buff *reasm = skb_nfct_reasm(skb); | ||
1111 | /* Save fw mark for coming frags */ | ||
1112 | reasm->ipvs_property = 1; | ||
1113 | reasm->mark = skb->mark; | ||
1114 | } | ||
1121 | if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { | 1115 | if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { |
1122 | int related; | 1116 | int related; |
1123 | int verdict = ip_vs_out_icmp_v6(skb, &related, | 1117 | int verdict = ip_vs_out_icmp_v6(skb, &related, |
1124 | hooknum); | 1118 | hooknum, &iph); |
1125 | 1119 | ||
1126 | if (related) | 1120 | if (related) |
1127 | return verdict; | 1121 | return verdict; |
1128 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
1129 | } | 1122 | } |
1130 | } else | 1123 | } else |
1131 | #endif | 1124 | #endif |
@@ -1135,7 +1128,6 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1135 | 1128 | ||
1136 | if (related) | 1129 | if (related) |
1137 | return verdict; | 1130 | return verdict; |
1138 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
1139 | } | 1131 | } |
1140 | 1132 | ||
1141 | pd = ip_vs_proto_data_get(net, iph.protocol); | 1133 | pd = ip_vs_proto_data_get(net, iph.protocol); |
@@ -1145,39 +1137,31 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1145 | 1137 | ||
1146 | /* reassemble IP fragments */ | 1138 | /* reassemble IP fragments */ |
1147 | #ifdef CONFIG_IP_VS_IPV6 | 1139 | #ifdef CONFIG_IP_VS_IPV6 |
1148 | if (af == AF_INET6) { | 1140 | if (af == AF_INET) |
1149 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) { | ||
1150 | if (ip_vs_gather_frags_v6(skb, | ||
1151 | ip_vs_defrag_user(hooknum))) | ||
1152 | return NF_STOLEN; | ||
1153 | } | ||
1154 | |||
1155 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
1156 | } else | ||
1157 | #endif | 1141 | #endif |
1158 | if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) { | 1142 | if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) { |
1159 | if (ip_vs_gather_frags(skb, | 1143 | if (ip_vs_gather_frags(skb, |
1160 | ip_vs_defrag_user(hooknum))) | 1144 | ip_vs_defrag_user(hooknum))) |
1161 | return NF_STOLEN; | 1145 | return NF_STOLEN; |
1162 | 1146 | ||
1163 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 1147 | ip_vs_fill_ip4hdr(skb_network_header(skb), &iph); |
1164 | } | 1148 | } |
1165 | 1149 | ||
1166 | /* | 1150 | /* |
1167 | * Check if the packet belongs to an existing entry | 1151 | * Check if the packet belongs to an existing entry |
1168 | */ | 1152 | */ |
1169 | cp = pp->conn_out_get(af, skb, &iph, iph.len, 0); | 1153 | cp = pp->conn_out_get(af, skb, &iph, 0); |
1170 | 1154 | ||
1171 | if (likely(cp)) | 1155 | if (likely(cp)) |
1172 | return handle_response(af, skb, pd, cp, iph.len); | 1156 | return handle_response(af, skb, pd, cp, &iph); |
1173 | if (sysctl_nat_icmp_send(net) && | 1157 | if (sysctl_nat_icmp_send(net) && |
1174 | (pp->protocol == IPPROTO_TCP || | 1158 | (pp->protocol == IPPROTO_TCP || |
1175 | pp->protocol == IPPROTO_UDP || | 1159 | pp->protocol == IPPROTO_UDP || |
1176 | pp->protocol == IPPROTO_SCTP)) { | 1160 | pp->protocol == IPPROTO_SCTP)) { |
1177 | __be16 _ports[2], *pptr; | 1161 | __be16 _ports[2], *pptr; |
1178 | 1162 | ||
1179 | pptr = skb_header_pointer(skb, iph.len, | 1163 | pptr = frag_safe_skb_hp(skb, iph.len, |
1180 | sizeof(_ports), _ports); | 1164 | sizeof(_ports), _ports, &iph); |
1181 | if (pptr == NULL) | 1165 | if (pptr == NULL) |
1182 | return NF_ACCEPT; /* Not for me */ | 1166 | return NF_ACCEPT; /* Not for me */ |
1183 | if (ip_vs_lookup_real_service(net, af, iph.protocol, | 1167 | if (ip_vs_lookup_real_service(net, af, iph.protocol, |
@@ -1375,13 +1359,13 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
1375 | "Checking incoming ICMP for"); | 1359 | "Checking incoming ICMP for"); |
1376 | 1360 | ||
1377 | offset2 = offset; | 1361 | offset2 = offset; |
1378 | offset += cih->ihl * 4; | 1362 | ip_vs_fill_ip4hdr(cih, &ciph); |
1379 | 1363 | ciph.len += offset; | |
1380 | ip_vs_fill_iphdr(AF_INET, cih, &ciph); | 1364 | offset = ciph.len; |
1381 | /* The embedded headers contain source and dest in reverse order. | 1365 | /* The embedded headers contain source and dest in reverse order. |
1382 | * For IPIP this is error for request, not for reply. | 1366 | * For IPIP this is error for request, not for reply. |
1383 | */ | 1367 | */ |
1384 | cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, ipip ? 0 : 1); | 1368 | cp = pp->conn_in_get(AF_INET, skb, &ciph, ipip ? 0 : 1); |
1385 | if (!cp) | 1369 | if (!cp) |
1386 | return NF_ACCEPT; | 1370 | return NF_ACCEPT; |
1387 | 1371 | ||
@@ -1450,7 +1434,7 @@ ignore_ipip: | |||
1450 | ip_vs_in_stats(cp, skb); | 1434 | ip_vs_in_stats(cp, skb); |
1451 | if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) | 1435 | if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) |
1452 | offset += 2 * sizeof(__u16); | 1436 | offset += 2 * sizeof(__u16); |
1453 | verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum); | 1437 | verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph); |
1454 | 1438 | ||
1455 | out: | 1439 | out: |
1456 | __ip_vs_conn_put(cp); | 1440 | __ip_vs_conn_put(cp); |
@@ -1459,38 +1443,24 @@ out: | |||
1459 | } | 1443 | } |
1460 | 1444 | ||
1461 | #ifdef CONFIG_IP_VS_IPV6 | 1445 | #ifdef CONFIG_IP_VS_IPV6 |
1462 | static int | 1446 | static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, |
1463 | ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) | 1447 | unsigned int hooknum, struct ip_vs_iphdr *iph) |
1464 | { | 1448 | { |
1465 | struct net *net = NULL; | 1449 | struct net *net = NULL; |
1466 | struct ipv6hdr *iph; | 1450 | struct ipv6hdr _ip6h, *ip6h; |
1467 | struct icmp6hdr _icmph, *ic; | 1451 | struct icmp6hdr _icmph, *ic; |
1468 | struct ipv6hdr _ciph, *cih; /* The ip header contained | 1452 | struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */ |
1469 | within the ICMP */ | ||
1470 | struct ip_vs_iphdr ciph; | ||
1471 | struct ip_vs_conn *cp; | 1453 | struct ip_vs_conn *cp; |
1472 | struct ip_vs_protocol *pp; | 1454 | struct ip_vs_protocol *pp; |
1473 | struct ip_vs_proto_data *pd; | 1455 | struct ip_vs_proto_data *pd; |
1474 | unsigned int offset, verdict; | 1456 | unsigned int offs_ciph, writable, verdict; |
1475 | 1457 | ||
1476 | *related = 1; | 1458 | *related = 1; |
1477 | 1459 | ||
1478 | /* reassemble IP fragments */ | 1460 | ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph); |
1479 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) { | ||
1480 | if (ip_vs_gather_frags_v6(skb, ip_vs_defrag_user(hooknum))) | ||
1481 | return NF_STOLEN; | ||
1482 | } | ||
1483 | |||
1484 | iph = ipv6_hdr(skb); | ||
1485 | offset = sizeof(struct ipv6hdr); | ||
1486 | ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); | ||
1487 | if (ic == NULL) | 1461 | if (ic == NULL) |
1488 | return NF_DROP; | 1462 | return NF_DROP; |
1489 | 1463 | ||
1490 | IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) %pI6->%pI6\n", | ||
1491 | ic->icmp6_type, ntohs(icmpv6_id(ic)), | ||
1492 | &iph->saddr, &iph->daddr); | ||
1493 | |||
1494 | /* | 1464 | /* |
1495 | * Work through seeing if this is for us. | 1465 | * Work through seeing if this is for us. |
1496 | * These checks are supposed to be in an order that means easy | 1466 | * These checks are supposed to be in an order that means easy |
@@ -1498,47 +1468,71 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
1498 | * this means that some packets will manage to get a long way | 1468 | * this means that some packets will manage to get a long way |
1499 | * down this stack and then be rejected, but that's life. | 1469 | * down this stack and then be rejected, but that's life. |
1500 | */ | 1470 | */ |
1501 | if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) && | 1471 | if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) { |
1502 | (ic->icmp6_type != ICMPV6_PKT_TOOBIG) && | ||
1503 | (ic->icmp6_type != ICMPV6_TIME_EXCEED)) { | ||
1504 | *related = 0; | 1472 | *related = 0; |
1505 | return NF_ACCEPT; | 1473 | return NF_ACCEPT; |
1506 | } | 1474 | } |
1475 | /* Fragment header that is before ICMP header tells us that: | ||
1476 | * it's not an error message since they can't be fragmented. | ||
1477 | */ | ||
1478 | if (iph->flags & IP6T_FH_F_FRAG) | ||
1479 | return NF_DROP; | ||
1480 | |||
1481 | IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n", | ||
1482 | ic->icmp6_type, ntohs(icmpv6_id(ic)), | ||
1483 | &iph->saddr, &iph->daddr); | ||
1507 | 1484 | ||
1508 | /* Now find the contained IP header */ | 1485 | /* Now find the contained IP header */ |
1509 | offset += sizeof(_icmph); | 1486 | ciph.len = iph->len + sizeof(_icmph); |
1510 | cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); | 1487 | offs_ciph = ciph.len; /* Save ip header offset */ |
1511 | if (cih == NULL) | 1488 | ip6h = skb_header_pointer(skb, ciph.len, sizeof(_ip6h), &_ip6h); |
1489 | if (ip6h == NULL) | ||
1512 | return NF_ACCEPT; /* The packet looks wrong, ignore */ | 1490 | return NF_ACCEPT; /* The packet looks wrong, ignore */ |
1491 | ciph.saddr.in6 = ip6h->saddr; /* conn_in_get() handles reverse order */ | ||
1492 | ciph.daddr.in6 = ip6h->daddr; | ||
1493 | /* skip possible IPv6 exthdrs of contained IPv6 packet */ | ||
1494 | ciph.protocol = ipv6_find_hdr(skb, &ciph.len, -1, &ciph.fragoffs, NULL); | ||
1495 | if (ciph.protocol < 0) | ||
1496 | return NF_ACCEPT; /* Contained IPv6 hdr looks wrong, ignore */ | ||
1513 | 1497 | ||
1514 | net = skb_net(skb); | 1498 | net = skb_net(skb); |
1515 | pd = ip_vs_proto_data_get(net, cih->nexthdr); | 1499 | pd = ip_vs_proto_data_get(net, ciph.protocol); |
1516 | if (!pd) | 1500 | if (!pd) |
1517 | return NF_ACCEPT; | 1501 | return NF_ACCEPT; |
1518 | pp = pd->pp; | 1502 | pp = pd->pp; |
1519 | 1503 | ||
1520 | /* Is the embedded protocol header present? */ | 1504 | /* Cannot handle fragmented embedded protocol */ |
1521 | /* TODO: we don't support fragmentation at the moment anyways */ | 1505 | if (ciph.fragoffs) |
1522 | if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag)) | ||
1523 | return NF_ACCEPT; | 1506 | return NF_ACCEPT; |
1524 | 1507 | ||
1525 | IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset, | 1508 | IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offs_ciph, |
1526 | "Checking incoming ICMPv6 for"); | 1509 | "Checking incoming ICMPv6 for"); |
1527 | 1510 | ||
1528 | offset += sizeof(struct ipv6hdr); | 1511 | /* The embedded headers contain source and dest in reverse order |
1512 | * if not from localhost | ||
1513 | */ | ||
1514 | cp = pp->conn_in_get(AF_INET6, skb, &ciph, | ||
1515 | (hooknum == NF_INET_LOCAL_OUT) ? 0 : 1); | ||
1529 | 1516 | ||
1530 | ip_vs_fill_iphdr(AF_INET6, cih, &ciph); | ||
1531 | /* The embedded headers contain source and dest in reverse order */ | ||
1532 | cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1); | ||
1533 | if (!cp) | 1517 | if (!cp) |
1534 | return NF_ACCEPT; | 1518 | return NF_ACCEPT; |
1519 | /* VS/TUN, VS/DR and LOCALNODE just let it go */ | ||
1520 | if ((hooknum == NF_INET_LOCAL_OUT) && | ||
1521 | (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) { | ||
1522 | __ip_vs_conn_put(cp); | ||
1523 | return NF_ACCEPT; | ||
1524 | } | ||
1535 | 1525 | ||
1536 | /* do the statistics and put it back */ | 1526 | /* do the statistics and put it back */ |
1537 | ip_vs_in_stats(cp, skb); | 1527 | ip_vs_in_stats(cp, skb); |
1538 | if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr || | 1528 | |
1539 | IPPROTO_SCTP == cih->nexthdr) | 1529 | /* Need to mangle contained IPv6 header in ICMPv6 packet */ |
1540 | offset += 2 * sizeof(__u16); | 1530 | writable = ciph.len; |
1541 | verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum); | 1531 | if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol || |
1532 | IPPROTO_SCTP == ciph.protocol) | ||
1533 | writable += 2 * sizeof(__u16); /* Also mangle ports */ | ||
1534 | |||
1535 | verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, writable, hooknum, &ciph); | ||
1542 | 1536 | ||
1543 | __ip_vs_conn_put(cp); | 1537 | __ip_vs_conn_put(cp); |
1544 | 1538 | ||
@@ -1574,7 +1568,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1574 | if (unlikely((skb->pkt_type != PACKET_HOST && | 1568 | if (unlikely((skb->pkt_type != PACKET_HOST && |
1575 | hooknum != NF_INET_LOCAL_OUT) || | 1569 | hooknum != NF_INET_LOCAL_OUT) || |
1576 | !skb_dst(skb))) { | 1570 | !skb_dst(skb))) { |
1577 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 1571 | ip_vs_fill_iph_skb(af, skb, &iph); |
1578 | IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s" | 1572 | IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s" |
1579 | " ignored in hook %u\n", | 1573 | " ignored in hook %u\n", |
1580 | skb->pkt_type, iph.protocol, | 1574 | skb->pkt_type, iph.protocol, |
@@ -1586,7 +1580,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1586 | if (!net_ipvs(net)->enable) | 1580 | if (!net_ipvs(net)->enable) |
1587 | return NF_ACCEPT; | 1581 | return NF_ACCEPT; |
1588 | 1582 | ||
1589 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 1583 | ip_vs_fill_iph_skb(af, skb, &iph); |
1590 | 1584 | ||
1591 | /* Bad... Do not break raw sockets */ | 1585 | /* Bad... Do not break raw sockets */ |
1592 | if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT && | 1586 | if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT && |
@@ -1600,13 +1594,19 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1600 | 1594 | ||
1601 | #ifdef CONFIG_IP_VS_IPV6 | 1595 | #ifdef CONFIG_IP_VS_IPV6 |
1602 | if (af == AF_INET6) { | 1596 | if (af == AF_INET6) { |
1597 | if (!iph.fragoffs && skb_nfct_reasm(skb)) { | ||
1598 | struct sk_buff *reasm = skb_nfct_reasm(skb); | ||
1599 | /* Save fw mark for coming frags. */ | ||
1600 | reasm->ipvs_property = 1; | ||
1601 | reasm->mark = skb->mark; | ||
1602 | } | ||
1603 | if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { | 1603 | if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { |
1604 | int related; | 1604 | int related; |
1605 | int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum); | 1605 | int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum, |
1606 | &iph); | ||
1606 | 1607 | ||
1607 | if (related) | 1608 | if (related) |
1608 | return verdict; | 1609 | return verdict; |
1609 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
1610 | } | 1610 | } |
1611 | } else | 1611 | } else |
1612 | #endif | 1612 | #endif |
@@ -1616,7 +1616,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1616 | 1616 | ||
1617 | if (related) | 1617 | if (related) |
1618 | return verdict; | 1618 | return verdict; |
1619 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | ||
1620 | } | 1619 | } |
1621 | 1620 | ||
1622 | /* Protocol supported? */ | 1621 | /* Protocol supported? */ |
@@ -1627,12 +1626,15 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1627 | /* | 1626 | /* |
1628 | * Check if the packet belongs to an existing connection entry | 1627 | * Check if the packet belongs to an existing connection entry |
1629 | */ | 1628 | */ |
1630 | cp = pp->conn_in_get(af, skb, &iph, iph.len, 0); | 1629 | cp = pp->conn_in_get(af, skb, &iph, 0); |
1631 | 1630 | if (unlikely(!cp) && !iph.fragoffs) { | |
1632 | if (unlikely(!cp)) { | 1631 | /* No (second) fragments need to enter here, as nf_defrag_ipv6 |
1632 | * replayed fragment zero will already have created the cp | ||
1633 | */ | ||
1633 | int v; | 1634 | int v; |
1634 | 1635 | ||
1635 | if (!pp->conn_schedule(af, skb, pd, &v, &cp)) | 1636 | /* Schedule and create new connection entry into &cp */ |
1637 | if (!pp->conn_schedule(af, skb, pd, &v, &cp, &iph)) | ||
1636 | return v; | 1638 | return v; |
1637 | } | 1639 | } |
1638 | 1640 | ||
@@ -1640,6 +1642,14 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1640 | /* sorry, all this trouble for a no-hit :) */ | 1642 | /* sorry, all this trouble for a no-hit :) */ |
1641 | IP_VS_DBG_PKT(12, af, pp, skb, 0, | 1643 | IP_VS_DBG_PKT(12, af, pp, skb, 0, |
1642 | "ip_vs_in: packet continues traversal as normal"); | 1644 | "ip_vs_in: packet continues traversal as normal"); |
1645 | if (iph.fragoffs && !skb_nfct_reasm(skb)) { | ||
1646 | /* Fragment that couldn't be mapped to a conn entry | ||
1647 | * and don't have any pointer to a reasm skb | ||
1648 | * is missing module nf_defrag_ipv6 | ||
1649 | */ | ||
1650 | IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n"); | ||
1651 | IP_VS_DBG_PKT(7, af, pp, skb, 0, "unhandled fragment"); | ||
1652 | } | ||
1643 | return NF_ACCEPT; | 1653 | return NF_ACCEPT; |
1644 | } | 1654 | } |
1645 | 1655 | ||
@@ -1662,7 +1672,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1662 | ip_vs_in_stats(cp, skb); | 1672 | ip_vs_in_stats(cp, skb); |
1663 | ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); | 1673 | ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); |
1664 | if (cp->packet_xmit) | 1674 | if (cp->packet_xmit) |
1665 | ret = cp->packet_xmit(skb, cp, pp); | 1675 | ret = cp->packet_xmit(skb, cp, pp, &iph); |
1666 | /* do not touch skb anymore */ | 1676 | /* do not touch skb anymore */ |
1667 | else { | 1677 | else { |
1668 | IP_VS_DBG_RL("warning: packet_xmit is null"); | 1678 | IP_VS_DBG_RL("warning: packet_xmit is null"); |
@@ -1724,6 +1734,38 @@ ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb, | |||
1724 | #ifdef CONFIG_IP_VS_IPV6 | 1734 | #ifdef CONFIG_IP_VS_IPV6 |
1725 | 1735 | ||
1726 | /* | 1736 | /* |
1737 | * AF_INET6 fragment handling | ||
1738 | * Copy info from first fragment, to the rest of them. | ||
1739 | */ | ||
1740 | static unsigned int | ||
1741 | ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb, | ||
1742 | const struct net_device *in, | ||
1743 | const struct net_device *out, | ||
1744 | int (*okfn)(struct sk_buff *)) | ||
1745 | { | ||
1746 | struct sk_buff *reasm = skb_nfct_reasm(skb); | ||
1747 | struct net *net; | ||
1748 | |||
1749 | /* Skip if not a "replay" from nf_ct_frag6_output or first fragment. | ||
1750 | * ipvs_property is set when checking first fragment | ||
1751 | * in ip_vs_in() and ip_vs_out(). | ||
1752 | */ | ||
1753 | if (reasm) | ||
1754 | IP_VS_DBG(2, "Fragment recv prop:%d\n", reasm->ipvs_property); | ||
1755 | if (!reasm || !reasm->ipvs_property) | ||
1756 | return NF_ACCEPT; | ||
1757 | |||
1758 | net = skb_net(skb); | ||
1759 | if (!net_ipvs(net)->enable) | ||
1760 | return NF_ACCEPT; | ||
1761 | |||
1762 | /* Copy stored fw mark, saved in ip_vs_{in,out} */ | ||
1763 | skb->mark = reasm->mark; | ||
1764 | |||
1765 | return NF_ACCEPT; | ||
1766 | } | ||
1767 | |||
1768 | /* | ||
1727 | * AF_INET6 handler in NF_INET_LOCAL_IN chain | 1769 | * AF_INET6 handler in NF_INET_LOCAL_IN chain |
1728 | * Schedule and forward packets from remote clients | 1770 | * Schedule and forward packets from remote clients |
1729 | */ | 1771 | */ |
@@ -1793,8 +1835,10 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, | |||
1793 | { | 1835 | { |
1794 | int r; | 1836 | int r; |
1795 | struct net *net; | 1837 | struct net *net; |
1838 | struct ip_vs_iphdr iphdr; | ||
1796 | 1839 | ||
1797 | if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6) | 1840 | ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr); |
1841 | if (iphdr.protocol != IPPROTO_ICMPV6) | ||
1798 | return NF_ACCEPT; | 1842 | return NF_ACCEPT; |
1799 | 1843 | ||
1800 | /* ipvs enabled in this netns ? */ | 1844 | /* ipvs enabled in this netns ? */ |
@@ -1802,7 +1846,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, | |||
1802 | if (!net_ipvs(net)->enable) | 1846 | if (!net_ipvs(net)->enable) |
1803 | return NF_ACCEPT; | 1847 | return NF_ACCEPT; |
1804 | 1848 | ||
1805 | return ip_vs_in_icmp_v6(skb, &r, hooknum); | 1849 | return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr); |
1806 | } | 1850 | } |
1807 | #endif | 1851 | #endif |
1808 | 1852 | ||
@@ -1860,6 +1904,14 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = { | |||
1860 | .priority = 100, | 1904 | .priority = 100, |
1861 | }, | 1905 | }, |
1862 | #ifdef CONFIG_IP_VS_IPV6 | 1906 | #ifdef CONFIG_IP_VS_IPV6 |
1907 | /* After mangle & nat fetch 2:nd fragment and following */ | ||
1908 | { | ||
1909 | .hook = ip_vs_preroute_frag6, | ||
1910 | .owner = THIS_MODULE, | ||
1911 | .pf = NFPROTO_IPV6, | ||
1912 | .hooknum = NF_INET_PRE_ROUTING, | ||
1913 | .priority = NF_IP6_PRI_NAT_DST + 1, | ||
1914 | }, | ||
1863 | /* After packet filtering, change source only for VS/NAT */ | 1915 | /* After packet filtering, change source only for VS/NAT */ |
1864 | { | 1916 | { |
1865 | .hook = ip_vs_reply6, | 1917 | .hook = ip_vs_reply6, |
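
The reworked ip_vs_in_icmp_v6() above no longer assumes the embedded packet's transport header sits right behind a bare ipv6hdr: it uses ipv6_find_hdr() to skip extension headers, records the fragment offset in ciph.fragoffs, and leaves non-first fragments alone. Below is a minimal user-space sketch of that extension-header walk; it is not kernel code, and every identifier prefixed with toy_ is invented for the example.

    /* Minimal model of the header walk ipv6_find_hdr() performs above. */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TOY_IP6_HLEN     40   /* fixed IPv6 header length */
    #define TOY_NEXTHDR_FRAG 44
    #define TOY_NEXTHDR_UDP  17

    /* On return *offs is the offset of the upper-layer header and *fragoffs
     * the fragment offset in bytes (0 = first or only fragment).
     * Returns the upper-layer protocol, or -1 if the chain runs off the end. */
    static int toy_find_hdr(const uint8_t *pkt, size_t len,
                            unsigned int *offs, unsigned int *fragoffs)
    {
        unsigned int off = TOY_IP6_HLEN;
        uint8_t nexthdr = pkt[6];          /* Next Header field of ipv6hdr */

        *fragoffs = 0;
        while (off + 8 <= len) {
            switch (nexthdr) {
            case 0: case 43: case 60:      /* hop-by-hop, routing, dst opts */
                nexthdr = pkt[off];
                off += (pkt[off + 1] + 1) * 8;
                break;
            case TOY_NEXTHDR_FRAG: {
                uint16_t fo;
                memcpy(&fo, pkt + off + 2, sizeof(fo));
                *fragoffs = ntohs(fo) & 0xfff8;  /* offset field, in bytes */
                nexthdr = pkt[off];
                off += 8;                        /* fragment header is fixed */
                break;
            }
            default:                       /* upper-layer header reached */
                *offs = off;
                return nexthdr;
            }
        }
        return -1;
    }

    int main(void)
    {
        uint8_t pkt[64] = {0};
        unsigned int offs, fragoffs;

        pkt[6] = TOY_NEXTHDR_FRAG;           /* ipv6hdr.nexthdr = fragment */
        pkt[TOY_IP6_HLEN] = TOY_NEXTHDR_UDP; /* frag hdr says UDP follows */
        /* fragment offset left 0, so this models a first fragment */

        int proto = toy_find_hdr(pkt, sizeof(pkt), &offs, &fragoffs);
        printf("proto=%d transport at %u fragoffs=%u\n", proto, offs, fragoffs);
        return 0;
    }

A first fragment (fragoffs == 0) carries the transport header and can be matched and mangled; later fragments are handled via the nf_defrag_ipv6 reassembly skb saved by the new ip_vs_preroute_frag6 hook.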
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index c4ee43710aab..ec664cbb119f 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -2339,7 +2339,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
2339 | struct ip_vs_dest_user_kern udest; | 2339 | struct ip_vs_dest_user_kern udest; |
2340 | struct netns_ipvs *ipvs = net_ipvs(net); | 2340 | struct netns_ipvs *ipvs = net_ipvs(net); |
2341 | 2341 | ||
2342 | if (!capable(CAP_NET_ADMIN)) | 2342 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
2343 | return -EPERM; | 2343 | return -EPERM; |
2344 | 2344 | ||
2345 | if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX) | 2345 | if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX) |
@@ -2632,7 +2632,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2632 | struct netns_ipvs *ipvs = net_ipvs(net); | 2632 | struct netns_ipvs *ipvs = net_ipvs(net); |
2633 | 2633 | ||
2634 | BUG_ON(!net); | 2634 | BUG_ON(!net); |
2635 | if (!capable(CAP_NET_ADMIN)) | 2635 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
2636 | return -EPERM; | 2636 | return -EPERM; |
2637 | 2637 | ||
2638 | if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX) | 2638 | if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX) |
@@ -3699,6 +3699,10 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net) | |||
3699 | tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL); | 3699 | tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL); |
3700 | if (tbl == NULL) | 3700 | if (tbl == NULL) |
3701 | return -ENOMEM; | 3701 | return -ENOMEM; |
3702 | |||
3703 | /* Don't export sysctls to unprivileged users */ | ||
3704 | if (net->user_ns != &init_user_ns) | ||
3705 | tbl[0].procname = NULL; | ||
3702 | } else | 3706 | } else |
3703 | tbl = vs_vars; | 3707 | tbl = vs_vars; |
3704 | /* Initialize sysctl defaults */ | 3708 | /* Initialize sysctl defaults */ |
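
The capable() to ns_capable() conversions above evaluate CAP_NET_ADMIN against the user namespace that owns the netns being configured, rather than only against the initial namespace. A rough user-space analogy follows; it is deliberately simplified (the real check also honours privileged tasks in ancestor namespaces), and all types and names here are invented for the sketch.

    /* Rough analogy, not kernel code: privilege is checked against the
     * namespace that owns the object being configured. */
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_user_ns { const char *name; };

    struct toy_task {
        const struct toy_user_ns *ns;   /* namespace the task runs in */
        bool has_net_admin;             /* CAP_NET_ADMIN inside that ns */
    };

    struct toy_net {
        const struct toy_user_ns *user_ns;  /* owner of this netns */
    };

    /* old model: only tasks privileged in the init namespace pass */
    static bool toy_capable(const struct toy_task *t,
                            const struct toy_user_ns *init_ns)
    {
        return t->ns == init_ns && t->has_net_admin;
    }

    /* new model: privilege in the namespace owning this particular netns */
    static bool toy_ns_capable(const struct toy_task *t,
                               const struct toy_net *net)
    {
        return t->ns == net->user_ns && t->has_net_admin;
    }

    int main(void)
    {
        struct toy_user_ns init_ns = { "init" }, container_ns = { "container" };
        struct toy_net container_net = { &container_ns };
        struct toy_task admin_in_container = { &container_ns, true };

        printf("capable():    %d\n",
               toy_capable(&admin_in_container, &init_ns));          /* 0 */
        printf("ns_capable(): %d\n",
               toy_ns_capable(&admin_in_container, &container_net)); /* 1 */
        return 0;
    }

The practical effect is that a container's root can manage the IPVS configuration of its own network namespace without holding privilege over the host.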
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c index 8b7dca9ea422..7f3b0cc00b7a 100644 --- a/net/netfilter/ipvs/ip_vs_dh.c +++ b/net/netfilter/ipvs/ip_vs_dh.c | |||
@@ -215,7 +215,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
215 | struct ip_vs_dh_bucket *tbl; | 215 | struct ip_vs_dh_bucket *tbl; |
216 | struct ip_vs_iphdr iph; | 216 | struct ip_vs_iphdr iph; |
217 | 217 | ||
218 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | 218 | ip_vs_fill_iph_addr_only(svc->af, skb, &iph); |
219 | 219 | ||
220 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); | 220 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
221 | 221 | ||
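
The dh, sh, lblc and lblcr schedulers switch to ip_vs_fill_iph_addr_only() because hashing only needs addresses and must also work for packets where no transport header is reachable (for example later IPv6 fragments). The sketch below models that reduced "address-only" view; the struct layout, helper names and hash are invented for the example and do not match the real ip_vs_dh hash.

    /* Sketch: fill only what an address-hashing scheduler needs. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct toy_addr_view {
        uint32_t saddr;
        uint32_t daddr;
    };

    /* pkt points at a minimal IPv4-style header: saddr at offset 12,
     * daddr at offset 16; bytes are kept as-is, only used for hashing. */
    static void toy_fill_addr_only(const uint8_t *pkt, struct toy_addr_view *v)
    {
        memcpy(&v->saddr, pkt + 12, 4);
        memcpy(&v->daddr, pkt + 16, 4);
    }

    /* trivial destination hash, the kind of decision ip_vs_dh builds on */
    static unsigned int toy_dh_hash(const struct toy_addr_view *v, unsigned int n)
    {
        return (v->daddr * 2654435761u) % n;   /* multiplicative hash */
    }

    int main(void)
    {
        uint8_t pkt[20] = {0};
        pkt[16] = 192; pkt[17] = 0; pkt[18] = 2; pkt[19] = 1;  /* daddr */

        struct toy_addr_view v;
        toy_fill_addr_only(pkt, &v);
        printf("bucket = %u\n", toy_dh_hash(&v, 256));
        return 0;
    }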
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index df646ccf08a7..fdd89b9564ea 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c | |||
@@ -479,7 +479,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
479 | struct ip_vs_dest *dest = NULL; | 479 | struct ip_vs_dest *dest = NULL; |
480 | struct ip_vs_lblc_entry *en; | 480 | struct ip_vs_lblc_entry *en; |
481 | 481 | ||
482 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | 482 | ip_vs_fill_iph_addr_only(svc->af, skb, &iph); |
483 | 483 | ||
484 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); | 484 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
485 | 485 | ||
@@ -560,6 +560,11 @@ static int __net_init __ip_vs_lblc_init(struct net *net) | |||
560 | GFP_KERNEL); | 560 | GFP_KERNEL); |
561 | if (ipvs->lblc_ctl_table == NULL) | 561 | if (ipvs->lblc_ctl_table == NULL) |
562 | return -ENOMEM; | 562 | return -ENOMEM; |
563 | |||
564 | /* Don't export sysctls to unprivileged users */ | ||
565 | if (net->user_ns != &init_user_ns) | ||
566 | ipvs->lblc_ctl_table[0].procname = NULL; | ||
567 | |||
563 | } else | 568 | } else |
564 | ipvs->lblc_ctl_table = vs_vars_table; | 569 | ipvs->lblc_ctl_table = vs_vars_table; |
565 | ipvs->sysctl_lblc_expiration = DEFAULT_EXPIRATION; | 570 | ipvs->sysctl_lblc_expiration = DEFAULT_EXPIRATION; |
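
The lblc sysctl setup above (and lblcr and the conntrack tables further down) hides the per-netns table from netns instances not owned by the initial user namespace by blanking the first entry's procname. Table registration walks entries until an unnamed one, so clearing entry 0 effectively unexports the whole table. A standalone sketch of that convention, with invented names:

    /* Sketch of the "blank procname terminates the table" convention. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct toy_ctl_table {
        const char *procname;   /* NULL ends the table */
        int        *data;
    };

    static void toy_register(const struct toy_ctl_table *tbl)
    {
        for (; tbl->procname; tbl++)
            printf("exporting %s\n", tbl->procname);
    }

    int main(void)
    {
        int expiration = 0;
        struct toy_ctl_table tbl[] = {
            { "lblc_expiration", &expiration },
            { NULL, NULL },                  /* terminator */
        };
        bool unprivileged_ns = true;         /* pretend: non-init user ns */

        if (unprivileged_ns)
            tbl[0].procname = NULL;          /* hide the whole table */

        toy_register(tbl);                   /* prints nothing */
        return 0;
    }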
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index 570e31ea427a..c03b6a3ade2f 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c | |||
@@ -649,7 +649,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
649 | struct ip_vs_dest *dest = NULL; | 649 | struct ip_vs_dest *dest = NULL; |
650 | struct ip_vs_lblcr_entry *en; | 650 | struct ip_vs_lblcr_entry *en; |
651 | 651 | ||
652 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | 652 | ip_vs_fill_iph_addr_only(svc->af, skb, &iph); |
653 | 653 | ||
654 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); | 654 | IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); |
655 | 655 | ||
@@ -754,6 +754,10 @@ static int __net_init __ip_vs_lblcr_init(struct net *net) | |||
754 | GFP_KERNEL); | 754 | GFP_KERNEL); |
755 | if (ipvs->lblcr_ctl_table == NULL) | 755 | if (ipvs->lblcr_ctl_table == NULL) |
756 | return -ENOMEM; | 756 | return -ENOMEM; |
757 | |||
758 | /* Don't export sysctls to unprivileged users */ | ||
759 | if (net->user_ns != &init_user_ns) | ||
760 | ipvs->lblcr_ctl_table[0].procname = NULL; | ||
757 | } else | 761 | } else |
758 | ipvs->lblcr_ctl_table = vs_vars_table; | 762 | ipvs->lblcr_ctl_table = vs_vars_table; |
759 | ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION; | 763 | ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION; |
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c index 022e77e1e766..c8beafd401aa 100644 --- a/net/netfilter/ipvs/ip_vs_nfct.c +++ b/net/netfilter/ipvs/ip_vs_nfct.c | |||
@@ -82,7 +82,7 @@ void | |||
82 | ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) | 82 | ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) |
83 | { | 83 | { |
84 | enum ip_conntrack_info ctinfo; | 84 | enum ip_conntrack_info ctinfo; |
85 | struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); | 85 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
86 | struct nf_conntrack_tuple new_tuple; | 86 | struct nf_conntrack_tuple new_tuple; |
87 | 87 | ||
88 | if (ct == NULL || nf_ct_is_confirmed(ct) || nf_ct_is_untracked(ct) || | 88 | if (ct == NULL || nf_ct_is_confirmed(ct) || nf_ct_is_untracked(ct) || |
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c index 1aa5cac748c4..12475ef88daf 100644 --- a/net/netfilter/ipvs/ip_vs_pe_sip.c +++ b/net/netfilter/ipvs/ip_vs_pe_sip.c | |||
@@ -68,23 +68,31 @@ static int get_callid(const char *dptr, unsigned int dataoff, | |||
68 | static int | 68 | static int |
69 | ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb) | 69 | ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb) |
70 | { | 70 | { |
71 | struct sk_buff *reasm = skb_nfct_reasm(skb); | ||
71 | struct ip_vs_iphdr iph; | 72 | struct ip_vs_iphdr iph; |
72 | unsigned int dataoff, datalen, matchoff, matchlen; | 73 | unsigned int dataoff, datalen, matchoff, matchlen; |
73 | const char *dptr; | 74 | const char *dptr; |
74 | int retc; | 75 | int retc; |
75 | 76 | ||
76 | ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph); | 77 | ip_vs_fill_iph_skb(p->af, skb, &iph); |
77 | 78 | ||
78 | /* Only useful with UDP */ | 79 | /* Only useful with UDP */ |
79 | if (iph.protocol != IPPROTO_UDP) | 80 | if (iph.protocol != IPPROTO_UDP) |
80 | return -EINVAL; | 81 | return -EINVAL; |
82 | /* todo: IPv6 fragments: | ||
83 | * I think this only should be done for the first fragment. /HS | ||
84 | */ | ||
85 | if (reasm) { | ||
86 | skb = reasm; | ||
87 | dataoff = iph.thoff_reasm + sizeof(struct udphdr); | ||
88 | } else | ||
89 | dataoff = iph.len + sizeof(struct udphdr); | ||
81 | 90 | ||
82 | /* No Data ? */ | ||
83 | dataoff = iph.len + sizeof(struct udphdr); | ||
84 | if (dataoff >= skb->len) | 91 | if (dataoff >= skb->len) |
85 | return -EINVAL; | 92 | return -EINVAL; |
86 | 93 | /* todo: Check if this will mess-up the reasm skb !!! /HS */ | |
87 | if ((retc=skb_linearize(skb)) < 0) | 94 | retc = skb_linearize(skb); |
95 | if (retc < 0) | ||
88 | return retc; | 96 | return retc; |
89 | dptr = skb->data + dataoff; | 97 | dptr = skb->data + dataoff; |
90 | datalen = skb->len - dataoff; | 98 | datalen = skb->len - dataoff; |
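
In ip_vs_sip_fill_param() the SIP payload offset is now taken either from the reassembled skb (iph.thoff_reasm) or from the parsed header length (iph.len), plus the UDP header, and then bounds-checked against skb->len before the data is linearized and scanned for the Call-ID. A tiny sketch of that offset arithmetic and bounds check, with invented names:

    /* Sketch: transport offset + sizeof(udphdr), then a bounds check. */
    #include <stdio.h>
    #include <string.h>

    #define TOY_UDP_HLEN 8

    /* Returns a pointer to the SIP payload inside buf, or NULL if the
     * packet is too short to contain any payload (the -EINVAL case). */
    static const char *toy_sip_payload(const char *buf, unsigned int buflen,
                                       unsigned int thoff, unsigned int *datalen)
    {
        unsigned int dataoff = thoff + TOY_UDP_HLEN;

        if (dataoff >= buflen)
            return NULL;
        *datalen = buflen - dataoff;
        return buf + dataoff;
    }

    int main(void)
    {
        /* 40-byte fake IPv6 header + fake UDP header + SIP start line */
        char pkt[40 + TOY_UDP_HLEN + 32];
        memset(pkt, 0, sizeof(pkt));
        strcpy(pkt + 40 + TOY_UDP_HLEN, "INVITE sip:bob@example.org");

        unsigned int datalen = 0;
        const char *p = toy_sip_payload(pkt, sizeof(pkt), 40, &datalen);
        printf("%s (datalen=%u)\n", p ? p : "(too short)", datalen);
        return 0;
    }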
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index 50d82186da87..939f7fbe9b46 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c | |||
@@ -280,17 +280,17 @@ ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp, | |||
280 | if (ih == NULL) | 280 | if (ih == NULL) |
281 | sprintf(buf, "TRUNCATED"); | 281 | sprintf(buf, "TRUNCATED"); |
282 | else if (ih->nexthdr == IPPROTO_FRAGMENT) | 282 | else if (ih->nexthdr == IPPROTO_FRAGMENT) |
283 | sprintf(buf, "%pI6->%pI6 frag", &ih->saddr, &ih->daddr); | 283 | sprintf(buf, "%pI6c->%pI6c frag", &ih->saddr, &ih->daddr); |
284 | else { | 284 | else { |
285 | __be16 _ports[2], *pptr; | 285 | __be16 _ports[2], *pptr; |
286 | 286 | ||
287 | pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr), | 287 | pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr), |
288 | sizeof(_ports), _ports); | 288 | sizeof(_ports), _ports); |
289 | if (pptr == NULL) | 289 | if (pptr == NULL) |
290 | sprintf(buf, "TRUNCATED %pI6->%pI6", | 290 | sprintf(buf, "TRUNCATED %pI6c->%pI6c", |
291 | &ih->saddr, &ih->daddr); | 291 | &ih->saddr, &ih->daddr); |
292 | else | 292 | else |
293 | sprintf(buf, "%pI6:%u->%pI6:%u", | 293 | sprintf(buf, "%pI6c:%u->%pI6c:%u", |
294 | &ih->saddr, ntohs(pptr[0]), | 294 | &ih->saddr, ntohs(pptr[0]), |
295 | &ih->daddr, ntohs(pptr[1])); | 295 | &ih->daddr, ntohs(pptr[1])); |
296 | } | 296 | } |
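
The debug strings above move from %pI6 to %pI6c, i.e. from the full eight-group form with leading zeros to the compressed representation, which matches what user-space tools print and is far easier to read in logs. The same compressed form comes out of inet_ntop(); a small self-contained illustration:

    /* Why "%pI6c": compressed IPv6 output, like inet_ntop() in user space. */
    #include <arpa/inet.h>
    #include <stdio.h>

    int main(void)
    {
        struct in6_addr a;
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET6, "2001:db8:0:0:0:0:0:1", &a);
        inet_ntop(AF_INET6, &a, buf, sizeof(buf));

        /* full form (what %pI6 prints):  2001:0db8:0000:0000:0000:0000:0000:0001 */
        /* compressed form (%pI6c):       2001:db8::1                             */
        printf("%s\n", buf);
        return 0;
    }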
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c index 5b8eb8b12c3e..5de3dd312c0f 100644 --- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c +++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c | |||
@@ -57,7 +57,7 @@ ah_esp_conn_fill_param_proto(struct net *net, int af, | |||
57 | 57 | ||
58 | static struct ip_vs_conn * | 58 | static struct ip_vs_conn * |
59 | ah_esp_conn_in_get(int af, const struct sk_buff *skb, | 59 | ah_esp_conn_in_get(int af, const struct sk_buff *skb, |
60 | const struct ip_vs_iphdr *iph, unsigned int proto_off, | 60 | const struct ip_vs_iphdr *iph, |
61 | int inverse) | 61 | int inverse) |
62 | { | 62 | { |
63 | struct ip_vs_conn *cp; | 63 | struct ip_vs_conn *cp; |
@@ -85,9 +85,7 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, | |||
85 | 85 | ||
86 | static struct ip_vs_conn * | 86 | static struct ip_vs_conn * |
87 | ah_esp_conn_out_get(int af, const struct sk_buff *skb, | 87 | ah_esp_conn_out_get(int af, const struct sk_buff *skb, |
88 | const struct ip_vs_iphdr *iph, | 88 | const struct ip_vs_iphdr *iph, int inverse) |
89 | unsigned int proto_off, | ||
90 | int inverse) | ||
91 | { | 89 | { |
92 | struct ip_vs_conn *cp; | 90 | struct ip_vs_conn *cp; |
93 | struct ip_vs_conn_param p; | 91 | struct ip_vs_conn_param p; |
@@ -110,7 +108,8 @@ ah_esp_conn_out_get(int af, const struct sk_buff *skb, | |||
110 | 108 | ||
111 | static int | 109 | static int |
112 | ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, | 110 | ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, |
113 | int *verdict, struct ip_vs_conn **cpp) | 111 | int *verdict, struct ip_vs_conn **cpp, |
112 | struct ip_vs_iphdr *iph) | ||
114 | { | 113 | { |
115 | /* | 114 | /* |
116 | * AH/ESP is only related traffic. Pass the packet to IP stack. | 115 | * AH/ESP is only related traffic. Pass the packet to IP stack. |
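
The signature changes above (conn_schedule, conn_in_get/conn_out_get, and the snat/dnat handlers in the following files) all follow one pattern: ip_vs_in()/ip_vs_out() parse the IP header once into an ip_vs_iphdr and pass that view down, instead of every protocol handler re-walking the headers. A compact sketch of that parse-once, pass-down pattern, with invented names:

    /* Sketch of "parse once, pass the parsed view down". */
    #include <stddef.h>
    #include <stdio.h>

    struct toy_iphdr_view {
        int          protocol;   /* upper-layer protocol */
        unsigned int len;        /* offset of the upper-layer header */
        unsigned int fragoffs;   /* 0 for first/only fragment */
    };

    /* before: each handler re-parsed the packet itself;
     * after: handlers receive the already-parsed view. */
    static int toy_conn_schedule(size_t pktlen, const struct toy_iphdr_view *iph)
    {
        if (iph->fragoffs)               /* no transport header here */
            return 0;
        if (iph->len + 4 > pktlen)       /* ports not present */
            return 0;
        printf("scheduling proto %d, ports at offset %u\n",
               iph->protocol, iph->len);
        return 1;
    }

    int main(void)
    {
        size_t pktlen = 60;
        struct toy_iphdr_view iph = { .protocol = 6, .len = 40, .fragoffs = 0 };

        /* the caller parsed the headers exactly once ... */
        toy_conn_schedule(pktlen, &iph);     /* ... and just passes iph */
        return 0;
    }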
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index 9f3fb751c491..746048b13ef3 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c | |||
@@ -10,28 +10,26 @@ | |||
10 | 10 | ||
11 | static int | 11 | static int |
12 | sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, | 12 | sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, |
13 | int *verdict, struct ip_vs_conn **cpp) | 13 | int *verdict, struct ip_vs_conn **cpp, |
14 | struct ip_vs_iphdr *iph) | ||
14 | { | 15 | { |
15 | struct net *net; | 16 | struct net *net; |
16 | struct ip_vs_service *svc; | 17 | struct ip_vs_service *svc; |
17 | sctp_chunkhdr_t _schunkh, *sch; | 18 | sctp_chunkhdr_t _schunkh, *sch; |
18 | sctp_sctphdr_t *sh, _sctph; | 19 | sctp_sctphdr_t *sh, _sctph; |
19 | struct ip_vs_iphdr iph; | ||
20 | 20 | ||
21 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 21 | sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph); |
22 | |||
23 | sh = skb_header_pointer(skb, iph.len, sizeof(_sctph), &_sctph); | ||
24 | if (sh == NULL) | 22 | if (sh == NULL) |
25 | return 0; | 23 | return 0; |
26 | 24 | ||
27 | sch = skb_header_pointer(skb, iph.len + sizeof(sctp_sctphdr_t), | 25 | sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t), |
28 | sizeof(_schunkh), &_schunkh); | 26 | sizeof(_schunkh), &_schunkh); |
29 | if (sch == NULL) | 27 | if (sch == NULL) |
30 | return 0; | 28 | return 0; |
31 | net = skb_net(skb); | 29 | net = skb_net(skb); |
32 | if ((sch->type == SCTP_CID_INIT) && | 30 | if ((sch->type == SCTP_CID_INIT) && |
33 | (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, | 31 | (svc = ip_vs_service_get(net, af, skb->mark, iph->protocol, |
34 | &iph.daddr, sh->dest))) { | 32 | &iph->daddr, sh->dest))) { |
35 | int ignored; | 33 | int ignored; |
36 | 34 | ||
37 | if (ip_vs_todrop(net_ipvs(net))) { | 35 | if (ip_vs_todrop(net_ipvs(net))) { |
@@ -47,10 +45,10 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, | |||
47 | * Let the virtual server select a real server for the | 45 | * Let the virtual server select a real server for the |
48 | * incoming connection, and create a connection entry. | 46 | * incoming connection, and create a connection entry. |
49 | */ | 47 | */ |
50 | *cpp = ip_vs_schedule(svc, skb, pd, &ignored); | 48 | *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph); |
51 | if (!*cpp && ignored <= 0) { | 49 | if (!*cpp && ignored <= 0) { |
52 | if (!ignored) | 50 | if (!ignored) |
53 | *verdict = ip_vs_leave(svc, skb, pd); | 51 | *verdict = ip_vs_leave(svc, skb, pd, iph); |
54 | else { | 52 | else { |
55 | ip_vs_service_put(svc); | 53 | ip_vs_service_put(svc); |
56 | *verdict = NF_DROP; | 54 | *verdict = NF_DROP; |
@@ -64,20 +62,18 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, | |||
64 | } | 62 | } |
65 | 63 | ||
66 | static int | 64 | static int |
67 | sctp_snat_handler(struct sk_buff *skb, | 65 | sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, |
68 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 66 | struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) |
69 | { | 67 | { |
70 | sctp_sctphdr_t *sctph; | 68 | sctp_sctphdr_t *sctph; |
71 | unsigned int sctphoff; | 69 | unsigned int sctphoff = iph->len; |
72 | struct sk_buff *iter; | 70 | struct sk_buff *iter; |
73 | __be32 crc32; | 71 | __be32 crc32; |
74 | 72 | ||
75 | #ifdef CONFIG_IP_VS_IPV6 | 73 | #ifdef CONFIG_IP_VS_IPV6 |
76 | if (cp->af == AF_INET6) | 74 | if (cp->af == AF_INET6 && iph->fragoffs) |
77 | sctphoff = sizeof(struct ipv6hdr); | 75 | return 1; |
78 | else | ||
79 | #endif | 76 | #endif |
80 | sctphoff = ip_hdrlen(skb); | ||
81 | 77 | ||
82 | /* csum_check requires unshared skb */ | 78 | /* csum_check requires unshared skb */ |
83 | if (!skb_make_writable(skb, sctphoff + sizeof(*sctph))) | 79 | if (!skb_make_writable(skb, sctphoff + sizeof(*sctph))) |
@@ -108,20 +104,18 @@ sctp_snat_handler(struct sk_buff *skb, | |||
108 | } | 104 | } |
109 | 105 | ||
110 | static int | 106 | static int |
111 | sctp_dnat_handler(struct sk_buff *skb, | 107 | sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, |
112 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 108 | struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) |
113 | { | 109 | { |
114 | sctp_sctphdr_t *sctph; | 110 | sctp_sctphdr_t *sctph; |
115 | unsigned int sctphoff; | 111 | unsigned int sctphoff = iph->len; |
116 | struct sk_buff *iter; | 112 | struct sk_buff *iter; |
117 | __be32 crc32; | 113 | __be32 crc32; |
118 | 114 | ||
119 | #ifdef CONFIG_IP_VS_IPV6 | 115 | #ifdef CONFIG_IP_VS_IPV6 |
120 | if (cp->af == AF_INET6) | 116 | if (cp->af == AF_INET6 && iph->fragoffs) |
121 | sctphoff = sizeof(struct ipv6hdr); | 117 | return 1; |
122 | else | ||
123 | #endif | 118 | #endif |
124 | sctphoff = ip_hdrlen(skb); | ||
125 | 119 | ||
126 | /* csum_check requires unshared skb */ | 120 | /* csum_check requires unshared skb */ |
127 | if (!skb_make_writable(skb, sctphoff + sizeof(*sctph))) | 121 | if (!skb_make_writable(skb, sctphoff + sizeof(*sctph))) |
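
sctp_snat_handler()/sctp_dnat_handler() (and the TCP and UDP equivalents below) now take the transport offset from iph->len and return 1 early for IPv6 packets with a nonzero fragment offset, since only the first fragment carries the SCTP/TCP/UDP header that would be rewritten. A sketch of that early-return, with invented names:

    /* Sketch: a non-first fragment has no transport header to rewrite. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_iphdr_view {
        unsigned int len;        /* transport header offset (from the parse) */
        unsigned int fragoffs;   /* 0 only for the first/only fragment */
    };

    static bool toy_dnat_rewrite(uint16_t *ports, size_t nports,
                                 const struct toy_iphdr_view *iph,
                                 uint16_t new_dport)
    {
        if (iph->fragoffs)       /* later fragment: ports are not in this skb */
            return true;         /* the "return 1" in the handlers above */
        if (nports < 2)
            return false;
        ports[1] = new_dport;    /* rewrite the destination port */
        return true;
    }

    int main(void)
    {
        uint16_t ports[2] = { 40000, 80 };
        struct toy_iphdr_view first = { .len = 40, .fragoffs = 0 };
        struct toy_iphdr_view later = { .len = 40, .fragoffs = 1232 };

        toy_dnat_rewrite(ports, 2, &first, 8080);  /* rewritten */
        toy_dnat_rewrite(ports, 2, &later, 8080);  /* untouched, still "ok" */
        printf("dport now %u\n", ports[1]);
        return 0;
    }

Later fragments are instead matched against the connection created by the first fragment, via the reassembly skb handled in ip_vs_preroute_frag6 above.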
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index cd609cc62721..9af653a75825 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c | |||
@@ -33,16 +33,14 @@ | |||
33 | 33 | ||
34 | static int | 34 | static int |
35 | tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, | 35 | tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, |
36 | int *verdict, struct ip_vs_conn **cpp) | 36 | int *verdict, struct ip_vs_conn **cpp, |
37 | struct ip_vs_iphdr *iph) | ||
37 | { | 38 | { |
38 | struct net *net; | 39 | struct net *net; |
39 | struct ip_vs_service *svc; | 40 | struct ip_vs_service *svc; |
40 | struct tcphdr _tcph, *th; | 41 | struct tcphdr _tcph, *th; |
41 | struct ip_vs_iphdr iph; | ||
42 | 42 | ||
43 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 43 | th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph); |
44 | |||
45 | th = skb_header_pointer(skb, iph.len, sizeof(_tcph), &_tcph); | ||
46 | if (th == NULL) { | 44 | if (th == NULL) { |
47 | *verdict = NF_DROP; | 45 | *verdict = NF_DROP; |
48 | return 0; | 46 | return 0; |
@@ -50,8 +48,8 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, | |||
50 | net = skb_net(skb); | 48 | net = skb_net(skb); |
51 | /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */ | 49 | /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */ |
52 | if (th->syn && | 50 | if (th->syn && |
53 | (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, | 51 | (svc = ip_vs_service_get(net, af, skb->mark, iph->protocol, |
54 | &iph.daddr, th->dest))) { | 52 | &iph->daddr, th->dest))) { |
55 | int ignored; | 53 | int ignored; |
56 | 54 | ||
57 | if (ip_vs_todrop(net_ipvs(net))) { | 55 | if (ip_vs_todrop(net_ipvs(net))) { |
@@ -68,10 +66,10 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, | |||
68 | * Let the virtual server select a real server for the | 66 | * Let the virtual server select a real server for the |
69 | * incoming connection, and create a connection entry. | 67 | * incoming connection, and create a connection entry. |
70 | */ | 68 | */ |
71 | *cpp = ip_vs_schedule(svc, skb, pd, &ignored); | 69 | *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph); |
72 | if (!*cpp && ignored <= 0) { | 70 | if (!*cpp && ignored <= 0) { |
73 | if (!ignored) | 71 | if (!ignored) |
74 | *verdict = ip_vs_leave(svc, skb, pd); | 72 | *verdict = ip_vs_leave(svc, skb, pd, iph); |
75 | else { | 73 | else { |
76 | ip_vs_service_put(svc); | 74 | ip_vs_service_put(svc); |
77 | *verdict = NF_DROP; | 75 | *verdict = NF_DROP; |
@@ -128,20 +126,18 @@ tcp_partial_csum_update(int af, struct tcphdr *tcph, | |||
128 | 126 | ||
129 | 127 | ||
130 | static int | 128 | static int |
131 | tcp_snat_handler(struct sk_buff *skb, | 129 | tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, |
132 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 130 | struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) |
133 | { | 131 | { |
134 | struct tcphdr *tcph; | 132 | struct tcphdr *tcph; |
135 | unsigned int tcphoff; | 133 | unsigned int tcphoff = iph->len; |
136 | int oldlen; | 134 | int oldlen; |
137 | int payload_csum = 0; | 135 | int payload_csum = 0; |
138 | 136 | ||
139 | #ifdef CONFIG_IP_VS_IPV6 | 137 | #ifdef CONFIG_IP_VS_IPV6 |
140 | if (cp->af == AF_INET6) | 138 | if (cp->af == AF_INET6 && iph->fragoffs) |
141 | tcphoff = sizeof(struct ipv6hdr); | 139 | return 1; |
142 | else | ||
143 | #endif | 140 | #endif |
144 | tcphoff = ip_hdrlen(skb); | ||
145 | oldlen = skb->len - tcphoff; | 141 | oldlen = skb->len - tcphoff; |
146 | 142 | ||
147 | /* csum_check requires unshared skb */ | 143 | /* csum_check requires unshared skb */ |
@@ -208,20 +204,18 @@ tcp_snat_handler(struct sk_buff *skb, | |||
208 | 204 | ||
209 | 205 | ||
210 | static int | 206 | static int |
211 | tcp_dnat_handler(struct sk_buff *skb, | 207 | tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, |
212 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 208 | struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) |
213 | { | 209 | { |
214 | struct tcphdr *tcph; | 210 | struct tcphdr *tcph; |
215 | unsigned int tcphoff; | 211 | unsigned int tcphoff = iph->len; |
216 | int oldlen; | 212 | int oldlen; |
217 | int payload_csum = 0; | 213 | int payload_csum = 0; |
218 | 214 | ||
219 | #ifdef CONFIG_IP_VS_IPV6 | 215 | #ifdef CONFIG_IP_VS_IPV6 |
220 | if (cp->af == AF_INET6) | 216 | if (cp->af == AF_INET6 && iph->fragoffs) |
221 | tcphoff = sizeof(struct ipv6hdr); | 217 | return 1; |
222 | else | ||
223 | #endif | 218 | #endif |
224 | tcphoff = ip_hdrlen(skb); | ||
225 | oldlen = skb->len - tcphoff; | 219 | oldlen = skb->len - tcphoff; |
226 | 220 | ||
227 | /* csum_check requires unshared skb */ | 221 | /* csum_check requires unshared skb */ |
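
tcp_conn_schedule() above reads the TCP header with skb_header_pointer() at iph->len, which either returns a pointer into the packet or copies the requested bytes into the caller's stack buffer, and returns NULL when the packet is too short. A user-space model of that contract, with invented names (the real helper also understands paged skb data):

    /* Model of the skb_header_pointer() contract relied on above. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static const void *toy_header_pointer(const uint8_t *pkt, size_t pktlen,
                                          size_t offset, size_t len, void *buf)
    {
        if (offset + len > pktlen)
            return NULL;                  /* truncated packet */
        memcpy(buf, pkt + offset, len);   /* always copy in this toy */
        return buf;
    }

    struct toy_tcphdr { uint16_t source, dest; uint32_t seq, ack_seq; };

    int main(void)
    {
        uint8_t pkt[60] = {0};
        pkt[40] = 0x00; pkt[41] = 0x50;   /* source port 80 at offset 40 */

        struct toy_tcphdr _tcph;
        const struct toy_tcphdr *th =
            toy_header_pointer(pkt, sizeof(pkt), 40, sizeof(_tcph), &_tcph);
        if (!th)
            return 1;                     /* would be NF_DROP in the hook */
        printf("source port %u\n", (unsigned)((pkt[40] << 8) | pkt[41]));
        return 0;
    }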
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c index 2fedb2dcb3d1..503a842c90d2 100644 --- a/net/netfilter/ipvs/ip_vs_proto_udp.c +++ b/net/netfilter/ipvs/ip_vs_proto_udp.c | |||
@@ -30,23 +30,22 @@ | |||
30 | 30 | ||
31 | static int | 31 | static int |
32 | udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, | 32 | udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, |
33 | int *verdict, struct ip_vs_conn **cpp) | 33 | int *verdict, struct ip_vs_conn **cpp, |
34 | struct ip_vs_iphdr *iph) | ||
34 | { | 35 | { |
35 | struct net *net; | 36 | struct net *net; |
36 | struct ip_vs_service *svc; | 37 | struct ip_vs_service *svc; |
37 | struct udphdr _udph, *uh; | 38 | struct udphdr _udph, *uh; |
38 | struct ip_vs_iphdr iph; | ||
39 | 39 | ||
40 | ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); | 40 | /* IPv6 fragments, only first fragment will hit this */ |
41 | 41 | uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph); | |
42 | uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph); | ||
43 | if (uh == NULL) { | 42 | if (uh == NULL) { |
44 | *verdict = NF_DROP; | 43 | *verdict = NF_DROP; |
45 | return 0; | 44 | return 0; |
46 | } | 45 | } |
47 | net = skb_net(skb); | 46 | net = skb_net(skb); |
48 | svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, | 47 | svc = ip_vs_service_get(net, af, skb->mark, iph->protocol, |
49 | &iph.daddr, uh->dest); | 48 | &iph->daddr, uh->dest); |
50 | if (svc) { | 49 | if (svc) { |
51 | int ignored; | 50 | int ignored; |
52 | 51 | ||
@@ -64,10 +63,10 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, | |||
64 | * Let the virtual server select a real server for the | 63 | * Let the virtual server select a real server for the |
65 | * incoming connection, and create a connection entry. | 64 | * incoming connection, and create a connection entry. |
66 | */ | 65 | */ |
67 | *cpp = ip_vs_schedule(svc, skb, pd, &ignored); | 66 | *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph); |
68 | if (!*cpp && ignored <= 0) { | 67 | if (!*cpp && ignored <= 0) { |
69 | if (!ignored) | 68 | if (!ignored) |
70 | *verdict = ip_vs_leave(svc, skb, pd); | 69 | *verdict = ip_vs_leave(svc, skb, pd, iph); |
71 | else { | 70 | else { |
72 | ip_vs_service_put(svc); | 71 | ip_vs_service_put(svc); |
73 | *verdict = NF_DROP; | 72 | *verdict = NF_DROP; |
@@ -125,20 +124,18 @@ udp_partial_csum_update(int af, struct udphdr *uhdr, | |||
125 | 124 | ||
126 | 125 | ||
127 | static int | 126 | static int |
128 | udp_snat_handler(struct sk_buff *skb, | 127 | udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, |
129 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 128 | struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) |
130 | { | 129 | { |
131 | struct udphdr *udph; | 130 | struct udphdr *udph; |
132 | unsigned int udphoff; | 131 | unsigned int udphoff = iph->len; |
133 | int oldlen; | 132 | int oldlen; |
134 | int payload_csum = 0; | 133 | int payload_csum = 0; |
135 | 134 | ||
136 | #ifdef CONFIG_IP_VS_IPV6 | 135 | #ifdef CONFIG_IP_VS_IPV6 |
137 | if (cp->af == AF_INET6) | 136 | if (cp->af == AF_INET6 && iph->fragoffs) |
138 | udphoff = sizeof(struct ipv6hdr); | 137 | return 1; |
139 | else | ||
140 | #endif | 138 | #endif |
141 | udphoff = ip_hdrlen(skb); | ||
142 | oldlen = skb->len - udphoff; | 139 | oldlen = skb->len - udphoff; |
143 | 140 | ||
144 | /* csum_check requires unshared skb */ | 141 | /* csum_check requires unshared skb */ |
@@ -210,20 +207,18 @@ udp_snat_handler(struct sk_buff *skb, | |||
210 | 207 | ||
211 | 208 | ||
212 | static int | 209 | static int |
213 | udp_dnat_handler(struct sk_buff *skb, | 210 | udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, |
214 | struct ip_vs_protocol *pp, struct ip_vs_conn *cp) | 211 | struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) |
215 | { | 212 | { |
216 | struct udphdr *udph; | 213 | struct udphdr *udph; |
217 | unsigned int udphoff; | 214 | unsigned int udphoff = iph->len; |
218 | int oldlen; | 215 | int oldlen; |
219 | int payload_csum = 0; | 216 | int payload_csum = 0; |
220 | 217 | ||
221 | #ifdef CONFIG_IP_VS_IPV6 | 218 | #ifdef CONFIG_IP_VS_IPV6 |
222 | if (cp->af == AF_INET6) | 219 | if (cp->af == AF_INET6 && iph->fragoffs) |
223 | udphoff = sizeof(struct ipv6hdr); | 220 | return 1; |
224 | else | ||
225 | #endif | 221 | #endif |
226 | udphoff = ip_hdrlen(skb); | ||
227 | oldlen = skb->len - udphoff; | 222 | oldlen = skb->len - udphoff; |
228 | 223 | ||
229 | /* csum_check requires unshared skb */ | 224 | /* csum_check requires unshared skb */ |
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c index 08dbdd5bc18f..d6bf20d6cdbe 100644 --- a/net/netfilter/ipvs/ip_vs_sched.c +++ b/net/netfilter/ipvs/ip_vs_sched.c | |||
@@ -159,7 +159,7 @@ void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg) | |||
159 | svc->fwmark, msg); | 159 | svc->fwmark, msg); |
160 | #ifdef CONFIG_IP_VS_IPV6 | 160 | #ifdef CONFIG_IP_VS_IPV6 |
161 | } else if (svc->af == AF_INET6) { | 161 | } else if (svc->af == AF_INET6) { |
162 | IP_VS_ERR_RL("%s: %s [%pI6]:%d - %s\n", | 162 | IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n", |
163 | svc->scheduler->name, | 163 | svc->scheduler->name, |
164 | ip_vs_proto_name(svc->protocol), | 164 | ip_vs_proto_name(svc->protocol), |
165 | &svc->addr.in6, ntohs(svc->port), msg); | 165 | &svc->addr.in6, ntohs(svc->port), msg); |
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c index 05126521743e..e33126994628 100644 --- a/net/netfilter/ipvs/ip_vs_sh.c +++ b/net/netfilter/ipvs/ip_vs_sh.c | |||
@@ -228,7 +228,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
228 | struct ip_vs_sh_bucket *tbl; | 228 | struct ip_vs_sh_bucket *tbl; |
229 | struct ip_vs_iphdr iph; | 229 | struct ip_vs_iphdr iph; |
230 | 230 | ||
231 | ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); | 231 | ip_vs_fill_iph_addr_only(svc->af, skb, &iph); |
232 | 232 | ||
233 | IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n"); | 233 | IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n"); |
234 | 234 | ||
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index cc4c8095681a..ee6b7a9f1ec2 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -338,7 +338,7 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest, | |||
338 | local = __ip_vs_is_local_route6(rt); | 338 | local = __ip_vs_is_local_route6(rt); |
339 | if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) & | 339 | if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) & |
340 | rt_mode)) { | 340 | rt_mode)) { |
341 | IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n", | 341 | IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6c\n", |
342 | local ? "local":"non-local", daddr); | 342 | local ? "local":"non-local", daddr); |
343 | dst_release(&rt->dst); | 343 | dst_release(&rt->dst); |
344 | return NULL; | 344 | return NULL; |
@@ -346,8 +346,8 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest, | |||
346 | if (local && !(rt_mode & IP_VS_RT_MODE_RDR) && | 346 | if (local && !(rt_mode & IP_VS_RT_MODE_RDR) && |
347 | !((ort = (struct rt6_info *) skb_dst(skb)) && | 347 | !((ort = (struct rt6_info *) skb_dst(skb)) && |
348 | __ip_vs_is_local_route6(ort))) { | 348 | __ip_vs_is_local_route6(ort))) { |
349 | IP_VS_DBG_RL("Redirect from non-local address %pI6 to local " | 349 | IP_VS_DBG_RL("Redirect from non-local address %pI6c to local " |
350 | "requires NAT method, dest: %pI6\n", | 350 | "requires NAT method, dest: %pI6c\n", |
351 | &ipv6_hdr(skb)->daddr, daddr); | 351 | &ipv6_hdr(skb)->daddr, daddr); |
352 | dst_release(&rt->dst); | 352 | dst_release(&rt->dst); |
353 | return NULL; | 353 | return NULL; |
@@ -355,8 +355,8 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest, | |||
355 | if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) && | 355 | if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) && |
356 | ipv6_addr_type(&ipv6_hdr(skb)->saddr) & | 356 | ipv6_addr_type(&ipv6_hdr(skb)->saddr) & |
357 | IPV6_ADDR_LOOPBACK)) { | 357 | IPV6_ADDR_LOOPBACK)) { |
358 | IP_VS_DBG_RL("Stopping traffic from loopback address %pI6 " | 358 | IP_VS_DBG_RL("Stopping traffic from loopback address %pI6c " |
359 | "to non-local address, dest: %pI6\n", | 359 | "to non-local address, dest: %pI6c\n", |
360 | &ipv6_hdr(skb)->saddr, daddr); | 360 | &ipv6_hdr(skb)->saddr, daddr); |
361 | dst_release(&rt->dst); | 361 | dst_release(&rt->dst); |
362 | return NULL; | 362 | return NULL; |
@@ -427,7 +427,7 @@ do { \ | |||
427 | */ | 427 | */ |
428 | int | 428 | int |
429 | ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | 429 | ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, |
430 | struct ip_vs_protocol *pp) | 430 | struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) |
431 | { | 431 | { |
432 | /* we do not touch skb and do not need pskb ptr */ | 432 | /* we do not touch skb and do not need pskb ptr */ |
433 | IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1); | 433 | IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1); |
@@ -441,7 +441,7 @@ ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
441 | */ | 441 | */ |
442 | int | 442 | int |
443 | ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | 443 | ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, |
444 | struct ip_vs_protocol *pp) | 444 | struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) |
445 | { | 445 | { |
446 | struct rtable *rt; /* Route to the other host */ | 446 | struct rtable *rt; /* Route to the other host */ |
447 | struct iphdr *iph = ip_hdr(skb); | 447 | struct iphdr *iph = ip_hdr(skb); |
@@ -496,16 +496,16 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
496 | #ifdef CONFIG_IP_VS_IPV6 | 496 | #ifdef CONFIG_IP_VS_IPV6 |
497 | int | 497 | int |
498 | ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | 498 | ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, |
499 | struct ip_vs_protocol *pp) | 499 | struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph) |
500 | { | 500 | { |
501 | struct rt6_info *rt; /* Route to the other host */ | 501 | struct rt6_info *rt; /* Route to the other host */ |
502 | struct ipv6hdr *iph = ipv6_hdr(skb); | ||
503 | int mtu; | 502 | int mtu; |
504 | 503 | ||
505 | EnterFunction(10); | 504 | EnterFunction(10); |
506 | 505 | ||
507 | if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0, | 506 | rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr.in6, NULL, 0, |
508 | IP_VS_RT_MODE_NON_LOCAL))) | 507 | IP_VS_RT_MODE_NON_LOCAL); |
508 | if (!rt) | ||
509 | goto tx_error_icmp; | 509 | goto tx_error_icmp; |
510 | 510 | ||
511 | /* MTU checking */ | 511 | /* MTU checking */ |
@@ -516,7 +516,9 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
516 | 516 | ||
517 | skb->dev = net->loopback_dev; | 517 | skb->dev = net->loopback_dev; |
518 | } | 518 | } |
519 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 519 | /* only send ICMP too big on first fragment */ |
520 | if (!iph->fragoffs) | ||
521 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | ||
520 | dst_release(&rt->dst); | 522 | dst_release(&rt->dst); |
521 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); | 523 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
522 | goto tx_error; | 524 | goto tx_error; |
@@ -559,7 +561,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
559 | */ | 561 | */ |
560 | int | 562 | int |
561 | ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | 563 | ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, |
562 | struct ip_vs_protocol *pp) | 564 | struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) |
563 | { | 565 | { |
564 | struct rtable *rt; /* Route to the other host */ | 566 | struct rtable *rt; /* Route to the other host */ |
565 | int mtu; | 567 | int mtu; |
@@ -592,7 +594,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
592 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) | 594 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) |
593 | if (cp->flags & IP_VS_CONN_F_SYNC && local) { | 595 | if (cp->flags & IP_VS_CONN_F_SYNC && local) { |
594 | enum ip_conntrack_info ctinfo; | 596 | enum ip_conntrack_info ctinfo; |
595 | struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); | 597 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
596 | 598 | ||
597 | if (ct && !nf_ct_is_untracked(ct)) { | 599 | if (ct && !nf_ct_is_untracked(ct)) { |
598 | IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0, | 600 | IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0, |
@@ -629,7 +631,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
629 | goto tx_error_put; | 631 | goto tx_error_put; |
630 | 632 | ||
631 | /* mangle the packet */ | 633 | /* mangle the packet */ |
632 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) | 634 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh)) |
633 | goto tx_error_put; | 635 | goto tx_error_put; |
634 | ip_hdr(skb)->daddr = cp->daddr.ip; | 636 | ip_hdr(skb)->daddr = cp->daddr.ip; |
635 | ip_send_check(ip_hdr(skb)); | 637 | ip_send_check(ip_hdr(skb)); |
@@ -677,7 +679,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
677 | #ifdef CONFIG_IP_VS_IPV6 | 679 | #ifdef CONFIG_IP_VS_IPV6 |
678 | int | 680 | int |
679 | ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | 681 | ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, |
680 | struct ip_vs_protocol *pp) | 682 | struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph) |
681 | { | 683 | { |
682 | struct rt6_info *rt; /* Route to the other host */ | 684 | struct rt6_info *rt; /* Route to the other host */ |
683 | int mtu; | 685 | int mtu; |
@@ -686,10 +688,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
686 | EnterFunction(10); | 688 | EnterFunction(10); |
687 | 689 | ||
688 | /* check if it is a connection of no-client-port */ | 690 | /* check if it is a connection of no-client-port */ |
689 | if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) { | 691 | if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !iph->fragoffs)) { |
690 | __be16 _pt, *p; | 692 | __be16 _pt, *p; |
691 | p = skb_header_pointer(skb, sizeof(struct ipv6hdr), | 693 | p = skb_header_pointer(skb, iph->len, sizeof(_pt), &_pt); |
692 | sizeof(_pt), &_pt); | ||
693 | if (p == NULL) | 694 | if (p == NULL) |
694 | goto tx_error; | 695 | goto tx_error; |
695 | ip_vs_conn_fill_cport(cp, *p); | 696 | ip_vs_conn_fill_cport(cp, *p); |
@@ -709,7 +710,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
709 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) | 710 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) |
710 | if (cp->flags & IP_VS_CONN_F_SYNC && local) { | 711 | if (cp->flags & IP_VS_CONN_F_SYNC && local) { |
711 | enum ip_conntrack_info ctinfo; | 712 | enum ip_conntrack_info ctinfo; |
712 | struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); | 713 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
713 | 714 | ||
714 | if (ct && !nf_ct_is_untracked(ct)) { | 715 | if (ct && !nf_ct_is_untracked(ct)) { |
715 | IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0, | 716 | IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0, |
@@ -737,7 +738,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
737 | 738 | ||
738 | skb->dev = net->loopback_dev; | 739 | skb->dev = net->loopback_dev; |
739 | } | 740 | } |
740 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 741 | /* only send ICMP too big on first fragment */ |
742 | if (!iph->fragoffs) | ||
743 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | ||
741 | IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0, | 744 | IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0, |
742 | "ip_vs_nat_xmit_v6(): frag needed for"); | 745 | "ip_vs_nat_xmit_v6(): frag needed for"); |
743 | goto tx_error_put; | 746 | goto tx_error_put; |
@@ -751,7 +754,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
751 | goto tx_error_put; | 754 | goto tx_error_put; |
752 | 755 | ||
753 | /* mangle the packet */ | 756 | /* mangle the packet */ |
754 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) | 757 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, iph)) |
755 | goto tx_error; | 758 | goto tx_error; |
756 | ipv6_hdr(skb)->daddr = cp->daddr.in6; | 759 | ipv6_hdr(skb)->daddr = cp->daddr.in6; |
757 | 760 | ||
@@ -812,7 +815,7 @@ tx_error_put: | |||
812 | */ | 815 | */ |
813 | int | 816 | int |
814 | ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | 817 | ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, |
815 | struct ip_vs_protocol *pp) | 818 | struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) |
816 | { | 819 | { |
817 | struct netns_ipvs *ipvs = net_ipvs(skb_net(skb)); | 820 | struct netns_ipvs *ipvs = net_ipvs(skb_net(skb)); |
818 | struct rtable *rt; /* Route to the other host */ | 821 | struct rtable *rt; /* Route to the other host */ |
@@ -932,7 +935,7 @@ tx_error_put: | |||
932 | #ifdef CONFIG_IP_VS_IPV6 | 935 | #ifdef CONFIG_IP_VS_IPV6 |
933 | int | 936 | int |
934 | ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | 937 | ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, |
935 | struct ip_vs_protocol *pp) | 938 | struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) |
936 | { | 939 | { |
937 | struct rt6_info *rt; /* Route to the other host */ | 940 | struct rt6_info *rt; /* Route to the other host */ |
938 | struct in6_addr saddr; /* Source for tunnel */ | 941 | struct in6_addr saddr; /* Source for tunnel */ |
@@ -972,7 +975,9 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
972 | 975 | ||
973 | skb->dev = net->loopback_dev; | 976 | skb->dev = net->loopback_dev; |
974 | } | 977 | } |
975 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 978 | /* only send ICMP too big on first fragment */ |
979 | if (!ipvsh->fragoffs) | ||
980 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | ||
976 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); | 981 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
977 | goto tx_error_put; | 982 | goto tx_error_put; |
978 | } | 983 | } |
@@ -1053,7 +1058,7 @@ tx_error_put: | |||
1053 | */ | 1058 | */ |
1054 | int | 1059 | int |
1055 | ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | 1060 | ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, |
1056 | struct ip_vs_protocol *pp) | 1061 | struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) |
1057 | { | 1062 | { |
1058 | struct rtable *rt; /* Route to the other host */ | 1063 | struct rtable *rt; /* Route to the other host */ |
1059 | struct iphdr *iph = ip_hdr(skb); | 1064 | struct iphdr *iph = ip_hdr(skb); |
@@ -1115,7 +1120,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
1115 | #ifdef CONFIG_IP_VS_IPV6 | 1120 | #ifdef CONFIG_IP_VS_IPV6 |
1116 | int | 1121 | int |
1117 | ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | 1122 | ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, |
1118 | struct ip_vs_protocol *pp) | 1123 | struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph) |
1119 | { | 1124 | { |
1120 | struct rt6_info *rt; /* Route to the other host */ | 1125 | struct rt6_info *rt; /* Route to the other host */ |
1121 | int mtu; | 1126 | int mtu; |
@@ -1139,7 +1144,9 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
1139 | 1144 | ||
1140 | skb->dev = net->loopback_dev; | 1145 | skb->dev = net->loopback_dev; |
1141 | } | 1146 | } |
1142 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 1147 | /* only send ICMP too big on first fragment */ |
1148 | if (!iph->fragoffs) | ||
1149 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | ||
1143 | dst_release(&rt->dst); | 1150 | dst_release(&rt->dst); |
1144 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); | 1151 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
1145 | goto tx_error; | 1152 | goto tx_error; |
@@ -1183,7 +1190,8 @@ tx_error: | |||
1183 | */ | 1190 | */ |
1184 | int | 1191 | int |
1185 | ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | 1192 | ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, |
1186 | struct ip_vs_protocol *pp, int offset, unsigned int hooknum) | 1193 | struct ip_vs_protocol *pp, int offset, unsigned int hooknum, |
1194 | struct ip_vs_iphdr *iph) | ||
1187 | { | 1195 | { |
1188 | struct rtable *rt; /* Route to the other host */ | 1196 | struct rtable *rt; /* Route to the other host */ |
1189 | int mtu; | 1197 | int mtu; |
@@ -1198,7 +1206,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
1198 | translate address/port back */ | 1206 | translate address/port back */ |
1199 | if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { | 1207 | if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { |
1200 | if (cp->packet_xmit) | 1208 | if (cp->packet_xmit) |
1201 | rc = cp->packet_xmit(skb, cp, pp); | 1209 | rc = cp->packet_xmit(skb, cp, pp, iph); |
1202 | else | 1210 | else |
1203 | rc = NF_ACCEPT; | 1211 | rc = NF_ACCEPT; |
1204 | /* do not touch skb anymore */ | 1212 | /* do not touch skb anymore */ |
@@ -1227,7 +1235,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
1227 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) | 1235 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) |
1228 | if (cp->flags & IP_VS_CONN_F_SYNC && local) { | 1236 | if (cp->flags & IP_VS_CONN_F_SYNC && local) { |
1229 | enum ip_conntrack_info ctinfo; | 1237 | enum ip_conntrack_info ctinfo; |
1230 | struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); | 1238 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
1231 | 1239 | ||
1232 | if (ct && !nf_ct_is_untracked(ct)) { | 1240 | if (ct && !nf_ct_is_untracked(ct)) { |
1233 | IP_VS_DBG(10, "%s(): " | 1241 | IP_VS_DBG(10, "%s(): " |
@@ -1304,7 +1312,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
1304 | #ifdef CONFIG_IP_VS_IPV6 | 1312 | #ifdef CONFIG_IP_VS_IPV6 |
1305 | int | 1313 | int |
1306 | ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | 1314 | ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, |
1307 | struct ip_vs_protocol *pp, int offset, unsigned int hooknum) | 1315 | struct ip_vs_protocol *pp, int offset, unsigned int hooknum, |
1316 | struct ip_vs_iphdr *iph) | ||
1308 | { | 1317 | { |
1309 | struct rt6_info *rt; /* Route to the other host */ | 1318 | struct rt6_info *rt; /* Route to the other host */ |
1310 | int mtu; | 1319 | int mtu; |
@@ -1319,7 +1328,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
1319 | translate address/port back */ | 1328 | translate address/port back */ |
1320 | if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { | 1329 | if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { |
1321 | if (cp->packet_xmit) | 1330 | if (cp->packet_xmit) |
1322 | rc = cp->packet_xmit(skb, cp, pp); | 1331 | rc = cp->packet_xmit(skb, cp, pp, iph); |
1323 | else | 1332 | else |
1324 | rc = NF_ACCEPT; | 1333 | rc = NF_ACCEPT; |
1325 | /* do not touch skb anymore */ | 1334 | /* do not touch skb anymore */ |
@@ -1347,7 +1356,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
1347 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) | 1356 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) |
1348 | if (cp->flags & IP_VS_CONN_F_SYNC && local) { | 1357 | if (cp->flags & IP_VS_CONN_F_SYNC && local) { |
1349 | enum ip_conntrack_info ctinfo; | 1358 | enum ip_conntrack_info ctinfo; |
1350 | struct nf_conn *ct = ct = nf_ct_get(skb, &ctinfo); | 1359 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); |
1351 | 1360 | ||
1352 | if (ct && !nf_ct_is_untracked(ct)) { | 1361 | if (ct && !nf_ct_is_untracked(ct)) { |
1353 | IP_VS_DBG(10, "%s(): " | 1362 | IP_VS_DBG(10, "%s(): " |
@@ -1375,7 +1384,9 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
1375 | 1384 | ||
1376 | skb->dev = net->loopback_dev; | 1385 | skb->dev = net->loopback_dev; |
1377 | } | 1386 | } |
1378 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 1387 | /* only send ICMP too big on first fragment */ |
1388 | if (!iph->fragoffs) | ||
1389 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | ||
1379 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); | 1390 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
1380 | goto tx_error_put; | 1391 | goto tx_error_put; |
1381 | } | 1392 | } |
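The ip_vs_icmp_xmit_v6() hunk above suppresses ICMPV6_PKT_TOOBIG unless iph->fragoffs is zero, because only the first IPv6 fragment carries the transport header the sender needs to correlate the error. A minimal stand-alone sketch of that rule; demo_iphdr and should_send_pkt_toobig() are hypothetical stand-ins for ip_vs_iphdr and the in-line check, not kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    /* demo_iphdr stands in for ip_vs_iphdr; only fragoffs matters here */
    struct demo_iphdr {
        unsigned int fragoffs;   /* 0 for the first (or only) fragment */
    };

    static bool should_send_pkt_toobig(const struct demo_iphdr *iph,
                                       unsigned int pkt_len, unsigned int mtu)
    {
        /* Non-first fragments carry no transport header, so an ICMPv6
         * "packet too big" built from them is of little use to the
         * sender; suppress it, as the hunk above does. */
        return pkt_len > mtu && iph->fragoffs == 0;
    }

    int main(void)
    {
        struct demo_iphdr first = { .fragoffs = 0 };
        struct demo_iphdr later = { .fragoffs = 1480 };

        printf("first fragment: %d\n", should_send_pkt_toobig(&first, 1500, 1280));
        printf("later fragment: %d\n", should_send_pkt_toobig(&later, 1500, 1280));
        return 0;
    }

Compiled as plain C this prints 1 for the first fragment and 0 for later ones, mirroring the guard added in the diff.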
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c index d61e0782a797..7df424e2d10c 100644 --- a/net/netfilter/nf_conntrack_acct.c +++ b/net/netfilter/nf_conntrack_acct.c | |||
@@ -69,6 +69,10 @@ static int nf_conntrack_acct_init_sysctl(struct net *net) | |||
69 | 69 | ||
70 | table[0].data = &net->ct.sysctl_acct; | 70 | table[0].data = &net->ct.sysctl_acct; |
71 | 71 | ||
72 | /* Don't export sysctls to unprivileged users */ | ||
73 | if (net->user_ns != &init_user_ns) | ||
74 | table[0].procname = NULL; | ||
75 | |||
72 | net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter", | 76 | net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter", |
73 | table); | 77 | table); |
74 | if (!net->ct.acct_sysctl_header) { | 78 | if (!net->ct.acct_sysctl_header) { |
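This hunk, and the matching ones in nf_conntrack_ecache.c, nf_conntrack_helper.c, nf_conntrack_proto_dccp.c, nf_conntrack_standalone.c and nf_conntrack_timestamp.c below, all hide per-namespace conntrack sysctls from user namespaces other than init_user_ns by blanking the first procname: a ctl_table is terminated by an entry with an empty procname, so clearing table[0].procname before registration makes the whole table invisible in that namespace. A toy, user-space model of the idea; the names are illustrative, not kernel API:

    #include <stdio.h>

    struct toy_ctl_entry {
        const char *procname;   /* a NULL procname terminates the table */
        int *data;
    };

    /* stands in for register_net_sysctl(): exports entries until the
     * terminating NULL procname is hit */
    static void toy_register_table(const struct toy_ctl_entry *table)
    {
        for (; table->procname; table++)
            printf("exporting %s\n", table->procname);
    }

    int main(void)
    {
        int acct = 0;
        struct toy_ctl_entry table[] = {
            { "nf_conntrack_acct", &acct },
            { NULL, NULL },
        };
        int in_init_user_ns = 0;        /* pretend we are in a child ns */

        if (!in_init_user_ns)
            table[0].procname = NULL;   /* hides every entry that follows */

        toy_register_table(table);      /* prints nothing for the child ns */
        return 0;
    }

The registration still succeeds; the namespace simply gets an empty view, which is why the same three-line guard can be dropped into each init_sysctl helper unchanged.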
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index de9781b6464f..faa978f1714b 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c | |||
@@ -196,6 +196,10 @@ static int nf_conntrack_event_init_sysctl(struct net *net) | |||
196 | table[0].data = &net->ct.sysctl_events; | 196 | table[0].data = &net->ct.sysctl_events; |
197 | table[1].data = &net->ct.sysctl_events_retry_timeout; | 197 | table[1].data = &net->ct.sysctl_events_retry_timeout; |
198 | 198 | ||
199 | /* Don't export sysctls to unprivileged users */ | ||
200 | if (net->user_ns != &init_user_ns) | ||
201 | table[0].procname = NULL; | ||
202 | |||
199 | net->ct.event_sysctl_header = | 203 | net->ct.event_sysctl_header = |
200 | register_net_sysctl(net, "net/netfilter", table); | 204 | register_net_sysctl(net, "net/netfilter", table); |
201 | if (!net->ct.event_sysctl_header) { | 205 | if (!net->ct.event_sysctl_header) { |
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index c4bc637feb76..884f2b39319a 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
@@ -64,6 +64,10 @@ static int nf_conntrack_helper_init_sysctl(struct net *net) | |||
64 | 64 | ||
65 | table[0].data = &net->ct.sysctl_auto_assign_helper; | 65 | table[0].data = &net->ct.sysctl_auto_assign_helper; |
66 | 66 | ||
67 | /* Don't export sysctls to unprivileged users */ | ||
68 | if (net->user_ns != &init_user_ns) | ||
69 | table[0].procname = NULL; | ||
70 | |||
67 | net->ct.helper_sysctl_header = | 71 | net->ct.helper_sysctl_header = |
68 | register_net_sysctl(net, "net/netfilter", table); | 72 | register_net_sysctl(net, "net/netfilter", table); |
69 | 73 | ||
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 6535326cf07c..a8ae287bc7af 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
@@ -815,7 +815,7 @@ static struct ctl_table dccp_sysctl_table[] = { | |||
815 | }; | 815 | }; |
816 | #endif /* CONFIG_SYSCTL */ | 816 | #endif /* CONFIG_SYSCTL */ |
817 | 817 | ||
818 | static int dccp_kmemdup_sysctl_table(struct nf_proto_net *pn, | 818 | static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn, |
819 | struct dccp_net *dn) | 819 | struct dccp_net *dn) |
820 | { | 820 | { |
821 | #ifdef CONFIG_SYSCTL | 821 | #ifdef CONFIG_SYSCTL |
@@ -836,6 +836,10 @@ static int dccp_kmemdup_sysctl_table(struct nf_proto_net *pn, | |||
836 | pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING]; | 836 | pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING]; |
837 | pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT]; | 837 | pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT]; |
838 | pn->ctl_table[7].data = &dn->dccp_loose; | 838 | pn->ctl_table[7].data = &dn->dccp_loose; |
839 | |||
840 | /* Don't export sysctls to unprivileged users */ | ||
841 | if (net->user_ns != &init_user_ns) | ||
842 | pn->ctl_table[0].procname = NULL; | ||
839 | #endif | 843 | #endif |
840 | return 0; | 844 | return 0; |
841 | } | 845 | } |
@@ -857,7 +861,7 @@ static int dccp_init_net(struct net *net, u_int16_t proto) | |||
857 | dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; | 861 | dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; |
858 | } | 862 | } |
859 | 863 | ||
860 | return dccp_kmemdup_sysctl_table(pn, dn); | 864 | return dccp_kmemdup_sysctl_table(net, pn, dn); |
861 | } | 865 | } |
862 | 866 | ||
863 | static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = { | 867 | static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = { |
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 9b3943252a5e..363285d544a1 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
@@ -489,6 +489,10 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) | |||
489 | table[3].data = &net->ct.sysctl_checksum; | 489 | table[3].data = &net->ct.sysctl_checksum; |
490 | table[4].data = &net->ct.sysctl_log_invalid; | 490 | table[4].data = &net->ct.sysctl_log_invalid; |
491 | 491 | ||
492 | /* Don't export sysctls to unprivileged users */ | ||
493 | if (net->user_ns != &init_user_ns) | ||
494 | table[0].procname = NULL; | ||
495 | |||
492 | net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table); | 496 | net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table); |
493 | if (!net->ct.sysctl_header) | 497 | if (!net->ct.sysctl_header) |
494 | goto out_unregister_netfilter; | 498 | goto out_unregister_netfilter; |
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c index dbb364f62d6f..7ea8026f07c9 100644 --- a/net/netfilter/nf_conntrack_timestamp.c +++ b/net/netfilter/nf_conntrack_timestamp.c | |||
@@ -51,6 +51,10 @@ static int nf_conntrack_tstamp_init_sysctl(struct net *net) | |||
51 | 51 | ||
52 | table[0].data = &net->ct.sysctl_tstamp; | 52 | table[0].data = &net->ct.sysctl_tstamp; |
53 | 53 | ||
54 | /* Don't export sysctls to unprivileged users */ | ||
55 | if (net->user_ns != &init_user_ns) | ||
56 | table[0].procname = NULL; | ||
57 | |||
54 | net->ct.tstamp_sysctl_header = register_net_sysctl(net, "net/netfilter", | 58 | net->ct.tstamp_sysctl_header = register_net_sysctl(net, "net/netfilter", |
55 | table); | 59 | table); |
56 | if (!net->ct.tstamp_sysctl_header) { | 60 | if (!net->ct.tstamp_sysctl_header) { |
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index ffb92c03a358..58a09b7c3f6d 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
@@ -138,7 +138,7 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
138 | const struct nfnetlink_subsystem *ss; | 138 | const struct nfnetlink_subsystem *ss; |
139 | int type, err; | 139 | int type, err; |
140 | 140 | ||
141 | if (!capable(CAP_NET_ADMIN)) | 141 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
142 | return -EPERM; | 142 | return -EPERM; |
143 | 143 | ||
144 | /* All the messages must at least contain nfgenmsg */ | 144 | /* All the messages must at least contain nfgenmsg */ |
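The capable() to ns_capable(net->user_ns, CAP_NET_ADMIN) switch here, and in the af_netlink.c hunk below, moves the privilege check from the initial user namespace to the one that owns the relevant network namespace, so a container's root can drive nfnetlink for its own namespace without holding global CAP_NET_ADMIN. A deliberately simplified, purely hypothetical model of that decision; the ancestry walk the real ns_capable() performs is omitted:

    #include <stdbool.h>
    #include <stdio.h>

    /* entirely hypothetical structures, not kernel API */
    struct toy_user_ns { const char *name; };
    struct toy_net     { struct toy_user_ns *user_ns; };
    struct toy_cred    { int uid; struct toy_user_ns *ns; };

    static bool toy_ns_capable(const struct toy_cred *cred,
                               const struct toy_user_ns *target)
    {
        /* "root in the user namespace owning the target" is enough */
        return cred->ns == target && cred->uid == 0;
    }

    int main(void)
    {
        struct toy_user_ns init_ns  = { "init_user_ns" };
        struct toy_user_ns child_ns = { "container_user_ns" };
        struct toy_net container_net = { .user_ns = &child_ns };

        struct toy_cred container_root = { .uid = 0,    .ns = &child_ns };
        struct toy_cred host_user      = { .uid = 1000, .ns = &init_ns  };

        printf("container root on its own netns: %d\n",
               toy_ns_capable(&container_root, container_net.user_ns));
        printf("unprivileged host user:          %d\n",
               toy_ns_capable(&host_user, container_net.user_ns));
        return 0;
    }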
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c index bb10b0717f1b..8d47c3780fda 100644 --- a/net/netfilter/xt_ipvs.c +++ b/net/netfilter/xt_ipvs.c | |||
@@ -67,7 +67,7 @@ ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
67 | goto out; | 67 | goto out; |
68 | } | 68 | } |
69 | 69 | ||
70 | ip_vs_fill_iphdr(family, skb_network_header(skb), &iph); | 70 | ip_vs_fill_iph_skb(family, skb, &iph); |
71 | 71 | ||
72 | if (data->bitmask & XT_IPVS_PROTO) | 72 | if (data->bitmask & XT_IPVS_PROTO) |
73 | if ((iph.protocol == data->l4proto) ^ | 73 | if ((iph.protocol == data->l4proto) ^ |
@@ -85,7 +85,7 @@ ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
85 | /* | 85 | /* |
86 | * Check if the packet belongs to an existing entry | 86 | * Check if the packet belongs to an existing entry |
87 | */ | 87 | */ |
88 | cp = pp->conn_out_get(family, skb, &iph, iph.len, 1 /* inverse */); | 88 | cp = pp->conn_out_get(family, skb, &iph, 1 /* inverse */); |
89 | if (unlikely(cp == NULL)) { | 89 | if (unlikely(cp == NULL)) { |
90 | match = false; | 90 | match = false; |
91 | goto out; | 91 | goto out; |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 4da797fa5ec5..c8a1eb6eca2d 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -612,7 +612,7 @@ retry: | |||
612 | static inline int netlink_capable(const struct socket *sock, unsigned int flag) | 612 | static inline int netlink_capable(const struct socket *sock, unsigned int flag) |
613 | { | 613 | { |
614 | return (nl_table[sock->sk->sk_protocol].flags & flag) || | 614 | return (nl_table[sock->sk->sk_protocol].flags & flag) || |
615 | capable(CAP_NET_ADMIN); | 615 | ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN); |
616 | } | 616 | } |
617 | 617 | ||
618 | static void | 618 | static void |
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig index 8d8d9bc4b6ff..60c3bbb63e8e 100644 --- a/net/nfc/Kconfig +++ b/net/nfc/Kconfig | |||
@@ -3,8 +3,8 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | menuconfig NFC | 5 | menuconfig NFC |
6 | depends on NET && EXPERIMENTAL | 6 | depends on NET |
7 | tristate "NFC subsystem support (EXPERIMENTAL)" | 7 | tristate "NFC subsystem support" |
8 | default n | 8 | default n |
9 | help | 9 | help |
10 | Say Y here if you want to build support for NFC (Near field | 10 | Say Y here if you want to build support for NFC (Near field |
diff --git a/net/nfc/core.c b/net/nfc/core.c index 479bee36dc3e..aa64ea441676 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c | |||
@@ -40,6 +40,9 @@ | |||
40 | int nfc_devlist_generation; | 40 | int nfc_devlist_generation; |
41 | DEFINE_MUTEX(nfc_devlist_mutex); | 41 | DEFINE_MUTEX(nfc_devlist_mutex); |
42 | 42 | ||
43 | /* NFC device ID bitmap */ | ||
44 | static DEFINE_IDA(nfc_index_ida); | ||
45 | |||
43 | /** | 46 | /** |
44 | * nfc_dev_up - turn on the NFC device | 47 | * nfc_dev_up - turn on the NFC device |
45 | * | 48 | * |
@@ -181,6 +184,7 @@ int nfc_stop_poll(struct nfc_dev *dev) | |||
181 | 184 | ||
182 | dev->ops->stop_poll(dev); | 185 | dev->ops->stop_poll(dev); |
183 | dev->polling = false; | 186 | dev->polling = false; |
187 | dev->rf_mode = NFC_RF_NONE; | ||
184 | 188 | ||
185 | error: | 189 | error: |
186 | device_unlock(&dev->dev); | 190 | device_unlock(&dev->dev); |
@@ -194,7 +198,7 @@ static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx) | |||
194 | if (dev->n_targets == 0) | 198 | if (dev->n_targets == 0) |
195 | return NULL; | 199 | return NULL; |
196 | 200 | ||
197 | for (i = 0; i < dev->n_targets ; i++) { | 201 | for (i = 0; i < dev->n_targets; i++) { |
198 | if (dev->targets[i].idx == target_idx) | 202 | if (dev->targets[i].idx == target_idx) |
199 | return &dev->targets[i]; | 203 | return &dev->targets[i]; |
200 | } | 204 | } |
@@ -274,12 +278,14 @@ int nfc_dep_link_down(struct nfc_dev *dev) | |||
274 | if (!rc) { | 278 | if (!rc) { |
275 | dev->dep_link_up = false; | 279 | dev->dep_link_up = false; |
276 | dev->active_target = NULL; | 280 | dev->active_target = NULL; |
281 | dev->rf_mode = NFC_RF_NONE; | ||
277 | nfc_llcp_mac_is_down(dev); | 282 | nfc_llcp_mac_is_down(dev); |
278 | nfc_genl_dep_link_down_event(dev); | 283 | nfc_genl_dep_link_down_event(dev); |
279 | } | 284 | } |
280 | 285 | ||
281 | error: | 286 | error: |
282 | device_unlock(&dev->dev); | 287 | device_unlock(&dev->dev); |
288 | |||
283 | return rc; | 289 | return rc; |
284 | } | 290 | } |
285 | 291 | ||
@@ -503,6 +509,7 @@ EXPORT_SYMBOL(nfc_tm_activated); | |||
503 | int nfc_tm_deactivated(struct nfc_dev *dev) | 509 | int nfc_tm_deactivated(struct nfc_dev *dev) |
504 | { | 510 | { |
505 | dev->dep_link_up = false; | 511 | dev->dep_link_up = false; |
512 | dev->rf_mode = NFC_RF_NONE; | ||
506 | 513 | ||
507 | return nfc_genl_tm_deactivated(dev); | 514 | return nfc_genl_tm_deactivated(dev); |
508 | } | 515 | } |
@@ -697,6 +704,8 @@ static void nfc_check_pres_work(struct work_struct *work) | |||
697 | 704 | ||
698 | if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) { | 705 | if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) { |
699 | rc = dev->ops->check_presence(dev, dev->active_target); | 706 | rc = dev->ops->check_presence(dev, dev->active_target); |
707 | if (rc == -EOPNOTSUPP) | ||
708 | goto exit; | ||
700 | if (!rc) { | 709 | if (!rc) { |
701 | mod_timer(&dev->check_pres_timer, jiffies + | 710 | mod_timer(&dev->check_pres_timer, jiffies + |
702 | msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); | 711 | msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); |
@@ -708,6 +717,7 @@ static void nfc_check_pres_work(struct work_struct *work) | |||
708 | } | 717 | } |
709 | } | 718 | } |
710 | 719 | ||
720 | exit: | ||
711 | device_unlock(&dev->dev); | 721 | device_unlock(&dev->dev); |
712 | } | 722 | } |
713 | 723 | ||
@@ -753,7 +763,6 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, | |||
753 | u32 supported_protocols, | 763 | u32 supported_protocols, |
754 | int tx_headroom, int tx_tailroom) | 764 | int tx_headroom, int tx_tailroom) |
755 | { | 765 | { |
756 | static atomic_t dev_no = ATOMIC_INIT(0); | ||
757 | struct nfc_dev *dev; | 766 | struct nfc_dev *dev; |
758 | 767 | ||
759 | if (!ops->start_poll || !ops->stop_poll || !ops->activate_target || | 768 | if (!ops->start_poll || !ops->stop_poll || !ops->activate_target || |
@@ -767,11 +776,6 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, | |||
767 | if (!dev) | 776 | if (!dev) |
768 | return NULL; | 777 | return NULL; |
769 | 778 | ||
770 | dev->dev.class = &nfc_class; | ||
771 | dev->idx = atomic_inc_return(&dev_no) - 1; | ||
772 | dev_set_name(&dev->dev, "nfc%d", dev->idx); | ||
773 | device_initialize(&dev->dev); | ||
774 | |||
775 | dev->ops = ops; | 779 | dev->ops = ops; |
776 | dev->supported_protocols = supported_protocols; | 780 | dev->supported_protocols = supported_protocols; |
777 | dev->tx_headroom = tx_headroom; | 781 | dev->tx_headroom = tx_headroom; |
@@ -779,6 +783,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, | |||
779 | 783 | ||
780 | nfc_genl_data_init(&dev->genl_data); | 784 | nfc_genl_data_init(&dev->genl_data); |
781 | 785 | ||
786 | dev->rf_mode = NFC_RF_NONE; | ||
782 | 787 | ||
783 | /* first generation must not be 0 */ | 788 | /* first generation must not be 0 */ |
784 | dev->targets_generation = 1; | 789 | dev->targets_generation = 1; |
@@ -806,6 +811,14 @@ int nfc_register_device(struct nfc_dev *dev) | |||
806 | 811 | ||
807 | pr_debug("dev_name=%s\n", dev_name(&dev->dev)); | 812 | pr_debug("dev_name=%s\n", dev_name(&dev->dev)); |
808 | 813 | ||
814 | dev->idx = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL); | ||
815 | if (dev->idx < 0) | ||
816 | return dev->idx; | ||
817 | |||
818 | dev->dev.class = &nfc_class; | ||
819 | dev_set_name(&dev->dev, "nfc%d", dev->idx); | ||
820 | device_initialize(&dev->dev); | ||
821 | |||
809 | mutex_lock(&nfc_devlist_mutex); | 822 | mutex_lock(&nfc_devlist_mutex); |
810 | nfc_devlist_generation++; | 823 | nfc_devlist_generation++; |
811 | rc = device_add(&dev->dev); | 824 | rc = device_add(&dev->dev); |
@@ -834,10 +847,12 @@ EXPORT_SYMBOL(nfc_register_device); | |||
834 | */ | 847 | */ |
835 | void nfc_unregister_device(struct nfc_dev *dev) | 848 | void nfc_unregister_device(struct nfc_dev *dev) |
836 | { | 849 | { |
837 | int rc; | 850 | int rc, id; |
838 | 851 | ||
839 | pr_debug("dev_name=%s\n", dev_name(&dev->dev)); | 852 | pr_debug("dev_name=%s\n", dev_name(&dev->dev)); |
840 | 853 | ||
854 | id = dev->idx; | ||
855 | |||
841 | mutex_lock(&nfc_devlist_mutex); | 856 | mutex_lock(&nfc_devlist_mutex); |
842 | nfc_devlist_generation++; | 857 | nfc_devlist_generation++; |
843 | 858 | ||
@@ -856,6 +871,8 @@ void nfc_unregister_device(struct nfc_dev *dev) | |||
856 | pr_debug("The userspace won't be notified that the device %s was removed\n", | 871 | pr_debug("The userspace won't be notified that the device %s was removed\n", |
857 | dev_name(&dev->dev)); | 872 | dev_name(&dev->dev)); |
858 | 873 | ||
874 | ida_simple_remove(&nfc_index_ida, id); | ||
875 | |||
859 | } | 876 | } |
860 | EXPORT_SYMBOL(nfc_unregister_device); | 877 | EXPORT_SYMBOL(nfc_unregister_device); |
861 | 878 | ||
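nfc_register_device() now takes its index from an IDA instead of a monotonically increasing atomic counter, and only names and initializes the struct device once that id is known, so indices are reclaimed when a device is unregistered. A plain-C stand-in for ida_simple_get()/ida_simple_remove() showing why that matters:

    #include <stdio.h>

    #define MAX_IDS 32
    static int used[MAX_IDS];

    /* smallest free index, like ida_simple_get(&nfc_index_ida, 0, 0, ...) */
    static int toy_ida_get(void)
    {
        for (int i = 0; i < MAX_IDS; i++) {
            if (!used[i]) {
                used[i] = 1;
                return i;
            }
        }
        return -1;                       /* the real call returns -ENOSPC */
    }

    static void toy_ida_remove(int id)   /* like ida_simple_remove() */
    {
        used[id] = 0;
    }

    int main(void)
    {
        int a = toy_ida_get();           /* 0 -> "nfc0" */
        int b = toy_ida_get();           /* 1 -> "nfc1" */

        toy_ida_remove(a);               /* unregister nfc0 */
        printf("reused id: %d\n", toy_ida_get());   /* 0 again */
        (void)b;
        return 0;
    }

After removing id 0, the next allocation returns 0 again, so a re-plugged adapter can come back as nfc0 rather than consuming a fresh number forever.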
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c index 71c6a7086b8f..7d99410e6c1a 100644 --- a/net/nfc/hci/command.c +++ b/net/nfc/hci/command.c | |||
@@ -257,16 +257,16 @@ static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host, | |||
257 | *result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, | 257 | *result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, |
258 | NFC_HCI_ADM_CREATE_PIPE, | 258 | NFC_HCI_ADM_CREATE_PIPE, |
259 | (u8 *) ¶ms, sizeof(params), &skb); | 259 | (u8 *) ¶ms, sizeof(params), &skb); |
260 | if (*result == 0) { | 260 | if (*result < 0) |
261 | resp = (struct hci_create_pipe_resp *)skb->data; | 261 | return NFC_HCI_INVALID_PIPE; |
262 | pipe = resp->pipe; | ||
263 | kfree_skb(skb); | ||
264 | 262 | ||
265 | pr_debug("pipe created=%d\n", pipe); | 263 | resp = (struct hci_create_pipe_resp *)skb->data; |
264 | pipe = resp->pipe; | ||
265 | kfree_skb(skb); | ||
266 | 266 | ||
267 | return pipe; | 267 | pr_debug("pipe created=%d\n", pipe); |
268 | } else | 268 | |
269 | return NFC_HCI_INVALID_PIPE; | 269 | return pipe; |
270 | } | 270 | } |
271 | 271 | ||
272 | static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe) | 272 | static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe) |
@@ -279,8 +279,6 @@ static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe) | |||
279 | 279 | ||
280 | static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev) | 280 | static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev) |
281 | { | 281 | { |
282 | int r; | ||
283 | |||
284 | u8 param[2]; | 282 | u8 param[2]; |
285 | 283 | ||
286 | /* TODO: Find out what the identity reference data is | 284 | /* TODO: Find out what the identity reference data is |
@@ -288,10 +286,8 @@ static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev) | |||
288 | 286 | ||
289 | pr_debug("\n"); | 287 | pr_debug("\n"); |
290 | 288 | ||
291 | r = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, | 289 | return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, |
292 | NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL); | 290 | NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL); |
293 | |||
294 | return 0; | ||
295 | } | 291 | } |
296 | 292 | ||
297 | int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate) | 293 | int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate) |
@@ -348,7 +344,7 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate, | |||
348 | return -EADDRINUSE; | 344 | return -EADDRINUSE; |
349 | 345 | ||
350 | if (pipe != NFC_HCI_INVALID_PIPE) | 346 | if (pipe != NFC_HCI_INVALID_PIPE) |
351 | goto pipe_is_open; | 347 | goto open_pipe; |
352 | 348 | ||
353 | switch (dest_gate) { | 349 | switch (dest_gate) { |
354 | case NFC_HCI_LINK_MGMT_GATE: | 350 | case NFC_HCI_LINK_MGMT_GATE: |
@@ -365,6 +361,7 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate, | |||
365 | break; | 361 | break; |
366 | } | 362 | } |
367 | 363 | ||
364 | open_pipe: | ||
368 | r = nfc_hci_open_pipe(hdev, pipe); | 365 | r = nfc_hci_open_pipe(hdev, pipe); |
369 | if (r < 0) { | 366 | if (r < 0) { |
370 | if (pipe_created) | 367 | if (pipe_created) |
@@ -375,7 +372,6 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate, | |||
375 | return r; | 372 | return r; |
376 | } | 373 | } |
377 | 374 | ||
378 | pipe_is_open: | ||
379 | hdev->gate2pipe[dest_gate] = pipe; | 375 | hdev->gate2pipe[dest_gate] = pipe; |
380 | 376 | ||
381 | return 0; | 377 | return 0; |
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index 5fbb6e40793e..7bea574d5934 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c | |||
@@ -33,17 +33,20 @@ | |||
33 | /* Largest headroom needed for outgoing HCI commands */ | 33 | /* Largest headroom needed for outgoing HCI commands */ |
34 | #define HCI_CMDS_HEADROOM 1 | 34 | #define HCI_CMDS_HEADROOM 1 |
35 | 35 | ||
36 | static int nfc_hci_result_to_errno(u8 result) | 36 | int nfc_hci_result_to_errno(u8 result) |
37 | { | 37 | { |
38 | switch (result) { | 38 | switch (result) { |
39 | case NFC_HCI_ANY_OK: | 39 | case NFC_HCI_ANY_OK: |
40 | return 0; | 40 | return 0; |
41 | case NFC_HCI_ANY_E_REG_PAR_UNKNOWN: | ||
42 | return -EOPNOTSUPP; | ||
41 | case NFC_HCI_ANY_E_TIMEOUT: | 43 | case NFC_HCI_ANY_E_TIMEOUT: |
42 | return -ETIME; | 44 | return -ETIME; |
43 | default: | 45 | default: |
44 | return -1; | 46 | return -1; |
45 | } | 47 | } |
46 | } | 48 | } |
49 | EXPORT_SYMBOL(nfc_hci_result_to_errno); | ||
47 | 50 | ||
48 | static void nfc_hci_msg_tx_work(struct work_struct *work) | 51 | static void nfc_hci_msg_tx_work(struct work_struct *work) |
49 | { | 52 | { |
@@ -65,8 +68,9 @@ static void nfc_hci_msg_tx_work(struct work_struct *work) | |||
65 | -ETIME); | 68 | -ETIME); |
66 | kfree(hdev->cmd_pending_msg); | 69 | kfree(hdev->cmd_pending_msg); |
67 | hdev->cmd_pending_msg = NULL; | 70 | hdev->cmd_pending_msg = NULL; |
68 | } else | 71 | } else { |
69 | goto exit; | 72 | goto exit; |
73 | } | ||
70 | } | 74 | } |
71 | 75 | ||
72 | next_msg: | 76 | next_msg: |
@@ -166,7 +170,7 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, | |||
166 | kfree_skb(skb); | 170 | kfree_skb(skb); |
167 | } | 171 | } |
168 | 172 | ||
169 | static u32 nfc_hci_sak_to_protocol(u8 sak) | 173 | u32 nfc_hci_sak_to_protocol(u8 sak) |
170 | { | 174 | { |
171 | switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) { | 175 | switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) { |
172 | case NFC_HCI_TYPE_A_SEL_PROT_MIFARE: | 176 | case NFC_HCI_TYPE_A_SEL_PROT_MIFARE: |
@@ -181,8 +185,9 @@ static u32 nfc_hci_sak_to_protocol(u8 sak) | |||
181 | return 0xffffffff; | 185 | return 0xffffffff; |
182 | } | 186 | } |
183 | } | 187 | } |
188 | EXPORT_SYMBOL(nfc_hci_sak_to_protocol); | ||
184 | 189 | ||
185 | static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate) | 190 | int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate) |
186 | { | 191 | { |
187 | struct nfc_target *targets; | 192 | struct nfc_target *targets; |
188 | struct sk_buff *atqa_skb = NULL; | 193 | struct sk_buff *atqa_skb = NULL; |
@@ -263,7 +268,9 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate) | |||
263 | break; | 268 | break; |
264 | } | 269 | } |
265 | 270 | ||
266 | targets->hci_reader_gate = gate; | 271 | /* if driver set the new gate, we will skip the old one */ |
272 | if (targets->hci_reader_gate == 0x00) | ||
273 | targets->hci_reader_gate = gate; | ||
267 | 274 | ||
268 | r = nfc_targets_found(hdev->ndev, targets, 1); | 275 | r = nfc_targets_found(hdev->ndev, targets, 1); |
269 | 276 | ||
@@ -275,11 +282,18 @@ exit: | |||
275 | 282 | ||
276 | return r; | 283 | return r; |
277 | } | 284 | } |
285 | EXPORT_SYMBOL(nfc_hci_target_discovered); | ||
278 | 286 | ||
279 | void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, | 287 | void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, |
280 | struct sk_buff *skb) | 288 | struct sk_buff *skb) |
281 | { | 289 | { |
282 | int r = 0; | 290 | int r = 0; |
291 | u8 gate = nfc_hci_pipe2gate(hdev, pipe); | ||
292 | |||
293 | if (gate == 0xff) { | ||
294 | pr_err("Discarded event %x to unopened pipe %x\n", event, pipe); | ||
295 | goto exit; | ||
296 | } | ||
283 | 297 | ||
284 | switch (event) { | 298 | switch (event) { |
285 | case NFC_HCI_EVT_TARGET_DISCOVERED: | 299 | case NFC_HCI_EVT_TARGET_DISCOVERED: |
@@ -303,12 +317,14 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, | |||
303 | goto exit; | 317 | goto exit; |
304 | } | 318 | } |
305 | 319 | ||
306 | r = nfc_hci_target_discovered(hdev, | 320 | r = nfc_hci_target_discovered(hdev, gate); |
307 | nfc_hci_pipe2gate(hdev, pipe)); | ||
308 | break; | 321 | break; |
309 | default: | 322 | default: |
310 | /* TODO: Unknown events are hardware specific | 323 | if (hdev->ops->event_received) { |
311 | * pass them to the driver (needs a new hci_ops) */ | 324 | hdev->ops->event_received(hdev, gate, event, skb); |
325 | return; | ||
326 | } | ||
327 | |||
312 | break; | 328 | break; |
313 | } | 329 | } |
314 | 330 | ||
@@ -410,6 +426,10 @@ static int hci_dev_version(struct nfc_hci_dev *hdev) | |||
410 | 426 | ||
411 | r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE, | 427 | r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE, |
412 | NFC_HCI_ID_MGMT_VERSION_SW, &skb); | 428 | NFC_HCI_ID_MGMT_VERSION_SW, &skb); |
429 | if (r == -EOPNOTSUPP) { | ||
430 | pr_info("Software/Hardware info not available\n"); | ||
431 | return 0; | ||
432 | } | ||
413 | if (r < 0) | 433 | if (r < 0) |
414 | return r; | 434 | return r; |
415 | 435 | ||
@@ -527,7 +547,8 @@ static int hci_start_poll(struct nfc_dev *nfc_dev, | |||
527 | return hdev->ops->start_poll(hdev, im_protocols, tm_protocols); | 547 | return hdev->ops->start_poll(hdev, im_protocols, tm_protocols); |
528 | else | 548 | else |
529 | return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, | 549 | return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, |
530 | NFC_HCI_EVT_READER_REQUESTED, NULL, 0); | 550 | NFC_HCI_EVT_READER_REQUESTED, |
551 | NULL, 0); | ||
531 | } | 552 | } |
532 | 553 | ||
533 | static void hci_stop_poll(struct nfc_dev *nfc_dev) | 554 | static void hci_stop_poll(struct nfc_dev *nfc_dev) |
@@ -538,6 +559,28 @@ static void hci_stop_poll(struct nfc_dev *nfc_dev) | |||
538 | NFC_HCI_EVT_END_OPERATION, NULL, 0); | 559 | NFC_HCI_EVT_END_OPERATION, NULL, 0); |
539 | } | 560 | } |
540 | 561 | ||
562 | static int hci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, | ||
563 | __u8 comm_mode, __u8 *gb, size_t gb_len) | ||
564 | { | ||
565 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | ||
566 | |||
567 | if (hdev->ops->dep_link_up) | ||
568 | return hdev->ops->dep_link_up(hdev, target, comm_mode, | ||
569 | gb, gb_len); | ||
570 | |||
571 | return 0; | ||
572 | } | ||
573 | |||
574 | static int hci_dep_link_down(struct nfc_dev *nfc_dev) | ||
575 | { | ||
576 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | ||
577 | |||
578 | if (hdev->ops->dep_link_down) | ||
579 | return hdev->ops->dep_link_down(hdev); | ||
580 | |||
581 | return 0; | ||
582 | } | ||
583 | |||
541 | static int hci_activate_target(struct nfc_dev *nfc_dev, | 584 | static int hci_activate_target(struct nfc_dev *nfc_dev, |
542 | struct nfc_target *target, u32 protocol) | 585 | struct nfc_target *target, u32 protocol) |
543 | { | 586 | { |
@@ -586,8 +629,8 @@ static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, | |||
586 | switch (target->hci_reader_gate) { | 629 | switch (target->hci_reader_gate) { |
587 | case NFC_HCI_RF_READER_A_GATE: | 630 | case NFC_HCI_RF_READER_A_GATE: |
588 | case NFC_HCI_RF_READER_B_GATE: | 631 | case NFC_HCI_RF_READER_B_GATE: |
589 | if (hdev->ops->data_exchange) { | 632 | if (hdev->ops->im_transceive) { |
590 | r = hdev->ops->data_exchange(hdev, target, skb, cb, | 633 | r = hdev->ops->im_transceive(hdev, target, skb, cb, |
591 | cb_context); | 634 | cb_context); |
592 | if (r <= 0) /* handled */ | 635 | if (r <= 0) /* handled */ |
593 | break; | 636 | break; |
@@ -604,14 +647,14 @@ static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, | |||
604 | skb->len, hci_transceive_cb, hdev); | 647 | skb->len, hci_transceive_cb, hdev); |
605 | break; | 648 | break; |
606 | default: | 649 | default: |
607 | if (hdev->ops->data_exchange) { | 650 | if (hdev->ops->im_transceive) { |
608 | r = hdev->ops->data_exchange(hdev, target, skb, cb, | 651 | r = hdev->ops->im_transceive(hdev, target, skb, cb, |
609 | cb_context); | 652 | cb_context); |
610 | if (r == 1) | 653 | if (r == 1) |
611 | r = -ENOTSUPP; | 654 | r = -ENOTSUPP; |
612 | } | 655 | } else { |
613 | else | ||
614 | r = -ENOTSUPP; | 656 | r = -ENOTSUPP; |
657 | } | ||
615 | break; | 658 | break; |
616 | } | 659 | } |
617 | 660 | ||
@@ -620,6 +663,16 @@ static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, | |||
620 | return r; | 663 | return r; |
621 | } | 664 | } |
622 | 665 | ||
666 | static int hci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) | ||
667 | { | ||
668 | struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); | ||
669 | |||
670 | if (hdev->ops->tm_send) | ||
671 | return hdev->ops->tm_send(hdev, skb); | ||
672 | else | ||
673 | return -ENOTSUPP; | ||
674 | } | ||
675 | |||
623 | static int hci_check_presence(struct nfc_dev *nfc_dev, | 676 | static int hci_check_presence(struct nfc_dev *nfc_dev, |
624 | struct nfc_target *target) | 677 | struct nfc_target *target) |
625 | { | 678 | { |
@@ -723,9 +776,12 @@ static struct nfc_ops hci_nfc_ops = { | |||
723 | .dev_down = hci_dev_down, | 776 | .dev_down = hci_dev_down, |
724 | .start_poll = hci_start_poll, | 777 | .start_poll = hci_start_poll, |
725 | .stop_poll = hci_stop_poll, | 778 | .stop_poll = hci_stop_poll, |
779 | .dep_link_up = hci_dep_link_up, | ||
780 | .dep_link_down = hci_dep_link_down, | ||
726 | .activate_target = hci_activate_target, | 781 | .activate_target = hci_activate_target, |
727 | .deactivate_target = hci_deactivate_target, | 782 | .deactivate_target = hci_deactivate_target, |
728 | .im_transceive = hci_transceive, | 783 | .im_transceive = hci_transceive, |
784 | .tm_send = hci_tm_send, | ||
729 | .check_presence = hci_check_presence, | 785 | .check_presence = hci_check_presence, |
730 | }; | 786 | }; |
731 | 787 | ||
@@ -848,7 +904,7 @@ void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err) | |||
848 | } | 904 | } |
849 | EXPORT_SYMBOL(nfc_hci_driver_failure); | 905 | EXPORT_SYMBOL(nfc_hci_driver_failure); |
850 | 906 | ||
851 | void inline nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb) | 907 | void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb) |
852 | { | 908 | { |
853 | nfc_llc_rcv_from_drv(hdev->llc, skb); | 909 | nfc_llc_rcv_from_drv(hdev->llc, skb); |
854 | } | 910 | } |
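Several of the hci/core.c hunks above route work through optional driver hooks (dep_link_up, dep_link_down, tm_send, event_received, and the data_exchange to im_transceive rename), calling the callback when the driver provides it and falling back to a default otherwise. A small stand-alone sketch of that dispatch shape; toy_ops and the buffer-based signature are assumptions, not the real nfc_hci_ops:

    #include <errno.h>
    #include <stdio.h>

    struct toy_ops {
        int (*tm_send)(const char *buf, int len);   /* may be NULL */
    };

    static int core_tm_send(const struct toy_ops *ops, const char *buf, int len)
    {
        if (ops->tm_send)
            return ops->tm_send(buf, len);
        return -EOPNOTSUPP;   /* the hunk above uses -ENOTSUPP for tm_send */
    }

    static int drv_tm_send(const char *buf, int len)
    {
        printf("driver sent %d bytes: %s\n", len, buf);
        return 0;
    }

    int main(void)
    {
        struct toy_ops with_hook = { .tm_send = drv_tm_send };
        struct toy_ops no_hook   = { .tm_send = NULL };

        core_tm_send(&with_hook, "hello", 5);
        printf("missing hook -> %d\n", core_tm_send(&no_hook, "hello", 5));
        return 0;
    }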
diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c index ae1205ded87f..fe5e966e5b88 100644 --- a/net/nfc/hci/llc.c +++ b/net/nfc/hci/llc.c | |||
@@ -72,7 +72,7 @@ int nfc_llc_register(const char *name, struct nfc_llc_ops *ops) | |||
72 | llc_engine->ops = ops; | 72 | llc_engine->ops = ops; |
73 | 73 | ||
74 | INIT_LIST_HEAD(&llc_engine->entry); | 74 | INIT_LIST_HEAD(&llc_engine->entry); |
75 | list_add_tail (&llc_engine->entry, &llc_engines); | 75 | list_add_tail(&llc_engine->entry, &llc_engines); |
76 | 76 | ||
77 | return 0; | 77 | return 0; |
78 | } | 78 | } |
diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c index 01cbc72943cd..27b313befc35 100644 --- a/net/nfc/hci/llc_shdlc.c +++ b/net/nfc/hci/llc_shdlc.c | |||
@@ -634,9 +634,9 @@ static void llc_shdlc_sm_work(struct work_struct *work) | |||
634 | r = llc_shdlc_connect_initiate(shdlc); | 634 | r = llc_shdlc_connect_initiate(shdlc); |
635 | else | 635 | else |
636 | r = -ETIME; | 636 | r = -ETIME; |
637 | if (r < 0) | 637 | if (r < 0) { |
638 | llc_shdlc_connect_complete(shdlc, r); | 638 | llc_shdlc_connect_complete(shdlc, r); |
639 | else { | 639 | } else { |
640 | mod_timer(&shdlc->connect_timer, jiffies + | 640 | mod_timer(&shdlc->connect_timer, jiffies + |
641 | msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS)); | 641 | msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS)); |
642 | 642 | ||
@@ -682,9 +682,8 @@ static void llc_shdlc_sm_work(struct work_struct *work) | |||
682 | llc_shdlc_handle_send_queue(shdlc); | 682 | llc_shdlc_handle_send_queue(shdlc); |
683 | } | 683 | } |
684 | 684 | ||
685 | if (shdlc->hard_fault) { | 685 | if (shdlc->hard_fault) |
686 | shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault); | 686 | shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault); |
687 | } | ||
688 | break; | 687 | break; |
689 | default: | 688 | default: |
690 | break; | 689 | break; |
diff --git a/net/nfc/llcp/Kconfig b/net/nfc/llcp/Kconfig index fbf5e8150908..a1a41cd68255 100644 --- a/net/nfc/llcp/Kconfig +++ b/net/nfc/llcp/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config NFC_LLCP | 1 | config NFC_LLCP |
2 | depends on NFC && EXPERIMENTAL | 2 | depends on NFC |
3 | bool "NFC LLCP support (EXPERIMENTAL)" | 3 | bool "NFC LLCP support" |
4 | default n | 4 | default n |
5 | help | 5 | help |
6 | Say Y here if you want to build support for a kernel NFC LLCP | 6 | Say Y here if you want to build support for a kernel NFC LLCP |
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c index c45ccd6c094c..df24be48d4da 100644 --- a/net/nfc/llcp/commands.c +++ b/net/nfc/llcp/commands.c | |||
@@ -261,7 +261,6 @@ int nfc_llcp_disconnect(struct nfc_llcp_sock *sock) | |||
261 | struct sk_buff *skb; | 261 | struct sk_buff *skb; |
262 | struct nfc_dev *dev; | 262 | struct nfc_dev *dev; |
263 | struct nfc_llcp_local *local; | 263 | struct nfc_llcp_local *local; |
264 | u16 size = 0; | ||
265 | 264 | ||
266 | pr_debug("Sending DISC\n"); | 265 | pr_debug("Sending DISC\n"); |
267 | 266 | ||
@@ -273,17 +272,10 @@ int nfc_llcp_disconnect(struct nfc_llcp_sock *sock) | |||
273 | if (dev == NULL) | 272 | if (dev == NULL) |
274 | return -ENODEV; | 273 | return -ENODEV; |
275 | 274 | ||
276 | size += LLCP_HEADER_SIZE; | 275 | skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0); |
277 | size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; | ||
278 | |||
279 | skb = alloc_skb(size, GFP_ATOMIC); | ||
280 | if (skb == NULL) | 276 | if (skb == NULL) |
281 | return -ENOMEM; | 277 | return -ENOMEM; |
282 | 278 | ||
283 | skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); | ||
284 | |||
285 | skb = llcp_add_header(skb, sock->dsap, sock->ssap, LLCP_PDU_DISC); | ||
286 | |||
287 | skb_queue_tail(&local->tx_queue, skb); | 279 | skb_queue_tail(&local->tx_queue, skb); |
288 | 280 | ||
289 | return 0; | 281 | return 0; |
@@ -324,8 +316,7 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) | |||
324 | struct sk_buff *skb; | 316 | struct sk_buff *skb; |
325 | u8 *service_name_tlv = NULL, service_name_tlv_length; | 317 | u8 *service_name_tlv = NULL, service_name_tlv_length; |
326 | u8 *miux_tlv = NULL, miux_tlv_length; | 318 | u8 *miux_tlv = NULL, miux_tlv_length; |
327 | u8 *rw_tlv = NULL, rw_tlv_length, rw; | 319 | u8 *rw_tlv = NULL, rw_tlv_length; |
328 | __be16 miux; | ||
329 | int err; | 320 | int err; |
330 | u16 size = 0; | 321 | u16 size = 0; |
331 | 322 | ||
@@ -343,13 +334,11 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) | |||
343 | size += service_name_tlv_length; | 334 | size += service_name_tlv_length; |
344 | } | 335 | } |
345 | 336 | ||
346 | miux = cpu_to_be16(LLCP_MAX_MIUX); | 337 | miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0, |
347 | miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, | ||
348 | &miux_tlv_length); | 338 | &miux_tlv_length); |
349 | size += miux_tlv_length; | 339 | size += miux_tlv_length; |
350 | 340 | ||
351 | rw = LLCP_MAX_RW; | 341 | rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &local->rw, 0, &rw_tlv_length); |
352 | rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length); | ||
353 | size += rw_tlv_length; | 342 | size += rw_tlv_length; |
354 | 343 | ||
355 | pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len); | 344 | pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len); |
@@ -386,8 +375,7 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock) | |||
386 | struct nfc_llcp_local *local; | 375 | struct nfc_llcp_local *local; |
387 | struct sk_buff *skb; | 376 | struct sk_buff *skb; |
388 | u8 *miux_tlv = NULL, miux_tlv_length; | 377 | u8 *miux_tlv = NULL, miux_tlv_length; |
389 | u8 *rw_tlv = NULL, rw_tlv_length, rw; | 378 | u8 *rw_tlv = NULL, rw_tlv_length; |
390 | __be16 miux; | ||
391 | int err; | 379 | int err; |
392 | u16 size = 0; | 380 | u16 size = 0; |
393 | 381 | ||
@@ -397,13 +385,11 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock) | |||
397 | if (local == NULL) | 385 | if (local == NULL) |
398 | return -ENODEV; | 386 | return -ENODEV; |
399 | 387 | ||
400 | miux = cpu_to_be16(LLCP_MAX_MIUX); | 388 | miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0, |
401 | miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, | ||
402 | &miux_tlv_length); | 389 | &miux_tlv_length); |
403 | size += miux_tlv_length; | 390 | size += miux_tlv_length; |
404 | 391 | ||
405 | rw = LLCP_MAX_RW; | 392 | rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &local->rw, 0, &rw_tlv_length); |
406 | rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length); | ||
407 | size += rw_tlv_length; | 393 | size += rw_tlv_length; |
408 | 394 | ||
409 | skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size); | 395 | skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size); |
@@ -428,6 +414,52 @@ error_tlv: | |||
428 | return err; | 414 | return err; |
429 | } | 415 | } |
430 | 416 | ||
417 | int nfc_llcp_send_snl(struct nfc_llcp_local *local, u8 tid, u8 sap) | ||
418 | { | ||
419 | struct sk_buff *skb; | ||
420 | struct nfc_dev *dev; | ||
421 | u8 *sdres_tlv = NULL, sdres_tlv_length, sdres[2]; | ||
422 | u16 size = 0; | ||
423 | |||
424 | pr_debug("Sending SNL tid 0x%x sap 0x%x\n", tid, sap); | ||
425 | |||
426 | if (local == NULL) | ||
427 | return -ENODEV; | ||
428 | |||
429 | dev = local->dev; | ||
430 | if (dev == NULL) | ||
431 | return -ENODEV; | ||
432 | |||
433 | sdres[0] = tid; | ||
434 | sdres[1] = sap; | ||
435 | sdres_tlv = nfc_llcp_build_tlv(LLCP_TLV_SDRES, sdres, 0, | ||
436 | &sdres_tlv_length); | ||
437 | if (sdres_tlv == NULL) | ||
438 | return -ENOMEM; | ||
439 | |||
440 | size += LLCP_HEADER_SIZE; | ||
441 | size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; | ||
442 | size += sdres_tlv_length; | ||
443 | |||
444 | skb = alloc_skb(size, GFP_KERNEL); | ||
445 | if (skb == NULL) { | ||
446 | kfree(sdres_tlv); | ||
447 | return -ENOMEM; | ||
448 | } | ||
449 | |||
450 | skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); | ||
451 | |||
452 | skb = llcp_add_header(skb, LLCP_SAP_SDP, LLCP_SAP_SDP, LLCP_PDU_SNL); | ||
453 | |||
454 | memcpy(skb_put(skb, sdres_tlv_length), sdres_tlv, sdres_tlv_length); | ||
455 | |||
456 | skb_queue_tail(&local->tx_queue, skb); | ||
457 | |||
458 | kfree(sdres_tlv); | ||
459 | |||
460 | return 0; | ||
461 | } | ||
462 | |||
431 | int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason) | 463 | int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason) |
432 | { | 464 | { |
433 | struct sk_buff *skb; | 465 | struct sk_buff *skb; |
@@ -496,6 +528,23 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, | |||
496 | if (local == NULL) | 528 | if (local == NULL) |
497 | return -ENODEV; | 529 | return -ENODEV; |
498 | 530 | ||
531 | /* Remote is ready but has not acknowledged our frames */ | ||
532 | if((sock->remote_ready && | ||
533 | skb_queue_len(&sock->tx_pending_queue) >= sock->rw && | ||
534 | skb_queue_len(&sock->tx_queue) >= 2 * sock->rw)) { | ||
535 | pr_err("Pending queue is full %d frames\n", | ||
536 | skb_queue_len(&sock->tx_pending_queue)); | ||
537 | return -ENOBUFS; | ||
538 | } | ||
539 | |||
540 | /* Remote is not ready and we've been queueing enough frames */ | ||
541 | if ((!sock->remote_ready && | ||
542 | skb_queue_len(&sock->tx_queue) >= 2 * sock->rw)) { | ||
543 | pr_err("Tx queue is full %d frames\n", | ||
544 | skb_queue_len(&sock->tx_queue)); | ||
545 | return -ENOBUFS; | ||
546 | } | ||
547 | |||
499 | msg_data = kzalloc(len, GFP_KERNEL); | 548 | msg_data = kzalloc(len, GFP_KERNEL); |
500 | if (msg_data == NULL) | 549 | if (msg_data == NULL) |
501 | return -ENOMEM; | 550 | return -ENOMEM; |
@@ -541,6 +590,63 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, | |||
541 | return len; | 590 | return len; |
542 | } | 591 | } |
543 | 592 | ||
593 | int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap, | ||
594 | struct msghdr *msg, size_t len) | ||
595 | { | ||
596 | struct sk_buff *pdu; | ||
597 | struct nfc_llcp_local *local; | ||
598 | size_t frag_len = 0, remaining_len; | ||
599 | u8 *msg_ptr, *msg_data; | ||
600 | int err; | ||
601 | |||
602 | pr_debug("Send UI frame len %zd\n", len); | ||
603 | |||
604 | local = sock->local; | ||
605 | if (local == NULL) | ||
606 | return -ENODEV; | ||
607 | |||
608 | msg_data = kzalloc(len, GFP_KERNEL); | ||
609 | if (msg_data == NULL) | ||
610 | return -ENOMEM; | ||
611 | |||
612 | if (memcpy_fromiovec(msg_data, msg->msg_iov, len)) { | ||
613 | kfree(msg_data); | ||
614 | return -EFAULT; | ||
615 | } | ||
616 | |||
617 | remaining_len = len; | ||
618 | msg_ptr = msg_data; | ||
619 | |||
620 | while (remaining_len > 0) { | ||
621 | |||
622 | frag_len = min_t(size_t, sock->miu, remaining_len); | ||
623 | |||
624 | pr_debug("Fragment %zd bytes remaining %zd", | ||
625 | frag_len, remaining_len); | ||
626 | |||
627 | pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, | ||
628 | frag_len + LLCP_HEADER_SIZE, &err); | ||
629 | if (pdu == NULL) { | ||
630 | pr_err("Could not allocate PDU\n"); | ||
631 | continue; | ||
632 | } | ||
633 | |||
634 | pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI); | ||
635 | |||
636 | memcpy(skb_put(pdu, frag_len), msg_ptr, frag_len); | ||
637 | |||
638 | /* No need to check for the peer RW for UI frames */ | ||
639 | skb_queue_tail(&local->tx_queue, pdu); | ||
640 | |||
641 | remaining_len -= frag_len; | ||
642 | msg_ptr += frag_len; | ||
643 | } | ||
644 | |||
645 | kfree(msg_data); | ||
646 | |||
647 | return len; | ||
648 | } | ||
649 | |||
544 | int nfc_llcp_send_rr(struct nfc_llcp_sock *sock) | 650 | int nfc_llcp_send_rr(struct nfc_llcp_sock *sock) |
545 | { | 651 | { |
546 | struct sk_buff *skb; | 652 | struct sk_buff *skb; |
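The nfc_llcp_send_ui_frame() added above chops a connectionless payload into MIU-sized fragments, wraps each one in a UI header and queues it without consulting the receive window. The loop below reproduces just that slicing in stand-alone C; the queueing and header building are faked, and TOY_MIU stands in for sock->miu:

    #include <stdio.h>
    #include <string.h>

    #define TOY_MIU 4   /* the real code uses sock->miu */

    /* stands in for building an LLCP UI PDU and queueing it on tx_queue */
    static void toy_queue_ui_pdu(const char *frag, size_t len)
    {
        printf("UI PDU, %zu byte payload: %.*s\n", len, (int)len, frag);
    }

    int main(void)
    {
        const char msg[] = "connectionless payload";
        size_t remaining = strlen(msg);
        const char *p = msg;

        while (remaining > 0) {
            size_t frag_len = remaining < TOY_MIU ? remaining : TOY_MIU;

            toy_queue_ui_pdu(p, frag_len);   /* no RW window check for UI */
            p += frag_len;
            remaining -= frag_len;
        }
        return 0;
    }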
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 9e8f4b2801f6..2df87056c6df 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c | |||
@@ -45,12 +45,38 @@ void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk) | |||
45 | write_unlock(&l->lock); | 45 | write_unlock(&l->lock); |
46 | } | 46 | } |
47 | 47 | ||
48 | static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock) | ||
49 | { | ||
50 | struct nfc_llcp_local *local = sock->local; | ||
51 | struct sk_buff *s, *tmp; | ||
52 | |||
53 | pr_debug("%p\n", &sock->sk); | ||
54 | |||
55 | skb_queue_purge(&sock->tx_queue); | ||
56 | skb_queue_purge(&sock->tx_pending_queue); | ||
57 | skb_queue_purge(&sock->tx_backlog_queue); | ||
58 | |||
59 | if (local == NULL) | ||
60 | return; | ||
61 | |||
62 | /* Search for local pending SKBs that are related to this socket */ | ||
63 | skb_queue_walk_safe(&local->tx_queue, s, tmp) { | ||
64 | if (s->sk != &sock->sk) | ||
65 | continue; | ||
66 | |||
67 | skb_unlink(s, &local->tx_queue); | ||
68 | kfree_skb(s); | ||
69 | } | ||
70 | } | ||
71 | |||
48 | static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | 72 | static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) |
49 | { | 73 | { |
50 | struct sock *sk; | 74 | struct sock *sk; |
51 | struct hlist_node *node, *tmp; | 75 | struct hlist_node *node, *tmp; |
52 | struct nfc_llcp_sock *llcp_sock; | 76 | struct nfc_llcp_sock *llcp_sock; |
53 | 77 | ||
78 | skb_queue_purge(&local->tx_queue); | ||
79 | |||
54 | write_lock(&local->sockets.lock); | 80 | write_lock(&local->sockets.lock); |
55 | 81 | ||
56 | sk_for_each_safe(sk, node, tmp, &local->sockets.head) { | 82 | sk_for_each_safe(sk, node, tmp, &local->sockets.head) { |
@@ -58,6 +84,8 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | |||
58 | 84 | ||
59 | bh_lock_sock(sk); | 85 | bh_lock_sock(sk); |
60 | 86 | ||
87 | nfc_llcp_socket_purge(llcp_sock); | ||
88 | |||
61 | if (sk->sk_state == LLCP_CONNECTED) | 89 | if (sk->sk_state == LLCP_CONNECTED) |
62 | nfc_put_device(llcp_sock->dev); | 90 | nfc_put_device(llcp_sock->dev); |
63 | 91 | ||
@@ -65,7 +93,8 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | |||
65 | struct nfc_llcp_sock *lsk, *n; | 93 | struct nfc_llcp_sock *lsk, *n; |
66 | struct sock *accept_sk; | 94 | struct sock *accept_sk; |
67 | 95 | ||
68 | list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue, | 96 | list_for_each_entry_safe(lsk, n, |
97 | &llcp_sock->accept_queue, | ||
69 | accept_queue) { | 98 | accept_queue) { |
70 | accept_sk = &lsk->sk; | 99 | accept_sk = &lsk->sk; |
71 | bh_lock_sock(accept_sk); | 100 | bh_lock_sock(accept_sk); |
@@ -85,6 +114,16 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | |||
85 | } | 114 | } |
86 | } | 115 | } |
87 | 116 | ||
117 | /* | ||
118 | * If we have a connection less socket bound, we keep it alive | ||
119 | * if the device is still present. | ||
120 | */ | ||
121 | if (sk->sk_state == LLCP_BOUND && sk->sk_type == SOCK_DGRAM && | ||
122 | listen == true) { | ||
123 | bh_unlock_sock(sk); | ||
124 | continue; | ||
125 | } | ||
126 | |||
88 | sk->sk_state = LLCP_CLOSED; | 127 | sk->sk_state = LLCP_CLOSED; |
89 | 128 | ||
90 | bh_unlock_sock(sk); | 129 | bh_unlock_sock(sk); |
@@ -134,7 +173,7 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, | |||
134 | { | 173 | { |
135 | struct sock *sk; | 174 | struct sock *sk; |
136 | struct hlist_node *node; | 175 | struct hlist_node *node; |
137 | struct nfc_llcp_sock *llcp_sock; | 176 | struct nfc_llcp_sock *llcp_sock, *tmp_sock; |
138 | 177 | ||
139 | pr_debug("ssap dsap %d %d\n", ssap, dsap); | 178 | pr_debug("ssap dsap %d %d\n", ssap, dsap); |
140 | 179 | ||
@@ -146,10 +185,12 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, | |||
146 | llcp_sock = NULL; | 185 | llcp_sock = NULL; |
147 | 186 | ||
148 | sk_for_each(sk, node, &local->sockets.head) { | 187 | sk_for_each(sk, node, &local->sockets.head) { |
149 | llcp_sock = nfc_llcp_sock(sk); | 188 | tmp_sock = nfc_llcp_sock(sk); |
150 | 189 | ||
151 | if (llcp_sock->ssap == ssap && llcp_sock->dsap == dsap) | 190 | if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) { |
191 | llcp_sock = tmp_sock; | ||
152 | break; | 192 | break; |
193 | } | ||
153 | } | 194 | } |
154 | 195 | ||
155 | read_unlock(&local->sockets.lock); | 196 | read_unlock(&local->sockets.lock); |
@@ -249,7 +290,12 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local, | |||
249 | 290 | ||
250 | pr_debug("llcp sock %p\n", tmp_sock); | 291 | pr_debug("llcp sock %p\n", tmp_sock); |
251 | 292 | ||
252 | if (tmp_sock->sk.sk_state != LLCP_LISTEN) | 293 | if (tmp_sock->sk.sk_type == SOCK_STREAM && |
294 | tmp_sock->sk.sk_state != LLCP_LISTEN) | ||
295 | continue; | ||
296 | |||
297 | if (tmp_sock->sk.sk_type == SOCK_DGRAM && | ||
298 | tmp_sock->sk.sk_state != LLCP_BOUND) | ||
253 | continue; | 299 | continue; |
254 | 300 | ||
255 | if (tmp_sock->service_name == NULL || | 301 | if (tmp_sock->service_name == NULL || |
@@ -421,10 +467,9 @@ static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local) | |||
421 | static int nfc_llcp_build_gb(struct nfc_llcp_local *local) | 467 | static int nfc_llcp_build_gb(struct nfc_llcp_local *local) |
422 | { | 468 | { |
423 | u8 *gb_cur, *version_tlv, version, version_length; | 469 | u8 *gb_cur, *version_tlv, version, version_length; |
424 | u8 *lto_tlv, lto, lto_length; | 470 | u8 *lto_tlv, lto_length; |
425 | u8 *wks_tlv, wks_length; | 471 | u8 *wks_tlv, wks_length; |
426 | u8 *miux_tlv, miux_length; | 472 | u8 *miux_tlv, miux_length; |
427 | __be16 miux; | ||
428 | u8 gb_len = 0; | 473 | u8 gb_len = 0; |
429 | int ret = 0; | 474 | int ret = 0; |
430 | 475 | ||
@@ -433,9 +478,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) | |||
433 | 1, &version_length); | 478 | 1, &version_length); |
434 | gb_len += version_length; | 479 | gb_len += version_length; |
435 | 480 | ||
436 | /* 1500 ms */ | 481 | lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, <o_length); |
437 | lto = 150; | ||
438 | lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, <o, 1, <o_length); | ||
439 | gb_len += lto_length; | 482 | gb_len += lto_length; |
440 | 483 | ||
441 | pr_debug("Local wks 0x%lx\n", local->local_wks); | 484 | pr_debug("Local wks 0x%lx\n", local->local_wks); |
@@ -443,8 +486,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) | |||
443 | &wks_length); | 486 | &wks_length); |
444 | gb_len += wks_length; | 487 | gb_len += wks_length; |
445 | 488 | ||
446 | miux = cpu_to_be16(LLCP_MAX_MIUX); | 489 | miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0, |
447 | miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, | ||
448 | &miux_length); | 490 | &miux_length); |
449 | gb_len += miux_length; | 491 | gb_len += miux_length; |
450 | 492 | ||
@@ -610,7 +652,12 @@ static void nfc_llcp_tx_work(struct work_struct *work) | |||
610 | if (skb != NULL) { | 652 | if (skb != NULL) { |
611 | sk = skb->sk; | 653 | sk = skb->sk; |
612 | llcp_sock = nfc_llcp_sock(sk); | 654 | llcp_sock = nfc_llcp_sock(sk); |
613 | if (llcp_sock != NULL) { | 655 | |
656 | if (llcp_sock == NULL && nfc_llcp_ptype(skb) == LLCP_PDU_I) { | ||
657 | nfc_llcp_send_symm(local->dev); | ||
658 | } else { | ||
659 | struct sk_buff *copy_skb = NULL; | ||
660 | u8 ptype = nfc_llcp_ptype(skb); | ||
614 | int ret; | 661 | int ret; |
615 | 662 | ||
616 | pr_debug("Sending pending skb\n"); | 663 | pr_debug("Sending pending skb\n"); |
@@ -618,24 +665,29 @@ static void nfc_llcp_tx_work(struct work_struct *work) | |||
618 | DUMP_PREFIX_OFFSET, 16, 1, | 665 | DUMP_PREFIX_OFFSET, 16, 1, |
619 | skb->data, skb->len, true); | 666 | skb->data, skb->len, true); |
620 | 667 | ||
668 | if (ptype == LLCP_PDU_I) | ||
669 | copy_skb = skb_copy(skb, GFP_ATOMIC); | ||
670 | |||
621 | nfc_llcp_send_to_raw_sock(local, skb, | 671 | nfc_llcp_send_to_raw_sock(local, skb, |
622 | NFC_LLCP_DIRECTION_TX); | 672 | NFC_LLCP_DIRECTION_TX); |
623 | 673 | ||
624 | ret = nfc_data_exchange(local->dev, local->target_idx, | 674 | ret = nfc_data_exchange(local->dev, local->target_idx, |
625 | skb, nfc_llcp_recv, local); | 675 | skb, nfc_llcp_recv, local); |
626 | 676 | ||
627 | if (!ret && nfc_llcp_ptype(skb) == LLCP_PDU_I) { | 677 | if (ret) { |
628 | skb = skb_get(skb); | 678 | kfree_skb(copy_skb); |
629 | skb_queue_tail(&llcp_sock->tx_pending_queue, | 679 | goto out; |
630 | skb); | ||
631 | } | 680 | } |
632 | } else { | 681 | |
633 | nfc_llcp_send_symm(local->dev); | 682 | if (ptype == LLCP_PDU_I && copy_skb) |
683 | skb_queue_tail(&llcp_sock->tx_pending_queue, | ||
684 | copy_skb); | ||
634 | } | 685 | } |
635 | } else { | 686 | } else { |
636 | nfc_llcp_send_symm(local->dev); | 687 | nfc_llcp_send_symm(local->dev); |
637 | } | 688 | } |
638 | 689 | ||
690 | out: | ||
639 | mod_timer(&local->link_timer, | 691 | mod_timer(&local->link_timer, |
640 | jiffies + msecs_to_jiffies(2 * local->remote_lto)); | 692 | jiffies + msecs_to_jiffies(2 * local->remote_lto)); |
641 | } | 693 | } |
@@ -704,6 +756,39 @@ static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len) | |||
704 | return NULL; | 756 | return NULL; |
705 | } | 757 | } |
706 | 758 | ||
759 | static void nfc_llcp_recv_ui(struct nfc_llcp_local *local, | ||
760 | struct sk_buff *skb) | ||
761 | { | ||
762 | struct nfc_llcp_sock *llcp_sock; | ||
763 | struct nfc_llcp_ui_cb *ui_cb; | ||
764 | u8 dsap, ssap; | ||
765 | |||
766 | dsap = nfc_llcp_dsap(skb); | ||
767 | ssap = nfc_llcp_ssap(skb); | ||
768 | |||
769 | ui_cb = nfc_llcp_ui_skb_cb(skb); | ||
770 | ui_cb->dsap = dsap; | ||
771 | ui_cb->ssap = ssap; | ||
772 | |||
773 | printk("%s %d %d\n", __func__, dsap, ssap); | ||
774 | |||
775 | pr_debug("%d %d\n", dsap, ssap); | ||
776 | |||
777 | /* We're looking for a bound socket, not a client one */ | ||
778 | llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP); | ||
779 | if (llcp_sock == NULL || llcp_sock->sk.sk_type != SOCK_DGRAM) | ||
780 | return; | ||
781 | |||
782 | /* There is no sequence with UI frames */ | ||
783 | skb_pull(skb, LLCP_HEADER_SIZE); | ||
784 | if (sock_queue_rcv_skb(&llcp_sock->sk, skb)) { | ||
785 | pr_err("receive queue is full\n"); | ||
786 | skb_queue_head(&llcp_sock->tx_backlog_queue, skb); | ||
787 | } | ||
788 | |||
789 | nfc_llcp_sock_put(llcp_sock); | ||
790 | } | ||
791 | |||
707 | static void nfc_llcp_recv_connect(struct nfc_llcp_local *local, | 792 | static void nfc_llcp_recv_connect(struct nfc_llcp_local *local, |
708 | struct sk_buff *skb) | 793 | struct sk_buff *skb) |
709 | { | 794 | { |
@@ -823,9 +908,6 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local, | |||
823 | fail: | 908 | fail: |
824 | /* Send DM */ | 909 | /* Send DM */ |
825 | nfc_llcp_send_dm(local, dsap, ssap, reason); | 910 | nfc_llcp_send_dm(local, dsap, ssap, reason); |
826 | |||
827 | return; | ||
828 | |||
829 | } | 911 | } |
830 | 912 | ||
831 | int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock) | 913 | int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock) |
@@ -953,6 +1035,9 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local, | |||
953 | 1035 | ||
954 | sk = &llcp_sock->sk; | 1036 | sk = &llcp_sock->sk; |
955 | lock_sock(sk); | 1037 | lock_sock(sk); |
1038 | |||
1039 | nfc_llcp_socket_purge(llcp_sock); | ||
1040 | |||
956 | if (sk->sk_state == LLCP_CLOSED) { | 1041 | if (sk->sk_state == LLCP_CLOSED) { |
957 | release_sock(sk); | 1042 | release_sock(sk); |
958 | nfc_llcp_sock_put(llcp_sock); | 1043 | nfc_llcp_sock_put(llcp_sock); |
@@ -1027,7 +1112,7 @@ static void nfc_llcp_recv_dm(struct nfc_llcp_local *local, struct sk_buff *skb) | |||
1027 | } | 1112 | } |
1028 | 1113 | ||
1029 | if (llcp_sock == NULL) { | 1114 | if (llcp_sock == NULL) { |
1030 | pr_err("Invalid DM\n"); | 1115 | pr_debug("Already closed\n"); |
1031 | return; | 1116 | return; |
1032 | } | 1117 | } |
1033 | 1118 | ||
@@ -1038,8 +1123,100 @@ static void nfc_llcp_recv_dm(struct nfc_llcp_local *local, struct sk_buff *skb) | |||
1038 | sk->sk_state_change(sk); | 1123 | sk->sk_state_change(sk); |
1039 | 1124 | ||
1040 | nfc_llcp_sock_put(llcp_sock); | 1125 | nfc_llcp_sock_put(llcp_sock); |
1126 | } | ||
1041 | 1127 | ||
1042 | return; | 1128 | static void nfc_llcp_recv_snl(struct nfc_llcp_local *local, |
1129 | struct sk_buff *skb) | ||
1130 | { | ||
1131 | struct nfc_llcp_sock *llcp_sock; | ||
1132 | u8 dsap, ssap, *tlv, type, length, tid, sap; | ||
1133 | u16 tlv_len, offset; | ||
1134 | char *service_name; | ||
1135 | size_t service_name_len; | ||
1136 | |||
1137 | dsap = nfc_llcp_dsap(skb); | ||
1138 | ssap = nfc_llcp_ssap(skb); | ||
1139 | |||
1140 | pr_debug("%d %d\n", dsap, ssap); | ||
1141 | |||
1142 | if (dsap != LLCP_SAP_SDP || ssap != LLCP_SAP_SDP) { | ||
1143 | pr_err("Wrong SNL SAP\n"); | ||
1144 | return; | ||
1145 | } | ||
1146 | |||
1147 | tlv = &skb->data[LLCP_HEADER_SIZE]; | ||
1148 | tlv_len = skb->len - LLCP_HEADER_SIZE; | ||
1149 | offset = 0; | ||
1150 | |||
1151 | while (offset < tlv_len) { | ||
1152 | type = tlv[0]; | ||
1153 | length = tlv[1]; | ||
1154 | |||
1155 | switch (type) { | ||
1156 | case LLCP_TLV_SDREQ: | ||
1157 | tid = tlv[2]; | ||
1158 | service_name = (char *) &tlv[3]; | ||
1159 | service_name_len = length - 1; | ||
1160 | |||
1161 | pr_debug("Looking for %.16s\n", service_name); | ||
1162 | |||
1163 | if (service_name_len == strlen("urn:nfc:sn:sdp") && | ||
1164 | !strncmp(service_name, "urn:nfc:sn:sdp", | ||
1165 | service_name_len)) { | ||
1166 | sap = 1; | ||
1167 | goto send_snl; | ||
1168 | } | ||
1169 | |||
1170 | llcp_sock = nfc_llcp_sock_from_sn(local, service_name, | ||
1171 | service_name_len); | ||
1172 | if (!llcp_sock) { | ||
1173 | sap = 0; | ||
1174 | goto send_snl; | ||
1175 | } | ||
1176 | |||
1177 | /* | ||
1178 | * We found a socket but its ssap has not been reserved | ||
1179 | * yet. We need to assign it for good and send a reply. | ||
1180 | * The ssap will be freed when the socket is closed. | ||
1181 | */ | ||
1182 | if (llcp_sock->ssap == LLCP_SDP_UNBOUND) { | ||
1183 | atomic_t *client_count; | ||
1184 | |||
1185 | sap = nfc_llcp_reserve_sdp_ssap(local); | ||
1186 | |||
1187 | pr_debug("Reserving %d\n", sap); | ||
1188 | |||
1189 | if (sap == LLCP_SAP_MAX) { | ||
1190 | sap = 0; | ||
1191 | goto send_snl; | ||
1192 | } | ||
1193 | |||
1194 | client_count = | ||
1195 | &local->local_sdp_cnt[sap - | ||
1196 | LLCP_WKS_NUM_SAP]; | ||
1197 | |||
1198 | atomic_inc(client_count); | ||
1199 | |||
1200 | llcp_sock->ssap = sap; | ||
1201 | llcp_sock->reserved_ssap = sap; | ||
1202 | } else { | ||
1203 | sap = llcp_sock->ssap; | ||
1204 | } | ||
1205 | |||
1206 | pr_debug("%p %d\n", llcp_sock, sap); | ||
1207 | |||
1208 | send_snl: | ||
1209 | nfc_llcp_send_snl(local, tid, sap); | ||
1210 | break; | ||
1211 | |||
1212 | default: | ||
1213 | pr_err("Invalid SNL tlv value 0x%x\n", type); | ||
1214 | break; | ||
1215 | } | ||
1216 | |||
1217 | offset += length + 2; | ||
1218 | tlv += length + 2; | ||
1219 | } | ||
1043 | } | 1220 | } |
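The SNL handler above walks a type/length/value list in which an SDREQ entry carries a transaction id byte followed by the service name, which is why the name spans length - 1 bytes and the cursor advances by length + 2. A standalone sketch of that layout, not kernel code: the 0x08 type value (taken as the usual LLCP_TLV_SDREQ encoding) and the 0x01 tid are assumptions for illustration.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	const char name[] = "urn:nfc:sn:sdp";	/* 14 bytes, no NUL on the wire */
	size_t name_len = strlen(name);
	uint8_t tlv[2 + 1 + 14];

	tlv[0] = 0x08;				/* assumed value of LLCP_TLV_SDREQ */
	tlv[1] = (uint8_t)(1 + name_len);	/* length covers tid + service name */
	tlv[2] = 0x01;				/* transaction id, arbitrary */
	memcpy(&tlv[3], name, name_len);

	/* Same bookkeeping as the parser above: the service name spans
	 * length - 1 bytes and the cursor advances by length + 2. */
	printf("type=0x%02x length=%u name_len=%u next_offset=%u\n",
	       (unsigned)tlv[0], (unsigned)tlv[1],
	       (unsigned)(tlv[1] - 1), (unsigned)(tlv[1] + 2));
	return 0;
}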
1044 | 1221 | ||
1045 | static void nfc_llcp_rx_work(struct work_struct *work) | 1222 | static void nfc_llcp_rx_work(struct work_struct *work) |
@@ -1072,6 +1249,11 @@ static void nfc_llcp_rx_work(struct work_struct *work) | |||
1072 | pr_debug("SYMM\n"); | 1249 | pr_debug("SYMM\n"); |
1073 | break; | 1250 | break; |
1074 | 1251 | ||
1252 | case LLCP_PDU_UI: | ||
1253 | pr_debug("UI\n"); | ||
1254 | nfc_llcp_recv_ui(local, skb); | ||
1255 | break; | ||
1256 | |||
1075 | case LLCP_PDU_CONNECT: | 1257 | case LLCP_PDU_CONNECT: |
1076 | pr_debug("CONNECT\n"); | 1258 | pr_debug("CONNECT\n"); |
1077 | nfc_llcp_recv_connect(local, skb); | 1259 | nfc_llcp_recv_connect(local, skb); |
@@ -1092,6 +1274,11 @@ static void nfc_llcp_rx_work(struct work_struct *work) | |||
1092 | nfc_llcp_recv_dm(local, skb); | 1274 | nfc_llcp_recv_dm(local, skb); |
1093 | break; | 1275 | break; |
1094 | 1276 | ||
1277 | case LLCP_PDU_SNL: | ||
1278 | pr_debug("SNL\n"); | ||
1279 | nfc_llcp_recv_snl(local, skb); | ||
1280 | break; | ||
1281 | |||
1095 | case LLCP_PDU_I: | 1282 | case LLCP_PDU_I: |
1096 | case LLCP_PDU_RR: | 1283 | case LLCP_PDU_RR: |
1097 | case LLCP_PDU_RNR: | 1284 | case LLCP_PDU_RNR: |
@@ -1104,8 +1291,6 @@ static void nfc_llcp_rx_work(struct work_struct *work) | |||
1104 | schedule_work(&local->tx_work); | 1291 | schedule_work(&local->tx_work); |
1105 | kfree_skb(local->rx_pending); | 1292 | kfree_skb(local->rx_pending); |
1106 | local->rx_pending = NULL; | 1293 | local->rx_pending = NULL; |
1107 | |||
1108 | return; | ||
1109 | } | 1294 | } |
1110 | 1295 | ||
1111 | void nfc_llcp_recv(void *data, struct sk_buff *skb, int err) | 1296 | void nfc_llcp_recv(void *data, struct sk_buff *skb, int err) |
@@ -1121,8 +1306,6 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err) | |||
1121 | local->rx_pending = skb_get(skb); | 1306 | local->rx_pending = skb_get(skb); |
1122 | del_timer(&local->link_timer); | 1307 | del_timer(&local->link_timer); |
1123 | schedule_work(&local->rx_work); | 1308 | schedule_work(&local->rx_work); |
1124 | |||
1125 | return; | ||
1126 | } | 1309 | } |
1127 | 1310 | ||
1128 | int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb) | 1311 | int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb) |
@@ -1205,6 +1388,10 @@ int nfc_llcp_register_device(struct nfc_dev *ndev) | |||
1205 | rwlock_init(&local->connecting_sockets.lock); | 1388 | rwlock_init(&local->connecting_sockets.lock); |
1206 | rwlock_init(&local->raw_sockets.lock); | 1389 | rwlock_init(&local->raw_sockets.lock); |
1207 | 1390 | ||
1391 | local->lto = 150; /* 1500 ms */ | ||
1392 | local->rw = LLCP_MAX_RW; | ||
1393 | local->miux = cpu_to_be16(LLCP_MAX_MIUX); | ||
1394 | |||
1208 | nfc_llcp_build_gb(local); | 1395 | nfc_llcp_build_gb(local); |
1209 | 1396 | ||
1210 | local->remote_miu = LLCP_DEFAULT_MIU; | 1397 | local->remote_miu = LLCP_DEFAULT_MIU; |
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h index fdb2d24e60bd..0d62366f8cc3 100644 --- a/net/nfc/llcp/llcp.h +++ b/net/nfc/llcp/llcp.h | |||
@@ -64,6 +64,9 @@ struct nfc_llcp_local { | |||
64 | u32 target_idx; | 64 | u32 target_idx; |
65 | u8 rf_mode; | 65 | u8 rf_mode; |
66 | u8 comm_mode; | 66 | u8 comm_mode; |
67 | u8 lto; | ||
68 | u8 rw; | ||
69 | __be16 miux; | ||
67 | unsigned long local_wks; /* Well known services */ | 70 | unsigned long local_wks; /* Well known services */ |
68 | unsigned long local_sdp; /* Local services */ | 71 | unsigned long local_sdp; /* Local services */ |
69 | unsigned long local_sap; /* Local SAPs, not available for discovery */ | 72 | unsigned long local_sap; /* Local SAPs, not available for discovery */ |
@@ -124,6 +127,13 @@ struct nfc_llcp_sock { | |||
124 | struct sock *parent; | 127 | struct sock *parent; |
125 | }; | 128 | }; |
126 | 129 | ||
130 | struct nfc_llcp_ui_cb { | ||
131 | __u8 dsap; | ||
132 | __u8 ssap; | ||
133 | }; | ||
134 | |||
135 | #define nfc_llcp_ui_skb_cb(__skb) ((struct nfc_llcp_ui_cb *)&((__skb)->cb[0])) | ||
136 | |||
127 | #define nfc_llcp_sock(sk) ((struct nfc_llcp_sock *) (sk)) | 137 | #define nfc_llcp_sock(sk) ((struct nfc_llcp_sock *) (sk)) |
128 | #define nfc_llcp_dev(sk) (nfc_llcp_sock((sk))->dev) | 138 | #define nfc_llcp_dev(sk) (nfc_llcp_sock((sk))->dev) |
129 | 139 | ||
@@ -209,10 +219,13 @@ int nfc_llcp_disconnect(struct nfc_llcp_sock *sock); | |||
209 | int nfc_llcp_send_symm(struct nfc_dev *dev); | 219 | int nfc_llcp_send_symm(struct nfc_dev *dev); |
210 | int nfc_llcp_send_connect(struct nfc_llcp_sock *sock); | 220 | int nfc_llcp_send_connect(struct nfc_llcp_sock *sock); |
211 | int nfc_llcp_send_cc(struct nfc_llcp_sock *sock); | 221 | int nfc_llcp_send_cc(struct nfc_llcp_sock *sock); |
222 | int nfc_llcp_send_snl(struct nfc_llcp_local *local, u8 tid, u8 sap); | ||
212 | int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason); | 223 | int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason); |
213 | int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock); | 224 | int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock); |
214 | int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, | 225 | int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, |
215 | struct msghdr *msg, size_t len); | 226 | struct msghdr *msg, size_t len); |
227 | int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap, | ||
228 | struct msghdr *msg, size_t len); | ||
216 | int nfc_llcp_send_rr(struct nfc_llcp_sock *sock); | 229 | int nfc_llcp_send_rr(struct nfc_llcp_sock *sock); |
217 | 230 | ||
218 | /* Socket API */ | 231 | /* Socket API */ |
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 63e4cdc92376..0fa1e92ceac8 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c | |||
@@ -205,8 +205,8 @@ static int llcp_sock_listen(struct socket *sock, int backlog) | |||
205 | 205 | ||
206 | lock_sock(sk); | 206 | lock_sock(sk); |
207 | 207 | ||
208 | if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) | 208 | if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) || |
209 | || sk->sk_state != LLCP_BOUND) { | 209 | sk->sk_state != LLCP_BOUND) { |
210 | ret = -EBADFD; | 210 | ret = -EBADFD; |
211 | goto error; | 211 | goto error; |
212 | } | 212 | } |
@@ -608,6 +608,25 @@ static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
608 | 608 | ||
609 | lock_sock(sk); | 609 | lock_sock(sk); |
610 | 610 | ||
611 | if (sk->sk_type == SOCK_DGRAM) { | ||
612 | struct sockaddr_nfc_llcp *addr = | ||
613 | (struct sockaddr_nfc_llcp *)msg->msg_name; | ||
614 | |||
615 | if (msg->msg_namelen < sizeof(*addr)) { | ||
616 | release_sock(sk); | ||
617 | |||
618 | pr_err("Invalid socket address length %d\n", | ||
619 | msg->msg_namelen); | ||
620 | |||
621 | return -EINVAL; | ||
622 | } | ||
623 | |||
624 | release_sock(sk); | ||
625 | |||
626 | return nfc_llcp_send_ui_frame(llcp_sock, addr->dsap, addr->ssap, | ||
627 | msg, len); | ||
628 | } | ||
629 | |||
611 | if (sk->sk_state != LLCP_CONNECTED) { | 630 | if (sk->sk_state != LLCP_CONNECTED) { |
612 | release_sock(sk); | 631 | release_sock(sk); |
613 | return -ENOTCONN; | 632 | return -ENOTCONN; |
@@ -663,11 +682,28 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
663 | return -EFAULT; | 682 | return -EFAULT; |
664 | } | 683 | } |
665 | 684 | ||
685 | if (sk->sk_type == SOCK_DGRAM && msg->msg_name) { | ||
686 | struct nfc_llcp_ui_cb *ui_cb = nfc_llcp_ui_skb_cb(skb); | ||
687 | struct sockaddr_nfc_llcp sockaddr; | ||
688 | |||
689 | pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); | ||
690 | |||
691 | sockaddr.sa_family = AF_NFC; | ||
692 | sockaddr.nfc_protocol = NFC_PROTO_NFC_DEP; | ||
693 | sockaddr.dsap = ui_cb->dsap; | ||
694 | sockaddr.ssap = ui_cb->ssap; | ||
695 | |||
696 | memcpy(msg->msg_name, &sockaddr, sizeof(sockaddr)); | ||
697 | msg->msg_namelen = sizeof(sockaddr); | ||
698 | } | ||
699 | |||
666 | /* Mark read part of skb as used */ | 700 | /* Mark read part of skb as used */ |
667 | if (!(flags & MSG_PEEK)) { | 701 | if (!(flags & MSG_PEEK)) { |
668 | 702 | ||
669 | /* SOCK_STREAM: re-queue skb if it contains unreceived data */ | 703 | /* SOCK_STREAM: re-queue skb if it contains unreceived data */ |
670 | if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_RAW) { | 704 | if (sk->sk_type == SOCK_STREAM || |
705 | sk->sk_type == SOCK_DGRAM || | ||
706 | sk->sk_type == SOCK_RAW) { | ||
671 | skb_pull(skb, copied); | 707 | skb_pull(skb, copied); |
672 | if (skb->len) { | 708 | if (skb->len) { |
673 | skb_queue_head(&sk->sk_receive_queue, skb); | 709 | skb_queue_head(&sk->sk_receive_queue, skb); |
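The two sock.c hunks above add a connection-less path: sendmsg() on a SOCK_DGRAM LLCP socket takes the destination from a struct sockaddr_nfc_llcp and hands the payload to nfc_llcp_send_ui_frame(), while recvmsg() fills the same structure back from the per-skb UI control block. A hypothetical userspace sketch of the send side, assuming the LLCP family accepts SOCK_DGRAM as the sendmsg path implies; the SAP value 0x20 and the payload are made up, and binding the socket to an adapter is omitted.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/nfc.h>

#ifndef AF_NFC
#define AF_NFC 39	/* value from include/linux/socket.h */
#endif

int main(void)
{
	struct sockaddr_nfc_llcp addr;
	const char payload[] = "hello";
	int fd = socket(AF_NFC, SOCK_DGRAM, NFC_SOCKPROTO_LLCP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sa_family = AF_NFC;
	addr.nfc_protocol = NFC_PROTO_NFC_DEP;
	addr.dsap = 0x20;	/* remote SAP, made up for illustration */
	addr.ssap = 0x20;	/* local SAP, made up for illustration */

	/* llcp_sock_sendmsg() rejects msg_namelen < sizeof(addr), then hands
	 * the buffer to nfc_llcp_send_ui_frame() as a UI PDU. */
	if (sendto(fd, payload, sizeof(payload), 0,
		   (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}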
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig index decdc49b26d8..6d69b5f0f19b 100644 --- a/net/nfc/nci/Kconfig +++ b/net/nfc/nci/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config NFC_NCI | 1 | config NFC_NCI |
2 | depends on NFC && EXPERIMENTAL | 2 | depends on NFC |
3 | tristate "NCI protocol support (EXPERIMENTAL)" | 3 | tristate "NCI protocol support" |
4 | default n | 4 | default n |
5 | help | 5 | help |
6 | NCI (NFC Controller Interface) is a communication protocol between | 6 | NCI (NFC Controller Interface) is a communication protocol between |
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index acf9abb7d99b..5f98dc1bf039 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c | |||
@@ -205,10 +205,10 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) | |||
205 | cmd.num_disc_configs = 0; | 205 | cmd.num_disc_configs = 0; |
206 | 206 | ||
207 | if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && | 207 | if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && |
208 | (protocols & NFC_PROTO_JEWEL_MASK | 208 | (protocols & NFC_PROTO_JEWEL_MASK || |
209 | || protocols & NFC_PROTO_MIFARE_MASK | 209 | protocols & NFC_PROTO_MIFARE_MASK || |
210 | || protocols & NFC_PROTO_ISO14443_MASK | 210 | protocols & NFC_PROTO_ISO14443_MASK || |
211 | || protocols & NFC_PROTO_NFC_DEP_MASK)) { | 211 | protocols & NFC_PROTO_NFC_DEP_MASK)) { |
212 | cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = | 212 | cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = |
213 | NCI_NFC_A_PASSIVE_POLL_MODE; | 213 | NCI_NFC_A_PASSIVE_POLL_MODE; |
214 | cmd.disc_configs[cmd.num_disc_configs].frequency = 1; | 214 | cmd.disc_configs[cmd.num_disc_configs].frequency = 1; |
@@ -224,8 +224,8 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt) | |||
224 | } | 224 | } |
225 | 225 | ||
226 | if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && | 226 | if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && |
227 | (protocols & NFC_PROTO_FELICA_MASK | 227 | (protocols & NFC_PROTO_FELICA_MASK || |
228 | || protocols & NFC_PROTO_NFC_DEP_MASK)) { | 228 | protocols & NFC_PROTO_NFC_DEP_MASK)) { |
229 | cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = | 229 | cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = |
230 | NCI_NFC_F_PASSIVE_POLL_MODE; | 230 | NCI_NFC_F_PASSIVE_POLL_MODE; |
231 | cmd.disc_configs[cmd.num_disc_configs].frequency = 1; | 231 | cmd.disc_configs[cmd.num_disc_configs].frequency = 1; |
@@ -414,13 +414,13 @@ static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev) | |||
414 | struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); | 414 | struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); |
415 | struct nci_set_config_param param; | 415 | struct nci_set_config_param param; |
416 | __u8 local_gb[NFC_MAX_GT_LEN]; | 416 | __u8 local_gb[NFC_MAX_GT_LEN]; |
417 | int i, rc = 0; | 417 | int i; |
418 | 418 | ||
419 | param.val = nfc_get_local_general_bytes(nfc_dev, ¶m.len); | 419 | param.val = nfc_get_local_general_bytes(nfc_dev, ¶m.len); |
420 | if ((param.val == NULL) || (param.len == 0)) | 420 | if ((param.val == NULL) || (param.len == 0)) |
421 | return rc; | 421 | return 0; |
422 | 422 | ||
423 | if (param.len > NCI_MAX_PARAM_LEN) | 423 | if (param.len > NFC_MAX_GT_LEN) |
424 | return -EINVAL; | 424 | return -EINVAL; |
425 | 425 | ||
426 | for (i = 0; i < param.len; i++) | 426 | for (i = 0; i < param.len; i++) |
@@ -429,10 +429,8 @@ static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev) | |||
429 | param.id = NCI_PN_ATR_REQ_GEN_BYTES; | 429 | param.id = NCI_PN_ATR_REQ_GEN_BYTES; |
430 | param.val = local_gb; | 430 | param.val = local_gb; |
431 | 431 | ||
432 | rc = nci_request(ndev, nci_set_config_req, (unsigned long)¶m, | 432 | return nci_request(ndev, nci_set_config_req, (unsigned long)¶m, |
433 | msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT)); | 433 | msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT)); |
434 | |||
435 | return rc; | ||
436 | } | 434 | } |
437 | 435 | ||
438 | static int nci_start_poll(struct nfc_dev *nfc_dev, | 436 | static int nci_start_poll(struct nfc_dev *nfc_dev, |
@@ -579,7 +577,6 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, | |||
579 | } | 577 | } |
580 | } | 578 | } |
581 | 579 | ||
582 | |||
583 | static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, | 580 | static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, |
584 | __u8 comm_mode, __u8 *gb, size_t gb_len) | 581 | __u8 comm_mode, __u8 *gb, size_t gb_len) |
585 | { | 582 | { |
@@ -806,8 +803,8 @@ int nci_recv_frame(struct sk_buff *skb) | |||
806 | 803 | ||
807 | pr_debug("len %d\n", skb->len); | 804 | pr_debug("len %d\n", skb->len); |
808 | 805 | ||
809 | if (!ndev || (!test_bit(NCI_UP, &ndev->flags) | 806 | if (!ndev || (!test_bit(NCI_UP, &ndev->flags) && |
810 | && !test_bit(NCI_INIT, &ndev->flags))) { | 807 | !test_bit(NCI_INIT, &ndev->flags))) { |
811 | kfree_skb(skb); | 808 | kfree_skb(skb); |
812 | return -ENXIO; | 809 | return -ENXIO; |
813 | } | 810 | } |
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index c1b5285cbde7..3568ae16786d 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c | |||
@@ -29,6 +29,8 @@ | |||
29 | 29 | ||
30 | #include "nfc.h" | 30 | #include "nfc.h" |
31 | 31 | ||
32 | #include "llcp/llcp.h" | ||
33 | |||
32 | static struct genl_multicast_group nfc_genl_event_mcgrp = { | 34 | static struct genl_multicast_group nfc_genl_event_mcgrp = { |
33 | .name = NFC_GENL_MCAST_EVENT_NAME, | 35 | .name = NFC_GENL_MCAST_EVENT_NAME, |
34 | }; | 36 | }; |
@@ -364,7 +366,8 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, | |||
364 | if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || | 366 | if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || |
365 | nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || | 367 | nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || |
366 | nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || | 368 | nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || |
367 | nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up)) | 369 | nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) || |
370 | nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode)) | ||
368 | goto nla_put_failure; | 371 | goto nla_put_failure; |
369 | 372 | ||
370 | return genlmsg_end(msg, hdr); | 373 | return genlmsg_end(msg, hdr); |
@@ -590,7 +593,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info) | |||
590 | if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || | 593 | if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || |
591 | ((!info->attrs[NFC_ATTR_IM_PROTOCOLS] && | 594 | ((!info->attrs[NFC_ATTR_IM_PROTOCOLS] && |
592 | !info->attrs[NFC_ATTR_PROTOCOLS]) && | 595 | !info->attrs[NFC_ATTR_PROTOCOLS]) && |
593 | !info->attrs[NFC_ATTR_TM_PROTOCOLS])) | 596 | !info->attrs[NFC_ATTR_TM_PROTOCOLS])) |
594 | return -EINVAL; | 597 | return -EINVAL; |
595 | 598 | ||
596 | idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); | 599 | idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); |
@@ -715,6 +718,146 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info) | |||
715 | return rc; | 718 | return rc; |
716 | } | 719 | } |
717 | 720 | ||
721 | static int nfc_genl_send_params(struct sk_buff *msg, | ||
722 | struct nfc_llcp_local *local, | ||
723 | u32 portid, u32 seq) | ||
724 | { | ||
725 | void *hdr; | ||
726 | |||
727 | hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, 0, | ||
728 | NFC_CMD_LLC_GET_PARAMS); | ||
729 | if (!hdr) | ||
730 | return -EMSGSIZE; | ||
731 | |||
732 | if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, local->dev->idx) || | ||
733 | nla_put_u8(msg, NFC_ATTR_LLC_PARAM_LTO, local->lto) || | ||
734 | nla_put_u8(msg, NFC_ATTR_LLC_PARAM_RW, local->rw) || | ||
735 | nla_put_u16(msg, NFC_ATTR_LLC_PARAM_MIUX, be16_to_cpu(local->miux))) | ||
736 | goto nla_put_failure; | ||
737 | |||
738 | return genlmsg_end(msg, hdr); | ||
739 | |||
740 | nla_put_failure: | ||
741 | |||
742 | genlmsg_cancel(msg, hdr); | ||
743 | return -EMSGSIZE; | ||
744 | } | ||
745 | |||
746 | static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info) | ||
747 | { | ||
748 | struct nfc_dev *dev; | ||
749 | struct nfc_llcp_local *local; | ||
750 | int rc = 0; | ||
751 | struct sk_buff *msg = NULL; | ||
752 | u32 idx; | ||
753 | |||
754 | if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) | ||
755 | return -EINVAL; | ||
756 | |||
757 | idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); | ||
758 | |||
759 | dev = nfc_get_device(idx); | ||
760 | if (!dev) | ||
761 | return -ENODEV; | ||
762 | |||
763 | device_lock(&dev->dev); | ||
764 | |||
765 | local = nfc_llcp_find_local(dev); | ||
766 | if (!local) { | ||
767 | rc = -ENODEV; | ||
768 | goto exit; | ||
769 | } | ||
770 | |||
771 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
772 | if (!msg) { | ||
773 | rc = -ENOMEM; | ||
774 | goto exit; | ||
775 | } | ||
776 | |||
777 | rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq); | ||
778 | |||
779 | exit: | ||
780 | device_unlock(&dev->dev); | ||
781 | |||
782 | nfc_put_device(dev); | ||
783 | |||
784 | if (rc < 0) { | ||
785 | if (msg) | ||
786 | nlmsg_free(msg); | ||
787 | |||
788 | return rc; | ||
789 | } | ||
790 | |||
791 | return genlmsg_reply(msg, info); | ||
792 | } | ||
793 | |||
794 | static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info) | ||
795 | { | ||
796 | struct nfc_dev *dev; | ||
797 | struct nfc_llcp_local *local; | ||
798 | u8 rw = 0; | ||
799 | u16 miux = 0; | ||
800 | u32 idx; | ||
801 | int rc = 0; | ||
802 | |||
803 | if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || | ||
804 | (!info->attrs[NFC_ATTR_LLC_PARAM_LTO] && | ||
805 | !info->attrs[NFC_ATTR_LLC_PARAM_RW] && | ||
806 | !info->attrs[NFC_ATTR_LLC_PARAM_MIUX])) | ||
807 | return -EINVAL; | ||
808 | |||
809 | if (info->attrs[NFC_ATTR_LLC_PARAM_RW]) { | ||
810 | rw = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_RW]); | ||
811 | |||
812 | if (rw > LLCP_MAX_RW) | ||
813 | return -EINVAL; | ||
814 | } | ||
815 | |||
816 | if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX]) { | ||
817 | miux = nla_get_u16(info->attrs[NFC_ATTR_LLC_PARAM_MIUX]); | ||
818 | |||
819 | if (miux > LLCP_MAX_MIUX) | ||
820 | return -EINVAL; | ||
821 | } | ||
822 | |||
823 | idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); | ||
824 | |||
825 | dev = nfc_get_device(idx); | ||
826 | if (!dev) | ||
827 | return -ENODEV; | ||
828 | |||
829 | device_lock(&dev->dev); | ||
830 | |||
831 | local = nfc_llcp_find_local(dev); | ||
832 | if (!local) { | ||
833 | rc = -ENODEV; | ||
834 | goto exit; | ||
835 | } | ||
836 | |||
837 | |||
838 | if (info->attrs[NFC_ATTR_LLC_PARAM_LTO]) { | ||
839 | if (dev->dep_link_up) { | ||
840 | rc = -EINPROGRESS; | ||
841 | goto exit; | ||
842 | } | ||
843 | |||
844 | local->lto = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_LTO]); | ||
845 | } | ||
846 | |||
847 | if (info->attrs[NFC_ATTR_LLC_PARAM_RW]) | ||
848 | local->rw = rw; | ||
849 | |||
850 | if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX]) | ||
851 | local->miux = cpu_to_be16(miux); | ||
852 | |||
853 | exit: | ||
854 | device_unlock(&dev->dev); | ||
855 | |||
856 | nfc_put_device(dev); | ||
857 | |||
858 | return rc; | ||
859 | } | ||
860 | |||
718 | static struct genl_ops nfc_genl_ops[] = { | 861 | static struct genl_ops nfc_genl_ops[] = { |
719 | { | 862 | { |
720 | .cmd = NFC_CMD_GET_DEVICE, | 863 | .cmd = NFC_CMD_GET_DEVICE, |
@@ -759,6 +902,16 @@ static struct genl_ops nfc_genl_ops[] = { | |||
759 | .done = nfc_genl_dump_targets_done, | 902 | .done = nfc_genl_dump_targets_done, |
760 | .policy = nfc_genl_policy, | 903 | .policy = nfc_genl_policy, |
761 | }, | 904 | }, |
905 | { | ||
906 | .cmd = NFC_CMD_LLC_GET_PARAMS, | ||
907 | .doit = nfc_genl_llc_get_params, | ||
908 | .policy = nfc_genl_policy, | ||
909 | }, | ||
910 | { | ||
911 | .cmd = NFC_CMD_LLC_SET_PARAMS, | ||
912 | .doit = nfc_genl_llc_set_params, | ||
913 | .policy = nfc_genl_policy, | ||
914 | }, | ||
762 | }; | 915 | }; |
763 | 916 | ||
764 | 917 | ||
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index c5e42b79a418..87d914d2876a 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h | |||
@@ -56,6 +56,7 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev); | |||
56 | int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len); | 56 | int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len); |
57 | u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len); | 57 | u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len); |
58 | int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb); | 58 | int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb); |
59 | struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); | ||
59 | int __init nfc_llcp_init(void); | 60 | int __init nfc_llcp_init(void); |
60 | void nfc_llcp_exit(void); | 61 | void nfc_llcp_exit(void); |
61 | 62 | ||
@@ -97,6 +98,11 @@ static inline int nfc_llcp_data_received(struct nfc_dev *dev, | |||
97 | return 0; | 98 | return 0; |
98 | } | 99 | } |
99 | 100 | ||
101 | static inline struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev) | ||
102 | { | ||
103 | return NULL; | ||
104 | } | ||
105 | |||
100 | static inline int nfc_llcp_init(void) | 106 | static inline int nfc_llcp_init(void) |
101 | { | 107 | { |
102 | return 0; | 108 | return 0; |
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index 8b8a6a2b2bad..313bf1bc848a 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c | |||
@@ -256,7 +256,6 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
256 | return rc ? : copied; | 256 | return rc ? : copied; |
257 | } | 257 | } |
258 | 258 | ||
259 | |||
260 | static const struct proto_ops rawsock_ops = { | 259 | static const struct proto_ops rawsock_ops = { |
261 | .family = PF_NFC, | 260 | .family = PF_NFC, |
262 | .owner = THIS_MODULE, | 261 | .owner = THIS_MODULE, |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 94060edbbd70..e639645e8fec 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -1881,7 +1881,35 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, | |||
1881 | skb_reserve(skb, hlen); | 1881 | skb_reserve(skb, hlen); |
1882 | skb_reset_network_header(skb); | 1882 | skb_reset_network_header(skb); |
1883 | 1883 | ||
1884 | data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll); | 1884 | if (po->tp_tx_has_off) { |
1885 | int off_min, off_max, off; | ||
1886 | off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); | ||
1887 | off_max = po->tx_ring.frame_size - tp_len; | ||
1888 | if (sock->type == SOCK_DGRAM) { | ||
1889 | switch (po->tp_version) { | ||
1890 | case TPACKET_V2: | ||
1891 | off = ph.h2->tp_net; | ||
1892 | break; | ||
1893 | default: | ||
1894 | off = ph.h1->tp_net; | ||
1895 | break; | ||
1896 | } | ||
1897 | } else { | ||
1898 | switch (po->tp_version) { | ||
1899 | case TPACKET_V2: | ||
1900 | off = ph.h2->tp_mac; | ||
1901 | break; | ||
1902 | default: | ||
1903 | off = ph.h1->tp_mac; | ||
1904 | break; | ||
1905 | } | ||
1906 | } | ||
1907 | if (unlikely((off < off_min) || (off_max < off))) | ||
1908 | return -EINVAL; | ||
1909 | data = ph.raw + off; | ||
1910 | } else { | ||
1911 | data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll); | ||
1912 | } | ||
1885 | to_write = tp_len; | 1913 | to_write = tp_len; |
1886 | 1914 | ||
1887 | if (sock->type == SOCK_DGRAM) { | 1915 | if (sock->type == SOCK_DGRAM) { |
@@ -1907,7 +1935,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, | |||
1907 | to_write -= dev->hard_header_len; | 1935 | to_write -= dev->hard_header_len; |
1908 | } | 1936 | } |
1909 | 1937 | ||
1910 | err = -EFAULT; | ||
1911 | offset = offset_in_page(data); | 1938 | offset = offset_in_page(data); |
1912 | len_max = PAGE_SIZE - offset; | 1939 | len_max = PAGE_SIZE - offset; |
1913 | len = ((to_write > len_max) ? len_max : to_write); | 1940 | len = ((to_write > len_max) ? len_max : to_write); |
@@ -1957,7 +1984,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |||
1957 | 1984 | ||
1958 | mutex_lock(&po->pg_vec_lock); | 1985 | mutex_lock(&po->pg_vec_lock); |
1959 | 1986 | ||
1960 | err = -EBUSY; | ||
1961 | if (saddr == NULL) { | 1987 | if (saddr == NULL) { |
1962 | dev = po->prot_hook.dev; | 1988 | dev = po->prot_hook.dev; |
1963 | proto = po->num; | 1989 | proto = po->num; |
@@ -2478,7 +2504,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, | |||
2478 | __be16 proto = (__force __be16)protocol; /* weird, but documented */ | 2504 | __be16 proto = (__force __be16)protocol; /* weird, but documented */ |
2479 | int err; | 2505 | int err; |
2480 | 2506 | ||
2481 | if (!capable(CAP_NET_RAW)) | 2507 | if (!ns_capable(net->user_ns, CAP_NET_RAW)) |
2482 | return -EPERM; | 2508 | return -EPERM; |
2483 | if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && | 2509 | if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && |
2484 | sock->type != SOCK_PACKET) | 2510 | sock->type != SOCK_PACKET) |
@@ -3111,6 +3137,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
3111 | 3137 | ||
3112 | return fanout_add(sk, val & 0xffff, val >> 16); | 3138 | return fanout_add(sk, val & 0xffff, val >> 16); |
3113 | } | 3139 | } |
3140 | case PACKET_TX_HAS_OFF: | ||
3141 | { | ||
3142 | unsigned int val; | ||
3143 | |||
3144 | if (optlen != sizeof(val)) | ||
3145 | return -EINVAL; | ||
3146 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) | ||
3147 | return -EBUSY; | ||
3148 | if (copy_from_user(&val, optval, sizeof(val))) | ||
3149 | return -EFAULT; | ||
3150 | po->tp_tx_has_off = !!val; | ||
3151 | return 0; | ||
3152 | } | ||
3114 | default: | 3153 | default: |
3115 | return -ENOPROTOOPT; | 3154 | return -ENOPROTOOPT; |
3116 | } | 3155 | } |
@@ -3202,6 +3241,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
3202 | ((u32)po->fanout->type << 16)) : | 3241 | ((u32)po->fanout->type << 16)) : |
3203 | 0); | 3242 | 0); |
3204 | break; | 3243 | break; |
3244 | case PACKET_TX_HAS_OFF: | ||
3245 | val = po->tp_tx_has_off; | ||
3246 | break; | ||
3205 | default: | 3247 | default: |
3206 | return -ENOPROTOOPT; | 3248 | return -ENOPROTOOPT; |
3207 | } | 3249 | } |
diff --git a/net/packet/internal.h b/net/packet/internal.h index 44945f6b7252..e84cab8cb7a9 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h | |||
@@ -109,6 +109,7 @@ struct packet_sock { | |||
109 | unsigned int tp_hdrlen; | 109 | unsigned int tp_hdrlen; |
110 | unsigned int tp_reserve; | 110 | unsigned int tp_reserve; |
111 | unsigned int tp_loss:1; | 111 | unsigned int tp_loss:1; |
112 | unsigned int tp_tx_has_off:1; | ||
112 | unsigned int tp_tstamp; | 113 | unsigned int tp_tstamp; |
113 | struct packet_type prot_hook ____cacheline_aligned_in_smp; | 114 | struct packet_type prot_hook ____cacheline_aligned_in_smp; |
114 | }; | 115 | }; |
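Taken together, the af_packet.c and internal.h changes let a TX ring user place frame data at an explicit offset: with PACKET_TX_HAS_OFF set, tpacket_fill_skb() reads the offset from tp_mac (SOCK_RAW) or tp_net (SOCK_DGRAM) and rejects anything outside [tp_hdrlen - sizeof(struct sockaddr_ll), frame_size - packet length]. A minimal userspace sketch of enabling the option; the ring geometry is illustrative only and actually sending frames is omitted.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

#ifndef PACKET_TX_HAS_OFF
#define PACKET_TX_HAS_OFF 19		/* the option added by the hunks above */
#endif

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int ver = TPACKET_V2, on = 1;
	struct tpacket_req req = {
		.tp_block_size = 4096,
		.tp_block_nr   = 4,
		.tp_frame_size = 2048,
		.tp_frame_nr   = 8,
	};
	void *ring;

	if (fd < 0) {
		perror("socket");	/* needs CAP_NET_RAW */
		return 1;
	}

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
	/* Must be done before the ring exists: the setsockopt handler above
	 * returns -EBUSY once rx_ring or tx_ring pages are allocated. */
	if (setsockopt(fd, SOL_PACKET, PACKET_TX_HAS_OFF, &on, sizeof(on)))
		perror("PACKET_TX_HAS_OFF");
	setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));

	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring != MAP_FAILED) {
		struct tpacket2_hdr *hdr = ring;
		/* With the option set, tpacket_fill_skb() takes the data
		 * offset from tp_mac (SOCK_RAW) instead of assuming it sits
		 * right after the header + sockaddr_ll area. */
		hdr->tp_mac = TPACKET2_HDRLEN;
	}

	close(fd);
	return 0;
}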
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index 83a8389619aa..0193630d3061 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c | |||
@@ -70,6 +70,9 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr) | |||
70 | int err; | 70 | int err; |
71 | u8 pnaddr; | 71 | u8 pnaddr; |
72 | 72 | ||
73 | if (!capable(CAP_NET_ADMIN)) | ||
74 | return -EPERM; | ||
75 | |||
73 | if (!capable(CAP_SYS_ADMIN)) | 76 | if (!capable(CAP_SYS_ADMIN)) |
74 | return -EPERM; | 77 | return -EPERM; |
75 | 78 | ||
@@ -230,6 +233,9 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr) | |||
230 | int err; | 233 | int err; |
231 | u8 dst; | 234 | u8 dst; |
232 | 235 | ||
236 | if (!capable(CAP_NET_ADMIN)) | ||
237 | return -EPERM; | ||
238 | |||
233 | if (!capable(CAP_SYS_ADMIN)) | 239 | if (!capable(CAP_SYS_ADMIN)) |
234 | return -EPERM; | 240 | return -EPERM; |
235 | 241 | ||
diff --git a/net/rds/ib.h b/net/rds/ib.h index 8d2b3d5a7c21..7280ab8810c2 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h | |||
@@ -50,7 +50,7 @@ struct rds_ib_cache_head { | |||
50 | }; | 50 | }; |
51 | 51 | ||
52 | struct rds_ib_refill_cache { | 52 | struct rds_ib_refill_cache { |
53 | struct rds_ib_cache_head *percpu; | 53 | struct rds_ib_cache_head __percpu *percpu; |
54 | struct list_head *xfer; | 54 | struct list_head *xfer; |
55 | struct list_head *ready; | 55 | struct list_head *ready; |
56 | }; | 56 | }; |
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 8d194912c695..8c5bc857f04d 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c | |||
@@ -418,20 +418,21 @@ static void rds_ib_recv_cache_put(struct list_head *new_item, | |||
418 | struct rds_ib_refill_cache *cache) | 418 | struct rds_ib_refill_cache *cache) |
419 | { | 419 | { |
420 | unsigned long flags; | 420 | unsigned long flags; |
421 | struct rds_ib_cache_head *chp; | ||
422 | struct list_head *old; | 421 | struct list_head *old; |
422 | struct list_head __percpu *chpfirst; | ||
423 | 423 | ||
424 | local_irq_save(flags); | 424 | local_irq_save(flags); |
425 | 425 | ||
426 | chp = per_cpu_ptr(cache->percpu, smp_processor_id()); | 426 | chpfirst = __this_cpu_read(cache->percpu->first); |
427 | if (!chp->first) | 427 | if (!chpfirst) |
428 | INIT_LIST_HEAD(new_item); | 428 | INIT_LIST_HEAD(new_item); |
429 | else /* put on front */ | 429 | else /* put on front */ |
430 | list_add_tail(new_item, chp->first); | 430 | list_add_tail(new_item, chpfirst); |
431 | chp->first = new_item; | ||
432 | chp->count++; | ||
433 | 431 | ||
434 | if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT) | 432 | __this_cpu_write(chpfirst, new_item); |
433 | __this_cpu_inc(cache->percpu->count); | ||
434 | |||
435 | if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT) | ||
435 | goto end; | 436 | goto end; |
436 | 437 | ||
437 | /* | 438 | /* |
@@ -443,12 +444,13 @@ static void rds_ib_recv_cache_put(struct list_head *new_item, | |||
443 | do { | 444 | do { |
444 | old = xchg(&cache->xfer, NULL); | 445 | old = xchg(&cache->xfer, NULL); |
445 | if (old) | 446 | if (old) |
446 | list_splice_entire_tail(old, chp->first); | 447 | list_splice_entire_tail(old, chpfirst); |
447 | old = cmpxchg(&cache->xfer, NULL, chp->first); | 448 | old = cmpxchg(&cache->xfer, NULL, chpfirst); |
448 | } while (old); | 449 | } while (old); |
449 | 450 | ||
450 | chp->first = NULL; | 451 | |
451 | chp->count = 0; | 452 | __this_cpu_write(chpfirst, NULL); |
453 | __this_cpu_write(cache->percpu->count, 0); | ||
452 | end: | 454 | end: |
453 | local_irq_restore(flags); | 455 | local_irq_restore(flags); |
454 | } | 456 | } |
diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 62fb51face8a..235e01acac51 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig | |||
@@ -509,7 +509,7 @@ config NET_EMATCH_TEXT | |||
509 | 509 | ||
510 | config NET_EMATCH_CANID | 510 | config NET_EMATCH_CANID |
511 | tristate "CAN Identifier" | 511 | tristate "CAN Identifier" |
512 | depends on NET_EMATCH && CAN | 512 | depends on NET_EMATCH && (CAN=y || CAN=m) |
513 | ---help--- | 513 | ---help--- |
514 | Say Y here if you want to be able to classify CAN frames based | 514 | Say Y here if you want to be able to classify CAN frames based |
515 | on CAN Identifier. | 515 | on CAN Identifier. |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 102761d294cb..65d240cbf74b 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -987,6 +987,9 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
987 | u32 portid = skb ? NETLINK_CB(skb).portid : 0; | 987 | u32 portid = skb ? NETLINK_CB(skb).portid : 0; |
988 | int ret = 0, ovr = 0; | 988 | int ret = 0, ovr = 0; |
989 | 989 | ||
990 | if ((n->nlmsg_type != RTM_GETACTION) && !capable(CAP_NET_ADMIN)) | ||
991 | return -EPERM; | ||
992 | |||
990 | ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); | 993 | ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); |
991 | if (ret < 0) | 994 | if (ret < 0) |
992 | return ret; | 995 | return ret; |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 7ae02892437c..ff55ed6c49b2 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -139,6 +139,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
139 | int err; | 139 | int err; |
140 | int tp_created = 0; | 140 | int tp_created = 0; |
141 | 141 | ||
142 | if ((n->nlmsg_type != RTM_GETTFILTER) && !capable(CAP_NET_ADMIN)) | ||
143 | return -EPERM; | ||
142 | replay: | 144 | replay: |
143 | t = nlmsg_data(n); | 145 | t = nlmsg_data(n); |
144 | protocol = TC_H_MIN(t->tcm_info); | 146 | protocol = TC_H_MIN(t->tcm_info); |
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 2ecde225ae60..709b0fb38a18 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
18 | #include <linux/cgroup.h> | 18 | #include <linux/cgroup.h> |
19 | #include <linux/rcupdate.h> | 19 | #include <linux/rcupdate.h> |
20 | #include <linux/fdtable.h> | ||
20 | #include <net/rtnetlink.h> | 21 | #include <net/rtnetlink.h> |
21 | #include <net/pkt_cls.h> | 22 | #include <net/pkt_cls.h> |
22 | #include <net/sock.h> | 23 | #include <net/sock.h> |
@@ -53,6 +54,28 @@ static void cgrp_destroy(struct cgroup *cgrp) | |||
53 | kfree(cgrp_cls_state(cgrp)); | 54 | kfree(cgrp_cls_state(cgrp)); |
54 | } | 55 | } |
55 | 56 | ||
57 | static int update_classid(const void *v, struct file *file, unsigned n) | ||
58 | { | ||
59 | int err; | ||
60 | struct socket *sock = sock_from_file(file, &err); | ||
61 | if (sock) | ||
62 | sock->sk->sk_classid = (u32)(unsigned long)v; | ||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) | ||
67 | { | ||
68 | struct task_struct *p; | ||
69 | void *v; | ||
70 | |||
71 | cgroup_taskset_for_each(p, cgrp, tset) { | ||
72 | task_lock(p); | ||
73 | v = (void *)(unsigned long)task_cls_classid(p); | ||
74 | iterate_fd(p->files, 0, update_classid, v); | ||
75 | task_unlock(p); | ||
76 | } | ||
77 | } | ||
78 | |||
56 | static u64 read_classid(struct cgroup *cgrp, struct cftype *cft) | 79 | static u64 read_classid(struct cgroup *cgrp, struct cftype *cft) |
57 | { | 80 | { |
58 | return cgrp_cls_state(cgrp)->classid; | 81 | return cgrp_cls_state(cgrp)->classid; |
@@ -77,6 +100,7 @@ struct cgroup_subsys net_cls_subsys = { | |||
77 | .name = "net_cls", | 100 | .name = "net_cls", |
78 | .create = cgrp_create, | 101 | .create = cgrp_create, |
79 | .destroy = cgrp_destroy, | 102 | .destroy = cgrp_destroy, |
103 | .attach = cgrp_attach, | ||
80 | .subsys_id = net_cls_subsys_id, | 104 | .subsys_id = net_cls_subsys_id, |
81 | .base_cftypes = ss_files, | 105 | .base_cftypes = ss_files, |
82 | .module = THIS_MODULE, | 106 | .module = THIS_MODULE, |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index a18d975db59c..4799c4840c1a 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -495,16 +495,15 @@ EXPORT_SYMBOL(qdisc_watchdog_init); | |||
495 | 495 | ||
496 | void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) | 496 | void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) |
497 | { | 497 | { |
498 | ktime_t time; | ||
499 | |||
500 | if (test_bit(__QDISC_STATE_DEACTIVATED, | 498 | if (test_bit(__QDISC_STATE_DEACTIVATED, |
501 | &qdisc_root_sleeping(wd->qdisc)->state)) | 499 | &qdisc_root_sleeping(wd->qdisc)->state)) |
502 | return; | 500 | return; |
503 | 501 | ||
504 | qdisc_throttled(wd->qdisc); | 502 | qdisc_throttled(wd->qdisc); |
505 | time = ktime_set(0, 0); | 503 | |
506 | time = ktime_add_ns(time, PSCHED_TICKS2NS(expires)); | 504 | hrtimer_start(&wd->timer, |
507 | hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); | 505 | ns_to_ktime(PSCHED_TICKS2NS(expires)), |
506 | HRTIMER_MODE_ABS); | ||
508 | } | 507 | } |
509 | EXPORT_SYMBOL(qdisc_watchdog_schedule); | 508 | EXPORT_SYMBOL(qdisc_watchdog_schedule); |
510 | 509 | ||
@@ -981,6 +980,9 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
981 | struct Qdisc *p = NULL; | 980 | struct Qdisc *p = NULL; |
982 | int err; | 981 | int err; |
983 | 982 | ||
983 | if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN)) | ||
984 | return -EPERM; | ||
985 | |||
984 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); | 986 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); |
985 | if (!dev) | 987 | if (!dev) |
986 | return -ENODEV; | 988 | return -ENODEV; |
@@ -1044,6 +1046,9 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1044 | struct Qdisc *q, *p; | 1046 | struct Qdisc *q, *p; |
1045 | int err; | 1047 | int err; |
1046 | 1048 | ||
1049 | if (!capable(CAP_NET_ADMIN)) | ||
1050 | return -EPERM; | ||
1051 | |||
1047 | replay: | 1052 | replay: |
1048 | /* Reinit, just in case something touches this. */ | 1053 | /* Reinit, just in case something touches this. */ |
1049 | tcm = nlmsg_data(n); | 1054 | tcm = nlmsg_data(n); |
@@ -1380,6 +1385,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1380 | u32 qid = TC_H_MAJ(clid); | 1385 | u32 qid = TC_H_MAJ(clid); |
1381 | int err; | 1386 | int err; |
1382 | 1387 | ||
1388 | if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN)) | ||
1389 | return -EPERM; | ||
1390 | |||
1383 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); | 1391 | dev = __dev_get_by_index(net, tcm->tcm_ifindex); |
1384 | if (!dev) | 1392 | if (!dev) |
1385 | return -ENODEV; | 1393 | return -ENODEV; |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 564b9fc8efd3..0e19948470b8 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -509,8 +509,7 @@ static void cbq_ovl_delay(struct cbq_class *cl) | |||
509 | cl->cpriority = TC_CBQ_MAXPRIO; | 509 | cl->cpriority = TC_CBQ_MAXPRIO; |
510 | q->pmask |= (1<<TC_CBQ_MAXPRIO); | 510 | q->pmask |= (1<<TC_CBQ_MAXPRIO); |
511 | 511 | ||
512 | expires = ktime_set(0, 0); | 512 | expires = ns_to_ktime(PSCHED_TICKS2NS(sched)); |
513 | expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched)); | ||
514 | if (hrtimer_try_to_cancel(&q->delay_timer) && | 513 | if (hrtimer_try_to_cancel(&q->delay_timer) && |
515 | ktime_to_ns(ktime_sub( | 514 | ktime_to_ns(ktime_sub( |
516 | hrtimer_get_expires(&q->delay_timer), | 515 | hrtimer_get_expires(&q->delay_timer), |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 9d75b7761313..d2922c0ef57a 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -71,6 +71,12 @@ enum htb_cmode { | |||
71 | HTB_CAN_SEND /* class can send */ | 71 | HTB_CAN_SEND /* class can send */ |
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct htb_rate_cfg { | ||
75 | u64 rate_bps; | ||
76 | u32 mult; | ||
77 | u32 shift; | ||
78 | }; | ||
79 | |||
74 | /* interior & leaf nodes; props specific to leaves are marked L: */ | 80 | /* interior & leaf nodes; props specific to leaves are marked L: */ |
75 | struct htb_class { | 81 | struct htb_class { |
76 | struct Qdisc_class_common common; | 82 | struct Qdisc_class_common common; |
@@ -118,11 +124,11 @@ struct htb_class { | |||
118 | int filter_cnt; | 124 | int filter_cnt; |
119 | 125 | ||
120 | /* token bucket parameters */ | 126 | /* token bucket parameters */ |
121 | struct qdisc_rate_table *rate; /* rate table of the class itself */ | 127 | struct htb_rate_cfg rate; |
122 | struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */ | 128 | struct htb_rate_cfg ceil; |
123 | long buffer, cbuffer; /* token bucket depth/rate */ | 129 | s64 buffer, cbuffer; /* token bucket depth/rate */ |
124 | psched_tdiff_t mbuffer; /* max wait time */ | 130 | psched_tdiff_t mbuffer; /* max wait time */ |
125 | long tokens, ctokens; /* current number of tokens */ | 131 | s64 tokens, ctokens; /* current number of tokens */ |
126 | psched_time_t t_c; /* checkpoint time */ | 132 | psched_time_t t_c; /* checkpoint time */ |
127 | }; | 133 | }; |
128 | 134 | ||
@@ -162,6 +168,45 @@ struct htb_sched { | |||
162 | struct work_struct work; | 168 | struct work_struct work; |
163 | }; | 169 | }; |
164 | 170 | ||
171 | static u64 l2t_ns(struct htb_rate_cfg *r, unsigned int len) | ||
172 | { | ||
173 | return ((u64)len * r->mult) >> r->shift; | ||
174 | } | ||
175 | |||
176 | static void htb_precompute_ratedata(struct htb_rate_cfg *r) | ||
177 | { | ||
178 | u64 factor; | ||
179 | u64 mult; | ||
180 | int shift; | ||
181 | |||
182 | r->shift = 0; | ||
183 | r->mult = 1; | ||
184 | /* | ||
185 | * Calibrate mult, shift so that token counting is accurate | ||
186 | * for smallest packet size (64 bytes). Token (time in ns) is | ||
187 | * computed as (bytes * 8) * NSEC_PER_SEC / rate_bps. It will | ||
188 | * work as long as the smallest packet transfer time can be | ||
189 | * accurately represented in nanosec. | ||
190 | */ | ||
191 | if (r->rate_bps > 0) { | ||
192 | /* | ||
193 | * Higher shift gives better accuracy. Find the largest | ||
194 | * shift such that mult fits in 32 bits. | ||
195 | */ | ||
196 | for (shift = 0; shift < 16; shift++) { | ||
197 | r->shift = shift; | ||
198 | factor = 8LLU * NSEC_PER_SEC * (1 << r->shift); | ||
199 | mult = div64_u64(factor, r->rate_bps); | ||
200 | if (mult > UINT_MAX) | ||
201 | break; | ||
202 | } | ||
203 | |||
204 | r->shift = shift - 1; | ||
205 | factor = 8LLU * NSEC_PER_SEC * (1 << r->shift); | ||
206 | r->mult = div64_u64(factor, r->rate_bps); | ||
207 | } | ||
208 | } | ||
209 | |||
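The calibration above replaces the old rate-table lookup with a multiply-and-shift approximation of len * 8 * NSEC_PER_SEC / rate_bps. A standalone sketch, not kernel code, that mirrors the loop and checks the result against exact division for an assumed 100 Mbit/s class.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL

struct rate_cfg {
	uint64_t rate_bps;
	uint32_t mult;
	uint32_t shift;
};

/* Same calibration as htb_precompute_ratedata(): find the largest shift < 16
 * for which mult = 8 * NSEC_PER_SEC * 2^shift / rate_bps still fits in 32 bits. */
static void precompute(struct rate_cfg *r)
{
	uint64_t factor, mult;
	int shift;

	r->shift = 0;
	r->mult = 1;
	if (r->rate_bps == 0)
		return;
	for (shift = 0; shift < 16; shift++) {
		factor = 8ULL * NSEC_PER_SEC * (1ULL << shift);
		mult = factor / r->rate_bps;
		if (mult > UINT32_MAX)
			break;
	}
	r->shift = shift - 1;
	r->mult = (uint32_t)((8ULL * NSEC_PER_SEC * (1ULL << r->shift)) / r->rate_bps);
}

/* Mirror of l2t_ns(): length in bytes -> transmit time in nanoseconds. */
static uint64_t l2t_ns(const struct rate_cfg *r, unsigned int len)
{
	return ((uint64_t)len * r->mult) >> r->shift;
}

int main(void)
{
	struct rate_cfg r = { .rate_bps = 100000000 };	/* 100 Mbit/s, assumed */

	precompute(&r);
	/* Exact value for a 64-byte packet: 64 * 8 * 1e9 / 1e8 = 5120 ns. */
	printf("mult=%u shift=%u approx=%llu ns exact=%llu ns\n",
	       r.mult, r.shift,
	       (unsigned long long)l2t_ns(&r, 64),
	       (unsigned long long)(64ULL * 8 * NSEC_PER_SEC / r.rate_bps));
	return 0;
}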
165 | /* find class in global hash table using given handle */ | 210 | /* find class in global hash table using given handle */ |
166 | static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) | 211 | static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) |
167 | { | 212 | { |
@@ -273,7 +318,7 @@ static void htb_add_to_id_tree(struct rb_root *root, | |||
273 | * already in the queue. | 318 | * already in the queue. |
274 | */ | 319 | */ |
275 | static void htb_add_to_wait_tree(struct htb_sched *q, | 320 | static void htb_add_to_wait_tree(struct htb_sched *q, |
276 | struct htb_class *cl, long delay) | 321 | struct htb_class *cl, s64 delay) |
277 | { | 322 | { |
278 | struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL; | 323 | struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL; |
279 | 324 | ||
@@ -441,14 +486,14 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) | |||
441 | htb_remove_class_from_row(q, cl, mask); | 486 | htb_remove_class_from_row(q, cl, mask); |
442 | } | 487 | } |
443 | 488 | ||
444 | static inline long htb_lowater(const struct htb_class *cl) | 489 | static inline s64 htb_lowater(const struct htb_class *cl) |
445 | { | 490 | { |
446 | if (htb_hysteresis) | 491 | if (htb_hysteresis) |
447 | return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0; | 492 | return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0; |
448 | else | 493 | else |
449 | return 0; | 494 | return 0; |
450 | } | 495 | } |
451 | static inline long htb_hiwater(const struct htb_class *cl) | 496 | static inline s64 htb_hiwater(const struct htb_class *cl) |
452 | { | 497 | { |
453 | if (htb_hysteresis) | 498 | if (htb_hysteresis) |
454 | return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0; | 499 | return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0; |
@@ -469,9 +514,9 @@ static inline long htb_hiwater(const struct htb_class *cl) | |||
469 | * mode transitions per time unit. The speed gain is about 1/6. | 514 | * mode transitions per time unit. The speed gain is about 1/6. |
470 | */ | 515 | */ |
471 | static inline enum htb_cmode | 516 | static inline enum htb_cmode |
472 | htb_class_mode(struct htb_class *cl, long *diff) | 517 | htb_class_mode(struct htb_class *cl, s64 *diff) |
473 | { | 518 | { |
474 | long toks; | 519 | s64 toks; |
475 | 520 | ||
476 | if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) { | 521 | if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) { |
477 | *diff = -toks; | 522 | *diff = -toks; |
@@ -495,7 +540,7 @@ htb_class_mode(struct htb_class *cl, long *diff) | |||
495 | * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree). | 540 | * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree). |
496 | */ | 541 | */ |
497 | static void | 542 | static void |
498 | htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff) | 543 | htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff) |
499 | { | 544 | { |
500 | enum htb_cmode new_mode = htb_class_mode(cl, diff); | 545 | enum htb_cmode new_mode = htb_class_mode(cl, diff); |
501 | 546 | ||
@@ -581,26 +626,26 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
581 | return NET_XMIT_SUCCESS; | 626 | return NET_XMIT_SUCCESS; |
582 | } | 627 | } |
583 | 628 | ||
584 | static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff) | 629 | static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff) |
585 | { | 630 | { |
586 | long toks = diff + cl->tokens; | 631 | s64 toks = diff + cl->tokens; |
587 | 632 | ||
588 | if (toks > cl->buffer) | 633 | if (toks > cl->buffer) |
589 | toks = cl->buffer; | 634 | toks = cl->buffer; |
590 | toks -= (long) qdisc_l2t(cl->rate, bytes); | 635 | toks -= (s64) l2t_ns(&cl->rate, bytes); |
591 | if (toks <= -cl->mbuffer) | 636 | if (toks <= -cl->mbuffer) |
592 | toks = 1 - cl->mbuffer; | 637 | toks = 1 - cl->mbuffer; |
593 | 638 | ||
594 | cl->tokens = toks; | 639 | cl->tokens = toks; |
595 | } | 640 | } |
596 | 641 | ||
597 | static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff) | 642 | static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff) |
598 | { | 643 | { |
599 | long toks = diff + cl->ctokens; | 644 | s64 toks = diff + cl->ctokens; |
600 | 645 | ||
601 | if (toks > cl->cbuffer) | 646 | if (toks > cl->cbuffer) |
602 | toks = cl->cbuffer; | 647 | toks = cl->cbuffer; |
603 | toks -= (long) qdisc_l2t(cl->ceil, bytes); | 648 | toks -= (s64) l2t_ns(&cl->ceil, bytes); |
604 | if (toks <= -cl->mbuffer) | 649 | if (toks <= -cl->mbuffer) |
605 | toks = 1 - cl->mbuffer; | 650 | toks = 1 - cl->mbuffer; |
606 | 651 | ||
@@ -623,10 +668,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, | |||
623 | { | 668 | { |
624 | int bytes = qdisc_pkt_len(skb); | 669 | int bytes = qdisc_pkt_len(skb); |
625 | enum htb_cmode old_mode; | 670 | enum htb_cmode old_mode; |
626 | long diff; | 671 | s64 diff; |
627 | 672 | ||
628 | while (cl) { | 673 | while (cl) { |
629 | diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer); | 674 | diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); |
630 | if (cl->level >= level) { | 675 | if (cl->level >= level) { |
631 | if (cl->level == level) | 676 | if (cl->level == level) |
632 | cl->xstats.lends++; | 677 | cl->xstats.lends++; |
@@ -673,7 +718,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level, | |||
673 | unsigned long stop_at = start + 2; | 718 | unsigned long stop_at = start + 2; |
674 | while (time_before(jiffies, stop_at)) { | 719 | while (time_before(jiffies, stop_at)) { |
675 | struct htb_class *cl; | 720 | struct htb_class *cl; |
676 | long diff; | 721 | s64 diff; |
677 | struct rb_node *p = rb_first(&q->wait_pq[level]); | 722 | struct rb_node *p = rb_first(&q->wait_pq[level]); |
678 | 723 | ||
679 | if (!p) | 724 | if (!p) |
@@ -684,7 +729,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level, | |||
684 | return cl->pq_key; | 729 | return cl->pq_key; |
685 | 730 | ||
686 | htb_safe_rb_erase(p, q->wait_pq + level); | 731 | htb_safe_rb_erase(p, q->wait_pq + level); |
687 | diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer); | 732 | diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); |
688 | htb_change_class_mode(q, cl, &diff); | 733 | htb_change_class_mode(q, cl, &diff); |
689 | if (cl->cmode != HTB_CAN_SEND) | 734 | if (cl->cmode != HTB_CAN_SEND) |
690 | htb_add_to_wait_tree(q, cl, diff); | 735 | htb_add_to_wait_tree(q, cl, diff); |
@@ -871,10 +916,10 @@ ok: | |||
871 | 916 | ||
872 | if (!sch->q.qlen) | 917 | if (!sch->q.qlen) |
873 | goto fin; | 918 | goto fin; |
874 | q->now = psched_get_time(); | 919 | q->now = ktime_to_ns(ktime_get()); |
875 | start_at = jiffies; | 920 | start_at = jiffies; |
876 | 921 | ||
877 | next_event = q->now + 5 * PSCHED_TICKS_PER_SEC; | 922 | next_event = q->now + 5 * NSEC_PER_SEC; |
878 | 923 | ||
879 | for (level = 0; level < TC_HTB_MAXDEPTH; level++) { | 924 | for (level = 0; level < TC_HTB_MAXDEPTH; level++) { |
880 | /* common case optimization - skip event handler quickly */ | 925 | /* common case optimization - skip event handler quickly */ |
@@ -884,7 +929,7 @@ ok: | |||
884 | if (q->now >= q->near_ev_cache[level]) { | 929 | if (q->now >= q->near_ev_cache[level]) { |
885 | event = htb_do_events(q, level, start_at); | 930 | event = htb_do_events(q, level, start_at); |
886 | if (!event) | 931 | if (!event) |
887 | event = q->now + PSCHED_TICKS_PER_SEC; | 932 | event = q->now + NSEC_PER_SEC; |
888 | q->near_ev_cache[level] = event; | 933 | q->near_ev_cache[level] = event; |
889 | } else | 934 | } else |
890 | event = q->near_ev_cache[level]; | 935 | event = q->near_ev_cache[level]; |
@@ -903,10 +948,17 @@ ok: | |||
903 | } | 948 | } |
904 | } | 949 | } |
905 | sch->qstats.overlimits++; | 950 | sch->qstats.overlimits++; |
906 | if (likely(next_event > q->now)) | 951 | if (likely(next_event > q->now)) { |
907 | qdisc_watchdog_schedule(&q->watchdog, next_event); | 952 | if (!test_bit(__QDISC_STATE_DEACTIVATED, |
908 | else | 953 | &qdisc_root_sleeping(q->watchdog.qdisc)->state)) { |
954 | ktime_t time = ns_to_ktime(next_event); | ||
955 | qdisc_throttled(q->watchdog.qdisc); | ||
956 | hrtimer_start(&q->watchdog.timer, time, | ||
957 | HRTIMER_MODE_ABS); | ||
958 | } | ||
959 | } else { | ||
909 | schedule_work(&q->work); | 960 | schedule_work(&q->work); |
961 | } | ||
910 | fin: | 962 | fin: |
911 | return skb; | 963 | return skb; |
912 | } | 964 | } |
@@ -1082,9 +1134,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, | |||
1082 | 1134 | ||
1083 | memset(&opt, 0, sizeof(opt)); | 1135 | memset(&opt, 0, sizeof(opt)); |
1084 | 1136 | ||
1085 | opt.rate = cl->rate->rate; | 1137 | opt.rate.rate = cl->rate.rate_bps >> 3; |
1086 | opt.buffer = cl->buffer; | 1138 | opt.buffer = cl->buffer; |
1087 | opt.ceil = cl->ceil->rate; | 1139 | opt.ceil.rate = cl->ceil.rate_bps >> 3; |
1088 | opt.cbuffer = cl->cbuffer; | 1140 | opt.cbuffer = cl->cbuffer; |
1089 | opt.quantum = cl->quantum; | 1141 | opt.quantum = cl->quantum; |
1090 | opt.prio = cl->prio; | 1142 | opt.prio = cl->prio; |
@@ -1203,9 +1255,6 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) | |||
1203 | qdisc_destroy(cl->un.leaf.q); | 1255 | qdisc_destroy(cl->un.leaf.q); |
1204 | } | 1256 | } |
1205 | gen_kill_estimator(&cl->bstats, &cl->rate_est); | 1257 | gen_kill_estimator(&cl->bstats, &cl->rate_est); |
1206 | qdisc_put_rtab(cl->rate); | ||
1207 | qdisc_put_rtab(cl->ceil); | ||
1208 | |||
1209 | tcf_destroy_chain(&cl->filter_list); | 1258 | tcf_destroy_chain(&cl->filter_list); |
1210 | kfree(cl); | 1259 | kfree(cl); |
1211 | } | 1260 | } |
@@ -1307,7 +1356,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1307 | struct htb_sched *q = qdisc_priv(sch); | 1356 | struct htb_sched *q = qdisc_priv(sch); |
1308 | struct htb_class *cl = (struct htb_class *)*arg, *parent; | 1357 | struct htb_class *cl = (struct htb_class *)*arg, *parent; |
1309 | struct nlattr *opt = tca[TCA_OPTIONS]; | 1358 | struct nlattr *opt = tca[TCA_OPTIONS]; |
1310 | struct qdisc_rate_table *rtab = NULL, *ctab = NULL; | ||
1311 | struct nlattr *tb[__TCA_HTB_MAX]; | 1359 | struct nlattr *tb[__TCA_HTB_MAX]; |
1312 | struct tc_htb_opt *hopt; | 1360 | struct tc_htb_opt *hopt; |
1313 | 1361 | ||
@@ -1326,10 +1374,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1326 | parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch); | 1374 | parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch); |
1327 | 1375 | ||
1328 | hopt = nla_data(tb[TCA_HTB_PARMS]); | 1376 | hopt = nla_data(tb[TCA_HTB_PARMS]); |
1329 | 1377 | if (!hopt->rate.rate || !hopt->ceil.rate) | |
1330 | rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]); | ||
1331 | ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]); | ||
1332 | if (!rtab || !ctab) | ||
1333 | goto failure; | 1378 | goto failure; |
1334 | 1379 | ||
1335 | if (!cl) { /* new class */ | 1380 | if (!cl) { /* new class */ |
@@ -1439,7 +1484,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1439 | * is really leaf before changing cl->un.leaf ! | 1484 | * is really leaf before changing cl->un.leaf ! |
1440 | */ | 1485 | */ |
1441 | if (!cl->level) { | 1486 | if (!cl->level) { |
1442 | cl->quantum = rtab->rate.rate / q->rate2quantum; | 1487 | cl->quantum = hopt->rate.rate / q->rate2quantum; |
1443 | if (!hopt->quantum && cl->quantum < 1000) { | 1488 | if (!hopt->quantum && cl->quantum < 1000) { |
1444 | pr_warning( | 1489 | pr_warning( |
1445 | "HTB: quantum of class %X is small. Consider r2q change.\n", | 1490 | "HTB: quantum of class %X is small. Consider r2q change.\n", |
@@ -1460,12 +1505,16 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1460 | 1505 | ||
1461 | cl->buffer = hopt->buffer; | 1506 | cl->buffer = hopt->buffer; |
1462 | cl->cbuffer = hopt->cbuffer; | 1507 | cl->cbuffer = hopt->cbuffer; |
1463 | if (cl->rate) | 1508 | |
1464 | qdisc_put_rtab(cl->rate); | 1509 | cl->rate.rate_bps = (u64)hopt->rate.rate << 3; |
1465 | cl->rate = rtab; | 1510 | cl->ceil.rate_bps = (u64)hopt->ceil.rate << 3; |
1466 | if (cl->ceil) | 1511 | |
1467 | qdisc_put_rtab(cl->ceil); | 1512 | htb_precompute_ratedata(&cl->rate); |
1468 | cl->ceil = ctab; | 1513 | htb_precompute_ratedata(&cl->ceil); |
1514 | |||
1515 | cl->buffer = hopt->buffer << PSCHED_SHIFT; | ||
1516 | cl->cbuffer = hopt->cbuffer << PSCHED_SHIFT; | ||
1517 | |||
1469 | sch_tree_unlock(sch); | 1518 | sch_tree_unlock(sch); |
1470 | 1519 | ||
1471 | qdisc_class_hash_grow(sch, &q->clhash); | 1520 | qdisc_class_hash_grow(sch, &q->clhash); |
@@ -1474,10 +1523,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1474 | return 0; | 1523 | return 0; |
1475 | 1524 | ||
1476 | failure: | 1525 | failure: |
1477 | if (rtab) | ||
1478 | qdisc_put_rtab(rtab); | ||
1479 | if (ctab) | ||
1480 | qdisc_put_rtab(ctab); | ||
1481 | return err; | 1526 | return err; |
1482 | } | 1527 | } |
1483 | 1528 | ||
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 9687fa1c2275..6ed37652a4c3 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
@@ -1,7 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * net/sched/sch_qfq.c Quick Fair Queueing Scheduler. | 2 | * net/sched/sch_qfq.c Quick Fair Queueing Plus Scheduler. |
3 | * | 3 | * |
4 | * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente. | 4 | * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente. |
5 | * Copyright (c) 2012 Paolo Valente. | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
@@ -19,12 +20,18 @@ | |||
19 | #include <net/pkt_cls.h> | 20 | #include <net/pkt_cls.h> |
20 | 21 | ||
21 | 22 | ||
22 | /* Quick Fair Queueing | 23 | /* Quick Fair Queueing Plus |
23 | =================== | 24 | ======================== |
24 | 25 | ||
25 | Sources: | 26 | Sources: |
26 | 27 | ||
27 | Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient | 28 | [1] Paolo Valente, |
29 | "Reducing the Execution Time of Fair-Queueing Schedulers." | ||
30 | http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf | ||
31 | |||
32 | Sources for QFQ: | ||
33 | |||
34 | [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient | ||
28 | Packet Scheduling with Tight Bandwidth Distribution Guarantees." | 35 | Packet Scheduling with Tight Bandwidth Distribution Guarantees." |
29 | 36 | ||
30 | See also: | 37 | See also: |
@@ -33,6 +40,20 @@ | |||
33 | 40 | ||
34 | /* | 41 | /* |
35 | 42 | ||
43 | QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES | ||
44 | classes. Each aggregate is timestamped with a virtual start time S | ||
45 | and a virtual finish time F, and scheduled according to its | ||
46 | timestamps. S and F are computed as a function of a system virtual | ||
47 | time function V. The classes within each aggregate are instead | ||
48 | scheduled with DRR. | ||
49 | |||
50 | To speed up operations, QFQ+ also divides aggregates into a limited | ||
51 | number of groups. Which group a class belongs to depends on the | ||
52 | ratio between the maximum packet length for the class and the weight | ||
53 | of the class. Groups have their own S and F. In the end, QFQ+ | ||
54 | schedules groups, then aggregates within groups, then classes within | ||
55 | aggregates. See [1] and [2] for a full description. | ||
56 | |||
36 | Virtual time computations. | 57 | Virtual time computations. |
37 | 58 | ||
38 | S, F and V are all computed in fixed point arithmetic with | 59 | S, F and V are all computed in fixed point arithmetic with |
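A small stand-alone illustration of the fixed-point timestamping described above, editorial and not from the patch; the weight and budget are made-up example values:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 30			/* same fixed-point precision as the file uses */
#define ONE_FP    (1ULL << FRAC_BITS)

int main(void)
{
	uint32_t weight = 4, budget = 1500;	/* example aggregate parameters */
	uint64_t inv_w  = ONE_FP / weight;	/* scaled 1/weight */
	uint64_t V = 0, S, F;

	S = V;					/* aggregate becomes active at the current V */
	F = S + (uint64_t)budget * inv_w;	/* finish time after spending the whole budget */

	printf("S=%llu F=%llu\n", (unsigned long long)S, (unsigned long long)F);
	return 0;
}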
@@ -76,27 +97,28 @@ | |||
76 | #define QFQ_MAX_SLOTS 32 | 97 | #define QFQ_MAX_SLOTS 32 |
77 | 98 | ||
78 | /* | 99 | /* |
79 | * Shifts used for class<->group mapping. We allow class weights that are | 100 | * Shifts used for aggregate<->group mapping. We allow class weights that are |
80 | * in the range [1, 2^MAX_WSHIFT], and we try to map each class i to the | 101 | * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the |
81 | * group with the smallest index that can support the L_i / r_i configured | 102 | * group with the smallest index that can support the L_i / r_i configured |
82 | * for the class. | 103 | * for the classes in the aggregate. |
83 | * | 104 | * |
84 | * grp->index is the index of the group; and grp->slot_shift | 105 | * grp->index is the index of the group; and grp->slot_shift |
85 | * is the shift for the corresponding (scaled) sigma_i. | 106 | * is the shift for the corresponding (scaled) sigma_i. |
86 | */ | 107 | */ |
87 | #define QFQ_MAX_INDEX 24 | 108 | #define QFQ_MAX_INDEX 24 |
88 | #define QFQ_MAX_WSHIFT 12 | 109 | #define QFQ_MAX_WSHIFT 10 |
89 | 110 | ||
90 | #define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT) | 111 | #define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */ |
91 | #define QFQ_MAX_WSUM (16*QFQ_MAX_WEIGHT) | 112 | #define QFQ_MAX_WSUM (64*QFQ_MAX_WEIGHT) |
92 | 113 | ||
93 | #define FRAC_BITS 30 /* fixed point arithmetic */ | 114 | #define FRAC_BITS 30 /* fixed point arithmetic */ |
94 | #define ONE_FP (1UL << FRAC_BITS) | 115 | #define ONE_FP (1UL << FRAC_BITS) |
95 | #define IWSUM (ONE_FP/QFQ_MAX_WSUM) | 116 | #define IWSUM (ONE_FP/QFQ_MAX_WSUM) |
96 | 117 | ||
97 | #define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */ | 118 | #define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */ |
98 | #define QFQ_MIN_SLOT_SHIFT (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX) | 119 | #define QFQ_MIN_LMAX 512 /* see qfq_slot_insert */ |
99 | #define QFQ_MIN_LMAX 256 /* min possible lmax for a class */ | 120 | |
121 | #define QFQ_MAX_AGG_CLASSES 8 /* max num classes per aggregate allowed */ | ||
100 | 122 | ||
101 | /* | 123 | /* |
102 | * Possible group states. These values are used as indexes for the bitmaps | 124 | * Possible group states. These values are used as indexes for the bitmaps |
@@ -106,6 +128,8 @@ enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE }; | |||
106 | 128 | ||
107 | struct qfq_group; | 129 | struct qfq_group; |
108 | 130 | ||
131 | struct qfq_aggregate; | ||
132 | |||
109 | struct qfq_class { | 133 | struct qfq_class { |
110 | struct Qdisc_class_common common; | 134 | struct Qdisc_class_common common; |
111 | 135 | ||
@@ -116,7 +140,12 @@ struct qfq_class { | |||
116 | struct gnet_stats_queue qstats; | 140 | struct gnet_stats_queue qstats; |
117 | struct gnet_stats_rate_est rate_est; | 141 | struct gnet_stats_rate_est rate_est; |
118 | struct Qdisc *qdisc; | 142 | struct Qdisc *qdisc; |
143 | struct list_head alist; /* Link for active-classes list. */ | ||
144 | struct qfq_aggregate *agg; /* Parent aggregate. */ | ||
145 | int deficit; /* DRR deficit counter. */ | ||
146 | }; | ||
119 | 147 | ||
148 | struct qfq_aggregate { | ||
120 | struct hlist_node next; /* Link for the slot list. */ | 149 | struct hlist_node next; /* Link for the slot list. */ |
121 | u64 S, F; /* flow timestamps (exact) */ | 150 | u64 S, F; /* flow timestamps (exact) */ |
122 | 151 | ||
@@ -127,8 +156,18 @@ struct qfq_class { | |||
127 | struct qfq_group *grp; | 156 | struct qfq_group *grp; |
128 | 157 | ||
129 | /* these are copied from the flowset. */ | 158 | /* these are copied from the flowset. */ |
130 | u32 inv_w; /* ONE_FP/weight */ | 159 | u32 class_weight; /* Weight of each class in this aggregate. */ |
131 | u32 lmax; /* Max packet size for this flow. */ | 160 | /* Max pkt size for the classes in this aggregate, DRR quantum. */ |
161 | int lmax; | ||
162 | |||
163 | u32 inv_w; /* ONE_FP/(sum of weights of classes in aggr.). */ | ||
164 | u32 budgetmax; /* Max budget for this aggregate. */ | ||
165 | u32 initial_budget, budget; /* Initial and current budget. */ | ||
166 | |||
167 | int num_classes; /* Number of classes in this aggr. */ | ||
168 | struct list_head active; /* DRR queue of active classes. */ | ||
169 | |||
170 | struct hlist_node nonfull_next; /* See nonfull_aggs in qfq_sched. */ | ||
132 | }; | 171 | }; |
133 | 172 | ||
134 | struct qfq_group { | 173 | struct qfq_group { |
@@ -138,7 +177,7 @@ struct qfq_group { | |||
138 | unsigned int front; /* Index of the front slot. */ | 177 | unsigned int front; /* Index of the front slot. */ |
139 | unsigned long full_slots; /* non-empty slots */ | 178 | unsigned long full_slots; /* non-empty slots */ |
140 | 179 | ||
141 | /* Array of RR lists of active classes. */ | 180 | /* Array of RR lists of active aggregates. */ |
142 | struct hlist_head slots[QFQ_MAX_SLOTS]; | 181 | struct hlist_head slots[QFQ_MAX_SLOTS]; |
143 | }; | 182 | }; |
144 | 183 | ||
@@ -146,13 +185,28 @@ struct qfq_sched { | |||
146 | struct tcf_proto *filter_list; | 185 | struct tcf_proto *filter_list; |
147 | struct Qdisc_class_hash clhash; | 186 | struct Qdisc_class_hash clhash; |
148 | 187 | ||
149 | u64 V; /* Precise virtual time. */ | 188 | u64 oldV, V; /* Precise virtual times. */ |
150 | u32 wsum; /* weight sum */ | 189 | struct qfq_aggregate *in_serv_agg; /* Aggregate being served. */ |
190 | u32 num_active_agg; /* Num. of active aggregates */ | ||
191 | u32 wsum; /* weight sum */ | ||
151 | 192 | ||
152 | unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */ | 193 | unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */ |
153 | struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */ | 194 | struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */ |
195 | u32 min_slot_shift; /* Index of the group-0 bit in the bitmaps. */ | ||
196 | |||
197 | u32 max_agg_classes; /* Max number of classes per aggr. */ | ||
198 | struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */ | ||
154 | }; | 199 | }; |
155 | 200 | ||
201 | /* | ||
202 | * Possible reasons why the timestamps of an aggregate are updated: | ||
203 | * enqueue: the aggregate switches from idle to active and must be scheduled | ||
204 | * for service | ||
205 | * requeue: the aggregate finishes its budget, so it stops being served and | ||
206 | * must be rescheduled for service | ||
207 | */ | ||
208 | enum update_reason {enqueue, requeue}; | ||
209 | |||
156 | static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid) | 210 | static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid) |
157 | { | 211 | { |
158 | struct qfq_sched *q = qdisc_priv(sch); | 212 | struct qfq_sched *q = qdisc_priv(sch); |
@@ -182,18 +236,18 @@ static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = { | |||
182 | * index = log_2(maxlen/weight) but we need to apply the scaling. | 236 | * index = log_2(maxlen/weight) but we need to apply the scaling. |
183 | * This is used only once at flow creation. | 237 | * This is used only once at flow creation. |
184 | */ | 238 | */ |
185 | static int qfq_calc_index(u32 inv_w, unsigned int maxlen) | 239 | static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift) |
186 | { | 240 | { |
187 | u64 slot_size = (u64)maxlen * inv_w; | 241 | u64 slot_size = (u64)maxlen * inv_w; |
188 | unsigned long size_map; | 242 | unsigned long size_map; |
189 | int index = 0; | 243 | int index = 0; |
190 | 244 | ||
191 | size_map = slot_size >> QFQ_MIN_SLOT_SHIFT; | 245 | size_map = slot_size >> min_slot_shift; |
192 | if (!size_map) | 246 | if (!size_map) |
193 | goto out; | 247 | goto out; |
194 | 248 | ||
195 | index = __fls(size_map) + 1; /* basically a log_2 */ | 249 | index = __fls(size_map) + 1; /* basically a log_2 */ |
196 | index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1))); | 250 | index -= !(slot_size - (1ULL << (index + min_slot_shift - 1))); |
197 | 251 | ||
198 | if (index < 0) | 252 | if (index < 0) |
199 | index = 0; | 253 | index = 0; |
@@ -204,66 +258,150 @@ out: | |||
204 | return index; | 258 | return index; |
205 | } | 259 | } |
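A user-space sketch of the index computation above; __fls() is replaced by a portable loop, and the example parameters (per-class weight 8, an 8-class aggregate of 1500-byte classes, min_slot_shift of 25) are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 30
#define ONE_FP    (1UL << FRAC_BITS)

static int fls64p(uint64_t x)		/* 1-based position of the top set bit */
{
	int bit = 0;
	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

static int calc_index(uint32_t inv_w, unsigned int maxlen, uint32_t min_slot_shift)
{
	uint64_t slot_size = (uint64_t)maxlen * inv_w;
	uint64_t size_map  = slot_size >> min_slot_shift;
	int index = 0;

	if (!size_map)
		return 0;

	index = fls64p(size_map);	/* basically a log_2 */
	index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));
	return index < 0 ? 0 : index;
}

int main(void)
{
	/* min_slot_shift = FRAC_BITS + QFQ_MTU_SHIFT + log2(8) - QFQ_MAX_INDEX = 25 */
	printf("index=%d\n", calc_index(ONE_FP / 8, 8 * 1500, 25));	/* prints 16 */
	return 0;
}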
206 | 260 | ||
207 | /* Length of the next packet (0 if the queue is empty). */ | 261 | static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *); |
208 | static unsigned int qdisc_peek_len(struct Qdisc *sch) | 262 | static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *, |
263 | enum update_reason); | ||
264 | |||
265 | static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg, | ||
266 | u32 lmax, u32 weight) | ||
209 | { | 267 | { |
210 | struct sk_buff *skb; | 268 | INIT_LIST_HEAD(&agg->active); |
269 | hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); | ||
270 | |||
271 | agg->lmax = lmax; | ||
272 | agg->class_weight = weight; | ||
273 | } | ||
274 | |||
275 | static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q, | ||
276 | u32 lmax, u32 weight) | ||
277 | { | ||
278 | struct qfq_aggregate *agg; | ||
279 | struct hlist_node *n; | ||
280 | |||
281 | hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next) | ||
282 | if (agg->lmax == lmax && agg->class_weight == weight) | ||
283 | return agg; | ||
284 | |||
285 | return NULL; | ||
286 | } | ||
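qfq_find_agg() above simply searches the list of not-yet-full aggregates for one with the same (lmax, weight) pair. A stand-alone sketch of the same lookup over a plain array, purely editorial (the kernel walks an hlist; the values are illustrative):

#include <stddef.h>
#include <stdio.h>

struct agg {
	unsigned int lmax;		/* max packet size of the classes */
	unsigned int class_weight;	/* weight of each class in the aggregate */
};

static struct agg *find_agg(struct agg *nonfull, size_t n,
			    unsigned int lmax, unsigned int weight)
{
	for (size_t i = 0; i < n; i++)
		if (nonfull[i].lmax == lmax && nonfull[i].class_weight == weight)
			return &nonfull[i];
	return NULL;	/* caller allocates a new aggregate, as qfq_change_class() does */
}

int main(void)
{
	struct agg aggs[] = { { 1500, 1 }, { 1500, 4 } };
	struct agg *a = find_agg(aggs, 2, 1500, 4);

	printf("found aggregate with class weight %u\n", a ? a->class_weight : 0);
	return 0;
}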
287 | |||
211 | 288 | ||
212 | skb = sch->ops->peek(sch); | 289 | /* Update aggregate as a function of the new number of classes. */ |
213 | return skb ? qdisc_pkt_len(skb) : 0; | 290 | static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg, |
291 | int new_num_classes) | ||
292 | { | ||
293 | u32 new_agg_weight; | ||
294 | |||
295 | if (new_num_classes == q->max_agg_classes) | ||
296 | hlist_del_init(&agg->nonfull_next); | ||
297 | |||
298 | if (agg->num_classes > new_num_classes && | ||
299 | new_num_classes == q->max_agg_classes - 1) /* agg no more full */ | ||
300 | hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); | ||
301 | |||
302 | agg->budgetmax = new_num_classes * agg->lmax; | ||
303 | new_agg_weight = agg->class_weight * new_num_classes; | ||
304 | agg->inv_w = ONE_FP/new_agg_weight; | ||
305 | |||
306 | if (agg->grp == NULL) { | ||
307 | int i = qfq_calc_index(agg->inv_w, agg->budgetmax, | ||
308 | q->min_slot_shift); | ||
309 | agg->grp = &q->groups[i]; | ||
310 | } | ||
311 | |||
312 | q->wsum += | ||
313 | (int) agg->class_weight * (new_num_classes - agg->num_classes); | ||
314 | |||
315 | agg->num_classes = new_num_classes; | ||
316 | } | ||
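The arithmetic in qfq_update_agg() is worth spelling out: the aggregate's weight is the per-class weight times the number of classes, its maximum budget is one lmax per class, and inv_w is the fixed-point inverse of the aggregate weight. A tiny editorial sketch with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 30
#define ONE_FP    (1UL << FRAC_BITS)

int main(void)
{
	uint32_t class_weight = 4, lmax = 1500, num_classes = 3;

	uint32_t agg_weight = class_weight * num_classes;	/* weight of the aggregate */
	uint32_t inv_w      = ONE_FP / agg_weight;		/* ONE_FP / (sum of class weights) */
	uint32_t budgetmax  = num_classes * lmax;		/* one lmax-sized quantum per class */

	printf("agg_weight=%u inv_w=%u budgetmax=%u\n", agg_weight, inv_w, budgetmax);
	return 0;
}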
317 | |||
318 | /* Add class to aggregate. */ | ||
319 | static void qfq_add_to_agg(struct qfq_sched *q, | ||
320 | struct qfq_aggregate *agg, | ||
321 | struct qfq_class *cl) | ||
322 | { | ||
323 | cl->agg = agg; | ||
324 | |||
325 | qfq_update_agg(q, agg, agg->num_classes+1); | ||
326 | if (cl->qdisc->q.qlen > 0) { /* adding an active class */ | ||
327 | list_add_tail(&cl->alist, &agg->active); | ||
328 | if (list_first_entry(&agg->active, struct qfq_class, alist) == | ||
329 | cl && q->in_serv_agg != agg) /* agg was inactive */ | ||
330 | qfq_activate_agg(q, agg, enqueue); /* schedule agg */ | ||
331 | } | ||
214 | } | 332 | } |
215 | 333 | ||
216 | static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *); | 334 | static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *); |
217 | static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl, | ||
218 | unsigned int len); | ||
219 | 335 | ||
220 | static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl, | 336 | static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg) |
221 | u32 lmax, u32 inv_w, int delta_w) | ||
222 | { | 337 | { |
223 | int i; | 338 | if (!hlist_unhashed(&agg->nonfull_next)) |
339 | hlist_del_init(&agg->nonfull_next); | ||
340 | if (q->in_serv_agg == agg) | ||
341 | q->in_serv_agg = qfq_choose_next_agg(q); | ||
342 | kfree(agg); | ||
343 | } | ||
224 | 344 | ||
225 | /* update qfq-specific data */ | 345 | /* Deschedule class from within its parent aggregate. */ |
226 | cl->lmax = lmax; | 346 | static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl) |
227 | cl->inv_w = inv_w; | 347 | { |
228 | i = qfq_calc_index(cl->inv_w, cl->lmax); | 348 | struct qfq_aggregate *agg = cl->agg; |
229 | 349 | ||
230 | cl->grp = &q->groups[i]; | ||
231 | 350 | ||
232 | q->wsum += delta_w; | 351 | list_del(&cl->alist); /* remove from RR queue of the aggregate */ |
352 | if (list_empty(&agg->active)) /* agg is now inactive */ | ||
353 | qfq_deactivate_agg(q, agg); | ||
233 | } | 354 | } |
234 | 355 | ||
235 | static void qfq_update_reactivate_class(struct qfq_sched *q, | 356 | /* Remove class from its parent aggregate. */ |
236 | struct qfq_class *cl, | 357 | static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl) |
237 | u32 inv_w, u32 lmax, int delta_w) | ||
238 | { | 358 | { |
239 | bool need_reactivation = false; | 359 | struct qfq_aggregate *agg = cl->agg; |
240 | int i = qfq_calc_index(inv_w, lmax); | ||
241 | 360 | ||
242 | if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) { | 361 | cl->agg = NULL; |
243 | /* | 362 | if (agg->num_classes == 1) { /* agg being emptied, destroy it */ |
244 | * shift cl->F back, to not charge the | 363 | qfq_destroy_agg(q, agg); |
245 | * class for the not-yet-served head | 364 | return; |
246 | * packet | ||
247 | */ | ||
248 | cl->F = cl->S; | ||
249 | /* remove class from its slot in the old group */ | ||
250 | qfq_deactivate_class(q, cl); | ||
251 | need_reactivation = true; | ||
252 | } | 365 | } |
366 | qfq_update_agg(q, agg, agg->num_classes-1); | ||
367 | } | ||
253 | 368 | ||
254 | qfq_update_class_params(q, cl, lmax, inv_w, delta_w); | 369 | /* Deschedule class and remove it from its parent aggregate. */ |
370 | static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl) | ||
371 | { | ||
372 | if (cl->qdisc->q.qlen > 0) /* class is active */ | ||
373 | qfq_deactivate_class(q, cl); | ||
255 | 374 | ||
256 | if (need_reactivation) /* activate in new group */ | 375 | qfq_rm_from_agg(q, cl); |
257 | qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc)); | ||
258 | } | 376 | } |
259 | 377 | ||
378 | /* Move class to a new aggregate, matching the new class weight and/or lmax */ | ||
379 | static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight, | ||
380 | u32 lmax) | ||
381 | { | ||
382 | struct qfq_sched *q = qdisc_priv(sch); | ||
383 | struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight); | ||
384 | |||
385 | if (new_agg == NULL) { /* create new aggregate */ | ||
386 | new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC); | ||
387 | if (new_agg == NULL) | ||
388 | return -ENOBUFS; | ||
389 | qfq_init_agg(q, new_agg, lmax, weight); | ||
390 | } | ||
391 | qfq_deact_rm_from_agg(q, cl); | ||
392 | qfq_add_to_agg(q, new_agg, cl); | ||
393 | |||
394 | return 0; | ||
395 | } | ||
260 | 396 | ||
261 | static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | 397 | static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, |
262 | struct nlattr **tca, unsigned long *arg) | 398 | struct nlattr **tca, unsigned long *arg) |
263 | { | 399 | { |
264 | struct qfq_sched *q = qdisc_priv(sch); | 400 | struct qfq_sched *q = qdisc_priv(sch); |
265 | struct qfq_class *cl = (struct qfq_class *)*arg; | 401 | struct qfq_class *cl = (struct qfq_class *)*arg; |
402 | bool existing = false; | ||
266 | struct nlattr *tb[TCA_QFQ_MAX + 1]; | 403 | struct nlattr *tb[TCA_QFQ_MAX + 1]; |
404 | struct qfq_aggregate *new_agg = NULL; | ||
267 | u32 weight, lmax, inv_w; | 405 | u32 weight, lmax, inv_w; |
268 | int err; | 406 | int err; |
269 | int delta_w; | 407 | int delta_w; |
@@ -286,15 +424,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
286 | } else | 424 | } else |
287 | weight = 1; | 425 | weight = 1; |
288 | 426 | ||
289 | inv_w = ONE_FP / weight; | ||
290 | weight = ONE_FP / inv_w; | ||
291 | delta_w = weight - (cl ? ONE_FP / cl->inv_w : 0); | ||
292 | if (q->wsum + delta_w > QFQ_MAX_WSUM) { | ||
293 | pr_notice("qfq: total weight out of range (%u + %u)\n", | ||
294 | delta_w, q->wsum); | ||
295 | return -EINVAL; | ||
296 | } | ||
297 | |||
298 | if (tb[TCA_QFQ_LMAX]) { | 427 | if (tb[TCA_QFQ_LMAX]) { |
299 | lmax = nla_get_u32(tb[TCA_QFQ_LMAX]); | 428 | lmax = nla_get_u32(tb[TCA_QFQ_LMAX]); |
300 | if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) { | 429 | if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) { |
@@ -304,7 +433,23 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
304 | } else | 433 | } else |
305 | lmax = psched_mtu(qdisc_dev(sch)); | 434 | lmax = psched_mtu(qdisc_dev(sch)); |
306 | 435 | ||
307 | if (cl != NULL) { | 436 | inv_w = ONE_FP / weight; |
437 | weight = ONE_FP / inv_w; | ||
438 | |||
439 | if (cl != NULL && | ||
440 | lmax == cl->agg->lmax && | ||
441 | weight == cl->agg->class_weight) | ||
442 | return 0; /* nothing to change */ | ||
443 | |||
444 | delta_w = weight - (cl ? cl->agg->class_weight : 0); | ||
445 | |||
446 | if (q->wsum + delta_w > QFQ_MAX_WSUM) { | ||
447 | pr_notice("qfq: total weight out of range (%d + %u)\n", | ||
448 | delta_w, q->wsum); | ||
449 | return -EINVAL; | ||
450 | } | ||
451 | |||
452 | if (cl != NULL) { /* modify existing class */ | ||
308 | if (tca[TCA_RATE]) { | 453 | if (tca[TCA_RATE]) { |
309 | err = gen_replace_estimator(&cl->bstats, &cl->rate_est, | 454 | err = gen_replace_estimator(&cl->bstats, &cl->rate_est, |
310 | qdisc_root_sleeping_lock(sch), | 455 | qdisc_root_sleeping_lock(sch), |
@@ -312,25 +457,18 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
312 | if (err) | 457 | if (err) |
313 | return err; | 458 | return err; |
314 | } | 459 | } |
315 | 460 | existing = true; | |
316 | if (lmax == cl->lmax && inv_w == cl->inv_w) | 461 | goto set_change_agg; |
317 | return 0; /* nothing to update */ | ||
318 | |||
319 | sch_tree_lock(sch); | ||
320 | qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w); | ||
321 | sch_tree_unlock(sch); | ||
322 | |||
323 | return 0; | ||
324 | } | 462 | } |
325 | 463 | ||
464 | /* create and init new class */ | ||
326 | cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL); | 465 | cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL); |
327 | if (cl == NULL) | 466 | if (cl == NULL) |
328 | return -ENOBUFS; | 467 | return -ENOBUFS; |
329 | 468 | ||
330 | cl->refcnt = 1; | 469 | cl->refcnt = 1; |
331 | cl->common.classid = classid; | 470 | cl->common.classid = classid; |
332 | 471 | cl->deficit = lmax; | |
333 | qfq_update_class_params(q, cl, lmax, inv_w, delta_w); | ||
334 | 472 | ||
335 | cl->qdisc = qdisc_create_dflt(sch->dev_queue, | 473 | cl->qdisc = qdisc_create_dflt(sch->dev_queue, |
336 | &pfifo_qdisc_ops, classid); | 474 | &pfifo_qdisc_ops, classid); |
@@ -341,11 +479,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
341 | err = gen_new_estimator(&cl->bstats, &cl->rate_est, | 479 | err = gen_new_estimator(&cl->bstats, &cl->rate_est, |
342 | qdisc_root_sleeping_lock(sch), | 480 | qdisc_root_sleeping_lock(sch), |
343 | tca[TCA_RATE]); | 481 | tca[TCA_RATE]); |
344 | if (err) { | 482 | if (err) |
345 | qdisc_destroy(cl->qdisc); | 483 | goto destroy_class; |
346 | kfree(cl); | ||
347 | return err; | ||
348 | } | ||
349 | } | 484 | } |
350 | 485 | ||
351 | sch_tree_lock(sch); | 486 | sch_tree_lock(sch); |
@@ -354,19 +489,39 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
354 | 489 | ||
355 | qdisc_class_hash_grow(sch, &q->clhash); | 490 | qdisc_class_hash_grow(sch, &q->clhash); |
356 | 491 | ||
492 | set_change_agg: | ||
493 | sch_tree_lock(sch); | ||
494 | new_agg = qfq_find_agg(q, lmax, weight); | ||
495 | if (new_agg == NULL) { /* create new aggregate */ | ||
496 | sch_tree_unlock(sch); | ||
497 | new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL); | ||
498 | if (new_agg == NULL) { | ||
499 | err = -ENOBUFS; | ||
500 | gen_kill_estimator(&cl->bstats, &cl->rate_est); | ||
501 | goto destroy_class; | ||
502 | } | ||
503 | sch_tree_lock(sch); | ||
504 | qfq_init_agg(q, new_agg, lmax, weight); | ||
505 | } | ||
506 | if (existing) | ||
507 | qfq_deact_rm_from_agg(q, cl); | ||
508 | qfq_add_to_agg(q, new_agg, cl); | ||
509 | sch_tree_unlock(sch); | ||
510 | |||
357 | *arg = (unsigned long)cl; | 511 | *arg = (unsigned long)cl; |
358 | return 0; | 512 | return 0; |
513 | |||
514 | destroy_class: | ||
515 | qdisc_destroy(cl->qdisc); | ||
516 | kfree(cl); | ||
517 | return err; | ||
359 | } | 518 | } |
360 | 519 | ||
361 | static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl) | 520 | static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl) |
362 | { | 521 | { |
363 | struct qfq_sched *q = qdisc_priv(sch); | 522 | struct qfq_sched *q = qdisc_priv(sch); |
364 | 523 | ||
365 | if (cl->inv_w) { | 524 | qfq_rm_from_agg(q, cl); |
366 | q->wsum -= ONE_FP / cl->inv_w; | ||
367 | cl->inv_w = 0; | ||
368 | } | ||
369 | |||
370 | gen_kill_estimator(&cl->bstats, &cl->rate_est); | 525 | gen_kill_estimator(&cl->bstats, &cl->rate_est); |
371 | qdisc_destroy(cl->qdisc); | 526 | qdisc_destroy(cl->qdisc); |
372 | kfree(cl); | 527 | kfree(cl); |
@@ -481,8 +636,8 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg, | |||
481 | nest = nla_nest_start(skb, TCA_OPTIONS); | 636 | nest = nla_nest_start(skb, TCA_OPTIONS); |
482 | if (nest == NULL) | 637 | if (nest == NULL) |
483 | goto nla_put_failure; | 638 | goto nla_put_failure; |
484 | if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) || | 639 | if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) || |
485 | nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax)) | 640 | nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax)) |
486 | goto nla_put_failure; | 641 | goto nla_put_failure; |
487 | return nla_nest_end(skb, nest); | 642 | return nla_nest_end(skb, nest); |
488 | 643 | ||
@@ -500,8 +655,8 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg, | |||
500 | memset(&xstats, 0, sizeof(xstats)); | 655 | memset(&xstats, 0, sizeof(xstats)); |
501 | cl->qdisc->qstats.qlen = cl->qdisc->q.qlen; | 656 | cl->qdisc->qstats.qlen = cl->qdisc->q.qlen; |
502 | 657 | ||
503 | xstats.weight = ONE_FP/cl->inv_w; | 658 | xstats.weight = cl->agg->class_weight; |
504 | xstats.lmax = cl->lmax; | 659 | xstats.lmax = cl->agg->lmax; |
505 | 660 | ||
506 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || | 661 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || |
507 | gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || | 662 | gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || |
@@ -652,16 +807,16 @@ static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F) | |||
652 | * perhaps | 807 | * perhaps |
653 | * | 808 | * |
654 | old_V ^= q->V; | 809 | old_V ^= q->V; |
655 | old_V >>= QFQ_MIN_SLOT_SHIFT; | 810 | old_V >>= q->min_slot_shift; |
656 | if (old_V) { | 811 | if (old_V) { |
657 | ... | 812 | ... |
658 | } | 813 | } |
659 | * | 814 | * |
660 | */ | 815 | */ |
661 | static void qfq_make_eligible(struct qfq_sched *q, u64 old_V) | 816 | static void qfq_make_eligible(struct qfq_sched *q) |
662 | { | 817 | { |
663 | unsigned long vslot = q->V >> QFQ_MIN_SLOT_SHIFT; | 818 | unsigned long vslot = q->V >> q->min_slot_shift; |
664 | unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT; | 819 | unsigned long old_vslot = q->oldV >> q->min_slot_shift; |
665 | 820 | ||
666 | if (vslot != old_vslot) { | 821 | if (vslot != old_vslot) { |
667 | unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1; | 822 | unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1; |
@@ -672,34 +827,38 @@ static void qfq_make_eligible(struct qfq_sched *q, u64 old_V) | |||
672 | 827 | ||
673 | 828 | ||
674 | /* | 829 | /* |
675 | * If the weight and lmax (max_pkt_size) of the classes do not change, | 830 | * The index of the slot in which the aggregate is to be inserted must |
676 | * then QFQ guarantees that the slot index is never higher than | 831 | * not be higher than QFQ_MAX_SLOTS-2. There is a '-2' and not a '-1' |
677 | * 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM). | 832 | * because the start time of the group may be moved backward by one |
833 | * slot after the aggregate has been inserted, and this would cause | ||
834 | * non-empty slots to be right-shifted by one position. | ||
678 | * | 835 | * |
679 | * With the current values of the above constants, the index is | 836 | * If the weight and lmax (max_pkt_size) of the classes do not change, |
680 | * then guaranteed to never be higher than 2 + 256 * (1 / 16) = 18. | 837 | * then QFQ+ does meet the above constraint according to the current |
838 | * values of its parameters. In fact, if the weight and lmax of the | ||
839 | * classes do not change, then, from the theory, QFQ+ guarantees that | ||
840 | * the slot index is never higher than | ||
841 | * 2 + QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * | ||
842 | * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM) = 2 + 8 * 128 * (1 / 64) = 18 | ||
681 | * | 843 | * |
682 | * When the weight of a class is increased or the lmax of the class is | 844 | * When the weight of a class is increased or the lmax of the class is |
683 | * decreased, a new class with smaller slot size may happen to be | 845 | * decreased, a new aggregate with smaller slot size than the original |
684 | * activated. The activation of this class should be properly delayed | 846 | * parent aggregate of the class may happen to be activated. The |
685 | * to when the service of the class has finished in the ideal system | 847 | * activation of this aggregate should be properly delayed to when the |
686 | * tracked by QFQ. If the activation of the class is not delayed to | 848 | * service of the class has finished in the ideal system tracked by |
687 | * this reference time instant, then this class may be unjustly served | 849 | * QFQ+. If the activation of the aggregate is not delayed to this |
688 | * before other classes waiting for service. This may cause | 850 | * reference time instant, then this aggregate may be unjustly served |
689 | * (unfrequently) the above bound to the slot index to be violated for | 851 | * before other aggregates waiting for service. This may cause the |
690 | * some of these unlucky classes. | 852 | * above bound to the slot index to be violated for some of these |
853 | * unlucky aggregates. | ||
691 | * | 854 | * |
692 | * Instead of delaying the activation of the new class, which is quite | 855 | * Instead of delaying the activation of the new aggregate, which is |
693 | * complex, the following inaccurate but simple solution is used: if | 856 | * quite complex, the following inaccurate but simple solution is used: |
694 | * the slot index is higher than QFQ_MAX_SLOTS-2, then the timestamps | 857 | * if the slot index is higher than QFQ_MAX_SLOTS-2, then the |
695 | * of the class are shifted backward so as to let the slot index | 858 | * timestamps of the aggregate are shifted backward so as to let the |
696 | * become equal to QFQ_MAX_SLOTS-2. This threshold is used because, if | 859 | * slot index become equal to QFQ_MAX_SLOTS-2. |
697 | * the slot index is above it, then the data structure implementing | ||
698 | * the bucket list either gets immediately corrupted or may get | ||
699 | * corrupted on a possible next packet arrival that causes the start | ||
700 | * time of the group to be shifted backward. | ||
701 | */ | 860 | */ |
702 | static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl, | 861 | static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg, |
703 | u64 roundedS) | 862 | u64 roundedS) |
704 | { | 863 | { |
705 | u64 slot = (roundedS - grp->S) >> grp->slot_shift; | 864 | u64 slot = (roundedS - grp->S) >> grp->slot_shift; |
@@ -708,22 +867,22 @@ static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl, | |||
708 | if (unlikely(slot > QFQ_MAX_SLOTS - 2)) { | 867 | if (unlikely(slot > QFQ_MAX_SLOTS - 2)) { |
709 | u64 deltaS = roundedS - grp->S - | 868 | u64 deltaS = roundedS - grp->S - |
710 | ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift); | 869 | ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift); |
711 | cl->S -= deltaS; | 870 | agg->S -= deltaS; |
712 | cl->F -= deltaS; | 871 | agg->F -= deltaS; |
713 | slot = QFQ_MAX_SLOTS - 2; | 872 | slot = QFQ_MAX_SLOTS - 2; |
714 | } | 873 | } |
715 | 874 | ||
716 | i = (grp->front + slot) % QFQ_MAX_SLOTS; | 875 | i = (grp->front + slot) % QFQ_MAX_SLOTS; |
717 | 876 | ||
718 | hlist_add_head(&cl->next, &grp->slots[i]); | 877 | hlist_add_head(&agg->next, &grp->slots[i]); |
719 | __set_bit(slot, &grp->full_slots); | 878 | __set_bit(slot, &grp->full_slots); |
720 | } | 879 | } |
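The bound quoted in the comment above can be checked against the constants defined earlier in this file (QFQ_MTU_SHIFT=16, QFQ_MIN_LMAX=512, QFQ_MAX_AGG_CLASSES=8, QFQ_MAX_WEIGHT/QFQ_MAX_WSUM = 1/64); this little arithmetic check is editorial, not part of the patch:

#include <stdio.h>

int main(void)
{
	int max_agg_classes = 8;
	int len_ratio = (1 << 16) / 512;			/* (1<<QFQ_MTU_SHIFT) / QFQ_MIN_LMAX = 128 */
	int bound = 2 + max_agg_classes * len_ratio / 64;	/* QFQ_MAX_WEIGHT/QFQ_MAX_WSUM = 1/64 */

	printf("max slot index = %d\n", bound);	/* 18, safely below QFQ_MAX_SLOTS - 2 = 30 */
	return 0;
}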
721 | 880 | ||
722 | /* Maybe introduce hlist_first_entry?? */ | 881 | /* Maybe introduce hlist_first_entry?? */ |
723 | static struct qfq_class *qfq_slot_head(struct qfq_group *grp) | 882 | static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp) |
724 | { | 883 | { |
725 | return hlist_entry(grp->slots[grp->front].first, | 884 | return hlist_entry(grp->slots[grp->front].first, |
726 | struct qfq_class, next); | 885 | struct qfq_aggregate, next); |
727 | } | 886 | } |
728 | 887 | ||
729 | /* | 888 | /* |
@@ -731,20 +890,20 @@ static struct qfq_class *qfq_slot_head(struct qfq_group *grp) | |||
731 | */ | 890 | */ |
732 | static void qfq_front_slot_remove(struct qfq_group *grp) | 891 | static void qfq_front_slot_remove(struct qfq_group *grp) |
733 | { | 892 | { |
734 | struct qfq_class *cl = qfq_slot_head(grp); | 893 | struct qfq_aggregate *agg = qfq_slot_head(grp); |
735 | 894 | ||
736 | BUG_ON(!cl); | 895 | BUG_ON(!agg); |
737 | hlist_del(&cl->next); | 896 | hlist_del(&agg->next); |
738 | if (hlist_empty(&grp->slots[grp->front])) | 897 | if (hlist_empty(&grp->slots[grp->front])) |
739 | __clear_bit(0, &grp->full_slots); | 898 | __clear_bit(0, &grp->full_slots); |
740 | } | 899 | } |
741 | 900 | ||
742 | /* | 901 | /* |
743 | * Returns the first full queue in a group. As a side effect, | 902 | * Returns the first aggregate in the first non-empty bucket of the |
744 | * adjust the bucket list so the first non-empty bucket is at | 903 | * group. As a side effect, adjusts the bucket list so the first |
745 | * position 0 in full_slots. | 904 | * non-empty bucket is at position 0 in full_slots. |
746 | */ | 905 | */ |
747 | static struct qfq_class *qfq_slot_scan(struct qfq_group *grp) | 906 | static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp) |
748 | { | 907 | { |
749 | unsigned int i; | 908 | unsigned int i; |
750 | 909 | ||
@@ -780,7 +939,7 @@ static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS) | |||
780 | grp->front = (grp->front - i) % QFQ_MAX_SLOTS; | 939 | grp->front = (grp->front - i) % QFQ_MAX_SLOTS; |
781 | } | 940 | } |
782 | 941 | ||
783 | static void qfq_update_eligible(struct qfq_sched *q, u64 old_V) | 942 | static void qfq_update_eligible(struct qfq_sched *q) |
784 | { | 943 | { |
785 | struct qfq_group *grp; | 944 | struct qfq_group *grp; |
786 | unsigned long ineligible; | 945 | unsigned long ineligible; |
@@ -792,137 +951,226 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V) | |||
792 | if (qfq_gt(grp->S, q->V)) | 951 | if (qfq_gt(grp->S, q->V)) |
793 | q->V = grp->S; | 952 | q->V = grp->S; |
794 | } | 953 | } |
795 | qfq_make_eligible(q, old_V); | 954 | qfq_make_eligible(q); |
796 | } | 955 | } |
797 | } | 956 | } |
798 | 957 | ||
799 | /* | 958 | /* Dequeue head packet of the head class in the DRR queue of the aggregate. */ |
800 | * Updates the class, returns true if also the group needs to be updated. | 959 | static void agg_dequeue(struct qfq_aggregate *agg, |
801 | */ | 960 | struct qfq_class *cl, unsigned int len) |
802 | static bool qfq_update_class(struct qfq_group *grp, struct qfq_class *cl) | ||
803 | { | 961 | { |
804 | unsigned int len = qdisc_peek_len(cl->qdisc); | 962 | qdisc_dequeue_peeked(cl->qdisc); |
805 | 963 | ||
806 | cl->S = cl->F; | 964 | cl->deficit -= (int) len; |
807 | if (!len) | ||
808 | qfq_front_slot_remove(grp); /* queue is empty */ | ||
809 | else { | ||
810 | u64 roundedS; | ||
811 | 965 | ||
812 | cl->F = cl->S + (u64)len * cl->inv_w; | 966 | if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */ |
813 | roundedS = qfq_round_down(cl->S, grp->slot_shift); | 967 | list_del(&cl->alist); |
814 | if (roundedS == grp->S) | 968 | else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) { |
815 | return false; | 969 | cl->deficit += agg->lmax; |
816 | 970 | list_move_tail(&cl->alist, &agg->active); | |
817 | qfq_front_slot_remove(grp); | ||
818 | qfq_slot_insert(grp, cl, roundedS); | ||
819 | } | 971 | } |
972 | } | ||
973 | |||
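agg_dequeue() above is plain DRR inside the aggregate: a class starts with a deficit of lmax, each dequeued packet consumes deficit, and when the credit no longer covers the next packet the class is topped up by lmax and moved to the tail. A simplified stand-alone loop (editorial; packet sizes are made up, and the credit check runs before serving, textbook-DRR style, whereas the kernel checks after each dequeue):

#include <stdio.h>

int main(void)
{
	int lmax = 1500, deficit = 1500;	/* deficit starts at lmax */
	int pkts[] = { 600, 600, 600, 600 };	/* hypothetical backlog */

	for (int i = 0; i < 4; i++) {
		if (deficit < pkts[i]) {	/* not enough credit: back to the tail */
			deficit += lmax;
			printf("requeue at tail, deficit=%d\n", deficit);
		}
		deficit -= pkts[i];		/* serve one packet */
		printf("sent %d, deficit=%d\n", pkts[i], deficit);
	}
	return 0;
}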
974 | static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg, | ||
975 | struct qfq_class **cl, | ||
976 | unsigned int *len) | ||
977 | { | ||
978 | struct sk_buff *skb; | ||
820 | 979 | ||
821 | return true; | 980 | *cl = list_first_entry(&agg->active, struct qfq_class, alist); |
981 | skb = (*cl)->qdisc->ops->peek((*cl)->qdisc); | ||
982 | if (skb == NULL) | ||
983 | WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n"); | ||
984 | else | ||
985 | *len = qdisc_pkt_len(skb); | ||
986 | |||
987 | return skb; | ||
988 | } | ||
989 | |||
990 | /* Update F according to the actual service received by the aggregate. */ | ||
991 | static inline void charge_actual_service(struct qfq_aggregate *agg) | ||
992 | { | ||
993 | /* compute the service received by the aggregate */ | ||
994 | u32 service_received = agg->initial_budget - agg->budget; | ||
995 | |||
996 | agg->F = agg->S + (u64)service_received * agg->inv_w; | ||
822 | } | 997 | } |
823 | 998 | ||
824 | static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | 999 | static struct sk_buff *qfq_dequeue(struct Qdisc *sch) |
825 | { | 1000 | { |
826 | struct qfq_sched *q = qdisc_priv(sch); | 1001 | struct qfq_sched *q = qdisc_priv(sch); |
827 | struct qfq_group *grp; | 1002 | struct qfq_aggregate *in_serv_agg = q->in_serv_agg; |
828 | struct qfq_class *cl; | 1003 | struct qfq_class *cl; |
829 | struct sk_buff *skb; | 1004 | struct sk_buff *skb = NULL; |
830 | unsigned int len; | 1005 | /* next-packet len, 0 means no more active classes in in-service agg */ |
831 | u64 old_V; | 1006 | unsigned int len = 0; |
832 | 1007 | ||
833 | if (!q->bitmaps[ER]) | 1008 | if (in_serv_agg == NULL) |
834 | return NULL; | 1009 | return NULL; |
835 | 1010 | ||
836 | grp = qfq_ffs(q, q->bitmaps[ER]); | 1011 | if (!list_empty(&in_serv_agg->active)) |
1012 | skb = qfq_peek_skb(in_serv_agg, &cl, &len); | ||
837 | 1013 | ||
838 | cl = qfq_slot_head(grp); | 1014 | /* |
839 | skb = qdisc_dequeue_peeked(cl->qdisc); | 1015 | * If there are no active classes in the in-service aggregate, |
840 | if (!skb) { | 1016 | * or if the aggregate has not enough budget to serve its next |
841 | WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n"); | 1017 | * class, then choose the next aggregate to serve. |
842 | return NULL; | 1018 | */ |
1019 | if (len == 0 || in_serv_agg->budget < len) { | ||
1020 | charge_actual_service(in_serv_agg); | ||
1021 | |||
1022 | /* recharge the budget of the aggregate */ | ||
1023 | in_serv_agg->initial_budget = in_serv_agg->budget = | ||
1024 | in_serv_agg->budgetmax; | ||
1025 | |||
1026 | if (!list_empty(&in_serv_agg->active)) | ||
1027 | /* | ||
1028 | * Still active: reschedule for | ||
1029 | * service. Possible optimization: if no other | ||
1030 | * aggregate is active, then there is no point | ||
1031 | * in rescheduling this aggregate, and we can | ||
1032 | * just keep it as the in-service one. This | ||
1033 | * should be however a corner case, and to | ||
1034 | * handle it, we would need to maintain an | ||
1035 | * extra num_active_aggs field. | ||
1036 | */ | ||
1037 | qfq_activate_agg(q, in_serv_agg, requeue); | ||
1038 | else if (sch->q.qlen == 0) { /* no aggregate to serve */ | ||
1039 | q->in_serv_agg = NULL; | ||
1040 | return NULL; | ||
1041 | } | ||
1042 | |||
1043 | /* | ||
1044 | * If we get here, there are other aggregates queued: | ||
1045 | * choose the new aggregate to serve. | ||
1046 | */ | ||
1047 | in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q); | ||
1048 | skb = qfq_peek_skb(in_serv_agg, &cl, &len); | ||
843 | } | 1049 | } |
1050 | if (!skb) | ||
1051 | return NULL; | ||
844 | 1052 | ||
845 | sch->q.qlen--; | 1053 | sch->q.qlen--; |
846 | qdisc_bstats_update(sch, skb); | 1054 | qdisc_bstats_update(sch, skb); |
847 | 1055 | ||
848 | old_V = q->V; | 1056 | agg_dequeue(in_serv_agg, cl, len); |
849 | len = qdisc_pkt_len(skb); | 1057 | in_serv_agg->budget -= len; |
850 | q->V += (u64)len * IWSUM; | 1058 | q->V += (u64)len * IWSUM; |
851 | pr_debug("qfq dequeue: len %u F %lld now %lld\n", | 1059 | pr_debug("qfq dequeue: len %u F %lld now %lld\n", |
852 | len, (unsigned long long) cl->F, (unsigned long long) q->V); | 1060 | len, (unsigned long long) in_serv_agg->F, |
1061 | (unsigned long long) q->V); | ||
853 | 1062 | ||
854 | if (qfq_update_class(grp, cl)) { | 1063 | return skb; |
855 | u64 old_F = grp->F; | 1064 | } |
856 | 1065 | ||
857 | cl = qfq_slot_scan(grp); | 1066 | static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q) |
858 | if (!cl) | 1067 | { |
859 | __clear_bit(grp->index, &q->bitmaps[ER]); | 1068 | struct qfq_group *grp; |
860 | else { | 1069 | struct qfq_aggregate *agg, *new_front_agg; |
861 | u64 roundedS = qfq_round_down(cl->S, grp->slot_shift); | 1070 | u64 old_F; |
862 | unsigned int s; | ||
863 | 1071 | ||
864 | if (grp->S == roundedS) | 1072 | qfq_update_eligible(q); |
865 | goto skip_unblock; | 1073 | q->oldV = q->V; |
866 | grp->S = roundedS; | 1074 | |
867 | grp->F = roundedS + (2ULL << grp->slot_shift); | 1075 | if (!q->bitmaps[ER]) |
868 | __clear_bit(grp->index, &q->bitmaps[ER]); | 1076 | return NULL; |
869 | s = qfq_calc_state(q, grp); | 1077 | |
870 | __set_bit(grp->index, &q->bitmaps[s]); | 1078 | grp = qfq_ffs(q, q->bitmaps[ER]); |
871 | } | 1079 | old_F = grp->F; |
1080 | |||
1081 | agg = qfq_slot_head(grp); | ||
872 | 1082 | ||
873 | qfq_unblock_groups(q, grp->index, old_F); | 1083 | /* agg starts to be served, remove it from schedule */ |
1084 | qfq_front_slot_remove(grp); | ||
1085 | |||
1086 | new_front_agg = qfq_slot_scan(grp); | ||
1087 | |||
1088 | if (new_front_agg == NULL) /* group is now inactive, remove from ER */ | ||
1089 | __clear_bit(grp->index, &q->bitmaps[ER]); | ||
1090 | else { | ||
1091 | u64 roundedS = qfq_round_down(new_front_agg->S, | ||
1092 | grp->slot_shift); | ||
1093 | unsigned int s; | ||
1094 | |||
1095 | if (grp->S == roundedS) | ||
1096 | return agg; | ||
1097 | grp->S = roundedS; | ||
1098 | grp->F = roundedS + (2ULL << grp->slot_shift); | ||
1099 | __clear_bit(grp->index, &q->bitmaps[ER]); | ||
1100 | s = qfq_calc_state(q, grp); | ||
1101 | __set_bit(grp->index, &q->bitmaps[s]); | ||
874 | } | 1102 | } |
875 | 1103 | ||
876 | skip_unblock: | 1104 | qfq_unblock_groups(q, grp->index, old_F); |
877 | qfq_update_eligible(q, old_V); | ||
878 | 1105 | ||
879 | return skb; | 1106 | return agg; |
880 | } | 1107 | } |
881 | 1108 | ||
882 | /* | 1109 | /* |
883 | * Assign a reasonable start time for a new flow k in group i. | 1110 | * Assign a reasonable start time for a new aggregate in group i. |
884 | * Admissible values for \hat(F) are multiples of \sigma_i | 1111 | * Admissible values for \hat(F) are multiples of \sigma_i |
885 | * no greater than V+\sigma_i . Larger values mean that | 1112 | * no greater than V+\sigma_i . Larger values mean that |
886 | * we had a wraparound so we consider the timestamp to be stale. | 1113 | * we had a wraparound so we consider the timestamp to be stale. |
887 | * | 1114 | * |
888 | * If F is not stale and F >= V then we set S = F. | 1115 | * If F is not stale and F >= V then we set S = F. |
889 | * Otherwise we should assign S = V, but this may violate | 1116 | * Otherwise we should assign S = V, but this may violate |
890 | * the ordering in ER. So, if we have groups in ER, set S to | 1117 | * the ordering in EB (see [2]). So, if we have groups in ER, |
891 | * the F_j of the first group j which would be blocking us. | 1118 | * set S to the F_j of the first group j which would be blocking us. |
892 | * We are guaranteed not to move S backward because | 1119 | * We are guaranteed not to move S backward because |
893 | * otherwise our group i would still be blocked. | 1120 | * otherwise our group i would still be blocked. |
894 | */ | 1121 | */ |
895 | static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl) | 1122 | static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg) |
896 | { | 1123 | { |
897 | unsigned long mask; | 1124 | unsigned long mask; |
898 | u64 limit, roundedF; | 1125 | u64 limit, roundedF; |
899 | int slot_shift = cl->grp->slot_shift; | 1126 | int slot_shift = agg->grp->slot_shift; |
900 | 1127 | ||
901 | roundedF = qfq_round_down(cl->F, slot_shift); | 1128 | roundedF = qfq_round_down(agg->F, slot_shift); |
902 | limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift); | 1129 | limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift); |
903 | 1130 | ||
904 | if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) { | 1131 | if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) { |
905 | /* timestamp was stale */ | 1132 | /* timestamp was stale */ |
906 | mask = mask_from(q->bitmaps[ER], cl->grp->index); | 1133 | mask = mask_from(q->bitmaps[ER], agg->grp->index); |
907 | if (mask) { | 1134 | if (mask) { |
908 | struct qfq_group *next = qfq_ffs(q, mask); | 1135 | struct qfq_group *next = qfq_ffs(q, mask); |
909 | if (qfq_gt(roundedF, next->F)) { | 1136 | if (qfq_gt(roundedF, next->F)) { |
910 | if (qfq_gt(limit, next->F)) | 1137 | if (qfq_gt(limit, next->F)) |
911 | cl->S = next->F; | 1138 | agg->S = next->F; |
912 | else /* preserve timestamp correctness */ | 1139 | else /* preserve timestamp correctness */ |
913 | cl->S = limit; | 1140 | agg->S = limit; |
914 | return; | 1141 | return; |
915 | } | 1142 | } |
916 | } | 1143 | } |
917 | cl->S = q->V; | 1144 | agg->S = q->V; |
918 | } else /* timestamp is not stale */ | 1145 | } else /* timestamp is not stale */ |
919 | cl->S = cl->F; | 1146 | agg->S = agg->F; |
920 | } | 1147 | } |
921 | 1148 | ||
1149 | /* | ||
1150 | * Update the timestamps of agg before scheduling/rescheduling it for | ||
1151 | * service. In particular, assign to agg->F its maximum possible | ||
1152 | * value, i.e., the virtual finish time with which the aggregate | ||
1153 | * should be labeled if it used all its budget once in service. | ||
1154 | */ | ||
1155 | static inline void | ||
1156 | qfq_update_agg_ts(struct qfq_sched *q, | ||
1157 | struct qfq_aggregate *agg, enum update_reason reason) | ||
1158 | { | ||
1159 | if (reason != requeue) | ||
1160 | qfq_update_start(q, agg); | ||
1161 | else /* just charge agg for the service received */ | ||
1162 | agg->S = agg->F; | ||
1163 | |||
1164 | agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w; | ||
1165 | } | ||
1166 | |||
1167 | static void qfq_schedule_agg(struct qfq_sched *, struct qfq_aggregate *); | ||
1168 | |||
922 | static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | 1169 | static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
923 | { | 1170 | { |
924 | struct qfq_sched *q = qdisc_priv(sch); | 1171 | struct qfq_sched *q = qdisc_priv(sch); |
925 | struct qfq_class *cl; | 1172 | struct qfq_class *cl; |
1173 | struct qfq_aggregate *agg; | ||
926 | int err = 0; | 1174 | int err = 0; |
927 | 1175 | ||
928 | cl = qfq_classify(skb, sch, &err); | 1176 | cl = qfq_classify(skb, sch, &err); |
@@ -934,11 +1182,13 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
934 | } | 1182 | } |
935 | pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); | 1183 | pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); |
936 | 1184 | ||
937 | if (unlikely(cl->lmax < qdisc_pkt_len(skb))) { | 1185 | if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) { |
938 | pr_debug("qfq: increasing maxpkt from %u to %u for class %u", | 1186 | pr_debug("qfq: increasing maxpkt from %u to %u for class %u", |
939 | cl->lmax, qdisc_pkt_len(skb), cl->common.classid); | 1187 | cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); |
940 | qfq_update_reactivate_class(q, cl, cl->inv_w, | 1188 | err = qfq_change_agg(sch, cl, cl->agg->class_weight, |
941 | qdisc_pkt_len(skb), 0); | 1189 | qdisc_pkt_len(skb)); |
1190 | if (err) | ||
1191 | return err; | ||
942 | } | 1192 | } |
943 | 1193 | ||
944 | err = qdisc_enqueue(skb, cl->qdisc); | 1194 | err = qdisc_enqueue(skb, cl->qdisc); |
@@ -954,35 +1204,50 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
954 | bstats_update(&cl->bstats, skb); | 1204 | bstats_update(&cl->bstats, skb); |
955 | ++sch->q.qlen; | 1205 | ++sch->q.qlen; |
956 | 1206 | ||
957 | /* If the new skb is not the head of queue, then done here. */ | 1207 | agg = cl->agg; |
958 | if (cl->qdisc->q.qlen != 1) | 1208 | /* if the queue was not empty, then done here */ |
1209 | if (cl->qdisc->q.qlen != 1) { | ||
1210 | if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && | ||
1211 | list_first_entry(&agg->active, struct qfq_class, alist) | ||
1212 | == cl && cl->deficit < qdisc_pkt_len(skb)) | ||
1213 | list_move_tail(&cl->alist, &agg->active); | ||
1214 | |||
959 | return err; | 1215 | return err; |
1216 | } | ||
1217 | |||
1218 | /* schedule class for service within the aggregate */ | ||
1219 | cl->deficit = agg->lmax; | ||
1220 | list_add_tail(&cl->alist, &agg->active); | ||
960 | 1221 | ||
961 | /* If reach this point, queue q was idle */ | 1222 | if (list_first_entry(&agg->active, struct qfq_class, alist) != cl) |
962 | qfq_activate_class(q, cl, qdisc_pkt_len(skb)); | 1223 | return err; /* aggregate was not empty, nothing else to do */ |
1224 | |||
1225 | /* recharge budget */ | ||
1226 | agg->initial_budget = agg->budget = agg->budgetmax; | ||
1227 | |||
1228 | qfq_update_agg_ts(q, agg, enqueue); | ||
1229 | if (q->in_serv_agg == NULL) | ||
1230 | q->in_serv_agg = agg; | ||
1231 | else if (agg != q->in_serv_agg) | ||
1232 | qfq_schedule_agg(q, agg); | ||
963 | 1233 | ||
964 | return err; | 1234 | return err; |
965 | } | 1235 | } |
966 | 1236 | ||
967 | /* | 1237 | /* |
968 | * Handle class switch from idle to backlogged. | 1238 | * Schedule aggregate according to its timestamps. |
969 | */ | 1239 | */ |
970 | static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl, | 1240 | static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg) |
971 | unsigned int pkt_len) | ||
972 | { | 1241 | { |
973 | struct qfq_group *grp = cl->grp; | 1242 | struct qfq_group *grp = agg->grp; |
974 | u64 roundedS; | 1243 | u64 roundedS; |
975 | int s; | 1244 | int s; |
976 | 1245 | ||
977 | qfq_update_start(q, cl); | 1246 | roundedS = qfq_round_down(agg->S, grp->slot_shift); |
978 | |||
979 | /* compute new finish time and rounded start. */ | ||
980 | cl->F = cl->S + (u64)pkt_len * cl->inv_w; | ||
981 | roundedS = qfq_round_down(cl->S, grp->slot_shift); | ||
982 | 1247 | ||
983 | /* | 1248 | /* |
984 | * insert cl in the correct bucket. | 1249 | * Insert agg in the correct bucket. |
985 | * If cl->S >= grp->S we don't need to adjust the | 1250 | * If agg->S >= grp->S we don't need to adjust the |
986 | * bucket list and simply go to the insertion phase. | 1251 | * bucket list and simply go to the insertion phase. |
987 | * Otherwise grp->S is decreasing, we must make room | 1252 | * Otherwise grp->S is decreasing, we must make room |
988 | * in the bucket list, and also recompute the group state. | 1253 | * in the bucket list, and also recompute the group state. |
@@ -990,10 +1255,10 @@ static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl, | |||
990 | * was in ER make sure to adjust V. | 1255 | * was in ER make sure to adjust V. |
991 | */ | 1256 | */ |
992 | if (grp->full_slots) { | 1257 | if (grp->full_slots) { |
993 | if (!qfq_gt(grp->S, cl->S)) | 1258 | if (!qfq_gt(grp->S, agg->S)) |
994 | goto skip_update; | 1259 | goto skip_update; |
995 | 1260 | ||
996 | /* create a slot for this cl->S */ | 1261 | /* create a slot for this agg->S */ |
997 | qfq_slot_rotate(grp, roundedS); | 1262 | qfq_slot_rotate(grp, roundedS); |
998 | /* group was surely ineligible, remove */ | 1263 | /* group was surely ineligible, remove */ |
999 | __clear_bit(grp->index, &q->bitmaps[IR]); | 1264 | __clear_bit(grp->index, &q->bitmaps[IR]); |
@@ -1008,46 +1273,61 @@ static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl, | |||
1008 | 1273 | ||
1009 | pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n", | 1274 | pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n", |
1010 | s, q->bitmaps[s], | 1275 | s, q->bitmaps[s], |
1011 | (unsigned long long) cl->S, | 1276 | (unsigned long long) agg->S, |
1012 | (unsigned long long) cl->F, | 1277 | (unsigned long long) agg->F, |
1013 | (unsigned long long) q->V); | 1278 | (unsigned long long) q->V); |
1014 | 1279 | ||
1015 | skip_update: | 1280 | skip_update: |
1016 | qfq_slot_insert(grp, cl, roundedS); | 1281 | qfq_slot_insert(grp, agg, roundedS); |
1017 | } | 1282 | } |
1018 | 1283 | ||
1019 | 1284 | ||
1285 | /* Update agg ts and schedule agg for service */ | ||
1286 | static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, | ||
1287 | enum update_reason reason) | ||
1288 | { | ||
1289 | qfq_update_agg_ts(q, agg, reason); | ||
1290 | qfq_schedule_agg(q, agg); | ||
1291 | } | ||
1292 | |||
1020 | static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, | 1293 | static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, |
1021 | struct qfq_class *cl) | 1294 | struct qfq_aggregate *agg) |
1022 | { | 1295 | { |
1023 | unsigned int i, offset; | 1296 | unsigned int i, offset; |
1024 | u64 roundedS; | 1297 | u64 roundedS; |
1025 | 1298 | ||
1026 | roundedS = qfq_round_down(cl->S, grp->slot_shift); | 1299 | roundedS = qfq_round_down(agg->S, grp->slot_shift); |
1027 | offset = (roundedS - grp->S) >> grp->slot_shift; | 1300 | offset = (roundedS - grp->S) >> grp->slot_shift; |
1301 | |||
1028 | i = (grp->front + offset) % QFQ_MAX_SLOTS; | 1302 | i = (grp->front + offset) % QFQ_MAX_SLOTS; |
1029 | 1303 | ||
1030 | hlist_del(&cl->next); | 1304 | hlist_del(&agg->next); |
1031 | if (hlist_empty(&grp->slots[i])) | 1305 | if (hlist_empty(&grp->slots[i])) |
1032 | __clear_bit(offset, &grp->full_slots); | 1306 | __clear_bit(offset, &grp->full_slots); |
1033 | } | 1307 | } |
1034 | 1308 | ||
1035 | /* | 1309 | /* |
1036 | * called to forcibly destroy a queue. | 1310 | * Called to forcibly deschedule an aggregate. If the aggregate is |
1037 | * If the queue is not in the front bucket, or if it has | 1311 | * not in the front bucket, or if the front bucket has other |
1038 | * other queues in the front bucket, we can simply remove | 1312 | * aggregates, we can simply remove the aggregate with no other |
1039 | * the queue with no other side effects. | 1313 | * side effects. |
1040 | * Otherwise we must propagate the event up. | 1314 | * Otherwise we must propagate the event up. |
1041 | */ | 1315 | */ |
1042 | static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl) | 1316 | static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg) |
1043 | { | 1317 | { |
1044 | struct qfq_group *grp = cl->grp; | 1318 | struct qfq_group *grp = agg->grp; |
1045 | unsigned long mask; | 1319 | unsigned long mask; |
1046 | u64 roundedS; | 1320 | u64 roundedS; |
1047 | int s; | 1321 | int s; |
1048 | 1322 | ||
1049 | cl->F = cl->S; | 1323 | if (agg == q->in_serv_agg) { |
1050 | qfq_slot_remove(q, grp, cl); | 1324 | charge_actual_service(agg); |
1325 | q->in_serv_agg = qfq_choose_next_agg(q); | ||
1326 | return; | ||
1327 | } | ||
1328 | |||
1329 | agg->F = agg->S; | ||
1330 | qfq_slot_remove(q, grp, agg); | ||
1051 | 1331 | ||
1052 | if (!grp->full_slots) { | 1332 | if (!grp->full_slots) { |
1053 | __clear_bit(grp->index, &q->bitmaps[IR]); | 1333 | __clear_bit(grp->index, &q->bitmaps[IR]); |
@@ -1066,8 +1346,8 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl) | |||
1066 | } | 1346 | } |
1067 | __clear_bit(grp->index, &q->bitmaps[ER]); | 1347 | __clear_bit(grp->index, &q->bitmaps[ER]); |
1068 | } else if (hlist_empty(&grp->slots[grp->front])) { | 1348 | } else if (hlist_empty(&grp->slots[grp->front])) { |
1069 | cl = qfq_slot_scan(grp); | 1349 | agg = qfq_slot_scan(grp); |
1070 | roundedS = qfq_round_down(cl->S, grp->slot_shift); | 1350 | roundedS = qfq_round_down(agg->S, grp->slot_shift); |
1071 | if (grp->S != roundedS) { | 1351 | if (grp->S != roundedS) { |
1072 | __clear_bit(grp->index, &q->bitmaps[ER]); | 1352 | __clear_bit(grp->index, &q->bitmaps[ER]); |
1073 | __clear_bit(grp->index, &q->bitmaps[IR]); | 1353 | __clear_bit(grp->index, &q->bitmaps[IR]); |
@@ -1080,7 +1360,7 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl) | |||
1080 | } | 1360 | } |
1081 | } | 1361 | } |
1082 | 1362 | ||
1083 | qfq_update_eligible(q, q->V); | 1363 | qfq_update_eligible(q); |
1084 | } | 1364 | } |
1085 | 1365 | ||
1086 | static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) | 1366 | static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) |
@@ -1092,6 +1372,32 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) | |||
1092 | qfq_deactivate_class(q, cl); | 1372 | qfq_deactivate_class(q, cl); |
1093 | } | 1373 | } |
1094 | 1374 | ||
1375 | static unsigned int qfq_drop_from_slot(struct qfq_sched *q, | ||
1376 | struct hlist_head *slot) | ||
1377 | { | ||
1378 | struct qfq_aggregate *agg; | ||
1379 | struct hlist_node *n; | ||
1380 | struct qfq_class *cl; | ||
1381 | unsigned int len; | ||
1382 | |||
1383 | hlist_for_each_entry(agg, n, slot, next) { | ||
1384 | list_for_each_entry(cl, &agg->active, alist) { | ||
1385 | |||
1386 | if (!cl->qdisc->ops->drop) | ||
1387 | continue; | ||
1388 | |||
1389 | len = cl->qdisc->ops->drop(cl->qdisc); | ||
1390 | if (len > 0) { | ||
1391 | if (cl->qdisc->q.qlen == 0) | ||
1392 | qfq_deactivate_class(q, cl); | ||
1393 | |||
1394 | return len; | ||
1395 | } | ||
1396 | } | ||
1397 | } | ||
1398 | return 0; | ||
1399 | } | ||
1400 | |||
1095 | static unsigned int qfq_drop(struct Qdisc *sch) | 1401 | static unsigned int qfq_drop(struct Qdisc *sch) |
1096 | { | 1402 | { |
1097 | struct qfq_sched *q = qdisc_priv(sch); | 1403 | struct qfq_sched *q = qdisc_priv(sch); |
@@ -1101,24 +1407,13 @@ static unsigned int qfq_drop(struct Qdisc *sch) | |||
1101 | for (i = 0; i <= QFQ_MAX_INDEX; i++) { | 1407 | for (i = 0; i <= QFQ_MAX_INDEX; i++) { |
1102 | grp = &q->groups[i]; | 1408 | grp = &q->groups[i]; |
1103 | for (j = 0; j < QFQ_MAX_SLOTS; j++) { | 1409 | for (j = 0; j < QFQ_MAX_SLOTS; j++) { |
1104 | struct qfq_class *cl; | 1410 | len = qfq_drop_from_slot(q, &grp->slots[j]); |
1105 | struct hlist_node *n; | 1411 | if (len > 0) { |
1106 | 1412 | sch->q.qlen--; | |
1107 | hlist_for_each_entry(cl, n, &grp->slots[j], next) { | 1413 | return len; |
1108 | |||
1109 | if (!cl->qdisc->ops->drop) | ||
1110 | continue; | ||
1111 | |||
1112 | len = cl->qdisc->ops->drop(cl->qdisc); | ||
1113 | if (len > 0) { | ||
1114 | sch->q.qlen--; | ||
1115 | if (!cl->qdisc->q.qlen) | ||
1116 | qfq_deactivate_class(q, cl); | ||
1117 | |||
1118 | return len; | ||
1119 | } | ||
1120 | } | 1414 | } |
1121 | } | 1415 | } |
1416 | |||
1122 | } | 1417 | } |
1123 | 1418 | ||
1124 | return 0; | 1419 | return 0; |
@@ -1129,44 +1424,51 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt) | |||
1129 | struct qfq_sched *q = qdisc_priv(sch); | 1424 | struct qfq_sched *q = qdisc_priv(sch); |
1130 | struct qfq_group *grp; | 1425 | struct qfq_group *grp; |
1131 | int i, j, err; | 1426 | int i, j, err; |
1427 | u32 max_cl_shift, maxbudg_shift, max_classes; | ||
1132 | 1428 | ||
1133 | err = qdisc_class_hash_init(&q->clhash); | 1429 | err = qdisc_class_hash_init(&q->clhash); |
1134 | if (err < 0) | 1430 | if (err < 0) |
1135 | return err; | 1431 | return err; |
1136 | 1432 | ||
1433 | if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES) | ||
1434 | max_classes = QFQ_MAX_AGG_CLASSES; | ||
1435 | else | ||
1436 | max_classes = qdisc_dev(sch)->tx_queue_len + 1; | ||
1437 | /* max_cl_shift = floor(log_2(max_classes)) */ | ||
1438 | max_cl_shift = __fls(max_classes); | ||
1439 | q->max_agg_classes = 1<<max_cl_shift; | ||
1440 | |||
1441 | /* maxbudg_shift = log2(max_len * max_classes_per_agg) */ | ||
1442 | maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift; | ||
1443 | q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX; | ||
1444 | |||
1137 | for (i = 0; i <= QFQ_MAX_INDEX; i++) { | 1445 | for (i = 0; i <= QFQ_MAX_INDEX; i++) { |
1138 | grp = &q->groups[i]; | 1446 | grp = &q->groups[i]; |
1139 | grp->index = i; | 1447 | grp->index = i; |
1140 | grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS | 1448 | grp->slot_shift = q->min_slot_shift + i; |
1141 | - (QFQ_MAX_INDEX - i); | ||
1142 | for (j = 0; j < QFQ_MAX_SLOTS; j++) | 1449 | for (j = 0; j < QFQ_MAX_SLOTS; j++) |
1143 | INIT_HLIST_HEAD(&grp->slots[j]); | 1450 | INIT_HLIST_HEAD(&grp->slots[j]); |
1144 | } | 1451 | } |
1145 | 1452 | ||
1453 | INIT_HLIST_HEAD(&q->nonfull_aggs); | ||
1454 | |||
1146 | return 0; | 1455 | return 0; |
1147 | } | 1456 | } |
1148 | 1457 | ||
1149 | static void qfq_reset_qdisc(struct Qdisc *sch) | 1458 | static void qfq_reset_qdisc(struct Qdisc *sch) |
1150 | { | 1459 | { |
1151 | struct qfq_sched *q = qdisc_priv(sch); | 1460 | struct qfq_sched *q = qdisc_priv(sch); |
1152 | struct qfq_group *grp; | ||
1153 | struct qfq_class *cl; | 1461 | struct qfq_class *cl; |
1154 | struct hlist_node *n, *tmp; | 1462 | struct hlist_node *n; |
1155 | unsigned int i, j; | 1463 | unsigned int i; |
1156 | 1464 | ||
1157 | for (i = 0; i <= QFQ_MAX_INDEX; i++) { | 1465 | for (i = 0; i < q->clhash.hashsize; i++) { |
1158 | grp = &q->groups[i]; | 1466 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) { |
1159 | for (j = 0; j < QFQ_MAX_SLOTS; j++) { | 1467 | if (cl->qdisc->q.qlen > 0) |
1160 | hlist_for_each_entry_safe(cl, n, tmp, | ||
1161 | &grp->slots[j], next) { | ||
1162 | qfq_deactivate_class(q, cl); | 1468 | qfq_deactivate_class(q, cl); |
1163 | } | ||
1164 | } | ||
1165 | } | ||
1166 | 1469 | ||
1167 | for (i = 0; i < q->clhash.hashsize; i++) { | ||
1168 | hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) | ||
1169 | qdisc_reset(cl->qdisc); | 1470 | qdisc_reset(cl->qdisc); |
1471 | } | ||
1170 | } | 1472 | } |
1171 | sch->q.qlen = 0; | 1473 | sch->q.qlen = 0; |
1172 | } | 1474 | } |
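The aggregate sizing math added to qfq_init_qdisc() above can be replayed in user space to see what it produces. In the sketch below the values of QFQ_MAX_AGG_CLASSES, QFQ_MTU_SHIFT, FRAC_BITS and QFQ_MAX_INDEX are assumptions chosen for illustration (they are not stated in this hunk); only the clamp and shift logic mirrors the kernel code, with a plain loop standing in for __fls().

/* Standalone sketch of the qfq_init_qdisc() sizing arithmetic.
 * All four constants below are assumed values for illustration only. */
#include <stdio.h>

#define QFQ_MAX_AGG_CLASSES 8    /* assumed */
#define QFQ_MTU_SHIFT       16   /* assumed: log2 of the max packet length */
#define FRAC_BITS           30   /* assumed: fixed-point fraction bits */
#define QFQ_MAX_INDEX       24   /* assumed: highest group index */

static unsigned int floor_log2(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)          /* floor(log2(x)) for x > 0, like __fls() */
		r++;
	return r;
}

int main(void)
{
	unsigned int tx_queue_len = 1000;   /* typical Ethernet default */
	unsigned int max_classes, max_cl_shift, maxbudg_shift, min_slot_shift;

	if (tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
		max_classes = QFQ_MAX_AGG_CLASSES;
	else
		max_classes = tx_queue_len + 1;

	max_cl_shift = floor_log2(max_classes);
	maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
	min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;

	printf("max_agg_classes = %u\n", 1u << max_cl_shift);
	printf("min_slot_shift  = %u\n", min_slot_shift);
	return 0;
}

With these assumed constants and a 1000-packet tx queue the sketch prints max_agg_classes = 8 and min_slot_shift = 25; each group i in the loop above then gets slot_shift = min_slot_shift + i.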
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig index 126b014eb79b..a9edd2e205f4 100644 --- a/net/sctp/Kconfig +++ b/net/sctp/Kconfig | |||
@@ -9,7 +9,6 @@ menuconfig IP_SCTP | |||
9 | select CRYPTO | 9 | select CRYPTO |
10 | select CRYPTO_HMAC | 10 | select CRYPTO_HMAC |
11 | select CRYPTO_SHA1 | 11 | select CRYPTO_SHA1 |
12 | select CRYPTO_MD5 if SCTP_HMAC_MD5 | ||
13 | select LIBCRC32C | 12 | select LIBCRC32C |
14 | ---help--- | 13 | ---help--- |
15 | Stream Control Transmission Protocol | 14 | Stream Control Transmission Protocol |
@@ -68,33 +67,21 @@ config SCTP_DBG_OBJCNT | |||
68 | 67 | ||
69 | If unsure, say N | 68 | If unsure, say N |
70 | 69 | ||
71 | choice | 70 | config SCTP_COOKIE_HMAC_MD5 |
72 | prompt "SCTP: Cookie HMAC Algorithm" | 71 | bool "Enable optional MD5 hmac cookie generation" |
73 | default SCTP_HMAC_MD5 | ||
74 | help | 72 | help |
75 | HMAC algorithm to be used during association initialization. It | 73 | Enable optional MD5 hmac based SCTP cookie generation |
76 | is strongly recommended to use HMAC-SHA1 or HMAC-MD5. See | 74 | default y |
77 | configuration for Cryptographic API and enable those algorithms | 75 | select CRYPTO_HMAC if SCTP_COOKIE_HMAC_MD5 |
78 | to make them usable by SCTP. | 76 | select CRYPTO_MD5 if SCTP_COOKIE_HMAC_MD5 |
79 | 77 | ||
80 | config SCTP_HMAC_NONE | 78 | config SCTP_COOKIE_HMAC_SHA1 |
81 | bool "None" | 79 | bool "Enable optional SHA1 hmac cookie generation" |
82 | help | ||
83 | Choosing this disables the use of an HMAC during association | ||
84 | establishment. It is advised to use either HMAC-MD5 or HMAC-SHA1. | ||
85 | |||
86 | config SCTP_HMAC_SHA1 | ||
87 | bool "HMAC-SHA1" | ||
88 | help | ||
89 | Enable the use of HMAC-SHA1 during association establishment. It | ||
90 | is advised to use either HMAC-MD5 or HMAC-SHA1. | ||
91 | |||
92 | config SCTP_HMAC_MD5 | ||
93 | bool "HMAC-MD5" | ||
94 | help | 80 | help |
95 | Enable the use of HMAC-MD5 during association establishment. It is | 81 | Enable optional SHA1 hmac based SCTP cookie generation |
96 | advised to use either HMAC-MD5 or HMAC-SHA1. | 82 | default y |
83 | select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1 | ||
84 | select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1 | ||
97 | 85 | ||
98 | endchoice | ||
99 | 86 | ||
100 | endif # IP_SCTP | 87 | endif # IP_SCTP |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 2d518425d598..456bc3dbdd51 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -1190,6 +1190,15 @@ static int sctp_net_init(struct net *net) | |||
1190 | /* Whether Cookie Preservative is enabled(1) or not(0) */ | 1190 | /* Whether Cookie Preservative is enabled(1) or not(0) */ |
1191 | net->sctp.cookie_preserve_enable = 1; | 1191 | net->sctp.cookie_preserve_enable = 1; |
1192 | 1192 | ||
1193 | /* Default sctp sockets to use md5 as their hmac alg */ | ||
1194 | #if defined (CONFIG_CRYPTO_MD5) | ||
1195 | net->sctp.sctp_hmac_alg = "md5"; | ||
1196 | #elif defined (CONFIG_CRYPTO_SHA1) | ||
1197 | net->sctp.sctp_hmac_alg = "sha1"; | ||
1198 | #else | ||
1199 | net->sctp.sctp_hmac_alg = NULL; | ||
1200 | #endif | ||
1201 | |||
1193 | /* Max.Burst - 4 */ | 1202 | /* Max.Burst - 4 */ |
1194 | net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST; | 1203 | net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST; |
1195 | 1204 | ||
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index fbe1636309a7..e0f01a4e8cd6 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -1090,6 +1090,25 @@ nodata: | |||
1090 | return retval; | 1090 | return retval; |
1091 | } | 1091 | } |
1092 | 1092 | ||
1093 | struct sctp_chunk *sctp_make_violation_max_retrans( | ||
1094 | const struct sctp_association *asoc, | ||
1095 | const struct sctp_chunk *chunk) | ||
1096 | { | ||
1097 | struct sctp_chunk *retval; | ||
1098 | static const char error[] = "Association exceeded its max_retrans count"; | ||
1099 | size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t); | ||
1100 | |||
1101 | retval = sctp_make_abort(asoc, chunk, payload_len); | ||
1102 | if (!retval) | ||
1103 | goto nodata; | ||
1104 | |||
1105 | sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error)); | ||
1106 | sctp_addto_chunk(retval, sizeof(error), error); | ||
1107 | |||
1108 | nodata: | ||
1109 | return retval; | ||
1110 | } | ||
1111 | |||
1093 | /* Make a HEARTBEAT chunk. */ | 1112 | /* Make a HEARTBEAT chunk. */ |
1094 | struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, | 1113 | struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, |
1095 | const struct sctp_transport *transport) | 1114 | const struct sctp_transport *transport) |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 6773d7803627..c0769569b05d 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -577,7 +577,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, | |||
577 | unsigned int error) | 577 | unsigned int error) |
578 | { | 578 | { |
579 | struct sctp_ulpevent *event; | 579 | struct sctp_ulpevent *event; |
580 | 580 | struct sctp_chunk *abort; | |
581 | /* Cancel any partial delivery in progress. */ | 581 | /* Cancel any partial delivery in progress. */ |
582 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); | 582 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); |
583 | 583 | ||
@@ -593,6 +593,13 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, | |||
593 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | 593 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, |
594 | SCTP_ULPEVENT(event)); | 594 | SCTP_ULPEVENT(event)); |
595 | 595 | ||
596 | if (asoc->overall_error_count >= asoc->max_retrans) { | ||
597 | abort = sctp_make_violation_max_retrans(asoc, chunk); | ||
598 | if (abort) | ||
599 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
600 | SCTP_CHUNK(abort)); | ||
601 | } | ||
602 | |||
596 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | 603 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, |
597 | SCTP_STATE(SCTP_STATE_CLOSED)); | 604 | SCTP_STATE(SCTP_STATE_CLOSED)); |
598 | 605 | ||
@@ -1268,14 +1275,14 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1268 | sctp_outq_uncork(&asoc->outqueue); | 1275 | sctp_outq_uncork(&asoc->outqueue); |
1269 | local_cork = 0; | 1276 | local_cork = 0; |
1270 | } | 1277 | } |
1271 | asoc = cmd->obj.ptr; | 1278 | asoc = cmd->obj.asoc; |
1272 | /* Register with the endpoint. */ | 1279 | /* Register with the endpoint. */ |
1273 | sctp_endpoint_add_asoc(ep, asoc); | 1280 | sctp_endpoint_add_asoc(ep, asoc); |
1274 | sctp_hash_established(asoc); | 1281 | sctp_hash_established(asoc); |
1275 | break; | 1282 | break; |
1276 | 1283 | ||
1277 | case SCTP_CMD_UPDATE_ASSOC: | 1284 | case SCTP_CMD_UPDATE_ASSOC: |
1278 | sctp_assoc_update(asoc, cmd->obj.ptr); | 1285 | sctp_assoc_update(asoc, cmd->obj.asoc); |
1279 | break; | 1286 | break; |
1280 | 1287 | ||
1281 | case SCTP_CMD_PURGE_OUTQUEUE: | 1288 | case SCTP_CMD_PURGE_OUTQUEUE: |
@@ -1315,7 +1322,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1315 | break; | 1322 | break; |
1316 | 1323 | ||
1317 | case SCTP_CMD_PROCESS_FWDTSN: | 1324 | case SCTP_CMD_PROCESS_FWDTSN: |
1318 | sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr); | 1325 | sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk); |
1319 | break; | 1326 | break; |
1320 | 1327 | ||
1321 | case SCTP_CMD_GEN_SACK: | 1328 | case SCTP_CMD_GEN_SACK: |
@@ -1331,7 +1338,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1331 | case SCTP_CMD_PROCESS_SACK: | 1338 | case SCTP_CMD_PROCESS_SACK: |
1332 | /* Process an inbound SACK. */ | 1339 | /* Process an inbound SACK. */ |
1333 | error = sctp_cmd_process_sack(commands, asoc, | 1340 | error = sctp_cmd_process_sack(commands, asoc, |
1334 | cmd->obj.ptr); | 1341 | cmd->obj.chunk); |
1335 | break; | 1342 | break; |
1336 | 1343 | ||
1337 | case SCTP_CMD_GEN_INIT_ACK: | 1344 | case SCTP_CMD_GEN_INIT_ACK: |
@@ -1352,15 +1359,15 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1352 | * layer which will bail. | 1359 | * layer which will bail. |
1353 | */ | 1360 | */ |
1354 | error = sctp_cmd_process_init(commands, asoc, chunk, | 1361 | error = sctp_cmd_process_init(commands, asoc, chunk, |
1355 | cmd->obj.ptr, gfp); | 1362 | cmd->obj.init, gfp); |
1356 | break; | 1363 | break; |
1357 | 1364 | ||
1358 | case SCTP_CMD_GEN_COOKIE_ECHO: | 1365 | case SCTP_CMD_GEN_COOKIE_ECHO: |
1359 | /* Generate a COOKIE ECHO chunk. */ | 1366 | /* Generate a COOKIE ECHO chunk. */ |
1360 | new_obj = sctp_make_cookie_echo(asoc, chunk); | 1367 | new_obj = sctp_make_cookie_echo(asoc, chunk); |
1361 | if (!new_obj) { | 1368 | if (!new_obj) { |
1362 | if (cmd->obj.ptr) | 1369 | if (cmd->obj.chunk) |
1363 | sctp_chunk_free(cmd->obj.ptr); | 1370 | sctp_chunk_free(cmd->obj.chunk); |
1364 | goto nomem; | 1371 | goto nomem; |
1365 | } | 1372 | } |
1366 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | 1373 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, |
@@ -1369,9 +1376,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1369 | /* If there is an ERROR chunk to be sent along with | 1376 | /* If there is an ERROR chunk to be sent along with |
1370 | * the COOKIE_ECHO, send it, too. | 1377 | * the COOKIE_ECHO, send it, too. |
1371 | */ | 1378 | */ |
1372 | if (cmd->obj.ptr) | 1379 | if (cmd->obj.chunk) |
1373 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | 1380 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, |
1374 | SCTP_CHUNK(cmd->obj.ptr)); | 1381 | SCTP_CHUNK(cmd->obj.chunk)); |
1375 | 1382 | ||
1376 | if (new_obj->transport) { | 1383 | if (new_obj->transport) { |
1377 | new_obj->transport->init_sent_count++; | 1384 | new_obj->transport->init_sent_count++; |
@@ -1417,18 +1424,18 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1417 | case SCTP_CMD_CHUNK_ULP: | 1424 | case SCTP_CMD_CHUNK_ULP: |
1418 | /* Send a chunk to the sockets layer. */ | 1425 | /* Send a chunk to the sockets layer. */ |
1419 | SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", | 1426 | SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", |
1420 | "chunk_up:", cmd->obj.ptr, | 1427 | "chunk_up:", cmd->obj.chunk, |
1421 | "ulpq:", &asoc->ulpq); | 1428 | "ulpq:", &asoc->ulpq); |
1422 | sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr, | 1429 | sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk, |
1423 | GFP_ATOMIC); | 1430 | GFP_ATOMIC); |
1424 | break; | 1431 | break; |
1425 | 1432 | ||
1426 | case SCTP_CMD_EVENT_ULP: | 1433 | case SCTP_CMD_EVENT_ULP: |
1427 | /* Send a notification to the sockets layer. */ | 1434 | /* Send a notification to the sockets layer. */ |
1428 | SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", | 1435 | SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", |
1429 | "event_up:",cmd->obj.ptr, | 1436 | "event_up:",cmd->obj.ulpevent, |
1430 | "ulpq:",&asoc->ulpq); | 1437 | "ulpq:",&asoc->ulpq); |
1431 | sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr); | 1438 | sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent); |
1432 | break; | 1439 | break; |
1433 | 1440 | ||
1434 | case SCTP_CMD_REPLY: | 1441 | case SCTP_CMD_REPLY: |
@@ -1438,12 +1445,12 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1438 | local_cork = 1; | 1445 | local_cork = 1; |
1439 | } | 1446 | } |
1440 | /* Send a chunk to our peer. */ | 1447 | /* Send a chunk to our peer. */ |
1441 | error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr); | 1448 | error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk); |
1442 | break; | 1449 | break; |
1443 | 1450 | ||
1444 | case SCTP_CMD_SEND_PKT: | 1451 | case SCTP_CMD_SEND_PKT: |
1445 | /* Send a full packet to our peer. */ | 1452 | /* Send a full packet to our peer. */ |
1446 | packet = cmd->obj.ptr; | 1453 | packet = cmd->obj.packet; |
1447 | sctp_packet_transmit(packet); | 1454 | sctp_packet_transmit(packet); |
1448 | sctp_ootb_pkt_free(packet); | 1455 | sctp_ootb_pkt_free(packet); |
1449 | break; | 1456 | break; |
@@ -1480,7 +1487,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1480 | break; | 1487 | break; |
1481 | 1488 | ||
1482 | case SCTP_CMD_SETUP_T2: | 1489 | case SCTP_CMD_SETUP_T2: |
1483 | sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr); | 1490 | sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk); |
1484 | break; | 1491 | break; |
1485 | 1492 | ||
1486 | case SCTP_CMD_TIMER_START_ONCE: | 1493 | case SCTP_CMD_TIMER_START_ONCE: |
@@ -1514,7 +1521,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1514 | break; | 1521 | break; |
1515 | 1522 | ||
1516 | case SCTP_CMD_INIT_CHOOSE_TRANSPORT: | 1523 | case SCTP_CMD_INIT_CHOOSE_TRANSPORT: |
1517 | chunk = cmd->obj.ptr; | 1524 | chunk = cmd->obj.chunk; |
1518 | t = sctp_assoc_choose_alter_transport(asoc, | 1525 | t = sctp_assoc_choose_alter_transport(asoc, |
1519 | asoc->init_last_sent_to); | 1526 | asoc->init_last_sent_to); |
1520 | asoc->init_last_sent_to = t; | 1527 | asoc->init_last_sent_to = t; |
@@ -1665,17 +1672,16 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1665 | break; | 1672 | break; |
1666 | 1673 | ||
1667 | case SCTP_CMD_PART_DELIVER: | 1674 | case SCTP_CMD_PART_DELIVER: |
1668 | sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr, | 1675 | sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC); |
1669 | GFP_ATOMIC); | ||
1670 | break; | 1676 | break; |
1671 | 1677 | ||
1672 | case SCTP_CMD_RENEGE: | 1678 | case SCTP_CMD_RENEGE: |
1673 | sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr, | 1679 | sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk, |
1674 | GFP_ATOMIC); | 1680 | GFP_ATOMIC); |
1675 | break; | 1681 | break; |
1676 | 1682 | ||
1677 | case SCTP_CMD_SETUP_T4: | 1683 | case SCTP_CMD_SETUP_T4: |
1678 | sctp_cmd_setup_t4(commands, asoc, cmd->obj.ptr); | 1684 | sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk); |
1679 | break; | 1685 | break; |
1680 | 1686 | ||
1681 | case SCTP_CMD_PROCESS_OPERR: | 1687 | case SCTP_CMD_PROCESS_OPERR: |
@@ -1734,8 +1740,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1734 | break; | 1740 | break; |
1735 | 1741 | ||
1736 | default: | 1742 | default: |
1737 | pr_warn("Impossible command: %u, %p\n", | 1743 | pr_warn("Impossible command: %u\n", |
1738 | cmd->verb, cmd->obj.ptr); | 1744 | cmd->verb); |
1739 | break; | 1745 | break; |
1740 | } | 1746 | } |
1741 | 1747 | ||
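The cmd->obj.ptr conversions above all follow one pattern: the side-effect command argument union gains typed members (chunk, asoc, ulpevent, packet, init, ...) so that each SCTP_CMD_* case reads back exactly the member type that was stored, instead of funnelling everything through a void pointer. A minimal sketch of the pattern, using stand-in type names rather than the kernel's actual sctp_arg_t definition:

/* Illustrative only: stand-in types, not the kernel's sctp_arg_t. */
struct chunk;
struct association;
struct ulpevent;
struct packet;

union cmd_arg {
	void *ptr;                 /* legacy catch-all being phased out */
	struct chunk *chunk;       /* typed members, one per argument kind */
	struct association *asoc;
	struct ulpevent *ulpevent;
	struct packet *packet;
};

struct cmd {
	int verb;                  /* which SCTP_CMD_* to execute */
	union cmd_arg obj;         /* read via the member matching 'verb' */
};

With no single member guaranteed to be meaningful any more, the default: case above also drops the %p argument from its warning.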
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 406d957d08fb..bc1624913c42 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -110,7 +110,6 @@ static int sctp_do_bind(struct sock *, union sctp_addr *, int); | |||
110 | static int sctp_autobind(struct sock *sk); | 110 | static int sctp_autobind(struct sock *sk); |
111 | static void sctp_sock_migrate(struct sock *, struct sock *, | 111 | static void sctp_sock_migrate(struct sock *, struct sock *, |
112 | struct sctp_association *, sctp_socket_type_t); | 112 | struct sctp_association *, sctp_socket_type_t); |
113 | static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; | ||
114 | 113 | ||
115 | extern struct kmem_cache *sctp_bucket_cachep; | 114 | extern struct kmem_cache *sctp_bucket_cachep; |
116 | extern long sysctl_sctp_mem[3]; | 115 | extern long sysctl_sctp_mem[3]; |
@@ -336,6 +335,7 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, | |||
336 | /* Bind a local address either to an endpoint or to an association. */ | 335 | /* Bind a local address either to an endpoint or to an association. */ |
337 | SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) | 336 | SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) |
338 | { | 337 | { |
338 | struct net *net = sock_net(sk); | ||
339 | struct sctp_sock *sp = sctp_sk(sk); | 339 | struct sctp_sock *sp = sctp_sk(sk); |
340 | struct sctp_endpoint *ep = sp->ep; | 340 | struct sctp_endpoint *ep = sp->ep; |
341 | struct sctp_bind_addr *bp = &ep->base.bind_addr; | 341 | struct sctp_bind_addr *bp = &ep->base.bind_addr; |
@@ -379,7 +379,8 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) | |||
379 | } | 379 | } |
380 | } | 380 | } |
381 | 381 | ||
382 | if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) | 382 | if (snum && snum < PROT_SOCK && |
383 | !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) | ||
383 | return -EACCES; | 384 | return -EACCES; |
384 | 385 | ||
385 | /* See if the address matches any of the addresses we may have | 386 | /* See if the address matches any of the addresses we may have |
@@ -1162,7 +1163,7 @@ static int __sctp_connect(struct sock* sk, | |||
1162 | * be permitted to open new associations. | 1163 | * be permitted to open new associations. |
1163 | */ | 1164 | */ |
1164 | if (ep->base.bind_addr.port < PROT_SOCK && | 1165 | if (ep->base.bind_addr.port < PROT_SOCK && |
1165 | !capable(CAP_NET_BIND_SERVICE)) { | 1166 | !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { |
1166 | err = -EACCES; | 1167 | err = -EACCES; |
1167 | goto out_free; | 1168 | goto out_free; |
1168 | } | 1169 | } |
@@ -1791,7 +1792,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
1791 | * associations. | 1792 | * associations. |
1792 | */ | 1793 | */ |
1793 | if (ep->base.bind_addr.port < PROT_SOCK && | 1794 | if (ep->base.bind_addr.port < PROT_SOCK && |
1794 | !capable(CAP_NET_BIND_SERVICE)) { | 1795 | !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { |
1795 | err = -EACCES; | 1796 | err = -EACCES; |
1796 | goto out_unlock; | 1797 | goto out_unlock; |
1797 | } | 1798 | } |
@@ -3890,6 +3891,8 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) | |||
3890 | sp->default_rcv_context = 0; | 3891 | sp->default_rcv_context = 0; |
3891 | sp->max_burst = net->sctp.max_burst; | 3892 | sp->max_burst = net->sctp.max_burst; |
3892 | 3893 | ||
3894 | sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; | ||
3895 | |||
3893 | /* Initialize default setup parameters. These parameters | 3896 | /* Initialize default setup parameters. These parameters |
3894 | * can be modified with the SCTP_INITMSG socket option or | 3897 | * can be modified with the SCTP_INITMSG socket option or |
3895 | * overridden by the SCTP_INIT CMSG. | 3898 | * overridden by the SCTP_INIT CMSG. |
@@ -5981,13 +5984,15 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog) | |||
5981 | struct sctp_sock *sp = sctp_sk(sk); | 5984 | struct sctp_sock *sp = sctp_sk(sk); |
5982 | struct sctp_endpoint *ep = sp->ep; | 5985 | struct sctp_endpoint *ep = sp->ep; |
5983 | struct crypto_hash *tfm = NULL; | 5986 | struct crypto_hash *tfm = NULL; |
5987 | char alg[32]; | ||
5984 | 5988 | ||
5985 | /* Allocate HMAC for generating cookie. */ | 5989 | /* Allocate HMAC for generating cookie. */ |
5986 | if (!sctp_sk(sk)->hmac && sctp_hmac_alg) { | 5990 | if (!sp->hmac && sp->sctp_hmac_alg) { |
5987 | tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC); | 5991 | sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); |
5992 | tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); | ||
5988 | if (IS_ERR(tfm)) { | 5993 | if (IS_ERR(tfm)) { |
5989 | net_info_ratelimited("failed to load transform for %s: %ld\n", | 5994 | net_info_ratelimited("failed to load transform for %s: %ld\n", |
5990 | sctp_hmac_alg, PTR_ERR(tfm)); | 5995 | sp->sctp_hmac_alg, PTR_ERR(tfm)); |
5991 | return -ENOSYS; | 5996 | return -ENOSYS; |
5992 | } | 5997 | } |
5993 | sctp_sk(sk)->hmac = tfm; | 5998 | sctp_sk(sk)->hmac = tfm; |
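The sprintf() added to sctp_listen_start() above builds a Linux crypto API template name from the per-socket algorithm string, so "md5" becomes "hmac(md5)" and "sha1" becomes "hmac(sha1)" before crypto_alloc_hash() is called. A tiny user-space sketch of just that string construction (the 32-byte buffer size matches the stack buffer in the hunk; snprintf() is used here purely for the sketch):

/* Sketch of the "hmac(<alg>)" crypto template-name construction. */
#include <stdio.h>

int main(void)
{
	const char *cookie_algs[] = { "md5", "sha1" };
	char alg[32];
	unsigned int i;

	for (i = 0; i < sizeof(cookie_algs) / sizeof(cookie_algs[0]); i++) {
		snprintf(alg, sizeof(alg), "hmac(%s)", cookie_algs[i]);
		printf("crypto transform name: %s\n", alg);
	}
	return 0;
}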
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 70e3ba5cb50b..043889ac86c0 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c | |||
@@ -62,6 +62,11 @@ extern long sysctl_sctp_mem[3]; | |||
62 | extern int sysctl_sctp_rmem[3]; | 62 | extern int sysctl_sctp_rmem[3]; |
63 | extern int sysctl_sctp_wmem[3]; | 63 | extern int sysctl_sctp_wmem[3]; |
64 | 64 | ||
65 | static int proc_sctp_do_hmac_alg(ctl_table *ctl, | ||
66 | int write, | ||
67 | void __user *buffer, size_t *lenp, | ||
68 | |||
69 | loff_t *ppos); | ||
65 | static ctl_table sctp_table[] = { | 70 | static ctl_table sctp_table[] = { |
66 | { | 71 | { |
67 | .procname = "sctp_mem", | 72 | .procname = "sctp_mem", |
@@ -147,6 +152,12 @@ static ctl_table sctp_net_table[] = { | |||
147 | .proc_handler = proc_dointvec, | 152 | .proc_handler = proc_dointvec, |
148 | }, | 153 | }, |
149 | { | 154 | { |
155 | .procname = "cookie_hmac_alg", | ||
156 | .maxlen = 8, | ||
157 | .mode = 0644, | ||
158 | .proc_handler = proc_sctp_do_hmac_alg, | ||
159 | }, | ||
160 | { | ||
150 | .procname = "valid_cookie_life", | 161 | .procname = "valid_cookie_life", |
151 | .data = &init_net.sctp.valid_cookie_life, | 162 | .data = &init_net.sctp.valid_cookie_life, |
152 | .maxlen = sizeof(unsigned int), | 163 | .maxlen = sizeof(unsigned int), |
@@ -289,6 +300,54 @@ static ctl_table sctp_net_table[] = { | |||
289 | { /* sentinel */ } | 300 | { /* sentinel */ } |
290 | }; | 301 | }; |
291 | 302 | ||
303 | static int proc_sctp_do_hmac_alg(ctl_table *ctl, | ||
304 | int write, | ||
305 | void __user *buffer, size_t *lenp, | ||
306 | loff_t *ppos) | ||
307 | { | ||
308 | struct net *net = current->nsproxy->net_ns; | ||
309 | char tmp[8]; | ||
310 | ctl_table tbl; | ||
311 | int ret; | ||
312 | int changed = 0; | ||
313 | char *none = "none"; | ||
314 | |||
315 | memset(&tbl, 0, sizeof(struct ctl_table)); | ||
316 | |||
317 | if (write) { | ||
318 | tbl.data = tmp; | ||
319 | tbl.maxlen = 8; | ||
320 | } else { | ||
321 | tbl.data = net->sctp.sctp_hmac_alg ? : none; | ||
322 | tbl.maxlen = strlen(tbl.data); | ||
323 | } | ||
324 | ret = proc_dostring(&tbl, write, buffer, lenp, ppos); | ||
325 | |||
326 | if (write) { | ||
327 | #ifdef CONFIG_CRYPTO_MD5 | ||
328 | if (!strncmp(tmp, "md5", 3)) { | ||
329 | net->sctp.sctp_hmac_alg = "md5"; | ||
330 | changed = 1; | ||
331 | } | ||
332 | #endif | ||
333 | #ifdef CONFIG_CRYPTO_SHA1 | ||
334 | if (!strncmp(tmp, "sha1", 4)) { | ||
335 | net->sctp.sctp_hmac_alg = "sha1"; | ||
336 | changed = 1; | ||
337 | } | ||
338 | #endif | ||
339 | if (!strncmp(tmp, "none", 4)) { | ||
340 | net->sctp.sctp_hmac_alg = NULL; | ||
341 | changed = 1; | ||
342 | } | ||
343 | |||
344 | if (!changed) | ||
345 | ret = -EINVAL; | ||
346 | } | ||
347 | |||
348 | return ret; | ||
349 | } | ||
350 | |||
292 | int sctp_sysctl_net_register(struct net *net) | 351 | int sctp_sysctl_net_register(struct net *net) |
293 | { | 352 | { |
294 | struct ctl_table *table; | 353 | struct ctl_table *table; |
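Per the handler above, the new per-namespace control accepts exactly "md5" or "sha1" (each only when the matching crypto option is built) or "none", and any other string is rejected with -EINVAL. A small user-space sketch of reading and switching it; the /proc/sys/net/sctp/cookie_hmac_alg path is the usual procfs mapping for an entry in sctp_net_table and is an assumption of this sketch, not something stated in the hunk:

/* Sketch: query and set cookie_hmac_alg (writing needs privilege). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/proc/sys/net/sctp/cookie_hmac_alg";
	char cur[16] = "";
	FILE *f;

	f = fopen(path, "r");
	if (f) {
		if (fgets(cur, sizeof(cur), f))
			cur[strcspn(cur, "\n")] = '\0';
		fclose(f);
		printf("cookie hmac alg: %s\n", cur);
	}

	f = fopen(path, "w");
	if (f) {
		/* accepted values: "md5", "sha1", "none"; others -> EINVAL */
		fputs("sha1", f);
		if (fclose(f) != 0)
			perror("write");
	} else {
		perror(path);
	}
	return 0;
}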
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c index b5fb7c409023..5f25e0c92c31 100644 --- a/net/sctp/tsnmap.c +++ b/net/sctp/tsnmap.c | |||
@@ -272,7 +272,7 @@ __u16 sctp_tsnmap_pending(struct sctp_tsnmap *map) | |||
272 | __u32 max_tsn = map->max_tsn_seen; | 272 | __u32 max_tsn = map->max_tsn_seen; |
273 | __u32 base_tsn = map->base_tsn; | 273 | __u32 base_tsn = map->base_tsn; |
274 | __u16 pending_data; | 274 | __u16 pending_data; |
275 | u32 gap, i; | 275 | u32 gap; |
276 | 276 | ||
277 | pending_data = max_tsn - cum_tsn; | 277 | pending_data = max_tsn - cum_tsn; |
278 | gap = max_tsn - base_tsn; | 278 | gap = max_tsn - base_tsn; |
@@ -280,11 +280,7 @@ __u16 sctp_tsnmap_pending(struct sctp_tsnmap *map) | |||
280 | if (gap == 0 || gap >= map->len) | 280 | if (gap == 0 || gap >= map->len) |
281 | goto out; | 281 | goto out; |
282 | 282 | ||
283 | for (i = 0; i < gap+1; i++) { | 283 | pending_data -= bitmap_weight(map->tsn_map, gap + 1); |
284 | if (test_bit(i, map->tsn_map)) | ||
285 | pending_data--; | ||
286 | } | ||
287 | |||
288 | out: | 284 | out: |
289 | return pending_data; | 285 | return pending_data; |
290 | } | 286 | } |
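The sctp_tsnmap_pending() change above replaces a per-bit test_bit() loop with one bitmap_weight() call over the first gap + 1 bits, subtracting the number of TSNs already marked received. The equivalence is easy to check with a popcount over a small fixed-size map; the 64-bit map and the GCC/Clang builtin below are assumptions of the sketch, not anything used by the patch:

/* Sketch: popcount over the first (gap + 1) bits == the removed loop. */
#include <stdint.h>
#include <stdio.h>

static unsigned int weight(uint64_t map, unsigned int nbits)
{
	uint64_t mask = nbits >= 64 ? ~0ULL : (1ULL << nbits) - 1;

	return (unsigned int)__builtin_popcountll(map & mask);
}

int main(void)
{
	uint64_t tsn_map = 0xB5;               /* set bits = TSNs already seen */
	unsigned int gap = 7, i, loop_count = 0;

	for (i = 0; i < gap + 1; i++)          /* the removed loop */
		if (tsn_map & (1ULL << i))
			loop_count++;

	printf("loop = %u, weight = %u\n", loop_count, weight(tsn_map, gap + 1));
	return 0;
}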
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 360d8697b95c..ada17464b65b 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
@@ -997,7 +997,6 @@ static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed) | |||
997 | 997 | ||
998 | /* Partial deliver the first message as there is pressure on rwnd. */ | 998 | /* Partial deliver the first message as there is pressure on rwnd. */ |
999 | void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, | 999 | void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, |
1000 | struct sctp_chunk *chunk, | ||
1001 | gfp_t gfp) | 1000 | gfp_t gfp) |
1002 | { | 1001 | { |
1003 | struct sctp_ulpevent *event; | 1002 | struct sctp_ulpevent *event; |
@@ -1060,7 +1059,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
1060 | sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport); | 1059 | sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport); |
1061 | sctp_ulpq_tail_data(ulpq, chunk, gfp); | 1060 | sctp_ulpq_tail_data(ulpq, chunk, gfp); |
1062 | 1061 | ||
1063 | sctp_ulpq_partial_delivery(ulpq, chunk, gfp); | 1062 | sctp_ulpq_partial_delivery(ulpq, gfp); |
1064 | } | 1063 | } |
1065 | 1064 | ||
1066 | sk_mem_reclaim(asoc->base.sk); | 1065 | sk_mem_reclaim(asoc->base.sk); |
diff --git a/net/socket.c b/net/socket.c index d92c490e66fa..2ca51c719ef9 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -620,8 +620,6 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock, | |||
620 | { | 620 | { |
621 | struct sock_iocb *si = kiocb_to_siocb(iocb); | 621 | struct sock_iocb *si = kiocb_to_siocb(iocb); |
622 | 622 | ||
623 | sock_update_classid(sock->sk); | ||
624 | |||
625 | si->sock = sock; | 623 | si->sock = sock; |
626 | si->scm = NULL; | 624 | si->scm = NULL; |
627 | si->msg = msg; | 625 | si->msg = msg; |
@@ -784,8 +782,6 @@ static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, | |||
784 | { | 782 | { |
785 | struct sock_iocb *si = kiocb_to_siocb(iocb); | 783 | struct sock_iocb *si = kiocb_to_siocb(iocb); |
786 | 784 | ||
787 | sock_update_classid(sock->sk); | ||
788 | |||
789 | si->sock = sock; | 785 | si->sock = sock; |
790 | si->scm = NULL; | 786 | si->scm = NULL; |
791 | si->msg = msg; | 787 | si->msg = msg; |
@@ -896,8 +892,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos, | |||
896 | if (unlikely(!sock->ops->splice_read)) | 892 | if (unlikely(!sock->ops->splice_read)) |
897 | return -EINVAL; | 893 | return -EINVAL; |
898 | 894 | ||
899 | sock_update_classid(sock->sk); | ||
900 | |||
901 | return sock->ops->splice_read(sock, ppos, pipe, len, flags); | 895 | return sock->ops->splice_read(sock, ppos, pipe, len, flags); |
902 | } | 896 | } |
903 | 897 | ||
@@ -3437,8 +3431,6 @@ EXPORT_SYMBOL(kernel_setsockopt); | |||
3437 | int kernel_sendpage(struct socket *sock, struct page *page, int offset, | 3431 | int kernel_sendpage(struct socket *sock, struct page *page, int offset, |
3438 | size_t size, int flags) | 3432 | size_t size, int flags) |
3439 | { | 3433 | { |
3440 | sock_update_classid(sock->sk); | ||
3441 | |||
3442 | if (sock->ops->sendpage) | 3434 | if (sock->ops->sendpage) |
3443 | return sock->ops->sendpage(sock, page, offset, size, flags); | 3435 | return sock->ops->sendpage(sock, page, offset, size, flags); |
3444 | 3436 | ||
diff --git a/net/sysctl_net.c b/net/sysctl_net.c index e3a6e37cd1c5..9bc6db04be3e 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c | |||
@@ -38,15 +38,24 @@ static int is_seen(struct ctl_table_set *set) | |||
38 | } | 38 | } |
39 | 39 | ||
40 | /* Return standard mode bits for table entry. */ | 40 | /* Return standard mode bits for table entry. */ |
41 | static int net_ctl_permissions(struct ctl_table_root *root, | 41 | static int net_ctl_permissions(struct ctl_table_header *head, |
42 | struct nsproxy *nsproxy, | ||
43 | struct ctl_table *table) | 42 | struct ctl_table *table) |
44 | { | 43 | { |
44 | struct net *net = container_of(head->set, struct net, sysctls); | ||
45 | kuid_t root_uid = make_kuid(net->user_ns, 0); | ||
46 | kgid_t root_gid = make_kgid(net->user_ns, 0); | ||
47 | |||
45 | /* Allow network administrator to have same access as root. */ | 48 | /* Allow network administrator to have same access as root. */ |
46 | if (capable(CAP_NET_ADMIN)) { | 49 | if (ns_capable(net->user_ns, CAP_NET_ADMIN) || |
50 | uid_eq(root_uid, current_uid())) { | ||
47 | int mode = (table->mode >> 6) & 7; | 51 | int mode = (table->mode >> 6) & 7; |
48 | return (mode << 6) | (mode << 3) | mode; | 52 | return (mode << 6) | (mode << 3) | mode; |
49 | } | 53 | } |
54 | /* Allow netns root group to have the same access as the root group */ | ||
55 | if (gid_eq(root_gid, current_gid())) { | ||
56 | int mode = (table->mode >> 3) & 7; | ||
57 | return (mode << 3) | mode; | ||
58 | } | ||
50 | return table->mode; | 59 | return table->mode; |
51 | } | 60 | } |
52 | 61 | ||
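The permission widening in net_ctl_permissions() above is plain bit arithmetic on the table mode: a caller holding CAP_NET_ADMIN in the owning user namespace (or running as that namespace's root uid) has the owner rwx bits replicated into the group and other positions, and the namespace's root group has the group bits replicated into other. A user-space sketch of just that remapping:

/* Sketch of the net_ctl_permissions() mode widening (octal output). */
#include <stdio.h>

static int widen_owner(int table_mode)
{
	int mode = (table_mode >> 6) & 7;      /* owner rwx bits */

	return (mode << 6) | (mode << 3) | mode;
}

static int widen_group(int table_mode)
{
	int mode = (table_mode >> 3) & 7;      /* group rwx bits */

	return (mode << 3) | mode;
}

int main(void)
{
	int modes[] = { 0644, 0444, 0600 };
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		printf("0%03o -> ns admin 0%03o, ns root group 0%03o\n",
		       modes[i], widen_owner(modes[i]), widen_group(modes[i]));
	return 0;
}

So a 0644 entry behaves like 0666 for the namespace's network administrator and like 0044 for its root group, while everyone else keeps the table's original mode.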
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig index 585460180ffb..bc41bd31eadc 100644 --- a/net/tipc/Kconfig +++ b/net/tipc/Kconfig | |||
@@ -20,18 +20,9 @@ menuconfig TIPC | |||
20 | 20 | ||
21 | If in doubt, say N. | 21 | If in doubt, say N. |
22 | 22 | ||
23 | if TIPC | ||
24 | |||
25 | config TIPC_ADVANCED | ||
26 | bool "Advanced TIPC configuration" | ||
27 | default n | ||
28 | help | ||
29 | Saying Y here will open some advanced configuration for TIPC. | ||
30 | Most users do not need to bother; if unsure, just say N. | ||
31 | |||
32 | config TIPC_PORTS | 23 | config TIPC_PORTS |
33 | int "Maximum number of ports in a node" | 24 | int "Maximum number of ports in a node" |
34 | depends on TIPC_ADVANCED | 25 | depends on TIPC |
35 | range 127 65535 | 26 | range 127 65535 |
36 | default "8191" | 27 | default "8191" |
37 | help | 28 | help |
@@ -40,5 +31,3 @@ config TIPC_PORTS | |||
40 | 31 | ||
41 | Setting this to a smaller value saves some memory, | 32 | Setting this to a smaller value saves some memory, |
42 | setting it higher allows for more ports. | 33 | setting it higher allows for more ports. |
43 | |||
44 | endif # TIPC | ||
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index e4e6d8cd47e6..54f89f90ac33 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -347,7 +347,7 @@ static void bclink_peek_nack(struct tipc_msg *msg) | |||
347 | 347 | ||
348 | tipc_node_lock(n_ptr); | 348 | tipc_node_lock(n_ptr); |
349 | 349 | ||
350 | if (n_ptr->bclink.supported && | 350 | if (n_ptr->bclink.recv_permitted && |
351 | (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) && | 351 | (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) && |
352 | (n_ptr->bclink.last_in == msg_bcgap_after(msg))) | 352 | (n_ptr->bclink.last_in == msg_bcgap_after(msg))) |
353 | n_ptr->bclink.oos_state = 2; | 353 | n_ptr->bclink.oos_state = 2; |
@@ -429,7 +429,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf) | |||
429 | goto exit; | 429 | goto exit; |
430 | 430 | ||
431 | tipc_node_lock(node); | 431 | tipc_node_lock(node); |
432 | if (unlikely(!node->bclink.supported)) | 432 | if (unlikely(!node->bclink.recv_permitted)) |
433 | goto unlock; | 433 | goto unlock; |
434 | 434 | ||
435 | /* Handle broadcast protocol message */ | 435 | /* Handle broadcast protocol message */ |
@@ -564,7 +564,7 @@ exit: | |||
564 | 564 | ||
565 | u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) | 565 | u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) |
566 | { | 566 | { |
567 | return (n_ptr->bclink.supported && | 567 | return (n_ptr->bclink.recv_permitted && |
568 | (tipc_bclink_get_last_sent() != n_ptr->bclink.acked)); | 568 | (tipc_bclink_get_last_sent() != n_ptr->bclink.acked)); |
569 | } | 569 | } |
570 | 570 | ||
@@ -619,16 +619,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf, | |||
619 | if (bcbearer->remains_new.count == bcbearer->remains.count) | 619 | if (bcbearer->remains_new.count == bcbearer->remains.count) |
620 | continue; /* bearer pair doesn't add anything */ | 620 | continue; /* bearer pair doesn't add anything */ |
621 | 621 | ||
622 | if (p->blocked || | 622 | if (!tipc_bearer_blocked(p)) |
623 | p->media->send_msg(buf, p, &p->media->bcast_addr)) { | 623 | tipc_bearer_send(p, buf, &p->media->bcast_addr); |
624 | else if (s && !tipc_bearer_blocked(s)) | ||
624 | /* unable to send on primary bearer */ | 625 | /* unable to send on primary bearer */ |
625 | if (!s || s->blocked || | 626 | tipc_bearer_send(s, buf, &s->media->bcast_addr); |
626 | s->media->send_msg(buf, s, | 627 | else |
627 | &s->media->bcast_addr)) { | 628 | /* unable to send on either bearer */ |
628 | /* unable to send on either bearer */ | 629 | continue; |
629 | continue; | ||
630 | } | ||
631 | } | ||
632 | 630 | ||
633 | if (s) { | 631 | if (s) { |
634 | bcbearer->bpairs[bp_index].primary = s; | 632 | bcbearer->bpairs[bp_index].primary = s; |
@@ -731,8 +729,8 @@ int tipc_bclink_stats(char *buf, const u32 buf_size) | |||
731 | " TX naks:%u acks:%u dups:%u\n", | 729 | " TX naks:%u acks:%u dups:%u\n", |
732 | s->sent_nacks, s->sent_acks, s->retransmitted); | 730 | s->sent_nacks, s->sent_acks, s->retransmitted); |
733 | ret += tipc_snprintf(buf + ret, buf_size - ret, | 731 | ret += tipc_snprintf(buf + ret, buf_size - ret, |
734 | " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n", | 732 | " Congestion link:%u Send queue max:%u avg:%u\n", |
735 | s->bearer_congs, s->link_congs, s->max_queue_sz, | 733 | s->link_congs, s->max_queue_sz, |
736 | s->queue_sz_counts ? | 734 | s->queue_sz_counts ? |
737 | (s->accu_queue_sz / s->queue_sz_counts) : 0); | 735 | (s->accu_queue_sz / s->queue_sz_counts) : 0); |
738 | 736 | ||
@@ -766,7 +764,6 @@ int tipc_bclink_set_queue_limits(u32 limit) | |||
766 | 764 | ||
767 | void tipc_bclink_init(void) | 765 | void tipc_bclink_init(void) |
768 | { | 766 | { |
769 | INIT_LIST_HEAD(&bcbearer->bearer.cong_links); | ||
770 | bcbearer->bearer.media = &bcbearer->media; | 767 | bcbearer->bearer.media = &bcbearer->media; |
771 | bcbearer->media.send_msg = tipc_bcbearer_send; | 768 | bcbearer->media.send_msg = tipc_bcbearer_send; |
772 | sprintf(bcbearer->media.name, "tipc-broadcast"); | 769 | sprintf(bcbearer->media.name, "tipc-broadcast"); |
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 4ec5c80e8a7c..aa62f93a9127 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -279,116 +279,31 @@ void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest) | |||
279 | } | 279 | } |
280 | 280 | ||
281 | /* | 281 | /* |
282 | * bearer_push(): Resolve bearer congestion. Force the waiting | 282 | * Interrupt enabling new requests after bearer blocking: |
283 | * links to push out their unsent packets, one packet per link | ||
284 | * per iteration, until all packets are gone or congestion reoccurs. | ||
285 | * 'tipc_net_lock' is read_locked when this function is called | ||
286 | * bearer.lock must be taken before calling | ||
287 | * Returns binary true(1) or false(0) | ||
288 | */ | ||
289 | static int bearer_push(struct tipc_bearer *b_ptr) | ||
290 | { | ||
291 | u32 res = 0; | ||
292 | struct tipc_link *ln, *tln; | ||
293 | |||
294 | if (b_ptr->blocked) | ||
295 | return 0; | ||
296 | |||
297 | while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) { | ||
298 | list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) { | ||
299 | res = tipc_link_push_packet(ln); | ||
300 | if (res == PUSH_FAILED) | ||
301 | break; | ||
302 | if (res == PUSH_FINISHED) | ||
303 | list_move_tail(&ln->link_list, &b_ptr->links); | ||
304 | } | ||
305 | } | ||
306 | return list_empty(&b_ptr->cong_links); | ||
307 | } | ||
308 | |||
309 | void tipc_bearer_lock_push(struct tipc_bearer *b_ptr) | ||
310 | { | ||
311 | spin_lock_bh(&b_ptr->lock); | ||
312 | bearer_push(b_ptr); | ||
313 | spin_unlock_bh(&b_ptr->lock); | ||
314 | } | ||
315 | |||
316 | |||
317 | /* | ||
318 | * Interrupt enabling new requests after bearer congestion or blocking: | ||
319 | * See bearer_send(). | 283 | * See bearer_send(). |
320 | */ | 284 | */ |
321 | void tipc_continue(struct tipc_bearer *b_ptr) | 285 | void tipc_continue(struct tipc_bearer *b) |
322 | { | 286 | { |
323 | spin_lock_bh(&b_ptr->lock); | 287 | spin_lock_bh(&b->lock); |
324 | if (!list_empty(&b_ptr->cong_links)) | 288 | b->blocked = 0; |
325 | tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr); | 289 | spin_unlock_bh(&b->lock); |
326 | b_ptr->blocked = 0; | ||
327 | spin_unlock_bh(&b_ptr->lock); | ||
328 | } | 290 | } |
329 | 291 | ||
330 | /* | 292 | /* |
331 | * Schedule link for sending of messages after the bearer | 293 | * tipc_bearer_blocked - determines if bearer is currently blocked |
332 | * has been deblocked by 'continue()'. This method is called | ||
333 | * when somebody tries to send a message via this link while | ||
334 | * the bearer is congested. 'tipc_net_lock' is in read_lock here | ||
335 | * bearer.lock is busy | ||
336 | */ | 294 | */ |
337 | static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, | 295 | int tipc_bearer_blocked(struct tipc_bearer *b) |
338 | struct tipc_link *l_ptr) | ||
339 | { | 296 | { |
340 | list_move_tail(&l_ptr->link_list, &b_ptr->cong_links); | 297 | int res; |
341 | } | ||
342 | |||
343 | /* | ||
344 | * Schedule link for sending of messages after the bearer | ||
345 | * has been deblocked by 'continue()'. This method is called | ||
346 | * when somebody tries to send a message via this link while | ||
347 | * the bearer is congested. 'tipc_net_lock' is in read_lock here, | ||
348 | * bearer.lock is free | ||
349 | */ | ||
350 | void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) | ||
351 | { | ||
352 | spin_lock_bh(&b_ptr->lock); | ||
353 | tipc_bearer_schedule_unlocked(b_ptr, l_ptr); | ||
354 | spin_unlock_bh(&b_ptr->lock); | ||
355 | } | ||
356 | |||
357 | 298 | ||
358 | /* | 299 | spin_lock_bh(&b->lock); |
359 | * tipc_bearer_resolve_congestion(): Check if there is bearer congestion, | 300 | res = b->blocked; |
360 | * and if there is, try to resolve it before returning. | 301 | spin_unlock_bh(&b->lock); |
361 | * 'tipc_net_lock' is read_locked when this function is called | ||
362 | */ | ||
363 | int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, | ||
364 | struct tipc_link *l_ptr) | ||
365 | { | ||
366 | int res = 1; | ||
367 | 302 | ||
368 | if (list_empty(&b_ptr->cong_links)) | ||
369 | return 1; | ||
370 | spin_lock_bh(&b_ptr->lock); | ||
371 | if (!bearer_push(b_ptr)) { | ||
372 | tipc_bearer_schedule_unlocked(b_ptr, l_ptr); | ||
373 | res = 0; | ||
374 | } | ||
375 | spin_unlock_bh(&b_ptr->lock); | ||
376 | return res; | 303 | return res; |
377 | } | 304 | } |
378 | 305 | ||
379 | /** | 306 | /** |
380 | * tipc_bearer_congested - determines if bearer is currently congested | ||
381 | */ | ||
382 | int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) | ||
383 | { | ||
384 | if (unlikely(b_ptr->blocked)) | ||
385 | return 1; | ||
386 | if (likely(list_empty(&b_ptr->cong_links))) | ||
387 | return 0; | ||
388 | return !tipc_bearer_resolve_congestion(b_ptr, l_ptr); | ||
389 | } | ||
390 | |||
391 | /** | ||
392 | * tipc_enable_bearer - enable bearer with the given name | 307 | * tipc_enable_bearer - enable bearer with the given name |
393 | */ | 308 | */ |
394 | int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) | 309 | int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) |
@@ -489,7 +404,6 @@ restart: | |||
489 | b_ptr->net_plane = bearer_id + 'A'; | 404 | b_ptr->net_plane = bearer_id + 'A'; |
490 | b_ptr->active = 1; | 405 | b_ptr->active = 1; |
491 | b_ptr->priority = priority; | 406 | b_ptr->priority = priority; |
492 | INIT_LIST_HEAD(&b_ptr->cong_links); | ||
493 | INIT_LIST_HEAD(&b_ptr->links); | 407 | INIT_LIST_HEAD(&b_ptr->links); |
494 | spin_lock_init(&b_ptr->lock); | 408 | spin_lock_init(&b_ptr->lock); |
495 | 409 | ||
@@ -528,7 +442,6 @@ int tipc_block_bearer(const char *name) | |||
528 | pr_info("Blocking bearer <%s>\n", name); | 442 | pr_info("Blocking bearer <%s>\n", name); |
529 | spin_lock_bh(&b_ptr->lock); | 443 | spin_lock_bh(&b_ptr->lock); |
530 | b_ptr->blocked = 1; | 444 | b_ptr->blocked = 1; |
531 | list_splice_init(&b_ptr->cong_links, &b_ptr->links); | ||
532 | list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { | 445 | list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { |
533 | struct tipc_node *n_ptr = l_ptr->owner; | 446 | struct tipc_node *n_ptr = l_ptr->owner; |
534 | 447 | ||
@@ -555,7 +468,6 @@ static void bearer_disable(struct tipc_bearer *b_ptr) | |||
555 | spin_lock_bh(&b_ptr->lock); | 468 | spin_lock_bh(&b_ptr->lock); |
556 | b_ptr->blocked = 1; | 469 | b_ptr->blocked = 1; |
557 | b_ptr->media->disable_bearer(b_ptr); | 470 | b_ptr->media->disable_bearer(b_ptr); |
558 | list_splice_init(&b_ptr->cong_links, &b_ptr->links); | ||
559 | list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { | 471 | list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { |
560 | tipc_link_delete(l_ptr); | 472 | tipc_link_delete(l_ptr); |
561 | } | 473 | } |
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index dd4c2abf08e7..39f1192d04bf 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h | |||
@@ -120,7 +120,6 @@ struct tipc_media { | |||
120 | * @identity: array index of this bearer within TIPC bearer array | 120 | * @identity: array index of this bearer within TIPC bearer array |
121 | * @link_req: ptr to (optional) structure making periodic link setup requests | 121 | * @link_req: ptr to (optional) structure making periodic link setup requests |
122 | * @links: list of non-congested links associated with bearer | 122 | * @links: list of non-congested links associated with bearer |
123 | * @cong_links: list of congested links associated with bearer | ||
124 | * @active: non-zero if bearer structure represents a bearer | 123 | * @active: non-zero if bearer structure represents a bearer |
125 | * @net_plane: network plane ('A' through 'H') currently associated with bearer | 124 | * @net_plane: network plane ('A' through 'H') currently associated with bearer |
126 | * @nodes: indicates which nodes in cluster can be reached through bearer | 125 | * @nodes: indicates which nodes in cluster can be reached through bearer |
@@ -143,7 +142,6 @@ struct tipc_bearer { | |||
143 | u32 identity; | 142 | u32 identity; |
144 | struct tipc_link_req *link_req; | 143 | struct tipc_link_req *link_req; |
145 | struct list_head links; | 144 | struct list_head links; |
146 | struct list_head cong_links; | ||
147 | int active; | 145 | int active; |
148 | char net_plane; | 146 | char net_plane; |
149 | struct tipc_node_map nodes; | 147 | struct tipc_node_map nodes; |
@@ -185,39 +183,23 @@ struct sk_buff *tipc_media_get_names(void); | |||
185 | struct sk_buff *tipc_bearer_get_names(void); | 183 | struct sk_buff *tipc_bearer_get_names(void); |
186 | void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest); | 184 | void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest); |
187 | void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest); | 185 | void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest); |
188 | void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr); | ||
189 | struct tipc_bearer *tipc_bearer_find(const char *name); | 186 | struct tipc_bearer *tipc_bearer_find(const char *name); |
190 | struct tipc_bearer *tipc_bearer_find_interface(const char *if_name); | 187 | struct tipc_bearer *tipc_bearer_find_interface(const char *if_name); |
191 | struct tipc_media *tipc_media_find(const char *name); | 188 | struct tipc_media *tipc_media_find(const char *name); |
192 | int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, | 189 | int tipc_bearer_blocked(struct tipc_bearer *b_ptr); |
193 | struct tipc_link *l_ptr); | ||
194 | int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr); | ||
195 | void tipc_bearer_stop(void); | 190 | void tipc_bearer_stop(void); |
196 | void tipc_bearer_lock_push(struct tipc_bearer *b_ptr); | ||
197 | |||
198 | 191 | ||
199 | /** | 192 | /** |
200 | * tipc_bearer_send - sends buffer to destination over bearer | 193 | * tipc_bearer_send - sends buffer to destination over bearer |
201 | * | 194 | * |
202 | * Returns true (1) if successful, or false (0) if unable to send | ||
203 | * | ||
204 | * IMPORTANT: | 195 | * IMPORTANT: |
205 | * The media send routine must not alter the buffer being passed in | 196 | * The media send routine must not alter the buffer being passed in |
206 | * as it may be needed for later retransmission! | 197 | * as it may be needed for later retransmission! |
207 | * | ||
208 | * If the media send routine returns a non-zero value (indicating that | ||
209 | * it was unable to send the buffer), it must: | ||
210 | * 1) mark the bearer as blocked, | ||
211 | * 2) call tipc_continue() once the bearer is able to send again. | ||
212 | * Media types that are unable to meet these two criteria must ensure their | ||
213 | * send routine always returns success -- even if the buffer was not sent -- | ||
214 | * and let TIPC's link code deal with the undelivered message. | ||
215 | */ | 198 | */ |
216 | static inline int tipc_bearer_send(struct tipc_bearer *b_ptr, | 199 | static inline void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf, |
217 | struct sk_buff *buf, | ||
218 | struct tipc_media_addr *dest) | 200 | struct tipc_media_addr *dest) |
219 | { | 201 | { |
220 | return !b_ptr->media->send_msg(buf, b_ptr, dest); | 202 | b->media->send_msg(buf, b, dest); |
221 | } | 203 | } |
222 | 204 | ||
223 | #endif /* _TIPC_BEARER_H */ | 205 | #endif /* _TIPC_BEARER_H */ |
diff --git a/net/tipc/core.c b/net/tipc/core.c index bfe8af88469a..fc05cecd7481 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -42,11 +42,6 @@ | |||
42 | 42 | ||
43 | #include <linux/module.h> | 43 | #include <linux/module.h> |
44 | 44 | ||
45 | #ifndef CONFIG_TIPC_PORTS | ||
46 | #define CONFIG_TIPC_PORTS 8191 | ||
47 | #endif | ||
48 | |||
49 | |||
50 | /* global variables used by multiple sub-systems within TIPC */ | 45 | /* global variables used by multiple sub-systems within TIPC */ |
51 | int tipc_random __read_mostly; | 46 | int tipc_random __read_mostly; |
52 | 47 | ||
diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 50eaa403eb6e..1074b9587e81 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c | |||
@@ -243,7 +243,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr) | |||
243 | if ((type == DSC_REQ_MSG) && !link_fully_up && !b_ptr->blocked) { | 243 | if ((type == DSC_REQ_MSG) && !link_fully_up && !b_ptr->blocked) { |
244 | rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr); | 244 | rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr); |
245 | if (rbuf) { | 245 | if (rbuf) { |
246 | b_ptr->media->send_msg(rbuf, b_ptr, &media_addr); | 246 | tipc_bearer_send(b_ptr, rbuf, &media_addr); |
247 | kfree_skb(rbuf); | 247 | kfree_skb(rbuf); |
248 | } | 248 | } |
249 | } | 249 | } |
diff --git a/net/tipc/link.c b/net/tipc/link.c index a79c755cb417..87bf5aad704b 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/link.c: TIPC link code | 2 | * net/tipc/link.c: TIPC link code |
3 | * | 3 | * |
4 | * Copyright (c) 1996-2007, Ericsson AB | 4 | * Copyright (c) 1996-2007, 2012, Ericsson AB |
5 | * Copyright (c) 2004-2007, 2010-2011, Wind River Systems | 5 | * Copyright (c) 2004-2007, 2010-2011, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
@@ -103,6 +103,8 @@ static void link_reset_statistics(struct tipc_link *l_ptr); | |||
103 | static void link_print(struct tipc_link *l_ptr, const char *str); | 103 | static void link_print(struct tipc_link *l_ptr, const char *str); |
104 | static void link_start(struct tipc_link *l_ptr); | 104 | static void link_start(struct tipc_link *l_ptr); |
105 | static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf); | 105 | static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf); |
106 | static void tipc_link_send_sync(struct tipc_link *l); | ||
107 | static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf); | ||
106 | 108 | ||
107 | /* | 109 | /* |
108 | * Simple link routines | 110 | * Simple link routines |
@@ -712,6 +714,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
712 | link_activate(l_ptr); | 714 | link_activate(l_ptr); |
713 | tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); | 715 | tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); |
714 | l_ptr->fsm_msg_cnt++; | 716 | l_ptr->fsm_msg_cnt++; |
717 | if (l_ptr->owner->working_links == 1) | ||
718 | tipc_link_send_sync(l_ptr); | ||
715 | link_set_timer(l_ptr, cont_intv); | 719 | link_set_timer(l_ptr, cont_intv); |
716 | break; | 720 | break; |
717 | case RESET_MSG: | 721 | case RESET_MSG: |
@@ -745,6 +749,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) | |||
745 | link_activate(l_ptr); | 749 | link_activate(l_ptr); |
746 | tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); | 750 | tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); |
747 | l_ptr->fsm_msg_cnt++; | 751 | l_ptr->fsm_msg_cnt++; |
752 | if (l_ptr->owner->working_links == 1) | ||
753 | tipc_link_send_sync(l_ptr); | ||
748 | link_set_timer(l_ptr, cont_intv); | 754 | link_set_timer(l_ptr, cont_intv); |
749 | break; | 755 | break; |
750 | case RESET_MSG: | 756 | case RESET_MSG: |
@@ -872,17 +878,12 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) | |||
872 | return link_send_long_buf(l_ptr, buf); | 878 | return link_send_long_buf(l_ptr, buf); |
873 | 879 | ||
874 | /* Packet can be queued or sent. */ | 880 | /* Packet can be queued or sent. */ |
875 | if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && | 881 | if (likely(!tipc_bearer_blocked(l_ptr->b_ptr) && |
876 | !link_congested(l_ptr))) { | 882 | !link_congested(l_ptr))) { |
877 | link_add_to_outqueue(l_ptr, buf, msg); | 883 | link_add_to_outqueue(l_ptr, buf, msg); |
878 | 884 | ||
879 | if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) { | 885 | tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); |
880 | l_ptr->unacked_window = 0; | 886 | l_ptr->unacked_window = 0; |
881 | } else { | ||
882 | tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); | ||
883 | l_ptr->stats.bearer_congs++; | ||
884 | l_ptr->next_out = buf; | ||
885 | } | ||
886 | return dsz; | 887 | return dsz; |
887 | } | 888 | } |
888 | /* Congestion: can message be bundled ? */ | 889 | /* Congestion: can message be bundled ? */ |
@@ -891,10 +892,8 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) | |||
891 | 892 | ||
892 | /* Try adding message to an existing bundle */ | 893 | /* Try adding message to an existing bundle */ |
893 | if (l_ptr->next_out && | 894 | if (l_ptr->next_out && |
894 | link_bundle_buf(l_ptr, l_ptr->last_out, buf)) { | 895 | link_bundle_buf(l_ptr, l_ptr->last_out, buf)) |
895 | tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); | ||
896 | return dsz; | 896 | return dsz; |
897 | } | ||
898 | 897 | ||
899 | /* Try creating a new bundle */ | 898 | /* Try creating a new bundle */ |
900 | if (size <= max_packet * 2 / 3) { | 899 | if (size <= max_packet * 2 / 3) { |
@@ -917,7 +916,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) | |||
917 | if (!l_ptr->next_out) | 916 | if (!l_ptr->next_out) |
918 | l_ptr->next_out = buf; | 917 | l_ptr->next_out = buf; |
919 | link_add_to_outqueue(l_ptr, buf, msg); | 918 | link_add_to_outqueue(l_ptr, buf, msg); |
920 | tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); | ||
921 | return dsz; | 919 | return dsz; |
922 | } | 920 | } |
923 | 921 | ||
@@ -949,7 +947,48 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) | |||
949 | return res; | 947 | return res; |
950 | } | 948 | } |
951 | 949 | ||
952 | /** | 950 | /* |
951 | * tipc_link_send_sync - synchronize broadcast link endpoints. | ||
952 | * | ||
953 | * Give a newly added peer node the sequence number where it should | ||
954 | * start receiving and acking broadcast packets. | ||
955 | * | ||
956 | * Called with node locked | ||
957 | */ | ||
958 | static void tipc_link_send_sync(struct tipc_link *l) | ||
959 | { | ||
960 | struct sk_buff *buf; | ||
961 | struct tipc_msg *msg; | ||
962 | |||
963 | buf = tipc_buf_acquire(INT_H_SIZE); | ||
964 | if (!buf) | ||
965 | return; | ||
966 | |||
967 | msg = buf_msg(buf); | ||
968 | tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr); | ||
969 | msg_set_last_bcast(msg, l->owner->bclink.acked); | ||
970 | link_add_chain_to_outqueue(l, buf, 0); | ||
971 | tipc_link_push_queue(l); | ||
972 | } | ||
973 | |||
974 | /* | ||
975 | * tipc_link_recv_sync - synchronize broadcast link endpoints. | ||
976 | * Receive the sequence number where we should start receiving and | ||
977 | * acking broadcast packets from a newly added peer node, and open | ||
978 | * up for reception of such packets. | ||
979 | * | ||
980 | * Called with node locked | ||
981 | */ | ||
982 | static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf) | ||
983 | { | ||
984 | struct tipc_msg *msg = buf_msg(buf); | ||
985 | |||
986 | n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg); | ||
987 | n->bclink.recv_permitted = true; | ||
988 | kfree_skb(buf); | ||
989 | } | ||
990 | |||
991 | /* | ||
953 | * tipc_link_send_names - send name table entries to new neighbor | 992 | * tipc_link_send_names - send name table entries to new neighbor |
954 | * | 993 | * |
955 | * Send routine for bulk delivery of name table messages when contact | 994 | * Send routine for bulk delivery of name table messages when contact |
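A worked illustration of the synchronization handshake added above, with the sequence number chosen purely for illustration:

	/* Node A, on first contact, sends a BCAST_PROTOCOL STATE_MSG carrying
	 * msg_last_bcast(msg) == 42 (its bclink.acked).  Node B then primes:
	 *
	 *	n->bclink.last_sent = n->bclink.last_in = 42;
	 *	n->bclink.recv_permitted = true;
	 *
	 * so broadcast packet 43 is the first one B will accept and acknowledge. */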
@@ -1006,16 +1045,11 @@ static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf, | |||
1006 | 1045 | ||
1007 | if (likely(!link_congested(l_ptr))) { | 1046 | if (likely(!link_congested(l_ptr))) { |
1008 | if (likely(msg_size(msg) <= l_ptr->max_pkt)) { | 1047 | if (likely(msg_size(msg) <= l_ptr->max_pkt)) { |
1009 | if (likely(list_empty(&l_ptr->b_ptr->cong_links))) { | 1048 | if (likely(!tipc_bearer_blocked(l_ptr->b_ptr))) { |
1010 | link_add_to_outqueue(l_ptr, buf, msg); | 1049 | link_add_to_outqueue(l_ptr, buf, msg); |
1011 | if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, | 1050 | tipc_bearer_send(l_ptr->b_ptr, buf, |
1012 | &l_ptr->media_addr))) { | 1051 | &l_ptr->media_addr); |
1013 | l_ptr->unacked_window = 0; | 1052 | l_ptr->unacked_window = 0; |
1014 | return res; | ||
1015 | } | ||
1016 | tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); | ||
1017 | l_ptr->stats.bearer_congs++; | ||
1018 | l_ptr->next_out = buf; | ||
1019 | return res; | 1053 | return res; |
1020 | } | 1054 | } |
1021 | } else | 1055 | } else |
@@ -1106,7 +1140,7 @@ exit: | |||
1106 | 1140 | ||
1107 | /* Exit if link (or bearer) is congested */ | 1141 | /* Exit if link (or bearer) is congested */ |
1108 | if (link_congested(l_ptr) || | 1142 | if (link_congested(l_ptr) || |
1109 | !list_empty(&l_ptr->b_ptr->cong_links)) { | 1143 | tipc_bearer_blocked(l_ptr->b_ptr)) { |
1110 | res = link_schedule_port(l_ptr, | 1144 | res = link_schedule_port(l_ptr, |
1111 | sender->ref, res); | 1145 | sender->ref, res); |
1112 | goto exit; | 1146 | goto exit; |
@@ -1329,15 +1363,11 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) | |||
1329 | if (r_q_size && buf) { | 1363 | if (r_q_size && buf) { |
1330 | msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); | 1364 | msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); |
1331 | msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); | 1365 | msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); |
1332 | if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { | 1366 | tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); |
1333 | l_ptr->retransm_queue_head = mod(++r_q_head); | 1367 | l_ptr->retransm_queue_head = mod(++r_q_head); |
1334 | l_ptr->retransm_queue_size = --r_q_size; | 1368 | l_ptr->retransm_queue_size = --r_q_size; |
1335 | l_ptr->stats.retransmitted++; | 1369 | l_ptr->stats.retransmitted++; |
1336 | return 0; | 1370 | return 0; |
1337 | } else { | ||
1338 | l_ptr->stats.bearer_congs++; | ||
1339 | return PUSH_FAILED; | ||
1340 | } | ||
1341 | } | 1371 | } |
1342 | 1372 | ||
1343 | /* Send deferred protocol message, if any: */ | 1373 | /* Send deferred protocol message, if any: */ |
@@ -1345,15 +1375,11 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) | |||
1345 | if (buf) { | 1375 | if (buf) { |
1346 | msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); | 1376 | msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); |
1347 | msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); | 1377 | msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); |
1348 | if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { | 1378 | tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); |
1349 | l_ptr->unacked_window = 0; | 1379 | l_ptr->unacked_window = 0; |
1350 | kfree_skb(buf); | 1380 | kfree_skb(buf); |
1351 | l_ptr->proto_msg_queue = NULL; | 1381 | l_ptr->proto_msg_queue = NULL; |
1352 | return 0; | 1382 | return 0; |
1353 | } else { | ||
1354 | l_ptr->stats.bearer_congs++; | ||
1355 | return PUSH_FAILED; | ||
1356 | } | ||
1357 | } | 1383 | } |
1358 | 1384 | ||
1359 | /* Send one deferred data message, if send window not full: */ | 1385 | /* Send one deferred data message, if send window not full: */ |
@@ -1366,18 +1392,14 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr) | |||
1366 | if (mod(next - first) < l_ptr->queue_limit[0]) { | 1392 | if (mod(next - first) < l_ptr->queue_limit[0]) { |
1367 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 1393 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); |
1368 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | 1394 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); |
1369 | if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { | 1395 | tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); |
1370 | if (msg_user(msg) == MSG_BUNDLER) | 1396 | if (msg_user(msg) == MSG_BUNDLER) |
1371 | msg_set_type(msg, CLOSED_MSG); | 1397 | msg_set_type(msg, CLOSED_MSG); |
1372 | l_ptr->next_out = buf->next; | 1398 | l_ptr->next_out = buf->next; |
1373 | return 0; | 1399 | return 0; |
1374 | } else { | ||
1375 | l_ptr->stats.bearer_congs++; | ||
1376 | return PUSH_FAILED; | ||
1377 | } | ||
1378 | } | 1400 | } |
1379 | } | 1401 | } |
1380 | return PUSH_FINISHED; | 1402 | return 1; |
1381 | } | 1403 | } |
1382 | 1404 | ||
1383 | /* | 1405 | /* |
@@ -1388,15 +1410,12 @@ void tipc_link_push_queue(struct tipc_link *l_ptr) | |||
1388 | { | 1410 | { |
1389 | u32 res; | 1411 | u32 res; |
1390 | 1412 | ||
1391 | if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) | 1413 | if (tipc_bearer_blocked(l_ptr->b_ptr)) |
1392 | return; | 1414 | return; |
1393 | 1415 | ||
1394 | do { | 1416 | do { |
1395 | res = tipc_link_push_packet(l_ptr); | 1417 | res = tipc_link_push_packet(l_ptr); |
1396 | } while (!res); | 1418 | } while (!res); |
1397 | |||
1398 | if (res == PUSH_FAILED) | ||
1399 | tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); | ||
1400 | } | 1419 | } |
1401 | 1420 | ||
1402 | static void link_reset_all(unsigned long addr) | 1421 | static void link_reset_all(unsigned long addr) |
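With PUSH_FAILED gone, tipc_link_push_packet() is left with a two-value contract; a sketch of how tipc_link_push_queue() consumes it (a simplified restatement of the code above, not new behaviour):

	/* return 0: one packet was pushed, keep draining
	 * return 1: nothing left to push (the old PUSH_FINISHED) */
	if (tipc_bearer_blocked(l_ptr->b_ptr))
		return;				/* retry once the bearer unblocks */
	do {
		res = tipc_link_push_packet(l_ptr);
	} while (!res);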
@@ -1454,9 +1473,8 @@ static void link_retransmit_failure(struct tipc_link *l_ptr, | |||
1454 | 1473 | ||
1455 | tipc_addr_string_fill(addr_string, n_ptr->addr); | 1474 | tipc_addr_string_fill(addr_string, n_ptr->addr); |
1456 | pr_info("Broadcast link info for %s\n", addr_string); | 1475 | pr_info("Broadcast link info for %s\n", addr_string); |
1457 | pr_info("Supportable: %d, Supported: %d, Acked: %u\n", | 1476 | pr_info("Reception permitted: %d, Acked: %u\n", |
1458 | n_ptr->bclink.supportable, | 1477 | n_ptr->bclink.recv_permitted, |
1459 | n_ptr->bclink.supported, | ||
1460 | n_ptr->bclink.acked); | 1478 | n_ptr->bclink.acked); |
1461 | pr_info("Last in: %u, Oos state: %u, Last sent: %u\n", | 1479 | pr_info("Last in: %u, Oos state: %u, Last sent: %u\n", |
1462 | n_ptr->bclink.last_in, | 1480 | n_ptr->bclink.last_in, |
@@ -1481,7 +1499,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, | |||
1481 | 1499 | ||
1482 | msg = buf_msg(buf); | 1500 | msg = buf_msg(buf); |
1483 | 1501 | ||
1484 | if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { | 1502 | if (tipc_bearer_blocked(l_ptr->b_ptr)) { |
1485 | if (l_ptr->retransm_queue_size == 0) { | 1503 | if (l_ptr->retransm_queue_size == 0) { |
1486 | l_ptr->retransm_queue_head = msg_seqno(msg); | 1504 | l_ptr->retransm_queue_head = msg_seqno(msg); |
1487 | l_ptr->retransm_queue_size = retransmits; | 1505 | l_ptr->retransm_queue_size = retransmits; |
@@ -1491,7 +1509,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, | |||
1491 | } | 1509 | } |
1492 | return; | 1510 | return; |
1493 | } else { | 1511 | } else { |
1494 | /* Detect repeated retransmit failures on uncongested bearer */ | 1512 | /* Detect repeated retransmit failures on unblocked bearer */ |
1495 | if (l_ptr->last_retransmitted == msg_seqno(msg)) { | 1513 | if (l_ptr->last_retransmitted == msg_seqno(msg)) { |
1496 | if (++l_ptr->stale_count > 100) { | 1514 | if (++l_ptr->stale_count > 100) { |
1497 | link_retransmit_failure(l_ptr, buf); | 1515 | link_retransmit_failure(l_ptr, buf); |
@@ -1507,17 +1525,10 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, | |||
1507 | msg = buf_msg(buf); | 1525 | msg = buf_msg(buf); |
1508 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 1526 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); |
1509 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | 1527 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); |
1510 | if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { | 1528 | tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); |
1511 | buf = buf->next; | 1529 | buf = buf->next; |
1512 | retransmits--; | 1530 | retransmits--; |
1513 | l_ptr->stats.retransmitted++; | 1531 | l_ptr->stats.retransmitted++; |
1514 | } else { | ||
1515 | tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); | ||
1516 | l_ptr->stats.bearer_congs++; | ||
1517 | l_ptr->retransm_queue_head = buf_seqno(buf); | ||
1518 | l_ptr->retransm_queue_size = retransmits; | ||
1519 | return; | ||
1520 | } | ||
1521 | } | 1532 | } |
1522 | 1533 | ||
1523 | l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; | 1534 | l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; |
@@ -1676,7 +1687,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) | |||
1676 | ackd = msg_ack(msg); | 1687 | ackd = msg_ack(msg); |
1677 | 1688 | ||
1678 | /* Release acked messages */ | 1689 | /* Release acked messages */ |
1679 | if (n_ptr->bclink.supported) | 1690 | if (n_ptr->bclink.recv_permitted) |
1680 | tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); | 1691 | tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); |
1681 | 1692 | ||
1682 | crs = l_ptr->first_out; | 1693 | crs = l_ptr->first_out; |
@@ -1727,9 +1738,14 @@ deliver: | |||
1727 | tipc_link_recv_bundle(buf); | 1738 | tipc_link_recv_bundle(buf); |
1728 | continue; | 1739 | continue; |
1729 | case NAME_DISTRIBUTOR: | 1740 | case NAME_DISTRIBUTOR: |
1741 | n_ptr->bclink.recv_permitted = true; | ||
1730 | tipc_node_unlock(n_ptr); | 1742 | tipc_node_unlock(n_ptr); |
1731 | tipc_named_recv(buf); | 1743 | tipc_named_recv(buf); |
1732 | continue; | 1744 | continue; |
1745 | case BCAST_PROTOCOL: | ||
1746 | tipc_link_recv_sync(n_ptr, buf); | ||
1747 | tipc_node_unlock(n_ptr); | ||
1748 | continue; | ||
1733 | case CONN_MANAGER: | 1749 | case CONN_MANAGER: |
1734 | tipc_node_unlock(n_ptr); | 1750 | tipc_node_unlock(n_ptr); |
1735 | tipc_port_recv_proto_msg(buf); | 1751 | tipc_port_recv_proto_msg(buf); |
@@ -1772,16 +1788,19 @@ deliver: | |||
1772 | continue; | 1788 | continue; |
1773 | } | 1789 | } |
1774 | 1790 | ||
1791 | /* Link is not in state WORKING_WORKING */ | ||
1775 | if (msg_user(msg) == LINK_PROTOCOL) { | 1792 | if (msg_user(msg) == LINK_PROTOCOL) { |
1776 | link_recv_proto_msg(l_ptr, buf); | 1793 | link_recv_proto_msg(l_ptr, buf); |
1777 | head = link_insert_deferred_queue(l_ptr, head); | 1794 | head = link_insert_deferred_queue(l_ptr, head); |
1778 | tipc_node_unlock(n_ptr); | 1795 | tipc_node_unlock(n_ptr); |
1779 | continue; | 1796 | continue; |
1780 | } | 1797 | } |
1798 | |||
1799 | /* Traffic message. Conditionally activate link */ | ||
1781 | link_state_event(l_ptr, TRAFFIC_MSG_EVT); | 1800 | link_state_event(l_ptr, TRAFFIC_MSG_EVT); |
1782 | 1801 | ||
1783 | if (link_working_working(l_ptr)) { | 1802 | if (link_working_working(l_ptr)) { |
1784 | /* Re-insert in front of queue */ | 1803 | /* Re-insert buffer in front of queue */ |
1785 | buf->next = head; | 1804 | buf->next = head; |
1786 | head = buf; | 1805 | head = buf; |
1787 | tipc_node_unlock(n_ptr); | 1806 | tipc_node_unlock(n_ptr); |
@@ -1972,21 +1991,13 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, | |||
1972 | 1991 | ||
1973 | skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); | 1992 | skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); |
1974 | 1993 | ||
1975 | /* Defer message if bearer is already congested */ | 1994 | /* Defer message if bearer is already blocked */ |
1976 | if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { | 1995 | if (tipc_bearer_blocked(l_ptr->b_ptr)) { |
1977 | l_ptr->proto_msg_queue = buf; | ||
1978 | return; | ||
1979 | } | ||
1980 | |||
1981 | /* Defer message if attempting to send results in bearer congestion */ | ||
1982 | if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { | ||
1983 | tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); | ||
1984 | l_ptr->proto_msg_queue = buf; | 1996 | l_ptr->proto_msg_queue = buf; |
1985 | l_ptr->stats.bearer_congs++; | ||
1986 | return; | 1997 | return; |
1987 | } | 1998 | } |
1988 | 1999 | ||
1989 | /* Discard message if it was sent successfully */ | 2000 | tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); |
1990 | l_ptr->unacked_window = 0; | 2001 | l_ptr->unacked_window = 0; |
1991 | kfree_skb(buf); | 2002 | kfree_skb(buf); |
1992 | } | 2003 | } |
@@ -2057,7 +2068,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) | |||
2057 | } else { | 2068 | } else { |
2058 | l_ptr->max_pkt = l_ptr->max_pkt_target; | 2069 | l_ptr->max_pkt = l_ptr->max_pkt_target; |
2059 | } | 2070 | } |
2060 | l_ptr->owner->bclink.supportable = (max_pkt_info != 0); | ||
2061 | 2071 | ||
2062 | /* Synchronize broadcast link info, if not done previously */ | 2072 | /* Synchronize broadcast link info, if not done previously */ |
2063 | if (!tipc_node_is_up(l_ptr->owner)) { | 2073 | if (!tipc_node_is_up(l_ptr->owner)) { |
@@ -2112,7 +2122,7 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) | |||
2112 | } | 2122 | } |
2113 | 2123 | ||
2114 | /* Protocol message before retransmits, reduce loss risk */ | 2124 | /* Protocol message before retransmits, reduce loss risk */ |
2115 | if (l_ptr->owner->bclink.supported) | 2125 | if (l_ptr->owner->bclink.recv_permitted) |
2116 | tipc_bclink_update_link_state(l_ptr->owner, | 2126 | tipc_bclink_update_link_state(l_ptr->owner, |
2117 | msg_last_bcast(msg)); | 2127 | msg_last_bcast(msg)); |
2118 | 2128 | ||
@@ -2937,8 +2947,8 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) | |||
2937 | s->sent_nacks, s->sent_acks, s->retransmitted); | 2947 | s->sent_nacks, s->sent_acks, s->retransmitted); |
2938 | 2948 | ||
2939 | ret += tipc_snprintf(buf + ret, buf_size - ret, | 2949 | ret += tipc_snprintf(buf + ret, buf_size - ret, |
2940 | " Congestion bearer:%u link:%u Send queue" | 2950 | " Congestion link:%u Send queue" |
2941 | " max:%u avg:%u\n", s->bearer_congs, s->link_congs, | 2951 | " max:%u avg:%u\n", s->link_congs, |
2942 | s->max_queue_sz, s->queue_sz_counts ? | 2952 | s->max_queue_sz, s->queue_sz_counts ? |
2943 | (s->accu_queue_sz / s->queue_sz_counts) : 0); | 2953 | (s->accu_queue_sz / s->queue_sz_counts) : 0); |
2944 | 2954 | ||
diff --git a/net/tipc/link.h b/net/tipc/link.h index 6e921121be06..c048ed1cbd76 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h | |||
@@ -40,9 +40,6 @@ | |||
40 | #include "msg.h" | 40 | #include "msg.h" |
41 | #include "node.h" | 41 | #include "node.h" |
42 | 42 | ||
43 | #define PUSH_FAILED 1 | ||
44 | #define PUSH_FINISHED 2 | ||
45 | |||
46 | /* | 43 | /* |
47 | * Out-of-range value for link sequence numbers | 44 | * Out-of-range value for link sequence numbers |
48 | */ | 45 | */ |
@@ -82,7 +79,6 @@ struct tipc_stats { | |||
82 | u32 recv_fragmented; | 79 | u32 recv_fragmented; |
83 | u32 recv_fragments; | 80 | u32 recv_fragments; |
84 | u32 link_congs; /* # port sends blocked by congestion */ | 81 | u32 link_congs; /* # port sends blocked by congestion */ |
85 | u32 bearer_congs; | ||
86 | u32 deferred_recv; | 82 | u32 deferred_recv; |
87 | u32 duplicates; | 83 | u32 duplicates; |
88 | u32 max_queue_sz; /* send queue size high water mark */ | 84 | u32 max_queue_sz; /* send queue size high water mark */ |
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 55d3928dfd67..e0d08055754e 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c | |||
@@ -262,7 +262,7 @@ void tipc_named_node_up(unsigned long nodearg) | |||
262 | named_distribute(&message_list, node, &publ_zone, max_item_buf); | 262 | named_distribute(&message_list, node, &publ_zone, max_item_buf); |
263 | read_unlock_bh(&tipc_nametbl_lock); | 263 | read_unlock_bh(&tipc_nametbl_lock); |
264 | 264 | ||
265 | tipc_link_send_names(&message_list, (u32)node); | 265 | tipc_link_send_names(&message_list, node); |
266 | } | 266 | } |
267 | 267 | ||
268 | /** | 268 | /** |
diff --git a/net/tipc/node.c b/net/tipc/node.c index d21db204e25a..48f39dd3eae8 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/node.c: TIPC node management routines | 2 | * net/tipc/node.c: TIPC node management routines |
3 | * | 3 | * |
4 | * Copyright (c) 2000-2006, Ericsson AB | 4 | * Copyright (c) 2000-2006, 2012 Ericsson AB |
5 | * Copyright (c) 2005-2006, 2010-2011, Wind River Systems | 5 | * Copyright (c) 2005-2006, 2010-2011, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
@@ -263,12 +263,9 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | |||
263 | static void node_established_contact(struct tipc_node *n_ptr) | 263 | static void node_established_contact(struct tipc_node *n_ptr) |
264 | { | 264 | { |
265 | tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); | 265 | tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); |
266 | 266 | n_ptr->bclink.oos_state = 0; | |
267 | if (n_ptr->bclink.supportable) { | 267 | n_ptr->bclink.acked = tipc_bclink_get_last_sent(); |
268 | n_ptr->bclink.acked = tipc_bclink_get_last_sent(); | 268 | tipc_bclink_add_node(n_ptr->addr); |
269 | tipc_bclink_add_node(n_ptr->addr); | ||
270 | n_ptr->bclink.supported = 1; | ||
271 | } | ||
272 | } | 269 | } |
273 | 270 | ||
274 | static void node_name_purge_complete(unsigned long node_addr) | 271 | static void node_name_purge_complete(unsigned long node_addr) |
@@ -294,7 +291,7 @@ static void node_lost_contact(struct tipc_node *n_ptr) | |||
294 | tipc_addr_string_fill(addr_string, n_ptr->addr)); | 291 | tipc_addr_string_fill(addr_string, n_ptr->addr)); |
295 | 292 | ||
296 | /* Flush broadcast link info associated with lost node */ | 293 | /* Flush broadcast link info associated with lost node */ |
297 | if (n_ptr->bclink.supported) { | 294 | if (n_ptr->bclink.recv_permitted) { |
298 | while (n_ptr->bclink.deferred_head) { | 295 | while (n_ptr->bclink.deferred_head) { |
299 | struct sk_buff *buf = n_ptr->bclink.deferred_head; | 296 | struct sk_buff *buf = n_ptr->bclink.deferred_head; |
300 | n_ptr->bclink.deferred_head = buf->next; | 297 | n_ptr->bclink.deferred_head = buf->next; |
@@ -310,7 +307,7 @@ static void node_lost_contact(struct tipc_node *n_ptr) | |||
310 | tipc_bclink_remove_node(n_ptr->addr); | 307 | tipc_bclink_remove_node(n_ptr->addr); |
311 | tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ); | 308 | tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ); |
312 | 309 | ||
313 | n_ptr->bclink.supported = 0; | 310 | n_ptr->bclink.recv_permitted = false; |
314 | } | 311 | } |
315 | 312 | ||
316 | /* Abort link changeover */ | 313 | /* Abort link changeover */ |
diff --git a/net/tipc/node.h b/net/tipc/node.h index cfcaf4d6e480..3c189b35b102 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h | |||
@@ -67,8 +67,6 @@ | |||
67 | * @permit_changeover: non-zero if node has redundant links to this system | 67 | * @permit_changeover: non-zero if node has redundant links to this system |
68 | * @signature: node instance identifier | 68 | * @signature: node instance identifier |
69 | * @bclink: broadcast-related info | 69 | * @bclink: broadcast-related info |
70 | * @supportable: non-zero if node supports TIPC b'cast link capability | ||
71 | * @supported: non-zero if node supports TIPC b'cast capability | ||
72 | * @acked: sequence # of last outbound b'cast message acknowledged by node | 70 | * @acked: sequence # of last outbound b'cast message acknowledged by node |
73 | * @last_in: sequence # of last in-sequence b'cast message received from node | 71 | * @last_in: sequence # of last in-sequence b'cast message received from node |
74 | * @last_sent: sequence # of last b'cast message sent by node | 72 | * @last_sent: sequence # of last b'cast message sent by node |
@@ -77,6 +75,7 @@ | |||
77 | * @deferred_head: oldest OOS b'cast message received from node | 75 | * @deferred_head: oldest OOS b'cast message received from node |
78 | * @deferred_tail: newest OOS b'cast message received from node | 76 | * @deferred_tail: newest OOS b'cast message received from node |
79 | * @defragm: list of partially reassembled b'cast message fragments from node | 77 | * @defragm: list of partially reassembled b'cast message fragments from node |
78 | * @recv_permitted: true if node is allowed to receive b'cast messages | ||
80 | */ | 79 | */ |
81 | struct tipc_node { | 80 | struct tipc_node { |
82 | u32 addr; | 81 | u32 addr; |
@@ -92,8 +91,6 @@ struct tipc_node { | |||
92 | int permit_changeover; | 91 | int permit_changeover; |
93 | u32 signature; | 92 | u32 signature; |
94 | struct { | 93 | struct { |
95 | u8 supportable; | ||
96 | u8 supported; | ||
97 | u32 acked; | 94 | u32 acked; |
98 | u32 last_in; | 95 | u32 last_in; |
99 | u32 last_sent; | 96 | u32 last_sent; |
@@ -102,6 +99,7 @@ struct tipc_node { | |||
102 | struct sk_buff *deferred_head; | 99 | struct sk_buff *deferred_head; |
103 | struct sk_buff *deferred_tail; | 100 | struct sk_buff *deferred_tail; |
104 | struct sk_buff *defragm; | 101 | struct sk_buff *defragm; |
102 | bool recv_permitted; | ||
105 | } bclink; | 103 | } bclink; |
106 | }; | 104 | }; |
107 | 105 | ||
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index fd5f042dbff4..1a720c86e80a 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -62,6 +62,8 @@ struct tipc_sock { | |||
62 | static int backlog_rcv(struct sock *sk, struct sk_buff *skb); | 62 | static int backlog_rcv(struct sock *sk, struct sk_buff *skb); |
63 | static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf); | 63 | static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf); |
64 | static void wakeupdispatch(struct tipc_port *tport); | 64 | static void wakeupdispatch(struct tipc_port *tport); |
65 | static void tipc_data_ready(struct sock *sk, int len); | ||
66 | static void tipc_write_space(struct sock *sk); | ||
65 | 67 | ||
66 | static const struct proto_ops packet_ops; | 68 | static const struct proto_ops packet_ops; |
67 | static const struct proto_ops stream_ops; | 69 | static const struct proto_ops stream_ops; |
@@ -221,6 +223,8 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol, | |||
221 | sock_init_data(sock, sk); | 223 | sock_init_data(sock, sk); |
222 | sk->sk_backlog_rcv = backlog_rcv; | 224 | sk->sk_backlog_rcv = backlog_rcv; |
223 | sk->sk_rcvbuf = TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2; | 225 | sk->sk_rcvbuf = TIPC_FLOW_CONTROL_WIN * 2 * TIPC_MAX_USER_MSG_SIZE * 2; |
226 | sk->sk_data_ready = tipc_data_ready; | ||
227 | sk->sk_write_space = tipc_write_space; | ||
224 | tipc_sk(sk)->p = tp_ptr; | 228 | tipc_sk(sk)->p = tp_ptr; |
225 | tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT; | 229 | tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT; |
226 | 230 | ||
@@ -408,7 +412,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr, | |||
408 | * socket state flags set | 412 | * socket state flags set |
409 | * ------------ --------- | 413 | * ------------ --------- |
410 | * unconnected no read flags | 414 | * unconnected no read flags |
411 | * no write flags | 415 | * POLLOUT if port is not congested |
412 | * | 416 | * |
413 | * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue | 417 | * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue |
414 | * no write flags | 418 | * no write flags |
@@ -435,9 +439,13 @@ static unsigned int poll(struct file *file, struct socket *sock, | |||
435 | struct sock *sk = sock->sk; | 439 | struct sock *sk = sock->sk; |
436 | u32 mask = 0; | 440 | u32 mask = 0; |
437 | 441 | ||
438 | poll_wait(file, sk_sleep(sk), wait); | 442 | sock_poll_wait(file, sk_sleep(sk), wait); |
439 | 443 | ||
440 | switch ((int)sock->state) { | 444 | switch ((int)sock->state) { |
445 | case SS_UNCONNECTED: | ||
446 | if (!tipc_sk_port(sk)->congested) | ||
447 | mask |= POLLOUT; | ||
448 | break; | ||
441 | case SS_READY: | 449 | case SS_READY: |
442 | case SS_CONNECTED: | 450 | case SS_CONNECTED: |
443 | if (!tipc_sk_port(sk)->congested) | 451 | if (!tipc_sk_port(sk)->congested) |
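The new SS_UNCONNECTED case is visible from userspace: polling a not-yet-connected, connection-oriented TIPC socket for writability now succeeds as soon as the port is uncongested. A hypothetical userspace sketch (socket type and lack of error handling chosen here for brevity):

	#include <poll.h>
	#include <sys/socket.h>
	#include <linux/tipc.h>

	int wait_writable(void)
	{
		int fd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
		struct pollfd pfd = { .fd = fd, .events = POLLOUT };

		/* Before this change an unconnected socket reported no write
		 * flags at all; now poll() returns with POLLOUT set once the
		 * port is not congested. */
		return poll(&pfd, 1, -1);
	}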
@@ -1126,6 +1134,39 @@ exit: | |||
1126 | } | 1134 | } |
1127 | 1135 | ||
1128 | /** | 1136 | /** |
1137 | * tipc_write_space - wake up thread if port congestion is released | ||
1138 | * @sk: socket | ||
1139 | */ | ||
1140 | static void tipc_write_space(struct sock *sk) | ||
1141 | { | ||
1142 | struct socket_wq *wq; | ||
1143 | |||
1144 | rcu_read_lock(); | ||
1145 | wq = rcu_dereference(sk->sk_wq); | ||
1146 | if (wq_has_sleeper(wq)) | ||
1147 | wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | | ||
1148 | POLLWRNORM | POLLWRBAND); | ||
1149 | rcu_read_unlock(); | ||
1150 | } | ||
1151 | |||
1152 | /** | ||
1153 | * tipc_data_ready - wake up threads to indicate messages have been received | ||
1154 | * @sk: socket | ||
1155 | * @len: the length of messages | ||
1156 | */ | ||
1157 | static void tipc_data_ready(struct sock *sk, int len) | ||
1158 | { | ||
1159 | struct socket_wq *wq; | ||
1160 | |||
1161 | rcu_read_lock(); | ||
1162 | wq = rcu_dereference(sk->sk_wq); | ||
1163 | if (wq_has_sleeper(wq)) | ||
1164 | wake_up_interruptible_sync_poll(&wq->wait, POLLIN | | ||
1165 | POLLRDNORM | POLLRDBAND); | ||
1166 | rcu_read_unlock(); | ||
1167 | } | ||
1168 | |||
1169 | /** | ||
1129 | * rx_queue_full - determine if receive queue can accept another message | 1170 | * rx_queue_full - determine if receive queue can accept another message |
1130 | * @msg: message to be added to queue | 1171 | * @msg: message to be added to queue |
1131 | * @queue_size: current size of queue | 1172 | * @queue_size: current size of queue |
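The two callbacks above follow the standard sk_wq wakeup pattern (rcu_dereference of sk->sk_wq plus wq_has_sleeper); they are installed at socket creation and then invoked from the receive and congestion-release paths in place of the open-coded wakeups removed in the hunks below:

	sk->sk_data_ready  = tipc_data_ready;	/* rx: filter_rcv() -> sk->sk_data_ready(sk, 0)   */
	sk->sk_write_space = tipc_write_space;	/* tx: wakeupdispatch() -> sk->sk_write_space(sk) */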
@@ -1222,8 +1263,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) | |||
1222 | tipc_disconnect_port(tipc_sk_port(sk)); | 1263 | tipc_disconnect_port(tipc_sk_port(sk)); |
1223 | } | 1264 | } |
1224 | 1265 | ||
1225 | if (waitqueue_active(sk_sleep(sk))) | 1266 | sk->sk_data_ready(sk, 0); |
1226 | wake_up_interruptible(sk_sleep(sk)); | ||
1227 | return TIPC_OK; | 1267 | return TIPC_OK; |
1228 | } | 1268 | } |
1229 | 1269 | ||
@@ -1290,8 +1330,7 @@ static void wakeupdispatch(struct tipc_port *tport) | |||
1290 | { | 1330 | { |
1291 | struct sock *sk = (struct sock *)tport->usr_handle; | 1331 | struct sock *sk = (struct sock *)tport->usr_handle; |
1292 | 1332 | ||
1293 | if (waitqueue_active(sk_sleep(sk))) | 1333 | sk->sk_write_space(sk); |
1294 | wake_up_interruptible(sk_sleep(sk)); | ||
1295 | } | 1334 | } |
1296 | 1335 | ||
1297 | /** | 1336 | /** |
@@ -1556,10 +1595,11 @@ restart: | |||
1556 | 1595 | ||
1557 | case SS_DISCONNECTING: | 1596 | case SS_DISCONNECTING: |
1558 | 1597 | ||
1559 | /* Discard any unreceived messages; wake up sleeping tasks */ | 1598 | /* Discard any unreceived messages */ |
1560 | discard_rx_queue(sk); | 1599 | discard_rx_queue(sk); |
1561 | if (waitqueue_active(sk_sleep(sk))) | 1600 | |
1562 | wake_up_interruptible(sk_sleep(sk)); | 1601 | /* Wake up anyone sleeping in poll */ |
1602 | sk->sk_state_change(sk); | ||
1563 | res = 0; | 1603 | res = 0; |
1564 | break; | 1604 | break; |
1565 | 1605 | ||
diff --git a/net/unix/diag.c b/net/unix/diag.c index 06748f108a57..5ac19dc1d5e4 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c | |||
@@ -151,6 +151,9 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r | |||
151 | sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO)) | 151 | sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO)) |
152 | goto out_nlmsg_trim; | 152 | goto out_nlmsg_trim; |
153 | 153 | ||
154 | if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown)) | ||
155 | goto out_nlmsg_trim; | ||
156 | |||
154 | return nlmsg_end(skb, nlh); | 157 | return nlmsg_end(skb, nlh); |
155 | 158 | ||
156 | out_nlmsg_trim: | 159 | out_nlmsg_trim: |
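For consumers of the sock_diag interface, the new attribute exposes the socket's raw sk_shutdown bits. A hypothetical userspace decode, assuming the response attributes have already been parsed into tb[] with the usual rtnetlink-style helpers:

	if (tb[UNIX_DIAG_SHUTDOWN]) {
		__u8 shutdown = *(__u8 *)RTA_DATA(tb[UNIX_DIAG_SHUTDOWN]);
		/* bit 0 (RCV_SHUTDOWN): receive direction shut down,
		 * bit 1 (SEND_SHUTDOWN): send direction shut down */
	}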
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c index b34b5b9792f0..8800604c93f4 100644 --- a/net/unix/sysctl_net_unix.c +++ b/net/unix/sysctl_net_unix.c | |||
@@ -34,6 +34,10 @@ int __net_init unix_sysctl_register(struct net *net) | |||
34 | if (table == NULL) | 34 | if (table == NULL) |
35 | goto err_alloc; | 35 | goto err_alloc; |
36 | 36 | ||
37 | /* Don't export sysctls to unprivileged users */ | ||
38 | if (net->user_ns != &init_user_ns) | ||
39 | table[0].procname = NULL; | ||
40 | |||
37 | table[0].data = &net->unx.sysctl_max_dgram_qlen; | 41 | table[0].data = &net->unx.sysctl_max_dgram_qlen; |
38 | net->unx.ctl = register_net_sysctl(net, "net/unix", table); | 42 | net->unx.ctl = register_net_sysctl(net, "net/unix", table); |
39 | if (net->unx.ctl == NULL) | 43 | if (net->unx.ctl == NULL) |
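The trick above works because a ctl_table array is terminated by an entry whose procname is NULL; clearing the first (and only) entry's procname therefore registers an effectively empty table for any namespace other than the initial one (sketch, restating the added lines):

	if (net->user_ns != &init_user_ns)
		table[0].procname = NULL;	/* table now looks empty to register_net_sysctl() */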
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index fe4adb12b3ef..16d08b399210 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig | |||
@@ -140,14 +140,13 @@ config CFG80211_WEXT | |||
140 | extensions with cfg80211-based drivers. | 140 | extensions with cfg80211-based drivers. |
141 | 141 | ||
142 | config LIB80211 | 142 | config LIB80211 |
143 | tristate "Common routines for IEEE802.11 drivers" | 143 | tristate |
144 | default n | 144 | default n |
145 | help | 145 | help |
146 | This options enables a library of common routines used | 146 | This options enables a library of common routines used |
147 | by IEEE802.11 wireless LAN drivers. | 147 | by IEEE802.11 wireless LAN drivers. |
148 | 148 | ||
149 | Drivers should select this themselves if needed. Say Y if | 149 | Drivers should select this themselves if needed. |
150 | you want this built into your kernel. | ||
151 | 150 | ||
152 | config LIB80211_CRYPT_WEP | 151 | config LIB80211_CRYPT_WEP |
153 | tristate | 152 | tristate |
diff --git a/net/wireless/Makefile b/net/wireless/Makefile index 0f7e0d621ab0..a761670af31d 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile | |||
@@ -10,11 +10,13 @@ obj-$(CONFIG_WEXT_SPY) += wext-spy.o | |||
10 | obj-$(CONFIG_WEXT_PRIV) += wext-priv.o | 10 | obj-$(CONFIG_WEXT_PRIV) += wext-priv.o |
11 | 11 | ||
12 | cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o | 12 | cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o |
13 | cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o | 13 | cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o |
14 | cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o | 14 | cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o |
15 | cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o | 15 | cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o |
16 | cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o | 16 | cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o |
17 | 17 | ||
18 | CFLAGS_trace.o := -I$(src) | ||
19 | |||
18 | ccflags-y += -D__CHECK_ENDIAN__ | 20 | ccflags-y += -D__CHECK_ENDIAN__ |
19 | 21 | ||
20 | $(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk | 22 | $(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk |
diff --git a/net/wireless/ap.c b/net/wireless/ap.c index fcc60d8dbefa..324e8d851dc4 100644 --- a/net/wireless/ap.c +++ b/net/wireless/ap.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <net/cfg80211.h> | 3 | #include <net/cfg80211.h> |
4 | #include "nl80211.h" | 4 | #include "nl80211.h" |
5 | #include "core.h" | 5 | #include "core.h" |
6 | #include "rdev-ops.h" | ||
6 | 7 | ||
7 | 8 | ||
8 | static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, | 9 | static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, |
@@ -23,10 +24,11 @@ static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, | |||
23 | if (!wdev->beacon_interval) | 24 | if (!wdev->beacon_interval) |
24 | return -ENOENT; | 25 | return -ENOENT; |
25 | 26 | ||
26 | err = rdev->ops->stop_ap(&rdev->wiphy, dev); | 27 | err = rdev_stop_ap(rdev, dev); |
27 | if (!err) { | 28 | if (!err) { |
28 | wdev->beacon_interval = 0; | 29 | wdev->beacon_interval = 0; |
29 | wdev->channel = NULL; | 30 | wdev->channel = NULL; |
31 | wdev->ssid_len = 0; | ||
30 | } | 32 | } |
31 | 33 | ||
32 | return err; | 34 | return err; |
diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 2f876b9ee344..bf2dfd54ff3b 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c | |||
@@ -9,90 +9,266 @@ | |||
9 | #include <linux/export.h> | 9 | #include <linux/export.h> |
10 | #include <net/cfg80211.h> | 10 | #include <net/cfg80211.h> |
11 | #include "core.h" | 11 | #include "core.h" |
12 | #include "rdev-ops.h" | ||
12 | 13 | ||
13 | struct ieee80211_channel * | 14 | void cfg80211_chandef_create(struct cfg80211_chan_def *chandef, |
14 | rdev_freq_to_chan(struct cfg80211_registered_device *rdev, | 15 | struct ieee80211_channel *chan, |
15 | int freq, enum nl80211_channel_type channel_type) | 16 | enum nl80211_channel_type chan_type) |
16 | { | 17 | { |
17 | struct ieee80211_channel *chan; | 18 | if (WARN_ON(!chan)) |
18 | struct ieee80211_sta_ht_cap *ht_cap; | 19 | return; |
20 | |||
21 | chandef->chan = chan; | ||
22 | chandef->center_freq2 = 0; | ||
23 | |||
24 | switch (chan_type) { | ||
25 | case NL80211_CHAN_NO_HT: | ||
26 | chandef->width = NL80211_CHAN_WIDTH_20_NOHT; | ||
27 | chandef->center_freq1 = chan->center_freq; | ||
28 | break; | ||
29 | case NL80211_CHAN_HT20: | ||
30 | chandef->width = NL80211_CHAN_WIDTH_20; | ||
31 | chandef->center_freq1 = chan->center_freq; | ||
32 | break; | ||
33 | case NL80211_CHAN_HT40PLUS: | ||
34 | chandef->width = NL80211_CHAN_WIDTH_40; | ||
35 | chandef->center_freq1 = chan->center_freq + 10; | ||
36 | break; | ||
37 | case NL80211_CHAN_HT40MINUS: | ||
38 | chandef->width = NL80211_CHAN_WIDTH_40; | ||
39 | chandef->center_freq1 = chan->center_freq - 10; | ||
40 | break; | ||
41 | default: | ||
42 | WARN_ON(1); | ||
43 | } | ||
44 | } | ||
45 | EXPORT_SYMBOL(cfg80211_chandef_create); | ||
46 | |||
47 | bool cfg80211_chan_def_valid(const struct cfg80211_chan_def *chandef) | ||
48 | { | ||
49 | u32 control_freq; | ||
50 | |||
51 | if (!chandef->chan) | ||
52 | return false; | ||
19 | 53 | ||
20 | chan = ieee80211_get_channel(&rdev->wiphy, freq); | 54 | control_freq = chandef->chan->center_freq; |
21 | 55 | ||
22 | /* Primary channel not allowed */ | 56 | switch (chandef->width) { |
23 | if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) | 57 | case NL80211_CHAN_WIDTH_20: |
58 | case NL80211_CHAN_WIDTH_20_NOHT: | ||
59 | if (chandef->center_freq1 != control_freq) | ||
60 | return false; | ||
61 | if (chandef->center_freq2) | ||
62 | return false; | ||
63 | break; | ||
64 | case NL80211_CHAN_WIDTH_40: | ||
65 | if (chandef->center_freq1 != control_freq + 10 && | ||
66 | chandef->center_freq1 != control_freq - 10) | ||
67 | return false; | ||
68 | if (chandef->center_freq2) | ||
69 | return false; | ||
70 | break; | ||
71 | case NL80211_CHAN_WIDTH_80P80: | ||
72 | if (chandef->center_freq1 != control_freq + 30 && | ||
73 | chandef->center_freq1 != control_freq + 10 && | ||
74 | chandef->center_freq1 != control_freq - 10 && | ||
75 | chandef->center_freq1 != control_freq - 30) | ||
76 | return false; | ||
77 | if (!chandef->center_freq2) | ||
78 | return false; | ||
79 | break; | ||
80 | case NL80211_CHAN_WIDTH_80: | ||
81 | if (chandef->center_freq1 != control_freq + 30 && | ||
82 | chandef->center_freq1 != control_freq + 10 && | ||
83 | chandef->center_freq1 != control_freq - 10 && | ||
84 | chandef->center_freq1 != control_freq - 30) | ||
85 | return false; | ||
86 | if (chandef->center_freq2) | ||
87 | return false; | ||
88 | break; | ||
89 | case NL80211_CHAN_WIDTH_160: | ||
90 | if (chandef->center_freq1 != control_freq + 70 && | ||
91 | chandef->center_freq1 != control_freq + 50 && | ||
92 | chandef->center_freq1 != control_freq + 30 && | ||
93 | chandef->center_freq1 != control_freq + 10 && | ||
94 | chandef->center_freq1 != control_freq - 10 && | ||
95 | chandef->center_freq1 != control_freq - 30 && | ||
96 | chandef->center_freq1 != control_freq - 50 && | ||
97 | chandef->center_freq1 != control_freq - 70) | ||
98 | return false; | ||
99 | if (chandef->center_freq2) | ||
100 | return false; | ||
101 | break; | ||
102 | default: | ||
103 | return false; | ||
104 | } | ||
105 | |||
106 | return true; | ||
107 | } | ||
108 | |||
109 | static void chandef_primary_freqs(const struct cfg80211_chan_def *c, | ||
110 | int *pri40, int *pri80) | ||
111 | { | ||
112 | int tmp; | ||
113 | |||
114 | switch (c->width) { | ||
115 | case NL80211_CHAN_WIDTH_40: | ||
116 | *pri40 = c->center_freq1; | ||
117 | *pri80 = 0; | ||
118 | break; | ||
119 | case NL80211_CHAN_WIDTH_80: | ||
120 | case NL80211_CHAN_WIDTH_80P80: | ||
121 | *pri80 = c->center_freq1; | ||
122 | /* n_P20 */ | ||
123 | tmp = (30 + c->chan->center_freq - c->center_freq1)/20; | ||
124 | /* n_P40 */ | ||
125 | tmp /= 2; | ||
126 | /* freq_P40 */ | ||
127 | *pri40 = c->center_freq1 - 20 + 40 * tmp; | ||
128 | break; | ||
129 | case NL80211_CHAN_WIDTH_160: | ||
130 | /* n_P20 */ | ||
131 | tmp = (70 + c->chan->center_freq - c->center_freq1)/20; | ||
132 | /* n_P40 */ | ||
133 | tmp /= 2; | ||
134 | /* freq_P40 */ | ||
135 | *pri40 = c->center_freq1 - 60 + 40 * tmp; | ||
136 | /* n_P80 */ | ||
137 | tmp /= 2; | ||
138 | *pri80 = c->center_freq1 - 40 + 80 * tmp; | ||
139 | break; | ||
140 | default: | ||
141 | WARN_ON_ONCE(1); | ||
142 | } | ||
143 | } | ||
144 | |||
145 | const struct cfg80211_chan_def * | ||
146 | cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1, | ||
147 | const struct cfg80211_chan_def *c2) | ||
148 | { | ||
149 | u32 c1_pri40, c1_pri80, c2_pri40, c2_pri80; | ||
150 | |||
151 | /* If they are identical, return */ | ||
152 | if (cfg80211_chandef_identical(c1, c2)) | ||
153 | return c1; | ||
154 | |||
155 | /* otherwise, must have same control channel */ | ||
156 | if (c1->chan != c2->chan) | ||
24 | return NULL; | 157 | return NULL; |
25 | 158 | ||
26 | if (channel_type == NL80211_CHAN_HT40MINUS && | 159 | /* |
27 | chan->flags & IEEE80211_CHAN_NO_HT40MINUS) | 160 | * If they have the same width, but aren't identical, |
161 | * then they can't be compatible. | ||
162 | */ | ||
163 | if (c1->width == c2->width) | ||
28 | return NULL; | 164 | return NULL; |
29 | else if (channel_type == NL80211_CHAN_HT40PLUS && | 165 | |
30 | chan->flags & IEEE80211_CHAN_NO_HT40PLUS) | 166 | if (c1->width == NL80211_CHAN_WIDTH_20_NOHT || |
167 | c1->width == NL80211_CHAN_WIDTH_20) | ||
168 | return c2; | ||
169 | |||
170 | if (c2->width == NL80211_CHAN_WIDTH_20_NOHT || | ||
171 | c2->width == NL80211_CHAN_WIDTH_20) | ||
172 | return c1; | ||
173 | |||
174 | chandef_primary_freqs(c1, &c1_pri40, &c1_pri80); | ||
175 | chandef_primary_freqs(c2, &c2_pri40, &c2_pri80); | ||
176 | |||
177 | if (c1_pri40 != c2_pri40) | ||
178 | return NULL; | ||
179 | |||
180 | WARN_ON(!c1_pri80 && !c2_pri80); | ||
181 | if (c1_pri80 && c2_pri80 && c1_pri80 != c2_pri80) | ||
31 | return NULL; | 182 | return NULL; |
32 | 183 | ||
33 | ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap; | 184 | if (c1->width > c2->width) |
185 | return c1; | ||
186 | return c2; | ||
187 | } | ||
188 | EXPORT_SYMBOL(cfg80211_chandef_compatible); | ||
34 | 189 | ||
35 | if (channel_type != NL80211_CHAN_NO_HT) { | 190 | bool cfg80211_secondary_chans_ok(struct wiphy *wiphy, |
36 | if (!ht_cap->ht_supported) | 191 | u32 center_freq, u32 bandwidth, |
37 | return NULL; | 192 | u32 prohibited_flags) |
193 | { | ||
194 | struct ieee80211_channel *c; | ||
195 | u32 freq; | ||
38 | 196 | ||
39 | if (channel_type != NL80211_CHAN_HT20 && | 197 | for (freq = center_freq - bandwidth/2 + 10; |
40 | (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || | 198 | freq <= center_freq + bandwidth/2 - 10; |
41 | ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)) | 199 | freq += 20) { |
42 | return NULL; | 200 | c = ieee80211_get_channel(wiphy, freq); |
201 | if (!c || c->flags & prohibited_flags) | ||
202 | return false; | ||
43 | } | 203 | } |
44 | 204 | ||
45 | return chan; | 205 | return true; |
46 | } | 206 | } |
47 | 207 | ||
48 | bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy, | 208 | static bool cfg80211_check_beacon_chans(struct wiphy *wiphy, |
49 | struct ieee80211_channel *chan, | 209 | u32 center_freq, u32 bw) |
50 | enum nl80211_channel_type channel_type) | ||
51 | { | 210 | { |
52 | struct ieee80211_channel *sec_chan; | 211 | return cfg80211_secondary_chans_ok(wiphy, center_freq, bw, |
53 | int diff; | 212 | IEEE80211_CHAN_DISABLED | |
213 | IEEE80211_CHAN_PASSIVE_SCAN | | ||
214 | IEEE80211_CHAN_NO_IBSS | | ||
215 | IEEE80211_CHAN_RADAR); | ||
216 | } | ||
54 | 217 | ||
55 | switch (channel_type) { | 218 | bool cfg80211_reg_can_beacon(struct wiphy *wiphy, |
56 | case NL80211_CHAN_HT40PLUS: | 219 | struct cfg80211_chan_def *chandef) |
57 | diff = 20; | 220 | { |
221 | u32 width; | ||
222 | bool res; | ||
223 | |||
224 | trace_cfg80211_reg_can_beacon(wiphy, chandef); | ||
225 | |||
226 | if (WARN_ON(!cfg80211_chan_def_valid(chandef))) { | ||
227 | trace_cfg80211_return_bool(false); | ||
228 | return false; | ||
229 | } | ||
230 | |||
231 | switch (chandef->width) { | ||
232 | case NL80211_CHAN_WIDTH_20_NOHT: | ||
233 | case NL80211_CHAN_WIDTH_20: | ||
234 | width = 20; | ||
58 | break; | 235 | break; |
59 | case NL80211_CHAN_HT40MINUS: | 236 | case NL80211_CHAN_WIDTH_40: |
60 | diff = -20; | 237 | width = 40; |
238 | break; | ||
239 | case NL80211_CHAN_WIDTH_80: | ||
240 | case NL80211_CHAN_WIDTH_80P80: | ||
241 | width = 80; | ||
242 | break; | ||
243 | case NL80211_CHAN_WIDTH_160: | ||
244 | width = 160; | ||
61 | break; | 245 | break; |
62 | default: | 246 | default: |
63 | return true; | 247 | WARN_ON_ONCE(1); |
248 | trace_cfg80211_return_bool(false); | ||
249 | return false; | ||
64 | } | 250 | } |
65 | 251 | ||
66 | sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff); | 252 | res = cfg80211_check_beacon_chans(wiphy, chandef->center_freq1, width); |
67 | if (!sec_chan) | ||
68 | return false; | ||
69 | 253 | ||
70 | /* we'll need a DFS capability later */ | 254 | if (res && chandef->center_freq2) |
71 | if (sec_chan->flags & (IEEE80211_CHAN_DISABLED | | 255 | res = cfg80211_check_beacon_chans(wiphy, chandef->center_freq2, |
72 | IEEE80211_CHAN_PASSIVE_SCAN | | 256 | width); |
73 | IEEE80211_CHAN_NO_IBSS | | ||
74 | IEEE80211_CHAN_RADAR)) | ||
75 | return false; | ||
76 | 257 | ||
77 | return true; | 258 | trace_cfg80211_return_bool(res); |
259 | return res; | ||
78 | } | 260 | } |
79 | EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan); | 261 | EXPORT_SYMBOL(cfg80211_reg_can_beacon); |
80 | 262 | ||
81 | int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, | 263 | int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, |
82 | int freq, enum nl80211_channel_type chantype) | 264 | struct cfg80211_chan_def *chandef) |
83 | { | 265 | { |
84 | struct ieee80211_channel *chan; | ||
85 | |||
86 | if (!rdev->ops->set_monitor_channel) | 266 | if (!rdev->ops->set_monitor_channel) |
87 | return -EOPNOTSUPP; | 267 | return -EOPNOTSUPP; |
88 | if (!cfg80211_has_monitors_only(rdev)) | 268 | if (!cfg80211_has_monitors_only(rdev)) |
89 | return -EBUSY; | 269 | return -EBUSY; |
90 | 270 | ||
91 | chan = rdev_freq_to_chan(rdev, freq, chantype); | 271 | return rdev_set_monitor_channel(rdev, chandef); |
92 | if (!chan) | ||
93 | return -EINVAL; | ||
94 | |||
95 | return rdev->ops->set_monitor_channel(&rdev->wiphy, chan, chantype); | ||
96 | } | 272 | } |
97 | 273 | ||
98 | void | 274 | void |
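A worked example of the new channel-definition helpers, with frequencies chosen for illustration (channel 36, 5180 MHz) and a wiphy pointer assumed to be in scope:

	struct cfg80211_chan_def def;
	struct ieee80211_channel *chan = ieee80211_get_channel(wiphy, 5180);

	cfg80211_chandef_create(&def, chan, NL80211_CHAN_HT40PLUS);
	/* def.width == NL80211_CHAN_WIDTH_40, def.center_freq1 == 5190 (5180 + 10) */

	/* For an 80 MHz definition with the same control channel and
	 * center_freq1 == 5210, chandef_primary_freqs() computes:
	 *	n_P20 = (30 + 5180 - 5210) / 20 = 0,  n_P40 = 0 / 2 = 0
	 *	pri40 = 5210 - 20 + 40 * 0 = 5190
	 * i.e. the primary 40 MHz block is the lower half of the 80 MHz block,
	 * which is the value cfg80211_chandef_compatible() compares. */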
diff --git a/net/wireless/core.c b/net/wireless/core.c index 3f7253052088..14d990400354 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "debugfs.h" | 26 | #include "debugfs.h" |
27 | #include "wext-compat.h" | 27 | #include "wext-compat.h" |
28 | #include "ethtool.h" | 28 | #include "ethtool.h" |
29 | #include "rdev-ops.h" | ||
29 | 30 | ||
30 | /* name for sysfs, %d is appended */ | 31 | /* name for sysfs, %d is appended */ |
31 | #define PHY_NAME "phy" | 32 | #define PHY_NAME "phy" |
@@ -216,7 +217,7 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) | |||
216 | { | 217 | { |
217 | struct cfg80211_registered_device *rdev = data; | 218 | struct cfg80211_registered_device *rdev = data; |
218 | 219 | ||
219 | rdev->ops->rfkill_poll(&rdev->wiphy); | 220 | rdev_rfkill_poll(rdev); |
220 | } | 221 | } |
221 | 222 | ||
222 | static int cfg80211_rfkill_set_block(void *data, bool blocked) | 223 | static int cfg80211_rfkill_set_block(void *data, bool blocked) |
@@ -240,7 +241,7 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) | |||
240 | case NL80211_IFTYPE_P2P_DEVICE: | 241 | case NL80211_IFTYPE_P2P_DEVICE: |
241 | if (!wdev->p2p_started) | 242 | if (!wdev->p2p_started) |
242 | break; | 243 | break; |
243 | rdev->ops->stop_p2p_device(&rdev->wiphy, wdev); | 244 | rdev_stop_p2p_device(rdev, wdev); |
244 | wdev->p2p_started = false; | 245 | wdev->p2p_started = false; |
245 | rdev->opencount--; | 246 | rdev->opencount--; |
246 | break; | 247 | break; |
@@ -325,6 +326,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) | |||
325 | mutex_init(&rdev->devlist_mtx); | 326 | mutex_init(&rdev->devlist_mtx); |
326 | mutex_init(&rdev->sched_scan_mtx); | 327 | mutex_init(&rdev->sched_scan_mtx); |
327 | INIT_LIST_HEAD(&rdev->wdev_list); | 328 | INIT_LIST_HEAD(&rdev->wdev_list); |
329 | INIT_LIST_HEAD(&rdev->beacon_registrations); | ||
330 | spin_lock_init(&rdev->beacon_registrations_lock); | ||
328 | spin_lock_init(&rdev->bss_lock); | 331 | spin_lock_init(&rdev->bss_lock); |
329 | INIT_LIST_HEAD(&rdev->bss_list); | 332 | INIT_LIST_HEAD(&rdev->bss_list); |
330 | INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); | 333 | INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); |
@@ -370,6 +373,8 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) | |||
370 | rdev->wiphy.rts_threshold = (u32) -1; | 373 | rdev->wiphy.rts_threshold = (u32) -1; |
371 | rdev->wiphy.coverage_class = 0; | 374 | rdev->wiphy.coverage_class = 0; |
372 | 375 | ||
376 | rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH; | ||
377 | |||
373 | return &rdev->wiphy; | 378 | return &rdev->wiphy; |
374 | } | 379 | } |
375 | EXPORT_SYMBOL(wiphy_new); | 380 | EXPORT_SYMBOL(wiphy_new); |
@@ -687,7 +692,7 @@ void wiphy_unregister(struct wiphy *wiphy) | |||
687 | flush_work(&rdev->event_work); | 692 | flush_work(&rdev->event_work); |
688 | 693 | ||
689 | if (rdev->wowlan && rdev->ops->set_wakeup) | 694 | if (rdev->wowlan && rdev->ops->set_wakeup) |
690 | rdev->ops->set_wakeup(&rdev->wiphy, false); | 695 | rdev_set_wakeup(rdev, false); |
691 | cfg80211_rdev_free_wowlan(rdev); | 696 | cfg80211_rdev_free_wowlan(rdev); |
692 | } | 697 | } |
693 | EXPORT_SYMBOL(wiphy_unregister); | 698 | EXPORT_SYMBOL(wiphy_unregister); |
@@ -695,10 +700,15 @@ EXPORT_SYMBOL(wiphy_unregister); | |||
695 | void cfg80211_dev_free(struct cfg80211_registered_device *rdev) | 700 | void cfg80211_dev_free(struct cfg80211_registered_device *rdev) |
696 | { | 701 | { |
697 | struct cfg80211_internal_bss *scan, *tmp; | 702 | struct cfg80211_internal_bss *scan, *tmp; |
703 | struct cfg80211_beacon_registration *reg, *treg; | ||
698 | rfkill_destroy(rdev->rfkill); | 704 | rfkill_destroy(rdev->rfkill); |
699 | mutex_destroy(&rdev->mtx); | 705 | mutex_destroy(&rdev->mtx); |
700 | mutex_destroy(&rdev->devlist_mtx); | 706 | mutex_destroy(&rdev->devlist_mtx); |
701 | mutex_destroy(&rdev->sched_scan_mtx); | 707 | mutex_destroy(&rdev->sched_scan_mtx); |
708 | list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) { | ||
709 | list_del(®->list); | ||
710 | kfree(reg); | ||
711 | } | ||
702 | list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) | 712 | list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) |
703 | cfg80211_put_bss(&scan->pub); | 713 | cfg80211_put_bss(&scan->pub); |
704 | kfree(rdev); | 714 | kfree(rdev); |
@@ -770,7 +780,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) | |||
770 | case NL80211_IFTYPE_P2P_DEVICE: | 780 | case NL80211_IFTYPE_P2P_DEVICE: |
771 | if (!wdev->p2p_started) | 781 | if (!wdev->p2p_started) |
772 | break; | 782 | break; |
773 | rdev->ops->stop_p2p_device(&rdev->wiphy, wdev); | 783 | rdev_stop_p2p_device(rdev, wdev); |
774 | wdev->p2p_started = false; | 784 | wdev->p2p_started = false; |
775 | rdev->opencount--; | 785 | rdev->opencount--; |
776 | break; | 786 | break; |
@@ -961,9 +971,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |||
961 | if ((wdev->iftype == NL80211_IFTYPE_STATION || | 971 | if ((wdev->iftype == NL80211_IFTYPE_STATION || |
962 | wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) && | 972 | wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) && |
963 | rdev->ops->set_power_mgmt) | 973 | rdev->ops->set_power_mgmt) |
964 | if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, | 974 | if (rdev_set_power_mgmt(rdev, dev, wdev->ps, |
965 | wdev->ps, | 975 | wdev->ps_timeout)) { |
966 | wdev->ps_timeout)) { | ||
967 | /* assume this means it's off */ | 976 | /* assume this means it's off */ |
968 | wdev->ps = false; | 977 | wdev->ps = false; |
969 | } | 978 | } |
diff --git a/net/wireless/core.h b/net/wireless/core.h index a343be4a52bd..a0c8decf6a47 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -55,7 +55,8 @@ struct cfg80211_registered_device { | |||
55 | int opencount; /* also protected by devlist_mtx */ | 55 | int opencount; /* also protected by devlist_mtx */ |
56 | wait_queue_head_t dev_wait; | 56 | wait_queue_head_t dev_wait; |
57 | 57 | ||
58 | u32 ap_beacons_nlportid; | 58 | struct list_head beacon_registrations; |
59 | spinlock_t beacon_registrations_lock; | ||
59 | 60 | ||
60 | /* protected by RTNL only */ | 61 | /* protected by RTNL only */ |
61 | int num_running_ifaces; | 62 | int num_running_ifaces; |
@@ -260,6 +261,10 @@ enum cfg80211_chan_mode { | |||
260 | CHAN_MODE_EXCLUSIVE, | 261 | CHAN_MODE_EXCLUSIVE, |
261 | }; | 262 | }; |
262 | 263 | ||
264 | struct cfg80211_beacon_registration { | ||
265 | struct list_head list; | ||
266 | u32 nlportid; | ||
267 | }; | ||
263 | 268 | ||
264 | /* free object */ | 269 | /* free object */ |
265 | extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev); | 270 | extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev); |
@@ -304,9 +309,9 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev, | |||
304 | const struct mesh_config *conf); | 309 | const struct mesh_config *conf); |
305 | int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, | 310 | int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, |
306 | struct net_device *dev); | 311 | struct net_device *dev); |
307 | int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev, | 312 | int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev, |
308 | struct wireless_dev *wdev, int freq, | 313 | struct wireless_dev *wdev, |
309 | enum nl80211_channel_type channel_type); | 314 | struct cfg80211_chan_def *chandef); |
310 | 315 | ||
311 | /* AP */ | 316 | /* AP */ |
312 | int cfg80211_stop_ap(struct cfg80211_registered_device *rdev, | 317 | int cfg80211_stop_ap(struct cfg80211_registered_device *rdev, |
@@ -320,13 +325,15 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | |||
320 | const u8 *bssid, | 325 | const u8 *bssid, |
321 | const u8 *ssid, int ssid_len, | 326 | const u8 *ssid, int ssid_len, |
322 | const u8 *ie, int ie_len, | 327 | const u8 *ie, int ie_len, |
323 | const u8 *key, int key_len, int key_idx); | 328 | const u8 *key, int key_len, int key_idx, |
329 | const u8 *sae_data, int sae_data_len); | ||
324 | int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | 330 | int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, |
325 | struct net_device *dev, struct ieee80211_channel *chan, | 331 | struct net_device *dev, struct ieee80211_channel *chan, |
326 | enum nl80211_auth_type auth_type, const u8 *bssid, | 332 | enum nl80211_auth_type auth_type, const u8 *bssid, |
327 | const u8 *ssid, int ssid_len, | 333 | const u8 *ssid, int ssid_len, |
328 | const u8 *ie, int ie_len, | 334 | const u8 *ie, int ie_len, |
329 | const u8 *key, int key_len, int key_idx); | 335 | const u8 *key, int key_len, int key_idx, |
336 | const u8 *sae_data, int sae_data_len); | ||
330 | int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, | 337 | int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, |
331 | struct net_device *dev, | 338 | struct net_device *dev, |
332 | struct ieee80211_channel *chan, | 339 | struct ieee80211_channel *chan, |
@@ -371,10 +378,8 @@ void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev); | |||
371 | int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, | 378 | int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, |
372 | struct wireless_dev *wdev, | 379 | struct wireless_dev *wdev, |
373 | struct ieee80211_channel *chan, bool offchan, | 380 | struct ieee80211_channel *chan, bool offchan, |
374 | enum nl80211_channel_type channel_type, | 381 | unsigned int wait, const u8 *buf, size_t len, |
375 | bool channel_type_valid, unsigned int wait, | 382 | bool no_cck, bool dont_wait_for_ack, u64 *cookie); |
376 | const u8 *buf, size_t len, bool no_cck, | ||
377 | bool dont_wait_for_ack, u64 *cookie); | ||
378 | void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, | 383 | void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, |
379 | const struct ieee80211_ht_cap *ht_capa_mask); | 384 | const struct ieee80211_ht_cap *ht_capa_mask); |
380 | 385 | ||
@@ -465,11 +470,8 @@ cfg80211_get_chan_state(struct wireless_dev *wdev, | |||
465 | struct ieee80211_channel **chan, | 470 | struct ieee80211_channel **chan, |
466 | enum cfg80211_chan_mode *chanmode); | 471 | enum cfg80211_chan_mode *chanmode); |
467 | 472 | ||
468 | struct ieee80211_channel * | ||
469 | rdev_freq_to_chan(struct cfg80211_registered_device *rdev, | ||
470 | int freq, enum nl80211_channel_type channel_type); | ||
471 | int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, | 473 | int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, |
472 | int freq, enum nl80211_channel_type chantype); | 474 | struct cfg80211_chan_def *chandef); |
473 | 475 | ||
474 | int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, | 476 | int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, |
475 | const u8 *rates, unsigned int n_rates, | 477 | const u8 *rates, unsigned int n_rates, |
@@ -481,6 +483,12 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, | |||
481 | void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, | 483 | void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, |
482 | enum nl80211_iftype iftype, int num); | 484 | enum nl80211_iftype iftype, int num); |
483 | 485 | ||
486 | bool cfg80211_chan_def_valid(const struct cfg80211_chan_def *chandef); | ||
487 | |||
488 | bool cfg80211_secondary_chans_ok(struct wiphy *wiphy, | ||
489 | u32 center_freq, u32 bandwidth, | ||
490 | u32 prohibited_flags); | ||
491 | |||
484 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 | 492 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 |
485 | 493 | ||
486 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS | 494 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS |
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c index 7eecdf40cf80..48c48ffafa1d 100644 --- a/net/wireless/ethtool.c +++ b/net/wireless/ethtool.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <net/cfg80211.h> | 2 | #include <net/cfg80211.h> |
3 | #include "core.h" | 3 | #include "core.h" |
4 | #include "ethtool.h" | 4 | #include "ethtool.h" |
5 | #include "rdev-ops.h" | ||
5 | 6 | ||
6 | static void cfg80211_get_drvinfo(struct net_device *dev, | 7 | static void cfg80211_get_drvinfo(struct net_device *dev, |
7 | struct ethtool_drvinfo *info) | 8 | struct ethtool_drvinfo *info) |
@@ -47,9 +48,8 @@ static void cfg80211_get_ringparam(struct net_device *dev, | |||
47 | memset(rp, 0, sizeof(*rp)); | 48 | memset(rp, 0, sizeof(*rp)); |
48 | 49 | ||
49 | if (rdev->ops->get_ringparam) | 50 | if (rdev->ops->get_ringparam) |
50 | rdev->ops->get_ringparam(wdev->wiphy, | 51 | rdev_get_ringparam(rdev, &rp->tx_pending, &rp->tx_max_pending, |
51 | &rp->tx_pending, &rp->tx_max_pending, | 52 | &rp->rx_pending, &rp->rx_max_pending); |
52 | &rp->rx_pending, &rp->rx_max_pending); | ||
53 | } | 53 | } |
54 | 54 | ||
55 | static int cfg80211_set_ringparam(struct net_device *dev, | 55 | static int cfg80211_set_ringparam(struct net_device *dev, |
@@ -62,8 +62,7 @@ static int cfg80211_set_ringparam(struct net_device *dev, | |||
62 | return -EINVAL; | 62 | return -EINVAL; |
63 | 63 | ||
64 | if (rdev->ops->set_ringparam) | 64 | if (rdev->ops->set_ringparam) |
65 | return rdev->ops->set_ringparam(wdev->wiphy, | 65 | return rdev_set_ringparam(rdev, rp->tx_pending, rp->rx_pending); |
66 | rp->tx_pending, rp->rx_pending); | ||
67 | 66 | ||
68 | return -ENOTSUPP; | 67 | return -ENOTSUPP; |
69 | } | 68 | } |
@@ -73,7 +72,7 @@ static int cfg80211_get_sset_count(struct net_device *dev, int sset) | |||
73 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 72 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
74 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 73 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
75 | if (rdev->ops->get_et_sset_count) | 74 | if (rdev->ops->get_et_sset_count) |
76 | return rdev->ops->get_et_sset_count(wdev->wiphy, dev, sset); | 75 | return rdev_get_et_sset_count(rdev, dev, sset); |
77 | return -EOPNOTSUPP; | 76 | return -EOPNOTSUPP; |
78 | } | 77 | } |
79 | 78 | ||
@@ -83,7 +82,7 @@ static void cfg80211_get_stats(struct net_device *dev, | |||
83 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 82 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
84 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 83 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
85 | if (rdev->ops->get_et_stats) | 84 | if (rdev->ops->get_et_stats) |
86 | rdev->ops->get_et_stats(wdev->wiphy, dev, stats, data); | 85 | rdev_get_et_stats(rdev, dev, stats, data); |
87 | } | 86 | } |
88 | 87 | ||
89 | static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data) | 88 | static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data) |
@@ -91,7 +90,7 @@ static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data) | |||
91 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 90 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
92 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 91 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
93 | if (rdev->ops->get_et_strings) | 92 | if (rdev->ops->get_et_strings) |
94 | rdev->ops->get_et_strings(wdev->wiphy, dev, sset, data); | 93 | rdev_get_et_strings(rdev, dev, sset, data); |
95 | } | 94 | } |
96 | 95 | ||
97 | const struct ethtool_ops cfg80211_ethtool_ops = { | 96 | const struct ethtool_ops cfg80211_ethtool_ops = { |
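The ethtool hunks above are part of a wider switch from calling rdev->ops->...() directly to going through rdev_*() helpers in the new "rdev-ops.h". A minimal sketch of what such a wrapper presumably looks like -- a static inline that forwards to the driver op so every call site funnels through one place (the real header may also wrap the call in tracing):

static inline void
rdev_get_ringparam(struct cfg80211_registered_device *rdev,
		   u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
{
	/* forward to the driver callback; callers no longer touch
	 * rdev->ops directly */
	rdev->ops->get_ringparam(&rdev->wiphy, tx, tx_max, rx, rx_max);
}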
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index ca5672f6ee2f..9b9551e4a6f9 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <net/cfg80211.h> | 11 | #include <net/cfg80211.h> |
12 | #include "wext-compat.h" | 12 | #include "wext-compat.h" |
13 | #include "nl80211.h" | 13 | #include "nl80211.h" |
14 | #include "rdev-ops.h" | ||
14 | 15 | ||
15 | 16 | ||
16 | void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) | 17 | void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) |
@@ -61,6 +62,8 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) | |||
61 | struct cfg80211_event *ev; | 62 | struct cfg80211_event *ev; |
62 | unsigned long flags; | 63 | unsigned long flags; |
63 | 64 | ||
65 | trace_cfg80211_ibss_joined(dev, bssid); | ||
66 | |||
64 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); | 67 | CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); |
65 | 68 | ||
66 | ev = kzalloc(sizeof(*ev), gfp); | 69 | ev = kzalloc(sizeof(*ev), gfp); |
@@ -97,9 +100,9 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | |||
97 | * 11a for maximum compatibility. | 100 | * 11a for maximum compatibility. |
98 | */ | 101 | */ |
99 | struct ieee80211_supported_band *sband = | 102 | struct ieee80211_supported_band *sband = |
100 | rdev->wiphy.bands[params->channel->band]; | 103 | rdev->wiphy.bands[params->chandef.chan->band]; |
101 | int j; | 104 | int j; |
102 | u32 flag = params->channel->band == IEEE80211_BAND_5GHZ ? | 105 | u32 flag = params->chandef.chan->band == IEEE80211_BAND_5GHZ ? |
103 | IEEE80211_RATE_MANDATORY_A : | 106 | IEEE80211_RATE_MANDATORY_A : |
104 | IEEE80211_RATE_MANDATORY_B; | 107 | IEEE80211_RATE_MANDATORY_B; |
105 | 108 | ||
@@ -115,11 +118,11 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | |||
115 | 118 | ||
116 | wdev->ibss_fixed = params->channel_fixed; | 119 | wdev->ibss_fixed = params->channel_fixed; |
117 | #ifdef CONFIG_CFG80211_WEXT | 120 | #ifdef CONFIG_CFG80211_WEXT |
118 | wdev->wext.ibss.channel = params->channel; | 121 | wdev->wext.ibss.chandef = params->chandef; |
119 | #endif | 122 | #endif |
120 | wdev->sme_state = CFG80211_SME_CONNECTING; | 123 | wdev->sme_state = CFG80211_SME_CONNECTING; |
121 | 124 | ||
122 | err = cfg80211_can_use_chan(rdev, wdev, params->channel, | 125 | err = cfg80211_can_use_chan(rdev, wdev, params->chandef.chan, |
123 | params->channel_fixed | 126 | params->channel_fixed |
124 | ? CHAN_MODE_SHARED | 127 | ? CHAN_MODE_SHARED |
125 | : CHAN_MODE_EXCLUSIVE); | 128 | : CHAN_MODE_EXCLUSIVE); |
@@ -128,7 +131,7 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, | |||
128 | return err; | 131 | return err; |
129 | } | 132 | } |
130 | 133 | ||
131 | err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); | 134 | err = rdev_join_ibss(rdev, dev, params); |
132 | if (err) { | 135 | if (err) { |
133 | wdev->connect_keys = NULL; | 136 | wdev->connect_keys = NULL; |
134 | wdev->sme_state = CFG80211_SME_IDLE; | 137 | wdev->sme_state = CFG80211_SME_IDLE; |
@@ -175,7 +178,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) | |||
175 | */ | 178 | */ |
176 | if (rdev->ops->del_key) | 179 | if (rdev->ops->del_key) |
177 | for (i = 0; i < 6; i++) | 180 | for (i = 0; i < 6; i++) |
178 | rdev->ops->del_key(wdev->wiphy, dev, i, false, NULL); | 181 | rdev_del_key(rdev, dev, i, false, NULL); |
179 | 182 | ||
180 | if (wdev->current_bss) { | 183 | if (wdev->current_bss) { |
181 | cfg80211_unhold_bss(wdev->current_bss); | 184 | cfg80211_unhold_bss(wdev->current_bss); |
@@ -211,7 +214,7 @@ int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev, | |||
211 | if (!wdev->ssid_len) | 214 | if (!wdev->ssid_len) |
212 | return -ENOLINK; | 215 | return -ENOLINK; |
213 | 216 | ||
214 | err = rdev->ops->leave_ibss(&rdev->wiphy, dev); | 217 | err = rdev_leave_ibss(rdev, dev); |
215 | 218 | ||
216 | if (err) | 219 | if (err) |
217 | return err; | 220 | return err; |
@@ -248,7 +251,9 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, | |||
248 | wdev->wext.ibss.beacon_interval = 100; | 251 | wdev->wext.ibss.beacon_interval = 100; |
249 | 252 | ||
250 | /* try to find an IBSS channel if none requested ... */ | 253 | /* try to find an IBSS channel if none requested ... */ |
251 | if (!wdev->wext.ibss.channel) { | 254 | if (!wdev->wext.ibss.chandef.chan) { |
255 | wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; | ||
256 | |||
252 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 257 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
253 | struct ieee80211_supported_band *sband; | 258 | struct ieee80211_supported_band *sband; |
254 | struct ieee80211_channel *chan; | 259 | struct ieee80211_channel *chan; |
@@ -263,15 +268,15 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, | |||
263 | continue; | 268 | continue; |
264 | if (chan->flags & IEEE80211_CHAN_DISABLED) | 269 | if (chan->flags & IEEE80211_CHAN_DISABLED) |
265 | continue; | 270 | continue; |
266 | wdev->wext.ibss.channel = chan; | 271 | wdev->wext.ibss.chandef.chan = chan; |
267 | break; | 272 | break; |
268 | } | 273 | } |
269 | 274 | ||
270 | if (wdev->wext.ibss.channel) | 275 | if (wdev->wext.ibss.chandef.chan) |
271 | break; | 276 | break; |
272 | } | 277 | } |
273 | 278 | ||
274 | if (!wdev->wext.ibss.channel) | 279 | if (!wdev->wext.ibss.chandef.chan) |
275 | return -EINVAL; | 280 | return -EINVAL; |
276 | } | 281 | } |
277 | 282 | ||
@@ -333,7 +338,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev, | |||
333 | return -EINVAL; | 338 | return -EINVAL; |
334 | } | 339 | } |
335 | 340 | ||
336 | if (wdev->wext.ibss.channel == chan) | 341 | if (wdev->wext.ibss.chandef.chan == chan) |
337 | return 0; | 342 | return 0; |
338 | 343 | ||
339 | wdev_lock(wdev); | 344 | wdev_lock(wdev); |
@@ -346,7 +351,8 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev, | |||
346 | return err; | 351 | return err; |
347 | 352 | ||
348 | if (chan) { | 353 | if (chan) { |
349 | wdev->wext.ibss.channel = chan; | 354 | wdev->wext.ibss.chandef.chan = chan; |
355 | wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; | ||
350 | wdev->wext.ibss.channel_fixed = true; | 356 | wdev->wext.ibss.channel_fixed = true; |
351 | } else { | 357 | } else { |
352 | /* cfg80211_ibss_wext_join will pick one if needed */ | 358 | /* cfg80211_ibss_wext_join will pick one if needed */ |
@@ -376,8 +382,8 @@ int cfg80211_ibss_wext_giwfreq(struct net_device *dev, | |||
376 | wdev_lock(wdev); | 382 | wdev_lock(wdev); |
377 | if (wdev->current_bss) | 383 | if (wdev->current_bss) |
378 | chan = wdev->current_bss->pub.channel; | 384 | chan = wdev->current_bss->pub.channel; |
379 | else if (wdev->wext.ibss.channel) | 385 | else if (wdev->wext.ibss.chandef.chan) |
380 | chan = wdev->wext.ibss.channel; | 386 | chan = wdev->wext.ibss.chandef.chan; |
381 | wdev_unlock(wdev); | 387 | wdev_unlock(wdev); |
382 | 388 | ||
383 | if (chan) { | 389 | if (chan) { |
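The IBSS hunks above replace the single params->channel pointer with a full cfg80211_chan_def. For the wext path, which only ever deals in a plain control channel, the definition amounts to roughly the following (field names as used above; center_freq1/center_freq2 are the values a 20 MHz no-HT cfg80211_chandef_create() would derive):

struct cfg80211_chan_def chandef;

chandef.chan = chan;				/* control channel */
chandef.width = NL80211_CHAN_WIDTH_20_NOHT;	/* plain 20 MHz, no HT */
chandef.center_freq1 = chan->center_freq;	/* centered on the control channel */
chandef.center_freq2 = 0;			/* only meaningful for 80+80 */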
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index c384e77ff77a..3ee5a7282283 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <net/cfg80211.h> | 3 | #include <net/cfg80211.h> |
4 | #include "nl80211.h" | 4 | #include "nl80211.h" |
5 | #include "core.h" | 5 | #include "core.h" |
6 | #include "rdev-ops.h" | ||
6 | 7 | ||
7 | /* Default values, timeouts in ms */ | 8 | /* Default values, timeouts in ms */ |
8 | #define MESH_TTL 31 | 9 | #define MESH_TTL 31 |
@@ -72,8 +73,6 @@ const struct mesh_config default_mesh_config = { | |||
72 | 73 | ||
73 | const struct mesh_setup default_mesh_setup = { | 74 | const struct mesh_setup default_mesh_setup = { |
74 | /* cfg80211_join_mesh() will pick a channel if needed */ | 75 | /* cfg80211_join_mesh() will pick a channel if needed */ |
75 | .channel = NULL, | ||
76 | .channel_type = NL80211_CHAN_NO_HT, | ||
77 | .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, | 76 | .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, |
78 | .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, | 77 | .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, |
79 | .path_metric = IEEE80211_PATH_METRIC_AIRTIME, | 78 | .path_metric = IEEE80211_PATH_METRIC_AIRTIME, |
@@ -110,13 +109,12 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, | |||
110 | if (!rdev->ops->join_mesh) | 109 | if (!rdev->ops->join_mesh) |
111 | return -EOPNOTSUPP; | 110 | return -EOPNOTSUPP; |
112 | 111 | ||
113 | if (!setup->channel) { | 112 | if (!setup->chandef.chan) { |
114 | /* if no channel explicitly given, use preset channel */ | 113 | /* if no channel explicitly given, use preset channel */ |
115 | setup->channel = wdev->preset_chan; | 114 | setup->chandef = wdev->preset_chandef; |
116 | setup->channel_type = wdev->preset_chantype; | ||
117 | } | 115 | } |
118 | 116 | ||
119 | if (!setup->channel) { | 117 | if (!setup->chandef.chan) { |
120 | /* if we don't have that either, use the first usable channel */ | 118 | /* if we don't have that either, use the first usable channel */ |
121 | enum ieee80211_band band; | 119 | enum ieee80211_band band; |
122 | 120 | ||
@@ -136,35 +134,34 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, | |||
136 | IEEE80211_CHAN_DISABLED | | 134 | IEEE80211_CHAN_DISABLED | |
137 | IEEE80211_CHAN_RADAR)) | 135 | IEEE80211_CHAN_RADAR)) |
138 | continue; | 136 | continue; |
139 | setup->channel = chan; | 137 | setup->chandef.chan = chan; |
140 | break; | 138 | break; |
141 | } | 139 | } |
142 | 140 | ||
143 | if (setup->channel) | 141 | if (setup->chandef.chan) |
144 | break; | 142 | break; |
145 | } | 143 | } |
146 | 144 | ||
147 | /* no usable channel ... */ | 145 | /* no usable channel ... */ |
148 | if (!setup->channel) | 146 | if (!setup->chandef.chan) |
149 | return -EINVAL; | 147 | return -EINVAL; |
150 | 148 | ||
151 | setup->channel_type = NL80211_CHAN_NO_HT; | 149 | setup->chandef.width = NL80211_CHAN_WIDTH_20_NOHT; |
152 | } | 150 | } |
153 | 151 | ||
154 | if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, setup->channel, | 152 | if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef)) |
155 | setup->channel_type)) | ||
156 | return -EINVAL; | 153 | return -EINVAL; |
157 | 154 | ||
158 | err = cfg80211_can_use_chan(rdev, wdev, setup->channel, | 155 | err = cfg80211_can_use_chan(rdev, wdev, setup->chandef.chan, |
159 | CHAN_MODE_SHARED); | 156 | CHAN_MODE_SHARED); |
160 | if (err) | 157 | if (err) |
161 | return err; | 158 | return err; |
162 | 159 | ||
163 | err = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup); | 160 | err = rdev_join_mesh(rdev, dev, conf, setup); |
164 | if (!err) { | 161 | if (!err) { |
165 | memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len); | 162 | memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len); |
166 | wdev->mesh_id_len = setup->mesh_id_len; | 163 | wdev->mesh_id_len = setup->mesh_id_len; |
167 | wdev->channel = setup->channel; | 164 | wdev->channel = setup->chandef.chan; |
168 | } | 165 | } |
169 | 166 | ||
170 | return err; | 167 | return err; |
@@ -187,20 +184,12 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev, | |||
187 | return err; | 184 | return err; |
188 | } | 185 | } |
189 | 186 | ||
190 | int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev, | 187 | int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev, |
191 | struct wireless_dev *wdev, int freq, | 188 | struct wireless_dev *wdev, |
192 | enum nl80211_channel_type channel_type) | 189 | struct cfg80211_chan_def *chandef) |
193 | { | 190 | { |
194 | struct ieee80211_channel *channel; | ||
195 | int err; | 191 | int err; |
196 | 192 | ||
197 | channel = rdev_freq_to_chan(rdev, freq, channel_type); | ||
198 | if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy, | ||
199 | channel, | ||
200 | channel_type)) { | ||
201 | return -EINVAL; | ||
202 | } | ||
203 | |||
204 | /* | 193 | /* |
205 | * Workaround for libertas (only!), it puts the interface | 194 | * Workaround for libertas (only!), it puts the interface |
206 | * into mesh mode but doesn't implement join_mesh. Instead, | 195 | * into mesh mode but doesn't implement join_mesh. Instead, |
@@ -209,22 +198,21 @@ int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev, | |||
209 | * compatible with 802.11 mesh. | 198 | * compatible with 802.11 mesh. |
210 | */ | 199 | */ |
211 | if (rdev->ops->libertas_set_mesh_channel) { | 200 | if (rdev->ops->libertas_set_mesh_channel) { |
212 | if (channel_type != NL80211_CHAN_NO_HT) | 201 | if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT) |
213 | return -EINVAL; | 202 | return -EINVAL; |
214 | 203 | ||
215 | if (!netif_running(wdev->netdev)) | 204 | if (!netif_running(wdev->netdev)) |
216 | return -ENETDOWN; | 205 | return -ENETDOWN; |
217 | 206 | ||
218 | err = cfg80211_can_use_chan(rdev, wdev, channel, | 207 | err = cfg80211_can_use_chan(rdev, wdev, chandef->chan, |
219 | CHAN_MODE_SHARED); | 208 | CHAN_MODE_SHARED); |
220 | if (err) | 209 | if (err) |
221 | return err; | 210 | return err; |
222 | 211 | ||
223 | err = rdev->ops->libertas_set_mesh_channel(&rdev->wiphy, | 212 | err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev, |
224 | wdev->netdev, | 213 | chandef->chan); |
225 | channel); | ||
226 | if (!err) | 214 | if (!err) |
227 | wdev->channel = channel; | 215 | wdev->channel = chandef->chan; |
228 | 216 | ||
229 | return err; | 217 | return err; |
230 | } | 218 | } |
@@ -232,8 +220,7 @@ int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev, | |||
232 | if (wdev->mesh_id_len) | 220 | if (wdev->mesh_id_len) |
233 | return -EBUSY; | 221 | return -EBUSY; |
234 | 222 | ||
235 | wdev->preset_chan = channel; | 223 | wdev->preset_chandef = *chandef; |
236 | wdev->preset_chantype = channel_type; | ||
237 | return 0; | 224 | return 0; |
238 | } | 225 | } |
239 | 226 | ||
@@ -242,6 +229,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, | |||
242 | { | 229 | { |
243 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 230 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
244 | 231 | ||
232 | trace_cfg80211_notify_new_peer_candidate(dev, macaddr); | ||
245 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT)) | 233 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT)) |
246 | return; | 234 | return; |
247 | 235 | ||
@@ -267,7 +255,7 @@ static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, | |||
267 | if (!wdev->mesh_id_len) | 255 | if (!wdev->mesh_id_len) |
268 | return -ENOTCONN; | 256 | return -ENOTCONN; |
269 | 257 | ||
270 | err = rdev->ops->leave_mesh(&rdev->wiphy, dev); | 258 | err = rdev_leave_mesh(rdev, dev); |
271 | if (!err) { | 259 | if (!err) { |
272 | wdev->mesh_id_len = 0; | 260 | wdev->mesh_id_len = 0; |
273 | wdev->channel = NULL; | 261 | wdev->channel = NULL; |
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 904a7f368325..5e8123ee63fd 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <net/iw_handler.h> | 15 | #include <net/iw_handler.h> |
16 | #include "core.h" | 16 | #include "core.h" |
17 | #include "nl80211.h" | 17 | #include "nl80211.h" |
18 | #include "rdev-ops.h" | ||
19 | |||
18 | 20 | ||
19 | void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) | 21 | void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) |
20 | { | 22 | { |
@@ -22,6 +24,7 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) | |||
22 | struct wiphy *wiphy = wdev->wiphy; | 24 | struct wiphy *wiphy = wdev->wiphy; |
23 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 25 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
24 | 26 | ||
27 | trace_cfg80211_send_rx_auth(dev); | ||
25 | wdev_lock(wdev); | 28 | wdev_lock(wdev); |
26 | 29 | ||
27 | nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); | 30 | nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); |
@@ -42,6 +45,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, | |||
42 | u8 *ie = mgmt->u.assoc_resp.variable; | 45 | u8 *ie = mgmt->u.assoc_resp.variable; |
43 | int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); | 46 | int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); |
44 | 47 | ||
48 | trace_cfg80211_send_rx_assoc(dev, bss); | ||
45 | wdev_lock(wdev); | 49 | wdev_lock(wdev); |
46 | 50 | ||
47 | status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); | 51 | status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); |
@@ -98,6 +102,7 @@ void __cfg80211_send_deauth(struct net_device *dev, | |||
98 | const u8 *bssid = mgmt->bssid; | 102 | const u8 *bssid = mgmt->bssid; |
99 | bool was_current = false; | 103 | bool was_current = false; |
100 | 104 | ||
105 | trace___cfg80211_send_deauth(dev); | ||
101 | ASSERT_WDEV_LOCK(wdev); | 106 | ASSERT_WDEV_LOCK(wdev); |
102 | 107 | ||
103 | if (wdev->current_bss && | 108 | if (wdev->current_bss && |
@@ -147,6 +152,7 @@ void __cfg80211_send_disassoc(struct net_device *dev, | |||
147 | u16 reason_code; | 152 | u16 reason_code; |
148 | bool from_ap; | 153 | bool from_ap; |
149 | 154 | ||
155 | trace___cfg80211_send_disassoc(dev); | ||
150 | ASSERT_WDEV_LOCK(wdev); | 156 | ASSERT_WDEV_LOCK(wdev); |
151 | 157 | ||
152 | nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL); | 158 | nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL); |
@@ -188,6 +194,7 @@ void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf, | |||
188 | struct wiphy *wiphy = wdev->wiphy; | 194 | struct wiphy *wiphy = wdev->wiphy; |
189 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 195 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
190 | 196 | ||
197 | trace_cfg80211_send_unprot_deauth(dev); | ||
191 | nl80211_send_unprot_deauth(rdev, dev, buf, len, GFP_ATOMIC); | 198 | nl80211_send_unprot_deauth(rdev, dev, buf, len, GFP_ATOMIC); |
192 | } | 199 | } |
193 | EXPORT_SYMBOL(cfg80211_send_unprot_deauth); | 200 | EXPORT_SYMBOL(cfg80211_send_unprot_deauth); |
@@ -199,6 +206,7 @@ void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf, | |||
199 | struct wiphy *wiphy = wdev->wiphy; | 206 | struct wiphy *wiphy = wdev->wiphy; |
200 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 207 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
201 | 208 | ||
209 | trace_cfg80211_send_unprot_disassoc(dev); | ||
202 | nl80211_send_unprot_disassoc(rdev, dev, buf, len, GFP_ATOMIC); | 210 | nl80211_send_unprot_disassoc(rdev, dev, buf, len, GFP_ATOMIC); |
203 | } | 211 | } |
204 | EXPORT_SYMBOL(cfg80211_send_unprot_disassoc); | 212 | EXPORT_SYMBOL(cfg80211_send_unprot_disassoc); |
@@ -209,6 +217,7 @@ void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) | |||
209 | struct wiphy *wiphy = wdev->wiphy; | 217 | struct wiphy *wiphy = wdev->wiphy; |
210 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 218 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
211 | 219 | ||
220 | trace_cfg80211_send_auth_timeout(dev, addr); | ||
212 | wdev_lock(wdev); | 221 | wdev_lock(wdev); |
213 | 222 | ||
214 | nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL); | 223 | nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL); |
@@ -227,6 +236,7 @@ void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr) | |||
227 | struct wiphy *wiphy = wdev->wiphy; | 236 | struct wiphy *wiphy = wdev->wiphy; |
228 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 237 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
229 | 238 | ||
239 | trace_cfg80211_send_assoc_timeout(dev, addr); | ||
230 | wdev_lock(wdev); | 240 | wdev_lock(wdev); |
231 | 241 | ||
232 | nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL); | 242 | nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL); |
@@ -261,6 +271,7 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, | |||
261 | } | 271 | } |
262 | #endif | 272 | #endif |
263 | 273 | ||
274 | trace_cfg80211_michael_mic_failure(dev, addr, key_type, key_id, tsc); | ||
264 | nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc, gfp); | 275 | nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc, gfp); |
265 | } | 276 | } |
266 | EXPORT_SYMBOL(cfg80211_michael_mic_failure); | 277 | EXPORT_SYMBOL(cfg80211_michael_mic_failure); |
@@ -273,7 +284,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | |||
273 | const u8 *bssid, | 284 | const u8 *bssid, |
274 | const u8 *ssid, int ssid_len, | 285 | const u8 *ssid, int ssid_len, |
275 | const u8 *ie, int ie_len, | 286 | const u8 *ie, int ie_len, |
276 | const u8 *key, int key_len, int key_idx) | 287 | const u8 *key, int key_len, int key_idx, |
288 | const u8 *sae_data, int sae_data_len) | ||
277 | { | 289 | { |
278 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 290 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
279 | struct cfg80211_auth_request req; | 291 | struct cfg80211_auth_request req; |
@@ -293,6 +305,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | |||
293 | 305 | ||
294 | req.ie = ie; | 306 | req.ie = ie; |
295 | req.ie_len = ie_len; | 307 | req.ie_len = ie_len; |
308 | req.sae_data = sae_data; | ||
309 | req.sae_data_len = sae_data_len; | ||
296 | req.auth_type = auth_type; | 310 | req.auth_type = auth_type; |
297 | req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, | 311 | req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, |
298 | WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); | 312 | WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); |
@@ -307,7 +321,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | |||
307 | if (err) | 321 | if (err) |
308 | goto out; | 322 | goto out; |
309 | 323 | ||
310 | err = rdev->ops->auth(&rdev->wiphy, dev, &req); | 324 | err = rdev_auth(rdev, dev, &req); |
311 | 325 | ||
312 | out: | 326 | out: |
313 | cfg80211_put_bss(req.bss); | 327 | cfg80211_put_bss(req.bss); |
@@ -319,7 +333,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | |||
319 | enum nl80211_auth_type auth_type, const u8 *bssid, | 333 | enum nl80211_auth_type auth_type, const u8 *bssid, |
320 | const u8 *ssid, int ssid_len, | 334 | const u8 *ssid, int ssid_len, |
321 | const u8 *ie, int ie_len, | 335 | const u8 *ie, int ie_len, |
322 | const u8 *key, int key_len, int key_idx) | 336 | const u8 *key, int key_len, int key_idx, |
337 | const u8 *sae_data, int sae_data_len) | ||
323 | { | 338 | { |
324 | int err; | 339 | int err; |
325 | 340 | ||
@@ -327,7 +342,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, | |||
327 | wdev_lock(dev->ieee80211_ptr); | 342 | wdev_lock(dev->ieee80211_ptr); |
328 | err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, | 343 | err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, |
329 | ssid, ssid_len, ie, ie_len, | 344 | ssid, ssid_len, ie, ie_len, |
330 | key, key_len, key_idx); | 345 | key, key_len, key_idx, |
346 | sae_data, sae_data_len); | ||
331 | wdev_unlock(dev->ieee80211_ptr); | 347 | wdev_unlock(dev->ieee80211_ptr); |
332 | mutex_unlock(&rdev->devlist_mtx); | 348 | mutex_unlock(&rdev->devlist_mtx); |
333 | 349 | ||
@@ -410,7 +426,7 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, | |||
410 | if (err) | 426 | if (err) |
411 | goto out; | 427 | goto out; |
412 | 428 | ||
413 | err = rdev->ops->assoc(&rdev->wiphy, dev, &req); | 429 | err = rdev_assoc(rdev, dev, &req); |
414 | 430 | ||
415 | out: | 431 | out: |
416 | if (err) { | 432 | if (err) { |
@@ -466,7 +482,7 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | |||
466 | !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) | 482 | !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) |
467 | return 0; | 483 | return 0; |
468 | 484 | ||
469 | return rdev->ops->deauth(&rdev->wiphy, dev, &req); | 485 | return rdev_deauth(rdev, dev, &req); |
470 | } | 486 | } |
471 | 487 | ||
472 | int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, | 488 | int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, |
@@ -511,7 +527,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | |||
511 | else | 527 | else |
512 | return -ENOTCONN; | 528 | return -ENOTCONN; |
513 | 529 | ||
514 | return rdev->ops->disassoc(&rdev->wiphy, dev, &req); | 530 | return rdev_disassoc(rdev, dev, &req); |
515 | } | 531 | } |
516 | 532 | ||
517 | int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, | 533 | int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, |
@@ -552,7 +568,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, | |||
552 | 568 | ||
553 | memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); | 569 | memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); |
554 | req.bssid = bssid; | 570 | req.bssid = bssid; |
555 | rdev->ops->deauth(&rdev->wiphy, dev, &req); | 571 | rdev_deauth(rdev, dev, &req); |
556 | 572 | ||
557 | if (wdev->current_bss) { | 573 | if (wdev->current_bss) { |
558 | cfg80211_unhold_bss(wdev->current_bss); | 574 | cfg80211_unhold_bss(wdev->current_bss); |
@@ -563,27 +579,25 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, | |||
563 | 579 | ||
564 | void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie, | 580 | void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie, |
565 | struct ieee80211_channel *chan, | 581 | struct ieee80211_channel *chan, |
566 | enum nl80211_channel_type channel_type, | ||
567 | unsigned int duration, gfp_t gfp) | 582 | unsigned int duration, gfp_t gfp) |
568 | { | 583 | { |
569 | struct wiphy *wiphy = wdev->wiphy; | 584 | struct wiphy *wiphy = wdev->wiphy; |
570 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 585 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
571 | 586 | ||
572 | nl80211_send_remain_on_channel(rdev, wdev, cookie, chan, channel_type, | 587 | trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration); |
573 | duration, gfp); | 588 | nl80211_send_remain_on_channel(rdev, wdev, cookie, chan, duration, gfp); |
574 | } | 589 | } |
575 | EXPORT_SYMBOL(cfg80211_ready_on_channel); | 590 | EXPORT_SYMBOL(cfg80211_ready_on_channel); |
576 | 591 | ||
577 | void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie, | 592 | void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie, |
578 | struct ieee80211_channel *chan, | 593 | struct ieee80211_channel *chan, |
579 | enum nl80211_channel_type channel_type, | ||
580 | gfp_t gfp) | 594 | gfp_t gfp) |
581 | { | 595 | { |
582 | struct wiphy *wiphy = wdev->wiphy; | 596 | struct wiphy *wiphy = wdev->wiphy; |
583 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 597 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
584 | 598 | ||
585 | nl80211_send_remain_on_channel_cancel(rdev, wdev, cookie, chan, | 599 | trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan); |
586 | channel_type, gfp); | 600 | nl80211_send_remain_on_channel_cancel(rdev, wdev, cookie, chan, gfp); |
587 | } | 601 | } |
588 | EXPORT_SYMBOL(cfg80211_remain_on_channel_expired); | 602 | EXPORT_SYMBOL(cfg80211_remain_on_channel_expired); |
589 | 603 | ||
@@ -593,6 +607,7 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, | |||
593 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; | 607 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; |
594 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 608 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
595 | 609 | ||
610 | trace_cfg80211_new_sta(dev, mac_addr, sinfo); | ||
596 | nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp); | 611 | nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp); |
597 | } | 612 | } |
598 | EXPORT_SYMBOL(cfg80211_new_sta); | 613 | EXPORT_SYMBOL(cfg80211_new_sta); |
@@ -602,6 +617,7 @@ void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp) | |||
602 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; | 617 | struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; |
603 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 618 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
604 | 619 | ||
620 | trace_cfg80211_del_sta(dev, mac_addr); | ||
605 | nl80211_send_sta_del_event(rdev, dev, mac_addr, gfp); | 621 | nl80211_send_sta_del_event(rdev, dev, mac_addr, gfp); |
606 | } | 622 | } |
607 | EXPORT_SYMBOL(cfg80211_del_sta); | 623 | EXPORT_SYMBOL(cfg80211_del_sta); |
@@ -682,7 +698,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid, | |||
682 | list_add(&nreg->list, &wdev->mgmt_registrations); | 698 | list_add(&nreg->list, &wdev->mgmt_registrations); |
683 | 699 | ||
684 | if (rdev->ops->mgmt_frame_register) | 700 | if (rdev->ops->mgmt_frame_register) |
685 | rdev->ops->mgmt_frame_register(wiphy, wdev, frame_type, true); | 701 | rdev_mgmt_frame_register(rdev, wdev, frame_type, true); |
686 | 702 | ||
687 | out: | 703 | out: |
688 | spin_unlock_bh(&wdev->mgmt_registrations_lock); | 704 | spin_unlock_bh(&wdev->mgmt_registrations_lock); |
@@ -705,8 +721,8 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid) | |||
705 | if (rdev->ops->mgmt_frame_register) { | 721 | if (rdev->ops->mgmt_frame_register) { |
706 | u16 frame_type = le16_to_cpu(reg->frame_type); | 722 | u16 frame_type = le16_to_cpu(reg->frame_type); |
707 | 723 | ||
708 | rdev->ops->mgmt_frame_register(wiphy, wdev, | 724 | rdev_mgmt_frame_register(rdev, wdev, |
709 | frame_type, false); | 725 | frame_type, false); |
710 | } | 726 | } |
711 | 727 | ||
712 | list_del(®->list); | 728 | list_del(®->list); |
@@ -736,10 +752,8 @@ void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev) | |||
736 | int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, | 752 | int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, |
737 | struct wireless_dev *wdev, | 753 | struct wireless_dev *wdev, |
738 | struct ieee80211_channel *chan, bool offchan, | 754 | struct ieee80211_channel *chan, bool offchan, |
739 | enum nl80211_channel_type channel_type, | 755 | unsigned int wait, const u8 *buf, size_t len, |
740 | bool channel_type_valid, unsigned int wait, | 756 | bool no_cck, bool dont_wait_for_ack, u64 *cookie) |
741 | const u8 *buf, size_t len, bool no_cck, | ||
742 | bool dont_wait_for_ack, u64 *cookie) | ||
743 | { | 757 | { |
744 | const struct ieee80211_mgmt *mgmt; | 758 | const struct ieee80211_mgmt *mgmt; |
745 | u16 stype; | 759 | u16 stype; |
@@ -832,10 +846,9 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, | |||
832 | return -EINVAL; | 846 | return -EINVAL; |
833 | 847 | ||
834 | /* Transmit the Action frame as requested by user space */ | 848 | /* Transmit the Action frame as requested by user space */ |
835 | return rdev->ops->mgmt_tx(&rdev->wiphy, wdev, chan, offchan, | 849 | return rdev_mgmt_tx(rdev, wdev, chan, offchan, |
836 | channel_type, channel_type_valid, | 850 | wait, buf, len, no_cck, dont_wait_for_ack, |
837 | wait, buf, len, no_cck, dont_wait_for_ack, | 851 | cookie); |
838 | cookie); | ||
839 | } | 852 | } |
840 | 853 | ||
841 | bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, | 854 | bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, |
@@ -854,10 +867,13 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, | |||
854 | cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE); | 867 | cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE); |
855 | u16 stype; | 868 | u16 stype; |
856 | 869 | ||
870 | trace_cfg80211_rx_mgmt(wdev, freq, sig_mbm); | ||
857 | stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4; | 871 | stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4; |
858 | 872 | ||
859 | if (!(stypes->rx & BIT(stype))) | 873 | if (!(stypes->rx & BIT(stype))) { |
874 | trace_cfg80211_return_bool(false); | ||
860 | return false; | 875 | return false; |
876 | } | ||
861 | 877 | ||
862 | data = buf + ieee80211_hdrlen(mgmt->frame_control); | 878 | data = buf + ieee80211_hdrlen(mgmt->frame_control); |
863 | data_len = len - ieee80211_hdrlen(mgmt->frame_control); | 879 | data_len = len - ieee80211_hdrlen(mgmt->frame_control); |
@@ -888,6 +904,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, | |||
888 | 904 | ||
889 | spin_unlock_bh(&wdev->mgmt_registrations_lock); | 905 | spin_unlock_bh(&wdev->mgmt_registrations_lock); |
890 | 906 | ||
907 | trace_cfg80211_return_bool(result); | ||
891 | return result; | 908 | return result; |
892 | } | 909 | } |
893 | EXPORT_SYMBOL(cfg80211_rx_mgmt); | 910 | EXPORT_SYMBOL(cfg80211_rx_mgmt); |
@@ -898,6 +915,8 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, | |||
898 | struct wiphy *wiphy = wdev->wiphy; | 915 | struct wiphy *wiphy = wdev->wiphy; |
899 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 916 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
900 | 917 | ||
918 | trace_cfg80211_mgmt_tx_status(wdev, cookie, ack); | ||
919 | |||
901 | /* Indicate TX status of the Action frame to user space */ | 920 | /* Indicate TX status of the Action frame to user space */ |
902 | nl80211_send_mgmt_tx_status(rdev, wdev, cookie, buf, len, ack, gfp); | 921 | nl80211_send_mgmt_tx_status(rdev, wdev, cookie, buf, len, ack, gfp); |
903 | } | 922 | } |
@@ -911,6 +930,8 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, | |||
911 | struct wiphy *wiphy = wdev->wiphy; | 930 | struct wiphy *wiphy = wdev->wiphy; |
912 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 931 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
913 | 932 | ||
933 | trace_cfg80211_cqm_rssi_notify(dev, rssi_event); | ||
934 | |||
914 | /* Indicate roaming trigger event to user space */ | 935 | /* Indicate roaming trigger event to user space */ |
915 | nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp); | 936 | nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp); |
916 | } | 937 | } |
@@ -923,6 +944,8 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev, | |||
923 | struct wiphy *wiphy = wdev->wiphy; | 944 | struct wiphy *wiphy = wdev->wiphy; |
924 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 945 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
925 | 946 | ||
947 | trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets); | ||
948 | |||
926 | /* Indicate roaming trigger event to user space */ | 949 | /* Indicate roaming trigger event to user space */ |
927 | nl80211_send_cqm_pktloss_notify(rdev, dev, peer, num_packets, gfp); | 950 | nl80211_send_cqm_pktloss_notify(rdev, dev, peer, num_packets, gfp); |
928 | } | 951 | } |
@@ -948,6 +971,7 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid, | |||
948 | struct wiphy *wiphy = wdev->wiphy; | 971 | struct wiphy *wiphy = wdev->wiphy; |
949 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 972 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
950 | 973 | ||
974 | trace_cfg80211_gtk_rekey_notify(dev, bssid); | ||
951 | nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp); | 975 | nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp); |
952 | } | 976 | } |
953 | EXPORT_SYMBOL(cfg80211_gtk_rekey_notify); | 977 | EXPORT_SYMBOL(cfg80211_gtk_rekey_notify); |
@@ -959,17 +983,19 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, | |||
959 | struct wiphy *wiphy = wdev->wiphy; | 983 | struct wiphy *wiphy = wdev->wiphy; |
960 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 984 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
961 | 985 | ||
986 | trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth); | ||
962 | nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp); | 987 | nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp); |
963 | } | 988 | } |
964 | EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); | 989 | EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); |
965 | 990 | ||
966 | void cfg80211_ch_switch_notify(struct net_device *dev, int freq, | 991 | void cfg80211_ch_switch_notify(struct net_device *dev, |
967 | enum nl80211_channel_type type) | 992 | struct cfg80211_chan_def *chandef) |
968 | { | 993 | { |
969 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 994 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
970 | struct wiphy *wiphy = wdev->wiphy; | 995 | struct wiphy *wiphy = wdev->wiphy; |
971 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 996 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
972 | struct ieee80211_channel *chan; | 997 | |
998 | trace_cfg80211_ch_switch_notify(dev, chandef); | ||
973 | 999 | ||
974 | wdev_lock(wdev); | 1000 | wdev_lock(wdev); |
975 | 1001 | ||
@@ -977,12 +1003,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev, int freq, | |||
977 | wdev->iftype != NL80211_IFTYPE_P2P_GO)) | 1003 | wdev->iftype != NL80211_IFTYPE_P2P_GO)) |
978 | goto out; | 1004 | goto out; |
979 | 1005 | ||
980 | chan = rdev_freq_to_chan(rdev, freq, type); | 1006 | wdev->channel = chandef->chan; |
981 | if (WARN_ON(!chan)) | 1007 | nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL); |
982 | goto out; | ||
983 | |||
984 | wdev->channel = chan; | ||
985 | nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL); | ||
986 | out: | 1008 | out: |
987 | wdev_unlock(wdev); | 1009 | wdev_unlock(wdev); |
988 | return; | 1010 | return; |
@@ -993,12 +1015,18 @@ bool cfg80211_rx_spurious_frame(struct net_device *dev, | |||
993 | const u8 *addr, gfp_t gfp) | 1015 | const u8 *addr, gfp_t gfp) |
994 | { | 1016 | { |
995 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 1017 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
1018 | bool ret; | ||
1019 | |||
1020 | trace_cfg80211_rx_spurious_frame(dev, addr); | ||
996 | 1021 | ||
997 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && | 1022 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && |
998 | wdev->iftype != NL80211_IFTYPE_P2P_GO)) | 1023 | wdev->iftype != NL80211_IFTYPE_P2P_GO)) { |
1024 | trace_cfg80211_return_bool(false); | ||
999 | return false; | 1025 | return false; |
1000 | 1026 | } | |
1001 | return nl80211_unexpected_frame(dev, addr, gfp); | 1027 | ret = nl80211_unexpected_frame(dev, addr, gfp); |
1028 | trace_cfg80211_return_bool(ret); | ||
1029 | return ret; | ||
1002 | } | 1030 | } |
1003 | EXPORT_SYMBOL(cfg80211_rx_spurious_frame); | 1031 | EXPORT_SYMBOL(cfg80211_rx_spurious_frame); |
1004 | 1032 | ||
@@ -1006,12 +1034,18 @@ bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, | |||
1006 | const u8 *addr, gfp_t gfp) | 1034 | const u8 *addr, gfp_t gfp) |
1007 | { | 1035 | { |
1008 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 1036 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
1037 | bool ret; | ||
1038 | |||
1039 | trace_cfg80211_rx_unexpected_4addr_frame(dev, addr); | ||
1009 | 1040 | ||
1010 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && | 1041 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && |
1011 | wdev->iftype != NL80211_IFTYPE_P2P_GO && | 1042 | wdev->iftype != NL80211_IFTYPE_P2P_GO && |
1012 | wdev->iftype != NL80211_IFTYPE_AP_VLAN)) | 1043 | wdev->iftype != NL80211_IFTYPE_AP_VLAN)) { |
1044 | trace_cfg80211_return_bool(false); | ||
1013 | return false; | 1045 | return false; |
1014 | 1046 | } | |
1015 | return nl80211_unexpected_4addr_frame(dev, addr, gfp); | 1047 | ret = nl80211_unexpected_4addr_frame(dev, addr, gfp); |
1048 | trace_cfg80211_return_bool(ret); | ||
1049 | return ret; | ||
1016 | } | 1050 | } |
1017 | EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame); | 1051 | EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame); |
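__cfg80211_mlme_auth() and cfg80211_mlme_auth() above gain sae_data/sae_data_len parameters, matching the NL80211_ATTR_SAE_DATA policy entry added in nl80211.c below. A hedged sketch of how the authenticate handler might feed them through -- variable names are illustrative, borrowed from the key_parse code visible in this diff, not the literal handler:

const u8 *sae_data = NULL;
int sae_data_len = 0;

if (info->attrs[NL80211_ATTR_SAE_DATA]) {
	/* only meaningful for SAE authentication */
	sae_data = nla_data(info->attrs[NL80211_ATTR_SAE_DATA]);
	sae_data_len = nla_len(info->attrs[NL80211_ATTR_SAE_DATA]);
}

err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
			 ssid, ssid_len, ie, ie_len,
			 key.p.key, key.p.key_len, key.idx,
			 sae_data, sae_data_len);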
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 0418a6d5c1a6..d038fa45ecd1 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -22,8 +22,8 @@ | |||
22 | #include "core.h" | 22 | #include "core.h" |
23 | #include "nl80211.h" | 23 | #include "nl80211.h" |
24 | #include "reg.h" | 24 | #include "reg.h" |
25 | #include "rdev-ops.h" | ||
25 | 26 | ||
26 | static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type); | ||
27 | static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, | 27 | static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, |
28 | struct genl_info *info, | 28 | struct genl_info *info, |
29 | struct cfg80211_crypto_settings *settings, | 29 | struct cfg80211_crypto_settings *settings, |
@@ -223,8 +223,13 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { | |||
223 | [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, | 223 | [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, |
224 | .len = 20-1 }, | 224 | .len = 20-1 }, |
225 | [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED }, | 225 | [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED }, |
226 | |||
226 | [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 }, | 227 | [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 }, |
227 | [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 }, | 228 | [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 }, |
229 | [NL80211_ATTR_CHANNEL_WIDTH] = { .type = NLA_U32 }, | ||
230 | [NL80211_ATTR_CENTER_FREQ1] = { .type = NLA_U32 }, | ||
231 | [NL80211_ATTR_CENTER_FREQ2] = { .type = NLA_U32 }, | ||
232 | |||
228 | [NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 }, | 233 | [NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 }, |
229 | [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 }, | 234 | [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 }, |
230 | [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, | 235 | [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, |
@@ -355,6 +360,9 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { | |||
355 | [NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 }, | 360 | [NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 }, |
356 | [NL80211_ATTR_WDEV] = { .type = NLA_U64 }, | 361 | [NL80211_ATTR_WDEV] = { .type = NLA_U64 }, |
357 | [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 }, | 362 | [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 }, |
363 | [NL80211_ATTR_SAE_DATA] = { .type = NLA_BINARY, }, | ||
364 | [NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN }, | ||
365 | [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, | ||
358 | }; | 366 | }; |
359 | 367 | ||
360 | /* policy for the key attributes */ | 368 | /* policy for the key attributes */ |
@@ -690,7 +698,7 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k) | |||
690 | 698 | ||
691 | static struct cfg80211_cached_keys * | 699 | static struct cfg80211_cached_keys * |
692 | nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, | 700 | nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, |
693 | struct nlattr *keys) | 701 | struct nlattr *keys, bool *no_ht) |
694 | { | 702 | { |
695 | struct key_parse parse; | 703 | struct key_parse parse; |
696 | struct nlattr *key; | 704 | struct nlattr *key; |
@@ -733,6 +741,12 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, | |||
733 | result->params[parse.idx].key_len = parse.p.key_len; | 741 | result->params[parse.idx].key_len = parse.p.key_len; |
734 | result->params[parse.idx].key = result->data[parse.idx]; | 742 | result->params[parse.idx].key = result->data[parse.idx]; |
735 | memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len); | 743 | memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len); |
744 | |||
745 | if (parse.p.cipher == WLAN_CIPHER_SUITE_WEP40 || | ||
746 | parse.p.cipher == WLAN_CIPHER_SUITE_WEP104) { | ||
747 | if (no_ht) | ||
748 | *no_ht = true; | ||
749 | } | ||
736 | } | 750 | } |
737 | 751 | ||
738 | return result; | 752 | return result; |
@@ -943,7 +957,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
943 | dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) { | 957 | dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) { |
944 | u32 tx_ant = 0, rx_ant = 0; | 958 | u32 tx_ant = 0, rx_ant = 0; |
945 | int res; | 959 | int res; |
946 | res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant); | 960 | res = rdev_get_antenna(dev, &tx_ant, &rx_ant); |
947 | if (!res) { | 961 | if (!res) { |
948 | if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, | 962 | if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, |
949 | tx_ant) || | 963 | tx_ant) || |
@@ -1101,6 +1115,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
1101 | goto nla_put_failure; | 1115 | goto nla_put_failure; |
1102 | } | 1116 | } |
1103 | CMD(start_p2p_device, START_P2P_DEVICE); | 1117 | CMD(start_p2p_device, START_P2P_DEVICE); |
1118 | CMD(set_mcast_rate, SET_MCAST_RATE); | ||
1104 | 1119 | ||
1105 | #ifdef CONFIG_NL80211_TESTMODE | 1120 | #ifdef CONFIG_NL80211_TESTMODE |
1106 | CMD(testmode_cmd, TESTMODE); | 1121 | CMD(testmode_cmd, TESTMODE); |
@@ -1350,51 +1365,139 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev) | |||
1350 | wdev->iftype == NL80211_IFTYPE_P2P_GO; | 1365 | wdev->iftype == NL80211_IFTYPE_P2P_GO; |
1351 | } | 1366 | } |
1352 | 1367 | ||
1353 | static bool nl80211_valid_channel_type(struct genl_info *info, | 1368 | static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, |
1354 | enum nl80211_channel_type *channel_type) | 1369 | struct genl_info *info, |
1370 | struct cfg80211_chan_def *chandef) | ||
1355 | { | 1371 | { |
1356 | enum nl80211_channel_type tmp; | 1372 | struct ieee80211_sta_ht_cap *ht_cap; |
1373 | struct ieee80211_sta_vht_cap *vht_cap; | ||
1374 | u32 control_freq, width; | ||
1357 | 1375 | ||
1358 | if (!info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) | 1376 | if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) |
1359 | return false; | 1377 | return -EINVAL; |
1360 | 1378 | ||
1361 | tmp = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]); | 1379 | control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); |
1362 | if (tmp != NL80211_CHAN_NO_HT && | ||
1363 | tmp != NL80211_CHAN_HT20 && | ||
1364 | tmp != NL80211_CHAN_HT40PLUS && | ||
1365 | tmp != NL80211_CHAN_HT40MINUS) | ||
1366 | return false; | ||
1367 | 1380 | ||
1368 | if (channel_type) | 1381 | chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq); |
1369 | *channel_type = tmp; | 1382 | chandef->width = NL80211_CHAN_WIDTH_20_NOHT; |
1383 | chandef->center_freq1 = control_freq; | ||
1384 | chandef->center_freq2 = 0; | ||
1370 | 1385 | ||
1371 | return true; | 1386 | /* Primary channel not allowed */ |
1387 | if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED) | ||
1388 | return -EINVAL; | ||
1389 | |||
1390 | if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { | ||
1391 | enum nl80211_channel_type chantype; | ||
1392 | |||
1393 | chantype = nla_get_u32( | ||
1394 | info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]); | ||
1395 | |||
1396 | switch (chantype) { | ||
1397 | case NL80211_CHAN_NO_HT: | ||
1398 | case NL80211_CHAN_HT20: | ||
1399 | case NL80211_CHAN_HT40PLUS: | ||
1400 | case NL80211_CHAN_HT40MINUS: | ||
1401 | cfg80211_chandef_create(chandef, chandef->chan, | ||
1402 | chantype); | ||
1403 | break; | ||
1404 | default: | ||
1405 | return -EINVAL; | ||
1406 | } | ||
1407 | } else if (info->attrs[NL80211_ATTR_CHANNEL_WIDTH]) { | ||
1408 | chandef->width = | ||
1409 | nla_get_u32(info->attrs[NL80211_ATTR_CHANNEL_WIDTH]); | ||
1410 | if (info->attrs[NL80211_ATTR_CENTER_FREQ1]) | ||
1411 | chandef->center_freq1 = | ||
1412 | nla_get_u32( | ||
1413 | info->attrs[NL80211_ATTR_CENTER_FREQ1]); | ||
1414 | if (info->attrs[NL80211_ATTR_CENTER_FREQ2]) | ||
1415 | chandef->center_freq2 = | ||
1416 | nla_get_u32( | ||
1417 | info->attrs[NL80211_ATTR_CENTER_FREQ2]); | ||
1418 | } | ||
1419 | |||
1420 | ht_cap = &rdev->wiphy.bands[chandef->chan->band]->ht_cap; | ||
1421 | vht_cap = &rdev->wiphy.bands[chandef->chan->band]->vht_cap; | ||
1422 | |||
1423 | if (!cfg80211_chan_def_valid(chandef)) | ||
1424 | return -EINVAL; | ||
1425 | |||
1426 | switch (chandef->width) { | ||
1427 | case NL80211_CHAN_WIDTH_20: | ||
1428 | if (!ht_cap->ht_supported) | ||
1429 | return -EINVAL; | ||
1430 | case NL80211_CHAN_WIDTH_20_NOHT: | ||
1431 | width = 20; | ||
1432 | break; | ||
1433 | case NL80211_CHAN_WIDTH_40: | ||
1434 | width = 40; | ||
1435 | /* quick early regulatory check */ | ||
1436 | if (chandef->center_freq1 < control_freq && | ||
1437 | chandef->chan->flags & IEEE80211_CHAN_NO_HT40MINUS) | ||
1438 | return -EINVAL; | ||
1439 | if (chandef->center_freq1 > control_freq && | ||
1440 | chandef->chan->flags & IEEE80211_CHAN_NO_HT40PLUS) | ||
1441 | return -EINVAL; | ||
1442 | if (!ht_cap->ht_supported) | ||
1443 | return -EINVAL; | ||
1444 | if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || | ||
1445 | ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT) | ||
1446 | return -EINVAL; | ||
1447 | break; | ||
1448 | case NL80211_CHAN_WIDTH_80: | ||
1449 | width = 80; | ||
1450 | if (!vht_cap->vht_supported) | ||
1451 | return -EINVAL; | ||
1452 | break; | ||
1453 | case NL80211_CHAN_WIDTH_80P80: | ||
1454 | width = 80; | ||
1455 | if (!vht_cap->vht_supported) | ||
1456 | return -EINVAL; | ||
1457 | if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)) | ||
1458 | return -EINVAL; | ||
1459 | break; | ||
1460 | case NL80211_CHAN_WIDTH_160: | ||
1461 | width = 160; | ||
1462 | if (!vht_cap->vht_supported) | ||
1463 | return -EINVAL; | ||
1464 | if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)) | ||
1465 | return -EINVAL; | ||
1466 | break; | ||
1467 | default: | ||
1468 | return -EINVAL; | ||
1469 | } | ||
1470 | |||
1471 | if (!cfg80211_secondary_chans_ok(&rdev->wiphy, chandef->center_freq1, | ||
1472 | width, IEEE80211_CHAN_DISABLED)) | ||
1473 | return -EINVAL; | ||
1474 | if (chandef->center_freq2 && | ||
1475 | !cfg80211_secondary_chans_ok(&rdev->wiphy, chandef->center_freq2, | ||
1476 | width, IEEE80211_CHAN_DISABLED)) | ||
1477 | return -EINVAL; | ||
1478 | |||
1479 | /* TODO: missing regulatory check on bandwidth */ | ||
1480 | |||
1481 | return 0; | ||
1372 | } | 1482 | } |
1373 | 1483 | ||
1374 | static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, | 1484 | static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, |
1375 | struct wireless_dev *wdev, | 1485 | struct wireless_dev *wdev, |
1376 | struct genl_info *info) | 1486 | struct genl_info *info) |
1377 | { | 1487 | { |
1378 | struct ieee80211_channel *channel; | 1488 | struct cfg80211_chan_def chandef; |
1379 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; | ||
1380 | u32 freq; | ||
1381 | int result; | 1489 | int result; |
1382 | enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR; | 1490 | enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR; |
1383 | 1491 | ||
1384 | if (wdev) | 1492 | if (wdev) |
1385 | iftype = wdev->iftype; | 1493 | iftype = wdev->iftype; |
1386 | 1494 | ||
1387 | if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) | ||
1388 | return -EINVAL; | ||
1389 | |||
1390 | if (!nl80211_can_set_dev_channel(wdev)) | 1495 | if (!nl80211_can_set_dev_channel(wdev)) |
1391 | return -EOPNOTSUPP; | 1496 | return -EOPNOTSUPP; |
1392 | 1497 | ||
1393 | if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] && | 1498 | result = nl80211_parse_chandef(rdev, info, &chandef); |
1394 | !nl80211_valid_channel_type(info, &channel_type)) | 1499 | if (result) |
1395 | return -EINVAL; | 1500 | return result; |
1396 | |||
1397 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); | ||
1398 | 1501 | ||
1399 | mutex_lock(&rdev->devlist_mtx); | 1502 | mutex_lock(&rdev->devlist_mtx); |
1400 | switch (iftype) { | 1503 | switch (iftype) { |
@@ -1404,22 +1507,18 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, | |||
1404 | result = -EBUSY; | 1507 | result = -EBUSY; |
1405 | break; | 1508 | break; |
1406 | } | 1509 | } |
1407 | channel = rdev_freq_to_chan(rdev, freq, channel_type); | 1510 | if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef)) { |
1408 | if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy, | ||
1409 | channel, | ||
1410 | channel_type)) { | ||
1411 | result = -EINVAL; | 1511 | result = -EINVAL; |
1412 | break; | 1512 | break; |
1413 | } | 1513 | } |
1414 | wdev->preset_chan = channel; | 1514 | wdev->preset_chandef = chandef; |
1415 | wdev->preset_chantype = channel_type; | ||
1416 | result = 0; | 1515 | result = 0; |
1417 | break; | 1516 | break; |
1418 | case NL80211_IFTYPE_MESH_POINT: | 1517 | case NL80211_IFTYPE_MESH_POINT: |
1419 | result = cfg80211_set_mesh_freq(rdev, wdev, freq, channel_type); | 1518 | result = cfg80211_set_mesh_channel(rdev, wdev, &chandef); |
1420 | break; | 1519 | break; |
1421 | case NL80211_IFTYPE_MONITOR: | 1520 | case NL80211_IFTYPE_MONITOR: |
1422 | result = cfg80211_set_monitor_channel(rdev, freq, channel_type); | 1521 | result = cfg80211_set_monitor_channel(rdev, &chandef); |
1423 | break; | 1522 | break; |
1424 | default: | 1523 | default: |
1425 | result = -EINVAL; | 1524 | result = -EINVAL; |
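As a concrete reading of nl80211_parse_chandef() above, a hypothetical request with NL80211_ATTR_WIPHY_FREQ = 5180 (channel 36) and the legacy NL80211_ATTR_WIPHY_CHANNEL_TYPE = NL80211_CHAN_HT40PLUS would be expanded roughly like this (values are an illustration, not taken from the patch):

struct cfg80211_chan_def chandef;

chandef.chan = ieee80211_get_channel(&rdev->wiphy, 5180);
cfg80211_chandef_create(&chandef, chandef.chan, NL80211_CHAN_HT40PLUS);
/* expected result:
 *   chandef.width        == NL80211_CHAN_WIDTH_40
 *   chandef.center_freq1 == 5190	(midpoint of 5180 and 5200)
 *   chandef.center_freq2 == 0
 * the NL80211_CHAN_WIDTH_40 branch then rejects the definition if the
 * channel is flagged IEEE80211_CHAN_NO_HT40PLUS or the band lacks
 * IEEE80211_HT_CAP_SUP_WIDTH_20_40.
 */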
@@ -1457,7 +1556,7 @@ static int nl80211_set_wds_peer(struct sk_buff *skb, struct genl_info *info) | |||
1457 | return -EOPNOTSUPP; | 1556 | return -EOPNOTSUPP; |
1458 | 1557 | ||
1459 | bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); | 1558 | bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); |
1460 | return rdev->ops->set_wds_peer(wdev->wiphy, dev, bssid); | 1559 | return rdev_set_wds_peer(rdev, dev, bssid); |
1461 | } | 1560 | } |
1462 | 1561 | ||
1463 | 1562 | ||
@@ -1507,10 +1606,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
1507 | result = 0; | 1606 | result = 0; |
1508 | 1607 | ||
1509 | mutex_lock(&rdev->mtx); | 1608 | mutex_lock(&rdev->mtx); |
1510 | } else if (nl80211_can_set_dev_channel(netdev->ieee80211_ptr)) | 1609 | } else |
1511 | wdev = netdev->ieee80211_ptr; | 1610 | wdev = netdev->ieee80211_ptr; |
1512 | else | ||
1513 | wdev = NULL; | ||
1514 | 1611 | ||
1515 | /* | 1612 | /* |
1516 | * end workaround code, by now the rdev is available | 1613 | * end workaround code, by now the rdev is available |
@@ -1562,24 +1659,29 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
1562 | if (result) | 1659 | if (result) |
1563 | goto bad_res; | 1660 | goto bad_res; |
1564 | 1661 | ||
1565 | result = rdev->ops->set_txq_params(&rdev->wiphy, | 1662 | result = rdev_set_txq_params(rdev, netdev, |
1566 | netdev, | 1663 | &txq_params); |
1567 | &txq_params); | ||
1568 | if (result) | 1664 | if (result) |
1569 | goto bad_res; | 1665 | goto bad_res; |
1570 | } | 1666 | } |
1571 | } | 1667 | } |
1572 | 1668 | ||
1573 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { | 1669 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { |
1574 | result = __nl80211_set_channel(rdev, wdev, info); | 1670 | result = __nl80211_set_channel(rdev, |
1671 | nl80211_can_set_dev_channel(wdev) ? wdev : NULL, | ||
1672 | info); | ||
1575 | if (result) | 1673 | if (result) |
1576 | goto bad_res; | 1674 | goto bad_res; |
1577 | } | 1675 | } |
1578 | 1676 | ||
1579 | if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) { | 1677 | if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) { |
1678 | struct wireless_dev *txp_wdev = wdev; | ||
1580 | enum nl80211_tx_power_setting type; | 1679 | enum nl80211_tx_power_setting type; |
1581 | int idx, mbm = 0; | 1680 | int idx, mbm = 0; |
1582 | 1681 | ||
1682 | if (!(rdev->wiphy.features & NL80211_FEATURE_VIF_TXPOWER)) | ||
1683 | txp_wdev = NULL; | ||
1684 | |||
1583 | if (!rdev->ops->set_tx_power) { | 1685 | if (!rdev->ops->set_tx_power) { |
1584 | result = -EOPNOTSUPP; | 1686 | result = -EOPNOTSUPP; |
1585 | goto bad_res; | 1687 | goto bad_res; |
@@ -1599,7 +1701,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
1599 | mbm = nla_get_u32(info->attrs[idx]); | 1701 | mbm = nla_get_u32(info->attrs[idx]); |
1600 | } | 1702 | } |
1601 | 1703 | ||
1602 | result = rdev->ops->set_tx_power(&rdev->wiphy, type, mbm); | 1704 | result = rdev_set_tx_power(rdev, txp_wdev, type, mbm); |
1603 | if (result) | 1705 | if (result) |
1604 | goto bad_res; | 1706 | goto bad_res; |
1605 | } | 1707 | } |
@@ -1628,7 +1730,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
1628 | tx_ant = tx_ant & rdev->wiphy.available_antennas_tx; | 1730 | tx_ant = tx_ant & rdev->wiphy.available_antennas_tx; |
1629 | rx_ant = rx_ant & rdev->wiphy.available_antennas_rx; | 1731 | rx_ant = rx_ant & rdev->wiphy.available_antennas_rx; |
1630 | 1732 | ||
1631 | result = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant); | 1733 | result = rdev_set_antenna(rdev, tx_ant, rx_ant); |
1632 | if (result) | 1734 | if (result) |
1633 | goto bad_res; | 1735 | goto bad_res; |
1634 | } | 1736 | } |
@@ -1713,7 +1815,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
1713 | if (changed & WIPHY_PARAM_COVERAGE_CLASS) | 1815 | if (changed & WIPHY_PARAM_COVERAGE_CLASS) |
1714 | rdev->wiphy.coverage_class = coverage_class; | 1816 | rdev->wiphy.coverage_class = coverage_class; |
1715 | 1817 | ||
1716 | result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); | 1818 | result = rdev_set_wiphy_params(rdev, changed); |
1717 | if (result) { | 1819 | if (result) { |
1718 | rdev->wiphy.retry_short = old_retry_short; | 1820 | rdev->wiphy.retry_short = old_retry_short; |
1719 | rdev->wiphy.retry_long = old_retry_long; | 1821 | rdev->wiphy.retry_long = old_retry_long; |
@@ -1736,6 +1838,35 @@ static inline u64 wdev_id(struct wireless_dev *wdev) | |||
1736 | ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32); | 1838 | ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32); |
1737 | } | 1839 | } |
1738 | 1840 | ||
1841 | static int nl80211_send_chandef(struct sk_buff *msg, | ||
1842 | struct cfg80211_chan_def *chandef) | ||
1843 | { | ||
1844 | WARN_ON(!cfg80211_chan_def_valid(chandef)); | ||
1845 | |||
1846 | if (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, | ||
1847 | chandef->chan->center_freq)) | ||
1848 | return -ENOBUFS; | ||
1849 | switch (chandef->width) { | ||
1850 | case NL80211_CHAN_WIDTH_20_NOHT: | ||
1851 | case NL80211_CHAN_WIDTH_20: | ||
1852 | case NL80211_CHAN_WIDTH_40: | ||
1853 | if (nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, | ||
1854 | cfg80211_get_chandef_type(chandef))) | ||
1855 | return -ENOBUFS; | ||
1856 | break; | ||
1857 | default: | ||
1858 | break; | ||
1859 | } | ||
1860 | if (nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, chandef->width)) | ||
1861 | return -ENOBUFS; | ||
1862 | if (nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ1, chandef->center_freq1)) | ||
1863 | return -ENOBUFS; | ||
1864 | if (chandef->center_freq2 && | ||
1865 | nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ2, chandef->center_freq2)) | ||
1866 | return -ENOBUFS; | ||
1867 | return 0; | ||
1868 | } | ||
1869 | |||
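Note on the new nl80211_send_chandef() helper above: it always reports the control-channel frequency, adds the legacy NL80211_ATTR_WIPHY_CHANNEL_TYPE only for the 20/40 MHz widths, and then emits the width and center-frequency attributes, with CENTER_FREQ2 only when an 80+80 MHz definition uses it. A stand-alone C sketch of that layout, using simplified local types and printf in place of nla_put_u32() (illustrative only, not the kernel structures):

    #include <stdio.h>

    /* Simplified stand-ins for the cfg80211 types (reduced fields only). */
    enum width { W20_NOHT, W20, W40, W80, W80P80, W160 };

    struct chan { int center_freq; };

    struct chandef {
        struct chan *chan;      /* control channel */
        enum width width;
        int center_freq1;
        int center_freq2;       /* non-zero only for 80+80 MHz */
    };

    /* Mirrors the attribute order used by nl80211_send_chandef() above. */
    static void dump_chandef(const struct chandef *c)
    {
        printf("WIPHY_FREQ=%d\n", c->chan->center_freq);
        if (c->width == W20_NOHT || c->width == W20 || c->width == W40)
            printf("WIPHY_CHANNEL_TYPE=<legacy HT20/HT40+/HT40- value>\n");
        printf("CHANNEL_WIDTH=%d\n", (int)c->width);
        printf("CENTER_FREQ1=%d\n", c->center_freq1);
        if (c->center_freq2)
            printf("CENTER_FREQ2=%d\n", c->center_freq2);
    }

    int main(void)
    {
        struct chan primary = { 5180 };
        struct chandef c = { &primary, W80, 5210, 0 };

        dump_chandef(&c);       /* 80 MHz: no legacy channel-type attribute */
        return 0;
    }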
1739 | static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, | 1870 | static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, |
1740 | struct cfg80211_registered_device *rdev, | 1871 | struct cfg80211_registered_device *rdev, |
1741 | struct wireless_dev *wdev) | 1872 | struct wireless_dev *wdev) |
@@ -1762,16 +1893,18 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
1762 | goto nla_put_failure; | 1893 | goto nla_put_failure; |
1763 | 1894 | ||
1764 | if (rdev->ops->get_channel) { | 1895 | if (rdev->ops->get_channel) { |
1765 | struct ieee80211_channel *chan; | 1896 | int ret; |
1766 | enum nl80211_channel_type channel_type; | 1897 | struct cfg80211_chan_def chandef; |
1767 | 1898 | ||
1768 | chan = rdev->ops->get_channel(&rdev->wiphy, wdev, | 1899 | ret = rdev_get_channel(rdev, wdev, &chandef); |
1769 | &channel_type); | 1900 | if (ret == 0) { |
1770 | if (chan && | 1901 | if (nl80211_send_chandef(msg, &chandef)) |
1771 | (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, | 1902 | goto nla_put_failure; |
1772 | chan->center_freq) || | 1903 | } |
1773 | nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, | 1904 | } |
1774 | channel_type))) | 1905 | |
1906 | if (wdev->ssid_len) { | ||
1907 | if (nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid)) | ||
1775 | goto nla_put_failure; | 1908 | goto nla_put_failure; |
1776 | } | 1909 | } |
1777 | 1910 | ||
@@ -2014,9 +2147,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | |||
2014 | err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? | 2147 | err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? |
2015 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, | 2148 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, |
2016 | &flags); | 2149 | &flags); |
2017 | wdev = rdev->ops->add_virtual_intf(&rdev->wiphy, | 2150 | wdev = rdev_add_virtual_intf(rdev, |
2018 | nla_data(info->attrs[NL80211_ATTR_IFNAME]), | 2151 | nla_data(info->attrs[NL80211_ATTR_IFNAME]), |
2019 | type, err ? NULL : &flags, ¶ms); | 2152 | type, err ? NULL : &flags, ¶ms); |
2020 | if (IS_ERR(wdev)) { | 2153 | if (IS_ERR(wdev)) { |
2021 | nlmsg_free(msg); | 2154 | nlmsg_free(msg); |
2022 | return PTR_ERR(wdev); | 2155 | return PTR_ERR(wdev); |
@@ -2083,7 +2216,7 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) | |||
2083 | if (!wdev->netdev) | 2216 | if (!wdev->netdev) |
2084 | info->user_ptr[1] = NULL; | 2217 | info->user_ptr[1] = NULL; |
2085 | 2218 | ||
2086 | return rdev->ops->del_virtual_intf(&rdev->wiphy, wdev); | 2219 | return rdev_del_virtual_intf(rdev, wdev); |
2087 | } | 2220 | } |
2088 | 2221 | ||
2089 | static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info) | 2222 | static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info) |
@@ -2100,7 +2233,7 @@ static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info) | |||
2100 | 2233 | ||
2101 | noack_map = nla_get_u16(info->attrs[NL80211_ATTR_NOACK_MAP]); | 2234 | noack_map = nla_get_u16(info->attrs[NL80211_ATTR_NOACK_MAP]); |
2102 | 2235 | ||
2103 | return rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map); | 2236 | return rdev_set_noack_map(rdev, dev, noack_map); |
2104 | } | 2237 | } |
2105 | 2238 | ||
2106 | struct get_key_cookie { | 2239 | struct get_key_cookie { |
@@ -2210,8 +2343,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
2210 | !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) | 2343 | !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) |
2211 | return -ENOENT; | 2344 | return -ENOENT; |
2212 | 2345 | ||
2213 | err = rdev->ops->get_key(&rdev->wiphy, dev, key_idx, pairwise, | 2346 | err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie, |
2214 | mac_addr, &cookie, get_key_callback); | 2347 | get_key_callback); |
2215 | 2348 | ||
2216 | if (err) | 2349 | if (err) |
2217 | goto free_msg; | 2350 | goto free_msg; |
@@ -2259,7 +2392,7 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) | |||
2259 | if (err) | 2392 | if (err) |
2260 | goto out; | 2393 | goto out; |
2261 | 2394 | ||
2262 | err = rdev->ops->set_default_key(&rdev->wiphy, dev, key.idx, | 2395 | err = rdev_set_default_key(rdev, dev, key.idx, |
2263 | key.def_uni, key.def_multi); | 2396 | key.def_uni, key.def_multi); |
2264 | 2397 | ||
2265 | if (err) | 2398 | if (err) |
@@ -2283,8 +2416,7 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) | |||
2283 | if (err) | 2416 | if (err) |
2284 | goto out; | 2417 | goto out; |
2285 | 2418 | ||
2286 | err = rdev->ops->set_default_mgmt_key(&rdev->wiphy, | 2419 | err = rdev_set_default_mgmt_key(rdev, dev, key.idx); |
2287 | dev, key.idx); | ||
2288 | if (err) | 2420 | if (err) |
2289 | goto out; | 2421 | goto out; |
2290 | 2422 | ||
@@ -2340,9 +2472,9 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) | |||
2340 | wdev_lock(dev->ieee80211_ptr); | 2472 | wdev_lock(dev->ieee80211_ptr); |
2341 | err = nl80211_key_allowed(dev->ieee80211_ptr); | 2473 | err = nl80211_key_allowed(dev->ieee80211_ptr); |
2342 | if (!err) | 2474 | if (!err) |
2343 | err = rdev->ops->add_key(&rdev->wiphy, dev, key.idx, | 2475 | err = rdev_add_key(rdev, dev, key.idx, |
2344 | key.type == NL80211_KEYTYPE_PAIRWISE, | 2476 | key.type == NL80211_KEYTYPE_PAIRWISE, |
2345 | mac_addr, &key.p); | 2477 | mac_addr, &key.p); |
2346 | wdev_unlock(dev->ieee80211_ptr); | 2478 | wdev_unlock(dev->ieee80211_ptr); |
2347 | 2479 | ||
2348 | return err; | 2480 | return err; |
@@ -2386,9 +2518,9 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) | |||
2386 | err = -ENOENT; | 2518 | err = -ENOENT; |
2387 | 2519 | ||
2388 | if (!err) | 2520 | if (!err) |
2389 | err = rdev->ops->del_key(&rdev->wiphy, dev, key.idx, | 2521 | err = rdev_del_key(rdev, dev, key.idx, |
2390 | key.type == NL80211_KEYTYPE_PAIRWISE, | 2522 | key.type == NL80211_KEYTYPE_PAIRWISE, |
2391 | mac_addr); | 2523 | mac_addr); |
2392 | 2524 | ||
2393 | #ifdef CONFIG_CFG80211_WEXT | 2525 | #ifdef CONFIG_CFG80211_WEXT |
2394 | if (!err) { | 2526 | if (!err) { |
@@ -2476,11 +2608,10 @@ static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev, | |||
2476 | wdev->iftype != NL80211_IFTYPE_P2P_GO) | 2608 | wdev->iftype != NL80211_IFTYPE_P2P_GO) |
2477 | continue; | 2609 | continue; |
2478 | 2610 | ||
2479 | if (!wdev->preset_chan) | 2611 | if (!wdev->preset_chandef.chan) |
2480 | continue; | 2612 | continue; |
2481 | 2613 | ||
2482 | params->channel = wdev->preset_chan; | 2614 | params->chandef = wdev->preset_chandef; |
2483 | params->channel_type = wdev->preset_chantype; | ||
2484 | ret = true; | 2615 | ret = true; |
2485 | break; | 2616 | break; |
2486 | } | 2617 | } |
@@ -2490,6 +2621,30 @@ static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev, | |||
2490 | return ret; | 2621 | return ret; |
2491 | } | 2622 | } |
2492 | 2623 | ||
2624 | static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev, | ||
2625 | enum nl80211_auth_type auth_type, | ||
2626 | enum nl80211_commands cmd) | ||
2627 | { | ||
2628 | if (auth_type > NL80211_AUTHTYPE_MAX) | ||
2629 | return false; | ||
2630 | |||
2631 | switch (cmd) { | ||
2632 | case NL80211_CMD_AUTHENTICATE: | ||
2633 | if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) && | ||
2634 | auth_type == NL80211_AUTHTYPE_SAE) | ||
2635 | return false; | ||
2636 | return true; | ||
2637 | case NL80211_CMD_CONNECT: | ||
2638 | case NL80211_CMD_START_AP: | ||
2639 | /* SAE not supported yet */ | ||
2640 | if (auth_type == NL80211_AUTHTYPE_SAE) | ||
2641 | return false; | ||
2642 | return true; | ||
2643 | default: | ||
2644 | return false; | ||
2645 | } | ||
2646 | } | ||
2647 | |||
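The command-dependent validator above replaces the old one-line nl80211_valid_auth_type(): SAE is accepted only for NL80211_CMD_AUTHENTICATE, and only when the wiphy advertises NL80211_FEATURE_SAE, while CONNECT and START_AP still reject it. A minimal sketch of the same decision with simplified enums and a plain feature bitmask (the names here are stand-ins, not the nl80211 constants):

    #include <stdbool.h>
    #include <stdio.h>

    enum auth_type { AUTH_OPEN, AUTH_SHARED, AUTH_FT, AUTH_NET_EAP, AUTH_SAE,
                     AUTH_MAX = AUTH_SAE };
    enum command  { CMD_AUTHENTICATE, CMD_CONNECT, CMD_START_AP };

    #define FEATURE_SAE 0x1     /* stand-in for NL80211_FEATURE_SAE */

    /* Same shape as nl80211_valid_auth_type() after this change. */
    static bool valid_auth_type(unsigned int features, enum auth_type t,
                                enum command cmd)
    {
        if (t > AUTH_MAX)
            return false;

        switch (cmd) {
        case CMD_AUTHENTICATE:
            /* SAE needs driver support on the AUTHENTICATE path */
            return !(t == AUTH_SAE && !(features & FEATURE_SAE));
        case CMD_CONNECT:
        case CMD_START_AP:
            return t != AUTH_SAE;   /* SAE not supported here yet */
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("%d\n", valid_auth_type(FEATURE_SAE, AUTH_SAE, CMD_AUTHENTICATE)); /* 1 */
        printf("%d\n", valid_auth_type(0, AUTH_SAE, CMD_AUTHENTICATE));           /* 0 */
        printf("%d\n", valid_auth_type(FEATURE_SAE, AUTH_SAE, CMD_CONNECT));      /* 0 */
        return 0;
    }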
2493 | static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) | 2648 | static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) |
2494 | { | 2649 | { |
2495 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; | 2650 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; |
@@ -2559,7 +2714,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) | |||
2559 | if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { | 2714 | if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { |
2560 | params.auth_type = nla_get_u32( | 2715 | params.auth_type = nla_get_u32( |
2561 | info->attrs[NL80211_ATTR_AUTH_TYPE]); | 2716 | info->attrs[NL80211_ATTR_AUTH_TYPE]); |
2562 | if (!nl80211_valid_auth_type(params.auth_type)) | 2717 | if (!nl80211_valid_auth_type(rdev, params.auth_type, |
2718 | NL80211_CMD_START_AP)) | ||
2563 | return -EINVAL; | 2719 | return -EINVAL; |
2564 | } else | 2720 | } else |
2565 | params.auth_type = NL80211_AUTHTYPE_AUTOMATIC; | 2721 | params.auth_type = NL80211_AUTHTYPE_AUTOMATIC; |
@@ -2577,42 +2733,32 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) | |||
2577 | } | 2733 | } |
2578 | 2734 | ||
2579 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { | 2735 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { |
2580 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; | 2736 | err = nl80211_parse_chandef(rdev, info, ¶ms.chandef); |
2581 | 2737 | if (err) | |
2582 | if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] && | 2738 | return err; |
2583 | !nl80211_valid_channel_type(info, &channel_type)) | 2739 | } else if (wdev->preset_chandef.chan) { |
2584 | return -EINVAL; | 2740 | params.chandef = wdev->preset_chandef; |
2585 | |||
2586 | params.channel = rdev_freq_to_chan(rdev, | ||
2587 | nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]), | ||
2588 | channel_type); | ||
2589 | if (!params.channel) | ||
2590 | return -EINVAL; | ||
2591 | params.channel_type = channel_type; | ||
2592 | } else if (wdev->preset_chan) { | ||
2593 | params.channel = wdev->preset_chan; | ||
2594 | params.channel_type = wdev->preset_chantype; | ||
2595 | } else if (!nl80211_get_ap_channel(rdev, ¶ms)) | 2741 | } else if (!nl80211_get_ap_channel(rdev, ¶ms)) |
2596 | return -EINVAL; | 2742 | return -EINVAL; |
2597 | 2743 | ||
2598 | if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, params.channel, | 2744 | if (!cfg80211_reg_can_beacon(&rdev->wiphy, ¶ms.chandef)) |
2599 | params.channel_type)) | ||
2600 | return -EINVAL; | 2745 | return -EINVAL; |
2601 | 2746 | ||
2602 | mutex_lock(&rdev->devlist_mtx); | 2747 | mutex_lock(&rdev->devlist_mtx); |
2603 | err = cfg80211_can_use_chan(rdev, wdev, params.channel, | 2748 | err = cfg80211_can_use_chan(rdev, wdev, params.chandef.chan, |
2604 | CHAN_MODE_SHARED); | 2749 | CHAN_MODE_SHARED); |
2605 | mutex_unlock(&rdev->devlist_mtx); | 2750 | mutex_unlock(&rdev->devlist_mtx); |
2606 | 2751 | ||
2607 | if (err) | 2752 | if (err) |
2608 | return err; | 2753 | return err; |
2609 | 2754 | ||
2610 | err = rdev->ops->start_ap(&rdev->wiphy, dev, ¶ms); | 2755 | err = rdev_start_ap(rdev, dev, ¶ms); |
2611 | if (!err) { | 2756 | if (!err) { |
2612 | wdev->preset_chan = params.channel; | 2757 | wdev->preset_chandef = params.chandef; |
2613 | wdev->preset_chantype = params.channel_type; | ||
2614 | wdev->beacon_interval = params.beacon_interval; | 2758 | wdev->beacon_interval = params.beacon_interval; |
2615 | wdev->channel = params.channel; | 2759 | wdev->channel = params.chandef.chan; |
2760 | wdev->ssid_len = params.ssid_len; | ||
2761 | memcpy(wdev->ssid, params.ssid, wdev->ssid_len); | ||
2616 | } | 2762 | } |
2617 | return err; | 2763 | return err; |
2618 | } | 2764 | } |
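With the chandef conversion, nl80211_start_ap() now picks the channel definition in this order: an explicit NL80211_ATTR_WIPHY_FREQ in the request, then the interface's preset_chandef, then a channel borrowed from another AP/GO interface via nl80211_get_ap_channel(). A small sketch of that fallback order with a hypothetical pick_ap_chandef() helper and a heavily reduced chandef (only the frequency field is kept for the illustration):

    #include <stdbool.h>
    #include <stdio.h>

    struct chandef { int freq; };   /* heavily reduced stand-in */

    /*
     * Pick the channel definition for starting an AP, in order of preference:
     *  1. an explicit one parsed from the netlink request,
     *  2. the wdev's preset chandef,
     *  3. one borrowed from another AP/GO interface on the same wiphy.
     * Returns false when none is available (the real code returns -EINVAL).
     */
    static bool pick_ap_chandef(const struct chandef *from_attrs,
                                const struct chandef *preset,
                                const struct chandef *other_ap,
                                struct chandef *out)
    {
        if (from_attrs && from_attrs->freq) {
            *out = *from_attrs;
            return true;
        }
        if (preset && preset->freq) {
            *out = *preset;
            return true;
        }
        if (other_ap && other_ap->freq) {
            *out = *other_ap;
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct chandef preset = { 2437 }, out;

        if (pick_ap_chandef(NULL, &preset, NULL, &out))
            printf("starting AP on %d MHz\n", out.freq);
        return 0;
    }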
@@ -2639,7 +2785,7 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info) | |||
2639 | if (err) | 2785 | if (err) |
2640 | return err; | 2786 | return err; |
2641 | 2787 | ||
2642 | return rdev->ops->change_beacon(&rdev->wiphy, dev, ¶ms); | 2788 | return rdev_change_beacon(rdev, dev, ¶ms); |
2643 | } | 2789 | } |
2644 | 2790 | ||
2645 | static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info) | 2791 | static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info) |
@@ -2744,29 +2890,52 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, | |||
2744 | 2890 | ||
2745 | rate = nla_nest_start(msg, attr); | 2891 | rate = nla_nest_start(msg, attr); |
2746 | if (!rate) | 2892 | if (!rate) |
2747 | goto nla_put_failure; | 2893 | return false; |
2748 | 2894 | ||
2749 | /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */ | 2895 | /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */ |
2750 | bitrate = cfg80211_calculate_bitrate(info); | 2896 | bitrate = cfg80211_calculate_bitrate(info); |
2751 | /* report 16-bit bitrate only if we can */ | 2897 | /* report 16-bit bitrate only if we can */ |
2752 | bitrate_compat = bitrate < (1UL << 16) ? bitrate : 0; | 2898 | bitrate_compat = bitrate < (1UL << 16) ? bitrate : 0; |
2753 | if ((bitrate > 0 && | 2899 | if (bitrate > 0 && |
2754 | nla_put_u32(msg, NL80211_RATE_INFO_BITRATE32, bitrate)) || | 2900 | nla_put_u32(msg, NL80211_RATE_INFO_BITRATE32, bitrate)) |
2755 | (bitrate_compat > 0 && | 2901 | return false; |
2756 | nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat)) || | 2902 | if (bitrate_compat > 0 && |
2757 | ((info->flags & RATE_INFO_FLAGS_MCS) && | 2903 | nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat)) |
2758 | nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) || | 2904 | return false; |
2759 | ((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) && | 2905 | |
2760 | nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) || | 2906 | if (info->flags & RATE_INFO_FLAGS_MCS) { |
2761 | ((info->flags & RATE_INFO_FLAGS_SHORT_GI) && | 2907 | if (nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) |
2762 | nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))) | 2908 | return false; |
2763 | goto nla_put_failure; | 2909 | if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH && |
2910 | nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) | ||
2911 | return false; | ||
2912 | if (info->flags & RATE_INFO_FLAGS_SHORT_GI && | ||
2913 | nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)) | ||
2914 | return false; | ||
2915 | } else if (info->flags & RATE_INFO_FLAGS_VHT_MCS) { | ||
2916 | if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_MCS, info->mcs)) | ||
2917 | return false; | ||
2918 | if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_NSS, info->nss)) | ||
2919 | return false; | ||
2920 | if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH && | ||
2921 | nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) | ||
2922 | return false; | ||
2923 | if (info->flags & RATE_INFO_FLAGS_80_MHZ_WIDTH && | ||
2924 | nla_put_flag(msg, NL80211_RATE_INFO_80_MHZ_WIDTH)) | ||
2925 | return false; | ||
2926 | if (info->flags & RATE_INFO_FLAGS_80P80_MHZ_WIDTH && | ||
2927 | nla_put_flag(msg, NL80211_RATE_INFO_80P80_MHZ_WIDTH)) | ||
2928 | return false; | ||
2929 | if (info->flags & RATE_INFO_FLAGS_160_MHZ_WIDTH && | ||
2930 | nla_put_flag(msg, NL80211_RATE_INFO_160_MHZ_WIDTH)) | ||
2931 | return false; | ||
2932 | if (info->flags & RATE_INFO_FLAGS_SHORT_GI && | ||
2933 | nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)) | ||
2934 | return false; | ||
2935 | } | ||
2764 | 2936 | ||
2765 | nla_nest_end(msg, rate); | 2937 | nla_nest_end(msg, rate); |
2766 | return true; | 2938 | return true; |
2767 | |||
2768 | nla_put_failure: | ||
2769 | return false; | ||
2770 | } | 2939 | } |
2771 | 2940 | ||
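The rewritten nl80211_put_sta_rate() above drops the single compound condition and the nla_put_failure label in favour of early returns, and adds the VHT MCS/NSS and width flags. One rule it keeps unchanged is the legacy 16-bit bitrate fallback; a stand-alone sketch of just that rule (plain integers standing in for the nla_put calls):

    #include <stdio.h>

    /*
     * Bitrates are carried in units of 100 kbit/s (as in struct rate_info).
     * The 32-bit attribute is always reported when non-zero; the legacy
     * 16-bit attribute only when the value still fits in 16 bits.
     */
    static unsigned int bitrate_compat(unsigned long bitrate)
    {
        return bitrate < (1UL << 16) ? bitrate : 0;
    }

    int main(void)
    {
        printf("%u\n", bitrate_compat(1300));   /* 130 Mbit/s: fits, reported as 1300 */
        printf("%u\n", bitrate_compat(78000));  /* 7.8 Gbit/s: 0, only the 32-bit attr */
        return 0;
    }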
2772 | static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq, | 2941 | static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq, |
@@ -2923,8 +3092,8 @@ static int nl80211_dump_station(struct sk_buff *skb, | |||
2923 | 3092 | ||
2924 | while (1) { | 3093 | while (1) { |
2925 | memset(&sinfo, 0, sizeof(sinfo)); | 3094 | memset(&sinfo, 0, sizeof(sinfo)); |
2926 | err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx, | 3095 | err = rdev_dump_station(dev, netdev, sta_idx, |
2927 | mac_addr, &sinfo); | 3096 | mac_addr, &sinfo); |
2928 | if (err == -ENOENT) | 3097 | if (err == -ENOENT) |
2929 | break; | 3098 | break; |
2930 | if (err) | 3099 | if (err) |
@@ -2969,7 +3138,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) | |||
2969 | if (!rdev->ops->get_station) | 3138 | if (!rdev->ops->get_station) |
2970 | return -EOPNOTSUPP; | 3139 | return -EOPNOTSUPP; |
2971 | 3140 | ||
2972 | err = rdev->ops->get_station(&rdev->wiphy, dev, mac_addr, &sinfo); | 3141 | err = rdev_get_station(rdev, dev, mac_addr, &sinfo); |
2973 | if (err) | 3142 | if (err) |
2974 | return err; | 3143 | return err; |
2975 | 3144 | ||
@@ -3146,7 +3315,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) | |||
3146 | 3315 | ||
3147 | /* be aware of params.vlan when changing code here */ | 3316 | /* be aware of params.vlan when changing code here */ |
3148 | 3317 | ||
3149 | err = rdev->ops->change_station(&rdev->wiphy, dev, mac_addr, ¶ms); | 3318 | err = rdev_change_station(rdev, dev, mac_addr, ¶ms); |
3150 | 3319 | ||
3151 | if (params.vlan) | 3320 | if (params.vlan) |
3152 | dev_put(params.vlan); | 3321 | dev_put(params.vlan); |
@@ -3198,6 +3367,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
3198 | params.ht_capa = | 3367 | params.ht_capa = |
3199 | nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); | 3368 | nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); |
3200 | 3369 | ||
3370 | if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) | ||
3371 | params.vht_capa = | ||
3372 | nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]); | ||
3373 | |||
3201 | if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) | 3374 | if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) |
3202 | params.plink_action = | 3375 | params.plink_action = |
3203 | nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); | 3376 | nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); |
@@ -3275,7 +3448,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
3275 | 3448 | ||
3276 | /* be aware of params.vlan when changing code here */ | 3449 | /* be aware of params.vlan when changing code here */ |
3277 | 3450 | ||
3278 | err = rdev->ops->add_station(&rdev->wiphy, dev, mac_addr, ¶ms); | 3451 | err = rdev_add_station(rdev, dev, mac_addr, ¶ms); |
3279 | 3452 | ||
3280 | if (params.vlan) | 3453 | if (params.vlan) |
3281 | dev_put(params.vlan); | 3454 | dev_put(params.vlan); |
@@ -3300,7 +3473,7 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) | |||
3300 | if (!rdev->ops->del_station) | 3473 | if (!rdev->ops->del_station) |
3301 | return -EOPNOTSUPP; | 3474 | return -EOPNOTSUPP; |
3302 | 3475 | ||
3303 | return rdev->ops->del_station(&rdev->wiphy, dev, mac_addr); | 3476 | return rdev_del_station(rdev, dev, mac_addr); |
3304 | } | 3477 | } |
3305 | 3478 | ||
3306 | static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq, | 3479 | static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq, |
@@ -3382,8 +3555,8 @@ static int nl80211_dump_mpath(struct sk_buff *skb, | |||
3382 | } | 3555 | } |
3383 | 3556 | ||
3384 | while (1) { | 3557 | while (1) { |
3385 | err = dev->ops->dump_mpath(&dev->wiphy, netdev, path_idx, | 3558 | err = rdev_dump_mpath(dev, netdev, path_idx, dst, next_hop, |
3386 | dst, next_hop, &pinfo); | 3559 | &pinfo); |
3387 | if (err == -ENOENT) | 3560 | if (err == -ENOENT) |
3388 | break; | 3561 | break; |
3389 | if (err) | 3562 | if (err) |
@@ -3430,7 +3603,7 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) | |||
3430 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) | 3603 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) |
3431 | return -EOPNOTSUPP; | 3604 | return -EOPNOTSUPP; |
3432 | 3605 | ||
3433 | err = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, &pinfo); | 3606 | err = rdev_get_mpath(rdev, dev, dst, next_hop, &pinfo); |
3434 | if (err) | 3607 | if (err) |
3435 | return err; | 3608 | return err; |
3436 | 3609 | ||
@@ -3469,7 +3642,7 @@ static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) | |||
3469 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) | 3642 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) |
3470 | return -EOPNOTSUPP; | 3643 | return -EOPNOTSUPP; |
3471 | 3644 | ||
3472 | return rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop); | 3645 | return rdev_change_mpath(rdev, dev, dst, next_hop); |
3473 | } | 3646 | } |
3474 | 3647 | ||
3475 | static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) | 3648 | static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) |
@@ -3494,7 +3667,7 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) | |||
3494 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) | 3667 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) |
3495 | return -EOPNOTSUPP; | 3668 | return -EOPNOTSUPP; |
3496 | 3669 | ||
3497 | return rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop); | 3670 | return rdev_add_mpath(rdev, dev, dst, next_hop); |
3498 | } | 3671 | } |
3499 | 3672 | ||
3500 | static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) | 3673 | static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) |
@@ -3509,7 +3682,7 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) | |||
3509 | if (!rdev->ops->del_mpath) | 3682 | if (!rdev->ops->del_mpath) |
3510 | return -EOPNOTSUPP; | 3683 | return -EOPNOTSUPP; |
3511 | 3684 | ||
3512 | return rdev->ops->del_mpath(&rdev->wiphy, dev, dst); | 3685 | return rdev_del_mpath(rdev, dev, dst); |
3513 | } | 3686 | } |
3514 | 3687 | ||
3515 | static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) | 3688 | static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) |
@@ -3554,7 +3727,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) | |||
3554 | dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) | 3727 | dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) |
3555 | return -EOPNOTSUPP; | 3728 | return -EOPNOTSUPP; |
3556 | 3729 | ||
3557 | return rdev->ops->change_bss(&rdev->wiphy, dev, ¶ms); | 3730 | return rdev_change_bss(rdev, dev, ¶ms); |
3558 | } | 3731 | } |
3559 | 3732 | ||
3560 | static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = { | 3733 | static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = { |
@@ -3668,8 +3841,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, | |||
3668 | if (!wdev->mesh_id_len) | 3841 | if (!wdev->mesh_id_len) |
3669 | memcpy(&cur_params, &default_mesh_config, sizeof(cur_params)); | 3842 | memcpy(&cur_params, &default_mesh_config, sizeof(cur_params)); |
3670 | else | 3843 | else |
3671 | err = rdev->ops->get_mesh_config(&rdev->wiphy, dev, | 3844 | err = rdev_get_mesh_config(rdev, dev, &cur_params); |
3672 | &cur_params); | ||
3673 | wdev_unlock(wdev); | 3845 | wdev_unlock(wdev); |
3674 | 3846 | ||
3675 | if (err) | 3847 | if (err) |
@@ -3971,8 +4143,7 @@ static int nl80211_update_mesh_config(struct sk_buff *skb, | |||
3971 | err = -ENOLINK; | 4143 | err = -ENOLINK; |
3972 | 4144 | ||
3973 | if (!err) | 4145 | if (!err) |
3974 | err = rdev->ops->update_mesh_config(&rdev->wiphy, dev, | 4146 | err = rdev_update_mesh_config(rdev, dev, mask, &cfg); |
3975 | mask, &cfg); | ||
3976 | 4147 | ||
3977 | wdev_unlock(wdev); | 4148 | wdev_unlock(wdev); |
3978 | 4149 | ||
@@ -4337,14 +4508,27 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
4337 | } | 4508 | } |
4338 | } | 4509 | } |
4339 | 4510 | ||
4511 | if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) { | ||
4512 | request->flags = nla_get_u32( | ||
4513 | info->attrs[NL80211_ATTR_SCAN_FLAGS]); | ||
4514 | if (((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && | ||
4515 | !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) || | ||
4516 | ((request->flags & NL80211_SCAN_FLAG_FLUSH) && | ||
4517 | !(wiphy->features & NL80211_FEATURE_SCAN_FLUSH))) { | ||
4518 | err = -EOPNOTSUPP; | ||
4519 | goto out_free; | ||
4520 | } | ||
4521 | } | ||
4522 | |||
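The new NL80211_ATTR_SCAN_FLAGS handling above (repeated for scheduled scans further down) refuses any flag the wiphy does not advertise as a feature. A stand-alone sketch of that check, with hypothetical flag/feature constants mirroring LOW_PRIORITY and FLUSH:

    #include <stdbool.h>
    #include <stdio.h>

    #define SCAN_FLAG_LOW_PRIORITY     0x1
    #define SCAN_FLAG_FLUSH            0x2

    #define FEATURE_LOW_PRIORITY_SCAN  0x1
    #define FEATURE_SCAN_FLUSH         0x2

    /* Returns true when every requested flag is backed by a wiphy feature. */
    static bool scan_flags_supported(unsigned int flags, unsigned int features)
    {
        if ((flags & SCAN_FLAG_LOW_PRIORITY) && !(features & FEATURE_LOW_PRIORITY_SCAN))
            return false;
        if ((flags & SCAN_FLAG_FLUSH) && !(features & FEATURE_SCAN_FLUSH))
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", scan_flags_supported(SCAN_FLAG_FLUSH, FEATURE_SCAN_FLUSH));        /* 1 */
        printf("%d\n", scan_flags_supported(SCAN_FLAG_LOW_PRIORITY, FEATURE_SCAN_FLUSH)); /* 0 */
        return 0;
    }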
4340 | request->no_cck = | 4523 | request->no_cck = |
4341 | nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); | 4524 | nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); |
4342 | 4525 | ||
4343 | request->wdev = wdev; | 4526 | request->wdev = wdev; |
4344 | request->wiphy = &rdev->wiphy; | 4527 | request->wiphy = &rdev->wiphy; |
4528 | request->scan_start = jiffies; | ||
4345 | 4529 | ||
4346 | rdev->scan_req = request; | 4530 | rdev->scan_req = request; |
4347 | err = rdev->ops->scan(&rdev->wiphy, request); | 4531 | err = rdev_scan(rdev, request); |
4348 | 4532 | ||
4349 | if (!err) { | 4533 | if (!err) { |
4350 | nl80211_send_scan_start(rdev, wdev); | 4534 | nl80211_send_scan_start(rdev, wdev); |
@@ -4568,11 +4752,24 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, | |||
4568 | request->ie_len); | 4752 | request->ie_len); |
4569 | } | 4753 | } |
4570 | 4754 | ||
4755 | if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) { | ||
4756 | request->flags = nla_get_u32( | ||
4757 | info->attrs[NL80211_ATTR_SCAN_FLAGS]); | ||
4758 | if (((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && | ||
4759 | !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) || | ||
4760 | ((request->flags & NL80211_SCAN_FLAG_FLUSH) && | ||
4761 | !(wiphy->features & NL80211_FEATURE_SCAN_FLUSH))) { | ||
4762 | err = -EOPNOTSUPP; | ||
4763 | goto out_free; | ||
4764 | } | ||
4765 | } | ||
4766 | |||
4571 | request->dev = dev; | 4767 | request->dev = dev; |
4572 | request->wiphy = &rdev->wiphy; | 4768 | request->wiphy = &rdev->wiphy; |
4573 | request->interval = interval; | 4769 | request->interval = interval; |
4770 | request->scan_start = jiffies; | ||
4574 | 4771 | ||
4575 | err = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request); | 4772 | err = rdev_sched_scan_start(rdev, dev, request); |
4576 | if (!err) { | 4773 | if (!err) { |
4577 | rdev->sched_scan_req = request; | 4774 | rdev->sched_scan_req = request; |
4578 | nl80211_send_sched_scan(rdev, dev, | 4775 | nl80211_send_sched_scan(rdev, dev, |
@@ -4815,8 +5012,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, | |||
4815 | while (1) { | 5012 | while (1) { |
4816 | struct ieee80211_channel *chan; | 5013 | struct ieee80211_channel *chan; |
4817 | 5014 | ||
4818 | res = dev->ops->dump_survey(&dev->wiphy, netdev, survey_idx, | 5015 | res = rdev_dump_survey(dev, netdev, survey_idx, &survey); |
4819 | &survey); | ||
4820 | if (res == -ENOENT) | 5016 | if (res == -ENOENT) |
4821 | break; | 5017 | break; |
4822 | if (res) | 5018 | if (res) |
@@ -4852,11 +5048,6 @@ static int nl80211_dump_survey(struct sk_buff *skb, | |||
4852 | return res; | 5048 | return res; |
4853 | } | 5049 | } |
4854 | 5050 | ||
4855 | static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type) | ||
4856 | { | ||
4857 | return auth_type <= NL80211_AUTHTYPE_MAX; | ||
4858 | } | ||
4859 | |||
4860 | static bool nl80211_valid_wpa_versions(u32 wpa_versions) | 5051 | static bool nl80211_valid_wpa_versions(u32 wpa_versions) |
4861 | { | 5052 | { |
4862 | return !(wpa_versions & ~(NL80211_WPA_VERSION_1 | | 5053 | return !(wpa_versions & ~(NL80211_WPA_VERSION_1 | |
@@ -4868,8 +5059,8 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) | |||
4868 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; | 5059 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; |
4869 | struct net_device *dev = info->user_ptr[1]; | 5060 | struct net_device *dev = info->user_ptr[1]; |
4870 | struct ieee80211_channel *chan; | 5061 | struct ieee80211_channel *chan; |
4871 | const u8 *bssid, *ssid, *ie = NULL; | 5062 | const u8 *bssid, *ssid, *ie = NULL, *sae_data = NULL; |
4872 | int err, ssid_len, ie_len = 0; | 5063 | int err, ssid_len, ie_len = 0, sae_data_len = 0; |
4873 | enum nl80211_auth_type auth_type; | 5064 | enum nl80211_auth_type auth_type; |
4874 | struct key_parse key; | 5065 | struct key_parse key; |
4875 | bool local_state_change; | 5066 | bool local_state_change; |
@@ -4945,9 +5136,23 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) | |||
4945 | } | 5136 | } |
4946 | 5137 | ||
4947 | auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); | 5138 | auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); |
4948 | if (!nl80211_valid_auth_type(auth_type)) | 5139 | if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_AUTHENTICATE)) |
4949 | return -EINVAL; | 5140 | return -EINVAL; |
4950 | 5141 | ||
5142 | if (auth_type == NL80211_AUTHTYPE_SAE && | ||
5143 | !info->attrs[NL80211_ATTR_SAE_DATA]) | ||
5144 | return -EINVAL; | ||
5145 | |||
5146 | if (info->attrs[NL80211_ATTR_SAE_DATA]) { | ||
5147 | if (auth_type != NL80211_AUTHTYPE_SAE) | ||
5148 | return -EINVAL; | ||
5149 | sae_data = nla_data(info->attrs[NL80211_ATTR_SAE_DATA]); | ||
5150 | sae_data_len = nla_len(info->attrs[NL80211_ATTR_SAE_DATA]); | ||
5151 | /* need to include at least Auth Transaction and Status Code */ | ||
5152 | if (sae_data_len < 4) | ||
5153 | return -EINVAL; | ||
5154 | } | ||
5155 | |||
4951 | local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; | 5156 | local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; |
4952 | 5157 | ||
4953 | /* | 5158 | /* |
@@ -4959,7 +5164,8 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) | |||
4959 | 5164 | ||
4960 | return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, | 5165 | return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, |
4961 | ssid, ssid_len, ie, ie_len, | 5166 | ssid, ssid_len, ie, ie_len, |
4962 | key.p.key, key.p.key_len, key.idx); | 5167 | key.p.key, key.p.key_len, key.idx, |
5168 | sae_data, sae_data_len); | ||
4963 | } | 5169 | } |
4964 | 5170 | ||
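The 4-byte minimum enforced on NL80211_ATTR_SAE_DATA above comes from the SAE Authentication frame body, which must at least carry the 16-bit Authentication Transaction Sequence Number and the 16-bit Status Code before any SAE-specific fields. A sketch of that sanity check on a raw buffer (illustrative only):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /*
     * SAE data starts with:
     *   2 bytes  Authentication Transaction Sequence Number
     *   2 bytes  Status Code
     * so anything shorter than 4 bytes cannot be valid.
     */
    static bool sae_data_ok(const unsigned char *data, size_t len)
    {
        return data && len >= 4;
    }

    int main(void)
    {
        unsigned char commit[] = { 0x01, 0x00, 0x00, 0x00 }; /* transaction 1, status 0 */

        printf("%d\n", sae_data_ok(commit, sizeof(commit))); /* 1 */
        printf("%d\n", sae_data_ok(commit, 2));              /* 0 */
        return 0;
    }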
4965 | static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, | 5171 | static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, |
@@ -5250,8 +5456,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) | |||
5250 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) | 5456 | if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) |
5251 | return -EINVAL; | 5457 | return -EINVAL; |
5252 | 5458 | ||
5253 | if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || | 5459 | if (!info->attrs[NL80211_ATTR_SSID] || |
5254 | !info->attrs[NL80211_ATTR_SSID] || | ||
5255 | !nla_len(info->attrs[NL80211_ATTR_SSID])) | 5460 | !nla_len(info->attrs[NL80211_ATTR_SSID])) |
5256 | return -EINVAL; | 5461 | return -EINVAL; |
5257 | 5462 | ||
@@ -5286,35 +5491,17 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) | |||
5286 | ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); | 5491 | ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); |
5287 | } | 5492 | } |
5288 | 5493 | ||
5289 | if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { | 5494 | err = nl80211_parse_chandef(rdev, info, &ibss.chandef); |
5290 | enum nl80211_channel_type channel_type; | 5495 | if (err) |
5291 | 5496 | return err; | |
5292 | if (!nl80211_valid_channel_type(info, &channel_type)) | ||
5293 | return -EINVAL; | ||
5294 | |||
5295 | if (channel_type != NL80211_CHAN_NO_HT && | ||
5296 | !(wiphy->features & NL80211_FEATURE_HT_IBSS)) | ||
5297 | return -EINVAL; | ||
5298 | |||
5299 | ibss.channel_type = channel_type; | ||
5300 | } else { | ||
5301 | ibss.channel_type = NL80211_CHAN_NO_HT; | ||
5302 | } | ||
5303 | 5497 | ||
5304 | ibss.channel = rdev_freq_to_chan(rdev, | 5498 | if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef)) |
5305 | nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]), | ||
5306 | ibss.channel_type); | ||
5307 | if (!ibss.channel || | ||
5308 | ibss.channel->flags & IEEE80211_CHAN_NO_IBSS || | ||
5309 | ibss.channel->flags & IEEE80211_CHAN_DISABLED) | ||
5310 | return -EINVAL; | 5499 | return -EINVAL; |
5311 | 5500 | ||
5312 | /* Both channels should be able to initiate communication */ | 5501 | if (ibss.chandef.width > NL80211_CHAN_WIDTH_40) |
5313 | if ((ibss.channel_type == NL80211_CHAN_HT40PLUS || | ||
5314 | ibss.channel_type == NL80211_CHAN_HT40MINUS) && | ||
5315 | !cfg80211_can_beacon_sec_chan(&rdev->wiphy, ibss.channel, | ||
5316 | ibss.channel_type)) | ||
5317 | return -EINVAL; | 5502 | return -EINVAL; |
5503 | if (ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT && | ||
5504 | !(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS)) | ||
5504 | !(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS)) | ||
5505 | return -EINVAL; | ||
5318 | 5505 | ||
5319 | ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED]; | 5506 | ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED]; |
5320 | ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; | 5507 | ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; |
@@ -5325,7 +5512,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) | |||
5325 | int n_rates = | 5512 | int n_rates = |
5326 | nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); | 5513 | nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); |
5327 | struct ieee80211_supported_band *sband = | 5514 | struct ieee80211_supported_band *sband = |
5328 | wiphy->bands[ibss.channel->band]; | 5515 | wiphy->bands[ibss.chandef.chan->band]; |
5329 | 5516 | ||
5330 | err = ieee80211_get_ratemask(sband, rates, n_rates, | 5517 | err = ieee80211_get_ratemask(sband, rates, n_rates, |
5331 | &ibss.basic_rates); | 5518 | &ibss.basic_rates); |
@@ -5339,10 +5526,19 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) | |||
5339 | return -EINVAL; | 5526 | return -EINVAL; |
5340 | 5527 | ||
5341 | if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) { | 5528 | if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) { |
5529 | bool no_ht = false; | ||
5530 | |||
5342 | connkeys = nl80211_parse_connkeys(rdev, | 5531 | connkeys = nl80211_parse_connkeys(rdev, |
5343 | info->attrs[NL80211_ATTR_KEYS]); | 5532 | info->attrs[NL80211_ATTR_KEYS], |
5533 | &no_ht); | ||
5344 | if (IS_ERR(connkeys)) | 5534 | if (IS_ERR(connkeys)) |
5345 | return PTR_ERR(connkeys); | 5535 | return PTR_ERR(connkeys); |
5536 | |||
5537 | if ((ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) && | ||
5538 | no_ht) { | ||
5539 | kfree(connkeys); | ||
5540 | return -EINVAL; | ||
5541 | } | ||
5346 | } | 5542 | } |
5347 | 5543 | ||
5348 | ibss.control_port = | 5544 | ibss.control_port = |
@@ -5368,6 +5564,36 @@ static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info) | |||
5368 | return cfg80211_leave_ibss(rdev, dev, false); | 5564 | return cfg80211_leave_ibss(rdev, dev, false); |
5369 | } | 5565 | } |
5370 | 5566 | ||
5567 | static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info) | ||
5568 | { | ||
5569 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; | ||
5570 | struct net_device *dev = info->user_ptr[1]; | ||
5571 | int mcast_rate[IEEE80211_NUM_BANDS]; | ||
5572 | u32 nla_rate; | ||
5573 | int err; | ||
5574 | |||
5575 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC && | ||
5576 | dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) | ||
5577 | return -EOPNOTSUPP; | ||
5578 | |||
5579 | if (!rdev->ops->set_mcast_rate) | ||
5580 | return -EOPNOTSUPP; | ||
5581 | |||
5582 | memset(mcast_rate, 0, sizeof(mcast_rate)); | ||
5583 | |||
5584 | if (!info->attrs[NL80211_ATTR_MCAST_RATE]) | ||
5585 | return -EINVAL; | ||
5586 | |||
5587 | nla_rate = nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE]); | ||
5588 | if (!nl80211_parse_mcast_rate(rdev, mcast_rate, nla_rate)) | ||
5589 | return -EINVAL; | ||
5590 | |||
5591 | err = rdev->ops->set_mcast_rate(&rdev->wiphy, dev, mcast_rate); | ||
5592 | |||
5593 | return err; | ||
5594 | } | ||
5595 | |||
5596 | |||
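nl80211_parse_mcast_rate() is not part of this hunk; conceptually it maps the u32 rate attribute (in 100 kbit/s units) onto a per-band legacy rate index. A simplified, hypothetical version with fixed rate tables instead of the wiphy's band data (the real helper walks wiphy->bands):

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_BANDS 2

    /* Legacy rates per band in 100 kbit/s units (2.4 GHz, 5 GHz), illustrative. */
    static const int band_rates[NUM_BANDS][8] = {
        { 10, 20, 55, 110, 60, 120, 240, 540 },
        { 60, 90, 120, 180, 240, 360, 480, 540 },
    };

    /*
     * Fill mcast_rate[band] with the 1-based index of the matching legacy
     * rate, leaving it 0 when the band does not support that rate.
     * Returns true if at least one band matched.
     */
    static bool parse_mcast_rate(int mcast_rate[NUM_BANDS], int rate)
    {
        bool found = false;
        int band, i;

        for (band = 0; band < NUM_BANDS; band++) {
            mcast_rate[band] = 0;
            for (i = 0; i < 8; i++) {
                if (band_rates[band][i] == rate) {
                    mcast_rate[band] = i + 1;
                    found = true;
                    break;
                }
            }
        }
        return found;
    }

    int main(void)
    {
        int mcast_rate[NUM_BANDS] = { 0, 0 };
        bool ok = parse_mcast_rate(mcast_rate, 240);

        printf("%d -> [%d, %d]\n", ok, mcast_rate[0], mcast_rate[1]);
        return 0;
    }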
5371 | #ifdef CONFIG_NL80211_TESTMODE | 5597 | #ifdef CONFIG_NL80211_TESTMODE |
5372 | static struct genl_multicast_group nl80211_testmode_mcgrp = { | 5598 | static struct genl_multicast_group nl80211_testmode_mcgrp = { |
5373 | .name = "testmode", | 5599 | .name = "testmode", |
@@ -5384,7 +5610,7 @@ static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info) | |||
5384 | err = -EOPNOTSUPP; | 5610 | err = -EOPNOTSUPP; |
5385 | if (rdev->ops->testmode_cmd) { | 5611 | if (rdev->ops->testmode_cmd) { |
5386 | rdev->testmode_info = info; | 5612 | rdev->testmode_info = info; |
5387 | err = rdev->ops->testmode_cmd(&rdev->wiphy, | 5613 | err = rdev_testmode_cmd(rdev, |
5388 | nla_data(info->attrs[NL80211_ATTR_TESTDATA]), | 5614 | nla_data(info->attrs[NL80211_ATTR_TESTDATA]), |
5389 | nla_len(info->attrs[NL80211_ATTR_TESTDATA])); | 5615 | nla_len(info->attrs[NL80211_ATTR_TESTDATA])); |
5390 | rdev->testmode_info = NULL; | 5616 | rdev->testmode_info = NULL; |
@@ -5466,8 +5692,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb, | |||
5466 | genlmsg_cancel(skb, hdr); | 5692 | genlmsg_cancel(skb, hdr); |
5467 | break; | 5693 | break; |
5468 | } | 5694 | } |
5469 | err = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb, | 5695 | err = rdev_testmode_dump(rdev, skb, cb, data, data_len); |
5470 | data, data_len); | ||
5471 | nla_nest_end(skb, tmdata); | 5696 | nla_nest_end(skb, tmdata); |
5472 | 5697 | ||
5473 | if (err == -ENOBUFS || err == -ENOENT) { | 5698 | if (err == -ENOBUFS || err == -ENOENT) { |
@@ -5596,7 +5821,8 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) | |||
5596 | if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { | 5821 | if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { |
5597 | connect.auth_type = | 5822 | connect.auth_type = |
5598 | nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); | 5823 | nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); |
5599 | if (!nl80211_valid_auth_type(connect.auth_type)) | 5824 | if (!nl80211_valid_auth_type(rdev, connect.auth_type, |
5825 | NL80211_CMD_CONNECT)) | ||
5600 | return -EINVAL; | 5826 | return -EINVAL; |
5601 | } else | 5827 | } else |
5602 | connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; | 5828 | connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; |
@@ -5642,7 +5868,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) | |||
5642 | 5868 | ||
5643 | if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) { | 5869 | if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) { |
5644 | connkeys = nl80211_parse_connkeys(rdev, | 5870 | connkeys = nl80211_parse_connkeys(rdev, |
5645 | info->attrs[NL80211_ATTR_KEYS]); | 5871 | info->attrs[NL80211_ATTR_KEYS], NULL); |
5646 | if (IS_ERR(connkeys)) | 5872 | if (IS_ERR(connkeys)) |
5647 | return PTR_ERR(connkeys); | 5873 | return PTR_ERR(connkeys); |
5648 | } | 5874 | } |
@@ -5771,7 +5997,7 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info) | |||
5771 | if (!rdev->ops->flush_pmksa) | 5997 | if (!rdev->ops->flush_pmksa) |
5772 | return -EOPNOTSUPP; | 5998 | return -EOPNOTSUPP; |
5773 | 5999 | ||
5774 | return rdev->ops->flush_pmksa(&rdev->wiphy, dev); | 6000 | return rdev_flush_pmksa(rdev, dev); |
5775 | } | 6001 | } |
5776 | 6002 | ||
5777 | static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info) | 6003 | static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info) |
@@ -5798,10 +6024,10 @@ static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info) | |||
5798 | status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]); | 6024 | status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]); |
5799 | dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]); | 6025 | dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]); |
5800 | 6026 | ||
5801 | return rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code, | 6027 | return rdev_tdls_mgmt(rdev, dev, peer, action_code, |
5802 | dialog_token, status_code, | 6028 | dialog_token, status_code, |
5803 | nla_data(info->attrs[NL80211_ATTR_IE]), | 6029 | nla_data(info->attrs[NL80211_ATTR_IE]), |
5804 | nla_len(info->attrs[NL80211_ATTR_IE])); | 6030 | nla_len(info->attrs[NL80211_ATTR_IE])); |
5805 | } | 6031 | } |
5806 | 6032 | ||
5807 | static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info) | 6033 | static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info) |
@@ -5822,7 +6048,7 @@ static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info) | |||
5822 | operation = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_OPERATION]); | 6048 | operation = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_OPERATION]); |
5823 | peer = nla_data(info->attrs[NL80211_ATTR_MAC]); | 6049 | peer = nla_data(info->attrs[NL80211_ATTR_MAC]); |
5824 | 6050 | ||
5825 | return rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, operation); | 6051 | return rdev_tdls_oper(rdev, dev, peer, operation); |
5826 | } | 6052 | } |
5827 | 6053 | ||
5828 | static int nl80211_remain_on_channel(struct sk_buff *skb, | 6054 | static int nl80211_remain_on_channel(struct sk_buff *skb, |
@@ -5830,12 +6056,11 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, | |||
5830 | { | 6056 | { |
5831 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; | 6057 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; |
5832 | struct wireless_dev *wdev = info->user_ptr[1]; | 6058 | struct wireless_dev *wdev = info->user_ptr[1]; |
5833 | struct ieee80211_channel *chan; | 6059 | struct cfg80211_chan_def chandef; |
5834 | struct sk_buff *msg; | 6060 | struct sk_buff *msg; |
5835 | void *hdr; | 6061 | void *hdr; |
5836 | u64 cookie; | 6062 | u64 cookie; |
5837 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; | 6063 | u32 duration; |
5838 | u32 freq, duration; | ||
5839 | int err; | 6064 | int err; |
5840 | 6065 | ||
5841 | if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || | 6066 | if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || |
@@ -5856,14 +6081,9 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, | |||
5856 | duration > rdev->wiphy.max_remain_on_channel_duration) | 6081 | duration > rdev->wiphy.max_remain_on_channel_duration) |
5857 | return -EINVAL; | 6082 | return -EINVAL; |
5858 | 6083 | ||
5859 | if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] && | 6084 | err = nl80211_parse_chandef(rdev, info, &chandef); |
5860 | !nl80211_valid_channel_type(info, &channel_type)) | 6085 | if (err) |
5861 | return -EINVAL; | 6086 | return err; |
5862 | |||
5863 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); | ||
5864 | chan = rdev_freq_to_chan(rdev, freq, channel_type); | ||
5865 | if (chan == NULL) | ||
5866 | return -EINVAL; | ||
5867 | 6087 | ||
5868 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 6088 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
5869 | if (!msg) | 6089 | if (!msg) |
@@ -5877,8 +6097,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, | |||
5877 | goto free_msg; | 6097 | goto free_msg; |
5878 | } | 6098 | } |
5879 | 6099 | ||
5880 | err = rdev->ops->remain_on_channel(&rdev->wiphy, wdev, chan, | 6100 | err = rdev_remain_on_channel(rdev, wdev, chandef.chan, |
5881 | channel_type, duration, &cookie); | 6101 | duration, &cookie); |
5882 | 6102 | ||
5883 | if (err) | 6103 | if (err) |
5884 | goto free_msg; | 6104 | goto free_msg; |
@@ -5912,7 +6132,7 @@ static int nl80211_cancel_remain_on_channel(struct sk_buff *skb, | |||
5912 | 6132 | ||
5913 | cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); | 6133 | cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); |
5914 | 6134 | ||
5915 | return rdev->ops->cancel_remain_on_channel(&rdev->wiphy, wdev, cookie); | 6135 | return rdev_cancel_remain_on_channel(rdev, wdev, cookie); |
5916 | } | 6136 | } |
5917 | 6137 | ||
5918 | static u32 rateset_to_mask(struct ieee80211_supported_band *sband, | 6138 | static u32 rateset_to_mask(struct ieee80211_supported_band *sband, |
@@ -6055,7 +6275,7 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb, | |||
6055 | } | 6275 | } |
6056 | } | 6276 | } |
6057 | 6277 | ||
6058 | return rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, NULL, &mask); | 6278 | return rdev_set_bitrate_mask(rdev, dev, NULL, &mask); |
6059 | } | 6279 | } |
6060 | 6280 | ||
6061 | static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info) | 6281 | static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info) |
@@ -6097,10 +6317,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) | |||
6097 | { | 6317 | { |
6098 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; | 6318 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; |
6099 | struct wireless_dev *wdev = info->user_ptr[1]; | 6319 | struct wireless_dev *wdev = info->user_ptr[1]; |
6100 | struct ieee80211_channel *chan; | 6320 | struct cfg80211_chan_def chandef; |
6101 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; | ||
6102 | bool channel_type_valid = false; | ||
6103 | u32 freq; | ||
6104 | int err; | 6321 | int err; |
6105 | void *hdr = NULL; | 6322 | void *hdr = NULL; |
6106 | u64 cookie; | 6323 | u64 cookie; |
@@ -6110,8 +6327,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) | |||
6110 | 6327 | ||
6111 | dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK]; | 6328 | dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK]; |
6112 | 6329 | ||
6113 | if (!info->attrs[NL80211_ATTR_FRAME] || | 6330 | if (!info->attrs[NL80211_ATTR_FRAME]) |
6114 | !info->attrs[NL80211_ATTR_WIPHY_FREQ]) | ||
6115 | return -EINVAL; | 6331 | return -EINVAL; |
6116 | 6332 | ||
6117 | if (!rdev->ops->mgmt_tx) | 6333 | if (!rdev->ops->mgmt_tx) |
@@ -6146,12 +6362,6 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) | |||
6146 | 6362 | ||
6147 | } | 6363 | } |
6148 | 6364 | ||
6149 | if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { | ||
6150 | if (!nl80211_valid_channel_type(info, &channel_type)) | ||
6151 | return -EINVAL; | ||
6152 | channel_type_valid = true; | ||
6153 | } | ||
6154 | |||
6155 | offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK]; | 6365 | offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK]; |
6156 | 6366 | ||
6157 | if (offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) | 6367 | if (offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) |
@@ -6159,10 +6369,9 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) | |||
6159 | 6369 | ||
6160 | no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); | 6370 | no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); |
6161 | 6371 | ||
6162 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); | 6372 | err = nl80211_parse_chandef(rdev, info, &chandef); |
6163 | chan = rdev_freq_to_chan(rdev, freq, channel_type); | 6373 | if (err) |
6164 | if (chan == NULL) | 6374 | return err; |
6165 | return -EINVAL; | ||
6166 | 6375 | ||
6167 | if (!dont_wait_for_ack) { | 6376 | if (!dont_wait_for_ack) { |
6168 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 6377 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
@@ -6178,8 +6387,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) | |||
6178 | } | 6387 | } |
6179 | } | 6388 | } |
6180 | 6389 | ||
6181 | err = cfg80211_mlme_mgmt_tx(rdev, wdev, chan, offchan, channel_type, | 6390 | err = cfg80211_mlme_mgmt_tx(rdev, wdev, chandef.chan, offchan, wait, |
6182 | channel_type_valid, wait, | ||
6183 | nla_data(info->attrs[NL80211_ATTR_FRAME]), | 6391 | nla_data(info->attrs[NL80211_ATTR_FRAME]), |
6184 | nla_len(info->attrs[NL80211_ATTR_FRAME]), | 6392 | nla_len(info->attrs[NL80211_ATTR_FRAME]), |
6185 | no_cck, dont_wait_for_ack, &cookie); | 6393 | no_cck, dont_wait_for_ack, &cookie); |
@@ -6230,7 +6438,7 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in | |||
6230 | 6438 | ||
6231 | cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); | 6439 | cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); |
6232 | 6440 | ||
6233 | return rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie); | 6441 | return rdev_mgmt_tx_cancel_wait(rdev, wdev, cookie); |
6234 | } | 6442 | } |
6235 | 6443 | ||
6236 | static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info) | 6444 | static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info) |
@@ -6260,8 +6468,7 @@ static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info) | |||
6260 | if (state == wdev->ps) | 6468 | if (state == wdev->ps) |
6261 | return 0; | 6469 | return 0; |
6262 | 6470 | ||
6263 | err = rdev->ops->set_power_mgmt(wdev->wiphy, dev, state, | 6471 | err = rdev_set_power_mgmt(rdev, dev, state, wdev->ps_timeout); |
6264 | wdev->ps_timeout); | ||
6265 | if (!err) | 6472 | if (!err) |
6266 | wdev->ps = state; | 6473 | wdev->ps = state; |
6267 | return err; | 6474 | return err; |
@@ -6341,8 +6548,7 @@ static int nl80211_set_cqm_txe(struct genl_info *info, | |||
6341 | wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) | 6548 | wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) |
6342 | return -EOPNOTSUPP; | 6549 | return -EOPNOTSUPP; |
6343 | 6550 | ||
6344 | return rdev->ops->set_cqm_txe_config(wdev->wiphy, dev, | 6551 | return rdev_set_cqm_txe_config(rdev, dev, rate, pkts, intvl); |
6345 | rate, pkts, intvl); | ||
6346 | } | 6552 | } |
6347 | 6553 | ||
6348 | static int nl80211_set_cqm_rssi(struct genl_info *info, | 6554 | static int nl80211_set_cqm_rssi(struct genl_info *info, |
@@ -6364,8 +6570,7 @@ static int nl80211_set_cqm_rssi(struct genl_info *info, | |||
6364 | wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) | 6570 | wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) |
6365 | return -EOPNOTSUPP; | 6571 | return -EOPNOTSUPP; |
6366 | 6572 | ||
6367 | return rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev, | 6573 | return rdev_set_cqm_rssi_config(rdev, dev, threshold, hysteresis); |
6368 | threshold, hysteresis); | ||
6369 | } | 6574 | } |
6370 | 6575 | ||
6371 | static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info) | 6576 | static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info) |
@@ -6446,21 +6651,12 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info) | |||
6446 | } | 6651 | } |
6447 | 6652 | ||
6448 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { | 6653 | if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { |
6449 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; | 6654 | err = nl80211_parse_chandef(rdev, info, &setup.chandef); |
6450 | 6655 | if (err) | |
6451 | if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] && | 6656 | return err; |
6452 | !nl80211_valid_channel_type(info, &channel_type)) | ||
6453 | return -EINVAL; | ||
6454 | |||
6455 | setup.channel = rdev_freq_to_chan(rdev, | ||
6456 | nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]), | ||
6457 | channel_type); | ||
6458 | if (!setup.channel) | ||
6459 | return -EINVAL; | ||
6460 | setup.channel_type = channel_type; | ||
6461 | } else { | 6657 | } else { |
6462 | /* cfg80211_join_mesh() will sort it out */ | 6658 | /* cfg80211_join_mesh() will sort it out */ |
6463 | setup.channel = NULL; | 6659 | setup.chandef.chan = NULL; |
6464 | } | 6660 | } |
6465 | 6661 | ||
6466 | return cfg80211_join_mesh(rdev, dev, &setup, &cfg); | 6662 | return cfg80211_join_mesh(rdev, dev, &setup, &cfg); |
@@ -6690,7 +6886,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) | |||
6690 | 6886 | ||
6691 | set_wakeup: | 6887 | set_wakeup: |
6692 | if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan) | 6888 | if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan) |
6693 | rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan); | 6889 | rdev_set_wakeup(rdev, rdev->wowlan); |
6694 | 6890 | ||
6695 | return 0; | 6891 | return 0; |
6696 | error: | 6892 | error: |
@@ -6746,7 +6942,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) | |||
6746 | goto out; | 6942 | goto out; |
6747 | } | 6943 | } |
6748 | 6944 | ||
6749 | err = rdev->ops->set_rekey_data(&rdev->wiphy, dev, &rekey_data); | 6945 | err = rdev_set_rekey_data(rdev, dev, &rekey_data); |
6750 | out: | 6946 | out: |
6751 | wdev_unlock(wdev); | 6947 | wdev_unlock(wdev); |
6752 | return err; | 6948 | return err; |
@@ -6805,7 +7001,7 @@ static int nl80211_probe_client(struct sk_buff *skb, | |||
6805 | 7001 | ||
6806 | addr = nla_data(info->attrs[NL80211_ATTR_MAC]); | 7002 | addr = nla_data(info->attrs[NL80211_ATTR_MAC]); |
6807 | 7003 | ||
6808 | err = rdev->ops->probe_client(&rdev->wiphy, dev, addr, &cookie); | 7004 | err = rdev_probe_client(rdev, dev, addr, &cookie); |
6809 | if (err) | 7005 | if (err) |
6810 | goto free_msg; | 7006 | goto free_msg; |
6811 | 7007 | ||
@@ -6826,16 +7022,35 @@ static int nl80211_probe_client(struct sk_buff *skb, | |||
6826 | static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info) | 7022 | static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info) |
6827 | { | 7023 | { |
6828 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; | 7024 | struct cfg80211_registered_device *rdev = info->user_ptr[0]; |
7025 | struct cfg80211_beacon_registration *reg, *nreg; | ||
7026 | int rv; | ||
6829 | 7027 | ||
6830 | if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS)) | 7028 | if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS)) |
6831 | return -EOPNOTSUPP; | 7029 | return -EOPNOTSUPP; |
6832 | 7030 | ||
6833 | if (rdev->ap_beacons_nlportid) | 7031 | nreg = kzalloc(sizeof(*nreg), GFP_KERNEL); |
6834 | return -EBUSY; | 7032 | if (!nreg) |
7033 | return -ENOMEM; | ||
6835 | 7034 | ||
6836 | rdev->ap_beacons_nlportid = info->snd_portid; | 7035 | /* First, check if already registered. */ |
7036 | spin_lock_bh(&rdev->beacon_registrations_lock); | ||
7037 | list_for_each_entry(reg, &rdev->beacon_registrations, list) { | ||
7038 | if (reg->nlportid == info->snd_portid) { | ||
7039 | rv = -EALREADY; | ||
7040 | goto out_err; | ||
7041 | } | ||
7042 | } | ||
7043 | /* Add it to the list */ | ||
7044 | nreg->nlportid = info->snd_portid; | ||
7045 | list_add(&nreg->list, &rdev->beacon_registrations); | ||
7046 | |||
7047 | spin_unlock_bh(&rdev->beacon_registrations_lock); | ||
6837 | 7048 | ||
6838 | return 0; | 7049 | return 0; |
7050 | out_err: | ||
7051 | spin_unlock_bh(&rdev->beacon_registrations_lock); | ||
7052 | kfree(nreg); | ||
7053 | return rv; | ||
6839 | } | 7054 | } |
6840 | 7055 | ||
6841 | static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info) | 7056 | static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info) |
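The register-beacons hunk above drops the single rdev->ap_beacons_nlportid slot in favour of a list of per-socket registrations guarded by beacon_registrations_lock, so several listeners can receive OBSS beacon reports and a duplicate registration from the same netlink port fails with -EALREADY. Only the nlportid and list members are visible in the hunk, so the record can be pictured roughly as below (layout assumed, not taken from core.h); the matching removal on socket close is handled later in nl80211_netlink_notify().

    /* Assumed shape of the registration record; only these two members
     * appear in the hunk above. */
    struct cfg80211_beacon_registration {
            struct list_head list;  /* linked on rdev->beacon_registrations */
            u32 nlportid;           /* netlink port that asked for reports */
    };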
@@ -6859,7 +7074,7 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info) | |||
6859 | if (err) | 7074 | if (err) |
6860 | return err; | 7075 | return err; |
6861 | 7076 | ||
6862 | err = rdev->ops->start_p2p_device(&rdev->wiphy, wdev); | 7077 | err = rdev_start_p2p_device(rdev, wdev); |
6863 | if (err) | 7078 | if (err) |
6864 | return err; | 7079 | return err; |
6865 | 7080 | ||
@@ -6885,7 +7100,7 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info) | |||
6885 | if (!wdev->p2p_started) | 7100 | if (!wdev->p2p_started) |
6886 | return 0; | 7101 | return 0; |
6887 | 7102 | ||
6888 | rdev->ops->stop_p2p_device(&rdev->wiphy, wdev); | 7103 | rdev_stop_p2p_device(rdev, wdev); |
6889 | wdev->p2p_started = false; | 7104 | wdev->p2p_started = false; |
6890 | 7105 | ||
6891 | mutex_lock(&rdev->devlist_mtx); | 7106 | mutex_lock(&rdev->devlist_mtx); |
@@ -7552,6 +7767,14 @@ static struct genl_ops nl80211_ops[] = { | |||
7552 | .internal_flags = NL80211_FLAG_NEED_WDEV_UP | | 7767 | .internal_flags = NL80211_FLAG_NEED_WDEV_UP | |
7553 | NL80211_FLAG_NEED_RTNL, | 7768 | NL80211_FLAG_NEED_RTNL, |
7554 | }, | 7769 | }, |
7770 | { | ||
7771 | .cmd = NL80211_CMD_SET_MCAST_RATE, | ||
7772 | .doit = nl80211_set_mcast_rate, | ||
7773 | .policy = nl80211_policy, | ||
7774 | .flags = GENL_ADMIN_PERM, | ||
7775 | .internal_flags = NL80211_FLAG_NEED_NETDEV | | ||
7776 | NL80211_FLAG_NEED_RTNL, | ||
7777 | }, | ||
7555 | }; | 7778 | }; |
7556 | 7779 | ||
7557 | static struct genl_multicast_group nl80211_mlme_mcgrp = { | 7780 | static struct genl_multicast_group nl80211_mlme_mcgrp = { |
@@ -7622,6 +7845,9 @@ static int nl80211_add_scan_req(struct sk_buff *msg, | |||
7622 | nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) | 7845 | nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) |
7623 | goto nla_put_failure; | 7846 | goto nla_put_failure; |
7624 | 7847 | ||
7848 | if (req->flags) | ||
7849 | nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags); | ||
7850 | |||
7625 | return 0; | 7851 | return 0; |
7626 | nla_put_failure: | 7852 | nla_put_failure: |
7627 | return -ENOBUFS; | 7853 | return -ENOBUFS; |
@@ -8250,7 +8476,6 @@ static void nl80211_send_remain_on_chan_event( | |||
8250 | int cmd, struct cfg80211_registered_device *rdev, | 8476 | int cmd, struct cfg80211_registered_device *rdev, |
8251 | struct wireless_dev *wdev, u64 cookie, | 8477 | struct wireless_dev *wdev, u64 cookie, |
8252 | struct ieee80211_channel *chan, | 8478 | struct ieee80211_channel *chan, |
8253 | enum nl80211_channel_type channel_type, | ||
8254 | unsigned int duration, gfp_t gfp) | 8479 | unsigned int duration, gfp_t gfp) |
8255 | { | 8480 | { |
8256 | struct sk_buff *msg; | 8481 | struct sk_buff *msg; |
@@ -8271,7 +8496,8 @@ static void nl80211_send_remain_on_chan_event( | |||
8271 | wdev->netdev->ifindex)) || | 8496 | wdev->netdev->ifindex)) || |
8272 | nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || | 8497 | nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) || |
8273 | nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) || | 8498 | nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) || |
8274 | nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) || | 8499 | nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, |
8500 | NL80211_CHAN_NO_HT) || | ||
8275 | nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) | 8501 | nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie)) |
8276 | goto nla_put_failure; | 8502 | goto nla_put_failure; |
8277 | 8503 | ||
@@ -8293,23 +8519,20 @@ static void nl80211_send_remain_on_chan_event( | |||
8293 | void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev, | 8519 | void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev, |
8294 | struct wireless_dev *wdev, u64 cookie, | 8520 | struct wireless_dev *wdev, u64 cookie, |
8295 | struct ieee80211_channel *chan, | 8521 | struct ieee80211_channel *chan, |
8296 | enum nl80211_channel_type channel_type, | ||
8297 | unsigned int duration, gfp_t gfp) | 8522 | unsigned int duration, gfp_t gfp) |
8298 | { | 8523 | { |
8299 | nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL, | 8524 | nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL, |
8300 | rdev, wdev, cookie, chan, | 8525 | rdev, wdev, cookie, chan, |
8301 | channel_type, duration, gfp); | 8526 | duration, gfp); |
8302 | } | 8527 | } |
8303 | 8528 | ||
8304 | void nl80211_send_remain_on_channel_cancel( | 8529 | void nl80211_send_remain_on_channel_cancel( |
8305 | struct cfg80211_registered_device *rdev, | 8530 | struct cfg80211_registered_device *rdev, |
8306 | struct wireless_dev *wdev, | 8531 | struct wireless_dev *wdev, |
8307 | u64 cookie, struct ieee80211_channel *chan, | 8532 | u64 cookie, struct ieee80211_channel *chan, gfp_t gfp) |
8308 | enum nl80211_channel_type channel_type, gfp_t gfp) | ||
8309 | { | 8533 | { |
8310 | nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, | 8534 | nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, |
8311 | rdev, wdev, cookie, chan, | 8535 | rdev, wdev, cookie, chan, 0, gfp); |
8312 | channel_type, 0, gfp); | ||
8313 | } | 8536 | } |
8314 | 8537 | ||
8315 | void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, | 8538 | void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, |
@@ -8665,8 +8888,8 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, | |||
8665 | } | 8888 | } |
8666 | 8889 | ||
8667 | void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, | 8890 | void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, |
8668 | struct net_device *netdev, int freq, | 8891 | struct net_device *netdev, |
8669 | enum nl80211_channel_type type, gfp_t gfp) | 8892 | struct cfg80211_chan_def *chandef, gfp_t gfp) |
8670 | { | 8893 | { |
8671 | struct sk_buff *msg; | 8894 | struct sk_buff *msg; |
8672 | void *hdr; | 8895 | void *hdr; |
@@ -8681,9 +8904,10 @@ void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, | |||
8681 | return; | 8904 | return; |
8682 | } | 8905 | } |
8683 | 8906 | ||
8684 | if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || | 8907 | if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) |
8685 | nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) || | 8908 | goto nla_put_failure; |
8686 | nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, type)) | 8909 | |
8910 | if (nl80211_send_chandef(msg, chandef)) | ||
8687 | goto nla_put_failure; | 8911 | goto nla_put_failure; |
8688 | 8912 | ||
8689 | genlmsg_end(msg, hdr); | 8913 | genlmsg_end(msg, hdr); |
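nl80211_ch_switch_notify() now takes a struct cfg80211_chan_def and serialises it with nl80211_send_chandef() instead of emitting a bare frequency plus NL80211_ATTR_WIPHY_CHANNEL_TYPE, so the event can describe wider channel configurations. A hedged sketch of a caller building the chandef; cfg80211_chandef_create() is assumed to be the cfg80211 helper that maps a legacy channel type onto a chandef, and HT20 is chosen only as an example width:

    /* Illustrative caller; rdev, netdev and chan stand in for whatever the
     * core already holds at the switch point. */
    static void report_switch(struct cfg80211_registered_device *rdev,
                              struct net_device *netdev,
                              struct ieee80211_channel *chan)
    {
            struct cfg80211_chan_def chandef;

            cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
            nl80211_ch_switch_notify(rdev, netdev, &chandef, GFP_KERNEL);
    }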
@@ -8800,7 +9024,10 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, | |||
8800 | void *hdr; | 9024 | void *hdr; |
8801 | int err; | 9025 | int err; |
8802 | 9026 | ||
9027 | trace_cfg80211_probe_status(dev, addr, cookie, acked); | ||
9028 | |||
8803 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); | 9029 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); |
9030 | |||
8804 | if (!msg) | 9031 | if (!msg) |
8805 | return; | 9032 | return; |
8806 | 9033 | ||
@@ -8835,44 +9062,96 @@ EXPORT_SYMBOL(cfg80211_probe_status); | |||
8835 | 9062 | ||
8836 | void cfg80211_report_obss_beacon(struct wiphy *wiphy, | 9063 | void cfg80211_report_obss_beacon(struct wiphy *wiphy, |
8837 | const u8 *frame, size_t len, | 9064 | const u8 *frame, size_t len, |
8838 | int freq, int sig_dbm, gfp_t gfp) | 9065 | int freq, int sig_dbm) |
8839 | { | 9066 | { |
8840 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 9067 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
8841 | struct sk_buff *msg; | 9068 | struct sk_buff *msg; |
8842 | void *hdr; | 9069 | void *hdr; |
8843 | u32 nlportid = ACCESS_ONCE(rdev->ap_beacons_nlportid); | 9070 | struct cfg80211_beacon_registration *reg; |
8844 | 9071 | ||
8845 | if (!nlportid) | 9072 | trace_cfg80211_report_obss_beacon(wiphy, frame, len, freq, sig_dbm); |
8846 | return; | 9073 | |
9074 | spin_lock_bh(&rdev->beacon_registrations_lock); | ||
9075 | list_for_each_entry(reg, &rdev->beacon_registrations, list) { | ||
9076 | msg = nlmsg_new(len + 100, GFP_ATOMIC); | ||
9077 | if (!msg) { | ||
9078 | spin_unlock_bh(&rdev->beacon_registrations_lock); | ||
9079 | return; | ||
9080 | } | ||
9081 | |||
9082 | hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME); | ||
9083 | if (!hdr) | ||
9084 | goto nla_put_failure; | ||
9085 | |||
9086 | if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || | ||
9087 | (freq && | ||
9088 | nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) || | ||
9089 | (sig_dbm && | ||
9090 | nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || | ||
9091 | nla_put(msg, NL80211_ATTR_FRAME, len, frame)) | ||
9092 | goto nla_put_failure; | ||
9093 | |||
9094 | genlmsg_end(msg, hdr); | ||
9095 | |||
9096 | genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, reg->nlportid); | ||
9097 | } | ||
9098 | spin_unlock_bh(&rdev->beacon_registrations_lock); | ||
9099 | return; | ||
9100 | |||
9101 | nla_put_failure: | ||
9102 | spin_unlock_bh(&rdev->beacon_registrations_lock); | ||
9103 | if (hdr) | ||
9104 | genlmsg_cancel(msg, hdr); | ||
9105 | nlmsg_free(msg); | ||
9106 | } | ||
9107 | EXPORT_SYMBOL(cfg80211_report_obss_beacon); | ||
9108 | |||
9109 | void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer, | ||
9110 | enum nl80211_tdls_operation oper, | ||
9111 | u16 reason_code, gfp_t gfp) | ||
9112 | { | ||
9113 | struct wireless_dev *wdev = dev->ieee80211_ptr; | ||
9114 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | ||
9115 | struct sk_buff *msg; | ||
9116 | void *hdr; | ||
9117 | int err; | ||
9118 | |||
9119 | trace_cfg80211_tdls_oper_request(wdev->wiphy, dev, peer, oper, | ||
9120 | reason_code); | ||
8847 | 9121 | ||
8848 | msg = nlmsg_new(len + 100, gfp); | 9122 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); |
8849 | if (!msg) | 9123 | if (!msg) |
8850 | return; | 9124 | return; |
8851 | 9125 | ||
8852 | hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME); | 9126 | hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_TDLS_OPER); |
8853 | if (!hdr) { | 9127 | if (!hdr) { |
8854 | nlmsg_free(msg); | 9128 | nlmsg_free(msg); |
8855 | return; | 9129 | return; |
8856 | } | 9130 | } |
8857 | 9131 | ||
8858 | if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || | 9132 | if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || |
8859 | (freq && | 9133 | nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || |
8860 | nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) || | 9134 | nla_put_u8(msg, NL80211_ATTR_TDLS_OPERATION, oper) || |
8861 | (sig_dbm && | 9135 | nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer) || |
8862 | nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || | 9136 | (reason_code > 0 && |
8863 | nla_put(msg, NL80211_ATTR_FRAME, len, frame)) | 9137 | nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason_code))) |
8864 | goto nla_put_failure; | 9138 | goto nla_put_failure; |
8865 | 9139 | ||
8866 | genlmsg_end(msg, hdr); | 9140 | err = genlmsg_end(msg, hdr); |
9141 | if (err < 0) { | ||
9142 | nlmsg_free(msg); | ||
9143 | return; | ||
9144 | } | ||
8867 | 9145 | ||
8868 | genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); | 9146 | genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, |
9147 | nl80211_mlme_mcgrp.id, gfp); | ||
8869 | return; | 9148 | return; |
8870 | 9149 | ||
8871 | nla_put_failure: | 9150 | nla_put_failure: |
8872 | genlmsg_cancel(msg, hdr); | 9151 | genlmsg_cancel(msg, hdr); |
8873 | nlmsg_free(msg); | 9152 | nlmsg_free(msg); |
8874 | } | 9153 | } |
8875 | EXPORT_SYMBOL(cfg80211_report_obss_beacon); | 9154 | EXPORT_SYMBOL(cfg80211_tdls_oper_request); |
8876 | 9155 | ||
8877 | static int nl80211_netlink_notify(struct notifier_block * nb, | 9156 | static int nl80211_netlink_notify(struct notifier_block * nb, |
8878 | unsigned long state, | 9157 | unsigned long state, |
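The block above also adds cfg80211_tdls_oper_request(), a new exported helper that lets a driver ask its supplicant to set up or tear down a TDLS link: the request is sent as an NL80211_CMD_TDLS_OPER multicast on the MLME group, carrying the peer address, the requested operation and an optional reason code. A minimal illustrative call from driver code (the surrounding locals are placeholders):

    /* Sketch only: "ndev" is the station netdev, "peer" the peer's MAC. */
    static void ask_userspace_for_tdls(struct net_device *ndev, const u8 *peer)
    {
            /* no reason code is needed for a setup request */
            cfg80211_tdls_oper_request(ndev, peer, NL80211_TDLS_SETUP, 0,
                                       GFP_KERNEL);
    }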
@@ -8881,6 +9160,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb, | |||
8881 | struct netlink_notify *notify = _notify; | 9160 | struct netlink_notify *notify = _notify; |
8882 | struct cfg80211_registered_device *rdev; | 9161 | struct cfg80211_registered_device *rdev; |
8883 | struct wireless_dev *wdev; | 9162 | struct wireless_dev *wdev; |
9163 | struct cfg80211_beacon_registration *reg, *tmp; | ||
8884 | 9164 | ||
8885 | if (state != NETLINK_URELEASE) | 9165 | if (state != NETLINK_URELEASE) |
8886 | return NOTIFY_DONE; | 9166 | return NOTIFY_DONE; |
@@ -8890,8 +9170,17 @@ static int nl80211_netlink_notify(struct notifier_block * nb, | |||
8890 | list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { | 9170 | list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { |
8891 | list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) | 9171 | list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) |
8892 | cfg80211_mlme_unregister_socket(wdev, notify->portid); | 9172 | cfg80211_mlme_unregister_socket(wdev, notify->portid); |
8893 | if (rdev->ap_beacons_nlportid == notify->portid) | 9173 | |
8894 | rdev->ap_beacons_nlportid = 0; | 9174 | spin_lock_bh(&rdev->beacon_registrations_lock); |
9175 | list_for_each_entry_safe(reg, tmp, &rdev->beacon_registrations, | ||
9176 | list) { | ||
9177 | if (reg->nlportid == notify->portid) { | ||
9178 | list_del(®->list); | ||
9179 | kfree(reg); | ||
9180 | break; | ||
9181 | } | ||
9182 | } | ||
9183 | spin_unlock_bh(&rdev->beacon_registrations_lock); | ||
8895 | } | 9184 | } |
8896 | 9185 | ||
8897 | rcu_read_unlock(); | 9186 | rcu_read_unlock(); |
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index f6153516068c..2acba8477e9d 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h | |||
@@ -76,13 +76,11 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, | |||
76 | void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev, | 76 | void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev, |
77 | struct wireless_dev *wdev, u64 cookie, | 77 | struct wireless_dev *wdev, u64 cookie, |
78 | struct ieee80211_channel *chan, | 78 | struct ieee80211_channel *chan, |
79 | enum nl80211_channel_type channel_type, | ||
80 | unsigned int duration, gfp_t gfp); | 79 | unsigned int duration, gfp_t gfp); |
81 | void nl80211_send_remain_on_channel_cancel( | 80 | void nl80211_send_remain_on_channel_cancel( |
82 | struct cfg80211_registered_device *rdev, | 81 | struct cfg80211_registered_device *rdev, |
83 | struct wireless_dev *wdev, | 82 | struct wireless_dev *wdev, |
84 | u64 cookie, struct ieee80211_channel *chan, | 83 | u64 cookie, struct ieee80211_channel *chan, gfp_t gfp); |
85 | enum nl80211_channel_type channel_type, gfp_t gfp); | ||
86 | 84 | ||
87 | void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, | 85 | void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, |
88 | struct net_device *dev, const u8 *mac_addr, | 86 | struct net_device *dev, const u8 *mac_addr, |
@@ -129,8 +127,8 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, | |||
129 | const u8 *bssid, bool preauth, gfp_t gfp); | 127 | const u8 *bssid, bool preauth, gfp_t gfp); |
130 | 128 | ||
131 | void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, | 129 | void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, |
132 | struct net_device *dev, int freq, | 130 | struct net_device *dev, |
133 | enum nl80211_channel_type type, gfp_t gfp); | 131 | struct cfg80211_chan_def *chandef, gfp_t gfp); |
134 | 132 | ||
135 | bool nl80211_unexpected_frame(struct net_device *dev, | 133 | bool nl80211_unexpected_frame(struct net_device *dev, |
136 | const u8 *addr, gfp_t gfp); | 134 | const u8 *addr, gfp_t gfp); |
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h new file mode 100644 index 000000000000..6c0c8191f837 --- /dev/null +++ b/net/wireless/rdev-ops.h | |||
@@ -0,0 +1,878 @@ | |||
1 | #ifndef __CFG80211_RDEV_OPS | ||
2 | #define __CFG80211_RDEV_OPS | ||
3 | |||
4 | #include <linux/rtnetlink.h> | ||
5 | #include <net/cfg80211.h> | ||
6 | #include "core.h" | ||
7 | #include "trace.h" | ||
8 | |||
9 | static inline int rdev_suspend(struct cfg80211_registered_device *rdev) | ||
10 | { | ||
11 | int ret; | ||
12 | trace_rdev_suspend(&rdev->wiphy, rdev->wowlan); | ||
13 | ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan); | ||
14 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
15 | return ret; | ||
16 | } | ||
17 | |||
18 | static inline int rdev_resume(struct cfg80211_registered_device *rdev) | ||
19 | { | ||
20 | int ret; | ||
21 | trace_rdev_resume(&rdev->wiphy); | ||
22 | ret = rdev->ops->resume(&rdev->wiphy); | ||
23 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
24 | return ret; | ||
25 | } | ||
26 | |||
27 | static inline void rdev_set_wakeup(struct cfg80211_registered_device *rdev, | ||
28 | bool enabled) | ||
29 | { | ||
30 | trace_rdev_set_wakeup(&rdev->wiphy, enabled); | ||
31 | rdev->ops->set_wakeup(&rdev->wiphy, enabled); | ||
32 | trace_rdev_return_void(&rdev->wiphy); | ||
33 | } | ||
34 | |||
35 | static inline struct wireless_dev | ||
36 | *rdev_add_virtual_intf(struct cfg80211_registered_device *rdev, char *name, | ||
37 | enum nl80211_iftype type, u32 *flags, | ||
38 | struct vif_params *params) | ||
39 | { | ||
40 | struct wireless_dev *ret; | ||
41 | trace_rdev_add_virtual_intf(&rdev->wiphy, name, type); | ||
42 | ret = rdev->ops->add_virtual_intf(&rdev->wiphy, name, type, flags, | ||
43 | params); | ||
44 | trace_rdev_return_wdev(&rdev->wiphy, ret); | ||
45 | return ret; | ||
46 | } | ||
47 | |||
48 | static inline int | ||
49 | rdev_del_virtual_intf(struct cfg80211_registered_device *rdev, | ||
50 | struct wireless_dev *wdev) | ||
51 | { | ||
52 | int ret; | ||
53 | trace_rdev_del_virtual_intf(&rdev->wiphy, wdev); | ||
54 | ret = rdev->ops->del_virtual_intf(&rdev->wiphy, wdev); | ||
55 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
56 | return ret; | ||
57 | } | ||
58 | |||
59 | static inline int | ||
60 | rdev_change_virtual_intf(struct cfg80211_registered_device *rdev, | ||
61 | struct net_device *dev, enum nl80211_iftype type, | ||
62 | u32 *flags, struct vif_params *params) | ||
63 | { | ||
64 | int ret; | ||
65 | trace_rdev_change_virtual_intf(&rdev->wiphy, dev, type); | ||
66 | ret = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, type, flags, | ||
67 | params); | ||
68 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
69 | return ret; | ||
70 | } | ||
71 | |||
72 | static inline int rdev_add_key(struct cfg80211_registered_device *rdev, | ||
73 | struct net_device *netdev, u8 key_index, | ||
74 | bool pairwise, const u8 *mac_addr, | ||
75 | struct key_params *params) | ||
76 | { | ||
77 | int ret; | ||
78 | trace_rdev_add_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr); | ||
79 | ret = rdev->ops->add_key(&rdev->wiphy, netdev, key_index, pairwise, | ||
80 | mac_addr, params); | ||
81 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
82 | return ret; | ||
83 | } | ||
84 | |||
85 | static inline int | ||
86 | rdev_get_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, | ||
87 | u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie, | ||
88 | void (*callback)(void *cookie, struct key_params*)) | ||
89 | { | ||
90 | int ret; | ||
91 | trace_rdev_get_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr); | ||
92 | ret = rdev->ops->get_key(&rdev->wiphy, netdev, key_index, pairwise, | ||
93 | mac_addr, cookie, callback); | ||
94 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
95 | return ret; | ||
96 | } | ||
97 | |||
98 | static inline int rdev_del_key(struct cfg80211_registered_device *rdev, | ||
99 | struct net_device *netdev, u8 key_index, | ||
100 | bool pairwise, const u8 *mac_addr) | ||
101 | { | ||
102 | int ret; | ||
103 | trace_rdev_del_key(&rdev->wiphy, netdev, key_index, pairwise, mac_addr); | ||
104 | ret = rdev->ops->del_key(&rdev->wiphy, netdev, key_index, pairwise, | ||
105 | mac_addr); | ||
106 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
107 | return ret; | ||
108 | } | ||
109 | |||
110 | static inline int | ||
111 | rdev_set_default_key(struct cfg80211_registered_device *rdev, | ||
112 | struct net_device *netdev, u8 key_index, bool unicast, | ||
113 | bool multicast) | ||
114 | { | ||
115 | int ret; | ||
116 | trace_rdev_set_default_key(&rdev->wiphy, netdev, key_index, | ||
117 | unicast, multicast); | ||
118 | ret = rdev->ops->set_default_key(&rdev->wiphy, netdev, key_index, | ||
119 | unicast, multicast); | ||
120 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
121 | return ret; | ||
122 | } | ||
123 | |||
124 | static inline int | ||
125 | rdev_set_default_mgmt_key(struct cfg80211_registered_device *rdev, | ||
126 | struct net_device *netdev, u8 key_index) | ||
127 | { | ||
128 | int ret; | ||
129 | trace_rdev_set_default_mgmt_key(&rdev->wiphy, netdev, key_index); | ||
130 | ret = rdev->ops->set_default_mgmt_key(&rdev->wiphy, netdev, | ||
131 | key_index); | ||
132 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
133 | return ret; | ||
134 | } | ||
135 | |||
136 | static inline int rdev_start_ap(struct cfg80211_registered_device *rdev, | ||
137 | struct net_device *dev, | ||
138 | struct cfg80211_ap_settings *settings) | ||
139 | { | ||
140 | int ret; | ||
141 | trace_rdev_start_ap(&rdev->wiphy, dev, settings); | ||
142 | ret = rdev->ops->start_ap(&rdev->wiphy, dev, settings); | ||
143 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
144 | return ret; | ||
145 | } | ||
146 | |||
147 | static inline int rdev_change_beacon(struct cfg80211_registered_device *rdev, | ||
148 | struct net_device *dev, | ||
149 | struct cfg80211_beacon_data *info) | ||
150 | { | ||
151 | int ret; | ||
152 | trace_rdev_change_beacon(&rdev->wiphy, dev, info); | ||
153 | ret = rdev->ops->change_beacon(&rdev->wiphy, dev, info); | ||
154 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
155 | return ret; | ||
156 | } | ||
157 | |||
158 | static inline int rdev_stop_ap(struct cfg80211_registered_device *rdev, | ||
159 | struct net_device *dev) | ||
160 | { | ||
161 | int ret; | ||
162 | trace_rdev_stop_ap(&rdev->wiphy, dev); | ||
163 | ret = rdev->ops->stop_ap(&rdev->wiphy, dev); | ||
164 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
165 | return ret; | ||
166 | } | ||
167 | |||
168 | static inline int rdev_add_station(struct cfg80211_registered_device *rdev, | ||
169 | struct net_device *dev, u8 *mac, | ||
170 | struct station_parameters *params) | ||
171 | { | ||
172 | int ret; | ||
173 | trace_rdev_add_station(&rdev->wiphy, dev, mac, params); | ||
174 | ret = rdev->ops->add_station(&rdev->wiphy, dev, mac, params); | ||
175 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
176 | return ret; | ||
177 | } | ||
178 | |||
179 | static inline int rdev_del_station(struct cfg80211_registered_device *rdev, | ||
180 | struct net_device *dev, u8 *mac) | ||
181 | { | ||
182 | int ret; | ||
183 | trace_rdev_del_station(&rdev->wiphy, dev, mac); | ||
184 | ret = rdev->ops->del_station(&rdev->wiphy, dev, mac); | ||
185 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | static inline int rdev_change_station(struct cfg80211_registered_device *rdev, | ||
190 | struct net_device *dev, u8 *mac, | ||
191 | struct station_parameters *params) | ||
192 | { | ||
193 | int ret; | ||
194 | trace_rdev_change_station(&rdev->wiphy, dev, mac, params); | ||
195 | ret = rdev->ops->change_station(&rdev->wiphy, dev, mac, params); | ||
196 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
197 | return ret; | ||
198 | } | ||
199 | |||
200 | static inline int rdev_get_station(struct cfg80211_registered_device *rdev, | ||
201 | struct net_device *dev, u8 *mac, | ||
202 | struct station_info *sinfo) | ||
203 | { | ||
204 | int ret; | ||
205 | trace_rdev_get_station(&rdev->wiphy, dev, mac); | ||
206 | ret = rdev->ops->get_station(&rdev->wiphy, dev, mac, sinfo); | ||
207 | trace_rdev_return_int_station_info(&rdev->wiphy, ret, sinfo); | ||
208 | return ret; | ||
209 | } | ||
210 | |||
211 | static inline int rdev_dump_station(struct cfg80211_registered_device *rdev, | ||
212 | struct net_device *dev, int idx, u8 *mac, | ||
213 | struct station_info *sinfo) | ||
214 | { | ||
215 | int ret; | ||
216 | trace_rdev_dump_station(&rdev->wiphy, dev, idx, mac); | ||
217 | ret = rdev->ops->dump_station(&rdev->wiphy, dev, idx, mac, sinfo); | ||
218 | trace_rdev_return_int_station_info(&rdev->wiphy, ret, sinfo); | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | static inline int rdev_add_mpath(struct cfg80211_registered_device *rdev, | ||
223 | struct net_device *dev, u8 *dst, u8 *next_hop) | ||
224 | { | ||
225 | int ret; | ||
226 | trace_rdev_add_mpath(&rdev->wiphy, dev, dst, next_hop); | ||
227 | ret = rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop); | ||
228 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
229 | return ret; | ||
230 | } | ||
231 | |||
232 | static inline int rdev_del_mpath(struct cfg80211_registered_device *rdev, | ||
233 | struct net_device *dev, u8 *dst) | ||
234 | { | ||
235 | int ret; | ||
236 | trace_rdev_del_mpath(&rdev->wiphy, dev, dst); | ||
237 | ret = rdev->ops->del_mpath(&rdev->wiphy, dev, dst); | ||
238 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
239 | return ret; | ||
240 | } | ||
241 | |||
242 | static inline int rdev_change_mpath(struct cfg80211_registered_device *rdev, | ||
243 | struct net_device *dev, u8 *dst, | ||
244 | u8 *next_hop) | ||
245 | { | ||
246 | int ret; | ||
247 | trace_rdev_change_mpath(&rdev->wiphy, dev, dst, next_hop); | ||
248 | ret = rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop); | ||
249 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
250 | return ret; | ||
251 | } | ||
252 | |||
253 | static inline int rdev_get_mpath(struct cfg80211_registered_device *rdev, | ||
254 | struct net_device *dev, u8 *dst, u8 *next_hop, | ||
255 | struct mpath_info *pinfo) | ||
256 | { | ||
257 | int ret; | ||
258 | trace_rdev_get_mpath(&rdev->wiphy, dev, dst, next_hop); | ||
259 | ret = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, pinfo); | ||
260 | trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); | ||
261 | return ret; | ||
262 | |||
263 | } | ||
264 | |||
265 | static inline int rdev_dump_mpath(struct cfg80211_registered_device *rdev, | ||
266 | struct net_device *dev, int idx, u8 *dst, | ||
267 | u8 *next_hop, struct mpath_info *pinfo) | ||
268 | |||
269 | { | ||
270 | int ret; | ||
271 | trace_rdev_dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop); | ||
272 | ret = rdev->ops->dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop, | ||
273 | pinfo); | ||
274 | trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); | ||
275 | return ret; | ||
276 | } | ||
277 | |||
278 | static inline int | ||
279 | rdev_get_mesh_config(struct cfg80211_registered_device *rdev, | ||
280 | struct net_device *dev, struct mesh_config *conf) | ||
281 | { | ||
282 | int ret; | ||
283 | trace_rdev_get_mesh_config(&rdev->wiphy, dev); | ||
284 | ret = rdev->ops->get_mesh_config(&rdev->wiphy, dev, conf); | ||
285 | trace_rdev_return_int_mesh_config(&rdev->wiphy, ret, conf); | ||
286 | return ret; | ||
287 | } | ||
288 | |||
289 | static inline int | ||
290 | rdev_update_mesh_config(struct cfg80211_registered_device *rdev, | ||
291 | struct net_device *dev, u32 mask, | ||
292 | const struct mesh_config *nconf) | ||
293 | { | ||
294 | int ret; | ||
295 | trace_rdev_update_mesh_config(&rdev->wiphy, dev, mask, nconf); | ||
296 | ret = rdev->ops->update_mesh_config(&rdev->wiphy, dev, mask, nconf); | ||
297 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
298 | return ret; | ||
299 | } | ||
300 | |||
301 | static inline int rdev_join_mesh(struct cfg80211_registered_device *rdev, | ||
302 | struct net_device *dev, | ||
303 | const struct mesh_config *conf, | ||
304 | const struct mesh_setup *setup) | ||
305 | { | ||
306 | int ret; | ||
307 | trace_rdev_join_mesh(&rdev->wiphy, dev, conf, setup); | ||
308 | ret = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup); | ||
309 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
310 | return ret; | ||
311 | } | ||
312 | |||
313 | |||
314 | static inline int rdev_leave_mesh(struct cfg80211_registered_device *rdev, | ||
315 | struct net_device *dev) | ||
316 | { | ||
317 | int ret; | ||
318 | trace_rdev_leave_mesh(&rdev->wiphy, dev); | ||
319 | ret = rdev->ops->leave_mesh(&rdev->wiphy, dev); | ||
320 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
321 | return ret; | ||
322 | } | ||
323 | |||
324 | static inline int rdev_change_bss(struct cfg80211_registered_device *rdev, | ||
325 | struct net_device *dev, | ||
326 | struct bss_parameters *params) | ||
327 | |||
328 | { | ||
329 | int ret; | ||
330 | trace_rdev_change_bss(&rdev->wiphy, dev, params); | ||
331 | ret = rdev->ops->change_bss(&rdev->wiphy, dev, params); | ||
332 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
333 | return ret; | ||
334 | } | ||
335 | |||
336 | static inline int rdev_set_txq_params(struct cfg80211_registered_device *rdev, | ||
337 | struct net_device *dev, | ||
338 | struct ieee80211_txq_params *params) | ||
339 | |||
340 | { | ||
341 | int ret; | ||
342 | trace_rdev_set_txq_params(&rdev->wiphy, dev, params); | ||
343 | ret = rdev->ops->set_txq_params(&rdev->wiphy, dev, params); | ||
344 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
345 | return ret; | ||
346 | } | ||
347 | |||
348 | static inline int | ||
349 | rdev_libertas_set_mesh_channel(struct cfg80211_registered_device *rdev, | ||
350 | struct net_device *dev, | ||
351 | struct ieee80211_channel *chan) | ||
352 | { | ||
353 | int ret; | ||
354 | trace_rdev_libertas_set_mesh_channel(&rdev->wiphy, dev, chan); | ||
355 | ret = rdev->ops->libertas_set_mesh_channel(&rdev->wiphy, dev, chan); | ||
356 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
357 | return ret; | ||
358 | } | ||
359 | |||
360 | static inline int | ||
361 | rdev_set_monitor_channel(struct cfg80211_registered_device *rdev, | ||
362 | struct cfg80211_chan_def *chandef) | ||
363 | { | ||
364 | int ret; | ||
365 | trace_rdev_set_monitor_channel(&rdev->wiphy, chandef); | ||
366 | ret = rdev->ops->set_monitor_channel(&rdev->wiphy, chandef); | ||
367 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
368 | return ret; | ||
369 | } | ||
370 | |||
371 | static inline int rdev_scan(struct cfg80211_registered_device *rdev, | ||
372 | struct cfg80211_scan_request *request) | ||
373 | { | ||
374 | int ret; | ||
375 | trace_rdev_scan(&rdev->wiphy, request); | ||
376 | ret = rdev->ops->scan(&rdev->wiphy, request); | ||
377 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
378 | return ret; | ||
379 | } | ||
380 | |||
381 | static inline int rdev_auth(struct cfg80211_registered_device *rdev, | ||
382 | struct net_device *dev, | ||
383 | struct cfg80211_auth_request *req) | ||
384 | { | ||
385 | int ret; | ||
386 | trace_rdev_auth(&rdev->wiphy, dev, req); | ||
387 | ret = rdev->ops->auth(&rdev->wiphy, dev, req); | ||
388 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
389 | return ret; | ||
390 | } | ||
391 | |||
392 | static inline int rdev_assoc(struct cfg80211_registered_device *rdev, | ||
393 | struct net_device *dev, | ||
394 | struct cfg80211_assoc_request *req) | ||
395 | { | ||
396 | int ret; | ||
397 | trace_rdev_assoc(&rdev->wiphy, dev, req); | ||
398 | ret = rdev->ops->assoc(&rdev->wiphy, dev, req); | ||
399 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
400 | return ret; | ||
401 | } | ||
402 | |||
403 | static inline int rdev_deauth(struct cfg80211_registered_device *rdev, | ||
404 | struct net_device *dev, | ||
405 | struct cfg80211_deauth_request *req) | ||
406 | { | ||
407 | int ret; | ||
408 | trace_rdev_deauth(&rdev->wiphy, dev, req); | ||
409 | ret = rdev->ops->deauth(&rdev->wiphy, dev, req); | ||
410 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
411 | return ret; | ||
412 | } | ||
413 | |||
414 | static inline int rdev_disassoc(struct cfg80211_registered_device *rdev, | ||
415 | struct net_device *dev, | ||
416 | struct cfg80211_disassoc_request *req) | ||
417 | { | ||
418 | int ret; | ||
419 | trace_rdev_disassoc(&rdev->wiphy, dev, req); | ||
420 | ret = rdev->ops->disassoc(&rdev->wiphy, dev, req); | ||
421 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
422 | return ret; | ||
423 | } | ||
424 | |||
425 | static inline int rdev_connect(struct cfg80211_registered_device *rdev, | ||
426 | struct net_device *dev, | ||
427 | struct cfg80211_connect_params *sme) | ||
428 | { | ||
429 | int ret; | ||
430 | trace_rdev_connect(&rdev->wiphy, dev, sme); | ||
431 | ret = rdev->ops->connect(&rdev->wiphy, dev, sme); | ||
432 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
433 | return ret; | ||
434 | } | ||
435 | |||
436 | static inline int rdev_disconnect(struct cfg80211_registered_device *rdev, | ||
437 | struct net_device *dev, u16 reason_code) | ||
438 | { | ||
439 | int ret; | ||
440 | trace_rdev_disconnect(&rdev->wiphy, dev, reason_code); | ||
441 | ret = rdev->ops->disconnect(&rdev->wiphy, dev, reason_code); | ||
442 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
443 | return ret; | ||
444 | } | ||
445 | |||
446 | static inline int rdev_join_ibss(struct cfg80211_registered_device *rdev, | ||
447 | struct net_device *dev, | ||
448 | struct cfg80211_ibss_params *params) | ||
449 | { | ||
450 | int ret; | ||
451 | trace_rdev_join_ibss(&rdev->wiphy, dev, params); | ||
452 | ret = rdev->ops->join_ibss(&rdev->wiphy, dev, params); | ||
453 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
454 | return ret; | ||
455 | } | ||
456 | |||
457 | static inline int rdev_leave_ibss(struct cfg80211_registered_device *rdev, | ||
458 | struct net_device *dev) | ||
459 | { | ||
460 | int ret; | ||
461 | trace_rdev_leave_ibss(&rdev->wiphy, dev); | ||
462 | ret = rdev->ops->leave_ibss(&rdev->wiphy, dev); | ||
463 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
464 | return ret; | ||
465 | } | ||
466 | |||
467 | static inline int | ||
468 | rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed) | ||
469 | { | ||
470 | int ret; | ||
471 | trace_rdev_set_wiphy_params(&rdev->wiphy, changed); | ||
472 | ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); | ||
473 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
474 | return ret; | ||
475 | } | ||
476 | |||
477 | static inline int rdev_set_tx_power(struct cfg80211_registered_device *rdev, | ||
478 | struct wireless_dev *wdev, | ||
479 | enum nl80211_tx_power_setting type, int mbm) | ||
480 | { | ||
481 | int ret; | ||
482 | trace_rdev_set_tx_power(&rdev->wiphy, wdev, type, mbm); | ||
483 | ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, type, mbm); | ||
484 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
485 | return ret; | ||
486 | } | ||
487 | |||
488 | static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev, | ||
489 | struct wireless_dev *wdev, int *dbm) | ||
490 | { | ||
491 | int ret; | ||
492 | trace_rdev_get_tx_power(&rdev->wiphy, wdev); | ||
493 | ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, dbm); | ||
494 | trace_rdev_return_int_int(&rdev->wiphy, ret, *dbm); | ||
495 | return ret; | ||
496 | } | ||
497 | |||
498 | static inline int rdev_set_wds_peer(struct cfg80211_registered_device *rdev, | ||
499 | struct net_device *dev, const u8 *addr) | ||
500 | { | ||
501 | int ret; | ||
502 | trace_rdev_set_wds_peer(&rdev->wiphy, dev, addr); | ||
503 | ret = rdev->ops->set_wds_peer(&rdev->wiphy, dev, addr); | ||
504 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
505 | return ret; | ||
506 | } | ||
507 | |||
508 | static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev) | ||
509 | { | ||
510 | trace_rdev_rfkill_poll(&rdev->wiphy); | ||
511 | rdev->ops->rfkill_poll(&rdev->wiphy); | ||
512 | trace_rdev_return_void(&rdev->wiphy); | ||
513 | } | ||
514 | |||
515 | |||
516 | #ifdef CONFIG_NL80211_TESTMODE | ||
517 | static inline int rdev_testmode_cmd(struct cfg80211_registered_device *rdev, | ||
518 | void *data, int len) | ||
519 | { | ||
520 | int ret; | ||
521 | trace_rdev_testmode_cmd(&rdev->wiphy); | ||
522 | ret = rdev->ops->testmode_cmd(&rdev->wiphy, data, len); | ||
523 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
524 | return ret; | ||
525 | } | ||
526 | |||
527 | static inline int rdev_testmode_dump(struct cfg80211_registered_device *rdev, | ||
528 | struct sk_buff *skb, | ||
529 | struct netlink_callback *cb, void *data, | ||
530 | int len) | ||
531 | { | ||
532 | int ret; | ||
533 | trace_rdev_testmode_dump(&rdev->wiphy); | ||
534 | ret = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb, data, len); | ||
535 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
536 | return ret; | ||
537 | } | ||
538 | #endif | ||
539 | |||
540 | static inline int | ||
541 | rdev_set_bitrate_mask(struct cfg80211_registered_device *rdev, | ||
542 | struct net_device *dev, const u8 *peer, | ||
543 | const struct cfg80211_bitrate_mask *mask) | ||
544 | { | ||
545 | int ret; | ||
546 | trace_rdev_set_bitrate_mask(&rdev->wiphy, dev, peer, mask); | ||
547 | ret = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, peer, mask); | ||
548 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
549 | return ret; | ||
550 | } | ||
551 | |||
552 | static inline int rdev_dump_survey(struct cfg80211_registered_device *rdev, | ||
553 | struct net_device *netdev, int idx, | ||
554 | struct survey_info *info) | ||
555 | { | ||
556 | int ret; | ||
557 | trace_rdev_dump_survey(&rdev->wiphy, netdev, idx); | ||
558 | ret = rdev->ops->dump_survey(&rdev->wiphy, netdev, idx, info); | ||
559 | if (ret < 0) | ||
560 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
561 | else | ||
562 | trace_rdev_return_int_survey_info(&rdev->wiphy, ret, info); | ||
563 | return ret; | ||
564 | } | ||
565 | |||
566 | static inline int rdev_set_pmksa(struct cfg80211_registered_device *rdev, | ||
567 | struct net_device *netdev, | ||
568 | struct cfg80211_pmksa *pmksa) | ||
569 | { | ||
570 | int ret; | ||
571 | trace_rdev_set_pmksa(&rdev->wiphy, netdev, pmksa); | ||
572 | ret = rdev->ops->set_pmksa(&rdev->wiphy, netdev, pmksa); | ||
573 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
574 | return ret; | ||
575 | } | ||
576 | |||
577 | static inline int rdev_del_pmksa(struct cfg80211_registered_device *rdev, | ||
578 | struct net_device *netdev, | ||
579 | struct cfg80211_pmksa *pmksa) | ||
580 | { | ||
581 | int ret; | ||
582 | trace_rdev_del_pmksa(&rdev->wiphy, netdev, pmksa); | ||
583 | ret = rdev->ops->del_pmksa(&rdev->wiphy, netdev, pmksa); | ||
584 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
585 | return ret; | ||
586 | } | ||
587 | |||
588 | static inline int rdev_flush_pmksa(struct cfg80211_registered_device *rdev, | ||
589 | struct net_device *netdev) | ||
590 | { | ||
591 | int ret; | ||
592 | trace_rdev_flush_pmksa(&rdev->wiphy, netdev); | ||
593 | ret = rdev->ops->flush_pmksa(&rdev->wiphy, netdev); | ||
594 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
595 | return ret; | ||
596 | } | ||
597 | |||
598 | static inline int | ||
599 | rdev_remain_on_channel(struct cfg80211_registered_device *rdev, | ||
600 | struct wireless_dev *wdev, | ||
601 | struct ieee80211_channel *chan, | ||
602 | unsigned int duration, u64 *cookie) | ||
603 | { | ||
604 | int ret; | ||
605 | trace_rdev_remain_on_channel(&rdev->wiphy, wdev, chan, duration); | ||
606 | ret = rdev->ops->remain_on_channel(&rdev->wiphy, wdev, chan, | ||
607 | duration, cookie); | ||
608 | trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); | ||
609 | return ret; | ||
610 | } | ||
611 | |||
612 | static inline int | ||
613 | rdev_cancel_remain_on_channel(struct cfg80211_registered_device *rdev, | ||
614 | struct wireless_dev *wdev, u64 cookie) | ||
615 | { | ||
616 | int ret; | ||
617 | trace_rdev_cancel_remain_on_channel(&rdev->wiphy, wdev, cookie); | ||
618 | ret = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, wdev, cookie); | ||
619 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
620 | return ret; | ||
621 | } | ||
622 | |||
623 | static inline int rdev_mgmt_tx(struct cfg80211_registered_device *rdev, | ||
624 | struct wireless_dev *wdev, | ||
625 | struct ieee80211_channel *chan, bool offchan, | ||
626 | unsigned int wait, const u8 *buf, size_t len, | ||
627 | bool no_cck, bool dont_wait_for_ack, u64 *cookie) | ||
628 | { | ||
629 | int ret; | ||
630 | trace_rdev_mgmt_tx(&rdev->wiphy, wdev, chan, offchan, | ||
631 | wait, no_cck, dont_wait_for_ack); | ||
632 | ret = rdev->ops->mgmt_tx(&rdev->wiphy, wdev, chan, offchan, | ||
633 | wait, buf, len, no_cck, | ||
634 | dont_wait_for_ack, cookie); | ||
635 | trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); | ||
636 | return ret; | ||
637 | } | ||
638 | |||
639 | static inline int | ||
640 | rdev_mgmt_tx_cancel_wait(struct cfg80211_registered_device *rdev, | ||
641 | struct wireless_dev *wdev, u64 cookie) | ||
642 | { | ||
643 | int ret; | ||
644 | trace_rdev_mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie); | ||
645 | ret = rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie); | ||
646 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
647 | return ret; | ||
648 | } | ||
649 | |||
650 | static inline int rdev_set_power_mgmt(struct cfg80211_registered_device *rdev, | ||
651 | struct net_device *dev, bool enabled, | ||
652 | int timeout) | ||
653 | { | ||
654 | int ret; | ||
655 | trace_rdev_set_power_mgmt(&rdev->wiphy, dev, enabled, timeout); | ||
656 | ret = rdev->ops->set_power_mgmt(&rdev->wiphy, dev, enabled, timeout); | ||
657 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
658 | return ret; | ||
659 | } | ||
660 | |||
661 | static inline int | ||
662 | rdev_set_cqm_rssi_config(struct cfg80211_registered_device *rdev, | ||
663 | struct net_device *dev, s32 rssi_thold, u32 rssi_hyst) | ||
664 | { | ||
665 | int ret; | ||
666 | trace_rdev_set_cqm_rssi_config(&rdev->wiphy, dev, rssi_thold, | ||
667 | rssi_hyst); | ||
668 | ret = rdev->ops->set_cqm_rssi_config(&rdev->wiphy, dev, rssi_thold, | ||
669 | rssi_hyst); | ||
670 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
671 | return ret; | ||
672 | } | ||
673 | |||
674 | static inline int | ||
675 | rdev_set_cqm_txe_config(struct cfg80211_registered_device *rdev, | ||
676 | struct net_device *dev, u32 rate, u32 pkts, u32 intvl) | ||
677 | { | ||
678 | int ret; | ||
679 | trace_rdev_set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts, intvl); | ||
680 | ret = rdev->ops->set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts, | ||
681 | intvl); | ||
682 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
683 | return ret; | ||
684 | } | ||
685 | |||
686 | static inline void | ||
687 | rdev_mgmt_frame_register(struct cfg80211_registered_device *rdev, | ||
688 | struct wireless_dev *wdev, u16 frame_type, bool reg) | ||
689 | { | ||
690 | trace_rdev_mgmt_frame_register(&rdev->wiphy, wdev, frame_type, reg); | ||
691 | rdev->ops->mgmt_frame_register(&rdev->wiphy, wdev, frame_type, reg); | ||
692 | trace_rdev_return_void(&rdev->wiphy); | ||
693 | } | ||
694 | |||
695 | static inline int rdev_set_antenna(struct cfg80211_registered_device *rdev, | ||
696 | u32 tx_ant, u32 rx_ant) | ||
697 | { | ||
698 | int ret; | ||
699 | trace_rdev_set_antenna(&rdev->wiphy, tx_ant, rx_ant); | ||
700 | ret = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant); | ||
701 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
702 | return ret; | ||
703 | } | ||
704 | |||
705 | static inline int rdev_get_antenna(struct cfg80211_registered_device *rdev, | ||
706 | u32 *tx_ant, u32 *rx_ant) | ||
707 | { | ||
708 | int ret; | ||
709 | trace_rdev_get_antenna(&rdev->wiphy); | ||
710 | ret = rdev->ops->get_antenna(&rdev->wiphy, tx_ant, rx_ant); | ||
711 | if (ret) | ||
712 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
713 | else | ||
714 | trace_rdev_return_int_tx_rx(&rdev->wiphy, ret, *tx_ant, | ||
715 | *rx_ant); | ||
716 | return ret; | ||
717 | } | ||
718 | |||
719 | static inline int rdev_set_ringparam(struct cfg80211_registered_device *rdev, | ||
720 | u32 tx, u32 rx) | ||
721 | { | ||
722 | int ret; | ||
723 | trace_rdev_set_ringparam(&rdev->wiphy, tx, rx); | ||
724 | ret = rdev->ops->set_ringparam(&rdev->wiphy, tx, rx); | ||
725 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
726 | return ret; | ||
727 | } | ||
728 | |||
729 | static inline void rdev_get_ringparam(struct cfg80211_registered_device *rdev, | ||
730 | u32 *tx, u32 *tx_max, u32 *rx, | ||
731 | u32 *rx_max) | ||
732 | { | ||
733 | trace_rdev_get_ringparam(&rdev->wiphy); | ||
734 | rdev->ops->get_ringparam(&rdev->wiphy, tx, tx_max, rx, rx_max); | ||
735 | trace_rdev_return_void_tx_rx(&rdev->wiphy, *tx, *tx_max, *rx, *rx_max); | ||
736 | } | ||
737 | |||
738 | static inline int | ||
739 | rdev_sched_scan_start(struct cfg80211_registered_device *rdev, | ||
740 | struct net_device *dev, | ||
741 | struct cfg80211_sched_scan_request *request) | ||
742 | { | ||
743 | int ret; | ||
744 | trace_rdev_sched_scan_start(&rdev->wiphy, dev, request); | ||
745 | ret = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request); | ||
746 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
747 | return ret; | ||
748 | } | ||
749 | |||
750 | static inline int rdev_sched_scan_stop(struct cfg80211_registered_device *rdev, | ||
751 | struct net_device *dev) | ||
752 | { | ||
753 | int ret; | ||
754 | trace_rdev_sched_scan_stop(&rdev->wiphy, dev); | ||
755 | ret = rdev->ops->sched_scan_stop(&rdev->wiphy, dev); | ||
756 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
757 | return ret; | ||
758 | } | ||
759 | |||
760 | static inline int rdev_set_rekey_data(struct cfg80211_registered_device *rdev, | ||
761 | struct net_device *dev, | ||
762 | struct cfg80211_gtk_rekey_data *data) | ||
763 | { | ||
764 | int ret; | ||
765 | trace_rdev_set_rekey_data(&rdev->wiphy, dev); | ||
766 | ret = rdev->ops->set_rekey_data(&rdev->wiphy, dev, data); | ||
767 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
768 | return ret; | ||
769 | } | ||
770 | |||
771 | static inline int rdev_tdls_mgmt(struct cfg80211_registered_device *rdev, | ||
772 | struct net_device *dev, u8 *peer, | ||
773 | u8 action_code, u8 dialog_token, | ||
774 | u16 status_code, const u8 *buf, size_t len) | ||
775 | { | ||
776 | int ret; | ||
777 | trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, action_code, | ||
778 | dialog_token, status_code, buf, len); | ||
779 | ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code, | ||
780 | dialog_token, status_code, buf, len); | ||
781 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
782 | return ret; | ||
783 | } | ||
784 | |||
785 | static inline int rdev_tdls_oper(struct cfg80211_registered_device *rdev, | ||
786 | struct net_device *dev, u8 *peer, | ||
787 | enum nl80211_tdls_operation oper) | ||
788 | { | ||
789 | int ret; | ||
790 | trace_rdev_tdls_oper(&rdev->wiphy, dev, peer, oper); | ||
791 | ret = rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, oper); | ||
792 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
793 | return ret; | ||
794 | } | ||
795 | |||
796 | static inline int rdev_probe_client(struct cfg80211_registered_device *rdev, | ||
797 | struct net_device *dev, const u8 *peer, | ||
798 | u64 *cookie) | ||
799 | { | ||
800 | int ret; | ||
801 | trace_rdev_probe_client(&rdev->wiphy, dev, peer); | ||
802 | ret = rdev->ops->probe_client(&rdev->wiphy, dev, peer, cookie); | ||
803 | trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); | ||
804 | return ret; | ||
805 | } | ||
806 | |||
807 | static inline int rdev_set_noack_map(struct cfg80211_registered_device *rdev, | ||
808 | struct net_device *dev, u16 noack_map) | ||
809 | { | ||
810 | int ret; | ||
811 | trace_rdev_set_noack_map(&rdev->wiphy, dev, noack_map); | ||
812 | ret = rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map); | ||
813 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
814 | return ret; | ||
815 | } | ||
816 | |||
817 | static inline int | ||
818 | rdev_get_et_sset_count(struct cfg80211_registered_device *rdev, | ||
819 | struct net_device *dev, int sset) | ||
820 | { | ||
821 | int ret; | ||
822 | trace_rdev_get_et_sset_count(&rdev->wiphy, dev, sset); | ||
823 | ret = rdev->ops->get_et_sset_count(&rdev->wiphy, dev, sset); | ||
824 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
825 | return ret; | ||
826 | } | ||
827 | |||
828 | static inline void rdev_get_et_stats(struct cfg80211_registered_device *rdev, | ||
829 | struct net_device *dev, | ||
830 | struct ethtool_stats *stats, u64 *data) | ||
831 | { | ||
832 | trace_rdev_get_et_stats(&rdev->wiphy, dev); | ||
833 | rdev->ops->get_et_stats(&rdev->wiphy, dev, stats, data); | ||
834 | trace_rdev_return_void(&rdev->wiphy); | ||
835 | } | ||
836 | |||
837 | static inline void rdev_get_et_strings(struct cfg80211_registered_device *rdev, | ||
838 | struct net_device *dev, u32 sset, | ||
839 | u8 *data) | ||
840 | { | ||
841 | trace_rdev_get_et_strings(&rdev->wiphy, dev, sset); | ||
842 | rdev->ops->get_et_strings(&rdev->wiphy, dev, sset, data); | ||
843 | trace_rdev_return_void(&rdev->wiphy); | ||
844 | } | ||
845 | |||
846 | static inline int | ||
847 | rdev_get_channel(struct cfg80211_registered_device *rdev, | ||
848 | struct wireless_dev *wdev, | ||
849 | struct cfg80211_chan_def *chandef) | ||
850 | { | ||
851 | int ret; | ||
852 | |||
853 | trace_rdev_get_channel(&rdev->wiphy, wdev); | ||
854 | ret = rdev->ops->get_channel(&rdev->wiphy, wdev, chandef); | ||
855 | trace_rdev_return_chandef(&rdev->wiphy, ret, chandef); | ||
856 | |||
857 | return ret; | ||
858 | } | ||
859 | |||
860 | static inline int rdev_start_p2p_device(struct cfg80211_registered_device *rdev, | ||
861 | struct wireless_dev *wdev) | ||
862 | { | ||
863 | int ret; | ||
864 | |||
865 | trace_rdev_start_p2p_device(&rdev->wiphy, wdev); | ||
866 | ret = rdev->ops->start_p2p_device(&rdev->wiphy, wdev); | ||
867 | trace_rdev_return_int(&rdev->wiphy, ret); | ||
868 | return ret; | ||
869 | } | ||
870 | |||
871 | static inline void rdev_stop_p2p_device(struct cfg80211_registered_device *rdev, | ||
872 | struct wireless_dev *wdev) | ||
873 | { | ||
874 | trace_rdev_stop_p2p_device(&rdev->wiphy, wdev); | ||
875 | rdev->ops->stop_p2p_device(&rdev->wiphy, wdev); | ||
876 | trace_rdev_return_void(&rdev->wiphy); | ||
877 | } | ||
878 | #endif /* __CFG80211_RDEV_OPS */ | ||
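The new rdev-ops.h header routes every rdev->ops callback through a static inline wrapper that brackets the real call with trace_rdev_* tracepoints, making all driver entry and return points observable without changing the callers' logic. The pattern is uniform; a sketch for a hypothetical op follows (rdev_example_op and trace_rdev_example_op are invented names, while trace_rdev_return_int is the shared return tracepoint used throughout the file). Void ops use trace_rdev_return_void instead, and ops that fill in data (get_station, get_channel, get_antenna) use typed return tracepoints so the result is traced as well.

    static inline int rdev_example_op(struct cfg80211_registered_device *rdev,
                                      struct net_device *dev)
    {
            int ret;
            trace_rdev_example_op(&rdev->wiphy, dev);       /* hypothetical entry event */
            ret = rdev->ops->example_op(&rdev->wiphy, dev); /* hypothetical driver callback */
            trace_rdev_return_int(&rdev->wiphy, ret);       /* shared return event */
            return ret;
    }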
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 9730c9862bdc..9596015975d2 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -17,9 +17,58 @@ | |||
17 | #include "core.h" | 17 | #include "core.h" |
18 | #include "nl80211.h" | 18 | #include "nl80211.h" |
19 | #include "wext-compat.h" | 19 | #include "wext-compat.h" |
20 | #include "rdev-ops.h" | ||
20 | 21 | ||
21 | #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) | 22 | #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) |
22 | 23 | ||
24 | static void bss_release(struct kref *ref) | ||
25 | { | ||
26 | struct cfg80211_internal_bss *bss; | ||
27 | |||
28 | bss = container_of(ref, struct cfg80211_internal_bss, ref); | ||
29 | if (bss->pub.free_priv) | ||
30 | bss->pub.free_priv(&bss->pub); | ||
31 | |||
32 | if (bss->beacon_ies_allocated) | ||
33 | kfree(bss->pub.beacon_ies); | ||
34 | if (bss->proberesp_ies_allocated) | ||
35 | kfree(bss->pub.proberesp_ies); | ||
36 | |||
37 | BUG_ON(atomic_read(&bss->hold)); | ||
38 | |||
39 | kfree(bss); | ||
40 | } | ||
41 | |||
42 | /* must hold dev->bss_lock! */ | ||
43 | static void __cfg80211_unlink_bss(struct cfg80211_registered_device *dev, | ||
44 | struct cfg80211_internal_bss *bss) | ||
45 | { | ||
46 | list_del_init(&bss->list); | ||
47 | rb_erase(&bss->rbn, &dev->bss_tree); | ||
48 | kref_put(&bss->ref, bss_release); | ||
49 | } | ||
50 | |||
51 | /* must hold dev->bss_lock! */ | ||
52 | static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev, | ||
53 | unsigned long expire_time) | ||
54 | { | ||
55 | struct cfg80211_internal_bss *bss, *tmp; | ||
56 | bool expired = false; | ||
57 | |||
58 | list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) { | ||
59 | if (atomic_read(&bss->hold)) | ||
60 | continue; | ||
61 | if (!time_after(expire_time, bss->ts)) | ||
62 | continue; | ||
63 | |||
64 | __cfg80211_unlink_bss(dev, bss); | ||
65 | expired = true; | ||
66 | } | ||
67 | |||
68 | if (expired) | ||
69 | dev->bss_generation++; | ||
70 | } | ||
71 | |||
23 | void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) | 72 | void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) |
24 | { | 73 | { |
25 | struct cfg80211_scan_request *request; | 74 | struct cfg80211_scan_request *request; |
@@ -45,10 +94,17 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) | |||
45 | if (wdev->netdev) | 94 | if (wdev->netdev) |
46 | cfg80211_sme_scan_done(wdev->netdev); | 95 | cfg80211_sme_scan_done(wdev->netdev); |
47 | 96 | ||
48 | if (request->aborted) | 97 | if (request->aborted) { |
49 | nl80211_send_scan_aborted(rdev, wdev); | 98 | nl80211_send_scan_aborted(rdev, wdev); |
50 | else | 99 | } else { |
100 | if (request->flags & NL80211_SCAN_FLAG_FLUSH) { | ||
101 | /* flush entries from previous scans */ | ||
102 | spin_lock_bh(&rdev->bss_lock); | ||
103 | __cfg80211_bss_expire(rdev, request->scan_start); | ||
104 | spin_unlock_bh(&rdev->bss_lock); | ||
105 | } | ||
51 | nl80211_send_scan_done(rdev, wdev); | 106 | nl80211_send_scan_done(rdev, wdev); |
107 | } | ||
52 | 108 | ||
53 | #ifdef CONFIG_CFG80211_WEXT | 109 | #ifdef CONFIG_CFG80211_WEXT |
54 | if (wdev->netdev && !request->aborted) { | 110 | if (wdev->netdev && !request->aborted) { |
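The scan.c changes move the BSS expiry helpers into this file and use them to honour NL80211_SCAN_FLAG_FLUSH: when a flushing scan (or scheduled-scan iteration) completes, every cached BSS whose timestamp predates request->scan_start is unlinked under bss_lock, while entries with an elevated hold count are always kept. The same helper also serves normal ageing; a hedged sketch of such a caller (the real wrapper in scan.c may be named differently, but the expiry argument is simply "now minus the 30-second IEEE80211_SCAN_RESULT_EXPIRE window"):

    /* Sketch of a periodic-ageing caller built on __cfg80211_bss_expire(). */
    static void expire_old_bss(struct cfg80211_registered_device *dev)
    {
            spin_lock_bh(&dev->bss_lock);
            __cfg80211_bss_expire(dev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
            spin_unlock_bh(&dev->bss_lock);
    }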
@@ -89,6 +145,7 @@ void __cfg80211_scan_done(struct work_struct *wk) | |||
89 | 145 | ||
90 | void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) | 146 | void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) |
91 | { | 147 | { |
148 | trace_cfg80211_scan_done(request, aborted); | ||
92 | WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); | 149 | WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); |
93 | 150 | ||
94 | request->aborted = aborted; | 151 | request->aborted = aborted; |
@@ -99,22 +156,34 @@ EXPORT_SYMBOL(cfg80211_scan_done); | |||
99 | void __cfg80211_sched_scan_results(struct work_struct *wk) | 156 | void __cfg80211_sched_scan_results(struct work_struct *wk) |
100 | { | 157 | { |
101 | struct cfg80211_registered_device *rdev; | 158 | struct cfg80211_registered_device *rdev; |
159 | struct cfg80211_sched_scan_request *request; | ||
102 | 160 | ||
103 | rdev = container_of(wk, struct cfg80211_registered_device, | 161 | rdev = container_of(wk, struct cfg80211_registered_device, |
104 | sched_scan_results_wk); | 162 | sched_scan_results_wk); |
105 | 163 | ||
164 | request = rdev->sched_scan_req; | ||
165 | |||
106 | mutex_lock(&rdev->sched_scan_mtx); | 166 | mutex_lock(&rdev->sched_scan_mtx); |
107 | 167 | ||
108 | /* we don't have sched_scan_req anymore if the scan is stopping */ | 168 | /* we don't have sched_scan_req anymore if the scan is stopping */ |
109 | if (rdev->sched_scan_req) | 169 | if (request) { |
110 | nl80211_send_sched_scan_results(rdev, | 170 | if (request->flags & NL80211_SCAN_FLAG_FLUSH) { |
111 | rdev->sched_scan_req->dev); | 171 | /* flush entries from previous scans */ |
172 | spin_lock_bh(&rdev->bss_lock); | ||
173 | __cfg80211_bss_expire(rdev, request->scan_start); | ||
174 | spin_unlock_bh(&rdev->bss_lock); | ||
175 | request->scan_start = | ||
176 | jiffies + msecs_to_jiffies(request->interval); | ||
177 | } | ||
178 | nl80211_send_sched_scan_results(rdev, request->dev); | ||
179 | } | ||
112 | 180 | ||
113 | mutex_unlock(&rdev->sched_scan_mtx); | 181 | mutex_unlock(&rdev->sched_scan_mtx); |
114 | } | 182 | } |
115 | 183 | ||
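In the scheduled-scan results path above, NL80211_SCAN_FLAG_FLUSH first expires every cached entry older than request->scan_start and then advances scan_start by one interval, so the next flush only spares results that the upcoming scan cycle will refresh. A minimal userspace sketch of that cutoff bookkeeping follows, with a millisecond counter standing in for jiffies and purely illustrative names.

/*
 * Userspace sketch of the flush-cutoff handling in the scheduled-scan
 * results path above: report results, flush everything older than
 * scan_start, then advance scan_start by one interval.  A millisecond
 * counter stands in for jiffies; all names are illustrative.
 */
#include <stdio.h>

struct sched_scan_state {
	unsigned long scan_start;	/* flush cutoff, in ms */
	unsigned long interval;		/* scan interval, in ms */
};

static void sched_scan_results(struct sched_scan_state *st, int flush,
			       unsigned long now_ms)
{
	if (flush) {
		printf("flush cache entries older than %lu ms\n",
		       st->scan_start);
		/* next flush only spares results of the upcoming cycle */
		st->scan_start = now_ms + st->interval;
	}
	printf("report results at %lu ms\n", now_ms);
}

int main(void)
{
	struct sched_scan_state st = { .scan_start = 0, .interval = 10000 };
	unsigned long now;

	for (now = 10000; now <= 30000; now += st.interval)
		sched_scan_results(&st, 1, now);

	return 0;
}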
116 | void cfg80211_sched_scan_results(struct wiphy *wiphy) | 184 | void cfg80211_sched_scan_results(struct wiphy *wiphy) |
117 | { | 185 | { |
186 | trace_cfg80211_sched_scan_results(wiphy); | ||
118 | /* ignore if we're not scanning */ | 187 | /* ignore if we're not scanning */ |
119 | if (wiphy_to_dev(wiphy)->sched_scan_req) | 188 | if (wiphy_to_dev(wiphy)->sched_scan_req) |
120 | queue_work(cfg80211_wq, | 189 | queue_work(cfg80211_wq, |
@@ -126,6 +195,8 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy) | |||
126 | { | 195 | { |
127 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); | 196 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); |
128 | 197 | ||
198 | trace_cfg80211_sched_scan_stopped(wiphy); | ||
199 | |||
129 | mutex_lock(&rdev->sched_scan_mtx); | 200 | mutex_lock(&rdev->sched_scan_mtx); |
130 | __cfg80211_stop_sched_scan(rdev, true); | 201 | __cfg80211_stop_sched_scan(rdev, true); |
131 | mutex_unlock(&rdev->sched_scan_mtx); | 202 | mutex_unlock(&rdev->sched_scan_mtx); |
@@ -145,7 +216,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, | |||
145 | dev = rdev->sched_scan_req->dev; | 216 | dev = rdev->sched_scan_req->dev; |
146 | 217 | ||
147 | if (!driver_initiated) { | 218 | if (!driver_initiated) { |
148 | int err = rdev->ops->sched_scan_stop(&rdev->wiphy, dev); | 219 | int err = rdev_sched_scan_stop(rdev, dev); |
149 | if (err) | 220 | if (err) |
150 | return err; | 221 | return err; |
151 | } | 222 | } |
@@ -158,24 +229,6 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, | |||
158 | return 0; | 229 | return 0; |
159 | } | 230 | } |
160 | 231 | ||
161 | static void bss_release(struct kref *ref) | ||
162 | { | ||
163 | struct cfg80211_internal_bss *bss; | ||
164 | |||
165 | bss = container_of(ref, struct cfg80211_internal_bss, ref); | ||
166 | if (bss->pub.free_priv) | ||
167 | bss->pub.free_priv(&bss->pub); | ||
168 | |||
169 | if (bss->beacon_ies_allocated) | ||
170 | kfree(bss->pub.beacon_ies); | ||
171 | if (bss->proberesp_ies_allocated) | ||
172 | kfree(bss->pub.proberesp_ies); | ||
173 | |||
174 | BUG_ON(atomic_read(&bss->hold)); | ||
175 | |||
176 | kfree(bss); | ||
177 | } | ||
178 | |||
179 | /* must hold dev->bss_lock! */ | 232 | /* must hold dev->bss_lock! */ |
180 | void cfg80211_bss_age(struct cfg80211_registered_device *dev, | 233 | void cfg80211_bss_age(struct cfg80211_registered_device *dev, |
181 | unsigned long age_secs) | 234 | unsigned long age_secs) |
@@ -188,32 +241,9 @@ void cfg80211_bss_age(struct cfg80211_registered_device *dev, | |||
188 | } | 241 | } |
189 | } | 242 | } |
190 | 243 | ||
191 | /* must hold dev->bss_lock! */ | ||
192 | static void __cfg80211_unlink_bss(struct cfg80211_registered_device *dev, | ||
193 | struct cfg80211_internal_bss *bss) | ||
194 | { | ||
195 | list_del_init(&bss->list); | ||
196 | rb_erase(&bss->rbn, &dev->bss_tree); | ||
197 | kref_put(&bss->ref, bss_release); | ||
198 | } | ||
199 | |||
200 | /* must hold dev->bss_lock! */ | ||
201 | void cfg80211_bss_expire(struct cfg80211_registered_device *dev) | 244 | void cfg80211_bss_expire(struct cfg80211_registered_device *dev) |
202 | { | 245 | { |
203 | struct cfg80211_internal_bss *bss, *tmp; | 246 | __cfg80211_bss_expire(dev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE); |
204 | bool expired = false; | ||
205 | |||
206 | list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) { | ||
207 | if (atomic_read(&bss->hold)) | ||
208 | continue; | ||
209 | if (!time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE)) | ||
210 | continue; | ||
211 | __cfg80211_unlink_bss(dev, bss); | ||
212 | expired = true; | ||
213 | } | ||
214 | |||
215 | if (expired) | ||
216 | dev->bss_generation++; | ||
217 | } | 247 | } |
218 | 248 | ||
219 | const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) | 249 | const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len) |
@@ -459,6 +489,9 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, | |||
459 | struct cfg80211_internal_bss *bss, *res = NULL; | 489 | struct cfg80211_internal_bss *bss, *res = NULL; |
460 | unsigned long now = jiffies; | 490 | unsigned long now = jiffies; |
461 | 491 | ||
492 | trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, capa_mask, | ||
493 | capa_val); | ||
494 | |||
462 | spin_lock_bh(&dev->bss_lock); | 495 | spin_lock_bh(&dev->bss_lock); |
463 | 496 | ||
464 | list_for_each_entry(bss, &dev->bss_list, list) { | 497 | list_for_each_entry(bss, &dev->bss_list, list) { |
@@ -480,6 +513,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, | |||
480 | spin_unlock_bh(&dev->bss_lock); | 513 | spin_unlock_bh(&dev->bss_lock); |
481 | if (!res) | 514 | if (!res) |
482 | return NULL; | 515 | return NULL; |
516 | trace_cfg80211_return_bss(&res->pub); | ||
483 | return &res->pub; | 517 | return &res->pub; |
484 | } | 518 | } |
485 | EXPORT_SYMBOL(cfg80211_get_bss); | 519 | EXPORT_SYMBOL(cfg80211_get_bss); |
@@ -737,6 +771,38 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
737 | return found; | 771 | return found; |
738 | } | 772 | } |
739 | 773 | ||
774 | static struct ieee80211_channel * | ||
775 | cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, | ||
776 | struct ieee80211_channel *channel) | ||
777 | { | ||
778 | const u8 *tmp; | ||
779 | u32 freq; | ||
780 | int channel_number = -1; | ||
781 | |||
782 | tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen); | ||
783 | if (tmp && tmp[1] == 1) { | ||
784 | channel_number = tmp[2]; | ||
785 | } else { | ||
786 | tmp = cfg80211_find_ie(WLAN_EID_HT_OPERATION, ie, ielen); | ||
787 | if (tmp && tmp[1] >= sizeof(struct ieee80211_ht_operation)) { | ||
788 | struct ieee80211_ht_operation *htop = (void *)(tmp + 2); | ||
789 | |||
790 | channel_number = htop->primary_chan; | ||
791 | } | ||
792 | } | ||
793 | |||
794 | if (channel_number < 0) | ||
795 | return channel; | ||
796 | |||
797 | freq = ieee80211_channel_to_frequency(channel_number, channel->band); | ||
798 | channel = ieee80211_get_channel(wiphy, freq); | ||
799 | if (!channel) | ||
800 | return NULL; | ||
801 | if (channel->flags & IEEE80211_CHAN_DISABLED) | ||
802 | return NULL; | ||
803 | return channel; | ||
804 | } | ||
805 | |||
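cfg80211_get_bss_channel() above re-derives the operating channel from the frame body itself, preferring the DS Parameter Set element and falling back to HT Operation, and rejects the result if the mapped channel is unknown or disabled. The following is a self-contained userspace model of the element walk; the element layout and the DS Parameter Set ID (3) match IEEE 802.11 and the helper mirrors what cfg80211_find_ie() does, but everything here is illustrative userspace code rather than the kernel implementation.

/*
 * Userspace model of the IE walk used by cfg80211_get_bss_channel():
 * scan a tagged element buffer for the DS Parameter Set element (ID 3,
 * one byte of payload) and return the channel number it carries, or -1
 * if the element is absent or malformed.  Names are illustrative.
 */
#include <stdio.h>
#include <stddef.h>

#define EID_DS_PARAMS 3	/* IEEE 802.11 DS Parameter Set element ID */

static const unsigned char *find_ie(unsigned char eid,
				    const unsigned char *ies, size_t len)
{
	/* each element is: 1 byte ID, 1 byte length, then payload */
	while (len >= 2 && len >= (size_t)ies[1] + 2) {
		if (ies[0] == eid)
			return ies;
		len -= ies[1] + 2;
		ies += ies[1] + 2;
	}
	return NULL;
}

static int ie_channel_number(const unsigned char *ies, size_t len)
{
	const unsigned char *ie = find_ie(EID_DS_PARAMS, ies, len);

	if (ie && ie[1] == 1)
		return ie[2];
	return -1;	/* caller falls back to the radio-reported channel */
}

int main(void)
{
	/* SSID element ("ap"), then DS Params announcing channel 6 */
	const unsigned char ies[] = { 0x00, 0x02, 'a', 'p',
				      EID_DS_PARAMS, 0x01, 6 };

	printf("channel from IEs: %d\n",
	       ie_channel_number(ies, sizeof(ies)));
	return 0;
}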
740 | struct cfg80211_bss* | 806 | struct cfg80211_bss* |
741 | cfg80211_inform_bss(struct wiphy *wiphy, | 807 | cfg80211_inform_bss(struct wiphy *wiphy, |
742 | struct ieee80211_channel *channel, | 808 | struct ieee80211_channel *channel, |
@@ -756,6 +822,10 @@ cfg80211_inform_bss(struct wiphy *wiphy, | |||
756 | (signal < 0 || signal > 100))) | 822 | (signal < 0 || signal > 100))) |
757 | return NULL; | 823 | return NULL; |
758 | 824 | ||
825 | channel = cfg80211_get_bss_channel(wiphy, ie, ielen, channel); | ||
826 | if (!channel) | ||
827 | return NULL; | ||
828 | |||
759 | res = kzalloc(sizeof(*res) + privsz + ielen, gfp); | 829 | res = kzalloc(sizeof(*res) + privsz + ielen, gfp); |
760 | if (!res) | 830 | if (!res) |
761 | return NULL; | 831 | return NULL; |
@@ -792,6 +862,7 @@ cfg80211_inform_bss(struct wiphy *wiphy, | |||
792 | if (res->pub.capability & WLAN_CAPABILITY_ESS) | 862 | if (res->pub.capability & WLAN_CAPABILITY_ESS) |
793 | regulatory_hint_found_beacon(wiphy, channel, gfp); | 863 | regulatory_hint_found_beacon(wiphy, channel, gfp); |
794 | 864 | ||
865 | trace_cfg80211_return_bss(&res->pub); | ||
795 | /* cfg80211_bss_update gives us a referenced result */ | 866 | /* cfg80211_bss_update gives us a referenced result */ |
796 | return &res->pub; | 867 | return &res->pub; |
797 | } | 868 | } |
@@ -808,6 +879,11 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, | |||
808 | u.probe_resp.variable); | 879 | u.probe_resp.variable); |
809 | size_t privsz; | 880 | size_t privsz; |
810 | 881 | ||
882 | BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) != | ||
883 | offsetof(struct ieee80211_mgmt, u.beacon.variable)); | ||
884 | |||
885 | trace_cfg80211_inform_bss_frame(wiphy, channel, mgmt, len, signal); | ||
886 | |||
811 | if (WARN_ON(!mgmt)) | 887 | if (WARN_ON(!mgmt)) |
812 | return NULL; | 888 | return NULL; |
813 | 889 | ||
@@ -823,6 +899,11 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, | |||
823 | 899 | ||
824 | privsz = wiphy->bss_priv_size; | 900 | privsz = wiphy->bss_priv_size; |
825 | 901 | ||
902 | channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable, | ||
903 | ielen, channel); | ||
904 | if (!channel) | ||
905 | return NULL; | ||
906 | |||
826 | res = kzalloc(sizeof(*res) + privsz + ielen, gfp); | 907 | res = kzalloc(sizeof(*res) + privsz + ielen, gfp); |
827 | if (!res) | 908 | if (!res) |
828 | return NULL; | 909 | return NULL; |
@@ -861,6 +942,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, | |||
861 | if (res->pub.capability & WLAN_CAPABILITY_ESS) | 942 | if (res->pub.capability & WLAN_CAPABILITY_ESS) |
862 | regulatory_hint_found_beacon(wiphy, channel, gfp); | 943 | regulatory_hint_found_beacon(wiphy, channel, gfp); |
863 | 944 | ||
945 | trace_cfg80211_return_bss(&res->pub); | ||
864 | /* cfg80211_bss_update gives us a referenced result */ | 946 | /* cfg80211_bss_update gives us a referenced result */ |
865 | return &res->pub; | 947 | return &res->pub; |
866 | } | 948 | } |
@@ -962,6 +1044,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
962 | creq->ssids = (void *)&creq->channels[n_channels]; | 1044 | creq->ssids = (void *)&creq->channels[n_channels]; |
963 | creq->n_channels = n_channels; | 1045 | creq->n_channels = n_channels; |
964 | creq->n_ssids = 1; | 1046 | creq->n_ssids = 1; |
1047 | creq->scan_start = jiffies; | ||
965 | 1048 | ||
966 | /* translate "Scan on frequencies" request */ | 1049 | /* translate "Scan on frequencies" request */ |
967 | i = 0; | 1050 | i = 0; |
@@ -1026,7 +1109,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
1026 | creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; | 1109 | creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; |
1027 | 1110 | ||
1028 | rdev->scan_req = creq; | 1111 | rdev->scan_req = creq; |
1029 | err = rdev->ops->scan(wiphy, creq); | 1112 | err = rdev_scan(rdev, creq); |
1030 | if (err) { | 1113 | if (err) { |
1031 | rdev->scan_req = NULL; | 1114 | rdev->scan_req = NULL; |
1032 | /* creq will be freed below */ | 1115 | /* creq will be freed below */ |
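The conversions in this file (rdev->ops->scan() becoming rdev_scan(), and so on) follow the shape of the new rdev-ops.h wrappers: trace the call, invoke the driver-provided op, trace the returned value. Below is a plain-C illustration of that wrapper pattern, with printf() standing in for the cfg80211 tracepoints; every name in it is chosen for the example only and is not the real kernel API.

/*
 * Plain-C model of the rdev_*() wrapper pattern introduced by rdev-ops.h:
 * trace the call, invoke the driver op, trace the return value.
 */
#include <stdio.h>

struct scan_request { int n_channels; };

struct driver_ops {
	int (*scan)(struct scan_request *req);
};

struct registered_device {
	const char *name;
	const struct driver_ops *ops;
};

static int rdev_scan(struct registered_device *rdev,
		     struct scan_request *req)
{
	int ret;

	printf("trace: rdev_scan %s\n", rdev->name);	/* entry trace */
	ret = rdev->ops->scan(req);			/* driver op */
	printf("trace: rdev_return_int %s ret=%d\n",	/* return trace */
	       rdev->name, ret);
	return ret;
}

static int fake_driver_scan(struct scan_request *req)
{
	return req->n_channels > 0 ? 0 : -1;
}

int main(void)
{
	const struct driver_ops ops = { .scan = fake_driver_scan };
	struct registered_device rdev = { .name = "phy0", .ops = &ops };
	struct scan_request req = { .n_channels = 11 };

	return rdev_scan(&rdev, &req);
}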
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 6f39cb808302..c7490027237d 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <net/rtnetlink.h> | 16 | #include <net/rtnetlink.h> |
17 | #include "nl80211.h" | 17 | #include "nl80211.h" |
18 | #include "reg.h" | 18 | #include "reg.h" |
19 | #include "rdev-ops.h" | ||
19 | 20 | ||
20 | struct cfg80211_conn { | 21 | struct cfg80211_conn { |
21 | struct cfg80211_connect_params params; | 22 | struct cfg80211_connect_params params; |
@@ -138,10 +139,11 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) | |||
138 | 139 | ||
139 | request->wdev = wdev; | 140 | request->wdev = wdev; |
140 | request->wiphy = &rdev->wiphy; | 141 | request->wiphy = &rdev->wiphy; |
142 | request->scan_start = jiffies; | ||
141 | 143 | ||
142 | rdev->scan_req = request; | 144 | rdev->scan_req = request; |
143 | 145 | ||
144 | err = rdev->ops->scan(wdev->wiphy, request); | 146 | err = rdev_scan(rdev, request); |
145 | if (!err) { | 147 | if (!err) { |
146 | wdev->conn->state = CFG80211_CONN_SCANNING; | 148 | wdev->conn->state = CFG80211_CONN_SCANNING; |
147 | nl80211_send_scan_start(rdev, wdev); | 149 | nl80211_send_scan_start(rdev, wdev); |
@@ -179,7 +181,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) | |||
179 | params->ssid, params->ssid_len, | 181 | params->ssid, params->ssid_len, |
180 | NULL, 0, | 182 | NULL, 0, |
181 | params->key, params->key_len, | 183 | params->key, params->key_len, |
182 | params->key_idx); | 184 | params->key_idx, NULL, 0); |
183 | case CFG80211_CONN_ASSOCIATE_NEXT: | 185 | case CFG80211_CONN_ASSOCIATE_NEXT: |
184 | BUG_ON(!rdev->ops->assoc); | 186 | BUG_ON(!rdev->ops->assoc); |
185 | wdev->conn->state = CFG80211_CONN_ASSOCIATING; | 187 | wdev->conn->state = CFG80211_CONN_ASSOCIATING; |
@@ -716,7 +718,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | |||
716 | */ | 718 | */ |
717 | if (rdev->ops->del_key) | 719 | if (rdev->ops->del_key) |
718 | for (i = 0; i < 6; i++) | 720 | for (i = 0; i < 6; i++) |
719 | rdev->ops->del_key(wdev->wiphy, dev, i, false, NULL); | 721 | rdev_del_key(rdev, dev, i, false, NULL); |
720 | 722 | ||
721 | #ifdef CONFIG_CFG80211_WEXT | 723 | #ifdef CONFIG_CFG80211_WEXT |
722 | memset(&wrqu, 0, sizeof(wrqu)); | 724 | memset(&wrqu, 0, sizeof(wrqu)); |
@@ -892,7 +894,7 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
892 | } else { | 894 | } else { |
893 | wdev->sme_state = CFG80211_SME_CONNECTING; | 895 | wdev->sme_state = CFG80211_SME_CONNECTING; |
894 | wdev->connect_keys = connkeys; | 896 | wdev->connect_keys = connkeys; |
895 | err = rdev->ops->connect(&rdev->wiphy, dev, connect); | 897 | err = rdev_connect(rdev, dev, connect); |
896 | if (err) { | 898 | if (err) { |
897 | wdev->connect_keys = NULL; | 899 | wdev->connect_keys = NULL; |
898 | wdev->sme_state = CFG80211_SME_IDLE; | 900 | wdev->sme_state = CFG80211_SME_IDLE; |
@@ -964,7 +966,7 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, | |||
964 | if (err) | 966 | if (err) |
965 | return err; | 967 | return err; |
966 | } else { | 968 | } else { |
967 | err = rdev->ops->disconnect(&rdev->wiphy, dev, reason); | 969 | err = rdev_disconnect(rdev, dev, reason); |
968 | if (err) | 970 | if (err) |
969 | return err; | 971 | return err; |
970 | } | 972 | } |
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index ff574597a854..9bf6d5e32166 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <net/cfg80211.h> | 16 | #include <net/cfg80211.h> |
17 | #include "sysfs.h" | 17 | #include "sysfs.h" |
18 | #include "core.h" | 18 | #include "core.h" |
19 | #include "rdev-ops.h" | ||
19 | 20 | ||
20 | static inline struct cfg80211_registered_device *dev_to_rdev( | 21 | static inline struct cfg80211_registered_device *dev_to_rdev( |
21 | struct device *dev) | 22 | struct device *dev) |
@@ -94,7 +95,7 @@ static int wiphy_suspend(struct device *dev, pm_message_t state) | |||
94 | if (rdev->ops->suspend) { | 95 | if (rdev->ops->suspend) { |
95 | rtnl_lock(); | 96 | rtnl_lock(); |
96 | if (rdev->wiphy.registered) | 97 | if (rdev->wiphy.registered) |
97 | ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan); | 98 | ret = rdev_suspend(rdev); |
98 | rtnl_unlock(); | 99 | rtnl_unlock(); |
99 | } | 100 | } |
100 | 101 | ||
@@ -114,7 +115,7 @@ static int wiphy_resume(struct device *dev) | |||
114 | if (rdev->ops->resume) { | 115 | if (rdev->ops->resume) { |
115 | rtnl_lock(); | 116 | rtnl_lock(); |
116 | if (rdev->wiphy.registered) | 117 | if (rdev->wiphy.registered) |
117 | ret = rdev->ops->resume(&rdev->wiphy); | 118 | ret = rdev_resume(rdev); |
118 | rtnl_unlock(); | 119 | rtnl_unlock(); |
119 | } | 120 | } |
120 | 121 | ||
diff --git a/net/wireless/trace.c b/net/wireless/trace.c new file mode 100644 index 000000000000..95f997fad755 --- /dev/null +++ b/net/wireless/trace.c | |||
@@ -0,0 +1,7 @@ | |||
1 | #include <linux/module.h> | ||
2 | |||
3 | #ifndef __CHECKER__ | ||
4 | #define CREATE_TRACE_POINTS | ||
5 | #include "trace.h" | ||
6 | |||
7 | #endif | ||
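trace.c is the single translation unit that defines CREATE_TRACE_POINTS before pulling in trace.h, so the tracepoint bodies are emitted exactly once while every other includer only sees declarations; the __CHECKER__ guard keeps sparse away from the macro expansion. The same define-once, declare-everywhere header trick can be modelled in ordinary userspace C, as sketched below. Split the sketch into the named files and build them together; none of these names are the kernel's.

/* file: hooks.h -- userspace analogue of the CREATE_TRACE_POINTS trick:
 * every includer gets the declaration; the one file that defines
 * CREATE_HOOK_BODIES before including also gets the definition.
 * All names are illustrative; this is not the kernel tracepoint API. */
#ifndef HOOKS_H
#define HOOKS_H
void hook_scan_started(const char *dev);
#endif /* HOOKS_H */

#ifdef CREATE_HOOK_BODIES
#include <stdio.h>
void hook_scan_started(const char *dev)
{
	printf("hook: scan started on %s\n", dev);
}
#endif

/* file: hooks.c -- the single translation unit that emits the bodies,
 * mirroring net/wireless/trace.c above. */
#define CREATE_HOOK_BODIES
#include "hooks.h"

/* file: main.c -- any other user just includes the header. */
#include "hooks.h"

int main(void)
{
	hook_scan_started("wlan0");
	return 0;
}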
diff --git a/net/wireless/trace.h b/net/wireless/trace.h new file mode 100644 index 000000000000..2134576f426e --- /dev/null +++ b/net/wireless/trace.h | |||
@@ -0,0 +1,2324 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM cfg80211 | ||
3 | |||
4 | #if !defined(__RDEV_OPS_TRACE) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define __RDEV_OPS_TRACE | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | |||
9 | #include <linux/rtnetlink.h> | ||
10 | #include <net/cfg80211.h> | ||
11 | #include "core.h" | ||
12 | |||
13 | #define MAC_ENTRY(entry_mac) __array(u8, entry_mac, ETH_ALEN) | ||
14 | #define MAC_ASSIGN(entry_mac, given_mac) do { \ | ||
15 | if (given_mac) \ | ||
16 | memcpy(__entry->entry_mac, given_mac, ETH_ALEN); \ | ||
17 | else \ | ||
18 | memset(__entry->entry_mac, 0, ETH_ALEN); \ | ||
19 | } while (0) | ||
20 | #define MAC_PR_FMT "%pM" | ||
21 | #define MAC_PR_ARG(entry_mac) (__entry->entry_mac) | ||
22 | |||
23 | #define MAXNAME 32 | ||
24 | #define WIPHY_ENTRY __array(char, wiphy_name, 32) | ||
25 | #define WIPHY_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(wiphy), MAXNAME) | ||
26 | #define WIPHY_PR_FMT "%s" | ||
27 | #define WIPHY_PR_ARG __entry->wiphy_name | ||
28 | |||
29 | #define WDEV_ENTRY __field(u32, id) | ||
30 | #define WDEV_ASSIGN (__entry->id) = (wdev ? wdev->identifier : 0) | ||
31 | #define WDEV_PR_FMT "wdev(%u)" | ||
32 | #define WDEV_PR_ARG (__entry->id) | ||
33 | |||
34 | #define NETDEV_ENTRY __array(char, name, IFNAMSIZ) \ | ||
35 | __field(int, ifindex) | ||
36 | #define NETDEV_ASSIGN \ | ||
37 | do { \ | ||
38 | memcpy(__entry->name, netdev->name, IFNAMSIZ); \ | ||
39 | (__entry->ifindex) = (netdev->ifindex); \ | ||
40 | } while (0) | ||
41 | #define NETDEV_PR_FMT "netdev:%s(%d)" | ||
42 | #define NETDEV_PR_ARG __entry->name, __entry->ifindex | ||
43 | |||
44 | #define MESH_CFG_ENTRY __field(u16, dot11MeshRetryTimeout) \ | ||
45 | __field(u16, dot11MeshConfirmTimeout) \ | ||
46 | __field(u16, dot11MeshHoldingTimeout) \ | ||
47 | __field(u16, dot11MeshMaxPeerLinks) \ | ||
48 | __field(u8, dot11MeshMaxRetries) \ | ||
49 | __field(u8, dot11MeshTTL) \ | ||
50 | __field(u8, element_ttl) \ | ||
51 | __field(bool, auto_open_plinks) \ | ||
52 | __field(u32, dot11MeshNbrOffsetMaxNeighbor) \ | ||
53 | __field(u8, dot11MeshHWMPmaxPREQretries) \ | ||
54 | __field(u32, path_refresh_time) \ | ||
55 | __field(u32, dot11MeshHWMPactivePathTimeout) \ | ||
56 | __field(u16, min_discovery_timeout) \ | ||
57 | __field(u16, dot11MeshHWMPpreqMinInterval) \ | ||
58 | __field(u16, dot11MeshHWMPperrMinInterval) \ | ||
59 | __field(u16, dot11MeshHWMPnetDiameterTraversalTime) \ | ||
60 | __field(u8, dot11MeshHWMPRootMode) \ | ||
61 | __field(u16, dot11MeshHWMPRannInterval) \ | ||
62 | __field(bool, dot11MeshGateAnnouncementProtocol) \ | ||
63 | __field(bool, dot11MeshForwarding) \ | ||
64 | __field(s32, rssi_threshold) \ | ||
65 | __field(u16, ht_opmode) \ | ||
66 | __field(u32, dot11MeshHWMPactivePathToRootTimeout) \ | ||
67 | __field(u16, dot11MeshHWMProotInterval) \ | ||
68 | __field(u16, dot11MeshHWMPconfirmationInterval) | ||
69 | #define MESH_CFG_ASSIGN \ | ||
70 | do { \ | ||
71 | __entry->dot11MeshRetryTimeout = conf->dot11MeshRetryTimeout; \ | ||
72 | __entry->dot11MeshConfirmTimeout = \ | ||
73 | conf->dot11MeshConfirmTimeout; \ | ||
74 | __entry->dot11MeshHoldingTimeout = \ | ||
75 | conf->dot11MeshHoldingTimeout; \ | ||
76 | __entry->dot11MeshMaxPeerLinks = conf->dot11MeshMaxPeerLinks; \ | ||
77 | __entry->dot11MeshMaxRetries = conf->dot11MeshMaxRetries; \ | ||
78 | __entry->dot11MeshTTL = conf->dot11MeshTTL; \ | ||
79 | __entry->element_ttl = conf->element_ttl; \ | ||
80 | __entry->auto_open_plinks = conf->auto_open_plinks; \ | ||
81 | __entry->dot11MeshNbrOffsetMaxNeighbor = \ | ||
82 | conf->dot11MeshNbrOffsetMaxNeighbor; \ | ||
83 | __entry->dot11MeshHWMPmaxPREQretries = \ | ||
84 | conf->dot11MeshHWMPmaxPREQretries; \ | ||
85 | __entry->path_refresh_time = conf->path_refresh_time; \ | ||
86 | __entry->dot11MeshHWMPactivePathTimeout = \ | ||
87 | conf->dot11MeshHWMPactivePathTimeout; \ | ||
88 | __entry->min_discovery_timeout = conf->min_discovery_timeout; \ | ||
89 | __entry->dot11MeshHWMPpreqMinInterval = \ | ||
90 | conf->dot11MeshHWMPpreqMinInterval; \ | ||
91 | __entry->dot11MeshHWMPperrMinInterval = \ | ||
92 | conf->dot11MeshHWMPperrMinInterval; \ | ||
93 | __entry->dot11MeshHWMPnetDiameterTraversalTime = \ | ||
94 | conf->dot11MeshHWMPnetDiameterTraversalTime; \ | ||
95 | __entry->dot11MeshHWMPRootMode = conf->dot11MeshHWMPRootMode; \ | ||
96 | __entry->dot11MeshHWMPRannInterval = \ | ||
97 | conf->dot11MeshHWMPRannInterval; \ | ||
98 | __entry->dot11MeshGateAnnouncementProtocol = \ | ||
99 | conf->dot11MeshGateAnnouncementProtocol; \ | ||
100 | __entry->dot11MeshForwarding = conf->dot11MeshForwarding; \ | ||
101 | __entry->rssi_threshold = conf->rssi_threshold; \ | ||
102 | __entry->ht_opmode = conf->ht_opmode; \ | ||
103 | __entry->dot11MeshHWMPactivePathToRootTimeout = \ | ||
104 | conf->dot11MeshHWMPactivePathToRootTimeout; \ | ||
105 | __entry->dot11MeshHWMProotInterval = \ | ||
106 | conf->dot11MeshHWMProotInterval; \ | ||
107 | __entry->dot11MeshHWMPconfirmationInterval = \ | ||
108 | conf->dot11MeshHWMPconfirmationInterval; \ | ||
109 | } while (0) | ||
110 | |||
111 | #define CHAN_ENTRY __field(enum ieee80211_band, band) \ | ||
112 | __field(u16, center_freq) | ||
113 | #define CHAN_ASSIGN(chan) \ | ||
114 | do { \ | ||
115 | if (chan) { \ | ||
116 | __entry->band = chan->band; \ | ||
117 | __entry->center_freq = chan->center_freq; \ | ||
118 | } else { \ | ||
119 | __entry->band = 0; \ | ||
120 | __entry->center_freq = 0; \ | ||
121 | } \ | ||
122 | } while (0) | ||
123 | #define CHAN_PR_FMT "band: %d, freq: %u" | ||
124 | #define CHAN_PR_ARG __entry->band, __entry->center_freq | ||
125 | |||
126 | #define CHAN_DEF_ENTRY __field(enum ieee80211_band, band) \ | ||
127 | __field(u32, control_freq) \ | ||
128 | __field(u32, width) \ | ||
129 | __field(u32, center_freq1) \ | ||
130 | __field(u32, center_freq2) | ||
131 | #define CHAN_DEF_ASSIGN(chandef) \ | ||
132 | do { \ | ||
133 | if ((chandef) && (chandef)->chan) { \ | ||
134 | __entry->band = (chandef)->chan->band; \ | ||
135 | __entry->control_freq = \ | ||
136 | (chandef)->chan->center_freq; \ | ||
137 | __entry->width = (chandef)->width; \ | ||
138 | __entry->center_freq1 = (chandef)->center_freq1;\ | ||
139 | __entry->center_freq2 = (chandef)->center_freq2;\ | ||
140 | } else { \ | ||
141 | __entry->band = 0; \ | ||
142 | __entry->control_freq = 0; \ | ||
143 | __entry->width = 0; \ | ||
144 | __entry->center_freq1 = 0; \ | ||
145 | __entry->center_freq2 = 0; \ | ||
146 | } \ | ||
147 | } while (0) | ||
148 | #define CHAN_DEF_PR_FMT \ | ||
149 | "band: %d, control freq: %u, width: %d, cf1: %u, cf2: %u" | ||
150 | #define CHAN_DEF_PR_ARG __entry->band, __entry->control_freq, \ | ||
151 | __entry->width, __entry->center_freq1, \ | ||
152 | __entry->center_freq2 | ||
153 | |||
154 | #define SINFO_ENTRY __field(int, generation) \ | ||
155 | __field(u32, connected_time) \ | ||
156 | __field(u32, inactive_time) \ | ||
157 | __field(u32, rx_bytes) \ | ||
158 | __field(u32, tx_bytes) \ | ||
159 | __field(u32, rx_packets) \ | ||
160 | __field(u32, tx_packets) \ | ||
161 | __field(u32, tx_retries) \ | ||
162 | __field(u32, tx_failed) \ | ||
163 | __field(u32, rx_dropped_misc) \ | ||
164 | __field(u32, beacon_loss_count) \ | ||
165 | __field(u16, llid) \ | ||
166 | __field(u16, plid) \ | ||
167 | __field(u8, plink_state) | ||
168 | #define SINFO_ASSIGN \ | ||
169 | do { \ | ||
170 | __entry->generation = sinfo->generation; \ | ||
171 | __entry->connected_time = sinfo->connected_time; \ | ||
172 | __entry->inactive_time = sinfo->inactive_time; \ | ||
173 | __entry->rx_bytes = sinfo->rx_bytes; \ | ||
174 | __entry->tx_bytes = sinfo->tx_bytes; \ | ||
175 | __entry->rx_packets = sinfo->rx_packets; \ | ||
176 | __entry->tx_packets = sinfo->tx_packets; \ | ||
177 | __entry->tx_retries = sinfo->tx_retries; \ | ||
178 | __entry->tx_failed = sinfo->tx_failed; \ | ||
179 | __entry->rx_dropped_misc = sinfo->rx_dropped_misc; \ | ||
180 | __entry->beacon_loss_count = sinfo->beacon_loss_count; \ | ||
181 | __entry->llid = sinfo->llid; \ | ||
182 | __entry->plid = sinfo->plid; \ | ||
183 | __entry->plink_state = sinfo->plink_state; \ | ||
184 | } while (0) | ||
185 | |||
186 | #define BOOL_TO_STR(bo) (bo) ? "true" : "false" | ||
187 | |||
188 | /************************************************************* | ||
189 | * rdev->ops traces * | ||
190 | *************************************************************/ | ||
191 | |||
192 | TRACE_EVENT(rdev_suspend, | ||
193 | TP_PROTO(struct wiphy *wiphy, struct cfg80211_wowlan *wow), | ||
194 | TP_ARGS(wiphy, wow), | ||
195 | TP_STRUCT__entry( | ||
196 | WIPHY_ENTRY | ||
197 | __field(bool, any) | ||
198 | __field(bool, disconnect) | ||
199 | __field(bool, magic_pkt) | ||
200 | __field(bool, gtk_rekey_failure) | ||
201 | __field(bool, eap_identity_req) | ||
202 | __field(bool, four_way_handshake) | ||
203 | __field(bool, rfkill_release) | ||
204 | __field(bool, valid_wow) | ||
205 | ), | ||
206 | TP_fast_assign( | ||
207 | WIPHY_ASSIGN; | ||
208 | if (wow) { | ||
209 | __entry->any = wow->any; | ||
210 | __entry->disconnect = wow->disconnect; | ||
211 | __entry->magic_pkt = wow->magic_pkt; | ||
212 | __entry->gtk_rekey_failure = wow->gtk_rekey_failure; | ||
213 | __entry->eap_identity_req = wow->eap_identity_req; | ||
214 | __entry->four_way_handshake = wow->four_way_handshake; | ||
215 | __entry->rfkill_release = wow->rfkill_release; | ||
216 | __entry->valid_wow = true; | ||
217 | } else { | ||
218 | __entry->valid_wow = false; | ||
219 | } | ||
220 | ), | ||
221 | TP_printk(WIPHY_PR_FMT ", wow%s - any: %d, disconnect: %d, " | ||
222 | "magic pkt: %d, gtk rekey failure: %d, eap identity req: %d, " | ||
223 | "four way handshake: %d, rfkill release: %d.", | ||
224 | WIPHY_PR_ARG, __entry->valid_wow ? "" : "(Not configured!)", | ||
225 | __entry->any, __entry->disconnect, __entry->magic_pkt, | ||
226 | __entry->gtk_rekey_failure, __entry->eap_identity_req, | ||
227 | __entry->four_way_handshake, __entry->rfkill_release) | ||
228 | ); | ||
229 | |||
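Each TRACE_EVENT in this header splits an event into a fixed-layout record (TP_STRUCT__entry), a cheap snapshot of the arguments into that record (TP_fast_assign) and a formatter that runs only when the trace buffer is read (TP_printk); rdev_suspend above, for example, copies a handful of booleans out of the wowlan configuration instead of keeping a pointer to it. A userspace sketch of that three-part shape for the rdev_suspend event follows; it only illustrates the structure and is not the real ftrace macro expansion.

/*
 * Userspace model of what a TRACE_EVENT such as rdev_suspend boils down
 * to: a small fixed record, a cheap "fast assign" that snapshots the
 * arguments, and a printer that formats the record later.
 */
#include <stdio.h>
#include <string.h>

#define MAXNAME 32

struct wow_config { int any, disconnect, magic_pkt; };

struct suspend_entry {		/* TP_STRUCT__entry */
	char wiphy_name[MAXNAME];
	int valid_wow;
	int any, disconnect, magic_pkt;
};

static void suspend_assign(struct suspend_entry *e, const char *wiphy_name,
			   const struct wow_config *wow)
{				/* TP_fast_assign */
	strncpy(e->wiphy_name, wiphy_name, MAXNAME - 1);
	e->wiphy_name[MAXNAME - 1] = '\0';
	e->valid_wow = wow != NULL;
	if (wow) {
		e->any = wow->any;
		e->disconnect = wow->disconnect;
		e->magic_pkt = wow->magic_pkt;
	} else {
		e->any = e->disconnect = e->magic_pkt = 0;
	}
}

static void suspend_print(const struct suspend_entry *e)
{				/* TP_printk */
	printf("%s, wow%s - any: %d, disconnect: %d, magic pkt: %d\n",
	       e->wiphy_name, e->valid_wow ? "" : "(Not configured!)",
	       e->any, e->disconnect, e->magic_pkt);
}

int main(void)
{
	struct wow_config wow = { .any = 0, .disconnect = 1, .magic_pkt = 1 };
	struct suspend_entry e = { 0 };

	suspend_assign(&e, "phy0", &wow);
	suspend_print(&e);
	return 0;
}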
230 | TRACE_EVENT(rdev_return_int, | ||
231 | TP_PROTO(struct wiphy *wiphy, int ret), | ||
232 | TP_ARGS(wiphy, ret), | ||
233 | TP_STRUCT__entry( | ||
234 | WIPHY_ENTRY | ||
235 | __field(int, ret) | ||
236 | ), | ||
237 | TP_fast_assign( | ||
238 | WIPHY_ASSIGN; | ||
239 | __entry->ret = ret; | ||
240 | ), | ||
241 | TP_printk(WIPHY_PR_FMT ", returned: %d", WIPHY_PR_ARG, __entry->ret) | ||
242 | ); | ||
243 | |||
244 | TRACE_EVENT(rdev_scan, | ||
245 | TP_PROTO(struct wiphy *wiphy, struct cfg80211_scan_request *request), | ||
246 | TP_ARGS(wiphy, request), | ||
247 | TP_STRUCT__entry( | ||
248 | WIPHY_ENTRY | ||
249 | ), | ||
250 | TP_fast_assign( | ||
251 | WIPHY_ASSIGN; | ||
252 | ), | ||
253 | TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) | ||
254 | ); | ||
255 | |||
256 | DECLARE_EVENT_CLASS(wiphy_only_evt, | ||
257 | TP_PROTO(struct wiphy *wiphy), | ||
258 | TP_ARGS(wiphy), | ||
259 | TP_STRUCT__entry( | ||
260 | WIPHY_ENTRY | ||
261 | ), | ||
262 | TP_fast_assign( | ||
263 | WIPHY_ASSIGN; | ||
264 | ), | ||
265 | TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) | ||
266 | ); | ||
267 | |||
268 | DEFINE_EVENT(wiphy_only_evt, rdev_resume, | ||
269 | TP_PROTO(struct wiphy *wiphy), | ||
270 | TP_ARGS(wiphy) | ||
271 | ); | ||
272 | |||
273 | DEFINE_EVENT(wiphy_only_evt, rdev_return_void, | ||
274 | TP_PROTO(struct wiphy *wiphy), | ||
275 | TP_ARGS(wiphy) | ||
276 | ); | ||
277 | |||
278 | DEFINE_EVENT(wiphy_only_evt, rdev_get_ringparam, | ||
279 | TP_PROTO(struct wiphy *wiphy), | ||
280 | TP_ARGS(wiphy) | ||
281 | ); | ||
282 | |||
283 | DEFINE_EVENT(wiphy_only_evt, rdev_get_antenna, | ||
284 | TP_PROTO(struct wiphy *wiphy), | ||
285 | TP_ARGS(wiphy) | ||
286 | ); | ||
287 | |||
288 | DEFINE_EVENT(wiphy_only_evt, rdev_rfkill_poll, | ||
289 | TP_PROTO(struct wiphy *wiphy), | ||
290 | TP_ARGS(wiphy) | ||
291 | ); | ||
292 | |||
293 | DECLARE_EVENT_CLASS(wiphy_enabled_evt, | ||
294 | TP_PROTO(struct wiphy *wiphy, bool enabled), | ||
295 | TP_ARGS(wiphy, enabled), | ||
296 | TP_STRUCT__entry( | ||
297 | WIPHY_ENTRY | ||
298 | __field(bool, enabled) | ||
299 | ), | ||
300 | TP_fast_assign( | ||
301 | WIPHY_ASSIGN; | ||
302 | __entry->enabled = enabled; | ||
303 | ), | ||
304 | TP_printk(WIPHY_PR_FMT ", %senabled ", | ||
305 | WIPHY_PR_ARG, __entry->enabled ? "" : "not ") | ||
306 | ); | ||
307 | |||
308 | DEFINE_EVENT(wiphy_enabled_evt, rdev_set_wakeup, | ||
309 | TP_PROTO(struct wiphy *wiphy, bool enabled), | ||
310 | TP_ARGS(wiphy, enabled) | ||
311 | ); | ||
312 | |||
313 | TRACE_EVENT(rdev_add_virtual_intf, | ||
314 | TP_PROTO(struct wiphy *wiphy, char *name, enum nl80211_iftype type), | ||
315 | TP_ARGS(wiphy, name, type), | ||
316 | TP_STRUCT__entry( | ||
317 | WIPHY_ENTRY | ||
318 | __string(vir_intf_name, name ? name : "<noname>") | ||
319 | __field(enum nl80211_iftype, type) | ||
320 | ), | ||
321 | TP_fast_assign( | ||
322 | WIPHY_ASSIGN; | ||
323 | __assign_str(vir_intf_name, name ? name : "<noname>"); | ||
324 | __entry->type = type; | ||
325 | ), | ||
326 | TP_printk(WIPHY_PR_FMT ", virtual intf name: %s, type: %d", | ||
327 | WIPHY_PR_ARG, __get_str(vir_intf_name), __entry->type) | ||
328 | ); | ||
329 | |||
330 | DECLARE_EVENT_CLASS(wiphy_wdev_evt, | ||
331 | TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), | ||
332 | TP_ARGS(wiphy, wdev), | ||
333 | TP_STRUCT__entry( | ||
334 | WIPHY_ENTRY | ||
335 | WDEV_ENTRY | ||
336 | ), | ||
337 | TP_fast_assign( | ||
338 | WIPHY_ASSIGN; | ||
339 | WDEV_ASSIGN; | ||
340 | ), | ||
341 | TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) | ||
342 | ); | ||
343 | |||
344 | DEFINE_EVENT(wiphy_wdev_evt, rdev_return_wdev, | ||
345 | TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), | ||
346 | TP_ARGS(wiphy, wdev) | ||
347 | ); | ||
348 | |||
349 | DEFINE_EVENT(wiphy_wdev_evt, rdev_del_virtual_intf, | ||
350 | TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), | ||
351 | TP_ARGS(wiphy, wdev) | ||
352 | ); | ||
353 | |||
354 | TRACE_EVENT(rdev_change_virtual_intf, | ||
355 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
356 | enum nl80211_iftype type), | ||
357 | TP_ARGS(wiphy, netdev, type), | ||
358 | TP_STRUCT__entry( | ||
359 | WIPHY_ENTRY | ||
360 | NETDEV_ENTRY | ||
361 | __field(enum nl80211_iftype, type) | ||
362 | ), | ||
363 | TP_fast_assign( | ||
364 | WIPHY_ASSIGN; | ||
365 | NETDEV_ASSIGN; | ||
366 | __entry->type = type; | ||
367 | ), | ||
368 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", type: %d", | ||
369 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->type) | ||
370 | ); | ||
371 | |||
372 | DECLARE_EVENT_CLASS(key_handle, | ||
373 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, | ||
374 | bool pairwise, const u8 *mac_addr), | ||
375 | TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr), | ||
376 | TP_STRUCT__entry( | ||
377 | WIPHY_ENTRY | ||
378 | NETDEV_ENTRY | ||
379 | MAC_ENTRY(mac_addr) | ||
380 | __field(u8, key_index) | ||
381 | __field(bool, pairwise) | ||
382 | ), | ||
383 | TP_fast_assign( | ||
384 | WIPHY_ASSIGN; | ||
385 | NETDEV_ASSIGN; | ||
386 | MAC_ASSIGN(mac_addr, mac_addr); | ||
387 | __entry->key_index = key_index; | ||
388 | __entry->pairwise = pairwise; | ||
389 | ), | ||
390 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key_index: %u, pairwise: %s, mac addr: " MAC_PR_FMT, | ||
391 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index, | ||
392 | BOOL_TO_STR(__entry->pairwise), MAC_PR_ARG(mac_addr)) | ||
393 | ); | ||
394 | |||
395 | DEFINE_EVENT(key_handle, rdev_add_key, | ||
396 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, | ||
397 | bool pairwise, const u8 *mac_addr), | ||
398 | TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr) | ||
399 | ); | ||
400 | |||
401 | DEFINE_EVENT(key_handle, rdev_get_key, | ||
402 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, | ||
403 | bool pairwise, const u8 *mac_addr), | ||
404 | TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr) | ||
405 | ); | ||
406 | |||
407 | DEFINE_EVENT(key_handle, rdev_del_key, | ||
408 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, | ||
409 | bool pairwise, const u8 *mac_addr), | ||
410 | TP_ARGS(wiphy, netdev, key_index, pairwise, mac_addr) | ||
411 | ); | ||
412 | |||
413 | TRACE_EVENT(rdev_set_default_key, | ||
414 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, | ||
415 | bool unicast, bool multicast), | ||
416 | TP_ARGS(wiphy, netdev, key_index, unicast, multicast), | ||
417 | TP_STRUCT__entry( | ||
418 | WIPHY_ENTRY | ||
419 | NETDEV_ENTRY | ||
420 | __field(u8, key_index) | ||
421 | __field(bool, unicast) | ||
422 | __field(bool, multicast) | ||
423 | ), | ||
424 | TP_fast_assign( | ||
425 | WIPHY_ASSIGN; | ||
426 | NETDEV_ASSIGN; | ||
427 | __entry->key_index = key_index; | ||
428 | __entry->unicast = unicast; | ||
429 | __entry->multicast = multicast; | ||
430 | ), | ||
431 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u, unicast: %s, multicast: %s", | ||
432 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index, | ||
433 | BOOL_TO_STR(__entry->unicast), | ||
434 | BOOL_TO_STR(__entry->multicast)) | ||
435 | ); | ||
436 | |||
437 | TRACE_EVENT(rdev_set_default_mgmt_key, | ||
438 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index), | ||
439 | TP_ARGS(wiphy, netdev, key_index), | ||
440 | TP_STRUCT__entry( | ||
441 | WIPHY_ENTRY | ||
442 | NETDEV_ENTRY | ||
443 | __field(u8, key_index) | ||
444 | ), | ||
445 | TP_fast_assign( | ||
446 | WIPHY_ASSIGN; | ||
447 | NETDEV_ASSIGN; | ||
448 | __entry->key_index = key_index; | ||
449 | ), | ||
450 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u", | ||
451 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index) | ||
452 | ); | ||
453 | |||
454 | TRACE_EVENT(rdev_start_ap, | ||
455 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
456 | struct cfg80211_ap_settings *settings), | ||
457 | TP_ARGS(wiphy, netdev, settings), | ||
458 | TP_STRUCT__entry( | ||
459 | WIPHY_ENTRY | ||
460 | NETDEV_ENTRY | ||
461 | CHAN_DEF_ENTRY | ||
462 | __field(int, beacon_interval) | ||
463 | __field(int, dtim_period) | ||
464 | __array(char, ssid, IEEE80211_MAX_SSID_LEN + 1) | ||
465 | __field(enum nl80211_hidden_ssid, hidden_ssid) | ||
466 | __field(u32, wpa_ver) | ||
467 | __field(bool, privacy) | ||
468 | __field(enum nl80211_auth_type, auth_type) | ||
469 | __field(int, inactivity_timeout) | ||
470 | ), | ||
471 | TP_fast_assign( | ||
472 | WIPHY_ASSIGN; | ||
473 | NETDEV_ASSIGN; | ||
474 | CHAN_DEF_ASSIGN(&settings->chandef); | ||
475 | __entry->beacon_interval = settings->beacon_interval; | ||
476 | __entry->dtim_period = settings->dtim_period; | ||
477 | __entry->hidden_ssid = settings->hidden_ssid; | ||
478 | __entry->wpa_ver = settings->crypto.wpa_versions; | ||
479 | __entry->privacy = settings->privacy; | ||
480 | __entry->auth_type = settings->auth_type; | ||
481 | __entry->inactivity_timeout = settings->inactivity_timeout; | ||
482 | memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); | ||
483 | memcpy(__entry->ssid, settings->ssid, settings->ssid_len); | ||
484 | ), | ||
485 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", AP settings - ssid: %s, " | ||
486 | CHAN_DEF_PR_FMT ", beacon interval: %d, dtim period: %d, " | ||
487 | "hidden ssid: %d, wpa versions: %u, privacy: %s, " | ||
488 | "auth type: %d, inactivity timeout: %d", | ||
489 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->ssid, CHAN_DEF_PR_ARG, | ||
490 | __entry->beacon_interval, __entry->dtim_period, | ||
491 | __entry->hidden_ssid, __entry->wpa_ver, | ||
492 | BOOL_TO_STR(__entry->privacy), __entry->auth_type, | ||
493 | __entry->inactivity_timeout) | ||
494 | ); | ||
495 | |||
496 | TRACE_EVENT(rdev_change_beacon, | ||
497 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
498 | struct cfg80211_beacon_data *info), | ||
499 | TP_ARGS(wiphy, netdev, info), | ||
500 | TP_STRUCT__entry( | ||
501 | WIPHY_ENTRY | ||
502 | NETDEV_ENTRY | ||
503 | __dynamic_array(u8, head, info ? info->head_len : 0) | ||
504 | __dynamic_array(u8, tail, info ? info->tail_len : 0) | ||
505 | __dynamic_array(u8, beacon_ies, info ? info->beacon_ies_len : 0) | ||
506 | __dynamic_array(u8, proberesp_ies, | ||
507 | info ? info->proberesp_ies_len : 0) | ||
508 | __dynamic_array(u8, assocresp_ies, | ||
509 | info ? info->assocresp_ies_len : 0) | ||
510 | __dynamic_array(u8, probe_resp, info ? info->probe_resp_len : 0) | ||
511 | ), | ||
512 | TP_fast_assign( | ||
513 | WIPHY_ASSIGN; | ||
514 | NETDEV_ASSIGN; | ||
515 | if (info) { | ||
516 | if (info->head) | ||
517 | memcpy(__get_dynamic_array(head), info->head, | ||
518 | info->head_len); | ||
519 | if (info->tail) | ||
520 | memcpy(__get_dynamic_array(tail), info->tail, | ||
521 | info->tail_len); | ||
522 | if (info->beacon_ies) | ||
523 | memcpy(__get_dynamic_array(beacon_ies), | ||
524 | info->beacon_ies, info->beacon_ies_len); | ||
525 | if (info->proberesp_ies) | ||
526 | memcpy(__get_dynamic_array(proberesp_ies), | ||
527 | info->proberesp_ies, | ||
528 | info->proberesp_ies_len); | ||
529 | if (info->assocresp_ies) | ||
530 | memcpy(__get_dynamic_array(assocresp_ies), | ||
531 | info->assocresp_ies, | ||
532 | info->assocresp_ies_len); | ||
533 | if (info->probe_resp) | ||
534 | memcpy(__get_dynamic_array(probe_resp), | ||
535 | info->probe_resp, info->probe_resp_len); | ||
536 | } | ||
537 | ), | ||
538 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG) | ||
539 | ); | ||
540 | |||
541 | DECLARE_EVENT_CLASS(wiphy_netdev_evt, | ||
542 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), | ||
543 | TP_ARGS(wiphy, netdev), | ||
544 | TP_STRUCT__entry( | ||
545 | WIPHY_ENTRY | ||
546 | NETDEV_ENTRY | ||
547 | ), | ||
548 | TP_fast_assign( | ||
549 | WIPHY_ASSIGN; | ||
550 | NETDEV_ASSIGN; | ||
551 | ), | ||
552 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG) | ||
553 | ); | ||
554 | |||
555 | DEFINE_EVENT(wiphy_netdev_evt, rdev_stop_ap, | ||
556 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), | ||
557 | TP_ARGS(wiphy, netdev) | ||
558 | ); | ||
559 | |||
560 | DEFINE_EVENT(wiphy_netdev_evt, rdev_get_et_stats, | ||
561 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), | ||
562 | TP_ARGS(wiphy, netdev) | ||
563 | ); | ||
564 | |||
565 | DEFINE_EVENT(wiphy_netdev_evt, rdev_sched_scan_stop, | ||
566 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), | ||
567 | TP_ARGS(wiphy, netdev) | ||
568 | ); | ||
569 | |||
570 | DEFINE_EVENT(wiphy_netdev_evt, rdev_set_rekey_data, | ||
571 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), | ||
572 | TP_ARGS(wiphy, netdev) | ||
573 | ); | ||
574 | |||
575 | DEFINE_EVENT(wiphy_netdev_evt, rdev_get_mesh_config, | ||
576 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), | ||
577 | TP_ARGS(wiphy, netdev) | ||
578 | ); | ||
579 | |||
580 | DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_mesh, | ||
581 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), | ||
582 | TP_ARGS(wiphy, netdev) | ||
583 | ); | ||
584 | |||
585 | DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_ibss, | ||
586 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), | ||
587 | TP_ARGS(wiphy, netdev) | ||
588 | ); | ||
589 | |||
590 | DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa, | ||
591 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), | ||
592 | TP_ARGS(wiphy, netdev) | ||
593 | ); | ||
594 | |||
595 | DECLARE_EVENT_CLASS(station_add_change, | ||
596 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac, | ||
597 | struct station_parameters *params), | ||
598 | TP_ARGS(wiphy, netdev, mac, params), | ||
599 | TP_STRUCT__entry( | ||
600 | WIPHY_ENTRY | ||
601 | NETDEV_ENTRY | ||
602 | MAC_ENTRY(sta_mac) | ||
603 | __field(u32, sta_flags_mask) | ||
604 | __field(u32, sta_flags_set) | ||
605 | __field(u32, sta_modify_mask) | ||
606 | __field(int, listen_interval) | ||
607 | __field(u16, aid) | ||
608 | __field(u8, plink_action) | ||
609 | __field(u8, plink_state) | ||
610 | __field(u8, uapsd_queues) | ||
611 | __array(u8, ht_capa, (int)sizeof(struct ieee80211_ht_cap)) | ||
612 | ), | ||
613 | TP_fast_assign( | ||
614 | WIPHY_ASSIGN; | ||
615 | NETDEV_ASSIGN; | ||
616 | MAC_ASSIGN(sta_mac, mac); | ||
617 | __entry->sta_flags_mask = params->sta_flags_mask; | ||
618 | __entry->sta_flags_set = params->sta_flags_set; | ||
619 | __entry->sta_modify_mask = params->sta_modify_mask; | ||
620 | __entry->listen_interval = params->listen_interval; | ||
621 | __entry->aid = params->aid; | ||
622 | __entry->plink_action = params->plink_action; | ||
623 | __entry->plink_state = params->plink_state; | ||
624 | __entry->uapsd_queues = params->uapsd_queues; | ||
625 | memset(__entry->ht_capa, 0, sizeof(struct ieee80211_ht_cap)); | ||
626 | if (params->ht_capa) | ||
627 | memcpy(__entry->ht_capa, params->ht_capa, | ||
628 | sizeof(struct ieee80211_ht_cap)); | ||
629 | ), | ||
630 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT | ||
631 | ", station flags mask: %u, station flags set: %u, " | ||
632 | "station modify mask: %u, listen interval: %d, aid: %u, " | ||
633 | "plink action: %u, plink state: %u, uapsd queues: %u", | ||
634 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac), | ||
635 | __entry->sta_flags_mask, __entry->sta_flags_set, | ||
636 | __entry->sta_modify_mask, __entry->listen_interval, | ||
637 | __entry->aid, __entry->plink_action, __entry->plink_state, | ||
638 | __entry->uapsd_queues) | ||
639 | ); | ||
640 | |||
641 | DEFINE_EVENT(station_add_change, rdev_add_station, | ||
642 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac, | ||
643 | struct station_parameters *params), | ||
644 | TP_ARGS(wiphy, netdev, mac, params) | ||
645 | ); | ||
646 | |||
647 | DEFINE_EVENT(station_add_change, rdev_change_station, | ||
648 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac, | ||
649 | struct station_parameters *params), | ||
650 | TP_ARGS(wiphy, netdev, mac, params) | ||
651 | ); | ||
652 | |||
653 | DECLARE_EVENT_CLASS(wiphy_netdev_mac_evt, | ||
654 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), | ||
655 | TP_ARGS(wiphy, netdev, mac), | ||
656 | TP_STRUCT__entry( | ||
657 | WIPHY_ENTRY | ||
658 | NETDEV_ENTRY | ||
659 | MAC_ENTRY(sta_mac) | ||
660 | ), | ||
661 | TP_fast_assign( | ||
662 | WIPHY_ASSIGN; | ||
663 | NETDEV_ASSIGN; | ||
664 | MAC_ASSIGN(sta_mac, mac); | ||
665 | ), | ||
666 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac: " MAC_PR_FMT, | ||
667 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac)) | ||
668 | ); | ||
669 | |||
670 | DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_del_station, | ||
671 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), | ||
672 | TP_ARGS(wiphy, netdev, mac) | ||
673 | ); | ||
674 | |||
675 | DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_get_station, | ||
676 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), | ||
677 | TP_ARGS(wiphy, netdev, mac) | ||
678 | ); | ||
679 | |||
680 | DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_del_mpath, | ||
681 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), | ||
682 | TP_ARGS(wiphy, netdev, mac) | ||
683 | ); | ||
684 | |||
685 | DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_set_wds_peer, | ||
686 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), | ||
687 | TP_ARGS(wiphy, netdev, mac) | ||
688 | ); | ||
689 | |||
690 | TRACE_EVENT(rdev_dump_station, | ||
691 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx, | ||
692 | u8 *mac), | ||
693 | TP_ARGS(wiphy, netdev, idx, mac), | ||
694 | TP_STRUCT__entry( | ||
695 | WIPHY_ENTRY | ||
696 | NETDEV_ENTRY | ||
697 | MAC_ENTRY(sta_mac) | ||
698 | __field(int, idx) | ||
699 | ), | ||
700 | TP_fast_assign( | ||
701 | WIPHY_ASSIGN; | ||
702 | NETDEV_ASSIGN; | ||
703 | MAC_ASSIGN(sta_mac, mac); | ||
704 | __entry->idx = idx; | ||
705 | ), | ||
706 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT ", idx: %d", | ||
707 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac), | ||
708 | __entry->idx) | ||
709 | ); | ||
710 | |||
711 | TRACE_EVENT(rdev_return_int_station_info, | ||
712 | TP_PROTO(struct wiphy *wiphy, int ret, struct station_info *sinfo), | ||
713 | TP_ARGS(wiphy, ret, sinfo), | ||
714 | TP_STRUCT__entry( | ||
715 | WIPHY_ENTRY | ||
716 | __field(int, ret) | ||
717 | SINFO_ENTRY | ||
718 | ), | ||
719 | TP_fast_assign( | ||
720 | WIPHY_ASSIGN; | ||
721 | __entry->ret = ret; | ||
722 | SINFO_ASSIGN; | ||
723 | ), | ||
724 | TP_printk(WIPHY_PR_FMT ", returned %d" , | ||
725 | WIPHY_PR_ARG, __entry->ret) | ||
726 | ); | ||
727 | |||
728 | DECLARE_EVENT_CLASS(mpath_evt, | ||
729 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, | ||
730 | u8 *next_hop), | ||
731 | TP_ARGS(wiphy, netdev, dst, next_hop), | ||
732 | TP_STRUCT__entry( | ||
733 | WIPHY_ENTRY | ||
734 | NETDEV_ENTRY | ||
735 | MAC_ENTRY(dst) | ||
736 | MAC_ENTRY(next_hop) | ||
737 | ), | ||
738 | TP_fast_assign( | ||
739 | WIPHY_ASSIGN; | ||
740 | NETDEV_ASSIGN; | ||
741 | MAC_ASSIGN(dst, dst); | ||
742 | MAC_ASSIGN(next_hop, next_hop); | ||
743 | ), | ||
744 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: " MAC_PR_FMT ", next hop: " MAC_PR_FMT, | ||
745 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dst), | ||
746 | MAC_PR_ARG(next_hop)) | ||
747 | ); | ||
748 | |||
749 | DEFINE_EVENT(mpath_evt, rdev_add_mpath, | ||
750 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, | ||
751 | u8 *next_hop), | ||
752 | TP_ARGS(wiphy, netdev, dst, next_hop) | ||
753 | ); | ||
754 | |||
755 | DEFINE_EVENT(mpath_evt, rdev_change_mpath, | ||
756 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, | ||
757 | u8 *next_hop), | ||
758 | TP_ARGS(wiphy, netdev, dst, next_hop) | ||
759 | ); | ||
760 | |||
761 | DEFINE_EVENT(mpath_evt, rdev_get_mpath, | ||
762 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, | ||
763 | u8 *next_hop), | ||
764 | TP_ARGS(wiphy, netdev, dst, next_hop) | ||
765 | ); | ||
766 | |||
767 | TRACE_EVENT(rdev_dump_mpath, | ||
768 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx, | ||
769 | u8 *dst, u8 *next_hop), | ||
770 | TP_ARGS(wiphy, netdev, idx, dst, next_hop), | ||
771 | TP_STRUCT__entry( | ||
772 | WIPHY_ENTRY | ||
773 | NETDEV_ENTRY | ||
774 | MAC_ENTRY(dst) | ||
775 | MAC_ENTRY(next_hop) | ||
776 | __field(int, idx) | ||
777 | ), | ||
778 | TP_fast_assign( | ||
779 | WIPHY_ASSIGN; | ||
780 | NETDEV_ASSIGN; | ||
781 | MAC_ASSIGN(dst, dst); | ||
782 | MAC_ASSIGN(next_hop, next_hop); | ||
783 | __entry->idx = idx; | ||
784 | ), | ||
785 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: " | ||
786 | MAC_PR_FMT ", next hop: " MAC_PR_FMT, | ||
787 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, MAC_PR_ARG(dst), | ||
788 | MAC_PR_ARG(next_hop)) | ||
789 | ); | ||
790 | |||
791 | TRACE_EVENT(rdev_return_int_mpath_info, | ||
792 | TP_PROTO(struct wiphy *wiphy, int ret, struct mpath_info *pinfo), | ||
793 | TP_ARGS(wiphy, ret, pinfo), | ||
794 | TP_STRUCT__entry( | ||
795 | WIPHY_ENTRY | ||
796 | __field(int, ret) | ||
797 | __field(int, generation) | ||
798 | __field(u32, filled) | ||
799 | __field(u32, frame_qlen) | ||
800 | __field(u32, sn) | ||
801 | __field(u32, metric) | ||
802 | __field(u32, exptime) | ||
803 | __field(u32, discovery_timeout) | ||
804 | __field(u8, discovery_retries) | ||
805 | __field(u8, flags) | ||
806 | ), | ||
807 | TP_fast_assign( | ||
808 | WIPHY_ASSIGN; | ||
809 | __entry->ret = ret; | ||
810 | __entry->generation = pinfo->generation; | ||
811 | __entry->filled = pinfo->filled; | ||
812 | __entry->frame_qlen = pinfo->frame_qlen; | ||
813 | __entry->sn = pinfo->sn; | ||
814 | __entry->metric = pinfo->metric; | ||
815 | __entry->exptime = pinfo->exptime; | ||
816 | __entry->discovery_timeout = pinfo->discovery_timeout; | ||
817 | __entry->discovery_retries = pinfo->discovery_retries; | ||
818 | __entry->flags = pinfo->flags; | ||
819 | ), | ||
820 | TP_printk(WIPHY_PR_FMT ", returned %d. mpath info - generation: %d, " | ||
821 | "filled: %u, frame qlen: %u, sn: %u, metric: %u, exptime: %u," | ||
822 | " discovery timeout: %u, discovery retries: %u, flags: %u", | ||
823 | WIPHY_PR_ARG, __entry->ret, __entry->generation, | ||
824 | __entry->filled, __entry->frame_qlen, __entry->sn, | ||
825 | __entry->metric, __entry->exptime, __entry->discovery_timeout, | ||
826 | __entry->discovery_retries, __entry->flags) | ||
827 | ); | ||
828 | |||
829 | TRACE_EVENT(rdev_return_int_mesh_config, | ||
830 | TP_PROTO(struct wiphy *wiphy, int ret, struct mesh_config *conf), | ||
831 | TP_ARGS(wiphy, ret, conf), | ||
832 | TP_STRUCT__entry( | ||
833 | WIPHY_ENTRY | ||
834 | MESH_CFG_ENTRY | ||
835 | __field(int, ret) | ||
836 | ), | ||
837 | TP_fast_assign( | ||
838 | WIPHY_ASSIGN; | ||
839 | MESH_CFG_ASSIGN; | ||
840 | __entry->ret = ret; | ||
841 | ), | ||
842 | TP_printk(WIPHY_PR_FMT ", returned: %d", | ||
843 | WIPHY_PR_ARG, __entry->ret) | ||
844 | ); | ||
845 | |||
846 | TRACE_EVENT(rdev_update_mesh_config, | ||
847 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 mask, | ||
848 | const struct mesh_config *conf), | ||
849 | TP_ARGS(wiphy, netdev, mask, conf), | ||
850 | TP_STRUCT__entry( | ||
851 | WIPHY_ENTRY | ||
852 | NETDEV_ENTRY | ||
853 | MESH_CFG_ENTRY | ||
854 | __field(u32, mask) | ||
855 | ), | ||
856 | TP_fast_assign( | ||
857 | WIPHY_ASSIGN; | ||
858 | NETDEV_ASSIGN; | ||
859 | MESH_CFG_ASSIGN; | ||
860 | __entry->mask = mask; | ||
861 | ), | ||
862 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mask: %u", | ||
863 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mask) | ||
864 | ); | ||
865 | |||
866 | TRACE_EVENT(rdev_join_mesh, | ||
867 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
868 | const struct mesh_config *conf, | ||
869 | const struct mesh_setup *setup), | ||
870 | TP_ARGS(wiphy, netdev, conf, setup), | ||
871 | TP_STRUCT__entry( | ||
872 | WIPHY_ENTRY | ||
873 | NETDEV_ENTRY | ||
874 | MESH_CFG_ENTRY | ||
875 | ), | ||
876 | TP_fast_assign( | ||
877 | WIPHY_ASSIGN; | ||
878 | NETDEV_ASSIGN; | ||
879 | MESH_CFG_ASSIGN; | ||
880 | ), | ||
881 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, | ||
882 | WIPHY_PR_ARG, NETDEV_PR_ARG) | ||
883 | ); | ||
884 | |||
885 | TRACE_EVENT(rdev_change_bss, | ||
886 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
887 | struct bss_parameters *params), | ||
888 | TP_ARGS(wiphy, netdev, params), | ||
889 | TP_STRUCT__entry( | ||
890 | WIPHY_ENTRY | ||
891 | NETDEV_ENTRY | ||
892 | __field(int, use_cts_prot) | ||
893 | __field(int, use_short_preamble) | ||
894 | __field(int, use_short_slot_time) | ||
895 | __field(int, ap_isolate) | ||
896 | __field(int, ht_opmode) | ||
897 | ), | ||
898 | TP_fast_assign( | ||
899 | WIPHY_ASSIGN; | ||
900 | NETDEV_ASSIGN; | ||
901 | __entry->use_cts_prot = params->use_cts_prot; | ||
902 | __entry->use_short_preamble = params->use_short_preamble; | ||
903 | __entry->use_short_slot_time = params->use_short_slot_time; | ||
904 | __entry->ap_isolate = params->ap_isolate; | ||
905 | __entry->ht_opmode = params->ht_opmode; | ||
906 | ), | ||
907 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", use cts prot: %d, " | ||
908 | "use short preamble: %d, use short slot time: %d, " | ||
909 | "ap isolate: %d, ht opmode: %d", | ||
910 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->use_cts_prot, | ||
911 | __entry->use_short_preamble, __entry->use_short_slot_time, | ||
912 | __entry->ap_isolate, __entry->ht_opmode) | ||
913 | ); | ||
914 | |||
915 | TRACE_EVENT(rdev_set_txq_params, | ||
916 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
917 | struct ieee80211_txq_params *params), | ||
918 | TP_ARGS(wiphy, netdev, params), | ||
919 | TP_STRUCT__entry( | ||
920 | WIPHY_ENTRY | ||
921 | NETDEV_ENTRY | ||
922 | __field(enum nl80211_ac, ac) | ||
923 | __field(u16, txop) | ||
924 | __field(u16, cwmin) | ||
925 | __field(u16, cwmax) | ||
926 | __field(u8, aifs) | ||
927 | ), | ||
928 | TP_fast_assign( | ||
929 | WIPHY_ASSIGN; | ||
930 | NETDEV_ASSIGN; | ||
931 | __entry->ac = params->ac; | ||
932 | __entry->txop = params->txop; | ||
933 | __entry->cwmin = params->cwmin; | ||
934 | __entry->cwmax = params->cwmax; | ||
935 | __entry->aifs = params->aifs; | ||
936 | ), | ||
937 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", ac: %d, txop: %u, cwmin: %u, cwmax: %u, aifs: %u", | ||
938 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->ac, __entry->txop, | ||
939 | __entry->cwmin, __entry->cwmax, __entry->aifs) | ||
940 | ); | ||
941 | |||
942 | TRACE_EVENT(rdev_libertas_set_mesh_channel, | ||
943 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
944 | struct ieee80211_channel *chan), | ||
945 | TP_ARGS(wiphy, netdev, chan), | ||
946 | TP_STRUCT__entry( | ||
947 | WIPHY_ENTRY | ||
948 | NETDEV_ENTRY | ||
949 | CHAN_ENTRY | ||
950 | ), | ||
951 | TP_fast_assign( | ||
952 | WIPHY_ASSIGN; | ||
953 | NETDEV_ASSIGN; | ||
954 | CHAN_ASSIGN(chan); | ||
955 | ), | ||
956 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_PR_FMT, WIPHY_PR_ARG, | ||
957 | NETDEV_PR_ARG, CHAN_PR_ARG) | ||
958 | ); | ||
959 | |||
960 | TRACE_EVENT(rdev_set_monitor_channel, | ||
961 | TP_PROTO(struct wiphy *wiphy, | ||
962 | struct cfg80211_chan_def *chandef), | ||
963 | TP_ARGS(wiphy, chandef), | ||
964 | TP_STRUCT__entry( | ||
965 | WIPHY_ENTRY | ||
966 | CHAN_DEF_ENTRY | ||
967 | ), | ||
968 | TP_fast_assign( | ||
969 | WIPHY_ASSIGN; | ||
970 | CHAN_DEF_ASSIGN(chandef); | ||
971 | ), | ||
972 | TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT, | ||
973 | WIPHY_PR_ARG, CHAN_DEF_PR_ARG) | ||
974 | ); | ||
975 | |||
976 | TRACE_EVENT(rdev_auth, | ||
977 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
978 | struct cfg80211_auth_request *req), | ||
979 | TP_ARGS(wiphy, netdev, req), | ||
980 | TP_STRUCT__entry( | ||
981 | WIPHY_ENTRY | ||
982 | NETDEV_ENTRY | ||
983 | MAC_ENTRY(bssid) | ||
984 | __field(enum nl80211_auth_type, auth_type) | ||
985 | ), | ||
986 | TP_fast_assign( | ||
987 | WIPHY_ASSIGN; | ||
988 | NETDEV_ASSIGN; | ||
989 | if (req->bss) | ||
990 | MAC_ASSIGN(bssid, req->bss->bssid); | ||
991 | else | ||
992 | memset(__entry->bssid, 0, ETH_ALEN); | ||
993 | __entry->auth_type = req->auth_type; | ||
994 | ), | ||
995 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", auth type: %d, bssid: " MAC_PR_FMT, | ||
996 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->auth_type, | ||
997 | MAC_PR_ARG(bssid)) | ||
998 | ); | ||
999 | |||
1000 | TRACE_EVENT(rdev_assoc, | ||
1001 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1002 | struct cfg80211_assoc_request *req), | ||
1003 | TP_ARGS(wiphy, netdev, req), | ||
1004 | TP_STRUCT__entry( | ||
1005 | WIPHY_ENTRY | ||
1006 | NETDEV_ENTRY | ||
1007 | MAC_ENTRY(bssid) | ||
1008 | MAC_ENTRY(prev_bssid) | ||
1009 | __field(bool, use_mfp) | ||
1010 | __field(u32, flags) | ||
1011 | ), | ||
1012 | TP_fast_assign( | ||
1013 | WIPHY_ASSIGN; | ||
1014 | NETDEV_ASSIGN; | ||
1015 | if (req->bss) | ||
1016 | MAC_ASSIGN(bssid, req->bss->bssid); | ||
1017 | else | ||
1018 | memset(__entry->bssid, 0, ETH_ALEN); | ||
1019 | MAC_ASSIGN(prev_bssid, req->prev_bssid); | ||
1020 | __entry->use_mfp = req->use_mfp; | ||
1021 | __entry->flags = req->flags; | ||
1022 | ), | ||
1023 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT | ||
1024 | ", previous bssid: " MAC_PR_FMT ", use mfp: %s, flags: %u", | ||
1025 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), | ||
1026 | MAC_PR_ARG(prev_bssid), BOOL_TO_STR(__entry->use_mfp), | ||
1027 | __entry->flags) | ||
1028 | ); | ||
1029 | |||
1030 | TRACE_EVENT(rdev_deauth, | ||
1031 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1032 | struct cfg80211_deauth_request *req), | ||
1033 | TP_ARGS(wiphy, netdev, req), | ||
1034 | TP_STRUCT__entry( | ||
1035 | WIPHY_ENTRY | ||
1036 | NETDEV_ENTRY | ||
1037 | MAC_ENTRY(bssid) | ||
1038 | __field(u16, reason_code) | ||
1039 | ), | ||
1040 | TP_fast_assign( | ||
1041 | WIPHY_ASSIGN; | ||
1042 | NETDEV_ASSIGN; | ||
1043 | MAC_ASSIGN(bssid, req->bssid); | ||
1044 | __entry->reason_code = req->reason_code; | ||
1045 | ), | ||
1046 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", reason: %u", | ||
1047 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), | ||
1048 | __entry->reason_code) | ||
1049 | ); | ||
1050 | |||
1051 | TRACE_EVENT(rdev_disassoc, | ||
1052 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1053 | struct cfg80211_disassoc_request *req), | ||
1054 | TP_ARGS(wiphy, netdev, req), | ||
1055 | TP_STRUCT__entry( | ||
1056 | WIPHY_ENTRY | ||
1057 | NETDEV_ENTRY | ||
1058 | MAC_ENTRY(bssid) | ||
1059 | __field(u16, reason_code) | ||
1060 | __field(bool, local_state_change) | ||
1061 | ), | ||
1062 | TP_fast_assign( | ||
1063 | WIPHY_ASSIGN; | ||
1064 | NETDEV_ASSIGN; | ||
1065 | if (req->bss) | ||
1066 | MAC_ASSIGN(bssid, req->bss->bssid); | ||
1067 | else | ||
1068 | memset(__entry->bssid, 0, ETH_ALEN); | ||
1069 | __entry->reason_code = req->reason_code; | ||
1070 | __entry->local_state_change = req->local_state_change; | ||
1071 | ), | ||
1072 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT | ||
1073 | ", reason: %u, local state change: %s", | ||
1074 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), | ||
1075 | __entry->reason_code, | ||
1076 | BOOL_TO_STR(__entry->local_state_change)) | ||
1077 | ); | ||
1078 | |||
1079 | TRACE_EVENT(rdev_mgmt_tx_cancel_wait, | ||
1080 | TP_PROTO(struct wiphy *wiphy, | ||
1081 | struct wireless_dev *wdev, u64 cookie), | ||
1082 | TP_ARGS(wiphy, wdev, cookie), | ||
1083 | TP_STRUCT__entry( | ||
1084 | WIPHY_ENTRY | ||
1085 | WDEV_ENTRY | ||
1086 | __field(u64, cookie) | ||
1087 | ), | ||
1088 | TP_fast_assign( | ||
1089 | WIPHY_ASSIGN; | ||
1090 | WDEV_ASSIGN; | ||
1091 | __entry->cookie = cookie; | ||
1092 | ), | ||
1093 | TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %llu ", | ||
1094 | WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie) | ||
1095 | ); | ||
1096 | |||
1097 | TRACE_EVENT(rdev_set_power_mgmt, | ||
1098 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1099 | bool enabled, int timeout), | ||
1100 | TP_ARGS(wiphy, netdev, enabled, timeout), | ||
1101 | TP_STRUCT__entry( | ||
1102 | WIPHY_ENTRY | ||
1103 | NETDEV_ENTRY | ||
1104 | __field(bool, enabled) | ||
1105 | __field(int, timeout) | ||
1106 | ), | ||
1107 | TP_fast_assign( | ||
1108 | WIPHY_ASSIGN; | ||
1109 | NETDEV_ASSIGN; | ||
1110 | __entry->enabled = enabled; | ||
1111 | __entry->timeout = timeout; | ||
1112 | ), | ||
1113 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %senabled, timeout: %d ", | ||
1114 | WIPHY_PR_ARG, NETDEV_PR_ARG, | ||
1115 | __entry->enabled ? "" : "not ", __entry->timeout) | ||
1116 | ); | ||
1117 | |||
1118 | TRACE_EVENT(rdev_connect, | ||
1119 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1120 | struct cfg80211_connect_params *sme), | ||
1121 | TP_ARGS(wiphy, netdev, sme), | ||
1122 | TP_STRUCT__entry( | ||
1123 | WIPHY_ENTRY | ||
1124 | NETDEV_ENTRY | ||
1125 | MAC_ENTRY(bssid) | ||
1126 | __array(char, ssid, IEEE80211_MAX_SSID_LEN + 1) | ||
1127 | __field(enum nl80211_auth_type, auth_type) | ||
1128 | __field(bool, privacy) | ||
1129 | __field(u32, wpa_versions) | ||
1130 | __field(u32, flags) | ||
1131 | ), | ||
1132 | TP_fast_assign( | ||
1133 | WIPHY_ASSIGN; | ||
1134 | NETDEV_ASSIGN; | ||
1135 | MAC_ASSIGN(bssid, sme->bssid); | ||
1136 | memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); | ||
1137 | memcpy(__entry->ssid, sme->ssid, sme->ssid_len); | ||
1138 | __entry->auth_type = sme->auth_type; | ||
1139 | __entry->privacy = sme->privacy; | ||
1140 | __entry->wpa_versions = sme->crypto.wpa_versions; | ||
1141 | __entry->flags = sme->flags; | ||
1142 | ), | ||
1143 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT | ||
1144 | ", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, " | ||
1145 | "flags: %u", | ||
1146 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid, | ||
1147 | __entry->auth_type, BOOL_TO_STR(__entry->privacy), | ||
1148 | __entry->wpa_versions, __entry->flags) | ||
1149 | ); | ||
1150 | |||
1151 | TRACE_EVENT(rdev_set_cqm_rssi_config, | ||
1152 | TP_PROTO(struct wiphy *wiphy, | ||
1153 | struct net_device *netdev, s32 rssi_thold, | ||
1154 | u32 rssi_hyst), | ||
1155 | TP_ARGS(wiphy, netdev, rssi_thold, rssi_hyst), | ||
1156 | TP_STRUCT__entry( | ||
1157 | WIPHY_ENTRY | ||
1158 | NETDEV_ENTRY | ||
1159 | __field(s32, rssi_thold) | ||
1160 | __field(u32, rssi_hyst) | ||
1161 | ), | ||
1162 | TP_fast_assign( | ||
1163 | WIPHY_ASSIGN; | ||
1164 | NETDEV_ASSIGN; | ||
1165 | __entry->rssi_thold = rssi_thold; | ||
1166 | __entry->rssi_hyst = rssi_hyst; | ||
1167 | ), | ||
1168 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT | ||
1169 | ", rssi_thold: %d, rssi_hyst: %u ", | ||
1170 | WIPHY_PR_ARG, NETDEV_PR_ARG, | ||
1171 | __entry->rssi_thold, __entry->rssi_hyst) | ||
1172 | ); | ||
1173 | |||
1174 | TRACE_EVENT(rdev_set_cqm_txe_config, | ||
1175 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 rate, | ||
1176 | u32 pkts, u32 intvl), | ||
1177 | TP_ARGS(wiphy, netdev, rate, pkts, intvl), | ||
1178 | TP_STRUCT__entry( | ||
1179 | WIPHY_ENTRY | ||
1180 | NETDEV_ENTRY | ||
1181 | __field(u32, rate) | ||
1182 | __field(u32, pkts) | ||
1183 | __field(u32, intvl) | ||
1184 | ), | ||
1185 | TP_fast_assign( | ||
1186 | WIPHY_ASSIGN; | ||
1187 | NETDEV_ASSIGN; | ||
1188 | __entry->rate = rate; | ||
1189 | __entry->pkts = pkts; | ||
1190 | __entry->intvl = intvl; | ||
1191 | ), | ||
1192 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", rate: %u, packets: %u, interval: %u", | ||
1193 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->rate, __entry->pkts, | ||
1194 | __entry->intvl) | ||
1195 | ); | ||
1196 | |||
1197 | TRACE_EVENT(rdev_disconnect, | ||
1198 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1199 | u16 reason_code), | ||
1200 | TP_ARGS(wiphy, netdev, reason_code), | ||
1201 | TP_STRUCT__entry( | ||
1202 | WIPHY_ENTRY | ||
1203 | NETDEV_ENTRY | ||
1204 | __field(u16, reason_code) | ||
1205 | ), | ||
1206 | TP_fast_assign( | ||
1207 | WIPHY_ASSIGN; | ||
1208 | NETDEV_ASSIGN; | ||
1209 | __entry->reason_code = reason_code; | ||
1210 | ), | ||
1211 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", reason code: %u", WIPHY_PR_ARG, | ||
1212 | NETDEV_PR_ARG, __entry->reason_code) | ||
1213 | ); | ||
1214 | |||
1215 | TRACE_EVENT(rdev_join_ibss, | ||
1216 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1217 | struct cfg80211_ibss_params *params), | ||
1218 | TP_ARGS(wiphy, netdev, params), | ||
1219 | TP_STRUCT__entry( | ||
1220 | WIPHY_ENTRY | ||
1221 | NETDEV_ENTRY | ||
1222 | MAC_ENTRY(bssid) | ||
1223 | __array(char, ssid, IEEE80211_MAX_SSID_LEN + 1) | ||
1224 | ), | ||
1225 | TP_fast_assign( | ||
1226 | WIPHY_ASSIGN; | ||
1227 | NETDEV_ASSIGN; | ||
1228 | MAC_ASSIGN(bssid, params->bssid); | ||
1229 | memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); | ||
1230 | memcpy(__entry->ssid, params->ssid, params->ssid_len); | ||
1231 | ), | ||
1232 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", ssid: %s", | ||
1233 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid) | ||
1234 | ); | ||
1235 | |||
1236 | TRACE_EVENT(rdev_set_wiphy_params, | ||
1237 | TP_PROTO(struct wiphy *wiphy, u32 changed), | ||
1238 | TP_ARGS(wiphy, changed), | ||
1239 | TP_STRUCT__entry( | ||
1240 | WIPHY_ENTRY | ||
1241 | __field(u32, changed) | ||
1242 | ), | ||
1243 | TP_fast_assign( | ||
1244 | WIPHY_ASSIGN; | ||
1245 | __entry->changed = changed; | ||
1246 | ), | ||
1247 | TP_printk(WIPHY_PR_FMT ", changed: %u", | ||
1248 | WIPHY_PR_ARG, __entry->changed) | ||
1249 | ); | ||
1250 | |||
1251 | DEFINE_EVENT(wiphy_wdev_evt, rdev_get_tx_power, | ||
1252 | TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), | ||
1253 | TP_ARGS(wiphy, wdev) | ||
1254 | ); | ||
1255 | |||
1256 | TRACE_EVENT(rdev_set_tx_power, | ||
1257 | TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, | ||
1258 | enum nl80211_tx_power_setting type, int mbm), | ||
1259 | TP_ARGS(wiphy, wdev, type, mbm), | ||
1260 | TP_STRUCT__entry( | ||
1261 | WIPHY_ENTRY | ||
1262 | WDEV_ENTRY | ||
1263 | __field(enum nl80211_tx_power_setting, type) | ||
1264 | __field(int, mbm) | ||
1265 | ), | ||
1266 | TP_fast_assign( | ||
1267 | WIPHY_ASSIGN; | ||
1268 | WDEV_ASSIGN; | ||
1269 | __entry->type = type; | ||
1270 | __entry->mbm = mbm; | ||
1271 | ), | ||
1272 | TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", type: %u, mbm: %d", | ||
1273 | WIPHY_PR_ARG, WDEV_PR_ARG, __entry->type, __entry->mbm) | ||
1274 | ); | ||
1275 | |||
1276 | TRACE_EVENT(rdev_return_int_int, | ||
1277 | TP_PROTO(struct wiphy *wiphy, int func_ret, int func_fill), | ||
1278 | TP_ARGS(wiphy, func_ret, func_fill), | ||
1279 | TP_STRUCT__entry( | ||
1280 | WIPHY_ENTRY | ||
1281 | __field(int, func_ret) | ||
1282 | __field(int, func_fill) | ||
1283 | ), | ||
1284 | TP_fast_assign( | ||
1285 | WIPHY_ASSIGN; | ||
1286 | __entry->func_ret = func_ret; | ||
1287 | __entry->func_fill = func_fill; | ||
1288 | ), | ||
1289 | TP_printk(WIPHY_PR_FMT ", function returns: %d, function filled: %d", | ||
1290 | WIPHY_PR_ARG, __entry->func_ret, __entry->func_fill) | ||
1291 | ); | ||
1292 | |||
1293 | #ifdef CONFIG_NL80211_TESTMODE | ||
1294 | TRACE_EVENT(rdev_testmode_cmd, | ||
1295 | TP_PROTO(struct wiphy *wiphy), | ||
1296 | TP_ARGS(wiphy), | ||
1297 | TP_STRUCT__entry( | ||
1298 | WIPHY_ENTRY | ||
1299 | ), | ||
1300 | TP_fast_assign( | ||
1301 | WIPHY_ASSIGN; | ||
1302 | ), | ||
1303 | TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) | ||
1304 | ); | ||
1305 | |||
1306 | TRACE_EVENT(rdev_testmode_dump, | ||
1307 | TP_PROTO(struct wiphy *wiphy), | ||
1308 | TP_ARGS(wiphy), | ||
1309 | TP_STRUCT__entry( | ||
1310 | WIPHY_ENTRY | ||
1311 | ), | ||
1312 | TP_fast_assign( | ||
1313 | WIPHY_ASSIGN; | ||
1314 | ), | ||
1315 | TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) | ||
1316 | ); | ||
1317 | #endif /* CONFIG_NL80211_TESTMODE */ | ||
1318 | |||
1319 | TRACE_EVENT(rdev_set_bitrate_mask, | ||
1320 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1321 | const u8 *peer, const struct cfg80211_bitrate_mask *mask), | ||
1322 | TP_ARGS(wiphy, netdev, peer, mask), | ||
1323 | TP_STRUCT__entry( | ||
1324 | WIPHY_ENTRY | ||
1325 | NETDEV_ENTRY | ||
1326 | MAC_ENTRY(peer) | ||
1327 | ), | ||
1328 | TP_fast_assign( | ||
1329 | WIPHY_ASSIGN; | ||
1330 | NETDEV_ASSIGN; | ||
1331 | MAC_ASSIGN(peer, peer); | ||
1332 | ), | ||
1333 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT, | ||
1334 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer)) | ||
1335 | ); | ||
1336 | |||
1337 | TRACE_EVENT(rdev_mgmt_frame_register, | ||
1338 | TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, | ||
1339 | u16 frame_type, bool reg), | ||
1340 | TP_ARGS(wiphy, wdev, frame_type, reg), | ||
1341 | TP_STRUCT__entry( | ||
1342 | WIPHY_ENTRY | ||
1343 | WDEV_ENTRY | ||
1344 | __field(u16, frame_type) | ||
1345 | __field(bool, reg) | ||
1346 | ), | ||
1347 | TP_fast_assign( | ||
1348 | WIPHY_ASSIGN; | ||
1349 | WDEV_ASSIGN; | ||
1350 | __entry->frame_type = frame_type; | ||
1351 | __entry->reg = reg; | ||
1352 | ), | ||
1353 | TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", frame_type: 0x%.2x, reg: %s ", | ||
1354 | WIPHY_PR_ARG, WDEV_PR_ARG, __entry->frame_type, | ||
1355 | __entry->reg ? "true" : "false") | ||
1356 | ); | ||
1357 | |||
1358 | TRACE_EVENT(rdev_return_int_tx_rx, | ||
1359 | TP_PROTO(struct wiphy *wiphy, int ret, u32 tx, u32 rx), | ||
1360 | TP_ARGS(wiphy, ret, tx, rx), | ||
1361 | TP_STRUCT__entry( | ||
1362 | WIPHY_ENTRY | ||
1363 | __field(int, ret) | ||
1364 | __field(u32, tx) | ||
1365 | __field(u32, rx) | ||
1366 | ), | ||
1367 | TP_fast_assign( | ||
1368 | WIPHY_ASSIGN; | ||
1369 | __entry->ret = ret; | ||
1370 | __entry->tx = tx; | ||
1371 | __entry->rx = rx; | ||
1372 | ), | ||
1373 | TP_printk(WIPHY_PR_FMT ", returned %d, tx: %u, rx: %u", | ||
1374 | WIPHY_PR_ARG, __entry->ret, __entry->tx, __entry->rx) | ||
1375 | ); | ||
1376 | |||
1377 | TRACE_EVENT(rdev_return_void_tx_rx, | ||
1378 | TP_PROTO(struct wiphy *wiphy, u32 tx, u32 tx_max, | ||
1379 | u32 rx, u32 rx_max), | ||
1380 | TP_ARGS(wiphy, tx, tx_max, rx, rx_max), | ||
1381 | TP_STRUCT__entry( | ||
1382 | WIPHY_ENTRY | ||
1383 | __field(u32, tx) | ||
1384 | __field(u32, tx_max) | ||
1385 | __field(u32, rx) | ||
1386 | __field(u32, rx_max) | ||
1387 | ), | ||
1388 | TP_fast_assign( | ||
1389 | WIPHY_ASSIGN; | ||
1390 | __entry->tx = tx; | ||
1391 | __entry->tx_max = tx_max; | ||
1392 | __entry->rx = rx; | ||
1393 | __entry->rx_max = rx_max; | ||
1394 | ), | ||
1395 | TP_printk(WIPHY_PR_FMT ", tx: %u, tx_max: %u, rx: %u, rx_max: %u ", | ||
1396 | WIPHY_PR_ARG, __entry->tx, __entry->tx_max, __entry->rx, | ||
1397 | __entry->rx_max) | ||
1398 | ); | ||
1399 | |||
1400 | DECLARE_EVENT_CLASS(tx_rx_evt, | ||
1401 | TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx), | ||
1402 | TP_ARGS(wiphy, tx, rx), | ||
1403 | TP_STRUCT__entry( | ||
1404 | WIPHY_ENTRY | ||
1405 | __field(u32, tx) | ||
1406 | __field(u32, rx) | ||
1407 | ), | ||
1408 | TP_fast_assign( | ||
1409 | WIPHY_ASSIGN; | ||
1410 | __entry->tx = tx; | ||
1411 | __entry->rx = rx; | ||
1412 | ), | ||
1413 | TP_printk(WIPHY_PR_FMT ", tx: %u, rx: %u ", | ||
1414 | WIPHY_PR_ARG, __entry->tx, __entry->rx) | ||
1415 | ); | ||
1416 | |||
1417 | DEFINE_EVENT(tx_rx_evt, rdev_set_ringparam, | ||
1418 | TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx), | ||
1419 | TP_ARGS(wiphy, tx, rx) | ||
1420 | ); | ||
1421 | |||
1422 | DEFINE_EVENT(tx_rx_evt, rdev_set_antenna, | ||
1423 | TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx), | ||
1424 | TP_ARGS(wiphy, tx, rx) | ||
1425 | ); | ||
1426 | |||
1427 | TRACE_EVENT(rdev_sched_scan_start, | ||
1428 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1429 | struct cfg80211_sched_scan_request *request), | ||
1430 | TP_ARGS(wiphy, netdev, request), | ||
1431 | TP_STRUCT__entry( | ||
1432 | WIPHY_ENTRY | ||
1433 | NETDEV_ENTRY | ||
1434 | ), | ||
1435 | TP_fast_assign( | ||
1436 | WIPHY_ASSIGN; | ||
1437 | NETDEV_ASSIGN; | ||
1438 | ), | ||
1439 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, | ||
1440 | WIPHY_PR_ARG, NETDEV_PR_ARG) | ||
1441 | ); | ||
1442 | |||
1443 | TRACE_EVENT(rdev_tdls_mgmt, | ||
1444 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1445 | u8 *peer, u8 action_code, u8 dialog_token, | ||
1446 | u16 status_code, const u8 *buf, size_t len), | ||
1447 | TP_ARGS(wiphy, netdev, peer, action_code, dialog_token, status_code, | ||
1448 | buf, len), | ||
1449 | TP_STRUCT__entry( | ||
1450 | WIPHY_ENTRY | ||
1451 | NETDEV_ENTRY | ||
1452 | MAC_ENTRY(peer) | ||
1453 | __field(u8, action_code) | ||
1454 | __field(u8, dialog_token) | ||
1455 | __field(u16, status_code) | ||
1456 | __dynamic_array(u8, buf, len) | ||
1457 | ), | ||
1458 | TP_fast_assign( | ||
1459 | WIPHY_ASSIGN; | ||
1460 | NETDEV_ASSIGN; | ||
1461 | MAC_ASSIGN(peer, peer); | ||
1462 | __entry->action_code = action_code; | ||
1463 | __entry->dialog_token = dialog_token; | ||
1464 | __entry->status_code = status_code; | ||
1465 | memcpy(__get_dynamic_array(buf), buf, len); | ||
1466 | ), | ||
1467 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", action_code: %u, " | ||
1468 | "dialog_token: %u, status_code: %u, buf: %#.2x ", | ||
1469 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), | ||
1470 | __entry->action_code, __entry->dialog_token, | ||
1471 | __entry->status_code, ((u8 *)__get_dynamic_array(buf))[0]) | ||
1472 | ); | ||
1473 | |||
1474 | TRACE_EVENT(rdev_dump_survey, | ||
1475 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx), | ||
1476 | TP_ARGS(wiphy, netdev, idx), | ||
1477 | TP_STRUCT__entry( | ||
1478 | WIPHY_ENTRY | ||
1479 | NETDEV_ENTRY | ||
1480 | __field(int, idx) | ||
1481 | ), | ||
1482 | TP_fast_assign( | ||
1483 | WIPHY_ASSIGN; | ||
1484 | NETDEV_ASSIGN; | ||
1485 | __entry->idx = idx; | ||
1486 | ), | ||
1487 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d", | ||
1488 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx) | ||
1489 | ); | ||
1490 | |||
1491 | TRACE_EVENT(rdev_return_int_survey_info, | ||
1492 | TP_PROTO(struct wiphy *wiphy, int ret, struct survey_info *info), | ||
1493 | TP_ARGS(wiphy, ret, info), | ||
1494 | TP_STRUCT__entry( | ||
1495 | WIPHY_ENTRY | ||
1496 | CHAN_ENTRY | ||
1497 | __field(int, ret) | ||
1498 | __field(u64, channel_time) | ||
1499 | __field(u64, channel_time_busy) | ||
1500 | __field(u64, channel_time_ext_busy) | ||
1501 | __field(u64, channel_time_rx) | ||
1502 | __field(u64, channel_time_tx) | ||
1503 | __field(u32, filled) | ||
1504 | __field(s8, noise) | ||
1505 | ), | ||
1506 | TP_fast_assign( | ||
1507 | WIPHY_ASSIGN; | ||
1508 | CHAN_ASSIGN(info->channel); | ||
1509 | __entry->ret = ret; | ||
1510 | __entry->channel_time = info->channel_time; | ||
1511 | __entry->channel_time_busy = info->channel_time_busy; | ||
1512 | __entry->channel_time_ext_busy = info->channel_time_ext_busy; | ||
1513 | __entry->channel_time_rx = info->channel_time_rx; | ||
1514 | __entry->channel_time_tx = info->channel_time_tx; | ||
1515 | __entry->filled = info->filled; | ||
1516 | __entry->noise = info->noise; | ||
1517 | ), | ||
1518 | TP_printk(WIPHY_PR_FMT ", returned: %d, " CHAN_PR_FMT | ||
1519 | ", channel time: %llu, channel time busy: %llu, " | ||
1520 | "channel time extension busy: %llu, channel time rx: %llu, " | ||
1521 | "channel time tx: %llu, filled: %u, noise: %d", | ||
1522 | WIPHY_PR_ARG, __entry->ret, CHAN_PR_ARG, | ||
1523 | __entry->channel_time, __entry->channel_time_busy, | ||
1524 | __entry->channel_time_ext_busy, __entry->channel_time_rx, | ||
1525 | __entry->channel_time_tx, __entry->filled, __entry->noise) | ||
1526 | ); | ||
1527 | |||
1528 | TRACE_EVENT(rdev_tdls_oper, | ||
1529 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1530 | u8 *peer, enum nl80211_tdls_operation oper), | ||
1531 | TP_ARGS(wiphy, netdev, peer, oper), | ||
1532 | TP_STRUCT__entry( | ||
1533 | WIPHY_ENTRY | ||
1534 | NETDEV_ENTRY | ||
1535 | MAC_ENTRY(peer) | ||
1536 | __field(enum nl80211_tdls_operation, oper) | ||
1537 | ), | ||
1538 | TP_fast_assign( | ||
1539 | WIPHY_ASSIGN; | ||
1540 | NETDEV_ASSIGN; | ||
1541 | MAC_ASSIGN(peer, peer); | ||
1542 | __entry->oper = oper; | ||
1543 | ), | ||
1544 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", oper: %d", | ||
1545 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->oper) | ||
1546 | ); | ||
1547 | |||
1548 | DECLARE_EVENT_CLASS(rdev_pmksa, | ||
1549 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1550 | struct cfg80211_pmksa *pmksa), | ||
1551 | TP_ARGS(wiphy, netdev, pmksa), | ||
1552 | TP_STRUCT__entry( | ||
1553 | WIPHY_ENTRY | ||
1554 | NETDEV_ENTRY | ||
1555 | MAC_ENTRY(bssid) | ||
1556 | ), | ||
1557 | TP_fast_assign( | ||
1558 | WIPHY_ASSIGN; | ||
1559 | NETDEV_ASSIGN; | ||
1560 | MAC_ASSIGN(bssid, pmksa->bssid); | ||
1561 | ), | ||
1562 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT, | ||
1563 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid)) | ||
1564 | ); | ||
1565 | |||
1566 | TRACE_EVENT(rdev_probe_client, | ||
1567 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1568 | const u8 *peer), | ||
1569 | TP_ARGS(wiphy, netdev, peer), | ||
1570 | TP_STRUCT__entry( | ||
1571 | WIPHY_ENTRY | ||
1572 | NETDEV_ENTRY | ||
1573 | MAC_ENTRY(peer) | ||
1574 | ), | ||
1575 | TP_fast_assign( | ||
1576 | WIPHY_ASSIGN; | ||
1577 | NETDEV_ASSIGN; | ||
1578 | MAC_ASSIGN(peer, peer); | ||
1579 | ), | ||
1580 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT, | ||
1581 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer)) | ||
1582 | ); | ||
1583 | |||
1584 | DEFINE_EVENT(rdev_pmksa, rdev_set_pmksa, | ||
1585 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1586 | struct cfg80211_pmksa *pmksa), | ||
1587 | TP_ARGS(wiphy, netdev, pmksa) | ||
1588 | ); | ||
1589 | |||
1590 | DEFINE_EVENT(rdev_pmksa, rdev_del_pmksa, | ||
1591 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1592 | struct cfg80211_pmksa *pmksa), | ||
1593 | TP_ARGS(wiphy, netdev, pmksa) | ||
1594 | ); | ||
1595 | |||
1596 | TRACE_EVENT(rdev_remain_on_channel, | ||
1597 | TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, | ||
1598 | struct ieee80211_channel *chan, | ||
1599 | unsigned int duration), | ||
1600 | TP_ARGS(wiphy, wdev, chan, duration), | ||
1601 | TP_STRUCT__entry( | ||
1602 | WIPHY_ENTRY | ||
1603 | WDEV_ENTRY | ||
1604 | CHAN_ENTRY | ||
1605 | __field(unsigned int, duration) | ||
1606 | ), | ||
1607 | TP_fast_assign( | ||
1608 | WIPHY_ASSIGN; | ||
1609 | WDEV_ASSIGN; | ||
1610 | CHAN_ASSIGN(chan); | ||
1611 | __entry->duration = duration; | ||
1612 | ), | ||
1613 | TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", " CHAN_PR_FMT ", duration: %u", | ||
1614 | WIPHY_PR_ARG, WDEV_PR_ARG, CHAN_PR_ARG, __entry->duration) | ||
1615 | ); | ||
1616 | |||
1617 | TRACE_EVENT(rdev_return_int_cookie, | ||
1618 | TP_PROTO(struct wiphy *wiphy, int ret, u64 cookie), | ||
1619 | TP_ARGS(wiphy, ret, cookie), | ||
1620 | TP_STRUCT__entry( | ||
1621 | WIPHY_ENTRY | ||
1622 | __field(int, ret) | ||
1623 | __field(u64, cookie) | ||
1624 | ), | ||
1625 | TP_fast_assign( | ||
1626 | WIPHY_ASSIGN; | ||
1627 | __entry->ret = ret; | ||
1628 | __entry->cookie = cookie; | ||
1629 | ), | ||
1630 | TP_printk(WIPHY_PR_FMT ", returned %d, cookie: %llu", | ||
1631 | WIPHY_PR_ARG, __entry->ret, __entry->cookie) | ||
1632 | ); | ||
1633 | |||
1634 | TRACE_EVENT(rdev_cancel_remain_on_channel, | ||
1635 | TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), | ||
1636 | TP_ARGS(wiphy, wdev, cookie), | ||
1637 | TP_STRUCT__entry( | ||
1638 | WIPHY_ENTRY | ||
1639 | WDEV_ENTRY | ||
1640 | __field(u64, cookie) | ||
1641 | ), | ||
1642 | TP_fast_assign( | ||
1643 | WIPHY_ASSIGN; | ||
1644 | WDEV_ASSIGN; | ||
1645 | __entry->cookie = cookie; | ||
1646 | ), | ||
1647 | TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %llu", | ||
1648 | WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie) | ||
1649 | ); | ||
1650 | |||
1651 | TRACE_EVENT(rdev_mgmt_tx, | ||
1652 | TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, | ||
1653 | struct ieee80211_channel *chan, bool offchan, | ||
1654 | unsigned int wait, bool no_cck, bool dont_wait_for_ack), | ||
1655 | TP_ARGS(wiphy, wdev, chan, offchan, wait, no_cck, dont_wait_for_ack), | ||
1656 | TP_STRUCT__entry( | ||
1657 | WIPHY_ENTRY | ||
1658 | WDEV_ENTRY | ||
1659 | CHAN_ENTRY | ||
1660 | __field(bool, offchan) | ||
1661 | __field(unsigned int, wait) | ||
1662 | __field(bool, no_cck) | ||
1663 | __field(bool, dont_wait_for_ack) | ||
1664 | ), | ||
1665 | TP_fast_assign( | ||
1666 | WIPHY_ASSIGN; | ||
1667 | WDEV_ASSIGN; | ||
1668 | CHAN_ASSIGN(chan); | ||
1669 | __entry->offchan = offchan; | ||
1670 | __entry->wait = wait; | ||
1671 | __entry->no_cck = no_cck; | ||
1672 | __entry->dont_wait_for_ack = dont_wait_for_ack; | ||
1673 | ), | ||
1674 | TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", " CHAN_PR_FMT ", offchan: %s," | ||
1675 | " wait: %u, no cck: %s, dont wait for ack: %s", | ||
1676 | WIPHY_PR_ARG, WDEV_PR_ARG, CHAN_PR_ARG, | ||
1677 | BOOL_TO_STR(__entry->offchan), __entry->wait, | ||
1678 | BOOL_TO_STR(__entry->no_cck), | ||
1679 | BOOL_TO_STR(__entry->dont_wait_for_ack)) | ||
1680 | ); | ||
1681 | |||
1682 | TRACE_EVENT(rdev_set_noack_map, | ||
1683 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, | ||
1684 | u16 noack_map), | ||
1685 | TP_ARGS(wiphy, netdev, noack_map), | ||
1686 | TP_STRUCT__entry( | ||
1687 | WIPHY_ENTRY | ||
1688 | NETDEV_ENTRY | ||
1689 | __field(u16, noack_map) | ||
1690 | ), | ||
1691 | TP_fast_assign( | ||
1692 | WIPHY_ASSIGN; | ||
1693 | NETDEV_ASSIGN; | ||
1694 | __entry->noack_map = noack_map; | ||
1695 | ), | ||
1696 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", noack_map: %u", | ||
1697 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->noack_map) | ||
1698 | ); | ||
1699 | |||
1700 | TRACE_EVENT(rdev_get_et_sset_count, | ||
1701 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int sset), | ||
1702 | TP_ARGS(wiphy, netdev, sset), | ||
1703 | TP_STRUCT__entry( | ||
1704 | WIPHY_ENTRY | ||
1705 | NETDEV_ENTRY | ||
1706 | __field(int, sset) | ||
1707 | ), | ||
1708 | TP_fast_assign( | ||
1709 | WIPHY_ASSIGN; | ||
1710 | NETDEV_ASSIGN; | ||
1711 | __entry->sset = sset; | ||
1712 | ), | ||
1713 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", sset: %d", | ||
1714 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sset) | ||
1715 | ); | ||
1716 | |||
1717 | TRACE_EVENT(rdev_get_et_strings, | ||
1718 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 sset), | ||
1719 | TP_ARGS(wiphy, netdev, sset), | ||
1720 | TP_STRUCT__entry( | ||
1721 | WIPHY_ENTRY | ||
1722 | NETDEV_ENTRY | ||
1723 | __field(u32, sset) | ||
1724 | ), | ||
1725 | TP_fast_assign( | ||
1726 | WIPHY_ASSIGN; | ||
1727 | NETDEV_ASSIGN; | ||
1728 | __entry->sset = sset; | ||
1729 | ), | ||
1730 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", sset: %u", | ||
1731 | WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sset) | ||
1732 | ); | ||
1733 | |||
1734 | DEFINE_EVENT(wiphy_wdev_evt, rdev_get_channel, | ||
1735 | TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), | ||
1736 | TP_ARGS(wiphy, wdev) | ||
1737 | ); | ||
1738 | |||
1739 | TRACE_EVENT(rdev_return_chandef, | ||
1740 | TP_PROTO(struct wiphy *wiphy, int ret, | ||
1741 | struct cfg80211_chan_def *chandef), | ||
1742 | TP_ARGS(wiphy, ret, chandef), | ||
1743 | TP_STRUCT__entry( | ||
1744 | WIPHY_ENTRY | ||
1745 | __field(int, ret) | ||
1746 | CHAN_DEF_ENTRY | ||
1747 | ), | ||
1748 | TP_fast_assign( | ||
1749 | WIPHY_ASSIGN; | ||
1750 | if (ret == 0) | ||
1751 | CHAN_DEF_ASSIGN(chandef); | ||
1752 | else | ||
1753 | CHAN_DEF_ASSIGN((struct cfg80211_chan_def *)NULL); | ||
1754 | __entry->ret = ret; | ||
1755 | ), | ||
1756 | TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", ret: %d", | ||
1757 | WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->ret) | ||
1758 | ); | ||
1759 | |||
1760 | DEFINE_EVENT(wiphy_wdev_evt, rdev_start_p2p_device, | ||
1761 | TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), | ||
1762 | TP_ARGS(wiphy, wdev) | ||
1763 | ); | ||
1764 | |||
1765 | DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_p2p_device, | ||
1766 | TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), | ||
1767 | TP_ARGS(wiphy, wdev) | ||
1768 | ); | ||
1769 | |||
1770 | /************************************************************* | ||
1771 | * cfg80211 exported functions traces * | ||
1772 | *************************************************************/ | ||
1773 | |||
1774 | TRACE_EVENT(cfg80211_return_bool, | ||
1775 | TP_PROTO(bool ret), | ||
1776 | TP_ARGS(ret), | ||
1777 | TP_STRUCT__entry( | ||
1778 | __field(bool, ret) | ||
1779 | ), | ||
1780 | TP_fast_assign( | ||
1781 | __entry->ret = ret; | ||
1782 | ), | ||
1783 | TP_printk("returned %s", BOOL_TO_STR(__entry->ret)) | ||
1784 | ); | ||
1785 | |||
1786 | DECLARE_EVENT_CLASS(cfg80211_netdev_mac_evt, | ||
1787 | TP_PROTO(struct net_device *netdev, const u8 *macaddr), | ||
1788 | TP_ARGS(netdev, macaddr), | ||
1789 | TP_STRUCT__entry( | ||
1790 | NETDEV_ENTRY | ||
1791 | MAC_ENTRY(macaddr) | ||
1792 | ), | ||
1793 | TP_fast_assign( | ||
1794 | NETDEV_ASSIGN; | ||
1795 | MAC_ASSIGN(macaddr, macaddr); | ||
1796 | ), | ||
1797 | TP_printk(NETDEV_PR_FMT ", mac: " MAC_PR_FMT, | ||
1798 | NETDEV_PR_ARG, MAC_PR_ARG(macaddr)) | ||
1799 | ); | ||
1800 | |||
1801 | DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_notify_new_peer_candidate, | ||
1802 | TP_PROTO(struct net_device *netdev, const u8 *macaddr), | ||
1803 | TP_ARGS(netdev, macaddr) | ||
1804 | ); | ||
1805 | |||
1806 | DECLARE_EVENT_CLASS(netdev_evt_only, | ||
1807 | TP_PROTO(struct net_device *netdev), | ||
1808 | TP_ARGS(netdev), | ||
1809 | TP_STRUCT__entry( | ||
1810 | NETDEV_ENTRY | ||
1811 | ), | ||
1812 | TP_fast_assign( | ||
1813 | NETDEV_ASSIGN; | ||
1814 | ), | ||
1815 | TP_printk(NETDEV_PR_FMT, NETDEV_PR_ARG) | ||
1816 | ); | ||
1817 | |||
1818 | DEFINE_EVENT(netdev_evt_only, cfg80211_send_rx_auth, | ||
1819 | TP_PROTO(struct net_device *netdev), | ||
1820 | TP_ARGS(netdev) | ||
1821 | ); | ||
1822 | |||
1823 | TRACE_EVENT(cfg80211_send_rx_assoc, | ||
1824 | TP_PROTO(struct net_device *netdev, struct cfg80211_bss *bss), | ||
1825 | TP_ARGS(netdev, bss), | ||
1826 | TP_STRUCT__entry( | ||
1827 | NETDEV_ENTRY | ||
1828 | MAC_ENTRY(bssid) | ||
1829 | CHAN_ENTRY | ||
1830 | ), | ||
1831 | TP_fast_assign( | ||
1832 | NETDEV_ASSIGN; | ||
1833 | MAC_ASSIGN(bssid, bss->bssid); | ||
1834 | CHAN_ASSIGN(bss->channel); | ||
1835 | ), | ||
1836 | TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", " CHAN_PR_FMT, | ||
1837 | NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG) | ||
1838 | ); | ||
1839 | |||
1840 | DEFINE_EVENT(netdev_evt_only, __cfg80211_send_deauth, | ||
1841 | TP_PROTO(struct net_device *netdev), | ||
1842 | TP_ARGS(netdev) | ||
1843 | ); | ||
1844 | |||
1845 | DEFINE_EVENT(netdev_evt_only, __cfg80211_send_disassoc, | ||
1846 | TP_PROTO(struct net_device *netdev), | ||
1847 | TP_ARGS(netdev) | ||
1848 | ); | ||
1849 | |||
1850 | DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_deauth, | ||
1851 | TP_PROTO(struct net_device *netdev), | ||
1852 | TP_ARGS(netdev) | ||
1853 | ); | ||
1854 | |||
1855 | DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_disassoc, | ||
1856 | TP_PROTO(struct net_device *netdev), | ||
1857 | TP_ARGS(netdev) | ||
1858 | ); | ||
1859 | |||
1860 | DECLARE_EVENT_CLASS(netdev_mac_evt, | ||
1861 | TP_PROTO(struct net_device *netdev, const u8 *mac), | ||
1862 | TP_ARGS(netdev, mac), | ||
1863 | TP_STRUCT__entry( | ||
1864 | NETDEV_ENTRY | ||
1865 | MAC_ENTRY(mac) | ||
1866 | ), | ||
1867 | TP_fast_assign( | ||
1868 | NETDEV_ASSIGN; | ||
1869 | MAC_ASSIGN(mac, mac); | ||
1870 | ), | ||
1871 | TP_printk(NETDEV_PR_FMT ", mac: " MAC_PR_FMT, | ||
1872 | NETDEV_PR_ARG, MAC_PR_ARG(mac)) | ||
1873 | ); | ||
1874 | |||
1875 | DEFINE_EVENT(netdev_mac_evt, cfg80211_send_auth_timeout, | ||
1876 | TP_PROTO(struct net_device *netdev, const u8 *mac), | ||
1877 | TP_ARGS(netdev, mac) | ||
1878 | ); | ||
1879 | |||
1880 | DEFINE_EVENT(netdev_mac_evt, cfg80211_send_assoc_timeout, | ||
1881 | TP_PROTO(struct net_device *netdev, const u8 *mac), | ||
1882 | TP_ARGS(netdev, mac) | ||
1883 | ); | ||
1884 | |||
1885 | TRACE_EVENT(cfg80211_michael_mic_failure, | ||
1886 | TP_PROTO(struct net_device *netdev, const u8 *addr, | ||
1887 | enum nl80211_key_type key_type, int key_id, const u8 *tsc), | ||
1888 | TP_ARGS(netdev, addr, key_type, key_id, tsc), | ||
1889 | TP_STRUCT__entry( | ||
1890 | NETDEV_ENTRY | ||
1891 | MAC_ENTRY(addr) | ||
1892 | __field(enum nl80211_key_type, key_type) | ||
1893 | __field(int, key_id) | ||
1894 | __array(u8, tsc, 6) | ||
1895 | ), | ||
1896 | TP_fast_assign( | ||
1897 | NETDEV_ASSIGN; | ||
1898 | MAC_ASSIGN(addr, addr); | ||
1899 | __entry->key_type = key_type; | ||
1900 | __entry->key_id = key_id; | ||
1901 | memcpy(__entry->tsc, tsc, 6); | ||
1902 | ), | ||
1903 | TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm", | ||
1904 | NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type, | ||
1905 | __entry->key_id, __entry->tsc) | ||
1906 | ); | ||
1907 | |||
1908 | TRACE_EVENT(cfg80211_ready_on_channel, | ||
1909 | TP_PROTO(struct wireless_dev *wdev, u64 cookie, | ||
1910 | struct ieee80211_channel *chan, | ||
1911 | unsigned int duration), | ||
1912 | TP_ARGS(wdev, cookie, chan, duration), | ||
1913 | TP_STRUCT__entry( | ||
1914 | WDEV_ENTRY | ||
1915 | __field(u64, cookie) | ||
1916 | CHAN_ENTRY | ||
1917 | __field(unsigned int, duration) | ||
1918 | ), | ||
1919 | TP_fast_assign( | ||
1920 | WDEV_ASSIGN; | ||
1921 | __entry->cookie = cookie; | ||
1922 | CHAN_ASSIGN(chan); | ||
1923 | __entry->duration = duration; | ||
1924 | ), | ||
1925 | TP_printk(WDEV_PR_FMT ", cookie: %llu, " CHAN_PR_FMT ", duration: %u", | ||
1926 | WDEV_PR_ARG, __entry->cookie, CHAN_PR_ARG, | ||
1927 | __entry->duration) | ||
1928 | ); | ||
1929 | |||
1930 | TRACE_EVENT(cfg80211_ready_on_channel_expired, | ||
1931 | TP_PROTO(struct wireless_dev *wdev, u64 cookie, | ||
1932 | struct ieee80211_channel *chan), | ||
1933 | TP_ARGS(wdev, cookie, chan), | ||
1934 | TP_STRUCT__entry( | ||
1935 | WDEV_ENTRY | ||
1936 | __field(u64, cookie) | ||
1937 | CHAN_ENTRY | ||
1938 | ), | ||
1939 | TP_fast_assign( | ||
1940 | WDEV_ASSIGN; | ||
1941 | __entry->cookie = cookie; | ||
1942 | CHAN_ASSIGN(chan); | ||
1943 | ), | ||
1944 | TP_printk(WDEV_PR_FMT ", cookie: %llu, " CHAN_PR_FMT, | ||
1945 | WDEV_PR_ARG, __entry->cookie, CHAN_PR_ARG) | ||
1946 | ); | ||
1947 | |||
1948 | TRACE_EVENT(cfg80211_new_sta, | ||
1949 | TP_PROTO(struct net_device *netdev, const u8 *mac_addr, | ||
1950 | struct station_info *sinfo), | ||
1951 | TP_ARGS(netdev, mac_addr, sinfo), | ||
1952 | TP_STRUCT__entry( | ||
1953 | NETDEV_ENTRY | ||
1954 | MAC_ENTRY(mac_addr) | ||
1955 | SINFO_ENTRY | ||
1956 | ), | ||
1957 | TP_fast_assign( | ||
1958 | NETDEV_ASSIGN; | ||
1959 | MAC_ASSIGN(mac_addr, mac_addr); | ||
1960 | SINFO_ASSIGN; | ||
1961 | ), | ||
1962 | TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT, | ||
1963 | NETDEV_PR_ARG, MAC_PR_ARG(mac_addr)) | ||
1964 | ); | ||
1965 | |||
1966 | DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_del_sta, | ||
1967 | TP_PROTO(struct net_device *netdev, const u8 *macaddr), | ||
1968 | TP_ARGS(netdev, macaddr) | ||
1969 | ); | ||
1970 | |||
1971 | TRACE_EVENT(cfg80211_rx_mgmt, | ||
1972 | TP_PROTO(struct wireless_dev *wdev, int freq, int sig_mbm), | ||
1973 | TP_ARGS(wdev, freq, sig_mbm), | ||
1974 | TP_STRUCT__entry( | ||
1975 | WDEV_ENTRY | ||
1976 | __field(int, freq) | ||
1977 | __field(int, sig_mbm) | ||
1978 | ), | ||
1979 | TP_fast_assign( | ||
1980 | WDEV_ASSIGN; | ||
1981 | __entry->freq = freq; | ||
1982 | __entry->sig_mbm = sig_mbm; | ||
1983 | ), | ||
1984 | TP_printk(WDEV_PR_FMT ", freq: %d, sig mbm: %d", | ||
1985 | WDEV_PR_ARG, __entry->freq, __entry->sig_mbm) | ||
1986 | ); | ||
1987 | |||
1988 | TRACE_EVENT(cfg80211_mgmt_tx_status, | ||
1989 | TP_PROTO(struct wireless_dev *wdev, u64 cookie, bool ack), | ||
1990 | TP_ARGS(wdev, cookie, ack), | ||
1991 | TP_STRUCT__entry( | ||
1992 | WDEV_ENTRY | ||
1993 | __field(u64, cookie) | ||
1994 | __field(bool, ack) | ||
1995 | ), | ||
1996 | TP_fast_assign( | ||
1997 | WDEV_ASSIGN; | ||
1998 | __entry->cookie = cookie; | ||
1999 | __entry->ack = ack; | ||
2000 | ), | ||
2001 | TP_printk(WDEV_PR_FMT ", cookie: %llu, ack: %s", | ||
2002 | WDEV_PR_ARG, __entry->cookie, BOOL_TO_STR(__entry->ack)) | ||
2003 | ); | ||
2004 | |||
2005 | TRACE_EVENT(cfg80211_cqm_rssi_notify, | ||
2006 | TP_PROTO(struct net_device *netdev, | ||
2007 | enum nl80211_cqm_rssi_threshold_event rssi_event), | ||
2008 | TP_ARGS(netdev, rssi_event), | ||
2009 | TP_STRUCT__entry( | ||
2010 | NETDEV_ENTRY | ||
2011 | __field(enum nl80211_cqm_rssi_threshold_event, rssi_event) | ||
2012 | ), | ||
2013 | TP_fast_assign( | ||
2014 | NETDEV_ASSIGN; | ||
2015 | __entry->rssi_event = rssi_event; | ||
2016 | ), | ||
2017 | TP_printk(NETDEV_PR_FMT ", rssi event: %d", | ||
2018 | NETDEV_PR_ARG, __entry->rssi_event) | ||
2019 | ); | ||
2020 | |||
2021 | TRACE_EVENT(cfg80211_reg_can_beacon, | ||
2022 | TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef), | ||
2023 | TP_ARGS(wiphy, chandef), | ||
2024 | TP_STRUCT__entry( | ||
2025 | WIPHY_ENTRY | ||
2026 | CHAN_DEF_ENTRY | ||
2027 | ), | ||
2028 | TP_fast_assign( | ||
2029 | WIPHY_ASSIGN; | ||
2030 | CHAN_DEF_ASSIGN(chandef); | ||
2031 | ), | ||
2032 | TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT, | ||
2033 | WIPHY_PR_ARG, CHAN_DEF_PR_ARG) | ||
2034 | ); | ||
2035 | |||
2036 | TRACE_EVENT(cfg80211_ch_switch_notify, | ||
2037 | TP_PROTO(struct net_device *netdev, | ||
2038 | struct cfg80211_chan_def *chandef), | ||
2039 | TP_ARGS(netdev, chandef), | ||
2040 | TP_STRUCT__entry( | ||
2041 | NETDEV_ENTRY | ||
2042 | CHAN_DEF_ENTRY | ||
2043 | ), | ||
2044 | TP_fast_assign( | ||
2045 | NETDEV_ASSIGN; | ||
2046 | CHAN_DEF_ASSIGN(chandef); | ||
2047 | ), | ||
2048 | TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT, | ||
2049 | NETDEV_PR_ARG, CHAN_DEF_PR_ARG) | ||
2050 | ); | ||
2051 | |||
2052 | DECLARE_EVENT_CLASS(cfg80211_rx_evt, | ||
2053 | TP_PROTO(struct net_device *netdev, const u8 *addr), | ||
2054 | TP_ARGS(netdev, addr), | ||
2055 | TP_STRUCT__entry( | ||
2056 | NETDEV_ENTRY | ||
2057 | MAC_ENTRY(addr) | ||
2058 | ), | ||
2059 | TP_fast_assign( | ||
2060 | NETDEV_ASSIGN; | ||
2061 | MAC_ASSIGN(addr, addr); | ||
2062 | ), | ||
2063 | TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(addr)) | ||
2064 | ); | ||
2065 | |||
2066 | DEFINE_EVENT(cfg80211_rx_evt, cfg80211_ibss_joined, | ||
2067 | TP_PROTO(struct net_device *netdev, const u8 *addr), | ||
2068 | TP_ARGS(netdev, addr) | ||
2069 | ); | ||
2070 | |||
2071 | DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame, | ||
2072 | TP_PROTO(struct net_device *netdev, const u8 *addr), | ||
2073 | TP_ARGS(netdev, addr) | ||
2074 | ); | ||
2075 | |||
2076 | DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_unexpected_4addr_frame, | ||
2077 | TP_PROTO(struct net_device *netdev, const u8 *addr), | ||
2078 | TP_ARGS(netdev, addr) | ||
2079 | ); | ||
2080 | |||
2081 | TRACE_EVENT(cfg80211_probe_status, | ||
2082 | TP_PROTO(struct net_device *netdev, const u8 *addr, u64 cookie, | ||
2083 | bool acked), | ||
2084 | TP_ARGS(netdev, addr, cookie, acked), | ||
2085 | TP_STRUCT__entry( | ||
2086 | NETDEV_ENTRY | ||
2087 | MAC_ENTRY(addr) | ||
2088 | __field(u64, cookie) | ||
2089 | __field(bool, acked) | ||
2090 | ), | ||
2091 | TP_fast_assign( | ||
2092 | NETDEV_ASSIGN; | ||
2093 | MAC_ASSIGN(addr, addr); | ||
2094 | __entry->cookie = cookie; | ||
2095 | __entry->acked = acked; | ||
2096 | ), | ||
2097 | TP_printk(NETDEV_PR_FMT " addr:" MAC_PR_FMT ", cookie: %llu, acked: %s", | ||
2098 | NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->cookie, | ||
2099 | BOOL_TO_STR(__entry->acked)) | ||
2100 | ); | ||
2101 | |||
2102 | TRACE_EVENT(cfg80211_cqm_pktloss_notify, | ||
2103 | TP_PROTO(struct net_device *netdev, const u8 *peer, u32 num_packets), | ||
2104 | TP_ARGS(netdev, peer, num_packets), | ||
2105 | TP_STRUCT__entry( | ||
2106 | NETDEV_ENTRY | ||
2107 | MAC_ENTRY(peer) | ||
2108 | __field(u32, num_packets) | ||
2109 | ), | ||
2110 | TP_fast_assign( | ||
2111 | NETDEV_ASSIGN; | ||
2112 | MAC_ASSIGN(peer, peer); | ||
2113 | __entry->num_packets = num_packets; | ||
2114 | ), | ||
2115 | TP_printk(NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", num of lost packets: %u", | ||
2116 | NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->num_packets) | ||
2117 | ); | ||
2118 | |||
2119 | DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_gtk_rekey_notify, | ||
2120 | TP_PROTO(struct net_device *netdev, const u8 *macaddr), | ||
2121 | TP_ARGS(netdev, macaddr) | ||
2122 | ); | ||
2123 | |||
2124 | TRACE_EVENT(cfg80211_pmksa_candidate_notify, | ||
2125 | TP_PROTO(struct net_device *netdev, int index, const u8 *bssid, | ||
2126 | bool preauth), | ||
2127 | TP_ARGS(netdev, index, bssid, preauth), | ||
2128 | TP_STRUCT__entry( | ||
2129 | NETDEV_ENTRY | ||
2130 | __field(int, index) | ||
2131 | MAC_ENTRY(bssid) | ||
2132 | __field(bool, preauth) | ||
2133 | ), | ||
2134 | TP_fast_assign( | ||
2135 | NETDEV_ASSIGN; | ||
2136 | __entry->index = index; | ||
2137 | MAC_ASSIGN(bssid, bssid); | ||
2138 | __entry->preauth = preauth; | ||
2139 | ), | ||
2140 | TP_printk(NETDEV_PR_FMT ", index:%d, bssid: " MAC_PR_FMT ", pre auth: %s", | ||
2141 | NETDEV_PR_ARG, __entry->index, MAC_PR_ARG(bssid), | ||
2142 | BOOL_TO_STR(__entry->preauth)) | ||
2143 | ); | ||
2144 | |||
2145 | TRACE_EVENT(cfg80211_report_obss_beacon, | ||
2146 | TP_PROTO(struct wiphy *wiphy, const u8 *frame, size_t len, | ||
2147 | int freq, int sig_dbm), | ||
2148 | TP_ARGS(wiphy, frame, len, freq, sig_dbm), | ||
2149 | TP_STRUCT__entry( | ||
2150 | WIPHY_ENTRY | ||
2151 | __field(int, freq) | ||
2152 | __field(int, sig_dbm) | ||
2153 | ), | ||
2154 | TP_fast_assign( | ||
2155 | WIPHY_ASSIGN; | ||
2156 | __entry->freq = freq; | ||
2157 | __entry->sig_dbm = sig_dbm; | ||
2158 | ), | ||
2159 | TP_printk(WIPHY_PR_FMT ", freq: %d, sig_dbm: %d", | ||
2160 | WIPHY_PR_ARG, __entry->freq, __entry->sig_dbm) | ||
2161 | ); | ||
2162 | |||
2163 | TRACE_EVENT(cfg80211_tdls_oper_request, | ||
2164 | TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *peer, | ||
2165 | enum nl80211_tdls_operation oper, u16 reason_code), | ||
2166 | TP_ARGS(wiphy, netdev, peer, oper, reason_code), | ||
2167 | TP_STRUCT__entry( | ||
2168 | WIPHY_ENTRY | ||
2169 | NETDEV_ENTRY | ||
2170 | MAC_ENTRY(peer) | ||
2171 | __field(enum nl80211_tdls_operation, oper) | ||
2172 | __field(u16, reason_code) | ||
2173 | ), | ||
2174 | TP_fast_assign( | ||
2175 | WIPHY_ASSIGN; | ||
2176 | NETDEV_ASSIGN; | ||
2177 | MAC_ASSIGN(peer, peer); | ||
2178 | __entry->oper = oper; | ||
2179 | __entry->reason_code = reason_code; | ||
2180 | ), | ||
2181 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", oper: %d, reason_code: %u", | ||
2182 | WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->oper, | ||
2183 | __entry->reason_code) | ||
2184 | ); | ||
2185 | |||
2186 | TRACE_EVENT(cfg80211_scan_done, | ||
2187 | TP_PROTO(struct cfg80211_scan_request *request, bool aborted), | ||
2188 | TP_ARGS(request, aborted), | ||
2189 | TP_STRUCT__entry( | ||
2190 | __field(u32, n_channels) | ||
2191 | __dynamic_array(u8, ie, request ? request->ie_len : 0) | ||
2192 | __array(u32, rates, IEEE80211_NUM_BANDS) | ||
2193 | __field(u32, wdev_id) | ||
2194 | MAC_ENTRY(wiphy_mac) | ||
2195 | __field(bool, no_cck) | ||
2196 | __field(bool, aborted) | ||
2197 | ), | ||
2198 | TP_fast_assign( | ||
2199 | if (request) { | ||
2200 | memcpy(__get_dynamic_array(ie), request->ie, | ||
2201 | request->ie_len); | ||
2202 | memcpy(__entry->rates, request->rates, | ||
2203 | IEEE80211_NUM_BANDS * sizeof(u32)); | ||
2204 | __entry->wdev_id = request->wdev ? | ||
2205 | request->wdev->identifier : 0; | ||
2206 | if (request->wiphy) | ||
2207 | MAC_ASSIGN(wiphy_mac, | ||
2208 | request->wiphy->perm_addr); | ||
2209 | __entry->no_cck = request->no_cck; | ||
2210 | } | ||
2211 | __entry->aborted = aborted; | ||
2212 | ), | ||
2213 | TP_printk("aborted: %s", BOOL_TO_STR(__entry->aborted)) | ||
2214 | ); | ||
2215 | |||
2216 | DEFINE_EVENT(wiphy_only_evt, cfg80211_sched_scan_results, | ||
2217 | TP_PROTO(struct wiphy *wiphy), | ||
2218 | TP_ARGS(wiphy) | ||
2219 | ); | ||
2220 | |||
2221 | DEFINE_EVENT(wiphy_only_evt, cfg80211_sched_scan_stopped, | ||
2222 | TP_PROTO(struct wiphy *wiphy), | ||
2223 | TP_ARGS(wiphy) | ||
2224 | ); | ||
2225 | |||
2226 | TRACE_EVENT(cfg80211_get_bss, | ||
2227 | TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel, | ||
2228 | const u8 *bssid, const u8 *ssid, size_t ssid_len, | ||
2229 | u16 capa_mask, u16 capa_val), | ||
2230 | TP_ARGS(wiphy, channel, bssid, ssid, ssid_len, capa_mask, capa_val), | ||
2231 | TP_STRUCT__entry( | ||
2232 | WIPHY_ENTRY | ||
2233 | CHAN_ENTRY | ||
2234 | MAC_ENTRY(bssid) | ||
2235 | __dynamic_array(u8, ssid, ssid_len) | ||
2236 | __field(u16, capa_mask) | ||
2237 | __field(u16, capa_val) | ||
2238 | ), | ||
2239 | TP_fast_assign( | ||
2240 | WIPHY_ASSIGN; | ||
2241 | CHAN_ASSIGN(channel); | ||
2242 | MAC_ASSIGN(bssid, bssid); | ||
2243 | memcpy(__get_dynamic_array(ssid), ssid, ssid_len); | ||
2244 | __entry->capa_mask = capa_mask; | ||
2245 | __entry->capa_val = capa_val; | ||
2246 | ), | ||
2247 | TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT ", " MAC_PR_FMT ", ssid: %#.2x, " | ||
2248 | "capa_mask: %d, capa_val: %u", WIPHY_PR_ARG, CHAN_PR_ARG, | ||
2249 | MAC_PR_ARG(bssid), ((u8 *)__get_dynamic_array(ssid))[0], | ||
2250 | __entry->capa_mask, __entry->capa_val) | ||
2251 | ); | ||
2252 | |||
2253 | TRACE_EVENT(cfg80211_inform_bss_frame, | ||
2254 | TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel, | ||
2255 | struct ieee80211_mgmt *mgmt, size_t len, | ||
2256 | s32 signal), | ||
2257 | TP_ARGS(wiphy, channel, mgmt, len, signal), | ||
2258 | TP_STRUCT__entry( | ||
2259 | WIPHY_ENTRY | ||
2260 | CHAN_ENTRY | ||
2261 | __dynamic_array(u8, mgmt, len) | ||
2262 | __field(s32, signal) | ||
2263 | ), | ||
2264 | TP_fast_assign( | ||
2265 | WIPHY_ASSIGN; | ||
2266 | CHAN_ASSIGN(channel); | ||
2267 | if (mgmt) | ||
2268 | memcpy(__get_dynamic_array(mgmt), mgmt, len); | ||
2269 | __entry->signal = signal; | ||
2270 | ), | ||
2271 | TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT ", signal: %d", | ||
2272 | WIPHY_PR_ARG, CHAN_PR_ARG, __entry->signal) | ||
2273 | ); | ||
2274 | |||
2275 | DECLARE_EVENT_CLASS(cfg80211_bss_evt, | ||
2276 | TP_PROTO(struct cfg80211_bss *pub), | ||
2277 | TP_ARGS(pub), | ||
2278 | TP_STRUCT__entry( | ||
2279 | MAC_ENTRY(bssid) | ||
2280 | CHAN_ENTRY | ||
2281 | ), | ||
2282 | TP_fast_assign( | ||
2283 | MAC_ASSIGN(bssid, pub->bssid); | ||
2284 | CHAN_ASSIGN(pub->channel); | ||
2285 | ), | ||
2286 | TP_printk(MAC_PR_FMT ", " CHAN_PR_FMT, MAC_PR_ARG(bssid), CHAN_PR_ARG) | ||
2287 | ); | ||
2288 | |||
2289 | DEFINE_EVENT(cfg80211_bss_evt, cfg80211_return_bss, | ||
2290 | TP_PROTO(struct cfg80211_bss *pub), | ||
2291 | TP_ARGS(pub) | ||
2292 | ); | ||
2293 | |||
2294 | TRACE_EVENT(cfg80211_return_uint, | ||
2295 | TP_PROTO(unsigned int ret), | ||
2296 | TP_ARGS(ret), | ||
2297 | TP_STRUCT__entry( | ||
2298 | __field(unsigned int, ret) | ||
2299 | ), | ||
2300 | TP_fast_assign( | ||
2301 | __entry->ret = ret; | ||
2302 | ), | ||
2303 | TP_printk("ret: %d", __entry->ret) | ||
2304 | ); | ||
2305 | |||
2306 | TRACE_EVENT(cfg80211_return_u32, | ||
2307 | TP_PROTO(u32 ret), | ||
2308 | TP_ARGS(ret), | ||
2309 | TP_STRUCT__entry( | ||
2310 | __field(u32, ret) | ||
2311 | ), | ||
2312 | TP_fast_assign( | ||
2313 | __entry->ret = ret; | ||
2314 | ), | ||
2315 | TP_printk("ret: %u", __entry->ret) | ||
2316 | ); | ||
2317 | |||
2318 | #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ | ||
2319 | |||
2320 | #undef TRACE_INCLUDE_PATH | ||
2321 | #define TRACE_INCLUDE_PATH . | ||
2322 | #undef TRACE_INCLUDE_FILE | ||
2323 | #define TRACE_INCLUDE_FILE trace | ||
2324 | #include <trace/define_trace.h> | ||
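
The rdev_* and cfg80211_* events above are consumed through the normal kernel tracing interface. As a rough illustration only: a tiny userspace helper that switches the whole event group on, assuming debugfs is mounted at /sys/kernel/debug and that this header's TRACE_SYSTEM is cfg80211 (the #define sits outside the hunks shown here).

    /* Sketch: enable all cfg80211 trace events from userspace.
     * The tracefs path and the "cfg80211" group name are assumptions,
     * see the note above.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path =
                    "/sys/kernel/debug/tracing/events/cfg80211/enable";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "1", 1) != 1) {
                    perror("write");
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }

Individual events can be toggled the same way through their own enable files under that directory, and the formatted lines produced by the TP_printk() strings above show up in the trace pipe.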
diff --git a/net/wireless/util.c b/net/wireless/util.c index 2762e8329986..3cce6e486219 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <net/ip.h> | 11 | #include <net/ip.h> |
12 | #include <net/dsfield.h> | 12 | #include <net/dsfield.h> |
13 | #include "core.h" | 13 | #include "core.h" |
14 | #include "rdev-ops.h" | ||
15 | |||
14 | 16 | ||
15 | struct ieee80211_rate * | 17 | struct ieee80211_rate * |
16 | ieee80211_get_response_rate(struct ieee80211_supported_band *sband, | 18 | ieee80211_get_response_rate(struct ieee80211_supported_band *sband, |
@@ -705,19 +707,18 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev) | |||
705 | for (i = 0; i < 6; i++) { | 707 | for (i = 0; i < 6; i++) { |
706 | if (!wdev->connect_keys->params[i].cipher) | 708 | if (!wdev->connect_keys->params[i].cipher) |
707 | continue; | 709 | continue; |
708 | if (rdev->ops->add_key(wdev->wiphy, dev, i, false, NULL, | 710 | if (rdev_add_key(rdev, dev, i, false, NULL, |
709 | &wdev->connect_keys->params[i])) { | 711 | &wdev->connect_keys->params[i])) { |
710 | netdev_err(dev, "failed to set key %d\n", i); | 712 | netdev_err(dev, "failed to set key %d\n", i); |
711 | continue; | 713 | continue; |
712 | } | 714 | } |
713 | if (wdev->connect_keys->def == i) | 715 | if (wdev->connect_keys->def == i) |
714 | if (rdev->ops->set_default_key(wdev->wiphy, dev, | 716 | if (rdev_set_default_key(rdev, dev, i, true, true)) { |
715 | i, true, true)) { | ||
716 | netdev_err(dev, "failed to set defkey %d\n", i); | 717 | netdev_err(dev, "failed to set defkey %d\n", i); |
717 | continue; | 718 | continue; |
718 | } | 719 | } |
719 | if (wdev->connect_keys->defmgmt == i) | 720 | if (wdev->connect_keys->defmgmt == i) |
720 | if (rdev->ops->set_default_mgmt_key(wdev->wiphy, dev, i)) | 721 | if (rdev_set_default_mgmt_key(rdev, dev, i)) |
721 | netdev_err(dev, "failed to set mgtdef %d\n", i); | 722 | netdev_err(dev, "failed to set mgtdef %d\n", i); |
722 | } | 723 | } |
723 | 724 | ||
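
The conversions in this file (and in wext-compat.c further down) replace direct rdev->ops calls with rdev_*() helpers from the new rdev-ops.h, so that every driver callback is bracketed by the rdev_* tracepoints declared in trace.h. The wrapper definitions themselves are not part of the hunks shown here; a minimal sketch of the shape they presumably take, using rdev_set_default_key as the example and assuming matching rdev_set_default_key / rdev_return_int trace events defined earlier in trace.h:

    static inline int
    rdev_set_default_key(struct cfg80211_registered_device *rdev,
                         struct net_device *dev, u8 key_index,
                         bool unicast, bool multicast)
    {
            int ret;

            /* record the request before calling into the driver */
            trace_rdev_set_default_key(&rdev->wiphy, dev, key_index,
                                       unicast, multicast);
            ret = rdev->ops->set_default_key(&rdev->wiphy, dev, key_index,
                                             unicast, multicast);
            /* record the driver's return value */
            trace_rdev_return_int(&rdev->wiphy, ret);
            return ret;
    }

Call sites keep exactly the arguments they passed before, with the explicit wiphy pointer replaced by rdev, which is why the conversions in these hunks are mechanical one-line changes.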
@@ -850,8 +851,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, | |||
850 | cfg80211_process_rdev_events(rdev); | 851 | cfg80211_process_rdev_events(rdev); |
851 | } | 852 | } |
852 | 853 | ||
853 | err = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, | 854 | err = rdev_change_virtual_intf(rdev, dev, ntype, flags, params); |
854 | ntype, flags, params); | ||
855 | 855 | ||
856 | WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype); | 856 | WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype); |
857 | 857 | ||
@@ -944,14 +944,86 @@ static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate) | |||
944 | return __mcs2bitrate[rate->mcs]; | 944 | return __mcs2bitrate[rate->mcs]; |
945 | } | 945 | } |
946 | 946 | ||
947 | static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate) | ||
948 | { | ||
949 | static const u32 base[4][10] = { | ||
950 | { 6500000, | ||
951 | 13000000, | ||
952 | 19500000, | ||
953 | 26000000, | ||
954 | 39000000, | ||
955 | 52000000, | ||
956 | 58500000, | ||
957 | 65000000, | ||
958 | 78000000, | ||
959 | 0, | ||
960 | }, | ||
961 | { 13500000, | ||
962 | 27000000, | ||
963 | 40500000, | ||
964 | 54000000, | ||
965 | 81000000, | ||
966 | 108000000, | ||
967 | 121500000, | ||
968 | 135000000, | ||
969 | 162000000, | ||
970 | 180000000, | ||
971 | }, | ||
972 | { 29300000, | ||
973 | 58500000, | ||
974 | 87800000, | ||
975 | 117000000, | ||
976 | 175500000, | ||
977 | 234000000, | ||
978 | 263300000, | ||
979 | 292500000, | ||
980 | 351000000, | ||
981 | 390000000, | ||
982 | }, | ||
983 | { 58500000, | ||
984 | 117000000, | ||
985 | 175500000, | ||
986 | 234000000, | ||
987 | 351000000, | ||
988 | 468000000, | ||
989 | 526500000, | ||
990 | 585000000, | ||
991 | 702000000, | ||
992 | 780000000, | ||
993 | }, | ||
994 | }; | ||
995 | u32 bitrate; | ||
996 | int idx; | ||
997 | |||
998 | if (WARN_ON_ONCE(rate->mcs > 9)) | ||
999 | return 0; | ||
1000 | |||
1001 | idx = rate->flags & (RATE_INFO_FLAGS_160_MHZ_WIDTH | | ||
1002 | RATE_INFO_FLAGS_80P80_MHZ_WIDTH) ? 3 : | ||
1003 | rate->flags & RATE_INFO_FLAGS_80_MHZ_WIDTH ? 2 : | ||
1004 | rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH ? 1 : 0; | ||
1005 | |||
1006 | bitrate = base[idx][rate->mcs]; | ||
1007 | bitrate *= rate->nss; | ||
1008 | |||
1009 | if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) | ||
1010 | bitrate = (bitrate / 9) * 10; | ||
1011 | |||
1012 | /* do NOT round down here */ | ||
1013 | return (bitrate + 50000) / 100000; | ||
1014 | } | ||
1015 | |||
947 | u32 cfg80211_calculate_bitrate(struct rate_info *rate) | 1016 | u32 cfg80211_calculate_bitrate(struct rate_info *rate) |
948 | { | 1017 | { |
949 | int modulation, streams, bitrate; | 1018 | int modulation, streams, bitrate; |
950 | 1019 | ||
951 | if (!(rate->flags & RATE_INFO_FLAGS_MCS)) | 1020 | if (!(rate->flags & RATE_INFO_FLAGS_MCS) && |
1021 | !(rate->flags & RATE_INFO_FLAGS_VHT_MCS)) | ||
952 | return rate->legacy; | 1022 | return rate->legacy; |
953 | if (rate->flags & RATE_INFO_FLAGS_60G) | 1023 | if (rate->flags & RATE_INFO_FLAGS_60G) |
954 | return cfg80211_calculate_bitrate_60g(rate); | 1024 | return cfg80211_calculate_bitrate_60g(rate); |
1025 | if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) | ||
1026 | return cfg80211_calculate_bitrate_vht(rate); | ||
955 | 1027 | ||
956 | /* the formula below does only work for MCS values smaller than 32 */ | 1028 | /* the formula below does only work for MCS values smaller than 32 */ |
957 | if (WARN_ON_ONCE(rate->mcs >= 32)) | 1029 | if (WARN_ON_ONCE(rate->mcs >= 32)) |
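
To make the VHT table above concrete: cfg80211_calculate_bitrate_vht() picks a base rate by bandwidth and MCS, scales it by the number of spatial streams, applies the 10/9 short-GI factor, then rounds to the 100 kbit/s units used by the legacy rates. A standalone userspace sketch (not kernel code) reproducing the arithmetic for VHT MCS 9, 1 spatial stream, 80 MHz, short GI, i.e. base[2][9] = 390000000:

    #include <stdio.h>

    int main(void)
    {
            unsigned int bitrate = 390000000;       /* base[2][9], bit/s */
            unsigned int nss = 1;                   /* spatial streams */

            bitrate *= nss;
            bitrate = (bitrate / 9) * 10;           /* short GI: x 10/9 */

            /* same rounding as the kernel code: units of 100 kbit/s */
            bitrate = (bitrate + 50000) / 100000;
            printf("%u.%u Mbit/s\n", bitrate / 10, bitrate % 10);
            return 0;
    }

This prints 433.3 Mbit/s, the nominal rate for that configuration, which is why the comment insists on rounding to nearest rather than truncating.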
@@ -980,6 +1052,106 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate) | |||
980 | } | 1052 | } |
981 | EXPORT_SYMBOL(cfg80211_calculate_bitrate); | 1053 | EXPORT_SYMBOL(cfg80211_calculate_bitrate); |
982 | 1054 | ||
1055 | int cfg80211_get_p2p_attr(const u8 *ies, unsigned int len, | ||
1056 | enum ieee80211_p2p_attr_id attr, | ||
1057 | u8 *buf, unsigned int bufsize) | ||
1058 | { | ||
1059 | u8 *out = buf; | ||
1060 | u16 attr_remaining = 0; | ||
1061 | bool desired_attr = false; | ||
1062 | u16 desired_len = 0; | ||
1063 | |||
1064 | while (len > 0) { | ||
1065 | unsigned int iedatalen; | ||
1066 | unsigned int copy; | ||
1067 | const u8 *iedata; | ||
1068 | |||
1069 | if (len < 2) | ||
1070 | return -EILSEQ; | ||
1071 | iedatalen = ies[1]; | ||
1072 | if (iedatalen + 2 > len) | ||
1073 | return -EILSEQ; | ||
1074 | |||
1075 | if (ies[0] != WLAN_EID_VENDOR_SPECIFIC) | ||
1076 | goto cont; | ||
1077 | |||
1078 | if (iedatalen < 4) | ||
1079 | goto cont; | ||
1080 | |||
1081 | iedata = ies + 2; | ||
1082 | |||
1083 | /* check WFA OUI, P2P subtype */ | ||
1084 | if (iedata[0] != 0x50 || iedata[1] != 0x6f || | ||
1085 | iedata[2] != 0x9a || iedata[3] != 0x09) | ||
1086 | goto cont; | ||
1087 | |||
1088 | iedatalen -= 4; | ||
1089 | iedata += 4; | ||
1090 | |||
1091 | /* check attribute continuation into this IE */ | ||
1092 | copy = min_t(unsigned int, attr_remaining, iedatalen); | ||
1093 | if (copy && desired_attr) { | ||
1094 | desired_len += copy; | ||
1095 | if (out) { | ||
1096 | memcpy(out, iedata, min(bufsize, copy)); | ||
1097 | out += min(bufsize, copy); | ||
1098 | bufsize -= min(bufsize, copy); | ||
1099 | } | ||
1100 | |||
1101 | |||
1102 | if (copy == attr_remaining) | ||
1103 | return desired_len; | ||
1104 | } | ||
1105 | |||
1106 | attr_remaining -= copy; | ||
1107 | if (attr_remaining) | ||
1108 | goto cont; | ||
1109 | |||
1110 | iedatalen -= copy; | ||
1111 | iedata += copy; | ||
1112 | |||
1113 | while (iedatalen > 0) { | ||
1114 | u16 attr_len; | ||
1115 | |||
1116 | /* P2P attribute ID & size must fit */ | ||
1117 | if (iedatalen < 3) | ||
1118 | return -EILSEQ; | ||
1119 | desired_attr = iedata[0] == attr; | ||
1120 | attr_len = get_unaligned_le16(iedata + 1); | ||
1121 | iedatalen -= 3; | ||
1122 | iedata += 3; | ||
1123 | |||
1124 | copy = min_t(unsigned int, attr_len, iedatalen); | ||
1125 | |||
1126 | if (desired_attr) { | ||
1127 | desired_len += copy; | ||
1128 | if (out) { | ||
1129 | memcpy(out, iedata, min(bufsize, copy)); | ||
1130 | out += min(bufsize, copy); | ||
1131 | bufsize -= min(bufsize, copy); | ||
1132 | } | ||
1133 | |||
1134 | if (copy == attr_len) | ||
1135 | return desired_len; | ||
1136 | } | ||
1137 | |||
1138 | iedata += copy; | ||
1139 | iedatalen -= copy; | ||
1140 | attr_remaining = attr_len - copy; | ||
1141 | } | ||
1142 | |||
1143 | cont: | ||
1144 | len -= ies[1] + 2; | ||
1145 | ies += ies[1] + 2; | ||
1146 | } | ||
1147 | |||
1148 | if (attr_remaining && desired_attr) | ||
1149 | return -EILSEQ; | ||
1150 | |||
1151 | return -ENOENT; | ||
1152 | } | ||
1153 | EXPORT_SYMBOL(cfg80211_get_p2p_attr); | ||
1154 | |||
983 | int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, | 1155 | int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, |
984 | u32 beacon_int) | 1156 | u32 beacon_int) |
985 | { | 1157 | { |
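
cfg80211_get_p2p_attr() walks the IE buffer looking for WFA P2P vendor IEs, reassembles the requested attribute even when it is fragmented across several IEs, copies at most bufsize bytes into buf, and returns the attribute's total data length, -ENOENT if the attribute is absent, or -EILSEQ if the IEs are malformed. A hypothetical caller sketch; the attribute ID constant is assumed to be one of the enum ieee80211_p2p_attr_id values from linux/ieee80211.h, which is outside this diff:

    static int example_get_noa(const u8 *ies, unsigned int ies_len,
                               u8 *noa, unsigned int noa_size)
    {
            int ret;

            ret = cfg80211_get_p2p_attr(ies, ies_len,
                                        IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
                                        noa, noa_size);
            if (ret == -ENOENT)     /* no Notice of Absence attribute */
                    return 0;
            if (ret < 0)            /* malformed IEs */
                    return ret;

            /* ret is the full attribute length even if only noa_size
             * bytes were copied, so callers can detect truncation. */
            return ret;
    }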
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 494379eb464f..f9680c9cf9b3 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <net/cfg80211-wext.h> | 19 | #include <net/cfg80211-wext.h> |
20 | #include "wext-compat.h" | 20 | #include "wext-compat.h" |
21 | #include "core.h" | 21 | #include "core.h" |
22 | #include "rdev-ops.h" | ||
22 | 23 | ||
23 | int cfg80211_wext_giwname(struct net_device *dev, | 24 | int cfg80211_wext_giwname(struct net_device *dev, |
24 | struct iw_request_info *info, | 25 | struct iw_request_info *info, |
@@ -301,8 +302,7 @@ int cfg80211_wext_siwrts(struct net_device *dev, | |||
301 | else | 302 | else |
302 | wdev->wiphy->rts_threshold = rts->value; | 303 | wdev->wiphy->rts_threshold = rts->value; |
303 | 304 | ||
304 | err = rdev->ops->set_wiphy_params(wdev->wiphy, | 305 | err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_RTS_THRESHOLD); |
305 | WIPHY_PARAM_RTS_THRESHOLD); | ||
306 | if (err) | 306 | if (err) |
307 | wdev->wiphy->rts_threshold = orts; | 307 | wdev->wiphy->rts_threshold = orts; |
308 | 308 | ||
@@ -342,8 +342,7 @@ int cfg80211_wext_siwfrag(struct net_device *dev, | |||
342 | wdev->wiphy->frag_threshold = frag->value & ~0x1; | 342 | wdev->wiphy->frag_threshold = frag->value & ~0x1; |
343 | } | 343 | } |
344 | 344 | ||
345 | err = rdev->ops->set_wiphy_params(wdev->wiphy, | 345 | err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_FRAG_THRESHOLD); |
346 | WIPHY_PARAM_FRAG_THRESHOLD); | ||
347 | if (err) | 346 | if (err) |
348 | wdev->wiphy->frag_threshold = ofrag; | 347 | wdev->wiphy->frag_threshold = ofrag; |
349 | 348 | ||
@@ -396,7 +395,7 @@ static int cfg80211_wext_siwretry(struct net_device *dev, | |||
396 | if (!changed) | 395 | if (!changed) |
397 | return 0; | 396 | return 0; |
398 | 397 | ||
399 | err = rdev->ops->set_wiphy_params(wdev->wiphy, changed); | 398 | err = rdev_set_wiphy_params(rdev, changed); |
400 | if (err) { | 399 | if (err) { |
401 | wdev->wiphy->retry_short = oshort; | 400 | wdev->wiphy->retry_short = oshort; |
402 | wdev->wiphy->retry_long = olong; | 401 | wdev->wiphy->retry_long = olong; |
@@ -490,8 +489,8 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, | |||
490 | !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) | 489 | !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) |
491 | err = -ENOENT; | 490 | err = -ENOENT; |
492 | else | 491 | else |
493 | err = rdev->ops->del_key(&rdev->wiphy, dev, idx, | 492 | err = rdev_del_key(rdev, dev, idx, pairwise, |
494 | pairwise, addr); | 493 | addr); |
495 | } | 494 | } |
496 | wdev->wext.connect.privacy = false; | 495 | wdev->wext.connect.privacy = false; |
497 | /* | 496 | /* |
@@ -525,8 +524,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, | |||
525 | 524 | ||
526 | err = 0; | 525 | err = 0; |
527 | if (wdev->current_bss) | 526 | if (wdev->current_bss) |
528 | err = rdev->ops->add_key(&rdev->wiphy, dev, idx, | 527 | err = rdev_add_key(rdev, dev, idx, pairwise, addr, params); |
529 | pairwise, addr, params); | ||
530 | if (err) | 528 | if (err) |
531 | return err; | 529 | return err; |
532 | 530 | ||
@@ -552,8 +550,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, | |||
552 | __cfg80211_leave_ibss(rdev, wdev->netdev, true); | 550 | __cfg80211_leave_ibss(rdev, wdev->netdev, true); |
553 | rejoin = true; | 551 | rejoin = true; |
554 | } | 552 | } |
555 | err = rdev->ops->set_default_key(&rdev->wiphy, dev, | 553 | err = rdev_set_default_key(rdev, dev, idx, true, true); |
556 | idx, true, true); | ||
557 | } | 554 | } |
558 | if (!err) { | 555 | if (!err) { |
559 | wdev->wext.default_key = idx; | 556 | wdev->wext.default_key = idx; |
@@ -566,8 +563,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev, | |||
566 | if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC && | 563 | if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC && |
567 | (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) { | 564 | (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) { |
568 | if (wdev->current_bss) | 565 | if (wdev->current_bss) |
569 | err = rdev->ops->set_default_mgmt_key(&rdev->wiphy, | 566 | err = rdev_set_default_mgmt_key(rdev, dev, idx); |
570 | dev, idx); | ||
571 | if (!err) | 567 | if (!err) |
572 | wdev->wext.default_mgmt_key = idx; | 568 | wdev->wext.default_mgmt_key = idx; |
573 | return err; | 569 | return err; |
@@ -631,8 +627,8 @@ static int cfg80211_wext_siwencode(struct net_device *dev, | |||
631 | err = 0; | 627 | err = 0; |
632 | wdev_lock(wdev); | 628 | wdev_lock(wdev); |
633 | if (wdev->current_bss) | 629 | if (wdev->current_bss) |
634 | err = rdev->ops->set_default_key(&rdev->wiphy, dev, | 630 | err = rdev_set_default_key(rdev, dev, idx, true, |
635 | idx, true, true); | 631 | true); |
636 | if (!err) | 632 | if (!err) |
637 | wdev->wext.default_key = idx; | 633 | wdev->wext.default_key = idx; |
638 | wdev_unlock(wdev); | 634 | wdev_unlock(wdev); |
@@ -788,6 +784,9 @@ static int cfg80211_wext_siwfreq(struct net_device *dev, | |||
788 | { | 784 | { |
789 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 785 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
790 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 786 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
787 | struct cfg80211_chan_def chandef = { | ||
788 | .width = NL80211_CHAN_WIDTH_20_NOHT, | ||
789 | }; | ||
791 | int freq, err; | 790 | int freq, err; |
792 | 791 | ||
793 | switch (wdev->iftype) { | 792 | switch (wdev->iftype) { |
@@ -801,8 +800,12 @@ static int cfg80211_wext_siwfreq(struct net_device *dev, | |||
801 | return freq; | 800 | return freq; |
802 | if (freq == 0) | 801 | if (freq == 0) |
803 | return -EINVAL; | 802 | return -EINVAL; |
803 | chandef.center_freq1 = freq; | ||
804 | chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); | ||
805 | if (!chandef.chan) | ||
806 | return -EINVAL; | ||
804 | mutex_lock(&rdev->devlist_mtx); | 807 | mutex_lock(&rdev->devlist_mtx); |
805 | err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT); | 808 | err = cfg80211_set_monitor_channel(rdev, &chandef); |
806 | mutex_unlock(&rdev->devlist_mtx); | 809 | mutex_unlock(&rdev->devlist_mtx); |
807 | return err; | 810 | return err; |
808 | case NL80211_IFTYPE_MESH_POINT: | 811 | case NL80211_IFTYPE_MESH_POINT: |
@@ -811,9 +814,12 @@ static int cfg80211_wext_siwfreq(struct net_device *dev, | |||
811 | return freq; | 814 | return freq; |
812 | if (freq == 0) | 815 | if (freq == 0) |
813 | return -EINVAL; | 816 | return -EINVAL; |
817 | chandef.center_freq1 = freq; | ||
818 | chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); | ||
819 | if (!chandef.chan) | ||
820 | return -EINVAL; | ||
814 | mutex_lock(&rdev->devlist_mtx); | 821 | mutex_lock(&rdev->devlist_mtx); |
815 | err = cfg80211_set_mesh_freq(rdev, wdev, freq, | 822 | err = cfg80211_set_mesh_channel(rdev, wdev, &chandef); |
816 | NL80211_CHAN_NO_HT); | ||
817 | mutex_unlock(&rdev->devlist_mtx); | 823 | mutex_unlock(&rdev->devlist_mtx); |
818 | return err; | 824 | return err; |
819 | default: | 825 | default: |
@@ -827,8 +833,8 @@ static int cfg80211_wext_giwfreq(struct net_device *dev, | |||
827 | { | 833 | { |
828 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 834 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
829 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); | 835 | struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); |
830 | struct ieee80211_channel *chan; | 836 | struct cfg80211_chan_def chandef; |
831 | enum nl80211_channel_type channel_type; | 837 | int ret; |
832 | 838 | ||
833 | switch (wdev->iftype) { | 839 | switch (wdev->iftype) { |
834 | case NL80211_IFTYPE_STATION: | 840 | case NL80211_IFTYPE_STATION: |
@@ -839,10 +845,10 @@ static int cfg80211_wext_giwfreq(struct net_device *dev, | |||
839 | if (!rdev->ops->get_channel) | 845 | if (!rdev->ops->get_channel) |
840 | return -EINVAL; | 846 | return -EINVAL; |
841 | 847 | ||
842 | chan = rdev->ops->get_channel(wdev->wiphy, wdev, &channel_type); | 848 | ret = rdev_get_channel(rdev, wdev, &chandef); |
843 | if (!chan) | 849 | if (ret) |
844 | return -EINVAL; | 850 | return ret; |
845 | freq->m = chan->center_freq; | 851 | freq->m = chandef.chan->center_freq; |
846 | freq->e = 6; | 852 | freq->e = 6; |
847 | return 0; | 853 | return 0; |
848 | default: | 854 | default: |
@@ -899,7 +905,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev, | |||
899 | return 0; | 905 | return 0; |
900 | } | 906 | } |
901 | 907 | ||
902 | return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm)); | 908 | return rdev_set_tx_power(rdev, wdev, type, DBM_TO_MBM(dbm)); |
903 | } | 909 | } |
904 | 910 | ||
905 | static int cfg80211_wext_giwtxpower(struct net_device *dev, | 911 | static int cfg80211_wext_giwtxpower(struct net_device *dev, |
@@ -918,7 +924,7 @@ static int cfg80211_wext_giwtxpower(struct net_device *dev, | |||
918 | if (!rdev->ops->get_tx_power) | 924 | if (!rdev->ops->get_tx_power) |
919 | return -EOPNOTSUPP; | 925 | return -EOPNOTSUPP; |
920 | 926 | ||
921 | err = rdev->ops->get_tx_power(wdev->wiphy, &val); | 927 | err = rdev_get_tx_power(rdev, wdev, &val); |
922 | if (err) | 928 | if (err) |
923 | return err; | 929 | return err; |
924 | 930 | ||
@@ -1158,7 +1164,7 @@ static int cfg80211_wext_siwpower(struct net_device *dev, | |||
1158 | timeout = wrq->value / 1000; | 1164 | timeout = wrq->value / 1000; |
1159 | } | 1165 | } |
1160 | 1166 | ||
1161 | err = rdev->ops->set_power_mgmt(wdev->wiphy, dev, ps, timeout); | 1167 | err = rdev_set_power_mgmt(rdev, dev, ps, timeout); |
1162 | if (err) | 1168 | if (err) |
1163 | return err; | 1169 | return err; |
1164 | 1170 | ||
@@ -1200,7 +1206,7 @@ static int cfg80211_wds_wext_siwap(struct net_device *dev, | |||
1200 | if (!rdev->ops->set_wds_peer) | 1206 | if (!rdev->ops->set_wds_peer) |
1201 | return -EOPNOTSUPP; | 1207 | return -EOPNOTSUPP; |
1202 | 1208 | ||
1203 | err = rdev->ops->set_wds_peer(wdev->wiphy, dev, (u8 *) &addr->sa_data); | 1209 | err = rdev_set_wds_peer(rdev, dev, (u8 *)&addr->sa_data); |
1204 | if (err) | 1210 | if (err) |
1205 | return err; | 1211 | return err; |
1206 | 1212 | ||
@@ -1272,7 +1278,7 @@ static int cfg80211_wext_siwrate(struct net_device *dev, | |||
1272 | if (!match) | 1278 | if (!match) |
1273 | return -EINVAL; | 1279 | return -EINVAL; |
1274 | 1280 | ||
1275 | return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask); | 1281 | return rdev_set_bitrate_mask(rdev, dev, NULL, &mask); |
1276 | } | 1282 | } |
1277 | 1283 | ||
1278 | static int cfg80211_wext_giwrate(struct net_device *dev, | 1284 | static int cfg80211_wext_giwrate(struct net_device *dev, |
@@ -1302,7 +1308,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev, | |||
1302 | if (err) | 1308 | if (err) |
1303 | return err; | 1309 | return err; |
1304 | 1310 | ||
1305 | err = rdev->ops->get_station(&rdev->wiphy, dev, addr, &sinfo); | 1311 | err = rdev_get_station(rdev, dev, addr, &sinfo); |
1306 | if (err) | 1312 | if (err) |
1307 | return err; | 1313 | return err; |
1308 | 1314 | ||
@@ -1339,7 +1345,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) | |||
1339 | memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); | 1345 | memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); |
1340 | wdev_unlock(wdev); | 1346 | wdev_unlock(wdev); |
1341 | 1347 | ||
1342 | if (rdev->ops->get_station(&rdev->wiphy, dev, bssid, &sinfo)) | 1348 | if (rdev_get_station(rdev, dev, bssid, &sinfo)) |
1343 | return NULL; | 1349 | return NULL; |
1344 | 1350 | ||
1345 | memset(&wstats, 0, sizeof(wstats)); | 1351 | memset(&wstats, 0, sizeof(wstats)); |
@@ -1474,19 +1480,19 @@ static int cfg80211_wext_siwpmksa(struct net_device *dev, | |||
1474 | if (!rdev->ops->set_pmksa) | 1480 | if (!rdev->ops->set_pmksa) |
1475 | return -EOPNOTSUPP; | 1481 | return -EOPNOTSUPP; |
1476 | 1482 | ||
1477 | return rdev->ops->set_pmksa(&rdev->wiphy, dev, &cfg_pmksa); | 1483 | return rdev_set_pmksa(rdev, dev, &cfg_pmksa); |
1478 | 1484 | ||
1479 | case IW_PMKSA_REMOVE: | 1485 | case IW_PMKSA_REMOVE: |
1480 | if (!rdev->ops->del_pmksa) | 1486 | if (!rdev->ops->del_pmksa) |
1481 | return -EOPNOTSUPP; | 1487 | return -EOPNOTSUPP; |
1482 | 1488 | ||
1483 | return rdev->ops->del_pmksa(&rdev->wiphy, dev, &cfg_pmksa); | 1489 | return rdev_del_pmksa(rdev, dev, &cfg_pmksa); |
1484 | 1490 | ||
1485 | case IW_PMKSA_FLUSH: | 1491 | case IW_PMKSA_FLUSH: |
1486 | if (!rdev->ops->flush_pmksa) | 1492 | if (!rdev->ops->flush_pmksa) |
1487 | return -EOPNOTSUPP; | 1493 | return -EOPNOTSUPP; |
1488 | 1494 | ||
1489 | return rdev->ops->flush_pmksa(&rdev->wiphy, dev); | 1495 | return rdev_flush_pmksa(rdev, dev); |
1490 | 1496 | ||
1491 | default: | 1497 | default: |
1492 | return -EOPNOTSUPP; | 1498 | return -EOPNOTSUPP; |
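The repetitive rdev->ops->foo(&rdev->wiphy, ...) calls in this file are converted to the new rdev_foo(rdev, ...) wrappers from net/wireless/rdev-ops.h (hence the added include). The sketch below shows the general shape of such a wrapper; it is simplified and leaves out the tracepoints the real header emits around each driver callback.

    /* Simplified sketch of the rdev-ops.h wrapper pattern; the real
     * wrapper also traces the call and its return value. */
    static inline int
    rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed)
    {
            return rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
    }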
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index 1f773f668d1a..873af63187c0 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c | |||
@@ -119,7 +119,16 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | |||
119 | * channel we disconnected above and reconnect below. | 119 | * channel we disconnected above and reconnect below. |
120 | */ | 120 | */ |
121 | if (chan && !wdev->wext.connect.ssid_len) { | 121 | if (chan && !wdev->wext.connect.ssid_len) { |
122 | err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT); | 122 | struct cfg80211_chan_def chandef = { |
123 | .width = NL80211_CHAN_WIDTH_20_NOHT, | ||
124 | .center_freq1 = freq, | ||
125 | }; | ||
126 | |||
127 | chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); | ||
128 | if (chandef.chan) | ||
129 | err = cfg80211_set_monitor_channel(rdev, &chandef); | ||
130 | else | ||
131 | err = -EINVAL; | ||
123 | goto out; | 132 | goto out; |
124 | } | 133 | } |
125 | 134 | ||
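Both wext conversions above replace the old (frequency, NL80211_CHAN_NO_HT) pair with a struct cfg80211_chan_def naming the channel, the channel width and the centre frequency. Building such a definition for a plain 20 MHz channel follows the pattern sketched here; the helper name is an assumption for the example.

    #include <net/cfg80211.h>

    /* Illustrative only: build a 20 MHz no-HT channel definition from a
     * centre frequency given in MHz. */
    static int example_chandef_from_freq(struct wiphy *wiphy, int freq,
                                         struct cfg80211_chan_def *chandef)
    {
            chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
            chandef->center_freq1 = freq;
            chandef->center_freq2 = 0;
            chandef->chan = ieee80211_get_channel(wiphy, freq);

            return chandef->chan ? 0 : -EINVAL;
    }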
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index e5246fbe36c4..2906d520eea7 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c | |||
@@ -276,18 +276,16 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name) | |||
276 | struct crypto_comp * __percpu *tfms; | 276 | struct crypto_comp * __percpu *tfms; |
277 | int cpu; | 277 | int cpu; |
278 | 278 | ||
279 | /* This can be any valid CPU ID so we don't need locking. */ | ||
280 | cpu = raw_smp_processor_id(); | ||
281 | 279 | ||
282 | list_for_each_entry(pos, &ipcomp_tfms_list, list) { | 280 | list_for_each_entry(pos, &ipcomp_tfms_list, list) { |
283 | struct crypto_comp *tfm; | 281 | struct crypto_comp *tfm; |
284 | 282 | ||
285 | tfms = pos->tfms; | 283 | /* This can be any valid CPU ID so we don't need locking. */ |
286 | tfm = *per_cpu_ptr(tfms, cpu); | 284 | tfm = __this_cpu_read(*pos->tfms); |
287 | 285 | ||
288 | if (!strcmp(crypto_comp_name(tfm), alg_name)) { | 286 | if (!strcmp(crypto_comp_name(tfm), alg_name)) { |
289 | pos->users++; | 287 | pos->users++; |
290 | return tfms; | 288 | return pos->tfms; |
291 | } | 289 | } |
292 | } | 290 | } |
293 | 291 | ||
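The ipcomp change reads the calling CPU's per-CPU slot directly with __this_cpu_read() instead of first resolving a CPU number and indexing with per_cpu_ptr(). The two forms are compared in the sketch below; the per-CPU variable is made up for the example and would be set up elsewhere with alloc_percpu().

    #include <linux/percpu.h>
    #include <linux/smp.h>

    /* Illustrative only: two ways to read the current CPU's slot of a
     * dynamically allocated per-CPU integer (allocated elsewhere). */
    static int __percpu *example_counter;

    static int example_read_counter(void)
    {
            /* long form: resolve a CPU id, then index the per-CPU area */
            int a = *per_cpu_ptr(example_counter, raw_smp_processor_id());

            /* short form used by the hunk above: no explicit CPU id */
            int b = __this_cpu_read(*example_counter);

            return a + b;
    }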
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 3efb07d3eb27..765f6fe951eb 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
@@ -521,13 +521,12 @@ int xfrm_init_replay(struct xfrm_state *x) | |||
521 | replay_esn->bmp_len * sizeof(__u32) * 8) | 521 | replay_esn->bmp_len * sizeof(__u32) * 8) |
522 | return -EINVAL; | 522 | return -EINVAL; |
523 | 523 | ||
524 | if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0) | 524 | if (x->props.flags & XFRM_STATE_ESN) { |
525 | return -EINVAL; | 525 | if (replay_esn->replay_window == 0) |
526 | 526 | return -EINVAL; | |
527 | if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn) | 527 | x->repl = &xfrm_replay_esn; |
528 | x->repl = &xfrm_replay_esn; | 528 | } else |
529 | else | 529 | x->repl = &xfrm_replay_bmp; |
530 | x->repl = &xfrm_replay_bmp; | ||
531 | } else | 530 | } else |
532 | x->repl = &xfrm_replay_legacy; | 531 | x->repl = &xfrm_replay_legacy; |
533 | 532 | ||
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c index 380976f74c4c..05a6e3d9c258 100644 --- a/net/xfrm/xfrm_sysctl.c +++ b/net/xfrm/xfrm_sysctl.c | |||
@@ -54,6 +54,10 @@ int __net_init xfrm_sysctl_init(struct net *net) | |||
54 | table[2].data = &net->xfrm.sysctl_larval_drop; | 54 | table[2].data = &net->xfrm.sysctl_larval_drop; |
55 | table[3].data = &net->xfrm.sysctl_acq_expires; | 55 | table[3].data = &net->xfrm.sysctl_acq_expires; |
56 | 56 | ||
57 | /* Don't export sysctls to unprivileged users */ | ||
58 | if (net->user_ns != &init_user_ns) | ||
59 | table[0].procname = NULL; | ||
60 | |||
57 | net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table); | 61 | net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table); |
58 | if (!net->xfrm.sysctl_hdr) | 62 | if (!net->xfrm.sysctl_hdr) |
59 | goto out_register; | 63 | goto out_register; |
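The xfrm sysctl change uses the common trick for hiding per-net sysctls from containers: registration of a ctl_table stops at the first entry whose procname is NULL, so clearing table[0].procname makes the whole table invisible when the network namespace is not owned by the initial user namespace. A minimal sketch of the pattern, with a made-up table path:

    #include <linux/sysctl.h>
    #include <net/net_namespace.h>

    /* Illustrative only: register a per-net table, but hide it entirely
     * from netns instances owned by a non-initial user namespace. */
    static struct ctl_table_header *
    example_register(struct net *net, struct ctl_table *table)
    {
            /* Don't export sysctls to unprivileged users */
            if (net->user_ns != &init_user_ns)
                    table[0].procname = NULL;

            return register_net_sysctl(net, "net/example", table);
    }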
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 421f98444335..eb872b2e366e 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -2349,7 +2349,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
2349 | link = &xfrm_dispatch[type]; | 2349 | link = &xfrm_dispatch[type]; |
2350 | 2350 | ||
2351 | /* All operations require privileges, even GET */ | 2351 | /* All operations require privileges, even GET */ |
2352 | if (!capable(CAP_NET_ADMIN)) | 2352 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
2353 | return -EPERM; | 2353 | return -EPERM; |
2354 | 2354 | ||
2355 | if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || | 2355 | if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || |
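The xfrm netlink path now checks CAP_NET_ADMIN against the user namespace that owns the receiving network namespace, so a process that is privileged within its own user namespace can manage xfrm state in a namespace it owns. The check reduces to the one-liner sketched below (the helper name is made up for the example):

    #include <linux/capability.h>
    #include <net/net_namespace.h>

    /* Illustrative only: namespace-aware privilege check. */
    static bool example_may_admin(const struct net *net)
    {
            return ns_capable(net->user_ns, CAP_NET_ADMIN);
    }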