Diffstat (limited to 'net')
74 files changed, 732 insertions, 294 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4a78c4de9f20..6ee48aac776f 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
-	return vlan_dev_priv(dev)->real_dev;
+	struct net_device *ret = vlan_dev_priv(dev)->real_dev;
+
+	while (is_vlan_dev(ret))
+		ret = vlan_dev_priv(ret)->real_dev;
+
+	return ret;
 }
 EXPORT_SYMBOL(vlan_dev_real_dev);
 
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index e14531f1ce1c..264de88db320 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1529,6 +1529,8 @@ out:
  * in these cases, the skb is further handled by this function and
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
+ *
+ * This call might reallocate skb data.
  */
 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
 		  unsigned short vid)
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index f105219f4a4b..7614af31daff 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -508,6 +508,7 @@ out:
 	return 0;
 }
 
+/* this call might reallocate skb data */
 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
 {
 	int ret = false;
@@ -568,6 +569,7 @@ out:
 	return ret;
 }
 
+/* this call might reallocate skb data */
 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 {
 	struct ethhdr *ethhdr;
@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 
 	if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
 		return false;
+
+	/* skb->data might have been reallocated by pskb_may_pull() */
+	ethhdr = (struct ethhdr *)skb->data;
+	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
+		ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
 	udphdr = (struct udphdr *)(skb->data + *header_len);
 	*header_len += sizeof(*udphdr);
 
@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 	return true;
 }
 
+/* this call might reallocate skb data */
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
-			    struct sk_buff *skb, struct ethhdr *ethhdr)
+			    struct sk_buff *skb)
 {
 	struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
 	struct batadv_orig_node *orig_dst_node = NULL;
 	struct batadv_gw_node *curr_gw = NULL;
+	struct ethhdr *ethhdr;
 	bool ret, out_of_range = false;
 	unsigned int header_len = 0;
 	uint8_t curr_tq_avg;
@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
 	if (!ret)
 		goto out;
 
+	ethhdr = (struct ethhdr *)skb->data;
 	orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
 						 ethhdr->h_dest);
 	if (!orig_dst_node)
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 039902dca4a6..1037d75da51f 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
-			    struct sk_buff *skb, struct ethhdr *ethhdr);
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 700d0b49742d..0f04e1c302b4 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
 	if (batadv_bla_tx(bat_priv, skb, vid))
 		goto dropped;
 
+	/* skb->data might have been reallocated by batadv_bla_tx() */
+	ethhdr = (struct ethhdr *)skb->data;
+
 	/* Register the client MAC in the transtable */
 	if (!is_multicast_ether_addr(ethhdr->h_source))
 		batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
 		default:
 			break;
 		}
+
+		/* reminder: ethhdr might have become unusable from here on
+		 * (batadv_gw_is_dhcp_target() might have reallocated skb data)
+		 */
 	}
 
 	/* ethernet packet should be broadcasted */
@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
 	/* unicast packet */
 	} else {
 		if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
-			ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
+			ret = batadv_gw_out_of_range(bat_priv, skb);
 			if (ret)
 				goto dropped;
 		}
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc8b5d4dd636..857e1b8349ee 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
  * @skb: the skb containing the payload to encapsulate
  * @orig_node: the destination node
  *
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
  */
 static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
 				       struct batadv_orig_node *orig_node)
@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
  * @orig_node: the destination node
  * @packet_subtype: the batman 4addr packet subtype to use
  *
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
  */
 bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
 				      struct sk_buff *skb,
@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
 	struct batadv_neigh_node *neigh_node;
 	int data_len = skb->len;
 	int ret = NET_RX_DROP;
-	unsigned int dev_mtu;
+	unsigned int dev_mtu, header_len;
 
 	/* get routing information */
 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
@@ -428,11 +432,17 @@ find_router:
 
 	switch (packet_type) {
 	case BATADV_UNICAST:
-		batadv_unicast_prepare_skb(skb, orig_node);
+		if (!batadv_unicast_prepare_skb(skb, orig_node))
+			goto out;
+
+		header_len = sizeof(struct batadv_unicast_packet);
 		break;
 	case BATADV_UNICAST_4ADDR:
-		batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
-						 packet_subtype);
+		if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
+						      packet_subtype))
+			goto out;
+
+		header_len = sizeof(struct batadv_unicast_4addr_packet);
 		break;
 	default:
 		/* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -441,6 +451,7 @@ find_router:
 		goto out;
 	}
 
+	ethhdr = (struct ethhdr *)(skb->data + header_len);
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
 	/* inform the destination node that we are still missing a correct route
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 69363bd37f64..89659d4ed1f9 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -71,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	mdst = br_mdb_get(br, skb, vid);
 	if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
-	    br_multicast_querier_exists(br))
+	    br_multicast_querier_exists(br, eth_hdr(skb)))
 		br_multicast_deliver(mdst, skb);
 	else
 		br_flood_deliver(br, skb, false);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 60aca9109a50..ffd5874f2592 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -161,7 +161,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 	if (!pv)
 		return;
 
-	for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+	for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
 		f = __br_fdb_get(br, br->dev->dev_addr, vid);
 		if (f && f->is_local && !f->dst)
 			fdb_delete(br, f);
@@ -730,7 +730,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 		/* VID was specified, so use it. */
 		err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
 	} else {
-		if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+		if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
 			err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
 			goto out;
 		}
@@ -739,7 +739,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 		 * specify a VLAN. To be nice, add/update entry for every
 		 * vlan on this port.
 		 */
-		for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
 			err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
 			if (err)
 				goto out;
@@ -817,7 +817,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 
 		err = __br_fdb_delete(p, addr, vid);
 	} else {
-		if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+		if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
 			err = __br_fdb_delete(p, addr, 0);
 			goto out;
 		}
@@ -827,7 +827,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 		 * vlan on this port.
 		 */
 		err = -ENOENT;
-		for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
 			err &= __br_fdb_delete(p, addr, vid);
 		}
 	}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 8c561c0aa636..a2fd37ec35f7 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -102,7 +102,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	} else if (is_multicast_ether_addr(dest)) {
 		mdst = br_mdb_get(br, skb, vid);
 		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
-		    br_multicast_querier_exists(br)) {
+		    br_multicast_querier_exists(br, eth_hdr(skb))) {
 			if ((mdst && mdst->mglist) ||
 			    br_multicast_is_router(br))
 				skb2 = skb;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 0daae3ec2355..6319c4333c39 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -414,16 +414,20 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
 	if (!netif_running(br->dev) || br->multicast_disabled)
 		return -EINVAL;
 
-	if (timer_pending(&br->multicast_querier_timer))
-		return -EBUSY;
-
 	ip.proto = entry->addr.proto;
-	if (ip.proto == htons(ETH_P_IP))
+	if (ip.proto == htons(ETH_P_IP)) {
+		if (timer_pending(&br->ip4_querier.timer))
+			return -EBUSY;
+
 		ip.u.ip4 = entry->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-	else
+	} else {
+		if (timer_pending(&br->ip6_querier.timer))
+			return -EBUSY;
+
 		ip.u.ip6 = entry->addr.u.ip6;
 #endif
+	}
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 61c5e819380e..bbcb43582496 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -33,7 +33,8 @@
 
 #include "br_private.h"
 
-static void br_multicast_start_querier(struct net_bridge *br);
+static void br_multicast_start_querier(struct net_bridge *br,
+				       struct bridge_mcast_query *query);
 unsigned int br_mdb_rehash_seq;
 
 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -755,20 +756,35 @@ static void br_multicast_local_router_expired(unsigned long data)
 {
 }
 
-static void br_multicast_querier_expired(unsigned long data)
+static void br_multicast_querier_expired(struct net_bridge *br,
+					 struct bridge_mcast_query *query)
 {
-	struct net_bridge *br = (void *)data;
-
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) || br->multicast_disabled)
 		goto out;
 
-	br_multicast_start_querier(br);
+	br_multicast_start_querier(br, query);
 
 out:
 	spin_unlock(&br->multicast_lock);
 }
 
+static void br_ip4_multicast_querier_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
+
+	br_multicast_querier_expired(br, &br->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_querier_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
+
+	br_multicast_querier_expired(br, &br->ip6_query);
+}
+#endif
+
 static void __br_multicast_send_query(struct net_bridge *br,
 				      struct net_bridge_port *port,
 				      struct br_ip *ip)
@@ -789,37 +805,45 @@ static void __br_multicast_send_query(struct net_bridge *br,
 }
 
 static void br_multicast_send_query(struct net_bridge *br,
-				    struct net_bridge_port *port, u32 sent)
+				    struct net_bridge_port *port,
+				    struct bridge_mcast_query *query)
 {
 	unsigned long time;
 	struct br_ip br_group;
+	struct bridge_mcast_querier *querier = NULL;
 
 	if (!netif_running(br->dev) || br->multicast_disabled ||
-	    !br->multicast_querier ||
-	    timer_pending(&br->multicast_querier_timer))
+	    !br->multicast_querier)
 		return;
 
 	memset(&br_group.u, 0, sizeof(br_group.u));
 
-	br_group.proto = htons(ETH_P_IP);
-	__br_multicast_send_query(br, port, &br_group);
-
+	if (port ? (query == &port->ip4_query) :
+		   (query == &br->ip4_query)) {
+		querier = &br->ip4_querier;
+		br_group.proto = htons(ETH_P_IP);
 #if IS_ENABLED(CONFIG_IPV6)
-	br_group.proto = htons(ETH_P_IPV6);
-	__br_multicast_send_query(br, port, &br_group);
+	} else {
+		querier = &br->ip6_querier;
+		br_group.proto = htons(ETH_P_IPV6);
 #endif
+	}
+
+	if (!querier || timer_pending(&querier->timer))
+		return;
+
+	__br_multicast_send_query(br, port, &br_group);
 
 	time = jiffies;
-	time += sent < br->multicast_startup_query_count ?
+	time += query->startup_sent < br->multicast_startup_query_count ?
 		br->multicast_startup_query_interval :
 		br->multicast_query_interval;
-	mod_timer(port ? &port->multicast_query_timer :
-		  &br->multicast_query_timer, time);
+	mod_timer(&query->timer, time);
 }
 
-static void br_multicast_port_query_expired(unsigned long data)
+static void br_multicast_port_query_expired(struct net_bridge_port *port,
+					    struct bridge_mcast_query *query)
 {
-	struct net_bridge_port *port = (void *)data;
 	struct net_bridge *br = port->br;
 
 	spin_lock(&br->multicast_lock);
@@ -827,25 +851,43 @@ static void br_multicast_port_query_expired(unsigned long data)
 	    port->state == BR_STATE_BLOCKING)
 		goto out;
 
-	if (port->multicast_startup_queries_sent <
-	    br->multicast_startup_query_count)
-		port->multicast_startup_queries_sent++;
+	if (query->startup_sent < br->multicast_startup_query_count)
+		query->startup_sent++;
 
-	br_multicast_send_query(port->br, port,
-				port->multicast_startup_queries_sent);
+	br_multicast_send_query(port->br, port, query);
 
 out:
 	spin_unlock(&br->multicast_lock);
 }
 
+static void br_ip4_multicast_port_query_expired(unsigned long data)
+{
+	struct net_bridge_port *port = (void *)data;
+
+	br_multicast_port_query_expired(port, &port->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_port_query_expired(unsigned long data)
+{
+	struct net_bridge_port *port = (void *)data;
+
+	br_multicast_port_query_expired(port, &port->ip6_query);
+}
+#endif
+
 void br_multicast_add_port(struct net_bridge_port *port)
 {
 	port->multicast_router = 1;
 
 	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
 		    (unsigned long)port);
-	setup_timer(&port->multicast_query_timer,
-		    br_multicast_port_query_expired, (unsigned long)port);
+	setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired,
+		    (unsigned long)port);
+#if IS_ENABLED(CONFIG_IPV6)
+	setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired,
+		    (unsigned long)port);
+#endif
 }
 
 void br_multicast_del_port(struct net_bridge_port *port)
@@ -853,13 +895,13 @@ void br_multicast_del_port(struct net_bridge_port *port)
 	del_timer_sync(&port->multicast_router_timer);
 }
 
-static void __br_multicast_enable_port(struct net_bridge_port *port)
+static void br_multicast_enable(struct bridge_mcast_query *query)
 {
-	port->multicast_startup_queries_sent = 0;
+	query->startup_sent = 0;
 
-	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
-	    del_timer(&port->multicast_query_timer))
-		mod_timer(&port->multicast_query_timer, jiffies);
+	if (try_to_del_timer_sync(&query->timer) >= 0 ||
+	    del_timer(&query->timer))
+		mod_timer(&query->timer, jiffies);
 }
 
 void br_multicast_enable_port(struct net_bridge_port *port)
@@ -870,7 +912,10 @@ void br_multicast_enable_port(struct net_bridge_port *port)
 	if (br->multicast_disabled || !netif_running(br->dev))
 		goto out;
 
-	__br_multicast_enable_port(port);
+	br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+	br_multicast_enable(&port->ip6_query);
+#endif
 
 out:
 	spin_unlock(&br->multicast_lock);
@@ -889,7 +934,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 	if (!hlist_unhashed(&port->rlist))
 		hlist_del_init_rcu(&port->rlist);
 	del_timer(&port->multicast_router_timer);
-	del_timer(&port->multicast_query_timer);
+	del_timer(&port->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+	del_timer(&port->ip6_query.timer);
+#endif
 	spin_unlock(&br->multicast_lock);
 }
 
@@ -1014,14 +1062,15 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 }
 #endif
 
-static void br_multicast_update_querier_timer(struct net_bridge *br,
-					      unsigned long max_delay)
+static void
+br_multicast_update_querier_timer(struct net_bridge *br,
+				  struct bridge_mcast_querier *querier,
+				  unsigned long max_delay)
 {
-	if (!timer_pending(&br->multicast_querier_timer))
-		br->multicast_querier_delay_time = jiffies + max_delay;
+	if (!timer_pending(&querier->timer))
+		querier->delay_time = jiffies + max_delay;
 
-	mod_timer(&br->multicast_querier_timer,
-		  jiffies + br->multicast_querier_interval);
+	mod_timer(&querier->timer, jiffies + br->multicast_querier_interval);
 }
 
 /*
@@ -1074,12 +1123,13 @@ timer:
 
 static void br_multicast_query_received(struct net_bridge *br,
 					struct net_bridge_port *port,
+					struct bridge_mcast_querier *querier,
 					int saddr,
 					unsigned long max_delay)
 {
 	if (saddr)
-		br_multicast_update_querier_timer(br, max_delay);
-	else if (timer_pending(&br->multicast_querier_timer))
+		br_multicast_update_querier_timer(br, querier, max_delay);
+	else if (timer_pending(&querier->timer))
 		return;
 
 	br_multicast_mark_router(br, port);
@@ -1129,7 +1179,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
 	}
 
-	br_multicast_query_received(br, port, !!iph->saddr, max_delay);
+	br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
+				    max_delay);
 
 	if (!group)
 		goto out;
@@ -1195,7 +1246,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
 		if (max_delay)
 			group = &mld->mld_mca;
-	} else if (skb->len >= sizeof(*mld2q)) {
+	} else {
 		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
 			err = -EINVAL;
 			goto out;
@@ -1203,11 +1254,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 		mld2q = (struct mld2_query *)icmp6_hdr(skb);
 		if (!mld2q->mld2q_nsrcs)
 			group = &mld2q->mld2q_mca;
-		max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
+
+		max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
 	}
 
-	br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr),
-				    max_delay);
+	br_multicast_query_received(br, port, &br->ip6_querier,
+				    !ipv6_addr_any(&ip6h->saddr), max_delay);
 
 	if (!group)
 		goto out;
@@ -1244,7 +1296,9 @@ out:
 
 static void br_multicast_leave_group(struct net_bridge *br,
 				     struct net_bridge_port *port,
-				     struct br_ip *group)
+				     struct br_ip *group,
+				     struct bridge_mcast_querier *querier,
+				     struct bridge_mcast_query *query)
 {
 	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
@@ -1255,7 +1309,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) ||
 	    (port && port->state == BR_STATE_DISABLED) ||
-	    timer_pending(&br->multicast_querier_timer))
+	    timer_pending(&querier->timer))
 		goto out;
 
 	mdb = mlock_dereference(br->mdb, br);
@@ -1263,14 +1317,13 @@ static void br_multicast_leave_group(struct net_bridge *br,
 	if (!mp)
 		goto out;
 
-	if (br->multicast_querier &&
-	    !timer_pending(&br->multicast_querier_timer)) {
+	if (br->multicast_querier) {
 		__br_multicast_send_query(br, port, &mp->addr);
 
 		time = jiffies + br->multicast_last_member_count *
 		       br->multicast_last_member_interval;
-		mod_timer(port ? &port->multicast_query_timer :
-			  &br->multicast_query_timer, time);
+
+		mod_timer(&query->timer, time);
 
 		for (p = mlock_dereference(mp->ports, br);
 		     p != NULL;
@@ -1323,7 +1376,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
 			mod_timer(&mp->timer, time);
 		}
 	}
-
 out:
 	spin_unlock(&br->multicast_lock);
 }
@@ -1334,6 +1386,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 					 __u16 vid)
 {
 	struct br_ip br_group;
+	struct bridge_mcast_query *query = port ? &port->ip4_query :
+						  &br->ip4_query;
 
 	if (ipv4_is_local_multicast(group))
 		return;
@@ -1342,7 +1396,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 	br_group.proto = htons(ETH_P_IP);
 	br_group.vid = vid;
 
-	br_multicast_leave_group(br, port, &br_group);
+	br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1352,6 +1406,9 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 					 __u16 vid)
 {
 	struct br_ip br_group;
+	struct bridge_mcast_query *query = port ? &port->ip6_query :
+						  &br->ip6_query;
+
 
 	if (!ipv6_is_transient_multicast(group))
 		return;
@@ -1360,7 +1417,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 	br_group.proto = htons(ETH_P_IPV6);
 	br_group.vid = vid;
 
-	br_multicast_leave_group(br, port, &br_group);
+	br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
 }
 #endif
 
@@ -1622,19 +1679,32 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
 	return 0;
 }
 
-static void br_multicast_query_expired(unsigned long data)
+static void br_multicast_query_expired(struct net_bridge *br,
+				       struct bridge_mcast_query *query)
+{
+	spin_lock(&br->multicast_lock);
+	if (query->startup_sent < br->multicast_startup_query_count)
+		query->startup_sent++;
+
+	br_multicast_send_query(br, NULL, query);
+	spin_unlock(&br->multicast_lock);
+}
+
+static void br_ip4_multicast_query_expired(unsigned long data)
 {
 	struct net_bridge *br = (void *)data;
 
-	spin_lock(&br->multicast_lock);
-	if (br->multicast_startup_queries_sent <
-	    br->multicast_startup_query_count)
-		br->multicast_startup_queries_sent++;
+	br_multicast_query_expired(br, &br->ip4_query);
+}
 
-	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_query_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
 
-	spin_unlock(&br->multicast_lock);
+	br_multicast_query_expired(br, &br->ip6_query);
 }
+#endif
 
 void br_multicast_init(struct net_bridge *br)
 {
@@ -1654,25 +1724,43 @@ void br_multicast_init(struct net_bridge *br)
 	br->multicast_querier_interval = 255 * HZ;
 	br->multicast_membership_interval = 260 * HZ;
 
-	br->multicast_querier_delay_time = 0;
+	br->ip4_querier.delay_time = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+	br->ip6_querier.delay_time = 0;
+#endif
 
 	spin_lock_init(&br->multicast_lock);
 	setup_timer(&br->multicast_router_timer,
 		    br_multicast_local_router_expired, 0);
-	setup_timer(&br->multicast_querier_timer,
-		    br_multicast_querier_expired, (unsigned long)br);
-	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
+	setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired,
+		    (unsigned long)br);
+	setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired,
 		    (unsigned long)br);
+#if IS_ENABLED(CONFIG_IPV6)
+	setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired,
+		    (unsigned long)br);
+	setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired,
+		    (unsigned long)br);
+#endif
 }
 
-void br_multicast_open(struct net_bridge *br)
+static void __br_multicast_open(struct net_bridge *br,
+				struct bridge_mcast_query *query)
 {
-	br->multicast_startup_queries_sent = 0;
+	query->startup_sent = 0;
 
 	if (br->multicast_disabled)
 		return;
 
-	mod_timer(&br->multicast_query_timer, jiffies);
+	mod_timer(&query->timer, jiffies);
+}
+
+void br_multicast_open(struct net_bridge *br)
+{
+	__br_multicast_open(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+	__br_multicast_open(br, &br->ip6_query);
+#endif
 }
 
 void br_multicast_stop(struct net_bridge *br)
@@ -1684,8 +1772,12 @@ void br_multicast_stop(struct net_bridge *br)
 	int i;
 
 	del_timer_sync(&br->multicast_router_timer);
-	del_timer_sync(&br->multicast_querier_timer);
-	del_timer_sync(&br->multicast_query_timer);
+	del_timer_sync(&br->ip4_querier.timer);
+	del_timer_sync(&br->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+	del_timer_sync(&br->ip6_querier.timer);
+	del_timer_sync(&br->ip6_query.timer);
+#endif
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
@@ -1788,18 +1880,24 @@ unlock:
 	return err;
 }
 
-static void br_multicast_start_querier(struct net_bridge *br)
+static void br_multicast_start_querier(struct net_bridge *br,
+				       struct bridge_mcast_query *query)
 {
 	struct net_bridge_port *port;
 
-	br_multicast_open(br);
+	__br_multicast_open(br, query);
 
 	list_for_each_entry(port, &br->port_list, list) {
 		if (port->state == BR_STATE_DISABLED ||
 		    port->state == BR_STATE_BLOCKING)
 			continue;
 
-		__br_multicast_enable_port(port);
+		if (query == &br->ip4_query)
+			br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+		else
+			br_multicast_enable(&port->ip6_query);
+#endif
 	}
 }
 
@@ -1834,7 +1932,10 @@ rollback:
 		goto rollback;
 	}
 
-	br_multicast_start_querier(br);
+	br_multicast_start_querier(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+	br_multicast_start_querier(br, &br->ip6_query);
+#endif
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
@@ -1857,10 +1958,18 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
 		goto unlock;
 
 	max_delay = br->multicast_query_response_interval;
-	if (!timer_pending(&br->multicast_querier_timer))
-		br->multicast_querier_delay_time = jiffies + max_delay;
 
-	br_multicast_start_querier(br);
+	if (!timer_pending(&br->ip4_querier.timer))
+		br->ip4_querier.delay_time = jiffies + max_delay;
+
+	br_multicast_start_querier(br, &br->ip4_query);
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (!timer_pending(&br->ip6_querier.timer))
+		br->ip6_querier.delay_time = jiffies + max_delay;
+
+	br_multicast_start_querier(br, &br->ip6_query);
+#endif
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 1fc30abd3a52..b9259efa636e 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -132,7 +132,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
 	else
 		pv = br_get_vlan_info(br);
 
-	if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
+	if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
 		goto done;
 
 	af = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -140,7 +140,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
 		goto nla_put_failure;
 
 	pvid = br_get_pvid(pv);
-	for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
 		vinfo.vid = vid;
 		vinfo.flags = 0;
 		if (vid == pvid)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2f7da41851bf..263ba9034468 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -66,6 +66,20 @@ struct br_ip
 	__u16 vid;
 };
 
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+/* our own querier */
+struct bridge_mcast_query {
+	struct timer_list timer;
+	u32 startup_sent;
+};
+
+/* other querier */
+struct bridge_mcast_querier {
+	struct timer_list timer;
+	unsigned long delay_time;
+};
+#endif
+
 struct net_port_vlans {
 	u16 port_idx;
 	u16 pvid;
@@ -162,10 +176,12 @@ struct net_bridge_port
 #define BR_FLOOD		0x00000040
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
-	u32				multicast_startup_queries_sent;
+	struct bridge_mcast_query	ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct bridge_mcast_query	ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 	unsigned char			multicast_router;
 	struct timer_list		multicast_router_timer;
-	struct timer_list		multicast_query_timer;
 	struct hlist_head		mglist;
 	struct hlist_node		rlist;
 #endif
@@ -258,7 +274,6 @@ struct net_bridge
 	u32				hash_max;
 
 	u32				multicast_last_member_count;
-	u32				multicast_startup_queries_sent;
 	u32				multicast_startup_query_count;
 
 	unsigned long			multicast_last_member_interval;
@@ -267,15 +282,18 @@ struct net_bridge
 	unsigned long			multicast_query_interval;
 	unsigned long			multicast_query_response_interval;
 	unsigned long			multicast_startup_query_interval;
-	unsigned long			multicast_querier_delay_time;
 
 	spinlock_t			multicast_lock;
 	struct net_bridge_mdb_htable __rcu *mdb;
 	struct hlist_head		router_list;
 
 	struct timer_list		multicast_router_timer;
-	struct timer_list		multicast_querier_timer;
-	struct timer_list		multicast_query_timer;
+	struct bridge_mcast_querier	ip4_querier;
+	struct bridge_mcast_query	ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct bridge_mcast_querier	ip6_querier;
+	struct bridge_mcast_query	ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif
 
 	struct timer_list		hello_timer;
@@ -503,11 +521,27 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
 		timer_pending(&br->multicast_router_timer));
 }
 
-static inline bool br_multicast_querier_exists(struct net_bridge *br)
+static inline bool
+__br_multicast_querier_exists(struct net_bridge *br,
+			      struct bridge_mcast_querier *querier)
+{
+	return time_is_before_jiffies(querier->delay_time) &&
+	       (br->multicast_querier || timer_pending(&querier->timer));
+}
+
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+					       struct ethhdr *eth)
 {
-	return time_is_before_jiffies(br->multicast_querier_delay_time) &&
-	       (br->multicast_querier ||
-		timer_pending(&br->multicast_querier_timer));
+	switch (eth->h_proto) {
+	case (htons(ETH_P_IP)):
+		return __br_multicast_querier_exists(br, &br->ip4_querier);
+#if IS_ENABLED(CONFIG_IPV6)
+	case (htons(ETH_P_IPV6)):
+		return __br_multicast_querier_exists(br, &br->ip6_querier);
+#endif
+	default:
+		return false;
+	}
 }
 #else
 static inline int br_multicast_rcv(struct net_bridge *br,
@@ -565,7 +599,8 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
 {
 	return 0;
 }
-static inline bool br_multicast_querier_exists(struct net_bridge *br)
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+					       struct ethhdr *eth)
 {
 	return false;
 }
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 394bb96b6087..3b9637fb7939 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -1,5 +1,5 @@
 /*
- *	Sysfs attributes of bridge ports
+ *	Sysfs attributes of bridge
  *	Linux ethernet bridge
  *
  *	Authors:
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index bd58b45f5f90..9a9ffe7e4019 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -108,7 +108,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
 
 	clear_bit(vid, v->vlan_bitmap);
 	v->num_vlans--;
-	if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+	if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
 		if (v->port_idx)
 			rcu_assign_pointer(v->parent.port->vlan_info, NULL);
 		else
@@ -122,7 +122,7 @@ static void __vlan_flush(struct net_port_vlans *v)
 {
 	smp_wmb();
 	v->pvid = 0;
-	bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+	bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
 	if (v->port_idx)
 		rcu_assign_pointer(v->parent.port->vlan_info, NULL);
 	else
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 00ee068efc1c..d12e3a9a5356 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -65,6 +65,7 @@ ipv6:
 		nhoff += sizeof(struct ipv6hdr);
 		break;
 	}
+	case __constant_htons(ETH_P_8021AD):
 	case __constant_htons(ETH_P_8021Q): {
 		const struct vlan_hdr *vlan;
 		struct vlan_hdr _vlan;
@@ -345,14 +346,9 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 		if (new_index < 0)
 			new_index = skb_tx_hash(dev, skb);
 
-		if (queue_index != new_index && sk) {
-			struct dst_entry *dst =
-				rcu_dereference_check(sk->sk_dst_cache, 1);
-
-			if (dst && skb_dst(skb) == dst)
-				sk_tx_queue_set(sk, queue_index);
-
-		}
+		if (queue_index != new_index && sk &&
+		    rcu_access_pointer(sk->sk_dst_cache))
+			sk_tx_queue_set(sk, queue_index);
 
 		queue_index = new_index;
 	}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9232c68941ab..60533db8b72d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
 		atomic_set(&p->refcnt, 1);
 		p->reachable_time =
 				neigh_rand_reach_time(p->base_reachable_time);
+		dev_hold(dev);
+		p->dev = dev;
+		write_pnet(&p->net, hold_net(net));
+		p->sysctl_table = NULL;
 
 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+			release_net(net);
+			dev_put(dev);
 			kfree(p);
 			return NULL;
 		}
 
-		dev_hold(dev);
-		p->dev = dev;
-		write_pnet(&p->net, hold_net(net));
-		p->sysctl_table = NULL;
 		write_lock_bh(&tbl->lock);
 		p->next = tbl->parms.next;
 		tbl->parms.next = p;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 3de740834d1f..ca198c1d1d30 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2156,7 +2156,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
 	/* If aging addresses are supported device will need to
 	 * implement its own handler for this.
 	 */
-	if (ndm->ndm_state & NUD_PERMANENT) {
+	if (!(ndm->ndm_state & NUD_PERMANENT)) {
 		pr_info("%s: FDB only supports static addresses\n", dev->name);
 		return -EINVAL;
 	}
@@ -2384,7 +2384,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 	struct nlattr *extfilt;
 	u32 filter_mask = 0;
 
-	extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+	extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
 				  IFLA_EXT_MASK);
 	if (extfilt)
 		filter_mask = nla_get_u32(extfilt);
diff --git a/net/core/scm.c b/net/core/scm.c
index 03795d0147f2..b4da80b1cc07 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
 		return -EINVAL;
 
 	if ((creds->pid == task_tgid_vnr(current) ||
-	     ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
+	     ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
 	    ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
 	      uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
 	    ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index ab3d814bc80a..109ee89f123e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
 	}
 
 	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-		 net_adj) & ~(align - 1)) + (net_adj - 2);
+		 net_adj) & ~(align - 1)) + net_adj - 2;
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 108a1e9c9eac..3df6d3edb2a1 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -71,7 +71,6 @@ | |||
71 | #include <linux/init.h> | 71 | #include <linux/init.h> |
72 | #include <linux/list.h> | 72 | #include <linux/list.h> |
73 | #include <linux/slab.h> | 73 | #include <linux/slab.h> |
74 | #include <linux/prefetch.h> | ||
75 | #include <linux/export.h> | 74 | #include <linux/export.h> |
76 | #include <net/net_namespace.h> | 75 | #include <net/net_namespace.h> |
77 | #include <net/ip.h> | 76 | #include <net/ip.h> |
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c) | |||
1761 | if (!c) | 1760 | if (!c) |
1762 | continue; | 1761 | continue; |
1763 | 1762 | ||
1764 | if (IS_LEAF(c)) { | 1763 | if (IS_LEAF(c)) |
1765 | prefetch(rcu_dereference_rtnl(p->child[idx])); | ||
1766 | return (struct leaf *) c; | 1764 | return (struct leaf *) c; |
1767 | } | ||
1768 | 1765 | ||
1769 | /* Rescan start scanning in new node */ | 1766 | /* Rescan start scanning in new node */ |
1770 | p = (struct tnode *) c; | 1767 | p = (struct tnode *) c; |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 1f6eab66f7ce..8d6939eeb492 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, | |||
383 | if (daddr) | 383 | if (daddr) |
384 | memcpy(&iph->daddr, daddr, 4); | 384 | memcpy(&iph->daddr, daddr, 4); |
385 | if (iph->daddr) | 385 | if (iph->daddr) |
386 | return t->hlen; | 386 | return t->hlen + sizeof(*iph); |
387 | 387 | ||
388 | return -(t->hlen + sizeof(*iph)); | 388 | return -(t->hlen + sizeof(*iph)); |
389 | } | 389 | } |
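The ipgre_header() change makes the success path return t->hlen + sizeof(*iph), i.e. everything the function actually pushed (outer IPv4 header plus GRE header), which matches the magnitude already used on the failure path. A toy sketch of that contract, with hypothetical buffer-based arguments in place of the skb API:

    #include <string.h>
    #include <stdint.h>

    /* a header-building hook should return exactly the number of bytes it
     * prepended, so callers can rely on the value for header accounting */
    static int build_header(uint8_t *buf, const uint8_t *outer, int outer_len,
                            const uint8_t *tun, int tun_len)
    {
            memcpy(buf, outer, outer_len);
            memcpy(buf + outer_len, tun, tun_len);
            return outer_len + tun_len;     /* not just tun_len */
    }
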
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 4bcabf3ab4ca..9ee17e3d11c3 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -211,14 +211,6 @@ static inline int ip_finish_output2(struct sk_buff *skb) | |||
211 | return -EINVAL; | 211 | return -EINVAL; |
212 | } | 212 | } |
213 | 213 | ||
214 | static inline int ip_skb_dst_mtu(struct sk_buff *skb) | ||
215 | { | ||
216 | struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL; | ||
217 | |||
218 | return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ? | ||
219 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); | ||
220 | } | ||
221 | |||
222 | static int ip_finish_output(struct sk_buff *skb) | 214 | static int ip_finish_output(struct sk_buff *skb) |
223 | { | 215 | { |
224 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) | 216 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) |
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 7167b08977df..850525b34899 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
@@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt, | |||
76 | iph->daddr = dst; | 76 | iph->daddr = dst; |
77 | iph->saddr = src; | 77 | iph->saddr = src; |
78 | iph->ttl = ttl; | 78 | iph->ttl = ttl; |
79 | tunnel_ip_select_ident(skb, | 79 | __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); |
80 | (const struct iphdr *)skb_inner_network_header(skb), | ||
81 | &rt->dst); | ||
82 | 80 | ||
83 | err = ip_local_out(skb); | 81 | err = ip_local_out(skb); |
84 | if (unlikely(net_xmit_eval(err))) | 82 | if (unlikely(net_xmit_eval(err))) |
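The iptunnel_xmit() hunk switches to __ip_select_ident() with (gso_segs ?: 1) - 1 as the extra count, so the IP ID generator is advanced once per resulting segment and later GSO segmentation can hand out consecutive IDs. A toy counter illustrating the reservation, assuming a simple global counter instead of the kernel's per-destination state:

    #include <stdint.h>

    static uint16_t ip_ident_counter;       /* toy stand-in for per-peer state */

    /* return the first ID and reserve 'segs' consecutive ones, which is what
     * passing (segs - 1) as the "more" argument to __ip_select_ident() does */
    static uint16_t ip_select_ident_segs(int segs)
    {
            uint16_t id = ip_ident_counter;

            ip_ident_counter += segs;
            return id;
    }
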
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 51fc2a1dcdd3..b3ac3c3f6219 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -190,15 +190,14 @@ static int ipip_rcv(struct sk_buff *skb) | |||
190 | struct ip_tunnel *tunnel; | 190 | struct ip_tunnel *tunnel; |
191 | const struct iphdr *iph; | 191 | const struct iphdr *iph; |
192 | 192 | ||
193 | if (iptunnel_pull_header(skb, 0, tpi.proto)) | ||
194 | goto drop; | ||
195 | |||
196 | iph = ip_hdr(skb); | 193 | iph = ip_hdr(skb); |
197 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, | 194 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, |
198 | iph->saddr, iph->daddr, 0); | 195 | iph->saddr, iph->daddr, 0); |
199 | if (tunnel) { | 196 | if (tunnel) { |
200 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 197 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
201 | goto drop; | 198 | goto drop; |
199 | if (iptunnel_pull_header(skb, 0, tpi.proto)) | ||
200 | goto drop; | ||
202 | return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); | 201 | return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); |
203 | } | 202 | } |
204 | 203 | ||
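The ipip_rcv() hunk above (and the matching one in net/ipv6/sit.c further down) defers iptunnel_pull_header() until a tunnel has been found and the xfrm policy check has passed, so a packet that no tunnel claims is handed to the next protocol handler with its outer header still in place. A compact sketch of that "look up first, mutate later" ordering, with hypothetical types:

    enum verdict { DELIVERED = 0, PASS_ON = 1, DROPPED = -1 };

    struct pkt    { unsigned int outer_dst; int outer_stripped; };
    struct tunnel { unsigned int dst; };

    static struct tunnel the_tunnel = { .dst = 0x0a000001 };

    static struct tunnel *tunnel_lookup(const struct pkt *p)
    {
            return p->outer_dst == the_tunnel.dst ? &the_tunnel : 0;
    }

    /* only strip the outer header once we know this layer consumes the
     * packet: a lookup miss must leave the buffer intact for the next
     * protocol handler, which is exactly what the reordering guarantees */
    static enum verdict tunnel_rcv(struct pkt *p)
    {
            struct tunnel *t = tunnel_lookup(p);

            if (!t)
                    return PASS_ON;         /* packet untouched */

            p->outer_stripped = 1;          /* pull_header() stand-in */
            return DELIVERED;
    }
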
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 6577a1149a47..463bd1273346 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -273,7 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = { | |||
273 | SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), | 273 | SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), |
274 | SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), | 274 | SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), |
275 | SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), | 275 | SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), |
276 | SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS), | 276 | SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS), |
277 | SNMP_MIB_SENTINEL | 277 | SNMP_MIB_SENTINEL |
278 | }; | 278 | }; |
279 | 279 | ||
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index dd44e0ab600c..61e60d67adca 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -571,7 +571,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
571 | flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, | 571 | flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, |
572 | RT_SCOPE_UNIVERSE, | 572 | RT_SCOPE_UNIVERSE, |
573 | inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, | 573 | inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, |
574 | inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP, | 574 | inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP | |
575 | (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), | ||
575 | daddr, saddr, 0, 0); | 576 | daddr, saddr, 0, 0); |
576 | 577 | ||
577 | if (!inet->hdrincl) { | 578 | if (!inet->hdrincl) { |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 5423223e93c2..b2f6c74861af 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1121,6 +1121,13 @@ new_segment: | |||
1121 | goto wait_for_memory; | 1121 | goto wait_for_memory; |
1122 | 1122 | ||
1123 | /* | 1123 | /* |
1124 | * All packets are restored as if they have | ||
1125 | * already been sent. | ||
1126 | */ | ||
1127 | if (tp->repair) | ||
1128 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | ||
1129 | |||
1130 | /* | ||
1124 | * Check whether we can use HW checksum. | 1131 | * Check whether we can use HW checksum. |
1125 | */ | 1132 | */ |
1126 | if (sk->sk_route_caps & NETIF_F_ALL_CSUM) | 1133 | if (sk->sk_route_caps & NETIF_F_ALL_CSUM) |
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index a9077f441cb2..b6ae92a51f58 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c | |||
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a) | |||
206 | */ | 206 | */ |
207 | static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | 207 | static inline void bictcp_update(struct bictcp *ca, u32 cwnd) |
208 | { | 208 | { |
209 | u64 offs; | 209 | u32 delta, bic_target, max_cnt; |
210 | u32 delta, t, bic_target, max_cnt; | 210 | u64 offs, t; |
211 | 211 | ||
212 | ca->ack_cnt++; /* count the number of ACKs */ | 212 | ca->ack_cnt++; /* count the number of ACKs */ |
213 | 213 | ||
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | |||
250 | * if the cwnd < 1 million packets !!! | 250 | * if the cwnd < 1 million packets !!! |
251 | */ | 251 | */ |
252 | 252 | ||
253 | t = (s32)(tcp_time_stamp - ca->epoch_start); | ||
254 | t += msecs_to_jiffies(ca->delay_min >> 3); | ||
253 | /* change the unit from HZ to bictcp_HZ */ | 255 | /* change the unit from HZ to bictcp_HZ */ |
254 | t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3) | 256 | t <<= BICTCP_HZ; |
255 | - ca->epoch_start) << BICTCP_HZ) / HZ; | 257 | do_div(t, HZ); |
256 | 258 | ||
257 | if (t < ca->bic_K) /* t - K */ | 259 | if (t < ca->bic_K) /* t - K */ |
258 | offs = ca->bic_K - t; | 260 | offs = ca->bic_K - t; |
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) | |||
414 | return; | 416 | return; |
415 | 417 | ||
416 | /* Discard delay samples right after fast recovery */ | 418 | /* Discard delay samples right after fast recovery */ |
417 | if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ) | 419 | if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ) |
418 | return; | 420 | return; |
419 | 421 | ||
420 | delay = (rtt_us << 3) / USEC_PER_MSEC; | 422 | delay = (rtt_us << 3) / USEC_PER_MSEC; |
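The bictcp_update() hunk widens t to u64 and replaces the inline (... << BICTCP_HZ) / HZ with an explicit shift plus do_div(), because the 32-bit intermediate overflows once roughly 2^32 / 1024 jiffies (about 70 minutes at HZ=1000) have elapsed since epoch_start. A small standalone program showing the difference, using plain 64-bit division in place of do_div():

    #include <stdint.h>
    #include <stdio.h>

    #define BICTCP_HZ 10    /* same scaling constant as tcp_cubic.c */
    #define HZ        1000

    int main(void)
    {
            uint32_t t = 2 * 3600 * HZ;     /* two hours' worth of jiffies */

            uint32_t narrow = (t << BICTCP_HZ) / HZ;                /* overflows */
            uint64_t wide   = ((uint64_t)t << BICTCP_HZ) / HZ;      /* does not */

            printf("32-bit: %u\n", narrow);                     /* 3077832 (wrong) */
            printf("64-bit: %llu\n", (unsigned long long)wide); /* 7372800 */
            return 0;
    }
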
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 28af45abe062..3ca2139a130b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -3535,7 +3535,10 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr | |||
3535 | ++ptr; | 3535 | ++ptr; |
3536 | tp->rx_opt.rcv_tsval = ntohl(*ptr); | 3536 | tp->rx_opt.rcv_tsval = ntohl(*ptr); |
3537 | ++ptr; | 3537 | ++ptr; |
3538 | tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; | 3538 | if (*ptr) |
3539 | tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; | ||
3540 | else | ||
3541 | tp->rx_opt.rcv_tsecr = 0; | ||
3539 | return true; | 3542 | return true; |
3540 | } | 3543 | } |
3541 | return false; | 3544 | return false; |
@@ -3560,7 +3563,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb, | |||
3560 | } | 3563 | } |
3561 | 3564 | ||
3562 | tcp_parse_options(skb, &tp->rx_opt, 1, NULL); | 3565 | tcp_parse_options(skb, &tp->rx_opt, 1, NULL); |
3563 | if (tp->rx_opt.saw_tstamp) | 3566 | if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) |
3564 | tp->rx_opt.rcv_tsecr -= tp->tsoffset; | 3567 | tp->rx_opt.rcv_tsecr -= tp->tsoffset; |
3565 | 3568 | ||
3566 | return true; | 3569 | return true; |
@@ -5316,7 +5319,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
5316 | int saved_clamp = tp->rx_opt.mss_clamp; | 5319 | int saved_clamp = tp->rx_opt.mss_clamp; |
5317 | 5320 | ||
5318 | tcp_parse_options(skb, &tp->rx_opt, 0, &foc); | 5321 | tcp_parse_options(skb, &tp->rx_opt, 0, &foc); |
5319 | if (tp->rx_opt.saw_tstamp) | 5322 | if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) |
5320 | tp->rx_opt.rcv_tsecr -= tp->tsoffset; | 5323 | tp->rx_opt.rcv_tsecr -= tp->tsoffset; |
5321 | 5324 | ||
5322 | if (th->ack) { | 5325 | if (th->ack) { |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 92fde8d1aa82..170737a9d56d 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2670,7 +2670,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2670 | int tcp_header_size; | 2670 | int tcp_header_size; |
2671 | int mss; | 2671 | int mss; |
2672 | 2672 | ||
2673 | skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC)); | 2673 | skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); |
2674 | if (unlikely(!skb)) { | 2674 | if (unlikely(!skb)) { |
2675 | dst_release(dst); | 2675 | dst_release(dst); |
2676 | return NULL; | 2676 | return NULL; |
@@ -2814,6 +2814,8 @@ void tcp_connect_init(struct sock *sk) | |||
2814 | 2814 | ||
2815 | if (likely(!tp->repair)) | 2815 | if (likely(!tp->repair)) |
2816 | tp->rcv_nxt = 0; | 2816 | tp->rcv_nxt = 0; |
2817 | else | ||
2818 | tp->rcv_tstamp = tcp_time_stamp; | ||
2817 | tp->rcv_wup = tp->rcv_nxt; | 2819 | tp->rcv_wup = tp->rcv_nxt; |
2818 | tp->copied_seq = tp->rcv_nxt; | 2820 | tp->copied_seq = tp->rcv_nxt; |
2819 | 2821 | ||
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 327a617d594c..baa0f63731fd 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -21,7 +21,6 @@ | |||
21 | static int xfrm4_tunnel_check_size(struct sk_buff *skb) | 21 | static int xfrm4_tunnel_check_size(struct sk_buff *skb) |
22 | { | 22 | { |
23 | int mtu, ret = 0; | 23 | int mtu, ret = 0; |
24 | struct dst_entry *dst; | ||
25 | 24 | ||
26 | if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) | 25 | if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) |
27 | goto out; | 26 | goto out; |
@@ -29,12 +28,10 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb) | |||
29 | if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) | 28 | if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) |
30 | goto out; | 29 | goto out; |
31 | 30 | ||
32 | dst = skb_dst(skb); | 31 | mtu = dst_mtu(skb_dst(skb)); |
33 | mtu = dst_mtu(dst); | ||
34 | if (skb->len > mtu) { | 32 | if (skb->len > mtu) { |
35 | if (skb->sk) | 33 | if (skb->sk) |
36 | ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr, | 34 | xfrm_local_error(skb, mtu); |
37 | inet_sk(skb->sk)->inet_dport, mtu); | ||
38 | else | 35 | else |
39 | icmp_send(skb, ICMP_DEST_UNREACH, | 36 | icmp_send(skb, ICMP_DEST_UNREACH, |
40 | ICMP_FRAG_NEEDED, htonl(mtu)); | 37 | ICMP_FRAG_NEEDED, htonl(mtu)); |
@@ -99,3 +96,12 @@ int xfrm4_output(struct sk_buff *skb) | |||
99 | x->outer_mode->afinfo->output_finish, | 96 | x->outer_mode->afinfo->output_finish, |
100 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 97 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
101 | } | 98 | } |
99 | |||
100 | void xfrm4_local_error(struct sk_buff *skb, u32 mtu) | ||
101 | { | ||
102 | struct iphdr *hdr; | ||
103 | |||
104 | hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); | ||
105 | ip_local_error(skb->sk, EMSGSIZE, hdr->daddr, | ||
106 | inet_sk(skb->sk)->inet_dport, mtu); | ||
107 | } | ||
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c index 9258e751baba..0b2a0641526a 100644 --- a/net/ipv4/xfrm4_state.c +++ b/net/ipv4/xfrm4_state.c | |||
@@ -83,6 +83,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = { | |||
83 | .extract_input = xfrm4_extract_input, | 83 | .extract_input = xfrm4_extract_input, |
84 | .extract_output = xfrm4_extract_output, | 84 | .extract_output = xfrm4_extract_output, |
85 | .transport_finish = xfrm4_transport_finish, | 85 | .transport_finish = xfrm4_transport_finish, |
86 | .local_error = xfrm4_local_error, | ||
86 | }; | 87 | }; |
87 | 88 | ||
88 | void __init xfrm4_state_init(void) | 89 | void __init xfrm4_state_init(void) |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index da4241c8c7da..498ea99194af 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1126,12 +1126,10 @@ retry: | |||
1126 | if (ifp->flags & IFA_F_OPTIMISTIC) | 1126 | if (ifp->flags & IFA_F_OPTIMISTIC) |
1127 | addr_flags |= IFA_F_OPTIMISTIC; | 1127 | addr_flags |= IFA_F_OPTIMISTIC; |
1128 | 1128 | ||
1129 | ift = !max_addresses || | 1129 | ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen, |
1130 | ipv6_count_addresses(idev) < max_addresses ? | 1130 | ipv6_addr_scope(&addr), addr_flags, |
1131 | ipv6_add_addr(idev, &addr, NULL, tmp_plen, | 1131 | tmp_valid_lft, tmp_prefered_lft); |
1132 | ipv6_addr_scope(&addr), addr_flags, | 1132 | if (IS_ERR(ift)) { |
1133 | tmp_valid_lft, tmp_prefered_lft) : NULL; | ||
1134 | if (IS_ERR_OR_NULL(ift)) { | ||
1135 | in6_ifa_put(ifp); | 1133 | in6_ifa_put(ifp); |
1136 | in6_dev_put(idev); | 1134 | in6_dev_put(idev); |
1137 | pr_info("%s: retry temporary address regeneration\n", __func__); | 1135 | pr_info("%s: retry temporary address regeneration\n", __func__); |
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 40ffd72243a4..aeac0dc3635d 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu) | |||
425 | net_adj = 0; | 425 | net_adj = 0; |
426 | 426 | ||
427 | return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - | 427 | return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - |
428 | net_adj) & ~(align - 1)) + (net_adj - 2); | 428 | net_adj) & ~(align - 1)) + net_adj - 2; |
429 | } | 429 | } |
430 | 430 | ||
431 | static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 431 | static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index bff3d821c7eb..c4ff5bbb45c4 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root, | |||
993 | 993 | ||
994 | if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { | 994 | if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { |
995 | #ifdef CONFIG_IPV6_SUBTREES | 995 | #ifdef CONFIG_IPV6_SUBTREES |
996 | if (fn->subtree) | 996 | if (fn->subtree) { |
997 | fn = fib6_lookup_1(fn->subtree, args + 1); | 997 | struct fib6_node *sfn; |
998 | sfn = fib6_lookup_1(fn->subtree, | ||
999 | args + 1); | ||
1000 | if (!sfn) | ||
1001 | goto backtrack; | ||
1002 | fn = sfn; | ||
1003 | } | ||
998 | #endif | 1004 | #endif |
999 | if (!fn || fn->fn_flags & RTN_RTINFO) | 1005 | if (fn->fn_flags & RTN_RTINFO) |
1000 | return fn; | 1006 | return fn; |
1001 | } | 1007 | } |
1002 | } | 1008 | } |
1003 | 1009 | #ifdef CONFIG_IPV6_SUBTREES | |
1010 | backtrack: | ||
1011 | #endif | ||
1004 | if (fn->fn_flags & RTN_ROOT) | 1012 | if (fn->fn_flags & RTN_ROOT) |
1005 | break; | 1013 | break; |
1006 | 1014 | ||
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index ecd60733e5e2..90747f1973fe 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -724,6 +724,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, | |||
724 | ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); | 724 | ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); |
725 | } | 725 | } |
726 | 726 | ||
727 | if (likely(!skb->encapsulation)) { | ||
728 | skb_reset_inner_headers(skb); | ||
729 | skb->encapsulation = 1; | ||
730 | } | ||
731 | |||
727 | skb_push(skb, gre_hlen); | 732 | skb_push(skb, gre_hlen); |
728 | skb_reset_network_header(skb); | 733 | skb_reset_network_header(skb); |
729 | skb_set_transport_header(skb, sizeof(*ipv6h)); | 734 | skb_set_transport_header(skb, sizeof(*ipv6h)); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 6e3ddf806ec2..e7ceb6c871d1 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -238,6 +238,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, | |||
238 | hdr->saddr = fl6->saddr; | 238 | hdr->saddr = fl6->saddr; |
239 | hdr->daddr = *first_hop; | 239 | hdr->daddr = *first_hop; |
240 | 240 | ||
241 | skb->protocol = htons(ETH_P_IPV6); | ||
241 | skb->priority = sk->sk_priority; | 242 | skb->priority = sk->sk_priority; |
242 | skb->mark = sk->sk_mark; | 243 | skb->mark = sk->sk_mark; |
243 | 244 | ||
@@ -1057,6 +1058,7 @@ static inline int ip6_ufo_append_data(struct sock *sk, | |||
1057 | /* initialize protocol header pointer */ | 1058 | /* initialize protocol header pointer */ |
1058 | skb->transport_header = skb->network_header + fragheaderlen; | 1059 | skb->transport_header = skb->network_header + fragheaderlen; |
1059 | 1060 | ||
1061 | skb->protocol = htons(ETH_P_IPV6); | ||
1060 | skb->ip_summed = CHECKSUM_PARTIAL; | 1062 | skb->ip_summed = CHECKSUM_PARTIAL; |
1061 | skb->csum = 0; | 1063 | skb->csum = 0; |
1062 | } | 1064 | } |
@@ -1359,6 +1361,7 @@ alloc_new_skb: | |||
1359 | /* | 1361 | /* |
1360 | * Fill in the control structures | 1362 | * Fill in the control structures |
1361 | */ | 1363 | */ |
1364 | skb->protocol = htons(ETH_P_IPV6); | ||
1362 | skb->ip_summed = CHECKSUM_NONE; | 1365 | skb->ip_summed = CHECKSUM_NONE; |
1363 | skb->csum = 0; | 1366 | skb->csum = 0; |
1364 | /* reserve for fragmentation and ipsec header */ | 1367 | /* reserve for fragmentation and ipsec header */ |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 1e55866cead7..46ba243605a3 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1027,6 +1027,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
1027 | init_tel_txopt(&opt, encap_limit); | 1027 | init_tel_txopt(&opt, encap_limit); |
1028 | ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); | 1028 | ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); |
1029 | } | 1029 | } |
1030 | |||
1031 | if (likely(!skb->encapsulation)) { | ||
1032 | skb_reset_inner_headers(skb); | ||
1033 | skb->encapsulation = 1; | ||
1034 | } | ||
1035 | |||
1030 | skb_push(skb, sizeof(struct ipv6hdr)); | 1036 | skb_push(skb, sizeof(struct ipv6hdr)); |
1031 | skb_reset_network_header(skb); | 1037 | skb_reset_network_header(skb); |
1032 | ipv6h = ipv6_hdr(skb); | 1038 | ipv6h = ipv6_hdr(skb); |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 79aa9652ed86..04d31c2fbef1 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -1369,8 +1369,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) | |||
1369 | if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) | 1369 | if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) |
1370 | return; | 1370 | return; |
1371 | 1371 | ||
1372 | if (!ndopts.nd_opts_rh) | 1372 | if (!ndopts.nd_opts_rh) { |
1373 | ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0); | ||
1373 | return; | 1374 | return; |
1375 | } | ||
1374 | 1376 | ||
1375 | hdr = (u8 *)ndopts.nd_opts_rh; | 1377 | hdr = (u8 *)ndopts.nd_opts_rh; |
1376 | hdr += 8; | 1378 | hdr += 8; |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index c45f7a5c36e9..cdaed47ba932 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -628,6 +628,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, | |||
628 | goto error; | 628 | goto error; |
629 | skb_reserve(skb, hlen); | 629 | skb_reserve(skb, hlen); |
630 | 630 | ||
631 | skb->protocol = htons(ETH_P_IPV6); | ||
631 | skb->priority = sk->sk_priority; | 632 | skb->priority = sk->sk_priority; |
632 | skb->mark = sk->sk_mark; | 633 | skb->mark = sk->sk_mark; |
633 | skb_dst_set(skb, &rt->dst); | 634 | skb_dst_set(skb, &rt->dst); |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 790d9f4b8b0b..1aeb473b2cc6 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -490,6 +490,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
490 | ipv6_hdr(head)->payload_len = htons(payload_len); | 490 | ipv6_hdr(head)->payload_len = htons(payload_len); |
491 | ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); | 491 | ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); |
492 | IP6CB(head)->nhoff = nhoff; | 492 | IP6CB(head)->nhoff = nhoff; |
493 | IP6CB(head)->flags |= IP6SKB_FRAGMENTED; | ||
493 | 494 | ||
494 | /* Yes, and fold redundant checksum back. 8) */ | 495 | /* Yes, and fold redundant checksum back. 8) */ |
495 | if (head->ip_summed == CHECKSUM_COMPLETE) | 496 | if (head->ip_summed == CHECKSUM_COMPLETE) |
@@ -524,6 +525,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb) | |||
524 | struct net *net = dev_net(skb_dst(skb)->dev); | 525 | struct net *net = dev_net(skb_dst(skb)->dev); |
525 | int evicted; | 526 | int evicted; |
526 | 527 | ||
528 | if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED) | ||
529 | goto fail_hdr; | ||
530 | |||
527 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); | 531 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); |
528 | 532 | ||
529 | /* Jumbo payload inhibits frag. header */ | 533 | /* Jumbo payload inhibits frag. header */ |
@@ -544,6 +548,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) | |||
544 | ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); | 548 | ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); |
545 | 549 | ||
546 | IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); | 550 | IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); |
551 | IP6CB(skb)->flags |= IP6SKB_FRAGMENTED; | ||
547 | return 1; | 552 | return 1; |
548 | } | 553 | } |
549 | 554 | ||
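The reassembly.c hunks set IP6SKB_FRAGMENTED once a packet has been handled by the fragment code, and bail out early if the flag is already set, so a packet carrying more than one fragment header cannot be pushed through reassembly twice. A minimal sketch of that guard, with a made-up flag value:

    #define SKB_FLAG_FRAGMENTED 0x01   /* hypothetical stand-in for IP6SKB_FRAGMENTED */

    /* returns 1 when the fragment was accepted, -1 on a bad or repeated
     * fragment header */
    static int frag_rcv(unsigned int *cb_flags)
    {
            if (*cb_flags & SKB_FLAG_FRAGMENTED)
                    return -1;          /* second fragment header: reject */

            /* ... queue the fragment and, when complete, reassemble ... */

            *cb_flags |= SKB_FLAG_FRAGMENTED;
            return 1;
    }
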
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b70f8979003b..8d9a93ed9c59 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1178,6 +1178,27 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark) | |||
1178 | } | 1178 | } |
1179 | EXPORT_SYMBOL_GPL(ip6_redirect); | 1179 | EXPORT_SYMBOL_GPL(ip6_redirect); |
1180 | 1180 | ||
1181 | void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif, | ||
1182 | u32 mark) | ||
1183 | { | ||
1184 | const struct ipv6hdr *iph = ipv6_hdr(skb); | ||
1185 | const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb); | ||
1186 | struct dst_entry *dst; | ||
1187 | struct flowi6 fl6; | ||
1188 | |||
1189 | memset(&fl6, 0, sizeof(fl6)); | ||
1190 | fl6.flowi6_oif = oif; | ||
1191 | fl6.flowi6_mark = mark; | ||
1192 | fl6.flowi6_flags = 0; | ||
1193 | fl6.daddr = msg->dest; | ||
1194 | fl6.saddr = iph->daddr; | ||
1195 | |||
1196 | dst = ip6_route_output(net, NULL, &fl6); | ||
1197 | if (!dst->error) | ||
1198 | rt6_do_redirect(dst, NULL, skb); | ||
1199 | dst_release(dst); | ||
1200 | } | ||
1201 | |||
1181 | void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) | 1202 | void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) |
1182 | { | 1203 | { |
1183 | ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark); | 1204 | ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark); |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index a3437a4cd07e..21b25dd8466b 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -645,11 +645,7 @@ static int ipip_rcv(struct sk_buff *skb) | |||
645 | const struct iphdr *iph; | 645 | const struct iphdr *iph; |
646 | struct ip_tunnel *tunnel; | 646 | struct ip_tunnel *tunnel; |
647 | 647 | ||
648 | if (iptunnel_pull_header(skb, 0, tpi.proto)) | ||
649 | goto drop; | ||
650 | |||
651 | iph = ip_hdr(skb); | 648 | iph = ip_hdr(skb); |
652 | |||
653 | tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, | 649 | tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, |
654 | iph->saddr, iph->daddr); | 650 | iph->saddr, iph->daddr); |
655 | if (tunnel != NULL) { | 651 | if (tunnel != NULL) { |
@@ -659,6 +655,8 @@ static int ipip_rcv(struct sk_buff *skb) | |||
659 | 655 | ||
660 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 656 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
661 | goto drop; | 657 | goto drop; |
658 | if (iptunnel_pull_header(skb, 0, tpi.proto)) | ||
659 | goto drop; | ||
662 | return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); | 660 | return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); |
663 | } | 661 | } |
664 | 662 | ||
@@ -888,6 +886,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
888 | ttl = iph6->hop_limit; | 886 | ttl = iph6->hop_limit; |
889 | tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); | 887 | tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); |
890 | 888 | ||
889 | if (likely(!skb->encapsulation)) { | ||
890 | skb_reset_inner_headers(skb); | ||
891 | skb->encapsulation = 1; | ||
892 | } | ||
893 | |||
891 | err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr, | 894 | err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr, |
892 | IPPROTO_IPV6, tos, ttl, df); | 895 | IPPROTO_IPV6, tos, ttl, df); |
893 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); | 896 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); |
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 8755a3079d0f..6cd625e37706 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -34,8 +34,10 @@ static int xfrm6_local_dontfrag(struct sk_buff *skb) | |||
34 | struct sock *sk = skb->sk; | 34 | struct sock *sk = skb->sk; |
35 | 35 | ||
36 | if (sk) { | 36 | if (sk) { |
37 | proto = sk->sk_protocol; | 37 | if (sk->sk_family != AF_INET6) |
38 | return 0; | ||
38 | 39 | ||
40 | proto = sk->sk_protocol; | ||
39 | if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) | 41 | if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) |
40 | return inet6_sk(sk)->dontfrag; | 42 | return inet6_sk(sk)->dontfrag; |
41 | } | 43 | } |
@@ -54,13 +56,15 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu) | |||
54 | ipv6_local_rxpmtu(sk, &fl6, mtu); | 56 | ipv6_local_rxpmtu(sk, &fl6, mtu); |
55 | } | 57 | } |
56 | 58 | ||
57 | static void xfrm6_local_error(struct sk_buff *skb, u32 mtu) | 59 | void xfrm6_local_error(struct sk_buff *skb, u32 mtu) |
58 | { | 60 | { |
59 | struct flowi6 fl6; | 61 | struct flowi6 fl6; |
62 | const struct ipv6hdr *hdr; | ||
60 | struct sock *sk = skb->sk; | 63 | struct sock *sk = skb->sk; |
61 | 64 | ||
65 | hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); | ||
62 | fl6.fl6_dport = inet_sk(sk)->inet_dport; | 66 | fl6.fl6_dport = inet_sk(sk)->inet_dport; |
63 | fl6.daddr = ipv6_hdr(skb)->daddr; | 67 | fl6.daddr = hdr->daddr; |
64 | 68 | ||
65 | ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); | 69 | ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); |
66 | } | 70 | } |
@@ -80,7 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb) | |||
80 | if (xfrm6_local_dontfrag(skb)) | 84 | if (xfrm6_local_dontfrag(skb)) |
81 | xfrm6_local_rxpmtu(skb, mtu); | 85 | xfrm6_local_rxpmtu(skb, mtu); |
82 | else if (skb->sk) | 86 | else if (skb->sk) |
83 | xfrm6_local_error(skb, mtu); | 87 | xfrm_local_error(skb, mtu); |
84 | else | 88 | else |
85 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 89 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
86 | ret = -EMSGSIZE; | 90 | ret = -EMSGSIZE; |
@@ -136,13 +140,18 @@ static int __xfrm6_output(struct sk_buff *skb) | |||
136 | { | 140 | { |
137 | struct dst_entry *dst = skb_dst(skb); | 141 | struct dst_entry *dst = skb_dst(skb); |
138 | struct xfrm_state *x = dst->xfrm; | 142 | struct xfrm_state *x = dst->xfrm; |
139 | int mtu = ip6_skb_dst_mtu(skb); | 143 | int mtu; |
144 | |||
145 | if (skb->protocol == htons(ETH_P_IPV6)) | ||
146 | mtu = ip6_skb_dst_mtu(skb); | ||
147 | else | ||
148 | mtu = dst_mtu(skb_dst(skb)); | ||
140 | 149 | ||
141 | if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { | 150 | if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { |
142 | xfrm6_local_rxpmtu(skb, mtu); | 151 | xfrm6_local_rxpmtu(skb, mtu); |
143 | return -EMSGSIZE; | 152 | return -EMSGSIZE; |
144 | } else if (!skb->local_df && skb->len > mtu && skb->sk) { | 153 | } else if (!skb->local_df && skb->len > mtu && skb->sk) { |
145 | xfrm6_local_error(skb, mtu); | 154 | xfrm_local_error(skb, mtu); |
146 | return -EMSGSIZE; | 155 | return -EMSGSIZE; |
147 | } | 156 | } |
148 | 157 | ||
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c index d8c70b8efc24..3fc970135fc6 100644 --- a/net/ipv6/xfrm6_state.c +++ b/net/ipv6/xfrm6_state.c | |||
@@ -183,6 +183,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = { | |||
183 | .extract_input = xfrm6_extract_input, | 183 | .extract_input = xfrm6_extract_input, |
184 | .extract_output = xfrm6_extract_output, | 184 | .extract_output = xfrm6_extract_output, |
185 | .transport_finish = xfrm6_transport_finish, | 185 | .transport_finish = xfrm6_transport_finish, |
186 | .local_error = xfrm6_local_error, | ||
186 | }; | 187 | }; |
187 | 188 | ||
188 | int __init xfrm6_state_init(void) | 189 | int __init xfrm6_state_init(void) |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index ea7b9c2c7e66..2d45643c964e 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -36,7 +36,7 @@ | |||
36 | 36 | ||
37 | static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | 37 | static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, |
38 | const u8 *bssid, const int beacon_int, | 38 | const u8 *bssid, const int beacon_int, |
39 | struct ieee80211_channel *chan, | 39 | struct cfg80211_chan_def *req_chandef, |
40 | const u32 basic_rates, | 40 | const u32 basic_rates, |
41 | const u16 capability, u64 tsf, | 41 | const u16 capability, u64 tsf, |
42 | bool creator) | 42 | bool creator) |
@@ -51,6 +51,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
51 | u32 bss_change; | 51 | u32 bss_change; |
52 | u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; | 52 | u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; |
53 | struct cfg80211_chan_def chandef; | 53 | struct cfg80211_chan_def chandef; |
54 | struct ieee80211_channel *chan; | ||
54 | struct beacon_data *presp; | 55 | struct beacon_data *presp; |
55 | int frame_len; | 56 | int frame_len; |
56 | 57 | ||
@@ -81,7 +82,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
81 | 82 | ||
82 | sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; | 83 | sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; |
83 | 84 | ||
84 | chandef = ifibss->chandef; | 85 | /* make a copy of the chandef, it could be modified below. */ |
86 | chandef = *req_chandef; | ||
87 | chan = chandef.chan; | ||
85 | if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { | 88 | if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { |
86 | chandef.width = NL80211_CHAN_WIDTH_20; | 89 | chandef.width = NL80211_CHAN_WIDTH_20; |
87 | chandef.center_freq1 = chan->center_freq; | 90 | chandef.center_freq1 = chan->center_freq; |
@@ -259,10 +262,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
259 | struct cfg80211_bss *cbss = | 262 | struct cfg80211_bss *cbss = |
260 | container_of((void *)bss, struct cfg80211_bss, priv); | 263 | container_of((void *)bss, struct cfg80211_bss, priv); |
261 | struct ieee80211_supported_band *sband; | 264 | struct ieee80211_supported_band *sband; |
265 | struct cfg80211_chan_def chandef; | ||
262 | u32 basic_rates; | 266 | u32 basic_rates; |
263 | int i, j; | 267 | int i, j; |
264 | u16 beacon_int = cbss->beacon_interval; | 268 | u16 beacon_int = cbss->beacon_interval; |
265 | const struct cfg80211_bss_ies *ies; | 269 | const struct cfg80211_bss_ies *ies; |
270 | enum nl80211_channel_type chan_type; | ||
266 | u64 tsf; | 271 | u64 tsf; |
267 | 272 | ||
268 | sdata_assert_lock(sdata); | 273 | sdata_assert_lock(sdata); |
@@ -270,6 +275,26 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
270 | if (beacon_int < 10) | 275 | if (beacon_int < 10) |
271 | beacon_int = 10; | 276 | beacon_int = 10; |
272 | 277 | ||
278 | switch (sdata->u.ibss.chandef.width) { | ||
279 | case NL80211_CHAN_WIDTH_20_NOHT: | ||
280 | case NL80211_CHAN_WIDTH_20: | ||
281 | case NL80211_CHAN_WIDTH_40: | ||
282 | chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef); | ||
283 | cfg80211_chandef_create(&chandef, cbss->channel, chan_type); | ||
284 | break; | ||
285 | case NL80211_CHAN_WIDTH_5: | ||
286 | case NL80211_CHAN_WIDTH_10: | ||
287 | cfg80211_chandef_create(&chandef, cbss->channel, | ||
288 | NL80211_CHAN_WIDTH_20_NOHT); | ||
289 | chandef.width = sdata->u.ibss.chandef.width; | ||
290 | break; | ||
291 | default: | ||
292 | /* fall back to 20 MHz for unsupported modes */ | ||
293 | cfg80211_chandef_create(&chandef, cbss->channel, | ||
294 | NL80211_CHAN_WIDTH_20_NOHT); | ||
295 | break; | ||
296 | } | ||
297 | |||
273 | sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; | 298 | sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; |
274 | 299 | ||
275 | basic_rates = 0; | 300 | basic_rates = 0; |
@@ -294,7 +319,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
294 | 319 | ||
295 | __ieee80211_sta_join_ibss(sdata, cbss->bssid, | 320 | __ieee80211_sta_join_ibss(sdata, cbss->bssid, |
296 | beacon_int, | 321 | beacon_int, |
297 | cbss->channel, | 322 | &chandef, |
298 | basic_rates, | 323 | basic_rates, |
299 | cbss->capability, | 324 | cbss->capability, |
300 | tsf, false); | 325 | tsf, false); |
@@ -736,7 +761,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) | |||
736 | sdata->drop_unencrypted = 0; | 761 | sdata->drop_unencrypted = 0; |
737 | 762 | ||
738 | __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, | 763 | __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, |
739 | ifibss->chandef.chan, ifibss->basic_rates, | 764 | &ifibss->chandef, ifibss->basic_rates, |
740 | capability, 0, true); | 765 | capability, 0, true); |
741 | } | 766 | } |
742 | 767 | ||
@@ -1138,6 +1163,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) | |||
1138 | clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); | 1163 | clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); |
1139 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | | 1164 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | |
1140 | BSS_CHANGED_IBSS); | 1165 | BSS_CHANGED_IBSS); |
1166 | ieee80211_vif_release_channel(sdata); | ||
1141 | synchronize_rcu(); | 1167 | synchronize_rcu(); |
1142 | kfree(presp); | 1168 | kfree(presp); |
1143 | 1169 | ||
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index ae31968d42d3..cc9e02d79b55 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -31,10 +31,12 @@ | |||
31 | #include "led.h" | 31 | #include "led.h" |
32 | 32 | ||
33 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) | 33 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) |
34 | #define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2) | ||
34 | #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) | 35 | #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) |
35 | #define IEEE80211_AUTH_MAX_TRIES 3 | 36 | #define IEEE80211_AUTH_MAX_TRIES 3 |
36 | #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) | 37 | #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) |
37 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) | 38 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) |
39 | #define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2) | ||
38 | #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) | 40 | #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) |
39 | #define IEEE80211_ASSOC_MAX_TRIES 3 | 41 | #define IEEE80211_ASSOC_MAX_TRIES 3 |
40 | 42 | ||
@@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
209 | struct ieee80211_channel *channel, | 211 | struct ieee80211_channel *channel, |
210 | const struct ieee80211_ht_operation *ht_oper, | 212 | const struct ieee80211_ht_operation *ht_oper, |
211 | const struct ieee80211_vht_operation *vht_oper, | 213 | const struct ieee80211_vht_operation *vht_oper, |
212 | struct cfg80211_chan_def *chandef, bool verbose) | 214 | struct cfg80211_chan_def *chandef, bool tracking) |
213 | { | 215 | { |
216 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
214 | struct cfg80211_chan_def vht_chandef; | 217 | struct cfg80211_chan_def vht_chandef; |
215 | u32 ht_cfreq, ret; | 218 | u32 ht_cfreq, ret; |
216 | 219 | ||
@@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
229 | ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, | 232 | ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, |
230 | channel->band); | 233 | channel->band); |
231 | /* check that channel matches the right operating channel */ | 234 | /* check that channel matches the right operating channel */ |
232 | if (channel->center_freq != ht_cfreq) { | 235 | if (!tracking && channel->center_freq != ht_cfreq) { |
233 | /* | 236 | /* |
234 | * It's possible that some APs are confused here; | 237 | * It's possible that some APs are confused here; |
235 | * Netgear WNDR3700 sometimes reports 4 higher than | 238 | * Netgear WNDR3700 sometimes reports 4 higher than |
@@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
237 | * since we look at probe response/beacon data here | 240 | * since we look at probe response/beacon data here |
238 | * it should be OK. | 241 | * it should be OK. |
239 | */ | 242 | */ |
240 | if (verbose) | 243 | sdata_info(sdata, |
241 | sdata_info(sdata, | 244 | "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", |
242 | "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", | 245 | channel->center_freq, ht_cfreq, |
243 | channel->center_freq, ht_cfreq, | 246 | ht_oper->primary_chan, channel->band); |
244 | ht_oper->primary_chan, channel->band); | ||
245 | ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; | 247 | ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; |
246 | goto out; | 248 | goto out; |
247 | } | 249 | } |
@@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
295 | channel->band); | 297 | channel->band); |
296 | break; | 298 | break; |
297 | default: | 299 | default: |
298 | if (verbose) | 300 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) |
299 | sdata_info(sdata, | 301 | sdata_info(sdata, |
300 | "AP VHT operation IE has invalid channel width (%d), disable VHT\n", | 302 | "AP VHT operation IE has invalid channel width (%d), disable VHT\n", |
301 | vht_oper->chan_width); | 303 | vht_oper->chan_width); |
@@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
304 | } | 306 | } |
305 | 307 | ||
306 | if (!cfg80211_chandef_valid(&vht_chandef)) { | 308 | if (!cfg80211_chandef_valid(&vht_chandef)) { |
307 | if (verbose) | 309 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) |
308 | sdata_info(sdata, | 310 | sdata_info(sdata, |
309 | "AP VHT information is invalid, disable VHT\n"); | 311 | "AP VHT information is invalid, disable VHT\n"); |
310 | ret = IEEE80211_STA_DISABLE_VHT; | 312 | ret = IEEE80211_STA_DISABLE_VHT; |
@@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
317 | } | 319 | } |
318 | 320 | ||
319 | if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { | 321 | if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { |
320 | if (verbose) | 322 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) |
321 | sdata_info(sdata, | 323 | sdata_info(sdata, |
322 | "AP VHT information doesn't match HT, disable VHT\n"); | 324 | "AP VHT information doesn't match HT, disable VHT\n"); |
323 | ret = IEEE80211_STA_DISABLE_VHT; | 325 | ret = IEEE80211_STA_DISABLE_VHT; |
@@ -333,18 +335,27 @@ out: | |||
333 | if (ret & IEEE80211_STA_DISABLE_VHT) | 335 | if (ret & IEEE80211_STA_DISABLE_VHT) |
334 | vht_chandef = *chandef; | 336 | vht_chandef = *chandef; |
335 | 337 | ||
338 | /* | ||
339 | * Ignore the DISABLED flag when we're already connected and only | ||
340 | * tracking the APs beacon for bandwidth changes - otherwise we | ||
341 | * might get disconnected here if we connect to an AP, update our | ||
342 | * regulatory information based on the AP's country IE and the | ||
343 | * information we have is wrong/outdated and disables the channel | ||
344 | * that we're actually using for the connection to the AP. | ||
345 | */ | ||
336 | while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, | 346 | while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, |
337 | IEEE80211_CHAN_DISABLED)) { | 347 | tracking ? 0 : |
348 | IEEE80211_CHAN_DISABLED)) { | ||
338 | if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { | 349 | if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { |
339 | ret = IEEE80211_STA_DISABLE_HT | | 350 | ret = IEEE80211_STA_DISABLE_HT | |
340 | IEEE80211_STA_DISABLE_VHT; | 351 | IEEE80211_STA_DISABLE_VHT; |
341 | goto out; | 352 | break; |
342 | } | 353 | } |
343 | 354 | ||
344 | ret |= chandef_downgrade(chandef); | 355 | ret |= chandef_downgrade(chandef); |
345 | } | 356 | } |
346 | 357 | ||
347 | if (chandef->width != vht_chandef.width && verbose) | 358 | if (chandef->width != vht_chandef.width && !tracking) |
348 | sdata_info(sdata, | 359 | sdata_info(sdata, |
349 | "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); | 360 | "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); |
350 | 361 | ||
@@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, | |||
384 | 395 | ||
385 | /* calculate new channel (type) based on HT/VHT operation IEs */ | 396 | /* calculate new channel (type) based on HT/VHT operation IEs */ |
386 | flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, | 397 | flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, |
387 | vht_oper, &chandef, false); | 398 | vht_oper, &chandef, true); |
388 | 399 | ||
389 | /* | 400 | /* |
390 | * Downgrade the new channel if we associated with restricted | 401 | * Downgrade the new channel if we associated with restricted |
@@ -3394,10 +3405,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) | |||
3394 | 3405 | ||
3395 | if (tx_flags == 0) { | 3406 | if (tx_flags == 0) { |
3396 | auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; | 3407 | auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; |
3397 | ifmgd->auth_data->timeout_started = true; | 3408 | auth_data->timeout_started = true; |
3398 | run_again(sdata, auth_data->timeout); | 3409 | run_again(sdata, auth_data->timeout); |
3399 | } else { | 3410 | } else { |
3400 | auth_data->timeout_started = false; | 3411 | auth_data->timeout = |
3412 | round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); | ||
3413 | auth_data->timeout_started = true; | ||
3414 | run_again(sdata, auth_data->timeout); | ||
3401 | } | 3415 | } |
3402 | 3416 | ||
3403 | return 0; | 3417 | return 0; |
@@ -3434,7 +3448,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) | |||
3434 | assoc_data->timeout_started = true; | 3448 | assoc_data->timeout_started = true; |
3435 | run_again(sdata, assoc_data->timeout); | 3449 | run_again(sdata, assoc_data->timeout); |
3436 | } else { | 3450 | } else { |
3437 | assoc_data->timeout_started = false; | 3451 | assoc_data->timeout = |
3452 | round_jiffies_up(jiffies + | ||
3453 | IEEE80211_ASSOC_TIMEOUT_LONG); | ||
3454 | assoc_data->timeout_started = true; | ||
3455 | run_again(sdata, assoc_data->timeout); | ||
3438 | } | 3456 | } |
3439 | 3457 | ||
3440 | return 0; | 3458 | return 0; |
@@ -3829,7 +3847,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, | |||
3829 | ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, | 3847 | ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, |
3830 | cbss->channel, | 3848 | cbss->channel, |
3831 | ht_oper, vht_oper, | 3849 | ht_oper, vht_oper, |
3832 | &chandef, true); | 3850 | &chandef, false); |
3833 | 3851 | ||
3834 | sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), | 3852 | sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), |
3835 | local->rx_chains); | 3853 | local->rx_chains); |
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index f5aed963b22e..f3bbea1eb9e7 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c | |||
@@ -828,6 +828,9 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, | |||
828 | if (sband->band != IEEE80211_BAND_2GHZ) | 828 | if (sband->band != IEEE80211_BAND_2GHZ) |
829 | return; | 829 | return; |
830 | 830 | ||
831 | if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES)) | ||
832 | return; | ||
833 | |||
831 | mi->cck_supported = 0; | 834 | mi->cck_supported = 0; |
832 | mi->cck_supported_short = 0; | 835 | mi->cck_supported_short = 0; |
833 | for (i = 0; i < 4; i++) { | 836 | for (i = 0; i < 4; i++) { |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 7dcc376eea5f..2f8010707d01 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct, | |||
526 | const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; | 526 | const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; |
527 | __u32 seq, ack, sack, end, win, swin; | 527 | __u32 seq, ack, sack, end, win, swin; |
528 | s16 receiver_offset; | 528 | s16 receiver_offset; |
529 | bool res; | 529 | bool res, in_recv_win; |
530 | 530 | ||
531 | /* | 531 | /* |
532 | * Get the required data from the packet. | 532 | * Get the required data from the packet. |
@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct, | |||
649 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, | 649 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, |
650 | receiver->td_scale); | 650 | receiver->td_scale); |
651 | 651 | ||
652 | /* Is the ending sequence in the receive window (if available)? */ | ||
653 | in_recv_win = !receiver->td_maxwin || | ||
654 | after(end, sender->td_end - receiver->td_maxwin - 1); | ||
655 | |||
652 | pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n", | 656 | pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n", |
653 | before(seq, sender->td_maxend + 1), | 657 | before(seq, sender->td_maxend + 1), |
654 | after(end, sender->td_end - receiver->td_maxwin - 1), | 658 | (in_recv_win ? 1 : 0), |
655 | before(sack, receiver->td_end + 1), | 659 | before(sack, receiver->td_end + 1), |
656 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)); | 660 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)); |
657 | 661 | ||
658 | if (before(seq, sender->td_maxend + 1) && | 662 | if (before(seq, sender->td_maxend + 1) && |
659 | after(end, sender->td_end - receiver->td_maxwin - 1) && | 663 | in_recv_win && |
660 | before(sack, receiver->td_end + 1) && | 664 | before(sack, receiver->td_end + 1) && |
661 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { | 665 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { |
662 | /* | 666 | /* |
@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct, | |||
725 | nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, | 729 | nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, |
726 | "nf_ct_tcp: %s ", | 730 | "nf_ct_tcp: %s ", |
727 | before(seq, sender->td_maxend + 1) ? | 731 | before(seq, sender->td_maxend + 1) ? |
728 | after(end, sender->td_end - receiver->td_maxwin - 1) ? | 732 | in_recv_win ? |
729 | before(sack, receiver->td_end + 1) ? | 733 | before(sack, receiver->td_end + 1) ? |
730 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" | 734 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" |
731 | : "ACK is under the lower bound (possible overly delayed ACK)" | 735 | : "ACK is under the lower bound (possible overly delayed ACK)" |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 962e9792e317..d92cc317bf8b 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log, | |||
419 | nfmsg->version = NFNETLINK_V0; | 419 | nfmsg->version = NFNETLINK_V0; |
420 | nfmsg->res_id = htons(inst->group_num); | 420 | nfmsg->res_id = htons(inst->group_num); |
421 | 421 | ||
422 | memset(&pmsg, 0, sizeof(pmsg)); | ||
422 | pmsg.hw_protocol = skb->protocol; | 423 | pmsg.hw_protocol = skb->protocol; |
423 | pmsg.hook = hooknum; | 424 | pmsg.hook = hooknum; |
424 | 425 | ||
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log, | |||
498 | if (indev && skb->dev && | 499 | if (indev && skb->dev && |
499 | skb->mac_header != skb->network_header) { | 500 | skb->mac_header != skb->network_header) { |
500 | struct nfulnl_msg_packet_hw phw; | 501 | struct nfulnl_msg_packet_hw phw; |
501 | int len = dev_parse_header(skb, phw.hw_addr); | 502 | int len; |
503 | |||
504 | memset(&phw, 0, sizeof(phw)); | ||
505 | len = dev_parse_header(skb, phw.hw_addr); | ||
502 | if (len > 0) { | 506 | if (len > 0) { |
503 | phw.hw_addrlen = htons(len); | 507 | phw.hw_addrlen = htons(len); |
504 | if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw)) | 508 | if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw)) |
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 971ea145ab3e..8a703c3dd318 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
463 | if (indev && entskb->dev && | 463 | if (indev && entskb->dev && |
464 | entskb->mac_header != entskb->network_header) { | 464 | entskb->mac_header != entskb->network_header) { |
465 | struct nfqnl_msg_packet_hw phw; | 465 | struct nfqnl_msg_packet_hw phw; |
466 | int len = dev_parse_header(entskb, phw.hw_addr); | 466 | int len; |
467 | |||
468 | memset(&phw, 0, sizeof(phw)); | ||
469 | len = dev_parse_header(entskb, phw.hw_addr); | ||
467 | if (len) { | 470 | if (len) { |
468 | phw.hw_addrlen = htons(len); | 471 | phw.hw_addrlen = htons(len); |
469 | if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) | 472 | if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) |
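The two nfnetlink hunks above (log and queue) zero the on-stack packet_hw structure before dev_parse_header() fills it, because only the device's address length (6 bytes for Ethernet) of the 8-byte hw_addr[] array is written and the padding is never touched, yet the whole structure is copied to userspace via nla_put(). A userspace sketch of why the memset matters; the field layout mirrors struct nfulnl_msg_packet_hw, byte order ignored for brevity:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct packet_hw {              /* mirrors struct nfulnl_msg_packet_hw */
            uint16_t hw_addrlen;
            uint16_t _pad;
            uint8_t  hw_addr[8];
    };

    int main(void)
    {
            struct packet_hw phw;
            const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

            /* without this, _pad and hw_addr[6..7] keep whatever was on the
             * stack and would be copied out verbatim by nla_put() */
            memset(&phw, 0, sizeof(phw));

            memcpy(phw.hw_addr, mac, sizeof(mac));  /* dev_parse_header() stand-in */
            phw.hw_addrlen = sizeof(mac);

            for (size_t i = 0; i < sizeof(phw); i++)
                    printf("%02x ", ((const uint8_t *)&phw)[i]);
            printf("\n");
            return 0;
    }
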
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 7011c71646f0..6113cc7efffc 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c | |||
@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
52 | { | 52 | { |
53 | const struct xt_tcpmss_info *info = par->targinfo; | 53 | const struct xt_tcpmss_info *info = par->targinfo; |
54 | struct tcphdr *tcph; | 54 | struct tcphdr *tcph; |
55 | unsigned int tcplen, i; | 55 | int len, tcp_hdrlen; |
56 | unsigned int i; | ||
56 | __be16 oldval; | 57 | __be16 oldval; |
57 | u16 newmss; | 58 | u16 newmss; |
58 | u8 *opt; | 59 | u8 *opt; |
@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
64 | if (!skb_make_writable(skb, skb->len)) | 65 | if (!skb_make_writable(skb, skb->len)) |
65 | return -1; | 66 | return -1; |
66 | 67 | ||
67 | tcplen = skb->len - tcphoff; | 68 | len = skb->len - tcphoff; |
69 | if (len < (int)sizeof(struct tcphdr)) | ||
70 | return -1; | ||
71 | |||
68 | tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); | 72 | tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); |
73 | tcp_hdrlen = tcph->doff * 4; | ||
69 | 74 | ||
70 | /* Header cannot be larger than the packet */ | 75 | if (len < tcp_hdrlen) |
71 | if (tcplen < tcph->doff*4) | ||
72 | return -1; | 76 | return -1; |
73 | 77 | ||
74 | if (info->mss == XT_TCPMSS_CLAMP_PMTU) { | 78 | if (info->mss == XT_TCPMSS_CLAMP_PMTU) { |
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
87 | newmss = info->mss; | 91 | newmss = info->mss; |
88 | 92 | ||
89 | opt = (u_int8_t *)tcph; | 93 | opt = (u_int8_t *)tcph; |
90 | for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) { | 94 | for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) { |
91 | if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS && | 95 | if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) { |
92 | opt[i+1] == TCPOLEN_MSS) { | ||
93 | u_int16_t oldmss; | 96 | u_int16_t oldmss; |
94 | 97 | ||
95 | oldmss = (opt[i+2] << 8) | opt[i+3]; | 98 | oldmss = (opt[i+2] << 8) | opt[i+3]; |
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
112 | } | 115 | } |
113 | 116 | ||
114 | /* There is data after the header so the option can't be added | 117 | /* There is data after the header so the option can't be added |
115 | without moving it, and doing so may make the SYN packet | 118 | * without moving it, and doing so may make the SYN packet |
116 | itself too large. Accept the packet unmodified instead. */ | 119 | * itself too large. Accept the packet unmodified instead. |
117 | if (tcplen > tcph->doff*4) | 120 | */ |
121 | if (len > tcp_hdrlen) | ||
118 | return 0; | 122 | return 0; |
119 | 123 | ||
120 | /* | 124 | /* |
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
143 | newmss = min(newmss, (u16)1220); | 147 | newmss = min(newmss, (u16)1220); |
144 | 148 | ||
145 | opt = (u_int8_t *)tcph + sizeof(struct tcphdr); | 149 | opt = (u_int8_t *)tcph + sizeof(struct tcphdr); |
146 | memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); | 150 | memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr)); |
147 | 151 | ||
148 | inet_proto_csum_replace2(&tcph->check, skb, | 152 | inet_proto_csum_replace2(&tcph->check, skb, |
149 | htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1); | 153 | htons(len), htons(len + TCPOLEN_MSS), 1); |
150 | opt[0] = TCPOPT_MSS; | 154 | opt[0] = TCPOPT_MSS; |
151 | opt[1] = TCPOLEN_MSS; | 155 | opt[1] = TCPOLEN_MSS; |
152 | opt[2] = (newmss & 0xff00) >> 8; | 156 | opt[2] = (newmss & 0xff00) >> 8; |
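The xt_TCPMSS changes add two length checks (the payload must hold at least a minimal TCP header, and at least tcph->doff * 4 bytes) and tighten the option-walk bound to i <= tcp_hdrlen - TCPOLEN_MSS so the 4-byte MSS option is never read past the header. A self-contained sketch of the same bounded walk over a raw TCP header:

    #include <stdint.h>

    #define TCPOPT_EOL   0
    #define TCPOPT_NOP   1
    #define TCPOPT_MSS   2
    #define TCPOLEN_MSS  4
    #define TCP_HDR_MIN 20

    /* mirrors the module's optlen() helper: NOP/EOL are one byte long, and a
     * zero length byte is clamped to one so the walk cannot loop forever */
    static int opt_len(const uint8_t *opt, int off)
    {
            return (opt[off] <= TCPOPT_NOP || opt[off + 1] == 0) ? 1 : opt[off + 1];
    }

    /* returns the MSS value, 0 if no MSS option, -1 if the header is bogus */
    static int tcp_find_mss(const uint8_t *tcp, int len)
    {
            int tcp_hdrlen, i;

            if (len < TCP_HDR_MIN)
                    return -1;
            tcp_hdrlen = (tcp[12] >> 4) * 4;        /* data offset field */
            if (tcp_hdrlen < TCP_HDR_MIN || len < tcp_hdrlen)
                    return -1;

            /* the bound leaves room for all four bytes of the MSS option */
            for (i = TCP_HDR_MIN; i <= tcp_hdrlen - TCPOLEN_MSS; i += opt_len(tcp, i))
                    if (tcp[i] == TCPOPT_MSS && tcp[i + 1] == TCPOLEN_MSS)
                            return (tcp[i + 2] << 8) | tcp[i + 3];

            return 0;
    }
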
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c index b68fa191710f..625fa1d636a0 100644 --- a/net/netfilter/xt_TCPOPTSTRIP.c +++ b/net/netfilter/xt_TCPOPTSTRIP.c | |||
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, | |||
38 | struct tcphdr *tcph; | 38 | struct tcphdr *tcph; |
39 | u_int16_t n, o; | 39 | u_int16_t n, o; |
40 | u_int8_t *opt; | 40 | u_int8_t *opt; |
41 | int len; | 41 | int len, tcp_hdrlen; |
42 | 42 | ||
43 | /* This is a fragment, no TCP header is available */ | 43 | /* This is a fragment, no TCP header is available */ |
44 | if (par->fragoff != 0) | 44 | if (par->fragoff != 0) |
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, | |||
52 | return NF_DROP; | 52 | return NF_DROP; |
53 | 53 | ||
54 | tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); | 54 | tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); |
55 | if (tcph->doff * 4 > len) | 55 | tcp_hdrlen = tcph->doff * 4; |
56 | |||
57 | if (len < tcp_hdrlen) | ||
56 | return NF_DROP; | 58 | return NF_DROP; |
57 | 59 | ||
58 | opt = (u_int8_t *)tcph; | 60 | opt = (u_int8_t *)tcph; |
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, | |||
61 | * Walk through all TCP options - if we find some option to remove, | 63 | * Walk through all TCP options - if we find some option to remove, |
62 | * set all octets to %TCPOPT_NOP and adjust checksum. | 64 | * set all octets to %TCPOPT_NOP and adjust checksum. |
63 | */ | 65 | */ |
64 | for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) { | 66 | for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) { |
65 | optl = optlen(opt, i); | 67 | optl = optlen(opt, i); |
66 | 68 | ||
67 | if (i + optl > tcp_hdrlen(skb)) | 69 | if (i + optl > tcp_hdrlen) |
68 | break; | 70 | break; |
69 | 71 | ||
70 | if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i])) | 72 | if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i])) |
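
xt_TCPOPTSTRIP gets the same treatment: the header length is read once into tcp_hdrlen after the length check, and the option walk is bounded so opt[i + 1] always stays inside the header. A stand-alone sketch of that bounded walk follows, with an invented strip_option() helper and no checksum handling.

#include <stdint.h>
#include <stdio.h>

#define TCPOPT_NOP   1
#define TCPOPT_MSS   2
#define TCP_HDR_MIN  20

static int optlen(const uint8_t *opt, int off)
{
	if (opt[off] <= TCPOPT_NOP || opt[off + 1] == 0)
		return 1;
	return opt[off + 1];
}

/* Overwrite one option kind with NOPs, using the same two guards as the
 * hunk above: "i < tcp_hdrlen - 1" keeps opt[i + 1] readable, and
 * "i + optl > tcp_hdrlen" rejects a length byte that runs past the header. */
static void strip_option(uint8_t *tcph, int tcp_hdrlen, uint8_t kind)
{
	int i, optl, j;

	for (i = TCP_HDR_MIN; i < tcp_hdrlen - 1; i += optl) {
		optl = optlen(tcph, i);
		if (i + optl > tcp_hdrlen)
			break;
		if (tcph[i] == kind)
			for (j = 0; j < optl; j++)
				tcph[i + j] = TCPOPT_NOP;
	}
}

int main(void)
{
	uint8_t hdr[24] = { [12] = 6 << 4, [20] = TCPOPT_MSS, [21] = 4,
			    [22] = 1460 >> 8, [23] = 1460 & 0xff };

	strip_option(hdr, 24, TCPOPT_MSS);
	printf("bytes 20..23: %u %u %u %u\n",
	       hdr[20], hdr[21], hdr[22], hdr[23]);    /* 1 1 1 1 */
	return 0;
}
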
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 512718adb0d5..0c741cec4d0d 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -364,7 +364,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops) | |||
364 | EXPORT_SYMBOL(genl_unregister_ops); | 364 | EXPORT_SYMBOL(genl_unregister_ops); |
365 | 365 | ||
366 | /** | 366 | /** |
367 | * genl_register_family - register a generic netlink family | 367 | * __genl_register_family - register a generic netlink family |
368 | * @family: generic netlink family | 368 | * @family: generic netlink family |
369 | * | 369 | * |
370 | * Registers the specified family after validating it first. Only one | 370 | * Registers the specified family after validating it first. Only one |
@@ -374,7 +374,7 @@ EXPORT_SYMBOL(genl_unregister_ops); | |||
374 | * | 374 | * |
375 | * Return 0 on success or a negative error code. | 375 | * Return 0 on success or a negative error code. |
376 | */ | 376 | */ |
377 | int genl_register_family(struct genl_family *family) | 377 | int __genl_register_family(struct genl_family *family) |
378 | { | 378 | { |
379 | int err = -EINVAL; | 379 | int err = -EINVAL; |
380 | 380 | ||
@@ -430,10 +430,10 @@ errout_locked: | |||
430 | errout: | 430 | errout: |
431 | return err; | 431 | return err; |
432 | } | 432 | } |
433 | EXPORT_SYMBOL(genl_register_family); | 433 | EXPORT_SYMBOL(__genl_register_family); |
434 | 434 | ||
435 | /** | 435 | /** |
436 | * genl_register_family_with_ops - register a generic netlink family | 436 | * __genl_register_family_with_ops - register a generic netlink family |
437 | * @family: generic netlink family | 437 | * @family: generic netlink family |
438 | * @ops: operations to be registered | 438 | * @ops: operations to be registered |
439 | * @n_ops: number of elements to register | 439 | * @n_ops: number of elements to register |
@@ -457,12 +457,12 @@ EXPORT_SYMBOL(genl_register_family); | |||
457 | * | 457 | * |
458 | * Return 0 on success or a negative error code. | 458 | * Return 0 on success or a negative error code. |
459 | */ | 459 | */ |
460 | int genl_register_family_with_ops(struct genl_family *family, | 460 | int __genl_register_family_with_ops(struct genl_family *family, |
461 | struct genl_ops *ops, size_t n_ops) | 461 | struct genl_ops *ops, size_t n_ops) |
462 | { | 462 | { |
463 | int err, i; | 463 | int err, i; |
464 | 464 | ||
465 | err = genl_register_family(family); | 465 | err = __genl_register_family(family); |
466 | if (err) | 466 | if (err) |
467 | return err; | 467 | return err; |
468 | 468 | ||
@@ -476,7 +476,7 @@ err_out: | |||
476 | genl_unregister_family(family); | 476 | genl_unregister_family(family); |
477 | return err; | 477 | return err; |
478 | } | 478 | } |
479 | EXPORT_SYMBOL(genl_register_family_with_ops); | 479 | EXPORT_SYMBOL(__genl_register_family_with_ops); |
480 | 480 | ||
481 | /** | 481 | /** |
482 | * genl_unregister_family - unregister generic netlink family | 482 | * genl_unregister_family - unregister generic netlink family |
@@ -544,6 +544,30 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, | |||
544 | } | 544 | } |
545 | EXPORT_SYMBOL(genlmsg_put); | 545 | EXPORT_SYMBOL(genlmsg_put); |
546 | 546 | ||
547 | static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb) | ||
548 | { | ||
549 | struct genl_ops *ops = cb->data; | ||
550 | int rc; | ||
551 | |||
552 | genl_lock(); | ||
553 | rc = ops->dumpit(skb, cb); | ||
554 | genl_unlock(); | ||
555 | return rc; | ||
556 | } | ||
557 | |||
558 | static int genl_lock_done(struct netlink_callback *cb) | ||
559 | { | ||
560 | struct genl_ops *ops = cb->data; | ||
561 | int rc = 0; | ||
562 | |||
563 | if (ops->done) { | ||
564 | genl_lock(); | ||
565 | rc = ops->done(cb); | ||
566 | genl_unlock(); | ||
567 | } | ||
568 | return rc; | ||
569 | } | ||
570 | |||
547 | static int genl_family_rcv_msg(struct genl_family *family, | 571 | static int genl_family_rcv_msg(struct genl_family *family, |
548 | struct sk_buff *skb, | 572 | struct sk_buff *skb, |
549 | struct nlmsghdr *nlh) | 573 | struct nlmsghdr *nlh) |
@@ -572,15 +596,34 @@ static int genl_family_rcv_msg(struct genl_family *family, | |||
572 | return -EPERM; | 596 | return -EPERM; |
573 | 597 | ||
574 | if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { | 598 | if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { |
575 | struct netlink_dump_control c = { | 599 | int rc; |
576 | .dump = ops->dumpit, | ||
577 | .done = ops->done, | ||
578 | }; | ||
579 | 600 | ||
580 | if (ops->dumpit == NULL) | 601 | if (ops->dumpit == NULL) |
581 | return -EOPNOTSUPP; | 602 | return -EOPNOTSUPP; |
582 | 603 | ||
583 | return netlink_dump_start(net->genl_sock, skb, nlh, &c); | 604 | if (!family->parallel_ops) { |
605 | struct netlink_dump_control c = { | ||
606 | .module = family->module, | ||
607 | .data = ops, | ||
608 | .dump = genl_lock_dumpit, | ||
609 | .done = genl_lock_done, | ||
610 | }; | ||
611 | |||
612 | genl_unlock(); | ||
613 | rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c); | ||
614 | genl_lock(); | ||
615 | |||
616 | } else { | ||
617 | struct netlink_dump_control c = { | ||
618 | .module = family->module, | ||
619 | .dump = ops->dumpit, | ||
620 | .done = ops->done, | ||
621 | }; | ||
622 | |||
623 | rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c); | ||
624 | } | ||
625 | |||
626 | return rc; | ||
584 | } | 627 | } |
585 | 628 | ||
586 | if (ops->doit == NULL) | 629 | if (ops->doit == NULL) |
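
The genetlink change renames the registration entry points and, for families without parallel_ops, routes dump callbacks through genl_lock_dumpit()/genl_lock_done(), so the family's dumpit/done still run under the genl mutex even though the dump itself is started with that mutex dropped. A reduced sketch of that wrapper-callback shape, using pthreads and invented names rather than the genetlink API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

struct dump_ops {
	int (*dumpit)(void *ctx);
	int (*done)(void *ctx);
};

/* What the core calls: take the lock, then run the family's callback. */
static int locked_dumpit(const struct dump_ops *ops, void *ctx)
{
	int rc;

	pthread_mutex_lock(&big_lock);
	rc = ops->dumpit(ctx);
	pthread_mutex_unlock(&big_lock);
	return rc;
}

static int my_dumpit(void *ctx)
{
	printf("dumping %s under the lock\n", (const char *)ctx);
	return 0;
}

int main(void)
{
	struct dump_ops ops = { .dumpit = my_dumpit, .done = NULL };

	return locked_dumpit(&ops, "family");
}
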
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 22c5f399f1cf..ab101f715447 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb) | |||
535 | { | 535 | { |
536 | struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); | 536 | struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); |
537 | 537 | ||
538 | OVS_CB(skb)->tun_key = NULL; | ||
538 | return do_execute_actions(dp, skb, acts->actions, | 539 | return do_execute_actions(dp, skb, acts->actions, |
539 | acts->actions_len, false); | 540 | acts->actions_len, false); |
540 | } | 541 | } |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index f7e3a0d84c40..f2ed7600084e 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) | |||
2076 | ovs_notify(reply, info, &ovs_dp_vport_multicast_group); | 2076 | ovs_notify(reply, info, &ovs_dp_vport_multicast_group); |
2077 | return 0; | 2077 | return 0; |
2078 | 2078 | ||
2079 | rtnl_unlock(); | ||
2080 | return 0; | ||
2081 | |||
2082 | exit_free: | 2079 | exit_free: |
2083 | kfree_skb(reply); | 2080 | kfree_skb(reply); |
2084 | exit_unlock: | 2081 | exit_unlock: |
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 5c519b121e1b..1aa84dc58777 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
@@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets) | |||
240 | struct flex_array *buckets; | 240 | struct flex_array *buckets; |
241 | int i, err; | 241 | int i, err; |
242 | 242 | ||
243 | buckets = flex_array_alloc(sizeof(struct hlist_head *), | 243 | buckets = flex_array_alloc(sizeof(struct hlist_head), |
244 | n_buckets, GFP_KERNEL); | 244 | n_buckets, GFP_KERNEL); |
245 | if (!buckets) | 245 | if (!buckets) |
246 | return NULL; | 246 | return NULL; |
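
The flow-table fix above sizes the flex_array by the element actually stored (struct hlist_head) rather than a pointer to it; on common 64-bit configs the two sizes coincide, which is how the bug stayed hidden. A small illustration of the bug class, with a stand-in struct bucket instead of the kernel types:

#include <stdio.h>
#include <stdlib.h>

struct bucket { struct bucket *first; };   /* stand-in for hlist_head */

int main(void)
{
	size_t n = 1024;
	/* wrong: sizeof(struct bucket *); right: sizeof(*buckets) */
	struct bucket *buckets = calloc(n, sizeof(*buckets));

	if (!buckets)
		return 1;
	printf("element %zu bytes, pointer %zu bytes\n",
	       sizeof(*buckets), sizeof(buckets));
	free(buckets);
	return 0;
}
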
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4b66c752eae5..75c8bbf598c8 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -3259,9 +3259,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
3259 | 3259 | ||
3260 | if (po->tp_version == TPACKET_V3) { | 3260 | if (po->tp_version == TPACKET_V3) { |
3261 | lv = sizeof(struct tpacket_stats_v3); | 3261 | lv = sizeof(struct tpacket_stats_v3); |
3262 | st.stats3.tp_packets += st.stats3.tp_drops; | ||
3262 | data = &st.stats3; | 3263 | data = &st.stats3; |
3263 | } else { | 3264 | } else { |
3264 | lv = sizeof(struct tpacket_stats); | 3265 | lv = sizeof(struct tpacket_stats); |
3266 | st.stats1.tp_packets += st.stats1.tp_drops; | ||
3265 | data = &st.stats1; | 3267 | data = &st.stats1; |
3266 | } | 3268 | } |
3267 | 3269 | ||
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 281c1bded1f6..51b968d3febb 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind) | |||
285 | return q; | 285 | return q; |
286 | } | 286 | } |
287 | 287 | ||
288 | /* The linklayer setting was not transferred from older iproute2 | ||
289 | * versions, and the rate table lookup system has been dropped from | ||
290 | * the kernel. To stay backward compatible with older iproute2 tc | ||
291 | * utils, we detect the linklayer setting by checking whether the | ||
292 | * rate table was modified. | ||
293 | * | ||
294 | * For linklayer ATM table entries, the rate table will be aligned to | ||
295 | * 48 bytes, thus some table entries will contain the same value. The | ||
296 | * mpu (min packet unit) is also encoded into the old rate table, thus | ||
297 | * starting from the mpu, we find low and high table entries for | ||
298 | * mapping this cell. If these entries contain the same value, then | ||
299 | * the rate table has been modified for linklayer ATM. | ||
300 | * | ||
301 | * This is done by rounding mpu up to the nearest 48-byte cell/entry, | ||
302 | * then rounding up to the next cell, calculating the table entry one | ||
303 | * below, and comparing the two. | ||
304 | */ | ||
305 | static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab) | ||
306 | { | ||
307 | int low = roundup(r->mpu, 48); | ||
308 | int high = roundup(low+1, 48); | ||
309 | int cell_low = low >> r->cell_log; | ||
310 | int cell_high = (high >> r->cell_log) - 1; | ||
311 | |||
312 | /* rtab is too inaccurate at rates > 100Mbit/s */ | ||
313 | if ((r->rate > (100000000/8)) || (rtab[0] == 0)) { | ||
314 | pr_debug("TC linklayer: Giving up ATM detection\n"); | ||
315 | return TC_LINKLAYER_ETHERNET; | ||
316 | } | ||
317 | |||
318 | if ((cell_high > cell_low) && (cell_high < 256) | ||
319 | && (rtab[cell_low] == rtab[cell_high])) { | ||
320 | pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n", | ||
321 | cell_low, cell_high, rtab[cell_high]); | ||
322 | return TC_LINKLAYER_ATM; | ||
323 | } | ||
324 | return TC_LINKLAYER_ETHERNET; | ||
325 | } | ||
326 | |||
288 | static struct qdisc_rate_table *qdisc_rtab_list; | 327 | static struct qdisc_rate_table *qdisc_rtab_list; |
289 | 328 | ||
290 | struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) | 329 | struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) |
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta | |||
308 | rtab->rate = *r; | 347 | rtab->rate = *r; |
309 | rtab->refcnt = 1; | 348 | rtab->refcnt = 1; |
310 | memcpy(rtab->data, nla_data(tab), 1024); | 349 | memcpy(rtab->data, nla_data(tab), 1024); |
350 | if (r->linklayer == TC_LINKLAYER_UNAWARE) | ||
351 | r->linklayer = __detect_linklayer(r, rtab->data); | ||
311 | rtab->next = qdisc_rtab_list; | 352 | rtab->next = qdisc_rtab_list; |
312 | qdisc_rtab_list = rtab; | 353 | qdisc_rtab_list = rtab; |
313 | } | 354 | } |
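
The comment block added to sch_api.c describes how __detect_linklayer() infers TC_LINKLAYER_ATM from an old-style rate table: if the table entries for the cell containing mpu and for the next 48-byte cell carry the same transmit time, the table was built with ATM cell alignment (and detection is skipped above roughly 100 Mbit/s or when rtab[0] is zero). The stand-alone sketch below reproduces that comparison against a toy rate table; the rate, mpu, and cell_log values are invented for the demo and the time units are arbitrary.

#include <stdio.h>
#include <stdint.h>

#define TC_LINKLAYER_ETHERNET 1
#define TC_LINKLAYER_ATM      2

static uint32_t xmit_time(uint64_t rate, unsigned int size)
{
	return (uint32_t)((uint64_t)size * 1000000 / rate);  /* toy time scale */
}

/* Build a 256-entry rate table the way an old tc would: one entry per
 * (i + 1) << cell_log bytes, aligned up to 48-byte cells for ATM. */
static void fill_rtab(uint32_t *rtab, uint64_t rate, int cell_log, int atm)
{
	for (int i = 0; i < 256; i++) {
		unsigned int sz = (i + 1) << cell_log;

		if (atm)
			sz = ((sz + 47) / 48) * 48;
		rtab[i] = xmit_time(rate, sz);
	}
}

static int detect_linklayer(const uint32_t *rtab, int mpu, int cell_log)
{
	int low  = ((mpu + 47) / 48) * 48;        /* roundup(mpu, 48)  */
	int high = ((low + 1 + 47) / 48) * 48;    /* next 48-byte cell */
	int cell_low  = low >> cell_log;
	int cell_high = (high >> cell_log) - 1;

	if (cell_high > cell_low && cell_high < 256 &&
	    rtab[cell_low] == rtab[cell_high])
		return TC_LINKLAYER_ATM;
	return TC_LINKLAYER_ETHERNET;
}

int main(void)
{
	uint32_t rtab[256];
	uint64_t rate = 2000000 / 8;   /* 2 Mbit/s in bytes per second */

	fill_rtab(rtab, rate, 3, 1);   /* cell_log = 3: 8-byte buckets */
	printf("ATM-shaped table -> %d\n", detect_linklayer(rtab, 0, 3));  /* 2 */
	fill_rtab(rtab, rate, 3, 0);
	printf("plain table      -> %d\n", detect_linklayer(rtab, 0, 3));  /* 1 */
	return 0;
}
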
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 4626cef4b76e..48be3d5c0d92 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/rcupdate.h> | 25 | #include <linux/rcupdate.h> |
26 | #include <linux/list.h> | 26 | #include <linux/list.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/if_vlan.h> | ||
28 | #include <net/sch_generic.h> | 29 | #include <net/sch_generic.h> |
29 | #include <net/pkt_sched.h> | 30 | #include <net/pkt_sched.h> |
30 | #include <net/dst.h> | 31 | #include <net/dst.h> |
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q) | |||
207 | 208 | ||
208 | unsigned long dev_trans_start(struct net_device *dev) | 209 | unsigned long dev_trans_start(struct net_device *dev) |
209 | { | 210 | { |
210 | unsigned long val, res = dev->trans_start; | 211 | unsigned long val, res; |
211 | unsigned int i; | 212 | unsigned int i; |
212 | 213 | ||
214 | if (is_vlan_dev(dev)) | ||
215 | dev = vlan_dev_real_dev(dev); | ||
216 | res = dev->trans_start; | ||
213 | for (i = 0; i < dev->num_tx_queues; i++) { | 217 | for (i = 0; i < dev->num_tx_queues; i++) { |
214 | val = netdev_get_tx_queue(dev, i)->trans_start; | 218 | val = netdev_get_tx_queue(dev, i)->trans_start; |
215 | if (val && time_after(val, res)) | 219 | if (val && time_after(val, res)) |
216 | res = val; | 220 | res = val; |
217 | } | 221 | } |
218 | dev->trans_start = res; | 222 | dev->trans_start = res; |
223 | |||
219 | return res; | 224 | return res; |
220 | } | 225 | } |
221 | EXPORT_SYMBOL(dev_trans_start); | 226 | EXPORT_SYMBOL(dev_trans_start); |
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, | |||
904 | memset(r, 0, sizeof(*r)); | 909 | memset(r, 0, sizeof(*r)); |
905 | r->overhead = conf->overhead; | 910 | r->overhead = conf->overhead; |
906 | r->rate_bytes_ps = conf->rate; | 911 | r->rate_bytes_ps = conf->rate; |
912 | r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); | ||
907 | r->mult = 1; | 913 | r->mult = 1; |
908 | /* | 914 | /* |
909 | * The deal here is to replace a divide by a reciprocal one | 915 | * The deal here is to replace a divide by a reciprocal one |
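
In sch_generic.c, dev_trans_start() now resolves VLAN devices to their real device before scanning the TX queues and keeps the most recent per-queue trans_start using the kernel's wraparound-safe time_after() comparison; psched_ratecfg_precompute() additionally records the linklayer. The sketch below shows only the wraparound-safe "latest timestamp" scan, with a local time_after() mirroring the kernel macro's signed-difference trick.

#include <stdio.h>

typedef unsigned long jiffies_t;

/* same shape as the kernel macro: signed view of the unsigned difference */
static int time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	/* three queues; the middle one already wrapped past ULONG_MAX */
	jiffies_t q[3] = { (jiffies_t)-5, 3, (jiffies_t)-20 };
	jiffies_t res = q[0];

	for (int i = 1; i < 3; i++)
		if (q[i] && time_after(q[i], res))
			res = q[i];

	printf("latest trans_start: %lu\n", res);   /* 3, i.e. after the wrap */
	return 0;
}
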
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 45e751527dfc..c2178b15ca6e 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1329 | struct htb_sched *q = qdisc_priv(sch); | 1329 | struct htb_sched *q = qdisc_priv(sch); |
1330 | struct htb_class *cl = (struct htb_class *)*arg, *parent; | 1330 | struct htb_class *cl = (struct htb_class *)*arg, *parent; |
1331 | struct nlattr *opt = tca[TCA_OPTIONS]; | 1331 | struct nlattr *opt = tca[TCA_OPTIONS]; |
1332 | struct qdisc_rate_table *rtab = NULL, *ctab = NULL; | ||
1332 | struct nlattr *tb[TCA_HTB_MAX + 1]; | 1333 | struct nlattr *tb[TCA_HTB_MAX + 1]; |
1333 | struct tc_htb_opt *hopt; | 1334 | struct tc_htb_opt *hopt; |
1334 | 1335 | ||
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1350 | if (!hopt->rate.rate || !hopt->ceil.rate) | 1351 | if (!hopt->rate.rate || !hopt->ceil.rate) |
1351 | goto failure; | 1352 | goto failure; |
1352 | 1353 | ||
1354 | /* Keep backward compatibility with rate_table based iproute2 tc */ | ||
1355 | if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) { | ||
1356 | rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]); | ||
1357 | if (rtab) | ||
1358 | qdisc_put_rtab(rtab); | ||
1359 | } | ||
1360 | if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) { | ||
1361 | ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]); | ||
1362 | if (ctab) | ||
1363 | qdisc_put_rtab(ctab); | ||
1364 | } | ||
1365 | |||
1353 | if (!cl) { /* new class */ | 1366 | if (!cl) { /* new class */ |
1354 | struct Qdisc *new_q; | 1367 | struct Qdisc *new_q; |
1355 | int prio; | 1368 | int prio; |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index bce5b79662a6..ab67efc64b24 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -846,12 +846,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, | |||
846 | else | 846 | else |
847 | spc_state = SCTP_ADDR_AVAILABLE; | 847 | spc_state = SCTP_ADDR_AVAILABLE; |
848 | /* Don't inform ULP about transition from PF to | 848 | /* Don't inform ULP about transition from PF to |
849 | * active state and set cwnd to 1, see SCTP | 849 | * active state and set cwnd to 1 MTU, see SCTP |
850 | * Quick failover draft section 5.1, point 5 | 850 | * Quick failover draft section 5.1, point 5 |
851 | */ | 851 | */ |
852 | if (transport->state == SCTP_PF) { | 852 | if (transport->state == SCTP_PF) { |
853 | ulp_notify = false; | 853 | ulp_notify = false; |
854 | transport->cwnd = 1; | 854 | transport->cwnd = asoc->pathmtu; |
855 | } | 855 | } |
856 | transport->state = SCTP_ACTIVE; | 856 | transport->state = SCTP_ACTIVE; |
857 | break; | 857 | break; |
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index bdbbc3fd7c14..8fdd16046d66 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -181,12 +181,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport) | |||
181 | return; | 181 | return; |
182 | } | 182 | } |
183 | 183 | ||
184 | call_rcu(&transport->rcu, sctp_transport_destroy_rcu); | ||
185 | |||
186 | sctp_packet_free(&transport->packet); | 184 | sctp_packet_free(&transport->packet); |
187 | 185 | ||
188 | if (transport->asoc) | 186 | if (transport->asoc) |
189 | sctp_association_put(transport->asoc); | 187 | sctp_association_put(transport->asoc); |
188 | |||
189 | call_rcu(&transport->rcu, sctp_transport_destroy_rcu); | ||
190 | } | 190 | } |
191 | 191 | ||
192 | /* Start T3_rtx timer if it is not already running and update the heartbeat | 192 | /* Start T3_rtx timer if it is not already running and update the heartbeat |
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 75edcfad6e26..1504bb11e4f3 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -207,10 +207,13 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base, | |||
207 | pgfrom_base -= copy; | 207 | pgfrom_base -= copy; |
208 | 208 | ||
209 | vto = kmap_atomic(*pgto); | 209 | vto = kmap_atomic(*pgto); |
210 | vfrom = kmap_atomic(*pgfrom); | 210 | if (*pgto != *pgfrom) { |
211 | memmove(vto + pgto_base, vfrom + pgfrom_base, copy); | 211 | vfrom = kmap_atomic(*pgfrom); |
212 | memcpy(vto + pgto_base, vfrom + pgfrom_base, copy); | ||
213 | kunmap_atomic(vfrom); | ||
214 | } else | ||
215 | memmove(vto + pgto_base, vto + pgfrom_base, copy); | ||
212 | flush_dcache_page(*pgto); | 216 | flush_dcache_page(*pgto); |
213 | kunmap_atomic(vfrom); | ||
214 | kunmap_atomic(vto); | 217 | kunmap_atomic(vto); |
215 | 218 | ||
216 | } while ((len -= copy) != 0); | 219 | } while ((len -= copy) != 0); |
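
The xdr hunk maps the source page only when it differs from the destination and uses memcpy() there; when source and destination share a page it keeps memmove(), because the ranges may overlap and kmap_atomic() would otherwise be taken twice on the same page. A two-line reminder of why the overlapping case needs memmove():

#include <stdio.h>
#include <string.h>

int main(void)
{
	char page[16] = "abcdefgh";

	/* same buffer, overlapping ranges: only memmove is defined here */
	memmove(page + 2, page, 8);
	page[10] = '\0';
	printf("same page: %s\n", page);           /* ababcdefgh */

	char src[] = "ABCDEFGH", dst[sizeof(src)];

	/* two distinct buffers (the two-page case): memcpy is fine */
	memcpy(dst, src, sizeof(src));
	printf("two pages: %s\n", dst);            /* ABCDEFGH */
	return 0;
}
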
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index cb29ef7ba2f0..609c30c80816 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr) | |||
460 | { | 460 | { |
461 | struct tipc_link *l_ptr; | 461 | struct tipc_link *l_ptr; |
462 | struct tipc_link *temp_l_ptr; | 462 | struct tipc_link *temp_l_ptr; |
463 | struct tipc_link_req *temp_req; | ||
463 | 464 | ||
464 | pr_info("Disabling bearer <%s>\n", b_ptr->name); | 465 | pr_info("Disabling bearer <%s>\n", b_ptr->name); |
465 | spin_lock_bh(&b_ptr->lock); | 466 | spin_lock_bh(&b_ptr->lock); |
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr) | |||
468 | list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { | 469 | list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { |
469 | tipc_link_delete(l_ptr); | 470 | tipc_link_delete(l_ptr); |
470 | } | 471 | } |
471 | if (b_ptr->link_req) | 472 | temp_req = b_ptr->link_req; |
472 | tipc_disc_delete(b_ptr->link_req); | 473 | b_ptr->link_req = NULL; |
473 | spin_unlock_bh(&b_ptr->lock); | 474 | spin_unlock_bh(&b_ptr->lock); |
475 | |||
476 | if (temp_req) | ||
477 | tipc_disc_delete(temp_req); | ||
478 | |||
474 | memset(b_ptr, 0, sizeof(struct tipc_bearer)); | 479 | memset(b_ptr, 0, sizeof(struct tipc_bearer)); |
475 | } | 480 | } |
476 | 481 | ||
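
The tipc bearer fix detaches link_req from the bearer while the spinlock is held and calls tipc_disc_delete() only after dropping it, so the heavier teardown never runs under the lock. A reduced pthread sketch of that detach-then-destroy pattern; the bearer and request types here are invented.

#include <pthread.h>
#include <stdlib.h>

struct request { int dummy; };

struct bearer {
	pthread_mutex_t lock;
	struct request *link_req;
};

static void request_delete(struct request *req)
{
	/* may block or take other locks, so it must not run under b->lock */
	free(req);
}

static void bearer_disable(struct bearer *b)
{
	struct request *req;

	pthread_mutex_lock(&b->lock);
	req = b->link_req;             /* detach while holding the lock */
	b->link_req = NULL;
	pthread_mutex_unlock(&b->lock);

	if (req)
		request_delete(req);   /* heavy work with the lock dropped */
}

int main(void)
{
	struct bearer b;

	pthread_mutex_init(&b.lock, NULL);
	b.link_req = malloc(sizeof(*b.link_req));
	bearer_disable(&b);
	pthread_mutex_destroy(&b.lock);
	return 0;
}
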
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ce8249c76827..6cc7ddd2fb7c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1257,7 +1257,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf) | |||
1257 | /* Accept only ACK or NACK message */ | 1257 | /* Accept only ACK or NACK message */ |
1258 | if (unlikely(msg_errcode(msg))) { | 1258 | if (unlikely(msg_errcode(msg))) { |
1259 | sock->state = SS_DISCONNECTING; | 1259 | sock->state = SS_DISCONNECTING; |
1260 | sk->sk_err = -ECONNREFUSED; | 1260 | sk->sk_err = ECONNREFUSED; |
1261 | retval = TIPC_OK; | 1261 | retval = TIPC_OK; |
1262 | break; | 1262 | break; |
1263 | } | 1263 | } |
@@ -1268,7 +1268,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf) | |||
1268 | res = auto_connect(sock, msg); | 1268 | res = auto_connect(sock, msg); |
1269 | if (res) { | 1269 | if (res) { |
1270 | sock->state = SS_DISCONNECTING; | 1270 | sock->state = SS_DISCONNECTING; |
1271 | sk->sk_err = res; | 1271 | sk->sk_err = -res; |
1272 | retval = TIPC_OK; | 1272 | retval = TIPC_OK; |
1273 | break; | 1273 | break; |
1274 | } | 1274 | } |
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 593071dabd1c..4d9334683f84 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c | |||
@@ -347,7 +347,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)) | |||
347 | for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { | 347 | for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { |
348 | struct vsock_sock *vsk; | 348 | struct vsock_sock *vsk; |
349 | list_for_each_entry(vsk, &vsock_connected_table[i], | 349 | list_for_each_entry(vsk, &vsock_connected_table[i], |
350 | connected_table); | 350 | connected_table) |
351 | fn(sk_vsock(vsk)); | 351 | fn(sk_vsock(vsk)); |
352 | } | 352 | } |
353 | 353 | ||
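
The vsock fix removes a stray semicolon that terminated the list_for_each_entry() statement, so fn() had been running once after the loop instead of once per connected socket. The bug class in miniature:

#include <stdio.h>

int main(void)
{
	int calls = 0;

	for (int i = 0; i < 3; i++);   /* note the ';': empty loop body */
		calls++;               /* runs once, after the loop */
	printf("with stray ';': %d call(s)\n", calls);   /* 1 */

	calls = 0;
	for (int i = 0; i < 3; i++)
		calls++;               /* the intended three calls */
	printf("without it:     %d call(s)\n", calls);   /* 3 */
	return 0;
}
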
diff --git a/net/wireless/core.c b/net/wireless/core.c index 4f9f216665e9..a8c29fa4f1b3 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -765,6 +765,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, | |||
765 | cfg80211_leave_mesh(rdev, dev); | 765 | cfg80211_leave_mesh(rdev, dev); |
766 | break; | 766 | break; |
767 | case NL80211_IFTYPE_AP: | 767 | case NL80211_IFTYPE_AP: |
768 | case NL80211_IFTYPE_P2P_GO: | ||
768 | cfg80211_stop_ap(rdev, dev); | 769 | cfg80211_stop_ap(rdev, dev); |
769 | break; | 770 | break; |
770 | default: | 771 | default: |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 25d217d90807..5f6e982cdcf4 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -441,10 +441,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, | |||
441 | goto out_unlock; | 441 | goto out_unlock; |
442 | } | 442 | } |
443 | *rdev = wiphy_to_dev((*wdev)->wiphy); | 443 | *rdev = wiphy_to_dev((*wdev)->wiphy); |
444 | cb->args[0] = (*rdev)->wiphy_idx; | 444 | /* 0 is the first index - add 1 to parse only once */ |
445 | cb->args[0] = (*rdev)->wiphy_idx + 1; | ||
445 | cb->args[1] = (*wdev)->identifier; | 446 | cb->args[1] = (*wdev)->identifier; |
446 | } else { | 447 | } else { |
447 | struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]); | 448 | /* subtract the 1 again here */ |
449 | struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); | ||
448 | struct wireless_dev *tmp; | 450 | struct wireless_dev *tmp; |
449 | 451 | ||
450 | if (!wiphy) { | 452 | if (!wiphy) { |
@@ -2620,8 +2622,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
2620 | 2622 | ||
2621 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, | 2623 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, |
2622 | NL80211_CMD_NEW_KEY); | 2624 | NL80211_CMD_NEW_KEY); |
2623 | if (IS_ERR(hdr)) | 2625 | if (!hdr) |
2624 | return PTR_ERR(hdr); | 2626 | return -ENOBUFS; |
2625 | 2627 | ||
2626 | cookie.msg = msg; | 2628 | cookie.msg = msg; |
2627 | cookie.idx = key_idx; | 2629 | cookie.idx = key_idx; |
@@ -6505,6 +6507,9 @@ static int nl80211_testmode_dump(struct sk_buff *skb, | |||
6505 | NL80211_CMD_TESTMODE); | 6507 | NL80211_CMD_TESTMODE); |
6506 | struct nlattr *tmdata; | 6508 | struct nlattr *tmdata; |
6507 | 6509 | ||
6510 | if (!hdr) | ||
6511 | break; | ||
6512 | |||
6508 | if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { | 6513 | if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { |
6509 | genlmsg_cancel(skb, hdr); | 6514 | genlmsg_cancel(skb, hdr); |
6510 | break; | 6515 | break; |
@@ -6949,9 +6954,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, | |||
6949 | 6954 | ||
6950 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, | 6955 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, |
6951 | NL80211_CMD_REMAIN_ON_CHANNEL); | 6956 | NL80211_CMD_REMAIN_ON_CHANNEL); |
6952 | 6957 | if (!hdr) { | |
6953 | if (IS_ERR(hdr)) { | 6958 | err = -ENOBUFS; |
6954 | err = PTR_ERR(hdr); | ||
6955 | goto free_msg; | 6959 | goto free_msg; |
6956 | } | 6960 | } |
6957 | 6961 | ||
@@ -7249,9 +7253,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) | |||
7249 | 7253 | ||
7250 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, | 7254 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, |
7251 | NL80211_CMD_FRAME); | 7255 | NL80211_CMD_FRAME); |
7252 | 7256 | if (!hdr) { | |
7253 | if (IS_ERR(hdr)) { | 7257 | err = -ENOBUFS; |
7254 | err = PTR_ERR(hdr); | ||
7255 | goto free_msg; | 7258 | goto free_msg; |
7256 | } | 7259 | } |
7257 | } | 7260 | } |
@@ -8130,9 +8133,8 @@ static int nl80211_probe_client(struct sk_buff *skb, | |||
8130 | 8133 | ||
8131 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, | 8134 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, |
8132 | NL80211_CMD_PROBE_CLIENT); | 8135 | NL80211_CMD_PROBE_CLIENT); |
8133 | 8136 | if (!hdr) { | |
8134 | if (IS_ERR(hdr)) { | 8137 | err = -ENOBUFS; |
8135 | err = PTR_ERR(hdr); | ||
8136 | goto free_msg; | 8138 | goto free_msg; |
8137 | } | 8139 | } |
8138 | 8140 | ||
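
The nl80211 dump fix stores wiphy_idx + 1 in cb->args[0] because 0 already means "attributes not parsed yet", and decodes it with - 1 on later passes; the other hunks switch the nl80211hdr_put() error checks from IS_ERR()/PTR_ERR() to a plain NULL test returning -ENOBUFS, since that helper returns NULL on failure. Below is a sketch of the "+1 sentinel" encoding, with an invented dump_state standing in for the netlink callback args.

#include <stdio.h>

struct dump_state { unsigned long args[2]; };

static void remember_index(struct dump_state *st, unsigned long idx)
{
	st->args[0] = idx + 1;            /* never collides with "unset" */
}

static int lookup_index(const struct dump_state *st, unsigned long *idx)
{
	if (st->args[0] == 0)
		return -1;                /* first pass: nothing stored yet */
	*idx = st->args[0] - 1;
	return 0;
}

int main(void)
{
	struct dump_state st = { { 0, 0 } };
	unsigned long idx;

	printf("before: %d\n", lookup_index(&st, &idx));   /* -1 */
	remember_index(&st, 0);                            /* index 0 is valid */
	if (lookup_index(&st, &idx) == 0)
		printf("after:  index %lu\n", idx);        /* 0 */
	return 0;
}
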
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 81c8a10d743c..20e86a95dc4e 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -976,21 +976,19 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev, | |||
976 | struct net_device *dev, u16 reason, bool wextev) | 976 | struct net_device *dev, u16 reason, bool wextev) |
977 | { | 977 | { |
978 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 978 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
979 | int err; | 979 | int err = 0; |
980 | 980 | ||
981 | ASSERT_WDEV_LOCK(wdev); | 981 | ASSERT_WDEV_LOCK(wdev); |
982 | 982 | ||
983 | kfree(wdev->connect_keys); | 983 | kfree(wdev->connect_keys); |
984 | wdev->connect_keys = NULL; | 984 | wdev->connect_keys = NULL; |
985 | 985 | ||
986 | if (wdev->conn) { | 986 | if (wdev->conn) |
987 | err = cfg80211_sme_disconnect(wdev, reason); | 987 | err = cfg80211_sme_disconnect(wdev, reason); |
988 | } else if (!rdev->ops->disconnect) { | 988 | else if (!rdev->ops->disconnect) |
989 | cfg80211_mlme_down(rdev, dev); | 989 | cfg80211_mlme_down(rdev, dev); |
990 | err = 0; | 990 | else if (wdev->current_bss) |
991 | } else { | ||
992 | err = rdev_disconnect(rdev, dev, reason); | 991 | err = rdev_disconnect(rdev, dev, reason); |
993 | } | ||
994 | 992 | ||
995 | return err; | 993 | return err; |
996 | } | 994 | } |
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index eb4a84288648..3bb2cdc13b46 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -214,5 +214,26 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb) | |||
214 | return inner_mode->afinfo->extract_output(x, skb); | 214 | return inner_mode->afinfo->extract_output(x, skb); |
215 | } | 215 | } |
216 | 216 | ||
217 | void xfrm_local_error(struct sk_buff *skb, int mtu) | ||
218 | { | ||
219 | unsigned int proto; | ||
220 | struct xfrm_state_afinfo *afinfo; | ||
221 | |||
222 | if (skb->protocol == htons(ETH_P_IP)) | ||
223 | proto = AF_INET; | ||
224 | else if (skb->protocol == htons(ETH_P_IPV6)) | ||
225 | proto = AF_INET6; | ||
226 | else | ||
227 | return; | ||
228 | |||
229 | afinfo = xfrm_state_get_afinfo(proto); | ||
230 | if (!afinfo) | ||
231 | return; | ||
232 | |||
233 | afinfo->local_error(skb, mtu); | ||
234 | xfrm_state_put_afinfo(afinfo); | ||
235 | } | ||
236 | |||
217 | EXPORT_SYMBOL_GPL(xfrm_output); | 237 | EXPORT_SYMBOL_GPL(xfrm_output); |
218 | EXPORT_SYMBOL_GPL(xfrm_inner_extract_output); | 238 | EXPORT_SYMBOL_GPL(xfrm_inner_extract_output); |
239 | EXPORT_SYMBOL_GPL(xfrm_local_error); | ||
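
xfrm_local_error() maps skb->protocol to an address family, looks up the per-family state afinfo, and invokes its local_error hook, with the get/put pair bracketing the RCU-protected lookup; that is why the xfrm_state.c hunk below un-statics those helpers. A generic sketch of that dispatch-through-an-ops-table shape, with invented names and a plain switch in place of the RCU lookup:

#include <stdio.h>

enum { PROTO_IP = 0x0800, PROTO_IPV6 = 0x86DD };

struct family_ops {
	void (*local_error)(int mtu);
};

static void ip4_local_error(int mtu) { printf("IPv4 local error, mtu %d\n", mtu); }
static void ip6_local_error(int mtu) { printf("IPv6 local error, mtu %d\n", mtu); }

static const struct family_ops *get_afinfo(int proto)
{
	static const struct family_ops v4 = { ip4_local_error };
	static const struct family_ops v6 = { ip6_local_error };

	switch (proto) {
	case PROTO_IP:   return &v4;
	case PROTO_IPV6: return &v6;
	default:         return NULL;   /* unknown family: silently ignore */
	}
}

static void local_error(int proto, int mtu)
{
	const struct family_ops *ops = get_afinfo(proto);

	if (ops)
		ops->local_error(mtu);
}

int main(void)
{
	local_error(PROTO_IP, 1400);
	local_error(PROTO_IPV6, 1280);
	local_error(0x0806, 0);        /* ARP: no handler, ignored */
	return 0;
}
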
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index e52cab3591dd..f77c371ea72b 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -320,10 +320,8 @@ static void xfrm_queue_purge(struct sk_buff_head *list) | |||
320 | { | 320 | { |
321 | struct sk_buff *skb; | 321 | struct sk_buff *skb; |
322 | 322 | ||
323 | while ((skb = skb_dequeue(list)) != NULL) { | 323 | while ((skb = skb_dequeue(list)) != NULL) |
324 | dev_put(skb->dev); | ||
325 | kfree_skb(skb); | 324 | kfree_skb(skb); |
326 | } | ||
327 | } | 325 | } |
328 | 326 | ||
329 | /* Rule must be locked. Release descendant resources, announce | 327 |
@@ -1758,7 +1756,6 @@ static void xfrm_policy_queue_process(unsigned long arg) | |||
1758 | struct sk_buff *skb; | 1756 | struct sk_buff *skb; |
1759 | struct sock *sk; | 1757 | struct sock *sk; |
1760 | struct dst_entry *dst; | 1758 | struct dst_entry *dst; |
1761 | struct net_device *dev; | ||
1762 | struct xfrm_policy *pol = (struct xfrm_policy *)arg; | 1759 | struct xfrm_policy *pol = (struct xfrm_policy *)arg; |
1763 | struct xfrm_policy_queue *pq = &pol->polq; | 1760 | struct xfrm_policy_queue *pq = &pol->polq; |
1764 | struct flowi fl; | 1761 | struct flowi fl; |
@@ -1805,7 +1802,6 @@ static void xfrm_policy_queue_process(unsigned long arg) | |||
1805 | dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path, | 1802 | dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path, |
1806 | &fl, skb->sk, 0); | 1803 | &fl, skb->sk, 0); |
1807 | if (IS_ERR(dst)) { | 1804 | if (IS_ERR(dst)) { |
1808 | dev_put(skb->dev); | ||
1809 | kfree_skb(skb); | 1805 | kfree_skb(skb); |
1810 | continue; | 1806 | continue; |
1811 | } | 1807 | } |
@@ -1814,9 +1810,7 @@ static void xfrm_policy_queue_process(unsigned long arg) | |||
1814 | skb_dst_drop(skb); | 1810 | skb_dst_drop(skb); |
1815 | skb_dst_set(skb, dst); | 1811 | skb_dst_set(skb, dst); |
1816 | 1812 | ||
1817 | dev = skb->dev; | ||
1818 | err = dst_output(skb); | 1813 | err = dst_output(skb); |
1819 | dev_put(dev); | ||
1820 | } | 1814 | } |
1821 | 1815 | ||
1822 | return; | 1816 | return; |
@@ -1839,7 +1833,6 @@ static int xdst_queue_output(struct sk_buff *skb) | |||
1839 | } | 1833 | } |
1840 | 1834 | ||
1841 | skb_dst_force(skb); | 1835 | skb_dst_force(skb); |
1842 | dev_hold(skb->dev); | ||
1843 | 1836 | ||
1844 | spin_lock_bh(&pq->hold_queue.lock); | 1837 | spin_lock_bh(&pq->hold_queue.lock); |
1845 | 1838 | ||
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 78f66fa92449..54c0acd29468 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -39,9 +39,6 @@ static DEFINE_SPINLOCK(xfrm_state_lock); | |||
39 | 39 | ||
40 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; | 40 | static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; |
41 | 41 | ||
42 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); | ||
43 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); | ||
44 | |||
45 | static inline unsigned int xfrm_dst_hash(struct net *net, | 42 | static inline unsigned int xfrm_dst_hash(struct net *net, |
46 | const xfrm_address_t *daddr, | 43 | const xfrm_address_t *daddr, |
47 | const xfrm_address_t *saddr, | 44 | const xfrm_address_t *saddr, |
@@ -1860,7 +1857,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo) | |||
1860 | } | 1857 | } |
1861 | EXPORT_SYMBOL(xfrm_state_unregister_afinfo); | 1858 | EXPORT_SYMBOL(xfrm_state_unregister_afinfo); |
1862 | 1859 | ||
1863 | static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) | 1860 | struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) |
1864 | { | 1861 | { |
1865 | struct xfrm_state_afinfo *afinfo; | 1862 | struct xfrm_state_afinfo *afinfo; |
1866 | if (unlikely(family >= NPROTO)) | 1863 | if (unlikely(family >= NPROTO)) |
@@ -1872,7 +1869,7 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) | |||
1872 | return afinfo; | 1869 | return afinfo; |
1873 | } | 1870 | } |
1874 | 1871 | ||
1875 | static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) | 1872 | void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) |
1876 | { | 1873 | { |
1877 | rcu_read_unlock(); | 1874 | rcu_read_unlock(); |
1878 | } | 1875 | } |