Diffstat (limited to 'net')
81 files changed, 635 insertions, 336 deletions
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 309129732285..c7e634af8516 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
| @@ -171,7 +171,7 @@ static size_t vlan_get_size(const struct net_device *dev) | |||
| 171 | 171 | ||
| 172 | return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */ | 172 | return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */ |
| 173 | nla_total_size(2) + /* IFLA_VLAN_ID */ | 173 | nla_total_size(2) + /* IFLA_VLAN_ID */ |
| 174 | sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */ | 174 | nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */ |
| 175 | vlan_qos_map_size(vlan->nr_ingress_mappings) + | 175 | vlan_qos_map_size(vlan->nr_ingress_mappings) + |
| 176 | vlan_qos_map_size(vlan->nr_egress_mappings); | 176 | vlan_qos_map_size(vlan->nr_egress_mappings); |
| 177 | } | 177 | } |
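The vlan_get_size() fix above matters because each netlink attribute carries a 4-byte header and is padded to a 4-byte boundary, so reserving only sizeof(struct ifla_vlan_flags) under-counts the space IFLA_VLAN_FLAGS actually needs. A rough standalone illustration of the arithmetic (not kernel code; ifla_vlan_flags is taken to be its usual two 32-bit fields):

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the netlink attribute size helpers. */
#define NLA_ALIGNTO     4
#define NLA_ALIGN(len)  (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN      NLA_ALIGN(4)    /* struct nlattr: u16 len + u16 type */

static int nla_total_size(int payload)
{
        return NLA_ALIGN(NLA_HDRLEN + payload);
}

struct ifla_vlan_flags { uint32_t flags; uint32_t mask; };   /* two 32-bit fields */

int main(void)
{
        /* The old code reserved only the payload; the fix reserves header + padding too. */
        printf("payload only  : %zu bytes\n", sizeof(struct ifla_vlan_flags));        /* 8  */
        printf("full attribute: %d bytes\n",
               nla_total_size(sizeof(struct ifla_vlan_flags)));                       /* 12 */
        return 0;
}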
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index c72d1bcdcf49..1356af660b5b 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
| @@ -65,6 +65,7 @@ static int __init batadv_init(void) | |||
| 65 | batadv_recv_handler_init(); | 65 | batadv_recv_handler_init(); |
| 66 | 66 | ||
| 67 | batadv_iv_init(); | 67 | batadv_iv_init(); |
| 68 | batadv_nc_init(); | ||
| 68 | 69 | ||
| 69 | batadv_event_workqueue = create_singlethread_workqueue("bat_events"); | 70 | batadv_event_workqueue = create_singlethread_workqueue("bat_events"); |
| 70 | 71 | ||
| @@ -142,7 +143,7 @@ int batadv_mesh_init(struct net_device *soft_iface) | |||
| 142 | if (ret < 0) | 143 | if (ret < 0) |
| 143 | goto err; | 144 | goto err; |
| 144 | 145 | ||
| 145 | ret = batadv_nc_init(bat_priv); | 146 | ret = batadv_nc_mesh_init(bat_priv); |
| 146 | if (ret < 0) | 147 | if (ret < 0) |
| 147 | goto err; | 148 | goto err; |
| 148 | 149 | ||
| @@ -167,7 +168,7 @@ void batadv_mesh_free(struct net_device *soft_iface) | |||
| 167 | batadv_vis_quit(bat_priv); | 168 | batadv_vis_quit(bat_priv); |
| 168 | 169 | ||
| 169 | batadv_gw_node_purge(bat_priv); | 170 | batadv_gw_node_purge(bat_priv); |
| 170 | batadv_nc_free(bat_priv); | 171 | batadv_nc_mesh_free(bat_priv); |
| 171 | batadv_dat_free(bat_priv); | 172 | batadv_dat_free(bat_priv); |
| 172 | batadv_bla_free(bat_priv); | 173 | batadv_bla_free(bat_priv); |
| 173 | 174 | ||
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index a487d46e0aec..4ecc0b6bf8ab 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
| @@ -35,6 +35,20 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb, | |||
| 35 | struct batadv_hard_iface *recv_if); | 35 | struct batadv_hard_iface *recv_if); |
| 36 | 36 | ||
| 37 | /** | 37 | /** |
| 38 | * batadv_nc_init - one-time initialization for network coding | ||
| 39 | */ | ||
| 40 | int __init batadv_nc_init(void) | ||
| 41 | { | ||
| 42 | int ret; | ||
| 43 | |||
| 44 | /* Register our packet type */ | ||
| 45 | ret = batadv_recv_handler_register(BATADV_CODED, | ||
| 46 | batadv_nc_recv_coded_packet); | ||
| 47 | |||
| 48 | return ret; | ||
| 49 | } | ||
| 50 | |||
| 51 | /** | ||
| 38 | * batadv_nc_start_timer - initialise the nc periodic worker | 52 | * batadv_nc_start_timer - initialise the nc periodic worker |
| 39 | * @bat_priv: the bat priv with all the soft interface information | 53 | * @bat_priv: the bat priv with all the soft interface information |
| 40 | */ | 54 | */ |
| @@ -45,10 +59,10 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv) | |||
| 45 | } | 59 | } |
| 46 | 60 | ||
| 47 | /** | 61 | /** |
| 48 | * batadv_nc_init - initialise coding hash table and start house keeping | 62 | * batadv_nc_mesh_init - initialise coding hash table and start house keeping |
| 49 | * @bat_priv: the bat priv with all the soft interface information | 63 | * @bat_priv: the bat priv with all the soft interface information |
| 50 | */ | 64 | */ |
| 51 | int batadv_nc_init(struct batadv_priv *bat_priv) | 65 | int batadv_nc_mesh_init(struct batadv_priv *bat_priv) |
| 52 | { | 66 | { |
| 53 | bat_priv->nc.timestamp_fwd_flush = jiffies; | 67 | bat_priv->nc.timestamp_fwd_flush = jiffies; |
| 54 | bat_priv->nc.timestamp_sniffed_purge = jiffies; | 68 | bat_priv->nc.timestamp_sniffed_purge = jiffies; |
| @@ -70,11 +84,6 @@ int batadv_nc_init(struct batadv_priv *bat_priv) | |||
| 70 | batadv_hash_set_lock_class(bat_priv->nc.coding_hash, | 84 | batadv_hash_set_lock_class(bat_priv->nc.coding_hash, |
| 71 | &batadv_nc_decoding_hash_lock_class_key); | 85 | &batadv_nc_decoding_hash_lock_class_key); |
| 72 | 86 | ||
| 73 | /* Register our packet type */ | ||
| 74 | if (batadv_recv_handler_register(BATADV_CODED, | ||
| 75 | batadv_nc_recv_coded_packet) < 0) | ||
| 76 | goto err; | ||
| 77 | |||
| 78 | INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); | 87 | INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); |
| 79 | batadv_nc_start_timer(bat_priv); | 88 | batadv_nc_start_timer(bat_priv); |
| 80 | 89 | ||
| @@ -1721,12 +1730,11 @@ free_nc_packet: | |||
| 1721 | } | 1730 | } |
| 1722 | 1731 | ||
| 1723 | /** | 1732 | /** |
| 1724 | * batadv_nc_free - clean up network coding memory | 1733 | * batadv_nc_mesh_free - clean up network coding memory |
| 1725 | * @bat_priv: the bat priv with all the soft interface information | 1734 | * @bat_priv: the bat priv with all the soft interface information |
| 1726 | */ | 1735 | */ |
| 1727 | void batadv_nc_free(struct batadv_priv *bat_priv) | 1736 | void batadv_nc_mesh_free(struct batadv_priv *bat_priv) |
| 1728 | { | 1737 | { |
| 1729 | batadv_recv_handler_unregister(BATADV_CODED); | ||
| 1730 | cancel_delayed_work_sync(&bat_priv->nc.work); | 1738 | cancel_delayed_work_sync(&bat_priv->nc.work); |
| 1731 | 1739 | ||
| 1732 | batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL); | 1740 | batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL); |
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 85a4ec81ad50..ddfa618e80bf 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
| @@ -22,8 +22,9 @@ | |||
| 22 | 22 | ||
| 23 | #ifdef CONFIG_BATMAN_ADV_NC | 23 | #ifdef CONFIG_BATMAN_ADV_NC |
| 24 | 24 | ||
| 25 | int batadv_nc_init(struct batadv_priv *bat_priv); | 25 | int batadv_nc_init(void); |
| 26 | void batadv_nc_free(struct batadv_priv *bat_priv); | 26 | int batadv_nc_mesh_init(struct batadv_priv *bat_priv); |
| 27 | void batadv_nc_mesh_free(struct batadv_priv *bat_priv); | ||
| 27 | void batadv_nc_update_nc_node(struct batadv_priv *bat_priv, | 28 | void batadv_nc_update_nc_node(struct batadv_priv *bat_priv, |
| 28 | struct batadv_orig_node *orig_node, | 29 | struct batadv_orig_node *orig_node, |
| 29 | struct batadv_orig_node *orig_neigh_node, | 30 | struct batadv_orig_node *orig_neigh_node, |
| @@ -46,12 +47,17 @@ int batadv_nc_init_debugfs(struct batadv_priv *bat_priv); | |||
| 46 | 47 | ||
| 47 | #else /* ifdef CONFIG_BATMAN_ADV_NC */ | 48 | #else /* ifdef CONFIG_BATMAN_ADV_NC */ |
| 48 | 49 | ||
| 49 | static inline int batadv_nc_init(struct batadv_priv *bat_priv) | 50 | static inline int batadv_nc_init(void) |
| 50 | { | 51 | { |
| 51 | return 0; | 52 | return 0; |
| 52 | } | 53 | } |
| 53 | 54 | ||
| 54 | static inline void batadv_nc_free(struct batadv_priv *bat_priv) | 55 | static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv) |
| 56 | { | ||
| 57 | return 0; | ||
| 58 | } | ||
| 59 | |||
| 60 | static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv) | ||
| 55 | { | 61 | { |
| 56 | return; | 62 | return; |
| 57 | } | 63 | } |
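Taken together, the batman-adv hunks above split network-coding setup in two: batadv_nc_init() now registers the BATADV_CODED receive handler exactly once at module load, while batadv_nc_mesh_init()/batadv_nc_mesh_free() handle only the per-mesh hash tables and worker, so bringing mesh interfaces up and down no longer registers and unregisters the global handler. A minimal standalone sketch of that lifecycle split (hypothetical names, not the kernel API):

#include <stdio.h>

/* Hypothetical model: one-time global registration vs. per-instance state. */
static int handler_registered;

static int nc_init(void)                    /* once, at "module load" */
{
        handler_registered = 1;
        return 0;
}

struct mesh { int nc_ready; };

static int nc_mesh_init(struct mesh *m)     /* once per mesh interface */
{
        m->nc_ready = 1;
        return 0;
}

static void nc_mesh_free(struct mesh *m)
{
        m->nc_ready = 0;
}

int main(void)
{
        struct mesh a, b;

        nc_init();                /* module init: register the handler once */
        nc_mesh_init(&a);         /* interfaces come and go...              */
        nc_mesh_init(&b);
        nc_mesh_free(&a);         /* ...without touching the registration   */
        nc_mesh_free(&b);
        printf("handler still registered: %d\n", handler_registered);
        return 0;
}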
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ca04163635da..e6b7fecb3af1 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
| @@ -64,7 +64,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 64 | br_flood_deliver(br, skb, false); | 64 | br_flood_deliver(br, skb, false); |
| 65 | goto out; | 65 | goto out; |
| 66 | } | 66 | } |
| 67 | if (br_multicast_rcv(br, NULL, skb)) { | 67 | if (br_multicast_rcv(br, NULL, skb, vid)) { |
| 68 | kfree_skb(skb); | 68 | kfree_skb(skb); |
| 69 | goto out; | 69 | goto out; |
| 70 | } | 70 | } |
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index ffd5874f2592..33e8f23acddd 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
| @@ -700,7 +700,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], | |||
| 700 | 700 | ||
| 701 | vid = nla_get_u16(tb[NDA_VLAN]); | 701 | vid = nla_get_u16(tb[NDA_VLAN]); |
| 702 | 702 | ||
| 703 | if (vid >= VLAN_N_VID) { | 703 | if (!vid || vid >= VLAN_VID_MASK) { |
| 704 | pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", | 704 | pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", |
| 705 | vid); | 705 | vid); |
| 706 | return -EINVAL; | 706 | return -EINVAL; |
| @@ -794,7 +794,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], | |||
| 794 | 794 | ||
| 795 | vid = nla_get_u16(tb[NDA_VLAN]); | 795 | vid = nla_get_u16(tb[NDA_VLAN]); |
| 796 | 796 | ||
| 797 | if (vid >= VLAN_N_VID) { | 797 | if (!vid || vid >= VLAN_VID_MASK) { |
| 798 | pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", | 798 | pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n", |
| 799 | vid); | 799 | vid); |
| 800 | return -EINVAL; | 800 | return -EINVAL; |
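Here (and in the matching br_netlink.c hunk further down) the bound changes from VLAN_N_VID (4096) to VLAN_VID_MASK (4095) and VID 0 is rejected outright, so only the usable 802.1Q range 1-4094 is accepted: 0 means "priority tag only" and 4095 is reserved. A small standalone version of the check, using the standard constant values:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_VID_MASK  0x0fff   /* 4095: VID bits of the TCI, itself a reserved VID      */
#define VLAN_N_VID     4096     /* number of possible VID values (the old, loose bound)  */

/* Usable 802.1Q VLAN IDs are 1..4094. */
static bool vid_is_valid(unsigned int vid)
{
        return vid != 0 && vid < VLAN_VID_MASK;
}

int main(void)
{
        unsigned int samples[] = { 0, 1, 100, 4094, 4095, 4096 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("vid %u -> %s\n", samples[i],
                       vid_is_valid(samples[i]) ? "ok" : "rejected");
        return 0;
}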
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index a2fd37ec35f7..7e73c32e205d 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
| @@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
| 80 | br_fdb_update(br, p, eth_hdr(skb)->h_source, vid); | 80 | br_fdb_update(br, p, eth_hdr(skb)->h_source, vid); |
| 81 | 81 | ||
| 82 | if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) && | 82 | if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) && |
| 83 | br_multicast_rcv(br, p, skb)) | 83 | br_multicast_rcv(br, p, skb, vid)) |
| 84 | goto drop; | 84 | goto drop; |
| 85 | 85 | ||
| 86 | if (p->state == BR_STATE_LEARNING) | 86 | if (p->state == BR_STATE_LEARNING) |
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 85a09bb5ca51..b7b1914dfa25 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
| @@ -453,7 +453,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) | |||
| 453 | call_rcu_bh(&p->rcu, br_multicast_free_pg); | 453 | call_rcu_bh(&p->rcu, br_multicast_free_pg); |
| 454 | err = 0; | 454 | err = 0; |
| 455 | 455 | ||
| 456 | if (!mp->ports && !mp->mglist && mp->timer_armed && | 456 | if (!mp->ports && !mp->mglist && |
| 457 | netif_running(br->dev)) | 457 | netif_running(br->dev)) |
| 458 | mod_timer(&mp->timer, jiffies); | 458 | mod_timer(&mp->timer, jiffies); |
| 459 | break; | 459 | break; |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index d1c578630678..686284ff3d6a 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
| @@ -272,7 +272,7 @@ static void br_multicast_del_pg(struct net_bridge *br, | |||
| 272 | del_timer(&p->timer); | 272 | del_timer(&p->timer); |
| 273 | call_rcu_bh(&p->rcu, br_multicast_free_pg); | 273 | call_rcu_bh(&p->rcu, br_multicast_free_pg); |
| 274 | 274 | ||
| 275 | if (!mp->ports && !mp->mglist && mp->timer_armed && | 275 | if (!mp->ports && !mp->mglist && |
| 276 | netif_running(br->dev)) | 276 | netif_running(br->dev)) |
| 277 | mod_timer(&mp->timer, jiffies); | 277 | mod_timer(&mp->timer, jiffies); |
| 278 | 278 | ||
| @@ -620,7 +620,6 @@ rehash: | |||
| 620 | 620 | ||
| 621 | mp->br = br; | 621 | mp->br = br; |
| 622 | mp->addr = *group; | 622 | mp->addr = *group; |
| 623 | |||
| 624 | setup_timer(&mp->timer, br_multicast_group_expired, | 623 | setup_timer(&mp->timer, br_multicast_group_expired, |
| 625 | (unsigned long)mp); | 624 | (unsigned long)mp); |
| 626 | 625 | ||
| @@ -660,6 +659,7 @@ static int br_multicast_add_group(struct net_bridge *br, | |||
| 660 | struct net_bridge_mdb_entry *mp; | 659 | struct net_bridge_mdb_entry *mp; |
| 661 | struct net_bridge_port_group *p; | 660 | struct net_bridge_port_group *p; |
| 662 | struct net_bridge_port_group __rcu **pp; | 661 | struct net_bridge_port_group __rcu **pp; |
| 662 | unsigned long now = jiffies; | ||
| 663 | int err; | 663 | int err; |
| 664 | 664 | ||
| 665 | spin_lock(&br->multicast_lock); | 665 | spin_lock(&br->multicast_lock); |
| @@ -674,6 +674,7 @@ static int br_multicast_add_group(struct net_bridge *br, | |||
| 674 | 674 | ||
| 675 | if (!port) { | 675 | if (!port) { |
| 676 | mp->mglist = true; | 676 | mp->mglist = true; |
| 677 | mod_timer(&mp->timer, now + br->multicast_membership_interval); | ||
| 677 | goto out; | 678 | goto out; |
| 678 | } | 679 | } |
| 679 | 680 | ||
| @@ -681,7 +682,7 @@ static int br_multicast_add_group(struct net_bridge *br, | |||
| 681 | (p = mlock_dereference(*pp, br)) != NULL; | 682 | (p = mlock_dereference(*pp, br)) != NULL; |
| 682 | pp = &p->next) { | 683 | pp = &p->next) { |
| 683 | if (p->port == port) | 684 | if (p->port == port) |
| 684 | goto out; | 685 | goto found; |
| 685 | if ((unsigned long)p->port < (unsigned long)port) | 686 | if ((unsigned long)p->port < (unsigned long)port) |
| 686 | break; | 687 | break; |
| 687 | } | 688 | } |
| @@ -692,6 +693,8 @@ static int br_multicast_add_group(struct net_bridge *br, | |||
| 692 | rcu_assign_pointer(*pp, p); | 693 | rcu_assign_pointer(*pp, p); |
| 693 | br_mdb_notify(br->dev, port, group, RTM_NEWMDB); | 694 | br_mdb_notify(br->dev, port, group, RTM_NEWMDB); |
| 694 | 695 | ||
| 696 | found: | ||
| 697 | mod_timer(&p->timer, now + br->multicast_membership_interval); | ||
| 695 | out: | 698 | out: |
| 696 | err = 0; | 699 | err = 0; |
| 697 | 700 | ||
| @@ -944,7 +947,8 @@ void br_multicast_disable_port(struct net_bridge_port *port) | |||
| 944 | 947 | ||
| 945 | static int br_ip4_multicast_igmp3_report(struct net_bridge *br, | 948 | static int br_ip4_multicast_igmp3_report(struct net_bridge *br, |
| 946 | struct net_bridge_port *port, | 949 | struct net_bridge_port *port, |
| 947 | struct sk_buff *skb) | 950 | struct sk_buff *skb, |
| 951 | u16 vid) | ||
| 948 | { | 952 | { |
| 949 | struct igmpv3_report *ih; | 953 | struct igmpv3_report *ih; |
| 950 | struct igmpv3_grec *grec; | 954 | struct igmpv3_grec *grec; |
| @@ -954,12 +958,10 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br, | |||
| 954 | int type; | 958 | int type; |
| 955 | int err = 0; | 959 | int err = 0; |
| 956 | __be32 group; | 960 | __be32 group; |
| 957 | u16 vid = 0; | ||
| 958 | 961 | ||
| 959 | if (!pskb_may_pull(skb, sizeof(*ih))) | 962 | if (!pskb_may_pull(skb, sizeof(*ih))) |
| 960 | return -EINVAL; | 963 | return -EINVAL; |
| 961 | 964 | ||
| 962 | br_vlan_get_tag(skb, &vid); | ||
| 963 | ih = igmpv3_report_hdr(skb); | 965 | ih = igmpv3_report_hdr(skb); |
| 964 | num = ntohs(ih->ngrec); | 966 | num = ntohs(ih->ngrec); |
| 965 | len = sizeof(*ih); | 967 | len = sizeof(*ih); |
| @@ -1002,7 +1004,8 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br, | |||
| 1002 | #if IS_ENABLED(CONFIG_IPV6) | 1004 | #if IS_ENABLED(CONFIG_IPV6) |
| 1003 | static int br_ip6_multicast_mld2_report(struct net_bridge *br, | 1005 | static int br_ip6_multicast_mld2_report(struct net_bridge *br, |
| 1004 | struct net_bridge_port *port, | 1006 | struct net_bridge_port *port, |
| 1005 | struct sk_buff *skb) | 1007 | struct sk_buff *skb, |
| 1008 | u16 vid) | ||
| 1006 | { | 1009 | { |
| 1007 | struct icmp6hdr *icmp6h; | 1010 | struct icmp6hdr *icmp6h; |
| 1008 | struct mld2_grec *grec; | 1011 | struct mld2_grec *grec; |
| @@ -1010,12 +1013,10 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, | |||
| 1010 | int len; | 1013 | int len; |
| 1011 | int num; | 1014 | int num; |
| 1012 | int err = 0; | 1015 | int err = 0; |
| 1013 | u16 vid = 0; | ||
| 1014 | 1016 | ||
| 1015 | if (!pskb_may_pull(skb, sizeof(*icmp6h))) | 1017 | if (!pskb_may_pull(skb, sizeof(*icmp6h))) |
| 1016 | return -EINVAL; | 1018 | return -EINVAL; |
| 1017 | 1019 | ||
| 1018 | br_vlan_get_tag(skb, &vid); | ||
| 1019 | icmp6h = icmp6_hdr(skb); | 1020 | icmp6h = icmp6_hdr(skb); |
| 1020 | num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); | 1021 | num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); |
| 1021 | len = sizeof(*icmp6h); | 1022 | len = sizeof(*icmp6h); |
| @@ -1138,7 +1139,8 @@ static void br_multicast_query_received(struct net_bridge *br, | |||
| 1138 | 1139 | ||
| 1139 | static int br_ip4_multicast_query(struct net_bridge *br, | 1140 | static int br_ip4_multicast_query(struct net_bridge *br, |
| 1140 | struct net_bridge_port *port, | 1141 | struct net_bridge_port *port, |
| 1141 | struct sk_buff *skb) | 1142 | struct sk_buff *skb, |
| 1143 | u16 vid) | ||
| 1142 | { | 1144 | { |
| 1143 | const struct iphdr *iph = ip_hdr(skb); | 1145 | const struct iphdr *iph = ip_hdr(skb); |
| 1144 | struct igmphdr *ih = igmp_hdr(skb); | 1146 | struct igmphdr *ih = igmp_hdr(skb); |
| @@ -1150,7 +1152,6 @@ static int br_ip4_multicast_query(struct net_bridge *br, | |||
| 1150 | unsigned long now = jiffies; | 1152 | unsigned long now = jiffies; |
| 1151 | __be32 group; | 1153 | __be32 group; |
| 1152 | int err = 0; | 1154 | int err = 0; |
| 1153 | u16 vid = 0; | ||
| 1154 | 1155 | ||
| 1155 | spin_lock(&br->multicast_lock); | 1156 | spin_lock(&br->multicast_lock); |
| 1156 | if (!netif_running(br->dev) || | 1157 | if (!netif_running(br->dev) || |
| @@ -1186,14 +1187,10 @@ static int br_ip4_multicast_query(struct net_bridge *br, | |||
| 1186 | if (!group) | 1187 | if (!group) |
| 1187 | goto out; | 1188 | goto out; |
| 1188 | 1189 | ||
| 1189 | br_vlan_get_tag(skb, &vid); | ||
| 1190 | mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid); | 1190 | mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid); |
| 1191 | if (!mp) | 1191 | if (!mp) |
| 1192 | goto out; | 1192 | goto out; |
| 1193 | 1193 | ||
| 1194 | mod_timer(&mp->timer, now + br->multicast_membership_interval); | ||
| 1195 | mp->timer_armed = true; | ||
| 1196 | |||
| 1197 | max_delay *= br->multicast_last_member_count; | 1194 | max_delay *= br->multicast_last_member_count; |
| 1198 | 1195 | ||
| 1199 | if (mp->mglist && | 1196 | if (mp->mglist && |
| @@ -1219,7 +1216,8 @@ out: | |||
| 1219 | #if IS_ENABLED(CONFIG_IPV6) | 1216 | #if IS_ENABLED(CONFIG_IPV6) |
| 1220 | static int br_ip6_multicast_query(struct net_bridge *br, | 1217 | static int br_ip6_multicast_query(struct net_bridge *br, |
| 1221 | struct net_bridge_port *port, | 1218 | struct net_bridge_port *port, |
| 1222 | struct sk_buff *skb) | 1219 | struct sk_buff *skb, |
| 1220 | u16 vid) | ||
| 1223 | { | 1221 | { |
| 1224 | const struct ipv6hdr *ip6h = ipv6_hdr(skb); | 1222 | const struct ipv6hdr *ip6h = ipv6_hdr(skb); |
| 1225 | struct mld_msg *mld; | 1223 | struct mld_msg *mld; |
| @@ -1231,7 +1229,6 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
| 1231 | unsigned long now = jiffies; | 1229 | unsigned long now = jiffies; |
| 1232 | const struct in6_addr *group = NULL; | 1230 | const struct in6_addr *group = NULL; |
| 1233 | int err = 0; | 1231 | int err = 0; |
| 1234 | u16 vid = 0; | ||
| 1235 | 1232 | ||
| 1236 | spin_lock(&br->multicast_lock); | 1233 | spin_lock(&br->multicast_lock); |
| 1237 | if (!netif_running(br->dev) || | 1234 | if (!netif_running(br->dev) || |
| @@ -1265,14 +1262,10 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
| 1265 | if (!group) | 1262 | if (!group) |
| 1266 | goto out; | 1263 | goto out; |
| 1267 | 1264 | ||
| 1268 | br_vlan_get_tag(skb, &vid); | ||
| 1269 | mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid); | 1265 | mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid); |
| 1270 | if (!mp) | 1266 | if (!mp) |
| 1271 | goto out; | 1267 | goto out; |
| 1272 | 1268 | ||
| 1273 | mod_timer(&mp->timer, now + br->multicast_membership_interval); | ||
| 1274 | mp->timer_armed = true; | ||
| 1275 | |||
| 1276 | max_delay *= br->multicast_last_member_count; | 1269 | max_delay *= br->multicast_last_member_count; |
| 1277 | if (mp->mglist && | 1270 | if (mp->mglist && |
| 1278 | (timer_pending(&mp->timer) ? | 1271 | (timer_pending(&mp->timer) ? |
| @@ -1358,7 +1351,7 @@ static void br_multicast_leave_group(struct net_bridge *br, | |||
| 1358 | call_rcu_bh(&p->rcu, br_multicast_free_pg); | 1351 | call_rcu_bh(&p->rcu, br_multicast_free_pg); |
| 1359 | br_mdb_notify(br->dev, port, group, RTM_DELMDB); | 1352 | br_mdb_notify(br->dev, port, group, RTM_DELMDB); |
| 1360 | 1353 | ||
| 1361 | if (!mp->ports && !mp->mglist && mp->timer_armed && | 1354 | if (!mp->ports && !mp->mglist && |
| 1362 | netif_running(br->dev)) | 1355 | netif_running(br->dev)) |
| 1363 | mod_timer(&mp->timer, jiffies); | 1356 | mod_timer(&mp->timer, jiffies); |
| 1364 | } | 1357 | } |
| @@ -1370,12 +1363,30 @@ static void br_multicast_leave_group(struct net_bridge *br, | |||
| 1370 | br->multicast_last_member_interval; | 1363 | br->multicast_last_member_interval; |
| 1371 | 1364 | ||
| 1372 | if (!port) { | 1365 | if (!port) { |
| 1373 | if (mp->mglist && mp->timer_armed && | 1366 | if (mp->mglist && |
| 1374 | (timer_pending(&mp->timer) ? | 1367 | (timer_pending(&mp->timer) ? |
| 1375 | time_after(mp->timer.expires, time) : | 1368 | time_after(mp->timer.expires, time) : |
| 1376 | try_to_del_timer_sync(&mp->timer) >= 0)) { | 1369 | try_to_del_timer_sync(&mp->timer) >= 0)) { |
| 1377 | mod_timer(&mp->timer, time); | 1370 | mod_timer(&mp->timer, time); |
| 1378 | } | 1371 | } |
| 1372 | |||
| 1373 | goto out; | ||
| 1374 | } | ||
| 1375 | |||
| 1376 | for (p = mlock_dereference(mp->ports, br); | ||
| 1377 | p != NULL; | ||
| 1378 | p = mlock_dereference(p->next, br)) { | ||
| 1379 | if (p->port != port) | ||
| 1380 | continue; | ||
| 1381 | |||
| 1382 | if (!hlist_unhashed(&p->mglist) && | ||
| 1383 | (timer_pending(&p->timer) ? | ||
| 1384 | time_after(p->timer.expires, time) : | ||
| 1385 | try_to_del_timer_sync(&p->timer) >= 0)) { | ||
| 1386 | mod_timer(&p->timer, time); | ||
| 1387 | } | ||
| 1388 | |||
| 1389 | break; | ||
| 1379 | } | 1390 | } |
| 1380 | out: | 1391 | out: |
| 1381 | spin_unlock(&br->multicast_lock); | 1392 | spin_unlock(&br->multicast_lock); |
| @@ -1424,7 +1435,8 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, | |||
| 1424 | 1435 | ||
| 1425 | static int br_multicast_ipv4_rcv(struct net_bridge *br, | 1436 | static int br_multicast_ipv4_rcv(struct net_bridge *br, |
| 1426 | struct net_bridge_port *port, | 1437 | struct net_bridge_port *port, |
| 1427 | struct sk_buff *skb) | 1438 | struct sk_buff *skb, |
| 1439 | u16 vid) | ||
| 1428 | { | 1440 | { |
| 1429 | struct sk_buff *skb2 = skb; | 1441 | struct sk_buff *skb2 = skb; |
| 1430 | const struct iphdr *iph; | 1442 | const struct iphdr *iph; |
| @@ -1432,7 +1444,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, | |||
| 1432 | unsigned int len; | 1444 | unsigned int len; |
| 1433 | unsigned int offset; | 1445 | unsigned int offset; |
| 1434 | int err; | 1446 | int err; |
| 1435 | u16 vid = 0; | ||
| 1436 | 1447 | ||
| 1437 | /* We treat OOM as packet loss for now. */ | 1448 | /* We treat OOM as packet loss for now. */ |
| 1438 | if (!pskb_may_pull(skb, sizeof(*iph))) | 1449 | if (!pskb_may_pull(skb, sizeof(*iph))) |
| @@ -1493,7 +1504,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, | |||
| 1493 | 1504 | ||
| 1494 | err = 0; | 1505 | err = 0; |
| 1495 | 1506 | ||
| 1496 | br_vlan_get_tag(skb2, &vid); | ||
| 1497 | BR_INPUT_SKB_CB(skb)->igmp = 1; | 1507 | BR_INPUT_SKB_CB(skb)->igmp = 1; |
| 1498 | ih = igmp_hdr(skb2); | 1508 | ih = igmp_hdr(skb2); |
| 1499 | 1509 | ||
| @@ -1504,10 +1514,10 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, | |||
| 1504 | err = br_ip4_multicast_add_group(br, port, ih->group, vid); | 1514 | err = br_ip4_multicast_add_group(br, port, ih->group, vid); |
| 1505 | break; | 1515 | break; |
| 1506 | case IGMPV3_HOST_MEMBERSHIP_REPORT: | 1516 | case IGMPV3_HOST_MEMBERSHIP_REPORT: |
| 1507 | err = br_ip4_multicast_igmp3_report(br, port, skb2); | 1517 | err = br_ip4_multicast_igmp3_report(br, port, skb2, vid); |
| 1508 | break; | 1518 | break; |
| 1509 | case IGMP_HOST_MEMBERSHIP_QUERY: | 1519 | case IGMP_HOST_MEMBERSHIP_QUERY: |
| 1510 | err = br_ip4_multicast_query(br, port, skb2); | 1520 | err = br_ip4_multicast_query(br, port, skb2, vid); |
| 1511 | break; | 1521 | break; |
| 1512 | case IGMP_HOST_LEAVE_MESSAGE: | 1522 | case IGMP_HOST_LEAVE_MESSAGE: |
| 1513 | br_ip4_multicast_leave_group(br, port, ih->group, vid); | 1523 | br_ip4_multicast_leave_group(br, port, ih->group, vid); |
| @@ -1525,7 +1535,8 @@ err_out: | |||
| 1525 | #if IS_ENABLED(CONFIG_IPV6) | 1535 | #if IS_ENABLED(CONFIG_IPV6) |
| 1526 | static int br_multicast_ipv6_rcv(struct net_bridge *br, | 1536 | static int br_multicast_ipv6_rcv(struct net_bridge *br, |
| 1527 | struct net_bridge_port *port, | 1537 | struct net_bridge_port *port, |
| 1528 | struct sk_buff *skb) | 1538 | struct sk_buff *skb, |
| 1539 | u16 vid) | ||
| 1529 | { | 1540 | { |
| 1530 | struct sk_buff *skb2; | 1541 | struct sk_buff *skb2; |
| 1531 | const struct ipv6hdr *ip6h; | 1542 | const struct ipv6hdr *ip6h; |
| @@ -1535,7 +1546,6 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, | |||
| 1535 | unsigned int len; | 1546 | unsigned int len; |
| 1536 | int offset; | 1547 | int offset; |
| 1537 | int err; | 1548 | int err; |
| 1538 | u16 vid = 0; | ||
| 1539 | 1549 | ||
| 1540 | if (!pskb_may_pull(skb, sizeof(*ip6h))) | 1550 | if (!pskb_may_pull(skb, sizeof(*ip6h))) |
| 1541 | return -EINVAL; | 1551 | return -EINVAL; |
| @@ -1625,7 +1635,6 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, | |||
| 1625 | 1635 | ||
| 1626 | err = 0; | 1636 | err = 0; |
| 1627 | 1637 | ||
| 1628 | br_vlan_get_tag(skb, &vid); | ||
| 1629 | BR_INPUT_SKB_CB(skb)->igmp = 1; | 1638 | BR_INPUT_SKB_CB(skb)->igmp = 1; |
| 1630 | 1639 | ||
| 1631 | switch (icmp6_type) { | 1640 | switch (icmp6_type) { |
| @@ -1642,10 +1651,10 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, | |||
| 1642 | break; | 1651 | break; |
| 1643 | } | 1652 | } |
| 1644 | case ICMPV6_MLD2_REPORT: | 1653 | case ICMPV6_MLD2_REPORT: |
| 1645 | err = br_ip6_multicast_mld2_report(br, port, skb2); | 1654 | err = br_ip6_multicast_mld2_report(br, port, skb2, vid); |
| 1646 | break; | 1655 | break; |
| 1647 | case ICMPV6_MGM_QUERY: | 1656 | case ICMPV6_MGM_QUERY: |
| 1648 | err = br_ip6_multicast_query(br, port, skb2); | 1657 | err = br_ip6_multicast_query(br, port, skb2, vid); |
| 1649 | break; | 1658 | break; |
| 1650 | case ICMPV6_MGM_REDUCTION: | 1659 | case ICMPV6_MGM_REDUCTION: |
| 1651 | { | 1660 | { |
| @@ -1666,7 +1675,7 @@ out: | |||
| 1666 | #endif | 1675 | #endif |
| 1667 | 1676 | ||
| 1668 | int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, | 1677 | int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, |
| 1669 | struct sk_buff *skb) | 1678 | struct sk_buff *skb, u16 vid) |
| 1670 | { | 1679 | { |
| 1671 | BR_INPUT_SKB_CB(skb)->igmp = 0; | 1680 | BR_INPUT_SKB_CB(skb)->igmp = 0; |
| 1672 | BR_INPUT_SKB_CB(skb)->mrouters_only = 0; | 1681 | BR_INPUT_SKB_CB(skb)->mrouters_only = 0; |
| @@ -1676,10 +1685,10 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, | |||
| 1676 | 1685 | ||
| 1677 | switch (skb->protocol) { | 1686 | switch (skb->protocol) { |
| 1678 | case htons(ETH_P_IP): | 1687 | case htons(ETH_P_IP): |
| 1679 | return br_multicast_ipv4_rcv(br, port, skb); | 1688 | return br_multicast_ipv4_rcv(br, port, skb, vid); |
| 1680 | #if IS_ENABLED(CONFIG_IPV6) | 1689 | #if IS_ENABLED(CONFIG_IPV6) |
| 1681 | case htons(ETH_P_IPV6): | 1690 | case htons(ETH_P_IPV6): |
| 1682 | return br_multicast_ipv6_rcv(br, port, skb); | 1691 | return br_multicast_ipv6_rcv(br, port, skb, vid); |
| 1683 | #endif | 1692 | #endif |
| 1684 | } | 1693 | } |
| 1685 | 1694 | ||
| @@ -1798,7 +1807,6 @@ void br_multicast_stop(struct net_bridge *br) | |||
| 1798 | hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], | 1807 | hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], |
| 1799 | hlist[ver]) { | 1808 | hlist[ver]) { |
| 1800 | del_timer(&mp->timer); | 1809 | del_timer(&mp->timer); |
| 1801 | mp->timer_armed = false; | ||
| 1802 | call_rcu_bh(&mp->rcu, br_multicast_free_group); | 1810 | call_rcu_bh(&mp->rcu, br_multicast_free_group); |
| 1803 | } | 1811 | } |
| 1804 | } | 1812 | } |
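With the timer_armed flag gone, the group expiry timer is now (re)armed from br_multicast_add_group() whenever an IGMP/MLD membership report arrives, and the per-port timer is refreshed in the leave path, instead of arming timers from the query handlers. The snooping rule this implements, keeping a group only while reports keep arriving within the membership interval, can be modelled in isolation (simplified sketch, not the bridge code; 260 s is the usual bridge default):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Simplified model of a snooped multicast group entry. */
struct mdb_entry {
        time_t expires;                         /* like mod_timer(&mp->timer, ...) */
};

static const int membership_interval = 260;     /* seconds; the usual bridge default */

static void report_received(struct mdb_entry *mp, time_t now)
{
        /* Every membership report pushes the expiry out by one interval. */
        mp->expires = now + membership_interval;
}

static bool group_expired(const struct mdb_entry *mp, time_t now)
{
        return now >= mp->expires;
}

int main(void)
{
        struct mdb_entry mp = { 0 };
        time_t now = time(NULL);

        report_received(&mp, now);
        printf("right after a report: expired=%d\n", group_expired(&mp, now));
        printf("265 s with no report: expired=%d\n", group_expired(&mp, now + 265));
        return 0;
}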
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e74ddc1c29a8..f75d92e4f96b 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
| @@ -243,7 +243,7 @@ static int br_afspec(struct net_bridge *br, | |||
| 243 | 243 | ||
| 244 | vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); | 244 | vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]); |
| 245 | 245 | ||
| 246 | if (vinfo->vid >= VLAN_N_VID) | 246 | if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK) |
| 247 | return -EINVAL; | 247 | return -EINVAL; |
| 248 | 248 | ||
| 249 | switch (cmd) { | 249 | switch (cmd) { |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index efb57d911569..2e8244efb262 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
| @@ -126,7 +126,6 @@ struct net_bridge_mdb_entry | |||
| 126 | struct timer_list timer; | 126 | struct timer_list timer; |
| 127 | struct br_ip addr; | 127 | struct br_ip addr; |
| 128 | bool mglist; | 128 | bool mglist; |
| 129 | bool timer_armed; | ||
| 130 | }; | 129 | }; |
| 131 | 130 | ||
| 132 | struct net_bridge_mdb_htable | 131 | struct net_bridge_mdb_htable |
| @@ -452,7 +451,8 @@ extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __us | |||
| 452 | extern unsigned int br_mdb_rehash_seq; | 451 | extern unsigned int br_mdb_rehash_seq; |
| 453 | extern int br_multicast_rcv(struct net_bridge *br, | 452 | extern int br_multicast_rcv(struct net_bridge *br, |
| 454 | struct net_bridge_port *port, | 453 | struct net_bridge_port *port, |
| 455 | struct sk_buff *skb); | 454 | struct sk_buff *skb, |
| 455 | u16 vid); | ||
| 456 | extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, | 456 | extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, |
| 457 | struct sk_buff *skb, u16 vid); | 457 | struct sk_buff *skb, u16 vid); |
| 458 | extern void br_multicast_add_port(struct net_bridge_port *port); | 458 | extern void br_multicast_add_port(struct net_bridge_port *port); |
| @@ -523,7 +523,8 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br, | |||
| 523 | #else | 523 | #else |
| 524 | static inline int br_multicast_rcv(struct net_bridge *br, | 524 | static inline int br_multicast_rcv(struct net_bridge *br, |
| 525 | struct net_bridge_port *port, | 525 | struct net_bridge_port *port, |
| 526 | struct sk_buff *skb) | 526 | struct sk_buff *skb, |
| 527 | u16 vid) | ||
| 527 | { | 528 | { |
| 528 | return 0; | 529 | return 0; |
| 529 | } | 530 | } |
| @@ -643,9 +644,7 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v) | |||
| 643 | * vid wasn't set | 644 | * vid wasn't set |
| 644 | */ | 645 | */ |
| 645 | smp_rmb(); | 646 | smp_rmb(); |
| 646 | return (v->pvid & VLAN_TAG_PRESENT) ? | 647 | return v->pvid ?: VLAN_N_VID; |
| 647 | (v->pvid & ~VLAN_TAG_PRESENT) : | ||
| 648 | VLAN_N_VID; | ||
| 649 | } | 648 | } |
| 650 | 649 | ||
| 651 | #else | 650 | #else |
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 108084a04671..656a6f3e40de 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
| @@ -134,7 +134,7 @@ static void br_stp_start(struct net_bridge *br) | |||
| 134 | 134 | ||
| 135 | if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY) | 135 | if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY) |
| 136 | __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY); | 136 | __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY); |
| 137 | else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY) | 137 | else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY) |
| 138 | __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY); | 138 | __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY); |
| 139 | 139 | ||
| 140 | if (r == 0) { | 140 | if (r == 0) { |
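The one-character fix above turns the second branch into a real upper-bound clamp: forward delay is raised to BR_MIN_FORWARD_DELAY when too small and capped at BR_MAX_FORWARD_DELAY when too large, whereas the old '<' meant an over-range value was never capped. The intended clamp in plain C (example bounds only):

#include <stdio.h>

/* Sketch of the corrected logic: raise to the minimum when too small,
 * cap at the maximum when too large. */
static unsigned long clamp_forward_delay(unsigned long delay,
                                         unsigned long min, unsigned long max)
{
        if (delay < min)
                return min;
        if (delay > max)
                return max;
        return delay;
}

int main(void)
{
        /* With the old '<' test the second case was unreachable for large values. */
        printf("%lu\n", clamp_forward_delay(1, 2, 30));     /* -> 2  */
        printf("%lu\n", clamp_forward_delay(100, 2, 30));   /* -> 30 */
        return 0;
}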
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 9a9ffe7e4019..53f0990eab58 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
| @@ -45,37 +45,34 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags) | |||
| 45 | return 0; | 45 | return 0; |
| 46 | } | 46 | } |
| 47 | 47 | ||
| 48 | if (vid) { | 48 | if (v->port_idx) { |
| 49 | if (v->port_idx) { | 49 | p = v->parent.port; |
| 50 | p = v->parent.port; | 50 | br = p->br; |
| 51 | br = p->br; | 51 | dev = p->dev; |
| 52 | dev = p->dev; | 52 | } else { |
| 53 | } else { | 53 | br = v->parent.br; |
| 54 | br = v->parent.br; | 54 | dev = br->dev; |
| 55 | dev = br->dev; | 55 | } |
| 56 | } | 56 | ops = dev->netdev_ops; |
| 57 | ops = dev->netdev_ops; | 57 | |
| 58 | 58 | if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { | |
| 59 | if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { | 59 | /* Add VLAN to the device filter if it is supported. |
| 60 | /* Add VLAN to the device filter if it is supported. | 60 | * Stricly speaking, this is not necessary now, since |
| 61 | * Stricly speaking, this is not necessary now, since | 61 | * devices are made promiscuous by the bridge, but if |
| 62 | * devices are made promiscuous by the bridge, but if | 62 | * that ever changes this code will allow tagged |
| 63 | * that ever changes this code will allow tagged | 63 | * traffic to enter the bridge. |
| 64 | * traffic to enter the bridge. | 64 | */ |
| 65 | */ | 65 | err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q), |
| 66 | err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q), | 66 | vid); |
| 67 | vid); | 67 | if (err) |
| 68 | if (err) | 68 | return err; |
| 69 | return err; | 69 | } |
| 70 | } | ||
| 71 | |||
| 72 | err = br_fdb_insert(br, p, dev->dev_addr, vid); | ||
| 73 | if (err) { | ||
| 74 | br_err(br, "failed insert local address into bridge " | ||
| 75 | "forwarding table\n"); | ||
| 76 | goto out_filt; | ||
| 77 | } | ||
| 78 | 70 | ||
| 71 | err = br_fdb_insert(br, p, dev->dev_addr, vid); | ||
| 72 | if (err) { | ||
| 73 | br_err(br, "failed insert local address into bridge " | ||
| 74 | "forwarding table\n"); | ||
| 75 | goto out_filt; | ||
| 79 | } | 76 | } |
| 80 | 77 | ||
| 81 | set_bit(vid, v->vlan_bitmap); | 78 | set_bit(vid, v->vlan_bitmap); |
| @@ -98,7 +95,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid) | |||
| 98 | __vlan_delete_pvid(v, vid); | 95 | __vlan_delete_pvid(v, vid); |
| 99 | clear_bit(vid, v->untagged_bitmap); | 96 | clear_bit(vid, v->untagged_bitmap); |
| 100 | 97 | ||
| 101 | if (v->port_idx && vid) { | 98 | if (v->port_idx) { |
| 102 | struct net_device *dev = v->parent.port->dev; | 99 | struct net_device *dev = v->parent.port->dev; |
| 103 | const struct net_device_ops *ops = dev->netdev_ops; | 100 | const struct net_device_ops *ops = dev->netdev_ops; |
| 104 | 101 | ||
| @@ -192,6 +189,8 @@ out: | |||
| 192 | bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, | 189 | bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, |
| 193 | struct sk_buff *skb, u16 *vid) | 190 | struct sk_buff *skb, u16 *vid) |
| 194 | { | 191 | { |
| 192 | int err; | ||
| 193 | |||
| 195 | /* If VLAN filtering is disabled on the bridge, all packets are | 194 | /* If VLAN filtering is disabled on the bridge, all packets are |
| 196 | * permitted. | 195 | * permitted. |
| 197 | */ | 196 | */ |
| @@ -204,20 +203,32 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, | |||
| 204 | if (!v) | 203 | if (!v) |
| 205 | return false; | 204 | return false; |
| 206 | 205 | ||
| 207 | if (br_vlan_get_tag(skb, vid)) { | 206 | err = br_vlan_get_tag(skb, vid); |
| 207 | if (!*vid) { | ||
| 208 | u16 pvid = br_get_pvid(v); | 208 | u16 pvid = br_get_pvid(v); |
| 209 | 209 | ||
| 210 | /* Frame did not have a tag. See if pvid is set | 210 | /* Frame had a tag with VID 0 or did not have a tag. |
| 211 | * on this port. That tells us which vlan untagged | 211 | * See if pvid is set on this port. That tells us which |
| 212 | * traffic belongs to. | 212 | * vlan untagged or priority-tagged traffic belongs to. |
| 213 | */ | 213 | */ |
| 214 | if (pvid == VLAN_N_VID) | 214 | if (pvid == VLAN_N_VID) |
| 215 | return false; | 215 | return false; |
| 216 | 216 | ||
| 217 | /* PVID is set on this port. Any untagged ingress | 217 | /* PVID is set on this port. Any untagged or priority-tagged |
| 218 | * frame is considered to belong to this vlan. | 218 | * ingress frame is considered to belong to this vlan. |
| 219 | */ | 219 | */ |
| 220 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid); | 220 | *vid = pvid; |
| 221 | if (likely(err)) | ||
| 222 | /* Untagged Frame. */ | ||
| 223 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid); | ||
| 224 | else | ||
| 225 | /* Priority-tagged Frame. | ||
| 226 | * At this point, We know that skb->vlan_tci had | ||
| 227 | * VLAN_TAG_PRESENT bit and its VID field was 0x000. | ||
| 228 | * We update only VID field and preserve PCP field. | ||
| 229 | */ | ||
| 230 | skb->vlan_tci |= pvid; | ||
| 231 | |||
| 221 | return true; | 232 | return true; |
| 222 | } | 233 | } |
| 223 | 234 | ||
| @@ -248,7 +259,9 @@ bool br_allowed_egress(struct net_bridge *br, | |||
| 248 | return false; | 259 | return false; |
| 249 | } | 260 | } |
| 250 | 261 | ||
| 251 | /* Must be protected by RTNL */ | 262 | /* Must be protected by RTNL. |
| 263 | * Must be called with vid in range from 1 to 4094 inclusive. | ||
| 264 | */ | ||
| 252 | int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags) | 265 | int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags) |
| 253 | { | 266 | { |
| 254 | struct net_port_vlans *pv = NULL; | 267 | struct net_port_vlans *pv = NULL; |
| @@ -278,7 +291,9 @@ out: | |||
| 278 | return err; | 291 | return err; |
| 279 | } | 292 | } |
| 280 | 293 | ||
| 281 | /* Must be protected by RTNL */ | 294 | /* Must be protected by RTNL. |
| 295 | * Must be called with vid in range from 1 to 4094 inclusive. | ||
| 296 | */ | ||
| 282 | int br_vlan_delete(struct net_bridge *br, u16 vid) | 297 | int br_vlan_delete(struct net_bridge *br, u16 vid) |
| 283 | { | 298 | { |
| 284 | struct net_port_vlans *pv; | 299 | struct net_port_vlans *pv; |
| @@ -289,14 +304,9 @@ int br_vlan_delete(struct net_bridge *br, u16 vid) | |||
| 289 | if (!pv) | 304 | if (!pv) |
| 290 | return -EINVAL; | 305 | return -EINVAL; |
| 291 | 306 | ||
| 292 | if (vid) { | 307 | spin_lock_bh(&br->hash_lock); |
| 293 | /* If the VID !=0 remove fdb for this vid. VID 0 is special | 308 | fdb_delete_by_addr(br, br->dev->dev_addr, vid); |
| 294 | * in that it's the default and is always there in the fdb. | 309 | spin_unlock_bh(&br->hash_lock); |
| 295 | */ | ||
| 296 | spin_lock_bh(&br->hash_lock); | ||
| 297 | fdb_delete_by_addr(br, br->dev->dev_addr, vid); | ||
| 298 | spin_unlock_bh(&br->hash_lock); | ||
| 299 | } | ||
| 300 | 310 | ||
| 301 | __vlan_del(pv, vid); | 311 | __vlan_del(pv, vid); |
| 302 | return 0; | 312 | return 0; |
| @@ -329,7 +339,9 @@ unlock: | |||
| 329 | return 0; | 339 | return 0; |
| 330 | } | 340 | } |
| 331 | 341 | ||
| 332 | /* Must be protected by RTNL */ | 342 | /* Must be protected by RTNL. |
| 343 | * Must be called with vid in range from 1 to 4094 inclusive. | ||
| 344 | */ | ||
| 333 | int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags) | 345 | int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags) |
| 334 | { | 346 | { |
| 335 | struct net_port_vlans *pv = NULL; | 347 | struct net_port_vlans *pv = NULL; |
| @@ -363,7 +375,9 @@ clean_up: | |||
| 363 | return err; | 375 | return err; |
| 364 | } | 376 | } |
| 365 | 377 | ||
| 366 | /* Must be protected by RTNL */ | 378 | /* Must be protected by RTNL. |
| 379 | * Must be called with vid in range from 1 to 4094 inclusive. | ||
| 380 | */ | ||
| 367 | int nbp_vlan_delete(struct net_bridge_port *port, u16 vid) | 381 | int nbp_vlan_delete(struct net_bridge_port *port, u16 vid) |
| 368 | { | 382 | { |
| 369 | struct net_port_vlans *pv; | 383 | struct net_port_vlans *pv; |
| @@ -374,14 +388,9 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid) | |||
| 374 | if (!pv) | 388 | if (!pv) |
| 375 | return -EINVAL; | 389 | return -EINVAL; |
| 376 | 390 | ||
| 377 | if (vid) { | 391 | spin_lock_bh(&port->br->hash_lock); |
| 378 | /* If the VID !=0 remove fdb for this vid. VID 0 is special | 392 | fdb_delete_by_addr(port->br, port->dev->dev_addr, vid); |
| 379 | * in that it's the default and is always there in the fdb. | 393 | spin_unlock_bh(&port->br->hash_lock); |
| 380 | */ | ||
| 381 | spin_lock_bh(&port->br->hash_lock); | ||
| 382 | fdb_delete_by_addr(port->br, port->dev->dev_addr, vid); | ||
| 383 | spin_unlock_bh(&port->br->hash_lock); | ||
| 384 | } | ||
| 385 | 394 | ||
| 386 | return __vlan_del(pv, vid); | 395 | return __vlan_del(pv, vid); |
| 387 | } | 396 | } |
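The br_allowed_ingress() change above lets priority-tagged frames (VLAN tag present, VID 0) inherit the port's PVID just like untagged frames, while preserving the sender's PCP bits in the existing tag. The bit manipulation is easy to show standalone; the TCI layout constants are the standard 802.1Q ones:

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK   0x0fff  /* low 12 bits of the TCI: the VLAN ID */
#define VLAN_PRIO_SHIFT 13      /* top 3 bits: the PCP priority        */

/* A priority-tagged frame carries VID 0: give it the port's PVID but
 * keep whatever priority bits the sender already set. */
static uint16_t apply_pvid(uint16_t tci, uint16_t pvid)
{
        if ((tci & VLAN_VID_MASK) == 0)
                tci |= pvid;
        return tci;
}

int main(void)
{
        uint16_t tci = 5 << VLAN_PRIO_SHIFT;              /* PCP 5, VID 0 */

        printf("0x%04x\n", apply_pvid(tci, 100));         /* 0xa064: PCP kept, VID now 100 */
        return 0;
}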
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 518093802d1d..7c470c371e14 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
| @@ -181,6 +181,7 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr, | |||
| 181 | ub->qlen++; | 181 | ub->qlen++; |
| 182 | 182 | ||
| 183 | pm = nlmsg_data(nlh); | 183 | pm = nlmsg_data(nlh); |
| 184 | memset(pm, 0, sizeof(*pm)); | ||
| 184 | 185 | ||
| 185 | /* Fill in the ulog data */ | 186 | /* Fill in the ulog data */ |
| 186 | pm->version = EBT_ULOG_VERSION; | 187 | pm->version = EBT_ULOG_VERSION; |
| @@ -193,8 +194,6 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr, | |||
| 193 | pm->hook = hooknr; | 194 | pm->hook = hooknr; |
| 194 | if (uloginfo->prefix != NULL) | 195 | if (uloginfo->prefix != NULL) |
| 195 | strcpy(pm->prefix, uloginfo->prefix); | 196 | strcpy(pm->prefix, uloginfo->prefix); |
| 196 | else | ||
| 197 | *(pm->prefix) = '\0'; | ||
| 198 | 197 | ||
| 199 | if (in) { | 198 | if (in) { |
| 200 | strcpy(pm->physindev, in->name); | 199 | strcpy(pm->physindev, in->name); |
| @@ -204,16 +203,14 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr, | |||
| 204 | strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name); | 203 | strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name); |
| 205 | else | 204 | else |
| 206 | strcpy(pm->indev, in->name); | 205 | strcpy(pm->indev, in->name); |
| 207 | } else | 206 | } |
| 208 | pm->indev[0] = pm->physindev[0] = '\0'; | ||
| 209 | 207 | ||
| 210 | if (out) { | 208 | if (out) { |
| 211 | /* If out exists, then out is a bridge port */ | 209 | /* If out exists, then out is a bridge port */ |
| 212 | strcpy(pm->physoutdev, out->name); | 210 | strcpy(pm->physoutdev, out->name); |
| 213 | /* rcu_read_lock()ed by nf_hook_slow */ | 211 | /* rcu_read_lock()ed by nf_hook_slow */ |
| 214 | strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name); | 212 | strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name); |
| 215 | } else | 213 | } |
| 216 | pm->outdev[0] = pm->physoutdev[0] = '\0'; | ||
| 217 | 214 | ||
| 218 | if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0) | 215 | if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0) |
| 219 | BUG(); | 216 | BUG(); |
diff --git a/net/compat.c b/net/compat.c
index f0a1ba6c8086..89032580bd1d 100644
--- a/net/compat.c
+++ b/net/compat.c
| @@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) | |||
| 71 | __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || | 71 | __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || |
| 72 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) | 72 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) |
| 73 | return -EFAULT; | 73 | return -EFAULT; |
| 74 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) | ||
| 75 | return -EINVAL; | ||
| 74 | kmsg->msg_name = compat_ptr(tmp1); | 76 | kmsg->msg_name = compat_ptr(tmp1); |
| 75 | kmsg->msg_iov = compat_ptr(tmp2); | 77 | kmsg->msg_iov = compat_ptr(tmp2); |
| 76 | kmsg->msg_control = compat_ptr(tmp3); | 78 | kmsg->msg_control = compat_ptr(tmp3); |
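The added test bounds the user-supplied msg_namelen by sizeof(struct sockaddr_storage) before the address is later copied into a kernel sockaddr_storage buffer, mirroring the check on the native socket path. The same validation idea in ordinary C (illustrative sizes only):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct sockaddr_storage (128 bytes on Linux). */
struct addr_buf { char data[128]; };

static int copy_name(struct addr_buf *dst, const void *user_name, size_t user_namelen)
{
        /* Reject lengths larger than the destination before copying anything. */
        if (user_namelen > sizeof(*dst))
                return -EINVAL;
        memcpy(dst, user_name, user_namelen);
        return 0;
}

int main(void)
{
        struct addr_buf dst;
        char user_data[512] = { 0 };

        printf("%d\n", copy_name(&dst, user_data, 64));    /* 0   */
        printf("%d\n", copy_name(&dst, user_data, 512));   /* -22 */
        return 0;
}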
diff --git a/net/core/dev.c b/net/core/dev.c
index 65f829cfd928..3430b1ed12e5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
| @@ -1917,7 +1917,8 @@ static struct xps_map *expand_xps_map(struct xps_map *map, | |||
| 1917 | return new_map; | 1917 | return new_map; |
| 1918 | } | 1918 | } |
| 1919 | 1919 | ||
| 1920 | int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index) | 1920 | int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, |
| 1921 | u16 index) | ||
| 1921 | { | 1922 | { |
| 1922 | struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; | 1923 | struct xps_dev_maps *dev_maps, *new_dev_maps = NULL; |
| 1923 | struct xps_map *map, *new_map; | 1924 | struct xps_map *map, *new_map; |
diff --git a/net/core/filter.c b/net/core/filter.c
index 6438f29ff266..01b780856db2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
| @@ -644,7 +644,6 @@ void sk_filter_release_rcu(struct rcu_head *rcu) | |||
| 644 | struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); | 644 | struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); |
| 645 | 645 | ||
| 646 | bpf_jit_free(fp); | 646 | bpf_jit_free(fp); |
| 647 | kfree(fp); | ||
| 648 | } | 647 | } |
| 649 | EXPORT_SYMBOL(sk_filter_release_rcu); | 648 | EXPORT_SYMBOL(sk_filter_release_rcu); |
| 650 | 649 | ||
| @@ -683,7 +682,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp, | |||
| 683 | if (fprog->filter == NULL) | 682 | if (fprog->filter == NULL) |
| 684 | return -EINVAL; | 683 | return -EINVAL; |
| 685 | 684 | ||
| 686 | fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL); | 685 | fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL); |
| 687 | if (!fp) | 686 | if (!fp) |
| 688 | return -ENOMEM; | 687 | return -ENOMEM; |
| 689 | memcpy(fp->insns, fprog->filter, fsize); | 688 | memcpy(fp->insns, fprog->filter, fsize); |
| @@ -723,6 +722,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) | |||
| 723 | { | 722 | { |
| 724 | struct sk_filter *fp, *old_fp; | 723 | struct sk_filter *fp, *old_fp; |
| 725 | unsigned int fsize = sizeof(struct sock_filter) * fprog->len; | 724 | unsigned int fsize = sizeof(struct sock_filter) * fprog->len; |
| 725 | unsigned int sk_fsize = sk_filter_size(fprog->len); | ||
| 726 | int err; | 726 | int err; |
| 727 | 727 | ||
| 728 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) | 728 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
| @@ -732,11 +732,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) | |||
| 732 | if (fprog->filter == NULL) | 732 | if (fprog->filter == NULL) |
| 733 | return -EINVAL; | 733 | return -EINVAL; |
| 734 | 734 | ||
| 735 | fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL); | 735 | fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL); |
| 736 | if (!fp) | 736 | if (!fp) |
| 737 | return -ENOMEM; | 737 | return -ENOMEM; |
| 738 | if (copy_from_user(fp->insns, fprog->filter, fsize)) { | 738 | if (copy_from_user(fp->insns, fprog->filter, fsize)) { |
| 739 | sock_kfree_s(sk, fp, fsize+sizeof(*fp)); | 739 | sock_kfree_s(sk, fp, sk_fsize); |
| 740 | return -EFAULT; | 740 | return -EFAULT; |
| 741 | } | 741 | } |
| 742 | 742 | ||
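Both allocation sites above now use sk_filter_size() rather than open-coding fsize + sizeof(*fp), so the allocated length is derived from the actual struct layout (header plus the flexible instruction array) and stays correct if struct sk_filter grows. The sizing idiom, shown with a simplified stand-in struct and hypothetical names:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins; names are hypothetical, not the kernel's. */
struct insn { unsigned short code; unsigned char jt, jf; unsigned int k; };

struct filter {
        int refcnt;
        unsigned int len;
        /* ...header fields may be added here over time... */
        struct insn insns[];            /* flexible array of instructions */
};

/* Derive the allocation size from the layout instead of hand-adding sizeof(). */
static size_t filter_size(unsigned int proglen)
{
        return offsetof(struct filter, insns) + (size_t)proglen * sizeof(struct insn);
}

int main(void)
{
        unsigned int n = 4;
        struct filter *fp = malloc(filter_size(n));

        if (!fp)
                return 1;
        fp->len = n;
        printf("allocated %zu bytes for %u instructions\n", filter_size(n), n);
        free(fp);
        return 0;
}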
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 8d7d0dd72db2..143b6fdb9647 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
| @@ -40,7 +40,7 @@ again: | |||
| 40 | struct iphdr _iph; | 40 | struct iphdr _iph; |
| 41 | ip: | 41 | ip: |
| 42 | iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); | 42 | iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); |
| 43 | if (!iph) | 43 | if (!iph || iph->ihl < 5) |
| 44 | return false; | 44 | return false; |
| 45 | 45 | ||
| 46 | if (ip_is_fragment(iph)) | 46 | if (ip_is_fragment(iph)) |
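The extra iph->ihl < 5 test rejects IPv4 headers whose length field claims fewer than the mandatory 20 bytes (ihl counts 32-bit words), which would otherwise make the dissector read ports at a bogus offset. A tiny standalone check of the same field (bitfield layout shown for a little-endian machine):

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in for the first byte of an IPv4 header on a little-endian
 * machine: ihl counts 32-bit words, so the minimum legal value is 5 (20 bytes). */
struct iphdr_model {
        unsigned int ihl:4,
                     version:4;
};

static bool ipv4_header_sane(const struct iphdr_model *iph)
{
        return iph->version == 4 && iph->ihl >= 5;
}

int main(void)
{
        struct iphdr_model ok  = { .ihl = 5, .version = 4 };
        struct iphdr_model bad = { .ihl = 2, .version = 4 };

        printf("ok:  %d\n", ipv4_header_sane(&ok));     /* 1 */
        printf("bad: %d\n", ipv4_header_sane(&bad));    /* 0 */
        return 0;
}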
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index d954b56b4e47..325dee863e46 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
| @@ -1344,17 +1344,19 @@ int netdev_register_kobject(struct net_device *net) | |||
| 1344 | return error; | 1344 | return error; |
| 1345 | } | 1345 | } |
| 1346 | 1346 | ||
| 1347 | int netdev_class_create_file(struct class_attribute *class_attr) | 1347 | int netdev_class_create_file_ns(struct class_attribute *class_attr, |
| 1348 | const void *ns) | ||
| 1348 | { | 1349 | { |
| 1349 | return class_create_file(&net_class, class_attr); | 1350 | return class_create_file_ns(&net_class, class_attr, ns); |
| 1350 | } | 1351 | } |
| 1351 | EXPORT_SYMBOL(netdev_class_create_file); | 1352 | EXPORT_SYMBOL(netdev_class_create_file_ns); |
| 1352 | 1353 | ||
| 1353 | void netdev_class_remove_file(struct class_attribute *class_attr) | 1354 | void netdev_class_remove_file_ns(struct class_attribute *class_attr, |
| 1355 | const void *ns) | ||
| 1354 | { | 1356 | { |
| 1355 | class_remove_file(&net_class, class_attr); | 1357 | class_remove_file_ns(&net_class, class_attr, ns); |
| 1356 | } | 1358 | } |
| 1357 | EXPORT_SYMBOL(netdev_class_remove_file); | 1359 | EXPORT_SYMBOL(netdev_class_remove_file_ns); |
| 1358 | 1360 | ||
| 1359 | int netdev_kobject_init(void) | 1361 | int netdev_kobject_init(void) |
| 1360 | { | 1362 | { |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index fc75c9e461b8..8f971990677c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
| @@ -636,8 +636,9 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo | |||
| 636 | 636 | ||
| 637 | netpoll_send_skb(np, send_skb); | 637 | netpoll_send_skb(np, send_skb); |
| 638 | 638 | ||
| 639 | /* If there are several rx_hooks for the same address, | 639 | /* If there are several rx_skb_hooks for the same |
| 640 | we're fine by sending a single reply */ | 640 | * address we're fine by sending a single reply |
| 641 | */ | ||
| 641 | break; | 642 | break; |
| 642 | } | 643 | } |
| 643 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 644 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); |
| @@ -719,8 +720,9 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo | |||
| 719 | 720 | ||
| 720 | netpoll_send_skb(np, send_skb); | 721 | netpoll_send_skb(np, send_skb); |
| 721 | 722 | ||
| 722 | /* If there are several rx_hooks for the same address, | 723 | /* If there are several rx_skb_hooks for the same |
| 723 | we're fine by sending a single reply */ | 724 | * address, we're fine by sending a single reply |
| 725 | */ | ||
| 724 | break; | 726 | break; |
| 725 | } | 727 | } |
| 726 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 728 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); |
| @@ -756,11 +758,12 @@ static bool pkt_is_ns(struct sk_buff *skb) | |||
| 756 | 758 | ||
| 757 | int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) | 759 | int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) |
| 758 | { | 760 | { |
| 759 | int proto, len, ulen; | 761 | int proto, len, ulen, data_len; |
| 760 | int hits = 0; | 762 | int hits = 0, offset; |
| 761 | const struct iphdr *iph; | 763 | const struct iphdr *iph; |
| 762 | struct udphdr *uh; | 764 | struct udphdr *uh; |
| 763 | struct netpoll *np, *tmp; | 765 | struct netpoll *np, *tmp; |
| 766 | uint16_t source; | ||
| 764 | 767 | ||
| 765 | if (list_empty(&npinfo->rx_np)) | 768 | if (list_empty(&npinfo->rx_np)) |
| 766 | goto out; | 769 | goto out; |
| @@ -820,7 +823,10 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) | |||
| 820 | 823 | ||
| 821 | len -= iph->ihl*4; | 824 | len -= iph->ihl*4; |
| 822 | uh = (struct udphdr *)(((char *)iph) + iph->ihl*4); | 825 | uh = (struct udphdr *)(((char *)iph) + iph->ihl*4); |
| 826 | offset = (unsigned char *)(uh + 1) - skb->data; | ||
| 823 | ulen = ntohs(uh->len); | 827 | ulen = ntohs(uh->len); |
| 828 | data_len = skb->len - offset; | ||
| 829 | source = ntohs(uh->source); | ||
| 824 | 830 | ||
| 825 | if (ulen != len) | 831 | if (ulen != len) |
| 826 | goto out; | 832 | goto out; |
| @@ -834,9 +840,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) | |||
| 834 | if (np->local_port && np->local_port != ntohs(uh->dest)) | 840 | if (np->local_port && np->local_port != ntohs(uh->dest)) |
| 835 | continue; | 841 | continue; |
| 836 | 842 | ||
| 837 | np->rx_hook(np, ntohs(uh->source), | 843 | np->rx_skb_hook(np, source, skb, offset, data_len); |
| 838 | (char *)(uh+1), | ||
| 839 | ulen - sizeof(struct udphdr)); | ||
| 840 | hits++; | 844 | hits++; |
| 841 | } | 845 | } |
| 842 | } else { | 846 | } else { |
| @@ -859,7 +863,10 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) | |||
| 859 | if (!pskb_may_pull(skb, sizeof(struct udphdr))) | 863 | if (!pskb_may_pull(skb, sizeof(struct udphdr))) |
| 860 | goto out; | 864 | goto out; |
| 861 | uh = udp_hdr(skb); | 865 | uh = udp_hdr(skb); |
| 866 | offset = (unsigned char *)(uh + 1) - skb->data; | ||
| 862 | ulen = ntohs(uh->len); | 867 | ulen = ntohs(uh->len); |
| 868 | data_len = skb->len - offset; | ||
| 869 | source = ntohs(uh->source); | ||
| 863 | if (ulen != skb->len) | 870 | if (ulen != skb->len) |
| 864 | goto out; | 871 | goto out; |
| 865 | if (udp6_csum_init(skb, uh, IPPROTO_UDP)) | 872 | if (udp6_csum_init(skb, uh, IPPROTO_UDP)) |
| @@ -872,9 +879,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) | |||
| 872 | if (np->local_port && np->local_port != ntohs(uh->dest)) | 879 | if (np->local_port && np->local_port != ntohs(uh->dest)) |
| 873 | continue; | 880 | continue; |
| 874 | 881 | ||
| 875 | np->rx_hook(np, ntohs(uh->source), | 882 | np->rx_skb_hook(np, source, skb, offset, data_len); |
| 876 | (char *)(uh+1), | ||
| 877 | ulen - sizeof(struct udphdr)); | ||
| 878 | hits++; | 883 | hits++; |
| 879 | } | 884 | } |
| 880 | #endif | 885 | #endif |
| @@ -1062,7 +1067,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp) | |||
| 1062 | 1067 | ||
| 1063 | npinfo->netpoll = np; | 1068 | npinfo->netpoll = np; |
| 1064 | 1069 | ||
| 1065 | if (np->rx_hook) { | 1070 | if (np->rx_skb_hook) { |
| 1066 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 1071 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
| 1067 | npinfo->rx_flags |= NETPOLL_RX_ENABLED; | 1072 | npinfo->rx_flags |= NETPOLL_RX_ENABLED; |
| 1068 | list_add_tail(&np->rx, &npinfo->rx_np); | 1073 | list_add_tail(&np->rx, &npinfo->rx_np); |
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 3f1ec1586ae1..8d9d05edd2eb 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
| @@ -10,6 +10,7 @@ | |||
| 10 | 10 | ||
| 11 | #include <net/secure_seq.h> | 11 | #include <net/secure_seq.h> |
| 12 | 12 | ||
| 13 | #if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET) | ||
| 13 | #define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4) | 14 | #define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4) |
| 14 | 15 | ||
| 15 | static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned; | 16 | static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned; |
| @@ -29,6 +30,7 @@ static void net_secret_init(void) | |||
| 29 | cmpxchg(&net_secret[--i], 0, tmp); | 30 | cmpxchg(&net_secret[--i], 0, tmp); |
| 30 | } | 31 | } |
| 31 | } | 32 | } |
| 33 | #endif | ||
| 32 | 34 | ||
| 33 | #ifdef CONFIG_INET | 35 | #ifdef CONFIG_INET |
| 34 | static u32 seq_scale(u32 seq) | 36 | static u32 seq_scale(u32 seq) |
diff --git a/net/core/sock.c b/net/core/sock.c
index 5b6beba494a3..0b39e7ae4383 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
| @@ -2319,6 +2319,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
| 2319 | sk->sk_ll_usec = sysctl_net_busy_read; | 2319 | sk->sk_ll_usec = sysctl_net_busy_read; |
| 2320 | #endif | 2320 | #endif |
| 2321 | 2321 | ||
| 2322 | sk->sk_pacing_rate = ~0U; | ||
| 2322 | /* | 2323 | /* |
| 2323 | * Before updating sk_refcnt, we must commit prior changes to memory | 2324 | * Before updating sk_refcnt, we must commit prior changes to memory |
| 2324 | * (Documentation/RCU/rculist_nulls.txt for details) | 2325 | * (Documentation/RCU/rculist_nulls.txt for details) |
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index c85e71e0c7ff..ff41b4d60d30 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
| @@ -1372,6 +1372,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev, | |||
| 1372 | real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); | 1372 | real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); |
| 1373 | if (!real_dev) | 1373 | if (!real_dev) |
| 1374 | return -ENODEV; | 1374 | return -ENODEV; |
| 1375 | if (real_dev->type != ARPHRD_IEEE802154) | ||
| 1376 | return -EINVAL; | ||
| 1375 | 1377 | ||
| 1376 | lowpan_dev_info(dev)->real_dev = real_dev; | 1378 | lowpan_dev_info(dev)->real_dev = real_dev; |
| 1377 | lowpan_dev_info(dev)->fragment_tag = 0; | 1379 | lowpan_dev_info(dev)->fragment_tag = 0; |
| @@ -1386,6 +1388,9 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev, | |||
| 1386 | 1388 | ||
| 1387 | entry->ldev = dev; | 1389 | entry->ldev = dev; |
| 1388 | 1390 | ||
| 1391 | /* Set the lowpan hardware address to the wpan hardware address. */ | ||
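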
| 1392 | memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN); | ||
| 1393 | |||
| 1389 | mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); | 1394 | mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); |
| 1390 | INIT_LIST_HEAD(&entry->list); | 1395 | INIT_LIST_HEAD(&entry->list); |
| 1391 | list_add_tail(&entry->list, &lowpan_devices); | 1396 | list_add_tail(&entry->list, &lowpan_devices); |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 7bd8983dbfcf..96da9c77deca 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
| @@ -287,7 +287,7 @@ begintw: | |||
| 287 | if (unlikely(!INET_TW_MATCH(sk, net, acookie, | 287 | if (unlikely(!INET_TW_MATCH(sk, net, acookie, |
| 288 | saddr, daddr, ports, | 288 | saddr, daddr, ports, |
| 289 | dif))) { | 289 | dif))) { |
| 290 | sock_put(sk); | 290 | inet_twsk_put(inet_twsk(sk)); |
| 291 | goto begintw; | 291 | goto begintw; |
| 292 | } | 292 | } |
| 293 | goto out; | 293 | goto out; |
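The one-line inet_hashtables.c change (and its IPv6 twin further down) releases a time-wait socket through inet_twsk_put() rather than sock_put(), i.e. through the helper that matches the object's real type and refcount. A rough user-space sketch of that idea, with invented types and a single combined refcount for simplicity:

#include <stdio.h>
#include <stdlib.h>

enum sock_kind { SOCK_FULL, SOCK_TIMEWAIT };

struct mini_sock {
	enum sock_kind kind;
	int refcnt;
};

static void full_sock_free(struct mini_sock *sk)
{
	printf("full socket torn down\n");
	free(sk);
}

static void timewait_free(struct mini_sock *sk)
{
	printf("time-wait bucket torn down\n");
	free(sk);
}

/* Drop a reference through the destructor that matches the object's kind;
 * running the full-socket teardown on a time-wait bucket would be wrong. */
static void mini_sock_put(struct mini_sock *sk)
{
	if (--sk->refcnt)
		return;
	if (sk->kind == SOCK_TIMEWAIT)
		timewait_free(sk);
	else
		full_sock_free(sk);
}

int main(void)
{
	struct mini_sock *tw = malloc(sizeof(*tw));

	tw->kind = SOCK_TIMEWAIT;
	tw->refcnt = 1;
	mini_sock_put(tw);
	return 0;
}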
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index a04d872c54f9..3982eabf61e1 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
| @@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk, | |||
| 772 | /* initialize protocol header pointer */ | 772 | /* initialize protocol header pointer */ |
| 773 | skb->transport_header = skb->network_header + fragheaderlen; | 773 | skb->transport_header = skb->network_header + fragheaderlen; |
| 774 | 774 | ||
| 775 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
| 776 | skb->csum = 0; | 775 | skb->csum = 0; |
| 777 | 776 | ||
| 778 | /* specify the length of each IP datagram fragment */ | 777 | |
| 779 | skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen; | ||
| 780 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; | ||
| 781 | __skb_queue_tail(queue, skb); | 778 | __skb_queue_tail(queue, skb); |
| 779 | } else if (skb_is_gso(skb)) { | ||
| 780 | goto append; | ||
| 782 | } | 781 | } |
| 783 | 782 | ||
| 783 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
| 784 | /* specify the length of each IP datagram fragment */ | ||
| 785 | skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen; | ||
| 786 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; | ||
| 787 | |||
| 788 | append: | ||
| 784 | return skb_append_datato_frags(sk, skb, getfrag, from, | 789 | return skb_append_datato_frags(sk, skb, getfrag, from, |
| 785 | (length - transhdrlen)); | 790 | (length - transhdrlen)); |
| 786 | } | 791 | } |
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index e805e7b3030e..6e87f853d033 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c | |||
| @@ -125,8 +125,17 @@ static int vti_rcv(struct sk_buff *skb) | |||
| 125 | iph->saddr, iph->daddr, 0); | 125 | iph->saddr, iph->daddr, 0); |
| 126 | if (tunnel != NULL) { | 126 | if (tunnel != NULL) { |
| 127 | struct pcpu_tstats *tstats; | 127 | struct pcpu_tstats *tstats; |
| 128 | u32 oldmark = skb->mark; | ||
| 129 | int ret; | ||
| 128 | 130 | ||
| 129 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 131 | |
| 132 | /* temporarily mark the skb with the tunnel o_key, to | ||
| 133 | * only match policies with this mark. | ||
| 134 | */ | ||
| 135 | skb->mark = be32_to_cpu(tunnel->parms.o_key); | ||
| 136 | ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb); | ||
| 137 | skb->mark = oldmark; | ||
| 138 | if (!ret) | ||
| 130 | return -1; | 139 | return -1; |
| 131 | 140 | ||
| 132 | tstats = this_cpu_ptr(tunnel->dev->tstats); | 141 | tstats = this_cpu_ptr(tunnel->dev->tstats); |
| @@ -135,7 +144,6 @@ static int vti_rcv(struct sk_buff *skb) | |||
| 135 | tstats->rx_bytes += skb->len; | 144 | tstats->rx_bytes += skb->len; |
| 136 | u64_stats_update_end(&tstats->syncp); | 145 | u64_stats_update_end(&tstats->syncp); |
| 137 | 146 | ||
| 138 | skb->mark = 0; | ||
| 139 | secpath_reset(skb); | 147 | secpath_reset(skb); |
| 140 | skb->dev = tunnel->dev; | 148 | skb->dev = tunnel->dev; |
| 141 | return 1; | 149 | return 1; |
| @@ -167,7 +175,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 167 | 175 | ||
| 168 | memset(&fl4, 0, sizeof(fl4)); | 176 | memset(&fl4, 0, sizeof(fl4)); |
| 169 | flowi4_init_output(&fl4, tunnel->parms.link, | 177 | flowi4_init_output(&fl4, tunnel->parms.link, |
| 170 | be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos), | 178 | be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos), |
| 171 | RT_SCOPE_UNIVERSE, | 179 | RT_SCOPE_UNIVERSE, |
| 172 | IPPROTO_IPIP, 0, | 180 | IPPROTO_IPIP, 0, |
| 173 | dst, tiph->saddr, 0, 0); | 181 | dst, tiph->saddr, 0, 0); |
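The vti_rcv() hunk above temporarily overrides skb->mark with the tunnel's o_key so the xfrm policy check only matches policies carrying that mark, then restores the original mark before the packet continues. A small user-space sketch of the save/override/restore pattern; every name here is invented:

#include <stdbool.h>
#include <stdio.h>

struct packet {
	unsigned int mark;
};

static const unsigned int policy_mark = 42;	/* mark the "policy" was installed with */

/* Stand-in for xfrm4_policy_check(): accept only packets carrying the mark. */
static bool policy_check(const struct packet *pkt)
{
	return pkt->mark == policy_mark;
}

static int tunnel_rcv(struct packet *pkt, unsigned int tunnel_key)
{
	unsigned int oldmark = pkt->mark;
	bool ok;

	pkt->mark = tunnel_key;		/* temporary, for the check only */
	ok = policy_check(pkt);
	pkt->mark = oldmark;		/* later code never sees the override */

	return ok ? 1 : -1;
}

int main(void)
{
	struct packet pkt = { .mark = 0 };

	printf("accepted: %d\n", tunnel_rcv(&pkt, 42));
	printf("mark restored to %u\n", pkt.mark);
	return 0;
}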
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 85a4f21aac1a..59da7cde0724 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
| @@ -271,6 +271,11 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
| 271 | local_bh_disable(); | 271 | local_bh_disable(); |
| 272 | addend = xt_write_recseq_begin(); | 272 | addend = xt_write_recseq_begin(); |
| 273 | private = table->private; | 273 | private = table->private; |
| 274 | /* | ||
| 275 | * Ensure we load private-> members after we've fetched the base | ||
| 276 | * pointer. | ||
| 277 | */ | ||
| 278 | smp_read_barrier_depends(); | ||
| 274 | table_base = private->entries[smp_processor_id()]; | 279 | table_base = private->entries[smp_processor_id()]; |
| 275 | 280 | ||
| 276 | e = get_entry(table_base, private->hook_entry[hook]); | 281 | e = get_entry(table_base, private->hook_entry[hook]); |
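The comment added here (and repeated in the ip_tables and ip6_tables hunks below) documents a data-dependency ordering requirement: the reader must not load private->entries or private->hook_entry before it has picked up the table->private pointer published by the updater. A user-space sketch of the publish/consume pairing, using C11 release/acquire in place of the kernel's smp_read_barrier_depends():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct table {
	int entries[4];
	int hook_entry;
};

static _Atomic(struct table *) current_table;

static void publish_table(void)
{
	struct table *t = calloc(1, sizeof(*t));

	t->hook_entry = 2;
	t->entries[2] = 42;
	/* Release: all initialisation above is visible before the pointer is. */
	atomic_store_explicit(&current_table, t, memory_order_release);
}

static int read_entry(void)
{
	/* Acquire (standing in for a dependency barrier): the member loads
	 * below cannot be reordered before this pointer load. */
	struct table *t = atomic_load_explicit(&current_table,
					       memory_order_acquire);

	return t->entries[t->hook_entry];
}

int main(void)
{
	publish_table();
	printf("%d\n", read_entry());
	return 0;
}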
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index d23118d95ff9..718dfbd30cbe 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
| @@ -327,6 +327,11 @@ ipt_do_table(struct sk_buff *skb, | |||
| 327 | addend = xt_write_recseq_begin(); | 327 | addend = xt_write_recseq_begin(); |
| 328 | private = table->private; | 328 | private = table->private; |
| 329 | cpu = smp_processor_id(); | 329 | cpu = smp_processor_id(); |
| 330 | /* | ||
| 331 | * Ensure we load private-> members after we've fetched the base | ||
| 332 | * pointer. | ||
| 333 | */ | ||
| 334 | smp_read_barrier_depends(); | ||
| 330 | table_base = private->entries[cpu]; | 335 | table_base = private->entries[cpu]; |
| 331 | jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; | 336 | jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; |
| 332 | stackptr = per_cpu_ptr(private->stackptr, cpu); | 337 | stackptr = per_cpu_ptr(private->stackptr, cpu); |
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index cbc22158af49..9cb993cd224b 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c | |||
| @@ -220,6 +220,7 @@ static void ipt_ulog_packet(struct net *net, | |||
| 220 | ub->qlen++; | 220 | ub->qlen++; |
| 221 | 221 | ||
| 222 | pm = nlmsg_data(nlh); | 222 | pm = nlmsg_data(nlh); |
| 223 | memset(pm, 0, sizeof(*pm)); | ||
| 223 | 224 | ||
| 224 | /* We might not have a timestamp, get one */ | 225 | /* We might not have a timestamp, get one */ |
| 225 | if (skb->tstamp.tv64 == 0) | 226 | if (skb->tstamp.tv64 == 0) |
| @@ -238,8 +239,6 @@ static void ipt_ulog_packet(struct net *net, | |||
| 238 | } | 239 | } |
| 239 | else if (loginfo->prefix[0] != '\0') | 240 | else if (loginfo->prefix[0] != '\0') |
| 240 | strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix)); | 241 | strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix)); |
| 241 | else | ||
| 242 | *(pm->prefix) = '\0'; | ||
| 243 | 242 | ||
| 244 | if (in && in->hard_header_len > 0 && | 243 | if (in && in->hard_header_len > 0 && |
| 245 | skb->mac_header != skb->network_header && | 244 | skb->mac_header != skb->network_header && |
| @@ -251,13 +250,9 @@ static void ipt_ulog_packet(struct net *net, | |||
| 251 | 250 | ||
| 252 | if (in) | 251 | if (in) |
| 253 | strncpy(pm->indev_name, in->name, sizeof(pm->indev_name)); | 252 | strncpy(pm->indev_name, in->name, sizeof(pm->indev_name)); |
| 254 | else | ||
| 255 | pm->indev_name[0] = '\0'; | ||
| 256 | 253 | ||
| 257 | if (out) | 254 | if (out) |
| 258 | strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name)); | 255 | strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name)); |
| 259 | else | ||
| 260 | pm->outdev_name[0] = '\0'; | ||
| 261 | 256 | ||
| 262 | /* copy_len <= skb->len, so can't fail. */ | 257 | /* copy_len <= skb->len, so can't fail. */ |
| 263 | if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0) | 258 | if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0) |
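The ipt_ULOG change zeroes the whole netlink payload once with memset() and drops the per-field else branches, so any member that is not explicitly filled in can no longer leak stale kernel memory to userspace. A user-space sketch of the same pattern with an invented message layout:

#include <stdio.h>
#include <string.h>

struct ulog_packet_msg {
	char prefix[32];
	char indev_name[16];
	char outdev_name[16];
};

static void fill_msg(struct ulog_packet_msg *pm, const char *prefix,
		     const char *indev, const char *outdev)
{
	memset(pm, 0, sizeof(*pm));	/* one clear covers every field */

	if (prefix)
		strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1);
	if (indev)
		strncpy(pm->indev_name, indev, sizeof(pm->indev_name) - 1);
	if (outdev)
		strncpy(pm->outdev_name, outdev, sizeof(pm->outdev_name) - 1);
}

int main(void)
{
	struct ulog_packet_msg pm;

	fill_msg(&pm, "nf-log", "eth0", NULL);
	printf("%s %s [%s]\n", pm.prefix, pm.indev_name, pm.outdev_name);
	return 0;
}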
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 727f4365bcdf..6011615e810d 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -2072,7 +2072,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) | |||
| 2072 | RT_SCOPE_LINK); | 2072 | RT_SCOPE_LINK); |
| 2073 | goto make_route; | 2073 | goto make_route; |
| 2074 | } | 2074 | } |
| 2075 | if (fl4->saddr) { | 2075 | if (!fl4->saddr) { |
| 2076 | if (ipv4_is_multicast(fl4->daddr)) | 2076 | if (ipv4_is_multicast(fl4->daddr)) |
| 2077 | fl4->saddr = inet_select_addr(dev_out, 0, | 2077 | fl4->saddr = inet_select_addr(dev_out, 0, |
| 2078 | fl4->flowi4_scope); | 2078 | fl4->flowi4_scope); |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 25a89eaa669d..068c8fb0d158 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -1284,7 +1284,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, | |||
| 1284 | tp->lost_cnt_hint -= tcp_skb_pcount(prev); | 1284 | tp->lost_cnt_hint -= tcp_skb_pcount(prev); |
| 1285 | } | 1285 | } |
| 1286 | 1286 | ||
| 1287 | TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags; | 1287 | TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; |
| 1288 | if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) | ||
| 1289 | TCP_SKB_CB(prev)->end_seq++; | ||
| 1290 | |||
| 1288 | if (skb == tcp_highest_sack(sk)) | 1291 | if (skb == tcp_highest_sack(sk)) |
| 1289 | tcp_advance_highest_sack(sk, skb); | 1292 | tcp_advance_highest_sack(sk, skb); |
| 1290 | 1293 | ||
| @@ -2853,7 +2856,8 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, | |||
| 2853 | * left edge of the send window. | 2856 | * left edge of the send window. |
| 2854 | * See draft-ietf-tcplw-high-performance-00, section 3.3. | 2857 | * See draft-ietf-tcplw-high-performance-00, section 3.3. |
| 2855 | */ | 2858 | */ |
| 2856 | if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) | 2859 | if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && |
| 2860 | flag & FLAG_ACKED) | ||
| 2857 | seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; | 2861 | seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; |
| 2858 | 2862 | ||
| 2859 | if (seq_rtt < 0) | 2863 | if (seq_rtt < 0) |
| @@ -2868,14 +2872,19 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, | |||
| 2868 | } | 2872 | } |
| 2869 | 2873 | ||
| 2870 | /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */ | 2874 | /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */ |
| 2871 | static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) | 2875 | static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp) |
| 2872 | { | 2876 | { |
| 2873 | struct tcp_sock *tp = tcp_sk(sk); | 2877 | struct tcp_sock *tp = tcp_sk(sk); |
| 2874 | s32 seq_rtt = -1; | 2878 | s32 seq_rtt = -1; |
| 2875 | 2879 | ||
| 2876 | if (tp->lsndtime && !tp->total_retrans) | 2880 | if (synack_stamp && !tp->total_retrans) |
| 2877 | seq_rtt = tcp_time_stamp - tp->lsndtime; | 2881 | seq_rtt = tcp_time_stamp - synack_stamp; |
| 2878 | tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1); | 2882 | |
| 2883 | /* If the ACK acks both the SYNACK and the (Fast Open'd) data packets | ||
| 2884 | * sent in SYN_RECV, SYNACK RTT is the smooth RTT computed in tcp_ack() | ||
| 2885 | */ | ||
| 2886 | if (!tp->srtt) | ||
| 2887 | tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1); | ||
| 2879 | } | 2888 | } |
| 2880 | 2889 | ||
| 2881 | static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | 2890 | static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) |
| @@ -2978,6 +2987,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
| 2978 | s32 seq_rtt = -1; | 2987 | s32 seq_rtt = -1; |
| 2979 | s32 ca_seq_rtt = -1; | 2988 | s32 ca_seq_rtt = -1; |
| 2980 | ktime_t last_ackt = net_invalid_timestamp(); | 2989 | ktime_t last_ackt = net_invalid_timestamp(); |
| 2990 | bool rtt_update; | ||
| 2981 | 2991 | ||
| 2982 | while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { | 2992 | while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { |
| 2983 | struct tcp_skb_cb *scb = TCP_SKB_CB(skb); | 2993 | struct tcp_skb_cb *scb = TCP_SKB_CB(skb); |
| @@ -3054,14 +3064,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
| 3054 | if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) | 3064 | if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) |
| 3055 | flag |= FLAG_SACK_RENEGING; | 3065 | flag |= FLAG_SACK_RENEGING; |
| 3056 | 3066 | ||
| 3057 | if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) || | 3067 | rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt); |
| 3058 | (flag & FLAG_ACKED)) | ||
| 3059 | tcp_rearm_rto(sk); | ||
| 3060 | 3068 | ||
| 3061 | if (flag & FLAG_ACKED) { | 3069 | if (flag & FLAG_ACKED) { |
| 3062 | const struct tcp_congestion_ops *ca_ops | 3070 | const struct tcp_congestion_ops *ca_ops |
| 3063 | = inet_csk(sk)->icsk_ca_ops; | 3071 | = inet_csk(sk)->icsk_ca_ops; |
| 3064 | 3072 | ||
| 3073 | tcp_rearm_rto(sk); | ||
| 3065 | if (unlikely(icsk->icsk_mtup.probe_size && | 3074 | if (unlikely(icsk->icsk_mtup.probe_size && |
| 3066 | !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { | 3075 | !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { |
| 3067 | tcp_mtup_probe_success(sk); | 3076 | tcp_mtup_probe_success(sk); |
| @@ -3100,6 +3109,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
| 3100 | 3109 | ||
| 3101 | ca_ops->pkts_acked(sk, pkts_acked, rtt_us); | 3110 | ca_ops->pkts_acked(sk, pkts_acked, rtt_us); |
| 3102 | } | 3111 | } |
| 3112 | } else if (skb && rtt_update && sack_rtt >= 0 && | ||
| 3113 | sack_rtt > (s32)(now - TCP_SKB_CB(skb)->when)) { | ||
| 3114 | /* Do not re-arm RTO if the sack RTT is measured from data sent | ||
| 3115 | * after when the head was last (re)transmitted. Otherwise the | ||
| 3116 | * timeout may continue to extend in loss recovery. | ||
| 3117 | */ | ||
| 3118 | tcp_rearm_rto(sk); | ||
| 3103 | } | 3119 | } |
| 3104 | 3120 | ||
| 3105 | #if FASTRETRANS_DEBUG > 0 | 3121 | #if FASTRETRANS_DEBUG > 0 |
| @@ -3288,7 +3304,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) | |||
| 3288 | tcp_init_cwnd_reduction(sk, true); | 3304 | tcp_init_cwnd_reduction(sk, true); |
| 3289 | tcp_set_ca_state(sk, TCP_CA_CWR); | 3305 | tcp_set_ca_state(sk, TCP_CA_CWR); |
| 3290 | tcp_end_cwnd_reduction(sk); | 3306 | tcp_end_cwnd_reduction(sk); |
| 3291 | tcp_set_ca_state(sk, TCP_CA_Open); | 3307 | tcp_try_keep_open(sk); |
| 3292 | NET_INC_STATS_BH(sock_net(sk), | 3308 | NET_INC_STATS_BH(sock_net(sk), |
| 3293 | LINUX_MIB_TCPLOSSPROBERECOVERY); | 3309 | LINUX_MIB_TCPLOSSPROBERECOVERY); |
| 3294 | } | 3310 | } |
| @@ -5584,6 +5600,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 5584 | struct request_sock *req; | 5600 | struct request_sock *req; |
| 5585 | int queued = 0; | 5601 | int queued = 0; |
| 5586 | bool acceptable; | 5602 | bool acceptable; |
| 5603 | u32 synack_stamp; | ||
| 5587 | 5604 | ||
| 5588 | tp->rx_opt.saw_tstamp = 0; | 5605 | tp->rx_opt.saw_tstamp = 0; |
| 5589 | 5606 | ||
| @@ -5666,9 +5683,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 5666 | * so release it. | 5683 | * so release it. |
| 5667 | */ | 5684 | */ |
| 5668 | if (req) { | 5685 | if (req) { |
| 5686 | synack_stamp = tcp_rsk(req)->snt_synack; | ||
| 5669 | tp->total_retrans = req->num_retrans; | 5687 | tp->total_retrans = req->num_retrans; |
| 5670 | reqsk_fastopen_remove(sk, req, false); | 5688 | reqsk_fastopen_remove(sk, req, false); |
| 5671 | } else { | 5689 | } else { |
| 5690 | synack_stamp = tp->lsndtime; | ||
| 5672 | /* Make sure socket is routed, for correct metrics. */ | 5691 | /* Make sure socket is routed, for correct metrics. */ |
| 5673 | icsk->icsk_af_ops->rebuild_header(sk); | 5692 | icsk->icsk_af_ops->rebuild_header(sk); |
| 5674 | tcp_init_congestion_control(sk); | 5693 | tcp_init_congestion_control(sk); |
| @@ -5691,7 +5710,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 5691 | tp->snd_una = TCP_SKB_CB(skb)->ack_seq; | 5710 | tp->snd_una = TCP_SKB_CB(skb)->ack_seq; |
| 5692 | tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; | 5711 | tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; |
| 5693 | tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); | 5712 | tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); |
| 5694 | tcp_synack_rtt_meas(sk, req); | 5713 | tcp_synack_rtt_meas(sk, synack_stamp); |
| 5695 | 5714 | ||
| 5696 | if (tp->rx_opt.tstamp_ok) | 5715 | if (tp->rx_opt.tstamp_ok) |
| 5697 | tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; | 5716 | tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; |
| @@ -5709,6 +5728,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 5709 | } else | 5728 | } else |
| 5710 | tcp_init_metrics(sk); | 5729 | tcp_init_metrics(sk); |
| 5711 | 5730 | ||
| 5731 | tcp_update_pacing_rate(sk); | ||
| 5732 | |||
| 5712 | /* Prevent spurious tcp_cwnd_restart() on first data packet */ | 5733 | /* Prevent spurious tcp_cwnd_restart() on first data packet */ |
| 5713 | tp->lsndtime = tcp_time_stamp; | 5734 | tp->lsndtime = tcp_time_stamp; |
| 5714 | 5735 | ||
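The tcp_input.c changes above pass the stored SYNACK timestamp into tcp_synack_rtt_meas() and skip the sample when a smoothed RTT already exists, as happens when Fast Open data was ACKed before the handshake completed. A simplified user-space sketch of that sampling rule, with invented types and timestamps in arbitrary ticks:

#include <stdio.h>

struct conn {
	unsigned int srtt;		/* 0 means "no RTT estimate yet" */
	unsigned int total_retrans;
};

static void synack_rtt_meas(struct conn *c, unsigned int now,
			    unsigned int synack_stamp)
{
	int seq_rtt = -1;

	/* Only sample when a SYNACK timestamp was recorded and nothing was
	 * retransmitted, otherwise the measurement is ambiguous. */
	if (synack_stamp && !c->total_retrans)
		seq_rtt = (int)(now - synack_stamp);

	/* If data sent in SYN_RECV was already ACKed, srtt is set and this
	 * coarser sample is discarded. */
	if (!c->srtt && seq_rtt >= 0)
		c->srtt = (unsigned int)seq_rtt;
}

int main(void)
{
	struct conn c = { .srtt = 0, .total_retrans = 0 };

	synack_rtt_meas(&c, 1050, 1000);
	printf("srtt=%u\n", c.srtt);	/* 50 */
	return 0;
}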
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 3a7525e6c086..533c58a5cfb7 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c | |||
| @@ -18,6 +18,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, | |||
| 18 | netdev_features_t features) | 18 | netdev_features_t features) |
| 19 | { | 19 | { |
| 20 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 20 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
| 21 | unsigned int sum_truesize = 0; | ||
| 21 | struct tcphdr *th; | 22 | struct tcphdr *th; |
| 22 | unsigned int thlen; | 23 | unsigned int thlen; |
| 23 | unsigned int seq; | 24 | unsigned int seq; |
| @@ -102,13 +103,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, | |||
| 102 | if (copy_destructor) { | 103 | if (copy_destructor) { |
| 103 | skb->destructor = gso_skb->destructor; | 104 | skb->destructor = gso_skb->destructor; |
| 104 | skb->sk = gso_skb->sk; | 105 | skb->sk = gso_skb->sk; |
| 105 | /* {tcp|sock}_wfree() use exact truesize accounting : | 106 | sum_truesize += skb->truesize; |
| 106 | * sum(skb->truesize) MUST be exactly be gso_skb->truesize | ||
| 107 | * So we account mss bytes of 'true size' for each segment. | ||
| 108 | * The last segment will contain the remaining. | ||
| 109 | */ | ||
| 110 | skb->truesize = mss; | ||
| 111 | gso_skb->truesize -= mss; | ||
| 112 | } | 107 | } |
| 113 | skb = skb->next; | 108 | skb = skb->next; |
| 114 | th = tcp_hdr(skb); | 109 | th = tcp_hdr(skb); |
| @@ -125,7 +120,9 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, | |||
| 125 | if (copy_destructor) { | 120 | if (copy_destructor) { |
| 126 | swap(gso_skb->sk, skb->sk); | 121 | swap(gso_skb->sk, skb->sk); |
| 127 | swap(gso_skb->destructor, skb->destructor); | 122 | swap(gso_skb->destructor, skb->destructor); |
| 128 | swap(gso_skb->truesize, skb->truesize); | 123 | sum_truesize += skb->truesize; |
| 124 | atomic_add(sum_truesize - gso_skb->truesize, | ||
| 125 | &skb->sk->sk_wmem_alloc); | ||
| 129 | } | 126 | } |
| 130 | 127 | ||
| 131 | delta = htonl(oldlen + (skb_tail_pointer(skb) - | 128 | delta = htonl(oldlen + (skb_tail_pointer(skb) - |
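The tcp_offload.c hunks replace the per-segment truesize rewrite with a running sum that is reconciled against the original GSO skb's charge in one atomic adjustment at the end. A user-space sketch of accumulate-then-adjust, using a plain signed atomic counter in place of sk_wmem_alloc:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int wmem_alloc;		/* bytes currently charged to the socket */

static void account_segments(const int seg_truesize[], int nsegs,
			     int gso_truesize)
{
	int sum_truesize = 0;
	int i;

	for (i = 0; i < nsegs; i++)
		sum_truesize += seg_truesize[i];

	/* One adjustment by the difference between what the segments really
	 * occupy and what the original buffer was charged for. */
	atomic_fetch_add(&wmem_alloc, sum_truesize - gso_truesize);
}

int main(void)
{
	int segs[] = { 1500, 1500, 900 };

	atomic_store(&wmem_alloc, 4000);	/* charge for the original GSO skb */
	account_segments(segs, 3, 4000);
	printf("wmem_alloc now %d\n", atomic_load(&wmem_alloc));	/* 3900 */
	return 0;
}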
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e6bb8256e59f..d46f2143305c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -637,6 +637,8 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb | |||
| 637 | unsigned int size = 0; | 637 | unsigned int size = 0; |
| 638 | unsigned int eff_sacks; | 638 | unsigned int eff_sacks; |
| 639 | 639 | ||
| 640 | opts->options = 0; | ||
| 641 | |||
| 640 | #ifdef CONFIG_TCP_MD5SIG | 642 | #ifdef CONFIG_TCP_MD5SIG |
| 641 | *md5 = tp->af_specific->md5_lookup(sk, sk); | 643 | *md5 = tp->af_specific->md5_lookup(sk, sk); |
| 642 | if (unlikely(*md5)) { | 644 | if (unlikely(*md5)) { |
| @@ -984,8 +986,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) | |||
| 984 | static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, | 986 | static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, |
| 985 | unsigned int mss_now) | 987 | unsigned int mss_now) |
| 986 | { | 988 | { |
| 987 | if (skb->len <= mss_now || !sk_can_gso(sk) || | 989 | /* Make sure we own this skb before messing with gso_size/gso_segs */ |
| 988 | skb->ip_summed == CHECKSUM_NONE) { | 990 | WARN_ON_ONCE(skb_cloned(skb)); |
| 991 | |||
| 992 | if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) { | ||
| 989 | /* Avoid the costly divide in the normal | 993 | /* Avoid the costly divide in the normal |
| 990 | * non-TSO case. | 994 | * non-TSO case. |
| 991 | */ | 995 | */ |
| @@ -1065,9 +1069,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, | |||
| 1065 | if (nsize < 0) | 1069 | if (nsize < 0) |
| 1066 | nsize = 0; | 1070 | nsize = 0; |
| 1067 | 1071 | ||
| 1068 | if (skb_cloned(skb) && | 1072 | if (skb_unclone(skb, GFP_ATOMIC)) |
| 1069 | skb_is_nonlinear(skb) && | ||
| 1070 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) | ||
| 1071 | return -ENOMEM; | 1073 | return -ENOMEM; |
| 1072 | 1074 | ||
| 1073 | /* Get a new skb... force flag on. */ | 1075 | /* Get a new skb... force flag on. */ |
| @@ -2342,6 +2344,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
| 2342 | int oldpcount = tcp_skb_pcount(skb); | 2344 | int oldpcount = tcp_skb_pcount(skb); |
| 2343 | 2345 | ||
| 2344 | if (unlikely(oldpcount > 1)) { | 2346 | if (unlikely(oldpcount > 1)) { |
| 2347 | if (skb_unclone(skb, GFP_ATOMIC)) | ||
| 2348 | return -ENOMEM; | ||
| 2345 | tcp_init_tso_segs(sk, skb, cur_mss); | 2349 | tcp_init_tso_segs(sk, skb, cur_mss); |
| 2346 | tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); | 2350 | tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); |
| 2347 | } | 2351 | } |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 9a459be24af7..e1a63930a967 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
| @@ -104,9 +104,14 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
| 104 | const struct iphdr *iph = ip_hdr(skb); | 104 | const struct iphdr *iph = ip_hdr(skb); |
| 105 | u8 *xprth = skb_network_header(skb) + iph->ihl * 4; | 105 | u8 *xprth = skb_network_header(skb) + iph->ihl * 4; |
| 106 | struct flowi4 *fl4 = &fl->u.ip4; | 106 | struct flowi4 *fl4 = &fl->u.ip4; |
| 107 | int oif = 0; | ||
| 108 | |||
| 109 | if (skb_dst(skb)) | ||
| 110 | oif = skb_dst(skb)->dev->ifindex; | ||
| 107 | 111 | ||
| 108 | memset(fl4, 0, sizeof(struct flowi4)); | 112 | memset(fl4, 0, sizeof(struct flowi4)); |
| 109 | fl4->flowi4_mark = skb->mark; | 113 | fl4->flowi4_mark = skb->mark; |
| 114 | fl4->flowi4_oif = reverse ? skb->skb_iif : oif; | ||
| 110 | 115 | ||
| 111 | if (!ip_is_fragment(iph)) { | 116 | if (!ip_is_fragment(iph)) { |
| 112 | switch (iph->protocol) { | 117 | switch (iph->protocol) { |
| @@ -235,7 +240,7 @@ static struct dst_ops xfrm4_dst_ops = { | |||
| 235 | .destroy = xfrm4_dst_destroy, | 240 | .destroy = xfrm4_dst_destroy, |
| 236 | .ifdown = xfrm4_dst_ifdown, | 241 | .ifdown = xfrm4_dst_ifdown, |
| 237 | .local_out = __ip_local_out, | 242 | .local_out = __ip_local_out, |
| 238 | .gc_thresh = 1024, | 243 | .gc_thresh = 32768, |
| 239 | }; | 244 | }; |
| 240 | 245 | ||
| 241 | static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { | 246 | static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { |
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 73784c3d4642..82e1da3a40b9 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
| @@ -618,8 +618,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 618 | struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset); | 618 | struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset); |
| 619 | struct xfrm_state *x; | 619 | struct xfrm_state *x; |
| 620 | 620 | ||
| 621 | if (type != ICMPV6_DEST_UNREACH && | 621 | if (type != ICMPV6_PKT_TOOBIG && |
| 622 | type != ICMPV6_PKT_TOOBIG && | ||
| 623 | type != NDISC_REDIRECT) | 622 | type != NDISC_REDIRECT) |
| 624 | return; | 623 | return; |
| 625 | 624 | ||
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index d3618a78fcac..e67e63f9858d 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
| @@ -436,8 +436,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 436 | struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); | 436 | struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); |
| 437 | struct xfrm_state *x; | 437 | struct xfrm_state *x; |
| 438 | 438 | ||
| 439 | if (type != ICMPV6_DEST_UNREACH && | 439 | if (type != ICMPV6_PKT_TOOBIG && |
| 440 | type != ICMPV6_PKT_TOOBIG && | ||
| 441 | type != NDISC_REDIRECT) | 440 | type != NDISC_REDIRECT) |
| 442 | return; | 441 | return; |
| 443 | 442 | ||
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 32b4a1675d82..066640e0ba8e 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c | |||
| @@ -116,7 +116,7 @@ begintw: | |||
| 116 | } | 116 | } |
| 117 | if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr, | 117 | if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr, |
| 118 | ports, dif))) { | 118 | ports, dif))) { |
| 119 | sock_put(sk); | 119 | inet_twsk_put(inet_twsk(sk)); |
| 120 | goto begintw; | 120 | goto begintw; |
| 121 | } | 121 | } |
| 122 | goto out; | 122 | goto out; |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 7bb5446b9d73..bf4a9a084de5 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -976,6 +976,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) | |||
| 976 | if (t->parms.o_flags&GRE_SEQ) | 976 | if (t->parms.o_flags&GRE_SEQ) |
| 977 | addend += 4; | 977 | addend += 4; |
| 978 | } | 978 | } |
| 979 | t->hlen = addend; | ||
| 979 | 980 | ||
| 980 | if (p->flags & IP6_TNL_F_CAP_XMIT) { | 981 | if (p->flags & IP6_TNL_F_CAP_XMIT) { |
| 981 | int strict = (ipv6_addr_type(&p->raddr) & | 982 | int strict = (ipv6_addr_type(&p->raddr) & |
| @@ -1002,8 +1003,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) | |||
| 1002 | } | 1003 | } |
| 1003 | ip6_rt_put(rt); | 1004 | ip6_rt_put(rt); |
| 1004 | } | 1005 | } |
| 1005 | |||
| 1006 | t->hlen = addend; | ||
| 1007 | } | 1006 | } |
| 1008 | 1007 | ||
| 1009 | static int ip6gre_tnl_change(struct ip6_tnl *t, | 1008 | static int ip6gre_tnl_change(struct ip6_tnl *t, |
| @@ -1173,9 +1172,8 @@ done: | |||
| 1173 | 1172 | ||
| 1174 | static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu) | 1173 | static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu) |
| 1175 | { | 1174 | { |
| 1176 | struct ip6_tnl *tunnel = netdev_priv(dev); | ||
| 1177 | if (new_mtu < 68 || | 1175 | if (new_mtu < 68 || |
| 1178 | new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen) | 1176 | new_mtu > 0xFFF8 - dev->hard_header_len) |
| 1179 | return -EINVAL; | 1177 | return -EINVAL; |
| 1180 | dev->mtu = new_mtu; | 1178 | dev->mtu = new_mtu; |
| 1181 | return 0; | 1179 | return 0; |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a54c45ce4a48..91fb4e8212f5 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -105,7 +105,7 @@ static int ip6_finish_output2(struct sk_buff *skb) | |||
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | rcu_read_lock_bh(); | 107 | rcu_read_lock_bh(); |
| 108 | nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); | 108 | nexthop = rt6_nexthop((struct rt6_info *)dst); |
| 109 | neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); | 109 | neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); |
| 110 | if (unlikely(!neigh)) | 110 | if (unlikely(!neigh)) |
| 111 | neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); | 111 | neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); |
| @@ -874,7 +874,7 @@ static int ip6_dst_lookup_tail(struct sock *sk, | |||
| 874 | */ | 874 | */ |
| 875 | rt = (struct rt6_info *) *dst; | 875 | rt = (struct rt6_info *) *dst; |
| 876 | rcu_read_lock_bh(); | 876 | rcu_read_lock_bh(); |
| 877 | n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr)); | 877 | n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt)); |
| 878 | err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0; | 878 | err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0; |
| 879 | rcu_read_unlock_bh(); | 879 | rcu_read_unlock_bh(); |
| 880 | 880 | ||
| @@ -1008,6 +1008,7 @@ static inline int ip6_ufo_append_data(struct sock *sk, | |||
| 1008 | 1008 | ||
| 1009 | { | 1009 | { |
| 1010 | struct sk_buff *skb; | 1010 | struct sk_buff *skb; |
| 1011 | struct frag_hdr fhdr; | ||
| 1011 | int err; | 1012 | int err; |
| 1012 | 1013 | ||
| 1013 | /* There is support for UDP large send offload by network | 1014 | /* There is support for UDP large send offload by network |
| @@ -1015,8 +1016,6 @@ static inline int ip6_ufo_append_data(struct sock *sk, | |||
| 1015 | * udp datagram | 1016 | * udp datagram |
| 1016 | */ | 1017 | */ |
| 1017 | if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { | 1018 | if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { |
| 1018 | struct frag_hdr fhdr; | ||
| 1019 | |||
| 1020 | skb = sock_alloc_send_skb(sk, | 1019 | skb = sock_alloc_send_skb(sk, |
| 1021 | hh_len + fragheaderlen + transhdrlen + 20, | 1020 | hh_len + fragheaderlen + transhdrlen + 20, |
| 1022 | (flags & MSG_DONTWAIT), &err); | 1021 | (flags & MSG_DONTWAIT), &err); |
| @@ -1036,20 +1035,24 @@ static inline int ip6_ufo_append_data(struct sock *sk, | |||
| 1036 | skb->transport_header = skb->network_header + fragheaderlen; | 1035 | skb->transport_header = skb->network_header + fragheaderlen; |
| 1037 | 1036 | ||
| 1038 | skb->protocol = htons(ETH_P_IPV6); | 1037 | skb->protocol = htons(ETH_P_IPV6); |
| 1039 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
| 1040 | skb->csum = 0; | 1038 | skb->csum = 0; |
| 1041 | 1039 | ||
| 1042 | /* Specify the length of each IPv6 datagram fragment. | ||
| 1043 | * It has to be a multiple of 8. | ||
| 1044 | */ | ||
| 1045 | skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - | ||
| 1046 | sizeof(struct frag_hdr)) & ~7; | ||
| 1047 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; | ||
| 1048 | ipv6_select_ident(&fhdr, rt); | ||
| 1049 | skb_shinfo(skb)->ip6_frag_id = fhdr.identification; | ||
| 1050 | __skb_queue_tail(&sk->sk_write_queue, skb); | 1040 | __skb_queue_tail(&sk->sk_write_queue, skb); |
| 1041 | } else if (skb_is_gso(skb)) { | ||
| 1042 | goto append; | ||
| 1051 | } | 1043 | } |
| 1052 | 1044 | ||
| 1045 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
| 1046 | /* Specify the length of each IPv6 datagram fragment. | ||
| 1047 | * It has to be a multiple of 8. | ||
| 1048 | */ | ||
| 1049 | skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - | ||
| 1050 | sizeof(struct frag_hdr)) & ~7; | ||
| 1051 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; | ||
| 1052 | ipv6_select_ident(&fhdr, rt); | ||
| 1053 | skb_shinfo(skb)->ip6_frag_id = fhdr.identification; | ||
| 1054 | |||
| 1055 | append: | ||
| 1053 | return skb_append_datato_frags(sk, skb, getfrag, from, | 1056 | return skb_append_datato_frags(sk, skb, getfrag, from, |
| 1054 | (length - transhdrlen)); | 1057 | (length - transhdrlen)); |
| 1055 | } | 1058 | } |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index a791552e0422..583b77e2f69b 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
| @@ -1430,9 +1430,17 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 1430 | static int | 1430 | static int |
| 1431 | ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) | 1431 | ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) |
| 1432 | { | 1432 | { |
| 1433 | if (new_mtu < IPV6_MIN_MTU) { | 1433 | struct ip6_tnl *tnl = netdev_priv(dev); |
| 1434 | return -EINVAL; | 1434 | |
| 1435 | if (tnl->parms.proto == IPPROTO_IPIP) { | ||
| 1436 | if (new_mtu < 68) | ||
| 1437 | return -EINVAL; | ||
| 1438 | } else { | ||
| 1439 | if (new_mtu < IPV6_MIN_MTU) | ||
| 1440 | return -EINVAL; | ||
| 1435 | } | 1441 | } |
| 1442 | if (new_mtu > 0xFFF8 - dev->hard_header_len) | ||
| 1443 | return -EINVAL; | ||
| 1436 | dev->mtu = new_mtu; | 1444 | dev->mtu = new_mtu; |
| 1437 | return 0; | 1445 | return 0; |
| 1438 | } | 1446 | } |
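The ip6_tnl_change_mtu() rewrite picks the lower MTU bound from the encapsulated protocol (68 for IPv4 payloads, IPV6_MIN_MTU otherwise) and adds an upper bound that leaves room for the link-layer header. A small user-space sketch of that validation; the constants and names below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define IPV4_MIN_MTU	68
#define IPV6_MIN_MTU	1280
#define MAX_IP_PAYLOAD	0xFFF8

static bool tunnel_mtu_valid(int new_mtu, bool payload_is_ipv4, int hard_header_len)
{
	int min_mtu = payload_is_ipv4 ? IPV4_MIN_MTU : IPV6_MIN_MTU;

	if (new_mtu < min_mtu)
		return false;
	if (new_mtu > MAX_IP_PAYLOAD - hard_header_len)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", tunnel_mtu_valid(1000, true, 14));	/* 1 */
	printf("%d\n", tunnel_mtu_valid(1000, false, 14));	/* 0: below IPv6 minimum */
	return 0;
}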
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 5636a912074a..ce507d9e1c90 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c | |||
| @@ -64,8 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 64 | (struct ip_comp_hdr *)(skb->data + offset); | 64 | (struct ip_comp_hdr *)(skb->data + offset); |
| 65 | struct xfrm_state *x; | 65 | struct xfrm_state *x; |
| 66 | 66 | ||
| 67 | if (type != ICMPV6_DEST_UNREACH && | 67 | if (type != ICMPV6_PKT_TOOBIG && |
| 68 | type != ICMPV6_PKT_TOOBIG && | ||
| 69 | type != NDISC_REDIRECT) | 68 | type != NDISC_REDIRECT) |
| 70 | return; | 69 | return; |
| 71 | 70 | ||
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 44400c216dc6..710238f58aa9 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
| @@ -349,6 +349,11 @@ ip6t_do_table(struct sk_buff *skb, | |||
| 349 | local_bh_disable(); | 349 | local_bh_disable(); |
| 350 | addend = xt_write_recseq_begin(); | 350 | addend = xt_write_recseq_begin(); |
| 351 | private = table->private; | 351 | private = table->private; |
| 352 | /* | ||
| 353 | * Ensure we load private-> members after we've fetched the base | ||
| 354 | * pointer. | ||
| 355 | */ | ||
| 356 | smp_read_barrier_depends(); | ||
| 352 | cpu = smp_processor_id(); | 357 | cpu = smp_processor_id(); |
| 353 | table_base = private->entries[cpu]; | 358 | table_base = private->entries[cpu]; |
| 354 | jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; | 359 | jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c979dd96d82a..04e17b3309fb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -476,6 +476,24 @@ out: | |||
| 476 | } | 476 | } |
| 477 | 477 | ||
| 478 | #ifdef CONFIG_IPV6_ROUTER_PREF | 478 | #ifdef CONFIG_IPV6_ROUTER_PREF |
| 479 | struct __rt6_probe_work { | ||
| 480 | struct work_struct work; | ||
| 481 | struct in6_addr target; | ||
| 482 | struct net_device *dev; | ||
| 483 | }; | ||
| 484 | |||
| 485 | static void rt6_probe_deferred(struct work_struct *w) | ||
| 486 | { | ||
| 487 | struct in6_addr mcaddr; | ||
| 488 | struct __rt6_probe_work *work = | ||
| 489 | container_of(w, struct __rt6_probe_work, work); | ||
| 490 | |||
| 491 | addrconf_addr_solict_mult(&work->target, &mcaddr); | ||
| 492 | ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL); | ||
| 493 | dev_put(work->dev); | ||
| 494 | kfree(w); | ||
| 495 | } | ||
| 496 | |||
| 479 | static void rt6_probe(struct rt6_info *rt) | 497 | static void rt6_probe(struct rt6_info *rt) |
| 480 | { | 498 | { |
| 481 | struct neighbour *neigh; | 499 | struct neighbour *neigh; |
| @@ -499,17 +517,23 @@ static void rt6_probe(struct rt6_info *rt) | |||
| 499 | 517 | ||
| 500 | if (!neigh || | 518 | if (!neigh || |
| 501 | time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { | 519 | time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { |
| 502 | struct in6_addr mcaddr; | 520 | struct __rt6_probe_work *work; |
| 503 | struct in6_addr *target; | 521 | |
| 522 | work = kmalloc(sizeof(*work), GFP_ATOMIC); | ||
| 504 | 523 | ||
| 505 | if (neigh) { | 524 | if (neigh && work) |
| 506 | neigh->updated = jiffies; | 525 | neigh->updated = jiffies; |
| 526 | |||
| 527 | if (neigh) | ||
| 507 | write_unlock(&neigh->lock); | 528 | write_unlock(&neigh->lock); |
| 508 | } | ||
| 509 | 529 | ||
| 510 | target = (struct in6_addr *)&rt->rt6i_gateway; | 530 | if (work) { |
| 511 | addrconf_addr_solict_mult(target, &mcaddr); | 531 | INIT_WORK(&work->work, rt6_probe_deferred); |
| 512 | ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL); | 532 | work->target = rt->rt6i_gateway; |
| 533 | dev_hold(rt->dst.dev); | ||
| 534 | work->dev = rt->dst.dev; | ||
| 535 | schedule_work(&work->work); | ||
| 536 | } | ||
| 513 | } else { | 537 | } else { |
| 514 | out: | 538 | out: |
| 515 | write_unlock(&neigh->lock); | 539 | write_unlock(&neigh->lock); |
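The rt6_probe() rework captures the probe target and device into a heap-allocated work item while the neighbour lock is held, and lets rt6_probe_deferred() send the actual solicitation later from process context. A much-simplified user-space sketch of capture-under-lock, run-outside-lock; the kernel defers to a workqueue, whereas here the "work" simply runs after the unlock, and all names are invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct probe_work {
	char target[16];		/* captured while the lock is held */
};

static pthread_mutex_t neigh_lock = PTHREAD_MUTEX_INITIALIZER;

static void run_probe(struct probe_work *w)
{
	printf("probing %s\n", w->target);	/* may sleep in real life */
	free(w);
}

static void maybe_probe(const char *target)
{
	struct probe_work *w;

	pthread_mutex_lock(&neigh_lock);
	w = malloc(sizeof(*w));
	if (w)
		snprintf(w->target, sizeof(w->target), "%s", target);
	pthread_mutex_unlock(&neigh_lock);

	/* Only now, outside the lock, is the slow part allowed to run. */
	if (w)
		run_probe(w);
}

int main(void)
{
	maybe_probe("fe80::1");
	return 0;
}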
| @@ -851,7 +875,6 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, | |||
| 851 | if (ort->rt6i_dst.plen != 128 && | 875 | if (ort->rt6i_dst.plen != 128 && |
| 852 | ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) | 876 | ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) |
| 853 | rt->rt6i_flags |= RTF_ANYCAST; | 877 | rt->rt6i_flags |= RTF_ANYCAST; |
| 854 | rt->rt6i_gateway = *daddr; | ||
| 855 | } | 878 | } |
| 856 | 879 | ||
| 857 | rt->rt6i_flags |= RTF_CACHE; | 880 | rt->rt6i_flags |= RTF_CACHE; |
| @@ -1064,10 +1087,13 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) | |||
| 1064 | if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev))) | 1087 | if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev))) |
| 1065 | return NULL; | 1088 | return NULL; |
| 1066 | 1089 | ||
| 1067 | if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) | 1090 | if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie)) |
| 1068 | return dst; | 1091 | return NULL; |
| 1069 | 1092 | ||
| 1070 | return NULL; | 1093 | if (rt6_check_expired(rt)) |
| 1094 | return NULL; | ||
| 1095 | |||
| 1096 | return dst; | ||
| 1071 | } | 1097 | } |
| 1072 | 1098 | ||
| 1073 | static struct dst_entry *ip6_negative_advice(struct dst_entry *dst) | 1099 | static struct dst_entry *ip6_negative_advice(struct dst_entry *dst) |
| @@ -1338,6 +1364,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, | |||
| 1338 | rt->dst.flags |= DST_HOST; | 1364 | rt->dst.flags |= DST_HOST; |
| 1339 | rt->dst.output = ip6_output; | 1365 | rt->dst.output = ip6_output; |
| 1340 | atomic_set(&rt->dst.__refcnt, 1); | 1366 | atomic_set(&rt->dst.__refcnt, 1); |
| 1367 | rt->rt6i_gateway = fl6->daddr; | ||
| 1341 | rt->rt6i_dst.addr = fl6->daddr; | 1368 | rt->rt6i_dst.addr = fl6->daddr; |
| 1342 | rt->rt6i_dst.plen = 128; | 1369 | rt->rt6i_dst.plen = 128; |
| 1343 | rt->rt6i_idev = idev; | 1370 | rt->rt6i_idev = idev; |
| @@ -1873,7 +1900,10 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, | |||
| 1873 | in6_dev_hold(rt->rt6i_idev); | 1900 | in6_dev_hold(rt->rt6i_idev); |
| 1874 | rt->dst.lastuse = jiffies; | 1901 | rt->dst.lastuse = jiffies; |
| 1875 | 1902 | ||
| 1876 | rt->rt6i_gateway = ort->rt6i_gateway; | 1903 | if (ort->rt6i_flags & RTF_GATEWAY) |
| 1904 | rt->rt6i_gateway = ort->rt6i_gateway; | ||
| 1905 | else | ||
| 1906 | rt->rt6i_gateway = *dest; | ||
| 1877 | rt->rt6i_flags = ort->rt6i_flags; | 1907 | rt->rt6i_flags = ort->rt6i_flags; |
| 1878 | if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == | 1908 | if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == |
| 1879 | (RTF_DEFAULT | RTF_ADDRCONF)) | 1909 | (RTF_DEFAULT | RTF_ADDRCONF)) |
| @@ -2160,6 +2190,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
| 2160 | else | 2190 | else |
| 2161 | rt->rt6i_flags |= RTF_LOCAL; | 2191 | rt->rt6i_flags |= RTF_LOCAL; |
| 2162 | 2192 | ||
| 2193 | rt->rt6i_gateway = *addr; | ||
| 2163 | rt->rt6i_dst.addr = *addr; | 2194 | rt->rt6i_dst.addr = *addr; |
| 2164 | rt->rt6i_dst.plen = 128; | 2195 | rt->rt6i_dst.plen = 128; |
| 2165 | rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); | 2196 | rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 72b7eaaf3ca0..18786098fd41 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -1225,9 +1225,6 @@ do_udp_sendmsg: | |||
| 1225 | if (tclass < 0) | 1225 | if (tclass < 0) |
| 1226 | tclass = np->tclass; | 1226 | tclass = np->tclass; |
| 1227 | 1227 | ||
| 1228 | if (dontfrag < 0) | ||
| 1229 | dontfrag = np->dontfrag; | ||
| 1230 | |||
| 1231 | if (msg->msg_flags&MSG_CONFIRM) | 1228 | if (msg->msg_flags&MSG_CONFIRM) |
| 1232 | goto do_confirm; | 1229 | goto do_confirm; |
| 1233 | back_from_confirm: | 1230 | back_from_confirm: |
| @@ -1246,6 +1243,8 @@ back_from_confirm: | |||
| 1246 | up->pending = AF_INET6; | 1243 | up->pending = AF_INET6; |
| 1247 | 1244 | ||
| 1248 | do_append_data: | 1245 | do_append_data: |
| 1246 | if (dontfrag < 0) | ||
| 1247 | dontfrag = np->dontfrag; | ||
| 1249 | up->len += ulen; | 1248 | up->len += ulen; |
| 1250 | getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; | 1249 | getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; |
| 1251 | err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, | 1250 | err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, |
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 23ed03d786c8..5f8e128c512d 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
| @@ -135,9 +135,14 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
| 135 | struct ipv6_opt_hdr *exthdr; | 135 | struct ipv6_opt_hdr *exthdr; |
| 136 | const unsigned char *nh = skb_network_header(skb); | 136 | const unsigned char *nh = skb_network_header(skb); |
| 137 | u8 nexthdr = nh[IP6CB(skb)->nhoff]; | 137 | u8 nexthdr = nh[IP6CB(skb)->nhoff]; |
| 138 | int oif = 0; | ||
| 139 | |||
| 140 | if (skb_dst(skb)) | ||
| 141 | oif = skb_dst(skb)->dev->ifindex; | ||
| 138 | 142 | ||
| 139 | memset(fl6, 0, sizeof(struct flowi6)); | 143 | memset(fl6, 0, sizeof(struct flowi6)); |
| 140 | fl6->flowi6_mark = skb->mark; | 144 | fl6->flowi6_mark = skb->mark; |
| 145 | fl6->flowi6_oif = reverse ? skb->skb_iif : oif; | ||
| 141 | 146 | ||
| 142 | fl6->daddr = reverse ? hdr->saddr : hdr->daddr; | 147 | fl6->daddr = reverse ? hdr->saddr : hdr->daddr; |
| 143 | fl6->saddr = reverse ? hdr->daddr : hdr->saddr; | 148 | fl6->saddr = reverse ? hdr->daddr : hdr->saddr; |
| @@ -284,7 +289,7 @@ static struct dst_ops xfrm6_dst_ops = { | |||
| 284 | .destroy = xfrm6_dst_destroy, | 289 | .destroy = xfrm6_dst_destroy, |
| 285 | .ifdown = xfrm6_dst_ifdown, | 290 | .ifdown = xfrm6_dst_ifdown, |
| 286 | .local_out = __ip6_local_out, | 291 | .local_out = __ip6_local_out, |
| 287 | .gc_thresh = 1024, | 292 | .gc_thresh = 32768, |
| 288 | }; | 293 | }; |
| 289 | 294 | ||
| 290 | static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { | 295 | static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 9d585370c5b4..911ef03bf8fb 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
| @@ -1098,7 +1098,8 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, | |||
| 1098 | 1098 | ||
| 1099 | x->id.proto = proto; | 1099 | x->id.proto = proto; |
| 1100 | x->id.spi = sa->sadb_sa_spi; | 1100 | x->id.spi = sa->sadb_sa_spi; |
| 1101 | x->props.replay_window = sa->sadb_sa_replay; | 1101 | x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay, |
| 1102 | (sizeof(x->replay.bitmap) * 8)); | ||
| 1102 | if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN) | 1103 | if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN) |
| 1103 | x->props.flags |= XFRM_STATE_NOECN; | 1104 | x->props.flags |= XFRM_STATE_NOECN; |
| 1104 | if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP) | 1105 | if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP) |
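The af_key change clamps the userspace-supplied replay window to the number of bits in the replay bitmap instead of trusting sadb_sa_replay directly. A tiny user-space sketch of that clamp; the 32-slot bitmap is an assumption for illustration:

#include <stdio.h>

struct replay_state {
	unsigned int bitmap;		/* 32 replay slots in this sketch */
	unsigned int replay_window;
};

static unsigned int clamp_replay_window(unsigned int requested,
					const struct replay_state *rs)
{
	unsigned int max_window = sizeof(rs->bitmap) * 8;

	return requested < max_window ? requested : max_window;
}

int main(void)
{
	struct replay_state rs = { 0 };

	rs.replay_window = clamp_replay_window(255, &rs);
	printf("window clamped to %u\n", rs.replay_window);	/* 32 */
	return 0;
}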
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index feae495a0a30..b076e8309bc2 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
| @@ -115,6 +115,11 @@ struct l2tp_net { | |||
| 115 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version); | 115 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version); |
| 116 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | 116 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); |
| 117 | 117 | ||
| 118 | static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) | ||
| 119 | { | ||
| 120 | return sk->sk_user_data; | ||
| 121 | } | ||
| 122 | |||
| 118 | static inline struct l2tp_net *l2tp_pernet(struct net *net) | 123 | static inline struct l2tp_net *l2tp_pernet(struct net *net) |
| 119 | { | 124 | { |
| 120 | BUG_ON(!net); | 125 | BUG_ON(!net); |
| @@ -504,7 +509,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk, | |||
| 504 | return 0; | 509 | return 0; |
| 505 | 510 | ||
| 506 | #if IS_ENABLED(CONFIG_IPV6) | 511 | #if IS_ENABLED(CONFIG_IPV6) |
| 507 | if (sk->sk_family == PF_INET6) { | 512 | if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) { |
| 508 | if (!uh->check) { | 513 | if (!uh->check) { |
| 509 | LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n"); | 514 | LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n"); |
| 510 | return 1; | 515 | return 1; |
| @@ -1128,7 +1133,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, | |||
| 1128 | /* Queue the packet to IP for output */ | 1133 | /* Queue the packet to IP for output */ |
| 1129 | skb->local_df = 1; | 1134 | skb->local_df = 1; |
| 1130 | #if IS_ENABLED(CONFIG_IPV6) | 1135 | #if IS_ENABLED(CONFIG_IPV6) |
| 1131 | if (skb->sk->sk_family == PF_INET6) | 1136 | if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped) |
| 1132 | error = inet6_csk_xmit(skb, NULL); | 1137 | error = inet6_csk_xmit(skb, NULL); |
| 1133 | else | 1138 | else |
| 1134 | #endif | 1139 | #endif |
| @@ -1255,7 +1260,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len | |||
| 1255 | 1260 | ||
| 1256 | /* Calculate UDP checksum if configured to do so */ | 1261 | /* Calculate UDP checksum if configured to do so */ |
| 1257 | #if IS_ENABLED(CONFIG_IPV6) | 1262 | #if IS_ENABLED(CONFIG_IPV6) |
| 1258 | if (sk->sk_family == PF_INET6) | 1263 | if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) |
| 1259 | l2tp_xmit_ipv6_csum(sk, skb, udp_len); | 1264 | l2tp_xmit_ipv6_csum(sk, skb, udp_len); |
| 1260 | else | 1265 | else |
| 1261 | #endif | 1266 | #endif |
| @@ -1304,10 +1309,9 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb); | |||
| 1304 | */ | 1309 | */ |
| 1305 | static void l2tp_tunnel_destruct(struct sock *sk) | 1310 | static void l2tp_tunnel_destruct(struct sock *sk) |
| 1306 | { | 1311 | { |
| 1307 | struct l2tp_tunnel *tunnel; | 1312 | struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); |
| 1308 | struct l2tp_net *pn; | 1313 | struct l2tp_net *pn; |
| 1309 | 1314 | ||
| 1310 | tunnel = sk->sk_user_data; | ||
| 1311 | if (tunnel == NULL) | 1315 | if (tunnel == NULL) |
| 1312 | goto end; | 1316 | goto end; |
| 1313 | 1317 | ||
| @@ -1675,7 +1679,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
| 1675 | } | 1679 | } |
| 1676 | 1680 | ||
| 1677 | /* Check if this socket has already been prepped */ | 1681 | /* Check if this socket has already been prepped */ |
| 1678 | tunnel = (struct l2tp_tunnel *)sk->sk_user_data; | 1682 | tunnel = l2tp_tunnel(sk); |
| 1679 | if (tunnel != NULL) { | 1683 | if (tunnel != NULL) { |
| 1680 | /* This socket has already been prepped */ | 1684 | /* This socket has already been prepped */ |
| 1681 | err = -EBUSY; | 1685 | err = -EBUSY; |
| @@ -1704,6 +1708,24 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
| 1704 | if (cfg != NULL) | 1708 | if (cfg != NULL) |
| 1705 | tunnel->debug = cfg->debug; | 1709 | tunnel->debug = cfg->debug; |
| 1706 | 1710 | ||
| 1711 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 1712 | if (sk->sk_family == PF_INET6) { | ||
| 1713 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
| 1714 | |||
| 1715 | if (ipv6_addr_v4mapped(&np->saddr) && | ||
| 1716 | ipv6_addr_v4mapped(&np->daddr)) { | ||
| 1717 | struct inet_sock *inet = inet_sk(sk); | ||
| 1718 | |||
| 1719 | tunnel->v4mapped = true; | ||
| 1720 | inet->inet_saddr = np->saddr.s6_addr32[3]; | ||
| 1721 | inet->inet_rcv_saddr = np->rcv_saddr.s6_addr32[3]; | ||
| 1722 | inet->inet_daddr = np->daddr.s6_addr32[3]; | ||
| 1723 | } else { | ||
| 1724 | tunnel->v4mapped = false; | ||
| 1725 | } | ||
| 1726 | } | ||
| 1727 | #endif | ||
| 1728 | |||
| 1707 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ | 1729 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ |
| 1708 | tunnel->encap = encap; | 1730 | tunnel->encap = encap; |
| 1709 | if (encap == L2TP_ENCAPTYPE_UDP) { | 1731 | if (encap == L2TP_ENCAPTYPE_UDP) { |
| @@ -1712,7 +1734,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
| 1712 | udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; | 1734 | udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; |
| 1713 | udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy; | 1735 | udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy; |
| 1714 | #if IS_ENABLED(CONFIG_IPV6) | 1736 | #if IS_ENABLED(CONFIG_IPV6) |
| 1715 | if (sk->sk_family == PF_INET6) | 1737 | if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) |
| 1716 | udpv6_encap_enable(); | 1738 | udpv6_encap_enable(); |
| 1717 | else | 1739 | else |
| 1718 | #endif | 1740 | #endif |
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 66a559b104b6..6f251cbc2ed7 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
| @@ -194,6 +194,9 @@ struct l2tp_tunnel { | |||
| 194 | struct sock *sock; /* Parent socket */ | 194 | struct sock *sock; /* Parent socket */ |
| 195 | int fd; /* Parent fd, if tunnel socket | 195 | int fd; /* Parent fd, if tunnel socket |
| 196 | * was created by userspace */ | 196 | * was created by userspace */ |
| 197 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 198 | bool v4mapped; | ||
| 199 | #endif | ||
| 197 | 200 | ||
| 198 | struct work_struct del_work; | 201 | struct work_struct del_work; |
| 199 | 202 | ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 5ebee2ded9e9..8c46b271064a 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
| @@ -353,7 +353,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh | |||
| 353 | goto error_put_sess_tun; | 353 | goto error_put_sess_tun; |
| 354 | } | 354 | } |
| 355 | 355 | ||
| 356 | local_bh_disable(); | ||
| 356 | l2tp_xmit_skb(session, skb, session->hdr_len); | 357 | l2tp_xmit_skb(session, skb, session->hdr_len); |
| 358 | local_bh_enable(); | ||
| 357 | 359 | ||
| 358 | sock_put(ps->tunnel_sock); | 360 | sock_put(ps->tunnel_sock); |
| 359 | sock_put(sk); | 361 | sock_put(sk); |
| @@ -422,7 +424,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
| 422 | skb->data[0] = ppph[0]; | 424 | skb->data[0] = ppph[0]; |
| 423 | skb->data[1] = ppph[1]; | 425 | skb->data[1] = ppph[1]; |
| 424 | 426 | ||
| 427 | local_bh_disable(); | ||
| 425 | l2tp_xmit_skb(session, skb, session->hdr_len); | 428 | l2tp_xmit_skb(session, skb, session->hdr_len); |
| 429 | local_bh_enable(); | ||
| 426 | 430 | ||
| 427 | sock_put(sk_tun); | 431 | sock_put(sk_tun); |
| 428 | sock_put(sk); | 432 | sock_put(sk); |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 2e7855a1b10d..629dee7ec9bf 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
| @@ -3518,7 +3518,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, | |||
| 3518 | return -EINVAL; | 3518 | return -EINVAL; |
| 3519 | } | 3519 | } |
| 3520 | band = chanctx_conf->def.chan->band; | 3520 | band = chanctx_conf->def.chan->band; |
| 3521 | sta = sta_info_get(sdata, peer); | 3521 | sta = sta_info_get_bss(sdata, peer); |
| 3522 | if (sta) { | 3522 | if (sta) { |
| 3523 | qos = test_sta_flag(sta, WLAN_STA_WME); | 3523 | qos = test_sta_flag(sta, WLAN_STA_WME); |
| 3524 | } else { | 3524 | } else { |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index b6186517ec56..611abfcfb5eb 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
| @@ -893,6 +893,8 @@ struct tpt_led_trigger { | |||
| 893 | * that the scan completed. | 893 | * that the scan completed. |
| 894 | * @SCAN_ABORTED: Set for our scan work function when the driver reported | 894 | * @SCAN_ABORTED: Set for our scan work function when the driver reported |
| 895 | * a scan complete for an aborted scan. | 895 | * a scan complete for an aborted scan. |
| 896 | * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being | ||
| 897 | * cancelled. | ||
| 896 | */ | 898 | */ |
| 897 | enum { | 899 | enum { |
| 898 | SCAN_SW_SCANNING, | 900 | SCAN_SW_SCANNING, |
| @@ -900,6 +902,7 @@ enum { | |||
| 900 | SCAN_ONCHANNEL_SCANNING, | 902 | SCAN_ONCHANNEL_SCANNING, |
| 901 | SCAN_COMPLETED, | 903 | SCAN_COMPLETED, |
| 902 | SCAN_ABORTED, | 904 | SCAN_ABORTED, |
| 905 | SCAN_HW_CANCELLED, | ||
| 903 | }; | 906 | }; |
| 904 | 907 | ||
| 905 | /** | 908 | /** |
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index acd1f71adc03..0c2a29484c07 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c | |||
| @@ -394,6 +394,8 @@ void ieee80211_sw_roc_work(struct work_struct *work) | |||
| 394 | 394 | ||
| 395 | if (started) | 395 | if (started) |
| 396 | ieee80211_start_next_roc(local); | 396 | ieee80211_start_next_roc(local); |
| 397 | else if (list_empty(&local->roc_list)) | ||
| 398 | ieee80211_run_deferred_scan(local); | ||
| 397 | } | 399 | } |
| 398 | 400 | ||
| 399 | out_unlock: | 401 | out_unlock: |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 54395d7583ba..674eac1f996c 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -3056,6 +3056,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx, | |||
| 3056 | case NL80211_IFTYPE_ADHOC: | 3056 | case NL80211_IFTYPE_ADHOC: |
| 3057 | if (!bssid) | 3057 | if (!bssid) |
| 3058 | return 0; | 3058 | return 0; |
| 3059 | if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || | ||
| 3060 | ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) | ||
| 3061 | return 0; | ||
| 3059 | if (ieee80211_is_beacon(hdr->frame_control)) { | 3062 | if (ieee80211_is_beacon(hdr->frame_control)) { |
| 3060 | return 1; | 3063 | return 1; |
| 3061 | } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { | 3064 | } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { |
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 08afe74b98f4..d2d17a449224 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
| @@ -238,6 +238,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) | |||
| 238 | enum ieee80211_band band; | 238 | enum ieee80211_band band; |
| 239 | int i, ielen, n_chans; | 239 | int i, ielen, n_chans; |
| 240 | 240 | ||
| 241 | if (test_bit(SCAN_HW_CANCELLED, &local->scanning)) | ||
| 242 | return false; | ||
| 243 | |||
| 241 | do { | 244 | do { |
| 242 | if (local->hw_scan_band == IEEE80211_NUM_BANDS) | 245 | if (local->hw_scan_band == IEEE80211_NUM_BANDS) |
| 243 | return false; | 246 | return false; |
| @@ -940,7 +943,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local) | |||
| 940 | if (!local->scan_req) | 943 | if (!local->scan_req) |
| 941 | goto out; | 944 | goto out; |
| 942 | 945 | ||
| 946 | /* | ||
| 947 | * We have a scan running and the driver already reported completion, | ||
| 948 | * but the worker hasn't run yet or is stuck on the mutex - mark it as | ||
| 949 | * cancelled. | ||
| 950 | */ | ||
| 951 | if (test_bit(SCAN_HW_SCANNING, &local->scanning) && | ||
| 952 | test_bit(SCAN_COMPLETED, &local->scanning)) { | ||
| 953 | set_bit(SCAN_HW_CANCELLED, &local->scanning); | ||
| 954 | goto out; | ||
| 955 | } | ||
| 956 | |||
| 943 | if (test_bit(SCAN_HW_SCANNING, &local->scanning)) { | 957 | if (test_bit(SCAN_HW_SCANNING, &local->scanning)) { |
| 958 | /* | ||
| 959 | * Make sure that __ieee80211_scan_completed doesn't trigger a | ||
| 960 | * scan on another band. | ||
| 961 | */ | ||
| 962 | set_bit(SCAN_HW_CANCELLED, &local->scanning); | ||
| 944 | if (local->ops->cancel_hw_scan) | 963 | if (local->ops->cancel_hw_scan) |
| 945 | drv_cancel_hw_scan(local, | 964 | drv_cancel_hw_scan(local, |
| 946 | rcu_dereference_protected(local->scan_sdata, | 965 | rcu_dereference_protected(local->scan_sdata, |
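
The scan.c hunks above introduce a SCAN_HW_CANCELLED bit so that ieee80211_scan_cancel() can tell the deferred scan work (or a later ieee80211_prep_hw_scan() call) that the scan was cancelled, preventing it from starting another band. A minimal sketch of the same idea, a cancellation bit checked by deferred work, written here with C11 atomics rather than the kernel's set_bit/test_bit; names are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical scan state bits, loosely modelled on the mac80211 flags. */
enum { SCANNING = 1u << 0, COMPLETED = 1u << 1, CANCELLED = 1u << 2 };

static atomic_uint scan_state;

/* Cancel path: mark the running scan as cancelled. */
static void scan_cancel(void)
{
    atomic_fetch_or(&scan_state, CANCELLED);
}

/* Deferred worker: called before setting up the next band. */
static bool prep_next_band(void)
{
    if (atomic_load(&scan_state) & CANCELLED)
        return false;            /* do not start another band */
    /* ... program the hardware for the next band here ... */
    return true;
}

int main(void)
{
    atomic_fetch_or(&scan_state, SCANNING);
    scan_cancel();
    printf("start next band: %s\n", prep_next_band() ? "yes" : "no");
    return 0;
}
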
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 368837fe3b80..78dc2e99027e 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
| @@ -180,6 +180,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) | |||
| 180 | struct ieee80211_local *local = sta->local; | 180 | struct ieee80211_local *local = sta->local; |
| 181 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 181 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
| 182 | 182 | ||
| 183 | if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) | ||
| 184 | sta->last_rx = jiffies; | ||
| 185 | |||
| 183 | if (ieee80211_is_data_qos(mgmt->frame_control)) { | 186 | if (ieee80211_is_data_qos(mgmt->frame_control)) { |
| 184 | struct ieee80211_hdr *hdr = (void *) skb->data; | 187 | struct ieee80211_hdr *hdr = (void *) skb->data; |
| 185 | u8 *qc = ieee80211_get_qos_ctl(hdr); | 188 | u8 *qc = ieee80211_get_qos_ctl(hdr); |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 3456c0486b48..70b5a05c0a4e 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
| @@ -1120,7 +1120,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, | |||
| 1120 | tx->sta = rcu_dereference(sdata->u.vlan.sta); | 1120 | tx->sta = rcu_dereference(sdata->u.vlan.sta); |
| 1121 | if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) | 1121 | if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) |
| 1122 | return TX_DROP; | 1122 | return TX_DROP; |
| 1123 | } else if (info->flags & IEEE80211_TX_CTL_INJECTED || | 1123 | } else if (info->flags & (IEEE80211_TX_CTL_INJECTED | |
| 1124 | IEEE80211_TX_INTFL_NL80211_FRAME_TX) || | ||
| 1124 | tx->sdata->control_port_protocol == tx->skb->protocol) { | 1125 | tx->sdata->control_port_protocol == tx->skb->protocol) { |
| 1125 | tx->sta = sta_info_get_bss(sdata, hdr->addr1); | 1126 | tx->sta = sta_info_get_bss(sdata, hdr->addr1); |
| 1126 | } | 1127 | } |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index e1b34a18b243..69e4ef5348a0 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
| @@ -2103,7 +2103,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, | |||
| 2103 | { | 2103 | { |
| 2104 | struct ieee80211_local *local = sdata->local; | 2104 | struct ieee80211_local *local = sdata->local; |
| 2105 | struct ieee80211_supported_band *sband; | 2105 | struct ieee80211_supported_band *sband; |
| 2106 | int rate, skip, shift; | 2106 | int rate, shift; |
| 2107 | u8 i, exrates, *pos; | 2107 | u8 i, exrates, *pos; |
| 2108 | u32 basic_rates = sdata->vif.bss_conf.basic_rates; | 2108 | u32 basic_rates = sdata->vif.bss_conf.basic_rates; |
| 2109 | u32 rate_flags; | 2109 | u32 rate_flags; |
| @@ -2131,14 +2131,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata, | |||
| 2131 | pos = skb_put(skb, exrates + 2); | 2131 | pos = skb_put(skb, exrates + 2); |
| 2132 | *pos++ = WLAN_EID_EXT_SUPP_RATES; | 2132 | *pos++ = WLAN_EID_EXT_SUPP_RATES; |
| 2133 | *pos++ = exrates; | 2133 | *pos++ = exrates; |
| 2134 | skip = 0; | ||
| 2135 | for (i = 8; i < sband->n_bitrates; i++) { | 2134 | for (i = 8; i < sband->n_bitrates; i++) { |
| 2136 | u8 basic = 0; | 2135 | u8 basic = 0; |
| 2137 | if ((rate_flags & sband->bitrates[i].flags) | 2136 | if ((rate_flags & sband->bitrates[i].flags) |
| 2138 | != rate_flags) | 2137 | != rate_flags) |
| 2139 | continue; | 2138 | continue; |
| 2140 | if (skip++ < 8) | ||
| 2141 | continue; | ||
| 2142 | if (need_basic && basic_rates & BIT(i)) | 2139 | if (need_basic && basic_rates & BIT(i)) |
| 2143 | basic = 0x80; | 2140 | basic = 0x80; |
| 2144 | rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, | 2141 | rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, |
| @@ -2241,6 +2238,10 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, | |||
| 2241 | } | 2238 | } |
| 2242 | 2239 | ||
| 2243 | rate = cfg80211_calculate_bitrate(&ri); | 2240 | rate = cfg80211_calculate_bitrate(&ri); |
| 2241 | if (WARN_ONCE(!rate, | ||
| 2242 | "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n", | ||
| 2243 | status->flag, status->rate_idx, status->vht_nss)) | ||
| 2244 | return 0; | ||
| 2244 | 2245 | ||
| 2245 | /* rewind from end of MPDU */ | 2246 | /* rewind from end of MPDU */ |
| 2246 | if (status->flag & RX_FLAG_MACTIME_END) | 2247 | if (status->flag & RX_FLAG_MACTIME_END) |
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index bdebd03bc8cd..70866d192efc 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
| @@ -778,8 +778,8 @@ static int callforward_do_filter(const union nf_inet_addr *src, | |||
| 778 | flowi6_to_flowi(&fl1), false)) { | 778 | flowi6_to_flowi(&fl1), false)) { |
| 779 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, | 779 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, |
| 780 | flowi6_to_flowi(&fl2), false)) { | 780 | flowi6_to_flowi(&fl2), false)) { |
| 781 | if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, | 781 | if (ipv6_addr_equal(rt6_nexthop(rt1), |
| 782 | sizeof(rt1->rt6i_gateway)) && | 782 | rt6_nexthop(rt2)) && |
| 783 | rt1->dst.dev == rt2->dst.dev) | 783 | rt1->dst.dev == rt2->dst.dev) |
| 784 | ret = 1; | 784 | ret = 1; |
| 785 | dst_release(&rt2->dst); | 785 | dst_release(&rt2->dst); |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 8b03028cca69..227aa11e8409 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
| @@ -845,8 +845,13 @@ xt_replace_table(struct xt_table *table, | |||
| 845 | return NULL; | 845 | return NULL; |
| 846 | } | 846 | } |
| 847 | 847 | ||
| 848 | table->private = newinfo; | ||
| 849 | newinfo->initial_entries = private->initial_entries; | 848 | newinfo->initial_entries = private->initial_entries; |
| 849 | /* | ||
| 850 | * Ensure contents of newinfo are visible before assigning to | ||
| 851 | * private. | ||
| 852 | */ | ||
| 853 | smp_wmb(); | ||
| 854 | table->private = newinfo; | ||
| 850 | 855 | ||
| 851 | /* | 856 | /* |
| 852 | * Even though table entries have now been swapped, other CPU's | 857 | * Even though table entries have now been swapped, other CPU's |
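
The xt_replace_table() change finishes initializing newinfo and issues a write barrier before publishing it through table->private, so lockless readers can never observe the pointer before its contents. A minimal userspace sketch of the same publish-after-init pattern, using a C11 release store in place of the kernel's smp_wmb(); the names below are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct table_info {
    unsigned int initial_entries;
    /* ... rule blob, counters, ... */
};

/* Readers load this pointer locklessly. */
static _Atomic(struct table_info *) table_private;

static void replace_table(struct table_info *newinfo, unsigned int initial)
{
    /* 1) finish writing the new object ... */
    newinfo->initial_entries = initial;

    /* 2) ... then publish it; the release store orders the writes above
     *    before the pointer becomes visible to concurrent readers.
     */
    atomic_store_explicit(&table_private, newinfo, memory_order_release);
}

int main(void)
{
    struct table_info *info = malloc(sizeof(*info));
    replace_table(info, 128);

    struct table_info *cur =
        atomic_load_explicit(&table_private, memory_order_acquire);
    printf("initial_entries=%u\n", cur->initial_entries);
    free(info);
    return 0;
}
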
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c index 1e2fae32f81b..ed00fef58996 100644 --- a/net/netfilter/xt_NFQUEUE.c +++ b/net/netfilter/xt_NFQUEUE.c | |||
| @@ -147,6 +147,7 @@ nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 147 | { | 147 | { |
| 148 | const struct xt_NFQ_info_v3 *info = par->targinfo; | 148 | const struct xt_NFQ_info_v3 *info = par->targinfo; |
| 149 | u32 queue = info->queuenum; | 149 | u32 queue = info->queuenum; |
| 150 | int ret; | ||
| 150 | 151 | ||
| 151 | if (info->queues_total > 1) { | 152 | if (info->queues_total > 1) { |
| 152 | if (info->flags & NFQ_FLAG_CPU_FANOUT) { | 153 | if (info->flags & NFQ_FLAG_CPU_FANOUT) { |
| @@ -157,7 +158,11 @@ nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 157 | queue = nfqueue_hash(skb, par); | 158 | queue = nfqueue_hash(skb, par); |
| 158 | } | 159 | } |
| 159 | 160 | ||
| 160 | return NF_QUEUE_NR(queue); | 161 | ret = NF_QUEUE_NR(queue); |
| 162 | if (info->flags & NFQ_FLAG_BYPASS) | ||
| 163 | ret |= NF_VERDICT_FLAG_QUEUE_BYPASS; | ||
| 164 | |||
| 165 | return ret; | ||
| 161 | } | 166 | } |
| 162 | 167 | ||
| 163 | static struct xt_target nfqueue_tg_reg[] __read_mostly = { | 168 | static struct xt_target nfqueue_tg_reg[] __read_mostly = { |
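
nfqueue_tg_v3() now honours NFQ_FLAG_BYPASS by OR-ing the queue-bypass flag into the verdict, so packets are accepted rather than dropped when no userspace program is bound to the queue. A hedged sketch of how such a verdict word can be composed; the constants mirror the uapi netfilter values as I understand them and are meant as an illustration, not a copy of the headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: base verdict in the low byte, flags in the middle bits,
 * target queue number in the upper 16 bits.
 */
#define NF_QUEUE                     3u
#define NF_VERDICT_FLAG_QUEUE_BYPASS 0x00008000u
#define NF_VERDICT_QBITS             16u

static uint32_t nf_queue_nr(uint16_t queue)
{
    return ((uint32_t)queue << NF_VERDICT_QBITS) | NF_QUEUE;
}

static uint32_t nfqueue_verdict(uint16_t queue, int bypass)
{
    uint32_t ret = nf_queue_nr(queue);

    if (bypass)
        ret |= NF_VERDICT_FLAG_QUEUE_BYPASS; /* accept if queue is unbound */
    return ret;
}

int main(void)
{
    printf("verdict=0x%08x\n", nfqueue_verdict(7, 1));
    return 0;
}
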
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c index c3235675f359..5c2dab276109 100644 --- a/net/openvswitch/dp_notify.c +++ b/net/openvswitch/dp_notify.c | |||
| @@ -65,8 +65,7 @@ void ovs_dp_notify_wq(struct work_struct *work) | |||
| 65 | continue; | 65 | continue; |
| 66 | 66 | ||
| 67 | netdev_vport = netdev_vport_priv(vport); | 67 | netdev_vport = netdev_vport_priv(vport); |
| 68 | if (netdev_vport->dev->reg_state == NETREG_UNREGISTERED || | 68 | if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)) |
| 69 | netdev_vport->dev->reg_state == NETREG_UNREGISTERING) | ||
| 70 | dp_detach_port_notify(vport); | 69 | dp_detach_port_notify(vport); |
| 71 | } | 70 | } |
| 72 | } | 71 | } |
| @@ -88,6 +87,10 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event, | |||
| 88 | return NOTIFY_DONE; | 87 | return NOTIFY_DONE; |
| 89 | 88 | ||
| 90 | if (event == NETDEV_UNREGISTER) { | 89 | if (event == NETDEV_UNREGISTER) { |
| 90 | /* upper_dev_unlink and decrement promisc immediately */ | ||
| 91 | ovs_netdev_detach_dev(vport); | ||
| 92 | |||
| 93 | /* schedule vport destroy, dev_put and genl notification */ | ||
| 91 | ovs_net = net_generic(dev_net(dev), ovs_net_id); | 94 | ovs_net = net_generic(dev_net(dev), ovs_net_id); |
| 92 | queue_work(system_wq, &ovs_net->dp_notify_work); | 95 | queue_work(system_wq, &ovs_net->dp_notify_work); |
| 93 | } | 96 | } |
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 09d93c13cfd6..d21f77d875ba 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c | |||
| @@ -150,15 +150,25 @@ static void free_port_rcu(struct rcu_head *rcu) | |||
| 150 | ovs_vport_free(vport_from_priv(netdev_vport)); | 150 | ovs_vport_free(vport_from_priv(netdev_vport)); |
| 151 | } | 151 | } |
| 152 | 152 | ||
| 153 | static void netdev_destroy(struct vport *vport) | 153 | void ovs_netdev_detach_dev(struct vport *vport) |
| 154 | { | 154 | { |
| 155 | struct netdev_vport *netdev_vport = netdev_vport_priv(vport); | 155 | struct netdev_vport *netdev_vport = netdev_vport_priv(vport); |
| 156 | 156 | ||
| 157 | rtnl_lock(); | 157 | ASSERT_RTNL(); |
| 158 | netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH; | 158 | netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH; |
| 159 | netdev_rx_handler_unregister(netdev_vport->dev); | 159 | netdev_rx_handler_unregister(netdev_vport->dev); |
| 160 | netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp)); | 160 | netdev_upper_dev_unlink(netdev_vport->dev, |
| 161 | netdev_master_upper_dev_get(netdev_vport->dev)); | ||
| 161 | dev_set_promiscuity(netdev_vport->dev, -1); | 162 | dev_set_promiscuity(netdev_vport->dev, -1); |
| 163 | } | ||
| 164 | |||
| 165 | static void netdev_destroy(struct vport *vport) | ||
| 166 | { | ||
| 167 | struct netdev_vport *netdev_vport = netdev_vport_priv(vport); | ||
| 168 | |||
| 169 | rtnl_lock(); | ||
| 170 | if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH) | ||
| 171 | ovs_netdev_detach_dev(vport); | ||
| 162 | rtnl_unlock(); | 172 | rtnl_unlock(); |
| 163 | 173 | ||
| 164 | call_rcu(&netdev_vport->rcu, free_port_rcu); | 174 | call_rcu(&netdev_vport->rcu, free_port_rcu); |
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h index dd298b5c5cdb..8df01c1127e5 100644 --- a/net/openvswitch/vport-netdev.h +++ b/net/openvswitch/vport-netdev.h | |||
| @@ -39,5 +39,6 @@ netdev_vport_priv(const struct vport *vport) | |||
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | const char *ovs_netdev_get_name(const struct vport *); | 41 | const char *ovs_netdev_get_name(const struct vport *); |
| 42 | void ovs_netdev_detach_dev(struct vport *); | ||
| 42 | 43 | ||
| 43 | #endif /* vport_netdev.h */ | 44 | #endif /* vport_netdev.h */ |
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index a2fef8b10b96..fdc041c57853 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c | |||
| @@ -255,6 +255,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) | |||
| 255 | f->socket_hash != sk->sk_hash)) { | 255 | f->socket_hash != sk->sk_hash)) { |
| 256 | f->credit = q->initial_quantum; | 256 | f->credit = q->initial_quantum; |
| 257 | f->socket_hash = sk->sk_hash; | 257 | f->socket_hash = sk->sk_hash; |
| 258 | f->time_next_packet = 0ULL; | ||
| 258 | } | 259 | } |
| 259 | return f; | 260 | return f; |
| 260 | } | 261 | } |
| @@ -472,20 +473,16 @@ begin: | |||
| 472 | if (f->credit > 0 || !q->rate_enable) | 473 | if (f->credit > 0 || !q->rate_enable) |
| 473 | goto out; | 474 | goto out; |
| 474 | 475 | ||
| 475 | if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) { | 476 | rate = q->flow_max_rate; |
| 476 | rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate; | 477 | if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) |
| 478 | rate = min(skb->sk->sk_pacing_rate, rate); | ||
| 477 | 479 | ||
| 478 | rate = min(rate, q->flow_max_rate); | 480 | if (rate != ~0U) { |
| 479 | } else { | ||
| 480 | rate = q->flow_max_rate; | ||
| 481 | if (rate == ~0U) | ||
| 482 | goto out; | ||
| 483 | } | ||
| 484 | if (rate) { | ||
| 485 | u32 plen = max(qdisc_pkt_len(skb), q->quantum); | 481 | u32 plen = max(qdisc_pkt_len(skb), q->quantum); |
| 486 | u64 len = (u64)plen * NSEC_PER_SEC; | 482 | u64 len = (u64)plen * NSEC_PER_SEC; |
| 487 | 483 | ||
| 488 | do_div(len, rate); | 484 | if (likely(rate)) |
| 485 | do_div(len, rate); | ||
| 489 | /* Since socket rate can change later, | 486 | /* Since socket rate can change later, |
| 490 | * clamp the delay to 125 ms. | 487 | * clamp the delay to 125 ms. |
| 491 | * TODO: maybe segment the too big skb, as in commit | 488 | * TODO: maybe segment the too big skb, as in commit |
| @@ -656,7 +653,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) | |||
| 656 | q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); | 653 | q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); |
| 657 | 654 | ||
| 658 | if (tb[TCA_FQ_INITIAL_QUANTUM]) | 655 | if (tb[TCA_FQ_INITIAL_QUANTUM]) |
| 659 | q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); | 656 | q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); |
| 660 | 657 | ||
| 661 | if (tb[TCA_FQ_FLOW_DEFAULT_RATE]) | 658 | if (tb[TCA_FQ_FLOW_DEFAULT_RATE]) |
| 662 | q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]); | 659 | q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]); |
| @@ -735,12 +732,14 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
| 735 | if (opts == NULL) | 732 | if (opts == NULL) |
| 736 | goto nla_put_failure; | 733 | goto nla_put_failure; |
| 737 | 734 | ||
| 735 | /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore, | ||
| 736 | * do not bother giving its value | ||
| 737 | */ | ||
| 738 | if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) || | 738 | if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) || |
| 739 | nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) || | 739 | nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) || |
| 740 | nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) || | 740 | nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) || |
| 741 | nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || | 741 | nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || |
| 742 | nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || | 742 | nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || |
| 743 | nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) || | ||
| 744 | nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) || | 743 | nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) || |
| 745 | nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log)) | 744 | nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log)) |
| 746 | goto nla_put_failure; | 745 | goto nla_put_failure; |
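
Besides fixing the copy/paste in fq_change() (TCA_FQ_INITIAL_QUANTUM was overwriting q->quantum), the sch_fq hunks rework pacing: sk_pacing_rate is now capped by flow_max_rate instead of used as a fallback, and the per-packet delay is computed only when a finite rate is configured. A small sketch of that delay computation under the same assumptions (delay in ns = packet length * NSEC_PER_SEC / rate, clamped to 125 ms).

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define UNLIMITED    (~0U)

/* Illustrative pacing helper: nanoseconds before the next packet of a
 * flow that just sent 'plen' bytes at 'rate' bytes per second.
 */
static uint64_t pacing_delay_ns(uint32_t plen, uint32_t rate)
{
    uint64_t len;

    if (rate == UNLIMITED)
        return 0;                       /* pacing disabled */

    len = (uint64_t)plen * NSEC_PER_SEC;
    if (rate)
        len /= rate;

    /* The socket rate can change later, so clamp to 125 ms as sch_fq does. */
    if (len > 125ULL * NSEC_PER_SEC / 1000)
        len = 125ULL * NSEC_PER_SEC / 1000;
    return len;
}

int main(void)
{
    /* 64 KB at 1 MB/s -> 65.536 ms */
    printf("%llu ns\n", (unsigned long long)pacing_delay_ns(65536, 1000000));
    return 0;
}
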
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index a6d788d45216..b87e83d07478 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
| @@ -358,6 +358,21 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche | |||
| 358 | return PSCHED_NS2TICKS(ticks); | 358 | return PSCHED_NS2TICKS(ticks); |
| 359 | } | 359 | } |
| 360 | 360 | ||
| 361 | static void tfifo_reset(struct Qdisc *sch) | ||
| 362 | { | ||
| 363 | struct netem_sched_data *q = qdisc_priv(sch); | ||
| 364 | struct rb_node *p; | ||
| 365 | |||
| 366 | while ((p = rb_first(&q->t_root))) { | ||
| 367 | struct sk_buff *skb = netem_rb_to_skb(p); | ||
| 368 | |||
| 369 | rb_erase(p, &q->t_root); | ||
| 370 | skb->next = NULL; | ||
| 371 | skb->prev = NULL; | ||
| 372 | kfree_skb(skb); | ||
| 373 | } | ||
| 374 | } | ||
| 375 | |||
| 361 | static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) | 376 | static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) |
| 362 | { | 377 | { |
| 363 | struct netem_sched_data *q = qdisc_priv(sch); | 378 | struct netem_sched_data *q = qdisc_priv(sch); |
| @@ -520,6 +535,7 @@ static unsigned int netem_drop(struct Qdisc *sch) | |||
| 520 | skb->next = NULL; | 535 | skb->next = NULL; |
| 521 | skb->prev = NULL; | 536 | skb->prev = NULL; |
| 522 | len = qdisc_pkt_len(skb); | 537 | len = qdisc_pkt_len(skb); |
| 538 | sch->qstats.backlog -= len; | ||
| 523 | kfree_skb(skb); | 539 | kfree_skb(skb); |
| 524 | } | 540 | } |
| 525 | } | 541 | } |
| @@ -609,6 +625,7 @@ static void netem_reset(struct Qdisc *sch) | |||
| 609 | struct netem_sched_data *q = qdisc_priv(sch); | 625 | struct netem_sched_data *q = qdisc_priv(sch); |
| 610 | 626 | ||
| 611 | qdisc_reset_queue(sch); | 627 | qdisc_reset_queue(sch); |
| 628 | tfifo_reset(sch); | ||
| 612 | if (q->qdisc) | 629 | if (q->qdisc) |
| 613 | qdisc_reset(q->qdisc); | 630 | qdisc_reset(q->qdisc); |
| 614 | qdisc_watchdog_cancel(&q->watchdog); | 631 | qdisc_watchdog_cancel(&q->watchdog); |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index e7b2d4fe2b6a..96a55910262c 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
| @@ -279,7 +279,9 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
| 279 | sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port)); | 279 | sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port)); |
| 280 | rcu_read_lock(); | 280 | rcu_read_lock(); |
| 281 | list_for_each_entry_rcu(laddr, &bp->address_list, list) { | 281 | list_for_each_entry_rcu(laddr, &bp->address_list, list) { |
| 282 | if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC)) | 282 | if (!laddr->valid || laddr->state == SCTP_ADDR_DEL || |
| 283 | (laddr->state != SCTP_ADDR_SRC && | ||
| 284 | !asoc->src_out_of_asoc_ok)) | ||
| 283 | continue; | 285 | continue; |
| 284 | 286 | ||
| 285 | /* Do not compare against v4 addrs */ | 287 | /* Do not compare against v4 addrs */ |
diff --git a/net/sctp/output.c b/net/sctp/output.c index 0ac3a65daccb..319137340d15 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
| @@ -536,7 +536,8 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
| 536 | * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. | 536 | * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. |
| 537 | */ | 537 | */ |
| 538 | if (!sctp_checksum_disable) { | 538 | if (!sctp_checksum_disable) { |
| 539 | if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) { | 539 | if (!(dst->dev->features & NETIF_F_SCTP_CSUM) || |
| 540 | (dst_xfrm(dst) != NULL) || packet->ipfragok) { | ||
| 540 | __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); | 541 | __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); |
| 541 | 542 | ||
| 542 | /* 3) Put the resultant value into the checksum field in the | 543 | /* 3) Put the resultant value into the checksum field in the |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 666c66842799..1a6eef39ab2f 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
| @@ -860,7 +860,6 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds, | |||
| 860 | (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK)) | 860 | (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK)) |
| 861 | return; | 861 | return; |
| 862 | 862 | ||
| 863 | BUG_ON(asoc->peer.primary_path == NULL); | ||
| 864 | sctp_unhash_established(asoc); | 863 | sctp_unhash_established(asoc); |
| 865 | sctp_association_free(asoc); | 864 | sctp_association_free(asoc); |
| 866 | } | 865 | } |
diff --git a/net/socket.c b/net/socket.c index ebed4b68f768..c226aceee65b 100644 --- a/net/socket.c +++ b/net/socket.c | |||
| @@ -1964,6 +1964,16 @@ struct used_address { | |||
| 1964 | unsigned int name_len; | 1964 | unsigned int name_len; |
| 1965 | }; | 1965 | }; |
| 1966 | 1966 | ||
| 1967 | static int copy_msghdr_from_user(struct msghdr *kmsg, | ||
| 1968 | struct msghdr __user *umsg) | ||
| 1969 | { | ||
| 1970 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) | ||
| 1971 | return -EFAULT; | ||
| 1972 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) | ||
| 1973 | return -EINVAL; | ||
| 1974 | return 0; | ||
| 1975 | } | ||
| 1976 | |||
| 1967 | static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, | 1977 | static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, |
| 1968 | struct msghdr *msg_sys, unsigned int flags, | 1978 | struct msghdr *msg_sys, unsigned int flags, |
| 1969 | struct used_address *used_address) | 1979 | struct used_address *used_address) |
| @@ -1982,8 +1992,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, | |||
| 1982 | if (MSG_CMSG_COMPAT & flags) { | 1992 | if (MSG_CMSG_COMPAT & flags) { |
| 1983 | if (get_compat_msghdr(msg_sys, msg_compat)) | 1993 | if (get_compat_msghdr(msg_sys, msg_compat)) |
| 1984 | return -EFAULT; | 1994 | return -EFAULT; |
| 1985 | } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) | 1995 | } else { |
| 1986 | return -EFAULT; | 1996 | err = copy_msghdr_from_user(msg_sys, msg); |
| 1997 | if (err) | ||
| 1998 | return err; | ||
| 1999 | } | ||
| 1987 | 2000 | ||
| 1988 | if (msg_sys->msg_iovlen > UIO_FASTIOV) { | 2001 | if (msg_sys->msg_iovlen > UIO_FASTIOV) { |
| 1989 | err = -EMSGSIZE; | 2002 | err = -EMSGSIZE; |
| @@ -2191,8 +2204,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, | |||
| 2191 | if (MSG_CMSG_COMPAT & flags) { | 2204 | if (MSG_CMSG_COMPAT & flags) { |
| 2192 | if (get_compat_msghdr(msg_sys, msg_compat)) | 2205 | if (get_compat_msghdr(msg_sys, msg_compat)) |
| 2193 | return -EFAULT; | 2206 | return -EFAULT; |
| 2194 | } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) | 2207 | } else { |
| 2195 | return -EFAULT; | 2208 | err = copy_msghdr_from_user(msg_sys, msg); |
| 2209 | if (err) | ||
| 2210 | return err; | ||
| 2211 | } | ||
| 2196 | 2212 | ||
| 2197 | if (msg_sys->msg_iovlen > UIO_FASTIOV) { | 2213 | if (msg_sys->msg_iovlen > UIO_FASTIOV) { |
| 2198 | err = -EMSGSIZE; | 2214 | err = -EMSGSIZE; |
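
The new copy_msghdr_from_user() helper rejects a user-supplied msg_namelen larger than sizeof(struct sockaddr_storage) before anything is copied, instead of trusting it in both the sendmsg and recvmsg paths. A minimal sketch of the same validate-then-copy rule; the helper name and struct here are illustrative, not the kernel's.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

struct user_msg {
    void *msg_name;
    int   msg_namelen;      /* caller-controlled, must be validated */
};

/* Validate the length before trusting it for the copy. */
static int copy_msg_name(struct sockaddr_storage *kaddr,
                         const struct user_msg *umsg)
{
    if (umsg->msg_namelen < 0 ||
        (size_t)umsg->msg_namelen > sizeof(*kaddr))
        return -EINVAL;
    memcpy(kaddr, umsg->msg_name, umsg->msg_namelen);
    return 0;
}

int main(void)
{
    struct sockaddr_storage ss;
    struct sockaddr_in sin = { .sin_family = AF_INET };
    struct user_msg ok  = { &sin, sizeof(sin) };
    struct user_msg bad = { &sin, 4096 };

    printf("ok:  %d\n", copy_msg_name(&ss, &ok));
    printf("bad: %d\n", copy_msg_name(&ss, &bad));
    return 0;
}
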
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 86de99ad2976..c1f403bed683 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -1246,6 +1246,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb) | |||
| 1246 | return 0; | 1246 | return 0; |
| 1247 | } | 1247 | } |
| 1248 | 1248 | ||
| 1249 | static void unix_sock_inherit_flags(const struct socket *old, | ||
| 1250 | struct socket *new) | ||
| 1251 | { | ||
| 1252 | if (test_bit(SOCK_PASSCRED, &old->flags)) | ||
| 1253 | set_bit(SOCK_PASSCRED, &new->flags); | ||
| 1254 | if (test_bit(SOCK_PASSSEC, &old->flags)) | ||
| 1255 | set_bit(SOCK_PASSSEC, &new->flags); | ||
| 1256 | } | ||
| 1257 | |||
| 1249 | static int unix_accept(struct socket *sock, struct socket *newsock, int flags) | 1258 | static int unix_accept(struct socket *sock, struct socket *newsock, int flags) |
| 1250 | { | 1259 | { |
| 1251 | struct sock *sk = sock->sk; | 1260 | struct sock *sk = sock->sk; |
| @@ -1280,6 +1289,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags) | |||
| 1280 | /* attach accepted sock to socket */ | 1289 | /* attach accepted sock to socket */ |
| 1281 | unix_state_lock(tsk); | 1290 | unix_state_lock(tsk); |
| 1282 | newsock->state = SS_CONNECTED; | 1291 | newsock->state = SS_CONNECTED; |
| 1292 | unix_sock_inherit_flags(sock, newsock); | ||
| 1283 | sock_graft(tsk, newsock); | 1293 | sock_graft(tsk, newsock); |
| 1284 | unix_state_unlock(tsk); | 1294 | unix_state_unlock(tsk); |
| 1285 | return 0; | 1295 | return 0; |
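
unix_accept() now copies SOCK_PASSCRED and SOCK_PASSSEC from the listening socket to the accepted one, so options set before accept() apply to the new connection. From userspace the effect can be observed with SO_PASSCRED; a short sketch of that round-trip on an abstract-namespace listener (error handling trimmed, so this is a demo rather than production code). With this change in place the accepted socket reports the option as set.

#define _GNU_SOURCE
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
    struct sockaddr_un addr = { .sun_family = AF_UNIX };
    socklen_t alen = offsetof(struct sockaddr_un, sun_path) + 14;
    int one = 1, val = 0;
    socklen_t vlen = sizeof(val);
    int lsk, csk, ask;

    /* Abstract-namespace name, so no filesystem cleanup is needed. */
    memcpy(addr.sun_path, "\0passcred-demo", 14);

    lsk = socket(AF_UNIX, SOCK_STREAM, 0);
    bind(lsk, (struct sockaddr *)&addr, alen);
    listen(lsk, 1);

    /* Option set on the listener before the connection arrives. */
    setsockopt(lsk, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));

    csk = socket(AF_UNIX, SOCK_STREAM, 0);
    connect(csk, (struct sockaddr *)&addr, alen);

    ask = accept(lsk, NULL, NULL);
    getsockopt(ask, SOL_SOCKET, SO_PASSCRED, &val, &vlen);
    printf("SO_PASSCRED on accepted socket: %d\n", val);

    close(ask); close(csk); close(lsk);
    return 0;
}
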
diff --git a/net/unix/diag.c b/net/unix/diag.c index d591091603bf..86fa0f3b2caf 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c | |||
| @@ -124,6 +124,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r | |||
| 124 | rep->udiag_family = AF_UNIX; | 124 | rep->udiag_family = AF_UNIX; |
| 125 | rep->udiag_type = sk->sk_type; | 125 | rep->udiag_type = sk->sk_type; |
| 126 | rep->udiag_state = sk->sk_state; | 126 | rep->udiag_state = sk->sk_state; |
| 127 | rep->pad = 0; | ||
| 127 | rep->udiag_ino = sk_ino; | 128 | rep->udiag_ino = sk_ino; |
| 128 | sock_diag_save_cookie(sk, rep->udiag_cookie); | 129 | sock_diag_save_cookie(sk, rep->udiag_cookie); |
| 129 | 130 | ||
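
sk_diag_fill() now zeroes the pad field of the reply before it is sent to userspace, closing a small infoleak through uninitialized padding. A general sketch of the defensive pattern (clear the whole reply, then fill the fields), using an illustrative structure rather than the real uapi one.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative wire structure with explicit padding. */
struct diag_reply {
    uint8_t  family;
    uint8_t  type;
    uint8_t  state;
    uint8_t  pad;        /* would carry stale memory if left unset */
    uint32_t inode;
};

static void fill_reply(struct diag_reply *rep, uint32_t inode)
{
    memset(rep, 0, sizeof(*rep));   /* nothing uninitialized escapes */
    rep->family = 1;
    rep->type   = 1;
    rep->state  = 3;
    rep->inode  = inode;
}

int main(void)
{
    struct diag_reply rep;
    fill_reply(&rep, 4242);
    printf("pad=%u inode=%u\n", rep.pad, rep.inode);
    return 0;
}
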
diff --git a/net/wireless/core.c b/net/wireless/core.c index 67153964aad2..aff959e5a1b3 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
| @@ -566,18 +566,13 @@ int wiphy_register(struct wiphy *wiphy) | |||
| 566 | /* check and set up bitrates */ | 566 | /* check and set up bitrates */ |
| 567 | ieee80211_set_bitrate_flags(wiphy); | 567 | ieee80211_set_bitrate_flags(wiphy); |
| 568 | 568 | ||
| 569 | 569 | rtnl_lock(); | |
| 570 | res = device_add(&rdev->wiphy.dev); | 570 | res = device_add(&rdev->wiphy.dev); |
| 571 | if (res) | ||
| 572 | return res; | ||
| 573 | |||
| 574 | res = rfkill_register(rdev->rfkill); | ||
| 575 | if (res) { | 571 | if (res) { |
| 576 | device_del(&rdev->wiphy.dev); | 572 | rtnl_unlock(); |
| 577 | return res; | 573 | return res; |
| 578 | } | 574 | } |
| 579 | 575 | ||
| 580 | rtnl_lock(); | ||
| 581 | /* set up regulatory info */ | 576 | /* set up regulatory info */ |
| 582 | wiphy_regulatory_register(wiphy); | 577 | wiphy_regulatory_register(wiphy); |
| 583 | 578 | ||
| @@ -606,6 +601,15 @@ int wiphy_register(struct wiphy *wiphy) | |||
| 606 | 601 | ||
| 607 | rdev->wiphy.registered = true; | 602 | rdev->wiphy.registered = true; |
| 608 | rtnl_unlock(); | 603 | rtnl_unlock(); |
| 604 | |||
| 605 | res = rfkill_register(rdev->rfkill); | ||
| 606 | if (res) { | ||
| 607 | rfkill_destroy(rdev->rfkill); | ||
| 608 | rdev->rfkill = NULL; | ||
| 609 | wiphy_unregister(&rdev->wiphy); | ||
| 610 | return res; | ||
| 611 | } | ||
| 612 | |||
| 609 | return 0; | 613 | return 0; |
| 610 | } | 614 | } |
| 611 | EXPORT_SYMBOL(wiphy_register); | 615 | EXPORT_SYMBOL(wiphy_register); |
| @@ -640,7 +644,8 @@ void wiphy_unregister(struct wiphy *wiphy) | |||
| 640 | rtnl_unlock(); | 644 | rtnl_unlock(); |
| 641 | __count == 0; })); | 645 | __count == 0; })); |
| 642 | 646 | ||
| 643 | rfkill_unregister(rdev->rfkill); | 647 | if (rdev->rfkill) |
| 648 | rfkill_unregister(rdev->rfkill); | ||
| 644 | 649 | ||
| 645 | rtnl_lock(); | 650 | rtnl_lock(); |
| 646 | rdev->wiphy.registered = false; | 651 | rdev->wiphy.registered = false; |
| @@ -953,8 +958,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |||
| 953 | case NETDEV_PRE_UP: | 958 | case NETDEV_PRE_UP: |
| 954 | if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) | 959 | if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) |
| 955 | return notifier_from_errno(-EOPNOTSUPP); | 960 | return notifier_from_errno(-EOPNOTSUPP); |
| 956 | if (rfkill_blocked(rdev->rfkill)) | ||
| 957 | return notifier_from_errno(-ERFKILL); | ||
| 958 | ret = cfg80211_can_add_interface(rdev, wdev->iftype); | 961 | ret = cfg80211_can_add_interface(rdev, wdev->iftype); |
| 959 | if (ret) | 962 | if (ret) |
| 960 | return notifier_from_errno(ret); | 963 | return notifier_from_errno(ret); |
diff --git a/net/wireless/core.h b/net/wireless/core.h index 9ad43c619c54..3159e9c284c5 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
| @@ -411,6 +411,9 @@ static inline int | |||
| 411 | cfg80211_can_add_interface(struct cfg80211_registered_device *rdev, | 411 | cfg80211_can_add_interface(struct cfg80211_registered_device *rdev, |
| 412 | enum nl80211_iftype iftype) | 412 | enum nl80211_iftype iftype) |
| 413 | { | 413 | { |
| 414 | if (rfkill_blocked(rdev->rfkill)) | ||
| 415 | return -ERFKILL; | ||
| 416 | |||
| 414 | return cfg80211_can_change_interface(rdev, NULL, iftype); | 417 | return cfg80211_can_change_interface(rdev, NULL, iftype); |
| 415 | } | 418 | } |
| 416 | 419 | ||
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 39bff7d36768..403fe29c024d 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c | |||
| @@ -263,6 +263,8 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev, | |||
| 263 | if (chan->flags & IEEE80211_CHAN_DISABLED) | 263 | if (chan->flags & IEEE80211_CHAN_DISABLED) |
| 264 | continue; | 264 | continue; |
| 265 | wdev->wext.ibss.chandef.chan = chan; | 265 | wdev->wext.ibss.chandef.chan = chan; |
| 266 | wdev->wext.ibss.chandef.center_freq1 = | ||
| 267 | chan->center_freq; | ||
| 266 | break; | 268 | break; |
| 267 | } | 269 | } |
| 268 | 270 | ||
| @@ -347,6 +349,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev, | |||
| 347 | if (chan) { | 349 | if (chan) { |
| 348 | wdev->wext.ibss.chandef.chan = chan; | 350 | wdev->wext.ibss.chandef.chan = chan; |
| 349 | wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; | 351 | wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT; |
| 352 | wdev->wext.ibss.chandef.center_freq1 = freq; | ||
| 350 | wdev->wext.ibss.channel_fixed = true; | 353 | wdev->wext.ibss.channel_fixed = true; |
| 351 | } else { | 354 | } else { |
| 352 | /* cfg80211_ibss_wext_join will pick one if needed */ | 355 | /* cfg80211_ibss_wext_join will pick one if needed */ |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index af8d84a4a5b2..626dc3b5fd8d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -2421,7 +2421,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) | |||
| 2421 | change = true; | 2421 | change = true; |
| 2422 | } | 2422 | } |
| 2423 | 2423 | ||
| 2424 | if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) && | 2424 | if (flags && (*flags & MONITOR_FLAG_ACTIVE) && |
| 2425 | !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) | 2425 | !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) |
| 2426 | return -EOPNOTSUPP; | 2426 | return -EOPNOTSUPP; |
| 2427 | 2427 | ||
| @@ -2483,7 +2483,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | |||
| 2483 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, | 2483 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, |
| 2484 | &flags); | 2484 | &flags); |
| 2485 | 2485 | ||
| 2486 | if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) && | 2486 | if (!err && (flags & MONITOR_FLAG_ACTIVE) && |
| 2487 | !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) | 2487 | !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) |
| 2488 | return -EOPNOTSUPP; | 2488 | return -EOPNOTSUPP; |
| 2489 | 2489 | ||
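
The nl80211 fix tests the parsed monitor flags against MONITOR_FLAG_ACTIVE, a bit mask, instead of NL80211_MNTR_FLAG_ACTIVE, which is the attribute index and matches unrelated bits when used as a mask. A small sketch of why mixing the two misfires; the enum values are illustrative of the pattern, with the internal mask defined as BIT() of the corresponding attribute index.

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Attribute indices (illustrative): positions, not masks. */
enum mntr_flag_attr {
    MNTR_FLAG_FCSFAIL = 1,
    MNTR_FLAG_PLCPFAIL,
    MNTR_FLAG_CONTROL,
    MNTR_FLAG_OTHER_BSS,
    MNTR_FLAG_COOK_FRAMES,
    MNTR_FLAG_ACTIVE,            /* == 6 */
};

/* The parsed flag word uses one bit per attribute index. */
#define MONITOR_FLAG_ACTIVE  BIT(MNTR_FLAG_ACTIVE)

int main(void)
{
    /* FCSFAIL and PLCPFAIL requested, ACTIVE not requested. */
    unsigned int flags = BIT(MNTR_FLAG_FCSFAIL) | BIT(MNTR_FLAG_PLCPFAIL);

    /* Buggy test against the index (6 == 0b110): false positive. */
    printf("index test: %d\n", !!(flags & MNTR_FLAG_ACTIVE));
    /* Correct test against the mask. */
    printf("mask test:  %d\n", !!(flags & MONITOR_FLAG_ACTIVE));
    return 0;
}
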
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c index 7d604c06c3dc..a271c27fac77 100644 --- a/net/wireless/radiotap.c +++ b/net/wireless/radiotap.c | |||
| @@ -97,6 +97,10 @@ int ieee80211_radiotap_iterator_init( | |||
| 97 | struct ieee80211_radiotap_header *radiotap_header, | 97 | struct ieee80211_radiotap_header *radiotap_header, |
| 98 | int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns) | 98 | int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns) |
| 99 | { | 99 | { |
| 100 | /* check the radiotap header can actually be present */ | ||
| 101 | if (max_length < sizeof(struct ieee80211_radiotap_header)) | ||
| 102 | return -EINVAL; | ||
| 103 | |||
| 100 | /* Linux only supports version 0 radiotap format */ | 104 | /* Linux only supports version 0 radiotap format */ |
| 101 | if (radiotap_header->it_version) | 105 | if (radiotap_header->it_version) |
| 102 | return -EINVAL; | 106 | return -EINVAL; |
| @@ -131,7 +135,8 @@ int ieee80211_radiotap_iterator_init( | |||
| 131 | */ | 135 | */ |
| 132 | 136 | ||
| 133 | if ((unsigned long)iterator->_arg - | 137 | if ((unsigned long)iterator->_arg - |
| 134 | (unsigned long)iterator->_rtheader > | 138 | (unsigned long)iterator->_rtheader + |
| 139 | sizeof(uint32_t) > | ||
| 135 | (unsigned long)iterator->_max_length) | 140 | (unsigned long)iterator->_max_length) |
| 136 | return -EINVAL; | 141 | return -EINVAL; |
| 137 | } | 142 | } |
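
ieee80211_radiotap_iterator_init() now refuses buffers shorter than the radiotap header itself, and the follow-on bounds check accounts for the extra 32-bit it_present word before dereferencing it. A minimal sketch of the same rule, check that the fixed header fits before reading any of it; the struct below is a simplified stand-in for the real radiotap header.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct ieee80211_radiotap_header. */
struct rtap_hdr {
    uint8_t  it_version;
    uint8_t  it_pad;
    uint16_t it_len;
    uint32_t it_present;
};

static int rtap_parse(const uint8_t *buf, size_t max_len, struct rtap_hdr *out)
{
    /* The fixed header must fit in the buffer before any field is read. */
    if (max_len < sizeof(struct rtap_hdr))
        return -1;

    memcpy(out, buf, sizeof(*out));
    if (out->it_version != 0)       /* only version 0 is defined */
        return -1;
    if (out->it_len > max_len)      /* declared length must also fit */
        return -1;
    return 0;
}

int main(void)
{
    uint8_t short_buf[2] = { 0 };
    struct rtap_hdr hdr;

    printf("short buffer rejected: %d\n",
           rtap_parse(short_buf, sizeof(short_buf), &hdr));
    return 0;
}
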
diff --git a/net/x25/Kconfig b/net/x25/Kconfig index c959312c45e3..e2fa133f9fba 100644 --- a/net/x25/Kconfig +++ b/net/x25/Kconfig | |||
| @@ -16,8 +16,8 @@ config X25 | |||
| 16 | if you want that) and the lower level data link layer protocol LAPB | 16 | if you want that) and the lower level data link layer protocol LAPB |
| 17 | (say Y to "LAPB Data Link Driver" below if you want that). | 17 | (say Y to "LAPB Data Link Driver" below if you want that). |
| 18 | 18 | ||
| 19 | You can read more about X.25 at <http://www.sangoma.com/x25.htm> and | 19 | You can read more about X.25 at <http://www.sangoma.com/tutorials/x25/> and |
| 20 | <http://www.cisco.com/univercd/cc/td/doc/product/software/ios11/cbook/cx25.htm>. | 20 | <http://docwiki.cisco.com/wiki/X.25>. |
| 21 | Information about X.25 for Linux is contained in the files | 21 | Information about X.25 for Linux is contained in the files |
| 22 | <file:Documentation/networking/x25.txt> and | 22 | <file:Documentation/networking/x25.txt> and |
| 23 | <file:Documentation/networking/x25-iface.txt>. | 23 | <file:Documentation/networking/x25-iface.txt>. |
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index 2906d520eea7..3be02b680268 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c | |||
| @@ -141,14 +141,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) | |||
| 141 | const int plen = skb->len; | 141 | const int plen = skb->len; |
| 142 | int dlen = IPCOMP_SCRATCH_SIZE; | 142 | int dlen = IPCOMP_SCRATCH_SIZE; |
| 143 | u8 *start = skb->data; | 143 | u8 *start = skb->data; |
| 144 | const int cpu = get_cpu(); | 144 | struct crypto_comp *tfm; |
| 145 | u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); | 145 | u8 *scratch; |
| 146 | struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); | ||
| 147 | int err; | 146 | int err; |
| 148 | 147 | ||
| 149 | local_bh_disable(); | 148 | local_bh_disable(); |
| 149 | scratch = *this_cpu_ptr(ipcomp_scratches); | ||
| 150 | tfm = *this_cpu_ptr(ipcd->tfms); | ||
| 150 | err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); | 151 | err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); |
| 151 | local_bh_enable(); | ||
| 152 | if (err) | 152 | if (err) |
| 153 | goto out; | 153 | goto out; |
| 154 | 154 | ||
| @@ -158,13 +158,13 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) | |||
| 158 | } | 158 | } |
| 159 | 159 | ||
| 160 | memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); | 160 | memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); |
| 161 | put_cpu(); | 161 | local_bh_enable(); |
| 162 | 162 | ||
| 163 | pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr)); | 163 | pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr)); |
| 164 | return 0; | 164 | return 0; |
| 165 | 165 | ||
| 166 | out: | 166 | out: |
| 167 | put_cpu(); | 167 | local_bh_enable(); |
| 168 | return err; | 168 | return err; |
| 169 | } | 169 | } |
| 170 | 170 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index ed38d5d81f9e..76e1873811d4 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
| @@ -334,7 +334,8 @@ static void xfrm_policy_kill(struct xfrm_policy *policy) | |||
| 334 | 334 | ||
| 335 | atomic_inc(&policy->genid); | 335 | atomic_inc(&policy->genid); |
| 336 | 336 | ||
| 337 | del_timer(&policy->polq.hold_timer); | 337 | if (del_timer(&policy->polq.hold_timer)) |
| 338 | xfrm_pol_put(policy); | ||
| 338 | xfrm_queue_purge(&policy->polq.hold_queue); | 339 | xfrm_queue_purge(&policy->polq.hold_queue); |
| 339 | 340 | ||
| 340 | if (del_timer(&policy->timer)) | 341 | if (del_timer(&policy->timer)) |
| @@ -589,7 +590,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, | |||
| 589 | 590 | ||
| 590 | spin_lock_bh(&pq->hold_queue.lock); | 591 | spin_lock_bh(&pq->hold_queue.lock); |
| 591 | skb_queue_splice_init(&pq->hold_queue, &list); | 592 | skb_queue_splice_init(&pq->hold_queue, &list); |
| 592 | del_timer(&pq->hold_timer); | 593 | if (del_timer(&pq->hold_timer)) |
| 594 | xfrm_pol_put(old); | ||
| 593 | spin_unlock_bh(&pq->hold_queue.lock); | 595 | spin_unlock_bh(&pq->hold_queue.lock); |
| 594 | 596 | ||
| 595 | if (skb_queue_empty(&list)) | 597 | if (skb_queue_empty(&list)) |
| @@ -600,7 +602,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, | |||
| 600 | spin_lock_bh(&pq->hold_queue.lock); | 602 | spin_lock_bh(&pq->hold_queue.lock); |
| 601 | skb_queue_splice(&list, &pq->hold_queue); | 603 | skb_queue_splice(&list, &pq->hold_queue); |
| 602 | pq->timeout = XFRM_QUEUE_TMO_MIN; | 604 | pq->timeout = XFRM_QUEUE_TMO_MIN; |
| 603 | mod_timer(&pq->hold_timer, jiffies); | 605 | if (!mod_timer(&pq->hold_timer, jiffies)) |
| 606 | xfrm_pol_hold(new); | ||
| 604 | spin_unlock_bh(&pq->hold_queue.lock); | 607 | spin_unlock_bh(&pq->hold_queue.lock); |
| 605 | } | 608 | } |
| 606 | 609 | ||
| @@ -1769,6 +1772,10 @@ static void xfrm_policy_queue_process(unsigned long arg) | |||
| 1769 | 1772 | ||
| 1770 | spin_lock(&pq->hold_queue.lock); | 1773 | spin_lock(&pq->hold_queue.lock); |
| 1771 | skb = skb_peek(&pq->hold_queue); | 1774 | skb = skb_peek(&pq->hold_queue); |
| 1775 | if (!skb) { | ||
| 1776 | spin_unlock(&pq->hold_queue.lock); | ||
| 1777 | goto out; | ||
| 1778 | } | ||
| 1772 | dst = skb_dst(skb); | 1779 | dst = skb_dst(skb); |
| 1773 | sk = skb->sk; | 1780 | sk = skb->sk; |
| 1774 | xfrm_decode_session(skb, &fl, dst->ops->family); | 1781 | xfrm_decode_session(skb, &fl, dst->ops->family); |
| @@ -1787,8 +1794,9 @@ static void xfrm_policy_queue_process(unsigned long arg) | |||
| 1787 | goto purge_queue; | 1794 | goto purge_queue; |
| 1788 | 1795 | ||
| 1789 | pq->timeout = pq->timeout << 1; | 1796 | pq->timeout = pq->timeout << 1; |
| 1790 | mod_timer(&pq->hold_timer, jiffies + pq->timeout); | 1797 | if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout)) |
| 1791 | return; | 1798 | xfrm_pol_hold(pol); |
| 1799 | goto out; | ||
| 1792 | } | 1800 | } |
| 1793 | 1801 | ||
| 1794 | dst_release(dst); | 1802 | dst_release(dst); |
| @@ -1819,11 +1827,14 @@ static void xfrm_policy_queue_process(unsigned long arg) | |||
| 1819 | err = dst_output(skb); | 1827 | err = dst_output(skb); |
| 1820 | } | 1828 | } |
| 1821 | 1829 | ||
| 1830 | out: | ||
| 1831 | xfrm_pol_put(pol); | ||
| 1822 | return; | 1832 | return; |
| 1823 | 1833 | ||
| 1824 | purge_queue: | 1834 | purge_queue: |
| 1825 | pq->timeout = 0; | 1835 | pq->timeout = 0; |
| 1826 | xfrm_queue_purge(&pq->hold_queue); | 1836 | xfrm_queue_purge(&pq->hold_queue); |
| 1837 | xfrm_pol_put(pol); | ||
| 1827 | } | 1838 | } |
| 1828 | 1839 | ||
| 1829 | static int xdst_queue_output(struct sk_buff *skb) | 1840 | static int xdst_queue_output(struct sk_buff *skb) |
| @@ -1831,7 +1842,8 @@ static int xdst_queue_output(struct sk_buff *skb) | |||
| 1831 | unsigned long sched_next; | 1842 | unsigned long sched_next; |
| 1832 | struct dst_entry *dst = skb_dst(skb); | 1843 | struct dst_entry *dst = skb_dst(skb); |
| 1833 | struct xfrm_dst *xdst = (struct xfrm_dst *) dst; | 1844 | struct xfrm_dst *xdst = (struct xfrm_dst *) dst; |
| 1834 | struct xfrm_policy_queue *pq = &xdst->pols[0]->polq; | 1845 | struct xfrm_policy *pol = xdst->pols[0]; |
| 1846 | struct xfrm_policy_queue *pq = &pol->polq; | ||
| 1835 | 1847 | ||
| 1836 | if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) { | 1848 | if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) { |
| 1837 | kfree_skb(skb); | 1849 | kfree_skb(skb); |
| @@ -1850,10 +1862,12 @@ static int xdst_queue_output(struct sk_buff *skb) | |||
| 1850 | if (del_timer(&pq->hold_timer)) { | 1862 | if (del_timer(&pq->hold_timer)) { |
| 1851 | if (time_before(pq->hold_timer.expires, sched_next)) | 1863 | if (time_before(pq->hold_timer.expires, sched_next)) |
| 1852 | sched_next = pq->hold_timer.expires; | 1864 | sched_next = pq->hold_timer.expires; |
| 1865 | xfrm_pol_put(pol); | ||
| 1853 | } | 1866 | } |
| 1854 | 1867 | ||
| 1855 | __skb_queue_tail(&pq->hold_queue, skb); | 1868 | __skb_queue_tail(&pq->hold_queue, skb); |
| 1856 | mod_timer(&pq->hold_timer, sched_next); | 1869 | if (!mod_timer(&pq->hold_timer, sched_next)) |
| 1870 | xfrm_pol_hold(pol); | ||
| 1857 | 1871 | ||
| 1858 | spin_unlock_bh(&pq->hold_queue.lock); | 1872 | spin_unlock_bh(&pq->hold_queue.lock); |
| 1859 | 1873 | ||
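
The xfrm_policy.c hunks make the queue hold_timer own a reference on the policy: mod_timer() returning 0 means a previously idle timer was armed, so a reference is taken; del_timer() returning nonzero means a pending timer was cancelled, so its reference is dropped; and the timer handler drops the reference on every exit path. A hedged sketch of that invariant with a refcounted object and a boolean standing in for the pending timer; this models the bookkeeping only, not the kernel timer API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct policy {
    atomic_int refcnt;
    bool timer_pending;     /* stand-in for a struct timer_list */
};

static void pol_hold(struct policy *p) { atomic_fetch_add(&p->refcnt, 1); }

static void pol_put(struct policy *p)
{
    if (atomic_fetch_sub(&p->refcnt, 1) == 1) {
        printf("policy freed\n");
        free(p);
    }
}

/* Arm the timer: a newly pending timer holds one reference. */
static void arm_timer(struct policy *p)
{
    if (!p->timer_pending) {        /* like mod_timer() returning 0 */
        p->timer_pending = true;
        pol_hold(p);
    }
}

/* Cancel the timer: drop the reference it was holding. */
static void cancel_timer(struct policy *p)
{
    if (p->timer_pending) {         /* like del_timer() returning nonzero */
        p->timer_pending = false;
        pol_put(p);
    }
}

int main(void)
{
    struct policy *p = malloc(sizeof(*p));
    atomic_init(&p->refcnt, 1);
    p->timer_pending = false;

    arm_timer(p);       /* refcnt 2 */
    cancel_timer(p);    /* refcnt 1 */
    pol_put(p);         /* refcnt 0, freed */
    return 0;
}
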
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 8dafe6d3c6e4..dab57daae408 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
| @@ -61,9 +61,9 @@ static void xfrm_replay_notify(struct xfrm_state *x, int event) | |||
| 61 | 61 | ||
| 62 | switch (event) { | 62 | switch (event) { |
| 63 | case XFRM_REPLAY_UPDATE: | 63 | case XFRM_REPLAY_UPDATE: |
| 64 | if (x->replay_maxdiff && | 64 | if (!x->replay_maxdiff || |
| 65 | (x->replay.seq - x->preplay.seq < x->replay_maxdiff) && | 65 | ((x->replay.seq - x->preplay.seq < x->replay_maxdiff) && |
| 66 | (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) { | 66 | (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))) { |
| 67 | if (x->xflags & XFRM_TIME_DEFER) | 67 | if (x->xflags & XFRM_TIME_DEFER) |
| 68 | event = XFRM_REPLAY_TIMEOUT; | 68 | event = XFRM_REPLAY_TIMEOUT; |
| 69 | else | 69 | else |
| @@ -129,8 +129,7 @@ static int xfrm_replay_check(struct xfrm_state *x, | |||
| 129 | return 0; | 129 | return 0; |
| 130 | 130 | ||
| 131 | diff = x->replay.seq - seq; | 131 | diff = x->replay.seq - seq; |
| 132 | if (diff >= min_t(unsigned int, x->props.replay_window, | 132 | if (diff >= x->props.replay_window) { |
| 133 | sizeof(x->replay.bitmap) * 8)) { | ||
| 134 | x->stats.replay_window++; | 133 | x->stats.replay_window++; |
| 135 | goto err; | 134 | goto err; |
| 136 | } | 135 | } |
| @@ -302,9 +301,10 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) | |||
| 302 | 301 | ||
| 303 | switch (event) { | 302 | switch (event) { |
| 304 | case XFRM_REPLAY_UPDATE: | 303 | case XFRM_REPLAY_UPDATE: |
| 305 | if (x->replay_maxdiff && | 304 | if (!x->replay_maxdiff || |
| 306 | (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) && | 305 | ((replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) && |
| 307 | (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) { | 306 | (replay_esn->oseq - preplay_esn->oseq |
| 307 | < x->replay_maxdiff))) { | ||
| 308 | if (x->xflags & XFRM_TIME_DEFER) | 308 | if (x->xflags & XFRM_TIME_DEFER) |
| 309 | event = XFRM_REPLAY_TIMEOUT; | 309 | event = XFRM_REPLAY_TIMEOUT; |
| 310 | else | 310 | else |
| @@ -353,28 +353,30 @@ static void xfrm_replay_notify_esn(struct xfrm_state *x, int event) | |||
| 353 | 353 | ||
| 354 | switch (event) { | 354 | switch (event) { |
| 355 | case XFRM_REPLAY_UPDATE: | 355 | case XFRM_REPLAY_UPDATE: |
| 356 | if (!x->replay_maxdiff) | 356 | if (x->replay_maxdiff) { |
| 357 | break; | 357 | if (replay_esn->seq_hi == preplay_esn->seq_hi) |
| 358 | 358 | seq_diff = replay_esn->seq - preplay_esn->seq; | |
| 359 | if (replay_esn->seq_hi == preplay_esn->seq_hi) | 359 | else |
| 360 | seq_diff = replay_esn->seq - preplay_esn->seq; | 360 | seq_diff = ~preplay_esn->seq + replay_esn->seq |
| 361 | else | 361 | + 1; |
| 362 | seq_diff = ~preplay_esn->seq + replay_esn->seq + 1; | ||
| 363 | |||
| 364 | if (replay_esn->oseq_hi == preplay_esn->oseq_hi) | ||
| 365 | oseq_diff = replay_esn->oseq - preplay_esn->oseq; | ||
| 366 | else | ||
| 367 | oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1; | ||
| 368 | |||
| 369 | if (seq_diff < x->replay_maxdiff && | ||
| 370 | oseq_diff < x->replay_maxdiff) { | ||
| 371 | 362 | ||
| 372 | if (x->xflags & XFRM_TIME_DEFER) | 363 | if (replay_esn->oseq_hi == preplay_esn->oseq_hi) |
| 373 | event = XFRM_REPLAY_TIMEOUT; | 364 | oseq_diff = replay_esn->oseq |
| 365 | - preplay_esn->oseq; | ||
| 374 | else | 366 | else |
| 375 | return; | 367 | oseq_diff = ~preplay_esn->oseq |
| 368 | + replay_esn->oseq + 1; | ||
| 369 | |||
| 370 | if (seq_diff >= x->replay_maxdiff || | ||
| 371 | oseq_diff >= x->replay_maxdiff) | ||
| 372 | break; | ||
| 376 | } | 373 | } |
| 377 | 374 | ||
| 375 | if (x->xflags & XFRM_TIME_DEFER) | ||
| 376 | event = XFRM_REPLAY_TIMEOUT; | ||
| 377 | else | ||
| 378 | return; | ||
| 379 | |||
| 378 | break; | 380 | break; |
| 379 | 381 | ||
| 380 | case XFRM_REPLAY_TIMEOUT: | 382 | case XFRM_REPLAY_TIMEOUT: |
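
xfrm_replay_check() now compares the sequence-number difference against props.replay_window directly; the clamp to the bitmap size moves to copy_from_user_state() in the next diff, so the configured window can no longer exceed what the bitmap can represent. A small sketch of the wrap-safe window check on 32-bit sequence numbers, under the same assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Accept 'seq' against the highest sequence seen so far ('top') and a
 * replay window of 'window' packets; unsigned subtraction keeps the
 * difference well defined.
 */
static bool replay_ok(uint32_t seq, uint32_t top, uint32_t window)
{
    if (seq > top)
        return true;                 /* new highest sequence number */

    if (top - seq >= window)
        return false;                /* too old: outside the window */

    /* In the real code a bitmap lookup rejects duplicates here. */
    return true;
}

int main(void)
{
    printf("%d %d %d\n",
           replay_ok(1005, 1000, 32),   /* advance: ok   */
           replay_ok(990,  1000, 32),   /* in window: ok */
           replay_ok(900,  1000, 32));  /* too old: drop */
    return 0;
}
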
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 3f565e495ac6..f964d4c00ffb 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
| @@ -446,7 +446,8 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info * | |||
| 446 | memcpy(&x->sel, &p->sel, sizeof(x->sel)); | 446 | memcpy(&x->sel, &p->sel, sizeof(x->sel)); |
| 447 | memcpy(&x->lft, &p->lft, sizeof(x->lft)); | 447 | memcpy(&x->lft, &p->lft, sizeof(x->lft)); |
| 448 | x->props.mode = p->mode; | 448 | x->props.mode = p->mode; |
| 449 | x->props.replay_window = p->replay_window; | 449 | x->props.replay_window = min_t(unsigned int, p->replay_window, |
| 450 | sizeof(x->replay.bitmap) * 8); | ||
| 450 | x->props.reqid = p->reqid; | 451 | x->props.reqid = p->reqid; |
| 451 | x->props.family = p->family; | 452 | x->props.family = p->family; |
| 452 | memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); | 453 | memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); |
| @@ -1856,7 +1857,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
| 1856 | if (x->km.state != XFRM_STATE_VALID) | 1857 | if (x->km.state != XFRM_STATE_VALID) |
| 1857 | goto out; | 1858 | goto out; |
| 1858 | 1859 | ||
| 1859 | err = xfrm_replay_verify_len(x->replay_esn, rp); | 1860 | err = xfrm_replay_verify_len(x->replay_esn, re); |
| 1860 | if (err) | 1861 | if (err) |
| 1861 | goto out; | 1862 | goto out; |
| 1862 | 1863 | ||
