Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c | 7
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 2
-rw-r--r--  net/batman-adv/gateway_client.c | 13
-rw-r--r--  net/batman-adv/gateway_client.h | 3
-rw-r--r--  net/batman-adv/soft-interface.c | 9
-rw-r--r--  net/batman-adv/unicast.c | 23
-rw-r--r--  net/bridge/br_device.c | 2
-rw-r--r--  net/bridge/br_fdb.c | 10
-rw-r--r--  net/bridge/br_input.c | 2
-rw-r--r--  net/bridge/br_mdb.c | 14
-rw-r--r--  net/bridge/br_multicast.c | 263
-rw-r--r--  net/bridge/br_netlink.c | 4
-rw-r--r--  net/bridge/br_private.h | 57
-rw-r--r--  net/bridge/br_sysfs_br.c | 2
-rw-r--r--  net/bridge/br_vlan.c | 4
-rw-r--r--  net/core/flow_dissector.c | 12
-rw-r--r--  net/core/neighbour.c | 10
-rw-r--r--  net/core/net-sysfs.c | 136
-rw-r--r--  net/core/netprio_cgroup.c | 72
-rw-r--r--  net/core/rtnetlink.c | 4
-rw-r--r--  net/core/scm.c | 2
-rw-r--r--  net/ieee802154/wpan-class.c | 23
-rw-r--r--  net/ipv4/esp4.c | 2
-rw-r--r--  net/ipv4/fib_trie.c | 5
-rw-r--r--  net/ipv4/ip_gre.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 8
-rw-r--r--  net/ipv4/ip_tunnel_core.c | 4
-rw-r--r--  net/ipv4/ipip.c | 5
-rw-r--r--  net/ipv4/proc.c | 2
-rw-r--r--  net/ipv4/raw.c | 3
-rw-r--r--  net/ipv4/tcp.c | 7
-rw-r--r--  net/ipv4/tcp_cubic.c | 12
-rw-r--r--  net/ipv4/tcp_input.c | 9
-rw-r--r--  net/ipv4/tcp_memcontrol.c | 12
-rw-r--r--  net/ipv4/tcp_output.c | 4
-rw-r--r--  net/ipv4/xfrm4_output.c | 16
-rw-r--r--  net/ipv4/xfrm4_state.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 10
-rw-r--r--  net/ipv6/esp6.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 16
-rw-r--r--  net/ipv6/ip6_gre.c | 5
-rw-r--r--  net/ipv6/ip6_output.c | 3
-rw-r--r--  net/ipv6/ip6_tunnel.c | 6
-rw-r--r--  net/ipv6/ndisc.c | 4
-rw-r--r--  net/ipv6/raw.c | 1
-rw-r--r--  net/ipv6/reassembly.c | 5
-rw-r--r--  net/ipv6/route.c | 21
-rw-r--r--  net/ipv6/sit.c | 11
-rw-r--r--  net/ipv6/xfrm6_output.c | 21
-rw-r--r--  net/ipv6/xfrm6_state.c | 1
-rw-r--r--  net/mac80211/ibss.c | 34
-rw-r--r--  net/mac80211/mlme.c | 54
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 12
-rw-r--r--  net/netfilter/nfnetlink_log.c | 6
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c | 5
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 28
-rw-r--r--  net/netfilter/xt_TCPOPTSTRIP.c | 10
-rw-r--r--  net/netlink/genetlink.c | 67
-rw-r--r--  net/openvswitch/actions.c | 1
-rw-r--r--  net/openvswitch/datapath.c | 3
-rw-r--r--  net/openvswitch/flow.c | 2
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/rfkill/core.c | 90
-rw-r--r--  net/sched/cls_cgroup.c | 39
-rw-r--r--  net/sched/sch_api.c | 41
-rw-r--r--  net/sched/sch_generic.c | 8
-rw-r--r--  net/sched/sch_htb.c | 13
-rw-r--r--  net/sctp/associola.c | 4
-rw-r--r--  net/sctp/transport.c | 4
-rw-r--r--  net/sunrpc/clnt.c | 4
-rw-r--r--  net/sunrpc/netns.h | 1
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 48
-rw-r--r--  net/sunrpc/xdr.c | 9
-rw-r--r--  net/tipc/bearer.c | 9
-rw-r--r--  net/tipc/socket.c | 4
-rw-r--r--  net/vmw_vsock/af_vsock.c | 2
-rw-r--r--  net/wireless/core.c | 1
-rw-r--r--  net/wireless/nl80211.c | 28
-rw-r--r--  net/wireless/sme.c | 10
-rw-r--r--  net/wireless/sysfs.c | 25
-rw-r--r--  net/xfrm/xfrm_output.c | 21
-rw-r--r--  net/xfrm/xfrm_policy.c | 9
-rw-r--r--  net/xfrm/xfrm_state.c | 7
84 files changed, 977 insertions, 499 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4a78c4de9f20..6ee48aac776f 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
-	return vlan_dev_priv(dev)->real_dev;
+	struct net_device *ret = vlan_dev_priv(dev)->real_dev;
+
+	while (is_vlan_dev(ret))
+		ret = vlan_dev_priv(ret)->real_dev;
+
+	return ret;
 }
 EXPORT_SYMBOL(vlan_dev_real_dev);
 
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index e14531f1ce1c..264de88db320 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1529,6 +1529,8 @@ out:
  * in these cases, the skb is further handled by this function and
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
+ *
+ * This call might reallocate skb data.
  */
 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
 		  unsigned short vid)
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index f105219f4a4b..7614af31daff 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -508,6 +508,7 @@ out:
 	return 0;
 }
 
+/* this call might reallocate skb data */
 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
 {
 	int ret = false;
@@ -568,6 +569,7 @@ out:
 	return ret;
 }
 
+/* this call might reallocate skb data */
 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 {
 	struct ethhdr *ethhdr;
@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 
 	if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
 		return false;
+
+	/* skb->data might have been reallocated by pskb_may_pull() */
+	ethhdr = (struct ethhdr *)skb->data;
+	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
+		ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
 	udphdr = (struct udphdr *)(skb->data + *header_len);
 	*header_len += sizeof(*udphdr);
 
@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 	return true;
 }
 
+/* this call might reallocate skb data */
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
-			    struct sk_buff *skb, struct ethhdr *ethhdr)
+			    struct sk_buff *skb)
 {
 	struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
 	struct batadv_orig_node *orig_dst_node = NULL;
 	struct batadv_gw_node *curr_gw = NULL;
+	struct ethhdr *ethhdr;
 	bool ret, out_of_range = false;
 	unsigned int header_len = 0;
 	uint8_t curr_tq_avg;
@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
 	if (!ret)
 		goto out;
 
+	ethhdr = (struct ethhdr *)skb->data;
 	orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
 						 ethhdr->h_dest);
 	if (!orig_dst_node)
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 039902dca4a6..1037d75da51f 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
-			    struct sk_buff *skb, struct ethhdr *ethhdr);
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 700d0b49742d..0f04e1c302b4 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
 	if (batadv_bla_tx(bat_priv, skb, vid))
 		goto dropped;
 
+	/* skb->data might have been reallocated by batadv_bla_tx() */
+	ethhdr = (struct ethhdr *)skb->data;
+
 	/* Register the client MAC in the transtable */
 	if (!is_multicast_ether_addr(ethhdr->h_source))
 		batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
 		default:
 			break;
 		}
+
+		/* reminder: ethhdr might have become unusable from here on
+		 * (batadv_gw_is_dhcp_target() might have reallocated skb data)
+		 */
 	}
 
 	/* ethernet packet should be broadcasted */
@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
 	/* unicast packet */
 	} else {
 		if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
-			ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
+			ret = batadv_gw_out_of_range(bat_priv, skb);
 			if (ret)
 				goto dropped;
 		}
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc8b5d4dd636..857e1b8349ee 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
  * @skb: the skb containing the payload to encapsulate
  * @orig_node: the destination node
  *
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
  */
 static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
 				       struct batadv_orig_node *orig_node)
@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
  * @orig_node: the destination node
  * @packet_subtype: the batman 4addr packet subtype to use
  *
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
  */
 bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
 				      struct sk_buff *skb,
@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
 	struct batadv_neigh_node *neigh_node;
 	int data_len = skb->len;
 	int ret = NET_RX_DROP;
-	unsigned int dev_mtu;
+	unsigned int dev_mtu, header_len;
 
 	/* get routing information */
 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
@@ -428,11 +432,17 @@ find_router:
 
 	switch (packet_type) {
 	case BATADV_UNICAST:
-		batadv_unicast_prepare_skb(skb, orig_node);
+		if (!batadv_unicast_prepare_skb(skb, orig_node))
+			goto out;
+
+		header_len = sizeof(struct batadv_unicast_packet);
 		break;
 	case BATADV_UNICAST_4ADDR:
-		batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
-						 packet_subtype);
+		if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
+						      packet_subtype))
+			goto out;
+
+		header_len = sizeof(struct batadv_unicast_4addr_packet);
 		break;
 	default:
 		/* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -441,6 +451,7 @@ find_router:
 		goto out;
 	}
 
+	ethhdr = (struct ethhdr *)(skb->data + header_len);
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
 	/* inform the destination node that we are still missing a correct route
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 69363bd37f64..89659d4ed1f9 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -71,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	mdst = br_mdb_get(br, skb, vid);
 	if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
-	    br_multicast_querier_exists(br))
+	    br_multicast_querier_exists(br, eth_hdr(skb)))
 		br_multicast_deliver(mdst, skb);
 	else
 		br_flood_deliver(br, skb, false);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 60aca9109a50..ffd5874f2592 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -161,7 +161,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 	if (!pv)
 		return;
 
-	for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+	for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
 		f = __br_fdb_get(br, br->dev->dev_addr, vid);
 		if (f && f->is_local && !f->dst)
 			fdb_delete(br, f);
@@ -730,7 +730,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 		/* VID was specified, so use it. */
 		err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
 	} else {
-		if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+		if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
 			err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
 			goto out;
 		}
@@ -739,7 +739,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 		 * specify a VLAN.  To be nice, add/update entry for every
 		 * vlan on this port.
 		 */
-		for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
 			err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
 			if (err)
 				goto out;
@@ -817,7 +817,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 
 		err = __br_fdb_delete(p, addr, vid);
 	} else {
-		if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+		if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
 			err = __br_fdb_delete(p, addr, 0);
 			goto out;
 		}
@@ -827,7 +827,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 		 * vlan on this port.
 		 */
 		err = -ENOENT;
-		for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
 			err &= __br_fdb_delete(p, addr, vid);
 		}
 	}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 8c561c0aa636..a2fd37ec35f7 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -102,7 +102,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	} else if (is_multicast_ether_addr(dest)) {
 		mdst = br_mdb_get(br, skb, vid);
 		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
-		    br_multicast_querier_exists(br)) {
+		    br_multicast_querier_exists(br, eth_hdr(skb))) {
 			if ((mdst && mdst->mglist) ||
 			    br_multicast_is_router(br))
 				skb2 = skb;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 0daae3ec2355..6319c4333c39 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -414,16 +414,20 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
 	if (!netif_running(br->dev) || br->multicast_disabled)
 		return -EINVAL;
 
-	if (timer_pending(&br->multicast_querier_timer))
-		return -EBUSY;
-
 	ip.proto = entry->addr.proto;
-	if (ip.proto == htons(ETH_P_IP))
+	if (ip.proto == htons(ETH_P_IP)) {
+		if (timer_pending(&br->ip4_querier.timer))
+			return -EBUSY;
+
 		ip.u.ip4 = entry->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-	else
+	} else {
+		if (timer_pending(&br->ip6_querier.timer))
+			return -EBUSY;
+
 		ip.u.ip6 = entry->addr.u.ip6;
 #endif
+	}
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 61c5e819380e..bbcb43582496 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -33,7 +33,8 @@
 
 #include "br_private.h"
 
-static void br_multicast_start_querier(struct net_bridge *br);
+static void br_multicast_start_querier(struct net_bridge *br,
+				       struct bridge_mcast_query *query);
 unsigned int br_mdb_rehash_seq;
 
 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -755,20 +756,35 @@ static void br_multicast_local_router_expired(unsigned long data)
 {
 }
 
-static void br_multicast_querier_expired(unsigned long data)
+static void br_multicast_querier_expired(struct net_bridge *br,
+					 struct bridge_mcast_query *query)
 {
-	struct net_bridge *br = (void *)data;
-
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) || br->multicast_disabled)
 		goto out;
 
-	br_multicast_start_querier(br);
+	br_multicast_start_querier(br, query);
 
 out:
 	spin_unlock(&br->multicast_lock);
 }
 
+static void br_ip4_multicast_querier_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
+
+	br_multicast_querier_expired(br, &br->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_querier_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
+
+	br_multicast_querier_expired(br, &br->ip6_query);
+}
+#endif
+
 static void __br_multicast_send_query(struct net_bridge *br,
 				      struct net_bridge_port *port,
 				      struct br_ip *ip)
@@ -789,37 +805,45 @@ static void __br_multicast_send_query(struct net_bridge *br,
 }
 
 static void br_multicast_send_query(struct net_bridge *br,
-				    struct net_bridge_port *port, u32 sent)
+				    struct net_bridge_port *port,
+				    struct bridge_mcast_query *query)
 {
 	unsigned long time;
 	struct br_ip br_group;
+	struct bridge_mcast_querier *querier = NULL;
 
 	if (!netif_running(br->dev) || br->multicast_disabled ||
-	    !br->multicast_querier ||
-	    timer_pending(&br->multicast_querier_timer))
+	    !br->multicast_querier)
 		return;
 
 	memset(&br_group.u, 0, sizeof(br_group.u));
 
-	br_group.proto = htons(ETH_P_IP);
-	__br_multicast_send_query(br, port, &br_group);
-
+	if (port ? (query == &port->ip4_query) :
+		   (query == &br->ip4_query)) {
+		querier = &br->ip4_querier;
+		br_group.proto = htons(ETH_P_IP);
 #if IS_ENABLED(CONFIG_IPV6)
-	br_group.proto = htons(ETH_P_IPV6);
-	__br_multicast_send_query(br, port, &br_group);
+	} else {
+		querier = &br->ip6_querier;
+		br_group.proto = htons(ETH_P_IPV6);
 #endif
+	}
+
+	if (!querier || timer_pending(&querier->timer))
+		return;
+
+	__br_multicast_send_query(br, port, &br_group);
 
 	time = jiffies;
-	time += sent < br->multicast_startup_query_count ?
+	time += query->startup_sent < br->multicast_startup_query_count ?
 		br->multicast_startup_query_interval :
 		br->multicast_query_interval;
-	mod_timer(port ? &port->multicast_query_timer :
-			 &br->multicast_query_timer, time);
+	mod_timer(&query->timer, time);
 }
 
-static void br_multicast_port_query_expired(unsigned long data)
+static void br_multicast_port_query_expired(struct net_bridge_port *port,
+					    struct bridge_mcast_query *query)
 {
-	struct net_bridge_port *port = (void *)data;
 	struct net_bridge *br = port->br;
 
 	spin_lock(&br->multicast_lock);
@@ -827,25 +851,43 @@ static void br_multicast_port_query_expired(unsigned long data)
 	    port->state == BR_STATE_BLOCKING)
 		goto out;
 
-	if (port->multicast_startup_queries_sent <
-	    br->multicast_startup_query_count)
-		port->multicast_startup_queries_sent++;
+	if (query->startup_sent < br->multicast_startup_query_count)
+		query->startup_sent++;
 
-	br_multicast_send_query(port->br, port,
-				port->multicast_startup_queries_sent);
+	br_multicast_send_query(port->br, port, query);
 
 out:
 	spin_unlock(&br->multicast_lock);
 }
 
+static void br_ip4_multicast_port_query_expired(unsigned long data)
+{
+	struct net_bridge_port *port = (void *)data;
+
+	br_multicast_port_query_expired(port, &port->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_port_query_expired(unsigned long data)
+{
+	struct net_bridge_port *port = (void *)data;
+
+	br_multicast_port_query_expired(port, &port->ip6_query);
+}
+#endif
+
 void br_multicast_add_port(struct net_bridge_port *port)
 {
 	port->multicast_router = 1;
 
 	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
 		    (unsigned long)port);
-	setup_timer(&port->multicast_query_timer,
-		    br_multicast_port_query_expired, (unsigned long)port);
+	setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired,
+		    (unsigned long)port);
+#if IS_ENABLED(CONFIG_IPV6)
+	setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired,
+		    (unsigned long)port);
+#endif
 }
 
 void br_multicast_del_port(struct net_bridge_port *port)
@@ -853,13 +895,13 @@ void br_multicast_del_port(struct net_bridge_port *port)
 	del_timer_sync(&port->multicast_router_timer);
 }
 
-static void __br_multicast_enable_port(struct net_bridge_port *port)
+static void br_multicast_enable(struct bridge_mcast_query *query)
 {
-	port->multicast_startup_queries_sent = 0;
+	query->startup_sent = 0;
 
-	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
-	    del_timer(&port->multicast_query_timer))
-		mod_timer(&port->multicast_query_timer, jiffies);
+	if (try_to_del_timer_sync(&query->timer) >= 0 ||
+	    del_timer(&query->timer))
+		mod_timer(&query->timer, jiffies);
 }
 
 void br_multicast_enable_port(struct net_bridge_port *port)
@@ -870,7 +912,10 @@ void br_multicast_enable_port(struct net_bridge_port *port)
 	if (br->multicast_disabled || !netif_running(br->dev))
 		goto out;
 
-	__br_multicast_enable_port(port);
+	br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+	br_multicast_enable(&port->ip6_query);
+#endif
 
 out:
 	spin_unlock(&br->multicast_lock);
@@ -889,7 +934,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 	if (!hlist_unhashed(&port->rlist))
 		hlist_del_init_rcu(&port->rlist);
 	del_timer(&port->multicast_router_timer);
-	del_timer(&port->multicast_query_timer);
+	del_timer(&port->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+	del_timer(&port->ip6_query.timer);
+#endif
 	spin_unlock(&br->multicast_lock);
 }
 
@@ -1014,14 +1062,15 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 }
 #endif
 
-static void br_multicast_update_querier_timer(struct net_bridge *br,
-					      unsigned long max_delay)
+static void
+br_multicast_update_querier_timer(struct net_bridge *br,
+				  struct bridge_mcast_querier *querier,
+				  unsigned long max_delay)
 {
-	if (!timer_pending(&br->multicast_querier_timer))
-		br->multicast_querier_delay_time = jiffies + max_delay;
+	if (!timer_pending(&querier->timer))
+		querier->delay_time = jiffies + max_delay;
 
-	mod_timer(&br->multicast_querier_timer,
-		  jiffies + br->multicast_querier_interval);
+	mod_timer(&querier->timer, jiffies + br->multicast_querier_interval);
 }
 
 /*
@@ -1074,12 +1123,13 @@ timer:
 
 static void br_multicast_query_received(struct net_bridge *br,
 					struct net_bridge_port *port,
+					struct bridge_mcast_querier *querier,
 					int saddr,
 					unsigned long max_delay)
 {
 	if (saddr)
-		br_multicast_update_querier_timer(br, max_delay);
-	else if (timer_pending(&br->multicast_querier_timer))
+		br_multicast_update_querier_timer(br, querier, max_delay);
+	else if (timer_pending(&querier->timer))
 		return;
 
 	br_multicast_mark_router(br, port);
@@ -1129,7 +1179,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 			  IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
 	}
 
-	br_multicast_query_received(br, port, !!iph->saddr, max_delay);
+	br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
+				    max_delay);
 
 	if (!group)
 		goto out;
@@ -1195,7 +1246,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
 		if (max_delay)
 			group = &mld->mld_mca;
-	} else if (skb->len >= sizeof(*mld2q)) {
+	} else {
 		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
 			err = -EINVAL;
 			goto out;
@@ -1203,11 +1254,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 		mld2q = (struct mld2_query *)icmp6_hdr(skb);
 		if (!mld2q->mld2q_nsrcs)
 			group = &mld2q->mld2q_mca;
-		max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
+
+		max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
 	}
 
-	br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr),
-				    max_delay);
+	br_multicast_query_received(br, port, &br->ip6_querier,
+				    !ipv6_addr_any(&ip6h->saddr), max_delay);
 
 	if (!group)
 		goto out;
@@ -1244,7 +1296,9 @@ out:
 
 static void br_multicast_leave_group(struct net_bridge *br,
 				     struct net_bridge_port *port,
-				     struct br_ip *group)
+				     struct br_ip *group,
+				     struct bridge_mcast_querier *querier,
+				     struct bridge_mcast_query *query)
 {
 	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
@@ -1255,7 +1309,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) ||
 	    (port && port->state == BR_STATE_DISABLED) ||
-	    timer_pending(&br->multicast_querier_timer))
+	    timer_pending(&querier->timer))
 		goto out;
 
 	mdb = mlock_dereference(br->mdb, br);
@@ -1263,14 +1317,13 @@ static void br_multicast_leave_group(struct net_bridge *br,
 	if (!mp)
 		goto out;
 
-	if (br->multicast_querier &&
-	    !timer_pending(&br->multicast_querier_timer)) {
+	if (br->multicast_querier) {
 		__br_multicast_send_query(br, port, &mp->addr);
 
 		time = jiffies + br->multicast_last_member_count *
 		       br->multicast_last_member_interval;
-		mod_timer(port ? &port->multicast_query_timer :
-				 &br->multicast_query_timer, time);
+
+		mod_timer(&query->timer, time);
 
 		for (p = mlock_dereference(mp->ports, br);
 		     p != NULL;
@@ -1323,7 +1376,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
 			mod_timer(&mp->timer, time);
 		}
 	}
-
 out:
 	spin_unlock(&br->multicast_lock);
 }
@@ -1334,6 +1386,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 					 __u16 vid)
 {
 	struct br_ip br_group;
+	struct bridge_mcast_query *query = port ? &port->ip4_query :
+						  &br->ip4_query;
 
 	if (ipv4_is_local_multicast(group))
 		return;
@@ -1342,7 +1396,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 	br_group.proto = htons(ETH_P_IP);
 	br_group.vid = vid;
 
-	br_multicast_leave_group(br, port, &br_group);
+	br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1352,6 +1406,9 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 					 __u16 vid)
 {
 	struct br_ip br_group;
+	struct bridge_mcast_query *query = port ? &port->ip6_query :
+						  &br->ip6_query;
+
 
 	if (!ipv6_is_transient_multicast(group))
 		return;
@@ -1360,7 +1417,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 	br_group.proto = htons(ETH_P_IPV6);
 	br_group.vid = vid;
 
-	br_multicast_leave_group(br, port, &br_group);
+	br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
 }
 #endif
 
@@ -1622,19 +1679,32 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
 	return 0;
 }
 
-static void br_multicast_query_expired(unsigned long data)
+static void br_multicast_query_expired(struct net_bridge *br,
+				       struct bridge_mcast_query *query)
+{
+	spin_lock(&br->multicast_lock);
+	if (query->startup_sent < br->multicast_startup_query_count)
+		query->startup_sent++;
+
+	br_multicast_send_query(br, NULL, query);
+	spin_unlock(&br->multicast_lock);
+}
+
+static void br_ip4_multicast_query_expired(unsigned long data)
 {
 	struct net_bridge *br = (void *)data;
 
-	spin_lock(&br->multicast_lock);
-	if (br->multicast_startup_queries_sent <
-	    br->multicast_startup_query_count)
-		br->multicast_startup_queries_sent++;
+	br_multicast_query_expired(br, &br->ip4_query);
+}
 
-	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_query_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
 
-	spin_unlock(&br->multicast_lock);
+	br_multicast_query_expired(br, &br->ip6_query);
 }
+#endif
 
 void br_multicast_init(struct net_bridge *br)
 {
@@ -1654,25 +1724,43 @@ void br_multicast_init(struct net_bridge *br)
 	br->multicast_querier_interval = 255 * HZ;
 	br->multicast_membership_interval = 260 * HZ;
 
-	br->multicast_querier_delay_time = 0;
+	br->ip4_querier.delay_time = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+	br->ip6_querier.delay_time = 0;
+#endif
 
 	spin_lock_init(&br->multicast_lock);
 	setup_timer(&br->multicast_router_timer,
 		    br_multicast_local_router_expired, 0);
-	setup_timer(&br->multicast_querier_timer,
-		    br_multicast_querier_expired, (unsigned long)br);
-	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
-		    (unsigned long)br);
+	setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired,
+		    (unsigned long)br);
+	setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired,
+		    (unsigned long)br);
+#if IS_ENABLED(CONFIG_IPV6)
+	setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired,
+		    (unsigned long)br);
+	setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired,
+		    (unsigned long)br);
+#endif
 }
 
-void br_multicast_open(struct net_bridge *br)
+static void __br_multicast_open(struct net_bridge *br,
+				struct bridge_mcast_query *query)
 {
-	br->multicast_startup_queries_sent = 0;
+	query->startup_sent = 0;
 
 	if (br->multicast_disabled)
 		return;
 
-	mod_timer(&br->multicast_query_timer, jiffies);
+	mod_timer(&query->timer, jiffies);
+}
+
+void br_multicast_open(struct net_bridge *br)
+{
+	__br_multicast_open(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+	__br_multicast_open(br, &br->ip6_query);
+#endif
 }
 
 void br_multicast_stop(struct net_bridge *br)
@@ -1684,8 +1772,12 @@ void br_multicast_stop(struct net_bridge *br)
 	int i;
 
 	del_timer_sync(&br->multicast_router_timer);
-	del_timer_sync(&br->multicast_querier_timer);
-	del_timer_sync(&br->multicast_query_timer);
+	del_timer_sync(&br->ip4_querier.timer);
+	del_timer_sync(&br->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+	del_timer_sync(&br->ip6_querier.timer);
+	del_timer_sync(&br->ip6_query.timer);
+#endif
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
@@ -1788,18 +1880,24 @@ unlock:
 	return err;
 }
 
-static void br_multicast_start_querier(struct net_bridge *br)
+static void br_multicast_start_querier(struct net_bridge *br,
+				       struct bridge_mcast_query *query)
 {
 	struct net_bridge_port *port;
 
-	br_multicast_open(br);
+	__br_multicast_open(br, query);
 
 	list_for_each_entry(port, &br->port_list, list) {
 		if (port->state == BR_STATE_DISABLED ||
 		    port->state == BR_STATE_BLOCKING)
 			continue;
 
-		__br_multicast_enable_port(port);
+		if (query == &br->ip4_query)
+			br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+		else
+			br_multicast_enable(&port->ip6_query);
+#endif
 	}
 }
 
@@ -1834,7 +1932,10 @@ rollback:
 			goto rollback;
 	}
 
-	br_multicast_start_querier(br);
+	br_multicast_start_querier(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+	br_multicast_start_querier(br, &br->ip6_query);
+#endif
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
@@ -1857,10 +1958,18 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
 		goto unlock;
 
 	max_delay = br->multicast_query_response_interval;
-	if (!timer_pending(&br->multicast_querier_timer))
-		br->multicast_querier_delay_time = jiffies + max_delay;
 
-	br_multicast_start_querier(br);
+	if (!timer_pending(&br->ip4_querier.timer))
+		br->ip4_querier.delay_time = jiffies + max_delay;
+
+	br_multicast_start_querier(br, &br->ip4_query);
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (!timer_pending(&br->ip6_querier.timer))
+		br->ip6_querier.delay_time = jiffies + max_delay;
+
+	br_multicast_start_querier(br, &br->ip6_query);
+#endif
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 1fc30abd3a52..b9259efa636e 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -132,7 +132,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
 	else
 		pv = br_get_vlan_info(br);
 
-	if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
+	if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
 		goto done;
 
 	af = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -140,7 +140,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
 		goto nla_put_failure;
 
 	pvid = br_get_pvid(pv);
-	for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
 		vinfo.vid = vid;
 		vinfo.flags = 0;
 		if (vid == pvid)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2f7da41851bf..263ba9034468 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -66,6 +66,20 @@ struct br_ip
 	__u16		vid;
 };
 
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+/* our own querier */
+struct bridge_mcast_query {
+	struct timer_list	timer;
+	u32			startup_sent;
+};
+
+/* other querier */
+struct bridge_mcast_querier {
+	struct timer_list	timer;
+	unsigned long		delay_time;
+};
+#endif
+
 struct net_port_vlans {
 	u16				port_idx;
 	u16				pvid;
@@ -162,10 +176,12 @@ struct net_bridge_port
 #define BR_FLOOD		0x00000040
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
-	u32				multicast_startup_queries_sent;
+	struct bridge_mcast_query	ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct bridge_mcast_query	ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 	unsigned char			multicast_router;
 	struct timer_list		multicast_router_timer;
-	struct timer_list		multicast_query_timer;
 	struct hlist_head		mglist;
 	struct hlist_node		rlist;
 #endif
@@ -258,7 +274,6 @@ struct net_bridge
 	u32				hash_max;
 
 	u32				multicast_last_member_count;
-	u32				multicast_startup_queries_sent;
 	u32				multicast_startup_query_count;
 
 	unsigned long			multicast_last_member_interval;
@@ -267,15 +282,18 @@ struct net_bridge
 	unsigned long			multicast_query_interval;
 	unsigned long			multicast_query_response_interval;
 	unsigned long			multicast_startup_query_interval;
-	unsigned long			multicast_querier_delay_time;
 
 	spinlock_t			multicast_lock;
 	struct net_bridge_mdb_htable __rcu *mdb;
 	struct hlist_head		router_list;
 
 	struct timer_list		multicast_router_timer;
-	struct timer_list		multicast_querier_timer;
-	struct timer_list		multicast_query_timer;
+	struct bridge_mcast_querier	ip4_querier;
+	struct bridge_mcast_query	ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct bridge_mcast_querier	ip6_querier;
+	struct bridge_mcast_query	ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif
 
 	struct timer_list		hello_timer;
@@ -503,11 +521,27 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
 		timer_pending(&br->multicast_router_timer));
 }
 
-static inline bool br_multicast_querier_exists(struct net_bridge *br)
+static inline bool
+__br_multicast_querier_exists(struct net_bridge *br,
+			      struct bridge_mcast_querier *querier)
+{
+	return time_is_before_jiffies(querier->delay_time) &&
+	       (br->multicast_querier || timer_pending(&querier->timer));
+}
+
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+						struct ethhdr *eth)
 {
-	return time_is_before_jiffies(br->multicast_querier_delay_time) &&
-	       (br->multicast_querier ||
-		timer_pending(&br->multicast_querier_timer));
+	switch (eth->h_proto) {
+	case (htons(ETH_P_IP)):
+		return __br_multicast_querier_exists(br, &br->ip4_querier);
+#if IS_ENABLED(CONFIG_IPV6)
+	case (htons(ETH_P_IPV6)):
+		return __br_multicast_querier_exists(br, &br->ip6_querier);
+#endif
+	default:
+		return false;
+	}
 }
 #else
 static inline int br_multicast_rcv(struct net_bridge *br,
@@ -565,7 +599,8 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
 {
 	return 0;
 }
-static inline bool br_multicast_querier_exists(struct net_bridge *br)
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+						struct ethhdr *eth)
 {
 	return false;
 }
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 394bb96b6087..3b9637fb7939 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -1,5 +1,5 @@
 /*
- *	Sysfs attributes of bridge ports
+ *	Sysfs attributes of bridge
  *	Linux ethernet bridge
  *
  *	Authors:
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index bd58b45f5f90..9a9ffe7e4019 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -108,7 +108,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
 
 	clear_bit(vid, v->vlan_bitmap);
 	v->num_vlans--;
-	if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+	if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
 		if (v->port_idx)
 			rcu_assign_pointer(v->parent.port->vlan_info, NULL);
 		else
@@ -122,7 +122,7 @@ static void __vlan_flush(struct net_port_vlans *v)
 {
 	smp_wmb();
 	v->pvid = 0;
-	bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+	bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
 	if (v->port_idx)
 		rcu_assign_pointer(v->parent.port->vlan_info, NULL);
 	else
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 00ee068efc1c..d12e3a9a5356 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -65,6 +65,7 @@ ipv6:
 		nhoff += sizeof(struct ipv6hdr);
 		break;
 	}
+	case __constant_htons(ETH_P_8021AD):
 	case __constant_htons(ETH_P_8021Q): {
 		const struct vlan_hdr *vlan;
 		struct vlan_hdr _vlan;
@@ -345,14 +346,9 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 		if (new_index < 0)
 			new_index = skb_tx_hash(dev, skb);
 
-		if (queue_index != new_index && sk) {
-			struct dst_entry *dst =
-				    rcu_dereference_check(sk->sk_dst_cache, 1);
-
-			if (dst && skb_dst(skb) == dst)
-				sk_tx_queue_set(sk, queue_index);
-
-		}
+		if (queue_index != new_index && sk &&
+		    rcu_access_pointer(sk->sk_dst_cache))
+			sk_tx_queue_set(sk, queue_index);
 
 		queue_index = new_index;
 	}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9232c68941ab..60533db8b72d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
 		atomic_set(&p->refcnt, 1);
 		p->reachable_time =
 				neigh_rand_reach_time(p->base_reachable_time);
+		dev_hold(dev);
+		p->dev = dev;
+		write_pnet(&p->net, hold_net(net));
+		p->sysctl_table = NULL;
 
 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+			release_net(net);
+			dev_put(dev);
 			kfree(p);
 			return NULL;
 		}
 
-		dev_hold(dev);
-		p->dev = dev;
-		write_pnet(&p->net, hold_net(net));
-		p->sysctl_table = NULL;
 		write_lock_bh(&tbl->lock);
 		p->next = tbl->parms.next;
 		tbl->parms.next = p;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 981fed397d1d..707c3134ddf2 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -60,12 +60,19 @@ static ssize_t format_##field(const struct net_device *net, char *buf) \
 {									\
 	return sprintf(buf, format_string, net->field);			\
 }									\
-static ssize_t show_##field(struct device *dev,			\
+static ssize_t field##_show(struct device *dev,			\
 			    struct device_attribute *attr, char *buf)	\
 {									\
 	return netdev_show(dev, attr, buf, format_##field);		\
-}
+}									\
+
+#define NETDEVICE_SHOW_RO(field, format_string)				\
+NETDEVICE_SHOW(field, format_string);					\
+static DEVICE_ATTR_RO(field)
 
+#define NETDEVICE_SHOW_RW(field, format_string)				\
+NETDEVICE_SHOW(field, format_string);					\
+static DEVICE_ATTR_RW(field)
 
 /* use same locking and permission rules as SIF* ioctl's */
 static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
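
The renames in the rest of this file (show_* / store_* becoming *_show / *_store) follow from the DEVICE_ATTR_RO()/DEVICE_ATTR_RW() helpers used above, which build the attribute from functions named <name>_show and <name>_store. A stand-alone illustration (hypothetical driver code, not part of the patch):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	return len;
}

/* expands to dev_attr_demo, wired to demo_show()/demo_store() above */
static DEVICE_ATTR_RW(demo);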
@@ -96,16 +103,16 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
 	return ret;
 }
 
-NETDEVICE_SHOW(dev_id, fmt_hex);
-NETDEVICE_SHOW(addr_assign_type, fmt_dec);
-NETDEVICE_SHOW(addr_len, fmt_dec);
-NETDEVICE_SHOW(iflink, fmt_dec);
-NETDEVICE_SHOW(ifindex, fmt_dec);
-NETDEVICE_SHOW(type, fmt_dec);
-NETDEVICE_SHOW(link_mode, fmt_dec);
+NETDEVICE_SHOW_RO(dev_id, fmt_hex);
+NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
+NETDEVICE_SHOW_RO(addr_len, fmt_dec);
+NETDEVICE_SHOW_RO(iflink, fmt_dec);
+NETDEVICE_SHOW_RO(ifindex, fmt_dec);
+NETDEVICE_SHOW_RO(type, fmt_dec);
+NETDEVICE_SHOW_RO(link_mode, fmt_dec);
 
 /* use same locking rules as GIFHWADDR ioctl's */
-static ssize_t show_address(struct device *dev, struct device_attribute *attr,
+static ssize_t address_show(struct device *dev, struct device_attribute *attr,
 			    char *buf)
 {
 	struct net_device *net = to_net_dev(dev);
@@ -117,15 +124,17 @@ static ssize_t show_address(struct device *dev, struct device_attribute *attr,
 	read_unlock(&dev_base_lock);
 	return ret;
 }
+static DEVICE_ATTR_RO(address);
 
-static ssize_t show_broadcast(struct device *dev,
+static ssize_t broadcast_show(struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
 	struct net_device *net = to_net_dev(dev);
 	if (dev_isalive(net))
 		return sysfs_format_mac(buf, net->broadcast, net->addr_len);
 	return -EINVAL;
 }
+static DEVICE_ATTR_RO(broadcast);
 
 static int change_carrier(struct net_device *net, unsigned long new_carrier)
 {
@@ -134,13 +143,13 @@ static int change_carrier(struct net_device *net, unsigned long new_carrier)
 	return dev_change_carrier(net, (bool) new_carrier);
 }
 
-static ssize_t store_carrier(struct device *dev, struct device_attribute *attr,
+static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
 			     const char *buf, size_t len)
 {
 	return netdev_store(dev, attr, buf, len, change_carrier);
 }
 
-static ssize_t show_carrier(struct device *dev,
+static ssize_t carrier_show(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
 	struct net_device *netdev = to_net_dev(dev);
@@ -149,8 +158,9 @@ static ssize_t show_carrier(struct device *dev,
 	}
 	return -EINVAL;
 }
+static DEVICE_ATTR_RW(carrier);
 
-static ssize_t show_speed(struct device *dev,
+static ssize_t speed_show(struct device *dev,
 			  struct device_attribute *attr, char *buf)
 {
 	struct net_device *netdev = to_net_dev(dev);
@@ -167,8 +177,9 @@ static ssize_t show_speed(struct device *dev,
 	rtnl_unlock();
 	return ret;
 }
+static DEVICE_ATTR_RO(speed);
 
-static ssize_t show_duplex(struct device *dev,
+static ssize_t duplex_show(struct device *dev,
 			   struct device_attribute *attr, char *buf)
 {
 	struct net_device *netdev = to_net_dev(dev);
@@ -198,8 +209,9 @@ static ssize_t show_duplex(struct device *dev,
 	rtnl_unlock();
 	return ret;
 }
+static DEVICE_ATTR_RO(duplex);
 
-static ssize_t show_dormant(struct device *dev,
+static ssize_t dormant_show(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
 	struct net_device *netdev = to_net_dev(dev);
@@ -209,6 +221,7 @@ static ssize_t show_dormant(struct device *dev,
 
 	return -EINVAL;
 }
+static DEVICE_ATTR_RO(dormant);
 
 static const char *const operstates[] = {
 	"unknown",
@@ -220,7 +233,7 @@ static const char *const operstates[] = {
 	"up"
 };
 
-static ssize_t show_operstate(struct device *dev,
+static ssize_t operstate_show(struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
 	const struct net_device *netdev = to_net_dev(dev);
@@ -237,35 +250,33 @@ static ssize_t show_operstate(struct device *dev,
 
 	return sprintf(buf, "%s\n", operstates[operstate]);
 }
+static DEVICE_ATTR_RO(operstate);
 
 /* read-write attributes */
-NETDEVICE_SHOW(mtu, fmt_dec);
 
 static int change_mtu(struct net_device *net, unsigned long new_mtu)
 {
 	return dev_set_mtu(net, (int) new_mtu);
 }
 
-static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
+static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
 			 const char *buf, size_t len)
 {
 	return netdev_store(dev, attr, buf, len, change_mtu);
 }
-
-NETDEVICE_SHOW(flags, fmt_hex);
+NETDEVICE_SHOW_RW(mtu, fmt_dec);
 
 static int change_flags(struct net_device *net, unsigned long new_flags)
 {
 	return dev_change_flags(net, (unsigned int) new_flags);
 }
 
-static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
+static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
 			   const char *buf, size_t len)
 {
 	return netdev_store(dev, attr, buf, len, change_flags);
 }
-
-NETDEVICE_SHOW(tx_queue_len, fmt_ulong);
+NETDEVICE_SHOW_RW(flags, fmt_hex);
 
 static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
 {
@@ -273,7 +284,7 @@ static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
273 return 0; 284 return 0;
274} 285}
275 286
276static ssize_t store_tx_queue_len(struct device *dev, 287static ssize_t tx_queue_len_store(struct device *dev,
277 struct device_attribute *attr, 288 struct device_attribute *attr,
278 const char *buf, size_t len) 289 const char *buf, size_t len)
279{ 290{
@@ -282,8 +293,9 @@ static ssize_t store_tx_queue_len(struct device *dev,
282 293
283 return netdev_store(dev, attr, buf, len, change_tx_queue_len); 294 return netdev_store(dev, attr, buf, len, change_tx_queue_len);
284} 295}
296NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);
285 297
286static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr, 298static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
287 const char *buf, size_t len) 299 const char *buf, size_t len)
288{ 300{
289 struct net_device *netdev = to_net_dev(dev); 301 struct net_device *netdev = to_net_dev(dev);
@@ -306,7 +318,7 @@ static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
306 return ret < 0 ? ret : len; 318 return ret < 0 ? ret : len;
307} 319}
308 320
309static ssize_t show_ifalias(struct device *dev, 321static ssize_t ifalias_show(struct device *dev,
310 struct device_attribute *attr, char *buf) 322 struct device_attribute *attr, char *buf)
311{ 323{
312 const struct net_device *netdev = to_net_dev(dev); 324 const struct net_device *netdev = to_net_dev(dev);
@@ -319,8 +331,7 @@ static ssize_t show_ifalias(struct device *dev,
319 rtnl_unlock(); 331 rtnl_unlock();
320 return ret; 332 return ret;
321} 333}
322 334static DEVICE_ATTR_RW(ifalias);
323NETDEVICE_SHOW(group, fmt_dec);
324 335
325static int change_group(struct net_device *net, unsigned long new_group) 336static int change_group(struct net_device *net, unsigned long new_group)
326{ 337{
@@ -328,35 +339,37 @@ static int change_group(struct net_device *net, unsigned long new_group)
328 return 0; 339 return 0;
329} 340}
330 341
331static ssize_t store_group(struct device *dev, struct device_attribute *attr, 342static ssize_t group_store(struct device *dev, struct device_attribute *attr,
332 const char *buf, size_t len) 343 const char *buf, size_t len)
333{ 344{
334 return netdev_store(dev, attr, buf, len, change_group); 345 return netdev_store(dev, attr, buf, len, change_group);
335} 346}
336 347NETDEVICE_SHOW(group, fmt_dec);
337static struct device_attribute net_class_attributes[] = { 348static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
338 __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL), 349
339 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), 350static struct attribute *net_class_attrs[] = {
340 __ATTR(dev_id, S_IRUGO, show_dev_id, NULL), 351 &dev_attr_netdev_group.attr,
341 __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias), 352 &dev_attr_type.attr,
342 __ATTR(iflink, S_IRUGO, show_iflink, NULL), 353 &dev_attr_dev_id.attr,
343 __ATTR(ifindex, S_IRUGO, show_ifindex, NULL), 354 &dev_attr_iflink.attr,
344 __ATTR(type, S_IRUGO, show_type, NULL), 355 &dev_attr_ifindex.attr,
345 __ATTR(link_mode, S_IRUGO, show_link_mode, NULL), 356 &dev_attr_addr_assign_type.attr,
346 __ATTR(address, S_IRUGO, show_address, NULL), 357 &dev_attr_addr_len.attr,
347 __ATTR(broadcast, S_IRUGO, show_broadcast, NULL), 358 &dev_attr_link_mode.attr,
348 __ATTR(carrier, S_IRUGO | S_IWUSR, show_carrier, store_carrier), 359 &dev_attr_address.attr,
349 __ATTR(speed, S_IRUGO, show_speed, NULL), 360 &dev_attr_broadcast.attr,
350 __ATTR(duplex, S_IRUGO, show_duplex, NULL), 361 &dev_attr_speed.attr,
351 __ATTR(dormant, S_IRUGO, show_dormant, NULL), 362 &dev_attr_duplex.attr,
352 __ATTR(operstate, S_IRUGO, show_operstate, NULL), 363 &dev_attr_dormant.attr,
353 __ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu), 364 &dev_attr_operstate.attr,
354 __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags), 365 &dev_attr_ifalias.attr,
355 __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len, 366 &dev_attr_carrier.attr,
356 store_tx_queue_len), 367 &dev_attr_mtu.attr,
357 __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group), 368 &dev_attr_flags.attr,
358 {} 369 &dev_attr_tx_queue_len.attr,
370 NULL,
359}; 371};
372ATTRIBUTE_GROUPS(net_class);
360 373
 361 /* Show a given attribute in the statistics group */ 374
362static ssize_t netstat_show(const struct device *d, 375static ssize_t netstat_show(const struct device *d,
@@ -382,13 +395,13 @@ static ssize_t netstat_show(const struct device *d,
382 395
383/* generate a read-only statistics attribute */ 396/* generate a read-only statistics attribute */
384#define NETSTAT_ENTRY(name) \ 397#define NETSTAT_ENTRY(name) \
385static ssize_t show_##name(struct device *d, \ 398static ssize_t name##_show(struct device *d, \
386 struct device_attribute *attr, char *buf) \ 399 struct device_attribute *attr, char *buf) \
387{ \ 400{ \
388 return netstat_show(d, attr, buf, \ 401 return netstat_show(d, attr, buf, \
389 offsetof(struct rtnl_link_stats64, name)); \ 402 offsetof(struct rtnl_link_stats64, name)); \
390} \ 403} \
391static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) 404static DEVICE_ATTR_RO(name)
392 405
393NETSTAT_ENTRY(rx_packets); 406NETSTAT_ENTRY(rx_packets);
394NETSTAT_ENTRY(tx_packets); 407NETSTAT_ENTRY(tx_packets);
@@ -457,6 +470,9 @@ static struct attribute_group wireless_group = {
457 .attrs = wireless_attrs, 470 .attrs = wireless_attrs,
458}; 471};
459#endif 472#endif
473
474#else /* CONFIG_SYSFS */
475#define net_class_groups NULL
460#endif /* CONFIG_SYSFS */ 476#endif /* CONFIG_SYSFS */
461 477
462#ifdef CONFIG_RPS 478#ifdef CONFIG_RPS
@@ -1229,9 +1245,7 @@ static const void *net_namespace(struct device *d)
1229static struct class net_class = { 1245static struct class net_class = {
1230 .name = "net", 1246 .name = "net",
1231 .dev_release = netdev_release, 1247 .dev_release = netdev_release,
1232#ifdef CONFIG_SYSFS 1248 .dev_groups = net_class_groups,
1233 .dev_attrs = net_class_attributes,
1234#endif /* CONFIG_SYSFS */
1235 .dev_uevent = netdev_uevent, 1249 .dev_uevent = netdev_uevent,
1236 .ns_type = &net_ns_type_operations, 1250 .ns_type = &net_ns_type_operations,
1237 .namespace = net_namespace, 1251 .namespace = net_namespace,
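
The net-sysfs.c conversion above relies on the naming contract behind DEVICE_ATTR_RW()/DEVICE_ATTR_RO(): the callbacks must be named <attr>_show() and <attr>_store(), and the generated dev_attr_<attr> objects are gathered into attribute groups that the class publishes through .dev_groups instead of the removed .dev_attrs. A minimal sketch of the same pattern, assuming a made-up "foo" attribute and class that are not part of this patch:

#include <linux/device.h>
#include <linux/sysfs.h>

/* DEVICE_ATTR_RW(foo) expects callbacks named foo_show()/foo_store() */
static ssize_t foo_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return len;			/* accept and ignore writes in this sketch */
}
static DEVICE_ATTR_RW(foo);		/* emits dev_attr_foo with 0644 mode */

static struct attribute *foo_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);			/* emits foo_group and foo_groups */

static struct class foo_class = {
	.name		= "foo",
	.dev_groups	= foo_groups,	/* replaces the removed .dev_attrs */
};

With this in place the show/store wiring and the file mode come from the macro, which is why the patch renames show_carrier()/store_carrier() and friends to the <attr>_show()/<attr>_store() form.
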
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index e533259dce3c..d9cd627e6a16 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -29,12 +29,6 @@
29 29
30#define PRIOMAP_MIN_SZ 128 30#define PRIOMAP_MIN_SZ 128
31 31
32static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
33{
34 return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id),
35 struct cgroup_netprio_state, css);
36}
37
38/* 32/*
 39 * Extend @dev->priomap so that it's large enough to accommodate 33
40 * @target_idx. @dev->priomap.priomap_len > @target_idx after successful 34 * @target_idx. @dev->priomap.priomap_len > @target_idx after successful
@@ -87,67 +81,70 @@ static int extend_netdev_table(struct net_device *dev, u32 target_idx)
87 81
88/** 82/**
89 * netprio_prio - return the effective netprio of a cgroup-net_device pair 83 * netprio_prio - return the effective netprio of a cgroup-net_device pair
90 * @cgrp: cgroup part of the target pair 84 * @css: css part of the target pair
91 * @dev: net_device part of the target pair 85 * @dev: net_device part of the target pair
92 * 86 *
93 * Should be called under RCU read or rtnl lock. 87 * Should be called under RCU read or rtnl lock.
94 */ 88 */
95static u32 netprio_prio(struct cgroup *cgrp, struct net_device *dev) 89static u32 netprio_prio(struct cgroup_subsys_state *css, struct net_device *dev)
96{ 90{
97 struct netprio_map *map = rcu_dereference_rtnl(dev->priomap); 91 struct netprio_map *map = rcu_dereference_rtnl(dev->priomap);
92 int id = css->cgroup->id;
98 93
99 if (map && cgrp->id < map->priomap_len) 94 if (map && id < map->priomap_len)
100 return map->priomap[cgrp->id]; 95 return map->priomap[id];
101 return 0; 96 return 0;
102} 97}
103 98
104/** 99/**
105 * netprio_set_prio - set netprio on a cgroup-net_device pair 100 * netprio_set_prio - set netprio on a cgroup-net_device pair
106 * @cgrp: cgroup part of the target pair 101 * @css: css part of the target pair
107 * @dev: net_device part of the target pair 102 * @dev: net_device part of the target pair
108 * @prio: prio to set 103 * @prio: prio to set
109 * 104 *
110 * Set netprio to @prio on @cgrp-@dev pair. Should be called under rtnl 105 * Set netprio to @prio on @css-@dev pair. Should be called under rtnl
111 * lock and may fail under memory pressure for non-zero @prio. 106 * lock and may fail under memory pressure for non-zero @prio.
112 */ 107 */
113static int netprio_set_prio(struct cgroup *cgrp, struct net_device *dev, 108static int netprio_set_prio(struct cgroup_subsys_state *css,
114 u32 prio) 109 struct net_device *dev, u32 prio)
115{ 110{
116 struct netprio_map *map; 111 struct netprio_map *map;
112 int id = css->cgroup->id;
117 int ret; 113 int ret;
118 114
119 /* avoid extending priomap for zero writes */ 115 /* avoid extending priomap for zero writes */
120 map = rtnl_dereference(dev->priomap); 116 map = rtnl_dereference(dev->priomap);
121 if (!prio && (!map || map->priomap_len <= cgrp->id)) 117 if (!prio && (!map || map->priomap_len <= id))
122 return 0; 118 return 0;
123 119
124 ret = extend_netdev_table(dev, cgrp->id); 120 ret = extend_netdev_table(dev, id);
125 if (ret) 121 if (ret)
126 return ret; 122 return ret;
127 123
128 map = rtnl_dereference(dev->priomap); 124 map = rtnl_dereference(dev->priomap);
129 map->priomap[cgrp->id] = prio; 125 map->priomap[id] = prio;
130 return 0; 126 return 0;
131} 127}
132 128
133static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp) 129static struct cgroup_subsys_state *
130cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
134{ 131{
135 struct cgroup_netprio_state *cs; 132 struct cgroup_subsys_state *css;
136 133
137 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 134 css = kzalloc(sizeof(*css), GFP_KERNEL);
138 if (!cs) 135 if (!css)
139 return ERR_PTR(-ENOMEM); 136 return ERR_PTR(-ENOMEM);
140 137
141 return &cs->css; 138 return css;
142} 139}
143 140
144static int cgrp_css_online(struct cgroup *cgrp) 141static int cgrp_css_online(struct cgroup_subsys_state *css)
145{ 142{
146 struct cgroup *parent = cgrp->parent; 143 struct cgroup_subsys_state *parent_css = css_parent(css);
147 struct net_device *dev; 144 struct net_device *dev;
148 int ret = 0; 145 int ret = 0;
149 146
150 if (!parent) 147 if (!parent_css)
151 return 0; 148 return 0;
152 149
153 rtnl_lock(); 150 rtnl_lock();
@@ -156,9 +153,9 @@ static int cgrp_css_online(struct cgroup *cgrp)
156 * onlining, there is no need to clear them on offline. 153 * onlining, there is no need to clear them on offline.
157 */ 154 */
158 for_each_netdev(&init_net, dev) { 155 for_each_netdev(&init_net, dev) {
159 u32 prio = netprio_prio(parent, dev); 156 u32 prio = netprio_prio(parent_css, dev);
160 157
161 ret = netprio_set_prio(cgrp, dev, prio); 158 ret = netprio_set_prio(css, dev, prio);
162 if (ret) 159 if (ret)
163 break; 160 break;
164 } 161 }
@@ -166,29 +163,29 @@ static int cgrp_css_online(struct cgroup *cgrp)
166 return ret; 163 return ret;
167} 164}
168 165
169static void cgrp_css_free(struct cgroup *cgrp) 166static void cgrp_css_free(struct cgroup_subsys_state *css)
170{ 167{
171 kfree(cgrp_netprio_state(cgrp)); 168 kfree(css);
172} 169}
173 170
174static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft) 171static u64 read_prioidx(struct cgroup_subsys_state *css, struct cftype *cft)
175{ 172{
176 return cgrp->id; 173 return css->cgroup->id;
177} 174}
178 175
179static int read_priomap(struct cgroup *cont, struct cftype *cft, 176static int read_priomap(struct cgroup_subsys_state *css, struct cftype *cft,
180 struct cgroup_map_cb *cb) 177 struct cgroup_map_cb *cb)
181{ 178{
182 struct net_device *dev; 179 struct net_device *dev;
183 180
184 rcu_read_lock(); 181 rcu_read_lock();
185 for_each_netdev_rcu(&init_net, dev) 182 for_each_netdev_rcu(&init_net, dev)
186 cb->fill(cb, dev->name, netprio_prio(cont, dev)); 183 cb->fill(cb, dev->name, netprio_prio(css, dev));
187 rcu_read_unlock(); 184 rcu_read_unlock();
188 return 0; 185 return 0;
189} 186}
190 187
191static int write_priomap(struct cgroup *cgrp, struct cftype *cft, 188static int write_priomap(struct cgroup_subsys_state *css, struct cftype *cft,
192 const char *buffer) 189 const char *buffer)
193{ 190{
194 char devname[IFNAMSIZ + 1]; 191 char devname[IFNAMSIZ + 1];
@@ -205,7 +202,7 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
205 202
206 rtnl_lock(); 203 rtnl_lock();
207 204
208 ret = netprio_set_prio(cgrp, dev, prio); 205 ret = netprio_set_prio(css, dev, prio);
209 206
210 rtnl_unlock(); 207 rtnl_unlock();
211 dev_put(dev); 208 dev_put(dev);
@@ -221,12 +218,13 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
221 return 0; 218 return 0;
222} 219}
223 220
224static void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) 221static void net_prio_attach(struct cgroup_subsys_state *css,
222 struct cgroup_taskset *tset)
225{ 223{
226 struct task_struct *p; 224 struct task_struct *p;
227 void *v; 225 void *v;
228 226
229 cgroup_taskset_for_each(p, cgrp, tset) { 227 cgroup_taskset_for_each(p, css, tset) {
230 task_lock(p); 228 task_lock(p);
231 v = (void *)(unsigned long)task_netprioidx(p); 229 v = (void *)(unsigned long)task_netprioidx(p);
232 iterate_fd(p->files, 0, update_netprio, v); 230 iterate_fd(p->files, 0, update_netprio, v);
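
The netprio conversion above switches the callbacks from struct cgroup to struct cgroup_subsys_state, but the underlying data structure is unchanged: the cgroup id still indexes a per-device priomap, ids beyond priomap_len read as priority 0, and a write extends the table on demand. A small userspace model of that lookup/update behaviour, with illustrative names and a simplistic allocation strategy (the kernel grows its RCU-protected map differently):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* toy stand-in for struct netprio_map: a length-prefixed priority array */
struct priomap {
	unsigned int len;
	unsigned int prio[];
};

/* mirrors netprio_prio(): a missing or short map means priority 0 */
static unsigned int prio_lookup(const struct priomap *map, unsigned int id)
{
	if (map && id < map->len)
		return map->prio[id];
	return 0;
}

/* mirrors netprio_set_prio(): extend the map if needed, then store */
static struct priomap *prio_set(struct priomap *map, unsigned int id,
				unsigned int prio)
{
	if (!map || id >= map->len) {
		unsigned int newlen = id + 1;
		struct priomap *n;

		n = calloc(1, sizeof(*n) + newlen * sizeof(n->prio[0]));
		if (!n)
			return map;
		if (map)
			memcpy(n->prio, map->prio, map->len * sizeof(n->prio[0]));
		n->len = newlen;
		free(map);
		map = n;
	}
	map->prio[id] = prio;
	return map;
}

int main(void)
{
	struct priomap *map = NULL;

	map = prio_set(map, 3, 6);		/* cgroup id 3 -> prio 6 */
	printf("id 3 -> %u, id 7 -> %u\n",
	       prio_lookup(map, 3), prio_lookup(map, 7));	/* prints 6, 0 */
	free(map);
	return 0;
}
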
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 3de740834d1f..ca198c1d1d30 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2156,7 +2156,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
2156 /* If aging addresses are supported device will need to 2156 /* If aging addresses are supported device will need to
2157 * implement its own handler for this. 2157 * implement its own handler for this.
2158 */ 2158 */
2159 if (ndm->ndm_state & NUD_PERMANENT) { 2159 if (!(ndm->ndm_state & NUD_PERMANENT)) {
2160 pr_info("%s: FDB only supports static addresses\n", dev->name); 2160 pr_info("%s: FDB only supports static addresses\n", dev->name);
2161 return -EINVAL; 2161 return -EINVAL;
2162 } 2162 }
@@ -2384,7 +2384,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
2384 struct nlattr *extfilt; 2384 struct nlattr *extfilt;
2385 u32 filter_mask = 0; 2385 u32 filter_mask = 0;
2386 2386
2387 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg), 2387 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
2388 IFLA_EXT_MASK); 2388 IFLA_EXT_MASK);
2389 if (extfilt) 2389 if (extfilt)
2390 filter_mask = nla_get_u32(extfilt); 2390 filter_mask = nla_get_u32(extfilt);
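
The first rtnetlink.c hunk above inverts the ndm_state test: ndo_dflt_fdb_del() only handles static entries, so it has to reject requests that lack NUD_PERMANENT rather than those that carry it. A tiny userspace model of the corrected check (the NUD_PERMANENT value is the one from include/uapi/linux/neighbour.h):

#include <stdio.h>

#define NUD_PERMANENT	0x80	/* from include/uapi/linux/neighbour.h */

/* models the corrected check: only static (permanent) entries may be
 * deleted by the default handler, everything else is rejected */
static int dflt_fdb_del_check(unsigned short ndm_state)
{
	if (!(ndm_state & NUD_PERMANENT)) {
		printf("FDB only supports static addresses\n");
		return -1;		/* -EINVAL in the kernel */
	}
	return 0;
}

int main(void)
{
	printf("%d %d\n", dflt_fdb_del_check(NUD_PERMANENT),
	       dflt_fdb_del_check(0));	/* prints 0 -1 */
	return 0;
}
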
diff --git a/net/core/scm.c b/net/core/scm.c
index 03795d0147f2..b4da80b1cc07 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
54 return -EINVAL; 54 return -EINVAL;
55 55
56 if ((creds->pid == task_tgid_vnr(current) || 56 if ((creds->pid == task_tgid_vnr(current) ||
57 ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) && 57 ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
58 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || 58 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
59 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && 59 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
60 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || 60 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index 13571eae6bae..ef56ab5b35fe 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -36,7 +36,8 @@ static ssize_t name ## _show(struct device *dev, \
36 ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \ 36 ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \
37 mutex_unlock(&phy->pib_lock); \ 37 mutex_unlock(&phy->pib_lock); \
38 return ret; \ 38 return ret; \
39} 39} \
40static DEVICE_ATTR_RO(name);
40 41
41#define MASTER_SHOW(field, format_string) \ 42#define MASTER_SHOW(field, format_string) \
42 MASTER_SHOW_COMPLEX(field, format_string, phy->field) 43 MASTER_SHOW_COMPLEX(field, format_string, phy->field)
@@ -66,15 +67,17 @@ static ssize_t channels_supported_show(struct device *dev,
66 mutex_unlock(&phy->pib_lock); 67 mutex_unlock(&phy->pib_lock);
67 return len; 68 return len;
68} 69}
69 70static DEVICE_ATTR_RO(channels_supported);
70static struct device_attribute pmib_attrs[] = { 71
71 __ATTR_RO(current_channel), 72static struct attribute *pmib_attrs[] = {
72 __ATTR_RO(current_page), 73 &dev_attr_current_channel.attr,
73 __ATTR_RO(channels_supported), 74 &dev_attr_current_page.attr,
74 __ATTR_RO(transmit_power), 75 &dev_attr_channels_supported.attr,
75 __ATTR_RO(cca_mode), 76 &dev_attr_transmit_power.attr,
76 {}, 77 &dev_attr_cca_mode.attr,
78 NULL,
77}; 79};
80ATTRIBUTE_GROUPS(pmib);
78 81
79static void wpan_phy_release(struct device *d) 82static void wpan_phy_release(struct device *d)
80{ 83{
@@ -85,7 +88,7 @@ static void wpan_phy_release(struct device *d)
85static struct class wpan_phy_class = { 88static struct class wpan_phy_class = {
86 .name = "ieee802154", 89 .name = "ieee802154",
87 .dev_release = wpan_phy_release, 90 .dev_release = wpan_phy_release,
88 .dev_attrs = pmib_attrs, 91 .dev_groups = pmib_groups,
89}; 92};
90 93
91static DEFINE_MUTEX(wpan_phy_mutex); 94static DEFINE_MUTEX(wpan_phy_mutex);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index ab3d814bc80a..109ee89f123e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
477 } 477 }
478 478
479 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - 479 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
480 net_adj) & ~(align - 1)) + (net_adj - 2); 480 net_adj) & ~(align - 1)) + net_adj - 2;
481} 481}
482 482
483static void esp4_err(struct sk_buff *skb, u32 info) 483static void esp4_err(struct sk_buff *skb, u32 info)
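
The esp4_get_mtu() expression above (the same formula recurs in the esp6.c hunk further down) packs several adjustments into one line: subtract the ESP header, the ICV and the per-family adjustment, round the remainder down to the cipher block alignment, then add net_adj - 2 back outside the mask. A small runnable model of that arithmetic; all parameter values below are made up for illustration:

#include <stdio.h>

/* models the arithmetic of esp4_get_mtu(); every input here is hypothetical */
static unsigned int esp_payload_mtu(unsigned int mtu, unsigned int header_len,
				    unsigned int authsize, unsigned int net_adj,
				    unsigned int align)
{
	return ((mtu - header_len - authsize - net_adj) & ~(align - 1))
		+ net_adj - 2;
}

int main(void)
{
	/* hypothetical: 1500 MTU, 24-byte ESP header, 12-byte ICV,
	 * 20-byte adjustment, 16-byte cipher block alignment */
	printf("%u\n", esp_payload_mtu(1500, 24, 12, 20, 16));	/* prints 1458 */
	return 0;
}
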
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 108a1e9c9eac..3df6d3edb2a1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,7 +71,6 @@
71#include <linux/init.h> 71#include <linux/init.h>
72#include <linux/list.h> 72#include <linux/list.h>
73#include <linux/slab.h> 73#include <linux/slab.h>
74#include <linux/prefetch.h>
75#include <linux/export.h> 74#include <linux/export.h>
76#include <net/net_namespace.h> 75#include <net/net_namespace.h>
77#include <net/ip.h> 76#include <net/ip.h>
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
1761 if (!c) 1760 if (!c)
1762 continue; 1761 continue;
1763 1762
1764 if (IS_LEAF(c)) { 1763 if (IS_LEAF(c))
1765 prefetch(rcu_dereference_rtnl(p->child[idx]));
1766 return (struct leaf *) c; 1764 return (struct leaf *) c;
1767 }
1768 1765
1769 /* Rescan start scanning in new node */ 1766 /* Rescan start scanning in new node */
1770 p = (struct tnode *) c; 1767 p = (struct tnode *) c;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 1f6eab66f7ce..8d6939eeb492 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
383 if (daddr) 383 if (daddr)
384 memcpy(&iph->daddr, daddr, 4); 384 memcpy(&iph->daddr, daddr, 4);
385 if (iph->daddr) 385 if (iph->daddr)
386 return t->hlen; 386 return t->hlen + sizeof(*iph);
387 387
388 return -(t->hlen + sizeof(*iph)); 388 return -(t->hlen + sizeof(*iph));
389} 389}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4bcabf3ab4ca..9ee17e3d11c3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -211,14 +211,6 @@ static inline int ip_finish_output2(struct sk_buff *skb)
211 return -EINVAL; 211 return -EINVAL;
212} 212}
213 213
214static inline int ip_skb_dst_mtu(struct sk_buff *skb)
215{
216 struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
217
218 return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
219 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
220}
221
222static int ip_finish_output(struct sk_buff *skb) 214static int ip_finish_output(struct sk_buff *skb)
223{ 215{
224#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) 216#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 7167b08977df..850525b34899 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt,
76 iph->daddr = dst; 76 iph->daddr = dst;
77 iph->saddr = src; 77 iph->saddr = src;
78 iph->ttl = ttl; 78 iph->ttl = ttl;
79 tunnel_ip_select_ident(skb, 79 __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
80 (const struct iphdr *)skb_inner_network_header(skb),
81 &rt->dst);
82 80
83 err = ip_local_out(skb); 81 err = ip_local_out(skb);
84 if (unlikely(net_xmit_eval(err))) 82 if (unlikely(net_xmit_eval(err)))
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 51fc2a1dcdd3..b3ac3c3f6219 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -190,15 +190,14 @@ static int ipip_rcv(struct sk_buff *skb)
190 struct ip_tunnel *tunnel; 190 struct ip_tunnel *tunnel;
191 const struct iphdr *iph; 191 const struct iphdr *iph;
192 192
193 if (iptunnel_pull_header(skb, 0, tpi.proto))
194 goto drop;
195
196 iph = ip_hdr(skb); 193 iph = ip_hdr(skb);
197 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 194 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
198 iph->saddr, iph->daddr, 0); 195 iph->saddr, iph->daddr, 0);
199 if (tunnel) { 196 if (tunnel) {
200 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 197 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
201 goto drop; 198 goto drop;
199 if (iptunnel_pull_header(skb, 0, tpi.proto))
200 goto drop;
202 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); 201 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
203 } 202 }
204 203
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 6577a1149a47..463bd1273346 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -273,7 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = {
273 SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), 273 SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
274 SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), 274 SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
275 SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), 275 SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
276 SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS), 276 SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
277 SNMP_MIB_SENTINEL 277 SNMP_MIB_SENTINEL
278}; 278};
279 279
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index dd44e0ab600c..61e60d67adca 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -571,7 +571,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
571 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, 571 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
572 RT_SCOPE_UNIVERSE, 572 RT_SCOPE_UNIVERSE,
573 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, 573 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
574 inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP, 574 inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
575 (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
575 daddr, saddr, 0, 0); 576 daddr, saddr, 0, 0);
576 577
577 if (!inet->hdrincl) { 578 if (!inet->hdrincl) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5423223e93c2..b2f6c74861af 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1121,6 +1121,13 @@ new_segment:
1121 goto wait_for_memory; 1121 goto wait_for_memory;
1122 1122
1123 /* 1123 /*
1124 * All packets are restored as if they have
1125 * already been sent.
1126 */
1127 if (tp->repair)
1128 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1129
1130 /*
1124 * Check whether we can use HW checksum. 1131 * Check whether we can use HW checksum.
1125 */ 1132 */
1126 if (sk->sk_route_caps & NETIF_F_ALL_CSUM) 1133 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9077f441cb2..b6ae92a51f58 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
206 */ 206 */
207static inline void bictcp_update(struct bictcp *ca, u32 cwnd) 207static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
208{ 208{
209 u64 offs; 209 u32 delta, bic_target, max_cnt;
210 u32 delta, t, bic_target, max_cnt; 210 u64 offs, t;
211 211
212 ca->ack_cnt++; /* count the number of ACKs */ 212 ca->ack_cnt++; /* count the number of ACKs */
213 213
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
250 * if the cwnd < 1 million packets !!! 250 * if the cwnd < 1 million packets !!!
251 */ 251 */
252 252
253 t = (s32)(tcp_time_stamp - ca->epoch_start);
254 t += msecs_to_jiffies(ca->delay_min >> 3);
253 /* change the unit from HZ to bictcp_HZ */ 255 /* change the unit from HZ to bictcp_HZ */
254 t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3) 256 t <<= BICTCP_HZ;
255 - ca->epoch_start) << BICTCP_HZ) / HZ; 257 do_div(t, HZ);
256 258
257 if (t < ca->bic_K) /* t - K */ 259 if (t < ca->bic_K) /* t - K */
258 offs = ca->bic_K - t; 260 offs = ca->bic_K - t;
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
414 return; 416 return;
415 417
416 /* Discard delay samples right after fast recovery */ 418 /* Discard delay samples right after fast recovery */
417 if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ) 419 if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
418 return; 420 return;
419 421
420 delay = (rtt_us << 3) / USEC_PER_MSEC; 422 delay = (rtt_us << 3) / USEC_PER_MSEC;
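
The bictcp_update() hunk above widens the elapsed-time term to 64 bits before scaling it: the old code shifted a 32-bit value left by BICTCP_HZ, which can overflow once the congestion epoch is old enough, while the new code shifts a u64 and divides with do_div(). A userspace model of the new computation; the constants and inputs are illustrative, and plain 64-bit division stands in for do_div():

#include <stdio.h>
#include <stdint.h>

#define HZ		1000	/* illustrative: 1000 jiffies per second */
#define BICTCP_HZ	10	/* BIC time unit: 2^10 slots per second */

/* models the widened time computation in bictcp_update() */
static uint64_t bictcp_time(uint32_t now, uint32_t epoch_start,
			    uint32_t delay_min_jiffies)
{
	uint64_t t = (int32_t)(now - epoch_start);	/* jiffies since epoch */

	t += delay_min_jiffies;	/* as in t += msecs_to_jiffies(delay_min >> 3) */
	t <<= BICTCP_HZ;	/* change the unit from HZ to bictcp_HZ */
	return t / HZ;		/* the kernel uses do_div(t, HZ) */
}

int main(void)
{
	/* an elapsed time large enough that a 32-bit (t << 10) would overflow */
	uint32_t elapsed = 5000000;	/* roughly 83 minutes of jiffies at HZ=1000 */

	printf("t = %llu\n",
	       (unsigned long long)bictcp_time(elapsed, 0, 40));
	return 0;
}
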
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 28af45abe062..3ca2139a130b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3535,7 +3535,10 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
3535 ++ptr; 3535 ++ptr;
3536 tp->rx_opt.rcv_tsval = ntohl(*ptr); 3536 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3537 ++ptr; 3537 ++ptr;
3538 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; 3538 if (*ptr)
3539 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
3540 else
3541 tp->rx_opt.rcv_tsecr = 0;
3539 return true; 3542 return true;
3540 } 3543 }
3541 return false; 3544 return false;
@@ -3560,7 +3563,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
3560 } 3563 }
3561 3564
3562 tcp_parse_options(skb, &tp->rx_opt, 1, NULL); 3565 tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
3563 if (tp->rx_opt.saw_tstamp) 3566 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
3564 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 3567 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
3565 3568
3566 return true; 3569 return true;
@@ -5316,7 +5319,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5316 int saved_clamp = tp->rx_opt.mss_clamp; 5319 int saved_clamp = tp->rx_opt.mss_clamp;
5317 5320
5318 tcp_parse_options(skb, &tp->rx_opt, 0, &foc); 5321 tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
5319 if (tp->rx_opt.saw_tstamp) 5322 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
5320 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 5323 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
5321 5324
5322 if (th->ack) { 5325 if (th->ack) {
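
The tcp_input.c changes above treat an echoed timestamp of zero as "not echoed": the per-connection tsoffset is only subtracted when rcv_tsecr is non-zero, since subtracting it from 0 would wrap around to a large bogus value in u32 arithmetic. A minimal userspace model of the guarded handling:

#include <stdio.h>
#include <stdint.h>

/* models the guarded offset handling: TSecr == 0 means no echoed timestamp,
 * so the connection's timestamp offset must not be applied to it */
static uint32_t adjust_tsecr(uint32_t tsecr_wire, uint32_t tsoffset)
{
	return tsecr_wire ? tsecr_wire - tsoffset : 0;
}

int main(void)
{
	/* without the guard, 0 - 100 would wrap to 4294967196 */
	printf("%u %u\n", adjust_tsecr(0, 100), adjust_tsecr(1000, 100));
	return 0;
}
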
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index da14436c1735..8a57d79b0b16 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -132,10 +132,10 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
132 return 0; 132 return 0;
133} 133}
134 134
135static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft, 135static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
136 const char *buffer) 136 const char *buffer)
137{ 137{
138 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 138 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
139 unsigned long long val; 139 unsigned long long val;
140 int ret = 0; 140 int ret = 0;
141 141
@@ -180,9 +180,9 @@ static u64 tcp_read_usage(struct mem_cgroup *memcg)
180 return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE); 180 return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
181} 181}
182 182
183static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft) 183static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
184{ 184{
185 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 185 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
186 u64 val; 186 u64 val;
187 187
188 switch (cft->private) { 188 switch (cft->private) {
@@ -202,13 +202,13 @@ static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
202 return val; 202 return val;
203} 203}
204 204
205static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event) 205static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
206{ 206{
207 struct mem_cgroup *memcg; 207 struct mem_cgroup *memcg;
208 struct tcp_memcontrol *tcp; 208 struct tcp_memcontrol *tcp;
209 struct cg_proto *cg_proto; 209 struct cg_proto *cg_proto;
210 210
211 memcg = mem_cgroup_from_cont(cont); 211 memcg = mem_cgroup_from_css(css);
212 cg_proto = tcp_prot.proto_cgroup(memcg); 212 cg_proto = tcp_prot.proto_cgroup(memcg);
213 if (!cg_proto) 213 if (!cg_proto)
214 return 0; 214 return 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 92fde8d1aa82..170737a9d56d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2670,7 +2670,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2670 int tcp_header_size; 2670 int tcp_header_size;
2671 int mss; 2671 int mss;
2672 2672
2673 skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC)); 2673 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2674 if (unlikely(!skb)) { 2674 if (unlikely(!skb)) {
2675 dst_release(dst); 2675 dst_release(dst);
2676 return NULL; 2676 return NULL;
@@ -2814,6 +2814,8 @@ void tcp_connect_init(struct sock *sk)
2814 2814
2815 if (likely(!tp->repair)) 2815 if (likely(!tp->repair))
2816 tp->rcv_nxt = 0; 2816 tp->rcv_nxt = 0;
2817 else
2818 tp->rcv_tstamp = tcp_time_stamp;
2817 tp->rcv_wup = tp->rcv_nxt; 2819 tp->rcv_wup = tp->rcv_nxt;
2818 tp->copied_seq = tp->rcv_nxt; 2820 tp->copied_seq = tp->rcv_nxt;
2819 2821
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 327a617d594c..baa0f63731fd 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -21,7 +21,6 @@
21static int xfrm4_tunnel_check_size(struct sk_buff *skb) 21static int xfrm4_tunnel_check_size(struct sk_buff *skb)
22{ 22{
23 int mtu, ret = 0; 23 int mtu, ret = 0;
24 struct dst_entry *dst;
25 24
26 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) 25 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
27 goto out; 26 goto out;
@@ -29,12 +28,10 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
29 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) 28 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
30 goto out; 29 goto out;
31 30
32 dst = skb_dst(skb); 31 mtu = dst_mtu(skb_dst(skb));
33 mtu = dst_mtu(dst);
34 if (skb->len > mtu) { 32 if (skb->len > mtu) {
35 if (skb->sk) 33 if (skb->sk)
36 ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr, 34 xfrm_local_error(skb, mtu);
37 inet_sk(skb->sk)->inet_dport, mtu);
38 else 35 else
39 icmp_send(skb, ICMP_DEST_UNREACH, 36 icmp_send(skb, ICMP_DEST_UNREACH,
40 ICMP_FRAG_NEEDED, htonl(mtu)); 37 ICMP_FRAG_NEEDED, htonl(mtu));
@@ -99,3 +96,12 @@ int xfrm4_output(struct sk_buff *skb)
99 x->outer_mode->afinfo->output_finish, 96 x->outer_mode->afinfo->output_finish,
100 !(IPCB(skb)->flags & IPSKB_REROUTED)); 97 !(IPCB(skb)->flags & IPSKB_REROUTED));
101} 98}
99
100void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
101{
102 struct iphdr *hdr;
103
104 hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
105 ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
106 inet_sk(skb->sk)->inet_dport, mtu);
107}
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 9258e751baba..0b2a0641526a 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -83,6 +83,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
83 .extract_input = xfrm4_extract_input, 83 .extract_input = xfrm4_extract_input,
84 .extract_output = xfrm4_extract_output, 84 .extract_output = xfrm4_extract_output,
85 .transport_finish = xfrm4_transport_finish, 85 .transport_finish = xfrm4_transport_finish,
86 .local_error = xfrm4_local_error,
86}; 87};
87 88
88void __init xfrm4_state_init(void) 89void __init xfrm4_state_init(void)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index da4241c8c7da..498ea99194af 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1126,12 +1126,10 @@ retry:
1126 if (ifp->flags & IFA_F_OPTIMISTIC) 1126 if (ifp->flags & IFA_F_OPTIMISTIC)
1127 addr_flags |= IFA_F_OPTIMISTIC; 1127 addr_flags |= IFA_F_OPTIMISTIC;
1128 1128
1129 ift = !max_addresses || 1129 ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
1130 ipv6_count_addresses(idev) < max_addresses ? 1130 ipv6_addr_scope(&addr), addr_flags,
1131 ipv6_add_addr(idev, &addr, NULL, tmp_plen, 1131 tmp_valid_lft, tmp_prefered_lft);
1132 ipv6_addr_scope(&addr), addr_flags, 1132 if (IS_ERR(ift)) {
1133 tmp_valid_lft, tmp_prefered_lft) : NULL;
1134 if (IS_ERR_OR_NULL(ift)) {
1135 in6_ifa_put(ifp); 1133 in6_ifa_put(ifp);
1136 in6_dev_put(idev); 1134 in6_dev_put(idev);
1137 pr_info("%s: retry temporary address regeneration\n", __func__); 1135 pr_info("%s: retry temporary address regeneration\n", __func__);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 40ffd72243a4..aeac0dc3635d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
425 net_adj = 0; 425 net_adj = 0;
426 426
427 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - 427 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
428 net_adj) & ~(align - 1)) + (net_adj - 2); 428 net_adj) & ~(align - 1)) + net_adj - 2;
429} 429}
430 430
431static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 431static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bff3d821c7eb..c4ff5bbb45c4 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
993 993
994 if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { 994 if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
995#ifdef CONFIG_IPV6_SUBTREES 995#ifdef CONFIG_IPV6_SUBTREES
996 if (fn->subtree) 996 if (fn->subtree) {
997 fn = fib6_lookup_1(fn->subtree, args + 1); 997 struct fib6_node *sfn;
998 sfn = fib6_lookup_1(fn->subtree,
999 args + 1);
1000 if (!sfn)
1001 goto backtrack;
1002 fn = sfn;
1003 }
998#endif 1004#endif
999 if (!fn || fn->fn_flags & RTN_RTINFO) 1005 if (fn->fn_flags & RTN_RTINFO)
1000 return fn; 1006 return fn;
1001 } 1007 }
1002 } 1008 }
1003 1009#ifdef CONFIG_IPV6_SUBTREES
1010backtrack:
1011#endif
1004 if (fn->fn_flags & RTN_ROOT) 1012 if (fn->fn_flags & RTN_ROOT)
1005 break; 1013 break;
1006 1014
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index ecd60733e5e2..90747f1973fe 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -724,6 +724,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
724 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); 724 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
725 } 725 }
726 726
727 if (likely(!skb->encapsulation)) {
728 skb_reset_inner_headers(skb);
729 skb->encapsulation = 1;
730 }
731
727 skb_push(skb, gre_hlen); 732 skb_push(skb, gre_hlen);
728 skb_reset_network_header(skb); 733 skb_reset_network_header(skb);
729 skb_set_transport_header(skb, sizeof(*ipv6h)); 734 skb_set_transport_header(skb, sizeof(*ipv6h));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 6e3ddf806ec2..e7ceb6c871d1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -238,6 +238,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
238 hdr->saddr = fl6->saddr; 238 hdr->saddr = fl6->saddr;
239 hdr->daddr = *first_hop; 239 hdr->daddr = *first_hop;
240 240
241 skb->protocol = htons(ETH_P_IPV6);
241 skb->priority = sk->sk_priority; 242 skb->priority = sk->sk_priority;
242 skb->mark = sk->sk_mark; 243 skb->mark = sk->sk_mark;
243 244
@@ -1057,6 +1058,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1057 /* initialize protocol header pointer */ 1058 /* initialize protocol header pointer */
1058 skb->transport_header = skb->network_header + fragheaderlen; 1059 skb->transport_header = skb->network_header + fragheaderlen;
1059 1060
1061 skb->protocol = htons(ETH_P_IPV6);
1060 skb->ip_summed = CHECKSUM_PARTIAL; 1062 skb->ip_summed = CHECKSUM_PARTIAL;
1061 skb->csum = 0; 1063 skb->csum = 0;
1062 } 1064 }
@@ -1359,6 +1361,7 @@ alloc_new_skb:
1359 /* 1361 /*
1360 * Fill in the control structures 1362 * Fill in the control structures
1361 */ 1363 */
1364 skb->protocol = htons(ETH_P_IPV6);
1362 skb->ip_summed = CHECKSUM_NONE; 1365 skb->ip_summed = CHECKSUM_NONE;
1363 skb->csum = 0; 1366 skb->csum = 0;
1364 /* reserve for fragmentation and ipsec header */ 1367 /* reserve for fragmentation and ipsec header */
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 1e55866cead7..46ba243605a3 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1027,6 +1027,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1027 init_tel_txopt(&opt, encap_limit); 1027 init_tel_txopt(&opt, encap_limit);
1028 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); 1028 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
1029 } 1029 }
1030
1031 if (likely(!skb->encapsulation)) {
1032 skb_reset_inner_headers(skb);
1033 skb->encapsulation = 1;
1034 }
1035
1030 skb_push(skb, sizeof(struct ipv6hdr)); 1036 skb_push(skb, sizeof(struct ipv6hdr));
1031 skb_reset_network_header(skb); 1037 skb_reset_network_header(skb);
1032 ipv6h = ipv6_hdr(skb); 1038 ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 79aa9652ed86..04d31c2fbef1 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1369,8 +1369,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1369 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) 1369 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
1370 return; 1370 return;
1371 1371
1372 if (!ndopts.nd_opts_rh) 1372 if (!ndopts.nd_opts_rh) {
1373 ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0);
1373 return; 1374 return;
1375 }
1374 1376
1375 hdr = (u8 *)ndopts.nd_opts_rh; 1377 hdr = (u8 *)ndopts.nd_opts_rh;
1376 hdr += 8; 1378 hdr += 8;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c45f7a5c36e9..cdaed47ba932 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -628,6 +628,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
628 goto error; 628 goto error;
629 skb_reserve(skb, hlen); 629 skb_reserve(skb, hlen);
630 630
631 skb->protocol = htons(ETH_P_IPV6);
631 skb->priority = sk->sk_priority; 632 skb->priority = sk->sk_priority;
632 skb->mark = sk->sk_mark; 633 skb->mark = sk->sk_mark;
633 skb_dst_set(skb, &rt->dst); 634 skb_dst_set(skb, &rt->dst);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 790d9f4b8b0b..1aeb473b2cc6 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -490,6 +490,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
490 ipv6_hdr(head)->payload_len = htons(payload_len); 490 ipv6_hdr(head)->payload_len = htons(payload_len);
491 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); 491 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
492 IP6CB(head)->nhoff = nhoff; 492 IP6CB(head)->nhoff = nhoff;
493 IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
493 494
494 /* Yes, and fold redundant checksum back. 8) */ 495 /* Yes, and fold redundant checksum back. 8) */
495 if (head->ip_summed == CHECKSUM_COMPLETE) 496 if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -524,6 +525,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
524 struct net *net = dev_net(skb_dst(skb)->dev); 525 struct net *net = dev_net(skb_dst(skb)->dev);
525 int evicted; 526 int evicted;
526 527
528 if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
529 goto fail_hdr;
530
527 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); 531 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
528 532
529 /* Jumbo payload inhibits frag. header */ 533 /* Jumbo payload inhibits frag. header */
@@ -544,6 +548,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
544 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); 548 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
545 549
546 IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); 550 IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
551 IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
547 return 1; 552 return 1;
548 } 553 }
549 554
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b70f8979003b..8d9a93ed9c59 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1178,6 +1178,27 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1178} 1178}
1179EXPORT_SYMBOL_GPL(ip6_redirect); 1179EXPORT_SYMBOL_GPL(ip6_redirect);
1180 1180
1181void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1182 u32 mark)
1183{
1184 const struct ipv6hdr *iph = ipv6_hdr(skb);
1185 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1186 struct dst_entry *dst;
1187 struct flowi6 fl6;
1188
1189 memset(&fl6, 0, sizeof(fl6));
1190 fl6.flowi6_oif = oif;
1191 fl6.flowi6_mark = mark;
1192 fl6.flowi6_flags = 0;
1193 fl6.daddr = msg->dest;
1194 fl6.saddr = iph->daddr;
1195
1196 dst = ip6_route_output(net, NULL, &fl6);
1197 if (!dst->error)
1198 rt6_do_redirect(dst, NULL, skb);
1199 dst_release(dst);
1200}
1201
1181void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) 1202void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1182{ 1203{
1183 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark); 1204 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a3437a4cd07e..21b25dd8466b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -645,11 +645,7 @@ static int ipip_rcv(struct sk_buff *skb)
645 const struct iphdr *iph; 645 const struct iphdr *iph;
646 struct ip_tunnel *tunnel; 646 struct ip_tunnel *tunnel;
647 647
648 if (iptunnel_pull_header(skb, 0, tpi.proto))
649 goto drop;
650
651 iph = ip_hdr(skb); 648 iph = ip_hdr(skb);
652
653 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, 649 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
654 iph->saddr, iph->daddr); 650 iph->saddr, iph->daddr);
655 if (tunnel != NULL) { 651 if (tunnel != NULL) {
@@ -659,6 +655,8 @@ static int ipip_rcv(struct sk_buff *skb)
659 655
660 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 656 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
661 goto drop; 657 goto drop;
658 if (iptunnel_pull_header(skb, 0, tpi.proto))
659 goto drop;
662 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); 660 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
663 } 661 }
664 662
@@ -888,6 +886,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
888 ttl = iph6->hop_limit; 886 ttl = iph6->hop_limit;
889 tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); 887 tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
890 888
889 if (likely(!skb->encapsulation)) {
890 skb_reset_inner_headers(skb);
891 skb->encapsulation = 1;
892 }
893
891 err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr, 894 err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr,
892 IPPROTO_IPV6, tos, ttl, df); 895 IPPROTO_IPV6, tos, ttl, df);
893 iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 896 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 8755a3079d0f..6cd625e37706 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -34,8 +34,10 @@ static int xfrm6_local_dontfrag(struct sk_buff *skb)
34 struct sock *sk = skb->sk; 34 struct sock *sk = skb->sk;
35 35
36 if (sk) { 36 if (sk) {
37 proto = sk->sk_protocol; 37 if (sk->sk_family != AF_INET6)
38 return 0;
38 39
40 proto = sk->sk_protocol;
39 if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) 41 if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
40 return inet6_sk(sk)->dontfrag; 42 return inet6_sk(sk)->dontfrag;
41 } 43 }
@@ -54,13 +56,15 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
54 ipv6_local_rxpmtu(sk, &fl6, mtu); 56 ipv6_local_rxpmtu(sk, &fl6, mtu);
55} 57}
56 58
57static void xfrm6_local_error(struct sk_buff *skb, u32 mtu) 59void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
58{ 60{
59 struct flowi6 fl6; 61 struct flowi6 fl6;
62 const struct ipv6hdr *hdr;
60 struct sock *sk = skb->sk; 63 struct sock *sk = skb->sk;
61 64
65 hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
62 fl6.fl6_dport = inet_sk(sk)->inet_dport; 66 fl6.fl6_dport = inet_sk(sk)->inet_dport;
63 fl6.daddr = ipv6_hdr(skb)->daddr; 67 fl6.daddr = hdr->daddr;
64 68
65 ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); 69 ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
66} 70}
@@ -80,7 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
80 if (xfrm6_local_dontfrag(skb)) 84 if (xfrm6_local_dontfrag(skb))
81 xfrm6_local_rxpmtu(skb, mtu); 85 xfrm6_local_rxpmtu(skb, mtu);
82 else if (skb->sk) 86 else if (skb->sk)
83 xfrm6_local_error(skb, mtu); 87 xfrm_local_error(skb, mtu);
84 else 88 else
85 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 89 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
86 ret = -EMSGSIZE; 90 ret = -EMSGSIZE;
@@ -136,13 +140,18 @@ static int __xfrm6_output(struct sk_buff *skb)
136{ 140{
137 struct dst_entry *dst = skb_dst(skb); 141 struct dst_entry *dst = skb_dst(skb);
138 struct xfrm_state *x = dst->xfrm; 142 struct xfrm_state *x = dst->xfrm;
139 int mtu = ip6_skb_dst_mtu(skb); 143 int mtu;
144
145 if (skb->protocol == htons(ETH_P_IPV6))
146 mtu = ip6_skb_dst_mtu(skb);
147 else
148 mtu = dst_mtu(skb_dst(skb));
140 149
141 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { 150 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
142 xfrm6_local_rxpmtu(skb, mtu); 151 xfrm6_local_rxpmtu(skb, mtu);
143 return -EMSGSIZE; 152 return -EMSGSIZE;
144 } else if (!skb->local_df && skb->len > mtu && skb->sk) { 153 } else if (!skb->local_df && skb->len > mtu && skb->sk) {
145 xfrm6_local_error(skb, mtu); 154 xfrm_local_error(skb, mtu);
146 return -EMSGSIZE; 155 return -EMSGSIZE;
147 } 156 }
148 157
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index d8c70b8efc24..3fc970135fc6 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -183,6 +183,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
183 .extract_input = xfrm6_extract_input, 183 .extract_input = xfrm6_extract_input,
184 .extract_output = xfrm6_extract_output, 184 .extract_output = xfrm6_extract_output,
185 .transport_finish = xfrm6_transport_finish, 185 .transport_finish = xfrm6_transport_finish,
186 .local_error = xfrm6_local_error,
186}; 187};
187 188
188int __init xfrm6_state_init(void) 189int __init xfrm6_state_init(void)
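
Both address families above now register a .local_error handler in their xfrm_state_afinfo and report over-MTU conditions through a common xfrm_local_error() entry point instead of calling ip_local_error()/ipv6_local_error() directly. A hedged sketch of how such a dispatcher can pick the per-family handler from the skb protocol; the lookup helpers named here are assumptions for illustration, not taken from these hunks:

/* sketch only: route the EMSGSIZE report to the handler registered in
 * struct xfrm_state_afinfo (.local_error) for the packet's family */
static void local_error_dispatch(struct sk_buff *skb, u32 mtu)
{
	struct xfrm_state_afinfo *afinfo;
	unsigned int family;

	if (skb->protocol == htons(ETH_P_IP))
		family = AF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6))
		family = AF_INET6;
	else
		return;

	afinfo = xfrm_state_get_afinfo(family);		/* assumed helper */
	if (!afinfo)
		return;
	afinfo->local_error(skb, mtu);
	xfrm_state_put_afinfo(afinfo);			/* assumed helper */
}

The __xfrm6_output() hunk above likewise branches on skb->protocol to choose between ip6_skb_dst_mtu() and dst_mtu(), which is consistent with this kind of per-protocol dispatch.
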
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index ea7b9c2c7e66..2d45643c964e 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -36,7 +36,7 @@
36 36
37static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, 37static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
38 const u8 *bssid, const int beacon_int, 38 const u8 *bssid, const int beacon_int,
39 struct ieee80211_channel *chan, 39 struct cfg80211_chan_def *req_chandef,
40 const u32 basic_rates, 40 const u32 basic_rates,
41 const u16 capability, u64 tsf, 41 const u16 capability, u64 tsf,
42 bool creator) 42 bool creator)
@@ -51,6 +51,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
51 u32 bss_change; 51 u32 bss_change;
52 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; 52 u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
53 struct cfg80211_chan_def chandef; 53 struct cfg80211_chan_def chandef;
54 struct ieee80211_channel *chan;
54 struct beacon_data *presp; 55 struct beacon_data *presp;
55 int frame_len; 56 int frame_len;
56 57
@@ -81,7 +82,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
81 82
82 sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; 83 sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
83 84
84 chandef = ifibss->chandef; 85 /* make a copy of the chandef, it could be modified below. */
86 chandef = *req_chandef;
87 chan = chandef.chan;
85 if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { 88 if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
86 chandef.width = NL80211_CHAN_WIDTH_20; 89 chandef.width = NL80211_CHAN_WIDTH_20;
87 chandef.center_freq1 = chan->center_freq; 90 chandef.center_freq1 = chan->center_freq;
@@ -259,10 +262,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
259 struct cfg80211_bss *cbss = 262 struct cfg80211_bss *cbss =
260 container_of((void *)bss, struct cfg80211_bss, priv); 263 container_of((void *)bss, struct cfg80211_bss, priv);
261 struct ieee80211_supported_band *sband; 264 struct ieee80211_supported_band *sband;
265 struct cfg80211_chan_def chandef;
262 u32 basic_rates; 266 u32 basic_rates;
263 int i, j; 267 int i, j;
264 u16 beacon_int = cbss->beacon_interval; 268 u16 beacon_int = cbss->beacon_interval;
265 const struct cfg80211_bss_ies *ies; 269 const struct cfg80211_bss_ies *ies;
270 enum nl80211_channel_type chan_type;
266 u64 tsf; 271 u64 tsf;
267 272
268 sdata_assert_lock(sdata); 273 sdata_assert_lock(sdata);
@@ -270,6 +275,26 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
270 if (beacon_int < 10) 275 if (beacon_int < 10)
271 beacon_int = 10; 276 beacon_int = 10;
272 277
278 switch (sdata->u.ibss.chandef.width) {
279 case NL80211_CHAN_WIDTH_20_NOHT:
280 case NL80211_CHAN_WIDTH_20:
281 case NL80211_CHAN_WIDTH_40:
282 chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef);
283 cfg80211_chandef_create(&chandef, cbss->channel, chan_type);
284 break;
285 case NL80211_CHAN_WIDTH_5:
286 case NL80211_CHAN_WIDTH_10:
287 cfg80211_chandef_create(&chandef, cbss->channel,
288 NL80211_CHAN_WIDTH_20_NOHT);
289 chandef.width = sdata->u.ibss.chandef.width;
290 break;
291 default:
292 /* fall back to 20 MHz for unsupported modes */
293 cfg80211_chandef_create(&chandef, cbss->channel,
294 NL80211_CHAN_WIDTH_20_NOHT);
295 break;
296 }
297
273 sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; 298 sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
274 299
275 basic_rates = 0; 300 basic_rates = 0;
@@ -294,7 +319,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
294 319
295 __ieee80211_sta_join_ibss(sdata, cbss->bssid, 320 __ieee80211_sta_join_ibss(sdata, cbss->bssid,
296 beacon_int, 321 beacon_int,
297 cbss->channel, 322 &chandef,
298 basic_rates, 323 basic_rates,
299 cbss->capability, 324 cbss->capability,
300 tsf, false); 325 tsf, false);
@@ -736,7 +761,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
736 sdata->drop_unencrypted = 0; 761 sdata->drop_unencrypted = 0;
737 762
738 __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, 763 __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
739 ifibss->chandef.chan, ifibss->basic_rates, 764 &ifibss->chandef, ifibss->basic_rates,
740 capability, 0, true); 765 capability, 0, true);
741} 766}
742 767
@@ -1138,6 +1163,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1138 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); 1163 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
1139 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | 1164 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
1140 BSS_CHANGED_IBSS); 1165 BSS_CHANGED_IBSS);
1166 ieee80211_vif_release_channel(sdata);
1141 synchronize_rcu(); 1167 synchronize_rcu();
1142 kfree(presp); 1168 kfree(presp);
1143 1169
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ae31968d42d3..cc9e02d79b55 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -31,10 +31,12 @@
31#include "led.h" 31#include "led.h"
32 32
33#define IEEE80211_AUTH_TIMEOUT (HZ / 5) 33#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
34#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2)
34#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) 35#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10)
35#define IEEE80211_AUTH_MAX_TRIES 3 36#define IEEE80211_AUTH_MAX_TRIES 3
36#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) 37#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
37#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 38#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
39#define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2)
38#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) 40#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10)
39#define IEEE80211_ASSOC_MAX_TRIES 3 41#define IEEE80211_ASSOC_MAX_TRIES 3
40 42
@@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
209 struct ieee80211_channel *channel, 211 struct ieee80211_channel *channel,
210 const struct ieee80211_ht_operation *ht_oper, 212 const struct ieee80211_ht_operation *ht_oper,
211 const struct ieee80211_vht_operation *vht_oper, 213 const struct ieee80211_vht_operation *vht_oper,
212 struct cfg80211_chan_def *chandef, bool verbose) 214 struct cfg80211_chan_def *chandef, bool tracking)
213{ 215{
216 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
214 struct cfg80211_chan_def vht_chandef; 217 struct cfg80211_chan_def vht_chandef;
215 u32 ht_cfreq, ret; 218 u32 ht_cfreq, ret;
216 219
@@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
229 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, 232 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
230 channel->band); 233 channel->band);
231 /* check that channel matches the right operating channel */ 234 /* check that channel matches the right operating channel */
232 if (channel->center_freq != ht_cfreq) { 235 if (!tracking && channel->center_freq != ht_cfreq) {
233 /* 236 /*
234 * It's possible that some APs are confused here; 237 * It's possible that some APs are confused here;
235 * Netgear WNDR3700 sometimes reports 4 higher than 238 * Netgear WNDR3700 sometimes reports 4 higher than
@@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
237 * since we look at probe response/beacon data here 240 * since we look at probe response/beacon data here
238 * it should be OK. 241 * it should be OK.
239 */ 242 */
240 if (verbose) 243 sdata_info(sdata,
241 sdata_info(sdata, 244 "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
242 "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", 245 channel->center_freq, ht_cfreq,
243 channel->center_freq, ht_cfreq, 246 ht_oper->primary_chan, channel->band);
244 ht_oper->primary_chan, channel->band);
245 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; 247 ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
246 goto out; 248 goto out;
247 } 249 }
@@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
295 channel->band); 297 channel->band);
296 break; 298 break;
297 default: 299 default:
298 if (verbose) 300 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
299 sdata_info(sdata, 301 sdata_info(sdata,
300 "AP VHT operation IE has invalid channel width (%d), disable VHT\n", 302 "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
301 vht_oper->chan_width); 303 vht_oper->chan_width);
@@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
304 } 306 }
305 307
306 if (!cfg80211_chandef_valid(&vht_chandef)) { 308 if (!cfg80211_chandef_valid(&vht_chandef)) {
307 if (verbose) 309 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
308 sdata_info(sdata, 310 sdata_info(sdata,
309 "AP VHT information is invalid, disable VHT\n"); 311 "AP VHT information is invalid, disable VHT\n");
310 ret = IEEE80211_STA_DISABLE_VHT; 312 ret = IEEE80211_STA_DISABLE_VHT;
@@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
317 } 319 }
318 320
319 if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { 321 if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
320 if (verbose) 322 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
321 sdata_info(sdata, 323 sdata_info(sdata,
322 "AP VHT information doesn't match HT, disable VHT\n"); 324 "AP VHT information doesn't match HT, disable VHT\n");
323 ret = IEEE80211_STA_DISABLE_VHT; 325 ret = IEEE80211_STA_DISABLE_VHT;
@@ -333,18 +335,27 @@ out:
333 if (ret & IEEE80211_STA_DISABLE_VHT) 335 if (ret & IEEE80211_STA_DISABLE_VHT)
334 vht_chandef = *chandef; 336 vht_chandef = *chandef;
335 337
338 /*
339 * Ignore the DISABLED flag when we're already connected and only
340 * tracking the APs beacon for bandwidth changes - otherwise we
341 * might get disconnected here if we connect to an AP, update our
342 * regulatory information based on the AP's country IE and the
343 * information we have is wrong/outdated and disables the channel
344 * that we're actually using for the connection to the AP.
345 */
336 while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, 346 while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
337 IEEE80211_CHAN_DISABLED)) { 347 tracking ? 0 :
348 IEEE80211_CHAN_DISABLED)) {
338 if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { 349 if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
339 ret = IEEE80211_STA_DISABLE_HT | 350 ret = IEEE80211_STA_DISABLE_HT |
340 IEEE80211_STA_DISABLE_VHT; 351 IEEE80211_STA_DISABLE_VHT;
341 goto out; 352 break;
342 } 353 }
343 354
344 ret |= chandef_downgrade(chandef); 355 ret |= chandef_downgrade(chandef);
345 } 356 }
346 357
347 if (chandef->width != vht_chandef.width && verbose) 358 if (chandef->width != vht_chandef.width && !tracking)
348 sdata_info(sdata, 359 sdata_info(sdata,
349 "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); 360 "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
350 361
@@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
384 395
385 /* calculate new channel (type) based on HT/VHT operation IEs */ 396 /* calculate new channel (type) based on HT/VHT operation IEs */
386 flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, 397 flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
387 vht_oper, &chandef, false); 398 vht_oper, &chandef, true);
388 399
389 /* 400 /*
390 * Downgrade the new channel if we associated with restricted 401 * Downgrade the new channel if we associated with restricted
@@ -3394,10 +3405,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
3394 3405
3395 if (tx_flags == 0) { 3406 if (tx_flags == 0) {
3396 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; 3407 auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
3397 ifmgd->auth_data->timeout_started = true; 3408 auth_data->timeout_started = true;
3398 run_again(sdata, auth_data->timeout); 3409 run_again(sdata, auth_data->timeout);
3399 } else { 3410 } else {
3400 auth_data->timeout_started = false; 3411 auth_data->timeout =
3412 round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
3413 auth_data->timeout_started = true;
3414 run_again(sdata, auth_data->timeout);
3401 } 3415 }
3402 3416
3403 return 0; 3417 return 0;
@@ -3434,7 +3448,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
3434 assoc_data->timeout_started = true; 3448 assoc_data->timeout_started = true;
3435 run_again(sdata, assoc_data->timeout); 3449 run_again(sdata, assoc_data->timeout);
3436 } else { 3450 } else {
3437 assoc_data->timeout_started = false; 3451 assoc_data->timeout =
3452 round_jiffies_up(jiffies +
3453 IEEE80211_ASSOC_TIMEOUT_LONG);
3454 assoc_data->timeout_started = true;
3455 run_again(sdata, assoc_data->timeout);
3438 } 3456 }
3439 3457
3440 return 0; 3458 return 0;
@@ -3829,7 +3847,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
3829 ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, 3847 ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
3830 cbss->channel, 3848 cbss->channel,
3831 ht_oper, vht_oper, 3849 ht_oper, vht_oper,
3832 &chandef, true); 3850 &chandef, false);
3833 3851
3834 sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), 3852 sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
3835 local->rx_chains); 3853 local->rx_chains);
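
The two auth/assoc hunks above replace the "no timeout armed while waiting for tx status" case with a longer deadline (HZ/2 instead of HZ/5), so the attempt can still expire if the status report never arrives. A standalone sketch of that selection, with an assumed HZ of 100 and purely illustrative names:

/* Illustrative constants mirroring the mlme timeouts (HZ assumed 100). */
#define HZ			100
#define AUTH_TIMEOUT		(HZ / 5)
#define AUTH_TIMEOUT_LONG	(HZ / 2)

/* Pick the deadline for the next state-machine run: short when the frame
 * went out immediately, long when we are still waiting for tx status. */
static unsigned long next_auth_deadline(unsigned long now, int tx_pending)
{
	return now + (tx_pending ? AUTH_TIMEOUT_LONG : AUTH_TIMEOUT);
}
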
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index f5aed963b22e..f3bbea1eb9e7 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -828,6 +828,9 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
828 if (sband->band != IEEE80211_BAND_2GHZ) 828 if (sband->band != IEEE80211_BAND_2GHZ)
829 return; 829 return;
830 830
831 if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES))
832 return;
833
831 mi->cck_supported = 0; 834 mi->cck_supported = 0;
832 mi->cck_supported_short = 0; 835 mi->cck_supported_short = 0;
833 for (i = 0; i < 4; i++) { 836 for (i = 0; i < 4; i++) {
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 7dcc376eea5f..2f8010707d01 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
526 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 526 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
527 __u32 seq, ack, sack, end, win, swin; 527 __u32 seq, ack, sack, end, win, swin;
528 s16 receiver_offset; 528 s16 receiver_offset;
529 bool res; 529 bool res, in_recv_win;
530 530
531 /* 531 /*
532 * Get the required data from the packet. 532 * Get the required data from the packet.
@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
649 receiver->td_end, receiver->td_maxend, receiver->td_maxwin, 649 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
650 receiver->td_scale); 650 receiver->td_scale);
651 651
652 /* Is the ending sequence in the receive window (if available)? */
653 in_recv_win = !receiver->td_maxwin ||
654 after(end, sender->td_end - receiver->td_maxwin - 1);
655
652 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n", 656 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
653 before(seq, sender->td_maxend + 1), 657 before(seq, sender->td_maxend + 1),
654 after(end, sender->td_end - receiver->td_maxwin - 1), 658 (in_recv_win ? 1 : 0),
655 before(sack, receiver->td_end + 1), 659 before(sack, receiver->td_end + 1),
656 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)); 660 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
657 661
658 if (before(seq, sender->td_maxend + 1) && 662 if (before(seq, sender->td_maxend + 1) &&
659 after(end, sender->td_end - receiver->td_maxwin - 1) && 663 in_recv_win &&
660 before(sack, receiver->td_end + 1) && 664 before(sack, receiver->td_end + 1) &&
661 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { 665 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
662 /* 666 /*
@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
725 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, 729 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
726 "nf_ct_tcp: %s ", 730 "nf_ct_tcp: %s ",
727 before(seq, sender->td_maxend + 1) ? 731 before(seq, sender->td_maxend + 1) ?
728 after(end, sender->td_end - receiver->td_maxwin - 1) ? 732 in_recv_win ?
729 before(sack, receiver->td_end + 1) ? 733 before(sack, receiver->td_end + 1) ?
730 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" 734 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
731 : "ACK is under the lower bound (possible overly delayed ACK)" 735 : "ACK is under the lower bound (possible overly delayed ACK)"
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 962e9792e317..d92cc317bf8b 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
419 nfmsg->version = NFNETLINK_V0; 419 nfmsg->version = NFNETLINK_V0;
420 nfmsg->res_id = htons(inst->group_num); 420 nfmsg->res_id = htons(inst->group_num);
421 421
422 memset(&pmsg, 0, sizeof(pmsg));
422 pmsg.hw_protocol = skb->protocol; 423 pmsg.hw_protocol = skb->protocol;
423 pmsg.hook = hooknum; 424 pmsg.hook = hooknum;
424 425
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
498 if (indev && skb->dev && 499 if (indev && skb->dev &&
499 skb->mac_header != skb->network_header) { 500 skb->mac_header != skb->network_header) {
500 struct nfulnl_msg_packet_hw phw; 501 struct nfulnl_msg_packet_hw phw;
501 int len = dev_parse_header(skb, phw.hw_addr); 502 int len;
503
504 memset(&phw, 0, sizeof(phw));
505 len = dev_parse_header(skb, phw.hw_addr);
502 if (len > 0) { 506 if (len > 0) {
503 phw.hw_addrlen = htons(len); 507 phw.hw_addrlen = htons(len);
504 if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw)) 508 if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 971ea145ab3e..8a703c3dd318 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
463 if (indev && entskb->dev && 463 if (indev && entskb->dev &&
464 entskb->mac_header != entskb->network_header) { 464 entskb->mac_header != entskb->network_header) {
465 struct nfqnl_msg_packet_hw phw; 465 struct nfqnl_msg_packet_hw phw;
466 int len = dev_parse_header(entskb, phw.hw_addr); 466 int len;
467
468 memset(&phw, 0, sizeof(phw));
469 len = dev_parse_header(entskb, phw.hw_addr);
467 if (len) { 470 if (len) {
468 phw.hw_addrlen = htons(len); 471 phw.hw_addrlen = htons(len);
469 if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) 472 if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
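
Both nfnetlink hunks above cure the same leak: a hardware-address struct later handed to nla_put() was only partly filled, so padding and unused trailing bytes carried old stack data to userspace. A generic sketch of the fix, using a made-up struct layout rather than the real nfulnl/nfqnl ones:

#include <string.h>

struct hw_addr_attr {
	unsigned short hw_addrlen;
	unsigned char  hw_addr[8];	/* illustrative sizes only */
	unsigned char  pad[2];
};

/* Zero the whole struct before filling it, so padding and any unused
 * tail bytes are defined before the buffer is copied out. */
static void fill_hw_attr(struct hw_addr_attr *phw,
			 const unsigned char *addr, int len)
{
	memset(phw, 0, sizeof(*phw));
	if (len > (int)sizeof(phw->hw_addr))
		len = sizeof(phw->hw_addr);
	memcpy(phw->hw_addr, addr, len);
	phw->hw_addrlen = (unsigned short)len;
}
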
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 7011c71646f0..6113cc7efffc 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
52{ 52{
53 const struct xt_tcpmss_info *info = par->targinfo; 53 const struct xt_tcpmss_info *info = par->targinfo;
54 struct tcphdr *tcph; 54 struct tcphdr *tcph;
55 unsigned int tcplen, i; 55 int len, tcp_hdrlen;
56 unsigned int i;
56 __be16 oldval; 57 __be16 oldval;
57 u16 newmss; 58 u16 newmss;
58 u8 *opt; 59 u8 *opt;
@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
64 if (!skb_make_writable(skb, skb->len)) 65 if (!skb_make_writable(skb, skb->len))
65 return -1; 66 return -1;
66 67
67 tcplen = skb->len - tcphoff; 68 len = skb->len - tcphoff;
69 if (len < (int)sizeof(struct tcphdr))
70 return -1;
71
68 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 72 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
73 tcp_hdrlen = tcph->doff * 4;
69 74
70 /* Header cannot be larger than the packet */ 75 if (len < tcp_hdrlen)
71 if (tcplen < tcph->doff*4)
72 return -1; 76 return -1;
73 77
74 if (info->mss == XT_TCPMSS_CLAMP_PMTU) { 78 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
87 newmss = info->mss; 91 newmss = info->mss;
88 92
89 opt = (u_int8_t *)tcph; 93 opt = (u_int8_t *)tcph;
90 for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) { 94 for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
91 if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS && 95 if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
92 opt[i+1] == TCPOLEN_MSS) {
93 u_int16_t oldmss; 96 u_int16_t oldmss;
94 97
95 oldmss = (opt[i+2] << 8) | opt[i+3]; 98 oldmss = (opt[i+2] << 8) | opt[i+3];
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
112 } 115 }
113 116
114 /* There is data after the header so the option can't be added 117 /* There is data after the header so the option can't be added
115 without moving it, and doing so may make the SYN packet 118 * without moving it, and doing so may make the SYN packet
116 itself too large. Accept the packet unmodified instead. */ 119 * itself too large. Accept the packet unmodified instead.
117 if (tcplen > tcph->doff*4) 120 */
121 if (len > tcp_hdrlen)
118 return 0; 122 return 0;
119 123
120 /* 124 /*
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
143 newmss = min(newmss, (u16)1220); 147 newmss = min(newmss, (u16)1220);
144 148
145 opt = (u_int8_t *)tcph + sizeof(struct tcphdr); 149 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
146 memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); 150 memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
147 151
148 inet_proto_csum_replace2(&tcph->check, skb, 152 inet_proto_csum_replace2(&tcph->check, skb,
149 htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1); 153 htons(len), htons(len + TCPOLEN_MSS), 1);
150 opt[0] = TCPOPT_MSS; 154 opt[0] = TCPOPT_MSS;
151 opt[1] = TCPOLEN_MSS; 155 opt[1] = TCPOLEN_MSS;
152 opt[2] = (newmss & 0xff00) >> 8; 156 opt[2] = (newmss & 0xff00) >> 8;
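
The TCPMSS rewrite above bounds the option walk so that a full MSS option (kind, length and two value bytes) must fit inside the TCP header before it is even inspected. A self-contained model of that walk; the optlen() helper and the hard-coded 20-byte header size are simplifications, not the kernel code:

#include <stdint.h>
#include <stddef.h>

#define TCPOPT_NOP   1
#define TCPOPT_MSS   2
#define TCPOLEN_MSS  4

/* EOL and NOP are one byte; everything else carries a length byte. */
static size_t optlen(const uint8_t *opt, size_t off)
{
	if (opt[off] <= TCPOPT_NOP || opt[off + 1] == 0)
		return 1;
	return opt[off + 1];
}

/* Return the offset of the MSS option, or 0 if none fits in the header. */
static size_t find_mss(const uint8_t *tcph, size_t tcp_hdrlen)
{
	size_t i;

	for (i = 20 /* fixed TCP header */;
	     i + TCPOLEN_MSS <= tcp_hdrlen; i += optlen(tcph, i))
		if (tcph[i] == TCPOPT_MSS && tcph[i + 1] == TCPOLEN_MSS)
			return i;
	return 0;
}
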
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index b68fa191710f..625fa1d636a0 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
38 struct tcphdr *tcph; 38 struct tcphdr *tcph;
39 u_int16_t n, o; 39 u_int16_t n, o;
40 u_int8_t *opt; 40 u_int8_t *opt;
41 int len; 41 int len, tcp_hdrlen;
42 42
43 /* This is a fragment, no TCP header is available */ 43 /* This is a fragment, no TCP header is available */
44 if (par->fragoff != 0) 44 if (par->fragoff != 0)
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
52 return NF_DROP; 52 return NF_DROP;
53 53
54 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 54 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
55 if (tcph->doff * 4 > len) 55 tcp_hdrlen = tcph->doff * 4;
56
57 if (len < tcp_hdrlen)
56 return NF_DROP; 58 return NF_DROP;
57 59
58 opt = (u_int8_t *)tcph; 60 opt = (u_int8_t *)tcph;
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
61 * Walk through all TCP options - if we find some option to remove, 63 * Walk through all TCP options - if we find some option to remove,
62 * set all octets to %TCPOPT_NOP and adjust checksum. 64 * set all octets to %TCPOPT_NOP and adjust checksum.
63 */ 65 */
64 for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) { 66 for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
65 optl = optlen(opt, i); 67 optl = optlen(opt, i);
66 68
67 if (i + optl > tcp_hdrlen(skb)) 69 if (i + optl > tcp_hdrlen)
68 break; 70 break;
69 71
70 if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i])) 72 if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 512718adb0d5..0c741cec4d0d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -364,7 +364,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
364EXPORT_SYMBOL(genl_unregister_ops); 364EXPORT_SYMBOL(genl_unregister_ops);
365 365
366/** 366/**
367 * genl_register_family - register a generic netlink family 367 * __genl_register_family - register a generic netlink family
368 * @family: generic netlink family 368 * @family: generic netlink family
369 * 369 *
370 * Registers the specified family after validating it first. Only one 370 * Registers the specified family after validating it first. Only one
@@ -374,7 +374,7 @@ EXPORT_SYMBOL(genl_unregister_ops);
374 * 374 *
375 * Return 0 on success or a negative error code. 375 * Return 0 on success or a negative error code.
376 */ 376 */
377int genl_register_family(struct genl_family *family) 377int __genl_register_family(struct genl_family *family)
378{ 378{
379 int err = -EINVAL; 379 int err = -EINVAL;
380 380
@@ -430,10 +430,10 @@ errout_locked:
430errout: 430errout:
431 return err; 431 return err;
432} 432}
433EXPORT_SYMBOL(genl_register_family); 433EXPORT_SYMBOL(__genl_register_family);
434 434
435/** 435/**
436 * genl_register_family_with_ops - register a generic netlink family 436 * __genl_register_family_with_ops - register a generic netlink family
437 * @family: generic netlink family 437 * @family: generic netlink family
438 * @ops: operations to be registered 438 * @ops: operations to be registered
439 * @n_ops: number of elements to register 439 * @n_ops: number of elements to register
@@ -457,12 +457,12 @@ EXPORT_SYMBOL(genl_register_family);
457 * 457 *
458 * Return 0 on success or a negative error code. 458 * Return 0 on success or a negative error code.
459 */ 459 */
460int genl_register_family_with_ops(struct genl_family *family, 460int __genl_register_family_with_ops(struct genl_family *family,
461 struct genl_ops *ops, size_t n_ops) 461 struct genl_ops *ops, size_t n_ops)
462{ 462{
463 int err, i; 463 int err, i;
464 464
465 err = genl_register_family(family); 465 err = __genl_register_family(family);
466 if (err) 466 if (err)
467 return err; 467 return err;
468 468
@@ -476,7 +476,7 @@ err_out:
476 genl_unregister_family(family); 476 genl_unregister_family(family);
477 return err; 477 return err;
478} 478}
479EXPORT_SYMBOL(genl_register_family_with_ops); 479EXPORT_SYMBOL(__genl_register_family_with_ops);
480 480
481/** 481/**
482 * genl_unregister_family - unregister generic netlink family 482 * genl_unregister_family - unregister generic netlink family
@@ -544,6 +544,30 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
544} 544}
545EXPORT_SYMBOL(genlmsg_put); 545EXPORT_SYMBOL(genlmsg_put);
546 546
547static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
548{
549 struct genl_ops *ops = cb->data;
550 int rc;
551
552 genl_lock();
553 rc = ops->dumpit(skb, cb);
554 genl_unlock();
555 return rc;
556}
557
558static int genl_lock_done(struct netlink_callback *cb)
559{
560 struct genl_ops *ops = cb->data;
561 int rc = 0;
562
563 if (ops->done) {
564 genl_lock();
565 rc = ops->done(cb);
566 genl_unlock();
567 }
568 return rc;
569}
570
547static int genl_family_rcv_msg(struct genl_family *family, 571static int genl_family_rcv_msg(struct genl_family *family,
548 struct sk_buff *skb, 572 struct sk_buff *skb,
549 struct nlmsghdr *nlh) 573 struct nlmsghdr *nlh)
@@ -572,15 +596,34 @@ static int genl_family_rcv_msg(struct genl_family *family,
572 return -EPERM; 596 return -EPERM;
573 597
574 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { 598 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
575 struct netlink_dump_control c = { 599 int rc;
576 .dump = ops->dumpit,
577 .done = ops->done,
578 };
579 600
580 if (ops->dumpit == NULL) 601 if (ops->dumpit == NULL)
581 return -EOPNOTSUPP; 602 return -EOPNOTSUPP;
582 603
583 return netlink_dump_start(net->genl_sock, skb, nlh, &c); 604 if (!family->parallel_ops) {
605 struct netlink_dump_control c = {
606 .module = family->module,
607 .data = ops,
608 .dump = genl_lock_dumpit,
609 .done = genl_lock_done,
610 };
611
612 genl_unlock();
613 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
614 genl_lock();
615
616 } else {
617 struct netlink_dump_control c = {
618 .module = family->module,
619 .dump = ops->dumpit,
620 .done = ops->done,
621 };
622
623 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
624 }
625
626 return rc;
584 } 627 }
585 628
586 if (ops->doit == NULL) 629 if (ops->doit == NULL)
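
For genetlink families without parallel_ops, the dump path above drops the genl lock around __netlink_dump_start() and instead re-takes it inside wrapper callbacks, so legacy dumpit/done handlers still run fully serialized. The wrapper idea, modelled with a pthread mutex standing in for genl_lock():

#include <pthread.h>

struct ops {
	int (*dumpit)(void *ctx);
	int (*done)(void *ctx);
};

static pthread_mutex_t subsystem_lock = PTHREAD_MUTEX_INITIALIZER;

/* Re-take the subsystem lock around the real callbacks, so non-parallel
 * callbacks stay serialized even though the dump machinery itself is
 * started without the lock held. */
static int locked_dumpit(struct ops *ops, void *ctx)
{
	int rc;

	pthread_mutex_lock(&subsystem_lock);
	rc = ops->dumpit(ctx);
	pthread_mutex_unlock(&subsystem_lock);
	return rc;
}

static int locked_done(struct ops *ops, void *ctx)
{
	int rc = 0;

	if (ops->done) {
		pthread_mutex_lock(&subsystem_lock);
		rc = ops->done(ctx);
		pthread_mutex_unlock(&subsystem_lock);
	}
	return rc;
}
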
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 22c5f399f1cf..ab101f715447 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
535{ 535{
536 struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); 536 struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
537 537
538 OVS_CB(skb)->tun_key = NULL;
538 return do_execute_actions(dp, skb, acts->actions, 539 return do_execute_actions(dp, skb, acts->actions,
539 acts->actions_len, false); 540 acts->actions_len, false);
540} 541}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f7e3a0d84c40..f2ed7600084e 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2076 ovs_notify(reply, info, &ovs_dp_vport_multicast_group); 2076 ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
2077 return 0; 2077 return 0;
2078 2078
2079 rtnl_unlock();
2080 return 0;
2081
2082exit_free: 2079exit_free:
2083 kfree_skb(reply); 2080 kfree_skb(reply);
2084exit_unlock: 2081exit_unlock:
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 5c519b121e1b..1aa84dc58777 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
240 struct flex_array *buckets; 240 struct flex_array *buckets;
241 int i, err; 241 int i, err;
242 242
243 buckets = flex_array_alloc(sizeof(struct hlist_head *), 243 buckets = flex_array_alloc(sizeof(struct hlist_head),
244 n_buckets, GFP_KERNEL); 244 n_buckets, GFP_KERNEL);
245 if (!buckets) 245 if (!buckets)
246 return NULL; 246 return NULL;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4b66c752eae5..75c8bbf598c8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3259,9 +3259,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3259 3259
3260 if (po->tp_version == TPACKET_V3) { 3260 if (po->tp_version == TPACKET_V3) {
3261 lv = sizeof(struct tpacket_stats_v3); 3261 lv = sizeof(struct tpacket_stats_v3);
3262 st.stats3.tp_packets += st.stats3.tp_drops;
3262 data = &st.stats3; 3263 data = &st.stats3;
3263 } else { 3264 } else {
3264 lv = sizeof(struct tpacket_stats); 3265 lv = sizeof(struct tpacket_stats);
3266 st.stats1.tp_packets += st.stats1.tp_drops;
3265 data = &st.stats1; 3267 data = &st.stats1;
3266 } 3268 }
3267 3269
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 1cec5e4f3a5e..1bacc1079942 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -576,14 +576,14 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
576} 576}
577EXPORT_SYMBOL(rfkill_set_states); 577EXPORT_SYMBOL(rfkill_set_states);
578 578
579static ssize_t rfkill_name_show(struct device *dev, 579static ssize_t name_show(struct device *dev, struct device_attribute *attr,
580 struct device_attribute *attr, 580 char *buf)
581 char *buf)
582{ 581{
583 struct rfkill *rfkill = to_rfkill(dev); 582 struct rfkill *rfkill = to_rfkill(dev);
584 583
585 return sprintf(buf, "%s\n", rfkill->name); 584 return sprintf(buf, "%s\n", rfkill->name);
586} 585}
586static DEVICE_ATTR_RO(name);
587 587
588static const char *rfkill_get_type_str(enum rfkill_type type) 588static const char *rfkill_get_type_str(enum rfkill_type type)
589{ 589{
@@ -611,54 +611,52 @@ static const char *rfkill_get_type_str(enum rfkill_type type)
611 } 611 }
612} 612}
613 613
614static ssize_t rfkill_type_show(struct device *dev, 614static ssize_t type_show(struct device *dev, struct device_attribute *attr,
615 struct device_attribute *attr, 615 char *buf)
616 char *buf)
617{ 616{
618 struct rfkill *rfkill = to_rfkill(dev); 617 struct rfkill *rfkill = to_rfkill(dev);
619 618
620 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type)); 619 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
621} 620}
621static DEVICE_ATTR_RO(type);
622 622
623static ssize_t rfkill_idx_show(struct device *dev, 623static ssize_t index_show(struct device *dev, struct device_attribute *attr,
624 struct device_attribute *attr, 624 char *buf)
625 char *buf)
626{ 625{
627 struct rfkill *rfkill = to_rfkill(dev); 626 struct rfkill *rfkill = to_rfkill(dev);
628 627
629 return sprintf(buf, "%d\n", rfkill->idx); 628 return sprintf(buf, "%d\n", rfkill->idx);
630} 629}
630static DEVICE_ATTR_RO(index);
631 631
632static ssize_t rfkill_persistent_show(struct device *dev, 632static ssize_t persistent_show(struct device *dev,
633 struct device_attribute *attr, 633 struct device_attribute *attr, char *buf)
634 char *buf)
635{ 634{
636 struct rfkill *rfkill = to_rfkill(dev); 635 struct rfkill *rfkill = to_rfkill(dev);
637 636
638 return sprintf(buf, "%d\n", rfkill->persistent); 637 return sprintf(buf, "%d\n", rfkill->persistent);
639} 638}
639static DEVICE_ATTR_RO(persistent);
640 640
641static ssize_t rfkill_hard_show(struct device *dev, 641static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
642 struct device_attribute *attr, 642 char *buf)
643 char *buf)
644{ 643{
645 struct rfkill *rfkill = to_rfkill(dev); 644 struct rfkill *rfkill = to_rfkill(dev);
646 645
647 return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0 ); 646 return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0 );
648} 647}
648static DEVICE_ATTR_RO(hard);
649 649
650static ssize_t rfkill_soft_show(struct device *dev, 650static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
651 struct device_attribute *attr, 651 char *buf)
652 char *buf)
653{ 652{
654 struct rfkill *rfkill = to_rfkill(dev); 653 struct rfkill *rfkill = to_rfkill(dev);
655 654
656 return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0 ); 655 return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0 );
657} 656}
658 657
659static ssize_t rfkill_soft_store(struct device *dev, 658static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
660 struct device_attribute *attr, 659 const char *buf, size_t count)
661 const char *buf, size_t count)
662{ 660{
663 struct rfkill *rfkill = to_rfkill(dev); 661 struct rfkill *rfkill = to_rfkill(dev);
664 unsigned long state; 662 unsigned long state;
@@ -680,6 +678,7 @@ static ssize_t rfkill_soft_store(struct device *dev,
680 678
681 return count; 679 return count;
682} 680}
681static DEVICE_ATTR_RW(soft);
683 682
684static u8 user_state_from_blocked(unsigned long state) 683static u8 user_state_from_blocked(unsigned long state)
685{ 684{
@@ -691,18 +690,16 @@ static u8 user_state_from_blocked(unsigned long state)
691 return RFKILL_USER_STATE_UNBLOCKED; 690 return RFKILL_USER_STATE_UNBLOCKED;
692} 691}
693 692
694static ssize_t rfkill_state_show(struct device *dev, 693static ssize_t state_show(struct device *dev, struct device_attribute *attr,
695 struct device_attribute *attr, 694 char *buf)
696 char *buf)
697{ 695{
698 struct rfkill *rfkill = to_rfkill(dev); 696 struct rfkill *rfkill = to_rfkill(dev);
699 697
700 return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state)); 698 return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
701} 699}
702 700
703static ssize_t rfkill_state_store(struct device *dev, 701static ssize_t state_store(struct device *dev, struct device_attribute *attr,
704 struct device_attribute *attr, 702 const char *buf, size_t count)
705 const char *buf, size_t count)
706{ 703{
707 struct rfkill *rfkill = to_rfkill(dev); 704 struct rfkill *rfkill = to_rfkill(dev);
708 unsigned long state; 705 unsigned long state;
@@ -725,32 +722,27 @@ static ssize_t rfkill_state_store(struct device *dev,
725 722
726 return count; 723 return count;
727} 724}
725static DEVICE_ATTR_RW(state);
728 726
729static ssize_t rfkill_claim_show(struct device *dev, 727static ssize_t claim_show(struct device *dev, struct device_attribute *attr,
730 struct device_attribute *attr, 728 char *buf)
731 char *buf)
732{ 729{
733 return sprintf(buf, "%d\n", 0); 730 return sprintf(buf, "%d\n", 0);
734} 731}
735 732static DEVICE_ATTR_RO(claim);
736static ssize_t rfkill_claim_store(struct device *dev, 733
737 struct device_attribute *attr, 734static struct attribute *rfkill_dev_attrs[] = {
738 const char *buf, size_t count) 735 &dev_attr_name.attr,
739{ 736 &dev_attr_type.attr,
740 return -EOPNOTSUPP; 737 &dev_attr_index.attr,
741} 738 &dev_attr_persistent.attr,
742 739 &dev_attr_state.attr,
743static struct device_attribute rfkill_dev_attrs[] = { 740 &dev_attr_claim.attr,
744 __ATTR(name, S_IRUGO, rfkill_name_show, NULL), 741 &dev_attr_soft.attr,
745 __ATTR(type, S_IRUGO, rfkill_type_show, NULL), 742 &dev_attr_hard.attr,
746 __ATTR(index, S_IRUGO, rfkill_idx_show, NULL), 743 NULL,
747 __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
748 __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
749 __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
750 __ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
751 __ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
752 __ATTR_NULL
753}; 744};
745ATTRIBUTE_GROUPS(rfkill_dev);
754 746
755static void rfkill_release(struct device *dev) 747static void rfkill_release(struct device *dev)
756{ 748{
@@ -830,7 +822,7 @@ static int rfkill_resume(struct device *dev)
830static struct class rfkill_class = { 822static struct class rfkill_class = {
831 .name = "rfkill", 823 .name = "rfkill",
832 .dev_release = rfkill_release, 824 .dev_release = rfkill_release,
833 .dev_attrs = rfkill_dev_attrs, 825 .dev_groups = rfkill_dev_groups,
834 .dev_uevent = rfkill_dev_uevent, 826 .dev_uevent = rfkill_dev_uevent,
835 .suspend = rfkill_suspend, 827 .suspend = rfkill_suspend,
836 .resume = rfkill_resume, 828 .resume = rfkill_resume,
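
The rfkill rework above is the stock sysfs modernisation: show/store handlers named after the attribute, DEVICE_ATTR_RO/RW instead of open-coded __ATTR tables, and the attribute group wired up through class->dev_groups. The same pattern reduced to a single hypothetical "foo" attribute, shown only to illustrate the shape:

#include <linux/kernel.h>
#include <linux/device.h>

/* Hypothetical read-only attribute; foo_show feeds DEVICE_ATTR_RO(foo). */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(foo);

static struct attribute *example_dev_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example_dev);	/* generates example_dev_groups */

static struct class example_class = {
	.name		= "example",
	.dev_groups	= example_dev_groups,
};
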
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 3a294eb98d61..867b4a3e3980 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -23,19 +23,18 @@
23#include <net/sock.h> 23#include <net/sock.h>
24#include <net/cls_cgroup.h> 24#include <net/cls_cgroup.h>
25 25
26static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) 26static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state *css)
27{ 27{
28 return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), 28 return css ? container_of(css, struct cgroup_cls_state, css) : NULL;
29 struct cgroup_cls_state, css);
30} 29}
31 30
32static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p) 31static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
33{ 32{
34 return container_of(task_subsys_state(p, net_cls_subsys_id), 33 return css_cls_state(task_css(p, net_cls_subsys_id));
35 struct cgroup_cls_state, css);
36} 34}
37 35
38static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp) 36static struct cgroup_subsys_state *
37cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
39{ 38{
40 struct cgroup_cls_state *cs; 39 struct cgroup_cls_state *cs;
41 40
@@ -45,17 +44,19 @@ static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
45 return &cs->css; 44 return &cs->css;
46} 45}
47 46
48static int cgrp_css_online(struct cgroup *cgrp) 47static int cgrp_css_online(struct cgroup_subsys_state *css)
49{ 48{
50 if (cgrp->parent) 49 struct cgroup_cls_state *cs = css_cls_state(css);
51 cgrp_cls_state(cgrp)->classid = 50 struct cgroup_cls_state *parent = css_cls_state(css_parent(css));
52 cgrp_cls_state(cgrp->parent)->classid; 51
52 if (parent)
53 cs->classid = parent->classid;
53 return 0; 54 return 0;
54} 55}
55 56
56static void cgrp_css_free(struct cgroup *cgrp) 57static void cgrp_css_free(struct cgroup_subsys_state *css)
57{ 58{
58 kfree(cgrp_cls_state(cgrp)); 59 kfree(css_cls_state(css));
59} 60}
60 61
61static int update_classid(const void *v, struct file *file, unsigned n) 62static int update_classid(const void *v, struct file *file, unsigned n)
@@ -67,12 +68,13 @@ static int update_classid(const void *v, struct file *file, unsigned n)
67 return 0; 68 return 0;
68} 69}
69 70
70static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) 71static void cgrp_attach(struct cgroup_subsys_state *css,
72 struct cgroup_taskset *tset)
71{ 73{
72 struct task_struct *p; 74 struct task_struct *p;
73 void *v; 75 void *v;
74 76
75 cgroup_taskset_for_each(p, cgrp, tset) { 77 cgroup_taskset_for_each(p, css, tset) {
76 task_lock(p); 78 task_lock(p);
77 v = (void *)(unsigned long)task_cls_classid(p); 79 v = (void *)(unsigned long)task_cls_classid(p);
78 iterate_fd(p->files, 0, update_classid, v); 80 iterate_fd(p->files, 0, update_classid, v);
@@ -80,14 +82,15 @@ static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
80 } 82 }
81} 83}
82 84
83static u64 read_classid(struct cgroup *cgrp, struct cftype *cft) 85static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
84{ 86{
85 return cgrp_cls_state(cgrp)->classid; 87 return css_cls_state(css)->classid;
86} 88}
87 89
88static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value) 90static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
91 u64 value)
89{ 92{
90 cgrp_cls_state(cgrp)->classid = (u32) value; 93 css_cls_state(css)->classid = (u32) value;
91 return 0; 94 return 0;
92} 95}
93 96
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 281c1bded1f6..51b968d3febb 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
285 return q; 285 return q;
286} 286}
287 287
 288/* The linklayer setting was not transferred from iproute2 in older
 289 * versions, and the rate table lookup system has been dropped from
 290 * the kernel. To keep backward compatibility with older iproute2 tc
 291 * utils, we detect the linklayer setting by checking whether the rate
 292 * table was modified.
 293 *
 294 * For linklayer ATM table entries, the rate table will be aligned to
 295 * 48 bytes, thus some table entries will contain the same value. The
 296 * mpu (min packet unit) is also encoded into the old rate table, thus
 297 * starting from the mpu, we find the low and high table entries for
 298 * mapping this cell. If these entries contain the same value, then
 299 * the rate tables have been modified for linklayer ATM.
 300 *
 301 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
 302 * then rounding up to the next cell, calculating the table entry one
 303 * below, and comparing the two.
 304 */
305static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
306{
307 int low = roundup(r->mpu, 48);
308 int high = roundup(low+1, 48);
309 int cell_low = low >> r->cell_log;
310 int cell_high = (high >> r->cell_log) - 1;
311
312 /* rtab is too inaccurate at rates > 100Mbit/s */
313 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
314 pr_debug("TC linklayer: Giving up ATM detection\n");
315 return TC_LINKLAYER_ETHERNET;
316 }
317
318 if ((cell_high > cell_low) && (cell_high < 256)
319 && (rtab[cell_low] == rtab[cell_high])) {
320 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
321 cell_low, cell_high, rtab[cell_high]);
322 return TC_LINKLAYER_ATM;
323 }
324 return TC_LINKLAYER_ETHERNET;
325}
326
288static struct qdisc_rate_table *qdisc_rtab_list; 327static struct qdisc_rate_table *qdisc_rtab_list;
289 328
290struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) 329struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
308 rtab->rate = *r; 347 rtab->rate = *r;
309 rtab->refcnt = 1; 348 rtab->refcnt = 1;
310 memcpy(rtab->data, nla_data(tab), 1024); 349 memcpy(rtab->data, nla_data(tab), 1024);
350 if (r->linklayer == TC_LINKLAYER_UNAWARE)
351 r->linklayer = __detect_linklayer(r, rtab->data);
311 rtab->next = qdisc_rtab_list; 352 rtab->next = qdisc_rtab_list;
312 qdisc_rtab_list = rtab; 353 qdisc_rtab_list = rtab;
313 } 354 }
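
The detection above leans on ATM cell alignment: an ATM-aware iproute2 rounds every size up to a multiple of 48 bytes before building the rate table, so two slots that fall into the same cell hold identical values (the kernel additionally gives up above 100 Mbit/s, where the table is too coarse). The slot arithmetic in isolation, as a runnable userspace example; cell_log 3 (8-byte slots) is just an example value:

#include <stdio.h>

#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

/* Same derivation as the kernel heuristic: pick the two table slots that
 * bracket one 48-byte ATM cell starting at the MPU; if the table was
 * built for ATM, both slots map to the same cell and hold the same value. */
static void atm_probe_slots(unsigned int mpu, unsigned int cell_log,
			    int *cell_low, int *cell_high)
{
	unsigned int low  = ROUNDUP(mpu, 48);
	unsigned int high = ROUNDUP(low + 1, 48);

	*cell_low  = low >> cell_log;
	*cell_high = (high >> cell_log) - 1;
}

int main(void)
{
	int lo, hi;

	atm_probe_slots(0, 3, &lo, &hi);
	printf("compare rtab[%d] and rtab[%d]\n", lo, hi);
	return 0;
}
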
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4626cef4b76e..48be3d5c0d92 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -25,6 +25,7 @@
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/if_vlan.h>
28#include <net/sch_generic.h> 29#include <net/sch_generic.h>
29#include <net/pkt_sched.h> 30#include <net/pkt_sched.h>
30#include <net/dst.h> 31#include <net/dst.h>
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q)
207 208
208unsigned long dev_trans_start(struct net_device *dev) 209unsigned long dev_trans_start(struct net_device *dev)
209{ 210{
210 unsigned long val, res = dev->trans_start; 211 unsigned long val, res;
211 unsigned int i; 212 unsigned int i;
212 213
214 if (is_vlan_dev(dev))
215 dev = vlan_dev_real_dev(dev);
216 res = dev->trans_start;
213 for (i = 0; i < dev->num_tx_queues; i++) { 217 for (i = 0; i < dev->num_tx_queues; i++) {
214 val = netdev_get_tx_queue(dev, i)->trans_start; 218 val = netdev_get_tx_queue(dev, i)->trans_start;
215 if (val && time_after(val, res)) 219 if (val && time_after(val, res))
216 res = val; 220 res = val;
217 } 221 }
218 dev->trans_start = res; 222 dev->trans_start = res;
223
219 return res; 224 return res;
220} 225}
221EXPORT_SYMBOL(dev_trans_start); 226EXPORT_SYMBOL(dev_trans_start);
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
904 memset(r, 0, sizeof(*r)); 909 memset(r, 0, sizeof(*r));
905 r->overhead = conf->overhead; 910 r->overhead = conf->overhead;
906 r->rate_bytes_ps = conf->rate; 911 r->rate_bytes_ps = conf->rate;
912 r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
907 r->mult = 1; 913 r->mult = 1;
908 /* 914 /*
909 * The deal here is to replace a divide by a reciprocal one 915 * The deal here is to replace a divide by a reciprocal one
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 45e751527dfc..c2178b15ca6e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1329 struct htb_sched *q = qdisc_priv(sch); 1329 struct htb_sched *q = qdisc_priv(sch);
1330 struct htb_class *cl = (struct htb_class *)*arg, *parent; 1330 struct htb_class *cl = (struct htb_class *)*arg, *parent;
1331 struct nlattr *opt = tca[TCA_OPTIONS]; 1331 struct nlattr *opt = tca[TCA_OPTIONS];
1332 struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1332 struct nlattr *tb[TCA_HTB_MAX + 1]; 1333 struct nlattr *tb[TCA_HTB_MAX + 1];
1333 struct tc_htb_opt *hopt; 1334 struct tc_htb_opt *hopt;
1334 1335
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1350 if (!hopt->rate.rate || !hopt->ceil.rate) 1351 if (!hopt->rate.rate || !hopt->ceil.rate)
1351 goto failure; 1352 goto failure;
1352 1353
 1354 /* Keep backward compatibility with rate_table based iproute2 tc */
1355 if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
1356 rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
1357 if (rtab)
1358 qdisc_put_rtab(rtab);
1359 }
1360 if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
1361 ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
1362 if (ctab)
1363 qdisc_put_rtab(ctab);
1364 }
1365
1353 if (!cl) { /* new class */ 1366 if (!cl) { /* new class */
1354 struct Qdisc *new_q; 1367 struct Qdisc *new_q;
1355 int prio; 1368 int prio;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index bce5b79662a6..ab67efc64b24 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -846,12 +846,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
846 else 846 else
847 spc_state = SCTP_ADDR_AVAILABLE; 847 spc_state = SCTP_ADDR_AVAILABLE;
848 /* Don't inform ULP about transition from PF to 848 /* Don't inform ULP about transition from PF to
849 * active state and set cwnd to 1, see SCTP 849 * active state and set cwnd to 1 MTU, see SCTP
850 * Quick failover draft section 5.1, point 5 850 * Quick failover draft section 5.1, point 5
851 */ 851 */
852 if (transport->state == SCTP_PF) { 852 if (transport->state == SCTP_PF) {
853 ulp_notify = false; 853 ulp_notify = false;
854 transport->cwnd = 1; 854 transport->cwnd = asoc->pathmtu;
855 } 855 }
856 transport->state = SCTP_ACTIVE; 856 transport->state = SCTP_ACTIVE;
857 break; 857 break;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index bdbbc3fd7c14..8fdd16046d66 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -181,12 +181,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
181 return; 181 return;
182 } 182 }
183 183
184 call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
185
186 sctp_packet_free(&transport->packet); 184 sctp_packet_free(&transport->packet);
187 185
188 if (transport->asoc) 186 if (transport->asoc)
189 sctp_association_put(transport->asoc); 187 sctp_association_put(transport->asoc);
188
189 call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
190} 190}
191 191
192/* Start T3_rtx timer if it is not already running and update the heartbeat 192/* Start T3_rtx timer if it is not already running and update the heartbeat
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 74f6a704e374..ecbc4e3d83ad 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1660,6 +1660,10 @@ call_connect(struct rpc_task *task)
1660 task->tk_action = call_connect_status; 1660 task->tk_action = call_connect_status;
1661 if (task->tk_status < 0) 1661 if (task->tk_status < 0)
1662 return; 1662 return;
1663 if (task->tk_flags & RPC_TASK_NOCONNECT) {
1664 rpc_exit(task, -ENOTCONN);
1665 return;
1666 }
1663 xprt_connect(task); 1667 xprt_connect(task);
1664 } 1668 }
1665} 1669}
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
index 74d948f5d5a1..779742cfc1ff 100644
--- a/net/sunrpc/netns.h
+++ b/net/sunrpc/netns.h
@@ -23,6 +23,7 @@ struct sunrpc_net {
23 struct rpc_clnt *rpcb_local_clnt4; 23 struct rpc_clnt *rpcb_local_clnt4;
24 spinlock_t rpcb_clnt_lock; 24 spinlock_t rpcb_clnt_lock;
25 unsigned int rpcb_users; 25 unsigned int rpcb_users;
26 unsigned int rpcb_is_af_local : 1;
26 27
27 struct mutex gssp_lock; 28 struct mutex gssp_lock;
28 wait_queue_head_t gssp_wq; 29 wait_queue_head_t gssp_wq;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3df764dc330c..1891a1022c17 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -204,13 +204,15 @@ void rpcb_put_local(struct net *net)
204} 204}
205 205
206static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt, 206static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
207 struct rpc_clnt *clnt4) 207 struct rpc_clnt *clnt4,
208 bool is_af_local)
208{ 209{
209 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 210 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
210 211
211 /* Protected by rpcb_create_local_mutex */ 212 /* Protected by rpcb_create_local_mutex */
212 sn->rpcb_local_clnt = clnt; 213 sn->rpcb_local_clnt = clnt;
213 sn->rpcb_local_clnt4 = clnt4; 214 sn->rpcb_local_clnt4 = clnt4;
215 sn->rpcb_is_af_local = is_af_local ? 1 : 0;
214 smp_wmb(); 216 smp_wmb();
215 sn->rpcb_users = 1; 217 sn->rpcb_users = 1;
216 dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: " 218 dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: "
@@ -238,6 +240,14 @@ static int rpcb_create_local_unix(struct net *net)
238 .program = &rpcb_program, 240 .program = &rpcb_program,
239 .version = RPCBVERS_2, 241 .version = RPCBVERS_2,
240 .authflavor = RPC_AUTH_NULL, 242 .authflavor = RPC_AUTH_NULL,
243 /*
244 * We turn off the idle timeout to prevent the kernel
245 * from automatically disconnecting the socket.
246 * Otherwise, we'd have to cache the mount namespace
247 * of the caller and somehow pass that to the socket
248 * reconnect code.
249 */
250 .flags = RPC_CLNT_CREATE_NO_IDLE_TIMEOUT,
241 }; 251 };
242 struct rpc_clnt *clnt, *clnt4; 252 struct rpc_clnt *clnt, *clnt4;
243 int result = 0; 253 int result = 0;
@@ -263,7 +273,7 @@ static int rpcb_create_local_unix(struct net *net)
263 clnt4 = NULL; 273 clnt4 = NULL;
264 } 274 }
265 275
266 rpcb_set_local(net, clnt, clnt4); 276 rpcb_set_local(net, clnt, clnt4, true);
267 277
268out: 278out:
269 return result; 279 return result;
@@ -315,7 +325,7 @@ static int rpcb_create_local_net(struct net *net)
315 clnt4 = NULL; 325 clnt4 = NULL;
316 } 326 }
317 327
318 rpcb_set_local(net, clnt, clnt4); 328 rpcb_set_local(net, clnt, clnt4, false);
319 329
320out: 330out:
321 return result; 331 return result;
@@ -376,13 +386,16 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
376 return rpc_create(&args); 386 return rpc_create(&args);
377} 387}
378 388
379static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg) 389static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set)
380{ 390{
381 int result, error = 0; 391 int flags = RPC_TASK_NOCONNECT;
392 int error, result = 0;
382 393
394 if (is_set || !sn->rpcb_is_af_local)
395 flags = RPC_TASK_SOFTCONN;
383 msg->rpc_resp = &result; 396 msg->rpc_resp = &result;
384 397
385 error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN); 398 error = rpc_call_sync(clnt, msg, flags);
386 if (error < 0) { 399 if (error < 0) {
387 dprintk("RPC: failed to contact local rpcbind " 400 dprintk("RPC: failed to contact local rpcbind "
388 "server (errno %d).\n", -error); 401 "server (errno %d).\n", -error);
@@ -439,16 +452,19 @@ int rpcb_register(struct net *net, u32 prog, u32 vers, int prot, unsigned short
439 .rpc_argp = &map, 452 .rpc_argp = &map,
440 }; 453 };
441 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 454 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
455 bool is_set = false;
442 456
443 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " 457 dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
444 "rpcbind\n", (port ? "" : "un"), 458 "rpcbind\n", (port ? "" : "un"),
445 prog, vers, prot, port); 459 prog, vers, prot, port);
446 460
447 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET]; 461 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET];
448 if (port) 462 if (port != 0) {
449 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET]; 463 msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
464 is_set = true;
465 }
450 466
451 return rpcb_register_call(sn->rpcb_local_clnt, &msg); 467 return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set);
452} 468}
453 469
454/* 470/*
@@ -461,6 +477,7 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
461 const struct sockaddr_in *sin = (const struct sockaddr_in *)sap; 477 const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
462 struct rpcbind_args *map = msg->rpc_argp; 478 struct rpcbind_args *map = msg->rpc_argp;
463 unsigned short port = ntohs(sin->sin_port); 479 unsigned short port = ntohs(sin->sin_port);
480 bool is_set = false;
464 int result; 481 int result;
465 482
466 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); 483 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -471,10 +488,12 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
471 map->r_addr, map->r_netid); 488 map->r_addr, map->r_netid);
472 489
473 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; 490 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
474 if (port) 491 if (port != 0) {
475 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; 492 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
493 is_set = true;
494 }
476 495
477 result = rpcb_register_call(sn->rpcb_local_clnt4, msg); 496 result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
478 kfree(map->r_addr); 497 kfree(map->r_addr);
479 return result; 498 return result;
480} 499}
@@ -489,6 +508,7 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
489 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap; 508 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
490 struct rpcbind_args *map = msg->rpc_argp; 509 struct rpcbind_args *map = msg->rpc_argp;
491 unsigned short port = ntohs(sin6->sin6_port); 510 unsigned short port = ntohs(sin6->sin6_port);
511 bool is_set = false;
492 int result; 512 int result;
493 513
494 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); 514 map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -499,10 +519,12 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
499 map->r_addr, map->r_netid); 519 map->r_addr, map->r_netid);
500 520
501 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; 521 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
502 if (port) 522 if (port != 0) {
503 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; 523 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
524 is_set = true;
525 }
504 526
505 result = rpcb_register_call(sn->rpcb_local_clnt4, msg); 527 result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
506 kfree(map->r_addr); 528 kfree(map->r_addr);
507 return result; 529 return result;
508} 530}
@@ -519,7 +541,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
519 map->r_addr = ""; 541 map->r_addr = "";
520 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; 542 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
521 543
522 return rpcb_register_call(sn->rpcb_local_clnt4, msg); 544 return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false);
523} 545}
524 546
525/** 547/**
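
The rpcbind hunks above make the connect policy explicit: SET requests, and any request over a non-AF_LOCAL transport, may attempt a soft connect, while UNSET over the AF_LOCAL socket must not reconnect because that would need the original caller's mount namespace. The flag choice boiled down to one function with illustrative names:

#include <stdbool.h>

enum rpc_task_flag { TASK_SOFTCONN = 1, TASK_NOCONNECT = 2 };

/* SET, or any transport that is not AF_LOCAL: soft connect is allowed.
 * UNSET over the AF_LOCAL socket: never trigger a (re)connect. */
static enum rpc_task_flag rpcb_call_flags(bool is_set, bool is_af_local)
{
	return (is_set || !is_af_local) ? TASK_SOFTCONN : TASK_NOCONNECT;
}
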
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 75edcfad6e26..1504bb11e4f3 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -207,10 +207,13 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
207 pgfrom_base -= copy; 207 pgfrom_base -= copy;
208 208
209 vto = kmap_atomic(*pgto); 209 vto = kmap_atomic(*pgto);
210 vfrom = kmap_atomic(*pgfrom); 210 if (*pgto != *pgfrom) {
211 memmove(vto + pgto_base, vfrom + pgfrom_base, copy); 211 vfrom = kmap_atomic(*pgfrom);
212 memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
213 kunmap_atomic(vfrom);
214 } else
215 memmove(vto + pgto_base, vto + pgfrom_base, copy);
212 flush_dcache_page(*pgto); 216 flush_dcache_page(*pgto);
213 kunmap_atomic(vfrom);
214 kunmap_atomic(vto); 217 kunmap_atomic(vto);
215 218
216 } while ((len -= copy) != 0); 219 } while ((len -= copy) != 0);
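
The xdr fix above is about aliasing: when source and destination land on the same page, the two mapped ranges may overlap and memcpy() is undefined, so the in-page case must use memmove(), while distinct pages can keep the cheaper memcpy(). A plain-C analogue of that decision:

#include <string.h>

/* Copy len bytes from one (page, offset) to another. If both offsets
 * live in the same buffer the ranges may overlap, so use memmove();
 * otherwise a plain memcpy() between the two buffers is fine. */
static void shift_bytes(unsigned char *to_page, size_t to_off,
			unsigned char *from_page, size_t from_off,
			size_t len)
{
	if (to_page != from_page)
		memcpy(to_page + to_off, from_page + from_off, len);
	else
		memmove(to_page + to_off, to_page + from_off, len);
}
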
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index cb29ef7ba2f0..609c30c80816 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
460{ 460{
461 struct tipc_link *l_ptr; 461 struct tipc_link *l_ptr;
462 struct tipc_link *temp_l_ptr; 462 struct tipc_link *temp_l_ptr;
463 struct tipc_link_req *temp_req;
463 464
464 pr_info("Disabling bearer <%s>\n", b_ptr->name); 465 pr_info("Disabling bearer <%s>\n", b_ptr->name);
465 spin_lock_bh(&b_ptr->lock); 466 spin_lock_bh(&b_ptr->lock);
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
468 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 469 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
469 tipc_link_delete(l_ptr); 470 tipc_link_delete(l_ptr);
470 } 471 }
471 if (b_ptr->link_req) 472 temp_req = b_ptr->link_req;
472 tipc_disc_delete(b_ptr->link_req); 473 b_ptr->link_req = NULL;
473 spin_unlock_bh(&b_ptr->lock); 474 spin_unlock_bh(&b_ptr->lock);
475
476 if (temp_req)
477 tipc_disc_delete(temp_req);
478
474 memset(b_ptr, 0, sizeof(struct tipc_bearer)); 479 memset(b_ptr, 0, sizeof(struct tipc_bearer));
475} 480}
476 481
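
The bearer teardown above detaches link_req while holding the spinlock and only calls tipc_disc_delete() after the lock is dropped, since the delete path may block. The detach-then-destroy shape, sketched with a userspace mutex standing in for the bearer lock:

#include <pthread.h>
#include <stdlib.h>

struct disc_req { int dummy; };

struct bearer {
	pthread_mutex_t lock;
	struct disc_req *link_req;
};

/* Stand-in for tipc_disc_delete(): may sleep, so it must never run with
 * the bearer lock held. */
static void disc_delete(struct disc_req *req)
{
	free(req);
}

/* Detach under the lock, tear down only after dropping it. */
static void bearer_disable(struct bearer *b)
{
	struct disc_req *req;

	pthread_mutex_lock(&b->lock);
	req = b->link_req;
	b->link_req = NULL;
	pthread_mutex_unlock(&b->lock);

	if (req)
		disc_delete(req);
}
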
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ce8249c76827..6cc7ddd2fb7c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1257,7 +1257,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1257 /* Accept only ACK or NACK message */ 1257 /* Accept only ACK or NACK message */
1258 if (unlikely(msg_errcode(msg))) { 1258 if (unlikely(msg_errcode(msg))) {
1259 sock->state = SS_DISCONNECTING; 1259 sock->state = SS_DISCONNECTING;
1260 sk->sk_err = -ECONNREFUSED; 1260 sk->sk_err = ECONNREFUSED;
1261 retval = TIPC_OK; 1261 retval = TIPC_OK;
1262 break; 1262 break;
1263 } 1263 }
@@ -1268,7 +1268,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1268 res = auto_connect(sock, msg); 1268 res = auto_connect(sock, msg);
1269 if (res) { 1269 if (res) {
1270 sock->state = SS_DISCONNECTING; 1270 sock->state = SS_DISCONNECTING;
1271 sk->sk_err = res; 1271 sk->sk_err = -res;
1272 retval = TIPC_OK; 1272 retval = TIPC_OK;
1273 break; 1273 break;
1274 } 1274 }
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 593071dabd1c..4d9334683f84 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -347,7 +347,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
347 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { 347 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
348 struct vsock_sock *vsk; 348 struct vsock_sock *vsk;
349 list_for_each_entry(vsk, &vsock_connected_table[i], 349 list_for_each_entry(vsk, &vsock_connected_table[i],
350 connected_table); 350 connected_table)
351 fn(sk_vsock(vsk)); 351 fn(sk_vsock(vsk));
352 } 352 }
353 353
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 4f9f216665e9..a8c29fa4f1b3 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -765,6 +765,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
765 cfg80211_leave_mesh(rdev, dev); 765 cfg80211_leave_mesh(rdev, dev);
766 break; 766 break;
767 case NL80211_IFTYPE_AP: 767 case NL80211_IFTYPE_AP:
768 case NL80211_IFTYPE_P2P_GO:
768 cfg80211_stop_ap(rdev, dev); 769 cfg80211_stop_ap(rdev, dev);
769 break; 770 break;
770 default: 771 default:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 25d217d90807..5f6e982cdcf4 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -441,10 +441,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
441 goto out_unlock; 441 goto out_unlock;
442 } 442 }
443 *rdev = wiphy_to_dev((*wdev)->wiphy); 443 *rdev = wiphy_to_dev((*wdev)->wiphy);
444 cb->args[0] = (*rdev)->wiphy_idx; 444 /* 0 is the first index - add 1 to parse only once */
445 cb->args[0] = (*rdev)->wiphy_idx + 1;
445 cb->args[1] = (*wdev)->identifier; 446 cb->args[1] = (*wdev)->identifier;
446 } else { 447 } else {
447 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]); 448 /* subtract the 1 again here */
449 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
448 struct wireless_dev *tmp; 450 struct wireless_dev *tmp;
449 451
450 if (!wiphy) { 452 if (!wiphy) {
@@ -2620,8 +2622,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2620 2622
2621 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 2623 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
2622 NL80211_CMD_NEW_KEY); 2624 NL80211_CMD_NEW_KEY);
2623 if (IS_ERR(hdr)) 2625 if (!hdr)
2624 return PTR_ERR(hdr); 2626 return -ENOBUFS;
2625 2627
2626 cookie.msg = msg; 2628 cookie.msg = msg;
2627 cookie.idx = key_idx; 2629 cookie.idx = key_idx;
@@ -6505,6 +6507,9 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
6505 NL80211_CMD_TESTMODE); 6507 NL80211_CMD_TESTMODE);
6506 struct nlattr *tmdata; 6508 struct nlattr *tmdata;
6507 6509
6510 if (!hdr)
6511 break;
6512
6508 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { 6513 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
6509 genlmsg_cancel(skb, hdr); 6514 genlmsg_cancel(skb, hdr);
6510 break; 6515 break;
@@ -6949,9 +6954,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
6949 6954
6950 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 6955 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
6951 NL80211_CMD_REMAIN_ON_CHANNEL); 6956 NL80211_CMD_REMAIN_ON_CHANNEL);
6952 6957 if (!hdr) {
6953 if (IS_ERR(hdr)) { 6958 err = -ENOBUFS;
6954 err = PTR_ERR(hdr);
6955 goto free_msg; 6959 goto free_msg;
6956 } 6960 }
6957 6961
@@ -7249,9 +7253,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
7249 7253
7250 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 7254 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
7251 NL80211_CMD_FRAME); 7255 NL80211_CMD_FRAME);
7252 7256 if (!hdr) {
7253 if (IS_ERR(hdr)) { 7257 err = -ENOBUFS;
7254 err = PTR_ERR(hdr);
7255 goto free_msg; 7258 goto free_msg;
7256 } 7259 }
7257 } 7260 }
@@ -8130,9 +8133,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
8130 8133
8131 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 8134 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
8132 NL80211_CMD_PROBE_CLIENT); 8135 NL80211_CMD_PROBE_CLIENT);
8133 8136 if (!hdr) {
8134 if (IS_ERR(hdr)) { 8137 err = -ENOBUFS;
8135 err = PTR_ERR(hdr);
8136 goto free_msg; 8138 goto free_msg;
8137 } 8139 }
8138 8140
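
The repeated IS_ERR()/PTR_ERR() to !hdr conversions in this file all address the same mismatch: nl80211hdr_put() signals failure with a NULL pointer, not an ERR_PTR() value, so the old checks could never fire. A user-space sketch of the corrected check, with put_header() as a hypothetical stand-in for the NULL-on-failure helper:

#include <errno.h>
#include <stdio.h>

static char header_buf[32];

static void *put_header(int fail)
{
	return fail ? NULL : header_buf;        /* NULL on failure, never an encoded error */
}

int main(void)
{
	void *hdr = put_header(1);

	/* IS_ERR(NULL) is false, so an ERR_PTR()-style check here would miss the failure. */
	if (!hdr) {
		fprintf(stderr, "header put failed: %d\n", -ENOBUFS);
		return 1;
	}
	return 0;
}
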
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 81c8a10d743c..20e86a95dc4e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -976,21 +976,19 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
976 struct net_device *dev, u16 reason, bool wextev) 976 struct net_device *dev, u16 reason, bool wextev)
977{ 977{
978 struct wireless_dev *wdev = dev->ieee80211_ptr; 978 struct wireless_dev *wdev = dev->ieee80211_ptr;
979 int err; 979 int err = 0;
980 980
981 ASSERT_WDEV_LOCK(wdev); 981 ASSERT_WDEV_LOCK(wdev);
982 982
983 kfree(wdev->connect_keys); 983 kfree(wdev->connect_keys);
984 wdev->connect_keys = NULL; 984 wdev->connect_keys = NULL;
985 985
986 if (wdev->conn) { 986 if (wdev->conn)
987 err = cfg80211_sme_disconnect(wdev, reason); 987 err = cfg80211_sme_disconnect(wdev, reason);
988 } else if (!rdev->ops->disconnect) { 988 else if (!rdev->ops->disconnect)
989 cfg80211_mlme_down(rdev, dev); 989 cfg80211_mlme_down(rdev, dev);
990 err = 0; 990 else if (wdev->current_bss)
991 } else {
992 err = rdev_disconnect(rdev, dev, reason); 991 err = rdev_disconnect(rdev, dev, reason);
993 }
994 992
995 return err; 993 return err;
996} 994}
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index a23253e06358..9ee6bc1a7610 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -30,7 +30,8 @@ static ssize_t name ## _show(struct device *dev, \
30 char *buf) \ 30 char *buf) \
31{ \ 31{ \
32 return sprintf(buf, fmt "\n", dev_to_rdev(dev)->member); \ 32 return sprintf(buf, fmt "\n", dev_to_rdev(dev)->member); \
33} 33} \
34static DEVICE_ATTR_RO(name)
34 35
35SHOW_FMT(index, "%d", wiphy_idx); 36SHOW_FMT(index, "%d", wiphy_idx);
36SHOW_FMT(macaddress, "%pM", wiphy.perm_addr); 37SHOW_FMT(macaddress, "%pM", wiphy.perm_addr);
@@ -42,7 +43,7 @@ static ssize_t name_show(struct device *dev,
42 struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy; 43 struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy;
43 return sprintf(buf, "%s\n", dev_name(&wiphy->dev)); 44 return sprintf(buf, "%s\n", dev_name(&wiphy->dev));
44} 45}
45 46static DEVICE_ATTR_RO(name);
46 47
47static ssize_t addresses_show(struct device *dev, 48static ssize_t addresses_show(struct device *dev,
48 struct device_attribute *attr, 49 struct device_attribute *attr,
@@ -60,15 +61,17 @@ static ssize_t addresses_show(struct device *dev,
60 61
61 return buf - start; 62 return buf - start;
62} 63}
63 64static DEVICE_ATTR_RO(addresses);
64static struct device_attribute ieee80211_dev_attrs[] = { 65
65 __ATTR_RO(index), 66static struct attribute *ieee80211_attrs[] = {
66 __ATTR_RO(macaddress), 67 &dev_attr_index.attr,
67 __ATTR_RO(address_mask), 68 &dev_attr_macaddress.attr,
68 __ATTR_RO(addresses), 69 &dev_attr_address_mask.attr,
69 __ATTR_RO(name), 70 &dev_attr_addresses.attr,
70 {} 71 &dev_attr_name.attr,
72 NULL,
71}; 73};
74ATTRIBUTE_GROUPS(ieee80211);
72 75
73static void wiphy_dev_release(struct device *dev) 76static void wiphy_dev_release(struct device *dev)
74{ 77{
@@ -146,7 +149,7 @@ struct class ieee80211_class = {
146 .name = "ieee80211", 149 .name = "ieee80211",
147 .owner = THIS_MODULE, 150 .owner = THIS_MODULE,
148 .dev_release = wiphy_dev_release, 151 .dev_release = wiphy_dev_release,
149 .dev_attrs = ieee80211_dev_attrs, 152 .dev_groups = ieee80211_groups,
150 .dev_uevent = wiphy_uevent, 153 .dev_uevent = wiphy_uevent,
151#ifdef CONFIG_PM 154#ifdef CONFIG_PM
152 .suspend = wiphy_suspend, 155 .suspend = wiphy_suspend,
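
The sysfs hunks above move from the legacy dev_attrs array to attribute groups: DEVICE_ATTR_RO(name) pairs a name_show() function with a read-only dev_attr_name, and ATTRIBUTE_GROUPS() builds the *_groups table that struct class's dev_groups field consumes. A kernel-style sketch of that pattern, not standalone-runnable and using made-up foo/example names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);        /* placeholder value */
}
static DEVICE_ATTR_RO(foo);                     /* defines read-only dev_attr_foo */

static struct attribute *example_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);                      /* generates example_groups from example_attrs */

static struct class example_class = {
	.name       = "example",
	.dev_groups = example_groups,
};
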
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index eb4a84288648..3bb2cdc13b46 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -214,5 +214,26 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
214 return inner_mode->afinfo->extract_output(x, skb); 214 return inner_mode->afinfo->extract_output(x, skb);
215} 215}
216 216
217void xfrm_local_error(struct sk_buff *skb, int mtu)
218{
219 unsigned int proto;
220 struct xfrm_state_afinfo *afinfo;
221
222 if (skb->protocol == htons(ETH_P_IP))
223 proto = AF_INET;
224 else if (skb->protocol == htons(ETH_P_IPV6))
225 proto = AF_INET6;
226 else
227 return;
228
229 afinfo = xfrm_state_get_afinfo(proto);
230 if (!afinfo)
231 return;
232
233 afinfo->local_error(skb, mtu);
234 xfrm_state_put_afinfo(afinfo);
235}
236
217EXPORT_SYMBOL_GPL(xfrm_output); 237EXPORT_SYMBOL_GPL(xfrm_output);
218EXPORT_SYMBOL_GPL(xfrm_inner_extract_output); 238EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
239EXPORT_SYMBOL_GPL(xfrm_local_error);
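
The new xfrm_local_error() above is a small per-family dispatcher: it maps skb->protocol to AF_INET or AF_INET6, looks up that family's xfrm_state_afinfo, and calls its local_error handler. A user-space sketch of the same dispatch shape, using made-up fake_afinfo types and hard-coded ethertype constants:

#include <stdio.h>

struct fake_afinfo {
	void (*local_error)(int mtu);
};

static void ipv4_local_error(int mtu) { printf("IPv4 local error, mtu %d\n", mtu); }
static void ipv6_local_error(int mtu) { printf("IPv6 local error, mtu %d\n", mtu); }

static struct fake_afinfo ipv4_afinfo = { .local_error = ipv4_local_error };
static struct fake_afinfo ipv6_afinfo = { .local_error = ipv6_local_error };

static void local_error(unsigned int ethertype, int mtu)
{
	struct fake_afinfo *afinfo;

	if (ethertype == 0x0800)                /* ETH_P_IP */
		afinfo = &ipv4_afinfo;
	else if (ethertype == 0x86DD)           /* ETH_P_IPV6 */
		afinfo = &ipv6_afinfo;
	else
		return;                         /* unknown family: nothing to report */

	if (afinfo->local_error)
		afinfo->local_error(mtu);
}

int main(void)
{
	local_error(0x0800, 1400);
	local_error(0x86DD, 1280);
	return 0;
}
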
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index e52cab3591dd..f77c371ea72b 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -320,10 +320,8 @@ static void xfrm_queue_purge(struct sk_buff_head *list)
320{ 320{
321 struct sk_buff *skb; 321 struct sk_buff *skb;
322 322
323 while ((skb = skb_dequeue(list)) != NULL) { 323 while ((skb = skb_dequeue(list)) != NULL)
324 dev_put(skb->dev);
325 kfree_skb(skb); 324 kfree_skb(skb);
326 }
327} 325}
328 326
329/* Rule must be locked. Release descentant resources, announce 327/* Rule must be locked. Release descentant resources, announce
@@ -1758,7 +1756,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
1758 struct sk_buff *skb; 1756 struct sk_buff *skb;
1759 struct sock *sk; 1757 struct sock *sk;
1760 struct dst_entry *dst; 1758 struct dst_entry *dst;
1761 struct net_device *dev;
1762 struct xfrm_policy *pol = (struct xfrm_policy *)arg; 1759 struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1763 struct xfrm_policy_queue *pq = &pol->polq; 1760 struct xfrm_policy_queue *pq = &pol->polq;
1764 struct flowi fl; 1761 struct flowi fl;
@@ -1805,7 +1802,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
1805 dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path, 1802 dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
1806 &fl, skb->sk, 0); 1803 &fl, skb->sk, 0);
1807 if (IS_ERR(dst)) { 1804 if (IS_ERR(dst)) {
1808 dev_put(skb->dev);
1809 kfree_skb(skb); 1805 kfree_skb(skb);
1810 continue; 1806 continue;
1811 } 1807 }
@@ -1814,9 +1810,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
1814 skb_dst_drop(skb); 1810 skb_dst_drop(skb);
1815 skb_dst_set(skb, dst); 1811 skb_dst_set(skb, dst);
1816 1812
1817 dev = skb->dev;
1818 err = dst_output(skb); 1813 err = dst_output(skb);
1819 dev_put(dev);
1820 } 1814 }
1821 1815
1822 return; 1816 return;
@@ -1839,7 +1833,6 @@ static int xdst_queue_output(struct sk_buff *skb)
1839 } 1833 }
1840 1834
1841 skb_dst_force(skb); 1835 skb_dst_force(skb);
1842 dev_hold(skb->dev);
1843 1836
1844 spin_lock_bh(&pq->hold_queue.lock); 1837 spin_lock_bh(&pq->hold_queue.lock);
1845 1838
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 78f66fa92449..54c0acd29468 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -39,9 +39,6 @@ static DEFINE_SPINLOCK(xfrm_state_lock);
39 39
40static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; 40static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
41 41
42static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
43static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
44
45static inline unsigned int xfrm_dst_hash(struct net *net, 42static inline unsigned int xfrm_dst_hash(struct net *net,
46 const xfrm_address_t *daddr, 43 const xfrm_address_t *daddr,
47 const xfrm_address_t *saddr, 44 const xfrm_address_t *saddr,
@@ -1860,7 +1857,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1860} 1857}
1861EXPORT_SYMBOL(xfrm_state_unregister_afinfo); 1858EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1862 1859
1863static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) 1860struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1864{ 1861{
1865 struct xfrm_state_afinfo *afinfo; 1862 struct xfrm_state_afinfo *afinfo;
1866 if (unlikely(family >= NPROTO)) 1863 if (unlikely(family >= NPROTO))
@@ -1872,7 +1869,7 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1872 return afinfo; 1869 return afinfo;
1873} 1870}
1874 1871
1875static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) 1872void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1876{ 1873{
1877 rcu_read_unlock(); 1874 rcu_read_unlock();
1878} 1875}