author    Linus Torvalds <torvalds@linux-foundation.org>  2016-10-29 23:33:20 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-10-29 23:33:20 -0400
commit    2a26d99b251b8625d27aed14e97fc10707a3a81f (patch)
tree      69eb8aa0476294236ceb8a864be9a697e2303ace /net
parent    a909d3e636995ba7c349e2ca5dbb528154d4ac30 (diff)
parent    fceb9c3e38252992bbf1a3028cc2f7b871211533 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Lots of fixes, mostly drivers as is usually the case.

   1) Don't treat zero DMA address as invalid in vmxnet3, from Alexey Khoroshilov.

   2) Fix element timeouts in netfilter's nft_dynset, from Anders K. Pedersen.

   3) Don't put aead_req crypto struct on the stack in mac80211, from Ard Biesheuvel.

   4) Several uninitialized variable warning fixes from Arnd Bergmann.

   5) Fix memory leak in cxgb4, from Colin Ian King.

   6) Fix bpf handling of VLAN header push/pop, from Daniel Borkmann.

   7) Several VRF semantic fixes from David Ahern.

   8) Set skb->protocol properly in ip6_tnl_xmit(), from Eli Cooper.

   9) Socket needs to be locked in udp_disconnect(), from Eric Dumazet.

  10) Div-by-zero on 32-bit fix in mlx4 driver, from Eugenia Emantayev.

  11) Fix stale link state during failover in NCSI driver, from Gavin Shan.

  12) Fix netdev lower adjacency list traversal, from Ido Schimmel.

  13) Provide proper handle when emitting notifications of filter deletes, from Jamal Hadi Salim.

  14) Memory leaks and big-endian issues in rtl8xxxu, from Jes Sorensen.

  15) Fix DESYNC_FACTOR handling in ipv6, from Jiri Bohac.

  16) Several routing offload fixes in mlxsw driver, from Jiri Pirko.

  17) Fix broadcast sync problem in TIPC, from Jon Paul Maloy.

  18) Validate chunk len before using it in SCTP, from Marcelo Ricardo Leitner.

  19) Revert a netns locking change that causes regressions, from Paul Moore.

  20) Add recursion limit to GRO handling, from Sabrina Dubroca.

  21) GFP_KERNEL in irq context fix in ibmvnic, from Thomas Falcon.

  22) Avoid accessing stale vxlan/geneve socket in data path, from Pravin Shelar"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (189 commits)
  geneve: avoid using stale geneve socket.
  vxlan: avoid using stale vxlan socket.
  qede: Fix out-of-bound fastpath memory access
  net: phy: dp83848: add dp83822 PHY support
  enic: fix rq disable
  tipc: fix broadcast link synchronization problem
  ibmvnic: Fix missing brackets in init_sub_crq_irqs
  ibmvnic: Fix releasing of sub-CRQ IRQs in interrupt context
  Revert "ibmvnic: Fix releasing of sub-CRQ IRQs in interrupt context"
  arch/powerpc: Update parameters for csum_tcpudp_magic & csum_tcpudp_nofold
  net/mlx4_en: Save slave ethtool stats command
  net/mlx4_en: Fix potential deadlock in port statistics flow
  net/mlx4: Fix firmware command timeout during interrupt test
  net/mlx4_core: Do not access comm channel if it has not yet been initialized
  net/mlx4_en: Fix panic during reboot
  net/mlx4_en: Process all completions in RX rings after port goes up
  net/mlx4_en: Resolve dividing by zero in 32-bit system
  net/mlx4_core: Change the default value of enable_qos
  net/mlx4_core: Avoid setting ports to auto when only one port type is supported
  net/mlx4_core: Fix the resource-type enum in res tracker to conform to FW spec
  ...
Diffstat (limited to 'net')
 net/8021q/vlan.c                  |   2
 net/batman-adv/hard-interface.c   |   1
 net/batman-adv/log.h              |   2
 net/batman-adv/originator.c       |   2
 net/bluetooth/hci_request.c       |  49
 net/bluetooth/hci_request.h       |   2
 net/bluetooth/mgmt.c              |  26
 net/bridge/br_multicast.c         |  23
 net/core/dev.c                    |  12
 net/core/flow_dissector.c         |  12
 net/core/net_namespace.c          |  35
 net/core/pktgen.c                 |  17
 net/core/sock_reuseport.c         |   1
 net/ethernet/eth.c                |   2
 net/hsr/hsr_forward.c             |   4
 net/ipv4/af_inet.c                |   2
 net/ipv4/fou.c                    |   4
 net/ipv4/gre_offload.c            |   2
 net/ipv4/inet_hashtables.c        |   8
 net/ipv4/ip_output.c              |   3
 net/ipv4/ip_sockglue.c            |  11
 net/ipv4/ping.c                   |   2
 net/ipv4/raw.c                    |   2
 net/ipv4/sysctl_net_ipv4.c        |   8
 net/ipv4/tcp_ipv4.c               |   3
 net/ipv4/udp.c                    |  15
 net/ipv4/udp_offload.c            |   2
 net/ipv6/addrconf.c               | 101
 net/ipv6/inet6_hashtables.c       |  13
 net/ipv6/ip6_offload.c            |   2
 net/ipv6/ip6_tunnel.c             |   3
 net/ipv6/ipv6_sockglue.c          |   3
 net/ipv6/mcast.c                  |  17
 net/ipv6/ping.c                   |   2
 net/ipv6/raw.c                    |   2
 net/ipv6/reassembly.c             |   3
 net/ipv6/route.c                  |  74
 net/ipv6/udp.c                    |   3
 net/l2tp/l2tp_ip.c                |   2
 net/l2tp/l2tp_ip6.c               |   2
 net/mac80211/aes_ccm.c            |  46
 net/mac80211/aes_ccm.h            |   8
 net/mac80211/aes_gcm.c            |  43
 net/mac80211/aes_gcm.h            |   6
 net/mac80211/aes_gmac.c           |  26
 net/mac80211/aes_gmac.h           |   4
 net/mac80211/offchannel.c         |   2
 net/mac80211/rx.c                 |  51
 net/mac80211/wpa.c                |  22
 net/ncsi/internal.h               |   2
 net/ncsi/ncsi-aen.c               |  18
 net/ncsi/ncsi-manage.c            | 126
 net/netfilter/core.c              |  13
 net/netfilter/nf_conntrack_core.c |   2
 net/netfilter/nf_internals.h      |   2
 net/netfilter/nf_queue.c          |  48
 net/netfilter/nf_tables_api.c     |   2
 net/netfilter/nft_dynset.c        |   6
 net/netfilter/nft_exthdr.c        |   3
 net/netfilter/nft_hash.c          |   1
 net/netfilter/nft_range.c         |  26
 net/netfilter/x_tables.c          |   2
 net/netfilter/xt_NFLOG.c          |   1
 net/netfilter/xt_hashlimit.c      |   4
 net/netfilter/xt_ipcomp.c         |   2
 net/packet/af_packet.c            |   9
 net/rds/Makefile                  |   2
 net/rds/rds.h                     |   2
 net/rxrpc/call_object.c           |   2
 net/rxrpc/peer_object.c           |   4
 net/sched/act_api.c               |   3
 net/sched/act_mirred.c            |   5
 net/sched/cls_api.c               |   3
 net/sctp/output.c                 |   8
 net/sctp/sm_statefuns.c           |  12
 net/sctp/socket.c                 |   5
 net/switchdev/switchdev.c         |   9
 net/tipc/bcast.c                  |  14
 net/tipc/bcast.h                  |   3
 net/tipc/link.c                   |   2
 net/tipc/msg.h                    |  17
 net/tipc/name_distr.c             |   1
 net/tipc/node.c                   |   2
 net/wireless/sysfs.c              |   5
 net/wireless/util.c               |  34
85 files changed, 675 insertions, 412 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8de138d3306b..f2531ad66b68 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 
         skb_gro_pull(skb, sizeof(*vhdr));
         skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
-        pp = ptype->callbacks.gro_receive(head, skb);
+        pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
         rcu_read_unlock();
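Note: the gro_receive call sites rewritten in this hunk (and in the eth.c, af_inet.c, fou.c, gre_offload.c, ip6_offload.c and udp_offload.c hunks further down) implement item 20 of the merge message, the GRO recursion limit. The wrapper itself lives in include/linux/netdevice.h and so is outside this net/-only diffstat; the sketch below shows roughly how it pairs with the recursion_counter initialisation added in the net/core/dev.c hunk. The typedef, the helper name gro_recursion_inc_test() and the limit value are assumptions here, not quotes of the header.

typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);

#define GRO_RECURSION_LIMIT 15          /* assumed depth cap */

static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
        /* counter is zeroed per packet in dev_gro_receive(), see the dev.c hunk */
        return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
                                                struct sk_buff **head,
                                                struct sk_buff *skb)
{
        if (unlikely(gro_recursion_inc_test(skb))) {
                /* too many nested encapsulations: flush instead of recursing */
                NAPI_GRO_CB(skb)->flush |= 1;
                return NULL;
        }

        return cb(head, skb);
}

call_gro_receive_sk(), used in the udp_offload.c hunk, is presumably the same guard with a struct sock * passed through to the callback.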
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 08ce36147c4c..e034afbd1bb0 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -652,7 +652,6 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
                 batadv_softif_destroy_sysfs(hard_iface->soft_iface);
         }
 
-        hard_iface->soft_iface = NULL;
         batadv_hardif_put(hard_iface);
 
 out:
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
index e0e1a88c3e58..d2905a855d1b 100644
--- a/net/batman-adv/log.h
+++ b/net/batman-adv/log.h
@@ -63,7 +63,7 @@ enum batadv_dbg_level {
         BATADV_DBG_NC = BIT(5),
         BATADV_DBG_MCAST = BIT(6),
         BATADV_DBG_TP_METER = BIT(7),
-        BATADV_DBG_ALL = 127,
+        BATADV_DBG_ALL = 255,
 };
 
 #ifdef CONFIG_BATMAN_ADV_DEBUG
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 5f3bfc41aeb1..7c8d16086f0f 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -544,7 +544,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
         if (bat_priv->algo_ops->neigh.hardif_init)
                 bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
 
-        hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
+        hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);
 
 out:
         spin_unlock_bh(&hard_iface->neigh_list_lock);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e2288421fe6b..1015d9c8d97d 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -969,41 +969,38 @@ void __hci_req_enable_advertising(struct hci_request *req)
         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
 }
 
-static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
+u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
 {
-        size_t complete_len;
         size_t short_len;
-        int max_len;
-
-        max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
-        complete_len = strlen(hdev->dev_name);
-        short_len = strlen(hdev->short_name);
-
-        /* no space left for name */
-        if (max_len < 1)
-                return ad_len;
+        size_t complete_len;
 
-        /* no name set */
-        if (!complete_len)
+        /* no space left for name (+ NULL + type + len) */
+        if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
                 return ad_len;
 
-        /* complete name fits and is eq to max short name len or smaller */
-        if (complete_len <= max_len &&
-            complete_len <= HCI_MAX_SHORT_NAME_LENGTH) {
+        /* use complete name if present and fits */
+        complete_len = strlen(hdev->dev_name);
+        if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
                 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
-                                       hdev->dev_name, complete_len);
-        }
+                                       hdev->dev_name, complete_len + 1);
 
-        /* short name set and fits */
-        if (short_len && short_len <= max_len) {
+        /* use short name if present */
+        short_len = strlen(hdev->short_name);
+        if (short_len)
                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
-                                       hdev->short_name, short_len);
-        }
+                                       hdev->short_name, short_len + 1);
 
-        /* no short name set so shorten complete name */
-        if (!short_len) {
-                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
-                                       hdev->dev_name, max_len);
+        /* use shortened full name if present, we already know that name
+         * is longer then HCI_MAX_SHORT_NAME_LENGTH
+         */
+        if (complete_len) {
+                u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
+
+                memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
+                name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
+
+                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
+                                       sizeof(name));
         }
 
         return ad_len;
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index 6b06629245a8..dde77bd59f91 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -106,6 +106,8 @@ static inline void hci_update_background_scan(struct hci_dev *hdev)
 void hci_request_setup(struct hci_dev *hdev);
 void hci_request_cancel_all(struct hci_dev *hdev);
 
+u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
+
 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
                                   u8 *data, u8 data_len)
 {
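Note: the length arithmetic in the hci_request.c hunk above (complete_len + 1, short_len + 1, and the HCI_MAX_SHORT_NAME_LENGTH + 3 bound) is easier to follow against eir_append_data(), whose body is cut off by the hunk context here. A sketch of what it does, consistent with the visible signature and treated as a reconstruction rather than a quote:

static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
                                  u8 *data, u8 data_len)
{
        /* one length byte (covering type + payload), one type byte, then data */
        eir[eir_len++] = sizeof(type) + data_len;
        eir[eir_len++] = type;
        memcpy(&eir[eir_len], data, data_len);
        eir_len += data_len;

        return eir_len;
}

With that shape, calculate_name_len() in the mgmt.c hunk below can size its scratch buffer as HCI_MAX_SHORT_NAME_LENGTH + 3: the name plus its NUL terminator, the type byte and the length byte.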
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 736038085feb..1fba2a03f8ae 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -6017,7 +6017,15 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
         return err;
 }
 
-static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
+static u8 calculate_name_len(struct hci_dev *hdev)
+{
+        u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
+
+        return append_local_name(hdev, buf, 0);
+}
+
+static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
+                           bool is_adv_data)
 {
         u8 max_len = HCI_MAX_AD_LENGTH;
 
@@ -6030,9 +6038,8 @@ static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
                 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
                         max_len -= 3;
         } else {
-                /* at least 1 byte of name should fit in */
                 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
-                        max_len -= 3;
+                        max_len -= calculate_name_len(hdev);
 
                 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
                         max_len -= 4;
@@ -6063,12 +6070,13 @@ static bool appearance_managed(u32 adv_flags)
         return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
 }
 
-static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data)
+static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
+                              u8 len, bool is_adv_data)
 {
         int i, cur_len;
         u8 max_len;
 
-        max_len = tlv_data_max_len(adv_flags, is_adv_data);
+        max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
 
         if (len > max_len)
                 return false;
@@ -6215,8 +6223,8 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
                 goto unlock;
         }
 
-        if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) ||
-            !tlv_data_is_valid(flags, cp->data + cp->adv_data_len,
+        if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
+            !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
                                cp->scan_rsp_len, false)) {
                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
                                       MGMT_STATUS_INVALID_PARAMS);
@@ -6429,8 +6437,8 @@ static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
 
         rp.instance = cp->instance;
         rp.flags = cp->flags;
-        rp.max_adv_data_len = tlv_data_max_len(flags, true);
-        rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
+        rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
+        rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
 
         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
                                 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index c5fea9393946..2136e45f5277 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -972,13 +972,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
         mod_timer(&query->timer, jiffies);
 }
 
-void br_multicast_enable_port(struct net_bridge_port *port)
+static void __br_multicast_enable_port(struct net_bridge_port *port)
 {
         struct net_bridge *br = port->br;
 
-        spin_lock(&br->multicast_lock);
         if (br->multicast_disabled || !netif_running(br->dev))
-                goto out;
+                return;
 
         br_multicast_enable(&port->ip4_own_query);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -987,8 +986,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
         if (port->multicast_router == MDB_RTR_TYPE_PERM &&
             hlist_unhashed(&port->rlist))
                 br_multicast_add_router(br, port);
+}
 
-out:
+void br_multicast_enable_port(struct net_bridge_port *port)
+{
+        struct net_bridge *br = port->br;
+
+        spin_lock(&br->multicast_lock);
+        __br_multicast_enable_port(port);
         spin_unlock(&br->multicast_lock);
 }
 
@@ -1994,8 +1999,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
 
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 {
-        int err = 0;
         struct net_bridge_mdb_htable *mdb;
+        struct net_bridge_port *port;
+        int err = 0;
 
         spin_lock_bh(&br->multicast_lock);
         if (br->multicast_disabled == !val)
@@ -2023,10 +2029,9 @@ rollback:
                 goto rollback;
         }
 
-        br_multicast_start_querier(br, &br->ip4_own_query);
-#if IS_ENABLED(CONFIG_IPV6)
-        br_multicast_start_querier(br, &br->ip6_own_query);
-#endif
+        br_multicast_open(br);
+        list_for_each_entry(port, &br->port_list, list)
+                __br_multicast_enable_port(port);
 
 unlock:
         spin_unlock_bh(&br->multicast_lock);
diff --git a/net/core/dev.c b/net/core/dev.c
index 4bc19a164ba5..820bac239738 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3035,6 +3035,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
         }
         return head;
 }
+EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
 
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
@@ -4511,6 +4512,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
         NAPI_GRO_CB(skb)->flush = 0;
         NAPI_GRO_CB(skb)->free = 0;
         NAPI_GRO_CB(skb)->encap_mark = 0;
+        NAPI_GRO_CB(skb)->recursion_counter = 0;
         NAPI_GRO_CB(skb)->is_fou = 0;
         NAPI_GRO_CB(skb)->is_atomic = 1;
         NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
@@ -5511,10 +5513,14 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
 {
         struct netdev_adjacent *lower;
 
-        lower = list_first_or_null_rcu(&dev->all_adj_list.lower,
-                                       struct netdev_adjacent, list);
+        lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+        if (&lower->list == &dev->all_adj_list.lower)
+                return NULL;
+
+        *iter = &lower->list;
 
-        return lower ? lower->dev : NULL;
+        return lower->dev;
 }
 EXPORT_SYMBOL(netdev_all_lower_get_next_rcu);
 
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 1a7b80f73376..ab193e5def07 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -246,15 +246,13 @@ ipv6:
         case htons(ETH_P_8021AD):
         case htons(ETH_P_8021Q): {
                 const struct vlan_hdr *vlan;
+                struct vlan_hdr _vlan;
+                bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
 
-                if (skb_vlan_tag_present(skb))
+                if (vlan_tag_present)
                         proto = skb->protocol;
 
-                if (!skb_vlan_tag_present(skb) ||
-                    proto == cpu_to_be16(ETH_P_8021Q) ||
-                    proto == cpu_to_be16(ETH_P_8021AD)) {
-                        struct vlan_hdr _vlan;
-
+                if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
                         vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
                                                     data, hlen, &_vlan);
                         if (!vlan)
@@ -272,7 +270,7 @@ ipv6:
                                                      FLOW_DISSECTOR_KEY_VLAN,
                                                      target_container);
 
-                if (skb_vlan_tag_present(skb)) {
+                if (vlan_tag_present) {
                         key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
                         key_vlan->vlan_priority =
                                 (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
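Note: the three-line protocol comparison removed in the first flow_dissector.c hunk is folded into eth_type_vlan(), a helper defined in include/linux/if_vlan.h and therefore not part of this net/-only diff. A rough sketch of it, as an assumption rather than a quote of the header:

static inline bool eth_type_vlan(__be16 ethertype)
{
        switch (ethertype) {
        case htons(ETH_P_8021Q):
        case htons(ETH_P_8021AD):
                return true;
        default:
                return false;
        }
}

Caching vlan_tag_present as "skb && skb_vlan_tag_present(skb)" also lets the dissector run over a bare data/hlen buffer where skb is NULL.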
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 989434f36f96..f61c0e02a413 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -215,13 +215,14 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
  */
 int peernet2id_alloc(struct net *net, struct net *peer)
 {
+        unsigned long flags;
         bool alloc;
         int id;
 
-        spin_lock_bh(&net->nsid_lock);
+        spin_lock_irqsave(&net->nsid_lock, flags);
         alloc = atomic_read(&peer->count) == 0 ? false : true;
         id = __peernet2id_alloc(net, peer, &alloc);
-        spin_unlock_bh(&net->nsid_lock);
+        spin_unlock_irqrestore(&net->nsid_lock, flags);
         if (alloc && id >= 0)
                 rtnl_net_notifyid(net, RTM_NEWNSID, id);
         return id;
@@ -230,11 +231,12 @@ int peernet2id_alloc(struct net *net, struct net *peer)
 /* This function returns, if assigned, the id of a peer netns. */
 int peernet2id(struct net *net, struct net *peer)
 {
+        unsigned long flags;
         int id;
 
-        spin_lock_bh(&net->nsid_lock);
+        spin_lock_irqsave(&net->nsid_lock, flags);
         id = __peernet2id(net, peer);
-        spin_unlock_bh(&net->nsid_lock);
+        spin_unlock_irqrestore(&net->nsid_lock, flags);
         return id;
 }
 EXPORT_SYMBOL(peernet2id);
@@ -249,17 +251,18 @@ bool peernet_has_id(struct net *net, struct net *peer)
 
 struct net *get_net_ns_by_id(struct net *net, int id)
 {
+        unsigned long flags;
         struct net *peer;
 
         if (id < 0)
                 return NULL;
 
         rcu_read_lock();
-        spin_lock_bh(&net->nsid_lock);
+        spin_lock_irqsave(&net->nsid_lock, flags);
         peer = idr_find(&net->netns_ids, id);
         if (peer)
                 get_net(peer);
-        spin_unlock_bh(&net->nsid_lock);
+        spin_unlock_irqrestore(&net->nsid_lock, flags);
         rcu_read_unlock();
 
         return peer;
@@ -422,17 +425,17 @@ static void cleanup_net(struct work_struct *work)
                 for_each_net(tmp) {
                         int id;
 
-                        spin_lock_bh(&tmp->nsid_lock);
+                        spin_lock_irq(&tmp->nsid_lock);
                         id = __peernet2id(tmp, net);
                         if (id >= 0)
                                 idr_remove(&tmp->netns_ids, id);
-                        spin_unlock_bh(&tmp->nsid_lock);
+                        spin_unlock_irq(&tmp->nsid_lock);
                         if (id >= 0)
                                 rtnl_net_notifyid(tmp, RTM_DELNSID, id);
                 }
-                spin_lock_bh(&net->nsid_lock);
+                spin_lock_irq(&net->nsid_lock);
                 idr_destroy(&net->netns_ids);
-                spin_unlock_bh(&net->nsid_lock);
+                spin_unlock_irq(&net->nsid_lock);
 
         }
         rtnl_unlock();
@@ -561,6 +564,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
         struct net *net = sock_net(skb->sk);
         struct nlattr *tb[NETNSA_MAX + 1];
+        unsigned long flags;
         struct net *peer;
         int nsid, err;
 
@@ -581,15 +585,15 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
         if (IS_ERR(peer))
                 return PTR_ERR(peer);
 
-        spin_lock_bh(&net->nsid_lock);
+        spin_lock_irqsave(&net->nsid_lock, flags);
         if (__peernet2id(net, peer) >= 0) {
-                spin_unlock_bh(&net->nsid_lock);
+                spin_unlock_irqrestore(&net->nsid_lock, flags);
                 err = -EEXIST;
                 goto out;
         }
 
         err = alloc_netid(net, peer, nsid);
-        spin_unlock_bh(&net->nsid_lock);
+        spin_unlock_irqrestore(&net->nsid_lock, flags);
         if (err >= 0) {
                 rtnl_net_notifyid(net, RTM_NEWNSID, err);
                 err = 0;
@@ -711,10 +715,11 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
                 .idx = 0,
                 .s_idx = cb->args[0],
         };
+        unsigned long flags;
 
-        spin_lock_bh(&net->nsid_lock);
+        spin_lock_irqsave(&net->nsid_lock, flags);
         idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
-        spin_unlock_bh(&net->nsid_lock);
+        spin_unlock_irqrestore(&net->nsid_lock, flags);
 
         cb->args[0] = net_cb.idx;
         return skb->len;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 5219a9e2127a..306b8f0e03c1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -216,8 +216,8 @@
 #define M_QUEUE_XMIT 2 /* Inject packet into qdisc */
 
 /* If lock -- protects updating of if_list */
-#define if_lock(t) spin_lock(&(t->if_lock));
-#define if_unlock(t) spin_unlock(&(t->if_lock));
+#define if_lock(t) mutex_lock(&(t->if_lock));
+#define if_unlock(t) mutex_unlock(&(t->if_lock));
 
 /* Used to help with determining the pkts on receive */
 #define PKTGEN_MAGIC 0xbe9be955
@@ -423,7 +423,7 @@ struct pktgen_net {
 };
 
 struct pktgen_thread {
-        spinlock_t if_lock;             /* for list of devices */
+        struct mutex if_lock;           /* for list of devices */
         struct list_head if_list;       /* All device here */
         struct list_head th_list;
         struct task_struct *tsk;
@@ -2010,11 +2010,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
 {
         struct pktgen_thread *t;
 
+        mutex_lock(&pktgen_thread_lock);
+
         list_for_each_entry(t, &pn->pktgen_threads, th_list) {
                 struct pktgen_dev *pkt_dev;
 
-                rcu_read_lock();
-                list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
+                if_lock(t);
+                list_for_each_entry(pkt_dev, &t->if_list, list) {
                         if (pkt_dev->odev != dev)
                                 continue;
 
@@ -2029,8 +2031,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
                                dev->name);
                         break;
                 }
-                rcu_read_unlock();
+                if_unlock(t);
         }
+        mutex_unlock(&pktgen_thread_lock);
 }
 
 static int pktgen_device_event(struct notifier_block *unused,
@@ -3762,7 +3765,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
                 return -ENOMEM;
         }
 
-        spin_lock_init(&t->if_lock);
+        mutex_init(&t->if_lock);
         t->cpu = cpu;
 
         INIT_LIST_HEAD(&t->if_list);
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index e92b759d906c..9a1a352fd1eb 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -129,7 +129,6 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
 
         return 0;
 }
-EXPORT_SYMBOL(reuseport_add_sock);
 
 static void reuseport_free_rcu(struct rcu_head *head)
 {
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 66dff5e3d772..02acfff36028 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -439,7 +439,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
 
         skb_gro_pull(skb, sizeof(*eh));
         skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-        pp = ptype->callbacks.gro_receive(head, skb);
+        pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
         rcu_read_unlock();
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 5ee1d43f1310..4ebe2aa3e7d3 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -300,10 +300,6 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
 static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
                              struct hsr_frame_info *frame)
 {
-        struct net_device *master_dev;
-
-        master_dev = hsr_port_get_hsr(hsr, HSR_PT_MASTER)->dev;
-
         if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
                 frame->is_local_exclusive = true;
                 skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1effc986739e..9648c97e541f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1391,7 +1391,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
         skb_gro_pull(skb, sizeof(*iph));
         skb_set_transport_header(skb, skb_gro_offset(skb));
 
-        pp = ops->callbacks.gro_receive(head, skb);
+        pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
         rcu_read_unlock();
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index cf50f7e2b012..030d1531e897 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -249,7 +249,7 @@ static struct sk_buff **fou_gro_receive(struct sock *sk,
         if (!ops || !ops->callbacks.gro_receive)
                 goto out_unlock;
 
-        pp = ops->callbacks.gro_receive(head, skb);
+        pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
         rcu_read_unlock();
@@ -441,7 +441,7 @@ next_proto:
         if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
                 goto out_unlock;
 
-        pp = ops->callbacks.gro_receive(head, skb);
+        pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
         flush = 0;
 
 out_unlock:
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 96e0efecefa6..d5cac99170b1 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -229,7 +229,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
         /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
         skb_gro_postpull_rcsum(skb, greh, grehlen);
 
-        pp = ptype->callbacks.gro_receive(head, skb);
+        pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
         flush = 0;
 
 out_unlock:
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 77c20a489218..ca97835bfec4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -25,6 +25,7 @@
 #include <net/inet_hashtables.h>
 #include <net/secure_seq.h>
 #include <net/ip.h>
+#include <net/tcp.h>
 #include <net/sock_reuseport.h>
 
 static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
@@ -172,7 +173,7 @@ EXPORT_SYMBOL_GPL(__inet_inherit_port);
 
 static inline int compute_score(struct sock *sk, struct net *net,
                                 const unsigned short hnum, const __be32 daddr,
-                                const int dif)
+                                const int dif, bool exact_dif)
 {
         int score = -1;
         struct inet_sock *inet = inet_sk(sk);
@@ -186,7 +187,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
                         return -1;
                 score += 4;
         }
-        if (sk->sk_bound_dev_if) {
+        if (sk->sk_bound_dev_if || exact_dif) {
                 if (sk->sk_bound_dev_if != dif)
                         return -1;
                 score += 4;
@@ -215,11 +216,12 @@ struct sock *__inet_lookup_listener(struct net *net,
         unsigned int hash = inet_lhashfn(net, hnum);
         struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
         int score, hiscore = 0, matches = 0, reuseport = 0;
+        bool exact_dif = inet_exact_dif_match(net, skb);
         struct sock *sk, *result = NULL;
         u32 phash = 0;
 
         sk_for_each_rcu(sk, &ilb->head) {
-                score = compute_score(sk, net, hnum, daddr, dif);
+                score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
                 if (score > hiscore) {
                         reuseport = sk->sk_reuseport;
                         if (reuseport) {
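Note: exact_dif comes from inet_exact_dif_match(), which is why this hunk adds #include <net/tcp.h>; the helper itself is defined outside net/ and is not shown in this diff. The sketch below gives the gist of the VRF-related check (item 7 in the merge message); the field and flag names are assumptions from memory, not a quote of the header.

static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        /* If the skb arrived through an l3mdev (VRF) port and the
         * tcp_l3mdev_accept sysctl is off, demand an exact ifindex match
         * even for listeners that are not bound to a device.
         */
        if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
            skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
                return true;
#endif
        return false;
}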
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 05d105832bdb..03e7f7310423 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -538,7 +538,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 {
         struct iphdr *iph;
         int ptr;
-        struct net_device *dev;
         struct sk_buff *skb2;
         unsigned int mtu, hlen, left, len, ll_rs;
         int offset;
@@ -546,8 +545,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
         struct rtable *rt = skb_rtable(skb);
         int err = 0;
 
-        dev = rt->dst.dev;
-
         /* for offloaded checksums cleanup checksum before fragmentation */
         if (skb->ip_summed == CHECKSUM_PARTIAL &&
             (err = skb_checksum_help(skb)))
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index af4919792b6a..b8a2d63d1fb8 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
 }
 
 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
-                                  int offset)
+                                  int tlen, int offset)
 {
         __wsum csum = skb->csum;
 
@@ -106,8 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
                 return;
 
         if (offset != 0)
-                csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
-                                                   offset, 0));
+                csum = csum_sub(csum,
+                                csum_partial(skb_transport_header(skb) + tlen,
+                                             offset, 0));
 
         put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
 }
@@ -153,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 }
 
 void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
-                         int offset)
+                         int tlen, int offset)
 {
         struct inet_sock *inet = inet_sk(skb->sk);
         unsigned int flags = inet->cmsg_flags;
@@ -216,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
         }
 
         if (flags & IP_CMSG_CHECKSUM)
-                ip_cmsg_recv_checksum(msg, skb, offset);
+                ip_cmsg_recv_checksum(msg, skb, tlen, offset);
 }
 EXPORT_SYMBOL(ip_cmsg_recv_offset);
 
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 7cf7d6e380c2..205e2000d395 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -994,7 +994,7 @@ struct proto ping_prot = {
         .init = ping_init_sock,
         .close = ping_close,
         .connect = ip4_datagram_connect,
-        .disconnect = udp_disconnect,
+        .disconnect = __udp_disconnect,
         .setsockopt = ip_setsockopt,
         .getsockopt = ip_getsockopt,
         .sendmsg = ping_v4_sendmsg,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 90a85c955872..ecbe5a7c2d6d 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -918,7 +918,7 @@ struct proto raw_prot = {
         .close = raw_close,
         .destroy = raw_destroy,
         .connect = ip4_datagram_connect,
-        .disconnect = udp_disconnect,
+        .disconnect = __udp_disconnect,
         .ioctl = raw_ioctl,
         .init = raw_init,
         .setsockopt = raw_setsockopt,
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1cb67de106fe..80bc36b25de2 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -96,11 +96,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
                 container_of(table->data, struct net, ipv4.ping_group_range.range);
         unsigned int seq;
         do {
-                seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
+                seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
 
                 *low = data[0];
                 *high = data[1];
-        } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
+        } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }
 
 /* Update system visible IP port range */
@@ -109,10 +109,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
         kgid_t *data = table->data;
         struct net *net =
                 container_of(table->data, struct net, ipv4.ping_group_range.range);
-        write_seqlock(&net->ipv4.ip_local_ports.lock);
+        write_seqlock(&net->ipv4.ping_group_range.lock);
         data[0] = low;
         data[1] = high;
-        write_sequnlock(&net->ipv4.ip_local_ports.lock);
+        write_sequnlock(&net->ipv4.ping_group_range.lock);
 }
 
 /* Validate changes from /proc interface. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bd5e8d10893f..61b7be303eec 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -86,7 +86,6 @@
 
 int sysctl_tcp_tw_reuse __read_mostly;
 int sysctl_tcp_low_latency __read_mostly;
-EXPORT_SYMBOL(sysctl_tcp_low_latency);
 
 #ifdef CONFIG_TCP_MD5SIG
 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
@@ -1887,7 +1886,6 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
         struct tcp_iter_state *st = seq->private;
         struct net *net = seq_file_net(seq);
         struct inet_listen_hashbucket *ilb;
-        struct inet_connection_sock *icsk;
         struct sock *sk = cur;
 
         if (!sk) {
@@ -1909,7 +1907,6 @@ get_sk:
                         continue;
                 if (sk->sk_family == st->family)
                         return sk;
-                icsk = inet_csk(sk);
         }
         spin_unlock_bh(&ilb->lock);
         st->offset = 0;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7d96dc2d3d08..d123d68f4d1d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1322,7 +1322,7 @@ try_again:
                 *addr_len = sizeof(*sin);
         }
         if (inet->cmsg_flags)
-                ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);
+                ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
 
         err = copied;
         if (flags & MSG_TRUNC)
@@ -1345,7 +1345,7 @@ csum_copy_err:
         goto try_again;
 }
 
-int udp_disconnect(struct sock *sk, int flags)
+int __udp_disconnect(struct sock *sk, int flags)
 {
         struct inet_sock *inet = inet_sk(sk);
         /*
@@ -1367,6 +1367,15 @@ int udp_disconnect(struct sock *sk, int flags)
         sk_dst_reset(sk);
         return 0;
 }
+EXPORT_SYMBOL(__udp_disconnect);
+
+int udp_disconnect(struct sock *sk, int flags)
+{
+        lock_sock(sk);
+        __udp_disconnect(sk, flags);
+        release_sock(sk);
+        return 0;
+}
 EXPORT_SYMBOL(udp_disconnect);
 
 void udp_lib_unhash(struct sock *sk)
@@ -2193,7 +2202,7 @@ int udp_abort(struct sock *sk, int err)
 
         sk->sk_err = err;
         sk->sk_error_report(sk);
-        udp_disconnect(sk, 0);
+        __udp_disconnect(sk, 0);
 
         release_sock(sk);
 
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index f9333c963607..b2be1d9757ef 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -295,7 +295,7 @@ unflush:
 
         skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
         skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
-        pp = udp_sk(sk)->gro_receive(sk, head, skb);
+        pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
 
 out_unlock:
         rcu_read_unlock();
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d8983e15f859..060dd9922018 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -147,9 +147,8 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
 }
 #endif
 
-static void __ipv6_regen_rndid(struct inet6_dev *idev);
-static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
-static void ipv6_regen_rndid(unsigned long data);
+static void ipv6_regen_rndid(struct inet6_dev *idev);
+static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
 
 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
 static int ipv6_count_addresses(struct inet6_dev *idev);
@@ -409,9 +408,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
                 goto err_release;
         }
 
-        /* One reference from device. We must do this before
-         * we invoke __ipv6_regen_rndid().
-         */
+        /* One reference from device. */
         in6_dev_hold(ndev);
 
         if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
@@ -425,17 +422,15 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
 #endif
 
         INIT_LIST_HEAD(&ndev->tempaddr_list);
-        setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
+        ndev->desync_factor = U32_MAX;
         if ((dev->flags&IFF_LOOPBACK) ||
             dev->type == ARPHRD_TUNNEL ||
             dev->type == ARPHRD_TUNNEL6 ||
             dev->type == ARPHRD_SIT ||
             dev->type == ARPHRD_NONE) {
                 ndev->cnf.use_tempaddr = -1;
-        } else {
-                in6_dev_hold(ndev);
-                ipv6_regen_rndid((unsigned long) ndev);
-        }
+        } else
+                ipv6_regen_rndid(ndev);
 
         ndev->token = in6addr_any;
 
@@ -447,7 +442,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
         err = addrconf_sysctl_register(ndev);
         if (err) {
                 ipv6_mc_destroy_dev(ndev);
-                del_timer(&ndev->regen_timer);
                 snmp6_unregister_dev(ndev);
                 goto err_release;
         }
@@ -1190,6 +1184,8 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
         int ret = 0;
         u32 addr_flags;
         unsigned long now = jiffies;
+        long max_desync_factor;
+        s32 cnf_temp_preferred_lft;
 
         write_lock_bh(&idev->lock);
         if (ift) {
@@ -1222,23 +1218,42 @@ retry:
         }
         in6_ifa_hold(ifp);
         memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
-        __ipv6_try_regen_rndid(idev, tmpaddr);
+        ipv6_try_regen_rndid(idev, tmpaddr);
         memcpy(&addr.s6_addr[8], idev->rndid, 8);
         age = (now - ifp->tstamp) / HZ;
+
+        regen_advance = idev->cnf.regen_max_retry *
+                        idev->cnf.dad_transmits *
+                        NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
+
+        /* recalculate max_desync_factor each time and update
+         * idev->desync_factor if it's larger
+         */
+        cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
+        max_desync_factor = min_t(__u32,
+                                  idev->cnf.max_desync_factor,
+                                  cnf_temp_preferred_lft - regen_advance);
+
+        if (unlikely(idev->desync_factor > max_desync_factor)) {
+                if (max_desync_factor > 0) {
+                        get_random_bytes(&idev->desync_factor,
+                                         sizeof(idev->desync_factor));
+                        idev->desync_factor %= max_desync_factor;
+                } else {
+                        idev->desync_factor = 0;
+                }
+        }
+
         tmp_valid_lft = min_t(__u32,
                               ifp->valid_lft,
                               idev->cnf.temp_valid_lft + age);
-        tmp_prefered_lft = min_t(__u32,
-                                 ifp->prefered_lft,
-                                 idev->cnf.temp_prefered_lft + age -
-                                 idev->cnf.max_desync_factor);
+        tmp_prefered_lft = cnf_temp_preferred_lft + age -
+                           idev->desync_factor;
+        tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
         tmp_plen = ifp->prefix_len;
         tmp_tstamp = ifp->tstamp;
         spin_unlock_bh(&ifp->lock);
 
-        regen_advance = idev->cnf.regen_max_retry *
-                        idev->cnf.dad_transmits *
-                        NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
         write_unlock_bh(&idev->lock);
 
         /* A temporary address is created only if this calculated Preferred
@@ -2150,7 +2165,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
 }
 
 /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
-static void __ipv6_regen_rndid(struct inet6_dev *idev)
+static void ipv6_regen_rndid(struct inet6_dev *idev)
 {
 regen:
         get_random_bytes(idev->rndid, sizeof(idev->rndid));
@@ -2179,43 +2194,10 @@ regen:
         }
 }
 
-static void ipv6_regen_rndid(unsigned long data)
-{
-        struct inet6_dev *idev = (struct inet6_dev *) data;
-        unsigned long expires;
-
-        rcu_read_lock_bh();
-        write_lock_bh(&idev->lock);
-
-        if (idev->dead)
-                goto out;
-
-        __ipv6_regen_rndid(idev);
-
-        expires = jiffies +
-                idev->cnf.temp_prefered_lft * HZ -
-                idev->cnf.regen_max_retry * idev->cnf.dad_transmits *
-                NEIGH_VAR(idev->nd_parms, RETRANS_TIME) -
-                idev->cnf.max_desync_factor * HZ;
-        if (time_before(expires, jiffies)) {
-                pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
-                        __func__, idev->dev->name);
-                goto out;
-        }
-
-        if (!mod_timer(&idev->regen_timer, expires))
-                in6_dev_hold(idev);
-
-out:
-        write_unlock_bh(&idev->lock);
-        rcu_read_unlock_bh();
-        in6_dev_put(idev);
-}
-
-static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
+static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
 {
         if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
-                __ipv6_regen_rndid(idev);
+                ipv6_regen_rndid(idev);
 }
 
 /*
@@ -2356,7 +2338,7 @@ static void manage_tempaddrs(struct inet6_dev *idev,
                 max_valid = 0;
 
         max_prefered = idev->cnf.temp_prefered_lft -
-                       idev->cnf.max_desync_factor - age;
+                       idev->desync_factor - age;
         if (max_prefered < 0)
                 max_prefered = 0;
 
@@ -3018,7 +3000,7 @@ static void init_loopback(struct net_device *dev)
                  * lo device down, release this obsolete dst and
                  * reallocate a new router for ifa.
                  */
-                if (sp_ifa->rt->dst.obsolete > 0) {
+                if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
                         ip6_rt_put(sp_ifa->rt);
                         sp_ifa->rt = NULL;
                 } else {
@@ -3594,9 +3576,6 @@ restart:
         if (!how)
                 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
 
-        if (how && del_timer(&idev->regen_timer))
-                in6_dev_put(idev);
-
         /* Step 3: clear tempaddr list */
         while (!list_empty(&idev->tempaddr_list)) {
                 ifa = list_first_entry(&idev->tempaddr_list,
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 00cf28ad4565..02761c9fe43e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -96,7 +96,7 @@ EXPORT_SYMBOL(__inet6_lookup_established);
 static inline int compute_score(struct sock *sk, struct net *net,
                                 const unsigned short hnum,
                                 const struct in6_addr *daddr,
-                                const int dif)
+                                const int dif, bool exact_dif)
 {
         int score = -1;
 
@@ -109,7 +109,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
                         return -1;
                 score++;
         }
-        if (sk->sk_bound_dev_if) {
+        if (sk->sk_bound_dev_if || exact_dif) {
                 if (sk->sk_bound_dev_if != dif)
                         return -1;
                 score++;
@@ -131,11 +131,12 @@ struct sock *inet6_lookup_listener(struct net *net,
         unsigned int hash = inet_lhashfn(net, hnum);
         struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
         int score, hiscore = 0, matches = 0, reuseport = 0;
+        bool exact_dif = inet6_exact_dif_match(net, skb);
         struct sock *sk, *result = NULL;
         u32 phash = 0;
 
         sk_for_each(sk, &ilb->head) {
-                score = compute_score(sk, net, hnum, daddr, dif);
+                score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
                 if (score > hiscore) {
                         reuseport = sk->sk_reuseport;
                         if (reuseport) {
@@ -263,13 +264,15 @@ EXPORT_SYMBOL_GPL(inet6_hash_connect);
 
 int inet6_hash(struct sock *sk)
 {
+        int err = 0;
+
         if (sk->sk_state != TCP_CLOSE) {
                 local_bh_disable();
-                __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
+                err = __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
                 local_bh_enable();
         }
 
-        return 0;
+        return err;
 }
 EXPORT_SYMBOL_GPL(inet6_hash);
 
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e7bfd55899a3..1fcf61f1cbc3 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -246,7 +246,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
         skb_gro_postpull_rcsum(skb, iph, nlen);
 
-        pp = ops->callbacks.gro_receive(head, skb);
+        pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
         rcu_read_unlock();
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 6a66adba0c22..87784560dc46 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -157,6 +157,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
157 hash = HASH(&any, local); 157 hash = HASH(&any, local);
158 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { 158 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
159 if (ipv6_addr_equal(local, &t->parms.laddr) && 159 if (ipv6_addr_equal(local, &t->parms.laddr) &&
160 ipv6_addr_any(&t->parms.raddr) &&
160 (t->dev->flags & IFF_UP)) 161 (t->dev->flags & IFF_UP))
161 return t; 162 return t;
162 } 163 }
@@ -164,6 +165,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
164 hash = HASH(remote, &any); 165 hash = HASH(remote, &any);
165 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { 166 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
166 if (ipv6_addr_equal(remote, &t->parms.raddr) && 167 if (ipv6_addr_equal(remote, &t->parms.raddr) &&
168 ipv6_addr_any(&t->parms.laddr) &&
167 (t->dev->flags & IFF_UP)) 169 (t->dev->flags & IFF_UP))
168 return t; 170 return t;
169 } 171 }
@@ -1170,6 +1172,7 @@ route_lookup:
1170 if (err) 1172 if (err)
1171 return err; 1173 return err;
1172 1174
1175 skb->protocol = htons(ETH_P_IPV6);
1173 skb_push(skb, sizeof(struct ipv6hdr)); 1176 skb_push(skb, sizeof(struct ipv6hdr));
1174 skb_reset_network_header(skb); 1177 skb_reset_network_header(skb);
1175 ipv6h = ipv6_hdr(skb); 1178 ipv6h = ipv6_hdr(skb);
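
The two added ipv6_addr_any() checks tighten ip6_tnl_lookup()'s fallback passes: a tunnel found by matching only its local (or only its remote) address is now returned only when its other endpoint really is the wildcard address, so a tunnel configured with both endpoints can no longer be picked up by a packet that matches just one of them. A stand-alone sketch of that match rule, with simplified stand-in types rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Simplified 128-bit address; all zero means "any" (the wildcard). */
struct addr6 { unsigned char s6[16]; };

static bool addr_any(const struct addr6 *a)
{
    static const struct addr6 zero;
    return memcmp(a, &zero, sizeof(zero)) == 0;
}

static bool addr_eq(const struct addr6 *a, const struct addr6 *b)
{
    return memcmp(a, b, sizeof(*a)) == 0;
}

struct tnl { struct addr6 laddr, raddr; bool up; };

/*
 * Fallback match on the local address only: accept the tunnel just when
 * its remote endpoint is the wildcard, mirroring the added
 * ipv6_addr_any(&t->parms.raddr) test in ip6_tnl_lookup().
 */
static bool match_local_only(const struct tnl *t, const struct addr6 *local)
{
    return addr_eq(local, &t->laddr) && addr_any(&t->raddr) && t->up;
}

int main(void)
{
    struct tnl wildcard = { .up = true };        /* laddr/raddr left as "any" */
    struct tnl specific = { .up = true };
    specific.raddr.s6[15] = 1;                   /* fully specified remote */

    struct addr6 local = {{0}};
    printf("wildcard-remote tunnel matches: %d\n",
           match_local_only(&wildcard, &local));
    printf("specific-remote tunnel matches: %d\n",
           match_local_only(&specific, &local));
    return 0;
}
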
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 5330262ab673..636ec56f5f50 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -120,6 +120,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
120static bool setsockopt_needs_rtnl(int optname) 120static bool setsockopt_needs_rtnl(int optname)
121{ 121{
122 switch (optname) { 122 switch (optname) {
123 case IPV6_ADDRFORM:
123 case IPV6_ADD_MEMBERSHIP: 124 case IPV6_ADD_MEMBERSHIP:
124 case IPV6_DROP_MEMBERSHIP: 125 case IPV6_DROP_MEMBERSHIP:
125 case IPV6_JOIN_ANYCAST: 126 case IPV6_JOIN_ANYCAST:
@@ -198,7 +199,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
198 } 199 }
199 200
200 fl6_free_socklist(sk); 201 fl6_free_socklist(sk);
201 ipv6_sock_mc_close(sk); 202 __ipv6_sock_mc_close(sk);
202 203
203 /* 204 /*
204 * Sock is moving from IPv6 to IPv4 (sk_prot), so 205 * Sock is moving from IPv6 to IPv4 (sk_prot), so
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 75c1fc54f188..14a3903f1c82 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -276,16 +276,14 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
276 return idev; 276 return idev;
277} 277}
278 278
279void ipv6_sock_mc_close(struct sock *sk) 279void __ipv6_sock_mc_close(struct sock *sk)
280{ 280{
281 struct ipv6_pinfo *np = inet6_sk(sk); 281 struct ipv6_pinfo *np = inet6_sk(sk);
282 struct ipv6_mc_socklist *mc_lst; 282 struct ipv6_mc_socklist *mc_lst;
283 struct net *net = sock_net(sk); 283 struct net *net = sock_net(sk);
284 284
285 if (!rcu_access_pointer(np->ipv6_mc_list)) 285 ASSERT_RTNL();
286 return;
287 286
288 rtnl_lock();
289 while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) { 287 while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
290 struct net_device *dev; 288 struct net_device *dev;
291 289
@@ -303,8 +301,17 @@ void ipv6_sock_mc_close(struct sock *sk)
303 301
304 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); 302 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
305 kfree_rcu(mc_lst, rcu); 303 kfree_rcu(mc_lst, rcu);
306
307 } 304 }
305}
306
307void ipv6_sock_mc_close(struct sock *sk)
308{
309 struct ipv6_pinfo *np = inet6_sk(sk);
310
311 if (!rcu_access_pointer(np->ipv6_mc_list))
312 return;
313 rtnl_lock();
314 __ipv6_sock_mc_close(sk);
308 rtnl_unlock(); 315 rtnl_unlock();
309} 316}
310 317
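
The mcast change follows a common kernel pattern: the real work moves into a double-underscore helper that asserts the caller already holds the lock (ASSERT_RTNL()), while the original entry point keeps the take-lock/call/unlock wrapper. That lets the IPV6_ADDRFORM path, which now runs under RTNL via setsockopt_needs_rtnl(), call __ipv6_sock_mc_close() directly without deadlocking. A minimal user-space illustration of the same split, using a pthread mutex as a stand-in for RTNL and invented names:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int cfg_locked;           /* poor man's lockdep, for the example only */
static int memberships = 3;

/* Caller must already hold cfg_lock (the RTNL stand-in). */
static void __drop_memberships(void)
{
    assert(cfg_locked);          /* mirrors ASSERT_RTNL() */
    while (memberships > 0) {
        memberships--;
        printf("dropped one membership\n");
    }
}

/* Public entry point: takes the lock itself, like ipv6_sock_mc_close(). */
static void drop_memberships(void)
{
    pthread_mutex_lock(&cfg_lock);
    cfg_locked = 1;
    __drop_memberships();
    cfg_locked = 0;
    pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
    /* A path that already holds the lock (like IPV6_ADDRFORM under RTNL)
     * calls the __ helper directly; everyone else uses the wrapper. */
    pthread_mutex_lock(&cfg_lock);
    cfg_locked = 1;
    __drop_memberships();
    cfg_locked = 0;
    pthread_mutex_unlock(&cfg_lock);

    drop_memberships();
    return 0;
}
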
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 0e983b694ee8..66e2d9dfc43a 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -180,7 +180,7 @@ struct proto pingv6_prot = {
180 .init = ping_init_sock, 180 .init = ping_init_sock,
181 .close = ping_close, 181 .close = ping_close,
182 .connect = ip6_datagram_connect_v6_only, 182 .connect = ip6_datagram_connect_v6_only,
183 .disconnect = udp_disconnect, 183 .disconnect = __udp_disconnect,
184 .setsockopt = ipv6_setsockopt, 184 .setsockopt = ipv6_setsockopt,
185 .getsockopt = ipv6_getsockopt, 185 .getsockopt = ipv6_getsockopt,
186 .sendmsg = ping_v6_sendmsg, 186 .sendmsg = ping_v6_sendmsg,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 54404f08efcc..054a1d84fc5e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1241,7 +1241,7 @@ struct proto rawv6_prot = {
1241 .close = rawv6_close, 1241 .close = rawv6_close,
1242 .destroy = raw6_destroy, 1242 .destroy = raw6_destroy,
1243 .connect = ip6_datagram_connect_v6_only, 1243 .connect = ip6_datagram_connect_v6_only,
1244 .disconnect = udp_disconnect, 1244 .disconnect = __udp_disconnect,
1245 .ioctl = rawv6_ioctl, 1245 .ioctl = rawv6_ioctl,
1246 .init = rawv6_init_sk, 1246 .init = rawv6_init_sk,
1247 .setsockopt = rawv6_setsockopt, 1247 .setsockopt = rawv6_setsockopt,
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 2160d5d009cb..3815e8505ed2 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -456,7 +456,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
456 skb_network_header(head)[nhoff] = skb_transport_header(head)[0]; 456 skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
457 memmove(head->head + sizeof(struct frag_hdr), head->head, 457 memmove(head->head + sizeof(struct frag_hdr), head->head,
458 (head->data - head->head) - sizeof(struct frag_hdr)); 458 (head->data - head->head) - sizeof(struct frag_hdr));
459 head->mac_header += sizeof(struct frag_hdr); 459 if (skb_mac_header_was_set(head))
460 head->mac_header += sizeof(struct frag_hdr);
460 head->network_header += sizeof(struct frag_hdr); 461 head->network_header += sizeof(struct frag_hdr);
461 462
462 skb_reset_transport_header(head); 463 skb_reset_transport_header(head);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index bdbc38e8bf29..947ed1ded026 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -102,11 +102,13 @@ static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
102#ifdef CONFIG_IPV6_ROUTE_INFO 102#ifdef CONFIG_IPV6_ROUTE_INFO
103static struct rt6_info *rt6_add_route_info(struct net *net, 103static struct rt6_info *rt6_add_route_info(struct net *net,
104 const struct in6_addr *prefix, int prefixlen, 104 const struct in6_addr *prefix, int prefixlen,
105 const struct in6_addr *gwaddr, int ifindex, 105 const struct in6_addr *gwaddr,
106 struct net_device *dev,
106 unsigned int pref); 107 unsigned int pref);
107static struct rt6_info *rt6_get_route_info(struct net *net, 108static struct rt6_info *rt6_get_route_info(struct net *net,
108 const struct in6_addr *prefix, int prefixlen, 109 const struct in6_addr *prefix, int prefixlen,
109 const struct in6_addr *gwaddr, int ifindex); 110 const struct in6_addr *gwaddr,
111 struct net_device *dev);
110#endif 112#endif
111 113
112struct uncached_list { 114struct uncached_list {
@@ -656,7 +658,8 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
656 struct net_device *dev = rt->dst.dev; 658 struct net_device *dev = rt->dst.dev;
657 659
658 if (dev && !netif_carrier_ok(dev) && 660 if (dev && !netif_carrier_ok(dev) &&
659 idev->cnf.ignore_routes_with_linkdown) 661 idev->cnf.ignore_routes_with_linkdown &&
662 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
660 goto out; 663 goto out;
661 664
662 if (rt6_check_expired(rt)) 665 if (rt6_check_expired(rt))
@@ -803,7 +806,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
803 rt = rt6_get_dflt_router(gwaddr, dev); 806 rt = rt6_get_dflt_router(gwaddr, dev);
804 else 807 else
805 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, 808 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
806 gwaddr, dev->ifindex); 809 gwaddr, dev);
807 810
808 if (rt && !lifetime) { 811 if (rt && !lifetime) {
809 ip6_del_rt(rt); 812 ip6_del_rt(rt);
@@ -811,8 +814,8 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
811 } 814 }
812 815
813 if (!rt && lifetime) 816 if (!rt && lifetime)
814 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex, 817 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
815 pref); 818 dev, pref);
816 else if (rt) 819 else if (rt)
817 rt->rt6i_flags = RTF_ROUTEINFO | 820 rt->rt6i_flags = RTF_ROUTEINFO |
818 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); 821 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@ -1050,6 +1053,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
1050 int strict = 0; 1053 int strict = 0;
1051 1054
1052 strict |= flags & RT6_LOOKUP_F_IFACE; 1055 strict |= flags & RT6_LOOKUP_F_IFACE;
1056 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
1053 if (net->ipv6.devconf_all->forwarding == 0) 1057 if (net->ipv6.devconf_all->forwarding == 0)
1054 strict |= RT6_LOOKUP_F_REACHABLE; 1058 strict |= RT6_LOOKUP_F_REACHABLE;
1055 1059
@@ -1789,7 +1793,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
1789 }; 1793 };
1790 struct fib6_table *table; 1794 struct fib6_table *table;
1791 struct rt6_info *rt; 1795 struct rt6_info *rt;
1792 int flags = RT6_LOOKUP_F_IFACE; 1796 int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
1793 1797
1794 table = fib6_get_table(net, cfg->fc_table); 1798 table = fib6_get_table(net, cfg->fc_table);
1795 if (!table) 1799 if (!table)
@@ -2325,13 +2329,16 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
2325#ifdef CONFIG_IPV6_ROUTE_INFO 2329#ifdef CONFIG_IPV6_ROUTE_INFO
2326static struct rt6_info *rt6_get_route_info(struct net *net, 2330static struct rt6_info *rt6_get_route_info(struct net *net,
2327 const struct in6_addr *prefix, int prefixlen, 2331 const struct in6_addr *prefix, int prefixlen,
2328 const struct in6_addr *gwaddr, int ifindex) 2332 const struct in6_addr *gwaddr,
2333 struct net_device *dev)
2329{ 2334{
2335 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
2336 int ifindex = dev->ifindex;
2330 struct fib6_node *fn; 2337 struct fib6_node *fn;
2331 struct rt6_info *rt = NULL; 2338 struct rt6_info *rt = NULL;
2332 struct fib6_table *table; 2339 struct fib6_table *table;
2333 2340
2334 table = fib6_get_table(net, RT6_TABLE_INFO); 2341 table = fib6_get_table(net, tb_id);
2335 if (!table) 2342 if (!table)
2336 return NULL; 2343 return NULL;
2337 2344
@@ -2357,12 +2364,13 @@ out:
2357 2364
2358static struct rt6_info *rt6_add_route_info(struct net *net, 2365static struct rt6_info *rt6_add_route_info(struct net *net,
2359 const struct in6_addr *prefix, int prefixlen, 2366 const struct in6_addr *prefix, int prefixlen,
2360 const struct in6_addr *gwaddr, int ifindex, 2367 const struct in6_addr *gwaddr,
2368 struct net_device *dev,
2361 unsigned int pref) 2369 unsigned int pref)
2362{ 2370{
2363 struct fib6_config cfg = { 2371 struct fib6_config cfg = {
2364 .fc_metric = IP6_RT_PRIO_USER, 2372 .fc_metric = IP6_RT_PRIO_USER,
2365 .fc_ifindex = ifindex, 2373 .fc_ifindex = dev->ifindex,
2366 .fc_dst_len = prefixlen, 2374 .fc_dst_len = prefixlen,
2367 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 2375 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
2368 RTF_UP | RTF_PREF(pref), 2376 RTF_UP | RTF_PREF(pref),
@@ -2371,7 +2379,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
2371 .fc_nlinfo.nl_net = net, 2379 .fc_nlinfo.nl_net = net,
2372 }; 2380 };
2373 2381
2374 cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO; 2382 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
2375 cfg.fc_dst = *prefix; 2383 cfg.fc_dst = *prefix;
2376 cfg.fc_gateway = *gwaddr; 2384 cfg.fc_gateway = *gwaddr;
2377 2385
@@ -2381,16 +2389,17 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
2381 2389
2382 ip6_route_add(&cfg); 2390 ip6_route_add(&cfg);
2383 2391
2384 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex); 2392 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
2385} 2393}
2386#endif 2394#endif
2387 2395
2388struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev) 2396struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
2389{ 2397{
2398 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
2390 struct rt6_info *rt; 2399 struct rt6_info *rt;
2391 struct fib6_table *table; 2400 struct fib6_table *table;
2392 2401
2393 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT); 2402 table = fib6_get_table(dev_net(dev), tb_id);
2394 if (!table) 2403 if (!table)
2395 return NULL; 2404 return NULL;
2396 2405
@@ -2424,20 +2433,20 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2424 2433
2425 cfg.fc_gateway = *gwaddr; 2434 cfg.fc_gateway = *gwaddr;
2426 2435
2427 ip6_route_add(&cfg); 2436 if (!ip6_route_add(&cfg)) {
2437 struct fib6_table *table;
2438
2439 table = fib6_get_table(dev_net(dev), cfg.fc_table);
2440 if (table)
2441 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
2442 }
2428 2443
2429 return rt6_get_dflt_router(gwaddr, dev); 2444 return rt6_get_dflt_router(gwaddr, dev);
2430} 2445}
2431 2446
2432void rt6_purge_dflt_routers(struct net *net) 2447static void __rt6_purge_dflt_routers(struct fib6_table *table)
2433{ 2448{
2434 struct rt6_info *rt; 2449 struct rt6_info *rt;
2435 struct fib6_table *table;
2436
2437 /* NOTE: Keep consistent with rt6_get_dflt_router */
2438 table = fib6_get_table(net, RT6_TABLE_DFLT);
2439 if (!table)
2440 return;
2441 2450
2442restart: 2451restart:
2443 read_lock_bh(&table->tb6_lock); 2452 read_lock_bh(&table->tb6_lock);
@@ -2451,6 +2460,27 @@ restart:
2451 } 2460 }
2452 } 2461 }
2453 read_unlock_bh(&table->tb6_lock); 2462 read_unlock_bh(&table->tb6_lock);
2463
2464 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
2465}
2466
2467void rt6_purge_dflt_routers(struct net *net)
2468{
2469 struct fib6_table *table;
2470 struct hlist_head *head;
2471 unsigned int h;
2472
2473 rcu_read_lock();
2474
2475 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
2476 head = &net->ipv6.fib_table_hash[h];
2477 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
2478 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
2479 __rt6_purge_dflt_routers(table);
2480 }
2481 }
2482
2483 rcu_read_unlock();
2454} 2484}
2455 2485
2456static void rtmsg_to_fib6_config(struct net *net, 2486static void rtmsg_to_fib6_config(struct net *net,
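
With default routes no longer confined to RT6_TABLE_DFLT (the table now comes from l3mdev_fib_table() when the device has an L3 master), rt6_purge_dflt_routers() walks every FIB table and purges only those whose RT6_TABLE_HAS_DFLT_ROUTER flag was set when a default router was added. A small sketch of that "mark on add, filter on purge" idea over a plain array of tables; every name below is invented for the example:

#include <stdio.h>

#define HAS_DFLT_ROUTER 0x1

struct table {
    unsigned int id;
    unsigned int flags;
    int dflt_routes;
};

static void add_dflt_route(struct table *t)
{
    t->dflt_routes++;
    t->flags |= HAS_DFLT_ROUTER;            /* remember this table needs purging */
}

static void purge_table(struct table *t)
{
    t->dflt_routes = 0;
    t->flags &= ~HAS_DFLT_ROUTER;
}

static void purge_all(struct table *tables, int n)
{
    int i;

    for (i = 0; i < n; i++)
        if (tables[i].flags & HAS_DFLT_ROUTER)   /* skip untouched tables */
            purge_table(&tables[i]);
}

int main(void)
{
    struct table tables[3] = { { .id = 254 }, { .id = 100 }, { .id = 101 } };

    add_dflt_route(&tables[0]);             /* main table */
    add_dflt_route(&tables[2]);             /* a per-VRF table */
    purge_all(tables, 3);

    for (int i = 0; i < 3; i++)
        printf("table %u: %d default routes\n",
               tables[i].id, tables[i].dflt_routes);
    return 0;
}
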
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9aa7c1c7a9ce..b2ef061e6836 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -427,7 +427,8 @@ try_again:
427 427
428 if (is_udp4) { 428 if (is_udp4) {
429 if (inet->cmsg_flags) 429 if (inet->cmsg_flags)
430 ip_cmsg_recv(msg, skb); 430 ip_cmsg_recv_offset(msg, skb,
431 sizeof(struct udphdr), off);
431 } else { 432 } else {
432 if (np->rxopt.all) 433 if (np->rxopt.all)
433 ip6_datagram_recv_specific_ctl(sk, msg, skb); 434 ip6_datagram_recv_specific_ctl(sk, msg, skb);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 42de4ccd159f..fce25afb652a 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -338,7 +338,7 @@ static int l2tp_ip_disconnect(struct sock *sk, int flags)
338 if (sock_flag(sk, SOCK_ZAPPED)) 338 if (sock_flag(sk, SOCK_ZAPPED))
339 return 0; 339 return 0;
340 340
341 return udp_disconnect(sk, flags); 341 return __udp_disconnect(sk, flags);
342} 342}
343 343
344static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, 344static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index ea2ae6664cc8..ad3468c32b53 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -410,7 +410,7 @@ static int l2tp_ip6_disconnect(struct sock *sk, int flags)
410 if (sock_flag(sk, SOCK_ZAPPED)) 410 if (sock_flag(sk, SOCK_ZAPPED))
411 return 0; 411 return 0;
412 412
413 return udp_disconnect(sk, flags); 413 return __udp_disconnect(sk, flags);
414} 414}
415 415
416static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr, 416static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 7663c28ba353..a4e0d59a40dd 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -18,21 +18,24 @@
18#include "key.h" 18#include "key.h"
19#include "aes_ccm.h" 19#include "aes_ccm.h"
20 20
21void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, 21int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
22 u8 *data, size_t data_len, u8 *mic, 22 u8 *data, size_t data_len, u8 *mic,
23 size_t mic_len) 23 size_t mic_len)
24{ 24{
25 struct scatterlist sg[3]; 25 struct scatterlist sg[3];
26 struct aead_request *aead_req;
27 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
28 u8 *__aad;
26 29
27 char aead_req_data[sizeof(struct aead_request) + 30 aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
28 crypto_aead_reqsize(tfm)] 31 if (!aead_req)
29 __aligned(__alignof__(struct aead_request)); 32 return -ENOMEM;
30 struct aead_request *aead_req = (void *) aead_req_data;
31 33
32 memset(aead_req, 0, sizeof(aead_req_data)); 34 __aad = (u8 *)aead_req + reqsize;
35 memcpy(__aad, aad, CCM_AAD_LEN);
33 36
34 sg_init_table(sg, 3); 37 sg_init_table(sg, 3);
35 sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); 38 sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
36 sg_set_buf(&sg[1], data, data_len); 39 sg_set_buf(&sg[1], data, data_len);
37 sg_set_buf(&sg[2], mic, mic_len); 40 sg_set_buf(&sg[2], mic, mic_len);
38 41
@@ -41,6 +44,9 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
41 aead_request_set_ad(aead_req, sg[0].length); 44 aead_request_set_ad(aead_req, sg[0].length);
42 45
43 crypto_aead_encrypt(aead_req); 46 crypto_aead_encrypt(aead_req);
47 kzfree(aead_req);
48
49 return 0;
44} 50}
45 51
46int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, 52int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
@@ -48,18 +54,23 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
48 size_t mic_len) 54 size_t mic_len)
49{ 55{
50 struct scatterlist sg[3]; 56 struct scatterlist sg[3];
51 char aead_req_data[sizeof(struct aead_request) + 57 struct aead_request *aead_req;
52 crypto_aead_reqsize(tfm)] 58 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
53 __aligned(__alignof__(struct aead_request)); 59 u8 *__aad;
54 struct aead_request *aead_req = (void *) aead_req_data; 60 int err;
55 61
56 if (data_len == 0) 62 if (data_len == 0)
57 return -EINVAL; 63 return -EINVAL;
58 64
59 memset(aead_req, 0, sizeof(aead_req_data)); 65 aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
66 if (!aead_req)
67 return -ENOMEM;
68
69 __aad = (u8 *)aead_req + reqsize;
70 memcpy(__aad, aad, CCM_AAD_LEN);
60 71
61 sg_init_table(sg, 3); 72 sg_init_table(sg, 3);
62 sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); 73 sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
63 sg_set_buf(&sg[1], data, data_len); 74 sg_set_buf(&sg[1], data, data_len);
64 sg_set_buf(&sg[2], mic, mic_len); 75 sg_set_buf(&sg[2], mic, mic_len);
65 76
@@ -67,7 +78,10 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
67 aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0); 78 aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0);
68 aead_request_set_ad(aead_req, sg[0].length); 79 aead_request_set_ad(aead_req, sg[0].length);
69 80
70 return crypto_aead_decrypt(aead_req); 81 err = crypto_aead_decrypt(aead_req);
82 kzfree(aead_req);
83
84 return err;
71} 85}
72 86
73struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[], 87struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
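
The mac80211 change replaces a variable-length aead_request on the stack by a single heap allocation that holds the request followed by a private copy of the AAD; stack buffers cannot safely be handed to scatterlists once the kernel stack may be vmalloc'ed, and the on-stack AAD went straight into the SG list. The pattern is simply "allocate header-plus-trailing-buffer, then point into the tail". A generic user-space sketch of that layout, with plain calloc/free standing in for kzalloc/kzfree and an invented request struct:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define AAD_LEN 32

struct request {                 /* stand-in for struct aead_request + reqsize */
    int dummy_state[8];
};

int main(void)
{
    unsigned char aad[AAD_LEN] = "associated data";
    size_t reqsize = sizeof(struct request);

    /* One allocation: the request header, then AAD_LEN bytes for the AAD. */
    struct request *req = calloc(1, reqsize + AAD_LEN);
    if (!req)
        return 1;

    unsigned char *priv_aad = (unsigned char *)req + reqsize;
    memcpy(priv_aad, aad, AAD_LEN);

    /* ... hand `req` and `priv_aad` to the crypto layer here ... */
    printf("AAD copy starts with: %s\n", priv_aad);

    /* The kernel uses kzfree() so any key material is wiped before freeing. */
    memset(req, 0, reqsize + AAD_LEN);
    free(req);
    return 0;
}
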
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
index 6a73d1e4d186..fcd3254c5cf0 100644
--- a/net/mac80211/aes_ccm.h
+++ b/net/mac80211/aes_ccm.h
@@ -12,12 +12,14 @@
12 12
13#include <linux/crypto.h> 13#include <linux/crypto.h>
14 14
15#define CCM_AAD_LEN 32
16
15struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[], 17struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
16 size_t key_len, 18 size_t key_len,
17 size_t mic_len); 19 size_t mic_len);
18void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, 20int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
19 u8 *data, size_t data_len, u8 *mic, 21 u8 *data, size_t data_len, u8 *mic,
20 size_t mic_len); 22 size_t mic_len);
21int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, 23int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
22 u8 *data, size_t data_len, u8 *mic, 24 u8 *data, size_t data_len, u8 *mic,
23 size_t mic_len); 25 size_t mic_len);
diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c
index 3afe361fd27c..8a4397cc1b08 100644
--- a/net/mac80211/aes_gcm.c
+++ b/net/mac80211/aes_gcm.c
@@ -15,20 +15,23 @@
15#include "key.h" 15#include "key.h"
16#include "aes_gcm.h" 16#include "aes_gcm.h"
17 17
18void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, 18int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
19 u8 *data, size_t data_len, u8 *mic) 19 u8 *data, size_t data_len, u8 *mic)
20{ 20{
21 struct scatterlist sg[3]; 21 struct scatterlist sg[3];
22 struct aead_request *aead_req;
23 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
24 u8 *__aad;
22 25
23 char aead_req_data[sizeof(struct aead_request) + 26 aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
24 crypto_aead_reqsize(tfm)] 27 if (!aead_req)
25 __aligned(__alignof__(struct aead_request)); 28 return -ENOMEM;
26 struct aead_request *aead_req = (void *)aead_req_data;
27 29
28 memset(aead_req, 0, sizeof(aead_req_data)); 30 __aad = (u8 *)aead_req + reqsize;
31 memcpy(__aad, aad, GCM_AAD_LEN);
29 32
30 sg_init_table(sg, 3); 33 sg_init_table(sg, 3);
31 sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); 34 sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
32 sg_set_buf(&sg[1], data, data_len); 35 sg_set_buf(&sg[1], data, data_len);
33 sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN); 36 sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
34 37
@@ -37,24 +40,31 @@ void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
37 aead_request_set_ad(aead_req, sg[0].length); 40 aead_request_set_ad(aead_req, sg[0].length);
38 41
39 crypto_aead_encrypt(aead_req); 42 crypto_aead_encrypt(aead_req);
43 kzfree(aead_req);
44 return 0;
40} 45}
41 46
42int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, 47int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
43 u8 *data, size_t data_len, u8 *mic) 48 u8 *data, size_t data_len, u8 *mic)
44{ 49{
45 struct scatterlist sg[3]; 50 struct scatterlist sg[3];
46 char aead_req_data[sizeof(struct aead_request) + 51 struct aead_request *aead_req;
47 crypto_aead_reqsize(tfm)] 52 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
48 __aligned(__alignof__(struct aead_request)); 53 u8 *__aad;
49 struct aead_request *aead_req = (void *)aead_req_data; 54 int err;
50 55
51 if (data_len == 0) 56 if (data_len == 0)
52 return -EINVAL; 57 return -EINVAL;
53 58
54 memset(aead_req, 0, sizeof(aead_req_data)); 59 aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
60 if (!aead_req)
61 return -ENOMEM;
62
63 __aad = (u8 *)aead_req + reqsize;
64 memcpy(__aad, aad, GCM_AAD_LEN);
55 65
56 sg_init_table(sg, 3); 66 sg_init_table(sg, 3);
57 sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad)); 67 sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
58 sg_set_buf(&sg[1], data, data_len); 68 sg_set_buf(&sg[1], data, data_len);
59 sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN); 69 sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
60 70
@@ -63,7 +73,10 @@ int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
63 data_len + IEEE80211_GCMP_MIC_LEN, j_0); 73 data_len + IEEE80211_GCMP_MIC_LEN, j_0);
64 aead_request_set_ad(aead_req, sg[0].length); 74 aead_request_set_ad(aead_req, sg[0].length);
65 75
66 return crypto_aead_decrypt(aead_req); 76 err = crypto_aead_decrypt(aead_req);
77 kzfree(aead_req);
78
79 return err;
67} 80}
68 81
69struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[], 82struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_gcm.h b/net/mac80211/aes_gcm.h
index 1347fda6b76a..55aed5352494 100644
--- a/net/mac80211/aes_gcm.h
+++ b/net/mac80211/aes_gcm.h
@@ -11,8 +11,10 @@
11 11
12#include <linux/crypto.h> 12#include <linux/crypto.h>
13 13
14void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, 14#define GCM_AAD_LEN 32
15 u8 *data, size_t data_len, u8 *mic); 15
16int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
17 u8 *data, size_t data_len, u8 *mic);
16int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, 18int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
17 u8 *data, size_t data_len, u8 *mic); 19 u8 *data, size_t data_len, u8 *mic);
18struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[], 20struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
index 3ddd927aaf30..bd72a862ddb7 100644
--- a/net/mac80211/aes_gmac.c
+++ b/net/mac80211/aes_gmac.c
@@ -17,28 +17,27 @@
17#include "key.h" 17#include "key.h"
18#include "aes_gmac.h" 18#include "aes_gmac.h"
19 19
20#define GMAC_MIC_LEN 16
21#define GMAC_NONCE_LEN 12
22#define AAD_LEN 20
23
24int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, 20int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
25 const u8 *data, size_t data_len, u8 *mic) 21 const u8 *data, size_t data_len, u8 *mic)
26{ 22{
27 struct scatterlist sg[4]; 23 struct scatterlist sg[4];
28 char aead_req_data[sizeof(struct aead_request) + 24 u8 *zero, *__aad, iv[AES_BLOCK_SIZE];
29 crypto_aead_reqsize(tfm)] 25 struct aead_request *aead_req;
30 __aligned(__alignof__(struct aead_request)); 26 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
31 struct aead_request *aead_req = (void *)aead_req_data;
32 u8 zero[GMAC_MIC_LEN], iv[AES_BLOCK_SIZE];
33 27
34 if (data_len < GMAC_MIC_LEN) 28 if (data_len < GMAC_MIC_LEN)
35 return -EINVAL; 29 return -EINVAL;
36 30
37 memset(aead_req, 0, sizeof(aead_req_data)); 31 aead_req = kzalloc(reqsize + GMAC_MIC_LEN + GMAC_AAD_LEN, GFP_ATOMIC);
32 if (!aead_req)
33 return -ENOMEM;
34
35 zero = (u8 *)aead_req + reqsize;
36 __aad = zero + GMAC_MIC_LEN;
37 memcpy(__aad, aad, GMAC_AAD_LEN);
38 38
39 memset(zero, 0, GMAC_MIC_LEN);
40 sg_init_table(sg, 4); 39 sg_init_table(sg, 4);
41 sg_set_buf(&sg[0], aad, AAD_LEN); 40 sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN);
42 sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN); 41 sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN);
43 sg_set_buf(&sg[2], zero, GMAC_MIC_LEN); 42 sg_set_buf(&sg[2], zero, GMAC_MIC_LEN);
44 sg_set_buf(&sg[3], mic, GMAC_MIC_LEN); 43 sg_set_buf(&sg[3], mic, GMAC_MIC_LEN);
@@ -49,9 +48,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
49 48
50 aead_request_set_tfm(aead_req, tfm); 49 aead_request_set_tfm(aead_req, tfm);
51 aead_request_set_crypt(aead_req, sg, sg, 0, iv); 50 aead_request_set_crypt(aead_req, sg, sg, 0, iv);
52 aead_request_set_ad(aead_req, AAD_LEN + data_len); 51 aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len);
53 52
54 crypto_aead_encrypt(aead_req); 53 crypto_aead_encrypt(aead_req);
54 kzfree(aead_req);
55 55
56 return 0; 56 return 0;
57} 57}
diff --git a/net/mac80211/aes_gmac.h b/net/mac80211/aes_gmac.h
index d328204d73a8..32e6442c95be 100644
--- a/net/mac80211/aes_gmac.h
+++ b/net/mac80211/aes_gmac.h
@@ -11,6 +11,10 @@
11 11
12#include <linux/crypto.h> 12#include <linux/crypto.h>
13 13
14#define GMAC_AAD_LEN 20
15#define GMAC_MIC_LEN 16
16#define GMAC_NONCE_LEN 12
17
14struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[], 18struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
15 size_t key_len); 19 size_t key_len);
16int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce, 20int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index c3f610bba3fe..eede5c6db8d5 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -820,7 +820,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
820 mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) 820 mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
821 break; 821 break;
822 rcu_read_lock(); 822 rcu_read_lock();
823 sta = sta_info_get(sdata, mgmt->da); 823 sta = sta_info_get_bss(sdata, mgmt->da);
824 rcu_read_unlock(); 824 rcu_read_unlock();
825 if (!sta) 825 if (!sta)
826 return -ENOLINK; 826 return -ENOLINK;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6175db385ba7..a47bbc973f2d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2298,6 +2298,8 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2298 __le16 fc = hdr->frame_control; 2298 __le16 fc = hdr->frame_control;
2299 struct sk_buff_head frame_list; 2299 struct sk_buff_head frame_list;
2300 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2300 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2301 struct ethhdr ethhdr;
2302 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
2301 2303
2302 if (unlikely(!ieee80211_is_data(fc))) 2304 if (unlikely(!ieee80211_is_data(fc)))
2303 return RX_CONTINUE; 2305 return RX_CONTINUE;
@@ -2308,24 +2310,53 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2308 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 2310 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2309 return RX_CONTINUE; 2311 return RX_CONTINUE;
2310 2312
2311 if (ieee80211_has_a4(hdr->frame_control) && 2313 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2312 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2314 switch (rx->sdata->vif.type) {
2313 !rx->sdata->u.vlan.sta) 2315 case NL80211_IFTYPE_AP_VLAN:
2314 return RX_DROP_UNUSABLE; 2316 if (!rx->sdata->u.vlan.sta)
2317 return RX_DROP_UNUSABLE;
2318 break;
2319 case NL80211_IFTYPE_STATION:
2320 if (!rx->sdata->u.mgd.use_4addr)
2321 return RX_DROP_UNUSABLE;
2322 break;
2323 default:
2324 return RX_DROP_UNUSABLE;
2325 }
2326 check_da = NULL;
2327 check_sa = NULL;
2328 } else switch (rx->sdata->vif.type) {
2329 case NL80211_IFTYPE_AP:
2330 case NL80211_IFTYPE_AP_VLAN:
2331 check_da = NULL;
2332 break;
2333 case NL80211_IFTYPE_STATION:
2334 if (!rx->sta ||
2335 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2336 check_sa = NULL;
2337 break;
2338 case NL80211_IFTYPE_MESH_POINT:
2339 check_sa = NULL;
2340 break;
2341 default:
2342 break;
2343 }
2315 2344
2316 if (is_multicast_ether_addr(hdr->addr1) && 2345 if (is_multicast_ether_addr(hdr->addr1))
2317 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2318 rx->sdata->u.vlan.sta) ||
2319 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
2320 rx->sdata->u.mgd.use_4addr)))
2321 return RX_DROP_UNUSABLE; 2346 return RX_DROP_UNUSABLE;
2322 2347
2323 skb->dev = dev; 2348 skb->dev = dev;
2324 __skb_queue_head_init(&frame_list); 2349 __skb_queue_head_init(&frame_list);
2325 2350
2351 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
2352 rx->sdata->vif.addr,
2353 rx->sdata->vif.type))
2354 return RX_DROP_UNUSABLE;
2355
2326 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 2356 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2327 rx->sdata->vif.type, 2357 rx->sdata->vif.type,
2328 rx->local->hw.extra_tx_headroom, true); 2358 rx->local->hw.extra_tx_headroom,
2359 check_da, check_sa);
2329 2360
2330 while (!skb_queue_empty(&frame_list)) { 2361 while (!skb_queue_empty(&frame_list)) {
2331 rx->skb = __skb_dequeue(&frame_list); 2362 rx->skb = __skb_dequeue(&frame_list);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index b48c1e13e281..42ce9bd4426f 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -405,7 +405,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
405 u8 *pos; 405 u8 *pos;
406 u8 pn[6]; 406 u8 pn[6];
407 u64 pn64; 407 u64 pn64;
408 u8 aad[2 * AES_BLOCK_SIZE]; 408 u8 aad[CCM_AAD_LEN];
409 u8 b_0[AES_BLOCK_SIZE]; 409 u8 b_0[AES_BLOCK_SIZE];
410 410
411 if (info->control.hw_key && 411 if (info->control.hw_key &&
@@ -461,10 +461,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
461 461
462 pos += IEEE80211_CCMP_HDR_LEN; 462 pos += IEEE80211_CCMP_HDR_LEN;
463 ccmp_special_blocks(skb, pn, b_0, aad); 463 ccmp_special_blocks(skb, pn, b_0, aad);
464 ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len, 464 return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
465 skb_put(skb, mic_len), mic_len); 465 skb_put(skb, mic_len), mic_len);
466
467 return 0;
468} 466}
469 467
470 468
@@ -639,7 +637,7 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
639 u8 *pos; 637 u8 *pos;
640 u8 pn[6]; 638 u8 pn[6];
641 u64 pn64; 639 u64 pn64;
642 u8 aad[2 * AES_BLOCK_SIZE]; 640 u8 aad[GCM_AAD_LEN];
643 u8 j_0[AES_BLOCK_SIZE]; 641 u8 j_0[AES_BLOCK_SIZE];
644 642
645 if (info->control.hw_key && 643 if (info->control.hw_key &&
@@ -696,10 +694,8 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
696 694
697 pos += IEEE80211_GCMP_HDR_LEN; 695 pos += IEEE80211_GCMP_HDR_LEN;
698 gcmp_special_blocks(skb, pn, j_0, aad); 696 gcmp_special_blocks(skb, pn, j_0, aad);
699 ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len, 697 return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
700 skb_put(skb, IEEE80211_GCMP_MIC_LEN)); 698 skb_put(skb, IEEE80211_GCMP_MIC_LEN));
701
702 return 0;
703} 699}
704 700
705ieee80211_tx_result 701ieee80211_tx_result
@@ -1123,9 +1119,9 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
1123 struct ieee80211_key *key = tx->key; 1119 struct ieee80211_key *key = tx->key;
1124 struct ieee80211_mmie_16 *mmie; 1120 struct ieee80211_mmie_16 *mmie;
1125 struct ieee80211_hdr *hdr; 1121 struct ieee80211_hdr *hdr;
1126 u8 aad[20]; 1122 u8 aad[GMAC_AAD_LEN];
1127 u64 pn64; 1123 u64 pn64;
1128 u8 nonce[12]; 1124 u8 nonce[GMAC_NONCE_LEN];
1129 1125
1130 if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) 1126 if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
1131 return TX_DROP; 1127 return TX_DROP;
@@ -1171,7 +1167,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
1171 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1167 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1172 struct ieee80211_key *key = rx->key; 1168 struct ieee80211_key *key = rx->key;
1173 struct ieee80211_mmie_16 *mmie; 1169 struct ieee80211_mmie_16 *mmie;
1174 u8 aad[20], mic[16], ipn[6], nonce[12]; 1170 u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
1175 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1171 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1176 1172
1177 if (!ieee80211_is_mgmt(hdr->frame_control)) 1173 if (!ieee80211_is_mgmt(hdr->frame_control))
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 13290a70fa71..1308a56f2591 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -246,6 +246,7 @@ enum {
246 ncsi_dev_state_config_gls, 246 ncsi_dev_state_config_gls,
247 ncsi_dev_state_config_done, 247 ncsi_dev_state_config_done,
248 ncsi_dev_state_suspend_select = 0x0401, 248 ncsi_dev_state_suspend_select = 0x0401,
249 ncsi_dev_state_suspend_gls,
249 ncsi_dev_state_suspend_dcnt, 250 ncsi_dev_state_suspend_dcnt,
250 ncsi_dev_state_suspend_dc, 251 ncsi_dev_state_suspend_dc,
251 ncsi_dev_state_suspend_deselect, 252 ncsi_dev_state_suspend_deselect,
@@ -264,6 +265,7 @@ struct ncsi_dev_priv {
264#endif 265#endif
265 unsigned int package_num; /* Number of packages */ 266 unsigned int package_num; /* Number of packages */
266 struct list_head packages; /* List of packages */ 267 struct list_head packages; /* List of packages */
268 struct ncsi_channel *hot_channel; /* Channel was ever active */
267 struct ncsi_request requests[256]; /* Request table */ 269 struct ncsi_request requests[256]; /* Request table */
268 unsigned int request_id; /* Last used request ID */ 270 unsigned int request_id; /* Last used request ID */
269#define NCSI_REQ_START_IDX 1 271#define NCSI_REQ_START_IDX 1
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index b41a6617d498..6898e7229285 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -141,23 +141,35 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
141 return -ENODEV; 141 return -ENODEV;
142 142
143 /* If the channel is active one, we need reconfigure it */ 143 /* If the channel is active one, we need reconfigure it */
144 spin_lock_irqsave(&nc->lock, flags);
144 ncm = &nc->modes[NCSI_MODE_LINK]; 145 ncm = &nc->modes[NCSI_MODE_LINK];
145 hncdsc = (struct ncsi_aen_hncdsc_pkt *)h; 146 hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
146 ncm->data[3] = ntohl(hncdsc->status); 147 ncm->data[3] = ntohl(hncdsc->status);
147 if (!list_empty(&nc->link) || 148 if (!list_empty(&nc->link) ||
148 nc->state != NCSI_CHANNEL_ACTIVE || 149 nc->state != NCSI_CHANNEL_ACTIVE) {
149 (ncm->data[3] & 0x1)) 150 spin_unlock_irqrestore(&nc->lock, flags);
150 return 0; 151 return 0;
152 }
151 153
152 if (ndp->flags & NCSI_DEV_HWA) 154 spin_unlock_irqrestore(&nc->lock, flags);
155 if (!(ndp->flags & NCSI_DEV_HWA) && !(ncm->data[3] & 0x1))
153 ndp->flags |= NCSI_DEV_RESHUFFLE; 156 ndp->flags |= NCSI_DEV_RESHUFFLE;
154 157
155 /* If this channel is the active one and the link doesn't 158 /* If this channel is the active one and the link doesn't
156 * work, we have to choose another channel to be active one. 159 * work, we have to choose another channel to be active one.
157 * The logic here is exactly similar to what we do when link 160 * The logic here is exactly similar to what we do when link
158 * is down on the active channel. 161 * is down on the active channel.
162 *
 163 * On the other hand, we need to configure it when the host
 164 * driver state on the active channel becomes ready.
159 */ 165 */
160 ncsi_stop_channel_monitor(nc); 166 ncsi_stop_channel_monitor(nc);
167
168 spin_lock_irqsave(&nc->lock, flags);
169 nc->state = (ncm->data[3] & 0x1) ? NCSI_CHANNEL_INACTIVE :
170 NCSI_CHANNEL_ACTIVE;
171 spin_unlock_irqrestore(&nc->lock, flags);
172
161 spin_lock_irqsave(&ndp->lock, flags); 173 spin_lock_irqsave(&ndp->lock, flags);
162 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 174 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
163 spin_unlock_irqrestore(&ndp->lock, flags); 175 spin_unlock_irqrestore(&ndp->lock, flags);
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 5e509e547c2d..a3bd5fa8ad09 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -540,42 +540,86 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
540 nd->state = ncsi_dev_state_suspend_select; 540 nd->state = ncsi_dev_state_suspend_select;
541 /* Fall through */ 541 /* Fall through */
542 case ncsi_dev_state_suspend_select: 542 case ncsi_dev_state_suspend_select:
543 case ncsi_dev_state_suspend_dcnt:
544 case ncsi_dev_state_suspend_dc:
545 case ncsi_dev_state_suspend_deselect:
546 ndp->pending_req_num = 1; 543 ndp->pending_req_num = 1;
547 544
548 np = ndp->active_package; 545 nca.type = NCSI_PKT_CMD_SP;
549 nc = ndp->active_channel;
550 nca.package = np->id; 546 nca.package = np->id;
551 if (nd->state == ncsi_dev_state_suspend_select) { 547 nca.channel = NCSI_RESERVED_CHANNEL;
552 nca.type = NCSI_PKT_CMD_SP; 548 if (ndp->flags & NCSI_DEV_HWA)
553 nca.channel = NCSI_RESERVED_CHANNEL; 549 nca.bytes[0] = 0;
554 if (ndp->flags & NCSI_DEV_HWA) 550 else
555 nca.bytes[0] = 0; 551 nca.bytes[0] = 1;
556 else 552
557 nca.bytes[0] = 1; 553 /* To retrieve the last link states of channels in current
554 * package when current active channel needs fail over to
555 * another one. It means we will possibly select another
556 * channel as next active one. The link states of channels
557 * are most important factor of the selection. So we need
558 * accurate link states. Unfortunately, the link states on
559 * inactive channels can't be updated with LSC AEN in time.
560 */
561 if (ndp->flags & NCSI_DEV_RESHUFFLE)
562 nd->state = ncsi_dev_state_suspend_gls;
563 else
558 nd->state = ncsi_dev_state_suspend_dcnt; 564 nd->state = ncsi_dev_state_suspend_dcnt;
559 } else if (nd->state == ncsi_dev_state_suspend_dcnt) { 565 ret = ncsi_xmit_cmd(&nca);
560 nca.type = NCSI_PKT_CMD_DCNT; 566 if (ret)
561 nca.channel = nc->id; 567 goto error;
562 nd->state = ncsi_dev_state_suspend_dc; 568
563 } else if (nd->state == ncsi_dev_state_suspend_dc) { 569 break;
564 nca.type = NCSI_PKT_CMD_DC; 570 case ncsi_dev_state_suspend_gls:
571 ndp->pending_req_num = np->channel_num;
572
573 nca.type = NCSI_PKT_CMD_GLS;
574 nca.package = np->id;
575
576 nd->state = ncsi_dev_state_suspend_dcnt;
577 NCSI_FOR_EACH_CHANNEL(np, nc) {
565 nca.channel = nc->id; 578 nca.channel = nc->id;
566 nca.bytes[0] = 1; 579 ret = ncsi_xmit_cmd(&nca);
567 nd->state = ncsi_dev_state_suspend_deselect; 580 if (ret)
568 } else if (nd->state == ncsi_dev_state_suspend_deselect) { 581 goto error;
569 nca.type = NCSI_PKT_CMD_DP;
570 nca.channel = NCSI_RESERVED_CHANNEL;
571 nd->state = ncsi_dev_state_suspend_done;
572 } 582 }
573 583
584 break;
585 case ncsi_dev_state_suspend_dcnt:
586 ndp->pending_req_num = 1;
587
588 nca.type = NCSI_PKT_CMD_DCNT;
589 nca.package = np->id;
590 nca.channel = nc->id;
591
592 nd->state = ncsi_dev_state_suspend_dc;
574 ret = ncsi_xmit_cmd(&nca); 593 ret = ncsi_xmit_cmd(&nca);
575 if (ret) { 594 if (ret)
576 nd->state = ncsi_dev_state_functional; 595 goto error;
577 return; 596
578 } 597 break;
598 case ncsi_dev_state_suspend_dc:
599 ndp->pending_req_num = 1;
600
601 nca.type = NCSI_PKT_CMD_DC;
602 nca.package = np->id;
603 nca.channel = nc->id;
604 nca.bytes[0] = 1;
605
606 nd->state = ncsi_dev_state_suspend_deselect;
607 ret = ncsi_xmit_cmd(&nca);
608 if (ret)
609 goto error;
610
611 break;
612 case ncsi_dev_state_suspend_deselect:
613 ndp->pending_req_num = 1;
614
615 nca.type = NCSI_PKT_CMD_DP;
616 nca.package = np->id;
617 nca.channel = NCSI_RESERVED_CHANNEL;
618
619 nd->state = ncsi_dev_state_suspend_done;
620 ret = ncsi_xmit_cmd(&nca);
621 if (ret)
622 goto error;
579 623
580 break; 624 break;
581 case ncsi_dev_state_suspend_done: 625 case ncsi_dev_state_suspend_done:
@@ -589,6 +633,10 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
589 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n", 633 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
590 nd->state); 634 nd->state);
591 } 635 }
636
637 return;
638error:
639 nd->state = ncsi_dev_state_functional;
592} 640}
593 641
594static void ncsi_configure_channel(struct ncsi_dev_priv *ndp) 642static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
@@ -597,6 +645,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
597 struct net_device *dev = nd->dev; 645 struct net_device *dev = nd->dev;
598 struct ncsi_package *np = ndp->active_package; 646 struct ncsi_package *np = ndp->active_package;
599 struct ncsi_channel *nc = ndp->active_channel; 647 struct ncsi_channel *nc = ndp->active_channel;
648 struct ncsi_channel *hot_nc = NULL;
600 struct ncsi_cmd_arg nca; 649 struct ncsi_cmd_arg nca;
601 unsigned char index; 650 unsigned char index;
602 unsigned long flags; 651 unsigned long flags;
@@ -702,12 +751,20 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
702 break; 751 break;
703 case ncsi_dev_state_config_done: 752 case ncsi_dev_state_config_done:
704 spin_lock_irqsave(&nc->lock, flags); 753 spin_lock_irqsave(&nc->lock, flags);
705 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) 754 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
755 hot_nc = nc;
706 nc->state = NCSI_CHANNEL_ACTIVE; 756 nc->state = NCSI_CHANNEL_ACTIVE;
707 else 757 } else {
758 hot_nc = NULL;
708 nc->state = NCSI_CHANNEL_INACTIVE; 759 nc->state = NCSI_CHANNEL_INACTIVE;
760 }
709 spin_unlock_irqrestore(&nc->lock, flags); 761 spin_unlock_irqrestore(&nc->lock, flags);
710 762
763 /* Update the hot channel */
764 spin_lock_irqsave(&ndp->lock, flags);
765 ndp->hot_channel = hot_nc;
766 spin_unlock_irqrestore(&ndp->lock, flags);
767
711 ncsi_start_channel_monitor(nc); 768 ncsi_start_channel_monitor(nc);
712 ncsi_process_next_channel(ndp); 769 ncsi_process_next_channel(ndp);
713 break; 770 break;
@@ -725,10 +782,14 @@ error:
725static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp) 782static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
726{ 783{
727 struct ncsi_package *np; 784 struct ncsi_package *np;
728 struct ncsi_channel *nc, *found; 785 struct ncsi_channel *nc, *found, *hot_nc;
729 struct ncsi_channel_mode *ncm; 786 struct ncsi_channel_mode *ncm;
730 unsigned long flags; 787 unsigned long flags;
731 788
789 spin_lock_irqsave(&ndp->lock, flags);
790 hot_nc = ndp->hot_channel;
791 spin_unlock_irqrestore(&ndp->lock, flags);
792
732 /* The search is done once an inactive channel with up 793 /* The search is done once an inactive channel with up
733 * link is found. 794 * link is found.
734 */ 795 */
@@ -746,6 +807,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
746 if (!found) 807 if (!found)
747 found = nc; 808 found = nc;
748 809
810 if (nc == hot_nc)
811 found = nc;
812
749 ncm = &nc->modes[NCSI_MODE_LINK]; 813 ncm = &nc->modes[NCSI_MODE_LINK];
750 if (ncm->data[2] & 0x1) { 814 if (ncm->data[2] & 0x1) {
751 spin_unlock_irqrestore(&nc->lock, flags); 815 spin_unlock_irqrestore(&nc->lock, flags);
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index fcb5d1df11e9..004af030ef1a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -361,16 +361,9 @@ next_hook:
361 if (ret == 0) 361 if (ret == 0)
362 ret = -EPERM; 362 ret = -EPERM;
363 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { 363 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
364 int err; 364 ret = nf_queue(skb, state, &entry, verdict);
365 365 if (ret == 1 && entry)
366 RCU_INIT_POINTER(state->hook_entries, entry); 366 goto next_hook;
367 err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
368 if (err < 0) {
369 if (err == -ESRCH &&
370 (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
371 goto next_hook;
372 kfree_skb(skb);
373 }
374 } 367 }
375 return ret; 368 return ret;
376} 369}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ba6a1d421222..df2f5a3901df 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -983,7 +983,7 @@ static void gc_worker(struct work_struct *work)
983 return; 983 return;
984 984
985 ratio = scanned ? expired_count * 100 / scanned : 0; 985 ratio = scanned ? expired_count * 100 / scanned : 0;
986 if (ratio >= 90) 986 if (ratio >= 90 || expired_count == GC_MAX_EVICTS)
987 next_run = 0; 987 next_run = 0;
988 988
989 gc_work->last_bucket = i; 989 gc_work->last_bucket = i;
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index e0adb5959342..9fdb655f85bc 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -18,7 +18,7 @@ unsigned int nf_iterate(struct sk_buff *skb, struct nf_hook_state *state,
18 18
19/* nf_queue.c */ 19/* nf_queue.c */
20int nf_queue(struct sk_buff *skb, struct nf_hook_state *state, 20int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
21 unsigned int queuenum); 21 struct nf_hook_entry **entryp, unsigned int verdict);
22void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry); 22void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry);
23int __init netfilter_queue_init(void); 23int __init netfilter_queue_init(void);
24 24
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 96964a0070e1..8f08d759844a 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -107,13 +107,8 @@ void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
107 rcu_read_unlock(); 107 rcu_read_unlock();
108} 108}
109 109
110/* 110static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
111 * Any packet that leaves via this function must come back 111 unsigned int queuenum)
112 * through nf_reinject().
113 */
114int nf_queue(struct sk_buff *skb,
115 struct nf_hook_state *state,
116 unsigned int queuenum)
117{ 112{
118 int status = -ENOENT; 113 int status = -ENOENT;
119 struct nf_queue_entry *entry = NULL; 114 struct nf_queue_entry *entry = NULL;
@@ -161,6 +156,27 @@ err:
161 return status; 156 return status;
162} 157}
163 158
159/* Packets leaving via this function must come back through nf_reinject(). */
160int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
161 struct nf_hook_entry **entryp, unsigned int verdict)
162{
163 struct nf_hook_entry *entry = *entryp;
164 int ret;
165
166 RCU_INIT_POINTER(state->hook_entries, entry);
167 ret = __nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
168 if (ret < 0) {
169 if (ret == -ESRCH &&
170 (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
171 *entryp = rcu_dereference(entry->next);
172 return 1;
173 }
174 kfree_skb(skb);
175 }
176
177 return 0;
178}
179
164void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) 180void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
165{ 181{
166 struct nf_hook_entry *hook_entry; 182 struct nf_hook_entry *hook_entry;
@@ -187,26 +203,26 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
187 entry->state.thresh = INT_MIN; 203 entry->state.thresh = INT_MIN;
188 204
189 if (verdict == NF_ACCEPT) { 205 if (verdict == NF_ACCEPT) {
190 next_hook: 206 hook_entry = rcu_dereference(hook_entry->next);
191 verdict = nf_iterate(skb, &entry->state, &hook_entry); 207 if (hook_entry)
208next_hook:
209 verdict = nf_iterate(skb, &entry->state, &hook_entry);
192 } 210 }
193 211
194 switch (verdict & NF_VERDICT_MASK) { 212 switch (verdict & NF_VERDICT_MASK) {
195 case NF_ACCEPT: 213 case NF_ACCEPT:
196 case NF_STOP: 214 case NF_STOP:
215okfn:
197 local_bh_disable(); 216 local_bh_disable();
198 entry->state.okfn(entry->state.net, entry->state.sk, skb); 217 entry->state.okfn(entry->state.net, entry->state.sk, skb);
199 local_bh_enable(); 218 local_bh_enable();
200 break; 219 break;
201 case NF_QUEUE: 220 case NF_QUEUE:
202 RCU_INIT_POINTER(entry->state.hook_entries, hook_entry); 221 err = nf_queue(skb, &entry->state, &hook_entry, verdict);
203 err = nf_queue(skb, &entry->state, 222 if (err == 1) {
204 verdict >> NF_VERDICT_QBITS); 223 if (hook_entry)
205 if (err < 0) {
206 if (err == -ESRCH &&
207 (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
208 goto next_hook; 224 goto next_hook;
209 kfree_skb(skb); 225 goto okfn;
210 } 226 }
211 break; 227 break;
212 case NF_STOLEN: 228 case NF_STOLEN:
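
After this refactor, nf_queue() owns the queue-bypass policy: it returns 0 when the packet was queued (or freed), and 1 with *entryp advanced to the next hook when the target queue does not exist and NF_VERDICT_FLAG_QUEUE_BYPASS is set, so both nf_hook_slow() and nf_reinject() can simply continue iterating. A compact sketch of that return convention, with the queueing backend and all names faked for illustration:

#include <stdio.h>

struct hook { const char *name; struct hook *next; };

#define VERDICT_BYPASS 0x8000
#define ERR_NOQUEUE    (-2)      /* stands in for -ESRCH */

/* Pretend packet-queueing backend: only queue number 1 exists. */
static int backend_enqueue(unsigned int queuenum)
{
    return queuenum == 1 ? 0 : ERR_NOQUEUE;
}

/*
 * Returns 0 when the packet was queued or dropped, 1 when the caller
 * should continue iterating from *hookp (the bypass case).  *hookp may
 * become NULL, so callers check it, as nf_hook_slow()/nf_reinject() do.
 */
static int queue_packet(struct hook **hookp, unsigned int queuenum,
                        unsigned int verdict)
{
    int err = backend_enqueue(queuenum);

    if (err < 0) {
        if (err == ERR_NOQUEUE && (verdict & VERDICT_BYPASS)) {
            *hookp = (*hookp)->next;
            return 1;
        }
        printf("dropping packet\n");
    }
    return 0;
}

int main(void)
{
    struct hook last = { "last", NULL };
    struct hook queuer = { "queuer", &last };
    struct hook *cur = &queuer;

    if (queue_packet(&cur, 7, VERDICT_BYPASS) == 1 && cur)
        printf("bypassed, continuing at hook '%s'\n", cur->name);

    cur = &queuer;
    if (queue_packet(&cur, 7, 0) == 0)
        printf("no bypass flag: packet dropped\n");
    return 0;
}
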
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index b70d3ea1430e..24db22257586 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4423,7 +4423,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
4423 */ 4423 */
4424unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) 4424unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
4425{ 4425{
4426 int val; 4426 u32 val;
4427 4427
4428 val = ntohl(nla_get_be32(attr)); 4428 val = ntohl(nla_get_be32(attr));
4429 if (val > max) 4429 if (val > max)
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index e3b83c31da2e..517f08767a3c 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -158,7 +158,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
158 if (tb[NFTA_DYNSET_TIMEOUT] != NULL) { 158 if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
159 if (!(set->flags & NFT_SET_TIMEOUT)) 159 if (!(set->flags & NFT_SET_TIMEOUT))
160 return -EINVAL; 160 return -EINVAL;
161 timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT])); 161 timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
162 tb[NFTA_DYNSET_TIMEOUT])));
162 } 163 }
163 164
164 priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]); 165 priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
@@ -246,7 +247,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
246 goto nla_put_failure; 247 goto nla_put_failure;
247 if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name)) 248 if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
248 goto nla_put_failure; 249 goto nla_put_failure;
249 if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout), 250 if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
251 cpu_to_be64(jiffies_to_msecs(priv->timeout)),
250 NFTA_DYNSET_PAD)) 252 NFTA_DYNSET_PAD))
251 goto nla_put_failure; 253 goto nla_put_failure;
252 if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr)) 254 if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
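
The dynset fix is a unit bug: NFTA_DYNSET_TIMEOUT is expressed in milliseconds on the netlink side, while priv->timeout is kept in jiffies, so the value must go through msecs_to_jiffies() on init and jiffies_to_msecs() on dump; otherwise the raw millisecond count is treated as ticks and the effective timeout is off by a factor of 1000/HZ. A tiny illustration of the conversion, using a user-space tick-rate constant in place of the kernel's HZ:

#include <stdint.h>
#include <stdio.h>

#define HZ 250     /* illustrative tick rate; the kernel's HZ is config-dependent */

static uint64_t msecs_to_ticks(uint64_t ms)    { return ms * HZ / 1000; }
static uint64_t ticks_to_msecs(uint64_t ticks) { return ticks * 1000 / HZ; }

int main(void)
{
    uint64_t user_timeout_ms = 30000;            /* what userspace asked for: 30 s */

    /* Correct: store ticks internally, convert back for dumps. */
    uint64_t internal = msecs_to_ticks(user_timeout_ms);
    printf("stored %llu ticks, dumped as %llu ms\n",
           (unsigned long long)internal,
           (unsigned long long)ticks_to_msecs(internal));

    /* The pre-fix bug: 30000 ms interpreted as 30000 ticks = 120 s at HZ=250. */
    printf("buggy interpretation: %llu ms\n",
           (unsigned long long)ticks_to_msecs(user_timeout_ms));
    return 0;
}
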
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index a84cf3d66056..47beb3abcc9d 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -59,7 +59,8 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
59 const struct nlattr * const tb[]) 59 const struct nlattr * const tb[])
60{ 60{
61 struct nft_exthdr *priv = nft_expr_priv(expr); 61 struct nft_exthdr *priv = nft_expr_priv(expr);
62 u32 offset, len, err; 62 u32 offset, len;
63 int err;
63 64
64 if (tb[NFTA_EXTHDR_DREG] == NULL || 65 if (tb[NFTA_EXTHDR_DREG] == NULL ||
65 tb[NFTA_EXTHDR_TYPE] == NULL || 66 tb[NFTA_EXTHDR_TYPE] == NULL ||
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 09473b415b95..baf694de3935 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -44,6 +44,7 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
44 [NFTA_HASH_LEN] = { .type = NLA_U32 }, 44 [NFTA_HASH_LEN] = { .type = NLA_U32 },
45 [NFTA_HASH_MODULUS] = { .type = NLA_U32 }, 45 [NFTA_HASH_MODULUS] = { .type = NLA_U32 },
46 [NFTA_HASH_SEED] = { .type = NLA_U32 }, 46 [NFTA_HASH_SEED] = { .type = NLA_U32 },
47 [NFTA_HASH_OFFSET] = { .type = NLA_U32 },
47}; 48};
48 49
49static int nft_hash_init(const struct nft_ctx *ctx, 50static int nft_hash_init(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index c6d5358482d1..fbc88009ca2e 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -28,22 +28,20 @@ static void nft_range_eval(const struct nft_expr *expr,
28 const struct nft_pktinfo *pkt) 28 const struct nft_pktinfo *pkt)
29{ 29{
30 const struct nft_range_expr *priv = nft_expr_priv(expr); 30 const struct nft_range_expr *priv = nft_expr_priv(expr);
31 bool mismatch;
32 int d1, d2; 31 int d1, d2;
33 32
34 d1 = memcmp(&regs->data[priv->sreg], &priv->data_from, priv->len); 33 d1 = memcmp(&regs->data[priv->sreg], &priv->data_from, priv->len);
35 d2 = memcmp(&regs->data[priv->sreg], &priv->data_to, priv->len); 34 d2 = memcmp(&regs->data[priv->sreg], &priv->data_to, priv->len);
36 switch (priv->op) { 35 switch (priv->op) {
37 case NFT_RANGE_EQ: 36 case NFT_RANGE_EQ:
38 mismatch = (d1 < 0 || d2 > 0); 37 if (d1 < 0 || d2 > 0)
38 regs->verdict.code = NFT_BREAK;
39 break; 39 break;
40 case NFT_RANGE_NEQ: 40 case NFT_RANGE_NEQ:
41 mismatch = (d1 >= 0 && d2 <= 0); 41 if (d1 >= 0 && d2 <= 0)
42 regs->verdict.code = NFT_BREAK;
42 break; 43 break;
43 } 44 }
44
45 if (mismatch)
46 regs->verdict.code = NFT_BREAK;
47} 45}
48 46
49static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = { 47static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = {
@@ -59,6 +57,7 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
 	struct nft_range_expr *priv = nft_expr_priv(expr);
 	struct nft_data_desc desc_from, desc_to;
 	int err;
+	u32 op;
 
 	err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
 			    &desc_from, tb[NFTA_RANGE_FROM_DATA]);
@@ -80,7 +79,20 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
 	if (err < 0)
 		goto err2;
 
-	priv->op = ntohl(nla_get_be32(tb[NFTA_RANGE_OP]));
+	err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op);
+	if (err < 0)
+		goto err2;
+
+	switch (op) {
+	case NFT_RANGE_EQ:
+	case NFT_RANGE_NEQ:
+		break;
+	default:
+		err = -EINVAL;
+		goto err2;
+	}
+
+	priv->op = op;
 	priv->len = desc_from.len;
 	return 0;
 err2:
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index e0aa7c1d0224..fc4977456c30 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1513,7 +1513,7 @@ xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
 	if (!num_hooks)
 		return ERR_PTR(-EINVAL);
 
-	ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
+	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
 	if (ops == NULL)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index 018eed7e1ff1..8668a5c18dc3 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -32,6 +32,7 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	li.u.ulog.copy_len = info->len;
 	li.u.ulog.group = info->group;
 	li.u.ulog.qthreshold = info->threshold;
+	li.u.ulog.flags = 0;
 
 	if (info->flags & XT_NFLOG_F_COPY_LEN)
 		li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 2fab0c65aa94..b89b688e9d01 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -431,7 +431,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
    CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
 */
 #define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24))
-#define MAX_CPJ (0xFFFFFFFFFFFFFFFF / (HZ*60*60*24))
+#define MAX_CPJ (0xFFFFFFFFFFFFFFFFULL / (HZ*60*60*24))
 
 /* Repeated shift and or gives us all 1s, final shift and add 1 gives
  * us the power of 2 below the theoretical max, so GCC simply does a
@@ -473,7 +473,7 @@ static u64 user2credits(u64 user, int revision)
 		return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1,
 				 XT_HASHLIMIT_SCALE);
 	} else {
-		if (user > 0xFFFFFFFFFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+		if (user > 0xFFFFFFFFFFFFFFFFULL / (HZ*CREDITS_PER_JIFFY))
 			return div64_u64(user, XT_HASHLIMIT_SCALE_v2)
 				* HZ * CREDITS_PER_JIFFY;
 
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c
index 89d53104c6b3..000e70377f85 100644
--- a/net/netfilter/xt_ipcomp.c
+++ b/net/netfilter/xt_ipcomp.c
@@ -26,6 +26,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Fan Du <fan.du@windriver.com>");
 MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match");
+MODULE_ALIAS("ipt_ipcomp");
+MODULE_ALIAS("ip6t_ipcomp");
 
 /* Returns 1 if the spi is matched by the range, 0 otherwise */
 static inline bool
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 11db0d619c00..d2238b204691 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -250,7 +250,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
 static int packet_direct_xmit(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
-	netdev_features_t features;
+	struct sk_buff *orig_skb = skb;
 	struct netdev_queue *txq;
 	int ret = NETDEV_TX_BUSY;
 
@@ -258,9 +258,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
 		     !netif_carrier_ok(dev)))
 		goto drop;
 
-	features = netif_skb_features(skb);
-	if (skb_needs_linearize(skb, features) &&
-	    __skb_linearize(skb))
+	skb = validate_xmit_skb_list(skb, dev);
+	if (skb != orig_skb)
 		goto drop;
 
 	txq = skb_get_tx_queue(dev, skb);
@@ -280,7 +279,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
 	return ret;
 drop:
 	atomic_long_inc(&dev->tx_dropped);
-	kfree_skb(skb);
+	kfree_skb_list(skb);
 	return NET_XMIT_DROP;
 }
 
diff --git a/net/rds/Makefile b/net/rds/Makefile
index 0e72bec1529f..56c7d27eefee 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -13,5 +13,5 @@ obj-$(CONFIG_RDS_TCP) += rds_tcp.o
 rds_tcp-y :=		tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
 			tcp_send.o tcp_stats.o
 
-ccflags-$(CONFIG_RDS_DEBUG)	:= -DDEBUG
+ccflags-$(CONFIG_RDS_DEBUG)	:= -DRDS_DEBUG
 
diff --git a/net/rds/rds.h b/net/rds/rds.h
index fd0bccb2f9f9..67ba67c058b1 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -33,7 +33,7 @@
 #define KERNEL_HAS_ATOMIC64
 #endif
 
-#ifdef DEBUG
+#ifdef RDS_DEBUG
 #define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
 #else
 /* sigh, pr_debug() causes unused variable warnings */
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 4353a29f3b57..1ed18d8c9c9f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -276,7 +276,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 		goto error;
 
 	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
-			 here, ERR_PTR(ret));
+			 here, NULL);
 
 	spin_lock_bh(&call->conn->params.peer->lock);
 	hlist_add_head(&call->error_link,
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 941b724d523b..862eea6b266c 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -193,8 +193,8 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
 		fl6->fl6_dport = htons(7001);
 		fl6->fl6_sport = htons(7000);
 		dst = ip6_route_output(&init_net, NULL, fl6);
-		if (IS_ERR(dst)) {
-			_leave(" [route err %ld]", PTR_ERR(dst));
+		if (dst->error) {
+			_leave(" [route err %d]", dst->error);
 			return;
 		}
 		break;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index a512b18c0088..f893d180da1c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1028,8 +1028,7 @@ static struct nlattr *find_dump_kind(const struct nlmsghdr *n)
 
 	if (tb[1] == NULL)
 		return NULL;
-	if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
-		      nla_len(tb[1]), NULL) < 0)
+	if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL) < 0)
 		return NULL;
 	kind = tb2[TCA_ACT_KIND];
 
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 667dc382df82..6b07fba5770b 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -207,8 +207,11 @@ out:
 static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
 			     u64 lastuse)
 {
-	tcf_lastuse_update(&a->tcfa_tm);
+	struct tcf_mirred *m = to_mirred(a);
+	struct tcf_t *tm = &m->tcf_tm;
+
 	_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+	tm->lastuse = lastuse;
 }
 
 static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2ee29a3375f6..2b2a7974e4bb 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -345,7 +345,8 @@ replay:
 	if (err == 0) {
 		struct tcf_proto *next = rtnl_dereference(tp->next);
 
-		tfilter_notify(net, skb, n, tp, fh,
+		tfilter_notify(net, skb, n, tp,
+			       t->tcm_handle,
 			       RTM_DELTFILTER, false);
 		if (tcf_destroy(tp, false))
 			RCU_INIT_POINTER(*back, next);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 2a5c1896d18f..6cb0df859195 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -418,6 +418,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
 	__u8 has_data = 0;
 	int gso = 0;
 	int pktcount = 0;
+	int auth_len = 0;
 	struct dst_entry *dst;
 	unsigned char *auth = NULL;	/* pointer to auth in skb data */
 
@@ -510,7 +511,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
 	list_for_each_entry(chunk, &packet->chunk_list, list) {
 		int padded = SCTP_PAD4(chunk->skb->len);
 
-		if (pkt_size + padded > tp->pathmtu)
+		if (chunk == packet->auth)
+			auth_len = padded;
+		else if (auth_len + padded + packet->overhead >
+			 tp->pathmtu)
+			goto nomem;
+		else if (pkt_size + padded > tp->pathmtu)
 			break;
 		pkt_size += padded;
 	}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 026e3bca4a94..8ec20a64a3f8 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3422,6 +3422,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
 			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 							  commands);
 
+		/* Report violation if chunk len overflows */
+		ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
+		if (ch_end > skb_tail_pointer(skb))
+			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+							  commands);
+
 		/* Now that we know we at least have a chunk header,
 		 * do things that are type appropriate.
 		 */
@@ -3453,12 +3459,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
 			}
 		}
 
-		/* Report violation if chunk len overflows */
-		ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
-		if (ch_end > skb_tail_pointer(skb))
-			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
-							  commands);
-
 		ch = (sctp_chunkhdr_t *) ch_end;
 	} while (ch_end < skb_tail_pointer(skb));
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fb02c7033307..9fbb6feb8c27 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4687,7 +4687,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
 				  int __user *optlen)
 {
-	if (len <= 0)
+	if (len == 0)
 		return -EINVAL;
 	if (len > sizeof(struct sctp_event_subscribe))
 		len = sizeof(struct sctp_event_subscribe);
@@ -6430,6 +6430,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
 	if (get_user(len, optlen))
 		return -EFAULT;
 
+	if (len < 0)
+		return -EINVAL;
+
 	lock_sock(sk);
 
 	switch (optname) {
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 02beb35f577f..3b95fe980fa2 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -771,6 +771,9 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 	u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
 	int err;
 
+	if (!netif_is_bridge_port(dev))
+		return -EOPNOTSUPP;
+
 	err = switchdev_port_attr_get(dev, &attr);
 	if (err && err != -EOPNOTSUPP)
 		return err;
@@ -926,6 +929,9 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
 	struct nlattr *afspec;
 	int err = 0;
 
+	if (!netif_is_bridge_port(dev))
+		return -EOPNOTSUPP;
+
 	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
 				   IFLA_PROTINFO);
 	if (protinfo) {
@@ -959,6 +965,9 @@ int switchdev_port_bridge_dellink(struct net_device *dev,
 {
 	struct nlattr *afspec;
 
+	if (!netif_is_bridge_port(dev))
+		return -EOPNOTSUPP;
+
 	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
 				 IFLA_AF_SPEC);
 	if (afspec)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 753f774cb46f..aa1babbea385 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -247,11 +247,17 @@ int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
  *
  * RCU is locked, no other locks set
  */
-void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
+			struct tipc_msg *hdr)
 {
 	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+	u16 acked = msg_bcast_ack(hdr);
 	struct sk_buff_head xmitq;
 
+	/* Ignore bc acks sent by peer before bcast synch point was received */
+	if (msg_bc_ack_invalid(hdr))
+		return;
+
 	__skb_queue_head_init(&xmitq);
 
 	tipc_bcast_lock(net);
@@ -279,11 +285,11 @@ int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
 	__skb_queue_head_init(&xmitq);
 
 	tipc_bcast_lock(net);
-	if (msg_type(hdr) == STATE_MSG) {
+	if (msg_type(hdr) != STATE_MSG) {
+		tipc_link_bc_init_rcv(l, hdr);
+	} else if (!msg_bc_ack_invalid(hdr)) {
 		tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
 		rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
-	} else {
-		tipc_link_bc_init_rcv(l, hdr);
 	}
 	tipc_bcast_unlock(net);
 
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5ffe34472ccd..855d53c64ab3 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -55,7 +55,8 @@ void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
 int tipc_bcast_get_mtu(struct net *net);
 int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list);
 int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
-void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked);
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
+			struct tipc_msg *hdr);
 int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
 			struct tipc_msg *hdr);
 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b36e16cdc945..1055164c6232 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1312,6 +1312,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 	msg_set_next_sent(hdr, l->snd_nxt);
 	msg_set_ack(hdr, l->rcv_nxt - 1);
 	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
+	msg_set_bc_ack_invalid(hdr, !node_up);
 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
 	msg_set_link_tolerance(hdr, tolerance);
 	msg_set_linkprio(hdr, priority);
@@ -1574,6 +1575,7 @@ static void tipc_link_build_bc_init_msg(struct tipc_link *l,
 	__skb_queue_head_init(&list);
 	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
 		return;
+	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
 	tipc_link_xmit(l, &list, xmitq);
 }
 
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index c3832cdf2278..50a739860d37 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -714,6 +714,23 @@ static inline void msg_set_peer_stopping(struct tipc_msg *m, u32 s)
 	msg_set_bits(m, 5, 13, 0x1, s);
 }
 
+static inline bool msg_bc_ack_invalid(struct tipc_msg *m)
+{
+	switch (msg_user(m)) {
+	case BCAST_PROTOCOL:
+	case NAME_DISTRIBUTOR:
+	case LINK_PROTOCOL:
+		return msg_bits(m, 5, 14, 0x1);
+	default:
+		return false;
+	}
+}
+
+static inline void msg_set_bc_ack_invalid(struct tipc_msg *m, bool invalid)
+{
+	msg_set_bits(m, 5, 14, 0x1, invalid);
+}
+
 static inline char *msg_media_addr(struct tipc_msg *m)
 {
 	return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET];
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index a04fe9be1c60..c1cfd92de17a 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -156,6 +156,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
156 pr_warn("Bulk publication failure\n"); 156 pr_warn("Bulk publication failure\n");
157 return; 157 return;
158 } 158 }
159 msg_set_bc_ack_invalid(buf_msg(skb), true);
159 item = (struct distr_item *)msg_data(buf_msg(skb)); 160 item = (struct distr_item *)msg_data(buf_msg(skb));
160 } 161 }
161 162
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 7ef14e2d2356..9d2f4c2b08ab 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1535,7 +1535,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
 		if (unlikely(usr == LINK_PROTOCOL))
 			tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
 		else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
-			tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
+			tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
 
 	/* Receive packet directly if conditions permit */
 	tipc_node_read_lock(n);
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 0082f4b01795..14b3f007826d 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -104,13 +104,16 @@ static int wiphy_suspend(struct device *dev)
 
 	rtnl_lock();
 	if (rdev->wiphy.registered) {
-		if (!rdev->wiphy.wowlan_config)
+		if (!rdev->wiphy.wowlan_config) {
 			cfg80211_leave_all(rdev);
+			cfg80211_process_rdev_events(rdev);
+		}
 		if (rdev->ops->suspend)
 			ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
 		if (ret == 1) {
 			/* Driver refuse to configure wowlan */
 			cfg80211_leave_all(rdev);
+			cfg80211_process_rdev_events(rdev);
 			ret = rdev_suspend(rdev, NULL);
 		}
 	}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 8edce22d1b93..5ea12afc7706 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -420,8 +420,8 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
 }
 EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
 
-static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
-				    const u8 *addr, enum nl80211_iftype iftype)
+int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+				  const u8 *addr, enum nl80211_iftype iftype)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct {
@@ -525,13 +525,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
 
 	return 0;
 }
-
-int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
-			   enum nl80211_iftype iftype)
-{
-	return __ieee80211_data_to_8023(skb, NULL, addr, iftype);
-}
-EXPORT_SYMBOL(ieee80211_data_to_8023);
+EXPORT_SYMBOL(ieee80211_data_to_8023_exthdr);
 
 int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
 			     enum nl80211_iftype iftype,
@@ -746,24 +740,18 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
 void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
 			      const u8 *addr, enum nl80211_iftype iftype,
 			      const unsigned int extra_headroom,
-			      bool has_80211_header)
+			      const u8 *check_da, const u8 *check_sa)
 {
 	unsigned int hlen = ALIGN(extra_headroom, 4);
 	struct sk_buff *frame = NULL;
 	u16 ethertype;
 	u8 *payload;
-	int offset = 0, remaining, err;
+	int offset = 0, remaining;
 	struct ethhdr eth;
 	bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb);
 	bool reuse_skb = false;
 	bool last = false;
 
-	if (has_80211_header) {
-		err = __ieee80211_data_to_8023(skb, &eth, addr, iftype);
-		if (err)
-			goto out;
-	}
-
 	while (!last) {
 		unsigned int subframe_len;
 		int len;
@@ -780,8 +768,17 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
 			goto purge;
 
 		offset += sizeof(struct ethhdr);
-		/* reuse skb for the last subframe */
 		last = remaining <= subframe_len + padding;
+
+		/* FIXME: should we really accept multicast DA? */
+		if ((check_da && !is_multicast_ether_addr(eth.h_dest) &&
+		     !ether_addr_equal(check_da, eth.h_dest)) ||
+		    (check_sa && !ether_addr_equal(check_sa, eth.h_source))) {
+			offset += len + padding;
+			continue;
+		}
+
+		/* reuse skb for the last subframe */
 		if (!skb_is_nonlinear(skb) && !reuse_frag && last) {
 			skb_pull(skb, offset);
 			frame = skb;
@@ -819,7 +816,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
 
  purge:
 	__skb_queue_purge(list);
- out:
 	dev_kfree_skb(skb);
 }
 EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);