aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-12-29 02:20:21 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-12-29 02:20:21 -0500
commit2758b3e3e630ba304fc4aca434d591e70e528298 (patch)
tree8f3532775102a65dc2e23fc75298c33cc107295c
parentfd84b751ddb7dab7a8c22826e7bff85f3ff3f9a6 (diff)
parentd5902f6d1fbdb27e6a33c418063466d94be9dfa2 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) IPv6 gre tunnels end up with different default features enabled depending upon whether netlink or ioctls are used to bring them up. Fix from Alexey Kodanev. 2) Fix read past end of user control message in RDS, from Avinash Repaka. 3) Missing RCU barrier in mini qdisc code, from Cong Wang. 4) Missing policy put when reusing per-cpu route entries, from Florian Westphal. 5) Handle nested PCI errors properly in bnx2x driver, from Guilherme G. Piccoli. 6) Run nested transport mode IPSEC packets via tasklet, from Herbert Xu. 7) Fix handling poll() for stream sockets in tipc, from Parthasarathy Bhuvaragan. 8) Fix two stack-out-of-bounds issues in IPSEC, from Steffen Klassert. 9) Another zerocopy ubuf handling fix, from Willem de Bruijn. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (33 commits) strparser: Call sock_owned_by_user_nocheck sock: Add sock_owned_by_user_nocheck skbuff: in skb_copy_ubufs unclone before releasing zerocopy tipc: fix hanging poll() for stream sockets sctp: Replace use of sockets_allocated with specified macro. bnx2x: Improve reliability in case of nested PCI errors tg3: Enable PHY reset in MTU change path for 5720 tg3: Add workaround to restrict 5762 MRRS to 2048 tg3: Update copyright net: fec: unmap the xmit buffer that are not transferred by DMA tipc: fix tipc_mon_delete() oops in tipc_enable_bearer() error path tipc: error path leak fixes in tipc_enable_bearer() RDS: Check cmsg_len before dereferencing CMSG_DATA tcp: Avoid preprocessor directives in tracepoint macro args tipc: fix memory leak of group member when peer node is lost net: sched: fix possible null pointer deref in tcf_block_put tipc: base group replicast ack counter on number of actual receivers net_sched: fix a missing rcu barrier in mini_qdisc_pair_swap() net: phy: micrel: ksz9031: reconfigure autoneg after phy autoneg workaround ip6_gre: fix device features for ioctl setup ...
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c14
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c19
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h7
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c6
-rw-r--r--drivers/net/phy/micrel.c1
-rw-r--r--drivers/net/phy/phylink.c2
-rw-r--r--include/net/sock.h5
-rw-r--r--include/net/xfrm.h3
-rw-r--r--include/trace/events/tcp.h97
-rw-r--r--net/core/skbuff.c6
-rw-r--r--net/ipv4/xfrm4_input.c12
-rw-r--r--net/ipv6/ip6_gre.c57
-rw-r--r--net/ipv6/xfrm6_input.c10
-rw-r--r--net/rds/send.c3
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/sch_generic.c4
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/strparser/strparser.c2
-rw-r--r--net/tipc/bearer.c5
-rw-r--r--net/tipc/group.c31
-rw-r--r--net/tipc/monitor.c6
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/xfrm/xfrm_input.c69
-rw-r--r--net/xfrm/xfrm_policy.c9
-rw-r--r--net/xfrm/xfrm_state.c1
-rw-r--r--net/xfrm/xfrm_user.c26
-rw-r--r--tools/bpf/bpftool/map.c8
-rw-r--r--tools/bpf/bpftool/prog.c2
-rw-r--r--tools/testing/selftests/bpf/Makefile2
30 files changed, 298 insertions, 121 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4c739d5355d2..8ae269ec17a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3030,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3030 3030
3031 del_timer_sync(&bp->timer); 3031 del_timer_sync(&bp->timer);
3032 3032
3033 if (IS_PF(bp)) { 3033 if (IS_PF(bp) && !BP_NOMCP(bp)) {
3034 /* Set ALWAYS_ALIVE bit in shmem */ 3034 /* Set ALWAYS_ALIVE bit in shmem */
3035 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 3035 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3036 bnx2x_drv_pulse(bp); 3036 bnx2x_drv_pulse(bp);
@@ -3116,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3116 bp->cnic_loaded = false; 3116 bp->cnic_loaded = false;
3117 3117
3118 /* Clear driver version indication in shmem */ 3118 /* Clear driver version indication in shmem */
3119 if (IS_PF(bp)) 3119 if (IS_PF(bp) && !BP_NOMCP(bp))
3120 bnx2x_update_mng_version(bp); 3120 bnx2x_update_mng_version(bp);
3121 3121
3122 /* Check if there are pending parity attentions. If there are - set 3122 /* Check if there are pending parity attentions. If there are - set
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 91e2a7560b48..ddd5d3ebd201 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
9578 9578
9579 do { 9579 do {
9580 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9580 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9581
9582 /* If we read all 0xFFs, means we are in PCI error state and
9583 * should bail out to avoid crashes on adapter's FW reads.
9584 */
9585 if (bp->common.shmem_base == 0xFFFFFFFF) {
9586 bp->flags |= NO_MCP_FLAG;
9587 return -ENODEV;
9588 }
9589
9581 if (bp->common.shmem_base) { 9590 if (bp->common.shmem_base) {
9582 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9591 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9583 if (val & SHR_MEM_VALIDITY_MB) 9592 if (val & SHR_MEM_VALIDITY_MB)
@@ -14320,7 +14329,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14320 BNX2X_ERR("IO slot reset --> driver unload\n"); 14329 BNX2X_ERR("IO slot reset --> driver unload\n");
14321 14330
14322 /* MCP should have been reset; Need to wait for validity */ 14331 /* MCP should have been reset; Need to wait for validity */
14323 bnx2x_init_shmem(bp); 14332 if (bnx2x_init_shmem(bp)) {
14333 rtnl_unlock();
14334 return PCI_ERS_RESULT_DISCONNECT;
14335 }
14324 14336
14325 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 14337 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14326 u32 v; 14338 u32 v;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d09c5a9c53b5..8995cfefbfcf 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,11 +4,13 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2014 Broadcom Corporation. 7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
8 * 9 *
9 * Firmware is: 10 * Firmware is:
10 * Derived from proprietary unpublished source code, 11 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation. 12 * Copyright (C) 2000-2016 Broadcom Corporation.
13 * Copyright (C) 2016-2017 Broadcom Ltd.
12 * 14 *
13 * Permission is hereby granted for the distribution of this firmware 15 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright 16 * data in hexadecimal or equivalent format, provided this copyright
@@ -10052,6 +10054,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
10052 10054
10053 tw32(GRC_MODE, tp->grc_mode | val); 10055 tw32(GRC_MODE, tp->grc_mode | val);
10054 10056
10057 /* On one of the AMD platform, MRRS is restricted to 4000 because of
10058 * south bridge limitation. As a workaround, Driver is setting MRRS
10059 * to 2048 instead of default 4096.
10060 */
10061 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10062 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10063 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10064 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10065 }
10066
10055 /* Setup the timer prescalar register. Clock is always 66Mhz. */ 10067 /* Setup the timer prescalar register. Clock is always 66Mhz. */
10056 val = tr32(GRC_MISC_CFG); 10068 val = tr32(GRC_MISC_CFG);
10057 val &= ~0xff; 10069 val &= ~0xff;
@@ -14227,7 +14239,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14227 */ 14239 */
14228 if (tg3_asic_rev(tp) == ASIC_REV_57766 || 14240 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14229 tg3_asic_rev(tp) == ASIC_REV_5717 || 14241 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14230 tg3_asic_rev(tp) == ASIC_REV_5719) 14242 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14243 tg3_asic_rev(tp) == ASIC_REV_5720)
14231 reset_phy = true; 14244 reset_phy = true;
14232 14245
14233 err = tg3_restart_hw(tp, reset_phy); 14246 err = tg3_restart_hw(tp, reset_phy);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index c2d02d02d1e6..1f0271fa7c74 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -5,7 +5,8 @@
5 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 5 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
6 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) 6 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
7 * Copyright (C) 2004 Sun Microsystems Inc. 7 * Copyright (C) 2004 Sun Microsystems Inc.
8 * Copyright (C) 2007-2014 Broadcom Corporation. 8 * Copyright (C) 2007-2016 Broadcom Corporation.
9 * Copyright (C) 2016-2017 Broadcom Limited.
9 */ 10 */
10 11
11#ifndef _T3_H 12#ifndef _T3_H
@@ -96,6 +97,7 @@
96#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106 97#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106
97#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109 98#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109
98#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a 99#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a
100#define TG3PCI_SUBDEVICE_ID_DELL_5762 0x07f0
99#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ 101#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ
100#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c 102#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c
101#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a 103#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a
@@ -281,6 +283,9 @@
281#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ 283#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
282#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ 284#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
283/* 0xa8 --> 0xb8 unused */ 285/* 0xa8 --> 0xb8 unused */
286#define TG3PCI_DEV_STATUS_CTRL 0x000000b4
287#define MAX_READ_REQ_SIZE_2048 0x00004000
288#define MAX_READ_REQ_MASK 0x00007000
284#define TG3PCI_DUAL_MAC_CTRL 0x000000b8 289#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
285#define DUAL_MAC_CTRL_CH_MASK 0x00000003 290#define DUAL_MAC_CTRL_CH_MASK 0x00000003
286#define DUAL_MAC_CTRL_ID 0x00000004 291#define DUAL_MAC_CTRL_ID 0x00000004
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 610573855213..8184d2fca9be 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev)
818 for (i = 0; i < txq->bd.ring_size; i++) { 818 for (i = 0; i < txq->bd.ring_size; i++) {
819 /* Initialize the BD for every fragment in the page. */ 819 /* Initialize the BD for every fragment in the page. */
820 bdp->cbd_sc = cpu_to_fec16(0); 820 bdp->cbd_sc = cpu_to_fec16(0);
821 if (bdp->cbd_bufaddr &&
822 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
823 dma_unmap_single(&fep->pdev->dev,
824 fec32_to_cpu(bdp->cbd_bufaddr),
825 fec16_to_cpu(bdp->cbd_datlen),
826 DMA_TO_DEVICE);
821 if (txq->tx_skbuff[i]) { 827 if (txq->tx_skbuff[i]) {
822 dev_kfree_skb_any(txq->tx_skbuff[i]); 828 dev_kfree_skb_any(txq->tx_skbuff[i]);
823 txq->tx_skbuff[i] = NULL; 829 txq->tx_skbuff[i] = NULL;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ab4614113403..422ff6333c52 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -624,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
624 phydev->link = 0; 624 phydev->link = 0;
625 if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) 625 if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
626 phydev->drv->config_intr(phydev); 626 phydev->drv->config_intr(phydev);
627 return genphy_config_aneg(phydev);
627 } 628 }
628 629
629 return 0; 630 return 0;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 5dc9668dde34..827f3f92560e 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -526,6 +526,7 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
526 pl->link_config.pause = MLO_PAUSE_AN; 526 pl->link_config.pause = MLO_PAUSE_AN;
527 pl->link_config.speed = SPEED_UNKNOWN; 527 pl->link_config.speed = SPEED_UNKNOWN;
528 pl->link_config.duplex = DUPLEX_UNKNOWN; 528 pl->link_config.duplex = DUPLEX_UNKNOWN;
529 pl->link_config.an_enabled = true;
529 pl->ops = ops; 530 pl->ops = ops;
530 __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); 531 __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
531 532
@@ -951,6 +952,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
951 mutex_lock(&pl->state_mutex); 952 mutex_lock(&pl->state_mutex);
952 /* Configure the MAC to match the new settings */ 953 /* Configure the MAC to match the new settings */
953 linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising); 954 linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
955 pl->link_config.interface = config.interface;
954 pl->link_config.speed = our_kset.base.speed; 956 pl->link_config.speed = our_kset.base.speed;
955 pl->link_config.duplex = our_kset.base.duplex; 957 pl->link_config.duplex = our_kset.base.duplex;
956 pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; 958 pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
diff --git a/include/net/sock.h b/include/net/sock.h
index 9155da422692..7a7b14e9628a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1514,6 +1514,11 @@ static inline bool sock_owned_by_user(const struct sock *sk)
1514 return sk->sk_lock.owned; 1514 return sk->sk_lock.owned;
1515} 1515}
1516 1516
1517static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
1518{
1519 return sk->sk_lock.owned;
1520}
1521
1517/* no reclassification while locks are held */ 1522/* no reclassification while locks are held */
1518static inline bool sock_allow_reclassification(const struct sock *csk) 1523static inline bool sock_allow_reclassification(const struct sock *csk)
1519{ 1524{
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index dc28a98ce97c..ae35991b5877 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1570,6 +1570,9 @@ int xfrm_init_state(struct xfrm_state *x);
1570int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); 1570int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1571int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); 1571int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1572int xfrm_input_resume(struct sk_buff *skb, int nexthdr); 1572int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1573int xfrm_trans_queue(struct sk_buff *skb,
1574 int (*finish)(struct net *, struct sock *,
1575 struct sk_buff *));
1573int xfrm_output_resume(struct sk_buff *skb, int err); 1576int xfrm_output_resume(struct sk_buff *skb, int err);
1574int xfrm_output(struct sock *sk, struct sk_buff *skb); 1577int xfrm_output(struct sock *sk, struct sk_buff *skb);
1575int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1578int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 07cccca6cbf1..ab34c561f26b 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -25,6 +25,35 @@
25 tcp_state_name(TCP_CLOSING), \ 25 tcp_state_name(TCP_CLOSING), \
26 tcp_state_name(TCP_NEW_SYN_RECV)) 26 tcp_state_name(TCP_NEW_SYN_RECV))
27 27
28#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \
29 do { \
30 struct in6_addr *pin6; \
31 \
32 pin6 = (struct in6_addr *)__entry->saddr_v6; \
33 ipv6_addr_set_v4mapped(saddr, pin6); \
34 pin6 = (struct in6_addr *)__entry->daddr_v6; \
35 ipv6_addr_set_v4mapped(daddr, pin6); \
36 } while (0)
37
38#if IS_ENABLED(CONFIG_IPV6)
39#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \
40 do { \
41 if (sk->sk_family == AF_INET6) { \
42 struct in6_addr *pin6; \
43 \
44 pin6 = (struct in6_addr *)__entry->saddr_v6; \
45 *pin6 = saddr6; \
46 pin6 = (struct in6_addr *)__entry->daddr_v6; \
47 *pin6 = daddr6; \
48 } else { \
49 TP_STORE_V4MAPPED(__entry, saddr, daddr); \
50 } \
51 } while (0)
52#else
53#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \
54 TP_STORE_V4MAPPED(__entry, saddr, daddr)
55#endif
56
28/* 57/*
29 * tcp event with arguments sk and skb 58 * tcp event with arguments sk and skb
30 * 59 *
@@ -50,7 +79,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
50 79
51 TP_fast_assign( 80 TP_fast_assign(
52 struct inet_sock *inet = inet_sk(sk); 81 struct inet_sock *inet = inet_sk(sk);
53 struct in6_addr *pin6;
54 __be32 *p32; 82 __be32 *p32;
55 83
56 __entry->skbaddr = skb; 84 __entry->skbaddr = skb;
@@ -65,20 +93,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
65 p32 = (__be32 *) __entry->daddr; 93 p32 = (__be32 *) __entry->daddr;
66 *p32 = inet->inet_daddr; 94 *p32 = inet->inet_daddr;
67 95
68#if IS_ENABLED(CONFIG_IPV6) 96 TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
69 if (sk->sk_family == AF_INET6) { 97 sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
70 pin6 = (struct in6_addr *)__entry->saddr_v6;
71 *pin6 = sk->sk_v6_rcv_saddr;
72 pin6 = (struct in6_addr *)__entry->daddr_v6;
73 *pin6 = sk->sk_v6_daddr;
74 } else
75#endif
76 {
77 pin6 = (struct in6_addr *)__entry->saddr_v6;
78 ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
79 pin6 = (struct in6_addr *)__entry->daddr_v6;
80 ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
81 }
82 ), 98 ),
83 99
84 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", 100 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -127,7 +143,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
127 143
128 TP_fast_assign( 144 TP_fast_assign(
129 struct inet_sock *inet = inet_sk(sk); 145 struct inet_sock *inet = inet_sk(sk);
130 struct in6_addr *pin6;
131 __be32 *p32; 146 __be32 *p32;
132 147
133 __entry->skaddr = sk; 148 __entry->skaddr = sk;
@@ -141,20 +156,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
141 p32 = (__be32 *) __entry->daddr; 156 p32 = (__be32 *) __entry->daddr;
142 *p32 = inet->inet_daddr; 157 *p32 = inet->inet_daddr;
143 158
144#if IS_ENABLED(CONFIG_IPV6) 159 TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
145 if (sk->sk_family == AF_INET6) { 160 sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
146 pin6 = (struct in6_addr *)__entry->saddr_v6;
147 *pin6 = sk->sk_v6_rcv_saddr;
148 pin6 = (struct in6_addr *)__entry->daddr_v6;
149 *pin6 = sk->sk_v6_daddr;
150 } else
151#endif
152 {
153 pin6 = (struct in6_addr *)__entry->saddr_v6;
154 ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
155 pin6 = (struct in6_addr *)__entry->daddr_v6;
156 ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
157 }
158 ), 161 ),
159 162
160 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", 163 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -197,7 +200,6 @@ TRACE_EVENT(tcp_set_state,
197 200
198 TP_fast_assign( 201 TP_fast_assign(
199 struct inet_sock *inet = inet_sk(sk); 202 struct inet_sock *inet = inet_sk(sk);
200 struct in6_addr *pin6;
201 __be32 *p32; 203 __be32 *p32;
202 204
203 __entry->skaddr = sk; 205 __entry->skaddr = sk;
@@ -213,20 +215,8 @@ TRACE_EVENT(tcp_set_state,
213 p32 = (__be32 *) __entry->daddr; 215 p32 = (__be32 *) __entry->daddr;
214 *p32 = inet->inet_daddr; 216 *p32 = inet->inet_daddr;
215 217
216#if IS_ENABLED(CONFIG_IPV6) 218 TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
217 if (sk->sk_family == AF_INET6) { 219 sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
218 pin6 = (struct in6_addr *)__entry->saddr_v6;
219 *pin6 = sk->sk_v6_rcv_saddr;
220 pin6 = (struct in6_addr *)__entry->daddr_v6;
221 *pin6 = sk->sk_v6_daddr;
222 } else
223#endif
224 {
225 pin6 = (struct in6_addr *)__entry->saddr_v6;
226 ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
227 pin6 = (struct in6_addr *)__entry->daddr_v6;
228 ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
229 }
230 ), 220 ),
231 221
232 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s", 222 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
@@ -256,7 +246,6 @@ TRACE_EVENT(tcp_retransmit_synack,
256 246
257 TP_fast_assign( 247 TP_fast_assign(
258 struct inet_request_sock *ireq = inet_rsk(req); 248 struct inet_request_sock *ireq = inet_rsk(req);
259 struct in6_addr *pin6;
260 __be32 *p32; 249 __be32 *p32;
261 250
262 __entry->skaddr = sk; 251 __entry->skaddr = sk;
@@ -271,20 +260,8 @@ TRACE_EVENT(tcp_retransmit_synack,
271 p32 = (__be32 *) __entry->daddr; 260 p32 = (__be32 *) __entry->daddr;
272 *p32 = ireq->ir_rmt_addr; 261 *p32 = ireq->ir_rmt_addr;
273 262
274#if IS_ENABLED(CONFIG_IPV6) 263 TP_STORE_ADDRS(__entry, ireq->ir_loc_addr, ireq->ir_rmt_addr,
275 if (sk->sk_family == AF_INET6) { 264 ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr);
276 pin6 = (struct in6_addr *)__entry->saddr_v6;
277 *pin6 = ireq->ir_v6_loc_addr;
278 pin6 = (struct in6_addr *)__entry->daddr_v6;
279 *pin6 = ireq->ir_v6_rmt_addr;
280 } else
281#endif
282 {
283 pin6 = (struct in6_addr *)__entry->saddr_v6;
284 ipv6_addr_set_v4mapped(ireq->ir_loc_addr, pin6);
285 pin6 = (struct in6_addr *)__entry->daddr_v6;
286 ipv6_addr_set_v4mapped(ireq->ir_rmt_addr, pin6);
287 }
288 ), 265 ),
289 266
290 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", 267 TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a3cb0be4c6f3..08f574081315 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1177,12 +1177,12 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1177 int i, new_frags; 1177 int i, new_frags;
1178 u32 d_off; 1178 u32 d_off;
1179 1179
1180 if (!num_frags)
1181 goto release;
1182
1183 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) 1180 if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1184 return -EINVAL; 1181 return -EINVAL;
1185 1182
1183 if (!num_frags)
1184 goto release;
1185
1186 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; 1186 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1187 for (i = 0; i < new_frags; i++) { 1187 for (i = 0; i < new_frags; i++) {
1188 page = alloc_page(gfp_mask); 1188 page = alloc_page(gfp_mask);
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index e50b7fea57ee..bcfc00e88756 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -23,6 +23,12 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
23 return xfrm4_extract_header(skb); 23 return xfrm4_extract_header(skb);
24} 24}
25 25
26static int xfrm4_rcv_encap_finish2(struct net *net, struct sock *sk,
27 struct sk_buff *skb)
28{
29 return dst_input(skb);
30}
31
26static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk, 32static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
27 struct sk_buff *skb) 33 struct sk_buff *skb)
28{ 34{
@@ -33,7 +39,11 @@ static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
33 iph->tos, skb->dev)) 39 iph->tos, skb->dev))
34 goto drop; 40 goto drop;
35 } 41 }
36 return dst_input(skb); 42
43 if (xfrm_trans_queue(skb, xfrm4_rcv_encap_finish2))
44 goto drop;
45
46 return 0;
37drop: 47drop:
38 kfree_skb(skb); 48 kfree_skb(skb);
39 return NET_RX_DROP; 49 return NET_RX_DROP;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 416c8913f132..772695960890 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1014,6 +1014,36 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
1014 eth_random_addr(dev->perm_addr); 1014 eth_random_addr(dev->perm_addr);
1015} 1015}
1016 1016
1017#define GRE6_FEATURES (NETIF_F_SG | \
1018 NETIF_F_FRAGLIST | \
1019 NETIF_F_HIGHDMA | \
1020 NETIF_F_HW_CSUM)
1021
1022static void ip6gre_tnl_init_features(struct net_device *dev)
1023{
1024 struct ip6_tnl *nt = netdev_priv(dev);
1025
1026 dev->features |= GRE6_FEATURES;
1027 dev->hw_features |= GRE6_FEATURES;
1028
1029 if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
1030 /* TCP offload with GRE SEQ is not supported, nor
1031 * can we support 2 levels of outer headers requiring
1032 * an update.
1033 */
1034 if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
1035 nt->encap.type == TUNNEL_ENCAP_NONE) {
1036 dev->features |= NETIF_F_GSO_SOFTWARE;
1037 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1038 }
1039
1040 /* Can use a lockless transmit, unless we generate
1041 * output sequences
1042 */
1043 dev->features |= NETIF_F_LLTX;
1044 }
1045}
1046
1017static int ip6gre_tunnel_init_common(struct net_device *dev) 1047static int ip6gre_tunnel_init_common(struct net_device *dev)
1018{ 1048{
1019 struct ip6_tnl *tunnel; 1049 struct ip6_tnl *tunnel;
@@ -1048,6 +1078,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
1048 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1078 if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1049 dev->mtu -= 8; 1079 dev->mtu -= 8;
1050 1080
1081 ip6gre_tnl_init_features(dev);
1082
1051 return 0; 1083 return 0;
1052} 1084}
1053 1085
@@ -1298,11 +1330,6 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
1298 .ndo_get_iflink = ip6_tnl_get_iflink, 1330 .ndo_get_iflink = ip6_tnl_get_iflink,
1299}; 1331};
1300 1332
1301#define GRE6_FEATURES (NETIF_F_SG | \
1302 NETIF_F_FRAGLIST | \
1303 NETIF_F_HIGHDMA | \
1304 NETIF_F_HW_CSUM)
1305
1306static void ip6gre_tap_setup(struct net_device *dev) 1333static void ip6gre_tap_setup(struct net_device *dev)
1307{ 1334{
1308 1335
@@ -1383,26 +1410,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
1383 nt->net = dev_net(dev); 1410 nt->net = dev_net(dev);
1384 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]); 1411 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
1385 1412
1386 dev->features |= GRE6_FEATURES;
1387 dev->hw_features |= GRE6_FEATURES;
1388
1389 if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
1390 /* TCP offload with GRE SEQ is not supported, nor
1391 * can we support 2 levels of outer headers requiring
1392 * an update.
1393 */
1394 if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
1395 (nt->encap.type == TUNNEL_ENCAP_NONE)) {
1396 dev->features |= NETIF_F_GSO_SOFTWARE;
1397 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1398 }
1399
1400 /* Can use a lockless transmit, unless we generate
1401 * output sequences
1402 */
1403 dev->features |= NETIF_F_LLTX;
1404 }
1405
1406 err = register_netdevice(dev); 1413 err = register_netdevice(dev);
1407 if (err) 1414 if (err)
1408 goto out; 1415 goto out;
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index fe04e23af986..841f4a07438e 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -32,6 +32,14 @@ int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
32} 32}
33EXPORT_SYMBOL(xfrm6_rcv_spi); 33EXPORT_SYMBOL(xfrm6_rcv_spi);
34 34
35static int xfrm6_transport_finish2(struct net *net, struct sock *sk,
36 struct sk_buff *skb)
37{
38 if (xfrm_trans_queue(skb, ip6_rcv_finish))
39 __kfree_skb(skb);
40 return -1;
41}
42
35int xfrm6_transport_finish(struct sk_buff *skb, int async) 43int xfrm6_transport_finish(struct sk_buff *skb, int async)
36{ 44{
37 struct xfrm_offload *xo = xfrm_offload(skb); 45 struct xfrm_offload *xo = xfrm_offload(skb);
@@ -56,7 +64,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
56 64
57 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, 65 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
58 dev_net(skb->dev), NULL, skb, skb->dev, NULL, 66 dev_net(skb->dev), NULL, skb, skb->dev, NULL,
59 ip6_rcv_finish); 67 xfrm6_transport_finish2);
60 return -1; 68 return -1;
61} 69}
62 70
diff --git a/net/rds/send.c b/net/rds/send.c
index b52cdc8ae428..f72466c63f0c 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1009,6 +1009,9 @@ static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
1009 continue; 1009 continue;
1010 1010
1011 if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) { 1011 if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
1012 if (cmsg->cmsg_len <
1013 CMSG_LEN(sizeof(struct rds_rdma_args)))
1014 return -EINVAL;
1012 args = CMSG_DATA(cmsg); 1015 args = CMSG_DATA(cmsg);
1013 *rdma_bytes += args->remote_vec.bytes; 1016 *rdma_bytes += args->remote_vec.bytes;
1014 } 1017 }
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b91ea03e3afa..b9d63d2246e6 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -379,6 +379,8 @@ void tcf_block_put(struct tcf_block *block)
379{ 379{
380 struct tcf_block_ext_info ei = {0, }; 380 struct tcf_block_ext_info ei = {0, };
381 381
382 if (!block)
383 return;
382 tcf_block_put_ext(block, block->q, &ei); 384 tcf_block_put_ext(block, block->q, &ei);
383} 385}
384 386
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index cd1b200acae7..661c7144b53a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1040,6 +1040,8 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1040 1040
1041 if (!tp_head) { 1041 if (!tp_head) {
1042 RCU_INIT_POINTER(*miniqp->p_miniq, NULL); 1042 RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
1043 /* Wait for flying RCU callback before it is freed. */
1044 rcu_barrier_bh();
1043 return; 1045 return;
1044 } 1046 }
1045 1047
@@ -1055,7 +1057,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1055 rcu_assign_pointer(*miniqp->p_miniq, miniq); 1057 rcu_assign_pointer(*miniqp->p_miniq, miniq);
1056 1058
1057 if (miniq_old) 1059 if (miniq_old)
1058 /* This is counterpart of the rcu barrier above. We need to 1060 /* This is counterpart of the rcu barriers above. We need to
1059 * block potential new user of miniq_old until all readers 1061 * block potential new user of miniq_old until all readers
1060 * are not seeing it. 1062 * are not seeing it.
1061 */ 1063 */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3253f724a995..b4fb6e4886d2 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4498,7 +4498,7 @@ static int sctp_init_sock(struct sock *sk)
4498 SCTP_DBG_OBJCNT_INC(sock); 4498 SCTP_DBG_OBJCNT_INC(sock);
4499 4499
4500 local_bh_disable(); 4500 local_bh_disable();
4501 percpu_counter_inc(&sctp_sockets_allocated); 4501 sk_sockets_allocated_inc(sk);
4502 sock_prot_inuse_add(net, sk->sk_prot, 1); 4502 sock_prot_inuse_add(net, sk->sk_prot, 1);
4503 4503
4504 /* Nothing can fail after this block, otherwise 4504 /* Nothing can fail after this block, otherwise
@@ -4542,7 +4542,7 @@ static void sctp_destroy_sock(struct sock *sk)
4542 } 4542 }
4543 sctp_endpoint_free(sp->ep); 4543 sctp_endpoint_free(sp->ep);
4544 local_bh_disable(); 4544 local_bh_disable();
4545 percpu_counter_dec(&sctp_sockets_allocated); 4545 sk_sockets_allocated_dec(sk);
4546 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 4546 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
4547 local_bh_enable(); 4547 local_bh_enable();
4548} 4548}
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index c5fda15ba319..1fdab5c4eda8 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -401,7 +401,7 @@ void strp_data_ready(struct strparser *strp)
401 * allows a thread in BH context to safely check if the process 401 * allows a thread in BH context to safely check if the process
402 * lock is held. In this case, if the lock is held, queue work. 402 * lock is held. In this case, if the lock is held, queue work.
403 */ 403 */
404 if (sock_owned_by_user(strp->sk)) { 404 if (sock_owned_by_user_nocheck(strp->sk)) {
405 queue_work(strp_wq, &strp->work); 405 queue_work(strp_wq, &strp->work);
406 return; 406 return;
407 } 407 }
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 47ec121574ce..c8001471da6c 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -324,6 +324,7 @@ restart:
324 if (res) { 324 if (res) {
325 pr_warn("Bearer <%s> rejected, enable failure (%d)\n", 325 pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
326 name, -res); 326 name, -res);
327 kfree(b);
327 return -EINVAL; 328 return -EINVAL;
328 } 329 }
329 330
@@ -347,8 +348,10 @@ restart:
347 if (skb) 348 if (skb)
348 tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); 349 tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
349 350
350 if (tipc_mon_create(net, bearer_id)) 351 if (tipc_mon_create(net, bearer_id)) {
352 bearer_disable(net, b);
351 return -ENOMEM; 353 return -ENOMEM;
354 }
352 355
353 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", 356 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
354 name, 357 name,
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 7ebbdeb2a90e..8e12ab55346b 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -368,18 +368,20 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
368 u16 prev = grp->bc_snd_nxt - 1; 368 u16 prev = grp->bc_snd_nxt - 1;
369 struct tipc_member *m; 369 struct tipc_member *m;
370 struct rb_node *n; 370 struct rb_node *n;
371 u16 ackers = 0;
371 372
372 for (n = rb_first(&grp->members); n; n = rb_next(n)) { 373 for (n = rb_first(&grp->members); n; n = rb_next(n)) {
373 m = container_of(n, struct tipc_member, tree_node); 374 m = container_of(n, struct tipc_member, tree_node);
374 if (tipc_group_is_enabled(m)) { 375 if (tipc_group_is_enabled(m)) {
375 tipc_group_update_member(m, len); 376 tipc_group_update_member(m, len);
376 m->bc_acked = prev; 377 m->bc_acked = prev;
378 ackers++;
377 } 379 }
378 } 380 }
379 381
380 /* Mark number of acknowledges to expect, if any */ 382 /* Mark number of acknowledges to expect, if any */
381 if (ack) 383 if (ack)
382 grp->bc_ackers = grp->member_cnt; 384 grp->bc_ackers = ackers;
383 grp->bc_snd_nxt++; 385 grp->bc_snd_nxt++;
384} 386}
385 387
@@ -848,17 +850,26 @@ void tipc_group_member_evt(struct tipc_group *grp,
848 *usr_wakeup = true; 850 *usr_wakeup = true;
849 m->usr_pending = false; 851 m->usr_pending = false;
850 node_up = tipc_node_is_up(net, node); 852 node_up = tipc_node_is_up(net, node);
851 853 m->event_msg = NULL;
852 /* Hold back event if more messages might be expected */ 854
853 if (m->state != MBR_LEAVING && node_up) { 855 if (node_up) {
854 m->event_msg = skb; 856 /* Hold back event if a LEAVE msg should be expected */
855 tipc_group_decr_active(grp, m); 857 if (m->state != MBR_LEAVING) {
856 m->state = MBR_LEAVING; 858 m->event_msg = skb;
857 } else { 859 tipc_group_decr_active(grp, m);
858 if (node_up) 860 m->state = MBR_LEAVING;
861 } else {
859 msg_set_grp_bc_seqno(hdr, m->bc_syncpt); 862 msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
860 else 863 __skb_queue_tail(inputq, skb);
864 }
865 } else {
866 if (m->state != MBR_LEAVING) {
867 tipc_group_decr_active(grp, m);
868 m->state = MBR_LEAVING;
861 msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt); 869 msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
870 } else {
871 msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
872 }
862 __skb_queue_tail(inputq, skb); 873 __skb_queue_tail(inputq, skb);
863 } 874 }
864 list_del_init(&m->list); 875 list_del_init(&m->list);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 8e884ed06d4b..32dc33a94bc7 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -642,9 +642,13 @@ void tipc_mon_delete(struct net *net, int bearer_id)
642{ 642{
643 struct tipc_net *tn = tipc_net(net); 643 struct tipc_net *tn = tipc_net(net);
644 struct tipc_monitor *mon = tipc_monitor(net, bearer_id); 644 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
645 struct tipc_peer *self = get_self(net, bearer_id); 645 struct tipc_peer *self;
646 struct tipc_peer *peer, *tmp; 646 struct tipc_peer *peer, *tmp;
647 647
648 if (!mon)
649 return;
650
651 self = get_self(net, bearer_id);
648 write_lock_bh(&mon->lock); 652 write_lock_bh(&mon->lock);
649 tn->monitors[bearer_id] = NULL; 653 tn->monitors[bearer_id] = NULL;
650 list_for_each_entry_safe(peer, tmp, &self->list, list) { 654 list_for_each_entry_safe(peer, tmp, &self->list, list) {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 41127d0b925e..3b4084480377 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -727,11 +727,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
727 727
728 switch (sk->sk_state) { 728 switch (sk->sk_state) {
729 case TIPC_ESTABLISHED: 729 case TIPC_ESTABLISHED:
730 case TIPC_CONNECTING:
730 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) 731 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
731 revents |= POLLOUT; 732 revents |= POLLOUT;
732 /* fall thru' */ 733 /* fall thru' */
733 case TIPC_LISTEN: 734 case TIPC_LISTEN:
734 case TIPC_CONNECTING:
735 if (!skb_queue_empty(&sk->sk_receive_queue)) 735 if (!skb_queue_empty(&sk->sk_receive_queue))
736 revents |= POLLIN | POLLRDNORM; 736 revents |= POLLIN | POLLRDNORM;
737 break; 737 break;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 347ab31574d5..3f6f6f8c9fa5 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -8,15 +8,29 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/bottom_half.h>
12#include <linux/interrupt.h>
11#include <linux/slab.h> 13#include <linux/slab.h>
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/percpu.h>
14#include <net/dst.h> 17#include <net/dst.h>
15#include <net/ip.h> 18#include <net/ip.h>
16#include <net/xfrm.h> 19#include <net/xfrm.h>
17#include <net/ip_tunnels.h> 20#include <net/ip_tunnels.h>
18#include <net/ip6_tunnel.h> 21#include <net/ip6_tunnel.h>
19 22
23struct xfrm_trans_tasklet {
24 struct tasklet_struct tasklet;
25 struct sk_buff_head queue;
26};
27
28struct xfrm_trans_cb {
29 int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
30};
31
32#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
33
20static struct kmem_cache *secpath_cachep __read_mostly; 34static struct kmem_cache *secpath_cachep __read_mostly;
21 35
22static DEFINE_SPINLOCK(xfrm_input_afinfo_lock); 36static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
@@ -25,6 +39,8 @@ static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];
25static struct gro_cells gro_cells; 39static struct gro_cells gro_cells;
26static struct net_device xfrm_napi_dev; 40static struct net_device xfrm_napi_dev;
27 41
42static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);
43
28int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo) 44int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
29{ 45{
30 int err = 0; 46 int err = 0;
@@ -207,7 +223,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
207 xfrm_address_t *daddr; 223 xfrm_address_t *daddr;
208 struct xfrm_mode *inner_mode; 224 struct xfrm_mode *inner_mode;
209 u32 mark = skb->mark; 225 u32 mark = skb->mark;
210 unsigned int family; 226 unsigned int family = AF_UNSPEC;
211 int decaps = 0; 227 int decaps = 0;
212 int async = 0; 228 int async = 0;
213 bool xfrm_gro = false; 229 bool xfrm_gro = false;
@@ -216,6 +232,16 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
216 232
217 if (encap_type < 0) { 233 if (encap_type < 0) {
218 x = xfrm_input_state(skb); 234 x = xfrm_input_state(skb);
235
236 if (unlikely(x->km.state != XFRM_STATE_VALID)) {
237 if (x->km.state == XFRM_STATE_ACQ)
238 XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
239 else
240 XFRM_INC_STATS(net,
241 LINUX_MIB_XFRMINSTATEINVALID);
242 goto drop;
243 }
244
219 family = x->outer_mode->afinfo->family; 245 family = x->outer_mode->afinfo->family;
220 246
221 /* An encap_type of -1 indicates async resumption. */ 247 /* An encap_type of -1 indicates async resumption. */
@@ -467,9 +493,41 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
467} 493}
468EXPORT_SYMBOL(xfrm_input_resume); 494EXPORT_SYMBOL(xfrm_input_resume);
469 495
496static void xfrm_trans_reinject(unsigned long data)
497{
498 struct xfrm_trans_tasklet *trans = (void *)data;
499 struct sk_buff_head queue;
500 struct sk_buff *skb;
501
502 __skb_queue_head_init(&queue);
503 skb_queue_splice_init(&trans->queue, &queue);
504
505 while ((skb = __skb_dequeue(&queue)))
506 XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb);
507}
508
509int xfrm_trans_queue(struct sk_buff *skb,
510 int (*finish)(struct net *, struct sock *,
511 struct sk_buff *))
512{
513 struct xfrm_trans_tasklet *trans;
514
515 trans = this_cpu_ptr(&xfrm_trans_tasklet);
516
517 if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
518 return -ENOBUFS;
519
520 XFRM_TRANS_SKB_CB(skb)->finish = finish;
521 skb_queue_tail(&trans->queue, skb);
522 tasklet_schedule(&trans->tasklet);
523 return 0;
524}
525EXPORT_SYMBOL(xfrm_trans_queue);
526
470void __init xfrm_input_init(void) 527void __init xfrm_input_init(void)
471{ 528{
472 int err; 529 int err;
530 int i;
473 531
474 init_dummy_netdev(&xfrm_napi_dev); 532 init_dummy_netdev(&xfrm_napi_dev);
475 err = gro_cells_init(&gro_cells, &xfrm_napi_dev); 533 err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
@@ -480,4 +538,13 @@ void __init xfrm_input_init(void)
480 sizeof(struct sec_path), 538 sizeof(struct sec_path),
481 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 539 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
482 NULL); 540 NULL);
541
542 for_each_possible_cpu(i) {
543 struct xfrm_trans_tasklet *trans;
544
545 trans = &per_cpu(xfrm_trans_tasklet, i);
546 __skb_queue_head_init(&trans->queue);
547 tasklet_init(&trans->tasklet, xfrm_trans_reinject,
548 (unsigned long)trans);
549 }
483} 550}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9542975eb2f9..70aa5cb0c659 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1168,9 +1168,15 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1168 again: 1168 again:
1169 pol = rcu_dereference(sk->sk_policy[dir]); 1169 pol = rcu_dereference(sk->sk_policy[dir]);
1170 if (pol != NULL) { 1170 if (pol != NULL) {
1171 bool match = xfrm_selector_match(&pol->selector, fl, family); 1171 bool match;
1172 int err = 0; 1172 int err = 0;
1173 1173
1174 if (pol->family != family) {
1175 pol = NULL;
1176 goto out;
1177 }
1178
1179 match = xfrm_selector_match(&pol->selector, fl, family);
1174 if (match) { 1180 if (match) {
1175 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) { 1181 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1176 pol = NULL; 1182 pol = NULL;
@@ -1833,6 +1839,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1833 sizeof(struct xfrm_policy *) * num_pols) == 0 && 1839 sizeof(struct xfrm_policy *) * num_pols) == 0 &&
1834 xfrm_xdst_can_reuse(xdst, xfrm, err)) { 1840 xfrm_xdst_can_reuse(xdst, xfrm, err)) {
1835 dst_hold(&xdst->u.dst); 1841 dst_hold(&xdst->u.dst);
1842 xfrm_pols_put(pols, num_pols);
1836 while (err > 0) 1843 while (err > 0)
1837 xfrm_state_put(xfrm[--err]); 1844 xfrm_state_put(xfrm[--err]);
1838 return xdst; 1845 return xdst;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 065d89606888..500b3391f474 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1343,6 +1343,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
1343 1343
1344 if (orig->aead) { 1344 if (orig->aead) {
1345 x->aead = xfrm_algo_aead_clone(orig->aead); 1345 x->aead = xfrm_algo_aead_clone(orig->aead);
1346 x->geniv = orig->geniv;
1346 if (!x->aead) 1347 if (!x->aead)
1347 goto error; 1348 goto error;
1348 } 1349 }
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 983b0233767b..bdb48e5dba04 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1419,11 +1419,14 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1419 1419
1420static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) 1420static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1421{ 1421{
1422 u16 prev_family;
1422 int i; 1423 int i;
1423 1424
1424 if (nr > XFRM_MAX_DEPTH) 1425 if (nr > XFRM_MAX_DEPTH)
1425 return -EINVAL; 1426 return -EINVAL;
1426 1427
1428 prev_family = family;
1429
1427 for (i = 0; i < nr; i++) { 1430 for (i = 0; i < nr; i++) {
1428 /* We never validated the ut->family value, so many 1431 /* We never validated the ut->family value, so many
1429 * applications simply leave it at zero. The check was 1432 * applications simply leave it at zero. The check was
@@ -1435,6 +1438,12 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1435 if (!ut[i].family) 1438 if (!ut[i].family)
1436 ut[i].family = family; 1439 ut[i].family = family;
1437 1440
1441 if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
1442 (ut[i].family != prev_family))
1443 return -EINVAL;
1444
1445 prev_family = ut[i].family;
1446
1438 switch (ut[i].family) { 1447 switch (ut[i].family) {
1439 case AF_INET: 1448 case AF_INET:
1440 break; 1449 break;
@@ -1445,6 +1454,21 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1445 default: 1454 default:
1446 return -EINVAL; 1455 return -EINVAL;
1447 } 1456 }
1457
1458 switch (ut[i].id.proto) {
1459 case IPPROTO_AH:
1460 case IPPROTO_ESP:
1461 case IPPROTO_COMP:
1462#if IS_ENABLED(CONFIG_IPV6)
1463 case IPPROTO_ROUTING:
1464 case IPPROTO_DSTOPTS:
1465#endif
1466 case IPSEC_PROTO_ANY:
1467 break;
1468 default:
1469 return -EINVAL;
1470 }
1471
1448 } 1472 }
1449 1473
1450 return 0; 1474 return 0;
@@ -2470,7 +2494,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2470 [XFRMA_PROTO] = { .type = NLA_U8 }, 2494 [XFRMA_PROTO] = { .type = NLA_U8 },
2471 [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) }, 2495 [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
2472 [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) }, 2496 [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) },
2473 [XFRMA_OUTPUT_MARK] = { .len = NLA_U32 }, 2497 [XFRMA_OUTPUT_MARK] = { .type = NLA_U32 },
2474}; 2498};
2475 2499
2476static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { 2500static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index e2450c8e88e6..a8c3a33dd185 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -523,21 +523,23 @@ static int do_show(int argc, char **argv)
523 break; 523 break;
524 p_err("can't get next map: %s%s", strerror(errno), 524 p_err("can't get next map: %s%s", strerror(errno),
525 errno == EINVAL ? " -- kernel too old?" : ""); 525 errno == EINVAL ? " -- kernel too old?" : "");
526 return -1; 526 break;
527 } 527 }
528 528
529 fd = bpf_map_get_fd_by_id(id); 529 fd = bpf_map_get_fd_by_id(id);
530 if (fd < 0) { 530 if (fd < 0) {
531 if (errno == ENOENT)
532 continue;
531 p_err("can't get map by id (%u): %s", 533 p_err("can't get map by id (%u): %s",
532 id, strerror(errno)); 534 id, strerror(errno));
533 return -1; 535 break;
534 } 536 }
535 537
536 err = bpf_obj_get_info_by_fd(fd, &info, &len); 538 err = bpf_obj_get_info_by_fd(fd, &info, &len);
537 if (err) { 539 if (err) {
538 p_err("can't get map info: %s", strerror(errno)); 540 p_err("can't get map info: %s", strerror(errno));
539 close(fd); 541 close(fd);
540 return -1; 542 break;
541 } 543 }
542 544
543 if (json_output) 545 if (json_output)
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index ad619b96c276..dded77345bfb 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -382,6 +382,8 @@ static int do_show(int argc, char **argv)
382 382
383 fd = bpf_prog_get_fd_by_id(id); 383 fd = bpf_prog_get_fd_by_id(id);
384 if (fd < 0) { 384 if (fd < 0) {
385 if (errno == ENOENT)
386 continue;
385 p_err("can't get prog by id (%u): %s", 387 p_err("can't get prog by id (%u): %s",
386 id, strerror(errno)); 388 id, strerror(errno));
387 err = -1; 389 err = -1;
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 05fc4e2e7b3a..9316e648a880 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -39,7 +39,7 @@ $(BPFOBJ): force
39CLANG ?= clang 39CLANG ?= clang
40LLC ?= llc 40LLC ?= llc
41 41
42PROBE := $(shell llc -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1) 42PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
43 43
44# Let newer LLVM versions transparently probe the kernel for availability 44# Let newer LLVM versions transparently probe the kernel for availability
45# of full BPF instruction set. 45# of full BPF instruction set.