aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-01-10 20:53:18 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2018-01-10 20:53:18 -0500
commit64fce444f126b9d26574221330d9599fe998944e (patch)
treef2dad9f37eb5204802c201507881cb9e035225c6
parent5f615b97cdea733c955e152774e1501107564fbd (diff)
parentccc12b11c5332c84442ef120dcd631523be75089 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) BPF speculation prevention and BPF_JIT_ALWAYS_ON, from Alexei Starovoitov. 2) Revert dev_get_random_name() changes as adjust the error code returns seen by userspace definitely breaks stuff. 3) Fix TX DMA map/unmap on older iwlwifi devices, from Emmanuel Grumbach. 4) From wrong AF family when requesting sock diag modules, from Andrii Vladyka. 5) Don't add new ipv6 routes attached to the null_entry, from Wei Wang. 6) Some SCTP sockopt length fixes from Marcelo Ricardo Leitner. 7) Don't leak when removing VLAN ID 0, from Cong Wang. 8) Hey there's a potential leak in ipv6_make_skb() too, from Eric Dumazet. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (27 commits) ipv6: sr: fix TLVs not being copied using setsockopt ipv6: fix possible mem leaks in ipv6_make_skb() mlxsw: spectrum_qdisc: Don't use variable array in mlxsw_sp_tclass_congestion_enable mlxsw: pci: Wait after reset before accessing HW nfp: always unmask aux interrupts at init 8021q: fix a memory leak for VLAN 0 device of_mdio: avoid MDIO bus removal when a PHY is missing caif_usb: use strlcpy() instead of strncpy() doc: clarification about setting SO_ZEROCOPY net: gianfar_ptp: move set_fipers() to spinlock protecting area sctp: make use of pre-calculated len sctp: add a ceiling to optlen in some sockopts sctp: GFP_ATOMIC is not needed in sctp_setsockopt_events bpf: introduce BPF_JIT_ALWAYS_ON config bpf: avoid false sharing of map refcount with max_entries ipv6: remove null_entry before adding default route SolutionEngine771x: add Ether TSU resource SolutionEngine771x: fix Ether platform data docs-rst: networking: wire up msg_zerocopy net: ipv4: emulate READ_ONCE() on ->hdrincl bit-field in raw_sendmsg() ...
-rw-r--r--Documentation/networking/index.rst2
-rw-r--r--Documentation/networking/msg_zerocopy.rst4
-rw-r--r--arch/sh/boards/mach-se/770x/setup.c24
-rw-r--r--arch/sh/include/mach-se/mach/se.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c23
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/of/of_mdio.c9
-rw-r--r--include/linux/bpf.h26
-rw-r--r--init/Kconfig7
-rw-r--r--kernel/bpf/arraymap.c47
-rw-r--r--kernel/bpf/core.c19
-rw-r--r--kernel/bpf/sockmap.c11
-rw-r--r--kernel/bpf/verifier.c36
-rw-r--r--lib/test_bpf.c11
-rw-r--r--net/8021q/vlan.c7
-rw-r--r--net/caif/caif_dev.c5
-rw-r--r--net/caif/caif_usb.c4
-rw-r--r--net/caif/cfcnfg.c10
-rw-r--r--net/caif/cfctrl.c4
-rw-r--r--net/core/filter.c6
-rw-r--r--net/core/sock_diag.c2
-rw-r--r--net/core/sysctl_net_core.c6
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv6/exthdrs.c9
-rw-r--r--net/ipv6/ip6_fib.c38
-rw-r--r--net/ipv6/ip6_output.c5
-rw-r--r--net/sctp/socket.c28
-rw-r--r--net/socket.c9
-rw-r--r--tools/testing/selftests/bpf/test_align.c22
37 files changed, 303 insertions, 131 deletions
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 66e620866245..7d4b15977d61 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -9,6 +9,7 @@ Contents:
9 batman-adv 9 batman-adv
10 kapi 10 kapi
11 z8530book 11 z8530book
12 msg_zerocopy
12 13
13.. only:: subproject 14.. only:: subproject
14 15
@@ -16,4 +17,3 @@ Contents:
16 ======= 17 =======
17 18
18 * :ref:`genindex` 19 * :ref:`genindex`
19
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index 77f6d7e25cfd..291a01264967 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -72,6 +72,10 @@ this flag, a process must first signal intent by setting a socket option:
72 if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one))) 72 if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
73 error(1, errno, "setsockopt zerocopy"); 73 error(1, errno, "setsockopt zerocopy");
74 74
75Setting the socket option only works when the socket is in its initial
76(TCP_CLOSED) state. Trying to set the option for a socket returned by accept(),
77for example, will lead to an EBUSY error. In this case, the option should be set
78to the listening socket and it will be inherited by the accepted sockets.
75 79
76Transmission 80Transmission
77------------ 81------------
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 77c35350ee77..412326d59e6f 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/sh_eth.h>
12#include <mach-se/mach/se.h> 13#include <mach-se/mach/se.h>
13#include <mach-se/mach/mrshpc.h> 14#include <mach-se/mach/mrshpc.h>
14#include <asm/machvec.h> 15#include <asm/machvec.h>
@@ -115,13 +116,23 @@ static struct platform_device heartbeat_device = {
115#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\ 116#if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
116 defined(CONFIG_CPU_SUBTYPE_SH7712) 117 defined(CONFIG_CPU_SUBTYPE_SH7712)
117/* SH771X Ethernet driver */ 118/* SH771X Ethernet driver */
119static struct sh_eth_plat_data sh_eth_plat = {
120 .phy = PHY_ID,
121 .phy_interface = PHY_INTERFACE_MODE_MII,
122};
123
118static struct resource sh_eth0_resources[] = { 124static struct resource sh_eth0_resources[] = {
119 [0] = { 125 [0] = {
120 .start = SH_ETH0_BASE, 126 .start = SH_ETH0_BASE,
121 .end = SH_ETH0_BASE + 0x1B8, 127 .end = SH_ETH0_BASE + 0x1B8 - 1,
122 .flags = IORESOURCE_MEM, 128 .flags = IORESOURCE_MEM,
123 }, 129 },
124 [1] = { 130 [1] = {
131 .start = SH_TSU_BASE,
132 .end = SH_TSU_BASE + 0x200 - 1,
133 .flags = IORESOURCE_MEM,
134 },
135 [2] = {
125 .start = SH_ETH0_IRQ, 136 .start = SH_ETH0_IRQ,
126 .end = SH_ETH0_IRQ, 137 .end = SH_ETH0_IRQ,
127 .flags = IORESOURCE_IRQ, 138 .flags = IORESOURCE_IRQ,
@@ -132,7 +143,7 @@ static struct platform_device sh_eth0_device = {
132 .name = "sh771x-ether", 143 .name = "sh771x-ether",
133 .id = 0, 144 .id = 0,
134 .dev = { 145 .dev = {
135 .platform_data = PHY_ID, 146 .platform_data = &sh_eth_plat,
136 }, 147 },
137 .num_resources = ARRAY_SIZE(sh_eth0_resources), 148 .num_resources = ARRAY_SIZE(sh_eth0_resources),
138 .resource = sh_eth0_resources, 149 .resource = sh_eth0_resources,
@@ -141,10 +152,15 @@ static struct platform_device sh_eth0_device = {
141static struct resource sh_eth1_resources[] = { 152static struct resource sh_eth1_resources[] = {
142 [0] = { 153 [0] = {
143 .start = SH_ETH1_BASE, 154 .start = SH_ETH1_BASE,
144 .end = SH_ETH1_BASE + 0x1B8, 155 .end = SH_ETH1_BASE + 0x1B8 - 1,
145 .flags = IORESOURCE_MEM, 156 .flags = IORESOURCE_MEM,
146 }, 157 },
147 [1] = { 158 [1] = {
159 .start = SH_TSU_BASE,
160 .end = SH_TSU_BASE + 0x200 - 1,
161 .flags = IORESOURCE_MEM,
162 },
163 [2] = {
148 .start = SH_ETH1_IRQ, 164 .start = SH_ETH1_IRQ,
149 .end = SH_ETH1_IRQ, 165 .end = SH_ETH1_IRQ,
150 .flags = IORESOURCE_IRQ, 166 .flags = IORESOURCE_IRQ,
@@ -155,7 +171,7 @@ static struct platform_device sh_eth1_device = {
155 .name = "sh771x-ether", 171 .name = "sh771x-ether",
156 .id = 1, 172 .id = 1,
157 .dev = { 173 .dev = {
158 .platform_data = PHY_ID, 174 .platform_data = &sh_eth_plat,
159 }, 175 },
160 .num_resources = ARRAY_SIZE(sh_eth1_resources), 176 .num_resources = ARRAY_SIZE(sh_eth1_resources),
161 .resource = sh_eth1_resources, 177 .resource = sh_eth1_resources,
diff --git a/arch/sh/include/mach-se/mach/se.h b/arch/sh/include/mach-se/mach/se.h
index 4246ef9b07a3..aa83fe1ff0b1 100644
--- a/arch/sh/include/mach-se/mach/se.h
+++ b/arch/sh/include/mach-se/mach/se.h
@@ -100,6 +100,7 @@
100/* Base address */ 100/* Base address */
101#define SH_ETH0_BASE 0xA7000000 101#define SH_ETH0_BASE 0xA7000000
102#define SH_ETH1_BASE 0xA7000400 102#define SH_ETH1_BASE 0xA7000400
103#define SH_TSU_BASE 0xA7000800
103/* PHY ID */ 104/* PHY ID */
104#if defined(CONFIG_CPU_SUBTYPE_SH7710) 105#if defined(CONFIG_CPU_SUBTYPE_SH7710)
105# define PHY_ID 0x00 106# define PHY_ID 0x00
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 544114281ea7..9f8d4f8e57e3 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
319 now = tmr_cnt_read(etsects); 319 now = tmr_cnt_read(etsects);
320 now += delta; 320 now += delta;
321 tmr_cnt_write(etsects, now); 321 tmr_cnt_write(etsects, now);
322 set_fipers(etsects);
322 323
323 spin_unlock_irqrestore(&etsects->lock, flags); 324 spin_unlock_irqrestore(&etsects->lock, flags);
324 325
325 set_fipers(etsects);
326
327 return 0; 326 return 0;
328} 327}
329 328
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 23f7d828cf67..6ef20e5cc77d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
1643 return 0; 1643 return 0;
1644 } 1644 }
1645 1645
1646 wmb(); /* reset needs to be written before we read control register */ 1646 /* Reset needs to be written before we read control register, and
1647 * we must wait for the HW to become responsive once again
1648 */
1649 wmb();
1650 msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
1651
1647 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); 1652 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
1648 do { 1653 do {
1649 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); 1654 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index a6441208e9d9..fb082ad21b00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -59,6 +59,7 @@
59#define MLXSW_PCI_SW_RESET 0xF0010 59#define MLXSW_PCI_SW_RESET 0xF0010
60#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) 60#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
61#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 61#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
62#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
62#define MLXSW_PCI_FW_READY 0xA1844 63#define MLXSW_PCI_FW_READY 0xA1844
63#define MLXSW_PCI_FW_READY_MASK 0xFFFF 64#define MLXSW_PCI_FW_READY_MASK 0xFFFF
64#define MLXSW_PCI_FW_READY_MAGIC 0x5E 65#define MLXSW_PCI_FW_READY_MAGIC 0x5E
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index c33beac5def0..b5397da94d7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -46,7 +46,8 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
46 int tclass_num, u32 min, u32 max, 46 int tclass_num, u32 min, u32 max,
47 u32 probability, bool is_ecn) 47 u32 probability, bool is_ecn)
48{ 48{
49 char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)]; 49 char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
50 char cwtp_cmd[MLXSW_REG_CWTP_LEN];
50 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 51 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
51 int err; 52 int err;
52 53
@@ -60,10 +61,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
60 if (err) 61 if (err)
61 return err; 62 return err;
62 63
63 mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num, 64 mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
64 MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn); 65 MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
65 66
66 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd); 67 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
67} 68}
68 69
69static int 70static int
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1a603fdd9e80..99b0487b6d82 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
568 return err; 568 return err;
569 } 569 }
570 nn_writeb(nn, ctrl_offset, entry->entry); 570 nn_writeb(nn, ctrl_offset, entry->entry);
571 nfp_net_irq_unmask(nn, entry->entry);
571 572
572 return 0; 573 return 0;
573} 574}
@@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
582 unsigned int vector_idx) 583 unsigned int vector_idx)
583{ 584{
584 nn_writeb(nn, ctrl_offset, 0xff); 585 nn_writeb(nn, ctrl_offset, 0xff);
586 nn_pci_flush(nn);
585 free_irq(nn->irq_entries[vector_idx].vector, nn); 587 free_irq(nn->irq_entries[vector_idx].vector, nn);
586} 588}
587 589
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index f7d228b5ba93..987f1252a3cf 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
384 } 384 }
385 } 385 }
386 386
387 if (changed & IEEE80211_CONF_CHANGE_PS) {
388 list_for_each_entry(tmp, &wcn->vif_list, list) {
389 vif = wcn36xx_priv_to_vif(tmp);
390 if (hw->conf.flags & IEEE80211_CONF_PS) {
391 if (vif->bss_conf.ps) /* ps allowed ? */
392 wcn36xx_pmc_enter_bmps_state(wcn, vif);
393 } else {
394 wcn36xx_pmc_exit_bmps_state(wcn, vif);
395 }
396 }
397 }
398
387 mutex_unlock(&wcn->conf_mutex); 399 mutex_unlock(&wcn->conf_mutex);
388 400
389 return 0; 401 return 0;
@@ -747,17 +759,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
747 vif_priv->dtim_period = bss_conf->dtim_period; 759 vif_priv->dtim_period = bss_conf->dtim_period;
748 } 760 }
749 761
750 if (changed & BSS_CHANGED_PS) {
751 wcn36xx_dbg(WCN36XX_DBG_MAC,
752 "mac bss PS set %d\n",
753 bss_conf->ps);
754 if (bss_conf->ps) {
755 wcn36xx_pmc_enter_bmps_state(wcn, vif);
756 } else {
757 wcn36xx_pmc_exit_bmps_state(wcn, vif);
758 }
759 }
760
761 if (changed & BSS_CHANGED_BSSID) { 762 if (changed & BSS_CHANGED_BSSID) {
762 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n", 763 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
763 bss_conf->bssid); 764 bss_conf->bssid);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 589fe5f70971..1976b80c235f 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
45 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 45 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
46 46
47 if (WCN36XX_BMPS != vif_priv->pw_state) { 47 if (WCN36XX_BMPS != vif_priv->pw_state) {
48 wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n"); 48 /* Unbalanced call or last BMPS enter failed */
49 return -EINVAL; 49 wcn36xx_dbg(WCN36XX_DBG_PMC,
50 "Not in BMPS mode, no need to exit\n");
51 return -EALREADY;
50 } 52 }
51 wcn36xx_smd_exit_bmps(wcn, vif); 53 wcn36xx_smd_exit_bmps(wcn, vif);
52 vif_priv->pw_state = WCN36XX_FULL_POWER; 54 vif_priv->pw_state = WCN36XX_FULL_POWER;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index d749abeca3ae..403e65c309d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -670,11 +670,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
670 return index & (q->n_window - 1); 670 return index & (q->n_window - 1);
671} 671}
672 672
673static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, 673static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
674 struct iwl_txq *txq, int idx) 674 struct iwl_txq *txq, int idx)
675{ 675{
676 return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq, 676 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
677 idx); 677
678 if (trans->cfg->use_tfh)
679 idx = iwl_pcie_get_cmd_index(txq, idx);
680
681 return txq->tfds + trans_pcie->tfd_size * idx;
678} 682}
679 683
680static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 684static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 16b345f54ff0..6d0a907d5ba5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
171 171
172static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 172static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
173{ 173{
174 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
175
176 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 174 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
177 * idx is bounded by n_window 175 * idx is bounded by n_window
178 */ 176 */
@@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
181 lockdep_assert_held(&txq->lock); 179 lockdep_assert_held(&txq->lock);
182 180
183 iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, 181 iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
184 iwl_pcie_get_tfd(trans_pcie, txq, idx)); 182 iwl_pcie_get_tfd(trans, txq, idx));
185 183
186 /* free SKB */ 184 /* free SKB */
187 if (txq->entries) { 185 if (txq->entries) {
@@ -364,11 +362,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
364 struct sk_buff *skb, 362 struct sk_buff *skb,
365 struct iwl_cmd_meta *out_meta) 363 struct iwl_cmd_meta *out_meta)
366{ 364{
367 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
368 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 365 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
369 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); 366 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
370 struct iwl_tfh_tfd *tfd = 367 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
371 iwl_pcie_get_tfd(trans_pcie, txq, idx);
372 dma_addr_t tb_phys; 368 dma_addr_t tb_phys;
373 bool amsdu; 369 bool amsdu;
374 int i, len, tb1_len, tb2_len, hdr_len; 370 int i, len, tb1_len, tb2_len, hdr_len;
@@ -565,8 +561,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
565 u8 group_id = iwl_cmd_groupid(cmd->id); 561 u8 group_id = iwl_cmd_groupid(cmd->id);
566 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 562 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
567 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 563 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
568 struct iwl_tfh_tfd *tfd = 564 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
569 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
570 565
571 memset(tfd, 0, sizeof(*tfd)); 566 memset(tfd, 0, sizeof(*tfd));
572 567
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fed6d842a5e1..3f85713c41dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
373{ 373{
374 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 374 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
375 int i, num_tbs; 375 int i, num_tbs;
376 void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index); 376 void *tfd = iwl_pcie_get_tfd(trans, txq, index);
377 377
378 /* Sanity check on number of chunks */ 378 /* Sanity check on number of chunks */
379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
@@ -2018,7 +2018,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
2018 } 2018 }
2019 2019
2020 trace_iwlwifi_dev_tx(trans->dev, skb, 2020 trace_iwlwifi_dev_tx(trans->dev, skb,
2021 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2021 iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2022 trans_pcie->tfd_size, 2022 trans_pcie->tfd_size,
2023 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 2023 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2024 hdr_len); 2024 hdr_len);
@@ -2092,7 +2092,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2092 IEEE80211_CCMP_HDR_LEN : 0; 2092 IEEE80211_CCMP_HDR_LEN : 0;
2093 2093
2094 trace_iwlwifi_dev_tx(trans->dev, skb, 2094 trace_iwlwifi_dev_tx(trans->dev, skb,
2095 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), 2095 iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2096 trans_pcie->tfd_size, 2096 trans_pcie->tfd_size,
2097 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); 2097 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
2098 2098
@@ -2425,7 +2425,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2425 memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, 2425 memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
2426 IWL_FIRST_TB_SIZE); 2426 IWL_FIRST_TB_SIZE);
2427 2427
2428 tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); 2428 tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
2429 /* Set up entry for this TFD in Tx byte-count array */ 2429 /* Set up entry for this TFD in Tx byte-count array */
2430 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), 2430 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
2431 iwl_pcie_tfd_get_num_tbs(trans, tfd)); 2431 iwl_pcie_tfd_get_num_tbs(trans, tfd));
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 3481e69738b5..a327be1d264b 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -231,7 +231,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
231 rc = of_mdiobus_register_phy(mdio, child, addr); 231 rc = of_mdiobus_register_phy(mdio, child, addr);
232 else 232 else
233 rc = of_mdiobus_register_device(mdio, child, addr); 233 rc = of_mdiobus_register_device(mdio, child, addr);
234 if (rc) 234
235 if (rc == -ENODEV)
236 dev_err(&mdio->dev,
237 "MDIO device at address %d is missing.\n",
238 addr);
239 else if (rc)
235 goto unregister; 240 goto unregister;
236 } 241 }
237 242
@@ -255,7 +260,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
255 260
256 if (of_mdiobus_child_is_phy(child)) { 261 if (of_mdiobus_child_is_phy(child)) {
257 rc = of_mdiobus_register_phy(mdio, child, addr); 262 rc = of_mdiobus_register_phy(mdio, child, addr);
258 if (rc) 263 if (rc && rc != -ENODEV)
259 goto unregister; 264 goto unregister;
260 } 265 }
261 } 266 }
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b63a592ad29d..0b25cf87b6d6 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -43,7 +43,14 @@ struct bpf_map_ops {
43}; 43};
44 44
45struct bpf_map { 45struct bpf_map {
46 atomic_t refcnt; 46 /* 1st cacheline with read-mostly members of which some
47 * are also accessed in fast-path (e.g. ops, max_entries).
48 */
49 const struct bpf_map_ops *ops ____cacheline_aligned;
50 struct bpf_map *inner_map_meta;
51#ifdef CONFIG_SECURITY
52 void *security;
53#endif
47 enum bpf_map_type map_type; 54 enum bpf_map_type map_type;
48 u32 key_size; 55 u32 key_size;
49 u32 value_size; 56 u32 value_size;
@@ -52,15 +59,17 @@ struct bpf_map {
52 u32 pages; 59 u32 pages;
53 u32 id; 60 u32 id;
54 int numa_node; 61 int numa_node;
55 struct user_struct *user; 62 bool unpriv_array;
56 const struct bpf_map_ops *ops; 63 /* 7 bytes hole */
57 struct work_struct work; 64
65 /* 2nd cacheline with misc members to avoid false sharing
66 * particularly with refcounting.
67 */
68 struct user_struct *user ____cacheline_aligned;
69 atomic_t refcnt;
58 atomic_t usercnt; 70 atomic_t usercnt;
59 struct bpf_map *inner_map_meta; 71 struct work_struct work;
60 char name[BPF_OBJ_NAME_LEN]; 72 char name[BPF_OBJ_NAME_LEN];
61#ifdef CONFIG_SECURITY
62 void *security;
63#endif
64}; 73};
65 74
66/* function argument constraints */ 75/* function argument constraints */
@@ -221,6 +230,7 @@ struct bpf_prog_aux {
221struct bpf_array { 230struct bpf_array {
222 struct bpf_map map; 231 struct bpf_map map;
223 u32 elem_size; 232 u32 elem_size;
233 u32 index_mask;
224 /* 'ownership' of prog_array is claimed by the first program that 234 /* 'ownership' of prog_array is claimed by the first program that
225 * is going to use this map or by the first program which FD is stored 235 * is going to use this map or by the first program which FD is stored
226 * in the map to make sure that all callers and callees have the same 236 * in the map to make sure that all callers and callees have the same
diff --git a/init/Kconfig b/init/Kconfig
index 690a381adee0..19a6b845d834 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1396,6 +1396,13 @@ config BPF_SYSCALL
1396 Enable the bpf() system call that allows to manipulate eBPF 1396 Enable the bpf() system call that allows to manipulate eBPF
1397 programs and maps via file descriptors. 1397 programs and maps via file descriptors.
1398 1398
1399config BPF_JIT_ALWAYS_ON
1400 bool "Permanently enable BPF JIT and remove BPF interpreter"
1401 depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
1402 help
1403 Enables BPF JIT and removes BPF interpreter to avoid
1404 speculative execution of BPF instructions by the interpreter
1405
1399config USERFAULTFD 1406config USERFAULTFD
1400 bool "Enable userfaultfd() system call" 1407 bool "Enable userfaultfd() system call"
1401 select ANON_INODES 1408 select ANON_INODES
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 7c25426d3cf5..aaa319848e7d 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -53,9 +53,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
53{ 53{
54 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 54 bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
55 int numa_node = bpf_map_attr_numa_node(attr); 55 int numa_node = bpf_map_attr_numa_node(attr);
56 u32 elem_size, index_mask, max_entries;
57 bool unpriv = !capable(CAP_SYS_ADMIN);
56 struct bpf_array *array; 58 struct bpf_array *array;
57 u64 array_size; 59 u64 array_size;
58 u32 elem_size;
59 60
60 /* check sanity of attributes */ 61 /* check sanity of attributes */
61 if (attr->max_entries == 0 || attr->key_size != 4 || 62 if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -72,11 +73,20 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
72 73
73 elem_size = round_up(attr->value_size, 8); 74 elem_size = round_up(attr->value_size, 8);
74 75
76 max_entries = attr->max_entries;
77 index_mask = roundup_pow_of_two(max_entries) - 1;
78
79 if (unpriv)
80 /* round up array size to nearest power of 2,
81 * since cpu will speculate within index_mask limits
82 */
83 max_entries = index_mask + 1;
84
75 array_size = sizeof(*array); 85 array_size = sizeof(*array);
76 if (percpu) 86 if (percpu)
77 array_size += (u64) attr->max_entries * sizeof(void *); 87 array_size += (u64) max_entries * sizeof(void *);
78 else 88 else
79 array_size += (u64) attr->max_entries * elem_size; 89 array_size += (u64) max_entries * elem_size;
80 90
81 /* make sure there is no u32 overflow later in round_up() */ 91 /* make sure there is no u32 overflow later in round_up() */
82 if (array_size >= U32_MAX - PAGE_SIZE) 92 if (array_size >= U32_MAX - PAGE_SIZE)
@@ -86,6 +96,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
86 array = bpf_map_area_alloc(array_size, numa_node); 96 array = bpf_map_area_alloc(array_size, numa_node);
87 if (!array) 97 if (!array)
88 return ERR_PTR(-ENOMEM); 98 return ERR_PTR(-ENOMEM);
99 array->index_mask = index_mask;
100 array->map.unpriv_array = unpriv;
89 101
90 /* copy mandatory map attributes */ 102 /* copy mandatory map attributes */
91 array->map.map_type = attr->map_type; 103 array->map.map_type = attr->map_type;
@@ -121,12 +133,13 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
121 if (unlikely(index >= array->map.max_entries)) 133 if (unlikely(index >= array->map.max_entries))
122 return NULL; 134 return NULL;
123 135
124 return array->value + array->elem_size * index; 136 return array->value + array->elem_size * (index & array->index_mask);
125} 137}
126 138
127/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */ 139/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
128static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) 140static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
129{ 141{
142 struct bpf_array *array = container_of(map, struct bpf_array, map);
130 struct bpf_insn *insn = insn_buf; 143 struct bpf_insn *insn = insn_buf;
131 u32 elem_size = round_up(map->value_size, 8); 144 u32 elem_size = round_up(map->value_size, 8);
132 const int ret = BPF_REG_0; 145 const int ret = BPF_REG_0;
@@ -135,7 +148,12 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
135 148
136 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 149 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
137 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 150 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
138 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); 151 if (map->unpriv_array) {
152 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
153 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
154 } else {
155 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
156 }
139 157
140 if (is_power_of_2(elem_size)) { 158 if (is_power_of_2(elem_size)) {
141 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 159 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
@@ -157,7 +175,7 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
157 if (unlikely(index >= array->map.max_entries)) 175 if (unlikely(index >= array->map.max_entries))
158 return NULL; 176 return NULL;
159 177
160 return this_cpu_ptr(array->pptrs[index]); 178 return this_cpu_ptr(array->pptrs[index & array->index_mask]);
161} 179}
162 180
163int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) 181int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
@@ -177,7 +195,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
177 */ 195 */
178 size = round_up(map->value_size, 8); 196 size = round_up(map->value_size, 8);
179 rcu_read_lock(); 197 rcu_read_lock();
180 pptr = array->pptrs[index]; 198 pptr = array->pptrs[index & array->index_mask];
181 for_each_possible_cpu(cpu) { 199 for_each_possible_cpu(cpu) {
182 bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size); 200 bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
183 off += size; 201 off += size;
@@ -225,10 +243,11 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
225 return -EEXIST; 243 return -EEXIST;
226 244
227 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) 245 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
228 memcpy(this_cpu_ptr(array->pptrs[index]), 246 memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
229 value, map->value_size); 247 value, map->value_size);
230 else 248 else
231 memcpy(array->value + array->elem_size * index, 249 memcpy(array->value +
250 array->elem_size * (index & array->index_mask),
232 value, map->value_size); 251 value, map->value_size);
233 return 0; 252 return 0;
234} 253}
@@ -262,7 +281,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
262 */ 281 */
263 size = round_up(map->value_size, 8); 282 size = round_up(map->value_size, 8);
264 rcu_read_lock(); 283 rcu_read_lock();
265 pptr = array->pptrs[index]; 284 pptr = array->pptrs[index & array->index_mask];
266 for_each_possible_cpu(cpu) { 285 for_each_possible_cpu(cpu) {
267 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size); 286 bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
268 off += size; 287 off += size;
@@ -613,6 +632,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
613static u32 array_of_map_gen_lookup(struct bpf_map *map, 632static u32 array_of_map_gen_lookup(struct bpf_map *map,
614 struct bpf_insn *insn_buf) 633 struct bpf_insn *insn_buf)
615{ 634{
635 struct bpf_array *array = container_of(map, struct bpf_array, map);
616 u32 elem_size = round_up(map->value_size, 8); 636 u32 elem_size = round_up(map->value_size, 8);
617 struct bpf_insn *insn = insn_buf; 637 struct bpf_insn *insn = insn_buf;
618 const int ret = BPF_REG_0; 638 const int ret = BPF_REG_0;
@@ -621,7 +641,12 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
621 641
622 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 642 *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
623 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 643 *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
624 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); 644 if (map->unpriv_array) {
645 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
646 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
647 } else {
648 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
649 }
625 if (is_power_of_2(elem_size)) 650 if (is_power_of_2(elem_size))
626 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 651 *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
627 else 652 else
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 86b50aa26ee8..51ec2dda7f08 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -767,6 +767,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
767} 767}
768EXPORT_SYMBOL_GPL(__bpf_call_base); 768EXPORT_SYMBOL_GPL(__bpf_call_base);
769 769
770#ifndef CONFIG_BPF_JIT_ALWAYS_ON
770/** 771/**
771 * __bpf_prog_run - run eBPF program on a given context 772 * __bpf_prog_run - run eBPF program on a given context
772 * @ctx: is the data we are operating on 773 * @ctx: is the data we are operating on
@@ -1317,6 +1318,14 @@ EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1317EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) 1318EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1318}; 1319};
1319 1320
1321#else
1322static unsigned int __bpf_prog_ret0(const void *ctx,
1323 const struct bpf_insn *insn)
1324{
1325 return 0;
1326}
1327#endif
1328
1320bool bpf_prog_array_compatible(struct bpf_array *array, 1329bool bpf_prog_array_compatible(struct bpf_array *array,
1321 const struct bpf_prog *fp) 1330 const struct bpf_prog *fp)
1322{ 1331{
@@ -1364,9 +1373,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
1364 */ 1373 */
1365struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) 1374struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1366{ 1375{
1376#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1367 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); 1377 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1368 1378
1369 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; 1379 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1380#else
1381 fp->bpf_func = __bpf_prog_ret0;
1382#endif
1370 1383
1371 /* eBPF JITs can rewrite the program in case constant 1384 /* eBPF JITs can rewrite the program in case constant
1372 * blinding is active. However, in case of error during 1385 * blinding is active. However, in case of error during
@@ -1376,6 +1389,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1376 */ 1389 */
1377 if (!bpf_prog_is_dev_bound(fp->aux)) { 1390 if (!bpf_prog_is_dev_bound(fp->aux)) {
1378 fp = bpf_int_jit_compile(fp); 1391 fp = bpf_int_jit_compile(fp);
1392#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1393 if (!fp->jited) {
1394 *err = -ENOTSUPP;
1395 return fp;
1396 }
1397#endif
1379 } else { 1398 } else {
1380 *err = bpf_prog_offload_compile(fp); 1399 *err = bpf_prog_offload_compile(fp);
1381 if (*err) 1400 if (*err)
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 5ee2e41893d9..1712d319c2d8 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -591,8 +591,15 @@ static void sock_map_free(struct bpf_map *map)
591 591
592 write_lock_bh(&sock->sk_callback_lock); 592 write_lock_bh(&sock->sk_callback_lock);
593 psock = smap_psock_sk(sock); 593 psock = smap_psock_sk(sock);
594 smap_list_remove(psock, &stab->sock_map[i]); 594 /* This check handles a racing sock event that can get the
595 smap_release_sock(psock, sock); 595 * sk_callback_lock before this case but after xchg happens
596 * causing the refcnt to hit zero and sock user data (psock)
597 * to be null and queued for garbage collection.
598 */
599 if (likely(psock)) {
600 smap_list_remove(psock, &stab->sock_map[i]);
601 smap_release_sock(psock, sock);
602 }
596 write_unlock_bh(&sock->sk_callback_lock); 603 write_unlock_bh(&sock->sk_callback_lock);
597 } 604 }
598 rcu_read_unlock(); 605 rcu_read_unlock();
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 04b24876cd23..b414d6b2d470 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1729,6 +1729,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1729 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 1729 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
1730 if (err) 1730 if (err)
1731 return err; 1731 return err;
1732 if (func_id == BPF_FUNC_tail_call) {
1733 if (meta.map_ptr == NULL) {
1734 verbose(env, "verifier bug\n");
1735 return -EINVAL;
1736 }
1737 env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
1738 }
1732 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 1739 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
1733 if (err) 1740 if (err)
1734 return err; 1741 return err;
@@ -4456,6 +4463,35 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
4456 */ 4463 */
4457 insn->imm = 0; 4464 insn->imm = 0;
4458 insn->code = BPF_JMP | BPF_TAIL_CALL; 4465 insn->code = BPF_JMP | BPF_TAIL_CALL;
4466
4467 /* instead of changing every JIT dealing with tail_call
4468 * emit two extra insns:
4469 * if (index >= max_entries) goto out;
4470 * index &= array->index_mask;
4471 * to avoid out-of-bounds cpu speculation
4472 */
4473 map_ptr = env->insn_aux_data[i + delta].map_ptr;
4474 if (map_ptr == BPF_MAP_PTR_POISON) {
 4475 if (map_ptr == BPF_MAP_PTR_POISON) { verbose(env, "tail_call abusing map_ptr\n");
4476 return -EINVAL;
4477 }
4478 if (!map_ptr->unpriv_array)
4479 continue;
4480 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
4481 map_ptr->max_entries, 2);
4482 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
4483 container_of(map_ptr,
4484 struct bpf_array,
4485 map)->index_mask);
4486 insn_buf[2] = *insn;
4487 cnt = 3;
4488 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
4489 if (!new_prog)
4490 return -ENOMEM;
4491
4492 delta += cnt - 1;
4493 env->prog = prog = new_prog;
4494 insn = new_prog->insnsi + i + delta;
4459 continue; 4495 continue;
4460 } 4496 }
4461 4497
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 9e9748089270..f369889e521d 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6250,9 +6250,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
6250 return NULL; 6250 return NULL;
6251 } 6251 }
6252 } 6252 }
6253 /* We don't expect to fail. */
6254 if (*err) { 6253 if (*err) {
6255 pr_cont("FAIL to attach err=%d len=%d\n", 6254 pr_cont("FAIL to prog_create err=%d len=%d\n",
6256 *err, fprog.len); 6255 *err, fprog.len);
6257 return NULL; 6256 return NULL;
6258 } 6257 }
@@ -6276,6 +6275,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
6276 * checks. 6275 * checks.
6277 */ 6276 */
6278 fp = bpf_prog_select_runtime(fp, err); 6277 fp = bpf_prog_select_runtime(fp, err);
6278 if (*err) {
6279 pr_cont("FAIL to select_runtime err=%d\n", *err);
6280 return NULL;
6281 }
6279 break; 6282 break;
6280 } 6283 }
6281 6284
@@ -6461,8 +6464,8 @@ static __init int test_bpf(void)
6461 pass_cnt++; 6464 pass_cnt++;
6462 continue; 6465 continue;
6463 } 6466 }
6464 6467 err_cnt++;
6465 return err; 6468 continue;
6466 } 6469 }
6467 6470
6468 pr_cont("jited:%u ", fp->jited); 6471 pr_cont("jited:%u ", fp->jited);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8dfdd94e430f..bad01b14a4ad 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -111,12 +111,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
111 vlan_gvrp_uninit_applicant(real_dev); 111 vlan_gvrp_uninit_applicant(real_dev);
112 } 112 }
113 113
114 /* Take it out of our own structures, but be sure to interlock with 114 vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
115 * HW accelerating devices or SW vlan input packet processing if
116 * VLAN is not 0 (leave it there for 802.1p).
117 */
118 if (vlan_id)
119 vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
120 115
121 /* Get rid of the vlan's reference to real_dev */ 116 /* Get rid of the vlan's reference to real_dev */
122 dev_put(real_dev); 117 dev_put(real_dev);
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 2d38b6e34203..e0adcd123f48 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -334,9 +334,8 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
334 mutex_lock(&caifdevs->lock); 334 mutex_lock(&caifdevs->lock);
335 list_add_rcu(&caifd->list, &caifdevs->list); 335 list_add_rcu(&caifd->list, &caifdevs->list);
336 336
337 strncpy(caifd->layer.name, dev->name, 337 strlcpy(caifd->layer.name, dev->name,
338 sizeof(caifd->layer.name) - 1); 338 sizeof(caifd->layer.name));
339 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
340 caifd->layer.transmit = transmit; 339 caifd->layer.transmit = transmit;
341 cfcnfg_add_phy_layer(cfg, 340 cfcnfg_add_phy_layer(cfg,
342 dev, 341 dev,
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index 5cd44f001f64..1a082a946045 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -176,9 +176,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
176 dev_add_pack(&caif_usb_type); 176 dev_add_pack(&caif_usb_type);
177 pack_added = true; 177 pack_added = true;
178 178
179 strncpy(layer->name, dev->name, 179 strlcpy(layer->name, dev->name, sizeof(layer->name));
180 sizeof(layer->name) - 1);
181 layer->name[sizeof(layer->name) - 1] = 0;
182 180
183 return 0; 181 return 0;
184} 182}
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 273cb07f57d8..8f00bea093b9 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -268,17 +268,15 @@ static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
268 case CAIFPROTO_RFM: 268 case CAIFPROTO_RFM:
269 l->linktype = CFCTRL_SRV_RFM; 269 l->linktype = CFCTRL_SRV_RFM;
270 l->u.datagram.connid = s->sockaddr.u.rfm.connection_id; 270 l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
271 strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, 271 strlcpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
272 sizeof(l->u.rfm.volume)-1); 272 sizeof(l->u.rfm.volume));
273 l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
274 break; 273 break;
275 case CAIFPROTO_UTIL: 274 case CAIFPROTO_UTIL:
276 l->linktype = CFCTRL_SRV_UTIL; 275 l->linktype = CFCTRL_SRV_UTIL;
277 l->endpoint = 0x00; 276 l->endpoint = 0x00;
278 l->chtype = 0x00; 277 l->chtype = 0x00;
279 strncpy(l->u.utility.name, s->sockaddr.u.util.service, 278 strlcpy(l->u.utility.name, s->sockaddr.u.util.service,
280 sizeof(l->u.utility.name)-1); 279 sizeof(l->u.utility.name));
281 l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
282 caif_assert(sizeof(l->u.utility.name) > 10); 280 caif_assert(sizeof(l->u.utility.name) > 10);
283 l->u.utility.paramlen = s->param.size; 281 l->u.utility.paramlen = s->param.size;
284 if (l->u.utility.paramlen > sizeof(l->u.utility.params)) 282 if (l->u.utility.paramlen > sizeof(l->u.utility.params))
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index f5afda1abc76..655ed7032150 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -258,8 +258,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
258 tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs); 258 tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
259 cfpkt_add_body(pkt, &tmp16, 2); 259 cfpkt_add_body(pkt, &tmp16, 2);
260 memset(utility_name, 0, sizeof(utility_name)); 260 memset(utility_name, 0, sizeof(utility_name));
261 strncpy(utility_name, param->u.utility.name, 261 strlcpy(utility_name, param->u.utility.name,
262 UTILITY_NAME_LENGTH - 1); 262 UTILITY_NAME_LENGTH);
263 cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH); 263 cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
264 tmp8 = param->u.utility.paramlen; 264 tmp8 = param->u.utility.paramlen;
265 cfpkt_add_body(pkt, &tmp8, 1); 265 cfpkt_add_body(pkt, &tmp8, 1);
diff --git a/net/core/filter.c b/net/core/filter.c
index 6a85e67fafce..d339ef170df6 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1054,11 +1054,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
1054 */ 1054 */
1055 goto out_err_free; 1055 goto out_err_free;
1056 1056
1057 /* We are guaranteed to never error here with cBPF to eBPF
1058 * transitions, since there's no issue with type compatibility
1059 * checks on program arrays.
1060 */
1061 fp = bpf_prog_select_runtime(fp, &err); 1057 fp = bpf_prog_select_runtime(fp, &err);
1058 if (err)
1059 goto out_err_free;
1062 1060
1063 kfree(old_prog); 1061 kfree(old_prog);
1064 return fp; 1062 return fp;
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 217f4e3b82f6..146b50e30659 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -288,7 +288,7 @@ static int sock_diag_bind(struct net *net, int group)
288 case SKNLGRP_INET6_UDP_DESTROY: 288 case SKNLGRP_INET6_UDP_DESTROY:
289 if (!sock_diag_handlers[AF_INET6]) 289 if (!sock_diag_handlers[AF_INET6])
290 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 290 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
291 NETLINK_SOCK_DIAG, AF_INET); 291 NETLINK_SOCK_DIAG, AF_INET6);
292 break; 292 break;
293 } 293 }
294 return 0; 294 return 0;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index cbc3dde4cfcc..a47ad6cd41c0 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -325,7 +325,13 @@ static struct ctl_table net_core_table[] = {
325 .data = &bpf_jit_enable, 325 .data = &bpf_jit_enable,
326 .maxlen = sizeof(int), 326 .maxlen = sizeof(int),
327 .mode = 0644, 327 .mode = 0644,
328#ifndef CONFIG_BPF_JIT_ALWAYS_ON
328 .proc_handler = proc_dointvec 329 .proc_handler = proc_dointvec
330#else
331 .proc_handler = proc_dointvec_minmax,
332 .extra1 = &one,
333 .extra2 = &one,
334#endif
329 }, 335 },
330# ifdef CONFIG_HAVE_EBPF_JIT 336# ifdef CONFIG_HAVE_EBPF_JIT
331 { 337 {
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 125c1eab3eaa..5e570aa9e43b 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -520,9 +520,11 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
520 goto out; 520 goto out;
521 521
522 /* hdrincl should be READ_ONCE(inet->hdrincl) 522 /* hdrincl should be READ_ONCE(inet->hdrincl)
523 * but READ_ONCE() doesn't work with bit fields 523 * but READ_ONCE() doesn't work with bit fields.
524 * Doing this indirectly yields the same result.
524 */ 525 */
525 hdrincl = inet->hdrincl; 526 hdrincl = inet->hdrincl;
527 hdrincl = READ_ONCE(hdrincl);
526 /* 528 /*
527 * Check the flags. 529 * Check the flags.
528 */ 530 */
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 83bd75713535..bc68eb661970 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -925,6 +925,15 @@ static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
925 sr_phdr->segments[0] = **addr_p; 925 sr_phdr->segments[0] = **addr_p;
926 *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left]; 926 *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];
927 927
928 if (sr_ihdr->hdrlen > hops * 2) {
929 int tlvs_offset, tlvs_length;
930
931 tlvs_offset = (1 + hops * 2) << 3;
932 tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
933 memcpy((char *)sr_phdr + tlvs_offset,
934 (char *)sr_ihdr + tlvs_offset, tlvs_length);
935 }
936
928#ifdef CONFIG_IPV6_SEG6_HMAC 937#ifdef CONFIG_IPV6_SEG6_HMAC
929 if (sr_has_hmac(sr_phdr)) { 938 if (sr_has_hmac(sr_phdr)) {
930 struct net *net = NULL; 939 struct net *net = NULL;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index d11a5578e4f8..9dcc3924a975 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -640,6 +640,11 @@ static struct fib6_node *fib6_add_1(struct net *net,
640 if (!(fn->fn_flags & RTN_RTINFO)) { 640 if (!(fn->fn_flags & RTN_RTINFO)) {
641 RCU_INIT_POINTER(fn->leaf, NULL); 641 RCU_INIT_POINTER(fn->leaf, NULL);
642 rt6_release(leaf); 642 rt6_release(leaf);
643 /* remove null_entry in the root node */
644 } else if (fn->fn_flags & RTN_TL_ROOT &&
645 rcu_access_pointer(fn->leaf) ==
646 net->ipv6.ip6_null_entry) {
647 RCU_INIT_POINTER(fn->leaf, NULL);
643 } 648 }
644 649
645 return fn; 650 return fn;
@@ -1270,13 +1275,17 @@ out:
1270 return err; 1275 return err;
1271 1276
1272failure: 1277failure:
1273 /* fn->leaf could be NULL if fn is an intermediate node and we 1278 /* fn->leaf could be NULL and fib6_repair_tree() needs to be called if:
1274 * failed to add the new route to it in both subtree creation 1279 * 1. fn is an intermediate node and we failed to add the new
1275 * failure and fib6_add_rt2node() failure case. 1280 * route to it in both subtree creation failure and fib6_add_rt2node()
1276 * In both cases, fib6_repair_tree() should be called to fix 1281 * failure case.
1277 * fn->leaf. 1282 * 2. fn is the root node in the table and we fail to add the first
1283 * default route to it.
1278 */ 1284 */
1279 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) 1285 if (fn &&
1286 (!(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)) ||
1287 (fn->fn_flags & RTN_TL_ROOT &&
1288 !rcu_access_pointer(fn->leaf))))
1280 fib6_repair_tree(info->nl_net, table, fn); 1289 fib6_repair_tree(info->nl_net, table, fn);
1281 /* Always release dst as dst->__refcnt is guaranteed 1290 /* Always release dst as dst->__refcnt is guaranteed
1282 * to be taken before entering this function 1291 * to be taken before entering this function
@@ -1531,6 +1540,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
1531 struct fib6_walker *w; 1540 struct fib6_walker *w;
1532 int iter = 0; 1541 int iter = 0;
1533 1542
1543 /* Set fn->leaf to null_entry for root node. */
1544 if (fn->fn_flags & RTN_TL_ROOT) {
1545 rcu_assign_pointer(fn->leaf, net->ipv6.ip6_null_entry);
1546 return fn;
1547 }
1548
1534 for (;;) { 1549 for (;;) {
1535 struct fib6_node *fn_r = rcu_dereference_protected(fn->right, 1550 struct fib6_node *fn_r = rcu_dereference_protected(fn->right,
1536 lockdep_is_held(&table->tb6_lock)); 1551 lockdep_is_held(&table->tb6_lock));
@@ -1685,10 +1700,15 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
1685 } 1700 }
1686 read_unlock(&net->ipv6.fib6_walker_lock); 1701 read_unlock(&net->ipv6.fib6_walker_lock);
1687 1702
1688 /* If it was last route, expunge its radix tree node */ 1703 /* If it was last route, call fib6_repair_tree() to:
1704 * 1. For root node, put back null_entry as how the table was created.
1705 * 2. For other nodes, expunge its radix tree node.
1706 */
1689 if (!rcu_access_pointer(fn->leaf)) { 1707 if (!rcu_access_pointer(fn->leaf)) {
1690 fn->fn_flags &= ~RTN_RTINFO; 1708 if (!(fn->fn_flags & RTN_TL_ROOT)) {
1691 net->ipv6.rt6_stats->fib_route_nodes--; 1709 fn->fn_flags &= ~RTN_RTINFO;
1710 net->ipv6.rt6_stats->fib_route_nodes--;
1711 }
1692 fn = fib6_repair_tree(net, table, fn); 1712 fn = fib6_repair_tree(net, table, fn);
1693 } 1713 }
1694 1714
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index f7dd51c42314..688ba5f7516b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1735,9 +1735,10 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
1735 cork.base.opt = NULL; 1735 cork.base.opt = NULL;
1736 v6_cork.opt = NULL; 1736 v6_cork.opt = NULL;
1737 err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6); 1737 err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
1738 if (err) 1738 if (err) {
1739 ip6_cork_release(&cork, &v6_cork);
1739 return ERR_PTR(err); 1740 return ERR_PTR(err);
1740 1741 }
1741 if (ipc6->dontfrag < 0) 1742 if (ipc6->dontfrag < 0)
1742 ipc6->dontfrag = inet6_sk(sk)->dontfrag; 1743 ipc6->dontfrag = inet6_sk(sk)->dontfrag;
1743 1744
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b4fb6e4886d2..9b01e994f661 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2277,7 +2277,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2277 2277
2278 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { 2278 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
2279 event = sctp_ulpevent_make_sender_dry_event(asoc, 2279 event = sctp_ulpevent_make_sender_dry_event(asoc,
2280 GFP_ATOMIC); 2280 GFP_USER | __GFP_NOWARN);
2281 if (!event) 2281 if (!event)
2282 return -ENOMEM; 2282 return -ENOMEM;
2283 2283
@@ -3498,6 +3498,8 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
3498 3498
3499 if (optlen < sizeof(struct sctp_hmacalgo)) 3499 if (optlen < sizeof(struct sctp_hmacalgo))
3500 return -EINVAL; 3500 return -EINVAL;
3501 optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
3502 SCTP_AUTH_NUM_HMACS * sizeof(u16));
3501 3503
3502 hmacs = memdup_user(optval, optlen); 3504 hmacs = memdup_user(optval, optlen);
3503 if (IS_ERR(hmacs)) 3505 if (IS_ERR(hmacs))
@@ -3536,6 +3538,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3536 3538
3537 if (optlen <= sizeof(struct sctp_authkey)) 3539 if (optlen <= sizeof(struct sctp_authkey))
3538 return -EINVAL; 3540 return -EINVAL;
3541 /* authkey->sca_keylength is u16, so optlen can't be bigger than
3542 * this.
3543 */
3544 optlen = min_t(unsigned int, optlen, USHRT_MAX +
3545 sizeof(struct sctp_authkey));
3539 3546
3540 authkey = memdup_user(optval, optlen); 3547 authkey = memdup_user(optval, optlen);
3541 if (IS_ERR(authkey)) 3548 if (IS_ERR(authkey))
@@ -3893,6 +3900,9 @@ static int sctp_setsockopt_reset_streams(struct sock *sk,
3893 3900
3894 if (optlen < sizeof(*params)) 3901 if (optlen < sizeof(*params))
3895 return -EINVAL; 3902 return -EINVAL;
3903 /* srs_number_streams is u16, so optlen can't be bigger than this. */
3904 optlen = min_t(unsigned int, optlen, USHRT_MAX +
3905 sizeof(__u16) * sizeof(*params));
3896 3906
3897 params = memdup_user(optval, optlen); 3907 params = memdup_user(optval, optlen);
3898 if (IS_ERR(params)) 3908 if (IS_ERR(params))
@@ -5015,7 +5025,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
5015 len = sizeof(int); 5025 len = sizeof(int);
5016 if (put_user(len, optlen)) 5026 if (put_user(len, optlen))
5017 return -EFAULT; 5027 return -EFAULT;
5018 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) 5028 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
5019 return -EFAULT; 5029 return -EFAULT;
5020 return 0; 5030 return 0;
5021} 5031}
@@ -5645,6 +5655,9 @@ copy_getaddrs:
5645 err = -EFAULT; 5655 err = -EFAULT;
5646 goto out; 5656 goto out;
5647 } 5657 }
5658 /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
5659 * but we can't change it anymore.
5660 */
5648 if (put_user(bytes_copied, optlen)) 5661 if (put_user(bytes_copied, optlen))
5649 err = -EFAULT; 5662 err = -EFAULT;
5650out: 5663out:
@@ -6081,7 +6094,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
6081 params.assoc_id = 0; 6094 params.assoc_id = 0;
6082 } else if (len >= sizeof(struct sctp_assoc_value)) { 6095 } else if (len >= sizeof(struct sctp_assoc_value)) {
6083 len = sizeof(struct sctp_assoc_value); 6096 len = sizeof(struct sctp_assoc_value);
6084 if (copy_from_user(&params, optval, sizeof(params))) 6097 if (copy_from_user(&params, optval, len))
6085 return -EFAULT; 6098 return -EFAULT;
6086 } else 6099 } else
6087 return -EINVAL; 6100 return -EINVAL;
@@ -6251,7 +6264,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
6251 6264
6252 if (len < sizeof(struct sctp_authkeyid)) 6265 if (len < sizeof(struct sctp_authkeyid))
6253 return -EINVAL; 6266 return -EINVAL;
6254 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) 6267
6268 len = sizeof(struct sctp_authkeyid);
6269 if (copy_from_user(&val, optval, len))
6255 return -EFAULT; 6270 return -EFAULT;
6256 6271
6257 asoc = sctp_id2assoc(sk, val.scact_assoc_id); 6272 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
@@ -6263,7 +6278,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
6263 else 6278 else
6264 val.scact_keynumber = ep->active_key_id; 6279 val.scact_keynumber = ep->active_key_id;
6265 6280
6266 len = sizeof(struct sctp_authkeyid);
6267 if (put_user(len, optlen)) 6281 if (put_user(len, optlen))
6268 return -EFAULT; 6282 return -EFAULT;
6269 if (copy_to_user(optval, &val, len)) 6283 if (copy_to_user(optval, &val, len))
@@ -6289,7 +6303,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
6289 if (len < sizeof(struct sctp_authchunks)) 6303 if (len < sizeof(struct sctp_authchunks))
6290 return -EINVAL; 6304 return -EINVAL;
6291 6305
6292 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 6306 if (copy_from_user(&val, optval, sizeof(val)))
6293 return -EFAULT; 6307 return -EFAULT;
6294 6308
6295 to = p->gauth_chunks; 6309 to = p->gauth_chunks;
@@ -6334,7 +6348,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
6334 if (len < sizeof(struct sctp_authchunks)) 6348 if (len < sizeof(struct sctp_authchunks))
6335 return -EINVAL; 6349 return -EINVAL;
6336 6350
6337 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) 6351 if (copy_from_user(&val, optval, sizeof(val)))
6338 return -EFAULT; 6352 return -EFAULT;
6339 6353
6340 to = p->gauth_chunks; 6354 to = p->gauth_chunks;
diff --git a/net/socket.c b/net/socket.c
index 05f361faec45..78acd6ce74c7 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2619,6 +2619,15 @@ out_fs:
2619 2619
2620core_initcall(sock_init); /* early initcall */ 2620core_initcall(sock_init); /* early initcall */
2621 2621
2622static int __init jit_init(void)
2623{
2624#ifdef CONFIG_BPF_JIT_ALWAYS_ON
2625 bpf_jit_enable = 1;
2626#endif
2627 return 0;
2628}
2629pure_initcall(jit_init);
2630
2622#ifdef CONFIG_PROC_FS 2631#ifdef CONFIG_PROC_FS
2623void socket_seq_show(struct seq_file *seq) 2632void socket_seq_show(struct seq_file *seq)
2624{ 2633{
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 8591c89c0828..471bbbdb94db 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -474,27 +474,7 @@ static struct bpf_align_test tests[] = {
474 .result = REJECT, 474 .result = REJECT,
475 .matches = { 475 .matches = {
476 {4, "R5=pkt(id=0,off=0,r=0,imm=0)"}, 476 {4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
477 /* ptr & 0x40 == either 0 or 0x40 */ 477 /* R5 bitwise operator &= on pointer prohibited */
478 {5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"},
479 /* ptr << 2 == unknown, (4n) */
480 {7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
481 /* (4n) + 14 == (4n+2). We blow our bounds, because
482 * the add could overflow.
483 */
484 {8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
485 /* Checked s>=0 */
486 {10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
487 /* packet pointer + nonnegative (4n+2) */
488 {12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
489 {14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
490 /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
491 * We checked the bounds, but it might have been able
492 * to overflow if the packet pointer started in the
493 * upper half of the address space.
494 * So we did not get a 'range' on R6, and the access
495 * attempt will fail.
496 */
497 {16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
498 } 478 }
499 }, 479 },
500 { 480 {