120 files changed, 1358 insertions, 465 deletions
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 6d6c07cf1a9a..63912ef34606 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt | |||
| @@ -67,13 +67,14 @@ Note that DSA does not currently create network interfaces for the "cpu" and | |||
| 67 | Switch tagging protocols | 67 | Switch tagging protocols |
| 68 | ------------------------ | 68 | ------------------------ |
| 69 | 69 | ||
| 70 | DSA currently supports 4 different tagging protocols, and a tag-less mode as | 70 | DSA currently supports 5 different tagging protocols, and a tag-less mode as |
| 71 | well. The different protocols are implemented in: | 71 | well. The different protocols are implemented in: |
| 72 | 72 | ||
| 73 | net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy) | 73 | net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy) |
| 74 | net/dsa/tag_dsa.c: Marvell's original DSA tag | 74 | net/dsa/tag_dsa.c: Marvell's original DSA tag |
| 75 | net/dsa/tag_edsa.c: Marvell's enhanced DSA tag | 75 | net/dsa/tag_edsa.c: Marvell's enhanced DSA tag |
| 76 | net/dsa/tag_brcm.c: Broadcom's 4 bytes tag | 76 | net/dsa/tag_brcm.c: Broadcom's 4 bytes tag |
| 77 | net/dsa/tag_qca.c: Qualcomm's 2 bytes tag | ||
| 77 | 78 | ||
| 78 | The exact format of the tag protocol is vendor specific, but in general, they | 79 | The exact format of the tag protocol is vendor specific, but in general, they |
| 79 | all contain something which: | 80 | all contain something which: |
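Whatever the vendor format, a tagging driver does two things: on transmit it inserts a small tag naming the destination switch port, and on receive it reads the source port back out and strips the tag again (whether the tag sits near the front of the frame or at its tail is vendor specific). A minimal, self-contained sketch of that round trip follows; the structure layout, field names and 4-byte size are purely illustrative and do not correspond to any of the vendor formats listed above:

    #include <stdint.h>
    #include <string.h>

    /* Illustrative 4-byte tag: an opcode plus the switch port number. */
    struct sketch_tag {
        uint8_t  opcode;
        uint8_t  port;
        uint16_t reserved;
    };

    /* Transmit side: prepend the tag (buffer must have 4 bytes of headroom). */
    static size_t sketch_tag_xmit(uint8_t *frame, size_t len, uint8_t port)
    {
        struct sketch_tag tag = { .opcode = 1, .port = port, .reserved = 0 };

        memmove(frame + sizeof(tag), frame, len);
        memcpy(frame, &tag, sizeof(tag));
        return len + sizeof(tag);
    }

    /* Receive side: recover the port, then strip the tag again. */
    static size_t sketch_tag_rcv(uint8_t *frame, size_t len, uint8_t *port)
    {
        struct sketch_tag tag;

        memcpy(&tag, frame, sizeof(tag));
        *port = tag.port;
        memmove(frame, frame + sizeof(tag), len - sizeof(tag));
        return len - sizeof(tag);
    }

    int main(void)
    {
        uint8_t frame[64] = "payload";
        uint8_t port = 0;
        size_t len = 8;

        len = sketch_tag_xmit(frame, len, 3);    /* 8 -> 12 bytes, tagged for port 3 */
        len = sketch_tag_rcv(frame, len, &port); /* back to 8 bytes, port == 3 */
        return port == 3 ? 0 : 1;
    }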
diff --git a/MAINTAINERS b/MAINTAINERS index 851b89b9edcb..2a58eeac9452 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -8057,6 +8057,7 @@ F: drivers/infiniband/hw/mlx4/ | |||
| 8057 | F: include/linux/mlx4/ | 8057 | F: include/linux/mlx4/ |
| 8058 | 8058 | ||
| 8059 | MELLANOX MLX5 core VPI driver | 8059 | MELLANOX MLX5 core VPI driver |
| 8060 | M: Saeed Mahameed <saeedm@mellanox.com> | ||
| 8060 | M: Matan Barak <matanb@mellanox.com> | 8061 | M: Matan Barak <matanb@mellanox.com> |
| 8061 | M: Leon Romanovsky <leonro@mellanox.com> | 8062 | M: Leon Romanovsky <leonro@mellanox.com> |
| 8062 | L: netdev@vger.kernel.org | 8063 | L: netdev@vger.kernel.org |
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 3eb7430dffbf..f8ff25c8ee2e 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c | |||
| @@ -142,6 +142,9 @@ struct plx_pci_card { | |||
| 142 | #define CTI_PCI_VENDOR_ID 0x12c4 | 142 | #define CTI_PCI_VENDOR_ID 0x12c4 |
| 143 | #define CTI_PCI_DEVICE_ID_CRG001 0x0900 | 143 | #define CTI_PCI_DEVICE_ID_CRG001 0x0900 |
| 144 | 144 | ||
| 145 | #define MOXA_PCI_VENDOR_ID 0x1393 | ||
| 146 | #define MOXA_PCI_DEVICE_ID 0x0100 | ||
| 147 | |||
| 145 | static void plx_pci_reset_common(struct pci_dev *pdev); | 148 | static void plx_pci_reset_common(struct pci_dev *pdev); |
| 146 | static void plx9056_pci_reset_common(struct pci_dev *pdev); | 149 | static void plx9056_pci_reset_common(struct pci_dev *pdev); |
| 147 | static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); | 150 | static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); |
| @@ -258,6 +261,14 @@ static struct plx_pci_card_info plx_pci_card_info_elcus = { | |||
| 258 | /* based on PLX9030 */ | 261 | /* based on PLX9030 */ |
| 259 | }; | 262 | }; |
| 260 | 263 | ||
| 264 | static struct plx_pci_card_info plx_pci_card_info_moxa = { | ||
| 265 | "MOXA", 2, | ||
| 266 | PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, | ||
| 267 | {0, 0x00, 0x00}, { {0, 0x00, 0x80}, {1, 0x00, 0x80} }, | ||
| 268 | &plx_pci_reset_common | ||
| 269 | /* based on PLX9052 */ | ||
| 270 | }; | ||
| 271 | |||
| 261 | static const struct pci_device_id plx_pci_tbl[] = { | 272 | static const struct pci_device_id plx_pci_tbl[] = { |
| 262 | { | 273 | { |
| 263 | /* Adlink PCI-7841/cPCI-7841 */ | 274 | /* Adlink PCI-7841/cPCI-7841 */ |
| @@ -357,6 +368,13 @@ static const struct pci_device_id plx_pci_tbl[] = { | |||
| 357 | 0, 0, | 368 | 0, 0, |
| 358 | (kernel_ulong_t)&plx_pci_card_info_elcus | 369 | (kernel_ulong_t)&plx_pci_card_info_elcus |
| 359 | }, | 370 | }, |
| 371 | { | ||
| 372 | /* moxa */ | ||
| 373 | MOXA_PCI_VENDOR_ID, MOXA_PCI_DEVICE_ID, | ||
| 374 | PCI_ANY_ID, PCI_ANY_ID, | ||
| 375 | 0, 0, | ||
| 376 | (kernel_ulong_t)&plx_pci_card_info_moxa | ||
| 377 | }, | ||
| 360 | { 0,} | 378 | { 0,} |
| 361 | }; | 379 | }; |
| 362 | MODULE_DEVICE_TABLE(pci, plx_pci_tbl); | 380 | MODULE_DEVICE_TABLE(pci, plx_pci_tbl); |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index c481f104a8fe..5390ae89136c 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
| @@ -204,17 +204,6 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) | |||
| 204 | return num_msgs; | 204 | return num_msgs; |
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) | ||
| 208 | { | ||
| 209 | u32 data = 0x7777; | ||
| 210 | |||
| 211 | xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); | ||
| 212 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); | ||
| 213 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); | ||
| 214 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); | ||
| 215 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); | ||
| 216 | } | ||
| 217 | |||
| 218 | void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, | 207 | void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, |
| 219 | struct xgene_enet_pdata *pdata, | 208 | struct xgene_enet_pdata *pdata, |
| 220 | enum xgene_enet_err_code status) | 209 | enum xgene_enet_err_code status) |
| @@ -929,5 +918,4 @@ struct xgene_ring_ops xgene_ring1_ops = { | |||
| 929 | .clear = xgene_enet_clear_ring, | 918 | .clear = xgene_enet_clear_ring, |
| 930 | .wr_cmd = xgene_enet_wr_cmd, | 919 | .wr_cmd = xgene_enet_wr_cmd, |
| 931 | .len = xgene_enet_ring_len, | 920 | .len = xgene_enet_ring_len, |
| 932 | .coalesce = xgene_enet_setup_coalescing, | ||
| 933 | }; | 921 | }; |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 8456337a237d..06e598c8bc16 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | |||
| @@ -55,8 +55,10 @@ enum xgene_enet_rm { | |||
| 55 | #define PREFETCH_BUF_EN BIT(21) | 55 | #define PREFETCH_BUF_EN BIT(21) |
| 56 | #define CSR_RING_ID_BUF 0x000c | 56 | #define CSR_RING_ID_BUF 0x000c |
| 57 | #define CSR_PBM_COAL 0x0014 | 57 | #define CSR_PBM_COAL 0x0014 |
| 58 | #define CSR_PBM_CTICK0 0x0018 | ||
| 58 | #define CSR_PBM_CTICK1 0x001c | 59 | #define CSR_PBM_CTICK1 0x001c |
| 59 | #define CSR_PBM_CTICK2 0x0020 | 60 | #define CSR_PBM_CTICK2 0x0020 |
| 61 | #define CSR_PBM_CTICK3 0x0024 | ||
| 60 | #define CSR_THRESHOLD0_SET1 0x0030 | 62 | #define CSR_THRESHOLD0_SET1 0x0030 |
| 61 | #define CSR_THRESHOLD1_SET1 0x0034 | 63 | #define CSR_THRESHOLD1_SET1 0x0034 |
| 62 | #define CSR_RING_NE_INT_MODE 0x017c | 64 | #define CSR_RING_NE_INT_MODE 0x017c |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 429f18fc5503..8158d4698734 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
| @@ -1188,7 +1188,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) | |||
| 1188 | tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); | 1188 | tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); |
| 1189 | } | 1189 | } |
| 1190 | 1190 | ||
| 1191 | pdata->ring_ops->coalesce(pdata->tx_ring[0]); | 1191 | if (pdata->ring_ops->coalesce) |
| 1192 | pdata->ring_ops->coalesce(pdata->tx_ring[0]); | ||
| 1192 | pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; | 1193 | pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; |
| 1193 | 1194 | ||
| 1194 | return 0; | 1195 | return 0; |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c index 2b76732add5d..af51dd5844ce 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c | |||
| @@ -30,7 +30,7 @@ static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) | |||
| 30 | ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); | 30 | ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); |
| 31 | ring_cfg[3] |= SET_BIT(X2_DEQINTEN); | 31 | ring_cfg[3] |= SET_BIT(X2_DEQINTEN); |
| 32 | } | 32 | } |
| 33 | ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1); | 33 | ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2); |
| 34 | 34 | ||
| 35 | addr >>= 8; | 35 | addr >>= 8; |
| 36 | ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr); | 36 | ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr); |
| @@ -192,13 +192,15 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) | |||
| 192 | 192 | ||
| 193 | static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) | 193 | static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) |
| 194 | { | 194 | { |
| 195 | u32 data = 0x7777; | 195 | u32 data = 0x77777777; |
| 196 | 196 | ||
| 197 | xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); | 197 | xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); |
| 198 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data); | ||
| 198 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); | 199 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); |
| 199 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); | 200 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data); |
| 200 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); | 201 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data); |
| 201 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); | 202 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08); |
| 203 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10); | ||
| 202 | } | 204 | } |
| 203 | 205 | ||
| 204 | struct xgene_ring_ops xgene_ring2_ops = { | 206 | struct xgene_ring_ops xgene_ring2_ops = { |
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 31ca204b38d2..49f4cafe5438 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c | |||
| @@ -307,6 +307,10 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, | |||
| 307 | u32 ctl; | 307 | u32 ctl; |
| 308 | 308 | ||
| 309 | ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); | 309 | ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); |
| 310 | |||
| 311 | /* preserve ONLY bits 16-17 from current hardware value */ | ||
| 312 | ctl &= BGMAC_DMA_RX_ADDREXT_MASK; | ||
| 313 | |||
| 310 | if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { | 314 | if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { |
| 311 | ctl &= ~BGMAC_DMA_RX_BL_MASK; | 315 | ctl &= ~BGMAC_DMA_RX_BL_MASK; |
| 312 | ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; | 316 | ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; |
| @@ -317,7 +321,6 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, | |||
| 317 | ctl &= ~BGMAC_DMA_RX_PT_MASK; | 321 | ctl &= ~BGMAC_DMA_RX_PT_MASK; |
| 318 | ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; | 322 | ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; |
| 319 | } | 323 | } |
| 320 | ctl &= BGMAC_DMA_RX_ADDREXT_MASK; | ||
| 321 | ctl |= BGMAC_DMA_RX_ENABLE; | 324 | ctl |= BGMAC_DMA_RX_ENABLE; |
| 322 | ctl |= BGMAC_DMA_RX_PARITY_DISABLE; | 325 | ctl |= BGMAC_DMA_RX_PARITY_DISABLE; |
| 323 | ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; | 326 | ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; |
| @@ -1046,9 +1049,9 @@ static void bgmac_enable(struct bgmac *bgmac) | |||
| 1046 | 1049 | ||
| 1047 | mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> | 1050 | mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> |
| 1048 | BGMAC_DS_MM_SHIFT; | 1051 | BGMAC_DS_MM_SHIFT; |
| 1049 | if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0) | 1052 | if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0) |
| 1050 | bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); | 1053 | bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); |
| 1051 | if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2) | 1054 | if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2) |
| 1052 | bgmac_cco_ctl_maskset(bgmac, 1, ~0, | 1055 | bgmac_cco_ctl_maskset(bgmac, 1, ~0, |
| 1053 | BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); | 1056 | BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); |
| 1054 | 1057 | ||
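The bgmac_dma_rx_enable() change above moves the masking step so it happens immediately after the control word is read back: only the address-extension bits (16-17) survive, and every other stale field from the previous configuration is dropped before the new burst-length, prefetch and enable bits are OR'd in. The same read-modify-write shape in isolation; the mask and field values below are illustrative, not the driver's real register layout:

    #include <stdint.h>
    #include <stdio.h>

    #define KEEP_MASK   0x00030000u  /* bits 16-17: the address-extension field */
    #define NEW_FIELDS  0x00000041u  /* illustrative enable + burst-length bits */

    /* Rebuild a control register, keeping only the KEEP_MASK bits of what the
     * hardware currently reports and composing everything else from scratch. */
    static uint32_t rebuild_ctl(uint32_t hw_value)
    {
        uint32_t ctl = hw_value & KEEP_MASK;  /* drop every stale field first */

        ctl |= NEW_FIELDS;                    /* then OR in the new settings */
        return ctl;
    }

    int main(void)
    {
        /* stale low bits from the old configuration are discarded: 0x00020041 */
        printf("0x%08x\n", rebuild_ctl(0x0002ffffu));
        return 0;
    }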
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index b3791b394715..1f7034d739b0 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c | |||
| @@ -49,6 +49,7 @@ | |||
| 49 | #include <linux/firmware.h> | 49 | #include <linux/firmware.h> |
| 50 | #include <linux/log2.h> | 50 | #include <linux/log2.h> |
| 51 | #include <linux/aer.h> | 51 | #include <linux/aer.h> |
| 52 | #include <linux/crash_dump.h> | ||
| 52 | 53 | ||
| 53 | #if IS_ENABLED(CONFIG_CNIC) | 54 | #if IS_ENABLED(CONFIG_CNIC) |
| 54 | #define BCM_CNIC 1 | 55 | #define BCM_CNIC 1 |
| @@ -4764,15 +4765,16 @@ bnx2_setup_msix_tbl(struct bnx2 *bp) | |||
| 4764 | BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); | 4765 | BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); |
| 4765 | } | 4766 | } |
| 4766 | 4767 | ||
| 4767 | static int | 4768 | static void |
| 4768 | bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | 4769 | bnx2_wait_dma_complete(struct bnx2 *bp) |
| 4769 | { | 4770 | { |
| 4770 | u32 val; | 4771 | u32 val; |
| 4771 | int i, rc = 0; | 4772 | int i; |
| 4772 | u8 old_port; | ||
| 4773 | 4773 | ||
| 4774 | /* Wait for the current PCI transaction to complete before | 4774 | /* |
| 4775 | * issuing a reset. */ | 4775 | * Wait for the current PCI transaction to complete before |
| 4776 | * issuing a reset. | ||
| 4777 | */ | ||
| 4776 | if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || | 4778 | if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || |
| 4777 | (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { | 4779 | (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { |
| 4778 | BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, | 4780 | BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, |
| @@ -4796,6 +4798,21 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | |||
| 4796 | } | 4798 | } |
| 4797 | } | 4799 | } |
| 4798 | 4800 | ||
| 4801 | return; | ||
| 4802 | } | ||
| 4803 | |||
| 4804 | |||
| 4805 | static int | ||
| 4806 | bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | ||
| 4807 | { | ||
| 4808 | u32 val; | ||
| 4809 | int i, rc = 0; | ||
| 4810 | u8 old_port; | ||
| 4811 | |||
| 4812 | /* Wait for the current PCI transaction to complete before | ||
| 4813 | * issuing a reset. */ | ||
| 4814 | bnx2_wait_dma_complete(bp); | ||
| 4815 | |||
| 4799 | /* Wait for the firmware to tell us it is ok to issue a reset. */ | 4816 | /* Wait for the firmware to tell us it is ok to issue a reset. */ |
| 4800 | bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); | 4817 | bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); |
| 4801 | 4818 | ||
| @@ -6361,6 +6378,10 @@ bnx2_open(struct net_device *dev) | |||
| 6361 | struct bnx2 *bp = netdev_priv(dev); | 6378 | struct bnx2 *bp = netdev_priv(dev); |
| 6362 | int rc; | 6379 | int rc; |
| 6363 | 6380 | ||
| 6381 | rc = bnx2_request_firmware(bp); | ||
| 6382 | if (rc < 0) | ||
| 6383 | goto out; | ||
| 6384 | |||
| 6364 | netif_carrier_off(dev); | 6385 | netif_carrier_off(dev); |
| 6365 | 6386 | ||
| 6366 | bnx2_disable_int(bp); | 6387 | bnx2_disable_int(bp); |
| @@ -6429,6 +6450,7 @@ open_err: | |||
| 6429 | bnx2_free_irq(bp); | 6450 | bnx2_free_irq(bp); |
| 6430 | bnx2_free_mem(bp); | 6451 | bnx2_free_mem(bp); |
| 6431 | bnx2_del_napi(bp); | 6452 | bnx2_del_napi(bp); |
| 6453 | bnx2_release_firmware(bp); | ||
| 6432 | goto out; | 6454 | goto out; |
| 6433 | } | 6455 | } |
| 6434 | 6456 | ||
| @@ -8575,12 +8597,15 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 8575 | 8597 | ||
| 8576 | pci_set_drvdata(pdev, dev); | 8598 | pci_set_drvdata(pdev, dev); |
| 8577 | 8599 | ||
| 8578 | rc = bnx2_request_firmware(bp); | 8600 | /* |
| 8579 | if (rc < 0) | 8601 | * In-flight DMA from 1st kernel could continue going in kdump kernel. |
| 8580 | goto error; | 8602 | * New io-page table has been created before bnx2 does reset at open stage. |
| 8581 | 8603 | * We have to wait for the in-flight DMA to complete so that it does not | |
| 8604 | * look up the newly created io-page table. | ||
| 8605 | */ | ||
| 8606 | if (is_kdump_kernel()) | ||
| 8607 | bnx2_wait_dma_complete(bp); | ||
| 8582 | 8608 | ||
| 8583 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); | ||
| 8584 | memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); | 8609 | memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); |
| 8585 | 8610 | ||
| 8586 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | | 8611 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | |
| @@ -8613,7 +8638,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 8613 | return 0; | 8638 | return 0; |
| 8614 | 8639 | ||
| 8615 | error: | 8640 | error: |
| 8616 | bnx2_release_firmware(bp); | ||
| 8617 | pci_iounmap(pdev, bp->regview); | 8641 | pci_iounmap(pdev, bp->regview); |
| 8618 | pci_release_regions(pdev); | 8642 | pci_release_regions(pdev); |
| 8619 | pci_disable_device(pdev); | 8643 | pci_disable_device(pdev); |
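The bnx2 change above keys the extra DMA drain on is_kdump_kernel() from <linux/crash_dump.h>: only the crash (kdump) kernel has to worry about DMA still in flight from the first kernel hitting a freshly created IO page table. A rough user-space analogue of that guard, using the presence of /proc/vmcore as the kdump hint; the wait routine is a placeholder, not the driver's actual logic:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* User-space stand-in for is_kdump_kernel(): a kdump kernel exposes the
     * crashed kernel's memory as /proc/vmcore, so its presence is a good hint. */
    static bool running_in_kdump_kernel(void)
    {
        return access("/proc/vmcore", F_OK) == 0;
    }

    /* Placeholder for the device-specific wait, e.g. polling a DMA-idle bit. */
    static void wait_for_inflight_dma(void)
    {
    }

    int main(void)
    {
        if (running_in_kdump_kernel())
            wait_for_inflight_dma();   /* only the crash kernel pays this cost */

        printf("probe continues\n");
        return 0;
    }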
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a9f9f3738022..c6909660e097 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -6309,6 +6309,7 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, | |||
| 6309 | struct tc_to_netdev *ntc) | 6309 | struct tc_to_netdev *ntc) |
| 6310 | { | 6310 | { |
| 6311 | struct bnxt *bp = netdev_priv(dev); | 6311 | struct bnxt *bp = netdev_priv(dev); |
| 6312 | bool sh = false; | ||
| 6312 | u8 tc; | 6313 | u8 tc; |
| 6313 | 6314 | ||
| 6314 | if (ntc->type != TC_SETUP_MQPRIO) | 6315 | if (ntc->type != TC_SETUP_MQPRIO) |
| @@ -6325,12 +6326,11 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, | |||
| 6325 | if (netdev_get_num_tc(dev) == tc) | 6326 | if (netdev_get_num_tc(dev) == tc) |
| 6326 | return 0; | 6327 | return 0; |
| 6327 | 6328 | ||
| 6329 | if (bp->flags & BNXT_FLAG_SHARED_RINGS) | ||
| 6330 | sh = true; | ||
| 6331 | |||
| 6328 | if (tc) { | 6332 | if (tc) { |
| 6329 | int max_rx_rings, max_tx_rings, rc; | 6333 | int max_rx_rings, max_tx_rings, rc; |
| 6330 | bool sh = false; | ||
| 6331 | |||
| 6332 | if (bp->flags & BNXT_FLAG_SHARED_RINGS) | ||
| 6333 | sh = true; | ||
| 6334 | 6334 | ||
| 6335 | rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); | 6335 | rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); |
| 6336 | if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) | 6336 | if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) |
| @@ -6348,7 +6348,8 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, | |||
| 6348 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc; | 6348 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc; |
| 6349 | netdev_reset_tc(dev); | 6349 | netdev_reset_tc(dev); |
| 6350 | } | 6350 | } |
| 6351 | bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); | 6351 | bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : |
| 6352 | bp->tx_nr_rings + bp->rx_nr_rings; | ||
| 6352 | bp->num_stat_ctxs = bp->cp_nr_rings; | 6353 | bp->num_stat_ctxs = bp->cp_nr_rings; |
| 6353 | 6354 | ||
| 6354 | if (netif_running(bp->dev)) | 6355 | if (netif_running(bp->dev)) |
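The completion-ring arithmetic above follows from how the rings pair up: with shared rings one completion ring serves a TX/RX pair, so the count is the larger of the two, while unshared rings each need their own, so the counts add. A tiny sketch with made-up numbers (8 TX and 8 RX rings give 8 shared versus 16 unshared completion rings):

    #include <stdbool.h>
    #include <stdio.h>

    /* Completion-ring count: shared mode pairs one TX with one RX ring per
     * completion ring, unshared mode gives every TX and RX ring its own. */
    static int completion_rings(int tx, int rx, bool shared)
    {
        return shared ? (tx > rx ? tx : rx) : tx + rx;
    }

    int main(void)
    {
        printf("shared: %d, unshared: %d\n",
               completion_rings(8, 8, true),    /* -> 8  */
               completion_rings(8, 8, false));  /* -> 16 */
        return 0;
    }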
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index ec6cd18842c3..60e2af8678bd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
| @@ -774,8 +774,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) | |||
| 774 | 774 | ||
| 775 | if (vf->flags & BNXT_VF_LINK_UP) { | 775 | if (vf->flags & BNXT_VF_LINK_UP) { |
| 776 | /* if physical link is down, force link up on VF */ | 776 | /* if physical link is down, force link up on VF */ |
| 777 | if (phy_qcfg_resp.link == | 777 | if (phy_qcfg_resp.link != |
| 778 | PORT_PHY_QCFG_RESP_LINK_NO_LINK) { | 778 | PORT_PHY_QCFG_RESP_LINK_LINK) { |
| 779 | phy_qcfg_resp.link = | 779 | phy_qcfg_resp.link = |
| 780 | PORT_PHY_QCFG_RESP_LINK_LINK; | 780 | PORT_PHY_QCFG_RESP_LINK_LINK; |
| 781 | phy_qcfg_resp.link_speed = cpu_to_le16( | 781 | phy_qcfg_resp.link_speed = cpu_to_le16( |
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index f9df4b5ae90e..f42f672b0e7e 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c | |||
| @@ -177,6 +177,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) | |||
| 177 | return 0; | 177 | return 0; |
| 178 | 178 | ||
| 179 | hw_cons = *(tcb->hw_consumer_index); | 179 | hw_cons = *(tcb->hw_consumer_index); |
| 180 | rmb(); | ||
| 180 | cons = tcb->consumer_index; | 181 | cons = tcb->consumer_index; |
| 181 | q_depth = tcb->q_depth; | 182 | q_depth = tcb->q_depth; |
| 182 | 183 | ||
| @@ -3094,7 +3095,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
| 3094 | BNA_QE_INDX_INC(prod, q_depth); | 3095 | BNA_QE_INDX_INC(prod, q_depth); |
| 3095 | tcb->producer_index = prod; | 3096 | tcb->producer_index = prod; |
| 3096 | 3097 | ||
| 3097 | smp_mb(); | 3098 | wmb(); |
| 3098 | 3099 | ||
| 3099 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | 3100 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) |
| 3100 | return NETDEV_TX_OK; | 3101 | return NETDEV_TX_OK; |
| @@ -3102,7 +3103,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
| 3102 | skb_tx_timestamp(skb); | 3103 | skb_tx_timestamp(skb); |
| 3103 | 3104 | ||
| 3104 | bna_txq_prod_indx_doorbell(tcb); | 3105 | bna_txq_prod_indx_doorbell(tcb); |
| 3105 | smp_mb(); | ||
| 3106 | 3106 | ||
| 3107 | return NETDEV_TX_OK; | 3107 | return NETDEV_TX_OK; |
| 3108 | } | 3108 | } |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 50812a1d67bd..df1573c4a659 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | |||
| @@ -178,9 +178,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN | |||
| 178 | CH_PCI_ID_TABLE_FENTRY(0x6005), | 178 | CH_PCI_ID_TABLE_FENTRY(0x6005), |
| 179 | CH_PCI_ID_TABLE_FENTRY(0x6006), | 179 | CH_PCI_ID_TABLE_FENTRY(0x6006), |
| 180 | CH_PCI_ID_TABLE_FENTRY(0x6007), | 180 | CH_PCI_ID_TABLE_FENTRY(0x6007), |
| 181 | CH_PCI_ID_TABLE_FENTRY(0x6008), | ||
| 181 | CH_PCI_ID_TABLE_FENTRY(0x6009), | 182 | CH_PCI_ID_TABLE_FENTRY(0x6009), |
| 182 | CH_PCI_ID_TABLE_FENTRY(0x600d), | 183 | CH_PCI_ID_TABLE_FENTRY(0x600d), |
| 183 | CH_PCI_ID_TABLE_FENTRY(0x6010), | ||
| 184 | CH_PCI_ID_TABLE_FENTRY(0x6011), | 184 | CH_PCI_ID_TABLE_FENTRY(0x6011), |
| 185 | CH_PCI_ID_TABLE_FENTRY(0x6014), | 185 | CH_PCI_ID_TABLE_FENTRY(0x6014), |
| 186 | CH_PCI_ID_TABLE_FENTRY(0x6015), | 186 | CH_PCI_ID_TABLE_FENTRY(0x6015), |
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index c54c6fac0d1d..b6ed818f78ff 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c | |||
| @@ -332,8 +332,10 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev, | |||
| 332 | return ERR_PTR(-ENODEV); | 332 | return ERR_PTR(-ENODEV); |
| 333 | 333 | ||
| 334 | handle = dev->ops->get_handle(dev, port_id); | 334 | handle = dev->ops->get_handle(dev, port_id); |
| 335 | if (IS_ERR(handle)) | 335 | if (IS_ERR(handle)) { |
| 336 | put_device(&dev->cls_dev); | ||
| 336 | return handle; | 337 | return handle; |
| 338 | } | ||
| 337 | 339 | ||
| 338 | handle->dev = dev; | 340 | handle->dev = dev; |
| 339 | handle->owner_dev = owner_dev; | 341 | handle->owner_dev = owner_dev; |
| @@ -356,6 +358,8 @@ out_when_init_queue: | |||
| 356 | for (j = i - 1; j >= 0; j--) | 358 | for (j = i - 1; j >= 0; j--) |
| 357 | hnae_fini_queue(handle->qs[j]); | 359 | hnae_fini_queue(handle->qs[j]); |
| 358 | 360 | ||
| 361 | put_device(&dev->cls_dev); | ||
| 362 | |||
| 359 | return ERR_PTR(-ENOMEM); | 363 | return ERR_PTR(-ENOMEM); |
| 360 | } | 364 | } |
| 361 | EXPORT_SYMBOL(hnae_get_handle); | 365 | EXPORT_SYMBOL(hnae_get_handle); |
| @@ -377,6 +381,8 @@ void hnae_put_handle(struct hnae_handle *h) | |||
| 377 | dev->ops->put_handle(h); | 381 | dev->ops->put_handle(h); |
| 378 | 382 | ||
| 379 | module_put(dev->owner); | 383 | module_put(dev->owner); |
| 384 | |||
| 385 | put_device(&dev->cls_dev); | ||
| 380 | } | 386 | } |
| 381 | EXPORT_SYMBOL(hnae_put_handle); | 387 | EXPORT_SYMBOL(hnae_put_handle); |
| 382 | 388 | ||
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 54efa9a5167b..bd719e25dd76 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
| @@ -2446,6 +2446,8 @@ static int ehea_open(struct net_device *dev) | |||
| 2446 | 2446 | ||
| 2447 | netif_info(port, ifup, dev, "enabling port\n"); | 2447 | netif_info(port, ifup, dev, "enabling port\n"); |
| 2448 | 2448 | ||
| 2449 | netif_carrier_off(dev); | ||
| 2450 | |||
| 2449 | ret = ehea_up(dev); | 2451 | ret = ehea_up(dev); |
| 2450 | if (!ret) { | 2452 | if (!ret) { |
| 2451 | port_napi_enable(port); | 2453 | port_napi_enable(port); |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5f44c5520fbc..4f3281a03e7e 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -1505,9 +1505,8 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) | |||
| 1505 | adapter->max_rx_add_entries_per_subcrq > entries_page ? | 1505 | adapter->max_rx_add_entries_per_subcrq > entries_page ? |
| 1506 | entries_page : adapter->max_rx_add_entries_per_subcrq; | 1506 | entries_page : adapter->max_rx_add_entries_per_subcrq; |
| 1507 | 1507 | ||
| 1508 | /* Choosing the maximum number of queues supported by firmware*/ | 1508 | adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues; |
| 1509 | adapter->req_tx_queues = adapter->max_tx_queues; | 1509 | adapter->req_rx_queues = adapter->opt_rx_comp_queues; |
| 1510 | adapter->req_rx_queues = adapter->max_rx_queues; | ||
| 1511 | adapter->req_rx_add_queues = adapter->max_rx_add_queues; | 1510 | adapter->req_rx_add_queues = adapter->max_rx_add_queues; |
| 1512 | 1511 | ||
| 1513 | adapter->req_mtu = adapter->max_mtu; | 1512 | adapter->req_mtu = adapter->max_mtu; |
| @@ -3706,7 +3705,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 3706 | struct net_device *netdev; | 3705 | struct net_device *netdev; |
| 3707 | unsigned char *mac_addr_p; | 3706 | unsigned char *mac_addr_p; |
| 3708 | struct dentry *ent; | 3707 | struct dentry *ent; |
| 3709 | char buf[16]; /* debugfs name buf */ | 3708 | char buf[17]; /* debugfs name buf */ |
| 3710 | int rc; | 3709 | int rc; |
| 3711 | 3710 | ||
| 3712 | dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", | 3711 | dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", |
| @@ -3845,6 +3844,9 @@ static int ibmvnic_remove(struct vio_dev *dev) | |||
| 3845 | if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) | 3844 | if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) |
| 3846 | debugfs_remove_recursive(adapter->debugfs_dir); | 3845 | debugfs_remove_recursive(adapter->debugfs_dir); |
| 3847 | 3846 | ||
| 3847 | dma_unmap_single(&dev->dev, adapter->stats_token, | ||
| 3848 | sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE); | ||
| 3849 | |||
| 3848 | if (adapter->ras_comps) | 3850 | if (adapter->ras_comps) |
| 3849 | dma_free_coherent(&dev->dev, | 3851 | dma_free_coherent(&dev->dev, |
| 3850 | adapter->ras_comp_num * | 3852 | adapter->ras_comp_num * |
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index bf5cc55ba24c..5b12022adf1f 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
| @@ -1381,6 +1381,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) | |||
| 1381 | temp = (val & 0x003fff00) >> 8; | 1381 | temp = (val & 0x003fff00) >> 8; |
| 1382 | 1382 | ||
| 1383 | temp *= 64000000; | 1383 | temp *= 64000000; |
| 1384 | temp += mp->t_clk / 2; | ||
| 1384 | do_div(temp, mp->t_clk); | 1385 | do_div(temp, mp->t_clk); |
| 1385 | 1386 | ||
| 1386 | return (unsigned int)temp; | 1387 | return (unsigned int)temp; |
| @@ -1417,6 +1418,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) | |||
| 1417 | 1418 | ||
| 1418 | temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; | 1419 | temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; |
| 1419 | temp *= 64000000; | 1420 | temp *= 64000000; |
| 1421 | temp += mp->t_clk / 2; | ||
| 1420 | do_div(temp, mp->t_clk); | 1422 | do_div(temp, mp->t_clk); |
| 1421 | 1423 | ||
| 1422 | return (unsigned int)temp; | 1424 | return (unsigned int)temp; |
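Adding half the divisor before do_div() turns the truncating division into round-to-nearest, so a coalescing value no longer drifts down by one microsecond on every read-modify-write cycle. A standalone illustration of the arithmetic, assuming (purely for the example) a 125 MHz t_clk and a register value of 195 ticks:

    #include <stdint.h>
    #include <stdio.h>

    /* Truncating vs. round-to-nearest integer division. */
    static uint64_t div_round_nearest(uint64_t num, uint64_t div)
    {
        return (num + div / 2) / div;
    }

    int main(void)
    {
        uint64_t t_clk = 125000000;              /* illustrative 125 MHz t_clk */
        uint64_t ticks = 195;                    /* value read from the register field */
        uint64_t usec_trunc = ticks * 64000000 / t_clk;                   /* 99  */
        uint64_t usec_round = div_round_nearest(ticks * 64000000, t_clk); /* 100 */

        printf("truncated: %llu usec, rounded: %llu usec\n",
               (unsigned long long)usec_trunc,
               (unsigned long long)usec_round);
        return 0;
    }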
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 12c99a2655f2..3a47e83d3e07 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -2202,7 +2202,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev) | |||
| 2202 | 2202 | ||
| 2203 | if (!shutdown) | 2203 | if (!shutdown) |
| 2204 | free_netdev(dev); | 2204 | free_netdev(dev); |
| 2205 | dev->ethtool_ops = NULL; | ||
| 2206 | } | 2205 | } |
| 2207 | 2206 | ||
| 2208 | static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) | 2207 | static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f4c687ce4c59..84e8b250e2af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -1445,6 +1445,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
| 1445 | c->netdev = priv->netdev; | 1445 | c->netdev = priv->netdev; |
| 1446 | c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); | 1446 | c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); |
| 1447 | c->num_tc = priv->params.num_tc; | 1447 | c->num_tc = priv->params.num_tc; |
| 1448 | c->xdp = !!priv->xdp_prog; | ||
| 1448 | 1449 | ||
| 1449 | if (priv->params.rx_am_enabled) | 1450 | if (priv->params.rx_am_enabled) |
| 1450 | rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); | 1451 | rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); |
| @@ -1468,6 +1469,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
| 1468 | if (err) | 1469 | if (err) |
| 1469 | goto err_close_tx_cqs; | 1470 | goto err_close_tx_cqs; |
| 1470 | 1471 | ||
| 1472 | /* XDP SQ CQ params are same as normal TXQ sq CQ params */ | ||
| 1473 | err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, | ||
| 1474 | priv->params.tx_cq_moderation) : 0; | ||
| 1475 | if (err) | ||
| 1476 | goto err_close_rx_cq; | ||
| 1477 | |||
| 1471 | napi_enable(&c->napi); | 1478 | napi_enable(&c->napi); |
| 1472 | 1479 | ||
| 1473 | err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); | 1480 | err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); |
| @@ -1488,21 +1495,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
| 1488 | } | 1495 | } |
| 1489 | } | 1496 | } |
| 1490 | 1497 | ||
| 1491 | if (priv->xdp_prog) { | 1498 | err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0; |
| 1492 | /* XDP SQ CQ params are same as normal TXQ sq CQ params */ | 1499 | if (err) |
| 1493 | err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, | 1500 | goto err_close_sqs; |
| 1494 | priv->params.tx_cq_moderation); | ||
| 1495 | if (err) | ||
| 1496 | goto err_close_sqs; | ||
| 1497 | |||
| 1498 | err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq); | ||
| 1499 | if (err) { | ||
| 1500 | mlx5e_close_cq(&c->xdp_sq.cq); | ||
| 1501 | goto err_close_sqs; | ||
| 1502 | } | ||
| 1503 | } | ||
| 1504 | 1501 | ||
| 1505 | c->xdp = !!priv->xdp_prog; | ||
| 1506 | err = mlx5e_open_rq(c, &cparam->rq, &c->rq); | 1502 | err = mlx5e_open_rq(c, &cparam->rq, &c->rq); |
| 1507 | if (err) | 1503 | if (err) |
| 1508 | goto err_close_xdp_sq; | 1504 | goto err_close_xdp_sq; |
| @@ -1512,7 +1508,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
| 1512 | 1508 | ||
| 1513 | return 0; | 1509 | return 0; |
| 1514 | err_close_xdp_sq: | 1510 | err_close_xdp_sq: |
| 1515 | mlx5e_close_sq(&c->xdp_sq); | 1511 | if (c->xdp) |
| 1512 | mlx5e_close_sq(&c->xdp_sq); | ||
| 1516 | 1513 | ||
| 1517 | err_close_sqs: | 1514 | err_close_sqs: |
| 1518 | mlx5e_close_sqs(c); | 1515 | mlx5e_close_sqs(c); |
| @@ -1522,6 +1519,10 @@ err_close_icosq: | |||
| 1522 | 1519 | ||
| 1523 | err_disable_napi: | 1520 | err_disable_napi: |
| 1524 | napi_disable(&c->napi); | 1521 | napi_disable(&c->napi); |
| 1522 | if (c->xdp) | ||
| 1523 | mlx5e_close_cq(&c->xdp_sq.cq); | ||
| 1524 | |||
| 1525 | err_close_rx_cq: | ||
| 1525 | mlx5e_close_cq(&c->rq.cq); | 1526 | mlx5e_close_cq(&c->rq.cq); |
| 1526 | 1527 | ||
| 1527 | err_close_tx_cqs: | 1528 | err_close_tx_cqs: |
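The reshuffled labels in mlx5e_open_channel() follow the usual open-in-order, unwind-in-reverse pattern, with the optional XDP CQ/SQ guarded identically on the open path and on the error path. A generic sketch of that pattern with invented resource names (open_a/open_opt/open_b stand in for the real CQ/SQ opens):

    #include <stdbool.h>

    static int open_a(void)     { return 0; }
    static int open_opt(void)   { return 0; }  /* the optional step (XDP CQ/SQ here) */
    static int open_b(void)     { return 0; }
    static void close_opt(void) { }
    static void close_a(void)   { }

    /* Open in a fixed order; on failure unwind in exactly the reverse order,
     * guarding the optional step the same way on both paths. */
    static int open_channel(bool want_opt)
    {
        int err;

        err = open_a();
        if (err)
            return err;

        err = want_opt ? open_opt() : 0;    /* optional open, one-line guard */
        if (err)
            goto err_close_a;

        err = open_b();
        if (err)
            goto err_close_opt;

        return 0;

    err_close_opt:
        if (want_opt)                       /* same guard on the unwind path */
            close_opt();
    err_close_a:
        close_a();
        return err;
    }

    int main(void)
    {
        return open_channel(true);
    }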
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 7fe6559e4ab3..bf1c09ca73c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
| @@ -308,7 +308,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) | |||
| 308 | netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; | 308 | netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; |
| 309 | #endif | 309 | #endif |
| 310 | 310 | ||
| 311 | netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC; | 311 | netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; |
| 312 | netdev->hw_features |= NETIF_F_HW_TC; | 312 | netdev->hw_features |= NETIF_F_HW_TC; |
| 313 | 313 | ||
| 314 | eth_hw_addr_random(netdev); | 314 | eth_hw_addr_random(netdev); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ce8c54d18906..6bb21b31cfeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
| @@ -237,12 +237,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec | |||
| 237 | skb_flow_dissector_target(f->dissector, | 237 | skb_flow_dissector_target(f->dissector, |
| 238 | FLOW_DISSECTOR_KEY_VLAN, | 238 | FLOW_DISSECTOR_KEY_VLAN, |
| 239 | f->mask); | 239 | f->mask); |
| 240 | if (mask->vlan_id) { | 240 | if (mask->vlan_id || mask->vlan_priority) { |
| 241 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); | 241 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); |
| 242 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); | 242 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); |
| 243 | 243 | ||
| 244 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); | 244 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); |
| 245 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); | 245 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); |
| 246 | |||
| 247 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority); | ||
| 248 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority); | ||
| 246 | } | 249 | } |
| 247 | } | 250 | } |
| 248 | 251 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index c55ad8d00c05..d239f5d0ea36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
| @@ -57,7 +57,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, | |||
| 57 | if (esw->mode != SRIOV_OFFLOADS) | 57 | if (esw->mode != SRIOV_OFFLOADS) |
| 58 | return ERR_PTR(-EOPNOTSUPP); | 58 | return ERR_PTR(-EOPNOTSUPP); |
| 59 | 59 | ||
| 60 | action = attr->action; | 60 | /* per flow vlan pop/push is emulated, don't set that into the firmware */ |
| 61 | action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); | ||
| 61 | 62 | ||
| 62 | if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { | 63 | if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { |
| 63 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; | 64 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 89696048b045..914e5466f729 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
| @@ -1690,7 +1690,7 @@ static int init_root_ns(struct mlx5_flow_steering *steering) | |||
| 1690 | { | 1690 | { |
| 1691 | 1691 | ||
| 1692 | steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); | 1692 | steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); |
| 1693 | if (IS_ERR_OR_NULL(steering->root_ns)) | 1693 | if (!steering->root_ns) |
| 1694 | goto cleanup; | 1694 | goto cleanup; |
| 1695 | 1695 | ||
| 1696 | if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) | 1696 | if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d5433c49b2b0..3eb931585b3e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
| @@ -1226,6 +1226,9 @@ static int init_one(struct pci_dev *pdev, | |||
| 1226 | 1226 | ||
| 1227 | pci_set_drvdata(pdev, dev); | 1227 | pci_set_drvdata(pdev, dev); |
| 1228 | 1228 | ||
| 1229 | dev->pdev = pdev; | ||
| 1230 | dev->event = mlx5_core_event; | ||
| 1231 | |||
| 1229 | if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { | 1232 | if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { |
| 1230 | mlx5_core_warn(dev, | 1233 | mlx5_core_warn(dev, |
| 1231 | "selected profile out of range, selecting default (%d)\n", | 1234 | "selected profile out of range, selecting default (%d)\n", |
| @@ -1233,8 +1236,6 @@ static int init_one(struct pci_dev *pdev, | |||
| 1233 | prof_sel = MLX5_DEFAULT_PROF; | 1236 | prof_sel = MLX5_DEFAULT_PROF; |
| 1234 | } | 1237 | } |
| 1235 | dev->profile = &profile[prof_sel]; | 1238 | dev->profile = &profile[prof_sel]; |
| 1236 | dev->pdev = pdev; | ||
| 1237 | dev->event = mlx5_core_event; | ||
| 1238 | 1239 | ||
| 1239 | INIT_LIST_HEAD(&priv->ctx_list); | 1240 | INIT_LIST_HEAD(&priv->ctx_list); |
| 1240 | spin_lock_init(&priv->ctx_lock); | 1241 | spin_lock_init(&priv->ctx_lock); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1ec0a4ce3c46..dda5761e91bc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
| @@ -231,7 +231,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port) | |||
| 231 | 231 | ||
| 232 | span_entry->used = true; | 232 | span_entry->used = true; |
| 233 | span_entry->id = index; | 233 | span_entry->id = index; |
| 234 | span_entry->ref_count = 0; | 234 | span_entry->ref_count = 1; |
| 235 | span_entry->local_port = local_port; | 235 | span_entry->local_port = local_port; |
| 236 | return span_entry; | 236 | return span_entry; |
| 237 | } | 237 | } |
| @@ -270,6 +270,7 @@ static struct mlxsw_sp_span_entry | |||
| 270 | 270 | ||
| 271 | span_entry = mlxsw_sp_span_entry_find(port); | 271 | span_entry = mlxsw_sp_span_entry_find(port); |
| 272 | if (span_entry) { | 272 | if (span_entry) { |
| 273 | /* Already exists, just take a reference */ | ||
| 273 | span_entry->ref_count++; | 274 | span_entry->ref_count++; |
| 274 | return span_entry; | 275 | return span_entry; |
| 275 | } | 276 | } |
| @@ -280,6 +281,7 @@ static struct mlxsw_sp_span_entry | |||
| 280 | static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, | 281 | static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, |
| 281 | struct mlxsw_sp_span_entry *span_entry) | 282 | struct mlxsw_sp_span_entry *span_entry) |
| 282 | { | 283 | { |
| 284 | WARN_ON(!span_entry->ref_count); | ||
| 283 | if (--span_entry->ref_count == 0) | 285 | if (--span_entry->ref_count == 0) |
| 284 | mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); | 286 | mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); |
| 285 | return 0; | 287 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 9b22863a924b..97bbc1d21df8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
| @@ -115,7 +115,7 @@ struct mlxsw_sp_rif { | |||
| 115 | struct mlxsw_sp_mid { | 115 | struct mlxsw_sp_mid { |
| 116 | struct list_head list; | 116 | struct list_head list; |
| 117 | unsigned char addr[ETH_ALEN]; | 117 | unsigned char addr[ETH_ALEN]; |
| 118 | u16 vid; | 118 | u16 fid; |
| 119 | u16 mid; | 119 | u16 mid; |
| 120 | unsigned int ref_count; | 120 | unsigned int ref_count; |
| 121 | }; | 121 | }; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 4573da2c5560..e83072da6272 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
| @@ -594,21 +594,22 @@ static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) | |||
| 594 | return 0; | 594 | return 0; |
| 595 | } | 595 | } |
| 596 | 596 | ||
| 597 | static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp); | ||
| 598 | |||
| 597 | static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) | 599 | static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) |
| 598 | { | 600 | { |
| 601 | mlxsw_sp_router_fib_flush(mlxsw_sp); | ||
| 599 | kfree(mlxsw_sp->router.vrs); | 602 | kfree(mlxsw_sp->router.vrs); |
| 600 | } | 603 | } |
| 601 | 604 | ||
| 602 | struct mlxsw_sp_neigh_key { | 605 | struct mlxsw_sp_neigh_key { |
| 603 | unsigned char addr[sizeof(struct in6_addr)]; | 606 | struct neighbour *n; |
| 604 | struct net_device *dev; | ||
| 605 | }; | 607 | }; |
| 606 | 608 | ||
| 607 | struct mlxsw_sp_neigh_entry { | 609 | struct mlxsw_sp_neigh_entry { |
| 608 | struct rhash_head ht_node; | 610 | struct rhash_head ht_node; |
| 609 | struct mlxsw_sp_neigh_key key; | 611 | struct mlxsw_sp_neigh_key key; |
| 610 | u16 rif; | 612 | u16 rif; |
| 611 | struct neighbour *n; | ||
| 612 | bool offloaded; | 613 | bool offloaded; |
| 613 | struct delayed_work dw; | 614 | struct delayed_work dw; |
| 614 | struct mlxsw_sp_port *mlxsw_sp_port; | 615 | struct mlxsw_sp_port *mlxsw_sp_port; |
| @@ -646,19 +647,15 @@ mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, | |||
| 646 | static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); | 647 | static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); |
| 647 | 648 | ||
| 648 | static struct mlxsw_sp_neigh_entry * | 649 | static struct mlxsw_sp_neigh_entry * |
| 649 | mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len, | 650 | mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif) |
| 650 | struct net_device *dev, u16 rif, | ||
| 651 | struct neighbour *n) | ||
| 652 | { | 651 | { |
| 653 | struct mlxsw_sp_neigh_entry *neigh_entry; | 652 | struct mlxsw_sp_neigh_entry *neigh_entry; |
| 654 | 653 | ||
| 655 | neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); | 654 | neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); |
| 656 | if (!neigh_entry) | 655 | if (!neigh_entry) |
| 657 | return NULL; | 656 | return NULL; |
| 658 | memcpy(neigh_entry->key.addr, addr, addr_len); | 657 | neigh_entry->key.n = n; |
| 659 | neigh_entry->key.dev = dev; | ||
| 660 | neigh_entry->rif = rif; | 658 | neigh_entry->rif = rif; |
| 661 | neigh_entry->n = n; | ||
| 662 | INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); | 659 | INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); |
| 663 | INIT_LIST_HEAD(&neigh_entry->nexthop_list); | 660 | INIT_LIST_HEAD(&neigh_entry->nexthop_list); |
| 664 | return neigh_entry; | 661 | return neigh_entry; |
| @@ -671,13 +668,11 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry) | |||
| 671 | } | 668 | } |
| 672 | 669 | ||
| 673 | static struct mlxsw_sp_neigh_entry * | 670 | static struct mlxsw_sp_neigh_entry * |
| 674 | mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr, | 671 | mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) |
| 675 | size_t addr_len, struct net_device *dev) | ||
| 676 | { | 672 | { |
| 677 | struct mlxsw_sp_neigh_key key = {{ 0 } }; | 673 | struct mlxsw_sp_neigh_key key; |
| 678 | 674 | ||
| 679 | memcpy(key.addr, addr, addr_len); | 675 | key.n = n; |
| 680 | key.dev = dev; | ||
| 681 | return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, | 676 | return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, |
| 682 | &key, mlxsw_sp_neigh_ht_params); | 677 | &key, mlxsw_sp_neigh_ht_params); |
| 683 | } | 678 | } |
| @@ -689,26 +684,20 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev, | |||
| 689 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 684 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
| 690 | struct mlxsw_sp_neigh_entry *neigh_entry; | 685 | struct mlxsw_sp_neigh_entry *neigh_entry; |
| 691 | struct mlxsw_sp_rif *r; | 686 | struct mlxsw_sp_rif *r; |
| 692 | u32 dip; | ||
| 693 | int err; | 687 | int err; |
| 694 | 688 | ||
| 695 | if (n->tbl != &arp_tbl) | 689 | if (n->tbl != &arp_tbl) |
| 696 | return 0; | 690 | return 0; |
| 697 | 691 | ||
| 698 | dip = ntohl(*((__be32 *) n->primary_key)); | 692 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); |
| 699 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), | 693 | if (neigh_entry) |
| 700 | n->dev); | ||
| 701 | if (neigh_entry) { | ||
| 702 | WARN_ON(neigh_entry->n != n); | ||
| 703 | return 0; | 694 | return 0; |
| 704 | } | ||
| 705 | 695 | ||
| 706 | r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); | 696 | r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); |
| 707 | if (WARN_ON(!r)) | 697 | if (WARN_ON(!r)) |
| 708 | return -EINVAL; | 698 | return -EINVAL; |
| 709 | 699 | ||
| 710 | neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev, | 700 | neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif); |
| 711 | r->rif, n); | ||
| 712 | if (!neigh_entry) | 701 | if (!neigh_entry) |
| 713 | return -ENOMEM; | 702 | return -ENOMEM; |
| 714 | err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); | 703 | err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); |
| @@ -727,14 +716,11 @@ void mlxsw_sp_router_neigh_destroy(struct net_device *dev, | |||
| 727 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 716 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
| 728 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 717 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
| 729 | struct mlxsw_sp_neigh_entry *neigh_entry; | 718 | struct mlxsw_sp_neigh_entry *neigh_entry; |
| 730 | u32 dip; | ||
| 731 | 719 | ||
| 732 | if (n->tbl != &arp_tbl) | 720 | if (n->tbl != &arp_tbl) |
| 733 | return; | 721 | return; |
| 734 | 722 | ||
| 735 | dip = ntohl(*((__be32 *) n->primary_key)); | 723 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); |
| 736 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), | ||
| 737 | n->dev); | ||
| 738 | if (!neigh_entry) | 724 | if (!neigh_entry) |
| 739 | return; | 725 | return; |
| 740 | mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); | 726 | mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); |
| @@ -817,6 +803,26 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, | |||
| 817 | } | 803 | } |
| 818 | } | 804 | } |
| 819 | 805 | ||
| 806 | static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl) | ||
| 807 | { | ||
| 808 | u8 num_rec, last_rec_index, num_entries; | ||
| 809 | |||
| 810 | num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl); | ||
| 811 | last_rec_index = num_rec - 1; | ||
| 812 | |||
| 813 | if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM) | ||
| 814 | return false; | ||
| 815 | if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) == | ||
| 816 | MLXSW_REG_RAUHTD_TYPE_IPV6) | ||
| 817 | return true; | ||
| 818 | |||
| 819 | num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl, | ||
| 820 | last_rec_index); | ||
| 821 | if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC) | ||
| 822 | return true; | ||
| 823 | return false; | ||
| 824 | } | ||
| 825 | |||
| 820 | static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) | 826 | static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) |
| 821 | { | 827 | { |
| 822 | char *rauhtd_pl; | 828 | char *rauhtd_pl; |
| @@ -843,7 +849,7 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) | |||
| 843 | for (i = 0; i < num_rec; i++) | 849 | for (i = 0; i < num_rec; i++) |
| 844 | mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl, | 850 | mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl, |
| 845 | i); | 851 | i); |
| 846 | } while (num_rec); | 852 | } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl)); |
| 847 | rtnl_unlock(); | 853 | rtnl_unlock(); |
| 848 | 854 | ||
| 849 | kfree(rauhtd_pl); | 855 | kfree(rauhtd_pl); |
| @@ -862,7 +868,7 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp) | |||
| 862 | * is active regardless of the traffic. | 868 | * is active regardless of the traffic. |
| 863 | */ | 869 | */ |
| 864 | if (!list_empty(&neigh_entry->nexthop_list)) | 870 | if (!list_empty(&neigh_entry->nexthop_list)) |
| 865 | neigh_event_send(neigh_entry->n, NULL); | 871 | neigh_event_send(neigh_entry->key.n, NULL); |
| 866 | } | 872 | } |
| 867 | rtnl_unlock(); | 873 | rtnl_unlock(); |
| 868 | } | 874 | } |
| @@ -908,9 +914,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work) | |||
| 908 | rtnl_lock(); | 914 | rtnl_lock(); |
| 909 | list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, | 915 | list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, |
| 910 | nexthop_neighs_list_node) { | 916 | nexthop_neighs_list_node) { |
| 911 | if (!(neigh_entry->n->nud_state & NUD_VALID) && | 917 | if (!(neigh_entry->key.n->nud_state & NUD_VALID) && |
| 912 | !list_empty(&neigh_entry->nexthop_list)) | 918 | !list_empty(&neigh_entry->nexthop_list)) |
| 913 | neigh_event_send(neigh_entry->n, NULL); | 919 | neigh_event_send(neigh_entry->key.n, NULL); |
| 914 | } | 920 | } |
| 915 | rtnl_unlock(); | 921 | rtnl_unlock(); |
| 916 | 922 | ||
| @@ -927,7 +933,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work) | |||
| 927 | { | 933 | { |
| 928 | struct mlxsw_sp_neigh_entry *neigh_entry = | 934 | struct mlxsw_sp_neigh_entry *neigh_entry = |
| 929 | container_of(work, struct mlxsw_sp_neigh_entry, dw.work); | 935 | container_of(work, struct mlxsw_sp_neigh_entry, dw.work); |
| 930 | struct neighbour *n = neigh_entry->n; | 936 | struct neighbour *n = neigh_entry->key.n; |
| 931 | struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; | 937 | struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; |
| 932 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 938 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
| 933 | char rauht_pl[MLXSW_REG_RAUHT_LEN]; | 939 | char rauht_pl[MLXSW_REG_RAUHT_LEN]; |
| @@ -1030,11 +1036,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, | |||
| 1030 | 1036 | ||
| 1031 | mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 1037 | mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
| 1032 | dip = ntohl(*((__be32 *) n->primary_key)); | 1038 | dip = ntohl(*((__be32 *) n->primary_key)); |
| 1033 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, | 1039 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); |
| 1034 | &dip, | 1040 | if (WARN_ON(!neigh_entry)) { |
| 1035 | sizeof(__be32), | ||
| 1036 | dev); | ||
| 1037 | if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) { | ||
| 1038 | mlxsw_sp_port_dev_put(mlxsw_sp_port); | 1041 | mlxsw_sp_port_dev_put(mlxsw_sp_port); |
| 1039 | return NOTIFY_DONE; | 1042 | return NOTIFY_DONE; |
| 1040 | } | 1043 | } |
| @@ -1343,33 +1346,26 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, | |||
| 1343 | struct fib_nh *fib_nh) | 1346 | struct fib_nh *fib_nh) |
| 1344 | { | 1347 | { |
| 1345 | struct mlxsw_sp_neigh_entry *neigh_entry; | 1348 | struct mlxsw_sp_neigh_entry *neigh_entry; |
| 1346 | u32 gwip = ntohl(fib_nh->nh_gw); | ||
| 1347 | struct net_device *dev = fib_nh->nh_dev; | 1349 | struct net_device *dev = fib_nh->nh_dev; |
| 1348 | struct neighbour *n; | 1350 | struct neighbour *n; |
| 1349 | u8 nud_state; | 1351 | u8 nud_state; |
| 1350 | 1352 | ||
| 1351 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, | 1353 | /* Take a reference of neigh here ensuring that neigh would |
| 1352 | sizeof(gwip), dev); | 1354 | * not be destructed before the nexthop entry is finished. |
| 1353 | if (!neigh_entry) { | 1355 | * The reference is taken either in neigh_lookup() or |
| 1354 | __be32 gwipn = htonl(gwip); | 1356 | * in neigh_create() in case n is not found. |
| 1355 | 1357 | */ | |
| 1356 | n = neigh_create(&arp_tbl, &gwipn, dev); | 1358 | n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev); |
| 1359 | if (!n) { | ||
| 1360 | n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev); | ||
| 1357 | if (IS_ERR(n)) | 1361 | if (IS_ERR(n)) |
| 1358 | return PTR_ERR(n); | 1362 | return PTR_ERR(n); |
| 1359 | neigh_event_send(n, NULL); | 1363 | neigh_event_send(n, NULL); |
| 1360 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, | 1364 | } |
| 1361 | sizeof(gwip), dev); | 1365 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); |
| 1362 | if (!neigh_entry) { | 1366 | if (!neigh_entry) { |
| 1363 | neigh_release(n); | 1367 | neigh_release(n); |
| 1364 | return -EINVAL; | 1368 | return -EINVAL; |
| 1365 | } | ||
| 1366 | } else { | ||
| 1367 | /* Take a reference of neigh here ensuring that neigh would | ||
| 1368 | * not be detructed before the nexthop entry is finished. | ||
| 1369 | * The second branch takes the reference in neith_create() | ||
| 1370 | */ | ||
| 1371 | n = neigh_entry->n; | ||
| 1372 | neigh_clone(n); | ||
| 1373 | } | 1369 | } |
| 1374 | 1370 | ||
| 1375 | /* If that is the first nexthop connected to that neigh, add to | 1371 | /* If that is the first nexthop connected to that neigh, add to |
| @@ -1403,7 +1399,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, | |||
| 1403 | if (list_empty(&nh->neigh_entry->nexthop_list)) | 1399 | if (list_empty(&nh->neigh_entry->nexthop_list)) |
| 1404 | list_del(&nh->neigh_entry->nexthop_neighs_list_node); | 1400 | list_del(&nh->neigh_entry->nexthop_neighs_list_node); |
| 1405 | 1401 | ||
| 1406 | neigh_release(neigh_entry->n); | 1402 | neigh_release(neigh_entry->key.n); |
| 1407 | } | 1403 | } |
| 1408 | 1404 | ||
| 1409 | static struct mlxsw_sp_nexthop_group * | 1405 | static struct mlxsw_sp_nexthop_group * |
| @@ -1463,11 +1459,11 @@ static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh, | |||
| 1463 | 1459 | ||
| 1464 | for (i = 0; i < fi->fib_nhs; i++) { | 1460 | for (i = 0; i < fi->fib_nhs; i++) { |
| 1465 | struct fib_nh *fib_nh = &fi->fib_nh[i]; | 1461 | struct fib_nh *fib_nh = &fi->fib_nh[i]; |
| 1466 | u32 gwip = ntohl(fib_nh->nh_gw); | 1462 | struct neighbour *n = nh->neigh_entry->key.n; |
| 1467 | 1463 | ||
| 1468 | if (memcmp(nh->neigh_entry->key.addr, | 1464 | if (memcmp(n->primary_key, &fib_nh->nh_gw, |
| 1469 | &gwip, sizeof(u32)) == 0 && | 1465 | sizeof(fib_nh->nh_gw)) == 0 && |
| 1470 | nh->neigh_entry->key.dev == fib_nh->nh_dev) | 1466 | n->dev == fib_nh->nh_dev) |
| 1471 | return true; | 1467 | return true; |
| 1472 | } | 1468 | } |
| 1473 | return false; | 1469 | return false; |
| @@ -1874,18 +1870,18 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) | |||
| 1874 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); | 1870 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); |
| 1875 | } | 1871 | } |
| 1876 | 1872 | ||
| 1877 | static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) | 1873 | static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) |
| 1878 | { | 1874 | { |
| 1879 | struct mlxsw_resources *resources; | 1875 | struct mlxsw_resources *resources; |
| 1880 | struct mlxsw_sp_fib_entry *fib_entry; | 1876 | struct mlxsw_sp_fib_entry *fib_entry; |
| 1881 | struct mlxsw_sp_fib_entry *tmp; | 1877 | struct mlxsw_sp_fib_entry *tmp; |
| 1882 | struct mlxsw_sp_vr *vr; | 1878 | struct mlxsw_sp_vr *vr; |
| 1883 | int i; | 1879 | int i; |
| 1884 | int err; | ||
| 1885 | 1880 | ||
| 1886 | resources = mlxsw_core_resources_get(mlxsw_sp->core); | 1881 | resources = mlxsw_core_resources_get(mlxsw_sp->core); |
| 1887 | for (i = 0; i < resources->max_virtual_routers; i++) { | 1882 | for (i = 0; i < resources->max_virtual_routers; i++) { |
| 1888 | vr = &mlxsw_sp->router.vrs[i]; | 1883 | vr = &mlxsw_sp->router.vrs[i]; |
| 1884 | |||
| 1889 | if (!vr->used) | 1885 | if (!vr->used) |
| 1890 | continue; | 1886 | continue; |
| 1891 | 1887 | ||
| @@ -1901,6 +1897,13 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) | |||
| 1901 | break; | 1897 | break; |
| 1902 | } | 1898 | } |
| 1903 | } | 1899 | } |
| 1900 | } | ||
| 1901 | |||
| 1902 | static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) | ||
| 1903 | { | ||
| 1904 | int err; | ||
| 1905 | |||
| 1906 | mlxsw_sp_router_fib_flush(mlxsw_sp); | ||
| 1904 | mlxsw_sp->router.aborted = true; | 1907 | mlxsw_sp->router.aborted = true; |
| 1905 | err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); | 1908 | err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); |
| 1906 | if (err) | 1909 | if (err) |
| @@ -1958,6 +1961,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, | |||
| 1958 | struct fib_entry_notifier_info *fen_info = ptr; | 1961 | struct fib_entry_notifier_info *fen_info = ptr; |
| 1959 | int err; | 1962 | int err; |
| 1960 | 1963 | ||
| 1964 | if (!net_eq(fen_info->info.net, &init_net)) | ||
| 1965 | return NOTIFY_DONE; | ||
| 1966 | |||
| 1961 | switch (event) { | 1967 | switch (event) { |
| 1962 | case FIB_EVENT_ENTRY_ADD: | 1968 | case FIB_EVENT_ENTRY_ADD: |
| 1963 | err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info); | 1969 | err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info); |
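The router hunk above replaces the driver's private gateway-keyed lookup with the core neighbour API: neigh_lookup() returns the entry with a reference already held, and neigh_create() returns a new, referenced entry when none exists, so either path leaves the caller owning a reference that is later dropped with neigh_release(). A minimal sketch of that pattern, with a hypothetical helper name and error handling trimmed:

	/* Lookup-or-create sketch; resolve_gw_neigh() is a hypothetical helper,
	 * not part of the mlxsw driver.
	 */
	static struct neighbour *resolve_gw_neigh(struct fib_nh *fib_nh)
	{
		struct net_device *dev = fib_nh->nh_dev;
		struct neighbour *n;

		n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev);	/* takes a reference on hit */
		if (!n) {
			n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev);	/* referenced on success */
			if (IS_ERR(n))
				return n;
			neigh_event_send(n, NULL);	/* kick off resolution */
		}
		return n;	/* caller releases with neigh_release(n) */
	}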
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 5e00c79e8133..1e2c8eca3af1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
| @@ -929,12 +929,12 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, | |||
| 929 | 929 | ||
| 930 | static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, | 930 | static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, |
| 931 | const unsigned char *addr, | 931 | const unsigned char *addr, |
| 932 | u16 vid) | 932 | u16 fid) |
| 933 | { | 933 | { |
| 934 | struct mlxsw_sp_mid *mid; | 934 | struct mlxsw_sp_mid *mid; |
| 935 | 935 | ||
| 936 | list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { | 936 | list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { |
| 937 | if (ether_addr_equal(mid->addr, addr) && mid->vid == vid) | 937 | if (ether_addr_equal(mid->addr, addr) && mid->fid == fid) |
| 938 | return mid; | 938 | return mid; |
| 939 | } | 939 | } |
| 940 | return NULL; | 940 | return NULL; |
| @@ -942,7 +942,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, | |||
| 942 | 942 | ||
| 943 | static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, | 943 | static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, |
| 944 | const unsigned char *addr, | 944 | const unsigned char *addr, |
| 945 | u16 vid) | 945 | u16 fid) |
| 946 | { | 946 | { |
| 947 | struct mlxsw_sp_mid *mid; | 947 | struct mlxsw_sp_mid *mid; |
| 948 | u16 mid_idx; | 948 | u16 mid_idx; |
| @@ -958,7 +958,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, | |||
| 958 | 958 | ||
| 959 | set_bit(mid_idx, mlxsw_sp->br_mids.mapped); | 959 | set_bit(mid_idx, mlxsw_sp->br_mids.mapped); |
| 960 | ether_addr_copy(mid->addr, addr); | 960 | ether_addr_copy(mid->addr, addr); |
| 961 | mid->vid = vid; | 961 | mid->fid = fid; |
| 962 | mid->mid = mid_idx; | 962 | mid->mid = mid_idx; |
| 963 | mid->ref_count = 0; | 963 | mid->ref_count = 0; |
| 964 | list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); | 964 | list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); |
| @@ -991,9 +991,9 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 991 | if (switchdev_trans_ph_prepare(trans)) | 991 | if (switchdev_trans_ph_prepare(trans)) |
| 992 | return 0; | 992 | return 0; |
| 993 | 993 | ||
| 994 | mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); | 994 | mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); |
| 995 | if (!mid) { | 995 | if (!mid) { |
| 996 | mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid); | 996 | mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid); |
| 997 | if (!mid) { | 997 | if (!mid) { |
| 998 | netdev_err(dev, "Unable to allocate MC group\n"); | 998 | netdev_err(dev, "Unable to allocate MC group\n"); |
| 999 | return -ENOMEM; | 999 | return -ENOMEM; |
| @@ -1137,7 +1137,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 1137 | u16 mid_idx; | 1137 | u16 mid_idx; |
| 1138 | int err = 0; | 1138 | int err = 0; |
| 1139 | 1139 | ||
| 1140 | mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); | 1140 | mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); |
| 1141 | if (!mid) { | 1141 | if (!mid) { |
| 1142 | netdev_err(dev, "Unable to remove port from MC DB\n"); | 1142 | netdev_err(dev, "Unable to remove port from MC DB\n"); |
| 1143 | return -EINVAL; | 1143 | return -EINVAL; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 72eee29c677f..2777d5bb4380 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h | |||
| @@ -727,9 +727,6 @@ struct core_tx_bd_flags { | |||
| 727 | #define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 | 727 | #define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 |
| 728 | #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 | 728 | #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 |
| 729 | #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 | 729 | #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 |
| 730 | #define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1 | ||
| 731 | #define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12 | ||
| 732 | |||
| 733 | }; | 730 | }; |
| 734 | 731 | ||
| 735 | struct core_tx_bd { | 732 | struct core_tx_bd { |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 63e1a1b0ef8e..f95385cbbd40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c | |||
| @@ -1119,6 +1119,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, | |||
| 1119 | start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << | 1119 | start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << |
| 1120 | CORE_TX_BD_FLAGS_START_BD_SHIFT; | 1120 | CORE_TX_BD_FLAGS_START_BD_SHIFT; |
| 1121 | SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); | 1121 | SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); |
| 1122 | SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type); | ||
| 1122 | DMA_REGPAIR_LE(start_bd->addr, first_frag); | 1123 | DMA_REGPAIR_LE(start_bd->addr, first_frag); |
| 1123 | start_bd->nbytes = cpu_to_le16(first_frag_len); | 1124 | start_bd->nbytes = cpu_to_le16(first_frag_len); |
| 1124 | 1125 | ||
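The qed_ll2.c hunk programs the RoCE flavor through SET_FIELD() on bitfield0 instead of the mask/shift pair removed from bd_flags in qed_hsi.h. A stand-alone illustration of the mask/shift idiom behind such a macro; the names and values below are illustrative, not the driver's definitions:

	#include <stdint.h>

	/* Illustrative field; the real CORE_TX_BD_ROCE_FLAV mask and shift live
	 * in the qed firmware headers.
	 */
	#define EX_ROCE_FLAV_MASK	0x1
	#define EX_ROCE_FLAV_SHIFT	12

	#define EX_SET_FIELD(var, name, val)					\
		do {								\
			(var) &= ~((name##_MASK) << (name##_SHIFT));		\
			(var) |= (uint32_t)((val) & (name##_MASK)) << (name##_SHIFT); \
		} while (0)

	static uint32_t set_roce_flavor(uint32_t bitfield0, unsigned int flavor)
	{
		EX_SET_FIELD(bitfield0, EX_ROCE_FLAV, flavor);	/* clear the field, then program it */
		return bitfield0;
	}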
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c418360ba02a..333c7442e48a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
| @@ -839,20 +839,19 @@ static void qed_update_pf_params(struct qed_dev *cdev, | |||
| 839 | { | 839 | { |
| 840 | int i; | 840 | int i; |
| 841 | 841 | ||
| 842 | if (IS_ENABLED(CONFIG_QED_RDMA)) { | ||
| 843 | params->rdma_pf_params.num_qps = QED_ROCE_QPS; | ||
| 844 | params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; | ||
| 845 | /* divide by 3 the MRs to avoid MF ILT overflow */ | ||
| 846 | params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; | ||
| 847 | params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; | ||
| 848 | } | ||
| 849 | |||
| 842 | for (i = 0; i < cdev->num_hwfns; i++) { | 850 | for (i = 0; i < cdev->num_hwfns; i++) { |
| 843 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | 851 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
| 844 | 852 | ||
| 845 | p_hwfn->pf_params = *params; | 853 | p_hwfn->pf_params = *params; |
| 846 | } | 854 | } |
| 847 | |||
| 848 | if (!IS_ENABLED(CONFIG_QED_RDMA)) | ||
| 849 | return; | ||
| 850 | |||
| 851 | params->rdma_pf_params.num_qps = QED_ROCE_QPS; | ||
| 852 | params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; | ||
| 853 | /* divide by 3 the MRs to avoid MF ILT overflow */ | ||
| 854 | params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; | ||
| 855 | params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; | ||
| 856 | } | 855 | } |
| 857 | 856 | ||
| 858 | static int qed_slowpath_start(struct qed_dev *cdev, | 857 | static int qed_slowpath_start(struct qed_dev *cdev, |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 12251a1032d1..7567cc464b88 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c | |||
| @@ -175,16 +175,23 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) | |||
| 175 | for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { | 175 | for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { |
| 176 | int tc; | 176 | int tc; |
| 177 | 177 | ||
| 178 | for (j = 0; j < QEDE_NUM_RQSTATS; j++) | 178 | if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { |
| 179 | sprintf(buf + (k + j) * ETH_GSTRING_LEN, | 179 | for (j = 0; j < QEDE_NUM_RQSTATS; j++) |
| 180 | "%d: %s", i, qede_rqstats_arr[j].string); | ||
| 181 | k += QEDE_NUM_RQSTATS; | ||
| 182 | for (tc = 0; tc < edev->num_tc; tc++) { | ||
| 183 | for (j = 0; j < QEDE_NUM_TQSTATS; j++) | ||
| 184 | sprintf(buf + (k + j) * ETH_GSTRING_LEN, | 180 | sprintf(buf + (k + j) * ETH_GSTRING_LEN, |
| 185 | "%d.%d: %s", i, tc, | 181 | "%d: %s", i, |
| 186 | qede_tqstats_arr[j].string); | 182 | qede_rqstats_arr[j].string); |
| 187 | k += QEDE_NUM_TQSTATS; | 183 | k += QEDE_NUM_RQSTATS; |
| 184 | } | ||
| 185 | |||
| 186 | if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { | ||
| 187 | for (tc = 0; tc < edev->num_tc; tc++) { | ||
| 188 | for (j = 0; j < QEDE_NUM_TQSTATS; j++) | ||
| 189 | sprintf(buf + (k + j) * | ||
| 190 | ETH_GSTRING_LEN, | ||
| 191 | "%d.%d: %s", i, tc, | ||
| 192 | qede_tqstats_arr[j].string); | ||
| 193 | k += QEDE_NUM_TQSTATS; | ||
| 194 | } | ||
| 188 | } | 195 | } |
| 189 | } | 196 | } |
| 190 | 197 | ||
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 7def29aaf65c..85f46dbecd5b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
| @@ -2839,7 +2839,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) | |||
| 2839 | } | 2839 | } |
| 2840 | 2840 | ||
| 2841 | mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, | 2841 | mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, |
| 2842 | rxq->rx_buf_size, DMA_FROM_DEVICE); | 2842 | PAGE_SIZE, DMA_FROM_DEVICE); |
| 2843 | if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { | 2843 | if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { |
| 2844 | DP_NOTICE(edev, | 2844 | DP_NOTICE(edev, |
| 2845 | "Failed to map TPA replacement buffer\n"); | 2845 | "Failed to map TPA replacement buffer\n"); |
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 6fb3bee904d3..0b4deb31e742 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c | |||
| @@ -575,10 +575,11 @@ void emac_mac_start(struct emac_adapter *adpt) | |||
| 575 | 575 | ||
| 576 | mac |= TXEN | RXEN; /* enable RX/TX */ | 576 | mac |= TXEN | RXEN; /* enable RX/TX */ |
| 577 | 577 | ||
| 578 | /* We don't have ethtool support yet, so force flow-control mode | 578 | /* Configure MAC flow control to match the PHY's settings. */ |
| 579 | * to 'full' always. | 579 | if (phydev->pause) |
| 580 | */ | 580 | mac |= RXFC; |
| 581 | mac |= TXFC | RXFC; | 581 | if (phydev->pause != phydev->asym_pause) |
| 582 | mac |= TXFC; | ||
| 582 | 583 | ||
| 583 | /* setup link speed */ | 584 | /* setup link speed */ |
| 584 | mac &= ~SPEED_MASK; | 585 | mac &= ~SPEED_MASK; |
| @@ -1003,6 +1004,12 @@ int emac_mac_up(struct emac_adapter *adpt) | |||
| 1003 | writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); | 1004 | writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); |
| 1004 | writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); | 1005 | writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); |
| 1005 | 1006 | ||
| 1007 | /* Enable pause frames. Without this feature, the EMAC has been shown | ||
| 1008 | * to receive (and drop) frames with FCS errors at gigabit connections. | ||
| 1009 | */ | ||
| 1010 | adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; | ||
| 1011 | adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; | ||
| 1012 | |||
| 1006 | adpt->phydev->irq = PHY_IGNORE_INTERRUPT; | 1013 | adpt->phydev->irq = PHY_IGNORE_INTERRUPT; |
| 1007 | phy_start(adpt->phydev); | 1014 | phy_start(adpt->phydev); |
| 1008 | 1015 | ||
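The two emac-mac.c hunks work together: pause support is advertised before the PHY is started, and once the link is resolved the phylib pause bits are mapped onto the MAC's RXFC/TXFC enables instead of forcing both on. A minimal sketch of that flow using the register bit names from the hunk; this is an illustration, not the driver's code:

	/* advertise pause before starting the PHY (as in emac_mac_up) */
	phydev->supported   |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	phy_start(phydev);

	/* later, when configuring the MAC for the resolved link */
	if (phydev->pause)
		mac |= RXFC;				/* act on received pause frames */
	if (phydev->pause != phydev->asym_pause)
		mac |= TXFC;				/* allowed to transmit pause frames */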
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index 75c1b530e39e..72fe343c7a36 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | |||
| @@ -421,7 +421,7 @@ static const struct emac_reg_write sgmii_v2_laned[] = { | |||
| 421 | /* CDR Settings */ | 421 | /* CDR Settings */ |
| 422 | {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, | 422 | {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, |
| 423 | UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)}, | 423 | UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)}, |
| 424 | {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)}, | 424 | {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)}, |
| 425 | {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)}, | 425 | {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)}, |
| 426 | 426 | ||
| 427 | /* TX/RX Settings */ | 427 | /* TX/RX Settings */ |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 3cf3557106c2..6b89e4a7b164 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
| @@ -485,6 +485,9 @@ efx_copy_channel(const struct efx_channel *old_channel) | |||
| 485 | *channel = *old_channel; | 485 | *channel = *old_channel; |
| 486 | 486 | ||
| 487 | channel->napi_dev = NULL; | 487 | channel->napi_dev = NULL; |
| 488 | INIT_HLIST_NODE(&channel->napi_str.napi_hash_node); | ||
| 489 | channel->napi_str.napi_id = 0; | ||
| 490 | channel->napi_str.state = 0; | ||
| 488 | memset(&channel->eventq, 0, sizeof(channel->eventq)); | 491 | memset(&channel->eventq, 0, sizeof(channel->eventq)); |
| 489 | 492 | ||
| 490 | for (j = 0; j < EFX_TXQ_TYPES; j++) { | 493 | for (j = 0; j < EFX_TXQ_TYPES; j++) { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 48e71fad4210..e2c94ec4edd0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -880,6 +880,13 @@ static int stmmac_init_phy(struct net_device *dev) | |||
| 880 | return -ENODEV; | 880 | return -ENODEV; |
| 881 | } | 881 | } |
| 882 | 882 | ||
| 883 | /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid | ||
| 884 | * subsequent PHY polling; make sure we force a link transition if | ||
| 885 | * we have an UP/DOWN/UP transition | ||
| 886 | */ | ||
| 887 | if (phydev->is_pseudo_fixed_link) | ||
| 888 | phydev->irq = PHY_POLL; | ||
| 889 | |||
| 883 | pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" | 890 | pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" |
| 884 | " Link = %d\n", dev->name, phydev->phy_id, phydev->link); | 891 | " Link = %d\n", dev->name, phydev->phy_id, phydev->link); |
| 885 | 892 | ||
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 054a8dd23dae..ba1e45ff6aae 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c | |||
| @@ -176,9 +176,12 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) | |||
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | dev = bus_find_device(&platform_bus_type, NULL, node, match); | 178 | dev = bus_find_device(&platform_bus_type, NULL, node, match); |
| 179 | of_node_put(node); | ||
| 179 | priv = dev_get_drvdata(dev); | 180 | priv = dev_get_drvdata(dev); |
| 180 | 181 | ||
| 181 | priv->cpsw_phy_sel(priv, phy_mode, slave); | 182 | priv->cpsw_phy_sel(priv, phy_mode, slave); |
| 183 | |||
| 184 | put_device(dev); | ||
| 182 | } | 185 | } |
| 183 | EXPORT_SYMBOL_GPL(cpsw_phy_sel); | 186 | EXPORT_SYMBOL_GPL(cpsw_phy_sel); |
| 184 | 187 | ||
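OF lookups and bus_find_device() both hand back referenced objects, so the fix above drops the node reference as soon as the match is made and drops the device reference after its drvdata has been used; the davinci_emac and phy_connect()/phy_attach() hunks below apply the same balancing. A sketch of the pattern, with a hypothetical node lookup:

	struct device_node *node;
	struct device *d;

	node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");	/* +1 node ref (hypothetical lookup) */
	d = bus_find_device(&platform_bus_type, NULL, node, match);	/* +1 device ref */
	of_node_put(node);		/* done with the node once the match is made */
	if (!d)
		return;
	/* ... use dev_get_drvdata(d) ... */
	put_device(d);			/* balance the reference from bus_find_device() */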
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 2fd94a5bc1f3..84fbe5714f8b 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
| @@ -1410,6 +1410,7 @@ static int emac_dev_open(struct net_device *ndev) | |||
| 1410 | int i = 0; | 1410 | int i = 0; |
| 1411 | struct emac_priv *priv = netdev_priv(ndev); | 1411 | struct emac_priv *priv = netdev_priv(ndev); |
| 1412 | struct phy_device *phydev = NULL; | 1412 | struct phy_device *phydev = NULL; |
| 1413 | struct device *phy = NULL; | ||
| 1413 | 1414 | ||
| 1414 | ret = pm_runtime_get_sync(&priv->pdev->dev); | 1415 | ret = pm_runtime_get_sync(&priv->pdev->dev); |
| 1415 | if (ret < 0) { | 1416 | if (ret < 0) { |
| @@ -1488,19 +1489,20 @@ static int emac_dev_open(struct net_device *ndev) | |||
| 1488 | 1489 | ||
| 1489 | /* use the first phy on the bus if pdata did not give us a phy id */ | 1490 | /* use the first phy on the bus if pdata did not give us a phy id */ |
| 1490 | if (!phydev && !priv->phy_id) { | 1491 | if (!phydev && !priv->phy_id) { |
| 1491 | struct device *phy; | ||
| 1492 | |||
| 1493 | phy = bus_find_device(&mdio_bus_type, NULL, NULL, | 1492 | phy = bus_find_device(&mdio_bus_type, NULL, NULL, |
| 1494 | match_first_device); | 1493 | match_first_device); |
| 1495 | if (phy) | 1494 | if (phy) { |
| 1496 | priv->phy_id = dev_name(phy); | 1495 | priv->phy_id = dev_name(phy); |
| 1496 | if (!priv->phy_id || !*priv->phy_id) | ||
| 1497 | put_device(phy); | ||
| 1498 | } | ||
| 1497 | } | 1499 | } |
| 1498 | 1500 | ||
| 1499 | if (!phydev && priv->phy_id && *priv->phy_id) { | 1501 | if (!phydev && priv->phy_id && *priv->phy_id) { |
| 1500 | phydev = phy_connect(ndev, priv->phy_id, | 1502 | phydev = phy_connect(ndev, priv->phy_id, |
| 1501 | &emac_adjust_link, | 1503 | &emac_adjust_link, |
| 1502 | PHY_INTERFACE_MODE_MII); | 1504 | PHY_INTERFACE_MODE_MII); |
| 1503 | 1505 | put_device(phy); /* reference taken by bus_find_device */ | |
| 1504 | if (IS_ERR(phydev)) { | 1506 | if (IS_ERR(phydev)) { |
| 1505 | dev_err(emac_dev, "could not connect to phy %s\n", | 1507 | dev_err(emac_dev, "could not connect to phy %s\n", |
| 1506 | priv->phy_id); | 1508 | priv->phy_id); |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 446ea580ad42..928c1dca2673 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | |||
| @@ -1694,7 +1694,7 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl) | |||
| 1694 | pr_debug("%s: bssid matched\n", __func__); | 1694 | pr_debug("%s: bssid matched\n", __func__); |
| 1695 | break; | 1695 | break; |
| 1696 | } else { | 1696 | } else { |
| 1697 | pr_debug("%s: bssid unmached\n", __func__); | 1697 | pr_debug("%s: bssid unmatched\n", __func__); |
| 1698 | continue; | 1698 | continue; |
| 1699 | } | 1699 | } |
| 1700 | } | 1700 | } |
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 7f127dc1b7ba..fa32391720fe 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c | |||
| @@ -708,8 +708,7 @@ static int eth_poll(struct napi_struct *napi, int budget) | |||
| 708 | if (!qmgr_stat_below_low_watermark(rxq) && | 708 | if (!qmgr_stat_below_low_watermark(rxq) && |
| 709 | napi_reschedule(napi)) { /* not empty again */ | 709 | napi_reschedule(napi)) { /* not empty again */ |
| 710 | #if DEBUG_RX | 710 | #if DEBUG_RX |
| 711 | printk(KERN_DEBUG "%s: eth_poll" | 711 | printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n", |
| 712 | " napi_reschedule successed\n", | ||
| 713 | dev->name); | 712 | dev->name); |
| 714 | #endif | 713 | #endif |
| 715 | qmgr_disable_irq(rxq); | 714 | qmgr_disable_irq(rxq); |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 3234fcdea317..d2d6f12a112f 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
| @@ -1278,6 +1278,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
| 1278 | struct net_device *lowerdev; | 1278 | struct net_device *lowerdev; |
| 1279 | int err; | 1279 | int err; |
| 1280 | int macmode; | 1280 | int macmode; |
| 1281 | bool create = false; | ||
| 1281 | 1282 | ||
| 1282 | if (!tb[IFLA_LINK]) | 1283 | if (!tb[IFLA_LINK]) |
| 1283 | return -EINVAL; | 1284 | return -EINVAL; |
| @@ -1304,12 +1305,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
| 1304 | err = macvlan_port_create(lowerdev); | 1305 | err = macvlan_port_create(lowerdev); |
| 1305 | if (err < 0) | 1306 | if (err < 0) |
| 1306 | return err; | 1307 | return err; |
| 1308 | create = true; | ||
| 1307 | } | 1309 | } |
| 1308 | port = macvlan_port_get_rtnl(lowerdev); | 1310 | port = macvlan_port_get_rtnl(lowerdev); |
| 1309 | 1311 | ||
| 1310 | /* Only 1 macvlan device can be created in passthru mode */ | 1312 | /* Only 1 macvlan device can be created in passthru mode */ |
| 1311 | if (port->passthru) | 1313 | if (port->passthru) { |
| 1312 | return -EINVAL; | 1314 | /* The macvlan port must be not created this time, |
| 1315 | * still goto destroy_macvlan_port for readability. | ||
| 1316 | */ | ||
| 1317 | err = -EINVAL; | ||
| 1318 | goto destroy_macvlan_port; | ||
| 1319 | } | ||
| 1313 | 1320 | ||
| 1314 | vlan->lowerdev = lowerdev; | 1321 | vlan->lowerdev = lowerdev; |
| 1315 | vlan->dev = dev; | 1322 | vlan->dev = dev; |
| @@ -1325,24 +1332,28 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
| 1325 | vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); | 1332 | vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); |
| 1326 | 1333 | ||
| 1327 | if (vlan->mode == MACVLAN_MODE_PASSTHRU) { | 1334 | if (vlan->mode == MACVLAN_MODE_PASSTHRU) { |
| 1328 | if (port->count) | 1335 | if (port->count) { |
| 1329 | return -EINVAL; | 1336 | err = -EINVAL; |
| 1337 | goto destroy_macvlan_port; | ||
| 1338 | } | ||
| 1330 | port->passthru = true; | 1339 | port->passthru = true; |
| 1331 | eth_hw_addr_inherit(dev, lowerdev); | 1340 | eth_hw_addr_inherit(dev, lowerdev); |
| 1332 | } | 1341 | } |
| 1333 | 1342 | ||
| 1334 | if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { | 1343 | if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { |
| 1335 | if (vlan->mode != MACVLAN_MODE_SOURCE) | 1344 | if (vlan->mode != MACVLAN_MODE_SOURCE) { |
| 1336 | return -EINVAL; | 1345 | err = -EINVAL; |
| 1346 | goto destroy_macvlan_port; | ||
| 1347 | } | ||
| 1337 | macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); | 1348 | macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); |
| 1338 | err = macvlan_changelink_sources(vlan, macmode, data); | 1349 | err = macvlan_changelink_sources(vlan, macmode, data); |
| 1339 | if (err) | 1350 | if (err) |
| 1340 | return err; | 1351 | goto destroy_macvlan_port; |
| 1341 | } | 1352 | } |
| 1342 | 1353 | ||
| 1343 | err = register_netdevice(dev); | 1354 | err = register_netdevice(dev); |
| 1344 | if (err < 0) | 1355 | if (err < 0) |
| 1345 | return err; | 1356 | goto destroy_macvlan_port; |
| 1346 | 1357 | ||
| 1347 | dev->priv_flags |= IFF_MACVLAN; | 1358 | dev->priv_flags |= IFF_MACVLAN; |
| 1348 | err = netdev_upper_dev_link(lowerdev, dev); | 1359 | err = netdev_upper_dev_link(lowerdev, dev); |
| @@ -1357,7 +1368,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
| 1357 | 1368 | ||
| 1358 | unregister_netdev: | 1369 | unregister_netdev: |
| 1359 | unregister_netdevice(dev); | 1370 | unregister_netdevice(dev); |
| 1360 | 1371 | destroy_macvlan_port: | |
| 1372 | if (create) | ||
| 1373 | macvlan_port_destroy(port->dev); | ||
| 1361 | return err; | 1374 | return err; |
| 1362 | } | 1375 | } |
| 1363 | EXPORT_SYMBOL_GPL(macvlan_common_newlink); | 1376 | EXPORT_SYMBOL_GPL(macvlan_common_newlink); |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index e977ba931878..1a4bf8acad78 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
| @@ -723,6 +723,7 @@ struct phy_device *phy_connect(struct net_device *dev, const char *bus_id, | |||
| 723 | phydev = to_phy_device(d); | 723 | phydev = to_phy_device(d); |
| 724 | 724 | ||
| 725 | rc = phy_connect_direct(dev, phydev, handler, interface); | 725 | rc = phy_connect_direct(dev, phydev, handler, interface); |
| 726 | put_device(d); | ||
| 726 | if (rc) | 727 | if (rc) |
| 727 | return ERR_PTR(rc); | 728 | return ERR_PTR(rc); |
| 728 | 729 | ||
| @@ -953,6 +954,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, | |||
| 953 | phydev = to_phy_device(d); | 954 | phydev = to_phy_device(d); |
| 954 | 955 | ||
| 955 | rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); | 956 | rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); |
| 957 | put_device(d); | ||
| 956 | if (rc) | 958 | if (rc) |
| 957 | return ERR_PTR(rc); | 959 | return ERR_PTR(rc); |
| 958 | 960 | ||
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index e6338c16081a..8a6675d92b98 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c | |||
| @@ -1656,6 +1656,19 @@ static const struct driver_info ax88178a_info = { | |||
| 1656 | .tx_fixup = ax88179_tx_fixup, | 1656 | .tx_fixup = ax88179_tx_fixup, |
| 1657 | }; | 1657 | }; |
| 1658 | 1658 | ||
| 1659 | static const struct driver_info cypress_GX3_info = { | ||
| 1660 | .description = "Cypress GX3 SuperSpeed to Gigabit Ethernet Controller", | ||
| 1661 | .bind = ax88179_bind, | ||
| 1662 | .unbind = ax88179_unbind, | ||
| 1663 | .status = ax88179_status, | ||
| 1664 | .link_reset = ax88179_link_reset, | ||
| 1665 | .reset = ax88179_reset, | ||
| 1666 | .stop = ax88179_stop, | ||
| 1667 | .flags = FLAG_ETHER | FLAG_FRAMING_AX, | ||
| 1668 | .rx_fixup = ax88179_rx_fixup, | ||
| 1669 | .tx_fixup = ax88179_tx_fixup, | ||
| 1670 | }; | ||
| 1671 | |||
| 1659 | static const struct driver_info dlink_dub1312_info = { | 1672 | static const struct driver_info dlink_dub1312_info = { |
| 1660 | .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", | 1673 | .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", |
| 1661 | .bind = ax88179_bind, | 1674 | .bind = ax88179_bind, |
| @@ -1718,6 +1731,10 @@ static const struct usb_device_id products[] = { | |||
| 1718 | USB_DEVICE(0x0b95, 0x178a), | 1731 | USB_DEVICE(0x0b95, 0x178a), |
| 1719 | .driver_info = (unsigned long)&ax88178a_info, | 1732 | .driver_info = (unsigned long)&ax88178a_info, |
| 1720 | }, { | 1733 | }, { |
| 1734 | /* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */ | ||
| 1735 | USB_DEVICE(0x04b4, 0x3610), | ||
| 1736 | .driver_info = (unsigned long)&cypress_GX3_info, | ||
| 1737 | }, { | ||
| 1721 | /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ | 1738 | /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ |
| 1722 | USB_DEVICE(0x2001, 0x4a00), | 1739 | USB_DEVICE(0x2001, 0x4a00), |
| 1723 | .driver_info = (unsigned long)&dlink_dub1312_info, | 1740 | .driver_info = (unsigned long)&dlink_dub1312_info, |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 44d439f50961..efb84f092492 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) | |||
| 1730 | u8 checksum = CHECKSUM_NONE; | 1730 | u8 checksum = CHECKSUM_NONE; |
| 1731 | u32 opts2, opts3; | 1731 | u32 opts2, opts3; |
| 1732 | 1732 | ||
| 1733 | if (tp->version == RTL_VER_01) | 1733 | if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02) |
| 1734 | goto return_result; | 1734 | goto return_result; |
| 1735 | 1735 | ||
| 1736 | opts2 = le32_to_cpu(rx_desc->opts2); | 1736 | opts2 = le32_to_cpu(rx_desc->opts2); |
| @@ -1745,7 +1745,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) | |||
| 1745 | checksum = CHECKSUM_NONE; | 1745 | checksum = CHECKSUM_NONE; |
| 1746 | else | 1746 | else |
| 1747 | checksum = CHECKSUM_UNNECESSARY; | 1747 | checksum = CHECKSUM_UNNECESSARY; |
| 1748 | } else if (RD_IPV6_CS) { | 1748 | } else if (opts2 & RD_IPV6_CS) { |
| 1749 | if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) | 1749 | if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) |
| 1750 | checksum = CHECKSUM_UNNECESSARY; | 1750 | checksum = CHECKSUM_UNNECESSARY; |
| 1751 | else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) | 1751 | else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) |
| @@ -3266,10 +3266,8 @@ static int rtl8152_open(struct net_device *netdev) | |||
| 3266 | goto out; | 3266 | goto out; |
| 3267 | 3267 | ||
| 3268 | res = usb_autopm_get_interface(tp->intf); | 3268 | res = usb_autopm_get_interface(tp->intf); |
| 3269 | if (res < 0) { | 3269 | if (res < 0) |
| 3270 | free_all_mem(tp); | 3270 | goto out_free; |
| 3271 | goto out; | ||
| 3272 | } | ||
| 3273 | 3271 | ||
| 3274 | mutex_lock(&tp->control); | 3272 | mutex_lock(&tp->control); |
| 3275 | 3273 | ||
| @@ -3285,10 +3283,9 @@ static int rtl8152_open(struct net_device *netdev) | |||
| 3285 | netif_device_detach(tp->netdev); | 3283 | netif_device_detach(tp->netdev); |
| 3286 | netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", | 3284 | netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", |
| 3287 | res); | 3285 | res); |
| 3288 | free_all_mem(tp); | 3286 | goto out_unlock; |
| 3289 | } else { | ||
| 3290 | napi_enable(&tp->napi); | ||
| 3291 | } | 3287 | } |
| 3288 | napi_enable(&tp->napi); | ||
| 3292 | 3289 | ||
| 3293 | mutex_unlock(&tp->control); | 3290 | mutex_unlock(&tp->control); |
| 3294 | 3291 | ||
| @@ -3297,7 +3294,13 @@ static int rtl8152_open(struct net_device *netdev) | |||
| 3297 | tp->pm_notifier.notifier_call = rtl_notifier; | 3294 | tp->pm_notifier.notifier_call = rtl_notifier; |
| 3298 | register_pm_notifier(&tp->pm_notifier); | 3295 | register_pm_notifier(&tp->pm_notifier); |
| 3299 | #endif | 3296 | #endif |
| 3297 | return 0; | ||
| 3300 | 3298 | ||
| 3299 | out_unlock: | ||
| 3300 | mutex_unlock(&tp->control); | ||
| 3301 | usb_autopm_put_interface(tp->intf); | ||
| 3302 | out_free: | ||
| 3303 | free_all_mem(tp); | ||
| 3301 | out: | 3304 | out: |
| 3302 | return res; | 3305 | return res; |
| 3303 | } | 3306 | } |
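The rtl8152_open() rework above funnels every failure through labels that undo only what already succeeded, instead of repeating free_all_mem() at each error site. A generic sketch of that unwind shape, using hypothetical helpers rather than the driver's functions:

	res = alloc_resources(tp);
	if (res)
		goto out;

	res = take_runtime_pm_ref(tp);		/* e.g. a usb_autopm_get_interface()-style step */
	if (res)
		goto out_free;

	mutex_lock(&tp->lock);
	res = start_hw(tp);
	if (res)
		goto out_unlock;
	mutex_unlock(&tp->lock);
	return 0;

out_unlock:
	mutex_unlock(&tp->lock);
	drop_runtime_pm_ref(tp);		/* undo the second step */
out_free:
	free_resources(tp);			/* undo the first step */
out:
	return res;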
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index fad84f3f4109..fd8b1e62301f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -2038,23 +2038,33 @@ static struct virtio_device_id id_table[] = { | |||
| 2038 | { 0 }, | 2038 | { 0 }, |
| 2039 | }; | 2039 | }; |
| 2040 | 2040 | ||
| 2041 | #define VIRTNET_FEATURES \ | ||
| 2042 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ | ||
| 2043 | VIRTIO_NET_F_MAC, \ | ||
| 2044 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ | ||
| 2045 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ | ||
| 2046 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ | ||
| 2047 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ | ||
| 2048 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ | ||
| 2049 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ | ||
| 2050 | VIRTIO_NET_F_CTRL_MAC_ADDR, \ | ||
| 2051 | VIRTIO_NET_F_MTU | ||
| 2052 | |||
| 2041 | static unsigned int features[] = { | 2053 | static unsigned int features[] = { |
| 2042 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, | 2054 | VIRTNET_FEATURES, |
| 2043 | VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, | 2055 | }; |
| 2044 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, | 2056 | |
| 2045 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, | 2057 | static unsigned int features_legacy[] = { |
| 2046 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, | 2058 | VIRTNET_FEATURES, |
| 2047 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, | 2059 | VIRTIO_NET_F_GSO, |
| 2048 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, | ||
| 2049 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, | ||
| 2050 | VIRTIO_NET_F_CTRL_MAC_ADDR, | ||
| 2051 | VIRTIO_F_ANY_LAYOUT, | 2060 | VIRTIO_F_ANY_LAYOUT, |
| 2052 | VIRTIO_NET_F_MTU, | ||
| 2053 | }; | 2061 | }; |
| 2054 | 2062 | ||
| 2055 | static struct virtio_driver virtio_net_driver = { | 2063 | static struct virtio_driver virtio_net_driver = { |
| 2056 | .feature_table = features, | 2064 | .feature_table = features, |
| 2057 | .feature_table_size = ARRAY_SIZE(features), | 2065 | .feature_table_size = ARRAY_SIZE(features), |
| 2066 | .feature_table_legacy = features_legacy, | ||
| 2067 | .feature_table_size_legacy = ARRAY_SIZE(features_legacy), | ||
| 2058 | .driver.name = KBUILD_MODNAME, | 2068 | .driver.name = KBUILD_MODNAME, |
| 2059 | .driver.owner = THIS_MODULE, | 2069 | .driver.owner = THIS_MODULE, |
| 2060 | .id_table = id_table, | 2070 | .id_table = id_table, |
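Folding the common entries into VIRTNET_FEATURES keeps the modern and legacy feature tables from drifting apart; only the legacy-only bits (VIRTIO_NET_F_GSO, VIRTIO_F_ANY_LAYOUT) are appended to the legacy table. A stand-alone sketch of the shared-macro idiom with illustrative feature names:

	enum { FEAT_CSUM, FEAT_MAC, FEAT_MTU, FEAT_GSO, FEAT_ANY_LAYOUT };

	#define COMMON_FEATURES \
		FEAT_CSUM, FEAT_MAC, FEAT_MTU

	static unsigned int features[] = {
		COMMON_FEATURES,			/* modern devices */
	};

	static unsigned int features_legacy[] = {
		COMMON_FEATURES,
		FEAT_GSO,				/* legacy-only entries go here */
		FEAT_ANY_LAYOUT,
	};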
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f3c2fa3ab0d5..24532cdebb00 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -944,7 +944,9 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) | |||
| 944 | { | 944 | { |
| 945 | struct vxlan_dev *vxlan; | 945 | struct vxlan_dev *vxlan; |
| 946 | struct vxlan_sock *sock4; | 946 | struct vxlan_sock *sock4; |
| 947 | struct vxlan_sock *sock6 = NULL; | 947 | #if IS_ENABLED(CONFIG_IPV6) |
| 948 | struct vxlan_sock *sock6; | ||
| 949 | #endif | ||
| 948 | unsigned short family = dev->default_dst.remote_ip.sa.sa_family; | 950 | unsigned short family = dev->default_dst.remote_ip.sa.sa_family; |
| 949 | 951 | ||
| 950 | sock4 = rtnl_dereference(dev->vn4_sock); | 952 | sock4 = rtnl_dereference(dev->vn4_sock); |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index b777e1b2f87a..78d9966a3957 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
| @@ -4516,7 +4516,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, | |||
| 4516 | /* store current 11d setting */ | 4516 | /* store current 11d setting */ |
| 4517 | if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, | 4517 | if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, |
| 4518 | &ifp->vif->is_11d)) { | 4518 | &ifp->vif->is_11d)) { |
| 4519 | supports_11d = false; | 4519 | is_11d = supports_11d = false; |
| 4520 | } else { | 4520 | } else { |
| 4521 | country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, | 4521 | country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, |
| 4522 | settings->beacon.tail_len, | 4522 | settings->beacon.tail_len, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 4fdc3dad3e85..b88e2048ae0b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c | |||
| @@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm, | |||
| 1087 | ret = iwl_mvm_switch_to_d3(mvm); | 1087 | ret = iwl_mvm_switch_to_d3(mvm); |
| 1088 | if (ret) | 1088 | if (ret) |
| 1089 | return ret; | 1089 | return ret; |
| 1090 | } else { | ||
| 1091 | /* In theory, we wouldn't have to stop a running sched | ||
| 1092 | * scan in order to start another one (for | ||
| 1093 | * net-detect). But in practice this doesn't seem to | ||
| 1094 | * work properly, so stop any running sched_scan now. | ||
| 1095 | */ | ||
| 1096 | ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); | ||
| 1097 | if (ret) | ||
| 1098 | return ret; | ||
| 1090 | } | 1099 | } |
| 1091 | 1100 | ||
| 1092 | /* rfkill release can be either for wowlan or netdetect */ | 1101 | /* rfkill release can be either for wowlan or netdetect */ |
| @@ -1254,7 +1263,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, | |||
| 1254 | out: | 1263 | out: |
| 1255 | if (ret < 0) { | 1264 | if (ret < 0) { |
| 1256 | iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); | 1265 | iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); |
| 1257 | ieee80211_restart_hw(mvm->hw); | 1266 | if (mvm->restart_fw > 0) { |
| 1267 | mvm->restart_fw--; | ||
| 1268 | ieee80211_restart_hw(mvm->hw); | ||
| 1269 | } | ||
| 1258 | iwl_mvm_free_nd(mvm); | 1270 | iwl_mvm_free_nd(mvm); |
| 1259 | } | 1271 | } |
| 1260 | out_noreset: | 1272 | out_noreset: |
| @@ -2088,6 +2100,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) | |||
| 2088 | iwl_mvm_update_changed_regdom(mvm); | 2100 | iwl_mvm_update_changed_regdom(mvm); |
| 2089 | 2101 | ||
| 2090 | if (mvm->net_detect) { | 2102 | if (mvm->net_detect) { |
| 2103 | /* If this is a non-unified image, we restart the FW, | ||
| 2104 | * so no need to stop the netdetect scan. If that | ||
| 2105 | * fails, continue and try to get the wake-up reasons, | ||
| 2106 | * but trigger a HW restart by keeping a failure code | ||
| 2107 | * in ret. | ||
| 2108 | */ | ||
| 2109 | if (unified_image) | ||
| 2110 | ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, | ||
| 2111 | false); | ||
| 2112 | |||
| 2091 | iwl_mvm_query_netdetect_reasons(mvm, vif); | 2113 | iwl_mvm_query_netdetect_reasons(mvm, vif); |
| 2092 | /* has unlocked the mutex, so skip that */ | 2114 | /* has unlocked the mutex, so skip that */ |
| 2093 | goto out; | 2115 | goto out; |
| @@ -2271,7 +2293,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac, | |||
| 2271 | static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) | 2293 | static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) |
| 2272 | { | 2294 | { |
| 2273 | struct iwl_mvm *mvm = inode->i_private; | 2295 | struct iwl_mvm *mvm = inode->i_private; |
| 2274 | int remaining_time = 10; | 2296 | bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, |
| 2297 | IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); | ||
| 2275 | 2298 | ||
| 2276 | mvm->d3_test_active = false; | 2299 | mvm->d3_test_active = false; |
| 2277 | 2300 | ||
| @@ -2282,17 +2305,21 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) | |||
| 2282 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; | 2305 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; |
| 2283 | 2306 | ||
| 2284 | iwl_abort_notification_waits(&mvm->notif_wait); | 2307 | iwl_abort_notification_waits(&mvm->notif_wait); |
| 2285 | ieee80211_restart_hw(mvm->hw); | 2308 | if (!unified_image) { |
| 2309 | int remaining_time = 10; | ||
| 2286 | 2310 | ||
| 2287 | /* wait for restart and disconnect all interfaces */ | 2311 | ieee80211_restart_hw(mvm->hw); |
| 2288 | while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && | 2312 | |
| 2289 | remaining_time > 0) { | 2313 | /* wait for restart and disconnect all interfaces */ |
| 2290 | remaining_time--; | 2314 | while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && |
| 2291 | msleep(1000); | 2315 | remaining_time > 0) { |
| 2292 | } | 2316 | remaining_time--; |
| 2317 | msleep(1000); | ||
| 2318 | } | ||
| 2293 | 2319 | ||
| 2294 | if (remaining_time == 0) | 2320 | if (remaining_time == 0) |
| 2295 | IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n"); | 2321 | IWL_ERR(mvm, "Timed out waiting for HW restart!\n"); |
| 2322 | } | ||
| 2296 | 2323 | ||
| 2297 | ieee80211_iterate_active_interfaces_atomic( | 2324 | ieee80211_iterate_active_interfaces_atomic( |
| 2298 | mvm->hw, IEEE80211_IFACE_ITER_NORMAL, | 2325 | mvm->hw, IEEE80211_IFACE_ITER_NORMAL, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 07da4efe8458..7b7d2a146e30 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | |||
| @@ -1529,8 +1529,8 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, | |||
| 1529 | .data = { &cmd, }, | 1529 | .data = { &cmd, }, |
| 1530 | .len = { sizeof(cmd) }, | 1530 | .len = { sizeof(cmd) }, |
| 1531 | }; | 1531 | }; |
| 1532 | size_t delta, len; | 1532 | size_t delta; |
| 1533 | ssize_t ret; | 1533 | ssize_t ret, len; |
| 1534 | 1534 | ||
| 1535 | hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, | 1535 | hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, |
| 1536 | DEBUG_GROUP, 0); | 1536 | DEBUG_GROUP, 0); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 318efd814037..1db1dc13e988 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
| @@ -4121,7 +4121,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, | |||
| 4121 | struct iwl_mvm_internal_rxq_notif *notif, | 4121 | struct iwl_mvm_internal_rxq_notif *notif, |
| 4122 | u32 size) | 4122 | u32 size) |
| 4123 | { | 4123 | { |
| 4124 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq); | ||
| 4125 | u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; | 4124 | u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; |
| 4126 | int ret; | 4125 | int ret; |
| 4127 | 4126 | ||
| @@ -4143,7 +4142,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, | |||
| 4143 | } | 4142 | } |
| 4144 | 4143 | ||
| 4145 | if (notif->sync) | 4144 | if (notif->sync) |
| 4146 | ret = wait_event_timeout(notif_waitq, | 4145 | ret = wait_event_timeout(mvm->rx_sync_waitq, |
| 4147 | atomic_read(&mvm->queue_sync_counter) == 0, | 4146 | atomic_read(&mvm->queue_sync_counter) == 0, |
| 4148 | HZ); | 4147 | HZ); |
| 4149 | WARN_ON_ONCE(!ret); | 4148 | WARN_ON_ONCE(!ret); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index d17cbf603f7c..c60703e0c246 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | |||
| @@ -937,6 +937,7 @@ struct iwl_mvm { | |||
| 937 | /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ | 937 | /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ |
| 938 | spinlock_t d0i3_tx_lock; | 938 | spinlock_t d0i3_tx_lock; |
| 939 | wait_queue_head_t d0i3_exit_waitq; | 939 | wait_queue_head_t d0i3_exit_waitq; |
| 940 | wait_queue_head_t rx_sync_waitq; | ||
| 940 | 941 | ||
| 941 | /* BT-Coex */ | 942 | /* BT-Coex */ |
| 942 | struct iwl_bt_coex_profile_notif last_bt_notif; | 943 | struct iwl_bt_coex_profile_notif last_bt_notif; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 05fe6dd1a2c8..4d35deb628bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
| @@ -619,6 +619,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, | |||
| 619 | spin_lock_init(&mvm->refs_lock); | 619 | spin_lock_init(&mvm->refs_lock); |
| 620 | skb_queue_head_init(&mvm->d0i3_tx); | 620 | skb_queue_head_init(&mvm->d0i3_tx); |
| 621 | init_waitqueue_head(&mvm->d0i3_exit_waitq); | 621 | init_waitqueue_head(&mvm->d0i3_exit_waitq); |
| 622 | init_waitqueue_head(&mvm->rx_sync_waitq); | ||
| 622 | 623 | ||
| 623 | atomic_set(&mvm->queue_sync_counter, 0); | 624 | atomic_set(&mvm->queue_sync_counter, 0); |
| 624 | 625 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index a57c6ef5bc14..6c802cee900c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | |||
| @@ -547,7 +547,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | |||
| 547 | "Received expired RX queue sync message\n"); | 547 | "Received expired RX queue sync message\n"); |
| 548 | return; | 548 | return; |
| 549 | } | 549 | } |
| 550 | atomic_dec(&mvm->queue_sync_counter); | 550 | if (!atomic_dec_return(&mvm->queue_sync_counter)) |
| 551 | wake_up(&mvm->rx_sync_waitq); | ||
| 551 | } | 552 | } |
| 552 | 553 | ||
| 553 | switch (internal_notif->type) { | 554 | switch (internal_notif->type) { |
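Moving the wait queue into struct iwl_mvm lets the RX-queue notification handler wake the actual waiter: the sender blocks until queue_sync_counter reaches zero, each acknowledgment decrements the counter, and only the final one issues the wake-up. A sketch of that rendezvous; the send helper is hypothetical:

	/* sender: announce the sync to every RX queue, then wait for all acks */
	atomic_set(&mvm->queue_sync_counter, mvm->trans->num_rx_queues);
	send_rxq_sync_notification(mvm);		/* hypothetical helper */
	ret = wait_event_timeout(mvm->rx_sync_waitq,
				 atomic_read(&mvm->queue_sync_counter) == 0,
				 HZ);

	/* per-queue notification handler: the last ack wakes the waiter */
	if (!atomic_dec_return(&mvm->queue_sync_counter))
		wake_up(&mvm->rx_sync_waitq);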
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index f279fdd6eb44..fa9743205491 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c | |||
| @@ -1199,6 +1199,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm) | |||
| 1199 | 1199 | ||
| 1200 | static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) | 1200 | static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) |
| 1201 | { | 1201 | { |
| 1202 | bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, | ||
| 1203 | IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); | ||
| 1204 | |||
| 1202 | /* This looks a bit arbitrary, but the idea is that if we run | 1205 | /* This looks a bit arbitrary, but the idea is that if we run |
| 1203 | * out of possible simultaneous scans and the userspace is | 1206 | * out of possible simultaneous scans and the userspace is |
| 1204 | * trying to run a scan type that is already running, we | 1207 | * trying to run a scan type that is already running, we |
| @@ -1225,12 +1228,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) | |||
| 1225 | return -EBUSY; | 1228 | return -EBUSY; |
| 1226 | return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); | 1229 | return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); |
| 1227 | case IWL_MVM_SCAN_NETDETECT: | 1230 | case IWL_MVM_SCAN_NETDETECT: |
| 1228 | /* No need to stop anything for net-detect since the | 1231 | /* For non-unified images, there's no need to stop |
| 1229 | * firmware is restarted anyway. This way, any sched | 1232 | * anything for net-detect since the firmware is |
| 1230 | * scans that were running will be restarted when we | 1233 | * restarted anyway. This way, any sched scans that |
| 1231 | * resume. | 1234 | * were running will be restarted when we resume. |
| 1232 | */ | 1235 | */ |
| 1233 | return 0; | 1236 | if (!unified_image) |
| 1237 | return 0; | ||
| 1238 | |||
| 1239 | /* If this is a unified image and we ran out of scans, | ||
| 1240 | * we need to stop something. Prefer stopping regular | ||
| 1241 | * scans, because the results are useless at this | ||
| 1242 | * point, and we should be able to keep running | ||
| 1243 | * another scheduled scan while suspended. | ||
| 1244 | */ | ||
| 1245 | if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) | ||
| 1246 | return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, | ||
| 1247 | true); | ||
| 1248 | if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) | ||
| 1249 | return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, | ||
| 1250 | true); | ||
| 1251 | |||
| 1252 | /* fall through, something is wrong if no scan was | ||
| 1253 | * running but we ran out of scans. | ||
| 1254 | */ | ||
| 1234 | default: | 1255 | default: |
| 1235 | WARN_ON(1); | 1256 | WARN_ON(1); |
| 1236 | break; | 1257 | break; |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 001be406a3d3..2f8134b2a504 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c | |||
| @@ -541,48 +541,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
| 541 | MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); | 541 | MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); |
| 542 | 542 | ||
| 543 | #ifdef CONFIG_ACPI | 543 | #ifdef CONFIG_ACPI |
| 544 | #define SPL_METHOD "SPLC" | 544 | #define ACPI_SPLC_METHOD "SPLC" |
| 545 | #define SPL_DOMAINTYPE_MODULE BIT(0) | 545 | #define ACPI_SPLC_DOMAIN_WIFI (0x07) |
| 546 | #define SPL_DOMAINTYPE_WIFI BIT(1) | ||
| 547 | #define SPL_DOMAINTYPE_WIGIG BIT(2) | ||
| 548 | #define SPL_DOMAINTYPE_RFEM BIT(3) | ||
| 549 | 546 | ||
| 550 | static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx) | 547 | static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc) |
| 551 | { | 548 | { |
| 552 | union acpi_object *limits, *domain_type, *power_limit; | 549 | union acpi_object *data_pkg, *dflt_pwr_limit; |
| 553 | 550 | int i; | |
| 554 | if (splx->type != ACPI_TYPE_PACKAGE || | 551 | |
| 555 | splx->package.count != 2 || | 552 | /* We need at least two elements, one for the revision and one |
| 556 | splx->package.elements[0].type != ACPI_TYPE_INTEGER || | 553 | * for the data itself. Also check that the revision is |
| 557 | splx->package.elements[0].integer.value != 0) { | 554 | * supported (currently only revision 0). |
| 558 | IWL_ERR(trans, "Unsupported splx structure\n"); | 555 | */ |
| 556 | if (splc->type != ACPI_TYPE_PACKAGE || | ||
| 557 | splc->package.count < 2 || | ||
| 558 | splc->package.elements[0].type != ACPI_TYPE_INTEGER || | ||
| 559 | splc->package.elements[0].integer.value != 0) { | ||
| 560 | IWL_DEBUG_INFO(trans, | ||
| 561 | "Unsupported structure returned by the SPLC method. Ignoring.\n"); | ||
| 559 | return 0; | 562 | return 0; |
| 560 | } | 563 | } |
| 561 | 564 | ||
| 562 | limits = &splx->package.elements[1]; | 565 | /* loop through all the packages to find the one for WiFi */ |
| 563 | if (limits->type != ACPI_TYPE_PACKAGE || | 566 | for (i = 1; i < splc->package.count; i++) { |
| 564 | limits->package.count < 2 || | 567 | union acpi_object *domain; |
| 565 | limits->package.elements[0].type != ACPI_TYPE_INTEGER || | 568 | |
| 566 | limits->package.elements[1].type != ACPI_TYPE_INTEGER) { | 569 | data_pkg = &splc->package.elements[i]; |
| 567 | IWL_ERR(trans, "Invalid limits element\n"); | 570 | |
| 568 | return 0; | 571 | /* Skip anything that is not a package with the right |
| 572 | * number of elements (i.e. at least 2 integers). |
| 573 | */ | ||
| 574 | if (data_pkg->type != ACPI_TYPE_PACKAGE || | ||
| 575 | data_pkg->package.count < 2 || | ||
| 576 | data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || | ||
| 577 | data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) | ||
| 578 | continue; | ||
| 579 | |||
| 580 | domain = &data_pkg->package.elements[0]; | ||
| 581 | if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI) | ||
| 582 | break; | ||
| 583 | |||
| 584 | data_pkg = NULL; | ||
| 569 | } | 585 | } |
| 570 | 586 | ||
| 571 | domain_type = &limits->package.elements[0]; | 587 | if (!data_pkg) { |
| 572 | power_limit = &limits->package.elements[1]; | 588 | IWL_DEBUG_INFO(trans, |
| 573 | if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) { | 589 | "No element for the WiFi domain returned by the SPLC method.\n"); |
| 574 | IWL_DEBUG_INFO(trans, "WiFi power is not limited\n"); | ||
| 575 | return 0; | 590 | return 0; |
| 576 | } | 591 | } |
| 577 | 592 | ||
| 578 | return power_limit->integer.value; | 593 | dflt_pwr_limit = &data_pkg->package.elements[1]; |
| 594 | return dflt_pwr_limit->integer.value; | ||
| 579 | } | 595 | } |
| 580 | 596 | ||
| 581 | static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) | 597 | static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) |
| 582 | { | 598 | { |
| 583 | acpi_handle pxsx_handle; | 599 | acpi_handle pxsx_handle; |
| 584 | acpi_handle handle; | 600 | acpi_handle handle; |
| 585 | struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL}; | 601 | struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL}; |
| 586 | acpi_status status; | 602 | acpi_status status; |
| 587 | 603 | ||
| 588 | pxsx_handle = ACPI_HANDLE(&pdev->dev); | 604 | pxsx_handle = ACPI_HANDLE(&pdev->dev); |
| @@ -593,23 +609,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) | |||
| 593 | } | 609 | } |
| 594 | 610 | ||
| 595 | /* Get the method's handle */ | 611 | /* Get the method's handle */ |
| 596 | status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle); | 612 | status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD, |
| 613 | &handle); | ||
| 597 | if (ACPI_FAILURE(status)) { | 614 | if (ACPI_FAILURE(status)) { |
| 598 | IWL_DEBUG_INFO(trans, "SPL method not found\n"); | 615 | IWL_DEBUG_INFO(trans, "SPLC method not found\n"); |
| 599 | return; | 616 | return; |
| 600 | } | 617 | } |
| 601 | 618 | ||
| 602 | /* Call SPLC with no arguments */ | 619 | /* Call SPLC with no arguments */ |
| 603 | status = acpi_evaluate_object(handle, NULL, NULL, &splx); | 620 | status = acpi_evaluate_object(handle, NULL, NULL, &splc); |
| 604 | if (ACPI_FAILURE(status)) { | 621 | if (ACPI_FAILURE(status)) { |
| 605 | IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); | 622 | IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); |
| 606 | return; | 623 | return; |
| 607 | } | 624 | } |
| 608 | 625 | ||
| 609 | trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer); | 626 | trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer); |
| 610 | IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", | 627 | IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", |
| 611 | trans->dflt_pwr_limit); | 628 | trans->dflt_pwr_limit); |
| 612 | kfree(splx.pointer); | 629 | kfree(splc.pointer); |
| 613 | } | 630 | } |
| 614 | 631 | ||
| 615 | #else /* CONFIG_ACPI */ | 632 | #else /* CONFIG_ACPI */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index e9a278b60dfd..5f840f16f40b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c | |||
| @@ -592,6 +592,7 @@ error: | |||
| 592 | static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, | 592 | static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, |
| 593 | int slots_num, u32 txq_id) | 593 | int slots_num, u32 txq_id) |
| 594 | { | 594 | { |
| 595 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
| 595 | int ret; | 596 | int ret; |
| 596 | 597 | ||
| 597 | txq->need_update = false; | 598 | txq->need_update = false; |
| @@ -606,6 +607,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, | |||
| 606 | return ret; | 607 | return ret; |
| 607 | 608 | ||
| 608 | spin_lock_init(&txq->lock); | 609 | spin_lock_init(&txq->lock); |
| 610 | |||
| 611 | if (txq_id == trans_pcie->cmd_queue) { | ||
| 612 | static struct lock_class_key iwl_pcie_cmd_queue_lock_class; | ||
| 613 | |||
| 614 | lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); | ||
| 615 | } | ||
| 616 | |||
| 609 | __skb_queue_head_init(&txq->overflow_q); | 617 | __skb_queue_head_init(&txq->overflow_q); |
| 610 | 618 | ||
| 611 | /* | 619 | /* |
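Locks initialised through the same spin_lock_init() call site share one lockdep class, so nesting the command-queue lock with a data-queue lock would look like recursive locking to lockdep; the hunk above moves the command queue into its own class. The general shape of that annotation, with a hypothetical object name:

	/* The key must be static: one key means one lockdep class shared by
	 * every lock this branch annotates.
	 */
	spin_lock_init(&obj->lock);
	if (obj_is_special_instance(obj)) {
		static struct lock_class_key special_instance_key;

		lockdep_set_class(&obj->lock, &special_instance_key);
	}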
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index e17879dd5d5a..bf2744e1e3db 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
| @@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) | |||
| 304 | queue->rx_skbs[id] = skb; | 304 | queue->rx_skbs[id] = skb; |
| 305 | 305 | ||
| 306 | ref = gnttab_claim_grant_reference(&queue->gref_rx_head); | 306 | ref = gnttab_claim_grant_reference(&queue->gref_rx_head); |
| 307 | BUG_ON((signed short)ref < 0); | 307 | WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); |
| 308 | queue->grant_rx_ref[id] = ref; | 308 | queue->grant_rx_ref[id] = ref; |
| 309 | 309 | ||
| 310 | page = skb_frag_page(&skb_shinfo(skb)->frags[0]); | 310 | page = skb_frag_page(&skb_shinfo(skb)->frags[0]); |
| @@ -428,7 +428,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, | |||
| 428 | id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); | 428 | id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); |
| 429 | tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); | 429 | tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); |
| 430 | ref = gnttab_claim_grant_reference(&queue->gref_tx_head); | 430 | ref = gnttab_claim_grant_reference(&queue->gref_tx_head); |
| 431 | BUG_ON((signed short)ref < 0); | 431 | WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); |
| 432 | 432 | ||
| 433 | gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, | 433 | gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, |
| 434 | gfn, GNTMAP_readonly); | 434 | gfn, GNTMAP_readonly); |
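The xen-netfront hunks demote a failed grant-reference claim from BUG_ON() to WARN_ON_ONCE(), detecting the negative return through IS_ERR_VALUE() rather than a cast to signed short. A hedged sketch of the check in isolation (the wrapper function is made up for illustration):

#include <linux/err.h>
#include <linux/bug.h>
#include <xen/grant_table.h>

/* Claim a grant reference and warn (once) instead of crashing when the
 * free list is exhausted.  gnttab_claim_grant_reference() returns a
 * negative value on failure, hence the IS_ERR_VALUE() test on the
 * sign-extended result.
 */
static int claim_rx_grant(grant_ref_t *head, grant_ref_t *ref)
{
        int r = gnttab_claim_grant_reference(head);

        if (WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)r)))
                return r;

        *ref = r;
        return 0;
}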
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ca1ad9ebbc92..a0649973ee5b 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
| @@ -149,7 +149,7 @@ static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb) | |||
| 149 | { | 149 | { |
| 150 | #if defined(CONFIG_NET_L3_MASTER_DEV) | 150 | #if defined(CONFIG_NET_L3_MASTER_DEV) |
| 151 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && | 151 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && |
| 152 | ipv6_l3mdev_skb(IP6CB(skb)->flags)) | 152 | skb && ipv6_l3mdev_skb(IP6CB(skb)->flags)) |
| 153 | return true; | 153 | return true; |
| 154 | #endif | 154 | #endif |
| 155 | return false; | 155 | return false; |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 91ee3643ccc8..bf04a46f6d5b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -3354,6 +3354,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); | |||
| 3354 | bool is_skb_forwardable(const struct net_device *dev, | 3354 | bool is_skb_forwardable(const struct net_device *dev, |
| 3355 | const struct sk_buff *skb); | 3355 | const struct sk_buff *skb); |
| 3356 | 3356 | ||
| 3357 | static __always_inline int ____dev_forward_skb(struct net_device *dev, | ||
| 3358 | struct sk_buff *skb) | ||
| 3359 | { | ||
| 3360 | if (skb_orphan_frags(skb, GFP_ATOMIC) || | ||
| 3361 | unlikely(!is_skb_forwardable(dev, skb))) { | ||
| 3362 | atomic_long_inc(&dev->rx_dropped); | ||
| 3363 | kfree_skb(skb); | ||
| 3364 | return NET_RX_DROP; | ||
| 3365 | } | ||
| 3366 | |||
| 3367 | skb_scrub_packet(skb, true); | ||
| 3368 | skb->priority = 0; | ||
| 3369 | return 0; | ||
| 3370 | } | ||
| 3371 | |||
| 3357 | void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); | 3372 | void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); |
| 3358 | 3373 | ||
| 3359 | extern int netdev_budget; | 3374 | extern int netdev_budget; |
diff --git a/include/net/ip.h b/include/net/ip.h index 5413883ac47f..d3a107850a41 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
| @@ -47,8 +47,7 @@ struct inet_skb_parm { | |||
| 47 | #define IPSKB_REROUTED BIT(4) | 47 | #define IPSKB_REROUTED BIT(4) |
| 48 | #define IPSKB_DOREDIRECT BIT(5) | 48 | #define IPSKB_DOREDIRECT BIT(5) |
| 49 | #define IPSKB_FRAG_PMTU BIT(6) | 49 | #define IPSKB_FRAG_PMTU BIT(6) |
| 50 | #define IPSKB_FRAG_SEGS BIT(7) | 50 | #define IPSKB_L3SLAVE BIT(7) |
| 51 | #define IPSKB_L3SLAVE BIT(8) | ||
| 52 | 51 | ||
| 53 | u16 frag_max_size; | 52 | u16 frag_max_size; |
| 54 | }; | 53 | }; |
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 20ed9699fcd4..1b1cf33cbfb0 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h | |||
| @@ -146,6 +146,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, | |||
| 146 | { | 146 | { |
| 147 | int pkt_len, err; | 147 | int pkt_len, err; |
| 148 | 148 | ||
| 149 | memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); | ||
| 149 | pkt_len = skb->len - skb_inner_network_offset(skb); | 150 | pkt_len = skb->len - skb_inner_network_offset(skb); |
| 150 | err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); | 151 | err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); |
| 151 | if (unlikely(net_xmit_eval(err))) | 152 | if (unlikely(net_xmit_eval(err))) |
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h index 498814626e28..1723a67c0b0a 100644 --- a/include/net/netfilter/nf_conntrack_labels.h +++ b/include/net/netfilter/nf_conntrack_labels.h | |||
| @@ -30,8 +30,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct) | |||
| 30 | if (net->ct.labels_used == 0) | 30 | if (net->ct.labels_used == 0) |
| 31 | return NULL; | 31 | return NULL; |
| 32 | 32 | ||
| 33 | return nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS, | 33 | return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC); |
| 34 | sizeof(struct nf_conn_labels), GFP_ATOMIC); | ||
| 35 | #else | 34 | #else |
| 36 | return NULL; | 35 | return NULL; |
| 37 | #endif | 36 | #endif |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 5031e072567b..d79d1e9b9546 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
| @@ -145,7 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type) | |||
| 145 | return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE; | 145 | return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE; |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); | 148 | int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); |
| 149 | unsigned int nft_parse_register(const struct nlattr *attr); | 149 | unsigned int nft_parse_register(const struct nlattr *attr); |
| 150 | int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); | 150 | int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); |
| 151 | 151 | ||
| @@ -542,7 +542,8 @@ void *nft_set_elem_init(const struct nft_set *set, | |||
| 542 | const struct nft_set_ext_tmpl *tmpl, | 542 | const struct nft_set_ext_tmpl *tmpl, |
| 543 | const u32 *key, const u32 *data, | 543 | const u32 *key, const u32 *data, |
| 544 | u64 timeout, gfp_t gfp); | 544 | u64 timeout, gfp_t gfp); |
| 545 | void nft_set_elem_destroy(const struct nft_set *set, void *elem); | 545 | void nft_set_elem_destroy(const struct nft_set *set, void *elem, |
| 546 | bool destroy_expr); | ||
| 546 | 547 | ||
| 547 | /** | 548 | /** |
| 548 | * struct nft_set_gc_batch_head - nf_tables set garbage collection batch | 549 | * struct nft_set_gc_batch_head - nf_tables set garbage collection batch |
| @@ -693,7 +694,6 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) | |||
| 693 | { | 694 | { |
| 694 | int err; | 695 | int err; |
| 695 | 696 | ||
| 696 | __module_get(src->ops->type->owner); | ||
| 697 | if (src->ops->clone) { | 697 | if (src->ops->clone) { |
| 698 | dst->ops = src->ops; | 698 | dst->ops = src->ops; |
| 699 | err = src->ops->clone(dst, src); | 699 | err = src->ops->clone(dst, src); |
| @@ -702,6 +702,8 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) | |||
| 702 | } else { | 702 | } else { |
| 703 | memcpy(dst, src, src->ops->size); | 703 | memcpy(dst, src, src->ops->size); |
| 704 | } | 704 | } |
| 705 | |||
| 706 | __module_get(src->ops->type->owner); | ||
| 705 | return 0; | 707 | return 0; |
| 706 | } | 708 | } |
| 707 | 709 | ||
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 87a7f42e7639..31acc3f4f132 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
| @@ -152,7 +152,7 @@ void sctp_unhash_endpoint(struct sctp_endpoint *); | |||
| 152 | struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, | 152 | struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, |
| 153 | struct sctphdr *, struct sctp_association **, | 153 | struct sctphdr *, struct sctp_association **, |
| 154 | struct sctp_transport **); | 154 | struct sctp_transport **); |
| 155 | void sctp_err_finish(struct sock *, struct sctp_association *); | 155 | void sctp_err_finish(struct sock *, struct sctp_transport *); |
| 156 | void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, | 156 | void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, |
| 157 | struct sctp_transport *t, __u32 pmtu); | 157 | struct sctp_transport *t, __u32 pmtu); |
| 158 | void sctp_icmp_redirect(struct sock *, struct sctp_transport *, | 158 | void sctp_icmp_redirect(struct sock *, struct sctp_transport *, |
diff --git a/include/net/sock.h b/include/net/sock.h index 73c6b008f1b7..92b269709b9a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
| @@ -1596,11 +1596,11 @@ static inline void sock_put(struct sock *sk) | |||
| 1596 | void sock_gen_put(struct sock *sk); | 1596 | void sock_gen_put(struct sock *sk); |
| 1597 | 1597 | ||
| 1598 | int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, | 1598 | int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, |
| 1599 | unsigned int trim_cap); | 1599 | unsigned int trim_cap, bool refcounted); |
| 1600 | static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, | 1600 | static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, |
| 1601 | const int nested) | 1601 | const int nested) |
| 1602 | { | 1602 | { |
| 1603 | return __sk_receive_skb(sk, skb, nested, 1); | 1603 | return __sk_receive_skb(sk, skb, nested, 1, true); |
| 1604 | } | 1604 | } |
| 1605 | 1605 | ||
| 1606 | static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) | 1606 | static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 5b82d4d94834..123979fe12bf 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
| @@ -805,7 +805,7 @@ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) | |||
| 805 | { | 805 | { |
| 806 | #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) | 806 | #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) |
| 807 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && | 807 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && |
| 808 | ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) | 808 | skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) |
| 809 | return true; | 809 | return true; |
| 810 | #endif | 810 | #endif |
| 811 | return false; | 811 | return false; |
| @@ -1220,6 +1220,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) | |||
| 1220 | 1220 | ||
| 1221 | bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); | 1221 | bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); |
| 1222 | bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); | 1222 | bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); |
| 1223 | int tcp_filter(struct sock *sk, struct sk_buff *skb); | ||
| 1223 | 1224 | ||
| 1224 | #undef STATE_TRACE | 1225 | #undef STATE_TRACE |
| 1225 | 1226 | ||
diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h index 5cd4d4d2dd1d..9c9c6ad55f14 100644 --- a/include/uapi/linux/atm_zatm.h +++ b/include/uapi/linux/atm_zatm.h | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | 14 | ||
| 15 | #include <linux/atmapi.h> | 15 | #include <linux/atmapi.h> |
| 16 | #include <linux/atmioc.h> | 16 | #include <linux/atmioc.h> |
| 17 | #include <linux/time.h> | ||
| 18 | 17 | ||
| 19 | #define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) | 18 | #define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) |
| 20 | /* get pool statistics */ | 19 | /* get pool statistics */ |
diff --git a/include/uapi/linux/bpqether.h b/include/uapi/linux/bpqether.h index a6c35e1a89ad..05865edaefda 100644 --- a/include/uapi/linux/bpqether.h +++ b/include/uapi/linux/bpqether.h | |||
| @@ -5,9 +5,7 @@ | |||
| 5 | * Defines for the BPQETHER pseudo device driver | 5 | * Defines for the BPQETHER pseudo device driver |
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #ifndef __LINUX_IF_ETHER_H | ||
| 9 | #include <linux/if_ether.h> | 8 | #include <linux/if_ether.h> |
| 10 | #endif | ||
| 11 | 9 | ||
| 12 | #define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */ | 10 | #define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */ |
| 13 | #define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1) | 11 | #define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1) |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 570eeca7bdfa..ad1bc67aff1b 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
| @@ -687,7 +687,8 @@ static void delete_all_elements(struct bpf_htab *htab) | |||
| 687 | 687 | ||
| 688 | hlist_for_each_entry_safe(l, n, head, hash_node) { | 688 | hlist_for_each_entry_safe(l, n, head, hash_node) { |
| 689 | hlist_del_rcu(&l->hash_node); | 689 | hlist_del_rcu(&l->hash_node); |
| 690 | htab_elem_free(htab, l); | 690 | if (l->state != HTAB_EXTRA_ELEM_USED) |
| 691 | htab_elem_free(htab, l); | ||
| 691 | } | 692 | } |
| 692 | } | 693 | } |
| 693 | } | 694 | } |
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 228f962447a5..237f3d6a7ddc 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
| @@ -194,7 +194,7 @@ static int map_create(union bpf_attr *attr) | |||
| 194 | 194 | ||
| 195 | err = bpf_map_charge_memlock(map); | 195 | err = bpf_map_charge_memlock(map); |
| 196 | if (err) | 196 | if (err) |
| 197 | goto free_map; | 197 | goto free_map_nouncharge; |
| 198 | 198 | ||
| 199 | err = bpf_map_new_fd(map); | 199 | err = bpf_map_new_fd(map); |
| 200 | if (err < 0) | 200 | if (err < 0) |
| @@ -204,6 +204,8 @@ static int map_create(union bpf_attr *attr) | |||
| 204 | return err; | 204 | return err; |
| 205 | 205 | ||
| 206 | free_map: | 206 | free_map: |
| 207 | bpf_map_uncharge_memlock(map); | ||
| 208 | free_map_nouncharge: | ||
| 207 | map->ops->map_free(map); | 209 | map->ops->map_free(map); |
| 208 | return err; | 210 | return err; |
| 209 | } | 211 | } |
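The map_create() fix adds a second label so that failures after bpf_map_charge_memlock() also uncharge the accounting, while earlier failures skip it. The same one-label-per-acquired-resource unwinding, in a generic hypothetical form:

#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical two-step constructor illustrating the layered error labels
 * used by map_create() above: each label undoes only what was set up
 * before the failing step, in reverse order of acquisition.
 */
struct thing {
        void *buf;
        void *extra;
        bool charged;
};

static int thing_charge(struct thing *t)   { t->charged = true;  return 0; }   /* stand-in */
static void thing_uncharge(struct thing *t) { t->charged = false; }             /* stand-in */

static int thing_create(struct thing *t, size_t len)
{
        int err;

        t->buf = kzalloc(len, GFP_KERNEL);
        if (!t->buf)
                return -ENOMEM;

        err = thing_charge(t);
        if (err)
                goto free_buf_nouncharge;       /* nothing charged yet */

        t->extra = kzalloc(len, GFP_KERNEL);
        if (!t->extra) {
                err = -ENOMEM;
                goto free_buf;                  /* must also uncharge */
        }

        return 0;

free_buf:
        thing_uncharge(t);
free_buf_nouncharge:
        kfree(t->buf);
        return err;
}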
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index b3f05ee20d18..cbb387a265db 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
| @@ -54,7 +54,11 @@ static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1 | |||
| 54 | [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, | 54 | [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, |
| 55 | [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; | 55 | [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; |
| 56 | 56 | ||
| 57 | static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = { | 57 | /* |
| 58 | * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family. | ||
| 59 | * Make sure they are always aligned. | ||
| 60 | */ | ||
| 61 | static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = { | ||
| 58 | [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 }, | 62 | [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 }, |
| 59 | }; | 63 | }; |
| 60 | 64 | ||
diff --git a/net/can/bcm.c b/net/can/bcm.c index 8e999ffdf28b..8af9d25ff988 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
| @@ -1549,24 +1549,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, | |||
| 1549 | struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; | 1549 | struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; |
| 1550 | struct sock *sk = sock->sk; | 1550 | struct sock *sk = sock->sk; |
| 1551 | struct bcm_sock *bo = bcm_sk(sk); | 1551 | struct bcm_sock *bo = bcm_sk(sk); |
| 1552 | int ret = 0; | ||
| 1552 | 1553 | ||
| 1553 | if (len < sizeof(*addr)) | 1554 | if (len < sizeof(*addr)) |
| 1554 | return -EINVAL; | 1555 | return -EINVAL; |
| 1555 | 1556 | ||
| 1556 | if (bo->bound) | 1557 | lock_sock(sk); |
| 1557 | return -EISCONN; | 1558 | |
| 1559 | if (bo->bound) { | ||
| 1560 | ret = -EISCONN; | ||
| 1561 | goto fail; | ||
| 1562 | } | ||
| 1558 | 1563 | ||
| 1559 | /* bind a device to this socket */ | 1564 | /* bind a device to this socket */ |
| 1560 | if (addr->can_ifindex) { | 1565 | if (addr->can_ifindex) { |
| 1561 | struct net_device *dev; | 1566 | struct net_device *dev; |
| 1562 | 1567 | ||
| 1563 | dev = dev_get_by_index(&init_net, addr->can_ifindex); | 1568 | dev = dev_get_by_index(&init_net, addr->can_ifindex); |
| 1564 | if (!dev) | 1569 | if (!dev) { |
| 1565 | return -ENODEV; | 1570 | ret = -ENODEV; |
| 1566 | 1571 | goto fail; | |
| 1572 | } | ||
| 1567 | if (dev->type != ARPHRD_CAN) { | 1573 | if (dev->type != ARPHRD_CAN) { |
| 1568 | dev_put(dev); | 1574 | dev_put(dev); |
| 1569 | return -ENODEV; | 1575 | ret = -ENODEV; |
| 1576 | goto fail; | ||
| 1570 | } | 1577 | } |
| 1571 | 1578 | ||
| 1572 | bo->ifindex = dev->ifindex; | 1579 | bo->ifindex = dev->ifindex; |
| @@ -1577,17 +1584,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, | |||
| 1577 | bo->ifindex = 0; | 1584 | bo->ifindex = 0; |
| 1578 | } | 1585 | } |
| 1579 | 1586 | ||
| 1580 | bo->bound = 1; | ||
| 1581 | |||
| 1582 | if (proc_dir) { | 1587 | if (proc_dir) { |
| 1583 | /* unique socket address as filename */ | 1588 | /* unique socket address as filename */ |
| 1584 | sprintf(bo->procname, "%lu", sock_i_ino(sk)); | 1589 | sprintf(bo->procname, "%lu", sock_i_ino(sk)); |
| 1585 | bo->bcm_proc_read = proc_create_data(bo->procname, 0644, | 1590 | bo->bcm_proc_read = proc_create_data(bo->procname, 0644, |
| 1586 | proc_dir, | 1591 | proc_dir, |
| 1587 | &bcm_proc_fops, sk); | 1592 | &bcm_proc_fops, sk); |
| 1593 | if (!bo->bcm_proc_read) { | ||
| 1594 | ret = -ENOMEM; | ||
| 1595 | goto fail; | ||
| 1596 | } | ||
| 1588 | } | 1597 | } |
| 1589 | 1598 | ||
| 1590 | return 0; | 1599 | bo->bound = 1; |
| 1600 | |||
| 1601 | fail: | ||
| 1602 | release_sock(sk); | ||
| 1603 | |||
| 1604 | return ret; | ||
| 1591 | } | 1605 | } |
| 1592 | 1606 | ||
| 1593 | static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, | 1607 | static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, |
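bcm_connect() now holds the socket lock for the whole bind, funnels every exit through one label so the lock is always released, checks the proc_create_data() result, and only sets bo->bound once everything has succeeded. A stripped-down, hypothetical sketch of that shape:

#include <net/sock.h>
#include <linux/errno.h>

/* Hypothetical bind-style handler in the shape of the patched bcm_connect():
 * success and failure both fall through the same label so the socket lock
 * is always dropped, and the "bound" state is committed only after every
 * fallible step has succeeded.
 */
static int demo_bind(struct sock *sk, bool already_bound, bool have_resources)
{
        int ret = 0;

        lock_sock(sk);

        if (already_bound) {
                ret = -EISCONN;
                goto fail;
        }

        if (!have_resources) {
                ret = -ENOMEM;
                goto fail;
        }

        /* ...set up per-socket state, then commit: mark the socket bound... */

fail:
        release_sock(sk);
        return ret;
}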
diff --git a/net/core/dev.c b/net/core/dev.c index 820bac239738..6666b28b6815 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -1766,19 +1766,14 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable); | |||
| 1766 | 1766 | ||
| 1767 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | 1767 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) |
| 1768 | { | 1768 | { |
| 1769 | if (skb_orphan_frags(skb, GFP_ATOMIC) || | 1769 | int ret = ____dev_forward_skb(dev, skb); |
| 1770 | unlikely(!is_skb_forwardable(dev, skb))) { | ||
| 1771 | atomic_long_inc(&dev->rx_dropped); | ||
| 1772 | kfree_skb(skb); | ||
| 1773 | return NET_RX_DROP; | ||
| 1774 | } | ||
| 1775 | 1770 | ||
| 1776 | skb_scrub_packet(skb, true); | 1771 | if (likely(!ret)) { |
| 1777 | skb->priority = 0; | 1772 | skb->protocol = eth_type_trans(skb, dev); |
| 1778 | skb->protocol = eth_type_trans(skb, dev); | 1773 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); |
| 1779 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); | 1774 | } |
| 1780 | 1775 | ||
| 1781 | return 0; | 1776 | return ret; |
| 1782 | } | 1777 | } |
| 1783 | EXPORT_SYMBOL_GPL(__dev_forward_skb); | 1778 | EXPORT_SYMBOL_GPL(__dev_forward_skb); |
| 1784 | 1779 | ||
| @@ -2484,7 +2479,7 @@ int skb_checksum_help(struct sk_buff *skb) | |||
| 2484 | goto out; | 2479 | goto out; |
| 2485 | } | 2480 | } |
| 2486 | 2481 | ||
| 2487 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | 2482 | *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; |
| 2488 | out_set_summed: | 2483 | out_set_summed: |
| 2489 | skb->ip_summed = CHECKSUM_NONE; | 2484 | skb->ip_summed = CHECKSUM_NONE; |
| 2490 | out: | 2485 | out: |
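The skb_checksum_help() one-liner maps a folded checksum of zero to CSUM_MANGLED_0 (0xffff). In ones' complement arithmetic the two encodings are equivalent, but a literal zero in the UDP checksum field would mean "no checksum", so the substitution keeps the datagram valid. A small sketch of the idiom:

#include <linux/skbuff.h>
#include <net/checksum.h>

/* Fold a 32-bit partial checksum into the 16-bit on-wire form, but never
 * emit 0: 0x0000 and 0xffff represent the same ones' complement value,
 * and for UDP a zero checksum field would mean "checksum not computed".
 */
static __sum16 fold_nonzero(__wsum partial)
{
        return csum_fold(partial) ?: CSUM_MANGLED_0;
}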
diff --git a/net/core/filter.c b/net/core/filter.c index 00351cdf7d0c..b391209838ef 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -1628,6 +1628,19 @@ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) | |||
| 1628 | return dev_forward_skb(dev, skb); | 1628 | return dev_forward_skb(dev, skb); |
| 1629 | } | 1629 | } |
| 1630 | 1630 | ||
| 1631 | static inline int __bpf_rx_skb_no_mac(struct net_device *dev, | ||
| 1632 | struct sk_buff *skb) | ||
| 1633 | { | ||
| 1634 | int ret = ____dev_forward_skb(dev, skb); | ||
| 1635 | |||
| 1636 | if (likely(!ret)) { | ||
| 1637 | skb->dev = dev; | ||
| 1638 | ret = netif_rx(skb); | ||
| 1639 | } | ||
| 1640 | |||
| 1641 | return ret; | ||
| 1642 | } | ||
| 1643 | |||
| 1631 | static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) | 1644 | static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) |
| 1632 | { | 1645 | { |
| 1633 | int ret; | 1646 | int ret; |
| @@ -1647,6 +1660,51 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) | |||
| 1647 | return ret; | 1660 | return ret; |
| 1648 | } | 1661 | } |
| 1649 | 1662 | ||
| 1663 | static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, | ||
| 1664 | u32 flags) | ||
| 1665 | { | ||
| 1666 | /* skb->mac_len is not set on normal egress */ | ||
| 1667 | unsigned int mlen = skb->network_header - skb->mac_header; | ||
| 1668 | |||
| 1669 | __skb_pull(skb, mlen); | ||
| 1670 | |||
| 1671 | /* At ingress, the mac header has already been pulled once. | ||
| 1672 | * At egress, skb_postpull_rcsum has to be done in case the | ||
| 1673 | * skb originated from ingress (i.e. a forwarded skb) | ||
| 1674 | * to ensure that rcsum starts at net header. | ||
| 1675 | */ | ||
| 1676 | if (!skb_at_tc_ingress(skb)) | ||
| 1677 | skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); | ||
| 1678 | skb_pop_mac_header(skb); | ||
| 1679 | skb_reset_mac_len(skb); | ||
| 1680 | return flags & BPF_F_INGRESS ? | ||
| 1681 | __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); | ||
| 1682 | } | ||
| 1683 | |||
| 1684 | static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, | ||
| 1685 | u32 flags) | ||
| 1686 | { | ||
| 1687 | bpf_push_mac_rcsum(skb); | ||
| 1688 | return flags & BPF_F_INGRESS ? | ||
| 1689 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); | ||
| 1690 | } | ||
| 1691 | |||
| 1692 | static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, | ||
| 1693 | u32 flags) | ||
| 1694 | { | ||
| 1695 | switch (dev->type) { | ||
| 1696 | case ARPHRD_TUNNEL: | ||
| 1697 | case ARPHRD_TUNNEL6: | ||
| 1698 | case ARPHRD_SIT: | ||
| 1699 | case ARPHRD_IPGRE: | ||
| 1700 | case ARPHRD_VOID: | ||
| 1701 | case ARPHRD_NONE: | ||
| 1702 | return __bpf_redirect_no_mac(skb, dev, flags); | ||
| 1703 | default: | ||
| 1704 | return __bpf_redirect_common(skb, dev, flags); | ||
| 1705 | } | ||
| 1706 | } | ||
| 1707 | |||
| 1650 | BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) | 1708 | BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) |
| 1651 | { | 1709 | { |
| 1652 | struct net_device *dev; | 1710 | struct net_device *dev; |
| @@ -1675,10 +1733,7 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) | |||
| 1675 | return -ENOMEM; | 1733 | return -ENOMEM; |
| 1676 | } | 1734 | } |
| 1677 | 1735 | ||
| 1678 | bpf_push_mac_rcsum(clone); | 1736 | return __bpf_redirect(clone, dev, flags); |
| 1679 | |||
| 1680 | return flags & BPF_F_INGRESS ? | ||
| 1681 | __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone); | ||
| 1682 | } | 1737 | } |
| 1683 | 1738 | ||
| 1684 | static const struct bpf_func_proto bpf_clone_redirect_proto = { | 1739 | static const struct bpf_func_proto bpf_clone_redirect_proto = { |
| @@ -1722,10 +1777,7 @@ int skb_do_redirect(struct sk_buff *skb) | |||
| 1722 | return -EINVAL; | 1777 | return -EINVAL; |
| 1723 | } | 1778 | } |
| 1724 | 1779 | ||
| 1725 | bpf_push_mac_rcsum(skb); | 1780 | return __bpf_redirect(skb, dev, ri->flags); |
| 1726 | |||
| 1727 | return ri->flags & BPF_F_INGRESS ? | ||
| 1728 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); | ||
| 1729 | } | 1781 | } |
| 1730 | 1782 | ||
| 1731 | static const struct bpf_func_proto bpf_redirect_proto = { | 1783 | static const struct bpf_func_proto bpf_redirect_proto = { |
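The filter.c rework routes redirects through __bpf_redirect(), which picks __bpf_redirect_common() for devices with a link-layer header and __bpf_redirect_no_mac() for ARPHRD_NONE/tunnel-type devices, so bpf_redirect()/bpf_clone_redirect() now also work towards L3 devices. Nothing changes on the program side; a minimal tc classifier using the helper might look like the hedged sketch below (the ifindex and section name are arbitrary, and bpf_helpers.h is the usual helper header from samples/bpf or libbpf):

/* Sketch of a tc/BPF classifier using the redirect helper; attach with
 * something like "tc filter add dev eth0 egress bpf da obj prog.o sec classifier".
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

#define TARGET_IFINDEX 4        /* arbitrary example ifindex */

SEC("classifier")
int redirect_all(struct __sk_buff *skb)
{
        /* Passing BPF_F_INGRESS instead of 0 would deliver the packet to
         * the target device's ingress path rather than transmitting it.
         */
        return bpf_redirect(TARGET_IFINDEX, 0);
}

char _license[] SEC("license") = "GPL";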
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index ab193e5def07..69e4463a4b1b 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
| @@ -122,7 +122,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, | |||
| 122 | struct flow_dissector_key_keyid *key_keyid; | 122 | struct flow_dissector_key_keyid *key_keyid; |
| 123 | bool skip_vlan = false; | 123 | bool skip_vlan = false; |
| 124 | u8 ip_proto = 0; | 124 | u8 ip_proto = 0; |
| 125 | bool ret = false; | 125 | bool ret; |
| 126 | 126 | ||
| 127 | if (!data) { | 127 | if (!data) { |
| 128 | data = skb->data; | 128 | data = skb->data; |
| @@ -549,12 +549,17 @@ ip_proto_again: | |||
| 549 | out_good: | 549 | out_good: |
| 550 | ret = true; | 550 | ret = true; |
| 551 | 551 | ||
| 552 | out_bad: | 552 | key_control->thoff = (u16)nhoff; |
| 553 | out: | ||
| 553 | key_basic->n_proto = proto; | 554 | key_basic->n_proto = proto; |
| 554 | key_basic->ip_proto = ip_proto; | 555 | key_basic->ip_proto = ip_proto; |
| 555 | key_control->thoff = (u16)nhoff; | ||
| 556 | 556 | ||
| 557 | return ret; | 557 | return ret; |
| 558 | |||
| 559 | out_bad: | ||
| 560 | ret = false; | ||
| 561 | key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen); | ||
| 562 | goto out; | ||
| 558 | } | 563 | } |
| 559 | EXPORT_SYMBOL(__skb_flow_dissect); | 564 | EXPORT_SYMBOL(__skb_flow_dissect); |
| 560 | 565 | ||
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index fb7348f13501..db313ec7af32 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
| @@ -275,6 +275,7 @@ int rtnl_unregister(int protocol, int msgtype) | |||
| 275 | 275 | ||
| 276 | rtnl_msg_handlers[protocol][msgindex].doit = NULL; | 276 | rtnl_msg_handlers[protocol][msgindex].doit = NULL; |
| 277 | rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; | 277 | rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; |
| 278 | rtnl_msg_handlers[protocol][msgindex].calcit = NULL; | ||
| 278 | 279 | ||
| 279 | return 0; | 280 | return 0; |
| 280 | } | 281 | } |
diff --git a/net/core/sock.c b/net/core/sock.c index c73e28fc9c2a..5e3ca414357e 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
| 453 | EXPORT_SYMBOL(sock_queue_rcv_skb); | 453 | EXPORT_SYMBOL(sock_queue_rcv_skb); |
| 454 | 454 | ||
| 455 | int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, | 455 | int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, |
| 456 | const int nested, unsigned int trim_cap) | 456 | const int nested, unsigned int trim_cap, bool refcounted) |
| 457 | { | 457 | { |
| 458 | int rc = NET_RX_SUCCESS; | 458 | int rc = NET_RX_SUCCESS; |
| 459 | 459 | ||
| @@ -487,7 +487,8 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, | |||
| 487 | 487 | ||
| 488 | bh_unlock_sock(sk); | 488 | bh_unlock_sock(sk); |
| 489 | out: | 489 | out: |
| 490 | sock_put(sk); | 490 | if (refcounted) |
| 491 | sock_put(sk); | ||
| 491 | return rc; | 492 | return rc; |
| 492 | discard_and_relse: | 493 | discard_and_relse: |
| 493 | kfree_skb(skb); | 494 | kfree_skb(skb); |
| @@ -1543,6 +1544,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
| 1543 | RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); | 1544 | RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); |
| 1544 | 1545 | ||
| 1545 | newsk->sk_err = 0; | 1546 | newsk->sk_err = 0; |
| 1547 | newsk->sk_err_soft = 0; | ||
| 1546 | newsk->sk_priority = 0; | 1548 | newsk->sk_priority = 0; |
| 1547 | newsk->sk_incoming_cpu = raw_smp_processor_id(); | 1549 | newsk->sk_incoming_cpu = raw_smp_processor_id(); |
| 1548 | atomic64_set(&newsk->sk_cookie, 0); | 1550 | atomic64_set(&newsk->sk_cookie, 0); |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 345a3aeb8c7e..b567c8725aea 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
| @@ -235,7 +235,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
| 235 | { | 235 | { |
| 236 | const struct iphdr *iph = (struct iphdr *)skb->data; | 236 | const struct iphdr *iph = (struct iphdr *)skb->data; |
| 237 | const u8 offset = iph->ihl << 2; | 237 | const u8 offset = iph->ihl << 2; |
| 238 | const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); | 238 | const struct dccp_hdr *dh; |
| 239 | struct dccp_sock *dp; | 239 | struct dccp_sock *dp; |
| 240 | struct inet_sock *inet; | 240 | struct inet_sock *inet; |
| 241 | const int type = icmp_hdr(skb)->type; | 241 | const int type = icmp_hdr(skb)->type; |
| @@ -245,11 +245,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
| 245 | int err; | 245 | int err; |
| 246 | struct net *net = dev_net(skb->dev); | 246 | struct net *net = dev_net(skb->dev); |
| 247 | 247 | ||
| 248 | if (skb->len < offset + sizeof(*dh) || | 248 | /* Only need dccph_dport & dccph_sport which are the first |
| 249 | skb->len < offset + __dccp_basic_hdr_len(dh)) { | 249 | * 4 bytes in dccp header. |
| 250 | __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); | 250 | * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us. |
| 251 | return; | 251 | */ |
| 252 | } | 252 | BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); |
| 253 | BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); | ||
| 254 | dh = (struct dccp_hdr *)(skb->data + offset); | ||
| 253 | 255 | ||
| 254 | sk = __inet_lookup_established(net, &dccp_hashinfo, | 256 | sk = __inet_lookup_established(net, &dccp_hashinfo, |
| 255 | iph->daddr, dh->dccph_dport, | 257 | iph->daddr, dh->dccph_dport, |
| @@ -868,7 +870,7 @@ lookup: | |||
| 868 | goto discard_and_relse; | 870 | goto discard_and_relse; |
| 869 | nf_reset(skb); | 871 | nf_reset(skb); |
| 870 | 872 | ||
| 871 | return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4); | 873 | return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted); |
| 872 | 874 | ||
| 873 | no_dccp_socket: | 875 | no_dccp_socket: |
| 874 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 876 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
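dccp_v4_err() (and dccp_v6_err() in the next hunk) now documents its reliance on the 8 bytes already pulled by the ICMP layer as a compile-time assertion, instead of a runtime length check that dereferenced dh before validating it. The pattern, reduced to its essentials with an illustrative structure:

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/bug.h>

struct demo_hdr {
        __be16 sport;   /* bytes 0-1 */
        __be16 dport;   /* bytes 2-3 */
        __be32 other;   /* bytes 4-7 */
};

static void check_header_layout(void)
{
        /* Fails the build if either field ever moves past the first
         * 8 bytes that the caller is known to have pulled already.
         */
        BUILD_BUG_ON(offsetofend(struct demo_hdr, sport) > 8);
        BUILD_BUG_ON(offsetofend(struct demo_hdr, dport) > 8);
}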
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 3828f94b234c..715e5d1dc107 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
| @@ -70,7 +70,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 70 | u8 type, u8 code, int offset, __be32 info) | 70 | u8 type, u8 code, int offset, __be32 info) |
| 71 | { | 71 | { |
| 72 | const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; | 72 | const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; |
| 73 | const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); | 73 | const struct dccp_hdr *dh; |
| 74 | struct dccp_sock *dp; | 74 | struct dccp_sock *dp; |
| 75 | struct ipv6_pinfo *np; | 75 | struct ipv6_pinfo *np; |
| 76 | struct sock *sk; | 76 | struct sock *sk; |
| @@ -78,12 +78,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 78 | __u64 seq; | 78 | __u64 seq; |
| 79 | struct net *net = dev_net(skb->dev); | 79 | struct net *net = dev_net(skb->dev); |
| 80 | 80 | ||
| 81 | if (skb->len < offset + sizeof(*dh) || | 81 | /* Only need dccph_dport & dccph_sport which are the first |
| 82 | skb->len < offset + __dccp_basic_hdr_len(dh)) { | 82 | * 4 bytes in dccp header. |
| 83 | __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), | 83 | * Our caller (icmpv6_notify()) already pulled 8 bytes for us. |
| 84 | ICMP6_MIB_INERRORS); | 84 | */ |
| 85 | return; | 85 | BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); |
| 86 | } | 86 | BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); |
| 87 | dh = (struct dccp_hdr *)(skb->data + offset); | ||
| 87 | 88 | ||
| 88 | sk = __inet6_lookup_established(net, &dccp_hashinfo, | 89 | sk = __inet6_lookup_established(net, &dccp_hashinfo, |
| 89 | &hdr->daddr, dh->dccph_dport, | 90 | &hdr->daddr, dh->dccph_dport, |
| @@ -738,7 +739,8 @@ lookup: | |||
| 738 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) | 739 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) |
| 739 | goto discard_and_relse; | 740 | goto discard_and_relse; |
| 740 | 741 | ||
| 741 | return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0; | 742 | return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, |
| 743 | refcounted) ? -1 : 0; | ||
| 742 | 744 | ||
| 743 | no_dccp_socket: | 745 | no_dccp_socket: |
| 744 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) | 746 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) |
| @@ -956,6 +958,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { | |||
| 956 | .getsockopt = ipv6_getsockopt, | 958 | .getsockopt = ipv6_getsockopt, |
| 957 | .addr2sockaddr = inet6_csk_addr2sockaddr, | 959 | .addr2sockaddr = inet6_csk_addr2sockaddr, |
| 958 | .sockaddr_len = sizeof(struct sockaddr_in6), | 960 | .sockaddr_len = sizeof(struct sockaddr_in6), |
| 961 | .bind_conflict = inet6_csk_bind_conflict, | ||
| 959 | #ifdef CONFIG_COMPAT | 962 | #ifdef CONFIG_COMPAT |
| 960 | .compat_setsockopt = compat_ipv6_setsockopt, | 963 | .compat_setsockopt = compat_ipv6_setsockopt, |
| 961 | .compat_getsockopt = compat_ipv6_getsockopt, | 964 | .compat_getsockopt = compat_ipv6_getsockopt, |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 41e65804ddf5..9fe25bf63296 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
| @@ -1009,6 +1009,10 @@ void dccp_close(struct sock *sk, long timeout) | |||
| 1009 | __kfree_skb(skb); | 1009 | __kfree_skb(skb); |
| 1010 | } | 1010 | } |
| 1011 | 1011 | ||
| 1012 | /* If the socket has already been reset, kill it. */ | ||
| 1013 | if (sk->sk_state == DCCP_CLOSED) | ||
| 1014 | goto adjudge_to_death; | ||
| 1015 | |||
| 1012 | if (data_was_unread) { | 1016 | if (data_was_unread) { |
| 1013 | /* Unread data was tossed, send an appropriate Reset Code */ | 1017 | /* Unread data was tossed, send an appropriate Reset Code */ |
| 1014 | DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); | 1018 | DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 9648c97e541f..5ddf5cda07f4 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
| @@ -533,9 +533,9 @@ EXPORT_SYMBOL(inet_dgram_connect); | |||
| 533 | 533 | ||
| 534 | static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) | 534 | static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) |
| 535 | { | 535 | { |
| 536 | DEFINE_WAIT(wait); | 536 | DEFINE_WAIT_FUNC(wait, woken_wake_function); |
| 537 | 537 | ||
| 538 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 538 | add_wait_queue(sk_sleep(sk), &wait); |
| 539 | sk->sk_write_pending += writebias; | 539 | sk->sk_write_pending += writebias; |
| 540 | 540 | ||
| 541 | /* Basic assumption: if someone sets sk->sk_err, he _must_ | 541 | /* Basic assumption: if someone sets sk->sk_err, he _must_ |
| @@ -545,13 +545,12 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) | |||
| 545 | */ | 545 | */ |
| 546 | while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 546 | while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
| 547 | release_sock(sk); | 547 | release_sock(sk); |
| 548 | timeo = schedule_timeout(timeo); | 548 | timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); |
| 549 | lock_sock(sk); | 549 | lock_sock(sk); |
| 550 | if (signal_pending(current) || !timeo) | 550 | if (signal_pending(current) || !timeo) |
| 551 | break; | 551 | break; |
| 552 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | ||
| 553 | } | 552 | } |
| 554 | finish_wait(sk_sleep(sk), &wait); | 553 | remove_wait_queue(sk_sleep(sk), &wait); |
| 555 | sk->sk_write_pending -= writebias; | 554 | sk->sk_write_pending -= writebias; |
| 556 | return timeo; | 555 | return timeo; |
| 557 | } | 556 | } |
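inet_wait_for_connect() moves from prepare_to_wait()/schedule_timeout() to DEFINE_WAIT_FUNC(..., woken_wake_function) plus wait_woken(): the wakeup is recorded in the wait entry itself, so one that arrives while the task is still running between checks is not lost even though the wait is armed only once. The general shape of the pattern, with a made-up condition:

#include <linux/wait.h>
#include <linux/sched.h>

/* Hypothetical sketch: sleep until "done" is set, a signal arrives, or the
 * timeout expires.  wait_woken() remembers wakeups delivered while we were
 * runnable, so the wait does not need re-arming before every check.
 */
static long wait_for_done(wait_queue_head_t *wq, bool *done, long timeo)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(wq, &wait);
        while (!*done && timeo && !signal_pending(current))
                timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
        remove_wait_queue(wq, &wait);

        return timeo;
}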
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 31cef3602585..4cff74d4133f 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
| @@ -2413,22 +2413,19 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, | |||
| 2413 | struct key_vector *l, **tp = &iter->tnode; | 2413 | struct key_vector *l, **tp = &iter->tnode; |
| 2414 | t_key key; | 2414 | t_key key; |
| 2415 | 2415 | ||
| 2416 | /* use cache location of next-to-find key */ | 2416 | /* use cached location of previously found key */ |
| 2417 | if (iter->pos > 0 && pos >= iter->pos) { | 2417 | if (iter->pos > 0 && pos >= iter->pos) { |
| 2418 | pos -= iter->pos; | ||
| 2419 | key = iter->key; | 2418 | key = iter->key; |
| 2420 | } else { | 2419 | } else { |
| 2421 | iter->pos = 0; | 2420 | iter->pos = 1; |
| 2422 | key = 0; | 2421 | key = 0; |
| 2423 | } | 2422 | } |
| 2424 | 2423 | ||
| 2425 | while ((l = leaf_walk_rcu(tp, key)) != NULL) { | 2424 | pos -= iter->pos; |
| 2425 | |||
| 2426 | while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) { | ||
| 2426 | key = l->key + 1; | 2427 | key = l->key + 1; |
| 2427 | iter->pos++; | 2428 | iter->pos++; |
| 2428 | |||
| 2429 | if (--pos <= 0) | ||
| 2430 | break; | ||
| 2431 | |||
| 2432 | l = NULL; | 2429 | l = NULL; |
| 2433 | 2430 | ||
| 2434 | /* handle unlikely case of a key wrap */ | 2431 | /* handle unlikely case of a key wrap */ |
| @@ -2437,7 +2434,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, | |||
| 2437 | } | 2434 | } |
| 2438 | 2435 | ||
| 2439 | if (l) | 2436 | if (l) |
| 2440 | iter->key = key; /* remember it */ | 2437 | iter->key = l->key; /* remember it */ |
| 2441 | else | 2438 | else |
| 2442 | iter->pos = 0; /* forget it */ | 2439 | iter->pos = 0; /* forget it */ |
| 2443 | 2440 | ||
| @@ -2465,7 +2462,7 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos) | |||
| 2465 | return fib_route_get_idx(iter, *pos); | 2462 | return fib_route_get_idx(iter, *pos); |
| 2466 | 2463 | ||
| 2467 | iter->pos = 0; | 2464 | iter->pos = 0; |
| 2468 | iter->key = 0; | 2465 | iter->key = KEY_MAX; |
| 2469 | 2466 | ||
| 2470 | return SEQ_START_TOKEN; | 2467 | return SEQ_START_TOKEN; |
| 2471 | } | 2468 | } |
| @@ -2474,7 +2471,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
| 2474 | { | 2471 | { |
| 2475 | struct fib_route_iter *iter = seq->private; | 2472 | struct fib_route_iter *iter = seq->private; |
| 2476 | struct key_vector *l = NULL; | 2473 | struct key_vector *l = NULL; |
| 2477 | t_key key = iter->key; | 2474 | t_key key = iter->key + 1; |
| 2478 | 2475 | ||
| 2479 | ++*pos; | 2476 | ++*pos; |
| 2480 | 2477 | ||
| @@ -2483,7 +2480,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
| 2483 | l = leaf_walk_rcu(&iter->tnode, key); | 2480 | l = leaf_walk_rcu(&iter->tnode, key); |
| 2484 | 2481 | ||
| 2485 | if (l) { | 2482 | if (l) { |
| 2486 | iter->key = l->key + 1; | 2483 | iter->key = l->key; |
| 2487 | iter->pos++; | 2484 | iter->pos++; |
| 2488 | } else { | 2485 | } else { |
| 2489 | iter->pos = 0; | 2486 | iter->pos = 0; |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 38abe70e595f..48734ee6293f 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
| @@ -477,7 +477,7 @@ static struct rtable *icmp_route_lookup(struct net *net, | |||
| 477 | fl4->flowi4_proto = IPPROTO_ICMP; | 477 | fl4->flowi4_proto = IPPROTO_ICMP; |
| 478 | fl4->fl4_icmp_type = type; | 478 | fl4->fl4_icmp_type = type; |
| 479 | fl4->fl4_icmp_code = code; | 479 | fl4->fl4_icmp_code = code; |
| 480 | fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev); | 480 | fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev); |
| 481 | 481 | ||
| 482 | security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); | 482 | security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); |
| 483 | rt = __ip_route_output_key_hash(net, fl4, | 483 | rt = __ip_route_output_key_hash(net, fl4, |
| @@ -502,7 +502,7 @@ static struct rtable *icmp_route_lookup(struct net *net, | |||
| 502 | if (err) | 502 | if (err) |
| 503 | goto relookup_failed; | 503 | goto relookup_failed; |
| 504 | 504 | ||
| 505 | if (inet_addr_type_dev_table(net, skb_in->dev, | 505 | if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev, |
| 506 | fl4_dec.saddr) == RTN_LOCAL) { | 506 | fl4_dec.saddr) == RTN_LOCAL) { |
| 507 | rt2 = __ip_route_output_key(net, &fl4_dec); | 507 | rt2 = __ip_route_output_key(net, &fl4_dec); |
| 508 | if (IS_ERR(rt2)) | 508 | if (IS_ERR(rt2)) |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 8b4ffd216839..9f0a7b96646f 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
| @@ -117,7 +117,7 @@ int ip_forward(struct sk_buff *skb) | |||
| 117 | if (opt->is_strictroute && rt->rt_uses_gateway) | 117 | if (opt->is_strictroute && rt->rt_uses_gateway) |
| 118 | goto sr_failed; | 118 | goto sr_failed; |
| 119 | 119 | ||
| 120 | IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; | 120 | IPCB(skb)->flags |= IPSKB_FORWARDED; |
| 121 | mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); | 121 | mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); |
| 122 | if (ip_exceeds_mtu(skb, mtu)) { | 122 | if (ip_exceeds_mtu(skb, mtu)) { |
| 123 | IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); | 123 | IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 03e7f7310423..105908d841a3 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
| @@ -239,19 +239,23 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk, | |||
| 239 | struct sk_buff *segs; | 239 | struct sk_buff *segs; |
| 240 | int ret = 0; | 240 | int ret = 0; |
| 241 | 241 | ||
| 242 | /* common case: fragmentation of segments is not allowed, | 242 | /* common case: seglen is <= mtu |
| 243 | * or seglen is <= mtu | ||
| 244 | */ | 243 | */ |
| 245 | if (((IPCB(skb)->flags & IPSKB_FRAG_SEGS) == 0) || | 244 | if (skb_gso_validate_mtu(skb, mtu)) |
| 246 | skb_gso_validate_mtu(skb, mtu)) | ||
| 247 | return ip_finish_output2(net, sk, skb); | 245 | return ip_finish_output2(net, sk, skb); |
| 248 | 246 | ||
| 249 | /* Slowpath - GSO segment length is exceeding the dst MTU. | 247 | /* Slowpath - GSO segment length exceeds the egress MTU. |
| 250 | * | 248 | * |
| 251 | * This can happen in two cases: | 249 | * This can happen in several cases: |
| 252 | * 1) TCP GRO packet, DF bit not set | 250 | * - Forwarding of a TCP GRO skb, when DF flag is not set. |
| 253 | * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly | 251 | * - Forwarding of an skb that arrived on a virtualization interface |
| 254 | * from host network stack. | 252 | * (virtio-net/vhost/tap) with TSO/GSO size set by other network |
| 253 | * stack. | ||
| 254 | * - Local GSO skb transmitted on a NETIF_F_TSO tunnel stacked over an | ||
| 255 | * interface with a smaller MTU. | ||
| 256 | * - Arriving GRO skb (or GSO skb in a virtualized environment) that is | ||
| 257 | * bridged to a NETIF_F_TSO tunnel stacked over an interface with an | ||
| 258 | * insufficient MTU. | ||
| 255 | */ | 259 | */ |
| 256 | features = netif_skb_features(skb); | 260 | features = netif_skb_features(skb); |
| 257 | BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); | 261 | BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); |
| @@ -1579,7 +1583,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, | |||
| 1579 | } | 1583 | } |
| 1580 | 1584 | ||
| 1581 | oif = arg->bound_dev_if; | 1585 | oif = arg->bound_dev_if; |
| 1582 | oif = oif ? : skb->skb_iif; | 1586 | if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) |
| 1587 | oif = skb->skb_iif; | ||
| 1583 | 1588 | ||
| 1584 | flowi4_init_output(&fl4, oif, | 1589 | flowi4_init_output(&fl4, oif, |
| 1585 | IP4_REPLY_MARK(net, skb->mark), | 1590 | IP4_REPLY_MARK(net, skb->mark), |
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 777bc1883870..fed3d29f9eb3 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
| @@ -63,7 +63,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, | |||
| 63 | int pkt_len = skb->len - skb_inner_network_offset(skb); | 63 | int pkt_len = skb->len - skb_inner_network_offset(skb); |
| 64 | struct net *net = dev_net(rt->dst.dev); | 64 | struct net *net = dev_net(rt->dst.dev); |
| 65 | struct net_device *dev = skb->dev; | 65 | struct net_device *dev = skb->dev; |
| 66 | int skb_iif = skb->skb_iif; | ||
| 67 | struct iphdr *iph; | 66 | struct iphdr *iph; |
| 68 | int err; | 67 | int err; |
| 69 | 68 | ||
| @@ -73,16 +72,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, | |||
| 73 | skb_dst_set(skb, &rt->dst); | 72 | skb_dst_set(skb, &rt->dst); |
| 74 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); | 73 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); |
| 75 | 74 | ||
| 76 | if (skb_iif && !(df & htons(IP_DF))) { | ||
| 77 | /* Arrived from an ingress interface, got encapsulated, with | ||
| 78 | * fragmentation of encapulating frames allowed. | ||
| 79 | * If skb is gso, the resulting encapsulated network segments | ||
| 80 | * may exceed dst mtu. | ||
| 81 | * Allow IP Fragmentation of segments. | ||
| 82 | */ | ||
| 83 | IPCB(skb)->flags |= IPSKB_FRAG_SEGS; | ||
| 84 | } | ||
| 85 | |||
| 86 | /* Push down and install the IP header. */ | 75 | /* Push down and install the IP header. */ |
| 87 | skb_push(skb, sizeof(struct iphdr)); | 76 | skb_push(skb, sizeof(struct iphdr)); |
| 88 | skb_reset_network_header(skb); | 77 | skb_reset_network_header(skb); |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 5f006e13de56..27089f5ebbb1 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
| @@ -1749,7 +1749,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, | |||
| 1749 | vif->dev->stats.tx_bytes += skb->len; | 1749 | vif->dev->stats.tx_bytes += skb->len; |
| 1750 | } | 1750 | } |
| 1751 | 1751 | ||
| 1752 | IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; | 1752 | IPCB(skb)->flags |= IPSKB_FORWARDED; |
| 1753 | 1753 | ||
| 1754 | /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally | 1754 | /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally |
| 1755 | * not only before forwarding, but after forwarding on all output | 1755 | * not only before forwarding, but after forwarding on all output |
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c index bf855e64fc45..0c01a270bf9f 100644 --- a/net/ipv4/netfilter/nft_dup_ipv4.c +++ b/net/ipv4/netfilter/nft_dup_ipv4.c | |||
| @@ -28,7 +28,7 @@ static void nft_dup_ipv4_eval(const struct nft_expr *expr, | |||
| 28 | struct in_addr gw = { | 28 | struct in_addr gw = { |
| 29 | .s_addr = (__force __be32)regs->data[priv->sreg_addr], | 29 | .s_addr = (__force __be32)regs->data[priv->sreg_addr], |
| 30 | }; | 30 | }; |
| 31 | int oif = regs->data[priv->sreg_dev]; | 31 | int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1; |
| 32 | 32 | ||
| 33 | nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif); | 33 | nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif); |
| 34 | } | 34 | } |
| @@ -59,7 +59,9 @@ static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
| 59 | { | 59 | { |
| 60 | struct nft_dup_ipv4 *priv = nft_expr_priv(expr); | 60 | struct nft_dup_ipv4 *priv = nft_expr_priv(expr); |
| 61 | 61 | ||
| 62 | if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || | 62 | if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr)) |
| 63 | goto nla_put_failure; | ||
| 64 | if (priv->sreg_dev && | ||
| 63 | nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) | 65 | nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) |
| 64 | goto nla_put_failure; | 66 | goto nla_put_failure; |
| 65 | 67 | ||
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 62d4d90c1389..2a57566e6e91 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -753,7 +753,9 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow | |||
| 753 | goto reject_redirect; | 753 | goto reject_redirect; |
| 754 | } | 754 | } |
| 755 | 755 | ||
| 756 | n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); | 756 | n = __ipv4_neigh_lookup(rt->dst.dev, new_gw); |
| 757 | if (!n) | ||
| 758 | n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); | ||
| 757 | if (!IS_ERR(n)) { | 759 | if (!IS_ERR(n)) { |
| 758 | if (!(n->nud_state & NUD_VALID)) { | 760 | if (!(n->nud_state & NUD_VALID)) { |
| 759 | neigh_event_send(n, NULL); | 761 | neigh_event_send(n, NULL); |
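The redirect handler now looks the new gateway up directly in the ARP table with __ipv4_neigh_lookup() and only falls back to neigh_create() when no entry exists, instead of going through ipv4_neigh_lookup() on the dst. A compressed sketch of the lookup-then-create fallback; the wrapper name is illustrative, and the caller is expected to handle the ERR_PTR() that neigh_create() can return:

#include <linux/netdevice.h>
#include <net/arp.h>

/* Find (or create) the neighbour entry for an IPv4 next hop on @dev. */
static struct neighbour *nexthop_neigh(struct net_device *dev, __be32 gw)
{
        struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)gw);

        if (!n)
                n = neigh_create(&arp_tbl, &gw, dev);

        return n;       /* may be ERR_PTR() on table/allocation failure */
}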
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 3251fe71f39f..814af89c1bd3 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -1164,7 +1164,7 @@ restart: | |||
| 1164 | 1164 | ||
| 1165 | err = -EPIPE; | 1165 | err = -EPIPE; |
| 1166 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) | 1166 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) |
| 1167 | goto out_err; | 1167 | goto do_error; |
| 1168 | 1168 | ||
| 1169 | sg = !!(sk->sk_route_caps & NETIF_F_SG); | 1169 | sg = !!(sk->sk_route_caps & NETIF_F_SG); |
| 1170 | 1170 | ||
| @@ -1241,7 +1241,7 @@ new_segment: | |||
| 1241 | 1241 | ||
| 1242 | if (!skb_can_coalesce(skb, i, pfrag->page, | 1242 | if (!skb_can_coalesce(skb, i, pfrag->page, |
| 1243 | pfrag->offset)) { | 1243 | pfrag->offset)) { |
| 1244 | if (i == sysctl_max_skb_frags || !sg) { | 1244 | if (i >= sysctl_max_skb_frags || !sg) { |
| 1245 | tcp_mark_push(tp, skb); | 1245 | tcp_mark_push(tp, skb); |
| 1246 | goto new_segment; | 1246 | goto new_segment; |
| 1247 | } | 1247 | } |
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 10d728b6804c..ab37c6775630 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c | |||
| @@ -56,6 +56,7 @@ struct dctcp { | |||
| 56 | u32 next_seq; | 56 | u32 next_seq; |
| 57 | u32 ce_state; | 57 | u32 ce_state; |
| 58 | u32 delayed_ack_reserved; | 58 | u32 delayed_ack_reserved; |
| 59 | u32 loss_cwnd; | ||
| 59 | }; | 60 | }; |
| 60 | 61 | ||
| 61 | static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ | 62 | static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ |
| @@ -96,6 +97,7 @@ static void dctcp_init(struct sock *sk) | |||
| 96 | ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); | 97 | ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); |
| 97 | 98 | ||
| 98 | ca->delayed_ack_reserved = 0; | 99 | ca->delayed_ack_reserved = 0; |
| 100 | ca->loss_cwnd = 0; | ||
| 99 | ca->ce_state = 0; | 101 | ca->ce_state = 0; |
| 100 | 102 | ||
| 101 | dctcp_reset(tp, ca); | 103 | dctcp_reset(tp, ca); |
| @@ -111,9 +113,10 @@ static void dctcp_init(struct sock *sk) | |||
| 111 | 113 | ||
| 112 | static u32 dctcp_ssthresh(struct sock *sk) | 114 | static u32 dctcp_ssthresh(struct sock *sk) |
| 113 | { | 115 | { |
| 114 | const struct dctcp *ca = inet_csk_ca(sk); | 116 | struct dctcp *ca = inet_csk_ca(sk); |
| 115 | struct tcp_sock *tp = tcp_sk(sk); | 117 | struct tcp_sock *tp = tcp_sk(sk); |
| 116 | 118 | ||
| 119 | ca->loss_cwnd = tp->snd_cwnd; | ||
| 117 | return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); | 120 | return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); |
| 118 | } | 121 | } |
| 119 | 122 | ||
| @@ -308,12 +311,20 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr, | |||
| 308 | return 0; | 311 | return 0; |
| 309 | } | 312 | } |
| 310 | 313 | ||
| 314 | static u32 dctcp_cwnd_undo(struct sock *sk) | ||
| 315 | { | ||
| 316 | const struct dctcp *ca = inet_csk_ca(sk); | ||
| 317 | |||
| 318 | return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); | ||
| 319 | } | ||
| 320 | |||
| 311 | static struct tcp_congestion_ops dctcp __read_mostly = { | 321 | static struct tcp_congestion_ops dctcp __read_mostly = { |
| 312 | .init = dctcp_init, | 322 | .init = dctcp_init, |
| 313 | .in_ack_event = dctcp_update_alpha, | 323 | .in_ack_event = dctcp_update_alpha, |
| 314 | .cwnd_event = dctcp_cwnd_event, | 324 | .cwnd_event = dctcp_cwnd_event, |
| 315 | .ssthresh = dctcp_ssthresh, | 325 | .ssthresh = dctcp_ssthresh, |
| 316 | .cong_avoid = tcp_reno_cong_avoid, | 326 | .cong_avoid = tcp_reno_cong_avoid, |
| 327 | .undo_cwnd = dctcp_cwnd_undo, | ||
| 317 | .set_state = dctcp_state, | 328 | .set_state = dctcp_state, |
| 318 | .get_info = dctcp_get_info, | 329 | .get_info = dctcp_get_info, |
| 319 | .flags = TCP_CONG_NEEDS_ECN, | 330 | .flags = TCP_CONG_NEEDS_ECN, |
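The dctcp change records snd_cwnd at the moment ssthresh is taken and adds an .undo_cwnd callback, so a loss that turns out to be spurious can restore the larger window instead of falling back to the generic undo. A stripped-down illustration of wiring an undo into a tcp_congestion_ops (names are illustrative; init/release callbacks and the module registration boilerplate are omitted):

#include <linux/module.h>
#include <net/tcp.h>

struct demo_ca {
        u32 loss_cwnd;
};

static u32 demo_ssthresh(struct sock *sk)
{
        struct demo_ca *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        ca->loss_cwnd = tp->snd_cwnd;           /* remember pre-loss window */
        return max(tp->snd_cwnd >> 1U, 2U);     /* halve, illustrative only */
}

static u32 demo_undo_cwnd(struct sock *sk)
{
        const struct demo_ca *ca = inet_csk_ca(sk);

        /* Loss detection was spurious: go back to the larger window. */
        return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}

static struct tcp_congestion_ops demo_cong __read_mostly = {
        .ssthresh       = demo_ssthresh,
        .undo_cwnd      = demo_undo_cwnd,
        .cong_avoid     = tcp_reno_cong_avoid,
        .name           = "demo",
        .owner          = THIS_MODULE,
};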
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 61b7be303eec..2259114c7242 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -1564,6 +1564,21 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) | |||
| 1564 | } | 1564 | } |
| 1565 | EXPORT_SYMBOL(tcp_add_backlog); | 1565 | EXPORT_SYMBOL(tcp_add_backlog); |
| 1566 | 1566 | ||
| 1567 | int tcp_filter(struct sock *sk, struct sk_buff *skb) | ||
| 1568 | { | ||
| 1569 | struct tcphdr *th = (struct tcphdr *)skb->data; | ||
| 1570 | unsigned int eaten = skb->len; | ||
| 1571 | int err; | ||
| 1572 | |||
| 1573 | err = sk_filter_trim_cap(sk, skb, th->doff * 4); | ||
| 1574 | if (!err) { | ||
| 1575 | eaten -= skb->len; | ||
| 1576 | TCP_SKB_CB(skb)->end_seq -= eaten; | ||
| 1577 | } | ||
| 1578 | return err; | ||
| 1579 | } | ||
| 1580 | EXPORT_SYMBOL(tcp_filter); | ||
| 1581 | |||
| 1567 | /* | 1582 | /* |
| 1568 | * From tcp_input.c | 1583 | * From tcp_input.c |
| 1569 | */ | 1584 | */ |
| @@ -1676,8 +1691,10 @@ process: | |||
| 1676 | 1691 | ||
| 1677 | nf_reset(skb); | 1692 | nf_reset(skb); |
| 1678 | 1693 | ||
| 1679 | if (sk_filter(sk, skb)) | 1694 | if (tcp_filter(sk, skb)) |
| 1680 | goto discard_and_relse; | 1695 | goto discard_and_relse; |
| 1696 | th = (const struct tcphdr *)skb->data; | ||
| 1697 | iph = ip_hdr(skb); | ||
| 1681 | 1698 | ||
| 1682 | skb->dev = NULL; | 1699 | skb->dev = NULL; |
| 1683 | 1700 | ||
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index bd59c343d35f..7370ad2e693a 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
| @@ -448,7 +448,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, | |||
| 448 | if (__ipv6_addr_needs_scope_id(addr_type)) | 448 | if (__ipv6_addr_needs_scope_id(addr_type)) |
| 449 | iif = skb->dev->ifindex; | 449 | iif = skb->dev->ifindex; |
| 450 | else | 450 | else |
| 451 | iif = l3mdev_master_ifindex(skb->dev); | 451 | iif = l3mdev_master_ifindex(skb_dst(skb)->dev); |
| 452 | 452 | ||
| 453 | /* | 453 | /* |
| 454 | * Must not send error if the source does not uniquely | 454 | * Must not send error if the source does not uniquely |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 6001e781164e..59eb4ed99ce8 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -1366,7 +1366,7 @@ emsgsize: | |||
| 1366 | if (((length > mtu) || | 1366 | if (((length > mtu) || |
| 1367 | (skb && skb_is_gso(skb))) && | 1367 | (skb && skb_is_gso(skb))) && |
| 1368 | (sk->sk_protocol == IPPROTO_UDP) && | 1368 | (sk->sk_protocol == IPPROTO_UDP) && |
| 1369 | (rt->dst.dev->features & NETIF_F_UFO) && | 1369 | (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && |
| 1370 | (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { | 1370 | (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { |
| 1371 | err = ip6_ufo_append_data(sk, queue, getfrag, from, length, | 1371 | err = ip6_ufo_append_data(sk, queue, getfrag, from, length, |
| 1372 | hh_len, fragheaderlen, exthdrlen, | 1372 | hh_len, fragheaderlen, exthdrlen, |
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c index a7520528ecd2..b283f293ee4a 100644 --- a/net/ipv6/ip6_udp_tunnel.c +++ b/net/ipv6/ip6_udp_tunnel.c | |||
| @@ -88,9 +88,6 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, | |||
| 88 | 88 | ||
| 89 | uh->len = htons(skb->len); | 89 | uh->len = htons(skb->len); |
| 90 | 90 | ||
| 91 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
| 92 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | ||
| 93 | | IPSKB_REROUTED); | ||
| 94 | skb_dst_set(skb, dst); | 91 | skb_dst_set(skb, dst); |
| 95 | 92 | ||
| 96 | udp6_set_csum(nocheck, skb, saddr, daddr, skb->len); | 93 | udp6_set_csum(nocheck, skb, saddr, daddr, skb->len); |
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c index 8bfd470cbe72..831f86e1ec08 100644 --- a/net/ipv6/netfilter/nft_dup_ipv6.c +++ b/net/ipv6/netfilter/nft_dup_ipv6.c | |||
| @@ -26,7 +26,7 @@ static void nft_dup_ipv6_eval(const struct nft_expr *expr, | |||
| 26 | { | 26 | { |
| 27 | struct nft_dup_ipv6 *priv = nft_expr_priv(expr); | 27 | struct nft_dup_ipv6 *priv = nft_expr_priv(expr); |
| 28 | struct in6_addr *gw = (struct in6_addr *)®s->data[priv->sreg_addr]; | 28 | struct in6_addr *gw = (struct in6_addr *)®s->data[priv->sreg_addr]; |
| 29 | int oif = regs->data[priv->sreg_dev]; | 29 | int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1; |
| 30 | 30 | ||
| 31 | nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif); | 31 | nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif); |
| 32 | } | 32 | } |
| @@ -57,7 +57,9 @@ static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
| 57 | { | 57 | { |
| 58 | struct nft_dup_ipv6 *priv = nft_expr_priv(expr); | 58 | struct nft_dup_ipv6 *priv = nft_expr_priv(expr); |
| 59 | 59 | ||
| 60 | if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || | 60 | if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr)) |
| 61 | goto nla_put_failure; | ||
| 62 | if (priv->sreg_dev && | ||
| 61 | nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) | 63 | nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) |
| 62 | goto nla_put_failure; | 64 | goto nla_put_failure; |
| 63 | 65 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 947ed1ded026..1b57e11e6e0d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -1364,6 +1364,9 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, | |||
| 1364 | if (rt6->rt6i_flags & RTF_LOCAL) | 1364 | if (rt6->rt6i_flags & RTF_LOCAL) |
| 1365 | return; | 1365 | return; |
| 1366 | 1366 | ||
| 1367 | if (dst_metric_locked(dst, RTAX_MTU)) | ||
| 1368 | return; | ||
| 1369 | |||
| 1367 | dst_confirm(dst); | 1370 | dst_confirm(dst); |
| 1368 | mtu = max_t(u32, mtu, IPV6_MIN_MTU); | 1371 | mtu = max_t(u32, mtu, IPV6_MIN_MTU); |
| 1369 | if (mtu >= dst_mtu(dst)) | 1372 | if (mtu >= dst_mtu(dst)) |
| @@ -2758,6 +2761,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) | |||
| 2758 | PMTU discouvery. | 2761 | PMTU discouvery. |
| 2759 | */ | 2762 | */ |
| 2760 | if (rt->dst.dev == arg->dev && | 2763 | if (rt->dst.dev == arg->dev && |
| 2764 | dst_metric_raw(&rt->dst, RTAX_MTU) && | ||
| 2761 | !dst_metric_locked(&rt->dst, RTAX_MTU)) { | 2765 | !dst_metric_locked(&rt->dst, RTAX_MTU)) { |
| 2762 | if (rt->rt6i_flags & RTF_CACHE) { | 2766 | if (rt->rt6i_flags & RTF_CACHE) { |
| 2763 | /* For RTF_CACHE with rt6i_pmtu == 0 | 2767 | /* For RTF_CACHE with rt6i_pmtu == 0 |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5a27ab4eab39..b9f1fee9a886 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -818,8 +818,12 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 | |||
| 818 | fl6.flowi6_proto = IPPROTO_TCP; | 818 | fl6.flowi6_proto = IPPROTO_TCP; |
| 819 | if (rt6_need_strict(&fl6.daddr) && !oif) | 819 | if (rt6_need_strict(&fl6.daddr) && !oif) |
| 820 | fl6.flowi6_oif = tcp_v6_iif(skb); | 820 | fl6.flowi6_oif = tcp_v6_iif(skb); |
| 821 | else | 821 | else { |
| 822 | fl6.flowi6_oif = oif ? : skb->skb_iif; | 822 | if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) |
| 823 | oif = skb->skb_iif; | ||
| 824 | |||
| 825 | fl6.flowi6_oif = oif; | ||
| 826 | } | ||
| 823 | 827 | ||
| 824 | fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); | 828 | fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); |
| 825 | fl6.fl6_dport = t1->dest; | 829 | fl6.fl6_dport = t1->dest; |
| @@ -1225,7 +1229,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
| 1225 | if (skb->protocol == htons(ETH_P_IP)) | 1229 | if (skb->protocol == htons(ETH_P_IP)) |
| 1226 | return tcp_v4_do_rcv(sk, skb); | 1230 | return tcp_v4_do_rcv(sk, skb); |
| 1227 | 1231 | ||
| 1228 | if (sk_filter(sk, skb)) | 1232 | if (tcp_filter(sk, skb)) |
| 1229 | goto discard; | 1233 | goto discard; |
| 1230 | 1234 | ||
| 1231 | /* | 1235 | /* |
| @@ -1453,8 +1457,10 @@ process: | |||
| 1453 | if (tcp_v6_inbound_md5_hash(sk, skb)) | 1457 | if (tcp_v6_inbound_md5_hash(sk, skb)) |
| 1454 | goto discard_and_relse; | 1458 | goto discard_and_relse; |
| 1455 | 1459 | ||
| 1456 | if (sk_filter(sk, skb)) | 1460 | if (tcp_filter(sk, skb)) |
| 1457 | goto discard_and_relse; | 1461 | goto discard_and_relse; |
| 1462 | th = (const struct tcphdr *)skb->data; | ||
| 1463 | hdr = ipv6_hdr(skb); | ||
| 1458 | 1464 | ||
| 1459 | skb->dev = NULL; | 1465 | skb->dev = NULL; |
| 1460 | 1466 | ||
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index c3c809b2e712..a6e44ef2ec9a 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
| @@ -2845,7 +2845,7 @@ static struct genl_family ip_vs_genl_family = { | |||
| 2845 | .hdrsize = 0, | 2845 | .hdrsize = 0, |
| 2846 | .name = IPVS_GENL_NAME, | 2846 | .name = IPVS_GENL_NAME, |
| 2847 | .version = IPVS_GENL_VERSION, | 2847 | .version = IPVS_GENL_VERSION, |
| 2848 | .maxattr = IPVS_CMD_MAX, | 2848 | .maxattr = IPVS_CMD_ATTR_MAX, |
| 2849 | .netnsok = true, /* Make ipvsadm to work on netns */ | 2849 | .netnsok = true, /* Make ipvsadm to work on netns */ |
| 2850 | }; | 2850 | }; |
| 2851 | 2851 | ||
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 1b07578bedf3..9350530c16c1 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
| @@ -283,6 +283,7 @@ struct ip_vs_sync_buff { | |||
| 283 | */ | 283 | */ |
| 284 | static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) | 284 | static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) |
| 285 | { | 285 | { |
| 286 | memset(ho, 0, sizeof(*ho)); | ||
| 286 | ho->init_seq = get_unaligned_be32(&no->init_seq); | 287 | ho->init_seq = get_unaligned_be32(&no->init_seq); |
| 287 | ho->delta = get_unaligned_be32(&no->delta); | 288 | ho->delta = get_unaligned_be32(&no->delta); |
| 288 | ho->previous_delta = get_unaligned_be32(&no->previous_delta); | 289 | ho->previous_delta = get_unaligned_be32(&no->previous_delta); |
| @@ -917,8 +918,10 @@ static void ip_vs_proc_conn(struct netns_ipvs *ipvs, struct ip_vs_conn_param *pa | |||
| 917 | kfree(param->pe_data); | 918 | kfree(param->pe_data); |
| 918 | } | 919 | } |
| 919 | 920 | ||
| 920 | if (opt) | 921 | if (opt) { |
| 921 | memcpy(&cp->in_seq, opt, sizeof(*opt)); | 922 | cp->in_seq = opt->in_seq; |
| 923 | cp->out_seq = opt->out_seq; | ||
| 924 | } | ||
| 922 | atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); | 925 | atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); |
| 923 | cp->state = state; | 926 | cp->state = state; |
| 924 | cp->old_state = cp->state; | 927 | cp->old_state = cp->state; |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index df2f5a3901df..0f87e5d21be7 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
| @@ -76,6 +76,7 @@ struct conntrack_gc_work { | |||
| 76 | struct delayed_work dwork; | 76 | struct delayed_work dwork; |
| 77 | u32 last_bucket; | 77 | u32 last_bucket; |
| 78 | bool exiting; | 78 | bool exiting; |
| 79 | long next_gc_run; | ||
| 79 | }; | 80 | }; |
| 80 | 81 | ||
| 81 | static __read_mostly struct kmem_cache *nf_conntrack_cachep; | 82 | static __read_mostly struct kmem_cache *nf_conntrack_cachep; |
| @@ -83,9 +84,11 @@ static __read_mostly spinlock_t nf_conntrack_locks_all_lock; | |||
| 83 | static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); | 84 | static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); |
| 84 | static __read_mostly bool nf_conntrack_locks_all; | 85 | static __read_mostly bool nf_conntrack_locks_all; |
| 85 | 86 | ||
| 87 | /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ | ||
| 86 | #define GC_MAX_BUCKETS_DIV 64u | 88 | #define GC_MAX_BUCKETS_DIV 64u |
| 87 | #define GC_MAX_BUCKETS 8192u | 89 | /* upper bound of scan intervals */ |
| 88 | #define GC_INTERVAL (5 * HZ) | 90 | #define GC_INTERVAL_MAX (2 * HZ) |
| 91 | /* maximum conntracks to evict per gc run */ | ||
| 89 | #define GC_MAX_EVICTS 256u | 92 | #define GC_MAX_EVICTS 256u |
| 90 | 93 | ||
| 91 | static struct conntrack_gc_work conntrack_gc_work; | 94 | static struct conntrack_gc_work conntrack_gc_work; |
| @@ -936,13 +939,13 @@ static noinline int early_drop(struct net *net, unsigned int _hash) | |||
| 936 | static void gc_worker(struct work_struct *work) | 939 | static void gc_worker(struct work_struct *work) |
| 937 | { | 940 | { |
| 938 | unsigned int i, goal, buckets = 0, expired_count = 0; | 941 | unsigned int i, goal, buckets = 0, expired_count = 0; |
| 939 | unsigned long next_run = GC_INTERVAL; | ||
| 940 | unsigned int ratio, scanned = 0; | ||
| 941 | struct conntrack_gc_work *gc_work; | 942 | struct conntrack_gc_work *gc_work; |
| 943 | unsigned int ratio, scanned = 0; | ||
| 944 | unsigned long next_run; | ||
| 942 | 945 | ||
| 943 | gc_work = container_of(work, struct conntrack_gc_work, dwork.work); | 946 | gc_work = container_of(work, struct conntrack_gc_work, dwork.work); |
| 944 | 947 | ||
| 945 | goal = min(nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV, GC_MAX_BUCKETS); | 948 | goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV; |
| 946 | i = gc_work->last_bucket; | 949 | i = gc_work->last_bucket; |
| 947 | 950 | ||
| 948 | do { | 951 | do { |
| @@ -982,17 +985,47 @@ static void gc_worker(struct work_struct *work) | |||
| 982 | if (gc_work->exiting) | 985 | if (gc_work->exiting) |
| 983 | return; | 986 | return; |
| 984 | 987 | ||
| 988 | /* | ||
| 989 | * Eviction will normally happen from the packet path, and not | ||
| 990 | * from this gc worker. | ||
| 991 | * | ||
| 992 | * This worker is only here to reap expired entries when system went | ||
| 993 | * idle after a busy period. | ||
| 994 | * | ||
| 995 | * The heuristics below are supposed to balance conflicting goals: | ||
| 996 | * | ||
| 997 | * 1. Minimize time until we notice a stale entry | ||
| 998 | * 2. Maximize scan intervals to not waste cycles | ||
| 999 | * | ||
| 1000 | * Normally, expired_count will be 0, this increases the next_run time | ||
| 1001 | * to priorize 2) above. | ||
| 1002 | * | ||
| 1003 | * As soon as a timed-out entry is found, move towards 1) and increase | ||
| 1004 | * the scan frequency. | ||
| 1005 | * In case we have lots of evictions next scan is done immediately. | ||
| 1006 | */ | ||
| 985 | ratio = scanned ? expired_count * 100 / scanned : 0; | 1007 | ratio = scanned ? expired_count * 100 / scanned : 0; |
| 986 | if (ratio >= 90 || expired_count == GC_MAX_EVICTS) | 1008 | if (ratio >= 90 || expired_count == GC_MAX_EVICTS) { |
| 1009 | gc_work->next_gc_run = 0; | ||
| 987 | next_run = 0; | 1010 | next_run = 0; |
| 1011 | } else if (expired_count) { | ||
| 1012 | gc_work->next_gc_run /= 2U; | ||
| 1013 | next_run = msecs_to_jiffies(1); | ||
| 1014 | } else { | ||
| 1015 | if (gc_work->next_gc_run < GC_INTERVAL_MAX) | ||
| 1016 | gc_work->next_gc_run += msecs_to_jiffies(1); | ||
| 1017 | |||
| 1018 | next_run = gc_work->next_gc_run; | ||
| 1019 | } | ||
| 988 | 1020 | ||
| 989 | gc_work->last_bucket = i; | 1021 | gc_work->last_bucket = i; |
| 990 | schedule_delayed_work(&gc_work->dwork, next_run); | 1022 | queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); |
| 991 | } | 1023 | } |
| 992 | 1024 | ||
| 993 | static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) | 1025 | static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) |
| 994 | { | 1026 | { |
| 995 | INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); | 1027 | INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); |
| 1028 | gc_work->next_gc_run = GC_INTERVAL_MAX; | ||
| 996 | gc_work->exiting = false; | 1029 | gc_work->exiting = false; |
| 997 | } | 1030 | } |
| 998 | 1031 | ||
| @@ -1885,7 +1918,7 @@ int nf_conntrack_init_start(void) | |||
| 1885 | nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); | 1918 | nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); |
| 1886 | 1919 | ||
| 1887 | conntrack_gc_work_init(&conntrack_gc_work); | 1920 | conntrack_gc_work_init(&conntrack_gc_work); |
| 1888 | schedule_delayed_work(&conntrack_gc_work.dwork, GC_INTERVAL); | 1921 | queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX); |
| 1889 | 1922 | ||
| 1890 | return 0; | 1923 | return 0; |
| 1891 | 1924 | ||
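The gc_worker() change above replaces the fixed 5-second rescan with an adaptive interval: an immediate rescan when almost everything scanned had expired, a halved interval while expired entries keep turning up, and a slow climb back toward GC_INTERVAL_MAX once the table looks clean again. A stand-alone sketch of that heuristic follows; the constants and the use of milliseconds instead of jiffies are illustrative only.

#include <stdio.h>

#define GC_INTERVAL_MAX_MS	2000	/* stands in for 2 * HZ */
#define GC_MAX_EVICTS		256u

static long next_gc_run_ms = GC_INTERVAL_MAX_MS;

/* Returns how long to wait before the next scan, in milliseconds. */
static long pick_next_run(unsigned int scanned, unsigned int expired)
{
	unsigned int ratio = scanned ? expired * 100 / scanned : 0;

	if (ratio >= 90 || expired == GC_MAX_EVICTS) {
		next_gc_run_ms = 0;	/* mostly stale: rescan immediately */
		return 0;
	}
	if (expired) {
		next_gc_run_ms /= 2;	/* some stale entries: speed up */
		return 1;
	}
	if (next_gc_run_ms < GC_INTERVAL_MAX_MS)
		next_gc_run_ms += 1;	/* idle: slowly back off again */
	return next_gc_run_ms;
}

int main(void)
{
	printf("idle:   wait %ld ms\n", pick_next_run(1000, 0));
	printf("busy:   wait %ld ms\n", pick_next_run(1000, 100));
	printf("urgent: wait %ld ms\n", pick_next_run(1000, 950));
	return 0;
}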
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 336e21559e01..7341adf7059d 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
| @@ -138,9 +138,14 @@ __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) | |||
| 138 | 138 | ||
| 139 | for (i = 0; i < nf_ct_helper_hsize; i++) { | 139 | for (i = 0; i < nf_ct_helper_hsize; i++) { |
| 140 | hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { | 140 | hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { |
| 141 | if (!strcmp(h->name, name) && | 141 | if (strcmp(h->name, name)) |
| 142 | h->tuple.src.l3num == l3num && | 142 | continue; |
| 143 | h->tuple.dst.protonum == protonum) | 143 | |
| 144 | if (h->tuple.src.l3num != NFPROTO_UNSPEC && | ||
| 145 | h->tuple.src.l3num != l3num) | ||
| 146 | continue; | ||
| 147 | |||
| 148 | if (h->tuple.dst.protonum == protonum) | ||
| 144 | return h; | 149 | return h; |
| 145 | } | 150 | } |
| 146 | } | 151 | } |
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 621b81c7bddc..c3fc14e021ec 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
| @@ -1436,9 +1436,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff, | |||
| 1436 | handler = &sip_handlers[i]; | 1436 | handler = &sip_handlers[i]; |
| 1437 | if (handler->request == NULL) | 1437 | if (handler->request == NULL) |
| 1438 | continue; | 1438 | continue; |
| 1439 | if (*datalen < handler->len || | 1439 | if (*datalen < handler->len + 2 || |
| 1440 | strncasecmp(*dptr, handler->method, handler->len)) | 1440 | strncasecmp(*dptr, handler->method, handler->len)) |
| 1441 | continue; | 1441 | continue; |
| 1442 | if ((*dptr)[handler->len] != ' ' || | ||
| 1443 | !isalpha((*dptr)[handler->len+1])) | ||
| 1444 | continue; | ||
| 1442 | 1445 | ||
| 1443 | if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, | 1446 | if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, |
| 1444 | &matchoff, &matchlen) <= 0) { | 1447 | &matchoff, &matchlen) <= 0) { |
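The SIP helper hunk above tightens request-line matching: besides the case-insensitive method comparison, the byte after the method must be a space and the byte after that must be alphabetic, so a prefix such as "INVITEX ..." no longer matches the INVITE handler. A small user-space check in the same spirit, with a hypothetical function name:

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Does the buffer start with "<method> <something-alphabetic>"? */
static bool sip_method_matches(const char *data, size_t datalen,
			       const char *method)
{
	size_t mlen = strlen(method);

	if (datalen < mlen + 2)
		return false;
	if (strncasecmp(data, method, mlen))
		return false;
	return data[mlen] == ' ' && isalpha((unsigned char)data[mlen + 1]);
}

int main(void)
{
	const char *ok  = "INVITE sip:alice@example.org SIP/2.0";
	const char *bad = "INVITEX sip:alice@example.org SIP/2.0";

	printf("%d %d\n", sip_method_matches(ok, strlen(ok), "INVITE"),
	       sip_method_matches(bad, strlen(bad), "INVITE"));	/* 1 0 */
	return 0;
}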
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 24db22257586..026581b04ea8 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -2956,12 +2956,14 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, | |||
| 2956 | 2956 | ||
| 2957 | err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); | 2957 | err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); |
| 2958 | if (err < 0) | 2958 | if (err < 0) |
| 2959 | goto err2; | 2959 | goto err3; |
| 2960 | 2960 | ||
| 2961 | list_add_tail_rcu(&set->list, &table->sets); | 2961 | list_add_tail_rcu(&set->list, &table->sets); |
| 2962 | table->use++; | 2962 | table->use++; |
| 2963 | return 0; | 2963 | return 0; |
| 2964 | 2964 | ||
| 2965 | err3: | ||
| 2966 | ops->destroy(set); | ||
| 2965 | err2: | 2967 | err2: |
| 2966 | kfree(set); | 2968 | kfree(set); |
| 2967 | err1: | 2969 | err1: |
| @@ -3452,14 +3454,15 @@ void *nft_set_elem_init(const struct nft_set *set, | |||
| 3452 | return elem; | 3454 | return elem; |
| 3453 | } | 3455 | } |
| 3454 | 3456 | ||
| 3455 | void nft_set_elem_destroy(const struct nft_set *set, void *elem) | 3457 | void nft_set_elem_destroy(const struct nft_set *set, void *elem, |
| 3458 | bool destroy_expr) | ||
| 3456 | { | 3459 | { |
| 3457 | struct nft_set_ext *ext = nft_set_elem_ext(set, elem); | 3460 | struct nft_set_ext *ext = nft_set_elem_ext(set, elem); |
| 3458 | 3461 | ||
| 3459 | nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); | 3462 | nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); |
| 3460 | if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) | 3463 | if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) |
| 3461 | nft_data_uninit(nft_set_ext_data(ext), set->dtype); | 3464 | nft_data_uninit(nft_set_ext_data(ext), set->dtype); |
| 3462 | if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) | 3465 | if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) |
| 3463 | nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); | 3466 | nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); |
| 3464 | 3467 | ||
| 3465 | kfree(elem); | 3468 | kfree(elem); |
| @@ -3565,6 +3568,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, | |||
| 3565 | dreg = nft_type_to_reg(set->dtype); | 3568 | dreg = nft_type_to_reg(set->dtype); |
| 3566 | list_for_each_entry(binding, &set->bindings, list) { | 3569 | list_for_each_entry(binding, &set->bindings, list) { |
| 3567 | struct nft_ctx bind_ctx = { | 3570 | struct nft_ctx bind_ctx = { |
| 3571 | .net = ctx->net, | ||
| 3568 | .afi = ctx->afi, | 3572 | .afi = ctx->afi, |
| 3569 | .table = ctx->table, | 3573 | .table = ctx->table, |
| 3570 | .chain = (struct nft_chain *)binding->chain, | 3574 | .chain = (struct nft_chain *)binding->chain, |
| @@ -3812,7 +3816,7 @@ void nft_set_gc_batch_release(struct rcu_head *rcu) | |||
| 3812 | 3816 | ||
| 3813 | gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); | 3817 | gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); |
| 3814 | for (i = 0; i < gcb->head.cnt; i++) | 3818 | for (i = 0; i < gcb->head.cnt; i++) |
| 3815 | nft_set_elem_destroy(gcb->head.set, gcb->elems[i]); | 3819 | nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true); |
| 3816 | kfree(gcb); | 3820 | kfree(gcb); |
| 3817 | } | 3821 | } |
| 3818 | EXPORT_SYMBOL_GPL(nft_set_gc_batch_release); | 3822 | EXPORT_SYMBOL_GPL(nft_set_gc_batch_release); |
| @@ -4030,7 +4034,7 @@ static void nf_tables_commit_release(struct nft_trans *trans) | |||
| 4030 | break; | 4034 | break; |
| 4031 | case NFT_MSG_DELSETELEM: | 4035 | case NFT_MSG_DELSETELEM: |
| 4032 | nft_set_elem_destroy(nft_trans_elem_set(trans), | 4036 | nft_set_elem_destroy(nft_trans_elem_set(trans), |
| 4033 | nft_trans_elem(trans).priv); | 4037 | nft_trans_elem(trans).priv, true); |
| 4034 | break; | 4038 | break; |
| 4035 | } | 4039 | } |
| 4036 | kfree(trans); | 4040 | kfree(trans); |
| @@ -4171,7 +4175,7 @@ static void nf_tables_abort_release(struct nft_trans *trans) | |||
| 4171 | break; | 4175 | break; |
| 4172 | case NFT_MSG_NEWSETELEM: | 4176 | case NFT_MSG_NEWSETELEM: |
| 4173 | nft_set_elem_destroy(nft_trans_elem_set(trans), | 4177 | nft_set_elem_destroy(nft_trans_elem_set(trans), |
| 4174 | nft_trans_elem(trans).priv); | 4178 | nft_trans_elem(trans).priv, true); |
| 4175 | break; | 4179 | break; |
| 4176 | } | 4180 | } |
| 4177 | kfree(trans); | 4181 | kfree(trans); |
| @@ -4421,7 +4425,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, | |||
| 4421 | * Otherwise a 0 is returned and the attribute value is stored in the | 4425 | * Otherwise a 0 is returned and the attribute value is stored in the |
| 4422 | * destination variable. | 4426 | * destination variable. |
| 4423 | */ | 4427 | */ |
| 4424 | unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) | 4428 | int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) |
| 4425 | { | 4429 | { |
| 4426 | u32 val; | 4430 | u32 val; |
| 4427 | 4431 | ||
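Among the nf_tables_api.c fixes above, the nft_parse_u32_check() signature change from unsigned int to int matters because the function reports failures as negative errno values; with an unsigned return type a caller's "err < 0" test can never be true. A tiny stand-alone illustration of that C gotcha, with hypothetical helpers:

#include <errno.h>
#include <stdio.h>

static unsigned int parse_buggy(unsigned int value)
{
	if (value > 255)
		return -ERANGE;	/* wraps to a huge positive number */
	return 0;
}

static int parse_fixed(unsigned int value)
{
	if (value > 255)
		return -ERANGE;
	return 0;
}

int main(void)
{
	unsigned int err_u = parse_buggy(1000);
	int err_s = parse_fixed(1000);

	/* compilers typically warn that the first test is always false */
	printf("unsigned return: err < 0 -> %s\n", err_u < 0 ? "caught" : "missed");
	printf("signed return:   err < 0 -> %s\n", err_s < 0 ? "caught" : "missed");
	return 0;
}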
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 517f08767a3c..31ca94793aa9 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c | |||
| @@ -44,18 +44,22 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr, | |||
| 44 | ®s->data[priv->sreg_key], | 44 | ®s->data[priv->sreg_key], |
| 45 | ®s->data[priv->sreg_data], | 45 | ®s->data[priv->sreg_data], |
| 46 | timeout, GFP_ATOMIC); | 46 | timeout, GFP_ATOMIC); |
| 47 | if (elem == NULL) { | 47 | if (elem == NULL) |
| 48 | if (set->size) | 48 | goto err1; |
| 49 | atomic_dec(&set->nelems); | ||
| 50 | return NULL; | ||
| 51 | } | ||
| 52 | 49 | ||
| 53 | ext = nft_set_elem_ext(set, elem); | 50 | ext = nft_set_elem_ext(set, elem); |
| 54 | if (priv->expr != NULL && | 51 | if (priv->expr != NULL && |
| 55 | nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0) | 52 | nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0) |
| 56 | return NULL; | 53 | goto err2; |
| 57 | 54 | ||
| 58 | return elem; | 55 | return elem; |
| 56 | |||
| 57 | err2: | ||
| 58 | nft_set_elem_destroy(set, elem, false); | ||
| 59 | err1: | ||
| 60 | if (set->size) | ||
| 61 | atomic_dec(&set->nelems); | ||
| 62 | return NULL; | ||
| 59 | } | 63 | } |
| 60 | 64 | ||
| 61 | static void nft_dynset_eval(const struct nft_expr *expr, | 65 | static void nft_dynset_eval(const struct nft_expr *expr, |
| @@ -139,6 +143,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx, | |||
| 139 | return PTR_ERR(set); | 143 | return PTR_ERR(set); |
| 140 | } | 144 | } |
| 141 | 145 | ||
| 146 | if (set->ops->update == NULL) | ||
| 147 | return -EOPNOTSUPP; | ||
| 148 | |||
| 142 | if (set->flags & NFT_SET_CONSTANT) | 149 | if (set->flags & NFT_SET_CONSTANT) |
| 143 | return -EBUSY; | 150 | return -EBUSY; |
| 144 | 151 | ||
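The nft_dynset.c change above reworks the failure handling in nft_dynset_new() into the usual goto-unwind shape: if cloning the expression fails, the freshly built element is destroyed (err2) and only then is the set's element count dropped again (err1), so nothing leaks and the counter stays balanced. A generic sketch of that reverse-order unwind pattern, using purely hypothetical resources:

#include <stdio.h>
#include <stdlib.h>

struct thing {
	int *a;
	int *b;
};

/* Acquire two resources; on failure unwind in reverse order via gotos. */
static struct thing *thing_new(void)
{
	struct thing *t = malloc(sizeof(*t));

	if (!t)
		goto err0;
	t->a = malloc(sizeof(*t->a));
	if (!t->a)
		goto err1;
	t->b = malloc(sizeof(*t->b));
	if (!t->b)
		goto err2;
	return t;

err2:
	free(t->a);	/* undo the second step */
err1:
	free(t);	/* undo the first step */
err0:
	return NULL;
}

int main(void)
{
	struct thing *t = thing_new();

	printf("allocation %s\n", t ? "succeeded" : "failed");
	if (t) {
		free(t->b);
		free(t->a);
		free(t);
	}
	return 0;
}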
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 3794cb2fc788..a3dface3e6e6 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c | |||
| @@ -98,7 +98,7 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key, | |||
| 98 | const struct nft_set_ext **ext) | 98 | const struct nft_set_ext **ext) |
| 99 | { | 99 | { |
| 100 | struct nft_hash *priv = nft_set_priv(set); | 100 | struct nft_hash *priv = nft_set_priv(set); |
| 101 | struct nft_hash_elem *he; | 101 | struct nft_hash_elem *he, *prev; |
| 102 | struct nft_hash_cmp_arg arg = { | 102 | struct nft_hash_cmp_arg arg = { |
| 103 | .genmask = NFT_GENMASK_ANY, | 103 | .genmask = NFT_GENMASK_ANY, |
| 104 | .set = set, | 104 | .set = set, |
| @@ -112,15 +112,24 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key, | |||
| 112 | he = new(set, expr, regs); | 112 | he = new(set, expr, regs); |
| 113 | if (he == NULL) | 113 | if (he == NULL) |
| 114 | goto err1; | 114 | goto err1; |
| 115 | if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node, | 115 | |
| 116 | nft_hash_params)) | 116 | prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node, |
| 117 | nft_hash_params); | ||
| 118 | if (IS_ERR(prev)) | ||
| 117 | goto err2; | 119 | goto err2; |
| 120 | |||
| 121 | /* Another cpu may race to insert the element with the same key */ | ||
| 122 | if (prev) { | ||
| 123 | nft_set_elem_destroy(set, he, true); | ||
| 124 | he = prev; | ||
| 125 | } | ||
| 126 | |||
| 118 | out: | 127 | out: |
| 119 | *ext = &he->ext; | 128 | *ext = &he->ext; |
| 120 | return true; | 129 | return true; |
| 121 | 130 | ||
| 122 | err2: | 131 | err2: |
| 123 | nft_set_elem_destroy(set, he); | 132 | nft_set_elem_destroy(set, he, true); |
| 124 | err1: | 133 | err1: |
| 125 | return false; | 134 | return false; |
| 126 | } | 135 | } |
| @@ -332,7 +341,7 @@ static int nft_hash_init(const struct nft_set *set, | |||
| 332 | 341 | ||
| 333 | static void nft_hash_elem_destroy(void *ptr, void *arg) | 342 | static void nft_hash_elem_destroy(void *ptr, void *arg) |
| 334 | { | 343 | { |
| 335 | nft_set_elem_destroy((const struct nft_set *)arg, ptr); | 344 | nft_set_elem_destroy((const struct nft_set *)arg, ptr, true); |
| 336 | } | 345 | } |
| 337 | 346 | ||
| 338 | static void nft_hash_destroy(const struct nft_set *set) | 347 | static void nft_hash_destroy(const struct nft_set *set) |
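In the nft_set_hash.c hunk above, rhashtable_lookup_get_insert_key() reports whether another CPU inserted an element with the same key first; if so, the local element is destroyed and the pre-existing one is used instead. The single-threaded toy below only sketches that "insert, or adopt the winner" shape (hypothetical table and names; collisions simply overwrite); the real code relies on the rhashtable API for the atomicity.

#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 16

struct elem {
	int key;
	int val;
};

static struct elem *table[TABLE_SIZE];

/* Insert e unless an element with the same key already exists;
 * return the pre-existing element in that case, NULL on success. */
static struct elem *insert_get(struct elem *e)
{
	unsigned int b = (unsigned int)e->key % TABLE_SIZE;

	if (table[b] && table[b]->key == e->key)
		return table[b];	/* somebody else got here first */
	table[b] = e;
	return NULL;
}

int main(void)
{
	struct elem *he = malloc(sizeof(*he));
	struct elem *prev;

	if (!he)
		return 1;
	he->key = 42;
	he->val = 1;

	prev = insert_get(he);
	if (prev) {
		free(he);	/* destroy our copy, like nft_set_elem_destroy() */
		he = prev;	/* and keep using the element that won */
	}

	printf("key %d -> val %d\n", he->key, he->val);
	return 0;
}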
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 38b5bda242f8..36493a7cae88 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c | |||
| @@ -266,7 +266,7 @@ static void nft_rbtree_destroy(const struct nft_set *set) | |||
| 266 | while ((node = priv->root.rb_node) != NULL) { | 266 | while ((node = priv->root.rb_node) != NULL) { |
| 267 | rb_erase(node, &priv->root); | 267 | rb_erase(node, &priv->root); |
| 268 | rbe = rb_entry(node, struct nft_rbtree_elem, node); | 268 | rbe = rb_entry(node, struct nft_rbtree_elem, node); |
| 269 | nft_set_elem_destroy(set, rbe); | 269 | nft_set_elem_destroy(set, rbe, true); |
| 270 | } | 270 | } |
| 271 | } | 271 | } |
| 272 | 272 | ||
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index 69f78e96fdb4..b83e158e116a 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c | |||
| @@ -44,7 +44,7 @@ connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 44 | u_int32_t newmark; | 44 | u_int32_t newmark; |
| 45 | 45 | ||
| 46 | ct = nf_ct_get(skb, &ctinfo); | 46 | ct = nf_ct_get(skb, &ctinfo); |
| 47 | if (ct == NULL) | 47 | if (ct == NULL || nf_ct_is_untracked(ct)) |
| 48 | return XT_CONTINUE; | 48 | return XT_CONTINUE; |
| 49 | 49 | ||
| 50 | switch (info->mode) { | 50 | switch (info->mode) { |
| @@ -97,7 +97,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
| 97 | const struct nf_conn *ct; | 97 | const struct nf_conn *ct; |
| 98 | 98 | ||
| 99 | ct = nf_ct_get(skb, &ctinfo); | 99 | ct = nf_ct_get(skb, &ctinfo); |
| 100 | if (ct == NULL) | 100 | if (ct == NULL || nf_ct_is_untracked(ct)) |
| 101 | return false; | 101 | return false; |
| 102 | 102 | ||
| 103 | return ((ct->mark & info->mask) == info->mark) ^ info->invert; | 103 | return ((ct->mark & info->mask) == info->mark) ^ info->invert; |
diff --git a/net/netlink/diag.c b/net/netlink/diag.c index b2f0e986a6f4..a5546249fb10 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c | |||
| @@ -178,11 +178,8 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 178 | } | 178 | } |
| 179 | cb->args[1] = i; | 179 | cb->args[1] = i; |
| 180 | } else { | 180 | } else { |
| 181 | if (req->sdiag_protocol >= MAX_LINKS) { | 181 | if (req->sdiag_protocol >= MAX_LINKS) |
| 182 | read_unlock(&nl_table_lock); | ||
| 183 | rcu_read_unlock(); | ||
| 184 | return -ENOENT; | 182 | return -ENOENT; |
| 185 | } | ||
| 186 | 183 | ||
| 187 | err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); | 184 | err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); |
| 188 | } | 185 | } |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 23cc12639ba7..49c28e8ef01b 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
| @@ -404,7 +404,7 @@ int __genl_register_family(struct genl_family *family) | |||
| 404 | 404 | ||
| 405 | err = genl_validate_assign_mc_groups(family); | 405 | err = genl_validate_assign_mc_groups(family); |
| 406 | if (err) | 406 | if (err) |
| 407 | goto errout_locked; | 407 | goto errout_free; |
| 408 | 408 | ||
| 409 | list_add_tail(&family->family_list, genl_family_chain(family->id)); | 409 | list_add_tail(&family->family_list, genl_family_chain(family->id)); |
| 410 | genl_unlock_all(); | 410 | genl_unlock_all(); |
| @@ -417,6 +417,8 @@ int __genl_register_family(struct genl_family *family) | |||
| 417 | 417 | ||
| 418 | return 0; | 418 | return 0; |
| 419 | 419 | ||
| 420 | errout_free: | ||
| 421 | kfree(family->attrbuf); | ||
| 420 | errout_locked: | 422 | errout_locked: |
| 421 | genl_unlock_all(); | 423 | genl_unlock_all(); |
| 422 | errout: | 424 | errout: |
diff --git a/net/sctp/input.c b/net/sctp/input.c index a2ea1d1cc06a..a01a56ec8b8c 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
| @@ -181,9 +181,10 @@ int sctp_rcv(struct sk_buff *skb) | |||
| 181 | * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB | 181 | * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB |
| 182 | */ | 182 | */ |
| 183 | if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { | 183 | if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { |
| 184 | if (asoc) { | 184 | if (transport) { |
| 185 | sctp_association_put(asoc); | 185 | sctp_transport_put(transport); |
| 186 | asoc = NULL; | 186 | asoc = NULL; |
| 187 | transport = NULL; | ||
| 187 | } else { | 188 | } else { |
| 188 | sctp_endpoint_put(ep); | 189 | sctp_endpoint_put(ep); |
| 189 | ep = NULL; | 190 | ep = NULL; |
| @@ -269,8 +270,8 @@ int sctp_rcv(struct sk_buff *skb) | |||
| 269 | bh_unlock_sock(sk); | 270 | bh_unlock_sock(sk); |
| 270 | 271 | ||
| 271 | /* Release the asoc/ep ref we took in the lookup calls. */ | 272 | /* Release the asoc/ep ref we took in the lookup calls. */ |
| 272 | if (asoc) | 273 | if (transport) |
| 273 | sctp_association_put(asoc); | 274 | sctp_transport_put(transport); |
| 274 | else | 275 | else |
| 275 | sctp_endpoint_put(ep); | 276 | sctp_endpoint_put(ep); |
| 276 | 277 | ||
| @@ -283,8 +284,8 @@ discard_it: | |||
| 283 | 284 | ||
| 284 | discard_release: | 285 | discard_release: |
| 285 | /* Release the asoc/ep ref we took in the lookup calls. */ | 286 | /* Release the asoc/ep ref we took in the lookup calls. */ |
| 286 | if (asoc) | 287 | if (transport) |
| 287 | sctp_association_put(asoc); | 288 | sctp_transport_put(transport); |
| 288 | else | 289 | else |
| 289 | sctp_endpoint_put(ep); | 290 | sctp_endpoint_put(ep); |
| 290 | 291 | ||
| @@ -300,6 +301,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) | |||
| 300 | { | 301 | { |
| 301 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; | 302 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; |
| 302 | struct sctp_inq *inqueue = &chunk->rcvr->inqueue; | 303 | struct sctp_inq *inqueue = &chunk->rcvr->inqueue; |
| 304 | struct sctp_transport *t = chunk->transport; | ||
| 303 | struct sctp_ep_common *rcvr = NULL; | 305 | struct sctp_ep_common *rcvr = NULL; |
| 304 | int backloged = 0; | 306 | int backloged = 0; |
| 305 | 307 | ||
| @@ -351,7 +353,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) | |||
| 351 | done: | 353 | done: |
| 352 | /* Release the refs we took in sctp_add_backlog */ | 354 | /* Release the refs we took in sctp_add_backlog */ |
| 353 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) | 355 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) |
| 354 | sctp_association_put(sctp_assoc(rcvr)); | 356 | sctp_transport_put(t); |
| 355 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) | 357 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) |
| 356 | sctp_endpoint_put(sctp_ep(rcvr)); | 358 | sctp_endpoint_put(sctp_ep(rcvr)); |
| 357 | else | 359 | else |
| @@ -363,6 +365,7 @@ done: | |||
| 363 | static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) | 365 | static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) |
| 364 | { | 366 | { |
| 365 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; | 367 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; |
| 368 | struct sctp_transport *t = chunk->transport; | ||
| 366 | struct sctp_ep_common *rcvr = chunk->rcvr; | 369 | struct sctp_ep_common *rcvr = chunk->rcvr; |
| 367 | int ret; | 370 | int ret; |
| 368 | 371 | ||
| @@ -373,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) | |||
| 373 | * from us | 376 | * from us |
| 374 | */ | 377 | */ |
| 375 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) | 378 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) |
| 376 | sctp_association_hold(sctp_assoc(rcvr)); | 379 | sctp_transport_hold(t); |
| 377 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) | 380 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) |
| 378 | sctp_endpoint_hold(sctp_ep(rcvr)); | 381 | sctp_endpoint_hold(sctp_ep(rcvr)); |
| 379 | else | 382 | else |
| @@ -537,15 +540,15 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb, | |||
| 537 | return sk; | 540 | return sk; |
| 538 | 541 | ||
| 539 | out: | 542 | out: |
| 540 | sctp_association_put(asoc); | 543 | sctp_transport_put(transport); |
| 541 | return NULL; | 544 | return NULL; |
| 542 | } | 545 | } |
| 543 | 546 | ||
| 544 | /* Common cleanup code for icmp/icmpv6 error handler. */ | 547 | /* Common cleanup code for icmp/icmpv6 error handler. */ |
| 545 | void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) | 548 | void sctp_err_finish(struct sock *sk, struct sctp_transport *t) |
| 546 | { | 549 | { |
| 547 | bh_unlock_sock(sk); | 550 | bh_unlock_sock(sk); |
| 548 | sctp_association_put(asoc); | 551 | sctp_transport_put(t); |
| 549 | } | 552 | } |
| 550 | 553 | ||
| 551 | /* | 554 | /* |
| @@ -641,7 +644,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) | |||
| 641 | } | 644 | } |
| 642 | 645 | ||
| 643 | out_unlock: | 646 | out_unlock: |
| 644 | sctp_err_finish(sk, asoc); | 647 | sctp_err_finish(sk, transport); |
| 645 | } | 648 | } |
| 646 | 649 | ||
| 647 | /* | 650 | /* |
| @@ -952,11 +955,8 @@ static struct sctp_association *__sctp_lookup_association( | |||
| 952 | goto out; | 955 | goto out; |
| 953 | 956 | ||
| 954 | asoc = t->asoc; | 957 | asoc = t->asoc; |
| 955 | sctp_association_hold(asoc); | ||
| 956 | *pt = t; | 958 | *pt = t; |
| 957 | 959 | ||
| 958 | sctp_transport_put(t); | ||
| 959 | |||
| 960 | out: | 960 | out: |
| 961 | return asoc; | 961 | return asoc; |
| 962 | } | 962 | } |
| @@ -986,7 +986,7 @@ int sctp_has_association(struct net *net, | |||
| 986 | struct sctp_transport *transport; | 986 | struct sctp_transport *transport; |
| 987 | 987 | ||
| 988 | if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) { | 988 | if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) { |
| 989 | sctp_association_put(asoc); | 989 | sctp_transport_put(transport); |
| 990 | return 1; | 990 | return 1; |
| 991 | } | 991 | } |
| 992 | 992 | ||
| @@ -1021,7 +1021,6 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net, | |||
| 1021 | struct sctphdr *sh = sctp_hdr(skb); | 1021 | struct sctphdr *sh = sctp_hdr(skb); |
| 1022 | union sctp_params params; | 1022 | union sctp_params params; |
| 1023 | sctp_init_chunk_t *init; | 1023 | sctp_init_chunk_t *init; |
| 1024 | struct sctp_transport *transport; | ||
| 1025 | struct sctp_af *af; | 1024 | struct sctp_af *af; |
| 1026 | 1025 | ||
| 1027 | /* | 1026 | /* |
| @@ -1052,7 +1051,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net, | |||
| 1052 | 1051 | ||
| 1053 | af->from_addr_param(paddr, params.addr, sh->source, 0); | 1052 | af->from_addr_param(paddr, params.addr, sh->source, 0); |
| 1054 | 1053 | ||
| 1055 | asoc = __sctp_lookup_association(net, laddr, paddr, &transport); | 1054 | asoc = __sctp_lookup_association(net, laddr, paddr, transportp); |
| 1056 | if (asoc) | 1055 | if (asoc) |
| 1057 | return asoc; | 1056 | return asoc; |
| 1058 | } | 1057 | } |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index f473779e8b1c..176af3080a2b 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
| @@ -198,7 +198,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | out_unlock: | 200 | out_unlock: |
| 201 | sctp_err_finish(sk, asoc); | 201 | sctp_err_finish(sk, transport); |
| 202 | out: | 202 | out: |
| 203 | if (likely(idev != NULL)) | 203 | if (likely(idev != NULL)) |
| 204 | in6_dev_put(idev); | 204 | in6_dev_put(idev); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9fbb6feb8c27..f23ad913dc7a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -1214,9 +1214,12 @@ static int __sctp_connect(struct sock *sk, | |||
| 1214 | 1214 | ||
| 1215 | timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); | 1215 | timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); |
| 1216 | 1216 | ||
| 1217 | err = sctp_wait_for_connect(asoc, &timeo); | 1217 | if (assoc_id) |
| 1218 | if ((err == 0 || err == -EINPROGRESS) && assoc_id) | ||
| 1219 | *assoc_id = asoc->assoc_id; | 1218 | *assoc_id = asoc->assoc_id; |
| 1219 | err = sctp_wait_for_connect(asoc, &timeo); | ||
| 1220 | /* Note: the asoc may be freed after the return of | ||
| 1221 | * sctp_wait_for_connect. | ||
| 1222 | */ | ||
| 1220 | 1223 | ||
| 1221 | /* Don't free association on exit. */ | 1224 | /* Don't free association on exit. */ |
| 1222 | asoc = NULL; | 1225 | asoc = NULL; |
| @@ -4282,19 +4285,18 @@ static void sctp_shutdown(struct sock *sk, int how) | |||
| 4282 | { | 4285 | { |
| 4283 | struct net *net = sock_net(sk); | 4286 | struct net *net = sock_net(sk); |
| 4284 | struct sctp_endpoint *ep; | 4287 | struct sctp_endpoint *ep; |
| 4285 | struct sctp_association *asoc; | ||
| 4286 | 4288 | ||
| 4287 | if (!sctp_style(sk, TCP)) | 4289 | if (!sctp_style(sk, TCP)) |
| 4288 | return; | 4290 | return; |
| 4289 | 4291 | ||
| 4290 | if (how & SEND_SHUTDOWN) { | 4292 | ep = sctp_sk(sk)->ep; |
| 4293 | if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) { | ||
| 4294 | struct sctp_association *asoc; | ||
| 4295 | |||
| 4291 | sk->sk_state = SCTP_SS_CLOSING; | 4296 | sk->sk_state = SCTP_SS_CLOSING; |
| 4292 | ep = sctp_sk(sk)->ep; | 4297 | asoc = list_entry(ep->asocs.next, |
| 4293 | if (!list_empty(&ep->asocs)) { | 4298 | struct sctp_association, asocs); |
| 4294 | asoc = list_entry(ep->asocs.next, | 4299 | sctp_primitive_SHUTDOWN(net, asoc, NULL); |
| 4295 | struct sctp_association, asocs); | ||
| 4296 | sctp_primitive_SHUTDOWN(net, asoc, NULL); | ||
| 4297 | } | ||
| 4298 | } | 4300 | } |
| 4299 | } | 4301 | } |
| 4300 | 4302 | ||
| @@ -4480,12 +4482,9 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), | |||
| 4480 | if (!transport || !sctp_transport_hold(transport)) | 4482 | if (!transport || !sctp_transport_hold(transport)) |
| 4481 | goto out; | 4483 | goto out; |
| 4482 | 4484 | ||
| 4483 | sctp_association_hold(transport->asoc); | ||
| 4484 | sctp_transport_put(transport); | ||
| 4485 | |||
| 4486 | rcu_read_unlock(); | 4485 | rcu_read_unlock(); |
| 4487 | err = cb(transport, p); | 4486 | err = cb(transport, p); |
| 4488 | sctp_association_put(transport->asoc); | 4487 | sctp_transport_put(transport); |
| 4489 | 4488 | ||
| 4490 | out: | 4489 | out: |
| 4491 | return err; | 4490 | return err; |
diff --git a/net/socket.c b/net/socket.c index 5a9bf5ee2464..272518b087c8 100644 --- a/net/socket.c +++ b/net/socket.c | |||
| @@ -2038,6 +2038,8 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, | |||
| 2038 | if (err) | 2038 | if (err) |
| 2039 | break; | 2039 | break; |
| 2040 | ++datagrams; | 2040 | ++datagrams; |
| 2041 | if (msg_data_left(&msg_sys)) | ||
| 2042 | break; | ||
| 2041 | cond_resched(); | 2043 | cond_resched(); |
| 2042 | } | 2044 | } |
| 2043 | 2045 | ||
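The __sys_sendmmsg() change above stops the batching loop as soon as a message was only partially consumed, rather than continuing to push the remaining messages after a short send. A user-space batch sender following the same stop-on-short-send idea (illustrative only, hypothetical helper name):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Send each message in turn, but stop the batch after a short send. */
static int send_batch(int fd, const char *msgs[], int n)
{
	int sent = 0;

	for (int i = 0; i < n; i++) {
		size_t len = strlen(msgs[i]);
		ssize_t ret = send(fd, msgs[i], len, MSG_DONTWAIT);

		if (ret < 0)
			break;
		sent++;
		if ((size_t)ret < len)
			break;		/* partial send: don't keep pushing */
	}
	return sent;
}

int main(void)
{
	int sv[2];
	const char *msgs[] = { "one", "two", "three" };

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
		return 1;
	printf("sent %d of 3 messages\n", send_batch(sv[0], msgs, 3));
	return 0;
}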
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 145082e2ba36..5d1c14a2f268 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -2812,7 +2812,8 @@ static int unix_seq_show(struct seq_file *seq, void *v) | |||
| 2812 | i++; | 2812 | i++; |
| 2813 | } | 2813 | } |
| 2814 | for ( ; i < len; i++) | 2814 | for ( ; i < len; i++) |
| 2815 | seq_putc(seq, u->addr->name->sun_path[i]); | 2815 | seq_putc(seq, u->addr->name->sun_path[i] ?: |
| 2816 | '@'); | ||
| 2816 | } | 2817 | } |
| 2817 | unix_state_unlock(s); | 2818 | unix_state_unlock(s); |
| 2818 | seq_putc(seq, '\n'); | 2819 | seq_putc(seq, '\n'); |
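The af_unix change above makes /proc/net/unix render NUL bytes inside an abstract socket name as '@' rather than emitting raw NUL characters (the kernel uses the GNU "x ?: y" shorthand for the substitution). A small user-space rendering in the same spirit, with a hypothetical abstract name:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

/* Print an AF_UNIX name, showing NUL bytes of an abstract name as '@'. */
static void print_sun_path(const struct sockaddr_un *sun, size_t path_len)
{
	for (size_t i = 0; i < path_len; i++) {
		char c = sun->sun_path[i];

		putchar(c ? c : '@');
	}
	putchar('\n');
}

int main(void)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX };

	/* abstract name: leading NUL byte, then "mysock" (hypothetical) */
	memcpy(sun.sun_path, "\0mysock", 7);
	print_sun_path(&sun, 7);	/* prints "@mysock" */
	return 0;
}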
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 12b7304d55dc..72c58675973e 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile | |||
| @@ -27,6 +27,7 @@ hostprogs-y += xdp2 | |||
| 27 | hostprogs-y += test_current_task_under_cgroup | 27 | hostprogs-y += test_current_task_under_cgroup |
| 28 | hostprogs-y += trace_event | 28 | hostprogs-y += trace_event |
| 29 | hostprogs-y += sampleip | 29 | hostprogs-y += sampleip |
| 30 | hostprogs-y += tc_l2_redirect | ||
| 30 | 31 | ||
| 31 | test_verifier-objs := test_verifier.o libbpf.o | 32 | test_verifier-objs := test_verifier.o libbpf.o |
| 32 | test_maps-objs := test_maps.o libbpf.o | 33 | test_maps-objs := test_maps.o libbpf.o |
| @@ -56,6 +57,7 @@ test_current_task_under_cgroup-objs := bpf_load.o libbpf.o \ | |||
| 56 | test_current_task_under_cgroup_user.o | 57 | test_current_task_under_cgroup_user.o |
| 57 | trace_event-objs := bpf_load.o libbpf.o trace_event_user.o | 58 | trace_event-objs := bpf_load.o libbpf.o trace_event_user.o |
| 58 | sampleip-objs := bpf_load.o libbpf.o sampleip_user.o | 59 | sampleip-objs := bpf_load.o libbpf.o sampleip_user.o |
| 60 | tc_l2_redirect-objs := bpf_load.o libbpf.o tc_l2_redirect_user.o | ||
| 59 | 61 | ||
| 60 | # Tell kbuild to always build the programs | 62 | # Tell kbuild to always build the programs |
| 61 | always := $(hostprogs-y) | 63 | always := $(hostprogs-y) |
| @@ -72,6 +74,7 @@ always += test_probe_write_user_kern.o | |||
| 72 | always += trace_output_kern.o | 74 | always += trace_output_kern.o |
| 73 | always += tcbpf1_kern.o | 75 | always += tcbpf1_kern.o |
| 74 | always += tcbpf2_kern.o | 76 | always += tcbpf2_kern.o |
| 77 | always += tc_l2_redirect_kern.o | ||
| 75 | always += lathist_kern.o | 78 | always += lathist_kern.o |
| 76 | always += offwaketime_kern.o | 79 | always += offwaketime_kern.o |
| 77 | always += spintest_kern.o | 80 | always += spintest_kern.o |
| @@ -111,6 +114,7 @@ HOSTLOADLIBES_xdp2 += -lelf | |||
| 111 | HOSTLOADLIBES_test_current_task_under_cgroup += -lelf | 114 | HOSTLOADLIBES_test_current_task_under_cgroup += -lelf |
| 112 | HOSTLOADLIBES_trace_event += -lelf | 115 | HOSTLOADLIBES_trace_event += -lelf |
| 113 | HOSTLOADLIBES_sampleip += -lelf | 116 | HOSTLOADLIBES_sampleip += -lelf |
| 117 | HOSTLOADLIBES_tc_l2_redirect += -l elf | ||
| 114 | 118 | ||
| 115 | # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: | 119 | # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: |
| 116 | # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang | 120 | # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang |
diff --git a/samples/bpf/tc_l2_redirect.sh b/samples/bpf/tc_l2_redirect.sh new file mode 100755 index 000000000000..80a05591a140 --- /dev/null +++ b/samples/bpf/tc_l2_redirect.sh | |||
| @@ -0,0 +1,173 @@ | |||
| 1 | #!/bin/bash | ||
| 2 | |||
| 3 | [[ -z $TC ]] && TC='tc' | ||
| 4 | [[ -z $IP ]] && IP='ip' | ||
| 5 | |||
| 6 | REDIRECT_USER='./tc_l2_redirect' | ||
| 7 | REDIRECT_BPF='./tc_l2_redirect_kern.o' | ||
| 8 | |||
| 9 | RP_FILTER=$(< /proc/sys/net/ipv4/conf/all/rp_filter) | ||
| 10 | IPV6_FORWARDING=$(< /proc/sys/net/ipv6/conf/all/forwarding) | ||
| 11 | |||
| 12 | function config_common { | ||
| 13 | local tun_type=$1 | ||
| 14 | |||
| 15 | $IP netns add ns1 | ||
| 16 | $IP netns add ns2 | ||
| 17 | $IP link add ve1 type veth peer name vens1 | ||
| 18 | $IP link add ve2 type veth peer name vens2 | ||
| 19 | $IP link set dev ve1 up | ||
| 20 | $IP link set dev ve2 up | ||
| 21 | $IP link set dev ve1 mtu 1500 | ||
| 22 | $IP link set dev ve2 mtu 1500 | ||
| 23 | $IP link set dev vens1 netns ns1 | ||
| 24 | $IP link set dev vens2 netns ns2 | ||
| 25 | |||
| 26 | $IP -n ns1 link set dev lo up | ||
| 27 | $IP -n ns1 link set dev vens1 up | ||
| 28 | $IP -n ns1 addr add 10.1.1.101/24 dev vens1 | ||
| 29 | $IP -n ns1 addr add 2401:db01::65/64 dev vens1 nodad | ||
| 30 | $IP -n ns1 route add default via 10.1.1.1 dev vens1 | ||
| 31 | $IP -n ns1 route add default via 2401:db01::1 dev vens1 | ||
| 32 | |||
| 33 | $IP -n ns2 link set dev lo up | ||
| 34 | $IP -n ns2 link set dev vens2 up | ||
| 35 | $IP -n ns2 addr add 10.2.1.102/24 dev vens2 | ||
| 36 | $IP -n ns2 addr add 2401:db02::66/64 dev vens2 nodad | ||
| 37 | $IP -n ns2 addr add 10.10.1.102 dev lo | ||
| 38 | $IP -n ns2 addr add 2401:face::66/64 dev lo nodad | ||
| 39 | $IP -n ns2 link add ipt2 type ipip local 10.2.1.102 remote 10.2.1.1 | ||
| 40 | $IP -n ns2 link add ip6t2 type ip6tnl mode any local 2401:db02::66 remote 2401:db02::1 | ||
| 41 | $IP -n ns2 link set dev ipt2 up | ||
| 42 | $IP -n ns2 link set dev ip6t2 up | ||
| 43 | $IP netns exec ns2 $TC qdisc add dev vens2 clsact | ||
| 44 | $IP netns exec ns2 $TC filter add dev vens2 ingress bpf da obj $REDIRECT_BPF sec drop_non_tun_vip | ||
| 45 | if [[ $tun_type == "ipip" ]]; then | ||
| 46 | $IP -n ns2 route add 10.1.1.0/24 dev ipt2 | ||
| 47 | $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0 | ||
| 48 | $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ipt2.rp_filter=0 | ||
| 49 | else | ||
| 50 | $IP -n ns2 route add 10.1.1.0/24 dev ip6t2 | ||
| 51 | $IP -n ns2 route add 2401:db01::/64 dev ip6t2 | ||
| 52 | $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0 | ||
| 53 | $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ip6t2.rp_filter=0 | ||
| 54 | fi | ||
| 55 | |||
| 56 | $IP addr add 10.1.1.1/24 dev ve1 | ||
| 57 | $IP addr add 2401:db01::1/64 dev ve1 nodad | ||
| 58 | $IP addr add 10.2.1.1/24 dev ve2 | ||
| 59 | $IP addr add 2401:db02::1/64 dev ve2 nodad | ||
| 60 | |||
| 61 | $TC qdisc add dev ve2 clsact | ||
| 62 | $TC filter add dev ve2 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_forward | ||
| 63 | |||
| 64 | sysctl -q -w net.ipv4.conf.all.rp_filter=0 | ||
| 65 | sysctl -q -w net.ipv6.conf.all.forwarding=1 | ||
| 66 | } | ||
| 67 | |||
| 68 | function cleanup { | ||
| 69 | set +e | ||
| 70 | [[ -z $DEBUG ]] || set +x | ||
| 71 | $IP netns delete ns1 >& /dev/null | ||
| 72 | $IP netns delete ns2 >& /dev/null | ||
| 73 | $IP link del ve1 >& /dev/null | ||
| 74 | $IP link del ve2 >& /dev/null | ||
| 75 | $IP link del ipt >& /dev/null | ||
| 76 | $IP link del ip6t >& /dev/null | ||
| 77 | sysctl -q -w net.ipv4.conf.all.rp_filter=$RP_FILTER | ||
| 78 | sysctl -q -w net.ipv6.conf.all.forwarding=$IPV6_FORWARDING | ||
| 79 | rm -f /sys/fs/bpf/tc/globals/tun_iface | ||
| 80 | [[ -z $DEBUG ]] || set -x | ||
| 81 | set -e | ||
| 82 | } | ||
| 83 | |||
| 84 | function l2_to_ipip { | ||
| 85 | echo -n "l2_to_ipip $1: " | ||
| 86 | |||
| 87 | local dir=$1 | ||
| 88 | |||
| 89 | config_common ipip | ||
| 90 | |||
| 91 | $IP link add ipt type ipip external | ||
| 92 | $IP link set dev ipt up | ||
| 93 | sysctl -q -w net.ipv4.conf.ipt.rp_filter=0 | ||
| 94 | sysctl -q -w net.ipv4.conf.ipt.forwarding=1 | ||
| 95 | |||
| 96 | if [[ $dir == "egress" ]]; then | ||
| 97 | $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2 | ||
| 98 | $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect | ||
| 99 | sysctl -q -w net.ipv4.conf.ve1.forwarding=1 | ||
| 100 | else | ||
| 101 | $TC qdisc add dev ve1 clsact | ||
| 102 | $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect | ||
| 103 | fi | ||
| 104 | |||
| 105 | $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ipt/ifindex) | ||
| 106 | |||
| 107 | $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null | ||
| 108 | |||
| 109 | if [[ $dir == "egress" ]]; then | ||
| 110 | # test direct egress to ve2 (i.e. not forwarding from | ||
| 111 | # ve1 to ve2). | ||
| 112 | ping -c1 10.10.1.102 >& /dev/null | ||
| 113 | fi | ||
| 114 | |||
| 115 | cleanup | ||
| 116 | |||
| 117 | echo "OK" | ||
| 118 | } | ||
| 119 | |||
| 120 | function l2_to_ip6tnl { | ||
| 121 | echo -n "l2_to_ip6tnl $1: " | ||
| 122 | |||
| 123 | local dir=$1 | ||
| 124 | |||
| 125 | config_common ip6tnl | ||
| 126 | |||
| 127 | $IP link add ip6t type ip6tnl mode any external | ||
| 128 | $IP link set dev ip6t up | ||
| 129 | sysctl -q -w net.ipv4.conf.ip6t.rp_filter=0 | ||
| 130 | sysctl -q -w net.ipv4.conf.ip6t.forwarding=1 | ||
| 131 | |||
| 132 | if [[ $dir == "egress" ]]; then | ||
| 133 | $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2 | ||
| 134 | $IP route add 2401:face::/64 via 2401:db02::66 dev ve2 | ||
| 135 | $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect | ||
| 136 | sysctl -q -w net.ipv4.conf.ve1.forwarding=1 | ||
| 137 | else | ||
| 138 | $TC qdisc add dev ve1 clsact | ||
| 139 | $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect | ||
| 140 | fi | ||
| 141 | |||
| 142 | $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ip6t/ifindex) | ||
| 143 | |||
| 144 | $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null | ||
| 145 | $IP netns exec ns1 ping -6 -c1 2401:face::66 >& /dev/null | ||
| 146 | |||
| 147 | if [[ $dir == "egress" ]]; then | ||
| 148 | # test direct egress to ve2 (i.e. not forwarding from | ||
| 149 | # ve1 to ve2). | ||
| 150 | ping -c1 10.10.1.102 >& /dev/null | ||
| 151 | ping -6 -c1 2401:face::66 >& /dev/null | ||
| 152 | fi | ||
| 153 | |||
| 154 | cleanup | ||
| 155 | |||
| 156 | echo "OK" | ||
| 157 | } | ||
| 158 | |||
| 159 | cleanup | ||
| 160 | test_names="l2_to_ipip l2_to_ip6tnl" | ||
| 161 | test_dirs="ingress egress" | ||
| 162 | if [[ $# -ge 2 ]]; then | ||
| 163 | test_names=$1 | ||
| 164 | test_dirs=$2 | ||
| 165 | elif [[ $# -ge 1 ]]; then | ||
| 166 | test_names=$1 | ||
| 167 | fi | ||
| 168 | |||
| 169 | for t in $test_names; do | ||
| 170 | for d in $test_dirs; do | ||
| 171 | $t $d | ||
| 172 | done | ||
| 173 | done | ||
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c new file mode 100644 index 000000000000..92a44729dbe4 --- /dev/null +++ b/samples/bpf/tc_l2_redirect_kern.c | |||
| @@ -0,0 +1,236 @@ | |||
| 1 | /* Copyright (c) 2016 Facebook | ||
| 2 | * | ||
| 3 | * This program is free software; you can redistribute it and/or | ||
| 4 | * modify it under the terms of version 2 of the GNU General Public | ||
| 5 | * License as published by the Free Software Foundation. | ||
| 6 | */ | ||
| 7 | #include <uapi/linux/bpf.h> | ||
| 8 | #include <uapi/linux/if_ether.h> | ||
| 9 | #include <uapi/linux/if_packet.h> | ||
| 10 | #include <uapi/linux/ip.h> | ||
| 11 | #include <uapi/linux/ipv6.h> | ||
| 12 | #include <uapi/linux/in.h> | ||
| 13 | #include <uapi/linux/tcp.h> | ||
| 14 | #include <uapi/linux/filter.h> | ||
| 15 | #include <uapi/linux/pkt_cls.h> | ||
| 16 | #include <net/ipv6.h> | ||
| 17 | #include "bpf_helpers.h" | ||
| 18 | |||
| 19 | #define _htonl __builtin_bswap32 | ||
| 20 | |||
| 21 | #define PIN_GLOBAL_NS 2 | ||
| 22 | struct bpf_elf_map { | ||
| 23 | __u32 type; | ||
| 24 | __u32 size_key; | ||
| 25 | __u32 size_value; | ||
| 26 | __u32 max_elem; | ||
| 27 | __u32 flags; | ||
| 28 | __u32 id; | ||
| 29 | __u32 pinning; | ||
| 30 | }; | ||
| 31 | |||
| 32 | /* copy of 'struct ethhdr' without __packed */ | ||
| 33 | struct eth_hdr { | ||
| 34 | unsigned char h_dest[ETH_ALEN]; | ||
| 35 | unsigned char h_source[ETH_ALEN]; | ||
| 36 | unsigned short h_proto; | ||
| 37 | }; | ||
| 38 | |||
| 39 | struct bpf_elf_map SEC("maps") tun_iface = { | ||
| 40 | .type = BPF_MAP_TYPE_ARRAY, | ||
| 41 | .size_key = sizeof(int), | ||
| 42 | .size_value = sizeof(int), | ||
| 43 | .pinning = PIN_GLOBAL_NS, | ||
| 44 | .max_elem = 1, | ||
| 45 | }; | ||
| 46 | |||
| 47 | static __always_inline bool is_vip_addr(__be16 eth_proto, __be32 daddr) | ||
| 48 | { | ||
| 49 | if (eth_proto == htons(ETH_P_IP)) | ||
| 50 | return (_htonl(0xffffff00) & daddr) == _htonl(0x0a0a0100); | ||
| 51 | else if (eth_proto == htons(ETH_P_IPV6)) | ||
| 52 | return (daddr == _htonl(0x2401face)); | ||
| 53 | |||
| 54 | return false; | ||
| 55 | } | ||
| 56 | |||
| 57 | SEC("l2_to_iptun_ingress_forward") | ||
| 58 | int _l2_to_iptun_ingress_forward(struct __sk_buff *skb) | ||
| 59 | { | ||
| 60 | struct bpf_tunnel_key tkey = {}; | ||
| 61 | void *data = (void *)(long)skb->data; | ||
| 62 | struct eth_hdr *eth = data; | ||
| 63 | void *data_end = (void *)(long)skb->data_end; | ||
| 64 | int key = 0, *ifindex; | ||
| 65 | |||
| 66 | int ret; | ||
| 67 | |||
| 68 | if (data + sizeof(*eth) > data_end) | ||
| 69 | return TC_ACT_OK; | ||
| 70 | |||
| 71 | ifindex = bpf_map_lookup_elem(&tun_iface, &key); | ||
| 72 | if (!ifindex) | ||
| 73 | return TC_ACT_OK; | ||
| 74 | |||
| 75 | if (eth->h_proto == htons(ETH_P_IP)) { | ||
| 76 | char fmt4[] = "ingress forward to ifindex:%d daddr4:%x\n"; | ||
| 77 | struct iphdr *iph = data + sizeof(*eth); | ||
| 78 | |||
| 79 | if (data + sizeof(*eth) + sizeof(*iph) > data_end) | ||
| 80 | return TC_ACT_OK; | ||
| 81 | |||
| 82 | if (iph->protocol != IPPROTO_IPIP) | ||
| 83 | return TC_ACT_OK; | ||
| 84 | |||
| 85 | bpf_trace_printk(fmt4, sizeof(fmt4), *ifindex, | ||
| 86 | _htonl(iph->daddr)); | ||
| 87 | return bpf_redirect(*ifindex, BPF_F_INGRESS); | ||
| 88 | } else if (eth->h_proto == htons(ETH_P_IPV6)) { | ||
| 89 | char fmt6[] = "ingress forward to ifindex:%d daddr6:%x::%x\n"; | ||
| 90 | struct ipv6hdr *ip6h = data + sizeof(*eth); | ||
| 91 | |||
| 92 | if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) | ||
| 93 | return TC_ACT_OK; | ||
| 94 | |||
| 95 | if (ip6h->nexthdr != IPPROTO_IPIP && | ||
| 96 | ip6h->nexthdr != IPPROTO_IPV6) | ||
| 97 | return TC_ACT_OK; | ||
| 98 | |||
| 99 | bpf_trace_printk(fmt6, sizeof(fmt6), *ifindex, | ||
| 100 | _htonl(ip6h->daddr.s6_addr32[0]), | ||
| 101 | _htonl(ip6h->daddr.s6_addr32[3])); | ||
| 102 | return bpf_redirect(*ifindex, BPF_F_INGRESS); | ||
| 103 | } | ||
| 104 | |||
| 105 | return TC_ACT_OK; | ||
| 106 | } | ||
| 107 | |||
| 108 | SEC("l2_to_iptun_ingress_redirect") | ||
| 109 | int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb) | ||
| 110 | { | ||
| 111 | struct bpf_tunnel_key tkey = {}; | ||
| 112 | void *data = (void *)(long)skb->data; | ||
| 113 | struct eth_hdr *eth = data; | ||
| 114 | void *data_end = (void *)(long)skb->data_end; | ||
| 115 | int key = 0, *ifindex; | ||
| 116 | |||
| 117 | int ret; | ||
| 118 | |||
| 119 | if (data + sizeof(*eth) > data_end) | ||
| 120 | return TC_ACT_OK; | ||
| 121 | |||
| 122 | ifindex = bpf_map_lookup_elem(&tun_iface, &key); | ||
| 123 | if (!ifindex) | ||
| 124 | return TC_ACT_OK; | ||
| 125 | |||
| 126 | if (eth->h_proto == htons(ETH_P_IP)) { | ||
| 127 | char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n"; | ||
| 128 | struct iphdr *iph = data + sizeof(*eth); | ||
| 129 | __be32 daddr = iph->daddr; | ||
| 130 | |||
| 131 | if (data + sizeof(*eth) + sizeof(*iph) > data_end) | ||
| 132 | return TC_ACT_OK; | ||
| 133 | |||
| 134 | if (!is_vip_addr(eth->h_proto, daddr)) | ||
| 135 | return TC_ACT_OK; | ||
| 136 | |||
| 137 | bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(daddr), *ifindex); | ||
| 138 | } else { | ||
| 139 | return TC_ACT_OK; | ||
| 140 | } | ||
| 141 | |||
| 142 | tkey.tunnel_id = 10000; | ||
| 143 | tkey.tunnel_ttl = 64; | ||
| 144 | tkey.remote_ipv4 = 0x0a020166; /* 10.2.1.102 */ | ||
| 145 | bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), 0); | ||
| 146 | return bpf_redirect(*ifindex, 0); | ||
| 147 | } | ||
| 148 | |||
| 149 | SEC("l2_to_ip6tun_ingress_redirect") | ||
| 150 | int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb) | ||
| 151 | { | ||
| 152 | struct bpf_tunnel_key tkey = {}; | ||
| 153 | void *data = (void *)(long)skb->data; | ||
| 154 | struct eth_hdr *eth = data; | ||
| 155 | void *data_end = (void *)(long)skb->data_end; | ||
| 156 | int key = 0, *ifindex; | ||
| 157 | |||
| 158 | if (data + sizeof(*eth) > data_end) | ||
| 159 | return TC_ACT_OK; | ||
| 160 | |||
| 161 | ifindex = bpf_map_lookup_elem(&tun_iface, &key); | ||
| 162 | if (!ifindex) | ||
| 163 | return TC_ACT_OK; | ||
| 164 | |||
| 165 | if (eth->h_proto == htons(ETH_P_IP)) { | ||
| 166 | char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n"; | ||
| 167 | struct iphdr *iph = data + sizeof(*eth); | ||
| 168 | |||
| 169 | if (data + sizeof(*eth) + sizeof(*iph) > data_end) | ||
| 170 | return TC_ACT_OK; | ||
| 171 | |||
| 172 | if (!is_vip_addr(eth->h_proto, iph->daddr)) | ||
| 173 | return TC_ACT_OK; | ||
| 174 | |||
| 175 | bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(iph->daddr), | ||
| 176 | *ifindex); | ||
| 177 | } else if (eth->h_proto == htons(ETH_P_IPV6)) { | ||
| 178 | char fmt6[] = "e/ingress redirect daddr6:%x to ifindex:%d\n"; | ||
| 179 | struct ipv6hdr *ip6h = data + sizeof(*eth); | ||
| 180 | |||
| 181 | if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) | ||
| 182 | return TC_ACT_OK; | ||
| 183 | |||
| 184 | if (!is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0])) | ||
| 185 | return TC_ACT_OK; | ||
| 186 | |||
| 187 | bpf_trace_printk(fmt6, sizeof(fmt6), | ||
| 188 | _htonl(ip6h->daddr.s6_addr32[0]), *ifindex); | ||
| 189 | } else { | ||
| 190 | return TC_ACT_OK; | ||
| 191 | } | ||
| 192 | |||
| 193 | tkey.tunnel_id = 10000; | ||
| 194 | tkey.tunnel_ttl = 64; | ||
| 195 | /* 2401:db02:0:0:0:0:0:66 */ | ||
| 196 | tkey.remote_ipv6[0] = _htonl(0x2401db02); | ||
| 197 | tkey.remote_ipv6[1] = 0; | ||
| 198 | tkey.remote_ipv6[2] = 0; | ||
| 199 | tkey.remote_ipv6[3] = _htonl(0x00000066); | ||
| 200 | bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), BPF_F_TUNINFO_IPV6); | ||
| 201 | return bpf_redirect(*ifindex, 0); | ||
| 202 | } | ||
| 203 | |||
| 204 | SEC("drop_non_tun_vip") | ||
| 205 | int _drop_non_tun_vip(struct __sk_buff *skb) | ||
| 206 | { | ||
| 207 | struct bpf_tunnel_key tkey = {}; | ||
| 208 | void *data = (void *)(long)skb->data; | ||
| 209 | struct eth_hdr *eth = data; | ||
| 210 | void *data_end = (void *)(long)skb->data_end; | ||
| 211 | |||
| 212 | if (data + sizeof(*eth) > data_end) | ||
| 213 | return TC_ACT_OK; | ||
| 214 | |||
| 215 | if (eth->h_proto == htons(ETH_P_IP)) { | ||
| 216 | struct iphdr *iph = data + sizeof(*eth); | ||
| 217 | |||
| 218 | if (data + sizeof(*eth) + sizeof(*iph) > data_end) | ||
| 219 | return TC_ACT_OK; | ||
| 220 | |||
| 221 | if (is_vip_addr(eth->h_proto, iph->daddr)) | ||
| 222 | return TC_ACT_SHOT; | ||
| 223 | } else if (eth->h_proto == htons(ETH_P_IPV6)) { | ||
| 224 | struct ipv6hdr *ip6h = data + sizeof(*eth); | ||
| 225 | |||
| 226 | if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) | ||
| 227 | return TC_ACT_OK; | ||
| 228 | |||
| 229 | if (is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0])) | ||
| 230 | return TC_ACT_SHOT; | ||
| 231 | } | ||
| 232 | |||
| 233 | return TC_ACT_OK; | ||
| 234 | } | ||
| 235 | |||
| 236 | char _license[] SEC("license") = "GPL"; | ||
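The three sections above all look up the tunnel device's ifindex in a single-entry array map called tun_iface, which is shared with user space through a bpf filesystem pin (the "-U <file>" path taken by the tool in the next file). As a point of reference only, the sketch below shows the iproute2-style convention for declaring such a pinned map; it is an assumption about the convention, not a quote of this file's actual definition, and it reuses the SEC() and __u32 definitions a BPF object like the one above already includes. tc's loader pins maps declared this way under /sys/fs/bpf/tc/globals/ so user space can bpf_obj_get() them.

/* Illustrative sketch (assumption): iproute2-style pinned array map.
 * The struct layout must match iproute2's struct bpf_elf_map for tc's
 * ELF loader to parse it; PIN_GLOBAL_NS asks tc to pin the map in the
 * global namespace directory of the bpf filesystem.
 */
struct bpf_elf_map {
	__u32 type;
	__u32 size_key;
	__u32 size_value;
	__u32 max_elem;
	__u32 flags;
	__u32 id;
	__u32 pinning;
};

#define PIN_GLOBAL_NS 2

struct bpf_elf_map SEC("maps") tun_iface = {
	.type		= BPF_MAP_TYPE_ARRAY,
	.size_key	= sizeof(int),
	.size_value	= sizeof(int),
	.pinning	= PIN_GLOBAL_NS,
	.max_elem	= 1,
};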
diff --git a/samples/bpf/tc_l2_redirect_user.c b/samples/bpf/tc_l2_redirect_user.c new file mode 100644 index 000000000000..4013c5337b91 --- /dev/null +++ b/samples/bpf/tc_l2_redirect_user.c | |||
| @@ -0,0 +1,73 @@ | |||
| 1 | /* Copyright (c) 2016 Facebook | ||
| 2 | * | ||
| 3 | * This program is free software; you can redistribute it and/or | ||
| 4 | * modify it under the terms of version 2 of the GNU General Public | ||
| 5 | * License as published by the Free Software Foundation. | ||
| 6 | */ | ||
| 7 | #include <linux/unistd.h> | ||
| 8 | #include <linux/bpf.h> | ||
| 9 | |||
| 10 | #include <stdlib.h> | ||
| 11 | #include <stdio.h> | ||
| 12 | #include <unistd.h> | ||
| 13 | #include <string.h> | ||
| 14 | #include <errno.h> | ||
| 15 | |||
| 16 | #include "libbpf.h" | ||
| 17 | |||
| 18 | static void usage(void) | ||
| 19 | { | ||
| 20 | printf("Usage: tc_l2_ipip_redirect [...]\n"); | ||
| 21 | printf(" -U <file> Update an already pinned BPF array\n"); | ||
| 22 | printf(" -i <ifindex> Interface index\n"); | ||
| 23 | printf(" -h Display this help\n"); | ||
| 24 | } | ||
| 25 | |||
| 26 | int main(int argc, char **argv) | ||
| 27 | { | ||
| 28 | const char *pinned_file = NULL; | ||
| 29 | int ifindex = -1; | ||
| 30 | int array_key = 0; | ||
| 31 | int array_fd = -1; | ||
| 32 | int ret = -1; | ||
| 33 | int opt; | ||
| 34 | |||
| 35 | while ((opt = getopt(argc, argv, "U:i:h")) != -1) { | ||
| 36 | switch (opt) { | ||
| 37 | /* General args */ | ||
| 38 | case 'U': | ||
| 39 | pinned_file = optarg; | ||
| 40 | break; | ||
| 41 | case 'i': | ||
| 42 | ifindex = atoi(optarg); | ||
| 43 | break; | ||
| 44 | default: | ||
| 45 | usage(); | ||
| 46 | goto out; | ||
| 47 | } | ||
| 48 | } | ||
| 49 | |||
| 50 | if (ifindex < 0 || !pinned_file) { | ||
| 51 | usage(); | ||
| 52 | goto out; | ||
| 53 | } | ||
| 54 | |||
| 55 | array_fd = bpf_obj_get(pinned_file); | ||
| 56 | if (array_fd < 0) { | ||
| 57 | fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n", | ||
| 58 | pinned_file, strerror(errno), errno); | ||
| 59 | goto out; | ||
| 60 | } | ||
| 61 | |||
| 62 | /* store the tunnel device's ifindex in the pinned array */ | ||
| 63 | ret = bpf_update_elem(array_fd, &array_key, &ifindex, 0); | ||
| 64 | if (ret) { | ||
| 65 | perror("bpf_update_elem"); | ||
| 66 | goto out; | ||
| 67 | } | ||
| 68 | |||
| 69 | out: | ||
| 70 | if (array_fd != -1) | ||
| 71 | close(array_fd); | ||
| 72 | return ret; | ||
| 73 | } | ||
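Once bpf_update_elem() succeeds, the value just written can be read back through the same pin as a sanity check. The helper below is a hypothetical sketch, not part of the patch: it assumes the bpf_lookup_elem() wrapper that the same samples libbpf.h provides alongside bpf_obj_get() and bpf_update_elem().

/* Hypothetical sketch: read the ifindex back from the pinned array to
 * confirm the update took effect. Assumes bpf_lookup_elem() from
 * samples/bpf/libbpf.h with the (fd, key, value) signature.
 */
static int verify_tun_ifindex(const char *pinned_file, int expected)
{
	int key = 0, value = -1;
	int fd = bpf_obj_get(pinned_file);

	if (fd < 0)
		return -1;

	if (bpf_lookup_elem(fd, &key, &value) || value != expected) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}

Calling such a helper right after the update would catch a stale or mismatched pin early, before the tc-attached programs start redirecting traffic to the wrong interface.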
