63 files changed, 454 insertions, 279 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a8efb18e42fa..0ab83708b6a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8627,6 +8627,7 @@ bnx2_remove_one(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int
 bnx2_suspend(struct device *device)
 {
@@ -8665,7 +8666,6 @@ bnx2_resume(struct device *device)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
 #define BNX2_PM_OPS (&bnx2_pm_ops)
 
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 751d5c7b312d..7e49c43b7af3 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -4,7 +4,7 @@
 
 config NET_CADENCE
 	bool "Cadence devices"
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST)
 	default y
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
@@ -22,7 +22,7 @@ if NET_CADENCE
 
 config ARM_AT91_ETHER
 	tristate "AT91RM9200 Ethernet support"
-	depends on HAS_DMA
+	depends on HAS_DMA && (ARCH_AT91RM9200 || COMPILE_TEST)
 	select MACB
 	---help---
 	  If you wish to compile a kernel for the AT91RM9200 and enable
@@ -30,7 +30,7 @@ config ARM_AT91_ETHER
 
 config MACB
 	tristate "Cadence MACB/GEM support"
-	depends on HAS_DMA
+	depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST)
 	select PHYLIB
 	---help---
 	  The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 81e8402a74b4..8a96572fdde0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -154,7 +154,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 	req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
 	req->l2t_idx = htons(e->idx);
 	req->vlan = htons(e->vlan);
-	if (e->neigh)
+	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
 		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
 	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
 
@@ -394,6 +394,8 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
 	if (e) {
 		spin_lock(&e->lock); /* avoid race with t4_l2t_free */
 		e->state = L2T_STATE_RESOLVING;
+		if (neigh->dev->flags & IFF_LOOPBACK)
+			memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
 		memcpy(e->addr, addr, addr_len);
 		e->ifindex = ifidx;
 		e->hash = hash;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fb2fe65903c2..bba67681aeaa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -682,7 +682,7 @@ enum {
 	SF_RD_ID = 0x9f,          /* read ID */
 	SF_ERASE_SECTOR = 0xd8,   /* erase sector */
 
-	FW_MAX_SIZE = 512 * 1024,
+	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
 };
 
 /**
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8ccaa2520dc3..97db5a7179df 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -374,6 +374,7 @@ enum vf_state {
 #define BE_FLAGS_NAPI_ENABLED (1 << 9)
 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
 #define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
+#define BE_FLAGS_SETUP_DONE (1 << 13)
 
 #define BE_UC_PMAC_COUNT 30
 #define BE_VF_UC_PMAC_COUNT 2
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3e6df47b6973..a18645407d21 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2033,11 +2033,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 	bool dummy_wrb;
 	int i, pending_txqs;
 
-	/* Wait for a max of 200ms for all the tx-completions to arrive. */
+	/* Stop polling for compls when HW has been silent for 10ms */
 	do {
 		pending_txqs = adapter->num_tx_qs;
 
 		for_all_tx_queues(adapter, txo, i) {
+			cmpl = 0;
+			num_wrbs = 0;
 			txq = &txo->q;
 			while ((txcp = be_tx_compl_get(&txo->cq))) {
 				end_idx =
@@ -2050,14 +2052,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 			if (cmpl) {
 				be_cq_notify(adapter, txo->cq.id, false, cmpl);
 				atomic_sub(num_wrbs, &txq->used);
-				cmpl = 0;
-				num_wrbs = 0;
+				timeo = 0;
 			}
 			if (atomic_read(&txq->used) == 0)
 				pending_txqs--;
 		}
 
-		if (pending_txqs == 0 || ++timeo > 200)
+		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
 			break;
 
 		mdelay(1);
@@ -2725,6 +2726,12 @@ static int be_close(struct net_device *netdev)
 	struct be_eq_obj *eqo;
 	int i;
 
+	/* This protection is needed as be_close() may be called even when the
+	 * adapter is in cleared state (after eeh perm failure)
+	 */
+	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
+		return 0;
+
 	be_roce_dev_close(adapter);
 
 	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3055,6 +3062,7 @@ static int be_clear(struct be_adapter *adapter)
 	be_clear_queues(adapter);
 
 	be_msix_disable(adapter);
+	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
 	return 0;
 }
 
@@ -3559,6 +3567,7 @@ static int be_setup(struct be_adapter *adapter)
 	adapter->phy.fc_autoneg = 1;
 
 	be_schedule_worker(adapter);
+	adapter->flags |= BE_FLAGS_SETUP_DONE;
 	return 0;
 err:
 	be_clear(adapter);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d04b1c3c9b85..b248bcbdae63 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -89,9 +89,8 @@
 #define MVNETA_TX_IN_PRGRS BIT(1)
 #define MVNETA_TX_FIFO_EMPTY BIT(8)
 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
-#define MVNETA_SERDES_CFG 0x24A0
+#define MVNETA_SGMII_SERDES_CFG 0x24A0
 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
-#define MVNETA_RGMII_SERDES_PROTO 0x0667
 #define MVNETA_TYPE_PRIO 0x24bc
 #define MVNETA_FORCE_UNI BIT(21)
 #define MVNETA_TXQ_CMD_1 0x24e4
@@ -712,6 +711,35 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 }
 
+
+
+/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
+static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
+{
+	u32 val;
+
+	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+
+	if (enable)
+		val |= MVNETA_GMAC2_PORT_RGMII;
+	else
+		val &= ~MVNETA_GMAC2_PORT_RGMII;
+
+	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+}
+
+/* Config SGMII port */
+static void mvneta_port_sgmii_config(struct mvneta_port *pp)
+{
+	u32 val;
+
+	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+	val |= MVNETA_GMAC2_PCS_ENABLE;
+	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+
+	mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+}
+
 /* Start the Ethernet port RX and TX activity */
 static void mvneta_port_up(struct mvneta_port *pp)
 {
@@ -2729,15 +2757,12 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
 
 	if (phy_mode == PHY_INTERFACE_MODE_SGMII)
-		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-	else
-		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
+		mvneta_port_sgmii_config(pp);
 
-	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
-	val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+	mvneta_gmac_rgmii_set(pp, 1);
 
 	/* Cancel Port Reset */
+	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
 	val &= ~MVNETA_GMAC2_PORT_RESET;
 	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f0ae95f66ceb..cef267e24f9c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2301,13 +2301,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 	/* Allow large DMA segments, up to the firmware limit of 1 GB */
 	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		err = -ENOMEM;
-		goto err_release_regions;
-	}
-
-	dev = &priv->dev;
+	dev = pci_get_drvdata(pdev);
+	priv = mlx4_priv(dev);
 	dev->pdev = pdev;
 	INIT_LIST_HEAD(&priv->ctx_list);
 	spin_lock_init(&priv->ctx_lock);
@@ -2374,10 +2369,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 	} else {
 		atomic_inc(&pf_loading);
 		err = pci_enable_sriov(pdev, total_vfs);
-		atomic_dec(&pf_loading);
 		if (err) {
 			mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
 				 err);
+			atomic_dec(&pf_loading);
 			err = 0;
 		} else {
 			mlx4_warn(dev, "Running in master mode\n");
@@ -2535,8 +2530,10 @@ slave_start:
 	mlx4_sense_init(dev);
 	mlx4_start_sense(dev);
 
-	priv->pci_dev_data = pci_dev_data;
-	pci_set_drvdata(pdev, dev);
+	priv->removed = 0;
+
+	if (mlx4_is_master(dev) && dev->num_vfs)
+		atomic_dec(&pf_loading);
 
 	return 0;
 
@@ -2588,6 +2585,9 @@ err_rel_own:
 	if (!mlx4_is_slave(dev))
 		mlx4_free_ownership(dev);
 
+	if (mlx4_is_master(dev) && dev->num_vfs)
+		atomic_dec(&pf_loading);
+
 	kfree(priv->dev.dev_vfs);
 
 err_free_dev:
@@ -2604,85 +2604,110 @@ err_disable_pdev:
 
 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	struct mlx4_priv *priv;
+	struct mlx4_dev *dev;
+
 	printk_once(KERN_INFO "%s", mlx4_version);
 
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev = &priv->dev;
+	pci_set_drvdata(pdev, dev);
+	priv->pci_dev_data = id->driver_data;
+
 	return __mlx4_init_one(pdev, id->driver_data);
 }
 
-static void mlx4_remove_one(struct pci_dev *pdev)
+static void __mlx4_remove_one(struct pci_dev *pdev)
 {
 	struct mlx4_dev *dev = pci_get_drvdata(pdev);
 	struct mlx4_priv *priv = mlx4_priv(dev);
+	int pci_dev_data;
 	int p;
 
-	if (dev) {
-		/* in SRIOV it is not allowed to unload the pf's
-		 * driver while there are alive vf's */
-		if (mlx4_is_master(dev)) {
-			if (mlx4_how_many_lives_vf(dev))
-				printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
-		}
-		mlx4_stop_sense(dev);
-		mlx4_unregister_device(dev);
-
-		for (p = 1; p <= dev->caps.num_ports; p++) {
-			mlx4_cleanup_port_info(&priv->port[p]);
-			mlx4_CLOSE_PORT(dev, p);
-		}
-
-		if (mlx4_is_master(dev))
-			mlx4_free_resource_tracker(dev,
-						   RES_TR_FREE_SLAVES_ONLY);
-
-		mlx4_cleanup_counters_table(dev);
-		mlx4_cleanup_qp_table(dev);
-		mlx4_cleanup_srq_table(dev);
-		mlx4_cleanup_cq_table(dev);
-		mlx4_cmd_use_polling(dev);
-		mlx4_cleanup_eq_table(dev);
-		mlx4_cleanup_mcg_table(dev);
-		mlx4_cleanup_mr_table(dev);
-		mlx4_cleanup_xrcd_table(dev);
-		mlx4_cleanup_pd_table(dev);
-
-		if (mlx4_is_master(dev))
-			mlx4_free_resource_tracker(dev,
-						   RES_TR_FREE_STRUCTS_ONLY);
-
-		iounmap(priv->kar);
-		mlx4_uar_free(dev, &priv->driver_uar);
-		mlx4_cleanup_uar_table(dev);
-		if (!mlx4_is_slave(dev))
-			mlx4_clear_steering(dev);
-		mlx4_free_eq_table(dev);
-		if (mlx4_is_master(dev))
-			mlx4_multi_func_cleanup(dev);
-		mlx4_close_hca(dev);
-		if (mlx4_is_slave(dev))
-			mlx4_multi_func_cleanup(dev);
-		mlx4_cmd_cleanup(dev);
-
-		if (dev->flags & MLX4_FLAG_MSI_X)
-			pci_disable_msix(pdev);
-		if (dev->flags & MLX4_FLAG_SRIOV) {
-			mlx4_warn(dev, "Disabling SR-IOV\n");
-			pci_disable_sriov(pdev);
-		}
-
-		if (!mlx4_is_slave(dev))
-			mlx4_free_ownership(dev);
-
-		kfree(dev->caps.qp0_tunnel);
-		kfree(dev->caps.qp0_proxy);
-		kfree(dev->caps.qp1_tunnel);
-		kfree(dev->caps.qp1_proxy);
-		kfree(dev->dev_vfs);
-
-		kfree(priv);
-		pci_release_regions(pdev);
-		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
-	}
+	if (priv->removed)
+		return;
+
+	pci_dev_data = priv->pci_dev_data;
+
+	/* in SRIOV it is not allowed to unload the pf's
+	 * driver while there are alive vf's */
+	if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
+		printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+	mlx4_stop_sense(dev);
+	mlx4_unregister_device(dev);
+
+	for (p = 1; p <= dev->caps.num_ports; p++) {
+		mlx4_cleanup_port_info(&priv->port[p]);
+		mlx4_CLOSE_PORT(dev, p);
+	}
+
+	if (mlx4_is_master(dev))
+		mlx4_free_resource_tracker(dev,
+					   RES_TR_FREE_SLAVES_ONLY);
+
+	mlx4_cleanup_counters_table(dev);
+	mlx4_cleanup_qp_table(dev);
+	mlx4_cleanup_srq_table(dev);
+	mlx4_cleanup_cq_table(dev);
+	mlx4_cmd_use_polling(dev);
+	mlx4_cleanup_eq_table(dev);
+	mlx4_cleanup_mcg_table(dev);
+	mlx4_cleanup_mr_table(dev);
+	mlx4_cleanup_xrcd_table(dev);
+	mlx4_cleanup_pd_table(dev);
+
+	if (mlx4_is_master(dev))
+		mlx4_free_resource_tracker(dev,
+					   RES_TR_FREE_STRUCTS_ONLY);
+
+	iounmap(priv->kar);
+	mlx4_uar_free(dev, &priv->driver_uar);
+	mlx4_cleanup_uar_table(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_clear_steering(dev);
+	mlx4_free_eq_table(dev);
+	if (mlx4_is_master(dev))
+		mlx4_multi_func_cleanup(dev);
+	mlx4_close_hca(dev);
+	if (mlx4_is_slave(dev))
+		mlx4_multi_func_cleanup(dev);
+	mlx4_cmd_cleanup(dev);
+
+	if (dev->flags & MLX4_FLAG_MSI_X)
+		pci_disable_msix(pdev);
+	if (dev->flags & MLX4_FLAG_SRIOV) {
+		mlx4_warn(dev, "Disabling SR-IOV\n");
+		pci_disable_sriov(pdev);
+		dev->num_vfs = 0;
+	}
+
+	if (!mlx4_is_slave(dev))
+		mlx4_free_ownership(dev);
+
+	kfree(dev->caps.qp0_tunnel);
+	kfree(dev->caps.qp0_proxy);
+	kfree(dev->caps.qp1_tunnel);
+	kfree(dev->caps.qp1_proxy);
+	kfree(dev->dev_vfs);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	memset(priv, 0, sizeof(*priv));
+	priv->pci_dev_data = pci_dev_data;
+	priv->removed = 1;
+}
+
+static void mlx4_remove_one(struct pci_dev *pdev)
+{
+	struct mlx4_dev *dev = pci_get_drvdata(pdev);
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	__mlx4_remove_one(pdev);
+	kfree(priv);
+	pci_set_drvdata(pdev, NULL);
 }
 
 int mlx4_restart_one(struct pci_dev *pdev)
@@ -2692,7 +2717,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
 	int pci_dev_data;
 
 	pci_dev_data = priv->pci_dev_data;
-	mlx4_remove_one(pdev);
+	__mlx4_remove_one(pdev);
 	return __mlx4_init_one(pdev, pci_dev_data);
 }
 
@@ -2747,7 +2772,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 					      pci_channel_state_t state)
 {
-	mlx4_remove_one(pdev);
+	__mlx4_remove_one(pdev);
 
 	return state == pci_channel_io_perm_failure ?
 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -2755,11 +2780,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 
 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
-	const struct pci_device_id *id;
-	int ret;
+	struct mlx4_dev *dev = pci_get_drvdata(pdev);
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int ret;
 
-	id = pci_match_id(mlx4_pci_table, pdev);
-	ret = __mlx4_init_one(pdev, id->driver_data);
+	ret = __mlx4_init_one(pdev, priv->pci_dev_data);
 
 	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cf8be41abb36..f9c465101963 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -800,6 +800,7 @@ struct mlx4_priv {
 	spinlock_t ctx_lock;
 
 	int pci_dev_data;
+	int removed;
 
 	struct list_head pgdir_list;
 	struct mutex pgdir_mutex;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b48737dcd3c5..ba20c721ee97 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2139,8 +2139,6 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
 	ahw->max_mac_filters = nic_info.max_mac_filters;
 	ahw->max_mtu = nic_info.max_mtu;
 
-	adapter->max_tx_rings = ahw->max_tx_ques;
-	adapter->max_sds_rings = ahw->max_rx_ques;
 	/* eSwitch capability indicates vNIC mode.
 	 * vNIC and SRIOV are mutually exclusive operational modes.
 	 * If SR-IOV capability is detected, SR-IOV physical function
@@ -2161,6 +2159,7 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
 int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	u16 max_sds_rings, max_tx_rings;
 	int ret;
 
 	ret = qlcnic_83xx_get_nic_configuration(adapter);
@@ -2173,18 +2172,21 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
 		if (qlcnic_83xx_config_vnic_opmode(adapter))
 			return -EIO;
 
-		adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
-		adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
+		max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+		max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
 	} else if (ret == QLC_83XX_DEFAULT_OPMODE) {
 		ahw->nic_mode = QLCNIC_DEFAULT_MODE;
 		adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
 		ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
-		adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
-		adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+		max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+		max_tx_rings = QLCNIC_MAX_TX_RINGS;
 	} else {
 		return -EIO;
 	}
 
+	adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings);
+	adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings);
+
 	return 0;
 }
 
@@ -2348,15 +2350,16 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
 		goto disable_intr;
 	}
 
+	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
 	err = qlcnic_83xx_setup_mbx_intr(adapter);
 	if (err)
 		goto disable_mbx_intr;
 
 	qlcnic_83xx_clear_function_resources(adapter);
-
-	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
-
+	qlcnic_dcb_enable(adapter->dcb);
 	qlcnic_83xx_initialize_nic(adapter, 1);
+	qlcnic_dcb_get_info(adapter->dcb);
 
 	/* Configure default, SR-IOV or Virtual NIC mode of operation */
 	err = qlcnic_83xx_configure_opmode(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 64dcbf33d8f0..c1e11f5715b0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -883,8 +883,6 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
 		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
 		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
 		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
-		adapter->max_tx_rings = npar_info->max_tx_ques;
-		adapter->max_sds_rings = npar_info->max_rx_ques;
 	}
 
 	qlcnic_free_mbx_args(&cmd);
@@ -1356,6 +1354,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
 		arg2 &= ~BIT_3;
 		break;
 	case QLCNIC_ADD_VLAN:
+		arg1 &= ~(0x0ffff << 16);
 		arg1 |= (BIT_2 | BIT_5);
 		arg1 |= (esw_cfg->vlan_id << 16);
 		break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 7d4f54912bad..a51fe18f09a8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -330,8 +330,6 @@ static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
 		goto out_free_cfg;
 	}
 
-	qlcnic_dcb_get_info(dcb);
-
 	return 0;
 out_free_cfg:
 	kfree(dcb->cfg);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 309d05640883..dbf75393f758 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -670,7 +670,7 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
 	else
 		num_msix += adapter->drv_tx_rings;
 
-	if (adapter->drv_rss_rings > 0)
+	if (adapter->drv_rss_rings > 0)
 		num_msix += adapter->drv_rss_rings;
 	else
 		num_msix += adapter->drv_sds_rings;
@@ -686,19 +686,15 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
 		return -ENOMEM;
 	}
 
-restore:
 	for (vector = 0; vector < num_msix; vector++)
 		adapter->msix_entries[vector].entry = vector;
 
+restore:
 	err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
-	if (err == 0) {
-		adapter->ahw->num_msix = num_msix;
-		if (adapter->drv_tss_rings > 0)
-			adapter->drv_tx_rings = adapter->drv_tss_rings;
+	if (err > 0) {
+		if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
+			return -ENOSPC;
 
-		if (adapter->drv_rss_rings > 0)
-			adapter->drv_sds_rings = adapter->drv_rss_rings;
-	} else {
 		netdev_info(adapter->netdev,
 			    "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
 			    num_msix, err);
@@ -716,12 +712,20 @@ restore:
 			    "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
 			    adapter->drv_tx_rings, adapter->drv_sds_rings,
 			    num_msix);
-		goto restore;
 
-		err = -EIO;
+		goto restore;
+	} else if (err < 0) {
+		return err;
 	}
 
-	return err;
+	adapter->ahw->num_msix = num_msix;
+	if (adapter->drv_tss_rings > 0)
+		adapter->drv_tx_rings = adapter->drv_tss_rings;
+
+	if (adapter->drv_rss_rings > 0)
+		adapter->drv_sds_rings = adapter->drv_rss_rings;
+
+	return 0;
 }
 
 int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
@@ -2528,8 +2532,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_free_hw;
 	}
 
-	qlcnic_dcb_enable(adapter->dcb);
-
 	if (qlcnic_read_mac_addr(adapter))
 		dev_warn(&pdev->dev, "failed to read mac addr\n");
 
@@ -2549,7 +2551,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			 "Device does not support MSI interrupts\n");
 
 	if (qlcnic_82xx_check(adapter)) {
+		qlcnic_dcb_enable(adapter->dcb);
+		qlcnic_dcb_get_info(adapter->dcb);
 		err = qlcnic_setup_intr(adapter);
+
 		if (err) {
 			dev_err(&pdev->dev, "Failed to setup interrupt\n");
 			goto err_out_disable_msi;
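An illustrative aside on the reworked qlcnic_setup_tss_rss_intr() hunk above: with the legacy pci_enable_msix() interface used here, a return of 0 means every requested vector was granted, a positive value is the number of vectors the platform could actually supply, and a negative value is a hard error; the new code retries once with the smaller count and only then gives up. The sketch below is a standalone user-space model of that control flow, not the driver's code; try_enable() and AVAILABLE_VECTORS are hypothetical stand-ins for pci_enable_msix() and the platform limit.

#include <stdio.h>

#define AVAILABLE_VECTORS 6	/* pretend the platform only has 6 vectors */

/* Hypothetical stand-in for pci_enable_msix(): 0 = all granted,
 * >0 = only this many available, <0 = hard error. */
static int try_enable(int requested)
{
	if (requested <= AVAILABLE_VECTORS)
		return 0;
	return AVAILABLE_VECTORS;
}

static int setup_vectors(int wanted)
{
	int err = try_enable(wanted);

	if (err > 0) {
		/* Not enough vectors: retry once with the advertised count. */
		printf("wanted %d, only %d available - retrying\n", wanted, err);
		if (try_enable(err) == 0)
			return err;
		return -1;	/* even the fallback failed */
	} else if (err < 0) {
		return err;	/* hard error from the bus layer */
	}
	return wanted;		/* first request fully satisfied */
}

int main(void)
{
	printf("granted %d vectors\n", setup_vectors(10));
	return 0;
}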
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 14f748cbf0de..280137991544 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -461,6 +461,16 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 
+	if (pci_vfs_assigned(adapter->pdev)) {
+		netdev_err(adapter->netdev,
+			   "SR-IOV VFs belonging to port %d are assigned to VMs. SR-IOV can not be disabled on this port\n",
+			   adapter->portnum);
+		netdev_info(adapter->netdev,
+			    "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n",
+			    adapter->portnum);
+		return -EPERM;
+	}
+
 	rtnl_lock();
 	if (netif_running(netdev))
 		__qlcnic_down(adapter, netdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 448d156c3d08..cd346e27f2e1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -354,7 +354,7 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
 {
 	int i;
 
-	for (i = 0; i < adapter->ahw->max_vnic_func; i++) {
+	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
 		if (adapter->npars[i].pci_func == pci_func)
 			return i;
 	}
@@ -720,6 +720,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 	struct qlcnic_npar_func_cfg *np_cfg;
 	struct qlcnic_info nic_info;
+	u8 pci_func;
 	int i, ret;
 	u32 count;
 
@@ -729,26 +730,28 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
 
 	count = size / sizeof(struct qlcnic_npar_func_cfg);
 	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
-		if (qlcnic_is_valid_nic_func(adapter, i) < 0)
-			continue;
 		if (adapter->npars[i].pci_func >= count) {
 			dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
 				__func__, adapter->ahw->total_nic_func, count);
 			continue;
 		}
-		ret = qlcnic_get_nic_info(adapter, &nic_info, i);
-		if (ret)
-			return ret;
 		if (!adapter->npars[i].eswitch_status)
 			continue;
-		np_cfg[i].pci_func = i;
-		np_cfg[i].op_mode = (u8)nic_info.op_mode;
-		np_cfg[i].port_num = nic_info.phys_port;
-		np_cfg[i].fw_capab = nic_info.capabilities;
-		np_cfg[i].min_bw = nic_info.min_tx_bw;
-		np_cfg[i].max_bw = nic_info.max_tx_bw;
-		np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
-		np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
+		pci_func = adapter->npars[i].pci_func;
+		if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+			continue;
+		ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+		if (ret)
+			return ret;
+
+		np_cfg[pci_func].pci_func = pci_func;
+		np_cfg[pci_func].op_mode = (u8)nic_info.op_mode;
+		np_cfg[pci_func].port_num = nic_info.phys_port;
+		np_cfg[pci_func].fw_capab = nic_info.capabilities;
+		np_cfg[pci_func].min_bw = nic_info.min_tx_bw;
+		np_cfg[pci_func].max_bw = nic_info.max_tx_bw;
+		np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
+		np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
 	}
 	return size;
 }
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 430bb0db9bc4..e36f194673a4 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -365,7 +365,7 @@ __at86rf230_read_subreg(struct at86rf230_local *lp,
 	dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
 
 	if (status == 0)
-		*data = buf[1];
+		*data = (buf[1] & mask) >> shift;
 
 	return status;
 }
@@ -1025,14 +1025,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
 		return -EINVAL;
 	}
 
-	rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
-	if (rc)
-		return rc;
-	if (!status) {
-		dev_err(&lp->spi->dev, "AVDD error\n");
-		return -EINVAL;
-	}
-
 	return 0;
 }
 
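A short illustration of the first at86rf230.c hunk above: __at86rf230_read_subreg() previously handed back the whole register byte, while the fix applies the sub-register's mask and shift so the caller only sees the field value. The fragment below is a generic, self-contained model of that mask-and-shift extraction; the register value, mask, and shift are made-up numbers for demonstration, not values from the AT86RF230 datasheet.

#include <stdint.h>
#include <stdio.h>

/* Keep the masked bits, then shift them down to bit 0 - the same operation
 * the fixed driver line "*data = (buf[1] & mask) >> shift;" performs. */
static uint8_t extract_field(uint8_t reg, uint8_t mask, uint8_t shift)
{
	return (uint8_t)((reg & mask) >> shift);
}

int main(void)
{
	uint8_t reg = 0x5C;		/* hypothetical raw register read */
	uint8_t mask = 0x0C, shift = 2;	/* hypothetical 2-bit field at bits 3:2 */

	printf("field = %u\n", extract_field(reg, mask, shift));	/* prints 3 */
	return 0;
}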
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index c55e316373a1..82355d5d155a 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1755,8 +1755,8 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
 	if (err)
 		return err;
 
-	return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df,
-			     false);
+	return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
+			     tos, ttl, df, false);
 }
 EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
 
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 84734a805092..83c39e2858bf 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -1521,11 +1521,7 @@ static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
 	cosa_putstatus(cosa, 0);
 	cosa_getdata8(cosa);
 	cosa_putstatus(cosa, SR_RST);
-#ifdef MODULE
 	msleep(500);
-#else
-	udelay(5*100000);
-#endif
 	/* Disable all IRQs from the card */
 	cosa_putstatus(cosa, 0);
 
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 262dcbb75ffe..024fd03e5d18 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -220,7 +220,6 @@ enum {
 	BPF_S_ANC_RXHASH,
 	BPF_S_ANC_CPU,
 	BPF_S_ANC_ALU_XOR_X,
-	BPF_S_ANC_SECCOMP_LD_W,
 	BPF_S_ANC_VLAN_TAG,
 	BPF_S_ANC_VLAN_TAG_PRESENT,
 	BPF_S_ANC_PAY_OFFSET,
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index ec2ffaf418c8..df78dc2b5524 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -87,7 +87,6 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
 /* delete keymap entries */
 void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
 
-void nf_ct_gre_keymap_flush(struct net *net);
 void nf_nat_need_gre(void);
 
 #endif /* __KERNEL__ */
diff --git a/include/net/dst.h b/include/net/dst.h
index 46ed958e0c6e..71c60f42be48 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -45,7 +45,7 @@ struct dst_entry {
 	void *__pad1;
 #endif
 	int (*input)(struct sk_buff *);
-	int (*output)(struct sk_buff *);
+	int (*output)(struct sock *sk, struct sk_buff *skb);
 
 	unsigned short flags;
 #define DST_HOST 0x0001
@@ -367,7 +367,11 @@ static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
 	return child;
 }
 
-int dst_discard(struct sk_buff *skb);
+int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
+static inline int dst_discard(struct sk_buff *skb)
+{
+	return dst_discard_sk(skb->sk, skb);
+}
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
 		int initial_obsolete, unsigned short flags);
 void __dst_free(struct dst_entry *dst);
@@ -449,9 +453,13 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout)
 }
 
 /* Output packet to network from transport. */
+static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb)
+{
+	return skb_dst(skb)->output(sk, skb);
+}
 static inline int dst_output(struct sk_buff *skb)
 {
-	return skb_dst(skb)->output(skb);
+	return dst_output_sk(skb->sk, skb);
 }
 
 /* Input packet from network to transport. */
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index f981ba7adeed..74af137304be 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -40,7 +40,7 @@ void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 
 void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
-int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl);
+int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
 
 struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
 #endif /* _INET6_CONNECTION_SOCK_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index c55aeed41ace..7a4313887568 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -36,7 +36,7 @@ struct tcp_congestion_ops;
  * (i.e. things that depend on the address family)
  */
 struct inet_connection_sock_af_ops {
-	int (*queue_xmit)(struct sk_buff *skb, struct flowi *fl);
+	int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
 	void (*send_check)(struct sock *sk, struct sk_buff *skb);
 	int (*rebuild_header)(struct sock *sk);
 	void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
diff --git a/include/net/ip.h b/include/net/ip.h
index 25064c28e059..3ec2b0fb9d83 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -104,14 +104,19 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 	   struct net_device *orig_dev);
 int ip_local_deliver(struct sk_buff *skb);
 int ip_mr_input(struct sk_buff *skb);
-int ip_output(struct sk_buff *skb);
-int ip_mc_output(struct sk_buff *skb);
+int ip_output(struct sock *sk, struct sk_buff *skb);
+int ip_mc_output(struct sock *sk, struct sk_buff *skb);
 int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
 int ip_do_nat(struct sk_buff *skb);
 void ip_send_check(struct iphdr *ip);
 int __ip_local_out(struct sk_buff *skb);
-int ip_local_out(struct sk_buff *skb);
-int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl);
+int ip_local_out_sk(struct sock *sk, struct sk_buff *skb);
+static inline int ip_local_out(struct sk_buff *skb)
+{
+	return ip_local_out_sk(skb->sk, skb);
+}
+
+int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
 void ip_init(void);
 int ip_append_data(struct sock *sk, struct flowi4 *fl4,
 		   int getfrag(void *from, char *to, int offset, int len,
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 3c3bb184eb8f..6c4f5eac98e7 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -32,6 +32,11 @@ struct route_info {
 #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
 #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
 
+/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
+ * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
+ */
+#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr))
+
 /*
  * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate
  * between IPV6_ADDR_PREFERENCES socket option values
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index e77c10405d51..a4daf9eb8562 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -153,7 +153,7 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
 }
 
 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
-int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
+int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 		  __be32 src, __be32 dst, __u8 proto,
 		  __u8 tos, __u8 ttl, __be16 df, bool xnet);
 
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4f541f11ce63..d640925bc454 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -731,7 +731,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net,
  * skb processing functions
  */
 
-int ip6_output(struct sk_buff *skb);
+int ip6_output(struct sock *sk, struct sk_buff *skb);
 int ip6_forward(struct sk_buff *skb);
 int ip6_input(struct sk_buff *skb);
 int ip6_mc_input(struct sk_buff *skb);
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index cf2b7ae2b9d8..a75fc8e27cd6 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -13,6 +13,16 @@ struct nft_cmp_fast_expr {
 	u8 len;
 };
 
+/* Calculate the mask for the nft_cmp_fast expression. On big endian the
+ * mask needs to include the *upper* bytes when interpreting that data as
+ * something smaller than the full u32, therefore a cpu_to_le32 is done.
+ */
+static inline u32 nft_cmp_fast_mask(unsigned int len)
+{
+	return cpu_to_le32(~0U >> (FIELD_SIZEOF(struct nft_cmp_fast_expr,
+						data) * BITS_PER_BYTE - len));
+}
+
 extern const struct nft_expr_ops nft_cmp_fast_ops;
 
 int nft_cmp_module_init(void);
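To make the new nft_cmp_fast_mask() helper above concrete: the fast-path data field is a u32, so for a comparison of len bits the mask is ~0U >> (32 - len); for len = 16 that is 0x0000ffff, and the cpu_to_le32() keeps the mask on the bytes that actually hold the value when that u32 is treated as a smaller quantity on a big-endian machine. The user-space sketch below reworks the same arithmetic for illustration only; htole32() from glibc's <endian.h> is used as a stand-in for the kernel's cpu_to_le32() and is an assumption of the build environment.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as nft_cmp_fast_mask(): the compared data lives in a
 * 32-bit word, so only the low "len" bits take part in the compare. */
static uint32_t cmp_fast_mask(unsigned int len)
{
	return htole32(~0U >> (32 - len));
}

int main(void)
{
	/* On little endian this prints 0x000000ff, 0x0000ffff, 0x00ffffff;
	 * on big endian the bytes are swapped so the mask still covers the
	 * bytes that carry the value. */
	for (unsigned int len = 8; len <= 24; len += 8)
		printf("len %2u -> mask 0x%08x\n", len, cmp_fast_mask(len));
	return 0;
}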
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 6ee76c804893..d992ca3145fe 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1653,6 +1653,17 @@ struct sctp_association {
 	/* This is the last advertised value of rwnd over a SACK chunk. */
 	__u32 a_rwnd;
 
+	/* Number of bytes by which the rwnd has slopped. The rwnd is allowed
+	 * to slop over a maximum of the association's frag_point.
+	 */
+	__u32 rwnd_over;
+
+	/* Keeps treack of rwnd pressure. This happens when we have
+	 * a window, but not recevie buffer (i.e small packets). This one
+	 * is releases slowly (1 PMTU at a time ).
+	 */
+	__u32 rwnd_press;
+
 	/* This is the sndbuf size in use for the association.
 	 * This corresponds to the sndbuf size for the association,
 	 * as specified in the sk->sndbuf.
@@ -1881,7 +1892,8 @@ void sctp_assoc_update(struct sctp_association *old,
 __u32 sctp_association_get_next_tsn(struct sctp_association *);
 
 void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
-void sctp_assoc_rwnd_update(struct sctp_association *, bool);
+void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
+void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
 void sctp_assoc_set_primary(struct sctp_association *,
 			    struct sctp_transport *);
 void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 32682ae47b3f..116e9c7e19cb 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
| @@ -333,7 +333,7 @@ struct xfrm_state_afinfo { | |||
| 333 | const xfrm_address_t *saddr); | 333 | const xfrm_address_t *saddr); |
| 334 | int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); | 334 | int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); |
| 335 | int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); | 335 | int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); |
| 336 | int (*output)(struct sk_buff *skb); | 336 | int (*output)(struct sock *sk, struct sk_buff *skb); |
| 337 | int (*output_finish)(struct sk_buff *skb); | 337 | int (*output_finish)(struct sk_buff *skb); |
| 338 | int (*extract_input)(struct xfrm_state *x, | 338 | int (*extract_input)(struct xfrm_state *x, |
| 339 | struct sk_buff *skb); | 339 | struct sk_buff *skb); |
| @@ -1540,7 +1540,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) | |||
| 1540 | 1540 | ||
| 1541 | int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); | 1541 | int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
| 1542 | int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); | 1542 | int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); |
| 1543 | int xfrm4_output(struct sk_buff *skb); | 1543 | int xfrm4_output(struct sock *sk, struct sk_buff *skb); |
| 1544 | int xfrm4_output_finish(struct sk_buff *skb); | 1544 | int xfrm4_output_finish(struct sk_buff *skb); |
| 1545 | int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err); | 1545 | int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err); |
| 1546 | int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); | 1546 | int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); |
| @@ -1565,7 +1565,7 @@ __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr); | |||
| 1565 | __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); | 1565 | __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); |
| 1566 | int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); | 1566 | int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
| 1567 | int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); | 1567 | int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); |
| 1568 | int xfrm6_output(struct sk_buff *skb); | 1568 | int xfrm6_output(struct sock *sk, struct sk_buff *skb); |
| 1569 | int xfrm6_output_finish(struct sk_buff *skb); | 1569 | int xfrm6_output_finish(struct sk_buff *skb); |
| 1570 | int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, | 1570 | int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, |
| 1571 | u8 **prevhdr); | 1571 | u8 **prevhdr); |
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index d8d046c0726a..590c37925084 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
| @@ -69,18 +69,17 @@ static void populate_seccomp_data(struct seccomp_data *sd) | |||
| 69 | { | 69 | { |
| 70 | struct task_struct *task = current; | 70 | struct task_struct *task = current; |
| 71 | struct pt_regs *regs = task_pt_regs(task); | 71 | struct pt_regs *regs = task_pt_regs(task); |
| 72 | unsigned long args[6]; | ||
| 72 | 73 | ||
| 73 | sd->nr = syscall_get_nr(task, regs); | 74 | sd->nr = syscall_get_nr(task, regs); |
| 74 | sd->arch = syscall_get_arch(); | 75 | sd->arch = syscall_get_arch(); |
| 75 | 76 | syscall_get_arguments(task, regs, 0, 6, args); | |
| 76 | /* Unroll syscall_get_args to help gcc on arm. */ | 77 | sd->args[0] = args[0]; |
| 77 | syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]); | 78 | sd->args[1] = args[1]; |
| 78 | syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]); | 79 | sd->args[2] = args[2]; |
| 79 | syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]); | 80 | sd->args[3] = args[3]; |
| 80 | syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]); | 81 | sd->args[4] = args[4]; |
| 81 | syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]); | 82 | sd->args[5] = args[5]; |
| 82 | syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]); | ||
| 83 | |||
| 84 | sd->instruction_pointer = KSTK_EIP(task); | 83 | sd->instruction_pointer = KSTK_EIP(task); |
| 85 | } | 84 | } |
| 86 | 85 | ||
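The seccomp hunk replaces six fetches through casted pointers with one syscall_get_arguments() call into a local unsigned long array, followed by plain assignments. The staging matters because the args[] slots in seccomp_data are 64-bit while the fetch helper stores unsigned long values, so on 32-bit targets writing through the old casts only fills part of each slot. A stand-alone sketch of the pattern, with a dummy fetch function standing in for syscall_get_arguments():

#include <stdint.h>
#include <stdio.h>

/* stand-in for the arch helper: fills n unsigned long register values */
static void fetch_args(unsigned long *dst, unsigned int n)
{
    for (unsigned int i = 0; i < n; i++)
        dst[i] = 0x11111111UL * (i + 1);    /* pretend syscall arguments */
}

int main(void)
{
    uint64_t args64[6];                     /* like seccomp_data.args[] */
    unsigned long tmp[6];

    fetch_args(tmp, 6);                     /* correctly typed staging buffer */
    for (int i = 0; i < 6; i++)
        args64[i] = tmp[i];                 /* widen on assignment, well defined */

    printf("arg0=%llx arg5=%llx\n",
           (unsigned long long)args64[0], (unsigned long long)args64[5]);
    return 0;
}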
diff --git a/net/core/dev.c b/net/core/dev.c index 14dac0654f28..5b3042e69f85 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -2284,7 +2284,7 @@ EXPORT_SYMBOL(skb_checksum_help); | |||
| 2284 | __be16 skb_network_protocol(struct sk_buff *skb, int *depth) | 2284 | __be16 skb_network_protocol(struct sk_buff *skb, int *depth) |
| 2285 | { | 2285 | { |
| 2286 | __be16 type = skb->protocol; | 2286 | __be16 type = skb->protocol; |
| 2287 | int vlan_depth = ETH_HLEN; | 2287 | int vlan_depth = skb->mac_len; |
| 2288 | 2288 | ||
| 2289 | /* Tunnel gso handlers can set protocol to ethernet. */ | 2289 | /* Tunnel gso handlers can set protocol to ethernet. */ |
| 2290 | if (type == htons(ETH_P_TEB)) { | 2290 | if (type == htons(ETH_P_TEB)) { |
diff --git a/net/core/dst.c b/net/core/dst.c index ca4231ec7347..80d6286c8b62 100644 --- a/net/core/dst.c +++ b/net/core/dst.c | |||
| @@ -142,12 +142,12 @@ loop: | |||
| 142 | mutex_unlock(&dst_gc_mutex); | 142 | mutex_unlock(&dst_gc_mutex); |
| 143 | } | 143 | } |
| 144 | 144 | ||
| 145 | int dst_discard(struct sk_buff *skb) | 145 | int dst_discard_sk(struct sock *sk, struct sk_buff *skb) |
| 146 | { | 146 | { |
| 147 | kfree_skb(skb); | 147 | kfree_skb(skb); |
| 148 | return 0; | 148 | return 0; |
| 149 | } | 149 | } |
| 150 | EXPORT_SYMBOL(dst_discard); | 150 | EXPORT_SYMBOL(dst_discard_sk); |
| 151 | 151 | ||
| 152 | const u32 dst_default_metrics[RTAX_MAX + 1] = { | 152 | const u32 dst_default_metrics[RTAX_MAX + 1] = { |
| 153 | /* This initializer is needed to force linker to place this variable | 153 | /* This initializer is needed to force linker to place this variable |
| @@ -184,7 +184,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev, | |||
| 184 | dst->xfrm = NULL; | 184 | dst->xfrm = NULL; |
| 185 | #endif | 185 | #endif |
| 186 | dst->input = dst_discard; | 186 | dst->input = dst_discard; |
| 187 | dst->output = dst_discard; | 187 | dst->output = dst_discard_sk; |
| 188 | dst->error = 0; | 188 | dst->error = 0; |
| 189 | dst->obsolete = initial_obsolete; | 189 | dst->obsolete = initial_obsolete; |
| 190 | dst->header_len = 0; | 190 | dst->header_len = 0; |
| @@ -209,8 +209,10 @@ static void ___dst_free(struct dst_entry *dst) | |||
| 209 | /* The first case (dev==NULL) is required, when | 209 | /* The first case (dev==NULL) is required, when |
| 210 | protocol module is unloaded. | 210 | protocol module is unloaded. |
| 211 | */ | 211 | */ |
| 212 | if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) | 212 | if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) { |
| 213 | dst->input = dst->output = dst_discard; | 213 | dst->input = dst_discard; |
| 214 | dst->output = dst_discard_sk; | ||
| 215 | } | ||
| 214 | dst->obsolete = DST_OBSOLETE_DEAD; | 216 | dst->obsolete = DST_OBSOLETE_DEAD; |
| 215 | } | 217 | } |
| 216 | 218 | ||
| @@ -361,7 +363,8 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev, | |||
| 361 | return; | 363 | return; |
| 362 | 364 | ||
| 363 | if (!unregister) { | 365 | if (!unregister) { |
| 364 | dst->input = dst->output = dst_discard; | 366 | dst->input = dst_discard; |
| 367 | dst->output = dst_discard_sk; | ||
| 365 | } else { | 368 | } else { |
| 366 | dst->dev = dev_net(dst->dev)->loopback_dev; | 369 | dst->dev = dev_net(dst->dev)->loopback_dev; |
| 367 | dev_hold(dst->dev); | 370 | dev_hold(dst->dev); |
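Most of the churn in this series is the mechanical change visible in dst.c: output callbacks now take the transmitting socket as an explicit argument rather than reading skb->sk, which in tunnel paths may point at a different socket than the one actually sending (see the note added to ip_queue_xmit() further down). A compressed user-space sketch of that callback-table change; every type and name below is a simplified stand-in, not a kernel structure:

#include <stdio.h>

struct sock    { int mark; };
struct sk_buff { struct sock *sk; int len; };

struct dst_ops {
    /* old: int (*output)(struct sk_buff *skb); */
    int (*output)(struct sock *sk, struct sk_buff *skb);
};

static int discard_sk(struct sock *sk, struct sk_buff *skb)
{
    /* the callback sees the transmitting socket even when skb->sk
     * belongs to someone else, e.g. a tunnel's user socket
     */
    printf("dropping %d bytes sent via mark %d (skb owner mark %d)\n",
           skb->len, sk->mark, skb->sk->mark);
    return 0;
}

int main(void)
{
    struct dst_ops ops = { .output = discard_sk };
    struct sock transmitting = { .mark = 1 };
    struct sock owner        = { .mark = 2 };
    struct sk_buff skb       = { .sk = &owner, .len = 64 };

    /* the caller decides which socket the output path should see */
    return ops.output(&transmitting, &skb);
}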
diff --git a/net/core/filter.c b/net/core/filter.c index e08b3822c72a..cd58614660cf 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -600,6 +600,9 @@ static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) | |||
| 600 | if (skb_is_nonlinear(skb)) | 600 | if (skb_is_nonlinear(skb)) |
| 601 | return 0; | 601 | return 0; |
| 602 | 602 | ||
| 603 | if (skb->len < sizeof(struct nlattr)) | ||
| 604 | return 0; | ||
| 605 | |||
| 603 | if (A > skb->len - sizeof(struct nlattr)) | 606 | if (A > skb->len - sizeof(struct nlattr)) |
| 604 | return 0; | 607 | return 0; |
| 605 | 608 | ||
| @@ -618,11 +621,14 @@ static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) | |||
| 618 | if (skb_is_nonlinear(skb)) | 621 | if (skb_is_nonlinear(skb)) |
| 619 | return 0; | 622 | return 0; |
| 620 | 623 | ||
| 624 | if (skb->len < sizeof(struct nlattr)) | ||
| 625 | return 0; | ||
| 626 | |||
| 621 | if (A > skb->len - sizeof(struct nlattr)) | 627 | if (A > skb->len - sizeof(struct nlattr)) |
| 622 | return 0; | 628 | return 0; |
| 623 | 629 | ||
| 624 | nla = (struct nlattr *) &skb->data[A]; | 630 | nla = (struct nlattr *) &skb->data[A]; |
| 625 | if (nla->nla_len > A - skb->len) | 631 | if (nla->nla_len > skb->len - A) |
| 626 | return 0; | 632 | return 0; |
| 627 | 633 | ||
| 628 | nla = nla_find_nested(nla, X); | 634 | nla = nla_find_nested(nla, X); |
| @@ -1737,7 +1743,6 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to) | |||
| 1737 | [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS, | 1743 | [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS, |
| 1738 | [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS, | 1744 | [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS, |
| 1739 | [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS, | 1745 | [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS, |
| 1740 | [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS, | ||
| 1741 | [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS, | 1746 | [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS, |
| 1742 | [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS, | 1747 | [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS, |
| 1743 | [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS, | 1748 | [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS, |
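Both filter.c checks above guard unsigned arithmetic: skb->len - sizeof(struct nlattr) wraps to a huge value when the packet is shorter than one attribute header, and the nested-lookup length test had its operands reversed (A - skb->len instead of skb->len - A), so it could never reject an oversized attribute. A self-contained sketch of the corrected bounds discipline on a plain byte buffer; the struct layout and helper name are illustrative only:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct attr_hdr {
    uint16_t len;   /* total attribute length, header included */
    uint16_t type;
};

static int attr_ok(const uint8_t *buf, size_t buf_len, size_t off)
{
    struct attr_hdr a;

    if (buf_len < sizeof(a))        /* else buf_len - sizeof(a) wraps */
        return 0;
    if (off > buf_len - sizeof(a))  /* the header itself must fit */
        return 0;

    memcpy(&a, buf + off, sizeof(a));   /* copy out, no alignment worries */
    if (a.len > buf_len - off)          /* payload must fit in the remainder */
        return 0;
    return 1;
}

int main(void)
{
    uint8_t buf[8];
    struct attr_hdr hdr = { .len = 8, .type = 1 };

    memcpy(buf, &hdr, sizeof(hdr));     /* header followed by 4 payload bytes */
    printf("offset 0: %s\n", attr_ok(buf, sizeof(buf), 0) ? "ok" : "rejected");
    printf("offset 6: %s\n", attr_ok(buf, sizeof(buf), 6) ? "ok" : "rejected");
    return 0;
}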
diff --git a/net/dccp/output.c b/net/dccp/output.c index 8876078859da..0248e8a3460c 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
| @@ -138,7 +138,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
| 138 | 138 | ||
| 139 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); | 139 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); |
| 140 | 140 | ||
| 141 | err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl); | 141 | err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); |
| 142 | return net_xmit_eval(err); | 142 | return net_xmit_eval(err); |
| 143 | } | 143 | } |
| 144 | return -ENOBUFS; | 144 | return -ENOBUFS; |
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index ce0cbbfe0f43..daccc4a36d80 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
| @@ -752,7 +752,7 @@ static int dn_to_neigh_output(struct sk_buff *skb) | |||
| 752 | return n->output(n, skb); | 752 | return n->output(n, skb); |
| 753 | } | 753 | } |
| 754 | 754 | ||
| 755 | static int dn_output(struct sk_buff *skb) | 755 | static int dn_output(struct sock *sk, struct sk_buff *skb) |
| 756 | { | 756 | { |
| 757 | struct dst_entry *dst = skb_dst(skb); | 757 | struct dst_entry *dst = skb_dst(skb); |
| 758 | struct dn_route *rt = (struct dn_route *)dst; | 758 | struct dn_route *rt = (struct dn_route *)dst; |
| @@ -838,6 +838,18 @@ drop: | |||
| 838 | * Used to catch bugs. This should never normally get | 838 | * Used to catch bugs. This should never normally get |
| 839 | * called. | 839 | * called. |
| 840 | */ | 840 | */ |
| 841 | static int dn_rt_bug_sk(struct sock *sk, struct sk_buff *skb) | ||
| 842 | { | ||
| 843 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | ||
| 844 | |||
| 845 | net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n", | ||
| 846 | le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); | ||
| 847 | |||
| 848 | kfree_skb(skb); | ||
| 849 | |||
| 850 | return NET_RX_DROP; | ||
| 851 | } | ||
| 852 | |||
| 841 | static int dn_rt_bug(struct sk_buff *skb) | 853 | static int dn_rt_bug(struct sk_buff *skb) |
| 842 | { | 854 | { |
| 843 | struct dn_skb_cb *cb = DN_SKB_CB(skb); | 855 | struct dn_skb_cb *cb = DN_SKB_CB(skb); |
| @@ -1463,7 +1475,7 @@ make_route: | |||
| 1463 | 1475 | ||
| 1464 | rt->n = neigh; | 1476 | rt->n = neigh; |
| 1465 | rt->dst.lastuse = jiffies; | 1477 | rt->dst.lastuse = jiffies; |
| 1466 | rt->dst.output = dn_rt_bug; | 1478 | rt->dst.output = dn_rt_bug_sk; |
| 1467 | switch (res.type) { | 1479 | switch (res.type) { |
| 1468 | case RTN_UNICAST: | 1480 | case RTN_UNICAST: |
| 1469 | rt->dst.input = dn_forward; | 1481 | rt->dst.input = dn_forward; |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 1a0755fea491..1cbeba5edff9 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
| @@ -101,17 +101,17 @@ int __ip_local_out(struct sk_buff *skb) | |||
| 101 | skb_dst(skb)->dev, dst_output); | 101 | skb_dst(skb)->dev, dst_output); |
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | int ip_local_out(struct sk_buff *skb) | 104 | int ip_local_out_sk(struct sock *sk, struct sk_buff *skb) |
| 105 | { | 105 | { |
| 106 | int err; | 106 | int err; |
| 107 | 107 | ||
| 108 | err = __ip_local_out(skb); | 108 | err = __ip_local_out(skb); |
| 109 | if (likely(err == 1)) | 109 | if (likely(err == 1)) |
| 110 | err = dst_output(skb); | 110 | err = dst_output_sk(sk, skb); |
| 111 | 111 | ||
| 112 | return err; | 112 | return err; |
| 113 | } | 113 | } |
| 114 | EXPORT_SYMBOL_GPL(ip_local_out); | 114 | EXPORT_SYMBOL_GPL(ip_local_out_sk); |
| 115 | 115 | ||
| 116 | static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst) | 116 | static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst) |
| 117 | { | 117 | { |
| @@ -226,9 +226,8 @@ static int ip_finish_output(struct sk_buff *skb) | |||
| 226 | return ip_finish_output2(skb); | 226 | return ip_finish_output2(skb); |
| 227 | } | 227 | } |
| 228 | 228 | ||
| 229 | int ip_mc_output(struct sk_buff *skb) | 229 | int ip_mc_output(struct sock *sk, struct sk_buff *skb) |
| 230 | { | 230 | { |
| 231 | struct sock *sk = skb->sk; | ||
| 232 | struct rtable *rt = skb_rtable(skb); | 231 | struct rtable *rt = skb_rtable(skb); |
| 233 | struct net_device *dev = rt->dst.dev; | 232 | struct net_device *dev = rt->dst.dev; |
| 234 | 233 | ||
| @@ -287,7 +286,7 @@ int ip_mc_output(struct sk_buff *skb) | |||
| 287 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 286 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
| 288 | } | 287 | } |
| 289 | 288 | ||
| 290 | int ip_output(struct sk_buff *skb) | 289 | int ip_output(struct sock *sk, struct sk_buff *skb) |
| 291 | { | 290 | { |
| 292 | struct net_device *dev = skb_dst(skb)->dev; | 291 | struct net_device *dev = skb_dst(skb)->dev; |
| 293 | 292 | ||
| @@ -315,9 +314,9 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4) | |||
| 315 | sizeof(fl4->saddr) + sizeof(fl4->daddr)); | 314 | sizeof(fl4->saddr) + sizeof(fl4->daddr)); |
| 316 | } | 315 | } |
| 317 | 316 | ||
| 318 | int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl) | 317 | /* Note: skb->sk can be different from sk, in case of tunnels */ |
| 318 | int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) | ||
| 319 | { | 319 | { |
| 320 | struct sock *sk = skb->sk; | ||
| 321 | struct inet_sock *inet = inet_sk(sk); | 320 | struct inet_sock *inet = inet_sk(sk); |
| 322 | struct ip_options_rcu *inet_opt; | 321 | struct ip_options_rcu *inet_opt; |
| 323 | struct flowi4 *fl4; | 322 | struct flowi4 *fl4; |
| @@ -389,6 +388,7 @@ packet_routed: | |||
| 389 | ip_select_ident_more(skb, &rt->dst, sk, | 388 | ip_select_ident_more(skb, &rt->dst, sk, |
| 390 | (skb_shinfo(skb)->gso_segs ?: 1) - 1); | 389 | (skb_shinfo(skb)->gso_segs ?: 1) - 1); |
| 391 | 390 | ||
| 391 | /* TODO : should we use skb->sk here instead of sk ? */ | ||
| 392 | skb->priority = sk->sk_priority; | 392 | skb->priority = sk->sk_priority; |
| 393 | skb->mark = sk->sk_mark; | 393 | skb->mark = sk->sk_mark; |
| 394 | 394 | ||
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index e77381d1df9a..484d0ce27ef7 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
| @@ -670,7 +670,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
| 670 | return; | 670 | return; |
| 671 | } | 671 | } |
| 672 | 672 | ||
| 673 | err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol, | 673 | err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, protocol, |
| 674 | tos, ttl, df, !net_eq(tunnel->net, dev_net(dev))); | 674 | tos, ttl, df, !net_eq(tunnel->net, dev_net(dev))); |
| 675 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); | 675 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); |
| 676 | 676 | ||
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index e0c2b1d2ea4e..bcf206c79005 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
| @@ -46,7 +46,7 @@ | |||
| 46 | #include <net/netns/generic.h> | 46 | #include <net/netns/generic.h> |
| 47 | #include <net/rtnetlink.h> | 47 | #include <net/rtnetlink.h> |
| 48 | 48 | ||
| 49 | int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, | 49 | int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, |
| 50 | __be32 src, __be32 dst, __u8 proto, | 50 | __be32 src, __be32 dst, __u8 proto, |
| 51 | __u8 tos, __u8 ttl, __be16 df, bool xnet) | 51 | __u8 tos, __u8 ttl, __be16 df, bool xnet) |
| 52 | { | 52 | { |
| @@ -76,7 +76,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, | |||
| 76 | iph->ttl = ttl; | 76 | iph->ttl = ttl; |
| 77 | __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); | 77 | __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); |
| 78 | 78 | ||
| 79 | err = ip_local_out(skb); | 79 | err = ip_local_out_sk(sk, skb); |
| 80 | if (unlikely(net_xmit_eval(err))) | 80 | if (unlikely(net_xmit_eval(err))) |
| 81 | pkt_len = 0; | 81 | pkt_len = 0; |
| 82 | return pkt_len; | 82 | return pkt_len; |
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index f4b19e5dde54..8210964a9f19 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
| @@ -252,26 +252,33 @@ int ping_init_sock(struct sock *sk) | |||
| 252 | { | 252 | { |
| 253 | struct net *net = sock_net(sk); | 253 | struct net *net = sock_net(sk); |
| 254 | kgid_t group = current_egid(); | 254 | kgid_t group = current_egid(); |
| 255 | struct group_info *group_info = get_current_groups(); | 255 | struct group_info *group_info; |
| 256 | int i, j, count = group_info->ngroups; | 256 | int i, j, count; |
| 257 | kgid_t low, high; | 257 | kgid_t low, high; |
| 258 | int ret = 0; | ||
| 258 | 259 | ||
| 259 | inet_get_ping_group_range_net(net, &low, &high); | 260 | inet_get_ping_group_range_net(net, &low, &high); |
| 260 | if (gid_lte(low, group) && gid_lte(group, high)) | 261 | if (gid_lte(low, group) && gid_lte(group, high)) |
| 261 | return 0; | 262 | return 0; |
| 262 | 263 | ||
| 264 | group_info = get_current_groups(); | ||
| 265 | count = group_info->ngroups; | ||
| 263 | for (i = 0; i < group_info->nblocks; i++) { | 266 | for (i = 0; i < group_info->nblocks; i++) { |
| 264 | int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); | 267 | int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); |
| 265 | for (j = 0; j < cp_count; j++) { | 268 | for (j = 0; j < cp_count; j++) { |
| 266 | kgid_t gid = group_info->blocks[i][j]; | 269 | kgid_t gid = group_info->blocks[i][j]; |
| 267 | if (gid_lte(low, gid) && gid_lte(gid, high)) | 270 | if (gid_lte(low, gid) && gid_lte(gid, high)) |
| 268 | return 0; | 271 | goto out_release_group; |
| 269 | } | 272 | } |
| 270 | 273 | ||
| 271 | count -= cp_count; | 274 | count -= cp_count; |
| 272 | } | 275 | } |
| 273 | 276 | ||
| 274 | return -EACCES; | 277 | ret = -EACCES; |
| 278 | |||
| 279 | out_release_group: | ||
| 280 | put_group_info(group_info); | ||
| 281 | return ret; | ||
| 275 | } | 282 | } |
| 276 | EXPORT_SYMBOL_GPL(ping_init_sock); | 283 | EXPORT_SYMBOL_GPL(ping_init_sock); |
| 277 | 284 | ||
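The ping_init_sock() rework above is a reference-count fix: get_current_groups() pins the current group list, and the old code never dropped that pin on any path, including the early return 0. The new layout takes the reference only after the fast-path check and routes every remaining exit through put_group_info(). A toy sketch of the acquire/release discipline, with a dummy refcounted object standing in for struct group_info:

#include <stdio.h>

struct obj {
    int refcount;
    int value;
};

static struct obj global = { .refcount = 1, .value = 42 };

static struct obj *obj_get(void)        /* like get_current_groups() */
{
    global.refcount++;
    return &global;
}

static void obj_put(struct obj *o)      /* like put_group_info() */
{
    o->refcount--;
}

static int check_value(int fast_path_ok)
{
    struct obj *o;
    int ret = 0;

    if (fast_path_ok)           /* no reference taken, nothing to drop */
        return 0;

    o = obj_get();              /* slow path: pin the object... */
    if (o->value != 42)
        ret = -1;
    obj_put(o);                 /* ...and always unpin before returning */
    return ret;
}

int main(void)
{
    check_value(1);
    check_value(0);
    printf("refcount after both calls: %d\n", global.refcount); /* still 1 */
    return 0;
}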
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 34d094cadb11..1485aafcad59 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -1129,7 +1129,7 @@ static void ipv4_link_failure(struct sk_buff *skb) | |||
| 1129 | dst_set_expires(&rt->dst, 0); | 1129 | dst_set_expires(&rt->dst, 0); |
| 1130 | } | 1130 | } |
| 1131 | 1131 | ||
| 1132 | static int ip_rt_bug(struct sk_buff *skb) | 1132 | static int ip_rt_bug(struct sock *sk, struct sk_buff *skb) |
| 1133 | { | 1133 | { |
| 1134 | pr_debug("%s: %pI4 -> %pI4, %s\n", | 1134 | pr_debug("%s: %pI4 -> %pI4, %s\n", |
| 1135 | __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, | 1135 | __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, |
| @@ -2218,7 +2218,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or | |||
| 2218 | 2218 | ||
| 2219 | new->__use = 1; | 2219 | new->__use = 1; |
| 2220 | new->input = dst_discard; | 2220 | new->input = dst_discard; |
| 2221 | new->output = dst_discard; | 2221 | new->output = dst_discard_sk; |
| 2222 | 2222 | ||
| 2223 | new->dev = ort->dst.dev; | 2223 | new->dev = ort->dst.dev; |
| 2224 | if (new->dev) | 2224 | if (new->dev) |
| @@ -2357,7 +2357,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, | |||
| 2357 | } | 2357 | } |
| 2358 | } else | 2358 | } else |
| 2359 | #endif | 2359 | #endif |
| 2360 | if (nla_put_u32(skb, RTA_IIF, rt->rt_iif)) | 2360 | if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex)) |
| 2361 | goto nla_put_failure; | 2361 | goto nla_put_failure; |
| 2362 | } | 2362 | } |
| 2363 | 2363 | ||
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 699fb102e971..025e25093984 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -981,7 +981,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
| 981 | TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, | 981 | TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, |
| 982 | tcp_skb_pcount(skb)); | 982 | tcp_skb_pcount(skb)); |
| 983 | 983 | ||
| 984 | err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl); | 984 | err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); |
| 985 | if (likely(err <= 0)) | 985 | if (likely(err <= 0)) |
| 986 | return err; | 986 | return err; |
| 987 | 987 | ||
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index baa0f63731fd..40e701f2e1e0 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
| @@ -86,7 +86,7 @@ int xfrm4_output_finish(struct sk_buff *skb) | |||
| 86 | return xfrm_output(skb); | 86 | return xfrm_output(skb); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | int xfrm4_output(struct sk_buff *skb) | 89 | int xfrm4_output(struct sock *sk, struct sk_buff *skb) |
| 90 | { | 90 | { |
| 91 | struct dst_entry *dst = skb_dst(skb); | 91 | struct dst_entry *dst = skb_dst(skb); |
| 92 | struct xfrm_state *x = dst->xfrm; | 92 | struct xfrm_state *x = dst->xfrm; |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index c9138189415a..d4ade34ab375 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
| @@ -224,9 +224,8 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk, | |||
| 224 | return dst; | 224 | return dst; |
| 225 | } | 225 | } |
| 226 | 226 | ||
| 227 | int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused) | 227 | int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused) |
| 228 | { | 228 | { |
| 229 | struct sock *sk = skb->sk; | ||
| 230 | struct ipv6_pinfo *np = inet6_sk(sk); | 229 | struct ipv6_pinfo *np = inet6_sk(sk); |
| 231 | struct flowi6 fl6; | 230 | struct flowi6 fl6; |
| 232 | struct dst_entry *dst; | 231 | struct dst_entry *dst; |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index c98338b81d30..9d921462b57f 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -1559,6 +1559,15 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], | |||
| 1559 | return 0; | 1559 | return 0; |
| 1560 | } | 1560 | } |
| 1561 | 1561 | ||
| 1562 | static void ip6gre_dellink(struct net_device *dev, struct list_head *head) | ||
| 1563 | { | ||
| 1564 | struct net *net = dev_net(dev); | ||
| 1565 | struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); | ||
| 1566 | |||
| 1567 | if (dev != ign->fb_tunnel_dev) | ||
| 1568 | unregister_netdevice_queue(dev, head); | ||
| 1569 | } | ||
| 1570 | |||
| 1562 | static size_t ip6gre_get_size(const struct net_device *dev) | 1571 | static size_t ip6gre_get_size(const struct net_device *dev) |
| 1563 | { | 1572 | { |
| 1564 | return | 1573 | return |
| @@ -1636,6 +1645,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = { | |||
| 1636 | .validate = ip6gre_tunnel_validate, | 1645 | .validate = ip6gre_tunnel_validate, |
| 1637 | .newlink = ip6gre_newlink, | 1646 | .newlink = ip6gre_newlink, |
| 1638 | .changelink = ip6gre_changelink, | 1647 | .changelink = ip6gre_changelink, |
| 1648 | .dellink = ip6gre_dellink, | ||
| 1639 | .get_size = ip6gre_get_size, | 1649 | .get_size = ip6gre_get_size, |
| 1640 | .fill_info = ip6gre_fill_info, | 1650 | .fill_info = ip6gre_fill_info, |
| 1641 | }; | 1651 | }; |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 3284d61577c0..40e7581374f7 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -132,7 +132,7 @@ static int ip6_finish_output(struct sk_buff *skb) | |||
| 132 | return ip6_finish_output2(skb); | 132 | return ip6_finish_output2(skb); |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | int ip6_output(struct sk_buff *skb) | 135 | int ip6_output(struct sock *sk, struct sk_buff *skb) |
| 136 | { | 136 | { |
| 137 | struct net_device *dev = skb_dst(skb)->dev; | 137 | struct net_device *dev = skb_dst(skb)->dev; |
| 138 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); | 138 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 5015c50a5ba7..4011617cca68 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -84,9 +84,9 @@ static void ip6_dst_ifdown(struct dst_entry *, | |||
| 84 | static int ip6_dst_gc(struct dst_ops *ops); | 84 | static int ip6_dst_gc(struct dst_ops *ops); |
| 85 | 85 | ||
| 86 | static int ip6_pkt_discard(struct sk_buff *skb); | 86 | static int ip6_pkt_discard(struct sk_buff *skb); |
| 87 | static int ip6_pkt_discard_out(struct sk_buff *skb); | 87 | static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb); |
| 88 | static int ip6_pkt_prohibit(struct sk_buff *skb); | 88 | static int ip6_pkt_prohibit(struct sk_buff *skb); |
| 89 | static int ip6_pkt_prohibit_out(struct sk_buff *skb); | 89 | static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb); |
| 90 | static void ip6_link_failure(struct sk_buff *skb); | 90 | static void ip6_link_failure(struct sk_buff *skb); |
| 91 | static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, | 91 | static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, |
| 92 | struct sk_buff *skb, u32 mtu); | 92 | struct sk_buff *skb, u32 mtu); |
| @@ -290,7 +290,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = { | |||
| 290 | .obsolete = DST_OBSOLETE_FORCE_CHK, | 290 | .obsolete = DST_OBSOLETE_FORCE_CHK, |
| 291 | .error = -EINVAL, | 291 | .error = -EINVAL, |
| 292 | .input = dst_discard, | 292 | .input = dst_discard, |
| 293 | .output = dst_discard, | 293 | .output = dst_discard_sk, |
| 294 | }, | 294 | }, |
| 295 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), | 295 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), |
| 296 | .rt6i_protocol = RTPROT_KERNEL, | 296 | .rt6i_protocol = RTPROT_KERNEL, |
| @@ -1058,7 +1058,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori | |||
| 1058 | 1058 | ||
| 1059 | new->__use = 1; | 1059 | new->__use = 1; |
| 1060 | new->input = dst_discard; | 1060 | new->input = dst_discard; |
| 1061 | new->output = dst_discard; | 1061 | new->output = dst_discard_sk; |
| 1062 | 1062 | ||
| 1063 | if (dst_metrics_read_only(&ort->dst)) | 1063 | if (dst_metrics_read_only(&ort->dst)) |
| 1064 | new->_metrics = ort->dst._metrics; | 1064 | new->_metrics = ort->dst._metrics; |
| @@ -1338,7 +1338,7 @@ static unsigned int ip6_mtu(const struct dst_entry *dst) | |||
| 1338 | unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); | 1338 | unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); |
| 1339 | 1339 | ||
| 1340 | if (mtu) | 1340 | if (mtu) |
| 1341 | return mtu; | 1341 | goto out; |
| 1342 | 1342 | ||
| 1343 | mtu = IPV6_MIN_MTU; | 1343 | mtu = IPV6_MIN_MTU; |
| 1344 | 1344 | ||
| @@ -1348,7 +1348,8 @@ static unsigned int ip6_mtu(const struct dst_entry *dst) | |||
| 1348 | mtu = idev->cnf.mtu6; | 1348 | mtu = idev->cnf.mtu6; |
| 1349 | rcu_read_unlock(); | 1349 | rcu_read_unlock(); |
| 1350 | 1350 | ||
| 1351 | return mtu; | 1351 | out: |
| 1352 | return min_t(unsigned int, mtu, IP6_MAX_MTU); | ||
| 1352 | } | 1353 | } |
| 1353 | 1354 | ||
| 1354 | static struct dst_entry *icmp6_dst_gc_list; | 1355 | static struct dst_entry *icmp6_dst_gc_list; |
| @@ -1576,7 +1577,7 @@ int ip6_route_add(struct fib6_config *cfg) | |||
| 1576 | switch (cfg->fc_type) { | 1577 | switch (cfg->fc_type) { |
| 1577 | case RTN_BLACKHOLE: | 1578 | case RTN_BLACKHOLE: |
| 1578 | rt->dst.error = -EINVAL; | 1579 | rt->dst.error = -EINVAL; |
| 1579 | rt->dst.output = dst_discard; | 1580 | rt->dst.output = dst_discard_sk; |
| 1580 | rt->dst.input = dst_discard; | 1581 | rt->dst.input = dst_discard; |
| 1581 | break; | 1582 | break; |
| 1582 | case RTN_PROHIBIT: | 1583 | case RTN_PROHIBIT: |
| @@ -2128,7 +2129,7 @@ static int ip6_pkt_discard(struct sk_buff *skb) | |||
| 2128 | return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES); | 2129 | return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES); |
| 2129 | } | 2130 | } |
| 2130 | 2131 | ||
| 2131 | static int ip6_pkt_discard_out(struct sk_buff *skb) | 2132 | static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb) |
| 2132 | { | 2133 | { |
| 2133 | skb->dev = skb_dst(skb)->dev; | 2134 | skb->dev = skb_dst(skb)->dev; |
| 2134 | return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); | 2135 | return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); |
| @@ -2139,7 +2140,7 @@ static int ip6_pkt_prohibit(struct sk_buff *skb) | |||
| 2139 | return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES); | 2140 | return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES); |
| 2140 | } | 2141 | } |
| 2141 | 2142 | ||
| 2142 | static int ip6_pkt_prohibit_out(struct sk_buff *skb) | 2143 | static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb) |
| 2143 | { | 2144 | { |
| 2144 | skb->dev = skb_dst(skb)->dev; | 2145 | skb->dev = skb_dst(skb)->dev; |
| 2145 | return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); | 2146 | return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1693c8d885f0..8da8268d65f8 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
| @@ -974,8 +974,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
| 974 | goto out; | 974 | goto out; |
| 975 | } | 975 | } |
| 976 | 976 | ||
| 977 | err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, | 977 | err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, |
| 978 | ttl, df, !net_eq(tunnel->net, dev_net(dev))); | 978 | IPPROTO_IPV6, tos, ttl, df, |
| 979 | !net_eq(tunnel->net, dev_net(dev))); | ||
| 979 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); | 980 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); |
| 980 | return NETDEV_TX_OK; | 981 | return NETDEV_TX_OK; |
| 981 | 982 | ||
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 6cd625e37706..19ef329bdbf8 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
| @@ -163,7 +163,7 @@ static int __xfrm6_output(struct sk_buff *skb) | |||
| 163 | return x->outer_mode->afinfo->output_finish(skb); | 163 | return x->outer_mode->afinfo->output_finish(skb); |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | int xfrm6_output(struct sk_buff *skb) | 166 | int xfrm6_output(struct sock *sk, struct sk_buff *skb) |
| 167 | { | 167 | { |
| 168 | return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, | 168 | return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, |
| 169 | skb_dst(skb)->dev, __xfrm6_output); | 169 | skb_dst(skb)->dev, __xfrm6_output); |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 47f7a5490555..a4e37d7158dc 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
| @@ -1131,10 +1131,10 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, | |||
| 1131 | skb->local_df = 1; | 1131 | skb->local_df = 1; |
| 1132 | #if IS_ENABLED(CONFIG_IPV6) | 1132 | #if IS_ENABLED(CONFIG_IPV6) |
| 1133 | if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped) | 1133 | if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped) |
| 1134 | error = inet6_csk_xmit(skb, NULL); | 1134 | error = inet6_csk_xmit(tunnel->sock, skb, NULL); |
| 1135 | else | 1135 | else |
| 1136 | #endif | 1136 | #endif |
| 1137 | error = ip_queue_xmit(skb, fl); | 1137 | error = ip_queue_xmit(tunnel->sock, skb, fl); |
| 1138 | 1138 | ||
| 1139 | /* Update stats */ | 1139 | /* Update stats */ |
| 1140 | if (error >= 0) { | 1140 | if (error >= 0) { |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 0b44d855269c..3397fe6897c0 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
| @@ -487,7 +487,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m | |||
| 487 | 487 | ||
| 488 | xmit: | 488 | xmit: |
| 489 | /* Queue the packet to IP for output */ | 489 | /* Queue the packet to IP for output */ |
| 490 | rc = ip_queue_xmit(skb, &inet->cork.fl); | 490 | rc = ip_queue_xmit(sk, skb, &inet->cork.fl); |
| 491 | rcu_read_unlock(); | 491 | rcu_read_unlock(); |
| 492 | 492 | ||
| 493 | error: | 493 | error: |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 6dba48efe01e..75421f2ba8be 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
| @@ -1795,6 +1795,7 @@ int nf_conntrack_init_net(struct net *net) | |||
| 1795 | int cpu; | 1795 | int cpu; |
| 1796 | 1796 | ||
| 1797 | atomic_set(&net->ct.count, 0); | 1797 | atomic_set(&net->ct.count, 0); |
| 1798 | seqcount_init(&net->ct.generation); | ||
| 1798 | 1799 | ||
| 1799 | net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu); | 1800 | net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu); |
| 1800 | if (!net->ct.pcpu_lists) | 1801 | if (!net->ct.pcpu_lists) |
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 7bd03decd36c..825c3e3f8305 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c | |||
| @@ -605,32 +605,14 @@ static struct nf_conntrack_helper pptp __read_mostly = { | |||
| 605 | .expect_policy = &pptp_exp_policy, | 605 | .expect_policy = &pptp_exp_policy, |
| 606 | }; | 606 | }; |
| 607 | 607 | ||
| 608 | static void nf_conntrack_pptp_net_exit(struct net *net) | ||
| 609 | { | ||
| 610 | nf_ct_gre_keymap_flush(net); | ||
| 611 | } | ||
| 612 | |||
| 613 | static struct pernet_operations nf_conntrack_pptp_net_ops = { | ||
| 614 | .exit = nf_conntrack_pptp_net_exit, | ||
| 615 | }; | ||
| 616 | |||
| 617 | static int __init nf_conntrack_pptp_init(void) | 608 | static int __init nf_conntrack_pptp_init(void) |
| 618 | { | 609 | { |
| 619 | int rv; | 610 | return nf_conntrack_helper_register(&pptp); |
| 620 | |||
| 621 | rv = nf_conntrack_helper_register(&pptp); | ||
| 622 | if (rv < 0) | ||
| 623 | return rv; | ||
| 624 | rv = register_pernet_subsys(&nf_conntrack_pptp_net_ops); | ||
| 625 | if (rv < 0) | ||
| 626 | nf_conntrack_helper_unregister(&pptp); | ||
| 627 | return rv; | ||
| 628 | } | 611 | } |
| 629 | 612 | ||
| 630 | static void __exit nf_conntrack_pptp_fini(void) | 613 | static void __exit nf_conntrack_pptp_fini(void) |
| 631 | { | 614 | { |
| 632 | nf_conntrack_helper_unregister(&pptp); | 615 | nf_conntrack_helper_unregister(&pptp); |
| 633 | unregister_pernet_subsys(&nf_conntrack_pptp_net_ops); | ||
| 634 | } | 616 | } |
| 635 | 617 | ||
| 636 | module_init(nf_conntrack_pptp_init); | 618 | module_init(nf_conntrack_pptp_init); |
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index 9d9c0dade602..d5665739e3b1 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
| @@ -66,7 +66,7 @@ static inline struct netns_proto_gre *gre_pernet(struct net *net) | |||
| 66 | return net_generic(net, proto_gre_net_id); | 66 | return net_generic(net, proto_gre_net_id); |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | void nf_ct_gre_keymap_flush(struct net *net) | 69 | static void nf_ct_gre_keymap_flush(struct net *net) |
| 70 | { | 70 | { |
| 71 | struct netns_proto_gre *net_gre = gre_pernet(net); | 71 | struct netns_proto_gre *net_gre = gre_pernet(net); |
| 72 | struct nf_ct_gre_keymap *km, *tmp; | 72 | struct nf_ct_gre_keymap *km, *tmp; |
| @@ -78,7 +78,6 @@ void nf_ct_gre_keymap_flush(struct net *net) | |||
| 78 | } | 78 | } |
| 79 | write_unlock_bh(&net_gre->keymap_lock); | 79 | write_unlock_bh(&net_gre->keymap_lock); |
| 80 | } | 80 | } |
| 81 | EXPORT_SYMBOL(nf_ct_gre_keymap_flush); | ||
| 82 | 81 | ||
| 83 | static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km, | 82 | static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km, |
| 84 | const struct nf_conntrack_tuple *t) | 83 | const struct nf_conntrack_tuple *t) |
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 90998a6ff8b9..804105391b9a 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c | |||
| @@ -25,9 +25,8 @@ static void nft_cmp_fast_eval(const struct nft_expr *expr, | |||
| 25 | struct nft_data data[NFT_REG_MAX + 1]) | 25 | struct nft_data data[NFT_REG_MAX + 1]) |
| 26 | { | 26 | { |
| 27 | const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr); | 27 | const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr); |
| 28 | u32 mask; | 28 | u32 mask = nft_cmp_fast_mask(priv->len); |
| 29 | 29 | ||
| 30 | mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - priv->len); | ||
| 31 | if ((data[priv->sreg].data[0] & mask) == priv->data) | 30 | if ((data[priv->sreg].data[0] & mask) == priv->data) |
| 32 | return; | 31 | return; |
| 33 | data[NFT_REG_VERDICT].verdict = NFT_BREAK; | 32 | data[NFT_REG_VERDICT].verdict = NFT_BREAK; |
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c index 954925db414d..e2b3f51c81f1 100644 --- a/net/netfilter/nft_cmp.c +++ b/net/netfilter/nft_cmp.c | |||
| @@ -128,7 +128,7 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx, | |||
| 128 | BUG_ON(err < 0); | 128 | BUG_ON(err < 0); |
| 129 | desc.len *= BITS_PER_BYTE; | 129 | desc.len *= BITS_PER_BYTE; |
| 130 | 130 | ||
| 131 | mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - desc.len); | 131 | mask = nft_cmp_fast_mask(desc.len); |
| 132 | priv->data = data.data[0] & mask; | 132 | priv->data = data.data[0] & mask; |
| 133 | priv->len = desc.len; | 133 | priv->len = desc.len; |
| 134 | return 0; | 134 | return 0; |
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index a3d6951602db..ebb6e2442554 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c | |||
| @@ -174,7 +174,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) | |||
| 174 | 174 | ||
| 175 | skb->local_df = 1; | 175 | skb->local_df = 1; |
| 176 | 176 | ||
| 177 | return iptunnel_xmit(rt, skb, fl.saddr, | 177 | return iptunnel_xmit(skb->sk, rt, skb, fl.saddr, |
| 178 | OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE, | 178 | OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE, |
| 179 | OVS_CB(skb)->tun_key->ipv4_tos, | 179 | OVS_CB(skb)->tun_key->ipv4_tos, |
| 180 | OVS_CB(skb)->tun_key->ipv4_ttl, df, false); | 180 | OVS_CB(skb)->tun_key->ipv4_ttl, df, false); |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 4f6d6f9d1274..39579c3e0d14 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
| @@ -1395,35 +1395,44 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc) | |||
| 1395 | return false; | 1395 | return false; |
| 1396 | } | 1396 | } |
| 1397 | 1397 | ||
| 1398 | /* Update asoc's rwnd for the approximated state in the buffer, | 1398 | /* Increase asoc's rwnd by len and send any window update SACK if needed. */ |
| 1399 | * and check whether SACK needs to be sent. | 1399 | void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) |
| 1400 | */ | ||
| 1401 | void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer) | ||
| 1402 | { | 1400 | { |
| 1403 | int rx_count; | ||
| 1404 | struct sctp_chunk *sack; | 1401 | struct sctp_chunk *sack; |
| 1405 | struct timer_list *timer; | 1402 | struct timer_list *timer; |
| 1406 | 1403 | ||
| 1407 | if (asoc->ep->rcvbuf_policy) | 1404 | if (asoc->rwnd_over) { |
| 1408 | rx_count = atomic_read(&asoc->rmem_alloc); | 1405 | if (asoc->rwnd_over >= len) { |
| 1409 | else | 1406 | asoc->rwnd_over -= len; |
| 1410 | rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); | 1407 | } else { |
| 1408 | asoc->rwnd += (len - asoc->rwnd_over); | ||
| 1409 | asoc->rwnd_over = 0; | ||
| 1410 | } | ||
| 1411 | } else { | ||
| 1412 | asoc->rwnd += len; | ||
| 1413 | } | ||
| 1411 | 1414 | ||
| 1412 | if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0) | 1415 | /* If we had window pressure, start recovering it |
| 1413 | asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1; | 1416 | * once our rwnd has reached the accumulated pressure |
| 1414 | else | 1417 | * threshold. The idea is to recover slowly, but up |
| 1415 | asoc->rwnd = 0; | 1418 | * to the initial advertised window. |
| 1419 | */ | ||
| 1420 | if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) { | ||
| 1421 | int change = min(asoc->pathmtu, asoc->rwnd_press); | ||
| 1422 | asoc->rwnd += change; | ||
| 1423 | asoc->rwnd_press -= change; | ||
| 1424 | } | ||
| 1416 | 1425 | ||
| 1417 | pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n", | 1426 | pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n", |
| 1418 | __func__, asoc, asoc->rwnd, rx_count, | 1427 | __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, |
| 1419 | asoc->base.sk->sk_rcvbuf); | 1428 | asoc->a_rwnd); |
| 1420 | 1429 | ||
| 1421 | /* Send a window update SACK if the rwnd has increased by at least the | 1430 | /* Send a window update SACK if the rwnd has increased by at least the |
| 1422 | * minimum of the association's PMTU and half of the receive buffer. | 1431 | * minimum of the association's PMTU and half of the receive buffer. |
| 1423 | * The algorithm used is similar to the one described in | 1432 | * The algorithm used is similar to the one described in |
| 1424 | * Section 4.2.3.3 of RFC 1122. | 1433 | * Section 4.2.3.3 of RFC 1122. |
| 1425 | */ | 1434 | */ |
| 1426 | if (update_peer && sctp_peer_needs_update(asoc)) { | 1435 | if (sctp_peer_needs_update(asoc)) { |
| 1427 | asoc->a_rwnd = asoc->rwnd; | 1436 | asoc->a_rwnd = asoc->rwnd; |
| 1428 | 1437 | ||
| 1429 | pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " | 1438 | pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " |
| @@ -1445,6 +1454,45 @@ void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer) | |||
| 1445 | } | 1454 | } |
| 1446 | } | 1455 | } |
| 1447 | 1456 | ||
| 1457 | /* Decrease asoc's rwnd by len. */ | ||
| 1458 | void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) | ||
| 1459 | { | ||
| 1460 | int rx_count; | ||
| 1461 | int over = 0; | ||
| 1462 | |||
| 1463 | if (unlikely(!asoc->rwnd || asoc->rwnd_over)) | ||
| 1464 | pr_debug("%s: association:%p has asoc->rwnd:%u, " | ||
| 1465 | "asoc->rwnd_over:%u!\n", __func__, asoc, | ||
| 1466 | asoc->rwnd, asoc->rwnd_over); | ||
| 1467 | |||
| 1468 | if (asoc->ep->rcvbuf_policy) | ||
| 1469 | rx_count = atomic_read(&asoc->rmem_alloc); | ||
| 1470 | else | ||
| 1471 | rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); | ||
| 1472 | |||
| 1473 | /* If we've reached or overflowed our receive buffer, announce | ||
| 1474 | * a 0 rwnd if rwnd would still be positive. Store the | ||
| 1475 | * the potential pressure overflow so that the window can be restored | ||
| 1476 | * back to original value. | ||
| 1477 | */ | ||
| 1478 | if (rx_count >= asoc->base.sk->sk_rcvbuf) | ||
| 1479 | over = 1; | ||
| 1480 | |||
| 1481 | if (asoc->rwnd >= len) { | ||
| 1482 | asoc->rwnd -= len; | ||
| 1483 | if (over) { | ||
| 1484 | asoc->rwnd_press += asoc->rwnd; | ||
| 1485 | asoc->rwnd = 0; | ||
| 1486 | } | ||
| 1487 | } else { | ||
| 1488 | asoc->rwnd_over = len - asoc->rwnd; | ||
| 1489 | asoc->rwnd = 0; | ||
| 1490 | } | ||
| 1491 | |||
| 1492 | pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n", | ||
| 1493 | __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, | ||
| 1494 | asoc->rwnd_press); | ||
| 1495 | } | ||
| 1448 | 1496 | ||
| 1449 | /* Build the bind address list for the association based on info from the | 1497 | /* Build the bind address list for the association based on info from the |
| 1450 | * local endpoint and the remote peer. | 1498 | * local endpoint and the remote peer. |
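The associola.c hunk restores explicit rwnd bookkeeping in place of the buffer-derived estimate: a decrease that overshoots the window is charged to rwnd_over, a decrease under receive-buffer pressure parks the remaining window in rwnd_press, and an increase pays the overdraft back before growing rwnd, releasing parked pressure at most one PMTU per call. A user-space sketch that mirrors the arithmetic; the struct and the scenario in main() are simplified stand-ins, not the kernel types:

#include <stdio.h>

struct assoc {
    unsigned int rwnd;          /* currently advertised window */
    unsigned int rwnd_over;     /* bytes the window was overdrawn by */
    unsigned int rwnd_press;    /* window withheld under buffer pressure */
    unsigned int pathmtu;
};

static void rwnd_decrease(struct assoc *a, unsigned int len, int rcvbuf_full)
{
    if (a->rwnd >= len) {
        a->rwnd -= len;
        if (rcvbuf_full) {      /* receive buffer exhausted: park the rest */
            a->rwnd_press += a->rwnd;
            a->rwnd = 0;
        }
    } else {                    /* overshoot is charged to rwnd_over */
        a->rwnd_over = len - a->rwnd;
        a->rwnd = 0;
    }
}

static void rwnd_increase(struct assoc *a, unsigned int len)
{
    if (a->rwnd_over) {         /* pay the overdraft back first */
        if (a->rwnd_over >= len) {
            a->rwnd_over -= len;
        } else {
            a->rwnd += len - a->rwnd_over;
            a->rwnd_over = 0;
        }
    } else {
        a->rwnd += len;
    }

    /* release pressure at most one PMTU at a time once the window recovers */
    if (a->rwnd_press && a->rwnd >= a->rwnd_press) {
        unsigned int change = a->pathmtu < a->rwnd_press ?
                              a->pathmtu : a->rwnd_press;

        a->rwnd += change;
        a->rwnd_press -= change;
    }
}

int main(void)
{
    struct assoc a = { .rwnd = 4096, .pathmtu = 1500 };

    rwnd_decrease(&a, 1000, 0); /* plenty of window left */
    rwnd_decrease(&a, 5000, 0); /* overshoots: rwnd_over = 1904 */
    rwnd_increase(&a, 3000);    /* clears the overdraft, rwnd = 1096 */
    printf("rwnd=%u over=%u press=%u\n", a.rwnd, a.rwnd_over, a.rwnd_press);
    return 0;
}

The sm_statefuns.c and socket.c hunks below hook into the same counters: new data is refused while rwnd_over is non-zero, and a partial recvmsg() immediately returns the consumed bytes to the window.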
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 4e1d0fcb028e..c09757fbf803 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
| @@ -957,7 +957,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, | |||
| 957 | 957 | ||
| 958 | SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS); | 958 | SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS); |
| 959 | 959 | ||
| 960 | return ip_queue_xmit(skb, &transport->fl); | 960 | return ip_queue_xmit(&inet->sk, skb, &transport->fl); |
| 961 | } | 961 | } |
| 962 | 962 | ||
| 963 | static struct sctp_af sctp_af_inet; | 963 | static struct sctp_af sctp_af_inet; |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 01e002430c85..ae9fbeba40b0 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
| @@ -6178,7 +6178,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
| 6178 | * PMTU. In cases, such as loopback, this might be a rather | 6178 | * PMTU. In cases, such as loopback, this might be a rather |
| 6179 | * large spill over. | 6179 | * large spill over. |
| 6180 | */ | 6180 | */ |
| 6181 | if ((!chunk->data_accepted) && (!asoc->rwnd || | 6181 | if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || |
| 6182 | (datalen > asoc->rwnd + asoc->frag_point))) { | 6182 | (datalen > asoc->rwnd + asoc->frag_point))) { |
| 6183 | 6183 | ||
| 6184 | /* If this is the next TSN, consider reneging to make | 6184 | /* If this is the next TSN, consider reneging to make |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e13519e9df80..ff20e2dbbbc7 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -2115,6 +2115,12 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
| 2115 | sctp_skb_pull(skb, copied); | 2115 | sctp_skb_pull(skb, copied); |
| 2116 | skb_queue_head(&sk->sk_receive_queue, skb); | 2116 | skb_queue_head(&sk->sk_receive_queue, skb); |
| 2117 | 2117 | ||
| 2118 | /* When only a partial message is copied to the user, increase | ||
| 2119 | * rwnd by that amount. If all the data in the skb is read, | ||
| 2120 | * rwnd is updated when the event is freed. | ||
| 2121 | */ | ||
| 2122 | if (!sctp_ulpevent_is_notification(event)) | ||
| 2123 | sctp_assoc_rwnd_increase(event->asoc, copied); | ||
| 2118 | goto out; | 2124 | goto out; |
| 2119 | } else if ((event->msg_flags & MSG_NOTIFICATION) || | 2125 | } else if ((event->msg_flags & MSG_NOTIFICATION) || |
| 2120 | (event->msg_flags & MSG_EOR)) | 2126 | (event->msg_flags & MSG_EOR)) |
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 8d198ae03606..85c64658bd0b 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
| @@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, | |||
| 989 | skb = sctp_event2skb(event); | 989 | skb = sctp_event2skb(event); |
| 990 | /* Set the owner and charge rwnd for bytes received. */ | 990 | /* Set the owner and charge rwnd for bytes received. */ |
| 991 | sctp_ulpevent_set_owner(event, asoc); | 991 | sctp_ulpevent_set_owner(event, asoc); |
| 992 | sctp_assoc_rwnd_update(asoc, false); | 992 | sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); |
| 993 | 993 | ||
| 994 | if (!skb->data_len) | 994 | if (!skb->data_len) |
| 995 | return; | 995 | return; |
| @@ -1011,7 +1011,6 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) | |||
| 1011 | { | 1011 | { |
| 1012 | struct sk_buff *skb, *frag; | 1012 | struct sk_buff *skb, *frag; |
| 1013 | unsigned int len; | 1013 | unsigned int len; |
| 1014 | struct sctp_association *asoc; | ||
| 1015 | 1014 | ||
| 1016 | /* Current stack structures assume that the rcv buffer is | 1015 | /* Current stack structures assume that the rcv buffer is |
| 1017 | * per socket. For UDP style sockets this is not true as | 1016 | * per socket. For UDP style sockets this is not true as |
| @@ -1036,11 +1035,8 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) | |||
| 1036 | } | 1035 | } |
| 1037 | 1036 | ||
| 1038 | done: | 1037 | done: |
| 1039 | asoc = event->asoc; | 1038 | sctp_assoc_rwnd_increase(event->asoc, len); |
| 1040 | sctp_association_hold(asoc); | ||
| 1041 | sctp_ulpevent_release_owner(event); | 1039 | sctp_ulpevent_release_owner(event); |
| 1042 | sctp_assoc_rwnd_update(asoc, true); | ||
| 1043 | sctp_association_put(asoc); | ||
| 1044 | } | 1040 | } |
| 1045 | 1041 | ||
| 1046 | static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) | 1042 | static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f02f511b7107..c08fbd11ceff 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
| @@ -1842,7 +1842,7 @@ purge_queue: | |||
| 1842 | xfrm_pol_put(pol); | 1842 | xfrm_pol_put(pol); |
| 1843 | } | 1843 | } |
| 1844 | 1844 | ||
| 1845 | static int xdst_queue_output(struct sk_buff *skb) | 1845 | static int xdst_queue_output(struct sock *sk, struct sk_buff *skb) |
| 1846 | { | 1846 | { |
| 1847 | unsigned long sched_next; | 1847 | unsigned long sched_next; |
| 1848 | struct dst_entry *dst = skb_dst(skb); | 1848 | struct dst_entry *dst = skb_dst(skb); |
