diff options
Diffstat (limited to 'drivers/net')
28 files changed, 427 insertions, 161 deletions
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index fdc9ec09e453..da1a2500c91c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
@@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev) | |||
2140 | goto err_irq0; | 2140 | goto err_irq0; |
2141 | } | 2141 | } |
2142 | 2142 | ||
2143 | /* Re-configure the port multiplexer towards the PHY device */ | ||
2144 | bcmgenet_mii_config(priv->dev, false); | ||
2145 | |||
2146 | phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup, | ||
2147 | priv->phy_interface); | ||
2148 | |||
2143 | bcmgenet_netif_start(dev); | 2149 | bcmgenet_netif_start(dev); |
2144 | 2150 | ||
2145 | return 0; | 2151 | return 0; |
@@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev) | |||
2184 | 2190 | ||
2185 | bcmgenet_netif_stop(dev); | 2191 | bcmgenet_netif_stop(dev); |
2186 | 2192 | ||
2193 | /* Really kill the PHY state machine and disconnect from it */ | ||
2194 | phy_disconnect(priv->phydev); | ||
2195 | |||
2187 | /* Disable MAC receive */ | 2196 | /* Disable MAC receive */ |
2188 | umac_enable_set(priv, CMD_RX_EN, false); | 2197 | umac_enable_set(priv, CMD_RX_EN, false); |
2189 | 2198 | ||
@@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d) | |||
2685 | 2694 | ||
2686 | phy_init_hw(priv->phydev); | 2695 | phy_init_hw(priv->phydev); |
2687 | /* Speed settings must be restored */ | 2696 | /* Speed settings must be restored */ |
2688 | bcmgenet_mii_config(priv->dev); | 2697 | bcmgenet_mii_config(priv->dev, false); |
2689 | 2698 | ||
2690 | /* disable ethernet MAC while updating its registers */ | 2699 | /* disable ethernet MAC while updating its registers */ |
2691 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); | 2700 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index dbf524ea3b19..31b2da5f9b82 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h | |||
@@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); | |||
617 | 617 | ||
618 | /* MDIO routines */ | 618 | /* MDIO routines */ |
619 | int bcmgenet_mii_init(struct net_device *dev); | 619 | int bcmgenet_mii_init(struct net_device *dev); |
620 | int bcmgenet_mii_config(struct net_device *dev); | 620 | int bcmgenet_mii_config(struct net_device *dev, bool init); |
621 | void bcmgenet_mii_exit(struct net_device *dev); | 621 | void bcmgenet_mii_exit(struct net_device *dev); |
622 | void bcmgenet_mii_reset(struct net_device *dev); | 622 | void bcmgenet_mii_reset(struct net_device *dev); |
623 | void bcmgenet_mii_setup(struct net_device *dev); | ||
623 | 624 | ||
624 | /* Wake-on-LAN routines */ | 625 | /* Wake-on-LAN routines */ |
625 | void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); | 626 | void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 9ff799a9f801..933cd7e7cd33 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c | |||
@@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, | |||
77 | /* setup netdev link state when PHY link status change and | 77 | /* setup netdev link state when PHY link status change and |
78 | * update UMAC and RGMII block when link up | 78 | * update UMAC and RGMII block when link up |
79 | */ | 79 | */ |
80 | static void bcmgenet_mii_setup(struct net_device *dev) | 80 | void bcmgenet_mii_setup(struct net_device *dev) |
81 | { | 81 | { |
82 | struct bcmgenet_priv *priv = netdev_priv(dev); | 82 | struct bcmgenet_priv *priv = netdev_priv(dev); |
83 | struct phy_device *phydev = priv->phydev; | 83 | struct phy_device *phydev = priv->phydev; |
@@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) | |||
211 | bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); | 211 | bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); |
212 | } | 212 | } |
213 | 213 | ||
214 | int bcmgenet_mii_config(struct net_device *dev) | 214 | int bcmgenet_mii_config(struct net_device *dev, bool init) |
215 | { | 215 | { |
216 | struct bcmgenet_priv *priv = netdev_priv(dev); | 216 | struct bcmgenet_priv *priv = netdev_priv(dev); |
217 | struct phy_device *phydev = priv->phydev; | 217 | struct phy_device *phydev = priv->phydev; |
@@ -298,7 +298,8 @@ int bcmgenet_mii_config(struct net_device *dev) | |||
298 | return -EINVAL; | 298 | return -EINVAL; |
299 | } | 299 | } |
300 | 300 | ||
301 | dev_info(kdev, "configuring instance for %s\n", phy_name); | 301 | if (init) |
302 | dev_info(kdev, "configuring instance for %s\n", phy_name); | ||
302 | 303 | ||
303 | return 0; | 304 | return 0; |
304 | } | 305 | } |
@@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct net_device *dev) | |||
350 | * PHY speed which is needed for bcmgenet_mii_config() to configure | 351 | * PHY speed which is needed for bcmgenet_mii_config() to configure |
351 | * things appropriately. | 352 | * things appropriately. |
352 | */ | 353 | */ |
353 | ret = bcmgenet_mii_config(dev); | 354 | ret = bcmgenet_mii_config(dev, true); |
354 | if (ret) { | 355 | if (ret) { |
355 | phy_disconnect(priv->phydev); | 356 | phy_disconnect(priv->phydev); |
356 | return ret; | 357 | return ret; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 6fe300e316c3..cca604994003 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | |||
@@ -79,8 +79,9 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev) | |||
79 | app.protocol = dcb->app_priority[i].protocolid; | 79 | app.protocol = dcb->app_priority[i].protocolid; |
80 | 80 | ||
81 | if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { | 81 | if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { |
82 | app.priority = dcb->app_priority[i].user_prio_map; | ||
82 | app.selector = dcb->app_priority[i].sel_field + 1; | 83 | app.selector = dcb->app_priority[i].sel_field + 1; |
83 | err = dcb_ieee_setapp(dev, &app); | 84 | err = dcb_ieee_delapp(dev, &app); |
84 | } else { | 85 | } else { |
85 | app.selector = !!(dcb->app_priority[i].sel_field); | 86 | app.selector = !!(dcb->app_priority[i].sel_field); |
86 | err = dcb_setapp(dev, &app); | 87 | err = dcb_setapp(dev, &app); |
@@ -122,7 +123,11 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, | |||
122 | case CXGB4_DCB_INPUT_FW_ENABLED: { | 123 | case CXGB4_DCB_INPUT_FW_ENABLED: { |
123 | /* we're going to use Firmware DCB */ | 124 | /* we're going to use Firmware DCB */ |
124 | dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; | 125 | dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; |
125 | dcb->supported = CXGB4_DCBX_FW_SUPPORT; | 126 | dcb->supported = DCB_CAP_DCBX_LLD_MANAGED; |
127 | if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) | ||
128 | dcb->supported |= DCB_CAP_DCBX_VER_IEEE; | ||
129 | else | ||
130 | dcb->supported |= DCB_CAP_DCBX_VER_CEE; | ||
126 | break; | 131 | break; |
127 | } | 132 | } |
128 | 133 | ||
@@ -436,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc, | |||
436 | *up_tc_map = (1 << tc); | 441 | *up_tc_map = (1 << tc); |
437 | 442 | ||
438 | /* prio_type is link strict */ | 443 | /* prio_type is link strict */ |
439 | *prio_type = 0x2; | 444 | if (*pgid != 0xF) |
445 | *prio_type = 0x2; | ||
440 | } | 446 | } |
441 | 447 | ||
442 | static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc, | 448 | static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc, |
443 | u8 *prio_type, u8 *pgid, u8 *bw_per, | 449 | u8 *prio_type, u8 *pgid, u8 *bw_per, |
444 | u8 *up_tc_map) | 450 | u8 *up_tc_map) |
445 | { | 451 | { |
446 | return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1); | 452 | /* tc 0 is written at MSB position */ |
453 | return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per, | ||
454 | up_tc_map, 1); | ||
447 | } | 455 | } |
448 | 456 | ||
449 | 457 | ||
@@ -451,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc, | |||
451 | u8 *prio_type, u8 *pgid, u8 *bw_per, | 459 | u8 *prio_type, u8 *pgid, u8 *bw_per, |
452 | u8 *up_tc_map) | 460 | u8 *up_tc_map) |
453 | { | 461 | { |
454 | return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0); | 462 | /* tc 0 is written at MSB position */ |
463 | return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per, | ||
464 | up_tc_map, 0); | ||
455 | } | 465 | } |
456 | 466 | ||
457 | static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, | 467 | static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, |
@@ -461,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, | |||
461 | struct fw_port_cmd pcmd; | 471 | struct fw_port_cmd pcmd; |
462 | struct port_info *pi = netdev2pinfo(dev); | 472 | struct port_info *pi = netdev2pinfo(dev); |
463 | struct adapter *adap = pi->adapter; | 473 | struct adapter *adap = pi->adapter; |
474 | int fw_tc = 7 - tc; | ||
464 | u32 _pgid; | 475 | u32 _pgid; |
465 | int err; | 476 | int err; |
466 | 477 | ||
@@ -479,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, | |||
479 | } | 490 | } |
480 | 491 | ||
481 | _pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); | 492 | _pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); |
482 | _pgid &= ~(0xF << (tc * 4)); | 493 | _pgid &= ~(0xF << (fw_tc * 4)); |
483 | _pgid |= pgid << (tc * 4); | 494 | _pgid |= pgid << (fw_tc * 4); |
484 | pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid); | 495 | pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid); |
485 | 496 | ||
486 | INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); | 497 | INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); |
@@ -593,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg) | |||
593 | priority >= CXGB4_MAX_PRIORITY) | 604 | priority >= CXGB4_MAX_PRIORITY) |
594 | *pfccfg = 0; | 605 | *pfccfg = 0; |
595 | else | 606 | else |
596 | *pfccfg = (pi->dcb.pfcen >> priority) & 1; | 607 | *pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1; |
597 | } | 608 | } |
598 | 609 | ||
599 | /* Enable/disable Priority Pause Frames for the specified Traffic Class | 610 | /* Enable/disable Priority Pause Frames for the specified Traffic Class |
@@ -618,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg) | |||
618 | pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen; | 629 | pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen; |
619 | 630 | ||
620 | if (pfccfg) | 631 | if (pfccfg) |
621 | pcmd.u.dcb.pfc.pfcen |= (1 << priority); | 632 | pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority)); |
622 | else | 633 | else |
623 | pcmd.u.dcb.pfc.pfcen &= (~(1 << priority)); | 634 | pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority))); |
624 | 635 | ||
625 | err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); | 636 | err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); |
626 | if (err != FW_PORT_DCB_CFG_SUCCESS) { | 637 | if (err != FW_PORT_DCB_CFG_SUCCESS) { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 91dbf98036cc..5cc5e19286a1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap) | |||
2914 | int t4_sge_init(struct adapter *adap) | 2914 | int t4_sge_init(struct adapter *adap) |
2915 | { | 2915 | { |
2916 | struct sge *s = &adap->sge; | 2916 | struct sge *s = &adap->sge; |
2917 | u32 sge_control, sge_conm_ctrl; | 2917 | u32 sge_control, sge_control2, sge_conm_ctrl; |
2918 | unsigned int ingpadboundary, ingpackboundary; | ||
2918 | int ret, egress_threshold; | 2919 | int ret, egress_threshold; |
2919 | 2920 | ||
2920 | /* | 2921 | /* |
@@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap) | |||
2924 | sge_control = t4_read_reg(adap, SGE_CONTROL); | 2925 | sge_control = t4_read_reg(adap, SGE_CONTROL); |
2925 | s->pktshift = PKTSHIFT_GET(sge_control); | 2926 | s->pktshift = PKTSHIFT_GET(sge_control); |
2926 | s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64; | 2927 | s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64; |
2927 | s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) + | 2928 | |
2928 | X_INGPADBOUNDARY_SHIFT); | 2929 | /* T4 uses a single control field to specify both the PCIe Padding and |
2930 | * Packing Boundary. T5 introduced the ability to specify these | ||
2931 | * separately. The actual Ingress Packet Data alignment boundary | ||
2932 | * within Packed Buffer Mode is the maximum of these two | ||
2933 | * specifications. | ||
2934 | */ | ||
2935 | ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) + | ||
2936 | X_INGPADBOUNDARY_SHIFT); | ||
2937 | if (is_t4(adap->params.chip)) { | ||
2938 | s->fl_align = ingpadboundary; | ||
2939 | } else { | ||
2940 | /* T5 has a different interpretation of one of the PCIe Packing | ||
2941 | * Boundary values. | ||
2942 | */ | ||
2943 | sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); | ||
2944 | ingpackboundary = INGPACKBOUNDARY_G(sge_control2); | ||
2945 | if (ingpackboundary == INGPACKBOUNDARY_16B_X) | ||
2946 | ingpackboundary = 16; | ||
2947 | else | ||
2948 | ingpackboundary = 1 << (ingpackboundary + | ||
2949 | INGPACKBOUNDARY_SHIFT_X); | ||
2950 | |||
2951 | s->fl_align = max(ingpadboundary, ingpackboundary); | ||
2952 | } | ||
2929 | 2953 | ||
2930 | if (adap->flags & USING_SOFT_PARAMS) | 2954 | if (adap->flags & USING_SOFT_PARAMS) |
2931 | ret = t4_sge_init_soft(adap); | 2955 | ret = t4_sge_init_soft(adap); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 2bb4efa7db98..4d32df5041f6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -3130,12 +3130,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, | |||
3130 | HOSTPAGESIZEPF6(sge_hps) | | 3130 | HOSTPAGESIZEPF6(sge_hps) | |
3131 | HOSTPAGESIZEPF7(sge_hps)); | 3131 | HOSTPAGESIZEPF7(sge_hps)); |
3132 | 3132 | ||
3133 | t4_set_reg_field(adap, SGE_CONTROL, | 3133 | if (is_t4(adap->params.chip)) { |
3134 | INGPADBOUNDARY_MASK | | 3134 | t4_set_reg_field(adap, SGE_CONTROL, |
3135 | EGRSTATUSPAGESIZE_MASK, | 3135 | INGPADBOUNDARY_MASK | |
3136 | INGPADBOUNDARY(fl_align_log - 5) | | 3136 | EGRSTATUSPAGESIZE_MASK, |
3137 | EGRSTATUSPAGESIZE(stat_len != 64)); | 3137 | INGPADBOUNDARY(fl_align_log - 5) | |
3138 | 3138 | EGRSTATUSPAGESIZE(stat_len != 64)); | |
3139 | } else { | ||
3140 | /* T5 introduced the separation of the Free List Padding and | ||
3141 | * Packing Boundaries. Thus, we can select a smaller Padding | ||
3142 | * Boundary to avoid uselessly chewing up PCIe Link and Memory | ||
3143 | * Bandwidth, and use a Packing Boundary which is large enough | ||
3144 | * to avoid false sharing between CPUs, etc. | ||
3145 | * | ||
3146 | * For the PCI Link, the smaller the Padding Boundary the | ||
3147 | * better. For the Memory Controller, a smaller Padding | ||
3148 | * Boundary is better until we cross under the Memory Line | ||
3149 | * Size (the minimum unit of transfer to/from Memory). If we | ||
3150 | * have a Padding Boundary which is smaller than the Memory | ||
3151 | * Line Size, that'll involve a Read-Modify-Write cycle on the | ||
3152 | * Memory Controller which is never good. For T5 the smallest | ||
3153 | * Padding Boundary which we can select is 32 bytes which is | ||
3154 | * larger than any known Memory Controller Line Size so we'll | ||
3155 | * use that. | ||
3156 | * | ||
3157 | * T5 has a different interpretation of the "0" value for the | ||
3158 | * Packing Boundary. This corresponds to 16 bytes instead of | ||
3159 | * the expected 32 bytes. We never have a Packing Boundary | ||
3160 | * less than 32 bytes so we can't use that special value but | ||
3161 | * on the other hand, if we wanted 32 bytes, the best we can | ||
3162 | * really do is 64 bytes. | ||
3163 | */ | ||
3164 | if (fl_align <= 32) { | ||
3165 | fl_align = 64; | ||
3166 | fl_align_log = 6; | ||
3167 | } | ||
3168 | t4_set_reg_field(adap, SGE_CONTROL, | ||
3169 | INGPADBOUNDARY_MASK | | ||
3170 | EGRSTATUSPAGESIZE_MASK, | ||
3171 | INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) | | ||
3172 | EGRSTATUSPAGESIZE(stat_len != 64)); | ||
3173 | t4_set_reg_field(adap, SGE_CONTROL2_A, | ||
3174 | INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), | ||
3175 | INGPACKBOUNDARY_V(fl_align_log - | ||
3176 | INGPACKBOUNDARY_SHIFT_X)); | ||
3177 | } | ||
3139 | /* | 3178 | /* |
3140 | * Adjust various SGE Free List Host Buffer Sizes. | 3179 | * Adjust various SGE Free List Host Buffer Sizes. |
3141 | * | 3180 | * |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index c8eb7ba225e1..ccdf8a7f4916 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | |||
@@ -95,6 +95,7 @@ | |||
95 | #define X_INGPADBOUNDARY_SHIFT 5 | 95 | #define X_INGPADBOUNDARY_SHIFT 5 |
96 | 96 | ||
97 | #define SGE_CONTROL 0x1008 | 97 | #define SGE_CONTROL 0x1008 |
98 | #define SGE_CONTROL2_A 0x1124 | ||
98 | #define DCASYSTYPE 0x00080000U | 99 | #define DCASYSTYPE 0x00080000U |
99 | #define RXPKTCPLMODE_MASK 0x00040000U | 100 | #define RXPKTCPLMODE_MASK 0x00040000U |
100 | #define RXPKTCPLMODE_SHIFT 18 | 101 | #define RXPKTCPLMODE_SHIFT 18 |
@@ -106,6 +107,7 @@ | |||
106 | #define PKTSHIFT_SHIFT 10 | 107 | #define PKTSHIFT_SHIFT 10 |
107 | #define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) | 108 | #define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) |
108 | #define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT) | 109 | #define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT) |
110 | #define INGPCIEBOUNDARY_32B_X 0 | ||
109 | #define INGPCIEBOUNDARY_MASK 0x00000380U | 111 | #define INGPCIEBOUNDARY_MASK 0x00000380U |
110 | #define INGPCIEBOUNDARY_SHIFT 7 | 112 | #define INGPCIEBOUNDARY_SHIFT 7 |
111 | #define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) | 113 | #define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) |
@@ -114,6 +116,14 @@ | |||
114 | #define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) | 116 | #define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) |
115 | #define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \ | 117 | #define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \ |
116 | >> INGPADBOUNDARY_SHIFT) | 118 | >> INGPADBOUNDARY_SHIFT) |
119 | #define INGPACKBOUNDARY_16B_X 0 | ||
120 | #define INGPACKBOUNDARY_SHIFT_X 5 | ||
121 | |||
122 | #define INGPACKBOUNDARY_S 16 | ||
123 | #define INGPACKBOUNDARY_M 0x7U | ||
124 | #define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S) | ||
125 | #define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \ | ||
126 | & INGPACKBOUNDARY_M) | ||
117 | #define EGRPCIEBOUNDARY_MASK 0x0000000eU | 127 | #define EGRPCIEBOUNDARY_MASK 0x0000000eU |
118 | #define EGRPCIEBOUNDARY_SHIFT 1 | 128 | #define EGRPCIEBOUNDARY_SHIFT 1 |
119 | #define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) | 129 | #define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 68eaa9c88c7d..3d06e77d7121 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | |||
@@ -299,6 +299,14 @@ struct sge { | |||
299 | u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */ | 299 | u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */ |
300 | u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */ | 300 | u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */ |
301 | 301 | ||
302 | /* Decoded Adapter Parameters. | ||
303 | */ | ||
304 | u32 fl_pg_order; /* large page allocation size */ | ||
305 | u32 stat_len; /* length of status page at ring end */ | ||
306 | u32 pktshift; /* padding between CPL & packet data */ | ||
307 | u32 fl_align; /* response queue message alignment */ | ||
308 | u32 fl_starve_thres; /* Free List starvation threshold */ | ||
309 | |||
302 | /* | 310 | /* |
303 | * Reverse maps from Absolute Queue IDs to associated queue pointers. | 311 | * Reverse maps from Absolute Queue IDs to associated queue pointers. |
304 | * The absolute Queue IDs are in a compact range which start at a | 312 | * The absolute Queue IDs are in a compact range which start at a |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index aff6d37f2676..50b1b34bde6d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
@@ -51,14 +51,6 @@ | |||
51 | #include "../cxgb4/t4_msg.h" | 51 | #include "../cxgb4/t4_msg.h" |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Decoded Adapter Parameters. | ||
55 | */ | ||
56 | static u32 FL_PG_ORDER; /* large page allocation size */ | ||
57 | static u32 STAT_LEN; /* length of status page at ring end */ | ||
58 | static u32 PKTSHIFT; /* padding between CPL and packet data */ | ||
59 | static u32 FL_ALIGN; /* response queue message alignment */ | ||
60 | |||
61 | /* | ||
62 | * Constants ... | 54 | * Constants ... |
63 | */ | 55 | */ |
64 | enum { | 56 | enum { |
@@ -102,12 +94,6 @@ enum { | |||
102 | MAX_TIMER_TX_RECLAIM = 100, | 94 | MAX_TIMER_TX_RECLAIM = 100, |
103 | 95 | ||
104 | /* | 96 | /* |
105 | * An FL with <= FL_STARVE_THRES buffers is starving and a periodic | ||
106 | * timer will attempt to refill it. | ||
107 | */ | ||
108 | FL_STARVE_THRES = 4, | ||
109 | |||
110 | /* | ||
111 | * Suspend an Ethernet TX queue with fewer available descriptors than | 97 | * Suspend an Ethernet TX queue with fewer available descriptors than |
112 | * this. We always want to have room for a maximum sized packet: | 98 | * this. We always want to have room for a maximum sized packet: |
113 | * inline immediate data + MAX_SKB_FRAGS. This is the same as | 99 | * inline immediate data + MAX_SKB_FRAGS. This is the same as |
@@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl) | |||
264 | 250 | ||
265 | /** | 251 | /** |
266 | * fl_starving - return whether a Free List is starving. | 252 | * fl_starving - return whether a Free List is starving. |
253 | * @adapter: pointer to the adapter | ||
267 | * @fl: the Free List | 254 | * @fl: the Free List |
268 | * | 255 | * |
269 | * Tests specified Free List to see whether the number of buffers | 256 | * Tests specified Free List to see whether the number of buffers |
270 | * available to the hardware has falled below our "starvation" | 257 | * available to the hardware has falled below our "starvation" |
271 | * threshold. | 258 | * threshold. |
272 | */ | 259 | */ |
273 | static inline bool fl_starving(const struct sge_fl *fl) | 260 | static inline bool fl_starving(const struct adapter *adapter, |
261 | const struct sge_fl *fl) | ||
274 | { | 262 | { |
275 | return fl->avail - fl->pend_cred <= FL_STARVE_THRES; | 263 | const struct sge *s = &adapter->sge; |
264 | |||
265 | return fl->avail - fl->pend_cred <= s->fl_starve_thres; | ||
276 | } | 266 | } |
277 | 267 | ||
278 | /** | 268 | /** |
@@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter, | |||
457 | 447 | ||
458 | /** | 448 | /** |
459 | * get_buf_size - return the size of an RX Free List buffer. | 449 | * get_buf_size - return the size of an RX Free List buffer. |
450 | * @adapter: pointer to the associated adapter | ||
460 | * @sdesc: pointer to the software buffer descriptor | 451 | * @sdesc: pointer to the software buffer descriptor |
461 | */ | 452 | */ |
462 | static inline int get_buf_size(const struct rx_sw_desc *sdesc) | 453 | static inline int get_buf_size(const struct adapter *adapter, |
454 | const struct rx_sw_desc *sdesc) | ||
463 | { | 455 | { |
464 | return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF) | 456 | const struct sge *s = &adapter->sge; |
465 | ? (PAGE_SIZE << FL_PG_ORDER) | 457 | |
466 | : PAGE_SIZE; | 458 | return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF) |
459 | ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE); | ||
467 | } | 460 | } |
468 | 461 | ||
469 | /** | 462 | /** |
@@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) | |||
483 | 476 | ||
484 | if (is_buf_mapped(sdesc)) | 477 | if (is_buf_mapped(sdesc)) |
485 | dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), | 478 | dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), |
486 | get_buf_size(sdesc), PCI_DMA_FROMDEVICE); | 479 | get_buf_size(adapter, sdesc), |
480 | PCI_DMA_FROMDEVICE); | ||
487 | put_page(sdesc->page); | 481 | put_page(sdesc->page); |
488 | sdesc->page = NULL; | 482 | sdesc->page = NULL; |
489 | if (++fl->cidx == fl->size) | 483 | if (++fl->cidx == fl->size) |
@@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) | |||
511 | 505 | ||
512 | if (is_buf_mapped(sdesc)) | 506 | if (is_buf_mapped(sdesc)) |
513 | dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), | 507 | dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), |
514 | get_buf_size(sdesc), PCI_DMA_FROMDEVICE); | 508 | get_buf_size(adapter, sdesc), |
509 | PCI_DMA_FROMDEVICE); | ||
515 | sdesc->page = NULL; | 510 | sdesc->page = NULL; |
516 | if (++fl->cidx == fl->size) | 511 | if (++fl->cidx == fl->size) |
517 | fl->cidx = 0; | 512 | fl->cidx = 0; |
@@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz) | |||
589 | static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, | 584 | static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, |
590 | int n, gfp_t gfp) | 585 | int n, gfp_t gfp) |
591 | { | 586 | { |
587 | struct sge *s = &adapter->sge; | ||
592 | struct page *page; | 588 | struct page *page; |
593 | dma_addr_t dma_addr; | 589 | dma_addr_t dma_addr; |
594 | unsigned int cred = fl->avail; | 590 | unsigned int cred = fl->avail; |
@@ -610,11 +606,11 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, | |||
610 | * If we don't support large pages, drop directly into the small page | 606 | * If we don't support large pages, drop directly into the small page |
611 | * allocation code. | 607 | * allocation code. |
612 | */ | 608 | */ |
613 | if (FL_PG_ORDER == 0) | 609 | if (s->fl_pg_order == 0) |
614 | goto alloc_small_pages; | 610 | goto alloc_small_pages; |
615 | 611 | ||
616 | while (n) { | 612 | while (n) { |
617 | page = __dev_alloc_pages(gfp, FL_PG_ORDER); | 613 | page = __dev_alloc_pages(gfp, s->fl_pg_order); |
618 | if (unlikely(!page)) { | 614 | if (unlikely(!page)) { |
619 | /* | 615 | /* |
620 | * We've failed inour attempt to allocate a "large | 616 | * We've failed inour attempt to allocate a "large |
@@ -624,10 +620,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, | |||
624 | fl->large_alloc_failed++; | 620 | fl->large_alloc_failed++; |
625 | break; | 621 | break; |
626 | } | 622 | } |
627 | poison_buf(page, PAGE_SIZE << FL_PG_ORDER); | 623 | poison_buf(page, PAGE_SIZE << s->fl_pg_order); |
628 | 624 | ||
629 | dma_addr = dma_map_page(adapter->pdev_dev, page, 0, | 625 | dma_addr = dma_map_page(adapter->pdev_dev, page, 0, |
630 | PAGE_SIZE << FL_PG_ORDER, | 626 | PAGE_SIZE << s->fl_pg_order, |
631 | PCI_DMA_FROMDEVICE); | 627 | PCI_DMA_FROMDEVICE); |
632 | if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { | 628 | if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { |
633 | /* | 629 | /* |
@@ -638,7 +634,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, | |||
638 | * because DMA mapping resources are typically | 634 | * because DMA mapping resources are typically |
639 | * critical resources once they become scarse. | 635 | * critical resources once they become scarse. |
640 | */ | 636 | */ |
641 | __free_pages(page, FL_PG_ORDER); | 637 | __free_pages(page, s->fl_pg_order); |
642 | goto out; | 638 | goto out; |
643 | } | 639 | } |
644 | dma_addr |= RX_LARGE_BUF; | 640 | dma_addr |= RX_LARGE_BUF; |
@@ -694,7 +690,7 @@ out: | |||
694 | fl->pend_cred += cred; | 690 | fl->pend_cred += cred; |
695 | ring_fl_db(adapter, fl); | 691 | ring_fl_db(adapter, fl); |
696 | 692 | ||
697 | if (unlikely(fl_starving(fl))) { | 693 | if (unlikely(fl_starving(adapter, fl))) { |
698 | smp_wmb(); | 694 | smp_wmb(); |
699 | set_bit(fl->cntxt_id, adapter->sge.starving_fl); | 695 | set_bit(fl->cntxt_id, adapter->sge.starving_fl); |
700 | } | 696 | } |
@@ -1469,6 +1465,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl) | |||
1469 | static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, | 1465 | static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, |
1470 | const struct cpl_rx_pkt *pkt) | 1466 | const struct cpl_rx_pkt *pkt) |
1471 | { | 1467 | { |
1468 | struct adapter *adapter = rxq->rspq.adapter; | ||
1469 | struct sge *s = &adapter->sge; | ||
1472 | int ret; | 1470 | int ret; |
1473 | struct sk_buff *skb; | 1471 | struct sk_buff *skb; |
1474 | 1472 | ||
@@ -1479,8 +1477,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, | |||
1479 | return; | 1477 | return; |
1480 | } | 1478 | } |
1481 | 1479 | ||
1482 | copy_frags(skb, gl, PKTSHIFT); | 1480 | copy_frags(skb, gl, s->pktshift); |
1483 | skb->len = gl->tot_len - PKTSHIFT; | 1481 | skb->len = gl->tot_len - s->pktshift; |
1484 | skb->data_len = skb->len; | 1482 | skb->data_len = skb->len; |
1485 | skb->truesize += skb->data_len; | 1483 | skb->truesize += skb->data_len; |
1486 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1484 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -1517,6 +1515,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, | |||
1517 | bool csum_ok = pkt->csum_calc && !pkt->err_vec && | 1515 | bool csum_ok = pkt->csum_calc && !pkt->err_vec && |
1518 | (rspq->netdev->features & NETIF_F_RXCSUM); | 1516 | (rspq->netdev->features & NETIF_F_RXCSUM); |
1519 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); | 1517 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); |
1518 | struct adapter *adapter = rspq->adapter; | ||
1519 | struct sge *s = &adapter->sge; | ||
1520 | 1520 | ||
1521 | /* | 1521 | /* |
1522 | * If this is a good TCP packet and we have Generic Receive Offload | 1522 | * If this is a good TCP packet and we have Generic Receive Offload |
@@ -1538,7 +1538,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, | |||
1538 | rxq->stats.rx_drops++; | 1538 | rxq->stats.rx_drops++; |
1539 | return 0; | 1539 | return 0; |
1540 | } | 1540 | } |
1541 | __skb_pull(skb, PKTSHIFT); | 1541 | __skb_pull(skb, s->pktshift); |
1542 | skb->protocol = eth_type_trans(skb, rspq->netdev); | 1542 | skb->protocol = eth_type_trans(skb, rspq->netdev); |
1543 | skb_record_rx_queue(skb, rspq->idx); | 1543 | skb_record_rx_queue(skb, rspq->idx); |
1544 | rxq->stats.pkts++; | 1544 | rxq->stats.pkts++; |
@@ -1649,6 +1649,8 @@ static inline void rspq_next(struct sge_rspq *rspq) | |||
1649 | static int process_responses(struct sge_rspq *rspq, int budget) | 1649 | static int process_responses(struct sge_rspq *rspq, int budget) |
1650 | { | 1650 | { |
1651 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); | 1651 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); |
1652 | struct adapter *adapter = rspq->adapter; | ||
1653 | struct sge *s = &adapter->sge; | ||
1652 | int budget_left = budget; | 1654 | int budget_left = budget; |
1653 | 1655 | ||
1654 | while (likely(budget_left)) { | 1656 | while (likely(budget_left)) { |
@@ -1698,7 +1700,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) | |||
1698 | BUG_ON(frag >= MAX_SKB_FRAGS); | 1700 | BUG_ON(frag >= MAX_SKB_FRAGS); |
1699 | BUG_ON(rxq->fl.avail == 0); | 1701 | BUG_ON(rxq->fl.avail == 0); |
1700 | sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; | 1702 | sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; |
1701 | bufsz = get_buf_size(sdesc); | 1703 | bufsz = get_buf_size(adapter, sdesc); |
1702 | fp->page = sdesc->page; | 1704 | fp->page = sdesc->page; |
1703 | fp->offset = rspq->offset; | 1705 | fp->offset = rspq->offset; |
1704 | fp->size = min(bufsz, len); | 1706 | fp->size = min(bufsz, len); |
@@ -1727,7 +1729,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) | |||
1727 | */ | 1729 | */ |
1728 | ret = rspq->handler(rspq, rspq->cur_desc, &gl); | 1730 | ret = rspq->handler(rspq, rspq->cur_desc, &gl); |
1729 | if (likely(ret == 0)) | 1731 | if (likely(ret == 0)) |
1730 | rspq->offset += ALIGN(fp->size, FL_ALIGN); | 1732 | rspq->offset += ALIGN(fp->size, s->fl_align); |
1731 | else | 1733 | else |
1732 | restore_rx_bufs(&gl, &rxq->fl, frag); | 1734 | restore_rx_bufs(&gl, &rxq->fl, frag); |
1733 | } else if (likely(rsp_type == RSP_TYPE_CPL)) { | 1735 | } else if (likely(rsp_type == RSP_TYPE_CPL)) { |
@@ -1964,7 +1966,7 @@ static void sge_rx_timer_cb(unsigned long data) | |||
1964 | * schedule napi but the FL is no longer starving. | 1966 | * schedule napi but the FL is no longer starving. |
1965 | * No biggie. | 1967 | * No biggie. |
1966 | */ | 1968 | */ |
1967 | if (fl_starving(fl)) { | 1969 | if (fl_starving(adapter, fl)) { |
1968 | struct sge_eth_rxq *rxq; | 1970 | struct sge_eth_rxq *rxq; |
1969 | 1971 | ||
1970 | rxq = container_of(fl, struct sge_eth_rxq, fl); | 1972 | rxq = container_of(fl, struct sge_eth_rxq, fl); |
@@ -2048,6 +2050,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, | |||
2048 | int intr_dest, | 2050 | int intr_dest, |
2049 | struct sge_fl *fl, rspq_handler_t hnd) | 2051 | struct sge_fl *fl, rspq_handler_t hnd) |
2050 | { | 2052 | { |
2053 | struct sge *s = &adapter->sge; | ||
2051 | struct port_info *pi = netdev_priv(dev); | 2054 | struct port_info *pi = netdev_priv(dev); |
2052 | struct fw_iq_cmd cmd, rpl; | 2055 | struct fw_iq_cmd cmd, rpl; |
2053 | int ret, iqandst, flsz = 0; | 2056 | int ret, iqandst, flsz = 0; |
@@ -2118,7 +2121,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, | |||
2118 | fl->size = roundup(fl->size, FL_PER_EQ_UNIT); | 2121 | fl->size = roundup(fl->size, FL_PER_EQ_UNIT); |
2119 | fl->desc = alloc_ring(adapter->pdev_dev, fl->size, | 2122 | fl->desc = alloc_ring(adapter->pdev_dev, fl->size, |
2120 | sizeof(__be64), sizeof(struct rx_sw_desc), | 2123 | sizeof(__be64), sizeof(struct rx_sw_desc), |
2121 | &fl->addr, &fl->sdesc, STAT_LEN); | 2124 | &fl->addr, &fl->sdesc, s->stat_len); |
2122 | if (!fl->desc) { | 2125 | if (!fl->desc) { |
2123 | ret = -ENOMEM; | 2126 | ret = -ENOMEM; |
2124 | goto err; | 2127 | goto err; |
@@ -2130,7 +2133,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, | |||
2130 | * free list ring) in Egress Queue Units. | 2133 | * free list ring) in Egress Queue Units. |
2131 | */ | 2134 | */ |
2132 | flsz = (fl->size / FL_PER_EQ_UNIT + | 2135 | flsz = (fl->size / FL_PER_EQ_UNIT + |
2133 | STAT_LEN / EQ_UNIT); | 2136 | s->stat_len / EQ_UNIT); |
2134 | 2137 | ||
2135 | /* | 2138 | /* |
2136 | * Fill in all the relevant firmware Ingress Queue Command | 2139 | * Fill in all the relevant firmware Ingress Queue Command |
@@ -2218,6 +2221,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, | |||
2218 | struct net_device *dev, struct netdev_queue *devq, | 2221 | struct net_device *dev, struct netdev_queue *devq, |
2219 | unsigned int iqid) | 2222 | unsigned int iqid) |
2220 | { | 2223 | { |
2224 | struct sge *s = &adapter->sge; | ||
2221 | int ret, nentries; | 2225 | int ret, nentries; |
2222 | struct fw_eq_eth_cmd cmd, rpl; | 2226 | struct fw_eq_eth_cmd cmd, rpl; |
2223 | struct port_info *pi = netdev_priv(dev); | 2227 | struct port_info *pi = netdev_priv(dev); |
@@ -2226,7 +2230,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, | |||
2226 | * Calculate the size of the hardware TX Queue (including the Status | 2230 | * Calculate the size of the hardware TX Queue (including the Status |
2227 | * Page on the end of the TX Queue) in units of TX Descriptors. | 2231 | * Page on the end of the TX Queue) in units of TX Descriptors. |
2228 | */ | 2232 | */ |
2229 | nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); | 2233 | nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); |
2230 | 2234 | ||
2231 | /* | 2235 | /* |
2232 | * Allocate the hardware ring for the TX ring (with space for its | 2236 | * Allocate the hardware ring for the TX ring (with space for its |
@@ -2235,7 +2239,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, | |||
2235 | txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, | 2239 | txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, |
2236 | sizeof(struct tx_desc), | 2240 | sizeof(struct tx_desc), |
2237 | sizeof(struct tx_sw_desc), | 2241 | sizeof(struct tx_sw_desc), |
2238 | &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); | 2242 | &txq->q.phys_addr, &txq->q.sdesc, s->stat_len); |
2239 | if (!txq->q.desc) | 2243 | if (!txq->q.desc) |
2240 | return -ENOMEM; | 2244 | return -ENOMEM; |
2241 | 2245 | ||
@@ -2308,8 +2312,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, | |||
2308 | */ | 2312 | */ |
2309 | static void free_txq(struct adapter *adapter, struct sge_txq *tq) | 2313 | static void free_txq(struct adapter *adapter, struct sge_txq *tq) |
2310 | { | 2314 | { |
2315 | struct sge *s = &adapter->sge; | ||
2316 | |||
2311 | dma_free_coherent(adapter->pdev_dev, | 2317 | dma_free_coherent(adapter->pdev_dev, |
2312 | tq->size * sizeof(*tq->desc) + STAT_LEN, | 2318 | tq->size * sizeof(*tq->desc) + s->stat_len, |
2313 | tq->desc, tq->phys_addr); | 2319 | tq->desc, tq->phys_addr); |
2314 | tq->cntxt_id = 0; | 2320 | tq->cntxt_id = 0; |
2315 | tq->sdesc = NULL; | 2321 | tq->sdesc = NULL; |
@@ -2323,6 +2329,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq) | |||
2323 | static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, | 2329 | static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, |
2324 | struct sge_fl *fl) | 2330 | struct sge_fl *fl) |
2325 | { | 2331 | { |
2332 | struct sge *s = &adapter->sge; | ||
2326 | unsigned int flid = fl ? fl->cntxt_id : 0xffff; | 2333 | unsigned int flid = fl ? fl->cntxt_id : 0xffff; |
2327 | 2334 | ||
2328 | t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, | 2335 | t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, |
@@ -2338,7 +2345,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, | |||
2338 | if (fl) { | 2345 | if (fl) { |
2339 | free_rx_bufs(adapter, fl, fl->avail); | 2346 | free_rx_bufs(adapter, fl, fl->avail); |
2340 | dma_free_coherent(adapter->pdev_dev, | 2347 | dma_free_coherent(adapter->pdev_dev, |
2341 | fl->size * sizeof(*fl->desc) + STAT_LEN, | 2348 | fl->size * sizeof(*fl->desc) + s->stat_len, |
2342 | fl->desc, fl->addr); | 2349 | fl->desc, fl->addr); |
2343 | kfree(fl->sdesc); | 2350 | kfree(fl->sdesc); |
2344 | fl->sdesc = NULL; | 2351 | fl->sdesc = NULL; |
@@ -2424,6 +2431,7 @@ int t4vf_sge_init(struct adapter *adapter) | |||
2424 | u32 fl0 = sge_params->sge_fl_buffer_size[0]; | 2431 | u32 fl0 = sge_params->sge_fl_buffer_size[0]; |
2425 | u32 fl1 = sge_params->sge_fl_buffer_size[1]; | 2432 | u32 fl1 = sge_params->sge_fl_buffer_size[1]; |
2426 | struct sge *s = &adapter->sge; | 2433 | struct sge *s = &adapter->sge; |
2434 | unsigned int ingpadboundary, ingpackboundary; | ||
2427 | 2435 | ||
2428 | /* | 2436 | /* |
2429 | * Start by vetting the basic SGE parameters which have been set up by | 2437 | * Start by vetting the basic SGE parameters which have been set up by |
@@ -2444,12 +2452,48 @@ int t4vf_sge_init(struct adapter *adapter) | |||
2444 | * Now translate the adapter parameters into our internal forms. | 2452 | * Now translate the adapter parameters into our internal forms. |
2445 | */ | 2453 | */ |
2446 | if (fl1) | 2454 | if (fl1) |
2447 | FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; | 2455 | s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; |
2448 | STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) | 2456 | s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) |
2449 | ? 128 : 64); | 2457 | ? 128 : 64); |
2450 | PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); | 2458 | s->pktshift = PKTSHIFT_GET(sge_params->sge_control); |
2451 | FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + | 2459 | |
2452 | SGE_INGPADBOUNDARY_SHIFT); | 2460 | /* T4 uses a single control field to specify both the PCIe Padding and |
2461 | * Packing Boundary. T5 introduced the ability to specify these | ||
2462 | * separately. The actual Ingress Packet Data alignment boundary | ||
2463 | * within Packed Buffer Mode is the maximum of these two | ||
2464 | * specifications. (Note that it makes no real practical sense to | ||
2465 | * have the Padding Boundary be larger than the Packing Boundary but you | ||
2466 | * could set the chip up that way and, in fact, legacy T4 code would | ||
2467 | * end up doing this because it would initialize the Padding Boundary and | ||
2468 | * leave the Packing Boundary initialized to 0 (16 bytes).) | ||
2469 | */ | ||
2470 | ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + | ||
2471 | X_INGPADBOUNDARY_SHIFT); | ||
2472 | if (is_t4(adapter->params.chip)) { | ||
2473 | s->fl_align = ingpadboundary; | ||
2474 | } else { | ||
2475 | /* T5 has a different interpretation of one of the PCIe Packing | ||
2476 | * Boundary values. | ||
2477 | */ | ||
2478 | ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2); | ||
2479 | if (ingpackboundary == INGPACKBOUNDARY_16B_X) | ||
2480 | ingpackboundary = 16; | ||
2481 | else | ||
2482 | ingpackboundary = 1 << (ingpackboundary + | ||
2483 | INGPACKBOUNDARY_SHIFT_X); | ||
2484 | |||
2485 | s->fl_align = max(ingpadboundary, ingpackboundary); | ||
2486 | } | ||
2487 | |||
2488 | /* A FL with <= fl_starve_thres buffers is starving and a periodic | ||
2489 | * timer will attempt to refill it. This needs to be larger than the | ||
2490 | * SGE's Egress Congestion Threshold. If it isn't, then we can get | ||
2491 | * stuck waiting for new packets while the SGE is waiting for us to | ||
2492 | * give it more Free List entries. (Note that the SGE's Egress | ||
2493 | * Congestion Threshold is in units of 2 Free List pointers.) | ||
2494 | */ | ||
2495 | s->fl_starve_thres | ||
2496 | = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1; | ||
2453 | 2497 | ||
2454 | /* | 2498 | /* |
2455 | * Set up tasklet timers. | 2499 | * Set up tasklet timers. |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 2cfa4396b003..a608c6657d63 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | |||
@@ -134,11 +134,13 @@ struct dev_params { | |||
134 | */ | 134 | */ |
135 | struct sge_params { | 135 | struct sge_params { |
136 | u32 sge_control; /* padding, boundaries, lengths, etc. */ | 136 | u32 sge_control; /* padding, boundaries, lengths, etc. */ |
137 | u32 sge_control2; /* T5: more of the same */ | ||
137 | u32 sge_host_page_size; /* RDMA page sizes */ | 138 | u32 sge_host_page_size; /* RDMA page sizes */ |
138 | u32 sge_queues_per_page; /* RDMA queues/page */ | 139 | u32 sge_queues_per_page; /* RDMA queues/page */ |
139 | u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ | 140 | u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ |
140 | u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ | 141 | u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ |
141 | u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ | 142 | u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ |
143 | u32 sge_congestion_control; /* congestion thresholds, etc. */ | ||
142 | u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ | 144 | u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ |
143 | u32 sge_timer_value_2_and_3; | 145 | u32 sge_timer_value_2_and_3; |
144 | u32 sge_timer_value_4_and_5; | 146 | u32 sge_timer_value_4_and_5; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 570b895ae06f..fae0c95e1a6b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | |||
@@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter) | |||
468 | sge_params->sge_timer_value_2_and_3 = vals[5]; | 468 | sge_params->sge_timer_value_2_and_3 = vals[5]; |
469 | sge_params->sge_timer_value_4_and_5 = vals[6]; | 469 | sge_params->sge_timer_value_4_and_5 = vals[6]; |
470 | 470 | ||
471 | /* T4 uses a single control field to specify both the PCIe Padding and | ||
472 | * Packing Boundary. T5 introduced the ability to specify these | ||
473 | * separately with the Padding Boundary in SGE_CONTROL and Packing | ||
474 | * Boundary in SGE_CONTROL2. So for T5 and later we need to grab | ||
475 | * SGE_CONTROL in order to determine how ingress packet data will be | ||
476 | * laid out in Packed Buffer Mode. Unfortunately, older versions of | ||
477 | * the firmware won't let us retrieve SGE_CONTROL2 so if we get a | ||
478 | * failure grabbing it we throw an error since we can't figure out the | ||
479 | * right value. | ||
480 | */ | ||
481 | if (!is_t4(adapter->params.chip)) { | ||
482 | params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | | ||
483 | FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A)); | ||
484 | v = t4vf_query_params(adapter, 1, params, vals); | ||
485 | if (v != FW_SUCCESS) { | ||
486 | dev_err(adapter->pdev_dev, | ||
487 | "Unable to get SGE Control2; " | ||
488 | "probably old firmware.\n"); | ||
489 | return v; | ||
490 | } | ||
491 | sge_params->sge_control2 = vals[0]; | ||
492 | } | ||
493 | |||
471 | params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | | 494 | params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | |
472 | FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD)); | 495 | FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD)); |
473 | v = t4vf_query_params(adapter, 1, params, vals); | 496 | params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | |
497 | FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL)); | ||
498 | v = t4vf_query_params(adapter, 2, params, vals); | ||
474 | if (v) | 499 | if (v) |
475 | return v; | 500 | return v; |
476 | sge_params->sge_ingress_rx_threshold = vals[0]; | 501 | sge_params->sge_ingress_rx_threshold = vals[0]; |
502 | sge_params->sge_congestion_control = vals[1]; | ||
477 | 503 | ||
478 | return 0; | 504 | return 0; |
479 | } | 505 | } |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index c27128de8dde..3dca494797bd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len) | |||
298 | return bufaddr; | 298 | return bufaddr; |
299 | } | 299 | } |
300 | 300 | ||
301 | static void swap_buffer2(void *dst_buf, void *src_buf, int len) | ||
302 | { | ||
303 | int i; | ||
304 | unsigned int *src = src_buf; | ||
305 | unsigned int *dst = dst_buf; | ||
306 | |||
307 | for (i = 0; i < len; i += 4, src++, dst++) | ||
308 | *dst = swab32p(src); | ||
309 | } | ||
310 | |||
301 | static void fec_dump(struct net_device *ndev) | 311 | static void fec_dump(struct net_device *ndev) |
302 | { | 312 | { |
303 | struct fec_enet_private *fep = netdev_priv(ndev); | 313 | struct fec_enet_private *fep = netdev_priv(ndev); |
@@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff | |||
1307 | } | 1317 | } |
1308 | 1318 | ||
1309 | static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, | 1319 | static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, |
1310 | struct bufdesc *bdp, u32 length) | 1320 | struct bufdesc *bdp, u32 length, bool swap) |
1311 | { | 1321 | { |
1312 | struct fec_enet_private *fep = netdev_priv(ndev); | 1322 | struct fec_enet_private *fep = netdev_priv(ndev); |
1313 | struct sk_buff *new_skb; | 1323 | struct sk_buff *new_skb; |
@@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, | |||
1322 | dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, | 1332 | dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, |
1323 | FEC_ENET_RX_FRSIZE - fep->rx_align, | 1333 | FEC_ENET_RX_FRSIZE - fep->rx_align, |
1324 | DMA_FROM_DEVICE); | 1334 | DMA_FROM_DEVICE); |
1325 | memcpy(new_skb->data, (*skb)->data, length); | 1335 | if (!swap) |
1336 | memcpy(new_skb->data, (*skb)->data, length); | ||
1337 | else | ||
1338 | swap_buffer2(new_skb->data, (*skb)->data, length); | ||
1326 | *skb = new_skb; | 1339 | *skb = new_skb; |
1327 | 1340 | ||
1328 | return true; | 1341 | return true; |
@@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) | |||
1352 | u16 vlan_tag; | 1365 | u16 vlan_tag; |
1353 | int index = 0; | 1366 | int index = 0; |
1354 | bool is_copybreak; | 1367 | bool is_copybreak; |
1368 | bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME; | ||
1355 | 1369 | ||
1356 | #ifdef CONFIG_M532x | 1370 | #ifdef CONFIG_M532x |
1357 | flush_cache_all(); | 1371 | flush_cache_all(); |
@@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) | |||
1415 | * include that when passing upstream as it messes up | 1429 | * include that when passing upstream as it messes up |
1416 | * bridging applications. | 1430 | * bridging applications. |
1417 | */ | 1431 | */ |
1418 | is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4); | 1432 | is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4, |
1433 | need_swap); | ||
1419 | if (!is_copybreak) { | 1434 | if (!is_copybreak) { |
1420 | skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); | 1435 | skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); |
1421 | if (unlikely(!skb_new)) { | 1436 | if (unlikely(!skb_new)) { |
@@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) | |||
1430 | prefetch(skb->data - NET_IP_ALIGN); | 1445 | prefetch(skb->data - NET_IP_ALIGN); |
1431 | skb_put(skb, pkt_len - 4); | 1446 | skb_put(skb, pkt_len - 4); |
1432 | data = skb->data; | 1447 | data = skb->data; |
1433 | if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) | 1448 | if (!is_copybreak && need_swap) |
1434 | swap_buffer(data, pkt_len); | 1449 | swap_buffer(data, pkt_len); |
1435 | 1450 | ||
1436 | /* Extract the enhanced buffer descriptor */ | 1451 | /* Extract the enhanced buffer descriptor */ |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index dc97c03134ec..acafe391f0a3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | |||
@@ -706,7 +706,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) | |||
706 | 706 | ||
707 | hw->phy.ops.write_reg(hw, MDIO_CTRL1, | 707 | hw->phy.ops.write_reg(hw, MDIO_CTRL1, |
708 | MDIO_MMD_AN, autoneg_reg); | 708 | MDIO_MMD_AN, autoneg_reg); |
709 | |||
710 | return 0; | 709 | return 0; |
711 | } | 710 | } |
712 | 711 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index d1eb25dbff56..f3df9b350d87 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -2291,8 +2291,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work) | |||
2291 | ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, | 2291 | ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, |
2292 | VXLAN_STEER_BY_OUTER_MAC, 1); | 2292 | VXLAN_STEER_BY_OUTER_MAC, 1); |
2293 | out: | 2293 | out: |
2294 | if (ret) | 2294 | if (ret) { |
2295 | en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); | 2295 | en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); |
2296 | return; | ||
2297 | } | ||
2298 | |||
2299 | /* set offloads */ | ||
2300 | priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | | ||
2301 | NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; | ||
2302 | priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; | ||
2303 | priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL; | ||
2296 | } | 2304 | } |
2297 | 2305 | ||
2298 | static void mlx4_en_del_vxlan_offloads(struct work_struct *work) | 2306 | static void mlx4_en_del_vxlan_offloads(struct work_struct *work) |
@@ -2300,6 +2308,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) | |||
2300 | int ret; | 2308 | int ret; |
2301 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | 2309 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, |
2302 | vxlan_del_task); | 2310 | vxlan_del_task); |
2311 | /* unset offloads */ | ||
2312 | priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | | ||
2313 | NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); | ||
2314 | priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; | ||
2315 | priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL; | ||
2303 | 2316 | ||
2304 | ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, | 2317 | ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, |
2305 | VXLAN_STEER_BY_OUTER_MAC, 0); | 2318 | VXLAN_STEER_BY_OUTER_MAC, 0); |
@@ -2583,13 +2596,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2583 | if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) | 2596 | if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) |
2584 | dev->priv_flags |= IFF_UNICAST_FLT; | 2597 | dev->priv_flags |= IFF_UNICAST_FLT; |
2585 | 2598 | ||
2586 | if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { | ||
2587 | dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | | ||
2588 | NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; | ||
2589 | dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; | ||
2590 | dev->features |= NETIF_F_GSO_UDP_TUNNEL; | ||
2591 | } | ||
2592 | |||
2593 | mdev->pndev[port] = dev; | 2599 | mdev->pndev[port] = dev; |
2594 | 2600 | ||
2595 | netif_carrier_off(dev); | 2601 | netif_carrier_off(dev); |
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig index f3a47147937d..9a49f42ac2ba 100644 --- a/drivers/net/ethernet/qualcomm/Kconfig +++ b/drivers/net/ethernet/qualcomm/Kconfig | |||
@@ -5,7 +5,6 @@ | |||
5 | config NET_VENDOR_QUALCOMM | 5 | config NET_VENDOR_QUALCOMM |
6 | bool "Qualcomm devices" | 6 | bool "Qualcomm devices" |
7 | default y | 7 | default y |
8 | depends on SPI_MASTER && OF_GPIO | ||
9 | ---help--- | 8 | ---help--- |
10 | If you have a network (Ethernet) card belonging to this class, say Y | 9 | If you have a network (Ethernet) card belonging to this class, say Y |
11 | and read the Ethernet-HOWTO, available from | 10 | and read the Ethernet-HOWTO, available from |
@@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM | |||
20 | 19 | ||
21 | config QCA7000 | 20 | config QCA7000 |
22 | tristate "Qualcomm Atheros QCA7000 support" | 21 | tristate "Qualcomm Atheros QCA7000 support" |
23 | depends on SPI_MASTER && OF_GPIO | 22 | depends on SPI_MASTER && OF |
24 | ---help--- | 23 | ---help--- |
25 | This SPI protocol driver supports the Qualcomm Atheros QCA7000. | 24 | This SPI protocol driver supports the Qualcomm Atheros QCA7000. |
26 | 25 | ||
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index affb29da353e..77ed74561e5f 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c | |||
@@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) | |||
1342 | spin_unlock(&pdata->mac_lock); | 1342 | spin_unlock(&pdata->mac_lock); |
1343 | } | 1343 | } |
1344 | 1344 | ||
1345 | static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) | ||
1346 | { | ||
1347 | int rc = 0; | ||
1348 | |||
1349 | if (!pdata->phy_dev) | ||
1350 | return rc; | ||
1351 | |||
1352 | /* If the internal PHY is in General Power-Down mode, all, except the | ||
1353 | * management interface, is powered-down and stays in that condition as | ||
1354 | * long as Phy register bit 0.11 is HIGH. | ||
1355 | * | ||
1356 | * In that case, clear the bit 0.11, so the PHY powers up and we can | ||
1357 | * access to the phy registers. | ||
1358 | */ | ||
1359 | rc = phy_read(pdata->phy_dev, MII_BMCR); | ||
1360 | if (rc < 0) { | ||
1361 | SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); | ||
1362 | return rc; | ||
1363 | } | ||
1364 | |||
1365 | /* If the PHY general power-down bit is not set it is not necessary to | ||
1366 | * disable the general power down-mode. | ||
1367 | */ | ||
1368 | if (rc & BMCR_PDOWN) { | ||
1369 | rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); | ||
1370 | if (rc < 0) { | ||
1371 | SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); | ||
1372 | return rc; | ||
1373 | } | ||
1374 | |||
1375 | usleep_range(1000, 1500); | ||
1376 | } | ||
1377 | |||
1378 | return 0; | ||
1379 | } | ||
1380 | |||
1345 | static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) | 1381 | static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) |
1346 | { | 1382 | { |
1347 | int rc = 0; | 1383 | int rc = 0; |
@@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) | |||
1356 | return rc; | 1392 | return rc; |
1357 | } | 1393 | } |
1358 | 1394 | ||
1359 | /* | 1395 | /* Only disable if energy detect mode is already enabled */ |
1360 | * If energy is detected the PHY is already awake so is not necessary | 1396 | if (rc & MII_LAN83C185_EDPWRDOWN) { |
1361 | * to disable the energy detect power-down mode. | ||
1362 | */ | ||
1363 | if ((rc & MII_LAN83C185_EDPWRDOWN) && | ||
1364 | !(rc & MII_LAN83C185_ENERGYON)) { | ||
1365 | /* Disable energy detect mode for this SMSC Transceivers */ | 1397 | /* Disable energy detect mode for this SMSC Transceivers */ |
1366 | rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, | 1398 | rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, |
1367 | rc & (~MII_LAN83C185_EDPWRDOWN)); | 1399 | rc & (~MII_LAN83C185_EDPWRDOWN)); |
@@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) | |||
1370 | SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); | 1402 | SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); |
1371 | return rc; | 1403 | return rc; |
1372 | } | 1404 | } |
1373 | 1405 | /* Allow PHY to wakeup */ | |
1374 | mdelay(1); | 1406 | mdelay(2); |
1375 | } | 1407 | } |
1376 | 1408 | ||
1377 | return 0; | 1409 | return 0; |
@@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) | |||
1393 | 1425 | ||
1394 | /* Only enable if energy detect mode is already disabled */ | 1426 | /* Only enable if energy detect mode is already disabled */ |
1395 | if (!(rc & MII_LAN83C185_EDPWRDOWN)) { | 1427 | if (!(rc & MII_LAN83C185_EDPWRDOWN)) { |
1396 | mdelay(100); | ||
1397 | /* Enable energy detect mode for this SMSC Transceivers */ | 1428 | /* Enable energy detect mode for this SMSC Transceivers */ |
1398 | rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, | 1429 | rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, |
1399 | rc | MII_LAN83C185_EDPWRDOWN); | 1430 | rc | MII_LAN83C185_EDPWRDOWN); |
@@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) | |||
1402 | SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); | 1433 | SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); |
1403 | return rc; | 1434 | return rc; |
1404 | } | 1435 | } |
1405 | |||
1406 | mdelay(1); | ||
1407 | } | 1436 | } |
1408 | return 0; | 1437 | return 0; |
1409 | } | 1438 | } |
@@ -1415,6 +1444,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata) | |||
1415 | int ret; | 1444 | int ret; |
1416 | 1445 | ||
1417 | /* | 1446 | /* |
1447 | * Make sure to power-up the PHY chip before doing a reset, otherwise | ||
1448 | * the reset fails. | ||
1449 | */ | ||
1450 | ret = smsc911x_phy_general_power_up(pdata); | ||
1451 | if (ret) { | ||
1452 | SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip"); | ||
1453 | return ret; | ||
1454 | } | ||
1455 | |||
1456 | /* | ||
1418 | * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that | 1457 | * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that |
1419 | * are initialized in a Energy Detect Power-Down mode that prevents | 1458 | * are initialized in a Energy Detect Power-Down mode that prevents |
1420 | * the MAC chip to be software reset. So we have to wakeup the PHY | 1459 | * the MAC chip to be software reset. So we have to wakeup the PHY |
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index ab92f67da035..4a4388b813ac 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c | |||
@@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, | |||
264 | 264 | ||
265 | switch (ptp_class & PTP_CLASS_PMASK) { | 265 | switch (ptp_class & PTP_CLASS_PMASK) { |
266 | case PTP_CLASS_IPV4: | 266 | case PTP_CLASS_IPV4: |
267 | offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; | 267 | offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; |
268 | break; | 268 | break; |
269 | case PTP_CLASS_IPV6: | 269 | case PTP_CLASS_IPV6: |
270 | offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; | 270 | offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; |
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 2954052706e8..e22e602beef3 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -791,7 +791,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts) | |||
791 | 791 | ||
792 | switch (type & PTP_CLASS_PMASK) { | 792 | switch (type & PTP_CLASS_PMASK) { |
793 | case PTP_CLASS_IPV4: | 793 | case PTP_CLASS_IPV4: |
794 | offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; | 794 | offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; |
795 | break; | 795 | break; |
796 | case PTP_CLASS_IPV6: | 796 | case PTP_CLASS_IPV6: |
797 | offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; | 797 | offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; |
@@ -934,7 +934,7 @@ static int is_sync(struct sk_buff *skb, int type) | |||
934 | 934 | ||
935 | switch (type & PTP_CLASS_PMASK) { | 935 | switch (type & PTP_CLASS_PMASK) { |
936 | case PTP_CLASS_IPV4: | 936 | case PTP_CLASS_IPV4: |
937 | offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; | 937 | offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; |
938 | break; | 938 | break; |
939 | case PTP_CLASS_IPV6: | 939 | case PTP_CLASS_IPV6: |
940 | offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; | 940 | offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1dfffdc9dfc3..767cd110f496 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -352,6 +352,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) | |||
352 | { | 352 | { |
353 | struct mii_ioctl_data *mii_data = if_mii(ifr); | 353 | struct mii_ioctl_data *mii_data = if_mii(ifr); |
354 | u16 val = mii_data->val_in; | 354 | u16 val = mii_data->val_in; |
355 | bool change_autoneg = false; | ||
355 | 356 | ||
356 | switch (cmd) { | 357 | switch (cmd) { |
357 | case SIOCGMIIPHY: | 358 | case SIOCGMIIPHY: |
@@ -367,22 +368,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) | |||
367 | if (mii_data->phy_id == phydev->addr) { | 368 | if (mii_data->phy_id == phydev->addr) { |
368 | switch (mii_data->reg_num) { | 369 | switch (mii_data->reg_num) { |
369 | case MII_BMCR: | 370 | case MII_BMCR: |
370 | if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) | 371 | if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) { |
372 | if (phydev->autoneg == AUTONEG_ENABLE) | ||
373 | change_autoneg = true; | ||
371 | phydev->autoneg = AUTONEG_DISABLE; | 374 | phydev->autoneg = AUTONEG_DISABLE; |
372 | else | 375 | if (val & BMCR_FULLDPLX) |
376 | phydev->duplex = DUPLEX_FULL; | ||
377 | else | ||
378 | phydev->duplex = DUPLEX_HALF; | ||
379 | if (val & BMCR_SPEED1000) | ||
380 | phydev->speed = SPEED_1000; | ||
381 | else if (val & BMCR_SPEED100) | ||
382 | phydev->speed = SPEED_100; | ||
383 | else phydev->speed = SPEED_10; | ||
384 | } | ||
385 | else { | ||
386 | if (phydev->autoneg == AUTONEG_DISABLE) | ||
387 | change_autoneg = true; | ||
373 | phydev->autoneg = AUTONEG_ENABLE; | 388 | phydev->autoneg = AUTONEG_ENABLE; |
374 | if (!phydev->autoneg && (val & BMCR_FULLDPLX)) | 389 | } |
375 | phydev->duplex = DUPLEX_FULL; | ||
376 | else | ||
377 | phydev->duplex = DUPLEX_HALF; | ||
378 | if (!phydev->autoneg && (val & BMCR_SPEED1000)) | ||
379 | phydev->speed = SPEED_1000; | ||
380 | else if (!phydev->autoneg && | ||
381 | (val & BMCR_SPEED100)) | ||
382 | phydev->speed = SPEED_100; | ||
383 | break; | 390 | break; |
384 | case MII_ADVERTISE: | 391 | case MII_ADVERTISE: |
385 | phydev->advertising = val; | 392 | phydev->advertising = mii_adv_to_ethtool_adv_t(val); |
393 | change_autoneg = true; | ||
386 | break; | 394 | break; |
387 | default: | 395 | default: |
388 | /* do nothing */ | 396 | /* do nothing */ |
@@ -396,6 +404,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) | |||
396 | if (mii_data->reg_num == MII_BMCR && | 404 | if (mii_data->reg_num == MII_BMCR && |
397 | val & BMCR_RESET) | 405 | val & BMCR_RESET) |
398 | return phy_init_hw(phydev); | 406 | return phy_init_hw(phydev); |
407 | |||
408 | if (change_autoneg) | ||
409 | return phy_start_aneg(phydev); | ||
410 | |||
399 | return 0; | 411 | return 0; |
400 | 412 | ||
401 | case SIOCSHWTSTAMP: | 413 | case SIOCSHWTSTAMP: |
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 68c3a3f4e0ab..794a47329368 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
@@ -755,23 +755,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
755 | 755 | ||
756 | err = get_filter(argp, &code); | 756 | err = get_filter(argp, &code); |
757 | if (err >= 0) { | 757 | if (err >= 0) { |
758 | struct bpf_prog *pass_filter = NULL; | ||
758 | struct sock_fprog_kern fprog = { | 759 | struct sock_fprog_kern fprog = { |
759 | .len = err, | 760 | .len = err, |
760 | .filter = code, | 761 | .filter = code, |
761 | }; | 762 | }; |
762 | 763 | ||
763 | ppp_lock(ppp); | 764 | err = 0; |
764 | if (ppp->pass_filter) { | 765 | if (fprog.filter) |
765 | bpf_prog_destroy(ppp->pass_filter); | 766 | err = bpf_prog_create(&pass_filter, &fprog); |
766 | ppp->pass_filter = NULL; | 767 | if (!err) { |
768 | ppp_lock(ppp); | ||
769 | if (ppp->pass_filter) | ||
770 | bpf_prog_destroy(ppp->pass_filter); | ||
771 | ppp->pass_filter = pass_filter; | ||
772 | ppp_unlock(ppp); | ||
767 | } | 773 | } |
768 | if (fprog.filter != NULL) | ||
769 | err = bpf_prog_create(&ppp->pass_filter, | ||
770 | &fprog); | ||
771 | else | ||
772 | err = 0; | ||
773 | kfree(code); | 774 | kfree(code); |
774 | ppp_unlock(ppp); | ||
775 | } | 775 | } |
776 | break; | 776 | break; |
777 | } | 777 | } |
@@ -781,23 +781,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
781 | 781 | ||
782 | err = get_filter(argp, &code); | 782 | err = get_filter(argp, &code); |
783 | if (err >= 0) { | 783 | if (err >= 0) { |
784 | struct bpf_prog *active_filter = NULL; | ||
784 | struct sock_fprog_kern fprog = { | 785 | struct sock_fprog_kern fprog = { |
785 | .len = err, | 786 | .len = err, |
786 | .filter = code, | 787 | .filter = code, |
787 | }; | 788 | }; |
788 | 789 | ||
789 | ppp_lock(ppp); | 790 | err = 0; |
790 | if (ppp->active_filter) { | 791 | if (fprog.filter) |
791 | bpf_prog_destroy(ppp->active_filter); | 792 | err = bpf_prog_create(&active_filter, &fprog); |
792 | ppp->active_filter = NULL; | 793 | if (!err) { |
794 | ppp_lock(ppp); | ||
795 | if (ppp->active_filter) | ||
796 | bpf_prog_destroy(ppp->active_filter); | ||
797 | ppp->active_filter = active_filter; | ||
798 | ppp_unlock(ppp); | ||
793 | } | 799 | } |
794 | if (fprog.filter != NULL) | ||
795 | err = bpf_prog_create(&ppp->active_filter, | ||
796 | &fprog); | ||
797 | else | ||
798 | err = 0; | ||
799 | kfree(code); | 800 | kfree(code); |
800 | ppp_unlock(ppp); | ||
801 | } | 801 | } |
802 | break; | 802 | break; |
803 | } | 803 | } |
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 2c05f6cdb12f..816d511e34d3 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c | |||
@@ -465,19 +465,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) | |||
465 | return ret; | 465 | return ret; |
466 | } | 466 | } |
467 | 467 | ||
468 | ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL); | 468 | ax88772_reset(dev); |
469 | if (ret < 0) | ||
470 | return ret; | ||
471 | |||
472 | msleep(150); | ||
473 | |||
474 | ret = asix_sw_reset(dev, AX_SWRESET_CLEAR); | ||
475 | if (ret < 0) | ||
476 | return ret; | ||
477 | |||
478 | msleep(150); | ||
479 | |||
480 | ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE); | ||
481 | 469 | ||
482 | /* Read PHYID register *AFTER* the PHY was reset properly */ | 470 | /* Read PHYID register *AFTER* the PHY was reset properly */ |
483 | phyid = asix_get_phyid(dev); | 471 | phyid = asix_get_phyid(dev); |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 0ab411461d2e..23b1e8c0d547 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -275,13 +275,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) | |||
275 | return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); | 275 | return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); |
276 | } | 276 | } |
277 | 277 | ||
278 | /* Find VXLAN socket based on network namespace and UDP port */ | 278 | /* Find VXLAN socket based on network namespace, address family and UDP port */ |
279 | static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port) | 279 | static struct vxlan_sock *vxlan_find_sock(struct net *net, |
280 | sa_family_t family, __be16 port) | ||
280 | { | 281 | { |
281 | struct vxlan_sock *vs; | 282 | struct vxlan_sock *vs; |
282 | 283 | ||
283 | hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { | 284 | hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { |
284 | if (inet_sk(vs->sock->sk)->inet_sport == port) | 285 | if (inet_sk(vs->sock->sk)->inet_sport == port && |
286 | inet_sk(vs->sock->sk)->sk.sk_family == family) | ||
285 | return vs; | 287 | return vs; |
286 | } | 288 | } |
287 | return NULL; | 289 | return NULL; |
@@ -300,11 +302,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id) | |||
300 | } | 302 | } |
301 | 303 | ||
302 | /* Look up VNI in a per net namespace table */ | 304 | /* Look up VNI in a per net namespace table */ |
303 | static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) | 305 | static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, |
306 | sa_family_t family, __be16 port) | ||
304 | { | 307 | { |
305 | struct vxlan_sock *vs; | 308 | struct vxlan_sock *vs; |
306 | 309 | ||
307 | vs = vxlan_find_sock(net, port); | 310 | vs = vxlan_find_sock(net, family, port); |
308 | if (!vs) | 311 | if (!vs) |
309 | return NULL; | 312 | return NULL; |
310 | 313 | ||
@@ -621,6 +624,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff) | |||
621 | int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr); | 624 | int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr); |
622 | int err = -ENOSYS; | 625 | int err = -ENOSYS; |
623 | 626 | ||
627 | udp_tunnel_gro_complete(skb, nhoff); | ||
628 | |||
624 | eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr)); | 629 | eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr)); |
625 | type = eh->h_proto; | 630 | type = eh->h_proto; |
626 | 631 | ||
@@ -1771,7 +1776,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1771 | struct vxlan_dev *dst_vxlan; | 1776 | struct vxlan_dev *dst_vxlan; |
1772 | 1777 | ||
1773 | ip_rt_put(rt); | 1778 | ip_rt_put(rt); |
1774 | dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port); | 1779 | dst_vxlan = vxlan_find_vni(vxlan->net, vni, |
1780 | dst->sa.sa_family, dst_port); | ||
1775 | if (!dst_vxlan) | 1781 | if (!dst_vxlan) |
1776 | goto tx_error; | 1782 | goto tx_error; |
1777 | vxlan_encap_bypass(skb, vxlan, dst_vxlan); | 1783 | vxlan_encap_bypass(skb, vxlan, dst_vxlan); |
@@ -1825,7 +1831,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1825 | struct vxlan_dev *dst_vxlan; | 1831 | struct vxlan_dev *dst_vxlan; |
1826 | 1832 | ||
1827 | dst_release(ndst); | 1833 | dst_release(ndst); |
1828 | dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port); | 1834 | dst_vxlan = vxlan_find_vni(vxlan->net, vni, |
1835 | dst->sa.sa_family, dst_port); | ||
1829 | if (!dst_vxlan) | 1836 | if (!dst_vxlan) |
1830 | goto tx_error; | 1837 | goto tx_error; |
1831 | vxlan_encap_bypass(skb, vxlan, dst_vxlan); | 1838 | vxlan_encap_bypass(skb, vxlan, dst_vxlan); |
@@ -1985,13 +1992,15 @@ static int vxlan_init(struct net_device *dev) | |||
1985 | struct vxlan_dev *vxlan = netdev_priv(dev); | 1992 | struct vxlan_dev *vxlan = netdev_priv(dev); |
1986 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); | 1993 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); |
1987 | struct vxlan_sock *vs; | 1994 | struct vxlan_sock *vs; |
1995 | bool ipv6 = vxlan->flags & VXLAN_F_IPV6; | ||
1988 | 1996 | ||
1989 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | 1997 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
1990 | if (!dev->tstats) | 1998 | if (!dev->tstats) |
1991 | return -ENOMEM; | 1999 | return -ENOMEM; |
1992 | 2000 | ||
1993 | spin_lock(&vn->sock_lock); | 2001 | spin_lock(&vn->sock_lock); |
1994 | vs = vxlan_find_sock(vxlan->net, vxlan->dst_port); | 2002 | vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, |
2003 | vxlan->dst_port); | ||
1995 | if (vs) { | 2004 | if (vs) { |
1996 | /* If we have a socket with same port already, reuse it */ | 2005 | /* If we have a socket with same port already, reuse it */ |
1997 | atomic_inc(&vs->refcnt); | 2006 | atomic_inc(&vs->refcnt); |
@@ -2385,6 +2394,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, | |||
2385 | { | 2394 | { |
2386 | struct vxlan_net *vn = net_generic(net, vxlan_net_id); | 2395 | struct vxlan_net *vn = net_generic(net, vxlan_net_id); |
2387 | struct vxlan_sock *vs; | 2396 | struct vxlan_sock *vs; |
2397 | bool ipv6 = flags & VXLAN_F_IPV6; | ||
2388 | 2398 | ||
2389 | vs = vxlan_socket_create(net, port, rcv, data, flags); | 2399 | vs = vxlan_socket_create(net, port, rcv, data, flags); |
2390 | if (!IS_ERR(vs)) | 2400 | if (!IS_ERR(vs)) |
@@ -2394,7 +2404,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, | |||
2394 | return vs; | 2404 | return vs; |
2395 | 2405 | ||
2396 | spin_lock(&vn->sock_lock); | 2406 | spin_lock(&vn->sock_lock); |
2397 | vs = vxlan_find_sock(net, port); | 2407 | vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port); |
2398 | if (vs) { | 2408 | if (vs) { |
2399 | if (vs->rcv == rcv) | 2409 | if (vs->rcv == rcv) |
2400 | atomic_inc(&vs->refcnt); | 2410 | atomic_inc(&vs->refcnt); |
@@ -2553,7 +2563,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, | |||
2553 | nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) | 2563 | nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) |
2554 | vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; | 2564 | vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; |
2555 | 2565 | ||
2556 | if (vxlan_find_vni(net, vni, vxlan->dst_port)) { | 2566 | if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, |
2567 | vxlan->dst_port)) { | ||
2557 | pr_info("duplicate VNI %u\n", vni); | 2568 | pr_info("duplicate VNI %u\n", vni); |
2558 | return -EEXIST; | 2569 | return -EEXIST; |
2559 | } | 2570 | } |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index e0d9f19650b0..eb03943f8463 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c | |||
@@ -284,7 +284,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) | |||
284 | 284 | ||
285 | lockdep_assert_held(&mvm->mutex); | 285 | lockdep_assert_held(&mvm->mutex); |
286 | 286 | ||
287 | if (WARN_ON_ONCE(mvm->init_ucode_complete)) | 287 | if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating)) |
288 | return 0; | 288 | return 0; |
289 | 289 | ||
290 | iwl_init_notification_wait(&mvm->notif_wait, | 290 | iwl_init_notification_wait(&mvm->notif_wait, |
@@ -334,6 +334,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) | |||
334 | goto out; | 334 | goto out; |
335 | } | 335 | } |
336 | 336 | ||
337 | mvm->calibrating = true; | ||
338 | |||
337 | /* Send TX valid antennas before triggering calibrations */ | 339 | /* Send TX valid antennas before triggering calibrations */ |
338 | ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant); | 340 | ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant); |
339 | if (ret) | 341 | if (ret) |
@@ -358,11 +360,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) | |||
358 | MVM_UCODE_CALIB_TIMEOUT); | 360 | MVM_UCODE_CALIB_TIMEOUT); |
359 | if (!ret) | 361 | if (!ret) |
360 | mvm->init_ucode_complete = true; | 362 | mvm->init_ucode_complete = true; |
363 | |||
364 | if (ret && iwl_mvm_is_radio_killed(mvm)) { | ||
365 | IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); | ||
366 | ret = 1; | ||
367 | } | ||
361 | goto out; | 368 | goto out; |
362 | 369 | ||
363 | error: | 370 | error: |
364 | iwl_remove_notification(&mvm->notif_wait, &calib_wait); | 371 | iwl_remove_notification(&mvm->notif_wait, &calib_wait); |
365 | out: | 372 | out: |
373 | mvm->calibrating = false; | ||
366 | if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { | 374 | if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { |
367 | /* we want to debug INIT and we have no NVM - fake */ | 375 | /* we want to debug INIT and we have no NVM - fake */ |
368 | mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + | 376 | mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index f308e52781f6..57325589ee5b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
@@ -825,6 +825,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) | |||
825 | 825 | ||
826 | mvm->scan_status = IWL_MVM_SCAN_NONE; | 826 | mvm->scan_status = IWL_MVM_SCAN_NONE; |
827 | mvm->ps_disabled = false; | 827 | mvm->ps_disabled = false; |
828 | mvm->calibrating = false; | ||
828 | 829 | ||
829 | /* just in case one was running */ | 830 | /* just in case one was running */ |
830 | ieee80211_remain_on_channel_expired(mvm->hw); | 831 | ieee80211_remain_on_channel_expired(mvm->hw); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 256765accbc6..d015fac06a62 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h | |||
@@ -548,6 +548,7 @@ struct iwl_mvm { | |||
548 | enum iwl_ucode_type cur_ucode; | 548 | enum iwl_ucode_type cur_ucode; |
549 | bool ucode_loaded; | 549 | bool ucode_loaded; |
550 | bool init_ucode_complete; | 550 | bool init_ucode_complete; |
551 | bool calibrating; | ||
551 | u32 error_event_table; | 552 | u32 error_event_table; |
552 | u32 log_event_table; | 553 | u32 log_event_table; |
553 | u32 umac_error_event_table; | 554 | u32 umac_error_event_table; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index bd52ecfabedb..7a9578567f4f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c | |||
@@ -427,6 +427,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, | |||
427 | } | 427 | } |
428 | mvm->sf_state = SF_UNINIT; | 428 | mvm->sf_state = SF_UNINIT; |
429 | mvm->low_latency_agg_frame_limit = 6; | 429 | mvm->low_latency_agg_frame_limit = 6; |
430 | mvm->cur_ucode = IWL_UCODE_INIT; | ||
430 | 431 | ||
431 | mutex_init(&mvm->mutex); | 432 | mutex_init(&mvm->mutex); |
432 | mutex_init(&mvm->d0i3_suspend_mutex); | 433 | mutex_init(&mvm->d0i3_suspend_mutex); |
@@ -757,6 +758,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) | |||
757 | static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) | 758 | static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) |
758 | { | 759 | { |
759 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 760 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
761 | bool calibrating = ACCESS_ONCE(mvm->calibrating); | ||
760 | 762 | ||
761 | if (state) | 763 | if (state) |
762 | set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); | 764 | set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); |
@@ -765,7 +767,15 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) | |||
765 | 767 | ||
766 | wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); | 768 | wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); |
767 | 769 | ||
768 | return state && mvm->cur_ucode != IWL_UCODE_INIT; | 770 | /* iwl_run_init_mvm_ucode is waiting for results, abort it */ |
771 | if (calibrating) | ||
772 | iwl_abort_notification_waits(&mvm->notif_wait); | ||
773 | |||
774 | /* | ||
775 | * Stop the device if we run OPERATIONAL firmware or if we are in the | ||
776 | * middle of the calibrations. | ||
777 | */ | ||
778 | return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating); | ||
769 | } | 779 | } |
770 | 780 | ||
771 | static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) | 781 | static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) |
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 40a290603ead..836725e92687 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -913,7 +913,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) | |||
913 | * restart. So don't process again if the device is | 913 | * restart. So don't process again if the device is |
914 | * already dead. | 914 | * already dead. |
915 | */ | 915 | */ |
916 | if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { | 916 | if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { |
917 | IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); | ||
917 | iwl_pcie_tx_stop(trans); | 918 | iwl_pcie_tx_stop(trans); |
918 | iwl_pcie_rx_stop(trans); | 919 | iwl_pcie_rx_stop(trans); |
919 | 920 | ||
@@ -943,7 +944,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) | |||
943 | /* clear all status bits */ | 944 | /* clear all status bits */ |
944 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); | 945 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); |
945 | clear_bit(STATUS_INT_ENABLED, &trans->status); | 946 | clear_bit(STATUS_INT_ENABLED, &trans->status); |
946 | clear_bit(STATUS_DEVICE_ENABLED, &trans->status); | ||
947 | clear_bit(STATUS_TPOWER_PMI, &trans->status); | 947 | clear_bit(STATUS_TPOWER_PMI, &trans->status); |
948 | clear_bit(STATUS_RFKILL, &trans->status); | 948 | clear_bit(STATUS_RFKILL, &trans->status); |
949 | 949 | ||
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 209db62ee627..77fbf3035038 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -2191,7 +2191,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
2191 | if (err != 0) { | 2191 | if (err != 0) { |
2192 | printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n", | 2192 | printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n", |
2193 | err); | 2193 | err); |
2194 | goto failed_hw; | 2194 | goto failed_bind; |
2195 | } | 2195 | } |
2196 | 2196 | ||
2197 | skb_queue_head_init(&data->pending); | 2197 | skb_queue_head_init(&data->pending); |
@@ -2397,6 +2397,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
2397 | return idx; | 2397 | return idx; |
2398 | 2398 | ||
2399 | failed_hw: | 2399 | failed_hw: |
2400 | device_release_driver(data->dev); | ||
2401 | failed_bind: | ||
2400 | device_unregister(data->dev); | 2402 | device_unregister(data->dev); |
2401 | failed_drvdata: | 2403 | failed_drvdata: |
2402 | ieee80211_free_hw(hw); | 2404 | ieee80211_free_hw(hw); |