Diffstat (limited to 'drivers/net/sfc/efx.c')
-rw-r--r--  drivers/net/sfc/efx.c | 29
1 file changed, 12 insertions, 17 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 7269a426051c..343e8da1fa30 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -50,16 +50,6 @@ static struct workqueue_struct *reset_workqueue;
  *************************************************************************/
 
 /*
- * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
- *
- * This sets the default for new devices. It can be controlled later
- * using ethtool.
- */
-static int lro = true;
-module_param(lro, int, 0644);
-MODULE_PARM_DESC(lro, "Large receive offload acceleration");
-
-/*
  * Use separate channels for TX and RX events
  *
  * Set this to 1 to use separate channels for TX and RX. It allows us
@@ -894,9 +884,9 @@ static int efx_wanted_rx_queues(void)
 	int count;
 	int cpu;
 
-	if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) {
+	if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
 		printk(KERN_WARNING
-		       "efx.c: allocation failure, irq balancing hobbled\n");
+		       "sfc: RSS disabled due to allocation failure\n");
 		return 1;
 	}
 
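For context, the hunk above only changes the failure path of efx_wanted_rx_queues(): when the scratch cpumask cannot be allocated, the driver now logs that RSS is disabled and falls back to a single RX queue. Below is a minimal sketch, not taken from the patch, of the usual cpumask-API pattern for counting physical cores that this kind of sizing function builds on; the helper name count_online_cores() is purely illustrative.

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/topology.h>

/* Illustrative sketch (not part of this patch): count one RX queue per
 * physical core by OR-ing each core's sibling mask into a scratch mask
 * and counting a core only the first time one of its CPUs is seen.
 */
static int count_online_cores(void)
{
        cpumask_var_t core_mask;
        int count = 0;
        int cpu;

        if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL)))
                return 1;       /* same fallback as the hunk above: one queue */

        cpumask_clear(core_mask);
        for_each_online_cpu(cpu) {
                if (!cpumask_test_cpu(cpu, core_mask)) {
                        ++count;
                        cpumask_or(core_mask, core_mask,
                                   topology_core_cpumask(cpu));
                }
        }

        free_cpumask_var(core_mask);
        return count;
}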
@@ -1300,10 +1290,16 @@ out_requeue:
 static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
+	struct mii_ioctl_data *data = if_mii(ifr);
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
-	return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
+	/* Convert phy_id from older PRTAD/DEVAD format */
+	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
+	    (data->phy_id & 0xfc00) == 0x0400)
+		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
+
+	return mdio_mii_ioctl(&efx->mdio, data, cmd);
 }
 
 /**************************************************************************
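The rewritten efx_ioctl() forwards the MII register ioctls to the MDIO 45 helper mdio_mii_ioctl() and converts phy_id values still encoded in the driver's older PRTAD/DEVAD packing into the clause-45 form. The sketch below shows, from user space, how a caller would address a clause-45 register through SIOCGMIIREG using the mdio_phy_id_c45() helper from <linux/mdio.h>; the interface name and the register chosen are assumptions for illustration only.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        /* The MII ioctl data lives in the ifreq union, as if_mii() assumes in-kernel */
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed interface */

        /* Clause-45 addressing: port address 0, PMA/PMD MMD, status register 1 */
        mii->phy_id = mdio_phy_id_c45(0, MDIO_MMD_PMAPMD);
        mii->reg_num = MDIO_STAT1;

        if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
                printf("PMA/PMD STAT1 = 0x%04x\n", mii->val_out);
        else
                perror("SIOCGMIIREG");

        close(fd);
        return 0;
}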
@@ -1945,7 +1941,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 	mutex_init(&efx->mac_lock);
 	efx->mac_op = &efx_dummy_mac_operations;
 	efx->phy_op = &efx_dummy_phy_operations;
-	efx->mii.dev = net_dev;
+	efx->mdio.dev = net_dev;
 	INIT_WORK(&efx->phy_work, efx_phy_work);
 	INIT_WORK(&efx->mac_work, efx_mac_work);
 	atomic_set(&efx->netif_stop_count, 1);
@@ -2161,9 +2157,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	if (!net_dev)
 		return -ENOMEM;
 	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
-			      NETIF_F_HIGHDMA | NETIF_F_TSO);
-	if (lro)
-		net_dev->features |= NETIF_F_GRO;
+			      NETIF_F_HIGHDMA | NETIF_F_TSO |
+			      NETIF_F_GRO);
 	/* Mask for features that also apply to VLAN devices */
 	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
 				   NETIF_F_HIGHDMA | NETIF_F_TSO);
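With the lro module parameter gone, NETIF_F_GRO is simply set at probe time and the offload is toggled per device through the standard ethtool interface (roughly what "ethtool -K ethX gro off" does). The sketch below shows the equivalent legacy ETHTOOL_SGRO ioctl issued from user space; the interface name is an assumption.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        struct ethtool_value eval;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed interface */

        eval.cmd = ETHTOOL_SGRO;        /* legacy per-flag GRO control */
        eval.data = 0;                  /* 0 = disable, 1 = enable */
        ifr.ifr_data = (void *)&eval;

        if (ioctl(fd, SIOCETHTOOL, &ifr) != 0)
                perror("SIOCETHTOOL");

        close(fd);
        return 0;
}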