-rw-r--r--  drivers/net/forcedeth.c            |  49
-rw-r--r--  drivers/net/gianfar_mii.c          |   4
-rw-r--r--  drivers/net/iseries_veth.c         |   2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h          |   4
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c  |  91
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c     | 305
-rw-r--r--  drivers/net/mv643xx_eth.c          |  11
-rw-r--r--  drivers/net/sky2.c                 |  17
-rw-r--r--  drivers/net/sky2.h                 |   2
-rw-r--r--  drivers/net/tlan.c                 |  25
-rw-r--r--  drivers/net/tulip/xircom_cb.c      |   2
-rw-r--r--  drivers/net/ucc_geth_mii.c         |   4
-rw-r--r--  drivers/net/virtio_net.c           |  10
-rw-r--r--  drivers/net/wan/hdlc.c             |  24
-rw-r--r--  drivers/net/wan/hdlc_cisco.c       |   5
-rw-r--r--  drivers/net/wan/hdlc_fr.c          |  53
-rw-r--r--  drivers/net/wan/hdlc_ppp.c         |   2
-rw-r--r--  drivers/net/wan/hdlc_raw.c         |   2
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c     |   6
-rw-r--r--  drivers/net/wan/hdlc_x25.c         |  10
-rw-r--r--  include/linux/hdlc.h               |  25
21 files changed, 398 insertions(+), 255 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 36342230a6de..d4843d014bc9 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -323,8 +323,8 @@ enum {
323 NvRegMIIStatus = 0x180, 323 NvRegMIIStatus = 0x180,
324#define NVREG_MIISTAT_ERROR 0x0001 324#define NVREG_MIISTAT_ERROR 0x0001
325#define NVREG_MIISTAT_LINKCHANGE 0x0008 325#define NVREG_MIISTAT_LINKCHANGE 0x0008
326#define NVREG_MIISTAT_MASK 0x000f 326#define NVREG_MIISTAT_MASK_RW 0x0007
327#define NVREG_MIISTAT_MASK2 0x000f 327#define NVREG_MIISTAT_MASK_ALL 0x000f
328 NvRegMIIMask = 0x184, 328 NvRegMIIMask = 0x184,
329#define NVREG_MII_LINKCHANGE 0x0008 329#define NVREG_MII_LINKCHANGE 0x0008
330 330
@@ -624,6 +624,9 @@ union ring_type {
624#define NV_MSI_X_VECTOR_TX 0x1 624#define NV_MSI_X_VECTOR_TX 0x1
625#define NV_MSI_X_VECTOR_OTHER 0x2 625#define NV_MSI_X_VECTOR_OTHER 0x2
626 626
627#define NV_RESTART_TX 0x1
628#define NV_RESTART_RX 0x2
629
627/* statistics */ 630/* statistics */
628struct nv_ethtool_str { 631struct nv_ethtool_str {
629 char name[ETH_GSTRING_LEN]; 632 char name[ETH_GSTRING_LEN];
@@ -1061,7 +1064,7 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1061 u32 reg; 1064 u32 reg;
1062 int retval; 1065 int retval;
1063 1066
1064 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 1067 writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
1065 1068
1066 reg = readl(base + NvRegMIIControl); 1069 reg = readl(base + NvRegMIIControl);
1067 if (reg & NVREG_MIICTL_INUSE) { 1070 if (reg & NVREG_MIICTL_INUSE) {
@@ -1432,16 +1435,30 @@ static void nv_mac_reset(struct net_device *dev)
1432{ 1435{
1433 struct fe_priv *np = netdev_priv(dev); 1436 struct fe_priv *np = netdev_priv(dev);
1434 u8 __iomem *base = get_hwbase(dev); 1437 u8 __iomem *base = get_hwbase(dev);
1438 u32 temp1, temp2, temp3;
1435 1439
1436 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name); 1440 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
1441
1437 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1442 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1438 pci_push(base); 1443 pci_push(base);
1444
1445 /* save registers since they will be cleared on reset */
1446 temp1 = readl(base + NvRegMacAddrA);
1447 temp2 = readl(base + NvRegMacAddrB);
1448 temp3 = readl(base + NvRegTransmitPoll);
1449
1439 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); 1450 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1440 pci_push(base); 1451 pci_push(base);
1441 udelay(NV_MAC_RESET_DELAY); 1452 udelay(NV_MAC_RESET_DELAY);
1442 writel(0, base + NvRegMacReset); 1453 writel(0, base + NvRegMacReset);
1443 pci_push(base); 1454 pci_push(base);
1444 udelay(NV_MAC_RESET_DELAY); 1455 udelay(NV_MAC_RESET_DELAY);
1456
1457 /* restore saved registers */
1458 writel(temp1, base + NvRegMacAddrA);
1459 writel(temp2, base + NvRegMacAddrB);
1460 writel(temp3, base + NvRegTransmitPoll);
1461
1445 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); 1462 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1446 pci_push(base); 1463 pci_push(base);
1447} 1464}
@@ -2767,6 +2784,7 @@ static int nv_update_linkspeed(struct net_device *dev)
2767 int mii_status; 2784 int mii_status;
2768 int retval = 0; 2785 int retval = 0;
2769 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 2786 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
2787 u32 txrxFlags = 0;
2770 2788
2771 /* BMSR_LSTATUS is latched, read it twice: 2789 /* BMSR_LSTATUS is latched, read it twice:
2772 * we want the current value. 2790 * we want the current value.
@@ -2862,6 +2880,16 @@ set_speed:
2862 np->duplex = newdup; 2880 np->duplex = newdup;
2863 np->linkspeed = newls; 2881 np->linkspeed = newls;
2864 2882
2883 /* The transmitter and receiver must be restarted for safe update */
2884 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
2885 txrxFlags |= NV_RESTART_TX;
2886 nv_stop_tx(dev);
2887 }
2888 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
2889 txrxFlags |= NV_RESTART_RX;
2890 nv_stop_rx(dev);
2891 }
2892
2865 if (np->gigabit == PHY_GIGABIT) { 2893 if (np->gigabit == PHY_GIGABIT) {
2866 phyreg = readl(base + NvRegRandomSeed); 2894 phyreg = readl(base + NvRegRandomSeed);
2867 phyreg &= ~(0x3FF00); 2895 phyreg &= ~(0x3FF00);
@@ -2950,6 +2978,11 @@ set_speed:
2950 } 2978 }
2951 nv_update_pause(dev, pause_flags); 2979 nv_update_pause(dev, pause_flags);
2952 2980
2981 if (txrxFlags & NV_RESTART_TX)
2982 nv_start_tx(dev);
2983 if (txrxFlags & NV_RESTART_RX)
2984 nv_start_rx(dev);
2985
2953 return retval; 2986 return retval;
2954} 2987}
2955 2988
@@ -2976,7 +3009,7 @@ static void nv_link_irq(struct net_device *dev)
2976 u32 miistat; 3009 u32 miistat;
2977 3010
2978 miistat = readl(base + NvRegMIIStatus); 3011 miistat = readl(base + NvRegMIIStatus);
2979 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 3012 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
2980 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); 3013 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
2981 3014
2982 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 3015 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
@@ -4851,7 +4884,7 @@ static int nv_open(struct net_device *dev)
4851 4884
4852 writel(0, base + NvRegMIIMask); 4885 writel(0, base + NvRegMIIMask);
4853 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4886 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4854 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 4887 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
4855 4888
4856 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 4889 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
4857 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 4890 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
@@ -4889,7 +4922,7 @@ static int nv_open(struct net_device *dev)
4889 4922
4890 nv_disable_hw_interrupts(dev, np->irqmask); 4923 nv_disable_hw_interrupts(dev, np->irqmask);
4891 pci_push(base); 4924 pci_push(base);
4892 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 4925 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
4893 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4926 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4894 pci_push(base); 4927 pci_push(base);
4895 4928
@@ -4912,7 +4945,7 @@ static int nv_open(struct net_device *dev)
4912 { 4945 {
4913 u32 miistat; 4946 u32 miistat;
4914 miistat = readl(base + NvRegMIIStatus); 4947 miistat = readl(base + NvRegMIIStatus);
4915 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 4948 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
4916 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); 4949 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
4917 } 4950 }
4918 /* set linkspeed to invalid value, thus force nv_update_linkspeed 4951 /* set linkspeed to invalid value, thus force nv_update_linkspeed
@@ -5280,7 +5313,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5280 phystate &= ~NVREG_ADAPTCTL_RUNNING; 5313 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5281 writel(phystate, base + NvRegAdapterControl); 5314 writel(phystate, base + NvRegAdapterControl);
5282 } 5315 }
5283 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 5316 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5284 5317
5285 if (id->driver_data & DEV_HAS_MGMT_UNIT) { 5318 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5286 /* management unit running on the mac? */ 5319 /* management unit running on the mac? */
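Assembled from the new-side lines of the nv_mac_reset() hunk above, the reworked function reads roughly as follows: the MAC-address and transmit-poll registers are cleared by the MAC reset, so they are saved before the reset is asserted and written back afterwards. This is a readability sketch of the patched code, not an authoritative copy of the final source file.

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits,
	       base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}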
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 100bf410bf5f..6a647d95e6ea 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -127,7 +127,7 @@ int gfar_mdio_reset(struct mii_bus *bus)
127 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; 127 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
128 unsigned int timeout = PHY_INIT_TIMEOUT; 128 unsigned int timeout = PHY_INIT_TIMEOUT;
129 129
130 spin_lock_bh(&bus->mdio_lock); 130 mutex_lock(&bus->mdio_lock);
131 131
132 /* Reset the management interface */ 132 /* Reset the management interface */
133 gfar_write(&regs->miimcfg, MIIMCFG_RESET); 133 gfar_write(&regs->miimcfg, MIIMCFG_RESET);
@@ -140,7 +140,7 @@ int gfar_mdio_reset(struct mii_bus *bus)
140 timeout--) 140 timeout--)
141 cpu_relax(); 141 cpu_relax();
142 142
143 spin_unlock_bh(&bus->mdio_lock); 143 mutex_unlock(&bus->mdio_lock);
144 144
145 if(timeout <= 0) { 145 if(timeout <= 0) {
146 printk(KERN_ERR "%s: The MII Bus is stuck!\n", 146 printk(KERN_ERR "%s: The MII Bus is stuck!\n",
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 419861cbc65e..58d3bb622da6 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1020,7 +1020,7 @@ static const struct ethtool_ops ops = {
1020 .get_link = veth_get_link, 1020 .get_link = veth_get_link,
1021}; 1021};
1022 1022
1023static struct net_device * __init veth_probe_one(int vlan, 1023static struct net_device *veth_probe_one(int vlan,
1024 struct vio_dev *vio_dev) 1024 struct vio_dev *vio_dev)
1025{ 1025{
1026 struct net_device *dev; 1026 struct net_device *dev;
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index a021a6e72641..d0bf206632ca 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -136,8 +136,6 @@ struct ixgbe_ring {
136 u16 head; 136 u16 head;
137 u16 tail; 137 u16 tail;
138 138
139 /* To protect race between sender and clean_tx_irq */
140 spinlock_t tx_lock;
141 139
142 struct ixgbe_queue_stats stats; 140 struct ixgbe_queue_stats stats;
143 141
@@ -174,7 +172,6 @@ struct ixgbe_adapter {
174 struct vlan_group *vlgrp; 172 struct vlan_group *vlgrp;
175 u16 bd_number; 173 u16 bd_number;
176 u16 rx_buf_len; 174 u16 rx_buf_len;
177 atomic_t irq_sem;
178 struct work_struct reset_task; 175 struct work_struct reset_task;
179 176
180 /* TX */ 177 /* TX */
@@ -244,6 +241,7 @@ extern const char ixgbe_driver_version[];
244 241
245extern int ixgbe_up(struct ixgbe_adapter *adapter); 242extern int ixgbe_up(struct ixgbe_adapter *adapter);
246extern void ixgbe_down(struct ixgbe_adapter *adapter); 243extern void ixgbe_down(struct ixgbe_adapter *adapter);
244extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
247extern void ixgbe_reset(struct ixgbe_adapter *adapter); 245extern void ixgbe_reset(struct ixgbe_adapter *adapter);
248extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 246extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
249extern void ixgbe_set_ethtool_ops(struct net_device *netdev); 247extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 36353447716d..a119cbd8dbb8 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -103,21 +103,41 @@ static int ixgbe_get_settings(struct net_device *netdev,
103 struct ethtool_cmd *ecmd) 103 struct ethtool_cmd *ecmd)
104{ 104{
105 struct ixgbe_adapter *adapter = netdev_priv(netdev); 105 struct ixgbe_adapter *adapter = netdev_priv(netdev);
106 struct ixgbe_hw *hw = &adapter->hw;
107 u32 link_speed = 0;
108 bool link_up;
106 109
107 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 110 ecmd->supported = SUPPORTED_10000baseT_Full;
108 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 111 ecmd->autoneg = AUTONEG_ENABLE;
109 ecmd->port = PORT_FIBRE;
110 ecmd->transceiver = XCVR_EXTERNAL; 112 ecmd->transceiver = XCVR_EXTERNAL;
113 if (hw->phy.media_type == ixgbe_media_type_copper) {
114 ecmd->supported |= (SUPPORTED_1000baseT_Full |
115 SUPPORTED_TP | SUPPORTED_Autoneg);
116
117 ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
118 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
119 ecmd->advertising |= ADVERTISED_10000baseT_Full;
120 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
121 ecmd->advertising |= ADVERTISED_1000baseT_Full;
122
123 ecmd->port = PORT_TP;
124 } else {
125 ecmd->supported |= SUPPORTED_FIBRE;
126 ecmd->advertising = (ADVERTISED_10000baseT_Full |
127 ADVERTISED_FIBRE);
128 ecmd->port = PORT_FIBRE;
129 }
111 130
112 if (netif_carrier_ok(adapter->netdev)) { 131 adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up);
113 ecmd->speed = SPEED_10000; 132 if (link_up) {
133 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
134 SPEED_10000 : SPEED_1000;
114 ecmd->duplex = DUPLEX_FULL; 135 ecmd->duplex = DUPLEX_FULL;
115 } else { 136 } else {
116 ecmd->speed = -1; 137 ecmd->speed = -1;
117 ecmd->duplex = -1; 138 ecmd->duplex = -1;
118 } 139 }
119 140
120 ecmd->autoneg = AUTONEG_DISABLE;
121 return 0; 141 return 0;
122} 142}
123 143
@@ -125,17 +145,17 @@ static int ixgbe_set_settings(struct net_device *netdev,
125 struct ethtool_cmd *ecmd) 145 struct ethtool_cmd *ecmd)
126{ 146{
127 struct ixgbe_adapter *adapter = netdev_priv(netdev); 147 struct ixgbe_adapter *adapter = netdev_priv(netdev);
148 struct ixgbe_hw *hw = &adapter->hw;
128 149
129 if (ecmd->autoneg == AUTONEG_ENABLE || 150 switch (hw->phy.media_type) {
130 ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) 151 case ixgbe_media_type_fiber:
131 return -EINVAL; 152 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
132 153 (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
133 if (netif_running(adapter->netdev)) { 154 return -EINVAL;
134 ixgbe_down(adapter); 155 /* in this case we currently only support 10Gb/FULL */
135 ixgbe_reset(adapter); 156 break;
136 ixgbe_up(adapter); 157 default:
137 } else { 158 break;
138 ixgbe_reset(adapter);
139 } 159 }
140 160
141 return 0; 161 return 0;
@@ -147,7 +167,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
147 struct ixgbe_adapter *adapter = netdev_priv(netdev); 167 struct ixgbe_adapter *adapter = netdev_priv(netdev);
148 struct ixgbe_hw *hw = &adapter->hw; 168 struct ixgbe_hw *hw = &adapter->hw;
149 169
150 pause->autoneg = AUTONEG_DISABLE; 170 pause->autoneg = (hw->fc.type == ixgbe_fc_full ? 1 : 0);
151 171
152 if (hw->fc.type == ixgbe_fc_rx_pause) { 172 if (hw->fc.type == ixgbe_fc_rx_pause) {
153 pause->rx_pause = 1; 173 pause->rx_pause = 1;
@@ -165,10 +185,8 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
165 struct ixgbe_adapter *adapter = netdev_priv(netdev); 185 struct ixgbe_adapter *adapter = netdev_priv(netdev);
166 struct ixgbe_hw *hw = &adapter->hw; 186 struct ixgbe_hw *hw = &adapter->hw;
167 187
168 if (pause->autoneg == AUTONEG_ENABLE) 188 if ((pause->autoneg == AUTONEG_ENABLE) ||
169 return -EINVAL; 189 (pause->rx_pause && pause->tx_pause))
170
171 if (pause->rx_pause && pause->tx_pause)
172 hw->fc.type = ixgbe_fc_full; 190 hw->fc.type = ixgbe_fc_full;
173 else if (pause->rx_pause && !pause->tx_pause) 191 else if (pause->rx_pause && !pause->tx_pause)
174 hw->fc.type = ixgbe_fc_rx_pause; 192 hw->fc.type = ixgbe_fc_rx_pause;
@@ -176,15 +194,15 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
176 hw->fc.type = ixgbe_fc_tx_pause; 194 hw->fc.type = ixgbe_fc_tx_pause;
177 else if (!pause->rx_pause && !pause->tx_pause) 195 else if (!pause->rx_pause && !pause->tx_pause)
178 hw->fc.type = ixgbe_fc_none; 196 hw->fc.type = ixgbe_fc_none;
197 else
198 return -EINVAL;
179 199
180 hw->fc.original_type = hw->fc.type; 200 hw->fc.original_type = hw->fc.type;
181 201
182 if (netif_running(adapter->netdev)) { 202 if (netif_running(netdev))
183 ixgbe_down(adapter); 203 ixgbe_reinit_locked(adapter);
184 ixgbe_up(adapter); 204 else
185 } else {
186 ixgbe_reset(adapter); 205 ixgbe_reset(adapter);
187 }
188 206
189 return 0; 207 return 0;
190} 208}
@@ -203,12 +221,10 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
203 else 221 else
204 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; 222 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
205 223
206 if (netif_running(netdev)) { 224 if (netif_running(netdev))
207 ixgbe_down(adapter); 225 ixgbe_reinit_locked(adapter);
208 ixgbe_up(adapter); 226 else
209 } else {
210 ixgbe_reset(adapter); 227 ixgbe_reset(adapter);
211 }
212 228
213 return 0; 229 return 0;
214} 230}
@@ -662,7 +678,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
662 return 0; 678 return 0;
663 } 679 }
664 680
665 if (netif_running(adapter->netdev)) 681 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
682 msleep(1);
683
684 if (netif_running(netdev))
666 ixgbe_down(adapter); 685 ixgbe_down(adapter);
667 686
668 /* 687 /*
@@ -733,6 +752,7 @@ err_setup:
733 if (netif_running(adapter->netdev)) 752 if (netif_running(adapter->netdev))
734 ixgbe_up(adapter); 753 ixgbe_up(adapter);
735 754
755 clear_bit(__IXGBE_RESETTING, &adapter->state);
736 return err; 756 return err;
737} 757}
738 758
@@ -820,11 +840,8 @@ static int ixgbe_nway_reset(struct net_device *netdev)
820{ 840{
821 struct ixgbe_adapter *adapter = netdev_priv(netdev); 841 struct ixgbe_adapter *adapter = netdev_priv(netdev);
822 842
823 if (netif_running(netdev)) { 843 if (netif_running(netdev))
824 ixgbe_down(adapter); 844 ixgbe_reinit_locked(adapter);
825 ixgbe_reset(adapter);
826 ixgbe_up(adapter);
827 }
828 845
829 return 0; 846 return 0;
830} 847}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 3732dd6c4b2a..ead49e54f31b 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -87,6 +87,25 @@ MODULE_VERSION(DRV_VERSION);
87 87
88#define DEFAULT_DEBUG_LEVEL_SHIFT 3 88#define DEFAULT_DEBUG_LEVEL_SHIFT 3
89 89
90static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
91{
92 u32 ctrl_ext;
93
94 /* Let firmware take over control of h/w */
95 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
96 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
97 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
98}
99
100static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
101{
102 u32 ctrl_ext;
103
104 /* Let firmware know the driver has taken over */
105 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
106 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
107 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
108}
90 109
91#ifdef DEBUG 110#ifdef DEBUG
92/** 111/**
@@ -165,6 +184,15 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
165 return false; 184 return false;
166} 185}
167 186
187#define IXGBE_MAX_TXD_PWR 14
188#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
189
190/* Tx Descriptors needed, worst case */
191#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
192 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
193#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
194 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
195
168/** 196/**
169 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes 197 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
170 * @adapter: board private structure 198 * @adapter: board private structure
@@ -177,18 +205,34 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
177 struct ixgbe_tx_buffer *tx_buffer_info; 205 struct ixgbe_tx_buffer *tx_buffer_info;
178 unsigned int i, eop; 206 unsigned int i, eop;
179 bool cleaned = false; 207 bool cleaned = false;
180 int count = 0; 208 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
181 209
182 i = tx_ring->next_to_clean; 210 i = tx_ring->next_to_clean;
183 eop = tx_ring->tx_buffer_info[i].next_to_watch; 211 eop = tx_ring->tx_buffer_info[i].next_to_watch;
184 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 212 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
185 while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) { 213 while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
186 for (cleaned = false; !cleaned;) { 214 cleaned = false;
215 while (!cleaned) {
187 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 216 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
188 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 217 tx_buffer_info = &tx_ring->tx_buffer_info[i];
189 cleaned = (i == eop); 218 cleaned = (i == eop);
190 219
191 tx_ring->stats.bytes += tx_buffer_info->length; 220 tx_ring->stats.bytes += tx_buffer_info->length;
221 if (cleaned) {
222 struct sk_buff *skb = tx_buffer_info->skb;
223#ifdef NETIF_F_TSO
224 unsigned int segs, bytecount;
225 segs = skb_shinfo(skb)->gso_segs ?: 1;
226 /* multiply data chunks by size of headers */
227 bytecount = ((segs - 1) * skb_headlen(skb)) +
228 skb->len;
229 total_tx_packets += segs;
230 total_tx_bytes += bytecount;
231#else
232 total_tx_packets++;
233 total_tx_bytes += skb->len;
234#endif
235 }
192 ixgbe_unmap_and_free_tx_resource(adapter, 236 ixgbe_unmap_and_free_tx_resource(adapter,
193 tx_buffer_info); 237 tx_buffer_info);
194 tx_desc->wb.status = 0; 238 tx_desc->wb.status = 0;
@@ -204,29 +248,36 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
204 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 248 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
205 249
206 /* weight of a sort for tx, avoid endless transmit cleanup */ 250 /* weight of a sort for tx, avoid endless transmit cleanup */
207 if (count++ >= tx_ring->work_limit) 251 if (total_tx_packets >= tx_ring->work_limit)
208 break; 252 break;
209 } 253 }
210 254
211 tx_ring->next_to_clean = i; 255 tx_ring->next_to_clean = i;
212 256
213#define TX_WAKE_THRESHOLD 32 257#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
214 spin_lock(&tx_ring->tx_lock); 258 if (total_tx_packets && netif_carrier_ok(netdev) &&
215 259 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
216 if (cleaned && netif_carrier_ok(netdev) && 260 /* Make sure that anybody stopping the queue after this
217 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) && 261 * sees the new next_to_clean.
218 !test_bit(__IXGBE_DOWN, &adapter->state)) 262 */
219 netif_wake_queue(netdev); 263 smp_mb();
220 264 if (netif_queue_stopped(netdev) &&
221 spin_unlock(&tx_ring->tx_lock); 265 !test_bit(__IXGBE_DOWN, &adapter->state)) {
266 netif_wake_queue(netdev);
267 adapter->restart_queue++;
268 }
269 }
222 270
223 if (adapter->detect_tx_hung) 271 if (adapter->detect_tx_hung)
224 if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc)) 272 if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
225 netif_stop_queue(netdev); 273 netif_stop_queue(netdev);
226 274
227 if (count >= tx_ring->work_limit) 275 if (total_tx_packets >= tx_ring->work_limit)
228 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value); 276 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
229 277
278 adapter->net_stats.tx_bytes += total_tx_bytes;
279 adapter->net_stats.tx_packets += total_tx_packets;
280 cleaned = total_tx_packets ? true : false;
230 return cleaned; 281 return cleaned;
231} 282}
232 283
@@ -255,25 +306,40 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
255 } 306 }
256} 307}
257 308
309/**
310 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
311 * @adapter: address of board private structure
312 * @status_err: hardware indication of status of receive
313 * @skb: skb currently being received and modified
314 **/
258static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, 315static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
259 u32 status_err, 316 u32 status_err,
260 struct sk_buff *skb) 317 struct sk_buff *skb)
261{ 318{
262 skb->ip_summed = CHECKSUM_NONE; 319 skb->ip_summed = CHECKSUM_NONE;
263 320
264 /* Ignore Checksum bit is set */ 321 /* Ignore Checksum bit is set, or rx csum disabled */
265 if ((status_err & IXGBE_RXD_STAT_IXSM) || 322 if ((status_err & IXGBE_RXD_STAT_IXSM) ||
266 !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) 323 !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
267 return; 324 return;
268 /* TCP/UDP checksum error bit is set */ 325
269 if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE)) { 326 /* if IP and error */
270 /* let the stack verify checksum errors */ 327 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
328 (status_err & IXGBE_RXDADV_ERR_IPE)) {
271 adapter->hw_csum_rx_error++; 329 adapter->hw_csum_rx_error++;
272 return; 330 return;
273 } 331 }
332
333 if (!(status_err & IXGBE_RXD_STAT_L4CS))
334 return;
335
336 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
337 adapter->hw_csum_rx_error++;
338 return;
339 }
340
274 /* It must be a TCP or UDP packet with a valid checksum */ 341 /* It must be a TCP or UDP packet with a valid checksum */
275 if (status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS)) 342 skb->ip_summed = CHECKSUM_UNNECESSARY;
276 skb->ip_summed = CHECKSUM_UNNECESSARY;
277 adapter->hw_csum_rx_good++; 343 adapter->hw_csum_rx_good++;
278} 344}
279 345
@@ -379,6 +445,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
379 u16 hdr_info, vlan_tag; 445 u16 hdr_info, vlan_tag;
380 bool is_vlan, cleaned = false; 446 bool is_vlan, cleaned = false;
381 int cleaned_count = 0; 447 int cleaned_count = 0;
448 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
382 449
383 i = rx_ring->next_to_clean; 450 i = rx_ring->next_to_clean;
384 upper_len = 0; 451 upper_len = 0;
@@ -458,6 +525,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
458 } 525 }
459 526
460 ixgbe_rx_checksum(adapter, staterr, skb); 527 ixgbe_rx_checksum(adapter, staterr, skb);
528
529 /* probably a little skewed due to removing CRC */
530 total_rx_bytes += skb->len;
531 total_rx_packets++;
532
461 skb->protocol = eth_type_trans(skb, netdev); 533 skb->protocol = eth_type_trans(skb, netdev);
462 ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag); 534 ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag);
463 netdev->last_rx = jiffies; 535 netdev->last_rx = jiffies;
@@ -486,6 +558,9 @@ next_desc:
486 if (cleaned_count) 558 if (cleaned_count)
487 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 559 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
488 560
561 adapter->net_stats.rx_bytes += total_rx_bytes;
562 adapter->net_stats.rx_packets += total_rx_packets;
563
489 return cleaned; 564 return cleaned;
490} 565}
491 566
@@ -535,7 +610,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
535 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 610 if (!test_bit(__IXGBE_DOWN, &adapter->state))
536 mod_timer(&adapter->watchdog_timer, jiffies); 611 mod_timer(&adapter->watchdog_timer, jiffies);
537 } 612 }
538 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 613
614 if (!test_bit(__IXGBE_DOWN, &adapter->state))
615 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
539 616
540 return IRQ_HANDLED; 617 return IRQ_HANDLED;
541} 618}
@@ -713,7 +790,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
713 if (netif_rx_schedule_prep(netdev, &adapter->napi)) { 790 if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
714 /* Disable interrupts and register for poll. The flush of the 791 /* Disable interrupts and register for poll. The flush of the
715 * posted write is intentionally left out. */ 792 * posted write is intentionally left out. */
716 atomic_inc(&adapter->irq_sem);
717 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 793 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
718 __netif_rx_schedule(netdev, &adapter->napi); 794 __netif_rx_schedule(netdev, &adapter->napi);
719 } 795 }
@@ -801,7 +877,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
801 **/ 877 **/
802static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 878static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
803{ 879{
804 atomic_inc(&adapter->irq_sem);
805 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 880 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
806 IXGBE_WRITE_FLUSH(&adapter->hw); 881 IXGBE_WRITE_FLUSH(&adapter->hw);
807 synchronize_irq(adapter->pdev->irq); 882 synchronize_irq(adapter->pdev->irq);
@@ -813,15 +888,13 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
813 **/ 888 **/
814static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) 889static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
815{ 890{
816 if (atomic_dec_and_test(&adapter->irq_sem)) { 891 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
817 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 892 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
818 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 893 (IXGBE_EIMS_ENABLE_MASK &
819 (IXGBE_EIMS_ENABLE_MASK & 894 ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
820 ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC))); 895 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
821 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, 896 IXGBE_EIMS_ENABLE_MASK);
822 IXGBE_EIMS_ENABLE_MASK); 897 IXGBE_WRITE_FLUSH(&adapter->hw);
823 IXGBE_WRITE_FLUSH(&adapter->hw);
824 }
825} 898}
826 899
827/** 900/**
@@ -1040,7 +1113,8 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1040 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1113 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1041 u32 ctrl; 1114 u32 ctrl;
1042 1115
1043 ixgbe_irq_disable(adapter); 1116 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1117 ixgbe_irq_disable(adapter);
1044 adapter->vlgrp = grp; 1118 adapter->vlgrp = grp;
1045 1119
1046 if (grp) { 1120 if (grp) {
@@ -1051,7 +1125,8 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1051 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1125 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1052 } 1126 }
1053 1127
1054 ixgbe_irq_enable(adapter); 1128 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1129 ixgbe_irq_enable(adapter);
1055} 1130}
1056 1131
1057static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1132static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -1066,9 +1141,13 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1066{ 1141{
1067 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1142 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1068 1143
1069 ixgbe_irq_disable(adapter); 1144 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1145 ixgbe_irq_disable(adapter);
1146
1070 vlan_group_set_device(adapter->vlgrp, vid, NULL); 1147 vlan_group_set_device(adapter->vlgrp, vid, NULL);
1071 ixgbe_irq_enable(adapter); 1148
1149 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1150 ixgbe_irq_enable(adapter);
1072 1151
1073 /* remove VID from filter table */ 1152 /* remove VID from filter table */
1074 ixgbe_set_vfta(&adapter->hw, vid, 0, false); 1153 ixgbe_set_vfta(&adapter->hw, vid, 0, false);
@@ -1170,6 +1249,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1170 u32 txdctl, rxdctl, mhadd; 1249 u32 txdctl, rxdctl, mhadd;
1171 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1250 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1172 1251
1252 ixgbe_get_hw_control(adapter);
1253
1173 if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED | 1254 if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
1174 IXGBE_FLAG_MSI_ENABLED)) { 1255 IXGBE_FLAG_MSI_ENABLED)) {
1175 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1256 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1224,6 +1305,16 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1224 return 0; 1305 return 0;
1225} 1306}
1226 1307
1308void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
1309{
1310 WARN_ON(in_interrupt());
1311 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1312 msleep(1);
1313 ixgbe_down(adapter);
1314 ixgbe_up(adapter);
1315 clear_bit(__IXGBE_RESETTING, &adapter->state);
1316}
1317
1227int ixgbe_up(struct ixgbe_adapter *adapter) 1318int ixgbe_up(struct ixgbe_adapter *adapter)
1228{ 1319{
1229 /* hardware has been reset, we need to reload some things */ 1320 /* hardware has been reset, we need to reload some things */
@@ -1408,7 +1499,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
1408 msleep(10); 1499 msleep(10);
1409 1500
1410 napi_disable(&adapter->napi); 1501 napi_disable(&adapter->napi);
1411 atomic_set(&adapter->irq_sem, 0);
1412 1502
1413 ixgbe_irq_disable(adapter); 1503 ixgbe_irq_disable(adapter);
1414 1504
@@ -1447,6 +1537,8 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
1447 pci_enable_wake(pdev, PCI_D3hot, 0); 1537 pci_enable_wake(pdev, PCI_D3hot, 0);
1448 pci_enable_wake(pdev, PCI_D3cold, 0); 1538 pci_enable_wake(pdev, PCI_D3cold, 0);
1449 1539
1540 ixgbe_release_hw_control(adapter);
1541
1450 pci_disable_device(pdev); 1542 pci_disable_device(pdev);
1451 1543
1452 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1544 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -1481,7 +1573,8 @@ static int ixgbe_clean(struct napi_struct *napi, int budget)
1481 /* If budget not fully consumed, exit the polling mode */ 1573 /* If budget not fully consumed, exit the polling mode */
1482 if (work_done < budget) { 1574 if (work_done < budget) {
1483 netif_rx_complete(netdev, napi); 1575 netif_rx_complete(netdev, napi);
1484 ixgbe_irq_enable(adapter); 1576 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1577 ixgbe_irq_enable(adapter);
1485 } 1578 }
1486 1579
1487 return work_done; 1580 return work_done;
@@ -1506,8 +1599,7 @@ static void ixgbe_reset_task(struct work_struct *work)
1506 1599
1507 adapter->tx_timeout_count++; 1600 adapter->tx_timeout_count++;
1508 1601
1509 ixgbe_down(adapter); 1602 ixgbe_reinit_locked(adapter);
1510 ixgbe_up(adapter);
1511} 1603}
1512 1604
1513/** 1605/**
@@ -1590,7 +1682,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
1590 return -ENOMEM; 1682 return -ENOMEM;
1591 } 1683 }
1592 1684
1593 atomic_set(&adapter->irq_sem, 1);
1594 set_bit(__IXGBE_DOWN, &adapter->state); 1685 set_bit(__IXGBE_DOWN, &adapter->state);
1595 1686
1596 return 0; 1687 return 0;
@@ -1634,7 +1725,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
1634 txdr->next_to_use = 0; 1725 txdr->next_to_use = 0;
1635 txdr->next_to_clean = 0; 1726 txdr->next_to_clean = 0;
1636 txdr->work_limit = txdr->count; 1727 txdr->work_limit = txdr->count;
1637 spin_lock_init(&txdr->tx_lock);
1638 1728
1639 return 0; 1729 return 0;
1640} 1730}
@@ -1828,10 +1918,8 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
1828 1918
1829 netdev->mtu = new_mtu; 1919 netdev->mtu = new_mtu;
1830 1920
1831 if (netif_running(netdev)) { 1921 if (netif_running(netdev))
1832 ixgbe_down(adapter); 1922 ixgbe_reinit_locked(adapter);
1833 ixgbe_up(adapter);
1834 }
1835 1923
1836 return 0; 1924 return 0;
1837} 1925}
@@ -1852,14 +1940,8 @@ static int ixgbe_open(struct net_device *netdev)
1852{ 1940{
1853 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1941 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1854 int err; 1942 int err;
1855 u32 ctrl_ext;
1856 u32 num_rx_queues = adapter->num_rx_queues; 1943 u32 num_rx_queues = adapter->num_rx_queues;
1857 1944
1858 /* Let firmware know the driver has taken over */
1859 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1860 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
1861 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
1862
1863try_intr_reinit: 1945try_intr_reinit:
1864 /* allocate transmit descriptors */ 1946 /* allocate transmit descriptors */
1865 err = ixgbe_setup_all_tx_resources(adapter); 1947 err = ixgbe_setup_all_tx_resources(adapter);
@@ -1910,6 +1992,7 @@ try_intr_reinit:
1910 return 0; 1992 return 0;
1911 1993
1912err_up: 1994err_up:
1995 ixgbe_release_hw_control(adapter);
1913 ixgbe_free_irq(adapter); 1996 ixgbe_free_irq(adapter);
1914err_req_irq: 1997err_req_irq:
1915 ixgbe_free_all_rx_resources(adapter); 1998 ixgbe_free_all_rx_resources(adapter);
@@ -1935,7 +2018,6 @@ err_setup_tx:
1935static int ixgbe_close(struct net_device *netdev) 2018static int ixgbe_close(struct net_device *netdev)
1936{ 2019{
1937 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2020 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1938 u32 ctrl_ext;
1939 2021
1940 ixgbe_down(adapter); 2022 ixgbe_down(adapter);
1941 ixgbe_free_irq(adapter); 2023 ixgbe_free_irq(adapter);
@@ -1943,9 +2025,7 @@ static int ixgbe_close(struct net_device *netdev)
1943 ixgbe_free_all_tx_resources(adapter); 2025 ixgbe_free_all_tx_resources(adapter);
1944 ixgbe_free_all_rx_resources(adapter); 2026 ixgbe_free_all_rx_resources(adapter);
1945 2027
1946 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 2028 ixgbe_release_hw_control(adapter);
1947 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
1948 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
1949 2029
1950 return 0; 2030 return 0;
1951} 2031}
@@ -1957,22 +2037,26 @@ static int ixgbe_close(struct net_device *netdev)
1957void ixgbe_update_stats(struct ixgbe_adapter *adapter) 2037void ixgbe_update_stats(struct ixgbe_adapter *adapter)
1958{ 2038{
1959 struct ixgbe_hw *hw = &adapter->hw; 2039 struct ixgbe_hw *hw = &adapter->hw;
1960 u64 good_rx, missed_rx, bprc; 2040 u64 total_mpc = 0;
2041 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
1961 2042
1962 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 2043 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1963 good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC); 2044 for (i = 0; i < 8; i++) {
1964 missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0)); 2045 /* for packet buffers not used, the register should read 0 */
1965 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1)); 2046 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1966 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2)); 2047 missed_rx += mpc;
1967 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3)); 2048 adapter->stats.mpc[i] += mpc;
1968 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4)); 2049 total_mpc += adapter->stats.mpc[i];
1969 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5)); 2050 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1970 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6)); 2051 }
1971 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7)); 2052 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1972 adapter->stats.gprc += (good_rx - missed_rx); 2053 /* work around hardware counting issue */
1973 2054 adapter->stats.gprc -= missed_rx;
1974 adapter->stats.mpc[0] += missed_rx; 2055
2056 /* 82598 hardware only has a 32 bit counter in the high register */
1975 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 2057 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2058 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2059 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1976 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 2060 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1977 adapter->stats.bprc += bprc; 2061 adapter->stats.bprc += bprc;
1978 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 2062 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
@@ -1984,35 +2068,37 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
1984 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 2068 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1985 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 2069 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1986 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 2070 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1987
1988 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 2071 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1989 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 2072 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1990 adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1991 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 2073 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1992 adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 2074 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2075 adapter->stats.lxontxc += lxon;
2076 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2077 adapter->stats.lxofftxc += lxoff;
1993 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 2078 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1994 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 2079 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1995 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 2080 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1996 adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0)); 2081 /*
2082 * 82598 errata - tx of flow control packets is included in tx counters
2083 */
2084 xon_off_tot = lxon + lxoff;
2085 adapter->stats.gptc -= xon_off_tot;
2086 adapter->stats.mptc -= xon_off_tot;
2087 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
1997 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 2088 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1998 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 2089 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1999 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 2090 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2000 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2001 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 2091 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2002 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 2092 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2093 adapter->stats.ptc64 -= xon_off_tot;
2003 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 2094 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2004 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 2095 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2005 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 2096 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2006 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 2097 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2007 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 2098 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
2008 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2009 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 2099 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
2010 2100
2011 /* Fill out the OS statistics structure */ 2101 /* Fill out the OS statistics structure */
2012 adapter->net_stats.rx_packets = adapter->stats.gprc;
2013 adapter->net_stats.tx_packets = adapter->stats.gptc;
2014 adapter->net_stats.rx_bytes = adapter->stats.gorc;
2015 adapter->net_stats.tx_bytes = adapter->stats.gotc;
2016 adapter->net_stats.multicast = adapter->stats.mprc; 2102 adapter->net_stats.multicast = adapter->stats.mprc;
2017 2103
2018 /* Rx Errors */ 2104 /* Rx Errors */
@@ -2021,8 +2107,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
2021 adapter->net_stats.rx_dropped = 0; 2107 adapter->net_stats.rx_dropped = 0;
2022 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 2108 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2023 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 2109 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2024 adapter->net_stats.rx_missed_errors = adapter->stats.mpc[0]; 2110 adapter->net_stats.rx_missed_errors = total_mpc;
2025
2026} 2111}
2027 2112
2028/** 2113/**
@@ -2076,15 +2161,6 @@ static void ixgbe_watchdog(unsigned long data)
2076 round_jiffies(jiffies + 2 * HZ)); 2161 round_jiffies(jiffies + 2 * HZ));
2077} 2162}
2078 2163
2079#define IXGBE_MAX_TXD_PWR 14
2080#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
2081
2082/* Tx Descriptors needed, worst case */
2083#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
2084 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
2085#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
2086 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
2087
2088static int ixgbe_tso(struct ixgbe_adapter *adapter, 2164static int ixgbe_tso(struct ixgbe_adapter *adapter,
2089 struct ixgbe_ring *tx_ring, struct sk_buff *skb, 2165 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
2090 u32 tx_flags, u8 *hdr_len) 2166 u32 tx_flags, u8 *hdr_len)
@@ -2356,6 +2432,37 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
2356 writel(i, adapter->hw.hw_addr + tx_ring->tail); 2432 writel(i, adapter->hw.hw_addr + tx_ring->tail);
2357} 2433}
2358 2434
2435static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
2436 struct ixgbe_ring *tx_ring, int size)
2437{
2438 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2439
2440 netif_stop_queue(netdev);
2441 /* Herbert's original patch had:
2442 * smp_mb__after_netif_stop_queue();
2443 * but since that doesn't exist yet, just open code it. */
2444 smp_mb();
2445
2446 /* We need to check again in a case another CPU has just
2447 * made room available. */
2448 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
2449 return -EBUSY;
2450
2451 /* A reprieve! - use start_queue because it doesn't call schedule */
2452 netif_wake_queue(netdev);
2453 ++adapter->restart_queue;
2454 return 0;
2455}
2456
2457static int ixgbe_maybe_stop_tx(struct net_device *netdev,
2458 struct ixgbe_ring *tx_ring, int size)
2459{
2460 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
2461 return 0;
2462 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
2463}
2464
2465
2359static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2466static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2360{ 2467{
2361 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2468 struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2363,7 +2470,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2363 unsigned int len = skb->len; 2470 unsigned int len = skb->len;
2364 unsigned int first; 2471 unsigned int first;
2365 unsigned int tx_flags = 0; 2472 unsigned int tx_flags = 0;
2366 unsigned long flags = 0;
2367 u8 hdr_len; 2473 u8 hdr_len;
2368 int tso; 2474 int tso;
2369 unsigned int mss = 0; 2475 unsigned int mss = 0;
@@ -2389,14 +2495,10 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2389 for (f = 0; f < nr_frags; f++) 2495 for (f = 0; f < nr_frags; f++)
2390 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 2496 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2391 2497
2392 spin_lock_irqsave(&tx_ring->tx_lock, flags); 2498 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
2393 if (IXGBE_DESC_UNUSED(tx_ring) < (count + 2)) {
2394 adapter->tx_busy++; 2499 adapter->tx_busy++;
2395 netif_stop_queue(netdev);
2396 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2397 return NETDEV_TX_BUSY; 2500 return NETDEV_TX_BUSY;
2398 } 2501 }
2399 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2400 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 2502 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
2401 tx_flags |= IXGBE_TX_FLAGS_VLAN; 2503 tx_flags |= IXGBE_TX_FLAGS_VLAN;
2402 tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT); 2504 tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
@@ -2423,11 +2525,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2423 2525
2424 netdev->trans_start = jiffies; 2526 netdev->trans_start = jiffies;
2425 2527
2426 spin_lock_irqsave(&tx_ring->tx_lock, flags); 2528 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
2427 /* Make sure there is space in the ring for the next send. */
2428 if (IXGBE_DESC_UNUSED(tx_ring) < DESC_NEEDED)
2429 netif_stop_queue(netdev);
2430 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2431 2529
2432 return NETDEV_TX_OK; 2530 return NETDEV_TX_OK;
2433} 2531}
@@ -2697,6 +2795,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
2697 return 0; 2795 return 0;
2698 2796
2699err_register: 2797err_register:
2798 ixgbe_release_hw_control(adapter);
2700err_hw_init: 2799err_hw_init:
2701err_sw_init: 2800err_sw_init:
2702err_eeprom: 2801err_eeprom:
@@ -2732,6 +2831,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
2732 2831
2733 unregister_netdev(netdev); 2832 unregister_netdev(netdev);
2734 2833
2834 ixgbe_release_hw_control(adapter);
2835
2735 kfree(adapter->tx_ring); 2836 kfree(adapter->tx_ring);
2736 kfree(adapter->rx_ring); 2837 kfree(adapter->rx_ring);
2737 2838
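The central change in ixgbe_main.c above is dropping tx_ring->tx_lock and adapter->irq_sem in favour of a lockless stop/wake handshake between ixgbe_xmit_frame() and ixgbe_clean_tx_irq(). Below is a condensed sketch of the transmit side of that handshake; the single merged function and its _sketch suffix are mine for illustration (the patch splits it into ixgbe_maybe_stop_tx() and __ixgbe_maybe_stop_tx()), while the kernel helpers and the driver's IXGBE_DESC_UNUSED macro are taken from the patch itself. The cleanup side pairs with it: after advancing next_to_clean it executes smp_mb() and wakes the queue only if it is still stopped.

/* Sketch of the lockless Tx stop/wake handshake that replaces
 * tx_ring->tx_lock.  The transmit path stops the queue first and only
 * then re-reads the free-descriptor count; the memory barrier pairs
 * with the one in ixgbe_clean_tx_irq() so one side is guaranteed to
 * observe the other's update.  Assumes the driver's struct ixgbe_ring
 * and IXGBE_DESC_UNUSED() definitions. */
static int ixgbe_maybe_stop_tx_sketch(struct net_device *netdev,
				      struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	netif_stop_queue(netdev);
	smp_mb();	/* order the stop against the re-check below */

	/* clean_tx_irq may have freed descriptors in the meantime */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* a reprieve - room appeared, so restart the queue */
	netif_wake_queue(netdev);
	return 0;
}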
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 651c2699d5e1..b528ce77c406 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1652,6 +1652,11 @@ static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
1652 } 1652 }
1653} 1653}
1654 1654
1655static inline __be16 sum16_as_be(__sum16 sum)
1656{
1657 return (__force __be16)sum;
1658}
1659
1655/** 1660/**
1656 * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw 1661 * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw
1657 * 1662 *
@@ -1689,7 +1694,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
1689 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); 1694 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
1690 1695
1691 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1696 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1692 BUG_ON(skb->protocol != ETH_P_IP); 1697 BUG_ON(skb->protocol != htons(ETH_P_IP));
1693 1698
1694 cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | 1699 cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
1695 ETH_GEN_IP_V_4_CHECKSUM | 1700 ETH_GEN_IP_V_4_CHECKSUM |
@@ -1698,10 +1703,10 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
1698 switch (ip_hdr(skb)->protocol) { 1703 switch (ip_hdr(skb)->protocol) {
1699 case IPPROTO_UDP: 1704 case IPPROTO_UDP:
1700 cmd_sts |= ETH_UDP_FRAME; 1705 cmd_sts |= ETH_UDP_FRAME;
1701 desc->l4i_chk = udp_hdr(skb)->check; 1706 desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
1702 break; 1707 break;
1703 case IPPROTO_TCP: 1708 case IPPROTO_TCP:
1704 desc->l4i_chk = tcp_hdr(skb)->check; 1709 desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
1705 break; 1710 break;
1706 default: 1711 default:
1707 BUG(); 1712 BUG();
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index dc062367a1c8..9a6295909e43 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -857,7 +857,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
857 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 857 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
858 858
859 /* On chips without ram buffer, pause is controled by MAC level */ 859 /* On chips without ram buffer, pause is controled by MAC level */
860 if (sky2_read8(hw, B2_E_0) == 0) { 860 if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
861 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 861 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
862 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 862 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
863 863
@@ -1194,7 +1194,7 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
1194 struct sk_buff *skb; 1194 struct sk_buff *skb;
1195 int i; 1195 int i;
1196 1196
1197 if (sky2->hw->flags & SKY2_HW_FIFO_HANG_CHECK) { 1197 if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
1198 unsigned char *start; 1198 unsigned char *start;
1199 /* 1199 /*
1200 * Workaround for a bug in FIFO that cause hang 1200 * Workaround for a bug in FIFO that cause hang
@@ -1387,6 +1387,7 @@ static int sky2_up(struct net_device *dev)
1387 if (ramsize > 0) { 1387 if (ramsize > 0) {
1388 u32 rxspace; 1388 u32 rxspace;
1389 1389
1390 hw->flags |= SKY2_HW_RAM_BUFFER;
1390 pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize); 1391 pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
1391 if (ramsize < 16) 1392 if (ramsize < 16)
1392 rxspace = ramsize / 2; 1393 rxspace = ramsize / 2;
@@ -2026,7 +2027,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2026 2027
2027 synchronize_irq(hw->pdev->irq); 2028 synchronize_irq(hw->pdev->irq);
2028 2029
2029 if (sky2_read8(hw, B2_E_0) == 0) 2030 if (!(hw->flags & SKY2_HW_RAM_BUFFER))
2030 sky2_set_tx_stfwd(hw, port); 2031 sky2_set_tx_stfwd(hw, port);
2031 2032
2032 ctl = gma_read16(hw, port, GM_GP_CTRL); 2033 ctl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2566,7 +2567,7 @@ static void sky2_watchdog(unsigned long arg)
2566 ++active; 2567 ++active;
2567 2568
2568 /* For chips with Rx FIFO, check if stuck */ 2569 /* For chips with Rx FIFO, check if stuck */
2569 if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) && 2570 if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
2570 sky2_rx_hung(dev)) { 2571 sky2_rx_hung(dev)) {
2571 pr_info(PFX "%s: receiver hang detected\n", 2572 pr_info(PFX "%s: receiver hang detected\n",
2572 dev->name); 2573 dev->name);
@@ -2722,11 +2723,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2722 2723
2723 switch(hw->chip_id) { 2724 switch(hw->chip_id) {
2724 case CHIP_ID_YUKON_XL: 2725 case CHIP_ID_YUKON_XL:
2725 hw->flags = SKY2_HW_GIGABIT 2726 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
2726 | SKY2_HW_NEWER_PHY;
2727 if (hw->chip_rev < 3)
2728 hw->flags |= SKY2_HW_FIFO_HANG_CHECK;
2729
2730 break; 2727 break;
2731 2728
2732 case CHIP_ID_YUKON_EC_U: 2729 case CHIP_ID_YUKON_EC_U:
@@ -2752,7 +2749,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2752 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); 2749 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
2753 return -EOPNOTSUPP; 2750 return -EOPNOTSUPP;
2754 } 2751 }
2755 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK; 2752 hw->flags = SKY2_HW_GIGABIT;
2756 break; 2753 break;
2757 2754
2758 case CHIP_ID_YUKON_FE: 2755 case CHIP_ID_YUKON_FE:
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 2bced1a0898f..5ab5c1c7c5aa 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2045,7 +2045,7 @@ struct sky2_hw {
2045#define SKY2_HW_FIBRE_PHY 0x00000002 2045#define SKY2_HW_FIBRE_PHY 0x00000002
2046#define SKY2_HW_GIGABIT 0x00000004 2046#define SKY2_HW_GIGABIT 0x00000004
2047#define SKY2_HW_NEWER_PHY 0x00000008 2047#define SKY2_HW_NEWER_PHY 0x00000008
2048#define SKY2_HW_FIFO_HANG_CHECK 0x00000010 2048#define SKY2_HW_RAM_BUFFER 0x00000010
2049#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ 2049#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
2050#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2050#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2051#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2051#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index c99ce74a7aff..3af5b92b48c8 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -465,7 +465,7 @@ static struct pci_driver tlan_driver = {
465 465
466static int __init tlan_probe(void) 466static int __init tlan_probe(void)
467{ 467{
468 static int pad_allocated; 468 int rc = -ENODEV;
469 469
470 printk(KERN_INFO "%s", tlan_banner); 470 printk(KERN_INFO "%s", tlan_banner);
471 471
@@ -473,17 +473,22 @@ static int __init tlan_probe(void)
473 473
474 if (TLanPadBuffer == NULL) { 474 if (TLanPadBuffer == NULL) {
475 printk(KERN_ERR "TLAN: Could not allocate memory for pad buffer.\n"); 475 printk(KERN_ERR "TLAN: Could not allocate memory for pad buffer.\n");
476 return -ENOMEM; 476 rc = -ENOMEM;
477 goto err_out;
477 } 478 }
478 479
479 memset(TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE); 480 memset(TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE);
480 pad_allocated = 1;
481 481
482 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n"); 482 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
483 483
484 /* Use new style PCI probing. Now the kernel will 484 /* Use new style PCI probing. Now the kernel will
485 do most of this for us */ 485 do most of this for us */
486 pci_register_driver(&tlan_driver); 486 rc = pci_register_driver(&tlan_driver);
487
488 if (rc != 0) {
489 printk(KERN_ERR "TLAN: Could not register pci driver.\n");
490 goto err_out_pci_free;
491 }
487 492
488 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n"); 493 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
489 TLan_EisaProbe(); 494 TLan_EisaProbe();
@@ -493,11 +498,17 @@ static int __init tlan_probe(void)
493 tlan_have_pci, tlan_have_eisa); 498 tlan_have_pci, tlan_have_eisa);
494 499
495 if (TLanDevicesInstalled == 0) { 500 if (TLanDevicesInstalled == 0) {
496 pci_unregister_driver(&tlan_driver); 501 rc = -ENODEV;
497 pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA); 502 goto err_out_pci_unreg;
498 return -ENODEV;
499 } 503 }
500 return 0; 504 return 0;
505
506err_out_pci_unreg:
507 pci_unregister_driver(&tlan_driver);
508err_out_pci_free:
509 pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
510err_out:
511 return rc;
501} 512}
502 513
503 514
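
Note: the tlan_probe() hunk above converts the early-return failure path into the kernel's usual single-exit goto ladder: one rc variable, plus labels that release only what was already set up (pci_unregister_driver(), then pci_free_consistent()). A minimal sketch of that idiom follows; setup_a()/setup_b()/teardown_a() are hypothetical stand-ins for the pad-buffer allocation and driver-registration steps, not TLAN functions.

	#include <linux/init.h>
	#include <linux/module.h>

	static int setup_a(void)     { return 0; }	/* e.g. the pad buffer allocation */
	static void teardown_a(void) { }		/* e.g. pci_free_consistent()     */
	static int setup_b(void)     { return 0; }	/* e.g. pci_register_driver()     */

	static int __init example_init(void)
	{
		int rc;

		rc = setup_a();
		if (rc)
			goto err_out;		/* nothing to undo yet */

		rc = setup_b();
		if (rc)
			goto err_free_a;	/* undo only the step that succeeded */

		return 0;			/* success: keep both resources */

	err_free_a:
		teardown_a();
	err_out:
		return rc;
	}
	module_init(example_init);
	MODULE_LICENSE("GPL");
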
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 8fc7274642eb..6b93d0169116 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -441,7 +441,7 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
441 spin_unlock_irqrestore(&card->lock,flags); 441 spin_unlock_irqrestore(&card->lock,flags);
442 trigger_transmit(card); 442 trigger_transmit(card);
443 443
444 return -EIO; 444 return NETDEV_TX_BUSY;
445} 445}
446 446
447 447
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index e3ba14a19915..c69e654d539f 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -109,7 +109,7 @@ int uec_mdio_reset(struct mii_bus *bus)
109 struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv; 109 struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
110 unsigned int timeout = PHY_INIT_TIMEOUT; 110 unsigned int timeout = PHY_INIT_TIMEOUT;
111 111
112 spin_lock_bh(&bus->mdio_lock); 112 mutex_lock(&bus->mdio_lock);
113 113
114 /* Reset the management interface */ 114 /* Reset the management interface */
115 out_be32(&regs->miimcfg, MIIMCFG_RESET_MANAGEMENT); 115 out_be32(&regs->miimcfg, MIIMCFG_RESET_MANAGEMENT);
@@ -121,7 +121,7 @@ int uec_mdio_reset(struct mii_bus *bus)
121 while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--) 121 while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
122 cpu_relax(); 122 cpu_relax();
123 123
124 spin_unlock_bh(&bus->mdio_lock); 124 mutex_unlock(&bus->mdio_lock);
125 125
126 if (timeout <= 0) { 126 if (timeout <= 0) {
127 printk(KERN_ERR "%s: The MII Bus is stuck!\n", bus->name); 127 printk(KERN_ERR "%s: The MII Bus is stuck!\n", bus->name);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e66de0c12fc1..fdc23678117b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -302,10 +302,12 @@ static int virtnet_open(struct net_device *dev)
302 302
303 /* If all buffers were filled by other side before we napi_enabled, we 303 /* If all buffers were filled by other side before we napi_enabled, we
304 * won't get another interrupt, so process any outstanding packets 304 * won't get another interrupt, so process any outstanding packets
305 * now. virtnet_poll wants re-enable the queue, so we disable here. */ 305 * now. virtnet_poll wants re-enable the queue, so we disable here.
306 vi->rvq->vq_ops->disable_cb(vi->rvq); 306 * We synchronize against interrupts via NAPI_STATE_SCHED */
307 netif_rx_schedule(vi->dev, &vi->napi); 307 if (netif_rx_schedule_prep(dev, &vi->napi)) {
308 308 vi->rvq->vq_ops->disable_cb(vi->rvq);
309 __netif_rx_schedule(dev, &vi->napi);
310 }
309 return 0; 311 return 0;
310} 312}
311 313
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index d553e6f32851..39951d0c34d6 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Generic HDLC support routines for Linux 2 * Generic HDLC support routines for Linux
3 * 3 *
4 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl> 4 * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
@@ -39,7 +39,7 @@
39#include <net/net_namespace.h> 39#include <net/net_namespace.h>
40 40
41 41
42static const char* version = "HDLC support module revision 1.21"; 42static const char* version = "HDLC support module revision 1.22";
43 43
44#undef DEBUG_LINK 44#undef DEBUG_LINK
45 45
@@ -66,19 +66,15 @@ static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
66static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, 66static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
67 struct packet_type *p, struct net_device *orig_dev) 67 struct packet_type *p, struct net_device *orig_dev)
68{ 68{
69 struct hdlc_device_desc *desc = dev_to_desc(dev); 69 struct hdlc_device *hdlc = dev_to_hdlc(dev);
70 70
71 if (dev->nd_net != &init_net) { 71 if (dev->nd_net != &init_net) {
72 kfree_skb(skb); 72 kfree_skb(skb);
73 return 0; 73 return 0;
74 } 74 }
75 75
76 if (desc->netif_rx) 76 BUG_ON(!hdlc->proto->netif_rx);
77 return desc->netif_rx(skb); 77 return hdlc->proto->netif_rx(skb);
78
79 desc->stats.rx_dropped++; /* Shouldn't happen */
80 dev_kfree_skb(skb);
81 return NET_RX_DROP;
82} 78}
83 79
84 80
@@ -87,7 +83,7 @@ static inline void hdlc_proto_start(struct net_device *dev)
87{ 83{
88 hdlc_device *hdlc = dev_to_hdlc(dev); 84 hdlc_device *hdlc = dev_to_hdlc(dev);
89 if (hdlc->proto->start) 85 if (hdlc->proto->start)
90 return hdlc->proto->start(dev); 86 hdlc->proto->start(dev);
91} 87}
92 88
93 89
@@ -96,7 +92,7 @@ static inline void hdlc_proto_stop(struct net_device *dev)
96{ 92{
97 hdlc_device *hdlc = dev_to_hdlc(dev); 93 hdlc_device *hdlc = dev_to_hdlc(dev);
98 if (hdlc->proto->stop) 94 if (hdlc->proto->stop)
99 return hdlc->proto->stop(dev); 95 hdlc->proto->stop(dev);
100} 96}
101 97
102 98
@@ -263,8 +259,7 @@ static void hdlc_setup(struct net_device *dev)
263struct net_device *alloc_hdlcdev(void *priv) 259struct net_device *alloc_hdlcdev(void *priv)
264{ 260{
265 struct net_device *dev; 261 struct net_device *dev;
266 dev = alloc_netdev(sizeof(struct hdlc_device_desc) + 262 dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d", hdlc_setup);
267 sizeof(hdlc_device), "hdlc%d", hdlc_setup);
268 if (dev) 263 if (dev)
269 dev_to_hdlc(dev)->priv = priv; 264 dev_to_hdlc(dev)->priv = priv;
270 return dev; 265 return dev;
@@ -281,7 +276,7 @@ void unregister_hdlc_device(struct net_device *dev)
281 276
282 277
283int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, 278int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
284 int (*rx)(struct sk_buff *skb), size_t size) 279 size_t size)
285{ 280{
286 detach_hdlc_protocol(dev); 281 detach_hdlc_protocol(dev);
287 282
@@ -297,7 +292,6 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
297 return -ENOBUFS; 292 return -ENOBUFS;
298 } 293 }
299 dev_to_hdlc(dev)->proto = proto; 294 dev_to_hdlc(dev)->proto = proto;
300 dev_to_desc(dev)->netif_rx = rx;
301 return 0; 295 return 0;
302} 296}
303 297
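
Note: with hdlc_device_desc gone, the per-device receive hook now lives in struct hdlc_proto (->netif_rx) and attach_hdlc_protocol() loses its rx argument, which is exactly what the cisco, fr and x25 hunks below do. A hedged sketch of the new registration pattern; myproto_rx, myproto_ioctl, myproto_attach and struct my_state are placeholders, not a real protocol:

	struct my_state { int dummy; };		/* placeholder protocol state */

	static int myproto_ioctl(struct net_device *dev, struct ifreq *ifr);

	static int myproto_rx(struct sk_buff *skb)
	{
		/* per-device stats now live in dev_to_hdlc(skb->dev)->stats */
		return netif_rx(skb);
	}

	static struct hdlc_proto proto = {
		.ioctl    = myproto_ioctl,
		.netif_rx = myproto_rx,		/* rx hook moved into the proto */
		.module   = THIS_MODULE,
	};

	static int myproto_attach(struct net_device *dev)
	{
		/* the third "rx" argument is gone from attach_hdlc_protocol() */
		return attach_hdlc_protocol(dev, &proto, sizeof(struct my_state));
	}
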
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 038a6e748bbf..7133c688cf20 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -250,7 +250,7 @@ static int cisco_rx(struct sk_buff *skb)
250 return NET_RX_DROP; 250 return NET_RX_DROP;
251 251
252 rx_error: 252 rx_error:
253 dev_to_desc(dev)->stats.rx_errors++; /* Mark error */ 253 dev_to_hdlc(dev)->stats.rx_errors++; /* Mark error */
254 dev_kfree_skb_any(skb); 254 dev_kfree_skb_any(skb);
255 return NET_RX_DROP; 255 return NET_RX_DROP;
256} 256}
@@ -314,6 +314,7 @@ static struct hdlc_proto proto = {
314 .stop = cisco_stop, 314 .stop = cisco_stop,
315 .type_trans = cisco_type_trans, 315 .type_trans = cisco_type_trans,
316 .ioctl = cisco_ioctl, 316 .ioctl = cisco_ioctl,
317 .netif_rx = cisco_rx,
317 .module = THIS_MODULE, 318 .module = THIS_MODULE,
318}; 319};
319 320
@@ -360,7 +361,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
360 if (result) 361 if (result)
361 return result; 362 return result;
362 363
363 result = attach_hdlc_protocol(dev, &proto, cisco_rx, 364 result = attach_hdlc_protocol(dev, &proto,
364 sizeof(struct cisco_state)); 365 sizeof(struct cisco_state));
365 if (result) 366 if (result)
366 return result; 367 return result;
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 071a64cacd5c..c4ab0326f911 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -42,7 +42,6 @@
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/skbuff.h> 43#include <linux/skbuff.h>
44#include <linux/pkt_sched.h> 44#include <linux/pkt_sched.h>
45#include <linux/random.h>
46#include <linux/inetdevice.h> 45#include <linux/inetdevice.h>
47#include <linux/lapb.h> 46#include <linux/lapb.h>
48#include <linux/rtnetlink.h> 47#include <linux/rtnetlink.h>
@@ -136,6 +135,10 @@ typedef struct pvc_device_struct {
136 }state; 135 }state;
137}pvc_device; 136}pvc_device;
138 137
138struct pvc_desc {
139 struct net_device_stats stats;
140 pvc_device *pvc;
141};
139 142
140struct frad_state { 143struct frad_state {
141 fr_proto settings; 144 fr_proto settings;
@@ -171,17 +174,20 @@ static inline void dlci_to_q922(u8 *hdr, u16 dlci)
171} 174}
172 175
173 176
174static inline struct frad_state * state(hdlc_device *hdlc) 177static inline struct frad_state* state(hdlc_device *hdlc)
175{ 178{
176 return(struct frad_state *)(hdlc->state); 179 return(struct frad_state *)(hdlc->state);
177} 180}
178 181
179 182static inline struct pvc_desc* pvcdev_to_desc(struct net_device *dev)
180static __inline__ pvc_device* dev_to_pvc(struct net_device *dev)
181{ 183{
182 return dev->priv; 184 return dev->priv;
183} 185}
184 186
187static inline struct net_device_stats* pvc_get_stats(struct net_device *dev)
188{
189 return &pvcdev_to_desc(dev)->stats;
190}
185 191
186static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci) 192static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
187{ 193{
@@ -351,7 +357,7 @@ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
351 357
352static int pvc_open(struct net_device *dev) 358static int pvc_open(struct net_device *dev)
353{ 359{
354 pvc_device *pvc = dev_to_pvc(dev); 360 pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
355 361
356 if ((pvc->frad->flags & IFF_UP) == 0) 362 if ((pvc->frad->flags & IFF_UP) == 0)
357 return -EIO; /* Frad must be UP in order to activate PVC */ 363 return -EIO; /* Frad must be UP in order to activate PVC */
@@ -371,7 +377,7 @@ static int pvc_open(struct net_device *dev)
371 377
372static int pvc_close(struct net_device *dev) 378static int pvc_close(struct net_device *dev)
373{ 379{
374 pvc_device *pvc = dev_to_pvc(dev); 380 pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
375 381
376 if (--pvc->open_count == 0) { 382 if (--pvc->open_count == 0) {
377 hdlc_device *hdlc = dev_to_hdlc(pvc->frad); 383 hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
@@ -390,7 +396,7 @@ static int pvc_close(struct net_device *dev)
390 396
391static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 397static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
392{ 398{
393 pvc_device *pvc = dev_to_pvc(dev); 399 pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
394 fr_proto_pvc_info info; 400 fr_proto_pvc_info info;
395 401
396 if (ifr->ifr_settings.type == IF_GET_PROTO) { 402 if (ifr->ifr_settings.type == IF_GET_PROTO) {
@@ -416,17 +422,9 @@ static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
416 return -EINVAL; 422 return -EINVAL;
417} 423}
418 424
419
420static inline struct net_device_stats *pvc_get_stats(struct net_device *dev)
421{
422 return &dev_to_desc(dev)->stats;
423}
424
425
426
427static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) 425static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
428{ 426{
429 pvc_device *pvc = dev_to_pvc(dev); 427 pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
430 struct net_device_stats *stats = pvc_get_stats(dev); 428 struct net_device_stats *stats = pvc_get_stats(dev);
431 429
432 if (pvc->state.active) { 430 if (pvc->state.active) {
@@ -957,7 +955,7 @@ static int fr_rx(struct sk_buff *skb)
957 955
958 956
959 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 957 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
960 dev_to_desc(frad)->stats.rx_dropped++; 958 dev_to_hdlc(frad)->stats.rx_dropped++;
961 return NET_RX_DROP; 959 return NET_RX_DROP;
962 } 960 }
963 961
@@ -1018,7 +1016,7 @@ static int fr_rx(struct sk_buff *skb)
1018 } 1016 }
1019 1017
1020 rx_error: 1018 rx_error:
1021 dev_to_desc(frad)->stats.rx_errors++; /* Mark error */ 1019 dev_to_hdlc(frad)->stats.rx_errors++; /* Mark error */
1022 dev_kfree_skb_any(skb); 1020 dev_kfree_skb_any(skb);
1023 return NET_RX_DROP; 1021 return NET_RX_DROP;
1024} 1022}
@@ -1109,11 +1107,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1109 used = pvc_is_used(pvc); 1107 used = pvc_is_used(pvc);
1110 1108
1111 if (type == ARPHRD_ETHER) 1109 if (type == ARPHRD_ETHER)
1112 dev = alloc_netdev(sizeof(struct net_device_stats), 1110 dev = alloc_netdev(sizeof(struct pvc_desc), "pvceth%d",
1113 "pvceth%d", ether_setup); 1111 ether_setup);
1114 else 1112 else
1115 dev = alloc_netdev(sizeof(struct net_device_stats), 1113 dev = alloc_netdev(sizeof(struct pvc_desc), "pvc%d", pvc_setup);
1116 "pvc%d", pvc_setup);
1117 1114
1118 if (!dev) { 1115 if (!dev) {
1119 printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n", 1116 printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
@@ -1122,10 +1119,9 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1122 return -ENOBUFS; 1119 return -ENOBUFS;
1123 } 1120 }
1124 1121
1125 if (type == ARPHRD_ETHER) { 1122 if (type == ARPHRD_ETHER)
1126 memcpy(dev->dev_addr, "\x00\x01", 2); 1123 random_ether_addr(dev->dev_addr);
1127 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); 1124 else {
1128 } else {
1129 *(__be16*)dev->dev_addr = htons(dlci); 1125 *(__be16*)dev->dev_addr = htons(dlci);
1130 dlci_to_q922(dev->broadcast, dlci); 1126 dlci_to_q922(dev->broadcast, dlci);
1131 } 1127 }
@@ -1137,7 +1133,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1137 dev->change_mtu = pvc_change_mtu; 1133 dev->change_mtu = pvc_change_mtu;
1138 dev->mtu = HDLC_MAX_MTU; 1134 dev->mtu = HDLC_MAX_MTU;
1139 dev->tx_queue_len = 0; 1135 dev->tx_queue_len = 0;
1140 dev->priv = pvc; 1136 pvcdev_to_desc(dev)->pvc = pvc;
1141 1137
1142 result = dev_alloc_name(dev, dev->name); 1138 result = dev_alloc_name(dev, dev->name);
1143 if (result < 0) { 1139 if (result < 0) {
@@ -1219,6 +1215,7 @@ static struct hdlc_proto proto = {
1219 .stop = fr_stop, 1215 .stop = fr_stop,
1220 .detach = fr_destroy, 1216 .detach = fr_destroy,
1221 .ioctl = fr_ioctl, 1217 .ioctl = fr_ioctl,
1218 .netif_rx = fr_rx,
1222 .module = THIS_MODULE, 1219 .module = THIS_MODULE,
1223}; 1220};
1224 1221
@@ -1277,7 +1274,7 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
1277 return result; 1274 return result;
1278 1275
1279 if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */ 1276 if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
1280 result = attach_hdlc_protocol(dev, &proto, fr_rx, 1277 result = attach_hdlc_protocol(dev, &proto,
1281 sizeof(struct frad_state)); 1278 sizeof(struct frad_state));
1282 if (result) 1279 if (result)
1283 return result; 1280 return result;
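
Note: because the stats block is no longer shared through netdev_priv(), each Frame Relay PVC device now carries its own struct pvc_desc (counters plus a backpointer to the pvc_device) in dev->priv, reached through pvcdev_to_desc() and pvc_get_stats() above. A small illustration under that assumption; pvc_count_tx() is a hypothetical helper, not part of the patch:

	static void pvc_count_tx(struct net_device *dev, unsigned int bytes)
	{
		struct net_device_stats *stats = pvc_get_stats(dev);

		/* counters live in the PVC's own pvc_desc, not in the frad */
		stats->tx_packets++;
		stats->tx_bytes += bytes;
	}
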
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 519e1550e2e7..10396d9686f4 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -122,7 +122,7 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
122 if (result) 122 if (result)
123 return result; 123 return result;
124 124
125 result = attach_hdlc_protocol(dev, &proto, NULL, 125 result = attach_hdlc_protocol(dev, &proto,
126 sizeof(struct ppp_state)); 126 sizeof(struct ppp_state));
127 if (result) 127 if (result)
128 return result; 128 return result;
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index e23bc6656267..bbbb819d764c 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -82,7 +82,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
82 if (result) 82 if (result)
83 return result; 83 return result;
84 84
85 result = attach_hdlc_protocol(dev, &proto, NULL, 85 result = attach_hdlc_protocol(dev, &proto,
86 sizeof(raw_hdlc_proto)); 86 sizeof(raw_hdlc_proto));
87 if (result) 87 if (result)
88 return result; 88 return result;
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 8895394e6006..d20c685f6711 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -18,7 +18,6 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/skbuff.h> 19#include <linux/skbuff.h>
20#include <linux/pkt_sched.h> 20#include <linux/pkt_sched.h>
21#include <linux/random.h>
22#include <linux/inetdevice.h> 21#include <linux/inetdevice.h>
23#include <linux/lapb.h> 22#include <linux/lapb.h>
24#include <linux/rtnetlink.h> 23#include <linux/rtnetlink.h>
@@ -96,7 +95,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
96 if (result) 95 if (result)
97 return result; 96 return result;
98 97
99 result = attach_hdlc_protocol(dev, &proto, NULL, 98 result = attach_hdlc_protocol(dev, &proto,
100 sizeof(raw_hdlc_proto)); 99 sizeof(raw_hdlc_proto));
101 if (result) 100 if (result)
102 return result; 101 return result;
@@ -107,8 +106,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
107 ether_setup(dev); 106 ether_setup(dev);
108 dev->change_mtu = old_ch_mtu; 107 dev->change_mtu = old_ch_mtu;
109 dev->tx_queue_len = old_qlen; 108 dev->tx_queue_len = old_qlen;
110 memcpy(dev->dev_addr, "\x00\x01", 2); 109 random_ether_addr(dev->dev_addr);
111 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
112 netif_dormant_off(dev); 110 netif_dormant_off(dev);
113 return 0; 111 return 0;
114 } 112 }
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index cd7b22f50edc..c15cc11e399b 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -164,17 +164,17 @@ static void x25_close(struct net_device *dev)
164 164
165static int x25_rx(struct sk_buff *skb) 165static int x25_rx(struct sk_buff *skb)
166{ 166{
167 struct hdlc_device_desc *desc = dev_to_desc(skb->dev); 167 struct hdlc_device *hdlc = dev_to_hdlc(skb->dev);
168 168
169 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 169 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
170 desc->stats.rx_dropped++; 170 hdlc->stats.rx_dropped++;
171 return NET_RX_DROP; 171 return NET_RX_DROP;
172 } 172 }
173 173
174 if (lapb_data_received(skb->dev, skb) == LAPB_OK) 174 if (lapb_data_received(skb->dev, skb) == LAPB_OK)
175 return NET_RX_SUCCESS; 175 return NET_RX_SUCCESS;
176 176
177 desc->stats.rx_errors++; 177 hdlc->stats.rx_errors++;
178 dev_kfree_skb_any(skb); 178 dev_kfree_skb_any(skb);
179 return NET_RX_DROP; 179 return NET_RX_DROP;
180} 180}
@@ -184,6 +184,7 @@ static struct hdlc_proto proto = {
184 .open = x25_open, 184 .open = x25_open,
185 .close = x25_close, 185 .close = x25_close,
186 .ioctl = x25_ioctl, 186 .ioctl = x25_ioctl,
187 .netif_rx = x25_rx,
187 .module = THIS_MODULE, 188 .module = THIS_MODULE,
188}; 189};
189 190
@@ -211,8 +212,7 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
211 if (result) 212 if (result)
212 return result; 213 return result;
213 214
214 if ((result = attach_hdlc_protocol(dev, &proto, 215 if ((result = attach_hdlc_protocol(dev, &proto, 0)))
215 x25_rx, 0)) != 0)
216 return result; 216 return result;
217 dev->hard_start_xmit = x25_xmit; 217 dev->hard_start_xmit = x25_xmit;
218 dev->type = ARPHRD_X25; 218 dev->type = ARPHRD_X25;
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index db390c511ada..6115545a5b9c 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -26,13 +26,6 @@
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/hdlc/ioctl.h> 27#include <linux/hdlc/ioctl.h>
28 28
29
30/* Used by all network devices here, pointed to by netdev_priv(dev) */
31struct hdlc_device_desc {
32 int (*netif_rx)(struct sk_buff *skb);
33 struct net_device_stats stats;
34};
35
36/* This structure is a private property of HDLC protocols. 29/* This structure is a private property of HDLC protocols.
37 Hardware drivers have no interest here */ 30 Hardware drivers have no interest here */
38 31
@@ -44,12 +37,15 @@ struct hdlc_proto {
44 void (*detach)(struct net_device *dev); 37 void (*detach)(struct net_device *dev);
45 int (*ioctl)(struct net_device *dev, struct ifreq *ifr); 38 int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
46 __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); 39 __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
40 int (*netif_rx)(struct sk_buff *skb);
47 struct module *module; 41 struct module *module;
48 struct hdlc_proto *next; /* next protocol in the list */ 42 struct hdlc_proto *next; /* next protocol in the list */
49}; 43};
50 44
51 45
46/* Pointed to by dev->priv */
52typedef struct hdlc_device { 47typedef struct hdlc_device {
48 struct net_device_stats stats;
53 /* used by HDLC layer to take control over HDLC device from hw driver*/ 49 /* used by HDLC layer to take control over HDLC device from hw driver*/
54 int (*attach)(struct net_device *dev, 50 int (*attach)(struct net_device *dev,
55 unsigned short encoding, unsigned short parity); 51 unsigned short encoding, unsigned short parity);
@@ -83,18 +79,11 @@ void unregister_hdlc_protocol(struct hdlc_proto *proto);
83 79
84struct net_device *alloc_hdlcdev(void *priv); 80struct net_device *alloc_hdlcdev(void *priv);
85 81
86 82static inline struct hdlc_device* dev_to_hdlc(struct net_device *dev)
87static __inline__ struct hdlc_device_desc* dev_to_desc(struct net_device *dev)
88{
89 return netdev_priv(dev);
90}
91
92static __inline__ hdlc_device* dev_to_hdlc(struct net_device *dev)
93{ 83{
94 return netdev_priv(dev) + sizeof(struct hdlc_device_desc); 84 return dev->priv;
95} 85}
96 86
97
98static __inline__ void debug_frame(const struct sk_buff *skb) 87static __inline__ void debug_frame(const struct sk_buff *skb)
99{ 88{
100 int i; 89 int i;
@@ -116,13 +105,13 @@ int hdlc_open(struct net_device *dev);
116void hdlc_close(struct net_device *dev); 105void hdlc_close(struct net_device *dev);
117 106
118int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, 107int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
119 int (*rx)(struct sk_buff *skb), size_t size); 108 size_t size);
120/* May be used by hardware driver to gain control over HDLC device */ 109/* May be used by hardware driver to gain control over HDLC device */
121void detach_hdlc_protocol(struct net_device *dev); 110void detach_hdlc_protocol(struct net_device *dev);
122 111
123static __inline__ struct net_device_stats *hdlc_stats(struct net_device *dev) 112static __inline__ struct net_device_stats *hdlc_stats(struct net_device *dev)
124{ 113{
125 return &dev_to_desc(dev)->stats; 114 return &dev_to_hdlc(dev)->stats;
126} 115}
127 116
128 117
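
Note: with the stats structure embedded directly in struct hdlc_device and dev_to_hdlc() reduced to dev->priv, hardware drivers and protocol modules update counters through the same hdlc_stats() helper. A brief sketch under that assumption; my_hw_rx() is a placeholder for a hardware driver's receive path, not part of the patch:

	static void my_hw_rx(struct net_device *dev, struct sk_buff *skb)
	{
		struct net_device_stats *stats = hdlc_stats(dev);	/* == &dev_to_hdlc(dev)->stats */

		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		netif_rx(skb);
	}
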