Diffstat (limited to 'drivers/net/via-rhine.c')
 drivers/net/via-rhine.c | 40 +++++++++-------------------------------
 1 file changed, 9 insertions(+), 31 deletions(-)
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 6a23964c1317..fdc21037f6dc 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -129,6 +129,7 @@
 	- Massive clean-up
 	- Rewrite PHY, media handling (remove options, full_duplex, backoff)
 	- Fix Tx engine race for good
+	- Craig Brind: Zero padded aligned buffers for short packets.
 
 */
 
@@ -490,8 +491,6 @@ struct rhine_private {
 	u8 tx_thresh, rx_thresh;
 
 	struct mii_if_info mii_if;
-	struct work_struct tx_timeout_task;
-	struct work_struct check_media_task;
 	void __iomem *base;
 };
 
@@ -499,8 +498,6 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int rhine_open(struct net_device *dev);
 static void rhine_tx_timeout(struct net_device *dev);
-static void rhine_tx_timeout_task(struct net_device *dev);
-static void rhine_check_media_task(struct net_device *dev);
 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static void rhine_tx(struct net_device *dev);
@@ -855,12 +852,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
-	INIT_WORK(&rp->tx_timeout_task,
-		  (void (*)(void *))rhine_tx_timeout_task, dev);
-
-	INIT_WORK(&rp->check_media_task,
-		  (void (*)(void *))rhine_check_media_task, dev);
-
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)
@@ -1107,11 +1098,6 @@ static void rhine_set_carrier(struct mii_if_info *mii)
 		       netif_carrier_ok(mii->dev));
 }
 
-static void rhine_check_media_task(struct net_device *dev)
-{
-	rhine_check_media(dev, 0);
-}
-
 static void init_registers(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
@@ -1165,8 +1151,8 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
 	if (quirks & rqRhineI) {
 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
 
-		/* Do not call from ISR! */
-		msleep(1);
+		/* Can be called from ISR. Evil. */
+		mdelay(1);
 
 		/* 0x80 must be set immediately before turning it off */
 		iowrite8(0x80, ioaddr + MIICmd);
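
With the work queue gone, rhine_disable_linkmon() can now be reached from the interrupt handler (rhine_error() -> rhine_check_media()), where sleeping is forbidden; that is why the sleeping msleep(1) above becomes a busy-waiting mdelay(1). A minimal sketch of the distinction, using a hypothetical wait_one_ms() helper that is not part of the driver:

#include <linux/delay.h>
#include <linux/types.h>

/* Illustrative only: msleep() invokes the scheduler and may sleep,
 * so it is legal in process context only; mdelay() spins in a
 * calibrated busy-wait and is safe in atomic (ISR, spinlock-held)
 * context, at the cost of occupying the CPU for the full delay. */
static void wait_one_ms(bool atomic_ctx)
{
	if (atomic_ctx)
		mdelay(1);	/* busy-wait; never schedules */
	else
		msleep(1);	/* sleeps; lets other tasks run */
}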
@@ -1256,16 +1242,6 @@ static int rhine_open(struct net_device *dev)
 static void rhine_tx_timeout(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
-
-	/*
-	 * Move bulk of work outside of interrupt context
-	 */
-	schedule_work(&rp->tx_timeout_task);
-}
-
-static void rhine_tx_timeout_task(struct net_device *dev)
-{
-	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 
 	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
@@ -1326,7 +1302,12 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
 			rp->stats.tx_dropped++;
 			return 0;
 		}
+
+		/* Padding is not copied and so must be redone. */
 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
+		if (skb->len < ETH_ZLEN)
+			memset(rp->tx_buf[entry] + skb->len, 0,
+			       ETH_ZLEN - skb->len);
 		rp->tx_skbuff_dma[entry] = 0;
 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
 						      (rp->tx_buf[entry] -
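
On Rhine I, frames that go through the alignment/checksum bounce buffer are copied with skb_copy_and_csum_dev(), which transfers exactly skb->len bytes; the zero padding that skb_padto() applied to the original skb is not copied along, so the tail of rp->tx_buf[entry] up to the 60-byte minimum Ethernet frame (ETH_ZLEN) must be re-zeroed by hand, or stale buffer contents from earlier frames leak onto the wire. A minimal sketch of that copy-then-pad step; the bounce_and_pad() helper name is hypothetical (the patch does this inline):

#include <linux/if_ether.h>	/* ETH_ZLEN */
#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical helper mirroring the inline fix: copy the frame into
 * the bounce buffer, then zero the gap up to the minimum frame size
 * so no stale memory is transmitted as padding. */
static void bounce_and_pad(const struct sk_buff *skb, u8 *buf)
{
	skb_copy_and_csum_dev(skb, buf);	/* copies skb->len bytes only */
	if (skb->len < ETH_ZLEN)
		memset(buf + skb->len, 0, ETH_ZLEN - skb->len);
}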
@@ -1671,7 +1652,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
 	spin_lock(&rp->lock);
 
 	if (intr_status & IntrLinkChange)
-		schedule_work(&rp->check_media_task);
+		rhine_check_media(dev, 0);
 	if (intr_status & IntrStatsMax) {
 		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
 		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
@@ -1921,9 +1902,6 @@ static int rhine_close(struct net_device *dev)
 	spin_unlock_irq(&rp->lock);
 
 	free_irq(rp->pdev->irq, dev);
-
-	flush_scheduled_work();
-
 	free_rbufs(dev);
 	free_tbufs(dev);
 	free_ring(dev);