Diffstat (limited to 'drivers/net/sundance.c')
-rw-r--r--  drivers/net/sundance.c | 44
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index a3a7a3506bd2..f64a28513ba2 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -907,7 +907,7 @@ static void tx_timeout(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
@@ -924,13 +924,13 @@ static void tx_timeout(struct net_device *dev)
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
@@ -1002,9 +1002,9 @@ static void tx_poll (unsigned long data)
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
@@ -1074,7 +1074,7 @@ reset_tx (struct net_device *dev)
	struct sk_buff *skb;
	int i;
	int irq = in_interrupt();

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
@@ -1083,7 +1083,7 @@ reset_tx (struct net_device *dev)
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			if (irq)
@@ -1100,7 +1100,7 @@ reset_tx (struct net_device *dev)
	return 0;
}

/* The interrupt handler cleans up after the Tx thread,
   and schedule a Rx thread work */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
@@ -1181,8 +1181,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_rev_id >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
@@ -1194,7 +1194,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
				    !(le32_to_cpu(np->tx_ring[entry].status)
				    & 0x00010000))
					break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
					break;
				skb = np->tx_skbuff[entry];
@@ -1213,7 +1213,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
					break;
				skb = np->tx_skbuff[entry];
@@ -1228,7 +1228,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
		    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
@@ -1598,18 +1598,18 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	case SIOCDEVPRIVATE:
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2)
					& 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
@@ -1617,7 +1617,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
		printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
		return 0;
	}


	return rc;
}
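
The Tx cleanup hunks above (in intr_handler, around lines 1187 and 1213) all rely on the same ring-index idiom: cur_tx and dirty_tx are free-running counters, and "% TX_RING_SIZE" maps them onto ring slots. The following is a minimal standalone sketch of that pattern, not code from the patch; apart from TX_RING_SIZE, cur_tx and dirty_tx, the names (tx_ring_state, done, reclaim_tx) are illustrative only.

	#include <stdio.h>

	#define TX_RING_SIZE 32			/* ring size used by sundance.c */

	struct tx_ring_state {
		unsigned int cur_tx;		/* next slot the driver will fill */
		unsigned int dirty_tx;		/* oldest slot not yet reclaimed  */
		int done[TX_RING_SIZE];		/* stand-in for the descriptor
						   completion bit (0x00010000)    */
	};

	/* Reclaim completed slots, mirroring the shape of the intr_handler loop. */
	static void reclaim_tx(struct tx_ring_state *r)
	{
		for (; r->cur_tx - r->dirty_tx > 0; r->dirty_tx++) {
			int entry = r->dirty_tx % TX_RING_SIZE;

			if (!r->done[entry])	/* hardware has not finished it yet */
				break;
			r->done[entry] = 0;	/* slot is free again */
		}
	}

	int main(void)
	{
		struct tx_ring_state r = { .cur_tx = 3, .dirty_tx = 0,
					   .done = { 1, 1, 0 } };

		reclaim_tx(&r);
		printf("dirty_tx=%u cur_tx=%u in-flight=%u\n",
		       r.dirty_tx, r.cur_tx, r.cur_tx - r.dirty_tx);	/* prints 2 3 1 */
		return 0;
	}

Because the counters only ever grow, cur_tx - dirty_tx directly gives the number of in-flight descriptors, which is how the driver's "np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4" wake-up check in the hunk at line 1233 works without any wrap-around bookkeeping.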