 drivers/net/epic100.c | 47 +++++++++++++++++++++++++++--------------------
 1 file changed, 27 insertions(+), 20 deletions(-)
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 0b365b8d947b..76118ddd1042 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -131,8 +131,8 @@ IIIa. Ring buffers
 
 IVb. References
 
-http://www.smsc.com/main/datasheets/83c171.pdf
-http://www.smsc.com/main/datasheets/83c175.pdf
+http://www.smsc.com/main/tools/discontinued/83c171.pdf
+http://www.smsc.com/main/tools/discontinued/83c175.pdf
 http://scyld.com/expert/NWay.html
 http://www.national.com/pf/DP/DP83840A.html
 
@@ -227,7 +227,12 @@ static const u16 media2miictl[16] = {
 	0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0 };
 
-/* The EPIC100 Rx and Tx buffer descriptors. */
+/*
+ * The EPIC100 Rx and Tx buffer descriptors. Note that these
+ * really ARE host-endian; it's not a misannotation. We tell
+ * the card to byteswap them internally on big-endian hosts -
+ * look for #ifdef CONFIG_BIG_ENDIAN in epic_open().
+ */
 
 struct epic_tx_desc {
 	u32 txstatus;
@@ -418,7 +423,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 
 	/* Note: the '175 does not have a serial EEPROM. */
 	for (i = 0; i < 3; i++)
-		((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
+		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4));
 
 	if (debug > 2) {
 		dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
@@ -682,7 +687,8 @@ static int epic_open(struct net_device *dev)
 	if (ep->chip_flags & MII_PWRDWN)
 		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
 
-#if defined(__powerpc__) || defined(__sparc__)	/* Big endian */
+	/* Tell the chip to byteswap descriptors on big-endian hosts */
+#ifdef CONFIG_BIG_ENDIAN
 	outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
 	inl(ioaddr + GENCTL);
 	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
@@ -695,7 +701,7 @@ static int epic_open(struct net_device *dev)
 	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
 
 	for (i = 0; i < 3; i++)
-		outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
 
 	ep->tx_threshold = TX_FIFO_THRESH;
 	outl(ep->tx_threshold, ioaddr + TxThresh);
@@ -798,7 +804,7 @@ static void epic_restart(struct net_device *dev)
 	for (i = 16; i > 0; i--)
 		outl(0x0008, ioaddr + TEST1);
 
-#if defined(__powerpc__) || defined(__sparc__)	/* Big endian */
+#ifdef CONFIG_BIG_ENDIAN
 	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
 #else
 	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
@@ -808,7 +814,7 @@ static void epic_restart(struct net_device *dev)
 		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
 
 	for (i = 0; i < 3; i++)
-		outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
 
 	ep->tx_threshold = TX_FIFO_THRESH;
 	outl(ep->tx_threshold, ioaddr + TxThresh);
@@ -919,7 +925,7 @@ static void epic_init_ring(struct net_device *dev)
 	/* Initialize all Rx descriptors. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		ep->rx_ring[i].rxstatus = 0;
-		ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
+		ep->rx_ring[i].buflength = ep->rx_buf_sz;
 		ep->rx_ring[i].next = ep->rx_ring_dma +
 				      (i+1)*sizeof(struct epic_rx_desc);
 		ep->rx_skbuff[i] = NULL;
@@ -936,7 +942,7 @@ static void epic_init_ring(struct net_device *dev)
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
 			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
-		ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
+		ep->rx_ring[i].rxstatus = DescOwn;
 	}
 	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 
@@ -974,20 +980,20 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
 						    skb->len, PCI_DMA_TODEVICE);
 	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
-		ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
+		ctrl_word = 0x100000; /* No interrupt */
 	} else if (free_count == TX_QUEUE_LEN/2) {
-		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
+		ctrl_word = 0x140000; /* Tx-done intr. */
 	} else if (free_count < TX_QUEUE_LEN - 1) {
-		ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
+		ctrl_word = 0x100000; /* No Tx-done intr. */
 	} else {
 		/* Leave room for an additional entry. */
-		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
+		ctrl_word = 0x140000; /* Tx-done intr. */
 		ep->tx_full = 1;
 	}
-	ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
+	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
 	ep->tx_ring[entry].txstatus =
 		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
-		| cpu_to_le32(DescOwn);
+		| DescOwn;
 
 	ep->cur_tx++;
 	if (ep->tx_full)
@@ -1041,7 +1047,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
 	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
 		struct sk_buff *skb;
 		int entry = dirty_tx % TX_RING_SIZE;
-		int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
+		int txstatus = ep->tx_ring[entry].txstatus;
 
 		if (txstatus & DescOwn)
 			break;	/* It still hasn't been Txed */
@@ -1163,8 +1169,8 @@ static int epic_rx(struct net_device *dev, int budget)
 		rx_work_limit = budget;
 
 	/* If we own the next entry, it's a new packet. Send it up. */
-	while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
-		int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
+	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
+		int status = ep->rx_ring[entry].rxstatus;
 
 		if (debug > 4)
 			printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
@@ -1238,7 +1244,8 @@ static int epic_rx(struct net_device *dev, int budget)
 				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 			work_done++;
 		}
-		ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
+		/* AV: shouldn't we add a barrier here? */
+		ep->rx_ring[entry].rxstatus = DescOwn;
 	}
 	return work_done;
 }
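
A condensed sketch of the convention the hunks above settle on may help tie them together. It is illustrative only: the helper epic_endian_sketch() and its signature are invented for this example and the struct body is abbreviated, while DescOwn, GENCTL, RX_FIFO_THRESH, LAN0, CONFIG_BIG_ENDIAN and the __le16 handling of dev->dev_addr are taken straight from the patch. The idea: descriptors stay host-endian u32 and the chip is told to byteswap them itself; only the MAC-address words keep little-endian annotations and explicit conversions.

/* Sketch only - relies on the driver's own definitions, not part of it. */
struct epic_tx_desc {
	u32 txstatus;		/* deliberately host-endian, not __le32 */
	/* bufaddr, buflength, next omitted */
};

static void epic_endian_sketch(struct net_device *dev,
			       struct epic_private *ep, long ioaddr)
{
	int i;

	/* Ring fields are written in CPU byte order - no cpu_to_le32(): */
	ep->rx_ring[0].buflength = ep->rx_buf_sz;
	ep->rx_ring[0].rxstatus = DescOwn;

	/*
	 * The swap happens once, in hardware: a big-endian kernel programs
	 * GENCTL so the chip byteswaps descriptors itself on the PCI side,
	 * as in epic_open()/epic_restart() above.
	 */
#ifdef CONFIG_BIG_ENDIAN
	outl(0x0432 | (RX_FIFO_THRESH << 8), ioaddr + GENCTL);
#else
	outl(0x0412 | (RX_FIFO_THRESH << 8), ioaddr + GENCTL);
#endif

	/*
	 * dev->dev_addr, by contrast, really is a byte array, i.e. an array
	 * of little-endian 16-bit words, so it keeps __le16 annotations and
	 * explicit le16_to_cpu()/cpu_to_le16() conversions:
	 */
	for (i = 0; i < 3; i++)
		outl(le16_to_cpu(((__le16 *)dev->dev_addr)[i]),
		     ioaddr + LAN0 + i*4);
}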