-rw-r--r--  drivers/net/tulip/winbond-840.c | 68 ++++++++++++++++++++++++--------------------------------------------
 1 file changed, 24 insertions(+), 44 deletions(-)
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 6b82d1498223..b54378fac8f0 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -90,10 +90,8 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
    Making the Tx ring too large decreases the effectiveness of channel
    bonding and packet priority.
    There are no ill effects from too-large receive rings. */
-#define TX_RING_SIZE	16
 #define TX_QUEUE_LEN	10		/* Limit ring entries actually used. */
 #define TX_QUEUE_LEN_RESTART	5
-#define RX_RING_SIZE	32
 
 #define TX_BUFLIMIT	(1024-128)
 
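
A side note on the two limits that remain after TX_RING_SIZE and RX_RING_SIZE move out of this file: a minimal sketch of how TX_QUEUE_LEN and TX_QUEUE_LEN_RESTART are conventionally applied, using the cur_tx/dirty_tx fields visible later in this diff. The helpers below are illustrative, not part of the patch, and TX_RING_SIZE is assumed to now come from the shared tulip.h.

/* Illustrative only: throttle the Tx ring with the queue-length limits above. */
static int w840_tx_should_stop(struct netdev_private *np)
{
	/* stop queueing once TX_QUEUE_LEN descriptors are outstanding */
	return (np->cur_tx - np->dirty_tx) >= TX_QUEUE_LEN;
}

static int w840_tx_can_restart(struct netdev_private *np)
{
	/* wake the queue once it has drained below the restart mark */
	return (np->cur_tx - np->dirty_tx) < TX_QUEUE_LEN_RESTART;
}
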
@@ -137,6 +135,8 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
 #include <asm/io.h>
 #include <asm/irq.h>
 
+#include "tulip.h"
+
 /* These identify the driver base version and may not be removed. */
 static char version[] =
 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
@@ -242,8 +242,8 @@ static const struct pci_id_info pci_id_tbl[] __devinitdata = {
 };
 
 /* This driver was written to use PCI memory space, however some x86 systems
-   work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space
-   accesses instead of memory space. */
+   work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
+*/
 
 /* Offsets to the Command and Status Registers, "CSRs".
    While similar to the Tulip, these registers are longword aligned.
@@ -261,21 +261,11 @@ enum w840_offsets {
 	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
 };
 
-/* Bits in the interrupt status/enable registers. */
-/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
-enum intr_status_bits {
-	NormalIntr=0x10000, AbnormalIntr=0x8000,
-	IntrPCIErr=0x2000, TimerInt=0x800,
-	IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
-	TxFIFOUnderflow=0x20, RxErrIntr=0x10,
-	TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
-};
-
 /* Bits in the NetworkConfig register. */
 enum rx_mode_bits {
-	AcceptErr=0x80, AcceptRunt=0x40,
-	AcceptBroadcast=0x20, AcceptMulticast=0x10,
-	AcceptAllPhys=0x08, AcceptMyPhys=0x02,
+	AcceptErr=0x80,
+	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
+	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
 };
 
 enum mii_reg_bits {
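
The renames in the hunks below imply a one-for-one mapping from the enums deleted in this patch to symbols expected from the shared tulip.h. The sketch keeps the old bit values for reference; the real definitions are assumed to live in tulip.h and are not shown in this diff.

/* Sketch only: assumed equivalents of the deleted local bits, inferred
 * from the substitutions made later in this patch. */
#define RxIntr_assumed      0x40        /* was IntrRxDone    */
#define RxDied_assumed      0x100       /* was IntrRxDied    */
#define TxIntr_assumed      0x01        /* was IntrTxDone    */
#define TxNoBuf_assumed     0x04        /* was TxIdle        */
#define TxDied_assumed      0x02        /* was IntrTxStopped */
#define SytemError_assumed  0x2000      /* was IntrPCIErr; spelling follows the new code */
#define DescOwned_assumed   0x80000000  /* was DescOwn; descriptor ownership bit */
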
@@ -297,13 +287,6 @@ struct w840_tx_desc {
 	u32 buffer1, buffer2;
 };
 
-/* Bits in network_desc.status */
-enum desc_status_bits {
-	DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
-	DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
-	DescIntr=0x80000000,
-};
-
 #define MII_CNT		1 /* winbond only supports one MII */
 struct netdev_private {
 	struct w840_rx_desc *rx_ring;
@@ -371,7 +354,6 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
 	int irq;
 	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
 	void __iomem *ioaddr;
-	int bar = 1;
 
 	i = pci_enable_device(pdev);
 	if (i) return i;
@@ -393,10 +375,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
 
 	if (pci_request_regions(pdev, DRV_NAME))
 		goto err_out_netdev;
-#ifdef USE_IO_OPS
-	bar = 0;
-#endif
-	ioaddr = pci_iomap(pdev, bar, netdev_res_size);
+
+	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
 	if (!ioaddr)
 		goto err_out_free_res;
 
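
With the USE_IO_OPS conditional gone, choosing between the I/O and memory BARs is delegated to the shared header. A plausible shape of that definition, inferred from the old bar = 1 / bar = 0 logic and the CONFIG_TULIP_MMIO note earlier in this diff (the actual tulip.h may differ):

/* Assumed definition of TULIP_BAR, mirroring the removed bar selection. */
#ifdef CONFIG_TULIP_MMIO
#define TULIP_BAR	1	/* PCI memory space, like the old default bar = 1 */
#else
#define TULIP_BAR	0	/* PCI I/O space, like the old USE_IO_OPS path */
#endif
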
@@ -838,7 +818,7 @@ static void init_rxtx_rings(struct net_device *dev)
 					np->rx_buf_sz,PCI_DMA_FROMDEVICE);
 
 		np->rx_ring[i].buffer1 = np->rx_addr[i];
-		np->rx_ring[i].status = DescOwn;
+		np->rx_ring[i].status = DescOwned;
 	}
 
 	np->cur_rx = 0;
@@ -923,7 +903,7 @@ static void init_registers(struct net_device *dev)
 	}
 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
 	i |= 0xE000;
-#elif defined(__sparc__)
+#elif defined(__sparc__) || defined (CONFIG_PARISC)
 	i |= 0x4800;
 #else
 #warning Processor architecture undefined
@@ -1043,11 +1023,11 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 
 	/* Now acquire the irq spinlock.
 	 * The difficult race is the the ordering between
-	 * increasing np->cur_tx and setting DescOwn:
+	 * increasing np->cur_tx and setting DescOwned:
 	 * - if np->cur_tx is increased first the interrupt
 	 *   handler could consider the packet as transmitted
-	 *   since DescOwn is cleared.
-	 * - If DescOwn is set first the NIC could report the
+	 *   since DescOwned is cleared.
+	 * - If DescOwned is set first the NIC could report the
 	 *   packet as sent, but the interrupt handler would ignore it
 	 *   since the np->cur_tx was not yet increased.
 	 */
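
The comment above covers the producer side of the descriptor handoff; for context, here is a minimal sketch of the consumer side it races against. This is not the driver's actual netdev_tx_done: the field names follow the code in this diff, and DescOwned and TX_RING_SIZE are assumed to come from tulip.h.

/* Sketch: reclaim Tx descriptors only after the NIC has cleared the
 * ownership bit, advancing dirty_tx under np->lock so it pairs with
 * the cur_tx/DescOwned ordering described above. */
static void reclaim_tx_sketch(struct netdev_private *np)
{
	while (np->dirty_tx != np->cur_tx) {
		int entry = np->dirty_tx % TX_RING_SIZE;

		if (np->tx_ring[entry].status & DescOwned)
			break;			/* still owned by the NIC */
		/* ... unmap the buffer, update stats, free the skb ... */
		np->dirty_tx++;
	}
}
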
@@ -1055,7 +1035,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 	np->cur_tx++;
 
 	wmb(); /* flush length, buffer1, buffer2 */
-	np->tx_ring[entry].status = DescOwn;
+	np->tx_ring[entry].status = DescOwned;
 	wmb(); /* flush status and kick the hardware */
 	iowrite32(0, np->base_addr + TxStartDemand);
 	np->tx_q_bytes += skb->len;
@@ -1155,12 +1135,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
 
 		handled = 1;
 
-		if (intr_status & (IntrRxDone | RxNoBuf))
+		if (intr_status & (RxIntr | RxNoBuf))
 			netdev_rx(dev);
 		if (intr_status & RxNoBuf)
 			iowrite32(0, ioaddr + RxStartDemand);
 
-		if (intr_status & (TxIdle | IntrTxDone) &&
+		if (intr_status & (TxNoBuf | TxIntr) &&
 			np->cur_tx != np->dirty_tx) {
 			spin_lock(&np->lock);
 			netdev_tx_done(dev);
@@ -1168,8 +1148,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
 		}
 
 		/* Abnormal error summary/uncommon events handlers. */
-		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
-				   TimerInt | IntrTxStopped))
+		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SytemError |
+				   TimerInt | TxDied))
 			netdev_error(dev, intr_status);
 
 		if (--work_limit < 0) {
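
Both interrupt hunks above sit inside the driver's service loop. The sketch below shows a rough shape of that enclosing loop: IntrStatus is the CSR offset the driver already defines, NormalIntr and AbnormalIntr are assumed to remain available from the shared header, and the acknowledge write and loop details are illustrative rather than copied from the file.

/* Illustrative shape of the interrupt service loop around the hunks above. */
static int intr_loop_sketch(void __iomem *ioaddr, int work_limit)
{
	int handled = 0;

	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* write the bits back to acknowledge them (assumed behaviour) */
		iowrite32(intr_status, ioaddr + IntrStatus);

		if ((intr_status & (NormalIntr | AbnormalIntr)) == 0)
			break;
		handled = 1;

		/* ... Rx, Tx and abnormal-error dispatch as in the hunks above ... */

		if (--work_limit < 0)
			break;	/* too much work in one IRQ; the real driver defers the rest */
	} while (1);

	return handled;
}
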
@@ -1305,7 +1285,7 @@ static int netdev_rx(struct net_device *dev)
 			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
 		}
 		wmb();
-		np->rx_ring[entry].status = DescOwn;
+		np->rx_ring[entry].status = DescOwned;
 	}
 
 	return 0;
@@ -1342,7 +1322,7 @@ static void netdev_error(struct net_device *dev, int intr_status)
 			dev->name, new);
 		update_csr6(dev, new);
 	}
-	if (intr_status & IntrRxDied) {		/* Missed a Rx frame. */
+	if (intr_status & RxDied) {		/* Missed a Rx frame. */
 		np->stats.rx_errors++;
 	}
 	if (intr_status & TimerInt) {
@@ -1381,13 +1361,13 @@ static u32 __set_rx_mode(struct net_device *dev)
 		/* Unconditionally log net taps. */
 		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
 		memset(mc_filter, 0xff, sizeof(mc_filter));
-		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
+		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
 			| AcceptMyPhys;
 	} else if ((dev->mc_count > multicast_filter_limit)
 		   || (dev->flags & IFF_ALLMULTI)) {
 		/* Too many to match, or accept all multicasts. */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
-		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 	} else {
 		struct dev_mc_list *mclist;
 		int i;
@@ -1398,7 +1378,7 @@ static u32 __set_rx_mode(struct net_device *dev)
 			filterbit &= 0x3f;
 			mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
 		}
-		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 	}
 	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
 	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
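
For completeness, a sketch of the loop the three rx_mode hunks belong to: each multicast address is hashed down to 6 bits and the matching bit is set in the two 32-bit words written to MulticastFilter0/1. Only the masking and bit placement appear in the hunks above; the ether_crc()-based hash and the dev_mc_list walk are assumptions about the surrounding code.

/* Sketch (not the driver's exact code): build the 64-entry multicast
 * hash filter that feeds MulticastFilter0/1. */
static void build_mc_filter_sketch(struct net_device *dev, u32 mc_filter[2])
{
	struct dev_mc_list *mclist;
	int i;

	mc_filter[0] = mc_filter[1] = 0;
	for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
	     i++, mclist = mclist->next) {
		int filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr);	/* assumed hash */
		filterbit &= 0x3f;			/* reduce to a 64-entry table */
		mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
	}
}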