Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r-- | drivers/net/forcedeth.c | 560
1 file changed, 360 insertions, 200 deletions
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 11b8f1b43dd5..59f9a515c07c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -109,6 +109,7 @@ | |||
109 | * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. | 109 | * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. |
110 | * 0.55: 22 Mar 2006: Add flow control (pause frame). | 110 | * 0.55: 22 Mar 2006: Add flow control (pause frame). |
111 | * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support. | 111 | * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support. |
112 | * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections. | ||
112 | * | 113 | * |
113 | * Known bugs: | 114 | * Known bugs: |
114 | * We suspect that on some hardware no TX done interrupts are generated. | 115 | * We suspect that on some hardware no TX done interrupts are generated. |
@@ -120,7 +121,12 @@ | |||
120 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few | 121 | * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few |
121 | * superfluous timer interrupts from the nic. | 122 | * superfluous timer interrupts from the nic. |
122 | */ | 123 | */ |
123 | #define FORCEDETH_VERSION "0.56" | 124 | #ifdef CONFIG_FORCEDETH_NAPI |
125 | #define DRIVERNAPI "-NAPI" | ||
126 | #else | ||
127 | #define DRIVERNAPI | ||
128 | #endif | ||
129 | #define FORCEDETH_VERSION "0.57" | ||
124 | #define DRV_NAME "forcedeth" | 130 | #define DRV_NAME "forcedeth" |
125 | 131 | ||
126 | #include <linux/module.h> | 132 | #include <linux/module.h> |
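The DRIVERNAPI macro relies on C's adjacent-string-literal concatenation: with CONFIG_FORCEDETH_NAPI disabled it expands to nothing, and the "-NAPI" suffix simply disappears wherever it is pasted next to a version string. A minimal user-space sketch of the same trick (the CONFIG_ define below is a stand-in for the real Kconfig symbol):

#include <stdio.h>

#define CONFIG_FORCEDETH_NAPI 1         /* stand-in for the Kconfig option */

#ifdef CONFIG_FORCEDETH_NAPI
#define DRIVERNAPI "-NAPI"
#else
#define DRIVERNAPI
#endif
#define FORCEDETH_VERSION "0.57"

int main(void)
{
        /* adjacent literals concatenate: "0.57" DRIVERNAPI -> "0.57-NAPI" */
        printf("forcedeth %s\n", FORCEDETH_VERSION DRIVERNAPI);
        return 0;
}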
@@ -262,7 +268,8 @@ enum { | |||
262 | NvRegRingSizes = 0x108, | 268 | NvRegRingSizes = 0x108, |
263 | #define NVREG_RINGSZ_TXSHIFT 0 | 269 | #define NVREG_RINGSZ_TXSHIFT 0 |
264 | #define NVREG_RINGSZ_RXSHIFT 16 | 270 | #define NVREG_RINGSZ_RXSHIFT 16 |
265 | NvRegUnknownTransmitterReg = 0x10c, | 271 | NvRegTransmitPoll = 0x10c, |
272 | #define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000 | ||
266 | NvRegLinkSpeed = 0x110, | 273 | NvRegLinkSpeed = 0x110, |
267 | #define NVREG_LINKSPEED_FORCE 0x10000 | 274 | #define NVREG_LINKSPEED_FORCE 0x10000 |
268 | #define NVREG_LINKSPEED_10 1000 | 275 | #define NVREG_LINKSPEED_10 1000 |
@@ -381,21 +388,21 @@ enum { | |||
381 | 388 | ||
382 | /* Big endian: should work, but is untested */ | 389 | /* Big endian: should work, but is untested */ |
383 | struct ring_desc { | 390 | struct ring_desc { |
384 | u32 PacketBuffer; | 391 | __le32 buf; |
385 | u32 FlagLen; | 392 | __le32 flaglen; |
386 | }; | 393 | }; |
387 | 394 | ||
388 | struct ring_desc_ex { | 395 | struct ring_desc_ex { |
389 | u32 PacketBufferHigh; | 396 | __le32 bufhigh; |
390 | u32 PacketBufferLow; | 397 | __le32 buflow; |
391 | u32 TxVlan; | 398 | __le32 txvlan; |
392 | u32 FlagLen; | 399 | __le32 flaglen; |
393 | }; | 400 | }; |
394 | 401 | ||
395 | typedef union _ring_type { | 402 | union ring_type { |
396 | struct ring_desc* orig; | 403 | struct ring_desc* orig; |
397 | struct ring_desc_ex* ex; | 404 | struct ring_desc_ex* ex; |
398 | } ring_type; | 405 | }; |
399 | 406 | ||
400 | #define FLAG_MASK_V1 0xffff0000 | 407 | #define FLAG_MASK_V1 0xffff0000 |
401 | #define FLAG_MASK_V2 0xffffc000 | 408 | #define FLAG_MASK_V2 0xffffc000 |
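Renaming the descriptor fields to lower case and typing them as __le32 lets sparse flag any access that skips the le32_to_cpu()/cpu_to_le32() conversions; replacing the old typedef with a plain union is standard kernel style. A small user-space model of the two-layout union (uint32_t stands in for __le32, which is purely a kernel annotation):

#include <stdint.h>
#include <stdio.h>

struct ring_desc {              /* legacy layout: 32-bit buffer address */
        uint32_t buf;
        uint32_t flaglen;
};

struct ring_desc_ex {           /* extended layout: 64-bit address + vlan */
        uint32_t bufhigh;
        uint32_t buflow;
        uint32_t txvlan;
        uint32_t flaglen;
};

union ring_type {               /* one pointer, two views; desc_ver picks */
        struct ring_desc *orig;
        struct ring_desc_ex *ex;
};

int main(void)
{
        struct ring_desc_ex ring[4] = {{0}};
        union ring_type rx = { .ex = ring };

        rx.ex[2].flaglen = 0;   /* same shape as np->rx_ring.ex[i].flaglen */
        printf("orig=%zu bytes, ex=%zu bytes\n",
               sizeof(*rx.orig), sizeof(*rx.ex));
        return 0;
}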
@@ -536,6 +543,9 @@ typedef union _ring_type { | |||
536 | #define PHYID1_OUI_SHFT 6 | 543 | #define PHYID1_OUI_SHFT 6 |
537 | #define PHYID2_OUI_MASK 0xfc00 | 544 | #define PHYID2_OUI_MASK 0xfc00 |
538 | #define PHYID2_OUI_SHFT 10 | 545 | #define PHYID2_OUI_SHFT 10 |
546 | #define PHYID2_MODEL_MASK 0x03f0 | ||
547 | #define PHY_MODEL_MARVELL_E3016 0x220 | ||
548 | #define PHY_MARVELL_E3016_INITMASK 0x0300 | ||
539 | #define PHY_INIT1 0x0f000 | 549 | #define PHY_INIT1 0x0f000 |
540 | #define PHY_INIT2 0x0e00 | 550 | #define PHY_INIT2 0x0e00 |
541 | #define PHY_INIT3 0x01000 | 551 | #define PHY_INIT3 0x01000 |
@@ -653,8 +663,8 @@ static const struct nv_ethtool_str nv_etests_str[] = { | |||
653 | }; | 663 | }; |
654 | 664 | ||
655 | struct register_test { | 665 | struct register_test { |
656 | u32 reg; | 666 | __le32 reg; |
657 | u32 mask; | 667 | __le32 mask; |
658 | }; | 668 | }; |
659 | 669 | ||
660 | static const struct register_test nv_registers_test[] = { | 670 | static const struct register_test nv_registers_test[] = { |
@@ -694,6 +704,7 @@ struct fe_priv { | |||
694 | int phyaddr; | 704 | int phyaddr; |
695 | int wolenabled; | 705 | int wolenabled; |
696 | unsigned int phy_oui; | 706 | unsigned int phy_oui; |
707 | unsigned int phy_model; | ||
697 | u16 gigabit; | 708 | u16 gigabit; |
698 | int intr_test; | 709 | int intr_test; |
699 | 710 | ||
@@ -707,13 +718,14 @@ struct fe_priv { | |||
707 | u32 vlanctl_bits; | 718 | u32 vlanctl_bits; |
708 | u32 driver_data; | 719 | u32 driver_data; |
709 | u32 register_size; | 720 | u32 register_size; |
721 | int rx_csum; | ||
710 | 722 | ||
711 | void __iomem *base; | 723 | void __iomem *base; |
712 | 724 | ||
713 | /* rx specific fields. | 725 | /* rx specific fields. |
714 | * Locking: Within irq handler or disable_irq+spin_lock(&np->lock); | 726 | * Locking: Within irq handler or disable_irq+spin_lock(&np->lock); |
715 | */ | 727 | */ |
716 | ring_type rx_ring; | 728 | union ring_type rx_ring; |
717 | unsigned int cur_rx, refill_rx; | 729 | unsigned int cur_rx, refill_rx; |
718 | struct sk_buff **rx_skbuff; | 730 | struct sk_buff **rx_skbuff; |
719 | dma_addr_t *rx_dma; | 731 | dma_addr_t *rx_dma; |
@@ -733,7 +745,7 @@ struct fe_priv { | |||
733 | /* | 745 | /* |
734 | * tx specific fields. | 746 | * tx specific fields. |
735 | */ | 747 | */ |
736 | ring_type tx_ring; | 748 | union ring_type tx_ring; |
737 | unsigned int next_tx, nic_tx; | 749 | unsigned int next_tx, nic_tx; |
738 | struct sk_buff **tx_skbuff; | 750 | struct sk_buff **tx_skbuff; |
739 | dma_addr_t *tx_dma; | 751 | dma_addr_t *tx_dma; |
@@ -826,13 +838,13 @@ static inline void pci_push(u8 __iomem *base) | |||
826 | 838 | ||
827 | static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v) | 839 | static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v) |
828 | { | 840 | { |
829 | return le32_to_cpu(prd->FlagLen) | 841 | return le32_to_cpu(prd->flaglen) |
830 | & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); | 842 | & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); |
831 | } | 843 | } |
832 | 844 | ||
833 | static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) | 845 | static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) |
834 | { | 846 | { |
835 | return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2; | 847 | return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; |
836 | } | 848 | } |
837 | 849 | ||
838 | static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, | 850 | static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, |
@@ -885,7 +897,7 @@ static void free_rings(struct net_device *dev) | |||
885 | struct fe_priv *np = get_nvpriv(dev); | 897 | struct fe_priv *np = get_nvpriv(dev); |
886 | 898 | ||
887 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 899 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
888 | if(np->rx_ring.orig) | 900 | if (np->rx_ring.orig) |
889 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), | 901 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), |
890 | np->rx_ring.orig, np->ring_addr); | 902 | np->rx_ring.orig, np->ring_addr); |
891 | } else { | 903 | } else { |
@@ -1020,14 +1032,13 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value) | |||
1020 | return retval; | 1032 | return retval; |
1021 | } | 1033 | } |
1022 | 1034 | ||
1023 | static int phy_reset(struct net_device *dev) | 1035 | static int phy_reset(struct net_device *dev, u32 bmcr_setup) |
1024 | { | 1036 | { |
1025 | struct fe_priv *np = netdev_priv(dev); | 1037 | struct fe_priv *np = netdev_priv(dev); |
1026 | u32 miicontrol; | 1038 | u32 miicontrol; |
1027 | unsigned int tries = 0; | 1039 | unsigned int tries = 0; |
1028 | 1040 | ||
1029 | miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | 1041 | miicontrol = BMCR_RESET | bmcr_setup; |
1030 | miicontrol |= BMCR_RESET; | ||
1031 | if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { | 1042 | if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { |
1032 | return -1; | 1043 | return -1; |
1033 | } | 1044 | } |
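phy_reset() now takes the BMCR payload instead of doing its own read-modify-write: the reset bit and the caller's setup bits go out in a single MII write, which matters for phys that only latch new settings across a reset (see the Marvell E3016 handling below). A toy model of the new calling convention, with the register reduced to a plain variable:

#include <stdio.h>

#define BMCR_RESET    0x8000u           /* standard MII control bits */
#define BMCR_ANENABLE 0x1000u

static unsigned int bmcr;               /* stands in for the phy's MII_BMCR */

static int phy_reset(unsigned int bmcr_setup)
{
        bmcr = BMCR_RESET | bmcr_setup; /* one write: reset + setup bits */
        bmcr &= ~BMCR_RESET;            /* the phy self-clears the reset bit */
        return 0;
}

int main(void)
{
        phy_reset(BMCR_ANENABLE);
        printf("BMCR after reset: 0x%04x\n", bmcr);     /* 0x1000 */
        return 0;
}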
@@ -1052,6 +1063,16 @@ static int phy_init(struct net_device *dev) | |||
1052 | u8 __iomem *base = get_hwbase(dev); | 1063 | u8 __iomem *base = get_hwbase(dev); |
1053 | u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; | 1064 | u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; |
1054 | 1065 | ||
1066 | /* phy errata for E3016 phy */ | ||
1067 | if (np->phy_model == PHY_MODEL_MARVELL_E3016) { | ||
1068 | reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); | ||
1069 | reg &= ~PHY_MARVELL_E3016_INITMASK; | ||
1070 | if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { | ||
1071 | printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev)); | ||
1072 | return PHY_ERROR; | ||
1073 | } | ||
1074 | } | ||
1075 | |||
1055 | /* set advertise register */ | 1076 | /* set advertise register */ |
1056 | reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | 1077 | reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); |
1057 | reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); | 1078 | reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); |
@@ -1082,8 +1103,13 @@ static int phy_init(struct net_device *dev) | |||
1082 | else | 1103 | else |
1083 | np->gigabit = 0; | 1104 | np->gigabit = 0; |
1084 | 1105 | ||
1085 | /* reset the phy */ | 1106 | mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
1086 | if (phy_reset(dev)) { | 1107 | mii_control |= BMCR_ANENABLE; |
1108 | |||
1109 | /* reset the phy | ||
1110 | * (certain phys need bmcr to be setup with reset) | ||
1111 | */ | ||
1112 | if (phy_reset(dev, mii_control)) { | ||
1087 | printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); | 1113 | printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); |
1088 | return PHY_ERROR; | 1114 | return PHY_ERROR; |
1089 | } | 1115 | } |
@@ -1178,7 +1204,7 @@ static void nv_stop_tx(struct net_device *dev) | |||
1178 | KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); | 1204 | KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); |
1179 | 1205 | ||
1180 | udelay(NV_TXSTOP_DELAY2); | 1206 | udelay(NV_TXSTOP_DELAY2); |
1181 | writel(0, base + NvRegUnknownTransmitterReg); | 1207 | writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); |
1182 | } | 1208 | } |
1183 | 1209 | ||
1184 | static void nv_txrx_reset(struct net_device *dev) | 1210 | static void nv_txrx_reset(struct net_device *dev) |
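The register formerly named NvRegUnknownTransmitterReg is now identified as NvRegTransmitPoll: besides the poll control it carries NVREG_TRANSMITPOLL_MAC_ADDR_REV, which records whether the MAC address registers are stored byte-reversed. Stopping the transmitter must therefore no longer zero the whole register; only that one bit is preserved. A one-function model of the masked rewrite:

#include <stdint.h>
#include <stdio.h>

#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000u

/* clear the poll bits but keep the MAC-address-order flag */
static uint32_t stop_tx_poll_value(uint32_t reg)
{
        return reg & NVREG_TRANSMITPOLL_MAC_ADDR_REV;
}

int main(void)
{
        printf("0x%08x\n", stop_tx_poll_value(0x0000800fu)); /* 0x00008000 */
        return 0;
}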
@@ -1258,14 +1284,14 @@ static int nv_alloc_rx(struct net_device *dev) | |||
1258 | np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, | 1284 | np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, |
1259 | skb->end-skb->data, PCI_DMA_FROMDEVICE); | 1285 | skb->end-skb->data, PCI_DMA_FROMDEVICE); |
1260 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1286 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1261 | np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]); | 1287 | np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]); |
1262 | wmb(); | 1288 | wmb(); |
1263 | np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); | 1289 | np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); |
1264 | } else { | 1290 | } else { |
1265 | np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32; | 1291 | np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32; |
1266 | np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF; | 1292 | np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF; |
1267 | wmb(); | 1293 | wmb(); |
1268 | np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); | 1294 | np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); |
1269 | } | 1295 | } |
1270 | dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", | 1296 | dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", |
1271 | dev->name, refill_rx); | 1297 | dev->name, refill_rx); |
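In the extended descriptor format a 64-bit bus address travels as two 32-bit words; the driver shifts for the high half and masks with 0x0FFFFFFFF for the low half before the little-endian conversion. The split on its own, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* mirror of the bufhigh/buflow split used by the extended rx/tx rings */
static void split_dma_addr(uint64_t addr, uint32_t *hi, uint32_t *lo)
{
        *hi = (uint32_t)(addr >> 32);
        *lo = (uint32_t)(addr & 0x0FFFFFFFFull);
}

int main(void)
{
        uint32_t hi, lo;

        split_dma_addr(0x0000000123456789ull, &hi, &lo);
        printf("bufhigh=0x%08x buflow=0x%08x\n", hi, lo);
        return 0;
}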
@@ -1277,6 +1303,16 @@ static int nv_alloc_rx(struct net_device *dev) | |||
1277 | return 0; | 1303 | return 0; |
1278 | } | 1304 | } |
1279 | 1305 | ||
1306 | /* If rx bufs are exhausted, called after 50ms to attempt a refill */ | ||
1307 | #ifdef CONFIG_FORCEDETH_NAPI | ||
1308 | static void nv_do_rx_refill(unsigned long data) | ||
1309 | { | ||
1310 | struct net_device *dev = (struct net_device *) data; | ||
1311 | |||
1312 | /* Just reschedule NAPI rx processing */ | ||
1313 | netif_rx_schedule(dev); | ||
1314 | } | ||
1315 | #else | ||
1280 | static void nv_do_rx_refill(unsigned long data) | 1316 | static void nv_do_rx_refill(unsigned long data) |
1281 | { | 1317 | { |
1282 | struct net_device *dev = (struct net_device *) data; | 1318 | struct net_device *dev = (struct net_device *) data; |
@@ -1305,6 +1341,7 @@ static void nv_do_rx_refill(unsigned long data) | |||
1305 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); | 1341 | enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); |
1306 | } | 1342 | } |
1307 | } | 1343 | } |
1344 | #endif | ||
1308 | 1345 | ||
1309 | static void nv_init_rx(struct net_device *dev) | 1346 | static void nv_init_rx(struct net_device *dev) |
1310 | { | 1347 | { |
@@ -1315,9 +1352,9 @@ static void nv_init_rx(struct net_device *dev) | |||
1315 | np->refill_rx = 0; | 1352 | np->refill_rx = 0; |
1316 | for (i = 0; i < np->rx_ring_size; i++) | 1353 | for (i = 0; i < np->rx_ring_size; i++) |
1317 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1354 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1318 | np->rx_ring.orig[i].FlagLen = 0; | 1355 | np->rx_ring.orig[i].flaglen = 0; |
1319 | else | 1356 | else |
1320 | np->rx_ring.ex[i].FlagLen = 0; | 1357 | np->rx_ring.ex[i].flaglen = 0; |
1321 | } | 1358 | } |
1322 | 1359 | ||
1323 | static void nv_init_tx(struct net_device *dev) | 1360 | static void nv_init_tx(struct net_device *dev) |
@@ -1328,9 +1365,9 @@ static void nv_init_tx(struct net_device *dev) | |||
1328 | np->next_tx = np->nic_tx = 0; | 1365 | np->next_tx = np->nic_tx = 0; |
1329 | for (i = 0; i < np->tx_ring_size; i++) { | 1366 | for (i = 0; i < np->tx_ring_size; i++) { |
1330 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1367 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1331 | np->tx_ring.orig[i].FlagLen = 0; | 1368 | np->tx_ring.orig[i].flaglen = 0; |
1332 | else | 1369 | else |
1333 | np->tx_ring.ex[i].FlagLen = 0; | 1370 | np->tx_ring.ex[i].flaglen = 0; |
1334 | np->tx_skbuff[i] = NULL; | 1371 | np->tx_skbuff[i] = NULL; |
1335 | np->tx_dma[i] = 0; | 1372 | np->tx_dma[i] = 0; |
1336 | } | 1373 | } |
@@ -1373,9 +1410,9 @@ static void nv_drain_tx(struct net_device *dev) | |||
1373 | 1410 | ||
1374 | for (i = 0; i < np->tx_ring_size; i++) { | 1411 | for (i = 0; i < np->tx_ring_size; i++) { |
1375 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1412 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1376 | np->tx_ring.orig[i].FlagLen = 0; | 1413 | np->tx_ring.orig[i].flaglen = 0; |
1377 | else | 1414 | else |
1378 | np->tx_ring.ex[i].FlagLen = 0; | 1415 | np->tx_ring.ex[i].flaglen = 0; |
1379 | if (nv_release_txskb(dev, i)) | 1416 | if (nv_release_txskb(dev, i)) |
1380 | np->stats.tx_dropped++; | 1417 | np->stats.tx_dropped++; |
1381 | } | 1418 | } |
@@ -1387,9 +1424,9 @@ static void nv_drain_rx(struct net_device *dev) | |||
1387 | int i; | 1424 | int i; |
1388 | for (i = 0; i < np->rx_ring_size; i++) { | 1425 | for (i = 0; i < np->rx_ring_size; i++) { |
1389 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1426 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1390 | np->rx_ring.orig[i].FlagLen = 0; | 1427 | np->rx_ring.orig[i].flaglen = 0; |
1391 | else | 1428 | else |
1392 | np->rx_ring.ex[i].FlagLen = 0; | 1429 | np->rx_ring.ex[i].flaglen = 0; |
1393 | wmb(); | 1430 | wmb(); |
1394 | if (np->rx_skbuff[i]) { | 1431 | if (np->rx_skbuff[i]) { |
1395 | pci_unmap_single(np->pci_dev, np->rx_dma[i], | 1432 | pci_unmap_single(np->pci_dev, np->rx_dma[i], |
@@ -1450,17 +1487,17 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1450 | np->tx_dma_len[nr] = bcnt; | 1487 | np->tx_dma_len[nr] = bcnt; |
1451 | 1488 | ||
1452 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1489 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1453 | np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); | 1490 | np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]); |
1454 | np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); | 1491 | np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); |
1455 | } else { | 1492 | } else { |
1456 | np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; | 1493 | np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32; |
1457 | np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; | 1494 | np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; |
1458 | np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); | 1495 | np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); |
1459 | } | 1496 | } |
1460 | tx_flags = np->tx_flags; | 1497 | tx_flags = np->tx_flags; |
1461 | offset += bcnt; | 1498 | offset += bcnt; |
1462 | size -= bcnt; | 1499 | size -= bcnt; |
1463 | } while(size); | 1500 | } while (size); |
1464 | 1501 | ||
1465 | /* setup the fragments */ | 1502 | /* setup the fragments */ |
1466 | for (i = 0; i < fragments; i++) { | 1503 | for (i = 0; i < fragments; i++) { |
@@ -1477,12 +1514,12 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1477 | np->tx_dma_len[nr] = bcnt; | 1514 | np->tx_dma_len[nr] = bcnt; |
1478 | 1515 | ||
1479 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1516 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1480 | np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); | 1517 | np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]); |
1481 | np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); | 1518 | np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); |
1482 | } else { | 1519 | } else { |
1483 | np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; | 1520 | np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32; |
1484 | np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; | 1521 | np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; |
1485 | np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); | 1522 | np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); |
1486 | } | 1523 | } |
1487 | offset += bcnt; | 1524 | offset += bcnt; |
1488 | size -= bcnt; | 1525 | size -= bcnt; |
@@ -1491,9 +1528,9 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1491 | 1528 | ||
1492 | /* set last fragment flag */ | 1529 | /* set last fragment flag */ |
1493 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1530 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1494 | np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra); | 1531 | np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra); |
1495 | } else { | 1532 | } else { |
1496 | np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra); | 1533 | np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra); |
1497 | } | 1534 | } |
1498 | 1535 | ||
1499 | np->tx_skbuff[nr] = skb; | 1536 | np->tx_skbuff[nr] = skb; |
@@ -1512,10 +1549,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1512 | 1549 | ||
1513 | /* set tx flags */ | 1550 | /* set tx flags */ |
1514 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1551 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1515 | np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); | 1552 | np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); |
1516 | } else { | 1553 | } else { |
1517 | np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan); | 1554 | np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan); |
1518 | np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); | 1555 | np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); |
1519 | } | 1556 | } |
1520 | 1557 | ||
1521 | dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", | 1558 | dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", |
@@ -1547,7 +1584,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1547 | static void nv_tx_done(struct net_device *dev) | 1584 | static void nv_tx_done(struct net_device *dev) |
1548 | { | 1585 | { |
1549 | struct fe_priv *np = netdev_priv(dev); | 1586 | struct fe_priv *np = netdev_priv(dev); |
1550 | u32 Flags; | 1587 | u32 flags; |
1551 | unsigned int i; | 1588 | unsigned int i; |
1552 | struct sk_buff *skb; | 1589 | struct sk_buff *skb; |
1553 | 1590 | ||
@@ -1555,22 +1592,22 @@ static void nv_tx_done(struct net_device *dev) | |||
1555 | i = np->nic_tx % np->tx_ring_size; | 1592 | i = np->nic_tx % np->tx_ring_size; |
1556 | 1593 | ||
1557 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) | 1594 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
1558 | Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen); | 1595 | flags = le32_to_cpu(np->tx_ring.orig[i].flaglen); |
1559 | else | 1596 | else |
1560 | Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen); | 1597 | flags = le32_to_cpu(np->tx_ring.ex[i].flaglen); |
1561 | 1598 | ||
1562 | dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n", | 1599 | dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n", |
1563 | dev->name, np->nic_tx, Flags); | 1600 | dev->name, np->nic_tx, flags); |
1564 | if (Flags & NV_TX_VALID) | 1601 | if (flags & NV_TX_VALID) |
1565 | break; | 1602 | break; |
1566 | if (np->desc_ver == DESC_VER_1) { | 1603 | if (np->desc_ver == DESC_VER_1) { |
1567 | if (Flags & NV_TX_LASTPACKET) { | 1604 | if (flags & NV_TX_LASTPACKET) { |
1568 | skb = np->tx_skbuff[i]; | 1605 | skb = np->tx_skbuff[i]; |
1569 | if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| | 1606 | if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| |
1570 | NV_TX_UNDERFLOW|NV_TX_ERROR)) { | 1607 | NV_TX_UNDERFLOW|NV_TX_ERROR)) { |
1571 | if (Flags & NV_TX_UNDERFLOW) | 1608 | if (flags & NV_TX_UNDERFLOW) |
1572 | np->stats.tx_fifo_errors++; | 1609 | np->stats.tx_fifo_errors++; |
1573 | if (Flags & NV_TX_CARRIERLOST) | 1610 | if (flags & NV_TX_CARRIERLOST) |
1574 | np->stats.tx_carrier_errors++; | 1611 | np->stats.tx_carrier_errors++; |
1575 | np->stats.tx_errors++; | 1612 | np->stats.tx_errors++; |
1576 | } else { | 1613 | } else { |
@@ -1579,13 +1616,13 @@ static void nv_tx_done(struct net_device *dev) | |||
1579 | } | 1616 | } |
1580 | } | 1617 | } |
1581 | } else { | 1618 | } else { |
1582 | if (Flags & NV_TX2_LASTPACKET) { | 1619 | if (flags & NV_TX2_LASTPACKET) { |
1583 | skb = np->tx_skbuff[i]; | 1620 | skb = np->tx_skbuff[i]; |
1584 | if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| | 1621 | if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| |
1585 | NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { | 1622 | NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { |
1586 | if (Flags & NV_TX2_UNDERFLOW) | 1623 | if (flags & NV_TX2_UNDERFLOW) |
1587 | np->stats.tx_fifo_errors++; | 1624 | np->stats.tx_fifo_errors++; |
1588 | if (Flags & NV_TX2_CARRIERLOST) | 1625 | if (flags & NV_TX2_CARRIERLOST) |
1589 | np->stats.tx_carrier_errors++; | 1626 | np->stats.tx_carrier_errors++; |
1590 | np->stats.tx_errors++; | 1627 | np->stats.tx_errors++; |
1591 | } else { | 1628 | } else { |
@@ -1638,29 +1675,29 @@ static void nv_tx_timeout(struct net_device *dev) | |||
1638 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1675 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1639 | printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", | 1676 | printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", |
1640 | i, | 1677 | i, |
1641 | le32_to_cpu(np->tx_ring.orig[i].PacketBuffer), | 1678 | le32_to_cpu(np->tx_ring.orig[i].buf), |
1642 | le32_to_cpu(np->tx_ring.orig[i].FlagLen), | 1679 | le32_to_cpu(np->tx_ring.orig[i].flaglen), |
1643 | le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer), | 1680 | le32_to_cpu(np->tx_ring.orig[i+1].buf), |
1644 | le32_to_cpu(np->tx_ring.orig[i+1].FlagLen), | 1681 | le32_to_cpu(np->tx_ring.orig[i+1].flaglen), |
1645 | le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer), | 1682 | le32_to_cpu(np->tx_ring.orig[i+2].buf), |
1646 | le32_to_cpu(np->tx_ring.orig[i+2].FlagLen), | 1683 | le32_to_cpu(np->tx_ring.orig[i+2].flaglen), |
1647 | le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer), | 1684 | le32_to_cpu(np->tx_ring.orig[i+3].buf), |
1648 | le32_to_cpu(np->tx_ring.orig[i+3].FlagLen)); | 1685 | le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); |
1649 | } else { | 1686 | } else { |
1650 | printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", | 1687 | printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", |
1651 | i, | 1688 | i, |
1652 | le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh), | 1689 | le32_to_cpu(np->tx_ring.ex[i].bufhigh), |
1653 | le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow), | 1690 | le32_to_cpu(np->tx_ring.ex[i].buflow), |
1654 | le32_to_cpu(np->tx_ring.ex[i].FlagLen), | 1691 | le32_to_cpu(np->tx_ring.ex[i].flaglen), |
1655 | le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh), | 1692 | le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), |
1656 | le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow), | 1693 | le32_to_cpu(np->tx_ring.ex[i+1].buflow), |
1657 | le32_to_cpu(np->tx_ring.ex[i+1].FlagLen), | 1694 | le32_to_cpu(np->tx_ring.ex[i+1].flaglen), |
1658 | le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh), | 1695 | le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), |
1659 | le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow), | 1696 | le32_to_cpu(np->tx_ring.ex[i+2].buflow), |
1660 | le32_to_cpu(np->tx_ring.ex[i+2].FlagLen), | 1697 | le32_to_cpu(np->tx_ring.ex[i+2].flaglen), |
1661 | le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh), | 1698 | le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), |
1662 | le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow), | 1699 | le32_to_cpu(np->tx_ring.ex[i+3].buflow), |
1663 | le32_to_cpu(np->tx_ring.ex[i+3].FlagLen)); | 1700 | le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); |
1664 | } | 1701 | } |
1665 | } | 1702 | } |
1666 | } | 1703 | } |
@@ -1697,7 +1734,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen) | |||
1697 | int protolen; /* length as stored in the proto field */ | 1734 | int protolen; /* length as stored in the proto field */ |
1698 | 1735 | ||
1699 | /* 1) calculate len according to header */ | 1736 | /* 1) calculate len according to header */ |
1700 | if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) { | 1737 | if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { |
1701 | protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); | 1738 | protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); |
1702 | hdrlen = VLAN_HLEN; | 1739 | hdrlen = VLAN_HLEN; |
1703 | } else { | 1740 | } else { |
@@ -1740,13 +1777,14 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen) | |||
1740 | } | 1777 | } |
1741 | } | 1778 | } |
1742 | 1779 | ||
1743 | static void nv_rx_process(struct net_device *dev) | 1780 | static int nv_rx_process(struct net_device *dev, int limit) |
1744 | { | 1781 | { |
1745 | struct fe_priv *np = netdev_priv(dev); | 1782 | struct fe_priv *np = netdev_priv(dev); |
1746 | u32 Flags; | 1783 | u32 flags; |
1747 | u32 vlanflags = 0; | 1784 | u32 vlanflags = 0; |
1785 | int count; | ||
1748 | 1786 | ||
1749 | for (;;) { | 1787 | for (count = 0; count < limit; ++count) { |
1750 | struct sk_buff *skb; | 1788 | struct sk_buff *skb; |
1751 | int len; | 1789 | int len; |
1752 | int i; | 1790 | int i; |
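nv_rx_process() now takes a limit and reports how many packets it consumed, so one loop serves both the classic interrupt path (limit = dev->weight) and the NAPI poll (limit derived from quota/budget). The shape of the bounded loop, modeled on a toy ring where nonzero means a completed packet:

#include <stdio.h>

static int ring[8] = { 1, 1, 1, 1, 1, 0, 0, 0 };
static unsigned int cur;

/* consume at most 'limit' packets, tell the caller how many were handled */
static int rx_process(int limit)
{
        int count;

        for (count = 0; count < limit; count++) {
                unsigned int i = cur % 8;

                if (!ring[i])           /* still owned by hardware */
                        break;
                ring[i] = 0;            /* "deliver" the packet */
                cur++;
        }
        return count;
}

int main(void)
{
        printf("consumed %d of budget 3\n", rx_process(3)); /* 3 */
        printf("consumed %d of budget 3\n", rx_process(3)); /* 2: ring ran dry */
        return 0;
}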
@@ -1755,18 +1793,18 @@ static void nv_rx_process(struct net_device *dev) | |||
1755 | 1793 | ||
1756 | i = np->cur_rx % np->rx_ring_size; | 1794 | i = np->cur_rx % np->rx_ring_size; |
1757 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 1795 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1758 | Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen); | 1796 | flags = le32_to_cpu(np->rx_ring.orig[i].flaglen); |
1759 | len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); | 1797 | len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); |
1760 | } else { | 1798 | } else { |
1761 | Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen); | 1799 | flags = le32_to_cpu(np->rx_ring.ex[i].flaglen); |
1762 | len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); | 1800 | len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); |
1763 | vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow); | 1801 | vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow); |
1764 | } | 1802 | } |
1765 | 1803 | ||
1766 | dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", | 1804 | dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n", |
1767 | dev->name, np->cur_rx, Flags); | 1805 | dev->name, np->cur_rx, flags); |
1768 | 1806 | ||
1769 | if (Flags & NV_RX_AVAIL) | 1807 | if (flags & NV_RX_AVAIL) |
1770 | break; /* still owned by hardware, */ | 1808 | break; /* still owned by hardware, */ |
1771 | 1809 | ||
1772 | /* | 1810 | /* |
@@ -1780,7 +1818,7 @@ static void nv_rx_process(struct net_device *dev) | |||
1780 | 1818 | ||
1781 | { | 1819 | { |
1782 | int j; | 1820 | int j; |
1783 | dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags); | 1821 | dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); |
1784 | for (j=0; j<64; j++) { | 1822 | for (j=0; j<64; j++) { |
1785 | if ((j%16) == 0) | 1823 | if ((j%16) == 0) |
1786 | dprintk("\n%03x:", j); | 1824 | dprintk("\n%03x:", j); |
@@ -1790,30 +1828,30 @@ static void nv_rx_process(struct net_device *dev) | |||
1790 | } | 1828 | } |
1791 | /* look at what we actually got: */ | 1829 | /* look at what we actually got: */ |
1792 | if (np->desc_ver == DESC_VER_1) { | 1830 | if (np->desc_ver == DESC_VER_1) { |
1793 | if (!(Flags & NV_RX_DESCRIPTORVALID)) | 1831 | if (!(flags & NV_RX_DESCRIPTORVALID)) |
1794 | goto next_pkt; | 1832 | goto next_pkt; |
1795 | 1833 | ||
1796 | if (Flags & NV_RX_ERROR) { | 1834 | if (flags & NV_RX_ERROR) { |
1797 | if (Flags & NV_RX_MISSEDFRAME) { | 1835 | if (flags & NV_RX_MISSEDFRAME) { |
1798 | np->stats.rx_missed_errors++; | 1836 | np->stats.rx_missed_errors++; |
1799 | np->stats.rx_errors++; | 1837 | np->stats.rx_errors++; |
1800 | goto next_pkt; | 1838 | goto next_pkt; |
1801 | } | 1839 | } |
1802 | if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { | 1840 | if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { |
1803 | np->stats.rx_errors++; | 1841 | np->stats.rx_errors++; |
1804 | goto next_pkt; | 1842 | goto next_pkt; |
1805 | } | 1843 | } |
1806 | if (Flags & NV_RX_CRCERR) { | 1844 | if (flags & NV_RX_CRCERR) { |
1807 | np->stats.rx_crc_errors++; | 1845 | np->stats.rx_crc_errors++; |
1808 | np->stats.rx_errors++; | 1846 | np->stats.rx_errors++; |
1809 | goto next_pkt; | 1847 | goto next_pkt; |
1810 | } | 1848 | } |
1811 | if (Flags & NV_RX_OVERFLOW) { | 1849 | if (flags & NV_RX_OVERFLOW) { |
1812 | np->stats.rx_over_errors++; | 1850 | np->stats.rx_over_errors++; |
1813 | np->stats.rx_errors++; | 1851 | np->stats.rx_errors++; |
1814 | goto next_pkt; | 1852 | goto next_pkt; |
1815 | } | 1853 | } |
1816 | if (Flags & NV_RX_ERROR4) { | 1854 | if (flags & NV_RX_ERROR4) { |
1817 | len = nv_getlen(dev, np->rx_skbuff[i]->data, len); | 1855 | len = nv_getlen(dev, np->rx_skbuff[i]->data, len); |
1818 | if (len < 0) { | 1856 | if (len < 0) { |
1819 | np->stats.rx_errors++; | 1857 | np->stats.rx_errors++; |
@@ -1821,32 +1859,32 @@ static void nv_rx_process(struct net_device *dev) | |||
1821 | } | 1859 | } |
1822 | } | 1860 | } |
1823 | /* framing errors are soft errors. */ | 1861 | /* framing errors are soft errors. */ |
1824 | if (Flags & NV_RX_FRAMINGERR) { | 1862 | if (flags & NV_RX_FRAMINGERR) { |
1825 | if (Flags & NV_RX_SUBSTRACT1) { | 1863 | if (flags & NV_RX_SUBSTRACT1) { |
1826 | len--; | 1864 | len--; |
1827 | } | 1865 | } |
1828 | } | 1866 | } |
1829 | } | 1867 | } |
1830 | } else { | 1868 | } else { |
1831 | if (!(Flags & NV_RX2_DESCRIPTORVALID)) | 1869 | if (!(flags & NV_RX2_DESCRIPTORVALID)) |
1832 | goto next_pkt; | 1870 | goto next_pkt; |
1833 | 1871 | ||
1834 | if (Flags & NV_RX2_ERROR) { | 1872 | if (flags & NV_RX2_ERROR) { |
1835 | if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { | 1873 | if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { |
1836 | np->stats.rx_errors++; | 1874 | np->stats.rx_errors++; |
1837 | goto next_pkt; | 1875 | goto next_pkt; |
1838 | } | 1876 | } |
1839 | if (Flags & NV_RX2_CRCERR) { | 1877 | if (flags & NV_RX2_CRCERR) { |
1840 | np->stats.rx_crc_errors++; | 1878 | np->stats.rx_crc_errors++; |
1841 | np->stats.rx_errors++; | 1879 | np->stats.rx_errors++; |
1842 | goto next_pkt; | 1880 | goto next_pkt; |
1843 | } | 1881 | } |
1844 | if (Flags & NV_RX2_OVERFLOW) { | 1882 | if (flags & NV_RX2_OVERFLOW) { |
1845 | np->stats.rx_over_errors++; | 1883 | np->stats.rx_over_errors++; |
1846 | np->stats.rx_errors++; | 1884 | np->stats.rx_errors++; |
1847 | goto next_pkt; | 1885 | goto next_pkt; |
1848 | } | 1886 | } |
1849 | if (Flags & NV_RX2_ERROR4) { | 1887 | if (flags & NV_RX2_ERROR4) { |
1850 | len = nv_getlen(dev, np->rx_skbuff[i]->data, len); | 1888 | len = nv_getlen(dev, np->rx_skbuff[i]->data, len); |
1851 | if (len < 0) { | 1889 | if (len < 0) { |
1852 | np->stats.rx_errors++; | 1890 | np->stats.rx_errors++; |
@@ -1854,17 +1892,17 @@ static void nv_rx_process(struct net_device *dev) | |||
1854 | } | 1892 | } |
1855 | } | 1893 | } |
1856 | /* framing errors are soft errors */ | 1894 | /* framing errors are soft errors */ |
1857 | if (Flags & NV_RX2_FRAMINGERR) { | 1895 | if (flags & NV_RX2_FRAMINGERR) { |
1858 | if (Flags & NV_RX2_SUBSTRACT1) { | 1896 | if (flags & NV_RX2_SUBSTRACT1) { |
1859 | len--; | 1897 | len--; |
1860 | } | 1898 | } |
1861 | } | 1899 | } |
1862 | } | 1900 | } |
1863 | if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) { | 1901 | if (np->rx_csum) { |
1864 | Flags &= NV_RX2_CHECKSUMMASK; | 1902 | flags &= NV_RX2_CHECKSUMMASK; |
1865 | if (Flags == NV_RX2_CHECKSUMOK1 || | 1903 | if (flags == NV_RX2_CHECKSUMOK1 || |
1866 | Flags == NV_RX2_CHECKSUMOK2 || | 1904 | flags == NV_RX2_CHECKSUMOK2 || |
1867 | Flags == NV_RX2_CHECKSUMOK3) { | 1905 | flags == NV_RX2_CHECKSUMOK3) { |
1868 | dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); | 1906 | dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); |
1869 | np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; | 1907 | np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; |
1870 | } else { | 1908 | } else { |
@@ -1880,17 +1918,27 @@ static void nv_rx_process(struct net_device *dev) | |||
1880 | skb->protocol = eth_type_trans(skb, dev); | 1918 | skb->protocol = eth_type_trans(skb, dev); |
1881 | dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", | 1919 | dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", |
1882 | dev->name, np->cur_rx, len, skb->protocol); | 1920 | dev->name, np->cur_rx, len, skb->protocol); |
1883 | if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) { | 1921 | #ifdef CONFIG_FORCEDETH_NAPI |
1884 | vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK); | 1922 | if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) |
1885 | } else { | 1923 | vlan_hwaccel_receive_skb(skb, np->vlangrp, |
1924 | vlanflags & NV_RX3_VLAN_TAG_MASK); | ||
1925 | else | ||
1926 | netif_receive_skb(skb); | ||
1927 | #else | ||
1928 | if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) | ||
1929 | vlan_hwaccel_rx(skb, np->vlangrp, | ||
1930 | vlanflags & NV_RX3_VLAN_TAG_MASK); | ||
1931 | else | ||
1886 | netif_rx(skb); | 1932 | netif_rx(skb); |
1887 | } | 1933 | #endif |
1888 | dev->last_rx = jiffies; | 1934 | dev->last_rx = jiffies; |
1889 | np->stats.rx_packets++; | 1935 | np->stats.rx_packets++; |
1890 | np->stats.rx_bytes += len; | 1936 | np->stats.rx_bytes += len; |
1891 | next_pkt: | 1937 | next_pkt: |
1892 | np->cur_rx++; | 1938 | np->cur_rx++; |
1893 | } | 1939 | } |
1940 | |||
1941 | return count; | ||
1894 | } | 1942 | } |
1895 | 1943 | ||
1896 | static void set_bufsize(struct net_device *dev) | 1944 | static void set_bufsize(struct net_device *dev) |
@@ -1990,7 +2038,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr) | |||
1990 | struct fe_priv *np = netdev_priv(dev); | 2038 | struct fe_priv *np = netdev_priv(dev); |
1991 | struct sockaddr *macaddr = (struct sockaddr*)addr; | 2039 | struct sockaddr *macaddr = (struct sockaddr*)addr; |
1992 | 2040 | ||
1993 | if(!is_valid_ether_addr(macaddr->sa_data)) | 2041 | if (!is_valid_ether_addr(macaddr->sa_data)) |
1994 | return -EADDRNOTAVAIL; | 2042 | return -EADDRNOTAVAIL; |
1995 | 2043 | ||
1996 | /* synchronized against open : rtnl_lock() held by caller */ | 2044 | /* synchronized against open : rtnl_lock() held by caller */ |
@@ -2032,7 +2080,6 @@ static void nv_set_multicast(struct net_device *dev) | |||
2032 | memset(mask, 0, sizeof(mask)); | 2080 | memset(mask, 0, sizeof(mask)); |
2033 | 2081 | ||
2034 | if (dev->flags & IFF_PROMISC) { | 2082 | if (dev->flags & IFF_PROMISC) { |
2035 | printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); | ||
2036 | pff |= NVREG_PFF_PROMISC; | 2083 | pff |= NVREG_PFF_PROMISC; |
2037 | } else { | 2084 | } else { |
2038 | pff |= NVREG_PFF_MYADDR; | 2085 | pff |= NVREG_PFF_MYADDR; |
@@ -2283,20 +2330,20 @@ set_speed: | |||
2283 | lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); | 2330 | lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); |
2284 | 2331 | ||
2285 | switch (adv_pause) { | 2332 | switch (adv_pause) { |
2286 | case (ADVERTISE_PAUSE_CAP): | 2333 | case ADVERTISE_PAUSE_CAP: |
2287 | if (lpa_pause & LPA_PAUSE_CAP) { | 2334 | if (lpa_pause & LPA_PAUSE_CAP) { |
2288 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | 2335 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; |
2289 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | 2336 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) |
2290 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | 2337 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; |
2291 | } | 2338 | } |
2292 | break; | 2339 | break; |
2293 | case (ADVERTISE_PAUSE_ASYM): | 2340 | case ADVERTISE_PAUSE_ASYM: |
2294 | if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) | 2341 | if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) |
2295 | { | 2342 | { |
2296 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | 2343 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; |
2297 | } | 2344 | } |
2298 | break; | 2345 | break; |
2299 | case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM): | 2346 | case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: |
2300 | if (lpa_pause & LPA_PAUSE_CAP) | 2347 | if (lpa_pause & LPA_PAUSE_CAP) |
2301 | { | 2348 | { |
2302 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | 2349 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; |
@@ -2376,14 +2423,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |||
2376 | nv_tx_done(dev); | 2423 | nv_tx_done(dev); |
2377 | spin_unlock(&np->lock); | 2424 | spin_unlock(&np->lock); |
2378 | 2425 | ||
2379 | nv_rx_process(dev); | ||
2380 | if (nv_alloc_rx(dev)) { | ||
2381 | spin_lock(&np->lock); | ||
2382 | if (!np->in_shutdown) | ||
2383 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | ||
2384 | spin_unlock(&np->lock); | ||
2385 | } | ||
2386 | |||
2387 | if (events & NVREG_IRQ_LINK) { | 2426 | if (events & NVREG_IRQ_LINK) { |
2388 | spin_lock(&np->lock); | 2427 | spin_lock(&np->lock); |
2389 | nv_link_irq(dev); | 2428 | nv_link_irq(dev); |
@@ -2403,6 +2442,29 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |||
2403 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | 2442 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", |
2404 | dev->name, events); | 2443 | dev->name, events); |
2405 | } | 2444 | } |
2445 | #ifdef CONFIG_FORCEDETH_NAPI | ||
2446 | if (events & NVREG_IRQ_RX_ALL) { | ||
2447 | netif_rx_schedule(dev); | ||
2448 | |||
2449 | /* Disable further receive irqs */ | ||
2450 | spin_lock(&np->lock); | ||
2451 | np->irqmask &= ~NVREG_IRQ_RX_ALL; | ||
2452 | |||
2453 | if (np->msi_flags & NV_MSI_X_ENABLED) | ||
2454 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | ||
2455 | else | ||
2456 | writel(np->irqmask, base + NvRegIrqMask); | ||
2457 | spin_unlock(&np->lock); | ||
2458 | } | ||
2459 | #else | ||
2460 | nv_rx_process(dev, dev->weight); | ||
2461 | if (nv_alloc_rx(dev)) { | ||
2462 | spin_lock(&np->lock); | ||
2463 | if (!np->in_shutdown) | ||
2464 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | ||
2465 | spin_unlock(&np->lock); | ||
2466 | } | ||
2467 | #endif | ||
2406 | if (i > max_interrupt_work) { | 2468 | if (i > max_interrupt_work) { |
2407 | spin_lock(&np->lock); | 2469 | spin_lock(&np->lock); |
2408 | /* disable interrupts on the nic */ | 2470 | /* disable interrupts on the nic */ |
@@ -2474,6 +2536,63 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) | |||
2474 | return IRQ_RETVAL(i); | 2536 | return IRQ_RETVAL(i); |
2475 | } | 2537 | } |
2476 | 2538 | ||
2539 | #ifdef CONFIG_FORCEDETH_NAPI | ||
2540 | static int nv_napi_poll(struct net_device *dev, int *budget) | ||
2541 | { | ||
2542 | int pkts, limit = min(*budget, dev->quota); | ||
2543 | struct fe_priv *np = netdev_priv(dev); | ||
2544 | u8 __iomem *base = get_hwbase(dev); | ||
2545 | |||
2546 | pkts = nv_rx_process(dev, limit); | ||
2547 | |||
2548 | if (nv_alloc_rx(dev)) { | ||
2549 | spin_lock_irq(&np->lock); | ||
2550 | if (!np->in_shutdown) | ||
2551 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | ||
2552 | spin_unlock_irq(&np->lock); | ||
2553 | } | ||
2554 | |||
2555 | if (pkts < limit) { | ||
2556 | /* all done, no more packets present */ | ||
2557 | netif_rx_complete(dev); | ||
2558 | |||
2559 | /* re-enable receive interrupts */ | ||
2560 | spin_lock_irq(&np->lock); | ||
2561 | np->irqmask |= NVREG_IRQ_RX_ALL; | ||
2562 | if (np->msi_flags & NV_MSI_X_ENABLED) | ||
2563 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | ||
2564 | else | ||
2565 | writel(np->irqmask, base + NvRegIrqMask); | ||
2566 | spin_unlock_irq(&np->lock); | ||
2567 | return 0; | ||
2568 | } else { | ||
2569 | /* used up our quantum, so reschedule */ | ||
2570 | dev->quota -= pkts; | ||
2571 | *budget -= pkts; | ||
2572 | return 1; | ||
2573 | } | ||
2574 | } | ||
2575 | #endif | ||
2576 | |||
2577 | #ifdef CONFIG_FORCEDETH_NAPI | ||
2578 | static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | ||
2579 | { | ||
2580 | struct net_device *dev = (struct net_device *) data; | ||
2581 | u8 __iomem *base = get_hwbase(dev); | ||
2582 | u32 events; | ||
2583 | |||
2584 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; | ||
2585 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); | ||
2586 | |||
2587 | if (events) { | ||
2588 | netif_rx_schedule(dev); | ||
2589 | /* disable receive interrupts on the nic */ | ||
2590 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | ||
2591 | pci_push(base); | ||
2592 | } | ||
2593 | return IRQ_HANDLED; | ||
2594 | } | ||
2595 | #else | ||
2477 | static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | 2596 | static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) |
2478 | { | 2597 | { |
2479 | struct net_device *dev = (struct net_device *) data; | 2598 | struct net_device *dev = (struct net_device *) data; |
@@ -2492,7 +2611,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | |||
2492 | if (!(events & np->irqmask)) | 2611 | if (!(events & np->irqmask)) |
2493 | break; | 2612 | break; |
2494 | 2613 | ||
2495 | nv_rx_process(dev); | 2614 | nv_rx_process(dev, dev->weight); |
2496 | if (nv_alloc_rx(dev)) { | 2615 | if (nv_alloc_rx(dev)) { |
2497 | spin_lock_irq(&np->lock); | 2616 | spin_lock_irq(&np->lock); |
2498 | if (!np->in_shutdown) | 2617 | if (!np->in_shutdown) |
@@ -2514,12 +2633,12 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | |||
2514 | spin_unlock_irq(&np->lock); | 2633 | spin_unlock_irq(&np->lock); |
2515 | break; | 2634 | break; |
2516 | } | 2635 | } |
2517 | |||
2518 | } | 2636 | } |
2519 | dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); | 2637 | dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); |
2520 | 2638 | ||
2521 | return IRQ_RETVAL(i); | 2639 | return IRQ_RETVAL(i); |
2522 | } | 2640 | } |
2641 | #endif | ||
2523 | 2642 | ||
2524 | static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) | 2643 | static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) |
2525 | { | 2644 | { |
@@ -3057,9 +3176,18 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
3057 | if (netif_running(dev)) | 3176 | if (netif_running(dev)) |
3058 | printk(KERN_INFO "%s: link down.\n", dev->name); | 3177 | printk(KERN_INFO "%s: link down.\n", dev->name); |
3059 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | 3178 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
3060 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | 3179 | if (np->phy_model == PHY_MODEL_MARVELL_E3016) { |
3061 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | 3180 | bmcr |= BMCR_ANENABLE; |
3062 | 3181 | /* reset the phy in order for settings to stick, | |
3182 | * and cause autoneg to start */ | ||
3183 | if (phy_reset(dev, bmcr)) { | ||
3184 | printk(KERN_INFO "%s: phy reset failed\n", dev->name); | ||
3185 | return -EINVAL; | ||
3186 | } | ||
3187 | } else { | ||
3188 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | ||
3189 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | ||
3190 | } | ||
3063 | } else { | 3191 | } else { |
3064 | int adv, bmcr; | 3192 | int adv, bmcr; |
3065 | 3193 | ||
@@ -3099,17 +3227,19 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
3099 | bmcr |= BMCR_FULLDPLX; | 3227 | bmcr |= BMCR_FULLDPLX; |
3100 | if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) | 3228 | if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) |
3101 | bmcr |= BMCR_SPEED100; | 3229 | bmcr |= BMCR_SPEED100; |
3102 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | ||
3103 | if (np->phy_oui == PHY_OUI_MARVELL) { | 3230 | if (np->phy_oui == PHY_OUI_MARVELL) { |
3104 | /* reset the phy */ | 3231 | /* reset the phy in order for forced mode settings to stick */ |
3105 | if (phy_reset(dev)) { | 3232 | if (phy_reset(dev, bmcr)) { |
3106 | printk(KERN_INFO "%s: phy reset failed\n", dev->name); | 3233 | printk(KERN_INFO "%s: phy reset failed\n", dev->name); |
3107 | return -EINVAL; | 3234 | return -EINVAL; |
3108 | } | 3235 | } |
3109 | } else if (netif_running(dev)) { | 3236 | } else { |
3110 | /* Wait a bit and then reconfigure the nic. */ | 3237 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); |
3111 | udelay(10); | 3238 | if (netif_running(dev)) { |
3112 | nv_linkchange(dev); | 3239 | /* Wait a bit and then reconfigure the nic. */ |
3240 | udelay(10); | ||
3241 | nv_linkchange(dev); | ||
3242 | } | ||
3113 | } | 3243 | } |
3114 | } | 3244 | } |
3115 | 3245 | ||
@@ -3166,8 +3296,17 @@ static int nv_nway_reset(struct net_device *dev) | |||
3166 | } | 3296 | } |
3167 | 3297 | ||
3168 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | 3298 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
3169 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | 3299 | if (np->phy_model == PHY_MODEL_MARVELL_E3016) { |
3170 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | 3300 | bmcr |= BMCR_ANENABLE; |
3301 | /* reset the phy in order for settings to stick */ | ||
3302 | if (phy_reset(dev, bmcr)) { | ||
3303 | printk(KERN_INFO "%s: phy reset failed\n", dev->name); | ||
3304 | return -EINVAL; | ||
3305 | } | ||
3306 | } else { | ||
3307 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | ||
3308 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | ||
3309 | } | ||
3171 | 3310 | ||
3172 | if (netif_running(dev)) { | 3311 | if (netif_running(dev)) { |
3173 | nv_start_rx(dev); | 3312 | nv_start_rx(dev); |
@@ -3245,7 +3384,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri | |||
3245 | if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) { | 3384 | if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) { |
3246 | /* fall back to old rings */ | 3385 | /* fall back to old rings */ |
3247 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 3386 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
3248 | if(rxtx_ring) | 3387 | if (rxtx_ring) |
3249 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), | 3388 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), |
3250 | rxtx_ring, ring_addr); | 3389 | rxtx_ring, ring_addr); |
3251 | } else { | 3390 | } else { |
@@ -3418,7 +3557,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* | |||
3418 | static u32 nv_get_rx_csum(struct net_device *dev) | 3557 | static u32 nv_get_rx_csum(struct net_device *dev) |
3419 | { | 3558 | { |
3420 | struct fe_priv *np = netdev_priv(dev); | 3559 | struct fe_priv *np = netdev_priv(dev); |
3421 | return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0; | 3560 | return (np->rx_csum) != 0; |
3422 | } | 3561 | } |
3423 | 3562 | ||
3424 | static int nv_set_rx_csum(struct net_device *dev, u32 data) | 3563 | static int nv_set_rx_csum(struct net_device *dev, u32 data) |
@@ -3428,22 +3567,15 @@ static int nv_set_rx_csum(struct net_device *dev, u32 data) | |||
3428 | int retcode = 0; | 3567 | int retcode = 0; |
3429 | 3568 | ||
3430 | if (np->driver_data & DEV_HAS_CHECKSUM) { | 3569 | if (np->driver_data & DEV_HAS_CHECKSUM) { |
3431 | |||
3432 | if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) || | ||
3433 | (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) { | ||
3434 | /* already set or unset */ | ||
3435 | return 0; | ||
3436 | } | ||
3437 | |||
3438 | if (data) { | 3570 | if (data) { |
3571 | np->rx_csum = 1; | ||
3439 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; | 3572 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; |
3440 | } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) { | ||
3441 | np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; | ||
3442 | } else { | 3573 | } else { |
3443 | printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n"); | 3574 | np->rx_csum = 0; |
3444 | return -EINVAL; | 3575 | /* vlan is dependent on rx checksum offload */ |
3576 | if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) | ||
3577 | np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; | ||
3445 | } | 3578 | } |
3446 | |||
3447 | if (netif_running(dev)) { | 3579 | if (netif_running(dev)) { |
3448 | spin_lock_irq(&np->lock); | 3580 | spin_lock_irq(&np->lock); |
3449 | writel(np->txrxctl_bits, base + NvRegTxRxControl); | 3581 | writel(np->txrxctl_bits, base + NvRegTxRxControl); |
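The rx checksum state is now cached in np->rx_csum, and disabling it no longer fails with -EINVAL while vlan acceleration is active: the cached flag always follows the request, but the hardware RXCHECK bit stays set whenever vlan stripping (which depends on rx checksumming) is enabled. The decision logic in isolation (the bit value below is illustrative):

#include <stdio.h>

#define NVREG_TXRXCTL_RXCHECK 0x0400u   /* illustrative bit value */

struct priv_model {
        int rx_csum;
        unsigned int txrxctl_bits;
        int vlan_enabled;
};

static void set_rx_csum(struct priv_model *np, int on)
{
        np->rx_csum = on;
        if (on)
                np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
        else if (!np->vlan_enabled)     /* vlan needs the hw bit left on */
                np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
}

int main(void)
{
        struct priv_model np = { 1, NVREG_TXRXCTL_RXCHECK, 1 };

        set_rx_csum(&np, 0);
        printf("rx_csum=%d hw bit=%#x\n", np.rx_csum,
               np.txrxctl_bits & NVREG_TXRXCTL_RXCHECK);
        return 0;
}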
@@ -3481,7 +3613,7 @@ static int nv_get_stats_count(struct net_device *dev) | |||
3481 | struct fe_priv *np = netdev_priv(dev); | 3613 | struct fe_priv *np = netdev_priv(dev); |
3482 | 3614 | ||
3483 | if (np->driver_data & DEV_HAS_STATISTICS) | 3615 | if (np->driver_data & DEV_HAS_STATISTICS) |
3484 | return (sizeof(struct nv_ethtool_stats)/sizeof(u64)); | 3616 | return sizeof(struct nv_ethtool_stats)/sizeof(u64); |
3485 | else | 3617 | else |
3486 | return 0; | 3618 | return 0; |
3487 | } | 3619 | } |
@@ -3619,7 +3751,7 @@ static int nv_loopback_test(struct net_device *dev) | |||
3619 | struct sk_buff *tx_skb, *rx_skb; | 3751 | struct sk_buff *tx_skb, *rx_skb; |
3620 | dma_addr_t test_dma_addr; | 3752 | dma_addr_t test_dma_addr; |
3621 | u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); | 3753 | u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); |
3622 | u32 Flags; | 3754 | u32 flags; |
3623 | int len, i, pkt_len; | 3755 | int len, i, pkt_len; |
3624 | u8 *pkt_data; | 3756 | u8 *pkt_data; |
3625 | u32 filter_flags = 0; | 3757 | u32 filter_flags = 0; |
@@ -3663,12 +3795,12 @@ static int nv_loopback_test(struct net_device *dev) | |||
3663 | tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE); | 3795 | tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE); |
3664 | 3796 | ||
3665 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 3797 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
3666 | np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr); | 3798 | np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); |
3667 | np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); | 3799 | np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); |
3668 | } else { | 3800 | } else { |
3669 | np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32; | 3801 | np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32; |
3670 | np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF; | 3802 | np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF; |
3671 | np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); | 3803 | np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); |
3672 | } | 3804 | } |
3673 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | 3805 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); |
3674 | pci_push(get_hwbase(dev)); | 3806 | pci_push(get_hwbase(dev)); |
@@ -3677,21 +3809,21 @@ static int nv_loopback_test(struct net_device *dev) | |||
3677 | 3809 | ||
3678 | /* check for rx of the packet */ | 3810 | /* check for rx of the packet */ |
3679 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | 3811 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
3680 | Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen); | 3812 | flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); |
3681 | len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); | 3813 | len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); |
3682 | 3814 | ||
3683 | } else { | 3815 | } else { |
3684 | Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen); | 3816 | flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); |
3685 | len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); | 3817 | len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); |
3686 | } | 3818 | } |
3687 | 3819 | ||
3688 | if (Flags & NV_RX_AVAIL) { | 3820 | if (flags & NV_RX_AVAIL) { |
3689 | ret = 0; | 3821 | ret = 0; |
3690 | } else if (np->desc_ver == DESC_VER_1) { | 3822 | } else if (np->desc_ver == DESC_VER_1) { |
3691 | if (Flags & NV_RX_ERROR) | 3823 | if (flags & NV_RX_ERROR) |
3692 | ret = 0; | 3824 | ret = 0; |
3693 | } else { | 3825 | } else { |
3694 | if (Flags & NV_RX2_ERROR) { | 3826 | if (flags & NV_RX2_ERROR) { |
3695 | ret = 0; | 3827 | ret = 0; |
3696 | } | 3828 | } |
3697 | } | 3829 | } |
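Condensed, the availability/error logic above reduces to one predicate per descriptor version. An illustrative helper (name assumed; the driver keeps the open-coded form):

    static int nv_loopback_rx_ok(u32 flags, int desc_ver)
    {
            if (flags & NV_RX_AVAIL)
                    return 0;                       /* still owned by hw */
            if (desc_ver == DESC_VER_1)
                    return !(flags & NV_RX_ERROR);  /* v1 error bit */
            return !(flags & NV_RX2_ERROR);         /* v2/v3 error bit */
    }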
@@ -3753,6 +3885,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 | |||
3753 | if (test->flags & ETH_TEST_FL_OFFLINE) { | 3885 | if (test->flags & ETH_TEST_FL_OFFLINE) { |
3754 | if (netif_running(dev)) { | 3886 | if (netif_running(dev)) { |
3755 | netif_stop_queue(dev); | 3887 | netif_stop_queue(dev); |
3888 | netif_poll_disable(dev); | ||
3756 | netif_tx_lock_bh(dev); | 3889 | netif_tx_lock_bh(dev); |
3757 | spin_lock_irq(&np->lock); | 3890 | spin_lock_irq(&np->lock); |
3758 | nv_disable_hw_interrupts(dev, np->irqmask); | 3891 | nv_disable_hw_interrupts(dev, np->irqmask); |
@@ -3811,6 +3944,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 | |||
3811 | nv_start_rx(dev); | 3944 | nv_start_rx(dev); |
3812 | nv_start_tx(dev); | 3945 | nv_start_tx(dev); |
3813 | netif_start_queue(dev); | 3946 | netif_start_queue(dev); |
3947 | netif_poll_enable(dev); | ||
3814 | nv_enable_hw_interrupts(dev, np->irqmask); | 3948 | nv_enable_hw_interrupts(dev, np->irqmask); |
3815 | } | 3949 | } |
3816 | } | 3950 | } |
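The two netif_poll_* calls added here bracket the offline self-test: NAPI polling has to be drained before interrupts and DMA are torn down, and re-armed only after rx/tx restart. The resulting ordering, collected from the two hunks above:

    netif_stop_queue(dev);      /* block new transmits */
    netif_poll_disable(dev);    /* wait out any in-flight poll */
    /* ... offline tests run with the nic quiesced ... */
    nv_start_rx(dev);
    nv_start_tx(dev);
    netif_start_queue(dev);
    netif_poll_enable(dev);     /* polling is safe again */
    nv_enable_hw_interrupts(dev, np->irqmask);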
@@ -3895,10 +4029,9 @@ static int nv_open(struct net_device *dev) | |||
3895 | 4029 | ||
3896 | dprintk(KERN_DEBUG "nv_open: begin\n"); | 4030 | dprintk(KERN_DEBUG "nv_open: begin\n"); |
3897 | 4031 | ||
3898 | /* 1) erase previous misconfiguration */ | 4032 | /* erase previous misconfiguration */ |
3899 | if (np->driver_data & DEV_HAS_POWER_CNTRL) | 4033 | if (np->driver_data & DEV_HAS_POWER_CNTRL) |
3900 | nv_mac_reset(dev); | 4034 | nv_mac_reset(dev); |
3901 | /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */ | ||
3902 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); | 4035 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); |
3903 | writel(0, base + NvRegMulticastAddrB); | 4036 | writel(0, base + NvRegMulticastAddrB); |
3904 | writel(0, base + NvRegMulticastMaskA); | 4037 | writel(0, base + NvRegMulticastMaskA); |
@@ -3913,26 +4046,22 @@ static int nv_open(struct net_device *dev) | |||
3913 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) | 4046 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) |
3914 | writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); | 4047 | writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); |
3915 | 4048 | ||
3916 | /* 2) initialize descriptor rings */ | 4049 | /* initialize descriptor rings */ |
3917 | set_bufsize(dev); | 4050 | set_bufsize(dev); |
3918 | oom = nv_init_ring(dev); | 4051 | oom = nv_init_ring(dev); |
3919 | 4052 | ||
3920 | writel(0, base + NvRegLinkSpeed); | 4053 | writel(0, base + NvRegLinkSpeed); |
3921 | writel(0, base + NvRegUnknownTransmitterReg); | 4054 | writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); |
3922 | nv_txrx_reset(dev); | 4055 | nv_txrx_reset(dev); |
3923 | writel(0, base + NvRegUnknownSetupReg6); | 4056 | writel(0, base + NvRegUnknownSetupReg6); |
3924 | 4057 | ||
3925 | np->in_shutdown = 0; | 4058 | np->in_shutdown = 0; |
3926 | 4059 | ||
3927 | /* 3) set mac address */ | 4060 | /* give hw rings */ |
3928 | nv_copy_mac_to_hw(dev); | ||
3929 | |||
3930 | /* 4) give hw rings */ | ||
3931 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | 4061 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
3932 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | 4062 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), |
3933 | base + NvRegRingSizes); | 4063 | base + NvRegRingSizes); |
3934 | 4064 | ||
3935 | /* 5) continue setup */ | ||
3936 | writel(np->linkspeed, base + NvRegLinkSpeed); | 4065 | writel(np->linkspeed, base + NvRegLinkSpeed); |
3937 | if (np->desc_ver == DESC_VER_1) | 4066 | if (np->desc_ver == DESC_VER_1) |
3938 | writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); | 4067 | writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); |
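One line in this hunk is easy to misread: the old code zeroed the formerly unknown register outright, whereas the new NvRegTransmitPoll write must not lose the MAC-address-order flag set at probe time. Expanded for readability, it is a read-modify-write that preserves exactly that one bit:

    u32 tx_poll = readl(base + NvRegTransmitPoll);

    /* clear everything except the MAC-address-order workaround bit */
    writel(tx_poll & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
           base + NvRegTransmitPoll);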
@@ -3950,7 +4079,6 @@ static int nv_open(struct net_device *dev) | |||
3950 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | 4079 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); |
3951 | writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); | 4080 | writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); |
3952 | 4081 | ||
3953 | /* 6) continue setup */ | ||
3954 | writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); | 4082 | writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); |
3955 | writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); | 4083 | writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); |
3956 | writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); | 4084 | writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); |
@@ -4020,6 +4148,8 @@ static int nv_open(struct net_device *dev) | |||
4020 | nv_start_rx(dev); | 4148 | nv_start_rx(dev); |
4021 | nv_start_tx(dev); | 4149 | nv_start_tx(dev); |
4022 | netif_start_queue(dev); | 4150 | netif_start_queue(dev); |
4151 | netif_poll_enable(dev); | ||
4152 | |||
4023 | if (ret) { | 4153 | if (ret) { |
4024 | netif_carrier_on(dev); | 4154 | netif_carrier_on(dev); |
4025 | } else { | 4155 | } else { |
@@ -4049,6 +4179,7 @@ static int nv_close(struct net_device *dev) | |||
4049 | spin_lock_irq(&np->lock); | 4179 | spin_lock_irq(&np->lock); |
4050 | np->in_shutdown = 1; | 4180 | np->in_shutdown = 1; |
4051 | spin_unlock_irq(&np->lock); | 4181 | spin_unlock_irq(&np->lock); |
4182 | netif_poll_disable(dev); | ||
4052 | synchronize_irq(dev->irq); | 4183 | synchronize_irq(dev->irq); |
4053 | 4184 | ||
4054 | del_timer_sync(&np->oom_kick); | 4185 | del_timer_sync(&np->oom_kick); |
@@ -4076,12 +4207,6 @@ static int nv_close(struct net_device *dev) | |||
4076 | if (np->wolenabled) | 4207 | if (np->wolenabled) |
4077 | nv_start_rx(dev); | 4208 | nv_start_rx(dev); |
4078 | 4209 | ||
4079 | /* special op: write back the misordered MAC address - otherwise | ||
4080 | * the next nv_probe would see a wrong address. | ||
4081 | */ | ||
4082 | writel(np->orig_mac[0], base + NvRegMacAddrA); | ||
4083 | writel(np->orig_mac[1], base + NvRegMacAddrB); | ||
4084 | |||
4085 | /* FIXME: power down nic */ | 4210 | /* FIXME: power down nic */ |
4086 | 4211 | ||
4087 | return 0; | 4212 | return 0; |
@@ -4094,7 +4219,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4094 | unsigned long addr; | 4219 | unsigned long addr; |
4095 | u8 __iomem *base; | 4220 | u8 __iomem *base; |
4096 | int err, i; | 4221 | int err, i; |
4097 | u32 powerstate; | 4222 | u32 powerstate, txreg; |
4098 | 4223 | ||
4099 | dev = alloc_etherdev(sizeof(struct fe_priv)); | 4224 | dev = alloc_etherdev(sizeof(struct fe_priv)); |
4100 | err = -ENOMEM; | 4225 | err = -ENOMEM; |
@@ -4190,6 +4315,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4190 | np->pkt_limit = NV_PKTLIMIT_2; | 4315 | np->pkt_limit = NV_PKTLIMIT_2; |
4191 | 4316 | ||
4192 | if (id->driver_data & DEV_HAS_CHECKSUM) { | 4317 | if (id->driver_data & DEV_HAS_CHECKSUM) { |
4318 | np->rx_csum = 1; | ||
4193 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; | 4319 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; |
4194 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; | 4320 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
4195 | #ifdef NETIF_F_TSO | 4321 | #ifdef NETIF_F_TSO |
@@ -4270,6 +4396,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4270 | #ifdef CONFIG_NET_POLL_CONTROLLER | 4396 | #ifdef CONFIG_NET_POLL_CONTROLLER |
4271 | dev->poll_controller = nv_poll_controller; | 4397 | dev->poll_controller = nv_poll_controller; |
4272 | #endif | 4398 | #endif |
4399 | dev->weight = 64; | ||
4400 | #ifdef CONFIG_FORCEDETH_NAPI | ||
4401 | dev->poll = nv_napi_poll; | ||
4402 | #endif | ||
4273 | SET_ETHTOOL_OPS(dev, &ops); | 4403 | SET_ETHTOOL_OPS(dev, &ops); |
4274 | dev->tx_timeout = nv_tx_timeout; | 4404 | dev->tx_timeout = nv_tx_timeout; |
4275 | dev->watchdog_timeo = NV_WATCHDOG_TIMEO; | 4405 | dev->watchdog_timeo = NV_WATCHDOG_TIMEO; |
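dev->weight and the conditional dev->poll hook follow the old (pre-napi_struct) NAPI contract: the callback consumes at most min(*budget, dev->quota) packets, decrements both counters, and signals completion with netif_rx_complete(). A sketch of that contract under assumed helper names (nv_rx_process and nv_enable_rx_irq are illustrative, not guaranteed to match the driver's internals):

    static int nv_napi_poll(struct net_device *dev, int *budget)
    {
            int limit = min(*budget, dev->quota);
            int done  = nv_rx_process(dev, limit);   /* drain rx ring */

            *budget    -= done;
            dev->quota -= done;

            if (done < limit) {
                    netif_rx_complete(dev);  /* leave polled mode */
                    nv_enable_rx_irq(dev);   /* re-enable rx interrupts */
                    return 0;                /* nothing left to poll */
            }
            return 1;                        /* more work pending */
    }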
@@ -4281,12 +4411,30 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4281 | np->orig_mac[0] = readl(base + NvRegMacAddrA); | 4411 | np->orig_mac[0] = readl(base + NvRegMacAddrA); |
4282 | np->orig_mac[1] = readl(base + NvRegMacAddrB); | 4412 | np->orig_mac[1] = readl(base + NvRegMacAddrB); |
4283 | 4413 | ||
4284 | dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; | 4414 | /* check the workaround bit for correct mac address order */ |
4285 | dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; | 4415 | txreg = readl(base + NvRegTransmitPoll); |
4286 | dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; | 4416 | if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { |
4287 | dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; | 4417 | /* mac address is already in correct order */ |
4288 | dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; | 4418 | dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; |
4289 | dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; | 4419 | dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; |
4420 | dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; | ||
4421 | dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; | ||
4422 | dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; | ||
4423 | dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; | ||
4424 | } else { | ||
4425 | /* need to reverse mac address to correct order */ | ||
4426 | dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; | ||
4427 | dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; | ||
4428 | dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; | ||
4429 | dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; | ||
4430 | dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; | ||
4431 | dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; | ||
4432 | /* set permanent address to be correct as well */ | ||
4433 | np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + | ||
4434 | (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); | ||
4435 | np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); | ||
4436 | writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); | ||
4437 | } | ||
4290 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); | 4438 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
4291 | 4439 | ||
4292 | if (!is_valid_ether_addr(dev->perm_addr)) { | 4440 | if (!is_valid_ether_addr(dev->perm_addr)) { |
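The unrolled assignments above differ only in byte order between the two branches. An equivalent compact form (illustrative only; the driver keeps the unrolled version), given that the address bytes live in orig_mac[0] bits 0-31 and orig_mac[1] bits 0-15:

    int rev = !(txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV);
    int i;

    for (i = 0; i < 6; i++) {
            int src = rev ? 5 - i : i;   /* flip byte order when needed */
            u32 reg = (src < 4) ? np->orig_mac[0] : np->orig_mac[1];

            dev->dev_addr[i] = (reg >> ((src & 3) * 8)) & 0xff;
    }

Setting NVREG_TRANSMITPOLL_MAC_ADDR_REV after the rewrite makes the fixup idempotent: a later probe (or the write-back now done in nv_remove, below) sees the flag and takes the already-correct branch.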
@@ -4309,6 +4457,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4309 | dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], | 4457 | dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], |
4310 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); | 4458 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); |
4311 | 4459 | ||
4460 | /* set mac address */ | ||
4461 | nv_copy_mac_to_hw(dev); | ||
4462 | |||
4312 | /* disable WOL */ | 4463 | /* disable WOL */ |
4313 | writel(0, base + NvRegWakeUpFlags); | 4464 | writel(0, base + NvRegWakeUpFlags); |
4314 | np->wolenabled = 0; | 4465 | np->wolenabled = 0; |
@@ -4369,6 +4520,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
4369 | if (id2 < 0 || id2 == 0xffff) | 4520 | if (id2 < 0 || id2 == 0xffff) |
4370 | continue; | 4521 | continue; |
4371 | 4522 | ||
4523 | np->phy_model = id2 & PHYID2_MODEL_MASK; | ||
4372 | id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; | 4524 | id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; |
4373 | id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; | 4525 | id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; |
4374 | dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", | 4526 | dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", |
@@ -4421,9 +4573,17 @@ out: | |||
4421 | static void __devexit nv_remove(struct pci_dev *pci_dev) | 4573 | static void __devexit nv_remove(struct pci_dev *pci_dev) |
4422 | { | 4574 | { |
4423 | struct net_device *dev = pci_get_drvdata(pci_dev); | 4575 | struct net_device *dev = pci_get_drvdata(pci_dev); |
4576 | struct fe_priv *np = netdev_priv(dev); | ||
4577 | u8 __iomem *base = get_hwbase(dev); | ||
4424 | 4578 | ||
4425 | unregister_netdev(dev); | 4579 | unregister_netdev(dev); |
4426 | 4580 | ||
4581 | /* special op: write back the misordered MAC address - otherwise | ||
4582 | * the next nv_probe would see a wrong address. | ||
4583 | */ | ||
4584 | writel(np->orig_mac[0], base + NvRegMacAddrA); | ||
4585 | writel(np->orig_mac[1], base + NvRegMacAddrB); | ||
4586 | |||
4427 | /* free all structures */ | 4587 | /* free all structures */ |
4428 | free_rings(dev); | 4588 | free_rings(dev); |
4429 | iounmap(get_hwbase(dev)); | 4589 | iounmap(get_hwbase(dev)); |
@@ -4540,7 +4700,7 @@ static struct pci_driver driver = { | |||
4540 | static int __init init_nic(void) | 4700 | static int __init init_nic(void) |
4541 | { | 4701 | { |
4542 | printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); | 4702 | printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); |
4543 | return pci_module_init(&driver); | 4703 | return pci_register_driver(&driver); |
4544 | } | 4704 | } |
4545 | 4705 | ||
4546 | static void __exit exit_nic(void) | 4706 | static void __exit exit_nic(void) |
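Finally, the registration call: by this point in the tree pci_module_init() was just a deprecated alias, and pci_register_driver() is the direct replacement; nothing else in the init path changes:

    static int __init init_nic(void)
    {
            printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n",
                   FORCEDETH_VERSION);
            return pci_register_driver(&driver);   /* was pci_module_init() */
    }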