aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Makefile2
-rwxr-xr-xdrivers/net/amd8111e.c24
-rw-r--r--drivers/net/e100.c165
-rw-r--r--drivers/net/e1000/e1000.h37
-rw-r--r--drivers/net/e1000/e1000_ethtool.c105
-rw-r--r--drivers/net/e1000/e1000_hw.c1987
-rw-r--r--drivers/net/e1000/e1000_hw.h570
-rw-r--r--drivers/net/e1000/e1000_main.c1147
-rw-r--r--drivers/net/e1000/e1000_osdep.h32
-rw-r--r--drivers/net/e1000/e1000_param.c3
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.c2
-rw-r--r--drivers/net/ixgb/ixgb.h2
-rw-r--r--drivers/net/ixgb/ixgb_ee.c24
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c4
-rw-r--r--drivers/net/ixgb/ixgb_main.c153
-rw-r--r--drivers/net/ixgb/ixgb_osdep.h3
-rw-r--r--drivers/net/pcnet32.c7
-rw-r--r--drivers/net/tg3.c488
-rw-r--r--drivers/net/tg3.h8
-rw-r--r--drivers/net/tulip/media.c1
-rw-r--r--drivers/net/tulip/tulip_core.c2
-rw-r--r--drivers/net/wireless/Kconfig2
22 files changed, 3777 insertions, 991 deletions
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 6202b10dbb4d..e038d55e4f6f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -187,7 +187,7 @@ obj-$(CONFIG_TR) += tokenring/
187obj-$(CONFIG_WAN) += wan/ 187obj-$(CONFIG_WAN) += wan/
188obj-$(CONFIG_ARCNET) += arcnet/ 188obj-$(CONFIG_ARCNET) += arcnet/
189obj-$(CONFIG_NET_PCMCIA) += pcmcia/ 189obj-$(CONFIG_NET_PCMCIA) += pcmcia/
190obj-$(CONFIG_NET_WIRELESS) += wireless/ 190obj-$(CONFIG_NET_RADIO) += wireless/
191obj-$(CONFIG_NET_TULIP) += tulip/ 191obj-$(CONFIG_NET_TULIP) += tulip/
192obj-$(CONFIG_HAMRADIO) += hamradio/ 192obj-$(CONFIG_HAMRADIO) += hamradio/
193obj-$(CONFIG_IRDA) += irda/ 193obj-$(CONFIG_IRDA) += irda/
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index f2e937abf7b4..b7dd7260cafb 100755
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -738,6 +738,7 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget)
738 short vtag; 738 short vtag;
739#endif 739#endif
740 int rx_pkt_limit = dev->quota; 740 int rx_pkt_limit = dev->quota;
741 unsigned long flags;
741 742
742 do{ 743 do{
743 /* process receive packets until we use the quota*/ 744 /* process receive packets until we use the quota*/
@@ -841,18 +842,19 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget)
841 /* Receive descriptor is empty now */ 842 /* Receive descriptor is empty now */
842 dev->quota -= num_rx_pkt; 843 dev->quota -= num_rx_pkt;
843 *budget -= num_rx_pkt; 844 *budget -= num_rx_pkt;
845
846 spin_lock_irqsave(&lp->lock, flags);
844 netif_rx_complete(dev); 847 netif_rx_complete(dev);
845 /* enable receive interrupt */
846 writel(VAL0|RINTEN0, mmio + INTEN0); 848 writel(VAL0|RINTEN0, mmio + INTEN0);
847 writel(VAL2 | RDMD0, mmio + CMD0); 849 writel(VAL2 | RDMD0, mmio + CMD0);
850 spin_unlock_irqrestore(&lp->lock, flags);
848 return 0; 851 return 0;
852
849rx_not_empty: 853rx_not_empty:
850 /* Do not call a netif_rx_complete */ 854 /* Do not call a netif_rx_complete */
851 dev->quota -= num_rx_pkt; 855 dev->quota -= num_rx_pkt;
852 *budget -= num_rx_pkt; 856 *budget -= num_rx_pkt;
853 return 1; 857 return 1;
854
855
856} 858}
857 859
858#else 860#else
@@ -1261,18 +1263,20 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
1261 struct net_device * dev = (struct net_device *) dev_id; 1263 struct net_device * dev = (struct net_device *) dev_id;
1262 struct amd8111e_priv *lp = netdev_priv(dev); 1264 struct amd8111e_priv *lp = netdev_priv(dev);
1263 void __iomem *mmio = lp->mmio; 1265 void __iomem *mmio = lp->mmio;
1264 unsigned int intr0; 1266 unsigned int intr0, intren0;
1265 unsigned int handled = 1; 1267 unsigned int handled = 1;
1266 1268
1267 if(dev == NULL) 1269 if(unlikely(dev == NULL))
1268 return IRQ_NONE; 1270 return IRQ_NONE;
1269 1271
1270 if (regs) spin_lock (&lp->lock); 1272 spin_lock(&lp->lock);
1273
1271 /* disabling interrupt */ 1274 /* disabling interrupt */
1272 writel(INTREN, mmio + CMD0); 1275 writel(INTREN, mmio + CMD0);
1273 1276
1274 /* Read interrupt status */ 1277 /* Read interrupt status */
1275 intr0 = readl(mmio + INT0); 1278 intr0 = readl(mmio + INT0);
1279 intren0 = readl(mmio + INTEN0);
1276 1280
1277 /* Process all the INT event until INTR bit is clear. */ 1281 /* Process all the INT event until INTR bit is clear. */
1278 1282
@@ -1293,11 +1297,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
1293 /* Schedule a polling routine */ 1297 /* Schedule a polling routine */
1294 __netif_rx_schedule(dev); 1298 __netif_rx_schedule(dev);
1295 } 1299 }
1296 else { 1300 else if (intren0 & RINTEN0) {
1297 printk("************Driver bug! \ 1301 printk("************Driver bug! \
1298 interrupt while in poll\n"); 1302 interrupt while in poll\n");
1299 /* Fix by disabling interrupts */ 1303 /* Fix by disable receive interrupts */
1300 writel(RINT0, mmio + INT0); 1304 writel(RINTEN0, mmio + INTEN0);
1301 } 1305 }
1302 } 1306 }
1303#else 1307#else
@@ -1321,7 +1325,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
1321err_no_interrupt: 1325err_no_interrupt:
1322 writel( VAL0 | INTREN,mmio + CMD0); 1326 writel( VAL0 | INTREN,mmio + CMD0);
1323 1327
1324 if (regs) spin_unlock(&lp->lock); 1328 spin_unlock(&lp->lock);
1325 1329
1326 return IRQ_RETVAL(handled); 1330 return IRQ_RETVAL(handled);
1327} 1331}
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1b68dd5a49b6..4a47df5a9ff9 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -155,9 +155,9 @@
155 155
156#define DRV_NAME "e100" 156#define DRV_NAME "e100"
157#define DRV_EXT "-NAPI" 157#define DRV_EXT "-NAPI"
158#define DRV_VERSION "3.3.6-k2"DRV_EXT 158#define DRV_VERSION "3.4.8-k2"DRV_EXT
159#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 159#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
160#define DRV_COPYRIGHT "Copyright(c) 1999-2004 Intel Corporation" 160#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
161#define PFX DRV_NAME ": " 161#define PFX DRV_NAME ": "
162 162
163#define E100_WATCHDOG_PERIOD (2 * HZ) 163#define E100_WATCHDOG_PERIOD (2 * HZ)
@@ -210,11 +210,17 @@ static struct pci_device_id e100_id_table[] = {
210 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), 210 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
211 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), 211 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
212 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), 212 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
213 INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
214 INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
215 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
216 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
217 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
213 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), 218 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
214 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), 219 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
215 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), 220 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
216 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), 221 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
217 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), 222 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
223 INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
218 { 0, } 224 { 0, }
219}; 225};
220MODULE_DEVICE_TABLE(pci, e100_id_table); 226MODULE_DEVICE_TABLE(pci, e100_id_table);
@@ -269,6 +275,12 @@ enum scb_status {
269 rus_mask = 0x3C, 275 rus_mask = 0x3C,
270}; 276};
271 277
278enum ru_state {
279 RU_SUSPENDED = 0,
280 RU_RUNNING = 1,
281 RU_UNINITIALIZED = -1,
282};
283
272enum scb_stat_ack { 284enum scb_stat_ack {
273 stat_ack_not_ours = 0x00, 285 stat_ack_not_ours = 0x00,
274 stat_ack_sw_gen = 0x04, 286 stat_ack_sw_gen = 0x04,
@@ -510,7 +522,7 @@ struct nic {
510 struct rx *rx_to_use; 522 struct rx *rx_to_use;
511 struct rx *rx_to_clean; 523 struct rx *rx_to_clean;
512 struct rfd blank_rfd; 524 struct rfd blank_rfd;
513 int ru_running; 525 enum ru_state ru_running;
514 526
515 spinlock_t cb_lock ____cacheline_aligned; 527 spinlock_t cb_lock ____cacheline_aligned;
516 spinlock_t cmd_lock; 528 spinlock_t cmd_lock;
@@ -539,6 +551,7 @@ struct nic {
539 struct timer_list watchdog; 551 struct timer_list watchdog;
540 struct timer_list blink_timer; 552 struct timer_list blink_timer;
541 struct mii_if_info mii; 553 struct mii_if_info mii;
554 struct work_struct tx_timeout_task;
542 enum loopback loopback; 555 enum loopback loopback;
543 556
544 struct mem *mem; 557 struct mem *mem;
@@ -770,7 +783,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
770 return 0; 783 return 0;
771} 784}
772 785
773#define E100_WAIT_SCB_TIMEOUT 40 786#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
774static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) 787static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
775{ 788{
776 unsigned long flags; 789 unsigned long flags;
@@ -840,6 +853,10 @@ static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
840 * because the controller is too busy, so 853 * because the controller is too busy, so
841 * let's just queue the command and try again 854 * let's just queue the command and try again
842 * when another command is scheduled. */ 855 * when another command is scheduled. */
856 if(err == -ENOSPC) {
857 //request a reset
858 schedule_work(&nic->tx_timeout_task);
859 }
843 break; 860 break;
844 } else { 861 } else {
845 nic->cuc_cmd = cuc_resume; 862 nic->cuc_cmd = cuc_resume;
@@ -884,7 +901,7 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
884 901
885static void e100_get_defaults(struct nic *nic) 902static void e100_get_defaults(struct nic *nic)
886{ 903{
887 struct param_range rfds = { .min = 64, .max = 256, .count = 64 }; 904 struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
888 struct param_range cbs = { .min = 64, .max = 256, .count = 64 }; 905 struct param_range cbs = { .min = 64, .max = 256, .count = 64 };
889 906
890 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id); 907 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
@@ -899,8 +916,9 @@ static void e100_get_defaults(struct nic *nic)
899 /* Quadwords to DMA into FIFO before starting frame transmit */ 916 /* Quadwords to DMA into FIFO before starting frame transmit */
900 nic->tx_threshold = 0xE0; 917 nic->tx_threshold = 0xE0;
901 918
902 nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf | 919 /* no interrupt for every tx completion, delay = 256us if not 557*/
903 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0)); 920 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
921 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
904 922
905 /* Template for a freshly allocated RFD */ 923 /* Template for a freshly allocated RFD */
906 nic->blank_rfd.command = cpu_to_le16(cb_el); 924 nic->blank_rfd.command = cpu_to_le16(cb_el);
@@ -964,7 +982,8 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
964 if(nic->flags & multicast_all) 982 if(nic->flags & multicast_all)
965 config->multicast_all = 0x1; /* 1=accept, 0=no */ 983 config->multicast_all = 0x1; /* 1=accept, 0=no */
966 984
967 if(!(nic->flags & wol_magic)) 985 /* disable WoL when up */
986 if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
968 config->magic_packet_disable = 0x1; /* 1=off, 0=on */ 987 config->magic_packet_disable = 0x1; /* 1=off, 0=on */
969 988
970 if(nic->mac >= mac_82558_D101_A4) { 989 if(nic->mac >= mac_82558_D101_A4) {
@@ -1203,7 +1222,9 @@ static void e100_update_stats(struct nic *nic)
1203 } 1222 }
1204 } 1223 }
1205 1224
1206 e100_exec_cmd(nic, cuc_dump_reset, 0); 1225
1226 if(e100_exec_cmd(nic, cuc_dump_reset, 0))
1227 DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
1207} 1228}
1208 1229
1209static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) 1230static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1279,12 +1300,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1279 struct sk_buff *skb) 1300 struct sk_buff *skb)
1280{ 1301{
1281 cb->command = nic->tx_command; 1302 cb->command = nic->tx_command;
1303 /* interrupt every 16 packets regardless of delay */
1304 if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i;
1282 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); 1305 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1283 cb->u.tcb.tcb_byte_count = 0; 1306 cb->u.tcb.tcb_byte_count = 0;
1284 cb->u.tcb.threshold = nic->tx_threshold; 1307 cb->u.tcb.threshold = nic->tx_threshold;
1285 cb->u.tcb.tbd_count = 1; 1308 cb->u.tcb.tbd_count = 1;
1286 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, 1309 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1287 skb->data, skb->len, PCI_DMA_TODEVICE)); 1310 skb->data, skb->len, PCI_DMA_TODEVICE));
1311 // check for mapping failure?
1288 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); 1312 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1289} 1313}
1290 1314
@@ -1297,7 +1321,8 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1297 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. 1321 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1298 Issue a NOP command followed by a 1us delay before 1322 Issue a NOP command followed by a 1us delay before
1299 issuing the Tx command. */ 1323 issuing the Tx command. */
1300 e100_exec_cmd(nic, cuc_nop, 0); 1324 if(e100_exec_cmd(nic, cuc_nop, 0))
1325 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
1301 udelay(1); 1326 udelay(1);
1302 } 1327 }
1303 1328
@@ -1415,12 +1440,18 @@ static int e100_alloc_cbs(struct nic *nic)
1415 return 0; 1440 return 0;
1416} 1441}
1417 1442
1418static inline void e100_start_receiver(struct nic *nic) 1443static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1419{ 1444{
1445 if(!nic->rxs) return;
1446 if(RU_SUSPENDED != nic->ru_running) return;
1447
1448 /* handle init time starts */
1449 if(!rx) rx = nic->rxs;
1450
1420 /* (Re)start RU if suspended or idle and RFA is non-NULL */ 1451 /* (Re)start RU if suspended or idle and RFA is non-NULL */
1421 if(!nic->ru_running && nic->rx_to_clean->skb) { 1452 if(rx->skb) {
1422 e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr); 1453 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1423 nic->ru_running = 1; 1454 nic->ru_running = RU_RUNNING;
1424 } 1455 }
1425} 1456}
1426 1457
@@ -1437,6 +1468,13 @@ static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1437 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, 1468 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1438 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); 1469 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1439 1470
1471 if(pci_dma_mapping_error(rx->dma_addr)) {
1472 dev_kfree_skb_any(rx->skb);
1473 rx->skb = 0;
1474 rx->dma_addr = 0;
1475 return -ENOMEM;
1476 }
1477
1440 /* Link the RFD to end of RFA by linking previous RFD to 1478 /* Link the RFD to end of RFA by linking previous RFD to
1441 * this one, and clearing EL bit of previous. */ 1479 * this one, and clearing EL bit of previous. */
1442 if(rx->prev->skb) { 1480 if(rx->prev->skb) {
@@ -1471,7 +1509,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
1471 1509
1472 /* If data isn't ready, nothing to indicate */ 1510 /* If data isn't ready, nothing to indicate */
1473 if(unlikely(!(rfd_status & cb_complete))) 1511 if(unlikely(!(rfd_status & cb_complete)))
1474 return -EAGAIN; 1512 return -ENODATA;
1475 1513
1476 /* Get actual data size */ 1514 /* Get actual data size */
1477 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; 1515 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
@@ -1482,6 +1520,10 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
1482 pci_unmap_single(nic->pdev, rx->dma_addr, 1520 pci_unmap_single(nic->pdev, rx->dma_addr,
1483 RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 1521 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
1484 1522
1523 /* this allows for a fast restart without re-enabling interrupts */
1524 if(le16_to_cpu(rfd->command) & cb_el)
1525 nic->ru_running = RU_SUSPENDED;
1526
1485 /* Pull off the RFD and put the actual data (minus eth hdr) */ 1527 /* Pull off the RFD and put the actual data (minus eth hdr) */
1486 skb_reserve(skb, sizeof(struct rfd)); 1528 skb_reserve(skb, sizeof(struct rfd));
1487 skb_put(skb, actual_size); 1529 skb_put(skb, actual_size);
@@ -1514,20 +1556,45 @@ static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1514 unsigned int work_to_do) 1556 unsigned int work_to_do)
1515{ 1557{
1516 struct rx *rx; 1558 struct rx *rx;
1559 int restart_required = 0;
1560 struct rx *rx_to_start = NULL;
1561
1562 /* are we already rnr? then pay attention!!! this ensures that
1563 * the state machine progression never allows a start with a
1564 * partially cleaned list, avoiding a race between hardware
1565 * and rx_to_clean when in NAPI mode */
1566 if(RU_SUSPENDED == nic->ru_running)
1567 restart_required = 1;
1517 1568
1518 /* Indicate newly arrived packets */ 1569 /* Indicate newly arrived packets */
1519 for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { 1570 for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
1520 if(e100_rx_indicate(nic, rx, work_done, work_to_do)) 1571 int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
1572 if(-EAGAIN == err) {
1573 /* hit quota so have more work to do, restart once
1574 * cleanup is complete */
1575 restart_required = 0;
1576 break;
1577 } else if(-ENODATA == err)
1521 break; /* No more to clean */ 1578 break; /* No more to clean */
1522 } 1579 }
1523 1580
1581 /* save our starting point as the place we'll restart the receiver */
1582 if(restart_required)
1583 rx_to_start = nic->rx_to_clean;
1584
1524 /* Alloc new skbs to refill list */ 1585 /* Alloc new skbs to refill list */
1525 for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { 1586 for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
1526 if(unlikely(e100_rx_alloc_skb(nic, rx))) 1587 if(unlikely(e100_rx_alloc_skb(nic, rx)))
1527 break; /* Better luck next time (see watchdog) */ 1588 break; /* Better luck next time (see watchdog) */
1528 } 1589 }
1529 1590
1530 e100_start_receiver(nic); 1591 if(restart_required) {
1592 // ack the rnr?
1593 writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
1594 e100_start_receiver(nic, rx_to_start);
1595 if(work_done)
1596 (*work_done)++;
1597 }
1531} 1598}
1532 1599
1533static void e100_rx_clean_list(struct nic *nic) 1600static void e100_rx_clean_list(struct nic *nic)
@@ -1535,6 +1602,8 @@ static void e100_rx_clean_list(struct nic *nic)
1535 struct rx *rx; 1602 struct rx *rx;
1536 unsigned int i, count = nic->params.rfds.count; 1603 unsigned int i, count = nic->params.rfds.count;
1537 1604
1605 nic->ru_running = RU_UNINITIALIZED;
1606
1538 if(nic->rxs) { 1607 if(nic->rxs) {
1539 for(rx = nic->rxs, i = 0; i < count; rx++, i++) { 1608 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1540 if(rx->skb) { 1609 if(rx->skb) {
@@ -1548,7 +1617,6 @@ static void e100_rx_clean_list(struct nic *nic)
1548 } 1617 }
1549 1618
1550 nic->rx_to_use = nic->rx_to_clean = NULL; 1619 nic->rx_to_use = nic->rx_to_clean = NULL;
1551 nic->ru_running = 0;
1552} 1620}
1553 1621
1554static int e100_rx_alloc_list(struct nic *nic) 1622static int e100_rx_alloc_list(struct nic *nic)
@@ -1557,6 +1625,7 @@ static int e100_rx_alloc_list(struct nic *nic)
1557 unsigned int i, count = nic->params.rfds.count; 1625 unsigned int i, count = nic->params.rfds.count;
1558 1626
1559 nic->rx_to_use = nic->rx_to_clean = NULL; 1627 nic->rx_to_use = nic->rx_to_clean = NULL;
1628 nic->ru_running = RU_UNINITIALIZED;
1560 1629
1561 if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC))) 1630 if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
1562 return -ENOMEM; 1631 return -ENOMEM;
@@ -1572,6 +1641,7 @@ static int e100_rx_alloc_list(struct nic *nic)
1572 } 1641 }
1573 1642
1574 nic->rx_to_use = nic->rx_to_clean = nic->rxs; 1643 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
1644 nic->ru_running = RU_SUSPENDED;
1575 1645
1576 return 0; 1646 return 0;
1577} 1647}
@@ -1593,7 +1663,7 @@ static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
1593 1663
1594 /* We hit Receive No Resource (RNR); restart RU after cleaning */ 1664 /* We hit Receive No Resource (RNR); restart RU after cleaning */
1595 if(stat_ack & stat_ack_rnr) 1665 if(stat_ack & stat_ack_rnr)
1596 nic->ru_running = 0; 1666 nic->ru_running = RU_SUSPENDED;
1597 1667
1598 e100_disable_irq(nic); 1668 e100_disable_irq(nic);
1599 netif_rx_schedule(netdev); 1669 netif_rx_schedule(netdev);
@@ -1663,6 +1733,7 @@ static int e100_change_mtu(struct net_device *netdev, int new_mtu)
1663 return 0; 1733 return 0;
1664} 1734}
1665 1735
1736#ifdef CONFIG_PM
1666static int e100_asf(struct nic *nic) 1737static int e100_asf(struct nic *nic)
1667{ 1738{
1668 /* ASF can be enabled from eeprom */ 1739 /* ASF can be enabled from eeprom */
@@ -1671,6 +1742,7 @@ static int e100_asf(struct nic *nic)
1671 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) && 1742 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
1672 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE)); 1743 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
1673} 1744}
1745#endif
1674 1746
1675static int e100_up(struct nic *nic) 1747static int e100_up(struct nic *nic)
1676{ 1748{
@@ -1683,13 +1755,16 @@ static int e100_up(struct nic *nic)
1683 if((err = e100_hw_init(nic))) 1755 if((err = e100_hw_init(nic)))
1684 goto err_clean_cbs; 1756 goto err_clean_cbs;
1685 e100_set_multicast_list(nic->netdev); 1757 e100_set_multicast_list(nic->netdev);
1686 e100_start_receiver(nic); 1758 e100_start_receiver(nic, 0);
1687 mod_timer(&nic->watchdog, jiffies); 1759 mod_timer(&nic->watchdog, jiffies);
1688 if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ, 1760 if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
1689 nic->netdev->name, nic->netdev))) 1761 nic->netdev->name, nic->netdev)))
1690 goto err_no_irq; 1762 goto err_no_irq;
1691 e100_enable_irq(nic);
1692 netif_wake_queue(nic->netdev); 1763 netif_wake_queue(nic->netdev);
1764 netif_poll_enable(nic->netdev);
1765 /* enable ints _after_ enabling poll, preventing a race between
1766 * disable ints+schedule */
1767 e100_enable_irq(nic);
1693 return 0; 1768 return 0;
1694 1769
1695err_no_irq: 1770err_no_irq:
@@ -1703,11 +1778,13 @@ err_rx_clean_list:
1703 1778
1704static void e100_down(struct nic *nic) 1779static void e100_down(struct nic *nic)
1705{ 1780{
1781 /* wait here for poll to complete */
1782 netif_poll_disable(nic->netdev);
1783 netif_stop_queue(nic->netdev);
1706 e100_hw_reset(nic); 1784 e100_hw_reset(nic);
1707 free_irq(nic->pdev->irq, nic->netdev); 1785 free_irq(nic->pdev->irq, nic->netdev);
1708 del_timer_sync(&nic->watchdog); 1786 del_timer_sync(&nic->watchdog);
1709 netif_carrier_off(nic->netdev); 1787 netif_carrier_off(nic->netdev);
1710 netif_stop_queue(nic->netdev);
1711 e100_clean_cbs(nic); 1788 e100_clean_cbs(nic);
1712 e100_rx_clean_list(nic); 1789 e100_rx_clean_list(nic);
1713} 1790}
@@ -1716,6 +1793,15 @@ static void e100_tx_timeout(struct net_device *netdev)
1716{ 1793{
1717 struct nic *nic = netdev_priv(netdev); 1794 struct nic *nic = netdev_priv(netdev);
1718 1795
1796 /* Reset outside of interrupt context, to avoid request_irq
1797 * in interrupt context */
1798 schedule_work(&nic->tx_timeout_task);
1799}
1800
1801static void e100_tx_timeout_task(struct net_device *netdev)
1802{
1803 struct nic *nic = netdev_priv(netdev);
1804
1719 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", 1805 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
1720 readb(&nic->csr->scb.status)); 1806 readb(&nic->csr->scb.status));
1721 e100_down(netdev_priv(netdev)); 1807 e100_down(netdev_priv(netdev));
@@ -1749,7 +1835,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
1749 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 1835 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
1750 BMCR_LOOPBACK); 1836 BMCR_LOOPBACK);
1751 1837
1752 e100_start_receiver(nic); 1838 e100_start_receiver(nic, 0);
1753 1839
1754 if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) { 1840 if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
1755 err = -ENOMEM; 1841 err = -ENOMEM;
@@ -1869,7 +1955,6 @@ static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1869 else 1955 else
1870 nic->flags &= ~wol_magic; 1956 nic->flags &= ~wol_magic;
1871 1957
1872 pci_enable_wake(nic->pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
1873 e100_exec_cb(nic, NULL, e100_configure); 1958 e100_exec_cb(nic, NULL, e100_configure);
1874 1959
1875 return 0; 1960 return 0;
@@ -2223,6 +2308,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2223 2308
2224 e100_get_defaults(nic); 2309 e100_get_defaults(nic);
2225 2310
2311 /* locks must be initialized before calling hw_reset */
2226 spin_lock_init(&nic->cb_lock); 2312 spin_lock_init(&nic->cb_lock);
2227 spin_lock_init(&nic->cmd_lock); 2313 spin_lock_init(&nic->cmd_lock);
2228 2314
@@ -2240,6 +2326,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2240 nic->blink_timer.function = e100_blink_led; 2326 nic->blink_timer.function = e100_blink_led;
2241 nic->blink_timer.data = (unsigned long)nic; 2327 nic->blink_timer.data = (unsigned long)nic;
2242 2328
2329 INIT_WORK(&nic->tx_timeout_task,
2330 (void (*)(void *))e100_tx_timeout_task, netdev);
2331
2243 if((err = e100_alloc(nic))) { 2332 if((err = e100_alloc(nic))) {
2244 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n"); 2333 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2245 goto err_out_iounmap; 2334 goto err_out_iounmap;
@@ -2263,7 +2352,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2263 (nic->eeprom[eeprom_id] & eeprom_id_wol)) 2352 (nic->eeprom[eeprom_id] & eeprom_id_wol))
2264 nic->flags |= wol_magic; 2353 nic->flags |= wol_magic;
2265 2354
2266 pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic))); 2355 /* ack any pending wake events, disable PME */
2356 pci_enable_wake(pdev, 0, 0);
2267 2357
2268 strcpy(netdev->name, "eth%d"); 2358 strcpy(netdev->name, "eth%d");
2269 if((err = register_netdev(netdev))) { 2359 if((err = register_netdev(netdev))) {
@@ -2335,7 +2425,10 @@ static int e100_resume(struct pci_dev *pdev)
2335 2425
2336 pci_set_power_state(pdev, PCI_D0); 2426 pci_set_power_state(pdev, PCI_D0);
2337 pci_restore_state(pdev); 2427 pci_restore_state(pdev);
2338 e100_hw_init(nic); 2428 /* ack any pending wake events, disable PME */
2429 pci_enable_wake(pdev, 0, 0);
2430 if(e100_hw_init(nic))
2431 DPRINTK(HW, ERR, "e100_hw_init failed\n");
2339 2432
2340 netif_device_attach(netdev); 2433 netif_device_attach(netdev);
2341 if(netif_running(netdev)) 2434 if(netif_running(netdev))
@@ -2345,6 +2438,21 @@ static int e100_resume(struct pci_dev *pdev)
2345} 2438}
2346#endif 2439#endif
2347 2440
2441
2442static void e100_shutdown(struct device *dev)
2443{
2444 struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
2445 struct net_device *netdev = pci_get_drvdata(pdev);
2446 struct nic *nic = netdev_priv(netdev);
2447
2448#ifdef CONFIG_PM
2449 pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
2450#else
2451 pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
2452#endif
2453}
2454
2455
2348static struct pci_driver e100_driver = { 2456static struct pci_driver e100_driver = {
2349 .name = DRV_NAME, 2457 .name = DRV_NAME,
2350 .id_table = e100_id_table, 2458 .id_table = e100_id_table,
@@ -2354,6 +2462,11 @@ static struct pci_driver e100_driver = {
2354 .suspend = e100_suspend, 2462 .suspend = e100_suspend,
2355 .resume = e100_resume, 2463 .resume = e100_resume,
2356#endif 2464#endif
2465
2466 .driver = {
2467 .shutdown = e100_shutdown,
2468 }
2469
2357}; 2470};
2358 2471
2359static int __init e100_init_module(void) 2472static int __init e100_init_module(void)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 148930d4e9bd..af1e82c5b808 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 3
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free 7 under the terms of the GNU General Public License as published by the Free
@@ -112,6 +112,8 @@ struct e1000_adapter;
112#define E1000_MAX_82544_RXD 4096 112#define E1000_MAX_82544_RXD 4096
113 113
114/* Supported Rx Buffer Sizes */ 114/* Supported Rx Buffer Sizes */
115#define E1000_RXBUFFER_128 128 /* Used for packet split */
116#define E1000_RXBUFFER_256 256 /* Used for packet split */
115#define E1000_RXBUFFER_2048 2048 117#define E1000_RXBUFFER_2048 2048
116#define E1000_RXBUFFER_4096 4096 118#define E1000_RXBUFFER_4096 4096
117#define E1000_RXBUFFER_8192 8192 119#define E1000_RXBUFFER_8192 8192
@@ -137,15 +139,19 @@ struct e1000_adapter;
137/* How many Rx Buffers do we bundle into one write to the hardware ? */ 139/* How many Rx Buffers do we bundle into one write to the hardware ? */
138#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 140#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
139 141
140#define AUTO_ALL_MODES 0 142#define AUTO_ALL_MODES 0
141#define E1000_EEPROM_82544_APM 0x0004 143#define E1000_EEPROM_82544_APM 0x0400
142#define E1000_EEPROM_APME 0x0400 144#define E1000_EEPROM_APME 0x0400
143 145
144#ifndef E1000_MASTER_SLAVE 146#ifndef E1000_MASTER_SLAVE
145/* Switch to override PHY master/slave setting */ 147/* Switch to override PHY master/slave setting */
146#define E1000_MASTER_SLAVE e1000_ms_hw_default 148#define E1000_MASTER_SLAVE e1000_ms_hw_default
147#endif 149#endif
148 150
151#define E1000_MNG_VLAN_NONE -1
152/* Number of packet split data buffers (not including the header buffer) */
153#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1
154
149/* only works for sizes that are powers of 2 */ 155/* only works for sizes that are powers of 2 */
150#define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1))) 156#define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
151 157
@@ -159,6 +165,9 @@ struct e1000_buffer {
159 uint16_t next_to_watch; 165 uint16_t next_to_watch;
160}; 166};
161 167
168struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; };
169struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; };
170
162struct e1000_desc_ring { 171struct e1000_desc_ring {
163 /* pointer to the descriptor ring memory */ 172 /* pointer to the descriptor ring memory */
164 void *desc; 173 void *desc;
@@ -174,12 +183,19 @@ struct e1000_desc_ring {
174 unsigned int next_to_clean; 183 unsigned int next_to_clean;
175 /* array of buffer information structs */ 184 /* array of buffer information structs */
176 struct e1000_buffer *buffer_info; 185 struct e1000_buffer *buffer_info;
186 /* arrays of page information for packet split */
187 struct e1000_ps_page *ps_page;
188 struct e1000_ps_page_dma *ps_page_dma;
177}; 189};
178 190
179#define E1000_DESC_UNUSED(R) \ 191#define E1000_DESC_UNUSED(R) \
180 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ 192 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
181 (R)->next_to_clean - (R)->next_to_use - 1) 193 (R)->next_to_clean - (R)->next_to_use - 1)
182 194
195#define E1000_RX_DESC_PS(R, i) \
196 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
197#define E1000_RX_DESC_EXT(R, i) \
198 (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
183#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) 199#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
184#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) 200#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
185#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) 201#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
@@ -192,6 +208,7 @@ struct e1000_adapter {
192 struct timer_list watchdog_timer; 208 struct timer_list watchdog_timer;
193 struct timer_list phy_info_timer; 209 struct timer_list phy_info_timer;
194 struct vlan_group *vlgrp; 210 struct vlan_group *vlgrp;
211 uint16_t mng_vlan_id;
195 uint32_t bd_number; 212 uint32_t bd_number;
196 uint32_t rx_buffer_len; 213 uint32_t rx_buffer_len;
197 uint32_t part_num; 214 uint32_t part_num;
@@ -228,14 +245,23 @@ struct e1000_adapter {
228 boolean_t detect_tx_hung; 245 boolean_t detect_tx_hung;
229 246
230 /* RX */ 247 /* RX */
248#ifdef CONFIG_E1000_NAPI
249 boolean_t (*clean_rx) (struct e1000_adapter *adapter, int *work_done,
250 int work_to_do);
251#else
252 boolean_t (*clean_rx) (struct e1000_adapter *adapter);
253#endif
254 void (*alloc_rx_buf) (struct e1000_adapter *adapter);
231 struct e1000_desc_ring rx_ring; 255 struct e1000_desc_ring rx_ring;
232 uint64_t hw_csum_err; 256 uint64_t hw_csum_err;
233 uint64_t hw_csum_good; 257 uint64_t hw_csum_good;
234 uint32_t rx_int_delay; 258 uint32_t rx_int_delay;
235 uint32_t rx_abs_int_delay; 259 uint32_t rx_abs_int_delay;
236 boolean_t rx_csum; 260 boolean_t rx_csum;
261 boolean_t rx_ps;
237 uint32_t gorcl; 262 uint32_t gorcl;
238 uint64_t gorcl_old; 263 uint64_t gorcl_old;
264 uint16_t rx_ps_bsize0;
239 265
240 /* Interrupt Throttle Rate */ 266 /* Interrupt Throttle Rate */
241 uint32_t itr; 267 uint32_t itr;
@@ -257,5 +283,8 @@ struct e1000_adapter {
257 283
258 284
259 int msg_enable; 285 int msg_enable;
286#ifdef CONFIG_PCI_MSI
287 boolean_t have_msi;
288#endif
260}; 289};
261#endif /* _E1000_H_ */ 290#endif /* _E1000_H_ */
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 0a2ca7c73a41..237247f74df4 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 3
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free 7 under the terms of the GNU General Public License as published by the Free
@@ -69,6 +69,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
69 { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) }, 69 { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) },
70 { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, 70 { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
71 { "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) }, 71 { "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) },
72 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
72 { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) }, 73 { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) },
73 { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) }, 74 { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) },
74 { "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) }, 75 { "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) },
@@ -593,7 +594,7 @@ e1000_set_ringparam(struct net_device *netdev,
593 tx_old = adapter->tx_ring; 594 tx_old = adapter->tx_ring;
594 rx_old = adapter->rx_ring; 595 rx_old = adapter->rx_ring;
595 596
596 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 597 if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
597 return -EINVAL; 598 return -EINVAL;
598 599
599 if(netif_running(adapter->netdev)) 600 if(netif_running(adapter->netdev))
@@ -784,8 +785,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
784 /* Hook up test interrupt handler just for this test */ 785 /* Hook up test interrupt handler just for this test */
785 if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) { 786 if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
786 shared_int = FALSE; 787 shared_int = FALSE;
787 } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ, 788 } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
788 netdev->name, netdev)){ 789 netdev->name, netdev)){
789 *data = 1; 790 *data = 1;
790 return -1; 791 return -1;
791 } 792 }
@@ -842,10 +843,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
842 * test failed. 843 * test failed.
843 */ 844 */
844 adapter->test_icr = 0; 845 adapter->test_icr = 0;
845 E1000_WRITE_REG(&adapter->hw, IMC, 846 E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF);
846 (~mask & 0x00007FFF)); 847 E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
847 E1000_WRITE_REG(&adapter->hw, ICS,
848 (~mask & 0x00007FFF));
849 msec_delay(10); 848 msec_delay(10);
850 849
851 if(adapter->test_icr) { 850 if(adapter->test_icr) {
@@ -919,7 +918,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
919 918
920 /* Setup Tx descriptor ring and Tx buffers */ 919 /* Setup Tx descriptor ring and Tx buffers */
921 920
922 txdr->count = 80; 921 if(!txdr->count)
922 txdr->count = E1000_DEFAULT_TXD;
923 923
924 size = txdr->count * sizeof(struct e1000_buffer); 924 size = txdr->count * sizeof(struct e1000_buffer);
925 if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) { 925 if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
@@ -974,7 +974,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
974 974
975 /* Setup Rx descriptor ring and Rx buffers */ 975 /* Setup Rx descriptor ring and Rx buffers */
976 976
977 rxdr->count = 80; 977 if(!rxdr->count)
978 rxdr->count = E1000_DEFAULT_RXD;
978 979
979 size = rxdr->count * sizeof(struct e1000_buffer); 980 size = rxdr->count * sizeof(struct e1000_buffer);
980 if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) { 981 if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
@@ -1008,7 +1009,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1008 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); 1009 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
1009 struct sk_buff *skb; 1010 struct sk_buff *skb;
1010 1011
1011 if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, 1012 if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
1012 GFP_KERNEL))) { 1013 GFP_KERNEL))) {
1013 ret_val = 6; 1014 ret_val = 6;
1014 goto err_nomem; 1015 goto err_nomem;
@@ -1310,31 +1311,62 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
1310 struct e1000_desc_ring *txdr = &adapter->test_tx_ring; 1311 struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
1311 struct e1000_desc_ring *rxdr = &adapter->test_rx_ring; 1312 struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
1312 struct pci_dev *pdev = adapter->pdev; 1313 struct pci_dev *pdev = adapter->pdev;
1313 int i, ret_val; 1314 int i, j, k, l, lc, good_cnt, ret_val=0;
1315 unsigned long time;
1314 1316
1315 E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1); 1317 E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
1316 1318
1317 for(i = 0; i < 64; i++) { 1319 /* Calculate the loop count based on the largest descriptor ring
1318 e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024); 1320 * The idea is to wrap the largest ring a number of times using 64
1319 pci_dma_sync_single_for_device(pdev, txdr->buffer_info[i].dma, 1321 * send/receive pairs during each loop
1320 txdr->buffer_info[i].length, 1322 */
1321 PCI_DMA_TODEVICE);
1322 }
1323 E1000_WRITE_REG(&adapter->hw, TDT, i);
1324
1325 msec_delay(200);
1326
1327 i = 0;
1328 do {
1329 pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[i].dma,
1330 rxdr->buffer_info[i].length,
1331 PCI_DMA_FROMDEVICE);
1332
1333 ret_val = e1000_check_lbtest_frame(rxdr->buffer_info[i].skb,
1334 1024);
1335 i++;
1336 } while (ret_val != 0 && i < 64);
1337 1323
1324 if(rxdr->count <= txdr->count)
1325 lc = ((txdr->count / 64) * 2) + 1;
1326 else
1327 lc = ((rxdr->count / 64) * 2) + 1;
1328
1329 k = l = 0;
1330 for(j = 0; j <= lc; j++) { /* loop count loop */
1331 for(i = 0; i < 64; i++) { /* send the packets */
1332 e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1333 1024);
1334 pci_dma_sync_single_for_device(pdev,
1335 txdr->buffer_info[k].dma,
1336 txdr->buffer_info[k].length,
1337 PCI_DMA_TODEVICE);
1338 if(unlikely(++k == txdr->count)) k = 0;
1339 }
1340 E1000_WRITE_REG(&adapter->hw, TDT, k);
1341 msec_delay(200);
1342 time = jiffies; /* set the start time for the receive */
1343 good_cnt = 0;
1344 do { /* receive the sent packets */
1345 pci_dma_sync_single_for_cpu(pdev,
1346 rxdr->buffer_info[l].dma,
1347 rxdr->buffer_info[l].length,
1348 PCI_DMA_FROMDEVICE);
1349
1350 ret_val = e1000_check_lbtest_frame(
1351 rxdr->buffer_info[l].skb,
1352 1024);
1353 if(!ret_val)
1354 good_cnt++;
1355 if(unlikely(++l == rxdr->count)) l = 0;
1356 /* time + 20 msecs (200 msecs on 2.4) is more than
1357 * enough time to complete the receives, if it's
1358 * exceeded, break and error off
1359 */
1360 } while (good_cnt < 64 && jiffies < (time + 20));
1361 if(good_cnt != 64) {
1362 ret_val = 13; /* ret_val is the same as mis-compare */
1363 break;
1364 }
1365 if(jiffies >= (time + 2)) {
1366 ret_val = 14; /* error code for time out error */
1367 break;
1368 }
1369 } /* end loop count loop */
1338 return ret_val; 1370 return ret_val;
1339} 1371}
1340 1372
@@ -1354,13 +1386,12 @@ static int
1354e1000_link_test(struct e1000_adapter *adapter, uint64_t *data) 1386e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
1355{ 1387{
1356 *data = 0; 1388 *data = 0;
1357
1358 if (adapter->hw.media_type == e1000_media_type_internal_serdes) { 1389 if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
1359 int i = 0; 1390 int i = 0;
1360 adapter->hw.serdes_link_down = TRUE; 1391 adapter->hw.serdes_link_down = TRUE;
1361 1392
1362 /* on some blade server designs link establishment */ 1393 /* On some blade server designs, link establishment
1363 /* could take as long as 2-3 minutes. */ 1394 * could take as long as 2-3 minutes */
1364 do { 1395 do {
1365 e1000_check_for_link(&adapter->hw); 1396 e1000_check_for_link(&adapter->hw);
1366 if (adapter->hw.serdes_link_down == FALSE) 1397 if (adapter->hw.serdes_link_down == FALSE)
@@ -1368,9 +1399,11 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
1368 msec_delay(20); 1399 msec_delay(20);
1369 } while (i++ < 3750); 1400 } while (i++ < 3750);
1370 1401
1371 *data = 1; 1402 *data = 1;
1372 } else { 1403 } else {
1373 e1000_check_for_link(&adapter->hw); 1404 e1000_check_for_link(&adapter->hw);
1405 if(adapter->hw.autoneg) /* if auto_neg is set wait for it */
1406 msec_delay(4000);
1374 1407
1375 if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { 1408 if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
1376 *data = 1; 1409 *data = 1;
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 786a9b935659..723589b28be5 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 3
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free 7 under the terms of the GNU General Public License as published by the Free
@@ -63,10 +63,11 @@ static uint16_t e1000_shift_in_ee_bits(struct e1000_hw *hw, uint16_t count);
63static int32_t e1000_acquire_eeprom(struct e1000_hw *hw); 63static int32_t e1000_acquire_eeprom(struct e1000_hw *hw);
64static void e1000_release_eeprom(struct e1000_hw *hw); 64static void e1000_release_eeprom(struct e1000_hw *hw);
65static void e1000_standby_eeprom(struct e1000_hw *hw); 65static void e1000_standby_eeprom(struct e1000_hw *hw);
66static int32_t e1000_id_led_init(struct e1000_hw * hw);
67static int32_t e1000_set_vco_speed(struct e1000_hw *hw); 66static int32_t e1000_set_vco_speed(struct e1000_hw *hw);
68static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw); 67static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw);
69static int32_t e1000_set_phy_mode(struct e1000_hw *hw); 68static int32_t e1000_set_phy_mode(struct e1000_hw *hw);
69static int32_t e1000_host_if_read_cookie(struct e1000_hw *hw, uint8_t *buffer);
70static uint8_t e1000_calculate_mng_checksum(char *buffer, uint32_t length);
70 71
71/* IGP cable length table */ 72/* IGP cable length table */
72static const 73static const
@@ -80,6 +81,17 @@ uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
80 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 81 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
81 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; 82 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
82 83
84static const
85uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
86 { 8, 13, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43,
87 22, 24, 27, 30, 32, 35, 37, 40, 42, 44, 47, 49, 51, 54, 56, 58,
88 32, 35, 38, 41, 44, 47, 50, 53, 55, 58, 61, 63, 66, 69, 71, 74,
89 43, 47, 51, 54, 58, 61, 64, 67, 71, 74, 77, 80, 82, 85, 88, 90,
90 57, 62, 66, 70, 74, 77, 81, 85, 88, 91, 94, 97, 100, 103, 106, 108,
91 73, 78, 82, 87, 91, 95, 98, 102, 105, 109, 112, 114, 117, 119, 122, 124,
92 91, 96, 101, 105, 109, 113, 116, 119, 122, 125, 127, 128, 128, 128, 128, 128,
93 108, 113, 117, 121, 124, 127, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128};
94
83 95
84/****************************************************************************** 96/******************************************************************************
85 * Set the phy type member in the hw struct. 97 * Set the phy type member in the hw struct.
@@ -91,10 +103,14 @@ e1000_set_phy_type(struct e1000_hw *hw)
91{ 103{
92 DEBUGFUNC("e1000_set_phy_type"); 104 DEBUGFUNC("e1000_set_phy_type");
93 105
106 if(hw->mac_type == e1000_undefined)
107 return -E1000_ERR_PHY_TYPE;
108
94 switch(hw->phy_id) { 109 switch(hw->phy_id) {
95 case M88E1000_E_PHY_ID: 110 case M88E1000_E_PHY_ID:
96 case M88E1000_I_PHY_ID: 111 case M88E1000_I_PHY_ID:
97 case M88E1011_I_PHY_ID: 112 case M88E1011_I_PHY_ID:
113 case M88E1111_I_PHY_ID:
98 hw->phy_type = e1000_phy_m88; 114 hw->phy_type = e1000_phy_m88;
99 break; 115 break;
100 case IGP01E1000_I_PHY_ID: 116 case IGP01E1000_I_PHY_ID:
@@ -128,7 +144,6 @@ e1000_phy_init_script(struct e1000_hw *hw)
128 144
129 DEBUGFUNC("e1000_phy_init_script"); 145 DEBUGFUNC("e1000_phy_init_script");
130 146
131
132 if(hw->phy_init_script) { 147 if(hw->phy_init_script) {
133 msec_delay(20); 148 msec_delay(20);
134 149
@@ -271,6 +286,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
271 case E1000_DEV_ID_82546GB_FIBER: 286 case E1000_DEV_ID_82546GB_FIBER:
272 case E1000_DEV_ID_82546GB_SERDES: 287 case E1000_DEV_ID_82546GB_SERDES:
273 case E1000_DEV_ID_82546GB_PCIE: 288 case E1000_DEV_ID_82546GB_PCIE:
289 case E1000_DEV_ID_82546GB_QUAD_COPPER:
274 hw->mac_type = e1000_82546_rev_3; 290 hw->mac_type = e1000_82546_rev_3;
275 break; 291 break;
276 case E1000_DEV_ID_82541EI: 292 case E1000_DEV_ID_82541EI:
@@ -289,12 +305,19 @@ e1000_set_mac_type(struct e1000_hw *hw)
289 case E1000_DEV_ID_82547GI: 305 case E1000_DEV_ID_82547GI:
290 hw->mac_type = e1000_82547_rev_2; 306 hw->mac_type = e1000_82547_rev_2;
291 break; 307 break;
308 case E1000_DEV_ID_82573E:
309 case E1000_DEV_ID_82573E_IAMT:
310 hw->mac_type = e1000_82573;
311 break;
292 default: 312 default:
293 /* Should never have loaded on this device */ 313 /* Should never have loaded on this device */
294 return -E1000_ERR_MAC_TYPE; 314 return -E1000_ERR_MAC_TYPE;
295 } 315 }
296 316
297 switch(hw->mac_type) { 317 switch(hw->mac_type) {
318 case e1000_82573:
319 hw->eeprom_semaphore_present = TRUE;
320 /* fall through */
298 case e1000_82541: 321 case e1000_82541:
299 case e1000_82547: 322 case e1000_82547:
300 case e1000_82541_rev_2: 323 case e1000_82541_rev_2:
@@ -360,6 +383,9 @@ e1000_reset_hw(struct e1000_hw *hw)
360 uint32_t icr; 383 uint32_t icr;
361 uint32_t manc; 384 uint32_t manc;
362 uint32_t led_ctrl; 385 uint32_t led_ctrl;
386 uint32_t timeout;
387 uint32_t extcnf_ctrl;
388 int32_t ret_val;
363 389
364 DEBUGFUNC("e1000_reset_hw"); 390 DEBUGFUNC("e1000_reset_hw");
365 391
@@ -369,6 +395,15 @@ e1000_reset_hw(struct e1000_hw *hw)
369 e1000_pci_clear_mwi(hw); 395 e1000_pci_clear_mwi(hw);
370 } 396 }
371 397
398 if(hw->bus_type == e1000_bus_type_pci_express) {
399 /* Prevent the PCI-E bus from sticking if there is no TLP connection
400 * on the last TLP read/write transaction when MAC is reset.
401 */
402 if(e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
403 DEBUGOUT("PCI-E Master disable polling has failed.\n");
404 }
405 }
406
372 /* Clear interrupt mask to stop board from generating interrupts */ 407 /* Clear interrupt mask to stop board from generating interrupts */
373 DEBUGOUT("Masking off all interrupts\n"); 408 DEBUGOUT("Masking off all interrupts\n");
374 E1000_WRITE_REG(hw, IMC, 0xffffffff); 409 E1000_WRITE_REG(hw, IMC, 0xffffffff);
@@ -393,10 +428,32 @@ e1000_reset_hw(struct e1000_hw *hw)
393 428
394 /* Must reset the PHY before resetting the MAC */ 429 /* Must reset the PHY before resetting the MAC */
395 if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { 430 if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
396 E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST)); 431 E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST));
397 msec_delay(5); 432 msec_delay(5);
398 } 433 }
399 434
435 /* Must acquire the MDIO ownership before MAC reset.
436 * Ownership defaults to firmware after a reset. */
437 if(hw->mac_type == e1000_82573) {
438 timeout = 10;
439
440 extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
441 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
442
443 do {
444 E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
445 extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
446
447 if(extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
448 break;
449 else
450 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
451
452 msec_delay(2);
453 timeout--;
454 } while(timeout);
455 }
456
400 /* Issue a global reset to the MAC. This will reset the chip's 457 /* Issue a global reset to the MAC. This will reset the chip's
401 * transmit, receive, DMA, and link units. It will not effect 458 * transmit, receive, DMA, and link units. It will not effect
402 * the current PCI configuration. The global reset bit is self- 459 * the current PCI configuration. The global reset bit is self-
@@ -450,6 +507,18 @@ e1000_reset_hw(struct e1000_hw *hw)
450 /* Wait for EEPROM reload */ 507 /* Wait for EEPROM reload */
451 msec_delay(20); 508 msec_delay(20);
452 break; 509 break;
510 case e1000_82573:
511 udelay(10);
512 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
513 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
514 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
515 E1000_WRITE_FLUSH(hw);
516 /* fall through */
517 ret_val = e1000_get_auto_rd_done(hw);
518 if(ret_val)
519 /* We don't want to continue accessing MAC registers. */
520 return ret_val;
521 break;
453 default: 522 default:
454 /* Wait for EEPROM reload (it happens automatically) */ 523 /* Wait for EEPROM reload (it happens automatically) */
455 msec_delay(5); 524 msec_delay(5);
@@ -457,7 +526,7 @@ e1000_reset_hw(struct e1000_hw *hw)
457 } 526 }
458 527
459 /* Disable HW ARPs on ASF enabled adapters */ 528 /* Disable HW ARPs on ASF enabled adapters */
460 if(hw->mac_type >= e1000_82540) { 529 if(hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
461 manc = E1000_READ_REG(hw, MANC); 530 manc = E1000_READ_REG(hw, MANC);
462 manc &= ~(E1000_MANC_ARP_EN); 531 manc &= ~(E1000_MANC_ARP_EN);
463 E1000_WRITE_REG(hw, MANC, manc); 532 E1000_WRITE_REG(hw, MANC, manc);
@@ -510,6 +579,8 @@ e1000_init_hw(struct e1000_hw *hw)
510 uint16_t pcix_stat_hi_word; 579 uint16_t pcix_stat_hi_word;
511 uint16_t cmd_mmrbc; 580 uint16_t cmd_mmrbc;
512 uint16_t stat_mmrbc; 581 uint16_t stat_mmrbc;
582 uint32_t mta_size;
583
513 DEBUGFUNC("e1000_init_hw"); 584 DEBUGFUNC("e1000_init_hw");
514 585
515 /* Initialize Identification LED */ 586 /* Initialize Identification LED */
@@ -524,8 +595,8 @@ e1000_init_hw(struct e1000_hw *hw)
524 595
525 /* Disabling VLAN filtering. */ 596 /* Disabling VLAN filtering. */
526 DEBUGOUT("Initializing the IEEE VLAN\n"); 597 DEBUGOUT("Initializing the IEEE VLAN\n");
527 E1000_WRITE_REG(hw, VET, 0); 598 if (hw->mac_type < e1000_82545_rev_3)
528 599 E1000_WRITE_REG(hw, VET, 0);
529 e1000_clear_vfta(hw); 600 e1000_clear_vfta(hw);
530 601
531 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ 602 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
@@ -553,14 +624,16 @@ e1000_init_hw(struct e1000_hw *hw)
553 624
554 /* Zero out the Multicast HASH table */ 625 /* Zero out the Multicast HASH table */
555 DEBUGOUT("Zeroing the MTA\n"); 626 DEBUGOUT("Zeroing the MTA\n");
556 for(i = 0; i < E1000_MC_TBL_SIZE; i++) 627 mta_size = E1000_MC_TBL_SIZE;
628 for(i = 0; i < mta_size; i++)
557 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 629 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
558 630
559 /* Set the PCI priority bit correctly in the CTRL register. This 631 /* Set the PCI priority bit correctly in the CTRL register. This
560 * determines if the adapter gives priority to receives, or if it 632 * determines if the adapter gives priority to receives, or if it
561 * gives equal priority to transmits and receives. 633 * gives equal priority to transmits and receives. Valid only on
634 * 82542 and 82543 silicon.
562 */ 635 */
563 if(hw->dma_fairness) { 636 if(hw->dma_fairness && hw->mac_type <= e1000_82543) {
564 ctrl = E1000_READ_REG(hw, CTRL); 637 ctrl = E1000_READ_REG(hw, CTRL);
565 E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR); 638 E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR);
566 } 639 }
@@ -598,9 +671,21 @@ e1000_init_hw(struct e1000_hw *hw)
598 if(hw->mac_type > e1000_82544) { 671 if(hw->mac_type > e1000_82544) {
599 ctrl = E1000_READ_REG(hw, TXDCTL); 672 ctrl = E1000_READ_REG(hw, TXDCTL);
600 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; 673 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
674 switch (hw->mac_type) {
675 default:
676 break;
677 case e1000_82573:
678 ctrl |= E1000_TXDCTL_COUNT_DESC;
679 break;
680 }
601 E1000_WRITE_REG(hw, TXDCTL, ctrl); 681 E1000_WRITE_REG(hw, TXDCTL, ctrl);
602 } 682 }
603 683
684 if (hw->mac_type == e1000_82573) {
685 e1000_enable_tx_pkt_filtering(hw);
686 }
687
688
604 /* Clear all of the statistics registers (clear on read). It is 689 /* Clear all of the statistics registers (clear on read). It is
605 * important that we do this after we have tried to establish link 690 * important that we do this after we have tried to establish link
606 * because the symbol error count will increment wildly if there 691 * because the symbol error count will increment wildly if there
@@ -679,7 +764,7 @@ e1000_setup_link(struct e1000_hw *hw)
679 * control setting, then the variable hw->fc will 764 * control setting, then the variable hw->fc will
680 * be initialized based on a value in the EEPROM. 765 * be initialized based on a value in the EEPROM.
681 */ 766 */
682 if(e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data) < 0) { 767 if(e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data)) {
683 DEBUGOUT("EEPROM Read Error\n"); 768 DEBUGOUT("EEPROM Read Error\n");
684 return -E1000_ERR_EEPROM; 769 return -E1000_ERR_EEPROM;
685 } 770 }
@@ -736,6 +821,7 @@ e1000_setup_link(struct e1000_hw *hw)
736 E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW); 821 E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
737 E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH); 822 E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
738 E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE); 823 E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
824
739 E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time); 825 E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time);
740 826
741 /* Set the flow control receive threshold registers. Normally, 827 /* Set the flow control receive threshold registers. Normally,
@@ -906,20 +992,18 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
906} 992}
907 993
908/****************************************************************************** 994/******************************************************************************
909* Detects which PHY is present and the speed and duplex 995* Make sure we have a valid PHY and change PHY mode before link setup.
910* 996*
911* hw - Struct containing variables accessed by shared code 997* hw - Struct containing variables accessed by shared code
912******************************************************************************/ 998******************************************************************************/
913static int32_t 999static int32_t
914e1000_setup_copper_link(struct e1000_hw *hw) 1000e1000_copper_link_preconfig(struct e1000_hw *hw)
915{ 1001{
916 uint32_t ctrl; 1002 uint32_t ctrl;
917 uint32_t led_ctrl;
918 int32_t ret_val; 1003 int32_t ret_val;
919 uint16_t i;
920 uint16_t phy_data; 1004 uint16_t phy_data;
921 1005
922 DEBUGFUNC("e1000_setup_copper_link"); 1006 DEBUGFUNC("e1000_copper_link_preconfig");
923 1007
924 ctrl = E1000_READ_REG(hw, CTRL); 1008 ctrl = E1000_READ_REG(hw, CTRL);
925 /* With 82543, we need to force speed and duplex on the MAC equal to what 1009 /* With 82543, we need to force speed and duplex on the MAC equal to what
@@ -933,7 +1017,9 @@ e1000_setup_copper_link(struct e1000_hw *hw)
933 } else { 1017 } else {
934 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); 1018 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
935 E1000_WRITE_REG(hw, CTRL, ctrl); 1019 E1000_WRITE_REG(hw, CTRL, ctrl);
936 e1000_phy_hw_reset(hw); 1020 ret_val = e1000_phy_hw_reset(hw);
1021 if(ret_val)
1022 return ret_val;
937 } 1023 }
938 1024
939 /* Make sure we have a valid PHY */ 1025 /* Make sure we have a valid PHY */
@@ -961,274 +1047,398 @@ e1000_setup_copper_link(struct e1000_hw *hw)
961 hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) 1047 hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
962 hw->phy_reset_disable = FALSE; 1048 hw->phy_reset_disable = FALSE;
963 1049
964 if(!hw->phy_reset_disable) { 1050 return E1000_SUCCESS;
965 if (hw->phy_type == e1000_phy_igp) { 1051}
966 1052
967 ret_val = e1000_phy_reset(hw);
968 if(ret_val) {
969 DEBUGOUT("Error Resetting the PHY\n");
970 return ret_val;
971 }
972 1053
973 /* Wait 10ms for MAC to configure PHY from eeprom settings */ 1054/********************************************************************
974 msec_delay(15); 1055* Copper link setup for e1000_phy_igp series.
1056*
1057* hw - Struct containing variables accessed by shared code
1058*********************************************************************/
1059static int32_t
1060e1000_copper_link_igp_setup(struct e1000_hw *hw)
1061{
1062 uint32_t led_ctrl;
1063 int32_t ret_val;
1064 uint16_t phy_data;
975 1065
976 /* Configure activity LED after PHY reset */ 1066 DEBUGFUNC("e1000_copper_link_igp_setup");
977 led_ctrl = E1000_READ_REG(hw, LEDCTL);
978 led_ctrl &= IGP_ACTIVITY_LED_MASK;
979 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
980 E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
981 1067
982 /* disable lplu d3 during driver init */ 1068 if (hw->phy_reset_disable)
983 ret_val = e1000_set_d3_lplu_state(hw, FALSE); 1069 return E1000_SUCCESS;
984 if(ret_val) { 1070
985 DEBUGOUT("Error Disabling LPLU D3\n"); 1071 ret_val = e1000_phy_reset(hw);
986 return ret_val; 1072 if (ret_val) {
987 } 1073 DEBUGOUT("Error Resetting the PHY\n");
1074 return ret_val;
1075 }
988 1076
989 /* Configure mdi-mdix settings */ 1077 /* Wait 10ms for MAC to configure PHY from eeprom settings */
990 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, 1078 msec_delay(15);
991 &phy_data);
992 if(ret_val)
993 return ret_val;
994 1079
995 if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { 1080 /* Configure activity LED after PHY reset */
996 hw->dsp_config_state = e1000_dsp_config_disabled; 1081 led_ctrl = E1000_READ_REG(hw, LEDCTL);
997 /* Force MDI for earlier revs of the IGP PHY */ 1082 led_ctrl &= IGP_ACTIVITY_LED_MASK;
998 phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | 1083 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
999 IGP01E1000_PSCR_FORCE_MDI_MDIX); 1084 E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
1000 hw->mdix = 1;
1001 1085
1002 } else { 1086 /* disable lplu d3 during driver init */
1003 hw->dsp_config_state = e1000_dsp_config_enabled; 1087 ret_val = e1000_set_d3_lplu_state(hw, FALSE);
1004 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; 1088 if (ret_val) {
1005 1089 DEBUGOUT("Error Disabling LPLU D3\n");
1006 switch (hw->mdix) { 1090 return ret_val;
1007 case 1: 1091 }
1008 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
1009 break;
1010 case 2:
1011 phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
1012 break;
1013 case 0:
1014 default:
1015 phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
1016 break;
1017 }
1018 }
1019 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL,
1020 phy_data);
1021 if(ret_val)
1022 return ret_val;
1023 1092
1024 /* set auto-master slave resolution settings */ 1093 /* disable lplu d0 during driver init */
1025 if(hw->autoneg) { 1094 ret_val = e1000_set_d0_lplu_state(hw, FALSE);
1026 e1000_ms_type phy_ms_setting = hw->master_slave; 1095 if (ret_val) {
1096 DEBUGOUT("Error Disabling LPLU D0\n");
1097 return ret_val;
1098 }
1099 /* Configure mdi-mdix settings */
1100 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
1101 if (ret_val)
1102 return ret_val;
1027 1103
1028 if(hw->ffe_config_state == e1000_ffe_config_active) 1104 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
1029 hw->ffe_config_state = e1000_ffe_config_enabled; 1105 hw->dsp_config_state = e1000_dsp_config_disabled;
1106 /* Force MDI for earlier revs of the IGP PHY */
1107 phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX);
1108 hw->mdix = 1;
1030 1109
1031 if(hw->dsp_config_state == e1000_dsp_config_activated) 1110 } else {
1032 hw->dsp_config_state = e1000_dsp_config_enabled; 1111 hw->dsp_config_state = e1000_dsp_config_enabled;
1112 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
1033 1113
1034 /* when autonegotiation advertisment is only 1000Mbps then we 1114 switch (hw->mdix) {
1035 * should disable SmartSpeed and enable Auto MasterSlave 1115 case 1:
1036 * resolution as hardware default. */ 1116 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
1037 if(hw->autoneg_advertised == ADVERTISE_1000_FULL) { 1117 break;
1038 /* Disable SmartSpeed */ 1118 case 2:
1039 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 1119 phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
1040 &phy_data); 1120 break;
1041 if(ret_val) 1121 case 0:
1042 return ret_val; 1122 default:
1043 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1123 phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
1044 ret_val = e1000_write_phy_reg(hw, 1124 break;
1045 IGP01E1000_PHY_PORT_CONFIG, 1125 }
1046 phy_data); 1126 }
1047 if(ret_val) 1127 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
1048 return ret_val; 1128 if(ret_val)
1049 /* Set auto Master/Slave resolution process */ 1129 return ret_val;
1050 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1051 if(ret_val)
1052 return ret_val;
1053 phy_data &= ~CR_1000T_MS_ENABLE;
1054 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1055 if(ret_val)
1056 return ret_val;
1057 }
1058 1130
1059 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); 1131 /* set auto-master slave resolution settings */
1060 if(ret_val) 1132 if(hw->autoneg) {
1061 return ret_val; 1133 e1000_ms_type phy_ms_setting = hw->master_slave;
1062 1134
1063 /* load defaults for future use */ 1135 if(hw->ffe_config_state == e1000_ffe_config_active)
1064 hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? 1136 hw->ffe_config_state = e1000_ffe_config_enabled;
1065 ((phy_data & CR_1000T_MS_VALUE) ? 1137
1066 e1000_ms_force_master : 1138 if(hw->dsp_config_state == e1000_dsp_config_activated)
1067 e1000_ms_force_slave) : 1139 hw->dsp_config_state = e1000_dsp_config_enabled;
1068 e1000_ms_auto; 1140
1069 1141 /* when autonegotiation advertisment is only 1000Mbps then we
1070 switch (phy_ms_setting) { 1142 * should disable SmartSpeed and enable Auto MasterSlave
1071 case e1000_ms_force_master: 1143 * resolution as hardware default. */
1072 phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); 1144 if(hw->autoneg_advertised == ADVERTISE_1000_FULL) {
1073 break; 1145 /* Disable SmartSpeed */
1074 case e1000_ms_force_slave: 1146 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
1075 phy_data |= CR_1000T_MS_ENABLE; 1147 if(ret_val)
1076 phy_data &= ~(CR_1000T_MS_VALUE); 1148 return ret_val;
1077 break; 1149 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1078 case e1000_ms_auto: 1150 ret_val = e1000_write_phy_reg(hw,
1079 phy_data &= ~CR_1000T_MS_ENABLE; 1151 IGP01E1000_PHY_PORT_CONFIG,
1080 default: 1152 phy_data);
1081 break; 1153 if(ret_val)
1082 } 1154 return ret_val;
1083 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); 1155 /* Set auto Master/Slave resolution process */
1084 if(ret_val) 1156 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1085 return ret_val; 1157 if(ret_val)
1086 } 1158 return ret_val;
1087 } else { 1159 phy_data &= ~CR_1000T_MS_ENABLE;
1088 /* Enable CRS on TX. This must be set for half-duplex operation. */ 1160 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1089 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
1090 &phy_data);
1091 if(ret_val) 1161 if(ret_val)
1092 return ret_val; 1162 return ret_val;
1163 }
1093 1164
1094 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 1165 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1166 if(ret_val)
1167 return ret_val;
1095 1168
1096 /* Options: 1169 /* load defaults for future use */
1097 * MDI/MDI-X = 0 (default) 1170 hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
1098 * 0 - Auto for all speeds 1171 ((phy_data & CR_1000T_MS_VALUE) ?
1099 * 1 - MDI mode 1172 e1000_ms_force_master :
1100 * 2 - MDI-X mode 1173 e1000_ms_force_slave) :
1101 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) 1174 e1000_ms_auto;
1102 */
1103 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
1104 1175
1105 switch (hw->mdix) { 1176 switch (phy_ms_setting) {
1106 case 1: 1177 case e1000_ms_force_master:
1107 phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; 1178 phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
1108 break; 1179 break;
1109 case 2: 1180 case e1000_ms_force_slave:
1110 phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; 1181 phy_data |= CR_1000T_MS_ENABLE;
1111 break; 1182 phy_data &= ~(CR_1000T_MS_VALUE);
1112 case 3: 1183 break;
1113 phy_data |= M88E1000_PSCR_AUTO_X_1000T; 1184 case e1000_ms_auto:
1114 break; 1185 phy_data &= ~CR_1000T_MS_ENABLE;
1115 case 0:
1116 default: 1186 default:
1117 phy_data |= M88E1000_PSCR_AUTO_X_MODE; 1187 break;
1118 break; 1188 }
1119 } 1189 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1190 if(ret_val)
1191 return ret_val;
1192 }
1120 1193
1121 /* Options: 1194 return E1000_SUCCESS;
1122 * disable_polarity_correction = 0 (default) 1195}
1123 * Automatic Correction for Reversed Cable Polarity
1124 * 0 - Disabled
1125 * 1 - Enabled
1126 */
1127 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
1128 if(hw->disable_polarity_correction == 1)
1129 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
1130 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
1131 phy_data);
1132 if(ret_val)
1133 return ret_val;
1134 1196
1135 /* Force TX_CLK in the Extended PHY Specific Control Register
1136 * to 25MHz clock.
1137 */
1138 ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
1139 &phy_data);
1140 if(ret_val)
1141 return ret_val;
1142 1197
1143 phy_data |= M88E1000_EPSCR_TX_CLK_25; 1198/********************************************************************
1199* Copper link setup for e1000_phy_m88 series.
1200*
1201* hw - Struct containing variables accessed by shared code
1202*********************************************************************/
1203static int32_t
1204e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1205{
1206 int32_t ret_val;
1207 uint16_t phy_data;
1208
1209 DEBUGFUNC("e1000_copper_link_mgp_setup");
1210
1211 if(hw->phy_reset_disable)
1212 return E1000_SUCCESS;
1213
1214 /* Enable CRS on TX. This must be set for half-duplex operation. */
1215 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1216 if(ret_val)
1217 return ret_val;
1218
1219 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
1220
1221 /* Options:
1222 * MDI/MDI-X = 0 (default)
1223 * 0 - Auto for all speeds
1224 * 1 - MDI mode
1225 * 2 - MDI-X mode
1226 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
1227 */
1228 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
1229
1230 switch (hw->mdix) {
1231 case 1:
1232 phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
1233 break;
1234 case 2:
1235 phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
1236 break;
1237 case 3:
1238 phy_data |= M88E1000_PSCR_AUTO_X_1000T;
1239 break;
1240 case 0:
1241 default:
1242 phy_data |= M88E1000_PSCR_AUTO_X_MODE;
1243 break;
1244 }
1245
1246 /* Options:
1247 * disable_polarity_correction = 0 (default)
1248 * Automatic Correction for Reversed Cable Polarity
1249 * 0 - Disabled
1250 * 1 - Enabled
1251 */
1252 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
1253 if(hw->disable_polarity_correction == 1)
1254 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
1255 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
1256 if(ret_val)
1257 return ret_val;
1144 1258
1145 if (hw->phy_revision < M88E1011_I_REV_4) { 1259 /* Force TX_CLK in the Extended PHY Specific Control Register
1146 /* Configure Master and Slave downshift values */ 1260 * to 25MHz clock.
1147 phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | 1261 */
1262 ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
1263 if(ret_val)
1264 return ret_val;
1265
1266 phy_data |= M88E1000_EPSCR_TX_CLK_25;
1267
1268 if (hw->phy_revision < M88E1011_I_REV_4) {
1269 /* Configure Master and Slave downshift values */
1270 phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
1148 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); 1271 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
1149 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | 1272 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
1150 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); 1273 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
1151 ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 1274 ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
1152 phy_data); 1275 if(ret_val)
1153 if(ret_val) 1276 return ret_val;
1154 return ret_val; 1277 }
1155 }
1156 1278
1157 /* SW Reset the PHY so all changes take effect */ 1279 /* SW Reset the PHY so all changes take effect */
1158 ret_val = e1000_phy_reset(hw); 1280 ret_val = e1000_phy_reset(hw);
1159 if(ret_val) { 1281 if(ret_val) {
1160 DEBUGOUT("Error Resetting the PHY\n"); 1282 DEBUGOUT("Error Resetting the PHY\n");
1161 return ret_val; 1283 return ret_val;
1162 } 1284 }
1285
1286 return E1000_SUCCESS;
1287}
1288
1289/********************************************************************
1290* Setup auto-negotiation and flow control advertisements,
1291* and then perform auto-negotiation.
1292*
1293* hw - Struct containing variables accessed by shared code
1294*********************************************************************/
1295static int32_t
1296e1000_copper_link_autoneg(struct e1000_hw *hw)
1297{
1298 int32_t ret_val;
1299 uint16_t phy_data;
1300
1301 DEBUGFUNC("e1000_copper_link_autoneg");
1302
1303 /* Perform some bounds checking on the hw->autoneg_advertised
1304 * parameter. If this variable is zero, then set it to the default.
1305 */
1306 hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
1307
1308 /* If autoneg_advertised is zero, we assume it was not defaulted
1309 * by the calling code so we set to advertise full capability.
1310 */
1311 if(hw->autoneg_advertised == 0)
1312 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
1313
1314 DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
1315 ret_val = e1000_phy_setup_autoneg(hw);
1316 if(ret_val) {
1317 DEBUGOUT("Error Setting up Auto-Negotiation\n");
1318 return ret_val;
1319 }
1320 DEBUGOUT("Restarting Auto-Neg\n");
1321
1322 /* Restart auto-negotiation by setting the Auto Neg Enable bit and
1323 * the Auto Neg Restart bit in the PHY control register.
1324 */
1325 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
1326 if(ret_val)
1327 return ret_val;
1328
1329 phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
1330 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
1331 if(ret_val)
1332 return ret_val;
1333
1334 /* Does the user want to wait for Auto-Neg to complete here, or
1335 * check at a later time (for example, callback routine).
1336 */
1337 if(hw->wait_autoneg_complete) {
1338 ret_val = e1000_wait_autoneg(hw);
1339 if(ret_val) {
1340 DEBUGOUT("Error while waiting for autoneg to complete\n");
1341 return ret_val;
1163 } 1342 }
1343 }
1164 1344
1165 /* Options: 1345 hw->get_link_status = TRUE;
1166 * autoneg = 1 (default)
1167 * PHY will advertise value(s) parsed from
1168 * autoneg_advertised and fc
1169 * autoneg = 0
1170 * PHY will be set to 10H, 10F, 100H, or 100F
1171 * depending on value parsed from forced_speed_duplex.
1172 */
1173 1346
1174 /* Is autoneg enabled? This is enabled by default or by software 1347 return E1000_SUCCESS;
1175 * override. If so, call e1000_phy_setup_autoneg routine to parse the 1348}
1176 * autoneg_advertised and fc options. If autoneg is NOT enabled, then
1177 * the user should have provided a speed/duplex override. If so, then
1178 * call e1000_phy_force_speed_duplex to parse and set this up.
1179 */
1180 if(hw->autoneg) {
1181 /* Perform some bounds checking on the hw->autoneg_advertised
1182 * parameter. If this variable is zero, then set it to the default.
1183 */
1184 hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
1185 1349
1186 /* If autoneg_advertised is zero, we assume it was not defaulted
1187 * by the calling code so we set to advertise full capability.
1188 */
1189 if(hw->autoneg_advertised == 0)
1190 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
1191 1350
1192 DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); 1351/******************************************************************************
1193 ret_val = e1000_phy_setup_autoneg(hw); 1352* Config the MAC and the PHY after link is up.
1194 if(ret_val) { 1353* 1) Set up the MAC to the current PHY speed/duplex
1195 DEBUGOUT("Error Setting up Auto-Negotiation\n"); 1354* if we are on 82543. If we
1196 return ret_val; 1355* are on newer silicon, we only need to configure
1197 } 1356* collision distance in the Transmit Control Register.
1198 DEBUGOUT("Restarting Auto-Neg\n"); 1357* 2) Set up flow control on the MAC to that established with
1358* the link partner.
1359* 3) Config DSP to improve Gigabit link quality for some PHY revisions.
1360*
1361* hw - Struct containing variables accessed by shared code
1362******************************************************************************/
1363static int32_t
1364e1000_copper_link_postconfig(struct e1000_hw *hw)
1365{
1366 int32_t ret_val;
1367 DEBUGFUNC("e1000_copper_link_postconfig");
1368
1369 if(hw->mac_type >= e1000_82544) {
1370 e1000_config_collision_dist(hw);
1371 } else {
1372 ret_val = e1000_config_mac_to_phy(hw);
1373 if(ret_val) {
1374 DEBUGOUT("Error configuring MAC to PHY settings\n");
1375 return ret_val;
1376 }
1377 }
1378 ret_val = e1000_config_fc_after_link_up(hw);
1379 if(ret_val) {
1380 DEBUGOUT("Error Configuring Flow Control\n");
1381 return ret_val;
1382 }
1199 1383
1200 /* Restart auto-negotiation by setting the Auto Neg Enable bit and 1384 /* Config DSP to improve Giga link quality */
1201 * the Auto Neg Restart bit in the PHY control register. 1385 if(hw->phy_type == e1000_phy_igp) {
1202 */ 1386 ret_val = e1000_config_dsp_after_link_change(hw, TRUE);
1203 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); 1387 if(ret_val) {
1204 if(ret_val) 1388 DEBUGOUT("Error Configuring DSP after link up\n");
1205 return ret_val; 1389 return ret_val;
1390 }
1391 }
1392
1393 return E1000_SUCCESS;
1394}
1206 1395
1207 phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 1396/******************************************************************************
1208 ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); 1397* Detects which PHY is present and setup the speed and duplex
1209 if(ret_val) 1398*
1210 return ret_val; 1399* hw - Struct containing variables accessed by shared code
1400******************************************************************************/
1401static int32_t
1402e1000_setup_copper_link(struct e1000_hw *hw)
1403{
1404 int32_t ret_val;
1405 uint16_t i;
1406 uint16_t phy_data;
1211 1407
1212 /* Does the user want to wait for Auto-Neg to complete here, or 1408 DEBUGFUNC("e1000_setup_copper_link");
1213 * check at a later time (for example, callback routine). 1409
1214 */ 1410 /* Check if it is a valid PHY and set PHY mode if necessary. */
1215 if(hw->wait_autoneg_complete) { 1411 ret_val = e1000_copper_link_preconfig(hw);
1216 ret_val = e1000_wait_autoneg(hw); 1412 if(ret_val)
1217 if(ret_val) { 1413 return ret_val;
1218 DEBUGOUT("Error while waiting for autoneg to complete\n"); 1414
1219 return ret_val; 1415 if (hw->phy_type == e1000_phy_igp ||
1220 } 1416 hw->phy_type == e1000_phy_igp_2) {
1221 } 1417 ret_val = e1000_copper_link_igp_setup(hw);
1222 hw->get_link_status = TRUE; 1418 if(ret_val)
1223 } else { 1419 return ret_val;
1224 DEBUGOUT("Forcing speed and duplex\n"); 1420 } else if (hw->phy_type == e1000_phy_m88) {
1225 ret_val = e1000_phy_force_speed_duplex(hw); 1421 ret_val = e1000_copper_link_mgp_setup(hw);
1226 if(ret_val) { 1422 if(ret_val)
1227 DEBUGOUT("Error Forcing Speed and Duplex\n"); 1423 return ret_val;
1228 return ret_val; 1424 }
1229 } 1425
1426 if(hw->autoneg) {
1427 /* Setup autoneg and flow control advertisement
1428 * and perform autonegotiation */
1429 ret_val = e1000_copper_link_autoneg(hw);
1430 if(ret_val)
1431 return ret_val;
1432 } else {
1433 /* PHY will be set to 10H, 10F, 100H,or 100F
1434 * depending on value from forced_speed_duplex. */
1435 DEBUGOUT("Forcing speed and duplex\n");
1436 ret_val = e1000_phy_force_speed_duplex(hw);
1437 if(ret_val) {
1438 DEBUGOUT("Error Forcing Speed and Duplex\n");
1439 return ret_val;
1230 } 1440 }
1231 } /* !hw->phy_reset_disable */ 1441 }
1232 1442
1233 /* Check link status. Wait up to 100 microseconds for link to become 1443 /* Check link status. Wait up to 100 microseconds for link to become
1234 * valid. 1444 * valid.
@@ -1242,37 +1452,11 @@ e1000_setup_copper_link(struct e1000_hw *hw)
1242 return ret_val; 1452 return ret_val;
1243 1453
1244 if(phy_data & MII_SR_LINK_STATUS) { 1454 if(phy_data & MII_SR_LINK_STATUS) {
1245 /* We have link, so we need to finish the config process: 1455 /* Config the MAC and PHY after link is up */
1246 * 1) Set up the MAC to the current PHY speed/duplex 1456 ret_val = e1000_copper_link_postconfig(hw);
1247 * if we are on 82543. If we 1457 if(ret_val)
1248 * are on newer silicon, we only need to configure
1249 * collision distance in the Transmit Control Register.
1250 * 2) Set up flow control on the MAC to that established with
1251 * the link partner.
1252 */
1253 if(hw->mac_type >= e1000_82544) {
1254 e1000_config_collision_dist(hw);
1255 } else {
1256 ret_val = e1000_config_mac_to_phy(hw);
1257 if(ret_val) {
1258 DEBUGOUT("Error configuring MAC to PHY settings\n");
1259 return ret_val;
1260 }
1261 }
1262 ret_val = e1000_config_fc_after_link_up(hw);
1263 if(ret_val) {
1264 DEBUGOUT("Error Configuring Flow Control\n");
1265 return ret_val; 1458 return ret_val;
1266 } 1459
1267 DEBUGOUT("Valid link established!!!\n");
1268
1269 if(hw->phy_type == e1000_phy_igp) {
1270 ret_val = e1000_config_dsp_after_link_change(hw, TRUE);
1271 if(ret_val) {
1272 DEBUGOUT("Error Configuring DSP after link up\n");
1273 return ret_val;
1274 }
1275 }
1276 DEBUGOUT("Valid link established!!!\n"); 1460 DEBUGOUT("Valid link established!!!\n");
1277 return E1000_SUCCESS; 1461 return E1000_SUCCESS;
1278 } 1462 }
@@ -1302,10 +1486,10 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
1302 if(ret_val) 1486 if(ret_val)
1303 return ret_val; 1487 return ret_val;
1304 1488
1305 /* Read the MII 1000Base-T Control Register (Address 9). */ 1489 /* Read the MII 1000Base-T Control Register (Address 9). */
1306 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); 1490 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
1307 if(ret_val) 1491 if(ret_val)
1308 return ret_val; 1492 return ret_val;
1309 1493
1310 /* Need to parse both autoneg_advertised and fc and set up 1494 /* Need to parse both autoneg_advertised and fc and set up
1311 * the appropriate PHY registers. First we will parse for 1495 * the appropriate PHY registers. First we will parse for
@@ -1417,7 +1601,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
1417 1601
1418 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 1602 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
1419 1603
1420 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); 1604 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
1421 if(ret_val) 1605 if(ret_val)
1422 return ret_val; 1606 return ret_val;
1423 1607
@@ -1678,6 +1862,11 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
1678 1862
1679 DEBUGFUNC("e1000_config_mac_to_phy"); 1863 DEBUGFUNC("e1000_config_mac_to_phy");
1680 1864
1865 /* 82544 or newer MAC, Auto Speed Detection takes care of
1866 * MAC speed/duplex configuration.*/
1867 if (hw->mac_type >= e1000_82544)
1868 return E1000_SUCCESS;
1869
1681 /* Read the Device Control Register and set the bits to Force Speed 1870 /* Read the Device Control Register and set the bits to Force Speed
1682 * and Duplex. 1871 * and Duplex.
1683 */ 1872 */
@@ -1688,45 +1877,25 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
1688 /* Set up duplex in the Device Control and Transmit Control 1877 /* Set up duplex in the Device Control and Transmit Control
1689 * registers depending on negotiated values. 1878 * registers depending on negotiated values.
1690 */ 1879 */
1691 if (hw->phy_type == e1000_phy_igp) { 1880 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
1692 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 1881 if(ret_val)
1693 &phy_data); 1882 return ret_val;
1694 if(ret_val)
1695 return ret_val;
1696
1697 if(phy_data & IGP01E1000_PSSR_FULL_DUPLEX) ctrl |= E1000_CTRL_FD;
1698 else ctrl &= ~E1000_CTRL_FD;
1699
1700 e1000_config_collision_dist(hw);
1701 1883
1702 /* Set up speed in the Device Control register depending on 1884 if(phy_data & M88E1000_PSSR_DPLX)
1703 * negotiated values. 1885 ctrl |= E1000_CTRL_FD;
1704 */ 1886 else
1705 if((phy_data & IGP01E1000_PSSR_SPEED_MASK) == 1887 ctrl &= ~E1000_CTRL_FD;
1706 IGP01E1000_PSSR_SPEED_1000MBPS)
1707 ctrl |= E1000_CTRL_SPD_1000;
1708 else if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
1709 IGP01E1000_PSSR_SPEED_100MBPS)
1710 ctrl |= E1000_CTRL_SPD_100;
1711 } else {
1712 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
1713 &phy_data);
1714 if(ret_val)
1715 return ret_val;
1716 1888
1717 if(phy_data & M88E1000_PSSR_DPLX) ctrl |= E1000_CTRL_FD; 1889 e1000_config_collision_dist(hw);
1718 else ctrl &= ~E1000_CTRL_FD;
1719 1890
1720 e1000_config_collision_dist(hw); 1891 /* Set up speed in the Device Control register depending on
1892 * negotiated values.
1893 */
1894 if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
1895 ctrl |= E1000_CTRL_SPD_1000;
1896 else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
1897 ctrl |= E1000_CTRL_SPD_100;
1721 1898
1722 /* Set up speed in the Device Control register depending on
1723 * negotiated values.
1724 */
1725 if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
1726 ctrl |= E1000_CTRL_SPD_1000;
1727 else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
1728 ctrl |= E1000_CTRL_SPD_100;
1729 }
1730 /* Write the configured values back to the Device Control Reg. */ 1899 /* Write the configured values back to the Device Control Reg. */
1731 E1000_WRITE_REG(hw, CTRL, ctrl); 1900 E1000_WRITE_REG(hw, CTRL, ctrl);
1732 return E1000_SUCCESS; 1901 return E1000_SUCCESS;
@@ -2494,8 +2663,8 @@ e1000_read_phy_reg(struct e1000_hw *hw,
2494 2663
2495 DEBUGFUNC("e1000_read_phy_reg"); 2664 DEBUGFUNC("e1000_read_phy_reg");
2496 2665
2497 2666 if((hw->phy_type == e1000_phy_igp ||
2498 if(hw->phy_type == e1000_phy_igp && 2667 hw->phy_type == e1000_phy_igp_2) &&
2499 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2668 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
2500 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 2669 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
2501 (uint16_t)reg_addr); 2670 (uint16_t)reg_addr);
@@ -2600,8 +2769,8 @@ e1000_write_phy_reg(struct e1000_hw *hw,
2600 2769
2601 DEBUGFUNC("e1000_write_phy_reg"); 2770 DEBUGFUNC("e1000_write_phy_reg");
2602 2771
2603 2772 if((hw->phy_type == e1000_phy_igp ||
2604 if(hw->phy_type == e1000_phy_igp && 2773 hw->phy_type == e1000_phy_igp_2) &&
2605 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2774 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
2606 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 2775 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
2607 (uint16_t)reg_addr); 2776 (uint16_t)reg_addr);
@@ -2679,19 +2848,27 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
2679 return E1000_SUCCESS; 2848 return E1000_SUCCESS;
2680} 2849}
2681 2850
2851
2682/****************************************************************************** 2852/******************************************************************************
2683* Returns the PHY to the power-on reset state 2853* Returns the PHY to the power-on reset state
2684* 2854*
2685* hw - Struct containing variables accessed by shared code 2855* hw - Struct containing variables accessed by shared code
2686******************************************************************************/ 2856******************************************************************************/
2687void 2857int32_t
2688e1000_phy_hw_reset(struct e1000_hw *hw) 2858e1000_phy_hw_reset(struct e1000_hw *hw)
2689{ 2859{
2690 uint32_t ctrl, ctrl_ext; 2860 uint32_t ctrl, ctrl_ext;
2691 uint32_t led_ctrl; 2861 uint32_t led_ctrl;
2862 int32_t ret_val;
2692 2863
2693 DEBUGFUNC("e1000_phy_hw_reset"); 2864 DEBUGFUNC("e1000_phy_hw_reset");
2694 2865
2866 /* In the case of the phy reset being blocked, it's not an error, we
2867 * simply return success without performing the reset. */
2868 ret_val = e1000_check_phy_reset_block(hw);
2869 if (ret_val)
2870 return E1000_SUCCESS;
2871
2695 DEBUGOUT("Resetting Phy...\n"); 2872 DEBUGOUT("Resetting Phy...\n");
2696 2873
2697 if(hw->mac_type > e1000_82543) { 2874 if(hw->mac_type > e1000_82543) {
@@ -2727,6 +2904,11 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
2727 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); 2904 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
2728 E1000_WRITE_REG(hw, LEDCTL, led_ctrl); 2905 E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
2729 } 2906 }
2907
2908 /* Wait for FW to finish PHY configuration. */
2909 ret_val = e1000_get_phy_cfg_done(hw);
2910
2911 return ret_val;
2730} 2912}
2731 2913
2732/****************************************************************************** 2914/******************************************************************************
@@ -2744,7 +2926,19 @@ e1000_phy_reset(struct e1000_hw *hw)
2744 2926
2745 DEBUGFUNC("e1000_phy_reset"); 2927 DEBUGFUNC("e1000_phy_reset");
2746 2928
2747 if(hw->mac_type != e1000_82541_rev_2) { 2929 /* In the case of the phy reset being blocked, it's not an error, we
2930 * simply return success without performing the reset. */
2931 ret_val = e1000_check_phy_reset_block(hw);
2932 if (ret_val)
2933 return E1000_SUCCESS;
2934
2935 switch (hw->mac_type) {
2936 case e1000_82541_rev_2:
2937 ret_val = e1000_phy_hw_reset(hw);
2938 if(ret_val)
2939 return ret_val;
2940 break;
2941 default:
2748 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); 2942 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
2749 if(ret_val) 2943 if(ret_val)
2750 return ret_val; 2944 return ret_val;
@@ -2755,9 +2949,10 @@ e1000_phy_reset(struct e1000_hw *hw)
2755 return ret_val; 2949 return ret_val;
2756 2950
2757 udelay(1); 2951 udelay(1);
2758 } else e1000_phy_hw_reset(hw); 2952 break;
2953 }
2759 2954
2760 if(hw->phy_type == e1000_phy_igp) 2955 if(hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
2761 e1000_phy_init_script(hw); 2956 e1000_phy_init_script(hw);
2762 2957
2763 return E1000_SUCCESS; 2958 return E1000_SUCCESS;
@@ -2811,6 +3006,9 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
2811 case e1000_82547_rev_2: 3006 case e1000_82547_rev_2:
2812 if(hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE; 3007 if(hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
2813 break; 3008 break;
3009 case e1000_82573:
3010 if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE;
3011 break;
2814 default: 3012 default:
2815 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); 3013 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
2816 return -E1000_ERR_CONFIG; 3014 return -E1000_ERR_CONFIG;
@@ -2866,7 +3064,7 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
2866 3064
2867 /* The downshift status is checked only once, after link is established, 3065 /* The downshift status is checked only once, after link is established,
2868 * and it stored in the hw->speed_downgraded parameter. */ 3066 * and it stored in the hw->speed_downgraded parameter. */
2869 phy_info->downshift = hw->speed_downgraded; 3067 phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
2870 3068
2871 /* IGP01E1000 does not need to support it. */ 3069 /* IGP01E1000 does not need to support it. */
2872 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; 3070 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
@@ -2905,7 +3103,7 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
2905 if(ret_val) 3103 if(ret_val)
2906 return ret_val; 3104 return ret_val;
2907 3105
2908 /* transalte to old method */ 3106 /* Translate to old method */
2909 average = (max_length + min_length) / 2; 3107 average = (max_length + min_length) / 2;
2910 3108
2911 if(average <= e1000_igp_cable_length_50) 3109 if(average <= e1000_igp_cable_length_50)
@@ -2940,7 +3138,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
2940 3138
2941 /* The downshift status is checked only once, after link is established, 3139 /* The downshift status is checked only once, after link is established,
2942 * and it stored in the hw->speed_downgraded parameter. */ 3140 * and it stored in the hw->speed_downgraded parameter. */
2943 phy_info->downshift = hw->speed_downgraded; 3141 phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
2944 3142
2945 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 3143 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
2946 if(ret_val) 3144 if(ret_val)
@@ -3029,7 +3227,8 @@ e1000_phy_get_info(struct e1000_hw *hw,
3029 return -E1000_ERR_CONFIG; 3227 return -E1000_ERR_CONFIG;
3030 } 3228 }
3031 3229
3032 if(hw->phy_type == e1000_phy_igp) 3230 if(hw->phy_type == e1000_phy_igp ||
3231 hw->phy_type == e1000_phy_igp_2)
3033 return e1000_phy_igp_get_info(hw, phy_info); 3232 return e1000_phy_igp_get_info(hw, phy_info);
3034 else 3233 else
3035 return e1000_phy_m88_get_info(hw, phy_info); 3234 return e1000_phy_m88_get_info(hw, phy_info);
@@ -3055,11 +3254,12 @@ e1000_validate_mdi_setting(struct e1000_hw *hw)
3055 * 3254 *
3056 * hw - Struct containing variables accessed by shared code 3255 * hw - Struct containing variables accessed by shared code
3057 *****************************************************************************/ 3256 *****************************************************************************/
3058void 3257int32_t
3059e1000_init_eeprom_params(struct e1000_hw *hw) 3258e1000_init_eeprom_params(struct e1000_hw *hw)
3060{ 3259{
3061 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3260 struct e1000_eeprom_info *eeprom = &hw->eeprom;
3062 uint32_t eecd = E1000_READ_REG(hw, EECD); 3261 uint32_t eecd = E1000_READ_REG(hw, EECD);
3262 int32_t ret_val = E1000_SUCCESS;
3063 uint16_t eeprom_size; 3263 uint16_t eeprom_size;
3064 3264
3065 DEBUGFUNC("e1000_init_eeprom_params"); 3265 DEBUGFUNC("e1000_init_eeprom_params");
@@ -3074,6 +3274,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
3074 eeprom->opcode_bits = 3; 3274 eeprom->opcode_bits = 3;
3075 eeprom->address_bits = 6; 3275 eeprom->address_bits = 6;
3076 eeprom->delay_usec = 50; 3276 eeprom->delay_usec = 50;
3277 eeprom->use_eerd = FALSE;
3278 eeprom->use_eewr = FALSE;
3077 break; 3279 break;
3078 case e1000_82540: 3280 case e1000_82540:
3079 case e1000_82545: 3281 case e1000_82545:
@@ -3090,6 +3292,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
3090 eeprom->word_size = 64; 3292 eeprom->word_size = 64;
3091 eeprom->address_bits = 6; 3293 eeprom->address_bits = 6;
3092 } 3294 }
3295 eeprom->use_eerd = FALSE;
3296 eeprom->use_eewr = FALSE;
3093 break; 3297 break;
3094 case e1000_82541: 3298 case e1000_82541:
3095 case e1000_82541_rev_2: 3299 case e1000_82541_rev_2:
@@ -3118,42 +3322,60 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
3118 eeprom->address_bits = 6; 3322 eeprom->address_bits = 6;
3119 } 3323 }
3120 } 3324 }
3325 eeprom->use_eerd = FALSE;
3326 eeprom->use_eewr = FALSE;
3327 break;
3328 case e1000_82573:
3329 eeprom->type = e1000_eeprom_spi;
3330 eeprom->opcode_bits = 8;
3331 eeprom->delay_usec = 1;
3332 if (eecd & E1000_EECD_ADDR_BITS) {
3333 eeprom->page_size = 32;
3334 eeprom->address_bits = 16;
3335 } else {
3336 eeprom->page_size = 8;
3337 eeprom->address_bits = 8;
3338 }
3339 eeprom->use_eerd = TRUE;
3340 eeprom->use_eewr = TRUE;
3341 if(e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
3342 eeprom->type = e1000_eeprom_flash;
3343 eeprom->word_size = 2048;
3344
3345 /* Ensure that the Autonomous FLASH update bit is cleared due to
3346 * Flash update issue on parts which use a FLASH for NVM. */
3347 eecd &= ~E1000_EECD_AUPDEN;
3348 E1000_WRITE_REG(hw, EECD, eecd);
3349 }
3121 break; 3350 break;
3122 default: 3351 default:
3123 break; 3352 break;
3124 } 3353 }
3125 3354
3126 if (eeprom->type == e1000_eeprom_spi) { 3355 if (eeprom->type == e1000_eeprom_spi) {
3127 eeprom->word_size = 64; 3356 /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
3128 if (e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size) == 0) { 3357 * 32KB (incremented by powers of 2).
3129 eeprom_size &= EEPROM_SIZE_MASK; 3358 */
3130 3359 if(hw->mac_type <= e1000_82547_rev_2) {
3131 switch (eeprom_size) { 3360 /* Set to default value for initial eeprom read. */
3132 case EEPROM_SIZE_16KB: 3361 eeprom->word_size = 64;
3133 eeprom->word_size = 8192; 3362 ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
3134 break; 3363 if(ret_val)
3135 case EEPROM_SIZE_8KB: 3364 return ret_val;
3136 eeprom->word_size = 4096; 3365 eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
3137 break; 3366 /* 256B eeprom size was not supported in earlier hardware, so we
3138 case EEPROM_SIZE_4KB: 3367 * bump eeprom_size up one to ensure that "1" (which maps to 256B)
3139 eeprom->word_size = 2048; 3368 * is never the result used in the shifting logic below. */
3140 break; 3369 if(eeprom_size)
3141 case EEPROM_SIZE_2KB: 3370 eeprom_size++;
3142 eeprom->word_size = 1024; 3371 } else {
3143 break; 3372 eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >>
3144 case EEPROM_SIZE_1KB: 3373 E1000_EECD_SIZE_EX_SHIFT);
3145 eeprom->word_size = 512;
3146 break;
3147 case EEPROM_SIZE_512B:
3148 eeprom->word_size = 256;
3149 break;
3150 case EEPROM_SIZE_128B:
3151 default:
3152 eeprom->word_size = 64;
3153 break;
3154 }
3155 } 3374 }
3375
3376 eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
3156 } 3377 }
3378 return ret_val;
3157} 3379}
3158 3380
3159/****************************************************************************** 3381/******************************************************************************
@@ -3306,8 +3528,12 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
3306 3528
3307 DEBUGFUNC("e1000_acquire_eeprom"); 3529 DEBUGFUNC("e1000_acquire_eeprom");
3308 3530
3531 if(e1000_get_hw_eeprom_semaphore(hw))
3532 return -E1000_ERR_EEPROM;
3533
3309 eecd = E1000_READ_REG(hw, EECD); 3534 eecd = E1000_READ_REG(hw, EECD);
3310 3535
3536 if (hw->mac_type != e1000_82573) {
3311 /* Request EEPROM Access */ 3537 /* Request EEPROM Access */
3312 if(hw->mac_type > e1000_82544) { 3538 if(hw->mac_type > e1000_82544) {
3313 eecd |= E1000_EECD_REQ; 3539 eecd |= E1000_EECD_REQ;
@@ -3326,6 +3552,7 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
3326 return -E1000_ERR_EEPROM; 3552 return -E1000_ERR_EEPROM;
3327 } 3553 }
3328 } 3554 }
3555 }
3329 3556
3330 /* Setup EEPROM for Read/Write */ 3557 /* Setup EEPROM for Read/Write */
3331 3558
@@ -3443,6 +3670,8 @@ e1000_release_eeprom(struct e1000_hw *hw)
3443 eecd &= ~E1000_EECD_REQ; 3670 eecd &= ~E1000_EECD_REQ;
3444 E1000_WRITE_REG(hw, EECD, eecd); 3671 E1000_WRITE_REG(hw, EECD, eecd);
3445 } 3672 }
3673
3674 e1000_put_hw_eeprom_semaphore(hw);
3446} 3675}
3447 3676
3448/****************************************************************************** 3677/******************************************************************************
@@ -3504,8 +3733,10 @@ e1000_read_eeprom(struct e1000_hw *hw,
3504{ 3733{
3505 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3734 struct e1000_eeprom_info *eeprom = &hw->eeprom;
3506 uint32_t i = 0; 3735 uint32_t i = 0;
3736 int32_t ret_val;
3507 3737
3508 DEBUGFUNC("e1000_read_eeprom"); 3738 DEBUGFUNC("e1000_read_eeprom");
3739
3509 /* A check for invalid values: offset too large, too many words, and not 3740 /* A check for invalid values: offset too large, too many words, and not
3510 * enough words. 3741 * enough words.
3511 */ 3742 */
@@ -3515,9 +3746,23 @@ e1000_read_eeprom(struct e1000_hw *hw,
3515 return -E1000_ERR_EEPROM; 3746 return -E1000_ERR_EEPROM;
3516 } 3747 }
3517 3748
3518 /* Prepare the EEPROM for reading */ 3749 /* FLASH reads without acquiring the semaphore are safe in 82573-based
3519 if(e1000_acquire_eeprom(hw) != E1000_SUCCESS) 3750 * controllers.
3520 return -E1000_ERR_EEPROM; 3751 */
3752 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
3753 (hw->mac_type != e1000_82573)) {
3754 /* Prepare the EEPROM for reading */
3755 if(e1000_acquire_eeprom(hw) != E1000_SUCCESS)
3756 return -E1000_ERR_EEPROM;
3757 }
3758
3759 if(eeprom->use_eerd == TRUE) {
3760 ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
3761 if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
3762 (hw->mac_type != e1000_82573))
3763 e1000_release_eeprom(hw);
3764 return ret_val;
3765 }
3521 3766
3522 if(eeprom->type == e1000_eeprom_spi) { 3767 if(eeprom->type == e1000_eeprom_spi) {
3523 uint16_t word_in; 3768 uint16_t word_in;
@@ -3569,6 +3814,132 @@ e1000_read_eeprom(struct e1000_hw *hw,
3569} 3814}
3570 3815
3571/****************************************************************************** 3816/******************************************************************************
3817 * Reads a 16 bit word from the EEPROM using the EERD register.
3818 *
3819 * hw - Struct containing variables accessed by shared code
3820 * offset - offset of word in the EEPROM to read
3821 * data - word read from the EEPROM
3822 * words - number of words to read
3823 *****************************************************************************/
3824int32_t
3825e1000_read_eeprom_eerd(struct e1000_hw *hw,
3826 uint16_t offset,
3827 uint16_t words,
3828 uint16_t *data)
3829{
3830 uint32_t i, eerd = 0;
3831 int32_t error = 0;
3832
3833 for (i = 0; i < words; i++) {
3834 eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
3835 E1000_EEPROM_RW_REG_START;
3836
3837 E1000_WRITE_REG(hw, EERD, eerd);
3838 error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
3839
3840 if(error) {
3841 break;
3842 }
3843 data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA);
3844
3845 }
3846
3847 return error;
3848}
3849
3850/******************************************************************************
3851 * Writes a 16 bit word from the EEPROM using the EEWR register.
3852 *
3853 * hw - Struct containing variables accessed by shared code
3854 * offset - offset of word in the EEPROM to read
3855 * data - word read from the EEPROM
3856 * words - number of words to read
3857 *****************************************************************************/
3858int32_t
3859e1000_write_eeprom_eewr(struct e1000_hw *hw,
3860 uint16_t offset,
3861 uint16_t words,
3862 uint16_t *data)
3863{
3864 uint32_t register_value = 0;
3865 uint32_t i = 0;
3866 int32_t error = 0;
3867
3868 for (i = 0; i < words; i++) {
3869 register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
3870 ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
3871 E1000_EEPROM_RW_REG_START;
3872
3873 error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
3874 if(error) {
3875 break;
3876 }
3877
3878 E1000_WRITE_REG(hw, EEWR, register_value);
3879
3880 error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
3881
3882 if(error) {
3883 break;
3884 }
3885 }
3886
3887 return error;
3888}
3889
3890/******************************************************************************
3891 * Polls the status bit (bit 1) of the EERD to determine when the read is done.
3892 *
3893 * hw - Struct containing variables accessed by shared code
3894 *****************************************************************************/
3895int32_t
3896e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
3897{
3898 uint32_t attempts = 100000;
3899 uint32_t i, reg = 0;
3900 int32_t done = E1000_ERR_EEPROM;
3901
3902 for(i = 0; i < attempts; i++) {
3903 if(eerd == E1000_EEPROM_POLL_READ)
3904 reg = E1000_READ_REG(hw, EERD);
3905 else
3906 reg = E1000_READ_REG(hw, EEWR);
3907
3908 if(reg & E1000_EEPROM_RW_REG_DONE) {
3909 done = E1000_SUCCESS;
3910 break;
3911 }
3912 udelay(5);
3913 }
3914
3915 return done;
3916}
3917
3918/***************************************************************************
3919* Description: Determines if the onboard NVM is FLASH or EEPROM.
3920*
3921* hw - Struct containing variables accessed by shared code
3922****************************************************************************/
3923boolean_t
3924e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
3925{
3926 uint32_t eecd = 0;
3927
3928 if(hw->mac_type == e1000_82573) {
3929 eecd = E1000_READ_REG(hw, EECD);
3930
3931 /* Isolate bits 15 & 16 */
3932 eecd = ((eecd >> 15) & 0x03);
3933
3934 /* If both bits are set, device is Flash type */
3935 if(eecd == 0x03) {
3936 return FALSE;
3937 }
3938 }
3939 return TRUE;
3940}
3941
3942/******************************************************************************
3572 * Verifies that the EEPROM has a valid checksum 3943 * Verifies that the EEPROM has a valid checksum
3573 * 3944 *
3574 * hw - Struct containing variables accessed by shared code 3945 * hw - Struct containing variables accessed by shared code
@@ -3585,6 +3956,25 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
3585 3956
3586 DEBUGFUNC("e1000_validate_eeprom_checksum"); 3957 DEBUGFUNC("e1000_validate_eeprom_checksum");
3587 3958
3959 if ((hw->mac_type == e1000_82573) &&
3960 (e1000_is_onboard_nvm_eeprom(hw) == FALSE)) {
3961 /* Check bit 4 of word 10h. If it is 0, firmware is done updating
3962 * 10h-12h. Checksum may need to be fixed. */
3963 e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
3964 if ((eeprom_data & 0x10) == 0) {
3965 /* Read 0x23 and check bit 15. This bit is a 1 when the checksum
3966 * has already been fixed. If the checksum is still wrong and this
3967 * bit is a 1, we need to return bad checksum. Otherwise, we need
3968 * to set this bit to a 1 and update the checksum. */
3969 e1000_read_eeprom(hw, 0x23, 1, &eeprom_data);
3970 if ((eeprom_data & 0x8000) == 0) {
3971 eeprom_data |= 0x8000;
3972 e1000_write_eeprom(hw, 0x23, 1, &eeprom_data);
3973 e1000_update_eeprom_checksum(hw);
3974 }
3975 }
3976 }
3977
3588 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 3978 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
3589 if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 3979 if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
3590 DEBUGOUT("EEPROM Read Error\n"); 3980 DEBUGOUT("EEPROM Read Error\n");
@@ -3628,6 +4018,8 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
3628 if(e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { 4018 if(e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
3629 DEBUGOUT("EEPROM Write Error\n"); 4019 DEBUGOUT("EEPROM Write Error\n");
3630 return -E1000_ERR_EEPROM; 4020 return -E1000_ERR_EEPROM;
4021 } else if (hw->eeprom.type == e1000_eeprom_flash) {
4022 e1000_commit_shadow_ram(hw);
3631 } 4023 }
3632 return E1000_SUCCESS; 4024 return E1000_SUCCESS;
3633} 4025}
@@ -3663,6 +4055,10 @@ e1000_write_eeprom(struct e1000_hw *hw,
3663 return -E1000_ERR_EEPROM; 4055 return -E1000_ERR_EEPROM;
3664 } 4056 }
3665 4057
4058 /* 82573 reads only through eerd */
4059 if(eeprom->use_eewr == TRUE)
4060 return e1000_write_eeprom_eewr(hw, offset, words, data);
4061
3666 /* Prepare the EEPROM for writing */ 4062 /* Prepare the EEPROM for writing */
3667 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) 4063 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
3668 return -E1000_ERR_EEPROM; 4064 return -E1000_ERR_EEPROM;
@@ -3833,6 +4229,65 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
3833} 4229}
3834 4230
3835/****************************************************************************** 4231/******************************************************************************
4232 * Flushes the cached eeprom to NVM. This is done by saving the modified values
4233 * in the eeprom cache and the non modified values in the currently active bank
4234 * to the new bank.
4235 *
4236 * hw - Struct containing variables accessed by shared code
4237 * offset - offset of word in the EEPROM to read
4238 * data - word read from the EEPROM
4239 * words - number of words to read
4240 *****************************************************************************/
4241int32_t
4242e1000_commit_shadow_ram(struct e1000_hw *hw)
4243{
4244 uint32_t attempts = 100000;
4245 uint32_t eecd = 0;
4246 uint32_t flop = 0;
4247 uint32_t i = 0;
4248 int32_t error = E1000_SUCCESS;
4249
4250 /* The flop register will be used to determine if flash type is STM */
4251 flop = E1000_READ_REG(hw, FLOP);
4252
4253 if (hw->mac_type == e1000_82573) {
4254 for (i=0; i < attempts; i++) {
4255 eecd = E1000_READ_REG(hw, EECD);
4256 if ((eecd & E1000_EECD_FLUPD) == 0) {
4257 break;
4258 }
4259 udelay(5);
4260 }
4261
4262 if (i == attempts) {
4263 return -E1000_ERR_EEPROM;
4264 }
4265
4266 /* If STM opcode located in bits 15:8 of flop, reset firmware */
4267 if ((flop & 0xFF00) == E1000_STM_OPCODE) {
4268 E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
4269 }
4270
4271 /* Perform the flash update */
4272 E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
4273
4274 for (i=0; i < attempts; i++) {
4275 eecd = E1000_READ_REG(hw, EECD);
4276 if ((eecd & E1000_EECD_FLUPD) == 0) {
4277 break;
4278 }
4279 udelay(5);
4280 }
4281
4282 if (i == attempts) {
4283 return -E1000_ERR_EEPROM;
4284 }
4285 }
4286
4287 return error;
4288}
4289
4290/******************************************************************************
3836 * Reads the adapter's part number from the EEPROM 4291 * Reads the adapter's part number from the EEPROM
3837 * 4292 *
3838 * hw - Struct containing variables accessed by shared code 4293 * hw - Struct containing variables accessed by shared code
@@ -3911,6 +4366,7 @@ void
3911e1000_init_rx_addrs(struct e1000_hw *hw) 4366e1000_init_rx_addrs(struct e1000_hw *hw)
3912{ 4367{
3913 uint32_t i; 4368 uint32_t i;
4369 uint32_t rar_num;
3914 4370
3915 DEBUGFUNC("e1000_init_rx_addrs"); 4371 DEBUGFUNC("e1000_init_rx_addrs");
3916 4372
@@ -3919,9 +4375,10 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
3919 4375
3920 e1000_rar_set(hw, hw->mac_addr, 0); 4376 e1000_rar_set(hw, hw->mac_addr, 0);
3921 4377
4378 rar_num = E1000_RAR_ENTRIES;
3922 /* Zero out the other 15 receive addresses. */ 4379 /* Zero out the other 15 receive addresses. */
3923 DEBUGOUT("Clearing RAR[1-15]\n"); 4380 DEBUGOUT("Clearing RAR[1-15]\n");
3924 for(i = 1; i < E1000_RAR_ENTRIES; i++) { 4381 for(i = 1; i < rar_num; i++) {
3925 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 4382 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
3926 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 4383 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
3927 } 4384 }
@@ -3950,7 +4407,9 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
3950{ 4407{
3951 uint32_t hash_value; 4408 uint32_t hash_value;
3952 uint32_t i; 4409 uint32_t i;
3953 4410 uint32_t num_rar_entry;
4411 uint32_t num_mta_entry;
4412
3954 DEBUGFUNC("e1000_mc_addr_list_update"); 4413 DEBUGFUNC("e1000_mc_addr_list_update");
3955 4414
3956 /* Set the new number of MC addresses that we are being requested to use. */ 4415 /* Set the new number of MC addresses that we are being requested to use. */
@@ -3958,14 +4417,16 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
3958 4417
3959 /* Clear RAR[1-15] */ 4418 /* Clear RAR[1-15] */
3960 DEBUGOUT(" Clearing RAR[1-15]\n"); 4419 DEBUGOUT(" Clearing RAR[1-15]\n");
3961 for(i = rar_used_count; i < E1000_RAR_ENTRIES; i++) { 4420 num_rar_entry = E1000_RAR_ENTRIES;
4421 for(i = rar_used_count; i < num_rar_entry; i++) {
3962 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 4422 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
3963 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 4423 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
3964 } 4424 }
3965 4425
3966 /* Clear the MTA */ 4426 /* Clear the MTA */
3967 DEBUGOUT(" Clearing MTA\n"); 4427 DEBUGOUT(" Clearing MTA\n");
3968 for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) { 4428 num_mta_entry = E1000_NUM_MTA_REGISTERS;
4429 for(i = 0; i < num_mta_entry; i++) {
3969 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 4430 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
3970 } 4431 }
3971 4432
@@ -3989,7 +4450,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
3989 /* Place this multicast address in the RAR if there is room, * 4450 /* Place this multicast address in the RAR if there is room, *
3990 * else put it in the MTA 4451 * else put it in the MTA
3991 */ 4452 */
3992 if(rar_used_count < E1000_RAR_ENTRIES) { 4453 if (rar_used_count < num_rar_entry) {
3993 e1000_rar_set(hw, 4454 e1000_rar_set(hw,
3994 mc_addr_list + (i * (ETH_LENGTH_OF_ADDRESS + pad)), 4455 mc_addr_list + (i * (ETH_LENGTH_OF_ADDRESS + pad)),
3995 rar_used_count); 4456 rar_used_count);
@@ -4040,6 +4501,7 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
4040 } 4501 }
4041 4502
4042 hash_value &= 0xFFF; 4503 hash_value &= 0xFFF;
4504
4043 return hash_value; 4505 return hash_value;
4044} 4506}
4045 4507
@@ -4144,12 +4606,33 @@ void
4144e1000_clear_vfta(struct e1000_hw *hw) 4606e1000_clear_vfta(struct e1000_hw *hw)
4145{ 4607{
4146 uint32_t offset; 4608 uint32_t offset;
4147 4609 uint32_t vfta_value = 0;
4148 for(offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) 4610 uint32_t vfta_offset = 0;
4149 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0); 4611 uint32_t vfta_bit_in_reg = 0;
4612
4613 if (hw->mac_type == e1000_82573) {
4614 if (hw->mng_cookie.vlan_id != 0) {
4615 /* The VFTA is a 4096b bit-field, each identifying a single VLAN
4616 * ID. The following operations determine which 32b entry
4617 * (i.e. offset) into the array we want to set the VLAN ID
4618 * (i.e. bit) of the manageability unit. */
4619 vfta_offset = (hw->mng_cookie.vlan_id >>
4620 E1000_VFTA_ENTRY_SHIFT) &
4621 E1000_VFTA_ENTRY_MASK;
4622 vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
4623 E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
4624 }
4625 }
4626 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
4627 /* If the offset we want to clear is the same offset of the
4628 * manageability VLAN ID, then clear all bits except that of the
4629 * manageability unit */
4630 vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
4631 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
4632 }
4150} 4633}
4151 4634
4152static int32_t 4635int32_t
4153e1000_id_led_init(struct e1000_hw * hw) 4636e1000_id_led_init(struct e1000_hw * hw)
4154{ 4637{
4155 uint32_t ledctl; 4638 uint32_t ledctl;
@@ -4480,6 +4963,19 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
4480 temp = E1000_READ_REG(hw, MGTPRC); 4963 temp = E1000_READ_REG(hw, MGTPRC);
4481 temp = E1000_READ_REG(hw, MGTPDC); 4964 temp = E1000_READ_REG(hw, MGTPDC);
4482 temp = E1000_READ_REG(hw, MGTPTC); 4965 temp = E1000_READ_REG(hw, MGTPTC);
4966
4967 if(hw->mac_type <= e1000_82547_rev_2) return;
4968
4969 temp = E1000_READ_REG(hw, IAC);
4970 temp = E1000_READ_REG(hw, ICRXOC);
4971 temp = E1000_READ_REG(hw, ICRXPTC);
4972 temp = E1000_READ_REG(hw, ICRXATC);
4973 temp = E1000_READ_REG(hw, ICTXPTC);
4974 temp = E1000_READ_REG(hw, ICTXATC);
4975 temp = E1000_READ_REG(hw, ICTXQEC);
4976 temp = E1000_READ_REG(hw, ICTXQMTC);
4977 temp = E1000_READ_REG(hw, ICRXDMTC);
4978
4483} 4979}
4484 4980
4485/****************************************************************************** 4981/******************************************************************************
@@ -4646,6 +5142,11 @@ e1000_get_bus_info(struct e1000_hw *hw)
4646 hw->bus_speed = e1000_bus_speed_unknown; 5142 hw->bus_speed = e1000_bus_speed_unknown;
4647 hw->bus_width = e1000_bus_width_unknown; 5143 hw->bus_width = e1000_bus_width_unknown;
4648 break; 5144 break;
5145 case e1000_82573:
5146 hw->bus_type = e1000_bus_type_pci_express;
5147 hw->bus_speed = e1000_bus_speed_2500;
5148 hw->bus_width = e1000_bus_width_pciex_4;
5149 break;
4649 default: 5150 default:
4650 status = E1000_READ_REG(hw, STATUS); 5151 status = E1000_READ_REG(hw, STATUS);
4651 hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? 5152 hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
@@ -4749,6 +5250,7 @@ e1000_get_cable_length(struct e1000_hw *hw,
4749 5250
4750 /* Use old method for Phy older than IGP */ 5251 /* Use old method for Phy older than IGP */
4751 if(hw->phy_type == e1000_phy_m88) { 5252 if(hw->phy_type == e1000_phy_m88) {
5253
4752 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 5254 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
4753 &phy_data); 5255 &phy_data);
4754 if(ret_val) 5256 if(ret_val)
@@ -4865,7 +5367,8 @@ e1000_check_polarity(struct e1000_hw *hw,
4865 return ret_val; 5367 return ret_val;
4866 *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> 5368 *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >>
4867 M88E1000_PSSR_REV_POLARITY_SHIFT; 5369 M88E1000_PSSR_REV_POLARITY_SHIFT;
4868 } else if(hw->phy_type == e1000_phy_igp) { 5370 } else if(hw->phy_type == e1000_phy_igp ||
5371 hw->phy_type == e1000_phy_igp_2) {
4869 /* Read the Status register to check the speed */ 5372 /* Read the Status register to check the speed */
4870 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 5373 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
4871 &phy_data); 5374 &phy_data);
@@ -4917,7 +5420,8 @@ e1000_check_downshift(struct e1000_hw *hw)
4917 5420
4918 DEBUGFUNC("e1000_check_downshift"); 5421 DEBUGFUNC("e1000_check_downshift");
4919 5422
4920 if(hw->phy_type == e1000_phy_igp) { 5423 if(hw->phy_type == e1000_phy_igp ||
5424 hw->phy_type == e1000_phy_igp_2) {
4921 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, 5425 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
4922 &phy_data); 5426 &phy_data);
4923 if(ret_val) 5427 if(ret_val)
@@ -4933,6 +5437,7 @@ e1000_check_downshift(struct e1000_hw *hw)
4933 hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> 5437 hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
4934 M88E1000_PSSR_DOWNSHIFT_SHIFT; 5438 M88E1000_PSSR_DOWNSHIFT_SHIFT;
4935 } 5439 }
5440
4936 return E1000_SUCCESS; 5441 return E1000_SUCCESS;
4937} 5442}
4938 5443
@@ -5047,7 +5552,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
5047 if(ret_val) 5552 if(ret_val)
5048 return ret_val; 5553 return ret_val;
5049 5554
5050 msec_delay(20); 5555 msec_delay_irq(20);
5051 5556
5052 ret_val = e1000_write_phy_reg(hw, 0x0000, 5557 ret_val = e1000_write_phy_reg(hw, 0x0000,
5053 IGP01E1000_IEEE_FORCE_GIGA); 5558 IGP01E1000_IEEE_FORCE_GIGA);
@@ -5071,7 +5576,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
5071 if(ret_val) 5576 if(ret_val)
5072 return ret_val; 5577 return ret_val;
5073 5578
5074 msec_delay(20); 5579 msec_delay_irq(20);
5075 5580
5076 /* Now enable the transmitter */ 5581 /* Now enable the transmitter */
5077 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); 5582 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
@@ -5096,7 +5601,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
5096 if(ret_val) 5601 if(ret_val)
5097 return ret_val; 5602 return ret_val;
5098 5603
5099 msec_delay(20); 5604 msec_delay_irq(20);
5100 5605
5101 ret_val = e1000_write_phy_reg(hw, 0x0000, 5606 ret_val = e1000_write_phy_reg(hw, 0x0000,
5102 IGP01E1000_IEEE_FORCE_GIGA); 5607 IGP01E1000_IEEE_FORCE_GIGA);
@@ -5112,7 +5617,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
5112 if(ret_val) 5617 if(ret_val)
5113 return ret_val; 5618 return ret_val;
5114 5619
5115 msec_delay(20); 5620 msec_delay_irq(20);
5116 5621
5117 /* Now enable the transmitter */ 5622 /* Now enable the transmitter */
5118 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); 5623 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
@@ -5187,22 +5692,36 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
5187 uint16_t phy_data; 5692 uint16_t phy_data;
5188 DEBUGFUNC("e1000_set_d3_lplu_state"); 5693 DEBUGFUNC("e1000_set_d3_lplu_state");
5189 5694
5190 if(!((hw->mac_type == e1000_82541_rev_2) || 5695 if(hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2)
5191 (hw->mac_type == e1000_82547_rev_2)))
5192 return E1000_SUCCESS; 5696 return E1000_SUCCESS;
5193 5697
5194 /* During driver activity LPLU should not be used or it will attain link 5698 /* During driver activity LPLU should not be used or it will attain link
5195 * from the lowest speeds starting from 10Mbps. The capability is used for 5699 * from the lowest speeds starting from 10Mbps. The capability is used for
5196 * Dx transitions and states */ 5700 * Dx transitions and states */
5197 ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); 5701 if(hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
5198 if(ret_val) 5702 ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
5199 return ret_val;
5200
5201 if(!active) {
5202 phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
5203 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
5204 if(ret_val) 5703 if(ret_val)
5205 return ret_val; 5704 return ret_val;
5705 } else {
5706 ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
5707 if(ret_val)
5708 return ret_val;
5709 }
5710
5711 if(!active) {
5712 if(hw->mac_type == e1000_82541_rev_2 ||
5713 hw->mac_type == e1000_82547_rev_2) {
5714 phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
5715 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
5716 if(ret_val)
5717 return ret_val;
5718 } else {
5719 phy_data &= ~IGP02E1000_PM_D3_LPLU;
5720 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
5721 phy_data);
5722 if (ret_val)
5723 return ret_val;
5724 }
5206 5725
5207 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during 5726 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
5208 * Dx states where the power conservation is most important. During 5727 * Dx states where the power conservation is most important. During
@@ -5236,11 +5755,105 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
5236 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) || 5755 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
5237 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { 5756 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
5238 5757
5239 phy_data |= IGP01E1000_GMII_FLEX_SPD; 5758 if(hw->mac_type == e1000_82541_rev_2 ||
5240 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); 5759 hw->mac_type == e1000_82547_rev_2) {
5760 phy_data |= IGP01E1000_GMII_FLEX_SPD;
5761 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
5762 if(ret_val)
5763 return ret_val;
5764 } else {
5765 phy_data |= IGP02E1000_PM_D3_LPLU;
5766 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
5767 phy_data);
5768 if (ret_val)
5769 return ret_val;
5770 }
5771
5772 /* When LPLU is enabled we should disable SmartSpeed */
5773 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
5774 if(ret_val)
5775 return ret_val;
5776
5777 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
5778 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
5241 if(ret_val) 5779 if(ret_val)
5242 return ret_val; 5780 return ret_val;
5243 5781
5782 }
5783 return E1000_SUCCESS;
5784}
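A condensed sketch of the register selection the rewritten function performs (the helper itself is hypothetical): the older 82541/82547 rev 2 parts carry the D3 LPLU control in the GMII FIFO register, while later IGP2-based parts such as the 82573 carry it in the PHY power-management register.

/* Sketch: which PHY register/bit holds the D3 LPLU control. */
static void d3_lplu_location(struct e1000_hw *hw, uint32_t *reg, uint16_t *bit)
{
	if (hw->mac_type == e1000_82541_rev_2 ||
	    hw->mac_type == e1000_82547_rev_2) {
		*reg = IGP01E1000_GMII_FIFO;
		*bit = IGP01E1000_GMII_FLEX_SPD;
	} else {
		*reg = IGP02E1000_PHY_POWER_MGMT;
		*bit = IGP02E1000_PM_D3_LPLU;
	}
}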
5785
5786/*****************************************************************************
5787 *
 5788 * This function sets the LPLU D0 state according to the active flag. When
 5789 * activating LPLU this function also disables SmartSpeed and vice versa.
 5790 * LPLU will not be activated unless the device autonegotiation advertisement
 5791 * meets standards of either 10, 10/100, or 10/100/1000 at all duplexes.
 5792 * hw: Struct containing variables accessed by shared code
 5793 * active - TRUE to enable LPLU, FALSE to disable LPLU.
 5794 *
 5795 * returns: - E1000_ERR_PHY if the PHY cannot be read/written
 5796 * E1000_SUCCESS in any other case.
5797 *
5798 ****************************************************************************/
5799
5800int32_t
5801e1000_set_d0_lplu_state(struct e1000_hw *hw,
5802 boolean_t active)
5803{
5804 int32_t ret_val;
5805 uint16_t phy_data;
5806 DEBUGFUNC("e1000_set_d0_lplu_state");
5807
5808 if(hw->mac_type <= e1000_82547_rev_2)
5809 return E1000_SUCCESS;
5810
5811 ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
5812 if(ret_val)
5813 return ret_val;
5814
5815 if (!active) {
5816 phy_data &= ~IGP02E1000_PM_D0_LPLU;
5817 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
5818 if (ret_val)
5819 return ret_val;
5820
5821 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
5822 * Dx states where the power conservation is most important. During
5823 * driver activity we should enable SmartSpeed, so performance is
5824 * maintained. */
5825 if (hw->smart_speed == e1000_smart_speed_on) {
5826 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5827 &phy_data);
5828 if(ret_val)
5829 return ret_val;
5830
5831 phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
5832 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5833 phy_data);
5834 if(ret_val)
5835 return ret_val;
5836 } else if (hw->smart_speed == e1000_smart_speed_off) {
5837 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5838 &phy_data);
5839 if (ret_val)
5840 return ret_val;
5841
5842 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
5843 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5844 phy_data);
5845 if(ret_val)
5846 return ret_val;
5847 }
5848
5849
5850 } else {
5851
5852 phy_data |= IGP02E1000_PM_D0_LPLU;
5853 ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
5854 if (ret_val)
5855 return ret_val;
5856
5244 /* When LPLU is enabled we should disable SmartSpeed */ 5857 /* When LPLU is enabled we should disable SmartSpeed */
5245 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); 5858 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
5246 if(ret_val) 5859 if(ret_val)
@@ -5318,6 +5931,338 @@ e1000_set_vco_speed(struct e1000_hw *hw)
5318 return E1000_SUCCESS; 5931 return E1000_SUCCESS;
5319} 5932}
5320 5933
5934
5935/*****************************************************************************
 5936 * This function reads the cookie from ARC RAM.
 5937 *
 5938 * returns: - E1000_SUCCESS.
5939 ****************************************************************************/
5940int32_t
5941e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
5942{
5943 uint8_t i;
5944 uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
5945 uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH;
5946
5947 length = (length >> 2);
5948 offset = (offset >> 2);
5949
5950 for (i = 0; i < length; i++) {
5951 *((uint32_t *) buffer + i) =
5952 E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
5953 }
5954 return E1000_SUCCESS;
5955}
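To make the shifts above concrete: the cookie lives at byte offset 0x6F0 and is 0x10 bytes long, so after dividing both by four the loop reads host-interface dwords 0x1BC through 0x1BF. A self-contained check (sketch):

#include <stdint.h>

/* Sketch: cookie byte offsets converted to dword indices. */
static int cookie_geometry_ok(void)
{
	uint32_t first = 0x6F0 >> 2;  /* E1000_MNG_DHCP_COOKIE_OFFSET -> 0x1BC */
	uint32_t count = 0x10 >> 2;   /* E1000_MNG_DHCP_COOKIE_LENGTH -> 4 */
	return first == 0x1BC && count == 4;
}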
5956
5957
5958/*****************************************************************************
 5959 * This function checks whether the HOST IF is enabled for command operation
 5960 * and also checks whether the previous command is completed.
 5961 * It busy-waits while the previous command is not completed.
 5962 *
 5963 * returns: - E1000_ERR_HOST_INTERFACE_COMMAND if the interface is not ready
 5964 * or the wait times out
 5965 * - E1000_SUCCESS for success.
5966 ****************************************************************************/
5967int32_t
5968e1000_mng_enable_host_if(struct e1000_hw * hw)
5969{
5970 uint32_t hicr;
5971 uint8_t i;
5972
5973 /* Check that the host interface is enabled. */
5974 hicr = E1000_READ_REG(hw, HICR);
5975 if ((hicr & E1000_HICR_EN) == 0) {
5976 DEBUGOUT("E1000_HOST_EN bit disabled.\n");
5977 return -E1000_ERR_HOST_INTERFACE_COMMAND;
5978 }
5979 /* check the previous command is completed */
5980 for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
5981 hicr = E1000_READ_REG(hw, HICR);
5982 if (!(hicr & E1000_HICR_C))
5983 break;
5984 msec_delay_irq(1);
5985 }
5986
5987 if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
 5988 DEBUGOUT("Previous command timed out.\n");
5989 return -E1000_ERR_HOST_INTERFACE_COMMAND;
5990 }
5991 return E1000_SUCCESS;
5992}
5993
5994/*****************************************************************************
 5995 * This function writes the buffer content at the given offset on the host
 5996 * interface. It also handles alignment so that the writes are done in the
 5997 * most efficient way, and accumulates the byte sum of the buffer in *sum.
5998 *
5999 * returns - E1000_SUCCESS for success.
6000 ****************************************************************************/
6001int32_t
6002e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
6003 uint16_t length, uint16_t offset, uint8_t *sum)
6004{
6005 uint8_t *tmp;
6006 uint8_t *bufptr = buffer;
6007 uint32_t data;
6008 uint16_t remaining, i, j, prev_bytes;
6009
 6010 /* *sum is only the byte sum of the data; it is not a checksum */
6011
6012 if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
6013 return -E1000_ERR_PARAM;
6014 }
6015
6016 tmp = (uint8_t *)&data;
6017 prev_bytes = offset & 0x3;
6018 offset &= 0xFFFC;
6019 offset >>= 2;
6020
6021 if (prev_bytes) {
6022 data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
6023 for (j = prev_bytes; j < sizeof(uint32_t); j++) {
6024 *(tmp + j) = *bufptr++;
6025 *sum += *(tmp + j);
6026 }
6027 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
6028 length -= j - prev_bytes;
6029 offset++;
6030 }
6031
6032 remaining = length & 0x3;
6033 length -= remaining;
6034
6035 /* Calculate length in DWORDs */
6036 length >>= 2;
6037
6038 /* The device driver writes the relevant command block into the
6039 * ram area. */
6040 for (i = 0; i < length; i++) {
6041 for (j = 0; j < sizeof(uint32_t); j++) {
6042 *(tmp + j) = *bufptr++;
6043 *sum += *(tmp + j);
6044 }
6045
6046 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
6047 }
6048 if (remaining) {
6049 for (j = 0; j < sizeof(uint32_t); j++) {
6050 if (j < remaining)
6051 *(tmp + j) = *bufptr++;
6052 else
6053 *(tmp + j) = 0;
6054
6055 *sum += *(tmp + j);
6056 }
6057 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
6058 }
6059
6060 return E1000_SUCCESS;
6061}
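A worked example of the alignment handling above (a sketch; the helper is hypothetical): writing 9 bytes at byte offset 6 gives prev_bytes = 2, so a read-modify-write patches the top two bytes of dword 1, one full dword is written at index 2, and the final 3 bytes go out zero-padded at index 3.

#include <stdint.h>

/* Sketch: how a host-interface write splits into head/full/tail parts. */
static void hostif_split(uint16_t offset, uint16_t length,
			 uint16_t *head, uint16_t *full, uint16_t *tail)
{
	uint16_t prev_bytes = offset & 0x3;

	*head = prev_bytes ? 4 - prev_bytes : 0;  /* bytes merged into first dword */
	*tail = (length - *head) & 0x3;           /* zero-padded trailing bytes */
	*full = (uint16_t)(length - *head - *tail) >> 2;  /* whole dwords */
}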
6062
6063
6064/*****************************************************************************
 6065 * This function writes the command header after performing the checksum calculation.
6066 *
6067 * returns - E1000_SUCCESS for success.
6068 ****************************************************************************/
6069int32_t
6070e1000_mng_write_cmd_header(struct e1000_hw * hw,
6071 struct e1000_host_mng_command_header * hdr)
6072{
6073 uint16_t i;
6074 uint8_t sum;
6075 uint8_t *buffer;
6076
 6077 /* Write the whole command header structure, which includes the sum
 6078 * of the buffer */
6079
6080 uint16_t length = sizeof(struct e1000_host_mng_command_header);
6081
6082 sum = hdr->checksum;
6083 hdr->checksum = 0;
6084
6085 buffer = (uint8_t *) hdr;
6086 i = length;
6087 while(i--)
6088 sum += buffer[i];
6089
6090 hdr->checksum = 0 - sum;
6091
6092 length >>= 2;
6093 /* The device driver writes the relevant command block into the ram area. */
6094 for (i = 0; i < length; i++)
6095 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i));
6096
6097 return E1000_SUCCESS;
6098}
6099
6100
6101/*****************************************************************************
 6102 * This function indicates to the ARC that a new command is pending, which
 6103 * completes one write operation by the driver.
6104 *
6105 * returns - E1000_SUCCESS for success.
6106 ****************************************************************************/
6107int32_t
6108e1000_mng_write_commit(
6109 struct e1000_hw * hw)
6110{
6111 uint32_t hicr;
6112
6113 hicr = E1000_READ_REG(hw, HICR);
6114 /* Setting this bit tells the ARC that a new command is pending. */
6115 E1000_WRITE_REG(hw, HICR, hicr | E1000_HICR_C);
6116
6117 return E1000_SUCCESS;
6118}
6119
6120
6121/*****************************************************************************
6122 * This function checks the mode of the firmware.
6123 *
 6124 * returns - TRUE when the mode is IAMT, FALSE otherwise.
6125 ****************************************************************************/
6126boolean_t
6127e1000_check_mng_mode(
6128 struct e1000_hw *hw)
6129{
6130 uint32_t fwsm;
6131
6132 fwsm = E1000_READ_REG(hw, FWSM);
6133
6134 if((fwsm & E1000_FWSM_MODE_MASK) ==
6135 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
6136 return TRUE;
6137
6138 return FALSE;
6139}
6140
6141
6142/*****************************************************************************
 6143 * This function writes the DHCP information to the host interface.
6144 ****************************************************************************/
6145int32_t
6146e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer,
6147 uint16_t length)
6148{
6149 int32_t ret_val;
6150 struct e1000_host_mng_command_header hdr;
6151
6152 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
6153 hdr.command_length = length;
6154 hdr.reserved1 = 0;
6155 hdr.reserved2 = 0;
6156 hdr.checksum = 0;
6157
6158 ret_val = e1000_mng_enable_host_if(hw);
6159 if (ret_val == E1000_SUCCESS) {
6160 ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
6161 &(hdr.checksum));
6162 if (ret_val == E1000_SUCCESS) {
6163 ret_val = e1000_mng_write_cmd_header(hw, &hdr);
6164 if (ret_val == E1000_SUCCESS)
6165 ret_val = e1000_mng_write_commit(hw);
6166 }
6167 }
6168 return ret_val;
6169}
6170
6171
6172/*****************************************************************************
6173 * This function calculates the checksum.
6174 *
6175 * returns - checksum of buffer contents.
6176 ****************************************************************************/
6177uint8_t
6178e1000_calculate_mng_checksum(char *buffer, uint32_t length)
6179{
6180 uint8_t sum = 0;
6181 uint32_t i;
6182
6183 if (!buffer)
6184 return 0;
6185
6186 for (i=0; i < length; i++)
6187 sum += buffer[i];
6188
6189 return (uint8_t) (0 - sum);
6190}
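The returned value is the two's complement of the byte sum, so a buffer whose checksum field is filled in sums to zero modulo 256 — which is exactly the property e1000_enable_tx_pkt_filtering relies on below. A minimal self-check (sketch):

#include <assert.h>
#include <stdint.h>

/* Sketch: sum(buffer) + e1000_calculate_mng_checksum(buffer) == 0 mod 256. */
static void mng_checksum_property(const char *buffer, uint32_t length)
{
	uint8_t sum = 0;
	uint32_t i;

	for (i = 0; i < length; i++)
		sum += (uint8_t)buffer[i];

	assert((uint8_t)(sum + (uint8_t)(0 - sum)) == 0);
}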
6191
6192/*****************************************************************************
 6193 * This function checks whether TX packet filtering needs to be enabled.
 6194 *
 6195 * returns - TRUE if packet filtering is enabled, FALSE otherwise.
6196 ****************************************************************************/
6197boolean_t
6198e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
6199{
6200 /* called in init as well as watchdog timer functions */
6201
6202 int32_t ret_val, checksum;
6203 boolean_t tx_filter = FALSE;
6204 struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
6205 uint8_t *buffer = (uint8_t *) &(hw->mng_cookie);
6206
6207 if (e1000_check_mng_mode(hw)) {
6208 ret_val = e1000_mng_enable_host_if(hw);
6209 if (ret_val == E1000_SUCCESS) {
6210 ret_val = e1000_host_if_read_cookie(hw, buffer);
6211 if (ret_val == E1000_SUCCESS) {
6212 checksum = hdr->checksum;
6213 hdr->checksum = 0;
6214 if ((hdr->signature == E1000_IAMT_SIGNATURE) &&
6215 checksum == e1000_calculate_mng_checksum((char *)buffer,
6216 E1000_MNG_DHCP_COOKIE_LENGTH)) {
6217 if (hdr->status &
6218 E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
6219 tx_filter = TRUE;
6220 } else
6221 tx_filter = TRUE;
6222 } else
6223 tx_filter = TRUE;
6224 }
6225 }
6226
6227 hw->tx_pkt_filtering = tx_filter;
6228 return tx_filter;
6229}
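In other words, filtering ends up enabled in every case except one: a cookie with a valid signature and checksum whose status lacks the parsing-support flag. A sketch of that decision (helper name hypothetical):

/* Sketch: the effective tx-filter decision made above. */
static boolean_t tx_filter_decision(boolean_t cookie_valid, uint8_t status)
{
	if (!cookie_valid)
		return TRUE;  /* unreadable or corrupt cookie: keep filtering */
	return (status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT) ?
	       TRUE : FALSE;
}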
6230
6231/******************************************************************************
 6232 * Verifies whether the hardware needs to allow ARPs to be processed by the host
6233 *
6234 * hw - Struct containing variables accessed by shared code
6235 *
6236 * returns: - TRUE/FALSE
6237 *
6238 *****************************************************************************/
6239uint32_t
6240e1000_enable_mng_pass_thru(struct e1000_hw *hw)
6241{
6242 uint32_t manc;
6243 uint32_t fwsm, factps;
6244
6245 if (hw->asf_firmware_present) {
6246 manc = E1000_READ_REG(hw, MANC);
6247
6248 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
6249 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
6250 return FALSE;
6251 if (e1000_arc_subsystem_valid(hw) == TRUE) {
6252 fwsm = E1000_READ_REG(hw, FWSM);
6253 factps = E1000_READ_REG(hw, FACTPS);
6254
6255 if (((fwsm & E1000_FWSM_MODE_MASK) ==
6256 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)) &&
6257 (factps & E1000_FACTPS_MNGCG))
6258 return TRUE;
6259 } else
6260 if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
6261 return TRUE;
6262 }
6263 return FALSE;
6264}
6265
5321static int32_t 6266static int32_t
5322e1000_polarity_reversal_workaround(struct e1000_hw *hw) 6267e1000_polarity_reversal_workaround(struct e1000_hw *hw)
5323{ 6268{
@@ -5403,3 +6348,265 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw)
5403 return E1000_SUCCESS; 6348 return E1000_SUCCESS;
5404} 6349}
5405 6350
6351/***************************************************************************
6352 *
6353 * Disables PCI-Express master access.
6354 *
6355 * hw: Struct containing variables accessed by shared code
6356 *
6357 * returns: - none.
6358 *
6359 ***************************************************************************/
6360void
6361e1000_set_pci_express_master_disable(struct e1000_hw *hw)
6362{
6363 uint32_t ctrl;
6364
6365 DEBUGFUNC("e1000_set_pci_express_master_disable");
6366
6367 if (hw->bus_type != e1000_bus_type_pci_express)
6368 return;
6369
6370 ctrl = E1000_READ_REG(hw, CTRL);
6371 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
6372 E1000_WRITE_REG(hw, CTRL, ctrl);
6373}
6374
6375/***************************************************************************
6376 *
6377 * Enables PCI-Express master access.
6378 *
6379 * hw: Struct containing variables accessed by shared code
6380 *
6381 * returns: - none.
6382 *
6383 ***************************************************************************/
6384void
6385e1000_enable_pciex_master(struct e1000_hw *hw)
6386{
6387 uint32_t ctrl;
6388
6389 DEBUGFUNC("e1000_enable_pciex_master");
6390
6391 if (hw->bus_type != e1000_bus_type_pci_express)
6392 return;
6393
6394 ctrl = E1000_READ_REG(hw, CTRL);
6395 ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE;
6396 E1000_WRITE_REG(hw, CTRL, ctrl);
6397}
6398
6399/*******************************************************************************
6400 *
6401 * Disables PCI-Express master access and verifies there are no pending requests
6402 *
6403 * hw: Struct containing variables accessed by shared code
6404 *
 6405 * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if the master disable bit
 6406 * has not caused the master requests to be disabled.
 6407 * E1000_SUCCESS if master requests are disabled.
6408 *
6409 ******************************************************************************/
6410int32_t
6411e1000_disable_pciex_master(struct e1000_hw *hw)
6412{
6413 int32_t timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */
6414
6415 DEBUGFUNC("e1000_disable_pciex_master");
6416
6417 if (hw->bus_type != e1000_bus_type_pci_express)
6418 return E1000_SUCCESS;
6419
6420 e1000_set_pci_express_master_disable(hw);
6421
6422 while(timeout) {
6423 if(!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
6424 break;
6425 else
6426 udelay(100);
6427 timeout--;
6428 }
6429
6430 if(!timeout) {
6431 DEBUGOUT("Master requests are pending.\n");
6432 return -E1000_ERR_MASTER_REQUESTS_PENDING;
6433 }
6434
6435 return E1000_SUCCESS;
6436}
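Typical use is to quiesce outstanding requests before a MAC reset and restore mastering afterwards; a sketch of that call sequence (the surrounding reset logic is elided):

/* Sketch: bracketing a MAC reset with the master disable/enable pair. */
static void quiesce_for_reset(struct e1000_hw *hw)
{
	if (e1000_disable_pciex_master(hw) != E1000_SUCCESS)
		DEBUGOUT("PCI-Express master disable polling failed.\n");

	/* ... issue the MAC reset here ... */

	e1000_enable_pciex_master(hw);
}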
6437
6438/*******************************************************************************
6439 *
6440 * Check for EEPROM Auto Read bit done.
6441 *
6442 * hw: Struct containing variables accessed by shared code
6443 *
 6444 * returns: - E1000_ERR_RESET if the EEPROM auto read has not completed
 6445 * E1000_SUCCESS in any other case.
6446 *
6447 ******************************************************************************/
6448int32_t
6449e1000_get_auto_rd_done(struct e1000_hw *hw)
6450{
6451 int32_t timeout = AUTO_READ_DONE_TIMEOUT;
6452
6453 DEBUGFUNC("e1000_get_auto_rd_done");
6454
6455 switch (hw->mac_type) {
6456 default:
6457 msec_delay(5);
6458 break;
6459 case e1000_82573:
6460 while(timeout) {
6461 if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break;
6462 else msec_delay(1);
6463 timeout--;
6464 }
6465
6466 if(!timeout) {
6467 DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
6468 return -E1000_ERR_RESET;
6469 }
6470 break;
6471 }
6472
6473 return E1000_SUCCESS;
6474}
6475
6476/***************************************************************************
6477 * Checks if the PHY configuration is done
6478 *
6479 * hw: Struct containing variables accessed by shared code
6480 *
 6481 * returns: - E1000_ERR_RESET if the MAC reset fails
 6482 * E1000_SUCCESS in any other case.
6483 *
6484 ***************************************************************************/
6485int32_t
6486e1000_get_phy_cfg_done(struct e1000_hw *hw)
6487{
6488 DEBUGFUNC("e1000_get_phy_cfg_done");
6489
6490 /* Simply wait for 10ms */
6491 msec_delay(10);
6492
6493 return E1000_SUCCESS;
6494}
6495
6496/***************************************************************************
6497 *
 6498 * Uses the combination of SMBI and SWESMBI semaphore bits when resetting
 6499 * the adapter or accessing the EEPROM.
6500 *
6501 * hw: Struct containing variables accessed by shared code
6502 *
 6503 * returns: - E1000_ERR_EEPROM if the EEPROM cannot be accessed.
 6504 * E1000_SUCCESS in any other case.
6505 *
6506 ***************************************************************************/
6507int32_t
6508e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
6509{
6510 int32_t timeout;
6511 uint32_t swsm;
6512
6513 DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
6514
6515 if(!hw->eeprom_semaphore_present)
6516 return E1000_SUCCESS;
6517
6518
6519 /* Get the FW semaphore. */
6520 timeout = hw->eeprom.word_size + 1;
6521 while(timeout) {
6522 swsm = E1000_READ_REG(hw, SWSM);
6523 swsm |= E1000_SWSM_SWESMBI;
6524 E1000_WRITE_REG(hw, SWSM, swsm);
6525 /* if we managed to set the bit we got the semaphore. */
6526 swsm = E1000_READ_REG(hw, SWSM);
6527 if(swsm & E1000_SWSM_SWESMBI)
6528 break;
6529
6530 udelay(50);
6531 timeout--;
6532 }
6533
6534 if(!timeout) {
6535 /* Release semaphores */
6536 e1000_put_hw_eeprom_semaphore(hw);
 6537 DEBUGOUT("Driver can't access the EEPROM - SWESMBI bit is set.\n");
6538 return -E1000_ERR_EEPROM;
6539 }
6540
6541 return E1000_SUCCESS;
6542}
6543
6544/***************************************************************************
6545 * This function clears HW semaphore bits.
6546 *
6547 * hw: Struct containing variables accessed by shared code
6548 *
6549 * returns: - None.
6550 *
6551 ***************************************************************************/
6552void
6553e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
6554{
6555 uint32_t swsm;
6556
6557 DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
6558
6559 if(!hw->eeprom_semaphore_present)
6560 return;
6561
6562 swsm = E1000_READ_REG(hw, SWSM);
6563 /* Release both semaphores. */
6564 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
6565 E1000_WRITE_REG(hw, SWSM, swsm);
6566}
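The intended pairing is acquire, access, release, with a failed acquire aborting the access; a sketch (the wrapper is hypothetical):

/* Sketch: EEPROM access guarded by the hardware semaphore pair. */
static int32_t read_eeprom_word_locked(struct e1000_hw *hw, uint16_t offset,
				       uint16_t *data)
{
	int32_t ret_val;

	if (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS)
		return -E1000_ERR_EEPROM;

	ret_val = e1000_read_eeprom(hw, offset, 1, data);

	e1000_put_hw_eeprom_semaphore(hw);
	return ret_val;
}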
6567
6568/******************************************************************************
 6569 * Checks if PHY reset is blocked due to, for example, an SOL/IDER session.
 6570 * Returning E1000_BLK_PHY_RESET isn't necessarily an error; it's up to
 6571 * the caller to figure out how to deal with it.
6572 *
6573 * hw - Struct containing variables accessed by shared code
6574 *
6575 * returns: - E1000_BLK_PHY_RESET
6576 * E1000_SUCCESS
6577 *
6578 *****************************************************************************/
6579int32_t
6580e1000_check_phy_reset_block(struct e1000_hw *hw)
6581{
6582 uint32_t manc = 0;
6583 if(hw->mac_type > e1000_82547_rev_2)
6584 manc = E1000_READ_REG(hw, MANC);
6585 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
6586 E1000_BLK_PHY_RESET : E1000_SUCCESS;
6587}
6588
6589uint8_t
6590e1000_arc_subsystem_valid(struct e1000_hw *hw)
6591{
6592 uint32_t fwsm;
6593
6594 /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
6595 * may not be provided a DMA clock when no manageability features are
6596 * enabled. We do not want to perform any reads/writes to these registers
6597 * if this is the case. We read FWSM to determine the manageability mode.
6598 */
6599 switch (hw->mac_type) {
6600 case e1000_82573:
6601 fwsm = E1000_READ_REG(hw, FWSM);
6602 if((fwsm & E1000_FWSM_MODE_MASK) != 0)
6603 return TRUE;
6604 break;
6605 default:
6606 break;
6607 }
6608 return FALSE;
6609}
6610
6611
6612
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index f397e637a3c5..a0263ee96c6b 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 3
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free 7 under the terms of the GNU General Public License as published by the Free
@@ -57,6 +57,7 @@ typedef enum {
57 e1000_82541_rev_2, 57 e1000_82541_rev_2,
58 e1000_82547, 58 e1000_82547,
59 e1000_82547_rev_2, 59 e1000_82547_rev_2,
60 e1000_82573,
60 e1000_num_macs 61 e1000_num_macs
61} e1000_mac_type; 62} e1000_mac_type;
62 63
@@ -64,6 +65,7 @@ typedef enum {
64 e1000_eeprom_uninitialized = 0, 65 e1000_eeprom_uninitialized = 0,
65 e1000_eeprom_spi, 66 e1000_eeprom_spi,
66 e1000_eeprom_microwire, 67 e1000_eeprom_microwire,
68 e1000_eeprom_flash,
67 e1000_num_eeprom_types 69 e1000_num_eeprom_types
68} e1000_eeprom_type; 70} e1000_eeprom_type;
69 71
@@ -96,6 +98,7 @@ typedef enum {
96 e1000_bus_type_unknown = 0, 98 e1000_bus_type_unknown = 0,
97 e1000_bus_type_pci, 99 e1000_bus_type_pci,
98 e1000_bus_type_pcix, 100 e1000_bus_type_pcix,
101 e1000_bus_type_pci_express,
99 e1000_bus_type_reserved 102 e1000_bus_type_reserved
100} e1000_bus_type; 103} e1000_bus_type;
101 104
@@ -107,6 +110,7 @@ typedef enum {
107 e1000_bus_speed_100, 110 e1000_bus_speed_100,
108 e1000_bus_speed_120, 111 e1000_bus_speed_120,
109 e1000_bus_speed_133, 112 e1000_bus_speed_133,
113 e1000_bus_speed_2500,
110 e1000_bus_speed_reserved 114 e1000_bus_speed_reserved
111} e1000_bus_speed; 115} e1000_bus_speed;
112 116
@@ -115,6 +119,8 @@ typedef enum {
115 e1000_bus_width_unknown = 0, 119 e1000_bus_width_unknown = 0,
116 e1000_bus_width_32, 120 e1000_bus_width_32,
117 e1000_bus_width_64, 121 e1000_bus_width_64,
122 e1000_bus_width_pciex_1,
123 e1000_bus_width_pciex_4,
118 e1000_bus_width_reserved 124 e1000_bus_width_reserved
119} e1000_bus_width; 125} e1000_bus_width;
120 126
@@ -196,6 +202,7 @@ typedef enum {
196typedef enum { 202typedef enum {
197 e1000_phy_m88 = 0, 203 e1000_phy_m88 = 0,
198 e1000_phy_igp, 204 e1000_phy_igp,
205 e1000_phy_igp_2,
199 e1000_phy_undefined = 0xFF 206 e1000_phy_undefined = 0xFF
200} e1000_phy_type; 207} e1000_phy_type;
201 208
@@ -242,8 +249,19 @@ struct e1000_eeprom_info {
242 uint16_t address_bits; 249 uint16_t address_bits;
243 uint16_t delay_usec; 250 uint16_t delay_usec;
244 uint16_t page_size; 251 uint16_t page_size;
252 boolean_t use_eerd;
253 boolean_t use_eewr;
245}; 254};
246 255
256/* Flex ASF Information */
257#define E1000_HOST_IF_MAX_SIZE 2048
258
259typedef enum {
260 e1000_byte_align = 0,
261 e1000_word_align = 1,
262 e1000_dword_align = 2
263} e1000_align_type;
264
247 265
248 266
249/* Error Codes */ 267/* Error Codes */
@@ -254,11 +272,16 @@ struct e1000_eeprom_info {
254#define E1000_ERR_PARAM 4 272#define E1000_ERR_PARAM 4
255#define E1000_ERR_MAC_TYPE 5 273#define E1000_ERR_MAC_TYPE 5
256#define E1000_ERR_PHY_TYPE 6 274#define E1000_ERR_PHY_TYPE 6
275#define E1000_ERR_RESET 9
276#define E1000_ERR_MASTER_REQUESTS_PENDING 10
277#define E1000_ERR_HOST_INTERFACE_COMMAND 11
278#define E1000_BLK_PHY_RESET 12
257 279
258/* Function prototypes */ 280/* Function prototypes */
259/* Initialization */ 281/* Initialization */
260int32_t e1000_reset_hw(struct e1000_hw *hw); 282int32_t e1000_reset_hw(struct e1000_hw *hw);
261int32_t e1000_init_hw(struct e1000_hw *hw); 283int32_t e1000_init_hw(struct e1000_hw *hw);
284int32_t e1000_id_led_init(struct e1000_hw * hw);
262int32_t e1000_set_mac_type(struct e1000_hw *hw); 285int32_t e1000_set_mac_type(struct e1000_hw *hw);
263void e1000_set_media_type(struct e1000_hw *hw); 286void e1000_set_media_type(struct e1000_hw *hw);
264 287
@@ -275,7 +298,7 @@ int32_t e1000_force_mac_fc(struct e1000_hw *hw);
275/* PHY */ 298/* PHY */
276int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data); 299int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data);
277int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); 300int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
278void e1000_phy_hw_reset(struct e1000_hw *hw); 301int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
279int32_t e1000_phy_reset(struct e1000_hw *hw); 302int32_t e1000_phy_reset(struct e1000_hw *hw);
280int32_t e1000_detect_gig_phy(struct e1000_hw *hw); 303int32_t e1000_detect_gig_phy(struct e1000_hw *hw);
281int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); 304int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
@@ -287,13 +310,86 @@ int32_t e1000_check_downshift(struct e1000_hw *hw);
287int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); 310int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
288 311
289/* EEPROM Functions */ 312/* EEPROM Functions */
290void e1000_init_eeprom_params(struct e1000_hw *hw); 313int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
314boolean_t e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
315int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
316int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
317int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
318
319/* MNG HOST IF functions */
320uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
321
322#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
323#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
324
325#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
326#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
327#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
328#define E1000_MNG_IAMT_MODE 0x3
329#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
330
331#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
 332#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP VLAN support enabled */
333#define E1000_VFTA_ENTRY_SHIFT 0x5
334#define E1000_VFTA_ENTRY_MASK 0x7F
335#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
336
337struct e1000_host_mng_command_header {
338 uint8_t command_id;
339 uint8_t checksum;
340 uint16_t reserved1;
341 uint16_t reserved2;
342 uint16_t command_length;
343};
344
345struct e1000_host_mng_command_info {
346 struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
 347 uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can be 0..0x658 bytes long */
348};
349#ifdef __BIG_ENDIAN
350struct e1000_host_mng_dhcp_cookie{
351 uint32_t signature;
352 uint16_t vlan_id;
353 uint8_t reserved0;
354 uint8_t status;
355 uint32_t reserved1;
356 uint8_t checksum;
357 uint8_t reserved3;
358 uint16_t reserved2;
359};
360#else
361struct e1000_host_mng_dhcp_cookie{
362 uint32_t signature;
363 uint8_t status;
364 uint8_t reserved0;
365 uint16_t vlan_id;
366 uint32_t reserved1;
367 uint16_t reserved2;
368 uint8_t reserved3;
369 uint8_t checksum;
370};
371#endif
372
373int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
374 uint16_t length);
375boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
376boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
377int32_t e1000_mng_enable_host_if(struct e1000_hw *hw);
378int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer,
379 uint16_t length, uint16_t offset, uint8_t *sum);
380int32_t e1000_mng_write_cmd_header(struct e1000_hw* hw,
381 struct e1000_host_mng_command_header* hdr);
382
383int32_t e1000_mng_write_commit(struct e1000_hw *hw);
384
291int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 385int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
292int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); 386int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw);
293int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); 387int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
294int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 388int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
295int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); 389int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num);
296int32_t e1000_read_mac_addr(struct e1000_hw * hw); 390int32_t e1000_read_mac_addr(struct e1000_hw * hw);
391int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
392void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
297 393
298/* Filters (multicast, vlan, receive) */ 394/* Filters (multicast, vlan, receive) */
299void e1000_init_rx_addrs(struct e1000_hw *hw); 395void e1000_init_rx_addrs(struct e1000_hw *hw);
@@ -313,7 +409,6 @@ int32_t e1000_led_off(struct e1000_hw *hw);
313/* Adaptive IFS Functions */ 409/* Adaptive IFS Functions */
314 410
315/* Everything else */ 411/* Everything else */
316uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
317void e1000_clear_hw_cntrs(struct e1000_hw *hw); 412void e1000_clear_hw_cntrs(struct e1000_hw *hw);
318void e1000_reset_adaptive(struct e1000_hw *hw); 413void e1000_reset_adaptive(struct e1000_hw *hw);
319void e1000_update_adaptive(struct e1000_hw *hw); 414void e1000_update_adaptive(struct e1000_hw *hw);
@@ -330,6 +425,19 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
330void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value); 425void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value);
331int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up); 426int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up);
332int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active); 427int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
428int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, boolean_t active);
429void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
430void e1000_enable_pciex_master(struct e1000_hw *hw);
431int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
432int32_t e1000_get_auto_rd_done(struct e1000_hw *hw);
433int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw);
434int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
435void e1000_release_software_semaphore(struct e1000_hw *hw);
436int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
437int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
438void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
439int32_t e1000_commit_shadow_ram(struct e1000_hw *hw);
440uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw);
333 441
334#define E1000_READ_REG_IO(a, reg) \ 442#define E1000_READ_REG_IO(a, reg) \
335 e1000_read_reg_io((a), E1000_##reg) 443 e1000_read_reg_io((a), E1000_##reg)
@@ -369,6 +477,10 @@ int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
369#define E1000_DEV_ID_82546GB_SERDES 0x107B 477#define E1000_DEV_ID_82546GB_SERDES 0x107B
370#define E1000_DEV_ID_82546GB_PCIE 0x108A 478#define E1000_DEV_ID_82546GB_PCIE 0x108A
371#define E1000_DEV_ID_82547EI 0x1019 479#define E1000_DEV_ID_82547EI 0x1019
480#define E1000_DEV_ID_82573E 0x108B
481#define E1000_DEV_ID_82573E_IAMT 0x108C
482
483#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
372 484
373#define NODE_ADDRESS_SIZE 6 485#define NODE_ADDRESS_SIZE 6
374#define ETH_LENGTH_OF_ADDRESS 6 486#define ETH_LENGTH_OF_ADDRESS 6
@@ -381,6 +493,7 @@ int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
381#define E1000_REVISION_0 0 493#define E1000_REVISION_0 0
382#define E1000_REVISION_1 1 494#define E1000_REVISION_1 1
383#define E1000_REVISION_2 2 495#define E1000_REVISION_2 2
496#define E1000_REVISION_3 3
384 497
385#define SPEED_10 10 498#define SPEED_10 10
386#define SPEED_100 100 499#define SPEED_100 100
@@ -437,6 +550,7 @@ int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
437 E1000_IMS_RXSEQ | \ 550 E1000_IMS_RXSEQ | \
438 E1000_IMS_LSC) 551 E1000_IMS_LSC)
439 552
553
440/* Number of high/low register pairs in the RAR. The RAR (Receive Address 554/* Number of high/low register pairs in the RAR. The RAR (Receive Address
441 * Registers) holds the directed and multicast addresses that we monitor. We 555 * Registers) holds the directed and multicast addresses that we monitor. We
442 * reserve one of these spots for our directed address, allowing us room for 556 * reserve one of these spots for our directed address, allowing us room for
@@ -457,14 +571,74 @@ struct e1000_rx_desc {
457 uint16_t special; 571 uint16_t special;
458}; 572};
459 573
574/* Receive Descriptor - Extended */
575union e1000_rx_desc_extended {
576 struct {
577 uint64_t buffer_addr;
578 uint64_t reserved;
579 } read;
580 struct {
581 struct {
582 uint32_t mrq; /* Multiple Rx Queues */
583 union {
584 uint32_t rss; /* RSS Hash */
585 struct {
586 uint16_t ip_id; /* IP id */
587 uint16_t csum; /* Packet Checksum */
588 } csum_ip;
589 } hi_dword;
590 } lower;
591 struct {
592 uint32_t status_error; /* ext status/error */
593 uint16_t length;
594 uint16_t vlan; /* VLAN tag */
595 } upper;
596 } wb; /* writeback */
597};
598
599#define MAX_PS_BUFFERS 4
600/* Receive Descriptor - Packet Split */
601union e1000_rx_desc_packet_split {
602 struct {
603 /* one buffer for protocol header(s), three data buffers */
604 uint64_t buffer_addr[MAX_PS_BUFFERS];
605 } read;
606 struct {
607 struct {
608 uint32_t mrq; /* Multiple Rx Queues */
609 union {
610 uint32_t rss; /* RSS Hash */
611 struct {
612 uint16_t ip_id; /* IP id */
613 uint16_t csum; /* Packet Checksum */
614 } csum_ip;
615 } hi_dword;
616 } lower;
617 struct {
618 uint32_t status_error; /* ext status/error */
619 uint16_t length0; /* length of buffer 0 */
620 uint16_t vlan; /* VLAN tag */
621 } middle;
622 struct {
623 uint16_t header_status;
624 uint16_t length[3]; /* length of buffers 1-3 */
625 } upper;
626 uint64_t reserved;
627 } wb; /* writeback */
628};
629
 460/* Receive Descriptor bit definitions */ 630/* Receive Descriptor bit definitions */
461#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 631#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
462#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ 632#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
463#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ 633#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
464#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 634#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
 635#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
465#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ 636#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
466#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ 637#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
467#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ 638#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
639#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
640#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
641#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
468#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ 642#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
469#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ 643#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
470#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ 644#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
@@ -474,9 +648,20 @@ struct e1000_rx_desc {
474#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ 648#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
475#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ 649#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
476#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ 650#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
477#define E1000_RXD_SPC_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */ 651#define E1000_RXD_SPC_PRI_SHIFT 13
478#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ 652#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
479#define E1000_RXD_SPC_CFI_SHIFT 0x000C /* CFI is bit 12 */ 653#define E1000_RXD_SPC_CFI_SHIFT 12
654
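With the shift amounts now written as plain bit counts, decoding the 802.1Q fields from a descriptor's special word looks like this (sketch):

#include <stdint.h>

/* Sketch: extract VLAN ID, priority and CFI from the 'special' field. */
static void decode_special(uint16_t special, uint16_t *vlan, uint16_t *pri,
			   uint16_t *cfi)
{
	*vlan = special & 0x0FFF;          /* E1000_RXD_SPC_VLAN_MASK */
	*cfi  = (special & 0x1000) >> 12;  /* E1000_RXD_SPC_CFI_* */
	*pri  = (special & 0xE000) >> 13;  /* E1000_RXD_SPC_PRI_* */
}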
655#define E1000_RXDEXT_STATERR_CE 0x01000000
656#define E1000_RXDEXT_STATERR_SE 0x02000000
657#define E1000_RXDEXT_STATERR_SEQ 0x04000000
658#define E1000_RXDEXT_STATERR_CXE 0x10000000
659#define E1000_RXDEXT_STATERR_TCPE 0x20000000
660#define E1000_RXDEXT_STATERR_IPE 0x40000000
661#define E1000_RXDEXT_STATERR_RXE 0x80000000
662
663#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
664#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
480 665
481/* mask to determine if packets should be dropped due to frame errors */ 666/* mask to determine if packets should be dropped due to frame errors */
482#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ 667#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
@@ -486,6 +671,15 @@ struct e1000_rx_desc {
486 E1000_RXD_ERR_CXE | \ 671 E1000_RXD_ERR_CXE | \
487 E1000_RXD_ERR_RXE) 672 E1000_RXD_ERR_RXE)
488 673
674
675/* Same mask, but for extended and packet split descriptors */
676#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
677 E1000_RXDEXT_STATERR_CE | \
678 E1000_RXDEXT_STATERR_SE | \
679 E1000_RXDEXT_STATERR_SEQ | \
680 E1000_RXDEXT_STATERR_CXE | \
681 E1000_RXDEXT_STATERR_RXE)
682
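For the new extended and packet-split formats the status and error bits share one 32-bit status_error word, so the done/error test collapses to a pair of mask checks (a sketch; le32_to_cpu is the usual kernel byte-order helper):

/* Sketch: writeback test for an extended rx descriptor. */
static int rx_ext_desc_done_ok(union e1000_rx_desc_extended *desc)
{
	uint32_t staterr = le32_to_cpu(desc->wb.upper.status_error);

	if (!(staterr & E1000_RXD_STAT_DD))
		return 0;  /* descriptor not written back yet */

	return !(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK);
}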
489/* Transmit Descriptor */ 683/* Transmit Descriptor */
490struct e1000_tx_desc { 684struct e1000_tx_desc {
491 uint64_t buffer_addr; /* Address of the descriptor's data buffer */ 685 uint64_t buffer_addr; /* Address of the descriptor's data buffer */
@@ -667,6 +861,7 @@ struct e1000_ffvt_entry {
667#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ 861#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
668#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ 862#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
669#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ 863#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
864#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
670#define E1000_RCTL 0x00100 /* RX Control - RW */ 865#define E1000_RCTL 0x00100 /* RX Control - RW */
671#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ 866#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
672#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ 867#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
@@ -676,9 +871,23 @@ struct e1000_ffvt_entry {
676#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ 871#define E1000_TBT 0x00448 /* TX Burst Timer - RW */
677#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ 872#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
678#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ 873#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
874#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
875#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
679#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 876#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
877#define E1000_PBS 0x01008 /* Packet Buffer Size */
878#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
879#define E1000_FLASH_UPDATES 1000
880#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
881#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
882#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
883#define E1000_FLSWCTL 0x01030 /* FLASH control register */
884#define E1000_FLSWDATA 0x01034 /* FLASH data register */
885#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
886#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
887#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
680#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ 888#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
681#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ 889#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
890#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
682#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ 891#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
683#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ 892#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */
684#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ 893#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
@@ -688,6 +897,7 @@ struct e1000_ffvt_entry {
688#define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */ 897#define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */
689#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ 898#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
690#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ 899#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
900#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
691#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ 901#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
692#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ 902#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
693#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ 903#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
@@ -703,6 +913,14 @@ struct e1000_ffvt_entry {
703#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ 913#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
704#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ 914#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
705#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ 915#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
916#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
917#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
918#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
919#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
920#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
921#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
922#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
923#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
706#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ 924#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
707#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 925#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
708#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ 926#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
@@ -761,7 +979,17 @@ struct e1000_ffvt_entry {
761#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ 979#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
762#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ 980#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
763#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ 981#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
982#define E1000_IAC 0x4100 /* Interrupt Assertion Count */
983#define E1000_ICRXPTC 0x4104 /* Interrupt Cause Rx Packet Timer Expire Count */
984#define E1000_ICRXATC 0x4108 /* Interrupt Cause Rx Absolute Timer Expire Count */
985#define E1000_ICTXPTC 0x410C /* Interrupt Cause Tx Packet Timer Expire Count */
986#define E1000_ICTXATC 0x4110 /* Interrupt Cause Tx Absolute Timer Expire Count */
987#define E1000_ICTXQEC 0x4118 /* Interrupt Cause Tx Queue Empty Count */
988#define E1000_ICTXQMTC 0x411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
989#define E1000_ICRXDMTC 0x4120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
990#define E1000_ICRXOC 0x4124 /* Interrupt Cause Receiver Overrun Count */
764#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ 991#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
992#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
765#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 993#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
766#define E1000_RA 0x05400 /* Receive Address - RW Array */ 994#define E1000_RA 0x05400 /* Receive Address - RW Array */
767#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ 995#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
@@ -779,6 +1007,16 @@ struct e1000_ffvt_entry {
779#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ 1007#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
780#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ 1008#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
781 1009
1010#define E1000_GCR 0x05B00 /* PCI-Ex Control */
1011#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
1012#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
1013#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
1014#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
1015#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
1016#define E1000_SWSM 0x05B50 /* SW Semaphore */
1017#define E1000_FWSM 0x05B54 /* FW Semaphore */
1018#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
 1019#define E1000_HICR 0x08F00 /* Host Interface Control */
782/* Register Set (82542) 1020/* Register Set (82542)
783 * 1021 *
784 * Some of the 82542 registers are located at different offsets than they are 1022 * Some of the 82542 registers are located at different offsets than they are
@@ -829,6 +1067,18 @@ struct e1000_ffvt_entry {
829#define E1000_82542_VFTA 0x00600 1067#define E1000_82542_VFTA 0x00600
830#define E1000_82542_LEDCTL E1000_LEDCTL 1068#define E1000_82542_LEDCTL E1000_LEDCTL
831#define E1000_82542_PBA E1000_PBA 1069#define E1000_82542_PBA E1000_PBA
1070#define E1000_82542_PBS E1000_PBS
1071#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
1072#define E1000_82542_EEARBC E1000_EEARBC
1073#define E1000_82542_FLASHT E1000_FLASHT
1074#define E1000_82542_EEWR E1000_EEWR
1075#define E1000_82542_FLSWCTL E1000_FLSWCTL
1076#define E1000_82542_FLSWDATA E1000_FLSWDATA
1077#define E1000_82542_FLSWCNT E1000_FLSWCNT
1078#define E1000_82542_FLOP E1000_FLOP
1079#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL
1080#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE
1081#define E1000_82542_ERT E1000_ERT
832#define E1000_82542_RXDCTL E1000_RXDCTL 1082#define E1000_82542_RXDCTL E1000_RXDCTL
833#define E1000_82542_RADV E1000_RADV 1083#define E1000_82542_RADV E1000_RADV
834#define E1000_82542_RSRPD E1000_RSRPD 1084#define E1000_82542_RSRPD E1000_RSRPD
@@ -913,6 +1163,38 @@ struct e1000_ffvt_entry {
913#define E1000_82542_FFMT E1000_FFMT 1163#define E1000_82542_FFMT E1000_FFMT
914#define E1000_82542_FFVT E1000_FFVT 1164#define E1000_82542_FFVT E1000_FFVT
915#define E1000_82542_HOST_IF E1000_HOST_IF 1165#define E1000_82542_HOST_IF E1000_HOST_IF
1166#define E1000_82542_IAM E1000_IAM
1167#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
1168#define E1000_82542_PSRCTL E1000_PSRCTL
1169#define E1000_82542_RAID E1000_RAID
1170#define E1000_82542_TARC0 E1000_TARC0
1171#define E1000_82542_TDBAL1 E1000_TDBAL1
1172#define E1000_82542_TDBAH1 E1000_TDBAH1
1173#define E1000_82542_TDLEN1 E1000_TDLEN1
1174#define E1000_82542_TDH1 E1000_TDH1
1175#define E1000_82542_TDT1 E1000_TDT1
1176#define E1000_82542_TXDCTL1 E1000_TXDCTL1
1177#define E1000_82542_TARC1 E1000_TARC1
1178#define E1000_82542_RFCTL E1000_RFCTL
1179#define E1000_82542_GCR E1000_GCR
1180#define E1000_82542_GSCL_1 E1000_GSCL_1
1181#define E1000_82542_GSCL_2 E1000_GSCL_2
1182#define E1000_82542_GSCL_3 E1000_GSCL_3
1183#define E1000_82542_GSCL_4 E1000_GSCL_4
1184#define E1000_82542_FACTPS E1000_FACTPS
1185#define E1000_82542_SWSM E1000_SWSM
1186#define E1000_82542_FWSM E1000_FWSM
1187#define E1000_82542_FFLT_DBG E1000_FFLT_DBG
1188#define E1000_82542_IAC E1000_IAC
1189#define E1000_82542_ICRXPTC E1000_ICRXPTC
1190#define E1000_82542_ICRXATC E1000_ICRXATC
1191#define E1000_82542_ICTXPTC E1000_ICTXPTC
1192#define E1000_82542_ICTXATC E1000_ICTXATC
1193#define E1000_82542_ICTXQEC E1000_ICTXQEC
1194#define E1000_82542_ICTXQMTC E1000_ICTXQMTC
1195#define E1000_82542_ICRXDMTC E1000_ICRXDMTC
1196#define E1000_82542_ICRXOC E1000_ICRXOC
1197#define E1000_82542_HICR E1000_HICR
916 1198
917/* Statistics counters collected by the MAC */ 1199/* Statistics counters collected by the MAC */
918struct e1000_hw_stats { 1200struct e1000_hw_stats {
@@ -974,11 +1256,21 @@ struct e1000_hw_stats {
974 uint64_t bptc; 1256 uint64_t bptc;
975 uint64_t tsctc; 1257 uint64_t tsctc;
976 uint64_t tsctfc; 1258 uint64_t tsctfc;
1259 uint64_t iac;
1260 uint64_t icrxptc;
1261 uint64_t icrxatc;
1262 uint64_t ictxptc;
1263 uint64_t ictxatc;
1264 uint64_t ictxqec;
1265 uint64_t ictxqmtc;
1266 uint64_t icrxdmtc;
1267 uint64_t icrxoc;
977}; 1268};
978 1269
979/* Structure containing variables used by the shared code (e1000_hw.c) */ 1270/* Structure containing variables used by the shared code (e1000_hw.c) */
980struct e1000_hw { 1271struct e1000_hw {
981 uint8_t __iomem *hw_addr; 1272 uint8_t *hw_addr;
1273 uint8_t *flash_address;
982 e1000_mac_type mac_type; 1274 e1000_mac_type mac_type;
983 e1000_phy_type phy_type; 1275 e1000_phy_type phy_type;
984 uint32_t phy_init_script; 1276 uint32_t phy_init_script;
@@ -993,6 +1285,7 @@ struct e1000_hw {
993 e1000_ms_type original_master_slave; 1285 e1000_ms_type original_master_slave;
994 e1000_ffe_config ffe_config_state; 1286 e1000_ffe_config ffe_config_state;
995 uint32_t asf_firmware_present; 1287 uint32_t asf_firmware_present;
1288 uint32_t eeprom_semaphore_present;
996 unsigned long io_base; 1289 unsigned long io_base;
997 uint32_t phy_id; 1290 uint32_t phy_id;
998 uint32_t phy_revision; 1291 uint32_t phy_revision;
@@ -1009,6 +1302,8 @@ struct e1000_hw {
1009 uint32_t ledctl_default; 1302 uint32_t ledctl_default;
1010 uint32_t ledctl_mode1; 1303 uint32_t ledctl_mode1;
1011 uint32_t ledctl_mode2; 1304 uint32_t ledctl_mode2;
1305 boolean_t tx_pkt_filtering;
1306 struct e1000_host_mng_dhcp_cookie mng_cookie;
1012 uint16_t phy_spd_default; 1307 uint16_t phy_spd_default;
1013 uint16_t autoneg_advertised; 1308 uint16_t autoneg_advertised;
1014 uint16_t pci_cmd_word; 1309 uint16_t pci_cmd_word;
@@ -1047,16 +1342,24 @@ struct e1000_hw {
1047 boolean_t adaptive_ifs; 1342 boolean_t adaptive_ifs;
1048 boolean_t ifs_params_forced; 1343 boolean_t ifs_params_forced;
1049 boolean_t in_ifs_mode; 1344 boolean_t in_ifs_mode;
1345 boolean_t mng_reg_access_disabled;
1050}; 1346};
1051 1347
1052 1348
1053#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ 1349#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
1054#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ 1350#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */
1351#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */
1352#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
1353#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */
1354#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
1355#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */
1356#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
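
The three RW_REG offsets above describe the EERD/EEWR register layout: a start bit, a done bit, the address field, then data in the upper half. A minimal sketch of a word read built only from them, assuming the driver's E1000_READ_REG/E1000_WRITE_REG accessors, udelay, and the shared code's E1000_SUCCESS/E1000_ERR_EEPROM return values (the poll bound reuses E1000_EEPROM_GRANT_ATTEMPTS purely for illustration):

static int32_t
e1000_eerd_read_sketch(struct e1000_hw *hw, uint16_t offset, uint16_t *data)
{
        uint32_t eerd = 0;
        int i;

        /* address goes above the start bit; writing START kicks off the read */
        E1000_WRITE_REG(hw, EERD, (offset << E1000_EEPROM_RW_ADDR_SHIFT) |
                                  E1000_EEPROM_RW_REG_START);

        /* poll the done bit before trusting the data field */
        for (i = 0; i < E1000_EEPROM_GRANT_ATTEMPTS; i++) {
                eerd = E1000_READ_REG(hw, EERD);
                if (eerd & E1000_EEPROM_RW_REG_DONE)
                        break;
                udelay(5);
        }
        if (!(eerd & E1000_EEPROM_RW_REG_DONE))
                return -E1000_ERR_EEPROM;

        /* the 16 data bits live above bit position E1000_EEPROM_RW_REG_DATA */
        *data = (uint16_t)(eerd >> E1000_EEPROM_RW_REG_DATA);
        return E1000_SUCCESS;
}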
1055/* Register Bit Masks */ 1357/* Register Bit Masks */
1056/* Device Control */ 1358/* Device Control */
1057#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ 1359#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
1058#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ 1360#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
1059#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ 1361#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
1362#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
1060#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ 1363#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
1061#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */ 1364#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
1062#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ 1365#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
@@ -1070,6 +1373,7 @@ struct e1000_hw {
1070#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ 1373#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
1071#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ 1374#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
1072#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 1375#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
1376#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
1073#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 1377#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
1074#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 1378#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
1075#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ 1379#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
@@ -1089,6 +1393,7 @@ struct e1000_hw {
1089#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ 1393#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
1090#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ 1394#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
1091#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ 1395#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
1396#define E1000_STATUS_FUNC_SHIFT 2
1092#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ 1397#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
1093#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ 1398#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
1094#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ 1399#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
@@ -1098,6 +1403,8 @@ struct e1000_hw {
1098#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ 1403#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
1099#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ 1404#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
1100#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ 1405#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
1406#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
1407#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
1101#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ 1408#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
1102#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ 1409#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
1103#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ 1410#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
@@ -1128,6 +1435,18 @@ struct e1000_hw {
1128#ifndef E1000_EEPROM_GRANT_ATTEMPTS 1435#ifndef E1000_EEPROM_GRANT_ATTEMPTS
1129#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ 1436#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
1130#endif 1437#endif
1438#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */
1439#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEPROM Size */
1440#define E1000_EECD_SIZE_EX_SHIFT 11
1441#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
1442#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
1443#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
1444#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
1445#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
1446#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
1447#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
1448#define E1000_STM_OPCODE 0xDB00
1449#define E1000_HICR_FW_RESET 0xC0
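
On parts that report their NVM geometry in EECD, the size field above converts to a word count. A hedged fragment: the 1 << (bits + EEPROM_WORD_SIZE_SHIFT) bias uses EEPROM_WORD_SIZE_SHIFT from the size definitions further down and is an assumption about how the shared code sizes the part.

        uint32_t eecd = E1000_READ_REG(hw, EECD);
        uint16_t size_bits, word_size;

        /* 4-bit encoded size, bits 14:11 of EECD */
        size_bits = (eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT;
        /* e.g. size_bits == 2 gives 1 << (2 + 6) = 256 words */
        word_size = 1 << (size_bits + EEPROM_WORD_SIZE_SHIFT);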
1131 1450
1132/* EEPROM Read */ 1451/* EEPROM Read */
1133#define E1000_EERD_START 0x00000001 /* Start Read */ 1452#define E1000_EERD_START 0x00000001 /* Start Read */
@@ -1171,6 +1490,8 @@ struct e1000_hw {
1171#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 1490#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
1172#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 1491#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
1173#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 1492#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
1493#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
1494#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
1174 1495
1175/* MDI Control */ 1496/* MDI Control */
1176#define E1000_MDIC_DATA_MASK 0x0000FFFF 1497#define E1000_MDIC_DATA_MASK 0x0000FFFF
@@ -1187,14 +1508,17 @@ struct e1000_hw {
1187/* LED Control */ 1508/* LED Control */
1188#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F 1509#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
1189#define E1000_LEDCTL_LED0_MODE_SHIFT 0 1510#define E1000_LEDCTL_LED0_MODE_SHIFT 0
1511#define E1000_LEDCTL_LED0_BLINK_RATE 0x00000020
1190#define E1000_LEDCTL_LED0_IVRT 0x00000040 1512#define E1000_LEDCTL_LED0_IVRT 0x00000040
1191#define E1000_LEDCTL_LED0_BLINK 0x00000080 1513#define E1000_LEDCTL_LED0_BLINK 0x00000080
1192#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 1514#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00
1193#define E1000_LEDCTL_LED1_MODE_SHIFT 8 1515#define E1000_LEDCTL_LED1_MODE_SHIFT 8
1516#define E1000_LEDCTL_LED1_BLINK_RATE 0x00002000
1194#define E1000_LEDCTL_LED1_IVRT 0x00004000 1517#define E1000_LEDCTL_LED1_IVRT 0x00004000
1195#define E1000_LEDCTL_LED1_BLINK 0x00008000 1518#define E1000_LEDCTL_LED1_BLINK 0x00008000
1196#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 1519#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000
1197#define E1000_LEDCTL_LED2_MODE_SHIFT 16 1520#define E1000_LEDCTL_LED2_MODE_SHIFT 16
1521#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000
1198#define E1000_LEDCTL_LED2_IVRT 0x00400000 1522#define E1000_LEDCTL_LED2_IVRT 0x00400000
1199#define E1000_LEDCTL_LED2_BLINK 0x00800000 1523#define E1000_LEDCTL_LED2_BLINK 0x00800000
1200#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 1524#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000
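
Each LED owns an 8-bit field: a 4-bit mode at the MODE_SHIFT offset, plus blink-rate, invert and blink bits. A hedged fragment reprogramming LED0, where led_mode stands in for one of the LEDCTL mode encodings and the register accessors are assumed from the driver:

        uint32_t ledctl = E1000_READ_REG(hw, LEDCTL);

        ledctl &= ~E1000_LEDCTL_LED0_MODE_MASK;         /* clear the old mode */
        ledctl |= (led_mode << E1000_LEDCTL_LED0_MODE_SHIFT) |
                  E1000_LEDCTL_LED0_BLINK;              /* blink in the new mode */
        E1000_WRITE_REG(hw, LEDCTL, ledctl);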
@@ -1238,6 +1562,10 @@ struct e1000_hw {
1238#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ 1562#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
1239#define E1000_ICR_TXD_LOW 0x00008000 1563#define E1000_ICR_TXD_LOW 0x00008000
1240#define E1000_ICR_SRPD 0x00010000 1564#define E1000_ICR_SRPD 0x00010000
1565#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
1566#define E1000_ICR_MNG 0x00040000 /* Manageability event */
1567#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
1568#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
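
INT_ASSERTED gives newer parts a cheap way to disambiguate a shared interrupt line. A hedged sketch of a handler using it; the real logic lives in e1000_intr in e1000_main.c, and older parts that never set the bit fall back to treating a zero ICR as not ours:

static irqreturn_t
e1000_intr_sketch(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *netdev = dev_id;
        struct e1000_adapter *adapter = netdev->priv;
        uint32_t icr = E1000_READ_REG(&adapter->hw, ICR);  /* reading clears ICR */

        if (!(icr & E1000_ICR_INT_ASSERTED))
                return IRQ_NONE;        /* interrupt belongs to another device */

        /* ... service the link-change, rx and tx causes recorded in icr ... */
        return IRQ_HANDLED;
}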
1241 1569
1242/* Interrupt Cause Set */ 1570/* Interrupt Cause Set */
1243#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1571#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1255,6 +1583,9 @@ struct e1000_hw {
1255#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1583#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
1256#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW 1584#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
1257#define E1000_ICS_SRPD E1000_ICR_SRPD 1585#define E1000_ICS_SRPD E1000_ICR_SRPD
1586#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
1587#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
1588#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
1258 1589
1259/* Interrupt Mask Set */ 1590/* Interrupt Mask Set */
1260#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1591#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1272,6 +1603,9 @@ struct e1000_hw {
1272#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1603#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
1273#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW 1604#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
1274#define E1000_IMS_SRPD E1000_ICR_SRPD 1605#define E1000_IMS_SRPD E1000_ICR_SRPD
1606#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
1607#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
1608#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
1275 1609
1276/* Interrupt Mask Clear */ 1610/* Interrupt Mask Clear */
1277#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 1611#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -1289,6 +1623,9 @@ struct e1000_hw {
1289#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1623#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
1290#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW 1624#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW
1291#define E1000_IMC_SRPD E1000_ICR_SRPD 1625#define E1000_IMC_SRPD E1000_ICR_SRPD
1626#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */
1627#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */
1628#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */
1292 1629
1293/* Receive Control */ 1630/* Receive Control */
1294#define E1000_RCTL_RST 0x00000001 /* Software reset */ 1631#define E1000_RCTL_RST 0x00000001 /* Software reset */
@@ -1301,6 +1638,8 @@ struct e1000_hw {
1301#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ 1638#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
1302#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ 1639#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
1303#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ 1640#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
1641#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
1642#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
1304#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ 1643#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
1305#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ 1644#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */
1306#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ 1645#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */
@@ -1327,6 +1666,34 @@ struct e1000_hw {
1327#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ 1666#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
1328#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ 1667#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
1329#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 1668#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
1669#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
1670#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
1671
1672/* Use byte values for the following shift parameters
1673 * Usage:
1674 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
1675 * E1000_PSRCTL_BSIZE0_MASK) |
1676 * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
1677 * E1000_PSRCTL_BSIZE1_MASK) |
1678 * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
1679 * E1000_PSRCTL_BSIZE2_MASK) |
1680 * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
1681 * E1000_PSRCTL_BSIZE3_MASK))
1682 * where value0 = [128..16256], default=256
1683 * value1 = [1024..64512], default=4096
1684 * value2 = [0..64512], default=4096
1685 * value3 = [0..64512], default=0
1686 */
1687
1688#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
1689#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
1690#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
1691#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
1692
1693#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
1694#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
1695#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
1696#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
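
Spelling the recipe out with the default sizes, as a hedged fragment; ROUNDUP is a local stand-in for the driver's round-up-to-multiple helper:

#define ROUNDUP(val, mult) (((val) + (mult) - 1) & ~((mult) - 1))

        uint32_t psrctl = 0;

        psrctl |= (ROUNDUP(256, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
                  E1000_PSRCTL_BSIZE0_MASK;     /* value0, default 256 */
        psrctl |= (ROUNDUP(4096, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
                  E1000_PSRCTL_BSIZE1_MASK;     /* value1, default 4096 */
        psrctl |= (ROUNDUP(4096, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
                  E1000_PSRCTL_BSIZE2_MASK;     /* value2, default 4096 */
        psrctl |= (ROUNDUP(0, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
                  E1000_PSRCTL_BSIZE3_MASK;     /* value3, default 0 */
        /* psrctl is now 0x00040402 for these defaults */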
1330 1697
1331/* Receive Descriptor */ 1698/* Receive Descriptor */
1332#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ 1699#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */
@@ -1341,6 +1708,23 @@ struct e1000_hw {
1341#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ 1708#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
1342#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 1709#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
1343 1710
1711/* Header split receive */
1712#define E1000_RFCTL_ISCSI_DIS 0x00000001
1713#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E
1714#define E1000_RFCTL_ISCSI_DWC_SHIFT 1
1715#define E1000_RFCTL_NFSW_DIS 0x00000040
1716#define E1000_RFCTL_NFSR_DIS 0x00000080
1717#define E1000_RFCTL_NFS_VER_MASK 0x00000300
1718#define E1000_RFCTL_NFS_VER_SHIFT 8
1719#define E1000_RFCTL_IPV6_DIS 0x00000400
1720#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
1721#define E1000_RFCTL_ACK_DIS 0x00001000
1722#define E1000_RFCTL_ACKD_DIS 0x00002000
1723#define E1000_RFCTL_IPFRSP_DIS 0x00004000
1724#define E1000_RFCTL_EXTEN 0x00008000
1725#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
1726#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
1727
1344/* Receive Descriptor Control */ 1728/* Receive Descriptor Control */
1345#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ 1729#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
1346#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ 1730#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
@@ -1354,6 +1738,8 @@ struct e1000_hw {
1354#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ 1738#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
1355#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ 1739#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
1356#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 1740#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
1741#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
1742 still to be processed. */
1357 1743
1358/* Transmit Configuration Word */ 1744/* Transmit Configuration Word */
1359#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ 1745#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
@@ -1387,12 +1773,16 @@ struct e1000_hw {
1387#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ 1773#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
1388#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ 1774#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
1389#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ 1775#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
1776#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
1390 1777
1391/* Receive Checksum Control */ 1778/* Receive Checksum Control */
1392#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ 1779#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
1393#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ 1780#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
1394#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ 1781#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
1395#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ 1782#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
1783#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
1784#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
1785
1396 1786
1397/* Definitions for power management and wakeup registers */ 1787/* Definitions for power management and wakeup registers */
1398/* Wake Up Control */ 1788/* Wake Up Control */
@@ -1411,6 +1801,7 @@ struct e1000_hw {
1411#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ 1801#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
1412#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ 1802#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
1413#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ 1803#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
1804#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
1414#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ 1805#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
1415#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ 1806#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
1416#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ 1807#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
@@ -1446,13 +1837,19 @@ struct e1000_hw {
1446#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ 1837#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
1447#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery 1838#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
1448 * Filtering */ 1839 * Filtering */
1840#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
1449#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ 1841#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
1450#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 1842#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
1451#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ 1843#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
1844#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
1452#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address 1845#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
1453 * filtering */ 1846 * filtering */
1454#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host 1847#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
1455 * memory */ 1848 * memory */
1849#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
1850 * filtering */
1851#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
1852#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
1456#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ 1853#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
1457#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ 1854#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
1458#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ 1855#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
@@ -1463,11 +1860,97 @@ struct e1000_hw {
1463#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ 1860#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
1464#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ 1861#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
1465 1862
1863/* SW Semaphore Register */
1864#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
1865#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
1866#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
1867#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
1868
1869/* FW Semaphore Register */
1870#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */
1871#define E1000_FWSM_MODE_SHIFT 1
1872#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
1873
1874/* FFLT Debug Register */
1875#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
1876
1877typedef enum {
1878 e1000_mng_mode_none = 0,
1879 e1000_mng_mode_asf,
1880 e1000_mng_mode_pt,
1881 e1000_mng_mode_ipmi,
1882 e1000_mng_mode_host_interface_only
1883} e1000_mng_mode;
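
Firmware advertises which of these modes it is running through the FWSM mode field above. A hedged fragment mirroring what a check-manageability-mode helper would do, assuming the driver's register accessor:

        uint32_t fwsm = E1000_READ_REG(hw, FWSM);

        /* compare the 3-bit mode field against the pass-through encoding */
        if ((fwsm & E1000_FWSM_MODE_MASK) ==
            (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))
                return TRUE;    /* manageability firmware owns rx filtering */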
1884
1885/* Host Interface Control Register */
1886#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */
1887#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done
1888 * to put command in RAM */
1889#define E1000_HICR_SV 0x00000004 /* Status Validity */
1890#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */
1891
1892/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
1893#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */
1894#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */
1895#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */
1896#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
1897
1898struct e1000_host_command_header {
1899 uint8_t command_id;
1900 uint8_t command_length;
1901 uint8_t command_options; /* I/F bits for command, status for return */
1902 uint8_t checksum;
1903};
1904struct e1000_host_command_info {
1905 struct e1000_host_command_header command_header; /* Command header / result header is 4 bytes */
1906 uint8_t command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data length can be 0..252 */
1907};
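
Tying the HICR bits and these structures together, a minimal sketch of the command handshake, assuming the driver's register accessors and the osdep msec_delay helper; writing the header and data bytes into the 0x8800 HOST_IF window is elided:

static int32_t
e1000_host_if_cmd_sketch(struct e1000_hw *hw)
{
        uint32_t hicr;
        uint16_t i;

        hicr = E1000_READ_REG(hw, HICR);
        if (!(hicr & E1000_HICR_EN))
                return -1;              /* host interface not enabled */

        /* command header + data are assumed already written to HOST_IF RAM */
        E1000_WRITE_REG(hw, HICR, hicr | E1000_HICR_C);

        /* firmware clears HICR.C once it has consumed the command */
        for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
                hicr = E1000_READ_REG(hw, HICR);
                if (!(hicr & E1000_HICR_C))
                        break;
                msec_delay(1);
        }

        /* SV says whether the status placed back in the header is valid */
        if ((hicr & E1000_HICR_C) || !(hicr & E1000_HICR_SV))
                return -1;
        return 0;
}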
1908
1909/* Host SMB register #0 */
1910#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */
1911#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */
1912#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */
1913#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */
1914
1915/* Host SMB register #1 */
1916#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN
1917#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN
1918#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT
1919#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT
1920
1921/* FW Status Register */
1922#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */
1923
1466/* Wake Up Packet Length */ 1924/* Wake Up Packet Length */
1467#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ 1925#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
1468 1926
1469#define E1000_MDALIGN 4096 1927#define E1000_MDALIGN 4096
1470 1928
1929#define E1000_GCR_BEM32 0x00400000
1930/* Function Active and Power State to MNG */
1931#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
1932#define E1000_FACTPS_LAN0_VALID 0x00000004
1933#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008
1934#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0
1935#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6
1936#define E1000_FACTPS_LAN1_VALID 0x00000100
1937#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200
1938#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000
1939#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12
1940#define E1000_FACTPS_IDE_ENABLE 0x00004000
1941#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000
1942#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000
1943#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18
1944#define E1000_FACTPS_SP_ENABLE 0x00100000
1945#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000
1946#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000
1947#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24
1948#define E1000_FACTPS_IPMI_ENABLE 0x04000000
1949#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000
1950#define E1000_FACTPS_MNGCG 0x20000000
1951#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000
1952#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000
1953
1471/* EEPROM Commands - Microwire */ 1954/* EEPROM Commands - Microwire */
1472#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ 1955#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
1473#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */ 1956#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
@@ -1477,22 +1960,20 @@ struct e1000_hw {
1477 1960
1478/* EEPROM Commands - SPI */ 1961/* EEPROM Commands - SPI */
1479#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ 1962#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
1480#define EEPROM_READ_OPCODE_SPI 0x3 /* EEPROM read opcode */ 1963#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
1481#define EEPROM_WRITE_OPCODE_SPI 0x2 /* EEPROM write opcode */ 1964#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
1482#define EEPROM_A8_OPCODE_SPI 0x8 /* opcode bit-3 = address bit-8 */ 1965#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
1483#define EEPROM_WREN_OPCODE_SPI 0x6 /* EEPROM set Write Enable latch */ 1966#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */
1484#define EEPROM_WRDI_OPCODE_SPI 0x4 /* EEPROM reset Write Enable latch */ 1967#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */
1485#define EEPROM_RDSR_OPCODE_SPI 0x5 /* EEPROM read Status register */ 1968#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */
1486#define EEPROM_WRSR_OPCODE_SPI 0x1 /* EEPROM write Status register */ 1969#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */
1970#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
1971#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
1972#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
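
One subtlety worth a fragment: on SPI parts with only 8 address bits, the ninth address bit travels inside the opcode byte. Here eeprom_address_bits and byte_addr are hypothetical inputs for illustration:

        uint8_t read_opcode = EEPROM_READ_OPCODE_SPI;

        /* byte addresses above 0xFF carry address bit 8 in opcode bit 3 */
        if (eeprom_address_bits == 8 && byte_addr > 0xFF)
                read_opcode |= EEPROM_A8_OPCODE_SPI;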
1487 1973
1488/* EEPROM Size definitions */ 1974/* EEPROM Size definitions */
1489#define EEPROM_SIZE_16KB 0x1800 1975#define EEPROM_WORD_SIZE_SHIFT 6
1490#define EEPROM_SIZE_8KB 0x1400 1976#define EEPROM_SIZE_SHIFT 10
1491#define EEPROM_SIZE_4KB 0x1000
1492#define EEPROM_SIZE_2KB 0x0C00
1493#define EEPROM_SIZE_1KB 0x0800
1494#define EEPROM_SIZE_512B 0x0400
1495#define EEPROM_SIZE_128B 0x0000
1496#define EEPROM_SIZE_MASK 0x1C00 1977#define EEPROM_SIZE_MASK 0x1C00
1497 1978
1498/* EEPROM Word Offsets */ 1979/* EEPROM Word Offsets */
@@ -1606,7 +2087,22 @@ struct e1000_hw {
1606#define IFS_MIN 40 2087#define IFS_MIN 40
1607#define IFS_RATIO 4 2088#define IFS_RATIO 4
1608 2089
2090/* Extended Configuration Control and Size */
2091#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001
2092#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002
2093#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004
2094#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008
2095#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010
2096#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
2097#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
2098#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x1FFF0000
2099
2100#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF
2101#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00
2102#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000
2103
1609/* PBA constants */ 2104/* PBA constants */
2105#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
1610#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ 2106#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
1611#define E1000_PBA_22K 0x0016 2107#define E1000_PBA_22K 0x0016
1612#define E1000_PBA_24K 0x0018 2108#define E1000_PBA_24K 0x0018
@@ -1663,6 +2159,13 @@ struct e1000_hw {
1663/* Number of milliseconds we wait for auto-negotiation to complete */ 2159/* Number of milliseconds we wait for auto-negotiation to complete */
1664#define LINK_UP_TIMEOUT 500 2160#define LINK_UP_TIMEOUT 500
1665 2161
2162/* Number of 100-microsecond intervals we wait for PCI Express master disable */
2163#define MASTER_DISABLE_TIMEOUT 800
2164/* Number of milliseconds we wait for the EEPROM auto-read done bit after MAC reset */
2165#define AUTO_READ_DONE_TIMEOUT 10
2166/* Number of milliseconds we wait for PHY configuration done after MAC reset */
2167#define PHY_CFG_TIMEOUT 40
2168
1666#define E1000_TX_BUFFER_SIZE ((uint32_t)1514) 2169#define E1000_TX_BUFFER_SIZE ((uint32_t)1514)
1667 2170
1668/* The carrier extension symbol, as received by the NIC. */ 2171/* The carrier extension symbol, as received by the NIC. */
@@ -1763,6 +2266,7 @@ struct e1000_hw {
1763#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ 2266#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
1764#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ 2267#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */
1765#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ 2268#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
2269#define IGP02E1000_PHY_POWER_MGMT 0x19
1766#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ 2270#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */
1767 2271
1768/* IGP01E1000 AGC Registers - stores the cable length values*/ 2272/* IGP01E1000 AGC Registers - stores the cable length values*/
@@ -1771,12 +2275,20 @@ struct e1000_hw {
1771#define IGP01E1000_PHY_AGC_C 0x1472 2275#define IGP01E1000_PHY_AGC_C 0x1472
1772#define IGP01E1000_PHY_AGC_D 0x1872 2276#define IGP01E1000_PHY_AGC_D 0x1872
1773 2277
2278/* IGP02E1000 AGC Registers for cable length values */
2279#define IGP02E1000_PHY_AGC_A 0x11B1
2280#define IGP02E1000_PHY_AGC_B 0x12B1
2281#define IGP02E1000_PHY_AGC_C 0x14B1
2282#define IGP02E1000_PHY_AGC_D 0x18B1
2283
1774/* IGP01E1000 DSP Reset Register */ 2284/* IGP01E1000 DSP Reset Register */
1775#define IGP01E1000_PHY_DSP_RESET 0x1F33 2285#define IGP01E1000_PHY_DSP_RESET 0x1F33
1776#define IGP01E1000_PHY_DSP_SET 0x1F71 2286#define IGP01E1000_PHY_DSP_SET 0x1F71
1777#define IGP01E1000_PHY_DSP_FFE 0x1F35 2287#define IGP01E1000_PHY_DSP_FFE 0x1F35
1778 2288
1779#define IGP01E1000_PHY_CHANNEL_NUM 4 2289#define IGP01E1000_PHY_CHANNEL_NUM 4
2290#define IGP02E1000_PHY_CHANNEL_NUM 4
2291
1780#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 2292#define IGP01E1000_PHY_AGC_PARAM_A 0x1171
1781#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 2293#define IGP01E1000_PHY_AGC_PARAM_B 0x1271
1782#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 2294#define IGP01E1000_PHY_AGC_PARAM_C 0x1471
@@ -2060,20 +2572,30 @@ struct e1000_hw {
2060#define IGP01E1000_MSE_CHANNEL_B 0x0F00 2572#define IGP01E1000_MSE_CHANNEL_B 0x0F00
2061#define IGP01E1000_MSE_CHANNEL_A 0xF000 2573#define IGP01E1000_MSE_CHANNEL_A 0xF000
2062 2574
2575#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
2576#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */
2577#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */
2578
2063/* IGP01E1000 DSP reset macros */ 2579/* IGP01E1000 DSP reset macros */
2064#define DSP_RESET_ENABLE 0x0 2580#define DSP_RESET_ENABLE 0x0
2065#define DSP_RESET_DISABLE 0x2 2581#define DSP_RESET_DISABLE 0x2
2066#define E1000_MAX_DSP_RESETS 10 2582#define E1000_MAX_DSP_RESETS 10
2067 2583
2068/* IGP01E1000 AGC Registers */ 2584/* IGP01E1000 & IGP02E1000 AGC Registers */
2069 2585
2070#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ 2586#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */
2587#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
2588
2589/* IGP02E1000 AGC Register Length 9-bit mask */
2590#define IGP02E1000_AGC_LENGTH_MASK 0x7F
2071 2591
2072/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ 2592/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */
2073#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 2593#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
2594#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 128
2074 2595
2075/* The precision of the length is +/- 10 meters */ 2596/* The precision error of the cable length is +/- 10 meters */
2076#define IGP01E1000_AGC_RANGE 10 2597#define IGP01E1000_AGC_RANGE 10
2598#define IGP02E1000_AGC_RANGE 10
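
Extracting the cable-length index is a plain shift-and-mask on the AGC register value; the 128-entry table that maps the index to meters lives in e1000_hw.c. A hedged fragment for the IGP02 layout:

        /* coarse bits 15:13 and fine bits 12:9 form a 7-bit table index */
        cur_agc = (agc_value >> IGP02E1000_AGC_LENGTH_SHIFT) &
                  IGP02E1000_AGC_LENGTH_MASK;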
2077 2599
2078/* IGP01E1000 PCS Initialization register */ 2600/* IGP01E1000 PCS Initialization register */
2079/* bits 3:6 in the PCS registers stores the channels polarity */ 2601/* bits 3:6 in the PCS registers stores the channels polarity */
@@ -2113,6 +2635,8 @@ struct e1000_hw {
2113#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID 2635#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID
2114#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID 2636#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
2115#define M88E1011_I_REV_4 0x04 2637#define M88E1011_I_REV_4 0x04
2638#define M88E1111_I_PHY_ID 0x01410CC0
2639#define L1LXT971A_PHY_ID 0x001378E0
2116 2640
2117/* Miscellaneous PHY bit definitions. */ 2641/* Miscellaneous PHY bit definitions. */
2118#define PHY_PREAMBLE 0xFFFFFFFF 2642#define PHY_PREAMBLE 0xFFFFFFFF
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 82549a6fcfb3..325495b8b60c 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 3
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free 7 under the terms of the GNU General Public License as published by the Free
@@ -29,33 +29,9 @@
29#include "e1000.h" 29#include "e1000.h"
30 30
31/* Change Log 31/* Change Log
32 * 5.3.12 6/7/04 32 * 6.0.44+ 2/15/05
33 * - kcompat NETIF_MSG for older kernels (2.4.9) <sean.p.mcdermott@intel.com> 33 * o Applied Anton's patch to resolve tx hang in hardware
34 * - if_mii support and associated kcompat for older kernels 34 * o Applied Andrew Morton's patch - e1000 stops working after resume
35 * - More errlogging support from Jon Mason <jonmason@us.ibm.com>
36 * - Fix TSO issues on PPC64 machines -- Jon Mason <jonmason@us.ibm.com>
37 *
38 * 5.7.1 12/16/04
39 * - Resurrect 82547EI/GI related fix in e1000_intr to avoid deadlocks. This
40 * fix was removed as it caused system instability. The suspected cause of
41 * this is the called to e1000_irq_disable in e1000_intr. Inlined the
42 * required piece of e1000_irq_disable into e1000_intr - Anton Blanchard
43 * 5.7.0 12/10/04
44 * - include fix to the condition that determines when to quit NAPI - Robert Olsson
45 * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
46 * 5.6.5 11/01/04
47 * - Enabling NETIF_F_SG without checksum offload is illegal -
48 John Mason <jdmason@us.ibm.com>
49 * 5.6.3 10/26/04
50 * - Remove redundant initialization - Jamal Hadi
51 * - Reset buffer_info->dma in tx resource cleanup logic
52 * 5.6.2 10/12/04
53 * - Avoid filling tx_ring completely - shemminger@osdl.org
54 * - Replace schedule_timeout() with msleep()/msleep_interruptible() -
55 * nacc@us.ibm.com
56 * - Sparse cleanup - shemminger@osdl.org
57 * - Fix tx resource cleanup logic
58 * - LLTX support - ak@suse.de and hadi@cyberus.ca
59 */ 35 */
60 36
61char e1000_driver_name[] = "e1000"; 37char e1000_driver_name[] = "e1000";
@@ -65,7 +41,7 @@ char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
65#else 41#else
66#define DRIVERNAPI "-NAPI" 42#define DRIVERNAPI "-NAPI"
67#endif 43#endif
68#define DRV_VERSION "5.7.6-k2"DRIVERNAPI 44#define DRV_VERSION "6.0.54-k2"DRIVERNAPI
69char e1000_driver_version[] = DRV_VERSION; 45char e1000_driver_version[] = DRV_VERSION;
70char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation."; 46char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
71 47
@@ -96,6 +72,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
96 INTEL_E1000_ETHERNET_DEVICE(0x1017), 72 INTEL_E1000_ETHERNET_DEVICE(0x1017),
97 INTEL_E1000_ETHERNET_DEVICE(0x1018), 73 INTEL_E1000_ETHERNET_DEVICE(0x1018),
98 INTEL_E1000_ETHERNET_DEVICE(0x1019), 74 INTEL_E1000_ETHERNET_DEVICE(0x1019),
75 INTEL_E1000_ETHERNET_DEVICE(0x101A),
99 INTEL_E1000_ETHERNET_DEVICE(0x101D), 76 INTEL_E1000_ETHERNET_DEVICE(0x101D),
100 INTEL_E1000_ETHERNET_DEVICE(0x101E), 77 INTEL_E1000_ETHERNET_DEVICE(0x101E),
101 INTEL_E1000_ETHERNET_DEVICE(0x1026), 78 INTEL_E1000_ETHERNET_DEVICE(0x1026),
@@ -110,6 +87,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
110 INTEL_E1000_ETHERNET_DEVICE(0x107B), 87 INTEL_E1000_ETHERNET_DEVICE(0x107B),
111 INTEL_E1000_ETHERNET_DEVICE(0x107C), 88 INTEL_E1000_ETHERNET_DEVICE(0x107C),
112 INTEL_E1000_ETHERNET_DEVICE(0x108A), 89 INTEL_E1000_ETHERNET_DEVICE(0x108A),
90 INTEL_E1000_ETHERNET_DEVICE(0x108B),
91 INTEL_E1000_ETHERNET_DEVICE(0x108C),
92 INTEL_E1000_ETHERNET_DEVICE(0x1099),
113 /* required last entry */ 93 /* required last entry */
114 {0,} 94 {0,}
115}; 95};
@@ -155,10 +135,14 @@ static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
155static int e1000_clean(struct net_device *netdev, int *budget); 135static int e1000_clean(struct net_device *netdev, int *budget);
156static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, 136static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
157 int *work_done, int work_to_do); 137 int *work_done, int work_to_do);
138static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
139 int *work_done, int work_to_do);
158#else 140#else
159static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter); 141static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
142static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter);
160#endif 143#endif
161static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter); 144static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
145static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter);
162static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 146static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
163static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 147static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
164 int cmd); 148 int cmd);
@@ -286,7 +270,29 @@ e1000_irq_enable(struct e1000_adapter *adapter)
286 E1000_WRITE_FLUSH(&adapter->hw); 270 E1000_WRITE_FLUSH(&adapter->hw);
287 } 271 }
288} 272}
289 273void
274e1000_update_mng_vlan(struct e1000_adapter *adapter)
275{
276 struct net_device *netdev = adapter->netdev;
277 uint16_t vid = adapter->hw.mng_cookie.vlan_id;
278 uint16_t old_vid = adapter->mng_vlan_id;
279 if(adapter->vlgrp) {
280 if(!adapter->vlgrp->vlan_devices[vid]) {
281 if(adapter->hw.mng_cookie.status &
282 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
283 e1000_vlan_rx_add_vid(netdev, vid);
284 adapter->mng_vlan_id = vid;
285 } else
286 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
287
288 if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
289 (vid != old_vid) &&
290 !adapter->vlgrp->vlan_devices[old_vid])
291 e1000_vlan_rx_kill_vid(netdev, old_vid);
292 }
293 }
294}
295
290int 296int
291e1000_up(struct e1000_adapter *adapter) 297e1000_up(struct e1000_adapter *adapter)
292{ 298{
@@ -310,19 +316,33 @@ e1000_up(struct e1000_adapter *adapter)
310 e1000_configure_tx(adapter); 316 e1000_configure_tx(adapter);
311 e1000_setup_rctl(adapter); 317 e1000_setup_rctl(adapter);
312 e1000_configure_rx(adapter); 318 e1000_configure_rx(adapter);
313 e1000_alloc_rx_buffers(adapter); 319 adapter->alloc_rx_buf(adapter);
314 320
321#ifdef CONFIG_PCI_MSI
322 if(adapter->hw.mac_type > e1000_82547_rev_2) {
323 adapter->have_msi = TRUE;
324 if((err = pci_enable_msi(adapter->pdev))) {
325 DPRINTK(PROBE, ERR,
326 "Unable to allocate MSI interrupt Error: %d\n", err);
327 adapter->have_msi = FALSE;
328 }
329 }
330#endif
315 if((err = request_irq(adapter->pdev->irq, &e1000_intr, 331 if((err = request_irq(adapter->pdev->irq, &e1000_intr,
316 SA_SHIRQ | SA_SAMPLE_RANDOM, 332 SA_SHIRQ | SA_SAMPLE_RANDOM,
317 netdev->name, netdev))) 333 netdev->name, netdev))) {
334 DPRINTK(PROBE, ERR,
335 "Unable to allocate interrupt Error: %d\n", err);
318 return err; 336 return err;
337 }
319 338
320 mod_timer(&adapter->watchdog_timer, jiffies); 339 mod_timer(&adapter->watchdog_timer, jiffies);
321 e1000_irq_enable(adapter);
322 340
323#ifdef CONFIG_E1000_NAPI 341#ifdef CONFIG_E1000_NAPI
324 netif_poll_enable(netdev); 342 netif_poll_enable(netdev);
325#endif 343#endif
344 e1000_irq_enable(adapter);
345
326 return 0; 346 return 0;
327} 347}
328 348
@@ -333,6 +353,11 @@ e1000_down(struct e1000_adapter *adapter)
333 353
334 e1000_irq_disable(adapter); 354 e1000_irq_disable(adapter);
335 free_irq(adapter->pdev->irq, netdev); 355 free_irq(adapter->pdev->irq, netdev);
356#ifdef CONFIG_PCI_MSI
357 if(adapter->hw.mac_type > e1000_82547_rev_2 &&
358 adapter->have_msi == TRUE)
359 pci_disable_msi(adapter->pdev);
360#endif
336 del_timer_sync(&adapter->tx_fifo_stall_timer); 361 del_timer_sync(&adapter->tx_fifo_stall_timer);
337 del_timer_sync(&adapter->watchdog_timer); 362 del_timer_sync(&adapter->watchdog_timer);
338 del_timer_sync(&adapter->phy_info_timer); 363 del_timer_sync(&adapter->phy_info_timer);
@@ -350,62 +375,93 @@ e1000_down(struct e1000_adapter *adapter)
350 e1000_clean_rx_ring(adapter); 375 e1000_clean_rx_ring(adapter);
351 376
352 /* If WoL is not enabled 377 /* If WoL is not enabled
378 * and management mode is not IAMT
353 * Power down the PHY so no link is implied when interface is down */ 379 * Power down the PHY so no link is implied when interface is down */
354 if(!adapter->wol && adapter->hw.media_type == e1000_media_type_copper) { 380 if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
381 adapter->hw.media_type == e1000_media_type_copper &&
382 !e1000_check_mng_mode(&adapter->hw) &&
383 !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
355 uint16_t mii_reg; 384 uint16_t mii_reg;
356 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 385 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
357 mii_reg |= MII_CR_POWER_DOWN; 386 mii_reg |= MII_CR_POWER_DOWN;
358 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); 387 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
388 mdelay(1);
359 } 389 }
360} 390}
361 391
362void 392void
363e1000_reset(struct e1000_adapter *adapter) 393e1000_reset(struct e1000_adapter *adapter)
364{ 394{
365 uint32_t pba; 395 struct net_device *netdev = adapter->netdev;
396 uint32_t pba, manc;
397 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
398 uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
366 399
367 /* Repartition Pba for greater than 9k mtu 400 /* Repartition Pba for greater than 9k mtu
368 * To take effect CTRL.RST is required. 401 * To take effect CTRL.RST is required.
369 */ 402 */
370 403
371 if(adapter->hw.mac_type < e1000_82547) { 404 switch (adapter->hw.mac_type) {
372 if(adapter->rx_buffer_len > E1000_RXBUFFER_8192) 405 case e1000_82547:
373 pba = E1000_PBA_40K; 406 case e1000_82547_rev_2:
374 else 407 pba = E1000_PBA_30K;
375 pba = E1000_PBA_48K; 408 break;
376 } else { 409 case e1000_82573:
377 if(adapter->rx_buffer_len > E1000_RXBUFFER_8192) 410 pba = E1000_PBA_12K;
378 pba = E1000_PBA_22K; 411 break;
379 else 412 default:
380 pba = E1000_PBA_30K; 413 pba = E1000_PBA_48K;
414 break;
415 }
416
417 if((adapter->hw.mac_type != e1000_82573) &&
418 (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
419 pba -= 8; /* allocate more FIFO for Tx */
420 /* send an XOFF when there is enough space in the
421 * Rx FIFO to hold one extra full size Rx packet
422 */
423 fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
424 ETHERNET_FCS_SIZE + 1;
425 fc_low_water_mark = fc_high_water_mark + 8;
426 }
427
428
429 if(adapter->hw.mac_type == e1000_82547) {
381 adapter->tx_fifo_head = 0; 430 adapter->tx_fifo_head = 0;
382 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 431 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
383 adapter->tx_fifo_size = 432 adapter->tx_fifo_size =
384 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; 433 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
385 atomic_set(&adapter->tx_fifo_stall, 0); 434 atomic_set(&adapter->tx_fifo_stall, 0);
386 } 435 }
436
387 E1000_WRITE_REG(&adapter->hw, PBA, pba); 437 E1000_WRITE_REG(&adapter->hw, PBA, pba);
388 438
389 /* flow control settings */ 439 /* flow control settings */
390 adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) - 440 adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
391 E1000_FC_HIGH_DIFF; 441 fc_high_water_mark;
392 adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) - 442 adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
393 E1000_FC_LOW_DIFF; 443 fc_low_water_mark;
394 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; 444 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
395 adapter->hw.fc_send_xon = 1; 445 adapter->hw.fc_send_xon = 1;
396 adapter->hw.fc = adapter->hw.original_fc; 446 adapter->hw.fc = adapter->hw.original_fc;
397 447
448 /* Allow time for pending master requests to run */
398 e1000_reset_hw(&adapter->hw); 449 e1000_reset_hw(&adapter->hw);
399 if(adapter->hw.mac_type >= e1000_82544) 450 if(adapter->hw.mac_type >= e1000_82544)
400 E1000_WRITE_REG(&adapter->hw, WUC, 0); 451 E1000_WRITE_REG(&adapter->hw, WUC, 0);
401 if(e1000_init_hw(&adapter->hw)) 452 if(e1000_init_hw(&adapter->hw))
402 DPRINTK(PROBE, ERR, "Hardware Error\n"); 453 DPRINTK(PROBE, ERR, "Hardware Error\n");
403 454 e1000_update_mng_vlan(adapter);
404 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 455 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
405 E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE); 456 E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
406 457
407 e1000_reset_adaptive(&adapter->hw); 458 e1000_reset_adaptive(&adapter->hw);
408 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 459 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
460 if (adapter->en_mng_pt) {
461 manc = E1000_READ_REG(&adapter->hw, MANC);
462 manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
463 E1000_WRITE_REG(&adapter->hw, MANC, manc);
464 }
409} 465}
410 466
411/** 467/**
@@ -426,15 +482,13 @@ e1000_probe(struct pci_dev *pdev,
426{ 482{
427 struct net_device *netdev; 483 struct net_device *netdev;
428 struct e1000_adapter *adapter; 484 struct e1000_adapter *adapter;
485 unsigned long mmio_start, mmio_len;
486 uint32_t swsm;
487
429 static int cards_found = 0; 488 static int cards_found = 0;
430 unsigned long mmio_start; 489 int i, err, pci_using_dac;
431 int mmio_len;
432 int pci_using_dac;
433 int i;
434 int err;
435 uint16_t eeprom_data; 490 uint16_t eeprom_data;
436 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 491 uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
437
438 if((err = pci_enable_device(pdev))) 492 if((err = pci_enable_device(pdev)))
439 return err; 493 return err;
440 494
@@ -521,6 +575,9 @@ e1000_probe(struct pci_dev *pdev,
521 if((err = e1000_sw_init(adapter))) 575 if((err = e1000_sw_init(adapter)))
522 goto err_sw_init; 576 goto err_sw_init;
523 577
578 if((err = e1000_check_phy_reset_block(&adapter->hw)))
579 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
580
524 if(adapter->hw.mac_type >= e1000_82543) { 581 if(adapter->hw.mac_type >= e1000_82543) {
525 netdev->features = NETIF_F_SG | 582 netdev->features = NETIF_F_SG |
526 NETIF_F_HW_CSUM | 583 NETIF_F_HW_CSUM |
@@ -533,6 +590,11 @@ e1000_probe(struct pci_dev *pdev,
533 if((adapter->hw.mac_type >= e1000_82544) && 590 if((adapter->hw.mac_type >= e1000_82544) &&
534 (adapter->hw.mac_type != e1000_82547)) 591 (adapter->hw.mac_type != e1000_82547))
535 netdev->features |= NETIF_F_TSO; 592 netdev->features |= NETIF_F_TSO;
593
594#ifdef NETIF_F_TSO_IPV6
595 if(adapter->hw.mac_type > e1000_82547_rev_2)
596 netdev->features |= NETIF_F_TSO_IPV6;
597#endif
536#endif 598#endif
537 if(pci_using_dac) 599 if(pci_using_dac)
538 netdev->features |= NETIF_F_HIGHDMA; 600 netdev->features |= NETIF_F_HIGHDMA;
@@ -540,6 +602,8 @@ e1000_probe(struct pci_dev *pdev,
540 /* hard_start_xmit is safe against parallel locking */ 602 /* hard_start_xmit is safe against parallel locking */
541 netdev->features |= NETIF_F_LLTX; 603 netdev->features |= NETIF_F_LLTX;
542 604
605 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
606
543 /* before reading the EEPROM, reset the controller to 607 /* before reading the EEPROM, reset the controller to
544 * put the device in a known good starting state */ 608 * put the device in a known good starting state */
545 609
@@ -555,7 +619,7 @@ e1000_probe(struct pci_dev *pdev,
555 619
556 /* copy the MAC address out of the EEPROM */ 620 /* copy the MAC address out of the EEPROM */
557 621
558 if (e1000_read_mac_addr(&adapter->hw)) 622 if(e1000_read_mac_addr(&adapter->hw))
559 DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); 623 DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
560 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 624 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
561 625
@@ -629,6 +693,17 @@ e1000_probe(struct pci_dev *pdev,
629 /* reset the hardware with the new settings */ 693 /* reset the hardware with the new settings */
630 e1000_reset(adapter); 694 e1000_reset(adapter);
631 695
696 /* Let firmware know the driver has taken over */
697 switch(adapter->hw.mac_type) {
698 case e1000_82573:
699 swsm = E1000_READ_REG(&adapter->hw, SWSM);
700 E1000_WRITE_REG(&adapter->hw, SWSM,
701 swsm | E1000_SWSM_DRV_LOAD);
702 break;
703 default:
704 break;
705 }
706
632 strcpy(netdev->name, "eth%d"); 707 strcpy(netdev->name, "eth%d");
633 if((err = register_netdev(netdev))) 708 if((err = register_netdev(netdev)))
634 goto err_register; 709 goto err_register;
@@ -664,7 +739,7 @@ e1000_remove(struct pci_dev *pdev)
664{ 739{
665 struct net_device *netdev = pci_get_drvdata(pdev); 740 struct net_device *netdev = pci_get_drvdata(pdev);
666 struct e1000_adapter *adapter = netdev->priv; 741 struct e1000_adapter *adapter = netdev->priv;
667 uint32_t manc; 742 uint32_t manc, swsm;
668 743
669 flush_scheduled_work(); 744 flush_scheduled_work();
670 745
@@ -677,9 +752,21 @@ e1000_remove(struct pci_dev *pdev)
677 } 752 }
678 } 753 }
679 754
755 switch(adapter->hw.mac_type) {
756 case e1000_82573:
757 swsm = E1000_READ_REG(&adapter->hw, SWSM);
758 E1000_WRITE_REG(&adapter->hw, SWSM,
759 swsm & ~E1000_SWSM_DRV_LOAD);
760 break;
761
762 default:
763 break;
764 }
765
680 unregister_netdev(netdev); 766 unregister_netdev(netdev);
681 767
682 e1000_phy_hw_reset(&adapter->hw); 768 if(!e1000_check_phy_reset_block(&adapter->hw))
769 e1000_phy_hw_reset(&adapter->hw);
683 770
684 iounmap(adapter->hw.hw_addr); 771 iounmap(adapter->hw.hw_addr);
685 pci_release_regions(pdev); 772 pci_release_regions(pdev);
@@ -717,6 +804,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
717 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 804 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
718 805
719 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 806 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
807 adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
720 hw->max_frame_size = netdev->mtu + 808 hw->max_frame_size = netdev->mtu +
721 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 809 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
722 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; 810 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
@@ -730,7 +818,10 @@ e1000_sw_init(struct e1000_adapter *adapter)
730 818
731 /* initialize eeprom parameters */ 819 /* initialize eeprom parameters */
732 820
733 e1000_init_eeprom_params(hw); 821 if(e1000_init_eeprom_params(hw)) {
822 E1000_ERR("EEPROM initialization failed\n");
823 return -EIO;
824 }
734 825
735 switch(hw->mac_type) { 826 switch(hw->mac_type) {
736 default: 827 default:
@@ -795,6 +886,11 @@ e1000_open(struct net_device *netdev)
795 886
796 if((err = e1000_up(adapter))) 887 if((err = e1000_up(adapter)))
797 goto err_up; 888 goto err_up;
889 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
890 if((adapter->hw.mng_cookie.status &
891 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
892 e1000_update_mng_vlan(adapter);
893 }
798 894
799 return E1000_SUCCESS; 895 return E1000_SUCCESS;
800 896
@@ -830,14 +926,18 @@ e1000_close(struct net_device *netdev)
830 e1000_free_tx_resources(adapter); 926 e1000_free_tx_resources(adapter);
831 e1000_free_rx_resources(adapter); 927 e1000_free_rx_resources(adapter);
832 928
929 if((adapter->hw.mng_cookie.status &
930 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
931 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
932 }
833 return 0; 933 return 0;
834} 934}
835 935
836/** 936/**
837 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary 937 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
838 * @adapter: address of board private structure 938 * @adapter: address of board private structure
839 * @begin: address of beginning of memory 939 * @start: address of beginning of memory
840 * @end: address of end of memory 940 * @len: length of memory
841 **/ 941 **/
842static inline boolean_t 942static inline boolean_t
843e1000_check_64k_bound(struct e1000_adapter *adapter, 943e1000_check_64k_bound(struct e1000_adapter *adapter,
@@ -846,12 +946,10 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
846 unsigned long begin = (unsigned long) start; 946 unsigned long begin = (unsigned long) start;
847 unsigned long end = begin + len; 947 unsigned long end = begin + len;
848 948
849 /* first rev 82545 and 82546 need to not allow any memory 949 /* First rev 82545 and 82546 need to not allow any memory
850 * write location to cross a 64k boundary due to errata 23 */ 950 * write location to cross 64k boundary due to errata 23 */
851 if (adapter->hw.mac_type == e1000_82545 || 951 if (adapter->hw.mac_type == e1000_82545 ||
852 adapter->hw.mac_type == e1000_82546 ) { 952 adapter->hw.mac_type == e1000_82546) {
853
854 /* check buffer doesn't cross 64kB */
855 return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE; 953 return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
856 } 954 }
857 955
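
The XOR test is terse: begin and (end - 1) fall in the same 64 kB page exactly when their upper bits agree, so a nonzero result after the shift means the buffer straddles a boundary. A self-contained illustration (standalone C, not driver code):

#include <stdio.h>

static int crosses_64k(unsigned long begin, unsigned long len)
{
	unsigned long end = begin + len;
	return ((begin ^ (end - 1)) >> 16) != 0;
}

int main(void)
{
	/* 8 kB buffer at 0x1f800 ends at 0x217ff: pages 0x1 vs 0x2 */
	printf("%d\n", crosses_64k(0x1f800, 0x2000));	/* prints 1 */
	/* same size at 0x20000 ends at 0x21fff: both in page 0x2  */
	printf("%d\n", crosses_64k(0x20000, 0x2000));	/* prints 0 */
	return 0;
}
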
@@ -875,8 +973,8 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
875 size = sizeof(struct e1000_buffer) * txdr->count; 973 size = sizeof(struct e1000_buffer) * txdr->count;
876 txdr->buffer_info = vmalloc(size); 974 txdr->buffer_info = vmalloc(size);
877 if(!txdr->buffer_info) { 975 if(!txdr->buffer_info) {
878 DPRINTK(PROBE, ERR, 976 DPRINTK(PROBE, ERR,
879 "Unable to Allocate Memory for the Transmit descriptor ring\n"); 977 "Unable to allocate memory for the transmit descriptor ring\n");
880 return -ENOMEM; 978 return -ENOMEM;
881 } 979 }
882 memset(txdr->buffer_info, 0, size); 980 memset(txdr->buffer_info, 0, size);
@@ -889,38 +987,38 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
889 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 987 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
890 if(!txdr->desc) { 988 if(!txdr->desc) {
891setup_tx_desc_die: 989setup_tx_desc_die:
892 DPRINTK(PROBE, ERR,
893 "Unable to Allocate Memory for the Transmit descriptor ring\n");
894 vfree(txdr->buffer_info); 990 vfree(txdr->buffer_info);
991 DPRINTK(PROBE, ERR,
992 "Unable to allocate memory for the transmit descriptor ring\n");
895 return -ENOMEM; 993 return -ENOMEM;
896 } 994 }
897 995
898 /* fix for errata 23, cant cross 64kB boundary */ 996 /* Fix for errata 23, can't cross 64kB boundary */
899 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 997 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
900 void *olddesc = txdr->desc; 998 void *olddesc = txdr->desc;
901 dma_addr_t olddma = txdr->dma; 999 dma_addr_t olddma = txdr->dma;
902 DPRINTK(TX_ERR,ERR,"txdr align check failed: %u bytes at %p\n", 1000 DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
903 txdr->size, txdr->desc); 1001 "at %p\n", txdr->size, txdr->desc);
904 /* try again, without freeing the previous */ 1002 /* Try again, without freeing the previous */
905 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1003 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
906 /* failed allocation, critial failure */
907 if(!txdr->desc) { 1004 if(!txdr->desc) {
1005 /* Failed allocation, critical failure */
908 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1006 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
909 goto setup_tx_desc_die; 1007 goto setup_tx_desc_die;
910 } 1008 }
911 1009
912 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1010 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
913 /* give up */ 1011 /* give up */
914 pci_free_consistent(pdev, txdr->size, 1012 pci_free_consistent(pdev, txdr->size, txdr->desc,
915 txdr->desc, txdr->dma); 1013 txdr->dma);
916 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1014 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
917 DPRINTK(PROBE, ERR, 1015 DPRINTK(PROBE, ERR,
918 "Unable to Allocate aligned Memory for the Transmit" 1016 "Unable to allocate aligned memory "
919 " descriptor ring\n"); 1017 "for the transmit descriptor ring\n");
920 vfree(txdr->buffer_info); 1018 vfree(txdr->buffer_info);
921 return -ENOMEM; 1019 return -ENOMEM;
922 } else { 1020 } else {
923 /* free old, move on with the new one since its okay */ 1021 /* Free old allocation, new allocation was successful */
924 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1022 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
925 } 1023 }
926 } 1024 }
@@ -1022,59 +1120,88 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
1022{ 1120{
1023 struct e1000_desc_ring *rxdr = &adapter->rx_ring; 1121 struct e1000_desc_ring *rxdr = &adapter->rx_ring;
1024 struct pci_dev *pdev = adapter->pdev; 1122 struct pci_dev *pdev = adapter->pdev;
1025 int size; 1123 int size, desc_len;
1026 1124
1027 size = sizeof(struct e1000_buffer) * rxdr->count; 1125 size = sizeof(struct e1000_buffer) * rxdr->count;
1028 rxdr->buffer_info = vmalloc(size); 1126 rxdr->buffer_info = vmalloc(size);
1029 if(!rxdr->buffer_info) { 1127 if(!rxdr->buffer_info) {
1030 DPRINTK(PROBE, ERR, 1128 DPRINTK(PROBE, ERR,
1031 "Unable to Allocate Memory for the Recieve descriptor ring\n"); 1129 "Unable to allocate memory for the receive descriptor ring\n");
1032 return -ENOMEM; 1130 return -ENOMEM;
1033 } 1131 }
1034 memset(rxdr->buffer_info, 0, size); 1132 memset(rxdr->buffer_info, 0, size);
1035 1133
1134 size = sizeof(struct e1000_ps_page) * rxdr->count;
1135 rxdr->ps_page = kmalloc(size, GFP_KERNEL);
1136 if(!rxdr->ps_page) {
1137 vfree(rxdr->buffer_info);
1138 DPRINTK(PROBE, ERR,
1139 "Unable to allocate memory for the receive descriptor ring\n");
1140 return -ENOMEM;
1141 }
1142 memset(rxdr->ps_page, 0, size);
1143
1144 size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
1145 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
1146 if(!rxdr->ps_page_dma) {
1147 vfree(rxdr->buffer_info);
1148 kfree(rxdr->ps_page);
1149 DPRINTK(PROBE, ERR,
1150 "Unable to allocate memory for the receive descriptor ring\n");
1151 return -ENOMEM;
1152 }
1153 memset(rxdr->ps_page_dma, 0, size);
1154
1155 if(adapter->hw.mac_type <= e1000_82547_rev_2)
1156 desc_len = sizeof(struct e1000_rx_desc);
1157 else
1158 desc_len = sizeof(union e1000_rx_desc_packet_split);
1159
1036 /* Round up to nearest 4K */ 1160 /* Round up to nearest 4K */
1037 1161
1038 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1162 rxdr->size = rxdr->count * desc_len;
1039 E1000_ROUNDUP(rxdr->size, 4096); 1163 E1000_ROUNDUP(rxdr->size, 4096);
1040 1164
1041 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1165 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1042 1166
1043 if(!rxdr->desc) { 1167 if(!rxdr->desc) {
1044setup_rx_desc_die: 1168setup_rx_desc_die:
1045 DPRINTK(PROBE, ERR,
1046 "Unble to Allocate Memory for the Recieve descriptor ring\n");
1047 vfree(rxdr->buffer_info); 1169 vfree(rxdr->buffer_info);
1170 kfree(rxdr->ps_page);
1171 kfree(rxdr->ps_page_dma);
1172 DPRINTK(PROBE, ERR,
1173 "Unable to allocate memory for the receive descriptor ring\n");
1048 return -ENOMEM; 1174 return -ENOMEM;
1049 } 1175 }
1050 1176
1051 /* fix for errata 23, cant cross 64kB boundary */ 1177 /* Fix for errata 23, can't cross 64kB boundary */
1052 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1178 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1053 void *olddesc = rxdr->desc; 1179 void *olddesc = rxdr->desc;
1054 dma_addr_t olddma = rxdr->dma; 1180 dma_addr_t olddma = rxdr->dma;
1055 DPRINTK(RX_ERR,ERR, 1181 DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
1056 "rxdr align check failed: %u bytes at %p\n", 1182 "at %p\n", rxdr->size, rxdr->desc);
1057 rxdr->size, rxdr->desc); 1183 /* Try again, without freeing the previous */
1058 /* try again, without freeing the previous */
1059 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1184 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1060 /* failed allocation, critial failure */
1061 if(!rxdr->desc) { 1185 if(!rxdr->desc) {
1186 /* Failed allocation, critical failure */
1062 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1187 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1063 goto setup_rx_desc_die; 1188 goto setup_rx_desc_die;
1064 } 1189 }
1065 1190
1066 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1191 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1067 /* give up */ 1192 /* give up */
1068 pci_free_consistent(pdev, rxdr->size, 1193 pci_free_consistent(pdev, rxdr->size, rxdr->desc,
1069 rxdr->desc, rxdr->dma); 1194 rxdr->dma);
1070 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1195 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1071 DPRINTK(PROBE, ERR, 1196 DPRINTK(PROBE, ERR,
1072 "Unable to Allocate aligned Memory for the" 1197 "Unable to allocate aligned memory "
1073 " Receive descriptor ring\n"); 1198 "for the receive descriptor ring\n");
1074 vfree(rxdr->buffer_info); 1199 vfree(rxdr->buffer_info);
1200 kfree(rxdr->ps_page);
1201 kfree(rxdr->ps_page_dma);
1075 return -ENOMEM; 1202 return -ENOMEM;
1076 } else { 1203 } else {
1077 /* free old, move on with the new one since its okay */ 1204 /* Free old allocation, new allocation was successful */
1078 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1205 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1079 } 1206 }
1080 } 1207 }
@@ -1087,14 +1214,15 @@ setup_rx_desc_die:
1087} 1214}
1088 1215
1089/** 1216/**
1090 * e1000_setup_rctl - configure the receive control register 1217 * e1000_setup_rctl - configure the receive control registers
1091 * @adapter: Board private structure 1218 * @adapter: Board private structure
1092 **/ 1219 **/
1093 1220
1094static void 1221static void
1095e1000_setup_rctl(struct e1000_adapter *adapter) 1222e1000_setup_rctl(struct e1000_adapter *adapter)
1096{ 1223{
1097 uint32_t rctl; 1224 uint32_t rctl, rfctl;
1225 uint32_t psrctl = 0;
1098 1226
1099 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1227 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1100 1228
@@ -1109,24 +1237,69 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1109 else 1237 else
1110 rctl &= ~E1000_RCTL_SBP; 1238 rctl &= ~E1000_RCTL_SBP;
1111 1239
1240 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1241 rctl &= ~E1000_RCTL_LPE;
1242 else
1243 rctl |= E1000_RCTL_LPE;
1244
1112 /* Setup buffer sizes */ 1245 /* Setup buffer sizes */
1113 rctl &= ~(E1000_RCTL_SZ_4096); 1246 if(adapter->hw.mac_type == e1000_82573) {
1114 rctl |= (E1000_RCTL_BSEX | E1000_RCTL_LPE); 1247 /* We can now specify buffers in 1K increments.
1115 switch (adapter->rx_buffer_len) { 1248 * BSIZE and BSEX are ignored in this case. */
1116 case E1000_RXBUFFER_2048: 1249 rctl |= adapter->rx_buffer_len << 0x11;
1117 default: 1250 } else {
1118 rctl |= E1000_RCTL_SZ_2048; 1251 rctl &= ~E1000_RCTL_SZ_4096;
1119 rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE); 1252 rctl |= E1000_RCTL_BSEX;
1120 break; 1253 switch (adapter->rx_buffer_len) {
1121 case E1000_RXBUFFER_4096: 1254 case E1000_RXBUFFER_2048:
1122 rctl |= E1000_RCTL_SZ_4096; 1255 default:
1123 break; 1256 rctl |= E1000_RCTL_SZ_2048;
1124 case E1000_RXBUFFER_8192: 1257 rctl &= ~E1000_RCTL_BSEX;
1125 rctl |= E1000_RCTL_SZ_8192; 1258 break;
1126 break; 1259 case E1000_RXBUFFER_4096:
1127 case E1000_RXBUFFER_16384: 1260 rctl |= E1000_RCTL_SZ_4096;
1128 rctl |= E1000_RCTL_SZ_16384; 1261 break;
1129 break; 1262 case E1000_RXBUFFER_8192:
1263 rctl |= E1000_RCTL_SZ_8192;
1264 break;
1265 case E1000_RXBUFFER_16384:
1266 rctl |= E1000_RCTL_SZ_16384;
1267 break;
1268 }
1269 }
1270
1271#ifdef CONFIG_E1000_PACKET_SPLIT
1272 /* 82571 and greater support packet-split where the protocol
1273 * header is placed in skb->data and the packet data is
1274 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1275 * In the case of a non-split, skb->data is linearly filled,
1276 * followed by the page buffers. Therefore, skb->data is
1277 * sized to hold the largest protocol header.
1278 */
1279 adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2)
1280 && (adapter->netdev->mtu
1281 < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0));
1282#endif
1283 if(adapter->rx_ps) {
1284 /* Configure extra packet-split registers */
1285 rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
1286 rfctl |= E1000_RFCTL_EXTEN;
1287 /* disable IPv6 packet split support */
1288 rfctl |= E1000_RFCTL_IPV6_DIS;
1289 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
1290
1291 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
1292
1293 psrctl |= adapter->rx_ps_bsize0 >>
1294 E1000_PSRCTL_BSIZE0_SHIFT;
1295 psrctl |= PAGE_SIZE >>
1296 E1000_PSRCTL_BSIZE1_SHIFT;
1297 psrctl |= PAGE_SIZE <<
1298 E1000_PSRCTL_BSIZE2_SHIFT;
1299 psrctl |= PAGE_SIZE <<
1300 E1000_PSRCTL_BSIZE3_SHIFT;
1301
1302 E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
1130 } 1303 }
1131 1304
1132 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 1305 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
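
The PSRCTL packing above folds two steps into each shift: dividing the byte size by the field's granularity and placing the quotient at the field's bit offset. A worked expansion, assuming 4 kB pages, the 256-byte rx_ps_bsize0 set in e1000_sw_init(), and the shift constants as they appear in e1000_hw.h (treat the exact shift values as an assumption):

/*   BSIZE0: 256  >> 7  -> 0x0000002  (256/128   placed at bit 0)
 *   BSIZE1: 4096 >> 2  -> 0x0000400  (4096/1024 placed at bit 8)
 *   BSIZE2: 4096 << 6  -> 0x0040000  (4096/1024 placed at bit 16)
 *   BSIZE3: 4096 << 14 -> 0x4000000  (4096/1024 placed at bit 24)
 */

The 82573 branch earlier in this function uses the same trick: rx_buffer_len << 0x11 equals (rx_buffer_len / 1024) << 27, i.e. the buffer size in 1 kB units dropped into the flexible buffer-size field (the bit-27 offset is an assumption; the 1 kB granularity is stated in the patch's own comment).
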
@@ -1143,9 +1316,18 @@ static void
1143e1000_configure_rx(struct e1000_adapter *adapter) 1316e1000_configure_rx(struct e1000_adapter *adapter)
1144{ 1317{
1145 uint64_t rdba = adapter->rx_ring.dma; 1318 uint64_t rdba = adapter->rx_ring.dma;
1146 uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc); 1319 uint32_t rdlen, rctl, rxcsum;
1147 uint32_t rctl; 1320
1148 uint32_t rxcsum; 1321 if(adapter->rx_ps) {
1322 rdlen = adapter->rx_ring.count *
1323 sizeof(union e1000_rx_desc_packet_split);
1324 adapter->clean_rx = e1000_clean_rx_irq_ps;
1325 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
1326 } else {
1327 rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
1328 adapter->clean_rx = e1000_clean_rx_irq;
1329 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1330 }
1149 1331
1150 /* disable receives while setting up the descriptors */ 1332 /* disable receives while setting up the descriptors */
1151 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1333 rctl = E1000_READ_REG(&adapter->hw, RCTL);
@@ -1172,13 +1354,27 @@ e1000_configure_rx(struct e1000_adapter *adapter)
1172 E1000_WRITE_REG(&adapter->hw, RDT, 0); 1354 E1000_WRITE_REG(&adapter->hw, RDT, 0);
1173 1355
1174 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1356 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1175 if((adapter->hw.mac_type >= e1000_82543) && 1357 if(adapter->hw.mac_type >= e1000_82543) {
1176 (adapter->rx_csum == TRUE)) {
1177 rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM); 1358 rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
1178 rxcsum |= E1000_RXCSUM_TUOFL; 1359 if(adapter->rx_csum == TRUE) {
1360 rxcsum |= E1000_RXCSUM_TUOFL;
1361
1362 /* Enable 82573 IPv4 payload checksum for UDP fragments
1363 * Must be used in conjunction with packet-split. */
1364 if((adapter->hw.mac_type > e1000_82547_rev_2) &&
1365 (adapter->rx_ps)) {
1366 rxcsum |= E1000_RXCSUM_IPPCSE;
1367 }
1368 } else {
1369 rxcsum &= ~E1000_RXCSUM_TUOFL;
1370 /* don't need to clear IPPCSE as it defaults to 0 */
1371 }
1179 E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum); 1372 E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
1180 } 1373 }
1181 1374
1375 if (adapter->hw.mac_type == e1000_82573)
1376 E1000_WRITE_REG(&adapter->hw, ERT, 0x0100);
1377
1182 /* Enable Receives */ 1378 /* Enable Receives */
1183 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 1379 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1184} 1380}
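
Selecting clean_rx and alloc_rx_buf once here keeps the hot paths branch-free: the interrupt handler and NAPI poll loop call through the adapter's function pointers without caring whether packet split is active. A minimal sketch of the dispatch pattern with simplified signatures (the real prototypes vary with CONFIG_E1000_NAPI, and the demo_* names are hypothetical):

struct rx_path_demo {
	int  (*clean_rx)(void *adapter);
	void (*alloc_rx_buf)(void *adapter);
};

/* stand-ins for the ring routines named in this patch */
static int  demo_clean_rx_irq(void *a)        { (void)a; return 0; }
static int  demo_clean_rx_irq_ps(void *a)     { (void)a; return 0; }
static void demo_alloc_rx_buffers(void *a)    { (void)a; }
static void demo_alloc_rx_buffers_ps(void *a) { (void)a; }

static void pick_rx_path(struct rx_path_demo *ops, int rx_ps)
{
	ops->clean_rx     = rx_ps ? demo_clean_rx_irq_ps
				  : demo_clean_rx_irq;
	ops->alloc_rx_buf = rx_ps ? demo_alloc_rx_buffers_ps
				  : demo_alloc_rx_buffers;
}
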
@@ -1210,13 +1406,11 @@ static inline void
1210e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1406e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1211 struct e1000_buffer *buffer_info) 1407 struct e1000_buffer *buffer_info)
1212{ 1408{
1213 struct pci_dev *pdev = adapter->pdev;
1214
1215 if(buffer_info->dma) { 1409 if(buffer_info->dma) {
1216 pci_unmap_page(pdev, 1410 pci_unmap_page(adapter->pdev,
1217 buffer_info->dma, 1411 buffer_info->dma,
1218 buffer_info->length, 1412 buffer_info->length,
1219 PCI_DMA_TODEVICE); 1413 PCI_DMA_TODEVICE);
1220 buffer_info->dma = 0; 1414 buffer_info->dma = 0;
1221 } 1415 }
1222 if(buffer_info->skb) { 1416 if(buffer_info->skb) {
@@ -1241,7 +1435,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter)
1241 /* Free all the Tx ring sk_buffs */ 1435 /* Free all the Tx ring sk_buffs */
1242 1436
1243 if (likely(adapter->previous_buffer_info.skb != NULL)) { 1437 if (likely(adapter->previous_buffer_info.skb != NULL)) {
1244 e1000_unmap_and_free_tx_resource(adapter, 1438 e1000_unmap_and_free_tx_resource(adapter,
1245 &adapter->previous_buffer_info); 1439 &adapter->previous_buffer_info);
1246 } 1440 }
1247 1441
@@ -1281,6 +1475,10 @@ e1000_free_rx_resources(struct e1000_adapter *adapter)
1281 1475
1282 vfree(rx_ring->buffer_info); 1476 vfree(rx_ring->buffer_info);
1283 rx_ring->buffer_info = NULL; 1477 rx_ring->buffer_info = NULL;
1478 kfree(rx_ring->ps_page);
1479 rx_ring->ps_page = NULL;
1480 kfree(rx_ring->ps_page_dma);
1481 rx_ring->ps_page_dma = NULL;
1284 1482
1285 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 1483 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1286 1484
@@ -1297,16 +1495,19 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
1297{ 1495{
1298 struct e1000_desc_ring *rx_ring = &adapter->rx_ring; 1496 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1299 struct e1000_buffer *buffer_info; 1497 struct e1000_buffer *buffer_info;
1498 struct e1000_ps_page *ps_page;
1499 struct e1000_ps_page_dma *ps_page_dma;
1300 struct pci_dev *pdev = adapter->pdev; 1500 struct pci_dev *pdev = adapter->pdev;
1301 unsigned long size; 1501 unsigned long size;
1302 unsigned int i; 1502 unsigned int i, j;
1303 1503
1304 /* Free all the Rx ring sk_buffs */ 1504 /* Free all the Rx ring sk_buffs */
1305 1505
1306 for(i = 0; i < rx_ring->count; i++) { 1506 for(i = 0; i < rx_ring->count; i++) {
1307 buffer_info = &rx_ring->buffer_info[i]; 1507 buffer_info = &rx_ring->buffer_info[i];
1308 if(buffer_info->skb) { 1508 if(buffer_info->skb) {
1309 1509 ps_page = &rx_ring->ps_page[i];
1510 ps_page_dma = &rx_ring->ps_page_dma[i];
1310 pci_unmap_single(pdev, 1511 pci_unmap_single(pdev,
1311 buffer_info->dma, 1512 buffer_info->dma,
1312 buffer_info->length, 1513 buffer_info->length,
@@ -1314,11 +1515,25 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
1314 1515
1315 dev_kfree_skb(buffer_info->skb); 1516 dev_kfree_skb(buffer_info->skb);
1316 buffer_info->skb = NULL; 1517 buffer_info->skb = NULL;
1518
1519 for(j = 0; j < PS_PAGE_BUFFERS; j++) {
1520 if(!ps_page->ps_page[j]) break;
1521 pci_unmap_single(pdev,
1522 ps_page_dma->ps_page_dma[j],
1523 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1524 ps_page_dma->ps_page_dma[j] = 0;
1525 put_page(ps_page->ps_page[j]);
1526 ps_page->ps_page[j] = NULL;
1527 }
1317 } 1528 }
1318 } 1529 }
1319 1530
1320 size = sizeof(struct e1000_buffer) * rx_ring->count; 1531 size = sizeof(struct e1000_buffer) * rx_ring->count;
1321 memset(rx_ring->buffer_info, 0, size); 1532 memset(rx_ring->buffer_info, 0, size);
1533 size = sizeof(struct e1000_ps_page) * rx_ring->count;
1534 memset(rx_ring->ps_page, 0, size);
1535 size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
1536 memset(rx_ring->ps_page_dma, 0, size);
1322 1537
1323 /* Zero out the descriptor ring */ 1538 /* Zero out the descriptor ring */
1324 1539
@@ -1422,15 +1637,15 @@ e1000_set_multi(struct net_device *netdev)
1422 struct e1000_adapter *adapter = netdev->priv; 1637 struct e1000_adapter *adapter = netdev->priv;
1423 struct e1000_hw *hw = &adapter->hw; 1638 struct e1000_hw *hw = &adapter->hw;
1424 struct dev_mc_list *mc_ptr; 1639 struct dev_mc_list *mc_ptr;
1640 unsigned long flags;
1425 uint32_t rctl; 1641 uint32_t rctl;
1426 uint32_t hash_value; 1642 uint32_t hash_value;
1427 int i; 1643 int i;
1428 unsigned long flags;
1429
1430 /* Check for Promiscuous and All Multicast modes */
1431 1644
1432 spin_lock_irqsave(&adapter->tx_lock, flags); 1645 spin_lock_irqsave(&adapter->tx_lock, flags);
1433 1646
1647 /* Check for Promiscuous and All Multicast modes */
1648
1434 rctl = E1000_READ_REG(hw, RCTL); 1649 rctl = E1000_READ_REG(hw, RCTL);
1435 1650
1436 if(netdev->flags & IFF_PROMISC) { 1651 if(netdev->flags & IFF_PROMISC) {
@@ -1556,6 +1771,11 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
1556 uint32_t link; 1771 uint32_t link;
1557 1772
1558 e1000_check_for_link(&adapter->hw); 1773 e1000_check_for_link(&adapter->hw);
1774 if (adapter->hw.mac_type == e1000_82573) {
1775 e1000_enable_tx_pkt_filtering(&adapter->hw);
1776 if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
1777 e1000_update_mng_vlan(adapter);
1778 }
1559 1779
1560 if((adapter->hw.media_type == e1000_media_type_internal_serdes) && 1780 if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
1561 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) 1781 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
@@ -1632,7 +1852,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
1632 /* Cause software interrupt to ensure rx ring is cleaned */ 1852 /* Cause software interrupt to ensure rx ring is cleaned */
1633 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); 1853 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
1634 1854
1635 /* Force detection of hung controller every watchdog period*/ 1855 /* Force detection of hung controller every watchdog period */
1636 adapter->detect_tx_hung = TRUE; 1856 adapter->detect_tx_hung = TRUE;
1637 1857
1638 /* Reset the timer */ 1858 /* Reset the timer */
@@ -1642,6 +1862,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
1642#define E1000_TX_FLAGS_CSUM 0x00000001 1862#define E1000_TX_FLAGS_CSUM 0x00000001
1643#define E1000_TX_FLAGS_VLAN 0x00000002 1863#define E1000_TX_FLAGS_VLAN 0x00000002
1644#define E1000_TX_FLAGS_TSO 0x00000004 1864#define E1000_TX_FLAGS_TSO 0x00000004
1865#define E1000_TX_FLAGS_IPV4 0x00000008
1645#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 1866#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
1646#define E1000_TX_FLAGS_VLAN_SHIFT 16 1867#define E1000_TX_FLAGS_VLAN_SHIFT 16
1647 1868
@@ -1652,7 +1873,7 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
1652 struct e1000_context_desc *context_desc; 1873 struct e1000_context_desc *context_desc;
1653 unsigned int i; 1874 unsigned int i;
1654 uint32_t cmd_length = 0; 1875 uint32_t cmd_length = 0;
1655 uint16_t ipcse, tucse, mss; 1876 uint16_t ipcse = 0, tucse, mss;
1656 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 1877 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
1657 int err; 1878 int err;
1658 1879
@@ -1665,23 +1886,37 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
1665 1886
1666 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 1887 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
1667 mss = skb_shinfo(skb)->tso_size; 1888 mss = skb_shinfo(skb)->tso_size;
1668 skb->nh.iph->tot_len = 0; 1889 if(skb->protocol == ntohs(ETH_P_IP)) {
1669 skb->nh.iph->check = 0; 1890 skb->nh.iph->tot_len = 0;
1670 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr, 1891 skb->nh.iph->check = 0;
1671 skb->nh.iph->daddr, 1892 skb->h.th->check =
1672 0, 1893 ~csum_tcpudp_magic(skb->nh.iph->saddr,
1673 IPPROTO_TCP, 1894 skb->nh.iph->daddr,
1674 0); 1895 0,
1896 IPPROTO_TCP,
1897 0);
1898 cmd_length = E1000_TXD_CMD_IP;
1899 ipcse = skb->h.raw - skb->data - 1;
1900#ifdef NETIF_F_TSO_IPV6
1901 } else if(skb->protocol == ntohs(ETH_P_IPV6)) {
1902 skb->nh.ipv6h->payload_len = 0;
1903 skb->h.th->check =
1904 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
1905 &skb->nh.ipv6h->daddr,
1906 0,
1907 IPPROTO_TCP,
1908 0);
1909 ipcse = 0;
1910#endif
1911 }
1675 ipcss = skb->nh.raw - skb->data; 1912 ipcss = skb->nh.raw - skb->data;
1676 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; 1913 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1677 ipcse = skb->h.raw - skb->data - 1;
1678 tucss = skb->h.raw - skb->data; 1914 tucss = skb->h.raw - skb->data;
1679 tucso = (void *)&(skb->h.th->check) - (void *)skb->data; 1915 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
1680 tucse = 0; 1916 tucse = 0;
1681 1917
1682 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 1918 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
1683 E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP | 1919 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
1684 (skb->len - (hdr_len)));
1685 1920
1686 i = adapter->tx_ring.next_to_use; 1921 i = adapter->tx_ring.next_to_use;
1687 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); 1922 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
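
For orientation, here is where the offsets computed above land on a plain IPv4/TCP frame (14-byte Ethernet header, 20-byte IP header, 20-byte TCP header); a worked example, not driver output:

/*   | 14 B MAC | 20 B IP | 20 B TCP | payload ... |
 *   ipcss = 14            start of the IP header
 *   ipcso = 14 + 10 = 24  offset of iph->check within the frame
 *   ipcse = 34 - 1  = 33  last byte of the IP header, inclusive
 *   tucss = 34            start of the TCP header
 *   tucso = 34 + 16 = 50  offset of th->check within the frame
 *   tucse = 0             checksum runs to the end of the packet
 */

For the IPv6 branch there is no IP header checksum, which is why ipcse stays 0 and E1000_TXD_CMD_IP is left out of cmd_length.
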
@@ -1760,6 +1995,15 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
1760 if(unlikely(mss && !nr_frags && size == len && size > 8)) 1995 if(unlikely(mss && !nr_frags && size == len && size > 8))
1761 size -= 4; 1996 size -= 4;
1762#endif 1997#endif
 1998 /* Work-around for errata 10: it applies to all
 1999 * controllers in PCI-X mode.
 2000 * The fix is to make sure that the first descriptor of a
 2001 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
 2002 */
2003 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2004 (size > 2015) && count == 0))
2005 size = 2015;
2006
1763 /* Workaround for potential 82544 hang in PCI-X. Avoid 2007 /* Workaround for potential 82544 hang in PCI-X. Avoid
1764 * terminating buffers within evenly-aligned dwords. */ 2008 * terminating buffers within evenly-aligned dwords. */
1765 if(unlikely(adapter->pcix_82544 && 2009 if(unlikely(adapter->pcix_82544 &&
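
The clamp in this hunk is easier to read with the numbers written out (a worked restatement of the errata comment, not new behavior):

/*   2048 - 16 - 16 = 2016 usable bytes for the first descriptor
 *   "smaller than 2016"  ->  clamp at size = 2015
 *   the remaining (size - 2015) bytes spill into the next descriptor,
 *   which is why e1000_xmit_frame() below reserves one extra
 *   descriptor under the same len > 2015 test
 */
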
@@ -1840,7 +2084,10 @@ e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
1840 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2084 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
1841 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2085 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
1842 E1000_TXD_CMD_TSE; 2086 E1000_TXD_CMD_TSE;
1843 txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; 2087 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2088
2089 if(likely(tx_flags & E1000_TX_FLAGS_IPV4))
2090 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
1844 } 2091 }
1845 2092
1846 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2093 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
@@ -1915,6 +2162,53 @@ no_fifo_stall_required:
1915 return 0; 2162 return 0;
1916} 2163}
1917 2164
2165#define MINIMUM_DHCP_PACKET_SIZE 282
2166static inline int
2167e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
2168{
2169 struct e1000_hw *hw = &adapter->hw;
2170 uint16_t length, offset;
2171 if(vlan_tx_tag_present(skb)) {
2172 if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
 2173 (adapter->hw.mng_cookie.status &
 2174 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)))
2175 return 0;
2176 }
2177 if(htons(ETH_P_IP) == skb->protocol) {
2178 const struct iphdr *ip = skb->nh.iph;
2179 if(IPPROTO_UDP == ip->protocol) {
2180 struct udphdr *udp = (struct udphdr *)(skb->h.uh);
2181 if(ntohs(udp->dest) == 67) {
2182 offset = (uint8_t *)udp + 8 - skb->data;
2183 length = skb->len - offset;
2184
2185 return e1000_mng_write_dhcp_info(hw,
2186 (uint8_t *)udp + 8, length);
2187 }
2188 }
2189 } else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
2190 struct ethhdr *eth = (struct ethhdr *) skb->data;
2191 if((htons(ETH_P_IP) == eth->h_proto)) {
2192 const struct iphdr *ip =
2193 (struct iphdr *)((uint8_t *)skb->data+14);
2194 if(IPPROTO_UDP == ip->protocol) {
2195 struct udphdr *udp =
2196 (struct udphdr *)((uint8_t *)ip +
2197 (ip->ihl << 2));
2198 if(ntohs(udp->dest) == 67) {
2199 offset = (uint8_t *)udp + 8 - skb->data;
2200 length = skb->len - offset;
2201
2202 return e1000_mng_write_dhcp_info(hw,
2203 (uint8_t *)udp + 8,
2204 length);
2205 }
2206 }
2207 }
2208 }
2209 return 0;
2210}
2211
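A hedged reading of the helper above, for anyone tracing the offsets:

/* Destination port 67 is the DHCP/BOOTP server port, and
 * "(uint8_t *)udp + 8" skips the fixed 8-byte UDP header, so
 * e1000_mng_write_dhcp_info() receives the DHCP payload only. The
 * second branch re-parses from skb->data for frames whose
 * skb->protocol is still unset, assuming a plain 14-byte Ethernet
 * header ("skb->data + 14") and stepping over IP options via
 * ip->ihl << 2. */
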
1918#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 2212#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
1919static int 2213static int
1920e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2214e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -1939,7 +2233,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1939 2233
1940#ifdef NETIF_F_TSO 2234#ifdef NETIF_F_TSO
1941 mss = skb_shinfo(skb)->tso_size; 2235 mss = skb_shinfo(skb)->tso_size;
1942 /* The controller does a simple calculation to 2236 /* The controller does a simple calculation to
1943 * make sure there is enough room in the FIFO before 2237 * make sure there is enough room in the FIFO before
1944 * initiating the DMA for each buffer. The calc is: 2238 * initiating the DMA for each buffer. The calc is:
1945 * 4 = ceil(buffer len/mss). To make sure we don't 2239 * 4 = ceil(buffer len/mss). To make sure we don't
@@ -1952,7 +2246,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1952 2246
1953 if((mss) || (skb->ip_summed == CHECKSUM_HW)) 2247 if((mss) || (skb->ip_summed == CHECKSUM_HW))
1954 count++; 2248 count++;
1955 count++; /* for sentinel desc */ 2249 count++;
1956#else 2250#else
1957 if(skb->ip_summed == CHECKSUM_HW) 2251 if(skb->ip_summed == CHECKSUM_HW)
1958 count++; 2252 count++;
@@ -1962,6 +2256,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1962 if(adapter->pcix_82544) 2256 if(adapter->pcix_82544)
1963 count++; 2257 count++;
1964 2258
 2259 /* Work-around for errata 10: it applies to all controllers
 2260 * in PCI-X mode, so add one more descriptor to the count
 2261 */
2262 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2263 (len > 2015)))
2264 count++;
2265
1965 nr_frags = skb_shinfo(skb)->nr_frags; 2266 nr_frags = skb_shinfo(skb)->nr_frags;
1966 for(f = 0; f < nr_frags; f++) 2267 for(f = 0; f < nr_frags; f++)
1967 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, 2268 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
@@ -1975,6 +2276,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1975 local_irq_restore(flags); 2276 local_irq_restore(flags);
1976 return NETDEV_TX_LOCKED; 2277 return NETDEV_TX_LOCKED;
1977 } 2278 }
 2279 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573))
2280 e1000_transfer_dhcp_info(adapter, skb);
2281
1978 2282
1979 /* need: count + 2 desc gap to keep tail from touching 2283 /* need: count + 2 desc gap to keep tail from touching
1980 * head, otherwise try next time */ 2284 * head, otherwise try next time */
@@ -2011,6 +2315,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2011 else if(likely(e1000_tx_csum(adapter, skb))) 2315 else if(likely(e1000_tx_csum(adapter, skb)))
2012 tx_flags |= E1000_TX_FLAGS_CSUM; 2316 tx_flags |= E1000_TX_FLAGS_CSUM;
2013 2317
 2318 /* The old method assumed an IPv4 packet by default whenever TSO
 2319 * was enabled. 82573 hardware supports TSO for IPv6 as well, so
 2320 * we can no longer assume IPv4; check the protocol explicitly. */
2321 if(likely(skb->protocol == ntohs(ETH_P_IP)))
2322 tx_flags |= E1000_TX_FLAGS_IPV4;
2323
2014 e1000_tx_queue(adapter, 2324 e1000_tx_queue(adapter,
2015 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), 2325 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
2016 tx_flags); 2326 tx_flags);
@@ -2077,7 +2387,6 @@ static int
2077e1000_change_mtu(struct net_device *netdev, int new_mtu) 2387e1000_change_mtu(struct net_device *netdev, int new_mtu)
2078{ 2388{
2079 struct e1000_adapter *adapter = netdev->priv; 2389 struct e1000_adapter *adapter = netdev->priv;
2080 int old_mtu = adapter->rx_buffer_len;
2081 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 2390 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
2082 2391
2083 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 2392 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
@@ -2086,29 +2395,45 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
2086 return -EINVAL; 2395 return -EINVAL;
2087 } 2396 }
2088 2397
2089 if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) { 2398#define MAX_STD_JUMBO_FRAME_SIZE 9216
 2090 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 2399 /* NOTE: may want a broader mac_type range check here */
2091 2400 if (adapter->hw.mac_type == e1000_82573 &&
2092 } else if(adapter->hw.mac_type < e1000_82543) { 2401 max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
2093 DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n"); 2402 DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
2403 "on 82573\n");
2094 return -EINVAL; 2404 return -EINVAL;
2405 }
2095 2406
2096 } else if(max_frame <= E1000_RXBUFFER_4096) { 2407 if(adapter->hw.mac_type > e1000_82547_rev_2) {
2097 adapter->rx_buffer_len = E1000_RXBUFFER_4096; 2408 adapter->rx_buffer_len = max_frame;
2098 2409 E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
2099 } else if(max_frame <= E1000_RXBUFFER_8192) {
2100 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
2101
2102 } else { 2410 } else {
2103 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 2411 if(unlikely((adapter->hw.mac_type < e1000_82543) &&
2412 (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
2413 DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
2414 "on 82542\n");
2415 return -EINVAL;
2416
2417 } else {
2418 if(max_frame <= E1000_RXBUFFER_2048) {
2419 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
2420 } else if(max_frame <= E1000_RXBUFFER_4096) {
2421 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
2422 } else if(max_frame <= E1000_RXBUFFER_8192) {
2423 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
2424 } else if(max_frame <= E1000_RXBUFFER_16384) {
2425 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
2426 }
2427 }
2104 } 2428 }
2105 2429
2106 if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) { 2430 netdev->mtu = new_mtu;
2431
2432 if(netif_running(netdev)) {
2107 e1000_down(adapter); 2433 e1000_down(adapter);
2108 e1000_up(adapter); 2434 e1000_up(adapter);
2109 } 2435 }
2110 2436
2111 netdev->mtu = new_mtu;
2112 adapter->hw.max_frame_size = max_frame; 2437 adapter->hw.max_frame_size = max_frame;
2113 2438
2114 return 0; 2439 return 0;
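
A worked example of the new sizing path for MACs newer than 82547_rev_2, using a 9000-byte jumbo MTU (illustrative numbers; header sizes per the driver's constants):

/*   max_frame = 9000 + ENET_HEADER_SIZE(14) + ETHERNET_FCS_SIZE(4) = 9018
 *   E1000_ROUNDUP(9018, 1024)                                      = 9216
 * i.e. exactly MAX_STD_JUMBO_FRAME_SIZE, so rx_buffer_len always comes
 * out as a whole number of 1 kB units on these MACs. (The 82573 itself
 * never reaches this path with a jumbo MTU; the check above rejects it.)
 */
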
@@ -2199,6 +2524,17 @@ e1000_update_stats(struct e1000_adapter *adapter)
2199 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); 2524 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
2200 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); 2525 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
2201 } 2526 }
2527 if(hw->mac_type > e1000_82547_rev_2) {
2528 adapter->stats.iac += E1000_READ_REG(hw, IAC);
2529 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
2530 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
2531 adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
2532 adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
2533 adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
2534 adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
2535 adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
2536 adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
2537 }
2202 2538
2203 /* Fill out the OS statistics structure */ 2539 /* Fill out the OS statistics structure */
2204 2540
@@ -2213,9 +2549,9 @@ e1000_update_stats(struct e1000_adapter *adapter)
2213 2549
2214 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 2550 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2215 adapter->stats.crcerrs + adapter->stats.algnerrc + 2551 adapter->stats.crcerrs + adapter->stats.algnerrc +
2216 adapter->stats.rlec + adapter->stats.rnbc + 2552 adapter->stats.rlec + adapter->stats.mpc +
2217 adapter->stats.mpc + adapter->stats.cexterr; 2553 adapter->stats.cexterr;
2218 adapter->net_stats.rx_dropped = adapter->stats.rnbc; 2554 adapter->net_stats.rx_dropped = adapter->stats.mpc;
2219 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 2555 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2220 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 2556 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2221 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 2557 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
@@ -2300,11 +2636,11 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
2300 */ 2636 */
2301 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){ 2637 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){
2302 atomic_inc(&adapter->irq_sem); 2638 atomic_inc(&adapter->irq_sem);
2303 E1000_WRITE_REG(&adapter->hw, IMC, ~0); 2639 E1000_WRITE_REG(hw, IMC, ~0);
2304 } 2640 }
2305 2641
2306 for(i = 0; i < E1000_MAX_INTR; i++) 2642 for(i = 0; i < E1000_MAX_INTR; i++)
2307 if(unlikely(!e1000_clean_rx_irq(adapter) & 2643 if(unlikely(!adapter->clean_rx(adapter) &
2308 !e1000_clean_tx_irq(adapter))) 2644 !e1000_clean_tx_irq(adapter)))
2309 break; 2645 break;
2310 2646
@@ -2328,16 +2664,15 @@ e1000_clean(struct net_device *netdev, int *budget)
2328 int work_to_do = min(*budget, netdev->quota); 2664 int work_to_do = min(*budget, netdev->quota);
2329 int tx_cleaned; 2665 int tx_cleaned;
2330 int work_done = 0; 2666 int work_done = 0;
2331 2667
2332 tx_cleaned = e1000_clean_tx_irq(adapter); 2668 tx_cleaned = e1000_clean_tx_irq(adapter);
2333 e1000_clean_rx_irq(adapter, &work_done, work_to_do); 2669 adapter->clean_rx(adapter, &work_done, work_to_do);
2334 2670
2335 *budget -= work_done; 2671 *budget -= work_done;
2336 netdev->quota -= work_done; 2672 netdev->quota -= work_done;
2337 2673
2338 /* if no Tx and not enough Rx work done, exit the polling mode */ 2674 /* If no Tx and no Rx work done, exit the polling mode */
2339 if((!tx_cleaned && (work_done < work_to_do)) || 2675 if ((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
2340 !netif_running(netdev)) {
2341 netif_rx_complete(netdev); 2676 netif_rx_complete(netdev);
2342 e1000_irq_enable(adapter); 2677 e1000_irq_enable(adapter);
2343 return 0; 2678 return 0;
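
The reworked exit test in this hunk changes when polling mode ends; a hedged summary of the behavioral difference:

/* Old test: exit when no Tx was cleaned and Rx came in under budget,
 *           even if a nonzero amount of Rx work had just been done.
 * New test: exit only when no Tx was cleaned *and* zero Rx work was
 *           done, so any Rx progress at all keeps the device in
 *           polling mode for another pass. */
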
@@ -2367,11 +2702,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2367 eop_desc = E1000_TX_DESC(*tx_ring, eop); 2702 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2368 2703
2369 while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 2704 while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
 2370 /* pre-mature writeback of Tx descriptors */ 2705 /* Premature writeback of Tx descriptors: clear (free buffers
 2371 /* clear (free buffers and unmap pci_mapping) */ 2706 * and unmap pci_mapping) previous_buffer_info first */
 2372 /* previous_buffer_info */
2373 if (likely(adapter->previous_buffer_info.skb != NULL)) { 2707 if (likely(adapter->previous_buffer_info.skb != NULL)) {
2374 e1000_unmap_and_free_tx_resource(adapter, 2708 e1000_unmap_and_free_tx_resource(adapter,
2375 &adapter->previous_buffer_info); 2709 &adapter->previous_buffer_info);
2376 } 2710 }
2377 2711
@@ -2380,26 +2714,30 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2380 buffer_info = &tx_ring->buffer_info[i]; 2714 buffer_info = &tx_ring->buffer_info[i];
2381 cleaned = (i == eop); 2715 cleaned = (i == eop);
2382 2716
2383 /* pre-mature writeback of Tx descriptors */ 2717#ifdef NETIF_F_TSO
2384 /* save the cleaning of the this for the */ 2718 if (!(netdev->features & NETIF_F_TSO)) {
2385 /* next iteration */ 2719#endif
2386 if (cleaned) { 2720 e1000_unmap_and_free_tx_resource(adapter,
2387 memcpy(&adapter->previous_buffer_info, 2721 buffer_info);
2388 buffer_info, 2722#ifdef NETIF_F_TSO
2389 sizeof(struct e1000_buffer));
2390 memset(buffer_info,
2391 0,
2392 sizeof(struct e1000_buffer));
2393 } else { 2723 } else {
2394 e1000_unmap_and_free_tx_resource(adapter, 2724 if (cleaned) {
2395 buffer_info); 2725 memcpy(&adapter->previous_buffer_info,
2726 buffer_info,
2727 sizeof(struct e1000_buffer));
2728 memset(buffer_info, 0,
2729 sizeof(struct e1000_buffer));
2730 } else {
2731 e1000_unmap_and_free_tx_resource(
2732 adapter, buffer_info);
2733 }
2396 } 2734 }
2735#endif
2397 2736
2398 tx_desc->buffer_addr = 0; 2737 tx_desc->buffer_addr = 0;
2399 tx_desc->lower.data = 0; 2738 tx_desc->lower.data = 0;
2400 tx_desc->upper.data = 0; 2739 tx_desc->upper.data = 0;
2401 2740
2402 cleaned = (i == eop);
2403 if(unlikely(++i == tx_ring->count)) i = 0; 2741 if(unlikely(++i == tx_ring->count)) i = 0;
2404 } 2742 }
2405 2743
@@ -2416,57 +2754,107 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2416 netif_wake_queue(netdev); 2754 netif_wake_queue(netdev);
2417 2755
2418 spin_unlock(&adapter->tx_lock); 2756 spin_unlock(&adapter->tx_lock);
2419
2420 if(adapter->detect_tx_hung) { 2757 if(adapter->detect_tx_hung) {
2421 /* detect a transmit hang in hardware, this serializes the 2758
2759 /* Detect a transmit hang in hardware, this serializes the
2422 * check with the clearing of time_stamp and movement of i */ 2760 * check with the clearing of time_stamp and movement of i */
2423 adapter->detect_tx_hung = FALSE; 2761 adapter->detect_tx_hung = FALSE;
2424 if(tx_ring->buffer_info[i].dma && 2762 if (tx_ring->buffer_info[i].dma &&
2425 time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) && 2763 time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
2426 !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF)) 2764 && !(E1000_READ_REG(&adapter->hw, STATUS) &
2765 E1000_STATUS_TXOFF)) {
2766
2767 /* detected Tx unit hang */
2768 i = tx_ring->next_to_clean;
2769 eop = tx_ring->buffer_info[i].next_to_watch;
2770 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2771 DPRINTK(TX_ERR, ERR, "Detected Tx Unit Hang\n"
2772 " TDH <%x>\n"
2773 " TDT <%x>\n"
2774 " next_to_use <%x>\n"
2775 " next_to_clean <%x>\n"
2776 "buffer_info[next_to_clean]\n"
2777 " dma <%llx>\n"
2778 " time_stamp <%lx>\n"
2779 " next_to_watch <%x>\n"
2780 " jiffies <%lx>\n"
2781 " next_to_watch.status <%x>\n",
2782 E1000_READ_REG(&adapter->hw, TDH),
2783 E1000_READ_REG(&adapter->hw, TDT),
2784 tx_ring->next_to_use,
2785 i,
2786 tx_ring->buffer_info[i].dma,
2787 tx_ring->buffer_info[i].time_stamp,
2788 eop,
2789 jiffies,
2790 eop_desc->upper.fields.status);
2427 netif_stop_queue(netdev); 2791 netif_stop_queue(netdev);
2792 }
2428 } 2793 }
2794#ifdef NETIF_F_TSO
2795
 2796 if(unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
2797 time_after(jiffies, adapter->previous_buffer_info.time_stamp + HZ)))
2798 e1000_unmap_and_free_tx_resource(
2799 adapter, &adapter->previous_buffer_info);
2429 2800
2801#endif
2430 return cleaned; 2802 return cleaned;
2431} 2803}
2432 2804
2433/** 2805/**
2434 * e1000_rx_checksum - Receive Checksum Offload for 82543 2806 * e1000_rx_checksum - Receive Checksum Offload for 82543
2435 * @adapter: board private structure 2807 * @adapter: board private structure
2436 * @rx_desc: receive descriptor 2808 * @status_err: receive descriptor status and error fields
2437 * @sk_buff: socket buffer with received data 2809 * @csum: receive descriptor csum field
2810 * @sk_buff: socket buffer with received data
2438 **/ 2811 **/
2439 2812
2440static inline void 2813static inline void
2441e1000_rx_checksum(struct e1000_adapter *adapter, 2814e1000_rx_checksum(struct e1000_adapter *adapter,
2442 struct e1000_rx_desc *rx_desc, 2815 uint32_t status_err, uint32_t csum,
2443 struct sk_buff *skb) 2816 struct sk_buff *skb)
2444{ 2817{
2818 uint16_t status = (uint16_t)status_err;
2819 uint8_t errors = (uint8_t)(status_err >> 24);
2820 skb->ip_summed = CHECKSUM_NONE;
2821
2445 /* 82543 or newer only */ 2822 /* 82543 or newer only */
2446 if(unlikely((adapter->hw.mac_type < e1000_82543) || 2823 if(unlikely(adapter->hw.mac_type < e1000_82543)) return;
2447 /* Ignore Checksum bit is set */ 2824 /* Ignore Checksum bit is set */
2448 (rx_desc->status & E1000_RXD_STAT_IXSM) || 2825 if(unlikely(status & E1000_RXD_STAT_IXSM)) return;
2449 /* TCP Checksum has not been calculated */ 2826 /* TCP/UDP checksum error bit is set */
2450 (!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) { 2827 if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
2451 skb->ip_summed = CHECKSUM_NONE;
2452 return;
2453 }
2454
2455 /* At this point we know the hardware did the TCP checksum */
2456 /* now look at the TCP checksum error bit */
2457 if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
2458 /* let the stack verify checksum errors */ 2828 /* let the stack verify checksum errors */
2459 skb->ip_summed = CHECKSUM_NONE;
2460 adapter->hw_csum_err++; 2829 adapter->hw_csum_err++;
2830 return;
2831 }
2832 /* TCP/UDP Checksum has not been calculated */
2833 if(adapter->hw.mac_type <= e1000_82547_rev_2) {
2834 if(!(status & E1000_RXD_STAT_TCPCS))
2835 return;
2461 } else { 2836 } else {
2837 if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
2838 return;
2839 }
2840 /* It must be a TCP or UDP packet with a valid checksum */
2841 if (likely(status & E1000_RXD_STAT_TCPCS)) {
2462 /* TCP checksum is good */ 2842 /* TCP checksum is good */
2463 skb->ip_summed = CHECKSUM_UNNECESSARY; 2843 skb->ip_summed = CHECKSUM_UNNECESSARY;
2464 adapter->hw_csum_good++; 2844 } else if (adapter->hw.mac_type > e1000_82547_rev_2) {
2845 /* IP fragment with UDP payload */
2846 /* Hardware complements the payload checksum, so we undo it
2847 * and then put the value in host order for further stack use.
2848 */
2849 csum = ntohl(csum ^ 0xFFFF);
2850 skb->csum = csum;
2851 skb->ip_summed = CHECKSUM_HW;
2465 } 2852 }
2853 adapter->hw_csum_good++;
2466} 2854}
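
The XOR in the fragmented-UDP branch deserves a gloss (a hedged restatement of the comment above it):

/* The hardware stores the one's-complemented payload sum in the
 * descriptor, so "csum ^ 0xFFFF" undoes the complement, and ntohl()
 * brings the value into host order. Stored in skb->csum with
 * CHECKSUM_HW, it lets the stack fold the partial sum into its own
 * verification instead of recomputing the payload checksum. */
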
2467 2855
2468/** 2856/**
2469 * e1000_clean_rx_irq - Send received data up the network stack 2857 * e1000_clean_rx_irq - Send received data up the network stack; legacy
2470 * @adapter: board private structure 2858 * @adapter: board private structure
2471 **/ 2859 **/
2472 2860
@@ -2513,7 +2901,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
2513 if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) { 2901 if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
2514 /* All receives must fit into a single buffer */ 2902 /* All receives must fit into a single buffer */
2515 E1000_DBG("%s: Receive packet consumed multiple" 2903 E1000_DBG("%s: Receive packet consumed multiple"
2516 " buffers\n", netdev->name); 2904 " buffers\n", netdev->name);
2517 dev_kfree_skb_irq(skb); 2905 dev_kfree_skb_irq(skb);
2518 goto next_desc; 2906 goto next_desc;
2519 } 2907 }
@@ -2539,15 +2927,17 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
2539 skb_put(skb, length - ETHERNET_FCS_SIZE); 2927 skb_put(skb, length - ETHERNET_FCS_SIZE);
2540 2928
2541 /* Receive Checksum Offload */ 2929 /* Receive Checksum Offload */
2542 e1000_rx_checksum(adapter, rx_desc, skb); 2930 e1000_rx_checksum(adapter,
2543 2931 (uint32_t)(rx_desc->status) |
2932 ((uint32_t)(rx_desc->errors) << 24),
2933 rx_desc->csum, skb);
2544 skb->protocol = eth_type_trans(skb, netdev); 2934 skb->protocol = eth_type_trans(skb, netdev);
2545#ifdef CONFIG_E1000_NAPI 2935#ifdef CONFIG_E1000_NAPI
2546 if(unlikely(adapter->vlgrp && 2936 if(unlikely(adapter->vlgrp &&
2547 (rx_desc->status & E1000_RXD_STAT_VP))) { 2937 (rx_desc->status & E1000_RXD_STAT_VP))) {
2548 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 2938 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
2549 le16_to_cpu(rx_desc->special) & 2939 le16_to_cpu(rx_desc->special) &
2550 E1000_RXD_SPC_VLAN_MASK); 2940 E1000_RXD_SPC_VLAN_MASK);
2551 } else { 2941 } else {
2552 netif_receive_skb(skb); 2942 netif_receive_skb(skb);
2553 } 2943 }
@@ -2570,16 +2960,142 @@ next_desc:
2570 2960
2571 rx_desc = E1000_RX_DESC(*rx_ring, i); 2961 rx_desc = E1000_RX_DESC(*rx_ring, i);
2572 } 2962 }
2573
2574 rx_ring->next_to_clean = i; 2963 rx_ring->next_to_clean = i;
2964 adapter->alloc_rx_buf(adapter);
2965
2966 return cleaned;
2967}
2968
2969/**
2970 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
2971 * @adapter: board private structure
2972 **/
2973
2974static boolean_t
2975#ifdef CONFIG_E1000_NAPI
2976e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done,
2977 int work_to_do)
2978#else
2979e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
2980#endif
2981{
2982 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2983 union e1000_rx_desc_packet_split *rx_desc;
2984 struct net_device *netdev = adapter->netdev;
2985 struct pci_dev *pdev = adapter->pdev;
2986 struct e1000_buffer *buffer_info;
2987 struct e1000_ps_page *ps_page;
2988 struct e1000_ps_page_dma *ps_page_dma;
2989 struct sk_buff *skb;
2990 unsigned int i, j;
2991 uint32_t length, staterr;
2992 boolean_t cleaned = FALSE;
2993
2994 i = rx_ring->next_to_clean;
2995 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
2996 staterr = rx_desc->wb.middle.status_error;
2997
2998 while(staterr & E1000_RXD_STAT_DD) {
2999 buffer_info = &rx_ring->buffer_info[i];
3000 ps_page = &rx_ring->ps_page[i];
3001 ps_page_dma = &rx_ring->ps_page_dma[i];
3002#ifdef CONFIG_E1000_NAPI
3003 if(unlikely(*work_done >= work_to_do))
3004 break;
3005 (*work_done)++;
3006#endif
3007 cleaned = TRUE;
3008 pci_unmap_single(pdev, buffer_info->dma,
3009 buffer_info->length,
3010 PCI_DMA_FROMDEVICE);
3011
3012 skb = buffer_info->skb;
3013
3014 if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3015 E1000_DBG("%s: Packet Split buffers didn't pick up"
3016 " the full packet\n", netdev->name);
3017 dev_kfree_skb_irq(skb);
3018 goto next_desc;
3019 }
3020
3021 if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3022 dev_kfree_skb_irq(skb);
3023 goto next_desc;
3024 }
3025
3026 length = le16_to_cpu(rx_desc->wb.middle.length0);
3027
3028 if(unlikely(!length)) {
3029 E1000_DBG("%s: Last part of the packet spanning"
3030 " multiple descriptors\n", netdev->name);
3031 dev_kfree_skb_irq(skb);
3032 goto next_desc;
3033 }
3034
3035 /* Good Receive */
3036 skb_put(skb, length);
3037
3038 for(j = 0; j < PS_PAGE_BUFFERS; j++) {
3039 if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
3040 break;
3041
3042 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
3043 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3044 ps_page_dma->ps_page_dma[j] = 0;
3045 skb_shinfo(skb)->frags[j].page =
3046 ps_page->ps_page[j];
3047 ps_page->ps_page[j] = NULL;
3048 skb_shinfo(skb)->frags[j].page_offset = 0;
3049 skb_shinfo(skb)->frags[j].size = length;
3050 skb_shinfo(skb)->nr_frags++;
3051 skb->len += length;
3052 skb->data_len += length;
3053 }
2575 3054
2576 e1000_alloc_rx_buffers(adapter); 3055 e1000_rx_checksum(adapter, staterr,
3056 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
3057 skb->protocol = eth_type_trans(skb, netdev);
3058
3059#ifdef HAVE_RX_ZERO_COPY
3060 if(likely(rx_desc->wb.upper.header_status &
3061 E1000_RXDPS_HDRSTAT_HDRSP))
3062 skb_shinfo(skb)->zero_copy = TRUE;
3063#endif
3064#ifdef CONFIG_E1000_NAPI
3065 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3066 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 3067 le16_to_cpu(rx_desc->wb.middle.vlan) &
 3068 E1000_RXD_SPC_VLAN_MASK);
3069 } else {
3070 netif_receive_skb(skb);
3071 }
3072#else /* CONFIG_E1000_NAPI */
3073 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3074 vlan_hwaccel_rx(skb, adapter->vlgrp,
 3075 le16_to_cpu(rx_desc->wb.middle.vlan) &
 3076 E1000_RXD_SPC_VLAN_MASK);
3077 } else {
3078 netif_rx(skb);
3079 }
3080#endif /* CONFIG_E1000_NAPI */
3081 netdev->last_rx = jiffies;
3082
3083next_desc:
3084 rx_desc->wb.middle.status_error &= ~0xFF;
3085 buffer_info->skb = NULL;
3086 if(unlikely(++i == rx_ring->count)) i = 0;
3087
3088 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3089 staterr = rx_desc->wb.middle.status_error;
3090 }
3091 rx_ring->next_to_clean = i;
3092 adapter->alloc_rx_buf(adapter);
2577 3093
2578 return cleaned; 3094 return cleaned;
2579} 3095}
2580 3096
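
The byte accounting in the fragment loop above keeps a simple invariant worth stating (hedged summary):

/*   skb->len      = bytes in the linear area (skb_put) + all frag bytes
 *   skb->data_len =                                      all frag bytes
 * so skb_headlen(skb) == skb->len - skb->data_len stays equal to the
 * length0 header portion the hardware wrote into buffer 0. */
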
2581/** 3097/**
2582 * e1000_alloc_rx_buffers - Replace used receive buffers 3098 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
2583 * @adapter: address of board private structure 3099 * @adapter: address of board private structure
2584 **/ 3100 **/
2585 3101
@@ -2592,43 +3108,43 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
2592 struct e1000_rx_desc *rx_desc; 3108 struct e1000_rx_desc *rx_desc;
2593 struct e1000_buffer *buffer_info; 3109 struct e1000_buffer *buffer_info;
2594 struct sk_buff *skb; 3110 struct sk_buff *skb;
2595 unsigned int i, bufsz; 3111 unsigned int i;
3112 unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
2596 3113
2597 i = rx_ring->next_to_use; 3114 i = rx_ring->next_to_use;
2598 buffer_info = &rx_ring->buffer_info[i]; 3115 buffer_info = &rx_ring->buffer_info[i];
2599 3116
2600 while(!buffer_info->skb) { 3117 while(!buffer_info->skb) {
2601 bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
2602
2603 skb = dev_alloc_skb(bufsz); 3118 skb = dev_alloc_skb(bufsz);
3119
2604 if(unlikely(!skb)) { 3120 if(unlikely(!skb)) {
2605 /* Better luck next round */ 3121 /* Better luck next round */
2606 break; 3122 break;
2607 } 3123 }
2608 3124
2609 /* fix for errata 23, cant cross 64kB boundary */ 3125 /* Fix for errata 23, can't cross 64kB boundary */
2610 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 3126 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
2611 struct sk_buff *oldskb = skb; 3127 struct sk_buff *oldskb = skb;
2612 DPRINTK(RX_ERR,ERR, 3128 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
2613 "skb align check failed: %u bytes at %p\n", 3129 "at %p\n", bufsz, skb->data);
2614 bufsz, skb->data); 3130 /* Try again, without freeing the previous */
2615 /* try again, without freeing the previous */
2616 skb = dev_alloc_skb(bufsz); 3131 skb = dev_alloc_skb(bufsz);
3132 /* Failed allocation, critical failure */
2617 if (!skb) { 3133 if (!skb) {
2618 dev_kfree_skb(oldskb); 3134 dev_kfree_skb(oldskb);
2619 break; 3135 break;
2620 } 3136 }
3137
2621 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 3138 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
2622 /* give up */ 3139 /* give up */
2623 dev_kfree_skb(skb); 3140 dev_kfree_skb(skb);
2624 dev_kfree_skb(oldskb); 3141 dev_kfree_skb(oldskb);
2625 break; /* while !buffer_info->skb */ 3142 break; /* while !buffer_info->skb */
2626 } else { 3143 } else {
2627 /* move on with the new one */ 3144 /* Use new allocation */
2628 dev_kfree_skb(oldskb); 3145 dev_kfree_skb(oldskb);
2629 } 3146 }
2630 } 3147 }
2631
2632 /* Make buffer alignment 2 beyond a 16 byte boundary 3148 /* Make buffer alignment 2 beyond a 16 byte boundary
2633 * this will result in a 16 byte aligned IP header after 3149 * this will result in a 16 byte aligned IP header after
2634 * the 14 byte MAC header is removed 3150 * the 14 byte MAC header is removed
@@ -2644,25 +3160,23 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
2644 adapter->rx_buffer_len, 3160 adapter->rx_buffer_len,
2645 PCI_DMA_FROMDEVICE); 3161 PCI_DMA_FROMDEVICE);
2646 3162
2647 /* fix for errata 23, cant cross 64kB boundary */ 3163 /* Fix for errata 23, can't cross 64kB boundary */
2648 if(!e1000_check_64k_bound(adapter, 3164 if (!e1000_check_64k_bound(adapter,
2649 (void *)(unsigned long)buffer_info->dma, 3165 (void *)(unsigned long)buffer_info->dma,
2650 adapter->rx_buffer_len)) { 3166 adapter->rx_buffer_len)) {
2651 DPRINTK(RX_ERR,ERR, 3167 DPRINTK(RX_ERR, ERR,
2652 "dma align check failed: %u bytes at %ld\n", 3168 "dma align check failed: %u bytes at %p\n",
2653 adapter->rx_buffer_len, (unsigned long)buffer_info->dma); 3169 adapter->rx_buffer_len,
2654 3170 (void *)(unsigned long)buffer_info->dma);
2655 dev_kfree_skb(skb); 3171 dev_kfree_skb(skb);
2656 buffer_info->skb = NULL; 3172 buffer_info->skb = NULL;
2657 3173
2658 pci_unmap_single(pdev, 3174 pci_unmap_single(pdev, buffer_info->dma,
2659 buffer_info->dma,
2660 adapter->rx_buffer_len, 3175 adapter->rx_buffer_len,
2661 PCI_DMA_FROMDEVICE); 3176 PCI_DMA_FROMDEVICE);
2662 3177
2663 break; /* while !buffer_info->skb */ 3178 break; /* while !buffer_info->skb */
2664 } 3179 }
2665
2666 rx_desc = E1000_RX_DESC(*rx_ring, i); 3180 rx_desc = E1000_RX_DESC(*rx_ring, i);
2667 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 3181 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2668 3182
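
The two errata-23 checks above (one on skb->data, one on the DMA address) reduce to the same test: a receive buffer must not straddle a 64 kB boundary. A minimal userspace model of that test, with illustrative names (crosses_64k is not a driver symbol):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A buffer crosses a 64 kB boundary iff its first and last bytes
 * fall in different 64 kB-aligned chunks of the address space. */
static int crosses_64k(uintptr_t start, size_t len)
{
	return (start >> 16) != ((start + len - 1) >> 16);
}

int main(void)
{
	printf("%d\n", crosses_64k(0xFFF0, 0x20));  /* 1: spans 0x10000 */
	printf("%d\n", crosses_64k(0x10000, 0x20)); /* 0: stays in one chunk */
	return 0;
}
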
@@ -2672,7 +3186,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
2672 * applicable for weak-ordered memory model archs, 3186 * applicable for weak-ordered memory model archs,
2673 * such as IA-64). */ 3187 * such as IA-64). */
2674 wmb(); 3188 wmb();
2675
2676 E1000_WRITE_REG(&adapter->hw, RDT, i); 3189 E1000_WRITE_REG(&adapter->hw, RDT, i);
2677 } 3190 }
2678 3191
@@ -2684,6 +3197,95 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
2684} 3197}
2685 3198
2686/** 3199/**
3200 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
3201 * @adapter: address of board private structure
3202 **/
3203
3204static void
3205e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
3206{
3207 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
3208 struct net_device *netdev = adapter->netdev;
3209 struct pci_dev *pdev = adapter->pdev;
3210 union e1000_rx_desc_packet_split *rx_desc;
3211 struct e1000_buffer *buffer_info;
3212 struct e1000_ps_page *ps_page;
3213 struct e1000_ps_page_dma *ps_page_dma;
3214 struct sk_buff *skb;
3215 unsigned int i, j;
3216
3217 i = rx_ring->next_to_use;
3218 buffer_info = &rx_ring->buffer_info[i];
3219 ps_page = &rx_ring->ps_page[i];
3220 ps_page_dma = &rx_ring->ps_page_dma[i];
3221
3222 while(!buffer_info->skb) {
3223 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3224
3225 for(j = 0; j < PS_PAGE_BUFFERS; j++) {
3226 if(unlikely(!ps_page->ps_page[j])) {
3227 ps_page->ps_page[j] =
3228 alloc_page(GFP_ATOMIC);
3229 if(unlikely(!ps_page->ps_page[j]))
3230 goto no_buffers;
3231 ps_page_dma->ps_page_dma[j] =
3232 pci_map_page(pdev,
3233 ps_page->ps_page[j],
3234 0, PAGE_SIZE,
3235 PCI_DMA_FROMDEVICE);
3236 }
3237 /* Refresh the desc even if buffer_addrs didn't
3238 * change because each write-back erases this info.
3239 */
3240 rx_desc->read.buffer_addr[j+1] =
3241 cpu_to_le64(ps_page_dma->ps_page_dma[j]);
3242 }
3243
3244 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
3245
3246 if(unlikely(!skb))
3247 break;
3248
3249 /* Make buffer alignment 2 beyond a 16 byte boundary
3250 * this will result in a 16 byte aligned IP header after
3251 * the 14 byte MAC header is removed
3252 */
3253 skb_reserve(skb, NET_IP_ALIGN);
3254
3255 skb->dev = netdev;
3256
3257 buffer_info->skb = skb;
3258 buffer_info->length = adapter->rx_ps_bsize0;
3259 buffer_info->dma = pci_map_single(pdev, skb->data,
3260 adapter->rx_ps_bsize0,
3261 PCI_DMA_FROMDEVICE);
3262
3263 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
3264
3265 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
3266 /* Force memory writes to complete before letting h/w
3267 * know there are new descriptors to fetch. (Only
3268 * applicable for weak-ordered memory model archs,
3269 * such as IA-64). */
3270 wmb();
3271 /* Hardware increments by 16 bytes, but packet split
3272 * descriptors are 32 bytes...so we increment tail
3273 * twice as much.
3274 */
3275 E1000_WRITE_REG(&adapter->hw, RDT, i<<1);
3276 }
3277
3278 if(unlikely(++i == rx_ring->count)) i = 0;
3279 buffer_info = &rx_ring->buffer_info[i];
3280 ps_page = &rx_ring->ps_page[i];
3281 ps_page_dma = &rx_ring->ps_page_dma[i];
3282 }
3283
3284no_buffers:
3285 rx_ring->next_to_use = i;
3286}
3287
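
One detail of the new packet-split allocator worth calling out is the RDT write: the hardware moves its tail pointer in 16-byte units while each packet-split descriptor is 32 bytes, hence the i<<1. A sketch of the index math under that assumption:

#include <stdio.h>

int main(void)
{
	const unsigned desc_bytes = 32;	/* packet-split descriptor size */
	const unsigned tail_unit = 16;	/* hardware tail granularity */

	/* Descriptor index i maps to tail value i * (32/16) == i << 1. */
	for (unsigned i = 0; i < 4; i++)
		printf("descriptor %u -> RDT %u\n", i, i * (desc_bytes / tail_unit));
	return 0;
}
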
3288/**
2687 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 3289 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
2688 * @adapter: 3290 * @adapter:
2689 **/ 3291 **/
@@ -2856,9 +3458,10 @@ void
2856e1000_pci_set_mwi(struct e1000_hw *hw) 3458e1000_pci_set_mwi(struct e1000_hw *hw)
2857{ 3459{
2858 struct e1000_adapter *adapter = hw->back; 3460 struct e1000_adapter *adapter = hw->back;
3461 int ret_val = pci_set_mwi(adapter->pdev);
2859 3462
2860 int ret; 3463 if(ret_val)
2861 ret = pci_set_mwi(adapter->pdev); 3464 DPRINTK(PROBE, ERR, "Error in setting MWI\n");
2862} 3465}
2863 3466
2864void 3467void
@@ -2917,6 +3520,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2917 rctl |= E1000_RCTL_VFE; 3520 rctl |= E1000_RCTL_VFE;
2918 rctl &= ~E1000_RCTL_CFIEN; 3521 rctl &= ~E1000_RCTL_CFIEN;
2919 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 3522 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
3523 e1000_update_mng_vlan(adapter);
2920 } else { 3524 } else {
2921 /* disable VLAN tag insert/strip */ 3525 /* disable VLAN tag insert/strip */
2922 ctrl = E1000_READ_REG(&adapter->hw, CTRL); 3526 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
@@ -2927,6 +3531,10 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2927 rctl = E1000_READ_REG(&adapter->hw, RCTL); 3531 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2928 rctl &= ~E1000_RCTL_VFE; 3532 rctl &= ~E1000_RCTL_VFE;
2929 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 3533 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
3534 if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
3535 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3536 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3537 }
2930 } 3538 }
2931 3539
2932 e1000_irq_enable(adapter); 3540 e1000_irq_enable(adapter);
@@ -2937,7 +3545,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2937{ 3545{
2938 struct e1000_adapter *adapter = netdev->priv; 3546 struct e1000_adapter *adapter = netdev->priv;
2939 uint32_t vfta, index; 3547 uint32_t vfta, index;
2940 3548 if((adapter->hw.mng_cookie.status &
3549 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
3550 (vid == adapter->mng_vlan_id))
3551 return;
2941 /* add VID to filter table */ 3552 /* add VID to filter table */
2942 index = (vid >> 5) & 0x7F; 3553 index = (vid >> 5) & 0x7F;
2943 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); 3554 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
@@ -2958,6 +3569,10 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2958 3569
2959 e1000_irq_enable(adapter); 3570 e1000_irq_enable(adapter);
2960 3571
3572 if((adapter->hw.mng_cookie.status &
3573 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
3574 (vid == adapter->mng_vlan_id))
3575 return;
2961 /* remove VID from filter table */ 3576 /* remove VID from filter table */
2962 index = (vid >> 5) & 0x7F; 3577 index = (vid >> 5) & 0x7F;
2963 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); 3578 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
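
Both VLAN hunks manipulate the same 4096-bit VLAN filter table: bits 11:5 of the VID select one of 128 32-bit VFTA registers, and bits 4:0 select the bit within it. A self-contained model of that indexing (vfta_set is illustrative, not a driver function):

#include <stdint.h>
#include <stdio.h>

static uint32_t vfta[128];			/* 128 x 32 bits = 4096 VIDs */

static void vfta_set(uint16_t vid, int on)
{
	uint32_t index = (vid >> 5) & 0x7F;	/* which register */
	uint32_t bit = 1u << (vid & 0x1F);	/* which bit within it */

	if (on)
		vfta[index] |= bit;
	else
		vfta[index] &= ~bit;
}

int main(void)
{
	vfta_set(100, 1);
	printf("VFTA[%u] = %#010x\n", 100 >> 5, vfta[100 >> 5]); /* bit 4 set */
	return 0;
}
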
@@ -3004,8 +3619,7 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
3004 break; 3619 break;
3005 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 3620 case SPEED_1000 + DUPLEX_HALF: /* not supported */
3006 default: 3621 default:
3007 DPRINTK(PROBE, ERR, 3622 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
3008 "Unsupported Speed/Duplexity configuration\n");
3009 return -EINVAL; 3623 return -EINVAL;
3010 } 3624 }
3011 return 0; 3625 return 0;
@@ -3033,7 +3647,7 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
3033{ 3647{
3034 struct net_device *netdev = pci_get_drvdata(pdev); 3648 struct net_device *netdev = pci_get_drvdata(pdev);
3035 struct e1000_adapter *adapter = netdev->priv; 3649 struct e1000_adapter *adapter = netdev->priv;
3036 uint32_t ctrl, ctrl_ext, rctl, manc, status; 3650 uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
3037 uint32_t wufc = adapter->wol; 3651 uint32_t wufc = adapter->wol;
3038 3652
3039 netif_device_detach(netdev); 3653 netif_device_detach(netdev);
@@ -3075,6 +3689,9 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
3075 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext); 3689 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
3076 } 3690 }
3077 3691
3692 /* Allow time for pending master requests to run */
3693 e1000_disable_pciex_master(&adapter->hw);
3694
3078 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); 3695 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
3079 E1000_WRITE_REG(&adapter->hw, WUFC, wufc); 3696 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
3080 pci_enable_wake(pdev, 3, 1); 3697 pci_enable_wake(pdev, 3, 1);
@@ -3099,6 +3716,16 @@ e1000_suspend(struct pci_dev *pdev, uint32_t state)
3099 } 3716 }
3100 } 3717 }
3101 3718
3719 switch(adapter->hw.mac_type) {
3720 case e1000_82573:
3721 swsm = E1000_READ_REG(&adapter->hw, SWSM);
3722 E1000_WRITE_REG(&adapter->hw, SWSM,
3723 swsm & ~E1000_SWSM_DRV_LOAD);
3724 break;
3725 default:
3726 break;
3727 }
3728
3102 pci_disable_device(pdev); 3729 pci_disable_device(pdev);
3103 3730
3104 state = (state > 0) ? 3 : 0; 3731 state = (state > 0) ? 3 : 0;
@@ -3113,13 +3740,12 @@ e1000_resume(struct pci_dev *pdev)
3113{ 3740{
3114 struct net_device *netdev = pci_get_drvdata(pdev); 3741 struct net_device *netdev = pci_get_drvdata(pdev);
3115 struct e1000_adapter *adapter = netdev->priv; 3742 struct e1000_adapter *adapter = netdev->priv;
3116 uint32_t manc, ret; 3743 uint32_t manc, ret, swsm;
3117 3744
3118 pci_set_power_state(pdev, 0); 3745 pci_set_power_state(pdev, 0);
3119 pci_restore_state(pdev); 3746 pci_restore_state(pdev);
3120 ret = pci_enable_device(pdev); 3747 ret = pci_enable_device(pdev);
3121 if (pdev->is_busmaster) 3748 pci_set_master(pdev);
3122 pci_set_master(pdev);
3123 3749
3124 pci_enable_wake(pdev, 3, 0); 3750 pci_enable_wake(pdev, 3, 0);
3125 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */ 3751 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
@@ -3139,10 +3765,19 @@ e1000_resume(struct pci_dev *pdev)
3139 E1000_WRITE_REG(&adapter->hw, MANC, manc); 3765 E1000_WRITE_REG(&adapter->hw, MANC, manc);
3140 } 3766 }
3141 3767
3768 switch(adapter->hw.mac_type) {
3769 case e1000_82573:
3770 swsm = E1000_READ_REG(&adapter->hw, SWSM);
3771 E1000_WRITE_REG(&adapter->hw, SWSM,
3772 swsm | E1000_SWSM_DRV_LOAD);
3773 break;
3774 default:
3775 break;
3776 }
3777
3142 return 0; 3778 return 0;
3143} 3779}
3144#endif 3780#endif
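
The paired SWSM hunks implement a simple ownership handshake with the 82573 management firmware: suspend clears the DRV_LOAD bit so the firmware takes over the NIC, and resume sets it again. A toy read-modify-write rendering (the bit value here is an assumption for illustration, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

#define SWSM_DRV_LOAD 0x00000008u	/* assumed bit position */

int main(void)
{
	uint32_t swsm = 0x0000000Fu;	/* pretend register contents */

	swsm &= ~SWSM_DRV_LOAD;		/* suspend: firmware owns the NIC */
	printf("suspend: %#010x\n", swsm);

	swsm |= SWSM_DRV_LOAD;		/* resume: driver reclaims the NIC */
	printf("resume:  %#010x\n", swsm);
	return 0;
}
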
3145
3146#ifdef CONFIG_NET_POLL_CONTROLLER 3781#ifdef CONFIG_NET_POLL_CONTROLLER
3147/* 3782/*
3148 * Polling 'interrupt' - used by things like netconsole to send skbs 3783 * Polling 'interrupt' - used by things like netconsole to send skbs
@@ -3150,7 +3785,7 @@ e1000_resume(struct pci_dev *pdev)
3150 * the interrupt routine is executing. 3785 * the interrupt routine is executing.
3151 */ 3786 */
3152static void 3787static void
3153e1000_netpoll (struct net_device *netdev) 3788e1000_netpoll(struct net_device *netdev)
3154{ 3789{
3155 struct e1000_adapter *adapter = netdev->priv; 3790 struct e1000_adapter *adapter = netdev->priv;
3156 disable_irq(adapter->pdev->irq); 3791 disable_irq(adapter->pdev->irq);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 970c656a517c..aac64de61437 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 3
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free 7 under the terms of the GNU General Public License as published by the Free
@@ -42,7 +42,12 @@
42#include <linux/sched.h> 42#include <linux/sched.h>
43 43
44#ifndef msec_delay 44#ifndef msec_delay
45#define msec_delay(x) msleep(x) 45#define msec_delay(x) do { if(in_interrupt()) { \
46 /* Don't mdelay in interrupt context! */ \
47 BUG(); \
48 } else { \
49 msleep(x); \
50 } } while(0)
46 51
47/* Some workarounds require millisecond delays and are run during interrupt 52/* Some workarounds require millisecond delays and are run during interrupt
48 * context. Most notably, when establishing link, the phy may need tweaking 53 * context. Most notably, when establishing link, the phy may need tweaking
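
The reworked msec_delay() trades a silent misuse for a loud one: sleeping is only legal in process context, so the macro now BUG()s when in_interrupt() is true instead of sleeping anyway. A rough userspace analogue, with in_irq_ctx standing in for in_interrupt():

#include <assert.h>
#include <stdbool.h>
#include <time.h>

static bool in_irq_ctx;		/* stand-in for the kernel's in_interrupt() */

static void msec_delay(unsigned int ms)
{
	struct timespec ts = { ms / 1000, (long)(ms % 1000) * 1000000L };

	assert(!in_irq_ctx);	/* the kernel macro calls BUG() here */
	nanosleep(&ts, NULL);
}

int main(void)
{
	msec_delay(1);
	return 0;
}
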
@@ -96,6 +101,29 @@ typedef enum {
96 (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 101 (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
97 ((offset) << 2))) 102 ((offset) << 2)))
98 103
104#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
105#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
106
107#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
108 writew((value), ((a)->hw_addr + \
109 (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
110 ((offset) << 1))))
111
112#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
113 readw((a)->hw_addr + \
114 (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
115 ((offset) << 1)))
116
117#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
118 writeb((value), ((a)->hw_addr + \
119 (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
120 (offset))))
121
122#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
123 readb((a)->hw_addr + \
124 (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
125 (offset)))
126
99#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) 127#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
100 128
101#endif /* _E1000_OSDEP_H_ */ 129#endif /* _E1000_OSDEP_H_ */
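
The new word- and byte-granularity accessors differ from the existing dword macro only in how the element index is scaled to a byte offset: <<2 for dwords, <<1 for words, and no shift for bytes. The scaling in isolation:

#include <stdio.h>

int main(void)
{
	unsigned idx = 3;

	printf("dword[%u] -> byte offset %u\n", idx, idx << 2); /* 12 */
	printf("word[%u]  -> byte offset %u\n", idx, idx << 1); /* 6 */
	printf("byte[%u]  -> byte offset %u\n", idx, idx);      /* 3 */
	return 0;
}
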
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index e914d09fe6f9..676247f9f1cc 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 3
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free 7 under the terms of the GNU General Public License as published by the Free
@@ -478,7 +478,6 @@ e1000_check_options(struct e1000_adapter *adapter)
478 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 478 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
479 opt.name); 479 opt.name);
480 break; 480 break;
481 case -1:
482 default: 481 default:
483 e1000_validate_option(&adapter->itr, &opt, 482 e1000_validate_option(&adapter->itr, &opt,
484 adapter); 483 adapter);
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index ab44358ddbfc..6482d994d489 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -1595,7 +1595,7 @@ static struct ethtool_ops emac_ethtool_ops = {
1595static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1595static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1596{ 1596{
1597 struct ocp_enet_private *fep = dev->priv; 1597 struct ocp_enet_private *fep = dev->priv;
1598 uint *data = (uint *) & rq->ifr_ifru; 1598 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
1599 1599
1600 switch (cmd) { 1600 switch (cmd) {
1601 case SIOCGMIIPHY: 1601 case SIOCGMIIPHY:
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 26c4f15f7fc0..f8d3385c7842 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -110,7 +110,7 @@ struct ixgb_adapter;
110#define IXGB_TX_QUEUE_WAKE 16 110#define IXGB_TX_QUEUE_WAKE 16
111 111
112/* How many Rx Buffers do we bundle into one write to the hardware ? */ 112/* How many Rx Buffers do we bundle into one write to the hardware ? */
113#define IXGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 113#define IXGB_RX_BUFFER_WRITE 4 /* Must be power of 2 */
114 114
115/* only works for sizes that are powers of 2 */ 115/* only works for sizes that are powers of 2 */
116#define IXGB_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1))) 116#define IXGB_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
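
IXGB_RX_BUFFER_WRITE must stay a power of two because the refill loop uses a mask test, not a modulo, to decide when to write the tail register. A small demonstration of the test used later in ixgb_alloc_rx_buffers():

#include <stdio.h>

#define RX_BUFFER_WRITE 4	/* the new bundling factor; must be 2^n */

int main(void)
{
	/* (i & ~(N - 1)) == i holds exactly when i is a multiple of N,
	 * so the hardware tail is touched once per N refilled buffers. */
	for (unsigned i = 0; i < 10; i++)
		if ((i & ~(RX_BUFFER_WRITE - 1)) == i)
			printf("tail write at descriptor %u\n", i);
	return 0;
}
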
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 653e99f919ce..3aae110c5560 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -411,7 +411,7 @@ ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
411 ixgb_cleanup_eeprom(hw); 411 ixgb_cleanup_eeprom(hw);
412 412
413 /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */ 413 /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
414 ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR; 414 ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR);
415 415
416 return; 416 return;
417} 417}
@@ -483,7 +483,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
483 DEBUGOUT("ixgb_ee: Checksum invalid.\n"); 483 DEBUGOUT("ixgb_ee: Checksum invalid.\n");
484 /* clear the init_ctrl_reg_1 to signify that the cache is 484 /* clear the init_ctrl_reg_1 to signify that the cache is
485 * invalidated */ 485 * invalidated */
486 ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR; 486 ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR);
487 return (FALSE); 487 return (FALSE);
488 } 488 }
489 489
@@ -579,7 +579,7 @@ ixgb_get_ee_compatibility(struct ixgb_hw *hw)
579 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 579 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
580 580
581 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 581 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
582 return(ee_map->compatibility); 582 return (le16_to_cpu(ee_map->compatibility));
583 583
584 return(0); 584 return(0);
585} 585}
@@ -616,7 +616,7 @@ ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
616 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 616 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
617 617
618 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 618 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
619 return(ee_map->init_ctrl_reg_1); 619 return (le16_to_cpu(ee_map->init_ctrl_reg_1));
620 620
621 return(0); 621 return(0);
622} 622}
@@ -635,7 +635,7 @@ ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
635 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 635 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
636 636
637 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 637 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
638 return(ee_map->init_ctrl_reg_2); 638 return (le16_to_cpu(ee_map->init_ctrl_reg_2));
639 639
640 return(0); 640 return(0);
641} 641}
@@ -654,7 +654,7 @@ ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
654 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 654 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
655 655
656 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 656 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
657 return(ee_map->subsystem_id); 657 return (le16_to_cpu(ee_map->subsystem_id));
658 658
659 return(0); 659 return(0);
660} 660}
@@ -673,7 +673,7 @@ ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
673 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 673 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
674 674
675 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 675 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
676 return(ee_map->subvendor_id); 676 return (le16_to_cpu(ee_map->subvendor_id));
677 677
678 return(0); 678 return(0);
679} 679}
@@ -692,7 +692,7 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
692 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 692 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
693 693
694 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 694 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
695 return(ee_map->device_id); 695 return (le16_to_cpu(ee_map->device_id));
696 696
697 return(0); 697 return(0);
698} 698}
@@ -711,7 +711,7 @@ ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
711 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 711 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
712 712
713 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 713 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
714 return(ee_map->vendor_id); 714 return (le16_to_cpu(ee_map->vendor_id));
715 715
716 return(0); 716 return(0);
717} 717}
@@ -730,7 +730,7 @@ ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
730 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 730 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
731 731
732 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 732 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
733 return(ee_map->swdpins_reg); 733 return (le16_to_cpu(ee_map->swdpins_reg));
734 734
735 return(0); 735 return(0);
736} 736}
@@ -749,7 +749,7 @@ ixgb_get_ee_d3_power(struct ixgb_hw *hw)
749 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 749 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
750 750
751 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 751 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
752 return(ee_map->d3_power); 752 return (le16_to_cpu(ee_map->d3_power));
753 753
754 return(0); 754 return(0);
755} 755}
@@ -768,7 +768,7 @@ ixgb_get_ee_d0_power(struct ixgb_hw *hw)
768 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 768 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
769 769
770 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 770 if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
771 return(ee_map->d0_power); 771 return (le16_to_cpu(ee_map->d0_power));
772 772
773 return(0); 773 return(0);
774} 774}
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index aea10e8aaa72..3fa113854eeb 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -252,7 +252,9 @@ ixgb_get_regs(struct net_device *netdev,
252 uint32_t *reg_start = reg; 252 uint32_t *reg_start = reg;
253 uint8_t i; 253 uint8_t i;
254 254
255 regs->version = (adapter->hw.device_id << 16) | adapter->hw.subsystem_id; 255 /* the 1 (one) below indicates an attempt at versioning, if the
256 * interface in ethtool or the driver changes, this 1 should be incremented */
257 regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
256 258
257 /* General Registers */ 259 /* General Registers */
258 *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */ 260 *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 7d26623d8592..35f6a7c271a2 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -47,7 +47,7 @@ char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
47#else 47#else
48#define DRIVERNAPI "-NAPI" 48#define DRIVERNAPI "-NAPI"
49#endif 49#endif
50char ixgb_driver_version[] = "1.0.90-k2"DRIVERNAPI; 50char ixgb_driver_version[] = "1.0.95-k2"DRIVERNAPI;
51char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; 51char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
52 52
53/* ixgb_pci_tbl - PCI Device ID Table 53/* ixgb_pci_tbl - PCI Device ID Table
@@ -103,6 +103,7 @@ static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
103static int ixgb_set_mac(struct net_device *netdev, void *p); 103static int ixgb_set_mac(struct net_device *netdev, void *p);
104static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs); 104static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
105static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter); 105static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
106
106#ifdef CONFIG_IXGB_NAPI 107#ifdef CONFIG_IXGB_NAPI
107static int ixgb_clean(struct net_device *netdev, int *budget); 108static int ixgb_clean(struct net_device *netdev, int *budget);
108static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter, 109static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
@@ -120,33 +121,20 @@ static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
120static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 121static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
121static void ixgb_restore_vlan(struct ixgb_adapter *adapter); 122static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
122 123
123static int ixgb_notify_reboot(struct notifier_block *, unsigned long event,
124 void *ptr);
125static int ixgb_suspend(struct pci_dev *pdev, uint32_t state);
126
127#ifdef CONFIG_NET_POLL_CONTROLLER 124#ifdef CONFIG_NET_POLL_CONTROLLER
128/* for netdump / net console */ 125/* for netdump / net console */
129static void ixgb_netpoll(struct net_device *dev); 126static void ixgb_netpoll(struct net_device *dev);
130#endif 127#endif
131 128
132struct notifier_block ixgb_notifier_reboot = {
133 .notifier_call = ixgb_notify_reboot,
134 .next = NULL,
135 .priority = 0
136};
137
138/* Exported from other modules */ 129/* Exported from other modules */
139 130
140extern void ixgb_check_options(struct ixgb_adapter *adapter); 131extern void ixgb_check_options(struct ixgb_adapter *adapter);
141 132
142static struct pci_driver ixgb_driver = { 133static struct pci_driver ixgb_driver = {
143 .name = ixgb_driver_name, 134 .name = ixgb_driver_name,
144 .id_table = ixgb_pci_tbl, 135 .id_table = ixgb_pci_tbl,
145 .probe = ixgb_probe, 136 .probe = ixgb_probe,
146 .remove = __devexit_p(ixgb_remove), 137 .remove = __devexit_p(ixgb_remove),
147 /* Power Managment Hooks */
148 .suspend = NULL,
149 .resume = NULL
150}; 138};
151 139
152MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 140MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -169,17 +157,12 @@ MODULE_LICENSE("GPL");
169static int __init 157static int __init
170ixgb_init_module(void) 158ixgb_init_module(void)
171{ 159{
172 int ret;
173 printk(KERN_INFO "%s - version %s\n", 160 printk(KERN_INFO "%s - version %s\n",
174 ixgb_driver_string, ixgb_driver_version); 161 ixgb_driver_string, ixgb_driver_version);
175 162
176 printk(KERN_INFO "%s\n", ixgb_copyright); 163 printk(KERN_INFO "%s\n", ixgb_copyright);
177 164
178 ret = pci_module_init(&ixgb_driver); 165 return pci_module_init(&ixgb_driver);
179 if(ret >= 0) {
180 register_reboot_notifier(&ixgb_notifier_reboot);
181 }
182 return ret;
183} 166}
184 167
185module_init(ixgb_init_module); 168module_init(ixgb_init_module);
@@ -194,7 +177,6 @@ module_init(ixgb_init_module);
194static void __exit 177static void __exit
195ixgb_exit_module(void) 178ixgb_exit_module(void)
196{ 179{
197 unregister_reboot_notifier(&ixgb_notifier_reboot);
198 pci_unregister_driver(&ixgb_driver); 180 pci_unregister_driver(&ixgb_driver);
199} 181}
200 182
@@ -224,8 +206,8 @@ ixgb_irq_enable(struct ixgb_adapter *adapter)
224{ 206{
225 if(atomic_dec_and_test(&adapter->irq_sem)) { 207 if(atomic_dec_and_test(&adapter->irq_sem)) {
226 IXGB_WRITE_REG(&adapter->hw, IMS, 208 IXGB_WRITE_REG(&adapter->hw, IMS,
227 IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW | 209 IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
228 IXGB_INT_RXO | IXGB_INT_LSC); 210 IXGB_INT_LSC);
229 IXGB_WRITE_FLUSH(&adapter->hw); 211 IXGB_WRITE_FLUSH(&adapter->hw);
230 } 212 }
231} 213}
@@ -1209,10 +1191,10 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1209 | IXGB_CONTEXT_DESC_CMD_TSE 1191 | IXGB_CONTEXT_DESC_CMD_TSE
1210 | IXGB_CONTEXT_DESC_CMD_IP 1192 | IXGB_CONTEXT_DESC_CMD_IP
1211 | IXGB_CONTEXT_DESC_CMD_TCP 1193 | IXGB_CONTEXT_DESC_CMD_TCP
1212 | IXGB_CONTEXT_DESC_CMD_RS
1213 | IXGB_CONTEXT_DESC_CMD_IDE 1194 | IXGB_CONTEXT_DESC_CMD_IDE
1214 | (skb->len - (hdr_len))); 1195 | (skb->len - (hdr_len)));
1215 1196
1197
1216 if(++i == adapter->tx_ring.count) i = 0; 1198 if(++i == adapter->tx_ring.count) i = 0;
1217 adapter->tx_ring.next_to_use = i; 1199 adapter->tx_ring.next_to_use = i;
1218 1200
@@ -1247,8 +1229,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1247 context_desc->mss = 0; 1229 context_desc->mss = 0;
1248 context_desc->cmd_type_len = 1230 context_desc->cmd_type_len =
1249 cpu_to_le32(IXGB_CONTEXT_DESC_TYPE 1231 cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
1250 | IXGB_TX_DESC_CMD_RS 1232 | IXGB_TX_DESC_CMD_IDE);
1251 | IXGB_TX_DESC_CMD_IDE);
1252 1233
1253 if(++i == adapter->tx_ring.count) i = 0; 1234 if(++i == adapter->tx_ring.count) i = 0;
1254 adapter->tx_ring.next_to_use = i; 1235 adapter->tx_ring.next_to_use = i;
@@ -1273,6 +1254,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1273 1254
1274 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 1255 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1275 unsigned int f; 1256 unsigned int f;
1257
1276 len -= skb->data_len; 1258 len -= skb->data_len;
1277 1259
1278 i = tx_ring->next_to_use; 1260 i = tx_ring->next_to_use;
@@ -1526,14 +1508,33 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1526void 1508void
1527ixgb_update_stats(struct ixgb_adapter *adapter) 1509ixgb_update_stats(struct ixgb_adapter *adapter)
1528{ 1510{
1511 struct net_device *netdev = adapter->netdev;
1512
1513 if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1514 (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1515 u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1516 u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1517 u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1518 u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1519
1520 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1521 /* fix up multicast stats by removing broadcasts */
1522 multi -= bcast;
1523
1524 adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1525 adapter->stats.mprch += (multi >> 32);
1526 adapter->stats.bprcl += bcast_l;
1527 adapter->stats.bprch += bcast_h;
1528 } else {
1529 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1530 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1531 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1532 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1533 }
1529 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL); 1534 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1530 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH); 1535 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1531 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL); 1536 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1532 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH); 1537 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1533 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1534 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1535 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1536 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1537 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL); 1538 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1538 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH); 1539 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1539 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL); 1540 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
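
The new branch corrects a double count: in promiscuous or all-multi mode the hardware's multicast counters also include broadcast frames, so the broadcasts are assembled into a 64-bit value and subtracted before the split 32-bit accumulators are updated. The arithmetic on sample values (register contents here are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mprcl = 0xFFFFFFF0u, mprch = 0x1u; /* multicast lo/hi */
	uint32_t bprcl = 0x20u, bprch = 0x0u;       /* broadcast lo/hi */

	uint64_t multi = ((uint64_t)mprch << 32) | mprcl;
	uint64_t bcast = ((uint64_t)bprch << 32) | bprcl;

	multi -= bcast;	/* broadcasts were counted as multicasts too */

	printf("mprcl += %#010x, mprch += %#x\n",
	       (uint32_t)(multi & 0xFFFFFFFFu), (uint32_t)(multi >> 32));
	return 0;
}
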
@@ -1823,7 +1824,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1823 struct pci_dev *pdev = adapter->pdev; 1824 struct pci_dev *pdev = adapter->pdev;
1824 struct ixgb_rx_desc *rx_desc, *next_rxd; 1825 struct ixgb_rx_desc *rx_desc, *next_rxd;
1825 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; 1826 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1826 struct sk_buff *skb, *next_skb;
1827 uint32_t length; 1827 uint32_t length;
1828 unsigned int i, j; 1828 unsigned int i, j;
1829 boolean_t cleaned = FALSE; 1829 boolean_t cleaned = FALSE;
@@ -1833,6 +1833,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1833 buffer_info = &rx_ring->buffer_info[i]; 1833 buffer_info = &rx_ring->buffer_info[i];
1834 1834
1835 while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) { 1835 while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1836 struct sk_buff *skb, *next_skb;
1837 u8 status;
1836 1838
1837#ifdef CONFIG_IXGB_NAPI 1839#ifdef CONFIG_IXGB_NAPI
1838 if(*work_done >= work_to_do) 1840 if(*work_done >= work_to_do)
@@ -1840,7 +1842,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1840 1842
1841 (*work_done)++; 1843 (*work_done)++;
1842#endif 1844#endif
1845 status = rx_desc->status;
1843 skb = buffer_info->skb; 1846 skb = buffer_info->skb;
1847
1844 prefetch(skb->data); 1848 prefetch(skb->data);
1845 1849
1846 if(++i == rx_ring->count) i = 0; 1850 if(++i == rx_ring->count) i = 0;
@@ -1855,7 +1859,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1855 next_skb = next_buffer->skb; 1859 next_skb = next_buffer->skb;
1856 prefetch(next_skb); 1860 prefetch(next_skb);
1857 1861
1858
1859 cleaned = TRUE; 1862 cleaned = TRUE;
1860 1863
1861 pci_unmap_single(pdev, 1864 pci_unmap_single(pdev,
@@ -1865,7 +1868,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1865 1868
1866 length = le16_to_cpu(rx_desc->length); 1869 length = le16_to_cpu(rx_desc->length);
1867 1870
1868 if(unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) { 1871 if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
1869 1872
1870 /* All receives must fit into a single buffer */ 1873 /* All receives must fit into a single buffer */
1871 1874
@@ -1873,12 +1876,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1873 "length<%x>\n", length); 1876 "length<%x>\n", length);
1874 1877
1875 dev_kfree_skb_irq(skb); 1878 dev_kfree_skb_irq(skb);
1876 rx_desc->status = 0; 1879 goto rxdesc_done;
1877 buffer_info->skb = NULL;
1878
1879 rx_desc = next_rxd;
1880 buffer_info = next_buffer;
1881 continue;
1882 } 1880 }
1883 1881
1884 if (unlikely(rx_desc->errors 1882 if (unlikely(rx_desc->errors
@@ -1887,12 +1885,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1887 IXGB_RX_DESC_ERRORS_RXE))) { 1885 IXGB_RX_DESC_ERRORS_RXE))) {
1888 1886
1889 dev_kfree_skb_irq(skb); 1887 dev_kfree_skb_irq(skb);
1890 rx_desc->status = 0; 1888 goto rxdesc_done;
1891 buffer_info->skb = NULL;
1892
1893 rx_desc = next_rxd;
1894 buffer_info = next_buffer;
1895 continue;
1896 } 1889 }
1897 1890
1898 /* Good Receive */ 1891 /* Good Receive */
@@ -1903,7 +1896,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1903 1896
1904 skb->protocol = eth_type_trans(skb, netdev); 1897 skb->protocol = eth_type_trans(skb, netdev);
1905#ifdef CONFIG_IXGB_NAPI 1898#ifdef CONFIG_IXGB_NAPI
1906 if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) { 1899 if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
1907 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 1900 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
1908 le16_to_cpu(rx_desc->special) & 1901 le16_to_cpu(rx_desc->special) &
1909 IXGB_RX_DESC_SPECIAL_VLAN_MASK); 1902 IXGB_RX_DESC_SPECIAL_VLAN_MASK);
@@ -1911,7 +1904,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1911 netif_receive_skb(skb); 1904 netif_receive_skb(skb);
1912 } 1905 }
1913#else /* CONFIG_IXGB_NAPI */ 1906#else /* CONFIG_IXGB_NAPI */
1914 if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) { 1907 if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
1915 vlan_hwaccel_rx(skb, adapter->vlgrp, 1908 vlan_hwaccel_rx(skb, adapter->vlgrp,
1916 le16_to_cpu(rx_desc->special) & 1909 le16_to_cpu(rx_desc->special) &
1917 IXGB_RX_DESC_SPECIAL_VLAN_MASK); 1910 IXGB_RX_DESC_SPECIAL_VLAN_MASK);
@@ -1921,9 +1914,12 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1921#endif /* CONFIG_IXGB_NAPI */ 1914#endif /* CONFIG_IXGB_NAPI */
1922 netdev->last_rx = jiffies; 1915 netdev->last_rx = jiffies;
1923 1916
1917rxdesc_done:
1918 /* clean up descriptor, might be written over by hw */
1924 rx_desc->status = 0; 1919 rx_desc->status = 0;
1925 buffer_info->skb = NULL; 1920 buffer_info->skb = NULL;
1926 1921
1922 /* use prefetched values */
1927 rx_desc = next_rxd; 1923 rx_desc = next_rxd;
1928 buffer_info = next_buffer; 1924 buffer_info = next_buffer;
1929 } 1925 }
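
The rxdesc_done rework pairs with the double-writeback fix below: the status byte is read once into a local before the descriptor is handed back (rx_desc->status = 0), because from that point on the hardware may rewrite the descriptor at any time. A minimal model of the pattern:

#include <stdint.h>

struct rx_desc {
	volatile uint8_t status;	/* DMA-visible; hardware-owned after recycle */
};

static int handle(struct rx_desc *d)
{
	uint8_t status = d->status;	/* single snapshot */

	d->status = 0;			/* descriptor goes back to hardware */
	return status & 0x01;		/* later tests use the snapshot only */
}

int main(void)
{
	struct rx_desc d = { .status = 0x03 };

	return handle(&d) ? 0 : 1;
}
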
@@ -1959,8 +1955,8 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
1959 1955
1960 num_group_tail_writes = IXGB_RX_BUFFER_WRITE; 1956 num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
1961 1957
1962 /* leave one descriptor unused */ 1958 /* leave three descriptors unused */
1963 while(--cleancount > 0) { 1959 while(--cleancount > 2) {
1964 rx_desc = IXGB_RX_DESC(*rx_ring, i); 1960 rx_desc = IXGB_RX_DESC(*rx_ring, i);
1965 1961
1966 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); 1962 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
@@ -1987,6 +1983,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
1987 PCI_DMA_FROMDEVICE); 1983 PCI_DMA_FROMDEVICE);
1988 1984
1989 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); 1985 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1986 /* guarantee DD bit not set now before h/w gets the descriptor;
1987 * this is the rest of the workaround for h/w double
1988 * writeback. */
1989 rx_desc->status = 0;
1990 1990
1991 if((i & ~(num_group_tail_writes- 1)) == i) { 1991 if((i & ~(num_group_tail_writes- 1)) == i) {
1992 /* Force memory writes to complete before letting h/w 1992 /* Force memory writes to complete before letting h/w
@@ -2099,54 +2099,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
2099 } 2099 }
2100} 2100}
2101 2101
2102/**
2103 * ixgb_notify_reboot - handles OS notification of reboot event.
2104 * @param nb notifier block, unused
2105 * @param event Event being passed to driver to act upon
2106 * @param p A pointer to our net device
2107 **/
2108static int
2109ixgb_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
2110{
2111 struct pci_dev *pdev = NULL;
2112
2113 switch(event) {
2114 case SYS_DOWN:
2115 case SYS_HALT:
2116 case SYS_POWER_OFF:
2117 while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
2118 if (pci_dev_driver(pdev) == &ixgb_driver)
2119 ixgb_suspend(pdev, 3);
2120 }
2121 }
2122 return NOTIFY_DONE;
2123}
2124
2125/**
2126 * ixgb_suspend - driver suspend function called from notify.
2127 * @param pdev pci driver structure used for passing to
2128 * @param state power state to enter
2129 **/
2130static int
2131ixgb_suspend(struct pci_dev *pdev, uint32_t state)
2132{
2133 struct net_device *netdev = pci_get_drvdata(pdev);
2134 struct ixgb_adapter *adapter = netdev->priv;
2135
2136 netif_device_detach(netdev);
2137
2138 if(netif_running(netdev))
2139 ixgb_down(adapter, TRUE);
2140
2141 pci_save_state(pdev);
2142
2143 state = (state > 0) ? 3 : 0;
2144 pci_set_power_state(pdev, state);
2145 msec_delay(200);
2146
2147 return 0;
2148}
2149
2150#ifdef CONFIG_NET_POLL_CONTROLLER 2102#ifdef CONFIG_NET_POLL_CONTROLLER
2151/* 2103/*
2152 * Polling 'interrupt' - used by things like netconsole to send skbs 2104 * Polling 'interrupt' - used by things like netconsole to send skbs
@@ -2157,6 +2109,7 @@ ixgb_suspend(struct pci_dev *pdev, uint32_t state)
2157static void ixgb_netpoll(struct net_device *dev) 2109static void ixgb_netpoll(struct net_device *dev)
2158{ 2110{
2159 struct ixgb_adapter *adapter = dev->priv; 2111 struct ixgb_adapter *adapter = dev->priv;
2112
2160 disable_irq(adapter->pdev->irq); 2113 disable_irq(adapter->pdev->irq);
2161 ixgb_intr(adapter->pdev->irq, dev, NULL); 2114 ixgb_intr(adapter->pdev->irq, dev, NULL);
2162 enable_irq(adapter->pdev->irq); 2115 enable_irq(adapter->pdev->irq);
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index 9eba92891901..dba20481ee80 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -45,8 +45,7 @@
45 /* Don't mdelay in interrupt context! */ \ 45 /* Don't mdelay in interrupt context! */ \
46 BUG(); \ 46 BUG(); \
47 } else { \ 47 } else { \
48 set_current_state(TASK_UNINTERRUPTIBLE); \ 48 msleep(x); \
49 schedule_timeout((x * HZ)/1000 + 2); \
50 } } while(0) 49 } } while(0)
51#endif 50#endif
52 51
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 17947e6c8793..13f114876965 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,8 +22,8 @@
22 *************************************************************************/ 22 *************************************************************************/
23 23
24#define DRV_NAME "pcnet32" 24#define DRV_NAME "pcnet32"
25#define DRV_VERSION "1.30i" 25#define DRV_VERSION "1.30j"
26#define DRV_RELDATE "06.28.2004" 26#define DRV_RELDATE "29.04.2005"
27#define PFX DRV_NAME ": " 27#define PFX DRV_NAME ": "
28 28
29static const char *version = 29static const char *version =
@@ -256,6 +256,7 @@ static int homepna[MAX_UNITS];
256 * homepna for selecting HomePNA mode for PCNet/Home 79C978. 256 * homepna for selecting HomePNA mode for PCNet/Home 79C978.
257 * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32. 257 * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
258 * v1.30i 28 Jun 2004 Don Fry change to use module_param. 258 * v1.30i 28 Jun 2004 Don Fry change to use module_param.
259 * v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test.
259 */ 260 */
260 261
261 262
@@ -395,6 +396,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev);
395static int pcnet32_get_regs_len(struct net_device *dev); 396static int pcnet32_get_regs_len(struct net_device *dev);
396static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 397static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
397 void *ptr); 398 void *ptr);
399static void pcnet32_purge_tx_ring(struct net_device *dev);
398 400
399enum pci_flags_bit { 401enum pci_flags_bit {
400 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, 402 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
@@ -785,6 +787,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1)
785 } 787 }
786 788
787clean_up: 789clean_up:
790 pcnet32_purge_tx_ring(dev);
788 x = a->read_csr(ioaddr, 15) & 0xFFFF; 791 x = a->read_csr(ioaddr, 15) & 0xFFFF;
789 a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */ 792 a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */
790 793
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f79b02e80e75..f10dd74988c4 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -61,8 +61,8 @@
61 61
62#define DRV_MODULE_NAME "tg3" 62#define DRV_MODULE_NAME "tg3"
63#define PFX DRV_MODULE_NAME ": " 63#define PFX DRV_MODULE_NAME ": "
64#define DRV_MODULE_VERSION "3.27" 64#define DRV_MODULE_VERSION "3.29"
65#define DRV_MODULE_RELDATE "May 5, 2005" 65#define DRV_MODULE_RELDATE "May 23, 2005"
66 66
67#define TG3_DEF_MAC_MODE 0 67#define TG3_DEF_MAC_MODE 0
68#define TG3_DEF_RX_MODE 0 68#define TG3_DEF_RX_MODE 0
@@ -206,6 +206,8 @@ static struct pci_device_id tg3_pci_tbl[] = {
206 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 206 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752, 207 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 208 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
210 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753, 211 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 212 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M, 213 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
@@ -420,7 +422,8 @@ static void tg3_enable_ints(struct tg3 *tp)
420{ 422{
421 tw32(TG3PCI_MISC_HOST_CTRL, 423 tw32(TG3PCI_MISC_HOST_CTRL,
422 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); 424 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
423 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000); 425 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
426 (tp->last_tag << 24));
424 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); 427 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
425 428
426 tg3_cond_int(tp); 429 tg3_cond_int(tp);
@@ -455,10 +458,16 @@ static void tg3_restart_ints(struct tg3 *tp)
455{ 458{
456 tw32(TG3PCI_MISC_HOST_CTRL, 459 tw32(TG3PCI_MISC_HOST_CTRL,
457 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); 460 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
458 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000); 461 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
462 tp->last_tag << 24);
459 mmiowb(); 463 mmiowb();
460 464
461 if (tg3_has_work(tp)) 465 /* When doing tagged status, this work check is unnecessary.
466 * The last_tag we write above tells the chip which piece of
467 * work we've completed.
468 */
469 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
470 tg3_has_work(tp))
462 tw32(HOSTCC_MODE, tp->coalesce_mode | 471 tw32(HOSTCC_MODE, tp->coalesce_mode |
463 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); 472 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
464} 473}
@@ -2500,7 +2509,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2500 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 2509 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2501 if (netif_carrier_ok(tp->dev)) { 2510 if (netif_carrier_ok(tp->dev)) {
2502 tw32(HOSTCC_STAT_COAL_TICKS, 2511 tw32(HOSTCC_STAT_COAL_TICKS,
2503 DEFAULT_STAT_COAL_TICKS); 2512 tp->coal.stats_block_coalesce_usecs);
2504 } else { 2513 } else {
2505 tw32(HOSTCC_STAT_COAL_TICKS, 0); 2514 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2506 } 2515 }
@@ -2886,7 +2895,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
2886 * All RX "locking" is done by ensuring outside 2895 * All RX "locking" is done by ensuring outside
2887 * code synchronizes with dev->poll() 2896 * code synchronizes with dev->poll()
2888 */ 2897 */
2889 done = 1;
2890 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) { 2898 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2891 int orig_budget = *budget; 2899 int orig_budget = *budget;
2892 int work_done; 2900 int work_done;
@@ -2898,12 +2906,14 @@ static int tg3_poll(struct net_device *netdev, int *budget)
2898 2906
2899 *budget -= work_done; 2907 *budget -= work_done;
2900 netdev->quota -= work_done; 2908 netdev->quota -= work_done;
2901
2902 if (work_done >= orig_budget)
2903 done = 0;
2904 } 2909 }
2905 2910
2911 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
2912 tp->last_tag = sblk->status_tag;
2913 rmb();
2914
2906 /* if no more work, tell net stack and NIC we're done */ 2915 /* if no more work, tell net stack and NIC we're done */
2916 done = !tg3_has_work(tp);
2907 if (done) { 2917 if (done) {
2908 spin_lock_irqsave(&tp->lock, flags); 2918 spin_lock_irqsave(&tp->lock, flags);
2909 __netif_rx_complete(netdev); 2919 __netif_rx_complete(netdev);
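
The poll-side half of tagged status is ordering-sensitive: last_tag is captured from the status block and an rmb() follows, so the later tg3_has_work() test cannot observe status-block state older than the tag about to be acknowledged. A loose userspace rendering (the fence stands in for rmb()):

#include <stdint.h>

static volatile uint32_t status_tag;	/* written by "hardware" */
static uint32_t last_tag;

static void poll_epilogue(void)
{
	last_tag = status_tag;			 /* capture the tag first */
	__atomic_thread_fence(__ATOMIC_ACQUIRE); /* stands in for rmb() */
	/* ... tg3_has_work()-style checks happen after the fence ... */
}

int main(void)
{
	poll_epilogue();
	return (int)last_tag;
}
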
@@ -2928,22 +2938,21 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
2928 spin_lock_irqsave(&tp->lock, flags); 2938 spin_lock_irqsave(&tp->lock, flags);
2929 2939
2930 /* 2940 /*
2931 * writing any value to intr-mbox-0 clears PCI INTA# and 2941 * Writing any value to intr-mbox-0 clears PCI INTA# and
2932 * chip-internal interrupt pending events. 2942 * chip-internal interrupt pending events.
2933 * writing non-zero to intr-mbox-0 additional tells the 2943 * Writing non-zero to intr-mbox-0 additionally tells the
2934 * NIC to stop sending us irqs, engaging "in-intr-handler" 2944 * NIC to stop sending us irqs, engaging "in-intr-handler"
2935 * event coalescing. 2945 * event coalescing.
2936 */ 2946 */
2937 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 2947 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
2948 tp->last_tag = sblk->status_tag;
2938 sblk->status &= ~SD_STATUS_UPDATED; 2949 sblk->status &= ~SD_STATUS_UPDATED;
2939
2940 if (likely(tg3_has_work(tp))) 2950 if (likely(tg3_has_work(tp)))
2941 netif_rx_schedule(dev); /* schedule NAPI poll */ 2951 netif_rx_schedule(dev); /* schedule NAPI poll */
2942 else { 2952 else {
2943 /* no work, re-enable interrupts 2953 /* No work, re-enable interrupts. */
2944 */
2945 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 2954 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2946 0x00000000); 2955 tp->last_tag << 24);
2947 } 2956 }
2948 2957
2949 spin_unlock_irqrestore(&tp->lock, flags); 2958 spin_unlock_irqrestore(&tp->lock, flags);
@@ -2969,21 +2978,62 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2969 if ((sblk->status & SD_STATUS_UPDATED) || 2978 if ((sblk->status & SD_STATUS_UPDATED) ||
2970 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 2979 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2971 /* 2980 /*
2972 * writing any value to intr-mbox-0 clears PCI INTA# and 2981 * Writing any value to intr-mbox-0 clears PCI INTA# and
2973 * chip-internal interrupt pending events. 2982 * chip-internal interrupt pending events.
2974 * writing non-zero to intr-mbox-0 additional tells the 2983 * Writing non-zero to intr-mbox-0 additionally tells the
2975 * NIC to stop sending us irqs, engaging "in-intr-handler" 2984 * NIC to stop sending us irqs, engaging "in-intr-handler"
2976 * event coalescing. 2985 * event coalescing.
2977 */ 2986 */
2978 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 2987 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2979 0x00000001); 2988 0x00000001);
2989 sblk->status &= ~SD_STATUS_UPDATED;
2990 if (likely(tg3_has_work(tp)))
2991 netif_rx_schedule(dev); /* schedule NAPI poll */
2992 else {
2993 /* No work, shared interrupt perhaps? re-enable
2994 * interrupts, and flush that PCI write
2995 */
2996 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2997 0x00000000);
2998 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2999 }
3000 } else { /* shared interrupt */
3001 handled = 0;
3002 }
3003
3004 spin_unlock_irqrestore(&tp->lock, flags);
3005
3006 return IRQ_RETVAL(handled);
3007}
3008
3009static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3010{
3011 struct net_device *dev = dev_id;
3012 struct tg3 *tp = netdev_priv(dev);
3013 struct tg3_hw_status *sblk = tp->hw_status;
3014 unsigned long flags;
3015 unsigned int handled = 1;
3016
3017 spin_lock_irqsave(&tp->lock, flags);
3018
3019 /* In INTx mode, it is possible for the interrupt to arrive at
3020 * the CPU before the status block posted prior to the interrupt.
3021 * Reading the PCI State register will confirm whether the
3022 * interrupt is ours and will flush the status block.
3023 */
3024 if ((sblk->status & SD_STATUS_UPDATED) ||
3025 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2980 /* 3026 /*
2981 * Flush PCI write. This also guarantees that our 3027 * writing any value to intr-mbox-0 clears PCI INTA# and
2982 * status block has been flushed to host memory. 3028 * chip-internal interrupt pending events.
3029 * writing non-zero to intr-mbox-0 additionally tells the
3030 * NIC to stop sending us irqs, engaging "in-intr-handler"
3031 * event coalescing.
2983 */ 3032 */
2984 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); 3033 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3034 0x00000001);
3035 tp->last_tag = sblk->status_tag;
2985 sblk->status &= ~SD_STATUS_UPDATED; 3036 sblk->status &= ~SD_STATUS_UPDATED;
2986
2987 if (likely(tg3_has_work(tp))) 3037 if (likely(tg3_has_work(tp)))
2988 netif_rx_schedule(dev); /* schedule NAPI poll */ 3038 netif_rx_schedule(dev); /* schedule NAPI poll */
2989 else { 3039 else {
@@ -2991,7 +3041,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2991 * interrupts, and flush that PCI write 3041 * interrupts, and flush that PCI write
2992 */ 3042 */
2993 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3043 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2994 0x00000000); 3044 tp->last_tag << 24);
2995 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); 3045 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2996 } 3046 }
2997 } else { /* shared interrupt */ 3047 } else { /* shared interrupt */
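
Throughout these hunks the interrupt mailbox does double duty: writing 1 masks further interrupts, while writing last_tag << 24 both unmasks and tells the chip which status tag has been fully processed. The encoding, in isolation:

#include <stdint.h>
#include <stdio.h>

static uint32_t mbox_mask(void)
{
	return 0x00000001u;		/* engage in-intr-handler coalescing */
}

static uint32_t mbox_ack(uint8_t last_tag)
{
	return (uint32_t)last_tag << 24;	/* tag lives in bits 31:24 */
}

int main(void)
{
	printf("mask:     %#010x\n", mbox_mask());
	printf("ack 0x5a: %#010x\n", mbox_ack(0x5a));
	return 0;
}
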
@@ -5044,6 +5094,27 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5044} 5094}
5045 5095
5046static void __tg3_set_rx_mode(struct net_device *); 5096static void __tg3_set_rx_mode(struct net_device *);
5097static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5098{
5099 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5100 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5101 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5102 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5103 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5104 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5105 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5106 }
5107 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5108 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5109 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5110 u32 val = ec->stats_block_coalesce_usecs;
5111
5112 if (!netif_carrier_ok(tp->dev))
5113 val = 0;
5114
5115 tw32(HOSTCC_STAT_COAL_TICKS, val);
5116 }
5117}
5047 5118
5048/* tp->lock is held. */ 5119/* tp->lock is held. */
5049static int tg3_reset_hw(struct tg3 *tp) 5120static int tg3_reset_hw(struct tg3 *tp)
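
tg3_set_coalesce() centralizes what tg3_reset_hw() and tg3_setup_phy() previously hard-coded; the one conditional value is the statistics timer, which is forced to zero while the link is down. Sketched on its own:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t stat_coal_ticks(uint32_t usecs, bool carrier_ok)
{
	return carrier_ok ? usecs : 0;	/* no stats coalescing while link is down */
}

int main(void)
{
	printf("%u\n", stat_coal_ticks(1000000, true));	 /* programmed value */
	printf("%u\n", stat_coal_ticks(1000000, false)); /* gated off */
	return 0;
}
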
@@ -5366,16 +5437,7 @@ static int tg3_reset_hw(struct tg3 *tp)
5366 udelay(10); 5437 udelay(10);
5367 } 5438 }
5368 5439
5369 tw32(HOSTCC_RXCOL_TICKS, 0); 5440 tg3_set_coalesce(tp, &tp->coal);
5370 tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5371 tw32(HOSTCC_RXMAX_FRAMES, 1);
5372 tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5373 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5374 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5375 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5376 }
5377 tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5378 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5379 5441
5380 /* set status block DMA address */ 5442 /* set status block DMA address */
5381 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 5443 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
@@ -5388,8 +5450,6 @@ static int tg3_reset_hw(struct tg3 *tp)
5388 * the tg3_periodic_fetch_stats call there, and 5450 * the tg3_periodic_fetch_stats call there, and
5389 * tg3_get_stats to see how this works for 5705/5750 chips. 5451 * tg3_get_stats to see how this works for 5705/5750 chips.
5390 */ 5452 */
5391 tw32(HOSTCC_STAT_COAL_TICKS,
5392 DEFAULT_STAT_COAL_TICKS);
5393 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 5453 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5394 ((u64) tp->stats_mapping >> 32)); 5454 ((u64) tp->stats_mapping >> 32));
5395 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 5455 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
@@ -5445,7 +5505,8 @@ static int tg3_reset_hw(struct tg3 *tp)
5445 udelay(100); 5505 udelay(100);
5446 5506
5447 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); 5507 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5448 tr32(MAILBOX_INTERRUPT_0); 5508 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5509 tp->last_tag = 0;
5449 5510
5450 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 5511 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5451 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 5512 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -5723,31 +5784,33 @@ static void tg3_timer(unsigned long __opaque)
5723 spin_lock_irqsave(&tp->lock, flags); 5784 spin_lock_irqsave(&tp->lock, flags);
5724 spin_lock(&tp->tx_lock); 5785 spin_lock(&tp->tx_lock);
5725 5786
5726 /* All of this garbage is because when using non-tagged 5787 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
5727 * IRQ status the mailbox/status_block protocol the chip 5788 /* All of this garbage is because when using non-tagged
5728 * uses with the cpu is race prone. 5789 * IRQ status the mailbox/status_block protocol the chip
5729 */ 5790 * uses with the cpu is race prone.
5730 if (tp->hw_status->status & SD_STATUS_UPDATED) { 5791 */
5731 tw32(GRC_LOCAL_CTRL, 5792 if (tp->hw_status->status & SD_STATUS_UPDATED) {
5732 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 5793 tw32(GRC_LOCAL_CTRL,
5733 } else { 5794 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5734 tw32(HOSTCC_MODE, tp->coalesce_mode | 5795 } else {
5735 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); 5796 tw32(HOSTCC_MODE, tp->coalesce_mode |
5736 } 5797 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5798 }
5737 5799
5738 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 5800 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5739 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; 5801 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5740 spin_unlock(&tp->tx_lock); 5802 spin_unlock(&tp->tx_lock);
5741 spin_unlock_irqrestore(&tp->lock, flags); 5803 spin_unlock_irqrestore(&tp->lock, flags);
5742 schedule_work(&tp->reset_task); 5804 schedule_work(&tp->reset_task);
5743 return; 5805 return;
5806 }
5744 } 5807 }
5745 5808
5746 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5747 tg3_periodic_fetch_stats(tp);
5748
5749 /* This part only runs once per second. */ 5809 /* This part only runs once per second. */
5750 if (!--tp->timer_counter) { 5810 if (!--tp->timer_counter) {
5811 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5812 tg3_periodic_fetch_stats(tp);
5813
5751 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { 5814 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5752 u32 mac_stat; 5815 u32 mac_stat;
5753 int phy_event; 5816 int phy_event;
@@ -5846,9 +5909,13 @@ static int tg3_test_interrupt(struct tg3 *tp)
5846 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) 5909 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5847 err = request_irq(tp->pdev->irq, tg3_msi, 5910 err = request_irq(tp->pdev->irq, tg3_msi,
5848 SA_SAMPLE_RANDOM, dev->name, dev); 5911 SA_SAMPLE_RANDOM, dev->name, dev);
5849 else 5912 else {
5850 err = request_irq(tp->pdev->irq, tg3_interrupt, 5913 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
5914 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
5915 fn = tg3_interrupt_tagged;
5916 err = request_irq(tp->pdev->irq, fn,
5851 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); 5917 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5918 }
5852 5919
5853 if (err) 5920 if (err)
5854 return err; 5921 return err;
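The same handler selection is open-coded again in tg3_test_msi() and tg3_open() below. A small helper — not part of the patch — would capture the whole pattern, including the differing SA_* flags for the MSI case:

/* Hypothetical consolidation of the request_irq() selection that the
 * patch repeats in three places.  MSI vectors are never shared, so
 * that branch drops SA_SHIRQ.
 */
static int tg3_request_irq_sketch(struct tg3 *tp, struct net_device *dev)
{
	irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
	unsigned long irqflags = SA_SHIRQ | SA_SAMPLE_RANDOM;

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		fn = tg3_msi;
		irqflags = SA_SAMPLE_RANDOM;
	} else if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
		fn = tg3_interrupt_tagged;

	return request_irq(tp->pdev->irq, fn, irqflags, dev->name, dev);
}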
@@ -5900,9 +5967,14 @@ static int tg3_test_msi(struct tg3 *tp)
5900 5967
5901 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 5968 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5902 5969
5903 err = request_irq(tp->pdev->irq, tg3_interrupt, 5970 {
5904 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); 5971 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
5972 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
5973 fn = tg3_interrupt_tagged;
5905 5974
5975 err = request_irq(tp->pdev->irq, fn,
5976 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5977 }
5906 if (err) 5978 if (err)
5907 return err; 5979 return err;
5908 5980
@@ -5948,7 +6020,13 @@ static int tg3_open(struct net_device *dev)
5948 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 6020 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5949 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) && 6021 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
5950 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) { 6022 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
5951 if (pci_enable_msi(tp->pdev) == 0) { 6023 /* All MSI supporting chips should support tagged
6024 * status. Assert that this is the case.
6025 */
6026 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6027 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6028 "Not using MSI.\n", tp->dev->name);
6029 } else if (pci_enable_msi(tp->pdev) == 0) {
5952 u32 msi_mode; 6030 u32 msi_mode;
5953 6031
5954 msi_mode = tr32(MSGINT_MODE); 6032 msi_mode = tr32(MSGINT_MODE);
@@ -5959,9 +6037,14 @@ static int tg3_open(struct net_device *dev)
5959 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) 6037 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5960 err = request_irq(tp->pdev->irq, tg3_msi, 6038 err = request_irq(tp->pdev->irq, tg3_msi,
5961 SA_SAMPLE_RANDOM, dev->name, dev); 6039 SA_SAMPLE_RANDOM, dev->name, dev);
5962 else 6040 else {
5963 err = request_irq(tp->pdev->irq, tg3_interrupt, 6041 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6042 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6043 fn = tg3_interrupt_tagged;
6044
6045 err = request_irq(tp->pdev->irq, fn,
5964 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); 6046 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6047 }
5965 6048
5966 if (err) { 6049 if (err) {
5967 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 6050 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -5980,9 +6063,16 @@ static int tg3_open(struct net_device *dev)
5980 tg3_halt(tp, 1); 6063 tg3_halt(tp, 1);
5981 tg3_free_rings(tp); 6064 tg3_free_rings(tp);
5982 } else { 6065 } else {
5983 tp->timer_offset = HZ / 10; 6066 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
5984 tp->timer_counter = tp->timer_multiplier = 10; 6067 tp->timer_offset = HZ;
5985 tp->asf_counter = tp->asf_multiplier = (10 * 120); 6068 else
6069 tp->timer_offset = HZ / 10;
6070
6071 BUG_ON(tp->timer_offset > HZ);
6072 tp->timer_counter = tp->timer_multiplier =
6073 (HZ / tp->timer_offset);
6074 tp->asf_counter = tp->asf_multiplier =
6075 ((HZ / tp->timer_offset) * 120);
5986 6076
5987 init_timer(&tp->timer); 6077 init_timer(&tp->timer);
5988 tp->timer.expires = jiffies + tp->timer_offset; 6078 tp->timer.expires = jiffies + tp->timer_offset;
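The timer arithmetic is easiest to see with concrete numbers, say HZ = 1000: tagged status sets timer_offset = HZ = 1000 jiffies, so the watchdog fires once a second and timer_counter = HZ / timer_offset = 1 runs the once-per-second block on every firing; non-tagged status keeps timer_offset = HZ / 10 = 100 jiffies (ten firings a second, needed for the interrupt-race workaround) with timer_counter = 10. Either way the ASF heartbeat interval is unchanged: 120 ticks at one second each, or 1200 ticks at 100 ms each, both come to 120 seconds.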
@@ -6005,6 +6095,7 @@ static int tg3_open(struct net_device *dev)
6005 6095
6006 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 6096 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6007 err = tg3_test_msi(tp); 6097 err = tg3_test_msi(tp);
6098
6008 if (err) { 6099 if (err) {
6009 spin_lock_irq(&tp->lock); 6100 spin_lock_irq(&tp->lock);
6010 spin_lock(&tp->tx_lock); 6101 spin_lock(&tp->tx_lock);
@@ -7203,6 +7294,14 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7203} 7294}
7204#endif 7295#endif
7205 7296
7297static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
7298{
7299 struct tg3 *tp = netdev_priv(dev);
7300
7301 memcpy(ec, &tp->coal, sizeof(*ec));
7302 return 0;
7303}
7304
7206static struct ethtool_ops tg3_ethtool_ops = { 7305static struct ethtool_ops tg3_ethtool_ops = {
7207 .get_settings = tg3_get_settings, 7306 .get_settings = tg3_get_settings,
7208 .set_settings = tg3_set_settings, 7307 .set_settings = tg3_set_settings,
@@ -7235,6 +7334,7 @@ static struct ethtool_ops tg3_ethtool_ops = {
7235 .get_strings = tg3_get_strings, 7334 .get_strings = tg3_get_strings,
7236 .get_stats_count = tg3_get_stats_count, 7335 .get_stats_count = tg3_get_stats_count,
7237 .get_ethtool_stats = tg3_get_ethtool_stats, 7336 .get_ethtool_stats = tg3_get_ethtool_stats,
7337 .get_coalesce = tg3_get_coalesce,
7238}; 7338};
7239 7339
7240static void __devinit tg3_get_eeprom_size(struct tg3 *tp) 7340static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
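With .get_coalesce wired into tg3_ethtool_ops, `ethtool -c ethX` now reports these values. For reference, a minimal standalone query — plain SIOCETHTOOL usage, nothing tg3-specific, with the interface name hard-coded purely as an example:

/* Userspace sketch: issue ETHTOOL_GCOALESCE, which lands in
 * tg3_get_coalesce() for a tg3-driven interface.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ec, 0, sizeof(ec));
	memset(&ifr, 0, sizeof(ifr));
	ec.cmd = ETHTOOL_GCOALESCE;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *) &ec;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCOALESCE");
		return 1;
	}
	printf("rx-usecs %u rx-frames %u tx-usecs %u tx-frames %u\n",
	       ec.rx_coalesce_usecs, ec.rx_max_coalesced_frames,
	       ec.tx_coalesce_usecs, ec.tx_max_coalesced_frames);
	return 0;
}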
@@ -8422,15 +8522,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
8422 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 8522 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8423 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 8523 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8424 8524
8425 /* Only 5701 and later support tagged irq status mode.
8426 * Also, 5788 chips cannot use tagged irq status.
8427 *
8428 * However, since we are using NAPI avoid tagged irq status
8429 * because the interrupt condition is more difficult to
8430 * fully clear in that mode.
8431 */
8432 tp->coalesce_mode = 0; 8525 tp->coalesce_mode = 0;
8433
8434 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && 8526 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8435 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) 8527 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8436 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 8528 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
@@ -8494,6 +8586,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
8494 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 8586 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8495 tp->tg3_flags2 |= TG3_FLG2_IS_5788; 8587 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8496 8588
8589 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8590 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
8591 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
8592 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
8593 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
8594 HOSTCC_MODE_CLRTICK_TXBD);
8595
8596 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
8597 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8598 tp->misc_host_ctrl);
8599 }
8600
8497 /* these are limited to 10/100 only */ 8601 /* these are limited to 10/100 only */
8498 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && 8602 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8499 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || 8603 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -8671,6 +8775,146 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
8671 return 0; 8775 return 0;
8672} 8776}
8673 8777
8778#define BOUNDARY_SINGLE_CACHELINE 1
8779#define BOUNDARY_MULTI_CACHELINE 2
8780
8781static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
8782{
8783 int cacheline_size;
8784 u8 byte;
8785 int goal;
8786
8787 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8788 if (byte == 0)
8789 cacheline_size = 1024;
8790 else
8791 cacheline_size = (int) byte * 4;
8792
8793 /* On 5703 and later chips, the boundary bits have no
8794 * effect.
8795 */
8796 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8797 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
8798 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8799 goto out;
8800
8801#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
8802 goal = BOUNDARY_MULTI_CACHELINE;
8803#else
8804#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
8805 goal = BOUNDARY_SINGLE_CACHELINE;
8806#else
8807 goal = 0;
8808#endif
8809#endif
8810
8811 if (!goal)
8812 goto out;
8813
8814 /* PCI controllers on most RISC systems tend to disconnect
8815 * when a device tries to burst across a cache-line boundary.
8816 * Therefore, letting tg3 do so just wastes PCI bandwidth.
8817 *
8818 * Unfortunately, for PCI-E there are only limited
8819 * write-side controls for this, and thus for reads
8820 * we will still get the disconnects. We'll also waste
8821 * these PCI cycles for both read and write for chips
8822 * other than 5700 and 5701 which do not implement the
8823 * boundary bits.
8824 */
8825 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8826 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8827 switch (cacheline_size) {
8828 case 16:
8829 case 32:
8830 case 64:
8831 case 128:
8832 if (goal == BOUNDARY_SINGLE_CACHELINE) {
8833 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
8834 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
8835 } else {
8836 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
8837 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
8838 }
8839 break;
8840
8841 case 256:
8842 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
8843 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
8844 break;
8845
8846 default:
8847 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
8848 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
8849 break;
8850 }
8851 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8852 switch (cacheline_size) {
8853 case 16:
8854 case 32:
8855 case 64:
8856 if (goal == BOUNDARY_SINGLE_CACHELINE) {
8857 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
8858 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
8859 break;
8860 }
8861 /* fallthrough */
8862 case 128:
8863 default:
8864 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
8865 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8866 break;
8867 }
8868 } else {
8869 switch (cacheline_size) {
8870 case 16:
8871 if (goal == BOUNDARY_SINGLE_CACHELINE) {
8872 val |= (DMA_RWCTRL_READ_BNDRY_16 |
8873 DMA_RWCTRL_WRITE_BNDRY_16);
8874 break;
8875 }
8876 /* fallthrough */
8877 case 32:
8878 if (goal == BOUNDARY_SINGLE_CACHELINE) {
8879 val |= (DMA_RWCTRL_READ_BNDRY_32 |
8880 DMA_RWCTRL_WRITE_BNDRY_32);
8881 break;
8882 }
8883 /* fallthrough */
8884 case 64:
8885 if (goal == BOUNDARY_SINGLE_CACHELINE) {
8886 val |= (DMA_RWCTRL_READ_BNDRY_64 |
8887 DMA_RWCTRL_WRITE_BNDRY_64);
8888 break;
8889 }
8890 /* fallthrough */
8891 case 128:
8892 if (goal == BOUNDARY_SINGLE_CACHELINE) {
8893 val |= (DMA_RWCTRL_READ_BNDRY_128 |
8894 DMA_RWCTRL_WRITE_BNDRY_128);
8895 break;
8896 }
8897 /* fallthrough */
8898 case 256:
8899 val |= (DMA_RWCTRL_READ_BNDRY_256 |
8900 DMA_RWCTRL_WRITE_BNDRY_256);
8901 break;
8902 case 512:
8903 val |= (DMA_RWCTRL_READ_BNDRY_512 |
8904 DMA_RWCTRL_WRITE_BNDRY_512);
8905 break;
8906 case 1024:
8907 default:
8908 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
8909 DMA_RWCTRL_WRITE_BNDRY_1024);
8910 break;
8911 }
8912 }
8913
8914out:
8915 return val;
8916}
8917
8674static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) 8918static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8675{ 8919{
8676 struct tg3_internal_buffer_desc test_desc; 8920 struct tg3_internal_buffer_desc test_desc;
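Tracing tg3_calc_dma_bndry() with concrete inputs helps: PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so a config-space value of 0x10 gives cacheline_size = 16 * 4 = 64 bytes (and a value of 0 is treated as 1024). On x86 none of the listed architectures match, goal stays 0, and val is returned untouched. On a sparc64 host (goal = BOUNDARY_SINGLE_CACHELINE) a plain-PCI 5700 with that 64-byte line lands in case 64 of the last switch and gets DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64; the same host in PCI-X mode takes the first switch instead and gets the 128-byte PCI-X boundaries.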
@@ -8752,12 +8996,12 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
8752 return ret; 8996 return ret;
8753} 8997}
8754 8998
8755#define TEST_BUFFER_SIZE 0x400 8999#define TEST_BUFFER_SIZE 0x2000
8756 9000
8757static int __devinit tg3_test_dma(struct tg3 *tp) 9001static int __devinit tg3_test_dma(struct tg3 *tp)
8758{ 9002{
8759 dma_addr_t buf_dma; 9003 dma_addr_t buf_dma;
8760 u32 *buf; 9004 u32 *buf, saved_dma_rwctrl;
8761 int ret; 9005 int ret;
8762 9006
8763 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 9007 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
@@ -8769,46 +9013,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8769 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | 9013 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8770 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); 9014 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8771 9015
8772#ifndef CONFIG_X86 9016 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
8773 {
8774 u8 byte;
8775 int cacheline_size;
8776 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8777
8778 if (byte == 0)
8779 cacheline_size = 1024;
8780 else
8781 cacheline_size = (int) byte * 4;
8782
8783 switch (cacheline_size) {
8784 case 16:
8785 case 32:
8786 case 64:
8787 case 128:
8788 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8789 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8790 tp->dma_rwctrl |=
8791 DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8792 break;
8793 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8794 tp->dma_rwctrl &=
8795 ~(DMA_RWCTRL_PCI_WRITE_CMD);
8796 tp->dma_rwctrl |=
8797 DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8798 break;
8799 }
8800 /* fallthrough */
8801 case 256:
8802 if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8803 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8804 tp->dma_rwctrl |=
8805 DMA_RWCTRL_WRITE_BNDRY_256;
8806 else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8807 tp->dma_rwctrl |=
8808 DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8809 };
8810 }
8811#endif
8812 9017
8813 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 9018 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8814 /* DMA read watermark not used on PCIE */ 9019 /* DMA read watermark not used on PCIE */
@@ -8827,7 +9032,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8827 if (ccval == 0x6 || ccval == 0x7) 9032 if (ccval == 0x6 || ccval == 0x7)
8828 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 9033 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8829 9034
8830 /* Set bit 23 to renable PCIX hw bug fix */ 9035 /* Set bit 23 to enable PCIX hw bug fix */
8831 tp->dma_rwctrl |= 0x009f0000; 9036 tp->dma_rwctrl |= 0x009f0000;
8832 } else { 9037 } else {
8833 tp->dma_rwctrl |= 0x001b000f; 9038 tp->dma_rwctrl |= 0x001b000f;
@@ -8868,6 +9073,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8868 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) 9073 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8869 goto out; 9074 goto out;
8870 9075
9076 /* It is best to perform the DMA test with maximum write burst size
9077 * to expose the 5700/5701 write DMA bug.
9078 */
9079 saved_dma_rwctrl = tp->dma_rwctrl;
9080 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9081 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9082
8871 while (1) { 9083 while (1) {
8872 u32 *p = buf, i; 9084 u32 *p = buf, i;
8873 9085
@@ -8906,8 +9118,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8906 if (p[i] == i) 9118 if (p[i] == i)
8907 continue; 9119 continue;
8908 9120
8909 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) == 9121 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
8910 DMA_RWCTRL_WRITE_BNDRY_DISAB) { 9122 DMA_RWCTRL_WRITE_BNDRY_16) {
9123 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
8911 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 9124 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8912 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 9125 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8913 break; 9126 break;
@@ -8924,6 +9137,14 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8924 break; 9137 break;
8925 } 9138 }
8926 } 9139 }
9140 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9141 DMA_RWCTRL_WRITE_BNDRY_16) {
9142 /* DMA test passed without adjusting the DMA boundary;
9143 * just restore the calculated DMA boundary.
9144 */
9145 tp->dma_rwctrl = saved_dma_rwctrl;
9146 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9147 }
8927 9148
8928out: 9149out:
8929 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); 9150 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
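Putting the tg3_test_dma() changes together: the driver first computes its preferred boundary setting via tg3_calc_dma_bndry(), then deliberately clears DMA_RWCTRL_WRITE_BNDRY_MASK (maximum write burst) before running the loop, since that is the configuration most likely to trip the 5700/5701 write-DMA bug. If the buffer comparison detects corruption, the fallback is a forced 16-byte write boundary; if the test passes, the saved boundary value is restored. The eightfold TEST_BUFFER_SIZE increase (0x400 to 0x2000) presumably gives the larger bursts more boundary crossings to stumble over during the test.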
@@ -9011,6 +9232,31 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
9011 return peer; 9232 return peer;
9012} 9233}
9013 9234
9235static void __devinit tg3_init_coal(struct tg3 *tp)
9236{
9237 struct ethtool_coalesce *ec = &tp->coal;
9238
9239 memset(ec, 0, sizeof(*ec));
9240 ec->cmd = ETHTOOL_GCOALESCE;
9241 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
9242 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
9243 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
9244 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
9245 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
9246 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
9247 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
9248 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
9249 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
9250
9251 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
9252 HOSTCC_MODE_CLRTICK_TXBD)) {
9253 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
9254 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
9255 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
9256 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
9257 }
9258}
9259
9014static int __devinit tg3_init_one(struct pci_dev *pdev, 9260static int __devinit tg3_init_one(struct pci_dev *pdev,
9015 const struct pci_device_id *ent) 9261 const struct pci_device_id *ent)
9016{ 9262{
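Plugging in the tg3.h constants from later in this patch, and recalling that these tick values surface as microseconds through the ethtool fields: a chip without the CLRTICK bits defaults to rx 0x32 = 50 usec and tx 0x96 = 150 usec coalescing ticks, while a tagged-status chip (which gained HOSTCC_MODE_CLRTICK_RXBD/TXBD in tg3_get_invariants() above) drops to the _CLRTCKS values, rx 0x14 = 20 usec and tx 0x48 = 72 usec, with the in-IRQ tick limits likewise cut from 0x19 = 25 usec to 0x14 = 20 usec.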
@@ -9256,6 +9502,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
9256 /* flow control autonegotiation is default behavior */ 9502 /* flow control autonegotiation is default behavior */
9257 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 9503 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9258 9504
9505 tg3_init_coal(tp);
9506
9259 err = register_netdev(dev); 9507 err = register_netdev(dev);
9260 if (err) { 9508 if (err) {
9261 printk(KERN_ERR PFX "Cannot register net device, " 9509 printk(KERN_ERR PFX "Cannot register net device, "
@@ -9298,6 +9546,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
9298 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0, 9546 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
9299 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0, 9547 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
9300 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); 9548 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
9549 printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
9550 dev->name, tp->dma_rwctrl);
9301 9551
9302 return 0; 9552 return 0;
9303 9553
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8de6f21037ba..993f84c93dc4 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -876,10 +876,12 @@
876#define HOSTCC_STATUS_ERROR_ATTN 0x00000004 876#define HOSTCC_STATUS_ERROR_ATTN 0x00000004
877#define HOSTCC_RXCOL_TICKS 0x00003c08 877#define HOSTCC_RXCOL_TICKS 0x00003c08
878#define LOW_RXCOL_TICKS 0x00000032 878#define LOW_RXCOL_TICKS 0x00000032
879#define LOW_RXCOL_TICKS_CLRTCKS 0x00000014
879#define DEFAULT_RXCOL_TICKS 0x00000048 880#define DEFAULT_RXCOL_TICKS 0x00000048
880#define HIGH_RXCOL_TICKS 0x00000096 881#define HIGH_RXCOL_TICKS 0x00000096
881#define HOSTCC_TXCOL_TICKS 0x00003c0c 882#define HOSTCC_TXCOL_TICKS 0x00003c0c
882#define LOW_TXCOL_TICKS 0x00000096 883#define LOW_TXCOL_TICKS 0x00000096
884#define LOW_TXCOL_TICKS_CLRTCKS 0x00000048
883#define DEFAULT_TXCOL_TICKS 0x0000012c 885#define DEFAULT_TXCOL_TICKS 0x0000012c
884#define HIGH_TXCOL_TICKS 0x00000145 886#define HIGH_TXCOL_TICKS 0x00000145
885#define HOSTCC_RXMAX_FRAMES 0x00003c10 887#define HOSTCC_RXMAX_FRAMES 0x00003c10
@@ -892,8 +894,10 @@
892#define HIGH_TXMAX_FRAMES 0x00000052 894#define HIGH_TXMAX_FRAMES 0x00000052
893#define HOSTCC_RXCOAL_TICK_INT 0x00003c18 895#define HOSTCC_RXCOAL_TICK_INT 0x00003c18
894#define DEFAULT_RXCOAL_TICK_INT 0x00000019 896#define DEFAULT_RXCOAL_TICK_INT 0x00000019
897#define DEFAULT_RXCOAL_TICK_INT_CLRTCKS 0x00000014
895#define HOSTCC_TXCOAL_TICK_INT 0x00003c1c 898#define HOSTCC_TXCOAL_TICK_INT 0x00003c1c
896#define DEFAULT_TXCOAL_TICK_INT 0x00000019 899#define DEFAULT_TXCOAL_TICK_INT 0x00000019
900#define DEFAULT_TXCOAL_TICK_INT_CLRTCKS 0x00000014
897#define HOSTCC_RXCOAL_MAXF_INT 0x00003c20 901#define HOSTCC_RXCOAL_MAXF_INT 0x00003c20
898#define DEFAULT_RXCOAL_MAXF_INT 0x00000005 902#define DEFAULT_RXCOAL_MAXF_INT 0x00000005
899#define HOSTCC_TXCOAL_MAXF_INT 0x00003c24 903#define HOSTCC_TXCOAL_MAXF_INT 0x00003c24
@@ -2023,6 +2027,7 @@ struct tg3 {
2023 2027
2024 struct tg3_hw_status *hw_status; 2028 struct tg3_hw_status *hw_status;
2025 dma_addr_t status_mapping; 2029 dma_addr_t status_mapping;
2030 u32 last_tag;
2026 2031
2027 u32 msg_enable; 2032 u32 msg_enable;
2028 2033
@@ -2068,6 +2073,7 @@ struct tg3 {
2068 2073
2069 u32 rx_offset; 2074 u32 rx_offset;
2070 u32 tg3_flags; 2075 u32 tg3_flags;
2076#define TG3_FLAG_TAGGED_STATUS 0x00000001
2071#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002 2077#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
2072#define TG3_FLAG_RX_CHECKSUMS 0x00000004 2078#define TG3_FLAG_RX_CHECKSUMS 0x00000004
2073#define TG3_FLAG_USE_LINKCHG_REG 0x00000008 2079#define TG3_FLAG_USE_LINKCHG_REG 0x00000008
@@ -2225,7 +2231,7 @@ struct tg3 {
2225 2231
2226#define SST_25VF0X0_PAGE_SIZE 4098 2232#define SST_25VF0X0_PAGE_SIZE 4098
2227 2233
2228 2234 struct ethtool_coalesce coal;
2229}; 2235};
2230 2236
2231#endif /* !(_T3_H) */ 2237#endif /* !(_T3_H) */
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index edae09a4b021..919c40cd635c 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -174,6 +174,7 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
174 break; 174 break;
175 } 175 }
176 spin_unlock_irqrestore(&tp->mii_lock, flags); 176 spin_unlock_irqrestore(&tp->mii_lock, flags);
177 return;
177 } 178 }
178 179
179 /* Establish sync by sending 32 logic ones. */ 180 /* Establish sync by sending 32 logic ones. */
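The added return is more than cosmetic: this branch of tulip_mdio_write() serves chips that complete the whole MDIO transaction through a single register write plus the completion-poll loop visible above, and it previously ended at the spin_unlock_irqrestore() without returning. Control therefore fell through into the generic bit-banged MDIO sequence below ("Establish sync by sending 32 logic ones"), clocking a second, bogus transaction onto the MII bus after the value had already been written.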
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index d098b3ba3538..e0ae3ed6e578 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1104,7 +1104,7 @@ static void set_rx_mode(struct net_device *dev)
1104 if (entry != 0) { 1104 if (entry != 0) {
1105 /* Avoid a chip errata by prefixing a dummy entry. Don't do 1105 /* Avoid a chip errata by prefixing a dummy entry. Don't do
1106 this on the ULI526X as it triggers a different problem */ 1106 this on the ULI526X as it triggers a different problem */
1107 if (!(tp->chip_id == ULI526X && (tp->revision = 0x40 || tp->revision == 0x50))) { 1107 if (!(tp->chip_id == ULI526X && (tp->revision == 0x40 || tp->revision == 0x50))) {
1108 tp->tx_buffers[entry].skb = NULL; 1108 tp->tx_buffers[entry].skb = NULL;
1109 tp->tx_buffers[entry].mapping = 0; 1109 tp->tx_buffers[entry].mapping = 0;
1110 tp->tx_ring[entry].length = 1110 tp->tx_ring[entry].length =
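The operator-precedence fix above deserves a spelled-out evaluation: `=` binds more loosely than `||`, so the old `tp->revision = 0x40 || tp->revision == 0x50` parsed as `tp->revision = (0x40 || tp->revision == 0x50)`, which always evaluates to 1. That both clobbered tp->revision and reduced the surrounding test to `!(tp->chip_id == ULI526X)`, so the dummy-entry errata workaround was being skipped on every ULI526X revision rather than only on 0x40 and 0x50.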
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 0aaa12c0c098..1d3231cc471a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -323,7 +323,7 @@ config PRISM54
323 For a complete list of supported cards visit <http://prism54.org>. 323 For a complete list of supported cards visit <http://prism54.org>.
324 Here is the latest confirmed list of supported cards: 324 Here is the latest confirmed list of supported cards:
325 325
326 3com OfficeConnect 11g Cardbus Card aka 3CRWE154G72 326 3com OfficeConnect 11g Cardbus Card aka 3CRWE154G72 (version 1)
327 Allnet ALL0271 PCI Card 327 Allnet ALL0271 PCI Card
328 Compex WL54G Cardbus Card 328 Compex WL54G Cardbus Card
329 Corega CG-WLCB54GT Cardbus Card 329 Corega CG-WLCB54GT Cardbus Card