author      <jgarzik@pretzel.yyz.us>                    2005-05-25 13:56:55 -0400
committer   Jeff Garzik <jgarzik@pobox.com>             2005-05-25 13:56:55 -0400
commit      d6d78f63cea62851806bf6ac40c6f53349de442b (patch)
tree        b9bebd1bf342ae93f1d909641725e3d084ed61a9
parent      a83d5cf7a1d1dd22f3a8162b728a7e895192dda8 (diff)
parent      042e2fb70006f135469d546726451b7d14768980 (diff)
Automatic merge of /spare/repo/netdev-2.6 branch e100
-rw-r--r--   drivers/net/e100.c   165
1 files changed, 139 insertions, 26 deletions
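Orientation for the diff that follows: the driver's plain int ru_running flag becomes an explicit receive-unit (RU) state machine, and the receiver is only (re)started from a known-suspended state. A condensed sketch of that rule, drawn from the e100_start_receiver() hunk below (struct nic, struct rx, e100_exec_cmd() and ruc_start are the driver's own identifiers and are not redefined here):

enum ru_state {
	RU_SUSPENDED = 0,	/* EL bit reached or RNR seen; safe to restart */
	RU_RUNNING = 1,		/* ruc_start has been issued */
	RU_UNINITIALIZED = -1,	/* no RFD list allocated yet */
};

/* Condensed restart rule: start only from RU_SUSPENDED, and only when
 * an RFD with an skb is actually available. */
static inline void start_receiver_sketch(struct nic *nic, struct rx *rx)
{
	if (!nic->rxs || nic->ru_running != RU_SUSPENDED)
		return;
	if (!rx)
		rx = nic->rxs;	/* init-time start */
	if (rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}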
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1b68dd5a49b6..4a47df5a9ff9 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -155,9 +155,9 @@
 
 #define DRV_NAME		"e100"
 #define DRV_EXT			"-NAPI"
-#define DRV_VERSION		"3.3.6-k2"DRV_EXT
+#define DRV_VERSION		"3.4.8-k2"DRV_EXT
 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
-#define DRV_COPYRIGHT		"Copyright(c) 1999-2004 Intel Corporation"
+#define DRV_COPYRIGHT		"Copyright(c) 1999-2005 Intel Corporation"
 #define PFX			DRV_NAME ": "
 
 #define E100_WATCHDOG_PERIOD	(2 * HZ)
@@ -210,11 +210,17 @@ static struct pci_device_id e100_id_table[] = {
 	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
 	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
 	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
+	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
+	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
+	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
+	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
+	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
 	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
 	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
 	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
 	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
 	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
+	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, e100_id_table);
@@ -269,6 +275,12 @@ enum scb_status {
 	rus_mask = 0x3C,
 };
 
+enum ru_state {
+	RU_SUSPENDED = 0,
+	RU_RUNNING = 1,
+	RU_UNINITIALIZED = -1,
+};
+
 enum scb_stat_ack {
 	stat_ack_not_ours = 0x00,
 	stat_ack_sw_gen = 0x04,
@@ -510,7 +522,7 @@ struct nic {
 	struct rx *rx_to_use;
 	struct rx *rx_to_clean;
 	struct rfd blank_rfd;
-	int ru_running;
+	enum ru_state ru_running;
 
 	spinlock_t cb_lock ____cacheline_aligned;
 	spinlock_t cmd_lock;
@@ -539,6 +551,7 @@ struct nic {
 	struct timer_list watchdog;
 	struct timer_list blink_timer;
 	struct mii_if_info mii;
+	struct work_struct tx_timeout_task;
 	enum loopback loopback;
 
 	struct mem *mem;
@@ -770,7 +783,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
 	return 0;
 }
 
-#define E100_WAIT_SCB_TIMEOUT 40
+#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
 static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
 {
 	unsigned long flags;
@@ -840,6 +853,10 @@ static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
 		 * because the controller is too busy, so
 		 * let's just queue the command and try again
 		 * when another command is scheduled. */
+		if(err == -ENOSPC) {
+			//request a reset
+			schedule_work(&nic->tx_timeout_task);
+		}
 		break;
 	} else {
 		nic->cuc_cmd = cuc_resume;
@@ -884,7 +901,7 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
 
 static void e100_get_defaults(struct nic *nic)
 {
-	struct param_range rfds = { .min = 64, .max = 256, .count = 64 };
+	struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
 	struct param_range cbs = { .min = 64, .max = 256, .count = 64 };
 
 	pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
@@ -899,8 +916,9 @@ static void e100_get_defaults(struct nic *nic)
 	/* Quadwords to DMA into FIFO before starting frame transmit */
 	nic->tx_threshold = 0xE0;
 
-	nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf |
-		((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0));
+	/* no interrupt for every tx completion, delay = 256us if not 557*/
+	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
+		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
 
 	/* Template for a freshly allocated RFD */
 	nic->blank_rfd.command = cpu_to_le16(cb_el);
@@ -964,7 +982,8 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 	if(nic->flags & multicast_all)
 		config->multicast_all = 0x1;		/* 1=accept, 0=no */
 
-	if(!(nic->flags & wol_magic))
+	/* disable WoL when up */
+	if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
 		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */
 
 	if(nic->mac >= mac_82558_D101_A4) {
@@ -1203,7 +1222,9 @@ static void e100_update_stats(struct nic *nic)
 		}
 	}
 
-	e100_exec_cmd(nic, cuc_dump_reset, 0);
+
+	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
+		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
 }
 
 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1279,12 +1300,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
 	struct sk_buff *skb)
 {
 	cb->command = nic->tx_command;
+	/* interrupt every 16 packets regardless of delay */
+	if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i;
 	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
 	cb->u.tcb.tcb_byte_count = 0;
 	cb->u.tcb.threshold = nic->tx_threshold;
 	cb->u.tcb.tbd_count = 1;
 	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
 		skb->data, skb->len, PCI_DMA_TODEVICE));
+	// check for mapping failure?
 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
 }
 
@@ -1297,7 +1321,8 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
 		   Issue a NOP command followed by a 1us delay before
 		   issuing the Tx command. */
-		e100_exec_cmd(nic, cuc_nop, 0);
+		if(e100_exec_cmd(nic, cuc_nop, 0))
+			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
 		udelay(1);
 	}
 
@@ -1415,12 +1440,18 @@ static int e100_alloc_cbs(struct nic *nic)
 	return 0;
 }
 
-static inline void e100_start_receiver(struct nic *nic)
+static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
 {
+	if(!nic->rxs) return;
+	if(RU_SUSPENDED != nic->ru_running) return;
+
+	/* handle init time starts */
+	if(!rx) rx = nic->rxs;
+
 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
-	if(!nic->ru_running && nic->rx_to_clean->skb) {
-		e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
-		nic->ru_running = 1;
+	if(rx->skb) {
+		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
+		nic->ru_running = RU_RUNNING;
 	}
 }
 
@@ -1437,6 +1468,13 @@ static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
 
+	if(pci_dma_mapping_error(rx->dma_addr)) {
+		dev_kfree_skb_any(rx->skb);
+		rx->skb = 0;
+		rx->dma_addr = 0;
+		return -ENOMEM;
+	}
+
 	/* Link the RFD to end of RFA by linking previous RFD to
 	 * this one, and clearing EL bit of previous. */
 	if(rx->prev->skb) {
@@ -1471,7 +1509,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
 
 	/* If data isn't ready, nothing to indicate */
 	if(unlikely(!(rfd_status & cb_complete)))
-		return -EAGAIN;
+		return -ENODATA;
 
 	/* Get actual data size */
 	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
@@ -1482,6 +1520,10 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
 	pci_unmap_single(nic->pdev, rx->dma_addr,
 		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
 
+	/* this allows for a fast restart without re-enabling interrupts */
+	if(le16_to_cpu(rfd->command) & cb_el)
+		nic->ru_running = RU_SUSPENDED;
+
 	/* Pull off the RFD and put the actual data (minus eth hdr) */
 	skb_reserve(skb, sizeof(struct rfd));
 	skb_put(skb, actual_size);
@@ -1514,20 +1556,45 @@ static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done,
 	unsigned int work_to_do)
 {
 	struct rx *rx;
+	int restart_required = 0;
+	struct rx *rx_to_start = NULL;
+
+	/* are we already rnr? then pay attention!!! this ensures that
+	 * the state machine progression never allows a start with a
+	 * partially cleaned list, avoiding a race between hardware
+	 * and rx_to_clean when in NAPI mode */
+	if(RU_SUSPENDED == nic->ru_running)
+		restart_required = 1;
 
 	/* Indicate newly arrived packets */
 	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
-		if(e100_rx_indicate(nic, rx, work_done, work_to_do))
+		int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
+		if(-EAGAIN == err) {
+			/* hit quota so have more work to do, restart once
+			 * cleanup is complete */
+			restart_required = 0;
+			break;
+		} else if(-ENODATA == err)
 			break; /* No more to clean */
 	}
 
+	/* save our starting point as the place we'll restart the receiver */
+	if(restart_required)
+		rx_to_start = nic->rx_to_clean;
+
 	/* Alloc new skbs to refill list */
 	for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
 		if(unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
 	}
 
-	e100_start_receiver(nic);
+	if(restart_required) {
+		// ack the rnr?
+		writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
+		e100_start_receiver(nic, rx_to_start);
+		if(work_done)
+			(*work_done)++;
+	}
 }
 
 static void e100_rx_clean_list(struct nic *nic)
@@ -1535,6 +1602,8 @@ static void e100_rx_clean_list(struct nic *nic)
 	struct rx *rx;
 	unsigned int i, count = nic->params.rfds.count;
 
+	nic->ru_running = RU_UNINITIALIZED;
+
 	if(nic->rxs) {
 		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
 			if(rx->skb) {
@@ -1548,7 +1617,6 @@ static void e100_rx_clean_list(struct nic *nic)
 	}
 
 	nic->rx_to_use = nic->rx_to_clean = NULL;
-	nic->ru_running = 0;
 }
 
 static int e100_rx_alloc_list(struct nic *nic)
1554 | static int e100_rx_alloc_list(struct nic *nic) | 1622 | static int e100_rx_alloc_list(struct nic *nic) |
@@ -1557,6 +1625,7 @@ static int e100_rx_alloc_list(struct nic *nic) | |||
1557 | unsigned int i, count = nic->params.rfds.count; | 1625 | unsigned int i, count = nic->params.rfds.count; |
1558 | 1626 | ||
1559 | nic->rx_to_use = nic->rx_to_clean = NULL; | 1627 | nic->rx_to_use = nic->rx_to_clean = NULL; |
1628 | nic->ru_running = RU_UNINITIALIZED; | ||
1560 | 1629 | ||
1561 | if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC))) | 1630 | if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC))) |
1562 | return -ENOMEM; | 1631 | return -ENOMEM; |
@@ -1572,6 +1641,7 @@ static int e100_rx_alloc_list(struct nic *nic)
 	}
 
 	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
+	nic->ru_running = RU_SUSPENDED;
 
 	return 0;
 }
@@ -1593,7 +1663,7 @@ static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
 
 	/* We hit Receive No Resource (RNR); restart RU after cleaning */
 	if(stat_ack & stat_ack_rnr)
-		nic->ru_running = 0;
+		nic->ru_running = RU_SUSPENDED;
 
 	e100_disable_irq(nic);
 	netif_rx_schedule(netdev);
@@ -1663,6 +1733,7 @@ static int e100_change_mtu(struct net_device *netdev, int new_mtu)
 	return 0;
 }
 
+#ifdef CONFIG_PM
 static int e100_asf(struct nic *nic)
 {
 	/* ASF can be enabled from eeprom */
@@ -1671,6 +1742,7 @@ static int e100_asf(struct nic *nic)
 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
 }
+#endif
 
 static int e100_up(struct nic *nic)
 {
@@ -1683,13 +1755,16 @@ static int e100_up(struct nic *nic)
 	if((err = e100_hw_init(nic)))
 		goto err_clean_cbs;
 	e100_set_multicast_list(nic->netdev);
-	e100_start_receiver(nic);
+	e100_start_receiver(nic, 0);
 	mod_timer(&nic->watchdog, jiffies);
 	if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
 		nic->netdev->name, nic->netdev)))
 		goto err_no_irq;
-	e100_enable_irq(nic);
 	netif_wake_queue(nic->netdev);
+	netif_poll_enable(nic->netdev);
+	/* enable ints _after_ enabling poll, preventing a race between
+	 * disable ints+schedule */
+	e100_enable_irq(nic);
 	return 0;
 
 err_no_irq:
@@ -1703,11 +1778,13 @@ err_rx_clean_list:
 
 static void e100_down(struct nic *nic)
 {
+	/* wait here for poll to complete */
+	netif_poll_disable(nic->netdev);
+	netif_stop_queue(nic->netdev);
 	e100_hw_reset(nic);
 	free_irq(nic->pdev->irq, nic->netdev);
 	del_timer_sync(&nic->watchdog);
 	netif_carrier_off(nic->netdev);
-	netif_stop_queue(nic->netdev);
 	e100_clean_cbs(nic);
 	e100_rx_clean_list(nic);
 }
@@ -1716,6 +1793,15 @@ static void e100_tx_timeout(struct net_device *netdev)
 {
 	struct nic *nic = netdev_priv(netdev);
 
+	/* Reset outside of interrupt context, to avoid request_irq
+	 * in interrupt context */
+	schedule_work(&nic->tx_timeout_task);
+}
+
+static void e100_tx_timeout_task(struct net_device *netdev)
+{
+	struct nic *nic = netdev_priv(netdev);
+
 	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
 		readb(&nic->csr->scb.status));
 	e100_down(netdev_priv(netdev));
@@ -1749,7 +1835,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
 		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
 			BMCR_LOOPBACK);
 
-	e100_start_receiver(nic);
+	e100_start_receiver(nic, 0);
 
 	if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
 		err = -ENOMEM;
@@ -1869,7 +1955,6 @@ static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	else
 		nic->flags &= ~wol_magic;
 
-	pci_enable_wake(nic->pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
 	e100_exec_cb(nic, NULL, e100_configure);
 
 	return 0;
@@ -2223,6 +2308,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 
 	e100_get_defaults(nic);
 
+	/* locks must be initialized before calling hw_reset */
 	spin_lock_init(&nic->cb_lock);
 	spin_lock_init(&nic->cmd_lock);
 
@@ -2240,6 +2326,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 	nic->blink_timer.function = e100_blink_led;
 	nic->blink_timer.data = (unsigned long)nic;
 
+	INIT_WORK(&nic->tx_timeout_task,
+		(void (*)(void *))e100_tx_timeout_task, netdev);
+
 	if((err = e100_alloc(nic))) {
 		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
 		goto err_out_iounmap;
@@ -2263,7 +2352,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 	   (nic->eeprom[eeprom_id] & eeprom_id_wol))
 		nic->flags |= wol_magic;
 
-	pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+	/* ack any pending wake events, disable PME */
+	pci_enable_wake(pdev, 0, 0);
 
 	strcpy(netdev->name, "eth%d");
 	if((err = register_netdev(netdev))) {
@@ -2335,7 +2425,10 @@ static int e100_resume(struct pci_dev *pdev)
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
-	e100_hw_init(nic);
+	/* ack any pending wake events, disable PME */
+	pci_enable_wake(pdev, 0, 0);
+	if(e100_hw_init(nic))
+		DPRINTK(HW, ERR, "e100_hw_init failed\n");
 
 	netif_device_attach(netdev);
 	if(netif_running(netdev))
@@ -2345,6 +2438,21 @@ static int e100_resume(struct pci_dev *pdev)
 }
 #endif
 
+
+static void e100_shutdown(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct nic *nic = netdev_priv(netdev);
+
+#ifdef CONFIG_PM
+	pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+#else
+	pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
+#endif
+}
+
+
 static struct pci_driver e100_driver = {
 	.name =		DRV_NAME,
 	.id_table =	e100_id_table,
@@ -2354,6 +2462,11 @@ static struct pci_driver e100_driver = {
 	.suspend =	e100_suspend,
 	.resume =	e100_resume,
 #endif
+
+	.driver = {
+		.shutdown = e100_shutdown,
+	}
+
 };
 
 static int __init e100_init_module(void)
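
For readers following the tx-timeout change above: the patch moves the actual reset out of the timeout hook (which runs in atomic context) into a work item that runs in process context, where e100_down()/e100_up() are free to sleep and re-request the IRQ. Below is a minimal, self-contained sketch of that pattern for a hypothetical "foo" driver, using the 2.6-era three-argument INIT_WORK() seen in the probe hunk; it is illustrative only and not part of the patch.

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct net_device *netdev;
	struct work_struct reset_task;	/* queued from atomic context */
};

/* Stand-in for the real down/up sequence (e100_down()/e100_up() above). */
static void foo_reset_hw(struct net_device *netdev)
{
	/* stop the queue, reset the hardware, re-init rings, restart */
}

/* Process-context worker; the old 2.6 INIT_WORK() hands it a void *data. */
static void foo_reset_task(void *data)
{
	struct net_device *netdev = data;

	foo_reset_hw(netdev);
}

/* Tx-timeout hook: called by the stack in softirq context on a Tx hang,
 * so it only queues the heavy work instead of resetting here. */
static void foo_tx_timeout(struct net_device *netdev)
{
	struct foo_priv *priv = netdev_priv(netdev);

	schedule_work(&priv->reset_task);
}

/* In probe(): INIT_WORK(&priv->reset_task, foo_reset_task, netdev); */

Note that the patch schedules the same work item from e100_exec_cb() when the command unit reports -ENOSPC, so every path that detects a wedged controller funnels into one process-context recovery routine.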