Diffstat (limited to 'drivers/net/e100.c')
-rw-r--r--	drivers/net/e100.c | 159
1 file changed, 58 insertions(+), 101 deletions(-)
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 4d0e0aea72bf..61696637a21e 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -159,7 +159,7 @@
 
 #define DRV_NAME		"e100"
 #define DRV_EXT		"-NAPI"
-#define DRV_VERSION		"3.5.17-k2"DRV_EXT
+#define DRV_VERSION		"3.5.17-k4"DRV_EXT
 #define DRV_DESCRIPTION	"Intel(R) PRO/100 Network Driver"
 #define DRV_COPYRIGHT	"Copyright(c) 1999-2006 Intel Corporation"
 #define PFX			DRV_NAME ": "
@@ -174,10 +174,13 @@ MODULE_VERSION(DRV_VERSION);
 
 static int debug = 3;
 static int eeprom_bad_csum_allow = 0;
+static int use_io = 0;
 module_param(debug, int, 0);
 module_param(eeprom_bad_csum_allow, int, 0);
+module_param(use_io, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
+MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
 #define DPRINTK(nlevel, klevel, fmt, args...) \
 	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
 	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
@@ -282,12 +285,6 @@ enum scb_status {
 	rus_mask        = 0x3C,
 };
 
-enum ru_state  {
-	RU_SUSPENDED = 0,
-	RU_RUNNING = 1,
-	RU_UNINITIALIZED = -1,
-};
-
 enum scb_stat_ack {
 	stat_ack_not_ours    = 0x00,
 	stat_ack_sw_gen      = 0x04,
@@ -529,7 +526,6 @@ struct nic {
 	struct rx *rx_to_use;
 	struct rx *rx_to_clean;
 	struct rfd blank_rfd;
-	enum ru_state ru_running;
 
 	spinlock_t cb_lock			____cacheline_aligned;
 	spinlock_t cmd_lock;
@@ -591,7 +587,7 @@ static inline void e100_write_flush(struct nic *nic)
 {
 	/* Flush previous PCI writes through intermediate bridges
 	 * by doing a benign read */
-	(void)readb(&nic->csr->scb.status);
+	(void)ioread8(&nic->csr->scb.status);
 }
 
 static void e100_enable_irq(struct nic *nic)
@@ -599,7 +595,7 @@ static void e100_enable_irq(struct nic *nic)
 	unsigned long flags;
 
 	spin_lock_irqsave(&nic->cmd_lock, flags);
-	writeb(irq_mask_none, &nic->csr->scb.cmd_hi);
+	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
 	e100_write_flush(nic);
 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
 }
@@ -609,7 +605,7 @@ static void e100_disable_irq(struct nic *nic)
 	unsigned long flags;
 
 	spin_lock_irqsave(&nic->cmd_lock, flags);
-	writeb(irq_mask_all, &nic->csr->scb.cmd_hi);
+	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
 	e100_write_flush(nic);
 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
 }
@@ -618,11 +614,11 @@ static void e100_hw_reset(struct nic *nic)
 {
 	/* Put CU and RU into idle with a selective reset to get
 	 * device off of PCI bus */
-	writel(selective_reset, &nic->csr->port);
+	iowrite32(selective_reset, &nic->csr->port);
 	e100_write_flush(nic); udelay(20);
 
 	/* Now fully reset device */
-	writel(software_reset, &nic->csr->port);
+	iowrite32(software_reset, &nic->csr->port);
 	e100_write_flush(nic); udelay(20);
 
 	/* Mask off our interrupt line - it's unmasked after reset */
@@ -639,7 +635,7 @@ static int e100_self_test(struct nic *nic)
 	nic->mem->selftest.signature = 0;
 	nic->mem->selftest.result = 0xFFFFFFFF;
 
-	writel(selftest | dma_addr, &nic->csr->port);
+	iowrite32(selftest | dma_addr, &nic->csr->port);
 	e100_write_flush(nic);
 	/* Wait 10 msec for self-test to complete */
 	msleep(10);
@@ -677,23 +673,23 @@ static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
 	for(j = 0; j < 3; j++) {
 
 		/* Chip select */
-		writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
+		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
 		e100_write_flush(nic); udelay(4);
 
 		for(i = 31; i >= 0; i--) {
 			ctrl = (cmd_addr_data[j] & (1 << i)) ?
 				eecs | eedi : eecs;
-			writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
+			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
 			e100_write_flush(nic); udelay(4);
 
-			writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
+			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
 			e100_write_flush(nic); udelay(4);
 		}
 		/* Wait 10 msec for cmd to complete */
 		msleep(10);
 
 		/* Chip deselect */
-		writeb(0, &nic->csr->eeprom_ctrl_lo);
+		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
 		e100_write_flush(nic); udelay(4);
 	}
 };
@@ -709,21 +705,21 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
 	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
 
 	/* Chip select */
-	writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
+	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
 	e100_write_flush(nic); udelay(4);
 
 	/* Bit-bang to read word from eeprom */
 	for(i = 31; i >= 0; i--) {
 		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
-		writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
+		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
 		e100_write_flush(nic); udelay(4);
 
-		writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
+		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
 		e100_write_flush(nic); udelay(4);
 
 		/* Eeprom drives a dummy zero to EEDO after receiving
 		 * complete address.  Use this to adjust addr_len. */
-		ctrl = readb(&nic->csr->eeprom_ctrl_lo);
+		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
 		if(!(ctrl & eedo) && i > 16) {
 			*addr_len -= (i - 16);
 			i = 17;
@@ -733,7 +729,7 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
 	}
 
 	/* Chip deselect */
-	writeb(0, &nic->csr->eeprom_ctrl_lo);
+	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
 	e100_write_flush(nic); udelay(4);
 
 	return le16_to_cpu(data);
@@ -804,7 +800,7 @@ static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
 
 	/* Previous command is accepted when SCB clears */
 	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
-		if(likely(!readb(&nic->csr->scb.cmd_lo)))
+		if(likely(!ioread8(&nic->csr->scb.cmd_lo)))
 			break;
 		cpu_relax();
 		if(unlikely(i > E100_WAIT_SCB_FAST))
@@ -816,8 +812,8 @@ static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
 	}
 
 	if(unlikely(cmd != cuc_resume))
-		writel(dma_addr, &nic->csr->scb.gen_ptr);
-	writeb(cmd, &nic->csr->scb.cmd_lo);
+		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
+	iowrite8(cmd, &nic->csr->scb.cmd_lo);
 
 err_unlock:
 	spin_unlock_irqrestore(&nic->cmd_lock, flags);
@@ -895,7 +891,7 @@ static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
 	 */
 	spin_lock_irqsave(&nic->mdio_lock, flags);
 	for (i = 100; i; --i) {
-		if (readl(&nic->csr->mdi_ctrl) & mdi_ready)
+		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
 			break;
 		udelay(20);
 	}
@@ -905,11 +901,11 @@ static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
 		spin_unlock_irqrestore(&nic->mdio_lock, flags);
 		return 0;		/* No way to indicate timeout error */
 	}
-	writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
+	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
 
 	for (i = 0; i < 100; i++) {
 		udelay(20);
-		if ((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
+		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
 			break;
 	}
 	spin_unlock_irqrestore(&nic->mdio_lock, flags);
@@ -951,7 +947,7 @@ static void e100_get_defaults(struct nic *nic)
 		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
 
 	/* Template for a freshly allocated RFD */
-	nic->blank_rfd.command = cpu_to_le16(cb_el);
+	nic->blank_rfd.command = cpu_to_le16(cb_el & cb_s);
 	nic->blank_rfd.rbd = 0xFFFFFFFF;
 	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
 
@@ -1318,7 +1314,7 @@ static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
 	}
 
 	/* ack any interupts, something could have been set */
-	writeb(~0, &nic->csr->scb.stat_ack);
+	iowrite8(~0, &nic->csr->scb.stat_ack);
 
 	/* if the command failed, or is not OK, notify and return */
 	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
@@ -1580,7 +1576,7 @@ static void e100_watchdog(unsigned long data)
 	 * accidentally, due to hardware that shares a register between the
 	 * interrupt mask bit and the SW Interrupt generation bit */
 	spin_lock_irq(&nic->cmd_lock);
-	writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
+	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
 	e100_write_flush(nic);
 	spin_unlock_irq(&nic->cmd_lock);
 
@@ -1746,19 +1742,11 @@ static int e100_alloc_cbs(struct nic *nic)
 	return 0;
 }
 
-static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
+static inline void e100_start_receiver(struct nic *nic)
 {
-	if(!nic->rxs) return;
-	if(RU_SUSPENDED != nic->ru_running) return;
-
-	/* handle init time starts */
-	if(!rx) rx = nic->rxs;
-
-	/* (Re)start RU if suspended or idle and RFA is non-NULL */
-	if(rx->skb) {
-		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
-		nic->ru_running = RU_RUNNING;
-	}
+	/* Start if RFA is non-NULL */
+	if(nic->rx_to_clean->skb)
+		e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
 }
 
 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
@@ -1787,7 +1775,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 			put_unaligned(cpu_to_le32(rx->dma_addr),
 				(u32 *)&prev_rfd->link);
 		wmb();
-		prev_rfd->command &= ~cpu_to_le16(cb_el);
+		prev_rfd->command &= ~cpu_to_le16(cb_el & cb_s);
 		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
 			sizeof(struct rfd), PCI_DMA_TODEVICE);
 	}
@@ -1825,10 +1813,6 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 	pci_unmap_single(nic->pdev, rx->dma_addr,
 		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
 
-	/* this allows for a fast restart without re-enabling interrupts */
-	if(le16_to_cpu(rfd->command) & cb_el)
-		nic->ru_running = RU_SUSPENDED;
-
 	/* Pull off the RFD and put the actual data (minus eth hdr) */
 	skb_reserve(skb, sizeof(struct rfd));
 	skb_put(skb, actual_size);
@@ -1859,45 +1843,18 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
 	unsigned int work_to_do)
 {
 	struct rx *rx;
-	int restart_required = 0;
-	struct rx *rx_to_start = NULL;
-
-	/* are we already rnr? then pay attention!!! this ensures that
-	 * the state machine progression never allows a start with a
-	 * partially cleaned list, avoiding a race between hardware
-	 * and rx_to_clean when in NAPI mode */
-	if(RU_SUSPENDED == nic->ru_running)
-		restart_required = 1;
 
 	/* Indicate newly arrived packets */
 	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
-		int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
-		if(-EAGAIN == err) {
-			/* hit quota so have more work to do, restart once
-			 * cleanup is complete */
-			restart_required = 0;
-			break;
-		} else if(-ENODATA == err)
+		if(e100_rx_indicate(nic, rx, work_done, work_to_do))
 			break; /* No more to clean */
 	}
 
-	/* save our starting point as the place we'll restart the receiver */
-	if(restart_required)
-		rx_to_start = nic->rx_to_clean;
-
 	/* Alloc new skbs to refill list */
 	for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
 		if(unlikely(e100_rx_alloc_skb(nic, rx)))
 			break; /* Better luck next time (see watchdog) */
 	}
-
-	if(restart_required) {
-		// ack the rnr?
-		writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
-		e100_start_receiver(nic, rx_to_start);
-		if(work_done)
-			(*work_done)++;
-	}
 }
 
 static void e100_rx_clean_list(struct nic *nic)
@@ -1905,8 +1862,6 @@ static void e100_rx_clean_list(struct nic *nic)
 	struct rx *rx;
 	unsigned int i, count = nic->params.rfds.count;
 
-	nic->ru_running = RU_UNINITIALIZED;
-
 	if(nic->rxs) {
 		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
 			if(rx->skb) {
@@ -1928,7 +1883,6 @@ static int e100_rx_alloc_list(struct nic *nic)
 	unsigned int i, count = nic->params.rfds.count;
 
 	nic->rx_to_use = nic->rx_to_clean = NULL;
-	nic->ru_running = RU_UNINITIALIZED;
 
 	if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
 		return -ENOMEM;
@@ -1943,7 +1897,6 @@ static int e100_rx_alloc_list(struct nic *nic)
 	}
 
 	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
-	nic->ru_running = RU_SUSPENDED;
 
 	return 0;
 }
@@ -1952,7 +1905,7 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
 {
 	struct net_device *netdev = dev_id;
 	struct nic *nic = netdev_priv(netdev);
-	u8 stat_ack = readb(&nic->csr->scb.stat_ack);
+	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
 
 	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
 
@@ -1961,11 +1914,7 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
 		return IRQ_NONE;
 
 	/* Ack interrupt(s) */
-	writeb(stat_ack, &nic->csr->scb.stat_ack);
-
-	/* We hit Receive No Resource (RNR); restart RU after cleaning */
-	if(stat_ack & stat_ack_rnr)
-		nic->ru_running = RU_SUSPENDED;
+	iowrite8(stat_ack, &nic->csr->scb.stat_ack);
 
 	if(likely(netif_rx_schedule_prep(netdev))) {
 		e100_disable_irq(nic);
@@ -2058,7 +2007,7 @@ static int e100_up(struct nic *nic)
 	if((err = e100_hw_init(nic)))
 		goto err_clean_cbs;
 	e100_set_multicast_list(nic->netdev);
-	e100_start_receiver(nic, NULL);
+	e100_start_receiver(nic);
 	mod_timer(&nic->watchdog, jiffies);
 	if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
 		nic->netdev->name, nic->netdev)))
@@ -2107,7 +2056,7 @@ static void e100_tx_timeout_task(struct work_struct *work)
 	struct net_device *netdev = nic->netdev;
 
 	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
-		readb(&nic->csr->scb.status));
+		ioread8(&nic->csr->scb.status));
 	e100_down(netdev_priv(netdev));
 	e100_up(netdev_priv(netdev));
 }
@@ -2139,7 +2088,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
 		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
 			BMCR_LOOPBACK);
 
-	e100_start_receiver(nic, NULL);
+	e100_start_receiver(nic);
 
 	if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
 		err = -ENOMEM;
@@ -2230,9 +2179,9 @@ static void e100_get_regs(struct net_device *netdev,
 	int i;
 
 	regs->version = (1 << 24) | nic->rev_id;
-	buff[0] = readb(&nic->csr->scb.cmd_hi) << 24 |
-		readb(&nic->csr->scb.cmd_lo) << 16 |
-		readw(&nic->csr->scb.status);
+	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
+		ioread8(&nic->csr->scb.cmd_lo) << 16 |
+		ioread16(&nic->csr->scb.status);
 	for(i = E100_PHY_REGS; i >= 0; i--)
 		buff[1 + E100_PHY_REGS - i] =
 			mdio_read(netdev, nic->mii.phy_id, i);
@@ -2604,7 +2553,10 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 	SET_MODULE_OWNER(netdev);
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
-	nic->csr = ioremap(pci_resource_start(pdev, 0), sizeof(struct csr));
+	if (use_io)
+		DPRINTK(PROBE, INFO, "using i/o access mode\n");
+
+	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
 	if(!nic->csr) {
 		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
 		err = -ENOMEM;
@@ -2651,11 +2603,16 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 
 	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
 	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
-	if(!is_valid_ether_addr(netdev->perm_addr)) {
-		DPRINTK(PROBE, ERR, "Invalid MAC address from "
-			"EEPROM, aborting.\n");
-		err = -EAGAIN;
-		goto err_out_free;
+	if (!is_valid_ether_addr(netdev->perm_addr)) {
+		if (!eeprom_bad_csum_allow) {
+			DPRINTK(PROBE, ERR, "Invalid MAC address from "
+					"EEPROM, aborting.\n");
+			err = -EAGAIN;
+			goto err_out_free;
+		} else {
+			DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
+					"you MUST configure one.\n");
+		}
 	}
 
 	/* Wol magic packet can be enabled from eeprom */
@@ -2676,7 +2633,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 
 	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, "
 		"MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
-		(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
+		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), pdev->irq,
 		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
 		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
 
@@ -2685,7 +2642,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 err_out_free:
 	e100_free(nic);
 err_out_iounmap:
-	iounmap(nic->csr);
+	pci_iounmap(pdev, nic->csr);
 err_out_free_res:
 	pci_release_regions(pdev);
 err_out_disable_pdev: