diff options
author | Scott Feldman <sfeldma@pobox.com> | 2005-11-09 02:18:52 -0500 |
---|---|---|
committer | Jeff Garzik <jeff@garzik.org> | 2007-04-28 11:01:05 -0400 |
commit | d52df4a35af569071fda3f4eb08e47cc7023f094 (patch) | |
tree | b18fbd4ad63f3e19995d4b19017a44a02df9b707 /drivers | |
parent | 2933d42cb7b0f14e0f83f6f231c966e97c79cdc9 (diff) |
[netdrvr e100] experiment with doing RX in a similar manner to eepro100
I was going to say that eepro100's speedo_rx_link() does the same DMA
abuse as e100, but then I noticed one little detail: eepro100 sets both
EL (end of list) and S (suspend) bits in the RFD as it chains it to the
RFD list. e100 was only setting the EL bit. Hmmm, that's interesting.
That means that if HW reads a RFD with the S-bit set, it'll process
that RFD and then suspend the receive unit. The receive unit will
resume when SW clears the S-bit. There is no need for SW to restart
the receive unit. Which means a lot of the receive unit state tracking
code in the driver goes away.
So here's a patch against 2.6.14. (Sorry for inlining it; the mailer
I'm using now will mess with the word wrap). I can't test this on
XScale (unless someone has an e100 module for Gumstix :) . It should
be doing exactly what eepro100 does with RFDs. I don't believe this
change will introduce a performance hit because the S-bit and EL-bit go
hand-in-hand meaning if we're going to suspend because of the S-bit,
we're on the last resource anyway, so we'll have to wait for SW to
replenish.
(cherry picked from 29e79da9495261119e3b2e4e7c72507348e75976 commit)
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/e100.c | 72 |
1 files changed, 9 insertions, 63 deletions
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 4d0e0aea72bf..71c6d334bd7f 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -282,12 +282,6 @@ enum scb_status { | |||
282 | rus_mask = 0x3C, | 282 | rus_mask = 0x3C, |
283 | }; | 283 | }; |
284 | 284 | ||
285 | enum ru_state { | ||
286 | RU_SUSPENDED = 0, | ||
287 | RU_RUNNING = 1, | ||
288 | RU_UNINITIALIZED = -1, | ||
289 | }; | ||
290 | |||
291 | enum scb_stat_ack { | 285 | enum scb_stat_ack { |
292 | stat_ack_not_ours = 0x00, | 286 | stat_ack_not_ours = 0x00, |
293 | stat_ack_sw_gen = 0x04, | 287 | stat_ack_sw_gen = 0x04, |
@@ -529,7 +523,6 @@ struct nic { | |||
529 | struct rx *rx_to_use; | 523 | struct rx *rx_to_use; |
530 | struct rx *rx_to_clean; | 524 | struct rx *rx_to_clean; |
531 | struct rfd blank_rfd; | 525 | struct rfd blank_rfd; |
532 | enum ru_state ru_running; | ||
533 | 526 | ||
534 | spinlock_t cb_lock ____cacheline_aligned; | 527 | spinlock_t cb_lock ____cacheline_aligned; |
535 | spinlock_t cmd_lock; | 528 | spinlock_t cmd_lock; |
@@ -951,7 +944,7 @@ static void e100_get_defaults(struct nic *nic) | |||
951 | ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); | 944 | ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); |
952 | 945 | ||
953 | /* Template for a freshly allocated RFD */ | 946 | /* Template for a freshly allocated RFD */ |
954 | nic->blank_rfd.command = cpu_to_le16(cb_el); | 947 | nic->blank_rfd.command = cpu_to_le16(cb_el & cb_s); |
955 | nic->blank_rfd.rbd = 0xFFFFFFFF; | 948 | nic->blank_rfd.rbd = 0xFFFFFFFF; |
956 | nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN); | 949 | nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN); |
957 | 950 | ||
@@ -1746,19 +1739,11 @@ static int e100_alloc_cbs(struct nic *nic) | |||
1746 | return 0; | 1739 | return 0; |
1747 | } | 1740 | } |
1748 | 1741 | ||
1749 | static inline void e100_start_receiver(struct nic *nic, struct rx *rx) | 1742 | static inline void e100_start_receiver(struct nic *nic) |
1750 | { | 1743 | { |
1751 | if(!nic->rxs) return; | 1744 | /* Start if RFA is non-NULL */ |
1752 | if(RU_SUSPENDED != nic->ru_running) return; | 1745 | if(nic->rx_to_clean->skb) |
1753 | 1746 | e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr); | |
1754 | /* handle init time starts */ | ||
1755 | if(!rx) rx = nic->rxs; | ||
1756 | |||
1757 | /* (Re)start RU if suspended or idle and RFA is non-NULL */ | ||
1758 | if(rx->skb) { | ||
1759 | e100_exec_cmd(nic, ruc_start, rx->dma_addr); | ||
1760 | nic->ru_running = RU_RUNNING; | ||
1761 | } | ||
1762 | } | 1747 | } |
1763 | 1748 | ||
1764 | #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) | 1749 | #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) |
@@ -1787,7 +1772,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) | |||
1787 | put_unaligned(cpu_to_le32(rx->dma_addr), | 1772 | put_unaligned(cpu_to_le32(rx->dma_addr), |
1788 | (u32 *)&prev_rfd->link); | 1773 | (u32 *)&prev_rfd->link); |
1789 | wmb(); | 1774 | wmb(); |
1790 | prev_rfd->command &= ~cpu_to_le16(cb_el); | 1775 | prev_rfd->command &= ~cpu_to_le16(cb_el & cb_s); |
1791 | pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, | 1776 | pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, |
1792 | sizeof(struct rfd), PCI_DMA_TODEVICE); | 1777 | sizeof(struct rfd), PCI_DMA_TODEVICE); |
1793 | } | 1778 | } |
@@ -1825,10 +1810,6 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, | |||
1825 | pci_unmap_single(nic->pdev, rx->dma_addr, | 1810 | pci_unmap_single(nic->pdev, rx->dma_addr, |
1826 | RFD_BUF_LEN, PCI_DMA_FROMDEVICE); | 1811 | RFD_BUF_LEN, PCI_DMA_FROMDEVICE); |
1827 | 1812 | ||
1828 | /* this allows for a fast restart without re-enabling interrupts */ | ||
1829 | if(le16_to_cpu(rfd->command) & cb_el) | ||
1830 | nic->ru_running = RU_SUSPENDED; | ||
1831 | |||
1832 | /* Pull off the RFD and put the actual data (minus eth hdr) */ | 1813 | /* Pull off the RFD and put the actual data (minus eth hdr) */ |
1833 | skb_reserve(skb, sizeof(struct rfd)); | 1814 | skb_reserve(skb, sizeof(struct rfd)); |
1834 | skb_put(skb, actual_size); | 1815 | skb_put(skb, actual_size); |
@@ -1859,45 +1840,18 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done, | |||
1859 | unsigned int work_to_do) | 1840 | unsigned int work_to_do) |
1860 | { | 1841 | { |
1861 | struct rx *rx; | 1842 | struct rx *rx; |
1862 | int restart_required = 0; | ||
1863 | struct rx *rx_to_start = NULL; | ||
1864 | |||
1865 | /* are we already rnr? then pay attention!!! this ensures that | ||
1866 | * the state machine progression never allows a start with a | ||
1867 | * partially cleaned list, avoiding a race between hardware | ||
1868 | * and rx_to_clean when in NAPI mode */ | ||
1869 | if(RU_SUSPENDED == nic->ru_running) | ||
1870 | restart_required = 1; | ||
1871 | 1843 | ||
1872 | /* Indicate newly arrived packets */ | 1844 | /* Indicate newly arrived packets */ |
1873 | for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { | 1845 | for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { |
1874 | int err = e100_rx_indicate(nic, rx, work_done, work_to_do); | 1846 | if(e100_rx_indicate(nic, rx, work_done, work_to_do)) |
1875 | if(-EAGAIN == err) { | ||
1876 | /* hit quota so have more work to do, restart once | ||
1877 | * cleanup is complete */ | ||
1878 | restart_required = 0; | ||
1879 | break; | ||
1880 | } else if(-ENODATA == err) | ||
1881 | break; /* No more to clean */ | 1847 | break; /* No more to clean */ |
1882 | } | 1848 | } |
1883 | 1849 | ||
1884 | /* save our starting point as the place we'll restart the receiver */ | ||
1885 | if(restart_required) | ||
1886 | rx_to_start = nic->rx_to_clean; | ||
1887 | |||
1888 | /* Alloc new skbs to refill list */ | 1850 | /* Alloc new skbs to refill list */ |
1889 | for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { | 1851 | for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { |
1890 | if(unlikely(e100_rx_alloc_skb(nic, rx))) | 1852 | if(unlikely(e100_rx_alloc_skb(nic, rx))) |
1891 | break; /* Better luck next time (see watchdog) */ | 1853 | break; /* Better luck next time (see watchdog) */ |
1892 | } | 1854 | } |
1893 | |||
1894 | if(restart_required) { | ||
1895 | // ack the rnr? | ||
1896 | writeb(stat_ack_rnr, &nic->csr->scb.stat_ack); | ||
1897 | e100_start_receiver(nic, rx_to_start); | ||
1898 | if(work_done) | ||
1899 | (*work_done)++; | ||
1900 | } | ||
1901 | } | 1855 | } |
1902 | 1856 | ||
1903 | static void e100_rx_clean_list(struct nic *nic) | 1857 | static void e100_rx_clean_list(struct nic *nic) |
@@ -1905,8 +1859,6 @@ static void e100_rx_clean_list(struct nic *nic) | |||
1905 | struct rx *rx; | 1859 | struct rx *rx; |
1906 | unsigned int i, count = nic->params.rfds.count; | 1860 | unsigned int i, count = nic->params.rfds.count; |
1907 | 1861 | ||
1908 | nic->ru_running = RU_UNINITIALIZED; | ||
1909 | |||
1910 | if(nic->rxs) { | 1862 | if(nic->rxs) { |
1911 | for(rx = nic->rxs, i = 0; i < count; rx++, i++) { | 1863 | for(rx = nic->rxs, i = 0; i < count; rx++, i++) { |
1912 | if(rx->skb) { | 1864 | if(rx->skb) { |
@@ -1928,7 +1880,6 @@ static int e100_rx_alloc_list(struct nic *nic) | |||
1928 | unsigned int i, count = nic->params.rfds.count; | 1880 | unsigned int i, count = nic->params.rfds.count; |
1929 | 1881 | ||
1930 | nic->rx_to_use = nic->rx_to_clean = NULL; | 1882 | nic->rx_to_use = nic->rx_to_clean = NULL; |
1931 | nic->ru_running = RU_UNINITIALIZED; | ||
1932 | 1883 | ||
1933 | if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC))) | 1884 | if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC))) |
1934 | return -ENOMEM; | 1885 | return -ENOMEM; |
@@ -1943,7 +1894,6 @@ static int e100_rx_alloc_list(struct nic *nic) | |||
1943 | } | 1894 | } |
1944 | 1895 | ||
1945 | nic->rx_to_use = nic->rx_to_clean = nic->rxs; | 1896 | nic->rx_to_use = nic->rx_to_clean = nic->rxs; |
1946 | nic->ru_running = RU_SUSPENDED; | ||
1947 | 1897 | ||
1948 | return 0; | 1898 | return 0; |
1949 | } | 1899 | } |
@@ -1963,10 +1913,6 @@ static irqreturn_t e100_intr(int irq, void *dev_id) | |||
1963 | /* Ack interrupt(s) */ | 1913 | /* Ack interrupt(s) */ |
1964 | writeb(stat_ack, &nic->csr->scb.stat_ack); | 1914 | writeb(stat_ack, &nic->csr->scb.stat_ack); |
1965 | 1915 | ||
1966 | /* We hit Receive No Resource (RNR); restart RU after cleaning */ | ||
1967 | if(stat_ack & stat_ack_rnr) | ||
1968 | nic->ru_running = RU_SUSPENDED; | ||
1969 | |||
1970 | if(likely(netif_rx_schedule_prep(netdev))) { | 1916 | if(likely(netif_rx_schedule_prep(netdev))) { |
1971 | e100_disable_irq(nic); | 1917 | e100_disable_irq(nic); |
1972 | __netif_rx_schedule(netdev); | 1918 | __netif_rx_schedule(netdev); |
@@ -2058,7 +2004,7 @@ static int e100_up(struct nic *nic) | |||
2058 | if((err = e100_hw_init(nic))) | 2004 | if((err = e100_hw_init(nic))) |
2059 | goto err_clean_cbs; | 2005 | goto err_clean_cbs; |
2060 | e100_set_multicast_list(nic->netdev); | 2006 | e100_set_multicast_list(nic->netdev); |
2061 | e100_start_receiver(nic, NULL); | 2007 | e100_start_receiver(nic); |
2062 | mod_timer(&nic->watchdog, jiffies); | 2008 | mod_timer(&nic->watchdog, jiffies); |
2063 | if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, | 2009 | if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, |
2064 | nic->netdev->name, nic->netdev))) | 2010 | nic->netdev->name, nic->netdev))) |
@@ -2139,7 +2085,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) | |||
2139 | mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, | 2085 | mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, |
2140 | BMCR_LOOPBACK); | 2086 | BMCR_LOOPBACK); |
2141 | 2087 | ||
2142 | e100_start_receiver(nic, NULL); | 2088 | e100_start_receiver(nic); |
2143 | 2089 | ||
2144 | if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { | 2090 | if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { |
2145 | err = -ENOMEM; | 2091 | err = -ENOMEM; |