 drivers/net/e100.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 files changed, 63 insertions, 9 deletions

diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 61696637a21e..763810c7f33a 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -285,6 +285,12 @@ enum scb_status {
 	rus_mask = 0x3C,
 };
 
+enum ru_state {
+	RU_SUSPENDED = 0,
+	RU_RUNNING = 1,
+	RU_UNINITIALIZED = -1,
+};
+
 enum scb_stat_ack {
 	stat_ack_not_ours = 0x00,
 	stat_ack_sw_gen = 0x04,
@@ -526,6 +532,7 @@ struct nic {
 	struct rx *rx_to_use;
 	struct rx *rx_to_clean;
 	struct rfd blank_rfd;
+	enum ru_state ru_running;
 
 	spinlock_t cb_lock ____cacheline_aligned;
 	spinlock_t cmd_lock;
@@ -947,7 +954,7 @@ static void e100_get_defaults(struct nic *nic)
 		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
 
 	/* Template for a freshly allocated RFD */
-	nic->blank_rfd.command = cpu_to_le16(cb_el & cb_s);
+	nic->blank_rfd.command = cpu_to_le16(cb_el);
 	nic->blank_rfd.rbd = 0xFFFFFFFF;
 	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
 
@@ -1742,11 +1749,19 @@ static int e100_alloc_cbs(struct nic *nic)
 	return 0;
 }
 
-static inline void e100_start_receiver(struct nic *nic)
+static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
 {
-	/* Start if RFA is non-NULL */
-	if(nic->rx_to_clean->skb)
-		e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
+	if(!nic->rxs) return;
+	if(RU_SUSPENDED != nic->ru_running) return;
+
+	/* handle init time starts */
+	if(!rx) rx = nic->rxs;
+
+	/* (Re)start RU if suspended or idle and RFA is non-NULL */
+	if(rx->skb) {
+		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
+		nic->ru_running = RU_RUNNING;
+	}
 }
 
 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
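Taken together, the new ru_running field and the reworked e100_start_receiver() form a small three-state machine for the receive unit (RU): the RU is only (re)started while software believes it is suspended, and only onto a descriptor that actually has an skb attached. The stand-alone sketch below is illustrative only; struct rx, struct nic and ru_start() are simplified stand-ins for the driver's structures and the ruc_start command, but the guards mirror the patch:

/* Stand-alone model of the RU state machine introduced by this patch.
 * Illustrative only: struct rx, struct nic and ru_start() are simplified
 * stand-ins for the real driver structures and the ruc_start command. */
#include <stdio.h>

enum ru_state { RU_SUSPENDED = 0, RU_RUNNING = 1, RU_UNINITIALIZED = -1 };

struct rx { struct rx *next; void *skb; };            /* one RFD slot */
struct nic { struct rx *rxs; enum ru_state ru_running; };

static void ru_start(struct rx *rx)                   /* stands in for e100_exec_cmd(ruc_start) */
{
	printf("RU started at descriptor %p\n", (void *)rx);
}

static void start_receiver(struct nic *nic, struct rx *rx)
{
	if (!nic->rxs)                                /* no RFA allocated yet */
		return;
	if (nic->ru_running != RU_SUSPENDED)          /* only restart a suspended RU */
		return;
	if (!rx)                                      /* init-time start: begin at head of ring */
		rx = nic->rxs;
	if (rx->skb) {                                /* only start onto a filled descriptor */
		ru_start(rx);
		nic->ru_running = RU_RUNNING;
	}
}

int main(void)
{
	char buf;
	struct rx ring[2] = { { &ring[1], &buf }, { ring, &buf } };
	struct nic nic = { ring, RU_UNINITIALIZED };

	start_receiver(&nic, NULL);     /* ignored: RU not marked suspended yet */
	nic.ru_running = RU_SUSPENDED;  /* what e100_rx_alloc_list() now does */
	start_receiver(&nic, NULL);     /* starts the RU at ring[0] */
	start_receiver(&nic, &ring[1]); /* ignored: already RU_RUNNING */
	return 0;
}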
@@ -1775,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 		put_unaligned(cpu_to_le32(rx->dma_addr),
 			(u32 *)&prev_rfd->link);
 		wmb();
-		prev_rfd->command &= ~cpu_to_le16(cb_el & cb_s);
+		prev_rfd->command &= ~cpu_to_le16(cb_el);
 		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
 			sizeof(struct rfd), PCI_DMA_TODEVICE);
 	}
@@ -1813,6 +1828,10 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 	pci_unmap_single(nic->pdev, rx->dma_addr,
 		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
 
+	/* this allows for a fast restart without re-enabling interrupts */
+	if(le16_to_cpu(rfd->command) & cb_el)
+		nic->ru_running = RU_SUSPENDED;
+
 	/* Pull off the RFD and put the actual data (minus eth hdr) */
 	skb_reserve(skb, sizeof(struct rfd));
 	skb_put(skb, actual_size);
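The three cb_el changes above (the blank_rfd template, e100_rx_alloc_skb() and e100_rx_indicate()) cooperate: every freshly allocated RFD is marked end-of-list, the mark is cleared from the previous RFD once a successor is linked in, and a completed RFD that still carries the mark means the hardware suspended behind it. A hedged sketch of that protocol, using htole16()/le16toh() from <endian.h> in place of the kernel's cpu_to_le16()/le16_to_cpu() and an assumed value for cb_el:

/* Illustrative sketch of how the end-of-list (EL) mark travels through the
 * RFD ring after this patch.  The rfd layout and the cb_el value are
 * simplified assumptions, not the driver's definitions. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define cb_el 0x8000   /* assumed: "end of list" command bit */

struct rfd { uint16_t command; };

int main(void)
{
	struct rfd newest = { .command = htole16(cb_el) };  /* blank_rfd template: EL set */
	struct rfd prev   = { .command = htole16(cb_el) };

	/* e100_rx_alloc_skb(): once a new RFD is linked in, clear EL on the
	 * previous one so the hardware no longer stops there */
	prev.command &= ~htole16(cb_el);

	/* e100_rx_indicate(): if a completed RFD still carries EL, the RU
	 * suspended after it, so software must remember to restart it */
	if (le16toh(newest.command) & cb_el)
		printf("RU suspended at end of list -> ru_running = RU_SUSPENDED\n");
	return 0;
}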
@@ -1843,18 +1862,45 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
 	unsigned int work_to_do)
 {
 	struct rx *rx;
+	int restart_required = 0;
+	struct rx *rx_to_start = NULL;
+
+	/* are we already rnr? then pay attention!!! this ensures that
+	 * the state machine progression never allows a start with a
+	 * partially cleaned list, avoiding a race between hardware
+	 * and rx_to_clean when in NAPI mode */
+	if(RU_SUSPENDED == nic->ru_running)
+		restart_required = 1;
 
 	/* Indicate newly arrived packets */
 	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
-		if(e100_rx_indicate(nic, rx, work_done, work_to_do))
+		int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
+		if(-EAGAIN == err) {
+			/* hit quota so have more work to do, restart once
+			 * cleanup is complete */
+			restart_required = 0;
+			break;
+		} else if(-ENODATA == err)
 			break; /* No more to clean */
 	}
 
+	/* save our starting point as the place we'll restart the receiver */
+	if(restart_required)
+		rx_to_start = nic->rx_to_clean;
+
 	/* Alloc new skbs to refill list */
 	for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
 		if(unlikely(e100_rx_alloc_skb(nic, rx)))
 			break; /* Better luck next time (see watchdog) */
 	}
+
+	if(restart_required) {
+		// ack the rnr?
+		writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
+		e100_start_receiver(nic, rx_to_start);
+		if(work_done)
+			(*work_done)++;
+	}
 }
 
 static void e100_rx_clean_list(struct nic *nic)
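The restart logic added to e100_rx_clean() is deliberately conservative: a restart is only considered if the RU was already marked suspended before cleaning began, and it is skipped when the NAPI quota is hit mid-list, so the RU is never restarted onto a partially cleaned ring. A minimal stand-alone model of that decision follows; indicate(), the ring and the quota handling are simplified stand-ins, not driver code:

/* Minimal model of the restart decision added to e100_rx_clean().
 * indicate() plays the role of e100_rx_indicate(): 0 = frame indicated,
 * -EAGAIN = NAPI quota hit, -ENODATA = nothing left to clean. */
#include <errno.h>
#include <stdio.h>

enum ru_state { RU_SUSPENDED = 0, RU_RUNNING = 1, RU_UNINITIALIZED = -1 };

static int indicate(int *frames_left, unsigned int *work_done,
		    unsigned int work_to_do)
{
	if (*work_done >= work_to_do)
		return -EAGAIN;
	if (!*frames_left)
		return -ENODATA;
	--*frames_left;
	++*work_done;
	return 0;
}

static void rx_clean(enum ru_state *ru, int frames_left, unsigned int work_to_do)
{
	unsigned int work_done = 0;
	int restart_required = (*ru == RU_SUSPENDED);   /* already RNR? */

	for (;;) {
		int err = indicate(&frames_left, &work_done, work_to_do);
		if (err == -EAGAIN) {
			restart_required = 0;   /* more to clean; restart on a later poll */
			break;
		} else if (err == -ENODATA) {
			break;                  /* ring fully cleaned */
		}
	}

	if (restart_required) {
		*ru = RU_RUNNING;               /* i.e. e100_start_receiver() */
		printf("restarted after cleaning %u frames\n", work_done);
	} else {
		printf("no restart (cleaned %u, quota %u)\n", work_done, work_to_do);
	}
}

int main(void)
{
	enum ru_state ru = RU_SUSPENDED;
	rx_clean(&ru, 4, 16);    /* ring drained within quota -> restart now */
	ru = RU_SUSPENDED;
	rx_clean(&ru, 32, 16);   /* quota hit -> defer restart to the next poll */
	return 0;
}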
@@ -1862,6 +1908,8 @@ static void e100_rx_clean_list(struct nic *nic)
 	struct rx *rx;
 	unsigned int i, count = nic->params.rfds.count;
 
+	nic->ru_running = RU_UNINITIALIZED;
+
 	if(nic->rxs) {
 		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
 			if(rx->skb) {
@@ -1883,6 +1931,7 @@ static int e100_rx_alloc_list(struct nic *nic)
 	unsigned int i, count = nic->params.rfds.count;
 
 	nic->rx_to_use = nic->rx_to_clean = NULL;
+	nic->ru_running = RU_UNINITIALIZED;
 
 	if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
 		return -ENOMEM;
@@ -1897,6 +1946,7 @@ static int e100_rx_alloc_list(struct nic *nic)
 	}
 
 	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
+	nic->ru_running = RU_SUSPENDED;
 
 	return 0;
 }
@@ -1916,6 +1966,10 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
 	/* Ack interrupt(s) */
 	iowrite8(stat_ack, &nic->csr->scb.stat_ack);
 
+	/* We hit Receive No Resource (RNR); restart RU after cleaning */
+	if(stat_ack & stat_ack_rnr)
+		nic->ru_running = RU_SUSPENDED;
+
 	if(likely(netif_rx_schedule_prep(netdev))) {
 		e100_disable_irq(nic);
 		__netif_rx_schedule(netdev);
@@ -2007,7 +2061,7 @@ static int e100_up(struct nic *nic)
 	if((err = e100_hw_init(nic)))
 		goto err_clean_cbs;
 	e100_set_multicast_list(nic->netdev);
-	e100_start_receiver(nic);
+	e100_start_receiver(nic, NULL);
 	mod_timer(&nic->watchdog, jiffies);
 	if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
 		nic->netdev->name, nic->netdev)))
@@ -2088,7 +2142,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
 	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
 		BMCR_LOOPBACK);
 
-	e100_start_receiver(nic);
+	e100_start_receiver(nic, NULL);
 
 	if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
 		err = -ENOMEM;