Diffstat (limited to 'drivers/net/qla3xxx.c')
-rwxr-xr-x  drivers/net/qla3xxx.c  |  492
1 file changed, 322 insertions(+), 170 deletions(-)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index a142cdfd947b..3a14d19b72bd 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -39,7 +39,7 @@
 
 #define DRV_NAME "qla3xxx"
 #define DRV_STRING "QLogic ISP3XXX Network Driver"
-#define DRV_VERSION "v2.02.00-k36"
+#define DRV_VERSION "v2.03.00-k3"
 #define PFX DRV_NAME " "
 
 static const char ql3xxx_driver_name[] = DRV_NAME;
@@ -276,7 +276,8 @@ static void ql_enable_interrupts(struct ql3_adapter *qdev)
 static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
                                             struct ql_rcv_buf_cb *lrg_buf_cb)
 {
-        u64 map;
+        dma_addr_t map;
+        int err;
         lrg_buf_cb->next = NULL;
 
         if (qdev->lrg_buf_free_tail == NULL) {  /* The list is empty  */
@@ -287,9 +288,10 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
         }
 
         if (!lrg_buf_cb->skb) {
-                lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+                lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
+                                                   qdev->lrg_buffer_len);
                 if (unlikely(!lrg_buf_cb->skb)) {
-                        printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
+                        printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
                                qdev->ndev->name);
                         qdev->lrg_buf_skb_check++;
                 } else {
@@ -303,6 +305,17 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
                                                      qdev->lrg_buffer_len -
                                                      QL_HEADER_SPACE,
                                                      PCI_DMA_FROMDEVICE);
+                        err = pci_dma_mapping_error(map);
+                        if(err) {
+                                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+                                       qdev->ndev->name, err);
+                                dev_kfree_skb(lrg_buf_cb->skb);
+                                lrg_buf_cb->skb = NULL;
+
+                                qdev->lrg_buf_skb_check++;
+                                return;
+                        }
+
                         lrg_buf_cb->buf_phy_addr_low =
                             cpu_to_le32(LS_64BITS(map));
                         lrg_buf_cb->buf_phy_addr_high =
@@ -1387,6 +1400,8 @@ static void ql_link_state_machine(struct ql3_adapter *qdev)
                 printk(KERN_INFO PFX
                        "%s: Reset in progress, skip processing link "
                        "state.\n", qdev->ndev->name);
+
+                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                 return;
         }
 
@@ -1518,8 +1533,10 @@ static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
         if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-                 2) << 7))
+                 2) << 7)) {
+                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                 return 0;
+        }
         status = ql_is_auto_cfg(qdev);
         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -1533,8 +1550,10 @@ static u32 ql_get_speed(struct ql3_adapter *qdev)
         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
         if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-                 2) << 7))
+                 2) << 7)) {
+                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                 return 0;
+        }
         status = ql_get_link_speed(qdev);
         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -1548,8 +1567,10 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
         if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-                 2) << 7))
+                 2) << 7)) {
+                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                 return 0;
+        }
         status = ql_is_link_full_dup(qdev);
         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -1615,14 +1636,16 @@ static const struct ethtool_ops ql3xxx_ethtool_ops = {
 static int ql_populate_free_queue(struct ql3_adapter *qdev)
 {
         struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
-        u64 map;
+        dma_addr_t map;
+        int err;
 
         while (lrg_buf_cb) {
                 if (!lrg_buf_cb->skb) {
-                        lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+                        lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
+                                                           qdev->lrg_buffer_len);
                         if (unlikely(!lrg_buf_cb->skb)) {
                                 printk(KERN_DEBUG PFX
-                                       "%s: Failed dev_alloc_skb().\n",
+                                       "%s: Failed netdev_alloc_skb().\n",
                                        qdev->ndev->name);
                                 break;
                         } else {
@@ -1636,6 +1659,17 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
                                                              qdev->lrg_buffer_len -
                                                              QL_HEADER_SPACE,
                                                              PCI_DMA_FROMDEVICE);
+
+                        err = pci_dma_mapping_error(map);
+                        if(err) {
+                                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+                                       qdev->ndev->name, err);
+                                dev_kfree_skb(lrg_buf_cb->skb);
+                                lrg_buf_cb->skb = NULL;
+                                break;
+                        }
+
+
                         lrg_buf_cb->buf_phy_addr_low =
                             cpu_to_le32(LS_64BITS(map));
                         lrg_buf_cb->buf_phy_addr_high =
@@ -1690,11 +1724,11 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
 
                 qdev->lrg_buf_q_producer_index++;
 
-                if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES)
+                if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
                         qdev->lrg_buf_q_producer_index = 0;
 
                 if (qdev->lrg_buf_q_producer_index ==
-                    (NUM_LBUFQ_ENTRIES - 1)) {
+                    (qdev->num_lbufq_entries - 1)) {
                         lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
                 }
         }
@@ -1713,8 +1747,31 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
 {
         struct ql_tx_buf_cb *tx_cb;
         int i;
+        int retval = 0;
 
+        if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+                printk(KERN_WARNING "Frame short but, frame was padded and sent.\n");
+        }
+
         tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
+
+        /* Check the transmit response flags for any errors */
+        if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+                printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
+
+                qdev->stats.tx_errors++;
+                retval = -EIO;
+                goto frame_not_sent;
+        }
+
+        if(tx_cb->seg_count == 0) {
+                printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);
+
+                qdev->stats.tx_errors++;
+                retval = -EIO;
+                goto invalid_seg_count;
+        }
+
         pci_unmap_single(qdev->pdev,
                          pci_unmap_addr(&tx_cb->map[0], mapaddr),
                          pci_unmap_len(&tx_cb->map[0], maplen),
@@ -1731,11 +1788,32 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
         }
         qdev->stats.tx_packets++;
         qdev->stats.tx_bytes += tx_cb->skb->len;
+
+frame_not_sent:
         dev_kfree_skb_irq(tx_cb->skb);
         tx_cb->skb = NULL;
+
+invalid_seg_count:
         atomic_inc(&qdev->tx_count);
 }
 
+void ql_get_sbuf(struct ql3_adapter *qdev)
+{
+        if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
+                qdev->small_buf_index = 0;
+        qdev->small_buf_release_cnt++;
+}
+
+struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
+{
+        struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
+        lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
+        qdev->lrg_buf_release_cnt++;
+        if (++qdev->lrg_buf_index == qdev->num_large_buffers)
+                qdev->lrg_buf_index = 0;
+        return(lrg_buf_cb);
+}
+
 /*
  * The difference between 3022 and 3032 for inbound completions:
  * 3022 uses two buffers per completion. The first buffer contains
@@ -1751,47 +1829,21 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
                                    struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
 {
-        long int offset;
-        u32 lrg_buf_phy_addr_low = 0;
         struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
         struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
-        u32 *curr_ial_ptr;
         struct sk_buff *skb;
         u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
 
         /*
          * Get the inbound address list (small buffer).
          */
-        offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
-        if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
-                qdev->small_buf_index = 0;
+        ql_get_sbuf(qdev);
 
-        curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
-        qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
-        qdev->small_buf_release_cnt++;
-
-        if (qdev->device_id == QL3022_DEVICE_ID) {
-                /* start of first buffer (3022 only) */
-                lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-                lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-                qdev->lrg_buf_release_cnt++;
-                if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
-                        qdev->lrg_buf_index = 0;
-                }
-                curr_ial_ptr++; /* 64-bit pointers require two incs. */
-                curr_ial_ptr++;
-        }
+        if (qdev->device_id == QL3022_DEVICE_ID)
+                lrg_buf_cb1 = ql_get_lbuf(qdev);
 
         /* start of second buffer */
-        lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-        lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
-
-        /*
-         * Second buffer gets sent up the stack.
-         */
-        qdev->lrg_buf_release_cnt++;
-        if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-                qdev->lrg_buf_index = 0;
+        lrg_buf_cb2 = ql_get_lbuf(qdev);
         skb = lrg_buf_cb2->skb;
 
         qdev->stats.rx_packets++;
@@ -1819,11 +1871,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
                                      struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
 {
-        long int offset;
-        u32 lrg_buf_phy_addr_low = 0;
         struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
         struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
-        u32 *curr_ial_ptr;
         struct sk_buff *skb1 = NULL, *skb2;
         struct net_device *ndev = qdev->ndev;
         u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
@@ -1833,35 +1882,20 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
          * Get the inbound address list (small buffer).
          */
 
-        offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
-        if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
-                qdev->small_buf_index = 0;
-        curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
-        qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
-        qdev->small_buf_release_cnt++;
+        ql_get_sbuf(qdev);
 
         if (qdev->device_id == QL3022_DEVICE_ID) {
                 /* start of first buffer on 3022 */
-                lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-                lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-                qdev->lrg_buf_release_cnt++;
-                if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-                        qdev->lrg_buf_index = 0;
+                lrg_buf_cb1 = ql_get_lbuf(qdev);
                 skb1 = lrg_buf_cb1->skb;
-                curr_ial_ptr++; /* 64-bit pointers require two incs. */
-                curr_ial_ptr++;
                 size = ETH_HLEN;
                 if (*((u16 *) skb1->data) != 0xFFFF)
                         size += VLAN_ETH_HLEN - ETH_HLEN;
         }
 
         /* start of second buffer */
-        lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-        lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
+        lrg_buf_cb2 = ql_get_lbuf(qdev);
         skb2 = lrg_buf_cb2->skb;
-        qdev->lrg_buf_release_cnt++;
-        if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-                qdev->lrg_buf_index = 0;
 
         skb_put(skb2, length);  /* Just the second buffer length here. */
         pci_unmap_single(qdev->pdev,
@@ -1914,10 +1948,13 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
         struct net_rsp_iocb *net_rsp;
         struct net_device *ndev = qdev->ndev;
         unsigned long hw_flags;
+        int work_done = 0;
+
+        u32 rsp_producer_index = le32_to_cpu(*(qdev->prsp_producer_index));
 
         /* While there are entries in the completion queue. */
-        while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
-                qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
+        while ((rsp_producer_index !=
+                qdev->rsp_consumer_index) && (work_done < work_to_do)) {
 
                 net_rsp = qdev->rsp_current;
                 switch (net_rsp->opcode) {
@@ -1968,37 +2005,34 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
                 } else {
                         qdev->rsp_current++;
                 }
+
+                work_done = *tx_cleaned + *rx_cleaned;
         }
 
-        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+        if(work_done) {
+                spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
         ql_update_lrg_bufq_prod_index(qdev);
 
         if (qdev->small_buf_release_cnt >= 16) {
                 while (qdev->small_buf_release_cnt >= 16) {
                         qdev->small_buf_q_producer_index++;
 
                         if (qdev->small_buf_q_producer_index ==
                             NUM_SBUFQ_ENTRIES)
                                 qdev->small_buf_q_producer_index = 0;
                         qdev->small_buf_release_cnt -= 8;
                 }
 
-                ql_write_common_reg(qdev,
-                                    &port_regs->CommonRegs.
-                                    rxSmallQProducerIndex,
-                                    qdev->small_buf_q_producer_index);
-        }
+                wmb();
+                ql_write_common_reg(qdev,
+                                    &port_regs->CommonRegs.
+                                    rxSmallQProducerIndex,
+                                    qdev->small_buf_q_producer_index);
+        }
 
-        ql_write_common_reg(qdev,
-                            &port_regs->CommonRegs.rspQConsumerIndex,
-                            qdev->rsp_consumer_index);
-        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+        }
 
-        if (unlikely(netif_queue_stopped(qdev->ndev))) {
-                if (netif_queue_stopped(qdev->ndev) &&
-                    (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
-                        netif_wake_queue(qdev->ndev);
+        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
         }
 
         return *tx_cleaned + *rx_cleaned;
@@ -2009,6 +2043,8 @@ static int ql_poll(struct net_device *ndev, int *budget)
         struct ql3_adapter *qdev = netdev_priv(ndev);
         int work_to_do = min(*budget, ndev->quota);
         int rx_cleaned = 0, tx_cleaned = 0;
+        unsigned long hw_flags;
+        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 
         if (!netif_carrier_ok(ndev))
                 goto quit_polling;
@@ -2017,9 +2053,17 @@ static int ql_poll(struct net_device *ndev, int *budget)
         *budget -= rx_cleaned;
         ndev->quota -= rx_cleaned;
 
-        if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
+        if( tx_cleaned + rx_cleaned != work_to_do ||
+            !netif_running(ndev)) {
 quit_polling:
                 netif_rx_complete(ndev);
+
+                spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+                ql_write_common_reg(qdev,
+                                    &port_regs->CommonRegs.rspQConsumerIndex,
+                                    qdev->rsp_consumer_index);
+                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
                 ql_enable_interrupts(qdev);
                 return 0;
         }
@@ -2073,10 +2117,9 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
                 spin_unlock(&qdev->adapter_lock);
         } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
                 ql_disable_interrupts(qdev);
-                if (likely(netif_rx_schedule_prep(ndev)))
+                if (likely(netif_rx_schedule_prep(ndev))) {
                         __netif_rx_schedule(ndev);
-                else
-                        ql_enable_interrupts(qdev);
+                }
         } else {
                 return IRQ_NONE;
         }
@@ -2093,8 +2136,12 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
  * the next AOL if more frags are coming.
  * That is why the frags:segment count ratio is not linear.
  */
-static int ql_get_seg_count(unsigned short frags)
+static int ql_get_seg_count(struct ql3_adapter *qdev,
+                            unsigned short frags)
 {
+        if (qdev->device_id == QL3022_DEVICE_ID)
+                return 1;
+
         switch(frags) {
         case 0: return 1;       /* just the skb->data seg */
         case 1: return 2;       /* skb->data + 1 frag */
@@ -2139,11 +2186,13 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
 
         if (ip) {
                 if (ip->protocol == IPPROTO_TCP) {
-                        mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
+                        mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
+                                                OB_3032MAC_IOCB_REQ_IC;
                         mac_iocb_ptr->ip_hdr_off = offset;
                         mac_iocb_ptr->ip_hdr_len = ip->ihl;
                 } else if (ip->protocol == IPPROTO_UDP) {
-                        mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
+                        mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
+                                                OB_3032MAC_IOCB_REQ_IC;
                         mac_iocb_ptr->ip_hdr_off = offset;
                         mac_iocb_ptr->ip_hdr_len = ip->ihl;
                 }
@@ -2151,53 +2200,42 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
 }
 
 /*
- * The difference between 3022 and 3032 sends:
- * 3022 only supports a simple single segment transmission.
- * 3032 supports checksumming and scatter/gather lists (fragments).
- * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
- * in the IOCB plus a chain of outbound address lists (OAL) that
- * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
- * will used to point to an OAL when more ALP entries are required.
- * The IOCB is always the top of the chain followed by one or more
- * OALs (when necessary).
+ * Map the buffers for this transmit. This will return
+ * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
  */
-static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
+static int ql_send_map(struct ql3_adapter *qdev,
+                       struct ob_mac_iocb_req *mac_iocb_ptr,
+                       struct ql_tx_buf_cb *tx_cb,
+                       struct sk_buff *skb)
 {
-        struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
-        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
-        struct ql_tx_buf_cb *tx_cb;
-        u32 tot_len = skb->len;
         struct oal *oal;
         struct oal_entry *oal_entry;
-        int len;
-        struct ob_mac_iocb_req *mac_iocb_ptr;
-        u64 map;
+        int len = skb->len;
+        dma_addr_t map;
+        int err;
+        int completed_segs, i;
         int seg_cnt, seg = 0;
         int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
 
-        if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
-                if (!netif_queue_stopped(ndev))
-                        netif_stop_queue(ndev);
-                return NETDEV_TX_BUSY;
-        }
-        tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
-        seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
+        seg_cnt = tx_cb->seg_count = ql_get_seg_count(qdev,
+                                                      (skb_shinfo(skb)->nr_frags));
         if(seg_cnt == -1) {
                 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
-                return NETDEV_TX_OK;
-
+                return NETDEV_TX_BUSY;
         }
-        mac_iocb_ptr = tx_cb->queue_entry;
-        memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
-        mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
-        mac_iocb_ptr->flags |= qdev->mb_bit_mask;
-        mac_iocb_ptr->transaction_id = qdev->req_producer_index;
-        mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
-        tx_cb->skb = skb;
-        if (skb->ip_summed == CHECKSUM_PARTIAL)
-                ql_hw_csum_setup(skb, mac_iocb_ptr);
-        len = skb_headlen(skb);
+        /*
+         * Map the skb buffer first.
+         */
         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+        err = pci_dma_mapping_error(map);
+        if(err) {
+                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+                       qdev->ndev->name, err);
+
+                return NETDEV_TX_BUSY;
+        }
+
         oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
         oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
         oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
@@ -2206,15 +2244,14 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
         pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
         seg++;
 
-        if (!skb_shinfo(skb)->nr_frags) {
+        if (seg_cnt == 1) {
                 /* Terminate the last segment. */
                 oal_entry->len =
                     cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
         } else {
-                int i;
                 oal = tx_cb->oal;
-                for (i=0; i<frag_cnt; i++,seg++) {
-                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+                for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
+                        skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
                         oal_entry++;
                         if ((seg == 2 && seg_cnt > 3) ||        /* Check for continuation */
                             (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
@@ -2224,6 +2261,15 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
                                 map = pci_map_single(qdev->pdev, oal,
                                                      sizeof(struct oal),
                                                      PCI_DMA_TODEVICE);
+
+                                err = pci_dma_mapping_error(map);
+                                if(err) {
+
+                                        printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
+                                               qdev->ndev->name, err);
+                                        goto map_error;
+                                }
+
                                 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
                                 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
                                 oal_entry->len =
@@ -2242,6 +2288,14 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
                             pci_map_page(qdev->pdev, frag->page,
                                          frag->page_offset, frag->size,
                                          PCI_DMA_TODEVICE);
+
+                        err = pci_dma_mapping_error(map);
+                        if(err) {
+                                printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
+                                       qdev->ndev->name, err);
+                                goto map_error;
+                        }
+
                         oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
                         oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
                         oal_entry->len = cpu_to_le32(frag->size);
@@ -2253,6 +2307,94 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
                 oal_entry->len =
                     cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
         }
+
+        return NETDEV_TX_OK;
+
+map_error:
+        /* A PCI mapping failed and now we will need to back out
+         * We need to traverse through the oal's and associated pages which
+         * have been mapped and now we must unmap them to clean up properly
+         */
+
+        seg = 1;
+        oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+        oal = tx_cb->oal;
+        for (i=0; i<completed_segs; i++,seg++) {
+                oal_entry++;
+
+                if((seg == 2 && seg_cnt > 3) ||        /* Check for continuation */
+                   (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
+                   (seg == 12 && seg_cnt > 13) ||      /* but necessary. */
+                   (seg == 17 && seg_cnt > 18)) {
+                        pci_unmap_single(qdev->pdev,
+                                         pci_unmap_addr(&tx_cb->map[seg], mapaddr),
+                                         pci_unmap_len(&tx_cb->map[seg], maplen),
+                                         PCI_DMA_TODEVICE);
+                        oal++;
+                        seg++;
+                }
+
+                pci_unmap_page(qdev->pdev,
+                               pci_unmap_addr(&tx_cb->map[seg], mapaddr),
+                               pci_unmap_len(&tx_cb->map[seg], maplen),
+                               PCI_DMA_TODEVICE);
+        }
+
+        pci_unmap_single(qdev->pdev,
+                         pci_unmap_addr(&tx_cb->map[0], mapaddr),
+                         pci_unmap_addr(&tx_cb->map[0], maplen),
+                         PCI_DMA_TODEVICE);
+
+        return NETDEV_TX_BUSY;
+
+}
+
+/*
+ * The difference between 3022 and 3032 sends:
+ * 3022 only supports a simple single segment transmission.
+ * 3032 supports checksumming and scatter/gather lists (fragments).
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
+ * will used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain followed by one or more
+ * OALs (when necessary).
+ */
+static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
+{
+        struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+        struct ql_tx_buf_cb *tx_cb;
+        u32 tot_len = skb->len;
+        struct ob_mac_iocb_req *mac_iocb_ptr;
+
+        if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
+                return NETDEV_TX_BUSY;
+        }
+
+        tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
+        if((tx_cb->seg_count = ql_get_seg_count(qdev,
+                                                (skb_shinfo(skb)->nr_frags))) == -1) {
+                printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
+                return NETDEV_TX_OK;
+        }
+
+        mac_iocb_ptr = tx_cb->queue_entry;
+        mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
+        mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
+        mac_iocb_ptr->flags |= qdev->mb_bit_mask;
+        mac_iocb_ptr->transaction_id = qdev->req_producer_index;
+        mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
+        tx_cb->skb = skb;
+        if (qdev->device_id == QL3032_DEVICE_ID &&
+            skb->ip_summed == CHECKSUM_PARTIAL)
+                ql_hw_csum_setup(skb, mac_iocb_ptr);
+
+        if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
+                printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
+                return NETDEV_TX_BUSY;
+        }
+
         wmb();
         qdev->req_producer_index++;
         if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
@@ -2338,12 +2480,19 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
 {
         /* Create Large Buffer Queue */
         qdev->lrg_buf_q_size =
-            NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+            qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
         if (qdev->lrg_buf_q_size < PAGE_SIZE)
                 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
         else
                 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
 
+        qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL);
+        if (qdev->lrg_buf == NULL) {
+                printk(KERN_ERR PFX
+                       "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
+                return -ENOMEM;
+        }
+
         qdev->lrg_buf_q_alloc_virt_addr =
             pci_alloc_consistent(qdev->pdev,
                                  qdev->lrg_buf_q_alloc_size,
@@ -2393,7 +2542,7 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2393 "%s: Already done.\n", qdev->ndev->name); 2542 "%s: Already done.\n", qdev->ndev->name);
2394 return; 2543 return;
2395 } 2544 }
2396 2545 if(qdev->lrg_buf) kfree(qdev->lrg_buf);
2397 pci_free_consistent(qdev->pdev, 2546 pci_free_consistent(qdev->pdev,
2398 qdev->lrg_buf_q_alloc_size, 2547 qdev->lrg_buf_q_alloc_size,
2399 qdev->lrg_buf_q_alloc_virt_addr, 2548 qdev->lrg_buf_q_alloc_virt_addr,
@@ -2438,8 +2587,6 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
 
         small_buf_q_entry = qdev->small_buf_q_virt_addr;
 
-        qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;
-
         /* Initialize the small buffer queue. */
         for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
                 small_buf_q_entry->addr_high =
@@ -2476,7 +2623,7 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
         int i = 0;
         struct ql_rcv_buf_cb *lrg_buf_cb;
 
-        for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
+        for (i = 0; i < qdev->num_large_buffers; i++) {
                 lrg_buf_cb = &qdev->lrg_buf[i];
                 if (lrg_buf_cb->skb) {
                         dev_kfree_skb(lrg_buf_cb->skb);
@@ -2497,7 +2644,7 @@ static void ql_init_large_buffers(struct ql3_adapter *qdev)
         struct ql_rcv_buf_cb *lrg_buf_cb;
         struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
 
-        for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
+        for (i = 0; i < qdev->num_large_buffers; i++) {
                 lrg_buf_cb = &qdev->lrg_buf[i];
                 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
                 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
@@ -2512,10 +2659,12 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
         int i;
         struct ql_rcv_buf_cb *lrg_buf_cb;
         struct sk_buff *skb;
-        u64 map;
+        dma_addr_t map;
+        int err;
 
-        for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
-                skb = dev_alloc_skb(qdev->lrg_buffer_len);
+        for (i = 0; i < qdev->num_large_buffers; i++) {
+                skb = netdev_alloc_skb(qdev->ndev,
+                                       qdev->lrg_buffer_len);
                 if (unlikely(!skb)) {
                         /* Better luck next round */
                         printk(KERN_ERR PFX
@@ -2541,6 +2690,15 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
                                      qdev->lrg_buffer_len -
                                      QL_HEADER_SPACE,
                                      PCI_DMA_FROMDEVICE);
+
+                err = pci_dma_mapping_error(map);
+                if(err) {
+                        printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+                               qdev->ndev->name, err);
+                        ql_free_large_buffers(qdev);
+                        return -ENOMEM;
+                }
+
                 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
                 pci_unmap_len_set(lrg_buf_cb, maplen,
                                   qdev->lrg_buffer_len -
@@ -2592,9 +2750,15 @@ static int ql_create_send_free_list(struct ql3_adapter *qdev)
 
 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
 {
-        if (qdev->ndev->mtu == NORMAL_MTU_SIZE)
+        if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
+                qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
                 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
+        }
         else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
+                /*
+                 * Bigger buffers, so less of them.
+                 */
+                qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
                 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
         } else {
                 printk(KERN_ERR PFX
@@ -2602,6 +2766,7 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
                        qdev->ndev->name);
                 return -ENOMEM;
         }
+        qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
         qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
         qdev->max_frame_size =
             (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
@@ -2834,7 +2999,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
                            &hmem_regs->rxLargeQBaseAddrLow,
                            LS_64BITS(qdev->lrg_buf_q_phy_addr));
 
-        ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);
+        ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
 
         ql_write_page1_reg(qdev,
                            &hmem_regs->rxLargeBufferLength,
@@ -2856,7 +3021,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
 
         qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
         qdev->small_buf_release_cnt = 8;
-        qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1;
+        qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
         qdev->lrg_buf_release_cnt = 8;
         qdev->lrg_buf_next_free =
             (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
@@ -3292,6 +3457,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
 err_init:
         ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
 err_lock:
+        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
         free_irq(qdev->pdev->irq, ndev);
 err_irq:
         if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
@@ -3343,27 +3509,6 @@ static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
         return &qdev->stats;
 }
 
-static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
-{
-        struct ql3_adapter *qdev = netdev_priv(ndev);
-        printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
-        if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
-                printk(KERN_ERR PFX
-                       "%s: mtu size of %d is not valid. Use exactly %d or "
-                       "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
-                       JUMBO_MTU_SIZE);
-                return -EINVAL;
-        }
-
-        if (!netif_running(ndev)) {
-                ndev->mtu = new_mtu;
-                return 0;
-        }
-
-        ndev->mtu = new_mtu;
-        return ql_cycle_adapter(qdev,QL_DO_RESET);
-}
-
 static void ql3xxx_set_multicast_list(struct net_device *ndev)
 {
         /*
@@ -3609,8 +3754,12 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
         }
 
         ndev = alloc_etherdev(sizeof(struct ql3_adapter));
-        if (!ndev)
+        if (!ndev) {
+                printk(KERN_ERR PFX "%s could not alloc etherdev\n",
+                       pci_name(pdev));
+                err = -ENOMEM;
                 goto err_out_free_regions;
+        }
 
         SET_MODULE_OWNER(ndev);
         SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -3639,6 +3788,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
         if (!qdev->mem_map_registers) {
                 printk(KERN_ERR PFX "%s: cannot map device registers\n",
                        pci_name(pdev));
+                err = -EIO;
                 goto err_out_free_ndev;
         }
 
@@ -3650,7 +3800,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
         ndev->hard_start_xmit = ql3xxx_send;
         ndev->stop = ql3xxx_close;
         ndev->get_stats = ql3xxx_get_stats;
-        ndev->change_mtu = ql3xxx_change_mtu;
         ndev->set_multicast_list = ql3xxx_set_multicast_list;
         SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
         ndev->set_mac_address = ql3xxx_set_mac_address;
@@ -3667,6 +3816,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
                 printk(KERN_ALERT PFX
                        "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
                        qdev->index);
+                err = -EIO;
                 goto err_out_iounmap;
         }
 
@@ -3674,9 +3824,11 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
 
         /* Validate and set parameters */
         if (qdev->mac_index) {
+                ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ;
                 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
                        ETH_ALEN);
         } else {
+                ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
                 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
                        ETH_ALEN);
         }