author    Ron Mercer <ron.mercer@qlogic.com>    2007-01-03 19:26:08 -0500
committer Jeff Garzik <jeff@garzik.org>         2007-02-05 16:58:47 -0500
commit    bd36b0ac5d06378c95b5149b6df5f413a6c985a5 (patch)
tree      96652dbb364cd7cd1782dbb137a6a8813620c72f /drivers/net/qla3xxx.c
parent    83d98b401c053d760e38571595d8f4fa76ee271b (diff)
qla3xxx: Add support for Qlogic 4032 chip.
Qlogic 4032 chip is an incremental change from the 4022.

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/qla3xxx.c')
-rwxr-xr-x (was -rw-r--r--)  drivers/net/qla3xxx.c | 363
1 file changed, 301 insertions(+), 62 deletions(-)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 8844c20eac2d..2429b274f0b0 100644..100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -22,6 +22,7 @@
 #include <linux/errno.h>
 #include <linux/ioport.h>
 #include <linux/ip.h>
+#include <linux/in.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/netdevice.h>
@@ -63,6 +64,7 @@ MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
 
 static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
 	/* required last entry */
 	{0,}
 };
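For context, this table is how the PCI core binds the driver to the new chip: at probe time each installed device's vendor/device pair is matched against these entries. A minimal standalone sketch of the pattern, assuming the usual qla3xxx.h ID definitions (the numeric values shown here are illustrative):

#include <linux/module.h>
#include <linux/pci.h>

#define QL3022_DEVICE_ID 0x3022	/* assumed values; see qla3xxx.h */
#define QL3032_DEVICE_ID 0x3032

static struct pci_device_id ql3xxx_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};
MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

The probe routine later records which entry matched (qdev->device_id = pci_entry->device, in the ql3xxx_probe hunk below), and every 3022/3032 behavioral split in this patch keys off that field.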
@@ -1475,6 +1477,10 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
 			   2) << 7))
 		return -1;
 
+	if (qdev->device_id == QL3032_DEVICE_ID)
+		ql_write_page0_reg(qdev,
+			&port_regs->macMIIMgmtControlReg, 0x0f00000);
+
 	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
 	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
 
@@ -1706,18 +1712,42 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
 				   struct ob_mac_iocb_rsp *mac_rsp)
 {
 	struct ql_tx_buf_cb *tx_cb;
+	int i;
 
 	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
 	pci_unmap_single(qdev->pdev,
-			 pci_unmap_addr(tx_cb, mapaddr),
-			 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
-	dev_kfree_skb_irq(tx_cb->skb);
+			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
+			 pci_unmap_len(&tx_cb->map[0], maplen),
+			 PCI_DMA_TODEVICE);
+	tx_cb->seg_count--;
+	if (tx_cb->seg_count) {
+		for (i = 1; i < tx_cb->seg_count; i++) {
+			pci_unmap_page(qdev->pdev,
+				       pci_unmap_addr(&tx_cb->map[i],
+						      mapaddr),
+				       pci_unmap_len(&tx_cb->map[i], maplen),
+				       PCI_DMA_TODEVICE);
+		}
+	}
 	qdev->stats.tx_packets++;
 	qdev->stats.tx_bytes += tx_cb->skb->len;
+	dev_kfree_skb_irq(tx_cb->skb);
 	tx_cb->skb = NULL;
 	atomic_inc(&qdev->tx_count);
 }
 
+/*
+ * The difference between 3022 and 3032 for inbound completions:
+ * 3022 uses two buffers per completion. The first buffer contains
+ * (some) header info, the second the remainder of the headers plus
+ * the data. For this chip we reserve some space at the top of the
+ * receive buffer so that the header info in buffer one can be
+ * prepended to buffer two. Buffer two is then sent up while
+ * buffer one is returned to the hardware to be reused.
+ * 3032 receives all of its data and headers in one buffer for a
+ * simpler process. 3032 also supports checksum verification as
+ * can be seen in ql_process_macip_rx_intr().
+ */
 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
 {
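The comment above describes the 3022's split-buffer receive: headroom is reserved in the data buffer so the header fragment from buffer one can be prepended in place before the frame is handed to the stack. A userspace sketch of that pattern (illustrative only, no driver types):

#include <stdio.h>
#include <string.h>

#define HEADROOM 64	/* space reserved at the top of buffer two */

int main(void)
{
	char buf1[HEADROOM] = "ETHHDR";	/* header fragment from buffer one */
	char frame[HEADROOM + 128];	/* buffer two, allocated with headroom */
	char *data = frame + HEADROOM;	/* payload begins after the headroom */
	size_t hdr_len = strlen(buf1);

	strcpy(data, "payload...");
	/* the "skb_push" step: walk the data pointer back, copy header in front */
	data -= hdr_len;
	memcpy(data, buf1, hdr_len);

	printf("%.*s\n", (int)(hdr_len + strlen("payload...")), data);
	return 0;
}

Because the header lands in reserved headroom, no second copy of the payload is needed; buffer one can go straight back to the hardware.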
@@ -1740,14 +1770,17 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
 	qdev->small_buf_release_cnt++;
 
-	/* start of first buffer */
-	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-	lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-	qdev->lrg_buf_release_cnt++;
-	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-		qdev->lrg_buf_index = 0;
-	curr_ial_ptr++;	/* 64-bit pointers require two incs. */
-	curr_ial_ptr++;
+	if (qdev->device_id == QL3022_DEVICE_ID) {
+		/* start of first buffer (3022 only) */
+		lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+		lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+		qdev->lrg_buf_release_cnt++;
+		if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
+			qdev->lrg_buf_index = 0;
+		}
+		curr_ial_ptr++;	/* 64-bit pointers require two incs. */
+		curr_ial_ptr++;
+	}
 
 	/* start of second buffer */
 	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1778,7 +1811,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 	qdev->ndev->last_rx = jiffies;
 	lrg_buf_cb2->skb = NULL;
 
-	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+	if (qdev->device_id == QL3022_DEVICE_ID)
+		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }
 
@@ -1790,7 +1824,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
 	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
 	u32 *curr_ial_ptr;
-	struct sk_buff *skb1, *skb2;
+	struct sk_buff *skb1 = NULL, *skb2;
 	struct net_device *ndev = qdev->ndev;
 	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
 	u16 size = 0;
@@ -1806,16 +1840,20 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
 	qdev->small_buf_release_cnt++;
 
-	/* start of first buffer */
-	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-	lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-
-	qdev->lrg_buf_release_cnt++;
-	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-		qdev->lrg_buf_index = 0;
-	skb1 = lrg_buf_cb1->skb;
-	curr_ial_ptr++;	/* 64-bit pointers require two incs. */
-	curr_ial_ptr++;
+	if (qdev->device_id == QL3022_DEVICE_ID) {
+		/* start of first buffer on 3022 */
+		lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+		lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+		qdev->lrg_buf_release_cnt++;
+		if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+			qdev->lrg_buf_index = 0;
+		skb1 = lrg_buf_cb1->skb;
+		curr_ial_ptr++;	/* 64-bit pointers require two incs. */
+		curr_ial_ptr++;
+		size = ETH_HLEN;
+		if (*((u16 *) skb1->data) != 0xFFFF)
+			size += VLAN_ETH_HLEN - ETH_HLEN;
+	}
 
 	/* start of second buffer */
 	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1825,18 +1863,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
 		qdev->lrg_buf_index = 0;
 
-	qdev->stats.rx_packets++;
-	qdev->stats.rx_bytes += length;
-
-	/*
-	 * Copy the ethhdr from first buffer to second. This
-	 * is necessary for IP completions.
-	 */
-	if (*((u16 *) skb1->data) != 0xFFFF)
-		size = VLAN_ETH_HLEN;
-	else
-		size = ETH_HLEN;
-
 	skb_put(skb2, length);	/* Just the second buffer length here. */
 	pci_unmap_single(qdev->pdev,
 			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
@@ -1844,16 +1870,40 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 			 PCI_DMA_FROMDEVICE);
 	prefetch(skb2->data);
 
-	memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
-	skb2->dev = qdev->ndev;
 	skb2->ip_summed = CHECKSUM_NONE;
+	if (qdev->device_id == QL3022_DEVICE_ID) {
+		/*
+		 * Copy the ethhdr from first buffer to second. This
+		 * is necessary for 3022 IP completions.
+		 */
+		memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
+	} else {
+		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
+		if (checksum &
+		    (IB_IP_IOCB_RSP_3032_ICE |
+		     IB_IP_IOCB_RSP_3032_CE |
+		     IB_IP_IOCB_RSP_3032_NUC)) {
+			printk(KERN_ERR
+			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
+			       __func__,
+			       ((checksum &
+				 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
+				"UDP"), checksum);
+		} else if (checksum & IB_IP_IOCB_RSP_3032_TCP) {
+			skb2->ip_summed = CHECKSUM_UNNECESSARY;
+		}
+	}
+	skb2->dev = qdev->ndev;
 	skb2->protocol = eth_type_trans(skb2, qdev->ndev);
 
 	netif_receive_skb(skb2);
+	qdev->stats.rx_packets++;
+	qdev->stats.rx_bytes += length;
 	ndev->last_rx = jiffies;
 	lrg_buf_cb2->skb = NULL;
 
-	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+	if (qdev->device_id == QL3022_DEVICE_ID)
+		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }
 
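The 3032 branch above is this driver's first receive checksum-offload path: hardware flags either condemn the frame's checksum or positively verify it, and anything else stays at CHECKSUM_NONE so the stack re-verifies in software. A standalone sketch of the decision, with the IB_IP_IOCB_RSP_3032_* mask values assumed for illustration (the real definitions live in qla3xxx.h):

#include <stdint.h>
#include <stdio.h>

#define RSP_3032_ICE 0x0001	/* IP checksum error (assumed value) */
#define RSP_3032_CE  0x0002	/* TCP/UDP checksum error (assumed value) */
#define RSP_3032_NUC 0x0004	/* not checksummable by hardware (assumed) */
#define RSP_3032_TCP 0x0010	/* frame was TCP (assumed value) */

enum csum_state { CSUM_NONE, CSUM_UNNECESSARY };

static enum csum_state rx_csum_sketch(uint16_t checksum)
{
	if (checksum & (RSP_3032_ICE | RSP_3032_CE | RSP_3032_NUC)) {
		fprintf(stderr, "bad checksum on %s packet: %#x\n",
			(checksum & RSP_3032_TCP) ? "TCP" : "UDP", checksum);
		return CSUM_NONE;	/* let the stack verify in software */
	}
	if (checksum & RSP_3032_TCP)
		return CSUM_UNNECESSARY;	/* hardware verified it */
	return CSUM_NONE;
}

Defaulting to CHECKSUM_NONE is the safe choice: a frame the hardware could not positively classify is never reported as verified.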
@@ -1880,12 +1930,14 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 		break;
 
 	case OPCODE_IB_MAC_IOCB:
+	case OPCODE_IB_3032_MAC_IOCB:
 		ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
 				       net_rsp);
 		(*rx_cleaned)++;
 		break;
 
 	case OPCODE_IB_IP_IOCB:
+	case OPCODE_IB_3032_IP_IOCB:
 		ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
 					 net_rsp);
 		(*rx_cleaned)++;
@@ -2032,13 +2084,96 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
 	return IRQ_RETVAL(handled);
 }
 
+/*
+ * Get the total number of segments needed for the
+ * given number of fragments. This is necessary because
+ * outbound address lists (OAL) will be used when more than
+ * two frags are given. Each address list has 5 addr/len
+ * pairs. The 5th pair in each OAL is used to point to
+ * the next OAL if more frags are coming.
+ * That is why the frags:segment count ratio is not linear.
+ */
+static int ql_get_seg_count(unsigned short frags)
+{
+	switch (frags) {
+	case 0:  return 1;	/* just the skb->data seg */
+	case 1:  return 2;	/* skb->data + 1 frag */
+	case 2:  return 3;	/* skb->data + 2 frags */
+	case 3:  return 5;	/* skb->data + 1 frag + 1 OAL containing 2 frags */
+	case 4:  return 6;
+	case 5:  return 7;
+	case 6:  return 8;
+	case 7:  return 10;
+	case 8:  return 11;
+	case 9:  return 12;
+	case 10: return 13;
+	case 11: return 15;
+	case 12: return 16;
+	case 13: return 17;
+	case 14: return 18;
+	case 15: return 20;
+	case 16: return 21;
+	case 17: return 22;
+	case 18: return 23;
+	}
+	return -1;
+}
+
+static void ql_hw_csum_setup(struct sk_buff *skb,
+			     struct ob_mac_iocb_req *mac_iocb_ptr)
+{
+	struct ethhdr *eth;
+	struct iphdr *ip = NULL;
+	u8 offset = ETH_HLEN;
+
+	eth = (struct ethhdr *)(skb->data);
+
+	if (eth->h_proto == __constant_htons(ETH_P_IP)) {
+		ip = (struct iphdr *)&skb->data[ETH_HLEN];
+	} else if (eth->h_proto == htons(ETH_P_8021Q) &&
+		   ((struct vlan_ethhdr *)skb->data)->
+		   h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
+		ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
+		offset = VLAN_ETH_HLEN;
+	}
+
+	if (ip) {
+		if (ip->protocol == IPPROTO_TCP) {
+			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
+			mac_iocb_ptr->ip_hdr_off = offset;
+			mac_iocb_ptr->ip_hdr_len = ip->ihl;
+		} else if (ip->protocol == IPPROTO_UDP) {
+			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
+			mac_iocb_ptr->ip_hdr_off = offset;
+			mac_iocb_ptr->ip_hdr_len = ip->ihl;
+		}
+	}
+}
+
+/*
+ * The difference between 3022 and 3032 sends:
+ * 3022 only supports a simple single segment transmission.
+ * 3032 supports checksumming and scatter/gather lists (fragments).
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
+ * will be used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain followed by one or more
+ * OALs (when necessary).
+ */
 static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 	struct ql_tx_buf_cb *tx_cb;
+	u32 tot_len = skb->len;
+	struct oal *oal;
+	struct oal_entry *oal_entry;
+	int len;
 	struct ob_mac_iocb_req *mac_iocb_ptr;
 	u64 map;
+	int seg_cnt, seg = 0;
+	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
 
 	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
 		if (!netif_queue_stopped(ndev))
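The lookup table in ql_get_seg_count() follows directly from the capacity rule in its comment: the IOCB holds 3 ALPs and each OAL holds 5, but the last ALP of a full IOCB or OAL is spent on the continuation pointer, so usable capacity grows by 2, then by 4 per chained OAL. A small userspace sketch that reproduces the table from that rule (inferred from the comment; its output can be compared against the switch above):

#include <stdio.h>

static int seg_count_sketch(unsigned short frags)
{
	int entries = 1 + frags;	/* skb->data plus one entry per frag */
	int segs = entries;

	/* Continuation entries kick in past 3 entries, then every 4. */
	if (entries > 3)
		segs += 1 + (entries - 4) / 4;
	return segs;
}

int main(void)
{
	unsigned short f;

	for (f = 0; f <= 18; f++)
		printf("frags %2u -> %2d segments\n", f, seg_count_sketch(f));
	return 0;
}

For example, 3 frags means 4 entries: 2 fit in the IOCB, the 3rd ALP becomes a continuation, and 2 land in the first OAL, for 5 segments total, matching the table.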
@@ -2046,21 +2181,79 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_BUSY;
 	}
 	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
+	seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
+	if (seg_cnt == -1) {
+		printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
+		return NETDEV_TX_OK;
+	}
 	mac_iocb_ptr = tx_cb->queue_entry;
 	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
 	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
 	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
 	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
-	mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
+	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
 	tx_cb->skb = skb;
-	map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-	mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
-	mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
-	mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
-	pci_unmap_addr_set(tx_cb, mapaddr, map);
-	pci_unmap_len_set(tx_cb, maplen, skb->len);
-	atomic_dec(&qdev->tx_count);
-
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		ql_hw_csum_setup(skb, mac_iocb_ptr);
+	len = skb_headlen(skb);
+	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+	oal_entry->len = cpu_to_le32(len);
+	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
+	seg++;
+
+	if (!skb_shinfo(skb)->nr_frags) {
+		/* Terminate the last segment. */
+		oal_entry->len =
+		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
+	} else {
+		int i;
+		oal = tx_cb->oal;
+		for (i = 0; i < frag_cnt; i++, seg++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			oal_entry++;
+			if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
+			    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
+			    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
+			    (seg == 17 && seg_cnt > 18)) {
+				/* Continuation entry points to outbound address list. */
+				map = pci_map_single(qdev->pdev, oal,
+						     sizeof(struct oal),
+						     PCI_DMA_TODEVICE);
+				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+				oal_entry->len =
+				    cpu_to_le32(sizeof(struct oal) |
+						OAL_CONT_ENTRY);
+				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
+						   map);
+				pci_unmap_len_set(&tx_cb->map[seg], maplen,
+						  len);
+				oal_entry = (struct oal_entry *)oal;
+				oal++;
+				seg++;
+			}
+
+			map = pci_map_page(qdev->pdev, frag->page,
+					   frag->page_offset, frag->size,
+					   PCI_DMA_TODEVICE);
+			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+			oal_entry->len = cpu_to_le32(frag->size);
+			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+			pci_unmap_len_set(&tx_cb->map[seg], maplen,
+					  frag->size);
+		}
+		/* Terminate the last segment. */
+		oal_entry->len =
+		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
+	}
+	wmb();
 	qdev->req_producer_index++;
 	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
 		qdev->req_producer_index = 0;
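As a worked example of the continuation checks in the loop above (seg == 2/7/12/17), consider an skb with 7 page fragments, i.e. seg_cnt = 10 per ql_get_seg_count(). The resulting chain layout, shown here purely for illustration:

/*
 *   seg 0      IOCB ALP 1: skb->data (headlen)
 *   seg 1      IOCB ALP 2: frag 0
 *   seg 2      IOCB ALP 3: continuation -> OAL A   (seg == 2, seg_cnt > 3)
 *   seg 3..6   OAL A ALPs 1-4: frags 1-4
 *   seg 7      OAL A ALP 5: continuation -> OAL B  (seg == 7, seg_cnt > 8)
 *   seg 8..9   OAL B ALPs 1-2: frags 5-6, the last flagged OAL_LAST_ENTRY
 */

Each continuation consumes a seg slot of its own, which is exactly why the frags-to-segments mapping in ql_get_seg_count() jumps by 2 at every chain boundary.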
@@ -2074,8 +2267,10 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 	printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
 	       ndev->name, qdev->req_producer_index, skb->len);
 
+	atomic_dec(&qdev->tx_count);
 	return NETDEV_TX_OK;
 }
+
 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 {
 	qdev->req_q_size =
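ql_hw_csum_setup(), called earlier in this send path, locates the IP header at ETH_HLEN for plain Ethernet frames and at VLAN_ETH_HLEN when an 802.1Q tag is present. A standalone userspace sketch of that offset logic (assumes a frame of at least 18 bytes; the constants are redefined locally for the example):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_HLEN	14
#define VLAN_ETH_HLEN	18
#define ETH_P_IP	0x0800
#define ETH_P_8021Q	0x8100

/* Returns the IP header offset, or -1 for non-IP frames. */
static int ip_hdr_offset(const uint8_t *frame)
{
	uint16_t proto, inner;

	memcpy(&proto, frame + 12, sizeof(proto));	/* EtherType field */
	if (ntohs(proto) == ETH_P_IP)
		return ETH_HLEN;
	memcpy(&inner, frame + 16, sizeof(inner));	/* encapsulated proto */
	if (ntohs(proto) == ETH_P_8021Q && ntohs(inner) == ETH_P_IP)
		return VLAN_ETH_HLEN;
	return -1;
}

The driver stores this offset (and ip->ihl) in the IOCB so the 3032 can compute the TCP or UDP checksum in hardware.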
@@ -2359,7 +2554,22 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
 	return 0;
 }
 
-static void ql_create_send_free_list(struct ql3_adapter *qdev)
+static void ql_free_send_free_list(struct ql3_adapter *qdev)
+{
+	struct ql_tx_buf_cb *tx_cb;
+	int i;
+
+	tx_cb = &qdev->tx_buf[0];
+	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+		if (tx_cb->oal) {
+			kfree(tx_cb->oal);
+			tx_cb->oal = NULL;
+		}
+		tx_cb++;
+	}
+}
+
+static int ql_create_send_free_list(struct ql3_adapter *qdev)
 {
 	struct ql_tx_buf_cb *tx_cb;
 	int i;
@@ -2368,11 +2578,16 @@ static void ql_create_send_free_list(struct ql3_adapter *qdev)
 
 	/* Create free list of transmit buffers */
 	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+
 		tx_cb = &qdev->tx_buf[i];
 		tx_cb->skb = NULL;
 		tx_cb->queue_entry = req_q_curr;
 		req_q_curr++;
+		tx_cb->oal = kmalloc(512, GFP_KERNEL);
+		if (tx_cb->oal == NULL)
+			return -1;
 	}
+	return 0;
 }
 
 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
@@ -2447,12 +2662,14 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
 
 	/* Initialize the large buffer queue. */
 	ql_init_large_buffers(qdev);
-	ql_create_send_free_list(qdev);
+	if (ql_create_send_free_list(qdev))
+		goto err_free_list;
 
 	qdev->rsp_current = qdev->rsp_q_virt_addr;
 
 	return 0;
-
+err_free_list:
+	ql_free_send_free_list(qdev);
 err_small_buffers:
 	ql_free_buffer_queues(qdev);
 err_buffer_queues:
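The new err_free_list label slots into the function's existing unwind ladder: each allocation step gets a label, and a failure at step N jumps to the label that releases steps N-1 down to 1 in reverse order. A generic sketch of the idiom, with hypothetical alloc/free stand-ins:

#include <stdlib.h>

static void *a, *b, *c;	/* stand-ins for the driver's queues and lists */

static int alloc_step(void **p)
{
	*p = malloc(64);
	return *p ? 0 : -1;
}

int setup_sketch(void)
{
	if (alloc_step(&a))
		goto err;
	if (alloc_step(&b))
		goto err_a;
	if (alloc_step(&c))
		goto err_b;
	return 0;

err_b:			/* failure at step 3: free steps 2, then 1 */
	free(b);
err_a:
	free(a);
err:
	return -1;
}

Note that ql_create_send_free_list() frees nothing itself on failure; the partially filled oal array is cleaned by ql_free_send_free_list(), which tolerates NULL entries, keeping the unwind logic in one place.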
@@ -2468,6 +2685,7 @@ err_req_rsp:
 
 static void ql_free_mem_resources(struct ql3_adapter *qdev)
 {
+	ql_free_send_free_list(qdev);
 	ql_free_large_buffers(qdev);
 	ql_free_small_buffers(qdev);
 	ql_free_buffer_queues(qdev);
@@ -2766,11 +2984,20 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
 	}
 
 	/* Enable Ethernet Function */
-	value =
-	    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
-	     PORT_CONTROL_HH);
-	ql_write_page0_reg(qdev, &port_regs->portControl,
-			   ((value << 16) | value));
+	if (qdev->device_id == QL3032_DEVICE_ID) {
+		value =
+		    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
+		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4);
+		ql_write_page0_reg(qdev, &port_regs->functionControl,
+				   ((value << 16) | value));
+	} else {
+		value =
+		    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
+		     PORT_CONTROL_HH);
+		ql_write_page0_reg(qdev, &port_regs->portControl,
+				   ((value << 16) | value));
+	}
+
 
 out:
 	return status;
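Note the ((value << 16) | value) pattern, used for both portControl and the 3032's functionControl: the driver duplicates the 16-bit value into the upper halfword, which on this hardware appears to act as a per-bit write-enable mask, so only the bits being set are actually modified. A sketch of the idiom (the mask-enable semantics are an assumption based on the driver's consistent usage, not documented here):

#include <stdint.h>

/* Build a masked register write: enable (upper 16) and set (lower 16)
 * the same bits, leaving all other register bits untouched. */
static inline uint32_t masked_reg_value(uint16_t bits)
{
	return ((uint32_t)bits << 16) | bits;
}

/* e.g. ql_write_page0_reg(qdev, &port_regs->portControl,
 *                         masked_reg_value(PORT_CONTROL_EF | PORT_CONTROL_ET)); */

This lets two functions sharing one port register update their own bits without a read-modify-write race.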
@@ -2917,8 +3144,10 @@ static void ql_display_dev_info(struct net_device *ndev)
 	struct pci_dev *pdev = qdev->pdev;
 
 	printk(KERN_INFO PFX
-	       "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
-	       DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot);
+	       "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
+	       DRV_NAME, qdev->index, qdev->chip_rev_id,
+	       (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
+	       qdev->pci_slot);
 	printk(KERN_INFO PFX
 	       "%s Interface.\n",
 	       test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
@@ -3212,15 +3441,22 @@ static void ql_reset_work(struct work_struct *work)
 	 * Loop through the active list and return the skb.
 	 */
 	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+		int j;
 		tx_cb = &qdev->tx_buf[i];
 		if (tx_cb->skb) {
-
 			printk(KERN_DEBUG PFX
 			       "%s: Freeing lost SKB.\n",
 			       qdev->ndev->name);
 			pci_unmap_single(qdev->pdev,
-					 pci_unmap_addr(tx_cb, mapaddr),
-					 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
+					 pci_unmap_addr(&tx_cb->map[0], mapaddr),
+					 pci_unmap_len(&tx_cb->map[0], maplen),
+					 PCI_DMA_TODEVICE);
+			for (j = 1; j < tx_cb->seg_count; j++) {
+				pci_unmap_page(qdev->pdev,
+					       pci_unmap_addr(&tx_cb->map[j], mapaddr),
+					       pci_unmap_len(&tx_cb->map[j], maplen),
+					       PCI_DMA_TODEVICE);
+			}
 			dev_kfree_skb(tx_cb->skb);
 			tx_cb->skb = NULL;
 		}
@@ -3379,21 +3615,24 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
 	SET_MODULE_OWNER(ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
-	if (pci_using_dac)
-		ndev->features |= NETIF_F_HIGHDMA;
-
 	pci_set_drvdata(pdev, ndev);
 
 	qdev = netdev_priv(ndev);
 	qdev->index = cards_found;
 	qdev->ndev = ndev;
 	qdev->pdev = pdev;
+	qdev->device_id = pci_entry->device;
 	qdev->port_link_state = LS_DOWN;
 	if (msi)
 		qdev->msi = 1;
 
 	qdev->msg_enable = netif_msg_init(debug, default_msg);
 
+	if (pci_using_dac)
+		ndev->features |= NETIF_F_HIGHDMA;
+	if (qdev->device_id == QL3032_DEVICE_ID)
+		ndev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG);
+
 	qdev->mem_map_registers =
 		ioremap_nocache(pci_resource_start(pdev, 1),
 				pci_resource_len(qdev->pdev, 1));