author    Ron Mercer <ron.mercer@qlogic.com>  2007-01-03 19:26:08 -0500
committer Jeff Garzik <jeff@garzik.org>  2007-02-05 16:58:47 -0500
commit    bd36b0ac5d06378c95b5149b6df5f413a6c985a5 (patch)
tree      96652dbb364cd7cd1782dbb137a6a8813620c72f /drivers
parent    83d98b401c053d760e38571595d8f4fa76ee271b (diff)
qla3xxx: Add support for Qlogic 4032 chip.
The Qlogic 4032 chip is an incremental change from the 4022.

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers')
 drivers/net/qla3xxx.c (mode 100644 -> 100755) | 363 +++++++++++++++---------
 drivers/net/qla3xxx.h (mode 100644 -> 100755) |  88 ++++++++---
 2 files changed, 379 insertions(+), 72 deletions(-)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
old mode 100644
new mode 100755
index 8844c20eac2d..2429b274f0b0
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -22,6 +22,7 @@
 #include <linux/errno.h>
 #include <linux/ioport.h>
 #include <linux/ip.h>
+#include <linux/in.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/netdevice.h>
@@ -63,6 +64,7 @@ MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
 
 static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
 	/* required last entry */
 	{0,}
 };
@@ -1475,6 +1477,10 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
 			   2) << 7))
 		return -1;
 
+	if (qdev->device_id == QL3032_DEVICE_ID)
+		ql_write_page0_reg(qdev,
+				   &port_regs->macMIIMgmtControlReg, 0x0f00000);
+
 	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
 	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
 
@@ -1706,18 +1712,42 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
 			       struct ob_mac_iocb_rsp *mac_rsp)
 {
 	struct ql_tx_buf_cb *tx_cb;
+	int i;
 
 	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
 	pci_unmap_single(qdev->pdev,
-			 pci_unmap_addr(tx_cb, mapaddr),
-			 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
-	dev_kfree_skb_irq(tx_cb->skb);
+			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
+			 pci_unmap_len(&tx_cb->map[0], maplen),
+			 PCI_DMA_TODEVICE);
+	tx_cb->seg_count--;
+	if (tx_cb->seg_count) {
+		for (i = 1; i < tx_cb->seg_count; i++) {
+			pci_unmap_page(qdev->pdev,
+				       pci_unmap_addr(&tx_cb->map[i], mapaddr),
+				       pci_unmap_len(&tx_cb->map[i], maplen),
+				       PCI_DMA_TODEVICE);
+		}
+	}
 	qdev->stats.tx_packets++;
 	qdev->stats.tx_bytes += tx_cb->skb->len;
+	dev_kfree_skb_irq(tx_cb->skb);
 	tx_cb->skb = NULL;
 	atomic_inc(&qdev->tx_count);
 }
 
+/*
+ * The difference between 3022 and 3032 for inbound completions:
+ * 3022 uses two buffers per completion.  The first buffer contains
+ * (some) header info, the second the remainder of the headers plus
+ * the data.  For this chip we reserve some space at the top of the
+ * receive buffer so that the header info in buffer one can be
+ * prepended to buffer two.  Buffer two is then sent up while
+ * buffer one is returned to the hardware to be reused.
+ * 3032 receives all of its data and headers in one buffer for a
+ * simpler process.  3032 also supports checksum verification as
+ * can be seen in ql_process_macip_rx_intr().
+ */
 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
 {
@@ -1740,14 +1770,17 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
 	qdev->small_buf_release_cnt++;
 
-	/* start of first buffer */
-	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-	lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-	qdev->lrg_buf_release_cnt++;
-	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-		qdev->lrg_buf_index = 0;
-	curr_ial_ptr++;	/* 64-bit pointers require two incs. */
-	curr_ial_ptr++;
+	if (qdev->device_id == QL3022_DEVICE_ID) {
+		/* start of first buffer (3022 only) */
+		lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+		lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+		qdev->lrg_buf_release_cnt++;
+		if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
+			qdev->lrg_buf_index = 0;
+		}
+		curr_ial_ptr++;	/* 64-bit pointers require two incs. */
+		curr_ial_ptr++;
+	}
 
 	/* start of second buffer */
 	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1778,7 +1811,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 	qdev->ndev->last_rx = jiffies;
 	lrg_buf_cb2->skb = NULL;
 
-	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+	if (qdev->device_id == QL3022_DEVICE_ID)
+		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }
 
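
The double curr_ial_ptr increments above reflect how the inbound address list (IAL) stores 64-bit buffer addresses as pairs of 32-bit words: a 3022 completion carries two such addresses (header buffer and data buffer), a 3032 completion only one. A minimal standalone sketch of that walk, assuming the low word comes first as the *_phy_addr_low reads suggest (values are illustrative):

    /* Hedged sketch: consuming 64-bit addresses from a 32-bit IAL. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ial[4] = { 0x1000, 0x0, 0x2000, 0x0 };
        uint32_t *curr = ial;
        int addrs = 2;          /* 3022: hdr + data bufs; 3032 would be 1 */

        while (addrs--) {
            uint64_t addr = ((uint64_t)curr[1] << 32) | curr[0];
            printf("buffer at 0x%llx\n", (unsigned long long)addr);
            curr += 2;          /* 64-bit pointers require two incs */
        }
        return 0;
    }
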
@@ -1790,7 +1824,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
 	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
 	u32 *curr_ial_ptr;
-	struct sk_buff *skb1, *skb2;
+	struct sk_buff *skb1 = NULL, *skb2;
 	struct net_device *ndev = qdev->ndev;
 	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
 	u16 size = 0;
@@ -1806,16 +1840,20 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 	qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
 	qdev->small_buf_release_cnt++;
 
-	/* start of first buffer */
-	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
-	lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
-
-	qdev->lrg_buf_release_cnt++;
-	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
-		qdev->lrg_buf_index = 0;
-	skb1 = lrg_buf_cb1->skb;
-	curr_ial_ptr++;	/* 64-bit pointers require two incs. */
-	curr_ial_ptr++;
+	if (qdev->device_id == QL3022_DEVICE_ID) {
+		/* start of first buffer on 3022 */
+		lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
+		lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
+		qdev->lrg_buf_release_cnt++;
+		if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
+			qdev->lrg_buf_index = 0;
+		skb1 = lrg_buf_cb1->skb;
+		curr_ial_ptr++;	/* 64-bit pointers require two incs. */
+		curr_ial_ptr++;
+		size = ETH_HLEN;
+		if (*((u16 *) skb1->data) != 0xFFFF)
+			size += VLAN_ETH_HLEN - ETH_HLEN;
+	}
 
 	/* start of second buffer */
 	lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1825,18 +1863,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 	if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
 		qdev->lrg_buf_index = 0;
 
-	qdev->stats.rx_packets++;
-	qdev->stats.rx_bytes += length;
-
-	/*
-	 * Copy the ethhdr from first buffer to second. This
-	 * is necessary for IP completions.
-	 */
-	if (*((u16 *) skb1->data) != 0xFFFF)
-		size = VLAN_ETH_HLEN;
-	else
-		size = ETH_HLEN;
-
 	skb_put(skb2, length);	/* Just the second buffer length here. */
 	pci_unmap_single(qdev->pdev,
 			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
@@ -1844,16 +1870,40 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 			 PCI_DMA_FROMDEVICE);
 	prefetch(skb2->data);
 
-	memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
-	skb2->dev = qdev->ndev;
 	skb2->ip_summed = CHECKSUM_NONE;
+	if (qdev->device_id == QL3022_DEVICE_ID) {
+		/*
+		 * Copy the ethhdr from first buffer to second. This
+		 * is necessary for 3022 IP completions.
+		 */
+		memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
+	} else {
+		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
+		if (checksum &
+		    (IB_IP_IOCB_RSP_3032_ICE |
+		     IB_IP_IOCB_RSP_3032_CE |
+		     IB_IP_IOCB_RSP_3032_NUC)) {
+			printk(KERN_ERR
+			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
+			       __func__,
+			       ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
+				"TCP" : "UDP"), checksum);
+		} else if (checksum & IB_IP_IOCB_RSP_3032_TCP) {
+			skb2->ip_summed = CHECKSUM_UNNECESSARY;
+		}
+	}
+	skb2->dev = qdev->ndev;
 	skb2->protocol = eth_type_trans(skb2, qdev->ndev);
 
 	netif_receive_skb(skb2);
+	qdev->stats.rx_packets++;
+	qdev->stats.rx_bytes += length;
 	ndev->last_rx = jiffies;
 	lrg_buf_cb2->skb = NULL;
 
-	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+	if (qdev->device_id == QL3022_DEVICE_ID)
+		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }
 
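
The 3032 checksum handling added above reduces to a small decision table: any of the ICE/CE/NUC bits (presumably IP-checksum-error, checksum-error, no-checksum; the printk treats them all as a bad checksum) leaves the frame at CHECKSUM_NONE for software verification, a clean TCP completion becomes CHECKSUM_UNNECESSARY, and UDP is deliberately left to the software path. A standalone sketch of that logic using the IB_IP_IOCB_RSP_3032_* values this patch defines (classify() itself is hypothetical, not part of the driver):

    #include <assert.h>

    #define IB_IP_IOCB_RSP_3032_ICE 0x01
    #define IB_IP_IOCB_RSP_3032_CE  0x02
    #define IB_IP_IOCB_RSP_3032_NUC 0x04
    #define IB_IP_IOCB_RSP_3032_UDP 0x08
    #define IB_IP_IOCB_RSP_3032_TCP 0x10

    enum csum { CSUM_NONE, CSUM_UNNECESSARY };

    static enum csum classify(unsigned short checksum)
    {
        if (checksum & (IB_IP_IOCB_RSP_3032_ICE |
                        IB_IP_IOCB_RSP_3032_CE |
                        IB_IP_IOCB_RSP_3032_NUC))
            return CSUM_NONE;            /* error bits win */
        if (checksum & IB_IP_IOCB_RSP_3032_TCP)
            return CSUM_UNNECESSARY;     /* hardware-verified TCP */
        return CSUM_NONE;                /* UDP/other: software path */
    }

    int main(void)
    {
        assert(classify(IB_IP_IOCB_RSP_3032_TCP) == CSUM_UNNECESSARY);
        assert(classify(IB_IP_IOCB_RSP_3032_TCP |
                        IB_IP_IOCB_RSP_3032_CE) == CSUM_NONE);
        assert(classify(IB_IP_IOCB_RSP_3032_UDP) == CSUM_NONE);
        return 0;
    }
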
@@ -1880,12 +1930,14 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 		break;
 
 	case OPCODE_IB_MAC_IOCB:
+	case OPCODE_IB_3032_MAC_IOCB:
 		ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
 				       net_rsp);
 		(*rx_cleaned)++;
 		break;
 
 	case OPCODE_IB_IP_IOCB:
+	case OPCODE_IB_3032_IP_IOCB:
 		ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
 					 net_rsp);
 		(*rx_cleaned)++;
@@ -2032,13 +2084,96 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
 	return IRQ_RETVAL(handled);
 }
 
+/*
+ * Get the total number of segments needed for the
+ * given number of fragments.  This is necessary because
+ * outbound address lists (OAL) will be used when more than
+ * two frags are given.  Each address list has 5 addr/len
+ * pairs.  The 5th pair in each OAL is used to point to
+ * the next OAL if more frags are coming.
+ * That is why the frags:segment count ratio is not linear.
+ */
+static int ql_get_seg_count(unsigned short frags)
+{
+	switch (frags) {
+	case 0:  return 1;	/* just the skb->data seg */
+	case 1:  return 2;	/* skb->data + 1 frag */
+	case 2:  return 3;	/* skb->data + 2 frags */
+	case 3:  return 5;	/* skb->data + 1 frag + 1 OAL containing 2 frags */
+	case 4:  return 6;
+	case 5:  return 7;
+	case 6:  return 8;
+	case 7:  return 10;
+	case 8:  return 11;
+	case 9:  return 12;
+	case 10: return 13;
+	case 11: return 15;
+	case 12: return 16;
+	case 13: return 17;
+	case 14: return 18;
+	case 15: return 20;
+	case 16: return 21;
+	case 17: return 22;
+	case 18: return 23;
+	}
+	return -1;
+}
+
+static void ql_hw_csum_setup(struct sk_buff *skb,
+			     struct ob_mac_iocb_req *mac_iocb_ptr)
+{
+	struct ethhdr *eth;
+	struct iphdr *ip = NULL;
+	u8 offset = ETH_HLEN;
+
+	eth = (struct ethhdr *)(skb->data);
+
+	if (eth->h_proto == __constant_htons(ETH_P_IP)) {
+		ip = (struct iphdr *)&skb->data[ETH_HLEN];
+	} else if (eth->h_proto == htons(ETH_P_8021Q) &&
+		   ((struct vlan_ethhdr *)skb->data)->
+		   h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
+		ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
+		offset = VLAN_ETH_HLEN;
+	}
+
+	if (ip) {
+		if (ip->protocol == IPPROTO_TCP) {
+			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
+			mac_iocb_ptr->ip_hdr_off = offset;
+			mac_iocb_ptr->ip_hdr_len = ip->ihl;
+		} else if (ip->protocol == IPPROTO_UDP) {
+			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
+			mac_iocb_ptr->ip_hdr_off = offset;
+			mac_iocb_ptr->ip_hdr_len = ip->ihl;
+		}
+	}
+}
+
+/*
+ * The difference between 3022 and 3032 sends:
+ * 3022 only supports a simple single segment transmission.
+ * 3032 supports checksumming and scatter/gather lists (fragments).
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
+ * will be used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain followed by one or more
+ * OALs (when necessary).
+ */
 static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 	struct ql_tx_buf_cb *tx_cb;
+	u32 tot_len = skb->len;
+	struct oal *oal;
+	struct oal_entry *oal_entry;
+	int len;
 	struct ob_mac_iocb_req *mac_iocb_ptr;
 	u64 map;
+	int seg_cnt, seg = 0;
+	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
 
 	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
 		if (!netif_queue_stopped(ndev))
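
The ql_get_seg_count() table above follows a closed form: one segment for skb->data, one per fragment, plus one continuation entry each time the chain spills into a further OAL (every four fragments past what the IOCB itself can hold). A quick cross-check of the table; the closed-form helper is hypothetical, not part of the patch:

    #include <assert.h>

    static int seg_count_closed_form(int frags)
    {
        if (frags < 3)
            return frags + 1;                /* fits in the IOCB ALPs */
        return frags + 2 + (frags - 3) / 4;  /* +1 per OAL hop */
    }

    int main(void)
    {
        /* the table from ql_get_seg_count() above */
        static const int table[] = { 1, 2, 3, 5, 6, 7, 8, 10, 11, 12,
                                     13, 15, 16, 17, 18, 20, 21, 22, 23 };
        int f;

        for (f = 0; f <= 18; f++)
            assert(seg_count_closed_form(f) == table[f]);
        return 0;
    }
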
@@ -2046,21 +2181,79 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_BUSY;
 	}
 	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
+	seg_cnt = tx_cb->seg_count =
+	    ql_get_seg_count(skb_shinfo(skb)->nr_frags);
+	if (seg_cnt == -1) {
+		printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
+		return NETDEV_TX_OK;
+	}
 	mac_iocb_ptr = tx_cb->queue_entry;
 	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
 	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
 	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
 	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
-	mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
+	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
 	tx_cb->skb = skb;
-	map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-	mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
-	mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
-	mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
-	pci_unmap_addr_set(tx_cb, mapaddr, map);
-	pci_unmap_len_set(tx_cb, maplen, skb->len);
-	atomic_dec(&qdev->tx_count);
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		ql_hw_csum_setup(skb, mac_iocb_ptr);
+	len = skb_headlen(skb);
+	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+	oal_entry->len = cpu_to_le32(len);
+	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
+	seg++;
+
+	if (!skb_shinfo(skb)->nr_frags) {
+		/* Terminate the last segment. */
+		oal_entry->len =
+		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
+	} else {
+		int i;
+		oal = tx_cb->oal;
+		for (i = 0; i < frag_cnt; i++, seg++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			oal_entry++;
+			if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
+			    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
+			    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
+			    (seg == 17 && seg_cnt > 18)) {
+				/* Continuation entry points to outbound address list. */
+				map = pci_map_single(qdev->pdev, oal,
+						     sizeof(struct oal),
+						     PCI_DMA_TODEVICE);
+				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+				oal_entry->len =
+				    cpu_to_le32(sizeof(struct oal) |
+						OAL_CONT_ENTRY);
+				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
+						   map);
+				pci_unmap_len_set(&tx_cb->map[seg], maplen,
+						  len);
+				oal_entry = (struct oal_entry *)oal;
+				oal++;
+				seg++;
+			}
 
+			map = pci_map_page(qdev->pdev, frag->page,
+					   frag->page_offset, frag->size,
+					   PCI_DMA_TODEVICE);
+			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+			oal_entry->len = cpu_to_le32(frag->size);
+			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+			pci_unmap_len_set(&tx_cb->map[seg], maplen,
+					  frag->size);
+		}
+		/* Terminate the last segment. */
+		oal_entry->len =
+		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
+	}
+	wmb();
 	qdev->req_producer_index++;
 	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
 		qdev->req_producer_index = 0;
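
The magic seg values 2, 7, 12 and 17 tested above are where a continuation ALP must be emitted: per the comment before ql3xxx_send(), the IOCB contributes 3 ALPs (seg 0-2) and each OAL 5 more, with the last slot of each group reserved for the link whenever further entries follow, so the link slots land at 2 + 5*n. A small sketch re-deriving them rather than hard-coding (illustrative only):

    #include <assert.h>

    /* Walk segment slots the way the send loop above does and record
     * where a continuation slot is consumed. */
    int main(void)
    {
        int expected[4] = { 2, 7, 12, 17 }, found[4];
        int seg, n = 0;

        for (seg = 0; seg <= 17; seg++) {
            /* slot 2 ends the IOCB; thereafter every 5th slot ends an OAL */
            if (seg == 2 || (seg > 2 && (seg - 2) % 5 == 0))
                found[n++] = seg;
        }
        assert(n == 4);
        for (n = 0; n < 4; n++)
            assert(found[n] == expected[n]);
        return 0;
    }
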
@@ -2074,8 +2267,10 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 	printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
 	       ndev->name, qdev->req_producer_index, skb->len);
 
+	atomic_dec(&qdev->tx_count);
 	return NETDEV_TX_OK;
 }
+
 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 {
 	qdev->req_q_size =
@@ -2359,7 +2554,22 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
 	return 0;
 }
 
-static void ql_create_send_free_list(struct ql3_adapter *qdev)
+static void ql_free_send_free_list(struct ql3_adapter *qdev)
+{
+	struct ql_tx_buf_cb *tx_cb;
+	int i;
+
+	tx_cb = &qdev->tx_buf[0];
+	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+		if (tx_cb->oal) {
+			kfree(tx_cb->oal);
+			tx_cb->oal = NULL;
+		}
+		tx_cb++;
+	}
+}
+
+static int ql_create_send_free_list(struct ql3_adapter *qdev)
 {
 	struct ql_tx_buf_cb *tx_cb;
 	int i;
@@ -2368,11 +2578,16 @@ static void ql_create_send_free_list(struct ql3_adapter *qdev)
 
 	/* Create free list of transmit buffers */
 	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+
 		tx_cb = &qdev->tx_buf[i];
 		tx_cb->skb = NULL;
 		tx_cb->queue_entry = req_q_curr;
 		req_q_curr++;
+		tx_cb->oal = kmalloc(512, GFP_KERNEL);
+		if (tx_cb->oal == NULL)
+			return -1;
 	}
+	return 0;
 }
 
 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
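
The kmalloc(512, GFP_KERNEL) per tx_cb above appears sized against the worst-case OAL chain: struct oal_entry is four u32s (16 bytes), so a struct oal is 80 bytes, and with MAX_SKB_FRAGS = 18 (the usual value with 4 KiB pages; an assumption here) MAX_OAL_CNT comes to 5, i.e. 400 bytes, leaving headroom in 512. The arithmetic as a standalone check:

    #include <assert.h>

    /* assumes 32-bit unsigned int, as on the platforms this driver targets */
    struct oal_entry { unsigned int dma_lo, dma_hi, len, reserved; };
    struct oal { struct oal_entry oal_entry[5]; };

    #define MAX_SKB_FRAGS 18                    /* assumes 4 KiB pages */
    #define MAX_OAL_CNT ((MAX_SKB_FRAGS - 1) / 4 + 1)

    int main(void)
    {
        assert(sizeof(struct oal) == 80);       /* 5 entries x 16 bytes */
        assert(MAX_OAL_CNT == 5);
        assert(MAX_OAL_CNT * sizeof(struct oal) == 400);  /* <= 512 */
        return 0;
    }
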
@@ -2447,12 +2662,14 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
 
 	/* Initialize the large buffer queue. */
 	ql_init_large_buffers(qdev);
-	ql_create_send_free_list(qdev);
+	if (ql_create_send_free_list(qdev))
+		goto err_free_list;
 
 	qdev->rsp_current = qdev->rsp_q_virt_addr;
 
 	return 0;
-
+err_free_list:
+	ql_free_send_free_list(qdev);
 err_small_buffers:
 	ql_free_buffer_queues(qdev);
 err_buffer_queues:
@@ -2468,6 +2685,7 @@ err_req_rsp:
 
 static void ql_free_mem_resources(struct ql3_adapter *qdev)
 {
+	ql_free_send_free_list(qdev);
 	ql_free_large_buffers(qdev);
 	ql_free_small_buffers(qdev);
 	ql_free_buffer_queues(qdev);
@@ -2766,11 +2984,20 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
 	}
 
 	/* Enable Ethernet Function */
-	value =
-	    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
-	     PORT_CONTROL_HH);
-	ql_write_page0_reg(qdev, &port_regs->portControl,
-			   ((value << 16) | value));
+	if (qdev->device_id == QL3032_DEVICE_ID) {
+		value = (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
+			 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4);
+		ql_write_page0_reg(qdev, &port_regs->functionControl,
+				   ((value << 16) | value));
+	} else {
+		value = (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
+			 PORT_CONTROL_HH);
+		ql_write_page0_reg(qdev, &port_regs->portControl,
+				   ((value << 16) | value));
+	}
+
 
 out:
 	return status;
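
The ((value << 16) | value) idiom, used here for both the portControl and the new functionControl writes, looks like a write-enable-mask convention: the upper 16 bits select which of the lower 16 control bits the write actually changes, so unrelated bits are left alone. That reading is an assumption, but under it a helper would look like:

    #include <assert.h>

    /* Assumed convention: bits 31:16 are a write-enable mask for bits 15:0. */
    static unsigned int ql_ctrl_word(unsigned short bits)
    {
        return ((unsigned int)bits << 16) | bits;
    }

    int main(void)
    {
        /* e.g. QL3032_PORT_CONTROL_EF (0x0020) -> 0x00200020 */
        assert(ql_ctrl_word(0x0020) == 0x00200020u);
        return 0;
    }
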
@@ -2917,8 +3144,10 @@ static void ql_display_dev_info(struct net_device *ndev)
 	struct pci_dev *pdev = qdev->pdev;
 
 	printk(KERN_INFO PFX
-	       "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
-	       DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot);
+	       "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
+	       DRV_NAME, qdev->index, qdev->chip_rev_id,
+	       (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
+	       qdev->pci_slot);
 	printk(KERN_INFO PFX
 	       "%s Interface.\n",
 	       test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
@@ -3212,15 +3441,22 @@ static void ql_reset_work(struct work_struct *work)
 	 * Loop through the active list and return the skb.
 	 */
 	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+		int j;
 		tx_cb = &qdev->tx_buf[i];
 		if (tx_cb->skb) {
-
 			printk(KERN_DEBUG PFX
 			       "%s: Freeing lost SKB.\n",
 			       qdev->ndev->name);
 			pci_unmap_single(qdev->pdev,
-					 pci_unmap_addr(tx_cb, mapaddr),
-					 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
+					 pci_unmap_addr(&tx_cb->map[0], mapaddr),
+					 pci_unmap_len(&tx_cb->map[0], maplen),
+					 PCI_DMA_TODEVICE);
+			for (j = 1; j < tx_cb->seg_count; j++) {
+				pci_unmap_page(qdev->pdev,
+					       pci_unmap_addr(&tx_cb->map[j], mapaddr),
+					       pci_unmap_len(&tx_cb->map[j], maplen),
+					       PCI_DMA_TODEVICE);
+			}
 			dev_kfree_skb(tx_cb->skb);
 			tx_cb->skb = NULL;
 		}
@@ -3379,21 +3615,24 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
 	SET_MODULE_OWNER(ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
-	if (pci_using_dac)
-		ndev->features |= NETIF_F_HIGHDMA;
-
 	pci_set_drvdata(pdev, ndev);
 
 	qdev = netdev_priv(ndev);
 	qdev->index = cards_found;
 	qdev->ndev = ndev;
 	qdev->pdev = pdev;
+	qdev->device_id = pci_entry->device;
 	qdev->port_link_state = LS_DOWN;
 	if (msi)
 		qdev->msi = 1;
 
 	qdev->msg_enable = netif_msg_init(debug, default_msg);
 
+	if (pci_using_dac)
+		ndev->features |= NETIF_F_HIGHDMA;
+	if (qdev->device_id == QL3032_DEVICE_ID)
+		ndev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG);
+
 	qdev->mem_map_registers =
 	    ioremap_nocache(pci_resource_start(pdev, 1),
 			    pci_resource_len(qdev->pdev, 1));
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
old mode 100644
new mode 100755
index ea94de7fd071..b2d76ea68827
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -21,7 +21,9 @@
 
 #define OPCODE_UPDATE_NCB_IOCB		0xF0
 #define OPCODE_IB_MAC_IOCB		0xF9
+#define OPCODE_IB_3032_MAC_IOCB		0x09
 #define OPCODE_IB_IP_IOCB		0xFA
+#define OPCODE_IB_3032_IP_IOCB		0x0A
 #define OPCODE_IB_TCP_IOCB		0xFB
 #define OPCODE_DUMP_PROTO_IOCB		0xFE
 #define OPCODE_BUFFER_ALERT_IOCB	0xFB
@@ -37,18 +39,23 @@
 struct ob_mac_iocb_req {
 	u8 opcode;
 	u8 flags;
-#define OB_MAC_IOCB_REQ_MA	0xC0
-#define OB_MAC_IOCB_REQ_F	0x20
-#define OB_MAC_IOCB_REQ_X	0x10
+#define OB_MAC_IOCB_REQ_MA	0xe0
+#define OB_MAC_IOCB_REQ_F	0x10
+#define OB_MAC_IOCB_REQ_X	0x08
 #define OB_MAC_IOCB_REQ_D	0x02
 #define OB_MAC_IOCB_REQ_I	0x01
-	__le16 reserved0;
+	u8 flags1;
+#define OB_3032MAC_IOCB_REQ_IC	0x04
+#define OB_3032MAC_IOCB_REQ_TC	0x02
+#define OB_3032MAC_IOCB_REQ_UC	0x01
+	u8 reserved0;
 
 	__le32 transaction_id;
 	__le16 data_len;
-	__le16 reserved1;
+	u8 ip_hdr_off;
+	u8 ip_hdr_len;
+	__le32 reserved1;
 	__le32 reserved2;
-	__le32 reserved3;
 	__le32 buf_addr0_low;
 	__le32 buf_addr0_high;
 	__le32 buf_0_len;
@@ -58,8 +65,8 @@ struct ob_mac_iocb_req {
 	__le32 buf_addr2_low;
 	__le32 buf_addr2_high;
 	__le32 buf_2_len;
+	__le32 reserved3;
 	__le32 reserved4;
-	__le32 reserved5;
 };
 /*
  * The following constants define control bits for buffer
@@ -74,6 +81,7 @@ struct ob_mac_iocb_rsp {
 	u8 opcode;
 	u8 flags;
 #define OB_MAC_IOCB_RSP_P	0x08
+#define OB_MAC_IOCB_RSP_L	0x04
 #define OB_MAC_IOCB_RSP_S	0x02
 #define OB_MAC_IOCB_RSP_I	0x01
 
@@ -85,6 +93,7 @@ struct ob_mac_iocb_rsp {
 
 struct ib_mac_iocb_rsp {
 	u8 opcode;
+#define IB_MAC_IOCB_RSP_V	0x80
 	u8 flags;
 #define IB_MAC_IOCB_RSP_S	0x80
 #define IB_MAC_IOCB_RSP_H1	0x40
@@ -138,6 +147,7 @@ struct ob_ip_iocb_req {
 struct ob_ip_iocb_rsp {
 	u8 opcode;
 	u8 flags;
+#define OB_MAC_IOCB_RSP_H	0x10
 #define OB_MAC_IOCB_RSP_E	0x08
 #define OB_MAC_IOCB_RSP_L	0x04
 #define OB_MAC_IOCB_RSP_S	0x02
@@ -220,6 +230,10 @@ struct ob_tcp_iocb_rsp {
 
 struct ib_ip_iocb_rsp {
 	u8 opcode;
+#define IB_IP_IOCB_RSP_3032_V	0x80
+#define IB_IP_IOCB_RSP_3032_O	0x40
+#define IB_IP_IOCB_RSP_3032_I	0x20
+#define IB_IP_IOCB_RSP_3032_R	0x10
 	u8 flags;
 #define IB_IP_IOCB_RSP_S	0x80
 #define IB_IP_IOCB_RSP_H1	0x40
@@ -230,6 +244,12 @@ struct ib_ip_iocb_rsp {
 
 	__le16 length;
 	__le16 checksum;
+#define IB_IP_IOCB_RSP_3032_ICE	0x01
+#define IB_IP_IOCB_RSP_3032_CE	0x02
+#define IB_IP_IOCB_RSP_3032_NUC	0x04
+#define IB_IP_IOCB_RSP_3032_UDP	0x08
+#define IB_IP_IOCB_RSP_3032_TCP	0x10
+#define IB_IP_IOCB_RSP_3032_IPE	0x20
 	__le16 reserved;
 #define IB_IP_IOCB_RSP_R	0x01
 	__le32 ial_low;
@@ -524,6 +544,21 @@ enum {
 	IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
 	IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
 	IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
+	IP_ADDR_INDEX_REG_6 = 0x0008,
+	IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030,
+	IP_ADDR_INDEX_REG_E = 0x0040,
+};
+enum {
+	QL3032_PORT_CONTROL_DS = 0x0001,
+	QL3032_PORT_CONTROL_HH = 0x0002,
+	QL3032_PORT_CONTROL_EIv6 = 0x0004,
+	QL3032_PORT_CONTROL_EIv4 = 0x0008,
+	QL3032_PORT_CONTROL_ET = 0x0010,
+	QL3032_PORT_CONTROL_EF = 0x0020,
+	QL3032_PORT_CONTROL_DRM = 0x0040,
+	QL3032_PORT_CONTROL_RLB = 0x0080,
+	QL3032_PORT_CONTROL_RCB = 0x0100,
+	QL3032_PORT_CONTROL_KIE = 0x0200,
 };
 
 enum {
@@ -657,7 +692,8 @@ struct ql3xxx_port_registers {
 	u32 internalRamWDataReg;
 	u32 reclaimedBufferAddrRegLow;
 	u32 reclaimedBufferAddrRegHigh;
-	u32 reserved[2];
+	u32 tcpConfiguration;
+	u32 functionControl;
 	u32 fpgaRevID;
 	u32 localRamAddr;
 	u32 localRamDataAutoIncr;
@@ -963,6 +999,7 @@ struct eeprom_data {
 
 #define QL3XXX_VENDOR_ID	0x1077
 #define QL3022_DEVICE_ID	0x3022
+#define QL3032_DEVICE_ID	0x3032
 
 /* MTU & Frame Size stuff */
 #define NORMAL_MTU_SIZE		ETH_DATA_LEN
@@ -1038,11 +1075,41 @@ struct ql_rcv_buf_cb {
 	int index;
 };
 
+/*
+ * Original IOCB has 3 sg entries:
+ * first points to skb-data area,
+ * second points to first frag,
+ * third points to next oal.
+ * OAL has 5 entries:
+ * 1 thru 4 point to frags,
+ * fifth points to next oal.
+ */
+#define MAX_OAL_CNT ((MAX_SKB_FRAGS - 1) / 4 + 1)
+
+struct oal_entry {
+	u32 dma_lo;
+	u32 dma_hi;
+	u32 len;
+#define OAL_LAST_ENTRY	0x80000000	/* Last valid buffer in list. */
+#define OAL_CONT_ENTRY	0x40000000	/* points to an OAL (continuation). */
+	u32 reserved;
+};
+
+struct oal {
+	struct oal_entry oal_entry[5];
+};
+
+struct map_list {
+	DECLARE_PCI_UNMAP_ADDR(mapaddr);
+	DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
 struct ql_tx_buf_cb {
 	struct sk_buff *skb;
 	struct ob_mac_iocb_req *queue_entry;
-	DECLARE_PCI_UNMAP_ADDR(mapaddr);
-	DECLARE_PCI_UNMAP_LEN(maplen);
+	int seg_count;
+	struct oal *oal;
+	struct map_list map[MAX_SKB_FRAGS + 1];
 };
 
 /* definitions for type field */
@@ -1189,6 +1256,7 @@ struct ql3_adapter {
 	struct delayed_work reset_work;
 	struct delayed_work tx_timeout_work;
 	u32 max_frame_size;
+	u32 device_id;
 };
 
 #endif				/* _QLA3XXX_H_ */
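
A closing note on the new ql_tx_buf_cb sizing above: per the layout comment, the first fragment rides in the IOCB itself and each OAL holds four fragments plus a link, so the number of OALs actually needed for n fragments is ceil((n - 1) / 4), and MAX_OAL_CNT, i.e. floor((n - 1) / 4) + 1, is always at least that. An illustrative bound check:

    #include <assert.h>

    int main(void)
    {
        int frags;

        for (frags = 1; frags <= 18; frags++) {
            int needed  = (frags - 1 + 3) / 4;   /* ceil((frags-1)/4) */
            int max_cnt = (frags - 1) / 4 + 1;   /* MAX_OAL_CNT formula */
            assert(needed <= max_cnt);
        }
        return 0;
    }
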