author    Ron Mercer <ron.mercer@qlogic.com>    2007-02-26 14:06:39 -0500
committer Jeff Garzik <jeff@garzik.org>         2007-02-27 04:21:44 -0500
commit    3e71f6dd47e7e64461328adcdc3fbad1465b4c2f (patch)
tree      57b9ec7e0e41702f23ab2e431b3df2fda89146c5
parent    97916330e12371b44df659abb25d4d5d528e3ff7 (diff)
qla3xxx: bugfix: Fixed jumbo frame handling for 3032 chip.
The scatter/gather lists were not being built correctly. When large
frames spanned several buffers, the chip would panic.

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rwxr-xr-x  drivers/net/qla3xxx.c  99
-rwxr-xr-x  drivers/net/qla3xxx.h   1
2 files changed, 64 insertions, 36 deletions
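
Before reading the diff, it helps to have the 3032's scatter/gather geometry in
mind; it is spelled out in the driver comment that the second hunk below
relocates. The IOCB itself carries 3 address/length pairs (ALPs); when a frame
needs more, the last ALP of the IOCB, and of each 5-ALP outbound address list
(OAL) chained after it, is spent on a pointer to the next OAL. The following
standalone sketch shows the resulting capacity arithmetic; oals_needed() and
both constants are hypothetical names for illustration, not driver code.

#include <stdio.h>

#define IOCB_ALPS 3	/* addr/len pairs held directly in the IOCB */
#define OAL_ALPS  5	/* addr/len pairs per outbound address list (OAL) */

/* How many OALs a frame needs under the chaining scheme described in
 * the driver comment below; illustrative, not ql_get_seg_count(). */
static int oals_needed(int nr_frags)
{
	int segs = nr_frags + 1;	/* +1 for the skb header buffer */
	int rem;

	if (segs <= IOCB_ALPS)
		return 0;		/* everything fits in the IOCB */

	/* The IOCB's 3rd ALP becomes a chain pointer, leaving 2 for data. */
	rem = segs - (IOCB_ALPS - 1);
	if (rem <= OAL_ALPS)
		return 1;		/* a single OAL, all 5 ALPs hold data */

	/* Intermediate OALs give up their 5th ALP to chain onward (4 data
	 * entries each); only the final OAL carries 5.  This is
	 * 1 + ceil((rem - OAL_ALPS) / (OAL_ALPS - 1)) in integer math. */
	return 1 + (rem - 2) / (OAL_ALPS - 1);
}

int main(void)
{
	/* e.g. a jumbo frame split across the skb header plus 5 page
	 * fragments: 6 segments, so one OAL beyond the IOCB's ALPs. */
	printf("nr_frags=5  -> %d OAL(s)\n", oals_needed(5));
	printf("nr_frags=10 -> %d OAL(s)\n", oals_needed(10));
	return 0;
}

Under this scheme, up to seven segments fit with one OAL; an eighth segment
forces a second one, since the first OAL must then spend its 5th ALP on the
chain pointer.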
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index bdf1a400430a..4e9b25f475d0 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2122,11 +2122,13 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
 
 	if (ip) {
 		if (ip->protocol == IPPROTO_TCP) {
-			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
+			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
+			OB_3032MAC_IOCB_REQ_IC;
 			mac_iocb_ptr->ip_hdr_off = offset;
 			mac_iocb_ptr->ip_hdr_len = ip->ihl;
 		} else if (ip->protocol == IPPROTO_UDP) {
-			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
+			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
+			OB_3032MAC_IOCB_REQ_IC;
 			mac_iocb_ptr->ip_hdr_off = offset;
 			mac_iocb_ptr->ip_hdr_len = ip->ihl;
 		}
@@ -2134,51 +2136,29 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
 }
 
 /*
- * The difference between 3022 and 3032 sends:
- * 3022 only supports a simple single segment transmission.
- * 3032 supports checksumming and scatter/gather lists (fragments).
- * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
- * in the IOCB plus a chain of outbound address lists (OAL) that
- * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
- * will used to point to an OAL when more ALP entries are required.
- * The IOCB is always the top of the chain followed by one or more
- * OALs (when necessary).
+ * Map the buffers for this transmit. This will return
+ * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
  */
-static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
+static int ql_send_map(struct ql3_adapter *qdev,
+			struct ob_mac_iocb_req *mac_iocb_ptr,
+			struct ql_tx_buf_cb *tx_cb,
+			struct sk_buff *skb)
 {
-	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
-	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
-	struct ql_tx_buf_cb *tx_cb;
-	u32 tot_len = skb->len;
 	struct oal *oal;
 	struct oal_entry *oal_entry;
-	int len;
-	struct ob_mac_iocb_req *mac_iocb_ptr;
+	int len = skb_headlen(skb);
 	u64 map;
 	int seg_cnt, seg = 0;
 	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
 
-	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
-		if (!netif_queue_stopped(ndev))
-			netif_stop_queue(ndev);
-		return NETDEV_TX_BUSY;
-	}
-	tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
 	seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
 	if(seg_cnt == -1) {
 		printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
-		return NETDEV_TX_OK;
-
+		return NETDEV_TX_BUSY;
 	}
-	mac_iocb_ptr = tx_cb->queue_entry;
-	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
-	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
-	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
-	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
-	tx_cb->skb = skb;
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		ql_hw_csum_setup(skb, mac_iocb_ptr);
-	len = skb_headlen(skb);
+	/*
+	 * Map the skb buffer first.
+	 */
 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
 	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
 	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
@@ -2235,6 +2215,55 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 		oal_entry->len =
 		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
 	}
+	return NETDEV_TX_OK;
+}
+
+/*
+ * The difference between 3022 and 3032 sends:
+ * 3022 only supports a simple single segment transmission.
+ * 3032 supports checksumming and scatter/gather lists (fragments).
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
+ * will used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain followed by one or more
+ * OALs (when necessary).
+ */
+static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	struct ql_tx_buf_cb *tx_cb;
+	u32 tot_len = skb->len;
+	struct ob_mac_iocb_req *mac_iocb_ptr;
+
+	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
+		if (!netif_queue_stopped(ndev))
+			netif_stop_queue(ndev);
+		return NETDEV_TX_BUSY;
+	}
+
+	tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
+	if((tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags))) == -1) {
+		printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
+		return NETDEV_TX_OK;
+	}
+
+	mac_iocb_ptr = tx_cb->queue_entry;
+	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
+	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
+	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
+	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
+	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
+	tx_cb->skb = skb;
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		ql_hw_csum_setup(skb, mac_iocb_ptr);
+
+	if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
+		printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
+		return NETDEV_TX_BUSY;
+	}
+
 	wmb();
 	qdev->req_producer_index++;
 	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
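
The body of ql_send_map() between these two hunks, the per-fragment walk that
actually fills the ALPs, is elided from the diff. As a rough standalone
simulation of the chaining discipline that walk must maintain (this is not the
driver's code: build_chain(), the segment arrays, and the pointer stored in
the continuation entry are invented stand-ins; the real driver stores DMA
addresses obtained from pci_map_single()/pci_map_page()):

#include <stdint.h>
#include <stdio.h>

#define OAL_LAST_ENTRY 0x80000000	/* last valid buffer in list */
#define OAL_CONT_ENTRY 0x40000000	/* entry points to another OAL */
#define IOCB_ALPS 3
#define OAL_ALPS  5

struct oal_entry {		/* the 12-byte ALP from the .h diff below */
	uint32_t dma_lo, dma_hi, len;
};

/* Invented stand-in for the driver's walk: seg_addrs/seg_lens play the
 * role of the DMA-mapped skb header and page fragments. */
static void build_chain(struct oal_entry *iocb_alps,
			struct oal_entry (*oals)[OAL_ALPS],
			const uint64_t *seg_addrs, const uint32_t *seg_lens,
			int seg_cnt)
{
	struct oal_entry *e = iocb_alps;
	int room = IOCB_ALPS;		/* ALPs left in the current block */
	int oal_idx = 0;

	for (int seg = 0; seg < seg_cnt; seg++) {
		if (room == 1 && seg < seg_cnt - 1) {
			/* Last ALP of the IOCB/OAL with more segments to
			 * go: spend it on a continuation entry.  The real
			 * driver stores the OAL's pci_map_single() address
			 * here; this simulation stores a virtual pointer. */
			e->dma_lo = (uint32_t)(uintptr_t)oals[oal_idx];
			e->dma_hi = 0;
			e->len = sizeof(oals[0]) | OAL_CONT_ENTRY;
			e = oals[oal_idx++];
			room = OAL_ALPS;
		}
		e->dma_lo = (uint32_t)seg_addrs[seg];
		e->dma_hi = (uint32_t)(seg_addrs[seg] >> 32);
		e->len = seg_lens[seg];
		e++;
		room--;
	}
	(e - 1)->len |= OAL_LAST_ENTRY;	/* terminate the list */
}

int main(void)
{
	struct oal_entry iocb[IOCB_ALPS], oals[2][OAL_ALPS];
	uint64_t addrs[7];
	uint32_t lens[7];

	for (int i = 0; i < 7; i++) {
		addrs[i] = 0x10000ull * (i + 1);
		lens[i] = 1500;
	}
	build_chain(iocb, oals, addrs, lens, 7);
	printf("IOCB ALP2 flags: 0x%08x (continuation)\n",
	       iocb[2].len & 0xf0000000);
	return 0;
}

Per the commit message, it is exactly this list-building that went wrong
before the patch once a large frame spanned several buffers.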
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 51173e585ce9..40913d2c7097 100755
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -1094,7 +1094,6 @@ struct oal_entry {
 	u32 len;
 #define OAL_LAST_ENTRY 0x80000000	/* Last valid buffer in list. */
 #define OAL_CONT_ENTRY 0x40000000	/* points to an OAL. (continuation) */
-	u32 reserved;
 };
 
 struct oal {
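
The header change is easy to read past but matters: dropping the trailing
reserved word shrinks each oal_entry from 16 to 12 bytes, so consecutive ALPs
are packed at the tighter stride the chip evidently expects when it walks an
OAL. That is a hedged reading; the commit message says only that the lists
were not being built correctly. A standalone before/after size check, with
dma_hi assumed from the 64-bit address split visible in the .c hunks:

#include <assert.h>
#include <stdint.h>

struct oal_entry_old {		/* layout before this patch: 16 bytes */
	uint32_t dma_lo;
	uint32_t dma_hi;	/* assumed from the LS_64BITS() split above */
	uint32_t len;		/* OAL_LAST_ENTRY / OAL_CONT_ENTRY live here */
	uint32_t reserved;	/* the word this patch removes */
};

struct oal_entry_new {		/* layout after this patch: 12 bytes */
	uint32_t dma_lo;
	uint32_t dma_hi;
	uint32_t len;
};

int main(void)
{
	assert(sizeof(struct oal_entry_old) == 16);
	assert(sizeof(struct oal_entry_new) == 12);
	return 0;
}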