author		Benjamin Li <benjamin.li@qlogic.com>	2007-02-26 14:06:42 -0500
committer	Jeff Garzik <jeff@garzik.org>	2007-02-27 04:21:45 -0500
commit		e8f4df2491d07fc369e9d631e0638da03a9b3a38 (patch)
tree		dc4b183711b05b7717e36cf5fb2c5b915b357c45 /drivers/net
parent		63b66d12de57d8455615d9f619e18824137ed547 (diff)
qla3xxx: Kernel Panic on pSeries under stress conditions
To reproduce this panic consistently, run an intensive network application such as 'netperf'. After a couple of seconds, a stack trace and a kernel panic appear at the pci_unmap_single() call reached from ql_poll().

Changes:
1) Check the flags on the response MAC IOCB (IO Control Block) for errors.
2) Ensure that on the 4022 (QL3022_DEVICE_ID) we only use one segment.
3) Previously, the memory-mapped producer index register was read on every iteration of the queue-clearing loop. We should only iterate to a known point, not chase the producer index while it is still being updated.

Signed-off-by: Benjamin Li <benjamin.li@qlogic.com>
Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
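Change 3 is what stops the panic: the clean-up loop in ql_tx_rx_clean() must drain the response ring only up to a producer index sampled once, instead of re-reading a location the chip is still DMA-updating underneath it (see the first ql_tx_rx_clean hunk in the diff below). The following is a minimal standalone sketch of that pattern, with hypothetical names; it illustrates the idea and is not the driver code itself.

/*
 * Standalone sketch (hypothetical names): sample the hardware-updated
 * producer index once and drain only to that known point, instead of
 * chasing a value the chip keeps advancing.
 */
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t producer_index;	/* stand-in for *prsp_producer_index */
static uint32_t consumer_index;

static int drain_ring(int work_to_do)
{
	uint32_t snapshot = producer_index;	/* read the shared word exactly once */
	int work_done = 0;

	while (snapshot != consumer_index && work_done < work_to_do) {
		/* ... process one response entry here ... */
		consumer_index++;		/* advance past the handled entry */
		work_done++;
	}
	return work_done;
}

int main(void)
{
	producer_index = 8;			/* pretend the chip posted 8 entries */
	printf("handled %d entries\n", drain_ring(16));
	return 0;
}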
Diffstat (limited to 'drivers/net')
-rwxr-xr-x	drivers/net/qla3xxx.c	64
-rwxr-xr-x	drivers/net/qla3xxx.h	2
2 files changed, 47 insertions(+), 19 deletions(-)
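Changes 1 and 2 land in ql_process_mac_tx_intr() and ql_get_seg_count() in the diff below. The new error paths unwind through gotos so the skb is still freed and the tx slot reclaimed even when the frame was never sent. A self-contained sketch of that unwind shape follows, with hypothetical names standing in for the driver's own.

/*
 * Standalone sketch (hypothetical names) of the error unwind added to
 * ql_process_mac_tx_intr(): validate the completion before touching
 * DMA mappings, then fall through the labels so every path frees the
 * skb and returns the tx slot.
 */
#include <stdio.h>

#define RSP_FRAME_SHORT 0x01		/* plays the role of OB_MAC_IOCB_RSP_S */

struct tx_done {			/* stand-in for ob_mac_iocb_rsp */
	unsigned char flags;
	int seg_count;
};

static unsigned long tx_errors, skbs_freed, tx_slots_free;

static int process_tx_done(const struct tx_done *rsp)
{
	int retval = 0;

	if (rsp->flags & RSP_FRAME_SHORT) {
		tx_errors++;
		retval = -1;
		goto frame_not_sent;	/* skip the unmap, still free the skb */
	}
	if (rsp->seg_count == 0) {
		tx_errors++;
		retval = -1;
		goto invalid_seg_count;	/* nothing was mapped for this slot */
	}

	/* ... good path: unmap the DMA segments here ... */

frame_not_sent:
	skbs_freed++;			/* the skb is released, sent or not */
invalid_seg_count:
	tx_slots_free++;		/* the tx slot is reclaimed on every path */
	return retval;
}

int main(void)
{
	const struct tx_done bad = { .flags = RSP_FRAME_SHORT, .seg_count = 1 };

	process_tx_done(&bad);
	printf("errors=%lu freed=%lu slots=%lu\n", tx_errors, skbs_freed, tx_slots_free);
	return 0;
}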
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 5bf446f7be15..e44e8504904b 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -1747,8 +1747,31 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
 {
 	struct ql_tx_buf_cb *tx_cb;
 	int i;
+	int retval = 0;
 
+	if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+		printk(KERN_WARNING "Frame short but, frame was padded and sent.\n");
+	}
+
 	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
+
+	/* Check the transmit response flags for any errors */
+	if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+		printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
+
+		qdev->stats.tx_errors++;
+		retval = -EIO;
+		goto frame_not_sent;
+	}
+
+	if(tx_cb->seg_count == 0) {
+		printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);
+
+		qdev->stats.tx_errors++;
+		retval = -EIO;
+		goto invalid_seg_count;
+	}
+
 	pci_unmap_single(qdev->pdev,
 			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
 			 pci_unmap_len(&tx_cb->map[0], maplen),
@@ -1765,8 +1788,12 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
 	}
 	qdev->stats.tx_packets++;
 	qdev->stats.tx_bytes += tx_cb->skb->len;
+
+frame_not_sent:
 	dev_kfree_skb_irq(tx_cb->skb);
 	tx_cb->skb = NULL;
+
+invalid_seg_count:
 	atomic_inc(&qdev->tx_count);
 }
 
@@ -1923,8 +1950,10 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 	unsigned long hw_flags;
 	int work_done = 0;
 
+	u32 rsp_producer_index = le32_to_cpu(*(qdev->prsp_producer_index));
+
 	/* While there are entries in the completion queue. */
-	while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
+	while ((rsp_producer_index !=
 		qdev->rsp_consumer_index) && (work_done < work_to_do)) {
 
 		net_rsp = qdev->rsp_current;
@@ -2004,13 +2033,6 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 	}
 
 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
-
-	if (unlikely(netif_queue_stopped(qdev->ndev))) {
-		if (netif_queue_stopped(qdev->ndev) &&
-		    (atomic_read(&qdev->tx_count) >
-		     (NUM_REQ_Q_ENTRIES / 4)))
-			netif_wake_queue(qdev->ndev);
-	}
 	}
 
 	return *tx_cleaned + *rx_cleaned;
@@ -2031,7 +2053,8 @@ static int ql_poll(struct net_device *ndev, int *budget)
 	*budget -= rx_cleaned;
 	ndev->quota -= rx_cleaned;
 
-	if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
+	if( tx_cleaned + rx_cleaned != work_to_do ||
+	    !netif_running(ndev)) {
 quit_polling:
 		netif_rx_complete(ndev);
 
@@ -2093,8 +2116,8 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
 		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
 		spin_unlock(&qdev->adapter_lock);
 	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
+		ql_disable_interrupts(qdev);
 		if (likely(netif_rx_schedule_prep(ndev))) {
-			ql_disable_interrupts(qdev);
 			__netif_rx_schedule(ndev);
 		}
 	} else {
@@ -2113,8 +2136,12 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
  * the next AOL if more frags are coming.
  * That is why the frags:segment count ratio is not linear.
  */
-static int ql_get_seg_count(unsigned short frags)
+static int ql_get_seg_count(struct ql3_adapter *qdev,
+			    unsigned short frags)
 {
+	if (qdev->device_id == QL3022_DEVICE_ID)
+		return 1;
+
 	switch(frags) {
 	case 0: return 1;	/* just the skb->data seg */
 	case 1: return 2;	/* skb->data + 1 frag */
@@ -2183,14 +2210,15 @@ static int ql_send_map(struct ql3_adapter *qdev,
 {
 	struct oal *oal;
 	struct oal_entry *oal_entry;
-	int len = skb_headlen(skb);
+	int len = skb->len;
 	dma_addr_t map;
 	int err;
 	int completed_segs, i;
 	int seg_cnt, seg = 0;
 	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
 
-	seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
+	seg_cnt = tx_cb->seg_count = ql_get_seg_count(qdev,
+						      (skb_shinfo(skb)->nr_frags));
 	if(seg_cnt == -1) {
 		printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
 		return NETDEV_TX_BUSY;
@@ -2216,7 +2244,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
 	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
 	seg++;
 
-	if (!skb_shinfo(skb)->nr_frags) {
+	if (seg_cnt == 1) {
 		/* Terminate the last segment. */
 		oal_entry->len =
 		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
@@ -2341,13 +2369,12 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 	struct ob_mac_iocb_req *mac_iocb_ptr;
 
 	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
-		if (!netif_queue_stopped(ndev))
-			netif_stop_queue(ndev);
 		return NETDEV_TX_BUSY;
 	}
 
 	tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
-	if((tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags))) == -1) {
+	if((tx_cb->seg_count = ql_get_seg_count(qdev,
+	                     (skb_shinfo(skb)->nr_frags))) == -1) {
 		printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
 		return NETDEV_TX_OK;
 	}
@@ -2359,7 +2386,8 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
 	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
 	tx_cb->skb = skb;
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
+	if (qdev->device_id == QL3032_DEVICE_ID &&
+	    skb->ip_summed == CHECKSUM_PARTIAL)
 		ql_hw_csum_setup(skb, mac_iocb_ptr);
 
 	if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 40913d2c7097..34cd6580fd07 100755
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -1194,7 +1194,7 @@ struct ql3_adapter {
 	struct net_rsp_iocb *rsp_current;
 	u16 rsp_consumer_index;
 	u16 reserved_06;
-	u32 *prsp_producer_index;
+	volatile u32 *prsp_producer_index;
 	u32 rsp_producer_index_phy_addr_high;
 	u32 rsp_producer_index_phy_addr_low;
 
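The one-line qla3xxx.h hunk above backs up change 3: qualifying the pointee of prsp_producer_index as volatile tells the compiler that the word is updated by the hardware behind its back, so each access must perform a fresh load rather than reuse a cached value. A minimal illustration of the effect, with hypothetical names:

/* Illustration only (hypothetical names): why the pointee is volatile. */
#include <stdint.h>

struct ring_state {
	volatile uint32_t *producer;	/* hardware DMA-updates this word */
};

static int ring_has_work(const struct ring_state *rs, uint32_t consumer)
{
	/* The volatile qualifier forces a fresh load of *rs->producer here;
	 * without it the compiler may legally keep a stale value in a register. */
	return *rs->producer != consumer;
}

int main(void)
{
	uint32_t hw_word = 3;		/* stands in for the DMA'd location */
	struct ring_state rs = { .producer = &hw_word };

	return ring_has_work(&rs, 0) ? 0 : 1;
}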