author     Dan Williams <dcbw@redhat.com>            2008-02-14 17:49:41 -0500
committer  John W. Linville <linville@tuxdriver.com> 2008-02-15 13:44:19 -0500
commit     943dbef4b84b9cee3501e45b654e38335900570b
tree       9caf4f18b3fb12f3fbb74e9a82530b6903d53411 /drivers/net
parent     a6477249b4a1c2da6376f47fc175882be9adb844
ipw2200: fix ucode assertion for RX queue overrun
Restock the RX queue when there are a lot of unused frames, so that the
RX ring buffer doesn't overrun and trigger a ucode assertion.

Backport of the patch "iwlwifi: fix ucode assertion for RX queue overrun".

Signed-off-by: Dan Williams <dcbw@redhat.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/wireless/ipw2200.c   45
1 file changed, 34 insertions(+), 11 deletions(-)
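The failure mode the commit message describes can be pictured with a small stand-alone model of the RX ring (a hedged sketch only: QUEUE_SIZE and the bookkeeping variables are illustrative, not the driver's data structures). The firmware keeps depositing received frames into slots the driver has handed it; if the driver processes frames during a long burst but only returns slots at the very end, the firmware eventually finds no restocked slot and fires the assertion.

/* Hypothetical stand-alone model of an RX ring shared with firmware;
 * QUEUE_SIZE and the bookkeeping are illustrative, not driver code. */
#include <stdio.h>

#define QUEUE_SIZE 32

int main(void)
{
	int restocked = QUEUE_SIZE - 1;	/* slots currently handed to firmware */
	int processed = 0;		/* frames the driver has consumed      */

	/* Simulate a long burst: firmware fills slots, the driver processes
	 * them, but (like the pre-patch path) nothing is restocked mid-burst. */
	for (int frame = 0; frame < 100; frame++) {
		if (restocked == 0) {
			printf("frame %d: no restocked slot left -> "
			       "firmware would assert\n", frame);
			return 1;
		}
		restocked--;	/* firmware consumes one slot for this frame */
		processed++;	/* driver consumes the frame, keeps the slot */
	}
	printf("processed %d frames without running dry\n", processed);
	return 0;
}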
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 3e6ad7b92c83..a56d9fc6354f 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -3365,7 +3365,6 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv,
 	/* Set us so that we have processed and used all buffers, but have
 	 * not restocked the Rx queue with fresh buffers */
 	rxq->read = rxq->write = 0;
-	rxq->processed = RX_QUEUE_SIZE - 1;
 	rxq->free_count = 0;
 	spin_unlock_irqrestore(&rxq->lock, flags);
 }
@@ -3607,7 +3606,22 @@ static int ipw_load(struct ipw_priv *priv)
  * Driver allocates buffers of this size for Rx
  */
 
-static inline int ipw_queue_space(const struct clx2_queue *q)
+/**
+ * ipw_rx_queue_space - Return number of free slots available in queue.
+ */
+static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
+{
+	int s = q->read - q->write;
+	if (s <= 0)
+		s += RX_QUEUE_SIZE;
+	/* keep some buffer to not confuse full and empty queue */
+	s -= 2;
+	if (s < 0)
+		s = 0;
+	return s;
+}
+
+static inline int ipw_tx_queue_space(const struct clx2_queue *q)
 {
 	int s = q->last_used - q->first_empty;
 	if (s <= 0)
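To see what the new helper actually returns, the following stand-alone sketch copies its index arithmetic (RX_QUEUE_SIZE is defined locally and assumed to be 32, and struct rx_queue here is a pared-down stand-in for struct ipw_rx_queue). The s -= 2 guard keeps at least two ring slots unused, so a fully restocked ring never collapses into the same read == write state as an empty one.

#include <stdio.h>

#define RX_QUEUE_SIZE 32	/* assumed to match the driver header */

/* pared-down stand-in for struct ipw_rx_queue */
struct rx_queue {
	int read;	/* next slot the driver will process      */
	int write;	/* next slot to be handed to the firmware */
};

/* same arithmetic as the new ipw_rx_queue_space() */
static int rx_queue_space(const struct rx_queue *q)
{
	int s = q->read - q->write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	struct rx_queue cases[] = {
		{ .read = 0,  .write = 0  },	/* just reset, nothing restocked: 30 */
		{ .read = 10, .write = 4  },	/* mostly restocked:               4 */
		{ .read = 4,  .write = 10 },	/* mostly drained:                24 */
		{ .read = 5,  .write = 3  },	/* full, only guard slots left:    0 */
	};

	for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("read=%2d write=%2d -> space=%d\n",
		       cases[i].read, cases[i].write,
		       rx_queue_space(&cases[i]));
	return 0;
}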
@@ -4947,7 +4961,7 @@ static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
 		priv->tx_packets++;
 	}
 done:
-	if ((ipw_queue_space(q) > q->low_mark) &&
+	if ((ipw_tx_queue_space(q) > q->low_mark) &&
 	    (qindex >= 0) &&
 	    (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
 		netif_wake_queue(priv->net_dev);
@@ -4965,7 +4979,7 @@ static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
 	struct clx2_queue *q = &txq->q;
 	struct tfd_frame *tfd;
 
-	if (ipw_queue_space(q) < (sync ? 1 : 2)) {
+	if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
 		IPW_ERROR("No space for Tx\n");
 		return -EBUSY;
 	}
@@ -5070,7 +5084,7 @@ static void ipw_rx_queue_restock(struct ipw_priv *priv)
 
 	spin_lock_irqsave(&rxq->lock, flags);
 	write = rxq->write;
-	while ((rxq->write != rxq->processed) && (rxq->free_count)) {
+	while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
 		element = rxq->rx_free.next;
 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
 		list_del(element);
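With the helper in place, the restock loop can stop on the computed space instead of comparing write against the removed processed index. A simplified model of ipw_rx_queue_restock() (arrays instead of the driver's list_head free list; the names and RX_QUEUE_SIZE = 32 are assumptions of this sketch) shows it filling the ring until only the two guard slots remain or the free list runs out:

#include <stdio.h>

#define RX_QUEUE_SIZE 32	/* assumed driver value */

struct rx_queue {
	int read;
	int write;
	int free_count;			/* buffers waiting on the free list */
	int slot_filled[RX_QUEUE_SIZE];	/* 1 = buffer handed to firmware    */
};

static int rx_queue_space(const struct rx_queue *q)
{
	int s = q->read - q->write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	s -= 2;			/* keep full and empty distinguishable */
	return s < 0 ? 0 : s;
}

/* Simplified counterpart of ipw_rx_queue_restock(): hand free buffers to
 * the ring until it is (almost) full or the free list is exhausted. */
static void rx_queue_restock(struct rx_queue *q)
{
	while (rx_queue_space(q) > 0 && q->free_count) {
		q->slot_filled[q->write] = 1;
		q->write = (q->write + 1) % RX_QUEUE_SIZE;
		q->free_count--;
	}
}

int main(void)
{
	struct rx_queue q = { .read = 0, .write = 0, .free_count = RX_QUEUE_SIZE };

	rx_queue_restock(&q);
	/* write stops two slots short of read: 30 slots restocked, two left
	 * empty so a full ring never looks like an empty one */
	printf("after restock: write=%d free_count=%d space=%d\n",
	       q.write, q.free_count, rx_queue_space(&q));
	return 0;
}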
@@ -5187,7 +5201,6 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
 	/* Set us so that we have processed and used all buffers, but have
 	 * not restocked the Rx queue with fresh buffers */
 	rxq->read = rxq->write = 0;
-	rxq->processed = RX_QUEUE_SIZE - 1;
 	rxq->free_count = 0;
 
 	return rxq;
@@ -8223,13 +8236,17 @@ static void ipw_rx(struct ipw_priv *priv)
 	struct ieee80211_hdr_4addr *header;
 	u32 r, w, i;
 	u8 network_packet;
+	u8 fill_rx = 0;
 	DECLARE_MAC_BUF(mac);
 	DECLARE_MAC_BUF(mac2);
 	DECLARE_MAC_BUF(mac3);
 
 	r = ipw_read32(priv, IPW_RX_READ_INDEX);
 	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
-	i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
+	i = priv->rxq->read;
+
+	if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
+		fill_rx = 1;
 
 	while (i != r) {
 		rxb = priv->rxq->queue[i];
@@ -8404,11 +8421,17 @@ static void ipw_rx(struct ipw_priv *priv)
 			list_add_tail(&rxb->list, &priv->rxq->rx_used);
 
 		i = (i + 1) % RX_QUEUE_SIZE;
+
+		/* If there are a lot of unused frames, restock the Rx queue
+		 * so the ucode won't assert */
+		if (fill_rx) {
+			priv->rxq->read = i;
+			ipw_rx_queue_replenish(priv);
+		}
 	}
 
 	/* Backtrack one entry */
-	priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
-
+	priv->rxq->read = i;
 	ipw_rx_queue_restock(priv);
 }
 
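The two ipw_rx() hunks cooperate: on entry the handler checks whether more than half the ring is waiting to be restocked and, if so, sets fill_rx so that slots are returned after every processed frame rather than only once at the end of the interrupt. A compressed model of that control flow (frame handling elided; the queue model, names, and RX_QUEUE_SIZE = 32 are assumptions of this sketch):

#include <stdio.h>

#define RX_QUEUE_SIZE 32	/* assumed driver value */

struct rx_queue { int read; int write; };

static int rx_queue_space(const struct rx_queue *q)
{
	int s = q->read - q->write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	s -= 2;
	return s < 0 ? 0 : s;
}

static void rx_queue_replenish(struct rx_queue *q)
{
	/* stand-in for ipw_rx_queue_replenish(): refill up to the guard */
	while (rx_queue_space(q) > 0)
		q->write = (q->write + 1) % RX_QUEUE_SIZE;
}

/* Compressed model of the patched ipw_rx() loop: 'r' plays the role of
 * the hardware read index, i.e. how far the firmware has written frames. */
static void rx_process(struct rx_queue *q, int r)
{
	int i = q->read;
	int fill_rx = rx_queue_space(q) > (RX_QUEUE_SIZE / 2);

	while (i != r) {
		/* ... frame handling elided ... */
		i = (i + 1) % RX_QUEUE_SIZE;

		/* the fix: if the ring was running low on restocked
		 * buffers, give slots back after every frame */
		if (fill_rx) {
			q->read = i;
			rx_queue_replenish(q);
		}
	}

	q->read = i;
}

int main(void)
{
	/* ring mostly drained: only four restocked slots (20..23) remain */
	struct rx_queue q = { .read = 20, .write = 24 };

	printf("space before: %d (fill_rx=%d)\n",
	       rx_queue_space(&q), rx_queue_space(&q) > RX_QUEUE_SIZE / 2);
	rx_process(&q, 24);	/* firmware has filled slots up to index 24 */
	printf("space after : %d\n", rx_queue_space(&q));
	return 0;
}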
@@ -10336,7 +10359,7 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
 	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
 	ipw_write32(priv, q->reg_w, q->first_empty);
 
-	if (ipw_queue_space(q) < q->high_mark)
+	if (ipw_tx_queue_space(q) < q->high_mark)
 		netif_stop_queue(priv->net_dev);
 
 	return NETDEV_TX_OK;
@@ -10357,7 +10380,7 @@ static int ipw_net_is_queue_full(struct net_device *dev, int pri)
 	struct clx2_tx_queue *txq = &priv->txq[0];
 #endif /* CONFIG_IPW2200_QOS */
 
-	if (ipw_queue_space(&txq->q) < txq->q.high_mark)
+	if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
 		return 1;
 
 	return 0;