author		Lennert Buytenhek <buytenh@wantstofly.org>	2009-04-29 07:57:34 -0400
committer	David S. Miller <davem@davemloft.net>	2009-04-29 20:24:17 -0400
commit		1319ebadf185933e6b7ff95211d3cef9004e9754 (patch)
tree		9b52363bc4eb6b97489a4913868dee0064e21249
parent		ddc9f824b09d790e93a800ba29ff3462f8fb5d0b (diff)
mv643xx_eth: OOM handling fixes
Currently, when OOM occurs during rx ring refill, mv643xx_eth will get into an infinite loop, due to the refill function setting the OOM bit but not clearing the 'rx refill needed' bit for this queue, while the calling function (the NAPI poll handler) will call the refill function in a loop until the 'rx refill needed' bit goes off, without checking the OOM bit.

This patch fixes this by checking the OOM bit in the NAPI poll handler before attempting to do rx refill. This means that once OOM occurs, we won't try to do any memory allocations again until the next invocation of the poll handler.

While we're at it, change the OOM flag to be a single bit instead of one bit per receive queue, since OOM is a system state rather than a per-queue state, and cancel the OOM timer on entry to the NAPI poll handler if it's running, to prevent it from firing when we've already come out of OOM.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Cc: stable@kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
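To make the failure mode concrete, the following minimal userspace C sketch simulates the poll/refill interaction before and after the patch. It is an illustration only, not driver code: the names refill_oom, poll_buggy and poll_fixed are invented, and the two state bits stand in for the driver's work_rx_refill and OOM flags.

#include <stdio.h>

/* Simplified model of the driver state: one receive queue. */
struct state {
	unsigned int work_rx_refill;	/* 'rx refill needed' bit */
	unsigned int oom;		/* OOM flag */
};

/* Models rxq_refill() under memory pressure: the skb allocation
 * fails, so the OOM flag is set but work_rx_refill is NOT cleared. */
static void refill_oom(struct state *s)
{
	s->oom = 1;
	/* s->work_rx_refill stays set -- the crux of the bug. */
}

/* Pre-patch poll loop: keeps calling refill until the 'rx refill
 * needed' bit clears, which never happens under OOM, so it spins
 * forever (capped here so the demo terminates). */
static void poll_buggy(struct state *s, int cap)
{
	int i = 0;

	while (s->work_rx_refill && i < cap) {
		refill_oom(s);
		i++;
	}
	printf("buggy poll: still spinning after %d iterations\n", i);
}

/* Post-patch poll loop: refill work is only attempted while the OOM
 * flag is clear, so the loop terminates and the refill is retried
 * on a later poll (kicked by the rx_oom timer). */
static void poll_fixed(struct state *s)
{
	int i = 0;

	while (!s->oom && s->work_rx_refill) {
		refill_oom(s);
		i++;
	}
	printf("fixed poll: stopped after %d refill attempt(s)\n", i);
}

int main(void)
{
	struct state s = { 1, 0 };

	poll_buggy(&s, 1000000);

	s.work_rx_refill = 1;
	s.oom = 0;
	poll_fixed(&s);

	return 0;
}

Compiled and run, the buggy loop only stops because of the artificial cap, while the fixed loop exits after a single failed refill attempt and defers the retry to the rx_oom timer.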
-rw-r--r--	drivers/net/mv643xx_eth.c	22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index b3185bf2c158..038beff7da80 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -393,12 +393,12 @@ struct mv643xx_eth_private {
 	struct work_struct tx_timeout_task;
 
 	struct napi_struct napi;
+	u8 oom;
 	u8 work_link;
 	u8 work_tx;
 	u8 work_tx_end;
 	u8 work_rx;
 	u8 work_rx_refill;
-	u8 work_rx_oom;
 
 	int skb_size;
 	struct sk_buff_head rx_recycle;
@@ -661,7 +661,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
 				    dma_get_cache_alignment() - 1);
 
 		if (skb == NULL) {
-			mp->work_rx_oom |= 1 << rxq->index;
+			mp->oom = 1;
 			goto oom;
 		}
 
@@ -2167,8 +2167,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 
 	mp = container_of(napi, struct mv643xx_eth_private, napi);
 
-	mp->work_rx_refill |= mp->work_rx_oom;
-	mp->work_rx_oom = 0;
+	if (unlikely(mp->oom)) {
+		mp->oom = 0;
+		del_timer(&mp->rx_oom);
+	}
 
 	work_done = 0;
 	while (work_done < budget) {
@@ -2182,8 +2184,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 			continue;
 		}
 
-		queue_mask = mp->work_tx | mp->work_tx_end |
-				mp->work_rx | mp->work_rx_refill;
+		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
+		if (likely(!mp->oom))
+			queue_mask |= mp->work_rx_refill;
+
 		if (!queue_mask) {
 			if (mv643xx_eth_collect_events(mp))
 				continue;
@@ -2204,7 +2208,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 			txq_maybe_wake(mp->txq + queue);
 		} else if (mp->work_rx & queue_mask) {
 			work_done += rxq_process(mp->rxq + queue, work_tbd);
-		} else if (mp->work_rx_refill & queue_mask) {
+		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
 			work_done += rxq_refill(mp->rxq + queue, work_tbd);
 		} else {
 			BUG();
@@ -2212,7 +2216,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 	}
 
 	if (work_done < budget) {
-		if (mp->work_rx_oom)
+		if (mp->oom)
 			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
 		napi_complete(napi);
 		wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
@@ -2372,7 +2376,7 @@ static int mv643xx_eth_open(struct net_device *dev)
 		rxq_refill(mp->rxq + i, INT_MAX);
 	}
 
-	if (mp->work_rx_oom) {
+	if (mp->oom) {
 		mp->rx_oom.expires = jiffies + (HZ / 10);
 		add_timer(&mp->rx_oom);
 	}
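For context on the rx_oom timer that the patch arms in mv643xx_eth_poll() and mv643xx_eth_open(): when it fires, it simply reschedules NAPI so that the poll handler runs again and the refill is retried. A sketch of that callback, modeled on the driver's oom_timer_wrapper() from this era; the exact body is an assumption, not part of this patch:

/* OOM backoff timer callback (sketch, not part of this patch):
 * reschedule NAPI polling so that mv643xx_eth_poll() runs again
 * and rxq_refill() gets another chance at allocating buffers. */
static void oom_timer_wrapper(unsigned long data)
{
	struct mv643xx_eth_private *mp = (void *)data;

	napi_schedule(&mp->napi);
}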