diff options
author | Phil Sutter <n0-1@freewrt.org> | 2009-01-15 00:48:59 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-01-15 11:28:19 -0500 |
commit | 4cf83b664fc14f8262d3013566ca36645f891df2 (patch) | |
tree | c60d11959dd582606d3bb2c2b67cdf9cd8846392 /drivers/net/korina.c | |
parent | beb0babfb77eab0cbcc7f64a7b8f3545fec5c0ba (diff) |
korina: rework korina_rx() for use with napi
This function needs an early exit condition to function properly, or
else the caller assumes the napi workload wasn't enough to handle all
received packets and korina_rx() is called again (and again and again and ...).
Signed-off-by: Phil Sutter <n0-1@freewrt.org>
Acked-by: Florian Fainelli <florian@openwrt.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/korina.c')
-rw-r--r-- | drivers/net/korina.c | 109 |
1 file changed, 53 insertions, 56 deletions
diff --git a/drivers/net/korina.c b/drivers/net/korina.c index 65b8487c1896..a1d8af7d0bcd 100644 --- a/drivers/net/korina.c +++ b/drivers/net/korina.c | |||
@@ -353,15 +353,20 @@ static int korina_rx(struct net_device *dev, int limit) | |||
353 | struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done]; | 353 | struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done]; |
354 | struct sk_buff *skb, *skb_new; | 354 | struct sk_buff *skb, *skb_new; |
355 | u8 *pkt_buf; | 355 | u8 *pkt_buf; |
356 | u32 devcs, pkt_len, dmas, rx_free_desc; | 356 | u32 devcs, pkt_len, dmas; |
357 | int count; | 357 | int count; |
358 | 358 | ||
359 | dma_cache_inv((u32)rd, sizeof(*rd)); | 359 | dma_cache_inv((u32)rd, sizeof(*rd)); |
360 | 360 | ||
361 | for (count = 0; count < limit; count++) { | 361 | for (count = 0; count < limit; count++) { |
362 | skb = lp->rx_skb[lp->rx_next_done]; | ||
363 | skb_new = NULL; | ||
362 | 364 | ||
363 | devcs = rd->devcs; | 365 | devcs = rd->devcs; |
364 | 366 | ||
367 | if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0) | ||
368 | break; | ||
369 | |||
365 | /* Update statistics counters */ | 370 | /* Update statistics counters */ |
366 | if (devcs & ETH_RX_CRC) | 371 | if (devcs & ETH_RX_CRC) |
367 | dev->stats.rx_crc_errors++; | 372 | dev->stats.rx_crc_errors++; |
@@ -384,63 +389,55 @@ static int korina_rx(struct net_device *dev, int limit) | |||
384 | * in Rc32434 (errata ref #077) */ | 389 | * in Rc32434 (errata ref #077) */ |
385 | dev->stats.rx_errors++; | 390 | dev->stats.rx_errors++; |
386 | dev->stats.rx_dropped++; | 391 | dev->stats.rx_dropped++; |
387 | } | 392 | } else if ((devcs & ETH_RX_ROK)) { |
388 | |||
389 | while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) { | ||
390 | /* init the var. used for the later | ||
391 | * operations within the while loop */ | ||
392 | skb_new = NULL; | ||
393 | pkt_len = RCVPKT_LENGTH(devcs); | 393 | pkt_len = RCVPKT_LENGTH(devcs); |
394 | skb = lp->rx_skb[lp->rx_next_done]; | 394 | |
395 | 395 | /* must be the (first and) last | |
396 | if ((devcs & ETH_RX_ROK)) { | 396 | * descriptor then */ |
397 | /* must be the (first and) last | 397 | pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data; |
398 | * descriptor then */ | 398 | |
399 | pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data; | 399 | /* invalidate the cache */ |
400 | 400 | dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); | |
401 | /* invalidate the cache */ | 401 | |
402 | dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); | 402 | /* Malloc up new buffer. */ |
403 | 403 | skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2); | |
404 | /* Malloc up new buffer. */ | 404 | |
405 | skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2); | 405 | if (!skb_new) |
406 | 406 | break; | |
407 | if (!skb_new) | 407 | /* Do not count the CRC */ |
408 | break; | 408 | skb_put(skb, pkt_len - 4); |
409 | /* Do not count the CRC */ | 409 | skb->protocol = eth_type_trans(skb, dev); |
410 | skb_put(skb, pkt_len - 4); | 410 | |
411 | skb->protocol = eth_type_trans(skb, dev); | 411 | /* Pass the packet to upper layers */ |
412 | 412 | netif_receive_skb(skb); | |
413 | /* Pass the packet to upper layers */ | 413 | dev->stats.rx_packets++; |
414 | netif_receive_skb(skb); | 414 | dev->stats.rx_bytes += pkt_len; |
415 | dev->stats.rx_packets++; | 415 | |
416 | dev->stats.rx_bytes += pkt_len; | 416 | /* Update the mcast stats */ |
417 | 417 | if (devcs & ETH_RX_MP) | |
418 | /* Update the mcast stats */ | 418 | dev->stats.multicast++; |
419 | if (devcs & ETH_RX_MP) | 419 | |
420 | dev->stats.multicast++; | 420 | lp->rx_skb[lp->rx_next_done] = skb_new; |
421 | |||
422 | lp->rx_skb[lp->rx_next_done] = skb_new; | ||
423 | } | ||
424 | |||
425 | rd->devcs = 0; | ||
426 | |||
427 | /* Restore descriptor's curr_addr */ | ||
428 | if (skb_new) | ||
429 | rd->ca = CPHYSADDR(skb_new->data); | ||
430 | else | ||
431 | rd->ca = CPHYSADDR(skb->data); | ||
432 | |||
433 | rd->control = DMA_COUNT(KORINA_RBSIZE) | | ||
434 | DMA_DESC_COD | DMA_DESC_IOD; | ||
435 | lp->rd_ring[(lp->rx_next_done - 1) & | ||
436 | KORINA_RDS_MASK].control &= | ||
437 | ~DMA_DESC_COD; | ||
438 | |||
439 | lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK; | ||
440 | dma_cache_wback((u32)rd, sizeof(*rd)); | ||
441 | rd = &lp->rd_ring[lp->rx_next_done]; | ||
442 | writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas); | ||
443 | } | 421 | } |
422 | |||
423 | rd->devcs = 0; | ||
424 | |||
425 | /* Restore descriptor's curr_addr */ | ||
426 | if (skb_new) | ||
427 | rd->ca = CPHYSADDR(skb_new->data); | ||
428 | else | ||
429 | rd->ca = CPHYSADDR(skb->data); | ||
430 | |||
431 | rd->control = DMA_COUNT(KORINA_RBSIZE) | | ||
432 | DMA_DESC_COD | DMA_DESC_IOD; | ||
433 | lp->rd_ring[(lp->rx_next_done - 1) & | ||
434 | KORINA_RDS_MASK].control &= | ||
435 | ~DMA_DESC_COD; | ||
436 | |||
437 | lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK; | ||
438 | dma_cache_wback((u32)rd, sizeof(*rd)); | ||
439 | rd = &lp->rd_ring[lp->rx_next_done]; | ||
440 | writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas); | ||
444 | } | 441 | } |
445 | 442 | ||
446 | dmas = readl(&lp->rx_dma_regs->dmas); | 443 | dmas = readl(&lp->rx_dma_regs->dmas); |