commit b5254eee7994ba0a44ba7386cb66c2ce2f30fcc6
Author:    Olof Johansson <olof@lixom.net>               2007-10-02 17:27:57 -0400
Committer: David S. Miller <davem@sunset.davemloft.net>  2007-10-10 19:54:27 -0400
Tree:      f21fe0be6225cc6fce029ffcadb45ec4a5d93faf (drivers/net/pasemi_mac.c)
Parent:    9e81d331f2ec65695e4366ce592e14f9700bae8b
pasemi_mac: use buffer index pointer in clean_rx()
Use the new B0 feature for the buffer ring index on the receive side. This
means we no longer have to search the ring to find where a buffer
came from.
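For illustration only (this sketch is not from the driver: the ring size,
field mask, and helper names are invented, and only the one-biased index,
eval-1, mirrors the patch below), the change turns the per-packet lookup
from a ring scan into a direct array access:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE  512                      /* hypothetical; power of two */
#define EVAL_SHIFT 20                       /* hypothetical field position */
#define EVAL_MASK  (0x3ffULL << EVAL_SHIFT) /* hypothetical field mask */

struct buf_info {
	uint64_t dma;
	void *skb;
};

static struct buf_info ring_info[RING_SIZE];

/* Old approach: scan the ring for the entry whose DMA address matches. */
static struct buf_info *find_by_dma(uint64_t dma, unsigned int next_to_fill)
{
	for (unsigned int i = next_to_fill; i < next_to_fill + RING_SIZE; i++) {
		struct buf_info *info = &ring_info[i & (RING_SIZE - 1)];
		if (info->dma == dma)
			return info;
	}
	return NULL;
}

/* New approach: the hardware stores a one-biased buffer index in the
 * 8-byte result descriptor, so the lookup is a single array access
 * (this bias mirrors the eval-1 in the patch). */
static struct buf_info *find_by_index(uint64_t result)
{
	unsigned int eval = (result & EVAL_MASK) >> EVAL_SHIFT;
	return &ring_info[eval - 1];
}

int main(void)
{
	ring_info[41].dma = 0xabcd000;
	uint64_t result = (uint64_t)(41 + 1) << EVAL_SHIFT;

	printf("scan:  %p\n", (void *)find_by_dma(0xabcd000, 0));
	printf("index: %p\n", (void *)find_by_index(result));
	return 0;
}

The scan is worst-case O(RING_SIZE) probes per packet; the indexed lookup
is O(1), which is the point of the B0 feature.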
Also clean up the RX cleaning side a little while I was at it.
Note: Pre-B0 hardware is no longer supported, and needs a pile of other
workarounds that are not being submitted for mainline inclusion. So the
fact that this breaks old hardware is not a problem at this time.
Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/pasemi_mac.c')

 drivers/net/pasemi_mac.c | 60 +++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 33 insertions(+), 27 deletions(-)
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 967ff8c96b0f..31ad2b9093a7 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -243,9 +243,9 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev)
 		       PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));
 
 	write_dma_reg(mac, PAS_DMA_RXINT_CFG(mac->dma_if),
-		      PAS_DMA_RXINT_CFG_DHL(3) |
-		      PAS_DMA_RXINT_CFG_L2 |
-		      PAS_DMA_RXINT_CFG_LW);
+		      PAS_DMA_RXINT_CFG_DHL(3) | PAS_DMA_RXINT_CFG_L2 |
+		      PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
+		      PAS_DMA_RXINT_CFG_HEN);
 
 	ring->next_to_fill = 0;
 	ring->next_to_clean = 0;
@@ -402,13 +402,12 @@ static void pasemi_mac_free_rx_resources(struct net_device *dev)
 static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit)
 {
 	struct pasemi_mac *mac = netdev_priv(dev);
-	int start = mac->rx->next_to_fill;
-	unsigned int fill, count;
+	int fill, count;
 
 	if (limit <= 0)
 		return;
 
-	fill = start;
+	fill = mac->rx->next_to_fill;
 	for (count = 0; count < limit; count++) {
 		struct pasemi_mac_buffer *info = &RX_RING_INFO(mac, fill);
 		u64 *buff = &RX_BUFF(mac, fill);
@@ -446,10 +445,10 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit)
 
 	wmb();
 
-	write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), count);
 	write_dma_reg(mac, PAS_DMA_RXINT_INCR(mac->dma_if), count);
 
-	mac->rx->next_to_fill += count;
+	mac->rx->next_to_fill = (mac->rx->next_to_fill + count) &
+				(RX_RING_SIZE - 1);
 }
 
 static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac)
@@ -517,15 +516,19 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
 	int count;
 	struct pasemi_mac_buffer *info;
 	struct sk_buff *skb;
-	unsigned int i, len;
+	unsigned int len;
 	u64 macrx;
 	dma_addr_t dma;
+	int buf_index;
+	u64 eval;
 
 	spin_lock(&mac->rx->lock);
 
 	n = mac->rx->next_to_clean;
 
-	for (count = limit; count; count--) {
+	prefetch(RX_RING(mac, n));
+
+	for (count = 0; count < limit; count++) {
 		macrx = RX_RING(mac, n);
 
 		if ((macrx & XCT_MACRX_E) ||
@@ -537,21 +540,14 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
 
 		info = NULL;
 
-		/* We have to scan for our skb since there's no way
-		 * to back-map them from the descriptor, and if we
-		 * have several receive channels then they might not
-		 * show up in the same order as they were put on the
-		 * interface ring.
-		 */
+		BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));
 
-		dma = (RX_RING(mac, n+1) & XCT_PTR_ADDR_M);
-		for (i = mac->rx->next_to_fill;
-		     i < (mac->rx->next_to_fill + RX_RING_SIZE);
-		     i++) {
-			info = &RX_RING_INFO(mac, i);
-			if (info->dma == dma)
-				break;
-		}
+		eval = (RX_RING(mac, n+1) & XCT_RXRES_8B_EVAL_M) >>
+			XCT_RXRES_8B_EVAL_S;
+		buf_index = eval-1;
+
+		dma = (RX_RING(mac, n+2) & XCT_PTR_ADDR_M);
+		info = &RX_RING_INFO(mac, buf_index);
 
 		skb = info->skb;
 
@@ -600,9 +596,9 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
 		/* Need to zero it out since hardware doesn't, since the
 		 * replenish loop uses it to tell when it's done.
 		 */
-		RX_BUFF(mac, i) = 0;
+		RX_BUFF(mac, buf_index) = 0;
 
-		n += 2;
+		n += 4;
 	}
 
 	if (n > RX_RING_SIZE) {
@@ -610,8 +606,16 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
 		write_iob_reg(mac, PAS_IOB_COM_PKTHDRCNT, 0);
 		n &= (RX_RING_SIZE-1);
 	}
+
 	mac->rx->next_to_clean = n;
-	pasemi_mac_replenish_rx_ring(mac->netdev, limit-count);
+
+	/* Increase is in number of 16-byte entries, and since each descriptor
+	 * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
+	 * count*2.
+	 */
+	write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), count << 1);
+
+	pasemi_mac_replenish_rx_ring(mac->netdev, count);
 
 	spin_unlock(&mac->rx->lock);
 
@@ -927,6 +931,8 @@ static int pasemi_mac_open(struct net_device *dev)
 
 	pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);
 
+	write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), RX_RING_SIZE>>1);
+
 	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
 		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
 
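To make the accounting in the new comment concrete, here is a minimal
standalone sketch (assumed values, not driver code): each 8BRES result
takes four 8-byte words (two 16-byte entries), so the clean index n steps
by 4 per packet while the channel counter is credited count << 1, and an
empty ring is worth RX_RING_SIZE >> 1 entries at open:

#include <stdio.h>

#define RX_RING_SIZE 512	/* 8-byte words; power of two (assumed) */

int main(void)
{
	unsigned int n = 510;	/* software clean index, in 8-byte words */
	unsigned int count = 3;	/* packets cleaned this pass */

	/* Each 8BRES result is 3x8 bytes padded to 4x8, so n advances
	 * by four words per packet and wraps at the ring size. */
	n = (n + 4 * count) & (RX_RING_SIZE - 1);	/* 522 & 511 == 10 */

	/* The channel counter is kept in 16-byte entries: two per
	 * packet, hence the count << 1 in the patch. */
	unsigned int chan_incr = count << 1;

	/* At open, the whole empty ring is credited once:
	 * RX_RING_SIZE words == RX_RING_SIZE/2 16-byte entries. */
	printf("n=%u chan_incr=%u initial_credit=%u\n",
	       n, chan_incr, RX_RING_SIZE >> 1);
	return 0;
}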