author     Olof Johansson <olof@lixom.net>                    2007-10-02 17:27:57 -0400
committer  David S. Miller <davem@sunset.davemloft.net>       2007-10-10 19:54:27 -0400
commit     b5254eee7994ba0a44ba7386cb66c2ce2f30fcc6 (patch)
tree       f21fe0be6225cc6fce029ffcadb45ec4a5d93faf /drivers
parent     9e81d331f2ec65695e4366ce592e14f9700bae8b (diff)
pasemi_mac: use buffer index pointer in clean_rx()
Use the new B0 features for buffer ring index reporting on the receive side.
This means we no longer have to search the ring to find where the buffer
came from.

Also clean up the RX cleaning path a little while I was at it.

Note: Pre-B0 hardware is no longer supported, and needs a pile of other
workarounds that are not being submitted for mainline inclusion. So the
fact that this breaks old hardware is not a problem at this time.
Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
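
The core of the change is that on B0 the 8-byte result word already identifies
which buffer-ring slot a packet used, so clean_rx() can index RX_RING_INFO()
directly instead of scanning the ring for a matching DMA address. Below is a
minimal userspace sketch of that decode step, reusing the XCT_RXRES_8B_EVAL
mask and shift this patch adds to pasemi_mac.h; the sample descriptor value is
made up for illustration and this is not driver code.

#include <stdint.h>
#include <stdio.h>

/* Mask/shift for the EVAL field, mirroring the pasemi_mac.h additions below. */
#define XCT_RXRES_8B_EVAL_M 0x000000ffff000000ull
#define XCT_RXRES_8B_EVAL_S 24

int main(void)
{
        /* Pretend the hardware reported EVAL = 43 for this packet. */
        uint64_t rxres = (uint64_t)43 << XCT_RXRES_8B_EVAL_S;

        uint64_t eval = (rxres & XCT_RXRES_8B_EVAL_M) >> XCT_RXRES_8B_EVAL_S;
        int buf_index = (int)eval - 1;  /* same eval-1 step as the patched clean_rx() */

        /* Prints "eval=43 -> buffer ring slot 42". */
        printf("eval=%llu -> buffer ring slot %d\n",
               (unsigned long long)eval, buf_index);
        return 0;
}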
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/net/pasemi_mac.c   60
-rw-r--r--   drivers/net/pasemi_mac.h   21
2 files changed, 50 insertions, 31 deletions
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 967ff8c96b0f..31ad2b9093a7 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -243,9 +243,9 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev)
                       PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));
 
         write_dma_reg(mac, PAS_DMA_RXINT_CFG(mac->dma_if),
-                      PAS_DMA_RXINT_CFG_DHL(3) |
-                      PAS_DMA_RXINT_CFG_L2 |
-                      PAS_DMA_RXINT_CFG_LW);
+                      PAS_DMA_RXINT_CFG_DHL(3) | PAS_DMA_RXINT_CFG_L2 |
+                      PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
+                      PAS_DMA_RXINT_CFG_HEN);
 
         ring->next_to_fill = 0;
         ring->next_to_clean = 0;
@@ -402,13 +402,12 @@ static void pasemi_mac_free_rx_resources(struct net_device *dev)
 static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit)
 {
         struct pasemi_mac *mac = netdev_priv(dev);
-        int start = mac->rx->next_to_fill;
-        unsigned int fill, count;
+        int fill, count;
 
         if (limit <= 0)
                 return;
 
-        fill = start;
+        fill = mac->rx->next_to_fill;
         for (count = 0; count < limit; count++) {
                 struct pasemi_mac_buffer *info = &RX_RING_INFO(mac, fill);
                 u64 *buff = &RX_BUFF(mac, fill);
@@ -446,10 +445,10 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit)
 
         wmb();
 
-        write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), count);
         write_dma_reg(mac, PAS_DMA_RXINT_INCR(mac->dma_if), count);
 
-        mac->rx->next_to_fill += count;
+        mac->rx->next_to_fill = (mac->rx->next_to_fill + count) &
+                                (RX_RING_SIZE - 1);
 }
 
 static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac)
@@ -517,15 +516,19 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
         int count;
         struct pasemi_mac_buffer *info;
         struct sk_buff *skb;
-        unsigned int i, len;
+        unsigned int len;
         u64 macrx;
         dma_addr_t dma;
+        int buf_index;
+        u64 eval;
 
         spin_lock(&mac->rx->lock);
 
         n = mac->rx->next_to_clean;
 
-        for (count = limit; count; count--) {
+        prefetch(RX_RING(mac, n));
+
+        for (count = 0; count < limit; count++) {
                 macrx = RX_RING(mac, n);
 
                 if ((macrx & XCT_MACRX_E) ||
@@ -537,21 +540,14 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
 
                 info = NULL;
 
-                /* We have to scan for our skb since there's no way
-                 * to back-map them from the descriptor, and if we
-                 * have several receive channels then they might not
-                 * show up in the same order as they were put on the
-                 * interface ring.
-                 */
+                BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));
 
-                dma = (RX_RING(mac, n+1) & XCT_PTR_ADDR_M);
-                for (i = mac->rx->next_to_fill;
-                     i < (mac->rx->next_to_fill + RX_RING_SIZE);
-                     i++) {
-                        info = &RX_RING_INFO(mac, i);
-                        if (info->dma == dma)
-                                break;
-                }
+                eval = (RX_RING(mac, n+1) & XCT_RXRES_8B_EVAL_M) >>
+                        XCT_RXRES_8B_EVAL_S;
+                buf_index = eval-1;
+
+                dma = (RX_RING(mac, n+2) & XCT_PTR_ADDR_M);
+                info = &RX_RING_INFO(mac, buf_index);
 
                 skb = info->skb;
 
@@ -600,9 +596,9 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
                 /* Need to zero it out since hardware doesn't, since the
                  * replenish loop uses it to tell when it's done.
                  */
-                RX_BUFF(mac, i) = 0;
+                RX_BUFF(mac, buf_index) = 0;
 
-                n += 2;
+                n += 4;
         }
 
         if (n > RX_RING_SIZE) {
@@ -610,8 +606,16 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
                 write_iob_reg(mac, PAS_IOB_COM_PKTHDRCNT, 0);
                 n &= (RX_RING_SIZE-1);
         }
+
         mac->rx->next_to_clean = n;
-        pasemi_mac_replenish_rx_ring(mac->netdev, limit-count);
+
+        /* Increase is in number of 16-byte entries, and since each descriptor
+         * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
+         * count*2.
+         */
+        write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), count << 1);
+
+        pasemi_mac_replenish_rx_ring(mac->netdev, count);
 
         spin_unlock(&mac->rx->lock);
 
@@ -927,6 +931,8 @@ static int pasemi_mac_open(struct net_device *dev)
 
         pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);
 
+        write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), RX_RING_SIZE>>1);
+
         flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
                 PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
 
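One easy-to-miss detail in the pasemi_mac.c hunks above: pasemi_mac_replenish_rx_ring()
now wraps next_to_fill with a power-of-two mask instead of letting it grow unbounded.
Here is a tiny standalone sketch of that arithmetic; RX_RING_SIZE and the sample
values are stand-ins for illustration, not the driver's actual configuration.

#include <stdio.h>

#define RX_RING_SIZE 512        /* stand-in; the mask trick needs a power of two */

int main(void)
{
        int next_to_fill = 508; /* near the end of the ring */
        int count = 10;         /* buffers just replenished */

        /* Same masking as the patched replenish path: (508 + 10) & 511 == 6. */
        next_to_fill = (next_to_fill + count) & (RX_RING_SIZE - 1);
        printf("next_to_fill wraps to %d\n", next_to_fill);
        return 0;
}
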
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h
index 0bb3c487478d..1a120408cf3f 100644
--- a/drivers/net/pasemi_mac.h
+++ b/drivers/net/pasemi_mac.h
@@ -206,12 +206,15 @@ enum {
 #define PAS_DMA_RXINT_RCMDSTA_DROPS_M 0xfffe0000
 #define PAS_DMA_RXINT_RCMDSTA_DROPS_S 17
 #define PAS_DMA_RXINT_CFG(i) (0x204+(i)*_PAS_DMA_RXINT_STRIDE)
+#define PAS_DMA_RXINT_CFG_RBP 0x80000000
+#define PAS_DMA_RXINT_CFG_ITRR 0x40000000
 #define PAS_DMA_RXINT_CFG_DHL_M 0x07000000
 #define PAS_DMA_RXINT_CFG_DHL_S 24
 #define PAS_DMA_RXINT_CFG_DHL(x) (((x) << PAS_DMA_RXINT_CFG_DHL_S) & \
                                   PAS_DMA_RXINT_CFG_DHL_M)
 #define PAS_DMA_RXINT_CFG_LW 0x00200000
 #define PAS_DMA_RXINT_CFG_L2 0x00100000
+#define PAS_DMA_RXINT_CFG_HEN 0x00080000
 #define PAS_DMA_RXINT_CFG_WIF 0x00000002
 #define PAS_DMA_RXINT_CFG_WIL 0x00000001
 
@@ -425,10 +428,9 @@ enum {
 /* Receive descriptor fields */
 #define XCT_MACRX_T 0x8000000000000000ull
 #define XCT_MACRX_ST 0x4000000000000000ull
-#define XCT_MACRX_NORES 0x0000000000000000ull
-#define XCT_MACRX_8BRES 0x1000000000000000ull
-#define XCT_MACRX_24BRES 0x2000000000000000ull
-#define XCT_MACRX_40BRES 0x3000000000000000ull
+#define XCT_MACRX_RR_M 0x3000000000000000ull
+#define XCT_MACRX_RR_NORES 0x0000000000000000ull
+#define XCT_MACRX_RR_8BRES 0x1000000000000000ull
 #define XCT_MACRX_O 0x0400000000000000ull
 #define XCT_MACRX_E 0x0200000000000000ull
 #define XCT_MACRX_FF 0x0100000000000000ull
@@ -476,6 +478,17 @@ enum {
 #define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \
                          XCT_PTR_ADDR_M)
 
+/* Receive interface 8byte result fields */
+#define XCT_RXRES_8B_L4O_M 0xff00000000000000ull
+#define XCT_RXRES_8B_L4O_S 56
+#define XCT_RXRES_8B_RULE_M 0x00ffff0000000000ull
+#define XCT_RXRES_8B_RULE_S 40
+#define XCT_RXRES_8B_EVAL_M 0x000000ffff000000ull
+#define XCT_RXRES_8B_EVAL_S 24
+#define XCT_RXRES_8B_HTYPE_M 0x0000000000f00000ull
+#define XCT_RXRES_8B_HASH_M 0x00000000000fffffull
+#define XCT_RXRES_8B_HASH_S 0
+
 /* Receive interface buffer fields */
 #define XCT_RXB_LEN_M 0x0ffff00000000000ull
 #define XCT_RXB_LEN_S 44
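
As a closing back-of-the-envelope check of the RXCHAN_INCR accounting described in
the clean_rx() comment: each received packet now occupies a descriptor word, an
8-byte result and a buffer pointer, padded to four 8-byte words, i.e. two 16-byte
entries. That is why clean_rx() advances n by 4 and credits the channel with
count << 1, and why pasemi_mac_open() seeds the channel with RX_RING_SIZE >> 1
entries, which appears to cover the whole ring. The sketch below just replays that
arithmetic; RX_RING_SIZE is a stand-in value for illustration.

#include <stdio.h>

#define RX_RING_SIZE 512        /* stand-in; ring size in 8-byte words */

int main(void)
{
        int count = 37;                 /* packets cleaned in one pass */
        int words_per_packet = 4;       /* matches "n += 4" in clean_rx() */

        int bytes = count * words_per_packet * 8;
        int entries = bytes / 16;       /* RXCHAN_INCR counts 16-byte entries */

        /* Both expressions below come out to 74 for count = 37. */
        printf("count << 1 = %d, computed entries = %d\n", count << 1, entries);
        printf("initial credit at open: %d entries\n", RX_RING_SIZE >> 1);
        return 0;
}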