author     Matt Carlson <mcarlson@broadcom.com>    2009-08-28 09:58:46 -0400
committer  David S. Miller <davem@davemloft.net>   2009-08-29 18:42:43 -0400
commit     287be12e1774d842bff21ea0c1809c2387d7b310
tree       edeb3e887cc9e7e3d0f5197a262a6acf2e86ce9a /drivers/net/tg3.c
parent     8f666b07ac53eeedd6c035adf6d4299f9ed0df2d
tg3: Clarify rx buffer relationships
This patch attempts to document the various rx buffer sizes used by the
driver and how they relate to each other.
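For illustration only (not part of the patch), the relationship between the new
size macros can be checked with the stand-alone sketch below. The macro values
are copied from the hunk that introduces them; the printf program and the
rx_offset value of 2 are assumptions for the example, since the real
tp->rx_offset is chip-dependent.

#include <stdio.h>

/* Size macros introduced by this patch. */
#define TG3_DMA_BYTE_ENAB	64

#define TG3_RX_STD_DMA_SZ	1536
#define TG3_RX_JMB_DMA_SZ	9046

#define TG3_RX_DMA_TO_MAP_SZ(x)	((x) + TG3_DMA_BYTE_ENAB)

int main(void)
{
	/* Assumed example value; the real tp->rx_offset is per-device. */
	int rx_offset = 2;

	/*
	 * dma   = largest length the NIC is told it may DMA (rxd->idx_len)
	 * map   = dma + 64 bytes of DMA byte-enable slack; used for
	 *         pci_map_single()/pci_unmap_single()
	 * alloc = map + rx_offset headroom reserved in the skb, never mapped
	 */
	printf("std   ring: dma=%d map=%d alloc=%d\n",
	       TG3_RX_STD_DMA_SZ,
	       TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ),
	       TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) + rx_offset);
	printf("jumbo ring: dma=%d map=%d alloc=%d\n",
	       TG3_RX_JMB_DMA_SZ,
	       TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ),
	       TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) + rx_offset);
	return 0;
}

With the assumed rx_offset of 2 this prints 1536/1600/1602 for the standard
ring and 9046/9110/9112 for the jumbo ring; the point of the patch is that only
the map size, not rx_offset, is passed to the DMA mapping calls.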
Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--   drivers/net/tg3.c   42
1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 5e74a19e7c12..a2a5f318315b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -125,8 +125,15 @@
 				 TG3_TX_RING_SIZE)
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
-#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
-#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)
+#define TG3_DMA_BYTE_ENAB	64
+
+#define TG3_RX_STD_DMA_SZ	1536
+#define TG3_RX_JMB_DMA_SZ	9046
+
+#define TG3_RX_DMA_TO_MAP_SZ(x)	((x) + TG3_DMA_BYTE_ENAB)
+
+#define TG3_RX_STD_MAP_SZ	TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
+#define TG3_RX_JMB_MAP_SZ	TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)
@@ -4354,7 +4361,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
 		map = &tp->rx_std_buffers[dest_idx];
 		if (src_idx >= 0)
 			src_map = &tp->rx_std_buffers[src_idx];
-		skb_size = tp->rx_pkt_buf_sz;
+		skb_size = tp->rx_pkt_map_sz;
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
@@ -4363,7 +4370,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
 		map = &tp->rx_jumbo_buffers[dest_idx];
 		if (src_idx >= 0)
 			src_map = &tp->rx_jumbo_buffers[src_idx];
-		skb_size = RX_JUMBO_PKT_BUF_SZ;
+		skb_size = TG3_RX_JMB_MAP_SZ;
 		break;
 
 	default:
@@ -4376,14 +4383,13 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
 	 * Callers depend upon this behavior and assume that
 	 * we leave everything unchanged if we fail.
 	 */
-	skb = netdev_alloc_skb(tp->dev, skb_size);
+	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
 	if (skb == NULL)
 		return -ENOMEM;
 
 	skb_reserve(skb, tp->rx_offset);
 
-	mapping = pci_map_single(tp->pdev, skb->data,
-				 skb_size - tp->rx_offset,
+	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
 				 PCI_DMA_FROMDEVICE);
 
 	map->skb = skb;
@@ -4540,8 +4546,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
 			if (skb_size < 0)
 				goto drop_it;
 
-			pci_unmap_single(tp->pdev, dma_addr,
-					 skb_size - tp->rx_offset,
+			pci_unmap_single(tp->pdev, dma_addr, skb_size,
 					 PCI_DMA_FROMDEVICE);
 
 			skb_put(skb, len);
@@ -5531,7 +5536,7 @@ static void tg3_free_rings(struct tg3 *tp)
 			continue;
 		pci_unmap_single(tp->pdev,
 				 pci_unmap_addr(rxp, mapping),
-				 tp->rx_pkt_buf_sz - tp->rx_offset,
+				 tp->rx_pkt_map_sz,
 				 PCI_DMA_FROMDEVICE);
 		dev_kfree_skb_any(rxp->skb);
 		rxp->skb = NULL;
@@ -5544,7 +5549,7 @@ static void tg3_free_rings(struct tg3 *tp)
 			continue;
 		pci_unmap_single(tp->pdev,
 				 pci_unmap_addr(rxp, mapping),
-				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
+				 TG3_RX_JMB_MAP_SZ,
 				 PCI_DMA_FROMDEVICE);
 		dev_kfree_skb_any(rxp->skb);
 		rxp->skb = NULL;
@@ -5581,7 +5586,7 @@ static void tg3_free_rings(struct tg3 *tp)
  */
 static int tg3_init_rings(struct tg3 *tp)
 {
-	u32 i;
+	u32 i, rx_pkt_dma_sz;
 
 	/* Free up all the SKBs. */
 	tg3_free_rings(tp);
@@ -5592,10 +5597,11 @@ static int tg3_init_rings(struct tg3 *tp)
 	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
 
-	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
+	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
 	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
-	    (tp->dev->mtu > ETH_DATA_LEN))
-		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
+	    tp->dev->mtu > ETH_DATA_LEN)
+		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
+	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
 
 	/* Initialize invariants of the rings, we only set this
 	 * stuff once.  This works because the card does not
@@ -5605,8 +5611,7 @@ static int tg3_init_rings(struct tg3 *tp)
 		struct tg3_rx_buffer_desc *rxd;
 
 		rxd = &tp->rx_std[i];
-		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
-			<< RXD_LEN_SHIFT;
+		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
 		rxd->opaque = (RXD_OPAQUE_RING_STD |
 			       (i << RXD_OPAQUE_INDEX_SHIFT));
@@ -5617,8 +5622,7 @@ static int tg3_init_rings(struct tg3 *tp)
 		struct tg3_rx_buffer_desc *rxd;
 
 		rxd = &tp->rx_jumbo[i];
-		rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
-			<< RXD_LEN_SHIFT;
+		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
 				  RXD_FLAG_JUMBO;
 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |