author     Matt Carlson <mcarlson@broadcom.com>   2009-11-13 08:03:44 -0500
committer  David S. Miller <davem@davemloft.net>  2009-11-16 01:14:48 -0500
commit     a3896167160ce9ad1eadeb88fd2f3971888444ae (patch)
tree       504efd6573241c592e75d7139715cece3a15bc46 /drivers/net/tg3.c
parent     afc081f83c59a7cf2c025a3ed89d011b5db556eb (diff)
tg3: Add prodring parameter to tg3_alloc_rx_skb()
This patch changes the tg3_alloc_rx_skb() implementation to accept the
destination producer ring set pointer as a parameter rather than
assuming the source and destination producer rings are the same.
Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Reviewed-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
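For illustration only, here is a minimal, self-contained C sketch of the calling-convention change described above; the structures and field names are simplified stand-ins, not the real tg3 definitions:

#include <stdio.h>

/* Simplified stand-in for struct tg3_rx_prodring_set; the real structure
 * holds the standard/jumbo producer rings and their buffer bookkeeping. */
struct rx_prodring_set {
	int id;
};

struct tg3_like {
	struct rx_prodring_set prodring[4];
};

/* Before: the destination ring set was assumed to be prodring[0]. */
static int alloc_rx_skb_old(struct tg3_like *tp, unsigned int dest_idx)
{
	struct rx_prodring_set *tpr = &tp->prodring[0];	/* hard-coded */

	printf("old: refill ring %d at index %u\n", tpr->id, dest_idx);
	return 0;
}

/* After: the caller passes the destination ring set explicitly, so rings
 * other than prodring[0] can be refilled with the same helper. */
static int alloc_rx_skb_new(struct tg3_like *tp,
			    struct rx_prodring_set *tpr,
			    unsigned int dest_idx)
{
	(void)tp;
	printf("new: refill ring %d at index %u\n", tpr->id, dest_idx);
	return 0;
}

int main(void)
{
	struct tg3_like tp = { .prodring = { { 0 }, { 1 }, { 2 }, { 3 } } };

	alloc_rx_skb_old(&tp, 5);			/* always ring 0 */
	alloc_rx_skb_new(&tp, &tp.prodring[2], 5);	/* caller chooses the ring */
	return 0;
}

In the patch itself the same move happens at the driver level: tg3_rx() and tg3_rx_prodring_alloc() now hand the destination ring set to tg3_alloc_rx_skb() and tg3_recycle_rx(), instead of those helpers assuming tp->prodring[0] as the destination.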
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--  drivers/net/tg3.c  41
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ef6408018d2f..9251bb523e9e 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4408,8 +4408,9 @@ static void tg3_tx(struct tg3_napi *tnapi)
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
-			    u32 dest_idx_unmasked)
+static int tg3_alloc_rx_skb(struct tg3_napi *tnapi,
+			    struct tg3_rx_prodring_set *tpr,
+			    u32 opaque_key, u32 dest_idx_unmasked)
 {
 	struct tg3 *tp = tnapi->tp;
 	struct tg3_rx_buffer_desc *desc;
@@ -4417,7 +4418,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
 	struct sk_buff *skb;
 	dma_addr_t mapping;
 	int skb_size, dest_idx;
-	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
 	src_map = NULL;
 	switch (opaque_key) {
@@ -4471,30 +4471,32 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
  * members of the RX descriptor are invariant. See notes above
  * tg3_alloc_rx_skb for full details.
  */
-static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
-			   int src_idx, u32 dest_idx_unmasked)
+static void tg3_recycle_rx(struct tg3_napi *tnapi,
+			   struct tg3_rx_prodring_set *dpr,
+			   u32 opaque_key, int src_idx,
+			   u32 dest_idx_unmasked)
 {
 	struct tg3 *tp = tnapi->tp;
 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
 	struct ring_info *src_map, *dest_map;
 	int dest_idx;
-	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+	struct tg3_rx_prodring_set *spr = &tp->prodring[0];
 
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
-		dest_desc = &tpr->rx_std[dest_idx];
-		dest_map = &tpr->rx_std_buffers[dest_idx];
-		src_desc = &tpr->rx_std[src_idx];
-		src_map = &tpr->rx_std_buffers[src_idx];
+		dest_desc = &dpr->rx_std[dest_idx];
+		dest_map = &dpr->rx_std_buffers[dest_idx];
+		src_desc = &spr->rx_std[src_idx];
+		src_map = &spr->rx_std_buffers[src_idx];
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
-		dest_desc = &tpr->rx_jmb[dest_idx].std;
-		dest_map = &tpr->rx_jmb_buffers[dest_idx];
-		src_desc = &tpr->rx_jmb[src_idx].std;
-		src_map = &tpr->rx_jmb_buffers[src_idx];
+		dest_desc = &dpr->rx_jmb[dest_idx].std;
+		dest_map = &dpr->rx_jmb_buffers[dest_idx];
+		src_desc = &spr->rx_jmb[src_idx].std;
+		src_map = &spr->rx_jmb_buffers[src_idx];
 		break;
 
 	default:
@@ -4506,7 +4508,6 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
 			   pci_unmap_addr(src_map, mapping));
 	dest_desc->addr_hi = src_desc->addr_hi;
 	dest_desc->addr_lo = src_desc->addr_lo;
-
 	src_map->skb = NULL;
 }
 
@@ -4580,7 +4581,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
 		drop_it:
-			tg3_recycle_rx(tnapi, opaque_key,
+			tg3_recycle_rx(tnapi, tpr, opaque_key,
 				       desc_idx, *post_ptr);
 		drop_it_no_recycle:
 			/* Other statistics kept track of by card. */
@@ -4600,7 +4601,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 		    ) {
 			int skb_size;
 
-			skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
+			skb_size = tg3_alloc_rx_skb(tnapi, tpr, opaque_key,
 						    *post_ptr);
 			if (skb_size < 0)
 				goto drop_it;
@@ -4614,7 +4615,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 		} else {
 			struct sk_buff *copy_skb;
 
-			tg3_recycle_rx(tnapi, opaque_key,
+			tg3_recycle_rx(tnapi, tpr, opaque_key,
 				       desc_idx, *post_ptr);
 
 			copy_skb = netdev_alloc_skb(tp->dev,
@@ -5770,7 +5771,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 
 	/* Now allocate fresh SKBs for each rx ring. */
 	for (i = 0; i < tp->rx_pending; i++) {
-		if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, i) < 0) {
+		if (tg3_alloc_rx_skb(tnapi, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
 			printk(KERN_WARNING PFX
 			       "%s: Using a smaller RX standard ring, "
 			       "only %d out of %d buffers were allocated "
@@ -5801,7 +5802,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 	}
 
 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
-		if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
+		if (tg3_alloc_rx_skb(tnapi, tpr, RXD_OPAQUE_RING_JUMBO,
 				     i) < 0) {
 			printk(KERN_WARNING PFX
 			       "%s: Using a smaller RX jumbo ring, "