diff options
author | Matt Carlson <mcarlson@broadcom.com> | 2009-08-28 10:02:18 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-08-29 18:42:59 -0400 |
commit | 17375d25d3dcd3d4caf9456fa94f60e29d6b1146 (patch) | |
tree | ab33928dc44e87c37b0067dc6f54c6a38bab73be /drivers/net/tg3.c | |
parent | 09943a1819a240ff4a72f924d0038818fcdd0a90 (diff) |
tg3: Convert napi handlers to use tnapi
This patch converts the napi interrupt handler functions to accept and
use tg3_napi structures.
Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r-- | drivers/net/tg3.c | 55 |
1 file changed, 32 insertions, 23 deletions
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index bf9a33305319..27ebe3b363cc 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -643,8 +643,9 @@ static void tg3_enable_ints(struct tg3 *tp) | |||
643 | tg3_cond_int(tp); | 643 | tg3_cond_int(tp); |
644 | } | 644 | } |
645 | 645 | ||
646 | static inline unsigned int tg3_has_work(struct tg3 *tp) | 646 | static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) |
647 | { | 647 | { |
648 | struct tg3 *tp = tnapi->tp; | ||
648 | struct tg3_hw_status *sblk = tp->hw_status; | 649 | struct tg3_hw_status *sblk = tp->hw_status; |
649 | unsigned int work_exists = 0; | 650 | unsigned int work_exists = 0; |
650 | 651 | ||
@@ -663,13 +664,15 @@ static inline unsigned int tg3_has_work(struct tg3 *tp) | |||
663 | return work_exists; | 664 | return work_exists; |
664 | } | 665 | } |
665 | 666 | ||
666 | /* tg3_restart_ints | 667 | /* tg3_int_reenable |
667 | * similar to tg3_enable_ints, but it accurately determines whether there | 668 | * similar to tg3_enable_ints, but it accurately determines whether there |
668 | * is new work pending and can return without flushing the PIO write | 669 | * is new work pending and can return without flushing the PIO write |
669 | * which reenables interrupts | 670 | * which reenables interrupts |
670 | */ | 671 | */ |
671 | static void tg3_restart_ints(struct tg3 *tp) | 672 | static void tg3_int_reenable(struct tg3_napi *tnapi) |
672 | { | 673 | { |
674 | struct tg3 *tp = tnapi->tp; | ||
675 | |||
673 | tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, | 676 | tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, |
674 | tp->last_tag << 24); | 677 | tp->last_tag << 24); |
675 | mmiowb(); | 678 | mmiowb(); |
@@ -679,7 +682,7 @@ static void tg3_restart_ints(struct tg3 *tp) | |||
679 | * work we've completed. | 682 | * work we've completed. |
680 | */ | 683 | */ |
681 | if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && | 684 | if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && |
682 | tg3_has_work(tp)) | 685 | tg3_has_work(tnapi)) |
683 | tw32(HOSTCC_MODE, tp->coalesce_mode | | 686 | tw32(HOSTCC_MODE, tp->coalesce_mode | |
684 | (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); | 687 | (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); |
685 | } | 688 | } |
@@ -4278,8 +4281,9 @@ static inline u32 tg3_tx_avail(struct tg3 *tp) | |||
4278 | * need special logic to handle SKBs that have not had all | 4281 | * need special logic to handle SKBs that have not had all |
4279 | * of their frags sent yet, like SunGEM does. | 4282 | * of their frags sent yet, like SunGEM does. |
4280 | */ | 4283 | */ |
4281 | static void tg3_tx(struct tg3 *tp) | 4284 | static void tg3_tx(struct tg3_napi *tnapi) |
4282 | { | 4285 | { |
4286 | struct tg3 *tp = tnapi->tp; | ||
4283 | u32 hw_idx = tp->hw_status->idx[0].tx_consumer; | 4287 | u32 hw_idx = tp->hw_status->idx[0].tx_consumer; |
4284 | u32 sw_idx = tp->tx_cons; | 4288 | u32 sw_idx = tp->tx_cons; |
4285 | 4289 | ||
@@ -4344,9 +4348,10 @@ static void tg3_tx(struct tg3 *tp) | |||
4344 | * buffers the cpu only reads the last cacheline of the RX descriptor | 4348 | * buffers the cpu only reads the last cacheline of the RX descriptor |
4345 | * (to fetch the error flags, vlan tag, checksum, and opaque cookie). | 4349 | * (to fetch the error flags, vlan tag, checksum, and opaque cookie). |
4346 | */ | 4350 | */ |
4347 | static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, | 4351 | static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, |
4348 | int src_idx, u32 dest_idx_unmasked) | 4352 | int src_idx, u32 dest_idx_unmasked) |
4349 | { | 4353 | { |
4354 | struct tg3 *tp = tnapi->tp; | ||
4350 | struct tg3_rx_buffer_desc *desc; | 4355 | struct tg3_rx_buffer_desc *desc; |
4351 | struct ring_info *map, *src_map; | 4356 | struct ring_info *map, *src_map; |
4352 | struct sk_buff *skb; | 4357 | struct sk_buff *skb; |
@@ -4409,9 +4414,10 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, | |||
4409 | * members of the RX descriptor are invariant. See notes above | 4414 | * members of the RX descriptor are invariant. See notes above |
4410 | * tg3_alloc_rx_skb for full details. | 4415 | * tg3_alloc_rx_skb for full details. |
4411 | */ | 4416 | */ |
4412 | static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key, | 4417 | static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key, |
4413 | int src_idx, u32 dest_idx_unmasked) | 4418 | int src_idx, u32 dest_idx_unmasked) |
4414 | { | 4419 | { |
4420 | struct tg3 *tp = tnapi->tp; | ||
4415 | struct tg3_rx_buffer_desc *src_desc, *dest_desc; | 4421 | struct tg3_rx_buffer_desc *src_desc, *dest_desc; |
4416 | struct ring_info *src_map, *dest_map; | 4422 | struct ring_info *src_map, *dest_map; |
4417 | int dest_idx; | 4423 | int dest_idx; |
@@ -4471,8 +4477,9 @@ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key, | |||
4471 | * If both the host and chip were to write into the same ring, cache line | 4477 | * If both the host and chip were to write into the same ring, cache line |
4472 | * eviction could occur since both entities want it in an exclusive state. | 4478 | * eviction could occur since both entities want it in an exclusive state. |
4473 | */ | 4479 | */ |
4474 | static int tg3_rx(struct tg3 *tp, int budget) | 4480 | static int tg3_rx(struct tg3_napi *tnapi, int budget) |
4475 | { | 4481 | { |
4482 | struct tg3 *tp = tnapi->tp; | ||
4476 | u32 work_mask, rx_std_posted = 0; | 4483 | u32 work_mask, rx_std_posted = 0; |
4477 | u32 sw_idx = tp->rx_rcb_ptr; | 4484 | u32 sw_idx = tp->rx_rcb_ptr; |
4478 | u16 hw_idx; | 4485 | u16 hw_idx; |
@@ -4515,7 +4522,7 @@ static int tg3_rx(struct tg3 *tp, int budget) | |||
4515 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && | 4522 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && |
4516 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { | 4523 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { |
4517 | drop_it: | 4524 | drop_it: |
4518 | tg3_recycle_rx(tp, opaque_key, | 4525 | tg3_recycle_rx(tnapi, opaque_key, |
4519 | desc_idx, *post_ptr); | 4526 | desc_idx, *post_ptr); |
4520 | drop_it_no_recycle: | 4527 | drop_it_no_recycle: |
4521 | /* Other statistics kept track of by card. */ | 4528 | /* Other statistics kept track of by card. */ |
@@ -4535,7 +4542,7 @@ static int tg3_rx(struct tg3 *tp, int budget) | |||
4535 | ) { | 4542 | ) { |
4536 | int skb_size; | 4543 | int skb_size; |
4537 | 4544 | ||
4538 | skb_size = tg3_alloc_rx_skb(tp, opaque_key, | 4545 | skb_size = tg3_alloc_rx_skb(tnapi, opaque_key, |
4539 | desc_idx, *post_ptr); | 4546 | desc_idx, *post_ptr); |
4540 | if (skb_size < 0) | 4547 | if (skb_size < 0) |
4541 | goto drop_it; | 4548 | goto drop_it; |
@@ -4547,7 +4554,7 @@ static int tg3_rx(struct tg3 *tp, int budget) | |||
4547 | } else { | 4554 | } else { |
4548 | struct sk_buff *copy_skb; | 4555 | struct sk_buff *copy_skb; |
4549 | 4556 | ||
4550 | tg3_recycle_rx(tp, opaque_key, | 4557 | tg3_recycle_rx(tnapi, opaque_key, |
4551 | desc_idx, *post_ptr); | 4558 | desc_idx, *post_ptr); |
4552 | 4559 | ||
4553 | copy_skb = netdev_alloc_skb(tp->dev, | 4560 | copy_skb = netdev_alloc_skb(tp->dev, |
@@ -4584,11 +4591,11 @@ static int tg3_rx(struct tg3 *tp, int budget) | |||
4584 | #if TG3_VLAN_TAG_USED | 4591 | #if TG3_VLAN_TAG_USED |
4585 | if (tp->vlgrp != NULL && | 4592 | if (tp->vlgrp != NULL && |
4586 | desc->type_flags & RXD_FLAG_VLAN) { | 4593 | desc->type_flags & RXD_FLAG_VLAN) { |
4587 | vlan_gro_receive(&tp->napi[0].napi, tp->vlgrp, | 4594 | vlan_gro_receive(&tnapi->napi, tp->vlgrp, |
4588 | desc->err_vlan & RXD_VLAN_MASK, skb); | 4595 | desc->err_vlan & RXD_VLAN_MASK, skb); |
4589 | } else | 4596 | } else |
4590 | #endif | 4597 | #endif |
4591 | napi_gro_receive(&tp->napi[0].napi, skb); | 4598 | napi_gro_receive(&tnapi->napi, skb); |
4592 | 4599 | ||
4593 | received++; | 4600 | received++; |
4594 | budget--; | 4601 | budget--; |
@@ -4635,8 +4642,9 @@ next_pkt_nopost: | |||
4635 | return received; | 4642 | return received; |
4636 | } | 4643 | } |
4637 | 4644 | ||
4638 | static int tg3_poll_work(struct tg3 *tp, int work_done, int budget) | 4645 | static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) |
4639 | { | 4646 | { |
4647 | struct tg3 *tp = tnapi->tp; | ||
4640 | struct tg3_hw_status *sblk = tp->hw_status; | 4648 | struct tg3_hw_status *sblk = tp->hw_status; |
4641 | 4649 | ||
4642 | /* handle link change and other phy events */ | 4650 | /* handle link change and other phy events */ |
@@ -4662,7 +4670,7 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget) | |||
4662 | 4670 | ||
4663 | /* run TX completion thread */ | 4671 | /* run TX completion thread */ |
4664 | if (sblk->idx[0].tx_consumer != tp->tx_cons) { | 4672 | if (sblk->idx[0].tx_consumer != tp->tx_cons) { |
4665 | tg3_tx(tp); | 4673 | tg3_tx(tnapi); |
4666 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) | 4674 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) |
4667 | return work_done; | 4675 | return work_done; |
4668 | } | 4676 | } |
@@ -4672,7 +4680,7 @@ static int tg3_poll_work(struct tg3 *tp, int work_done, int budget) | |||
4672 | * code synchronizes with tg3->napi.poll() | 4680 | * code synchronizes with tg3->napi.poll() |
4673 | */ | 4681 | */ |
4674 | if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) | 4682 | if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) |
4675 | work_done += tg3_rx(tp, budget - work_done); | 4683 | work_done += tg3_rx(tnapi, budget - work_done); |
4676 | 4684 | ||
4677 | return work_done; | 4685 | return work_done; |
4678 | } | 4686 | } |
@@ -4685,7 +4693,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) | |||
4685 | struct tg3_hw_status *sblk = tp->hw_status; | 4693 | struct tg3_hw_status *sblk = tp->hw_status; |
4686 | 4694 | ||
4687 | while (1) { | 4695 | while (1) { |
4688 | work_done = tg3_poll_work(tp, work_done, budget); | 4696 | work_done = tg3_poll_work(tnapi, work_done, budget); |
4689 | 4697 | ||
4690 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) | 4698 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) |
4691 | goto tx_recovery; | 4699 | goto tx_recovery; |
@@ -4694,7 +4702,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) | |||
4694 | break; | 4702 | break; |
4695 | 4703 | ||
4696 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { | 4704 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { |
4697 | /* tp->last_tag is used in tg3_restart_ints() below | 4705 | /* tp->last_tag is used in tg3_int_reenable() below |
4698 | * to tell the hw how much work has been processed, | 4706 | * to tell the hw how much work has been processed, |
4699 | * so we must read it before checking for more work. | 4707 | * so we must read it before checking for more work. |
4700 | */ | 4708 | */ |
@@ -4704,9 +4712,9 @@ static int tg3_poll(struct napi_struct *napi, int budget) | |||
4704 | } else | 4712 | } else |
4705 | sblk->status &= ~SD_STATUS_UPDATED; | 4713 | sblk->status &= ~SD_STATUS_UPDATED; |
4706 | 4714 | ||
4707 | if (likely(!tg3_has_work(tp))) { | 4715 | if (likely(!tg3_has_work(tnapi))) { |
4708 | napi_complete(napi); | 4716 | napi_complete(napi); |
4709 | tg3_restart_ints(tp); | 4717 | tg3_int_reenable(tnapi); |
4710 | break; | 4718 | break; |
4711 | } | 4719 | } |
4712 | } | 4720 | } |
@@ -4829,7 +4837,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id) | |||
4829 | if (tg3_irq_sync(tp)) | 4837 | if (tg3_irq_sync(tp)) |
4830 | goto out; | 4838 | goto out; |
4831 | sblk->status &= ~SD_STATUS_UPDATED; | 4839 | sblk->status &= ~SD_STATUS_UPDATED; |
4832 | if (likely(tg3_has_work(tp))) { | 4840 | if (likely(tg3_has_work(tnapi))) { |
4833 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); | 4841 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); |
4834 | napi_schedule(&tnapi->napi); | 4842 | napi_schedule(&tnapi->napi); |
4835 | } else { | 4843 | } else { |
@@ -5560,6 +5568,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, | |||
5560 | struct tg3_rx_prodring_set *tpr) | 5568 | struct tg3_rx_prodring_set *tpr) |
5561 | { | 5569 | { |
5562 | u32 i, rx_pkt_dma_sz; | 5570 | u32 i, rx_pkt_dma_sz; |
5571 | struct tg3_napi *tnapi = &tp->napi[0]; | ||
5563 | 5572 | ||
5564 | /* Zero out all descriptors. */ | 5573 | /* Zero out all descriptors. */ |
5565 | memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); | 5574 | memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); |
@@ -5586,7 +5595,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, | |||
5586 | 5595 | ||
5587 | /* Now allocate fresh SKBs for each rx ring. */ | 5596 | /* Now allocate fresh SKBs for each rx ring. */ |
5588 | for (i = 0; i < tp->rx_pending; i++) { | 5597 | for (i = 0; i < tp->rx_pending; i++) { |
5589 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) { | 5598 | if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) { |
5590 | printk(KERN_WARNING PFX | 5599 | printk(KERN_WARNING PFX |
5591 | "%s: Using a smaller RX standard ring, " | 5600 | "%s: Using a smaller RX standard ring, " |
5592 | "only %d out of %d buffers were allocated " | 5601 | "only %d out of %d buffers were allocated " |
@@ -5617,7 +5626,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, | |||
5617 | } | 5626 | } |
5618 | 5627 | ||
5619 | for (i = 0; i < tp->rx_jumbo_pending; i++) { | 5628 | for (i = 0; i < tp->rx_jumbo_pending; i++) { |
5620 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, | 5629 | if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO, |
5621 | -1, i) < 0) { | 5630 | -1, i) < 0) { |
5622 | printk(KERN_WARNING PFX | 5631 | printk(KERN_WARNING PFX |
5623 | "%s: Using a smaller RX jumbo ring, " | 5632 | "%s: Using a smaller RX jumbo ring, " |