author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-26 10:19:05 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-26 10:19:05 -0400
commit	a1342206e192709a405485dbe2e647d5c4005d20 (patch)
tree	0394ae057dd8012c852ef08acbb3d02c27e665b2 /drivers
parent	b453257f057b834fdf9f4a6ad6133598b79bd982 (diff)
parent	5523662c4cd585b892811d7bb3e25d9a787e19b3 (diff)
Automatic merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/tg3.c	73
1 files changed, 40 insertions, 33 deletions
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 10d476153ee0..903d0ced7ddb 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -426,9 +426,30 @@ static void tg3_enable_ints(struct tg3 *tp)
 	tg3_cond_int(tp);
 }
 
+static inline unsigned int tg3_has_work(struct tg3 *tp)
+{
+	struct tg3_hw_status *sblk = tp->hw_status;
+	unsigned int work_exists = 0;
+
+	/* check for phy events */
+	if (!(tp->tg3_flags &
+	      (TG3_FLAG_USE_LINKCHG_REG |
+	       TG3_FLAG_POLL_SERDES))) {
+		if (sblk->status & SD_STATUS_LINK_CHG)
+			work_exists = 1;
+	}
+	/* check for RX/TX work to do */
+	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
+	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
+		work_exists = 1;
+
+	return work_exists;
+}
+
 /* tg3_restart_ints
- * similar to tg3_enable_ints, but it can return without flushing the
- * PIO write which reenables interrupts
+ * similar to tg3_enable_ints, but it accurately determines whether there
+ * is new work pending and can return without flushing the PIO write
+ * which reenables interrupts
  */
 static void tg3_restart_ints(struct tg3 *tp)
 {
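
The helper added above collapses the ISR-side test into one predicate on the status block: a pending link-change event (when link state is tracked via the status block rather than the link-change register or serdes polling), or hardware TX/RX indices that have moved past the driver's cached copies. A minimal user-space sketch of that decision logic, using hypothetical simplified stand-ins for the tg3 structures and flag bits (the real definitions live in tg3.h):

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the driver's flags and types. */
#define FLAG_USE_LINKCHG_REG	0x1
#define FLAG_POLL_SERDES	0x2
#define STATUS_LINK_CHG		0x4

struct hw_status {
	unsigned int status;
	unsigned int tx_consumer;	/* advanced by the NIC as TX completes */
	unsigned int rx_producer;	/* advanced by the NIC as RX completions post */
};

struct softc {
	unsigned int flags;
	unsigned int tx_cons;		/* last TX index the driver consumed */
	unsigned int rx_rcb_ptr;	/* last RX return-ring index the driver saw */
	struct hw_status *hw_status;
};

/* Same shape as tg3_has_work(): report work if a link change is pending
 * (and link events come through the status block) or if the NIC's indices
 * have moved past the driver's copies. */
static unsigned int has_work(struct softc *tp)
{
	struct hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	if (!(tp->flags & (FLAG_USE_LINKCHG_REG | FLAG_POLL_SERDES)) &&
	    (sblk->status & STATUS_LINK_CHG))
		work_exists = 1;

	if (sblk->tx_consumer != tp->tx_cons ||
	    sblk->rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

int main(void)
{
	struct hw_status sblk = { .status = 0, .tx_consumer = 5, .rx_producer = 9 };
	struct softc tp = { .flags = 0, .tx_cons = 5, .rx_rcb_ptr = 9, .hw_status = &sblk };

	printf("idle: %u\n", has_work(&tp));		/* 0: indices match, no link event */
	sblk.rx_producer = 10;				/* NIC posts a new RX completion */
	printf("rx pending: %u\n", has_work(&tp));	/* 1 */
	return 0;
}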
@@ -437,7 +458,9 @@ static void tg3_restart_ints(struct tg3 *tp)
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
 	mmiowb();
 
-	tg3_cond_int(tp);
+	if (tg3_has_work(tp))
+		tw32(HOSTCC_MODE, tp->coalesce_mode |
+		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
 
 static inline void tg3_netif_stop(struct tg3 *tp)
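
Where tg3_restart_ints() used to call tg3_cond_int() unconditionally, it now kicks the host-coalescing engine (HOSTCC_MODE_NOW) only when tg3_has_work() reports pending work, so events that arrived while the interrupt mailbox was masked are serviced immediately rather than waiting for the next packet. A commented sketch of that sequence, a restatement of the hunk above rather than the literal driver code:

/* Sketch of the re-enable path after this change; register and accessor
 * names (tw32, tw32_mailbox, mmiowb, HOSTCC_MODE) are the driver's own. */
static void restart_ints_sketch(struct tg3 *tp)
{
	/* 1. Unmask: write 0 to the interrupt mailbox (a posted PIO write). */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
	mmiowb();	/* keep the mailbox write ordered before later MMIO */

	/* 2. If work showed up while the line was masked, force the
	 *    coalescing engine to generate a status update/interrupt now
	 *    instead of waiting for new traffic to trigger one. */
	if (tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}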
@@ -2686,8 +2709,8 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
 static int tg3_rx(struct tg3 *tp, int budget)
 {
 	u32 work_mask;
-	u32 rx_rcb_ptr = tp->rx_rcb_ptr;
-	u16 hw_idx, sw_idx;
+	u32 sw_idx = tp->rx_rcb_ptr;
+	u16 hw_idx;
 	int received;
 
 	hw_idx = tp->hw_status->idx[0].rx_producer;
@@ -2696,7 +2719,6 @@ static int tg3_rx(struct tg3 *tp, int budget)
 	 * the opaque cookie.
 	 */
 	rmb();
-	sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
 	work_mask = 0;
 	received = 0;
 	while (sw_idx != hw_idx && budget > 0) {
@@ -2801,14 +2823,19 @@ static int tg3_rx(struct tg3 *tp, int budget)
 next_pkt:
 		(*post_ptr)++;
 next_pkt_nopost:
-		rx_rcb_ptr++;
-		sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
+		sw_idx++;
+		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
+
+		/* Refresh hw_idx to see if there is new work */
+		if (sw_idx == hw_idx) {
+			hw_idx = tp->hw_status->idx[0].rx_producer;
+			rmb();
+		}
 	}
 
 	/* ACK the status ring. */
-	tp->rx_rcb_ptr = rx_rcb_ptr;
-	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
-		     (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
+	tp->rx_rcb_ptr = sw_idx;
+	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
 
 	/* Refill RX ring(s). */
 	if (work_mask & RXD_OPAQUE_RING_STD) {
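
The RX loop now advances a single sw_idx modulo the return-ring size and, whenever it catches up with its cached copy of the hardware producer index, re-reads that index before giving up, so completions posted while the loop was running are drained in the same poll instead of costing another interrupt. A small user-space sketch of that consumer pattern, with a plain array standing in for the return ring and a counter standing in for the hardware producer register (the driver's rmb() after the re-read is omitted here):

#include <stdio.h>

#define RING_SIZE 8

/* Hypothetical stand-ins: ring[] is the RX return ring, *producer is the
 * index the "hardware" advances as it posts new completions. */
static unsigned int drain(const int ring[RING_SIZE], unsigned int sw_idx,
			  volatile const unsigned int *producer, int budget)
{
	unsigned int hw_idx = *producer;	/* snapshot, like the status-block copy */
	int received = 0;

	while (sw_idx != hw_idx && budget > 0) {
		printf("consumed entry %u: %d\n", sw_idx, ring[sw_idx]);
		received++;
		budget--;

		sw_idx++;
		sw_idx %= RING_SIZE;

		/* Caught up with the snapshot: refresh it so work posted
		 * while we were looping is handled in this pass too. */
		if (sw_idx == hw_idx)
			hw_idx = *producer;
	}
	printf("received %d packet(s), new sw_idx %u\n", received, sw_idx);
	return sw_idx;	/* caller writes this back to the consumer mailbox */
}

int main(void)
{
	int ring[RING_SIZE] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	unsigned int producer = 3;	/* "hardware" has posted entries 0..2 */

	drain(ring, 0, &producer, 16);
	return 0;
}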
@@ -2887,26 +2914,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 	return (done ? 0 : 1);
 }
 
-static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
-{
-	struct tg3_hw_status *sblk = tp->hw_status;
-	unsigned int work_exists = 0;
-
-	/* check for phy events */
-	if (!(tp->tg3_flags &
-	      (TG3_FLAG_USE_LINKCHG_REG |
-	       TG3_FLAG_POLL_SERDES))) {
-		if (sblk->status & SD_STATUS_LINK_CHG)
-			work_exists = 1;
-	}
-	/* check for RX/TX work to do */
-	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
-	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
-		work_exists = 1;
-
-	return work_exists;
-}
-
 /* MSI ISR - No need to check for interrupt sharing and no need to
  * flush status block and interrupt mailbox. PCI ordering rules
  * guarantee that MSI will arrive after the status block.
@@ -2930,7 +2937,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 	sblk->status &= ~SD_STATUS_UPDATED;
 
-	if (likely(tg3_has_work(dev, tp)))
+	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);		/* schedule NAPI poll */
 	else {
 		/* no work, re-enable interrupts
@@ -2977,7 +2984,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 	sblk->status &= ~SD_STATUS_UPDATED;
 
-	if (likely(tg3_has_work(dev, tp)))
+	if (likely(tg3_has_work(tp)))
 		netif_rx_schedule(dev);		/* schedule NAPI poll */
 	else {
 		/* no work, shared interrupt perhaps? re-enable
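
With the unused net_device argument dropped, both handlers call tg3_has_work(tp) directly. For orientation only, a rough, non-compilable sketch of the ISR fast path these two hunks sit in, using the 2.6.12-era calls visible in the diff (netif_rx_schedule(), tg3_enable_ints(), SD_STATUS_UPDATED) plus netdev_priv(), which is assumed here for fetching the private struct; the real handlers also ack the interrupt mailbox as shown above:

/* Rough orientation sketch -- not the driver's exact handler. */
static irqreturn_t example_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	if (!(sblk->status & SD_STATUS_UPDATED))
		return IRQ_NONE;		/* shared line, not our interrupt */

	sblk->status &= ~SD_STATUS_UPDATED;	/* consume the status update */

	if (tg3_has_work(tp))
		netif_rx_schedule(dev);		/* defer RX/TX work to the NAPI poll */
	else
		tg3_enable_ints(tp);		/* nothing pending, unmask right away */

	return IRQ_HANDLED;
}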