Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Makefile                  |   2
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_core.c  |   2
-rw-r--r--  drivers/net/tg3.c                     | 488
-rw-r--r--  drivers/net/tg3.h                     |   8
-rw-r--r--  drivers/net/tulip/media.c             |   1
-rw-r--r--  drivers/net/tulip/tulip_core.c        |   2
-rw-r--r--  drivers/net/wireless/Kconfig          |   2
7 files changed, 381 insertions(+), 124 deletions(-)
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 6202b10dbb4d..e038d55e4f6f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -187,7 +187,7 @@ obj-$(CONFIG_TR) += tokenring/
 obj-$(CONFIG_WAN) += wan/
 obj-$(CONFIG_ARCNET) += arcnet/
 obj-$(CONFIG_NET_PCMCIA) += pcmcia/
-obj-$(CONFIG_NET_WIRELESS) += wireless/
+obj-$(CONFIG_NET_RADIO) += wireless/
 obj-$(CONFIG_NET_TULIP) += tulip/
 obj-$(CONFIG_HAMRADIO) += hamradio/
 obj-$(CONFIG_IRDA) += irda/
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index ab44358ddbfc..6482d994d489 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -1595,7 +1595,7 @@ static struct ethtool_ops emac_ethtool_ops = {
 static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
         struct ocp_enet_private *fep = dev->priv;
-        uint *data = (uint *) & rq->ifr_ifru;
+        uint16_t *data = (uint16_t *) & rq->ifr_ifru;
 
         switch (cmd) {
         case SIOCGMIIPHY:
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f79b02e80e75..f10dd74988c4 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -61,8 +61,8 @@
 
 #define DRV_MODULE_NAME         "tg3"
 #define PFX DRV_MODULE_NAME     ": "
-#define DRV_MODULE_VERSION      "3.27"
-#define DRV_MODULE_RELDATE      "May 5, 2005"
+#define DRV_MODULE_VERSION      "3.29"
+#define DRV_MODULE_RELDATE      "May 23, 2005"
 
 #define TG3_DEF_MAC_MODE        0
 #define TG3_DEF_RX_MODE         0
@@ -206,6 +206,8 @@ static struct pci_device_id tg3_pci_tbl[] = {
           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
+          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
@@ -420,7 +422,8 @@ static void tg3_enable_ints(struct tg3 *tp)
 {
         tw32(TG3PCI_MISC_HOST_CTRL,
              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                     (tp->last_tag << 24));
         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 
         tg3_cond_int(tp);
@@ -455,10 +458,16 @@ static void tg3_restart_ints(struct tg3 *tp)
 {
         tw32(TG3PCI_MISC_HOST_CTRL,
              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                     tp->last_tag << 24);
         mmiowb();
 
-        if (tg3_has_work(tp))
+        /* When doing tagged status, this work check is unnecessary.
+         * The last_tag we write above tells the chip which piece of
+         * work we've completed.
+         */
+        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+            tg3_has_work(tp))
                 tw32(HOSTCC_MODE, tp->coalesce_mode |
                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
@@ -2500,7 +2509,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                 if (netif_carrier_ok(tp->dev)) {
                         tw32(HOSTCC_STAT_COAL_TICKS,
-                             DEFAULT_STAT_COAL_TICKS);
+                             tp->coal.stats_block_coalesce_usecs);
                 } else {
                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
                 }
@@ -2886,7 +2895,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
          * All RX "locking" is done by ensuring outside
          * code synchronizes with dev->poll()
          */
-        done = 1;
         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
                 int orig_budget = *budget;
                 int work_done;
@@ -2898,12 +2906,14 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 
                 *budget -= work_done;
                 netdev->quota -= work_done;
-
-                if (work_done >= orig_budget)
-                        done = 0;
         }
 
+        if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                tp->last_tag = sblk->status_tag;
+        rmb();
+
         /* if no more work, tell net stack and NIC we're done */
+        done = !tg3_has_work(tp);
         if (done) {
                 spin_lock_irqsave(&tp->lock, flags);
                 __netif_rx_complete(netdev);
@@ -2928,22 +2938,21 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
         spin_lock_irqsave(&tp->lock, flags);
 
         /*
-         * writing any value to intr-mbox-0 clears PCI INTA# and
+         * Writing any value to intr-mbox-0 clears PCI INTA# and
          * chip-internal interrupt pending events.
-         * writing non-zero to intr-mbox-0 additional tells the
+         * Writing non-zero to intr-mbox-0 additional tells the
          * NIC to stop sending us irqs, engaging "in-intr-handler"
          * event coalescing.
          */
         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+        tp->last_tag = sblk->status_tag;
         sblk->status &= ~SD_STATUS_UPDATED;
-
         if (likely(tg3_has_work(tp)))
                 netif_rx_schedule(dev);         /* schedule NAPI poll */
         else {
-                /* no work, re-enable interrupts
-                 */
+                /* No work, re-enable interrupts. */
                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-                             0x00000000);
+                             tp->last_tag << 24);
         }
 
         spin_unlock_irqrestore(&tp->lock, flags);
@@ -2969,21 +2978,62 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
         if ((sblk->status & SD_STATUS_UPDATED) ||
             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                 /*
-                 * writing any value to intr-mbox-0 clears PCI INTA# and
+                 * Writing any value to intr-mbox-0 clears PCI INTA# and
                  * chip-internal interrupt pending events.
-                 * writing non-zero to intr-mbox-0 additional tells the
+                 * Writing non-zero to intr-mbox-0 additional tells the
                  * NIC to stop sending us irqs, engaging "in-intr-handler"
                  * event coalescing.
                  */
                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                              0x00000001);
+                sblk->status &= ~SD_STATUS_UPDATED;
+                if (likely(tg3_has_work(tp)))
+                        netif_rx_schedule(dev);         /* schedule NAPI poll */
+                else {
+                        /* No work, shared interrupt perhaps?  re-enable
+                         * interrupts, and flush that PCI write
+                         */
+                        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                                     0x00000000);
+                        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+                }
+        } else {        /* shared interrupt */
+                handled = 0;
+        }
+
+        spin_unlock_irqrestore(&tp->lock, flags);
+
+        return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
+{
+        struct net_device *dev = dev_id;
+        struct tg3 *tp = netdev_priv(dev);
+        struct tg3_hw_status *sblk = tp->hw_status;
+        unsigned long flags;
+        unsigned int handled = 1;
+
+        spin_lock_irqsave(&tp->lock, flags);
+
+        /* In INTx mode, it is possible for the interrupt to arrive at
+         * the CPU before the status block posted prior to the interrupt.
+         * Reading the PCI State register will confirm whether the
+         * interrupt is ours and will flush the status block.
+         */
+        if ((sblk->status & SD_STATUS_UPDATED) ||
+            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                 /*
-                 * Flush PCI write.  This also guarantees that our
-                 * status block has been flushed to host memory.
+                 * writing any value to intr-mbox-0 clears PCI INTA# and
+                 * chip-internal interrupt pending events.
+                 * writing non-zero to intr-mbox-0 additional tells the
+                 * NIC to stop sending us irqs, engaging "in-intr-handler"
+                 * event coalescing.
                  */
-                tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+                tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                             0x00000001);
+                tp->last_tag = sblk->status_tag;
                 sblk->status &= ~SD_STATUS_UPDATED;
-
                 if (likely(tg3_has_work(tp)))
                         netif_rx_schedule(dev);         /* schedule NAPI poll */
                 else {
@@ -2991,7 +3041,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
                          * interrupts, and flush that PCI write
                          */
                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-                                     0x00000000);
+                                     tp->last_tag << 24);
                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
                 }
         } else {        /* shared interrupt */
@@ -5044,6 +5094,27 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
 }
 
 static void __tg3_set_rx_mode(struct net_device *);
+static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+        tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+        tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+        tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+        tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+                tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+                tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+        }
+        tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+        tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+                u32 val = ec->stats_block_coalesce_usecs;
+
+                if (!netif_carrier_ok(tp->dev))
+                        val = 0;
+
+                tw32(HOSTCC_STAT_COAL_TICKS, val);
+        }
+}
 
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp)
@@ -5366,16 +5437,7 @@ static int tg3_reset_hw(struct tg3 *tp)
                 udelay(10);
         }
 
-        tw32(HOSTCC_RXCOL_TICKS, 0);
-        tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
-        tw32(HOSTCC_RXMAX_FRAMES, 1);
-        tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
-        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
-                tw32(HOSTCC_RXCOAL_TICK_INT, 0);
-                tw32(HOSTCC_TXCOAL_TICK_INT, 0);
-        }
-        tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
-        tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+        tg3_set_coalesce(tp, &tp->coal);
 
         /* set status block DMA address */
         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
@@ -5388,8 +5450,6 @@ static int tg3_reset_hw(struct tg3 *tp)
          * the tg3_periodic_fetch_stats call there, and
          * tg3_get_stats to see how this works for 5705/5750 chips.
          */
-        tw32(HOSTCC_STAT_COAL_TICKS,
-             DEFAULT_STAT_COAL_TICKS);
         tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
              ((u64) tp->stats_mapping >> 32));
         tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
@@ -5445,7 +5505,8 @@ static int tg3_reset_hw(struct tg3 *tp)
         udelay(100);
 
         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
-        tr32(MAILBOX_INTERRUPT_0);
+        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+        tp->last_tag = 0;
 
         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -5723,31 +5784,33 @@ static void tg3_timer(unsigned long __opaque)
         spin_lock_irqsave(&tp->lock, flags);
         spin_lock(&tp->tx_lock);
 
-        /* All of this garbage is because when using non-tagged
-         * IRQ status the mailbox/status_block protocol the chip
-         * uses with the cpu is race prone.
-         */
-        if (tp->hw_status->status & SD_STATUS_UPDATED) {
-                tw32(GRC_LOCAL_CTRL,
-                     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
-        } else {
-                tw32(HOSTCC_MODE, tp->coalesce_mode |
-                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
-        }
+        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+                /* All of this garbage is because when using non-tagged
+                 * IRQ status the mailbox/status_block protocol the chip
+                 * uses with the cpu is race prone.
+                 */
+                if (tp->hw_status->status & SD_STATUS_UPDATED) {
+                        tw32(GRC_LOCAL_CTRL,
+                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+                } else {
+                        tw32(HOSTCC_MODE, tp->coalesce_mode |
+                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+                }
 
-        if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-                tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-                spin_unlock(&tp->tx_lock);
-                spin_unlock_irqrestore(&tp->lock, flags);
-                schedule_work(&tp->reset_task);
-                return;
+                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
+                        spin_unlock(&tp->tx_lock);
+                        spin_unlock_irqrestore(&tp->lock, flags);
+                        schedule_work(&tp->reset_task);
+                        return;
+                }
         }
 
-        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
-                tg3_periodic_fetch_stats(tp);
-
         /* This part only runs once per second. */
         if (!--tp->timer_counter) {
+                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+                        tg3_periodic_fetch_stats(tp);
+
                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                         u32 mac_stat;
                         int phy_event;
@@ -5846,9 +5909,13 @@ static int tg3_test_interrupt(struct tg3 *tp)
         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
                 err = request_irq(tp->pdev->irq, tg3_msi,
                                   SA_SAMPLE_RANDOM, dev->name, dev);
-        else
-                err = request_irq(tp->pdev->irq, tg3_interrupt,
+        else {
+                irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                        fn = tg3_interrupt_tagged;
+                err = request_irq(tp->pdev->irq, fn,
                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+        }
 
         if (err)
                 return err;
@@ -5900,9 +5967,14 @@ static int tg3_test_msi(struct tg3 *tp)
 
         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 
-        err = request_irq(tp->pdev->irq, tg3_interrupt,
-                          SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+        {
+                irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                        fn = tg3_interrupt_tagged;
 
+                err = request_irq(tp->pdev->irq, fn,
+                                  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+        }
         if (err)
                 return err;
 
@@ -5948,7 +6020,13 @@ static int tg3_open(struct net_device *dev)
         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
-                if (pci_enable_msi(tp->pdev) == 0) {
+                /* All MSI supporting chips should support tagged
+                 * status.  Assert that this is the case.
+                 */
+                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
+                               "Not using MSI.\n", tp->dev->name);
+                } else if (pci_enable_msi(tp->pdev) == 0) {
                         u32 msi_mode;
 
                         msi_mode = tr32(MSGINT_MODE);
@@ -5959,9 +6037,14 @@ static int tg3_open(struct net_device *dev)
         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
                 err = request_irq(tp->pdev->irq, tg3_msi,
                                   SA_SAMPLE_RANDOM, dev->name, dev);
-        else
-                err = request_irq(tp->pdev->irq, tg3_interrupt,
+        else {
+                irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                        fn = tg3_interrupt_tagged;
+
+                err = request_irq(tp->pdev->irq, fn,
                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+        }
 
         if (err) {
                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -5980,9 +6063,16 @@ static int tg3_open(struct net_device *dev)
                         tg3_halt(tp, 1);
                         tg3_free_rings(tp);
                 } else {
-                        tp->timer_offset = HZ / 10;
-                        tp->timer_counter = tp->timer_multiplier = 10;
-                        tp->asf_counter = tp->asf_multiplier = (10 * 120);
+                        if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                                tp->timer_offset = HZ;
+                        else
+                                tp->timer_offset = HZ / 10;
+
+                        BUG_ON(tp->timer_offset > HZ);
+                        tp->timer_counter = tp->timer_multiplier =
+                                (HZ / tp->timer_offset);
+                        tp->asf_counter = tp->asf_multiplier =
+                                ((HZ / tp->timer_offset) * 120);
 
                         init_timer(&tp->timer);
                         tp->timer.expires = jiffies + tp->timer_offset;
@@ -6005,6 +6095,7 @@ static int tg3_open(struct net_device *dev)
 
         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                 err = tg3_test_msi(tp);
+
                 if (err) {
                         spin_lock_irq(&tp->lock);
                         spin_lock(&tp->tx_lock);
@@ -7203,6 +7294,14 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 }
 #endif
 
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+        struct tg3 *tp = netdev_priv(dev);
+
+        memcpy(ec, &tp->coal, sizeof(*ec));
+        return 0;
+}
+
 static struct ethtool_ops tg3_ethtool_ops = {
         .get_settings           = tg3_get_settings,
         .set_settings           = tg3_set_settings,
@@ -7235,6 +7334,7 @@ static struct ethtool_ops tg3_ethtool_ops = {
         .get_strings            = tg3_get_strings,
         .get_stats_count        = tg3_get_stats_count,
         .get_ethtool_stats      = tg3_get_ethtool_stats,
+        .get_coalesce           = tg3_get_coalesce,
 };
 
 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -8422,15 +8522,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
 
-        /* Only 5701 and later support tagged irq status mode.
-         * Also, 5788 chips cannot use tagged irq status.
-         *
-         * However, since we are using NAPI avoid tagged irq status
-         * because the interrupt condition is more difficult to
-         * fully clear in that mode.
-         */
         tp->coalesce_mode = 0;
-
         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
@@ -8494,6 +8586,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
 
+        if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+            (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
+                tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
+        if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+                tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+                                      HOSTCC_MODE_CLRTICK_TXBD);
+
+                tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+                pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+                                       tp->misc_host_ctrl);
+        }
+
         /* these are limited to 10/100 only */
         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -8671,6 +8775,146 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
         return 0;
 }
 
+#define BOUNDARY_SINGLE_CACHELINE       1
+#define BOUNDARY_MULTI_CACHELINE        2
+
+static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+{
+        int cacheline_size;
+        u8 byte;
+        int goal;
+
+        pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
+        if (byte == 0)
+                cacheline_size = 1024;
+        else
+                cacheline_size = (int) byte * 4;
+
+        /* On 5703 and later chips, the boundary bits have no
+         * effect.
+         */
+        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+                goto out;
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+        goal = BOUNDARY_MULTI_CACHELINE;
+#else
+#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
+        goal = BOUNDARY_SINGLE_CACHELINE;
+#else
+        goal = 0;
+#endif
+#endif
+
+        if (!goal)
+                goto out;
+
+        /* PCI controllers on most RISC systems tend to disconnect
+         * when a device tries to burst across a cache-line boundary.
+         * Therefore, letting tg3 do so just wastes PCI bandwidth.
+         *
+         * Unfortunately, for PCI-E there are only limited
+         * write-side controls for this, and thus for reads
+         * we will still get the disconnects.  We'll also waste
+         * these PCI cycles for both read and write for chips
+         * other than 5700 and 5701 which do not implement the
+         * boundary bits.
+         */
+        if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
+            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+                switch (cacheline_size) {
+                case 16:
+                case 32:
+                case 64:
+                case 128:
+                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                                val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
+                                        DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
+                        } else {
+                                val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+                                        DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+                        }
+                        break;
+
+                case 256:
+                        val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
+                                DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
+                        break;
+
+                default:
+                        val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+                                DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+                        break;
+                };
+        } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+                switch (cacheline_size) {
+                case 16:
+                case 32:
+                case 64:
+                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                                val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+                                val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
+                                break;
+                        }
+                        /* fallthrough */
+                case 128:
+                default:
+                        val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+                        val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+                        break;
+                };
+        } else {
+                switch (cacheline_size) {
+                case 16:
+                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                                val |= (DMA_RWCTRL_READ_BNDRY_16 |
+                                        DMA_RWCTRL_WRITE_BNDRY_16);
+                                break;
+                        }
+                        /* fallthrough */
+                case 32:
+                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                                val |= (DMA_RWCTRL_READ_BNDRY_32 |
+                                        DMA_RWCTRL_WRITE_BNDRY_32);
+                                break;
+                        }
+                        /* fallthrough */
+                case 64:
+                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                                val |= (DMA_RWCTRL_READ_BNDRY_64 |
+                                        DMA_RWCTRL_WRITE_BNDRY_64);
+                                break;
+                        }
+                        /* fallthrough */
+                case 128:
+                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                                val |= (DMA_RWCTRL_READ_BNDRY_128 |
+                                        DMA_RWCTRL_WRITE_BNDRY_128);
+                                break;
+                        }
+                        /* fallthrough */
+                case 256:
+                        val |= (DMA_RWCTRL_READ_BNDRY_256 |
+                                DMA_RWCTRL_WRITE_BNDRY_256);
+                        break;
+                case 512:
+                        val |= (DMA_RWCTRL_READ_BNDRY_512 |
+                                DMA_RWCTRL_WRITE_BNDRY_512);
+                        break;
+                case 1024:
+                default:
+                        val |= (DMA_RWCTRL_READ_BNDRY_1024 |
+                                DMA_RWCTRL_WRITE_BNDRY_1024);
+                        break;
+                };
+        }
+
+out:
+        return val;
+}
+
 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
 {
         struct tg3_internal_buffer_desc test_desc;
@@ -8752,12 +8996,12 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
         return ret;
 }
 
-#define TEST_BUFFER_SIZE        0x400
+#define TEST_BUFFER_SIZE        0x2000
 
 static int __devinit tg3_test_dma(struct tg3 *tp)
 {
         dma_addr_t buf_dma;
-        u32 *buf;
+        u32 *buf, saved_dma_rwctrl;
         int ret;
 
         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
@@ -8769,46 +9013,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
 
-#ifndef CONFIG_X86
-        {
-                u8 byte;
-                int cacheline_size;
-                pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
-
-                if (byte == 0)
-                        cacheline_size = 1024;
-                else
-                        cacheline_size = (int) byte * 4;
-
-                switch (cacheline_size) {
-                case 16:
-                case 32:
-                case 64:
-                case 128:
-                        if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
-                                tp->dma_rwctrl |=
-                                        DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
-                                break;
-                        } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
-                                tp->dma_rwctrl &=
-                                        ~(DMA_RWCTRL_PCI_WRITE_CMD);
-                                tp->dma_rwctrl |=
-                                        DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
-                                break;
-                        }
-                        /* fallthrough */
-                case 256:
-                        if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-                                tp->dma_rwctrl |=
-                                        DMA_RWCTRL_WRITE_BNDRY_256;
-                        else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-                                tp->dma_rwctrl |=
-                                        DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
-                };
-        }
-#endif
+        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                 /* DMA read watermark not used on PCIE */
@@ -8827,7 +9032,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                 if (ccval == 0x6 || ccval == 0x7)
                         tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
 
-                /* Set bit 23 to renable PCIX hw bug fix */
+                /* Set bit 23 to enable PCIX hw bug fix */
                 tp->dma_rwctrl |= 0x009f0000;
         } else {
                 tp->dma_rwctrl |= 0x001b000f;
@@ -8868,6 +9073,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                 goto out;
 
+        /* It is best to perform DMA test with maximum write burst size
+         * to expose the 5700/5701 write DMA bug.
+         */
+        saved_dma_rwctrl = tp->dma_rwctrl;
+        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
         while (1) {
                 u32 *p = buf, i;
 
@@ -8906,8 +9118,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                         if (p[i] == i)
                                 continue;
 
-                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
-                            DMA_RWCTRL_WRITE_BNDRY_DISAB) {
+                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+                            DMA_RWCTRL_WRITE_BNDRY_16) {
+                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                 break;
@@ -8924,6 +9137,14 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                         break;
                 }
         }
+        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+            DMA_RWCTRL_WRITE_BNDRY_16) {
+                /* DMA test passed without adjusting DMA boundary,
+                 * just restore the calculated DMA boundary
+                 */
+                tp->dma_rwctrl = saved_dma_rwctrl;
+                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+        }
 
 out:
         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
@@ -9011,6 +9232,31 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
         return peer;
 }
 
+static void __devinit tg3_init_coal(struct tg3 *tp)
+{
+        struct ethtool_coalesce *ec = &tp->coal;
+
+        memset(ec, 0, sizeof(*ec));
+        ec->cmd = ETHTOOL_GCOALESCE;
+        ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
+        ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
+        ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
+        ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
+        ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
+        ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
+        ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
+        ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
+        ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
+
+        if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
+                                 HOSTCC_MODE_CLRTICK_TXBD)) {
+                ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
+                ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
+                ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
+                ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
+        }
+}
+
 static int __devinit tg3_init_one(struct pci_dev *pdev,
                                   const struct pci_device_id *ent)
 {
@@ -9256,6 +9502,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
         /* flow control autonegotiation is default behavior */
         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 
+        tg3_init_coal(tp);
+
         err = register_netdev(dev);
         if (err) {
                 printk(KERN_ERR PFX "Cannot register net device, "
@@ -9298,6 +9546,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+        printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
+               dev->name, tp->dma_rwctrl);
 
         return 0;
 
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8de6f21037ba..993f84c93dc4 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -876,10 +876,12 @@
 #define HOSTCC_STATUS_ERROR_ATTN         0x00000004
 #define HOSTCC_RXCOL_TICKS              0x00003c08
 #define LOW_RXCOL_TICKS                  0x00000032
+#define LOW_RXCOL_TICKS_CLRTCKS          0x00000014
 #define DEFAULT_RXCOL_TICKS              0x00000048
 #define HIGH_RXCOL_TICKS                 0x00000096
 #define HOSTCC_TXCOL_TICKS              0x00003c0c
 #define LOW_TXCOL_TICKS                  0x00000096
+#define LOW_TXCOL_TICKS_CLRTCKS          0x00000048
 #define DEFAULT_TXCOL_TICKS              0x0000012c
 #define HIGH_TXCOL_TICKS                 0x00000145
 #define HOSTCC_RXMAX_FRAMES             0x00003c10
@@ -892,8 +894,10 @@
 #define HIGH_TXMAX_FRAMES                0x00000052
 #define HOSTCC_RXCOAL_TICK_INT          0x00003c18
 #define DEFAULT_RXCOAL_TICK_INT          0x00000019
+#define DEFAULT_RXCOAL_TICK_INT_CLRTCKS  0x00000014
 #define HOSTCC_TXCOAL_TICK_INT          0x00003c1c
 #define DEFAULT_TXCOAL_TICK_INT          0x00000019
+#define DEFAULT_TXCOAL_TICK_INT_CLRTCKS  0x00000014
 #define HOSTCC_RXCOAL_MAXF_INT          0x00003c20
 #define DEFAULT_RXCOAL_MAXF_INT          0x00000005
 #define HOSTCC_TXCOAL_MAXF_INT          0x00003c24
@@ -2023,6 +2027,7 @@ struct tg3 {
 
         struct tg3_hw_status            *hw_status;
         dma_addr_t                      status_mapping;
+        u32                             last_tag;
 
         u32                             msg_enable;
 
@@ -2068,6 +2073,7 @@ struct tg3 {
 
         u32                             rx_offset;
         u32                             tg3_flags;
+#define TG3_FLAG_TAGGED_STATUS          0x00000001
 #define TG3_FLAG_TXD_MBOX_HWBUG         0x00000002
 #define TG3_FLAG_RX_CHECKSUMS           0x00000004
 #define TG3_FLAG_USE_LINKCHG_REG        0x00000008
@@ -2225,7 +2231,7 @@ struct tg3 {
 
 #define SST_25VF0X0_PAGE_SIZE           4098
 
-
+        struct ethtool_coalesce         coal;
 };
 
 #endif /* !(_T3_H) */
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index edae09a4b021..919c40cd635c 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -174,6 +174,7 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
                         break;
                 }
                 spin_unlock_irqrestore(&tp->mii_lock, flags);
+                return;
         }
 
         /* Establish sync by sending 32 logic ones. */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index d098b3ba3538..e0ae3ed6e578 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1104,7 +1104,7 @@ static void set_rx_mode(struct net_device *dev)
                 if (entry != 0) {
                         /* Avoid a chip errata by prefixing a dummy entry. Don't do
                            this on the ULI526X as it triggers a different problem */
-                        if (!(tp->chip_id == ULI526X && (tp->revision = 0x40 || tp->revision == 0x50))) {
+                        if (!(tp->chip_id == ULI526X && (tp->revision == 0x40 || tp->revision == 0x50))) {
                                 tp->tx_buffers[entry].skb = NULL;
                                 tp->tx_buffers[entry].mapping = 0;
                                 tp->tx_ring[entry].length =
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 0aaa12c0c098..1d3231cc471a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -323,7 +323,7 @@ config PRISM54
           For a complete list of supported cards visit <http://prism54.org>.
           Here is the latest confirmed list of supported cards:
 
-          3com OfficeConnect 11g Cardbus Card aka 3CRWE154G72
+          3com OfficeConnect 11g Cardbus Card aka 3CRWE154G72 (version 1)
           Allnet ALL0271 PCI Card
           Compex WL54G Cardbus Card
           Corega CG-WLCB54GT Cardbus Card