Diffstat (limited to 'drivers/net/tg3.c')
 drivers/net/tg3.c | 480 ++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 364 insertions(+), 116 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f79b02e80e75..4d2bdbdd34e8 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -420,7 +420,8 @@ static void tg3_enable_ints(struct tg3 *tp)
 {
         tw32(TG3PCI_MISC_HOST_CTRL,
              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                     (tp->last_tag << 24));
         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 
         tg3_cond_int(tp);
@@ -455,10 +456,16 @@ static void tg3_restart_ints(struct tg3 *tp)
 {
         tw32(TG3PCI_MISC_HOST_CTRL,
              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                     tp->last_tag << 24);
         mmiowb();
 
-        if (tg3_has_work(tp))
+        /* When doing tagged status, this work check is unnecessary.
+         * The last_tag we write above tells the chip which piece of
+         * work we've completed.
+         */
+        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+            tg3_has_work(tp))
                 tw32(HOSTCC_MODE, tp->coalesce_mode |
                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
@@ -2500,7 +2507,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                 if (netif_carrier_ok(tp->dev)) {
                         tw32(HOSTCC_STAT_COAL_TICKS,
-                             DEFAULT_STAT_COAL_TICKS);
+                             tp->coal.stats_block_coalesce_usecs);
                 } else {
                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
                 }
@@ -2886,7 +2893,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
          * All RX "locking" is done by ensuring outside
          * code synchronizes with dev->poll()
          */
-        done = 1;
         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
                 int orig_budget = *budget;
                 int work_done;
@@ -2898,12 +2904,14 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 
                 *budget -= work_done;
                 netdev->quota -= work_done;
-
-                if (work_done >= orig_budget)
-                        done = 0;
         }
 
+        if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                tp->last_tag = sblk->status_tag;
+        rmb();
+
         /* if no more work, tell net stack and NIC we're done */
+        done = !tg3_has_work(tp);
         if (done) {
                 spin_lock_irqsave(&tp->lock, flags);
                 __netif_rx_complete(netdev);
@@ -2928,22 +2936,21 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
         spin_lock_irqsave(&tp->lock, flags);
 
         /*
-         * writing any value to intr-mbox-0 clears PCI INTA# and
+         * Writing any value to intr-mbox-0 clears PCI INTA# and
          * chip-internal interrupt pending events.
-         * writing non-zero to intr-mbox-0 additional tells the
+         * Writing non-zero to intr-mbox-0 additional tells the
          * NIC to stop sending us irqs, engaging "in-intr-handler"
          * event coalescing.
          */
         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+        tp->last_tag = sblk->status_tag;
         sblk->status &= ~SD_STATUS_UPDATED;
-
         if (likely(tg3_has_work(tp)))
                 netif_rx_schedule(dev);        /* schedule NAPI poll */
         else {
-                /* no work, re-enable interrupts
-                 */
+                /* No work, re-enable interrupts. */
                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-                             0x00000000);
+                             tp->last_tag << 24);
         }
 
         spin_unlock_irqrestore(&tp->lock, flags);
@@ -2969,21 +2976,62 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
         if ((sblk->status & SD_STATUS_UPDATED) ||
             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                 /*
-                 * writing any value to intr-mbox-0 clears PCI INTA# and
+                 * Writing any value to intr-mbox-0 clears PCI INTA# and
                  * chip-internal interrupt pending events.
-                 * writing non-zero to intr-mbox-0 additional tells the
+                 * Writing non-zero to intr-mbox-0 additional tells the
                  * NIC to stop sending us irqs, engaging "in-intr-handler"
                  * event coalescing.
                  */
                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                              0x00000001);
+                sblk->status &= ~SD_STATUS_UPDATED;
+                if (likely(tg3_has_work(tp)))
+                        netif_rx_schedule(dev);        /* schedule NAPI poll */
+                else {
+                        /* No work, shared interrupt perhaps?  re-enable
+                         * interrupts, and flush that PCI write
+                         */
+                        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                                     0x00000000);
+                        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+                }
+        } else {        /* shared interrupt */
+                handled = 0;
+        }
+
+        spin_unlock_irqrestore(&tp->lock, flags);
+
+        return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
+{
+        struct net_device *dev = dev_id;
+        struct tg3 *tp = netdev_priv(dev);
+        struct tg3_hw_status *sblk = tp->hw_status;
+        unsigned long flags;
+        unsigned int handled = 1;
+
+        spin_lock_irqsave(&tp->lock, flags);
+
+        /* In INTx mode, it is possible for the interrupt to arrive at
+         * the CPU before the status block posted prior to the interrupt.
+         * Reading the PCI State register will confirm whether the
+         * interrupt is ours and will flush the status block.
+         */
+        if ((sblk->status & SD_STATUS_UPDATED) ||
+            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                 /*
-                 * Flush PCI write.  This also guarantees that our
-                 * status block has been flushed to host memory.
+                 * writing any value to intr-mbox-0 clears PCI INTA# and
+                 * chip-internal interrupt pending events.
+                 * writing non-zero to intr-mbox-0 additional tells the
+                 * NIC to stop sending us irqs, engaging "in-intr-handler"
+                 * event coalescing.
                  */
-                tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+                tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                             0x00000001);
+                tp->last_tag = sblk->status_tag;
                 sblk->status &= ~SD_STATUS_UPDATED;
-
                 if (likely(tg3_has_work(tp)))
                         netif_rx_schedule(dev);        /* schedule NAPI poll */
                 else {
@@ -2991,7 +3039,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
                          * interrupts, and flush that PCI write
                          */
                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-                                     0x00000000);
+                                     tp->last_tag << 24);
                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
                 }
         } else {        /* shared interrupt */
@@ -5044,6 +5092,27 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
 }
 
 static void __tg3_set_rx_mode(struct net_device *);
+static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+        tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+        tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+        tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+        tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+                tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+                tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+        }
+        tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+        tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+                u32 val = ec->stats_block_coalesce_usecs;
+
+                if (!netif_carrier_ok(tp->dev))
+                        val = 0;
+
+                tw32(HOSTCC_STAT_COAL_TICKS, val);
+        }
+}
 
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp)
@@ -5366,16 +5435,7 @@ static int tg3_reset_hw(struct tg3 *tp)
                 udelay(10);
         }
 
-        tw32(HOSTCC_RXCOL_TICKS, 0);
-        tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
-        tw32(HOSTCC_RXMAX_FRAMES, 1);
-        tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
-        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
-                tw32(HOSTCC_RXCOAL_TICK_INT, 0);
-                tw32(HOSTCC_TXCOAL_TICK_INT, 0);
-        }
-        tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
-        tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+        tg3_set_coalesce(tp, &tp->coal);
 
         /* set status block DMA address */
         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
@@ -5388,8 +5448,6 @@ static int tg3_reset_hw(struct tg3 *tp)
          * the tg3_periodic_fetch_stats call there, and
          * tg3_get_stats to see how this works for 5705/5750 chips.
          */
-        tw32(HOSTCC_STAT_COAL_TICKS,
-             DEFAULT_STAT_COAL_TICKS);
         tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
              ((u64) tp->stats_mapping >> 32));
         tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
@@ -5445,7 +5503,8 @@ static int tg3_reset_hw(struct tg3 *tp)
         udelay(100);
 
         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
-        tr32(MAILBOX_INTERRUPT_0);
+        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+        tp->last_tag = 0;
 
         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -5723,31 +5782,33 @@ static void tg3_timer(unsigned long __opaque)
         spin_lock_irqsave(&tp->lock, flags);
         spin_lock(&tp->tx_lock);
 
-        /* All of this garbage is because when using non-tagged
-         * IRQ status the mailbox/status_block protocol the chip
-         * uses with the cpu is race prone.
-         */
-        if (tp->hw_status->status & SD_STATUS_UPDATED) {
-                tw32(GRC_LOCAL_CTRL,
-                     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
-        } else {
-                tw32(HOSTCC_MODE, tp->coalesce_mode |
-                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
-        }
+        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+                /* All of this garbage is because when using non-tagged
+                 * IRQ status the mailbox/status_block protocol the chip
+                 * uses with the cpu is race prone.
+                 */
+                if (tp->hw_status->status & SD_STATUS_UPDATED) {
+                        tw32(GRC_LOCAL_CTRL,
+                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+                } else {
+                        tw32(HOSTCC_MODE, tp->coalesce_mode |
+                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+                }
 
-        if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-                tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-                spin_unlock(&tp->tx_lock);
-                spin_unlock_irqrestore(&tp->lock, flags);
-                schedule_work(&tp->reset_task);
-                return;
+                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
+                        spin_unlock(&tp->tx_lock);
+                        spin_unlock_irqrestore(&tp->lock, flags);
+                        schedule_work(&tp->reset_task);
+                        return;
+                }
         }
 
-        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
-                tg3_periodic_fetch_stats(tp);
-
         /* This part only runs once per second. */
         if (!--tp->timer_counter) {
+                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+                        tg3_periodic_fetch_stats(tp);
+
                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                         u32 mac_stat;
                         int phy_event;
@@ -5846,9 +5907,13 @@ static int tg3_test_interrupt(struct tg3 *tp)
         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
                 err = request_irq(tp->pdev->irq, tg3_msi,
                                   SA_SAMPLE_RANDOM, dev->name, dev);
-        else
-                err = request_irq(tp->pdev->irq, tg3_interrupt,
+        else {
+                irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                        fn = tg3_interrupt_tagged;
+                err = request_irq(tp->pdev->irq, fn,
                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+        }
 
         if (err)
                 return err;
@@ -5900,9 +5965,14 @@ static int tg3_test_msi(struct tg3 *tp)
 
         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 
-        err = request_irq(tp->pdev->irq, tg3_interrupt,
-                          SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+        {
+                irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                        fn = tg3_interrupt_tagged;
 
+                err = request_irq(tp->pdev->irq, fn,
+                                  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+        }
         if (err)
                 return err;
 
@@ -5948,7 +6018,13 @@ static int tg3_open(struct net_device *dev)
         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
-                if (pci_enable_msi(tp->pdev) == 0) {
+                /* All MSI supporting chips should support tagged
+                 * status.  Assert that this is the case.
+                 */
+                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
+                               "Not using MSI.\n", tp->dev->name);
+                } else if (pci_enable_msi(tp->pdev) == 0) {
                         u32 msi_mode;
 
                         msi_mode = tr32(MSGINT_MODE);
@@ -5959,9 +6035,14 @@ static int tg3_open(struct net_device *dev)
         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
                 err = request_irq(tp->pdev->irq, tg3_msi,
                                   SA_SAMPLE_RANDOM, dev->name, dev);
-        else
-                err = request_irq(tp->pdev->irq, tg3_interrupt,
+        else {
+                irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                        fn = tg3_interrupt_tagged;
+
+                err = request_irq(tp->pdev->irq, fn,
                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+        }
 
         if (err) {
                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -5980,9 +6061,16 @@ static int tg3_open(struct net_device *dev)
                 tg3_halt(tp, 1);
                 tg3_free_rings(tp);
         } else {
-                tp->timer_offset = HZ / 10;
-                tp->timer_counter = tp->timer_multiplier = 10;
-                tp->asf_counter = tp->asf_multiplier = (10 * 120);
+                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+                        tp->timer_offset = HZ;
+                else
+                        tp->timer_offset = HZ / 10;
+
+                BUG_ON(tp->timer_offset > HZ);
+                tp->timer_counter = tp->timer_multiplier =
+                        (HZ / tp->timer_offset);
+                tp->asf_counter = tp->asf_multiplier =
+                        ((HZ / tp->timer_offset) * 120);
 
                 init_timer(&tp->timer);
                 tp->timer.expires = jiffies + tp->timer_offset;
@@ -6005,6 +6093,7 @@ static int tg3_open(struct net_device *dev)
 
         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                 err = tg3_test_msi(tp);
+
                 if (err) {
                         spin_lock_irq(&tp->lock);
                         spin_lock(&tp->tx_lock);
@@ -7203,6 +7292,14 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 }
 #endif
 
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+        struct tg3 *tp = netdev_priv(dev);
+
+        memcpy(ec, &tp->coal, sizeof(*ec));
+        return 0;
+}
+
 static struct ethtool_ops tg3_ethtool_ops = {
         .get_settings           = tg3_get_settings,
         .set_settings           = tg3_set_settings,
@@ -7235,6 +7332,7 @@ static struct ethtool_ops tg3_ethtool_ops = {
         .get_strings            = tg3_get_strings,
         .get_stats_count        = tg3_get_stats_count,
         .get_ethtool_stats      = tg3_get_ethtool_stats,
+        .get_coalesce           = tg3_get_coalesce,
 };
 
 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -8422,15 +8520,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
 
-        /* Only 5701 and later support tagged irq status mode.
-         * Also, 5788 chips cannot use tagged irq status.
-         *
-         * However, since we are using NAPI avoid tagged irq status
-         * because the interrupt condition is more difficult to
-         * fully clear in that mode.
-         */
         tp->coalesce_mode = 0;
-
         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
@@ -8494,6 +8584,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
 
+        if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+            (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
+                tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
+        if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+                tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+                                      HOSTCC_MODE_CLRTICK_TXBD);
+
+                tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+                pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+                                       tp->misc_host_ctrl);
+        }
+
         /* these are limited to 10/100 only */
         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -8671,6 +8773,146 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
         return 0;
 }
 
+#define BOUNDARY_SINGLE_CACHELINE       1
+#define BOUNDARY_MULTI_CACHELINE        2
+
+static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+{
+        int cacheline_size;
+        u8 byte;
+        int goal;
+
+        pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
+        if (byte == 0)
+                cacheline_size = 1024;
+        else
+                cacheline_size = (int) byte * 4;
+
+        /* On 5703 and later chips, the boundary bits have no
+         * effect.
+         */
+        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+                goto out;
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+        goal = BOUNDARY_MULTI_CACHELINE;
+#else
+#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
+        goal = BOUNDARY_SINGLE_CACHELINE;
+#else
+        goal = 0;
+#endif
+#endif
+
+        if (!goal)
+                goto out;
+
+        /* PCI controllers on most RISC systems tend to disconnect
+         * when a device tries to burst across a cache-line boundary.
+         * Therefore, letting tg3 do so just wastes PCI bandwidth.
+         *
+         * Unfortunately, for PCI-E there are only limited
+         * write-side controls for this, and thus for reads
+         * we will still get the disconnects.  We'll also waste
+         * these PCI cycles for both read and write for chips
+         * other than 5700 and 5701 which do not implement the
+         * boundary bits.
+         */
+        if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
+            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+                switch (cacheline_size) {
+                case 16:
+                case 32:
+                case 64:
+                case 128:
+                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                                val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
+                                        DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
+                        } else {
+                                val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+                                        DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+                        }
+                        break;
+
+                case 256:
+                        val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
+                                DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
+                        break;
+
+                default:
+                        val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+                                DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+                        break;
+                };
+        } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+                switch (cacheline_size) {
+                case 16:
+                case 32:
+                case 64:
+                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                                val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+                                val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
+                                break;
+                        }
+                        /* fallthrough */
+                case 128:
+                default:
+                        val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+                        val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+                        break;
+                };
+        } else {
+                switch (cacheline_size) {
+                case 16:
+                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                                val |= (DMA_RWCTRL_READ_BNDRY_16 |
+                                        DMA_RWCTRL_WRITE_BNDRY_16);
+                                break;
+                        }
+                        /* fallthrough */
+                case 32:
+                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                                val |= (DMA_RWCTRL_READ_BNDRY_32 |
+                                        DMA_RWCTRL_WRITE_BNDRY_32);
+                                break;
+                        }
+                        /* fallthrough */
+                case 64:
+                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                                val |= (DMA_RWCTRL_READ_BNDRY_64 |
+                                        DMA_RWCTRL_WRITE_BNDRY_64);
+                                break;
+                        }
+                        /* fallthrough */
+                case 128:
+                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
+                                val |= (DMA_RWCTRL_READ_BNDRY_128 |
+                                        DMA_RWCTRL_WRITE_BNDRY_128);
+                                break;
+                        }
+                        /* fallthrough */
+                case 256:
+                        val |= (DMA_RWCTRL_READ_BNDRY_256 |
+                                DMA_RWCTRL_WRITE_BNDRY_256);
+                        break;
+                case 512:
+                        val |= (DMA_RWCTRL_READ_BNDRY_512 |
+                                DMA_RWCTRL_WRITE_BNDRY_512);
+                        break;
+                case 1024:
+                default:
+                        val |= (DMA_RWCTRL_READ_BNDRY_1024 |
+                                DMA_RWCTRL_WRITE_BNDRY_1024);
+                        break;
+                };
+        }
+
+out:
+        return val;
+}
+
 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
 {
         struct tg3_internal_buffer_desc test_desc;
@@ -8757,7 +8999,7 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
 static int __devinit tg3_test_dma(struct tg3 *tp)
 {
         dma_addr_t buf_dma;
-        u32 *buf;
+        u32 *buf, saved_dma_rwctrl;
         int ret;
 
         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
@@ -8769,46 +9011,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
 
-#ifndef CONFIG_X86
-        {
-                u8 byte;
-                int cacheline_size;
-                pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
-
-                if (byte == 0)
-                        cacheline_size = 1024;
-                else
-                        cacheline_size = (int) byte * 4;
-
-                switch (cacheline_size) {
-                case 16:
-                case 32:
-                case 64:
-                case 128:
-                        if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
-                                tp->dma_rwctrl |=
-                                        DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
-                                break;
-                        } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
-                                tp->dma_rwctrl &=
-                                        ~(DMA_RWCTRL_PCI_WRITE_CMD);
-                                tp->dma_rwctrl |=
-                                        DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
-                                break;
-                        }
-                        /* fallthrough */
-                case 256:
-                        if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
-                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-                                tp->dma_rwctrl |=
-                                        DMA_RWCTRL_WRITE_BNDRY_256;
-                        else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
-                                tp->dma_rwctrl |=
-                                        DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
-                };
-        }
-#endif
+        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                 /* DMA read watermark not used on PCIE */
@@ -8827,7 +9030,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                 if (ccval == 0x6 || ccval == 0x7)
                         tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
 
-                /* Set bit 23 to renable PCIX hw bug fix */
+                /* Set bit 23 to enable PCIX hw bug fix */
                 tp->dma_rwctrl |= 0x009f0000;
         } else {
                 tp->dma_rwctrl |= 0x001b000f;
@@ -8868,6 +9071,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                 goto out;
 
+        /* It is best to perform DMA test with maximum write burst size
+         * to expose the 5700/5701 write DMA bug.
+         */
+        saved_dma_rwctrl = tp->dma_rwctrl;
+        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
         while (1) {
                 u32 *p = buf, i;
 
@@ -8906,8 +9116,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                         if (p[i] == i)
                                 continue;
 
-                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
-                            DMA_RWCTRL_WRITE_BNDRY_DISAB) {
+                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+                            DMA_RWCTRL_WRITE_BNDRY_16) {
+                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                 break;
@@ -8924,6 +9135,14 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
                         break;
                 }
         }
+        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+            DMA_RWCTRL_WRITE_BNDRY_16) {
+                /* DMA test passed without adjusting DMA boundary,
+                 * just restore the calculated DMA boundary
+                 */
+                tp->dma_rwctrl = saved_dma_rwctrl;
+                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+        }
 
 out:
         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
@@ -9011,6 +9230,31 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
         return peer;
 }
 
+static void __devinit tg3_init_coal(struct tg3 *tp)
+{
+        struct ethtool_coalesce *ec = &tp->coal;
+
+        memset(ec, 0, sizeof(*ec));
+        ec->cmd = ETHTOOL_GCOALESCE;
+        ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
+        ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
+        ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
+        ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
+        ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
+        ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
+        ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
+        ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
+        ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
+
+        if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
+                                 HOSTCC_MODE_CLRTICK_TXBD)) {
+                ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
+                ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
+                ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
+                ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
+        }
+}
+
 static int __devinit tg3_init_one(struct pci_dev *pdev,
                                   const struct pci_device_id *ent)
 {
@@ -9256,6 +9500,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
         /* flow control autonegotiation is default behavior */
         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
 
+        tg3_init_coal(tp);
+
         err = register_netdev(dev);
         if (err) {
                 printk(KERN_ERR PFX "Cannot register net device, "
@@ -9298,6 +9544,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+        printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
+               dev->name, tp->dma_rwctrl);
 
         return 0;
 