author		Ayaz Abdulla <aabdulla@nvidia.com>	2006-06-10 22:48:13 -0400
committer	Jeff Garzik <jeff@garzik.org>		2006-06-11 09:25:16 -0400
commit		9589c77a0de19c0c95370d5212eb1f9006d8abcb (patch)
tree		428c84c41d0e53bef95577fbb74ef764e7b8be89 /drivers/net/forcedeth.c
parent		7a1854b7977d36360fde4e06c2d9cedcc3dd0933 (diff)
[PATCH] forcedeth config: diagnostics

This patch adds support for diagnostic tests through ethtool support.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
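For context, a minimal user-space sketch of how these tests can be driven is shown below. It issues the ETHTOOL_TEST ioctl directly (the same path the ethtool utility's -t option takes); it is illustrative only and not part of this patch. In particular, the interface name eth0 is a placeholder, and the result length is hardcoded to 4 (the extended test count used here) instead of being queried via ETHTOOL_GDRVINFO as a real tool would.

/* Illustrative only -- not part of the patch. Runs the ethtool self-test
 * (including the offline tests) on an interface and prints the per-test
 * result words. Assumes at most 4 result entries; a robust tool would read
 * testinfo_len from ETHTOOL_GDRVINFO first. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = (argc > 1) ? argv[1] : "eth0"; /* placeholder name */
	struct ethtool_test *test;
	struct ifreq ifr;
	int fd, i;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	test = calloc(1, sizeof(*test) + 4 * sizeof(__u64));
	if (fd < 0 || !test)
		return 1;

	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;	/* also run the offline tests */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)test;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}

	printf("%s self-test: %s\n", ifname,
	       (test->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "PASSED");
	for (i = 0; i < 4; i++)	/* a non-zero entry means that test failed */
		printf("  result[%d] = %llu\n", i, (unsigned long long)test->data[i]);

	free(test);
	close(fd);
	return 0;
}

Run as root, the per-result indices should line up with the test names the driver registers in nv_etests_str below (link, register, interrupt, loopback).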
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--	drivers/net/forcedeth.c	406
1 file changed, 398 insertions(+), 8 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 8d7666856420..6ee3e8d5a04d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -166,6 +166,7 @@
 #define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
 #define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */
 #define DEV_HAS_STATISTICS	0x0400	/* device supports hw statistics */
+#define DEV_HAS_TEST_EXTENDED	0x0800	/* device supports extended diagnostic test */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -222,6 +223,7 @@ enum {
 #define NVREG_PFF_ALWAYS	0x7F0000
 #define NVREG_PFF_PROMISC	0x80
 #define NVREG_PFF_MYADDR	0x20
+#define NVREG_PFF_LOOPBACK	0x10
 
 	NvRegOffloadConfig = 0x90,
 #define NVREG_OFFLOAD_HOMEPHY	0x601
@@ -634,6 +636,32 @@ struct nv_ethtool_stats {
 	u64 rx_errors_total;
 };
 
+/* diagnostics */
+#define NV_TEST_COUNT_BASE 3
+#define NV_TEST_COUNT_EXTENDED 4
+
+static const struct nv_ethtool_str nv_etests_str[] = {
+	{ "link      (online/offline)" },
+	{ "register  (offline)       " },
+	{ "interrupt (offline)       " },
+	{ "loopback  (offline)       " }
+};
+
+struct register_test {
+	u32 reg;
+	u32 mask;
+};
+
+static const struct register_test nv_registers_test[] = {
+	{ NvRegUnknownSetupReg6, 0x01 },
+	{ NvRegMisc1, 0x03c },
+	{ NvRegOffloadConfig, 0x03ff },
+	{ NvRegMulticastAddrA, 0xffffffff },
+	{ NvRegUnknownSetupReg3, 0x0ff },
+	{ NvRegWakeUpFlags, 0x07777 },
+	{ 0,0 }
+};
+
 /*
  * SMP locking:
  * All hardware access under dev->priv->lock, except the performance
@@ -662,6 +690,7 @@ struct fe_priv {
 	int wolenabled;
 	unsigned int phy_oui;
 	u16 gigabit;
+	int intr_test;
 
 	/* General data: RO fields */
 	dma_addr_t ring_addr;
@@ -2502,6 +2531,36 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 	return IRQ_RETVAL(i);
 }
 
+static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
+	} else {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
+	}
+	pci_push(base);
+	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
+	if (!(events & NVREG_IRQ_TIMER))
+		return IRQ_RETVAL(0);
+
+	spin_lock(&np->lock);
+	np->intr_test = 1;
+	spin_unlock(&np->lock);
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
+
+	return IRQ_RETVAL(1);
+}
+
 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 {
 	u8 __iomem *base = get_hwbase(dev);
@@ -2528,7 +2587,7 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
 
-static int nv_request_irq(struct net_device *dev)
+static int nv_request_irq(struct net_device *dev, int intr_test)
 {
 	struct fe_priv *np = get_nvpriv(dev);
 	u8 __iomem *base = get_hwbase(dev);
@@ -2541,7 +2600,7 @@ static int nv_request_irq(struct net_device *dev)
 		}
 		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
 			np->msi_flags |= NV_MSI_X_ENABLED;
-			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
+			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
 				/* Request irq for rx handling */
 				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
 					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
@@ -2571,7 +2630,10 @@ static int nv_request_irq(struct net_device *dev)
 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
 			} else {
 				/* Request irq for all interrupts */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+				if ((!intr_test &&
+				     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
+				    (intr_test &&
+				     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
 					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
 					pci_disable_msix(np->pci_dev);
 					np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -2587,7 +2649,8 @@ static int nv_request_irq(struct net_device *dev)
 	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
 		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
 			np->msi_flags |= NV_MSI_ENABLED;
-			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+			if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
+			    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
 				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
 				pci_disable_msi(np->pci_dev);
 				np->msi_flags &= ~NV_MSI_ENABLED;
@@ -2602,8 +2665,10 @@ static int nv_request_irq(struct net_device *dev)
 		}
 	}
 	if (ret != 0) {
-		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
+		if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
+		    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0))
 			goto out_err;
+
 	}
 
 	return 0;
@@ -3387,12 +3452,335 @@ static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
 	memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
 }
 
+static int nv_self_test_count(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	if (np->driver_data & DEV_HAS_TEST_EXTENDED)
+		return NV_TEST_COUNT_EXTENDED;
+	else
+		return NV_TEST_COUNT_BASE;
+}
+
+static int nv_link_test(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	int mii_status;
+
+	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+
+	/* check phy link status */
+	if (!(mii_status & BMSR_LSTATUS))
+		return 0;
+	else
+		return 1;
+}
+
+static int nv_register_test(struct net_device *dev)
+{
+	u8 __iomem *base = get_hwbase(dev);
+	int i = 0;
+	u32 orig_read, new_read;
+
+	do {
+		orig_read = readl(base + nv_registers_test[i].reg);
+
+		/* xor with mask to toggle bits */
+		orig_read ^= nv_registers_test[i].mask;
+
+		writel(orig_read, base + nv_registers_test[i].reg);
+
+		new_read = readl(base + nv_registers_test[i].reg);
+
+		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
+			return 0;
+
+		/* restore original value */
+		orig_read ^= nv_registers_test[i].mask;
+		writel(orig_read, base + nv_registers_test[i].reg);
+
+	} while (nv_registers_test[++i].reg != 0);
+
+	return 1;
+}
+
+static int nv_interrupt_test(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	int ret = 1;
+	int testcnt;
+	u32 save_msi_flags, save_poll_interval = 0;
+
+	if (netif_running(dev)) {
+		/* free current irq */
+		nv_free_irq(dev);
+		save_poll_interval = readl(base+NvRegPollingInterval);
+	}
+
+	/* flag to test interrupt handler */
+	np->intr_test = 0;
+
+	/* setup test irq */
+	save_msi_flags = np->msi_flags;
+	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
+	np->msi_flags |= 0x001; /* setup 1 vector */
+	if (nv_request_irq(dev, 1))
+		return 0;
+
+	/* setup timer interrupt */
+	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
+	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+
+	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+
+	/* wait for at least one interrupt */
+	msleep(100);
+
+	spin_lock_irq(&np->lock);
+
+	/* flag should be set within ISR */
+	testcnt = np->intr_test;
+	if (!testcnt)
+		ret = 2;
+
+	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED))
+		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+	else
+		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+
+	spin_unlock_irq(&np->lock);
+
+	nv_free_irq(dev);
+
+	np->msi_flags = save_msi_flags;
+
+	if (netif_running(dev)) {
+		writel(save_poll_interval, base + NvRegPollingInterval);
+		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+		/* restore original irq */
+		if (nv_request_irq(dev, 0))
+			return 0;
+	}
+
+	return ret;
+}
+
+static int nv_loopback_test(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	struct sk_buff *tx_skb, *rx_skb;
+	dma_addr_t test_dma_addr;
+	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
+	u32 Flags;
+	int len, i, pkt_len;
+	u8 *pkt_data;
+	u32 filter_flags = 0;
+	u32 misc1_flags = 0;
+	int ret = 1;
+
+	if (netif_running(dev)) {
+		nv_disable_irq(dev);
+		filter_flags = readl(base + NvRegPacketFilterFlags);
+		misc1_flags = readl(base + NvRegMisc1);
+	} else {
+		nv_txrx_reset(dev);
+	}
+
+	/* reinit driver view of the rx queue */
+	set_bufsize(dev);
+	nv_init_ring(dev);
+
+	/* setup hardware for loopback */
+	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
+	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
+
+	/* reinit nic view of the rx queue */
+	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+		base + NvRegRingSizes);
+	pci_push(base);
+
+	/* restart rx engine */
+	nv_start_rx(dev);
+	nv_start_tx(dev);
+
+	/* setup packet for tx */
+	pkt_len = ETH_DATA_LEN;
+	tx_skb = dev_alloc_skb(pkt_len);
+	pkt_data = skb_put(tx_skb, pkt_len);
+	for (i = 0; i < pkt_len; i++)
+		pkt_data[i] = (u8)(i & 0xff);
+	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
+				       tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
+
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
+		np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+	} else {
+		np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
+		np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+		np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+	}
+	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+	pci_push(get_hwbase(dev));
+
+	msleep(500);
+
+	/* check for rx of the packet */
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
+		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
+
+	} else {
+		Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
+		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
+	}
+
+	if (Flags & NV_RX_AVAIL) {
+		ret = 0;
+	} else if (np->desc_ver == DESC_VER_1) {
+		if (Flags & NV_RX_ERROR)
+			ret = 0;
+	} else {
+		if (Flags & NV_RX2_ERROR) {
+			ret = 0;
+		}
+	}
+
+	if (ret) {
+		if (len != pkt_len) {
+			ret = 0;
+			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
+				dev->name, len, pkt_len);
+		} else {
+			rx_skb = np->rx_skbuff[0];
+			for (i = 0; i < pkt_len; i++) {
+				if (rx_skb->data[i] != (u8)(i & 0xff)) {
+					ret = 0;
+					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
+						dev->name, i);
+					break;
+				}
+			}
+		}
+	} else {
+		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
+	}
+
+	pci_unmap_page(np->pci_dev, test_dma_addr,
+		       tx_skb->end-tx_skb->data,
+		       PCI_DMA_TODEVICE);
+	dev_kfree_skb_any(tx_skb);
+
+	/* stop engines */
+	nv_stop_rx(dev);
+	nv_stop_tx(dev);
+	nv_txrx_reset(dev);
+	/* drain rx queue */
+	nv_drain_rx(dev);
+	nv_drain_tx(dev);
+
+	if (netif_running(dev)) {
+		writel(misc1_flags, base + NvRegMisc1);
+		writel(filter_flags, base + NvRegPacketFilterFlags);
+		nv_enable_irq(dev);
+	}
+
+	return ret;
+}
+
+static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	int result;
+	memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
+
+	if (!nv_link_test(dev)) {
+		test->flags |= ETH_TEST_FL_FAILED;
+		buffer[0] = 1;
+	}
+
+	if (test->flags & ETH_TEST_FL_OFFLINE) {
+		if (netif_running(dev)) {
+			netif_stop_queue(dev);
+			spin_lock_bh(&dev->xmit_lock);
+			spin_lock_irq(&np->lock);
+			nv_disable_hw_interrupts(dev, np->irqmask);
+			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+			} else {
+				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+			}
+			/* stop engines */
+			nv_stop_rx(dev);
+			nv_stop_tx(dev);
+			nv_txrx_reset(dev);
+			/* drain rx queue */
+			nv_drain_rx(dev);
+			nv_drain_tx(dev);
+			spin_unlock_irq(&np->lock);
+			spin_unlock_bh(&dev->xmit_lock);
+		}
+
+		if (!nv_register_test(dev)) {
+			test->flags |= ETH_TEST_FL_FAILED;
+			buffer[1] = 1;
+		}
+
+		result = nv_interrupt_test(dev);
+		if (result != 1) {
+			test->flags |= ETH_TEST_FL_FAILED;
+			buffer[2] = 1;
+		}
+		if (result == 0) {
+			/* bail out */
+			return;
+		}
+
+		if (!nv_loopback_test(dev)) {
+			test->flags |= ETH_TEST_FL_FAILED;
+			buffer[3] = 1;
+		}
+
+		if (netif_running(dev)) {
+			/* reinit driver view of the rx queue */
+			set_bufsize(dev);
+			if (nv_init_ring(dev)) {
+				if (!np->in_shutdown)
+					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			}
+			/* reinit nic view of the rx queue */
+			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+				base + NvRegRingSizes);
+			pci_push(base);
+			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+			pci_push(base);
+			/* restart rx engine */
+			nv_start_rx(dev);
+			nv_start_tx(dev);
+			netif_start_queue(dev);
+			nv_enable_hw_interrupts(dev, np->irqmask);
+		}
+	}
+}
+
 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
 {
 	switch (stringset) {
 	case ETH_SS_STATS:
 		memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
 		break;
+	case ETH_SS_TEST:
+		memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
+		break;
 	}
 }
 
@@ -3422,6 +3810,8 @@ static struct ethtool_ops ops = {
 	.get_strings = nv_get_strings,
 	.get_stats_count = nv_get_stats_count,
 	.get_ethtool_stats = nv_get_ethtool_stats,
+	.self_test_count = nv_self_test_count,
+	.self_test = nv_self_test,
 };
 
 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
@@ -3554,7 +3944,7 @@ static int nv_open(struct net_device *dev)
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	if (nv_request_irq(dev)) {
+	if (nv_request_irq(dev, 0)) {
 		goto out_drain;
 	}
 
@@ -4049,11 +4439,11 @@ static struct pci_device_id pci_tbl[] = {
 	},
 	{ /* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 	},
 	{ /* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 	},
 	{0,},
 };