Diffstat (limited to 'drivers/net/jme.c')
 drivers/net/jme.c | 189
 1 file changed, 94 insertions(+), 95 deletions(-)
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 1e3c63d67b91..e7068c7cd627 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -322,20 +322,6 @@ jme_stop_irq(struct jme_adapter *jme)
 	jwrite32f(jme, JME_IENC, INTR_ENABLE);
 }
 
-static inline void
-jme_enable_shadow(struct jme_adapter *jme)
-{
-	jwrite32(jme,
-		 JME_SHBA_LO,
-		 ((u32)jme->shadow_dma & ~((u32)0x1F)) | SHBA_POSTEN);
-}
-
-static inline void
-jme_disable_shadow(struct jme_adapter *jme)
-{
-	jwrite32(jme, JME_SHBA_LO, 0x0);
-}
-
 static u32
 jme_linkstat_from_phy(struct jme_adapter *jme)
 {
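The hunk above deletes the helpers that programmed the shadow-register base: SHBA_POSTEN asked the NIC to post copies of selected registers into a 32-byte-aligned DMA buffer, so the driver could read interrupt state from plain memory instead of over the bus. The later hunks remove every consumer of that buffer in favour of direct MMIO reads. A toy model of the two access styles (all names here are illustrative, not from jme.c):

    #include <stdint.h>
    #include <stdio.h>

    static volatile uint32_t mmio_ieve = 0x00000010; /* simulated JME_IEVE register */
    static uint32_t shadow[8];                       /* simulated DMA-posted copies */

    /* Shadow style: the device (faked here) already wrote the value into
     * ordinary memory, so the CPU does a cheap cached load. */
    static uint32_t read_shadowed(int idx) { return shadow[idx]; }

    /* Direct style: one uncached MMIO load per read, as jread32 does. */
    static uint32_t read_direct(void) { return mmio_ieve; }

    int main(void)
    {
            shadow[0] = mmio_ieve; /* pretend the device posted a copy */
            printf("shadow 0x%08x, direct 0x%08x\n", read_shadowed(0), read_direct());
            return 0;
    }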
@@ -522,12 +508,8 @@ jme_setup_tx_resources(struct jme_adapter *jme)
 					   &(txring->dmaalloc),
 					   GFP_ATOMIC);
 
-	if (!txring->alloc) {
-		txring->desc = NULL;
-		txring->dmaalloc = 0;
-		txring->dma = 0;
-		return -ENOMEM;
-	}
+	if (!txring->alloc)
+		goto err_set_null;
 
 	/*
 	 * 16 Bytes align
@@ -539,6 +521,11 @@ jme_setup_tx_resources(struct jme_adapter *jme)
 	atomic_set(&txring->next_to_clean, 0);
 	atomic_set(&txring->nr_free, jme->tx_ring_size);
 
+	txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+				 jme->tx_ring_size, GFP_ATOMIC);
+	if (unlikely(!(txring->bufinf)))
+		goto err_free_txring;
+
 	/*
 	 * Initialize Transmit Descriptors
 	 */
@@ -547,6 +534,20 @@ jme_setup_tx_resources(struct jme_adapter *jme)
 	       sizeof(struct jme_buffer_info) * jme->tx_ring_size);
 
 	return 0;
+
+err_free_txring:
+	dma_free_coherent(&(jme->pdev->dev),
+			  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+			  txring->alloc,
+			  txring->dmaalloc);
+
+err_set_null:
+	txring->desc = NULL;
+	txring->dmaalloc = 0;
+	txring->dma = 0;
+	txring->bufinf = NULL;
+
+	return -ENOMEM;
 }
 
 static void
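The new failure handling in jme_setup_tx_resources() follows the kernel's usual goto-unwind idiom: a failure jumps to a label that frees everything allocated so far, and the labels fall through so a later failure releases more. The RX setup path below gains the mirror-image ladder. A minimal standalone sketch of the same shape, using userspace stand-ins rather than the driver's own calls:

    #include <stdlib.h>

    static int setup_ring(void **ring, void **bufinf)
    {
            *ring = malloc(4096);           /* descriptor memory */
            if (!*ring)
                    goto err_set_null;

            *bufinf = malloc(1024);         /* per-descriptor tracking array */
            if (!*bufinf)
                    goto err_free_ring;

            return 0;                       /* both allocations succeeded */

    err_free_ring:
            free(*ring);                    /* undo the earlier allocation */
    err_set_null:
            *ring = NULL;                   /* leave outputs in a known state */
            *bufinf = NULL;
            return -1;                      /* stands in for -ENOMEM */
    }

    int main(void)
    {
            void *ring, *bufinf;
            if (setup_ring(&ring, &bufinf) == 0) {
                    free(bufinf);
                    free(ring);
            }
            return 0;
    }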
@@ -554,19 +555,22 @@ jme_free_tx_resources(struct jme_adapter *jme)
 {
 	int i;
 	struct jme_ring *txring = &(jme->txring[0]);
-	struct jme_buffer_info *txbi = txring->bufinf;
+	struct jme_buffer_info *txbi;
 
 	if (txring->alloc) {
-		for (i = 0 ; i < jme->tx_ring_size ; ++i) {
-			txbi = txring->bufinf + i;
-			if (txbi->skb) {
-				dev_kfree_skb(txbi->skb);
-				txbi->skb = NULL;
+		if (txring->bufinf) {
+			for (i = 0 ; i < jme->tx_ring_size ; ++i) {
+				txbi = txring->bufinf + i;
+				if (txbi->skb) {
+					dev_kfree_skb(txbi->skb);
+					txbi->skb = NULL;
+				}
+				txbi->mapping = 0;
+				txbi->len = 0;
+				txbi->nr_desc = 0;
+				txbi->start_xmit = 0;
 			}
-			txbi->mapping = 0;
-			txbi->len = 0;
-			txbi->nr_desc = 0;
-			txbi->start_xmit = 0;
+			kfree(txring->bufinf);
 		}
 
 		dma_free_coherent(&(jme->pdev->dev),
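In the rewritten jme_free_tx_resources(), the new `if (txring->bufinf)` test guards the loop that dereferences the per-descriptor array; kfree(NULL) itself would be harmless. Since bufinf is now allocated separately from the descriptor ring, one can exist without the other on an error path. A hedged userspace sketch of the pattern:

    #include <stdlib.h>

    struct buf_slot { void *skb; };

    struct ring {
            void *alloc;             /* descriptor memory */
            struct buf_slot *bufinf; /* per-descriptor tracking; may be NULL */
            int size;
    };

    /* Walk the tracking array only when it exists: free(NULL) would be
     * harmless, but bufinf[i].skb must never be read through NULL. */
    static void ring_teardown(struct ring *r)
    {
            if (r->bufinf) {
                    for (int i = 0; i < r->size; ++i)
                            free(r->bufinf[i].skb);
                    free(r->bufinf);
                    r->bufinf = NULL;
            }
            free(r->alloc);
            r->alloc = NULL;
    }

    int main(void)
    {
            struct ring r = { malloc(4096), NULL, 0 }; /* bufinf alloc failed */
            ring_teardown(&r);                         /* must not crash */
            return 0;
    }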
@@ -578,11 +582,11 @@ jme_free_tx_resources(struct jme_adapter *jme)
 		txring->desc = NULL;
 		txring->dmaalloc = 0;
 		txring->dma = 0;
+		txring->bufinf = NULL;
 	}
 	txring->next_to_use = 0;
 	atomic_set(&txring->next_to_clean, 0);
 	atomic_set(&txring->nr_free, 0);
-
 }
 
 static inline void
@@ -653,7 +657,7 @@ jme_disable_tx_engine(struct jme_adapter *jme)
 static void
 jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
 {
-	struct jme_ring *rxring = jme->rxring;
+	struct jme_ring *rxring = &(jme->rxring[0]);
 	register struct rxdesc *rxdesc = rxring->desc;
 	struct jme_buffer_info *rxbi = rxring->bufinf;
 	rxdesc += i;
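This change (repeated for the TX ring accessors in later hunks) is cosmetic: when rxring is an array member, `jme->rxring` and `&(jme->rxring[0])` evaluate to the same address via array-to-pointer decay, so the patch only makes every ring accessor use the explicit element-0 form consistently. A quick self-contained demonstration, assuming an array member as in the driver:

    #include <assert.h>
    #include <stdio.h>

    struct ring { int dummy; };
    struct adapter { struct ring rxring[2]; };  /* array member, as in jme */

    int main(void)
    {
            struct adapter a;
            /* Array-to-pointer decay: both expressions name element 0. */
            assert((void *)a.rxring == (void *)&a.rxring[0]);
            printf("same address: %p\n", (void *)&a.rxring[0]);
            return 0;
    }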
@@ -720,8 +724,11 @@ jme_free_rx_resources(struct jme_adapter *jme)
 	struct jme_ring *rxring = &(jme->rxring[0]);
 
 	if (rxring->alloc) {
-		for (i = 0 ; i < jme->rx_ring_size ; ++i)
-			jme_free_rx_buf(jme, i);
+		if (rxring->bufinf) {
+			for (i = 0 ; i < jme->rx_ring_size ; ++i)
+				jme_free_rx_buf(jme, i);
+			kfree(rxring->bufinf);
+		}
 
 		dma_free_coherent(&(jme->pdev->dev),
 				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
@@ -731,6 +738,7 @@ jme_free_rx_resources(struct jme_adapter *jme)
 		rxring->desc = NULL;
 		rxring->dmaalloc = 0;
 		rxring->dma = 0;
+		rxring->bufinf = NULL;
 	}
 	rxring->next_to_use = 0;
 	atomic_set(&rxring->next_to_clean, 0);
@@ -746,12 +754,8 @@ jme_setup_rx_resources(struct jme_adapter *jme)
 					   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
 					   &(rxring->dmaalloc),
 					   GFP_ATOMIC);
-	if (!rxring->alloc) {
-		rxring->desc = NULL;
-		rxring->dmaalloc = 0;
-		rxring->dma = 0;
-		return -ENOMEM;
-	}
+	if (!rxring->alloc)
+		goto err_set_null;
 
 	/*
 	 * 16 Bytes align
@@ -762,9 +766,16 @@ jme_setup_rx_resources(struct jme_adapter *jme)
 	rxring->next_to_use = 0;
 	atomic_set(&rxring->next_to_clean, 0);
 
+	rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+				 jme->rx_ring_size, GFP_ATOMIC);
+	if (unlikely(!(rxring->bufinf)))
+		goto err_free_rxring;
+
 	/*
 	 * Initiallize Receive Descriptors
 	 */
+	memset(rxring->bufinf, 0,
+	       sizeof(struct jme_buffer_info) * jme->rx_ring_size);
 	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
 		if (unlikely(jme_make_new_rx_buf(jme, i))) {
 			jme_free_rx_resources(jme);
@@ -775,6 +786,19 @@ jme_setup_rx_resources(struct jme_adapter *jme)
 	}
 
 	return 0;
+
+err_free_rxring:
+	dma_free_coherent(&(jme->pdev->dev),
+			  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+			  rxring->alloc,
+			  rxring->dmaalloc);
+err_set_null:
+	rxring->desc = NULL;
+	rxring->dmaalloc = 0;
+	rxring->dma = 0;
+	rxring->bufinf = NULL;
+
+	return -ENOMEM;
 }
 
 static inline void
@@ -790,9 +814,9 @@ jme_enable_rx_engine(struct jme_adapter *jme)
 	/*
 	 * Setup RX DMA Bass Address
 	 */
-	jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
+	jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
 	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
-	jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
+	jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
 
 	/*
 	 * Setup RX Descriptor Count
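The added parentheses are purely cosmetic: a cast binds tighter than the binary `&`, so `(__u64)jme->rxring[0].dma & 0xFFFFFFFFUL` already masked the casted value. The LO/HI pair itself is the standard way to program a 64-bit DMA base into two 32-bit registers; a sketch with a printf stand-in for jwrite32:

    #include <stdint.h>
    #include <stdio.h>

    static void write_reg(const char *name, uint32_t val)
    {
            printf("%s <- 0x%08x\n", name, val); /* stand-in for an MMIO write */
    }

    int main(void)
    {
            uint64_t dma = 0x0000000123456780ULL; /* example bus address */

            /* Low 32 bits and high 32 bits programmed separately. */
            write_reg("RXDBA_LO", (uint32_t)(dma & 0xFFFFFFFFUL));
            write_reg("RXDBA_HI", (uint32_t)(dma >> 32));
            return 0;
    }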
@@ -856,27 +880,27 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
 	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
 		return false;
 
-	if (unlikely(!(flags & RXWBFLAG_MF) &&
-	(flags & RXWBFLAG_TCPON) && !(flags & RXWBFLAG_TCPCS))) {
-		msg_rx_err(jme, "TCP Checksum error.\n");
-		goto out_sumerr;
+	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
+			== RXWBFLAG_TCPON)) {
+		if (flags & RXWBFLAG_IPV4)
+			msg_rx_err(jme, "TCP Checksum error\n");
+		return false;
 	}
 
-	if (unlikely(!(flags & RXWBFLAG_MF) &&
-	(flags & RXWBFLAG_UDPON) && !(flags & RXWBFLAG_UDPCS))) {
-		msg_rx_err(jme, "UDP Checksum error.\n");
-		goto out_sumerr;
+	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
+			== RXWBFLAG_UDPON)) {
+		if (flags & RXWBFLAG_IPV4)
+			msg_rx_err(jme, "UDP Checksum error.\n");
+		return false;
 	}
 
-	if (unlikely((flags & RXWBFLAG_IPV4) && !(flags & RXWBFLAG_IPCS))) {
+	if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
+			== RXWBFLAG_IPV4)) {
 		msg_rx_err(jme, "IPv4 Checksum error.\n");
-		goto out_sumerr;
+		return false;
 	}
 
 	return true;
-
-out_sumerr:
-	return false;
 }
 
 static void
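Each rewritten test folds three boolean checks into one mask-and-compare: `(flags & (MF | TCPON | TCPCS)) == TCPON` holds exactly when TCPON is set and both MF and TCPCS are clear, i.e. an unfragmented TCP frame whose checksum failed. The other behavioural change is that TCP/UDP errors are now only logged for IPv4 frames, though the packet is rejected either way. A self-contained check of the equivalence, with invented flag values (the real RXWBFLAG_* constants live in the driver headers):

    #include <assert.h>
    #include <stdbool.h>

    #define MF    0x1 /* more-fragments */
    #define TCPON 0x2 /* hardware recognized TCP */
    #define TCPCS 0x4 /* TCP checksum OK */

    static bool old_test(unsigned flags)
    {
            return !(flags & MF) && (flags & TCPON) && !(flags & TCPCS);
    }

    static bool new_test(unsigned flags)
    {
            return (flags & (MF | TCPON | TCPCS)) == TCPON;
    }

    int main(void)
    {
            /* Exhaustively confirm both forms agree for every combination. */
            for (unsigned f = 0; f < 8; ++f)
                    assert(old_test(f) == new_test(f));
            return 0;
    }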
@@ -1296,7 +1320,7 @@ jme_rx_empty_tasklet(unsigned long arg)
 static void
 jme_wake_queue_if_stopped(struct jme_adapter *jme)
 {
-	struct jme_ring *txring = jme->txring;
+	struct jme_ring *txring = &(jme->txring[0]);
 
 	smp_wmb();
 	if (unlikely(netif_queue_stopped(jme->dev) &&
@@ -1483,12 +1507,7 @@ jme_msi(int irq, void *dev_id)
 	struct jme_adapter *jme = netdev_priv(netdev);
 	u32 intrstat;
 
-	pci_dma_sync_single_for_cpu(jme->pdev,
-				    jme->shadow_dma,
-				    sizeof(u32) * SHADOW_REG_NR,
-				    PCI_DMA_FROMDEVICE);
-	intrstat = jme->shadow_regs[SHADOW_IEVE];
-	jme->shadow_regs[SHADOW_IEVE] = 0;
+	intrstat = jread32(jme, JME_IEVE);
 
 	jme_intr_msi(jme, intrstat);
 
@@ -1566,6 +1585,7 @@ jme_open(struct net_device *netdev)
 	jme_clear_pm(jme);
 	JME_NAPI_ENABLE(jme);
 
+	tasklet_enable(&jme->linkch_task);
 	tasklet_enable(&jme->txclean_task);
 	tasklet_hi_enable(&jme->rxclean_task);
 	tasklet_hi_enable(&jme->rxempty_task);
@@ -1574,7 +1594,6 @@ jme_open(struct net_device *netdev)
 	if (rc)
 		goto err_out;
 
-	jme_enable_shadow(jme);
 	jme_start_irq(jme);
 
 	if (test_bit(JME_FLAG_SSET, &jme->flags))
@@ -1642,15 +1661,14 @@ jme_close(struct net_device *netdev)
 	netif_carrier_off(netdev);
 
 	jme_stop_irq(jme);
-	jme_disable_shadow(jme);
 	jme_free_irq(jme);
 
 	JME_NAPI_DISABLE(jme);
 
-	tasklet_kill(&jme->linkch_task);
-	tasklet_kill(&jme->txclean_task);
-	tasklet_kill(&jme->rxclean_task);
-	tasklet_kill(&jme->rxempty_task);
+	tasklet_disable(&jme->linkch_task);
+	tasklet_disable(&jme->txclean_task);
+	tasklet_disable(&jme->rxclean_task);
+	tasklet_disable(&jme->rxempty_task);
 
 	jme_reset_ghc_speed(jme);
 	jme_disable_rx_engine(jme);
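jme_close() now uses tasklet_disable() instead of tasklet_kill(): disable only raises a count (waiting for a running instance to finish) and leaves the tasklet reusable, which is what lets jme_open() bring the same tasklets back with tasklet_enable(). The calls nest, so every disable needs a matching enable; the probe-time tasklet_disable_nosync() calls in a later hunk supply the initial disabled state. A simplified model of the counting behaviour, not the kernel implementation:

    #include <assert.h>

    /* Simplified model: the tasklet may run only while the disable
     * count is zero; disable/enable nest like the kernel's t->count. */
    struct tasklet { int disable_count; };

    static void tasklet_disable_(struct tasklet *t) { t->disable_count++; }
    static void tasklet_enable_(struct tasklet *t)  { t->disable_count--; }
    static int  tasklet_runnable(struct tasklet *t) { return t->disable_count == 0; }

    int main(void)
    {
            struct tasklet txclean = { 0 };

            tasklet_disable_(&txclean);   /* probe: tasklet_disable_nosync() */
            assert(!tasklet_runnable(&txclean));

            tasklet_enable_(&txclean);    /* jme_open(): tasklet_enable() */
            assert(tasklet_runnable(&txclean));

            tasklet_disable_(&txclean);   /* jme_close(): tasklet_disable() */
            assert(!tasklet_runnable(&txclean));
            return 0;
    }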
@@ -1668,7 +1686,7 @@ static int
 jme_alloc_txdesc(struct jme_adapter *jme,
 		 struct sk_buff *skb)
 {
-	struct jme_ring *txring = jme->txring;
+	struct jme_ring *txring = &(jme->txring[0]);
 	int idx, nr_alloc, mask = jme->tx_ring_mask;
 
 	idx = txring->next_to_use;
@@ -1722,7 +1740,7 @@ jme_fill_tx_map(struct pci_dev *pdev,
 static void
 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
-	struct jme_ring *txring = jme->txring;
+	struct jme_ring *txring = &(jme->txring[0]);
 	struct txdesc *txdesc = txring->desc, *ctxdesc;
 	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
 	u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
@@ -1835,7 +1853,7 @@ jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
 static int
 jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
-	struct jme_ring *txring = jme->txring;
+	struct jme_ring *txring = &(jme->txring[0]);
 	struct txdesc *txdesc;
 	struct jme_buffer_info *txbi;
 	u8 flags;
@@ -1883,7 +1901,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 static void
 jme_stop_queue_if_full(struct jme_adapter *jme)
 {
-	struct jme_ring *txring = jme->txring;
+	struct jme_ring *txring = &(jme->txring[0]);
 	struct jme_buffer_info *txbi = txring->bufinf;
 	int idx = atomic_read(&txring->next_to_clean);
 
@@ -2725,14 +2743,6 @@ jme_init_one(struct pci_dev *pdev,
 		rc = -ENOMEM;
 		goto err_out_free_netdev;
 	}
-	jme->shadow_regs = pci_alloc_consistent(pdev,
-						sizeof(u32) * SHADOW_REG_NR,
-						&(jme->shadow_dma));
-	if (!(jme->shadow_regs)) {
-		jeprintk(pdev, "Allocating shadow register mapping error.\n");
-		rc = -ENOMEM;
-		goto err_out_unmap;
-	}
 
 	if (no_pseudohp) {
 		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
@@ -2768,6 +2778,7 @@ jme_init_one(struct pci_dev *pdev,
 	tasklet_init(&jme->rxempty_task,
 		     &jme_rx_empty_tasklet,
 		     (unsigned long) jme);
+	tasklet_disable_nosync(&jme->linkch_task);
 	tasklet_disable_nosync(&jme->txclean_task);
 	tasklet_disable_nosync(&jme->rxclean_task);
 	tasklet_disable_nosync(&jme->rxempty_task);
@@ -2817,7 +2828,7 @@ jme_init_one(struct pci_dev *pdev,
 	if (!jme->mii_if.phy_id) {
 		rc = -EIO;
 		jeprintk(pdev, "Can not find phy_id.\n");
-		goto err_out_free_shadow;
+		goto err_out_unmap;
 	}
 
 	jme->reg_ghc |= GHC_LINK_POLL;
@@ -2846,7 +2857,7 @@ jme_init_one(struct pci_dev *pdev,
 	if (rc) {
 		jeprintk(pdev,
 			"Reload eeprom for reading MAC Address error.\n");
-		goto err_out_free_shadow;
+		goto err_out_unmap;
 	}
 	jme_load_macaddr(netdev);
 
@@ -2862,7 +2873,7 @@ jme_init_one(struct pci_dev *pdev,
 	rc = register_netdev(netdev);
 	if (rc) {
 		jeprintk(pdev, "Cannot register net device.\n");
-		goto err_out_free_shadow;
+		goto err_out_unmap;
 	}
 
 	msg_probe(jme, "%s%s ver:%x rev:%x macaddr:%pM\n",
@@ -2876,11 +2887,6 @@ jme_init_one(struct pci_dev *pdev,
 
 	return 0;
 
-err_out_free_shadow:
-	pci_free_consistent(pdev,
-			    sizeof(u32) * SHADOW_REG_NR,
-			    jme->shadow_regs,
-			    jme->shadow_dma);
 err_out_unmap:
 	iounmap(jme->regs);
 err_out_free_netdev:
@@ -2901,10 +2907,6 @@ jme_remove_one(struct pci_dev *pdev)
 	struct jme_adapter *jme = netdev_priv(netdev);
 
 	unregister_netdev(netdev);
-	pci_free_consistent(pdev,
-			    sizeof(u32) * SHADOW_REG_NR,
-			    jme->shadow_regs,
-			    jme->shadow_dma);
 	iounmap(jme->regs);
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
@@ -2930,8 +2932,6 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
 	tasklet_disable(&jme->rxclean_task);
 	tasklet_disable(&jme->rxempty_task);
 
-	jme_disable_shadow(jme);
-
 	if (netif_carrier_ok(netdev)) {
 		if (test_bit(JME_FLAG_POLL, &jme->flags))
 			jme_polling_mode(jme);
@@ -2983,7 +2983,6 @@ jme_resume(struct pci_dev *pdev)
 	else
 		jme_reset_phy_processor(jme);
 
-	jme_enable_shadow(jme);
 	jme_start_irq(jme);
 	netif_device_attach(netdev);
 