author     Linus Torvalds <torvalds@linux-foundation.org>	2011-02-18 17:15:05 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2011-02-18 17:15:05 -0500
commit     4c3021da451fe0ea1deaba8fa3805d8d065ec84e
tree       7dba26f1fb51be87dfd5d136ddbbd752b60e9a6c /drivers/net
parent     a5bbef0b2deb7b943f095181309ecc9e1fc91c0f
parent     ceaaec98ad99859ac90ac6863ad0a6cd075d8e0e
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (37 commits)
  net: deinit automatic LIST_HEAD
  net: dont leave active on stack LIST_HEAD
  net: provide default_advmss() methods to blackhole dst_ops
  tg3: Restrict phy ioctl access
  drivers/net: Call netif_carrier_off at the end of the probe
  ixgbe: work around for DDP last buffer size
  ixgbe: fix panic due to uninitialised pointer
  e1000e: flush all writebacks before unload
  e1000e: check down flag in tasks
  isdn: hisax: Use l2headersize() instead of dup (and buggy) func.
  arp_notify: unconditionally send gratuitous ARP for NETDEV_NOTIFY_PEERS.
  cxgb4vf: Use defined Mailbox Timeout
  cxgb4vf: Quiesce Virtual Interfaces on shutdown ...
  cxgb4vf: Behave properly when CONFIG_DEBUG_FS isn't defined ...
  cxgb4vf: Check driver parameters in the right place ...
  pch_gbe: Fix the MAC Address load issue.
  iwlwifi: Delete iwl3945_good_plcp_health.
  net/can/softing: make CAN_SOFTING_CS depend on CAN_SOFTING
  netfilter: nf_iterate: fix incorrect RCU usage
  pch_gbe: Fix the issue that the receiving data is not normal.
  ...
Diffstat (limited to 'drivers/net')
 drivers/net/can/softing/Kconfig         |   2
 drivers/net/cxgb4vf/cxgb4vf_main.c      |  80
 drivers/net/cxgb4vf/t4vf_hw.c           |   2
 drivers/net/e1000e/netdev.c             |  52
 drivers/net/forcedeth.c                 |   2
 drivers/net/ixgbe/ixgbe_fcoe.c          |  51
 drivers/net/ixgbe/ixgbe_fcoe.h          |   2
 drivers/net/ixgbe/ixgbe_main.c          |   6
 drivers/net/pch_gbe/pch_gbe.h           |   2
 drivers/net/pch_gbe/pch_gbe_main.c      | 104
 drivers/net/r8169.c                     |   2
 drivers/net/stmmac/stmmac_main.c        |   4
 drivers/net/tg3.c                       |   8
 drivers/net/usb/hso.c                   |  12
 drivers/net/usb/usbnet.c                |   4
 drivers/net/wireless/iwlwifi/iwl-3945.c |  67
 16 files changed, 245 insertions(+), 155 deletions(-)
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
index 8ba81b3ddd90..5de46a9a77bb 100644
--- a/drivers/net/can/softing/Kconfig
+++ b/drivers/net/can/softing/Kconfig
@@ -18,7 +18,7 @@ config CAN_SOFTING
 config CAN_SOFTING_CS
 	tristate "Softing Gmbh CAN pcmcia cards"
 	depends on PCMCIA
-	select CAN_SOFTING
+	depends on CAN_SOFTING
 	---help---
 	  Support for PCMCIA cards from Softing Gmbh & some cards
 	  from Vector Gmbh.
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 56166ae2059f..6aad64df4dcb 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
 {
 	int i;
 
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
 	/*
 	 * Debugfs support is best effort.
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
  */
 static void cleanup_debugfs(struct adapter *adapter)
 {
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
 	/*
 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 	struct net_device *netdev;
 
 	/*
-	 * Vet our module parameters.
-	 */
-	if (msi != MSI_MSIX && msi != MSI_MSI) {
-		dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
-			" (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
-			MSI_MSI);
-		err = -EINVAL;
-		goto err_out;
-	}
-
-	/*
 	 * Print our driver banner the first time we're called to initialize a
 	 * device.
 	 */
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 	/*
 	 * Set up our debugfs entries.
 	 */
-	if (cxgb4vf_debugfs_root) {
+	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
 		adapter->debugfs_root =
 			debugfs_create_dir(pci_name(pdev),
 					   cxgb4vf_debugfs_root);
-		if (adapter->debugfs_root == NULL)
+		if (IS_ERR_OR_NULL(adapter->debugfs_root))
 			dev_warn(&pdev->dev, "could not create debugfs"
 				 " directory");
 		else
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 	 */
 
 err_free_debugfs:
-	if (adapter->debugfs_root) {
+	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
 		cleanup_debugfs(adapter);
 		debugfs_remove_recursive(adapter->debugfs_root);
 	}
@@ -2802,7 +2791,6 @@ err_release_regions:
 err_disable_device:
 	pci_disable_device(pdev);
 
-err_out:
 	return err;
 }
 
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
 	/*
 	 * Tear down our debugfs entries.
 	 */
-	if (adapter->debugfs_root) {
+	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
 		cleanup_debugfs(adapter);
 		debugfs_remove_recursive(adapter->debugfs_root);
 	}
@@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
 }
 
 /*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+	struct adapter *adapter;
+	int pidx;
+
+	adapter = pci_get_drvdata(pdev);
+	if (!adapter)
+		return;
+
+	/*
+	 * Disable all Virtual Interfaces.  This will shut down the
+	 * delivery of all ingress packets into the chip for these
+	 * Virtual Interfaces.
+	 */
+	for_each_port(adapter, pidx) {
+		struct net_device *netdev;
+		struct port_info *pi;
+
+		if (!test_bit(pidx, &adapter->registered_device_map))
+			continue;
+
+		netdev = adapter->port[pidx];
+		if (!netdev)
+			continue;
+
+		pi = netdev_priv(netdev);
+		t4vf_enable_vi(adapter, pi->viid, false, false);
+	}
+
+	/*
+	 * Free up all Queues which will prevent further DMA and
+	 * Interrupts allowing various internal pathways to drain.
+	 */
+	t4vf_free_sge_resources(adapter);
+}
+
+/*
  * PCI Device registration data structures.
  */
 #define CH_DEVICE(devid, idx) \
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = {
 	.id_table	= cxgb4vf_pci_tbl,
 	.probe		= cxgb4vf_pci_probe,
 	.remove		= __devexit_p(cxgb4vf_pci_remove),
+	.shutdown	= __devexit_p(cxgb4vf_pci_shutdown),
 };
 
 /*
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void)
 {
 	int ret;
 
+	/*
+	 * Vet our module parameters.
+	 */
+	if (msi != MSI_MSIX && msi != MSI_MSI) {
+		printk(KERN_WARNING KBUILD_MODNAME
+		       ": bad module parameter msi=%d; must be %d"
+		       " (MSI-X or MSI) or %d (MSI)\n",
+		       msi, MSI_MSIX, MSI_MSI);
+		return -EINVAL;
+	}
+
 	/* Debugfs support is optional, just warn if this fails */
 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
-	if (!cxgb4vf_debugfs_root)
+	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 		printk(KERN_WARNING KBUILD_MODNAME ": could not create"
 		       " debugfs entry, continuing\n");
 
 	ret = pci_register_driver(&cxgb4vf_driver);
-	if (ret < 0)
+	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 		debugfs_remove(cxgb4vf_debugfs_root);
 	return ret;
 }
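A note on the IS_ERR_OR_NULL() conversions above: when CONFIG_DEBUG_FS is disabled, debugfs_create_dir() returns an ERR_PTR() stub rather than NULL, so a plain NULL test mistakes the stub for a usable dentry. A minimal, self-contained sketch of the guard pattern (an illustrative module, not the cxgb4vf code itself):

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/module.h>

static struct dentry *example_debugfs_root;

static int __init example_init(void)
{
	/* May return NULL on allocation failure, or an ERR_PTR() stub when
	 * debugfs is compiled out; both must be treated as "no directory". */
	example_debugfs_root = debugfs_create_dir("example", NULL);
	if (IS_ERR_OR_NULL(example_debugfs_root))
		pr_warn("example: debugfs unavailable, continuing\n");
	return 0;
}

static void __exit example_exit(void)
{
	/* Only tear down what was actually created. */
	if (!IS_ERR_OR_NULL(example_debugfs_root))
		debugfs_remove_recursive(example_debugfs_root);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");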
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 0f51c80475ce..192db226ec7f 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
 	delay_idx = 0;
 	ms = delay[0];
 
-	for (i = 0; i < 500; i += ms) {
+	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
 		if (sleep_ok) {
 			ms = delay[delay_idx];
 			if (delay_idx < ARRAY_SIZE(delay) - 1)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 3065870cf2a7..3fa110ddb041 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
 	u16 phy_status, phy_1000t_status, phy_ext_status;
 	u16 pci_status;
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1e_rphy(hw, PHY_STATUS, &phy_status);
 	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
 	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work)
 	struct e1000_adapter *adapter = container_of(work,
 					struct e1000_adapter, downshift_task);
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
 }
 
@@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter)
 	return 0;
 }
 
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (!(adapter->flags2 & FLAG2_DMA_BURST))
+		return;
+
+	/* flush pending descriptor writebacks to memory */
+	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+	/* execute the writes immediately */
+	e1e_flush();
+}
+
 void e1000e_down(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter)
 
 	if (!pci_channel_offline(adapter->pdev))
 		e1000e_reset(adapter);
+
+	e1000e_flush_descriptors(adapter);
+
 	e1000_clean_tx_ring(adapter);
 	e1000_clean_rx_ring(adapter);
 
@@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
 {
 	struct e1000_adapter *adapter = container_of(work,
 					struct e1000_adapter, update_phy_task);
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1000_get_phy_info(&adapter->hw);
 }
 
@@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
 static void e1000_update_phy_info(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	schedule_work(&adapter->update_phy_task);
 }
 
@@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work)
 	u32 link, tctl;
 	int tx_pending = 0;
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	link = e1000e_has_link(adapter);
 	if ((netif_carrier_ok(netdev)) && link) {
 		/* Cancel scheduled suspend requests. */
@@ -4337,19 +4372,12 @@ link_up:
 	else
 		ew32(ICS, E1000_ICS_RXDMT0);
 
+	/* flush pending descriptors to memory before detecting Tx hang */
+	e1000e_flush_descriptors(adapter);
+
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = 1;
 
-	/* flush partial descriptors to memory before detecting Tx hang */
-	if (adapter->flags2 & FLAG2_DMA_BURST) {
-		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
-		ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
-		/*
-		 * no need to flush the writes because the timeout code does
-		 * an er32 first thing
-		 */
-	}
-
 	/*
 	 * With 82571 controllers, LAA may be overwritten due to controller
 	 * reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4887,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work)
 	struct e1000_adapter *adapter;
 	adapter = container_of(work, struct e1000_adapter, reset_task);
 
+	/* don't run the task if already down */
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
 	      (adapter->flags & FLAG_RX_RESTART_NOW))) {
 		e1000e_dump(adapter);
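All of the e1000e hunks above add the same guard: a timer or work item can fire while the interface is being torn down, so each deferred task returns early once the adapter's down bit is set. A generic sketch of the pattern, using hypothetical names (my_adapter, MY_DOWN) rather than the e1000e structures:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define MY_DOWN 0	/* bit index in the state bitmap (illustrative) */

struct my_adapter {
	unsigned long state;
	struct work_struct reset_task;
};

static void my_reset_task(struct work_struct *work)
{
	struct my_adapter *adapter =
		container_of(work, struct my_adapter, reset_task);

	/* The work may have been queued just before the device was taken
	 * down; touching hardware now would race with the teardown path. */
	if (test_bit(MY_DOWN, &adapter->state))
		return;

	/* ... safe to touch registers / PHY here ... */
}

static void my_down(struct my_adapter *adapter)
{
	set_bit(MY_DOWN, &adapter->state);
	/* Wait for any already-running instance to finish. */
	cancel_work_sync(&adapter->reset_task);
}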
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index af09296ef0dd..9c0b1bac6af6 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		goto out_error;
 	}
 
+	netif_carrier_off(dev);
+
 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
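The forcedeth hunk (and the r8169 one further down) implements the "call netif_carrier_off at the end of the probe" rule from this merge: a newly registered netdev is treated as having carrier until told otherwise, so the driver turns the carrier off until its link interrupt or watchdog reports a real link. A probe-tail sketch with hypothetical names, not the forcedeth code:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

struct my_priv { int dummy; };	/* placeholder private data */

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct my_priv));
	if (!dev)
		return -ENOMEM;

	/* ... hardware setup, netdev_ops assignment, MAC address, etc ... */

	err = register_netdev(dev);
	if (err)
		goto out_free;

	/* Start with carrier off; the link handling code flips it on once
	 * a real link is detected. */
	netif_carrier_off(dev);
	return 0;

out_free:
	free_netdev(dev);
	return err;
}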
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 8753980668c7..c54a88274d51 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -159,7 +159,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	struct scatterlist *sg;
 	unsigned int i, j, dmacount;
 	unsigned int len;
-	static const unsigned int bufflen = 4096;
+	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
 	unsigned int firstoff = 0;
 	unsigned int lastsize;
 	unsigned int thisoff = 0;
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 	/* only the last buffer may have non-full bufflen */
 	lastsize = thisoff + thislen;
 
+	/*
+	 * lastsize can not be buffer len.
+	 * If it is then adding another buffer with lastsize = 1.
+	 */
+	if (lastsize == bufflen) {
+		if (j >= IXGBE_BUFFCNT_MAX) {
+			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+			      "not enough user buffers. We need an extra "
+			      "buffer because lastsize is bufflen.\n",
+			      xid, i, j, dmacount, (u64)addr);
+			goto out_noddp_free;
+		}
+
+		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+		j++;
+		lastsize = 1;
+	}
+
 	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
 	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
 	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 			e_err(drv, "failed to allocated FCoE DDP pool\n");
 
 		spin_lock_init(&fcoe->lock);
+
+		/* Extra buffer to be shared by all DDPs for HW work around */
+		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+		if (fcoe->extra_ddp_buffer == NULL) {
+			e_err(drv, "failed to allocated extra DDP buffer\n");
+			goto out_extra_ddp_buffer_alloc;
+		}
+
+		fcoe->extra_ddp_buffer_dma =
+			dma_map_single(&adapter->pdev->dev,
+				       fcoe->extra_ddp_buffer,
+				       IXGBE_FCBUFF_MIN,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev,
+				      fcoe->extra_ddp_buffer_dma)) {
+			e_err(drv, "failed to map extra DDP buffer\n");
+			goto out_extra_ddp_buffer_dma;
+		}
 	}
 
 	/* Enable L2 eth type filter for FCoE */
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		}
 	}
 #endif
+
+	return;
+
+out_extra_ddp_buffer_dma:
+	kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+	pci_pool_destroy(fcoe->pool);
+	fcoe->pool = NULL;
 }
 
 /**
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
 	if (fcoe->pool) {
 		for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
 			ixgbe_fcoe_ddp_put(adapter->netdev, i);
+		dma_unmap_single(&adapter->pdev->dev,
+				 fcoe->extra_ddp_buffer_dma,
+				 IXGBE_FCBUFF_MIN,
+				 DMA_FROM_DEVICE);
+		kfree(fcoe->extra_ddp_buffer);
 		pci_pool_destroy(fcoe->pool);
 		fcoe->pool = NULL;
 	}
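The ixgbe change above allocates a single extra DDP buffer, maps it for DMA, and unwinds both steps on any failure as well as at cleanup time. The underlying allocate / map / check / unmap sequence, shown in isolation with made-up names:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>

#define MY_BUF_LEN 4096	/* illustrative buffer size */

struct my_ctx {
	void *buf;
	dma_addr_t buf_dma;
};

static int my_setup_dma_buffer(struct pci_dev *pdev, struct my_ctx *ctx)
{
	ctx->buf = kmalloc(MY_BUF_LEN, GFP_KERNEL);
	if (!ctx->buf)
		return -ENOMEM;

	ctx->buf_dma = dma_map_single(&pdev->dev, ctx->buf,
				      MY_BUF_LEN, DMA_FROM_DEVICE);
	/* A mapping can fail (IOMMU exhaustion, bounce buffer shortage);
	 * the handle must be checked before it is handed to hardware. */
	if (dma_mapping_error(&pdev->dev, ctx->buf_dma)) {
		kfree(ctx->buf);
		ctx->buf = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void my_teardown_dma_buffer(struct pci_dev *pdev, struct my_ctx *ctx)
{
	if (!ctx->buf)
		return;
	/* Unmap before freeing so the device can no longer DMA into it. */
	dma_unmap_single(&pdev->dev, ctx->buf_dma, MY_BUF_LEN, DMA_FROM_DEVICE);
	kfree(ctx->buf);
	ctx->buf = NULL;
}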
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c551c8db..65cc8fb14fe7 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -70,6 +70,8 @@ struct ixgbe_fcoe {
 	spinlock_t lock;
 	struct pci_pool *pool;
 	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+	unsigned char *extra_ddp_buffer;
+	dma_addr_t extra_ddp_buffer_dma;
 };
 
 #endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index fbae703b46d7..30f9ccfb4f87 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3728,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
 		 * We need to try and force an autonegotiation
 		 * session, then bring up link.
 		 */
-		hw->mac.ops.setup_sfp(hw);
+		if (hw->mac.ops.setup_sfp)
+			hw->mac.ops.setup_sfp(hw);
 		if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
 			schedule_work(&adapter->multispeed_fiber_task);
 	} else {
@@ -5968,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
 		unregister_netdev(adapter->netdev);
 		return;
 	}
-	hw->mac.ops.setup_sfp(hw);
+	if (hw->mac.ops.setup_sfp)
+		hw->mac.ops.setup_sfp(hw);
 
 	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
 		/* This will also work for DA Twinax connections */
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a99520f..e1e33c80fb25 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@ struct pch_gbe_regs {
 	struct pch_gbe_regs_mac_adr mac_adr[16];
 	u32 ADDR_MASK;
 	u32 MIIM;
-	u32 reserve2;
+	u32 MAC_ADDR_LOAD;
 	u32 RGMII_ST;
 	u32 RGMII_CTRL;
 	u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 4c9a7d4f3fca..b99e90aca37d 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_SHORT_PKT		64
 #define DSC_INIT16			0xC000
 #define PCH_GBE_DMA_ALIGN		0
+#define PCH_GBE_DMA_PADDING		2
 #define PCH_GBE_WATCHDOG_PERIOD		(1 * HZ)	/* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT	256
 #define PCH_GBE_PCI_BAR			1
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
 			       int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
 /**
  * pch_gbe_mac_read_mac_addr - Read MAC address
  * @hw:	            Pointer to the HW structure
@@ -1365,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 	struct pch_gbe_buffer *buffer_info;
 	struct pch_gbe_rx_desc *rx_desc;
 	u32 length;
-	unsigned char tmp_packet[ETH_HLEN];
 	unsigned int i;
 	unsigned int cleaned_count = 0;
 	bool cleaned = false;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *new_skb;
 	u8 dma_status;
 	u16 gbec_status;
 	u32 tcp_ip_status;
-	u8 skb_copy_flag = 0;
-	u8 skb_padding_flag = 0;
 
 	i = rx_ring->next_to_clean;
 
@@ -1418,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 				pr_err("Receive CRC Error\n");
 		} else {
 			/* get receive length */
-			/* length convert[-3], padding[-2] */
-			length = (rx_desc->rx_words_eob) - 3 - 2;
+			/* length convert[-3] */
+			length = (rx_desc->rx_words_eob) - 3;
 
 			/* Decide the data conversion method */
 			if (!adapter->rx_csum) {
 				/* [Header:14][payload] */
-				skb_padding_flag = 0;
-				skb_copy_flag = 1;
+				if (NET_IP_ALIGN) {
+					/* Because alignment differs,
+					 * the new_skb is newly allocated,
+					 * and data is copied to new_skb.*/
+					new_skb = netdev_alloc_skb(netdev,
+							 length + NET_IP_ALIGN);
+					if (!new_skb) {
+						/* dorrop error */
+						pr_err("New skb allocation "
+							"Error\n");
+						goto dorrop;
+					}
+					skb_reserve(new_skb, NET_IP_ALIGN);
+					memcpy(new_skb->data, skb->data,
+						length);
+					skb = new_skb;
+				} else {
+					/* DMA buffer is used as SKB as it is.*/
+					buffer_info->skb = NULL;
+				}
 			} else {
 				/* [Header:14][padding:2][payload] */
-				skb_padding_flag = 1;
-				if (length < copybreak)
-					skb_copy_flag = 1;
-				else
-					skb_copy_flag = 0;
-			}
-
-			/* Data conversion */
-			if (skb_copy_flag) {	/* recycle  skb */
-				struct sk_buff *new_skb;
-				new_skb =
-					netdev_alloc_skb(netdev,
-							 length + NET_IP_ALIGN);
-				if (new_skb) {
-					if (!skb_padding_flag) {
-						skb_reserve(new_skb,
-								NET_IP_ALIGN);
+				/* The length includes padding length */
+				length = length - PCH_GBE_DMA_PADDING;
+				if ((length < copybreak) ||
+				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+					/* Because alignment differs,
+					 * the new_skb is newly allocated,
+					 * and data is copied to new_skb.
+					 * Padding data is deleted
+					 * at the time of a copy.*/
+					new_skb = netdev_alloc_skb(netdev,
+							 length + NET_IP_ALIGN);
+					if (!new_skb) {
+						/* dorrop error */
+						pr_err("New skb allocation "
+							"Error\n");
+						goto dorrop;
 					}
+					skb_reserve(new_skb, NET_IP_ALIGN);
 					memcpy(new_skb->data, skb->data,
-						length);
-					/* save the skb
-					 * in buffer_info as good */
+						ETH_HLEN);
+					memcpy(&new_skb->data[ETH_HLEN],
+						&skb->data[ETH_HLEN +
+						PCH_GBE_DMA_PADDING],
+						length - ETH_HLEN);
 					skb = new_skb;
-				} else if (!skb_padding_flag) {
-					/* dorrop error */
-					pr_err("New skb allocation Error\n");
-					goto dorrop;
+				} else {
+					/* Padding data is deleted
+					 * by moving header data.*/
+					memmove(&skb->data[PCH_GBE_DMA_PADDING],
+						&skb->data[0], ETH_HLEN);
+					skb_reserve(skb, NET_IP_ALIGN);
+					buffer_info->skb = NULL;
 				}
-			} else {
-				buffer_info->skb = NULL;
 			}
-			if (skb_padding_flag) {
-				memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
-				memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
-					ETH_HLEN);
-				skb_reserve(skb, NET_IP_ALIGN);
-
-			}
-
+			/* The length includes FCS length */
+			length = length - ETH_FCS_LEN;
 			/* update status of driver */
 			adapter->stats.rx_bytes += length;
 			adapter->stats.rx_packets++;
@@ -2318,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
 	netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
 	pch_gbe_set_ethtool_ops(netdev);
 
+	pch_gbe_mac_load_mac_addr(&adapter->hw);
 	pch_gbe_mac_reset_hw(&adapter->hw);
 
 	/* setup the private structure */
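The pch_gbe receive-path rewrite deals with one mismatch: the hardware inserts a 2-byte pad after the Ethernet header (PCH_GBE_DMA_PADDING) when checksum offload is on, while the stack expects NET_IP_ALIGN bytes of headroom at the front of the skb so the IP header ends up aligned. When the offsets differ, the driver copies the frame into a freshly allocated skb. The basic reserve-then-copy idiom, as a standalone sketch with hypothetical names:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/*
 * Copy a received frame from a DMA buffer into a new skb whose payload
 * starts NET_IP_ALIGN bytes in, so the IP header is 4-byte aligned on
 * architectures that care about it.
 */
static struct sk_buff *my_copy_rx_frame(struct net_device *netdev,
					const u8 *dma_buf, unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(netdev, len + NET_IP_ALIGN);
	if (!skb)
		return NULL;	/* caller drops the frame */

	skb_reserve(skb, NET_IP_ALIGN);		/* shift payload start */
	memcpy(skb_put(skb, len), dma_buf, len);	/* header + payload */
	skb->protocol = eth_type_trans(skb, netdev);
	return skb;
}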
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 59ccf0c5c610..469ab0b7ce31 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3190,6 +3190,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (pci_dev_run_wake(pdev))
 		pm_runtime_put_noidle(&pdev->dev);
 
+	netif_carrier_off(dev);
+
 out:
 	return rc;
 
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 34a0af3837f9..0e5f03135b50 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
 
 	priv->hw = device;
 
-	if (device_can_wakeup(priv->device))
+	if (device_can_wakeup(priv->device)) {
 		priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+		enable_irq_wake(dev->irq);
+	}
 
 	return 0;
 }
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 93b32d366611..06c0e5033656 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+		     !netif_running(dev)))
 			return -EAGAIN;
 
 		spin_lock_bh(&tp->lock);
@@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+		     !netif_running(dev)))
 			return -EAGAIN;
 
 		spin_lock_bh(&tp->lock);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index bed8fcedff49..6d83812603b6 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2628,15 +2628,15 @@ exit:
 
 static void hso_free_tiomget(struct hso_serial *serial)
 {
-	struct hso_tiocmget *tiocmget = serial->tiocmget;
+	struct hso_tiocmget *tiocmget;
+	if (!serial)
+		return;
+	tiocmget = serial->tiocmget;
 	if (tiocmget) {
-		if (tiocmget->urb) {
-			usb_free_urb(tiocmget->urb);
-			tiocmget->urb = NULL;
-		}
+		usb_free_urb(tiocmget->urb);
+		tiocmget->urb = NULL;
 		serial->tiocmget = NULL;
 		kfree(tiocmget);
-
 	}
 }
 
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ed9a41643ff4..95c41d56631c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -931,8 +931,10 @@ fail_halt:
 		if (urb != NULL) {
 			clear_bit (EVENT_RX_MEMORY, &dev->flags);
 			status = usb_autopm_get_interface(dev->intf);
-			if (status < 0)
+			if (status < 0) {
+				usb_free_urb(urb);
 				goto fail_lowmem;
+			}
 			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
 				resched = 0;
 			usb_autopm_put_interface(dev->intf);
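The usbnet fix above closes an error-path leak: the URB allocated earlier in the kevent handler was never freed when usb_autopm_get_interface() failed and the code jumped straight to fail_lowmem. A reduced sketch of the allocate / early-exit / release flow (a hypothetical helper, not the usbnet code):

#include <linux/usb.h>

static int my_try_rx(struct usb_interface *intf)
{
	struct urb *urb;
	int status;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;

	status = usb_autopm_get_interface(intf);
	if (status < 0) {
		/* Without this, the URB allocated above would leak. */
		usb_free_urb(urb);
		return status;
	}

	/* ... fill and submit the URB; on success its completion handler
	 * is assumed to own the reference from here on (sketch only) ... */

	usb_autopm_put_interface(intf);
	return 0;
}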
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index a9b852be4509..39b6f16c87fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
 }
 #endif
 
-/**
- * iwl3945_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
-				struct iwl_rx_packet *pkt)
-{
-	bool rc = true;
-	struct iwl3945_notif_statistics current_stat;
-	int combined_plcp_delta;
-	unsigned int plcp_msec;
-	unsigned long plcp_received_jiffies;
-
-	if (priv->cfg->base_params->plcp_delta_threshold ==
-	    IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
-		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
-		return rc;
-	}
-	memcpy(&current_stat, pkt->u.raw, sizeof(struct
-			iwl3945_notif_statistics));
-	/*
-	 * check for plcp_err and trigger radio reset if it exceeds
-	 * the plcp error threshold plcp_delta.
-	 */
-	plcp_received_jiffies = jiffies;
-	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
-					(long) priv->plcp_jiffies);
-	priv->plcp_jiffies = plcp_received_jiffies;
-	/*
-	 * check to make sure plcp_msec is not 0 to prevent division
-	 * by zero.
-	 */
-	if (plcp_msec) {
-		combined_plcp_delta =
-			(le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
-			le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
-
-		if ((combined_plcp_delta > 0) &&
-		    ((combined_plcp_delta * 100) / plcp_msec) >
-			priv->cfg->base_params->plcp_delta_threshold) {
-			/*
-			 * if plcp_err exceed the threshold, the following
-			 * data is printed in csv format:
-			 *    Text: plcp_err exceeded %d,
-			 *    Received ofdm.plcp_err,
-			 *    Current ofdm.plcp_err,
-			 *    combined_plcp_delta,
-			 *    plcp_msec
-			 */
-			IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
-				"%u, %d, %u mSecs\n",
-				priv->cfg->base_params->plcp_delta_threshold,
-				le32_to_cpu(current_stat.rx.ofdm.plcp_err),
-				combined_plcp_delta, plcp_msec);
-			/*
-			 * Reset the RF radio due to the high plcp
-			 * error rate
-			 */
-			rc = false;
-		}
-	}
-	return rc;
-}
-
 void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
 			      struct iwl_rx_mem_buffer *rxb)
 {
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = {
 	.isr_ops = {
 		.isr = iwl_isr_legacy,
 	},
-	.check_plcp_health = iwl3945_good_plcp_health,
 
 	.debugfs_ops = {
 		.rx_stats_read = iwl3945_ucode_rx_stats_read,