author		Francois Romieu <romieu@fr.zoreil.com>	2012-01-26 08:18:23 -0500
committer	Francois Romieu <romieu@fr.zoreil.com>	2012-01-27 15:26:25 -0500
commit		da78dbff2e05630921c551dbbc70a4b7981a8fff (patch)
tree		1ab4460f30ad2652a1b7d94aebb4a5f2caf42c5c
parent		1e874e041fc7c222cbd85b20c4406070be1f687a (diff)
r8169: remove work from irq handler.
The irq handler was a mess. See 7ab87ff4c770eed71e3777936299292739fcd0fe
("via-rhine: move work from irq handler to softirq and beyond") for
similar changes.

One can notice:
- all non-napi tasks are explicitly scheduled through a single work queue.
- hiding the software tx queue start behind the rtl_hw_start method is only
  mildly natural. Move it into the caller where needed.
- as can be seen from the heavy use of bh disabling locks, the driver is not
  safe for irq context messages with netconsole. It is still quite usable
  for general messaging though. Tested ok with concurrent register dumps
  (ethtool -d) + background traffic + "echo t > /proc/sysrq-trigger".

Tested with an old PCI chipset, PCIe 8168 and 810x:
- XID 0c900800 RTL8168evl/8111evl
- XID 18000000 RTL8168b/8111b
- XID 98000000 RTL8169sc/8110sc
- XID 083000c0 RTL8168d/8111d
- XID 081000c0 RTL8168d/8111d
- XID 00b00000 RTL8105e
- XID 04a00000 RTL8102e

As a side note, the comments in f11a377b3f4e897d11f0e8d1fc688667e2f19708
("r8169: avoid losing MSI interrupts") do not seem completely clear: if I
hack the driver further to stop acking the irq link event bit, MSI
interrupts keep being delivered (RTL8168b/8111b, XID 18000000).

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Cc: Hayes Wang <hayeswang@realtek.com>
-rw-r--r--	drivers/net/ethernet/realtek/r8169.c | 449
1 file changed, 231 insertions(+), 218 deletions(-)
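Before reading the diff, a condensed sketch of the deferred-work pattern the
patch introduces may help orientation. The names (rtl_schedule_task, rtl_task,
the RTL_FLAG_* bits and the handlers they dispatch to) come from the hunks
below; the sketch drops the tp->lock spinlock around the flag bitmap and the
netif_running()/wk.enabled gating, so it illustrates the idea rather than
reproducing the patch verbatim:

	enum rtl_flag {
		RTL_FLAG_TASK_SLOW_PENDING,
		RTL_FLAG_TASK_RESET_PENDING,
		RTL_FLAG_TASK_PHY_PENDING,
		RTL_FLAG_MAX
	};

	/* Mark a task pending; schedule the single work item only on the
	 * 0 -> 1 transition, so each task runs at most once per request.
	 */
	static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
	{
		if (!test_and_set_bit(flag, tp->wk.flags))
			schedule_work(&tp->wk.work);
	}

	/* Sole workqueue entry point: drain the pending flags in a fixed
	 * order, dispatching each one to its matching handler.
	 */
	static void rtl_task(struct work_struct *work)
	{
		static const struct {
			int bitnr;
			void (*action)(struct rtl8169_private *);
		} rtl_work[] = {
			{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
			{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
			{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work },
		};
		struct rtl8169_private *tp =
			container_of(work, struct rtl8169_private, wk.work);
		int i;

		mutex_lock(&tp->wk.mutex);	/* serializes against ethtool/open/close */
		for (i = 0; i < ARRAY_SIZE(rtl_work); i++)
			if (test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags))
				rtl_work[i].action(tp);
		mutex_unlock(&tp->wk.mutex);
	}

Interrupt-time and timer-time callers then reduce to a single
rtl_schedule_task(tp, RTL_FLAG_TASK_..._PENDING) call, while everything
heavyweight runs in process context under wk.mutex.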
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 8dd13f5a9203..d039d39963ad 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -667,6 +667,13 @@ struct rtl8169_counters {
 	__le16	tx_underun;
 };
 
+enum rtl_flag {
+	RTL_FLAG_TASK_SLOW_PENDING,
+	RTL_FLAG_TASK_RESET_PENDING,
+	RTL_FLAG_TASK_PHY_PENDING,
+	RTL_FLAG_MAX
+};
+
 struct rtl8169_private {
 	void __iomem *mmio_addr;	/* memory map physical address */
 	struct pci_dev *pci_dev;
@@ -688,9 +695,8 @@ struct rtl8169_private {
 	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
 	struct timer_list timer;
 	u16 cp_cmd;
-	u16 intr_event;
-	u16 napi_event;
-	u16 intr_mask;
+
+	u16 event_slow;
 
 	struct mdio_ops {
 		void (*write)(void __iomem *, int, int);
@@ -716,7 +722,10 @@ struct rtl8169_private {
 	int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
 
 	struct {
+		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
+		struct mutex mutex;
 		struct work_struct work;
+		bool enabled;
 	} wk;
 
 	unsigned features;
@@ -768,13 +777,20 @@ static int rtl8169_close(struct net_device *dev);
 static void rtl_set_rx_mode(struct net_device *dev);
 static void rtl8169_tx_timeout(struct net_device *dev);
 static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
-static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
-				void __iomem *, u32 budget);
 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
-static void rtl8169_down(struct net_device *dev);
 static void rtl8169_rx_clear(struct rtl8169_private *tp);
 static int rtl8169_poll(struct napi_struct *napi, int budget);
 
+static void rtl_lock_work(struct rtl8169_private *tp)
+{
+	mutex_lock(&tp->wk.mutex);
+}
+
+static void rtl_unlock_work(struct rtl8169_private *tp)
+{
+	mutex_unlock(&tp->wk.mutex);
+}
+
 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
 {
 	int cap = pci_pcie_cap(pdev);
@@ -1214,12 +1230,21 @@ static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
 	RTL_W16(IntrMask, bits);
 }
 
+#define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
+#define RTL_EVENT_NAPI_TX	(TxOK | TxErr)
+#define RTL_EVENT_NAPI		(RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
+
+static void rtl_irq_enable_all(struct rtl8169_private *tp)
+{
+	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
+}
+
 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
 
 	rtl_irq_disable(tp);
-	rtl_ack_events(tp, tp->intr_event);
+	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
 	RTL_R8(ChipCmd);
 }
 
@@ -1310,9 +1335,6 @@ static void __rtl8169_check_link_status(struct net_device *dev,
 					struct rtl8169_private *tp,
 					void __iomem *ioaddr, bool pm)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tp->lock, flags);
 	if (tp->link_ok(ioaddr)) {
 		rtl_link_chg_patch(tp);
 		/* This is to cancel a scheduled suspend if there's one. */
@@ -1327,7 +1349,6 @@ static void __rtl8169_check_link_status(struct net_device *dev,
 		if (pm)
 			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
 	}
-	spin_unlock_irqrestore(&tp->lock, flags);
 }
 
 static void rtl8169_check_link_status(struct net_device *dev,
@@ -1370,12 +1391,12 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	spin_lock_irq(&tp->lock);
+	rtl_lock_work(tp);
 
 	wol->supported = WAKE_ANY;
 	wol->wolopts = __rtl8169_get_wol(tp);
 
-	spin_unlock_irq(&tp->lock);
+	rtl_unlock_work(tp);
 }
 
 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1412,14 +1433,15 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	spin_lock_irq(&tp->lock);
+	rtl_lock_work(tp);
 
 	if (wol->wolopts)
 		tp->features |= RTL_FEATURE_WOL;
 	else
 		tp->features &= ~RTL_FEATURE_WOL;
 	__rtl8169_set_wol(tp, wol->wolopts);
-	spin_unlock_irq(&tp->lock);
+
+	rtl_unlock_work(tp);
 
 	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
 
@@ -1574,15 +1596,14 @@ out:
 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	unsigned long flags;
 	int ret;
 
 	del_timer_sync(&tp->timer);
 
-	spin_lock_irqsave(&tp->lock, flags);
+	rtl_lock_work(tp);
 	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
 				cmd->duplex, cmd->advertising);
-	spin_unlock_irqrestore(&tp->lock, flags);
+	rtl_unlock_work(tp);
 
 	return ret;
 }
@@ -1602,14 +1623,12 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
 	return features;
 }
 
-static int rtl8169_set_features(struct net_device *dev,
+static void __rtl8169_set_features(struct net_device *dev,
 	netdev_features_t features)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tp->lock, flags);
+	void __iomem *ioaddr = tp->mmio_addr;
 
 	if (features & NETIF_F_RXCSUM)
 		tp->cp_cmd |= RxChkSum;
@@ -1623,12 +1642,21 @@ static int rtl8169_set_features(struct net_device *dev,
 
 	RTL_W16(CPlusCmd, tp->cp_cmd);
 	RTL_R16(CPlusCmd);
+}
 
-	spin_unlock_irqrestore(&tp->lock, flags);
+static int rtl8169_set_features(struct net_device *dev,
+	netdev_features_t features)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	rtl_lock_work(tp);
+	__rtl8169_set_features(dev, features);
+	rtl_unlock_work(tp);
 
 	return 0;
 }
 
+
 static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
 				      struct sk_buff *skb)
 {
@@ -1677,14 +1705,12 @@ static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	unsigned long flags;
 	int rc;
 
-	spin_lock_irqsave(&tp->lock, flags);
-
+	rtl_lock_work(tp);
 	rc = tp->get_settings(dev, cmd);
+	rtl_unlock_work(tp);
 
-	spin_unlock_irqrestore(&tp->lock, flags);
 	return rc;
 }
 
@@ -1692,14 +1718,15 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 			     void *p)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	unsigned long flags;
 
 	if (regs->len > R8169_REGS_SIZE)
 		regs->len = R8169_REGS_SIZE;
 
-	spin_lock_irqsave(&tp->lock, flags);
+	rtl_lock_work(tp);
+	spin_lock_bh(&tp->lock);
 	memcpy_fromio(p, tp->mmio_addr, regs->len);
-	spin_unlock_irqrestore(&tp->lock, flags);
+	spin_unlock_bh(&tp->lock);
+	rtl_unlock_work(tp);
 }
 
 static u32 rtl8169_get_msglevel(struct net_device *dev)
@@ -3216,18 +3243,14 @@ static void rtl_hw_phy_config(struct net_device *dev)
 	}
 }
 
-static void rtl8169_phy_timer(unsigned long __opaque)
+static void rtl_phy_work(struct rtl8169_private *tp)
 {
-	struct net_device *dev = (struct net_device *)__opaque;
-	struct rtl8169_private *tp = netdev_priv(dev);
 	struct timer_list *timer = &tp->timer;
 	void __iomem *ioaddr = tp->mmio_addr;
 	unsigned long timeout = RTL8169_PHY_TIMEOUT;
 
 	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
 
-	spin_lock_irq(&tp->lock);
-
 	if (tp->phy_reset_pending(tp)) {
 		/*
 		 * A busy loop could burn quite a few cycles on nowadays CPU.
@@ -3238,32 +3261,45 @@ static void rtl8169_phy_timer(unsigned long __opaque)
 	}
 
 	if (tp->link_ok(ioaddr))
-		goto out_unlock;
+		return;
 
-	netif_warn(tp, link, dev, "PHY reset until link up\n");
+	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
 
 	tp->phy_reset_enable(tp);
 
 out_mod_timer:
 	mod_timer(timer, jiffies + timeout);
-out_unlock:
-	spin_unlock_irq(&tp->lock);
+}
+
+static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
+{
+	spin_lock(&tp->lock);
+	if (!test_and_set_bit(flag, tp->wk.flags))
+		schedule_work(&tp->wk.work);
+	spin_unlock(&tp->lock);
+}
+
+static void rtl_schedule_task_bh(struct rtl8169_private *tp, enum rtl_flag flag)
+{
+	local_bh_disable();
+	rtl_schedule_task(tp, flag);
+	local_bh_enable();
+}
+
+static void rtl8169_phy_timer(unsigned long __opaque)
+{
+	struct net_device *dev = (struct net_device *)__opaque;
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	rtl_schedule_task_bh(tp, RTL_FLAG_TASK_PHY_PENDING);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
 static void rtl8169_netpoll(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	struct pci_dev *pdev = tp->pci_dev;
 
-	disable_irq(pdev->irq);
-	rtl8169_interrupt(pdev->irq, dev);
-	enable_irq(pdev->irq);
+	rtl8169_interrupt(tp->pci_dev->irq, dev);
 }
 #endif
 
@@ -3344,7 +3380,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
 	low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
 	high = addr[4] | (addr[5] << 8);
 
-	spin_lock_irq(&tp->lock);
+	rtl_lock_work(tp);
 
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
 
@@ -3368,7 +3404,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
 
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 
-	spin_unlock_irq(&tp->lock);
+	rtl_unlock_work(tp);
 }
 
 static int rtl_set_mac_address(struct net_device *dev, void *p)
@@ -3422,8 +3458,7 @@ static const struct rtl_cfg_info {
 	void (*hw_start)(struct net_device *);
 	unsigned int region;
 	unsigned int align;
-	u16 intr_event;
-	u16 napi_event;
+	u16 event_slow;
 	unsigned features;
 	u8 default_ver;
 } rtl_cfg_infos [] = {
@@ -3431,9 +3466,7 @@ static const struct rtl_cfg_info {
 		.hw_start	= rtl_hw_start_8169,
 		.region		= 1,
 		.align		= 0,
-		.intr_event	= SYSErr | LinkChg | RxOverflow |
-				  RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
-		.napi_event	= RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
+		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
 		.features	= RTL_FEATURE_GMII,
 		.default_ver	= RTL_GIGA_MAC_VER_01,
 	},
@@ -3441,9 +3474,7 @@ static const struct rtl_cfg_info {
 		.hw_start	= rtl_hw_start_8168,
 		.region		= 2,
 		.align		= 8,
-		.intr_event	= SYSErr | LinkChg | RxOverflow |
-				  TxErr | TxOK | RxOK | RxErr,
-		.napi_event	= TxErr | TxOK | RxOK | RxOverflow,
+		.event_slow	= SYSErr | LinkChg | RxOverflow,
 		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
 		.default_ver	= RTL_GIGA_MAC_VER_11,
 	},
@@ -3451,9 +3482,8 @@ static const struct rtl_cfg_info {
 		.hw_start	= rtl_hw_start_8101,
 		.region		= 2,
 		.align		= 8,
-		.intr_event	= SYSErr | LinkChg | RxOverflow | PCSTimeout |
-				  RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
-		.napi_event	= RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
+		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
+				  PCSTimeout,
 		.features	= RTL_FEATURE_MSI,
 		.default_ver	= RTL_GIGA_MAC_VER_13,
 	}
@@ -4131,6 +4161,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	spin_lock_init(&tp->lock);
+	mutex_init(&tp->wk.mutex);
 
 	/* Get MAC address */
 	for (i = 0; i < ETH_ALEN; i++)
@@ -4158,10 +4189,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* 8110SCd requires hardware Rx VLAN - disallow toggling */
 	dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
 
-	tp->intr_mask = 0xffff;
 	tp->hw_start = cfg->hw_start;
-	tp->intr_event = cfg->intr_event;
-	tp->napi_event = cfg->napi_event;
+	tp->event_slow = cfg->event_slow;
 
 	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
 		~(RxBOVF | RxFOVF) : ~0;
@@ -4330,16 +4359,24 @@ static int rtl8169_open(struct net_device *dev)
 	if (retval < 0)
 		goto err_release_fw_2;
 
+	rtl_lock_work(tp);
+
+	tp->wk.enabled = true;
+
 	napi_enable(&tp->napi);
 
 	rtl8169_init_phy(dev, tp);
 
-	rtl8169_set_features(dev, dev->features);
+	__rtl8169_set_features(dev, dev->features);
 
 	rtl_pll_power_up(tp);
 
 	rtl_hw_start(dev);
 
+	netif_start_queue(dev);
+
+	rtl_unlock_work(tp);
+
 	tp->saved_wolopts = 0;
 	pm_runtime_put_noidle(&pdev->dev);
 
@@ -4413,9 +4450,7 @@ static void rtl_hw_start(struct net_device *dev)
 
 	tp->hw_start(dev);
 
-	rtl_irq_enable(tp, tp->intr_event);
-
-	netif_start_queue(dev);
+	rtl_irq_enable_all(tp);
 }
 
 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
@@ -4921,8 +4956,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
 
 	/* Work around for RxFIFO overflow. */
 	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
-		tp->intr_event |= RxFIFOOver | PCSTimeout;
-		tp->intr_event &= ~RxOverflow;
+		tp->event_slow |= RxFIFOOver | PCSTimeout;
+		tp->event_slow &= ~RxOverflow;
 	}
 
 	rtl_set_rx_tx_desc_registers(tp, ioaddr);
@@ -5108,10 +5143,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
 	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;
 
-	if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
-		tp->intr_event &= ~RxFIFOOver;
-		tp->napi_event &= ~RxFIFOOver;
-	}
+	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
+		tp->event_slow &= ~RxFIFOOver;
 
 	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
 	    tp->mac_version == RTL_GIGA_MAC_VER_16) {
@@ -5359,61 +5392,34 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
 	tp->cur_tx = tp->dirty_tx = 0;
 }
 
-static void rtl8169_schedule_work(struct net_device *dev)
-{
-	struct rtl8169_private *tp = netdev_priv(dev);
-
-	schedule_work(&tp->wk.work);
-}
-
-static void rtl8169_wait_for_quiescence(struct net_device *dev)
-{
-	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;
-
-	synchronize_irq(dev->irq);
-
-	/* Wait for any pending NAPI task to complete */
-	napi_disable(&tp->napi);
-
-	rtl8169_irq_mask_and_ack(tp);
-
-	tp->intr_mask = 0xffff;
-	RTL_W16(IntrMask, tp->intr_event);
-	napi_enable(&tp->napi);
-}
-
 static void rtl_reset_work(struct rtl8169_private *tp)
 {
 	struct net_device *dev = tp->dev;
 	int i;
 
-	rtnl_lock();
-
-	if (!netif_running(dev))
-		goto out_unlock;
+	napi_disable(&tp->napi);
+	netif_stop_queue(dev);
+	synchronize_sched();
 
 	rtl8169_hw_reset(tp);
 
-	rtl8169_wait_for_quiescence(dev);
-
 	for (i = 0; i < NUM_RX_DESC; i++)
 		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
 
 	rtl8169_tx_clear(tp);
 	rtl8169_init_ring_indexes(tp);
 
+	napi_enable(&tp->napi);
 	rtl_hw_start(dev);
 	netif_wake_queue(dev);
 	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
-
-out_unlock:
-	rtnl_unlock();
 }
 
 static void rtl8169_tx_timeout(struct net_device *dev)
 {
-	rtl8169_schedule_work(dev);
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
 }
 
 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
@@ -5550,6 +5556,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	RTL_W8(TxPoll, NPQ);
 
+	mmiowb();
+
 	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
 		netif_stop_queue(dev);
 		smp_mb();
@@ -5616,12 +5624,10 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
 
 	rtl8169_hw_reset(tp);
 
-	rtl8169_schedule_work(dev);
+	rtl_schedule_task_bh(tp, RTL_FLAG_TASK_RESET_PENDING);
 }
 
-static void rtl8169_tx_interrupt(struct net_device *dev,
-				 struct rtl8169_private *tp,
-				 void __iomem *ioaddr)
+static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 {
 	unsigned int dirty_tx, tx_left;
 
@@ -5664,8 +5670,11 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
 	 * of start_xmit activity is detected (if it is not detected,
 	 * it is slow enough). -- FR
 	 */
-	if (tp->cur_tx != dirty_tx)
+	if (tp->cur_tx != dirty_tx) {
+		void __iomem *ioaddr = tp->mmio_addr;
+
 		RTL_W8(TxPoll, NPQ);
+	}
 	}
 }
 
@@ -5704,9 +5713,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
 	return skb;
 }
 
-static int rtl8169_rx_interrupt(struct net_device *dev,
-				struct rtl8169_private *tp,
-				void __iomem *ioaddr, u32 budget)
+static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
 {
 	unsigned int cur_rx, rx_left;
 	unsigned int count;
@@ -5734,7 +5741,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
 			if (status & RxCRC)
 				dev->stats.rx_crc_errors++;
 			if (status & RxFOVF) {
-				rtl8169_schedule_work(dev);
+				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
 				dev->stats.rx_fifo_errors++;
 			}
 			rtl8169_mark_to_asic(desc, rx_buf_sz);
@@ -5795,109 +5802,120 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
 	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;
 	int handled = 0;
 	u16 status;
 
-	/* loop handling interrupts until we have no new ones or
-	 * we hit a invalid/hotplug case.
-	 */
 	status = rtl_get_events(tp);
-	while (status && status != 0xffff) {
-		status &= tp->intr_event;
-		if (!status)
-			break;
-
-		handled = 1;
+	if (status && status != 0xffff) {
+		status &= RTL_EVENT_NAPI | tp->event_slow;
+		if (status) {
+			handled = 1;
 
-		/* Handle all of the error cases first. These will reset
-		 * the chip, so just exit the loop.
-		 */
-		if (unlikely(!netif_running(dev))) {
-			rtl8169_hw_reset(tp);
-			break;
+			rtl_irq_disable(tp);
+			napi_schedule(&tp->napi);
 		}
+	}
+	return IRQ_RETVAL(handled);
+}
 
-		if (unlikely(status & RxFIFOOver)) {
-			switch (tp->mac_version) {
-			/* Work around for rx fifo overflow */
-			case RTL_GIGA_MAC_VER_11:
-				netif_stop_queue(dev);
-				rtl8169_tx_timeout(dev);
-				goto done;
-			default:
-				break;
-			}
-		}
+/*
+ * Workqueue context.
+ */
+static void rtl_slow_event_work(struct rtl8169_private *tp)
+{
+	struct net_device *dev = tp->dev;
+	u16 status;
+
+	status = rtl_get_events(tp) & tp->event_slow;
+	rtl_ack_events(tp, status);
 
-		if (unlikely(status & SYSErr)) {
-			rtl8169_pcierr_interrupt(dev);
+	if (unlikely(status & RxFIFOOver)) {
+		switch (tp->mac_version) {
+		/* Work around for rx fifo overflow */
+		case RTL_GIGA_MAC_VER_11:
+			netif_stop_queue(dev);
+			rtl_schedule_task_bh(tp, RTL_FLAG_TASK_RESET_PENDING);
+		default:
 			break;
 		}
+	}
 
-		if (status & LinkChg)
-			__rtl8169_check_link_status(dev, tp, ioaddr, true);
+	if (unlikely(status & SYSErr))
+		rtl8169_pcierr_interrupt(dev);
 
-		/* We need to see the lastest version of tp->intr_mask to
-		 * avoid ignoring an MSI interrupt and having to wait for
-		 * another event which may never come.
-		 */
-		smp_rmb();
-		if (status & tp->intr_mask & tp->napi_event) {
-			RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
-			tp->intr_mask = ~tp->napi_event;
-
-			if (likely(napi_schedule_prep(&tp->napi)))
-				__napi_schedule(&tp->napi);
-			else
-				netif_info(tp, intr, dev,
-					   "interrupt %04x in poll\n", status);
-		}
+	if (status & LinkChg)
+		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
 
-		/* We only get a new MSI interrupt when all active irq
-		 * sources on the chip have been acknowledged. So, ack
-		 * everything we've seen and check if new sources have become
-		 * active to avoid blocking all interrupts from the chip.
-		 */
-		RTL_W16(IntrStatus,
-			(status & RxFIFOOver) ? (status | RxOverflow) : status);
-		status = rtl_get_events(tp);
-	}
-done:
-	return IRQ_RETVAL(handled);
+	napi_disable(&tp->napi);
+	rtl_irq_disable(tp);
+
+	napi_enable(&tp->napi);
+	napi_schedule(&tp->napi);
 }
 
 static void rtl_task(struct work_struct *work)
 {
+	static const struct {
+		int bitnr;
+		void (*action)(struct rtl8169_private *);
+	} rtl_work[] = {
+		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
+		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
+		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
+	};
 	struct rtl8169_private *tp =
 		container_of(work, struct rtl8169_private, wk.work);
+	struct net_device *dev = tp->dev;
+	int i;
+
+	rtl_lock_work(tp);
+
+	if (!netif_running(dev) || !tp->wk.enabled)
+		goto out_unlock;
+
+	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
+		bool pending;
+
+		spin_lock_bh(&tp->lock);
+		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
+		spin_unlock_bh(&tp->lock);
+
+		if (pending)
+			rtl_work[i].action(tp);
+	}
 
-	rtl_reset_work(tp);
+out_unlock:
+	rtl_unlock_work(tp);
 }
 
 static int rtl8169_poll(struct napi_struct *napi, int budget)
 {
 	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
 	struct net_device *dev = tp->dev;
-	void __iomem *ioaddr = tp->mmio_addr;
-	int work_done;
+	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
+	int work_done = 0;
+	u16 status;
+
+	status = rtl_get_events(tp);
+	rtl_ack_events(tp, status & ~tp->event_slow);
+
+	if (status & RTL_EVENT_NAPI_RX)
+		work_done = rtl_rx(dev, tp, (u32) budget);
+
+	if (status & RTL_EVENT_NAPI_TX)
+		rtl_tx(dev, tp);
 
-	work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
-	rtl8169_tx_interrupt(dev, tp, ioaddr);
+	if (status & tp->event_slow) {
+		enable_mask &= ~tp->event_slow;
+
+		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
+	}
 
 	if (work_done < budget) {
 		napi_complete(napi);
 
-		/* We need for force the visibility of tp->intr_mask
-		 * for other CPUs, as we can loose an MSI interrupt
-		 * and potentially wait for a retransmit timeout if we don't.
-		 * The posted write to IntrMask is safe, as it will
-		 * eventually make it to the chip and we won't loose anything
-		 * until it does.
-		 */
-		tp->intr_mask = 0xffff;
-		wmb();
-		RTL_W16(IntrMask, tp->intr_event);
+		rtl_irq_enable(tp, enable_mask);
+		mmiowb();
 	}
 
 	return work_done;
@@ -5921,11 +5939,8 @@ static void rtl8169_down(struct net_device *dev)
 
 	del_timer_sync(&tp->timer);
 
-	netif_stop_queue(dev);
-
 	napi_disable(&tp->napi);
-
-	spin_lock_irq(&tp->lock);
+	netif_stop_queue(dev);
 
 	rtl8169_hw_reset(tp);
 	/*
@@ -5935,12 +5950,8 @@ static void rtl8169_down(struct net_device *dev)
 	 */
 	rtl8169_rx_missed(dev, ioaddr);
 
-	spin_unlock_irq(&tp->lock);
-
-	synchronize_irq(dev->irq);
-
 	/* Give a racing hard_start_xmit a few cycles to complete. */
-	synchronize_sched();  /* FIXME: should this be synchronize_irq()? */
+	synchronize_sched();
 
 	rtl8169_tx_clear(tp);
 
@@ -5959,7 +5970,11 @@ static int rtl8169_close(struct net_device *dev)
 	/* Update counters before going down */
 	rtl8169_update_counters(dev);
 
+	rtl_lock_work(tp);
+	tp->wk.enabled = false;
+
 	rtl8169_down(dev);
+	rtl_unlock_work(tp);
 
 	free_irq(dev->irq, dev);
 
@@ -5979,7 +5994,6 @@ static void rtl_set_rx_mode(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	unsigned long flags;
 	u32 mc_filter[2];	/* Multicast hash filter */
 	int rx_mode;
 	u32 tmp = 0;
@@ -6008,7 +6022,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
 		}
 	}
 
-	spin_lock_irqsave(&tp->lock, flags);
+	spin_lock_bh(&tp->lock);
 
 	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
 
@@ -6024,7 +6038,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
 
 	RTL_W32(RxConfig, tmp);
 
-	spin_unlock_irqrestore(&tp->lock, flags);
+	spin_unlock_bh(&tp->lock);
 }
 
 /**
@@ -6037,13 +6051,9 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	unsigned long flags;
 
-	if (netif_running(dev)) {
-		spin_lock_irqsave(&tp->lock, flags);
+	if (netif_running(dev))
 		rtl8169_rx_missed(dev, ioaddr);
-		spin_unlock_irqrestore(&tp->lock, flags);
-	}
 
 	return &dev->stats;
 }
@@ -6055,10 +6065,15 @@ static void rtl8169_net_suspend(struct net_device *dev)
 	if (!netif_running(dev))
 		return;
 
-	rtl_pll_power_down(tp);
-
 	netif_device_detach(dev);
 	netif_stop_queue(dev);
+
+	rtl_lock_work(tp);
+	napi_disable(&tp->napi);
+	tp->wk.enabled = false;
+	rtl_unlock_work(tp);
+
+	rtl_pll_power_down(tp);
 }
 
 #ifdef CONFIG_PM
@@ -6081,7 +6096,9 @@ static void __rtl8169_resume(struct net_device *dev)
 
 	rtl_pll_power_up(tp);
 
-	rtl8169_schedule_work(dev);
+	tp->wk.enabled = true;
+
+	rtl_schedule_task_bh(tp, RTL_FLAG_TASK_RESET_PENDING);
 }
 
 static int rtl8169_resume(struct device *device)
@@ -6107,10 +6124,10 @@ static int rtl8169_runtime_suspend(struct device *device)
 	if (!tp->TxDescArray)
 		return 0;
 
-	spin_lock_irq(&tp->lock);
+	rtl_lock_work(tp);
 	tp->saved_wolopts = __rtl8169_get_wol(tp);
 	__rtl8169_set_wol(tp, WAKE_ANY);
-	spin_unlock_irq(&tp->lock);
+	rtl_unlock_work(tp);
 
 	rtl8169_net_suspend(dev);
 
@@ -6126,10 +6143,10 @@ static int rtl8169_runtime_resume(struct device *device)
 	if (!tp->TxDescArray)
 		return 0;
 
-	spin_lock_irq(&tp->lock);
+	rtl_lock_work(tp);
 	__rtl8169_set_wol(tp, tp->saved_wolopts);
 	tp->saved_wolopts = 0;
-	spin_unlock_irq(&tp->lock);
+	rtl_unlock_work(tp);
 
 	rtl8169_init_phy(dev, tp);
 
@@ -6197,12 +6214,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
 	/* Restore original MAC address */
 	rtl_rar_set(tp, dev->perm_addr);
 
-	spin_lock_irq(&tp->lock);
-
 	rtl8169_hw_reset(tp);
 
-	spin_unlock_irq(&tp->lock);
-
 	if (system_state == SYSTEM_POWER_OFF) {
 		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
 			rtl_wol_suspend_quirk(tp);