author    David S. Miller <davem@davemloft.net>  2012-01-30 12:39:17 -0500
committer David S. Miller <davem@davemloft.net>  2012-01-30 12:39:17 -0500
commit    e94d5b7aec6a93d8ed3b9fc8e1c75285351eb17e (patch)
tree      ee998a669dae13b5c9aeba44a158bcd2e4c0f3ba /drivers/net
parent    30088a25e9c4dc1f8bd5c48b14a18633441b5481 (diff)
parent    da78dbff2e05630921c551dbbc70a4b7981a8fff (diff)
Merge branch 'davem-next.r8169' of git://violet.fr.zoreil.com/romieu/linux
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 558
1 file changed, 288 insertions, 270 deletions
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 7a0c800b50ad..d039d39963ad 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -667,6 +667,13 @@ struct rtl8169_counters {
 	__le16 tx_underun;
 };
 
+enum rtl_flag {
+	RTL_FLAG_TASK_SLOW_PENDING,
+	RTL_FLAG_TASK_RESET_PENDING,
+	RTL_FLAG_TASK_PHY_PENDING,
+	RTL_FLAG_MAX
+};
+
 struct rtl8169_private {
 	void __iomem *mmio_addr;	/* memory map physical address */
 	struct pci_dev *pci_dev;
@@ -688,9 +695,8 @@ struct rtl8169_private {
 	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
 	struct timer_list timer;
 	u16 cp_cmd;
-	u16 intr_event;
-	u16 napi_event;
-	u16 intr_mask;
+
+	u16 event_slow;
 
 	struct mdio_ops {
 		void (*write)(void __iomem *, int, int);
@@ -714,7 +720,14 @@ struct rtl8169_private {
 	unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
 	unsigned int (*link_ok)(void __iomem *);
 	int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
-	struct delayed_work task;
+
+	struct {
+		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
+		struct mutex mutex;
+		struct work_struct work;
+		bool enabled;
+	} wk;
+
 	unsigned features;
 
 	struct mii_if_info mii;
@@ -764,13 +777,20 @@ static int rtl8169_close(struct net_device *dev);
 static void rtl_set_rx_mode(struct net_device *dev);
 static void rtl8169_tx_timeout(struct net_device *dev);
 static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
-static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
-				void __iomem *, u32 budget);
 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
-static void rtl8169_down(struct net_device *dev);
 static void rtl8169_rx_clear(struct rtl8169_private *tp);
 static int rtl8169_poll(struct napi_struct *napi, int budget);
 
+static void rtl_lock_work(struct rtl8169_private *tp)
+{
+	mutex_lock(&tp->wk.mutex);
+}
+
+static void rtl_unlock_work(struct rtl8169_private *tp)
+{
+	mutex_unlock(&tp->wk.mutex);
+}
+
 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
 {
 	int cap = pci_pcie_cap(pdev);
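The rtl_lock_work()/rtl_unlock_work() wrappers and the new wk block replace the old spin_lock_irq protection with a single mutex: every slow path (ethtool, WoL, reset, PHY work) now serializes on tp->wk.mutex, pending task types live in one bitmap, and wk.enabled gates the work function against a closing device. A minimal userspace model of that pattern in C11 with pthreads, for illustration only (all names here are hypothetical):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { TASK_SLOW, TASK_RESET, TASK_PHY, TASK_MAX };

/* One mutex serializes every slow-path operation; 'enabled' gates the
 * worker against a device that is closing; 'flags' holds pending tasks. */
struct work_state {
	pthread_mutex_t mutex;
	atomic_uint flags;
	bool enabled;
};

static void lock_work(struct work_state *w)   { pthread_mutex_lock(&w->mutex); }
static void unlock_work(struct work_state *w) { pthread_mutex_unlock(&w->mutex); }

/* The single work function: runs with the mutex held, so it cannot race
 * against ethtool-style reconfiguration, open or close. */
static void work_fn(struct work_state *w)
{
	lock_work(w);
	if (w->enabled) {
		unsigned pending = atomic_exchange(&w->flags, 0);
		for (int t = 0; t < TASK_MAX; t++)
			if (pending & (1u << t))
				printf("running task %d\n", t);
	}
	unlock_work(w);
}

int main(void)
{
	struct work_state w = { PTHREAD_MUTEX_INITIALIZER, 0, true };

	atomic_fetch_or(&w.flags, 1u << TASK_RESET);	/* schedule a reset */
	work_fn(&w);
	w.enabled = false;	/* close(): later invocations do nothing */
	work_fn(&w);
	return 0;
}

Trading the spinlock for a mutex is what lets these slow paths sleep, which a spin_lock_irq section never could.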
@@ -1180,12 +1200,51 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
 	return value;
 }
 
+static u16 rtl_get_events(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	return RTL_R16(IntrStatus);
+}
+
+static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	RTL_W16(IntrStatus, bits);
+	mmiowb();
+}
+
+static void rtl_irq_disable(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	RTL_W16(IntrMask, 0);
+	mmiowb();
+}
+
+static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	RTL_W16(IntrMask, bits);
+}
+
+#define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
+#define RTL_EVENT_NAPI_TX	(TxOK | TxErr)
+#define RTL_EVENT_NAPI		(RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
+
+static void rtl_irq_enable_all(struct rtl8169_private *tp)
+{
+	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
+}
+
 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
 
-	RTL_W16(IntrMask, 0x0000);
-	RTL_W16(IntrStatus, tp->intr_event);
+	rtl_irq_disable(tp);
+	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
 	RTL_R8(ChipCmd);
 }
 
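With these helpers the interrupt sources are split once and for all: RTL_EVENT_NAPI is the fixed fast half serviced in rtl8169_poll(), tp->event_slow is the per-chip slow half handed off to the workqueue, and rtl_irq_enable_all() simply ORs the two. A standalone sketch of the mask arithmetic, with made-up bit values (the real ones mirror the chip's IntrStatus layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; not the chip's actual register bits. */
enum {
	RxOK = 1 << 0, RxErr = 1 << 1, TxOK = 1 << 2, TxErr = 1 << 3,
	RxOverflow = 1 << 4, LinkChg = 1 << 5, SYSErr = 1 << 6,
};

#define EVENT_NAPI_RX	(RxOK | RxErr)
#define EVENT_NAPI_TX	(TxOK | TxErr)
#define EVENT_NAPI	(EVENT_NAPI_RX | EVENT_NAPI_TX)

int main(void)
{
	uint16_t event_slow = SYSErr | LinkChg | RxOverflow;	/* per-chip */

	/* rtl_irq_enable_all(): the fast half plus this chip's slow half. */
	uint16_t enable_all = EVENT_NAPI | event_slow;

	/* rtl8169_poll(): while a slow event is being handed off to the
	 * workqueue, re-enable everything except the slow sources. */
	uint16_t enable_mask = enable_all & ~event_slow;

	printf("all=%#x napi-only=%#x\n", enable_all, enable_mask);
	return 0;
}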
@@ -1276,9 +1335,6 @@ static void __rtl8169_check_link_status(struct net_device *dev,
 				     struct rtl8169_private *tp,
 				     void __iomem *ioaddr, bool pm)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&tp->lock, flags);
 	if (tp->link_ok(ioaddr)) {
 		rtl_link_chg_patch(tp);
 		/* This is to cancel a scheduled suspend if there's one. */
@@ -1293,7 +1349,6 @@ static void __rtl8169_check_link_status(struct net_device *dev,
 		if (pm)
 			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
 	}
-	spin_unlock_irqrestore(&tp->lock, flags);
 }
 
 static void rtl8169_check_link_status(struct net_device *dev,
@@ -1336,12 +1391,12 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	spin_lock_irq(&tp->lock);
+	rtl_lock_work(tp);
 
 	wol->supported = WAKE_ANY;
 	wol->wolopts = __rtl8169_get_wol(tp);
 
-	spin_unlock_irq(&tp->lock);
+	rtl_unlock_work(tp);
 }
 
 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1378,14 +1433,15 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	spin_lock_irq(&tp->lock);
+	rtl_lock_work(tp);
 
 	if (wol->wolopts)
 		tp->features |= RTL_FEATURE_WOL;
 	else
 		tp->features &= ~RTL_FEATURE_WOL;
 	__rtl8169_set_wol(tp, wol->wolopts);
-	spin_unlock_irq(&tp->lock);
+
+	rtl_unlock_work(tp);
 
 	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
 
@@ -1540,15 +1596,14 @@ out:
 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	unsigned long flags;
 	int ret;
 
 	del_timer_sync(&tp->timer);
 
-	spin_lock_irqsave(&tp->lock, flags);
+	rtl_lock_work(tp);
 	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
 				cmd->duplex, cmd->advertising);
-	spin_unlock_irqrestore(&tp->lock, flags);
+	rtl_unlock_work(tp);
 
 	return ret;
 }
@@ -1568,14 +1623,12 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
 	return features;
 }
 
-static int rtl8169_set_features(struct net_device *dev,
-	netdev_features_t features)
+static void __rtl8169_set_features(struct net_device *dev,
+				   netdev_features_t features)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tp->lock, flags);
+	void __iomem *ioaddr = tp->mmio_addr;
 
 	if (features & NETIF_F_RXCSUM)
 		tp->cp_cmd |= RxChkSum;
@@ -1589,12 +1642,21 @@ static int rtl8169_set_features(struct net_device *dev,
 
 	RTL_W16(CPlusCmd, tp->cp_cmd);
 	RTL_R16(CPlusCmd);
+}
+
+static int rtl8169_set_features(struct net_device *dev,
+				netdev_features_t features)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
 
-	spin_unlock_irqrestore(&tp->lock, flags);
+	rtl_lock_work(tp);
+	__rtl8169_set_features(dev, features);
+	rtl_unlock_work(tp);
 
 	return 0;
 }
 
+
 static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
 				      struct sk_buff *skb)
 {
@@ -1643,14 +1705,12 @@ static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	unsigned long flags;
 	int rc;
 
-	spin_lock_irqsave(&tp->lock, flags);
-
+	rtl_lock_work(tp);
 	rc = tp->get_settings(dev, cmd);
+	rtl_unlock_work(tp);
 
-	spin_unlock_irqrestore(&tp->lock, flags);
 	return rc;
 }
 
@@ -1658,14 +1718,15 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 			     void *p)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	unsigned long flags;
 
 	if (regs->len > R8169_REGS_SIZE)
 		regs->len = R8169_REGS_SIZE;
 
-	spin_lock_irqsave(&tp->lock, flags);
+	rtl_lock_work(tp);
+	spin_lock_bh(&tp->lock);
 	memcpy_fromio(p, tp->mmio_addr, regs->len);
-	spin_unlock_irqrestore(&tp->lock, flags);
+	spin_unlock_bh(&tp->lock);
+	rtl_unlock_work(tp);
 }
 
 static u32 rtl8169_get_msglevel(struct net_device *dev)
@@ -3182,18 +3243,14 @@ static void rtl_hw_phy_config(struct net_device *dev)
 	}
 }
 
-static void rtl8169_phy_timer(unsigned long __opaque)
+static void rtl_phy_work(struct rtl8169_private *tp)
 {
-	struct net_device *dev = (struct net_device *)__opaque;
-	struct rtl8169_private *tp = netdev_priv(dev);
 	struct timer_list *timer = &tp->timer;
 	void __iomem *ioaddr = tp->mmio_addr;
 	unsigned long timeout = RTL8169_PHY_TIMEOUT;
 
 	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
 
-	spin_lock_irq(&tp->lock);
-
 	if (tp->phy_reset_pending(tp)) {
 		/*
 		 * A busy loop could burn quite a few cycles on nowadays CPU.
@@ -3204,32 +3261,45 @@ static void rtl8169_phy_timer(unsigned long __opaque)
 	}
 
 	if (tp->link_ok(ioaddr))
-		goto out_unlock;
+		return;
 
-	netif_warn(tp, link, dev, "PHY reset until link up\n");
+	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
 
 	tp->phy_reset_enable(tp);
 
 out_mod_timer:
 	mod_timer(timer, jiffies + timeout);
-out_unlock:
-	spin_unlock_irq(&tp->lock);
+}
+
+static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
+{
+	spin_lock(&tp->lock);
+	if (!test_and_set_bit(flag, tp->wk.flags))
+		schedule_work(&tp->wk.work);
+	spin_unlock(&tp->lock);
+}
+
+static void rtl_schedule_task_bh(struct rtl8169_private *tp, enum rtl_flag flag)
+{
+	local_bh_disable();
+	rtl_schedule_task(tp, flag);
+	local_bh_enable();
+}
+
+static void rtl8169_phy_timer(unsigned long __opaque)
+{
+	struct net_device *dev = (struct net_device *)__opaque;
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	rtl_schedule_task_bh(tp, RTL_FLAG_TASK_PHY_PENDING);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
 static void rtl8169_netpoll(struct net_device *dev)
{
 	struct rtl8169_private *tp = netdev_priv(dev);
-	struct pci_dev *pdev = tp->pci_dev;
 
-	disable_irq(pdev->irq);
-	rtl8169_interrupt(pdev->irq, dev);
-	enable_irq(pdev->irq);
+	rtl8169_interrupt(tp->pci_dev->irq, dev);
 }
 #endif
 
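rtl_schedule_task() is the linchpin of the conversion: test_and_set_bit() makes scheduling idempotent, so however many timers, interrupts, or error paths request the same task, the shared work item is queued at most once per pending bit. The spinlock pairs with the test_and_clear_bit() in rtl_task(), and the _bh variant lets process-context callers take a lock that is otherwise used from softirq context. The idempotent-scheduling core, modeled in standalone C11 (names invented for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* One bit per deferred task type. */
static atomic_uint pending_flags;

/* Returns true only for the caller that actually set the bit, so the
 * shared work item gets queued at most once per pending task. */
static bool schedule_once(unsigned bit)
{
	unsigned mask = 1u << bit;

	return !(atomic_fetch_or(&pending_flags, mask) & mask);
}

int main(void)
{
	printf("%d\n", schedule_once(0));	/* 1: queue the work item */
	printf("%d\n", schedule_once(0));	/* 0: already pending, no-op */
	return 0;
}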
@@ -3310,7 +3380,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
 	low  = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
 	high = addr[4] | (addr[5] << 8);
 
-	spin_lock_irq(&tp->lock);
+	rtl_lock_work(tp);
 
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
 
@@ -3334,7 +3404,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
 
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 
-	spin_unlock_irq(&tp->lock);
+	rtl_unlock_work(tp);
 }
 
 static int rtl_set_mac_address(struct net_device *dev, void *p)
@@ -3388,8 +3458,7 @@ static const struct rtl_cfg_info {
 	void (*hw_start)(struct net_device *);
 	unsigned int region;
 	unsigned int align;
-	u16 intr_event;
-	u16 napi_event;
+	u16 event_slow;
 	unsigned features;
 	u8 default_ver;
 } rtl_cfg_infos [] = {
@@ -3397,9 +3466,7 @@ static const struct rtl_cfg_info {
 		.hw_start	= rtl_hw_start_8169,
 		.region		= 1,
 		.align		= 0,
-		.intr_event	= SYSErr | LinkChg | RxOverflow |
-				  RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
-		.napi_event	= RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
+		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
 		.features	= RTL_FEATURE_GMII,
 		.default_ver	= RTL_GIGA_MAC_VER_01,
 	},
@@ -3407,9 +3474,7 @@ static const struct rtl_cfg_info {
 		.hw_start	= rtl_hw_start_8168,
 		.region		= 2,
 		.align		= 8,
-		.intr_event	= SYSErr | LinkChg | RxOverflow |
-				  TxErr | TxOK | RxOK | RxErr,
-		.napi_event	= TxErr | TxOK | RxOK | RxOverflow,
+		.event_slow	= SYSErr | LinkChg | RxOverflow,
 		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
 		.default_ver	= RTL_GIGA_MAC_VER_11,
 	},
@@ -3417,9 +3482,8 @@ static const struct rtl_cfg_info {
 		.hw_start	= rtl_hw_start_8101,
 		.region		= 2,
 		.align		= 8,
-		.intr_event	= SYSErr | LinkChg | RxOverflow | PCSTimeout |
-				  RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
-		.napi_event	= RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
+		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
+				  PCSTimeout,
 		.features	= RTL_FEATURE_MSI,
 		.default_ver	= RTL_GIGA_MAC_VER_13,
 	}
@@ -3824,23 +3888,21 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
 static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
 
 	RTL_W8(MaxTxPacketSize, 0x3f);
 	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
 	RTL_W8(Config4, RTL_R8(Config4) | 0x01);
-	pci_write_config_byte(pdev, 0x79, 0x20);
+	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
 }
 
 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
 
 	RTL_W8(MaxTxPacketSize, 0x0c);
 	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
 	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
-	pci_write_config_byte(pdev, 0x79, 0x50);
+	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
 }
 
 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -4048,11 +4110,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	rtl_init_rxcfg(tp);
 
-	RTL_W16(IntrMask, 0x0000);
+	rtl_irq_disable(tp);
 
 	rtl_hw_reset(tp);
 
-	RTL_W16(IntrStatus, 0xffff);
+	rtl_ack_events(tp, 0xffff);
 
 	pci_set_master(pdev);
 
@@ -4099,6 +4161,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	spin_lock_init(&tp->lock);
+	mutex_init(&tp->wk.mutex);
 
 	/* Get MAC address */
 	for (i = 0; i < ETH_ALEN; i++)
@@ -4126,10 +4189,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* 8110SCd requires hardware Rx VLAN - disallow toggling */
 	dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
 
-	tp->intr_mask = 0xffff;
 	tp->hw_start = cfg->hw_start;
-	tp->intr_event = cfg->intr_event;
-	tp->napi_event = cfg->napi_event;
+	tp->event_slow = cfg->event_slow;
 
 	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
 		~(RxBOVF | RxFOVF) : ~0;
@@ -4196,7 +4257,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
 		rtl8168_driver_stop(tp);
 	}
 
-	cancel_delayed_work_sync(&tp->task);
+	cancel_work_sync(&tp->wk.work);
 
 	unregister_netdev(dev);
 
@@ -4257,6 +4318,8 @@ static void rtl_request_firmware(struct rtl8169_private *tp)
 		rtl_request_uncached_firmware(tp);
 }
 
+static void rtl_task(struct work_struct *);
+
 static int rtl8169_open(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -4284,7 +4347,7 @@ static int rtl8169_open(struct net_device *dev)
 	if (retval < 0)
 		goto err_free_rx_1;
 
-	INIT_DELAYED_WORK(&tp->task, NULL);
+	INIT_WORK(&tp->wk.work, rtl_task);
 
 	smp_mb();
 
@@ -4296,16 +4359,24 @@ static int rtl8169_open(struct net_device *dev)
 	if (retval < 0)
 		goto err_release_fw_2;
 
+	rtl_lock_work(tp);
+
+	tp->wk.enabled = true;
+
 	napi_enable(&tp->napi);
 
 	rtl8169_init_phy(dev, tp);
 
-	rtl8169_set_features(dev, dev->features);
+	__rtl8169_set_features(dev, dev->features);
 
 	rtl_pll_power_up(tp);
 
 	rtl_hw_start(dev);
 
+	netif_start_queue(dev);
+
+	rtl_unlock_work(tp);
+
 	tp->saved_wolopts = 0;
 	pm_runtime_put_noidle(&pdev->dev);
 
@@ -4379,7 +4450,7 @@ static void rtl_hw_start(struct net_device *dev)
 
 	tp->hw_start(dev);
 
-	netif_start_queue(dev);
+	rtl_irq_enable_all(tp);
 }
 
 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
@@ -4506,9 +4577,6 @@ static void rtl_hw_start_8169(struct net_device *dev)
 
 	/* no early-rx interrupts */
 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
-
-	/* Enable all known interrupts by setting the interrupt mask. */
-	RTL_W16(IntrMask, tp->intr_event);
 }
 
 static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
@@ -4888,8 +4956,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
 
 	/* Work around for RxFIFO overflow. */
 	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
-		tp->intr_event |= RxFIFOOver | PCSTimeout;
-		tp->intr_event &= ~RxOverflow;
+		tp->event_slow |= RxFIFOOver | PCSTimeout;
+		tp->event_slow &= ~RxOverflow;
 	}
 
 	rtl_set_rx_tx_desc_registers(tp, ioaddr);
@@ -4977,8 +5045,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 
 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
-
-	RTL_W16(IntrMask, tp->intr_event);
 }
 
 #define R810X_CPCMD_QUIRK_MASK (\
@@ -5077,10 +5143,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
 	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;
 
-	if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
-		tp->intr_event &= ~RxFIFOOver;
-		tp->napi_event &= ~RxFIFOOver;
-	}
+	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
+		tp->event_slow &= ~RxFIFOOver;
 
 	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
 	    tp->mac_version == RTL_GIGA_MAC_VER_16) {
@@ -5136,8 +5200,6 @@ static void rtl_hw_start_8101(struct net_device *dev)
 	rtl_set_rx_mode(dev);
 
 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
-
-	RTL_W16(IntrMask, tp->intr_event);
 }
 
 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -5330,92 +5392,34 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
 	tp->cur_tx = tp->dirty_tx = 0;
 }
 
-static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
+static void rtl_reset_work(struct rtl8169_private *tp)
 {
-	struct rtl8169_private *tp = netdev_priv(dev);
-
-	PREPARE_DELAYED_WORK(&tp->task, task);
-	schedule_delayed_work(&tp->task, 4);
-}
-
-static void rtl8169_wait_for_quiescence(struct net_device *dev)
-{
-	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;
-
-	synchronize_irq(dev->irq);
-
-	/* Wait for any pending NAPI task to complete */
-	napi_disable(&tp->napi);
-
-	rtl8169_irq_mask_and_ack(tp);
-
-	tp->intr_mask = 0xffff;
-	RTL_W16(IntrMask, tp->intr_event);
-	napi_enable(&tp->napi);
-}
-
-static void rtl8169_reinit_task(struct work_struct *work)
-{
-	struct rtl8169_private *tp =
-		container_of(work, struct rtl8169_private, task.work);
-	struct net_device *dev = tp->dev;
-	int ret;
-
-	rtnl_lock();
-
-	if (!netif_running(dev))
-		goto out_unlock;
-
-	rtl8169_wait_for_quiescence(dev);
-	rtl8169_close(dev);
-
-	ret = rtl8169_open(dev);
-	if (unlikely(ret < 0)) {
-		if (net_ratelimit())
-			netif_err(tp, drv, dev,
-				  "reinit failure (status = %d). Rescheduling\n",
-				  ret);
-		rtl8169_schedule_work(dev, rtl8169_reinit_task);
-	}
-
-out_unlock:
-	rtnl_unlock();
-}
-
-static void rtl8169_reset_task(struct work_struct *work)
-{
-	struct rtl8169_private *tp =
-		container_of(work, struct rtl8169_private, task.work);
 	struct net_device *dev = tp->dev;
 	int i;
 
-	rtnl_lock();
-
-	if (!netif_running(dev))
-		goto out_unlock;
+	napi_disable(&tp->napi);
+	netif_stop_queue(dev);
+	synchronize_sched();
 
 	rtl8169_hw_reset(tp);
 
-	rtl8169_wait_for_quiescence(dev);
-
 	for (i = 0; i < NUM_RX_DESC; i++)
 		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
 
 	rtl8169_tx_clear(tp);
 	rtl8169_init_ring_indexes(tp);
 
+	napi_enable(&tp->napi);
 	rtl_hw_start(dev);
 	netif_wake_queue(dev);
 	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
-
-out_unlock:
-	rtnl_unlock();
 }
 
 static void rtl8169_tx_timeout(struct net_device *dev)
 {
-	rtl8169_schedule_work(dev, rtl8169_reset_task);
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
 }
 
 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
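rtl_reset_work() no longer bounces the device through close/open; it quiesces the datapath in place: disable NAPI, stop the queue, let any CPU still inside the hot path drain out (synchronize_sched()), and only then rebuild the rings. A compilable toy model of that quiesce-then-reset shape, with an in-flight counter standing in for synchronize_sched() (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool stopped;	/* models the stopped queue + disabled NAPI */
static atomic_int in_flight;	/* CPUs currently inside the hot path */

static void xmit_path(void)
{
	atomic_fetch_add(&in_flight, 1);
	if (!atomic_load(&stopped))
		puts("transmitting");
	atomic_fetch_sub(&in_flight, 1);
}

static void reset_work(void)
{
	atomic_store(&stopped, true);	/* napi_disable() + netif_stop_queue() */
	while (atomic_load(&in_flight))	/* stand-in for synchronize_sched() */
		;			/* wait for the hot path to drain */
	puts("rings rebuilt");		/* now safe to touch descriptors */
	atomic_store(&stopped, false);	/* napi_enable() + netif_wake_queue() */
}

int main(void)
{
	xmit_path();
	reset_work();
	xmit_path();
	return 0;
}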
@@ -5552,9 +5556,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	RTL_W8(TxPoll, NPQ);
 
+	mmiowb();
+
 	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
 		netif_stop_queue(dev);
-		smp_rmb();
+		smp_mb();
 		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
 			netif_wake_queue(dev);
 	}
@@ -5618,12 +5624,10 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
 
 	rtl8169_hw_reset(tp);
 
-	rtl8169_schedule_work(dev, rtl8169_reinit_task);
+	rtl_schedule_task_bh(tp, RTL_FLAG_TASK_RESET_PENDING);
 }
 
-static void rtl8169_tx_interrupt(struct net_device *dev,
-				 struct rtl8169_private *tp,
-				 void __iomem *ioaddr)
+static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 {
 	unsigned int dirty_tx, tx_left;
 
@@ -5655,7 +5659,7 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
 
 	if (tp->dirty_tx != dirty_tx) {
 		tp->dirty_tx = dirty_tx;
-		smp_wmb();
+		smp_mb();
 		if (netif_queue_stopped(dev) &&
 		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
 			netif_wake_queue(dev);
@@ -5666,9 +5670,11 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
 		 * of start_xmit activity is detected (if it is not detected,
 		 * it is slow enough). -- FR
 		 */
-		smp_rmb();
-		if (tp->cur_tx != dirty_tx)
+		if (tp->cur_tx != dirty_tx) {
+			void __iomem *ioaddr = tp->mmio_addr;
+
 			RTL_W8(TxPoll, NPQ);
+		}
 	}
 }
 
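The two hunks above promote smp_rmb()/smp_wmb() to full smp_mb() on both ends of the stop/wake handshake: the producer must order "stop the queue" before re-reading the free-slot count, and the consumer must order "publish freed slots" before reading the stopped state, otherwise each side can miss the other's update and the queue stays stopped until the watchdog fires. A userspace C11 model of the handshake, with seq_cst fences standing in for smp_mb() (names invented for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int  free_slots = 1;
static atomic_bool queue_stopped;

/* Producer (start_xmit): consume a slot; if the ring looks full, stop
 * the queue, then re-check in case the consumer freed slots between
 * the check and the stop. */
static void producer(void)
{
	if (atomic_fetch_sub(&free_slots, 1) - 1 <= 0) {
		atomic_store(&queue_stopped, true);
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
		if (atomic_load(&free_slots) > 0)
			atomic_store(&queue_stopped, false);	/* wake */
	}
}

/* Consumer (tx completion): publish freed slots first, then look at
 * the stopped flag; the fence keeps the two from being reordered. */
static void consumer(void)
{
	atomic_fetch_add(&free_slots, 1);
	atomic_thread_fence(memory_order_seq_cst);		/* smp_mb() */
	if (atomic_load(&queue_stopped))
		atomic_store(&queue_stopped, false);		/* wake */
}

int main(void)
{
	producer();	/* fills the ring and stops the queue */
	consumer();	/* frees a slot and wakes it */
	printf("stopped=%d\n", atomic_load(&queue_stopped));
	return 0;
}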
@@ -5707,9 +5713,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
 	return skb;
 }
 
-static int rtl8169_rx_interrupt(struct net_device *dev,
-				struct rtl8169_private *tp,
-				void __iomem *ioaddr, u32 budget)
+static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
 {
 	unsigned int cur_rx, rx_left;
 	unsigned int count;
@@ -5737,7 +5741,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
 			if (status & RxCRC)
 				dev->stats.rx_crc_errors++;
 			if (status & RxFOVF) {
-				rtl8169_schedule_work(dev, rtl8169_reset_task);
+				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
 				dev->stats.rx_fifo_errors++;
 			}
 			rtl8169_mark_to_asic(desc, rx_buf_sz);
@@ -5798,101 +5802,120 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
 	struct rtl8169_private *tp = netdev_priv(dev);
-	void __iomem *ioaddr = tp->mmio_addr;
 	int handled = 0;
-	int status;
+	u16 status;
 
-	/* loop handling interrupts until we have no new ones or
-	 * we hit a invalid/hotplug case.
-	 */
-	status = RTL_R16(IntrStatus);
-	while (status && status != 0xffff) {
-		status &= tp->intr_event;
-		if (!status)
-			break;
 
-		handled = 1;
+	status = rtl_get_events(tp);
+	if (status && status != 0xffff) {
+		status &= RTL_EVENT_NAPI | tp->event_slow;
+		if (status) {
+			handled = 1;
 
-		/* Handle all of the error cases first. These will reset
-		 * the chip, so just exit the loop.
-		 */
-		if (unlikely(!netif_running(dev))) {
-			rtl8169_hw_reset(tp);
-			break;
+			rtl_irq_disable(tp);
+			napi_schedule(&tp->napi);
 		}
+	}
+	return IRQ_RETVAL(handled);
+}
 
-		if (unlikely(status & RxFIFOOver)) {
-			switch (tp->mac_version) {
-			/* Work around for rx fifo overflow */
-			case RTL_GIGA_MAC_VER_11:
-				netif_stop_queue(dev);
-				rtl8169_tx_timeout(dev);
-				goto done;
-			default:
-				break;
-			}
-		}
-
-		if (unlikely(status & SYSErr)) {
-			rtl8169_pcierr_interrupt(dev);
-			break;
-		}
-
-		if (status & LinkChg)
-			__rtl8169_check_link_status(dev, tp, ioaddr, true);
-
-		/* We need to see the lastest version of tp->intr_mask to
-		 * avoid ignoring an MSI interrupt and having to wait for
-		 * another event which may never come.
-		 */
-		smp_rmb();
-		if (status & tp->intr_mask & tp->napi_event) {
-			RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
-			tp->intr_mask = ~tp->napi_event;
-
-			if (likely(napi_schedule_prep(&tp->napi)))
-				__napi_schedule(&tp->napi);
-			else
-				netif_info(tp, intr, dev,
-					   "interrupt %04x in poll\n", status);
-		}
-
-		/* We only get a new MSI interrupt when all active irq
-		 * sources on the chip have been acknowledged. So, ack
-		 * everything we've seen and check if new sources have become
-		 * active to avoid blocking all interrupts from the chip.
-		 */
-		RTL_W16(IntrStatus,
-			(status & RxFIFOOver) ? (status | RxOverflow) : status);
-		status = RTL_R16(IntrStatus);
-	}
-done:
-	return IRQ_RETVAL(handled);
+/*
+ * Workqueue context.
+ */
+static void rtl_slow_event_work(struct rtl8169_private *tp)
+{
+	struct net_device *dev = tp->dev;
+	u16 status;
+
+	status = rtl_get_events(tp) & tp->event_slow;
+	rtl_ack_events(tp, status);
+
+	if (unlikely(status & RxFIFOOver)) {
+		switch (tp->mac_version) {
+		/* Work around for rx fifo overflow */
+		case RTL_GIGA_MAC_VER_11:
+			netif_stop_queue(dev);
+			rtl_schedule_task_bh(tp, RTL_FLAG_TASK_RESET_PENDING);
+		default:
+			break;
+		}
+	}
+
+	if (unlikely(status & SYSErr))
+		rtl8169_pcierr_interrupt(dev);
+
+	if (status & LinkChg)
+		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
+
+	napi_disable(&tp->napi);
+	rtl_irq_disable(tp);
+
+	napi_enable(&tp->napi);
+	napi_schedule(&tp->napi);
+}
+
+static void rtl_task(struct work_struct *work)
+{
+	static const struct {
+		int bitnr;
+		void (*action)(struct rtl8169_private *);
+	} rtl_work[] = {
+		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
+		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
+		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
+	};
+	struct rtl8169_private *tp =
+		container_of(work, struct rtl8169_private, wk.work);
+	struct net_device *dev = tp->dev;
+	int i;
+
+	rtl_lock_work(tp);
+
+	if (!netif_running(dev) || !tp->wk.enabled)
+		goto out_unlock;
+
+	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
+		bool pending;
+
+		spin_lock_bh(&tp->lock);
+		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
+		spin_unlock_bh(&tp->lock);
+
+		if (pending)
+			rtl_work[i].action(tp);
+	}
+
+out_unlock:
+	rtl_unlock_work(tp);
 }
 
 static int rtl8169_poll(struct napi_struct *napi, int budget)
 {
 	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
 	struct net_device *dev = tp->dev;
-	void __iomem *ioaddr = tp->mmio_addr;
-	int work_done;
+	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
+	int work_done = 0;
+	u16 status;
+
+	status = rtl_get_events(tp);
+	rtl_ack_events(tp, status & ~tp->event_slow);
+
+	if (status & RTL_EVENT_NAPI_RX)
+		work_done = rtl_rx(dev, tp, (u32) budget);
+
+	if (status & RTL_EVENT_NAPI_TX)
+		rtl_tx(dev, tp);
 
-	work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
-	rtl8169_tx_interrupt(dev, tp, ioaddr);
+	if (status & tp->event_slow) {
+		enable_mask &= ~tp->event_slow;
+
+		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
+	}
 
 	if (work_done < budget) {
 		napi_complete(napi);
 
-		/* We need for force the visibility of tp->intr_mask
-		 * for other CPUs, as we can loose an MSI interrupt
-		 * and potentially wait for a retransmit timeout if we don't.
-		 * The posted write to IntrMask is safe, as it will
-		 * eventually make it to the chip and we won't loose anything
-		 * until it does.
-		 */
-		tp->intr_mask = 0xffff;
-		wmb();
-		RTL_W16(IntrMask, tp->intr_event);
+		rtl_irq_enable(tp, enable_mask);
+		mmiowb();
 	}
 
 	return work_done;
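rtl_task() dispatches through a constant table that maps each pending bit to its handler, claiming one bit at a time with test_and_clear_bit() under the lock so a handler can safely re-arm its own flag. The same table-driven dispatch in standalone C11 (names invented for this sketch):

#include <stdatomic.h>
#include <stdio.h>

enum { FLAG_SLOW, FLAG_RESET, FLAG_PHY };

static atomic_uint pending = (1u << FLAG_RESET) | (1u << FLAG_PHY);

static void slow_work(void)  { puts("slow events"); }
static void reset_work(void) { puts("reset"); }
static void phy_work(void)   { puts("phy"); }

/* Constant bit -> handler table, mirroring rtl_task()'s rtl_work[]. */
static const struct {
	int bitnr;
	void (*action)(void);
} work_table[] = {
	{ FLAG_SLOW,  slow_work  },
	{ FLAG_RESET, reset_work },
	{ FLAG_PHY,   phy_work   },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(work_table) / sizeof(work_table[0]); i++) {
		unsigned mask = 1u << work_table[i].bitnr;

		/* test_and_clear_bit() equivalent: claim the bit atomically. */
		if (atomic_fetch_and(&pending, ~mask) & mask)
			work_table[i].action();
	}
	return 0;
}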
@@ -5916,26 +5939,19 @@ static void rtl8169_down(struct net_device *dev)
 
 	del_timer_sync(&tp->timer);
 
-	netif_stop_queue(dev);
-
 	napi_disable(&tp->napi);
-
-	spin_lock_irq(&tp->lock);
+	netif_stop_queue(dev);
 
 	rtl8169_hw_reset(tp);
 	/*
 	 * At this point device interrupts can not be enabled in any function,
-	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
-	 * rtl8169_reinit_task) and napi is disabled (rtl8169_poll).
+	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
+	 * and napi is disabled (rtl8169_poll).
 	 */
 	rtl8169_rx_missed(dev, ioaddr);
 
-	spin_unlock_irq(&tp->lock);
-
-	synchronize_irq(dev->irq);
-
 	/* Give a racing hard_start_xmit a few cycles to complete. */
-	synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
+	synchronize_sched();
 
 	rtl8169_tx_clear(tp);
 
@@ -5954,7 +5970,11 @@ static int rtl8169_close(struct net_device *dev)
 	/* Update counters before going down */
 	rtl8169_update_counters(dev);
 
+	rtl_lock_work(tp);
+	tp->wk.enabled = false;
+
 	rtl8169_down(dev);
+	rtl_unlock_work(tp);
 
 	free_irq(dev->irq, dev);
 
@@ -5974,7 +5994,6 @@ static void rtl_set_rx_mode(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	unsigned long flags;
 	u32 mc_filter[2];	/* Multicast hash filter */
 	int rx_mode;
 	u32 tmp = 0;
@@ -6003,7 +6022,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
 		}
 	}
 
-	spin_lock_irqsave(&tp->lock, flags);
+	spin_lock_bh(&tp->lock);
 
 	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
 
@@ -6019,7 +6038,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
 
 	RTL_W32(RxConfig, tmp);
 
-	spin_unlock_irqrestore(&tp->lock, flags);
+	spin_unlock_bh(&tp->lock);
 }
 
 /**
@@ -6032,13 +6051,9 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	unsigned long flags;
 
-	if (netif_running(dev)) {
-		spin_lock_irqsave(&tp->lock, flags);
+	if (netif_running(dev))
 		rtl8169_rx_missed(dev, ioaddr);
-		spin_unlock_irqrestore(&tp->lock, flags);
-	}
 
 	return &dev->stats;
 }
@@ -6050,10 +6065,15 @@ static void rtl8169_net_suspend(struct net_device *dev)
 	if (!netif_running(dev))
 		return;
 
-	rtl_pll_power_down(tp);
-
 	netif_device_detach(dev);
 	netif_stop_queue(dev);
+
+	rtl_lock_work(tp);
+	napi_disable(&tp->napi);
+	tp->wk.enabled = false;
+	rtl_unlock_work(tp);
+
+	rtl_pll_power_down(tp);
 }
 
 #ifdef CONFIG_PM
@@ -6076,7 +6096,9 @@ static void __rtl8169_resume(struct net_device *dev)
 
 	rtl_pll_power_up(tp);
 
-	rtl8169_schedule_work(dev, rtl8169_reset_task);
+	tp->wk.enabled = true;
+
+	rtl_schedule_task_bh(tp, RTL_FLAG_TASK_RESET_PENDING);
 }
 
 static int rtl8169_resume(struct device *device)
@@ -6102,10 +6124,10 @@ static int rtl8169_runtime_suspend(struct device *device)
 	if (!tp->TxDescArray)
 		return 0;
 
-	spin_lock_irq(&tp->lock);
+	rtl_lock_work(tp);
 	tp->saved_wolopts = __rtl8169_get_wol(tp);
 	__rtl8169_set_wol(tp, WAKE_ANY);
-	spin_unlock_irq(&tp->lock);
+	rtl_unlock_work(tp);
 
 	rtl8169_net_suspend(dev);
 
@@ -6121,10 +6143,10 @@ static int rtl8169_runtime_resume(struct device *device)
 	if (!tp->TxDescArray)
 		return 0;
 
-	spin_lock_irq(&tp->lock);
+	rtl_lock_work(tp);
 	__rtl8169_set_wol(tp, tp->saved_wolopts);
 	tp->saved_wolopts = 0;
-	spin_unlock_irq(&tp->lock);
+	rtl_unlock_work(tp);
 
 	rtl8169_init_phy(dev, tp);
 
@@ -6192,12 +6214,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
 	/* Restore original MAC address */
 	rtl_rar_set(tp, dev->perm_addr);
 
-	spin_lock_irq(&tp->lock);
-
 	rtl8169_hw_reset(tp);
 
-	spin_unlock_irq(&tp->lock);
-
 	if (system_state == SYSTEM_POWER_OFF) {
 		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
 			rtl_wol_suspend_quirk(tp);