author     Michael Chan <mchan@broadcom.com>       2006-06-18 00:58:45 -0400
committer  David S. Miller <davem@davemloft.net>   2006-06-18 00:58:45 -0400
commit     00b7050426da8e7e58c889c5c80a19920d2d41b3
tree       55a07464fdf81b23fced966feca2b041af77c6f6 /drivers/net
parent     c71302d61f844f766a44e1b04258086cc41f624e
[TG3]: Convert to non-LLTX
Herbert Xu pointed out that it is unsafe to call netif_tx_disable()
from LLTX drivers because it uses dev->xmit_lock to synchronize
whereas LLTX drivers use private locks.
Convert tg3 to non-LLTX to fix this issue. tg3 is a lockless driver
where hard_start_xmit and tx completion handling can run concurrently
under normal conditions. A tx_lock is only needed to prevent
netif_stop_queue and netif_wake_queue race conditions when the queue
is full.
So whether we use LLTX or non-LLTX, it makes practically no
difference.
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
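For context, below is a minimal sketch of the non-LLTX stop/wake pattern this patch adopts. It is not tg3 code: my_priv, my_tx_avail() and MY_TX_WAKEUP_THRESH are hypothetical placeholders for the driver's ring bookkeeping, and the snippet assumes the 2.6.17-era netdev API where the core holds netif_tx_lock around hard_start_xmit. The private tx_lock is taken only around the stop/wake decision, the one place where the xmit path and the completion path could otherwise race.

```c
/* Minimal sketch of the stop/wake pattern described above -- NOT tg3 code.
 * my_priv, my_tx_avail() and MY_TX_WAKEUP_THRESH are hypothetical
 * placeholders standing in for the driver's descriptor-ring bookkeeping.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#define MY_TX_WAKEUP_THRESH	(MAX_SKB_FRAGS + 4)	/* arbitrary example value */

struct my_priv {
	spinlock_t tx_lock;	/* guards only the stop/wake decision */
	/* ... descriptor ring state ... */
};

/* Number of free TX descriptors; implemented elsewhere in the driver. */
static unsigned int my_tx_avail(struct my_priv *priv);

/* Called at the end of hard_start_xmit.  The core already holds
 * netif_tx_lock with BHs disabled, so no trylock/NETDEV_TX_LOCKED
 * dance is needed on this path.
 */
static void my_tx_maybe_stop(struct net_device *dev, struct my_priv *priv)
{
	if (unlikely(my_tx_avail(priv) <= MAX_SKB_FRAGS + 1)) {
		spin_lock(&priv->tx_lock);
		netif_stop_queue(dev);
		/* Re-check under the lock: the completion path may have
		 * freed descriptors between the test above and the stop,
		 * and it would never wake a queue it did not see stopped.
		 */
		if (my_tx_avail(priv) > MY_TX_WAKEUP_THRESH)
			netif_wake_queue(dev);
		spin_unlock(&priv->tx_lock);
	}
}

/* Called from the (lockless) TX completion path, e.g. NAPI poll. */
static void my_tx_maybe_wake(struct net_device *dev, struct my_priv *priv)
{
	if (netif_queue_stopped(dev) &&
	    my_tx_avail(priv) > MY_TX_WAKEUP_THRESH) {
		spin_lock(&priv->tx_lock);
		if (netif_queue_stopped(dev) &&
		    my_tx_avail(priv) > MY_TX_WAKEUP_THRESH)
			netif_wake_queue(dev);
		spin_unlock(&priv->tx_lock);
	}
}
```

Because the core serializes hard_start_xmit with netif_tx_lock, the trylock/NETDEV_TX_LOCKED path that LLTX required disappears, and the private lock shrinks to the queue-full corner case.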
Diffstat (limited to 'drivers/net')
 -rw-r--r--  drivers/net/tg3.c | 27
 -rw-r--r--  drivers/net/tg3.h | 16
 2 files changed, 23 insertions(+), 20 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 542d4c3a10e5..b2ddd4522a87 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3759,14 +3759,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	len = skb_headlen(skb);
 
-	/* No BH disabling for tx_lock here. We are running in BH disabled
-	 * context and TX reclaim runs via tp->poll inside of a software
+	/* We are running in BH disabled context with netif_tx_lock
+	 * and TX reclaim runs via tp->poll inside of a software
 	 * interrupt. Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either. Rejoice!
 	 */
-	if (!spin_trylock(&tp->tx_lock))
-		return NETDEV_TX_LOCKED;
-
 	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
@@ -3775,7 +3772,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
 			       "queue awake!\n", dev->name);
 		}
-		spin_unlock(&tp->tx_lock);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -3858,15 +3854,16 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
+		spin_lock(&tp->tx_lock);
 		netif_stop_queue(dev);
 		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
+		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
 	mmiowb();
-	spin_unlock(&tp->tx_lock);
 
 	dev->trans_start = jiffies;
 
@@ -3885,14 +3882,11 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 
 	len = skb_headlen(skb);
 
-	/* No BH disabling for tx_lock here. We are running in BH disabled
-	 * context and TX reclaim runs via tp->poll inside of a software
+	/* We are running in BH disabled context with netif_tx_lock
+	 * and TX reclaim runs via tp->poll inside of a software
 	 * interrupt. Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either. Rejoice!
 	 */
-	if (!spin_trylock(&tp->tx_lock))
-		return NETDEV_TX_LOCKED;
-
 	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
@@ -3901,7 +3895,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
 			       "queue awake!\n", dev->name);
 		}
-		spin_unlock(&tp->tx_lock);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -4039,15 +4032,16 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
+		spin_lock(&tp->tx_lock);
 		netif_stop_queue(dev);
 		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
+		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
 	mmiowb();
-	spin_unlock(&tp->tx_lock);
 
 	dev->trans_start = jiffies;
 
@@ -11284,7 +11278,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	dev->features |= NETIF_F_LLTX;
 #if TG3_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 	dev->vlan_rx_register = tg3_vlan_rx_register;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 35669e18065c..8209da5dd15f 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2074,12 +2074,22 @@ struct tg3 {
 
 	/* SMP locking strategy:
 	 *
-	 * lock: Held during all operations except TX packet
-	 *       processing.
+	 * lock: Held during reset, PHY access, timer, and when
+	 *       updating tg3_flags and tg3_flags2.
 	 *
-	 * tx_lock: Held during tg3_start_xmit and tg3_tx
+	 * tx_lock: Held during tg3_start_xmit and tg3_tx only
+	 *          when calling netif_[start|stop]_queue.
+	 *          tg3_start_xmit is protected by netif_tx_lock.
 	 *
 	 * Both of these locks are to be held with BH safety.
+	 *
+	 * Because the IRQ handler, tg3_poll, and tg3_start_xmit
+	 * are running lockless, it is necessary to completely
+	 * quiesce the chip with tg3_netif_stop and tg3_full_lock
+	 * before reconfiguring the device.
+	 *
+	 * indirect_lock: Held when accessing registers indirectly
+	 *                with IRQ disabling.
 	 */
 	spinlock_t lock;
 	spinlock_t indirect_lock;