Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/myri10ge/myri10ge.c |  2 +-
-rw-r--r--  drivers/net/tg3.c               | 51 ++++++++++++++++++++++++++++++++-----------------------
-rw-r--r--  drivers/net/tg3.h               |  8 +++-----
3 files changed, 32 insertions, 29 deletions
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 06440a86baef..9bdd43ab3573 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2425,7 +2425,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
 	}
 
 	myri10ge_reset(mgp);
-	myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096);
+	myri10ge_dummy_rdma(mgp, 1);
 
 	/* Save configuration space to be restored if the
 	 * nic resets due to a parity error */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6f97962dd06b..eafabb253f08 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.64"
-#define DRV_MODULE_RELDATE	"July 31, 2006"
+#define DRV_MODULE_VERSION	"3.65"
+#define DRV_MODULE_RELDATE	"August 07, 2006"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -123,9 +123,6 @@
 				 TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
 				 TG3_TX_RING_SIZE)
-#define TX_BUFFS_AVAIL(TP)						\
-	((TP)->tx_pending -						\
-	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
 #define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
@@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp)
 	spin_unlock(&tp->lock);
 }
 
+static inline u32 tg3_tx_avail(struct tg3 *tp)
+{
+	smp_mb();
+	return (tp->tx_pending -
+		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+}
+
 /* Tigon3 never reports partial packet sends.  So we do not
  * need special logic to handle SKBs that have not had all
  * of their frags sent yet, like SunGEM does.
@@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp)
 
 	tp->tx_cons = sw_idx;
 
-	if (unlikely(netif_queue_stopped(tp->dev))) {
-		spin_lock(&tp->tx_lock);
+	/* Need to make the tx_cons update visible to tg3_start_xmit()
+	 * before checking for netif_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that tg3_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_queue_stopped(tp->dev) &&
+		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
+		netif_tx_lock(tp->dev);
 		if (netif_queue_stopped(tp->dev) &&
-		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
+		netif_tx_unlock(tp->dev);
 	}
 }
 
@@ -3101,7 +3113,6 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
 	if (skb == NULL)
 		return -ENOMEM;
 
-	skb->dev = tp->dev;
 	skb_reserve(skb, tp->rx_offset);
 
 	mapping = pci_map_single(tp->pdev, skb->data,
@@ -3274,7 +3285,6 @@ static int tg3_rx(struct tg3 *tp, int budget)
 			if (copy_skb == NULL)
 				goto drop_it_no_recycle;
 
-			copy_skb->dev = tp->dev;
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
@@ -3797,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
@@ -3893,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
-		spin_lock(&tp->tx_lock);
+	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
@@ -3920,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 	struct sk_buff *segs, *nskb;
 
 	/* Estimate the number of fragments in the worst case */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
 		netif_stop_queue(tp->dev);
 		return NETDEV_TX_BUSY;
 	}
@@ -3960,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
@@ -4110,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
-		spin_lock(&tp->tx_lock);
+	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
@@ -11474,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
 #endif
 	spin_lock_init(&tp->lock);
-	spin_lock_init(&tp->tx_lock);
 	spin_lock_init(&tp->indirect_lock);
 	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
 
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ba2c98711c88..3ecf356cfb08 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2079,9 +2079,9 @@ struct tg3 {
 	 * lock: Held during reset, PHY access, timer, and when
 	 *       updating tg3_flags and tg3_flags2.
 	 *
-	 * tx_lock: Held during tg3_start_xmit and tg3_tx only
-	 *          when calling netif_[start|stop]_queue.
-	 *          tg3_start_xmit is protected by netif_tx_lock.
+	 * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
+	 *                netif_tx_lock when it needs to call
+	 *                netif_wake_queue.
 	 *
 	 * Both of these locks are to be held with BH safety.
 	 *
@@ -2118,8 +2118,6 @@ struct tg3 {
 	u32				tx_cons;
 	u32				tx_pending;
 
-	spinlock_t			tx_lock;
-
 	struct tg3_tx_buffer_desc	*tx_ring;
 	struct tx_ring_info		*tx_buffers;
 	dma_addr_t			tx_desc_mapping;
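
Reader's note (not part of the patch): the heart of the tg3 change is the pairing between the smp_mb() inside the new tg3_tx_avail() and the smp_mb() added to tg3_tx(). The comment-style sketch below illustrates the lost-wakeup interleaving that the pair rules out; the two-column timeline and the "avail" shorthand are illustrative reconstructions from the in-patch comment, not driver code.

/*
 * One possible schedule without the barriers (CPU0 in
 * tg3_start_xmit(), CPU1 in tg3_tx(); "avail" abbreviates the
 * tg3_tx_avail() ring-space computation):
 *
 *   CPU0 (tg3_start_xmit)             CPU1 (tg3_tx)
 *   ---------------------             -------------
 *   avail <= MAX_SKB_FRAGS + 1
 *     (reads a stale tx_cons)
 *                                     tp->tx_cons = sw_idx
 *                                       (frees ring space)
 *                                     netif_queue_stopped() == 0
 *                                       (stop not visible yet,
 *                                        so no netif_wake_queue())
 *   netif_stop_queue(dev)
 *   re-check avail: still stale,
 *     so no netif_wake_queue()
 *
 * Neither CPU wakes the queue; if that was the last completion,
 * the queue stays stopped forever.  With the patch, CPU1 does
 * "store tx_cons; smp_mb(); load stopped-flag" while CPU0's
 * re-check does "store stopped-flag; smp_mb(); load tx_cons"
 * (its barrier lives inside tg3_tx_avail()), so at least one
 * side must observe the other's store and issue
 * netif_wake_queue(), taking netif_tx_lock instead of the
 * removed driver-private tx_lock.
 */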
