Diffstat (limited to 'drivers/net/tg3.c')
 drivers/net/tg3.c | 171
 1 file changed, 118 insertions(+), 53 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f645921aff8b..eafabb253f08 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.62"
-#define DRV_MODULE_RELDATE	"June 30, 2006"
+#define DRV_MODULE_VERSION	"3.65"
+#define DRV_MODULE_RELDATE	"August 07, 2006"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -123,9 +123,6 @@
 					  TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
 				 TG3_TX_RING_SIZE)
-#define TX_BUFFS_AVAIL(TP)						\
-	((TP)->tx_pending -						\
-	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
 #define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
@@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp)
 	spin_unlock(&tp->lock);
 }
 
+static inline u32 tg3_tx_avail(struct tg3 *tp)
+{
+	smp_mb();
+	return (tp->tx_pending -
+		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+}
+
 /* Tigon3 never reports partial packet sends.  So we do not
  * need special logic to handle SKBs that have not had all
  * of their frags sent yet, like SunGEM does.
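The new helper keeps the removed TX_BUFFS_AVAIL() arithmetic and adds the barrier discussed in the next hunk. The index math is worth spelling out: tx_prod and tx_cons are free-running u32 counters, and masking their difference with TG3_TX_RING_SIZE - 1 yields the in-flight descriptor count even after the counters wrap, provided the ring size is a power of two. A standalone userspace check of that property (ring size and values are hypothetical, not driver code):

```c
#include <stdio.h>

#define RING_SIZE 512u	/* stands in for TG3_TX_RING_SIZE; power of two */

/* same arithmetic as tg3_tx_avail(), minus the barrier */
static unsigned int tx_avail(unsigned int pending, unsigned int prod,
			     unsigned int cons)
{
	return pending - ((prod - cons) & (RING_SIZE - 1));
}

int main(void)
{
	/* producer has wrapped past UINT_MAX; unsigned subtraction still
	 * yields the 9 in-flight descriptors, so 500 - 9 = 491 are free */
	printf("%u\n", tx_avail(500u, 3u, 4294967290u));	/* prints 491 */
	return 0;
}
```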
@@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp)
 
 	tp->tx_cons = sw_idx;
 
-	if (unlikely(netif_queue_stopped(tp->dev))) {
-		spin_lock(&tp->tx_lock);
+	/* Need to make the tx_cons update visible to tg3_start_xmit()
+	 * before checking for netif_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that tg3_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_queue_stopped(tp->dev) &&
+		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
+		netif_tx_lock(tp->dev);
 		if (netif_queue_stopped(tp->dev) &&
-		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
+		netif_tx_unlock(tp->dev);
 	}
 }
 
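The smp_mb() here pairs with the one inside tg3_tx_avail() on the transmit path. A compile-and-run C11 model of that pairing, with the queue-stopped flag and ring indices as atomics and smp_mb() played by seq_cst fences (all names and numbers illustrative, not driver code):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING	512u
#define THRESH	32u

static atomic_uint prod, cons;
static atomic_bool stopped;

static unsigned int avail(void)
{
	atomic_thread_fence(memory_order_seq_cst);	/* models smp_mb() in tg3_tx_avail() */
	return RING - (atomic_load(&prod) - atomic_load(&cons));
}

/* Models the tail of tg3_tx(): publish the new consumer index, fence,
 * then test the stopped flag.  Without the fence, the flag test could
 * be satisfied by a stale view while a concurrent tg3_start_xmit()
 * stops the queue against the old tx_cons -- the "stopped forever"
 * case the hunk's comment describes. */
static void completion(unsigned int sw_idx)
{
	atomic_store(&cons, sw_idx);
	atomic_thread_fence(memory_order_seq_cst);	/* the added smp_mb() */
	if (atomic_load(&stopped) && avail() > THRESH)
		atomic_store(&stopped, false);		/* netif_wake_queue() */
}

int main(void)
{
	atomic_store(&prod, 600u);
	atomic_store(&stopped, true);
	completion(590u);			/* 512 - (600 - 590) = 502 free */
	printf("stopped=%d\n", (int)atomic_load(&stopped));	/* prints 0 */
	return 0;
}
```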
@@ -3097,11 +3109,10 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
 	 * Callers depend upon this behavior and assume that
 	 * we leave everything unchanged if we fail.
 	 */
-	skb = dev_alloc_skb(skb_size);
+	skb = netdev_alloc_skb(tp->dev, skb_size);
 	if (skb == NULL)
 		return -ENOMEM;
 
-	skb->dev = tp->dev;
 	skb_reserve(skb, tp->rx_offset);
 
 	mapping = pci_map_single(tp->pdev, skb->data,
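netdev_alloc_skb() binds the skb to the device at allocation time, so the hand-written skb->dev assignment goes away here and in the tg3_rx() hunk below. The conversion in miniature (kernel-style fragment, not standalone; any additional per-device allocation behavior is version-dependent and not relied on here):

```c
/* before: allocate, then bind to the device by hand */
skb = dev_alloc_skb(skb_size);
if (skb == NULL)
	return -ENOMEM;
skb->dev = tp->dev;

/* after: one call does both */
skb = netdev_alloc_skb(tp->dev, skb_size);
if (skb == NULL)
	return -ENOMEM;
```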
@@ -3270,11 +3281,10 @@ static int tg3_rx(struct tg3 *tp, int budget)
 			tg3_recycle_rx(tp, opaque_key,
 				       desc_idx, *post_ptr);
 
-			copy_skb = dev_alloc_skb(len + 2);
+			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
 			if (copy_skb == NULL)
 				goto drop_it_no_recycle;
 
-			copy_skb->dev = tp->dev;
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
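Unchanged context worth noting: the copy-break path still reserves 2 bytes before copying the packet. With a 14-byte Ethernet header, that offset lands the IP header on a 4-byte boundary, which several architectures want for cheap word loads:

```c
/* Buffer layout after skb_reserve(copy_skb, 2):
 *
 *   offset  0..1   two-byte pad (the classic NET_IP_ALIGN trick)
 *   offset  2..15  Ethernet header, 14 bytes
 *   offset 16..    IP header, now 4-byte aligned (2 + 14 == 16)
 */
```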
@@ -3590,6 +3600,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
 static int tg3_init_hw(struct tg3 *, int);
 static int tg3_halt(struct tg3 *, int, int);
 
+/* Restart hardware after configuration changes, self-test, etc.
+ * Invoked with tp->lock held.
+ */
+static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+{
+	int err;
+
+	err = tg3_init_hw(tp, reset_phy);
+	if (err) {
+		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
+		       "aborting.\n", tp->dev->name);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		tg3_full_unlock(tp);
+		del_timer_sync(&tp->timer);
+		tp->irq_sync = 0;
+		netif_poll_enable(tp->dev);
+		dev_close(tp->dev);
+		tg3_full_lock(tp, 0);
+	}
+	return err;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void tg3_poll_controller(struct net_device *dev)
 {
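The failure leg drops the locks because dev_close() can sleep and must not run under tp->lock; the timer and polling are quiesced first so nothing races the teardown. Every call site converted in the rest of this patch then collapses to one shape, assembled here from the ethtool hunks below:

```c
/* canonical caller shape of tg3_restart_hw() */
tg3_full_lock(tp, irq_sync);

tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
err = tg3_restart_hw(tp, 1);
if (!err)
	tg3_netif_start(tp);

tg3_full_unlock(tp);

return err;
```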
@@ -3630,13 +3662,15 @@ static void tg3_reset_task(void *_data)
 	}
 
 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
-	tg3_init_hw(tp, 1);
+	if (tg3_init_hw(tp, 1))
+		goto out;
 
 	tg3_netif_start(tp);
 
 	if (restart_timer)
 		mod_timer(&tp->timer, jiffies + 1);
 
+out:
 	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
 
 	tg3_full_unlock(tp);
@@ -3773,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
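The nr_frags + 1 bound holds because each tg3 TX descriptor maps one contiguous DMA segment: one slot for the linear area at skb->data, plus one per page fragment. As a helper it would read (illustrative only; the driver keeps the expression inline):

```c
/* minimum descriptors an skb can consume on the tg3 TX ring */
static unsigned int tg3_slots_for_skb(const struct sk_buff *skb)
{
	return 1 + skb_shinfo(skb)->nr_frags;
}
```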
@@ -3869,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
-		spin_lock(&tp->tx_lock);
+	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
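Stopping first and then possibly waking right away looks odd but is deliberate: a completion can drain the ring between the availability test and netif_stop_queue(), and that completion may have found the queue still running and skipped its wakeup. A sketch of the interleaving being guarded against:

```c
/*
 *   CPU0 (tg3_start_xmit)            CPU1 (tg3_tx completion)
 *   tg3_tx_avail() too low
 *                                    tx_cons advances, ring drains
 *                                    queue not stopped yet -> no wake
 *   netif_stop_queue(dev)
 *   tg3_tx_avail() > WAKEUP_THRESH   (the barrier in tg3_tx_avail()
 *   -> netif_wake_queue(dev)          makes the new tx_cons visible)
 *
 * Without the recheck, the queue could stay stopped with a nearly
 * empty ring until something else kicked it.
 */
```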
@@ -3896,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 	struct sk_buff *segs, *nskb;
 
 	/* Estimate the number of fragments in the worst case */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
 		netif_stop_queue(tp->dev);
 		return NETDEV_TX_BUSY;
 	}
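tg3_tso_bug() segments an oversized TSO frame and queues each resulting piece itself, so it must budget ring space for all segments up front; the hunk's own comment calls three descriptors per segment a worst-case estimate. Distilled (illustrative helper, not driver code):

```c
/* Defer a GSO skb that will be split into nsegs segments unless the
 * worst case fits; e.g. nsegs = 16 demands more than 48 free slots. */
static int tso_must_defer(unsigned int avail, unsigned int nsegs)
{
	return avail <= nsegs * 3;
}
```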
@@ -3936,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
@@ -4086,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
-		spin_lock(&tp->tx_lock);
+	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
@@ -4124,6 +4154,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct tg3 *tp = netdev_priv(dev);
+	int err;
 
 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
 		return -EINVAL;
@@ -4144,13 +4175,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 
 	tg3_set_mtu(dev, tp, new_mtu);
 
-	tg3_init_hw(tp, 0);
+	err = tg3_restart_hw(tp, 0);
 
-	tg3_netif_start(tp);
+	if (!err)
+		tg3_netif_start(tp);
 
 	tg3_full_unlock(tp);
 
-	return 0;
+	return err;
 }
 
 /* Free up pending packets in all rx/tx rings.
@@ -4232,7 +4264,7 @@ static void tg3_free_rings(struct tg3 *tp)
  * end up in the driver.  tp->{tx,}lock are held and thus
  * we may not sleep.
  */
-static void tg3_init_rings(struct tg3 *tp)
+static int tg3_init_rings(struct tg3 *tp)
 {
 	u32 i;
 
@@ -4281,18 +4313,38 @@ static void tg3_init_rings(struct tg3 *tp)
 
 	/* Now allocate fresh SKBs for each rx ring. */
 	for (i = 0; i < tp->rx_pending; i++) {
-		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
-				     -1, i) < 0)
+		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+			printk(KERN_WARNING PFX
+			       "%s: Using a smaller RX standard ring, "
+			       "only %d out of %d buffers were allocated "
+			       "successfully.\n",
+			       tp->dev->name, i, tp->rx_pending);
+			if (i == 0)
+				return -ENOMEM;
+			tp->rx_pending = i;
 			break;
+		}
 	}
 
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
 		for (i = 0; i < tp->rx_jumbo_pending; i++) {
 			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
-					     -1, i) < 0)
+					     -1, i) < 0) {
+				printk(KERN_WARNING PFX
+				       "%s: Using a smaller RX jumbo ring, "
+				       "only %d out of %d buffers were "
+				       "allocated successfully.\n",
+				       tp->dev->name, i, tp->rx_jumbo_pending);
+				if (i == 0) {
+					tg3_free_rings(tp);
+					return -ENOMEM;
+				}
+				tp->rx_jumbo_pending = i;
 				break;
+			}
 		}
 	}
+	return 0;
 }
 
 /*
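Rather than failing the whole bring-up when an rx ring cannot be fully populated, tg3_init_rings() now shrinks the ring to however many buffers were allocated and only returns -ENOMEM when not a single buffer fit. A userspace model of that fallback (illustrative names and numbers, not driver code):

```c
#include <stdio.h>

/* shrink the ring to whatever fits; fail only at zero buffers */
static int init_ring(unsigned int *pending, unsigned int can_alloc)
{
	unsigned int i;

	for (i = 0; i < *pending; i++) {
		if (i >= can_alloc) {	/* models tg3_alloc_rx_skb() failing */
			fprintf(stderr,
				"using a smaller ring: %u of %u buffers\n",
				i, *pending);
			if (i == 0)
				return -1;	/* stands in for -ENOMEM */
			*pending = i;		/* shrink, then carry on */
			break;
		}
	}
	return 0;
}

int main(void)
{
	unsigned int pending = 200;

	if (init_ring(&pending, 64) == 0)	/* only 64 allocs succeed */
		printf("ring brought up with %u buffers\n", pending);
	return 0;
}
```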
@@ -5815,6 +5867,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
 {
 	struct tg3 *tp = netdev_priv(dev);
 	struct sockaddr *addr = p;
+	int err = 0;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EINVAL;
@@ -5832,9 +5885,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
 		tg3_full_lock(tp, 1);
 
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-		tg3_init_hw(tp, 0);
-
-		tg3_netif_start(tp);
+		err = tg3_restart_hw(tp, 0);
+		if (!err)
+			tg3_netif_start(tp);
 		tg3_full_unlock(tp);
 	} else {
 		spin_lock_bh(&tp->lock);
@@ -5842,7 +5895,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
 		spin_unlock_bh(&tp->lock);
 	}
 
-	return 0;
+	return err;
 }
 
 /* tp->lock is held. */
@@ -5942,7 +5995,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	 * can only do this after the hardware has been
 	 * successfully reset.
 	 */
-	tg3_init_rings(tp);
+	err = tg3_init_rings(tp);
+	if (err)
+		return err;
 
 	/* This value is determined during the probe time DMA
 	 * engine test, tg3_test_dma.
@@ -7956,7 +8011,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 {
 	struct tg3 *tp = netdev_priv(dev);
-	int irq_sync = 0;
+	int irq_sync = 0, err = 0;
 
 	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
 	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
@@ -7980,13 +8035,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 
 	if (netif_running(dev)) {
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-		tg3_init_hw(tp, 1);
-		tg3_netif_start(tp);
+		err = tg3_restart_hw(tp, 1);
+		if (!err)
+			tg3_netif_start(tp);
 	}
 
 	tg3_full_unlock(tp);
 
-	return 0;
+	return err;
 }
 
 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
@@ -8001,7 +8057,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
 {
 	struct tg3 *tp = netdev_priv(dev);
-	int irq_sync = 0;
+	int irq_sync = 0, err = 0;
 
 	if (netif_running(dev)) {
 		tg3_netif_stop(tp);
@@ -8025,13 +8081,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 
 	if (netif_running(dev)) {
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-		tg3_init_hw(tp, 1);
-		tg3_netif_start(tp);
+		err = tg3_restart_hw(tp, 1);
+		if (!err)
+			tg3_netif_start(tp);
 	}
 
 	tg3_full_unlock(tp);
 
-	return 0;
+	return err;
 }
 
 static u32 tg3_get_rx_csum(struct net_device *dev)
@@ -8567,7 +8624,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 	err = -EIO;
 
 	tx_len = 1514;
-	skb = dev_alloc_skb(tx_len);
+	skb = netdev_alloc_skb(tp->dev, tx_len);
 	if (!skb)
 		return -ENOMEM;
 
@@ -8666,7 +8723,9 @@ static int tg3_test_loopback(struct tg3 *tp)
 	if (!netif_running(tp->dev))
 		return TG3_LOOPBACK_FAILED;
 
-	tg3_reset_hw(tp, 1);
+	err = tg3_reset_hw(tp, 1);
+	if (err)
+		return TG3_LOOPBACK_FAILED;
 
 	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
 		err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8740,8 +8799,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 		if (netif_running(dev)) {
 			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
-			tg3_init_hw(tp, 1);
-			tg3_netif_start(tp);
+			if (!tg3_restart_hw(tp, 1))
+				tg3_netif_start(tp);
 		}
 
 		tg3_full_unlock(tp);
@@ -10078,6 +10137,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	static struct pci_device_id write_reorder_chipsets[] = {
 		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
 			     PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
+			     PCI_DEVICE_ID_AMD_8131_BRIDGE) },
 		{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
 			     PCI_DEVICE_ID_VIA_8385_0) },
 		{ },
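This table lists host bridges known to reorder posted writes; adding the AMD-8131 makes tg3 apply its mailbox-write workaround behind that bridge as well. From memory of the surrounding tg3_get_invariants() code, which this diff does not show, the table is consumed roughly like this (treat the flag names and condition as approximate):

```c
/* sketch: flag the device if any listed write-reordering bridge is
 * present in the system and the NIC is not on PCI Express */
if (pci_dev_present(write_reorder_chipsets) &&
    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
	tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
```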
@@ -11419,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
 #endif
 	spin_lock_init(&tp->lock);
-	spin_lock_init(&tp->tx_lock);
 	spin_lock_init(&tp->indirect_lock);
 	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
 
@@ -11697,7 +11757,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 		tg3_full_lock(tp, 0);
 
 		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
-		tg3_init_hw(tp, 1);
+		if (tg3_restart_hw(tp, 1))
+			goto out;
 
 		tp->timer.expires = jiffies + tp->timer_offset;
 		add_timer(&tp->timer);
@@ -11705,6 +11766,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
 		netif_device_attach(dev);
 		tg3_netif_start(tp);
 
+out:
 		tg3_full_unlock(tp);
 	}
 
@@ -11731,16 +11793,19 @@ static int tg3_resume(struct pci_dev *pdev)
 	tg3_full_lock(tp, 0);
 
 	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
-	tg3_init_hw(tp, 1);
+	err = tg3_restart_hw(tp, 1);
+	if (err)
+		goto out;
 
 	tp->timer.expires = jiffies + tp->timer_offset;
 	add_timer(&tp->timer);
 
 	tg3_netif_start(tp);
 
+out:
 	tg3_full_unlock(tp);
 
-	return 0;
+	return err;
 }
 
 static struct pci_driver tg3_driver = {