about summary refs log tree commit diff stats
path: root/drivers/net/tg3.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--  drivers/net/tg3.c  107
1 files changed, 97 insertions, 10 deletions
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6bea2b89a915..953255e92633 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.60" 71#define DRV_MODULE_VERSION "3.61"
72#define DRV_MODULE_RELDATE "June 17, 2006" 72#define DRV_MODULE_RELDATE "June 29, 2006"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -3194,7 +3194,7 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3194 */ 3194 */
3195static int tg3_rx(struct tg3 *tp, int budget) 3195static int tg3_rx(struct tg3 *tp, int budget)
3196{ 3196{
3197 u32 work_mask; 3197 u32 work_mask, rx_std_posted = 0;
3198 u32 sw_idx = tp->rx_rcb_ptr; 3198 u32 sw_idx = tp->rx_rcb_ptr;
3199 u16 hw_idx; 3199 u16 hw_idx;
3200 int received; 3200 int received;
@@ -3221,6 +3221,7 @@ static int tg3_rx(struct tg3 *tp, int budget)
3221 mapping); 3221 mapping);
3222 skb = tp->rx_std_buffers[desc_idx].skb; 3222 skb = tp->rx_std_buffers[desc_idx].skb;
3223 post_ptr = &tp->rx_std_ptr; 3223 post_ptr = &tp->rx_std_ptr;
3224 rx_std_posted++;
3224 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 3225 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3225 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx], 3226 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3226 mapping); 3227 mapping);
@@ -3308,6 +3309,15 @@ static int tg3_rx(struct tg3 *tp, int budget)
3308 3309
3309next_pkt: 3310next_pkt:
3310 (*post_ptr)++; 3311 (*post_ptr)++;
3312
3313 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3314 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3315
3316 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3317 TG3_64BIT_REG_LOW, idx);
3318 work_mask &= ~RXD_OPAQUE_RING_STD;
3319 rx_std_posted = 0;
3320 }
3311next_pkt_nopost: 3321next_pkt_nopost:
3312 sw_idx++; 3322 sw_idx++;
3313 sw_idx %= TG3_RX_RCB_RING_SIZE(tp); 3323 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
@@ -3869,6 +3879,40 @@ out_unlock:
3869 return NETDEV_TX_OK; 3879 return NETDEV_TX_OK;
3870} 3880}
3871 3881
3882#if TG3_TSO_SUPPORT != 0
3883static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3884
3885/* Use GSO to workaround a rare TSO bug that may be triggered when the
3886 * TSO header is greater than 80 bytes.
3887 */
3888static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3889{
3890 struct sk_buff *segs, *nskb;
3891
3892 /* Estimate the number of fragments in the worst case */
3893 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3894 netif_stop_queue(tp->dev);
3895 return NETDEV_TX_BUSY;
3896 }
3897
3898 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3899 if (unlikely(IS_ERR(segs)))
3900 goto tg3_tso_bug_end;
3901
3902 do {
3903 nskb = segs;
3904 segs = segs->next;
3905 nskb->next = NULL;
3906 tg3_start_xmit_dma_bug(nskb, tp->dev);
3907 } while (segs);
3908
3909tg3_tso_bug_end:
3910 dev_kfree_skb(skb);
3911
3912 return NETDEV_TX_OK;
3913}
3914#endif
3915
3872/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and 3916/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3873 * support TG3_FLG2_HW_TSO_1 or firmware TSO only. 3917 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3874 */ 3918 */
@@ -3905,7 +3949,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3905 mss = 0; 3949 mss = 0;
3906 if (skb->len > (tp->dev->mtu + ETH_HLEN) && 3950 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3907 (mss = skb_shinfo(skb)->gso_size) != 0) { 3951 (mss = skb_shinfo(skb)->gso_size) != 0) {
3908 int tcp_opt_len, ip_tcp_len; 3952 int tcp_opt_len, ip_tcp_len, hdr_len;
3909 3953
3910 if (skb_header_cloned(skb) && 3954 if (skb_header_cloned(skb) &&
3911 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 3955 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -3916,11 +3960,16 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3916 tcp_opt_len = ((skb->h.th->doff - 5) * 4); 3960 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3917 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); 3961 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3918 3962
3963 hdr_len = ip_tcp_len + tcp_opt_len;
3964 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
3965 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
3966 return (tg3_tso_bug(tp, skb));
3967
3919 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 3968 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3920 TXD_FLAG_CPU_POST_DMA); 3969 TXD_FLAG_CPU_POST_DMA);
3921 3970
3922 skb->nh.iph->check = 0; 3971 skb->nh.iph->check = 0;
3923 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); 3972 skb->nh.iph->tot_len = htons(mss + hdr_len);
3924 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 3973 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3925 skb->h.th->check = 0; 3974 skb->h.th->check = 0;
3926 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 3975 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
@@ -5980,7 +6029,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5980 } 6029 }
5981 6030
5982 /* Setup replenish threshold. */ 6031 /* Setup replenish threshold. */
5983 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8); 6032 val = tp->rx_pending / 8;
6033 if (val == 0)
6034 val = 1;
6035 else if (val > tp->rx_std_max_post)
6036 val = tp->rx_std_max_post;
6037
6038 tw32(RCVBDI_STD_THRESH, val);
5984 6039
5985 /* Initialize TG3_BDINFO's at: 6040 /* Initialize TG3_BDINFO's at:
5986 * RCVDBDI_STD_BD: standard eth size rx ring 6041 * RCVDBDI_STD_BD: standard eth size rx ring
@@ -6140,8 +6195,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6140#endif 6195#endif
6141 6196
6142 /* Receive/send statistics. */ 6197 /* Receive/send statistics. */
6143 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 6198 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6144 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { 6199 val = tr32(RCVLPC_STATS_ENABLE);
6200 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6201 tw32(RCVLPC_STATS_ENABLE, val);
6202 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6203 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6145 val = tr32(RCVLPC_STATS_ENABLE); 6204 val = tr32(RCVLPC_STATS_ENABLE);
6146 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 6205 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6147 tw32(RCVLPC_STATS_ENABLE, val); 6206 tw32(RCVLPC_STATS_ENABLE, val);
@@ -8737,6 +8796,9 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8737{ 8796{
8738 struct tg3 *tp = netdev_priv(dev); 8797 struct tg3 *tp = netdev_priv(dev);
8739 8798
8799 if (netif_running(dev))
8800 tg3_netif_stop(tp);
8801
8740 tg3_full_lock(tp, 0); 8802 tg3_full_lock(tp, 0);
8741 8803
8742 tp->vlgrp = grp; 8804 tp->vlgrp = grp;
@@ -8745,16 +8807,25 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8745 __tg3_set_rx_mode(dev); 8807 __tg3_set_rx_mode(dev);
8746 8808
8747 tg3_full_unlock(tp); 8809 tg3_full_unlock(tp);
8810
8811 if (netif_running(dev))
8812 tg3_netif_start(tp);
8748} 8813}
8749 8814
8750static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 8815static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8751{ 8816{
8752 struct tg3 *tp = netdev_priv(dev); 8817 struct tg3 *tp = netdev_priv(dev);
8753 8818
8819 if (netif_running(dev))
8820 tg3_netif_stop(tp);
8821
8754 tg3_full_lock(tp, 0); 8822 tg3_full_lock(tp, 0);
8755 if (tp->vlgrp) 8823 if (tp->vlgrp)
8756 tp->vlgrp->vlan_devices[vid] = NULL; 8824 tp->vlgrp->vlan_devices[vid] = NULL;
8757 tg3_full_unlock(tp); 8825 tg3_full_unlock(tp);
8826
8827 if (netif_running(dev))
8828 tg3_netif_start(tp);
8758} 8829}
8759#endif 8830#endif
8760 8831
@@ -10159,8 +10230,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10159 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) { 10230 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10160 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; 10231 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10161 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 10232 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10162 } else 10233 } else {
10163 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1; 10234 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10235 TG3_FLG2_HW_TSO_1_BUG;
10236 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10237 ASIC_REV_5750 &&
10238 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10239 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10240 }
10164 } 10241 }
10165 10242
10166 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && 10243 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
@@ -10532,6 +10609,16 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10532 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) 10609 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10533 tp->rx_offset = 0; 10610 tp->rx_offset = 0;
10534 10611
10612 tp->rx_std_max_post = TG3_RX_RING_SIZE;
10613
10614 /* Increment the rx prod index on the rx std ring by at most
10615 * 8 for these chips to workaround hw errata.
10616 */
10617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10618 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10619 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10620 tp->rx_std_max_post = 8;
10621
10535 /* By default, disable wake-on-lan. User can change this 10622 /* By default, disable wake-on-lan. User can change this
10536 * using ETHTOOL_SWOL. 10623 * using ETHTOOL_SWOL.
10537 */ 10624 */