aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/tg3.c
diff options
context:
space:
mode:
authorMichael Chan <mchan@broadcom.com>2006-03-21 01:28:05 -0500
committerDavid S. Miller <davem@davemloft.net>2006-03-21 01:28:05 -0500
commit5a6f3074c2ea5a7b4ff5b18f0e1fd9b1257e1a29 (patch)
tree4bc683f620e3176ae70932f5f865fe47423eb083 /drivers/net/tg3.c
parent1b27777a9b9b2b6d1c06000b7a31262d198b4238 (diff)
[TG3]: Add new hard_start_xmit
Support 5787 hardware TSO using a new flag TG3_FLG2_HW_TSO_2. Since the TSO interface is slightly different and these chips have finally fixed the 4GB DMA problem and do not have the 40-bit DMA problem, a new hard_start_xmit is used for these chips. All previous chips will use the old hard_start_xmit that is now renamed tg3_start_xmit_dma_bug().

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--drivers/net/tg3.c143
1 file changed, 139 insertions(+), 4 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 9cd8613625f8..d4035de6440e 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3655,11 +3655,139 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
3655 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; 3655 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3656} 3656}
3657 3657
3658/* hard_start_xmit for devices that don't have any bugs and
3659 * support TG3_FLG2_HW_TSO_2 only.
3660 */
3658static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 3661static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3659{ 3662{
3660 struct tg3 *tp = netdev_priv(dev); 3663 struct tg3 *tp = netdev_priv(dev);
3661 dma_addr_t mapping; 3664 dma_addr_t mapping;
3662 u32 len, entry, base_flags, mss; 3665 u32 len, entry, base_flags, mss;
3666
3667 len = skb_headlen(skb);
3668
3669 /* No BH disabling for tx_lock here. We are running in BH disabled
3670 * context and TX reclaim runs via tp->poll inside of a software
3671 * interrupt. Furthermore, IRQ processing runs lockless so we have
3672 * no IRQ context deadlocks to worry about either. Rejoice!
3673 */
3674 if (!spin_trylock(&tp->tx_lock))
3675 return NETDEV_TX_LOCKED;
3676
3677 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3678 if (!netif_queue_stopped(dev)) {
3679 netif_stop_queue(dev);
3680
3681 /* This is a hard error, log it. */
3682 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3683 "queue awake!\n", dev->name);
3684 }
3685 spin_unlock(&tp->tx_lock);
3686 return NETDEV_TX_BUSY;
3687 }
3688
3689 entry = tp->tx_prod;
3690 base_flags = 0;
3691#if TG3_TSO_SUPPORT != 0
3692 mss = 0;
3693 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3694 (mss = skb_shinfo(skb)->tso_size) != 0) {
3695 int tcp_opt_len, ip_tcp_len;
3696
3697 if (skb_header_cloned(skb) &&
3698 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3699 dev_kfree_skb(skb);
3700 goto out_unlock;
3701 }
3702
3703 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3704 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3705
3706 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3707 TXD_FLAG_CPU_POST_DMA);
3708
3709 skb->nh.iph->check = 0;
3710 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3711
3712 skb->h.th->check = 0;
3713
3714 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3715 }
3716 else if (skb->ip_summed == CHECKSUM_HW)
3717 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3718#else
3719 mss = 0;
3720 if (skb->ip_summed == CHECKSUM_HW)
3721 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3722#endif
3723#if TG3_VLAN_TAG_USED
3724 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3725 base_flags |= (TXD_FLAG_VLAN |
3726 (vlan_tx_tag_get(skb) << 16));
3727#endif
3728
3729 /* Queue skb data, a.k.a. the main skb fragment. */
3730 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3731
3732 tp->tx_buffers[entry].skb = skb;
3733 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3734
3735 tg3_set_txd(tp, entry, mapping, len, base_flags,
3736 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3737
3738 entry = NEXT_TX(entry);
3739
3740 /* Now loop through additional data fragments, and queue them. */
3741 if (skb_shinfo(skb)->nr_frags > 0) {
3742 unsigned int i, last;
3743
3744 last = skb_shinfo(skb)->nr_frags - 1;
3745 for (i = 0; i <= last; i++) {
3746 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3747
3748 len = frag->size;
3749 mapping = pci_map_page(tp->pdev,
3750 frag->page,
3751 frag->page_offset,
3752 len, PCI_DMA_TODEVICE);
3753
3754 tp->tx_buffers[entry].skb = NULL;
3755 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3756
3757 tg3_set_txd(tp, entry, mapping, len,
3758 base_flags, (i == last) | (mss << 1));
3759
3760 entry = NEXT_TX(entry);
3761 }
3762 }
3763
3764 /* Packets are ready, update Tx producer idx local and on card. */
3765 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3766
3767 tp->tx_prod = entry;
3768 if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3769 netif_stop_queue(dev);
3770 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3771 netif_wake_queue(tp->dev);
3772 }
3773
3774out_unlock:
3775 mmiowb();
3776 spin_unlock(&tp->tx_lock);
3777
3778 dev->trans_start = jiffies;
3779
3780 return NETDEV_TX_OK;
3781}
3782
3783/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3784 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3785 */
3786static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3787{
3788 struct tg3 *tp = netdev_priv(dev);
3789 dma_addr_t mapping;
3790 u32 len, entry, base_flags, mss;
3663 int would_hit_hwbug; 3791 int would_hit_hwbug;
3664 3792
3665 len = skb_headlen(skb); 3793 len = skb_headlen(skb);
@@ -9811,8 +9939,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9811 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 9939 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9812 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; 9940 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9813 9941
9814 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 9942 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
9815 tp->tg3_flags2 |= TG3_FLG2_HW_TSO; 9943 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9944 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
9945 else
9946 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
9947 }
9816 9948
9817 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && 9949 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9818 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && 9950 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
@@ -10163,10 +10295,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10163 else 10295 else
10164 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 10296 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10165 10297
10166 /* It seems all chips can get confused if TX buffers 10298 /* All chips before 5787 can get confused if TX buffers
10167 * straddle the 4GB address boundary in some cases. 10299 * straddle the 4GB address boundary in some cases.
10168 */ 10300 */
10169 tp->dev->hard_start_xmit = tg3_start_xmit; 10301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10302 tp->dev->hard_start_xmit = tg3_start_xmit;
10303 else
10304 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10170 10305
10171 tp->rx_offset = 2; 10306 tp->rx_offset = 2;
10172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 10307 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&