author		Matt Carlson <mcarlson@broadcom.com>	2011-05-19 08:12:44 -0400
committer	David S. Miller <davem@davemloft.net>	2011-05-19 17:59:59 -0400
commit		2ffcc981d823a0518c627ca22d51ef72d0b7ca9a
tree		72847aea34b0f898c4d7b43e05aec466c21a6159 /drivers/net/tg3.c
parent		5b5ed8afe48ca6916daabf9822e7a19fc19fdab4
tg3: Set tx bug flags for more devices
It has been recently discovered that all tg3 devices have a 4Gb boundary
DMA problem, and that all 5755 and newer devices can't handle fragments
less than or equal to 8 bytes in size.  This patch adjusts the flags and
removes tg3_start_xmit().  tg3_start_xmit_dma_bug() has been renamed to
tg3_start_xmit().

Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Reviewed-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--	drivers/net/tg3.c	262
1 file changed, 33 insertions(+), 229 deletions(-)
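Background on the erratum this patch addresses: a TX buffer whose DMA mapping starts just below a 4 GB physical-address boundary and ends at or beyond it has different upper 32 address bits at its first and last byte, and the affected chips can mishandle such a buffer when it is described by a single descriptor. The sketch below only illustrates that condition using standard C types; the helper name is hypothetical and is not code from this patch or from tg3.c.

/*
 * Hypothetical helper, for illustration only: report whether a DMA
 * mapping of 'len' bytes straddles a 4 GB address boundary, i.e.
 * whether the upper 32 address bits of its first and last byte differ.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool dma_crosses_4g_boundary(uint64_t mapping, uint32_t len)
{
	if (len == 0)
		return false;
	return (mapping >> 32) != ((mapping + len - 1) >> 32);
}

A mapping that trips a check of this kind cannot be handed to the affected DMA engines as-is; the tigon3_dma_hwbug_workaround() path seen in the diff below exists to work around the 4GB and 40-bit DMA bugs, and this patch makes every chip take that route by setting 4G_DMA_BNDRY_BUG unconditionally in tg3_get_invariants().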
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index d5a1f9e3794c..4c441682a291 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -5735,7 +5735,28 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 #endif
 }
 
-static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
+static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
+			dma_addr_t mapping, int len, u32 flags,
+			u32 mss_and_is_end)
+{
+	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
+	int is_end = (mss_and_is_end & 0x1);
+	u32 mss = (mss_and_is_end >> 1);
+	u32 vlan_tag = 0;
+
+	if (is_end)
+		flags |= TXD_FLAG_END;
+	if (flags & TXD_FLAG_VLAN) {
+		vlan_tag = flags >> 16;
+		flags &= 0xffff;
+	}
+	vlan_tag |= (mss << TXD_MSS_SHIFT);
+
+	txd->addr_hi = ((u64) mapping >> 32);
+	txd->addr_lo = ((u64) mapping & 0xffffffff);
+	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
+	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
+}
 
 /* Workaround 4GB and 40-bit hardware DMA bugs. */
 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
@@ -5818,202 +5839,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 	return ret;
 }
 
-static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
-			dma_addr_t mapping, int len, u32 flags,
-			u32 mss_and_is_end)
-{
-	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
-	int is_end = (mss_and_is_end & 0x1);
-	u32 mss = (mss_and_is_end >> 1);
-	u32 vlan_tag = 0;
-
-	if (is_end)
-		flags |= TXD_FLAG_END;
-	if (flags & TXD_FLAG_VLAN) {
-		vlan_tag = flags >> 16;
-		flags &= 0xffff;
-	}
-	vlan_tag |= (mss << TXD_MSS_SHIFT);
-
-	txd->addr_hi = ((u64) mapping >> 32);
-	txd->addr_lo = ((u64) mapping & 0xffffffff);
-	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
-	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
-}
-
-/* hard_start_xmit for devices that don't have any bugs and
- * support TG3_FLAG_HW_TSO_2 and TG3_FLAG_HW_TSO_3 only.
- */
-static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
-				  struct net_device *dev)
-{
-	struct tg3 *tp = netdev_priv(dev);
-	u32 len, entry, base_flags, mss;
-	dma_addr_t mapping;
-	struct tg3_napi *tnapi;
-	struct netdev_queue *txq;
-	unsigned int i, last;
-
-	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
-	if (tg3_flag(tp, ENABLE_TSS))
-		tnapi++;
-
-	/* We are running in BH disabled context with netif_tx_lock
-	 * and TX reclaim runs via tp->napi.poll inside of a software
-	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
-	 * no IRQ context deadlocks to worry about either.  Rejoice!
-	 */
-	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
-		if (!netif_tx_queue_stopped(txq)) {
-			netif_tx_stop_queue(txq);
-
-			/* This is a hard error, log it. */
-			netdev_err(dev,
-				   "BUG! Tx Ring full when queue awake!\n");
-		}
-		return NETDEV_TX_BUSY;
-	}
-
-	entry = tnapi->tx_prod;
-	base_flags = 0;
-	mss = skb_shinfo(skb)->gso_size;
-	if (mss) {
-		int tcp_opt_len, ip_tcp_len;
-		u32 hdrlen;
-
-		if (skb_header_cloned(skb) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
-			dev_kfree_skb(skb);
-			goto out_unlock;
-		}
-
-		if (skb_is_gso_v6(skb)) {
-			hdrlen = skb_headlen(skb) - ETH_HLEN;
-		} else {
-			struct iphdr *iph = ip_hdr(skb);
-
-			tcp_opt_len = tcp_optlen(skb);
-			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
-
-			iph->check = 0;
-			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
-			hdrlen = ip_tcp_len + tcp_opt_len;
-		}
-
-		if (tg3_flag(tp, HW_TSO_3)) {
-			mss |= (hdrlen & 0xc) << 12;
-			if (hdrlen & 0x10)
-				base_flags |= 0x00000010;
-			base_flags |= (hdrlen & 0x3e0) << 5;
-		} else
-			mss |= hdrlen << 9;
-
-		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
-			       TXD_FLAG_CPU_POST_DMA);
-
-		tcp_hdr(skb)->check = 0;
-
-	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		base_flags |= TXD_FLAG_TCPUDP_CSUM;
-	}
-
-	if (vlan_tx_tag_present(skb))
-		base_flags |= (TXD_FLAG_VLAN |
-			       (vlan_tx_tag_get(skb) << 16));
-
-	len = skb_headlen(skb);
-
-	/* Queue skb data, a.k.a. the main skb fragment. */
-	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(tp->pdev, mapping)) {
-		dev_kfree_skb(skb);
-		goto out_unlock;
-	}
-
-	tnapi->tx_buffers[entry].skb = skb;
-	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
-
-	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
-	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
-		base_flags |= TXD_FLAG_JMB_PKT;
-
-	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
-		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
-
-	entry = NEXT_TX(entry);
-
-	/* Now loop through additional data fragments, and queue them. */
-	if (skb_shinfo(skb)->nr_frags > 0) {
-		last = skb_shinfo(skb)->nr_frags - 1;
-		for (i = 0; i <= last; i++) {
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-			len = frag->size;
-			mapping = pci_map_page(tp->pdev,
-					       frag->page,
-					       frag->page_offset,
-					       len, PCI_DMA_TODEVICE);
-			if (pci_dma_mapping_error(tp->pdev, mapping))
-				goto dma_error;
-
-			tnapi->tx_buffers[entry].skb = NULL;
-			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
-					   mapping);
-
-			tg3_set_txd(tnapi, entry, mapping, len,
-				    base_flags, (i == last) | (mss << 1));
-
-			entry = NEXT_TX(entry);
-		}
-	}
-
-	/* Packets are ready, update Tx producer idx local and on card. */
-	tw32_tx_mbox(tnapi->prodmbox, entry);
-
-	tnapi->tx_prod = entry;
-	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
-		netif_tx_stop_queue(txq);
-
-		/* netif_tx_stop_queue() must be done before checking
-		 * checking tx index in tg3_tx_avail() below, because in
-		 * tg3_tx(), we update tx index before checking for
-		 * netif_tx_queue_stopped().
-		 */
-		smp_mb();
-		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
-			netif_tx_wake_queue(txq);
-	}
-
-out_unlock:
-	mmiowb();
-
-	return NETDEV_TX_OK;
-
-dma_error:
-	last = i;
-	entry = tnapi->tx_prod;
-	tnapi->tx_buffers[entry].skb = NULL;
-	pci_unmap_single(tp->pdev,
-			 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
-			 skb_headlen(skb),
-			 PCI_DMA_TODEVICE);
-	for (i = 0; i <= last; i++) {
-		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		entry = NEXT_TX(entry);
-
-		pci_unmap_page(tp->pdev,
-			       dma_unmap_addr(&tnapi->tx_buffers[entry],
-					      mapping),
-			       frag->size, PCI_DMA_TODEVICE);
-	}
-
-	dev_kfree_skb(skb);
-	return NETDEV_TX_OK;
-}
-
-static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
-					  struct net_device *);
+static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
 /* Use GSO to workaround a rare TSO bug that may be triggered when the
  * TSO header is greater than 80 bytes.
@@ -6047,7 +5873,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 		nskb = segs;
 		segs = segs->next;
 		nskb->next = NULL;
-		tg3_start_xmit_dma_bug(nskb, tp->dev);
+		tg3_start_xmit(nskb, tp->dev);
 	} while (segs);
 
 tg3_tso_bug_end:
@@ -6059,8 +5885,7 @@ tg3_tso_bug_end:
 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
  */
-static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
-					  struct net_device *dev)
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
 	u32 len, entry, base_flags, mss;
@@ -13857,14 +13682,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 		}
 	}
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+	/* All chips can get confused if TX buffers
+	 * straddle the 4GB address boundary.
+	 */
+	tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
+
+	if (tg3_flag(tp, 5755_PLUS))
 		tg3_flag_set(tp, SHORT_DMA_BUG);
-	else if (!tg3_flag(tp, 5755_PLUS)) {
-		tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
+	else
 		tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
-	}
 
 	if (tg3_flag(tp, 5717_PLUS))
 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
@@ -15092,23 +14918,6 @@ static const struct net_device_ops tg3_netdev_ops = {
 #endif
 };
 
-static const struct net_device_ops tg3_netdev_ops_dma_bug = {
-	.ndo_open		= tg3_open,
-	.ndo_stop		= tg3_close,
-	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
-	.ndo_get_stats64	= tg3_get_stats64,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_multicast_list	= tg3_set_rx_mode,
-	.ndo_set_mac_address	= tg3_set_mac_addr,
-	.ndo_do_ioctl		= tg3_ioctl,
-	.ndo_tx_timeout		= tg3_tx_timeout,
-	.ndo_change_mtu		= tg3_change_mtu,
-	.ndo_set_features	= tg3_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= tg3_poll_controller,
-#endif
-};
-
 static int __devinit tg3_init_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -15205,6 +15014,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 
 	dev->ethtool_ops = &tg3_ethtool_ops;
 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
+	dev->netdev_ops = &tg3_netdev_ops;
 	dev->irq = pdev->irq;
 
 	err = tg3_get_invariants(tp);
@@ -15214,12 +15024,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		goto err_out_iounmap;
 	}
 
-	if (tg3_flag(tp, 5755_PLUS) && !tg3_flag(tp, 5717_PLUS))
-		dev->netdev_ops = &tg3_netdev_ops;
-	else
-		dev->netdev_ops = &tg3_netdev_ops_dma_bug;
-
-
 	/* The EPB bridge inside 5714, 5715, and 5780 and any
 	 * device behind the EPB cannot support DMA addresses > 40-bit.
 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.