 drivers/net/tg3.c | 49 +++++++++++++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 20 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7f816a0ee421..b93ba3d2192a 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -5924,7 +5924,7 @@ static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
 }
 
-static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 entry,
+static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
 			    dma_addr_t map, u32 len, u32 flags,
 			    u32 mss, u32 vlan)
 {
@@ -5940,7 +5940,14 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 entry,
 	if (tg3_40bit_overflow_test(tp, map, len))
 		hwbug = 1;
 
-	tg3_tx_set_bd(&tnapi->tx_ring[entry], map, len, flags, mss, vlan);
+	if (*budget) {
+		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+			      len, flags, mss, vlan);
+		(*budget)--;
+	} else
+		hwbug = 1;
+
+	*entry = NEXT_TX(*entry);
 
 	return hwbug;
 }
@@ -5986,12 +5993,12 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
 /* Workaround 4GB and 40-bit hardware DMA bugs. */
 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 				       struct sk_buff *skb,
+				       u32 *entry, u32 *budget,
 				       u32 base_flags, u32 mss, u32 vlan)
 {
 	struct tg3 *tp = tnapi->tp;
 	struct sk_buff *new_skb;
 	dma_addr_t new_addr = 0;
-	u32 entry = tnapi->tx_prod;
 	int ret = 0;
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
@@ -6017,14 +6024,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 	} else {
 		base_flags |= TXD_FLAG_END;
 
-		tnapi->tx_buffers[entry].skb = new_skb;
-		dma_unmap_addr_set(&tnapi->tx_buffers[entry],
+		tnapi->tx_buffers[*entry].skb = new_skb;
+		dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
 				   mapping, new_addr);
 
-		if (tg3_tx_frag_set(tnapi, entry, new_addr,
+		if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
 				    new_skb->len, base_flags,
 				    mss, vlan)) {
-			tg3_tx_skb_unmap(tnapi, entry, 0);
+			tg3_tx_skb_unmap(tnapi, *entry, 0);
 			dev_kfree_skb(new_skb);
 			ret = -1;
 		}
@@ -6086,6 +6093,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
 	u32 len, entry, base_flags, mss, vlan = 0;
+	u32 budget;
 	int i = -1, would_hit_hwbug;
 	dma_addr_t mapping;
 	struct tg3_napi *tnapi;
@@ -6097,12 +6105,14 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (tg3_flag(tp, ENABLE_TSS))
 		tnapi++;
 
+	budget = tg3_tx_avail(tnapi);
+
 	/* We are running in BH disabled context with netif_tx_lock
 	 * and TX reclaim runs via tp->napi.poll inside of a software
 	 * interrupt. Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either. Rejoice!
 	 */
-	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_tx_queue_stopped(txq)) {
 			netif_tx_stop_queue(txq);
 
@@ -6214,13 +6224,11 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (tg3_flag(tp, 5701_DMA_BUG))
 		would_hit_hwbug = 1;
 
-	if (tg3_tx_frag_set(tnapi, entry, mapping, len, base_flags |
+	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
 			    ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
 			    mss, vlan))
 		would_hit_hwbug = 1;
 
-	entry = NEXT_TX(entry);
-
 	/* Now loop through additional data fragments, and queue them. */
 	if (skb_shinfo(skb)->nr_frags > 0) {
 		u32 tmp_mss = mss;
@@ -6246,12 +6254,11 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			if (pci_dma_mapping_error(tp->pdev, mapping))
 				goto dma_error;
 
-			if (tg3_tx_frag_set(tnapi, entry, mapping, len,
-				    base_flags | ((i == last) ? TXD_FLAG_END : 0),
+			if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
+					    len, base_flags |
+					    ((i == last) ? TXD_FLAG_END : 0),
 				    tmp_mss, vlan))
 				would_hit_hwbug = 1;
-
-			entry = NEXT_TX(entry);
 		}
 	}
 
@@ -6261,11 +6268,11 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* If the workaround fails due to memory/mapping
 		 * failure, silently drop this packet.
 		 */
-		if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags,
-						mss, vlan))
+		entry = tnapi->tx_prod;
+		budget = tg3_tx_avail(tnapi);
+		if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
+						base_flags, mss, vlan))
 			goto out_unlock;
-
-		entry = NEXT_TX(tnapi->tx_prod);
 	}
 
 	skb_tx_timestamp(skb);
@@ -11206,6 +11213,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
 {
 	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
+	u32 budget;
 	struct sk_buff *skb, *rx_skb;
 	u8 *tx_data;
 	dma_addr_t map;
@@ -11376,7 +11384,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
 
 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
 
-	if (tg3_tx_frag_set(tnapi, tnapi->tx_prod, map, tx_len,
+	budget = tg3_tx_avail(tnapi);
+	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
 			    base_flags | TXD_FLAG_END, mss, 0)) {
 		tnapi->tx_buffers[val].skb = NULL;
 		dev_kfree_skb(skb);
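
The heart of the change is the budget bookkeeping added to tg3_tx_frag_set(): the caller samples tg3_tx_avail() once, the helper consumes one unit of that budget for every descriptor it writes, reports failure the same way as a DMA hwbug when the budget runs out, and advances the ring index either way. Below is a minimal standalone sketch of that pattern, not driver code; the ring size, struct, and function names are invented for illustration only.

/*
 * Minimal sketch of the descriptor-budget pattern used by the new
 * tg3_tx_frag_set(): check the budget before writing a descriptor,
 * decrement it on success, flag failure when it is exhausted, and
 * always advance the ring index (like NEXT_TX()).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8			/* hypothetical ring size */
#define NEXT_IDX(i) (((i) + 1) & (RING_SIZE - 1))

struct demo_bd {			/* stand-in for a TX buffer descriptor */
	uint64_t addr;
	uint32_t len;
};

static struct demo_bd ring[RING_SIZE];

/* Mirrors the new calling convention: entry and budget passed by pointer;
 * a true return means "cannot place this descriptor". */
static bool demo_frag_set(uint32_t *entry, uint32_t *budget,
			  uint64_t map, uint32_t len)
{
	bool hwbug = false;

	if (*budget) {
		ring[*entry].addr = map;	/* place the descriptor */
		ring[*entry].len = len;
		(*budget)--;			/* one descriptor consumed */
	} else {
		hwbug = true;			/* budget exhausted */
	}

	*entry = NEXT_IDX(*entry);		/* advance regardless */
	return hwbug;
}

int main(void)
{
	uint32_t entry = 0;
	uint32_t budget = 3;	/* pretend the avail check returned 3 */

	for (int frag = 0; frag < 5; frag++) {
		bool fail = demo_frag_set(&entry, &budget,
					  0x1000u * (uint32_t)frag, 64);
		printf("frag %d: %s (budget now %u)\n",
		       frag, fail ? "no room" : "queued", (unsigned)budget);
	}
	return 0;
}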