author		Matt Carlson <mcarlson@broadcom.com>	2009-08-28 10:03:21 -0400
committer	David S. Miller <davem@davemloft.net>	2009-08-29 18:43:04 -0400
commit		f3f3f27e5b4e27737f824535e6f145a3c88b976c (patch)
tree		042a57424aca2fa1ff5736518321740070da767c
parent		723344820aa405ac2663ab9e36fd27833d06129b (diff)
tg3: Move per-int tx members to a per-int struct
This patch moves the tx_prod, tx_cons, tx_pending, tx_ring, and tx_buffers
transmit ring device members to a per-interrupt structure. It also adds a
new transmit producer mailbox member (prodmbox) and converts the code to use
it rather than a preprocessor constant.

Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/tg3.c	150
-rw-r--r--	drivers/net/tg3.h	15
2 files changed, 86 insertions, 79 deletions
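At a glance, the change keeps the transmit-ring bookkeeping next to the NAPI context that services it rather than in the shared struct tg3, so a later multi-vector setup can hold one TX ring per interrupt. The snippet below is a simplified, self-contained model of that layout and of the ring-space arithmetic the patch moves into the per-interrupt struct (compare tg3_tx_avail() and TG3_TX_WAKEUP_THRESH() in the hunks that follow). It is an illustrative sketch only: napi_tx_model, tx_avail, tx_wakeup_thresh, and TX_RING_SIZE are stand-in names, not identifiers from the driver.

/* Simplified model of the per-interrupt TX state introduced by this patch;
 * field names mirror the driver, but this is not the driver source.
 */
#include <stdint.h>

#define TX_RING_SIZE 512	/* stand-in for TG3_TX_RING_SIZE (a power of two) */

struct napi_tx_model {
	uint32_t tx_prod;	/* producer index, advanced by start_xmit */
	uint32_t tx_cons;	/* consumer index, advanced by TX completion */
	uint32_t tx_pending;	/* configured ring depth (set via ethtool -G) */
	uint32_t prodmbox;	/* this ring's TX producer mailbox register */
};

/* Same arithmetic as tg3_tx_avail(): free descriptors left in this ring. */
static inline uint32_t tx_avail(const struct napi_tx_model *tnapi)
{
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TX_RING_SIZE - 1));
}

/* Same arithmetic as TG3_TX_WAKEUP_THRESH(): wake the queue once at least a
 * quarter of the configured ring is free again.
 */
static inline uint32_t tx_wakeup_thresh(const struct napi_tx_model *tnapi)
{
	return tnapi->tx_pending / 4;
}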
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 939574c571f7..e36b0b259722 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -136,7 +136,7 @@
 #define TG3_RX_JMB_MAP_SZ	TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 
 /* minimum number of free TX descriptors required to wake up TX process */
-#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)
+#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
 
 #define TG3_RAW_IP_ALIGN 2
 
@@ -656,7 +656,7 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
 			work_exists = 1;
 	}
 	/* check for RX/TX work to do */
-	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
+	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
 	    sblk->idx[0].rx_producer != tnapi->rx_rcb_ptr)
 		work_exists = 1;
 
@@ -4269,11 +4269,11 @@ static void tg3_tx_recover(struct tg3 *tp)
 	spin_unlock(&tp->lock);
 }
 
-static inline u32 tg3_tx_avail(struct tg3 *tp)
+static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
 {
 	smp_mb();
-	return (tp->tx_pending -
-		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+	return tnapi->tx_pending -
+	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
 }
 
 /* Tigon3 never reports partial packet sends. So we do not
@@ -4284,10 +4284,10 @@ static void tg3_tx(struct tg3_napi *tnapi)
 {
 	struct tg3 *tp = tnapi->tp;
 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
-	u32 sw_idx = tp->tx_cons;
+	u32 sw_idx = tnapi->tx_cons;
 
 	while (sw_idx != hw_idx) {
-		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
+		struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
 		struct sk_buff *skb = ri->skb;
 		int i, tx_bug = 0;
 
@@ -4303,7 +4303,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
 		sw_idx = NEXT_TX(sw_idx);
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			ri = &tp->tx_buffers[sw_idx];
+			ri = &tnapi->tx_buffers[sw_idx];
 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
 				tx_bug = 1;
 			sw_idx = NEXT_TX(sw_idx);
@@ -4317,7 +4317,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
 		}
 	}
 
-	tp->tx_cons = sw_idx;
+	tnapi->tx_cons = sw_idx;
 
 	/* Need to make the tx_cons update visible to tg3_start_xmit()
 	 * before checking for netif_queue_stopped(). Without the
@@ -4327,10 +4327,10 @@ static void tg3_tx(struct tg3_napi *tnapi)
 	smp_mb();
 
 	if (unlikely(netif_queue_stopped(tp->dev) &&
-		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
+		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
 		netif_tx_lock(tp->dev);
 		if (netif_queue_stopped(tp->dev) &&
-		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
+		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
 			netif_wake_queue(tp->dev);
 		netif_tx_unlock(tp->dev);
 	}
@@ -4668,7 +4668,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 	}
 
 	/* run TX completion thread */
-	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
+	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
 		tg3_tx(tnapi);
 		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
 			return work_done;
@@ -5044,13 +5044,14 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 #endif
 }
 
-static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
+static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
 
 /* Workaround 4GB and 40-bit hardware DMA bugs. */
 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 				       u32 last_plus_one, u32 *start,
 				       u32 base_flags, u32 mss)
 {
+	struct tg3_napi *tnapi = &tp->napi[0];
 	struct sk_buff *new_skb;
 	dma_addr_t new_addr = 0;
 	u32 entry = *start;
@@ -5085,7 +5086,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 			dev_kfree_skb(new_skb);
 			new_skb = NULL;
 		} else {
-			tg3_set_txd(tp, entry, new_addr, new_skb->len,
+			tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
 				    base_flags, 1 | (mss << 1));
 			*start = NEXT_TX(entry);
 		}
@@ -5094,11 +5095,10 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 	/* Now clean up the sw ring entries. */
 	i = 0;
 	while (entry != last_plus_one) {
-		if (i == 0) {
-			tp->tx_buffers[entry].skb = new_skb;
-		} else {
-			tp->tx_buffers[entry].skb = NULL;
-		}
+		if (i == 0)
+			tnapi->tx_buffers[entry].skb = new_skb;
+		else
+			tnapi->tx_buffers[entry].skb = NULL;
 		entry = NEXT_TX(entry);
 		i++;
 	}
@@ -5109,11 +5109,11 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 	return ret;
 }
 
-static void tg3_set_txd(struct tg3 *tp, int entry,
+static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
 			dma_addr_t mapping, int len, u32 flags,
 			u32 mss_and_is_end)
 {
-	struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
+	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
 	int is_end = (mss_and_is_end & 0x1);
 	u32 mss = (mss_and_is_end >> 1);
 	u32 vlan_tag = 0;
@@ -5141,6 +5141,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 len, entry, base_flags, mss;
 	struct skb_shared_info *sp;
 	dma_addr_t mapping;
+	struct tg3_napi *tnapi = &tp->napi[0];
 
 	len = skb_headlen(skb);
 
@@ -5149,7 +5150,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt. Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either. Rejoice!
 	 */
-	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
@@ -5160,7 +5161,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	entry = tp->tx_prod;
+	entry = tnapi->tx_prod;
 	base_flags = 0;
 	mss = 0;
 	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
@@ -5208,9 +5209,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	mapping = sp->dma_head;
 
-	tp->tx_buffers[entry].skb = skb;
+	tnapi->tx_buffers[entry].skb = skb;
 
-	tg3_set_txd(tp, entry, mapping, len, base_flags,
+	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
 
 	entry = NEXT_TX(entry);
@@ -5225,9 +5226,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		len = frag->size;
 		mapping = sp->dma_maps[i];
-		tp->tx_buffers[entry].skb = NULL;
+		tnapi->tx_buffers[entry].skb = NULL;
 
-		tg3_set_txd(tp, entry, mapping, len,
+		tg3_set_txd(tnapi, entry, mapping, len,
 			    base_flags, (i == last) | (mss << 1));
 
 		entry = NEXT_TX(entry);
@@ -5235,12 +5236,12 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* Packets are ready, update Tx producer idx local and on card. */
-	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
+	tw32_tx_mbox(tnapi->prodmbox, entry);
 
-	tp->tx_prod = entry;
-	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
+	tnapi->tx_prod = entry;
+	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
+		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 			netif_wake_queue(tp->dev);
 	}
 
@@ -5258,11 +5259,12 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 {
 	struct sk_buff *segs, *nskb;
+	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
 
 	/* Estimate the number of fragments in the worst case */
-	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
 		netif_stop_queue(tp->dev);
-		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
+		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
 			return NETDEV_TX_BUSY;
 
 		netif_wake_queue(tp->dev);
@@ -5295,6 +5297,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	struct skb_shared_info *sp;
 	int would_hit_hwbug;
 	dma_addr_t mapping;
+	struct tg3_napi *tnapi = &tp->napi[0];
 
 	len = skb_headlen(skb);
 
@@ -5303,7 +5306,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt. Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either. Rejoice!
 	 */
-	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
@@ -5314,7 +5317,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	entry = tp->tx_prod;
+	entry = tnapi->tx_prod;
 	base_flags = 0;
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
@@ -5384,7 +5387,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 
 	mapping = sp->dma_head;
 
-	tp->tx_buffers[entry].skb = skb;
+	tnapi->tx_buffers[entry].skb = skb;
 
 	would_hit_hwbug = 0;
 
@@ -5393,7 +5396,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	else if (tg3_4g_overflow_test(mapping, len))
 		would_hit_hwbug = 1;
 
-	tg3_set_txd(tp, entry, mapping, len, base_flags,
+	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
 
 	entry = NEXT_TX(entry);
@@ -5409,7 +5412,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 		len = frag->size;
 		mapping = sp->dma_maps[i];
 
-		tp->tx_buffers[entry].skb = NULL;
+		tnapi->tx_buffers[entry].skb = NULL;
 
 		if (tg3_4g_overflow_test(mapping, len))
 			would_hit_hwbug = 1;
@@ -5418,10 +5421,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 			would_hit_hwbug = 1;
 
 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
-			tg3_set_txd(tp, entry, mapping, len,
+			tg3_set_txd(tnapi, entry, mapping, len,
 				    base_flags, (i == last)|(mss << 1));
 		else
-			tg3_set_txd(tp, entry, mapping, len,
+			tg3_set_txd(tnapi, entry, mapping, len,
 				    base_flags, (i == last));
 
 		entry = NEXT_TX(entry);
@@ -5446,12 +5449,12 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* Packets are ready, update Tx producer idx local and on card. */
-	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
+	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
 
-	tp->tx_prod = entry;
-	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
+	tnapi->tx_prod = entry;
+	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
+		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 			netif_wake_queue(tp->dev);
 	}
 
@@ -5522,8 +5525,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 static void tg3_rx_prodring_free(struct tg3 *tp,
 				 struct tg3_rx_prodring_set *tpr)
 {
-	struct ring_info *rxp;
 	int i;
+	struct ring_info *rxp;
 
 	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
 		rxp = &tpr->rx_std_buffers[i];
@@ -5710,13 +5713,14 @@ err_out:
  */
 static void tg3_free_rings(struct tg3 *tp)
 {
+	struct tg3_napi *tnapi = &tp->napi[0];
 	int i;
 
 	for (i = 0; i < TG3_TX_RING_SIZE; ) {
 		struct tx_ring_info *txp;
 		struct sk_buff *skb;
 
-		txp = &tp->tx_buffers[i];
+		txp = &tnapi->tx_buffers[i];
 		skb = txp->skb;
 
 		if (skb == NULL) {
@@ -5751,7 +5755,7 @@ static int tg3_init_rings(struct tg3 *tp)
 	tg3_free_rings(tp);
 
 	/* Zero out all descriptors. */
-	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
+	memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
 
 	tnapi->rx_rcb_ptr = 0;
 	memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
@@ -5767,12 +5771,12 @@ static void tg3_free_consistent(struct tg3 *tp)
 {
 	struct tg3_napi *tnapi = &tp->napi[0];
 
-	kfree(tp->tx_buffers);
-	tp->tx_buffers = NULL;
-	if (tp->tx_ring) {
+	kfree(tnapi->tx_buffers);
+	tnapi->tx_buffers = NULL;
+	if (tnapi->tx_ring) {
 		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
-				    tp->tx_ring, tp->tx_desc_mapping);
-		tp->tx_ring = NULL;
+				    tnapi->tx_ring, tnapi->tx_desc_mapping);
+		tnapi->tx_ring = NULL;
 	}
 	if (tnapi->rx_rcb) {
 		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
@@ -5804,14 +5808,14 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 	if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
 		return -ENOMEM;
 
-	tp->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
+	tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
 				 TG3_TX_RING_SIZE, GFP_KERNEL);
-	if (!tp->tx_buffers)
+	if (!tnapi->tx_buffers)
 		goto err_out;
 
-	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
-					   &tp->tx_desc_mapping);
-	if (!tp->tx_ring)
+	tnapi->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
+					      &tnapi->tx_desc_mapping);
+	if (!tnapi->tx_ring)
 		goto err_out;
 
 	tnapi->hw_status = pci_alloc_consistent(tp->pdev,
@@ -7094,13 +7098,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 			       BDINFO_FLAGS_DISABLED);
 	}
 
-	tp->tx_prod = 0;
-	tp->tx_cons = 0;
-	tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
+	tp->napi[0].tx_prod = 0;
+	tp->napi[0].tx_cons = 0;
 	tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
 
+	val = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+	tw32_mailbox(val, 0);
+
 	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
-		       tp->tx_desc_mapping,
+		       tp->napi[0].tx_desc_mapping,
 		       (TG3_TX_RING_SIZE <<
 			BDINFO_FLAGS_MAXLEN_SHIFT),
 		       NIC_SRAM_TX_BUFFER_DESC);
@@ -9093,7 +9099,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
 	else
 		ering->rx_jumbo_pending = 0;
 
-	ering->tx_pending = tp->tx_pending;
+	ering->tx_pending = tp->napi[0].tx_pending;
 }
 
 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
@@ -9123,7 +9129,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 	    tp->rx_pending > 63)
 		tp->rx_pending = 63;
 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
-	tp->tx_pending = ering->tx_pending;
+	tp->napi[0].tx_pending = ering->tx_pending;
 
 	if (netif_running(dev)) {
 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -9928,14 +9934,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
 	num_pkts = 0;
 
-	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
+	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
 
-	tp->tx_prod++;
+	tnapi->tx_prod++;
 	num_pkts++;
 
-	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
-		     tp->tx_prod);
-	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
+	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
+	tr32_mailbox(tnapi->prodmbox);
 
 	udelay(10);
 
@@ -9948,7 +9953,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
-		if ((tx_idx == tp->tx_prod) &&
+		if ((tx_idx == tnapi->tx_prod) &&
 		    (rx_idx == (rx_start_idx + num_pkts)))
 			break;
 	}
@@ -9956,7 +9961,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
 	dev_kfree_skb(skb);
 
-	if (tx_idx != tp->tx_prod)
+	if (tx_idx != tnapi->tx_prod)
 		goto out;
 
 	if (rx_idx != rx_start_idx + num_pkts)
@@ -13415,11 +13420,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 
 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
-	tp->tx_pending = TG3_DEF_TX_RING_PENDING;
 
 	tp->napi[0].tp = tp;
 	tp->napi[0].int_mbox = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
 	tp->napi[0].consmbox = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+	tp->napi[0].prodmbox = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+	tp->napi[0].tx_pending = TG3_DEF_TX_RING_PENDING;
 	netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
 	dev->ethtool_ops = &tg3_ethtool_ops;
 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index b91ac29ae163..a816b2c0f167 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2497,13 +2497,21 @@ struct tg3_napi {
 	u32 last_tag;
 	u32 last_irq_tag;
 	u32 int_mbox;
+	u32 tx_prod;
+	u32 tx_cons;
+	u32 tx_pending;
+	u32 prodmbox;
+
 	u32 consmbox;
 	u32 rx_rcb_ptr;
 
 	struct tg3_rx_buffer_desc *rx_rcb;
+	struct tg3_tx_buffer_desc *tx_ring;
+	struct tx_ring_info *tx_buffers;
 
 	dma_addr_t status_mapping;
 	dma_addr_t rx_rcb_mapping;
+	dma_addr_t tx_desc_mapping;
 };
 
 struct tg3 {
@@ -2563,13 +2571,6 @@ struct tg3 {
 	/* begin "tx thread" cacheline section */
 	void (*write32_tx_mbox) (struct tg3 *, u32,
 				 u32);
-	u32 tx_prod;
-	u32 tx_cons;
-	u32 tx_pending;
-
-	struct tg3_tx_buffer_desc *tx_ring;
-	struct tx_ring_info *tx_buffers;
-	dma_addr_t tx_desc_mapping;
 
 	/* begin "rx thread" cacheline section */
 	struct tg3_napi napi[TG3_IRQ_MAX_VECS];