author      Matt Carlson <mcarlson@broadcom.com>    2009-11-13 08:03:35 -0500
committer   David S. Miller <davem@davemloft.net>   2009-11-16 01:14:37 -0500
commit      24f4efd4e6c89a4093d0b8653d6669e45de45001 (patch)
tree        98b671463db5330ae239266844412a1636668377 /drivers/net/tg3.c
parent      87668d352aa8d135bd695a050f18bbfc7b50b506 (diff)
tg3: Napify tg3_start_xmit_dma_bug()
This patch converts tg3_start_xmit_dma_bug() to accommodate multiple NAPI
instances. This is prep work for a later patch in this series.
Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Reviewed-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
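
For orientation before the diff, here is a minimal sketch of the per-queue selection this patch adds at the top of tg3_start_xmit_dma_bug(). It only restates what the second hunk below introduces; the tg3-private names (struct tg3_napi, tp->tg3_flags2, TG3_FLG2_USING_MSIX) come from the driver itself, while netdev_get_tx_queue(), skb_get_queue_mapping(), and the netif_tx_*_queue() helpers are standard netdev API.

        struct netdev_queue *txq;
        struct tg3_napi *tnapi;

        /* Select the software TX queue and the matching tg3_napi instance
         * from the skb's queue mapping instead of hard-coding tp->napi[0].
         */
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
                tnapi++;        /* per this patch: with MSI-X, skip past tp->napi[0] */

        /* Queue flow control then goes through the per-queue helpers,
         * netif_tx_stop_queue(txq) / netif_tx_wake_queue(txq), rather than
         * the whole-device netif_stop_queue(dev) / netif_wake_queue(dev).
         */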
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--   drivers/net/tg3.c   32
1 file changed, 19 insertions, 13 deletions
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e1f4a18ebb81..072e3ee4c93b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -5119,11 +5119,11 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
 
 /* Workaround 4GB and 40-bit hardware DMA bugs. */
-static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
-                                       u32 last_plus_one, u32 *start,
-                                       u32 base_flags, u32 mss)
+static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+                                       struct sk_buff *skb, u32 last_plus_one,
+                                       u32 *start, u32 base_flags, u32 mss)
 {
-        struct tg3_napi *tnapi = &tp->napi[0];
+        struct tg3 *tp = tnapi->tp;
         struct sk_buff *new_skb;
         dma_addr_t new_addr = 0;
         u32 entry = *start;
@@ -5392,9 +5392,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
         struct skb_shared_info *sp;
         int would_hit_hwbug;
         dma_addr_t mapping;
-        struct tg3_napi *tnapi = &tp->napi[0];
+        struct tg3_napi *tnapi;
+        struct netdev_queue *txq;
 
-        len = skb_headlen(skb);
+        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
+        if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+                tnapi++;
 
         /* We are running in BH disabled context with netif_tx_lock
          * and TX reclaim runs via tp->napi.poll inside of a software
@@ -5402,8 +5406,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
          * no IRQ context deadlocks to worry about either. Rejoice!
          */
         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
-                if (!netif_queue_stopped(dev)) {
-                        netif_stop_queue(dev);
+                if (!netif_tx_queue_stopped(txq)) {
+                        netif_tx_stop_queue(txq);
 
                         /* This is a hard error, log it. */
                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
@@ -5416,7 +5420,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
         base_flags = 0;
         if (skb->ip_summed == CHECKSUM_PARTIAL)
                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
-        mss = 0;
+
         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                 struct iphdr *iph;
                 u32 tcp_opt_len, ip_tcp_len, hdr_len;
@@ -5488,6 +5492,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 
         would_hit_hwbug = 0;
 
+        len = skb_headlen(skb);
+
         if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
                 would_hit_hwbug = 1;
 
@@ -5553,7 +5559,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
                 /* If the workaround fails due to memory/mapping
                  * failure, silently drop this packet.
                  */
-                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
+                if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
                                                 &start, base_flags, mss))
                         goto out_unlock;
 
@@ -5561,13 +5567,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
         }
 
         /* Packets are ready, update Tx producer idx local and on card. */
-        tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
+        tw32_tx_mbox(tnapi->prodmbox, entry);
 
         tnapi->tx_prod = entry;
         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
-                netif_stop_queue(dev);
+                netif_tx_stop_queue(txq);
                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
-                        netif_wake_queue(tp->dev);
+                        netif_tx_wake_queue(txq);
         }
 
 out_unlock: