author     Michael Chan <mchan@broadcom.com>  2005-09-17 03:46:27 -0400
committer  David S. Miller <davem@davemloft.net>  2005-09-17 03:46:27 -0400
commit     c58ec93245a1fb7354f9e331960380827b9f41db
tree       0118b0145dc991b2f703279e5712a591a0bad153 /drivers
parent     eb8edb085716621605cc2e7131a6369d2223d992
[TG3]: Fix 4GB boundary tx handling
Fix and simplify the workaround code for the 4GB boundary tx buffer
hardware bug:
1. Need to unmap the original SKB's DMA addresses if a new SKB cannot
be allocated.
2. Need to pass the base flags to tigon3_4gb_hwbug_workaround(), or TSO
won't work properly.
3. The guilty entry and length parameters of
tigon3_4gb_hwbug_workaround() are removed, as they are no longer needed.
4. Remove the assumption that only one fragment can hit a 4GB boundary;
another fragment can hit the 8GB boundary, for example (a standalone
illustration of the boundary test follows the sign-offs).
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
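
For reference, a minimal standalone sketch of the boundary check this patch
centralizes as tg3_4g_overflow_test(): the test fires whenever the low 32
bits of the DMA address wrap around before the end of the buffer plus 8
bytes of slack, so it catches the 4GB, 8GB, and every other 4GB-multiple
boundary (point 4 above). The arithmetic and the 0xffffdcc0 cutoff are taken
from the patch below; the wrapper name crosses_4g_boundary() and the sample
addresses are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Standalone equivalent of the tg3_4g_overflow_test() helper added by
 * this patch: a mapping crosses a 4GB multiple exactly when its low
 * 32 address bits wrap around before the end of the buffer plus the
 * 8 bytes of slack the check allows for.
 */
static int crosses_4g_boundary(uint64_t mapping, int len)
{
	uint32_t base = (uint32_t)(mapping & 0xffffffff);

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}

int main(void)
{
	/* Spills over the 4GB mark: reported as a hit. */
	printf("%d\n", crosses_4g_boundary(0xfffff000ULL, 0x2000));
	/* Same layout at the 8GB mark: still a hit. */
	printf("%d\n", crosses_4g_boundary(0x1fffff000ULL, 0x2000));
	/* Fits inside a single 4GB window: no hit. */
	printf("%d\n", crosses_4g_boundary(0x10000000ULL, 0x2000));
	return 0;
}

The 0xffffdcc0 lower bound is copied verbatim from the driver; it appears to
skip the wrap test for base addresses that cannot overflow within a
maximum-sized buffer.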
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/tg3.c  94
1 file changed, 39 insertions(+), 55 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 06b02c59d90b..81f4aedf534c 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3442,31 +3442,47 @@ static void tg3_tx_timeout(struct net_device *dev)
 	schedule_work(&tp->reset_task);
 }
 
+/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
+static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
+{
+	u32 base = (u32) mapping & 0xffffffff;
+
+	return ((base > 0xffffdcc0) &&
+		(base + len + 8 < base));
+}
+
 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
 
 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
-				       u32 guilty_entry, int guilty_len,
-				       u32 last_plus_one, u32 *start, u32 mss)
+				       u32 last_plus_one, u32 *start,
+				       u32 base_flags, u32 mss)
 {
 	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
-	dma_addr_t new_addr;
+	dma_addr_t new_addr = 0;
 	u32 entry = *start;
-	int i;
+	int i, ret = 0;
 
 	if (!new_skb) {
-		dev_kfree_skb(skb);
-		return -1;
+		ret = -1;
+	} else {
+		/* New SKB is guaranteed to be linear. */
+		entry = *start;
+		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+					  PCI_DMA_TODEVICE);
+		/* Make sure new skb does not cross any 4G boundaries.
+		 * Drop the packet if it does.
+		 */
+		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
+			ret = -1;
+			dev_kfree_skb(new_skb);
+			new_skb = NULL;
+		} else {
+			tg3_set_txd(tp, entry, new_addr, new_skb->len,
+				    base_flags, 1 | (mss << 1));
+			*start = NEXT_TX(entry);
+		}
 	}
 
-	/* New SKB is guaranteed to be linear. */
-	entry = *start;
-	new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
-				  PCI_DMA_TODEVICE);
-	tg3_set_txd(tp, entry, new_addr, new_skb->len,
-		    (skb->ip_summed == CHECKSUM_HW) ?
-		    TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
-	*start = NEXT_TX(entry);
-
 	/* Now clean up the sw ring entries. */
 	i = 0;
 	while (entry != last_plus_one) {
@@ -3491,7 +3507,7 @@ static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 
 	dev_kfree_skb(skb);
 
-	return 0;
+	return ret;
 }
 
 static void tg3_set_txd(struct tg3 *tp, int entry,
@@ -3517,19 +3533,10 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
 	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
 }
 
-static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
-{
-	u32 base = (u32) mapping & 0xffffffff;
-
-	return ((base > 0xffffdcc0) &&
-		(base + len + 8 < base));
-}
-
 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
 	dma_addr_t mapping;
-	unsigned int i;
 	u32 len, entry, base_flags, mss;
 	int would_hit_hwbug;
 
@@ -3624,7 +3631,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	would_hit_hwbug = 0;
 
 	if (tg3_4g_overflow_test(mapping, len))
-		would_hit_hwbug = entry + 1;
+		would_hit_hwbug = 1;
 
 	tg3_set_txd(tp, entry, mapping, len, base_flags,
 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
@@ -3648,12 +3655,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		tp->tx_buffers[entry].skb = NULL;
 		pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
-		if (tg3_4g_overflow_test(mapping, len)) {
-			/* Only one should match. */
-			if (would_hit_hwbug)
-				BUG();
-			would_hit_hwbug = entry + 1;
-		}
+		if (tg3_4g_overflow_test(mapping, len))
+			would_hit_hwbug = 1;
 
 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 			tg3_set_txd(tp, entry, mapping, len,
@@ -3669,34 +3672,15 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (would_hit_hwbug) {
 		u32 last_plus_one = entry;
 		u32 start;
-		unsigned int len = 0;
-
-		would_hit_hwbug -= 1;
-		entry = entry - 1 - skb_shinfo(skb)->nr_frags;
-		entry &= (TG3_TX_RING_SIZE - 1);
-		start = entry;
-		i = 0;
-		while (entry != last_plus_one) {
-			if (i == 0)
-				len = skb_headlen(skb);
-			else
-				len = skb_shinfo(skb)->frags[i-1].size;
-
-			if (entry == would_hit_hwbug)
-				break;
 
-			i++;
-			entry = NEXT_TX(entry);
-
-		}
+		start = entry - 1 - skb_shinfo(skb)->nr_frags;
+		start &= (TG3_TX_RING_SIZE - 1);
 
 		/* If the workaround fails due to memory/mapping
 		 * failure, silently drop this packet.
 		 */
-		if (tigon3_4gb_hwbug_workaround(tp, skb,
-						entry, len,
-						last_plus_one,
-						&start, mss))
+		if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
+						&start, base_flags, mss))
 			goto out_unlock;
 
 		entry = start;
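
A note on the simplified caller in the hunk above: the new code rewinds
directly to the packet's first descriptor with start = entry - 1 - nr_frags
and masks with the ring size, so the rewind also works when the producer
index has already wrapped past the end of the ring. Below is a minimal,
hypothetical sketch of that index arithmetic; TX_RING_SIZE here merely
stands in for the driver's TG3_TX_RING_SIZE.

#include <stdio.h>

#define TX_RING_SIZE 512	/* stand-in for TG3_TX_RING_SIZE; power of two */

int main(void)
{
	/* One head descriptor plus three fragments were queued and the
	 * producer index wrapped, so 'entry' (one past the packet's
	 * last slot) now sits near the start of the ring.
	 */
	unsigned int nr_frags = 3;
	unsigned int entry = 2;
	unsigned int last_plus_one = entry;

	/* Rewind to the packet's first descriptor; the mask handles the
	 * wrap, just like '& (TG3_TX_RING_SIZE - 1)' in the patch.
	 */
	unsigned int start = (entry - 1 - nr_frags) & (TX_RING_SIZE - 1);

	printf("redo descriptors from %u up to (excluding) %u\n",
	       start, last_plus_one);	/* prints 510 and 2 */
	return 0;
}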