Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/tg3.c	108
1 file changed, 47 insertions(+), 61 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7599f52e15b3..81f4aedf534c 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.39"
-#define DRV_MODULE_RELDATE	"September 5, 2005"
+#define DRV_MODULE_VERSION	"3.40"
+#define DRV_MODULE_RELDATE	"September 15, 2005"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -3442,31 +3442,47 @@ static void tg3_tx_timeout(struct net_device *dev)
 	schedule_work(&tp->reset_task);
 }
 
+/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
+static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
+{
+	u32 base = (u32) mapping & 0xffffffff;
+
+	return ((base > 0xffffdcc0) &&
+		(base + len + 8 < base));
+}
+
 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
 
 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
-				       u32 guilty_entry, int guilty_len,
-				       u32 last_plus_one, u32 *start, u32 mss)
+				       u32 last_plus_one, u32 *start,
+				       u32 base_flags, u32 mss)
 {
 	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
-	dma_addr_t new_addr;
+	dma_addr_t new_addr = 0;
 	u32 entry = *start;
-	int i;
+	int i, ret = 0;
 
 	if (!new_skb) {
-		dev_kfree_skb(skb);
-		return -1;
+		ret = -1;
+	} else {
+		/* New SKB is guaranteed to be linear. */
+		entry = *start;
+		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+					  PCI_DMA_TODEVICE);
+		/* Make sure new skb does not cross any 4G boundaries.
+		 * Drop the packet if it does.
+		 */
+		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
+			ret = -1;
+			dev_kfree_skb(new_skb);
+			new_skb = NULL;
+		} else {
+			tg3_set_txd(tp, entry, new_addr, new_skb->len,
+				    base_flags, 1 | (mss << 1));
+			*start = NEXT_TX(entry);
+		}
 	}
 
-	/* New SKB is guaranteed to be linear. */
-	entry = *start;
-	new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
-				  PCI_DMA_TODEVICE);
-	tg3_set_txd(tp, entry, new_addr, new_skb->len,
-		    (skb->ip_summed == CHECKSUM_HW) ?
-		    TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
-	*start = NEXT_TX(entry);
-
 	/* Now clean up the sw ring entries. */
 	i = 0;
 	while (entry != last_plus_one) {
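As an aside, here is a minimal standalone sketch of the boundary test introduced above. It is not driver code: the helper name crosses_4g_boundary() and the sample addresses are made up for illustration, and the extra 8 bytes simply mirror the margin used in the driver's check. The test fires only when the low 32 bits of the DMA mapping would wrap while covering the buffer, i.e. when the buffer straddles a 4 GB boundary.

/* Userspace illustration of the tg3_4g_overflow_test() arithmetic. */
#include <stdio.h>
#include <stdint.h>

static int crosses_4g_boundary(uint64_t mapping, int len)
{
	uint32_t base = (uint32_t)mapping;	/* low 32 bits of the DMA address */

	/* If (base + len + 8) wraps in 32-bit arithmetic, the end of the
	 * buffer lands in the next 4 GB window.  The base > 0xffffdcc0
	 * pre-check limits this to mappings near the top of a window.
	 */
	return (base > 0xffffdcc0u) && ((uint32_t)(base + len + 8) < base);
}

int main(void)
{
	/* Just below a 4 GB boundary: a 1500-byte frame spills over. */
	printf("%d\n", crosses_4g_boundary(0x00000000fffffc00ULL, 1500));	/* prints 1 */
	/* Comfortably inside one window: no overflow. */
	printf("%d\n", crosses_4g_boundary(0x0000000080000000ULL, 1500));	/* prints 0 */
	return 0;
}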
@@ -3491,7 +3507,7 @@ static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 
 	dev_kfree_skb(skb);
 
-	return 0;
+	return ret;
 }
 
 static void tg3_set_txd(struct tg3 *tp, int entry,
@@ -3517,19 +3533,10 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
 	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
 }
 
-static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
-{
-	u32 base = (u32) mapping & 0xffffffff;
-
-	return ((base > 0xffffdcc0) &&
-		(base + len + 8 < base));
-}
-
 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
 	dma_addr_t mapping;
-	unsigned int i;
 	u32 len, entry, base_flags, mss;
 	int would_hit_hwbug;
 
@@ -3624,7 +3631,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	would_hit_hwbug = 0;
 
 	if (tg3_4g_overflow_test(mapping, len))
-		would_hit_hwbug = entry + 1;
+		would_hit_hwbug = 1;
 
 	tg3_set_txd(tp, entry, mapping, len, base_flags,
 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
@@ -3648,12 +3655,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		tp->tx_buffers[entry].skb = NULL;
 		pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
 
-		if (tg3_4g_overflow_test(mapping, len)) {
-			/* Only one should match. */
-			if (would_hit_hwbug)
-				BUG();
-			would_hit_hwbug = entry + 1;
-		}
+		if (tg3_4g_overflow_test(mapping, len))
+			would_hit_hwbug = 1;
 
 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 			tg3_set_txd(tp, entry, mapping, len,
@@ -3669,34 +3672,15 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (would_hit_hwbug) {
 		u32 last_plus_one = entry;
 		u32 start;
-		unsigned int len = 0;
-
-		would_hit_hwbug -= 1;
-		entry = entry - 1 - skb_shinfo(skb)->nr_frags;
-		entry &= (TG3_TX_RING_SIZE - 1);
-		start = entry;
-		i = 0;
-		while (entry != last_plus_one) {
-			if (i == 0)
-				len = skb_headlen(skb);
-			else
-				len = skb_shinfo(skb)->frags[i-1].size;
 
-			if (entry == would_hit_hwbug)
-				break;
-
-			i++;
-			entry = NEXT_TX(entry);
-
-		}
+		start = entry - 1 - skb_shinfo(skb)->nr_frags;
+		start &= (TG3_TX_RING_SIZE - 1);
 
 		/* If the workaround fails due to memory/mapping
 		 * failure, silently drop this packet.
 		 */
-		if (tigon3_4gb_hwbug_workaround(tp, skb,
-						entry, len,
-						last_plus_one,
-						&start, mss))
+		if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
+						&start, base_flags, mss))
 			goto out_unlock;
 
 		entry = start;
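For reference, the rewritten block above locates the packet's first descriptor by stepping back (1 + nr_frags) slots from the slot just past its last descriptor, and relies on the power-of-two ring size so a bitwise AND handles wraparound. A minimal userspace sketch of that index arithmetic (the ring size mirrors TG3_TX_RING_SIZE, but the slot numbers below are illustrative, not driver state):

#include <stdio.h>
#include <stdint.h>

#define TX_RING_SIZE 512u			/* power of two, like TG3_TX_RING_SIZE */
#define NEXT_TX(n)   (((n) + 1) & (TX_RING_SIZE - 1))

int main(void)
{
	uint32_t entry = 2;			/* slot just past the packet's last descriptor */
	uint32_t nr_frags = 3;			/* head + 3 fragments occupied 4 slots */
	uint32_t start = (entry - 1 - nr_frags) & (TX_RING_SIZE - 1);

	/* The subtraction wraps below zero; the mask folds it back into the ring. */
	printf("first descriptor slot: %u\n", start);		/* 510 */
	printf("slot after it:         %u\n", NEXT_TX(start));	/* 511 */
	return 0;
}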
@@ -9271,6 +9255,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	static struct pci_device_id write_reorder_chipsets[] = {
 		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
 			     PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
+			     PCI_DEVICE_ID_AMD_K8_NB) },
 		{ },
 	};
 	u32 misc_ctrl_reg;
@@ -9285,7 +9271,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
 #endif
 
-	/* If we have an AMD 762 chipset, write
+	/* If we have an AMD 762 or K8 chipset, write
 	 * reordering to the mailbox registers done by the host
 	 * controller can cause major troubles.  We read back from
 	 * every mailbox register write to force the writes to be
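The comment above describes the usual cure for host-bridge write reordering: follow every mailbox write with a read-back so the posted write is forced out before a later write can overtake it. A simplified, hedged sketch of that pattern only; mbox_write_flush() is a made-up name rather than the driver's actual helper, while writel()/readl() are the standard kernel MMIO accessors:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: write a mailbox register, then read it back so the
 * posted write cannot be reordered behind a later write by the host
 * bridge (e.g. AMD 762 or K8 northbridges, as named in the table above).
 */
static inline void mbox_write_flush(void __iomem *mbox, u32 val)
{
	writel(val, mbox);	/* post the mailbox write */
	readl(mbox);		/* read back to flush and order it */
}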
@@ -9532,7 +9518,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
 
 		iounmap(tp->regs);
-		tp->regs = 0;
+		tp->regs = NULL;
 
 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
 		pci_cmd &= ~PCI_COMMAND_MEMORY;
@@ -10680,7 +10666,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 err_out_iounmap:
 	if (tp->regs) {
 		iounmap(tp->regs);
-		tp->regs = 0;
+		tp->regs = NULL;
 	}
 
 err_out_free_dev:
@@ -10705,7 +10691,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
 	unregister_netdev(dev);
 	if (tp->regs) {
 		iounmap(tp->regs);
-		tp->regs = 0;
+		tp->regs = NULL;
 	}
 	free_netdev(dev);
 	pci_release_regions(pdev);