Diffstat (limited to 'drivers/net')

 -rw-r--r--  drivers/net/bnx2.c               | 49
 -rw-r--r--  drivers/net/bnx2.h               | 12
 -rw-r--r--  drivers/net/myri10ge/myri10ge.c  |  2
 -rw-r--r--  drivers/net/ppp_generic.c        | 30
 -rw-r--r--  drivers/net/tg3.c                | 51
 -rw-r--r--  drivers/net/tg3.h                |  8

6 files changed, 81 insertions, 71 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index db73de0d2511..652eb05a6c2d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.4.43"
-#define DRV_MODULE_RELDATE	"June 28, 2006"
+#define DRV_MODULE_VERSION	"1.4.44"
+#define DRV_MODULE_RELDATE	"August 10, 2006"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
 {
-	u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+	u32 diff;
 
+	smp_mb();
+	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
 	if (diff > MAX_TX_DESC_CNT)
 		diff = (diff & MAX_TX_DESC_CNT) - 1;
 	return (bp->tx_ring_size - diff);
@@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
 	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
 	unsigned long align;
 
-	skb = dev_alloc_skb(bp->rx_buf_size);
+	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
 	if (skb == NULL) {
 		return -ENOMEM;
 	}
@@ -1578,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
 		skb_reserve(skb, 8 - align);
 	}
 
-	skb->dev = bp->dev;
 	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
 		PCI_DMA_FROMDEVICE);
 
@@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp)
 	}
 
 	bp->tx_cons = sw_cons;
+	/* Need to make the tx_cons update visible to bnx2_start_xmit()
+	 * before checking for netif_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that bnx2_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
 
-	if (unlikely(netif_queue_stopped(bp->dev))) {
-		spin_lock(&bp->tx_lock);
+	if (unlikely(netif_queue_stopped(bp->dev)) &&
+	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+		netif_tx_lock(bp->dev);
 		if ((netif_queue_stopped(bp->dev)) &&
-		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
-
+		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
 			netif_wake_queue(bp->dev);
-		}
-		spin_unlock(&bp->tx_lock);
+		netif_tx_unlock(bp->dev);
 	}
 }
 
@@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
 			struct sk_buff *new_skb;
 
-			new_skb = dev_alloc_skb(len + 2);
+			new_skb = netdev_alloc_skb(bp->dev, len + 2);
 			if (new_skb == NULL)
 				goto reuse_rx;
 
@@ -1797,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 
 			skb_reserve(new_skb, 2);
 			skb_put(new_skb, len);
-			new_skb->dev = bp->dev;
 
 			bnx2_reuse_rx_skb(bp, skb,
 				sw_ring_cons, sw_ring_prod);
@@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
 	struct tx_bd *txbd;
 	u32 val;
 
+	bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
 	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
 
 	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
@@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 		return -EINVAL;
 
 	pkt_size = 1514;
-	skb = dev_alloc_skb(pkt_size);
+	skb = netdev_alloc_skb(bp->dev, pkt_size);
 	if (!skb)
 		return -ENOMEM;
 	packet = skb_put(skb, pkt_size);
@@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 #endif
 
 /* Called with netif_tx_lock.
- * hard_start_xmit is pseudo-lockless - a lock is only required when
- * the tx queue is full.  This way, we get the benefit of lockless
- * operations most of the time without the complexities to handle
- * netif_stop_queue/wake_queue race conditions.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
 */
 static int
 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;
 
 	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
-		spin_lock(&bp->tx_lock);
 		netif_stop_queue(dev);
-
-		if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
 			netif_wake_queue(dev);
-		spin_unlock(&bp->tx_lock);
 	}
 
 	return NETDEV_TX_OK;
@@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->pdev = pdev;
 
 	spin_lock_init(&bp->phy_lock);
-	spin_lock_init(&bp->tx_lock);
 	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
@@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->mac_addr[5] = (u8) reg;
 
 	bp->tx_ring_size = MAX_TX_DESC_CNT;
-	bnx2_set_rx_ring_size(bp, 100);
+	bnx2_set_rx_ring_size(bp, 255);
 
 	bp->rx_csum = 1;
 
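Note: the bnx2 hunks above replace the tx_lock with a lockless stop/wake handshake between the xmit path and the TX completion path. A rough sketch of that protocol follows, assuming kernel context; struct ring, ring_avail(), and wake_thresh are invented names standing in for the driver's fields, while smp_mb() and the netif_*() calls are the kernel APIs the patch itself uses.

/* Illustrative sketch only -- not driver code. */
struct ring {
	u32 prod;		/* written by the xmit path */
	u32 cons;		/* written by the completion path */
	u32 size;		/* power of two */
	u32 wake_thresh;
};

static inline u32 ring_avail(struct ring *r)
{
	smp_mb();	/* pairs with the smp_mb() in the completion path */
	return r->size - ((r->prod - r->cons) & (r->size - 1));
}

static void xmit_path(struct net_device *dev, struct ring *r)
{
	/* ... queue the packet, advance r->prod ... */
	if (ring_avail(r) <= MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		/* re-check: completions may have freed slots in between */
		if (ring_avail(r) > r->wake_thresh)
			netif_wake_queue(dev);
	}
}

static void completion_path(struct net_device *dev, struct ring *r, u32 sw_cons)
{
	r->cons = sw_cons;
	smp_mb();	/* publish cons before reading the queue state */
	if (netif_queue_stopped(dev) && ring_avail(r) > r->wake_thresh) {
		netif_tx_lock(dev);	/* serialize the final check */
		if (netif_queue_stopped(dev) &&
		    ring_avail(r) > r->wake_thresh)
			netif_wake_queue(dev);
		netif_tx_unlock(dev);
	}
}

Without the paired barriers, the completion path could read a stale queue-stopped flag (or the xmit path a stale cons) and the queue would stay stopped with no further completions left to wake it; the double-check under netif_tx_lock closes the remaining window.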
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 658c5ee95c73..fe804763c607 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3890,10 +3890,6 @@ struct bnx2 {
 	u32		tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES)));
 	u16		tx_prod;
 
-	struct tx_bd	*tx_desc_ring;
-	struct sw_bd	*tx_buf_ring;
-	int		tx_ring_size;
-
 	u16		tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
 	u16		hw_tx_cons;
 
@@ -3916,9 +3912,11 @@ struct bnx2 {
 	struct sw_bd		*rx_buf_ring;
 	struct rx_bd		*rx_desc_ring[MAX_RX_RINGS];
 
-	/* Only used to synchronize netif_stop_queue/wake_queue when tx */
-	/* ring is full */
-	spinlock_t		tx_lock;
+	/* TX constants */
+	struct tx_bd		*tx_desc_ring;
+	struct sw_bd		*tx_buf_ring;
+	int			tx_ring_size;
+	u32			tx_wake_thresh;
 
 	/* End of fields used in the performance code paths. */
 
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 06440a86baef..9bdd43ab3573 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2425,7 +2425,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
 	}
 
 	myri10ge_reset(mgp);
-	myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096);
+	myri10ge_dummy_rdma(mgp, 1);
 
 	/* Save configuration space to be restored if the
 	 * nic resets due to a parity error */
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 0ec6e9d57b94..c872f7c6cce3 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -192,7 +192,7 @@ struct cardmap {
 	void *ptr[CARDMAP_WIDTH];
 };
 static void *cardmap_get(struct cardmap *map, unsigned int nr);
-static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
+static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
 static unsigned int cardmap_find_first_free(struct cardmap *map);
 static void cardmap_destroy(struct cardmap **map);
 
@@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan)
 {
 	struct channel *pch;
 
-	pch = kmalloc(sizeof(struct channel), GFP_KERNEL);
+	pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
 	if (pch == 0)
 		return -ENOMEM;
-	memset(pch, 0, sizeof(struct channel));
 	pch->ppp = NULL;
 	pch->chan = chan;
 	chan->ppp = pch;
@@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp)
 	int ret = -ENOMEM;
 	int i;
 
-	ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
+	ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL);
 	if (!ppp)
 		goto out;
 	dev = alloc_netdev(0, "", ppp_setup);
 	if (!dev)
 		goto out1;
-	memset(ppp, 0, sizeof(struct ppp));
 
 	ppp->mru = PPP_MRU;
 	init_ppp_file(&ppp->file, INTERFACE);
@@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp)
 	}
 
 	atomic_inc(&ppp_unit_count);
-	cardmap_set(&all_ppp_units, unit, ppp);
+	ret = cardmap_set(&all_ppp_units, unit, ppp);
+	if (ret != 0)
+		goto out3;
+
 	mutex_unlock(&all_ppp_mutex);
 	*retp = 0;
 	return ppp;
 
+out3:
+	atomic_dec(&ppp_unit_count);
 out2:
 	mutex_unlock(&all_ppp_mutex);
 	free_netdev(dev);
@@ -2695,7 +2698,7 @@ static void *cardmap_get(struct cardmap *map, unsigned int nr)
 	return NULL;
 }
 
-static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
+static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
 {
 	struct cardmap *p;
 	int i;
@@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
 	if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
 		do {
 			/* need a new top level */
-			struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
-			memset(np, 0, sizeof(*np));
+			struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+			if (!np)
+				goto enomem;
 			np->ptr[0] = p;
 			if (p != NULL) {
 				np->shift = p->shift + CARDMAP_ORDER;
@@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
 	while (p->shift > 0) {
 		i = (nr >> p->shift) & CARDMAP_MASK;
 		if (p->ptr[i] == NULL) {
-			struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
-			memset(np, 0, sizeof(*np));
+			struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+			if (!np)
+				goto enomem;
 			np->shift = p->shift - CARDMAP_ORDER;
 			np->parent = p;
 			p->ptr[i] = np;
@@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
 		set_bit(i, &p->inuse);
 	else
 		clear_bit(i, &p->inuse);
+	return 0;
+ enomem:
+	return -ENOMEM;
 }
 
 static unsigned int cardmap_find_first_free(struct cardmap *map)
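Note: two idioms recur in the ppp_generic hunks above. kmalloc()+memset() collapses into kzalloc(), and cardmap_set() gains a return value so an allocation failure propagates up instead of the old code memset()ing an unchecked (possibly NULL) pointer. A minimal sketch of the pattern, with invented names (tree_set, register_unit, unit_count) standing in for the real functions:

/* Illustrative sketch only; kernel context assumed. */
static atomic_t unit_count = ATOMIC_INIT(0);

static int tree_set(struct node **root, unsigned int nr, void *ptr)
{
	struct node *np = kzalloc(sizeof(*np), GFP_KERNEL);	/* zeroed */

	if (!np)
		return -ENOMEM;	/* old code would memset() NULL here */
	/* ... link np under *root and record ptr ... */
	return 0;
}

static int register_unit(struct node **root, unsigned int nr, void *ptr)
{
	int ret;

	atomic_inc(&unit_count);
	ret = tree_set(root, nr, ptr);
	if (ret)
		atomic_dec(&unit_count);	/* undo the side effect */
	return ret;
}

This mirrors how ppp_create_interface() now undoes atomic_inc(&ppp_unit_count) via the new out3 label when cardmap_set() fails.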
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6f97962dd06b..eafabb253f08 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.64"
-#define DRV_MODULE_RELDATE	"July 31, 2006"
+#define DRV_MODULE_VERSION	"3.65"
+#define DRV_MODULE_RELDATE	"August 07, 2006"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -123,9 +123,6 @@
 				 TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
 				 TG3_TX_RING_SIZE)
-#define TX_BUFFS_AVAIL(TP)						\
-	((TP)->tx_pending -						\
-	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
 #define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
@@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp)
 	spin_unlock(&tp->lock);
 }
 
+static inline u32 tg3_tx_avail(struct tg3 *tp)
+{
+	smp_mb();
+	return (tp->tx_pending -
+		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+}
+
 /* Tigon3 never reports partial packet sends.  So we do not
  * need special logic to handle SKBs that have not had all
  * of their frags sent yet, like SunGEM does.
@@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp)
 
 	tp->tx_cons = sw_idx;
 
-	if (unlikely(netif_queue_stopped(tp->dev))) {
-		spin_lock(&tp->tx_lock);
+	/* Need to make the tx_cons update visible to tg3_start_xmit()
+	 * before checking for netif_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that tg3_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_queue_stopped(tp->dev) &&
+		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
+		netif_tx_lock(tp->dev);
 		if (netif_queue_stopped(tp->dev) &&
-		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
+		netif_tx_unlock(tp->dev);
 	}
 }
 
@@ -3101,7 +3113,6 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
 	if (skb == NULL)
 		return -ENOMEM;
 
-	skb->dev = tp->dev;
 	skb_reserve(skb, tp->rx_offset);
 
 	mapping = pci_map_single(tp->pdev, skb->data,
@@ -3274,7 +3285,6 @@ static int tg3_rx(struct tg3 *tp, int budget)
 			if (copy_skb == NULL)
 				goto drop_it_no_recycle;
 
-			copy_skb->dev = tp->dev;
 			skb_reserve(copy_skb, 2);
 			skb_put(copy_skb, len);
 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
@@ -3797,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
@@ -3893,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
-		spin_lock(&tp->tx_lock);
+	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
@@ -3920,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 	struct sk_buff *segs, *nskb;
 
 	/* Estimate the number of fragments in the worst case */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
 		netif_stop_queue(tp->dev);
 		return NETDEV_TX_BUSY;
 	}
@@ -3960,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either.  Rejoice!
 	 */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
@@ -4110,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
-		spin_lock(&tp->tx_lock);
+	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
@@ -11474,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
 #endif
 	spin_lock_init(&tp->lock);
-	spin_lock_init(&tp->tx_lock);
 	spin_lock_init(&tp->indirect_lock);
 	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
 
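Note: the new tg3_tx_avail() inline folds the old TX_BUFFS_AVAIL() macro's ring arithmetic behind the added barrier. A worked example of that arithmetic, with illustrative values assuming TG3_TX_RING_SIZE = 512 (tx_pending is the configured queue depth, which may be smaller than the ring):

	tx_pending = 512, tx_prod = 515, tx_cons = 510:
	    in flight  = (515 - 510) & 511 = 5
	    available  = 512 - 5 = 507

	Wraparound of the free-running unsigned indices is harmless,
	e.g. tx_prod = 3, tx_cons = 0xFFFFFFFE:
	    in flight  = (3 - 0xFFFFFFFE) & 511 = 5
	    available  = 507

Masking with (TG3_TX_RING_SIZE - 1) requires the ring size to be a power of two, which is why the macro-to-inline conversion keeps the same expression and only adds the smp_mb().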
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ba2c98711c88..3ecf356cfb08 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2079,9 +2079,9 @@ struct tg3 {
 	 * lock: Held during reset, PHY access, timer, and when
 	 *       updating tg3_flags and tg3_flags2.
 	 *
-	 * tx_lock: Held during tg3_start_xmit and tg3_tx only
-	 *          when calling netif_[start|stop]_queue.
-	 *          tg3_start_xmit is protected by netif_tx_lock.
+	 * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
+	 *                netif_tx_lock when it needs to call
+	 *                netif_wake_queue.
 	 *
 	 * Both of these locks are to be held with BH safety.
 	 *
@@ -2118,8 +2118,6 @@ struct tg3 {
 	u32				tx_cons;
 	u32				tx_pending;
 
-	spinlock_t			tx_lock;
-
 	struct tg3_tx_buffer_desc	*tx_ring;
 	struct tx_ring_info		*tx_buffers;
 	dma_addr_t			tx_desc_mapping;
